Example usage for com.google.common.util.concurrent MoreExecutors listeningDecorator

Introduction

This page collects example usages of com.google.common.util.concurrent.MoreExecutors#listeningDecorator.

Prototype

@GwtIncompatible("TODO")
public static ListeningScheduledExecutorService listeningDecorator(ScheduledExecutorService delegate) 

Document

Creates a ScheduledExecutorService whose submit and invokeAll methods submit ListenableFutureTask instances to the given delegate executor. An overload that wraps a plain ExecutorService and returns a ListeningExecutorService also exists; most of the examples below use that form.
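
Before the real-world examples, here is a minimal, self-contained sketch of the scheduled variant in action. It is illustrative only, not drawn from any of the projects below:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningScheduledExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

public class ListeningDecoratorSketch {
    public static void main(String[] args) throws Exception {
        // Wrap a plain ScheduledExecutorService so that submit() returns ListenableFutures.
        ScheduledExecutorService delegate = Executors.newSingleThreadScheduledExecutor();
        ListeningScheduledExecutorService service = MoreExecutors.listeningDecorator(delegate);

        ListenableFuture<String> future = service.submit(() -> "done");
        // A completion listener; directExecutor() runs it on the thread that completes the future.
        future.addListener(() -> System.out.println("task finished"), MoreExecutors.directExecutor());

        System.out.println(future.get());
        service.shutdown();
        service.awaitTermination(5, TimeUnit.SECONDS);
    }
}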

Usage

From source file: org.apache.tez.runtime.library.common.writers.UnorderedPartitionedKVWriter.java

public UnorderedPartitionedKVWriter(OutputContext outputContext, Configuration conf, int numOutputs,
        long availableMemoryBytes) throws IOException {
    super(outputContext, conf, numOutputs);

    Preconditions.checkArgument(availableMemoryBytes >= 0, "availableMemory should be >= 0 bytes");

    this.destNameTrimmed = TezUtilsInternal.cleanVertexName(outputContext.getDestinationVertexName());
    //Not checking for TEZ_RUNTIME_ENABLE_FINAL_MERGE_IN_OUTPUT as it might not add much value in
    // this case.  Add it later if needed.
    boolean pipelinedShuffleConf = this.conf.getBoolean(
            TezRuntimeConfiguration.TEZ_RUNTIME_PIPELINED_SHUFFLE_ENABLED,
            TezRuntimeConfiguration.TEZ_RUNTIME_PIPELINED_SHUFFLE_ENABLED_DEFAULT);
    this.isFinalMergeEnabled = conf.getBoolean(TezRuntimeConfiguration.TEZ_RUNTIME_ENABLE_FINAL_MERGE_IN_OUTPUT,
            TezRuntimeConfiguration.TEZ_RUNTIME_ENABLE_FINAL_MERGE_IN_OUTPUT_DEFAULT);
    this.pipelinedShuffle = pipelinedShuffleConf && !isFinalMergeEnabled;
    this.finalEvents = Lists.newLinkedList();

    if (availableMemoryBytes == 0) {
        Preconditions.checkArgument(((numPartitions == 1) && !pipelinedShuffle),
                "availableMemory " + "can be set to 0 only when numPartitions=1 and "
                        + TezRuntimeConfiguration.TEZ_RUNTIME_PIPELINED_SHUFFLE_ENABLED
                        + " is disabled. current numPartitions=" + numPartitions + ", "
                        + TezRuntimeConfiguration.TEZ_RUNTIME_PIPELINED_SHUFFLE_ENABLED + "="
                        + pipelinedShuffle);
    }

    // Ideally, should be significantly larger.
    availableMemory = availableMemoryBytes;

    // Allow unit tests to control the buffer sizes.
    int maxSingleBufferSizeBytes = conf.getInt(
            TezRuntimeConfiguration.TEZ_RUNTIME_UNORDERED_OUTPUT_MAX_PER_BUFFER_SIZE_BYTES, Integer.MAX_VALUE);
    computeNumBuffersAndSize(maxSingleBufferSizeBytes);

    availableBuffers = new LinkedBlockingQueue<WrappedBuffer>();
    buffers = new WrappedBuffer[numBuffers];
    // Set up only the first buffer to start with.
    buffers[0] = new WrappedBuffer(numOutputs, sizePerBuffer);
    numInitializedBuffers = 1;
    if (LOG.isDebugEnabled()) {
        LOG.debug(destNameTrimmed + ": " + "Initializing Buffer #" + numInitializedBuffers + " with size="
                + sizePerBuffer);
    }
    currentBuffer = buffers[0];
    baos = new ByteArrayOutputStream();
    dos = new NonSyncDataOutputStream(baos);
    keySerializer.open(dos);
    valSerializer.open(dos);
    rfs = ((LocalFileSystem) FileSystem.getLocal(this.conf)).getRaw();

    int maxThreads = Math.max(2, numBuffers / 2);
    //TODO: Make use of TezSharedExecutor later
    ExecutorService executor = new ThreadPoolExecutor(1, maxThreads, 60L, TimeUnit.SECONDS,
            new SynchronousQueue<Runnable>(),
            new ThreadFactoryBuilder().setDaemon(true).setNameFormat("UnorderedOutSpiller {"
                    + TezUtilsInternal.cleanVertexName(outputContext.getDestinationVertexName()) + "} #%d")
                    .build());
    // to restrict submission of more tasks than threads (e.g numBuffers > numThreads)
    // This is maxThreads - 1, to avoid race between callback thread releasing semaphore and the
    // thread calling tryAcquire.
    availableSlots = new Semaphore(maxThreads - 1, true);
    spillExecutor = MoreExecutors.listeningDecorator(executor);
    numRecordsPerPartition = new int[numPartitions];
    reportPartitionStats = ReportPartitionStats
            .fromString(conf.get(TezRuntimeConfiguration.TEZ_RUNTIME_REPORT_PARTITION_STATS,
                    TezRuntimeConfiguration.TEZ_RUNTIME_REPORT_PARTITION_STATS_DEFAULT));
    sizePerPartition = (reportPartitionStats.isEnabled()) ? new long[numPartitions] : null;

    outputLargeRecordsCounter = outputContext.getCounters().findCounter(TaskCounter.OUTPUT_LARGE_RECORDS);

    indexFileSizeEstimate = numPartitions * Constants.MAP_OUTPUT_INDEX_RECORD_LENGTH;

    if (numPartitions == 1 && !pipelinedShuffle) {
        // Special case: only one partition is available.
        finalOutPath = outputFileHandler.getOutputFileForWrite();
        finalIndexPath = outputFileHandler.getOutputIndexFileForWrite(indexFileSizeEstimate);
        skipBuffers = true;
        writer = new IFile.Writer(conf, rfs, finalOutPath, keyClass, valClass, codec, outputRecordsCounter,
                outputRecordBytesCounter);
    } else {
        skipBuffers = false;
        writer = null;
    }
    LOG.info(destNameTrimmed + ": numBuffers=" + numBuffers + ", sizePerBuffer=" + sizePerBuffer
            + ", skipBuffers=" + skipBuffers + ", numPartitions=" + numPartitions + ", availableMemory="
            + availableMemory + ", maxSingleBufferSizeBytes=" + maxSingleBufferSizeBytes + ", pipelinedShuffle="
            + pipelinedShuffle + ", isFinalMergeEnabled=" + isFinalMergeEnabled
            + ", reportPartitionStats=" + reportPartitionStats);
}
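
The Semaphore above (sized maxThreads - 1 to avoid a race between the callback thread releasing a permit and another thread calling tryAcquire) caps how many spill tasks can be in flight at once. Here is a minimal, self-contained sketch of that submit-side pattern; the class, method names, and pool sizes are illustrative, not from Tez:

import java.util.concurrent.Executors;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

public class BoundedSubmitSketch {
    private final ListeningExecutorService pool =
            MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(4));
    // maxThreads - 1 permits, mirroring the constructor above.
    private final Semaphore slots = new Semaphore(3, true);

    /** Returns false when every slot is busy, so the caller can defer the work. */
    boolean trySubmit(final Runnable work) {
        if (!slots.tryAcquire()) {
            return false;
        }
        ListenableFuture<?> f = pool.submit(work);
        // Release the permit from the completion callback, on success or failure.
        Futures.addCallback(f, new FutureCallback<Object>() {
            @Override public void onSuccess(Object result) { slots.release(); }
            @Override public void onFailure(Throwable t) { slots.release(); }
        }, MoreExecutors.directExecutor());
        return true;
    }

    public static void main(String[] args) throws InterruptedException {
        BoundedSubmitSketch sketch = new BoundedSubmitSketch();
        for (int i = 0; i < 8; i++) {
            System.out.println("submitted: " + sketch.trySubmit(() -> sleepQuietly(100)));
        }
        sketch.pool.shutdown();
        sketch.pool.awaitTermination(5, TimeUnit.SECONDS);
    }

    private static void sleepQuietly(long millis) {
        try { Thread.sleep(millis); } catch (InterruptedException ignored) { }
    }
}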

From source file: io.druid.indexing.kafka.supervisor.KafkaSupervisor.java

public KafkaSupervisor(final TaskStorage taskStorage, final TaskMaster taskMaster,
        final IndexerMetadataStorageCoordinator indexerMetadataStorageCoordinator,
        final KafkaIndexTaskClientFactory taskClientFactory, final ObjectMapper mapper,
        final KafkaSupervisorSpec spec) {
    this.taskStorage = taskStorage;
    this.taskMaster = taskMaster;
    this.indexerMetadataStorageCoordinator = indexerMetadataStorageCoordinator;
    this.sortingMapper = mapper.copy().configure(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY, true);
    this.spec = spec;

    this.dataSource = spec.getDataSchema().getDataSource();
    this.ioConfig = spec.getIoConfig();
    this.tuningConfig = spec.getTuningConfig();
    this.taskTuningConfig = KafkaTuningConfig.copyOf(this.tuningConfig);
    this.supervisorId = String.format("KafkaSupervisor-%s", dataSource);
    this.exec = Execs.singleThreaded(supervisorId);
    this.scheduledExec = Execs.scheduledSingleThreaded(supervisorId + "-Scheduler-%d");

    int workerThreads = (this.tuningConfig.getWorkerThreads() != null ? this.tuningConfig.getWorkerThreads()
            : Math.min(10, this.ioConfig.getTaskCount()));
    this.workerExec = MoreExecutors
            .listeningDecorator(Execs.multiThreaded(workerThreads, supervisorId + "-Worker-%d"));
    log.info("Created worker pool with [%d] threads for dataSource [%s]", workerThreads, this.dataSource);

    this.taskInfoProvider = new TaskInfoProvider() {
        @Override
        public TaskLocation getTaskLocation(final String id) {
            Preconditions.checkNotNull(id, "id");
            Optional<TaskRunner> taskRunner = taskMaster.getTaskRunner();
            if (taskRunner.isPresent()) {
                Optional<? extends TaskRunnerWorkItem> item = Iterables
                        .tryFind(taskRunner.get().getRunningTasks(), new Predicate<TaskRunnerWorkItem>() {
                            @Override
                            public boolean apply(TaskRunnerWorkItem taskRunnerWorkItem) {
                                return id.equals(taskRunnerWorkItem.getTaskId());
                            }
                        });

                if (item.isPresent()) {
                    return item.get().getLocation();
                }
            } else {
                log.error("Failed to get task runner because I'm not the leader!");
            }

            return TaskLocation.unknown();
        }

        @Override
        public Optional<TaskStatus> getTaskStatus(String id) {
            return taskStorage.getStatus(id);
        }
    };

    int chatThreads = (this.tuningConfig.getChatThreads() != null ? this.tuningConfig.getChatThreads()
            : Math.min(10, this.ioConfig.getTaskCount() * this.ioConfig.getReplicas()));
    this.taskClient = taskClientFactory.build(taskInfoProvider, dataSource, chatThreads,
            this.tuningConfig.getHttpTimeout(), this.tuningConfig.getChatRetries());
    log.info("Created taskClient with dataSource[%s] chatThreads[%d] httpTimeout[%s] chatRetries[%d]",
            dataSource, chatThreads, this.tuningConfig.getHttpTimeout(), this.tuningConfig.getChatRetries());
}

From source file: com.ngdata.hbaseindexer.master.IndexerMaster.java

private void startFullIndexBuild(final String indexerName) {
    try {
        String lock = indexerModel.lockIndexer(indexerName);
        try {
            // Re-read the indexer definition and make sure it is still current
            final IndexerDefinition indexer = indexerModel.getFreshIndexer(indexerName);
            IndexerDefinitionBuilder updatedIndexer = new IndexerDefinitionBuilder().startFrom(indexer);
            final String[] batchArguments = createBatchArguments(indexer);
            if (needsBatchBuildStart(indexer)) {
                final ListeningExecutorService executor = MoreExecutors
                        .listeningDecorator(Executors.newSingleThreadExecutor());
                ListenableFuture<Integer> future = executor.submit(new Callable<Integer>() {
                    @Override
                    public Integer call() throws Exception {
                        HBaseMapReduceIndexerTool tool = new HBaseMapReduceIndexerTool();
                        tool.setConf(hbaseConf);
                        return tool.run(batchArguments,
                                new IndexerDefinitionUpdaterJobProgressCallback(indexerName));
                    }
                });

                Futures.addCallback(future, new FutureCallback<Integer>() {
                    @Override
                    public void onSuccess(Integer exitCode) {
                        markBatchBuildCompleted(indexerName, exitCode == 0);
                        executor.shutdownNow();
                    }

                    @Override
                    public void onFailure(Throwable throwable) {
                        log.error("batch index build failed", throwable);
                        markBatchBuildCompleted(indexerName, false);
                        executor.shutdownNow();
                    }
                });

                BatchBuildInfo jobInfo = new BatchBuildInfo(System.currentTimeMillis(), null, null,
                        batchArguments);
                updatedIndexer.activeBatchBuildInfo(jobInfo).batchIndexingState(BatchIndexingState.BUILDING)
                        .batchIndexCliArguments(null).build();

                indexerModel.updateIndexerInternal(updatedIndexer.build());

                log.info("Started batch index build for index " + indexerName);

            }
        } finally {
            indexerModel.unlockIndexer(lock);
        }
    } catch (Throwable t) {
        log.error("Error trying to start index build job for index " + indexerName, t);
    }
}
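
A note on API drift: the two-argument Futures.addCallback(future, callback) used above was deprecated in later Guava releases in favor of a three-argument form that names the callback's executor explicitly. Here is a minimal sketch of the same one-shot pattern against the newer API; the trivial Callable is a stand-in for the batch job:

import java.util.concurrent.Executors;

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

public class OneShotBuildSketch {
    public static void main(String[] args) {
        final ListeningExecutorService executor =
                MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor());
        ListenableFuture<Integer> future = executor.submit(() -> 0); // stand-in for the batch job

        Futures.addCallback(future, new FutureCallback<Integer>() {
            @Override
            public void onSuccess(Integer exitCode) {
                System.out.println("exit code " + exitCode);
                executor.shutdownNow(); // one-shot executor: tear it down with the job
            }

            @Override
            public void onFailure(Throwable t) {
                t.printStackTrace();
                executor.shutdownNow();
            }
        }, MoreExecutors.directExecutor());
    }
}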

From source file: org.nuxeo.elasticsearch.ElasticSearchComponent.java

protected void initListenerThreadPool() {
    waiterExecutorService = MoreExecutors
            .listeningDecorator(Executors.newCachedThreadPool(new NamedThreadFactory()));
}

From source file: com.splout.db.qnode.Deployer.java

/**
 * The Deployer deals with deploy and switch version requests.
 */
public Deployer(QNodeHandlerContext context) {
    super(context);
    deployExecutor = MoreExecutors.listeningDecorator(
            Executors.newCachedThreadPool(new ThreadFactoryBuilder().setNameFormat("deploy-%d").build()));
}

From source file: flipkart.lego.engine.Lego.java

private ListeningExecutorService getListeningExecutorService(ExecutorService executorService) {
    return MoreExecutors.listeningDecorator(executorService);
}

From source file: com.google.cloud.dataflow.sdk.io.FileBasedSource.java

@Override
public final List<? extends FileBasedSource<T>> splitIntoBundles(long desiredBundleSizeBytes,
        PipelineOptions options) throws Exception {
    // This implementation of method splitIntoBundles is provided to simplify subclasses. Here we
    // split a FileBasedSource based on a file pattern to FileBasedSources based on full single
    // files. For files that can be efficiently seeked, we further split FileBasedSources based on
    // those files to FileBasedSources based on sub ranges of single files.

    if (mode == Mode.FILEPATTERN) {
        long startTime = System.currentTimeMillis();
        List<ListenableFuture<List<? extends FileBasedSource<T>>>> futures = new ArrayList<>();

        ListeningExecutorService service = MoreExecutors
                .listeningDecorator(Executors.newFixedThreadPool(THREAD_POOL_SIZE));
        try {
            for (final String file : FileBasedSource.expandFilePattern(fileOrPatternSpec)) {
                futures.add(createFutureForFileSplit(file, desiredBundleSizeBytes, options, service));
            }
            List<? extends FileBasedSource<T>> splitResults = ImmutableList
                    .copyOf(Iterables.concat(Futures.allAsList(futures).get()));
            LOG.debug("Splitting the source based on file pattern " + fileOrPatternSpec + " took "
                    + (System.currentTimeMillis() - startTime) + " ms");
            return splitResults;
        } finally {
            service.shutdown();
        }
    } else {
        if (isSplittable()) {
            List<FileBasedSource<T>> splitResults = new ArrayList<>();
            for (OffsetBasedSource<T> split : super.splitIntoBundles(desiredBundleSizeBytes, options)) {
                splitResults.add((FileBasedSource<T>) split);
            }
            return splitResults;
        } else {
            LOG.debug("The source for file " + fileOrPatternSpec
                    + " is not split into sub-range based sources since the file is not seekable");
            return ImmutableList.of(this);
        }
    }
}
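
The pattern above (submit one future per file, gather the results with Futures.allAsList, shut the pool down in a finally block) is worth isolating. Here is a minimal sketch with illustrative inputs:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Executors;

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

public class FanOutSketch {
    public static void main(String[] args) throws Exception {
        ListeningExecutorService service =
                MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(4));
        try {
            List<ListenableFuture<Integer>> futures = new ArrayList<>();
            for (final String item : new String[] {"a", "bb", "ccc"}) {
                futures.add(service.submit(() -> item.length())); // one task per item
            }
            // allAsList completes once every input future has, preserving order;
            // it fails fast if any input future fails.
            List<Integer> lengths = Futures.allAsList(futures).get();
            System.out.println(lengths); // [1, 2, 3]
        } finally {
            service.shutdown();
        }
    }
}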

From source file: com.vmware.photon.controller.core.Main.java

/**
 * Creates a new Deployer Service Group.
 *
 * @param deployerConfig
 * @param apiFeServerSet
 * @param cloudStoreServerSet
 * @param httpClient
 * @return
 */
private static DeployerServiceGroup createDeployerServiceGroup(PhotonControllerConfig photonControllerConfig,
        DeployerConfig deployerConfig, ServerSet apiFeServerSet, ServerSet cloudStoreServerSet,
        CloseableHttpAsyncClient httpClient) {

    logger.info("Creating Deployer Service Group");

    // Set containers config to deployer config
    try {
        deployerConfig.setContainersConfig(new ServiceConfigurator()
                .generateContainersConfig(deployerConfig.getDeployerContext().getConfigDirectory()));
    } catch (Exception e) {
        throw new RuntimeException(e);
    }

    final DockerProvisionerFactory dockerProvisionerFactory = new com.vmware.photon.controller.core.Main.DockerProvisionerFactoryImpl();
    final ApiClientFactory apiClientFactory = new ApiClientFactory(apiFeServerSet, httpClient,
            deployerConfig.getDeployerContext().getSharedSecret(),
            deployerConfig.getDeployerContext().isAuthEnabled());

    /**
     * The blocking queue associated with the thread pool executor service
     * controls the rejection policy for new work items: a bounded queue, such as
     * an ArrayBlockingQueue, will cause new work items to be rejected (and thus
     * failed) when the queue length is reached. A LinkedBlockingQueue, which is
     * unbounded, is used here in order to enable the submission of an arbitrary
     * number of work items since this is the pattern expected for the deployer
     * (a large number of work items arrive all at once, and then no more).
     */
    final BlockingQueue<Runnable> blockingQueue = new LinkedBlockingDeque<>();
    final ListeningExecutorService listeningExecutorService = MoreExecutors
            .listeningDecorator(new ThreadPoolExecutor(deployerConfig.getDeployerContext().getCorePoolSize(),
                    deployerConfig.getDeployerContext().getMaximumPoolSize(),
                    deployerConfig.getDeployerContext().getKeepAliveTime(), TimeUnit.SECONDS, blockingQueue));

    final HttpFileServiceClientFactory httpFileServiceClientFactory = new com.vmware.photon.controller.core.Main.HttpFileServiceClientFactoryImpl();
    final AuthHelperFactory authHelperFactory = new com.vmware.photon.controller.core.Main.AuthHelperFactoryImpl();
    final HealthCheckHelperFactory healthCheckHelperFactory = new com.vmware.photon.controller.core.Main.HealthCheckHelperFactoryImpl();
    final ServiceConfiguratorFactory serviceConfiguratorFactory = new com.vmware.photon.controller.core.Main.ServiceConfiguratorFactoryImpl();
    final ZookeeperClientFactory zookeeperServerSetBuilderFactory = new com.vmware.photon.controller.core.Main.ZookeeperClientFactoryImpl();
    final HostManagementVmAddressValidatorFactory hostManagementVmAddressValidatorFactory = new com.vmware.photon.controller.core.Main.HostManagementVmAddressValidatorFactoryImpl();

    final ClusterManagerFactory clusterManagerFactory = new ClusterManagerFactory(listeningExecutorService,
            httpClient, apiFeServerSet, deployerConfig.getDeployerContext().getSharedSecret(),
            cloudStoreServerSet,
            Paths.get(deployerConfig.getDeployerContext().getScriptDirectory(), CLUSTER_SCRIPTS_DIRECTORY)
                    .toString(),
            deployerConfig.getDeployerContext().isAuthEnabled());

    return new DeployerServiceGroup(deployerConfig.getDeployerContext(), dockerProvisionerFactory,
            apiClientFactory, deployerConfig.getContainersConfig(), listeningExecutorService,
            httpFileServiceClientFactory, authHelperFactory, healthCheckHelperFactory,
            serviceConfiguratorFactory, zookeeperServerSetBuilderFactory,
            hostManagementVmAddressValidatorFactory, clusterManagerFactory);
}
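
The javadoc above contrasts bounded and unbounded queues. Here is a minimal sketch of the bounded-queue rejection behavior it describes; the pool and queue sizes are illustrative:

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

public class QueuePolicySketch {
    public static void main(String[] args) {
        // One thread, one queued slot: a third concurrent submission cannot be accepted.
        ListeningExecutorService service = MoreExecutors.listeningDecorator(
                new ThreadPoolExecutor(1, 1, 0L, TimeUnit.SECONDS,
                        new ArrayBlockingQueue<Runnable>(1)));
        Runnable sleep = () -> {
            try { Thread.sleep(1000); } catch (InterruptedException ignored) { }
        };
        service.submit(sleep); // runs on the single thread
        service.submit(sleep); // waits in the queue
        try {
            service.submit(sleep); // rejected: queue is full and the pool is at its maximum
        } catch (RejectedExecutionException expected) {
            System.out.println("third task rejected, as the comment above describes");
        }
        service.shutdown();
    }
}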

From source file: org.apache.tez.runtime.library.common.shuffle.impl.ShuffleManager.java

public ShuffleManager(InputContext inputContext, Configuration conf, int numInputs, int bufferSize,
        boolean ifileReadAheadEnabled, int ifileReadAheadLength, CompressionCodec codec,
        FetchedInputAllocator inputAllocator) throws IOException {
    this.inputContext = inputContext;
    this.numInputs = numInputs;

    this.shuffledInputsCounter = inputContext.getCounters().findCounter(TaskCounter.NUM_SHUFFLED_INPUTS);
    this.failedShufflesCounter = inputContext.getCounters().findCounter(TaskCounter.NUM_FAILED_SHUFFLE_INPUTS);
    this.bytesShuffledCounter = inputContext.getCounters().findCounter(TaskCounter.SHUFFLE_BYTES);
    this.decompressedDataSizeCounter = inputContext.getCounters()
            .findCounter(TaskCounter.SHUFFLE_BYTES_DECOMPRESSED);
    this.bytesShuffledToDiskCounter = inputContext.getCounters().findCounter(TaskCounter.SHUFFLE_BYTES_TO_DISK);
    this.bytesShuffledToMemCounter = inputContext.getCounters().findCounter(TaskCounter.SHUFFLE_BYTES_TO_MEM);
    this.bytesShuffledDirectDiskCounter = inputContext.getCounters()
            .findCounter(TaskCounter.SHUFFLE_BYTES_DISK_DIRECT);

    this.ifileBufferSize = bufferSize;
    this.ifileReadAhead = ifileReadAheadEnabled;
    this.ifileReadAheadLength = ifileReadAheadLength;
    this.codec = codec;
    this.inputManager = inputAllocator;
    this.localDiskFetchEnabled = conf.getBoolean(TezRuntimeConfiguration.TEZ_RUNTIME_OPTIMIZE_LOCAL_FETCH,
            TezRuntimeConfiguration.TEZ_RUNTIME_OPTIMIZE_LOCAL_FETCH_DEFAULT);
    this.sharedFetchEnabled = conf.getBoolean(TezRuntimeConfiguration.TEZ_RUNTIME_OPTIMIZE_SHARED_FETCH,
            TezRuntimeConfiguration.TEZ_RUNTIME_OPTIMIZE_SHARED_FETCH_DEFAULT);
    this.verifyDiskChecksum = conf.getBoolean(
            TezRuntimeConfiguration.TEZ_RUNTIME_SHUFFLE_FETCH_VERIFY_DISK_CHECKSUM,
            TezRuntimeConfiguration.TEZ_RUNTIME_SHUFFLE_FETCH_VERIFY_DISK_CHECKSUM_DEFAULT);
    this.maxTimeToWaitForReportMillis = conf.getInt(TezRuntimeConfiguration.TEZ_RUNTIME_SHUFFLE_BATCH_WAIT,
            TezRuntimeConfiguration.TEZ_RUNTIME_SHUFFLE_BATCH_WAIT_DEFAULT);

    this.shufflePhaseTime = inputContext.getCounters().findCounter(TaskCounter.SHUFFLE_PHASE_TIME);
    this.firstEventReceived = inputContext.getCounters().findCounter(TaskCounter.FIRST_EVENT_RECEIVED);
    this.lastEventReceived = inputContext.getCounters().findCounter(TaskCounter.LAST_EVENT_RECEIVED);
    this.compositeFetch = ShuffleUtils.isTezShuffleHandler(conf);

    this.srcNameTrimmed = TezUtilsInternal.cleanVertexName(inputContext.getSourceVertexName());

    completedInputSet = new BitSet(numInputs);
    /**
     * In case of pipelined shuffle, it is possible to get multiple FetchedInput per attempt.
     * We do not know upfront the number of spills from source.
     */
    completedInputs = new LinkedBlockingDeque<FetchedInput>();
    knownSrcHosts = new ConcurrentHashMap<HostPort, InputHost>();
    pendingHosts = new LinkedBlockingQueue<InputHost>();
    obsoletedInputs = Collections.newSetFromMap(new ConcurrentHashMap<InputAttemptIdentifier, Boolean>());
    runningFetchers = Collections.newSetFromMap(new ConcurrentHashMap<Fetcher, Boolean>());

    int maxConfiguredFetchers = conf.getInt(TezRuntimeConfiguration.TEZ_RUNTIME_SHUFFLE_PARALLEL_COPIES,
            TezRuntimeConfiguration.TEZ_RUNTIME_SHUFFLE_PARALLEL_COPIES_DEFAULT);

    this.numFetchers = Math.min(maxConfiguredFetchers, numInputs);

    final ExecutorService fetcherRawExecutor;
    if (conf.getBoolean(TezRuntimeConfiguration.TEZ_RUNTIME_SHUFFLE_FETCHER_USE_SHARED_POOL,
            TezRuntimeConfiguration.TEZ_RUNTIME_SHUFFLE_FETCHER_USE_SHARED_POOL_DEFAULT)) {
        fetcherRawExecutor = inputContext.createTezFrameworkExecutorService(numFetchers,
                "Fetcher_B {" + srcNameTrimmed + "} #%d");
    } else {
        fetcherRawExecutor = Executors.newFixedThreadPool(numFetchers, new ThreadFactoryBuilder()
                .setDaemon(true).setNameFormat("Fetcher_B {" + srcNameTrimmed + "} #%d").build());
    }
    this.fetcherExecutor = MoreExecutors.listeningDecorator(fetcherRawExecutor);

    ExecutorService schedulerRawExecutor = Executors.newFixedThreadPool(1, new ThreadFactoryBuilder()
            .setDaemon(true).setNameFormat("ShuffleRunner {" + srcNameTrimmed + "}").build());
    this.schedulerExecutor = MoreExecutors.listeningDecorator(schedulerRawExecutor);
    this.schedulerCallable = new RunShuffleCallable(conf);

    this.startTime = System.currentTimeMillis();
    this.lastProgressTime = startTime;

    String auxiliaryService = conf.get(TezConfiguration.TEZ_AM_SHUFFLE_AUXILIARY_SERVICE_ID,
            TezConfiguration.TEZ_AM_SHUFFLE_AUXILIARY_SERVICE_ID_DEFAULT);
    SecretKey shuffleSecret = ShuffleUtils
            .getJobTokenSecretFromTokenBytes(inputContext.getServiceConsumerMetaData(auxiliaryService));
    this.jobTokenSecretMgr = new JobTokenSecretManager(shuffleSecret);
    this.asyncHttp = conf.getBoolean(TezRuntimeConfiguration.TEZ_RUNTIME_SHUFFLE_USE_ASYNC_HTTP, false);
    httpConnectionParams = ShuffleUtils.getHttpConnectionParams(conf);

    this.localFs = (RawLocalFileSystem) FileSystem.getLocal(conf).getRaw();

    this.localDirAllocator = new LocalDirAllocator(TezRuntimeFrameworkConfigs.LOCAL_DIRS);

    this.localDisks = Iterables.toArray(localDirAllocator.getAllLocalPathsToRead(".", conf), Path.class);
    this.localhostName = inputContext.getExecutionContext().getHostName();
    final ByteBuffer shuffleMetaData = inputContext.getServiceProviderMetaData(auxiliaryService);
    this.shufflePort = ShuffleUtils.deserializeShuffleProviderMetaData(shuffleMetaData);

    /**
     * Setting this to a very high value can lead to HTTP 400 errors. Cap it at 75: each attempt id
     * is approximately 48 bytes, and 48 * 75 = 3600 leaves some room for other info in the URL.
     */
    this.maxTaskOutputAtOnce = Math.max(1,
            Math.min(75, conf.getInt(TezRuntimeConfiguration.TEZ_RUNTIME_SHUFFLE_FETCH_MAX_TASK_OUTPUT_AT_ONCE,
                    TezRuntimeConfiguration.TEZ_RUNTIME_SHUFFLE_FETCH_MAX_TASK_OUTPUT_AT_ONCE_DEFAULT)));

    Arrays.sort(this.localDisks);

    shuffleInfoEventsMap = new ConcurrentHashMap<Integer, ShuffleEventInfo>();

    LOG.info(srcNameTrimmed + ": numInputs=" + numInputs + ", compressionCodec="
            + (codec == null ? "NoCompressionCodec" : codec.getClass().getName()) + ", numFetchers="
            + numFetchers + ", ifileBufferSize=" + ifileBufferSize + ", ifileReadAheadEnabled=" + ifileReadAhead
            + ", ifileReadAheadLength=" + ifileReadAheadLength + ", " + "localDiskFetchEnabled="
            + localDiskFetchEnabled + ", " + "sharedFetchEnabled=" + sharedFetchEnabled + ", "
            + httpConnectionParams.toString() + ", maxTaskOutputAtOnce=" + maxTaskOutputAtOnce);
}

From source file: com.netflix.metacat.main.services.search.ElasticSearchMetacatRefresh.java

@SuppressWarnings("checkstyle:methodname")
private void _process(final List<QualifiedName> qNames, final Supplier<ListenableFuture<Void>> supplier,
        final String requestName, final boolean delete, final int queueSize) {
    if (isElasticSearchMetacatRefreshAlreadyRunning.compareAndSet(false, true)) {
        final TimerWrapper timer = TimerWrapper
                .createStarted("dse.metacat.timer.ElasticSearchMetacatRefresh." + requestName);
        try {
            log.info("Start: Full refresh of metacat index in elastic search. Processing {} ...", qNames);
            final MetacatRequestContext context = new MetacatRequestContext("admin", "elasticSearchRefresher",
                    null, null, null);
            MetacatContextManager.setContext(context);
            refreshMarker = Instant.now();
            refreshMarkerText = refreshMarker.toString();
            service = MoreExecutors
                    .listeningDecorator(newFixedThreadPool(10, "elasticsearch-refresher-%d", queueSize));
            esService = MoreExecutors
                    .listeningDecorator(newFixedThreadPool(5, "elasticsearch-refresher-es-%d", queueSize));
            supplier.get().get(24, TimeUnit.HOURS);
            log.info("End: Full refresh of metacat index in elastic search");
            if (delete) {
                deleteUnmarkedEntities(qNames, config.getElasticSearchRefreshExcludeQualifiedNames());
            }
        } catch (Exception e) {
            log.error("Full refresh of metacat index failed", e);
            CounterWrapper.incrementCounter("dse.metacat.elasticSearchMetacatRefreshFailureCount");
        } finally {
            try {
                shutdown(service);
                shutdown(esService);
            } finally {
                isElasticSearchMetacatRefreshAlreadyRunning.set(false);
                log.info("### Time taken to complete {} is {} ms", requestName, timer.stop());
            }
        }

    } else {
        log.info("Full refresh of metacat index is already running.");
        CounterWrapper.incrementCounter("dse.metacat.elasticSearchMetacatRefreshAlreadyRunning");
    }
}
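
The shutdown(service) helper used in the finally block above is project-specific, but Guava ships a comparable utility. A minimal sketch:

import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

public class ShutdownSketch {
    public static void main(String[] args) {
        ListeningExecutorService service =
                MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(2));
        // Disables new submissions, waits half the timeout for graceful termination,
        // then interrupts stragglers and waits the other half; returns true if terminated.
        boolean terminated =
                MoreExecutors.shutdownAndAwaitTermination(service, 10, TimeUnit.SECONDS);
        System.out.println("terminated cleanly: " + terminated);
    }
}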