Example usage for java.util.concurrent ConcurrentMap isEmpty

List of usage examples for java.util.concurrent ConcurrentMap isEmpty

Introduction

On this page you can find example usage for java.util.concurrent ConcurrentMap isEmpty.

Prototype

boolean isEmpty();

Document

Returns true if this map contains no key-value mappings.
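Before the project examples below, here is a minimal, self-contained sketch of the typical isEmpty() check on a ConcurrentMap. The class and variable names are hypothetical and not taken from any of the projects listed under Usage.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class ConcurrentMapIsEmptyExample {
    public static void main(String[] args) {
        // A freshly created map contains no mappings, so isEmpty() returns true.
        final ConcurrentMap<String, Integer> counters = new ConcurrentHashMap<>();
        System.out.println(counters.isEmpty()); // true

        counters.putIfAbsent("requests", 1);
        System.out.println(counters.isEmpty()); // false

        // In concurrent code the result is only a snapshot: another thread may
        // add or remove entries immediately after the check.
        if (!counters.isEmpty()) {
            System.out.println("Map currently has " + counters.size() + " entries");
        }
    }
}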

Usage

From source file:kieker.tools.bridge.cli.CLIServerMain.java

/**
 * Create a record map of classes implementing the IMonitoringRecord interface out of libraries containing such classes and a textual mapping file.
 *
 * @return A record map; null is never returned, as a call to usage terminates the program.
 * @throws IOException
 *             if an error occurred reading the mapping file
 * @throws CLIConfigurationErrorException
 *             if a configuration error occurred
 */
private static ConcurrentMap<Integer, Class<? extends IMonitoringRecord>> createRecordMap()
        throws IOException, CLIConfigurationErrorException {
    if (commandLine.hasOption("L")) {
        final String[] libraries = commandLine.getOptionValues("L");

        if (commandLine.hasOption(CMD_MAP_FILE)) {
            final ConcurrentMap<Integer, Class<? extends IMonitoringRecord>> recordMap = CLIServerMain
                    .readMapping(libraries, commandLine.getOptionValue(CMD_MAP_FILE));
            if (recordMap.isEmpty()) {
                throw new CLIConfigurationErrorException(
                        "At least one mapping must be specified in the mapping file.");
            }
            return recordMap;
        } else {
            throw new CLIConfigurationErrorException("Mapping file is required.");
        }
    } else {
        throw new CLIConfigurationErrorException("At least one library reference is required.");
    }
}

From source file:org.honeysoft.akka.actor.BusinessActorTest.java

@Test
public void shouldBeValidWhenNoOneIsNull() throws Exception {
    //GIVEN
    final ConcurrentMap<String, Object> threadSafeMap = new ConcurrentHashMap<String, Object>(1);
    field("logger").ofType(Logger.class).in(businessService).postDecorateWith(new TestLogger(threadSafeMap));

    //WHEN
    String testString = "test-string";
    businessActorRef.tell(testString);

    //THEN
    Awaitility.waitAtMost(Duration.FIVE_SECONDS).until(new Callable<Boolean>() {
        @Override
        public Boolean call() throws Exception {
            return !threadSafeMap.isEmpty();
        }
    });

    Assertions.assertThat(threadSafeMap).hasSize(1);
    Assertions.assertThat(threadSafeMap.values().iterator().next()).isEqualTo(testString);
}

From source file:com.github.podd.example.ExamplePoddClient.java

public Map<Path, String> uploadToStorage(final List<Path> bagsToUpload, final String sshServerFingerprint,
        final String sshHost, final int portNo, final String username, final Path pathToPublicKey,
        final Path localRootPath, final Path remoteRootPath, final PasswordFinder keyExtractor)
        throws PoddClientException, NoSuchAlgorithmException, IOException {
    final Map<Path, String> results = new ConcurrentHashMap<>();

    final ConcurrentMap<Path, ConcurrentMap<PoddDigestUtils.Algorithm, String>> digests = PoddDigestUtils
            .getDigests(bagsToUpload);

    try (SSHClient sshClient = new SSHClient(ExamplePoddClient.DEFAULT_CONFIG);) {
        sshClient.useCompression();
        sshClient.addHostKeyVerifier(sshServerFingerprint);
        sshClient.connect(sshHost, portNo);
        if (!Files.exists(pathToPublicKey)) {
            throw new PoddClientException("Could not find public key: " + pathToPublicKey);
        }
        if (!SecurityUtils.isBouncyCastleRegistered()) {
            throw new PoddClientException("Bouncy castle needed");
        }
        final FileKeyProvider rsa = new PKCS8KeyFile();
        rsa.init(pathToPublicKey.toFile(), keyExtractor);
        sshClient.authPublickey(username, rsa);
        // Session session = sshClient.startSession();
        try (SFTPClient sftp = sshClient.newSFTPClient();) {
            for (final Path nextBag : bagsToUpload) {
                // Check to make sure that the bag was under the local root path
                final Path localPath = nextBag.toAbsolutePath();
                if (!localPath.startsWith(localRootPath)) {
                    this.log.error(
                            "Local bag path was not a direct descendant of the local root path: {} {} {}",
                            localRootPath, nextBag, localPath);
                    throw new PoddClientException(
                            "Local bag path was not a direct descendant of the local root path: " + localPath
                                    + " " + localRootPath);
                }

                // Take the local root path out to get the subpath to use on the remote
                final Path remoteSubPath = localPath.subpath(localRootPath.getNameCount(),
                        nextBag.getNameCount() - 1);

                this.log.info("Remote sub path: {}", remoteSubPath);

                final Path remoteDirPath = remoteRootPath.resolve(remoteSubPath);
                this.log.info("Remote dir path: {}", remoteDirPath);

                final Path remoteBagPath = remoteDirPath.resolve(nextBag.getFileName());

                this.log.info("Remote bag path: {}", remoteBagPath);

                boolean fileFound = false;
                boolean sizeCorrect = false;
                try {
                    // check details of a remote bag
                    final FileAttributes attribs = sftp.lstat(remoteBagPath.toAbsolutePath().toString());
                    final long localSize = Files.size(nextBag);
                    final long remoteSize = attribs.getSize();

                    if (localSize <= 0) {
                        this.log.error("Local bag was empty: {}", nextBag);
                        sizeCorrect = false;
                        fileFound = false;
                    } else if (remoteSize <= 0) {
                        this.log.warn("Remote bag was empty: {} {}", nextBag, attribs);
                        sizeCorrect = false;
                        fileFound = false;
                    } else if (localSize == remoteSize) {
                        this.log.info("Found file on remote already with same size as local: {} {}", nextBag,
                                remoteBagPath);
                        sizeCorrect = true;
                        fileFound = true;
                    } else {
                        sizeCorrect = false;
                        fileFound = true;
                        // We always assume that a non-zero local file is correct
                        // The bags contain time-stamps that will be modified when they are
                        // regenerated, likely changing the file-size, and hopefully changing
                        // the digest checksums
                        // throw new PoddClientException(
                        // "Could not automatically compare file sizes (need manual intervention to delete one) : "
                        // + nextBag + " " + remoteBagPath + " localSize=" + localSize
                        // + " remoteSize=" + remoteSize);
                    }
                } catch (final IOException e) {
                    // lstat() throws an IOException if the file does not exist
                    // Ignore
                    sizeCorrect = false;
                    fileFound = false;
                }

                final ConcurrentMap<Algorithm, String> bagDigests = digests.get(nextBag);
                if (bagDigests.isEmpty()) {
                    this.log.error("No bag digests were generated for bag: {}", nextBag);
                }
                for (final Entry<Algorithm, String> entry : bagDigests.entrySet()) {
                    final Path localDigestPath = localPath
                            .resolveSibling(localPath.getFileName() + entry.getKey().getExtension());
                    // Create the local digest file
                    Files.copy(
                            new ReaderInputStream(new StringReader(entry.getValue()), StandardCharsets.UTF_8),
                            localDigestPath);
                    final Path remoteDigestPath = remoteBagPath
                            .resolveSibling(remoteBagPath.getFileName() + entry.getKey().getExtension());
                    boolean nextDigestFileFound = false;
                    boolean nextDigestCorrect = false;
                    try {
                        final Path tempFile = Files.createTempFile("podd-digest-",
                                entry.getKey().getExtension());
                        final SFTPFileTransfer sftpFileTransfer = new SFTPFileTransfer(sftp.getSFTPEngine());
                        sftpFileTransfer.download(remoteBagPath.toAbsolutePath().toString(),
                                tempFile.toAbsolutePath().toString());
                        nextDigestFileFound = true;

                        final List<String> allLines = Files.readAllLines(tempFile, StandardCharsets.UTF_8);
                        if (allLines.isEmpty()) {
                            nextDigestCorrect = false;
                        } else if (allLines.size() > 1) {
                            nextDigestCorrect = false;
                        }
                        // Check if the digests match exactly
                        else if (allLines.get(0).equals(entry.getValue())) {
                            nextDigestCorrect = true;
                        } else {
                            nextDigestCorrect = false;
                        }
                    } catch (final IOException e) {
                        nextDigestFileFound = false;
                        nextDigestCorrect = false;
                    }
                    if (nextDigestFileFound && nextDigestCorrect) {
                        this.log.info(
                                "Not copying digest to remote as it exists and contains the same content as the local digest");
                    } else if (nextDigestFileFound && !nextDigestCorrect) {
                        this.log.error("Found remote digest but content was not correct: {} {}",
                                localDigestPath, remoteDigestPath);
                        sftp.rm(remoteDigestPath.toString());
                        this.log.info("Copying digest to remote: {}", remoteDigestPath);
                        sftp.put(new FileSystemFile(localDigestPath.toString()), remoteDigestPath.toString());
                    } else if (!nextDigestFileFound) {
                        this.log.info("About to make directories on remote: {}", remoteDirPath);
                        sftp.mkdirs(remoteDirPath.toString());
                        this.log.info("Copying digest to remote: {}", remoteDigestPath);
                        sftp.put(new FileSystemFile(localDigestPath.toString()), remoteDigestPath.toString());
                    }
                }

                if (fileFound && sizeCorrect) {
                    this.log.info("Not copying bag to remote as it exists and is the same size as local bag");
                } else if (fileFound && !sizeCorrect) {
                    this.log.error("Found remote bag but size was not correct: {} {}", nextBag, remoteBagPath);
                    sftp.rm(remoteBagPath.toString());
                    this.log.info("Copying bag to remote: {}", remoteBagPath);
                    sftp.put(new FileSystemFile(localPath.toString()), remoteBagPath.toString());
                } else if (!fileFound) {
                    this.log.info("About to make directories on remote: {}", remoteDirPath);
                    sftp.mkdirs(remoteDirPath.toString());
                    this.log.info("Copying bag to remote: {}", remoteBagPath);
                    sftp.put(new FileSystemFile(localPath.toString()), remoteBagPath.toString());
                }

            }
        }
    } catch (final IOException e) {
        throw new PoddClientException("Could not copy a bag to the remote location", e);
    }

    return results;
}

From source file:com.weibo.api.motan.util.StatsUtil.java

public static void logAccessStatistic(boolean clear) {
    DecimalFormat mbFormat = new DecimalFormat("#0.00");
    long currentTimeMillis = System.currentTimeMillis();

    ConcurrentMap<String, AccessStatisticResult> totalResults = new ConcurrentHashMap<String, AccessStatisticResult>();

    for (Map.Entry<String, AccessStatisticItem> entry : accessStatistics.entrySet()) {
        AccessStatisticItem item = entry.getValue();

        AccessStatisticResult result = item.getStatisticResult(currentTimeMillis,
                MotanConstants.STATISTIC_PEROID);

        if (clear) {
            item.clearStatistic(currentTimeMillis, MotanConstants.STATISTIC_PEROID);
        }

        String key = entry.getKey();
        String[] keys = key.split(SEPARATE);
        if (keys.length != 3) {
            continue;
        }
        String application = keys[1];
        String module = keys[2];
        key = application + "|" + module;
        AccessStatisticResult appResult = totalResults.get(key);
        if (appResult == null) {
            totalResults.putIfAbsent(key, new AccessStatisticResult());
            appResult = totalResults.get(key);
        }

        appResult.totalCount += result.totalCount;
        appResult.bizExceptionCount += result.bizExceptionCount;
        appResult.slowCount += result.slowCount;
        appResult.costTime += result.costTime;
        appResult.bizTime += result.bizTime;
        appResult.otherExceptionCount += result.otherExceptionCount;

        Snapshot snapshot = InternalMetricsFactory.getRegistryInstance(entry.getKey()).histogram(HISTOGRAM_NAME)
                .getSnapshot();

        if (application.equals(APPLICATION_STATISTIC)) {
            continue;
        }
        if (result.totalCount == 0) {
            LoggerUtil.accessStatsLog("[motan-accessStatistic] app: " + application + " module: " + module
                    + " item: " + keys[0]
                    + " total_count: 0 slow_count: 0 biz_excp: 0 other_excp: 0 avg_time: 0.00ms biz_time: 0.00ms avg_tps: 0 max_tps: 0 min_tps: 0");
        } else {
            LoggerUtil.accessStatsLog(
                    "[motan-accessStatistic] app: {} module: {} item: {} total_count: {} slow_count: {} p75: {} p95: {} p98: {} p99: {} p999: {} biz_excp: {} other_excp: {} avg_time: {}ms biz_time: {}ms avg_tps: {} max_tps: {} min_tps: {} ",
                    application, module, keys[0], result.totalCount, result.slowCount,
                    mbFormat.format(snapshot.get75thPercentile()),
                    mbFormat.format(snapshot.get95thPercentile()),
                    mbFormat.format(snapshot.get98thPercentile()),
                    mbFormat.format(snapshot.get99thPercentile()),
                    mbFormat.format(snapshot.get999thPercentile()), result.bizExceptionCount,
                    result.otherExceptionCount, mbFormat.format(result.costTime / result.totalCount),
                    mbFormat.format(result.bizTime / result.totalCount),
                    (result.totalCount / MotanConstants.STATISTIC_PEROID), result.maxCount, result.minCount);
        }

    }

    if (!totalResults.isEmpty()) {
        for (Map.Entry<String, AccessStatisticResult> entry : totalResults.entrySet()) {
            String application = entry.getKey().split(SEPARATE)[0];
            String module = entry.getKey().split(SEPARATE)[1];
            AccessStatisticResult totalResult = entry.getValue();
            Snapshot snapshot = InternalMetricsFactory.getRegistryInstance(entry.getKey())
                    .histogram(HISTOGRAM_NAME).getSnapshot();
            if (totalResult.totalCount > 0) {
                LoggerUtil.accessStatsLog(
                        "[motan-totalAccessStatistic] app: {} module: {} total_count: {} slow_count: {} p75: {} p95: {} p98: {} p99: {} p999: {} biz_excp: {} other_excp: {} avg_time: {}ms biz_time: {}ms avg_tps: {}",
                        application, module, totalResult.totalCount, totalResult.slowCount,
                        mbFormat.format(snapshot.get75thPercentile()),
                        mbFormat.format(snapshot.get95thPercentile()),
                        mbFormat.format(snapshot.get98thPercentile()),
                        mbFormat.format(snapshot.get99thPercentile()),
                        mbFormat.format(snapshot.get999thPercentile()), totalResult.bizExceptionCount,
                        totalResult.otherExceptionCount,
                        mbFormat.format(totalResult.costTime / totalResult.totalCount),
                        mbFormat.format(totalResult.bizTime / totalResult.totalCount),
                        (totalResult.totalCount / MotanConstants.STATISTIC_PEROID));
            } else {
                LoggerUtil.accessStatsLog("[motan-totalAccessStatistic] app: " + application + " module: "
                        + module
                        + " total_count: 0 slow_count: 0 biz_excp: 0 other_excp: 0 avg_time: 0.00ms biz_time: 0.00ms avg_tps: 0");
            }

        }
    } else {
        LoggerUtil.accessStatsLog("[motan-totalAccessStatistic] app: " + URLParamType.application.getValue()
                + " module: " + URLParamType.module.getValue()
                + " total_count: 0 slow_count: 0 biz_excp: 0 other_excp: 0 avg_time: 0.00ms biz_time: 0.00ms avg_tps: 0");
    }

}

From source file:com.github.podd.example.ExamplePoddClient.java

/**
 * Parses the mapping of line numbers to the line names used to identify lines in the
 * randomisation process.
 * 
 * @param in
 *            An {@link InputStream} containing the CSV file with the mapping of line numbers to
 *            line names
 * @return A map from line numbers to line names.
 * @throws IOException
 *             If there is an {@link IOException}.
 * @throws PoddClientException
 *             If there is a problem communicating with the PODD server.
 */
public ConcurrentMap<String, String> processLineNameMappingList(final InputStream in)
        throws IOException, PoddClientException {
    // -----------------------------------------------------------------------------------------
    // Now process the CSV file line by line using the caches to reduce multiple queries to the
    // server where possible
    // -----------------------------------------------------------------------------------------

    List<String> headers = null;
    final ConcurrentMap<String, String> result = new ConcurrentHashMap<>();
    // Suppressing try-with-resources warning generated erroneously by Eclipse:
    // https://bugs.eclipse.org/bugs/show_bug.cgi?id=371614
    try (@SuppressWarnings("resource")
    final InputStreamReader inputStreamReader = new InputStreamReader(in, StandardCharsets.UTF_8);
            final CSVReader reader = new CSVReader(inputStreamReader);) {
        String[] nextLine;
        while ((nextLine = reader.readNext()) != null) {
            if (headers == null) {
                // header line is mandatory in PODD CSV
                headers = Arrays.asList(nextLine);
                try {
                    if (headers.size() != 2) {
                        throw new IllegalArgumentException("Did not find required number of headers");
                    }

                    if (!headers.get(0).equals(ExampleLineMappingConstants.RAND_LINE_NUMBER)) {
                        throw new IllegalArgumentException(
                                "Missing " + ExampleLineMappingConstants.RAND_LINE_NUMBER + " header");
                    }

                    if (!headers.get(1).equals(ExampleLineMappingConstants.RAND_CLIENT_LINE_NAME)) {
                        throw new IllegalArgumentException(
                                "Missing " + ExampleLineMappingConstants.RAND_CLIENT_LINE_NAME + " header");
                    }
                } catch (final IllegalArgumentException e) {
                    this.log.error("Could not verify headers for line name mappings file: {}", e.getMessage());
                    throw new PoddClientException("Could not verify headers for line name mappings file", e);
                }
            } else {
                if (nextLine.length != headers.size()) {
                    this.log.error("Line and header sizes were different: {} {}", headers, nextLine);
                }

                final String putIfAbsent = result.putIfAbsent(nextLine[0], nextLine[1]);
                if (putIfAbsent != null) {
                    this.log.error(
                            "Found multiple mappings for line name and number: linenumber={} duplicate={} original={}",
                            nextLine[0], nextLine[1], putIfAbsent);
                }
            }
        }
    }

    if (headers == null) {
        this.log.error("Document did not contain a valid header line");
    }

    if (result.isEmpty()) {
        this.log.error("Document did not contain any valid rows");
    }

    return result;
}

From source file:org.dkpro.lab.engine.impl.MultiThreadBatchTaskEngine.java

@Override
protected void executeConfiguration(BatchTask aConfiguration, TaskContext aContext, Map<String, Object> aConfig,
        Set<String> aExecutedSubtasks) throws ExecutionException, LifeCycleException {
    if (log.isTraceEnabled()) {
        // Show all subtasks executed so far
        for (String est : aExecutedSubtasks) {
            log.trace("-- Already executed: " + est);
        }
    }

    // Set up initial scope used by sub-batch-tasks using the inherited scope. The scope is
    // extended as the subtasks of this batch are executed with the present configuration.
    // FIXME: That means that sub-batch-tasks in two different configurations cannot see
    // each other. Is that intended? Mind that the "executedSubtasks" set is intentionally
    // maintained *across* configurations, so maybe the scope should also be maintained
    // *across* configurations? - REC 2014-06-15
    Set<String> scope = new HashSet<>();
    if (aConfiguration.getScope() != null) {
        scope.addAll(aConfiguration.getScope());
    }

    // Configure subtasks
    for (Task task : aConfiguration.getTasks()) {
        // Now the setup is complete
        aContext.getLifeCycleManager().configure(aContext, task, aConfig);
    }

    Queue<Task> queue = new LinkedList<>(aConfiguration.getTasks());
    // keeps track of the execution threads; 
    // TODO MW: do we really need this or can we work with the futures list only?
    Map<Task, ExecutionThread> threads = new HashMap<>();
    // keeps track of submitted Futures and their associated tasks
    Map<Future<?>, Task> futures = new HashMap<Future<?>, Task>();
    // will be instantiated with all exceptions from current loop
    ConcurrentMap<Task, Throwable> exceptionsFromLastLoop = null;
    ConcurrentMap<Task, Throwable> exceptionsFromCurrentLoop = new ConcurrentHashMap<>();

    int outerLoopCounter = 0;

    // main loop
    do {
        outerLoopCounter++;

        threads.clear();
        futures.clear();
        ExecutorService executor = Executors.newFixedThreadPool(maxThreads);

        // set the exceptions from the last loop
        exceptionsFromLastLoop = new ConcurrentHashMap<>(exceptionsFromCurrentLoop);

        // Fix MW: Clear exceptionsFromCurrentLoop; otherwise the loop will run at most twice.
        exceptionsFromCurrentLoop.clear();

        // process all tasks from the queue
        while (!queue.isEmpty()) {
            Task task = queue.poll();

            TaskContextMetadata execution = getExistingExecution(aConfiguration, aContext, task, aConfig,
                    aExecutedSubtasks);

            // Check if a subtask execution compatible with the present configuration
            // already exists ...
            if (execution == null) {
                // ... otherwise execute it with the present configuration
                log.info("Executing task [" + task.getType() + "]");

                // set scope here so that the inherited scopes are considered
                if (task instanceof BatchTask) {
                    ((BatchTask) task).setScope(scope);
                }

                ExecutionThread thread = new ExecutionThread(aContext, task, aConfig, aExecutedSubtasks);
                threads.put(task, thread);

                futures.put(executor.submit(thread), task);
            } else {
                log.debug("Using existing execution [" + execution.getId() + "]");

                // Record new/existing execution
                aExecutedSubtasks.add(execution.getId());
                scope.add(execution.getId());
            }
        }

        // try and get results from all futures to check for failed executions
        for (Map.Entry<Future<?>, Task> entry : futures.entrySet()) {
            try {
                entry.getKey().get();
            } catch (java.util.concurrent.ExecutionException ex) {
                Task task = entry.getValue();
                // TODO MW: add a retry-counter here to prevent endless loops?
                log.info("Task exec failed for [" + task.getType() + "]");
                // record the failed task, so that it can be re-added to the queue
                exceptionsFromCurrentLoop.put(task, ex);
            } catch (InterruptedException ex) {
                // thread interrupted, exit
                throw new RuntimeException(ex);
            }
        }

        log.debug("Calling shutdown");
        executor.shutdown();
        log.debug("All threads finished");

        // collect the results
        for (Map.Entry<Task, ExecutionThread> entry : threads.entrySet()) {
            Task task = entry.getKey();
            ExecutionThread thread = entry.getValue();
            TaskContextMetadata execution = thread.getTaskContextMetadata();

            // probably failed
            if (execution == null) {
                Throwable exception = exceptionsFromCurrentLoop.get(task);
                if (!(exception instanceof UnresolvedImportException)
                        && !(exception instanceof java.util.concurrent.ExecutionException)) {
                    throw new RuntimeException(exception);
                }
                exceptionsFromCurrentLoop.put(task, exception);

                // re-add to the queue
                queue.add(task);
            } else {

                // Record new/existing execution
                aExecutedSubtasks.add(execution.getId());
                scope.add(execution.getId());
            }
        }

    }
    // END OF DO: finish if the same tasks failed again
    while (!exceptionsFromCurrentLoop.keySet().equals(exceptionsFromLastLoop.keySet()));

    if (!exceptionsFromCurrentLoop.isEmpty()) {
        // collect all details
        StringBuilder details = new StringBuilder();
        for (Throwable throwable : exceptionsFromCurrentLoop.values()) {
            details.append("\n -");
            details.append(throwable.getMessage());
        }

        // we re-throw the first exception
        Throwable next = exceptionsFromCurrentLoop.values().iterator().next();
        if (next instanceof RuntimeException) {
            throw (RuntimeException) next;
        }

        // otherwise wrap it
        throw new RuntimeException(details.toString(), next);
    }
    log.info("MultiThreadBatchTask completed successfully. Total number of outer loop runs: "
            + outerLoopCounter);
}

From source file:com.github.podd.example.ExamplePoddClient.java

/**
 * Parses the given TrayScan project/experiment/tray/pot list and inserts the items into PODD
 * where they do not exist.
 * 
 * TODO: Should this process create new projects where they do not already exist? Ideally they
 * should be created and roles assigned before this process, but could be fine to do that in
 * here
 */
public ConcurrentMap<InferredOWLOntologyID, Model> processTrayScanList(final InputStream in)
        throws IOException, PoddClientException, OpenRDFException, SQLException {
    // Keep a queue so that we only need to update each project once for
    // this operation to succeed
    final ConcurrentMap<InferredOWLOntologyID, Model> uploadQueue = new ConcurrentHashMap<>();

    // Map starting at project name strings and ending with both the URI of
    // the project and the artifact
    final ConcurrentMap<String, ConcurrentMap<URI, InferredOWLOntologyID>> projectUriMap = new ConcurrentHashMap<>();

    // Map starting at experiment name strings and ending with a mapping from the URI of
    // the experiment to the URI of the project that contains the experiment
    // TODO: This could be converted to not be prefilled in future, but currently it contains
    // all experiments in all unpublished projects in PODD that are accessible to the current
    // user
    final ConcurrentMap<String, ConcurrentMap<URI, URI>> experimentUriMap = new ConcurrentHashMap<>();

    // Material mappings, starting at the URI of the experiment and mapping to the URI of the
    // material and the RDF Model containing the statements describing this material
    final ConcurrentMap<URI, ConcurrentMap<URI, Model>> materialUriMap = new ConcurrentHashMap<>();

    // Genotype mappings, starting at the URI of the project and mapping to the URI of the
    // genotype and the RDF Model containing the statements describing this genotype
    final ConcurrentMap<URI, ConcurrentMap<URI, Model>> genotypeUriMap = new ConcurrentHashMap<>();

    // Cache for tray name mappings, starting at tray barcodes and ending with a mapping from
    // the URI of the tray to the URI of the experiment that contains the tray
    // NOTE: This is not prefilled, as it is populated on demand during processing of lines to
    // only contain the necessary elements
    final ConcurrentMap<String, ConcurrentMap<URI, URI>> trayUriMap = new ConcurrentHashMap<>();

    // Cache for pot name mappings, starting at pot barcodes and ending with a mapping from
    // the URI of the pot to the URI of the tray that contains the pot
    // NOTE: This is not prefilled, as it is populated on demand during processing of lines to
    // only contain the necessary elements
    final ConcurrentMap<String, ConcurrentMap<URI, URI>> potUriMap = new ConcurrentHashMap<>();

    // -----------------------------------------------------------------------------------------
    // Now cache URIs for projects, experiments, and genotypes for all unpublished projects that
    // the current user can access
    // -----------------------------------------------------------------------------------------

    // Only select the unpublished artifacts, as we cannot edit published artifacts
    final Model currentUnpublishedArtifacts = this.listArtifacts(false, true);

    // Map known project names to their URIs, as the URIs are needed to
    // create statements internally
    this.populateProjectUriMap(currentUnpublishedArtifacts, projectUriMap);

    this.populateExperimentUriMap(projectUriMap, experimentUriMap);

    this.populateGenotypeUriMap(projectUriMap, genotypeUriMap);

    // -----------------------------------------------------------------------------------------
    // Now process the CSV file line by line using the caches to reduce multiple queries to the
    // server where possible
    // -----------------------------------------------------------------------------------------

    List<String> headers = null;
    // Suppressing try-with-resources warning generated erroneously by Eclipse:
    // https://bugs.eclipse.org/bugs/show_bug.cgi?id=371614
    try (@SuppressWarnings("resource")
    final InputStreamReader inputStreamReader = new InputStreamReader(in, StandardCharsets.UTF_8);
            final CSVReader reader = new CSVReader(inputStreamReader);) {
        String[] nextLine;
        while ((nextLine = reader.readNext()) != null) {
            if (headers == null) {
                // header line is mandatory in PODD CSV
                headers = Arrays.asList(nextLine);
                try {
                    this.verifyTrayScanListHeaders(headers);
                } catch (final IllegalArgumentException e) {
                    this.log.error("Could not verify headers for project list: {}", e.getMessage());
                    throw new PoddClientException("Could not verify headers for project list", e);
                }
            } else {
                if (nextLine.length != headers.size()) {
                    this.log.error("Line and header sizes were different: {} {}", headers, nextLine);
                }

                // Process the next line and add it to the upload queue
                this.processTrayScanLine(headers, Arrays.asList(nextLine), projectUriMap, experimentUriMap,
                        trayUriMap, potUriMap, materialUriMap, genotypeUriMap, uploadQueue);
            }
        }
    }

    if (headers == null) {
        this.log.error("Document did not contain a valid header line");
    }

    if (uploadQueue.isEmpty()) {
        this.log.error("Document did not contain any valid rows");
    }

    return uploadQueue;
}

From source file:org.apache.hadoop.hbase.client.AsyncBatchRpcRetryingCaller.java

private void groupAndSend(Stream<Action> actions, int tries) {
    long locateTimeoutNs;
    if (operationTimeoutNs > 0) {
        locateTimeoutNs = remainingTimeNs();
        if (locateTimeoutNs <= 0) {
            failAll(actions, tries);
            return;
        }
    } else {
        locateTimeoutNs = -1L;
    }
    ConcurrentMap<ServerName, ServerRequest> actionsByServer = new ConcurrentHashMap<>();
    ConcurrentLinkedQueue<Action> locateFailed = new ConcurrentLinkedQueue<>();
    CompletableFuture.allOf(
            actions.map(action -> conn.getLocator().getRegionLocation(tableName, action.getAction().getRow(),
                    RegionLocateType.CURRENT, locateTimeoutNs).whenComplete((loc, error) -> {
                        if (error != null) {
                            error = translateException(error);
                            if (error instanceof DoNotRetryIOException) {
                                failOne(action, tries, error, EnvironmentEdgeManager.currentTime(), "");
                                return;
                            }
                            addError(action, error, null);
                            locateFailed.add(action);
                        } else {
                            computeIfAbsent(actionsByServer, loc.getServerName(), ServerRequest::new)
                                    .addAction(loc, action);
                        }
                    })).toArray(CompletableFuture[]::new))
            .whenComplete((v, r) -> {
                if (!actionsByServer.isEmpty()) {
                    send(actionsByServer, tries);
                }
                if (!locateFailed.isEmpty()) {
                    tryResubmit(locateFailed.stream(), tries);
                }
            });
}

From source file:org.apache.hadoop.yarn.server.resourcemanager.ResourceTrackerService.java

@SuppressWarnings("unchecked")
@Override
public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request) throws YarnException, IOException {

    NodeStatus remoteNodeStatus = request.getNodeStatus();
    /**
     * Here is the node heartbeat sequence...
     * 1. Check if it's a valid (i.e. not excluded) node
     * 2. Check if it's a registered node
     * 3. Check if it's a 'fresh' heartbeat i.e. not duplicate heartbeat
     * 4. Send healthStatus to RMNode
     * 5. Update node's labels if distributed Node Labels configuration is enabled
     */

    NodeId nodeId = remoteNodeStatus.getNodeId();

    // 1. Check if it's a valid (i.e. not excluded) node, if not, see if it is
    // in decommissioning.
    if (!this.nodesListManager.isValidNode(nodeId.getHost()) && !isNodeInDecommissioning(nodeId)) {
        String message = "Disallowed NodeManager nodeId: " + nodeId + " hostname: " + nodeId.getHost();
        LOG.info(message);
        return YarnServerBuilderUtils.newNodeHeartbeatResponse(NodeAction.SHUTDOWN, message);
    }

    // 2. Check if it's a registered node
    RMNode rmNode = this.rmContext.getRMNodes().get(nodeId);
    if (rmNode == null) {
        /* node does not exist */
        String message = "Node not found resyncing " + remoteNodeStatus.getNodeId();
        LOG.info(message);
        return YarnServerBuilderUtils.newNodeHeartbeatResponse(NodeAction.RESYNC, message);
    }

    // Send ping
    this.nmLivelinessMonitor.receivedPing(nodeId);

    if (isHopsTLSEnabled()) {
        Set<ApplicationId> updatedApps = request.getUpdatedApplicationsWithNewCryptoMaterial();
        if (updatedApps != null) {
            for (ApplicationId appId : updatedApps) {
                rmNode.getAppX509ToUpdate().remove(appId);
                RMApp rmApp = rmContext.getRMApps().get(appId);
                rmApp.rmNodeHasUpdatedCryptoMaterial(rmNode.getNodeID());
            }
        }
    }

    // 3. Check if it's a 'fresh' heartbeat i.e. not duplicate heartbeat
    NodeHeartbeatResponse lastNodeHeartbeatResponse = rmNode.getLastNodeHeartBeatResponse();
    if (remoteNodeStatus.getResponseId() + 1 == lastNodeHeartbeatResponse.getResponseId()) {
        LOG.info("Received duplicate heartbeat from node " + rmNode.getNodeAddress() + " responseId="
                + remoteNodeStatus.getResponseId());
        return lastNodeHeartbeatResponse;
    } else if (remoteNodeStatus.getResponseId() + 1 < lastNodeHeartbeatResponse.getResponseId()) {
        String message = "Too far behind rm response id:" + lastNodeHeartbeatResponse.getResponseId()
                + " nm response id:" + remoteNodeStatus.getResponseId();
        LOG.info(message);
        // TODO: Just sending reboot is not enough. Think more.
        this.rmContext.getDispatcher().getEventHandler()
                .handle(new RMNodeEvent(nodeId, RMNodeEventType.REBOOTING));
        return YarnServerBuilderUtils.newNodeHeartbeatResponse(NodeAction.RESYNC, message);
    }

    // Heartbeat response
    NodeHeartbeatResponse nodeHeartBeatResponse = YarnServerBuilderUtils.newNodeHeartbeatResponse(
            lastNodeHeartbeatResponse.getResponseId() + 1, NodeAction.NORMAL, null, null, null, null,
            nextHeartBeatInterval);
    rmNode.updateNodeHeartbeatResponseForCleanup(nodeHeartBeatResponse);
    rmNode.updateNodeHeartbeatResponseForContainersDecreasing(nodeHeartBeatResponse);

    populateKeys(request, nodeHeartBeatResponse);
    if (isHopsTLSEnabled() || isJWTEnabled()) {
        Map<ApplicationId, UpdatedCryptoForApp> mergedUpdates = mergeNewSecurityMaterialForApps(rmNode);
        nodeHeartBeatResponse.setUpdatedCryptoForApps(mergedUpdates);
    }

    ConcurrentMap<ApplicationId, ByteBuffer> systemCredentials = rmContext.getSystemCredentialsForApps();
    if (!systemCredentials.isEmpty()) {
        nodeHeartBeatResponse.setSystemCredentialsForApps(systemCredentials);
    }

    nodeHeartBeatResponse.setNextheartbeat(((RMNodeImpl) rmNode).getNextHeartbeat());
    // 4. Send status to RMNode, saving the latest response.
    RMNodeStatusEvent nodeStatusEvent = new RMNodeStatusEvent(nodeId, remoteNodeStatus, nodeHeartBeatResponse);
    if (request.getLogAggregationReportsForApps() != null
            && !request.getLogAggregationReportsForApps().isEmpty()) {
        nodeStatusEvent.setLogAggregationReportsForApps(request.getLogAggregationReportsForApps());
    }
    this.rmContext.getDispatcher().getEventHandler().handle(nodeStatusEvent);

    // 5. Update node's labels to RM's NodeLabelManager.
    if (isDistributedNodeLabelsConf && request.getNodeLabels() != null) {
        try {
            updateNodeLabelsFromNMReport(NodeLabelsUtils.convertToStringSet(request.getNodeLabels()), nodeId);
            nodeHeartBeatResponse.setAreNodeLabelsAcceptedByRM(true);
        } catch (IOException ex) {
            //ensure the error message is captured and sent across in response
            nodeHeartBeatResponse.setDiagnosticsMessage(ex.getMessage());
            nodeHeartBeatResponse.setAreNodeLabelsAcceptedByRM(false);
        }
    }

    // 6. check if node's capacity is load from dynamic-resources.xml
    // if so, send updated resource back to NM.
    String nid = nodeId.toString();
    Resource capability = loadNodeResourceFromDRConfiguration(nid);
    // sync back with new resource if not null.
    if (capability != null) {
        nodeHeartBeatResponse.setResource(capability);
    }

    return nodeHeartBeatResponse;
}

From source file:org.apereo.portal.io.xml.JaxbPortalDataHandlerService.java

@Override
public void importDataDirectory(File directory, String pattern, final BatchImportOptions options) {
    if (!directory.exists()) {
        throw new IllegalArgumentException("The specified directory '" + directory + "' does not exist");
    }

    //Create the file filter to use when searching for files to import
    final FileFilter fileFilter;
    if (pattern != null) {
        fileFilter = new AntPatternFileFilter(true, false, pattern, this.dataFileExcludes);
    } else {
        fileFilter = new AntPatternFileFilter(true, false, this.dataFileIncludes, this.dataFileExcludes);
    }

    //Determine the parent directory to log to
    final File logDirectory = determineLogDirectory(options, "import");

    //Setup reporting file
    final File importReport = new File(logDirectory, "data-import.txt");
    final PrintWriter reportWriter;
    try {
        reportWriter = new PrintWriter(new PeriodicFlushingBufferedWriter(500, new FileWriter(importReport)));
    } catch (IOException e) {
        throw new RuntimeException("Failed to create FileWriter for: " + importReport, e);
    }

    //Convert directory to URI String to provide better logging output
    final URI directoryUri = directory.toURI();
    final String directoryUriStr = directoryUri.toString();
    IMPORT_BASE_DIR.set(directoryUriStr);
    try {
        //Scan the specified directory for files to import
        logger.info("Scanning for files to Import from: {}", directory);
        final PortalDataKeyFileProcessor fileProcessor = new PortalDataKeyFileProcessor(this.dataKeyTypes,
                options);
        this.directoryScanner.scanDirectoryNoResults(directory, fileFilter, fileProcessor);
        final long resourceCount = fileProcessor.getResourceCount();
        logger.info("Found {} files to Import from: {}", resourceCount, directory);

        //See if the import should fail on error
        final boolean failOnError = options != null ? options.isFailOnError() : true;

        //Map of files to import, grouped by type
        final ConcurrentMap<PortalDataKey, Queue<Resource>> dataToImport = fileProcessor.getDataToImport();

        //Import the data files
        for (final PortalDataKey portalDataKey : this.dataKeyImportOrder) {
            final Queue<Resource> files = dataToImport.remove(portalDataKey);
            if (files == null) {
                continue;
            }

            final Queue<ImportFuture<?>> importFutures = new LinkedList<ImportFuture<?>>();
            final List<FutureHolder<?>> failedFutures = new LinkedList<FutureHolder<?>>();

            final int fileCount = files.size();
            logger.info("Importing {} files of type {}", fileCount, portalDataKey);
            reportWriter.println(portalDataKey + "," + fileCount);

            while (!files.isEmpty()) {
                final Resource file = files.poll();

                //Check for completed futures on every iteration, needed to fail as fast as possible on an import exception
                final List<FutureHolder<?>> newFailed = waitForFutures(importFutures, reportWriter,
                        logDirectory, false);
                failedFutures.addAll(newFailed);

                final AtomicLong importTime = new AtomicLong(-1);

                //Create import task
                final Callable<Object> task = new CallableWithoutResult() {
                    @Override
                    protected void callWithoutResult() {
                        IMPORT_BASE_DIR.set(directoryUriStr);
                        importTime.set(System.nanoTime());
                        try {
                            importData(file, portalDataKey);
                        } finally {
                            importTime.set(System.nanoTime() - importTime.get());
                            IMPORT_BASE_DIR.remove();
                        }
                    }
                };

                //Submit the import task
                final Future<?> importFuture = this.importExportThreadPool.submit(task);

                //Add the future for tracking
                importFutures.offer(new ImportFuture(importFuture, file, portalDataKey, importTime));
            }

            //Wait for all of the imports of this type to complete
            final List<FutureHolder<?>> newFailed = waitForFutures(importFutures, reportWriter, logDirectory,
                    true);
            failedFutures.addAll(newFailed);

            if (failOnError && !failedFutures.isEmpty()) {
                throw new RuntimeException(
                        failedFutures.size() + " " + portalDataKey + " entities failed to import.\n\n"
                                + "\tPer entity exception logs and a full report can be found in "
                                + logDirectory + "\n");
            }

            reportWriter.flush();
        }

        if (!dataToImport.isEmpty()) {
            throw new IllegalStateException(
                    "The following PortalDataKeys are not listed in the dataTypeImportOrder List: "
                            + dataToImport.keySet());
        }

        logger.info("For a detailed report on the data import see " + importReport);
    } catch (InterruptedException e) {
        throw new RuntimeException("Interrupted while waiting for entities to import", e);
    } finally {
        IOUtils.closeQuietly(reportWriter);
        IMPORT_BASE_DIR.remove();
    }
}