Example usage for java.util.concurrent ConcurrentLinkedQueue ConcurrentLinkedQueue

Introduction

On this page you can find example usage for the java.util.concurrent ConcurrentLinkedQueue() constructor.

Prototype

public ConcurrentLinkedQueue() 

Document

Creates a ConcurrentLinkedQueue that is initially empty.
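
For orientation before the real-world listings below, here is a minimal, self-contained sketch using this constructor (the class name and values are illustrative only). The constructor takes no arguments and creates an empty, unbounded, thread-safe FIFO queue: offer/add never block, and poll/peek return null instead of blocking when the queue is empty.

import java.util.concurrent.ConcurrentLinkedQueue;

public class ConcurrentLinkedQueueExample {
    public static void main(String[] args) {
        // Creates a ConcurrentLinkedQueue that is initially empty.
        ConcurrentLinkedQueue<String> queue = new ConcurrentLinkedQueue<String>();

        queue.offer("first"); // offer() and add() are equivalent here: the queue is unbounded
        queue.add("second");

        System.out.println(queue.peek()); // "first" - examines the head without removing it
        System.out.println(queue.poll()); // "first" - removes the head
        System.out.println(queue.poll()); // "second"
        System.out.println(queue.poll()); // null - poll() does not block on an empty queue
    }
}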

Usage

From source file:spade.storage.Neo4j.java

public static void index(String dbpath, boolean printProgress) {

    int totalThreads = Runtime.getRuntime().availableProcessors();
    final ConcurrentLinkedQueue<Node> nodeTaskQueue = new ConcurrentLinkedQueue<Node>();
    final ConcurrentLinkedQueue<Relationship> edgeTaskQueue = new ConcurrentLinkedQueue<Relationship>();
    final ReentrantReadWriteLock nodeRwlock = new ReentrantReadWriteLock();
    final ReentrantReadWriteLock edgeRwlock = new ReentrantReadWriteLock();
    final Index<Node> vertexIndex;
    final RelationshipIndex edgeIndex;
    System.out.println("Loading database...");
    File databaseFile = new File(dbpath);
    final GraphDatabaseService graphDb = new GraphDatabaseFactory().newEmbeddedDatabaseBuilder(databaseFile)
            .setConfig(GraphDatabaseSettings.pagecache_memory,
                    "" + (Runtime.getRuntime().totalMemory() * 9) / 10)
            // .setConfig(GraphDatabaseSettings.keep_logical_logs, "false")
            .newGraphDatabase();

    System.out.println("Loaded");
    // clear already present indexes
    try (Transaction tx = graphDb.beginTx()) {
        graphDb.index().forNodes(spade.storage.Neo4j.VERTEX_INDEX).delete();
        tx.success();
    }

    try (Transaction tx = graphDb.beginTx()) {
        graphDb.index().forRelationships(spade.storage.Neo4j.EDGE_INDEX).delete();
        tx.success();
    }
    //

    System.out.println("Creating Indexing discriptors...");

    try (Transaction tx = graphDb.beginTx()) {
        vertexIndex = graphDb.index().forNodes(spade.storage.Neo4j.VERTEX_INDEX);
        tx.success();
    }

    try (Transaction tx = graphDb.beginTx()) {
        edgeIndex = graphDb.index().forRelationships(spade.storage.Neo4j.EDGE_INDEX);
        tx.success();
    }

    System.out.println("Created");

    class NodeIndexer implements Runnable {

        public void run() {

            Transaction tx = graphDb.beginTx();
            int counter = 0;
            try {
                while (!Thread.currentThread().isInterrupted()) {

                    if (counter < 10000) {
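                        // poll() never blocks: it returns null immediately when the queue is empty, so this loop busy-waits for new work.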
                        Node node = nodeTaskQueue.poll();
                        if (node == null) {
                            continue;
                        }

                        for (String key : node.getPropertyKeys()) {
                            vertexIndex.add(node, key, (String) node.getProperty(key));
                        }
                        node.setProperty(ID_STRING, node.getId());
                        vertexIndex.add(node, ID_STRING, Long.toString(node.getId()));

                        counter++;
                    }

                    if (counter > 1000 && nodeRwlock.writeLock().tryLock()) {
                        tx.success();
                        tx.close();
                        tx = graphDb.beginTx();
                        nodeRwlock.writeLock().unlock();
                        counter = 0;
                    }

                }

            } finally {
                // tx.success();
                tx.close();
                if (nodeRwlock.writeLock().isHeldByCurrentThread()) {
                    nodeRwlock.writeLock().unlock();
                }
            }
        }
    }

    class RelationshipIndexer implements Runnable {

        public void run() {

            Transaction tx = graphDb.beginTx();
            int counter = 0;
            try {
                while (!Thread.currentThread().isInterrupted()) {

                    if (counter < 10000) {
                        Relationship relationship = edgeTaskQueue.poll();
                        if (relationship == null) {
                            continue;
                        }

                        for (String key : relationship.getPropertyKeys()) {
                            edgeIndex.add(relationship, key, (String) relationship.getProperty(key));
                        }
                        relationship.setProperty(ID_STRING, relationship.getId());
                        edgeIndex.add(relationship, ID_STRING, Long.toString(relationship.getId()));

                        counter++;
                    }

                    if (counter > 1000 && edgeRwlock.writeLock().tryLock()) {
                        tx.success();
                        tx.close();
                        tx = graphDb.beginTx();
                        edgeRwlock.writeLock().unlock();
                        counter = 0;
                    }

                }

            } finally {
                // tx.success();
                tx.close();
                if (edgeRwlock.writeLock().isHeldByCurrentThread()) {
                    edgeRwlock.writeLock().unlock();
                }
            }

        }
    }

    ArrayList<Thread> nodeWorkers = new ArrayList<Thread>();
    for (int i = 0; i < totalThreads / 2; i++) {
        Thread th = new Thread(new NodeIndexer());
        nodeWorkers.add(th);
        th.start();
    }

    ArrayList<Thread> edgeWorkers = new ArrayList<Thread>();
    for (int i = 0; i < totalThreads / 2; i++) {
        Thread th = new Thread(new RelationshipIndexer());
        edgeWorkers.add(th);
        th.start();
    }

    System.out.println("Counted Nodes and Relationships to index...");
    final long total;

    try (Transaction tx = graphDb.beginTx()) {
        total = Iterators.count(graphDb.getAllNodes().iterator())
                + Iterators.count(graphDb.getAllRelationships().iterator());
        tx.success();
    }
    System.out.println("done.\n");

    long percentageCompleted = 0;
    int count = 0;

    try (Transaction tx = graphDb.beginTx()) {

        // index nodes
        Iterator<Node> nodeIterator = graphDb.getAllNodes().iterator();
        Iterator<Relationship> edgeIterator = graphDb.getAllRelationships().iterator();

        while (edgeIterator.hasNext() || nodeIterator.hasNext()) {

            if (nodeIterator.hasNext() && nodeTaskQueue.size() < 10000) {
                nodeTaskQueue.add(nodeIterator.next());
                count = count + 1;
            }

            if (edgeIterator.hasNext() && edgeTaskQueue.size() < 10000) {
                edgeTaskQueue.add(edgeIterator.next());
                count = count + 1;
            }

            if (printProgress) {

                if (((count * 100) / total) > percentageCompleted) {
                    Runtime rt = Runtime.getRuntime();
                    long totalMemory = rt.totalMemory() / 1024 / 1024;
                    long freeMemory = rt.freeMemory() / 1024 / 1024;
                    long usedMemory = totalMemory - freeMemory;
                    System.out.print("| Cores: " + rt.availableProcessors() + " | Threads: " + totalThreads
                            + " | Heap (MB) - total: " + totalMemory + " , " + (freeMemory * 100) / totalMemory
                            + "% free"
                            // + " | Total Objects (nodes + relationships) to Index: " + total
                            + " | Indexing Object (nodes + relationships): " + count + " / " + total
                            + " | Completed: " + percentageCompleted + " %" + " |\r");
                }

                percentageCompleted = (count * 100) / total;
            }

        }

        tx.success();
    }

    System.out.println("\n\nIndexing completed. Waiting for queues to clear...");

    try {
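        // Note: ConcurrentLinkedQueue.size() is not a constant-time operation (it traverses the whole queue), so isEmpty() is used for the emptiness check.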
        while (!nodeTaskQueue.isEmpty() || !edgeTaskQueue.isEmpty()) {
            Thread.sleep(1000);
        }
    } catch (InterruptedException exception) {

    }

    System.out.println("Queues cleared. Threads teardown started...");

    for (int i = 0; i < totalThreads / 2; i++) {
        nodeWorkers.get(i).interrupt();
        try {
            nodeWorkers.get(i).join();
        } catch (InterruptedException exception) {

        }
    }

    for (int i = 0; i < totalThreads / 2; i++) {
        edgeWorkers.get(i).interrupt();
        try {
            edgeWorkers.get(i).join();
        } catch (InterruptedException exception) {

        }
    }

    System.out.println("Database shutdown started...");
    graphDb.shutdown();
}

From source file:de.innovationgate.wgpublisher.lucene.LuceneManager.java

private void addAdditionRequest(IndexingRequest request) {
    synchronized (_indexingRequestLock) {
        // add to list and set service status running for DB
        Queue<IndexingRequest> requests = _additionRequestsMap.get(request.getDbkey());
        if (requests == null) {
            requests = new ConcurrentLinkedQueue<LuceneManager.IndexingRequest>();
            _additionRequestsMap.put(request.getDbkey(), requests);
        }
        requests.add(request);
    }
}
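
A side note on the pattern above (lazily creating a per-key queue under an external lock): on Java 8+, the lookup-or-create step can be done atomically with ConcurrentHashMap.computeIfAbsent, without synchronizing for that purpose. A minimal sketch, assuming the map were declared as a ConcurrentMap (field and type names mirror the listing but are otherwise illustrative; the original synchronized block may also guard other indexing state, in which case it cannot simply be dropped):

private final ConcurrentMap<String, Queue<IndexingRequest>> _additionRequestsMap =
        new ConcurrentHashMap<String, Queue<IndexingRequest>>();

private void addAdditionRequest(IndexingRequest request) {
    // computeIfAbsent creates and publishes the per-DB queue atomically on first use
    _additionRequestsMap
            .computeIfAbsent(request.getDbkey(), key -> new ConcurrentLinkedQueue<IndexingRequest>())
            .add(request);
}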

From source file:de.innovationgate.wgpublisher.lucene.LuceneManager.java

private void addDeletionRequest(IndexingRequest request) {
    synchronized (_indexingRequestLock) {
        // add to list and set service status running for DB
        Queue<IndexingRequest> requests = _deletionRequestsMap.get(request.getDbkey());
        if (requests == null) {
            requests = new ConcurrentLinkedQueue<IndexingRequest>();
            _deletionRequestsMap.put(request.getDbkey(), requests);
        }
        requests.add(request);
    }
}

From source file:io.hops.ha.common.TransactionStateImpl.java

private void persistFiCaSchedulerNodeToRemove(ResourceDataAccess resourceDA,
        FiCaSchedulerNodeDataAccess ficaNodeDA, RMContainerDataAccess rmcontainerDA,
        LaunchedContainersDataAccess launchedContainersDA) throws StorageException {
    if (!ficaSchedulerNodeInfoToRemove.isEmpty()) {
        Queue<FiCaSchedulerNode> toRemoveFiCaSchedulerNodes = new ConcurrentLinkedQueue<FiCaSchedulerNode>();
        for (String nodeId : ficaSchedulerNodeInfoToRemove.keySet()) {
            toRemoveFiCaSchedulerNodes.add(new FiCaSchedulerNode(nodeId));
        }
        ficaNodeDA.removeAll(toRemoveFiCaSchedulerNodes);
    }
}

From source file:org.apereo.portal.io.xml.JaxbPortalDataHandlerService.java

@Override
public void exportAllDataOfType(Set<String> typeIds, File directory, BatchExportOptions options) {
    final Queue<ExportFuture<?>> exportFutures = new ConcurrentLinkedQueue<ExportFuture<?>>();
    final boolean failOnError = options != null ? options.isFailOnError() : true;

    //Determine the parent directory to log to
    final File logDirectory = determineLogDirectory(options, "export");

    //Setup reporting file
    final File exportReport = new File(logDirectory, "data-export.txt");
    final PrintWriter reportWriter;
    try {
        reportWriter = new PrintWriter(new BufferedWriter(new FileWriter(exportReport)));
    } catch (IOException e) {
        throw new RuntimeException("Failed to create FileWriter for: " + exportReport, e);
    }

    try {
        for (final String typeId : typeIds) {
            final List<FutureHolder<?>> failedFutures = new LinkedList<FutureHolder<?>>();

            final File typeDir = new File(directory, typeId);
            logger.info("Adding all data of type {} to export queue: {}", typeId, typeDir);

            reportWriter.println(typeId + "," + typeDir);

            final Iterable<? extends IPortalData> dataForType = this.getPortalData(typeId);
            for (final IPortalData data : dataForType) {
                final String dataId = data.getDataId();

                //Check for completed futures on every iteration, needed to fail as fast as possible on an export exception
                final List<FutureHolder<?>> newFailed = waitForFutures(exportFutures, reportWriter,
                        logDirectory, false);
                failedFutures.addAll(newFailed);

                final AtomicLong exportTime = new AtomicLong(-1);

                //Create export task
                Callable<Object> task = new CallableWithoutResult() {
                    @Override
                    protected void callWithoutResult() {
                        exportTime.set(System.nanoTime());
                        try {
                            exportData(typeId, dataId, typeDir);
                        } finally {
                            exportTime.set(System.nanoTime() - exportTime.get());
                        }
                    }
                };

                //Submit the export task
                final Future<?> exportFuture = this.importExportThreadPool.submit(task);

                //Add the future for tracking
                final ExportFuture futureHolder = new ExportFuture(exportFuture, typeId, dataId, exportTime);
                exportFutures.offer(futureHolder);
            }

            final List<FutureHolder<?>> newFailed = waitForFutures(exportFutures, reportWriter, logDirectory,
                    true);
            failedFutures.addAll(newFailed);

            reportWriter.flush();

            if (failOnError && !failedFutures.isEmpty()) {
                throw new RuntimeException(failedFutures.size() + " " + typeId + " entities failed to export.\n"
                        + "\tPer entity exception logs and a full report can be found in " + logDirectory);
            }
        }
    } catch (InterruptedException e) {
        throw new RuntimeException("Interrupted while waiting for entities to export", e);
    } finally {
        IOUtils.closeQuietly(reportWriter);
    }
}

From source file:org.apache.sysml.runtime.matrix.data.LibMatrixDNN.java

private static void runConvTask(TaskType type, ConvolutionParameters params) throws DMLRuntimeException {
    int constrainedNumThreads = OptimizerUtils.getConstrainedNumThreads(params.numThreads);
    ConcurrentLinkedQueue<MatrixBlock> im2ColOutBlocks = new ConcurrentLinkedQueue<MatrixBlock>();
    ConcurrentLinkedQueue<MatrixBlock> doutReshapedBlocks = new ConcurrentLinkedQueue<MatrixBlock>();
    ConcurrentLinkedQueue<MatrixBlock> partialRetBlocks = new ConcurrentLinkedQueue<MatrixBlock>();
    if (ALLOW_MULTI_THREADED_OPS && params.isOutputThreadSafe() && constrainedNumThreads > 1) {
        int poolSize = Math.min(constrainedNumThreads, params.N);
        addMatrixBlocks(poolSize, type, params, im2ColOutBlocks, doutReshapedBlocks, partialRetBlocks);
        ArrayList<ConvTask> tasks = new ArrayList<ConvTask>();
        int NSize = params.N - poolSize;
        if (NSize >= constrainedNumThreads) {
            for (int n = 0; n < params.N; n++)
                tasks.add(new ConvTask(n, n + 1, type, params, im2ColOutBlocks, doutReshapedBlocks,
                        partialRetBlocks));
        } else {
            int numNTasks = (int) Math.ceil(((double) NSize) / constrainedNumThreads);
            for (int n = 0; n < NSize; n += numNTasks) {
                tasks.add(new ConvTask(n, Math.min(NSize, n + numNTasks), type, params, im2ColOutBlocks,
                        doutReshapedBlocks, partialRetBlocks));
            }
            for (int n = NSize; n < params.N; n++)
                tasks.add(new ConvTask(n, n + 1, type, params, im2ColOutBlocks, doutReshapedBlocks,
                        partialRetBlocks));
        }

        ExecutorService pool = Executors.newFixedThreadPool(poolSize);
        List<Future<Object>> taskret;
        try {
            taskret = pool.invokeAll(tasks);
            pool.shutdown();
            for (Future<Object> task : taskret) {
                task.get();
            }
            if (type == TaskType.LoopedIm2ColConv2dBwdFilter) {
                for (MatrixBlock partialRetBlock : partialRetBlocks) {
                    elementWiseInPlaceAddition(params.output, partialRetBlock);
                }
            }
        } catch (InterruptedException e) {
            throw new DMLRuntimeException("Error while executing multi-threaded " + type.name(), e);
        } catch (ExecutionException e) {
            throw new DMLRuntimeException("Error while executing multi-threaded " + type.name(), e);
        }
    } else {
        addMatrixBlocks(1, type, params, im2ColOutBlocks, doutReshapedBlocks, partialRetBlocks);
        ConvTask task = new ConvTask(0, 0, type, params, im2ColOutBlocks, doutReshapedBlocks, partialRetBlocks);
        try {
            for (int n = 0; n < params.N; n++) {
                task.n1 = n;
                task.n2 = n + 1;
                task.call();
            }
            if (type == TaskType.LoopedIm2ColConv2dBwdFilter) {
                for (MatrixBlock partialRetBlock : partialRetBlocks) {
                    elementWiseInPlaceAddition(params.output, partialRetBlock);
                }
            }
        } catch (Exception e) {
            throw new DMLRuntimeException("Error while executing single-threaded " + type.name(), e);
        }
    }
}

From source file:io.openvidu.test.e2e.OpenViduTestAppE2eTest.java

@Test
@DisplayName("Stream property changed event")
void streamPropertyChangedEventTest() throws Exception {

    Queue<Boolean> threadAssertions = new ConcurrentLinkedQueue<Boolean>();

    setupBrowser("chromeAlternateScreenShare");

    log.info("Stream property changed event");

    WebElement oneToManyInput = user.getDriver().findElement(By.id("one2many-input"));
    oneToManyInput.clear();
    oneToManyInput.sendKeys("1");

    user.getDriver().findElement(By.id("one2many-btn")).click();
    user.getDriver().findElement(By.className("screen-radio")).click();

    List<WebElement> joinButtons = user.getDriver().findElements(By.className("join-btn"));
    for (WebElement el : joinButtons) {
        el.sendKeys(Keys.ENTER);
    }

    user.getEventManager().waitUntilEventReaches("connectionCreated", 4);
    user.getEventManager().waitUntilEventReaches("accessAllowed", 1);
    user.getEventManager().waitUntilEventReaches("streamCreated", 2);
    user.getEventManager().waitUntilEventReaches("streamPlaying", 2);

    // Unpublish video
    final CountDownLatch latch1 = new CountDownLatch(2);
    user.getEventManager().on("streamPropertyChanged", (event) -> {
        threadAssertions.add("videoActive".equals(event.get("changedProperty").getAsString()));
        threadAssertions.add(!event.get("newValue").getAsBoolean());
        latch1.countDown();
    });
    user.getDriver().findElement(By.cssSelector("#openvidu-instance-0 .pub-video-btn")).click();
    user.getEventManager().waitUntilEventReaches("streamPropertyChanged", 2);

    if (!latch1.await(5000, TimeUnit.MILLISECONDS)) {
        gracefullyLeaveParticipants(2);
        fail();
        return;
    }

    user.getEventManager().off("streamPropertyChanged");
    log.info("Thread assertions: {}", threadAssertions.toString());
    for (Iterator<Boolean> iter = threadAssertions.iterator(); iter.hasNext();) {
        Assert.assertTrue("Some Event property was wrong", iter.next());
        iter.remove();
    }

    // Unpublish audio
    final CountDownLatch latch2 = new CountDownLatch(2);
    user.getEventManager().on("streamPropertyChanged", (event) -> {
        threadAssertions.add("audioActive".equals(event.get("changedProperty").getAsString()));
        threadAssertions.add(!event.get("newValue").getAsBoolean());
        latch2.countDown();
    });
    user.getDriver().findElement(By.cssSelector("#openvidu-instance-0 .pub-audio-btn")).click();
    user.getEventManager().waitUntilEventReaches("streamPropertyChanged", 4);

    if (!latch2.await(5000, TimeUnit.MILLISECONDS)) {
        gracefullyLeaveParticipants(2);
        fail();
        return;
    }

    user.getEventManager().off("streamPropertyChanged");
    log.info("Thread assertions: {}", threadAssertions.toString());
    for (Iterator<Boolean> iter = threadAssertions.iterator(); iter.hasNext();) {
        Assert.assertTrue("Some Event property was wrong", iter.next());
        iter.remove();
    }

    // Resize captured window
    final CountDownLatch latch3 = new CountDownLatch(2);
    int newWidth = 1000;
    int newHeight = 700;

    final long[] expectedWidthHeight = new long[2];

    user.getEventManager().on("streamPropertyChanged", (event) -> {
        String expectedDimensions = "{\"width\":" + expectedWidthHeight[0] + ",\"height\":"
                + expectedWidthHeight[1] + "}";
        threadAssertions.add("videoDimensions".equals(event.get("changedProperty").getAsString()));
        threadAssertions.add(expectedDimensions.equals(event.get("newValue").getAsJsonObject().toString()));
        latch3.countDown();
    });

    user.getDriver().manage().window().setSize(new Dimension(newWidth, newHeight));

    String widthAndHeight = user.getEventManager().getDimensionOfViewport();
    JSONObject obj = (JSONObject) new JSONParser().parse(widthAndHeight);

    expectedWidthHeight[0] = (long) obj.get("width");
    expectedWidthHeight[1] = (long) obj.get("height");

    System.out.println("New viewport dimension: " + obj.toJSONString());

    user.getEventManager().waitUntilEventReaches("streamPropertyChanged", 6);

    if (!latch3.await(5000, TimeUnit.MILLISECONDS)) {
        gracefullyLeaveParticipants(2);
        fail();
        return;
    }

    user.getEventManager().off("streamPropertyChanged");
    log.info("Thread assertions: {}", threadAssertions.toString());
    for (Iterator<Boolean> iter = threadAssertions.iterator(); iter.hasNext();) {
        Assert.assertTrue("Some Event property was wrong", iter.next());
        iter.remove();
    }

    gracefullyLeaveParticipants(2);
}

From source file:com.chinamobile.bcbsp.comm.MessageQueuesForDisk.java

/**
 * Converts a serialized String back into a queue of messages.
 * @param queueBuffer the serialized message-queue String
 * @return ConcurrentLinkedQueue<IMessage>
 */
@SuppressWarnings("unchecked")
private ConcurrentLinkedQueue<IMessage> stringToQueue(String queueBuffer) {
    ConcurrentLinkedQueue<IMessage> queue = new ConcurrentLinkedQueue<IMessage>();
    if (queueBuffer != null) {
        String[] msgs = queueBuffer.split(Constants.SPACE_SPLIT_FLAG);
        for (int i = 0; i < msgs.length; i++) {
            // Note: BSPMessage is temporarily hard-coded here. It should be created
            // via class.newInstance(), with the message class passed in.
            IMessage msg = new BSPMessage();
            msg.fromString(msgs[i]);
            queue.add(msg);
        }
    }
    return queue;
}

From source file:io.hops.metadata.util.RMUtilities.java

public static org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode getRMNode(final String id,
        final RMContext context, final Configuration conf) throws IOException {
    LightWeightRequestHandler getRMNodeHandler = new LightWeightRequestHandler(YARNOperationType.TEST) {
        @Override
        public Object performTask() throws IOException {
            connector.beginTransaction();
            connector.readLock();
            org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode rmNode = null;
            RMNodeDataAccess rmnodeDA = (RMNodeDataAccess) RMStorageFactory
                    .getDataAccess(RMNodeDataAccess.class);
            RMNode hopRMNode = (RMNode) rmnodeDA.findByNodeId(id);
            if (hopRMNode != null) {
                ResourceDataAccess resDA = (ResourceDataAccess) RMStorageFactory
                        .getDataAccess(ResourceDataAccess.class);
                NodeDataAccess nodeDA = (NodeDataAccess) RMStorageFactory.getDataAccess(NodeDataAccess.class);
                //Retrieve resource of RMNode
                Resource res = (Resource) resDA.findEntry(hopRMNode.getNodeId(), Resource.TOTAL_CAPABILITY,
                        Resource.RMNODE);

                NodeId nodeId = ConverterUtils.toNodeId(id);
                //Retrieve and Initialize NodeBase for RMNode
                org.apache.hadoop.net.Node node = null;
                if (hopRMNode.getNodeId() != null) {
                    Node hopNode = (Node) nodeDA.findById(hopRMNode.getNodeId());
                    node = new NodeBase(hopNode.getName(), hopNode.getLocation());
                    if (hopNode.getParent() != null) {
                        node.setParent(new NodeBase(hopNode.getParent()));
                    }
                    node.setLevel(hopNode.getLevel());
                }
                //Retrieve nextHeartbeat
                NextHeartbeatDataAccess nextHBDA = (NextHeartbeatDataAccess) RMStorageFactory
                        .getDataAccess(NextHeartbeatDataAccess.class);
                boolean nextHeartbeat = nextHBDA.findEntry(id);
                //Create Resource
                ResourceOption resourceOption = null;
                if (res != null) {
                    resourceOption = ResourceOption.newInstance(org.apache.hadoop.yarn.api.records.Resource
                            .newInstance(res.getMemory(), res.getVirtualCores()),
                            hopRMNode.getOvercommittimeout());
                }
                rmNode = new RMNodeImpl(nodeId, context, hopRMNode.getHostName(), hopRMNode.getCommandPort(),
                        hopRMNode.getHttpPort(), node, resourceOption, hopRMNode.getNodemanagerVersion(),
                        hopRMNode.getHealthReport(), hopRMNode.getLastHealthReportTime(), nextHeartbeat,
                        conf.getBoolean(YarnConfiguration.HOPS_DISTRIBUTED_RT_ENABLED,
                                YarnConfiguration.DEFAULT_HOPS_DISTRIBUTED_RT_ENABLED));

                ((RMNodeImpl) rmNode).setState(hopRMNode.getCurrentState());
                // *** Recover maps/lists of RMNode ***
                //Use a cache for retrieved ContainerStatus
                Map<String, ContainerStatus> hopContainerStatuses = new HashMap<String, ContainerStatus>();
                //1. Recover JustLaunchedContainers
                JustLaunchedContainersDataAccess jlcDA = (JustLaunchedContainersDataAccess) RMStorageFactory
                        .getDataAccess(JustLaunchedContainersDataAccess.class);
                ContainerStatusDataAccess containerStatusDA = (ContainerStatusDataAccess) RMStorageFactory
                        .getDataAccess(ContainerStatusDataAccess.class);
                List<JustLaunchedContainers> hopJlcList = jlcDA.findByRMNode(id);
                if (hopJlcList != null && !hopJlcList.isEmpty()) {
                    Map<org.apache.hadoop.yarn.api.records.ContainerId, org.apache.hadoop.yarn.api.records.ContainerStatus> justLaunchedContainers = new HashMap<org.apache.hadoop.yarn.api.records.ContainerId, org.apache.hadoop.yarn.api.records.ContainerStatus>();
                    for (JustLaunchedContainers hop : hopJlcList) {
                        //Create ContainerId
                        org.apache.hadoop.yarn.api.records.ContainerId cid = ConverterUtils
                                .toContainerId(hop.getContainerId());
                        //Find and create ContainerStatus
                        if (!hopContainerStatuses.containsKey(hop.getContainerId())) {
                            hopContainerStatuses.put(hop.getContainerId(),
                                    (ContainerStatus) containerStatusDA.findEntry(hop.getContainerId(), id));
                        }
                        org.apache.hadoop.yarn.api.records.ContainerStatus conStatus = org.apache.hadoop.yarn.api.records.ContainerStatus
                                .newInstance(cid,
                                        ContainerState.valueOf(
                                                hopContainerStatuses.get(hop.getContainerId()).getState()),
                                        hopContainerStatuses.get(hop.getContainerId()).getDiagnostics(),
                                        hopContainerStatuses.get(hop.getContainerId()).getExitstatus());
                        justLaunchedContainers.put(cid, conStatus);
                    }
                    ((RMNodeImpl) rmNode).setJustLaunchedContainers(justLaunchedContainers);
                }
                //2. Recover ContainerIdToClean
                ContainerIdToCleanDataAccess cidToCleanDA = (ContainerIdToCleanDataAccess) RMStorageFactory
                        .getDataAccess(ContainerIdToCleanDataAccess.class);
                List<ContainerId> cidToCleanList = cidToCleanDA.findByRMNode(id);
                if (cidToCleanList != null && !cidToCleanList.isEmpty()) {
                    Set<org.apache.hadoop.yarn.api.records.ContainerId> containersToClean = new TreeSet<org.apache.hadoop.yarn.api.records.ContainerId>();
                    for (ContainerId hop : cidToCleanList) {
                        //Create ContainerId
                        containersToClean.add(ConverterUtils.toContainerId(hop.getContainerId()));
                    }
                    ((RMNodeImpl) rmNode).setContainersToClean(containersToClean);
                }
                //3. Finished Applications
                FinishedApplicationsDataAccess finishedAppsDA = (FinishedApplicationsDataAccess) RMStorageFactory
                        .getDataAccess(FinishedApplicationsDataAccess.class);
                List<FinishedApplications> hopFinishedAppsList = finishedAppsDA.findByRMNode(id);
                if (hopFinishedAppsList != null && !hopFinishedAppsList.isEmpty()) {
                    List<ApplicationId> finishedApps = new ArrayList<ApplicationId>();
                    for (FinishedApplications hop : hopFinishedAppsList) {
                        finishedApps.add(ConverterUtils.toApplicationId(hop.getApplicationId()));
                    }
                    ((RMNodeImpl) rmNode).setFinishedApplications(finishedApps);
                }

                //4. UpdatedContainerInfo
                UpdatedContainerInfoDataAccess uciDA = (UpdatedContainerInfoDataAccess) RMStorageFactory
                        .getDataAccess(UpdatedContainerInfoDataAccess.class);
                //Retrieve all UpdatedContainerInfo entries for this particular RMNode
                Map<Integer, List<UpdatedContainerInfo>> hopUpdatedContainerInfoMap = uciDA.findByRMNode(id);
                if (hopUpdatedContainerInfoMap != null && !hopUpdatedContainerInfoMap.isEmpty()) {
                    ConcurrentLinkedQueue<org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo> updatedContainerInfoQueue = new ConcurrentLinkedQueue<org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo>();
                    for (int uciId : hopUpdatedContainerInfoMap.keySet()) {
                        for (UpdatedContainerInfo hopUCI : hopUpdatedContainerInfoMap.get(uciId)) {
                            List<org.apache.hadoop.yarn.api.records.ContainerStatus> newlyAllocated = new ArrayList<org.apache.hadoop.yarn.api.records.ContainerStatus>();
                            List<org.apache.hadoop.yarn.api.records.ContainerStatus> completed = new ArrayList<org.apache.hadoop.yarn.api.records.ContainerStatus>();
                            //Retrieve ContainerStatus entries for the particular UpdatedContainerInfo
                            org.apache.hadoop.yarn.api.records.ContainerId cid = ConverterUtils
                                    .toContainerId(hopUCI.getContainerId());
                            if (!hopContainerStatuses.containsKey(hopUCI.getContainerId())) {
                                hopContainerStatuses.put(hopUCI.getContainerId(),
                                        (ContainerStatus) containerStatusDA.findEntry(hopUCI.getContainerId(),
                                                id));
                            }
                            org.apache.hadoop.yarn.api.records.ContainerStatus conStatus = org.apache.hadoop.yarn.api.records.ContainerStatus
                                    .newInstance(cid,
                                            ContainerState.valueOf(hopContainerStatuses
                                                    .get(hopUCI.getContainerId()).getState()),
                                            hopContainerStatuses.get(hopUCI.getContainerId()).getDiagnostics(),
                                            hopContainerStatuses.get(hopUCI.getContainerId()).getExitstatus());
                            //Check ContainerStatus state to add it to appropriate list
                            if (conStatus != null) {
                                if (conStatus.getState().toString()
                                        .equals(TablesDef.ContainerStatusTableDef.STATE_RUNNING)) {
                                    newlyAllocated.add(conStatus);
                                } else if (conStatus.getState().toString()
                                        .equals(TablesDef.ContainerStatusTableDef.STATE_COMPLETED)) {
                                    completed.add(conStatus);
                                }
                            }
                            org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo uci = new org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo(
                                    newlyAllocated, completed, hopUCI.getUpdatedContainerInfoId());
                            updatedContainerInfoQueue.add(uci);
                            ((RMNodeImpl) rmNode).setUpdatedContainerInfo(updatedContainerInfoQueue);
                            //Update uci counter
                            ((RMNodeImpl) rmNode).setUpdatedContainerInfoId(hopRMNode.getUciId());
                        }
                    }
                }

                //5. Retrieve latestNodeHeartBeatResponse
                NodeHBResponseDataAccess hbDA = (NodeHBResponseDataAccess) RMStorageFactory
                        .getDataAccess(NodeHBResponseDataAccess.class);
                NodeHBResponse hopHB = (NodeHBResponse) hbDA.findById(id);
                if (hopHB != null) {
                    NodeHeartbeatResponse hb = new NodeHeartbeatResponsePBImpl(
                            YarnServerCommonServiceProtos.NodeHeartbeatResponseProto
                                    .parseFrom(hopHB.getResponse()));
                    ((RMNodeImpl) rmNode).setLatestNodeHBResponse(hb);
                }
            }
            connector.commit();
            return rmNode;
        }
    };
    return (org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode) getRMNodeHandler.handle();
}

From source file:com.gatf.executor.report.ReportHandler.java

public void initializeResultsHolders(int runNums, String fileName) {
    if (runNums > 1) {
        for (int i = 0; i < runNums; i++) {
            finalTestResults.put("Run-" + (i + 1), new ConcurrentLinkedQueue<TestCaseReport>());
        }
    } else {
        finalTestResults.put(fileName, new ConcurrentLinkedQueue<TestCaseReport>());
    }
}