Example usage for java.util.concurrent.atomic AtomicInteger incrementAndGet

Introduction

This page collects usage examples for java.util.concurrent.atomic.AtomicInteger.incrementAndGet().

Prototype

public final int incrementAndGet() 

Documentation

Atomically increments the current value, with memory effects as specified by VarHandle#getAndAdd, and returns the updated value.
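
Before the project-specific examples below, a minimal, self-contained sketch of the typical pattern: several threads increment a shared AtomicInteger concurrently and no updates are lost. The class name CounterDemo and the thread and iteration counts are illustrative only, not taken from any of the projects below.

import java.util.concurrent.atomic.AtomicInteger;

public class CounterDemo {
    public static void main(String[] args) throws InterruptedException {
        final AtomicInteger counter = new AtomicInteger();

        // Ten threads each increment the shared counter 1,000 times.
        Thread[] threads = new Thread[10];
        for (int i = 0; i < threads.length; i++) {
            threads[i] = new Thread(() -> {
                for (int j = 0; j < 1000; j++) {
                    counter.incrementAndGet(); // atomic read-modify-write; returns the new value
                }
            });
            threads[i].start();
        }
        for (Thread t : threads) {
            t.join();
        }

        // Always prints 10000; a plain int++ here could lose increments.
        System.out.println(counter.get());
    }
}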

Usage

From source file:com.spectralogic.ds3client.integration.GetJobManagement_Test.java

@Test
public void testFiringFailureHandlerWhenGettingChunks() throws URISyntaxException, NoSuchMethodException,
        InvocationTargetException, IllegalAccessException, IOException {
    final String tempPathPrefix = null;
    final Path tempDirectory = Files.createTempDirectory(Paths.get("."), tempPathPrefix);

    try {
        final AtomicInteger numFailuresRecorded = new AtomicInteger();

        final FailureEventListener failureEventListener = new FailureEventListener() {
            @Override
            public void onFailure(final FailureEvent failureEvent) {
                numFailuresRecorded.incrementAndGet();
                assertEquals(FailureEvent.FailureActivity.GettingObject, failureEvent.doingWhat());
            }
        };

        final Ds3ClientHelpers.Job readJob = createReadJobWithObjectsReadyToTransfer(
                Ds3ClientShimFactory.ClientFailureType.ChunkAllocation);

        readJob.attachFailureEventListener(failureEventListener);

        try {
            readJob.transfer(new FileObjectGetter(tempDirectory));
        } catch (final IOException e) {
            assertEquals(1, numFailuresRecorded.get());
        }
    } finally {
        FileUtils.deleteDirectory(tempDirectory.toFile());
    }
}

From source file:com.spectralogic.ds3client.integration.GetJobManagement_Test.java

@Test
public void testFiringFailureHandlerWhenGettingObject() throws URISyntaxException, NoSuchMethodException,
        InvocationTargetException, IllegalAccessException, IOException {
    final String tempPathPrefix = null;
    final Path tempDirectory = Files.createTempDirectory(Paths.get("."), tempPathPrefix);

    try {
        final AtomicInteger numFailuresRecorded = new AtomicInteger();

        final FailureEventListener failureEventListener = new FailureEventListener() {
            @Override
            public void onFailure(final FailureEvent failureEvent) {
                numFailuresRecorded.incrementAndGet();
                assertEquals(FailureEvent.FailureActivity.GettingObject, failureEvent.doingWhat());
            }
        };

        final Ds3ClientHelpers.Job readJob = createReadJobWithObjectsReadyToTransfer(
                Ds3ClientShimFactory.ClientFailureType.GetObject);

        readJob.attachFailureEventListener(failureEventListener);

        try {
            readJob.transfer(new FileObjectGetter(tempDirectory));
        } catch (final IOException e) {
            assertEquals(1, numFailuresRecorded.get());
        }
    } finally {
        FileUtils.deleteDirectory(tempDirectory.toFile());
    }
}

From source file:com.spectralogic.ds3client.integration.GetJobManagement_Test.java

@Test
public void testStartReadAllJobUsingStreamedBehavior() throws IOException {
    final String tempPathPrefix = null;
    final Path tempDirectory = Files.createTempDirectory(Paths.get("."), tempPathPrefix);

    try {
        final AtomicInteger numFailuresRecorded = new AtomicInteger(0);

        final FailureEventListener failureEventListener = new FailureEventListener() {
            @Override
            public void onFailure(final FailureEvent failureEvent) {
                numFailuresRecorded.incrementAndGet();
                assertEquals(FailureEvent.FailureActivity.GettingObject, failureEvent.doingWhat());
            }
        };

        final Ds3ClientHelpers.Job readJob = HELPERS.startReadAllJobUsingStreamedBehavior(BUCKET_NAME);
        readJob.attachFailureEventListener(failureEventListener);
        readJob.transfer(new FileObjectGetter(tempDirectory));

        final Collection<File> filesInTempDirectory = FileUtils.listFiles(tempDirectory.toFile(), null, false);

        final List<String> filesWeExpectToBeInTempDirectory = Arrays.asList("beowulf.txt", "lesmis.txt",
                "lesmis-copies.txt", "GreatExpectations.txt");

        for (final File fileInTempDirectory : filesInTempDirectory) {
            assertTrue(filesWeExpectToBeInTempDirectory.contains(fileInTempDirectory.getName()));
        }

        assertEquals(0, numFailuresRecorded.get());
    } finally {
        FileUtils.deleteDirectory(tempDirectory.toFile());
    }
}

From source file:com.spectralogic.ds3client.integration.GetJobManagement_Test.java

@Test
public void testStartReadAllJobUsingRandomAccessBehavior() throws IOException {
    final String tempPathPrefix = null;
    final Path tempDirectory = Files.createTempDirectory(Paths.get("."), tempPathPrefix);

    try {
        final AtomicInteger numFailuresRecorded = new AtomicInteger(0);

        final FailureEventListener failureEventListener = new FailureEventListener() {
            @Override
            public void onFailure(final FailureEvent failureEvent) {
                numFailuresRecorded.incrementAndGet();
                assertEquals(FailureEvent.FailureActivity.GettingObject, failureEvent.doingWhat());
            }
        };

        final Ds3ClientHelpers.Job readJob = HELPERS.startReadAllJobUsingRandomAccessBehavior(BUCKET_NAME);
        readJob.attachFailureEventListener(failureEventListener);
        readJob.transfer(new FileObjectGetter(tempDirectory));

        final Collection<File> filesInTempDirectory = FileUtils.listFiles(tempDirectory.toFile(), null, false);

        final List<String> filesWeExpectToBeInTempDirectory = Arrays.asList("beowulf.txt", "lesmis.txt",
                "lesmis-copies.txt", "GreatExpectations.txt");

        for (final File fileInTempDirectory : filesInTempDirectory) {
            assertTrue(filesWeExpectToBeInTempDirectory.contains(fileInTempDirectory.getName()));
        }

        assertEquals(0, numFailuresRecorded.get());
    } finally {
        FileUtils.deleteDirectory(tempDirectory.toFile());
    }
}

From source file:org.apache.nifi.cluster.coordination.http.replication.TestThreadPoolRequestReplicator.java

@Test(timeout = 15000)
public void testMultipleRequestWithTwoPhaseCommit() {
    final Set<NodeIdentifier> nodeIds = new HashSet<>();
    final NodeIdentifier nodeId = new NodeIdentifier("1", "localhost", 8100, "localhost", 8101, "localhost",
            8102, 8103, false);
    nodeIds.add(nodeId);

    final ClusterCoordinator coordinator = Mockito.mock(ClusterCoordinator.class);
    Mockito.when(coordinator.getConnectionStatus(Mockito.any(NodeIdentifier.class)))
            .thenReturn(new NodeConnectionStatus(nodeId, NodeConnectionState.CONNECTED));

    final AtomicInteger requestCount = new AtomicInteger(0);
    final ThreadPoolRequestReplicator replicator = new ThreadPoolRequestReplicator(2, new Client(), coordinator,
            "1 sec", "1 sec", null, null, NiFiProperties.createBasicNiFiProperties(null, null)) {
        @Override
        protected NodeResponse replicateRequest(final WebResource.Builder resourceBuilder,
                final NodeIdentifier nodeId, final String method, final URI uri, final String requestId,
                Map<String, String> givenHeaders) {
            // the resource builder will not expose its headers to us, so we are using Mockito's Whitebox class to extract them.
            final OutBoundHeaders headers = (OutBoundHeaders) Whitebox.getInternalState(resourceBuilder,
                    "metadata");
            final Object expectsHeader = headers
                    .getFirst(ThreadPoolRequestReplicator.REQUEST_VALIDATION_HTTP_HEADER);

            final int statusCode;
            if (requestCount.incrementAndGet() == 1) {
                assertEquals(ThreadPoolRequestReplicator.NODE_CONTINUE, expectsHeader);
                statusCode = 150;
            } else {
                assertNull(expectsHeader);
                statusCode = Status.OK.getStatusCode();
            }

            // Return given response from all nodes.
            final ClientResponse clientResponse = new ClientResponse(statusCode, new InBoundHeaders(),
                    new ByteArrayInputStream(new byte[0]), null);
            return new NodeResponse(nodeId, method, uri, clientResponse, -1L, requestId);
        }
    };

    try {
        // set the user
        final Authentication authentication = new NiFiAuthenticationToken(
                new NiFiUserDetails(StandardNiFiUser.ANONYMOUS));
        SecurityContextHolder.getContext().setAuthentication(authentication);

        final AsyncClusterResponse clusterResponse = replicator.replicate(nodeIds, HttpMethod.POST,
                new URI("http://localhost:80/processors/1"), new ProcessorEntity(), new HashMap<>(), true,
                true);
        clusterResponse.awaitMergedResponse();

        // Ensure that we received two requests - the first should contain the X-NcmExpects header; the second should not.
        // These assertions are validated above, in the overridden replicateRequest method.
        assertEquals(2, requestCount.get());
    } catch (final Exception e) {
        e.printStackTrace();
        Assert.fail(e.toString());
    } finally {
        replicator.shutdown();
    }
}

From source file:org.apache.hadoop.hbase.regionserver.TestAtomicOperation.java

@Test
public void testAppendMultiThreads() throws IOException {
    LOG.info("Starting test testAppendMultiThreads");
    // run a test with mixed column families (1 and 3 versions)
    initHRegion(tableName, name.getMethodName(), new int[] { 1, 3 }, fam1, fam2);

    int numThreads = 100;
    int opsPerThread = 100;
    AtomicOperation[] all = new AtomicOperation[numThreads];
    final byte[] val = new byte[] { 1 };

    AtomicInteger failures = new AtomicInteger(0);
    // create all threads
    for (int i = 0; i < numThreads; i++) {
        all[i] = new AtomicOperation(region, opsPerThread, null, failures) {
            @Override
            public void run() {
                for (int i = 0; i < numOps; i++) {
                    try {
                        Append a = new Append(row);
                        a.add(fam1, qual1, val);
                        a.add(fam1, qual2, val);
                        a.add(fam2, qual3, val);
                        region.append(a);

                        Get g = new Get(row);
                        Result result = region.get(g);
                        assertEquals(result.getValue(fam1, qual1).length, result.getValue(fam1, qual2).length);
                        assertEquals(result.getValue(fam1, qual1).length, result.getValue(fam2, qual3).length);
                    } catch (IOException e) {
                        e.printStackTrace();
                        failures.incrementAndGet();
                        fail();
                    }
                }
            }
        };
    }

    // run all threads
    for (int i = 0; i < numThreads; i++) {
        all[i].start();
    }

    // wait for all threads to finish
    for (int i = 0; i < numThreads; i++) {
        try {
            all[i].join();
        } catch (InterruptedException e) {
        }
    }
    assertEquals(0, failures.get());
    Get g = new Get(row);
    Result result = region.get(g);
    assertEquals(result.getValue(fam1, qual1).length, 10000);
    assertEquals(result.getValue(fam1, qual2).length, 10000);
    assertEquals(result.getValue(fam2, qual3).length, 10000);
}

From source file:com.btoddb.fastpersitentqueue.InMemorySegmentMgrTest.java

@Test
public void testThreading() throws IOException, ExecutionException {
    final int entrySize = 1000;
    final int numEntries = 3000;
    final int numPushers = 3;
    int numPoppers = 3;

    final Random pushRand = new Random(1000L);
    final Random popRand = new Random(1000000L);
    final AtomicInteger pusherFinishCount = new AtomicInteger();
    final AtomicInteger numPops = new AtomicInteger();
    final AtomicLong pushSum = new AtomicLong();
    final AtomicLong popSum = new AtomicLong();

    mgr.setMaxSegmentSizeInBytes(10000);
    mgr.init();

    ExecutorService execSrvc = Executors.newFixedThreadPool(numPushers + numPoppers);

    Set<Future> futures = new HashSet<Future>();

    // start pushing
    for (int i = 0; i < numPushers; i++) {
        Future future = execSrvc.submit(new Runnable() {
            @Override
            public void run() {
                for (int i = 0; i < numEntries; i++) {
                    try {
                        long x = idGen.incrementAndGet();
                        pushSum.addAndGet(x);
                        FpqEntry entry = new FpqEntry(x, new byte[entrySize]);
                        mgr.push(entry);
                        if (x % 500 == 0) {
                            System.out.println("pushed ID = " + x);
                        }
                        Thread.sleep(pushRand.nextInt(5));
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                }
                pusherFinishCount.incrementAndGet();
            }
        });
        futures.add(future);
    }

    // start popping
    for (int i = 0; i < numPoppers; i++) {
        Future future = execSrvc.submit(new Runnable() {
            @Override
            public void run() {
                while (pusherFinishCount.get() < numPushers || !mgr.isEmpty()) {
                    try {
                        FpqEntry entry;
                        while (null != (entry = mgr.pop())) {
                            if (entry.getId() % 500 == 0) {
                                System.out.println("popped ID = " + entry.getId());
                            }

                            popSum.addAndGet(entry.getId());
                            numPops.incrementAndGet();
                            Thread.sleep(popRand.nextInt(5));
                        }
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                }
            }
        });
        futures.add(future);
    }

    boolean finished = false;
    while (!finished) {
        try {
            for (Future f : futures) {
                f.get();
            }
            finished = true;
        } catch (InterruptedException e) {
            // ignore
            Thread.interrupted();
        }
    }

    assertThat(numPops.get(), is(numEntries * numPushers));
    assertThat(popSum.get(), is(pushSum.get()));
    assertThat(mgr.getNumberOfEntries(), is(0L));
    assertThat(mgr.getNumberOfActiveSegments(), is(1));
    assertThat(mgr.getSegments(), hasSize(1));
    assertThat(FileUtils.listFiles(theDir, TrueFileFilter.INSTANCE, TrueFileFilter.INSTANCE), is(empty()));

    // make sure we tested paging in/out
    assertThat(mgr.getNumberOfSwapOut(), is(greaterThan(0L)));
    assertThat(mgr.getNumberOfSwapIn(), is(mgr.getNumberOfSwapOut()));
}

From source file:juicebox.data.MatrixZoomData.java

/**
 * Add the blocks of normalized, observed values overlapping the specified rectangular region
 * to the given list. The units are "bins".
 *
 * @param blockList list to which the overlapping, normalized blocks are added
 * @param binX1 leftmost position in "bins"
 * @param binY1 top position in "bins"
 * @param binX2 rightmost position in "bins"
 * @param binY2 bottom position in "bins"
 * @param no    normalization type
 * @return the number of errors encountered while loading blocks
 */
public int addNormalizedBlocksToList(final List<Block> blockList, int binX1, int binY1, int binX2, int binY2,
        final NormalizationType no) {

    int col1 = binX1 / blockBinCount;
    int row1 = binY1 / blockBinCount;

    int col2 = binX2 / blockBinCount;
    int row2 = binY2 / blockBinCount;

    List<Integer> blocksToLoad = new ArrayList<Integer>();
    for (int r = row1; r <= row2; r++) {
        for (int c = col1; c <= col2; c++) {
            int blockNumber = r * getBlockColumnCount() + c;

            String key = getKey() + "_" + blockNumber + "_" + no;
            Block b;
            if (HiCGlobals.useCache && blockCache.containsKey(key)) {
                b = blockCache.get(key);
                blockList.add(b);
            } else {
                blocksToLoad.add(blockNumber);
            }
        }
    }

    final AtomicInteger errorCounter = new AtomicInteger();

    List<Thread> threads = new ArrayList<Thread>();
    for (final int blockNumber : blocksToLoad) {
        Runnable loader = new Runnable() {
            @Override
            public void run() {
                try {
                    String key = getKey() + "_" + blockNumber + "_" + no;
                    Block b = reader.readNormalizedBlock(blockNumber, MatrixZoomData.this, no);
                    if (b == null) {
                        b = new Block(blockNumber); // An empty block
                    }
                    if (HiCGlobals.useCache) {
                        blockCache.put(key, b);
                    }
                    blockList.add(b);
                } catch (IOException e) {
                    errorCounter.incrementAndGet();
                }
            }
        };

        Thread t = new Thread(loader);
        threads.add(t);
        t.start();
    }

    // Wait for all threads to complete
    for (Thread t : threads) {
        try {
            t.join();
        } catch (InterruptedException ignore) {
        }
    }

    // untested since files got fixed - MSS
    return errorCounter.get();
}

From source file:com.ibm.jaggr.service.impl.deps.DepTree.java

/**
 * Object constructor. Attempts to de-serialize the cached dependency lists
 * from disk and then validates the dependency lists based on last-modified
 * dates, looking for any new or removed files. If the cached dependency
 * list data cannot be de-serialized, new lists are constructed. Once the
 * dependency lists have been validated, the list data is serialized back
 * out to disk.
 * 
 * @param paths
 *            Collection of URIs which specify the target resources
 *            to be scanned for javascript files.
 * @param aggregator
 *            The servlet instance for this object
 * @param stamp
 *            timestamp associated with external override/customization
 *            resources that are checked on every server restart
 * @param clean
 *            If true, then the dependency lists are generated from scratch
 *            rather than by de-serializing and then validating the cached
 *            dependency lists.
 * @param validateDeps
 *            If true, then validate existing cached dependencies using
 *            file last-modified times.
 * @throws IOException
 */
public DepTree(Collection<URI> paths, IAggregator aggregator, long stamp, boolean clean, boolean validateDeps)
        throws IOException {
    this.stamp = stamp;
    IConfig config = aggregator.getConfig();
    rawConfig = config.toString();

    File cacheDir = new File(aggregator.getWorkingDirectory(), DEPCACHE_DIRNAME);
    File cacheFile = new File(cacheDir, CACHE_FILE);

    /*
     * The de-serialized dependency map. If we have a cached dependency map,
     * then it will be validated against the last-modified dates of the
     * current files and only the files that have changed will need to be
     * re-parsed to update the dependency lists.
     */
    DepTree cached = null;

    if (!clean) {
        // If we're not starting clean, try to de-serialize the map from
        // cache
        try {
            ObjectInputStream is = new ObjectInputStream(new FileInputStream(cacheFile));
            try {
                cached = (DepTree) is.readObject();
            } finally {
                try {
                    is.close();
                } catch (Exception ignore) {
                }
            }
        } catch (FileNotFoundException e) {
            /*
             * Not an error. Just means that the cache file hasn't been
             * written yet or else it's been deleted.
             */
            if (log.isLoggable(Level.INFO))
                log.log(Level.INFO, Messages.DepTree_1);
        } catch (Exception e) {
            if (log.isLoggable(Level.SEVERE))
                log.log(Level.SEVERE, e.getMessage(), e);
        }
    }

    // If the cacheBust config param has changed, then do a clean build
    // of the dependencies.
    if (cached != null) {
        if (stamp == 0) {
            // no init stamp provided.  Preserve the cached one.
            stamp = cached.stamp;
        }
        if (stamp > cached.stamp) {
            // init stamp has been updated.  Validate dependencies.
            validateDeps = true;
        }
        cacheBust = aggregator.getOptions().getCacheBust();
        if (!StringUtils.equals(cacheBust, cached.cacheBust)) {
            if (log.isLoggable(Level.INFO)) {
                log.info(Messages.DepTree_2);
            }
            cached = null;
        }
    }

    /*
     * If we de-serialized a previously saved dependency map, then go with
     * that.
     */
    if (cached != null && rawConfig.equals(cached.rawConfig) && !validateDeps && !clean) {
        depMap = cached.depMap;
        return;
    }

    // Initialize the dependency map
    depMap = new ConcurrentHashMap<URI, DepTreeNode>();

    // This can take a while, so print something to the console
    String msg = MessageFormat.format(Messages.DepTree_3, new Object[] { aggregator.getName() });

    ConsoleService cs = new ConsoleService();
    cs.println(msg);

    if (log.isLoggable(Level.INFO)) {
        log.info(msg);
    }
    // Make sure that all the paths are unique and orthogonal
    paths = DepUtils.removeRedundantPaths(paths);

    /*
     * Create the thread pools, one for the tree builders and one for the
     * parsers. Since a tree builder thread will wait for all the outstanding
     * parser threads started by that builder to complete, we need to use two
     * independent thread pools to guard against the possibility of deadlock
     * caused by all the threads in the pool being consumed by tree builders
     * and leaving none available to service the parsers.
     */
    final ThreadGroup treeBuilderTG = new ThreadGroup(TREEBUILDER_TGNAME),
            parserTG = new ThreadGroup(JSPARSER_TGNAME);
    ExecutorService treeBuilderExc = Executors.newFixedThreadPool(10, new ThreadFactory() {
        public Thread newThread(Runnable r) {
            return new Thread(treeBuilderTG, r, MessageFormat.format(THREADNAME,
                    new Object[] { treeBuilderTG.getName(), treeBuilderTG.activeCount() }));
        }
    }), parserExc = Executors.newFixedThreadPool(20, new ThreadFactory() {
        public Thread newThread(Runnable r) {
            return new Thread(parserTG, r, MessageFormat.format(THREADNAME,
                    new Object[] { parserTG.getName(), parserTG.activeCount() }));
        }
    });

    // Counter to keep track of number of tree builder threads started
    AtomicInteger treeBuilderCount = new AtomicInteger(0);

    // The completion services for the thread pools
    final CompletionService<URI> parserCs = new ExecutorCompletionService<URI>(parserExc);
    CompletionService<DepTreeBuilder.Result> treeBuilderCs = new ExecutorCompletionService<DepTreeBuilder.Result>(
            treeBuilderExc);

    // Start the tree builder threads to process the paths
    for (final URI path : paths) {
        /*
         * Create or get from cache the root node for this path and
         * add it to the new map.
         */
        DepTreeNode root = new DepTreeNode(PathUtil.getModuleName(path));
        DepTreeNode cachedNode = null;
        if (cached != null) {
            cachedNode = cached.depMap.get(path);
            if (log.isLoggable(Level.INFO)) {
                log.info(MessageFormat.format(Messages.DepTree_4, new Object[] { path }));
            }
        } else {
            if (log.isLoggable(Level.INFO)) {
                log.info(MessageFormat.format(Messages.DepTree_5, new Object[] { path }));
            }
        }
        depMap.put(path, root);

        treeBuilderCount.incrementAndGet();
        treeBuilderCs.submit(new DepTreeBuilder(aggregator, parserCs, path, root, cachedNode));
    }

    // List of parser exceptions
    LinkedList<Exception> parserExceptions = new LinkedList<Exception>();

    /*
     * Pull the completed tree builder tasks from the completion queue until
     * all the paths have been processed
     */
    while (treeBuilderCount.decrementAndGet() >= 0) {
        try {
            DepTreeBuilder.Result result = treeBuilderCs.take().get();
            if (log.isLoggable(Level.INFO)) {
                log.info(MessageFormat.format(Messages.DepTree_6,
                        new Object[] { result.parseCount, result.dirName }));
            }
        } catch (Exception e) {
            if (log.isLoggable(Level.SEVERE))
                log.log(Level.SEVERE, e.getMessage(), e);
            parserExceptions.add(e);
        }
    }

    // shutdown the thread pools now that we're done with them
    parserExc.shutdown();
    treeBuilderExc.shutdown();

    // If parser exceptions occurred, then rethrow the first one 
    if (parserExceptions.size() > 0) {
        throw new RuntimeException(parserExceptions.get(0));
    }

    // Prune dead nodes (nodes with no children or dependency lists)
    for (Map.Entry<URI, DepTreeNode> entry : depMap.entrySet()) {
        entry.getValue().prune();
    }

    /*
     * Make sure the cache directory exists before we try to serialize the
     * dependency map.
     */
    if (!cacheDir.exists())
        if (!cacheDir.mkdirs()) {
            throw new IOException(
                    MessageFormat.format(Messages.DepTree_0, new Object[] { cacheDir.getAbsolutePath() }));
        }

    // Serialize the map to the cache directory
    ObjectOutputStream os;
    os = new ObjectOutputStream(new FileOutputStream(cacheFile));
    try {
        os.writeObject(this);
    } finally {
        try {
            os.close();
        } catch (Exception ignore) {
        }
    }
    msg = MessageFormat.format(Messages.DepTree_7, new Object[] { aggregator.getName() });

    // Output that we're done.
    cs.println(msg);
    if (log.isLoggable(Level.INFO)) {
        log.info(msg);
    }
}

From source file:juicebox.data.MatrixZoomData.java

/**
 * Return the blocks of normalized, observed values overlapping the rectangular region specified.
 * The units are "bins".
 *
 * @param binX1 leftmost position in "bins"
 * @param binY1 top position in "bins"
 * @param binX2 rightmost position in "bins"
 * @param binY2 bottom position in "bins"
 * @param no    normalization type
 * @return List of overlapping normalized blocks, or null if an error occurred while loading
 */
public List<Block> getNormalizedBlocksOverlapping(int binX1, int binY1, int binX2, int binY2,
        final NormalizationType no) {

    int col1 = binX1 / blockBinCount;
    int row1 = binY1 / blockBinCount;

    int col2 = binX2 / blockBinCount;
    int row2 = binY2 / blockBinCount;

    int maxSize = (col2 - col1 + 1) * (row2 - row1 + 1);

    final List<Block> blockList = new ArrayList<Block>(maxSize);
    final List<Integer> blocksToLoad = new ArrayList<Integer>();
    for (int r = row1; r <= row2; r++) {
        for (int c = col1; c <= col2; c++) {
            int blockNumber = r * getBlockColumnCount() + c;

            String key = getKey() + "_" + blockNumber + "_" + no;
            Block b;
            if (HiCGlobals.useCache && blockCache.containsKey(key)) {
                b = blockCache.get(key);
                blockList.add(b);
            } else {
                blocksToLoad.add(blockNumber);
            }
        }
    }

    final AtomicInteger errorCounter = new AtomicInteger();

    List<Thread> threads = new ArrayList<Thread>();
    for (final int blockNumber : blocksToLoad) {
        Runnable loader = new Runnable() {
            @Override
            public void run() {
                try {
                    String key = getKey() + "_" + blockNumber + "_" + no;
                    Block b = reader.readNormalizedBlock(blockNumber, MatrixZoomData.this, no);
                    if (b == null) {
                        b = new Block(blockNumber); // An empty block
                    }
                    if (HiCGlobals.useCache) {
                        blockCache.put(key, b);
                    }
                    blockList.add(b);
                } catch (IOException e) {
                    errorCounter.incrementAndGet();
                }
            }
        };

        Thread t = new Thread(loader);
        threads.add(t);
        t.start();
    }

    // Wait for all threads to complete
    for (Thread t : threads) {
        try {
            t.join();
        } catch (InterruptedException ignore) {
        }
    }

    // untested since files got fixed - MSS
    if (errorCounter.get() > 0) {
        return null;
    }

    return blockList;
}