Example usage for java.util.concurrent.atomic AtomicInteger get

List of usage examples for java.util.concurrent.atomic AtomicInteger get

Introduction

On this page you can find example usage for java.util.concurrent.atomic AtomicInteger get.

Prototype

public final int get() 

Document

Returns the current value, with memory effects as specified by VarHandle#getVolatile.
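For a quick, self-contained illustration, the sketch below shows get() reading the current value without modifying it (the class and variable names here are made up for this sketch and do not come from the projects listed under Usage):

import java.util.concurrent.atomic.AtomicInteger;

public class AtomicIntegerGetExample {
    public static void main(String[] args) {
        AtomicInteger counter = new AtomicInteger(0);

        counter.incrementAndGet();   // counter is now 1
        counter.addAndGet(4);        // counter is now 5

        // get() performs a volatile read of the current value and does not change it.
        int snapshot = counter.get();
        System.out.println("current value = " + snapshot);   // prints: current value = 5
    }
}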

Usage

From source file:io.uploader.drive.drive.media.MediaHttpUploader.java

/**
 * Uploads the media in a resumable manner.
 *
 * @param initiationRequestUrl
 *            The request URL where the initiation request will be sent
 * @return HTTP response
 */
// https://developers.google.com/drive/web/manage-uploads#resumable
private HttpResponse resumableUpload(GenericUrl initiationRequestUrl) throws IOException {

    // Make initial request to get the unique upload URL.
    HttpResponse initialResponse = executeUploadInitiation(initiationRequestUrl);
    if (!initialResponse.isSuccessStatusCode()) {
        // If the initiation request is not successful return it
        // immediately.
        logger.info("Unsuccessful: " + initialResponse.getStatusMessage());

        return initialResponse;
    }
    GenericUrl uploadUrl;
    try {
        uploadUrl = new GenericUrl(initialResponse.getHeaders().getLocation());
    } finally {
        initialResponse.disconnect();
    }

    // Convert media content into a byte stream to upload in chunks.
    contentInputStream = mediaContent.getInputStream();
    if (!contentInputStream.markSupported() && isMediaLengthKnown()) {
        // If we know the media content length, wrap the stream in a
        // BufferedInputStream to support the {@link InputStream#mark} and
        // {@link InputStream#reset} methods required for handling server errors.
        contentInputStream = new BufferedInputStream(contentInputStream);
    }

    HttpResponse response = null;
    // Upload the media content in chunks.
    while (true) {
        currentRequest = requestFactory.buildPutRequest(uploadUrl, null);
        setContentAndHeadersOnCurrentRequest();
        // Set mediaErrorHandler as both the I/O exception handler and the
        // unsuccessful response handler, so that serverErrorCallback is called
        // on an I/O exception or an abnormal HTTP response.
        AtomicInteger httpErrorCounter = new AtomicInteger(0);
        new MediaUploadErrorHandler(this, currentRequest, httpErrorCounter);

        AtomicInteger tryCounter = new AtomicInteger(0);
        while (true) {
            try {
                if (isMediaLengthKnown()) {
                    // TODO(rmistry): Support gzipping content for the case where media
                    // content length is known
                    // (https://code.google.com/p/google-api-java-client/issues/detail?id=691).
                    response = executeCurrentRequestWithoutGZip(currentRequest);
                    break;
                } else {
                    response = executeCurrentRequest(currentRequest);
                    break;
                }
            } catch (Throwable e) {
                logger.error("Error occurred while uploading", e);
                if (tryCounter.getAndIncrement() >= 5) {
                    logger.error("Could not be recovered...");
                    throw e;
                }
                logger.error("Retry (" + tryCounter.get() + " times)", e);
            }
        }

        boolean returningResponse = false;
        try {
            if (response.isSuccessStatusCode()) {
                totalBytesServerReceived = getMediaContentLength();
                if (mediaContent.getCloseInputStream()) {
                    contentInputStream.close();
                }
                updateStateAndNotifyListener(UploadState.MEDIA_COMPLETE);
                returningResponse = true;
                return response;
            }

            int statusCode = response.getStatusCode();
            if (statusCode != 308) {
                // https://developers.google.com/drive/web/manage-uploads#resume-upload
                returningResponse = true;
                return response;
            } else {
                httpErrorCounter.set(0);
            }

            // Check to see if the upload URL has changed on the server.
            String updatedUploadUrl = response.getHeaders().getLocation();
            if (updatedUploadUrl != null) {
                uploadUrl = new GenericUrl(updatedUploadUrl);
            }

            // Check how many bytes the server has received so far, because the
            // server may process fewer bytes than the client sent.
            long newBytesServerReceived = getNextByteIndex(response.getHeaders().getRange());
            // The server can receive any number of bytes, from 0 up to the current chunk length.
            long currentBytesServerReceived = newBytesServerReceived - totalBytesServerReceived;
            Preconditions.checkState(
                    currentBytesServerReceived >= 0 && currentBytesServerReceived <= currentChunkLength);
            long copyBytes = currentChunkLength - currentBytesServerReceived;
            if (isMediaLengthKnown()) {
                if (copyBytes > 0) {
                    // If the server didn't receive all the bytes the client sent, the
                    // current position of the input stream is incorrect, so we reset the
                    // stream and skip the bytes the server has already received.
                    // Otherwise (the server got all the bytes the client sent), the stream
                    // is in the right position and we can continue from there.
                    contentInputStream.reset();
                    long actualSkipValue = contentInputStream.skip(currentBytesServerReceived);
                    Preconditions.checkState(currentBytesServerReceived == actualSkipValue);
                }
            } else if (copyBytes == 0) {
                // The server got all the bytes, so we don't need this buffer.
                // Otherwise, we keep the buffer and copy part (or all) of its bytes
                // to the stream we are sending to the server.
                currentRequestContentBuffer = null;
            }
            totalBytesServerReceived = newBytesServerReceived;

            updateStateAndNotifyListener(UploadState.MEDIA_IN_PROGRESS);
        } finally {
            if (!returningResponse) {
                response.disconnect();
            }
        }
    }
}

From source file:jcuda.jcublas.kernel.TestMatrixOperations.java

@Test
public void testMultipleThreads() throws InterruptedException {
    int numThreads = 10;
    final INDArray array = Nd4j.rand(300, 300);
    final INDArray expected = array.dup().mmul(array).mmul(array).div(array).div(array);
    final AtomicInteger correct = new AtomicInteger();
    final CountDownLatch latch = new CountDownLatch(numThreads);
    System.out.println("Running on " + ContextHolder.getInstance().deviceNum());
    ExecutorService executors = ExecutorServiceProvider.getExecutorService();

    for (int x = 0; x < numThreads; x++) {
        executors.execute(new Runnable() {
            @Override
            public void run() {
                try {
                    int total = 10;
                    int right = 0;
                    for (int x = 0; x < total; x++) {
                        StopWatch watch = new StopWatch();
                        watch.start();
                        INDArray actual = array.dup().mmul(array).mmul(array).div(array).div(array);
                        watch.stop();
                        if (expected.equals(actual))
                            right++;
                    }

                    if (total == right)
                        correct.incrementAndGet();
                } finally {
                    latch.countDown();
                }

            }
        });
    }

    latch.await();

    assertEquals(numThreads, correct.get());

}

From source file:com.netflix.curator.framework.recipes.locks.TestInterProcessSemaphoreCluster.java

@Test
public void testCluster() throws Exception {
    final int QTY = 20;
    final int OPERATION_TIME_MS = 1000;
    final String PATH = "/foo/bar/lock";

    ExecutorService executorService = Executors.newFixedThreadPool(QTY);
    ExecutorCompletionService<Void> completionService = new ExecutorCompletionService<Void>(executorService);
    final Timing timing = new Timing();
    TestingCluster cluster = new TestingCluster(3);
    List<SemaphoreClient> semaphoreClients = Lists.newArrayList();
    try {
        cluster.start();

        final AtomicInteger opCount = new AtomicInteger(0);
        for (int i = 0; i < QTY; ++i) {
            SemaphoreClient semaphoreClient = new SemaphoreClient(cluster.getConnectString(), PATH,
                    new Callable<Void>() {
                        @Override
                        public Void call() throws Exception {
                            opCount.incrementAndGet();
                            Thread.sleep(OPERATION_TIME_MS);
                            return null;
                        }
                    });
            completionService.submit(semaphoreClient);
            semaphoreClients.add(semaphoreClient);
        }

        timing.forWaiting().sleepABit();

        Assert.assertNotNull(SemaphoreClient.getActiveClient());

        final CountDownLatch latch = new CountDownLatch(1);
        CuratorFramework client = CuratorFrameworkFactory.newClient(cluster.getConnectString(),
                timing.session(), timing.connection(), new ExponentialBackoffRetry(100, 3));
        ConnectionStateListener listener = new ConnectionStateListener() {
            @Override
            public void stateChanged(CuratorFramework client, ConnectionState newState) {
                if (newState == ConnectionState.LOST) {
                    latch.countDown();
                }
            }
        };
        client.getConnectionStateListenable().addListener(listener);
        client.start();
        try {
            client.getZookeeperClient().blockUntilConnectedOrTimedOut();

            cluster.stop();

            latch.await();
        } finally {
            IOUtils.closeQuietly(client);
        }

        long startTicks = System.currentTimeMillis();
        for (;;) {
            int thisOpCount = opCount.get();
            Thread.sleep(2 * OPERATION_TIME_MS);
            if (thisOpCount == opCount.get()) {
                break; // checking that the op count isn't increasing
            }
            Assert.assertTrue((System.currentTimeMillis() - startTicks) < timing.forWaiting().milliseconds());
        }

        int thisOpCount = opCount.get();

        Iterator<InstanceSpec> iterator = cluster.getInstances().iterator();
        cluster = new TestingCluster(iterator.next(), iterator.next());
        cluster.start();
        timing.forWaiting().sleepABit();

        startTicks = System.currentTimeMillis();
        for (;;) {
            Thread.sleep(2 * OPERATION_TIME_MS);
            if (opCount.get() > thisOpCount) {
                break; // checking that semaphore has started working again
            }
            Assert.assertTrue((System.currentTimeMillis() - startTicks) < timing.forWaiting().milliseconds());
        }
    } finally {
        for (SemaphoreClient semaphoreClient : semaphoreClients) {
            IOUtils.closeQuietly(semaphoreClient);
        }
        IOUtils.closeQuietly(cluster);
        executorService.shutdownNow();
    }
}

From source file:org.apache.distributedlog.auditor.DLAuditor.java

/**
 * Find leak ledgers phase 2: collect ledgers from uris.
 */
private Set<Long> collectLedgersFromDL(List<URI> uris, List<List<String>> allocationPaths) throws IOException {
    final Set<Long> ledgers = new TreeSet<Long>();
    List<Namespace> namespaces = new ArrayList<Namespace>(uris.size());
    try {
        for (URI uri : uris) {
            namespaces.add(NamespaceBuilder.newBuilder().conf(conf).uri(uri).build());
        }
        final CountDownLatch doneLatch = new CountDownLatch(uris.size());
        final AtomicInteger numFailures = new AtomicInteger(0);
        ExecutorService executor = Executors.newFixedThreadPool(uris.size());
        try {
            int i = 0;
            for (final Namespace namespace : namespaces) {
                final Namespace dlNamespace = namespace;
                final URI uri = uris.get(i);
                final List<String> aps = allocationPaths.get(i);
                i++;
                executor.submit(new Runnable() {
                    @Override
                    public void run() {
                        try {
                            logger.info("Collecting ledgers from {} : {}", uri, aps);
                            collectLedgersFromAllocator(uri, namespace, aps, ledgers);
                            synchronized (ledgers) {
                                logger.info("Collected {} ledgers from allocators for {} : {} ",
                                        new Object[] { ledgers.size(), uri, ledgers });
                            }
                            collectLedgersFromDL(uri, namespace, ledgers);
                        } catch (IOException e) {
                            numFailures.incrementAndGet();
                            logger.info("Error to collect ledgers from DL : ", e);
                        }
                        doneLatch.countDown();
                    }
                });
            }
            try {
                doneLatch.await();
                if (numFailures.get() > 0) {
                    throw new IOException(numFailures.get() + " errors to collect ledgers from DL");
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                logger.warn("Interrupted on collecting ledgers from DL : ", e);
                throw new DLInterruptedException("Interrupted on collecting ledgers from DL : ", e);
            }
        } finally {
            executor.shutdown();
        }
    } finally {
        for (Namespace namespace : namespaces) {
            namespace.close();
        }
    }
    return ledgers;
}

From source file:com.btoddb.fastpersitentqueue.JournalMgrIT.java

@Test
public void testThreading() throws IOException, ExecutionException {
    final int numEntries = 10000;
    final int numPushers = 3;
    int numPoppers = 3;

    final Random pushRand = new Random(1000L);
    final Random popRand = new Random(1000000L);
    final ConcurrentLinkedQueue<FpqEntry> events = new ConcurrentLinkedQueue<FpqEntry>();
    final AtomicInteger pusherFinishCount = new AtomicInteger();
    final AtomicInteger numPops = new AtomicInteger();
    final AtomicLong pushSum = new AtomicLong();
    final AtomicLong popSum = new AtomicLong();

    mgr.setMaxJournalFileSize(1000);
    mgr.init();

    ExecutorService execSrvc = Executors.newFixedThreadPool(numPushers + numPoppers);

    Set<Future> futures = new HashSet<Future>();

    // start pushing
    for (int i = 0; i < numPushers; i++) {
        Future future = execSrvc.submit(new Runnable() {
            @Override
            public void run() {
                for (int i = 0; i < numEntries; i++) {
                    try {
                        long x = idGen.incrementAndGet();
                        FpqEntry entry = mgr.append(new FpqEntry(x, new byte[100]));
                        events.offer(entry);
                        pushSum.addAndGet(x);
                        if (x % 500 == 0) {
                            System.out.println("pushed ID = " + x);
                        }
                        Thread.sleep(pushRand.nextInt(5));
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                }
                pusherFinishCount.incrementAndGet();
            }
        });
        futures.add(future);
    }

    // start popping
    for (int i = 0; i < numPoppers; i++) {
        Future future = execSrvc.submit(new Runnable() {
            @Override
            public void run() {
                while (pusherFinishCount.get() < numPushers || !events.isEmpty()) {
                    try {
                        FpqEntry entry;
                        while (null != (entry = events.poll())) {
                            if (entry.getId() % 500 == 0) {
                                System.out.println("popped ID = " + entry.getId());
                            }
                            popSum.addAndGet(entry.getId());
                            numPops.incrementAndGet();
                            mgr.reportTake(entry);
                            Thread.sleep(popRand.nextInt(5));
                        }
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                }
            }
        });
        futures.add(future);
    }

    boolean finished = false;
    while (!finished) {
        try {
            for (Future f : futures) {
                f.get();
            }
            finished = true;
        } catch (InterruptedException e) {
            // ignore
            Thread.interrupted();
        }
    }

    assertThat(numPops.get(), is(numEntries * numPushers));
    assertThat(popSum.get(), is(pushSum.get()));
    assertThat(mgr.getJournalIdMap().entrySet(), hasSize(1));
    assertThat(FileUtils.listFiles(theDir, TrueFileFilter.INSTANCE, TrueFileFilter.INSTANCE), hasSize(1));
}

From source file:com.couchbase.client.core.endpoint.query.QueryHandlerTest.java

@Test
public void shouldDecodeNRowResponseChunked() throws Exception {
    String response = Resources.read("success_5.json", this.getClass());
    HttpResponse responseHeader = new DefaultHttpResponse(HttpVersion.HTTP_1_1,
            new HttpResponseStatus(200, "OK"));
    HttpContent responseChunk1 = new DefaultLastHttpContent(
            Unpooled.copiedBuffer(response.substring(0, 300), CharsetUtil.UTF_8));
    HttpContent responseChunk2 = new DefaultLastHttpContent(
            Unpooled.copiedBuffer(response.substring(300, 950), CharsetUtil.UTF_8));
    HttpContent responseChunk3 = new DefaultLastHttpContent(
            Unpooled.copiedBuffer(response.substring(950, 1345), CharsetUtil.UTF_8));
    HttpContent responseChunk4 = new DefaultLastHttpContent(
            Unpooled.copiedBuffer(response.substring(1345, 3000), CharsetUtil.UTF_8));
    HttpContent responseChunk5 = new DefaultLastHttpContent(
            Unpooled.copiedBuffer(response.substring(3000), CharsetUtil.UTF_8));

    GenericQueryRequest requestMock = mock(GenericQueryRequest.class);
    queue.add(requestMock);
    channel.writeInbound(responseHeader, responseChunk1, responseChunk2, responseChunk3, responseChunk4,
            responseChunk5);
    latch.await(1, TimeUnit.SECONDS);
    assertEquals(1, firedEvents.size());
    GenericQueryResponse inbound = (GenericQueryResponse) firedEvents.get(0);

    final AtomicInteger found = new AtomicInteger(0);
    assertResponse(inbound, true, ResponseStatus.SUCCESS, FAKE_REQUESTID, FAKE_CLIENTID, "success",
            FAKE_SIGNATURE, new Action1<ByteBuf>() {
                @Override
                public void call(ByteBuf byteBuf) {
                    found.incrementAndGet();
                    String content = byteBuf.toString(CharsetUtil.UTF_8);
                    byteBuf.release();
                    assertNotNull(content);
                    assertTrue(!content.isEmpty());
                    try {
                        Map decoded = mapper.readValue(content, Map.class);
                        assertTrue(decoded.size() > 0);
                        assertTrue(decoded.containsKey("name"));
                    } catch (Exception e) {
                        assertTrue(false);
                    }
                }
            }, new Action1<ByteBuf>() {
                @Override
                public void call(ByteBuf buf) {
                    fail("no error expected");
                }
            }, expectedMetricsCounts(0, 5));
    assertEquals(5, found.get());
}

From source file:com.twitter.distributedlog.BKLogHandler.java

public Future<LogRecordWithDLSN> asyncReadLastRecord(final LogSegmentMetadata l, final boolean fence,
        final boolean includeControl, final boolean includeEndOfStream) {
    final AtomicInteger numRecordsScanned = new AtomicInteger(0);
    final Stopwatch stopwatch = Stopwatch.createStarted();
    final LedgerHandleCache handleCache = LedgerHandleCache.newBuilder().bkc(bookKeeperClient).conf(conf)
            .build();
    return ReadUtils.asyncReadLastRecord(getFullyQualifiedName(), l, fence, includeControl, includeEndOfStream,
            firstNumEntriesPerReadLastRecordScan, maxNumEntriesPerReadLastRecordScan, numRecordsScanned,
            scheduler, handleCache).addEventListener(new FutureEventListener<LogRecordWithDLSN>() {
                @Override
                public void onSuccess(LogRecordWithDLSN value) {
                    recoverLastEntryStats
                            .registerSuccessfulEvent(stopwatch.stop().elapsed(TimeUnit.MICROSECONDS));
                    recoverScannedEntriesStats.registerSuccessfulEvent(numRecordsScanned.get());
                }

                @Override
                public void onFailure(Throwable cause) {
                    recoverLastEntryStats.registerFailedEvent(stopwatch.stop().elapsed(TimeUnit.MICROSECONDS));
                }
            }).ensure(new AbstractFunction0<BoxedUnit>() {
                @Override
                public BoxedUnit apply() {
                    handleCache.clear();
                    return BoxedUnit.UNIT;
                }
            });
}

From source file:com.github.totyumengr.minicubes.cluster.TimeSeriesMiniCubeManagerHzImpl.java

@Override
public <T> List<T> execute(Callable<T> task, Collection<String> cubeIds, int timeoutSeconds) {

    Set<Member> members = hazelcastInstance.getCluster().getMembers();
    Set<Member> selected = new LinkedHashSet<Member>();
    if (cubeIds != null && !cubeIds.isEmpty()) {
        List<String> cubeNodes = cubeIds.stream().map(e -> e.split("@")[1]).collect(Collectors.toList());
        for (Member m : members) {
            if (cubeNodes.contains(m.getSocketAddress().toString())) {
                selected.add(m);
            }
        }
    } else {
        selected.addAll(members);
        LOGGER.warn("Select all members {} in cluster to execute on.", selected);
    }

    final int size = selected.size();
    LOGGER.debug("Start to run task {} on {}", task, selected);

    // Call distributed execute service to run it.
    final List<T> result = new ArrayList<T>(selected.size());
    final List<Exception> exceptionResult = new ArrayList<Exception>();
    CountDownLatch cdl = new CountDownLatch(1);
    AtomicInteger completedCount = new AtomicInteger(0);
    hazelcastInstance.getExecutorService(DISTRIBUTED_EXECUTOR).submitToMembers(task, selected,
            new MultiExecutionCallback() {

                @SuppressWarnings("unchecked")
                @Override
                public void onResponse(Member member, Object value) {
                    int i = completedCount.incrementAndGet();
                    LOGGER.debug("Completed of {}/{}, {} and {}", i, size, member, value);
                    if (value instanceof Exception) {
                        exceptionResult.add((Exception) value);
                    } else {
                        result.add((T) value);
                    }
                }

                @Override
                public void onComplete(Map<Member, Object> values) {
                    LOGGER.info("Successfully execute {} on cluster, collect {} result.", task, values.size());
                    cdl.countDown();
                }
            });

    if (completedCount.get() < size) {
        // FIXME: When some task do not executed. Maybe reject? error?
    }

    try {
        cdl.await(timeoutSeconds > 0 ? timeoutSeconds : Integer.MAX_VALUE, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        // Ignore
    }

    // Exception handled
    if (!exceptionResult.isEmpty()) {
        LOGGER.error("{} exceptions occurred when try to execute {} on {}", exceptionResult.size(), task,
                ObjectUtils.getDisplayString(selected));
        for (int i = 0; i < exceptionResult.size(); i++) {
            LOGGER.error("#1 exception === ", exceptionResult.get(i));
        }
        throw new RuntimeException("Exception occurred when try to execute, please see detail logs above.");
    }

    return result;
}

From source file:org.apache.hadoop.hbase.master.CatalogJanitor.java

/**
 * Scans hbase:meta and returns the number of scanned rows, a map of merged
 * regions, and an ordered map of split parents. If the given table name is
 * null, return merged regions and split parents of all tables, else only the
 * specified table.
 * @param tableName null represents all tables
 * @return triple of scanned rows, and map of merged regions, and map of split
 *         parent regioninfos
 * @throws IOException
 */
Triple<Integer, Map<HRegionInfo, Result>, Map<HRegionInfo, Result>> getMergedRegionsAndSplitParents(
        final TableName tableName) throws IOException {
    final boolean isTableSpecified = (tableName != null);
    // TODO: Only works with single hbase:meta region currently.  Fix.
    final AtomicInteger count = new AtomicInteger(0);
    // Keep Map of found split parents.  There are candidates for cleanup.
    // Use a comparator that has split parents come before its daughters.
    final Map<HRegionInfo, Result> splitParents = new TreeMap<HRegionInfo, Result>(
            new SplitParentFirstComparator());
    final Map<HRegionInfo, Result> mergedRegions = new TreeMap<HRegionInfo, Result>();
    // This visitor collects split parents and counts rows in the hbase:meta table

    MetaScannerVisitor visitor = new MetaScanner.MetaScannerVisitorBase() {
        @Override
        public boolean processRow(Result r) throws IOException {
            if (r == null || r.isEmpty())
                return true;
            count.incrementAndGet();
            HRegionInfo info = HRegionInfo.getHRegionInfo(r);
            if (info == null)
                return true; // Keep scanning
            if (isTableSpecified && info.getTable().compareTo(tableName) > 0) {
                // Another table, stop scanning
                return false;
            }
            if (info.isSplitParent())
                splitParents.put(info, r);
            if (r.getValue(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER) != null) {
                mergedRegions.put(info, r);
            }
            // Returning true means "keep scanning"
            return true;
        }
    };

    // Run full scan of hbase:meta catalog table passing in our custom visitor with
    // the start row
    MetaScanner.metaScan(server.getConfiguration(), null, visitor, tableName);

    return new Triple<Integer, Map<HRegionInfo, Result>, Map<HRegionInfo, Result>>(count.get(), mergedRegions,
            splitParents);
}

From source file:juicebox.data.MatrixZoomData.java

/**
 * Return the blocks of normalized, observed values overlapping the rectangular region specified.
 * The units are "bins"
 *
 * @param binY1 leftmost position in "bins"
 * @param binX2 rightmost position in "bins"
 * @param binY2 bottom position in "bins"
 * @param no    normalization type
 * @return List of overlapping blocks, normalized
 */
public int addNormalizedBlocksToList(final List<Block> blockList, int binX1, int binY1, int binX2, int binY2,
        final NormalizationType no) {

    int col1 = binX1 / blockBinCount;
    int row1 = binY1 / blockBinCount;

    int col2 = binX2 / blockBinCount;
    int row2 = binY2 / blockBinCount;

    List<Integer> blocksToLoad = new ArrayList<Integer>();
    for (int r = row1; r <= row2; r++) {
        for (int c = col1; c <= col2; c++) {
            int blockNumber = r * getBlockColumnCount() + c;

            String key = getKey() + "_" + blockNumber + "_" + no;
            Block b;
            if (HiCGlobals.useCache && blockCache.containsKey(key)) {
                b = blockCache.get(key);
                blockList.add(b);
            } else {
                blocksToLoad.add(blockNumber);
            }
        }
    }

    final AtomicInteger errorCounter = new AtomicInteger();

    List<Thread> threads = new ArrayList<Thread>();
    for (final int blockNumber : blocksToLoad) {
        Runnable loader = new Runnable() {
            @Override
            public void run() {
                try {
                    String key = getKey() + "_" + blockNumber + "_" + no;
                    Block b = reader.readNormalizedBlock(blockNumber, MatrixZoomData.this, no);
                    if (b == null) {
                        b = new Block(blockNumber); // An empty block
                    }
                    if (HiCGlobals.useCache) {
                        blockCache.put(key, b);
                    }
                    blockList.add(b);
                } catch (IOException e) {
                    errorCounter.incrementAndGet();
                }
            }
        };

        Thread t = new Thread(loader);
        threads.add(t);
        t.start();
    }

    // Wait for all threads to complete
    for (Thread t : threads) {
        try {
            t.join();
        } catch (InterruptedException ignore) {
        }
    }

    // untested since files got fixed - MSS
    return errorCounter.get();
}