Example usage for java.util.concurrent.atomic AtomicInteger get


Introduction

This page collects example usages of java.util.concurrent.atomic.AtomicInteger.get() gathered from open-source projects.

Prototype

public final int get() 

Document

Returns the current value, with memory effects as specified by VarHandle#getVolatile.
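
Because get() is a volatile read, a value published by an atomic update in one thread is visible to a later get() in another. Below is a minimal, self-contained sketch (the class name AtomicIntegerGetDemo is ours, not taken from the examples that follow):

import java.util.concurrent.atomic.AtomicInteger;

public class AtomicIntegerGetDemo {
    public static void main(String[] args) throws InterruptedException {
        final AtomicInteger counter = new AtomicInteger(0);

        // Worker publishes updates through the shared AtomicInteger.
        Thread worker = new Thread(() -> {
            for (int i = 0; i < 1000; i++) {
                counter.incrementAndGet(); // atomic read-modify-write
            }
        });
        worker.start();
        worker.join();

        // get() returns the current value with volatile read semantics,
        // so all 1000 increments above are visible here.
        System.out.println("count = " + counter.get()); // prints: count = 1000
    }
}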

Usage

From source file:org.apache.cassandra.repair.RepairRunnable.java
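This Cassandra repair task increments an AtomicInteger as each repair phase completes and reads it with progress.get() whenever a progress, error, or completion event is fired to clients.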

protected void runMayThrow() throws Exception {
    final TraceState traceState;

    final String tag = "repair:" + cmd;

    final AtomicInteger progress = new AtomicInteger();
    final int totalProgress = 3 + options.getRanges().size(); // calculate neighbors, validation, prepare for repair + number of ranges to repair

    String[] columnFamilies = options.getColumnFamilies()
            .toArray(new String[options.getColumnFamilies().size()]);
    Iterable<ColumnFamilyStore> validColumnFamilies = storageService.getValidColumnFamilies(false, false,
            keyspace, columnFamilies);

    final long startTime = System.currentTimeMillis();
    String message = String.format("Starting repair command #%d, repairing keyspace %s with %s", cmd, keyspace,
            options);
    logger.info(message);
    fireProgressEvent(tag, new ProgressEvent(ProgressEventType.START, 0, 100, message));
    if (options.isTraced()) {
        StringBuilder cfsb = new StringBuilder();
        for (ColumnFamilyStore cfs : validColumnFamilies)
            cfsb.append(", ").append(cfs.keyspace.getName()).append(".").append(cfs.name);

        UUID sessionId = Tracing.instance.newSession(Tracing.TraceType.REPAIR);
        traceState = Tracing.instance.begin("repair",
                ImmutableMap.of("keyspace", keyspace, "columnFamilies", cfsb.substring(2)));
        Tracing.traceRepair(message);
        traceState.enableActivityNotification(tag);
        for (ProgressListener listener : listeners)
            traceState.addProgressListener(listener);
        Thread queryThread = createQueryThread(cmd, sessionId);
        queryThread.setName("RepairTracePolling");
        queryThread.start();
    } else {
        traceState = null;
    }

    final Set<InetAddress> allNeighbors = new HashSet<>();
    Map<Range, Set<InetAddress>> rangeToNeighbors = new HashMap<>();
    try {
        for (Range<Token> range : options.getRanges()) {
            Set<InetAddress> neighbors = ActiveRepairService.getNeighbors(keyspace, range,
                    options.getDataCenters(), options.getHosts());
            rangeToNeighbors.put(range, neighbors);
            allNeighbors.addAll(neighbors);
        }
        progress.incrementAndGet();
    } catch (IllegalArgumentException e) {
        logger.error("Repair failed:", e);
        fireErrorAndComplete(tag, progress.get(), totalProgress, e.getMessage());
        return;
    }

    // Validate columnfamilies
    List<ColumnFamilyStore> columnFamilyStores = new ArrayList<>();
    try {
        Iterables.addAll(columnFamilyStores, validColumnFamilies);
        progress.incrementAndGet();
    } catch (IllegalArgumentException e) {
        fireErrorAndComplete(tag, progress.get(), totalProgress, e.getMessage());
        return;
    }

    String[] cfnames = new String[columnFamilyStores.size()];
    for (int i = 0; i < columnFamilyStores.size(); i++) {
        cfnames[i] = columnFamilyStores.get(i).name;
    }

    final UUID parentSession = UUIDGen.getTimeUUID();
    SystemDistributedKeyspace.startParentRepair(parentSession, keyspace, cfnames, options.getRanges());
    long repairedAt;
    try {
        ActiveRepairService.instance.prepareForRepair(parentSession, allNeighbors, options, columnFamilyStores);
        repairedAt = ActiveRepairService.instance.getParentRepairSession(parentSession).getRepairedAt();
        progress.incrementAndGet();
    } catch (Throwable t) {
        SystemDistributedKeyspace.failParentRepair(parentSession, t);
        fireErrorAndComplete(tag, progress.get(), totalProgress, t.getMessage());
        return;
    }

    // Set up RepairJob executor for this repair command.
    final ListeningExecutorService executor = MoreExecutors.listeningDecorator(
            new JMXConfigurableThreadPoolExecutor(options.getJobThreads(), Integer.MAX_VALUE, TimeUnit.SECONDS,
                    new LinkedBlockingQueue<Runnable>(), new NamedThreadFactory("Repair#" + cmd), "internal"));

    List<ListenableFuture<RepairSessionResult>> futures = new ArrayList<>(options.getRanges().size());
    for (Range<Token> range : options.getRanges()) {
        final RepairSession session = ActiveRepairService.instance.submitRepairSession(parentSession, range,
                keyspace, options.getParallelism(), rangeToNeighbors.get(range), repairedAt, executor, cfnames);
        if (session == null)
            continue;
        // After repair session completes, notify client its result
        Futures.addCallback(session, new FutureCallback<RepairSessionResult>() {
            public void onSuccess(RepairSessionResult result) {
                String message = String.format("Repair session %s for range %s finished", session.getId(),
                        session.getRange().toString());
                logger.info(message);
                fireProgressEvent(tag, new ProgressEvent(ProgressEventType.PROGRESS, progress.incrementAndGet(),
                        totalProgress, message));
            }

            public void onFailure(Throwable t) {
                String message = String.format("Repair session %s for range %s failed with error %s",
                        session.getId(), session.getRange().toString(), t.getMessage());
                logger.error(message, t);
                fireProgressEvent(tag, new ProgressEvent(ProgressEventType.PROGRESS, progress.incrementAndGet(),
                        totalProgress, message));
            }
        });
        futures.add(session);
    }

    // After all repair sessions complete (successful or not),
    // run anticompaction if necessary and send a finish notice back to the client
    final Collection<Range<Token>> successfulRanges = new ArrayList<>();
    final AtomicBoolean hasFailure = new AtomicBoolean();
    final ListenableFuture<List<RepairSessionResult>> allSessions = Futures.successfulAsList(futures);
    ListenableFuture anticompactionResult = Futures.transform(allSessions,
            new AsyncFunction<List<RepairSessionResult>, Object>() {
                @SuppressWarnings("unchecked")
                public ListenableFuture apply(List<RepairSessionResult> results) throws Exception {
                    // filter out null (= failed) results and get successful ranges
                    for (RepairSessionResult sessionResult : results) {
                        if (sessionResult != null) {
                            successfulRanges.add(sessionResult.range);
                        } else {
                            hasFailure.compareAndSet(false, true);
                        }
                    }
                    return ActiveRepairService.instance.finishParentSession(parentSession, allNeighbors,
                            successfulRanges);
                }
            });
    Futures.addCallback(anticompactionResult, new FutureCallback<Object>() {
        public void onSuccess(Object result) {
            SystemDistributedKeyspace.successfulParentRepair(parentSession, successfulRanges);
            if (hasFailure.get()) {
                fireProgressEvent(tag, new ProgressEvent(ProgressEventType.ERROR, progress.get(), totalProgress,
                        "Some repair failed"));
            } else {
                fireProgressEvent(tag, new ProgressEvent(ProgressEventType.SUCCESS, progress.get(),
                        totalProgress, "Repair completed successfully"));
            }
            repairComplete();
        }

        public void onFailure(Throwable t) {
            fireProgressEvent(tag,
                    new ProgressEvent(ProgressEventType.ERROR, progress.get(), totalProgress, t.getMessage()));
            SystemDistributedKeyspace.failParentRepair(parentSession, t);
            repairComplete();
        }

        private void repairComplete() {
            String duration = DurationFormatUtils.formatDurationWords(System.currentTimeMillis() - startTime,
                    true, true);
            String message = String.format("Repair command #%d finished in %s", cmd, duration);
            fireProgressEvent(tag,
                    new ProgressEvent(ProgressEventType.COMPLETE, progress.get(), totalProgress, message));
            logger.info(message);
            if (options.isTraced() && traceState != null) {
                for (ProgressListener listener : listeners)
                    traceState.removeProgressListener(listener);
                // Because DebuggableThreadPoolExecutor#afterExecute and this callback
                // run in a nondeterministic order (within the same thread), the
                // TraceState may have been nulled out at this point. The TraceState
                // should be traceState, so just set it without bothering to check if it
                // actually was nulled out.
                Tracing.instance.set(traceState);
                Tracing.traceRepair(message);
                Tracing.instance.stopSession();
            }
            executor.shutdownNow();
        }
    });
}

From source file:org.dasein.cloud.azure.tests.network.AzureLoadBalancerSupportWithMockHttpClientTest.java
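This Dasein Cloud test mocks the Azure HTTP client, increments postCount for every POST it intercepts, and finally asserts with postCount.get() that addServers() issued exactly one POST request.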

@Test
public void addServersShouldPostCorrectRequest() throws CloudException, InternalException {
    final String ROLE_NAME_2 = "TESTROLENAME2";
    final String VM_ID_2 = String.format("%s:%s:%s", SERVICE_NAME, DEPLOYMENT_NAME, ROLE_NAME_2);

    final AtomicInteger postCount = new AtomicInteger(0);
    new MockUp<CloseableHttpClient>() {
        @Mock
        public CloseableHttpResponse execute(Invocation inv, HttpUriRequest request) throws IOException {
            if ("GET".equals(request.getMethod()) && DEFINITION_URL.equals(request.getURI().toString())) {
                assertGet(request, DEFINITION_URL,
                        new Header[] { new BasicHeader("x-ms-version", "2012-03-01") });
                DaseinObjectToXmlEntity<DefinitionModel> daseinEntity = new DaseinObjectToXmlEntity<DefinitionModel>(
                        createDefinitionModel("Failover", "Enabled", HC_PORT));
                return getHttpResponseMock(getStatusLineMock(HttpServletResponse.SC_OK), daseinEntity,
                        new Header[] { new BasicHeader("x-ms-request-id", UUID.randomUUID().toString()) });
            } else if ("POST".equals(request.getMethod())
                    && DEFINITIONS_URL.equals(request.getURI().toString())) {
                postCount.incrementAndGet();
                assertPost(request, DEFINITIONS_URL,
                        new Header[] { new BasicHeader("x-ms-version", "2012-03-01") },
                        createDefinitionModelWithAnotherServer("Failover", "Enabled", ROLE_NAME_2));

                DefinitionModel definitionModel = new DefinitionModel();
                definitionModel.setVersion("2");
                DaseinObjectToXmlEntity<DefinitionModel> daseinEntity = new DaseinObjectToXmlEntity<DefinitionModel>(
                        definitionModel);
                return getHttpResponseMock(getStatusLineMock(HttpServletResponse.SC_OK), daseinEntity,
                        new Header[] { new BasicHeader("x-ms-request-id", UUID.randomUUID().toString()) });
            } else {
                throw new IOException("Request is not mocked");
            }
        }
    };
    loadBalancerSupport.addServers(LB_NAME, ROLE_NAME_2);
    assertEquals("LoadBalancerSupport.addServers() ", 1, postCount.get());
}

From source file:org.apache.bookkeeper.mledger.impl.ManagedLedgerTest.java
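Here the create callback stores its result code with response.set(rc), and the test asserts via response.get() that ledger creation hit the timeout path.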

@Test
public void testManagedLedgerWithCreateLedgerTimeOut() throws Exception {
    ManagedLedgerConfig config = new ManagedLedgerConfig().setMetadataOperationsTimeoutSeconds(3);
    ManagedLedgerImpl ledger = (ManagedLedgerImpl) factory.open("timeout_ledger_test", config);

    BookKeeper bk = mock(BookKeeper.class);
    // Stub asyncCreateLedger to do nothing so the create callback never fires,
    // which forces the metadata-operation timeout path.
    doNothing().when(bk).asyncCreateLedger(anyInt(), anyInt(), anyInt(), any(), any(), any(), any(), any());
    AtomicInteger response = new AtomicInteger(0);
    CountDownLatch latch = new CountDownLatch(1);
    ledger.asyncCreateLedger(bk, config, null, new CreateCallback() {
        @Override
        public void createComplete(int rc, LedgerHandle lh, Object ctx) {
            response.set(rc);
            latch.countDown();
        }
    }, Collections.emptyMap());

    latch.await(config.getMetadataOperationsTimeoutSeconds() + 2, TimeUnit.SECONDS);
    assertEquals(response.get(), BKException.Code.TimeoutException);

    ledger.close();
}

From source file:org.jdesktop.swingworker.AccumulativeRunnable.java
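This SwingWorker regression test records each progress notification with set() and checks in done() that lastProgressValue.get() reached 100.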

public final void test6493680() throws Exception {
    class Test {
        private final AtomicInteger lastProgressValue = new AtomicInteger(-1);
        private final Exchanger<Boolean> exchanger = new Exchanger<Boolean>();

        boolean test() throws Exception {
            TestSwingWorker swingWorker = new TestSwingWorker();
            swingWorker.addPropertyChangeListener(
                new PropertyChangeListener() {
                    public void propertyChange(PropertyChangeEvent evt) {
                        if ("progress".equals(evt.getPropertyName())) {
                            lastProgressValue.set((Integer) evt.getNewValue());
                        }
                    }
                });

            swingWorker.execute();
            return exchanger.exchange(true);
        }

        class TestSwingWorker extends SwingWorker<Void, Void> {
            @Override
            protected Void doInBackground() throws Exception {
                for (int i = 0; i <= 100; i++) {
                    Thread.sleep(1);
                    setProgress(i);
                }
                return null;
            }
            @Override
            protected void done() {
                boolean isPassed = (lastProgressValue.get() == 100);
                try {
                    exchanger.exchange(isPassed);
                } catch (Exception ignore) {
                }
            }
        }
    }
    /*
     * Because timing is involved in this bug, run the test
     * NUMBER_OF_TRIES times; it passes only if it never fails.
     */
    final int NUMBER_OF_TRIES = 50;
    for (int i = 0; i < NUMBER_OF_TRIES; i++) {
        assertTrue((new Test()).test());
    }
}

From source file:org.apache.camel.processor.MulticastProcessor.java
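Camel's multicast processor threads an AtomicInteger total through its recursive sequential processing, reading it with total.get() to number exchanges and label failure messages.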

private boolean doProcessSequential(final Exchange original, final AtomicExchange result,
        final Iterable<ProcessorExchangePair> pairs, final Iterator<ProcessorExchangePair> it,
        final ProcessorExchangePair pair, final AsyncCallback callback, final AtomicInteger total) {
    boolean sync = true;

    final Exchange exchange = pair.getExchange();
    Processor processor = pair.getProcessor();
    Producer producer = pair.getProducer();

    TracedRouteNodes traced = exchange.getUnitOfWork() != null ? exchange.getUnitOfWork().getTracedRouteNodes()
            : null;

    // compute time taken if sending to another endpoint
    StopWatch watch = null;
    if (producer != null) {
        watch = new StopWatch();
    }

    try {
        // prepare tracing starting from a new block
        if (traced != null) {
            traced.pushBlock();
        }

        // let the prepared process it, remember to begin the exchange pair
        AsyncProcessor async = AsyncProcessorTypeConverter.convert(processor);
        pair.begin();
        sync = AsyncProcessorHelper.process(async, exchange, new AsyncCallback() {
            public void done(boolean doneSync) {
                // we are done with the exchange pair
                pair.done();

                // we only have to handle async completion of the routing slip
                if (doneSync) {
                    return;
                }

                // continue processing the multicast asynchronously
                Exchange subExchange = exchange;

                // Decide whether to continue with the multicast or not; similar logic to the Pipeline
                // remember to test for stop on exception and aggregate before copying back results
                boolean continueProcessing = PipelineHelper.continueProcessing(subExchange,
                        "Sequential processing failed for number " + total.get(), LOG);
                if (stopOnException && !continueProcessing) {
                    if (subExchange.getException() != null) {
                        // wrap in exception to explain where it failed
                        subExchange.setException(
                                new CamelExchangeException("Sequential processing failed for number " + total,
                                        subExchange, subExchange.getException()));
                    } else {
                        // we want to stop on exception, and the exception was handled by the error handler
                        // this is similar to what the pipeline does, so we should do the same to not surprise end users
                        // so we should set the failed exchange as the result and be done
                        result.set(subExchange);
                    }
                    // and do the done work
                    doDone(original, subExchange, callback, false, true);
                    return;
                }

                try {
                    doAggregate(getAggregationStrategy(subExchange), result, subExchange);
                } catch (Throwable e) {
                    // wrap in exception to explain where it failed
                    subExchange.setException(new CamelExchangeException(
                            "Sequential processing failed for number " + total, subExchange, e));
                    // and do the done work
                    doDone(original, subExchange, callback, false, true);
                    return;
                }

                total.incrementAndGet();

                // maybe there are more processors to multicast
                while (it.hasNext()) {

                    // prepare and run the next
                    ProcessorExchangePair pair = it.next();
                    subExchange = pair.getExchange();
                    updateNewExchange(subExchange, total.get(), pairs, it);
                    boolean sync = doProcessSequential(original, result, pairs, it, pair, callback, total);

                    if (!sync) {
                        if (LOG.isTraceEnabled()) {
                            LOG.trace("Processing exchangeId: " + original.getExchangeId()
                                    + " is continued being processed asynchronously");
                        }
                        return;
                    }

                    // Decide whether to continue with the multicast or not; similar logic to the Pipeline
                    // remember to test for stop on exception and aggregate before copying back results
                    continueProcessing = PipelineHelper.continueProcessing(subExchange,
                            "Sequential processing failed for number " + total.get(), LOG);
                    if (stopOnException && !continueProcessing) {
                        if (subExchange.getException() != null) {
                            // wrap in exception to explain where it failed
                            subExchange.setException(new CamelExchangeException(
                                    "Sequential processing failed for number " + total, subExchange,
                                    subExchange.getException()));
                        } else {
                            // we want to stop on exception, and the exception was handled by the error handler
                            // this is similar to what the pipeline does, so we should do the same to not surprise end users
                            // so we should set the failed exchange as the result and be done
                            result.set(subExchange);
                        }
                        // and do the done work
                        doDone(original, subExchange, callback, false, true);
                        return;
                    }

                    try {
                        doAggregate(getAggregationStrategy(subExchange), result, subExchange);
                    } catch (Throwable e) {
                        // wrap in exception to explain where it failed
                        subExchange.setException(new CamelExchangeException(
                                "Sequential processing failed for number " + total, subExchange, e));
                        // and do the done work
                        doDone(original, subExchange, callback, false, true);
                        return;
                    }

                    total.incrementAndGet();
                }

                // do the done work
                subExchange = result.get() != null ? result.get() : null;
                doDone(original, subExchange, callback, false, true);
            }
        });
    } finally {
        // pop the block so by next round we have the same starting point and thus the tracing looks accurate
        if (traced != null) {
            traced.popBlock();
        }
        if (producer != null) {
            long timeTaken = watch.stop();
            Endpoint endpoint = producer.getEndpoint();
            // emit event that the exchange was sent to the endpoint
            EventHelper.notifyExchangeSent(exchange.getContext(), exchange, endpoint, timeTaken);
        }
    }

    return sync;
}

From source file:com.indeed.lsmtree.core.TestImmutableBTreeIndex.java
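Eight reader threads each decrement a shared counter when they finish, and the main thread spins on done.get() until it drops to zero before closing the reader.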

public void testSeekPrevious() throws Exception {
    final int[] ints = createTree();
    final ImmutableBTreeIndex.Reader<Integer, Long> reader = new ImmutableBTreeIndex.Reader(tmpDir,
            new IntSerializer(), new LongSerializer(), false);
    final int max = ints[ints.length - 1];
    final AtomicInteger done = new AtomicInteger(8);
    for (int i = 0; i < 8; i++) {
        final int index = i;
        new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    final Random r = new Random(index);
                    for (int i = 0; i < treeSize; i++) {
                        int rand = r.nextInt(max + 10);
                        int insertionindex = Arrays.binarySearch(ints, rand);
                        final Iterator<Generation.Entry<Integer, Long>> iterator = reader.reverseIterator(rand,
                                true);
                        final boolean hasPrevious = iterator.hasNext();
                        Generation.Entry<Integer, Long> entry = null;
                        assertEquals(
                                "rand: " + rand + " hasPrevious: " + hasPrevious
                                        + (hasPrevious ? " previous: " + (entry = iterator.next()) : ""),
                                hasPrevious, insertionindex != -1);
                        if (hasPrevious) {
                            if (entry == null)
                                entry = iterator.next();
                            assertTrue(entry.getKey() <= rand);
                            assertTrue(entry.getKey().longValue() == entry.getValue());
                        }
                        if (insertionindex >= 0) {
                            if (entry == null)
                                entry = iterator.next();
                            assertTrue(rand == ints[insertionindex]);
                            assertTrue(entry.getKey() == rand);
                            Generation.Entry<Integer, Long> result = reader.get(rand);
                            assertTrue(result.getValue() == rand);
                        } else {
                            if (hasPrevious) {
                                assertTrue(ints[(~insertionindex) - 1] < rand);
                                assertTrue(ints[(~insertionindex) - 1] == entry.getKey());
                            }
                            Generation.Entry<Integer, Long> result = reader.get(rand);
                            assertTrue(result == null);
                        }
                    }
                } finally {
                    done.decrementAndGet();
                }
            }
        }).start();
    }
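    // Spin until all eight worker threads have decremented the counter to zero.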
    while (done.get() > 0) {
        Thread.yield();
    }
    reader.close();
}

From source file:com.inmobi.grill.driver.hive.TestRemoteHiveDriver.java
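In this multi-threaded Hive driver test, polling threads bump errCount on any thrift error; the test logs the total and asserts errCount.get() is zero at the end.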

@Test
public void testMultiThreadClient() throws Exception {
    LOG.info("@@ Starting multi thread test");
    // Launch two threads
    createTestTable("test_multithreads");
    HiveConf thConf = new HiveConf(conf, TestRemoteHiveDriver.class);
    thConf.setLong(HiveDriver.GRILL_CONNECTION_EXPIRY_DELAY, 10000);
    final HiveDriver thrDriver = new HiveDriver();
    thrDriver.configure(thConf);
    QueryContext ctx = new QueryContext("USE " + TestRemoteHiveDriver.class.getSimpleName(), null, conf);
    thrDriver.execute(ctx);

    // Launch a select query
    final int QUERIES = 5;
    int launchedQueries = 0;
    final int THREADS = 5;
    final long POLL_DELAY = 500;
    List<Thread> thrs = new ArrayList<Thread>();
    final AtomicInteger errCount = new AtomicInteger();
    for (int q = 0; q < QUERIES; q++) {
        final QueryContext qctx;
        try {
            qctx = new QueryContext("SELECT * FROM test_multithreads", null, conf);
            thrDriver.executeAsync(qctx);
        } catch (GrillException e) {
            errCount.incrementAndGet();
            LOG.info(q + " executeAsync error: " + e.getCause());
            continue;
        }
        LOG.info("@@ Launched query: " + q + " " + qctx.getQueryHandle());
        launchedQueries++;
        // Launch many threads to poll for status
        final QueryHandle handle = qctx.getQueryHandle();

        for (int i = 0; i < THREADS; i++) {
            int thid = q * THREADS + i;
            Thread th = new Thread(new Runnable() {
                @Override
                public void run() {
                    for (int i = 0; i < 1000; i++) {
                        try {
                            thrDriver.updateStatus(qctx);
                            if (qctx.getDriverStatus().isFinished()) {
                                LOG.info("@@ " + handle.getHandleId() + " >> "
                                        + qctx.getDriverStatus().getState());
                                thrDriver.closeQuery(handle);
                                break;
                            }
                            Thread.sleep(POLL_DELAY);
                        } catch (GrillException e) {
                            LOG.error("Got Exception", e.getCause());
                            e.printStackTrace();
                            errCount.incrementAndGet();
                            break;
                        } catch (InterruptedException e) {
                            e.printStackTrace();
                            break;
                        }
                    }
                }
            });
            thrs.add(th);
            th.setName("Poller#" + (thid));
            th.start();
        }
    }

    for (Thread th : thrs) {
        try {
            th.join(10000);
        } catch (InterruptedException e) {
            LOG.warn("Not ended yet: " + th.getName());
        }
    }
    Assert.assertEquals(0, thrDriver.getHiveHandleSize());
    LOG.info("@@ Completed all pollers. Total thrift errors: " + errCount.get());
    assertEquals(launchedQueries, QUERIES);
    assertEquals(thrs.size(), QUERIES * THREADS);
    assertEquals(errCount.get(), 0);
}

From source file:com.spectralogic.ds3client.integration.GetJobManagement_Test.java
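A user-supplied Monitorable increments numChunkAllocationAttempts on each chunk allocation, and the test asserts with get() that exactly one attempt was made.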

@Test
public void testGetJobWithUserSuppliedBlobStrategy() throws IOException, InterruptedException {
    final String tempPathPrefix = null;
    final Path tempDirectory = Files.createTempDirectory(Paths.get("."), tempPathPrefix);
    final String fileName = "beowulf.txt";

    try {
        final List<Ds3Object> objects = Lists.newArrayList(new Ds3Object(fileName));

        final GetBulkJobSpectraS3Request getBulkJobSpectraS3Request = new GetBulkJobSpectraS3Request(
                BUCKET_NAME, objects);

        final GetBulkJobSpectraS3Response getBulkJobSpectraS3Response = client
                .getBulkJobSpectraS3(getBulkJobSpectraS3Request);

        final MasterObjectList masterObjectList = getBulkJobSpectraS3Response.getMasterObjectList();

        final EventDispatcher eventDispatcher = new EventDispatcherImpl(new SameThreadEventRunner());

        final AtomicInteger numChunkAllocationAttempts = new AtomicInteger(0);

        final TransferStrategyBuilder transferStrategyBuilder = new TransferStrategyBuilder()
                .withDs3Client(client).withMasterObjectList(masterObjectList)
                .withChannelBuilder(new FileObjectGetter(tempDirectory))
                .withRangesForBlobs(PartialObjectHelpers.mapRangesToBlob(masterObjectList.getObjects(),
                        PartialObjectHelpers.getPartialObjectsRanges(objects)))
                .withBlobStrategy(new UserSuppliedPutBlobStrategy(client, masterObjectList, eventDispatcher,
                        new MaxChunkAttemptsRetryBehavior(5),
                        new ClientDefinedChunkAttemptRetryDelayBehavior(1, eventDispatcher), new Monitorable() {
                            @Override
                            public void monitor() {
                                numChunkAllocationAttempts.getAndIncrement();
                            }
                        }));

        final TransferStrategy transferStrategy = transferStrategyBuilder.makeGetTransferStrategy();

        transferStrategy.transfer();

        final Collection<File> filesInTempDirectory = FileUtils.listFiles(tempDirectory.toFile(), null, false);

        for (final File file : filesInTempDirectory) {
            assertEquals(fileName, file.getName());
        }

        assertEquals(1, numChunkAllocationAttempts.get());
    } finally {
        FileUtils.deleteDirectory(tempDirectory.toFile());
    }
}

From source file:com.ethlo.geodata.importer.file.FileIpLookupImporter.java
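This importer counts processed CSV entries with getAndIncrement() and uses count.get() to log progress every 100,000 records.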

@Override
public long importData() throws IOException {
    final Map.Entry<Date, File> ipDataFile = super.fetchResource(DataType.IP, url);
    final AtomicInteger count = new AtomicInteger(0);

    final File csvFile = ipDataFile.getValue();
    final long total = IoUtils.lineCount(csvFile);
    final ProgressListener prg = new ProgressListener(
            l -> publish(new DataLoadedEvent(this, DataType.IP, Operation.IMPORT, l, total)));

    final IpLookupImporter ipLookupImporter = new IpLookupImporter(csvFile);

    final JsonFactory f = new JsonFactory();
    f.enable(JsonGenerator.Feature.ESCAPE_NON_ASCII);
    f.disable(JsonGenerator.Feature.AUTO_CLOSE_TARGET);
    final ObjectMapper mapper = new ObjectMapper(f);

    final byte newLine = (byte) "\n".charAt(0);

    logger.info("Writing IP data to file {}", getFile().getAbsolutePath());
    try (final OutputStream out = new BufferedOutputStream(new FileOutputStream(getFile()))) {
        ipLookupImporter.processFile(entry -> {
            final String strGeoNameId = findMapValue(entry, "geoname_id", "represented_country_geoname_id",
                    "registered_country_geoname_id");
            final String strGeoNameCountryId = findMapValue(entry, "represented_country_geoname_id",
                    "registered_country_geoname_id");
            final Long geonameId = strGeoNameId != null ? Long.parseLong(strGeoNameId) : null;
            final Long geonameCountryId = strGeoNameCountryId != null ? Long.parseLong(strGeoNameCountryId)
                    : null;
            if (geonameId != null) {
                final SubnetUtils u = new SubnetUtils(entry.get("network"));
                final long lower = UnsignedInteger
                        .fromIntBits(InetAddresses
                                .coerceToInteger(InetAddresses.forString(u.getInfo().getLowAddress())))
                        .longValue();
                final long upper = UnsignedInteger
                        .fromIntBits(InetAddresses
                                .coerceToInteger(InetAddresses.forString(u.getInfo().getHighAddress())))
                        .longValue();
                final Map<String, Object> paramMap = new HashMap<>(5);
                paramMap.put("geoname_id", geonameId);
                paramMap.put("geoname_country_id", geonameCountryId);
                paramMap.put("first", lower);
                paramMap.put("last", upper);

                try {
                    mapper.writeValue(out, paramMap);
                    out.write(newLine);
                } catch (IOException exc) {
                    throw new DataAccessResourceFailureException(exc.getMessage(), exc);
                }
            }

            if (count.get() % 100_000 == 0) {
                logger.info("Processed {}", count.get());
            }

            count.getAndIncrement();

            prg.update();
        });
    }

    return total;
}

From source file:org.apache.hadoop.hbase.client.TestAdmin.java
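Ten threads race to create the same table; successes and failures are tallied in AtomicIntegers and verified with get() once all threads have died.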

@Test(timeout = 300000)
public void testCreateBadTables() throws IOException {
    String msg = null;
    try {
        this.admin.createTable(HTableDescriptor.META_TABLEDESC);
    } catch (TableExistsException e) {
        msg = e.toString();
    }
    assertTrue("Unexpected exception message " + msg,
            msg != null && msg.startsWith(TableExistsException.class.getName())
                    && msg.contains(HTableDescriptor.META_TABLEDESC.getTableName().getNameAsString()));

    // Now try and do concurrent creation with a bunch of threads.
    final HTableDescriptor threadDesc = new HTableDescriptor(TableName.valueOf("threaded_testCreateBadTables"));
    threadDesc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
    int count = 10;
    Thread[] threads = new Thread[count];
    final AtomicInteger successes = new AtomicInteger(0);
    final AtomicInteger failures = new AtomicInteger(0);
    final HBaseAdmin localAdmin = this.admin;
    for (int i = 0; i < count; i++) {
        threads[i] = new Thread(Integer.toString(i)) {
            @Override
            public void run() {
                try {
                    localAdmin.createTable(threadDesc);
                    successes.incrementAndGet();
                } catch (TableExistsException e) {
                    failures.incrementAndGet();
                } catch (IOException e) {
                    throw new RuntimeException("Failed threaded create" + getName(), e);
                }
            }
        };
    }
    for (int i = 0; i < count; i++) {
        threads[i].start();
    }
    for (int i = 0; i < count; i++) {
        while (threads[i].isAlive()) {
            try {
                Thread.sleep(100);
            } catch (InterruptedException e) {
                // continue
            }
        }
    }
    // All threads are now dead.  Count up how many tables were created and
    // how many failed w/ appropriate exception.
    assertEquals(1, successes.get());
    assertEquals(count - 1, failures.get());
}