Example usage for java.util.concurrent.atomic AtomicInteger getAndIncrement

Introduction

On this page you can find example usages of java.util.concurrent.atomic AtomicInteger getAndIncrement, collected from open-source projects.

Prototype

public final int getAndIncrement() 

Document

Atomically increments the current value, with memory effects as specified by VarHandle#getAndAdd. Equivalent to getAndAdd(1).
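
Before the project examples below, here is a minimal, self-contained sketch (not taken from any project on this page) illustrating the core contract: getAndIncrement atomically returns the previous value and adds one, which makes it a natural thread-safe counter or ID generator.

import java.util.concurrent.atomic.AtomicInteger;

public class IdGenerator {
    private static final AtomicInteger NEXT_ID = new AtomicInteger(0);

    // getAndIncrement returns the value before the increment, so the
    // first caller gets 0, the next 1, and so on, with no locking.
    static int nextId() {
        return NEXT_ID.getAndIncrement();
    }

    public static void main(String[] args) throws InterruptedException {
        Runnable task = () -> System.out
                .println(Thread.currentThread().getName() + " got id " + nextId());
        Thread t1 = new Thread(task, "t1");
        Thread t2 = new Thread(task, "t2");
        t1.start();
        t2.start();
        t1.join();
        t2.join();
    }
}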

Usage

From source file:org.apache.hadoop.hbase.regionserver.wal.TestHLogSplit.java
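This HBase test uses getAndIncrement to count how many times the reporter's progress() callback fires, then asserts that the cancelled log split reported progress at least once.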

@Test(timeout = 300000)
public void testTerminationAskedByReporter() throws IOException, CorruptedLogFileException {
    generateHLogs(1, 10, -1);
    FileStatus logfile = fs.listStatus(HLOGDIR)[0];
    fs.initialize(fs.getUri(), conf);

    final AtomicInteger count = new AtomicInteger();

    CancelableProgressable localReporter = new CancelableProgressable() {
        @Override
        public boolean progress() {
            count.getAndIncrement();
            return false;
        }
    };

    FileSystem spiedFs = Mockito.spy(fs);
    Mockito.doAnswer(new Answer<FSDataInputStream>() {
        public FSDataInputStream answer(InvocationOnMock invocation) throws Throwable {
            Thread.sleep(1500); // Sleep a while so that the report status call is invoked
            return (FSDataInputStream) invocation.callRealMethod();
        }
    }).when(spiedFs).open(Mockito.<Path>any(), Mockito.anyInt());

    try {
        conf.setInt("hbase.splitlog.report.period", 1000);
        boolean ret = HLogSplitter.splitLogFile(HBASEDIR, logfile, spiedFs, conf, localReporter, null, null,
                null);
        assertFalse("Log splitting should failed", ret);
        assertTrue(count.get() > 0);
    } catch (IOException e) {
        fail("There shouldn't be any exception but: " + e.toString());
    } finally {
        // reset it back to its default value
        conf.setInt("hbase.splitlog.report.period", 59000);
    }
}

From source file:info.archinnov.achilles.it.TestDSLSimpleEntity.java
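In this Achilles test, rowCounter.getAndIncrement() in the asynchronous row listener tallies each row returned, letting the test assert that exactly three rows were processed.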

@Test
public void should_dsl_select_with_options() throws Exception {
    //Given
    final Map<String, Object> values = new HashMap<>();
    final long id = RandomUtils.nextLong(0L, Long.MAX_VALUE);
    values.put("id", id);
    SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss z");
    dateFormat.setTimeZone(TimeZone.getTimeZone("GMT"));
    final Date date1 = dateFormat.parse("2015-10-01 00:00:00 GMT");
    final Date date2 = dateFormat.parse("2015-10-02 00:00:00 GMT");
    final Date date3 = dateFormat.parse("2015-10-03 00:00:00 GMT");
    final Date date6 = dateFormat.parse("2015-10-06 00:00:00 GMT");

    values.put("date1", "'2015-10-01 00:00:00+0000'");
    values.put("date2", "'2015-10-02 00:00:00+0000'");
    values.put("date3", "'2015-10-03 00:00:00+0000'");
    values.put("date4", "'2015-10-04 00:00:00+0000'");
    values.put("date5", "'2015-10-05 00:00:00+0000'");
    values.put("date6", "'2015-10-06 00:00:00+0000'");
    values.put("date7", "'2015-10-07 00:00:00+0000'");
    values.put("date8", "'2015-10-08 00:00:00+0000'");
    values.put("date9", "'2015-10-09 00:00:00+0000'");
    scriptExecutor.executeScriptTemplate("SimpleEntity/insert_many_rows.cql", values);

    final AtomicInteger rsCount = new AtomicInteger(0);
    final AtomicInteger rowCounter = new AtomicInteger(0);

    final CassandraLogAsserter logAsserter = new CassandraLogAsserter();
    logAsserter.prepareLogLevelForDriverConnection();

    //When
    final List<SimpleEntity> found = manager.dsl().select().value().fromBaseTable().where().id_Eq(id)
            .date_IN(date1, date2, date3, date6).orderByDateDescending().limit(3).withConsistencyLevel(THREE)
            .withRetryPolicy(DowngradingConsistencyRetryPolicy.INSTANCE).withResultSetAsyncListener(rs -> {
                rsCount.getAndSet(rs.getAvailableWithoutFetching());
                return rs;
            }).withRowAsyncListener(row -> {
                rowCounter.getAndIncrement();
                return row;
            }).getList();

    //Then
    assertThat(found).hasSize(3);
    assertThat(found.get(0).getValue()).isEqualTo("id - date6");
    assertThat(found.get(1).getValue()).isEqualTo("id - date3");
    assertThat(found.get(2).getValue()).isEqualTo("id - date2");
    assertThat(rsCount.get()).isEqualTo(3);
    assertThat(rowCounter.get()).isEqualTo(3);
    logAsserter.assertConsistencyLevels(THREE, ONE);
}

From source file:voldemort.store.routed.ThreadPoolRoutedStore.java
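In Voldemort's routed put, a shared AtomicInteger counts successful writes across threads: getAndIncrement registers the initial blocking write to the master node, and incrementAndGet counts the parallel writes submitted to the executor.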

@Override
public void put(final ByteArray key, final Versioned<byte[]> versioned, final byte[] transforms)
        throws VoldemortException {
    long startNs = System.nanoTime();
    StoreUtils.assertValidKey(key);
    final List<Node> nodes = availableNodes(routingStrategy.routeRequest(key.get()));

    // quickly fail if there aren't enough nodes to meet the requirement
    final int numNodes = nodes.size();
    if (numNodes < this.storeDef.getRequiredWrites())
        throw new InsufficientOperationalNodesException("Only " + numNodes + " nodes in preference list, but "
                + this.storeDef.getRequiredWrites() + " writes required.");

    // A count of the number of successful operations
    final AtomicInteger successes = new AtomicInteger(0);

    // A list of thrown exceptions, indicating the number of failures
    final List<Exception> failures = Collections.synchronizedList(new ArrayList<Exception>(1));

    // If requiredWrites > 0, do a single blocking write to the first
    // live node in the preference list. If this node throws an
    // ObsoleteVersionException, allow it to propagate.
    Node master = null;
    int currentNode = 0;
    Versioned<byte[]> versionedCopy = null;
    for (; currentNode < numNodes; currentNode++) {
        Node current = nodes.get(currentNode);
        long startNsLocal = System.nanoTime();
        try {
            versionedCopy = incremented(versioned, current.getId());
            innerStores.get(current.getId()).put(key, versionedCopy, transforms);
            successes.getAndIncrement();
            recordSuccess(current, startNsLocal);
            master = current;
            break;
        } catch (UnreachableStoreException e) {
            recordException(current, startNsLocal, e);
            failures.add(e);
        } catch (VoldemortApplicationException e) {
            throw e;
        } catch (Exception e) {
            failures.add(e);
        }
    }

    if (successes.get() < 1)
        throw new InsufficientOperationalNodesException("No master node succeeded!",
                failures.size() > 0 ? failures.get(0) : null);
    else
        currentNode++;

    // A semaphore indicating the number of completed operations.
    // Once initialized, all permits are acquired; after that,
    // permits are released when an operation is completed.
    // semaphore.acquire(n) waits for n operations to complete.
    final Versioned<byte[]> finalVersionedCopy = versionedCopy;
    final Semaphore semaphore = new Semaphore(0, false);
    // Add the operations to the pool
    int attempts = 0;
    for (; currentNode < numNodes; currentNode++) {
        attempts++;
        final Node node = nodes.get(currentNode);
        this.executor.execute(new Runnable() {

            @Override
            public void run() {
                long startNsLocal = System.nanoTime();
                try {
                    innerStores.get(node.getId()).put(key, finalVersionedCopy, transforms);
                    successes.incrementAndGet();
                    recordSuccess(node, startNsLocal);
                } catch (UnreachableStoreException e) {
                    recordException(node, startNsLocal, e);
                    failures.add(e);
                } catch (ObsoleteVersionException e) {
                    // ignore this completely here
                    // this means that a higher version was able
                    // to write on this node and should be termed as clean
                    // success.
                } catch (VoldemortApplicationException e) {
                    throw e;
                } catch (Exception e) {
                    logger.warn("Error in PUT on node " + node.getId() + "(" + node.getHost() + ")", e);
                    failures.add(e);
                } finally {
                    // signal that the operation is complete
                    semaphore.release();
                }
            }
        });
    }

    // Block until we get enough completions
    int blockCount = Math.min(storeDef.getPreferredWrites() - 1, attempts);
    boolean noTimeout = blockOnPut(startNs, semaphore, 0, blockCount, successes, storeDef.getPreferredWrites());

    if (successes.get() < storeDef.getRequiredWrites()) {
        /*
         * We don't have enough required writes, but we haven't timed out
         * yet, so block a little more if there are healthy nodes that can
         * help us achieve our target.
         */
        if (noTimeout) {
            int startingIndex = blockCount - 1;
            blockCount = Math.max(storeDef.getPreferredWrites() - 1, attempts);
            blockOnPut(startNs, semaphore, startingIndex, blockCount, successes, storeDef.getRequiredWrites());
        }
        if (successes.get() < storeDef.getRequiredWrites())
            throw new InsufficientOperationalNodesException(successes.get() + " writes succeeded, but "
                    + this.storeDef.getRequiredWrites() + " are required.", failures);
    }

    // Okay looks like it worked, increment the version for the caller
    VectorClock versionedClock = (VectorClock) versioned.getVersion();
    versionedClock.incrementVersion(master.getId(), time.getMilliseconds());
}

From source file:org.elasticsearch.xpack.ml.integration.MlJobIT.java
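This Elasticsearch ML test exploits the return value of getAndIncrement as a one-shot guard: of the threads racing to delete the job, only the one that observes the previous value 0 recreates it.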

public void testDelete_multipleRequest() throws Exception {
    String jobId = "delete-job-multiple-times";
    createFarequoteJob(jobId);

    ConcurrentMapLong<Response> responses = ConcurrentCollections.newConcurrentMapLong();
    ConcurrentMapLong<ResponseException> responseExceptions = ConcurrentCollections.newConcurrentMapLong();
    AtomicReference<IOException> ioe = new AtomicReference<>();
    AtomicInteger recreationGuard = new AtomicInteger(0);
    AtomicReference<Response> recreationResponse = new AtomicReference<>();
    AtomicReference<ResponseException> recreationException = new AtomicReference<>();

    Runnable deleteJob = () -> {
        try {
            boolean forceDelete = randomBoolean();
            String url = MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId;
            if (forceDelete) {
                url += "?force=true";
            }
            Response response = client().performRequest("delete", url);
            responses.put(Thread.currentThread().getId(), response);
        } catch (ResponseException re) {
            responseExceptions.put(Thread.currentThread().getId(), re);
        } catch (IOException e) {
            ioe.set(e);
        }

        // Immediately after the first deletion finishes, recreate the job.  This should pick up
        // race conditions where another delete request deletes part of the newly created job.
        if (recreationGuard.getAndIncrement() == 0) {
            try {
                recreationResponse.set(createFarequoteJob(jobId));
            } catch (ResponseException re) {
                recreationException.set(re);
            } catch (IOException e) {
                ioe.set(e);
            }
        }
    };

    // The idea is to hit the situation where one request waits for
    // the other to complete. This is difficult to schedule but
    // hopefully it will happen in CI
    int numThreads = 5;
    Thread[] threads = new Thread[numThreads];
    for (int i = 0; i < numThreads; i++) {
        threads[i] = new Thread(deleteJob);
    }
    for (int i = 0; i < numThreads; i++) {
        threads[i].start();
    }
    for (int i = 0; i < numThreads; i++) {
        threads[i].join();
    }

    if (ioe.get() != null) {
        // This looks redundant but the check is done so we can
        // print the exception's error message
        assertNull(ioe.get().getMessage(), ioe.get());
    }

    assertEquals(numThreads, responses.size() + responseExceptions.size());

    // 404s are ok as it means the job had already been deleted.
    for (ResponseException re : responseExceptions.values()) {
        assertEquals(re.getMessage(), 404, re.getResponse().getStatusLine().getStatusCode());
    }

    for (Response response : responses.values()) {
        assertEquals(responseEntityToString(response), 200, response.getStatusLine().getStatusCode());
    }

    assertNotNull(recreationResponse.get());
    assertEquals(responseEntityToString(recreationResponse.get()), 200,
            recreationResponse.get().getStatusLine().getStatusCode());

    if (recreationException.get() != null) {
        assertNull(recreationException.get().getMessage(), recreationException.get());
    }

    try {
        // The idea of the code above is that the deletion is sufficiently time-consuming that
        // all threads enter the deletion call before the first one exits it.  Usually this happens,
        // but in the case that it does not the job that is recreated may get deleted.
        // It is not an error if the job does not exist, but the following assertions
        // will fail in that case.
        client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId);

        // Check that the job aliases exist.  These are the last thing to be deleted when a job is deleted, so
        // if there's been a race between deletion and recreation these are what will be missing.
        String aliases = getAliases();

        assertThat(aliases, containsString("\"" + AnomalyDetectorsIndex.jobResultsAliasedName(jobId)
                + "\":{\"filter\":{\"term\":{\"job_id\":{\"value\":\"" + jobId + "\",\"boost\":1.0}}}}"));
        assertThat(aliases, containsString("\"" + AnomalyDetectorsIndex.resultsWriteAlias(jobId) + "\":{}"));

    } catch (ResponseException missingJobException) {
        // The job does not exist
        assertThat(missingJobException.getResponse().getStatusLine().getStatusCode(), equalTo(404));

        // The job aliases should be deleted
        String aliases = getAliases();
        assertThat(aliases, not(containsString("\"" + AnomalyDetectorsIndex.jobResultsAliasedName(jobId)
                + "\":{\"filter\":{\"term\":{\"job_id\":{\"value\":\"" + jobId + "\",\"boost\":1.0}}}}")));
        assertThat(aliases,
                not(containsString("\"" + AnomalyDetectorsIndex.resultsWriteAlias(jobId) + "\":{}")));
    }

    assertEquals(numThreads, recreationGuard.get());
}

From source file:voldemort.store.routed.RoutedStore.java
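An earlier variant of the Voldemort routed put above, without transforms; getAndIncrement again records the first successful (master) write before the remaining writes are dispatched in parallel.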

public void put(final ByteArray key, final Versioned<byte[]> versioned) throws VoldemortException {
    long startNs = System.nanoTime();
    StoreUtils.assertValidKey(key);
    final List<Node> nodes = availableNodes(routingStrategy.routeRequest(key.get()));

    // quickly fail if there aren't enough nodes to meet the requirement
    final int numNodes = nodes.size();
    if (numNodes < this.storeDef.getRequiredWrites())
        throw new InsufficientOperationalNodesException("Only " + numNodes + " nodes in preference list, but "
                + this.storeDef.getRequiredWrites() + " writes required.");

    // A count of the number of successful operations
    final AtomicInteger successes = new AtomicInteger(0);

    // A list of thrown exceptions, indicating the number of failures
    final List<Exception> failures = Collections.synchronizedList(new ArrayList<Exception>(1));

    // If requiredWrites > 0, do a single blocking write to the first
    // live node in the preference list. If this node throws an
    // ObsoleteVersionException, allow it to propagate.
    Node master = null;
    int currentNode = 0;
    Versioned<byte[]> versionedCopy = null;
    for (; currentNode < numNodes; currentNode++) {
        Node current = nodes.get(currentNode);
        long startNsLocal = System.nanoTime();
        try {
            versionedCopy = incremented(versioned, current.getId());
            innerStores.get(current.getId()).put(key, versionedCopy);
            successes.getAndIncrement();
            recordSuccess(current, startNsLocal);
            master = current;
            break;
        } catch (UnreachableStoreException e) {
            recordException(current, startNsLocal, e);
            failures.add(e);
        } catch (VoldemortApplicationException e) {
            throw e;
        } catch (Exception e) {
            failures.add(e);
        }
    }

    if (successes.get() < 1)
        throw new InsufficientOperationalNodesException("No master node succeeded!",
                failures.size() > 0 ? failures.get(0) : null);
    else
        currentNode++;

    // A semaphore indicating the number of completed operations.
    // Once initialized, all permits are acquired; after that,
    // permits are released when an operation is completed.
    // semaphore.acquire(n) waits for n operations to complete.
    final Versioned<byte[]> finalVersionedCopy = versionedCopy;
    final Semaphore semaphore = new Semaphore(0, false);
    // Add the operations to the pool
    int attempts = 0;
    for (; currentNode < numNodes; currentNode++) {
        attempts++;
        final Node node = nodes.get(currentNode);
        this.executor.execute(new Runnable() {

            public void run() {
                long startNsLocal = System.nanoTime();
                try {
                    innerStores.get(node.getId()).put(key, finalVersionedCopy);
                    successes.incrementAndGet();
                    recordSuccess(node, startNsLocal);
                } catch (UnreachableStoreException e) {
                    recordException(node, startNsLocal, e);
                    failures.add(e);
                } catch (ObsoleteVersionException e) {
                    // ignore this completely here
                    // this means that a higher version was able
                    // to write on this node and should be termed as clean
                    // success.
                } catch (VoldemortApplicationException e) {
                    throw e;
                } catch (Exception e) {
                    logger.warn("Error in PUT on node " + node.getId() + "(" + node.getHost() + ")", e);
                    failures.add(e);
                } finally {
                    // signal that the operation is complete
                    semaphore.release();
                }
            }
        });
    }

    // Block until we get enough completions
    int blockCount = Math.min(storeDef.getPreferredWrites() - 1, attempts);
    boolean noTimeout = blockOnPut(startNs, semaphore, 0, blockCount, successes, storeDef.getPreferredWrites());

    if (successes.get() < storeDef.getRequiredWrites()) {
        /*
         * We don't have enough required writes, but we haven't timed out
         * yet, so block a little more if there are healthy nodes that can
         * help us achieve our target.
         */
        if (noTimeout) {
            int startingIndex = blockCount - 1;
            blockCount = Math.max(storeDef.getPreferredWrites() - 1, attempts);
            blockOnPut(startNs, semaphore, startingIndex, blockCount, successes, storeDef.getRequiredWrites());
        }
        if (successes.get() < storeDef.getRequiredWrites())
            throw new InsufficientOperationalNodesException(successes.get() + " writes succeeded, but "
                    + this.storeDef.getRequiredWrites() + " are required.", failures);
    }

    // Okay looks like it worked, increment the version for the caller
    VectorClock versionedClock = (VectorClock) versioned.getVersion();
    versionedClock.incrementVersion(master.getId(), time.getMilliseconds());
}

From source file:org.apache.nifi.processors.standard.PutDatabaseRecord.java
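In NiFi's PutDatabaseRecord, getAndIncrement doubles as a mapped-column counter while an INSERT statement is assembled: a previous value greater than zero means a comma separator is needed before the next column name.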

SqlAndIncludedColumns generateInsert(final RecordSchema recordSchema, final String tableName,
        final TableSchema tableSchema, final DMLSettings settings)
        throws IllegalArgumentException, SQLException {

    final Set<String> normalizedFieldNames = getNormalizedColumnNames(recordSchema,
            settings.translateFieldNames);

    for (final String requiredColName : tableSchema.getRequiredColumnNames()) {
        final String normalizedColName = normalizeColumnName(requiredColName, settings.translateFieldNames);
        if (!normalizedFieldNames.contains(normalizedColName)) {
            String missingColMessage = "Record does not have a value for the Required column '"
                    + requiredColName + "'";
            if (settings.failUnmappedColumns) {
                getLogger().error(missingColMessage);
                throw new IllegalArgumentException(missingColMessage);
            } else if (settings.warningUnmappedColumns) {
                getLogger().warn(missingColMessage);
            }
        }
    }

    final StringBuilder sqlBuilder = new StringBuilder();
    sqlBuilder.append("INSERT INTO ");
    if (settings.quoteTableName) {
        sqlBuilder.append(tableSchema.getQuotedIdentifierString()).append(tableName)
                .append(tableSchema.getQuotedIdentifierString());
    } else {
        sqlBuilder.append(tableName);
    }
    sqlBuilder.append(" (");

    // iterate over all of the fields in the record, building the SQL statement by adding the column names
    List<String> fieldNames = recordSchema.getFieldNames();
    final List<Integer> includedColumns = new ArrayList<>();
    if (fieldNames != null) {
        int fieldCount = fieldNames.size();
        AtomicInteger fieldsFound = new AtomicInteger(0);

        for (int i = 0; i < fieldCount; i++) {
            RecordField field = recordSchema.getField(i);
            String fieldName = field.getFieldName();

            final ColumnDescription desc = tableSchema.getColumns()
                    .get(normalizeColumnName(fieldName, settings.translateFieldNames));
            if (desc == null && !settings.ignoreUnmappedFields) {
                throw new SQLDataException(
                        "Cannot map field '" + fieldName + "' to any column in the database");
            }

            if (desc != null) {
                if (fieldsFound.getAndIncrement() > 0) {
                    sqlBuilder.append(", ");
                }

                if (settings.escapeColumnNames) {
                    sqlBuilder.append(tableSchema.getQuotedIdentifierString()).append(desc.getColumnName())
                            .append(tableSchema.getQuotedIdentifierString());
                } else {
                    sqlBuilder.append(desc.getColumnName());
                }
                includedColumns.add(i);
            }
        }

        // complete the SQL statement by adding a '?' placeholder for each included column.
        sqlBuilder.append(") VALUES (");
        sqlBuilder.append(StringUtils.repeat("?", ",", includedColumns.size()));
        sqlBuilder.append(")");

        if (fieldsFound.get() == 0) {
            throw new SQLDataException("None of the fields in the record map to the columns defined by the "
                    + tableName + " table");
        }
    }
    return new SqlAndIncludedColumns(sqlBuilder.toString(), includedColumns);
}

From source file:org.apache.nifi.processors.standard.PutDatabaseRecord.java
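The DELETE counterpart of the method above; here a previous value greater than zero from getAndIncrement means an " AND " must be inserted between the null-safe WHERE clauses.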

SqlAndIncludedColumns generateDelete(final RecordSchema recordSchema, final String tableName,
        final TableSchema tableSchema, final DMLSettings settings)
        throws IllegalArgumentException, MalformedRecordException, SQLDataException {

    final Set<String> normalizedFieldNames = getNormalizedColumnNames(recordSchema,
            settings.translateFieldNames);
    for (final String requiredColName : tableSchema.getRequiredColumnNames()) {
        final String normalizedColName = normalizeColumnName(requiredColName, settings.translateFieldNames);
        if (!normalizedFieldNames.contains(normalizedColName)) {
            String missingColMessage = "Record does not have a value for the Required column '"
                    + requiredColName + "'";
            if (settings.failUnmappedColumns) {
                getLogger().error(missingColMessage);
                throw new MalformedRecordException(missingColMessage);
            } else if (settings.warningUnmappedColumns) {
                getLogger().warn(missingColMessage);
            }
        }
    }

    final StringBuilder sqlBuilder = new StringBuilder();
    sqlBuilder.append("DELETE FROM ");
    if (settings.quoteTableName) {
        sqlBuilder.append(tableSchema.getQuotedIdentifierString()).append(tableName)
                .append(tableSchema.getQuotedIdentifierString());
    } else {
        sqlBuilder.append(tableName);
    }

    // iterate over all of the fields in the record, building the SQL statement by adding the column names
    List<String> fieldNames = recordSchema.getFieldNames();
    final List<Integer> includedColumns = new ArrayList<>();
    if (fieldNames != null) {
        sqlBuilder.append(" WHERE ");
        int fieldCount = fieldNames.size();
        AtomicInteger fieldsFound = new AtomicInteger(0);

        for (int i = 0; i < fieldCount; i++) {

            RecordField field = recordSchema.getField(i);
            String fieldName = field.getFieldName();

            final ColumnDescription desc = tableSchema.getColumns()
                    .get(normalizeColumnName(fieldName, settings.translateFieldNames));
            if (desc == null && !settings.ignoreUnmappedFields) {
                throw new SQLDataException(
                        "Cannot map field '" + fieldName + "' to any column in the database");
            }

            if (desc != null) {
                if (fieldsFound.getAndIncrement() > 0) {
                    sqlBuilder.append(" AND ");
                }

                String columnName;
                if (settings.escapeColumnNames) {
                    columnName = tableSchema.getQuotedIdentifierString() + desc.getColumnName()
                            + tableSchema.getQuotedIdentifierString();
                } else {
                    columnName = desc.getColumnName();
                }
                // Need to build a null-safe construct for the WHERE clause, since we are using PreparedStatement and won't know if the values are null. If they are null,
                // then the filter should be "column IS null" vs "column = null". Since we don't know whether the value is null, we can use the following construct (from NIFI-3742):
                //   (column = ? OR (column is null AND ? is null))
                sqlBuilder.append("(");
                sqlBuilder.append(columnName);
                sqlBuilder.append(" = ? OR (");
                sqlBuilder.append(columnName);
                sqlBuilder.append(" is null AND ? is null))");
                includedColumns.add(i);

            }
        }

        if (fieldsFound.get() == 0) {
            throw new SQLDataException("None of the fields in the record map to the columns defined by the "
                    + tableName + " table");
        }
    }

    return new SqlAndIncludedColumns(sqlBuilder.toString(), includedColumns);
}

From source file:org.apache.camel.processor.LoopProcessor.java
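Camel's loop processor keeps its iteration index in an AtomicInteger so it can be shared with asynchronous callbacks; getAndIncrement advances the index after each synchronous iteration.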

@Override
public boolean process(Exchange exchange, AsyncCallback callback) {
    // use AtomicInteger so the values can be passed by reference and kept track of across calls
    AtomicInteger index = new AtomicInteger();
    AtomicInteger count = new AtomicInteger();

    // Intermediate conversion to String is needed when direct conversion to Integer is not available
    // but evaluation result is a textual representation of a numeric value.
    String text = expression.evaluate(exchange, String.class);
    try {
        int num = ExchangeHelper.convertToMandatoryType(exchange, Integer.class, text);
        count.set(num);
    } catch (NoTypeConversionAvailableException e) {
        exchange.setException(e);
        callback.done(true);
        return true;
    }

    // set the size before we start
    exchange.setProperty(Exchange.LOOP_SIZE, count);

    // loop synchronously
    while (index.get() < count.get()) {

        // and prepare for next iteration
        ExchangeHelper.prepareOutToIn(exchange);
        boolean sync = process(exchange, callback, index, count);

        if (!sync) {
            if (LOG.isTraceEnabled()) {
                LOG.trace("Processing exchangeId: " + exchange.getExchangeId()
                        + " is continued being processed asynchronously");
            }
            // the remainder of the routing slip will be completed async
            // so we break out now, then the callback will be invoked which then continue routing from where we left here
            return false;
        }

        if (LOG.isTraceEnabled()) {
            LOG.trace("Processing exchangeId: " + exchange.getExchangeId()
                    + " is continued being processed synchronously");
        }

        // increment counter before next loop
        index.getAndIncrement();
    }

    // we are done so prepare the result
    ExchangeHelper.prepareOutToIn(exchange);
    if (LOG.isTraceEnabled()) {
        LOG.trace("Processing complete for exchangeId: " + exchange.getExchangeId() + " >>> " + exchange);
    }
    callback.done(true);
    return true;
}

From source file:io.uploader.drive.drive.media.MediaHttpUploader.java
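In this resumable-upload loop, getAndIncrement drives a retry counter: each failed chunk upload bumps it, and once the previous value reaches 5 the error is rethrown instead of retried.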

/**
 * Uploads the media in a resumable manner.
 *
 * @param initiationRequestUrl
 *            The request URL where the initiation request will be sent
 * @return HTTP response
 */
// https://developers.google.com/drive/web/manage-uploads#resumable
private HttpResponse resumableUpload(GenericUrl initiationRequestUrl) throws IOException {

    // Make initial request to get the unique upload URL.
    HttpResponse initialResponse = executeUploadInitiation(initiationRequestUrl);
    if (!initialResponse.isSuccessStatusCode()) {
        // If the initiation request is not successful return it
        // immediately.
        logger.info("Unsuccessful: " + initialResponse.getStatusMessage());

        return initialResponse;
    }
    GenericUrl uploadUrl;
    try {
        uploadUrl = new GenericUrl(initialResponse.getHeaders().getLocation());
    } finally {
        initialResponse.disconnect();
    }

    // Convert media content into a byte stream to upload in chunks.
    contentInputStream = mediaContent.getInputStream();
    if (!contentInputStream.markSupported() && isMediaLengthKnown()) {
        // If we know the media content length then wrap the stream into a
        // Buffered input stream to
        // support the {@link InputStream#mark} and {@link
        // InputStream#reset} methods required for
        // handling server errors.
        contentInputStream = new BufferedInputStream(contentInputStream);
    }

    HttpResponse response = null;
    // Upload the media content in chunks.
    while (true) {
        currentRequest = requestFactory.buildPutRequest(uploadUrl, null);
        setContentAndHeadersOnCurrentRequest();
        // set mediaErrorHandler as I/O exception handler and as
        // unsuccessful response handler for
        // calling to serverErrorCallback on an I/O exception or an abnormal
        // HTTP response
        AtomicInteger httpErrorCounter = new AtomicInteger(0);
        new MediaUploadErrorHandler(this, currentRequest, httpErrorCounter);

        AtomicInteger tryCounter = new AtomicInteger(0);
        while (true) {
            try {
                if (isMediaLengthKnown()) {
                    // TODO(rmistry): Support gzipping content for the case where
                    // media content length is
                    // known
                    // (https://code.google.com/p/google-api-java-client/issues/detail?id=691).
                    response = executeCurrentRequestWithoutGZip(currentRequest);
                    break;
                } else {
                    response = executeCurrentRequest(currentRequest);
                    break;
                }
            } catch (Throwable e) {
                logger.error("Error occurred while uploading", e);
                if (tryCounter.getAndIncrement() >= 5) {
                    logger.error("Could not be recovered...");
                    throw e;
                }
                logger.error("Retry (" + tryCounter.get() + " times)", e);
            }
        }

        boolean returningResponse = false;
        try {
            if (response.isSuccessStatusCode()) {
                totalBytesServerReceived = getMediaContentLength();
                if (mediaContent.getCloseInputStream()) {
                    contentInputStream.close();
                }
                updateStateAndNotifyListener(UploadState.MEDIA_COMPLETE);
                returningResponse = true;
                return response;
            }

            int statusCode = response.getStatusCode();
            if (statusCode != 308) {
                // https://developers.google.com/drive/web/manage-uploads#resume-upload
                returningResponse = true;
                return response;
            } else {
                httpErrorCounter.set(0);
            }

            // Check to see if the upload URL has changed on the server.
            String updatedUploadUrl = response.getHeaders().getLocation();
            if (updatedUploadUrl != null) {
                uploadUrl = new GenericUrl(updatedUploadUrl);
            }

            // we check the amount of bytes the server received so far,
            // because the server may process
            // fewer bytes than the amount of bytes the client had sent
            long newBytesServerReceived = getNextByteIndex(response.getHeaders().getRange());
            // the server can receive any amount of bytes from 0 to current
            // chunk length
            long currentBytesServerReceived = newBytesServerReceived - totalBytesServerReceived;
            Preconditions.checkState(
                    currentBytesServerReceived >= 0 && currentBytesServerReceived <= currentChunkLength);
            long copyBytes = currentChunkLength - currentBytesServerReceived;
            if (isMediaLengthKnown()) {
                if (copyBytes > 0) {
                    // If the server didn't receive all the bytes the client
                    // sent the current position of
                    // the input stream is incorrect. So we should reset the
                    // stream and skip those bytes
                    // that the server had already received.
                    // Otherwise (the server got all bytes the client sent),
                    // the stream is in its right
                    // position, and we can continue from there
                    contentInputStream.reset();
                    long actualSkipValue = contentInputStream.skip(currentBytesServerReceived);
                    Preconditions.checkState(currentBytesServerReceived == actualSkipValue);
                }
            } else if (copyBytes == 0) {
                // server got all the bytes, so we don't need to use this
                // buffer. Otherwise, we have to
                // keep the buffer and copy part (or all) of its bytes to
                // the stream we are sending to the
                // server
                currentRequestContentBuffer = null;
            }
            totalBytesServerReceived = newBytesServerReceived;

            updateStateAndNotifyListener(UploadState.MEDIA_IN_PROGRESS);
        } finally {
            if (!returningResponse) {
                response.disconnect();
            }
        }
    }
}

From source file:com.htmlhifive.pitalium.core.selenium.PtlWebDriver.java
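Here a per-capabilities AtomicInteger numbers debug screenshots: getAndIncrement supplies the next file index, and a returned value of 0 triggers cleanup of screenshots from previous runs.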

private void exportDebugScreenshot(Object screenshot) {
    File imageFile;
    if (screenshot instanceof String) {
        imageFile = OutputType.FILE.convertFromBase64Png((String) screenshot);
    } else if (screenshot instanceof byte[]) {
        imageFile = OutputType.FILE.convertFromPngBytes((byte[]) screenshot);
    } else if (screenshot instanceof File) {
        imageFile = (File) screenshot;
    } else {
        LOG.warn("Unknown OutputType: \"{}\"", screenshot.getClass().getName());
        return;
    }

    // Filename -> logs/screenshots/firefox/43/0000.png
    // FIXME: 2016/01/04 ????
    String filename;
    File dir;
    synchronized (DEBUG_SCREENSHOT_COUNTS) {
        AtomicInteger counter = DEBUG_SCREENSHOT_COUNTS.get(capabilities);
        if (counter == null) {
            counter = new AtomicInteger();
            DEBUG_SCREENSHOT_COUNTS.put(capabilities, counter);
        }
        int count = counter.getAndIncrement();

        filename = String.format(Locale.US, "%04d.png", count);
        dir = new File("logs", "screenshots");
        dir = new File(dir, capabilities.getBrowserName());
        if (!Strings.isNullOrEmpty(capabilities.getVersion())) {
            dir = new File(dir, capabilities.getVersion());
        }

        // First time => delete old files
        if (count == 0 && dir.exists()) {
            try {
                FileUtils.deleteDirectory(dir);
            } catch (IOException e) {
                LOG.warn("Cannot delete debug screenshot directory \"" + dir.getAbsolutePath() + "\"", e);
                return;
            }
        }

        if (!dir.exists() && !dir.mkdirs()) {
            LOG.warn("Debug screenshot persist error. Cannot make directory \"{}\"", dir.getAbsolutePath());
            return;
        }
    }

    try {
        Files.copy(imageFile, new File(dir, filename));
    } catch (IOException e) {
        LOG.warn("Debug screenshot persist error", e);
    }
}