Example usage for java.util.concurrent.atomic AtomicInteger getAndIncrement

Introduction

On this page you can find example usage for java.util.concurrent.atomic AtomicInteger getAndIncrement.

Prototype

public final int getAndIncrement() 

Document

Atomically increments the current value, with memory effects as specified by VarHandle#getAndAdd.
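
As a quick illustration of that contract (a minimal sketch, not taken from any of the projects below): getAndIncrement() returns the value the counter held before the increment, and the update stays consistent even when several threads share the counter.

import java.util.concurrent.atomic.AtomicInteger;

public class GetAndIncrementSketch {
    public static void main(String[] args) throws InterruptedException {
        final AtomicInteger counter = new AtomicInteger(0);

        // getAndIncrement() returns the previous value, then adds one atomically.
        int before = counter.getAndIncrement(); // before == 0, counter is now 1

        // The same counter can be shared across threads without external locking.
        Thread[] workers = new Thread[4];
        for (int i = 0; i < workers.length; i++) {
            workers[i] = new Thread(() -> {
                for (int j = 0; j < 1000; j++) {
                    counter.getAndIncrement();
                }
            });
            workers[i].start();
        }
        for (Thread worker : workers) {
            worker.join();
        }

        System.out.println(before);        // 0
        System.out.println(counter.get()); // 4001 (1 + 4 * 1000)
    }
}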

Usage

From source file:org.apache.jackrabbit.oak.plugins.blob.MarkSweepGarbageCollector.java

/**
 * Iterates the complete node tree and collects all blob references.
 * @param fs the garbage collector file state
 */
protected void iterateNodeTree(GarbageCollectorFileState fs) throws IOException {
    final BufferedWriter writer = Files.newWriter(fs.getMarkedRefs(), Charsets.UTF_8);
    final AtomicInteger count = new AtomicInteger();
    try {
        marker.collectReferences(new ReferenceCollector() {
            private final List<String> idBatch = Lists.newArrayListWithCapacity(getBatchCount());

            private final boolean debugMode = LOG.isTraceEnabled();

            @Override
            public void addReference(String blobId, String nodeId) {
                if (debugMode) {
                    LOG.trace("BlobId : {}, NodeId : {}", blobId, nodeId);
                }

                try {
                    Iterator<String> idIter = blobStore.resolveChunks(blobId);
                    Joiner delimJoiner = Joiner.on(DELIM).skipNulls();
                    while (idIter.hasNext()) {
                        String id = idIter.next();

                        idBatch.add(delimJoiner.join(id, nodeId));

                        if (idBatch.size() >= getBatchCount()) {
                            saveBatchToFile(idBatch, writer);
                            idBatch.clear();
                        }

                        if (debugMode) {
                            LOG.trace("chunkId : {}", id);
                        }
                        count.getAndIncrement();
                    }

                    if (!idBatch.isEmpty()) {
                        saveBatchToFile(idBatch, writer);
                        idBatch.clear();
                    }

                    if (count.get() % getBatchCount() == 0) {
                        LOG.info("Collected ({}) blob references", count.get());
                    }
                } catch (Exception e) {
                    throw new RuntimeException("Error in retrieving references", e);
                }
            }
        });
        LOG.info("Number of valid blob references marked under mark phase of " + "Blob garbage collection [{}]",
                count.get());
        // sort the marked references with the first part of the key
        GarbageCollectorFileState.sort(fs.getMarkedRefs(), new Comparator<String>() {
            @Override
            public int compare(String s1, String s2) {
                return s1.split(DELIM)[0].compareTo(s2.split(DELIM)[0]);
            }
        });
    } finally {
        IOUtils.closeQuietly(writer);
    }
}

From source file:com.spectralogic.ds3client.integration.GetJobManagement_Test.java

@Test
public void testGetJobUserSuppliedTransferRetryDecorator() throws IOException, InterruptedException {
    final String tempPathPrefix = null;
    final Path tempDirectory = Files.createTempDirectory(Paths.get("."), tempPathPrefix);
    final String fileName = "beowulf.txt";

    try {
        final List<Ds3Object> objects = Lists.newArrayList(new Ds3Object(fileName));

        final GetBulkJobSpectraS3Request getBulkJobSpectraS3Request = new GetBulkJobSpectraS3Request(
                BUCKET_NAME, objects);

        final GetBulkJobSpectraS3Response getBulkJobSpectraS3Response = client
                .getBulkJobSpectraS3(getBulkJobSpectraS3Request);

        final MasterObjectList masterObjectList = getBulkJobSpectraS3Response.getMasterObjectList();

        final AtomicInteger numTimesTransferCalled = new AtomicInteger(0);

        final TransferStrategyBuilder transferStrategyBuilder = new TransferStrategyBuilder()
                .withDs3Client(client).withMasterObjectList(masterObjectList)
                .withChannelBuilder(new FileObjectGetter(tempDirectory))
                .withRangesForBlobs(PartialObjectHelpers.mapRangesToBlob(masterObjectList.getObjects(),
                        PartialObjectHelpers.getPartialObjectsRanges(objects)))
                .withTransferRetryDecorator(new UserSuppliedTransferRetryDecorator(new Monitorable() {
                    @Override
                    public void monitor() {
                        numTimesTransferCalled.getAndIncrement();
                    }
                }));

        final TransferStrategy transferStrategy = transferStrategyBuilder.makeGetTransferStrategy();

        transferStrategy.transfer();

        final Collection<File> filesInTempDirectory = FileUtils.listFiles(tempDirectory.toFile(), null, false);

        for (final File file : filesInTempDirectory) {
            assertEquals(fileName, file.getName());
        }

        assertEquals(1, numTimesTransferCalled.get());
    } finally {
        FileUtils.deleteDirectory(tempDirectory.toFile());
    }
}

From source file:com.vmware.admiral.adapter.docker.service.DockerAdapterService.java

@SuppressWarnings("unchecked")
private void processCreateContainer(RequestContext context, int retriesCount) {
    AssertUtil.assertNotEmpty(context.containerState.names, "containerState.names");

    String fullImageName = DockerImage.fromImageName(context.containerDescription.image).toString();

    CommandInput createCommandInput = new CommandInput(context.commandInput)
            .withProperty(DOCKER_CONTAINER_IMAGE_PROP_NAME, fullImageName)
            .withProperty(DOCKER_CONTAINER_TTY_PROP_NAME, true)
            .withProperty(DOCKER_CONTAINER_OPEN_STDIN_PROP_NAME, true)
            .withPropertyIfNotNull(DOCKER_CONTAINER_COMMAND_PROP_NAME,
                    CommandUtil.spread(context.containerDescription.command))
            .withProperty(DOCKER_CONTAINER_NAME_PROP_NAME, context.containerState.names.get(0))
            .withPropertyIfNotNull(DOCKER_CONTAINER_ENV_PROP_NAME, context.containerState.env)
            .withPropertyIfNotNull(DOCKER_CONTAINER_USER_PROP_NAME, context.containerDescription.user)
            .withPropertyIfNotNull(DOCKER_CONTAINER_ENTRYPOINT_PROP_NAME,
                    context.containerDescription.entryPoint)
            .withPropertyIfNotNull(DOCKER_CONTAINER_HOSTNAME_PROP_NAME, context.containerDescription.hostname)
            .withPropertyIfNotNull(DOCKER_CONTAINER_DOMAINNAME_PROP_NAME,
                    context.containerDescription.domainName)
            .withPropertyIfNotNull(DOCKER_CONTAINER_WORKING_DIR_PROP_NAME,
                    context.containerDescription.workingDir);

    Map<String, Object> hostConfig = getOrAddMap(createCommandInput, DOCKER_CONTAINER_HOST_CONFIG_PROP_NAME);

    hostConfig.put(MEMORY_SWAP_PROP_NAME, context.containerDescription.memorySwapLimit);

    hostConfig.put(MEMORY_PROP_NAME, context.containerState.memoryLimit);
    hostConfig.put(CPU_SHARES_PROP_NAME, context.containerState.cpuShares);

    // TODO Can't limit the storage? https://github.com/docker/docker/issues/3804

    hostConfig.put(DNS_PROP_NAME, context.containerDescription.dns);
    hostConfig.put(DNS_SEARCH_PROP_NAME, context.containerDescription.dnsSearch);
    hostConfig.put(EXTRA_HOSTS_PROP_NAME, context.containerState.extraHosts);

    // the volumes are added as binds property
    hostConfig.put(BINDS_PROP_NAME, filterVolumeBindings(context.containerState.volumes));
    hostConfig.put(VOLUME_DRIVER, context.containerDescription.volumeDriver);
    hostConfig.put(CAP_ADD_PROP_NAME, context.containerDescription.capAdd);
    hostConfig.put(CAP_DROP_PROP_NAME, context.containerDescription.capDrop);
    hostConfig.put(NETWORK_MODE_PROP_NAME, context.containerDescription.networkMode);
    hostConfig.put(LINKS_PROP_NAME, context.containerState.links);
    hostConfig.put(PRIVILEGED_PROP_NAME, context.containerDescription.privileged);
    hostConfig.put(PID_MODE_PROP_NAME, context.containerDescription.pidMode);

    if (context.containerDescription.publishAll != null) {
        hostConfig.put(PUBLISH_ALL, context.containerDescription.publishAll);
    }

    // Mapping properties from containerState to the docker config:
    hostConfig.put(VOLUMES_FROM_PROP_NAME, context.containerState.volumesFrom);

    // Add the first container network to avoid the container being connected to the default network.
    // The other container networks will be added after the container is created.
    // Docker APIs fail if more than one network is added to the container when it is created.
    if (context.containerState.networks != null && !context.containerState.networks.isEmpty()) {
        createNetworkConfig(createCommandInput, context.containerState.networks.entrySet().iterator().next());
    }

    if (context.containerState.ports != null) {
        addPortBindings(createCommandInput, context.containerState.ports);
    }

    if (context.containerDescription.logConfig != null) {
        addLogConfiguration(createCommandInput, context.containerDescription.logConfig);
    }

    if (context.containerDescription.restartPolicy != null) {
        Map<String, Object> restartPolicy = new HashMap<>();
        restartPolicy.put(RESTART_POLICY_NAME_PROP_NAME, context.containerDescription.restartPolicy);
        if (context.containerDescription.maximumRetryCount != null
                && context.containerDescription.maximumRetryCount != 0) {
            restartPolicy.put(RESTART_POLICY_RETRIES_PROP_NAME, context.containerDescription.maximumRetryCount);
        }
        hostConfig.put(RESTART_POLICY_PROP_NAME, restartPolicy);
    }

    if (context.containerState.volumes != null) {
        Map<String, Object> volumeMap = new HashMap<>();
        for (String volume : context.containerState.volumes) {
            // docker expects each volume to be mapped to an empty object (an empty map)
            // where the key is the container_path (second element in the volume string)
            String containerPart = VolumeBinding.fromString(volume).getContainerPart();
            volumeMap.put(containerPart, Collections.emptyMap());
        }

        createCommandInput.withProperty(DOCKER_CONTAINER_VOLUMES_PROP_NAME, volumeMap);
    }

    if (context.containerDescription.device != null) {
        List<?> devices = Arrays.stream(context.containerDescription.device)
                .map(deviceStr -> DockerDevice.fromString(deviceStr).toMap()).collect(Collectors.toList());

        hostConfig.put(DEVICES_PROP_NAME, devices);
    }

    // copy custom properties
    if (context.containerState.customProperties != null) {
        for (Map.Entry<String, String> customProperty : context.containerState.customProperties.entrySet()) {
            createCommandInput.withProperty(customProperty.getKey(), customProperty.getValue());
        }
    }

    if (ContainerHostUtil.isVicHost(context.computeState)) {
        // VIC requires several mandatory elements; add them
        addVicRequiredConfig(createCommandInput);
    }

    AtomicInteger retryCount = new AtomicInteger(retriesCount);
    ensurePropertyExists((retryCountProperty) -> {
        context.executor.createContainer(createCommandInput, (o, ex) -> {
            if (ex != null) {
                if (shouldTryCreateFromLocalImage(context.containerDescription)) {
                    logInfo("Unable to create container using local image. Will be fetched from a remote "
                            + "location...");
                    context.containerDescription.customProperties
                            .put(DOCKER_CONTAINER_CREATE_USE_LOCAL_IMAGE_WITH_PRIORITY, "false");
                    processContainerDescription(context);
                } else if (RETRIABLE_HTTP_STATUSES.contains(o.getStatusCode())
                        && retryCount.getAndIncrement() < retryCountProperty) {
                    logWarning("Provisioning for container %s failed with %s. Retries left %d",
                            context.containerState.names.get(0), Utils.toString(ex),
                            retryCountProperty - retryCount.get());
                    processCreateContainer(context, retryCount.get());
                } else {
                    fail(context.request, o, ex);
                }
            } else {
                handleExceptions(context.request, context.operation, () -> {
                    Map<String, Object> body = o.getBody(Map.class);
                    context.containerState.id = (String) body.get(DOCKER_CONTAINER_ID_PROP_NAME);
                    processCreatedContainer(context);
                });
            }
        });
    });
}
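
In the example above, retryCount.getAndIncrement() < retryCountProperty both counts the failed attempts and caps how many retries are allowed. A minimal standalone sketch of that bounded-retry idiom (names such as runWithRetries are illustrative, not taken from the Admiral source):

import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.BooleanSupplier;

public class RetryCounterSketch {

    // Retries the operation while getAndIncrement() stays below the limit, so the
    // counter both bounds the retries and records how many attempts were made.
    static boolean runWithRetries(BooleanSupplier operation, int maxRetries) {
        final AtomicInteger retryCount = new AtomicInteger(0);
        while (!operation.getAsBoolean()) {
            if (retryCount.getAndIncrement() >= maxRetries) {
                return false; // retry budget exhausted
            }
        }
        return true;
    }

    public static void main(String[] args) {
        final AtomicInteger attempts = new AtomicInteger(0);
        // A stand-in for a flaky call that succeeds on its third attempt.
        final BooleanSupplier flaky = () -> attempts.incrementAndGet() >= 3;

        System.out.println(runWithRetries(flaky, 5)); // true
        attempts.set(0);
        System.out.println(runWithRetries(flaky, 1)); // false
    }
}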

From source file:com.opengamma.masterdb.portfolio.DbPortfolioMaster.java

/**
 * Recursively create the arguments to insert into the tree existing nodes.
 *
 * @param portfolioUid the portfolio unique identifier, not null
 * @param parentNodeUid the parent node unique identifier, not null
 * @param node the root node, not null
 * @param update true if updating portfolio, false if adding new portfolio
 * @param portfolioId the portfolio id, not null
 * @param portfolioOid the portfolio oid, not null
 * @param parentNodeId the parent node id, null if root node
 * @param parentNodeOid the parent node oid, null if root node
 * @param counter the counter to create the node id, use {@code getAndIncrement}, not null
 * @param depth the depth of the node in the portfolio
 * @param argsList the list of arguments to build, not null
 * @param posList the list of arguments for inserting positions, not null
 */
protected void insertBuildArgs(final UniqueId portfolioUid, final UniqueId parentNodeUid,
        final ManageablePortfolioNode node, final boolean update, final Long portfolioId,
        final Long portfolioOid, final Long parentNodeId, final Long parentNodeOid, final AtomicInteger counter,
        final int depth, final List<DbMapSqlParameterSource> argsList,
        final List<DbMapSqlParameterSource> posList) {
    // need to insert parent before children for referential integrity
    final Long nodeId = nextId("prt_master_seq");
    final Long nodeOid = (update && node.getUniqueId() != null ? extractOid(node.getUniqueId()) : nodeId);
    UniqueId nodeUid = createUniqueId(nodeOid, nodeId);
    node.setUniqueId(nodeUid);
    node.setParentNodeId(parentNodeUid);
    node.setPortfolioId(portfolioUid);
    final DbMapSqlParameterSource treeArgs = new DbMapSqlParameterSource().addValue("node_id", nodeId)
            .addValue("node_oid", nodeOid).addValue("portfolio_id", portfolioId)
            .addValue("portfolio_oid", portfolioOid).addValue("parent_node_id", parentNodeId, Types.BIGINT)
            .addValue("parent_node_oid", parentNodeOid, Types.BIGINT).addValue("depth", depth)
            .addValue("name", StringUtils.defaultString(node.getName()));
    argsList.add(treeArgs);

    // store position links
    Set<ObjectId> positionIds = new LinkedHashSet<ObjectId>(node.getPositionIds());
    node.getPositionIds().clear();
    node.getPositionIds().addAll(positionIds);
    for (ObjectId positionId : positionIds) {
        final DbMapSqlParameterSource posArgs = new DbMapSqlParameterSource().addValue("node_id", nodeId)
                .addValue("key_scheme", positionId.getScheme()).addValue("key_value", positionId.getValue());
        posList.add(posArgs);
    }

    // store the left/right before/after the child loop and back fill into stored args row
    treeArgs.addValue("tree_left", counter.getAndIncrement());
    for (ManageablePortfolioNode childNode : node.getChildNodes()) {
        insertBuildArgs(portfolioUid, nodeUid, childNode, update, portfolioId, portfolioOid, nodeId, nodeOid,
                counter, depth + 1, argsList, posList);
    }
    treeArgs.addValue("tree_right", counter.getAndIncrement());
}
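
The shared counter above assigns nested-set (tree_left/tree_right) bounds: each node takes one value before its children are visited and one after, so a parent's interval encloses those of all its descendants. A minimal standalone sketch of that numbering scheme (the TreeNode class is illustrative, not from the OpenGamma source):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

class TreeNode {
    final String name;
    final List<TreeNode> children = new ArrayList<>();
    int left;
    int right;

    TreeNode(String name) {
        this.name = name;
    }
}

public class NestedSetNumbering {

    // Assign left before descending and right after returning, so a parent's
    // [left, right] interval encloses the intervals of all of its descendants.
    static void number(TreeNode node, AtomicInteger counter) {
        node.left = counter.getAndIncrement();
        for (TreeNode child : node.children) {
            number(child, counter);
        }
        node.right = counter.getAndIncrement();
    }

    public static void main(String[] args) {
        TreeNode root = new TreeNode("root");
        TreeNode a = new TreeNode("a");
        TreeNode b = new TreeNode("b");
        root.children.add(a);
        root.children.add(b);

        number(root, new AtomicInteger(1));

        // root: [1, 6], a: [2, 3], b: [4, 5]
        System.out.println(root.name + " [" + root.left + ", " + root.right + "]");
        System.out.println(a.name + " [" + a.left + ", " + a.right + "]");
        System.out.println(b.name + " [" + b.left + ", " + b.right + "]");
    }
}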

From source file:services.object.ObjectService.java

public int objsInContainer(SWGObject owner, TangibleObject container) {
    if (owner == null) {
        Console.println("Owner null!");
    }
    if (container == null) {
        Console.println("Container is null!");
    }
    final AtomicInteger count = new AtomicInteger();

    container.viewChildren(owner, false, false, new Traverser() {

        @Override
        public void process(SWGObject child) {
            count.getAndIncrement();
        }

    });

    return count.get();
}
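
Here the AtomicInteger is less about thread safety than about mutability: an anonymous inner class can only capture final (or effectively final) locals, so a plain int could not be incremented from inside the callback, while a final AtomicInteger can. A minimal sketch of the same pattern (the Visitor interface below is illustrative):

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

public class CountingCallbackSketch {

    // A simple callback interface standing in for Traverser in the example above.
    interface Visitor {
        void visit(String item);
    }

    static void forEachItem(List<String> items, Visitor visitor) {
        for (String item : items) {
            visitor.visit(item);
        }
    }

    public static void main(String[] args) {
        final List<String> items = Arrays.asList("a", "b", "c");

        // A local int cannot be mutated from inside the anonymous class,
        // so an AtomicInteger serves as a final, mutable counter holder.
        final AtomicInteger count = new AtomicInteger();

        forEachItem(items, new Visitor() {
            @Override
            public void visit(String item) {
                count.getAndIncrement();
            }
        });

        System.out.println(count.get()); // 3
    }
}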

From source file:com.spectralogic.ds3client.integration.GetJobManagement_Test.java

private void testGetJobWithUserSuppliedChannelStrategy(
        final TransferStrategyBuilderModifiable transferStrategyBuilderModifiable)
        throws IOException, InterruptedException {
    final String tempPathPrefix = null;
    final Path tempDirectory = Files.createTempDirectory(Paths.get("."), tempPathPrefix);
    final String fileName = "beowulf.txt";

    try {
        final List<Ds3Object> objects = Lists.newArrayList(new Ds3Object(fileName));

        final GetBulkJobSpectraS3Request getBulkJobSpectraS3Request = new GetBulkJobSpectraS3Request(
                BUCKET_NAME, objects);

        final GetBulkJobSpectraS3Response getBulkJobSpectraS3Response = client
                .getBulkJobSpectraS3(getBulkJobSpectraS3Request);

        final MasterObjectList masterObjectList = getBulkJobSpectraS3Response.getMasterObjectList();

        final AtomicInteger numTimesChannelOpened = new AtomicInteger(0);
        final AtomicInteger numTimesChannelClosed = new AtomicInteger(0);

        TransferStrategyBuilder transferStrategyBuilder = new TransferStrategyBuilder().withDs3Client(client)
                .withMasterObjectList(masterObjectList).withChannelBuilder(new FileObjectGetter(tempDirectory))
                .withRangesForBlobs(PartialObjectHelpers.mapRangesToBlob(masterObjectList.getObjects(),
                        PartialObjectHelpers.getPartialObjectsRanges(objects)))
                .withChannelStrategy(new UserSuppliedPutChannelStrategy(new FileObjectGetter(tempDirectory),
                        new ChannelMonitorable() {
                            @Override
                            public void acquired() {
                                numTimesChannelOpened.getAndIncrement();
                            }

                            @Override
                            public void released() {
                                numTimesChannelClosed.getAndIncrement();
                            }
                        }));

        transferStrategyBuilder = transferStrategyBuilderModifiable.modify(transferStrategyBuilder);

        final TransferStrategy transferStrategy = transferStrategyBuilder.makeGetTransferStrategy();

        transferStrategy.transfer();

        final Collection<File> filesInTempDirectory = FileUtils.listFiles(tempDirectory.toFile(), null, false);

        for (final File file : filesInTempDirectory) {
            assertEquals(fileName, file.getName());
        }

        assertEquals(1, numTimesChannelOpened.get());
        assertEquals(1, numTimesChannelClosed.get());
    } finally {
        FileUtils.deleteDirectory(tempDirectory.toFile());
    }
}

From source file:org.apache.nifi.processors.standard.PutDatabaseRecord.java

SqlAndIncludedColumns generateUpdate(final RecordSchema recordSchema, final String tableName,
        final String updateKeys, final TableSchema tableSchema, final DMLSettings settings)
        throws IllegalArgumentException, MalformedRecordException, SQLException {

    final Set<String> updateKeyNames;
    if (updateKeys == null) {
        updateKeyNames = tableSchema.getPrimaryKeyColumnNames();
    } else {
        updateKeyNames = new HashSet<>();
        for (final String updateKey : updateKeys.split(",")) {
            updateKeyNames.add(updateKey.trim());
        }
    }

    if (updateKeyNames.isEmpty()) {
        throw new SQLIntegrityConstraintViolationException(
                "Table '" + tableName + "' does not have a Primary Key and no Update Keys were specified");
    }

    final StringBuilder sqlBuilder = new StringBuilder();
    sqlBuilder.append("UPDATE ");
    if (settings.quoteTableName) {
        sqlBuilder.append(tableSchema.getQuotedIdentifierString()).append(tableName)
                .append(tableSchema.getQuotedIdentifierString());
    } else {
        sqlBuilder.append(tableName);
    }

    // Create a Set of all normalized Update Key names, and ensure that there is a field in the record
    // for each of the Update Key fields.
    final Set<String> normalizedFieldNames = getNormalizedColumnNames(recordSchema,
            settings.translateFieldNames);
    final Set<String> normalizedUpdateNames = new HashSet<>();
    for (final String uk : updateKeyNames) {
        final String normalizedUK = normalizeColumnName(uk, settings.translateFieldNames);
        normalizedUpdateNames.add(normalizedUK);

        if (!normalizedFieldNames.contains(normalizedUK)) {
            String missingColMessage = "Record does not have a value for the "
                    + (updateKeys == null ? "Primary" : "Update") + " Key column '" + uk + "'";
            if (settings.failUnmappedColumns) {
                getLogger().error(missingColMessage);
                throw new MalformedRecordException(missingColMessage);
            } else if (settings.warningUnmappedColumns) {
                getLogger().warn(missingColMessage);
            }
        }
    }

    // iterate over all of the fields in the record, building the SQL statement by adding the column names
    List<String> fieldNames = recordSchema.getFieldNames();
    final List<Integer> includedColumns = new ArrayList<>();
    if (fieldNames != null) {
        sqlBuilder.append(" SET ");

        int fieldCount = fieldNames.size();
        AtomicInteger fieldsFound = new AtomicInteger(0);

        for (int i = 0; i < fieldCount; i++) {
            RecordField field = recordSchema.getField(i);
            String fieldName = field.getFieldName();

            final String normalizedColName = normalizeColumnName(fieldName, settings.translateFieldNames);
            final ColumnDescription desc = tableSchema.getColumns()
                    .get(normalizeColumnName(fieldName, settings.translateFieldNames));
            if (desc == null) {
                if (!settings.ignoreUnmappedFields) {
                    throw new SQLDataException(
                            "Cannot map field '" + fieldName + "' to any column in the database");
                } else {
                    continue;
                }
            }

            // Check if this column is an Update Key. If so, skip it for now. We will come
            // back to it after we finish the SET clause
            if (!normalizedUpdateNames.contains(normalizedColName)) {
                if (fieldsFound.getAndIncrement() > 0) {
                    sqlBuilder.append(", ");
                }

                if (settings.escapeColumnNames) {
                    sqlBuilder.append(tableSchema.getQuotedIdentifierString()).append(desc.getColumnName())
                            .append(tableSchema.getQuotedIdentifierString());
                } else {
                    sqlBuilder.append(desc.getColumnName());
                }

                sqlBuilder.append(" = ?");
                includedColumns.add(i);
            }
        }

        // Set the WHERE clause based on the Update Key values
        sqlBuilder.append(" WHERE ");
        AtomicInteger whereFieldCount = new AtomicInteger(0);

        for (int i = 0; i < fieldCount; i++) {

            RecordField field = recordSchema.getField(i);
            String fieldName = field.getFieldName();

            final String normalizedColName = normalizeColumnName(fieldName, settings.translateFieldNames);
            final ColumnDescription desc = tableSchema.getColumns()
                    .get(normalizeColumnName(fieldName, settings.translateFieldNames));
            if (desc != null) {

                // Check if this column is an Update Key. If so, add it to the WHERE clause
                if (normalizedUpdateNames.contains(normalizedColName)) {

                    if (whereFieldCount.getAndIncrement() > 0) {
                        sqlBuilder.append(" AND ");
                    }

                    if (settings.escapeColumnNames) {
                        sqlBuilder.append(tableSchema.getQuotedIdentifierString()).append(normalizedColName)
                                .append(tableSchema.getQuotedIdentifierString());
                    } else {
                        sqlBuilder.append(normalizedColName);
                    }
                    sqlBuilder.append(" = ?");
                    includedColumns.add(i);
                }
            }
        }
    }
    return new SqlAndIncludedColumns(sqlBuilder.toString(), includedColumns);
}
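
The getAndIncrement() > 0 checks above are a compact way to prepend a separator before every appended element except the first. A minimal sketch of that separator idiom (buildSetClause is an illustrative name, not from the NiFi source):

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

public class SeparatorIdiomSketch {

    // Appends "col = ?" fragments, inserting ", " before every fragment except the first.
    static String buildSetClause(List<String> columns) {
        final StringBuilder sqlBuilder = new StringBuilder(" SET ");
        final AtomicInteger fieldsFound = new AtomicInteger(0);

        for (String column : columns) {
            // getAndIncrement() returns 0 only for the first appended column,
            // so the separator is skipped exactly once.
            if (fieldsFound.getAndIncrement() > 0) {
                sqlBuilder.append(", ");
            }
            sqlBuilder.append(column).append(" = ?");
        }
        return sqlBuilder.toString();
    }

    public static void main(String[] args) {
        System.out.println(buildSetClause(Arrays.asList("name", "email", "age")));
        // prints:  SET name = ?, email = ?, age = ?
    }
}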

From source file:com.spectralogic.ds3client.integration.GetJobManagement_Test.java

@Test
public void testGetJobUserSuppliedChunkAttemptRetryBehavior() throws IOException, InterruptedException {
    final String tempPathPrefix = null;
    final Path tempDirectory = Files.createTempDirectory(Paths.get("."), tempPathPrefix);
    final String fileName = "beowulf.txt";

    try {
        final List<Ds3Object> objects = Lists.newArrayList(new Ds3Object(fileName));

        final GetBulkJobSpectraS3Request getBulkJobSpectraS3Request = new GetBulkJobSpectraS3Request(
                BUCKET_NAME, objects);

        final GetBulkJobSpectraS3Response getBulkJobSpectraS3Response = client
                .getBulkJobSpectraS3(getBulkJobSpectraS3Request);

        final MasterObjectList masterObjectList = getBulkJobSpectraS3Response.getMasterObjectList();

        final AtomicInteger numTimesInvokeCalled = new AtomicInteger(0);
        final AtomicInteger numTimesResetCalled = new AtomicInteger(0);

        final TransferStrategyBuilder transferStrategyBuilder = new TransferStrategyBuilder()
                .withDs3Client(client).withMasterObjectList(masterObjectList)
                .withChannelBuilder(new FileObjectGetter(tempDirectory))
                .withRangesForBlobs(PartialObjectHelpers.mapRangesToBlob(masterObjectList.getObjects(),
                        PartialObjectHelpers.getPartialObjectsRanges(objects)))
                .withChunkAttemptRetryBehavior(
                        new UserSuppliedChunkAttemptRetryBehavior(new ChunkAttemptRetryBehaviorMonitorable() {
                            @Override
                            public void invoke() {
                                numTimesInvokeCalled.getAndIncrement();
                            }

                            @Override
                            public void reset() {
                                numTimesResetCalled.getAndIncrement();
                            }
                        }));

        final TransferStrategy transferStrategy = transferStrategyBuilder.makeGetTransferStrategy();

        transferStrategy.transfer();

        final Collection<File> filesInTempDirectory = FileUtils.listFiles(tempDirectory.toFile(), null, false);

        for (final File file : filesInTempDirectory) {
            assertEquals(fileName, file.getName());
        }

        assertEquals(0, numTimesInvokeCalled.get());
        assertEquals(1, numTimesResetCalled.get());
    } finally {
        FileUtils.deleteDirectory(tempDirectory.toFile());
    }
}

From source file:org.sakaiproject.tool.assessment.facade.ItemHashUtil.java

/**
 * Bit of a hack to allow reuse between {@link ItemFacadeQueries} and {@link PublishedItemFacadeQueries}.
 * Arguments are rather arbitrary extension points to support what we happen to <em>know</em> are the differences
 * between item and published item processing, as well as the common utilities/service dependencies.
 *
 * @param batchSize
 * @param hqlQueries
 * @param concreteType
 * @param hashAndAssignCallback
 * @param hibernateTemplate
 * @return
 */
BackfillItemHashResult backfillItemHashes(int batchSize, Map<String, String> hqlQueries,
        Class<? extends ItemDataIfc> concreteType, Function<ItemDataIfc, ItemDataIfc> hashAndAssignCallback,
        HibernateTemplate hibernateTemplate) {

    final long startTime = System.currentTimeMillis();
    log.debug("Hash backfill starting for items of type [" + concreteType.getSimpleName() + "]");

    if (batchSize <= 0) {
        batchSize = 100;
    }
    final int flushSize = batchSize;

    final AtomicInteger totalItems = new AtomicInteger(0);
    final AtomicInteger totalItemsNeedingBackfill = new AtomicInteger(0);
    final AtomicInteger batchNumber = new AtomicInteger(0);
    final AtomicInteger recordsRead = new AtomicInteger(0);
    final AtomicInteger recordsUpdated = new AtomicInteger(0);
    final Map<Long, Throwable> hashingErrors = new TreeMap<>();
    final Map<Integer, Throwable> otherErrors = new TreeMap<>();
    final List<Long> batchElapsedTimes = new ArrayList<>();
    // always needed as a *printable* average per-batch timing value, so just store it as a string, and cache it at
    // this scope b/c we sometimes need to print a single calculation multiple times, e.g. in the last batch and
    // at method exit
    final AtomicReference<String> currentAvgBatchElapsedTime = new AtomicReference<>("0.00");
    final AtomicBoolean areMoreItems = new AtomicBoolean(true);

    // Get the item totals up front since a) we know any questions created while the job is running will be
    // assigned hashes and thus won't need to be handled by the job and b) it makes bookkeeping within the job
    // much easier
    hibernateTemplate.execute(session -> {
        session.setDefaultReadOnly(true);
        totalItems.set(countItems(hqlQueries, session));
        totalItemsNeedingBackfill.set(countItemsNeedingHashBackfill(hqlQueries, session));
        log.debug("Hash backfill required for [" + totalItemsNeedingBackfill + "] of [" + totalItems
                + "] items of type [" + concreteType.getSimpleName() + "]");
        return null;
    });

    while (areMoreItems.get()) {
        long batchStartTime = System.currentTimeMillis();
        batchNumber.getAndIncrement();
        final AtomicInteger itemsHashedInBatch = new AtomicInteger(0);
        final AtomicInteger itemsReadInBatch = new AtomicInteger(0);
        final AtomicReference<Throwable> failure = new AtomicReference<>(null);

        // The idea here is to a) avoid very long-running transactions, b) avoid reading all items into memory,
        // and c) avoid weirdness, e.g. duplicate results, when paginating complex hibernate objects. So
        // there's a per-batch transaction, and each batch re-runs the same two item lookup queries, one to
        // get the list of IDs for the next page of items, and one to resolve those IDs to items
        try {
            new TransactionTemplate(transactionManager, requireNewTransaction()).execute(status -> {
                hibernateTemplate.execute(session -> {
                    List<ItemDataIfc> itemsInBatch = null;
                    try { // resource cleanup block
                        session.setFlushMode(FlushMode.MANUAL);
                        try { // initial read block (failures here are fatal)

                            // set up the actual result set for this batch of items. use error count to skip over failed items
                            final List<Long> itemIds = itemIdsNeedingHashBackfill(hqlQueries, flushSize,
                                    hashingErrors.size(), session);
                            itemsInBatch = itemsById(itemIds, hqlQueries, session);

                        } catch (RuntimeException e) {
                            // Panic on failure to read counts and/or the actual items in the batch.
                            // Otherwise we would potentially loop indefinitely, since this design has no way to
                            // skip this page of results.
                            log.error("Failed to read batch of hashable items. Giving up at record ["
                                    + recordsRead + "] of [" + totalItemsNeedingBackfill + "] Type: ["
                                    + concreteType.getSimpleName() + "]", e);
                            areMoreItems.set(false); // force overall loop to exit
                            throw e; // force txn to give up
                        }

                        for (ItemDataIfc item : itemsInBatch) {
                            recordsRead.getAndIncrement();
                            itemsReadInBatch.getAndIncrement();

                            // Assign the item's hash/es
                            try {
                                log.debug("Backfilling hash for item [" + recordsRead + "] of ["
                                        + totalItemsNeedingBackfill + "] Type: [" + concreteType.getSimpleName()
                                        + "] ID: [" + item.getItemId() + "]");
                                hashAndAssignCallback.apply(item);
                                itemsHashedInBatch.getAndIncrement();
                            } catch (Throwable t) {
                                // Failures considered ignorable here... probably some unexpected item state
                                // that prevented hash calculation.
                                //
                                // Re the log statement... yes, the caller probably logs exceptions, but likely
                                // without stack traces, and we'd like to advertise failures as quickly as possible,
                                // so we go ahead and emit an error log here.
                                log.error("Item hash calculation failed for item [" + recordsRead + "] of ["
                                        + totalItemsNeedingBackfill + "] Type: [" + concreteType.getSimpleName()
                                        + "] ID: [" + (item == null ? "?" : item.getItemId()) + "]", t);
                                hashingErrors.put(item.getItemId(), t);
                            }

                        }
                        if (itemsHashedInBatch.get() > 0) {
                            session.flush();
                            recordsUpdated.getAndAdd(itemsHashedInBatch.get());
                        }
                        areMoreItems.set(itemsInBatch.size() >= flushSize);

                    } finally {
                        quietlyClear(session); // potentially very large, so clear aggressively
                    }
                    return null;
                }); // end session
                return null;
            }); // end transaction
        } catch (Throwable t) {
            // We're still in the loop over all batches, but something caused the current batch (and its
            // transaction) to exit abnormally. Logging of both success and failure cases is quite detailed,
            // and needs the same timing calcs, so is consolidated into the 'finally' block below.
            failure.set(t);
            otherErrors.put(batchNumber.get(), t);
        } finally {
            // Detailed batch-level reporting
            final long batchElapsed = (System.currentTimeMillis() - batchStartTime);
            batchElapsedTimes.add(batchElapsed);
            currentAvgBatchElapsedTime.set(new DecimalFormat("#.00")
                    .format(batchElapsedTimes.stream().collect(Collectors.averagingLong(l -> l))));
            if (failure.get() == null) {
                log.debug("Item hash backfill batch flushed to database. Type: [" + concreteType.getSimpleName()
                        + "] Batch number: [" + batchNumber + "] Items attempted in batch: [" + itemsReadInBatch
                        + "] Items succeeded in batch: [" + itemsHashedInBatch + "] Total items attempted: ["
                        + recordsRead + "] Total items succeeded: [" + recordsUpdated
                        + "] Total attemptable items: [" + totalItemsNeedingBackfill + "] Elapsed batch time: ["
                        + batchElapsed + "ms] Avg time/batch: [" + currentAvgBatchElapsedTime + "ms]");
            } else {
                // yes, caller probably logs exceptions later, but probably without stack traces, and we'd
                // like to advertise failures as quickly as possible, so we go ahead and emit an error log
                // here.
                log.error("Item hash backfill failed. Type: [" + concreteType.getSimpleName()
                        + "] Batch number: [" + batchNumber + "] Items attempted in batch: [" + itemsReadInBatch
                        + "] Items flushable (but failed) in batch: [" + itemsHashedInBatch
                        + "] Total items attempted: [" + recordsRead + "] Total items succeeded: ["
                        + recordsUpdated + "] Total attemptable items: [" + totalItemsNeedingBackfill
                        + "] Elapsed batch time: [" + batchElapsed + "ms] Avg time/batch: ["
                        + currentAvgBatchElapsedTime + "ms]", failure.get());
            }
        }
    } // end loop over all batches

    final long elapsedTime = System.currentTimeMillis() - startTime;
    log.debug("Hash backfill completed for items of type [" + concreteType.getSimpleName()
            + "]. Total items attempted: [" + recordsRead + "] Total items succeeded: [" + recordsUpdated
            + "] Target attemptable items: [" + totalItemsNeedingBackfill + "] Total elapsed time: ["
            + elapsedTime + "ms] Total batches: [" + batchNumber + "] Avg time/batch: ["
            + currentAvgBatchElapsedTime + "ms]");

    return new BackfillItemHashResult(elapsedTime, totalItems.get(), totalItemsNeedingBackfill.get(),
            recordsRead.get(), recordsUpdated.get(), flushSize, hashingErrors, otherErrors);
}