Example usage for java.util.concurrent.atomic.AtomicLong AtomicLong(long)

Introduction

On this page you can find example usage for the java.util.concurrent.atomic.AtomicLong constructor AtomicLong(long initialValue).

Prototype

public AtomicLong(long initialValue) 

Document

Creates a new AtomicLong with the given initial value.
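
As a quick orientation before the project examples below, here is a minimal, self-contained sketch (not taken from any of the projects on this page) showing the constructor and a few common operations on the resulting value:

import java.util.concurrent.atomic.AtomicLong;

public class AtomicLongConstructorExample {
    public static void main(String[] args) {
        // Start the counter at 100 instead of the default 0.
        AtomicLong counter = new AtomicLong(100L);

        counter.incrementAndGet();             // atomically adds 1  -> 101
        counter.addAndGet(9L);                 // atomically adds 9  -> 110
        long previous = counter.getAndSet(0L); // swaps in 0, returns 110

        System.out.println("previous=" + previous + ", current=" + counter.get());
    }
}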

Usage

From source file:org.apache.ignite.yardstick.cache.load.IgniteCacheRandomOperationBenchmark.java

/**
 * @throws Exception If failed.
 */
private void searchCache() throws Exception {
    availableCaches = new ArrayList<>(ignite().cacheNames().size());
    txCaches = new ArrayList<>();
    affCaches = new ArrayList<>();
    keysCacheClasses = new HashMap<>();
    valuesCacheClasses = new HashMap<>();
    replaceEntryProc = new BenchmarkReplaceValueEntryProcessor(null);
    rmvEntryProc = new BenchmarkRemoveEntryProcessor();
    cacheSqlDescriptors = new HashMap<>();
    operationStatistics = new HashMap<>();

    loadQueries();

    loadAllowedOperations();

    for (String cacheName : ignite().cacheNames()) {
        IgniteCache<Object, Object> cache = ignite().cache(cacheName);

        for (Operation op : Operation.values())
            operationStatistics.put(op + "_" + cacheName, new AtomicLong(0));

        CacheConfiguration configuration = cache.getConfiguration(CacheConfiguration.class);

        if (isClassDefinedInConfig(configuration)) {
            if (configuration.getMemoryMode() == CacheMemoryMode.OFFHEAP_TIERED
                    && configuration.getQueryEntities().size() > 2) {
                throw new IgniteException(
                        "Off-heap mode is unsupported by the load test due to bugs IGNITE-2982"
                                + " and IGNITE-2997");
            }

            ArrayList<Class> keys = new ArrayList<>();
            ArrayList<Class> values = new ArrayList<>();

            if (configuration.getQueryEntities() != null) {
                Collection<QueryEntity> entries = configuration.getQueryEntities();

                for (QueryEntity queryEntity : entries) {
                    try {
                        if (queryEntity.getKeyType() != null) {
                            Class keyCls = Class.forName(queryEntity.getKeyType());

                            if (ModelUtil.canCreateInstance(keyCls))
                                keys.add(keyCls);
                            else
                                throw new IgniteException("Class is unknown for the load test. Make sure you "
                                        + "specified its full name [clsName=" + queryEntity.getKeyType() + ']');
                        }

                        if (queryEntity.getValueType() != null) {
                            Class valCls = Class.forName(queryEntity.getValueType());

                            if (ModelUtil.canCreateInstance(valCls))
                                values.add(valCls);
                            else
                                throw new IgniteException("Class is unknown for the load test. Make sure you "
                                        + "specified its full name [clsName=" + queryEntity.getKeyType() + ']');

                            configureCacheSqlDescriptor(cacheName, queryEntity, valCls);
                        }
                    } catch (ClassNotFoundException e) {
                        BenchmarkUtils.println(e.getMessage());
                        BenchmarkUtils.println("This can be a BinaryObject. Ignoring exception.");

                        if (!cacheSqlDescriptors.containsKey(cacheName))
                            cacheSqlDescriptors.put(cacheName, new ArrayList<SqlCacheDescriptor>());
                    }
                }
            }

            if (configuration.getTypeMetadata() != null) {
                Collection<CacheTypeMetadata> entries = configuration.getTypeMetadata();

                for (CacheTypeMetadata cacheTypeMetadata : entries) {
                    try {
                        if (cacheTypeMetadata.getKeyType() != null) {
                            Class keyCls = Class.forName(cacheTypeMetadata.getKeyType());

                            if (ModelUtil.canCreateInstance(keyCls))
                                keys.add(keyCls);
                            else
                                throw new IgniteException("Class is unknown for the load test. Make sure you "
                                        + "specified its full name [clsName=" + cacheTypeMetadata.getKeyType()
                                        + ']');
                        }

                        if (cacheTypeMetadata.getValueType() != null) {
                            Class valCls = Class.forName(cacheTypeMetadata.getValueType());

                            if (ModelUtil.canCreateInstance(valCls))
                                values.add(valCls);
                            else
                                throw new IgniteException("Class is unknown for the load test. Make sure you "
                                        + "specified its full name [clsName=" + cacheTypeMetadata.getKeyType()
                                        + ']');
                        }
                    } catch (ClassNotFoundException e) {
                        BenchmarkUtils.println(e.getMessage());
                        BenchmarkUtils.println("This can be a BinaryObject. Ignoring exception.");

                        if (!cacheSqlDescriptors.containsKey(cacheName))
                            cacheSqlDescriptors.put(cacheName, new ArrayList<SqlCacheDescriptor>());
                    }
                }
            }

            keysCacheClasses.put(cacheName, keys.toArray(new Class[] {}));

            valuesCacheClasses.put(cacheName, values.toArray(new Class[] {}));
        } else
            keysCacheClasses.put(cacheName, new Class[] { randomKeyClass(cacheName) });

        valuesCacheClasses.put(cacheName, determineValueClasses(cacheName));

        if (configuration.getCacheMode() != CacheMode.LOCAL)
            affCaches.add(cache);

        if (configuration.getAtomicityMode() == CacheAtomicityMode.TRANSACTIONAL)
            txCaches.add(cache);

        availableCaches.add(cache);
    }
}
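
The benchmark above seeds its operationStatistics map with one zero-initialized AtomicLong per operation/cache pair, so worker threads can later bump the counters without extra locking. A reduced sketch of that pattern, assuming made-up cache names and a simplified Operation enum:

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;

public class OperationStatisticsSketch {
    enum Operation { PUT, GET, REMOVE }

    public static void main(String[] args) {
        Map<String, AtomicLong> operationStatistics = new HashMap<>();

        // Pre-create one zero-initialized counter per operation/cache pair,
        // mirroring the loop over Operation.values() in the benchmark above.
        for (String cacheName : new String[] { "cache-a", "cache-b" })
            for (Operation op : Operation.values())
                operationStatistics.put(op + "_" + cacheName, new AtomicLong(0));

        // Worker threads can later bump a counter without additional synchronization.
        operationStatistics.get("PUT_cache-a").incrementAndGet();

        operationStatistics.forEach((k, v) -> System.out.println(k + " = " + v.get()));
    }
}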

From source file:io.pravega.segmentstore.server.containers.StreamSegmentMapperTests.java

/**
 * Tests the ability of the StreamSegmentMapper to generate/return the Id of an existing StreamSegment, as well as
 * retrieving existing attributes.
 */
@Test
public void testGetOrAssignStreamSegmentId() {
    final int segmentCount = 10;
    final int transactionsPerSegment = 5;
    final long noSegmentId = ContainerMetadata.NO_STREAM_SEGMENT_ID;
    AtomicLong currentSegmentId = new AtomicLong(Integer.MAX_VALUE);
    Supplier<Long> nextSegmentId = () -> currentSegmentId.decrementAndGet() % 2 == 0 ? noSegmentId
            : currentSegmentId.get();

    @Cleanup
    TestContext context = new TestContext();

    HashSet<String> storageSegments = new HashSet<>();
    for (int i = 0; i < segmentCount; i++) {
        String segmentName = getName(i);
        storageSegments.add(segmentName);
        setAttributes(segmentName, nextSegmentId.get(), storageSegments.size() % ATTRIBUTE_COUNT, context);

        for (int j = 0; j < transactionsPerSegment; j++) {
            // There is a small chance of a name conflict here, but we don't care. As long as we get at least one
            // Transaction per segment, we should be fine.
            String transactionName = StreamSegmentNameUtils.getTransactionNameFromId(segmentName,
                    UUID.randomUUID());
            storageSegments.add(transactionName);
            setAttributes(transactionName, nextSegmentId.get(), storageSegments.size() % ATTRIBUTE_COUNT,
                    context);
        }
    }

    // We setup all necessary handlers, except the one for create. We do not need to create new Segments here.
    setupOperationLog(context);
    Predicate<String> isSealed = segmentName -> segmentName.hashCode() % 2 == 0;
    Function<String, Long> getInitialLength = segmentName -> (long) Math.abs(segmentName.hashCode());
    setupStorageGetHandler(context, storageSegments, segmentName -> new StreamSegmentInformation(segmentName,
            getInitialLength.apply(segmentName), isSealed.test(segmentName), false, new ImmutableDate()));

    // First, map all the parents (stand-alone segments).
    for (String name : storageSegments) {
        if (StreamSegmentNameUtils.getParentStreamSegmentName(name) == null) {
            long id = context.mapper.getOrAssignStreamSegmentId(name, TIMEOUT).join();
            Assert.assertNotEquals("No id was assigned for StreamSegment " + name,
                    ContainerMetadata.NO_STREAM_SEGMENT_ID, id);
            SegmentMetadata sm = context.metadata.getStreamSegmentMetadata(id);
            Assert.assertNotNull("No metadata was created for StreamSegment " + name, sm);
            long expectedLength = getInitialLength.apply(name);
            boolean expectedSeal = isSealed.test(name);
            Assert.assertEquals("Metadata does not have the expected length for StreamSegment " + name,
                    expectedLength, sm.getDurableLogLength());
            Assert.assertEquals(
                    "Metadata does not have the expected value for isSealed for StreamSegment " + name,
                    expectedSeal, sm.isSealed());

            val segmentState = context.stateStore.get(name, TIMEOUT).join();
            Map<UUID, Long> expectedAttributes = segmentState == null ? null : segmentState.getAttributes();
            SegmentMetadataComparer.assertSameAttributes(
                    "Unexpected attributes in metadata for StreamSegment " + name, expectedAttributes, sm);
        }
    }

    // Now, map all the Transactions.
    for (String name : storageSegments) {
        String parentName = StreamSegmentNameUtils.getParentStreamSegmentName(name);
        if (parentName != null) {
            long id = context.mapper.getOrAssignStreamSegmentId(name, TIMEOUT).join();
            Assert.assertNotEquals("No id was assigned for Transaction " + name,
                    ContainerMetadata.NO_STREAM_SEGMENT_ID, id);
            SegmentMetadata sm = context.metadata.getStreamSegmentMetadata(id);
            Assert.assertNotNull("No metadata was created for Transaction " + name, sm);
            long expectedLength = getInitialLength.apply(name);
            boolean expectedSeal = isSealed.test(name);
            Assert.assertEquals("Metadata does not have the expected length for Transaction " + name,
                    expectedLength, sm.getDurableLogLength());
            Assert.assertEquals(
                    "Metadata does not have the expected value for isSealed for Transaction " + name,
                    expectedSeal, sm.isSealed());

            val segmentState = context.stateStore.get(name, TIMEOUT).join();
            Map<UUID, Long> expectedAttributes = segmentState == null ? null : segmentState.getAttributes();
            SegmentMetadataComparer.assertSameAttributes(
                    "Unexpected attributes in metadata for Transaction " + name, expectedAttributes, sm);

            // Check parenthood.
            Assert.assertNotEquals("No parent defined in metadata for Transaction " + name,
                    ContainerMetadata.NO_STREAM_SEGMENT_ID, sm.getParentId());
            long parentId = context.metadata.getStreamSegmentId(parentName, false);
            Assert.assertEquals("Unexpected parent defined in metadata for Transaction " + name, parentId,
                    sm.getParentId());
        }
    }
}
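
The test above starts an AtomicLong at Integer.MAX_VALUE and drives it downward with decrementAndGet() inside a Supplier, returning a "no id" marker for every second value. A reduced sketch of that id-generation trick, with an illustrative stand-in for ContainerMetadata.NO_STREAM_SEGMENT_ID:

import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Supplier;

public class AlternatingIdSupplierSketch {
    static final long NO_SEGMENT_ID = Long.MIN_VALUE; // stand-in for the real "no id" constant

    public static void main(String[] args) {
        AtomicLong currentSegmentId = new AtomicLong(Integer.MAX_VALUE);

        // Every other call yields the "no id" marker; the rest yield a fresh, decreasing id.
        Supplier<Long> nextSegmentId = () -> currentSegmentId.decrementAndGet() % 2 == 0
                ? NO_SEGMENT_ID
                : currentSegmentId.get();

        for (int i = 0; i < 6; i++)
            System.out.println(nextSegmentId.get());
    }
}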

From source file:com.moscona.dataSpace.DataSpace.java

/**
 * This method is called by the data store after the object was loaded from disk and before it returns so that the
 * data space can recover to a functional state with all transients in a reasonable shape.
 * DO NOT CALL unless you're part of the implementation (Java has no friends)
 * @param dataStore
 * @param memoryManager
 */
public void initTransientsAfterRestore(IDataStore dataStore, IMemoryManager memoryManager)
        throws DataSpaceException {
    closeHelper = new CloseHelper();
    this.memoryManager = memoryManager;
    this.dataStore = dataStore;
    initNameSpaces(defaultPersistenceType);
    changesInProgress = new AtomicInteger(0);
    lastFlush = new AtomicLong(System.currentTimeMillis());
    // now we need to find all the vectors in the persistent data space and iterate over their segments and mark
    // them all as swapped out
    for (String name : persistentNameSpace.keySet()) {
        IDataElement element = persistentNameSpace.get(name);
        element.setNameSpace(persistentNameSpace);
        element.setPersistenceType(PersistenceType.PERSISTENT);
        if (AbstractVector.class.isAssignableFrom(element.getClass())) {
            AbstractVector vector = (AbstractVector) element;
            vector.initCloseHelper();
            vector.setDataSpace(this);
            vector.markAllSegmentsSwappedOut();
        }
    }
}
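
Here the initial value is a timestamp rather than a count: lastFlush starts at System.currentTimeMillis() so later code can tell how long ago the last flush happened. A small sketch of that idea, with an invented flush interval and method name:

import java.util.concurrent.atomic.AtomicLong;

public class LastFlushTrackerSketch {
    private static final long FLUSH_INTERVAL_MS = 5_000; // illustrative interval

    // Initialized to "now" so the first interval is measured from restore time.
    private final AtomicLong lastFlush = new AtomicLong(System.currentTimeMillis());

    boolean flushIfDue() {
        long now = System.currentTimeMillis();
        long last = lastFlush.get();
        // compareAndSet ensures only one caller wins the right to flush for this interval.
        if (now - last >= FLUSH_INTERVAL_MS && lastFlush.compareAndSet(last, now)) {
            System.out.println("flushing...");
            return true;
        }
        return false;
    }

    public static void main(String[] args) {
        System.out.println(new LastFlushTrackerSketch().flushIfDue());
    }
}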

From source file:hr.diskobolos.persistence.impl.EvaluationAnswerPersistenceImpl.java

@Override
public ConcurrentMap<TermsOfConditionStatus, AtomicLong> fetchTermsOfCompetitionStatistic() {
    CriteriaBuilder cb = entityManager.getCriteriaBuilder();
    CriteriaQuery<EvaluationAnswer> cq = cb.createQuery(EvaluationAnswer.class);
    Root<EvaluationAnswer> evaluationAnswer = cq.from(EvaluationAnswer.class);
    Join<EvaluationAnswer, QuestionChoicesDef> choiceDef = evaluationAnswer.join(EvaluationAnswer_.answer);
    Join<QuestionChoicesDef, EvaluationQuestionDef> questionDef = choiceDef
            .join(QuestionChoicesDef_.evaluationQuestionDef);
    ParameterExpression<QuestionnaireType> questionnaireType = cb.parameter(QuestionnaireType.class,
            "questionnaireType");
    cq.select(evaluationAnswer);
    cq.where(cb.equal(questionDef.get(EvaluationQuestionDef_.questionnaireType), questionnaireType));
    TypedQuery<EvaluationAnswer> query = entityManager.createQuery(cq);
    query.setParameter("questionnaireType", QuestionnaireType.TERMS_OF_CONDITION);
    List<EvaluationAnswer> evaluationAnswers = query.getResultList();

    ConcurrentMap<TermsOfConditionStatus, AtomicLong> distributionByTermsOfCompetitionStatus = new ConcurrentHashMap<>();

    List<EvaluationQuestionnaireDefEnum> questionnaireDef = Arrays
            .asList(EvaluationQuestionnaireDefEnum.values());
    long numberOfQuestion = questionnaireDef.stream()
            .filter(q -> q.getQuestionnaireType().equals(QuestionnaireType.TERMS_OF_CONDITION))
            .collect(Collectors.counting());

    List<MemberRegister> memberRegisters = evaluationAnswers.stream()
            .filter(StreamUtil.distinctByKey((EvaluationAnswer e) -> e.getMemberRegister().getId()))
            .map(EvaluationAnswer::getMemberRegister).collect(Collectors.toList());
    memberRegisters.stream().forEach((memberRegister) -> {
        TermsOfConditionStatus termsOfConditionStatus = TermsOfConditionStatus.NONE;
        if (evaluationAnswers.stream().filter(m -> m.getMemberRegister().equals(memberRegister))
                .count() == numberOfQuestion) {
            boolean isValid = evaluationAnswers.stream()
                    .filter(m -> m.getMemberRegister().equals(memberRegister))
                    .allMatch(e -> e.getAnswer().getLabel()
                            .equals(messageSource.getMessage("QuestionChoicesDef.yes", null, Locale.ENGLISH)));
            termsOfConditionStatus = isValid ? TermsOfConditionStatus.VALID : TermsOfConditionStatus.INVALID;
        }
        distributionByTermsOfCompetitionStatus.putIfAbsent(termsOfConditionStatus, new AtomicLong(0));
        distributionByTermsOfCompetitionStatus.get(termsOfConditionStatus).incrementAndGet();
    });

    return distributionByTermsOfCompetitionStatus;
}
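
The statistic above relies on the common putIfAbsent(key, new AtomicLong(0)) followed by incrementAndGet() idiom for counting occurrences per key in a ConcurrentMap. Distilled, with a stand-in enum for TermsOfConditionStatus:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicLong;

public class DistributionCounterSketch {
    enum Status { VALID, INVALID, NONE }

    public static void main(String[] args) {
        ConcurrentMap<Status, AtomicLong> distribution = new ConcurrentHashMap<>();

        for (Status s : new Status[] { Status.VALID, Status.NONE, Status.VALID }) {
            // Ensure a zero-initialized counter exists for this key, then bump it atomically.
            distribution.putIfAbsent(s, new AtomicLong(0));
            distribution.get(s).incrementAndGet();
        }

        distribution.forEach((k, v) -> System.out.println(k + " -> " + v.get()));
    }
}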

From source file:gobblin.runtime.Task.java

/**
 * Instantiate a new {@link Task}.
 *
 * @param context a {@link TaskContext} containing all necessary information to construct and run a {@link Task}
 * @param taskStateTracker a {@link TaskStateTracker} for tracking task state
 * @param taskExecutor a {@link TaskExecutor} for executing the {@link Task} and its {@link Fork}s
 * @param countDownLatch an optional {@link java.util.concurrent.CountDownLatch} used to signal the task completion
 */
public Task(TaskContext context, TaskStateTracker taskStateTracker, TaskExecutor taskExecutor,
        Optional<CountDownLatch> countDownLatch) {
    this.taskContext = context;
    this.taskState = context.getTaskState();
    this.jobId = this.taskState.getJobId();
    this.taskId = this.taskState.getTaskId();
    this.taskKey = this.taskState.getTaskKey();
    this.taskStateTracker = taskStateTracker;
    this.taskExecutor = taskExecutor;
    this.countDownLatch = countDownLatch;
    this.closer = Closer.create();
    this.closer.register(this.taskState.getTaskBrokerNullable());
    this.extractor = closer
            .register(new InstrumentedExtractorDecorator<>(this.taskState, this.taskContext.getExtractor()));

    this.converter = closer.register(new MultiConverter(this.taskContext.getConverters()));
    try {
        this.rowChecker = closer.register(this.taskContext.getRowLevelPolicyChecker());
    } catch (Exception e) {
        try {
            closer.close();
        } catch (Throwable t) {
            LOG.error("Failed to close all open resources", t);
        }
        throw new RuntimeException("Failed to instantiate row checker.", e);
    }

    this.taskMode = getExecutionModel(this.taskState);
    this.recordsPulled = new AtomicLong(0);
    this.lastRecordPulledTimestampMillis = 0;
    this.shutdownRequested = new AtomicBoolean(false);
    this.shutdownLatch = new CountDownLatch(1);

    // Setup Streaming constructs

    this.watermarkingStrategy = "FineGrain"; // TODO: Configure

    if (isStreamingTask()) {
        Extractor underlyingExtractor = this.taskContext.getRawSourceExtractor();
        if (!(underlyingExtractor instanceof StreamingExtractor)) {
            LOG.error(
                    "Extractor {}  is not an instance of StreamingExtractor but the task is configured to run in continuous mode",
                    underlyingExtractor.getClass().getName());
            throw new TaskInstantiationException("Extraction " + underlyingExtractor.getClass().getName()
                    + " is not an instance of StreamingExtractor but the task is configured to run in continuous mode");
        }

        this.watermarkStorage = Optional.of(taskContext.getWatermarkStorage());
        Config config;
        try {
            config = ConfigUtils.propertiesToConfig(taskState.getProperties());
        } catch (Exception e) {
            LOG.warn("Failed to deserialize taskState into Config.. continuing with an empty config", e);
            config = ConfigFactory.empty();
        }

        long commitIntervalMillis = ConfigUtils.getLong(config,
                TaskConfigurationKeys.STREAMING_WATERMARK_COMMIT_INTERVAL_MILLIS,
                TaskConfigurationKeys.DEFAULT_STREAMING_WATERMARK_COMMIT_INTERVAL_MILLIS);
        if (watermarkingStrategy.equals("FineGrain")) { // TODO: Configure
            this.watermarkTracker = Optional.of(this.closer.register(new FineGrainedWatermarkTracker(config)));
            this.watermarkManager = Optional.of((WatermarkManager) this.closer
                    .register(new TrackerBasedWatermarkManager(this.watermarkStorage.get(),
                            this.watermarkTracker.get(), commitIntervalMillis, Optional.of(this.LOG))));

        } else {
            // writer-based watermarking
            this.watermarkManager = Optional
                    .of((WatermarkManager) this.closer.register(new MultiWriterWatermarkManager(
                            this.watermarkStorage.get(), commitIntervalMillis, Optional.of(this.LOG))));
            this.watermarkTracker = Optional.absent();
        }
    } else {
        this.watermarkManager = Optional.absent();
        this.watermarkTracker = Optional.absent();
        this.watermarkStorage = Optional.absent();
    }
}
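
In this constructor, recordsPulled = new AtomicLong(0) gives the task a counter that its forks can update concurrently while the state tracker reads it. A stripped-down sketch of that pattern; the class and method names here are illustrative, not Gobblin's:

import java.util.concurrent.atomic.AtomicLong;

public class RecordCounterSketch {
    private final AtomicLong recordsPulled = new AtomicLong(0);

    void onRecordPulled() {
        recordsPulled.incrementAndGet(); // safe to call from multiple fork threads
    }

    long reportProgress() {
        return recordsPulled.get(); // read without blocking the writers
    }

    public static void main(String[] args) {
        RecordCounterSketch task = new RecordCounterSketch();
        task.onRecordPulled();
        task.onRecordPulled();
        System.out.println("records pulled: " + task.reportProgress());
    }
}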

From source file:org.lendingclub.mercator.docker.SwarmScanner.java

public void scan() {
    WebTarget t = extractWebTarget(dockerScanner.getDockerClient());
    logger.info("Scanning {}", t);
    JsonNode response = t.path("/info").request().buildGet().invoke(JsonNode.class);

    JsonNode swarm = response.path("Swarm");
    JsonNode cluster = swarm.path("Cluster");
    String swarmClusterId = cluster.path("ID").asText();

    // need to parse these dates
    String createdAt = cluster.path("CreatedAt").asText();
    String updatedAt = cluster.path("UpdatedAt").asText();
    ObjectNode props = mapper.createObjectNode();
    props.put("swarmClusterId", swarmClusterId);
    props.put("createdAt", createdAt);
    props.put("updatedAt", updatedAt);

    JsonNode swarmNode = dockerScanner.getNeoRxClient().execCypher(
            "merge (c:DockerSwarm {swarmClusterId:{id}}) set c+={props},c.updateTs=timestamp() return c", "id",
            swarmClusterId, "props", props).blockingFirst(MissingNode.getInstance());

    if (isUnixDomainScoket(t.getUri().toString())) {
        // Only set managerApiUrl to a unix domain socket if it has not
        // already been set.
        // This is useful for trident
        if (!isUnixDomainScoket(swarmNode.path("managerApiUrl").asText())) {

            String LOCAL_DOCKER_DAEMON_SOCKET_URL = "unix:///var/run/docker.sock";
            logger.info("setting mangerApiUrl to {} for swarm {}", LOCAL_DOCKER_DAEMON_SOCKET_URL,
                    swarmClusterId);

            String name = "local";
            dockerScanner.getNeoRxClient()
                    .execCypher("match (c:DockerSwarm {name:{name}}) return c", "name", name).forEach(it -> {
                        String oldSwarmClusterId = it.path("swarmClusterId").asText();
                        if (!swarmClusterId.equals(oldSwarmClusterId)) {
                            dockerScanner.getNeoRxClient().execCypher(
                                    "match (c:DockerSwarm {swarmClusterId:{swarmClusterId}}) detach delete c",
                                    "swarmClusterId", oldSwarmClusterId);
                        }
                    });

            dockerScanner.getNeoRxClient().execCypher(
                    "match (c:DockerSwarm {swarmClusterId:{id}}) set c.managerApiUrl={managerApiUrl},c.name={name},c.tridentClusterId={name} return c",
                    "id", swarmClusterId, "managerApiUrl", LOCAL_DOCKER_DAEMON_SOCKET_URL, "name", name);

        }
    }

    AtomicBoolean fail = new AtomicBoolean(false);
    response = t.path("/nodes").request().buildGet().invoke(JsonNode.class);
    AtomicLong earliestTimestamp = new AtomicLong(Long.MAX_VALUE);
    response.elements().forEachRemaining(it -> {
        try {
            earliestTimestamp.set(
                    Math.min(earliestTimestamp.get(), saveDockerNode(swarmClusterId, flattenSwarmNode(it))));
        } catch (RuntimeException e) {
            logger.warn("problem", e);
            fail.set(true);
        }
    });

    if (!fail.get()) {
        if (earliestTimestamp.get() < System.currentTimeMillis()) {
            logger.info("deleting DockerHost nodes before with updateTs<{}", earliestTimestamp.get());
            dockerScanner.getNeoRxClient().execCypher(
                    "match (s:DockerSwarm {swarmClusterId:{id}})--(x:DockerHost) where s.updateTs>x.updateTs detach delete x",
                    "id", swarmClusterId);
        }
    }
    scanServicesForSwarm(swarmClusterId);
    scanTasksForSwarm(swarmClusterId);
}
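
Because a lambda cannot reassign a captured local variable, the scanner above seeds an AtomicLong with Long.MAX_VALUE and folds Math.min into it while iterating the /nodes response, effectively treating it as a mutable minimum holder. A compact sketch with fabricated timestamps:

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;

public class EarliestTimestampSketch {
    public static void main(String[] args) {
        List<Long> nodeUpdateTimes = Arrays.asList(1_700_000_300L, 1_700_000_100L, 1_700_000_200L);

        // Start at MAX_VALUE so the first observed value always wins the min().
        AtomicLong earliestTimestamp = new AtomicLong(Long.MAX_VALUE);

        nodeUpdateTimes.forEach(ts ->
                earliestTimestamp.set(Math.min(earliestTimestamp.get(), ts)));

        System.out.println("earliest = " + earliestTimestamp.get());
    }
}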

From source file:org.commonjava.indy.core.ctl.NfcController.java

public NotFoundCacheInfoDTO getInfo(StoreKey key) throws IndyWorkflowException {
    NotFoundCacheInfoDTO dto = new NotFoundCacheInfoDTO();
    final AtomicLong size = new AtomicLong(0);
    try {
        switch (key.getType()) {
        case group: {
            //Warn: This is very expensive if group holds thousands of repositories
            final List<StoreKey> stores = storeManager.query().packageType(key.getPackageType())
                    .getOrderedConcreteStoresInGroup(key.getName()).stream()
                    .map(artifactStore -> artifactStore.getKey()).collect(Collectors.toList());

            if (stores.size() >= MAX_GROUP_MEMBER_SIZE_FOR_GET_MISSING) {
                throw new IndyWorkflowException(SC_UNPROCESSABLE_ENTITY,
                        "Get missing info for group failed (too many members), size: " + stores.size());
            }

            for (final StoreKey storeKey : stores) {
                size.addAndGet(cache.getSize(storeKey));
            }
            break;
        }
        default: {
            size.addAndGet(cache.getSize(key));
            break;
        }
        }
        dto.setSize(size.get());
        return dto;
    } catch (final IndyDataException e) {
        throw new IndyWorkflowException("Failed to get info for ArtifactStore: %s.", e, key);
    }
}
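
Here size starts at zero and is grown with addAndGet while looping over the group's member stores, so the total is built up in place. Reduced to its essentials, with invented per-store sizes:

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;

public class SizeAccumulatorSketch {
    public static void main(String[] args) {
        List<Long> perStoreSizes = Arrays.asList(10L, 25L, 7L);

        final AtomicLong size = new AtomicLong(0);
        perStoreSizes.forEach(s -> size.addAndGet(s)); // accumulate across group members

        System.out.println("total missing entries: " + size.get());
    }
}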

From source file:com.facebook.presto.accumulo.index.Indexer.java

public Indexer(Connector connector, Authorizations auths, AccumuloTable table, BatchWriterConfig writerConfig)
        throws TableNotFoundException {
    this.connector = requireNonNull(connector, "connector is null");
    this.table = requireNonNull(table, "table is null");
    this.writerConfig = requireNonNull(writerConfig, "writerConfig is null");
    requireNonNull(auths, "auths is null");

    this.serializer = table.getSerializerInstance();

    // Create our batch writer
    indexWriter = connector.createBatchWriter(table.getIndexTableName(), writerConfig);

    ImmutableMultimap.Builder<ByteBuffer, ByteBuffer> indexColumnsBuilder = ImmutableMultimap.builder();
    Map<ByteBuffer, Map<ByteBuffer, Type>> indexColumnTypesBuilder = new HashMap<>();

    // Initialize metadata
    table.getColumns().forEach(columnHandle -> {
        if (columnHandle.isIndexed()) {
            // Wrap the column family and qualifier for this column and add it to
            // collection of indexed columns
            ByteBuffer family = wrap(columnHandle.getFamily().get().getBytes(UTF_8));
            ByteBuffer qualifier = wrap(columnHandle.getQualifier().get().getBytes(UTF_8));
            indexColumnsBuilder.put(family, qualifier);

            // Create a mapping for this column's Presto type, again creating a new one for the
            // family if necessary
            Map<ByteBuffer, Type> types = indexColumnTypesBuilder.get(family);
            if (types == null) {
                types = new HashMap<>();
                indexColumnTypesBuilder.put(family, types);
            }
            types.put(qualifier, columnHandle.getType());
        }
    });

    indexColumns = indexColumnsBuilder.build();
    indexColumnTypes = ImmutableMap.copyOf(indexColumnTypesBuilder);

    // If there are no indexed columns, throw an exception
    if (indexColumns.isEmpty()) {
        throw new PrestoException(NOT_SUPPORTED,
                "No indexed columns in table metadata. Refusing to index a table with no indexed columns");
    }

    // Initialize metrics map
    // This metrics map is for column cardinality
    metrics.put(METRICS_TABLE_ROW_COUNT, new AtomicLong(0));

    // Scan the metrics table for existing first row and last row
    Pair<byte[], byte[]> minmax = getMinMaxRowIds(connector, table, auths);
    firstRow = minmax.getLeft();
    lastRow = minmax.getRight();
}
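
The indexer pre-seeds its metrics map with a zero row-count counter so every later index operation can simply increment it without null checks. A minimal sketch of that bookkeeping; the key constant and method are placeholders, not Presto's:

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;

public class IndexerMetricsSketch {
    private static final String METRICS_TABLE_ROW_COUNT = "___rows___"; // placeholder key
    private final Map<String, AtomicLong> metrics = new HashMap<>();

    IndexerMetricsSketch() {
        // Seed the counter up front so callers never have to check for a missing entry.
        metrics.put(METRICS_TABLE_ROW_COUNT, new AtomicLong(0));
    }

    void index() {
        metrics.get(METRICS_TABLE_ROW_COUNT).incrementAndGet();
    }

    public static void main(String[] args) {
        IndexerMetricsSketch indexer = new IndexerMetricsSketch();
        indexer.index();
        indexer.index();
        System.out.println("rows indexed: " + indexer.metrics.get(METRICS_TABLE_ROW_COUNT).get());
    }
}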

From source file:fi.luontola.cqrshotel.framework.EventStoreContract.java

private Runnable createRuntimeInvariantChecker(int batchSize) {
    long initialPosition = eventStore.getCurrentPosition();
    AtomicLong position = new AtomicLong(initialPosition);
    return () -> {
        long pos = position.get();
        List<Event> events = eventStore.getAllEvents(pos);
        assertAtomicBatches(batchSize, events);
        position.set(pos + events.size());
    };
}
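
The helper above captures an AtomicLong inside the returned Runnable so each invocation remembers how far it has already read; a plain long local could not be mutated from within the lambda. A self-contained sketch of that closure pattern with a simulated event source:

import java.util.concurrent.atomic.AtomicLong;

public class PositionTrackingCheckerSketch {
    public static void main(String[] args) {
        long initialPosition = 0L;
        AtomicLong position = new AtomicLong(initialPosition);

        Runnable checker = () -> {
            long pos = position.get();
            int newEvents = 3; // pretend we fetched 3 events starting at pos
            System.out.println("checked events from position " + pos);
            position.set(pos + newEvents); // remember progress for the next run
        };

        checker.run();
        checker.run(); // the second run starts where the first one left off
    }
}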

From source file:com.koda.integ.hbase.blockcache.OnHeapBlockCache.java

/**
 * Configurable constructor.  Use this constructor if not using defaults.
 * @param maxSize maximum size of this cache, in bytes
 * @param blockSize expected average size of blocks, in bytes
 * @param evictionThread whether to run evictions in a bg thread or not
 * @param mapInitialSize initial size of backing ConcurrentHashMap
 * @param mapLoadFactor initial load factor of backing ConcurrentHashMap
 * @param mapConcurrencyLevel initial concurrency factor for backing CHM
 * @param minFactor percentage of total size that eviction will evict until
 * @param acceptableFactor percentage of total size that triggers eviction
 * @param singleFactor percentage of total size for single-access blocks
 * @param multiFactor percentage of total size for multiple-access blocks
 * @param memoryFactor percentage of total size for in-memory blocks
 */
public OnHeapBlockCache(long maxSize, long blockSize, boolean evictionThread, int mapInitialSize,
        float mapLoadFactor, int mapConcurrencyLevel, float minFactor, float acceptableFactor,
        float singleFactor, float multiFactor, float memoryFactor) {
    if (singleFactor + multiFactor + memoryFactor != 1) {
        throw new IllegalArgumentException("Single, multi, and memory factors " + " should total 1.0");
    }
    if (minFactor >= acceptableFactor) {
        throw new IllegalArgumentException("minFactor must be smaller than acceptableFactor");
    }
    if (minFactor >= 1.0f || acceptableFactor >= 1.0f) {
        throw new IllegalArgumentException("all factors must be < 1");
    }
    this.maxSize = maxSize;
    this.blockSize = blockSize;
    map = new ConcurrentHashMap<BlockCacheKey, CachedBlock>(mapInitialSize, mapLoadFactor, mapConcurrencyLevel);
    this.minFactor = minFactor;
    this.acceptableFactor = acceptableFactor;
    this.singleFactor = singleFactor;
    this.multiFactor = multiFactor;
    this.memoryFactor = memoryFactor;
    this.stats = new CacheStats();
    this.count = new AtomicLong(0);
    this.elements = new AtomicLong(0);
    this.overhead = calculateOverhead(maxSize, blockSize, mapConcurrencyLevel);
    this.size = new AtomicLong(this.overhead);
    if (evictionThread) {
        this.evictionThread = new EvictionThread(this);
        this.evictionThread.start(); // FindBugs SC_START_IN_CTOR
    } else {
        this.evictionThread = null;
    }
}
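
Note that count and elements start at zero while size is seeded with the precomputed overhead, so later accounting only needs to add or subtract per-block sizes. A trimmed sketch of that accounting with an arbitrary overhead figure:

import java.util.concurrent.atomic.AtomicLong;

public class CacheAccountingSketch {
    private final AtomicLong count;    // eviction passes performed
    private final AtomicLong elements; // blocks currently cached
    private final AtomicLong size;     // bytes used, including fixed overhead

    CacheAccountingSketch(long overhead) {
        this.count = new AtomicLong(0);
        this.elements = new AtomicLong(0);
        this.size = new AtomicLong(overhead); // start from the non-zero baseline
    }

    void cacheBlock(long blockBytes) {
        elements.incrementAndGet();
        size.addAndGet(blockBytes);
    }

    void runEviction(long bytesFreed, long blocksRemoved) {
        count.incrementAndGet();          // one more eviction pass completed
        elements.addAndGet(-blocksRemoved);
        size.addAndGet(-bytesFreed);
    }

    public static void main(String[] args) {
        CacheAccountingSketch accounting = new CacheAccountingSketch(1_024);
        accounting.cacheBlock(4_096);
        accounting.runEviction(4_096, 1);
        System.out.println("evictions=" + accounting.count.get()
                + ", elements=" + accounting.elements.get()
                + ", bytes=" + accounting.size.get());
    }
}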