Example usage for java.util.concurrent.atomic AtomicLong AtomicLong(long)

Introduction

This page collects example usages of the java.util.concurrent.atomic.AtomicLong constructor that takes an initial value, AtomicLong(long initialValue).

Prototype

public AtomicLong(long initialValue) 

Document

Creates a new AtomicLong with the given initial value.
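
For orientation, here is a minimal, self-contained sketch of the constructor in use (not taken from any of the examples below): it seeds the counter with an initial value and then mutates it with the class's atomic read-modify-write operations.

import java.util.concurrent.atomic.AtomicLong;

public class AtomicLongBasics {
    public static void main(String[] args) {
        // Seed the counter via AtomicLong(long initialValue).
        AtomicLong counter = new AtomicLong(100L);

        // Atomic read-modify-write operations; no external synchronization needed.
        long afterIncrement = counter.incrementAndGet(); // 101
        long previous = counter.getAndAdd(10L);          // returns 101; counter is now 111

        System.out.println(afterIncrement + " " + previous + " " + counter.get());
    }
}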

Usage

From source file:com.nextdoor.bender.handler.BaseHandler.java

/**
 * Method called by Handler implementations to process records.
 *
 * @param context Lambda invocation context.
 * @throws HandlerException
 */
private void processInternal(Context context) throws HandlerException {
    Stat runtime = new Stat("runtime.ns");
    runtime.start();

    Source source = this.getSource();
    DeserializerProcessor deser = source.getDeserProcessor();
    List<OperationProcessor> operations = source.getOperationProcessors();
    List<String> containsStrings = source.getContainsStrings();
    List<Pattern> regexPatterns = source.getRegexPatterns();

    this.getIpcService().setContext(context);

    Iterator<InternalEvent> events = this.getInternalEventIterator();

    /*
     * For logging purposes log when the function started running
     */
    this.monitor.invokeTimeNow();

    AtomicLong eventCount = new AtomicLong(0);
    AtomicLong oldestArrivalTime = new AtomicLong(System.currentTimeMillis());
    AtomicLong oldestOccurrenceTime = new AtomicLong(System.currentTimeMillis());

    /*
     * eventQueue allows for InternalEvents to be pulled from the Iterator and published to a
     * stream. A Thread is created that loops through events in the iterator and offers them to the
     * queue. Note that offering will be blocked if the queue is full (back pressure being applied).
     * When the iterator reaches the end (hasNext = false) the queue is closed.
     */
    this.eventQueue = new Queue<InternalEvent>(new LinkedBlockingQueue<InternalEvent>(this.queueSize));

    /*
     * Thread will live for duration of invocation and supply Stream with events.
     */
    new Thread(new Runnable() {
        @Override
        public void run() {
            while (events.hasNext()) {
                try {
                    eventQueue.offer(events.next());
                } catch (Queue.ClosedQueueException e) {
                    break;
                }
            }
            try {
                eventQueue.close();
            } catch (Queue.ClosedQueueException e) {
            }
        }
    }).start();

    Stream<InternalEvent> input = this.eventQueue.jdkStream();

    /*
     * Filter out raw events
     */
    Stream<InternalEvent> filtered = input.filter(
            /*
             * Perform regex filter
             */
            ievent -> {
                eventCount.incrementAndGet();
                String eventStr = ievent.getEventString();

                /*
                 * Apply String contains filters before deserialization
                 */
                for (String containsString : containsStrings) {
                    if (eventStr.contains(containsString)) {
                        return false;
                    }
                }

                /*
                 * Apply regex patterns before deserialization
                 */
                for (Pattern regexPattern : regexPatterns) {
                    Matcher m = regexPattern.matcher(eventStr);

                    if (m.find()) {
                        return false;
                    }
                }

                return true;
            });

    /*
     * Deserialize
     */
    Stream<InternalEvent> deserialized = filtered.map(ievent -> {
        DeserializedEvent data = deser.deserialize(ievent.getEventString());

        if (data == null || data.getPayload() == null) {
            logger.warn("Failed to deserialize: " + ievent.getEventString());
            return null;
        }

        ievent.setEventObj(data);
        return ievent;
    }).filter(Objects::nonNull);

    /*
     * Perform Operations
     */
    Stream<InternalEvent> operated = deserialized;
    for (OperationProcessor operation : operations) {
        operated = operation.perform(operated);
    }

    /*
     * Serialize
     */
    Stream<InternalEvent> serialized = operated.map(ievent -> {
        try {
            String raw = null;
            raw = this.ser.serialize(this.wrapper.getWrapped(ievent));
            ievent.setSerialized(raw);
            return ievent;
        } catch (SerializationException e) {
            return null;
        }
    }).filter(Objects::nonNull);

    /*
     * Transport
     */
    serialized.forEach(ievent -> {
        /*
         * Update times
         */
        updateOldest(oldestArrivalTime, ievent.getArrivalTime());
        updateOldest(oldestOccurrenceTime, ievent.getEventTime());

        try {
            this.getIpcService().add(ievent);
        } catch (TransportException e) {
            logger.warn("error adding event", e);
        }
    });

    /*
     * Wait for transporters to finish
     */
    try {
        this.getIpcService().flush();
    } catch (TransportException e) {
        throw new HandlerException("encountered TransportException while shutting down ipcService", e);
    } catch (InterruptedException e) {
        throw new HandlerException("thread was interrupted while shutting down ipcService", e);
    } finally {
        String evtSource = this.getSourceName();

        runtime.stop();

        if (!this.skipWriteStats) {
            writeStats(eventCount.get(), oldestArrivalTime.get(), oldestOccurrenceTime.get(), evtSource,
                    runtime);
        }

        if (logger.isTraceEnabled()) {
            getGCStats();
        }
    }
}
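
The updateOldest(AtomicLong, long) helper called in the transport stage is not shown in this excerpt, so the following is an assumption about its shape rather than Bender's actual code: a compareAndSet retry loop that keeps the smallest (oldest) timestamp seen so far.

// Hypothetical helper: retain the minimum timestamp observed across racing updates.
private static void updateOldest(AtomicLong oldest, long candidate) {
    while (true) {
        long current = oldest.get();
        if (candidate >= current) {
            return; // stored value is already older (smaller); nothing to do
        }
        if (oldest.compareAndSet(current, candidate)) {
            return; // recorded the older timestamp
        }
        // another thread updated the value first; re-read and retry
    }
}

On Java 8 and later the same effect can be written as oldest.accumulateAndGet(candidate, Math::min).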

From source file:com.joyent.manta.client.MantaSeekableByteChannel.java

@Override
public SeekableByteChannel position(final long newPosition) throws IOException {
    return new MantaSeekableByteChannel(new AtomicReference<>(), new AtomicReference<>(), path,
            new AtomicLong(newPosition), httpHelper);
}
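
The new position is wrapped in a fresh AtomicLong, so the channel returned by position(long) tracks its offset independently of the original channel. As a standalone illustration (hypothetical, not Manta's implementation), an AtomicLong offset can be read and advanced atomically after each read:

import java.util.concurrent.atomic.AtomicLong;

public class PositionHolder {
    public static void main(String[] args) {
        // Start the offset at an arbitrary seek position.
        AtomicLong position = new AtomicLong(1024L);

        int bytesRead = 512; // e.g. the return value of a read(...) call
        long newOffset = position.addAndGet(bytesRead); // advance atomically

        System.out.println("now at offset " + newOffset); // 1536
    }
}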

From source file:org.apache.hadoop.hbase.wal.TestWALFactory.java

/**
 * Test new HDFS-265 sync.
 * @throws Exception
 */
@Test
public void Broken_testSync() throws Exception {
    TableName tableName = TableName.valueOf(currentTest.getMethodName());
    // First verify that using streams all works.
    Path p = new Path(dir, currentTest.getMethodName() + ".fsdos");
    FSDataOutputStream out = fs.create(p);
    out.write(tableName.getName());
    Method syncMethod = null;
    try {
        syncMethod = out.getClass().getMethod("hflush", new Class<?>[] {});
    } catch (NoSuchMethodException e) {
        try {
            syncMethod = out.getClass().getMethod("sync", new Class<?>[] {});
        } catch (NoSuchMethodException ex) {
            fail("This version of Hadoop supports neither Syncable.sync() " + "nor Syncable.hflush().");
        }
    }
    syncMethod.invoke(out, new Object[] {});
    FSDataInputStream in = fs.open(p);
    assertTrue(in.available() > 0);
    byte[] buffer = new byte[1024];
    int read = in.read(buffer);
    assertEquals(tableName.getName().length, read);
    out.close();
    in.close();

    final AtomicLong sequenceId = new AtomicLong(1);
    final int total = 20;
    WAL.Reader reader = null;

    try {
        HRegionInfo info = new HRegionInfo(tableName, null, null, false);
        HTableDescriptor htd = new HTableDescriptor(tableName);
        htd.addFamily(new HColumnDescriptor(tableName.getName()));
        final WAL wal = wals.getWAL(info.getEncodedNameAsBytes());

        for (int i = 0; i < total; i++) {
            WALEdit kvs = new WALEdit();
            kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), tableName.getName()));
            wal.append(htd, info,
                    new WALKey(info.getEncodedNameAsBytes(), tableName, System.currentTimeMillis()), kvs,
                    sequenceId, true, null);
        }
        // Now call sync and try reading.  Opening a Reader before you sync just
        // gives you EOFE.
        wal.sync();
        // Open a Reader.
        Path walPath = DefaultWALProvider.getCurrentFileName(wal);
        reader = wals.createReader(fs, walPath);
        int count = 0;
        WAL.Entry entry = new WAL.Entry();
        while ((entry = reader.next(entry)) != null)
            count++;
        assertEquals(total, count);
        reader.close();
        // Add test that checks to see that an open of a Reader works on a file
        // that has had a sync done on it.
        for (int i = 0; i < total; i++) {
            WALEdit kvs = new WALEdit();
            kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), tableName.getName()));
            wal.append(htd, info,
                    new WALKey(info.getEncodedNameAsBytes(), tableName, System.currentTimeMillis()), kvs,
                    sequenceId, true, null);
        }
        wal.sync();
        reader = wals.createReader(fs, walPath);
        count = 0;
        while ((entry = reader.next(entry)) != null)
            count++;
        assertTrue(count >= total);
        reader.close();
        // If I sync, should see double the edits.
        wal.sync();
        reader = wals.createReader(fs, walPath);
        count = 0;
        while ((entry = reader.next(entry)) != null)
            count++;
        assertEquals(total * 2, count);
        reader.close();
        // Now do a test that ensures stuff works when we go over block boundary,
        // especially that we return good length on file.
        final byte[] value = new byte[1025 * 1024]; // Make a 1M value.
        for (int i = 0; i < total; i++) {
            WALEdit kvs = new WALEdit();
            kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), value));
            wal.append(htd, info,
                    new WALKey(info.getEncodedNameAsBytes(), tableName, System.currentTimeMillis()), kvs,
                    sequenceId, true, null);
        }
        // Now I should have written out lots of blocks.  Sync then read.
        wal.sync();
        reader = wals.createReader(fs, walPath);
        count = 0;
        while ((entry = reader.next(entry)) != null)
            count++;
        assertEquals(total * 3, count);
        reader.close();
        // shutdown and ensure that Reader gets right length also.
        wal.shutdown();
        reader = wals.createReader(fs, walPath);
        count = 0;
        while ((entry = reader.next(entry)) != null)
            count++;
        assertEquals(total * 3, count);
        reader.close();
    } finally {
        if (reader != null)
            reader.close();
    }
}

From source file:org.apache.flink.monitor.trackers.HistogramTaskTracker.java

private final void addToLocalHistogram(Tuple tuple) {
    Serializable field = tuple.getField(this.keyPos);
    if (field == null) {
        return;
    }
    if (!this.exact) {
        /* pass Serializable to Bloom filter for presence indication */
        this.filter.add(field);
        AtomicLong frequency = this.topKMap.get(field);
        if (frequency == null) {
            frequency = new AtomicLong(0L);
            this.topKMap.put(field, frequency);
        }
        this.topKMap.get(field).incrementAndGet();

    } else {
        if (this.exactMap.get(field) == null) {
            this.exactMap.put(field, new AtomicLong(1));
        } else {
            this.exactMap.get(field).incrementAndGet();
        }
    }
    this.tuplecount++;

}
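
Note that the get/null-check/put sequence above is not atomic: if topKMap is a concurrent map shared between threads, two callers can both observe null and one freshly inserted AtomicLong can be lost. A common alternative (a sketch assuming a ConcurrentHashMap-backed map, not Flink's actual code) is computeIfAbsent, which performs the insert-if-missing step atomically:

import java.io.Serializable;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicLong;

class FrequencyCounter {
    private final ConcurrentMap<Serializable, AtomicLong> frequencies = new ConcurrentHashMap<>();

    // computeIfAbsent creates the per-key AtomicLong at most once,
    // so concurrent callers never lose an increment.
    void countOccurrence(Serializable field) {
        frequencies.computeIfAbsent(field, k -> new AtomicLong(0L)).incrementAndGet();
    }

    long frequencyOf(Serializable field) {
        AtomicLong frequency = frequencies.get(field);
        return frequency == null ? 0L : frequency.get();
    }
}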

From source file:com.seajas.search.profiler.service.repository.RepositoryService.java

/**
 * Process a paged list of all resources within the repository.
 *
 * @param collection
 * @param sourceId
 * @param taxonomyMatch
 * @param url
 * @param startDate
 * @param endDate
 * @param parameters
 * @param rangeStart
 * @param rangeEnd
 * @param processor
 * @return boolean
 */
public boolean processResources(final String collection, final Integer sourceId, final String taxonomyMatch,
        final String url, final Date startDate, final Date endDate, final Map<String, String> parameters,
        final Integer rangeStart, final Integer rangeEnd, final RepositoryProcessor processor) {
    Query query = createQuery(true, collection, sourceId, taxonomyMatch, startDate, endDate, url, parameters);

    query.fields().include("_id");
    query.fields().include("currentState");
    query.fields().include("element.hostname");

    // Determine the total number of documents this affects

    final AtomicLong currentResult = new AtomicLong(0L);

    // Then skip to it and get going

    query.skip(rangeStart);

    if (rangeEnd != null)
        query.limit(rangeEnd - rangeStart);

    if (logger.isInfoEnabled())
        logger.info(String.format("Processing ranges %d to %s of (unknown) results through the given processor",
                rangeStart, rangeEnd != null ? rangeEnd.toString() : "end"));

    mongoTemplate.executeQuery(query, defaultCollection, new DocumentCallbackHandler() {
        @Override
        public void processDocument(final DBObject dbObject) throws MongoException, DataAccessException {
            CompositeState currentState = CompositeState.valueOf((String) dbObject.get("currentState"));

            if (!EnumSet.of(CompositeState.Content, CompositeState.CompletedDocument,
                    CompositeState.InitialDocument).contains(currentState)) {
                if (logger.isDebugEnabled()) {
                    ObjectId id = (ObjectId) dbObject.get("_id");

                    logger.debug("Skipping over element with ID '" + id + "' and current state '" + currentState
                            + "'");
                }

                return;
            }

            ObjectId id = (ObjectId) dbObject.get("_id");
            String hostname = (String) ((BasicDBObject) dbObject.get("element")).get("hostname");

            if (logger.isInfoEnabled())
                logger.info("Processing re-indexing entry " + currentResult.getAndIncrement()
                        + " / (unknown) with ID '" + id + "' and hostname '" + hostname + "'");

            processor.process(id, hostname);
        }
    });

    return true;
}
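
Here currentResult is declared final and incremented from inside the anonymous DocumentCallbackHandler; a plain long could not be mutated there, because anonymous classes and lambdas may only capture (effectively) final variables. A reduced sketch of the same pattern with hypothetical names:

import java.util.List;
import java.util.concurrent.atomic.AtomicLong;

class ProgressExample {
    // The final AtomicLong is captured by the lambda and mutated atomically,
    // which a plain local long could not be.
    static void processAll(List<String> ids) {
        final AtomicLong currentResult = new AtomicLong(0L);
        ids.forEach(id -> System.out.println(
                "Processing entry " + currentResult.getAndIncrement() + " with ID '" + id + "'"));
    }
}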

From source file:org.apache.hadoop.hbase.regionserver.wal.TestFSHLog.java

/**
 * On rolling a wal after reaching the threshold, {@link WAL#rollWriter()} returns the
 * list of regions which should be flushed in order to archive the oldest wal file.
 * <p>
 * This method tests this behavior by inserting edits and rolling the wal enough times to reach
 * the max number of logs threshold. It checks whether we get the "right regions" for flush on
 * rolling the wal.
 * @throws Exception
 */
@Test
public void testFindMemStoresEligibleForFlush() throws Exception {
    LOG.debug("testFindMemStoresEligibleForFlush");
    Configuration conf1 = HBaseConfiguration.create(conf);
    conf1.setInt("hbase.regionserver.maxlogs", 1);
    FSHLog wal = new FSHLog(fs, FSUtils.getRootDir(conf1), dir.toString(), HConstants.HREGION_OLDLOGDIR_NAME,
            conf1, null, true, null, null);
    HTableDescriptor t1 = new HTableDescriptor(TableName.valueOf("t1")).addFamily(new HColumnDescriptor("row"));
    HTableDescriptor t2 = new HTableDescriptor(TableName.valueOf("t2")).addFamily(new HColumnDescriptor("row"));
    HRegionInfo hri1 = new HRegionInfo(t1.getTableName(), HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
    HRegionInfo hri2 = new HRegionInfo(t2.getTableName(), HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
    // variables to mock region sequenceIds
    final AtomicLong sequenceId1 = new AtomicLong(1);
    final AtomicLong sequenceId2 = new AtomicLong(1);
    // add edits and roll the wal
    try {
        addEdits(wal, hri1, t1, 2, sequenceId1);
        wal.rollWriter();
        // add some more edits and roll the wal. This would reach the log number threshold
        addEdits(wal, hri1, t1, 2, sequenceId1);
        wal.rollWriter();
        // with above rollWriter call, the max logs limit is reached.
        assertTrue(wal.getNumRolledLogFiles() == 2);

        // get the regions to flush; since there is only one region in the oldest wal, it should
        // return only one region.
        byte[][] regionsToFlush = wal.findRegionsToForceFlush();
        assertEquals(1, regionsToFlush.length);
        assertEquals(hri1.getEncodedNameAsBytes(), regionsToFlush[0]);
        // insert edits in second region
        addEdits(wal, hri2, t2, 2, sequenceId2);
        // get the regions to flush, it should still read region1.
        regionsToFlush = wal.findRegionsToForceFlush();
        assertEquals(regionsToFlush.length, 1);
        assertEquals(hri1.getEncodedNameAsBytes(), regionsToFlush[0]);
        // flush region 1, and roll the wal file. Only last wal which has entries for region1 should
        // remain.
        flushRegion(wal, hri1.getEncodedNameAsBytes(), t1.getFamiliesKeys());
        wal.rollWriter();
        // only one wal should remain now (that is for the second region).
        assertEquals(1, wal.getNumRolledLogFiles());
        // flush the second region
        flushRegion(wal, hri2.getEncodedNameAsBytes(), t2.getFamiliesKeys());
        wal.rollWriter(true);
        // no wal should remain now.
        assertEquals(0, wal.getNumRolledLogFiles());
        // add edits both to region 1 and region 2, and roll.
        addEdits(wal, hri1, t1, 2, sequenceId1);
        addEdits(wal, hri2, t2, 2, sequenceId2);
        wal.rollWriter();
        // add edits and roll the writer, to reach the max logs limit.
        assertEquals(1, wal.getNumRolledLogFiles());
        addEdits(wal, hri1, t1, 2, sequenceId1);
        wal.rollWriter();
        // it should return two regions to flush, as the oldest wal file has entries
        // for both regions.
        regionsToFlush = wal.findRegionsToForceFlush();
        assertEquals(2, regionsToFlush.length);
        // flush both regions
        flushRegion(wal, hri1.getEncodedNameAsBytes(), t1.getFamiliesKeys());
        flushRegion(wal, hri2.getEncodedNameAsBytes(), t2.getFamiliesKeys());
        wal.rollWriter(true);
        assertEquals(0, wal.getNumRolledLogFiles());
        // Add an edit to region1, and roll the wal.
        addEdits(wal, hri1, t1, 2, sequenceId1);
        // tests partial flush: roll on a partial flush, and ensure that wal is not archived.
        wal.startCacheFlush(hri1.getEncodedNameAsBytes(), t1.getFamiliesKeys());
        wal.rollWriter();
        wal.completeCacheFlush(hri1.getEncodedNameAsBytes());
        assertEquals(1, wal.getNumRolledLogFiles());
    } finally {
        if (wal != null) {
            wal.close();
        }
    }
}

From source file:com.spectralogic.ds3client.helpers.FileSystemHelper_Test.java

private void putObjectThenRunVerification(final FileSystemHelper fileSystemHelper,
        final ResultVerifier resultVerifier) throws IOException, URISyntaxException {
    try {
        final String DIR_NAME = "largeFiles/";
        final String[] FILE_NAMES = new String[] { "lesmis-copies.txt" };

        final Path dirPath = ResourceUtils.loadFileResource(DIR_NAME);

        final AtomicLong totalBookSizes = new AtomicLong(0);

        final List<String> bookTitles = new ArrayList<>();
        final List<Ds3Object> objects = new ArrayList<>();
        for (final String book : FILE_NAMES) {
            final Path objPath = ResourceUtils.loadFileResource(DIR_NAME + book);
            final long bookSize = Files.size(objPath);
            totalBookSizes.getAndAdd(bookSize);
            final Ds3Object obj = new Ds3Object(book, bookSize);

            bookTitles.add(book);
            objects.add(obj);
        }

        final int maxNumBlockAllocationRetries = 1;
        final int maxNumObjectTransferAttempts = 1;
        final int retryDelay = -1;
        final Ds3ClientHelpers ds3ClientHelpers = new Ds3ClientHelpersImpl(client, maxNumBlockAllocationRetries,
                maxNumObjectTransferAttempts, retryDelay, new SameThreadEventRunner(), fileSystemHelper);

        final AtomicInteger numTimesCallbackCalled = new AtomicInteger(0);

        final Ds3ClientHelpers.Job writeJob = ds3ClientHelpers.startWriteJob(BUCKET_NAME, objects);
        writeJob.attachObjectCompletedListener(new ObjectCompletedListener() {
            @Override
            public void objectCompleted(final String name) {
                numTimesCallbackCalled.getAndIncrement();

                final ObjectStorageSpaceVerificationResult result = ds3ClientHelpers
                        .objectsFromBucketWillFitInDirectory(BUCKET_NAME, Arrays.asList(FILE_NAMES),
                                Paths.get("."));

                resultVerifier.verifyResult(result, totalBookSizes.get());
            }
        });

        writeJob.transfer(new FileObjectPutter(dirPath));

        assertEquals(1, numTimesCallbackCalled.get());
    } finally {
        deleteAllContents(client, BUCKET_NAME);
    }
}
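
totalBookSizes accumulates the file sizes with getAndAdd, which returns the value before the addition; addAndGet would return the value after it. A short standalone illustration of the difference (not taken from the test):

import java.util.concurrent.atomic.AtomicLong;

public class AccumulateSizes {
    public static void main(String[] args) {
        AtomicLong total = new AtomicLong(0L);
        long[] sizes = { 100L, 250L, 50L };

        for (long size : sizes) {
            long before = total.getAndAdd(size); // value prior to this addition
            long after = before + size;          // what addAndGet(size) would have returned
            System.out.println("before=" + before + " after=" + after);
        }

        System.out.println("total=" + total.get()); // 400
    }
}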

From source file:com.mirth.connect.donkey.server.channel.Statistics.java

private Map<Status, AtomicLong> getConnectorStatsMap(Map<Integer, Map<Status, AtomicLong>> channelStats,
        Integer metaDataId) {
    Map<Status, AtomicLong> connectorStats = channelStats.get(metaDataId);

    if (connectorStats == null) {
        synchronized (channelStats) {
            connectorStats = channelStats.get(metaDataId);

            if (connectorStats == null) {
                connectorStats = new LinkedHashMap<Status, AtomicLong>();
                connectorStats.put(Status.RECEIVED, new AtomicLong(0L));
                connectorStats.put(Status.FILTERED, new AtomicLong(0L));
                connectorStats.put(Status.SENT, new AtomicLong(0L));
                connectorStats.put(Status.ERROR, new AtomicLong(0L));

                channelStats.put(metaDataId, connectorStats);
            }
        }
    }

    return connectorStats;
}
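
The double-checked synchronized block lazily builds a per-connector map of AtomicLong counters, one per Status. For counters that are written far more often than they are read, java.util.concurrent.atomic.LongAdder is a common lower-contention alternative; the sketch below shows the same idea on a ConcurrentHashMap and is an assumption for illustration, not Mirth's actual design.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.LongAdder;

class ConnectorStats {
    enum Status { RECEIVED, FILTERED, SENT, ERROR }

    // One LongAdder per status: increments spread across internal cells under
    // contention, and sum() collapses them when the statistic is actually read.
    private final ConcurrentMap<Status, LongAdder> counts = new ConcurrentHashMap<>();

    void record(Status status) {
        counts.computeIfAbsent(status, s -> new LongAdder()).increment();
    }

    long get(Status status) {
        LongAdder adder = counts.get(status);
        return adder == null ? 0L : adder.sum();
    }
}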

From source file:org.apache.gobblin.runtime.Task.java

/**
 * Instantiate a new {@link Task}.
 *
 * @param context a {@link TaskContext} containing all necessary information to construct and run a {@link Task}
 * @param taskStateTracker a {@link TaskStateTracker} for tracking task state
 * @param taskExecutor a {@link TaskExecutor} for executing the {@link Task} and its {@link Fork}s
 * @param countDownLatch an optional {@link java.util.concurrent.CountDownLatch} used to signal the task completion
 */
public Task(TaskContext context, TaskStateTracker taskStateTracker, TaskExecutor taskExecutor,
        Optional<CountDownLatch> countDownLatch) {
    this.taskContext = context;
    this.taskState = context.getTaskState();
    this.jobId = this.taskState.getJobId();
    this.taskId = this.taskState.getTaskId();
    this.taskKey = this.taskState.getTaskKey();
    this.taskStateTracker = taskStateTracker;
    this.taskExecutor = taskExecutor;
    this.countDownLatch = countDownLatch;
    this.closer = Closer.create();
    this.closer.register(this.taskState.getTaskBrokerNullable());
    this.extractor = closer
            .register(new InstrumentedExtractorDecorator<>(this.taskState, this.taskContext.getExtractor()));

    this.recordStreamProcessors = this.taskContext.getRecordStreamProcessors();

    // add record stream processors to closer if they are closeable
    for (RecordStreamProcessor r : recordStreamProcessors) {
        if (r instanceof Closeable) {
            this.closer.register((Closeable) r);
        }
    }

    List<Converter<?, ?, ?, ?>> converters = this.taskContext.getConverters();

    this.converter = closer.register(new MultiConverter(converters));

    // can't have both record stream processors and converter lists configured
    try {
        Preconditions.checkState(this.recordStreamProcessors.isEmpty() || converters.isEmpty(),
                "Converters cannot be specified when RecordStreamProcessors are specified");
    } catch (IllegalStateException e) {
        try {
            closer.close();
        } catch (Throwable t) {
            LOG.error("Failed to close all open resources", t);
        }
        throw new TaskInstantiationException(
                "Converters cannot be specified when RecordStreamProcessors are specified");
    }

    try {
        this.rowChecker = closer.register(this.taskContext.getRowLevelPolicyChecker());
    } catch (Exception e) {
        try {
            closer.close();
        } catch (Throwable t) {
            LOG.error("Failed to close all open resources", t);
        }
        throw new RuntimeException("Failed to instantiate row checker.", e);
    }

    this.taskMode = getExecutionModel(this.taskState);
    this.recordsPulled = new AtomicLong(0);
    this.lastRecordPulledTimestampMillis = 0;
    this.shutdownRequested = new AtomicBoolean(false);
    this.shutdownLatch = new CountDownLatch(1);

    // Setup Streaming constructs

    this.watermarkingStrategy = "FineGrain"; // TODO: Configure

    if (isStreamingTask()) {
        Extractor underlyingExtractor = this.taskContext.getRawSourceExtractor();
        if (!(underlyingExtractor instanceof StreamingExtractor)) {
            LOG.error(
                    "Extractor {}  is not an instance of StreamingExtractor but the task is configured to run in continuous mode",
                    underlyingExtractor.getClass().getName());
            throw new TaskInstantiationException("Extractor " + underlyingExtractor.getClass().getName()
                    + " is not an instance of StreamingExtractor but the task is configured to run in continuous mode");
        }

        this.watermarkStorage = Optional.of(taskContext.getWatermarkStorage());
        Config config;
        try {
            config = ConfigUtils.propertiesToConfig(taskState.getProperties());
        } catch (Exception e) {
            LOG.warn("Failed to deserialize taskState into Config.. continuing with an empty config", e);
            config = ConfigFactory.empty();
        }

        long commitIntervalMillis = ConfigUtils.getLong(config,
                TaskConfigurationKeys.STREAMING_WATERMARK_COMMIT_INTERVAL_MILLIS,
                TaskConfigurationKeys.DEFAULT_STREAMING_WATERMARK_COMMIT_INTERVAL_MILLIS);
        if (watermarkingStrategy.equals("FineGrain")) { // TODO: Configure
            this.watermarkTracker = Optional.of(this.closer.register(new FineGrainedWatermarkTracker(config)));
            this.watermarkManager = Optional.of((WatermarkManager) this.closer
                    .register(new TrackerBasedWatermarkManager(this.watermarkStorage.get(),
                            this.watermarkTracker.get(), commitIntervalMillis, Optional.of(this.LOG))));

        } else {
            // writer-based watermarking
            this.watermarkManager = Optional
                    .of((WatermarkManager) this.closer.register(new MultiWriterWatermarkManager(
                            this.watermarkStorage.get(), commitIntervalMillis, Optional.of(this.LOG))));
            this.watermarkTracker = Optional.absent();
        }
    } else {
        this.watermarkManager = Optional.absent();
        this.watermarkTracker = Optional.absent();
        this.watermarkStorage = Optional.absent();
    }
}

From source file:com.blazegraph.gremlin.structure.BlazeGraph.java

/**
 * Construct an instance using the supplied configuration.
 */
protected BlazeGraph(final Configuration config) {
    this.config = config;

    this.vf = Optional.ofNullable((BlazeValueFactory) config.getProperty(Options.VALUE_FACTORY))
            .orElse(BlazeValueFactory.INSTANCE);

    final long listIndexFloor = config.getLong(Options.LIST_INDEX_FLOOR, System.currentTimeMillis());
    this.vpIdFactory = new AtomicLong(listIndexFloor);

    this.maxQueryTime = config.getInt(Options.MAX_QUERY_TIME, 0);

    this.sparqlLogMax = config.getInt(Options.SPARQL_LOG_MAX, Options.DEFAULT_SPARQL_LOG_MAX);

    this.TYPE = vf.type();
    this.VALUE = vf.value();
    this.LI_DATATYPE = vf.liDatatype();

    this.sparql = new SparqlGenerator(vf);
    this.transforms = new Transforms();
}
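
The vpIdFactory field shows the main reason to prefer AtomicLong(long initialValue) over the no-arg constructor: the counter is seeded from a floor (here a timestamp) so that newly generated ids start above any ids issued earlier. A minimal sketch of such an id factory (hypothetical, assuming ids only need to be unique and increasing within a single JVM):

import java.util.concurrent.atomic.AtomicLong;

class IdFactory {
    private final AtomicLong next;

    // Seed the factory with a floor so new ids never collide with ids issued before it.
    IdFactory(long floor) {
        this.next = new AtomicLong(floor);
    }

    long nextId() {
        return next.getAndIncrement();
    }
}

A caller would construct it much like BlazeGraph does: new IdFactory(System.currentTimeMillis()).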