Example usage for java.util.concurrent.atomic AtomicLong AtomicLong

Introduction

On this page you can find example usages of the java.util.concurrent.atomic.AtomicLong(long) constructor.

Prototype

public AtomicLong(long initialValue) 

Document

Creates a new AtomicLong with the given initial value.
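
A minimal standalone sketch of the constructor (the class name here is invented for illustration): the initial value is visible immediately, and later updates are atomic.

import java.util.concurrent.atomic.AtomicLong;

public class AtomicLongConstructorDemo {
    public static void main(String[] args) {
        // Start the counter at 100 rather than the default of 0.
        AtomicLong counter = new AtomicLong(100L);

        System.out.println(counter.get());             // 100
        System.out.println(counter.incrementAndGet()); // 101
        System.out.println(counter.addAndGet(9L));     // 110
        System.out.println(counter.getAndSet(0L));     // prints 110, counter is now 0
    }
}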

Usage

From source file:org.apache.druid.client.cache.CacheDistributionTest.java

@Ignore
@Test
public void testDistribution() {
    KetamaNodeLocator locator = new KetamaNodeLocator(ImmutableList.of(dummyNode("druid-cache.0001", 11211),
            dummyNode("druid-cache.0002", 11211), dummyNode("druid-cache.0003", 11211),
            dummyNode("druid-cache.0004", 11211), dummyNode("druid-cache.0005", 11211)), hash,
            new DefaultKetamaNodeLocatorConfiguration() {
                @Override
                public int getNodeRepetitions() {
                    return reps;
                }
            });

    Map<MemcachedNode, AtomicLong> counter = Maps.newHashMap();
    long t = 0;
    for (int i = 0; i < KEY_COUNT; ++i) {
        final String k = DigestUtils.sha1Hex("abc" + i) + ":" + DigestUtils.sha1Hex("xyz" + i);
        long t0 = System.nanoTime();
        MemcachedNode node = locator.getPrimary(k);
        t += System.nanoTime() - t0;
        if (counter.containsKey(node)) {
            counter.get(node).incrementAndGet();
        } else {
            counter.put(node, new AtomicLong(1));
        }
    }

    long min = Long.MAX_VALUE;
    long max = 0;
    System.out.printf(Locale.ENGLISH, "%25s\t%5d\t", hash, reps);
    for (AtomicLong count : counter.values()) {
        System.out.printf(Locale.ENGLISH, "%10d\t", count.get());
        min = Math.min(min, count.get());
        max = Math.max(max, count.get());
    }
    System.out.printf(Locale.ENGLISH, "%7.2f\t%5.0f%n", (double) min / (double) max, (double) t / KEY_COUNT);
}
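
As an aside, the containsKey/put branch above can be written more compactly on Java 8+ with Map.computeIfAbsent. The following condensed sketch shows the same per-key counting idiom; it is not code from the Druid test.

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;

public class PerKeyCounterDemo {
    public static void main(String[] args) {
        Map<String, AtomicLong> counter = new HashMap<>();
        String[] nodes = {"node-1", "node-2", "node-1", "node-3", "node-1"};

        for (String node : nodes) {
            // Lazily create the per-key AtomicLong with an initial value of 0, then bump it.
            counter.computeIfAbsent(node, k -> new AtomicLong(0)).incrementAndGet();
        }

        counter.forEach((node, count) -> System.out.println(node + " -> " + count.get()));
    }
}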

From source file:com.bytelightning.opensource.pokerface.RequestHandler.java

/**
 * Primary constructor.
 * @param executor   <code>HttpAsyncRequester</code> which will perform the actual request to the remote Target and receive its response.
 * @param connPool   The client connection pool that will be used by the <code>executor</code>
 * @param patternTargetMapping   The mapping of relative uri paths to configured Targets.
 * @param scripts   Mapping of all JavaScript endpoints.  This map *may* be dynamically updated, or it may be null to reflect that JavaScript endpoints are not configured.
 * @param dynamicHostMap   If non-null, we will allow JavaScript endpoints to proxy to remote Targets not specified in the configuration file.
 */
public RequestHandler(HttpAsyncRequester executor, BasicNIOConnPool connPool, ObjectPool<ByteBuffer> bufferPool,
        Path staticFilesPath, Map<String, TargetDescriptor> patternTargetMapping,
        NavigableMap<String, ScriptObjectMirror> scripts, ConcurrentMap<String, HttpHost> dynamicHostMap) {
    this.executor = executor;
    this.connPool = connPool;
    this.bufferPool = bufferPool;
    this.staticFilesPath = staticFilesPath;
    this.patternTargetMapping = patternTargetMapping;
    this.scripts = scripts;
    this.dynamicHostMap = dynamicHostMap;
    this.idCounter = new AtomicLong(1);
}
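
The constructor only seeds the counter; presumably the rest of RequestHandler hands out unique ids from it via getAndIncrement(). Below is a standalone sketch of that id-generation pattern; the class and method names are invented and not taken from the project.

import java.util.concurrent.atomic.AtomicLong;

public class RequestIdGenerator {
    // Starting at 1 leaves 0 free as an "unassigned" marker.
    private final AtomicLong idCounter = new AtomicLong(1);

    public long nextId() {
        // Atomically returns the current value and advances the counter,
        // so concurrent callers never receive the same id.
        return idCounter.getAndIncrement();
    }

    public static void main(String[] args) {
        RequestIdGenerator gen = new RequestIdGenerator();
        System.out.println(gen.nextId()); // 1
        System.out.println(gen.nextId()); // 2
    }
}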

From source file:org.apache.hadoop.hbase.mapreduce.TestHLogRecordReader.java

/**
 * Test partial reads from the log based on passed time range
 * @throws Exception
 */
@Test
public void testPartialRead() throws Exception {
    HLog log = HLogFactory.createHLog(fs, hbaseDir, logName, conf);
    // This test depends on timestamp being millisecond based and the filename of the WAL also
    // being millisecond based.
    long ts = System.currentTimeMillis();
    WALEdit edit = new WALEdit();
    final AtomicLong sequenceId = new AtomicLong(0);
    edit.add(new KeyValue(rowName, family, Bytes.toBytes("1"), ts, value));
    log.append(info, tableName, edit, ts, htd, sequenceId);
    edit = new WALEdit();
    edit.add(new KeyValue(rowName, family, Bytes.toBytes("2"), ts + 1, value));
    log.append(info, tableName, edit, ts + 1, htd, sequenceId);
    LOG.info("Before 1st WAL roll " + log.getFilenum());
    log.rollWriter();
    LOG.info("Past 1st WAL roll " + log.getFilenum());

    Thread.sleep(1);
    long ts1 = System.currentTimeMillis();

    edit = new WALEdit();
    edit.add(new KeyValue(rowName, family, Bytes.toBytes("3"), ts1 + 1, value));
    log.append(info, tableName, edit, ts1 + 1, htd, sequenceId);
    edit = new WALEdit();
    edit.add(new KeyValue(rowName, family, Bytes.toBytes("4"), ts1 + 2, value));
    log.append(info, tableName, edit, ts1 + 2, htd, sequenceId);
    log.close();
    LOG.info("Closed WAL " + log.getFilenum());

    HLogInputFormat input = new HLogInputFormat();
    Configuration jobConf = new Configuration(conf);
    jobConf.set("mapreduce.input.fileinputformat.inputdir", logDir.toString());
    jobConf.setLong(HLogInputFormat.END_TIME_KEY, ts);

    // only 1st file is considered, and only its 1st entry is used
    List<InputSplit> splits = input.getSplits(MapreduceTestingShim.createJobContext(jobConf));

    assertEquals(1, splits.size());
    testSplit(splits.get(0), Bytes.toBytes("1"));

    jobConf.setLong(HLogInputFormat.START_TIME_KEY, ts + 1);
    jobConf.setLong(HLogInputFormat.END_TIME_KEY, ts1 + 1);
    splits = input.getSplits(MapreduceTestingShim.createJobContext(jobConf));
    // both files need to be considered
    assertEquals(2, splits.size());
    // only the 2nd entry from the 1st file is used
    testSplit(splits.get(0), Bytes.toBytes("2"));
    // only the 1st entry from the 2nd file is used
    testSplit(splits.get(1), Bytes.toBytes("3"));
}

From source file:reactor.bus.SelectorUnitTests.java

@SuppressWarnings("unchecked")
private void runTest(String type, Function<Integer, Tuple2<Selector, Object>> fn) {
    final AtomicLong counter = new AtomicLong(selectors * iterations);
    Registry<Object, Consumer<?>> registry = Registries.create();

    Consumer<?> countDown = new Consumer<Object>() {
        @Override
        public void accept(Object obj) {
            counter.decrementAndGet();
        }
    };

    Selector<Object>[] sels = new Selector[selectors];
    Object[] keys = new Object[selectors];

    for (int i = 0; i < selectors; i++) {
        Tuple2<Selector, Object> tup = fn.apply(i);
        sels[i] = tup.getT1();
        keys[i] = tup.getT2();
        registry.register(sels[i], countDown);
    }

    long start = System.currentTimeMillis();
    for (int i = 0; i < selectors * iterations; i++) {
        int j = i % selectors;
        for (Registration<?, ? extends Consumer<?>> reg : registry.select(keys[j])) {
            reg.getObject().accept(null);
        }
    }
    long end = System.currentTimeMillis();
    double elapsed = (end - start);
    long throughput = Math.round((selectors * iterations) / (elapsed / 1000));
    LOG.info("{} throughput: {}M/s in {}ms", type, throughput, Math.round(elapsed));

    assertThat("All handlers have been found and executed.", counter.get() == 0);
}
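
The test seeds the AtomicLong with the total number of expected callbacks, lets every consumer decrement it, and asserts that it reaches zero. A minimal standalone sketch of that countdown bookkeeping, using plain threads instead of the Reactor registry:

import java.util.concurrent.atomic.AtomicLong;

public class CountdownCheckDemo {
    public static void main(String[] args) throws InterruptedException {
        final int expectedCalls = 10;
        // Seed with the number of callbacks we expect; each handler decrements once.
        final AtomicLong remaining = new AtomicLong(expectedCalls);

        Runnable handler = remaining::decrementAndGet;

        Thread[] workers = new Thread[expectedCalls];
        for (int i = 0; i < expectedCalls; i++) {
            workers[i] = new Thread(handler);
            workers[i].start();
        }
        for (Thread worker : workers) {
            worker.join();
        }

        // A remaining count of 0 means every expected handler actually ran.
        System.out.println("all handlers ran: " + (remaining.get() == 0));
    }
}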

From source file:net.hydromatic.foodbench.Main.java

/** Does the work. */
private void run(String jdbcUrl, String catalog, String driverClassName)
        throws IOException, SQLException, ClassNotFoundException {
    URL url = FoodMartQuery.class.getResource("/queries.json");
    InputStream inputStream = url.openStream();
    ObjectMapper mapper = new ObjectMapper();
    Map values = mapper.readValue(inputStream, Map.class);
    //noinspection unchecked
    List<Map<String, Object>> tests = (List) values.get("queries");
    if (driverClassName != null) {
        Class.forName(driverClassName);
    }
    Connection connection = DriverManager.getConnection(jdbcUrl);
    if (catalog != null) {
        connection.setCatalog(catalog);
    }
    Statement statement = connection.createStatement();
    for (Map<String, Object> test : tests) {
        int id = (Integer) test.get("id");
        if (!idSet.contains(id)) {
            continue;
        }
        String sql = (String) test.get("sql");
        if (jdbcUrl.startsWith("jdbc:mysql:")) {
            sql = sql.replace("\"", "`");
            sql = sql.replace(" NULLS FIRST", "");
            sql = sql.replace(" NULLS LAST", "");
            if (sql.contains("VALUES ")) {
                System.out.println("query id: " + id + " sql: " + sql + " skipped");
                continue;
            }
        }
        if (jdbcUrl.startsWith("jdbc:optiq:")) {
            sql = sql.replace("RTRIM(", "TRIM(TRAILING ' ' FROM ");
        }
        final AtomicLong tPrepare = new AtomicLong(0);
        Hook.Closeable hook = Hook.JAVA_PLAN.add(new Function1<Object, Object>() {
            public Object apply(Object a0) {
                tPrepare.set(System.nanoTime());
                return null;
            }
        });
        try {
            final long t0 = System.nanoTime();
            ResultSet resultSet = statement.executeQuery(sql);
            int n = 0;
            while (resultSet.next()) {
                ++n;
            }
            resultSet.close();
            final long tEnd = System.nanoTime();
            final long nanos = tEnd - t0;
            final long prepare = tPrepare.longValue() - t0;
            final long execute = tEnd - tPrepare.longValue();
            System.out.println("query id: " + id + " rows: " + n + " nanos: " + NF.format(nanos) + " prepare: "
                    + NF.format(prepare) + " execute: " + NF.format(execute) + " prepare%: "
                    + ((float) prepare / (float) nanos * 100f));
        } catch (SQLException e) {
            System.out.println("query id: " + id + " sql: " + sql + " error: " + e.getMessage());
            if (verbose) {
                e.printStackTrace();
            }
        } finally {
            hook.close();
        }
    }
    statement.close();
    connection.close();
}
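
Here new AtomicLong(0) serves as a mutable timestamp holder that the plan hook can write into, even though the locals captured by the anonymous function must stay effectively final; prepare and execute durations are then derived from it. A stripped-down sketch of that hand-off, with an invented callback interface standing in for Calcite's Hook:

import java.util.concurrent.atomic.AtomicLong;

public class PrepareTimingDemo {
    // Invented for this sketch; not Calcite's Hook API.
    interface PlanCallback {
        void onPlanReady();
    }

    static void runQuery(PlanCallback callback) throws InterruptedException {
        Thread.sleep(20); // pretend to prepare the plan
        callback.onPlanReady();
        Thread.sleep(30); // pretend to execute the query
    }

    public static void main(String[] args) throws InterruptedException {
        final AtomicLong tPrepare = new AtomicLong(0);
        final long t0 = System.nanoTime();

        // The callback writes into the AtomicLong; a captured local long
        // could not be reassigned from inside it.
        runQuery(() -> tPrepare.set(System.nanoTime()));

        long tEnd = System.nanoTime();
        System.out.println("prepare nanos: " + (tPrepare.get() - t0));
        System.out.println("execute nanos: " + (tEnd - tPrepare.get()));
    }
}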

From source file:eu.fthevenet.binjr.data.codec.CsvDecoder.java

@Override
public Map<TimeSeriesInfo<T>, TimeSeriesProcessor<T>> decode(InputStream in, List<TimeSeriesInfo<T>> seriesInfo)
        throws IOException, DecodingDataFromAdapterException {
    try (Profiler ignored = Profiler.start("Building time series from csv data", logger::trace)) {
        try (BufferedReader reader = new BufferedReader(new InputStreamReader(in, encoding))) {
            CSVFormat csvFormat = CSVFormat.DEFAULT.withAllowMissingColumnNames(false).withFirstRecordAsHeader()
                    .withSkipHeaderRecord().withDelimiter(delimiter);
            Iterable<CSVRecord> records = csvFormat.parse(reader);
            Map<TimeSeriesInfo<T>, TimeSeriesProcessor<T>> series = new HashMap<>();
            final AtomicLong nbpoints = new AtomicLong(0);
            for (CSVRecord csvRecord : records) {
                nbpoints.incrementAndGet();
                ZonedDateTime timeStamp = dateParser.apply(csvRecord.get(0));
                for (TimeSeriesInfo<T> info : seriesInfo) {
                    T val = numberParser.apply(csvRecord.get(info.getBinding().getLabel()));
                    XYChart.Data<ZonedDateTime, T> point = new XYChart.Data<>(timeStamp, val);
                    TimeSeriesProcessor<T> l = series.computeIfAbsent(info, k -> timeSeriesFactory.create());
                    l.addSample(point);
                }
            }
            logger.trace(() -> String.format("Built %d series with %d samples each (%d total samples)",
                    seriesInfo.size(), nbpoints.get(), seriesInfo.size() * nbpoints.get()));
            return series;
        }
    }
}
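
nbpoints is an AtomicLong rather than a plain long because the deferred logging lambda at the end captures it, and captured locals must be effectively final. A simplified sketch of that capture pattern (not taken from binjr):

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Supplier;

public class DeferredCountLoggingDemo {
    public static void main(String[] args) {
        List<String> records = Arrays.asList("r1", "r2", "r3");

        // A plain long could not be incremented here and still be captured by the
        // lambda below, so the AtomicLong acts as the mutable counter.
        final AtomicLong nbpoints = new AtomicLong(0);
        for (String rec : records) {
            nbpoints.incrementAndGet();
        }

        // Evaluated lazily, like a logger's message supplier.
        Supplier<String> message = () -> "parsed " + nbpoints.get() + " records";
        System.out.println(message.get());
    }
}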

From source file:org.lol.reddit.reddit.api.RedditAPIIndividualSubredditDataRequester.java

public void performRequest(final Collection<String> subredditCanonicalIds, final TimestampBound timestampBound,
        final RequestResponseHandler<HashMap<String, RedditSubreddit>, SubredditRequestFailure> handler) {

    // TODO if there's a bulk API to do this, that would be good... :)

    final HashMap<String, RedditSubreddit> result = new HashMap<String, RedditSubreddit>();
    final AtomicBoolean stillOkay = new AtomicBoolean(true);
    final AtomicInteger requestsToGo = new AtomicInteger(subredditCanonicalIds.size());
    final AtomicLong oldestResult = new AtomicLong(Long.MAX_VALUE);

    final RequestResponseHandler<RedditSubreddit, SubredditRequestFailure> innerHandler = new RequestResponseHandler<RedditSubreddit, SubredditRequestFailure>() {
        @Override
        public void onRequestFailed(SubredditRequestFailure failureReason) {
            synchronized (result) {
                if (stillOkay.get()) {
                    stillOkay.set(false);
                    handler.onRequestFailed(failureReason);
                }
            }
        }

        @Override
        public void onRequestSuccess(RedditSubreddit innerResult, long timeCached) {
            synchronized (result) {
                if (stillOkay.get()) {

                    result.put(innerResult.getKey(), innerResult);
                    oldestResult.set(Math.min(oldestResult.get(), timeCached));

                    if (requestsToGo.decrementAndGet() == 0) {
                        handler.onRequestSuccess(result, oldestResult.get());
                    }
                }
            }
        }
    };

    for (String subredditCanonicalId : subredditCanonicalIds) {
        performRequest(subredditCanonicalId, timestampBound, innerHandler);
    }
}
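
oldestResult starts at Long.MAX_VALUE and is shrunk with Math.min under the lock on result. When only the minimum itself needs tracking, AtomicLong.accumulateAndGet (Java 8+) can perform the same update atomically without an external lock; a small sketch:

import java.util.concurrent.atomic.AtomicLong;

public class RunningMinimumDemo {
    public static void main(String[] args) {
        // Seed with MAX_VALUE so the first observed timestamp always wins.
        AtomicLong oldest = new AtomicLong(Long.MAX_VALUE);

        long[] timesCached = {1_700_000_300L, 1_700_000_100L, 1_700_000_200L};
        for (long timeCached : timesCached) {
            // Atomically keep the smaller of the stored value and the new sample.
            oldest.accumulateAndGet(timeCached, Math::min);
        }

        System.out.println("oldest timestamp: " + oldest.get()); // 1700000100
    }
}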

From source file:com.github.naoghuman.testdata.abclist.service.LinkMappingService.java

@Override
protected Task<Void> createTask() {
    return new Task<Void>() {
        {
            updateProgress(0, saveMaxEntities);
        }

        @Override
        protected Void call() throws Exception {
            LoggerFacade.getDefault().deactivate(Boolean.TRUE);

            final StopWatch stopWatch = new StopWatch();
            stopWatch.start();

            /*
             1) Iterate over all links.
             2) If random > 0.005d, do steps 4) and 5);
             3) otherwise create a LinkMapping without a parent.
             4) Get 1-10 terms and create a LinkMapping for each of them
             - meaning the link is mapped to 1-10 terms.
             5) Get 0-10 topics and create a LinkMapping for each of them
             - meaning the link is mapped to 0-10 topics.
            */

            final ObservableList<Link> links = SqlProvider.getDefault().findAllLinks();
            final ObservableList<Term> terms = SqlProvider.getDefault().findAllTerms();
            final int sizeTerms = terms.size();
            final ObservableList<Topic> topics = SqlProvider.getDefault().findAllTopics();
            final int sizeTopics = topics.size();
            final AtomicInteger index = new AtomicInteger(0);

            final CrudService crudService = DatabaseFacade.getDefault().getCrudService(entityName);
            final AtomicLong id = new AtomicLong(
                    -1_000_000_000L + DatabaseFacade.getDefault().getCrudService().count(entityName));
            links.stream() // 1
                    .forEach(link -> {
                        // 2) Should the [Link] have a parent
                        final double random = TestdataGenerator.RANDOM.nextDouble();
                        if (random > 0.005d) {
                            // 4) Create [Link]s with parent [Term]
                            final int maxTerms = TestdataGenerator.RANDOM.nextInt(10) + 1;
                            for (int i = 0; i < maxTerms; i++) {
                                final LinkMapping lm = ModelProvider.getDefault().getLinkMapping();
                                lm.setId(id.getAndIncrement());

                                final Term term = terms.get(TestdataGenerator.RANDOM.nextInt(sizeTerms));
                                lm.setParentId(term.getId());
                                lm.setParentType(LinkMappingType.TERM);

                                lm.setChildId(link.getId());
                                lm.setChildType(LinkMappingType.LINK);

                                crudService.create(lm);
                            }

                            // 5) Create [Link]s with parent [Topic]
                            final int maxTopics = TestdataGenerator.RANDOM.nextInt(11);
                            for (int i = 0; i < maxTopics; i++) {
                                final LinkMapping lm = ModelProvider.getDefault().getLinkMapping();
                                lm.setId(id.getAndIncrement());

                                final Topic topic = topics.get(TestdataGenerator.RANDOM.nextInt(sizeTopics));
                                lm.setParentId(topic.getId());
                                lm.setParentType(LinkMappingType.TOPIC);

                                lm.setChildId(link.getId());
                                lm.setChildType(LinkMappingType.LINK);

                                crudService.create(lm);
                            }
                        } else {
                            // 3) Some [Link]s don't have a parent
                            final LinkMapping lm = ModelProvider.getDefault().getLinkMapping();
                            lm.setId(id.getAndIncrement());
                            lm.setParentId(IDefaultConfiguration.DEFAULT_ID);
                            lm.setParentType(LinkMappingType.NOT_DEFINED);
                            lm.setChildId(link.getId());
                            lm.setChildType(LinkMappingType.LINK);

                            crudService.create(lm);
                        }

                        updateProgress(index.getAndIncrement(), saveMaxEntities);
                    });

            LoggerFacade.getDefault().deactivate(Boolean.FALSE);
            stopWatch.split();
            LoggerFacade.getDefault().debug(this.getClass(),
                    "  + " + stopWatch.toSplitString() + " for " + saveMaxEntities + " LinkMappings."); // NOI18N
            stopWatch.stop();

            return null;
        }
    };
}

From source file:org.apache.nifi.controller.StandardFunnel.java

public StandardFunnel(final String identifier, final ProcessGroup processGroup,
        final ProcessScheduler scheduler) {
    this.identifier = identifier;
    this.processGroupRef = new AtomicReference<>(processGroup);

    outgoingConnections = new HashSet<>();
    incomingConnections = new ArrayList<>();

    final List<Relationship> relationships = new ArrayList<>();
    relationships.add(Relationship.ANONYMOUS);
    this.relationships = Collections.unmodifiableList(relationships);

    lossTolerant = new AtomicBoolean(false);
    position = new AtomicReference<>(new Position(0D, 0D));
    scheduledState = new AtomicReference<>(ScheduledState.STOPPED);
    penalizationPeriod = new AtomicReference<>("30 sec");
    yieldPeriod = new AtomicReference<>("250 millis");
    yieldExpiration = new AtomicLong(0L);
    schedulingPeriod = new AtomicReference<>("0 millis");
    schedulingNanos = new AtomicLong(MINIMUM_SCHEDULING_NANOS);
    name = new AtomicReference<>("Funnel");
}

From source file:org.apache.hadoop.corona.SessionManager.java

/**
 * Constructor for SessionManager, used when we are reading back the
 * ClusterManager state from the disk
 *
 * @param clusterManager The ClusterManager instance to be used
 * @param coronaSerializer The CoronaSerializer instance, which will be used
 *                         to read JSON from disk
 * @throws IOException
 */
public SessionManager(ClusterManager clusterManager, CoronaSerializer coronaSerializer) throws IOException {
    this(clusterManager);
    // Even though the expireSessions thread would be running now, it would
    // not expire any sessions we would be creating now, because the
    // ClusterManager would be in Safe Mode.

    // Expecting the START_OBJECT token for sessionManager
    coronaSerializer.readStartObjectToken("sessionManager");

    readSessions(coronaSerializer);

    coronaSerializer.readField("sessionCounter");
    sessionCounter = new AtomicLong(coronaSerializer.readValueAs(Long.class));

    // Expecting the END_OBJECT token for sessionManager
    coronaSerializer.readEndObjectToken("sessionManager");

    // Restoring the runnableSessions map
    for (String sessionId : sessions.keySet()) {
        Session session = sessions.get(sessionId);
        if (session.getPendingRequestCount() > 0) {
            runnableSessions.put(sessionId, session);
        }
    }
}