Example usage for java.util.concurrent.atomic AtomicLong addAndGet

List of usage examples for java.util.concurrent.atomic AtomicLong addAndGet

Introduction

This page shows usage examples for java.util.concurrent.atomic AtomicLong addAndGet.

Prototype

public final long addAndGet(long delta) 

Document

Atomically adds the given value to the current value, with memory effects as specified by VarHandle#getAndAdd, and returns the updated value.
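
Before the project examples below, here is a minimal, self-contained sketch of the typical pattern (not taken from any of the listed projects; the class and variable names are illustrative): several threads accumulate into a shared counter with addAndGet and use the returned value to react to a threshold.

import java.util.concurrent.atomic.AtomicLong;

public class AddAndGetExample {
    public static void main(String[] args) throws InterruptedException {
        final AtomicLong totalBytes = new AtomicLong(0);
        final long limit = 10_000;

        Runnable worker = () -> {
            for (int i = 0; i < 100; i++) {
                // addAndGet atomically adds the delta and returns the updated value
                long current = totalBytes.addAndGet(128);
                if (current > limit) {
                    // the returned value lets us react to the threshold without a separate get()
                    System.out.println(Thread.currentThread().getName()
                            + " crossed the limit at " + current);
                    return;
                }
            }
        };

        Thread t1 = new Thread(worker, "worker-1");
        Thread t2 = new Thread(worker, "worker-2");
        t1.start();
        t2.start();
        t1.join();
        t2.join();

        System.out.println("total = " + totalBytes.get());
    }
}

Unlike getAndAdd, which returns the previous value, addAndGet returns the value after the addition; that is why several of the examples below compare its result directly against a limit.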

Usage

From source file:org.apache.hadoop.hbase.client.TestFastFail.java

@Test
public void testFastFail() throws IOException, InterruptedException {
    Admin admin = TEST_UTIL.getHBaseAdmin();

    final String tableName = "testClientRelearningExperiment";
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(Bytes.toBytes(tableName)));
    desc.addFamily(new HColumnDescriptor(FAMILY));
    admin.createTable(desc, Bytes.toBytes("aaaa"), Bytes.toBytes("zzzz"), 32);
    final long numRows = 1000;

    Configuration conf = TEST_UTIL.getConfiguration();
    conf.setLong(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, SLEEPTIME * 100);
    conf.setInt(HConstants.HBASE_CLIENT_PAUSE, SLEEPTIME / 10);
    conf.setBoolean(HConstants.HBASE_CLIENT_FAST_FAIL_MODE_ENABLED, true);
    conf.setLong(HConstants.HBASE_CLIENT_FAST_FAIL_THREASHOLD_MS, 0);
    conf.setClass(HConstants.HBASE_CLIENT_FAST_FAIL_INTERCEPTOR_IMPL, MyPreemptiveFastFailInterceptor.class,
            PreemptiveFastFailInterceptor.class);

    final Connection connection = ConnectionFactory.createConnection(conf);

    /*
     * Write numRows worth of data, so that the workers can arbitrarily read.
     */
    List<Put> puts = new ArrayList<>();
    for (long i = 0; i < numRows; i++) {
        byte[] rowKey = longToByteArrayKey(i);
        Put put = new Put(rowKey);
        byte[] value = rowKey; // value is the same as the row key
        put.add(FAMILY, QUALIFIER, value);
        puts.add(put);
    }
    try (Table table = connection.getTable(TableName.valueOf(tableName))) {
        table.put(puts);
        LOG.info("Written all puts.");
    }

    /**
     * The number of threads that are going to perform actions against the test
     * table.
     */
    int nThreads = 100;
    ExecutorService service = Executors.newFixedThreadPool(nThreads);
    final CountDownLatch continueOtherHalf = new CountDownLatch(1);
    final CountDownLatch doneHalfway = new CountDownLatch(nThreads);

    final AtomicInteger numSuccessfullThreads = new AtomicInteger(0);
    final AtomicInteger numFailedThreads = new AtomicInteger(0);

    // The total time taken for the threads to perform the second get.
    final AtomicLong totalTimeTaken = new AtomicLong(0);
    final AtomicInteger numBlockedWorkers = new AtomicInteger(0);
    final AtomicInteger numPreemptiveFastFailExceptions = new AtomicInteger(0);

    List<Future<Boolean>> futures = new ArrayList<Future<Boolean>>();
    for (int i = 0; i < nThreads; i++) {
        futures.add(service.submit(new Callable<Boolean>() {
            /**
             * The workers are going to perform a couple of reads. The second read
             * will follow the killing of a regionserver so that we make sure that
             * some of the threads go into PreemptiveFastFailException.
             */
            public Boolean call() throws Exception {
                try (Table table = connection.getTable(TableName.valueOf(tableName))) {
                    Thread.sleep(Math.abs(random.nextInt()) % 250); // Add some jitter here
                    byte[] row = longToByteArrayKey(Math.abs(random.nextLong()) % numRows);
                    Get g = new Get(row);
                    g.addColumn(FAMILY, QUALIFIER);
                    try {
                        table.get(g);
                    } catch (Exception e) {
                        LOG.debug("Get failed : ", e);
                        doneHalfway.countDown();
                        return false;
                    }

                    // Done with one get, proceeding to do the next one.
                    doneHalfway.countDown();
                    continueOtherHalf.await();

                    long startTime = System.currentTimeMillis();
                    g = new Get(row);
                    g.addColumn(FAMILY, QUALIFIER);
                    try {
                        table.get(g);
                        // The get was successful
                        numSuccessfullThreads.addAndGet(1);
                    } catch (Exception e) {
                        if (e instanceof PreemptiveFastFailException) {
                            // We were issued a PreemptiveFastFailException
                            numPreemptiveFastFailExceptions.addAndGet(1);
                        }
                        // Irrespective of PFFE, the request failed.
                        numFailedThreads.addAndGet(1);
                        return false;
                    } finally {
                        long enTime = System.currentTimeMillis();
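                        // accumulate this worker's second-get latency into the shared total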
                        totalTimeTaken.addAndGet(enTime - startTime);
                        if ((enTime - startTime) >= SLEEPTIME) {
                            // Considering the slow workers as the blockedWorkers.
                            // This assumes that the threads go full throttle at performing
                            // actions. In case the thread scheduling itself is as slow as
                            // SLEEPTIME, then this test might fail, so we might have to set
                            // it to a higher number on slower machines.
                            numBlockedWorkers.addAndGet(1);
                        }
                    }
                    return true;
                } catch (Exception e) {
                    LOG.error("Caught unknown exception", e);
                    doneHalfway.countDown();
                    return false;
                }
            }
        }));
    }

    doneHalfway.await();

    ClusterStatus status = TEST_UTIL.getHBaseCluster().getClusterStatus();

    // Kill a regionserver
    TEST_UTIL.getHBaseCluster().getRegionServer(0).getRpcServer().stop();
    TEST_UTIL.getHBaseCluster().getRegionServer(0).stop("Testing");

    // Let the threads continue going
    continueOtherHalf.countDown();

    Thread.sleep(2 * SLEEPTIME);
    // Restore the cluster
    TEST_UTIL.getHBaseCluster().restoreClusterStatus(status);

    int numThreadsReturnedFalse = 0;
    int numThreadsReturnedTrue = 0;
    int numThreadsThrewExceptions = 0;
    for (Future<Boolean> f : futures) {
        try {
            numThreadsReturnedTrue += f.get() ? 1 : 0;
            numThreadsReturnedFalse += f.get() ? 0 : 1;
        } catch (Exception e) {
            numThreadsThrewExceptions++;
        }
    }
    LOG.debug("numThreadsReturnedFalse:" + numThreadsReturnedFalse + " numThreadsReturnedTrue:"
            + numThreadsReturnedTrue + " numThreadsThrewExceptions:" + numThreadsThrewExceptions
            + " numFailedThreads:" + numFailedThreads.get() + " numSuccessfullThreads:"
            + numSuccessfullThreads.get() + " numBlockedWorkers:" + numBlockedWorkers.get()
            + " totalTimeWaited: "
            + totalTimeTaken.get() / (numBlockedWorkers.get() == 0 ? Long.MAX_VALUE : numBlockedWorkers.get())
            + " numPFFEs: " + numPreemptiveFastFailExceptions.get());

    assertEquals(
            "The expected number of all the successfull and the failed "
                    + "threads should equal the total number of threads that we spawned",
            nThreads, numFailedThreads.get() + numSuccessfullThreads.get());
    assertEquals("All the failures should be coming from the secondput failure", numFailedThreads.get(),
            numThreadsReturnedFalse);
    assertEquals("Number of threads that threw execution exceptions " + "otherwise should be 0",
            numThreadsThrewExceptions, 0);
    assertEquals("The regionservers that returned true should equal to the" + " number of successful threads",
            numThreadsReturnedTrue, numSuccessfullThreads.get());
    /* 'should' is not worthy of an assert. Disabling because this seems to
     * randomly not hold true. St.Ack 20151012
     *
    assertTrue(
        "There should be atleast one thread that retried instead of failing",
        MyPreemptiveFastFailInterceptor.numBraveSouls.get() > 0);
    assertTrue(
        "There should be atleast one PreemptiveFastFail exception,"
    + " otherwise, the test makes little sense."
    + "numPreemptiveFastFailExceptions: "
    + numPreemptiveFastFailExceptions.get(),
        numPreemptiveFastFailExceptions.get() > 0);
    */
    assertTrue(
            "Only few thread should ideally be waiting for the dead "
                    + "regionserver to be coming back. numBlockedWorkers:" + numBlockedWorkers.get()
                    + " threads that retried : " + MyPreemptiveFastFailInterceptor.numBraveSouls.get(),
            numBlockedWorkers.get() <= MyPreemptiveFastFailInterceptor.numBraveSouls.get());
}

From source file:org.tomitribe.tribestream.registryng.resources.ClientResource.java

@GET
@Path("invoke/stream")
@Produces("text/event-stream") // will be part of JAX-RS 2.1, for now just making it working
public void invokeScenario(@Suspended final AsyncResponse asyncResponse, @Context final Providers providers,
        @Context final HttpServletRequest httpServletRequest,
        // base64 encoded json with the request and identity since EventSource doesn't handle it very well
        // TODO: use a ciphering with a POST endpoint to avoid to have it readable (or other)
        @QueryParam("request") final String requestBytes) {
    final SseRequest in = loadPayload(SseRequest.class, providers, requestBytes);

    final String auth = in.getIdentity();
    security.check(auth, httpServletRequest, () -> {
    }, () -> {
        throw new WebApplicationException(Response.Status.FORBIDDEN);
    });

    final GenericClientService.Request req = toRequest(in.getHttp());
    final Scenario scenario = in.getHttp().getScenario();

    final MultivaluedHashMap<String, Object> fakeHttpHeaders = new MultivaluedHashMap<>();
    final ConcurrentMap<Future<?>, Boolean> computations = new ConcurrentHashMap<>();
    final MessageBodyWriter<LightHttpResponse> writerResponse = providers.getMessageBodyWriter(
            LightHttpResponse.class, LightHttpResponse.class, annotations, APPLICATION_JSON_TYPE);
    final MessageBodyWriter<ScenarioEnd> writerEnd = providers.getMessageBodyWriter(ScenarioEnd.class,
            ScenarioEnd.class, annotations, APPLICATION_JSON_TYPE);

    // not the JAX-RS one because CXF wraps it and prevents flush() from working
    final HttpServletResponse httpServletResponse = HttpServletResponse.class
            .cast(httpServletRequest.getAttribute("tribe.registry.response"));
    httpServletResponse.setHeader("Content-Type", "text/event-stream");
    try {
        httpServletResponse.flushBuffer();
    } catch (final IOException e) {
        throw new IllegalStateException(e);
    }

    final ServletOutputStream out;
    try {
        out = httpServletResponse.getOutputStream();
    } catch (final IOException e) {
        throw new IllegalStateException(e);
    }

    mes.submit(() -> {
        final AtomicReference<Invoker.Handle> handleRef = new AtomicReference<>();

        try {
            // we compute some easy stats asynchronously
            final Map<Integer, AtomicInteger> sumPerResponse = new HashMap<>();
            final AtomicInteger total = new AtomicInteger();
            // start with extreme bounds so the first observed duration replaces both
            final AtomicLong min = new AtomicLong(Long.MAX_VALUE);
            final AtomicLong max = new AtomicLong(Long.MIN_VALUE);
            final AtomicLong sum = new AtomicLong();

            final AtomicInteger writeErrors = new AtomicInteger(0);

            final long start = System.currentTimeMillis();
            handleRef.set(invoker.invoke(scenario.getThreads(), scenario.getInvocations(),
                    scenario.getDuration(), timeout, () -> {
                        if (handleRef.get().isCancelled()) {
                            return;
                        }

                        LightHttpResponse resp;
                        try {
                            final GenericClientService.Response invoke = service.invoke(req);
                            resp = new LightHttpResponse(invoke.getStatus(), null,
                                    invoke.getClientExecutionDurationMs());
                        } catch (final RuntimeException e) {
                            resp = new LightHttpResponse(-1, e.getMessage(), -1);
                        }

                        // let's process it in an environment where synchronisation is fine
                        final LightHttpResponse respRef = resp;
                        computations.put(mes.submit(() -> {
                            synchronized (out) {
                                try {
                                    out.write(dataStart);
                                    writerResponse.writeTo(respRef, LightHttpResponse.class,
                                            LightHttpResponse.class, annotations, APPLICATION_JSON_TYPE,
                                            fakeHttpHeaders, out);
                                    out.write(dataEnd);
                                    out.flush();
                                } catch (final IOException e) {
                                    if (writeErrors.incrementAndGet() > toleratedWriteErrors) {
                                        handleRef.get().cancel();
                                    }
                                    throw new IllegalStateException(e);
                                }
                            }

                            if (handleRef.get().isCancelled()) {
                                return;
                            }

                            final long clientExecutionDurationMs = respRef.getClientExecutionDurationMs();

                            total.incrementAndGet();
                            sumPerResponse.computeIfAbsent(respRef.getStatus(), k -> new AtomicInteger())
                                    .incrementAndGet();
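                            // add this invocation's duration to the running sum used to compute the average latency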
                            sum.addAndGet(clientExecutionDurationMs);
                            {
                                // lower the recorded minimum only if this duration is smaller
                                long m;
                                do {
                                    m = min.get();
                                } while (m > clientExecutionDurationMs
                                        && !min.compareAndSet(m, clientExecutionDurationMs));
                            }

                            {
                                // raise the recorded maximum only if this duration is larger
                                long m;
                                do {
                                    m = max.get();
                                } while (m < clientExecutionDurationMs
                                        && !max.compareAndSet(m, clientExecutionDurationMs));
                            }
                        }), true);
                    }));

            handleRef.get().await();

            final long end = System.currentTimeMillis();

            do { // wait for all threads to finish before computing the stats
                final Iterator<Future<?>> iterator = computations.keySet().iterator();
                while (iterator.hasNext()) {
                    try {
                        iterator.next().get(timeout, TimeUnit.MILLISECONDS);
                    } catch (final InterruptedException e) {
                        Thread.interrupted();
                    } catch (final ExecutionException | TimeoutException e) {
                        throw new IllegalStateException(e.getCause());
                    } finally {
                        iterator.remove();
                    }
                }
            } while (!computations.isEmpty());

            if (handleRef.get().isCancelled()) {
                return;
            }

            try {
                out.write(dataStart);
                writerEnd.writeTo(
                        new ScenarioEnd(
                                sumPerResponse.entrySet().stream()
                                        .collect(toMap(Map.Entry::getKey, t -> t.getValue().get())),
                                end - start, total.get(), min.get(), max.get(), sum.get() * 1. / total.get()),
                        ScenarioEnd.class, ScenarioEnd.class, annotations, APPLICATION_JSON_TYPE,
                        new MultivaluedHashMap<>(), out);
                out.write(dataEnd);
                out.flush();
            } catch (final IOException e) {
                throw new IllegalStateException(e);
            }
        } finally {
            try {
                // CXF will skip it since we already wrote the response ourselves
                asyncResponse.resume("");
            } catch (final RuntimeException re) {
                // no-op: not that important
            }
        }
    });
}

From source file:org.apache.pulsar.client.impl.ConsumerImpl.java

private void internalGetLastMessageIdAsync(final Backoff backoff, final AtomicLong remainingTime,
        CompletableFuture<MessageId> future) {
    ClientCnx cnx = cnx();
    if (isConnected() && cnx != null) {
        if (!Commands.peerSupportsGetLastMessageId(cnx.getRemoteEndpointProtocolVersion())) {
            future.completeExceptionally(new PulsarClientException.NotSupportedException(
                    "GetLastMessageId Not supported for ProtocolVersion: "
                            + cnx.getRemoteEndpointProtocolVersion()));
            // the connected broker cannot serve this request, so do not send the command
            return;
        }

        long requestId = client.newRequestId();
        ByteBuf getLastIdCmd = Commands.newGetLastMessageId(consumerId, requestId);
        log.info("[{}][{}] Get topic last message Id", topic, subscription);

        cnx.sendGetLastMessageId(getLastIdCmd, requestId).thenAccept((result) -> {
            log.info("[{}][{}] Successfully getLastMessageId {}:{}", topic, subscription, result.getLedgerId(),
                    result.getEntryId());
            future.complete(
                    new MessageIdImpl(result.getLedgerId(), result.getEntryId(), result.getPartition()));
        }).exceptionally(e -> {
            log.error("[{}][{}] Failed getLastMessageId command", topic, subscription);
            future.completeExceptionally(e.getCause());
            return null;
        });
    } else {
        long nextDelay = Math.min(backoff.next(), remainingTime.get());
        if (nextDelay <= 0) {
            future.completeExceptionally(new PulsarClientException.TimeoutException(
                    "Could not getLastMessageId within configured timeout."));
            return;
        }

        ((ScheduledExecutorService) listenerExecutor).schedule(() -> {
            log.warn("[{}] [{}] Could not get connection while getLastMessageId -- Will try again in {} ms",
                    topic, getHandlerName(), nextDelay);
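            // consume part of the remaining time budget before scheduling the retry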
            remainingTime.addAndGet(-nextDelay);
            internalGetLastMessageIdAsync(backoff, remainingTime, future);
        }, nextDelay, TimeUnit.MILLISECONDS);
    }
}

From source file:org.apache.hadoop.raid.RaidShell.java

private long estimateSaving(final Codec codec, final List<Path> files, final int targetReplication,
        final int numThreads, final boolean isDebug) throws IOException {
    final AtomicLong totalSavingSize = new AtomicLong(0);
    ExecutorService executor = Executors.newFixedThreadPool(numThreads);
    LOG.info("Processing " + files.size() + " files/dirs for " + codec.id + " in " + numThreads + " threads");
    if (isDebug) {
        System.out.println("oldDiskSize | oldParitySize | newDiskSize | newParitySize"
                + "| savingSize | totalSavingSize | path ");
    }
    final AtomicInteger finishNum = new AtomicInteger(0);
    for (int i = 0; i < numThreads; i++) {
        final int startIdx = i;
        Runnable work = new Runnable() {
            public void run() {
                try {
                    for (int idx = startIdx; idx < files.size(); idx += numThreads) {
                        try {
                            Path p = files.get(idx);
                            FileSystem fs = FileSystem.get(conf);
                            p = fs.makeQualified(p);
                            FileStatus stat = null;
                            try {
                                stat = fs.getFileStatus(p);
                            } catch (FileNotFoundException e) {
                                LOG.warn("Path " + p + " does not exist", e);
                            }
                            if (stat == null) {
                                continue;
                            }
                            short repl = 0;
                            List<FileStatus> lfs = null;
                            if (codec.isDirRaid) {
                                if (!stat.isDir()) {
                                    continue;
                                }
                                lfs = RaidNode.listDirectoryRaidFileStatus(conf, fs, p);
                                if (lfs == null) {
                                    continue;
                                }
                                repl = DirectoryStripeReader.getReplication(lfs);
                            } else {
                                repl = stat.getReplication();
                            }

                            // if we should not raid it, do not put the file into the write list.
                            if (!RaidNode.shouldRaid(conf, fs, stat, codec, lfs)) {
                                LOG.info("Should not raid file: " + p);
                                continue;
                            }
                            // check the replication.
                            boolean add = false;
                            if (repl > targetReplication) {
                                add = true;
                            } else if (repl == targetReplication
                                    && !ParityFilePair.parityExists(stat, codec, conf)) {
                                add = true;
                            }
                            if (add) {
                                long oldDiskSize = 0L;
                                long newDiskSize = 0L;
                                long numBlocks = 0L;
                                long parityBlockSize = 0L;
                                if (codec.isDirRaid) {
                                    for (FileStatus fsStat : lfs) {
                                        oldDiskSize += fsStat.getLen() * (fsStat.getReplication());
                                        newDiskSize += fsStat.getLen() * targetReplication;
                                    }
                                    numBlocks = DirectoryStripeReader.getBlockNum(lfs);
                                    parityBlockSize = DirectoryStripeReader.getParityBlockSize(conf, lfs);
                                } else {
                                    oldDiskSize = stat.getLen() * stat.getReplication();
                                    newDiskSize = stat.getLen() * targetReplication;
                                    numBlocks = RaidNode.getNumBlocks(stat);
                                    parityBlockSize = stat.getBlockSize();
                                }

                                long numStripes = RaidNode.numStripes(numBlocks, codec.stripeLength);
                                long newParitySize = numStripes * codec.parityLength * parityBlockSize
                                        * targetReplication;
                                long oldParitySize = 0L;
                                for (Codec other : Codec.getCodecs()) {
                                    if (other.priority < codec.priority) {
                                        Path parityPath = new Path(other.parityDirectory,
                                                RaidNode.makeRelative(stat.getPath()));
                                        long logicalSize = 0;
                                        try {
                                            logicalSize = fs.getContentSummary(parityPath).getSpaceConsumed();
                                        } catch (IOException ioe) {
                                            // doesn't exist
                                            continue;
                                        }
                                        oldParitySize += logicalSize;
                                    }
                                }
                                long savingSize = oldDiskSize + oldParitySize - newDiskSize - newParitySize;
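                                // fold this file's estimated saving into the shared total across worker threads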
                                totalSavingSize.addAndGet(savingSize);
                                if (isDebug) {
                                    System.out.println(oldDiskSize + " " + oldParitySize + " " + newDiskSize
                                            + " " + newParitySize + " " + savingSize + " "
                                            + totalSavingSize.get() + " " + stat.getPath());
                                }
                            }
                        } catch (IOException ioe) {
                            LOG.warn("Get IOException", ioe);
                        }
                    }
                } finally {
                    finishNum.incrementAndGet();
                }
            }
        };
        if (executor != null) {
            executor.execute(work);
        }
    }
    if (executor != null) {
        try {
            while (finishNum.get() < numThreads) {
                try {
                    Thread.sleep(2000);
                } catch (InterruptedException ie) {
                    LOG.warn("EstimateSaving get exception ", ie);
                    throw new IOException(ie);
                }
            }
        } finally {
            executor.shutdown(); // Initiate shutdown; previously submitted tasks still run to completion.
        }
    }
    return totalSavingSize.get();
}

From source file:io.warp10.script.functions.FETCH.java

@Override
public Object apply(WarpScriptStack stack) throws WarpScriptException {
    //
    // Extract parameters from the stack
    //

    Object top = stack.peek();

    //
    // Handle the new (as of 20150805) parameter passing mechanism as a map
    //

    Map<String, Object> params = null;

    if (top instanceof Map) {
        stack.pop();
        params = paramsFromMap(stack, (Map<String, Object>) top);
    }

    if (top instanceof List) {
        if (5 != ((List) top).size()) {
            stack.drop();
            throw new WarpScriptException(getName() + " expects 5 parameters.");
        }

        //
        // Explode list and remove its size
        //

        listTo.apply(stack);
        stack.drop();
    }

    if (null == params) {

        params = new HashMap<String, Object>();

        //
        // Extract time span
        //

        Object oStop = stack.pop();
        Object oStart = stack.pop();

        long endts;
        long timespan;

        if (oStart instanceof String && oStop instanceof String) {
            long start = fmt.parseDateTime((String) oStart).getMillis() * Constants.TIME_UNITS_PER_MS;
            long stop = fmt.parseDateTime((String) oStop).getMillis() * Constants.TIME_UNITS_PER_MS;

            if (start < stop) {
                endts = stop;
                timespan = stop - start;
            } else {
                endts = start;
                timespan = start - stop;
            }
        } else if (oStart instanceof Long && oStop instanceof Long) {
            endts = (long) oStart;
            timespan = (long) oStop;
        } else {
            throw new WarpScriptException("Invalid timespan specification.");
        }

        params.put(PARAM_END, endts);

        if (timespan < 0) {
            params.put(PARAM_COUNT, -timespan);
        } else {
            params.put(PARAM_TIMESPAN, timespan);
        }

        //
        // Extract labels selector
        //

        Object oLabelsSelector = stack.pop();

        if (!(oLabelsSelector instanceof Map)) {
            throw new WarpScriptException("Label selectors must be a map.");
        }

        Map<String, String> labelSelectors = (Map<String, String>) oLabelsSelector;

        params.put(PARAM_LABELS, labelSelectors);

        //
        // Extract class selector
        //

        Object oClassSelector = stack.pop();

        if (!(oClassSelector instanceof String)) {
            throw new WarpScriptException("Class selector must be a string.");
        }

        String classSelector = (String) oClassSelector;

        params.put(PARAM_CLASS, classSelector);

        //
        // Extract token
        //

        Object oToken = stack.pop();

        if (!(oToken instanceof String)) {
            throw new WarpScriptException("Token must be a string.");
        }

        String token = (String) oToken;

        params.put(PARAM_TOKEN, token);
    }

    StoreClient gtsStore = stack.getStoreClient();

    DirectoryClient directoryClient = stack.getDirectoryClient();

    GeoTimeSerie base = null;
    GeoTimeSerie[] bases = null;
    String typelabel = (String) params.get(PARAM_TYPEATTR);

    if (null != typelabel) {
        bases = new GeoTimeSerie[4];
    }

    ReadToken rtoken = Tokens.extractReadToken(params.get(PARAM_TOKEN).toString());

    List<String> clsSels = new ArrayList<String>();
    List<Map<String, String>> lblsSels = new ArrayList<Map<String, String>>();

    if (params.containsKey(PARAM_SELECTOR_PAIRS)) {
        for (Pair<Object, Object> pair : (List<Pair<Object, Object>>) params.get(PARAM_SELECTOR_PAIRS)) {
            clsSels.add(pair.getLeft().toString());
            Map<String, String> labelSelectors = (Map<String, String>) pair.getRight();
            labelSelectors.putAll(Tokens.labelSelectorsFromReadToken(rtoken));
            lblsSels.add((Map<String, String>) labelSelectors);
        }
    } else {
        Map<String, String> labelSelectors = (Map<String, String>) params.get(PARAM_LABELS);
        labelSelectors.putAll(Tokens.labelSelectorsFromReadToken(rtoken));
        clsSels.add(params.get(PARAM_CLASS).toString());
        lblsSels.add(labelSelectors);
    }

    List<Metadata> metadatas = null;

    Iterator<Metadata> iter = null;

    try {
        metadatas = directoryClient.find(clsSels, lblsSels);
        iter = metadatas.iterator();
    } catch (IOException ioe) {
        try {
            iter = directoryClient.iterator(clsSels, lblsSels);
        } catch (Exception e) {
            throw new WarpScriptException(e);
        }
    }

    metadatas = new ArrayList<Metadata>();

    List<GeoTimeSerie> series = new ArrayList<GeoTimeSerie>();
    AtomicLong fetched = (AtomicLong) stack.getAttribute(WarpScriptStack.ATTRIBUTE_FETCH_COUNT);
    long fetchLimit = (long) stack.getAttribute(WarpScriptStack.ATTRIBUTE_FETCH_LIMIT);
    long gtsLimit = (long) stack.getAttribute(WarpScriptStack.ATTRIBUTE_GTS_LIMIT);

    AtomicLong gtscount = (AtomicLong) stack.getAttribute(WarpScriptStack.ATTRIBUTE_GTS_COUNT);

    try {
        while (iter.hasNext()) {

            metadatas.add(iter.next());

            if (gtscount.incrementAndGet() > gtsLimit) {
                throw new WarpScriptException(getName() + " exceeded limit of " + gtsLimit
                        + " Geo Time Series, current count is " + gtscount);
            }

            if (metadatas.size() < EgressFetchHandler.FETCH_BATCHSIZE && iter.hasNext()) {
                continue;
            }

            //
            // Filter the retrieved Metadata according to geo
            //

            if (params.containsKey(PARAM_GEO)) {
                GeoDirectoryClient geoclient = stack.getGeoDirectoryClient();
                long end = (long) params.get(PARAM_END);
                long start = Long.MIN_VALUE;
                if (params.containsKey(PARAM_TIMESPAN)) {
                    start = end - (long) params.get(PARAM_TIMESPAN);
                }

                boolean inside = false;

                if (PARAM_GEOOP_IN.equals(params.get(PARAM_GEOOP))) {
                    inside = true;
                }

                try {
                    metadatas = geoclient.filter((String) params.get(PARAM_GEODIR), metadatas,
                            (GeoXPShape) params.get(PARAM_GEO), inside, start, end);
                } catch (IOException ioe) {
                    throw new WarpScriptException(ioe);
                }
            }

            //
            // Generate extra Metadata if PARAM_EXTRA is set
            //

            if (params.containsKey(PARAM_EXTRA)) {

                Set<Metadata> withextra = new HashSet<Metadata>();

                withextra.addAll(metadatas);

                for (Metadata meta : metadatas) {
                    for (String cls : (Set<String>) params.get(PARAM_EXTRA)) {
                        // The following is safe, the constructor allocates new maps
                        Metadata metadata = new Metadata(meta);
                        metadata.setName(cls);
                        metadata.setClassId(GTSHelper.classId(this.SIPHASH_CLASS, cls));
                        metadata.setLabelsId(GTSHelper.labelsId(this.SIPHASH_LABELS, metadata.getLabels()));
                        withextra.add(metadata);
                    }
                }

                metadatas.clear();
                metadatas.addAll(withextra);
            }

            //
            // We assume that GTS will be fetched in a continuous way, i.e. without interleaving GTSDecoders
            // from different series (one from a first GTS, then one from another, then one from the first again).
            //

            long timespan = params.containsKey(PARAM_TIMESPAN) ? (long) params.get(PARAM_TIMESPAN)
                    : -((long) params.get(PARAM_COUNT));

            TYPE type = (TYPE) params.get(PARAM_TYPE);

            if (null != this.forcedType) {
                if (null != type) {
                    throw new WarpScriptException(getName() + " type of fetched GTS cannot be changed.");
                }
                type = this.forcedType;
            }

            boolean writeTimestamp = Boolean.TRUE.equals(params.get(PARAM_WRITE_TIMESTAMP));

            boolean showUUID = Boolean.TRUE.equals(params.get(PARAM_SHOWUUID));

            try (GTSDecoderIterator gtsiter = gtsStore.fetch(rtoken, metadatas, (long) params.get(PARAM_END),
                    timespan, fromArchive, writeTimestamp)) {
                while (gtsiter.hasNext()) {
                    GTSDecoder decoder = gtsiter.next();

                    GeoTimeSerie gts;

                    //
                    // If we should ventilate per type, do so now
                    //

                    if (null != typelabel) {

                        Map<String, String> labels = new HashMap<String, String>(
                                decoder.getMetadata().getLabels());
                        labels.remove(Constants.PRODUCER_LABEL);
                        labels.remove(Constants.OWNER_LABEL);

                        java.util.UUID uuid = null;

                        if (showUUID) {
                            uuid = new java.util.UUID(decoder.getClassId(), decoder.getLabelsId());
                        }

                        long count = 0;

                        Metadata decoderMeta = decoder.getMetadata();

                        while (decoder.next()) {
                            count++;
                            long ts = decoder.getTimestamp();
                            long location = decoder.getLocation();
                            long elevation = decoder.getElevation();
                            Object value = decoder.getValue();

                            int gtsidx = 0;
                            String typename = "DOUBLE";

                            if (value instanceof Long) {
                                gtsidx = 1;
                                typename = "LONG";
                            } else if (value instanceof Boolean) {
                                gtsidx = 2;
                                typename = "BOOLEAN";
                            } else if (value instanceof String) {
                                gtsidx = 3;
                                typename = "STRING";
                            }

                            base = bases[gtsidx];

                            if (null == base || !base.getMetadata().getName().equals(decoderMeta.getName())
                                    || !base.getMetadata().getLabels().equals(decoderMeta.getLabels())) {
                                bases[gtsidx] = new GeoTimeSerie();
                                base = bases[gtsidx];
                                series.add(base);
                                base.setLabels(decoder.getLabels());
                                base.getMetadata().putToAttributes(typelabel, typename);
                                base.setName(decoder.getName());
                                if (null != uuid) {
                                    base.getMetadata().putToAttributes(Constants.UUID_ATTRIBUTE,
                                            uuid.toString());
                                }
                            }

                            GTSHelper.setValue(base, ts, location, elevation, value, false);
                        }

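                        // add this decoder's datapoint count to the shared fetch counter; the updated total is checked against the limit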
                        if (fetched.addAndGet(count) > fetchLimit) {
                            Map<String, String> sensisionLabels = new HashMap<String, String>();
                            sensisionLabels.put(SensisionConstants.SENSISION_LABEL_CONSUMERID,
                                    Tokens.getUUID(rtoken.getBilledId()));
                            Sensision.update(SensisionConstants.SENSISION_CLASS_EINSTEIN_FETCHCOUNT_EXCEEDED,
                                    sensisionLabels, 1);
                            throw new WarpScriptException(getName() + " exceeded limit of " + fetchLimit
                                    + " datapoints, current count is " + fetched.get());
                        }

                        continue;
                    }

                    if (null != type) {
                        gts = decoder.decode(type);
                    } else {
                        gts = decoder.decode();
                    }

                    //
                    // Remove producer/owner labels
                    //

                    //
                    // Add a .uuid attribute if instructed to do so
                    //

                    if (showUUID) {
                        java.util.UUID uuid = new java.util.UUID(gts.getClassId(), gts.getLabelsId());
                        gts.getMetadata().putToAttributes(Constants.UUID_ATTRIBUTE, uuid.toString());
                    }

                    Map<String, String> labels = new HashMap<String, String>();
                    labels.putAll(gts.getMetadata().getLabels());
                    labels.remove(Constants.PRODUCER_LABEL);
                    labels.remove(Constants.OWNER_LABEL);
                    gts.setLabels(labels);

                    //
                    // If it's the first GTS, take it as is.
                    //

                    if (null == base) {
                        base = gts;
                    } else {
                        //
                        // If name and labels are identical to the previous GTS, merge them
                        // Otherwise add 'base' to the stack and set it to 'gts'.
                        //
                        if (!base.getMetadata().getName().equals(gts.getMetadata().getName())
                                || !base.getMetadata().getLabels().equals(gts.getMetadata().getLabels())) {
                            series.add(base);
                            base = gts;
                        } else {
                            base = GTSHelper.merge(base, gts);
                        }
                    }

                    if (fetched.addAndGet(gts.size()) > fetchLimit) {
                        Map<String, String> sensisionLabels = new HashMap<String, String>();
                        sensisionLabels.put(SensisionConstants.SENSISION_LABEL_CONSUMERID,
                                Tokens.getUUID(rtoken.getBilledId()));
                        Sensision.update(SensisionConstants.SENSISION_CLASS_EINSTEIN_FETCHCOUNT_EXCEEDED,
                                sensisionLabels, 1);
                        throw new WarpScriptException(getName() + " exceeded limit of " + fetchLimit
                                + " datapoints, current count is " + fetched.get());
                        //break;
                    }
                }
            } catch (WarpScriptException ee) {
                throw ee;
            } catch (Exception e) {
                e.printStackTrace();
            }

            //
            // If there is one current GTS, push it onto the stack (only if not ventilating per type)
            //

            if (null != base && null == typelabel) {
                series.add(base);
            }

            //
            // Reset state
            //

            base = null;
            metadatas.clear();
        }
    } catch (Throwable t) {
        throw t;
    } finally {
        if (iter instanceof MetadataIterator) {
            try {
                ((MetadataIterator) iter).close();
            } catch (Exception e) {
            }
        }
    }

    stack.push(series);

    //
    // Apply a possible postfetch hook
    //

    if (rtoken.getHooksSize() > 0 && rtoken.getHooks().containsKey(POSTFETCH_HOOK)) {
        stack.execMulti(rtoken.getHooks().get(POSTFETCH_HOOK));
    }

    return stack;
}