Example usage for java.util.concurrent.atomic AtomicLong incrementAndGet

Introduction

On this page you can find example usages of java.util.concurrent.atomic.AtomicLong.incrementAndGet().

Prototype

public final long incrementAndGet() 

Document

Atomically increments the current value, with memory effects as specified by VarHandle#getAndAdd.
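
As a warm-up before the project examples below, here is a minimal, self-contained sketch (not taken from any of the projects listed) of the basic contract: incrementAndGet() adds one and returns the updated value in a single atomic step, so concurrent callers never lose an update or observe the same result twice.

import java.util.concurrent.atomic.AtomicLong;

public class CounterDemo {
    private static final AtomicLong counter = new AtomicLong(0);

    public static void main(String[] args) throws InterruptedException {
        Runnable task = () -> {
            for (int i = 0; i < 1000; i++) {
                counter.incrementAndGet(); // atomic read-modify-write, no lock needed
            }
        };
        Thread t1 = new Thread(task);
        Thread t2 = new Thread(task);
        t1.start();
        t2.start();
        t1.join();
        t2.join();
        System.out.println(counter.get()); // always prints 2000; a plain long++ could lose updates
    }
}

Note that incrementAndGet() returns the new value, while getAndIncrement() returns the previous one; several of the examples below use the returned value directly, for instance as a timestamp or a unique file name.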

Usage

From source file:org.apache.hadoop.hbase.regionserver.TestAtomicOperation.java

/**
 * Test multi-threaded region mutations.
 */
@Test
public void testMultiRowMutationMultiThreads() throws IOException {

    LOG.info("Starting test testMultiRowMutationMultiThreads");
    initHRegion(tableName, name.getMethodName(), fam1);

    // create 10 threads, each will alternate between adding and
    // removing a column
    int numThreads = 10;
    int opsPerThread = 500;
    AtomicOperation[] all = new AtomicOperation[numThreads];

    AtomicLong timeStamps = new AtomicLong(0);
    AtomicInteger failures = new AtomicInteger(0);
    final List<byte[]> rowsToLock = Arrays.asList(row, row2);
    // create all threads
    for (int i = 0; i < numThreads; i++) {
        all[i] = new AtomicOperation(region, opsPerThread, timeStamps, failures) {
            @Override
            public void run() {
                boolean op = true;
                for (int i = 0; i < numOps; i++) {
                    try {
                        // throw in some flushes
                        if (i % 10 == 0) {
                            synchronized (region) {
                                LOG.debug("flushing");
                                region.flushcache();
                                if (i % 100 == 0) {
                                    region.compactStores();
                                }
                            }
                        }
                        long ts = timeStamps.incrementAndGet();
                        List<Mutation> mrm = new ArrayList<Mutation>();
                        if (op) {
                            Put p = new Put(row2, ts);
                            p.add(fam1, qual1, value1);
                            mrm.add(p);
                            Delete d = new Delete(row);
                            d.deleteColumns(fam1, qual1, ts);
                            mrm.add(d);
                        } else {
                            Delete d = new Delete(row2);
                            d.deleteColumns(fam1, qual1, ts);
                            mrm.add(d);
                            Put p = new Put(row, ts);
                            p.add(fam1, qual1, value2);
                            mrm.add(p);
                        }
                        region.mutateRowsWithLocks(mrm, rowsToLock);
                        op ^= true;
                        // check: should always see exactly one column
                        Scan s = new Scan(row);
                        RegionScanner rs = region.getScanner(s);
                        List<Cell> r = new ArrayList<Cell>();
                        while (rs.next(r))
                            ;
                        rs.close();
                        if (r.size() != 1) {
                            LOG.debug(r);
                            failures.incrementAndGet();
                            fail();
                        }
                    } catch (IOException e) {
                        e.printStackTrace();
                        failures.incrementAndGet();
                        fail();
                    }
                }
            }
        };
    }

    // run all threads
    for (int i = 0; i < numThreads; i++) {
        all[i].start();
    }

    // wait for all threads to finish
    for (int i = 0; i < numThreads; i++) {
        try {
            all[i].join();
        } catch (InterruptedException e) {
        }
    }
    assertEquals(0, failures.get());
}

From source file:io.druid.server.namespace.cache.NamespaceExtractionCacheManagerExecutorsTest.java

@Test(timeout = 50_000)
public void testRunCount() throws InterruptedException, ExecutionException {
    final Lifecycle lifecycle = new Lifecycle();
    final NamespaceExtractionCacheManager onHeap;
    final AtomicLong runCount = new AtomicLong(0);
    final CountDownLatch latch = new CountDownLatch(1);
    try {
        onHeap = new OnHeapNamespaceExtractionCacheManager(lifecycle,
                new ConcurrentHashMap<String, Function<String, String>>(), new NoopServiceEmitter(),
                ImmutableMap.<Class<? extends ExtractionNamespace>, ExtractionNamespaceFunctionFactory<?>>of(
                        URIExtractionNamespace.class,
                        new URIExtractionNamespaceFunctionFactory(
                                ImmutableMap.<String, SearchableVersionedDataFinder>of("file",
                                        new LocalFileTimestampVersionFinder()))));

        final URIExtractionNamespace namespace = new URIExtractionNamespace("ns", tmpFile.toURI(),
                new URIExtractionNamespace.ObjectMapperFlatDataParser(
                        URIExtractionNamespaceTest.registerTypes(new ObjectMapper())),
                new Period(1L), null);
        final String cacheId = UUID.randomUUID().toString();
        ListenableFuture<?> future = onHeap.schedule(namespace, factory, new Runnable() {
            @Override
            public void run() {
                manager.getPostRunnable(namespace, factory, cacheId).run();
                latch.countDown();
                runCount.incrementAndGet();
            }
        }, cacheId);
        latch.await();
        Thread.sleep(20);
    } finally {
        lifecycle.stop();
    }
    onHeap.waitForServiceToEnd(1_000, TimeUnit.MILLISECONDS);
    Assert.assertTrue(runCount.get() > 5);
}

From source file:org.apache.druid.server.initialization.JettyTest.java

@Test
@Ignore // this test will deadlock if it hits an issue, so ignored by default
public void testTimeouts() throws Exception {
    // test for request timeouts properly not locking up all threads
    final Executor executor = Executors.newFixedThreadPool(100);
    final AtomicLong count = new AtomicLong(0);
    final CountDownLatch latch = new CountDownLatch(1000);
    for (int i = 0; i < 10000; i++) {
        executor.execute(new Runnable() {
            @Override
            public void run() {
                executor.execute(new Runnable() {
                    @Override
                    public void run() {
                        long startTime = System.currentTimeMillis();
                        long startTime2 = 0;
                        try {
                            ListenableFuture<StatusResponseHolder> go = client.go(
                                    new Request(HttpMethod.GET,
                                            new URL("http://localhost:" + port + "/slow/hello")),
                                    new StatusResponseHandler(Charset.defaultCharset()));
                            startTime2 = System.currentTimeMillis();
                            go.get();
                        } catch (Exception e) {
                            e.printStackTrace();
                        } finally {
                            System.out.printf(Locale.ENGLISH,
                                    "Response time client %d, time taken for getting future %d, Counter %d%n",
                                    System.currentTimeMillis() - startTime,
                                    System.currentTimeMillis() - startTime2, count.incrementAndGet());
                            latch.countDown();

                        }
                    }
                });
            }
        });
    }

    latch.await();
}

From source file:org.apache.hadoop.mapred.LocalDistributedCacheManager.java

/**
 * Set up the distributed cache by localizing the resources, and updating
 * the configuration with references to the localized resources.
 * @param conf
 * @throws IOException
 */
public void setup(JobConf conf) throws IOException {
    File workDir = new File(System.getProperty("user.dir"));

    // Generate YARN local resources objects corresponding to the distributed
    // cache configuration
    Map<String, LocalResource> localResources = new LinkedHashMap<String, LocalResource>();
    MRApps.setupDistributedCache(conf, localResources);
    // Generating unique numbers for FSDownload.
    AtomicLong uniqueNumberGenerator = new AtomicLong(System.currentTimeMillis());

    // Find which resources are to be put on the local classpath
    Map<String, Path> classpaths = new HashMap<String, Path>();
    Path[] archiveClassPaths = DistributedCache.getArchiveClassPaths(conf);
    if (archiveClassPaths != null) {
        for (Path p : archiveClassPaths) {
            classpaths.put(p.toUri().getPath().toString(), p);
        }
    }
    Path[] fileClassPaths = DistributedCache.getFileClassPaths(conf);
    if (fileClassPaths != null) {
        for (Path p : fileClassPaths) {
            classpaths.put(p.toUri().getPath().toString(), p);
        }
    }

    // Localize the resources
    LocalDirAllocator localDirAllocator = new LocalDirAllocator(MRConfig.LOCAL_DIR);
    FileContext localFSFileContext = FileContext.getLocalFSFileContext();
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();

    ExecutorService exec = null;
    try {
        ThreadFactory tf = new ThreadFactoryBuilder()
                .setNameFormat("LocalDistributedCacheManager Downloader #%d").build();
        exec = Executors.newCachedThreadPool(tf);
        Path destPath = localDirAllocator.getLocalPathForWrite(".", conf);
        Map<LocalResource, Future<Path>> resourcesToPaths = Maps.newHashMap();
        for (LocalResource resource : localResources.values()) {
            Callable<Path> download = new FSDownload(localFSFileContext, ugi, conf,
                    new Path(destPath, Long.toString(uniqueNumberGenerator.incrementAndGet())), resource);
            Future<Path> future = exec.submit(download);
            resourcesToPaths.put(resource, future);
        }
        for (Entry<String, LocalResource> entry : localResources.entrySet()) {
            LocalResource resource = entry.getValue();
            Path path;
            try {
                path = resourcesToPaths.get(resource).get();
            } catch (InterruptedException e) {
                throw new IOException(e);
            } catch (ExecutionException e) {
                throw new IOException(e);
            }
            String pathString = path.toUri().toString();
            String link = entry.getKey();
            String target = new File(path.toUri()).getPath();
            symlink(workDir, target, link);

            if (resource.getType() == LocalResourceType.ARCHIVE) {
                localArchives.add(pathString);
            } else if (resource.getType() == LocalResourceType.FILE) {
                localFiles.add(pathString);
            } else if (resource.getType() == LocalResourceType.PATTERN) {
                //PATTERN is not currently used in local mode
                throw new IllegalArgumentException(
                        "Resource type PATTERN is not " + "implemented yet. " + resource.getResource());
            }
            Path resourcePath;
            try {
                resourcePath = resource.getResource().toPath();
            } catch (URISyntaxException e) {
                throw new IOException(e);
            }
            LOG.info(String.format("Localized %s as %s", resourcePath, path));
            String cp = resourcePath.toUri().getPath();
            if (classpaths.containsKey(cp)) {
                localClasspaths.add(path.toUri().getPath().toString());
            }
        }
    } finally {
        if (exec != null) {
            exec.shutdown();
        }
    }
    // Update the configuration object with localized data.
    if (!localArchives.isEmpty()) {
        conf.set(MRJobConfig.CACHE_LOCALARCHIVES,
                StringUtils.arrayToString(localArchives.toArray(new String[localArchives.size()])));
    }
    if (!localFiles.isEmpty()) {
        conf.set(MRJobConfig.CACHE_LOCALFILES,
                StringUtils.arrayToString(localFiles.toArray(new String[localFiles.size()])));
    }
    setupCalled = true;
}
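
The setup method above seeds an AtomicLong with System.currentTimeMillis() and calls incrementAndGet() to give each FSDownload a distinct subdirectory name. A minimal sketch of that pattern in isolation (the class and method names here are illustrative, not from Hadoop):

import java.util.concurrent.atomic.AtomicLong;

// Hypothetical helper: ids unique within this process, seeded with the clock
// so that names from different runs are unlikely to collide.
class UniqueNameGenerator {
    private final AtomicLong next = new AtomicLong(System.currentTimeMillis());

    String nextName() {
        // incrementAndGet() hands out distinct values even under contention
        return Long.toString(next.incrementAndGet());
    }
}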

From source file:org.apache.bookkeeper.bookie.InterleavedLedgerStorageTest.java

@Test
public void testShellCommands() throws Exception {
    interleavedStorage.flush();
    interleavedStorage.shutdown();
    final Pattern entryPattern = Pattern
            .compile("entry (?<entry>\\d+)\t:\t((?<na>N/A)|\\(log:(?<logid>\\d+), pos: (?<pos>\\d+)\\))");

    class Metadata {
        final Pattern keyPattern = Pattern.compile("master key +: ([0-9a-f])");
        final Pattern sizePattern = Pattern.compile("size +: (\\d+)");
        final Pattern entriesPattern = Pattern.compile("entries +: (\\d+)");
        final Pattern isFencedPattern = Pattern.compile("isFenced +: (\\w+)");

        public String masterKey;
        public long size = -1;
        public long entries = -1;
        public boolean foundFenced = false;

        void check(String s) {
            Matcher keyMatcher = keyPattern.matcher(s);
            if (keyMatcher.matches()) {
                masterKey = keyMatcher.group(1);
                return;
            }

            Matcher sizeMatcher = sizePattern.matcher(s);
            if (sizeMatcher.matches()) {
                size = Long.valueOf(sizeMatcher.group(1));
                return;
            }

            Matcher entriesMatcher = entriesPattern.matcher(s);
            if (entriesMatcher.matches()) {
                entries = Long.valueOf(entriesMatcher.group(1));
                return;
            }

            Matcher isFencedMatcher = isFencedPattern.matcher(s);
            if (isFencedMatcher.matches()) {
                Assert.assertEquals("true", isFencedMatcher.group(1));
                foundFenced = true;
                return;
            }
        }

        void validate(long foundEntries) {
            Assert.assertTrue(entries >= numWrites * entriesPerWrite);
            Assert.assertEquals(entries, foundEntries);
            Assert.assertTrue(foundFenced);
            Assert.assertNotEquals(-1, size);
        }
    }
    final Metadata foundMetadata = new Metadata();

    AtomicLong curEntry = new AtomicLong(0);
    AtomicLong someEntryLogger = new AtomicLong(-1);
    BookieShell shell = new BookieShell(LedgerIdFormatter.LONG_LEDGERID_FORMATTER,
            EntryFormatter.STRING_FORMATTER) {
        @Override
        void printInfoLine(String s) {
            Matcher matcher = entryPattern.matcher(s);
            System.out.println(s);
            if (matcher.matches()) {
                assertEquals(Long.toString(curEntry.get()), matcher.group("entry"));

                if (matcher.group("na") == null) {
                    String logId = matcher.group("logid");
                    Assert.assertNotEquals(matcher.group("logid"), null);
                    Assert.assertNotEquals(matcher.group("pos"), null);
                    Assert.assertTrue((curEntry.get() % entriesPerWrite) == 0);
                    Assert.assertTrue(curEntry.get() <= numWrites * entriesPerWrite);
                    if (someEntryLogger.get() == -1) {
                        someEntryLogger.set(Long.valueOf(logId));
                    }
                } else {
                    Assert.assertEquals(matcher.group("logid"), null);
                    Assert.assertEquals(matcher.group("pos"), null);
                    Assert.assertTrue(((curEntry.get() % entriesPerWrite) != 0)
                            || ((curEntry.get() >= (entriesPerWrite * numWrites))));
                }
                curEntry.incrementAndGet();
            } else {
                foundMetadata.check(s);
            }
        }
    };
    shell.setConf(conf);
    int res = shell.run(new String[] { "ledger", "-m", "0" });
    Assert.assertEquals(0, res);
    Assert.assertTrue(curEntry.get() >= numWrites * entriesPerWrite);
    foundMetadata.validate(curEntry.get());

    // Should pass consistency checker
    res = shell.run(new String[] { "localconsistencycheck" });
    Assert.assertEquals(0, res);

    // Remove a logger
    EntryLogger entryLogger = new EntryLogger(conf);
    entryLogger.removeEntryLog(someEntryLogger.get());

    // Should fail consistency checker
    res = shell.run(new String[] { "localconsistencycheck" });
    Assert.assertEquals(1, res);
}

From source file:org.codice.ddf.commands.catalog.DumpCommand.java

@Override
protected Object executeWithSubject() throws Exception {
    final File dumpDir = new File(dirPath);

    if (!dumpDir.exists()) {
        printErrorMessage("Directory [" + dirPath + "] must exist.");
        console.println("If the directory does indeed exist, try putting the path in quotes.");
        return null;
    }

    if (!dumpDir.isDirectory()) {
        printErrorMessage("Path [" + dirPath + "] must be a directory.");
        return null;
    }

    if (!DEFAULT_TRANSFORMER_ID.matches(transformerId)) {
        transformers = getTransformers();
        if (transformers == null) {
            console.println(transformerId + " is an invalid metacard transformer.");
            return null;
        }
    }

    CatalogFacade catalog = getCatalog();
    FilterBuilder builder = getFilterBuilder();

    Filter createdFilter = null;
    if ((createdAfter != null) && (createdBefore != null)) {
        DateTime createStartDateTime = DateTime.parse(createdAfter);
        DateTime createEndDateTime = DateTime.parse(createdBefore);
        createdFilter = builder.attribute(Metacard.CREATED).is().during().dates(createStartDateTime.toDate(),
                createEndDateTime.toDate());
    } else if (createdAfter != null) {
        DateTime createStartDateTime = DateTime.parse(createdAfter);
        createdFilter = builder.attribute(Metacard.CREATED).is().after().date(createStartDateTime.toDate());
    } else if (createdBefore != null) {
        DateTime createEndDateTime = DateTime.parse(createdBefore);
        createdFilter = builder.attribute(Metacard.CREATED).is().before().date(createEndDateTime.toDate());
    }

    Filter modifiedFilter = null;
    if ((modifiedAfter != null) && (modifiedBefore != null)) {
        DateTime modifiedStartDateTime = DateTime.parse(modifiedAfter);
        DateTime modifiedEndDateTime = DateTime.parse(modifiedBefore);
        modifiedFilter = builder.attribute(Metacard.MODIFIED).is().during()
                .dates(modifiedStartDateTime.toDate(), modifiedEndDateTime.toDate());
    } else if (modifiedAfter != null) {
        DateTime modifiedStartDateTime = DateTime.parse(modifiedAfter);
        modifiedFilter = builder.attribute(Metacard.MODIFIED).is().after().date(modifiedStartDateTime.toDate());
    } else if (modifiedBefore != null) {
        DateTime modifiedEndDateTime = DateTime.parse(modifiedBefore);
        modifiedFilter = builder.attribute(Metacard.MODIFIED).is().before().date(modifiedEndDateTime.toDate());
    }

    Filter filter = null;
    if ((createdFilter != null) && (modifiedFilter != null)) {
        // Filter by both created and modified dates
        filter = builder.allOf(createdFilter, modifiedFilter);
    } else if (createdFilter != null) {
        // Only filter by created date
        filter = createdFilter;
    } else if (modifiedFilter != null) {
        // Only filter by modified date
        filter = modifiedFilter;
    } else {
        // Don't filter by date range
        filter = builder.attribute(Metacard.ID).is().like().text(WILDCARD);
    }

    if (cqlFilter != null) {
        filter = CQL.toFilter(cqlFilter);
    }

    QueryImpl query = new QueryImpl(filter);
    query.setRequestsTotalResultsCount(false);
    query.setPageSize(pageSize);

    Map<String, Serializable> props = new HashMap<String, Serializable>();
    // Avoid caching all results while dumping with native query mode
    props.put("mode", "native");

    final AtomicLong resultCount = new AtomicLong(0);
    long start = System.currentTimeMillis();

    SourceResponse response = catalog.query(new QueryRequestImpl(query, props));

    BlockingQueue<Runnable> blockingQueue = new ArrayBlockingQueue<Runnable>(multithreaded);
    RejectedExecutionHandler rejectedExecutionHandler = new ThreadPoolExecutor.CallerRunsPolicy();
    final ExecutorService executorService = new ThreadPoolExecutor(multithreaded, multithreaded, 0L,
            TimeUnit.MILLISECONDS, blockingQueue, rejectedExecutionHandler);

    while (response.getResults().size() > 0) {
        response = catalog.query(new QueryRequestImpl(query, props));

        if (multithreaded > 1) {
            final List<Result> results = new ArrayList<Result>(response.getResults());
            executorService.submit(new Runnable() {
                @Override
                public void run() {
                    boolean transformationFailed = false;
                    for (final Result result : results) {
                        Metacard metacard = result.getMetacard();
                        try {
                            exportMetacard(dumpDir, metacard);
                        } catch (IOException | CatalogTransformerException e) {
                            transformationFailed = true;
                            LOGGER.debug("Failed to dump metacard {}", metacard.getId(), e);
                            executorService.shutdownNow();
                        }
                        printStatus(resultCount.incrementAndGet());
                    }
                    if (transformationFailed) {
                        LOGGER.error(
                                "One or more metacards failed to transform. Enable debug log for more details.");
                    }
                }
            });
        } else {
            for (final Result result : response.getResults()) {
                Metacard metacard = result.getMetacard();
                exportMetacard(dumpDir, metacard);
                printStatus(resultCount.incrementAndGet());
            }
        }

        if (response.getResults().size() < pageSize || pageSize == -1) {
            break;
        }

        if (pageSize > 0) {
            query.setStartIndex(query.getStartIndex() + pageSize);
        }
    }

    executorService.shutdown();

    while (!executorService.isTerminated()) {
        try {
            TimeUnit.MILLISECONDS.sleep(100);
        } catch (InterruptedException e) {
            // ignore
        }
    }

    long end = System.currentTimeMillis();
    String elapsedTime = timeFormatter.print(new Period(start, end).withMillis(0));
    console.printf(" %d file(s) dumped in %s\t%n", resultCount.get(), elapsedTime);
    LOGGER.info("{} file(s) dumped in {}", resultCount.get(), elapsedTime);
    console.println();

    return null;
}

From source file:com.github.jackygurui.vertxredissonrepository.repository.Impl.RedisRepositoryImpl.java

private void persistBlocking(String id, JsonObject data, RBatch redissonBatch,
        Handler<AsyncResult<Boolean>> resultHandler) {
    RBatch batch = redissonBatch == null ? redissonWrite.createBatch() : redissonBatch;
    AtomicBoolean failed = new AtomicBoolean(false);
    try {
        BeanMap pMap = new BeanMap(cls.newInstance());
        //remove the indexes;
        if (isRedisEntity()) {
            AtomicBoolean finished = new AtomicBoolean(false);
            AtomicBoolean hasNested = new AtomicBoolean(false);
            AtomicLong stack = new AtomicLong();
            pMap.forEach((k, v) -> {
                if ("class".equals(k)) {
                    return;
                }
                Class<?> type = pMap.getType((String) k);
                if (!isRedisEntity(type)) {
                    //recreate the indexes;
                    if ("id".equals(k)) {
                        batch.getMap(getStorageKey(), StringCodec.INSTANCE).fastPutAsync(id, id);
                    } else {
                        batch.getMap(getStorageKey((String) k)).fastPutAsync(id, data.getValue((String) k));
                    }
                } else {
                    hasNested.set(true);
                    stack.incrementAndGet();
                    RedisRepositoryImpl<?> innerRepo;
                    try {
                        innerRepo = (RedisRepositoryImpl) factory.instance(type);
                    } catch (RepositoryException e) {
                        throw new RuntimeException(e);
                    }
                    JsonObject value = data.getJsonObject((String) k);
                    final boolean newOne = !value.containsKey("id") || value.getString("id") == null
                            || "null".equals(value.getString("id"));
                    final String ID = newOne ? id : value.getString("id");
                    // Make the nested entity share the same id as the parent in a 1:1 relation.
                    // This makes fetches much faster, since the reference does not need to be
                    // resolved when fetching 1:1 nested objects.
                    innerRepo.persist(ID, value, batch, c -> {
                        if (c.succeeded()) {
                            long s = stack.decrementAndGet();
                            if (newOne) {
                                // unlike an update, a create needs to add the reference field to the batch
                                batch.getMap(getStorageKey((String) k)).fastPutAsync(id, ID);
                            }
                            if (s == 0 && finished.get() && !failed.get()) { //finished iterating and no outstanding processes. 
                                if (redissonBatch == null) {//if it's not inside a nested process.
                                    finishPersist(id, data, batch, resultHandler);
                                } else {//if it is inside a nested process.
                                    resultHandler.handle(Future.succeededFuture(true));
                                }
                            }
                            //else wait for others to complete
                        } else {
                            boolean firstToFail = failed.compareAndSet(false, true);
                            if (firstToFail) {
                                resultHandler.handle(Future.failedFuture(c.cause()));
                            }
                        }
                    });
                }
            });
            batch.getAtomicLongAsync(getCounterKey()).incrementAndGetAsync();
            finished.set(true);
            if (!hasNested.get()) {//does not have nested RedissonEntity within
                if (redissonBatch == null) {//if it's not inside a nested process.
                    finishPersist(id, data, batch, resultHandler);
                } else {//if it is inside a nested process.
                    resultHandler.handle(Future.succeededFuture(true));
                }
            }
        } else {//not a RedissonEntity class, persist as json string.
            //recreate the indexes;
            batch.<String, String>getMap(getStorageKey(), StringCodec.INSTANCE).fastPutAsync(id,
                    Json.encode(data));
            batch.getAtomicLongAsync(getCounterKey()).incrementAndGetAsync();
            if (redissonBatch == null) {//if it's not inside a nested process.
                finishPersist(id, data, batch, resultHandler);
            } else {//if it is inside a nested process.
                resultHandler.handle(Future.succeededFuture(true));
            }
        }
    } catch (InstantiationException | IllegalAccessException | RuntimeException ex) {
        failed.set(true);
        resultHandler.handle(Future.failedFuture(ex));
    }
}
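
In the example above, the AtomicLong named stack counts outstanding nested persists: it is incremented before each nested call and decremented in the callback, and completion fires only when the count reaches zero after iteration has finished. A stripped-down sketch of that coordination pattern, using CompletableFuture in place of the repository callbacks purely for illustration:

import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;

class FanOutCompletion {
    void runAll(List<CompletableFuture<Void>> tasks, Runnable onAllDone) {
        AtomicLong outstanding = new AtomicLong();
        AtomicBoolean finishedIterating = new AtomicBoolean(false);
        AtomicBoolean done = new AtomicBoolean(false);
        for (CompletableFuture<Void> task : tasks) {
            outstanding.incrementAndGet(); // count the task before subscribing to it
            task.thenRun(() -> {
                // the callback that drops the count to zero after iteration ended completes the batch
                if (outstanding.decrementAndGet() == 0 && finishedIterating.get()
                        && done.compareAndSet(false, true)) {
                    onAllDone.run();
                }
            });
        }
        finishedIterating.set(true);
        // covers an empty list, or all tasks completing before iteration finished
        if (outstanding.get() == 0 && done.compareAndSet(false, true)) {
            onAllDone.run();
        }
    }
}

The compareAndSet guard plays the same role as failed.compareAndSet(false, true) in the original: whichever path observes completion first wins, so the handler is invoked exactly once.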

From source file:com.nextdoor.bender.handler.BaseHandler.java

/**
 * Method called by Handler implementations to process records.
 *
 * @param context Lambda invocation context.
 * @throws HandlerException
 */
private void processInternal(Context context) throws HandlerException {
    Stat runtime = new Stat("runtime.ns");
    runtime.start();

    Source source = this.getSource();
    DeserializerProcessor deser = source.getDeserProcessor();
    List<OperationProcessor> operations = source.getOperationProcessors();
    List<String> containsStrings = source.getContainsStrings();
    List<Pattern> regexPatterns = source.getRegexPatterns();

    this.getIpcService().setContext(context);

    Iterator<InternalEvent> events = this.getInternalEventIterator();

    /*
     * For logging purposes log when the function started running
     */
    this.monitor.invokeTimeNow();

    AtomicLong eventCount = new AtomicLong(0);
    AtomicLong oldestArrivalTime = new AtomicLong(System.currentTimeMillis());
    AtomicLong oldestOccurrenceTime = new AtomicLong(System.currentTimeMillis());

    /*
     * eventQueue allows for InternalEvents to be pulled from the Iterator and published to a
     * stream. A Thread is created that loops through events in the iterator and offers them to the
     * queue. Note that offering will be blocked if the queue is full (back pressure being applied).
     * When the iterator reaches the end (hasNext = false) the queue is closed.
     */
    this.eventQueue = new Queue<InternalEvent>(new LinkedBlockingQueue<InternalEvent>(this.queueSize));

    /*
     * Thread will live for duration of invocation and supply Stream with events.
     */
    new Thread(new Runnable() {
        @Override
        public void run() {
            while (events.hasNext()) {
                try {
                    eventQueue.offer(events.next());
                } catch (Queue.ClosedQueueException e) {
                    break;
                }
            }
            try {
                eventQueue.close();
            } catch (Queue.ClosedQueueException e) {
            }
        }
    }).start();

    Stream<InternalEvent> input = this.eventQueue.jdkStream();

    /*
     * Filter out raw events
     */
    Stream<InternalEvent> filtered = input.filter(
            /*
             * Perform regex filter
             */
            ievent -> {
                eventCount.incrementAndGet();
                String eventStr = ievent.getEventString();

                /*
                 * Apply String contains filters before deserialization
                 */
                for (String containsString : containsStrings) {
                    if (eventStr.contains(containsString)) {
                        return false;
                    }
                }

                /*
                 * Apply regex patterns before deserialization
                 */
                for (Pattern regexPattern : regexPatterns) {
                    Matcher m = regexPattern.matcher(eventStr);

                    if (m.find()) {
                        return false;
                    }
                }

                return true;
            });

    /*
     * Deserialize
     */
    Stream<InternalEvent> deserialized = filtered.map(ievent -> {
        DeserializedEvent data = deser.deserialize(ievent.getEventString());

        if (data == null || data.getPayload() == null) {
            logger.warn("Failed to deserialize: " + ievent.getEventString());
            return null;
        }

        ievent.setEventObj(data);
        return ievent;
    }).filter(Objects::nonNull);

    /*
     * Perform Operations
     */
    Stream<InternalEvent> operated = deserialized;
    for (OperationProcessor operation : operations) {
        operated = operation.perform(operated);
    }

    /*
     * Serialize
     */
    Stream<InternalEvent> serialized = operated.map(ievent -> {
        try {
            String raw = null;
            raw = this.ser.serialize(this.wrapper.getWrapped(ievent));
            ievent.setSerialized(raw);
            return ievent;
        } catch (SerializationException e) {
            return null;
        }
    }).filter(Objects::nonNull);

    /*
     * Transport
     */
    serialized.forEach(ievent -> {
        /*
         * Update times
         */
        updateOldest(oldestArrivalTime, ievent.getArrivalTime());
        updateOldest(oldestOccurrenceTime, ievent.getEventTime());

        try {
            this.getIpcService().add(ievent);
        } catch (TransportException e) {
            logger.warn("error adding event", e);
        }
    });

    /*
     * Wait for transporters to finish
     */
    try {
        this.getIpcService().flush();
    } catch (TransportException e) {
        throw new HandlerException("encounted TransportException while shutting down ipcService", e);
    } catch (InterruptedException e) {
        throw new HandlerException("thread was interruptedwhile shutting down ipcService", e);
    } finally {
        String evtSource = this.getSourceName();

        runtime.stop();

        if (!this.skipWriteStats) {
            writeStats(eventCount.get(), oldestArrivalTime.get(), oldestOccurrenceTime.get(), evtSource,
                    runtime);
        }

        if (logger.isTraceEnabled()) {
            getGCStats();
        }
    }
}
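
The updateOldest helper called in the transport stage is not shown in this listing. Assuming it keeps the smallest (oldest) timestamp seen so far, a typical implementation is a compare-and-set retry loop; this sketch is a plausible reconstruction, not the Bender source:

import java.util.concurrent.atomic.AtomicLong;

final class Timestamps {
    // Hypothetical: atomically lower 'oldest' to 'candidate' if candidate is older.
    static void updateOldest(AtomicLong oldest, long candidate) {
        long current = oldest.get();
        while (candidate < current) {
            if (oldest.compareAndSet(current, candidate)) {
                return; // we installed the older timestamp
            }
            current = oldest.get(); // lost the race; re-read and retry
        }
    }
}

On Java 8 and later, oldest.getAndAccumulate(candidate, Math::min) achieves the same effect in one call.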

From source file:com.scaleoutsoftware.soss.hserver.hadoop.DistributedCacheManager.java

/**
 * Set up the distributed cache by localizing the resources, and updating
 * the configuration with references to the localized resources.
 * @param conf job configuration
 * @throws IOException
 */
public void setup(Configuration conf) throws IOException {
    // If we are not the 0th worker, wait for the 0th worker to set up the cache.
    if (InvocationWorker.getIgWorkerIndex() > 0 && InvocationWorker.getNumberOfWorkers() > 1) {
        try {
            InvocationWorker.getSynchronizationBarrier().waitForComplete(ACTION_NAME, SYNCHRONIZATION_WAIT_MS,
                    WAIT_GRANULARITY_MS);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
        return;
    }

    File workDir = new File(System.getProperty("user.dir"));

    // Generate YARN local resources objects corresponding to the distributed
    // cache configuration
    Map<String, LocalResource> localResources = new LinkedHashMap<String, LocalResource>();
    MRApps.setupDistributedCache(conf, localResources);

    //CODE CHANGE FROM ORIGINAL FILE:
    //We need to clear the resources from jar files, since they are distributed through the IG.
    //
    Iterator<Map.Entry<String, LocalResource>> iterator = localResources.entrySet().iterator();
    while (iterator.hasNext()) {
        Entry<String, LocalResource> entry = iterator.next();
        if (entry.getKey().endsWith(".jar")) {
            iterator.remove();
        }
    }

    // Generating unique numbers for FSDownload.

    AtomicLong uniqueNumberGenerator = new AtomicLong(System.currentTimeMillis());

    // Find which resources are to be put on the local classpath
    Map<String, Path> classpaths = new HashMap<String, Path>();
    Path[] archiveClassPaths = DistributedCache.getArchiveClassPaths(conf);
    if (archiveClassPaths != null) {
        for (Path p : archiveClassPaths) {
            FileSystem remoteFS = p.getFileSystem(conf);
            p = remoteFS.resolvePath(p.makeQualified(remoteFS.getUri(), remoteFS.getWorkingDirectory()));
            classpaths.put(p.toUri().getPath().toString(), p);
        }
    }

    Path[] fileClassPaths = DistributedCache.getFileClassPaths(conf);
    if (fileClassPaths != null) {
        for (Path p : fileClassPaths) {
            FileSystem remoteFS = p.getFileSystem(conf);
            p = remoteFS.resolvePath(p.makeQualified(remoteFS.getUri(), remoteFS.getWorkingDirectory()));
            classpaths.put(p.toUri().getPath().toString(), p);
        }
    }

    // Localize the resources
    LocalDirAllocator localDirAllocator = new LocalDirAllocator(MRConfig.LOCAL_DIR);
    FileContext localFSFileContext = FileContext.getLocalFSFileContext();
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();

    ExecutorService exec = null;
    try {
        ThreadFactory tf = new ThreadFactoryBuilder()
                .setNameFormat("LocalDistributedCacheManager Downloader #%d").build();
        exec = Executors.newCachedThreadPool(tf);
        Path destPath = localDirAllocator.getLocalPathForWrite(".", conf);
        Map<LocalResource, Future<Path>> resourcesToPaths = Maps.newHashMap();
        for (LocalResource resource : localResources.values()) {
            Callable<Path> download = new FSDownload(localFSFileContext, ugi, conf,
                    new Path(destPath, Long.toString(uniqueNumberGenerator.incrementAndGet())), resource);
            Future<Path> future = exec.submit(download);
            resourcesToPaths.put(resource, future);
        }
        for (Entry<String, LocalResource> entry : localResources.entrySet()) {
            LocalResource resource = entry.getValue();
            Path path;
            try {
                path = resourcesToPaths.get(resource).get();
            } catch (InterruptedException e) {
                throw new IOException(e);
            } catch (ExecutionException e) {
                throw new IOException(e);
            }
            String pathString = path.toUri().toString();
            String link = entry.getKey();
            String target = new File(path.toUri()).getPath();
            symlink(workDir, target, link);

            if (resource.getType() == LocalResourceType.ARCHIVE) {
                localArchives.add(pathString);
            } else if (resource.getType() == LocalResourceType.FILE) {
                localFiles.add(pathString);
            } else if (resource.getType() == LocalResourceType.PATTERN) {
                //PATTERN is not currently used in local mode
                throw new IllegalArgumentException(
                        "Resource type PATTERN is not " + "implemented yet. " + resource.getResource());
            }
            Path resourcePath;
            try {
                resourcePath = ConverterUtils.getPathFromYarnURL(resource.getResource());
            } catch (URISyntaxException e) {
                throw new IOException(e);
            }
            LOG.info(String.format("Localized %s as %s", resourcePath, path));
            String cp = resourcePath.toUri().getPath();
            if (classpaths.containsKey(cp)) {
                localClasspaths.add(path.toUri().getPath().toString());
            }
        }
    } finally {
        if (exec != null) {
            exec.shutdown();
        }
    }
    // Update the configuration object with localized data.
    if (!localArchives.isEmpty()) {
        conf.set(MRJobConfig.CACHE_LOCALARCHIVES,
                StringUtils.arrayToString(localArchives.toArray(new String[localArchives.size()])));
    }
    if (!localFiles.isEmpty()) {
        conf.set(MRJobConfig.CACHE_LOCALFILES,
                StringUtils.arrayToString(localFiles.toArray(new String[localFiles.size()])));
    }
    setupCalled = true;

    // If we are the 0th worker, signal that the action is complete.
    if (InvocationWorker.getIgWorkerIndex() == 0 && InvocationWorker.getNumberOfWorkers() > 1) {
        try {
            InvocationWorker.getSynchronizationBarrier().signalComplete(ACTION_NAME);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

}