Example usage for java.util.concurrent.atomic AtomicLong AtomicLong

Introduction

On this page you can find example usages of the java.util.concurrent.atomic AtomicLong constructor AtomicLong(long).

Prototype

public AtomicLong(long initialValue) 

Document

Creates a new AtomicLong with the given initial value.
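
Before the longer excerpts below, here is a minimal, self-contained sketch (not taken from any of the listed source files) showing the constructor with an initial value and a few of the atomic operations the examples rely on:

import java.util.concurrent.atomic.AtomicLong;

public class AtomicLongDemo {
    public static void main(String[] args) {
        // Create a counter starting at 100 instead of the default 0.
        AtomicLong counter = new AtomicLong(100L);

        long next = counter.incrementAndGet(); // atomically +1, returns 101
        long bigger = counter.addAndGet(50L);  // atomically +50, returns 151
        long current = counter.get();          // plain read, returns 151

        System.out.printf("next=%d bigger=%d current=%d%n", next, bigger, current);
    }
}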

Usage

From source file:org.apache.hadoop.hbase.mapreduce.TestWALRecordReader.java

/**
 * Test partial reads from the log based on passed time range.
 * @throws Exception
 */
@Test
public void testPartialRead() throws Exception {
    final WALFactory walfactory = new WALFactory(conf, null, getName());
    WAL log = walfactory.getWAL(info.getEncodedNameAsBytes());
    // This test depends on timestamp being millisecond based and the filename of the WAL also
    // being millisecond based.
    long ts = System.currentTimeMillis();
    WALEdit edit = new WALEdit();
    final AtomicLong sequenceId = new AtomicLong(0);
    edit.add(new KeyValue(rowName, family, Bytes.toBytes("1"), ts, value));
    log.append(htd, info, getWalKey(ts), edit, sequenceId, true, null);
    edit = new WALEdit();
    edit.add(new KeyValue(rowName, family, Bytes.toBytes("2"), ts + 1, value));
    log.append(htd, info, getWalKey(ts + 1), edit, sequenceId, true, null);
    log.sync();
    LOG.info("Before 1st WAL roll " + log.toString());
    log.rollWriter();
    LOG.info("Past 1st WAL roll " + log.toString());

    Thread.sleep(1);
    long ts1 = System.currentTimeMillis();

    edit = new WALEdit();
    edit.add(new KeyValue(rowName, family, Bytes.toBytes("3"), ts1 + 1, value));
    log.append(htd, info, getWalKey(ts1 + 1), edit, sequenceId, true, null);
    edit = new WALEdit();
    edit.add(new KeyValue(rowName, family, Bytes.toBytes("4"), ts1 + 2, value));
    log.append(htd, info, getWalKey(ts1 + 2), edit, sequenceId, true, null);
    log.sync();
    log.shutdown();
    walfactory.shutdown();
    LOG.info("Closed WAL " + log.toString());

    WALInputFormat input = new WALInputFormat();
    Configuration jobConf = new Configuration(conf);
    jobConf.set("mapreduce.input.fileinputformat.inputdir", logDir.toString());
    jobConf.setLong(WALInputFormat.END_TIME_KEY, ts);

    // only 1st file is considered, and only its 1st entry is used
    List<InputSplit> splits = input.getSplits(MapreduceTestingShim.createJobContext(jobConf));

    assertEquals(1, splits.size());
    testSplit(splits.get(0), Bytes.toBytes("1"));

    jobConf.setLong(WALInputFormat.START_TIME_KEY, ts + 1);
    jobConf.setLong(WALInputFormat.END_TIME_KEY, ts1 + 1);
    splits = input.getSplits(MapreduceTestingShim.createJobContext(jobConf));
    // both files need to be considered
    assertEquals(2, splits.size());
    // only the 2nd entry from the 1st file is used
    testSplit(splits.get(0), Bytes.toBytes("2"));
    // only the 1st entry from the 2nd file is used
    testSplit(splits.get(1), Bytes.toBytes("3"));
}
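
Note how a single AtomicLong(0) is shared across all four appends as the region's sequence-id counter: the WAL advances it atomically, so each appended edit receives a distinct, monotonically increasing sequence number.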

From source file:com.opentech.camel.task.TaskDomainRuntime.java

/**
 * Constructs a runtime for the given task domain.
 * @param taskDomain
 * @param timeout
 * @param resourceController
 * @param defaultRuntime
 * @param watchdog
 */
public TaskDomainRuntime(TaskDomain taskDomain, long timeout, TaskDomainResourceController resourceController,
        TaskDomainRuntime defaultRuntime, Watchdog watchdog) {
    this.taskDomain = taskDomain;
    this.timeout = timeout;
    this.resourceController = resourceController;
    this.defaultRuntime = defaultRuntime;
    this.watchdog = watchdog;
    requestTasks = new AtomicLong(0L);
    totalTasks = new AtomicLong(0L);
    succeedTasks = new AtomicLong(0L);
    failedTasks = new AtomicLong(0L);
    executingTasks = new AtomicLong(0L);
    timeoutTasks = new AtomicLong(0L);
}
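
Each lifecycle metric (requested, total, succeeded, failed, executing, timed out) gets its own AtomicLong starting at 0L, so worker threads can update the counters concurrently without external locking.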

From source file:com.aol.advertising.qiao.management.metrics.StatisticsStore.java

public void incr(String key, long delta) {
    if (stats.containsKey(key)) {
        stats.get(key).addAndGet(delta);
    } else {
        stats.put(key, new AtomicLong(delta));
    }
}
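
Note that the containsKey/put sequence above is not atomic: two threads that miss on the same key can both create a new AtomicLong, and one delta is silently lost. Assuming stats is a ConcurrentMap<String, AtomicLong> (its declaration is not shown in the excerpt), a race-free sketch could use computeIfAbsent:

// Hypothetical rework of incr(); assumes stats is declared as a
// java.util.concurrent.ConcurrentMap<String, AtomicLong>.
public void incr(String key, long delta) {
    // computeIfAbsent creates the counter at most once per key,
    // then addAndGet applies the delta atomically.
    stats.computeIfAbsent(key, k -> new AtomicLong(0L)).addAndGet(delta);
}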

From source file:org.apache.hadoop.hbase.regionserver.MemStore.java

/**
 * Constructor.
 * @param c Comparator
 */
public MemStore(final Configuration conf, final KeyValue.KVComparator c) {
    this.conf = conf;
    this.comparator = c;
    this.comparatorIgnoreTimestamp = this.comparator.getComparatorIgnoringTimestamps();
    this.comparatorIgnoreType = this.comparator.getComparatorIgnoringType();
    this.kvset = new KeyValueSkipListSet(c);
    this.snapshot = new KeyValueSkipListSet(c);
    timeRangeTracker = new TimeRangeTracker();
    snapshotTimeRangeTracker = new TimeRangeTracker();
    this.size = new AtomicLong(DEEP_OVERHEAD);
    if (conf.getBoolean(USEMSLAB_KEY, USEMSLAB_DEFAULT)) {
        this.allocator = new MemStoreLAB(conf);
    } else {
        this.allocator = null;
    }
}
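
Starting the size counter at DEEP_OVERHEAD rather than at zero accounts for the fixed heap cost of the MemStore object itself; later adds and removes adjust the same AtomicLong atomically as KeyValues come and go.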

From source file:org.apache.hadoop.hbase.wal.TestWALReaderOnSecureWAL.java

private Path writeWAL(final WALFactory wals, final String tblName) throws IOException {
    Configuration conf = TEST_UTIL.getConfiguration();
    String clsName = conf.get(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, WALCellCodec.class.getName());
    conf.setClass(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, SecureWALCellCodec.class, WALCellCodec.class);
    try {
        TableName tableName = TableName.valueOf(tblName);
        HTableDescriptor htd = new HTableDescriptor(tableName);
        htd.addFamily(new HColumnDescriptor(tableName.getName()));
        HRegionInfo regioninfo = new HRegionInfo(tableName, HConstants.EMPTY_START_ROW,
                HConstants.EMPTY_END_ROW, false);
        final int total = 10;
        final byte[] row = Bytes.toBytes("row");
        final byte[] family = Bytes.toBytes("family");
        FileSystem fs = TEST_UTIL.getTestFileSystem();
        Path logDir = TEST_UTIL.getDataTestDir(tblName);
        final AtomicLong sequenceId = new AtomicLong(1);

        // Write the WAL
        WAL wal = wals.getWAL(regioninfo.getEncodedNameAsBytes());
        for (int i = 0; i < total; i++) {
            WALEdit kvs = new WALEdit();
            kvs.add(new KeyValue(row, family, Bytes.toBytes(i), value));
            wal.append(htd, regioninfo,
                    new WALKey(regioninfo.getEncodedNameAsBytes(), tableName, System.currentTimeMillis()), kvs,
                    sequenceId, true, null);
        }
        wal.sync();
        final Path walPath = DefaultWALProvider.getCurrentFileName(wal);
        wal.shutdown();

        return walPath;
    } finally {
        // restore the cell codec class
        conf.set(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, clsName);
    }
}

From source file:com.scaleoutsoftware.soss.hserver.hadoop.DistributedCacheManager.java

/**
 * Set up the distributed cache by localizing the resources, and updating
 * the configuration with references to the localized resources.
 * @param conf job configuration
 * @throws IOException
 */
public void setup(Configuration conf) throws IOException {
    //If we are not the 0th worker, wait for the 0th worker to set up the cache
    if (InvocationWorker.getIgWorkerIndex() > 0 && InvocationWorker.getNumberOfWorkers() > 1) {
        try {
            InvocationWorker.getSynchronizationBarrier().waitForComplete(ACTION_NAME, SYNCHRONIZATION_WAIT_MS,
                    WAIT_GRANULARITY_MS);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
        return;
    }

    File workDir = new File(System.getProperty("user.dir"));

    // Generate YARN local resources objects corresponding to the distributed
    // cache configuration
    Map<String, LocalResource> localResources = new LinkedHashMap<String, LocalResource>();
    MRApps.setupDistributedCache(conf, localResources);

    //CODE CHANGE FROM ORIGINAL FILE:
    //We need to remove the jar-file resources, since they are distributed through the IG.
    Iterator<Map.Entry<String, LocalResource>> iterator = localResources.entrySet().iterator();
    while (iterator.hasNext()) {
        Entry<String, LocalResource> entry = iterator.next();
        if (entry.getKey().endsWith(".jar")) {
            iterator.remove();
        }
    }

    // Generating unique numbers for FSDownload.

    AtomicLong uniqueNumberGenerator = new AtomicLong(System.currentTimeMillis());

    // Find which resources are to be put on the local classpath
    Map<String, Path> classpaths = new HashMap<String, Path>();
    Path[] archiveClassPaths = DistributedCache.getArchiveClassPaths(conf);
    if (archiveClassPaths != null) {
        for (Path p : archiveClassPaths) {
            FileSystem remoteFS = p.getFileSystem(conf);
            p = remoteFS.resolvePath(p.makeQualified(remoteFS.getUri(), remoteFS.getWorkingDirectory()));
            classpaths.put(p.toUri().getPath().toString(), p);
        }
    }

    Path[] fileClassPaths = DistributedCache.getFileClassPaths(conf);
    if (fileClassPaths != null) {
        for (Path p : fileClassPaths) {
            FileSystem remoteFS = p.getFileSystem(conf);
            p = remoteFS.resolvePath(p.makeQualified(remoteFS.getUri(), remoteFS.getWorkingDirectory()));
            classpaths.put(p.toUri().getPath().toString(), p);
        }
    }

    // Localize the resources
    LocalDirAllocator localDirAllocator = new LocalDirAllocator(MRConfig.LOCAL_DIR);
    FileContext localFSFileContext = FileContext.getLocalFSFileContext();
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();

    ExecutorService exec = null;
    try {
        ThreadFactory tf = new ThreadFactoryBuilder()
                .setNameFormat("LocalDistributedCacheManager Downloader #%d").build();
        exec = Executors.newCachedThreadPool(tf);
        Path destPath = localDirAllocator.getLocalPathForWrite(".", conf);
        Map<LocalResource, Future<Path>> resourcesToPaths = Maps.newHashMap();
        for (LocalResource resource : localResources.values()) {
            Callable<Path> download = new FSDownload(localFSFileContext, ugi, conf,
                    new Path(destPath, Long.toString(uniqueNumberGenerator.incrementAndGet())), resource);
            Future<Path> future = exec.submit(download);
            resourcesToPaths.put(resource, future);
        }
        for (Entry<String, LocalResource> entry : localResources.entrySet()) {
            LocalResource resource = entry.getValue();
            Path path;
            try {
                path = resourcesToPaths.get(resource).get();
            } catch (InterruptedException e) {
                throw new IOException(e);
            } catch (ExecutionException e) {
                throw new IOException(e);
            }
            String pathString = path.toUri().toString();
            String link = entry.getKey();
            String target = new File(path.toUri()).getPath();
            symlink(workDir, target, link);

            if (resource.getType() == LocalResourceType.ARCHIVE) {
                localArchives.add(pathString);
            } else if (resource.getType() == LocalResourceType.FILE) {
                localFiles.add(pathString);
            } else if (resource.getType() == LocalResourceType.PATTERN) {
                //PATTERN is not currently used in local mode
                throw new IllegalArgumentException(
                        "Resource type PATTERN is not " + "implemented yet. " + resource.getResource());
            }
            Path resourcePath;
            try {
                resourcePath = ConverterUtils.getPathFromYarnURL(resource.getResource());
            } catch (URISyntaxException e) {
                throw new IOException(e);
            }
            LOG.info(String.format("Localized %s as %s", resourcePath, path));
            String cp = resourcePath.toUri().getPath();
            if (classpaths.keySet().contains(cp)) {
                localClasspaths.add(path.toUri().getPath().toString());
            }
        }
    } finally {
        if (exec != null) {
            exec.shutdown();
        }
    }
    // Update the configuration object with localized data.
    if (!localArchives.isEmpty()) {
        conf.set(MRJobConfig.CACHE_LOCALARCHIVES,
                StringUtils.arrayToString(localArchives.toArray(new String[localArchives.size()])));
    }
    if (!localFiles.isEmpty()) {
        conf.set(MRJobConfig.CACHE_LOCALFILES,
                StringUtils.arrayToString(localFiles.toArray(new String[localFiles.size()])));
    }
    setupCalled = true;

    //If we are the 0th worker, signal that the action is complete
    if (InvocationWorker.getIgWorkerIndex() == 0 && InvocationWorker.getNumberOfWorkers() > 1) {
        try {
            InvocationWorker.getSynchronizationBarrier().signalComplete(ACTION_NAME);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

}
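
Here the AtomicLong seeded with System.currentTimeMillis() serves purely as a thread-safe unique-number generator: each FSDownload task submitted to the thread pool gets a destination directory named by incrementAndGet(), and the time-based seed makes collisions with directories left over from earlier runs unlikely.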

From source file:com.ning.metrics.serialization.writer.DiskSpoolEventWriter.java

public DiskSpoolEventWriter(final EventHandler eventHandler, final String spoolPath, final boolean flushEnabled,
        final long flushIntervalInSeconds, final ScheduledExecutorService executor, final SyncType syncType,
        final int syncBatchSize, final CompressionCodec codec, final EventSerializer eventSerializer) {
    this.eventHandler = eventHandler;
    this.syncType = syncType;
    this.syncBatchSize = syncBatchSize;
    this.spoolDirectory = new File(spoolPath);
    this.executor = executor;
    this.tmpSpoolDirectory = new File(spoolDirectory, "_tmp");
    this.quarantineDirectory = new File(spoolDirectory, "_quarantine");
    this.lockDirectory = new File(spoolDirectory, "_lock");
    this.flushEnabled = new AtomicBoolean(flushEnabled);
    this.flushIntervalInSeconds = new AtomicLong(flushIntervalInSeconds);
    this.codec = codec;
    this.eventSerializer = eventSerializer;
    writeTimerName = new MetricName(DiskSpoolEventWriter.class, spoolPath);
    writeTimer = Metrics.newTimer(writeTimerName, TimeUnit.MILLISECONDS, TimeUnit.SECONDS);

    createSpoolDir(spoolDirectory);
    createSpoolDir(tmpSpoolDirectory);
    createSpoolDir(quarantineDirectory);
    createSpoolDir(lockDirectory);

    // Fail early
    if (!spoolDirectory.exists() || !tmpSpoolDirectory.exists() || !quarantineDirectory.exists()
            || !lockDirectory.exists()) {
        throw new IllegalArgumentException("Eventwriter misconfigured - couldn't create the spool directories");
    }

    scheduleFlush();
    recoverFiles();

    acceptsEvents = true;
}
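
Holding flushIntervalInSeconds in an AtomicLong (next to the AtomicBoolean for flushEnabled) suggests these settings are meant to be changed at runtime by other threads while the scheduled flush task reads them.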

From source file:org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationImpl.java

public ApplicationImpl(Dispatcher dispatcher, String user, ApplicationId appId, Credentials credentials,
        Context context, String userFolder, int x509Version, long jwtExpiration) {
    this.dispatcher = dispatcher;
    this.user = user;
    this.userFolder = userFolder;
    this.appId = appId;
    this.credentials = credentials;
    this.aclsManager = context.getApplicationACLsManager();
    this.context = context;
    this.x509Version = new AtomicInteger(x509Version);
    this.jwtExpiration = new AtomicLong(jwtExpiration);
    ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
    readLock = lock.readLock();
    writeLock = lock.writeLock();
    stateMachine = stateMachineFactory.make(this);
}
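
Wrapping x509Version in an AtomicInteger and jwtExpiration in an AtomicLong lets another thread (a credential-renewal path, for example) publish updated values without having to take the application's read/write lock.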

From source file:com.aol.advertising.qiao.management.metrics.StatisticsStore.java

public void incr(String key) {
    if (stats.containsKey(key)) {
        stats.get(key).incrementAndGet();
    } else {
        stats.put(key, new AtomicLong(1));
    }
}
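
This overload has the same non-atomic containsKey/put window as incr(String, long) above, so two racing threads can lose an increment; the computeIfAbsent sketch shown earlier applies here as well.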

From source file:org.apache.streams.local.queues.ThroughputQueue.java

/**
 * Creates a bounded, registered {@code ThroughputQueue}
 *
 * @param maxSize maximum capacity of the queue; if maxSize < 1, the queue is unbounded
 * @param id      unique id under which this queue is registered; if id == null, the queue is not registered
 */
public ThroughputQueue(int maxSize, String id, String streamIdentifier, long startedAt) {
    if (maxSize < 1) {
        this.underlyingQueue = new LinkedBlockingQueue<>();
    } else {
        this.underlyingQueue = new LinkedBlockingQueue<>(maxSize);
    }
    this.elementsAdded = new AtomicLong(0);
    this.elementsRemoved = new AtomicLong(0);
    this.startTime = new AtomicLong(-1);
    this.active = false;
    this.maxQueuedTime = 0;
    this.maxQueueTimeLock = new ReentrantReadWriteLock();
    this.totalQueueTime = new AtomicLong(0);
    if (id != null) {
        try {
            ObjectName name = new ObjectName(String.format(NAME_TEMPLATE, id, streamIdentifier, startedAt));
            MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
            mbs.registerMBean(this, name);
        } catch (MalformedObjectNameException | InstanceAlreadyExistsException | MBeanRegistrationException
                | NotCompliantMBeanException e) {
            LOGGER.error("Failed to register MXBean", e);
            throw new RuntimeException(e);
        }
    }
}
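
Initializing startTime to -1 reads as a "not yet started" sentinel, presumably replaced with a real timestamp once the queue becomes active, while elementsAdded, elementsRemoved, and totalQueueTime start at zero so the throughput statistics can be derived entirely from atomic counters.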