Example usage for java.util.concurrent TimeUnit HOURS

List of usage examples for java.util.concurrent TimeUnit HOURS

Introduction

On this page you can find example usage of java.util.concurrent TimeUnit.HOURS, drawn from the open-source projects listed below.

Prototype

TimeUnit HOURS

Document

Time unit representing sixty minutes.
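
For orientation before the project examples below, here is a minimal, self-contained sketch (not taken from any of the projects listed) showing TimeUnit.HOURS used for unit conversion and as the unit argument of a timed call; the class name TimeUnitHoursExample is an illustrative placeholder.

import java.util.concurrent.TimeUnit;

public class TimeUnitHoursExample {
    public static void main(String[] args) throws InterruptedException {
        // Convert a duration given in hours to other units.
        long millis = TimeUnit.HOURS.toMillis(24);                  // 86400000
        long seconds = TimeUnit.HOURS.toSeconds(1);                 // 3600

        // Convert a duration given in another unit to hours (result truncates).
        long hours = TimeUnit.HOURS.convert(90, TimeUnit.MINUTES);  // 1

        System.out.println(millis + " " + seconds + " " + hours);

        // HOURS is also passed as the unit argument of timed APIs,
        // e.g. ThreadPoolExecutor keep-alive times or Future.get timeouts.
        TimeUnit.HOURS.sleep(0); // sleep for zero hours, i.e. return immediately
    }
}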

Usage

From source file:org.apache.hadoop.mapreduce.v2.app.commit.CommitterEventHandler.java
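Uses TimeUnit.HOURS as the keep-alive unit of the ThreadPoolExecutor that processes committer events.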

@Override
protected void serviceStart() throws Exception {
    ThreadFactoryBuilder tfBuilder = new ThreadFactoryBuilder().setNameFormat("CommitterEvent Processor #%d");
    if (jobClassLoader != null) {
        // if the job classloader is enabled, we need to use the job classloader
        // as the thread context classloader (TCCL) of these threads in case the
        // committer needs to load another class via TCCL
        ThreadFactory backingTf = new ThreadFactory() {
            @Override
            public Thread newThread(Runnable r) {
                Thread thread = new Thread(r);
                thread.setContextClassLoader(jobClassLoader);
                return thread;
            }
        };
        tfBuilder.setThreadFactory(backingTf);
    }
    ThreadFactory tf = tfBuilder.build();
    launcherPool = new ThreadPoolExecutor(5, 5, 1, TimeUnit.HOURS, new LinkedBlockingQueue<Runnable>(), tf);
    eventHandlingThread = new Thread(new Runnable() {
        @Override
        public void run() {
            CommitterEvent event = null;
            while (!stopped.get() && !Thread.currentThread().isInterrupted()) {
                try {
                    event = eventQueue.take();
                } catch (InterruptedException e) {
                    if (!stopped.get()) {
                        LOG.error("Returning, interrupted : " + e);
                    }
                    return;
                }
                // the events from the queue are handled in parallel
                // using a thread pool
                launcherPool.execute(new EventProcessor(event));
            }
        }
    });
    eventHandlingThread.setName("CommitterEvent Handler");
    eventHandlingThread.start();
    super.serviceStart();
}

From source file:org.apache.jackrabbit.oak.plugins.blob.MarkSweepGarbageCollector.java
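Uses TimeUnit.HOURS.toMillis(24) to express a 24-hour interval in milliseconds in the blob garbage collector's default constructor.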

/**
 * Instantiates a new blob garbage collector.
 */
public MarkSweepGarbageCollector(BlobReferenceRetriever marker, GarbageCollectableBlobStore blobStore,
        Executor executor, @Nullable String repositoryId) throws IOException {
    this(marker, blobStore, executor, TEMP_DIR, DEFAULT_BATCH_COUNT, TimeUnit.HOURS.toMillis(24), repositoryId);
}

From source file:com.linkedin.pinot.core.segment.index.loader.SegmentPreProcessorTest.java
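Passes TimeUnit.HOURS as the time unit of the "daysSinceEpoch" time column when building a test segment.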

private void constructSegment() throws Exception {
    FileUtils.deleteQuietly(INDEX_DIR);
    File avroFile = new File(
            TestUtils.getFileFromResourceUrl(getClass().getClassLoader().getResource(AVRO_DATA)));

    // NOTE: We create inverted index for 'column7' when constructing the segment.
    // Intentionally changed this to TimeUnit.HOURS to make it non-default for testing.
    SegmentGeneratorConfig config = SegmentTestUtils.getSegmentGenSpecWithSchemAndProjectedColumns(avroFile,
            INDEX_DIR, "daysSinceEpoch", TimeUnit.HOURS, "testTable");
    config.setSegmentNamePostfix("1");
    config.getInvertedIndexCreationColumns().clear();
    config.setInvertedIndexCreationColumns(Collections.singletonList(COLUMN7_NAME));
    SegmentIndexCreationDriver driver = SegmentCreationDriverFactory.get(null);
    driver.init(config);
    driver.build();

    segmentDirectoryFile = new File(INDEX_DIR, driver.getSegmentName());
}

From source file:org.datavec.api.transform.transform.RegressionTestJson.java
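Uses TimeUnit.HOURS in a TransformProcess for time math, time-based sequence splitting, and window sizes.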

@Test
public void regressionTestJson100a() throws Exception {
    //JSON saved in 1.0.0-alpha, before JSON format change

    File f = new ClassPathResource("datavec-api/regression_test/100a/transformprocess_regression_100a.json")
            .getFile();
    String s = FileUtils.readFileToString(f);

    TransformProcess fromJson = TransformProcess.fromJson(s);

    Schema schema = new Schema.Builder().addColumnCategorical("Cat", "State1", "State2")
            .addColumnCategorical("Cat2", "State1", "State2").addColumnDouble("Dbl")
            .addColumnDouble("Dbl2", null, 100.0, true, false).addColumnInteger("Int")
            .addColumnInteger("Int2", 0, 10).addColumnLong("Long").addColumnLong("Long2", -100L, null)
            .addColumnString("Str").addColumnString("Str2", "someregexhere", 1, null).addColumnString("Str3")
            .addColumnTime("TimeCol", DateTimeZone.UTC).addColumnTime("TimeCol2", DateTimeZone.UTC, null, 1000L)
            .build();

    Map<String, String> map = new HashMap<>();
    map.put("from", "to");
    map.put("anotherFrom", "anotherTo");

    TransformProcess expected = new TransformProcess.Builder(schema).categoricalToInteger("Cat")
            .categoricalToOneHot("Cat2").appendStringColumnTransform("Str3", "ToAppend")
            .integerToCategorical("Cat", Arrays.asList("State1", "State2"))
            .stringToCategorical("Str", Arrays.asList("State1", "State2")).duplicateColumn("Str", "Str2a")
            .removeColumns("Str2a").renameColumn("Str2", "Str2a").reorderColumns("Cat", "Dbl")
            .conditionalCopyValueTransform("Dbl", "Dbl2",
                    new DoubleColumnCondition("Dbl", ConditionOp.Equal, 0.0))
            .conditionalReplaceValueTransform("Dbl", new DoubleWritable(1.0),
                    new DoubleColumnCondition("Dbl", ConditionOp.Equal, 1.0))
            .doubleColumnsMathOp("NewDouble", MathOp.Add, "Dbl", "Dbl2").doubleMathOp("Dbl", MathOp.Add, 1.0)
            .integerColumnsMathOp("NewInt", MathOp.Subtract, "Int", "Int2")
            .integerMathOp("Int", MathOp.Multiply, 2)
            .transform(new ReplaceEmptyIntegerWithValueTransform("Int", 1))
            .transform(new ReplaceInvalidWithIntegerTransform("Int", 1))
            .longColumnsMathOp("Long", MathOp.Multiply, "Long", "Long2").longMathOp("Long", MathOp.ScalarMax, 0)
            .transform(new MapAllStringsExceptListTransform("Str", "Other", Arrays.asList("Ok", "SomeVal")))
            .stringRemoveWhitespaceTransform("Str")
            .transform(new ReplaceEmptyStringTransform("Str", "WasEmpty")).replaceStringTransform("Str", map)
            .transform(new StringListToCategoricalSetTransform("Str", Arrays.asList("StrA", "StrB"),
                    Arrays.asList("StrA", "StrB"), ","))
            .stringMapTransform("Str2a", map)
            .transform(new DeriveColumnsFromTimeTransform.Builder("TimeCol")
                    .addIntegerDerivedColumn("Hour", DateTimeFieldType.hourOfDay())
                    .addStringDerivedColumn("Date", "YYYY-MM-dd", DateTimeZone.UTC).build())
            .stringToTimeTransform("Str2a", "YYYY-MM-dd hh:mm:ss", DateTimeZone.UTC)
            .timeMathOp("TimeCol2", MathOp.Add, 1, TimeUnit.HOURS)

            //Filters:
            .filter(new FilterInvalidValues("Cat", "Str2a"))
            .filter(new ConditionFilter(new NullWritableColumnCondition("Long")))

            //Convert to/from sequence
            .convertToSequence("Int", new NumericalColumnComparator("TimeCol2")).convertFromSequence()

            //Sequence split
            .convertToSequence("Int", new StringComparator("Str2a"))
            .splitSequence(new SequenceSplitTimeSeparation("TimeCol2", 1, TimeUnit.HOURS))

            //Reducers and reduce by window:
            .reduce(new Reducer.Builder(ReduceOp.TakeFirst).keyColumns("TimeCol2").countColumns("Cat")
                    .sumColumns("Dbl").build())
            .reduceSequenceByWindow(
                    new Reducer.Builder(ReduceOp.TakeFirst).countColumns("Cat2").stdevColumns("Dbl2").build(),
                    new OverlappingTimeWindowFunction.Builder().timeColumn("TimeCol2")
                            .addWindowStartTimeColumn(true).addWindowEndTimeColumn(true)
                            .windowSize(1, TimeUnit.HOURS).offset(5, TimeUnit.MINUTES)
                            .windowSeparation(15, TimeUnit.MINUTES).excludeEmptyWindows(true).build())

            //Calculate sorted rank
            .convertFromSequence().calculateSortedRank("rankColName", "TimeCol2", new LongWritableComparator())
            .sequenceMovingWindowReduce("rankColName", 20, ReduceOp.Mean)
            .addConstantColumn("someIntColumn", ColumnType.Integer, new IntWritable(0))
            .integerToOneHot("someIntColumn", 0, 3).filter(new SequenceLengthCondition(ConditionOp.LessThan, 1))
            .addConstantColumn("testColSeq", ColumnType.Integer, new DoubleWritable(0))
            .offsetSequence(Collections.singletonList("testColSeq"), 1,
                    SequenceOffsetTransform.OperationType.InPlace)
            .addConstantColumn("someTextCol", ColumnType.String, new Text("some values")).build();

    assertEquals(expected, fromJson);
}

From source file:org.hillview.dataset.remoting.HillviewServer.java
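Uses TimeUnit.HOURS with CacheBuilder.expireAfterAccess to expire cached datasets and unsubscribe entries after a number of hours without access.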

public HillviewServer(final HostAndPort listenAddress, final IDataSet initialDataset) throws IOException {
    this.initialDataset = initialDataset;
    this.listenAddress = listenAddress;
    this.memoizedCommands = new MemoizedResults();
    this.server = NettyServerBuilder
            .forAddress(new InetSocketAddress(listenAddress.getHost(), listenAddress.getPort()))
            .executor(executorService).workerEventLoopGroup(workerElg).bossEventLoopGroup(bossElg)
            .addService(this).maxMessageSize(MAX_MESSAGE_SIZE).build().start();
    this.dataSets = CacheBuilder.<Integer, IDataSet>newBuilder()
            .expireAfterAccess(EXPIRE_TIME_IN_HOURS, TimeUnit.HOURS)
            .removalListener((RemovalListener<Integer, IDataSet>) removalNotification -> HillviewLogger.instance
                    .info("Removing reference to dataset", "{0}: {1}", removalNotification.getKey(),
                            removalNotification.getValue().toString()))
            .build();
    this.toUnsubscribe = CacheBuilder.<UUID, Boolean>newBuilder()
            .expireAfterAccess(EXPIRE_TIME_IN_HOURS, TimeUnit.HOURS).build();
}

From source file:org.apache.tajo.master.GlobalEngine.java
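Uses TimeUnit.HOURS with expireAfterAccess when building the per-session query cache.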

private QueryContext createQueryContext(Session session) {
    QueryContext newQueryContext = new QueryContext(context.getConf(), session);

    // Set default space uri and its root uri
    newQueryContext.setDefaultSpaceUri(TablespaceManager.getDefault().getUri());
    newQueryContext.setDefaultSpaceRootUri(TablespaceManager.getDefault().getRootUri());

    if (TajoConstants.IS_TEST_MODE) {
        newQueryContext.putAll(CommonTestingUtil.getSessionVarsForTest());
    }

    // Set queryCache in session
    int queryCacheSize = context.getConf().getIntVar(TajoConf.ConfVars.QUERY_SESSION_QUERY_CACHE_SIZE);
    if (queryCacheSize > 0 && session.getQueryCache() == null) {
        Weigher<String, Expr> weighByLength = new Weigher<String, Expr>() {
            public int weigh(String key, Expr expr) {
                return key.length();
            }
        };
        LoadingCache<String, Expr> cache = CacheBuilder.newBuilder().maximumWeight(queryCacheSize * 1024)
                .weigher(weighByLength).expireAfterAccess(1, TimeUnit.HOURS)
                .build(new CacheLoader<String, Expr>() {
                    public Expr load(String sql) throws SQLSyntaxError {
                        return analyzer.parse(sql);
                    }
                });
        session.setQueryCache(cache);
    }
    return newQueryContext;
}

From source file:org.dcache.util.histograms.TimeseriesHistogramTest.java
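Uses TimeUnit.HOURS.toMillis(1) as the histogram bin unit and TimeUnit.HOURS.name() as the bin label in a timeseries histogram test.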

@Test
public void updateOnTimeseriesHistogramShouldAverageLastValue() throws Exception {
    givenTimeseriesHistogram();
    givenQueueCountValuesFor(48);
    givenBinUnitOf((double) TimeUnit.HOURS.toMillis(1));
    givenBinCountOf(48);
    givenBinLabelOf(TimeUnit.HOURS.name());
    givenDataLabelOf("COUNT");
    givenHistogramTypeOf("Queued Movers");
    givenHighestBinOf(getHoursInThePastFromNow(0));
    whenConfigureIsCalled();
    assertThatUpdateAveragesLastValue();
}

From source file:net.solarnetwork.node.backup.s3.S3BackupService.java
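Uses TimeUnit.HOURS.toSeconds(1) to set a one-hour cache timeout expressed in seconds.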

/**
 * Default constructor.
 */
public S3BackupService() {
    super();
    setRegionName(DEFAULT_REGION_NAME);
    setObjectKeyPrefix(DEFAULT_OBJECT_KEY_PREFIX);
    setCacheSeconds((int) TimeUnit.HOURS.toSeconds(1));
    setAdditionalBackupCount(DEFAULT_ADDITIONAL_BACKUP_COUNT);
}

From source file:org.apache.hadoop.yarn.client.api.async.impl.NMClientAsyncImpl.java
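Uses TimeUnit.HOURS as the keep-alive unit of a dynamically resized ThreadPoolExecutor for container events.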

@Override
protected void serviceStart() throws Exception {
    client.start();

    ThreadFactory tf = new ThreadFactoryBuilder().setNameFormat(this.getClass().getName() + " #%d")
            .setDaemon(true).build();

    // Start with a default core-pool size and change it dynamically.
    int initSize = Math.min(INITIAL_THREAD_POOL_SIZE, maxThreadPoolSize);
    threadPool = new ThreadPoolExecutor(initSize, Integer.MAX_VALUE, 1, TimeUnit.HOURS,
            new LinkedBlockingQueue<Runnable>(), tf);

    eventDispatcherThread = new Thread() {
        @Override
        public void run() {
            ContainerEvent event = null;
            Set<String> allNodes = new HashSet<String>();

            while (!stopped.get() && !Thread.currentThread().isInterrupted()) {
                try {
                    event = events.take();
                } catch (InterruptedException e) {
                    if (!stopped.get()) {
                        LOG.error("Returning, thread interrupted", e);
                    }
                    return;
                }

                allNodes.add(event.getNodeId().toString());

                int threadPoolSize = threadPool.getCorePoolSize();

                // We can increase the pool size only if haven't reached the maximum
                // limit yet.
                if (threadPoolSize != maxThreadPoolSize) {

                    // nodes where containers will run at *this* point of time. This is
                    // *not* the cluster size and doesn't need to be.
                    int nodeNum = allNodes.size();
                    int idealThreadPoolSize = Math.min(maxThreadPoolSize, nodeNum);

                    if (threadPoolSize < idealThreadPoolSize) {
                        // Bump up the pool size to idealThreadPoolSize +
                        // INITIAL_POOL_SIZE, the later is just a buffer so we are not
                        // always increasing the pool-size
                        int newThreadPoolSize = Math.min(maxThreadPoolSize,
                                idealThreadPoolSize + INITIAL_THREAD_POOL_SIZE);
                        LOG.info("Set NMClientAsync thread pool size to " + newThreadPoolSize
                                + " as the number of nodes to talk to is " + nodeNum);
                        threadPool.setCorePoolSize(newThreadPoolSize);
                    }
                }

                // the events from the queue are handled in parallel with a thread
                // pool
                threadPool.execute(getContainerEventProcessor(event));

                // TODO: Group launching of multiple containers to a single
                // NodeManager into a single connection
            }
        }
    };
    eventDispatcherThread.setName("Container  Event Dispatcher");
    eventDispatcherThread.setDaemon(false);
    eventDispatcherThread.start();

    super.serviceStart();
}

From source file:org.opencastproject.capture.admin.impl.CaptureAgentStateServiceImpl.java
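Uses TimeUnit.HOURS with MapMaker.expireAfterWrite to expire cached capture-agent state after one hour.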

public void activate(ComponentContext cc) {
    emf = persistenceProvider.createEntityManagerFactory(
            "org.opencastproject.capture.admin.impl.CaptureAgentStateServiceImpl", persistenceProperties);

    // Setup the agent cache
    agentCache = new MapMaker().expireAfterWrite(1, TimeUnit.HOURS)
            .makeComputingMap(new Function<String, Object>() {
                public Object apply(String id) {
                    String[] key = id.split(DELIMITER);
                    AgentImpl agent;
                    try {
                        agent = getAgent(key[0], key[1]);
                    } catch (NotFoundException e) {
                        return nullToken;
                    }
                    return agent == null ? nullToken : Tuple.tuple(agent.getState(), agent.getConfiguration());
                }
            });
}