Example usage for java.util.concurrent TimeUnit MINUTES

Introduction

This page collects usage examples for the java.util.concurrent TimeUnit.MINUTES enum constant.

Prototype

TimeUnit MINUTES

Document

Time unit representing sixty seconds.
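
TimeUnit.MINUTES converts minute-denominated durations to and from other units, and can block the current thread directly. The following minimal sketch uses only standard JDK methods; the class name TimeUnitMinutesDemo is illustrative.

import java.util.concurrent.TimeUnit;

public class TimeUnitMinutesDemo {
    public static void main(String[] args) throws InterruptedException {
        // Convert minutes to smaller units.
        System.out.println(TimeUnit.MINUTES.toMillis(5));  // 300000
        System.out.println(TimeUnit.MINUTES.toSeconds(5)); // 300

        // Convert the other way; results truncate toward zero.
        System.out.println(TimeUnit.MINUTES.convert(90, TimeUnit.SECONDS)); // 1

        // Block the current thread for a minute-denominated duration.
        TimeUnit.MINUTES.sleep(0); // 0 returns immediately; 1 would sleep a full minute
    }
}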

Usage

From source file:com.jivesoftware.sdk.service.filter.JiveAuthorizationValidator.java
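
Here TimeUnit.MINUTES.toMillis(5) converts a five-minute validity window to milliseconds so that stale authorization timestamps can be rejected.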

public void authenticate(ContainerRequestContext request) {
    String authorization = request.getHeaderString(HttpHeaders.AUTHORIZATION);
    if (authorization == null) {
        throw UNAUTHORIZED;
    }

    if (log.isTraceEnabled()) {
        log.trace("Authz Header:\n" + authorization);
    }

    if (!authorization.startsWith(JIVE_EXTN) || !authorization.contains(QUERY_PARAM_SIGNATURE)) {
        if (log.isInfoEnabled()) {
            log.info("Jive authorization isn't properly formatted: " + authorization);
        }
        throw BAD_REQUEST;
    } // end if

    Map<String, String> paramMap = getParamsFromAuthz(authorization);

    if (log.isDebugEnabled()) {
        log.debug("Authz Parameters: \n" + paramMap);
    }

    String signature = paramMap.get(PARAM_SIGNATURE);
    String algorithm = paramMap.get(PARAM_ALGORITHM);
    String clientId = paramMap.get(PARAM_CLIENT_ID);
    String jiveUrl = paramMap.get(PARAM_JIVE_URL);
    String tenantId = paramMap.get(PARAM_TENANT_ID);
    String timeStampStr = paramMap.get(PARAM_TIMESTAMP);

    if (!JiveSDKUtils.isAllExist(algorithm, clientId, jiveUrl, tenantId, timeStampStr)) {
        log.error("Jive authorization is partial: " + paramMap);
        throw BAD_REQUEST;
    } // end if

    long timeStamp = Long.parseLong(timeStampStr);
    long millisPassed = System.currentTimeMillis() - timeStamp;
    if (millisPassed < 0 || millisPassed > TimeUnit.MINUTES.toMillis(5)) {
        log.error("Jive authorization is rejected since it's " + millisPassed
                + "ms old (max. allowed is 5 minutes): " + paramMap);
        throw UNAUTHORIZED;
    } // end if

    //JiveInstance jiveInstance = jiveInstanceProvider.getInstanceByTenantId(tenantId);
    JiveInstance jiveInstance = jiveAddOnApplication.getJiveInstanceProvider().getInstanceByTenantId(tenantId);

    if (jiveInstance == null) {
        log.error("Jive authorization failed due to invalid tenant ID: " + tenantId);
        throw UNAUTHORIZED;
    } // end if

    String expectedClientId = jiveInstance.getClientId();
    if (!clientId.equals(expectedClientId)) {
        String msg = String.format(
                "Jive authorization failed due to mismatched Client ID: Actual [%s], Expected [%s]", clientId,
                expectedClientId);
        log.error(msg);
        throw UNAUTHORIZED;
    } // end if

    String clientSecret = jiveInstance.getClientSecret();
    String paramStrWithoutSignature = authorization.substring(JIVE_EXTN.length(),
            authorization.indexOf(QUERY_PARAM_SIGNATURE));

    try {
        String expectedSignature = sign(paramStrWithoutSignature, clientSecret, algorithm);
        if (expectedSignature.equals(signature)) {
            // Save the validated jiveInstance on the request for downstream handlers.
            request.setProperty(JIVE_INSTANCE, jiveInstance);
        } else {
            log.error("Jive authorization failed due to tampered signature! Original authz: " + authorization);
            throw UNAUTHORIZED;
        } // end if
    } catch (Exception e) {
        log.error("Failed validating Jive auth. scheme" + e.getMessage());
        throw UNAUTHORIZED;
    } // end try/catch

}

From source file:org.codelibs.empros.agent.operation.rest.RestApiOperation.java
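
Here TimeUnit.MINUTES expresses the five-minute time-to-live that the PoolingClientConnectionManager applies to pooled HTTP connections.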

public RestApiOperation() {
    url = PropertiesUtil.getAsString(EMPROSAPI_PROPERTIES, "emprosUrl", null);

    if (StringUtil.isBlank(url)) {
        throw new EmprosSystemException("emprosUrl is empty.");
    }

    eventCapacity = PropertiesUtil.getAsInt(EMPROSAPI_PROPERTIES, "eventCapacity", 100);
    requestInterval = PropertiesUtil.getAsInt(EMPROSAPI_PROPERTIES, "requestInterval", 100);
    maxRetryCount = PropertiesUtil.getAsInt(EMPROSAPI_PROPERTIES, "maxRetryCount", 5);
    apiMonitorInterval = PropertiesUtil.getAsLong(EMPROSAPI_PROPERTIES, "apiMonitorInterval", 1 * 60 * 1000);

    final long connectionCheckInterval = PropertiesUtil.getAsLong(EMPROSAPI_PROPERTIES,
            "connectionCheckInterval", 5000);
    final long idleConnectionTimeout = PropertiesUtil.getAsLong(EMPROSAPI_PROPERTIES, "idleConnectionTimeout",
            60 * 1000);

    final SchemeRegistry schemeRegistry = new SchemeRegistry();
    schemeRegistry.register(new Scheme("http", 80, PlainSocketFactory.getSocketFactory()));
    schemeRegistry.register(new Scheme("https", 443, SSLSocketFactory.getSocketFactory()));
    final ClientConnectionManager clientConnectionManager = new PoolingClientConnectionManager(schemeRegistry,
            5, TimeUnit.MINUTES);

    httpClient = new DefaultHttpClient(clientConnectionManager);
    HttpParams httpParams = httpClient.getParams();

    // TODO auth
    HttpConnectionParams.setConnectionTimeout(httpParams, 10 * 1000);
    HttpConnectionParams.setSoTimeout(httpParams, 10 * 1000);

    connectionMonitor = new ConnectionMonitor(clientConnectionManager, connectionCheckInterval,
            idleConnectionTimeout);
    connectionMonitor.setDaemon(true);
    connectionMonitor.start();

    apiMonitor = new ApiMonitor();
    apiMonitorTimer = new Timer();
    apiMonitorTimer.schedule(apiMonitor, 0, apiMonitorInterval);
}

From source file:com.falcon.orca.handlers.SlaveHandler.java
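
Here TimeUnit.MINUTES builds the one-minute FiniteDuration used to await actor-system termination on exit.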

@Override
public void handle() {
    CommandLine commandLine;
    CommandLineParser commandLineParser = new DefaultParser();
    BufferedReader br = new BufferedReader(new InputStreamReader(System.in, StandardCharsets.UTF_8));
    Options options = createSlaveOptions();
    printOnCmd("Welcome to ORCA type help to see what ORCA can do.");
    try {
        String command = br.readLine();
        while (command != null) {
            if (!StringUtils.isEmpty(command)) {
                try {
                    String[] treatedCommandParts = treatCommands(command);
                    commandLine = commandLineParser.parse(options, treatedCommandParts);
                    if (commandLine.hasOption("connect")) {
                        String masterHost;
                        if (commandLine.hasOption("masterHost")) {
                            masterHost = commandLine.getOptionValue("masterHost");
                        } else {
                            throw new MissingArgumentException("Master host is required to connect");
                        }

                        Integer masterPort;
                        if (commandLine.hasOption("masterPort")) {
                            masterPort = Integer.valueOf(commandLine.getOptionValue("masterPort"));
                        } else {
                            throw new MissingArgumentException("Master port is required to connect");
                        }
                        nodeManager = actorSystem.actorOf(NodeManager.props(masterHost, masterPort),
                                "node_manager");
                    } else if (commandLine.hasOption("disconnect")) {
                        if (nodeManager == null || nodeManager.isTerminated()) {
                            printOnCmd("node is not part of any cluster");
                        } else {
                            NodeManagerCommand nodeManagerCommand = new NodeManagerCommand();
                            nodeManagerCommand.setType(NodeManagerCommandType.UNREGISTER_FROM_MASTER);
                            nodeManager.tell(nodeManagerCommand, nodeManager);
                        }
                    } else if (commandLine.hasOption("exit")) {
                        if (nodeManager != null && !nodeManager.isTerminated()) {
                            NodeManagerCommand nodeManagerCommand = new NodeManagerCommand();
                            nodeManagerCommand.setType(NodeManagerCommandType.EXIT);
                            nodeManager.tell(nodeManagerCommand, nodeManager);
                        }
                        actorSystem.shutdown();
                        actorSystem.awaitTermination(new FiniteDuration(1, TimeUnit.MINUTES));
                        break;
                    } else {
                        printOnCmd(printHelpSlaveMode());
                    }
                } catch (ParseException pe) {
                    printOnCmd(printHelpSlaveMode());
                }
            } else {
                printOnCmd("", false);
            }
            command = br.readLine();
        }
    } catch (IOException e) {
        printOnCmd("Failed to read input from command line, please try again.");
    }
}

From source file:com.github.restdriver.clientdriver.integration.ClientDriverRuleTest.java
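
Here TimeUnit.MINUTES sets a five-minute rule-level response timeout, which a one-millisecond per-response timeout then overrides.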

@Test
public void responseTimeoutOverridesClientDriverRuleExpectationTimeout() throws InterruptedException {

    // Given
    final ClientDriverRule driver = new ClientDriverRule().expectResponsesWithin(5, TimeUnit.MINUTES);
    final ClientDriverResponse response = giveResponse("", null);

    // When
    driver.addExpectation(onRequestTo("/path"), response.within(1, TimeUnit.MILLISECONDS));

    Thread.sleep(5);

    // Then
    assertThat(response.hasNotExpired(), is(false));
}

From source file:com.linkedin.pinot.integration.tests.DefaultColumnsClusterIntegrationTest.java
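
Here TimeUnit.MINUTES gives the segment-building executor up to ten minutes to terminate.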

protected void setUp(boolean sendSchema) throws Exception {
    // Set up directories.
    FileUtils.deleteQuietly(TMP_DIR);
    Assert.assertTrue(TMP_DIR.mkdirs());
    Assert.assertTrue(SEGMENT_DIR.mkdir());
    Assert.assertTrue(TAR_DIR.mkdir());

    // Start the cluster.
    startZk();
    startController();
    startBroker();
    startServer();

    // Create the table.
    addOfflineTable("mytable", "DaysSinceEpoch", "daysSinceEpoch", -1, "", null, null);

    // Add the schema.
    if (sendSchema) {
        sendSchema();
    }

    // Unpack the Avro files.
    List<File> avroFiles = unpackAvroData(TMP_DIR, SEGMENT_COUNT);

    // Load data into H2.
    ExecutorService executor = Executors.newCachedThreadPool();
    setupH2AndInsertAvro(avroFiles, executor);

    // Create segments from Avro data.
    buildSegmentsFromAvro(avroFiles, executor, 0, SEGMENT_DIR, TAR_DIR, "mytable", false, null);

    // Initialize query generator.
    setupQueryGenerator(avroFiles, executor);

    executor.shutdown();
    executor.awaitTermination(10, TimeUnit.MINUTES);

    // Set up a Helix spectator that counts uploaded segments and unlocks the
    // latch once SEGMENT_COUNT segments are online.
    CountDownLatch latch = setupSegmentCountCountDownLatch("mytable", SEGMENT_COUNT);

    // Upload the segments.
    for (String segmentName : TAR_DIR.list()) {
        File file = new File(TAR_DIR, segmentName);
        FileUploadUtils.sendSegmentFile("localhost", "8998", segmentName, new FileInputStream(file),
                file.length());
    }

    // Wait for all segments to be ONLINE.
    latch.await();
    waitForSegmentsOnline();
}

From source file:edu.sdsc.scigraph.owlapi.loader.BatchOwlLoader.java
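
Here TimeUnit.MINUTES is passed to Stopwatch.elapsed to report the ontology load time in whole minutes.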

public static void load(OwlLoadConfiguration config) throws InterruptedException {
    Injector i = Guice.createInjector(new OwlLoaderModule(config),
            new Neo4jModule(config.getGraphConfiguration()));
    BatchOwlLoader loader = i.getInstance(BatchOwlLoader.class);
    loader.ontologies = config.getOntologies();
    logger.info("Loading ontologies...");
    Stopwatch timer = Stopwatch.createStarted();
    loader.loadOntology();
    logger.info(format("Loading took %d minutes", timer.elapsed(TimeUnit.MINUTES)));
}

From source file:com.mnt.base.stream.server.AssureCallbackProcessorManager.java
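
Here TimeUnit.MINUTES puts a one-minute timeout on the blocking queue poll so the loop can periodically re-check its running flag.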

@Override
public void run() {
    while (runningFlag) {
        StreamPacket streamPacket = null;
        try {
            streamPacket = streamPacketQueueMap.get((int) Math.abs(pushAi.incrementAndGet() % maxQueueMapSize))
                    .poll(1, TimeUnit.MINUTES);
        } catch (InterruptedException e) {
            log.error("Error while process the stream packet in processor queue", e);
        }

        if (streamPacket != null) {

            NetTraffic.log("ready to process: ", streamPacket);

            try {
                dispatchPacket(streamPacket);
            } catch (Exception e) {

                Connection connection = ConnectionManager.getConnection(streamPacket.getConnectionId());

                if (connection != null && !connection.isClosed()) {
                    StreamPacket errorResponse = StreamPacket.valueOf(streamPacket.getRequestId(),
                            new StringBuilder(streamPacket.getProcessorIdentifier()).append(StreamPacketDef.DOT)
                                    .append(streamPacket.getMethodIdentifier()).toString(),
                            new RuntimeException("no corresponding processor identifier"));
                    connection.deliver(errorResponse);
                }

                log.error("error while dispatch packet: " + streamPacket.toString(), e);
            }
        }

        NetTraffic.log("processed: ", streamPacket);
    }
}

From source file:com.cubeia.backoffice.operator.client.OperatorServiceClientHTTP.java
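
Here TimeUnit.MINUTES interprets the configurable TTL passed to Guava's expireAfterWrite for both caches.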

private void setUpConfigCache() {
    configParamCache = CacheBuilder.newBuilder().expireAfterWrite(getConfigCacheTTL(), TimeUnit.MINUTES)
            .maximumSize(1000).build();

    operatorEnabledCache = CacheBuilder.newBuilder().expireAfterWrite(getConfigCacheTTL(), TimeUnit.MINUTES)
            .maximumSize(200).build();
}

From source file:learn.jersey.services.BufferedMutatorExample.java
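
Here TimeUnit.MINUTES bounds each worker's Future.get to five minutes.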

@Override
public int run(String[] args) throws InterruptedException, ExecutionException, TimeoutException {

    /** a callback invoked when an asynchronous write fails. */
    final BufferedMutator.ExceptionListener listener = new BufferedMutator.ExceptionListener() {
        @Override
        public void onException(RetriesExhaustedWithDetailsException e, BufferedMutator mutator) {
            for (int i = 0; i < e.getNumExceptions(); i++) {
                LOG.info("Failed to sent put " + e.getRow(i) + ".");
            }
        }
    };
    BufferedMutatorParams params = new BufferedMutatorParams(TABLE).listener(listener);

    //
    // step 1: create a single Connection and a BufferedMutator, shared by
    // all worker threads.
    //
    try (final Connection conn = ConnectionFactory.createConnection(getConf());
            final BufferedMutator mutator = conn.getBufferedMutator(params)) {

        /** worker pool that operates on BufferedTable instances */
        final ExecutorService workerPool = Executors.newFixedThreadPool(POOL_SIZE);
        List<Future<Void>> futures = new ArrayList<>(TASK_COUNT);

        for (int i = 0; i < TASK_COUNT; i++) {
            futures.add(workerPool.submit(new Callable<Void>() {
                @Override
                public Void call() throws Exception {
                    //
                    // step 2: each worker sends edits to the shared
                    // BufferedMutator instance. They all use
                    // the same backing buffer, call-back "listener", and
                    // RPC executor pool.
                    //
                    Put p = new Put(Bytes.toBytes("someRow"));
                    p.addColumn(FAMILY, Bytes.toBytes("someQualifier"), Bytes.toBytes("some value"));
                    mutator.mutate(p);
                    // do work... maybe you want to call mutator.flush()
                    // after many edits to ensure any of
                    // this worker's edits are sent before exiting the
                    // Callable
                    return null;
                }
            }));
        }

        //
        // step 3: clean up the worker pool, shut down.
        //
        for (Future<Void> f : futures) {
            f.get(5, TimeUnit.MINUTES);
        }
        workerPool.shutdown();
    } catch (IOException e) {
        // exception while creating/destroying Connection or BufferedMutator
        LOG.info("exception while creating/destroying Connection or BufferedMutator", e);
    } // BufferedMutator.close() ensures all work is flushed. The custom
      // listener could be invoked from here.
    return 0;
}

From source file:net.openhft.chronicle.logger.slf4j.Slf4jVanillaChronicleLoggerPerfTest.java
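
Here TimeUnit.MINUTES gives the logging thread pool up to two minutes to drain in awaitTermination.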

@Test
public void testMultiThreadLogging() throws IOException, InterruptedException {
    warmup(LoggerFactory.getLogger("perf-vanilla-chronicle"));

    final int RUNS = 300000;
    final int THREADS = Runtime.getRuntime().availableProcessors();

    for (int size : new int[] { 64, 128, 256 }) {
        {
            final long start = System.nanoTime();

            ExecutorService es = Executors.newFixedThreadPool(THREADS);
            for (int t = 0; t < THREADS; t++) {
                es.submit(new RunnableLogger(RUNS, size, "perf-vanilla-chronicle"));
            }

            es.shutdown();
            es.awaitTermination(2, TimeUnit.MINUTES);

            final long time = System.nanoTime() - start;

            System.out.printf(
                    "Plain.MT (runs=%d, min size=%03d, elapsed=%.3f ms) took an average of %.3f us per entry\n",
                    RUNS, size, time / 1e6, time / 1e3 / (RUNS * THREADS));
        }
    }

    ChronicleTools.deleteOnExit(basePath("perf-vanilla-chronicle"));
}