Example usage for com.google.common.base Stopwatch start

Introduction

On this page you can find example usage for com.google.common.base Stopwatch start.

Prototype

public Stopwatch start() 

Documentation

Starts the stopwatch.
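
A minimal, self-contained sketch of the typical call sequence (not taken from any of the projects below): start() begins timing and returns the same Stopwatch instance so calls can be chained, and it throws IllegalStateException if the stopwatch is already running.

import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;

public class StopwatchStartExample {
    public static void main(String[] args) throws InterruptedException {
        // Create an unstarted stopwatch, then start it explicitly.
        Stopwatch watch = Stopwatch.createUnstarted();
        watch.start();

        Thread.sleep(100); // stand-in for the work being timed

        watch.stop();
        System.out.println("Elapsed: " + watch.elapsed(TimeUnit.MILLISECONDS) + " ms");
        System.out.println(watch); // toString() prints a human-readable duration

        // start() returns this, so creation and starting can be chained;
        // Stopwatch.createStarted() is the usual shorthand for the same thing.
        Stopwatch chained = Stopwatch.createUnstarted().start();
        // Calling start() again while a stopwatch is running throws IllegalStateException.
    }
}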

Usage

From source file: org.apache.druid.server.coordinator.DruidCoordinatorBalancerProfiler.java

public void profileRun() {
    Stopwatch watch = Stopwatch.createUnstarted();
    LoadQueuePeonTester fromPeon = new LoadQueuePeonTester();
    LoadQueuePeonTester toPeon = new LoadQueuePeonTester();

    EasyMock.expect(druidServer1.getName()).andReturn("from").atLeastOnce();
    EasyMock.expect(druidServer1.getCurrSize()).andReturn(30L).atLeastOnce();
    EasyMock.expect(druidServer1.getMaxSize()).andReturn(100L).atLeastOnce();
    EasyMock.expect(druidServer1.getSegments()).andReturn(segments).anyTimes();
    EasyMock.expect(druidServer1.getSegment(EasyMock.anyObject())).andReturn(null).anyTimes();
    EasyMock.replay(druidServer1);

    EasyMock.expect(druidServer2.getName()).andReturn("to").atLeastOnce();
    EasyMock.expect(druidServer2.getTier()).andReturn("normal").anyTimes();
    EasyMock.expect(druidServer2.getCurrSize()).andReturn(0L).atLeastOnce();
    EasyMock.expect(druidServer2.getMaxSize()).andReturn(100L).atLeastOnce();
    EasyMock.expect(druidServer2.getSegments()).andReturn(new HashMap<String, DataSegment>()).anyTimes();
    EasyMock.expect(druidServer2.getSegment(EasyMock.anyObject())).andReturn(null).anyTimes();
    EasyMock.replay(druidServer2);

    coordinator.moveSegment(EasyMock.anyObject(), EasyMock.anyObject(), EasyMock.anyObject(),
            EasyMock.anyObject());
    EasyMock.expectLastCall().anyTimes();
    EasyMock.replay(coordinator);

    DruidCoordinatorRuntimeParams params = DruidCoordinatorRuntimeParams.newBuilder()
            .withDruidCluster(new DruidCluster(null, ImmutableMap.of("normal",
                    Stream.of(new ServerHolder(druidServer1, fromPeon), new ServerHolder(druidServer2, toPeon))
                            .collect(Collectors.toCollection(
                                    () -> new TreeSet<>(DruidCoordinatorBalancer.percentUsedComparator))))))
            .withLoadManagementPeons(ImmutableMap.of("from", fromPeon, "to", toPeon))
            .withAvailableSegments(segments.values())
            .withDynamicConfigs(
                    CoordinatorDynamicConfig.builder().withMaxSegmentsToMove(MAX_SEGMENTS_TO_MOVE).build())
            .withBalancerReferenceTimestamp(DateTimes.of("2013-01-01")).build();
    DruidCoordinatorBalancerTester tester = new DruidCoordinatorBalancerTester(coordinator);
    watch.start();
    DruidCoordinatorRuntimeParams balanceParams = tester.run(params);
    System.out.println(watch.stop());
}

From source file: org.ow2.proactive.scheduler.task.TaskLauncher.java

public void doTask(ExecutableContainer executableContainer, TaskResult[] previousTasksResults,
        TaskTerminateNotification terminateNotification) {
    logger.info("Task started " + taskId.getJobId().getReadableName() + " : " + taskId.getReadableName());

    this.taskKiller = this.replaceTaskKillerWithDoubleTimeoutValueIfRunAsMe(executableContainer.isRunAsUser());

    WallTimer wallTimer = new WallTimer(initializer.getWalltime(), taskKiller);

    Stopwatch taskStopwatchForFailures = Stopwatch.createUnstarted();

    TaskResultImpl taskResult;

    TaskDataspaces dataspaces = null;

    try {
        addShutdownHook();
        // lock the cache space cleaning mechanism
        DataSpaceNodeConfigurationAgent.lockCacheSpaceCleaning();
        dataspaces = factory.createTaskDataspaces(taskId, initializer.getNamingService(),
                executableContainer.isRunAsUser());

        File taskLogFile = taskLogger.createFileAppender(dataspaces.getScratchFolder());

        progressFileReader.start(dataspaces.getScratchFolder(), taskId);

        TaskContext context = new TaskContext(executableContainer, initializer, previousTasksResults,
                new NodeDataSpacesURIs(dataspaces.getScratchURI(), dataspaces.getCacheURI(),
                        dataspaces.getInputURI(), dataspaces.getOutputURI(), dataspaces.getUserURI(),
                        dataspaces.getGlobalURI()),
                progressFileReader.getProgressFile().toString(), getHostname(), decrypter);

        File workingDir = getTaskWorkingDir(context, dataspaces);

        logger.info("Task working dir: " + workingDir);
        logger.info("Cache space: " + context.getNodeDataSpaceURIs().getCacheURI());
        logger.info("Input space: " + context.getNodeDataSpaceURIs().getInputURI());
        logger.info("Output space: " + context.getNodeDataSpaceURIs().getOutputURI());
        logger.info("User space: " + context.getNodeDataSpaceURIs().getUserURI());
        logger.info("Global space: " + context.getNodeDataSpaceURIs().getGlobalURI());
        logger.info("Scheduler rest url: " + context.getSchedulerRestUrl());

        wallTimer.start();

        dataspaces.copyInputDataToScratch(initializer.getFilteredInputFiles(fileSelectorsFilters(context))); // should handle interrupt

        if (decrypter != null) {
            decrypter.setCredentials(executableContainer.getCredentials());
        }

        TaskExecutor taskExecutor = factory.createTaskExecutor(workingDir);

        taskStopwatchForFailures.start();
        taskResult = taskExecutor.execute(context, taskLogger.getOutputSink(), taskLogger.getErrorSink());
        taskStopwatchForFailures.stop();

        switch (taskKiller.getStatus()) {
        case WALLTIME_REACHED:
            taskResult = getWalltimedTaskResult(taskStopwatchForFailures);
            sendResultToScheduler(terminateNotification, taskResult);
            return;
        case KILLED_MANUALLY:
            // killed by Scheduler, no need to send results back
            return;
        }

        dataspaces.copyScratchDataToOutput(
                initializer.getFilteredOutputFiles(fileSelectorsFilters(context, taskResult)));

        wallTimer.stop();

        copyTaskLogsToUserSpace(taskLogFile, dataspaces);
        taskResult.setLogs(taskLogger.getLogs());

        sendResultToScheduler(terminateNotification, taskResult);
    } catch (Throwable taskFailure) {
        wallTimer.stop();

        switch (taskKiller.getStatus()) {
        case WALLTIME_REACHED:
            taskResult = getWalltimedTaskResult(taskStopwatchForFailures);
            sendResultToScheduler(terminateNotification, taskResult);
            break;
        case KILLED_MANUALLY:
            // killed by Scheduler, no need to send results back
            return;
        default:
            logger.info("Failed to execute task", taskFailure);
            taskFailure.printStackTrace(taskLogger.getErrorSink());
            taskResult = new TaskResultImpl(taskId, taskFailure, taskLogger.getLogs(),
                    taskStopwatchForFailures.elapsed(TimeUnit.MILLISECONDS));
            sendResultToScheduler(terminateNotification, taskResult);
        }
    } finally {
        try {
            progressFileReader.stop();
            taskLogger.close();

            if (dataspaces != null) {
                dataspaces.close();
            }
            // unlocks the cache space cleaning thread
            DataSpaceNodeConfigurationAgent.unlockCacheSpaceCleaning();
            removeShutdownHook();
        } finally {
            terminate();
        }
    }
}

From source file: com.metamx.druid.utils.DruidMasterBalancerProfiler.java

public void profileRun() {
    Stopwatch watch = new Stopwatch();
    LoadQueuePeonTester fromPeon = new LoadQueuePeonTester();
    LoadQueuePeonTester toPeon = new LoadQueuePeonTester();

    EasyMock.expect(druidServer1.getName()).andReturn("from").atLeastOnce();
    EasyMock.expect(druidServer1.getCurrSize()).andReturn(30L).atLeastOnce();
    EasyMock.expect(druidServer1.getMaxSize()).andReturn(100L).atLeastOnce();
    EasyMock.expect(druidServer1.getSegments()).andReturn(segments).anyTimes();
    EasyMock.expect(druidServer1.getSegment(EasyMock.<String>anyObject())).andReturn(null).anyTimes();
    EasyMock.replay(druidServer1);

    EasyMock.expect(druidServer2.getName()).andReturn("to").atLeastOnce();
    EasyMock.expect(druidServer2.getTier()).andReturn("normal").anyTimes();
    EasyMock.expect(druidServer2.getCurrSize()).andReturn(0L).atLeastOnce();
    EasyMock.expect(druidServer2.getMaxSize()).andReturn(100L).atLeastOnce();
    EasyMock.expect(druidServer2.getSegments()).andReturn(new HashMap<String, DataSegment>()).anyTimes();
    EasyMock.expect(druidServer2.getSegment(EasyMock.<String>anyObject())).andReturn(null).anyTimes();
    EasyMock.replay(druidServer2);

    master.moveSegment(EasyMock.<String>anyObject(), EasyMock.<String>anyObject(), EasyMock.<String>anyObject(),
            EasyMock.<LoadPeonCallback>anyObject());
    EasyMock.expectLastCall().anyTimes();
    EasyMock.replay(master);

    DruidMasterRuntimeParams params = DruidMasterRuntimeParams.newBuilder()
            .withDruidCluster(
                    new DruidCluster(ImmutableMap.<String, MinMaxPriorityQueue<ServerHolder>>of("normal",
                            MinMaxPriorityQueue.orderedBy(DruidMasterBalancerTester.percentUsedComparator)
                                    .create(Arrays.asList(new ServerHolder(druidServer1, fromPeon),
                                            new ServerHolder(druidServer2, toPeon))))))
            .withLoadManagementPeons(ImmutableMap.<String, LoadQueuePeon>of("from", fromPeon, "to", toPeon))
            .withAvailableSegments(segments.values())
            .withMasterSegmentSettings(
                    new MasterSegmentSettings.Builder().withMaxSegmentsToMove(MAX_SEGMENTS_TO_MOVE).build())
            .withBalancerReferenceTimestamp(new DateTime("2013-01-01")).build();
    DruidMasterBalancerTester tester = new DruidMasterBalancerTester(master);
    watch.start();
    DruidMasterRuntimeParams balanceParams = tester.run(params);
    System.out.println(watch.stop());
}
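
Note that this older example constructs the stopwatch directly with new Stopwatch(); in more recent Guava releases the public constructors were deprecated in favor of the static factory methods. A sketch of the equivalent setup against a current Guava version (the balancer run itself is elided):

import com.google.common.base.Stopwatch;

public class FactoryMethodSketch {
    public static void main(String[] args) {
        Stopwatch watch = Stopwatch.createUnstarted(); // instead of new Stopwatch()
        watch.start();
        // ... run the balancer here ...
        System.out.println(watch.stop()); // stop() returns the Stopwatch; println uses its toString()
    }
}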

From source file: io.druid.server.coordinator.DruidCoordinatorBalancerProfiler.java

public void profileRun() {
    Stopwatch watch = Stopwatch.createUnstarted();
    LoadQueuePeonTester fromPeon = new LoadQueuePeonTester();
    LoadQueuePeonTester toPeon = new LoadQueuePeonTester();

    EasyMock.expect(druidServer1.getName()).andReturn("from").atLeastOnce();
    EasyMock.expect(druidServer1.getCurrSize()).andReturn(30L).atLeastOnce();
    EasyMock.expect(druidServer1.getMaxSize()).andReturn(100L).atLeastOnce();
    EasyMock.expect(druidServer1.getSegments()).andReturn(segments).anyTimes();
    EasyMock.expect(druidServer1.getSegment(EasyMock.<String>anyObject())).andReturn(null).anyTimes();
    EasyMock.replay(druidServer1);

    EasyMock.expect(druidServer2.getName()).andReturn("to").atLeastOnce();
    EasyMock.expect(druidServer2.getTier()).andReturn("normal").anyTimes();
    EasyMock.expect(druidServer2.getCurrSize()).andReturn(0L).atLeastOnce();
    EasyMock.expect(druidServer2.getMaxSize()).andReturn(100L).atLeastOnce();
    EasyMock.expect(druidServer2.getSegments()).andReturn(new HashMap<String, DataSegment>()).anyTimes();
    EasyMock.expect(druidServer2.getSegment(EasyMock.<String>anyObject())).andReturn(null).anyTimes();
    EasyMock.replay(druidServer2);

    coordinator.moveSegment(EasyMock.<ImmutableDruidServer>anyObject(),
            EasyMock.<ImmutableDruidServer>anyObject(), EasyMock.<String>anyObject(),
            EasyMock.<LoadPeonCallback>anyObject());
    EasyMock.expectLastCall().anyTimes();
    EasyMock.replay(coordinator);

    DruidCoordinatorRuntimeParams params = DruidCoordinatorRuntimeParams.newBuilder()
            .withDruidCluster(
                    new DruidCluster(ImmutableMap.<String, MinMaxPriorityQueue<ServerHolder>>of("normal",
                            MinMaxPriorityQueue.orderedBy(DruidCoordinatorBalancerTester.percentUsedComparator)
                                    .create(Arrays.asList(new ServerHolder(druidServer1, fromPeon),
                                            new ServerHolder(druidServer2, toPeon))))))
            .withLoadManagementPeons(ImmutableMap.<String, LoadQueuePeon>of("from", fromPeon, "to", toPeon))
            .withAvailableSegments(segments.values())
            .withDynamicConfigs(
                    new CoordinatorDynamicConfig.Builder().withMaxSegmentsToMove(MAX_SEGMENTS_TO_MOVE).build())
            .withBalancerReferenceTimestamp(new DateTime("2013-01-01")).build();
    DruidCoordinatorBalancerTester tester = new DruidCoordinatorBalancerTester(coordinator);
    watch.start();
    DruidCoordinatorRuntimeParams balanceParams = tester.run(params);
    System.out.println(watch.stop());
}

From source file: com.thinkbiganalytics.feedmgr.service.feed.DefaultFeedManagerFeedService.java

private void saveFeed(final FeedMetadata feed) {
    if (StringUtils.isBlank(feed.getId())) {
        feed.setIsNew(true);
    }
    metadataAccess.commit(() -> {
        Stopwatch stopwatch = Stopwatch.createStarted();
        List<? extends HadoopSecurityGroup> previousSavedSecurityGroups = null;
        // Store the old security groups before saving because we need to compare afterward
        if (feed.isNew()) {
            Feed existing = feedProvider.findBySystemName(feed.getCategory().getSystemName(),
                    feed.getSystemFeedName());
            // Since we know this is expected to be new check if the category/feed name combo is already being used.
            if (existing != null) {
                throw new DuplicateFeedNameException(feed.getCategoryName(), feed.getFeedName());
            }
        } else {
            Feed previousStateBeforeSaving = feedProvider.findById(feedProvider.resolveId(feed.getId()));
            Map<String, String> userProperties = previousStateBeforeSaving.getUserProperties();
            previousSavedSecurityGroups = previousStateBeforeSaving.getSecurityGroups();
        }

        //if this is the first time saving this feed create a new one
        Feed domainFeed = feedModelTransform.feedToDomain(feed);

        if (domainFeed.getState() == null) {
            domainFeed.setState(Feed.State.ENABLED);
        }
        stopwatch.stop();
        log.debug("Time to transform the feed to a domain object for saving: {} ms",
                stopwatch.elapsed(TimeUnit.MILLISECONDS));
        stopwatch.reset();

        //initially save the feed
        if (feed.isNew()) {
            stopwatch.start();
            domainFeed = feedProvider.update(domainFeed);
            stopwatch.stop();
            log.debug("Time to save the New feed: {} ms", stopwatch.elapsed(TimeUnit.MILLISECONDS));
            stopwatch.reset();
        }

        final String domainId = domainFeed.getId().toString();
        final String feedName = FeedNameUtil.fullName(domainFeed.getCategory().getSystemName(),
                domainFeed.getName());

        // Build preconditions
        stopwatch.start();
        assignFeedDependencies(feed, domainFeed);
        stopwatch.stop();
        log.debug("Time to assignFeedDependencies: {} ms", stopwatch.elapsed(TimeUnit.MILLISECONDS));
        stopwatch.reset();

        //Assign the datasources
        stopwatch.start();
        assignFeedDatasources(feed, domainFeed);
        stopwatch.stop();
        log.debug("Time to assignFeedDatasources: {} ms", stopwatch.elapsed(TimeUnit.MILLISECONDS));
        stopwatch.reset();

        stopwatch.start();
        boolean isStream = feed.getRegisteredTemplate() != null ? feed.getRegisteredTemplate().isStream()
                : false;
        Long timeBetweenBatchJobs = feed.getRegisteredTemplate() != null
                ? feed.getRegisteredTemplate().getTimeBetweenStartingBatchJobs()
                : 0L;
        //sync the feed information to ops manager
        metadataAccess.commit(() -> opsManagerFeedProvider.save(opsManagerFeedProvider.resolveId(domainId),
                feedName, isStream, timeBetweenBatchJobs));

        stopwatch.stop();
        log.debug("Time to sync feed data with Operations Manager: {} ms",
                stopwatch.elapsed(TimeUnit.MILLISECONDS));
        stopwatch.reset();

        // Update hadoop security group polices if the groups changed
        if (!feed.isNew()
                && !ListUtils.isEqualList(previousSavedSecurityGroups, domainFeed.getSecurityGroups())) {
            stopwatch.start();
            List<? extends HadoopSecurityGroup> securityGroups = domainFeed.getSecurityGroups();
            List<String> groupsAsCommaList = securityGroups.stream().map(group -> group.getName())
                    .collect(Collectors.toList());
            hadoopAuthorizationService.updateSecurityGroupsForAllPolicies(feed.getSystemCategoryName(),
                    feed.getSystemFeedName(), groupsAsCommaList, domainFeed.getProperties());
            stopwatch.stop();
            log.debug("Time to update hadoop security groups: {} ms", stopwatch.elapsed(TimeUnit.MILLISECONDS));
            stopwatch.reset();
        }

        // Update Hive metastore
        stopwatch.start();
        final boolean hasHiveDestination = domainFeed.getDestinations().stream()
                .map(FeedDestination::getDatasource).filter(DerivedDatasource.class::isInstance)
                .map(DerivedDatasource.class::cast)
                .anyMatch(datasource -> "HiveDatasource".equals(datasource.getDatasourceType()));
        if (hasHiveDestination) {
            try {
                feedHiveTableService.updateColumnDescriptions(feed);
            } catch (final DataAccessException e) {
                log.warn("Failed to update column descriptions for feed: {}",
                        feed.getCategoryAndFeedDisplayName(), e);
            }
        }
        stopwatch.stop();
        log.debug("Time to update hive metastore: {} ms", stopwatch.elapsed(TimeUnit.MILLISECONDS));
        stopwatch.reset();

        // Update Kylo metastore
        stopwatch.start();
        domainFeed = feedProvider.update(domainFeed);
        stopwatch.stop();
        log.debug("Time to call feedProvider.update: {} ms", stopwatch.elapsed(TimeUnit.MILLISECONDS));
        stopwatch.reset();
    }, (e) -> {
        if (feed.isNew() && StringUtils.isNotBlank(feed.getId())) {
            //Rollback ops Manager insert if it is newly created
            metadataAccess.commit(() -> {
                opsManagerFeedProvider.delete(opsManagerFeedProvider.resolveId(feed.getId()));
            });
        }
    });

}
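
The example above reuses a single stopwatch to time successive phases by cycling through start(), stop(), and reset(). A minimal sketch of that pattern in isolation (the phase names and work are placeholders, not taken from the code above):

import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;

public class PhaseTimingSketch {
    public static void main(String[] args) {
        Stopwatch stopwatch = Stopwatch.createStarted();

        doPhase("transform"); // placeholder for the first phase
        stopwatch.stop();
        System.out.println("transform took " + stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms");
        stopwatch.reset(); // back to zero and not running

        stopwatch.start(); // reuse the same instance for the next phase
        doPhase("save"); // placeholder for the second phase
        stopwatch.stop();
        System.out.println("save took " + stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms");
    }

    private static void doPhase(String name) {
        // stand-in for the real work being timed
    }
}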

From source file: io.ecarf.core.cloud.task.processor.reason.phase2.DoReasonTask9.java

@Override
public void run() throws IOException {

    GoogleCloudService cloud = (GoogleCloudService) this.getCloudService();

    Stopwatch stopwatch1 = Stopwatch.createUnstarted();
    Stopwatch stopwatch2 = Stopwatch.createUnstarted();

    this.setup(cloud);

    String decoratedTable = table;
    int emptyRetries = 0;
    int totalInferredTriples = 0;

    int maxRetries;
    if (this.retries == null) {
        maxRetries = Config.getIntegerProperty(Constants.REASON_RETRY_KEY, 6);

    } else {
        maxRetries = this.retries;
    }

    int cycleSleep;
    if (this.sleep == null) {
        cycleSleep = Config.getIntegerProperty(Constants.REASON_SLEEP_KEY, 20);
    } else {

        cycleSleep = this.sleep;
    }

    this.ddLimit = Config.getIntegerProperty(Constants.REASON_DATA_DIRECT_DOWNLOAD_LIMIT, 1_200_000);
    int streamingThreshold = Config.getIntegerProperty("ecarf.io.reasoning.streaming.threshold", 100000);
    String instanceId = cloud.getInstanceId();

    int processors = Runtime.getRuntime().availableProcessors();

    if (processors > 1) {
        this.executor = Utils.createFixedThreadPool(processors);
    }

    int count = 0;

    QueryGenerator<Long> generator = new QueryGenerator<Long>(schemaTerms, null);

    // timestamp loop
    do {

        // First of all run all the queries asynchronously and remember the jobId and filename for each term
        generator.setDecoratedTable(decoratedTable);

        String query = generator.getQuery();
        log.debug("Generated Query: " + query);

        String queryResultFilePrefix = instanceId + "_QueryResults_" + count;

        String jobId = cloud.startBigDataQuery(query, new BigDataTable(this.table));
        //QueryResult   queryResult = QueryResult.create().setFilename(queryResultFilePrefix).setJobId(jobId);

        long start = System.currentTimeMillis();

        // block and wait for each job to complete then save results to a file
        QueryStats stats = cloud.saveBigQueryResultsToFile(jobId, queryResultFilePrefix, this.bucket,
                processors, this.ddLimit);

        BigInteger rows = stats.getTotalRows();

        this.totalBytes = this.totalBytes + stats.getTotalProcessedBytes();

        Set<Long> productiveTerms = new HashSet<>();
        Set<String> inferredTriplesFiles = new HashSet<>();
        int interimInferredTriples = 0;

        // only process if triples are found matching this term
        if ((rows != null) && !BigInteger.ZERO.equals(rows)) {

            stopwatch1.start();

            interimInferredTriples = this.inferAndSaveTriplesToFile(stats, productiveTerms, processors,
                    inferredTriplesFiles);

            this.totalRows = this.totalRows.add(rows);

            stopwatch1.stop();

        } else {
            log.info("Skipping query as no data is found");
        }

        totalInferredTriples += interimInferredTriples;

        if (interimInferredTriples > 0) {

            // stream smaller numbers of inferred triples
            // try uploading from cloud storage

            log.info("Inserting " + interimInferredTriples + ", inferred triples into Big Data table for "
                    + productiveTerms.size() + " productive terms. Filename: " + inferredTriplesFiles);

            if (interimInferredTriples <= streamingThreshold) {
                // stream the data

                Set<Triple> inferredTriples = new HashSet<>();
                for (String inferredTriplesFile : inferredTriplesFiles) {
                    TripleUtils.loadCompressedCSVTriples(inferredTriplesFile, true, inferredTriples);
                }

                log.info("Total triples to stream into Big Data: " + inferredTriples.size());
                cloud.streamObjectsIntoBigData(inferredTriples,
                        TableUtils.getBigQueryEncodedTripleTable(table));

                log.info("All inferred triples are streamed into Big Data table");

            } else {

                List<String> cloudStorageFiles = new ArrayList<>();
                // load the data through cloud storage
                // upload the file to cloud storage
                for (String inferredTriplesFile : inferredTriplesFiles) {
                    log.info("Uploading inferred triples file into cloud storage: " + inferredTriplesFile);
                    StorageObject file = cloud.uploadFileToCloudStorage(inferredTriplesFile, bucket);
                    log.info("File " + file + ", uploaded successfully. Now loading it into big data.");
                    cloudStorageFiles.add(file.getUri());
                }

                jobId = cloud.loadCloudStorageFilesIntoBigData(cloudStorageFiles,
                        TableUtils.getBigQueryEncodedTripleTable(table), false);

                log.info(
                        "All inferred triples are loaded into Big Data table through cloud storage, completed jobId: "
                                + jobId);

            }

            // reset empty retries
            emptyRetries = 0;

            stopwatch2.reset();

        } else {
            log.info("No new inferred triples");
            // increment empty retries
            emptyRetries++;

            if (!stopwatch2.isRunning()) {
                stopwatch2.start();
            }
        }

        log.info("Total inferred triples so far = " + totalInferredTriples + ", current retry count: "
                + emptyRetries);

        if (emptyRetries < maxRetries) {
            ApiUtils.block(cycleSleep);

            // FIXME move into the particular cloud implementation service
            long elapsed = System.currentTimeMillis() - start;
            decoratedTable = "[" + table + "@-" + elapsed + "-]";

            log.info("Using table decorator: " + decoratedTable + ". Empty retries count: " + emptyRetries);
        }

        count++;

    } while (emptyRetries < maxRetries); // end timestamp loop

    executor.shutdown();
    log.info("Finished reasoning, total inferred triples = " + totalInferredTriples);
    //log.info("Number of avoided duplicate terms = " + this.duplicates);
    log.info("Total rows retrieved from big data = " + this.totalRows);
    log.info("Total processed GBytes = " + ((double) this.totalBytes / FileUtils.ONE_GB));
    log.info("Total process reasoning time (serialization in inf file) = " + stopwatch1);
    log.info("Total time spent in empty inference cycles = " + stopwatch2);
}
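
Two details of the stopwatch usage above are worth noting: elapsed time accumulates across repeated start()/stop() cycles until reset() is called, and isRunning() guards against calling start() on a stopwatch that is already running, which would throw IllegalStateException. A small sketch of both behaviors (the loop and sleep are illustrative only):

import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;

public class AccumulationSketch {
    public static void main(String[] args) throws InterruptedException {
        Stopwatch accumulating = Stopwatch.createUnstarted();

        for (int i = 0; i < 3; i++) {
            accumulating.start();
            Thread.sleep(10); // stand-in for one reasoning cycle
            accumulating.stop(); // elapsed time keeps adding up across cycles
        }
        System.out.println("Accumulated: " + accumulating.elapsed(TimeUnit.MILLISECONDS) + " ms");

        Stopwatch idle = Stopwatch.createUnstarted();
        if (!idle.isRunning()) { // guard, as in the empty-retry branch above
            idle.start();
        }
        System.out.println(idle); // toString() gives a human-readable duration, as in the final log lines
    }
}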

From source file: com.metamx.druid.utils.DruidMasterBalancerProfiler.java

public void bigProfiler() {
    Stopwatch watch = new Stopwatch();
    int numSegments = 55000;
    int numServers = 50;
    EasyMock.expect(manager.getAllRules()).andReturn(ImmutableMap.<String, List<Rule>>of("test", rules))
            .anyTimes();
    EasyMock.expect(manager.getRules(EasyMock.<String>anyObject())).andReturn(rules).anyTimes();
    EasyMock.expect(manager.getRulesWithDefault(EasyMock.<String>anyObject())).andReturn(rules).anyTimes();
    EasyMock.replay(manager);

    master.moveSegment(EasyMock.<String>anyObject(), EasyMock.<String>anyObject(), EasyMock.<String>anyObject(),
            EasyMock.<LoadPeonCallback>anyObject());
    EasyMock.expectLastCall().anyTimes();
    EasyMock.replay(master);

    List<DruidServer> serverList = Lists.newArrayList();
    Map<String, LoadQueuePeon> peonMap = Maps.newHashMap();
    List<ServerHolder> serverHolderList = Lists.newArrayList();
    Map<String, DataSegment> segmentMap = Maps.newHashMap();
    for (int i = 0; i < numSegments; i++) {
        segmentMap.put("segment" + i, new DataSegment("datasource" + i,
                new Interval(new DateTime("2012-01-01"), (new DateTime("2012-01-01")).plusHours(1)),
                (new DateTime("2012-03-01")).toString(), Maps.<String, Object>newHashMap(),
                Lists.<String>newArrayList(), Lists.<String>newArrayList(), new NoneShardSpec(), 0, 4L));
    }

    for (int i = 0; i < numServers; i++) {
        DruidServer server = EasyMock.createMock(DruidServer.class);
        EasyMock.expect(server.getMetadata()).andReturn(null).anyTimes();
        EasyMock.expect(server.getCurrSize()).andReturn(30L).atLeastOnce();
        EasyMock.expect(server.getMaxSize()).andReturn(100L).atLeastOnce();
        EasyMock.expect(server.getTier()).andReturn("normal").anyTimes();
        EasyMock.expect(server.getName()).andReturn(Integer.toString(i)).atLeastOnce();
        EasyMock.expect(server.getHost()).andReturn(Integer.toString(i)).anyTimes();
        if (i == 0) {
            EasyMock.expect(server.getSegments()).andReturn(segmentMap).anyTimes();
        } else {
            EasyMock.expect(server.getSegments()).andReturn(new HashMap<String, DataSegment>()).anyTimes();
        }
        EasyMock.expect(server.getSegment(EasyMock.<String>anyObject())).andReturn(null).anyTimes();
        EasyMock.replay(server);

        LoadQueuePeon peon = new LoadQueuePeonTester();
        peonMap.put(Integer.toString(i), peon);
        serverHolderList.add(new ServerHolder(server, peon));
    }

    DruidMasterRuntimeParams params = DruidMasterRuntimeParams.newBuilder()
            .withDruidCluster(
                    new DruidCluster(ImmutableMap.<String, MinMaxPriorityQueue<ServerHolder>>of("normal",
                            MinMaxPriorityQueue.orderedBy(DruidMasterBalancerTester.percentUsedComparator)
                                    .create(serverHolderList))))
            .withLoadManagementPeons(peonMap).withAvailableSegments(segmentMap.values())
            .withMasterSegmentSettings(
                    new MasterSegmentSettings.Builder().withMaxSegmentsToMove(MAX_SEGMENTS_TO_MOVE).build())
            .withBalancerReferenceTimestamp(new DateTime("2013-01-01")).withEmitter(emitter)
            .withDatabaseRuleManager(manager).withReplicationManager(new ReplicationThrottler(2, 500))
            .withSegmentReplicantLookup(SegmentReplicantLookup
                    .make(new DruidCluster(ImmutableMap.<String, MinMaxPriorityQueue<ServerHolder>>of("normal",
                            MinMaxPriorityQueue.orderedBy(DruidMasterBalancerTester.percentUsedComparator)
                                    .create(serverHolderList)))))
            .build();

    DruidMasterBalancerTester tester = new DruidMasterBalancerTester(master);
    DruidMasterRuleRunner runner = new DruidMasterRuleRunner(master, 500, 5);
    watch.start();
    DruidMasterRuntimeParams balanceParams = tester.run(params);
    DruidMasterRuntimeParams assignParams = runner.run(params);
    System.out.println(watch.stop());
}

From source file: org.apache.druid.server.coordinator.DruidCoordinatorBalancerProfiler.java

public void bigProfiler() {
    Stopwatch watch = Stopwatch.createUnstarted();
    int numSegments = 55000;
    int numServers = 50;
    EasyMock.expect(manager.getAllRules()).andReturn(ImmutableMap.of("test", rules)).anyTimes();
    EasyMock.expect(manager.getRules(EasyMock.anyObject())).andReturn(rules).anyTimes();
    EasyMock.expect(manager.getRulesWithDefault(EasyMock.anyObject())).andReturn(rules).anyTimes();
    EasyMock.replay(manager);

    coordinator.moveSegment(EasyMock.anyObject(), EasyMock.anyObject(), EasyMock.anyObject(),
            EasyMock.anyObject());
    EasyMock.expectLastCall().anyTimes();
    EasyMock.replay(coordinator);

    List<DruidServer> serverList = Lists.newArrayList();
    Map<String, LoadQueuePeon> peonMap = Maps.newHashMap();
    List<ServerHolder> serverHolderList = Lists.newArrayList();
    Map<String, DataSegment> segmentMap = Maps.newHashMap();
    for (int i = 0; i < numSegments; i++) {
        segmentMap.put("segment" + i,
                new DataSegment("datasource" + i,
                        new Interval(DateTimes.of("2012-01-01"), (DateTimes.of("2012-01-01")).plusHours(1)),
                        (DateTimes.of("2012-03-01")).toString(), Maps.newHashMap(), Lists.newArrayList(),
                        Lists.newArrayList(), NoneShardSpec.instance(), 0, 4L));
    }

    for (int i = 0; i < numServers; i++) {
        ImmutableDruidServer server = EasyMock.createMock(ImmutableDruidServer.class);
        EasyMock.expect(server.getMetadata()).andReturn(null).anyTimes();
        EasyMock.expect(server.getCurrSize()).andReturn(30L).atLeastOnce();
        EasyMock.expect(server.getMaxSize()).andReturn(100L).atLeastOnce();
        EasyMock.expect(server.getTier()).andReturn("normal").anyTimes();
        EasyMock.expect(server.getName()).andReturn(Integer.toString(i)).atLeastOnce();
        EasyMock.expect(server.getHost()).andReturn(Integer.toString(i)).anyTimes();
        if (i == 0) {
            EasyMock.expect(server.getSegments()).andReturn(segmentMap).anyTimes();
        } else {
            EasyMock.expect(server.getSegments()).andReturn(new HashMap<String, DataSegment>()).anyTimes();
        }
        EasyMock.expect(server.getSegment(EasyMock.anyObject())).andReturn(null).anyTimes();
        EasyMock.replay(server);

        LoadQueuePeon peon = new LoadQueuePeonTester();
        peonMap.put(Integer.toString(i), peon);
        serverHolderList.add(new ServerHolder(server, peon));
    }

    DruidCoordinatorRuntimeParams params = DruidCoordinatorRuntimeParams.newBuilder()
            .withDruidCluster(new DruidCluster(null,
                    ImmutableMap.of("normal",
                            serverHolderList.stream().collect(Collectors.toCollection(
                                    () -> new TreeSet<>(DruidCoordinatorBalancer.percentUsedComparator))))))
            .withLoadManagementPeons(peonMap).withAvailableSegments(segmentMap.values())
            .withDynamicConfigs(CoordinatorDynamicConfig.builder().withMaxSegmentsToMove(MAX_SEGMENTS_TO_MOVE)
                    .withReplicantLifetime(500).withReplicationThrottleLimit(5).build())
            .withBalancerReferenceTimestamp(DateTimes.of("2013-01-01")).withEmitter(emitter)
            .withDatabaseRuleManager(manager).withReplicationManager(new ReplicationThrottler(2, 500))
            .withSegmentReplicantLookup(
                    SegmentReplicantLookup.make(new DruidCluster(null,
                            ImmutableMap.of("normal", serverHolderList.stream().collect(Collectors.toCollection(
                                    () -> new TreeSet<>(DruidCoordinatorBalancer.percentUsedComparator)))))))
            .build();

    DruidCoordinatorBalancerTester tester = new DruidCoordinatorBalancerTester(coordinator);
    DruidCoordinatorRuleRunner runner = new DruidCoordinatorRuleRunner(coordinator);
    watch.start();
    DruidCoordinatorRuntimeParams balanceParams = tester.run(params);
    DruidCoordinatorRuntimeParams assignParams = runner.run(params);
    System.out.println(watch.stop());
}

From source file: io.ecarf.core.cloud.task.processor.reason.phase1.DoReasonTask5.java

@Override
public void run() throws IOException {

    GoogleCloudService cloud = (GoogleCloudService) this.getCloudService();

    //String table = metadata.getValue(EcarfMetaData.ECARF_TABLE);
    //Set<String> terms = metadata.getTerms();
    //String schemaFile = metadata.getValue(EcarfMetaData.ECARF_SCHEMA);
    //String bucket = metadata.getBucket();
    Stopwatch stopwatch1 = Stopwatch.createUnstarted();
    Stopwatch stopwatch2 = Stopwatch.createUnstarted();
    Set<String> termsSet;

    if (terms == null) {
        // too large, probably saved as a file
        //String termsFile = metadata.getValue(EcarfMetaData.ECARF_TERMS_FILE);
        log.info("Using json file for terms: " + termsFile);
        Validate.notNull(termsFile);

        String localTermsFile = Utils.TEMP_FOLDER + termsFile;
        cloud.downloadObjectFromCloudStorage(termsFile, localTermsFile, bucket);

        // convert from JSON
        termsSet = io.cloudex.framework.utils.FileUtils.jsonFileToSet(localTermsFile);

    } else {
        termsSet = ObjectUtils.csvToSet(terms);
    }

    String localSchemaFile = Utils.TEMP_FOLDER + schemaFile;
    // download the file from the cloud storage
    cloud.downloadObjectFromCloudStorage(schemaFile, localSchemaFile, bucket);

    // uncompress if compressed
    if (GzipUtils.isCompressedFilename(schemaFile)) {
        localSchemaFile = GzipUtils.getUncompressedFilename(localSchemaFile);
    }

    Map<String, Set<Triple>> allSchemaTriples = TripleUtils.getRelevantSchemaNTriples(localSchemaFile,
            TermUtils.RDFS_TBOX);

    // get all the triples we care about
    Map<Term, Set<Triple>> schemaTerms = new HashMap<>();

    for (String term : termsSet) {
        if (allSchemaTriples.containsKey(term)) {
            schemaTerms.put(new Term(term), allSchemaTriples.get(term));
        }
    }

    String decoratedTable = table;
    int emptyRetries = 0;
    int totalInferredTriples = 0;
    int maxRetries = Config.getIntegerProperty(Constants.REASON_RETRY_KEY, 6);
    String instanceId = cloud.getInstanceId();

    // timestamp loop
    do {

        List<String> productiveTerms = new ArrayList<>();
        int interimInferredTriples = 0;

        // First of all run all the queries asynchronously and remember the jobId and filename for each term
        List<Callable<Void>> queryTasks = new ArrayList<>();
        List<Callable<Void>> saveTasks = new ArrayList<>();

        for (Entry<Term, Set<Triple>> entry : schemaTerms.entrySet()) {

            Term term = entry.getKey();
            Set<Triple> triples = entry.getValue();

            QuerySubTask queryTask = new QuerySubTask(term, triples, decoratedTable, cloud);
            queryTasks.add(queryTask);

            SaveResultsSubTask saveTask = new SaveResultsSubTask(term, cloud);
            saveTasks.add(saveTask);
        }

        // invoke all the queries in parallel
        this.invokeAll(queryTasks);

        long start = System.currentTimeMillis();

        String inferredTriplesFile = Utils.TEMP_FOLDER + instanceId + '_' + start + Constants.DOT_INF;

        // save all the query results in files in parallel
        this.invokeAll(saveTasks);

        try (PrintWriter writer = new PrintWriter(
                new GZIPOutputStream(new FileOutputStream(inferredTriplesFile), Constants.GZIP_BUF_SIZE))) {

            // now loop through the queries
            for (Entry<Term, Set<Triple>> entry : schemaTerms.entrySet()) {

                Term term = entry.getKey();

                BigInteger rows = term.getRows();

                this.totalBytes = this.totalBytes + term.getBytes();

                // only process if triples are found matching this term
                if (!BigInteger.ZERO.equals(rows)) {

                    stopwatch1.start();

                    log.info("Reasoning for Term: " + term);

                    Set<Triple> schemaTriples = entry.getValue();
                    log.info("Schema Triples: " + Joiner.on('\n').join(schemaTriples));

                    List<String> select = GenericRule.getSelect(schemaTriples);

                    int inferredTriplesCount = this.inferAndSaveTriplesToFile(term, select, schemaTriples, rows,
                            decoratedTable, writer);

                    productiveTerms.add(term.getTerm());

                    interimInferredTriples += inferredTriplesCount;

                    this.totalRows = this.totalRows.add(rows);

                    stopwatch1.stop();

                } else {
                    log.info("Skipping term as no data found: " + term);
                }
            }
        }

        totalInferredTriples += interimInferredTriples;

        if (interimInferredTriples > 0) {

            // stream smaller numbers of inferred triples
            // try uploading from cloud storage
            int streamingThreshold = Config.getIntegerProperty("ecarf.io.reasoning.streaming.threshold",
                    100000);

            log.info("Inserting " + interimInferredTriples + ", inferred triples into Big Data table for "
                    + productiveTerms.size() + " productive terms. Filename: " + inferredTriplesFile);

            if (interimInferredTriples <= streamingThreshold) {
                // stream the data

                Set<Triple> inferredTriples = TripleUtils.loadCompressedCSVTriples(inferredTriplesFile, false);
                log.info("Total triples to stream into Big Data: " + inferredTriples.size());
                cloud.streamObjectsIntoBigData(inferredTriples, TableUtils.getBigQueryTripleTable(table));

                log.info("All inferred triples are streamed into Big Data table");

            } else {

                // load the data through cloud storage
                // upload the file to cloud storage
                log.info("Uploading inferred triples file into cloud storage: " + inferredTriplesFile);
                StorageObject file = cloud.uploadFileToCloudStorage(inferredTriplesFile, bucket);
                log.info("File " + file + ", uploaded successfully. Now loading it into big data.");

                String jobId = cloud.loadCloudStorageFilesIntoBigData(Lists.newArrayList(file.getUri()),
                        TableUtils.getBigQueryTripleTable(table), false);
                log.info(
                        "All inferred triples are loaded into Big Data table through cloud storage, completed jobId: "
                                + jobId);

            }

            // reset empty retries
            emptyRetries = 0;

            stopwatch2.reset();

        } else {
            log.info("No new inferred triples");
            // increment empty retries
            emptyRetries++;

            if (!stopwatch2.isRunning()) {
                stopwatch2.start();
            }
        }

        log.info("Total inferred triples so far = " + totalInferredTriples + ", current retry count: "
                + emptyRetries);

        if (emptyRetries < maxRetries) {
            ApiUtils.block(Config.getIntegerProperty(Constants.REASON_SLEEP_KEY, 20));

            // FIXME move into the particular cloud implementation service
            long elapsed = System.currentTimeMillis() - start;
            decoratedTable = "[" + table + "@-" + elapsed + "-]";

            log.info("Using table decorator: " + decoratedTable + ". Empty retries count: " + emptyRetries);
        }

    } while (emptyRetries < maxRetries); // end timestamp loop

    executor.shutdown();
    log.info("Finished reasoning, total inferred triples = " + totalInferredTriples);
    log.info("Number of avoided duplicate terms = " + this.duplicates);
    log.info("Total rows retrieved from big data = " + this.totalRows);
    log.info("Total processed GBytes = " + ((double) this.totalBytes / FileUtils.ONE_GB));
    log.info("Total process reasoning time (serialization in inf file) = " + stopwatch1);
    log.info("Total time spent in empty inference cycles = " + stopwatch2);
}

From source file: io.druid.server.coordinator.DruidCoordinatorBalancerProfiler.java

public void bigProfiler() {
    Stopwatch watch = Stopwatch.createUnstarted();
    int numSegments = 55000;
    int numServers = 50;
    EasyMock.expect(manager.getAllRules()).andReturn(ImmutableMap.<String, List<Rule>>of("test", rules))
            .anyTimes();
    EasyMock.expect(manager.getRules(EasyMock.<String>anyObject())).andReturn(rules).anyTimes();
    EasyMock.expect(manager.getRulesWithDefault(EasyMock.<String>anyObject())).andReturn(rules).anyTimes();
    EasyMock.replay(manager);

    coordinator.moveSegment(EasyMock.<ImmutableDruidServer>anyObject(),
            EasyMock.<ImmutableDruidServer>anyObject(), EasyMock.<String>anyObject(),
            EasyMock.<LoadPeonCallback>anyObject());
    EasyMock.expectLastCall().anyTimes();
    EasyMock.replay(coordinator);

    List<DruidServer> serverList = Lists.newArrayList();
    Map<String, LoadQueuePeon> peonMap = Maps.newHashMap();
    List<ServerHolder> serverHolderList = Lists.newArrayList();
    Map<String, DataSegment> segmentMap = Maps.newHashMap();
    for (int i = 0; i < numSegments; i++) {
        segmentMap.put("segment" + i, new DataSegment("datasource" + i,
                new Interval(new DateTime("2012-01-01"), (new DateTime("2012-01-01")).plusHours(1)),
                (new DateTime("2012-03-01")).toString(), Maps.<String, Object>newHashMap(),
                Lists.<String>newArrayList(), Lists.<String>newArrayList(), new NoneShardSpec(), 0, 4L));
    }

    for (int i = 0; i < numServers; i++) {
        ImmutableDruidServer server = EasyMock.createMock(ImmutableDruidServer.class);
        EasyMock.expect(server.getMetadata()).andReturn(null).anyTimes();
        EasyMock.expect(server.getCurrSize()).andReturn(30L).atLeastOnce();
        EasyMock.expect(server.getMaxSize()).andReturn(100L).atLeastOnce();
        EasyMock.expect(server.getTier()).andReturn("normal").anyTimes();
        EasyMock.expect(server.getName()).andReturn(Integer.toString(i)).atLeastOnce();
        EasyMock.expect(server.getHost()).andReturn(Integer.toString(i)).anyTimes();
        if (i == 0) {
            EasyMock.expect(server.getSegments()).andReturn(segmentMap).anyTimes();
        } else {
            EasyMock.expect(server.getSegments()).andReturn(new HashMap<String, DataSegment>()).anyTimes();
        }
        EasyMock.expect(server.getSegment(EasyMock.<String>anyObject())).andReturn(null).anyTimes();
        EasyMock.replay(server);

        LoadQueuePeon peon = new LoadQueuePeonTester();
        peonMap.put(Integer.toString(i), peon);
        serverHolderList.add(new ServerHolder(server, peon));
    }

    DruidCoordinatorRuntimeParams params = DruidCoordinatorRuntimeParams.newBuilder()
            .withDruidCluster(
                    new DruidCluster(ImmutableMap.<String, MinMaxPriorityQueue<ServerHolder>>of("normal",
                            MinMaxPriorityQueue.orderedBy(DruidCoordinatorBalancerTester.percentUsedComparator)
                                    .create(serverHolderList))))
            .withLoadManagementPeons(peonMap).withAvailableSegments(segmentMap.values())
            .withDynamicConfigs(
                    new CoordinatorDynamicConfig.Builder().withMaxSegmentsToMove(MAX_SEGMENTS_TO_MOVE)
                            .withReplicantLifetime(500).withReplicationThrottleLimit(5).build())
            .withBalancerReferenceTimestamp(new DateTime("2013-01-01")).withEmitter(emitter)
            .withDatabaseRuleManager(manager).withReplicationManager(new ReplicationThrottler(2, 500))
            .withSegmentReplicantLookup(SegmentReplicantLookup
                    .make(new DruidCluster(ImmutableMap.<String, MinMaxPriorityQueue<ServerHolder>>of("normal",
                            MinMaxPriorityQueue.orderedBy(DruidCoordinatorBalancerTester.percentUsedComparator)
                                    .create(serverHolderList)))))
            .build();

    DruidCoordinatorBalancerTester tester = new DruidCoordinatorBalancerTester(coordinator);
    DruidCoordinatorRuleRunner runner = new DruidCoordinatorRuleRunner(coordinator);
    watch.start();
    DruidCoordinatorRuntimeParams balanceParams = tester.run(params);
    DruidCoordinatorRuntimeParams assignParams = runner.run(params);
    System.out.println(watch.stop());
}