Example usage for com.google.common.base Stopwatch start

Introduction

This page lists example usages of com.google.common.base.Stopwatch#start, collected from open source projects.

Prototype

public Stopwatch start() 

Document

Starts the stopwatch. Returns this Stopwatch instance; throws IllegalStateException if the stopwatch is already running.
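
A minimal, self-contained sketch of the lifecycle, assuming a recent Guava release (where Stopwatch instances come from static factories rather than the public constructor used in several of the older examples below):

import java.util.concurrent.TimeUnit;

import com.google.common.base.Stopwatch;

public class StopwatchStartDemo {
    public static void main(String[] args) throws InterruptedException {
        Stopwatch watch = Stopwatch.createUnstarted(); // stopped, elapsed time is zero
        watch.start();                                 // begins timing; throws IllegalStateException if already running
        TimeUnit.MILLISECONDS.sleep(50);               // stand-in for the work being measured
        watch.stop();
        System.out.println("took " + watch.elapsed(TimeUnit.MILLISECONDS) + " ms");
    }
}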

Usage

From source file: org.apache.drill.exec.store.schedule.AssignmentCreator.java

/**
 * Does the work of creating the mappings for this AssignmentCreator
 * @return the minor fragment id to work units mapping
 */
private ListMultimap<Integer, T> getMappings() {
    Stopwatch watch = new Stopwatch();
    watch.start();
    maxWork = (int) Math.ceil(units.size() / ((float) incomingEndpoints.size()));
    LinkedList<WorkEndpointListPair<T>> workList = getWorkList();
    LinkedList<WorkEndpointListPair<T>> unassignedWorkList;
    Map<DrillbitEndpoint, FragIteratorWrapper> endpointIterators = getEndpointIterators();

    unassignedWorkList = assign(workList, endpointIterators, true);

    assignLeftovers(unassignedWorkList, endpointIterators, true);
    assignLeftovers(unassignedWorkList, endpointIterators, false);

    if (unassignedWorkList.size() != 0) {
        throw new DrillRuntimeException("There are still unassigned work units");
    }

    logger.debug("Took {} ms to assign {} work units to {} fragments", watch.elapsed(TimeUnit.MILLISECONDS),
            units.size(), incomingEndpoints.size());
    return mappings;
}
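
A note on the construction above: new Stopwatch() only compiles against older Guava releases; the constructors were deprecated in Guava 15 and removed in later releases in favor of static factories. On current Guava, the two lines at the top of this method collapse to one call:

Stopwatch watch = Stopwatch.createStarted(); // created already running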

From source file: org.apache.drill.exec.store.schedule.AssignmentCreator.java

/**
 * Groups minor fragments together by corresponding endpoint, and creates an iterator that can be used to evenly
 * distribute work assigned to a given endpoint to all corresponding minor fragments evenly
 *
 * @return
 */
private Map<DrillbitEndpoint, FragIteratorWrapper> getEndpointIterators() {
    Stopwatch watch = new Stopwatch();
    watch.start();
    Map<DrillbitEndpoint, FragIteratorWrapper> map = Maps.newLinkedHashMap();
    Map<DrillbitEndpoint, List<Integer>> mmap = Maps.newLinkedHashMap();
    for (int i = 0; i < incomingEndpoints.size(); i++) {
        DrillbitEndpoint endpoint = incomingEndpoints.get(i);
        List<Integer> intList = mmap.get(incomingEndpoints.get(i));
        if (intList == null) {
            intList = Lists.newArrayList();
        }
        intList.add(Integer.valueOf(i));
        mmap.put(endpoint, intList);
    }

    for (DrillbitEndpoint endpoint : mmap.keySet()) {
        FragIteratorWrapper wrapper = new FragIteratorWrapper();
        wrapper.iter = Iterators.cycle(mmap.get(endpoint));
        wrapper.maxCount = maxWork * mmap.get(endpoint).size();
        wrapper.minCount = Math.max(maxWork - 1, 1) * mmap.get(endpoint).size();
        map.put(endpoint, wrapper);
    }
    return map;
}

From source file: com.Grande.GSM.BACCWS_WAR.WS.REST.EOS.SNMPEndpoint.java

@GET
public String fetchModemByMac(@QueryParam("mac") final String strMac) {

    // <editor-fold defaultstate="collapsed" desc="****** Method vars ******">
    final Stopwatch timer = new Stopwatch();
    final QueryResponse qRes = new QueryResponse();
    String strResponse = null;
    Map mapModem = null;
    // start the execution timer
    timer.start();
    // </editor-fold>

    try {

        qRes.vSetNode(java.net.InetAddress.getLocalHost().getHostName());
        mapModem = this.bacEJB.mapFetchModemByMac(strMac);
        qRes.vSetSuccessFlag(true);
        qRes.vAddResult(mapModem);

    } catch (Exception e) {

        // <editor-fold defaultstate="collapsed" desc="****** Handle failures ******">
        qRes.vSetSuccessFlag(false);
        // handle NPE differently since getMessage() is null
        if (e instanceof NullPointerException) {
            qRes.vSetMessage("NPE occurred when serializing result to JSON! " + "File: "
                    + e.getStackTrace()[0].getFileName() + ", " + "Method: "
                    + e.getStackTrace()[0].getMethodName() + ", " + "Line: "
                    + e.getStackTrace()[0].getLineNumber());
        } else {
            qRes.vSetMessage(e.getMessage());
        }
        SimpleLogging.vLogException(this.strThreadId, e);
        // </editor-fold>

    } finally {

        // <editor-fold defaultstate="collapsed" desc="****** Stop timer, convert response to JSON ******">
        timer.stop();
        qRes.vSetRoundTrip(String.valueOf(timer.elapsedTime(TimeUnit.SECONDS)) + "."
                + String.valueOf(timer.elapsedTime(TimeUnit.MILLISECONDS)));
        strResponse = this.trnBN.strQueryResponseToJSON(qRes);
        SimpleLogging.vLogEvent(this.strThreadId + "|" + qRes.strGetRoundTripInSeconds() + "s",
                "retrieved " + qRes.intGetDataCount() + " records");
        // </editor-fold>

    }
    return strResponse;

}
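
One caveat in the finally block above (the DHCP and CPE endpoints below share the same code): elapsedTime(TimeUnit.MILLISECONDS) returns the total elapsed milliseconds, not just the sub-second remainder, so appending it after the whole seconds double-counts them (1.5 s renders as "1.1500"). A sketch of one way to build the intended seconds.millis string:

// split the total elapsed millis into whole seconds and the sub-second remainder
long millis = timer.elapsedTime(TimeUnit.MILLISECONDS);
qRes.vSetRoundTrip(String.format("%d.%03d", millis / 1000, millis % 1000));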

From source file: com.Grande.GSM.BACCWS_WAR.WS.REST.EOS.DHCPEndpoint.java

@GET
public String fetchDHCPInfoByMAC(@QueryParam("mac") final String strMac) {

    // <editor-fold defaultstate="collapsed" desc="****** Method vars ******">
    final Stopwatch timer = new Stopwatch();
    final QueryResponse qRes = new QueryResponse();
    String strResponse = null;
    Map mapModem = null;
    // start the execution timer
    timer.start();
    // </editor-fold>

    try {

        qRes.vSetNode(java.net.InetAddress.getLocalHost().getHostName());
        mapModem = this.bacEJB.mapFetchDHCPByMac(strMac);
        qRes.vSetSuccessFlag(true);
        qRes.vAddResult(mapModem);

    } catch (Exception e) {

        // <editor-fold defaultstate="collapsed" desc="****** Handle failures ******">
        qRes.vSetSuccessFlag(false);
        // handle NPE differently since getMessage() is null
        if (e instanceof NullPointerException) {
            qRes.vSetMessage("NPE occurred when serializing result to JSON! " + "File: "
                    + e.getStackTrace()[0].getFileName() + ", " + "Method: "
                    + e.getStackTrace()[0].getMethodName() + ", " + "Line: "
                    + e.getStackTrace()[0].getLineNumber());
        } else {
            qRes.vSetMessage(e.getMessage());
        }
        SimpleLogging.vLogException(this.strThreadId, e);
        // </editor-fold>

    } finally {

        // <editor-fold defaultstate="collapsed" desc="****** Stop timer, convert response to JSON ******">
        timer.stop();
        qRes.vSetRoundTrip(String.valueOf(timer.elapsedTime(TimeUnit.SECONDS)) + "."
                + String.valueOf(timer.elapsedTime(TimeUnit.MILLISECONDS)));
        strResponse = this.trnBN.strQueryResponseToJSON(qRes);
        SimpleLogging.vLogEvent(this.strThreadId + "|" + qRes.strGetRoundTripInSeconds() + "s",
                "retrieved " + qRes.intGetDataCount() + " records");
        // </editor-fold>

    }
    return strResponse;

}

From source file: com.Grande.GSM.BACCWS_WAR.WS.REST.EOS.CPEEndpoint.java

@GET
public String fetchDHCPInfoByMAC(@QueryParam("mac") final String strMac) {

    // <editor-fold defaultstate="collapsed" desc="****** Method vars ******">
    final Stopwatch timer = new Stopwatch();
    final QueryResponse qRes = new QueryResponse();
    String strResponse = null;
    List lstCPE = null;
    // start the execution timer
    timer.start();
    // </editor-fold>

    try {

        qRes.vSetNode(java.net.InetAddress.getLocalHost().getHostName());
        lstCPE = this.bacEJB.mapFetchCPEByMac(strMac);
        qRes.vSetSuccessFlag(true);
        qRes.vSquashResult(lstCPE);

    } catch (Exception e) {

        // <editor-fold defaultstate="collapsed" desc="****** Handle failures ******">
        qRes.vSetSuccessFlag(false);
        // handle NPE differently since getMessage() is null
        if (e instanceof NullPointerException) {
            qRes.vSetMessage("NPE occurred when serializing result to JSON! " + "File: "
                    + e.getStackTrace()[0].getFileName() + ", " + "Method: "
                    + e.getStackTrace()[0].getMethodName() + ", " + "Line: "
                    + e.getStackTrace()[0].getLineNumber());
        } else {
            qRes.vSetMessage(e.getMessage());
        }
        SimpleLogging.vLogException(this.strThreadId, e);
        // </editor-fold>

    } finally {

        // <editor-fold defaultstate="collapsed" desc="****** Stop timer, convert response to JSON ******">
        timer.stop();
        qRes.vSetRoundTrip(String.valueOf(timer.elapsedTime(TimeUnit.SECONDS)) + "."
                + String.valueOf(timer.elapsedTime(TimeUnit.MILLISECONDS)));
        strResponse = this.trnBN.strQueryResponseToJSON(qRes);
        SimpleLogging.vLogEvent(this.strThreadId + "|" + qRes.strGetRoundTripInSeconds() + "s",
                "retrieved " + qRes.intGetDataCount() + " records");
        // </editor-fold>

    }
    return strResponse;

}

From source file: es.usc.citius.composit.core.composition.search.NaiveForwardServiceDiscoverer.java

public ServiceMatchNetwork<E, T> search(Signature<E> signature) {
    Set<E> availableInputs = new HashSet<E>(signature.getInputs());
    Set<E> newOutputs = new HashSet<E>(signature.getInputs());
    Set<Operation<E>> usedServices = new HashSet<Operation<E>>();
    List<Set<Operation<E>>> leveledOps = new LinkedList<Set<Operation<E>>>();

    boolean checkExpectedOutputs = !signature.getOutputs().isEmpty();
    boolean stop;

    Stopwatch timer = Stopwatch.createStarted();
    Stopwatch levelTimer = Stopwatch.createUnstarted();
    int level = 0;
    do {
        HashSet<Operation<E>> candidates = new HashSet<Operation<E>>();
        levelTimer.start();
        candidates.addAll(discovery.findOperationsConsumingSome(newOutputs));
        log.info("(Level {}) {} potential candidates selected in {}", level++, candidates.size(),
                levelTimer.toString());
        // Remove services that cannot be invoked with the available inputs
        for (Iterator<Operation<E>> it = candidates.iterator(); it.hasNext();) {
            Operation<E> candidate = it.next();
            Set<E> matched = matcher.partialMatch(availableInputs, candidate.getSignature().getInputs())
                    .getTargetElements();
            // Invokable?
            if (matched.equals(candidate.getSignature().getInputs())) {
                // Invokable operation, check if it was used previously
                boolean isNew = usedServices.add(candidate);
                if (!isNew)
                    it.remove();
            } else {
                it.remove();
            }
        }
        log.info("\t + [{}] operations selected for this level in {}: {}", candidates.size(),
                levelTimer.toString(), candidates);

        // Collect the new outputs of the new candidates
        newOutputs = Operations.outputs(candidates);
        availableInputs.addAll(newOutputs);
        Set<E> matchedOutputs = matcher.partialMatch(availableInputs, signature.getOutputs())
                .getTargetElements();

        // Add the discovered ops
        if (!candidates.isEmpty())
            leveledOps.add(candidates);

        log.debug("\t + Available inputs: {}, new outputs: {}", availableInputs.size(), newOutputs.size());
        // Stop condition. Stop if there are no more candidates and/or expected outputs are satisfied.
        stop = (checkExpectedOutputs) ? candidates.isEmpty() || matchedOutputs.equals(signature.getOutputs())
                : candidates.isEmpty();
        levelTimer.reset();
    } while (!stop);

    // Add the source and sink operations
    Source<E> sourceOp = new Source<E>(signature.getInputs());
    Sink<E> sinkOp = new Sink<E>(signature.getOutputs());
    leveledOps.add(0, Collections.<Operation<E>>singleton(sourceOp));
    leveledOps.add(leveledOps.size(), Collections.<Operation<E>>singleton(sinkOp));
    Stopwatch networkWatch = Stopwatch.createStarted();
    // Create a service match network with the discovered services
    DirectedAcyclicSMN<E, T> matchNetwork = new DirectedAcyclicSMN<E, T>(new HashLeveledServices<E>(leveledOps),
            this.matcher);
    log.info(" > Service match network computed in {}", networkWatch.stop().toString());
    log.info("Service Match Network created with {} levels (including source and sink) and {} operations.",
            leveledOps.size(), matchNetwork.listOperations().size());
    log.info("Forward Discovery done in {}", timer.toString());
    return matchNetwork;
}
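
This example uses the factory-based API: createStarted() returns a running stopwatch, createUnstarted() a stopped one, and reset() zeroes the elapsed time and stops the watch, which is why the levelTimer.start() at the top of the loop is legal on every iteration. The per-level pattern in isolation (doLevelWork is a hypothetical stand-in):

Stopwatch levelTimer = Stopwatch.createUnstarted();
for (int level = 0; level < 3; level++) {
    levelTimer.start();
    doLevelWork(level);                                           // hypothetical per-level work
    System.out.println("level " + level + " took " + levelTimer); // toString() renders a readable duration
    levelTimer.reset();                                           // elapsed back to zero, stopped again
}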

From source file: org.apache.drill.exec.store.schedule.AssignmentCreator.java

/**
 * Builds the list of WorkEndpointListPairs, which pair a work unit with a list of endpoints sorted by affinity
 * @return the list of WorkEndpointListPairs
 */
private LinkedList<WorkEndpointListPair<T>> getWorkList() {
    Stopwatch watch = new Stopwatch();
    watch.start();
    LinkedList<WorkEndpointListPair<T>> workList = Lists.newLinkedList();
    for (T work : units) {
        List<Map.Entry<DrillbitEndpoint, Long>> entries = Lists.newArrayList();
        for (ObjectLongCursor<DrillbitEndpoint> cursor : work.getByteMap()) {
            final DrillbitEndpoint ep = cursor.key;
            final Long val = cursor.value;
            Map.Entry<DrillbitEndpoint, Long> entry = new Entry() {

                @Override
                public Object getKey() {
                    return ep;
                }

                @Override
                public Object getValue() {
                    return val;
                }

                @Override
                public Object setValue(Object value) {
                    throw new UnsupportedOperationException();
                }
            };
            entries.add(entry);
        }
        Collections.sort(entries, comparator);
        List<DrillbitEndpoint> sortedEndpoints = Lists.newArrayList();
        for (Entry<DrillbitEndpoint, Long> entry : entries) {
            sortedEndpoints.add(entry.getKey());
        }
        workList.add(new WorkEndpointListPair<T>(work, sortedEndpoints));
    }
    return workList;
}

From source file: co.cask.cdap.data2.util.hbase.DefaultHBaseDDLExecutor.java

@Override
public void createTableIfNotExists(TableDescriptor descriptor, @Nullable byte[][] splitKeys)
        throws IOException {
    HTableDescriptor htd = getHTableDescriptor(descriptor);
    if (admin.tableExists(htd.getName())) {
        return;
    }

    boolean tableExistsFailure = false;
    try {
        LOG.debug("Attempting to create table '{}' if it does not exist", Bytes.toString(htd.getName()));
        admin.createTable(htd, splitKeys);
    } catch (TableExistsException e) {
        // table may exist because someone else is creating it at the same
        // time. But it may not be available yet, and opening it might fail.
        LOG.debug("Table '{}' already exists.", Bytes.toString(htd.getName()), e);
        tableExistsFailure = true;
    }

    // Wait for table to materialize
    try {
        Stopwatch stopwatch = new Stopwatch();
        stopwatch.start();
        long sleepTime = TimeUnit.MILLISECONDS.toNanos(5000L) / 10;
        sleepTime = sleepTime <= 0 ? 1 : sleepTime;
        do {
            if (admin.tableExists(htd.getName())) {
                if (tableExistsFailure) {
                    LOG.info("Table '{}' exists now. Assuming that another process concurrently created it.",
                            Bytes.toString(htd.getName()));
                } else {
                    LOG.info("Table '{}' created.", Bytes.toString(htd.getName()));
                }
                return;
            } else {
                TimeUnit.NANOSECONDS.sleep(sleepTime);
            }
        } while (stopwatch.elapsedTime(TimeUnit.MILLISECONDS) < 5000L);
    } catch (InterruptedException e) {
        LOG.warn("Sleeping thread interrupted.");
    }
    LOG.error("Table '{}' does not exist after waiting {} ms. Giving up.", Bytes.toString(htd.getName()),
            MAX_CREATE_TABLE_WAIT);
}
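
The loop above uses the stopwatch as a deadline: sleep, re-check, and give up once the elapsed time passes the limit. The same idea as a small generic helper (a sketch, not CDAP API, written against the modern elapsed(TimeUnit) method):

static boolean waitFor(java.util.function.BooleanSupplier condition, long timeoutMs, long pollMs)
        throws InterruptedException {
    Stopwatch watch = Stopwatch.createStarted();
    while (watch.elapsed(TimeUnit.MILLISECONDS) < timeoutMs) {
        if (condition.getAsBoolean()) {
            return true;                  // condition met before the deadline
        }
        TimeUnit.MILLISECONDS.sleep(pollMs);
    }
    return false;                         // deadline passed without success
}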

From source file: org.apache.accumulo.master.replication.RemoveCompleteReplicationRecords.java

@Override
public void run() {
    BatchScanner bs;
    BatchWriter bw;
    try {
        bs = ReplicationTable.getBatchScanner(conn, 4);
        bw = ReplicationTable.getBatchWriter(conn);

        if (bs == null || bw == null)
            throw new AssertionError(
                    "Inconceivable; an exception should have been thrown, but 'bs' or 'bw' was null instead");
    } catch (ReplicationTableOfflineException e) {
        log.debug("Not attempting to remove complete replication records as the table ({}) isn't yet online",
                ReplicationTable.NAME);
        return;
    }

    bs.setRanges(Collections.singleton(new Range()));
    IteratorSetting cfg = new IteratorSetting(50, WholeRowIterator.class);
    StatusSection.limit(bs);
    WorkSection.limit(bs);
    bs.addScanIterator(cfg);

    Stopwatch sw = new Stopwatch();
    long recordsRemoved = 0;
    try {
        sw.start();
        recordsRemoved = removeCompleteRecords(conn, bs, bw);
    } finally {
        if (null != bs) {
            bs.close();
        }
        if (null != bw) {
            try {
                bw.close();
            } catch (MutationsRejectedException e) {
                log.error("Error writing mutations to {}, will retry", ReplicationTable.NAME, e);
            }
        }

        sw.stop();
    }

    log.info("Removed {} complete replication entries from the table {}", recordsRemoved,
            ReplicationTable.NAME);
}
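
Here start() runs inside the try block and stop() in the finally, so the elapsed time is recorded even if removeCompleteRecords throws. The pattern in isolation (riskyWork is a hypothetical stand-in):

Stopwatch sw = Stopwatch.createStarted();
try {
    riskyWork();                          // hypothetical operation that may throw
} finally {
    sw.stop();
    System.out.println("riskyWork finished in " + sw);
}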

From source file: de.lemo.dms.processing.questions.QCourseUserPaths.java

@SuppressWarnings("unchecked")
@POST
public JSONObject compute(@FormParam(MetaParam.COURSE_IDS) final List<Long> courses,
        @FormParam(MetaParam.START_TIME) final Long startTime, @FormParam(MetaParam.END_TIME) Long endTime,
        @FormParam(MetaParam.GENDER) List<Long> gender) throws JSONException {

    validateTimestamps(startTime, endTime);

    final Stopwatch stopWatch = new Stopwatch();
    stopWatch.start();

    final IDBHandler dbHandler = ServerConfiguration.getInstance().getMiningDbHandler();
    final Session session = dbHandler.getMiningSession();

    Criteria criteria;
    List<Long> users = new ArrayList<Long>(StudentHelper.getCourseStudentsAliasKeys(courses, gender).values());

    criteria = session.createCriteria(ILogMining.class, "log").add(Restrictions.in("log.course.id", courses))
            .add(Restrictions.between("log.timestamp", startTime, endTime))
            .add(Restrictions.eq("log.action", "view"));
    if (!users.isEmpty()) {
        criteria.add(Restrictions.in("log.user.id", users));
    } else {
        this.logger.debug("No users found for courses. Returning empty JSONObject.");
        return new JSONObject();
    }

    final List<ILogMining> logs = criteria.list();

    this.logger.debug("Found " + users.size() + " actions. " + stopWatch.elapsedTime(TimeUnit.SECONDS));

    long courseCount = 0;
    final BiMap<CourseMining, Long> courseNodePositions = HashBiMap.create();
    final Map<Long/* user id */, List<Long/* course id */>> userPaths = Maps.newHashMap();

    this.logger.debug("Paths fetched: " + logs.size() + ". " + stopWatch.elapsedTime(TimeUnit.SECONDS));

    Map<Long, Long> idToAlias = StudentHelper.getCourseStudentsRealKeys(courses, gender);

    for (final ILogMining log : logs) {

        final CourseMining course = log.getCourse();
        Long nodeID = courseNodePositions.get(course);
        if (nodeID == null) {
            nodeID = courseCount++;
            courseNodePositions.put(course, nodeID);
        }

        final long userId = idToAlias.get(log.getUser().getId());

        List<Long> nodeIDs = userPaths.get(userId);
        if (nodeIDs == null) {
            nodeIDs = Lists.newArrayList();
            userPaths.put(userId, nodeIDs);
        }
        nodeIDs.add(nodeID);
    }

    this.logger.debug("userPaths: " + userPaths.size());

    final Map<Long /* node id */, List<UserPathLink>> coursePaths = Maps.newHashMap();

    for (final Entry<Long, List<Long>> userEntry : userPaths.entrySet()) {

        UserPathLink lastLink = null;

        for (final Long nodeID : userEntry.getValue()) {
            List<UserPathLink> links = coursePaths.get(nodeID);
            if (links == null) {
                links = Lists.newArrayList();
                coursePaths.put(nodeID, links);
            }
            final UserPathLink link = new UserPathLink(String.valueOf(nodeID), "0");
            links.add(link);

            if (lastLink != null) {
                lastLink.setTarget(String.valueOf(nodeID));
            }
            lastLink = link;
        }
    }
    stopWatch.stop();
    this.logger.debug("coursePaths: " + coursePaths.size());
    this.logger.debug("Total Fetched log entries: " + (logs.size() + logs.size()) + " log entries."
            + stopWatch.elapsedTime(TimeUnit.SECONDS));

    final Set<UserPathLink> links = Sets.newHashSet();

    final JSONObject result = new JSONObject();
    final JSONArray nodes = new JSONArray();
    final JSONArray edges = new JSONArray();

    for (final Entry<Long, List<UserPathLink>> courseEntry : coursePaths.entrySet()) {
        final JSONObject node = new JSONObject();
        node.put("name", courseNodePositions.inverse().get(courseEntry.getKey()).getTitle());
        node.put("value", courseEntry.getValue().size());
        node.put("group", courses.contains(courseNodePositions.inverse().get(courseEntry.getKey())) ? 1 : 2);
        nodes.put(node);

        for (final UserPathLink edge : courseEntry.getValue()) {
            if (edge.getTarget() == edge.getSource()) {
                continue;
            }
            links.add(edge);
        }
    }

    for (final UserPathLink link : links) {
        final JSONObject edgeJSON = new JSONObject();
        edgeJSON.put("target", link.getTarget());
        edgeJSON.put("source", link.getSource());
        edges.put(edgeJSON);
    }

    this.logger.debug("Nodes: " + nodes.length() + ", Links: " + edges.length() + "   / time: "
            + stopWatch.elapsedTime(TimeUnit.SECONDS));

    result.put("nodes", nodes);
    result.put("links", edges);
    session.close();
    return result;
}
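
Unlike most of the examples above, this one samples the stopwatch several times while it is still running; the elapsed time can be read at any point and keeps accumulating until stop() is called. A minimal sketch of that pattern on current Guava (phaseOne and phaseTwo are hypothetical stand-ins):

Stopwatch sw = Stopwatch.createStarted();
phaseOne();                                   // hypothetical first phase
System.out.println("after phase one: " + sw); // read while still running
phaseTwo();                                   // hypothetical second phase
sw.stop();
System.out.println("total: " + sw.elapsed(TimeUnit.MILLISECONDS) + " ms");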