Example usage for org.apache.commons.lang3.mutable MutableLong MutableLong

List of usage examples for org.apache.commons.lang3.mutable MutableLong MutableLong

Introduction

On this page you can find example usage for the org.apache.commons.lang3.mutable.MutableLong constructor.

Prototype

public MutableLong(final long value) 

Document

Constructs a new MutableLong with the specified value.
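
A minimal, self-contained sketch of the constructor in isolation (the class name and values below are purely illustrative):

import org.apache.commons.lang3.mutable.MutableLong;

public class MutableLongConstructorSketch {
    public static void main(String[] args) {
        // Construct a MutableLong with an initial value
        MutableLong counter = new MutableLong(0L);

        // Unlike java.lang.Long, the wrapped value can be changed in place
        counter.increment();
        counter.add(41L);

        System.out.println(counter.longValue()); // prints 42
    }
}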

Usage

From source file:com.cg.mapreduce.fpgrowth.mahout.fpm.fpgrowth2.FPGrowthIds.java

/**
 * Top K FpGrowth Algorithm
 *
 * @param tree
 *          to be mined
 * @param minSupportValue
 *          minimum support of the pattern to keep
 * @param k
 *          Number of top frequent patterns to keep
 * @param requiredFeatures
 *          Set of integer id's of features to mine
 * @param outputCollector
 *          the Collector class which converts the given frequent pattern in
 *          integer to A
 * @return Top K Frequent Patterns for each feature and their support
 */
private static Map<Integer, FrequentPatternMaxHeap> fpGrowth(FPTree tree, long minSupportValue, int k,
        IntArrayList requiredFeatures, TopKPatternsOutputConverter<Integer> outputCollector,
        StatusUpdater updater) throws IOException {

    Map<Integer, FrequentPatternMaxHeap> patterns = Maps.newHashMap();
    requiredFeatures.sort();
    for (int attribute : tree.attrIterableRev()) {
        if (requiredFeatures.binarySearch(attribute) >= 0) {
            log.info("Mining FTree Tree for all patterns with {}", attribute);
            MutableLong minSupport = new MutableLong(minSupportValue);
            FrequentPatternMaxHeap frequentPatterns = growth(tree, minSupport, k, attribute, updater);
            patterns.put(attribute, frequentPatterns);
            outputCollector.collect(attribute, frequentPatterns);

            minSupportValue = Math.max(minSupportValue, minSupport.longValue() / 2);
            //System.out.println( patterns.get(attribute).count()+": "+patterns.get(attribute).leastSupport());
            log.info("Found {} Patterns with Least Support {}", patterns.get(attribute).count(),
                    patterns.get(attribute).leastSupport());
        }
    }
    return patterns;
}
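
In the example above, the MutableLong minSupport is an in/out parameter: growth() may raise the minimum support while mining, and the caller reads the updated value back afterwards. A generic sketch of that idiom (the mine() method below is hypothetical and unrelated to the Mahout code):

import org.apache.commons.lang3.mutable.MutableLong;

public class InOutParameterSketch {

    // The callee may tighten the threshold; the caller sees the change
    static void mine(MutableLong minSupport) {
        if (minSupport.longValue() < 10L) {
            minSupport.setValue(10L);
        }
    }

    public static void main(String[] args) {
        MutableLong minSupport = new MutableLong(3L);
        mine(minSupport);
        System.out.println(minSupport.longValue()); // prints 10
    }
}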

From source file:com.datatorrent.lib.appdata.query.WEQueryQueueManagerTest.java

@Test
public void testResetRead() {
    final int numQueries = 3;

    WindowEndQueueManager<Query, Void> wqqm = new WindowEndQueueManager<>();

    wqqm.setup(null);
    wqqm.beginWindow(0);

    for (int qc = 0; qc < numQueries; qc++) {
        Query query = new MockQuery(Integer.toString(qc));
        wqqm.enqueue(query, null, new MutableLong(3L));
    }

    Query query = wqqm.dequeue().getQuery();
    Query query1 = wqqm.dequeue().getQuery();

    Assert.assertEquals("Query ids must equal.", "0", query.getId());
    Assert.assertEquals("Query ids must equal.", "1", query1.getId());

    wqqm.endWindow();
    wqqm.beginWindow(1);

    {
        int qc = 0;

        for (QueryBundle<Query, Void, MutableLong> tquery; (tquery = wqqm.dequeue()) != null; qc++) {
            Assert.assertEquals("Query ids must equal.", Integer.toString(qc), tquery.getQuery().getId());
        }

        Assert.assertEquals("The number of queries must match.", numQueries, qc);
    }

    wqqm.endWindow();
    wqqm.teardown();
}
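
In these WindowEndQueueManager tests, the MutableLong passed to enqueue() is the per-query context that controls how many windows the query stays queued; the exact semantics belong to the DataTorrent library. A library-independent sketch of a MutableLong used as a shared countdown:

import org.apache.commons.lang3.mutable.MutableLong;

public class CountdownSketch {
    public static void main(String[] args) {
        // Number of windows a queued item remains valid; shared by reference
        MutableLong remainingWindows = new MutableLong(3L);

        while (remainingWindows.longValue() > 0) {
            // ... process one window here ...
            remainingWindows.decrement();
        }

        System.out.println("expired after reaching " + remainingWindows.longValue());
    }
}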

From source file:com.romeikat.datamessie.core.sync.service.template.withIdAndVersion.EntityWithIdAndVersionSynchronizer.java

private void delete(final TaskExecution taskExecution) throws TaskCancelledException {
    final String msg = String.format("Deleting RHS for %s", clazz.getSimpleName());
    taskExecution.reportWork(msg);

    // Process in batches
    final MutableLong firstId = new MutableLong(0);
    while (true) {
        try {
            final boolean moreBatches = deleteBatch(taskExecution, firstId);
            if (!moreBatches) {
                return;
            }
        } catch (final PersistenceException e) {
            retry(taskExecution);
        }
    }
}
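
Here firstId is a cursor shared across batch calls: deleteBatch() advances it, so each iteration or retry resumes where the previous batch stopped. A self-contained sketch of that cursor pattern (processBatch(), the id range, and the batch size are made up for illustration):

import org.apache.commons.lang3.mutable.MutableLong;

public class BatchCursorSketch {

    static final long LAST_ID = 95L;
    static final long BATCH_SIZE = 10L;

    // Processes one batch starting at the cursor and advances it;
    // returns false when nothing is left to process
    static boolean processBatch(MutableLong firstId) {
        if (firstId.longValue() > LAST_ID) {
            return false;
        }
        System.out.println("processing ids " + firstId.longValue()
                + " to " + (firstId.longValue() + BATCH_SIZE - 1));
        firstId.add(BATCH_SIZE);
        return true;
    }

    public static void main(String[] args) {
        MutableLong firstId = new MutableLong(0);
        while (processBatch(firstId)) {
            // keep going until the cursor passes the last id
        }
    }
}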

From source file:com.thinkbiganalytics.nifi.v2.ingest.StripHeader.java

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    final StripHeaderSupport headerSupport = new StripHeaderSupport();
    final FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final boolean isEnabled = context.getProperty(ENABLED).evaluateAttributeExpressions(flowFile).asBoolean();
    final int headerCount = context.getProperty(HEADER_LINE_COUNT).evaluateAttributeExpressions(flowFile)
            .asInteger();

    // Empty files and no work to do will simply pass along content
    if (!isEnabled || headerCount == 0 || flowFile.getSize() == 0L) {
        final FlowFile contentFlowFile = session.clone(flowFile);
        session.transfer(contentFlowFile, REL_CONTENT);
        session.transfer(flowFile, REL_ORIGINAL);
        return;
    }

    final MutableLong headerBoundaryInBytes = new MutableLong(-1);

    session.read(flowFile, false, rawIn -> {
        try {
            // Identify the byte boundary of the header
            long bytes = headerSupport.findHeaderBoundary(headerCount, rawIn);
            headerBoundaryInBytes.setValue(bytes);

            if (bytes < 0) {
                getLog().error("Unable to strip header {} expecting at least {} lines in file",
                        new Object[] { flowFile, headerCount });
            }

        } catch (IOException e) {
            getLog().error("Unable to strip header {} due to {}; routing to failure",
                    new Object[] { flowFile, e.getLocalizedMessage() }, e);
        }

    });

    long headerBytes = headerBoundaryInBytes.getValue();
    if (headerBytes < 0) {
        session.transfer(flowFile, REL_FAILURE);
    } else {
        // Transfer header
        final FlowFile headerFlowFile = session.clone(flowFile, 0, headerBytes);
        session.transfer(headerFlowFile, REL_HEADER);

        // Transfer content
        long contentBytes = flowFile.getSize() - headerBytes;
        final FlowFile contentFlowFile = session.clone(flowFile, headerBytes, contentBytes);
        session.transfer(contentFlowFile, REL_CONTENT);

        session.transfer(flowFile, REL_ORIGINAL);
    }
}
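
The MutableLong headerBoundaryInBytes exists because a lambda may only capture effectively final local variables: the read callback writes the boundary into the holder, and the enclosing method reads it afterwards. A minimal sketch of that capture idiom without the NiFi API:

import java.util.function.Consumer;

import org.apache.commons.lang3.mutable.MutableLong;

public class LambdaCaptureSketch {
    public static void main(String[] args) {
        // A plain long local could not be reassigned from inside the lambda
        final MutableLong byteCount = new MutableLong(-1);

        Consumer<String> reader = line -> byteCount.setValue(line.getBytes().length);
        reader.accept("header line");

        System.out.println("bytes read: " + byteCount.longValue()); // prints 11
    }
}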

From source file:com.mgmtp.perfload.perfalyzer.binning.MeasuringResponseTimesBinningStrategy.java

@Override
public void binData(final Scanner scanner, final WritableByteChannel destChannel) throws IOException {
    while (scanner.hasNextLine()) {
        tokenizer.reset(scanner.nextLine());
        String[] tokens = tokenizer.getTokenArray();

        long timestampMillis = Long.parseLong(tokens[0]);
        Long responseTime = Long.valueOf(tokens[2]);
        String type = tokens[MEASURING_NORMALIZED_COL_REQUEST_TYPE];
        String uriAlias = tokens[MEASURING_NORMALIZED_COL_URI_ALIAS];
        String result = tokens[MEASURING_NORMALIZED_COL_RESULT];
        String executionId = tokens[MEASURING_NORMALIZED_COL_EXECUTION_ID];

        String key = type + "||" + uriAlias;
        UriMeasurings measurings = measuringsMap.get(key);
        if (measurings == null) {
            measurings = new UriMeasurings();
            measurings.type = type;
            measurings.uriAlias = uriAlias;
            measuringsMap.put(key, measurings);
        }

        if (responseTime > 0) {
            // response time distribution is calculated by grouping by response time
            // only positive values allowed on logarithmic axis
            // response time might be -1 in case of an error
            MutableInt mutableInt = measurings.responseDistributions.get(responseTime);
            if (mutableInt == null) {
                mutableInt = new MutableInt();
                measurings.responseDistributions.put(responseTime, mutableInt);
            }
            mutableInt.increment();
        }

        // collect all response times for a URI, so quantiles can be calculated later
        measurings.responseTimes.add(responseTime.doubleValue());

        if ("ERROR".equals(result)) {
            measurings.errorCount.increment();

            errorExecutions.add(executionId);
        }

        if (!isNullOrEmpty(executionId)) {
            ExecutionMeasurings execMeasurings = perExecutionResponseTimes.get(executionId);
            if (execMeasurings == null) {
                execMeasurings = new ExecutionMeasurings();
                execMeasurings.sumResponseTimes = new MutableLong(responseTime);
                perExecutionResponseTimes.put(executionId, execMeasurings);
            } else {
                perExecutionResponseTimes.get(executionId).sumResponseTimes.add(responseTime);
            }
            // always update timestamp so we eventually have the last timestamp of the execution
            execMeasurings.timestampMillis = timestampMillis;
        }
    }
}

From source file:com.datatorrent.contrib.dimensions.DimensionsQueryExecutorTest.java

private void simpleQueryCountHelper(int rollingCount) {
    final String publisher = "google";
    final String advertiser = "safeway";

    final long impressions = 10L;
    final double cost = 1.0;

    String eventSchemaString = SchemaUtils.jarResourceFileToString("dimensionsTestSchema.json");

    String basePath = testMeta.getDir();
    TFileImpl hdsFile = new TFileImpl.DefaultTFileImpl();
    hdsFile.setBasePath(basePath);

    AppDataSingleSchemaDimensionStoreHDHT store = new AppDataSingleSchemaDimensionStoreHDHT();

    store.setCacheWindowDuration(2);
    store.setConfigurationSchemaJSON(eventSchemaString);
    store.setFileStore(hdsFile);
    store.setFlushIntervalCount(1);
    store.setFlushSize(0);

    store.setup(new OperatorContextTestHelper.TestIdOperatorContext(1, new DefaultAttributeMap()));

    DimensionalConfigurationSchema eventSchema = store.configurationSchema;
    DimensionsQueryExecutor dqe = new DimensionsQueryExecutor(store, store.schemaRegistry);

    store.beginWindow(0L);

    long currentTime = 0L;

    List<Map<String, HDSQuery>> hdsQueries = Lists.newArrayList();
    List<Map<String, EventKey>> eventKeys = Lists.newArrayList();

    for (int rollingCounter = 0;; currentTime += TimeUnit.MINUTES.toMillis(1L)) {
        Aggregate aggregate = AppDataSingleSchemaDimensionStoreHDHTTest.createEvent(eventSchema, publisher,
                advertiser, currentTime, TimeBucket.MINUTE, impressions, cost);

        store.input.put(aggregate);

        issueHDSQuery(store, aggregate.getEventKey());

        Map<String, HDSQuery> aggregatorToQuery = Maps.newHashMap();
        aggregatorToQuery.put("SUM", store.getQueries().values().iterator().next());
        hdsQueries.add(aggregatorToQuery);

        Map<String, EventKey> aggregatorToEventKey = Maps.newHashMap();
        aggregatorToEventKey.put("SUM", aggregate.getEventKey());
        eventKeys.add(aggregatorToEventKey);

        rollingCounter++;

        if (rollingCounter == rollingCount) {
            break;
        }
    }

    QueryMeta queryMeta = new QueryMeta();
    queryMeta.setHdsQueries(hdsQueries);
    queryMeta.setEventKeys(eventKeys);

    GPOMutable keys = AppDataSingleSchemaDimensionStoreHDHTTest.createQueryKey(eventSchema, publisher,
            advertiser);
    Map<String, Set<String>> fieldToAggregators = Maps.newHashMap();
    fieldToAggregators.put("impressions", Sets.newHashSet("SUM"));
    fieldToAggregators.put("cost", Sets.newHashSet("SUM"));

    FieldsAggregatable fieldsAggregatable = new FieldsAggregatable(fieldToAggregators);

    DataQueryDimensional query = new DataQueryDimensional("1", DataQueryDimensional.TYPE, currentTime,
            currentTime, TimeBucket.MINUTE, keys, fieldsAggregatable, true);
    query.setSlidingAggregateSize(rollingCount);

    DataResultDimensional drd = (DataResultDimensional) dqe.executeQuery(query, queryMeta, new MutableLong(1L));

    store.endWindow();

    Assert.assertEquals(1, drd.getValues().size());
    Assert.assertEquals(impressions * rollingCount,
            drd.getValues().get(0).get("SUM").getFieldLong("impressions"));

    store.teardown();
}

From source file:com.datatorrent.lib.appdata.query.WEQueryQueueManagerTest.java

@Test
public void testExpirationReadAll() {
    final int numQueries = 3;

    WindowEndQueueManager<Query, Void> wqqm = new WindowEndQueueManager<>();

    wqqm.setup(null);
    wqqm.beginWindow(0);

    for (int qc = 0; qc < numQueries; qc++) {
        Query query = new MockQuery(Integer.toString(qc));
        wqqm.enqueue(query, null, new MutableLong(2L));
    }

    wqqm.endWindow();
    wqqm.beginWindow(1);

    {
        int qc = 0;

        for (QueryBundle<Query, Void, MutableLong> qb; (qb = wqqm.dequeue()) != null; qc++) {
            Query query = qb.getQuery();
            Assert.assertEquals("Query ids must equal.", Integer.toString(qc), query.getId());
        }

        Assert.assertEquals("The number of queries must match.", numQueries, qc);
    }

    wqqm.endWindow();
    wqqm.beginWindow(2);

    Assert.assertEquals("There should be no queries now", null, wqqm.dequeue());

    wqqm.endWindow();
    wqqm.teardown();
}

From source file:com.cg.mapreduce.fpgrowth.mahout.fpm.TransactionTree.java

public Map<Integer, MutableLong> generateFList() {
    Map<Integer, MutableLong> frequencyList = Maps.newHashMap();
    Iterator<Pair<IntArrayList, Long>> it = iterator();
    while (it.hasNext()) {
        Pair<IntArrayList, Long> p = it.next();
        IntArrayList items = p.getFirst();
        for (int idx = 0; idx < items.size(); idx++) {
            if (!frequencyList.containsKey(items.get(idx))) {
                frequencyList.put(items.get(idx), new MutableLong(0));
            }
            frequencyList.get(items.get(idx)).add(p.getSecond());
        }
    }
    return frequencyList;
}
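
generateFList() uses the classic counting-map idiom: one MutableLong per key, mutated in place instead of re-boxing a new Long on every increment. On Java 8+ the same pattern is often written with computeIfAbsent, as in this sketch:

import java.util.HashMap;
import java.util.Map;

import org.apache.commons.lang3.mutable.MutableLong;

public class CountingMapSketch {
    public static void main(String[] args) {
        Map<String, MutableLong> counts = new HashMap<>();

        for (String word : new String[] {"a", "b", "a", "a"}) {
            // Create the counter on first sight, then mutate it in place
            counts.computeIfAbsent(word, k -> new MutableLong(0)).increment();
        }

        System.out.println(counts); // a=3, b=1
    }
}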

From source file:com.datatorrent.lib.appdata.query.WEQueryQueueManagerTest.java

@Test
public void testMixedExpiration() {
    final int numQueries = 3;
    WindowEndQueueManager<Query, Void> wqqm = new WindowEndQueueManager<>();

    wqqm.setup(null);
    wqqm.beginWindow(0);

    {
        for (int qc = 0; qc < numQueries; qc++) {
            Query query = new MockQuery(Integer.toString(qc));
            wqqm.enqueue(query, null, new MutableLong(2L));
        }

        for (int qc = 0; qc < numQueries; qc++) {
            Query query = new MockQuery(Integer.toString(qc + numQueries));
            wqqm.enqueue(query, null, new MutableLong(3L));
        }
    }

    wqqm.endWindow();
    wqqm.beginWindow(1);

    {
        int qc = 0;

        for (QueryBundle<Query, Void, MutableLong> qb; (qb = wqqm.dequeue()) != null; qc++) {
            Query query = qb.getQuery();
            Assert.assertEquals("Query ids must equal.", Integer.toString(qc), query.getId());
        }

        Assert.assertEquals("The number of queries must match.", 2 * numQueries, qc);
    }

    wqqm.endWindow();
    wqqm.beginWindow(2);

    {
        int qc = 0;

        for (QueryBundle<Query, Void, MutableLong> qb; (qb = wqqm.dequeue()) != null; qc++) {
            Query query = qb.getQuery();
            Assert.assertEquals("Query ids must equal.", Integer.toString(qc + numQueries), query.getId());
        }

        Assert.assertEquals("The number of queries must match.", numQueries, qc);
    }

    wqqm.endWindow();
    wqqm.beginWindow(3);

    Assert.assertEquals("There should be no queries now", null, wqqm.dequeue());

    wqqm.endWindow();
    wqqm.teardown();
}

From source file:com.romeikat.datamessie.core.sync.service.template.withIdAndVersion.EntityWithIdAndVersionSynchronizer.java

private void createOrUpdate(final TaskExecution taskExecution) throws TaskCancelledException {
    final String msg = String.format("Creating/updating LHS > RHS for %s", clazz.getSimpleName());
    taskExecution.reportWork(msg);

    // Process in batches
    final MutableLong firstId = new MutableLong(0);
    while (true) {
        try {
            final boolean moreBatches = createOrUpdateBatch(taskExecution, firstId);
            if (!moreBatches) {
                return;
            }
        } catch (final PersistenceException e) {
            retry(taskExecution);
        }
    }
}