Example usage for com.google.common.collect Iterators concat

Introduction

On this page you can find example usage of com.google.common.collect Iterators.concat, collected from several open-source projects.

Prototype

public static <T> Iterator<T> concat(final Iterator<? extends Iterator<? extends T>> inputs) 

Document

Combines multiple iterators into a single iterator.
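
A minimal, self-contained sketch (not taken from the projects below; the class name ConcatExample and the literal lists are illustrative) showing how this overload flattens an iterator of iterators, visiting each input in order:

import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

import com.google.common.collect.Iterators;

public class ConcatExample {
    public static void main(String[] args) {
        // Three independent iterators to be traversed back to back.
        List<Iterator<String>> parts = Arrays.asList(
                Arrays.asList("a", "b").iterator(),
                Arrays.asList("c").iterator(),
                Arrays.asList("d", "e").iterator());

        // concat(Iterator<? extends Iterator<? extends T>>) returns a single
        // iterator that exhausts each inner iterator in turn.
        Iterator<String> combined = Iterators.concat(parts.iterator());

        while (combined.hasNext()) {
            System.out.print(combined.next() + " "); // prints: a b c d e
        }
    }
}

Most of the usages below follow the same pattern: build a List of iterators, then pass list.iterator() to Iterators.concat to obtain one flattened iterator.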

Usage

From source file:co.cask.cdap.data2.dataset2.lib.partitioned.PartitionedFileSetDataset.java

@Override
public PartitionConsumerResult consumePartitions(PartitionConsumerState partitionConsumerState) {
    List<Long> previousInProgress = partitionConsumerState.getVersionsToCheck();
    Set<Long> noLongerInProgress = setDiff(previousInProgress, tx.getInProgress());

    List<Iterator<Partition>> partitionIterators = Lists.newArrayList();
    for (Long txId : noLongerInProgress) {
        Scanner scanner = partitionsTable.readByIndex(WRITE_PTR_COL, Bytes.toBytes(txId));
        partitionIterators.add(new PartitionIterator(scanner));
    }

    // exclusive scan end
    long scanUpTo = Math.min(tx.getWritePointer(), tx.getReadPointer() + 1);
    // no read your own writes (partitions)
    Scanner scanner = partitionsTable.scanByIndex(WRITE_PTR_COL,
            Bytes.toBytes(partitionConsumerState.getStartVersion()), Bytes.toBytes(scanUpTo));
    partitionIterators.add(new PartitionIterator(scanner));

    List<Long> inProgressBeforeScanEnd = Lists.newArrayList();
    for (long txId : tx.getInProgress()) {
        if (txId >= scanUpTo) {
            break;
        }
        inProgressBeforeScanEnd.add(txId);
    }
    return new PartitionConsumerResult(new PartitionConsumerState(scanUpTo, inProgressBeforeScanEnd),
            Iterators.concat(partitionIterators.iterator()));
}

From source file:org.pshdl.model.impl.AbstractHDLArrayInit.java

@Override
public Iterator<IHDLObject> deepIterator() {
    return new Iterator<IHDLObject>() {

        private int pos = 0;
        private Iterator<? extends IHDLObject> current;

        @Override
        public boolean hasNext() {
            if ((current != null) && !current.hasNext()) {
                current = null;
            }
            while (current == null) {
                switch (pos++) {
                case 0:
                    if ((exp != null) && (exp.size() != 0)) {
                        final List<Iterator<? extends IHDLObject>> iters = Lists
                                .newArrayListWithCapacity(exp.size());
                        for (final HDLExpression o : exp) {
                            iters.add(Iterators.forArray(o));
                            iters.add(o.deepIterator());
                        }
                        current = Iterators.concat(iters.iterator());
                    }
                    break;
                default:
                    return false;
                }
            }
            return (current != null) && current.hasNext();
        }

        @Override
        public IHDLObject next() {
            return current.next();
        }

        @Override
        public void remove() {
            throw new IllegalArgumentException("Not supported");
        }

    };
}

From source file:org.pshdl.model.impl.AbstractHDLConcat.java

@Override
public Iterator<IHDLObject> deepIterator() {
    return new Iterator<IHDLObject>() {

        private int pos = 0;
        private Iterator<? extends IHDLObject> current;

        @Override
        public boolean hasNext() {
            if ((current != null) && !current.hasNext()) {
                current = null;
            }
            while (current == null) {
                switch (pos++) {
                case 0:
                    if ((cats != null) && (cats.size() != 0)) {
                        final List<Iterator<? extends IHDLObject>> iters = Lists
                                .newArrayListWithCapacity(cats.size());
                        for (final HDLExpression o : cats) {
                            iters.add(Iterators.forArray(o));
                            iters.add(o.deepIterator());
                        }
                        current = Iterators.concat(iters.iterator());
                    }
                    break;
                default:
                    return false;
                }
            }
            return (current != null) && current.hasNext();
        }

        @Override
        public IHDLObject next() {
            return current.next();
        }

        @Override
        public void remove() {
            throw new IllegalArgumentException("Not supported");
        }

    };
}

From source file:org.pshdl.model.impl.AbstractHDLInstantiation.java

@Override
public Iterator<IHDLObject> deepIterator() {
    return new Iterator<IHDLObject>() {

        private int pos = 0;
        private Iterator<? extends IHDLObject> current;

        @Override
        public boolean hasNext() {
            if ((current != null) && !current.hasNext()) {
                current = null;
            }
            while (current == null) {
                switch (pos++) {
                case 0:
                    if ((annotations != null) && (annotations.size() != 0)) {
                        final List<Iterator<? extends IHDLObject>> iters = Lists
                                .newArrayListWithCapacity(annotations.size());
                        for (final HDLAnnotation o : annotations) {
                            iters.add(Iterators.forArray(o));
                            iters.add(o.deepIterator());
                        }
                        current = Iterators.concat(iters.iterator());
                    }
                    break;
                case 1:
                    if (var != null) {
                        current = Iterators.concat(Iterators.forArray(var), var.deepIterator());
                    }
                    break;
                case 2:
                    if ((arguments != null) && (arguments.size() != 0)) {
                        final List<Iterator<? extends IHDLObject>> iters = Lists
                                .newArrayListWithCapacity(arguments.size());
                        for (final HDLArgument o : arguments) {
                            iters.add(Iterators.forArray(o));
                            iters.add(o.deepIterator());
                        }
                        current = Iterators.concat(iters.iterator());
                    }
                    break;
                default:
                    return false;
                }
            }
            return (current != null) && current.hasNext();
        }

        @Override
        public IHDLObject next() {
            return current.next();
        }

        @Override
        public void remove() {
            throw new IllegalArgumentException("Not supported");
        }

    };
}

From source file:org.apache.rya.accumulo.query.AccumuloRyaQueryEngine.java

@Override
public CloseableIteration<? extends Map.Entry<RyaStatement, BindingSet>, RyaDAOException> queryWithBindingSet(
        Collection<Map.Entry<RyaStatement, BindingSet>> stmts, AccumuloRdfConfiguration conf)
        throws RyaDAOException {
    if (conf == null) {
        conf = configuration;
    }
    // query configuration
    Authorizations authorizations = conf.getAuthorizations();
    Long ttl = conf.getTtl();
    Long maxResults = conf.getLimit();
    Integer maxRanges = conf.getMaxRangesForScanner();
    Integer numThreads = conf.getNumThreads();

    // TODO: cannot span multiple tables here
    try {
        Collection<Range> ranges = new HashSet<Range>();
        RangeBindingSetEntries rangeMap = new RangeBindingSetEntries();
        TABLE_LAYOUT layout = null;
        RyaURI context = null;
        TriplePatternStrategy strategy = null;
        RyaURI columnFamily = null;
        boolean columnFamilySet = false;
        for (Map.Entry<RyaStatement, BindingSet> stmtbs : stmts) {
            RyaStatement stmt = stmtbs.getKey();
            context = stmt.getContext();
            // if all RyaStatements for this query have the same context,
            // then set the columnFamily to be that value so that Scanner can fetch
            // only that ColumnFamily. Otherwise set columnFamily to null so that
            // Scanner will fetch all ColumnFamilies.
            if (!columnFamilySet) {
                columnFamily = context;
                columnFamilySet = true;
            } else if (columnFamily != null && !columnFamily.equals(context)) {
                columnFamily = null;
            }
            BindingSet bs = stmtbs.getValue();
            strategy = ryaContext.retrieveStrategy(stmt);
            if (strategy == null) {
                throw new IllegalArgumentException("TriplePattern[" + stmt + "] not supported");
            }

            Map.Entry<RdfCloudTripleStoreConstants.TABLE_LAYOUT, ByteRange> entry = strategy.defineRange(
                    stmt.getSubject(), stmt.getPredicate(), stmt.getObject(), stmt.getContext(), conf);

            // use range to set scanner
            // populate scanner based on authorizations, ttl
            layout = entry.getKey();
            ByteRange byteRange = entry.getValue();
            Range range = new Range(new Text(byteRange.getStart()), new Text(byteRange.getEnd()));
            Range rangeMapRange = range;
            // if context != null, bind context info to Range so that
            // ColumnFamily Keys returned by Scanner
            // can be compared to ColumnFamily of start and stop Keys of
            // Range -- important when querying for named
            // graphs by requiring that Statements have same context Value
            // as the Value specified in the BindingSet
            if (context != null) {
                byte[] contextBytes = context.getData().getBytes("UTF-8");
                rangeMapRange = range.bound(
                        new Column(contextBytes, new byte[] { (byte) 0x00 }, new byte[] { (byte) 0x00 }),
                        new Column(contextBytes, new byte[] { (byte) 0xff }, new byte[] { (byte) 0xff }));
            }
            // ranges gets a Range that has no Column bounds, but
            // rangeMap gets a Range that does have Column bounds
            // If we inserted multiple Ranges with the same Row (but
            // distinct Column bounds) into the Set ranges, we would get
            // duplicate
            // results when the Row is not exact. So RyaStatements that
            // differ only in their context are all mapped to the same
            // Range (with no Column bounds) for scanning purposes.
            // However, context information is included in a Column that
            // bounds the Range inserted into rangeMap. This is because
            // in the class {@link RyaStatementBindingSetKeyValueIterator},
            // the rangeMap is
            // used to join the scan results with the BindingSets to produce
            // the query results. The additional ColumnFamily info is
            // required in this join
            // process to allow for the Statement contexts to be compared
            // with the BindingSet contexts
            // See {@link RangeBindingSetEntries#containsKey}.
            ranges.add(range);
            rangeMap.put(rangeMapRange, bs);
        }
        // no ranges
        if (layout == null)
            return null;
        String regexSubject = conf.getRegexSubject();
        String regexPredicate = conf.getRegexPredicate();
        String regexObject = conf.getRegexObject();
        TripleRowRegex tripleRowRegex = strategy.buildRegex(regexSubject, regexPredicate, regexObject, null,
                null);

        String table = layoutToTable(layout, conf);
        boolean useBatchScanner = ranges.size() > maxRanges;
        RyaStatementBindingSetKeyValueIterator iterator = null;
        if (useBatchScanner) {
            ScannerBase scanner = connector.createBatchScanner(table, authorizations, numThreads);
            ((BatchScanner) scanner).setRanges(ranges);
            fillScanner(scanner, columnFamily, null, ttl, null, tripleRowRegex, conf);
            iterator = new RyaStatementBindingSetKeyValueIterator(layout, ryaContext, scanner, rangeMap);
        } else {
            Scanner scannerBase = null;
            Iterator<Map.Entry<Key, Value>>[] iters = new Iterator[ranges.size()];
            int i = 0;
            for (Range range : ranges) {
                scannerBase = connector.createScanner(table, authorizations);
                scannerBase.setRange(range);
                fillScanner(scannerBase, columnFamily, null, ttl, null, tripleRowRegex, conf);
                iters[i] = scannerBase.iterator();
                i++;
            }
            iterator = new RyaStatementBindingSetKeyValueIterator(layout, Iterators.concat(iters), rangeMap,
                    ryaContext);
        }
        if (maxResults != null) {
            iterator.setMaxResults(maxResults);
        }
        return iterator;
    } catch (Exception e) {
        throw new RyaDAOException(e);
    }

}

From source file:org.eclipse.sirius.ecore.extender.business.api.accessor.CompositeMetamodelExtender.java

@Override
public Iterator<String> getContributedAttributeNames(final EObject next) {
    final List<Iterator<String>> iterators = Lists.newArrayList();
    for (final IMetamodelExtender extender : getActivatedExtenders()) {
        iterators.add(extender.getContributedAttributeNames(next));
    }
    return Iterators.concat(iterators.iterator());
}

From source file:org.apache.kylin.storage.hbase.cube.v2.coprocessor.endpoint.CubeVisitService.java

@SuppressWarnings("checkstyle:methodlength")
@Override
public void visitCube(final RpcController controller, final CubeVisitProtos.CubeVisitRequest request,
        RpcCallback<CubeVisitProtos.CubeVisitResponse> done) {
    List<RegionScanner> regionScanners = Lists.newArrayList();
    HRegion region = null;

    StringBuilder sb = new StringBuilder();
    byte[] allRows;
    String debugGitTag = "";

    CubeVisitProtos.CubeVisitResponse.ErrorInfo errorInfo = null;

    String queryId = request.hasQueryId() ? request.getQueryId() : "UnknownId";
    try (SetThreadName ignored = new SetThreadName("Query %s", queryId)) {
        final long serviceStartTime = System.currentTimeMillis();

        region = (HRegion) env.getRegion();
        region.startRegionOperation();

        // if user change kylin.properties on kylin server, need to manually redeploy coprocessor jar to update KylinConfig of Env.
        KylinConfig kylinConfig = KylinConfig.createKylinConfig(request.getKylinProperties());
        KylinConfig.setKylinConfigThreadLocal(kylinConfig);

        debugGitTag = region.getTableDesc().getValue(IRealizationConstants.HTableGitTag);

        final GTScanRequest scanReq = GTScanRequest.serializer.deserialize(
                ByteBuffer.wrap(HBaseZeroCopyByteString.zeroCopyGetBytes(request.getGtScanRequest())));
        List<List<Integer>> hbaseColumnsToGT = Lists.newArrayList();
        for (IntList intList : request.getHbaseColumnsToGTList()) {
            hbaseColumnsToGT.add(intList.getIntsList());
        }
        StorageSideBehavior behavior = StorageSideBehavior.valueOf(scanReq.getStorageBehavior());
        final List<RawScan> hbaseRawScans = deserializeRawScans(
                ByteBuffer.wrap(HBaseZeroCopyByteString.zeroCopyGetBytes(request.getHbaseRawScan())));

        appendProfileInfo(sb, "start latency: " + (serviceStartTime - scanReq.getStartTime()),
                serviceStartTime);

        final List<InnerScannerAsIterator> cellListsForeachRawScan = Lists.newArrayList();

        for (RawScan hbaseRawScan : hbaseRawScans) {
            if (request.getRowkeyPreambleSize() - RowConstants.ROWKEY_CUBOIDID_LEN > 0) {
                //if has shard, fill region shard to raw scan start/end
                updateRawScanByCurrentRegion(hbaseRawScan, region,
                        request.getRowkeyPreambleSize() - RowConstants.ROWKEY_CUBOIDID_LEN);
            }

            Scan scan = CubeHBaseRPC.buildScan(hbaseRawScan);
            RegionScanner innerScanner = region.getScanner(scan);
            regionScanners.add(innerScanner);

            InnerScannerAsIterator cellListIterator = new InnerScannerAsIterator(innerScanner);
            cellListsForeachRawScan.add(cellListIterator);
        }

        final Iterator<List<Cell>> allCellLists = Iterators.concat(cellListsForeachRawScan.iterator());

        if (behavior.ordinal() < StorageSideBehavior.SCAN.ordinal()) {
            //this is only for CoprocessorBehavior.RAW_SCAN case to profile hbase scan speed
            List<Cell> temp = Lists.newArrayList();
            int counter = 0;
            for (RegionScanner innerScanner : regionScanners) {
                while (innerScanner.nextRaw(temp)) {
                    counter++;
                }
            }
            appendProfileInfo(sb, "scanned " + counter, serviceStartTime);
        }

        if (behavior.ordinal() < StorageSideBehavior.SCAN_FILTER_AGGR_CHECKMEM.ordinal()) {
            scanReq.disableAggCacheMemCheck(); // disable mem check if so told
        }

        final long storagePushDownLimit = scanReq.getStoragePushDownLimit();

        ResourceTrackingCellListIterator cellListIterator = new ResourceTrackingCellListIterator(allCellLists,
                scanReq.getStorageScanRowNumThreshold(), // for old client (scan threshold)
                !request.hasMaxScanBytes() ? Long.MAX_VALUE : request.getMaxScanBytes(), // for new client
                scanReq.getTimeout());

        IGTStore store = new HBaseReadonlyStore(cellListIterator, scanReq, hbaseRawScans.get(0).hbaseColumns,
                hbaseColumnsToGT, request.getRowkeyPreambleSize(), behavior.delayToggledOn(),
                request.getIsExactAggregate());

        IGTScanner rawScanner = store.scan(scanReq);
        IGTScanner finalScanner = scanReq.decorateScanner(rawScanner, behavior.filterToggledOn(),
                behavior.aggrToggledOn(), false, request.getSpillEnabled());

        ByteBuffer buffer = ByteBuffer.allocate(BufferedMeasureCodec.DEFAULT_BUFFER_SIZE);

        ByteArrayOutputStream outputStream = new ByteArrayOutputStream(
                BufferedMeasureCodec.DEFAULT_BUFFER_SIZE);//ByteArrayOutputStream will auto grow
        int finalRowCount = 0;

        try {
            for (GTRecord oneRecord : finalScanner) {
                buffer.clear();
                try {
                    oneRecord.exportColumns(scanReq.getColumns(), buffer);
                } catch (BufferOverflowException boe) {
                    buffer = ByteBuffer.allocate(oneRecord.sizeOf(scanReq.getColumns()) * 2);
                    oneRecord.exportColumns(scanReq.getColumns(), buffer);
                }

                outputStream.write(buffer.array(), 0, buffer.position());

                finalRowCount++;

                //if it's doing storage aggr, then should rely on GTAggregateScanner's limit check
                if (!scanReq.isDoingStorageAggregation() && finalRowCount >= storagePushDownLimit) {
                    //read one more record than limit
                    logger.info("The finalScanner aborted because storagePushDownLimit is satisfied");
                    break;
                }
            }
        } catch (KylinTimeoutException e) {
            logger.info("Abort scan: {}", e.getMessage());
            errorInfo = CubeVisitProtos.CubeVisitResponse.ErrorInfo.newBuilder()
                    .setType(CubeVisitProtos.CubeVisitResponse.ErrorType.TIMEOUT).setMessage(e.getMessage())
                    .build();
        } catch (ResourceLimitExceededException e) {
            logger.info("Abort scan: {}", e.getMessage());
            errorInfo = CubeVisitProtos.CubeVisitResponse.ErrorInfo.newBuilder()
                    .setType(CubeVisitProtos.CubeVisitResponse.ErrorType.RESOURCE_LIMIT_EXCEEDED)
                    .setMessage(e.getMessage()).build();
        } finally {
            finalScanner.close();
        }

        appendProfileInfo(sb, "agg done", serviceStartTime);
        logger.info("Total scanned {} rows and {} bytes", cellListIterator.getTotalScannedRowCount(),
                cellListIterator.getTotalScannedRowBytes());

        //outputStream.close() is not necessary
        byte[] compressedAllRows;
        if (errorInfo == null) {
            allRows = outputStream.toByteArray();
        } else {
            allRows = new byte[0];
        }
        if (!kylinConfig.getCompressionResult()) {
            compressedAllRows = allRows;
        } else {
            compressedAllRows = CompressionUtils.compress(allRows);
        }

        appendProfileInfo(sb, "compress done", serviceStartTime);
        logger.info("Size of final result = {} ({} before compressing)", compressedAllRows.length,
                allRows.length);

        OperatingSystemMXBean operatingSystemMXBean = (OperatingSystemMXBean) ManagementFactory
                .getOperatingSystemMXBean();
        double systemCpuLoad = operatingSystemMXBean.getSystemCpuLoad();
        double freePhysicalMemorySize = operatingSystemMXBean.getFreePhysicalMemorySize();
        double freeSwapSpaceSize = operatingSystemMXBean.getFreeSwapSpaceSize();

        appendProfileInfo(sb, "server stats done", serviceStartTime);
        sb.append(" debugGitTag:" + debugGitTag);

        CubeVisitProtos.CubeVisitResponse.Builder responseBuilder = CubeVisitProtos.CubeVisitResponse
                .newBuilder();
        if (errorInfo != null) {
            responseBuilder.setErrorInfo(errorInfo);
        }
        done.run(responseBuilder.//
                setCompressedRows(HBaseZeroCopyByteString.wrap(compressedAllRows)).//too many array copies 
                setStats(CubeVisitProtos.CubeVisitResponse.Stats.newBuilder()
                        .setAggregatedRowCount(cellListIterator.getTotalScannedRowCount() - finalRowCount)
                        .setScannedRowCount(cellListIterator.getTotalScannedRowCount())
                        .setScannedBytes(cellListIterator.getTotalScannedRowBytes())
                        .setServiceStartTime(serviceStartTime).setServiceEndTime(System.currentTimeMillis())
                        .setSystemCpuLoad(systemCpuLoad).setFreePhysicalMemorySize(freePhysicalMemorySize)
                        .setFreeSwapSpaceSize(freeSwapSpaceSize)
                        .setHostname(InetAddress.getLocalHost().getHostName()).setEtcMsg(sb.toString())
                        .setNormalComplete(errorInfo == null ? 1 : 0).build())
                .build());

    } catch (IOException ioe) {
        logger.error(ioe.toString(), ioe);
        IOException wrapped = new IOException("Error in coprocessor " + debugGitTag, ioe);
        ResponseConverter.setControllerException(controller, wrapped);
    } finally {
        for (RegionScanner innerScanner : regionScanners) {
            IOUtils.closeQuietly(innerScanner);
        }
        if (region != null) {
            try {
                region.closeRegionOperation();
            } catch (IOException e) {
                e.printStackTrace();
                throw new RuntimeException(e);
            }
        }
    }
}

From source file:org.eclipse.sirius.ecore.extender.business.api.accessor.CompositeMetamodelExtender.java

@Override
public Iterator<String> getContributedReferenceNames(final EObject instance) {
    final List<Iterator<String>> iterators = Lists.newArrayList();
    for (final IMetamodelExtender extender : getActivatedExtenders()) {
        iterators.add(extender.getContributedReferenceNames(instance));
    }
    return Iterators.concat(iterators.iterator());

}

From source file:org.pshdl.model.impl.AbstractHDLEnumDeclaration.java

@Override
public Iterator<IHDLObject> deepIterator() {
    return new Iterator<IHDLObject>() {

        private int pos = 0;
        private Iterator<? extends IHDLObject> current;

        @Override
        public boolean hasNext() {
            if ((current != null) && !current.hasNext()) {
                current = null;
            }
            while (current == null) {
                switch (pos++) {
                case 0:
                    if ((annotations != null) && (annotations.size() != 0)) {
                        final List<Iterator<? extends IHDLObject>> iters = Lists
                                .newArrayListWithCapacity(annotations.size());
                        for (final HDLAnnotation o : annotations) {
                            iters.add(Iterators.forArray(o));
                            iters.add(o.deepIterator());
                        }
                        current = Iterators.concat(iters.iterator());
                    }
                    break;
                case 1:
                    if (hEnum != null) {
                        current = Iterators.concat(Iterators.forArray(hEnum), hEnum.deepIterator());
                    }
                    break;
                default:
                    return false;
                }
            }
            return (current != null) && current.hasNext();
        }

        @Override
        public IHDLObject next() {
            return current.next();
        }

        @Override
        public void remove() {
            throw new IllegalArgumentException("Not supported");
        }

    };
}

From source file:org.pshdl.model.impl.AbstractHDLInterfaceDeclaration.java

@Override
public Iterator<IHDLObject> deepIterator() {
    return new Iterator<IHDLObject>() {

        private int pos = 0;
        private Iterator<? extends IHDLObject> current;

        @Override
        public boolean hasNext() {
            if ((current != null) && !current.hasNext()) {
                current = null;
            }
            while (current == null) {
                switch (pos++) {
                case 0:
                    if ((annotations != null) && (annotations.size() != 0)) {
                        final List<Iterator<? extends IHDLObject>> iters = Lists
                                .newArrayListWithCapacity(annotations.size());
                        for (final HDLAnnotation o : annotations) {
                            iters.add(Iterators.forArray(o));
                            iters.add(o.deepIterator());
                        }
                        current = Iterators.concat(iters.iterator());
                    }
                    break;
                case 1:
                    if (hIf != null) {
                        current = Iterators.concat(Iterators.forArray(hIf), hIf.deepIterator());
                    }
                    break;
                default:
                    return false;
                }
            }
            return (current != null) && current.hasNext();
        }

        @Override
        public IHDLObject next() {
            return current.next();
        }

        @Override
        public void remove() {
            throw new IllegalArgumentException("Not supported");
        }

    };
}