Example usage for org.apache.commons.lang.mutable MutableLong setValue

List of usage examples for org.apache.commons.lang.mutable MutableLong setValue

Introduction

On this page you can find example usages of org.apache.commons.lang.mutable MutableLong setValue.

Prototype

public void setValue(Object value) 

Source Link

Document

Sets the value from any Number instance.

Usage

From source file:com.ebay.erl.mobius.core.mapred.CounterUpdateThread.java

public void updateCounter(String groupName, String counterName, long newCounts) {
    Counter counter = this.getCounterByName(groupName, counterName);
    synchronized (this.counts) {
        // Lazily create the accumulator for this counter on first use,
        // then overwrite it with the latest count.
        MutableLong count = this.counts.get(counter);
        if (count == null) {
            count = new MutableLong(0L);
            this.counts.put(counter, count);
        }
        count.setValue(newCounts);
    }
}

From source file:com.jivesoftware.os.upena.amza.transport.http.replication.endpoints.UpenaAmzaReplicationRestEndpoints.java

@POST
@Consumes("application/json")
@Path("/changes/take")
public Response take(final RowUpdates rowUpdates) {
    try {
        final BinaryRowMarshaller marshaller = new BinaryRowMarshaller();
        final List<byte[]> takenRows = new ArrayList<>();
        final MutableLong maxOrderId = new MutableLong();
        // Stream all row updates past the caller's transaction id,
        // remembering the highest order id seen along the way.
        amzaInstance.takeRowUpdates(rowUpdates.getTableName(), rowUpdates.getHighestTransactionId(),
                new RowScan() {
                    @Override
                    public boolean row(long orderId, RowIndexKey key, RowIndexValue value) throws Exception {
                        takenRows.add(marshaller.toRow(orderId, key, value));
                        maxOrderId.setValue(Math.max(maxOrderId.longValue(), orderId));
                        return true;
                    }
                });

        return ResponseHelper.INSTANCE.jsonResponse(
                new RowUpdates(maxOrderId.longValue(), rowUpdates.getTableName(), takenRows));
    } catch (Exception x) {
        LOG.warn("Failed to apply changeset: " + rowUpdates, x);
        return ResponseHelper.INSTANCE.errorResponse("Failed to changeset " + rowUpdates, x);
    }
}

From source file:it.unimi.dsi.sux4j.mph.CHDMinimalPerfectHashFunction.java

/**
 * Creates a new CHD minimal perfect hash function for the given keys.
 *
 * @param keys the keys to hash, or {@code null}.
 * @param transform a transformation strategy for the keys.
 * @param lambda the average bucket size.
 * @param loadFactor the load factor.
 * @param signatureWidth a signature width, or 0 for no signature.
 * @param tempDir a temporary directory for the store files, or {@code null} for the standard temporary directory.
 * @param chunkedHashStore a chunked hash store containing the keys, or {@code null}; the store
 * can be unchecked, but in this case <code>keys</code> and <code>transform</code> must be non-{@code null}. 
 */
protected CHDMinimalPerfectHashFunction(final Iterable<? extends T> keys,
        final TransformationStrategy<? super T> transform, final int lambda, double loadFactor,
        final int signatureWidth, final File tempDir, ChunkedHashStore<T> chunkedHashStore) throws IOException {
    this.transform = transform;

    final ProgressLogger pl = new ProgressLogger(LOGGER);
    pl.displayLocalSpeed = true;
    pl.displayFreeMemory = true;
    final RandomGenerator r = new XorShift1024StarRandomGenerator();
    pl.itemsName = "keys";

    // If the caller did not provide a store, build one from the given keys.
    final boolean givenChunkedHashStore = chunkedHashStore != null;
    if (!givenChunkedHashStore) {
        chunkedHashStore = new ChunkedHashStore<T>(transform, tempDir, pl);
        chunkedHashStore.reset(r.nextLong());
        chunkedHashStore.addAll(keys.iterator());
    }
    n = chunkedHashStore.size();

    defRetValue = -1; // For the very few cases in which we can decide

    int log2NumChunks = Math.max(0, Fast.mostSignificantBit(n >> LOG2_CHUNK_SIZE));
    chunkShift = chunkedHashStore.log2Chunks(log2NumChunks);
    final int numChunks = 1 << log2NumChunks;

    LOGGER.debug("Number of chunks: " + numChunks);
    LOGGER.debug("Average chunk size: " + (double) n / numChunks);

    // Packed per-chunk metadata (offset, bucket count, seed), accessed below
    // through the offset()/numBuckets()/seed() helpers.
    offsetNumBucketsSeed = new long[(numChunks + 1) * 3 + 2];

    int duplicates = 0;
    final LongArrayList holes = new LongArrayList();

    // Bucket coefficients are spilled to disk with a 7-bit variable-length
    // encoding: the low seven bits of each byte carry data, and the high bit
    // is set on every byte except the last.
    @SuppressWarnings("resource")
    final OfflineIterable<MutableLong, MutableLong> coefficients = new OfflineIterable<MutableLong, MutableLong>(
            new Serializer<MutableLong, MutableLong>() {

                @Override
                public void write(final MutableLong a, final DataOutput dos) throws IOException {
                    long x = a.longValue();
                    while ((x & ~0x7FL) != 0) {
                        dos.writeByte((int) (x | 0x80));
                        x >>>= 7;
                    }
                    dos.writeByte((int) x);
                }

                @Override
                public void read(final DataInput dis, final MutableLong x) throws IOException {
                    byte b = dis.readByte();
                    long t = b & 0x7F;
                    for (int shift = 7; (b & 0x80) != 0; shift += 7) {
                        b = dis.readByte();
                        t |= (b & 0x7FL) << shift;
                    }
                    x.setValue(t);
                }
            }, new MutableLong());

    // Retry the whole construction until every chunk succeeds; the loop also
    // restarts with a new global seed when duplicate triples are detected.
    for (;;) {
        LOGGER.debug("Generating minimal perfect hash function...");

        holes.clear();
        coefficients.clear();
        pl.expectedUpdates = numChunks;
        pl.itemsName = "chunks";
        pl.start("Analysing chunks... ");

        try {
            int chunkNumber = 0;

            for (ChunkedHashStore.Chunk chunk : chunkedHashStore) {
                /* We treat a chunk as a single hash function. The number of bins is thus
                 * the first prime larger than the chunk size divided by the load factor. */
                final int p = Primes.nextPrime((int) Math.ceil(chunk.size() / loadFactor) + 1);
                final boolean used[] = new boolean[p];

                final int numBuckets = (chunk.size() + lambda - 1) / lambda;
                numBuckets(chunkNumber + 1, numBuckets(chunkNumber) + numBuckets);
                final int[] cc0 = new int[numBuckets];
                final int[] cc1 = new int[numBuckets];
                @SuppressWarnings("unchecked")
                final ArrayList<long[]>[] bucket = new ArrayList[numBuckets];
                for (int i = bucket.length; i-- != 0;)
                    bucket[i] = new ArrayList<long[]>();

                tryChunk: for (;;) {
                    for (ArrayList<long[]> b : bucket)
                        b.clear();
                    Arrays.fill(used, false);

                    /* At each try, the allocation to keys to bucket is randomized differently. */
                    final long seed = r.nextLong();
                    /* We distribute the keys in this chunks in the buckets. */
                    for (Iterator<long[]> iterator = chunk.iterator(); iterator.hasNext();) {
                        final long[] triple = iterator.next();
                        final long[] h = new long[3];
                        Hashes.spooky4(triple, seed, h);
                        final ArrayList<long[]> b = bucket[(int) ((h[0] >>> 1) % numBuckets)];
                        h[1] = (int) ((h[1] >>> 1) % p);
                        h[2] = (int) ((h[2] >>> 1) % (p - 1)) + 1;

                        // All elements in a bucket must have either different h[ 1 ] or different h[ 2 ]
                        for (long[] t : b)
                            if (t[1] == h[1] && t[2] == h[2]) {
                                LOGGER.info("Duplicate index" + Arrays.toString(t));
                                continue tryChunk;
                            }
                        b.add(h);
                    }

                    // Process buckets by decreasing size: large buckets are the
                    // hardest to place, so they are assigned coefficients first.
                    final int[] perm = Util.identity(bucket.length);
                    IntArrays.quickSort(perm, new AbstractIntComparator() {
                        private static final long serialVersionUID = 1L;

                        @Override
                        public int compare(int a0, int a1) {
                            return Integer.compare(bucket[a1].size(), bucket[a0].size());
                        }
                    });

                    for (int i = 0; i < perm.length;) {
                        final LinkedList<Integer> bucketsToDo = new LinkedList<Integer>();
                        final int size = bucket[perm[i]].size();
                        int j;
                        // Gather indices of all buckets with the same size
                        for (j = i; j < perm.length && bucket[perm[j]].size() == size; j++)
                            bucketsToDo.add(Integer.valueOf(perm[j]));

                        // Examine for each pair (c0,c1) the buckets still to do
                        ext: for (int c1 = 0; c1 < p; c1++)
                            for (int c0 = 0; c0 < p; c0++) {
                                for (Iterator<Integer> iterator = bucketsToDo.iterator(); iterator.hasNext();) {
                                    final int k = iterator.next().intValue();
                                    final ArrayList<long[]> b = bucket[k];
                                    boolean completed = true;
                                    final IntArrayList done = new IntArrayList();
                                    // Try to see whether the necessary entries are not used
                                    for (long[] h : b) {
                                        int pos = (int) ((h[1] + c0 * h[2] + c1) % p);
                                        if (used[pos]) {
                                            completed = false;
                                            break;
                                        } else {
                                            used[pos] = true;
                                            done.add(pos);
                                        }
                                    }

                                    if (completed) {
                                        // All positions were free
                                        cc0[k] = c0;
                                        cc1[k] = c1;
                                        iterator.remove();
                                    } else
                                        // Roll back the bins tentatively marked for this bucket.
                                        for (int d : done)
                                            used[d] = false;
                                }
                                if (bucketsToDo.isEmpty())
                                    break ext;
                            }
                        // No (c0,c1) pair worked for some bucket of this size:
                        // retry the whole chunk with a fresh seed.
                        if (!bucketsToDo.isEmpty())
                            continue tryChunk;

                        seed(chunkNumber, seed);
                        i = j;
                    }
                    break;
                }

                if (ASSERTS) {
                    // Sanity check: the chosen coefficients must map every key
                    // of the chunk to a distinct bin.
                    final IntOpenHashSet pos = new IntOpenHashSet();
                    final long h[] = new long[3];
                    for (Iterator<long[]> iterator = chunk.iterator(); iterator.hasNext();) {
                        final long[] triple = iterator.next();
                        Hashes.spooky4(triple, seed(chunkNumber), h);
                        h[0] = (h[0] >>> 1) % numBuckets;
                        h[1] = (int) ((h[1] >>> 1) % p);
                        h[2] = (int) ((h[2] >>> 1) % (p - 1)) + 1;
                        assert pos.add((int) ((h[1] + cc0[(int) (h[0])] * h[2] + cc1[(int) (h[0])]) % p));
                    }
                }

                // Spill each bucket's coefficient pair, packed into a single value.
                final MutableLong l = new MutableLong();
                for (int i = 0; i < numBuckets; i++) {
                    l.setValue(cc0[i] + cc1[i] * p);
                    coefficients.add(l);
                }

                // Unused bins become "holes"; ranking over them later makes the
                // function minimal.
                for (int i = 0; i < p; i++)
                    if (!used[i])
                        holes.add(offset(chunkNumber) + i);

                offset(chunkNumber + 1, offset(chunkNumber) + p);
                chunkNumber++;
                pl.update();
            }

            pl.done();
            break;
        } catch (ChunkedHashStore.DuplicateException e) {
            if (keys == null)
                throw new IllegalStateException(
                        "You provided no keys, but the chunked hash store was not checked");
            if (duplicates++ > 3)
                throw new IllegalArgumentException("The input list contains duplicates");
            LOGGER.warn("Found duplicate. Recomputing triples...");
            chunkedHashStore.reset(r.nextLong());
            chunkedHashStore.addAll(keys.iterator());
        }
    }

    rank = new SparseRank(offset(offsetNumBucketsSeed.length / 3 - 1), holes.size(), holes.iterator());

    globalSeed = chunkedHashStore.seed();

    // Compress the spilled coefficient list into an Elias-Fano long big list.
    this.coefficients = new EliasFanoLongBigList(new AbstractLongIterator() {
        final OfflineIterator<MutableLong, MutableLong> iterator = coefficients.iterator();

        @Override
        public boolean hasNext() {
            return iterator.hasNext();
        }

        public long nextLong() {
            return iterator.next().longValue();
        }
    }, 0, true);

    coefficients.close();

    LOGGER.info("Completed.");
    LOGGER.info("Actual bit cost per key: " + (double) numBits() / n);

    if (signatureWidth != 0) {
        // Store a per-key signature so that queries for keys outside the
        // original set can be (probabilistically) rejected.
        signatureMask = -1L >>> Long.SIZE - signatureWidth;
        (signatures = LongArrayBitVector.getInstance().asLongBigList(signatureWidth)).size(n);
        pl.expectedUpdates = n;
        pl.itemsName = "signatures";
        pl.start("Signing...");
        for (ChunkedHashStore.Chunk chunk : chunkedHashStore) {
            Iterator<long[]> iterator = chunk.iterator();
            for (int i = chunk.size(); i-- != 0;) {
                final long[] triple = iterator.next();
                long t = getLongByTripleNoCheck(triple);
                signatures.set(t, signatureMask & triple[0]);
                pl.lightUpdate();
            }
        }
        pl.done();
    } else {
        signatureMask = 0;
        signatures = null;
    }

    if (!givenChunkedHashStore)
        chunkedHashStore.close();
}

From source file:com.datatorrent.lib.io.fs.AbstractFSWriter.java

/**
 * Sets up the writer: obtains the file system, builds the cache of open
 * output streams, truncates back any files a previously crashed instance left
 * longer than the recorded end offset, and deletes stale rolling-file parts.
 *
 * @param context the operator context supplied by the engine
 */
@Override
public void setup(Context.OperatorContext context) {
    // A file only "rolls" when a finite maximum length was configured.
    rollingFile = maxLength < Long.MAX_VALUE;

    //Getting required file system instance.
    try {
        fs = getFSInstance();
    } catch (IOException ex) {
        throw new RuntimeException(ex);
    }

    LOG.debug("FS class {}", fs.getClass());

    //Setting listener for debugging
    LOG.debug("setup initiated");
    // Close the underlying stream whenever its entry is evicted from the cache.
    RemovalListener<String, FSDataOutputStream> removalListener = new RemovalListener<String, FSDataOutputStream>() {
        @Override
        public void onRemoval(RemovalNotification<String, FSDataOutputStream> notification) {
            FSDataOutputStream value = notification.getValue();
            if (value != null) {
                try {
                    LOG.debug("closing {}", notification.getKey());
                    value.close();
                } catch (IOException e) {
                    throw new RuntimeException(e);
                }
            }
        }
    };

    //Define cache
    CacheLoader<String, FSDataOutputStream> loader = new CacheLoader<String, FSDataOutputStream>() {
        @Override
        public FSDataOutputStream load(String filename) {
            String partFileName = getPartFileNamePri(filename);
            Path lfilepath = new Path(filePath + File.separator + partFileName);

            FSDataOutputStream fsOutput;
            // A non-positive replication factor means "use the file system default".
            if (replication <= 0) {
                replication = fs.getDefaultReplication(lfilepath);
            }

            boolean sawThisFileBefore = endOffsets.containsKey(filename);

            try {
                if (fs.exists(lfilepath)) {
                    if (sawThisFileBefore || append) {
                        FileStatus fileStatus = fs.getFileStatus(lfilepath);
                        MutableLong endOffset = endOffsets.get(filename);

                        if (endOffset != null) {
                            endOffset.setValue(fileStatus.getLen());
                        } else {
                            endOffsets.put(filename, new MutableLong(fileStatus.getLen()));
                        }

                        fsOutput = fs.append(lfilepath);
                        LOG.debug("appending to {}", lfilepath);
                    }
                    //We never saw this file before and we don't want to append
                    else {
                        //If the file is rolling we need to delete all its parts.
                        if (rollingFile) {
                            int part = 0;

                            while (true) {
                                Path seenPartFilePath = new Path(
                                        filePath + "/" + getPartFileName(filename, part));
                                if (!fs.exists(seenPartFilePath)) {
                                    break;
                                }

                                fs.delete(seenPartFilePath, true);
                                part = part + 1;
                            }

                            fsOutput = fs.create(lfilepath, (short) replication);
                        }
                        //Not rolling is easy, just delete the file and create it again.
                        else {
                            fs.delete(lfilepath, true);
                            fsOutput = fs.create(lfilepath, (short) replication);
                        }
                    }
                } else {
                    fsOutput = fs.create(lfilepath, (short) replication);
                }

                LOG.debug("full path: {}", fs.getFileStatus(lfilepath).getPath());
                return fsOutput;
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        }
    };

    streamsCache = CacheBuilder.newBuilder().maximumSize(maxOpenFiles).removalListener(removalListener)
            .build(loader);

    try {
        LOG.debug("File system class: {}", fs.getClass());
        LOG.debug("end-offsets {}", endOffsets);

        //Restore the files in case they were corrupted and the operator was restarted.
        Path writerPath = new Path(filePath);
        if (fs.exists(writerPath)) {
            for (String seenFileName : endOffsets.keySet()) {
                String seenFileNamePart = getPartFileNamePri(seenFileName);
                LOG.debug("seenFileNamePart: {}", seenFileNamePart);
                Path seenPartFilePath = new Path(filePath + "/" + seenFileNamePart);
                if (fs.exists(seenPartFilePath)) {
                    LOG.debug("file exists {}", seenFileNamePart);
                    long offset = endOffsets.get(seenFileName).longValue();
                    FSDataInputStream inputStream = fs.open(seenPartFilePath);
                    FileStatus status = fs.getFileStatus(seenPartFilePath);

                    try {
                        if (status.getLen() != offset) {
                            LOG.info("file corrupted {} {} {}", seenFileNamePart, offset, status.getLen());
                            byte[] buffer = new byte[COPY_BUFFER_SIZE];

                            String tmpFileName = seenFileNamePart + TMP_EXTENSION;
                            FSDataOutputStream fsOutput = streamsCache.get(tmpFileName);
                            // Copy the first `offset` bytes to a temporary file. Honor the
                            // number of bytes actually returned by each read(): the previous
                            // implementation ignored the return value, which could write stale
                            // buffer contents on a short read and desynchronize the stream
                            // position from the written bytes.
                            while (inputStream.getPos() < offset) {
                                long remainingBytes = offset - inputStream.getPos();
                                int bytesToRead = remainingBytes < COPY_BUFFER_SIZE ? (int) remainingBytes
                                        : COPY_BUFFER_SIZE;
                                int bytesRead = inputStream.read(buffer, 0, bytesToRead);
                                if (bytesRead <= 0) {
                                    // Unexpected EOF: stop rather than writing garbage.
                                    break;
                                }
                                fsOutput.write(buffer, 0, bytesRead);
                            }

                            flush(fsOutput);
                            FileContext fileContext = FileContext.getFileContext(fs.getUri());
                            String tempTmpFilePath = getPartFileNamePri(filePath + File.separator + tmpFileName);

                            Path tmpFilePath = new Path(tempTmpFilePath);
                            tmpFilePath = fs.getFileStatus(tmpFilePath).getPath();
                            LOG.debug("temp file path {}, rolling file path {}", tmpFilePath.toString(),
                                    status.getPath().toString());
                            // Replace the corrupted file with the truncated copy.
                            fileContext.rename(tmpFilePath, status.getPath(), Options.Rename.OVERWRITE);
                        }
                    } finally {
                        // Previously leaked; always close the input stream.
                        inputStream.close();
                    }
                }
            }
        }

        //delete the left over future rolling files produced from the previous crashed instance
        //of this operator.
        if (rollingFile) {
            for (String seenFileName : endOffsets.keySet()) {
                try {
                    Integer part = openPart.get(seenFileName).getValue() + 1;

                    while (true) {
                        Path seenPartFilePath = new Path(filePath + "/" + getPartFileName(seenFileName, part));
                        if (!fs.exists(seenPartFilePath)) {
                            break;
                        }

                        fs.delete(seenPartFilePath, true);
                        part = part + 1;
                    }

                    Path seenPartFilePath = new Path(filePath + "/"
                            + getPartFileName(seenFileName, openPart.get(seenFileName).intValue()));

                    //Handle the case when restoring to a checkpoint where the current rolling file
                    //already has a length greater than max length.
                    if (fs.getFileStatus(seenPartFilePath).getLen() > maxLength) {
                        LOG.debug("rotating file at setup.");
                        rotate(seenFileName);
                    }
                } catch (IOException | ExecutionException e) {
                    throw new RuntimeException(e);
                }
            }
        }

        LOG.debug("setup completed");
        LOG.debug("end-offsets {}", endOffsets);
    } catch (IOException | ExecutionException e) {
        throw new RuntimeException(e);
    }

    this.context = context;
    lastTimeStamp = System.currentTimeMillis();

    fileCounters.setCounter(Counters.TOTAL_BYTES_WRITTEN, new MutableLong());
    fileCounters.setCounter(Counters.TOTAL_TIME_ELAPSED, new MutableLong());
}

From source file:com.datatorrent.stram.client.RecordingsAgent.java

/**
 * Scans lines from one part file of a recording, accumulating matching
 * tuples into {@code info}.
 *
 * <p>As parsed below, each relevant line is either a begin-window line
 * ({@code B:<timestamp>:<windowId>}) or a tuple line
 * ({@code T:<timestamp>:<port>:<size>:<data>}).
 *
 * @param partBr reader over the part file
 * @param queryType whether {@code low} is a window id, an offset, or a time
 * @param low lower bound of the query
 * @param high upper bound; unused in this method (the caller enforces it)
 * @param limit the original tuple limit, compared against
 *        {@code numRemainingTuples} to detect whether any tuple was already collected
 * @param ports ports to include; {@code null} or empty matches all ports
 * @param numRemainingTuples in/out: tuples still wanted, decremented per collected tuple
 * @param currentTimestamp in/out: timestamp of the last line seen
 * @param currentWindowLow in/out: id of the current window
 * @param currentOffset offset of the first tuple in this part file
 * @param info accumulator for the result
 * @throws IOException if reading the part file fails
 */
private void processPartFile(BufferedReader partBr, QueryType queryType, long low, long high, long limit,
        String[] ports, MutableLong numRemainingTuples, MutableLong currentTimestamp,
        MutableLong currentWindowLow, long currentOffset, TuplesInfo info) throws IOException {
    String partLine;
    long tmpOffset = currentOffset;
    // advance until offset is reached
    while ((partLine = partBr.readLine()) != null) {
        int partCursor = 2; // skip the "B:" / "T:" prefix
        if (partLine.startsWith("B:")) {
            // Begin-window line: B:<timestamp>:<windowId>
            int partCursor2 = partLine.indexOf(':', partCursor);
            // parseLong avoids the needless boxing of Long.valueOf.
            currentTimestamp.setValue(Long.parseLong(partLine.substring(partCursor, partCursor2)));
            partCursor = partCursor2 + 1;
            currentWindowLow.setValue(Long.parseLong(partLine.substring(partCursor)));
            if (limit != numRemainingTuples.longValue()) {
                // Tuples were already collected: start an entry for the new window.
                WindowTuplesInfo wtinfo;
                wtinfo = new WindowTuplesInfo();
                wtinfo.windowId = currentWindowLow.longValue();
                info.tuples.add(wtinfo);
            }
        } else if (partLine.startsWith("T:")) {
            // Tuple line: T:<timestamp>:<port>:<size>:<data>
            int partCursor2 = partLine.indexOf(':', partCursor);
            currentTimestamp.setValue(Long.parseLong(partLine.substring(partCursor, partCursor2)));
            partCursor = partCursor2 + 1;
            partCursor2 = partLine.indexOf(':', partCursor);
            String port = partLine.substring(partCursor, partCursor2);
            boolean portMatch = (ports == null) || (ports.length == 0) || Arrays.asList(ports).contains(port);
            partCursor = partCursor2 + 1;

            if (portMatch && ((queryType == QueryType.WINDOW && currentWindowLow.longValue() >= low)
                    || (queryType == QueryType.OFFSET && tmpOffset >= low)
                    || (queryType == QueryType.TIME && currentTimestamp.longValue() >= low))) {

                if (numRemainingTuples.longValue() > 0) {
                    if (info.startOffset == -1) {
                        // Record the offset of the first collected tuple.
                        info.startOffset = tmpOffset;
                    }
                    // Append to the current window's entry, creating it if needed.
                    WindowTuplesInfo wtinfo;
                    if (info.tuples.isEmpty()
                            || info.tuples.get(info.tuples.size() - 1).windowId != currentWindowLow
                                    .longValue()) {
                        wtinfo = new WindowTuplesInfo();
                        wtinfo.windowId = currentWindowLow.longValue();
                        info.tuples.add(wtinfo);
                    } else {
                        wtinfo = info.tuples.get(info.tuples.size() - 1);
                    }

                    partCursor2 = partLine.indexOf(':', partCursor);
                    // The size field is validated as a number, but its value is not
                    // needed: the tuple payload is simply the remainder of the line.
                    Integer.parseInt(partLine.substring(partCursor, partCursor2));
                    partCursor = partCursor2 + 1;
                    String tupleValue = partLine.substring(partCursor);
                    wtinfo.tuples.add(new TupleInfo(port, tupleValue));
                    numRemainingTuples.decrement();
                } else {
                    break;
                }
            }
            if (portMatch) {
                // Offsets count only tuples on matching ports.
                tmpOffset++;
            }
        }
    }
}

From source file:com.indeed.lsmtree.recordlog.BasicRecordFile.java

/**
 * Reads the length-prefixed, checksummed record at {@code address},
 * verifies its CRC, and deserializes it.
 *
 * @param address the byte address of the record in {@code memory}
 * @param nextElementStart if non-null, set to the address just past this record
 * @return the deserialized record, or none if the stored length is negative
 * @throws ConsistencyException if the file is truncated or the checksum mismatches
 */
private Option<E> readAndCheck(long address, MutableLong nextElementStart) throws IOException {
    // Record layout: 4-byte length, 4-byte checksum, then the payload.
    if (address + 4 > memory.length()) {
        throw new ConsistencyException("not enough bytes in file");
    }
    final int recordLength = memory.getInt(address);
    if (recordLength < 0) {
        // A negative length marks the absence of a record.
        return Option.none();
    }
    if (address + 8 > memory.length()) {
        throw new ConsistencyException("not enough bytes in file");
    }
    if (address + 8 + recordLength > memory.length()) {
        throw new ConsistencyException("not enough bytes in file");
    }
    final int storedChecksum = memory.getInt(address + 4);

    final MemoryDataInput input = new MemoryDataInput(memory);
    input.seek(address + 8);
    final byte[] payload = new byte[recordLength];
    input.readFully(payload);

    final CRC32 crc32 = new CRC32();
    crc32.update(CRC_SEED);
    crc32.update(payload);
    final int actualChecksum = (int) crc32.getValue();
    if (actualChecksum != storedChecksum) {
        throw new ConsistencyException("checksum for record does not match: expected " + storedChecksum
                + " actual " + actualChecksum);
    }

    final E record = serializer.read(ByteStreams.newDataInput(payload));
    if (nextElementStart != null) {
        nextElementStart.setValue(address + 8 + recordLength);
    }
    return Option.some(record);
}

From source file:com.datatorrent.stram.client.RecordingsAgent.java

/**
 * Reads the index and part files of a recording and collects tuple
 * information matching the query.
 *
 * @param appId application id, used to locate the recording directory
 * @param opId operator id, used to locate the recording directory
 * @param id recording id, used to locate the recording directory
 * @param low lower bound of the query (window id, offset, or time, per {@code queryType})
 * @param high upper bound, checked against timestamps for time queries
 * @param limit maximum number of tuples to collect
 * @param ports ports to include; {@code null} or empty means all ports
 * @param queryType the interpretation of {@code low}/{@code high}
 * @return the collected tuples info, or {@code null} if the recording
 *         directory cannot be determined or reading fails
 */
private TuplesInfo getTuplesInfo(String appId, String opId, String id, long low, long high, long limit,
        String[] ports, QueryType queryType) {
    TuplesInfo info = new TuplesInfo();
    info.startOffset = -1; // sentinel: set to the first matching offset during processing
    String dir = getRecordingDirectory(appId, opId, id);
    if (dir == null) {
        return null;
    }
    IndexFileBufferedReader ifbr = null;
    try {
        ifbr = new IndexFileBufferedReader(new InputStreamReader(
                stramAgent.getFileSystem().open(new Path(dir, FSPartFileCollection.INDEX_FILE))), dir);
        long currentOffset = 0;
        boolean readPartFile = false;
        MutableLong numRemainingTuples = new MutableLong(limit);
        MutableLong currentTimestamp = new MutableLong();
        RecordingsIndexLine indexLine;
        String lastProcessPartFile = null;
        // Scan index lines; once the query's start point is reached, begin
        // processing the referenced part files.
        while ((indexLine = (RecordingsIndexLine) ifbr.readIndexLine()) != null) {
            if (indexLine.isEndLine) {
                continue;
            }
            MutableLong currentWindowLow = new MutableLong();
            MutableLong currentWindowHigh = new MutableLong();
            long numTuples = 0;

            // Count this index line's tuples, restricted to the requested ports.
            if (ports == null || ports.length == 0) {
                numTuples = indexLine.tupleCount;
            } else {
                for (String port : ports) {
                    if (indexLine.portTupleCount.containsKey(port)) {
                        numTuples += indexLine.portTupleCount.get(port).longValue();
                    } else {
                        LOG.warn("Port index {} is not found, ignoring...", port);
                    }
                }
            }
            currentWindowLow.setValue(indexLine.windowIdRanges.get(0).low);
            currentWindowHigh.setValue(indexLine.windowIdRanges.get(indexLine.windowIdRanges.size() - 1).high);

            // Decide whether this index line reaches the start of the query range.
            if (!readPartFile) {
                if (queryType == QueryType.WINDOW) {
                    if (currentWindowLow.longValue() > low) {
                        break;
                    } else if (currentWindowLow.longValue() <= low && low <= currentWindowHigh.longValue()) {
                        readPartFile = true;
                    }
                } else if (queryType == QueryType.OFFSET) {
                    if (currentOffset + numTuples > low) {
                        readPartFile = true;
                    }
                } else { // time
                    if (indexLine.fromTime > low) {
                        break;
                    } else if (indexLine.fromTime <= low && low <= indexLine.toTime) {
                        readPartFile = true;
                    }
                }
            }

            if (readPartFile) {
                lastProcessPartFile = indexLine.partFile;
                BufferedReader partBr = new BufferedReader(new InputStreamReader(
                        stramAgent.getFileSystem().open(new Path(dir, indexLine.partFile))));
                try {
                    processPartFile(partBr, queryType, low, high, limit, ports, numRemainingTuples,
                            currentTimestamp, currentWindowLow, currentOffset, info);
                } finally {
                    partBr.close();
                }
            }
            currentOffset += numTuples;
            // Stop once the limit is exhausted or (for time queries) we passed the upper bound.
            if (numRemainingTuples.longValue() <= 0
                    || (queryType == QueryType.TIME && currentTimestamp.longValue() > high)) {
                return info;
            }
        }
        // Best-effort: also try the part file following the last one processed
        // (it may not be indexed yet — TODO confirm); failures here are ignored.
        BufferedReader partBr = null;
        try {
            String extraPartFile = getNextPartFile(lastProcessPartFile);
            if (extraPartFile != null) {
                partBr = new BufferedReader(
                        new InputStreamReader(stramAgent.getFileSystem().open(new Path(dir, extraPartFile))));
                processPartFile(partBr, queryType, low, high, limit, ports, numRemainingTuples,
                        currentTimestamp, new MutableLong(), currentOffset, info);
            }
        } catch (Exception ex) {
            // ignore: the extra part file is optional
        } finally {
            IOUtils.closeQuietly(partBr);
        }

    } catch (Exception ex) {
        LOG.warn("Got exception when getting tuples info", ex);
        return null;
    } finally {
        IOUtils.closeQuietly(ifbr);
    }

    return info;
}

From source file:com.datatorrent.lib.io.fs.AbstractFileOutputOperator.java

/**
 * Creates the {@link CacheLoader} for loading an output stream when it is not present in the cache.
 * @return cache loader
 */
private CacheLoader<String, FSFilterStreamContext> createCacheLoader() {
    return new CacheLoader<String, FSFilterStreamContext>() {
        @Override
        public FSFilterStreamContext load(@Nonnull String filename) {
            if (rollingFile) {
                RotationState state = getRotationState(filename);
                if (rollingFile && state.rotated) {
                    openPart.get(filename).add(1);
                    state.rotated = false;
                    MutableLong offset = endOffsets.get(filename);
                    offset.setValue(0);
                }
            }

            String partFileName = getPartFileNamePri(filename);
            Path originalFilePath = new Path(filePath + Path.SEPARATOR + partFileName);

            Path activeFilePath;
            if (!alwaysWriteToTmp) {
                activeFilePath = originalFilePath;
            } else {
                //MLHR-1776 : writing to tmp file
                String tmpFileName = fileNameToTmpName.get(partFileName);
                if (tmpFileName == null) {
                    tmpFileName = partFileName + '.' + System.currentTimeMillis() + TMP_EXTENSION;
                    fileNameToTmpName.put(partFileName, tmpFileName);
                }
                activeFilePath = new Path(filePath + Path.SEPARATOR + tmpFileName);
            }

            FSDataOutputStream fsOutput;

            boolean sawThisFileBefore = endOffsets.containsKey(filename);

            try {
                if (fs.exists(originalFilePath) || (alwaysWriteToTmp && fs.exists(activeFilePath))) {
                    if (sawThisFileBefore) {
                        FileStatus fileStatus = fs.getFileStatus(activeFilePath);
                        MutableLong endOffset = endOffsets.get(filename);

                        if (endOffset != null) {
                            endOffset.setValue(fileStatus.getLen());
                        } else {
                            endOffsets.put(filename, new MutableLong(fileStatus.getLen()));
                        }

                        fsOutput = openStream(activeFilePath, true);
                        LOG.debug("appending to {}", activeFilePath);
                    } else {
                        //We never saw this file before and we don't want to append
                        //If the file is rolling we need to delete all its parts.
                        if (rollingFile) {
                            int part = 0;

                            while (true) {
                                Path seenPartFilePath = new Path(
                                        filePath + Path.SEPARATOR + getPartFileName(filename, part));
                                if (!fs.exists(seenPartFilePath)) {
                                    break;
                                }

                                fs.delete(seenPartFilePath, true);
                                part = part + 1;
                            }

                            fsOutput = openStream(activeFilePath, false);
                        } else {
                            //Not rolling is easy, just delete the file and create it again.
                            fs.delete(activeFilePath, true);
                            if (alwaysWriteToTmp) {
                                //we need to delete original file if that exists
                                if (fs.exists(originalFilePath)) {
                                    fs.delete(originalFilePath, true);
                                }
                            }
                            fsOutput = openStream(activeFilePath, false);
                        }
                    }
                } else {
                    fsOutput = openStream(activeFilePath, false);
                }
                filesWithOpenStreams.add(filename);

                LOG.info("opened {}, active {}", partFileName, activeFilePath);
                return new FSFilterStreamContext(fsOutput);
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        }
    };
}

From source file:com.jivesoftware.os.amza.service.storage.WALStorage.java

/**
 * Appends one transaction worth of rows to the WAL under the single-writer lock.
 * <p>
 * Allocates a transaction id when the caller passes {@code -1}, widens the
 * {@code [indexCommittedFromTxId, indexCommitedUpToTxId]} window to include this
 * transaction, writes the rows (and an optional highwater marker), and finally
 * publishes the new highest transaction id.
 *
 * @param txId transaction id to write under, or {@code -1} to allocate the next id
 * @param indexCommittedFromTxId in/out: lowered to {@code txId} if currently greater
 * @param indexCommitedUpToTxId  in/out: raised to {@code txId} if currently smaller; must not be null
 * @param highwater optional highwater marker written after the rows; may be null
 * @throws IllegalStateException if {@code indexCommitedUpToTxId} is null
 */
private void flush(IoStats ioStats, RowType rowType, long txId, int estimatedNumberOfRows,
        int estimatedSizeInBytes, RawRows rows, IndexableKeys indexableKeys,
        final MutableLong indexCommittedFromTxId, final MutableLong indexCommitedUpToTxId, WALWriter rowWriter,
        WALHighwater highwater, TxKeyPointerFpStream stream) throws Exception {

    synchronized (oneTransactionAtATimeLock) {
        if (txId == -1) {
            // Caller did not supply a transaction id; allocate the next ordered id.
            txId = orderIdProvider.nextId();
        }
        if (indexCommittedFromTxId.longValue() > txId) {
            indexCommittedFromTxId.setValue(txId);
        }
        // Explicit precondition check instead of catching a NullPointerException:
        // the mutable itself was the only possible NPE source here.
        if (indexCommitedUpToTxId == null) {
            throw new IllegalStateException("indexCommitedUpToTxId must not be null");
        }
        if (indexCommitedUpToTxId.longValue() < txId) {
            indexCommitedUpToTxId.setValue(txId);
        }
        rowWriter.write(ioStats, txId, rowType, estimatedNumberOfRows, estimatedSizeInBytes, rows,
                indexableKeys, stream, true, hardFsyncBeforeLeapBoundary);
        if (highwater != null) {
            writeHighwaterMarker(ioStats, rowWriter, highwater);
        }
        // Publish only after the write (and optional marker) succeeded.
        highestTxId.set(indexCommitedUpToTxId.longValue());
    }
}

From source file:eu.eubrazilcc.lvl.storage.mongodb.MongoDBConnector.java

/**
 * Lists all the files in the specified name space. Only latest versions are included in the list.
 * @param namespace - (optional) name space to be searched for files. When nothing specified, the default bucket is used
 * @param sortCriteria - objects in the collection are sorted with this criteria
 * @param start - starting index
 * @param size - maximum number of objects returned
 * @param count - (optional) is updated with the number of objects in the database
 * @return a view of the files stored under the specified name space that contains the specified range.
 */
public List<GridFSDBFile> listFiles(final @Nullable String namespace, final DBObject sortCriteria,
        final int start, final int size, final @Nullable MutableLong count) {
    final List<GridFSDBFile> list = newArrayList();
    final DB db = client().getDB(CONFIG_MANAGER.getDbName());
    // Use the caller's bucket when given, otherwise the default GridFS bucket.
    final GridFS gfsNs = isNotBlank(namespace) ? new GridFS(db, namespace.trim()) : new GridFS(db);
    // Only latest versions carry the version property.
    final DBCursor cursor = gfsNs.getFileList(
            new BasicDBObject(FILE_VERSION_PROP, new BasicDBObject("$exists", true)), sortCriteria);
    cursor.skip(start).limit(size);
    try {
        while (cursor.hasNext()) {
            list.add((GridFSDBFile) cursor.next());
        }
        if (count != null) {
            // count() ignores skip/limit, yielding the total number of matching files.
            // Query it while the cursor is still open rather than after close().
            count.setValue(cursor.count());
        }
    } finally {
        cursor.close();
    }
    return list;
}