Example usage for org.apache.hadoop.io Text compareTo

Introduction

This page lists example usages of org.apache.hadoop.io.Text#compareTo from open source projects.

Prototype

@Override
public int compareTo(BinaryComparable other) 

Document

Compare bytes from {@link #getBytes()}.
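
As a minimal, standalone sketch (not taken from the sources below; class and variable names are made up for illustration), the following shows how Text.compareTo orders values by their underlying UTF-8 bytes, including the range-validation pattern that appears in several of the examples on this page:

import org.apache.hadoop.io.Text;

public class TextCompareToExample {
    public static void main(String[] args) {
        Text apple = new Text("apple");
        Text banana = new Text("banana");

        // compareTo() compares the underlying byte arrays lexicographically
        System.out.println(apple.compareTo(banana)); // negative: "apple" sorts before "banana"
        System.out.println(banana.compareTo(apple)); // positive
        System.out.println(apple.compareTo(new Text("apple"))); // 0: identical byte sequences

        // A common pattern from the examples below: validating a row range
        Text startRow = new Text("a");
        Text endRow = new Text("z");
        if (startRow.compareTo(endRow) >= 0) {
            throw new IllegalArgumentException("start row must be less than end row");
        }
    }
}

Because the comparison is over raw bytes, the ordering follows the unsigned byte order of the UTF-8 encoding rather than locale-aware string collation.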

Usage

From source file:org.apache.accumulo.core.clientImpl.TableOperationsImpl.java

License:Apache License

@Override
public SummaryRetriever summaries(String tableName) {

    return new SummaryRetriever() {

        private Text startRow = null;
        private Text endRow = null;
        private List<TSummarizerConfiguration> summariesToFetch = Collections.emptyList();
        private String summarizerClassRegex;
        private boolean flush = false;

        @Override
        public SummaryRetriever startRow(Text startRow) {
            Objects.requireNonNull(startRow);
            if (endRow != null) {
                Preconditions.checkArgument(startRow.compareTo(endRow) < 0,
                        "Start row must be less than end row : %s >= %s", startRow, endRow);
            }
            this.startRow = startRow;
            return this;
        }

        @Override
        public SummaryRetriever startRow(CharSequence startRow) {
            return startRow(new Text(startRow.toString()));
        }

        @Override
        public List<Summary> retrieve()
                throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
            TableId tableId = Tables.getTableId(context, tableName);
            if (Tables.getTableState(context, tableId) == TableState.OFFLINE)
                throw new TableOfflineException(Tables.getTableOfflineMsg(context, tableId));

            TRowRange range = new TRowRange(TextUtil.getByteBuffer(startRow), TextUtil.getByteBuffer(endRow));
            TSummaryRequest request = new TSummaryRequest(tableId.canonical(), range, summariesToFetch,
                    summarizerClassRegex);
            if (flush) {
                _flush(tableId, startRow, endRow, true);
            }

            TSummaries ret = ServerClient.execute(context, new TabletClientService.Client.Factory(), client -> {
                TSummaries tsr = client.startGetSummaries(TraceUtil.traceInfo(), context.rpcCreds(), request);
                while (!tsr.finished) {
                    tsr = client.contiuneGetSummaries(TraceUtil.traceInfo(), tsr.sessionId);
                }
                return tsr;
            });
            return new SummaryCollection(ret).getSummaries();
        }

        @Override
        public SummaryRetriever endRow(Text endRow) {
            Objects.requireNonNull(endRow);
            if (startRow != null) {
                Preconditions.checkArgument(startRow.compareTo(endRow) < 0,
                        "Start row must be less than end row : %s >= %s", startRow, endRow);
            }
            this.endRow = endRow;
            return this;
        }

        @Override
        public SummaryRetriever endRow(CharSequence endRow) {
            return endRow(new Text(endRow.toString()));
        }

        @Override
        public SummaryRetriever withConfiguration(Collection<SummarizerConfiguration> configs) {
            Objects.requireNonNull(configs);
            summariesToFetch = configs.stream().map(SummarizerConfigurationUtil::toThrift)
                    .collect(Collectors.toList());
            return this;
        }

        @Override
        public SummaryRetriever withConfiguration(SummarizerConfiguration... config) {
            Objects.requireNonNull(config);
            return withConfiguration(Arrays.asList(config));
        }

        @Override
        public SummaryRetriever withMatchingConfiguration(String regex) {
            Objects.requireNonNull(regex);
            // Do a sanity check here to make sure that regex compiles, instead of having it fail on a
            // tserver.
            Pattern.compile(regex);
            this.summarizerClassRegex = regex;
            return this;
        }

        @Override
        public SummaryRetriever flush(boolean b) {
            this.flush = b;
            return this;
        }
    };
}

From source file:org.apache.accumulo.core.clientImpl.TabletLocatorImpl.java

License:Apache License

private void lookupTabletLocation(ClientContext context, Text row, boolean retry, LockCheckerSession lcSession)
        throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
    Text metadataRow = new Text(tableId.canonical());
    metadataRow.append(new byte[] { ';' }, 0, 1);
    metadataRow.append(row.getBytes(), 0, row.getLength());
    TabletLocation ptl = parent.locateTablet(context, metadataRow, false, retry);

    if (ptl != null) {
        TabletLocations locations = locationObtainer.lookupTablet(context, ptl, metadataRow, lastTabletRow,
                parent);
        while (locations != null && locations.getLocations().isEmpty()
                && locations.getLocationless().isEmpty()) {
            // try the next tablet, the current tablet does not have any tablets that overlap the row
            Text er = ptl.tablet_extent.getEndRow();
            if (er != null && er.compareTo(lastTabletRow) < 0) {
                // System.out.println("er "+er+" ltr "+lastTabletRow);
                ptl = parent.locateTablet(context, er, true, retry);
                if (ptl != null)
                    locations = locationObtainer.lookupTablet(context, ptl, metadataRow, lastTabletRow, parent);
                else
                    break;
            } else {
                break;
            }
        }

        if (locations == null)
            return;

        // cannot assume the list contains contiguous key extents... so it is probably
        // best to deal with each extent individually

        Text lastEndRow = null;
        for (TabletLocation tabletLocation : locations.getLocations()) {

            KeyExtent ke = tabletLocation.tablet_extent;
            TabletLocation locToCache;

            // create new location if current prevEndRow == endRow
            if ((lastEndRow != null) && (ke.getPrevEndRow() != null) && ke.getPrevEndRow().equals(lastEndRow)) {
                locToCache = new TabletLocation(new KeyExtent(ke.getTableId(), ke.getEndRow(), lastEndRow),
                        tabletLocation.tablet_location, tabletLocation.tablet_session);
            } else {
                locToCache = tabletLocation;
            }

            // save endRow for next iteration
            lastEndRow = locToCache.tablet_extent.getEndRow();

            updateCache(locToCache, lcSession);
        }
    }

}

From source file:org.apache.accumulo.core.data.MapFileTest.java

License:Apache License

public void testMapFileFix() {
    try {
        Configuration conf = CachedConfiguration.getInstance();
        FileSystem fs = FileSystem.get(conf);
        conf.setInt("io.seqfile.compress.blocksize", 4000);

        for (CompressionType compressionType : CompressionType.values()) {
            /*****************************
             * write out the test map file
             */
            MyMapFile.Writer mfw = new MyMapFile.Writer(conf, fs, "/tmp/testMapFileIndexingMap", Text.class,
                    BytesWritable.class, compressionType);
            BytesWritable value;
            Random r = new Random();
            byte[] bytes = new byte[1024];
            for (int i = 0; i < 1000; i++) {
                String keyString = Integer.toString(i + 1000000);
                Text key = new Text(keyString);
                r.nextBytes(bytes);
                value = new BytesWritable(bytes);
                mfw.append(key, value);
            }
            mfw.close();

            /************************************
             * move the index file
             */
            fs.rename(new Path("/tmp/testMapFileIndexingMap/index"),
                    new Path("/tmp/testMapFileIndexingMap/oldIndex"));

            /************************************
             * recreate the index
             */
            MyMapFile.fix(fs, new Path("/tmp/testMapFileIndexingMap"), Text.class, BytesWritable.class, false,
                    conf);

            /************************************
             * compare old and new indices
             */
            MySequenceFile.Reader oldIndexReader = new MySequenceFile.Reader(fs,
                    new Path("/tmp/testMapFileIndexingMap/oldIndex"), conf);
            MySequenceFile.Reader newIndexReader = new MySequenceFile.Reader(fs,
                    new Path("/tmp/testMapFileIndexingMap/index"), conf);

            Text oldKey = new Text();
            Text newKey = new Text();
            LongWritable oldValue = new LongWritable();
            LongWritable newValue = new LongWritable();
            while (true) {
                boolean moreKeys = false;
                // check for the same number of records
                assertTrue((moreKeys = oldIndexReader.next(oldKey, oldValue)) == newIndexReader.next(newKey,
                        newValue));
                if (!moreKeys)
                    break;
                assertTrue(oldKey.compareTo(newKey) == 0);
                assertTrue(oldValue.compareTo(newValue) == 0);
            }
            oldIndexReader.close();
            newIndexReader.close();

            fs.delete(new Path("/tmp/testMapFileIndexingMap"), true);
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
}

From source file:org.apache.accumulo.core.util.MetadataTable.java

License:Apache License

public static boolean isContiguousRange(KeyExtent ke, SortedSet<KeyExtent> children) {
    if (children.size() == 0)
        return false;

    if (children.size() == 1)
        return children.first().equals(ke);

    Text per = children.first().getPrevEndRow();
    Text er = children.last().getEndRow();

    boolean perEqual = (per == ke.getPrevEndRow()
            || per != null && ke.getPrevEndRow() != null && ke.getPrevEndRow().compareTo(per) == 0);

    boolean erEqual = (er == ke.getEndRow()
            || er != null && ke.getEndRow() != null && ke.getEndRow().compareTo(er) == 0);

    if (!perEqual || !erEqual)
        return false;

    Iterator<KeyExtent> iter = children.iterator();

    Text lastEndRow = iter.next().getEndRow();

    while (iter.hasNext()) {
        KeyExtent cke = iter.next();

        per = cke.getPrevEndRow();

        // something in the middle should not be null

        if (per == null || lastEndRow == null)
            return false;

        if (per.compareTo(lastEndRow) != 0)
            return false;

        lastEndRow = cke.getEndRow();
    }

    return true;
}

From source file:org.apache.accumulo.examples.wikisearch.iterator.UniqFieldNameValueIterator.java

License:Apache License

@Override
public void next() throws IOException {
    if (log.isDebugEnabled()) {
        log.debug("next()");
    }
    if (!source.hasTop()) {
        topKey = null;
        topValue = null;
        return;
    }

    Key currentKey = topKey;
    keyParser.parse(topKey);
    String fValue = keyParser.getFieldValue();

    Text currentRow = currentKey.getRow();
    Text currentFam = currentKey.getColumnFamily();

    if (overallRange.getEndKey() != null && overallRange.getEndKey().getRow().compareTo(currentRow) < 0) {
        if (log.isDebugEnabled()) {
            log.debug("next, overall endRow: " + overallRange.getEndKey().getRow() + "  currentRow: "
                    + currentRow);
        }
        topKey = null;
        topValue = null;
        return;
    }

    if (fValue.compareTo(this.fieldValueUpperBound.toString()) > 0) {
        topKey = null;
        topValue = null;
        return;
    }
    Key followingKey = new Key(currentKey.getRow(), this.fieldName, new Text(fValue + ONE_BYTE));
    if (log.isDebugEnabled()) {
        log.debug("next, followingKey to seek on: " + followingKey);
    }
    Range r = new Range(followingKey, followingKey);
    source.seek(r, EMPTY_COL_FAMS, false);
    while (true) {
        if (!source.hasTop()) {
            topKey = null;
            topValue = null;
            return;
        }

        Key k = source.getTopKey();
        if (!overallRange.contains(k)) {
            topKey = null;
            topValue = null;
            return;
        }
        if (log.isDebugEnabled()) {
            log.debug("next(), key: " + k + " subrange: " + this.currentSubRange);
        }
        // if (this.currentSubRange.contains(k)) {
        keyParser.parse(k);
        Text currentVal = new Text(keyParser.getFieldValue());
        if (k.getRow().equals(currentRow) && k.getColumnFamily().equals(currentFam)
                && currentVal.compareTo(fieldValueUpperBound) <= 0) {
            topKey = k;
            topValue = source.getTopValue();
            return;

        } else { // need to move to next row.
            if (this.overallRange.contains(k) && this.multiRow) {
                // need to find the next sub range
                // STEPS
                // 1. check if you moved past your current row on last call to next
                // 2. figure out next row
                // 3. build new start key with lowerbound fvalue
                // 4. seek the source
                // 5. test the subrange.
                if (k.getRow().equals(currentRow)) {
                    // get next row
                    currentRow = getNextRow();
                    if (currentRow == null) {
                        topKey = null;
                        topValue = null;
                        return;
                    }
                } else {
                    // i'm already in the next row
                    currentRow = source.getTopKey().getRow();
                }

                // build new startKey
                Key sKey = new Key(currentRow, fieldName, fieldValueLowerBound);
                Key eKey = new Key(currentRow, fieldName, fieldValueUpperBound);
                currentSubRange = new Range(sKey, eKey);
                source.seek(currentSubRange, EMPTY_COL_FAMS, seekInclusive);

            } else { // not multi-row or outside overall range, we're done
                topKey = null;
                topValue = null;
                return;
            }
        }

    }

}

From source file:org.apache.accumulo.master.MasterClientServiceHandler.java

License:Apache License

@Override
public void waitForFlush(TInfo tinfo, TCredentials c, String tableId, ByteBuffer startRow, ByteBuffer endRow,
        long flushID, long maxLoops) throws ThriftSecurityException, ThriftTableOperationException {
    String namespaceId = Tables.getNamespaceId(instance, tableId);
    master.security.canFlush(c, tableId, namespaceId);

    if (endRow != null && startRow != null
            && ByteBufferUtil.toText(startRow).compareTo(ByteBufferUtil.toText(endRow)) >= 0)
        throw new ThriftTableOperationException(tableId, null, TableOperation.FLUSH,
                TableOperationExceptionType.BAD_RANGE, "start row must be less than end row");

    Set<TServerInstance> serversToFlush = new HashSet<>(master.tserverSet.getCurrentServers());

    for (long l = 0; l < maxLoops; l++) {

        for (TServerInstance instance : serversToFlush) {
            try {
                final TServerConnection server = master.tserverSet.getConnection(instance);
                if (server != null)
                    server.flush(master.masterLock, tableId, ByteBufferUtil.toBytes(startRow),
                            ByteBufferUtil.toBytes(endRow));
            } catch (TException ex) {
                Master.log.error(ex.toString());
            }
        }

        if (l == maxLoops - 1)
            break;

        sleepUninterruptibly(50, TimeUnit.MILLISECONDS);

        serversToFlush.clear();

        try {
            Connector conn = master.getConnector();
            Scanner scanner;
            if (tableId.equals(MetadataTable.ID)) {
                scanner = new IsolatedScanner(conn.createScanner(RootTable.NAME, Authorizations.EMPTY));
                scanner.setRange(MetadataSchema.TabletsSection.getRange());
            } else {
                scanner = new IsolatedScanner(conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY));
                Range range = new KeyExtent(tableId, null, ByteBufferUtil.toText(startRow)).toMetadataRange();
                scanner.setRange(range.clip(MetadataSchema.TabletsSection.getRange()));
            }
            TabletsSection.ServerColumnFamily.FLUSH_COLUMN.fetch(scanner);
            TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(scanner);
            scanner.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
            scanner.fetchColumnFamily(LogColumnFamily.NAME);

            RowIterator ri = new RowIterator(scanner);

            int tabletsToWaitFor = 0;
            int tabletCount = 0;

            Text ert = ByteBufferUtil.toText(endRow);

            while (ri.hasNext()) {
                Iterator<Entry<Key, Value>> row = ri.next();
                long tabletFlushID = -1;
                int logs = 0;
                boolean online = false;

                TServerInstance server = null;

                Entry<Key, Value> entry = null;
                while (row.hasNext()) {
                    entry = row.next();
                    Key key = entry.getKey();

                    if (TabletsSection.ServerColumnFamily.FLUSH_COLUMN.equals(key.getColumnFamily(),
                            key.getColumnQualifier())) {
                        tabletFlushID = Long.parseLong(entry.getValue().toString());
                    }

                    if (LogColumnFamily.NAME.equals(key.getColumnFamily()))
                        logs++;

                    if (TabletsSection.CurrentLocationColumnFamily.NAME.equals(key.getColumnFamily())) {
                        online = true;
                        server = new TServerInstance(entry.getValue(), key.getColumnQualifier());
                    }

                }

                // when tablet is not online and has no logs, there is no reason to wait for it
                if ((online || logs > 0) && tabletFlushID < flushID) {
                    tabletsToWaitFor++;
                    if (server != null)
                        serversToFlush.add(server);
                }

                tabletCount++;

                Text tabletEndRow = new KeyExtent(entry.getKey().getRow(), (Text) null).getEndRow();
                if (tabletEndRow == null || (ert != null && tabletEndRow.compareTo(ert) >= 0))
                    break;
            }

            if (tabletsToWaitFor == 0)
                break;

            // TODO detect case of table offline AND tablets w/ logs? - ACCUMULO-1296

            if (tabletCount == 0 && !Tables.exists(master.getInstance(), tableId))
                throw new ThriftTableOperationException(tableId, null, TableOperation.FLUSH,
                        TableOperationExceptionType.NOTFOUND, null);

        } catch (AccumuloException e) {
            Master.log.debug("Failed to scan " + MetadataTable.NAME + " table to wait for flush " + tableId, e);
        } catch (TabletDeletedException tde) {
            Master.log.debug("Failed to scan " + MetadataTable.NAME + " table to wait for flush " + tableId,
                    tde);
        } catch (AccumuloSecurityException e) {
            Master.log.warn("{}", e.getMessage(), e);
            throw new ThriftSecurityException();
        } catch (TableNotFoundException e) {
            Master.log.error("{}", e.getMessage(), e);
            throw new ThriftTableOperationException();
        }
    }

}

From source file:org.apache.accumulo.master.tableOps.CompactionDriver.java

License:Apache License

@Override
public long isReady(long tid, Master master) throws Exception {

    String zCancelID = Constants.ZROOT + "/" + master.getInstance().getInstanceID() + Constants.ZTABLES + "/"
            + tableId + Constants.ZTABLE_COMPACT_CANCEL_ID;

    IZooReaderWriter zoo = ZooReaderWriter.getInstance();

    if (Long.parseLong(new String(zoo.getData(zCancelID, null))) >= compactId) {
        // compaction was canceled
        throw new AcceptableThriftTableOperationException(tableId, null, TableOperation.COMPACT,
                TableOperationExceptionType.OTHER, "Compaction canceled");
    }

    MapCounter<TServerInstance> serversToFlush = new MapCounter<>();
    Connector conn = master.getConnector();

    Scanner scanner;

    if (tableId.equals(MetadataTable.ID)) {
        scanner = new IsolatedScanner(conn.createScanner(RootTable.NAME, Authorizations.EMPTY));
        scanner.setRange(MetadataSchema.TabletsSection.getRange());
    } else {
        scanner = new IsolatedScanner(conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY));
        Range range = new KeyExtent(tableId, null, startRow == null ? null : new Text(startRow))
                .toMetadataRange();
        scanner.setRange(range);
    }

    TabletsSection.ServerColumnFamily.COMPACT_COLUMN.fetch(scanner);
    TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(scanner);
    scanner.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);

    long t1 = System.currentTimeMillis();
    RowIterator ri = new RowIterator(scanner);

    int tabletsToWaitFor = 0;
    int tabletCount = 0;

    while (ri.hasNext()) {
        Iterator<Entry<Key, Value>> row = ri.next();
        long tabletCompactID = -1;

        TServerInstance server = null;

        Entry<Key, Value> entry = null;
        while (row.hasNext()) {
            entry = row.next();
            Key key = entry.getKey();

            if (TabletsSection.ServerColumnFamily.COMPACT_COLUMN.equals(key.getColumnFamily(),
                    key.getColumnQualifier()))
                tabletCompactID = Long.parseLong(entry.getValue().toString());

            if (TabletsSection.CurrentLocationColumnFamily.NAME.equals(key.getColumnFamily()))
                server = new TServerInstance(entry.getValue(), key.getColumnQualifier());
        }

        if (tabletCompactID < compactId) {
            tabletsToWaitFor++;
            if (server != null)
                serversToFlush.increment(server, 1);
        }

        tabletCount++;

        Text tabletEndRow = new KeyExtent(entry.getKey().getRow(), (Text) null).getEndRow();
        if (tabletEndRow == null || (endRow != null && tabletEndRow.compareTo(new Text(endRow)) >= 0))
            break;
    }

    long scanTime = System.currentTimeMillis() - t1;

    Instance instance = master.getInstance();
    Tables.clearCache(instance);
    if (tabletCount == 0 && !Tables.exists(instance, tableId))
        throw new AcceptableThriftTableOperationException(tableId, null, TableOperation.COMPACT,
                TableOperationExceptionType.NOTFOUND, null);

    if (serversToFlush.size() == 0 && Tables.getTableState(instance, tableId) == TableState.OFFLINE)
        throw new AcceptableThriftTableOperationException(tableId, null, TableOperation.COMPACT,
                TableOperationExceptionType.OFFLINE, null);

    if (tabletsToWaitFor == 0)
        return 0;

    for (TServerInstance tsi : serversToFlush.keySet()) {
        try {
            final TServerConnection server = master.getConnection(tsi);
            if (server != null)
                server.compact(master.getMasterLock(), tableId, startRow, endRow);
        } catch (TException ex) {
            LoggerFactory.getLogger(CompactionDriver.class).error(ex.toString());
        }
    }

    long sleepTime = 500;

    if (serversToFlush.size() > 0)
        sleepTime = Collections.max(serversToFlush.values()) * sleepTime; // make wait time depend on the server with the most to
                                                                          // compact

    sleepTime = Math.max(2 * scanTime, sleepTime);

    sleepTime = Math.min(sleepTime, 30000);

    return sleepTime;
}

From source file:org.apache.accumulo.master.tableOps.merge.TableRangeOp.java

License:Apache License

@Override
public Repo<Master> call(long tid, Master env) throws Exception {

    if (RootTable.ID.equals(tableId) && Operation.MERGE.equals(op)) {
        log.warn("Attempt to merge tablets for {} does nothing. It is not splittable.", RootTable.NAME);
    }

    Text start = startRow.length == 0 ? null : new Text(startRow);
    Text end = endRow.length == 0 ? null : new Text(endRow);

    if (start != null && end != null)
        if (start.compareTo(end) >= 0)
            throw new AcceptableThriftTableOperationException(tableId.canonical(), null, TableOperation.MERGE,
                    TableOperationExceptionType.BAD_RANGE, "start row must be less than end row");

    env.mustBeOnline(tableId);

    MergeInfo info = env.getMergeInfo(tableId);

    if (info.getState() == MergeState.NONE) {
        KeyExtent range = new KeyExtent(tableId, end, start);
        env.setMergeState(new MergeInfo(range, op), MergeState.STARTED);
    }

    return new TableRangeOpWait(namespaceId, tableId);
}

From source file:org.apache.accumulo.master.tableOps.TableRangeOp.java

License:Apache License

@Override
public Repo<Master> call(long tid, Master env) throws Exception {

    if (RootTable.ID.equals(tableId) && Operation.MERGE.equals(op)) {
        log.warn("Attempt to merge tablets for " + RootTable.NAME + " does nothing. It is not splittable.");
    }

    Text start = startRow.length == 0 ? null : new Text(startRow);
    Text end = endRow.length == 0 ? null : new Text(endRow);

    if (start != null && end != null)
        if (start.compareTo(end) >= 0)
            throw new AcceptableThriftTableOperationException(tableId, null, TableOperation.MERGE,
                    TableOperationExceptionType.BAD_RANGE, "start row must be less than end row");

    env.mustBeOnline(tableId);

    MergeInfo info = env.getMergeInfo(tableId);

    if (info.getState() == MergeState.NONE) {
        KeyExtent range = new KeyExtent(tableId, end, start);
        env.setMergeState(new MergeInfo(range, op), MergeState.STARTED);
    }

    return new TableRangeOpWait(tableId);
}

From source file:org.apache.accumulo.server.client.BulkImporter.java

License:Apache License

public static List<TabletLocation> findOverlappingTablets(ClientContext context, VolumeManager vm,
        TabletLocator locator, Path file, Text startRow, Text endRow) throws Exception {
    List<TabletLocation> result = new ArrayList<>();
    Collection<ByteSequence> columnFamilies = Collections.emptyList();
    String filename = file.toString();
    // log.debug(filename + " finding overlapping tablets " + startRow + " -> " + endRow);
    FileSystem fs = vm.getVolumeByPath(file).getFileSystem();
    FileSKVIterator reader = FileOperations.getInstance().newReaderBuilder().forFile(filename, fs, fs.getConf())
            .withTableConfiguration(context.getConfiguration()).seekToBeginning().build();
    try {
        Text row = startRow;
        if (row == null)
            row = new Text();
        while (true) {
            // log.debug(filename + " Seeking to row " + row);
            reader.seek(new Range(row, null), columnFamilies, false);
            if (!reader.hasTop()) {
                // log.debug(filename + " not found");
                break;
            }
            row = reader.getTopKey().getRow();
            TabletLocation tabletLocation = locator.locateTablet(context, row, false, true);
            // log.debug(filename + " found row " + row + " at location " + tabletLocation);
            result.add(tabletLocation);
            row = tabletLocation.tablet_extent.getEndRow();
            if (row != null && (endRow == null || row.compareTo(endRow) < 0)) {
                row = new Text(row);
                row.append(byte0, 0, byte0.length);
            } else
                break;
        }
    } finally {
        reader.close();
    }
    // log.debug(filename + " to be sent to " + result);
    return result;
}