Example usage for org.apache.hadoop.io Text compareTo

Introduction

On this page you can find example usages for org.apache.hadoop.io.Text.compareTo.

Prototype

@Override
public int compareTo(BinaryComparable other) 

Document

Compare bytes from getBytes().
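
Before the full examples, here is a minimal, self-contained sketch of the comparison semantics (the class name TextCompareToDemo is made up for illustration): Text.compareTo compares the serialized UTF-8 bytes lexicographically and returns a negative value, zero, or a positive value, which is the ordering the row and column-family checks below rely on.

import org.apache.hadoop.io.Text;

public class TextCompareToDemo {
    public static void main(String[] args) {
        Text apple = new Text("apple");
        Text banana = new Text("banana");

        // negative: "apple" sorts before "banana" in byte order
        System.out.println(apple.compareTo(banana));

        // zero: identical byte content compares equal
        System.out.println(apple.compareTo(new Text("apple")));

        // positive: "banana" sorts after "apple"
        System.out.println(banana.compareTo(apple));
    }
}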

Usage

From source file:org.apache.accumulo.server.constraints.MetadataConstraints.java

License:Apache License

@Override
public List<Short> check(Environment env, Mutation mutation) {

    ArrayList<Short> violations = null;

    Collection<ColumnUpdate> colUpdates = mutation.getUpdates();

    // check the row, it should contain at least one ; or end with <
    boolean containsSemiC = false;

    byte[] row = mutation.getRow();

    // always allow rows that fall within reserved areas
    if (row.length > 0 && row[0] == '~')
        return null;
    if (row.length > 2 && row[0] == '!' && row[1] == '!' && row[2] == '~')
        return null;

    for (byte b : row) {
        if (b == ';') {
            containsSemiC = true;
        }

        if (b == ';' || b == '<')
            break;

        if (!validTableNameChars[0xff & b]) {
            violations = addIfNotPresent(violations, 4);
        }
    }

    if (!containsSemiC) {
        // see if last row char is <
        if (row.length == 0 || row[row.length - 1] != '<') {
            violations = addIfNotPresent(violations, 4);
        }
    } else {
        if (row.length == 0) {
            violations = addIfNotPresent(violations, 4);
        }
    }

    if (row.length > 0 && row[0] == '!') {
        if (row.length < 3 || row[1] != '0' || (row[2] != '<' && row[2] != ';')) {
            violations = addIfNotPresent(violations, 4);
        }
    }

    // ensure row is not less than MetadataTable.ID
    if (new Text(row).compareTo(new Text(MetadataTable.ID)) < 0) {
        violations = addViolation(violations, 5);
    }

    boolean checkedBulk = false;

    for (ColumnUpdate columnUpdate : colUpdates) {
        Text columnFamily = new Text(columnUpdate.getColumnFamily());

        if (columnUpdate.isDeleted()) {
            if (!isValidColumn(columnUpdate)) {
                violations = addViolation(violations, 2);
            }
            continue;
        }

        if (columnUpdate.getValue().length == 0 && !columnFamily.equals(ScanFileColumnFamily.NAME)) {
            violations = addViolation(violations, 6);
        }

        if (columnFamily.equals(DataFileColumnFamily.NAME)) {
            try {
                DataFileValue dfv = new DataFileValue(columnUpdate.getValue());

                if (dfv.getSize() < 0 || dfv.getNumEntries() < 0) {
                    violations = addViolation(violations, 1);
                }
            } catch (NumberFormatException nfe) {
                violations = addViolation(violations, 1);
            } catch (ArrayIndexOutOfBoundsException aiooe) {
                violations = addViolation(violations, 1);
            }
        } else if (columnFamily.equals(ScanFileColumnFamily.NAME)) {
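            // scan file entries are accepted as-is; this empty branch skips the
            // generic column checks in the else branch below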

        } else if (columnFamily.equals(TabletsSection.BulkFileColumnFamily.NAME)) {
            if (!columnUpdate.isDeleted() && !checkedBulk) {
                // splits, which also write the time reference, are allowed to write this reference even when
                // the transaction is not running because the other half of the tablet is holding a reference
                // to the file.
                boolean isSplitMutation = false;
                // When a tablet is assigned, it re-writes the metadata. It should probably only update the location information,
                // but it writes everything. We allow it to re-write the bulk information if it is setting the location.
                // See ACCUMULO-1230.
                boolean isLocationMutation = false;

                HashSet<Text> dataFiles = new HashSet<>();
                HashSet<Text> loadedFiles = new HashSet<>();

                String tidString = new String(columnUpdate.getValue(), UTF_8);
                int otherTidCount = 0;

                for (ColumnUpdate update : mutation.getUpdates()) {
                    if (new ColumnFQ(update).equals(TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN)) {
                        isSplitMutation = true;
                    } else if (new Text(update.getColumnFamily())
                            .equals(TabletsSection.CurrentLocationColumnFamily.NAME)) {
                        isLocationMutation = true;
                    } else if (new Text(update.getColumnFamily()).equals(DataFileColumnFamily.NAME)) {
                        dataFiles.add(new Text(update.getColumnQualifier()));
                    } else if (new Text(update.getColumnFamily())
                            .equals(TabletsSection.BulkFileColumnFamily.NAME)) {
                        loadedFiles.add(new Text(update.getColumnQualifier()));

                        if (!new String(update.getValue(), UTF_8).equals(tidString)) {
                            otherTidCount++;
                        }
                    }
                }

                if (!isSplitMutation && !isLocationMutation) {
                    long tid = Long.parseLong(tidString);

                    try {
                        if (otherTidCount > 0 || !dataFiles.equals(loadedFiles)
                                || !getArbitrator().transactionAlive(Constants.BULK_ARBITRATOR_TYPE, tid)) {
                            violations = addViolation(violations, 8);
                        }
                    } catch (Exception ex) {
                        violations = addViolation(violations, 8);
                    }
                }

                checkedBulk = true;
            }
        } else {
            if (!isValidColumn(columnUpdate)) {
                violations = addViolation(violations, 2);
            } else if (new ColumnFQ(columnUpdate).equals(TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN)
                    && columnUpdate.getValue().length > 0
                    && (violations == null || !violations.contains((short) 4))) {
                KeyExtent ke = new KeyExtent(new Text(mutation.getRow()), (Text) null);

                Text per = KeyExtent.decodePrevEndRow(new Value(columnUpdate.getValue()));

                boolean prevEndRowLessThanEndRow = per == null || ke.getEndRow() == null
                        || per.compareTo(ke.getEndRow()) < 0;

                if (!prevEndRowLessThanEndRow) {
                    violations = addViolation(violations, 3);
                }
            } else if (new ColumnFQ(columnUpdate).equals(TabletsSection.ServerColumnFamily.LOCK_COLUMN)) {
                if (zooCache == null) {
                    zooCache = new ZooCache();
                }

                if (zooRoot == null) {
                    zooRoot = ZooUtil.getRoot(HdfsZooInstance.getInstance());
                }

                boolean lockHeld = false;
                String lockId = new String(columnUpdate.getValue(), UTF_8);

                try {
                    lockHeld = ZooLock.isLockHeld(zooCache, new ZooUtil.LockID(zooRoot, lockId));
                } catch (Exception e) {
                    log.debug("Failed to verify lock was held {} {}", lockId, e.getMessage());
                }

                if (!lockHeld) {
                    violations = addViolation(violations, 7);
                }
            }

        }
    }

    if (violations != null) {
        log.debug("violating metadata mutation : {}", new String(mutation.getRow(), UTF_8));
        for (ColumnUpdate update : mutation.getUpdates()) {
            log.debug(" update: {}:{} value {}", new String(update.getColumnFamily(), UTF_8),
                    new String(update.getColumnQualifier(), UTF_8),
                    (update.isDeleted() ? "[delete]" : new String(update.getValue(), UTF_8)));
        }
    }

    return violations;
}

From source file:org.apache.accumulo.server.master.state.MetaDataTableScanner.java

License:Apache License

public static TabletLocationState createTabletLocationState(Key k, Value v)
        throws IOException, BadLocationStateException {
    final SortedMap<Key, Value> decodedRow = WholeRowIterator.decodeRow(k, v);
    KeyExtent extent = null;
    TServerInstance future = null;
    TServerInstance current = null;
    TServerInstance last = null;
    SuspendingTServer suspend = null;
    long lastTimestamp = 0;
    List<Collection<String>> walogs = new ArrayList<>();
    boolean chopped = false;

    for (Entry<Key, Value> entry : decodedRow.entrySet()) {

        Key key = entry.getKey();
        Text row = key.getRow();
        Text cf = key.getColumnFamily();
        Text cq = key.getColumnQualifier();

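        // dispatch on column family / qualifier to assemble the tablet's state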
        if (cf.compareTo(TabletsSection.FutureLocationColumnFamily.NAME) == 0) {
            TServerInstance location = new TServerInstance(entry.getValue(), cq);
            if (future != null) {
                throw new BadLocationStateException("found two assignments for the same extent " + key.getRow()
                        + ": " + future + " and " + location, entry.getKey().getRow());
            }
            future = location;
        } else if (cf.compareTo(TabletsSection.CurrentLocationColumnFamily.NAME) == 0) {
            TServerInstance location = new TServerInstance(entry.getValue(), cq);
            if (current != null) {
                throw new BadLocationStateException("found two locations for the same extent " + key.getRow()
                        + ": " + current + " and " + location, entry.getKey().getRow());
            }
            current = location;
        } else if (cf.compareTo(LogColumnFamily.NAME) == 0) {
            String[] split = entry.getValue().toString().split("\\|")[0].split(";");
            walogs.add(Arrays.asList(split));
        } else if (cf.compareTo(TabletsSection.LastLocationColumnFamily.NAME) == 0) {
            if (lastTimestamp < entry.getKey().getTimestamp())
                last = new TServerInstance(entry.getValue(), cq);
        } else if (cf.compareTo(ChoppedColumnFamily.NAME) == 0) {
            chopped = true;
        } else if (TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.equals(cf, cq)) {
            extent = new KeyExtent(row, entry.getValue());
        } else if (TabletsSection.SuspendLocationColumn.SUSPEND_COLUMN.equals(cf, cq)) {
            suspend = SuspendingTServer.fromValue(entry.getValue());
        }
    }
    if (extent == null) {
        String msg = "No prev-row for key extent " + decodedRow;
        log.error(msg);
        throw new BadLocationStateException(msg, k.getRow());
    }
    return new TabletLocationState(extent, future, current, last, suspend, walogs, chopped);
}

From source file:org.apache.accumulo.server.master.tableOps.CompactionDriver.java

License:Apache License

@Override
public long isReady(long tid, Master master) throws Exception {

    String zCancelID = Constants.ZROOT + "/" + HdfsZooInstance.getInstance().getInstanceID() + Constants.ZTABLES
            + "/" + tableId + Constants.ZTABLE_COMPACT_CANCEL_ID;

    IZooReaderWriter zoo = ZooReaderWriter.getRetryingInstance();

    if (Long.parseLong(new String(zoo.getData(zCancelID, null))) >= compactId) {
        // compaction was canceled
        throw new ThriftTableOperationException(tableId, null, TableOperation.COMPACT,
                TableOperationExceptionType.OTHER, "Compaction canceled");
    }

    MapCounter<TServerInstance> serversToFlush = new MapCounter<TServerInstance>();
    Connector conn = master.getConnector();
    Scanner scanner = new IsolatedScanner(conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY));

    Range range = new KeyExtent(new Text(tableId), null, startRow == null ? null : new Text(startRow))
            .toMetadataRange();

    if (tableId.equals(MetadataTable.ID))
        range = range.clip(new Range(RootTable.EXTENT.getMetadataEntry(), false, null, true));

    scanner.setRange(range);
    TabletsSection.ServerColumnFamily.COMPACT_COLUMN.fetch(scanner);
    TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(scanner);
    scanner.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);

    long t1 = System.currentTimeMillis();
    RowIterator ri = new RowIterator(scanner);

    int tabletsToWaitFor = 0;
    int tabletCount = 0;

    while (ri.hasNext()) {
        Iterator<Entry<Key, Value>> row = ri.next();
        long tabletCompactID = -1;

        TServerInstance server = null;

        Entry<Key, Value> entry = null;
        while (row.hasNext()) {
            entry = row.next();
            Key key = entry.getKey();

            if (TabletsSection.ServerColumnFamily.COMPACT_COLUMN.equals(key.getColumnFamily(),
                    key.getColumnQualifier()))
                tabletCompactID = Long.parseLong(entry.getValue().toString());

            if (TabletsSection.CurrentLocationColumnFamily.NAME.equals(key.getColumnFamily()))
                server = new TServerInstance(entry.getValue(), key.getColumnQualifier());
        }

        if (tabletCompactID < compactId) {
            tabletsToWaitFor++;
            if (server != null)
                serversToFlush.increment(server, 1);
        }

        tabletCount++;

        Text tabletEndRow = new KeyExtent(entry.getKey().getRow(), (Text) null).getEndRow();
        if (tabletEndRow == null || (endRow != null && tabletEndRow.compareTo(new Text(endRow)) >= 0))
            break;
    }

    long scanTime = System.currentTimeMillis() - t1;

    Instance instance = master.getInstance();
    Tables.clearCache(instance);
    if (tabletCount == 0 && !Tables.exists(instance, tableId))
        throw new ThriftTableOperationException(tableId, null, TableOperation.COMPACT,
                TableOperationExceptionType.NOTFOUND, null);

    if (serversToFlush.size() == 0 && Tables.getTableState(instance, tableId) == TableState.OFFLINE)
        throw new ThriftTableOperationException(tableId, null, TableOperation.COMPACT,
                TableOperationExceptionType.OFFLINE, null);

    if (tabletsToWaitFor == 0)
        return 0;

    for (TServerInstance tsi : serversToFlush.keySet()) {
        try {
            final TServerConnection server = master.getConnection(tsi);
            if (server != null)
                server.compact(master.getMasterLock(), tableId, startRow, endRow);
        } catch (TException ex) {
            Logger.getLogger(CompactionDriver.class).error(ex.toString());
        }
    }

    long sleepTime = 500;

    if (serversToFlush.size() > 0)
        sleepTime = Collections.max(serversToFlush.values()) * sleepTime; // make wait time depend on the server with the most to
                                                                          // compact

    sleepTime = Math.max(2 * scanTime, sleepTime);

    sleepTime = Math.min(sleepTime, 30000);

    return sleepTime;
}

From source file:org.apache.accumulo.server.master.tableOps.TableRangeOpWait.java

License:Apache License

@Override
public Repo<Master> call(long tid, Master env) throws Exception {

    if (RootTable.ID.equals(tableId) && TableOperation.MERGE.equals(op)) {
        log.warn("Attempt to merge tablets for " + RootTable.NAME + " does nothing. It is not splittable.");
    }

    Text start = startRow.length == 0 ? null : new Text(startRow);
    Text end = endRow.length == 0 ? null : new Text(endRow);
    Text tableIdText = new Text(tableId);

    if (start != null && end != null)
        if (start.compareTo(end) >= 0)
            throw new ThriftTableOperationException(tableId, null, TableOperation.MERGE,
                    TableOperationExceptionType.BAD_RANGE, "start row must be less than end row");

    env.mustBeOnline(tableId);

    MergeInfo info = env.getMergeInfo(tableIdText);

    if (info.getState() == MergeState.NONE) {
        KeyExtent range = new KeyExtent(tableIdText, end, start);
        env.setMergeState(new MergeInfo(range, op), MergeState.STARTED);
    }

    return new TableRangeOpWait(tableId);
}

From source file:org.apache.accumulo.server.test.functional.DeleteRowsSplitTest.java

License:Apache License

@Override
public void run() throws Exception {
    // Delete ranges of rows, and verify they are removed
    // Do this while adding many splits

    // Eliminate whole tablets
    for (int test = 0; test < 50; test++) {
        // create a table
        log.info("Test " + test);
        getConnector().tableOperations().create(TABLE);

        // put some data in it
        fillTable(TABLE);

        // generate a random delete range
        final Text start = new Text();
        final Text end = new Text();
        generateRandomRange(start, end);

        // initiate the delete range
        final boolean fail[] = { false };
        Thread t = new Thread() {
            public void run() {
                try {
                    // split the table
                    final SortedSet<Text> afterEnd = SPLITS.tailSet(new Text(end.toString() + "\0"));
                    getConnector().tableOperations().addSplits(TABLE, afterEnd);
                } catch (Exception ex) {
                    log.error(ex, ex);
                    synchronized (fail) {
                        fail[0] = true;
                    }
                }
            }
        };
        t.start();

        UtilWaitThread.sleep(test * 2);

        getConnector().tableOperations().deleteRows(TABLE, start, end);

        t.join();
        synchronized (fail) {
            assertTrue(!fail[0]);
        }

        // scan the table
        Scanner scanner = getConnector().createScanner(TABLE, Constants.NO_AUTHS);
        for (Entry<Key, Value> entry : scanner) {
            Text row = entry.getKey().getRow();
            assertTrue(row.compareTo(start) <= 0 || row.compareTo(end) > 0);
        }

        // delete the table
        getConnector().tableOperations().delete(TABLE);
    }
}

From source file:org.apache.accumulo.server.test.randomwalk.image.Verify.java

License:Apache License

@Override
public void visit(State state, Properties props) throws Exception {

    Random rand = new Random();

    int maxVerify = Integer.parseInt(props.getProperty("maxVerify"));
    int numVerifications = rand.nextInt(maxVerify - 1) + 1;

    indexTableName = state.getString("indexTableName");
    imageTableName = state.getString("imageTableName");

    Connector conn = state.getConnector();

    Scanner indexScanner = conn.createScanner(indexTableName, new Authorizations());
    Scanner imageScanner = conn.createScanner(imageTableName, new Authorizations());

    String uuid = UUID.randomUUID().toString();

    MessageDigest alg = MessageDigest.getInstance("SHA-1");
    alg.update(uuid.getBytes());

    indexScanner.setRange(new Range(new Text(alg.digest()), null));
    indexScanner.setBatchSize(numVerifications);

    Text curRow = null;
    int count = 0;
    for (Entry<Key, Value> entry : indexScanner) {

        curRow = entry.getKey().getRow();
        String rowToVerify = entry.getValue().toString();

        verifyRow(imageScanner, rowToVerify);

        count++;
        if (count == numVerifications) {
            break;
        }
    }

    if (count != numVerifications && curRow != null) {
        Text lastRow = (Text) state.get("lastIndexRow");
        if (lastRow.compareTo(curRow) != 0) {
            log.error("Verified only " + count + " of " + numVerifications + " - curRow " + curRow + " lastKey "
                    + lastRow);
        }
    }

    int verified = ((Integer) state.get("verified")).intValue() + numVerifications;
    log.debug("Verified " + numVerifications + " - Total " + verified);
    state.set("verified", new Integer(verified));
}

From source file:org.apache.accumulo.server.test.randomwalk.image.Write.java

License:Apache License

@Override
public void visit(State state, Properties props) throws Exception {

    MultiTableBatchWriter mtbw = state.getMultiTableBatchWriter();

    BatchWriter imagesBW = mtbw.getBatchWriter(state.getString("imageTableName"));
    BatchWriter indexBW = mtbw.getBatchWriter(state.getString("indexTableName"));

    String uuid = UUID.randomUUID().toString();
    Mutation m = new Mutation(new Text(uuid));

    // create a fake image between 4KB and 1MB
    int maxSize = Integer.parseInt(props.getProperty("maxSize"));
    int minSize = Integer.parseInt(props.getProperty("minSize"));

    Random rand = new Random();
    int numBytes = rand.nextInt((maxSize - minSize)) + minSize;
    byte[] imageBytes = new byte[numBytes];
    rand.nextBytes(imageBytes);
    m.put(CONTENT_COLUMN_FAMILY, IMAGE_COLUMN_QUALIFIER, new Value(imageBytes));

    // store size
    m.put(META_COLUMN_FAMILY, new Text("size"), new Value(String.format("%d", numBytes).getBytes()));

    // store hash
    MessageDigest alg = MessageDigest.getInstance("SHA-1");
    alg.update(imageBytes);
    byte[] hash = alg.digest();
    m.put(META_COLUMN_FAMILY, SHA1_COLUMN_QUALIFIER, new Value(hash));

    // update write counts
    state.set("numWrites", state.getInteger("numWrites") + 1);
    Integer totalWrites = state.getInteger("totalWrites") + 1;
    state.set("totalWrites", totalWrites);

    // set count
    m.put(META_COLUMN_FAMILY, COUNT_COLUMN_QUALIFIER, new Value(String.format("%d", totalWrites).getBytes()));

    // add mutation
    imagesBW.addMutation(m);

    // now add mutation to index
    Text row = new Text(hash);
    m = new Mutation(row);
    m.put(META_COLUMN_FAMILY, UUID_COLUMN_QUALIFIER, new Value(uuid.getBytes()));

    indexBW.addMutation(m);

    Text lastRow = (Text) state.get("lastIndexRow");
    if (lastRow.compareTo(row) < 0) {
        state.set("lastIndexRow", new Text(row));
    }
}

From source file:org.apache.accumulo.server.util.MetadataTable.java

License:Apache License

public static void splitDatafiles(Text table, Text midRow, double splitRatio,
        Map<String, FileUtil.FileInfo> firstAndLastRows, SortedMap<String, DataFileValue> datafiles,
        SortedMap<String, DataFileValue> lowDatafileSizes, SortedMap<String, DataFileValue> highDatafileSizes,
        List<String> highDatafilesToRemove) {

    for (Entry<String, DataFileValue> entry : datafiles.entrySet()) {

        Text firstRow = null;
        Text lastRow = null;

        boolean rowsKnown = false;

        FileUtil.FileInfo mfi = firstAndLastRows.get(entry.getKey());

        if (mfi != null) {
            firstRow = mfi.getFirstRow();
            lastRow = mfi.getLastRow();
            rowsKnown = true;
        }

        if (rowsKnown && firstRow.compareTo(midRow) > 0) {
            // only in high
            long highSize = entry.getValue().getSize();
            long highEntries = entry.getValue().getNumEntries();
            highDatafileSizes.put(entry.getKey(),
                    new DataFileValue(highSize, highEntries, entry.getValue().getTime()));
        } else if (rowsKnown && lastRow.compareTo(midRow) <= 0) {
            // only in low
            long lowSize = entry.getValue().getSize();
            long lowEntries = entry.getValue().getNumEntries();
            lowDatafileSizes.put(entry.getKey(),
                    new DataFileValue(lowSize, lowEntries, entry.getValue().getTime()));

            highDatafilesToRemove.add(entry.getKey());
        } else {
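            // rows unknown, or the file straddles midRow: estimate the low/high
            // portions of the size and entry count using splitRatio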
            long lowSize = (long) Math.floor((entry.getValue().getSize() * splitRatio));
            long lowEntries = (long) Math.floor((entry.getValue().getNumEntries() * splitRatio));
            lowDatafileSizes.put(entry.getKey(),
                    new DataFileValue(lowSize, lowEntries, entry.getValue().getTime()));

            long highSize = (long) Math.ceil((entry.getValue().getSize() * (1.0 - splitRatio)));
            long highEntries = (long) Math.ceil((entry.getValue().getNumEntries() * (1.0 - splitRatio)));
            highDatafileSizes.put(entry.getKey(),
                    new DataFileValue(highSize, highEntries, entry.getValue().getTime()));
        }
    }
}

From source file:org.apache.accumulo.server.util.MetadataTableUtil.java

License:Apache License

public static void splitDatafiles(String tableId, Text midRow, double splitRatio,
        Map<FileRef, FileUtil.FileInfo> firstAndLastRows, SortedMap<FileRef, DataFileValue> datafiles,
        SortedMap<FileRef, DataFileValue> lowDatafileSizes, SortedMap<FileRef, DataFileValue> highDatafileSizes,
        List<FileRef> highDatafilesToRemove) {

    for (Entry<FileRef, DataFileValue> entry : datafiles.entrySet()) {

        Text firstRow = null;
        Text lastRow = null;

        boolean rowsKnown = false;

        FileUtil.FileInfo mfi = firstAndLastRows.get(entry.getKey());

        if (mfi != null) {
            firstRow = mfi.getFirstRow();
            lastRow = mfi.getLastRow();
            rowsKnown = true;
        }

        if (rowsKnown && firstRow.compareTo(midRow) > 0) {
            // only in high
            long highSize = entry.getValue().getSize();
            long highEntries = entry.getValue().getNumEntries();
            highDatafileSizes.put(entry.getKey(),
                    new DataFileValue(highSize, highEntries, entry.getValue().getTime()));
        } else if (rowsKnown && lastRow.compareTo(midRow) <= 0) {
            // only in low
            long lowSize = entry.getValue().getSize();
            long lowEntries = entry.getValue().getNumEntries();
            lowDatafileSizes.put(entry.getKey(),
                    new DataFileValue(lowSize, lowEntries, entry.getValue().getTime()));

            highDatafilesToRemove.add(entry.getKey());
        } else {
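            // rows unknown, or the file straddles midRow: estimate the low/high
            // portions of the size and entry count using splitRatio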
            long lowSize = (long) Math.floor((entry.getValue().getSize() * splitRatio));
            long lowEntries = (long) Math.floor((entry.getValue().getNumEntries() * splitRatio));
            lowDatafileSizes.put(entry.getKey(),
                    new DataFileValue(lowSize, lowEntries, entry.getValue().getTime()));

            long highSize = (long) Math.ceil((entry.getValue().getSize() * (1.0 - splitRatio)));
            long highEntries = (long) Math.ceil((entry.getValue().getNumEntries() * (1.0 - splitRatio)));
            highDatafileSizes.put(entry.getKey(),
                    new DataFileValue(highSize, highEntries, entry.getValue().getTime()));
        }
    }
}

From source file:org.apache.accumulo.test.functional.DeleteRowsSplitIT.java

License:Apache License

@Test
public void run() throws Exception {
    // Delete ranges of rows, and verify they are removed
    // Do this while adding many splits
    final String tableName = getUniqueNames(1)[0];
    final Connector conn = getConnector();

    // Eliminate whole tablets
    for (int test = 0; test < 10; test++) {
        // create a table
        log.info("Test " + test);
        conn.tableOperations().create(tableName);

        // put some data in it
        fillTable(conn, tableName);

        // generate a random delete range
        final Text start = new Text();
        final Text end = new Text();
        generateRandomRange(start, end);

        // initiate the delete range
        final boolean fail[] = { false };
        Thread t = new Thread() {
            @Override
            public void run() {
                try {
                    // split the table
                    final SortedSet<Text> afterEnd = SPLITS.tailSet(new Text(end.toString() + "\0"));
                    conn.tableOperations().addSplits(tableName, afterEnd);
                } catch (Exception ex) {
                    log.error("Exception", ex);
                    synchronized (fail) {
                        fail[0] = true;
                    }
                }
            }
        };
        t.start();

        sleepUninterruptibly(test * 2, TimeUnit.MILLISECONDS);

        conn.tableOperations().deleteRows(tableName, start, end);

        t.join();
        synchronized (fail) {
            assertTrue(!fail[0]);
        }

        // scan the table
        Scanner scanner = conn.createScanner(tableName, Authorizations.EMPTY);
        for (Entry<Key, Value> entry : scanner) {
            Text row = entry.getKey().getRow();
            assertTrue(row.compareTo(start) <= 0 || row.compareTo(end) > 0);
        }

        // delete the table
        conn.tableOperations().delete(tableName);
    }
}