Example usage for java.util.concurrent.atomic AtomicLong AtomicLong(long)

Introduction

On this page you can find example usage for the java.util.concurrent.atomic.AtomicLong constructor AtomicLong(long initialValue).

Prototype

public AtomicLong(long initialValue) 

Document

Creates a new AtomicLong with the given initial value.
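
Before the project-level examples below, here is a minimal, self-contained sketch of what this constructor does in isolation: it seeds the value with a chosen starting point instead of the default of 0. The class and variable names are illustrative only; the AtomicLong methods shown are standard JDK API.

import java.util.concurrent.atomic.AtomicLong;

public class AtomicLongConstructorExample {
    public static void main(String[] args) {
        // Seed the counter at 100 instead of the default 0.
        AtomicLong counter = new AtomicLong(100L);

        long initial = counter.get();            // 100
        long next = counter.incrementAndGet();   // atomically 100 -> 101
        counter.addAndGet(10L);                  // atomically 101 -> 111

        // Replace the value only if it still holds the expected one.
        boolean swapped = counter.compareAndSet(111L, 0L);

        System.out.println(initial + " " + next + " " + swapped + " " + counter.get());
    }
}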

Usage

From source file:org.apache.hadoop.hbase.regionserver.TestHRegion.java

@Test
public void testRecoveredEditsReplayCompaction() throws Exception {
    String method = name.getMethodName();
    TableName tableName = TableName.valueOf(method);
    byte[] family = Bytes.toBytes("family");
    this.region = initHRegion(tableName, method, CONF, family);
    try {
        Path regiondir = region.getRegionFileSystem().getRegionDir();
        FileSystem fs = region.getRegionFileSystem().getFileSystem();
        byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();

        long maxSeqId = 3;
        long minSeqId = 0;

        for (long i = minSeqId; i < maxSeqId; i++) {
            Put put = new Put(Bytes.toBytes(i));
            put.add(family, Bytes.toBytes(i), Bytes.toBytes(i));
            region.put(put);
            region.flushcache();
        }

        // this will create a region with 3 files
        assertEquals(3, region.getStore(family).getStorefilesCount());
        List<Path> storeFiles = new ArrayList<Path>(3);
        for (StoreFile sf : region.getStore(family).getStorefiles()) {
            storeFiles.add(sf.getPath());
        }

        // disable compaction completion
        CONF.setBoolean("hbase.hstore.compaction.complete", false);
        region.compactStores();

        // ensure that nothing changed
        assertEquals(3, region.getStore(family).getStorefilesCount());

        // now find the compacted file, and manually add it to the recovered edits
        Path tmpDir = region.getRegionFileSystem().getTempDir();
        FileStatus[] files = FSUtils.listStatus(fs, tmpDir);
        String errorMsg = "Expected to find 1 file in the region temp directory "
                + "from the compaction, could not find any";
        assertNotNull(errorMsg, files);
        assertEquals(errorMsg, 1, files.length);
        // move the file inside region dir
        Path newFile = region.getRegionFileSystem().commitStoreFile(Bytes.toString(family), files[0].getPath());

        CompactionDescriptor compactionDescriptor = ProtobufUtil.toCompactionDescriptor(
                this.region.getRegionInfo(), family, storeFiles, Lists.newArrayList(newFile),
                region.getRegionFileSystem().getStoreDir(Bytes.toString(family)));

        HLogUtil.writeCompactionMarker(region.getLog(), this.region.getTableDesc(), this.region.getRegionInfo(),
                compactionDescriptor, new AtomicLong(1));

        Path recoveredEditsDir = HLogUtil.getRegionDirRecoveredEditsDir(regiondir);

        Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", 1000));
        fs.create(recoveredEdits);
        HLog.Writer writer = HLogFactory.createRecoveredEditsWriter(fs, recoveredEdits, CONF);

        long time = System.nanoTime();

        writer.append(
                new HLog.Entry(new HLogKey(regionName, tableName, 10, time, HConstants.DEFAULT_CLUSTER_ID),
                        WALEdit.createCompaction(compactionDescriptor)));
        writer.close();

        // close the region now, and reopen again
        region.getTableDesc();
        region.getRegionInfo();
        region.close();
        region = HRegion.openHRegion(region, null);

        // now check whether we have only one store file, the compacted one
        Collection<StoreFile> sfs = region.getStore(family).getStorefiles();
        for (StoreFile sf : sfs) {
            LOG.info(sf.getPath());
        }
        assertEquals(1, region.getStore(family).getStorefilesCount());
        files = FSUtils.listStatus(fs, tmpDir);
        assertTrue("Expected to find 0 files inside " + tmpDir, files == null || files.length == 0);

        for (long i = minSeqId; i < maxSeqId; i++) {
            Get get = new Get(Bytes.toBytes(i));
            Result result = region.get(get);
            byte[] value = result.getValue(family, Bytes.toBytes(i));
            assertArrayEquals(Bytes.toBytes(i), value);
        }
    } finally {
        HRegion.closeHRegion(this.region);
        this.region = null;
    }
}

From source file:org.apache.nifi.processors.standard.TailFile.java

private void processTailFile(final ProcessContext context, final ProcessSession session,
        final String tailFile) {
    // If user changes the file that is being tailed, we need to consume the already-rolled-over data according
    // to the Initial Start Position property
    boolean rolloverOccurred;
    TailFileObject tfo = states.get(tailFile);

    if (tfo.isTailFileChanged()) {
        rolloverOccurred = false;
        final String recoverPosition = context.getProperty(START_POSITION).getValue();

        if (START_BEGINNING_OF_TIME.getValue().equals(recoverPosition)) {
            recoverRolledFiles(context, session, tailFile, tfo.getExpectedRecoveryChecksum(),
                    tfo.getState().getTimestamp(), tfo.getState().getPosition());
        } else if (START_CURRENT_FILE.getValue().equals(recoverPosition)) {
            cleanup();
            tfo.setState(new TailFileState(tailFile, null, null, 0L, 0L, 0L, null, tfo.getState().getBuffer()));
        } else {
            final String filename = tailFile;
            final File file = new File(filename);

            try {
                final FileChannel fileChannel = FileChannel.open(file.toPath(), StandardOpenOption.READ);
                getLogger().debug("Created FileChannel {} for {}", new Object[] { fileChannel, file });

                final Checksum checksum = new CRC32();
                final long position = file.length();
                final long timestamp = file.lastModified();

                try (final InputStream fis = new FileInputStream(file);
                        final CheckedInputStream in = new CheckedInputStream(fis, checksum)) {
                    StreamUtils.copy(in, new NullOutputStream(), position);
                }

                fileChannel.position(position);
                cleanup();
                tfo.setState(new TailFileState(filename, file, fileChannel, position, timestamp, file.length(),
                        checksum, tfo.getState().getBuffer()));
            } catch (final IOException ioe) {
                getLogger().error(
                        "Attempted to position Reader at current position in file {} but failed to do so due to {}",
                        new Object[] { file, ioe.toString() }, ioe);
                context.yield();
                return;
            }
        }

        tfo.setTailFileChanged(false);
    } else {
        // Recover any data that may have rolled over since the last time that this processor ran.
        // If expectedRecoveryChecksum != null, that indicates that this is the first iteration since processor was started, so use whatever checksum value
        // was present when the state was last persisted. In this case, we must then null out the value so that the next iteration won't keep using the "recovered"
        // value. If the value is null, then we know that either the processor has already recovered that data, or there was no state persisted. In either case,
        // use whatever checksum value is currently in the state.
        Long expectedChecksumValue = tfo.getExpectedRecoveryChecksum();
        if (expectedChecksumValue == null) {
            expectedChecksumValue = tfo.getState().getChecksum() == null ? null
                    : tfo.getState().getChecksum().getValue();
        }

        rolloverOccurred = recoverRolledFiles(context, session, tailFile, expectedChecksumValue,
                tfo.getState().getTimestamp(), tfo.getState().getPosition());
        tfo.setExpectedRecoveryChecksum(null);
    }

    // initialize local variables from state object; this is done so that we can easily change the values throughout
    // the onTrigger method and then create a new state object after we finish processing the files.
    TailFileState state = tfo.getState();
    File file = state.getFile();
    FileChannel reader = state.getReader();
    Checksum checksum = state.getChecksum();
    if (checksum == null) {
        checksum = new CRC32();
    }
    long position = state.getPosition();
    long timestamp = state.getTimestamp();
    long length = state.getLength();

    // Create a reader if necessary.
    if (file == null || reader == null) {
        file = new File(tailFile);
        reader = createReader(file, position);
        if (reader == null) {
            context.yield();
            return;
        }
    }

    final long startNanos = System.nanoTime();

    // Check if file has rotated
    if (rolloverOccurred || (timestamp <= file.lastModified() && length > file.length())
            || (timestamp < file.lastModified() && length >= file.length())) {

        // Since file has rotated, we close the reader, create a new one, and then reset our state.
        try {
            reader.close();
            getLogger().debug("Closed FileChannel {}", new Object[] { reader, reader });
        } catch (final IOException ioe) {
            getLogger().warn("Failed to close reader for {} due to {}", new Object[] { file, ioe });
        }

        reader = createReader(file, 0L);
        position = 0L;
        checksum.reset();
    }

    if (file.length() == position || !file.exists()) {
        // no data to consume so rather than continually running, yield to allow other processors to use the thread.
        getLogger().debug("No data to consume; created no FlowFiles");
        tfo.setState(new TailFileState(tailFile, file, reader, position, timestamp, length, checksum,
                state.getBuffer()));
        persistState(tfo, context);
        context.yield();
        return;
    }

    // If there is data to consume, read as much as we can.
    final TailFileState currentState = state;
    final Checksum chksum = checksum;
    // data has been written to file. Stream it to a new FlowFile.
    FlowFile flowFile = session.create();

    final FileChannel fileReader = reader;
    final AtomicLong positionHolder = new AtomicLong(position);
    flowFile = session.write(flowFile, new OutputStreamCallback() {
        @Override
        public void process(final OutputStream rawOut) throws IOException {
            try (final OutputStream out = new BufferedOutputStream(rawOut)) {
                positionHolder.set(readLines(fileReader, currentState.getBuffer(), out, chksum));
            }
        }
    });

    // If there ended up being no data, just remove the FlowFile
    if (flowFile.getSize() == 0) {
        session.remove(flowFile);
        getLogger().debug("No data to consume; removed created FlowFile");
    } else {
        // determine filename for FlowFile by using <base filename of log file>.<initial offset>-<final offset>.<extension>
        final String tailFilename = file.getName();
        final String baseName = StringUtils.substringBeforeLast(tailFilename, ".");
        final String flowFileName;
        if (baseName.length() < tailFilename.length()) {
            flowFileName = baseName + "." + position + "-" + positionHolder.get() + "."
                    + StringUtils.substringAfterLast(tailFilename, ".");
        } else {
            flowFileName = baseName + "." + position + "-" + positionHolder.get();
        }

        final Map<String, String> attributes = new HashMap<>(3);
        attributes.put(CoreAttributes.FILENAME.key(), flowFileName);
        attributes.put(CoreAttributes.MIME_TYPE.key(), "text/plain");
        attributes.put("tailfile.original.path", tailFile);
        flowFile = session.putAllAttributes(flowFile, attributes);

        session.getProvenanceReporter().receive(flowFile, file.toURI().toString(),
                "FlowFile contains bytes " + position + " through " + positionHolder.get() + " of source file",
                TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos));
        session.transfer(flowFile, REL_SUCCESS);
        position = positionHolder.get();

        // Set timestamp to the latest of when the file was modified and the current timestamp stored in the state.
        // We do this because when we read a file that has been rolled over, we set the state to 1 millisecond later than the last mod date
        // in order to avoid ingesting that file again. If we then read from this file during the same second (or millisecond, depending on the
        // operating system file last mod precision), then we could set the timestamp to a smaller value, which could result in reading in the
        // rotated file a second time.
        timestamp = Math.max(state.getTimestamp(), file.lastModified());
        length = file.length();
        getLogger().debug("Created {} and routed to success", new Object[] { flowFile });
    }

    // Create a new state object to represent our current position, timestamp, etc.
    tfo.setState(new TailFileState(tailFile, file, reader, position, timestamp, length, checksum,
            state.getBuffer()));

    // We must commit session before persisting state in order to avoid data loss on restart
    session.commit();
    persistState(tfo, context);
}
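
In the example above, new AtomicLong(position) serves as a mutable holder: the anonymous OutputStreamCallback can only capture (effectively) final local variables, so the callback writes the new file offset into the AtomicLong and the enclosing method reads it back afterwards. A stripped-down sketch of that holder pattern, with hypothetical names and no NiFi dependencies, might look like this:

import java.util.concurrent.atomic.AtomicLong;

public class PositionHolderExample {

    // Hypothetical stand-in for NiFi's OutputStreamCallback.
    interface Callback {
        void process() throws Exception;
    }

    static void runCallback(Callback callback) throws Exception {
        callback.process();
    }

    public static void main(String[] args) throws Exception {
        final long startPosition = 42L;

        // The reference is final, so the anonymous class may capture it,
        // but the value inside can still be updated from within the callback.
        final AtomicLong positionHolder = new AtomicLong(startPosition);

        runCallback(new Callback() {
            @Override
            public void process() {
                // Pretend 100 bytes were consumed from the file.
                positionHolder.set(startPosition + 100);
            }
        });

        System.out.println("new position = " + positionHolder.get());
    }
}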

From source file:com.twitter.distributedlog.BKLogHandler.java

/**
 * Get a list of all segments in the journal.
 */
protected List<LogSegmentMetadata> forceGetLedgerList(final Comparator<LogSegmentMetadata> comparator,
        final LogSegmentFilter segmentFilter, boolean throwOnEmpty) throws IOException {
    final List<LogSegmentMetadata> ledgers = new ArrayList<LogSegmentMetadata>();
    final AtomicInteger result = new AtomicInteger(-1);
    final CountDownLatch latch = new CountDownLatch(1);
    Stopwatch stopwatch = Stopwatch.createStarted();
    asyncGetLedgerListInternal(comparator, segmentFilter, null,
            new GenericCallback<List<LogSegmentMetadata>>() {
                @Override
                public void operationComplete(int rc, List<LogSegmentMetadata> logSegmentMetadatas) {
                    result.set(rc);
                    if (KeeperException.Code.OK.intValue() == rc) {
                        ledgers.addAll(logSegmentMetadatas);
                    } else {
                        LOG.error("Failed to get ledger list for {} : with error {}", getFullyQualifiedName(),
                                rc);
                    }
                    latch.countDown();
                }
            }, new AtomicInteger(conf.getZKNumRetries()), new AtomicLong(conf.getZKRetryBackoffStartMillis()));
    try {
        latch.await();
    } catch (InterruptedException e) {
        forceGetListStat.registerFailedEvent(stopwatch.stop().elapsed(TimeUnit.MICROSECONDS));
        throw new DLInterruptedException(
                "Interrupted on reading ledger list from zkfor " + getFullyQualifiedName(), e);
    }
    long elapsedMicros = stopwatch.stop().elapsed(TimeUnit.MICROSECONDS);

    KeeperException.Code rc = KeeperException.Code.get(result.get());
    if (rc == KeeperException.Code.OK) {
        forceGetListStat.registerSuccessfulEvent(elapsedMicros);
    } else {
        forceGetListStat.registerFailedEvent(elapsedMicros);
        if (KeeperException.Code.NONODE == rc) {
            throw new LogNotFoundException("Log " + getFullyQualifiedName() + " is not found");
        } else {
            throw new IOException("ZK Exception " + rc + " reading ledger list for " + getFullyQualifiedName());
        }
    }

    if (throwOnEmpty && ledgers.isEmpty()) {
        throw new LogEmptyException("Log " + getFullyQualifiedName() + " is empty");
    }
    return ledgers;
}
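
The call above passes new AtomicInteger(conf.getZKNumRetries()) and new AtomicLong(conf.getZKRetryBackoffStartMillis()) into the asynchronous listing routine, so the remaining retry count and the current backoff delay can be carried across retries as mutable state. As a rough, hypothetical illustration of that idea (not DistributedLog's actual retry logic), a retry helper might consume the two holders like this:

import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;

public class RetryHoldersExample {

    // Hypothetical operation that fails a few times before succeeding.
    static boolean attemptOperation(int callNumber) {
        return callNumber >= 3;
    }

    static void runWithRetries(AtomicInteger retriesLeft, AtomicLong backoffMs) throws InterruptedException {
        int call = 1;
        while (!attemptOperation(call++)) {
            if (retriesLeft.decrementAndGet() < 0) {
                throw new IllegalStateException("out of retries");
            }
            Thread.sleep(backoffMs.get());
            // Grow the backoff for the next attempt; the caller sees the updated value too.
            backoffMs.set(backoffMs.get() * 2);
        }
    }

    public static void main(String[] args) throws InterruptedException {
        AtomicInteger retries = new AtomicInteger(5);   // e.g. a configured retry count
        AtomicLong backoff = new AtomicLong(10L);       // e.g. a configured starting backoff in ms
        runWithRetries(retries, backoff);
        System.out.println("retries left = " + retries.get() + ", backoff now = " + backoff.get() + " ms");
    }
}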

From source file:org.apereo.portal.io.xml.JaxbPortalDataHandlerService.java

@Override
public void exportAllDataOfType(Set<String> typeIds, File directory, BatchExportOptions options) {
    final Queue<ExportFuture<?>> exportFutures = new ConcurrentLinkedQueue<ExportFuture<?>>();
    final boolean failOnError = options != null ? options.isFailOnError() : true;

    //Determine the parent directory to log to
    final File logDirectory = determineLogDirectory(options, "export");

    //Setup reporting file
    final File exportReport = new File(logDirectory, "data-export.txt");
    final PrintWriter reportWriter;
    try {
        reportWriter = new PrintWriter(new BufferedWriter(new FileWriter(exportReport)));
    } catch (IOException e) {
        throw new RuntimeException("Failed to create FileWriter for: " + exportReport, e);
    }

    try {
        for (final String typeId : typeIds) {
            final List<FutureHolder<?>> failedFutures = new LinkedList<FutureHolder<?>>();

            final File typeDir = new File(directory, typeId);
            logger.info("Adding all data of type {} to export queue: {}", typeId, typeDir);

            reportWriter.println(typeId + "," + typeDir);

            final Iterable<? extends IPortalData> dataForType = this.getPortalData(typeId);
            for (final IPortalData data : dataForType) {
                final String dataId = data.getDataId();

                //Check for completed futures on every iteration, needed to fail as fast as possible on an export exception
                final List<FutureHolder<?>> newFailed = waitForFutures(exportFutures, reportWriter,
                        logDirectory, false);
                failedFutures.addAll(newFailed);

                final AtomicLong exportTime = new AtomicLong(-1);

                //Create export task
                Callable<Object> task = new CallableWithoutResult() {
                    @Override
                    protected void callWithoutResult() {
                        exportTime.set(System.nanoTime());
                        try {
                            exportData(typeId, dataId, typeDir);
                        } finally {
                            exportTime.set(System.nanoTime() - exportTime.get());
                        }
                    }
                };

                //Submit the export task
                final Future<?> exportFuture = this.importExportThreadPool.submit(task);

                //Add the future for tracking
                final ExportFuture futureHolder = new ExportFuture(exportFuture, typeId, dataId, exportTime);
                exportFutures.offer(futureHolder);
            }

            final List<FutureHolder<?>> newFailed = waitForFutures(exportFutures, reportWriter, logDirectory,
                    true);
            failedFutures.addAll(newFailed);

            reportWriter.flush();

            if (failOnError && !failedFutures.isEmpty()) {
                throw new RuntimeException(failedFutures.size() + " " + typeId + " entities failed to export.\n"
                        + "\tPer entity exception logs and a full report can be found in " + logDirectory);
            }
        }
    } catch (InterruptedException e) {
        throw new RuntimeException("Interrupted while waiting for entities to export", e);
    } finally {
        IOUtils.closeQuietly(reportWriter);
    }
}

From source file:com.cyberway.issue.crawler.frontier.AbstractFrontier.java

private void readObject(java.io.ObjectInputStream in) throws IOException, ClassNotFoundException {
    in.defaultReadObject();
    liveQueuedUriCount = new AtomicLong(queuedUriCount);
    liveSucceededFetchCount = new AtomicLong(succeededFetchCount);
    liveFailedFetchCount = new AtomicLong(failedFetchCount);
    liveDisregardedUriCount = new AtomicLong(disregardedUriCount);
}
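
The readObject example above rebuilds the live AtomicLong counters from the corresponding persisted count fields during deserialization. A condensed, hypothetical sketch of that pattern (field and method names are made up):

import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.Serializable;
import java.util.concurrent.atomic.AtomicLong;

public class CounterSnapshotExample implements Serializable {

    private static final long serialVersionUID = 1L;

    // Serialized snapshot value.
    private long queuedCount;

    // Live counter; not serialized, rebuilt on deserialization.
    private transient AtomicLong liveQueuedCount = new AtomicLong(0);

    public void enqueue() {
        liveQueuedCount.incrementAndGet();
    }

    // Capture the live value into the serializable field before writing the object out.
    public void snapshot() {
        queuedCount = liveQueuedCount.get();
    }

    private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
        in.defaultReadObject();
        // Re-seed the live counter from the persisted snapshot, as AbstractFrontier does.
        liveQueuedCount = new AtomicLong(queuedCount);
    }
}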

From source file:org.elasticsearch.test.ElasticsearchIntegrationTest.java

/**
 * Waits until at least a given number of documents is visible to searchers.
 *
 * @param numDocs         number of documents to wait for
 * @param maxWaitTime     if no progress has been made during this time, fail the test
 * @param maxWaitTimeUnit the unit in which maxWaitTime is specified
 * @param indexer         a {@link org.elasticsearch.test.BackgroundIndexer}. If supplied, it is checked first for indexed documents.
 *                        This saves on unneeded searches.
 * @return the actual number of docs seen.
 * @throws InterruptedException
 */
public long waitForDocs(final long numDocs, int maxWaitTime, TimeUnit maxWaitTimeUnit,
        final @Nullable BackgroundIndexer indexer) throws InterruptedException {
    final AtomicLong lastKnownCount = new AtomicLong(-1);
    long lastStartCount = -1;
    Predicate<Object> testDocs = new Predicate<Object>() {
        @Override
        public boolean apply(Object o) {
            if (indexer != null) {
                lastKnownCount.set(indexer.totalIndexedDocs());
            }
            if (lastKnownCount.get() >= numDocs) {
                try {
                    long count = client().prepareCount().setQuery(matchAllQuery()).execute().actionGet()
                            .getCount();
                    if (count == lastKnownCount.get()) {
                        // no progress - try to refresh for the next time
                        client().admin().indices().prepareRefresh().get();
                    }
                    lastKnownCount.set(count);
                } catch (Throwable e) { // count now acts like search and barfs if all shards failed...
                    logger.debug("failed to executed count", e);
                    return false;
                }
                logger.debug("[{}] docs visible for search. waiting for [{}]", lastKnownCount.get(), numDocs);
            } else {
                logger.debug("[{}] docs indexed. waiting for [{}]", lastKnownCount.get(), numDocs);
            }
            return lastKnownCount.get() >= numDocs;
        }
    };

    while (!awaitBusy(testDocs, maxWaitTime, maxWaitTimeUnit)) {
        if (lastStartCount == lastKnownCount.get()) {
            // we didn't make any progress
            fail("failed to reach " + numDocs + "docs");
        }
        lastStartCount = lastKnownCount.get();
    }
    return lastKnownCount.get();
}

From source file:org.apache.hadoop.hbase.regionserver.wal.TestHLogSplit.java

@Test(timeout = 300000)
@Ignore("Need HADOOP-6886, HADOOP-6840, & HDFS-617 for this. HDFS 0.20.205.1+ should have this")
public void testLogRollAfterSplitStart() throws IOException {
    HLog log = null;
    String logName = "testLogRollAfterSplitStart";
    Path thisTestsDir = new Path(HBASEDIR, logName);

    try {
        // put some entries in an HLog
        TableName tableName = TableName.valueOf(this.getClass().getName());
        HRegionInfo regioninfo = new HRegionInfo(tableName, HConstants.EMPTY_START_ROW,
                HConstants.EMPTY_END_ROW);
        log = HLogFactory.createHLog(fs, HBASEDIR, logName, conf);
        final AtomicLong sequenceId = new AtomicLong(1);

        final int total = 20;
        for (int i = 0; i < total; i++) {
            WALEdit kvs = new WALEdit();
            kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), tableName.getName()));
            HTableDescriptor htd = new HTableDescriptor(tableName);
            htd.addFamily(new HColumnDescriptor("column"));
            log.append(regioninfo, tableName, kvs, System.currentTimeMillis(), htd, sequenceId);
        }
        // Send the data to HDFS datanodes and close the HDFS writer
        log.sync();
        ((FSHLog) log).replaceWriter(((FSHLog) log).getOldPath(), null, null, null);

        /* code taken from ProcessServerShutdown.process()
         * handles RS shutdowns (as observed by the Master)
         */
        // rename the directory so a rogue RS doesn't create more HLogs
        Path rsSplitDir = new Path(thisTestsDir.getParent(), thisTestsDir.getName() + "-splitting");
        fs.rename(thisTestsDir, rsSplitDir);
        LOG.debug("Renamed region directory: " + rsSplitDir);

        // Process the old log files
        HLogSplitter.split(HBASEDIR, rsSplitDir, OLDLOGDIR, fs, conf);

        // Now, try to roll the HLog and verify failure
        try {
            log.rollWriter();
            Assert.fail("rollWriter() did not throw any exception.");
        } catch (IOException ioe) {
            if (ioe.getCause().getMessage().contains("FileNotFound")) {
                LOG.info("Got the expected exception: ", ioe.getCause());
            } else {
                Assert.fail("Unexpected exception: " + ioe);
            }
        }
    } finally {
        if (log != null) {
            log.close();
        }
        if (fs.exists(thisTestsDir)) {
            fs.delete(thisTestsDir, true);
        }
    }
}
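
The test above hands new AtomicLong(1) to log.append(...) as the region's sequence-id holder, so successive appends can draw monotonically increasing ids from one shared counter. A generic, hypothetical sketch of that counter-as-id-source idea (not HBase's actual WAL internals):

import java.util.concurrent.atomic.AtomicLong;

public class SequenceIdSourceExample {

    // Hypothetical appender that stamps each entry with the next id from a shared counter.
    static void append(String edit, AtomicLong sequenceId) {
        long seq = sequenceId.incrementAndGet();
        System.out.println("seq=" + seq + " edit=" + edit);
    }

    public static void main(String[] args) {
        // Seed the counter, as the test does with new AtomicLong(1).
        final AtomicLong sequenceId = new AtomicLong(1);

        for (int i = 0; i < 3; i++) {
            append("edit-" + i, sequenceId);
        }
        // The caller can read the highest id handed out so far.
        System.out.println("last sequence id = " + sequenceId.get());
    }
}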

From source file:org.apache.hadoop.hbase.regionserver.wal.TestHLog.java

/**
 * Tests wal archiving by adding data, doing flushing/rolling and checking we archive old logs
 * and also don't archive "live logs" (that is, a log with un-flushed entries).
 * <p>
 * This is what it does:
 * It creates two regions, and does a series of inserts along with log rolling.
 * Whenever a WAL is rolled, FSHLog checks previous wals for archiving. A wal is eligible for
 * archiving if all the regions that have entries in that wal file have flushed past
 * their maximum sequence id in that wal file.
 * <p>
 * @throws IOException
 */
@Test
public void testWALArchiving() throws IOException {
    LOG.debug("testWALArchiving");
    TableName table1 = TableName.valueOf("t1");
    TableName table2 = TableName.valueOf("t2");
    HLog hlog = HLogFactory.createHLog(fs, FSUtils.getRootDir(conf), dir.toString(), conf);
    try {
        assertEquals(0, ((FSHLog) hlog).getNumRolledLogFiles());
        HRegionInfo hri1 = new HRegionInfo(table1, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
        HRegionInfo hri2 = new HRegionInfo(table2, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
        // ensure that we don't split the regions.
        hri1.setSplit(false);
        hri2.setSplit(false);
        // variables to mock region sequenceIds.
        final AtomicLong sequenceId1 = new AtomicLong(1);
        final AtomicLong sequenceId2 = new AtomicLong(1);
        // start with the testing logic: insert a waledit, and roll writer
        addEdits(hlog, hri1, table1, 1, sequenceId1);
        hlog.rollWriter();
        // assert that the wal is rolled
        assertEquals(1, ((FSHLog) hlog).getNumRolledLogFiles());
        // add edits in the second wal file, and roll writer.
        addEdits(hlog, hri1, table1, 1, sequenceId1);
        hlog.rollWriter();
        // assert that the wal is rolled
        assertEquals(2, ((FSHLog) hlog).getNumRolledLogFiles());
        // add a waledit to table1, and flush the region.
        addEdits(hlog, hri1, table1, 3, sequenceId1);
        flushRegion(hlog, hri1.getEncodedNameAsBytes());
        // roll log; all old logs should be archived.
        hlog.rollWriter();
        assertEquals(0, ((FSHLog) hlog).getNumRolledLogFiles());
        // add an edit to table2, and roll writer
        addEdits(hlog, hri2, table2, 1, sequenceId2);
        hlog.rollWriter();
        assertEquals(1, ((FSHLog) hlog).getNumRolledLogFiles());
        // add edits for table1, and roll writer
        addEdits(hlog, hri1, table1, 2, sequenceId1);
        hlog.rollWriter();
        assertEquals(2, ((FSHLog) hlog).getNumRolledLogFiles());
        // add edits for table2, and flush hri1.
        addEdits(hlog, hri2, table2, 2, sequenceId2);
        flushRegion(hlog, hri1.getEncodedNameAsBytes());
        // the log : region-sequenceId map is
        // log1: region2 (unflushed)
        // log2: region1 (flushed)
        // log3: region2 (unflushed)
        // roll the writer; log2 should be archived.
        hlog.rollWriter();
        assertEquals(2, ((FSHLog) hlog).getNumRolledLogFiles());
        // flush region2, and all logs should be archived.
        addEdits(hlog, hri2, table2, 2, sequenceId2);
        flushRegion(hlog, hri2.getEncodedNameAsBytes());
        hlog.rollWriter();
        assertEquals(0, ((FSHLog) hlog).getNumRolledLogFiles());
    } finally {
        if (hlog != null)
            hlog.close();
    }
}

From source file:org.apache.hadoop.hbase.master.TestDistributedLogSplitting.java

@Test(timeout = 300000)
public void testSameVersionUpdatesRecovery() throws Exception {
    LOG.info("testSameVersionUpdatesRecovery");
    conf.setLong("hbase.regionserver.hlog.blocksize", 15 * 1024);
    conf.setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, true);
    conf.setInt("hfile.format.version", 3);
    startCluster(NUM_RS);
    final AtomicLong sequenceId = new AtomicLong(100);
    final int NUM_REGIONS_TO_CREATE = 40;
    final int NUM_LOG_LINES = 1000;
    // turn off load balancing to prevent regions from moving around otherwise
    // they will consume recovered.edits
    master.balanceSwitch(false);

    List<RegionServerThread> rsts = cluster.getLiveRegionServerThreads();
    final ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "table-creation", null);
    HTable ht = installTable(zkw, "table", "family", NUM_REGIONS_TO_CREATE);

    List<HRegionInfo> regions = null;
    HRegionServer hrs = null;
    for (int i = 0; i < NUM_RS; i++) {
        boolean isCarryingMeta = false;
        hrs = rsts.get(i).getRegionServer();
        regions = ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices());
        for (HRegionInfo region : regions) {
            if (region.isMetaRegion()) {
                isCarryingMeta = true;
                break;
            }
        }
        if (isCarryingMeta) {
            continue;
        }
        break;
    }

    LOG.info("#regions = " + regions.size());
    Iterator<HRegionInfo> it = regions.iterator();
    while (it.hasNext()) {
        HRegionInfo region = it.next();
        if (region.isMetaTable()
                || region.getEncodedName().equals(HRegionInfo.FIRST_META_REGIONINFO.getEncodedName())) {
            it.remove();
        }
    }
    if (regions.size() == 0)
        return;
    HRegionInfo curRegionInfo = regions.get(0);
    byte[] startRow = curRegionInfo.getStartKey();
    if (startRow == null || startRow.length == 0) {
        startRow = new byte[] { 0, 0, 0, 0, 1 };
    }
    byte[] row = Bytes.incrementBytes(startRow, 1);
    // use last 5 bytes because HBaseTestingUtility.createMultiRegions uses 5-byte keys
    row = Arrays.copyOfRange(row, 3, 8);
    long value = 0;
    byte[] tableName = Bytes.toBytes("table");
    byte[] family = Bytes.toBytes("family");
    byte[] qualifier = Bytes.toBytes("c1");
    long timeStamp = System.currentTimeMillis();
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
    htd.addFamily(new HColumnDescriptor(family));
    for (int i = 0; i < NUM_LOG_LINES; i += 1) {
        WALEdit e = new WALEdit();
        value++;
        e.add(new KeyValue(row, family, qualifier, timeStamp, Bytes.toBytes(value)));
        hrs.getWAL().append(curRegionInfo, TableName.valueOf(tableName), e, System.currentTimeMillis(), htd,
                sequenceId);
    }
    hrs.getWAL().sync();
    hrs.getWAL().close();

    // wait for abort to complete
    this.abortRSAndWaitForRecovery(hrs, zkw, NUM_REGIONS_TO_CREATE);

    // verify we got the last value
    LOG.info("Verification Starts...");
    Get g = new Get(row);
    Result r = ht.get(g);
    long theStoredVal = Bytes.toLong(r.getValue(family, qualifier));
    assertEquals(value, theStoredVal);

    // after flush
    LOG.info("Verification after flush...");
    TEST_UTIL.getHBaseAdmin().flush(tableName);
    r = ht.get(g);
    theStoredVal = Bytes.toLong(r.getValue(family, qualifier));
    assertEquals(value, theStoredVal);
    ht.close();
}