Example usage for java.util.concurrent.atomic AtomicLong get

Introduction

On this page you can find example usage for java.util.concurrent.atomic.AtomicLong.get().

Prototype

public final long get() 

Document

Returns the current value, with memory effects as specified by VarHandle#getVolatile.
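
For orientation, here is a minimal, self-contained sketch (not taken from any of the source files below, assuming Java 8+) of the basic pattern: a worker thread updates an AtomicLong while another thread reads the current value with get(), which is a volatile read and therefore observes the worker's latest committed updates without extra synchronization.

import java.util.concurrent.atomic.AtomicLong;

public class AtomicLongGetExample {
    public static void main(String[] args) throws InterruptedException {
        final AtomicLong processed = new AtomicLong(0);

        // Worker thread: counts items as it "processes" them.
        Thread worker = new Thread(() -> {
            for (int i = 0; i < 1_000; i++) {
                processed.incrementAndGet();
            }
        });
        worker.start();

        // Monitor loop: get() is a volatile read, so it sees the worker's
        // most recent value without any additional locking.
        while (worker.isAlive()) {
            System.out.println("processed so far: " + processed.get());
            Thread.sleep(10);
        }
        worker.join();

        System.out.println("total processed: " + processed.get());
    }
}

The usage examples below follow the same idea at larger scale, typically using AtomicLong either as a shared counter read back with get() or as an "out" parameter filled in by a callee.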

Usage

From source file:com.microsoft.tfs.core.clients.versioncontrol.internal.localworkspace.LocalDataAccessLayer.java

public static void afterCheckin(final Workspace workspace, final int changesetId, final Calendar checkinDate,
        final GetOperation[] localVersionUpdates, final PendingChange[] newPendingChanges,
        final UploadedBaselinesCollection uploadedBaselinesCollection) {
    final List<BaselineRequest> baselineRequests = new ArrayList<BaselineRequest>();
    final Map<GetOperation, Long> operationLengths = new HashMap<GetOperation, Long>();

    final LocalWorkspaceTransaction transaction = new LocalWorkspaceTransaction(workspace);
    try {
        transaction.execute(new AllTablesTransaction() {
            @Override
            public void invoke(final LocalWorkspaceProperties wp, final WorkspaceVersionTable lv,
                    final LocalPendingChangesTable pc) {
                for (final GetOperation operation : localVersionUpdates) {
                    final WorkspaceLocalItem lvEntry = lv.getByServerItem(operation.getSourceServerItem(),
                            operation.getVersionLocal() != 0);

                    if (null != lvEntry) {
                        BaselineRequest baselineRequest = null;

                        if (null == operation.getTargetLocalItem() && null != lvEntry.getBaselineFileGUID()) {
                            baselineRequest = BaselineRequest.makeRemoveRequest(lvEntry.getBaselineFileGUID());
                        } else if (operation.getChangeType().contains(ChangeType.EDIT)) {
                            final long uncompressedLength;
                            final byte[] uploadedBaselineFileGuid;

                            if (null != uploadedBaselinesCollection) {
                                final String targetItem = operation.getTargetLocalItem();
                                final AtomicLong out = new AtomicLong(-1);

                                uploadedBaselineFileGuid = uploadedBaselinesCollection
                                        .getUploadedBaseline(targetItem, out);
                                uncompressedLength = out.get();
                            } else {
                                uploadedBaselineFileGuid = null;
                                uncompressedLength = -1;
                            }

                            // Did UploadChanges save gzipped content for
                            // this item for us? If so, we don't have to
                            // re-gzip the content on disk.
                            if (null != uploadedBaselineFileGuid && uncompressedLength >= 0) {
                                operation.setBaselineFileGUID(uploadedBaselineFileGuid);
                                operationLengths.put(operation, uncompressedLength);

                                // Remove the entry from the collection so
                                // that cleanup code in CheckIn will not
                                // delete the baseline file from disk on the
                                // way out.
                                uploadedBaselinesCollection
                                        .removeUploadedBaseline(operation.getTargetLocalItem());
                            } else {
                                operation.setBaselineFileGUID(GUID.newGUID().getGUIDBytes());

                                baselineRequest = BaselineRequest.fromDisk(operation.getBaselineFileGUID(),
                                        operation.getTargetLocalItem(), operation.getTargetLocalItem(),
                                        operation.getHashValue());

                                baselineRequests.add(baselineRequest);

                                try {
                                    // The server doesn't supply the length
                                    // of the content we just committed.
                                    // We'll go grab it from the disk here.
                                    // There is a race here where the
                                    // content could have already changed on
                                    // disk since we uploaded it. This could
                                    // be corrected by having the server
                                    // return the length of each piece of
                                    // content it committed.
                                    final File file = new File(operation.getTargetLocalItem());
                                    operationLengths.put(operation, new Long(file.length()));
                                } catch (final Throwable t) {
                                    log.warn(t);
                                    operationLengths.put(operation, new Long(-1));
                                }
                            }
                        } else {
                            operation.setBaselineFileGUID(lvEntry.getBaselineFileGUID());
                            operationLengths.put(operation, new Long(lvEntry.getLength()));
                        }
                    } else if (operation.getTargetLocalItem() != null
                            && operation.getTargetLocalItem().length() > 0) {
                        boolean setTargetLocalItemToNull = true;

                        if (ItemType.FOLDER == operation.getItemType() && null == operation.getSourceLocalItem()
                                && operation.getChangeType().contains(ChangeType.ADD)) {
                            final File directory = new File(operation.getTargetLocalItem());
                            if (!directory.exists()) {
                                try {
                                    directory.mkdirs();
                                    setTargetLocalItemToNull = false;
                                } catch (final Throwable t) {
                                    log.warn(t);
                                }
                            } else {
                                setTargetLocalItemToNull = false;
                            }
                        }

                        if (setTargetLocalItemToNull) {
                            operation.setTargetLocalItem(null);
                        }
                    }
                }

                pc.replacePendingChanges(newPendingChanges);

                if (baselineRequests.size() != 0) {
                    new BaselineFolderCollection(workspace, wp.getBaselineFolders())
                            .processBaselineRequests(workspace, baselineRequests);
                }
            }
        });
    } finally {
        try {
            transaction.close();
        } catch (final IOException e) {
            throw new VersionControlException(e);
        }
    }

    // As a final step, take the GetOperations which were returned by
    // CheckIn and process them.
    final UpdateLocalVersionQueue ulvQueue = new UpdateLocalVersionQueue(workspace,
            UpdateLocalVersionQueueOptions.UPDATE_BOTH, null, 5000, 10000, Integer.MAX_VALUE);

    try {
        for (final GetOperation operation : localVersionUpdates) {
            ClientLocalVersionUpdate lvUpdate;

            final String targetLocalItem = operation.getTargetLocalItem();
            if (targetLocalItem == null || targetLocalItem.length() == 0) {
                lvUpdate = new ClientLocalVersionUpdate(operation.getSourceServerItem(), operation.getItemID(),
                        null, operation.getVersionLocal(), null);
            } else {
                Long length = operationLengths.get(operation);
                if (length == null) {
                    length = new Long(-1);
                }

                /*
                 * TODO get the current item (after check-in) properties
                 * back on the get operation from the server instead of
                 * detecting them from the filesystem. Getting them from the
                 * filesystem isn't too bad because it should accurately
                 * reflect what we just checked in.
                 */
                PropertyValue[] detectedItemProperties = null;
                if (Platform.isCurrentPlatform(Platform.GENERIC_UNIX)) {
                    final FileSystemAttributes attrs = FileSystemUtils.getInstance()
                            .getAttributes(targetLocalItem);
                    if (attrs.exists()) {
                        if (attrs.isSymbolicLink()) {
                            detectedItemProperties = new PropertyValue[] { PropertyConstants.IS_SYMLINK };
                        } else if (!attrs.isDirectory() && attrs.isExecutable()) {
                            detectedItemProperties = new PropertyValue[] {
                                    PropertyConstants.EXECUTABLE_ENABLED_VALUE };
                        }
                    }
                }

                lvUpdate = new ClientLocalVersionUpdate(operation.getTargetServerItem(), operation.getItemID(),
                        operation.getTargetLocalItem(), operation.getVersionServer(), checkinDate,
                        operation.getEncoding(), operation.getHashValue(),
                        // Not supplied by the server; retrieved above from
                        // the disk
                        length.longValue(),
                        // Not supplied by the server; tagged onto the
                        // GetOperation by
                        // the transaction earlier in this method
                        operation.getBaselineFileGUID(), null /* pendingChangeTargetServerItem */,
                        detectedItemProperties);

                if (workspace.getOptions().contains(WorkspaceOptions.SET_FILE_TO_CHECKIN)) {
                    // This UpdateLocalVersionQueue is going to flush before
                    // we go back to the disk and reset the dates on all the
                    // items which were just checked in (in
                    // Workspace\Checkin.cs). So we'll force
                    // UpdateLocalVersion to put the checkin date as the
                    // last modified date.

                    lvUpdate.setLastModifiedDate(DotNETDate.toWindowsFileTimeUTC(checkinDate));
                }
            }

            ulvQueue.queueUpdate(lvUpdate);
        }
    } finally {
        ulvQueue.close();
    }
}

From source file:com.thinkbiganalytics.nifi.v2.ingest.GetTableData.java

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile flowFile = null;
    if (context.hasIncomingConnection()) {
        flowFile = session.get();

        // If we have no FlowFile, and all incoming connections are self-loops then we can continue on.
        // However, if we have no FlowFile and we have connections coming from other Processors, then
        // we know that we should run only if we have a FlowFile.
        if (flowFile == null && context.hasNonLoopConnection()) {
            return;
        }
    }

    final FlowFile incoming = flowFile;
    final ComponentLog logger = getLog();

    final DBCPService dbcpService = context.getProperty(JDBC_SERVICE).asControllerService(DBCPService.class);
    final MetadataProviderService metadataService = context.getProperty(METADATA_SERVICE)
            .asControllerService(MetadataProviderService.class);
    final String loadStrategy = context.getProperty(LOAD_STRATEGY).getValue();
    final String categoryName = context.getProperty(FEED_CATEGORY).evaluateAttributeExpressions(incoming)
            .getValue();
    final String feedName = context.getProperty(FEED_NAME).evaluateAttributeExpressions(incoming).getValue();
    final String tableName = context.getProperty(TABLE_NAME).evaluateAttributeExpressions(incoming).getValue();
    final String fieldSpecs = context.getProperty(TABLE_SPECS).evaluateAttributeExpressions(incoming)
            .getValue();
    final String dateField = context.getProperty(DATE_FIELD).evaluateAttributeExpressions(incoming).getValue();
    final Integer queryTimeout = context.getProperty(QUERY_TIMEOUT).asTimePeriod(TimeUnit.SECONDS).intValue();
    final Integer overlapTime = context.getProperty(OVERLAP_TIME).evaluateAttributeExpressions(incoming)
            .asTimePeriod(TimeUnit.SECONDS).intValue();
    final Integer backoffTime = context.getProperty(BACKOFF_PERIOD).asTimePeriod(TimeUnit.SECONDS).intValue();
    final String unitSize = context.getProperty(UNIT_SIZE).getValue();
    final String outputType = context.getProperty(OUTPUT_TYPE).getValue();
    String outputDelimiter = context.getProperty(OUTPUT_DELIMITER).evaluateAttributeExpressions(incoming)
            .getValue();
    final String delimiter = StringUtils.isBlank(outputDelimiter) ? "," : outputDelimiter;

    final PropertyValue waterMarkPropName = context.getProperty(HIGH_WATER_MARK_PROP)
            .evaluateAttributeExpressions(incoming);

    final String[] selectFields = parseFields(fieldSpecs);

    final LoadStrategy strategy = LoadStrategy.valueOf(loadStrategy);
    final StopWatch stopWatch = new StopWatch(true);

    try (final Connection conn = dbcpService.getConnection()) {

        FlowFile outgoing = (incoming == null ? session.create() : incoming);
        final AtomicLong nrOfRows = new AtomicLong(0L);
        final LastFieldVisitor visitor = new LastFieldVisitor(dateField, null);
        final FlowFile current = outgoing;

        outgoing = session.write(outgoing, new OutputStreamCallback() {
            @Override
            public void process(final OutputStream out) throws IOException {
                ResultSet rs = null;
                try {
                    GetTableDataSupport support = new GetTableDataSupport(conn, queryTimeout);
                    if (strategy == LoadStrategy.FULL_LOAD) {
                        rs = support.selectFullLoad(tableName, selectFields);
                    } else if (strategy == LoadStrategy.INCREMENTAL) {
                        String waterMarkValue = getIncrementalWaterMarkValue(current, waterMarkPropName);
                        LocalDateTime waterMarkTime = LocalDateTime.parse(waterMarkValue, DATE_TIME_FORMAT);
                        Date lastLoadDate = toDate(waterMarkTime);
                        visitor.setLastModifyDate(lastLoadDate);
                        rs = support.selectIncremental(tableName, selectFields, dateField, overlapTime,
                                lastLoadDate, backoffTime, GetTableDataSupport.UnitSizes.valueOf(unitSize));
                    } else {
                        throw new RuntimeException("Unsupported loadStrategy [" + loadStrategy + "]");
                    }

                    if (GetTableDataSupport.OutputType.DELIMITED
                            .equals(GetTableDataSupport.OutputType.valueOf(outputType))) {
                        nrOfRows.set(JdbcCommon.convertToDelimitedStream(rs, out,
                                (strategy == LoadStrategy.INCREMENTAL ? visitor : null), delimiter));
                    } else if (GetTableDataSupport.OutputType.AVRO
                            .equals(GetTableDataSupport.OutputType.valueOf(outputType))) {
                        avroSchema = JdbcCommon.createSchema(rs);
                        nrOfRows.set(JdbcCommon.convertToAvroStream(rs, out,
                                (strategy == LoadStrategy.INCREMENTAL ? visitor : null), avroSchema));
                    } else {
                        throw new RuntimeException("Unsupported output format type [" + outputType + "]");
                    }
                } catch (final SQLException e) {
                    throw new IOException("SQL execution failure", e);
                } finally {
                    if (rs != null) {
                        try {
                            if (rs.getStatement() != null) {
                                rs.getStatement().close();
                            }
                            rs.close();
                        } catch (SQLException e) {
                            getLog().error("Error closing sql statement and resultset");
                        }
                    }
                }
            }
        });

        // set attribute how many rows were selected
        outgoing = session.putAttribute(outgoing, RESULT_ROW_COUNT, Long.toString(nrOfRows.get()));

        //set output format type and avro schema for feed setup, if available
        outgoing = session.putAttribute(outgoing, "db.table.output.format", outputType);
        String avroSchemaForFeedSetup = (avroSchema != null) ? JdbcCommon.getAvroSchemaForFeedSetup(avroSchema)
                : EMPTY_STRING;
        outgoing = session.putAttribute(outgoing, "db.table.avro.schema", avroSchemaForFeedSetup);

        session.getProvenanceReporter().modifyContent(outgoing, "Retrieved " + nrOfRows.get() + " rows",
                stopWatch.getElapsed(TimeUnit.MILLISECONDS));

        // Terminate flow file if no work
        Long rowcount = nrOfRows.get();
        outgoing = session.putAttribute(outgoing, ComponentAttributes.NUM_SOURCE_RECORDS.key(),
                String.valueOf(rowcount));

        if (nrOfRows.get() == 0L) {
            logger.info("{} contains no data; transferring to 'nodata'", new Object[] { outgoing });
            session.transfer(outgoing, REL_NO_DATA);
        } else {

            logger.info("{} contains {} records; transferring to 'success'",
                    new Object[] { outgoing, nrOfRows.get() });

            if (strategy == LoadStrategy.INCREMENTAL) {
                String newWaterMarkStr = format(visitor.getLastModifyDate());
                outgoing = setIncrementalWaterMarkValue(session, outgoing, waterMarkPropName, newWaterMarkStr);

                logger.info("Recorded load status feed {} date {}", new Object[] { feedName, newWaterMarkStr });
            }
            session.transfer(outgoing, REL_SUCCESS);
        }
    } catch (final Exception e) {
        if (incoming == null) {
            logger.error(
                    "Unable to execute SQL select from table due to {}. No incoming flow file to route to failure",
                    new Object[] { e });
        } else {
            logger.error("Unable to execute SQL select from table due to {}; routing to failure",
                    new Object[] { incoming, e });
            session.transfer(incoming, REL_FAILURE);
        }
    }
}

From source file:org.loklak.DumpImporter.java

@Override
public void run() {

    // work loop
    loop: while (this.shallRun)
        try {

            this.isBusy = false;

            // scan dump input directory to import files
            Collection<File> import_dumps = DAO.message_dump.getImportDumps(this.count);

            // check if we can do anything
            if (import_dumps == null || import_dumps.size() == 0 || !DAO.wait_ready(Long.MAX_VALUE)) {
                try {
                    Thread.sleep(10000);
                } catch (InterruptedException e) {
                }
                continue loop;
            }
            this.isBusy = true;

            // take only one file and process this file
            File import_dump = import_dumps.iterator().next();
            final JsonReader dumpReader = DAO.message_dump.getDumpReader(import_dump);
            final AtomicLong newTweets = new AtomicLong(0);
            Log.getLog().info("started import of dump file " + import_dump.getAbsolutePath());

            // we start concurrent indexing threads to process the json objects
            Thread[] indexerThreads = new Thread[dumpReader.getConcurrency()];
            for (int i = 0; i < dumpReader.getConcurrency(); i++) {
                indexerThreads[i] = new Thread() {
                    public void run() {
                        JsonFactory tweet;
                        try {
                            List<IndexEntry<UserEntry>> userBulk = new ArrayList<>();
                            List<IndexEntry<MessageEntry>> messageBulk = new ArrayList<>();
                            while ((tweet = dumpReader.take()) != JsonStreamReader.POISON_JSON_MAP) {
                                try {
                                    JSONObject json = tweet.getJSON();
                                    JSONObject user = (JSONObject) json.remove("user");
                                    if (user == null)
                                        continue;
                                    UserEntry u = new UserEntry(user);
                                    MessageEntry t = new MessageEntry(json);
                                    // record user into search index
                                    userBulk.add(
                                            new IndexEntry<UserEntry>(u.getScreenName(), t.getSourceType(), u));
                                    messageBulk.add(
                                            new IndexEntry<MessageEntry>(t.getIdStr(), t.getSourceType(), t));
                                    if (userBulk.size() > 1500 || messageBulk.size() > 1500) {
                                        DAO.users.writeEntries(userBulk);
                                        DAO.messages.writeEntries(messageBulk);
                                        userBulk.clear();
                                        messageBulk.clear();
                                    }
                                    newTweets.incrementAndGet();
                                } catch (IOException e) {
                                    Log.getLog().warn(e);
                                }
                                if (LoklakServer.queuedIndexing.isBusy())
                                    try {
                                        Thread.sleep(200);
                                    } catch (InterruptedException e) {
                                    }
                            }
                            try {
                                DAO.users.writeEntries(userBulk);
                                DAO.messages.writeEntries(messageBulk);
                            } catch (IOException e) {
                                Log.getLog().warn(e);
                            }
                        } catch (InterruptedException e) {
                            Log.getLog().warn(e);
                        }
                    }
                };
                indexerThreads[i].start();
            }

            // wait for termination of the indexing threads and do logging meanwhile
            boolean running = true;
            while (running) {
                long startTime = System.currentTimeMillis();
                long startCount = newTweets.get();
                running = false;
                for (int i = 0; i < dumpReader.getConcurrency(); i++) {
                    if (indexerThreads[i].isAlive())
                        running = true;
                }
                try {
                    Thread.sleep(10000);
                } catch (InterruptedException e) {
                }
                long runtime = System.currentTimeMillis() - startTime;
                long count = newTweets.get() - startCount;
                Log.getLog().info("imported " + newTweets.get() + " tweets at " + (count * 1000 / runtime)
                        + " tweets per second from " + import_dump.getName());
            }

            // catch up the number of processed tweets
            Log.getLog().info("finished import of dump file " + import_dump.getAbsolutePath() + ", "
                    + newTweets.get() + " new tweets");

            // shift the dump file to prevent that it is imported again
            DAO.message_dump.shiftProcessedDump(import_dump.getName());
            this.isBusy = false;

        } catch (Throwable e) {
            Log.getLog().warn("DumpImporter THREAD", e);
            try {
                Thread.sleep(10000);
            } catch (InterruptedException e1) {
            }
        }

    Log.getLog().info("DumpImporter terminated");
}

From source file:io.pravega.segmentstore.server.writer.SegmentAggregatorTests.java

/**
 * Tests the flush() method with Append and StreamSegmentSealOperations.
 */
@Test
public void testSeal() throws Exception {
    // Add some appends and seal, and then flush together. Verify that everything got flushed in one go.
    final int appendCount = 1000;
    final WriterConfig config = WriterConfig.builder()
            .with(WriterConfig.FLUSH_THRESHOLD_BYTES, appendCount * 50) // Extra high length threshold.
            .with(WriterConfig.FLUSH_THRESHOLD_MILLIS, 1000L).with(WriterConfig.MAX_FLUSH_SIZE_BYTES, 10000)
            .with(WriterConfig.MIN_READ_TIMEOUT_MILLIS, 10L).build();

    @Cleanup
    TestContext context = new TestContext(config);
    context.storage.create(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    context.segmentAggregator.initialize(TIMEOUT, executorService()).join();

    @Cleanup
    ByteArrayOutputStream writtenData = new ByteArrayOutputStream();

    // Accumulate some Appends
    AtomicLong outstandingSize = new AtomicLong();
    SequenceNumberCalculator sequenceNumbers = new SequenceNumberCalculator(context, outstandingSize);
    for (int i = 0; i < appendCount; i++) {
        // Add another operation and record its length.
        StorageOperation appendOp = generateAppendAndUpdateMetadata(i, SEGMENT_ID, context);
        outstandingSize.addAndGet(appendOp.getLength());
        context.segmentAggregator.add(appendOp);
        getAppendData(appendOp, writtenData, context);
        sequenceNumbers.record(appendOp);

        // Call flush() and verify that we haven't flushed anything (by design).
        FlushResult flushResult = context.segmentAggregator.flush(TIMEOUT, executorService()).join();
        Assert.assertEquals(String.format("Not expecting a flush. OutstandingSize=%s, Threshold=%d",
                outstandingSize, config.getFlushThresholdBytes()), 0, flushResult.getFlushedBytes());
        Assert.assertEquals("Not expecting any merged bytes in this test.", 0, flushResult.getMergedBytes());
    }

    Assert.assertFalse("Unexpected value returned by mustFlush() before adding StreamSegmentSealOperation.",
            context.segmentAggregator.mustFlush());

    // Generate and add a Seal Operation.
    StorageOperation sealOp = generateSealAndUpdateMetadata(SEGMENT_ID, context);
    context.segmentAggregator.add(sealOp);
    Assert.assertEquals(
            "Unexpected value returned by getLowestUncommittedSequenceNumber() after adding StreamSegmentSealOperation.",
            sequenceNumbers.getLowestUncommitted(),
            context.segmentAggregator.getLowestUncommittedSequenceNumber());
    Assert.assertTrue("Unexpected value returned by mustFlush() after adding StreamSegmentSealOperation.",
            context.segmentAggregator.mustFlush());

    // Call flush and verify that the entire Aggregator got flushed and the Seal got persisted to Storage.
    FlushResult flushResult = context.segmentAggregator.flush(TIMEOUT, executorService()).join();
    Assert.assertEquals("Expected the entire Aggregator to be flushed.", outstandingSize.get(),
            flushResult.getFlushedBytes());
    Assert.assertFalse("Unexpected value returned by mustFlush() after flushing.",
            context.segmentAggregator.mustFlush());
    Assert.assertEquals("Unexpected value returned by getLowestUncommittedSequenceNumber() after flushing.",
            Operation.NO_SEQUENCE_NUMBER, context.segmentAggregator.getLowestUncommittedSequenceNumber());

    // Verify data.
    byte[] expectedData = writtenData.toByteArray();
    byte[] actualData = new byte[expectedData.length];
    SegmentProperties storageInfo = context.storage
            .getStreamSegmentInfo(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    Assert.assertEquals("Unexpected number of bytes flushed to Storage.", expectedData.length,
            storageInfo.getLength());
    Assert.assertTrue("Segment is not sealed in storage post flush.", storageInfo.isSealed());
    Assert.assertTrue("Segment is not marked in metadata as sealed in storage post flush.",
            context.segmentAggregator.getMetadata().isSealedInStorage());
    context.storage.read(InMemoryStorage.newHandle(context.segmentAggregator.getMetadata().getName(), false), 0,
            actualData, 0, actualData.length, TIMEOUT).join();

    Assert.assertArrayEquals("Unexpected data written to storage.", expectedData, actualData);
}

From source file:org.apache.accumulo.tserver.tablet.Tablet.java

public Tablet(final TabletServer tabletServer, final KeyExtent extent, final TabletResourceManager trm,
        TabletData data) throws IOException {

    this.tabletServer = tabletServer;
    this.extent = extent;
    this.tabletResources = trm;
    this.lastLocation = data.getLastLocation();
    this.lastFlushID = data.getFlushID();
    this.lastCompactID = data.getCompactID();
    this.splitCreationTime = data.getSplitTime();
    this.tabletTime = TabletTime.getInstance(data.getTime());
    this.persistedTime = tabletTime.getTime();
    this.logId = tabletServer.createLogId(extent);

    TableConfiguration tblConf = tabletServer.getTableConfiguration(extent);
    if (null == tblConf) {
        Tables.clearCache(tabletServer.getInstance());
        tblConf = tabletServer.getTableConfiguration(extent);
        requireNonNull(tblConf, "Could not get table configuration for " + extent.getTableId());
    }

    this.tableConfiguration = tblConf;

    // translate any volume changes
    VolumeManager fs = tabletServer.getFileSystem();
    boolean replicationEnabled = ReplicationConfigurationUtil.isEnabled(extent, this.tableConfiguration);
    TabletFiles tabletPaths = new TabletFiles(data.getDirectory(), data.getLogEntris(), data.getDataFiles());
    tabletPaths = VolumeUtil.updateTabletVolumes(tabletServer, tabletServer.getLock(), fs, extent, tabletPaths,
            replicationEnabled);

    // deal with relative path for the directory
    Path locationPath;
    if (tabletPaths.dir.contains(":")) {
        locationPath = new Path(tabletPaths.dir);
    } else {
        locationPath = tabletServer.getFileSystem().getFullPath(FileType.TABLE,
                extent.getTableId() + tabletPaths.dir);
    }
    this.location = locationPath;
    this.tabletDirectory = tabletPaths.dir;
    for (Entry<Long, List<FileRef>> entry : data.getBulkImported().entrySet()) {
        this.bulkImported.put(entry.getKey(), new CopyOnWriteArrayList<FileRef>(entry.getValue()));
    }
    setupDefaultSecurityLabels(extent);

    final List<LogEntry> logEntries = tabletPaths.logEntries;
    final SortedMap<FileRef, DataFileValue> datafiles = tabletPaths.datafiles;

    tableConfiguration.addObserver(configObserver = new ConfigurationObserver() {

        private void reloadConstraints() {
            log.debug("Reloading constraints for extent: " + extent);
            constraintChecker.set(new ConstraintChecker(tableConfiguration));
        }

        @Override
        public void propertiesChanged() {
            reloadConstraints();

            try {
                setupDefaultSecurityLabels(extent);
            } catch (Exception e) {
                log.error("Failed to reload default security labels for extent: " + extent.toString());
            }
        }

        @Override
        public void propertyChanged(String prop) {
            if (prop.startsWith(Property.TABLE_CONSTRAINT_PREFIX.getKey()))
                reloadConstraints();
            else if (prop.equals(Property.TABLE_DEFAULT_SCANTIME_VISIBILITY.getKey())) {
                try {
                    log.info("Default security labels changed for extent: " + extent.toString());
                    setupDefaultSecurityLabels(extent);
                } catch (Exception e) {
                    log.error("Failed to reload default security labels for extent: " + extent.toString());
                }
            }

        }

        @Override
        public void sessionExpired() {
            log.debug("Session expired, no longer updating per table props...");
        }

    });

    tableConfiguration.getNamespaceConfiguration().addObserver(configObserver);
    tabletMemory = new TabletMemory(this);

    // Force a load of any per-table properties
    configObserver.propertiesChanged();
    if (!logEntries.isEmpty()) {
        log.info("Starting Write-Ahead Log recovery for " + this.extent);
        final AtomicLong entriesUsedOnTablet = new AtomicLong(0);
        // track max time from walog entries without timestamps
        final AtomicLong maxTime = new AtomicLong(Long.MIN_VALUE);
        final CommitSession commitSession = getTabletMemory().getCommitSession();
        try {
            Set<String> absPaths = new HashSet<String>();
            for (FileRef ref : datafiles.keySet())
                absPaths.add(ref.path().toString());

            tabletServer.recover(this.getTabletServer().getFileSystem(), extent, tableConfiguration, logEntries,
                    absPaths, new MutationReceiver() {
                        @Override
                        public void receive(Mutation m) {
                            // LogReader.printMutation(m);
                            Collection<ColumnUpdate> muts = m.getUpdates();
                            for (ColumnUpdate columnUpdate : muts) {
                                if (!columnUpdate.hasTimestamp()) {
                                    // if it is not a user set timestamp, it must have been set
                                    // by the system
                                    maxTime.set(Math.max(maxTime.get(), columnUpdate.getTimestamp()));
                                }
                            }
                            getTabletMemory().mutate(commitSession, Collections.singletonList(m));
                            entriesUsedOnTablet.incrementAndGet();
                        }
                    });

            if (maxTime.get() != Long.MIN_VALUE) {
                tabletTime.useMaxTimeFromWALog(maxTime.get());
            }
            commitSession.updateMaxCommittedTime(tabletTime.getTime());

            if (entriesUsedOnTablet.get() == 0) {
                log.debug("No replayed mutations applied, removing unused entries for " + extent);
                MetadataTableUtil.removeUnusedWALEntries(getTabletServer(), extent, logEntries,
                        tabletServer.getLock());

                // No replication update to be made because the fact that this tablet didn't use any mutations
                // from the WAL implies nothing about use of this WAL by other tablets. Do nothing.

                logEntries.clear();
            } else if (ReplicationConfigurationUtil.isEnabled(extent,
                    tabletServer.getTableConfiguration(extent))) {
                // The logs are about to be re-used by this tablet, we need to record that they have data for this extent,
                // but that they may get more data. logEntries is not cleared which will cause the elements
                // in logEntries to be added to the currentLogs for this Tablet below.
                //
                // This update serves the same purpose as an update during a MinC. We know that the WAL was defined
                // (written when the WAL was opened) but this lets us know there are mutations written to this WAL
                // that could potentially be replicated. Because the Tablet is using this WAL, we can be sure that
                // the WAL isn't closed (WRT replication Status) and thus we're safe to update its progress.
                Status status = StatusUtil.openWithUnknownLength();
                for (LogEntry logEntry : logEntries) {
                    log.debug("Writing updated status to metadata table for " + logEntry.filename + " "
                            + ProtobufUtil.toString(status));
                    ReplicationTableUtil.updateFiles(tabletServer, extent, logEntry.filename, status);
                }
            }

        } catch (Throwable t) {
            if (tableConfiguration.getBoolean(Property.TABLE_FAILURES_IGNORE)) {
                log.warn("Error recovering from log files: ", t);
            } else {
                throw new RuntimeException(t);
            }
        }
        // make some closed references that represent the recovered logs
        currentLogs = new ConcurrentSkipListSet<DfsLogger>();
        for (LogEntry logEntry : logEntries) {
            currentLogs.add(new DfsLogger(tabletServer.getServerConfig(), logEntry.filename,
                    logEntry.getColumnQualifier().toString()));
        }

        log.info("Write-Ahead Log recovery complete for " + this.extent + " (" + entriesUsedOnTablet.get()
                + " mutations applied, " + getTabletMemory().getNumEntries() + " entries created)");
    }

    String contextName = tableConfiguration.get(Property.TABLE_CLASSPATH);
    if (contextName != null && !contextName.equals("")) {
        // initialize context classloader, instead of possibly waiting for it to initialize for a scan
        // TODO this could hang, causing other tablets to fail to load - ACCUMULO-1292
        AccumuloVFSClassLoader.getContextManager().getClassLoader(contextName);
    }

    // do this last after tablet is completely setup because it
    // could cause major compaction to start
    datafileManager = new DatafileManager(this, datafiles);

    computeNumEntries();

    getDatafileManager().removeFilesAfterScan(data.getScanFiles());

    // look for hints of a failure on the previous tablet server
    if (!logEntries.isEmpty() || needsMajorCompaction(MajorCompactionReason.NORMAL)) {
        // look for any temp files hanging around
        removeOldTemporaryFiles();
    }

    log.log(TLevel.TABLET_HIST, extent + " opened");
}

From source file:org.apache.hadoop.hbase.regionserver.wal.TestWALReplay.java

/**
 * Create an HRegion with the result of a HLog split and test we only see the
 * good edits
 * @throws Exception
 */
@Test
public void testReplayEditsWrittenIntoWAL() throws Exception {
    final TableName tableName = TableName.valueOf("testReplayEditsWrittenIntoWAL");
    final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
    final Path basedir = FSUtils.getTableDir(hbaseRootDir, tableName);
    deleteDir(basedir);

    final HTableDescriptor htd = createBasic3FamilyHTD(tableName);
    HRegion region2 = HRegion.createHRegion(hri, hbaseRootDir, this.conf, htd);
    HRegion.closeHRegion(region2);
    final HLog wal = createWAL(this.conf);
    final byte[] rowName = tableName.getName();
    final byte[] regionName = hri.getEncodedNameAsBytes();
    final AtomicLong sequenceId = new AtomicLong(1);

    // Add 1k to each family.
    final int countPerFamily = 1000;
    for (HColumnDescriptor hcd : htd.getFamilies()) {
        addWALEdits(tableName, hri, rowName, hcd.getName(), countPerFamily, ee, wal, htd, sequenceId);
    }

    // Add a cache flush, shouldn't have any effect
    wal.startCacheFlush(regionName);
    wal.completeCacheFlush(regionName);

    // Add an edit to another family, should be skipped.
    WALEdit edit = new WALEdit();
    long now = ee.currentTimeMillis();
    edit.add(new KeyValue(rowName, Bytes.toBytes("another family"), rowName, now, rowName));
    wal.append(hri, tableName, edit, now, htd, sequenceId);

    // Delete the c family to verify deletes make it over.
    edit = new WALEdit();
    now = ee.currentTimeMillis();
    edit.add(new KeyValue(rowName, Bytes.toBytes("c"), null, now, KeyValue.Type.DeleteFamily));
    wal.append(hri, tableName, edit, now, htd, sequenceId);

    // Sync.
    wal.sync();
    // Set down maximum recovery so we dfsclient doesn't linger retrying something
    // long gone.
    HBaseTestingUtility.setMaxRecoveryErrorCount(((FSHLog) wal).getOutputStream(), 1);
    // Make a new conf and a new fs for the splitter to run on so we can take
    // over old wal.
    final Configuration newConf = HBaseConfiguration.create(this.conf);
    User user = HBaseTestingUtility.getDifferentUser(newConf, ".replay.wal.secondtime");
    user.runAs(new PrivilegedExceptionAction() {
        public Object run() throws Exception {
            runWALSplit(newConf);
            FileSystem newFS = FileSystem.get(newConf);
            // 100k seems to make for about 4 flushes during HRegion#initialize.
            newConf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024 * 100);
            // Make a new wal for new region.
            HLog newWal = createWAL(newConf);
            final AtomicInteger flushcount = new AtomicInteger(0);
            try {
                final HRegion region = new HRegion(basedir, newWal, newFS, newConf, hri, htd, null) {
                    protected FlushResult internalFlushcache(final HLog wal, final long myseqid,
                            MonitoredTask status) throws IOException {
                        LOG.info("InternalFlushCache Invoked");
                        FlushResult fs = super.internalFlushcache(wal, myseqid,
                                Mockito.mock(MonitoredTask.class));
                        flushcount.incrementAndGet();
                        return fs;
                    };
                };
                long seqid = region.initialize();
                // We flushed during init.
                assertTrue("Flushcount=" + flushcount.get(), flushcount.get() > 0);
                assertTrue(seqid - 1 == sequenceId.get());

                Get get = new Get(rowName);
                Result result = region.get(get);
                // Make sure we only see the good edits
                assertEquals(countPerFamily * (htd.getFamilies().size() - 1), result.size());
                region.close();
            } finally {
                newWal.closeAndDelete();
            }
            return null;
        }
    });
}

From source file:com.btoddb.fastpersitentqueue.FpqIT.java

@Test
public void testThreading() throws Exception {
    final int numEntries = 1000;
    final int numPushers = 4;
    final int numPoppers = 4;
    final int entrySize = 1000;
    fpq1.setMaxTransactionSize(2000);
    final int popBatchSize = 100;
    fpq1.setMaxMemorySegmentSizeInBytes(10000000);
    fpq1.setMaxJournalFileSize(10000000);
    fpq1.setMaxJournalDurationInMs(30000);
    fpq1.setFlushPeriodInMs(1000);
    fpq1.setNumberOfFlushWorkers(4);

    final Random pushRand = new Random(1000L);
    final Random popRand = new Random(1000000L);
    final AtomicInteger pusherFinishCount = new AtomicInteger();
    final AtomicInteger numPops = new AtomicInteger();
    final AtomicLong counter = new AtomicLong();
    final AtomicLong pushSum = new AtomicLong();
    final AtomicLong popSum = new AtomicLong();

    fpq1.init();

    ExecutorService execSrvc = Executors.newFixedThreadPool(numPushers + numPoppers);

    Set<Future> futures = new HashSet<Future>();

    // start pushing
    for (int i = 0; i < numPushers; i++) {
        Future future = execSrvc.submit(new Runnable() {
            @Override
            public void run() {
                for (int i = 0; i < numEntries; i++) {
                    try {
                        long x = counter.getAndIncrement();
                        pushSum.addAndGet(x);
                        ByteBuffer bb = ByteBuffer.wrap(new byte[entrySize]);
                        bb.putLong(x);

                        fpq1.beginTransaction();
                        fpq1.push(bb.array());
                        fpq1.commit();
                        if ((x + 1) % 500 == 0) {
                            System.out.println("pushed ID = " + x);
                        }
                        Thread.sleep(pushRand.nextInt(5));
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                }
                pusherFinishCount.incrementAndGet();
            }
        });
        futures.add(future);
    }

    // start popping
    for (int i = 0; i < numPoppers; i++) {
        Future future = execSrvc.submit(new Runnable() {
            @Override
            public void run() {
                while (pusherFinishCount.get() < numPushers || !fpq1.isEmpty()) {
                    try {
                        fpq1.beginTransaction();
                        try {
                            Collection<FpqEntry> entries = fpq1.pop(popBatchSize);
                            if (null == entries) {
                                Thread.sleep(100);
                                continue;
                            }

                            for (FpqEntry entry : entries) {
                                ByteBuffer bb = ByteBuffer.wrap(entry.getData());
                                popSum.addAndGet(bb.getLong());
                                if (entry.getId() % 500 == 0) {
                                    System.out.println("popped ID = " + entry.getId());
                                }
                            }
                            numPops.addAndGet(entries.size());
                            fpq1.commit();
                            entries.clear();
                        } finally {
                            if (fpq1.isTransactionActive()) {
                                fpq1.rollback();
                            }
                        }
                        Thread.sleep(popRand.nextInt(10));
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                }
            }
        });
        futures.add(future);
    }

    boolean finished = false;
    while (!finished) {
        try {
            for (Future f : futures) {
                f.get();
            }
            finished = true;
        } catch (InterruptedException e) {
            // ignore
            Thread.interrupted();
        }
    }

    assertThat(numPops.get(), is(numEntries * numPushers));
    assertThat(fpq1.getNumberOfEntries(), is(0L));
    assertThat(pushSum.get(), is(popSum.get()));
    assertThat(fpq1.getMemoryMgr().getNumberOfActiveSegments(), is(1));
    assertThat(fpq1.getMemoryMgr().getSegments(), hasSize(1));
    assertThat(fpq1.getJournalMgr().getJournalFiles().entrySet(), hasSize(1));
    assertThat(FileUtils.listFiles(fpq1.getPagingDirectory(), TrueFileFilter.INSTANCE, TrueFileFilter.INSTANCE),
            is(empty()));
    assertThat(
            FileUtils.listFiles(fpq1.getJournalDirectory(), TrueFileFilter.INSTANCE, TrueFileFilter.INSTANCE),
            hasSize(1));
}

From source file:org.apache.hadoop.hbase.client.TestFastFail.java

@Test
public void testFastFail() throws IOException, InterruptedException {
    Admin admin = TEST_UTIL.getHBaseAdmin();

    final String tableName = "testClientRelearningExperiment";
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(Bytes.toBytes(tableName)));
    desc.addFamily(new HColumnDescriptor(FAMILY));
    admin.createTable(desc, Bytes.toBytes("aaaa"), Bytes.toBytes("zzzz"), 32);
    final long numRows = 1000;

    Configuration conf = TEST_UTIL.getConfiguration();
    conf.setLong(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, SLEEPTIME * 100);
    conf.setInt(HConstants.HBASE_CLIENT_PAUSE, SLEEPTIME / 10);
    conf.setBoolean(HConstants.HBASE_CLIENT_FAST_FAIL_MODE_ENABLED, true);
    conf.setLong(HConstants.HBASE_CLIENT_FAST_FAIL_THREASHOLD_MS, 0);
    conf.setClass(HConstants.HBASE_CLIENT_FAST_FAIL_INTERCEPTOR_IMPL, MyPreemptiveFastFailInterceptor.class,
            PreemptiveFastFailInterceptor.class);

    final Connection connection = ConnectionFactory.createConnection(conf);

    /**
     * Write numRows worth of data, so that the workers can arbitrarily read.
     */
    List<Put> puts = new ArrayList<>();
    for (long i = 0; i < numRows; i++) {
        byte[] rowKey = longToByteArrayKey(i);
        Put put = new Put(rowKey);
        byte[] value = rowKey; // value is the same as the row key
        put.add(FAMILY, QUALIFIER, value);
        puts.add(put);
    }
    try (Table table = connection.getTable(TableName.valueOf(tableName))) {
        table.put(puts);
        LOG.info("Written all puts.");
    }

    /**
     * The number of threads that are going to perform actions against the test
     * table.
     */
    int nThreads = 100;
    ExecutorService service = Executors.newFixedThreadPool(nThreads);
    final CountDownLatch continueOtherHalf = new CountDownLatch(1);
    final CountDownLatch doneHalfway = new CountDownLatch(nThreads);

    final AtomicInteger numSuccessfullThreads = new AtomicInteger(0);
    final AtomicInteger numFailedThreads = new AtomicInteger(0);

    // The total time taken for the threads to perform the second put;
    final AtomicLong totalTimeTaken = new AtomicLong(0);
    final AtomicInteger numBlockedWorkers = new AtomicInteger(0);
    final AtomicInteger numPreemptiveFastFailExceptions = new AtomicInteger(0);

    List<Future<Boolean>> futures = new ArrayList<Future<Boolean>>();
    for (int i = 0; i < nThreads; i++) {
        futures.add(service.submit(new Callable<Boolean>() {
            /**
             * The workers are going to perform a couple of reads. The second read
             * will follow the killing of a regionserver so that we make sure that
             * some of the threads go into PreemptiveFastFailException
             */
            public Boolean call() throws Exception {
                try (Table table = connection.getTable(TableName.valueOf(tableName))) {
                    Thread.sleep(Math.abs(random.nextInt()) % 250); // Add some jitter here
                    byte[] row = longToByteArrayKey(Math.abs(random.nextLong()) % numRows);
                    Get g = new Get(row);
                    g.addColumn(FAMILY, QUALIFIER);
                    try {
                        table.get(g);
                    } catch (Exception e) {
                        LOG.debug("Get failed : ", e);
                        doneHalfway.countDown();
                        return false;
                    }

                    // Done with one get, proceeding to do the next one.
                    doneHalfway.countDown();
                    continueOtherHalf.await();

                    long startTime = System.currentTimeMillis();
                    g = new Get(row);
                    g.addColumn(FAMILY, QUALIFIER);
                    try {
                        table.get(g);
                        // The get was successful
                        numSuccessfullThreads.addAndGet(1);
                    } catch (Exception e) {
                        if (e instanceof PreemptiveFastFailException) {
                            // We were issued a PreemptiveFastFailException
                            numPreemptiveFastFailExceptions.addAndGet(1);
                        }
                        // Irrespective of PFFE, the request failed.
                        numFailedThreads.addAndGet(1);
                        return false;
                    } finally {
                        long enTime = System.currentTimeMillis();
                        totalTimeTaken.addAndGet(enTime - startTime);
                        if ((enTime - startTime) >= SLEEPTIME) {
                            // Considering the slow workers as the blockedWorkers.
                            // This assumes that the threads go full throttle at performing
                            // actions. In case the thread scheduling itself is as slow as
                            // SLEEPTIME, then this test might fail and so, we might have
                            // set it to a higher number on slower machines.
                            numBlockedWorkers.addAndGet(1);
                        }
                    }
                    return true;
                } catch (Exception e) {
                    LOG.error("Caught unknown exception", e);
                    doneHalfway.countDown();
                    return false;
                }
            }
        }));
    }

    doneHalfway.await();

    ClusterStatus status = TEST_UTIL.getHBaseCluster().getClusterStatus();

    // Kill a regionserver
    TEST_UTIL.getHBaseCluster().getRegionServer(0).getRpcServer().stop();
    TEST_UTIL.getHBaseCluster().getRegionServer(0).stop("Testing");

    // Let the threads continue going
    continueOtherHalf.countDown();

    Thread.sleep(2 * SLEEPTIME);
    // Restore the cluster
    TEST_UTIL.getHBaseCluster().restoreClusterStatus(status);

    int numThreadsReturnedFalse = 0;
    int numThreadsReturnedTrue = 0;
    int numThreadsThrewExceptions = 0;
    for (Future<Boolean> f : futures) {
        try {
            numThreadsReturnedTrue += f.get() ? 1 : 0;
            numThreadsReturnedFalse += f.get() ? 0 : 1;
        } catch (Exception e) {
            numThreadsThrewExceptions++;
        }
    }
    LOG.debug("numThreadsReturnedFalse:" + numThreadsReturnedFalse + " numThreadsReturnedTrue:"
            + numThreadsReturnedTrue + " numThreadsThrewExceptions:" + numThreadsThrewExceptions
            + " numFailedThreads:" + numFailedThreads.get() + " numSuccessfullThreads:"
            + numSuccessfullThreads.get() + " numBlockedWorkers:" + numBlockedWorkers.get()
            + " totalTimeWaited: "
            + totalTimeTaken.get() / (numBlockedWorkers.get() == 0 ? Long.MAX_VALUE : numBlockedWorkers.get())
            + " numPFFEs: " + numPreemptiveFastFailExceptions.get());

    assertEquals(
            "The expected number of all the successfull and the failed "
                    + "threads should equal the total number of threads that we spawned",
            nThreads, numFailedThreads.get() + numSuccessfullThreads.get());
    assertEquals("All the failures should be coming from the secondput failure", numFailedThreads.get(),
            numThreadsReturnedFalse);
    assertEquals("Number of threads that threw execution exceptions " + "otherwise should be 0",
            numThreadsThrewExceptions, 0);
    assertEquals("The regionservers that returned true should equal to the" + " number of successful threads",
            numThreadsReturnedTrue, numSuccessfullThreads.get());
    /* 'should' is not worthy of an assert. Disabling because randomly this seems to randomly
     * not but true. St.Ack 20151012
     *
    assertTrue(
        "There should be atleast one thread that retried instead of failing",
        MyPreemptiveFastFailInterceptor.numBraveSouls.get() > 0);
    assertTrue(
        "There should be atleast one PreemptiveFastFail exception,"
    + " otherwise, the test makes little sense."
    + "numPreemptiveFastFailExceptions: "
    + numPreemptiveFastFailExceptions.get(),
        numPreemptiveFastFailExceptions.get() > 0);
    */
    assertTrue(
            "Only few thread should ideally be waiting for the dead "
                    + "regionserver to be coming back. numBlockedWorkers:" + numBlockedWorkers.get()
                    + " threads that retried : " + MyPreemptiveFastFailInterceptor.numBraveSouls.get(),
            numBlockedWorkers.get() <= MyPreemptiveFastFailInterceptor.numBraveSouls.get());
}