Example usage for java.util.concurrent.atomic AtomicLong set

List of usage examples for java.util.concurrent.atomic AtomicLong set

Introduction

This page collects example usages of java.util.concurrent.atomic AtomicLong set, drawn from open-source projects.

Prototype

public final void set(long newValue) 

Document

Sets the value to newValue, with memory effects as specified by VarHandle#setVolatile.
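
Before the project examples, a minimal self-contained sketch of the call in isolation (class and variable names here are illustrative): set performs a volatile write, so the new value is published to all threads at once, unlike the weaker lazySet.

import java.util.concurrent.atomic.AtomicLong;

public class AtomicLongSetDemo {
    public static void main(String[] args) {
        AtomicLong value = new AtomicLong(0L);
        value.set(42L);                   // volatile write: immediately visible to other threads
        System.out.println(value.get());  // prints 42
    }
}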

Usage

From source file:org.deeplearning4j.models.embeddings.inmemory.InMemoryLookupTable.java

/**
 * Iterate on the given 2 vocab words
 *
 * @param w1 the first word to iterate on
 * @param w2 the second word to iterate on
 * @param nextRandom next random for sampling
 */
@Override
@Deprecated
public void iterateSample(T w1, T w2, AtomicLong nextRandom, double alpha) {
    if (w2 == null || w2.getIndex() < 0 || w1.getIndex() == w2.getIndex() || w1.getLabel().equals("STOP")
            || w2.getLabel().equals("STOP") || w1.getLabel().equals("UNK") || w2.getLabel().equals("UNK"))
        return;
    //current word vector
    INDArray l1 = this.syn0.slice(w2.getIndex());

    //error for current word and context
    INDArray neu1e = Nd4j.create(vectorLength);

    for (int i = 0; i < w1.getCodeLength(); i++) {
        int code = w1.getCodes().get(i);
        int point = w1.getPoints().get(i);
        if (point >= syn0.rows() || point < 0)
            throw new IllegalStateException("Illegal point " + point);
        //other word vector

        INDArray syn1 = this.syn1.slice(point);

        double dot = Nd4j.getBlasWrapper().dot(l1, syn1);

        if (dot < -MAX_EXP || dot >= MAX_EXP)
            continue;

        int idx = (int) ((dot + MAX_EXP) * ((double) expTable.length / MAX_EXP / 2.0));
        if (idx >= expTable.length)
            continue;

        //score
        double f = expTable[idx];
        //gradient
        double g = useAdaGrad ? w1.getGradient(i, (1 - code - f), lr.get()) : (1 - code - f) * alpha;

        Nd4j.getBlasWrapper().level1().axpy(syn1.length(), g, syn1, neu1e);
        Nd4j.getBlasWrapper().level1().axpy(syn1.length(), g, l1, syn1);

    }

    int target = w1.getIndex();
    int label;
    //negative sampling
    if (negative > 0)
        for (int d = 0; d < negative + 1; d++) {
            if (d == 0)
                label = 1;
            else {
                nextRandom.set(nextRandom.get() * 25214903917L + 11);
                int idx = Math.abs((int) (nextRandom.get() >> 16) % table.length());

                target = table.getInt(idx);
                if (target <= 0)
                    target = (int) nextRandom.get() % (vocab.numWords() - 1) + 1;

                if (target == w1.getIndex())
                    continue;
                label = 0;
            }

            if (target >= syn1Neg.rows() || target < 0)
                continue;

            double f = Nd4j.getBlasWrapper().dot(l1, syn1Neg.slice(target));
            double g;
            if (f > MAX_EXP)
                g = useAdaGrad ? w1.getGradient(target, (label - 1), alpha) : (label - 1) * alpha;
            else if (f < -MAX_EXP)
                g = label * (useAdaGrad ? w1.getGradient(target, alpha, alpha) : alpha);
            else
                g = useAdaGrad ? w1.getGradient(target,
                        label - expTable[(int) ((f + MAX_EXP) * (expTable.length / MAX_EXP / 2))], alpha)
                        : (label - expTable[(int) ((f + MAX_EXP) * (expTable.length / MAX_EXP / 2))]) * alpha;
            if (syn0.data().dataType() == DataBuffer.Type.DOUBLE)
                Nd4j.getBlasWrapper().axpy(g, syn1Neg.slice(target), neu1e);
            else
                Nd4j.getBlasWrapper().axpy((float) g, syn1Neg.slice(target), neu1e);

            if (syn0.data().dataType() == DataBuffer.Type.DOUBLE)
                Nd4j.getBlasWrapper().axpy(g, l1, syn1Neg.slice(target));
            else
                Nd4j.getBlasWrapper().axpy((float) g, l1, syn1Neg.slice(target));
        }

    if (syn0.data().dataType() == DataBuffer.Type.DOUBLE)
        Nd4j.getBlasWrapper().axpy(1.0, neu1e, l1);

    else
        Nd4j.getBlasWrapper().axpy(1.0f, neu1e, l1);

}
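
The nextRandom.set(...) line above advances a linear congruential generator with the same multiplier (25214903917) and increment (11) that java.util.Random uses internally. A standalone sketch of that step (names are illustrative; the separate get() and set() calls are only safe because a single thread owns the generator):

import java.util.concurrent.atomic.AtomicLong;

public class LcgStepDemo {
    public static void main(String[] args) {
        AtomicLong nextRandom = new AtomicLong(System.nanoTime());
        // advance the LCG state; not atomic as a pair, so one owning thread only
        nextRandom.set(nextRandom.get() * 25214903917L + 11);
        // derive a bounded, non-negative index from the high bits, as the sample does
        int idx = Math.abs((int) (nextRandom.get() >> 16) % 1000);
        System.out.println(idx);
    }
}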

From source file:org.voltdb.TableHelper.java

/**
 * Load random data into a partitioned table in VoltDB that has a bigint pkey.
 *
 * If the VoltTable indicates which column is its pkey, then it will use it, but otherwise it will
 * assume the first column is the bigint pkey. Note, this works with other integer keys, but
 * your keyspace is pretty small.
 *
 * If mb == 0, then maxRows is used. If maxRows == 0, then mb is used.
 *
 * @param table Table with or without schema metadata.
 * @param mb Target RSS (approximate)
 * @param maxRows Target maximum rows
 * @param client To load with.
 * @param offset Generated pkey values start here.
 * @param jump Generated pkey values increment by this value.
 * @throws Exception
 */
public void fillTableWithBigintPkey(VoltTable table, int mb, long maxRows, final Client client, long offset,
        long jump) throws Exception {
    // make sure some kind of limit is set
    assert ((maxRows > 0) || (mb > 0));
    assert (maxRows >= 0);
    assert (mb >= 0);
    final int mbTarget = mb > 0 ? mb : Integer.MAX_VALUE;
    if (maxRows == 0) {
        maxRows = Long.MAX_VALUE;
    }

    System.out.printf(
            "Filling table %s with rows starting with pkey id %d (every %d rows) until either RSS=%dmb or rowcount=%d\n",
            table.m_extraMetadata.name, offset, jump, mbTarget, maxRows);

    // find the primary key, assume first col if not found
    int pkeyColIndex = getBigintPrimaryKeyIndexIfExists(table);
    if (pkeyColIndex == -1) {
        pkeyColIndex = 0;
        assert (table.getColumnType(0).isInteger());
    }

    final AtomicLong rss = new AtomicLong(0);

    ProcedureCallback insertCallback = new ProcedureCallback() {
        @Override
        public void clientCallback(ClientResponse clientResponse) throws Exception {
            if (clientResponse.getStatus() != ClientResponse.SUCCESS) {
                System.out.println("Error in loader callback:");
                System.out.println(((ClientResponseImpl) clientResponse).toJSONString());
                assert (false);
            }
        }
    };

    // update the rss value asynchronously
    final AtomicBoolean rssThreadShouldStop = new AtomicBoolean(false);
    Thread rssThread = new Thread() {
        @Override
        public void run() {
            long tempRss = rss.get();
            long rssPrev = tempRss;
            while (!rssThreadShouldStop.get()) {
                tempRss = MiscUtils.getMBRss(client);
                if (tempRss != rssPrev) {
                    rssPrev = tempRss;
                    rss.set(tempRss);
                    System.out.printf("RSS=%dmb\n", tempRss);
                    // bail when done
                    if (tempRss > mbTarget) {
                        return;
                    }
                }
                try {
                    Thread.sleep(2000);
                } catch (Exception e) {
                }
            }
        }
    };

    // load rows until RSS goal is met (status print every 100k)
    long i = offset;
    long rows = 0;
    rssThread.start();
    final String insertProcName = table.m_extraMetadata.name.toUpperCase() + ".insert";
    RandomRowMaker filler = createRandomRowMaker(table, Integer.MAX_VALUE, false, false);
    while (rss.get() < mbTarget) {
        Object[] row = filler.randomRow();
        row[pkeyColIndex] = i;
        client.callProcedure(insertCallback, insertProcName, row);
        rows++;
        if ((rows % 100000) == 0) {
            System.out.printf("Loading 100000 rows. %d inserts sent (%d max id).\n", rows, i);
        }
        // if row limit is set, break if it's hit
        if (rows >= maxRows) {
            break;
        }
        i += jump;
    }
    rssThreadShouldStop.set(true);
    client.drain();
    rssThread.join();

    System.out.printf("Filled table %s with %d rows and now RSS=%dmb\n", table.m_extraMetadata.name, rows,
            rss.get());
}
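
The rss counter above is a simple cross-thread gauge: the background thread publishes samples with set while the load loop polls with get, and since both operations are volatile no further locking is needed. A stripped-down sketch of the same pattern (the sampled quantity here is a stand-in for MiscUtils.getMBRss):

import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;

public class SharedGaugeDemo {
    public static void main(String[] args) throws InterruptedException {
        final AtomicLong gauge = new AtomicLong(0);
        final AtomicBoolean stop = new AtomicBoolean(false);
        Thread sampler = new Thread(() -> {
            while (!stop.get()) {
                gauge.set(Runtime.getRuntime().totalMemory() >> 20); // publish a sample in MB
                try { Thread.sleep(100); } catch (InterruptedException e) { return; }
            }
        });
        sampler.start();
        while (gauge.get() == 0) { Thread.sleep(10); } // poll until the first sample lands
        stop.set(true);
        sampler.join();
        System.out.println("observed ~" + gauge.get() + " MB");
    }
}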

From source file:InMemoryLookupTable.java

/**
 * Iterate on the given 2 vocab words
 *
 * @param w1 the first word to iterate on
 * @param w2 the second word to iterate on
 * @param nextRandom next random for sampling
 */
@Override
@Deprecated
public void iterateSample(T w1, T w2, AtomicLong nextRandom, double alpha) {
    if (w2 == null || w2.getIndex() < 0 || w1.getIndex() == w2.getIndex() || w1.getLabel().equals("STOP")
            || w2.getLabel().equals("STOP") || w1.getLabel().equals("UNK") || w2.getLabel().equals("UNK"))
        return;
    //current word vector
    INDArray l1 = this.syn0.slice(w2.getIndex());

    //error for current word and context
    INDArray neu1e = Nd4j.create(vectorLength);

    for (int i = 0; i < w1.getCodeLength(); i++) {
        int code = w1.getCodes().get(i);
        int point = w1.getPoints().get(i);
        if (point >= syn0.rows() || point < 0)
            throw new IllegalStateException("Illegal point " + point);
        //other word vector

        INDArray syn1 = this.syn1.slice(point);

        double dot = Nd4j.getBlasWrapper().dot(l1, syn1);

        if (dot < -MAX_EXP || dot >= MAX_EXP)
            continue;

        int idx = (int) ((dot + MAX_EXP) * ((double) expTable.length / MAX_EXP / 2.0));
        if (idx >= expTable.length)
            continue;

        //score
        double f = expTable[idx];
        //gradient
        double g = useAdaGrad ? w1.getGradient(i, (1 - code - f)) : (1 - code - f) * alpha;

        if (neu1e.data().dataType() == DataBuffer.Type.FLOAT) { // note: both branches of this conditional are currently identical
            Nd4j.getBlasWrapper().level1().axpy(syn1.length(), g, syn1, neu1e);
            Nd4j.getBlasWrapper().level1().axpy(syn1.length(), g, l1, syn1);

        }

        else {
            Nd4j.getBlasWrapper().level1().axpy(syn1.length(), g, syn1, neu1e);
            Nd4j.getBlasWrapper().level1().axpy(syn1.length(), g, l1, syn1);

        }

    }

    int target = w1.getIndex();
    int label;
    //negative sampling
    if (negative > 0)
        for (int d = 0; d < negative + 1; d++) {
            if (d == 0)
                label = 1;
            else {
                nextRandom.set(nextRandom.get() * 25214903917L + 11);
                int idx = Math.abs((int) (nextRandom.get() >> 16) % table.length());

                target = table.getInt(idx);
                if (target <= 0)
                    target = (int) nextRandom.get() % (vocab.numWords() - 1) + 1;

                if (target == w1.getIndex())
                    continue;
                label = 0;
            }

            if (target >= syn1Neg.rows() || target < 0)
                continue;

            double f = Nd4j.getBlasWrapper().dot(l1, syn1Neg.slice(target));
            double g;
            if (f > MAX_EXP)
                g = useAdaGrad ? w1.getGradient(target, (label - 1)) : (label - 1) * alpha;
            else if (f < -MAX_EXP)
                g = label * (useAdaGrad ? w1.getGradient(target, alpha) : alpha);
            else
                g = useAdaGrad
                        ? w1.getGradient(target,
                                label - expTable[(int) ((f + MAX_EXP) * (expTable.length / MAX_EXP / 2))])
                        : (label - expTable[(int) ((f + MAX_EXP) * (expTable.length / MAX_EXP / 2))]) * alpha;
            if (syn0.data().dataType() == DataBuffer.Type.DOUBLE)
                Nd4j.getBlasWrapper().axpy(g, syn1Neg.slice(target), neu1e);
            else
                Nd4j.getBlasWrapper().axpy((float) g, syn1Neg.slice(target), neu1e);

            if (syn0.data().dataType() == DataBuffer.Type.DOUBLE)
                Nd4j.getBlasWrapper().axpy(g, l1, syn1Neg.slice(target));
            else
                Nd4j.getBlasWrapper().axpy((float) g, l1, syn1Neg.slice(target));
        }

    if (syn0.data().dataType() == DataBuffer.Type.DOUBLE)
        Nd4j.getBlasWrapper().axpy(1.0, neu1e, l1);

    else
        Nd4j.getBlasWrapper().axpy(1.0f, neu1e, l1);

}
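
As in the first example, the LCG advance here is a get() followed by a set(), which is not atomic as a pair. If several threads ever shared the generator, the read-modify-write could be folded into one atomic step with updateAndGet (Java 8+); a hedged sketch:

import java.util.concurrent.atomic.AtomicLong;

public class AtomicLcgDemo {
    public static void main(String[] args) {
        AtomicLong nextRandom = new AtomicLong(12345L);
        // one atomic read-modify-write instead of separate get() and set()
        long state = nextRandom.updateAndGet(s -> s * 25214903917L + 11);
        System.out.println(state);
    }
}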

From source file:org.jasig.ssp.service.impl.PersonServiceImpl.java

@Override
public PagingWrapper<Person> syncCoaches() {
    long methodStart = new Date().getTime();
    final Collection<Person> coaches = Lists.newArrayList();

    if (Thread.currentThread().isInterrupted()) {
        LOGGER.info("Abandoning syncCoaches because of thread interruption");
        return new PagingWrapper<Person>(coaches);
    }

    final Collection<String> coachUsernames = getAllCoachUsernamesFromDirectory();

    long mergeLoopStart = new Date().getTime();
    final AtomicLong timeInExternalReads = new AtomicLong();
    final AtomicLong timeInExternalWrites = new AtomicLong();
    for (final String coachUsername : coachUsernames) {

        if (Thread.currentThread().isInterrupted()) {
            LOGGER.info("Abandoning syncCoaches on username {} because of thread interruption", coachUsername);
            break;
        }

        long singlePersonStart = new Date().getTime();

        final AtomicReference<Person> coach = new AtomicReference<Person>();

        try {
            withCoachSyncTransaction(new Callable<Object>() {
                @Override
                public Object call() throws Exception {
                    long localPersonLookupStart = new Date().getTime();
                    try {
                        coach.set(personFromUsername(coachUsername));
                    } catch (final ObjectNotFoundException e) {
                        LOGGER.debug("Coach {} not found", coachUsername);
                    }
                    long localPersonLookupEnd = new Date().getTime();
                    TIMING_LOGGER.info("Read local coach by username {} in {} ms", coachUsername,
                            localPersonLookupEnd - localPersonLookupStart);

                    // Does coach exist in local SSP.person table?

                    if (coach.get() == null) {

                        // Attempt to find coach in external data
                        try {
                            long externalPersonLookupStart = new Date().getTime();

                            final ExternalPerson externalPerson = externalPersonService
                                    .getByUsername(coachUsername);

                            long externalPersonLookupEnd = new Date().getTime();
                            long externalPersonLookupElapsed = externalPersonLookupEnd
                                    - externalPersonLookupStart;
                            timeInExternalReads.set(timeInExternalReads.get() + externalPersonLookupElapsed);
                            TIMING_LOGGER.info("Read external coach by username {} in {} ms", coachUsername,
                                    externalPersonLookupElapsed);

                            long externalPersonSyncStart = new Date().getTime();

                            coach.set(new Person()); // NOPMD
                            externalPersonService.updatePersonFromExternalPerson(coach.get(), externalPerson,
                                    true);

                            long externalPersonSyncEnd = new Date().getTime();
                            long externalPersonSyncElapsed = externalPersonSyncEnd - externalPersonSyncStart;
                            timeInExternalWrites.set(timeInExternalWrites.get() + externalPersonSyncElapsed);
                            TIMING_LOGGER.info("Synced external coach by username {} in {} ms", coachUsername,
                                    externalPersonSyncElapsed);

                        } catch (final ObjectNotFoundException e) {
                            LOGGER.debug("Coach {} not found in external data", coachUsername);
                        }
                    }
                    return coach.get();
                }
            });
        } catch (ConstraintViolationException e) {
            if ("uq_person_school_id".equals(e.getConstraintName())) {
                LOGGER.warn("Skipping coach with non-unique schoolId '{}' (username '{}')",
                        new Object[] { coach.get().getSchoolId(), coachUsername, e });
                coach.set(null);
            } else if ("unique_person_username".equals(e.getConstraintName())) {
                LOGGER.warn("Skipping coach with non-unique username '{}' (schoolId '{}')",
                        new Object[] { coachUsername, coach.get().getSchoolId(), e });
                coach.set(null);
            } else {
                throw e;
            }
        }

        if (coach.get() != null) {
            coaches.add(coach.get());
        }
        long singlePersonEnd = new Date().getTime();
        TIMING_LOGGER.info("SSP coach merge for username {} completed in {} ms", coachUsername,
                singlePersonEnd - singlePersonStart);
    }
    Long mergeLoopEnd = new Date().getTime();
    TIMING_LOGGER.info("All SSP merges for {} coaches completed in {} ms. Reading: {} ms. Writing: {} ms",
            new Object[] { coachUsernames.size(), mergeLoopEnd - mergeLoopStart, timeInExternalReads.get(),
                    timeInExternalWrites.get() });

    PagingWrapper pw = new PagingWrapper<Person>(coaches);
    long methodEnd = new Date().getTime();
    TIMING_LOGGER.info("Read and merged PersonAttributesService {} coaches in {} ms", coaches.size(),
            methodEnd - methodStart);
    return pw;
}
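
timeInExternalReads and timeInExternalWrites accumulate elapsed time with a get() plus set() pair, which is fine in this single-threaded loop but would lose updates under contention. addAndGet performs the same accumulation atomically; a minimal sketch (the timed work is a placeholder):

import java.util.concurrent.atomic.AtomicLong;

public class ElapsedAccumulatorDemo {
    public static void main(String[] args) throws InterruptedException {
        AtomicLong timeInReads = new AtomicLong();
        long start = System.currentTimeMillis();
        Thread.sleep(50); // placeholder for the external read
        long elapsed = System.currentTimeMillis() - start;
        timeInReads.addAndGet(elapsed); // atomic equivalent of set(get() + elapsed)
        System.out.println(timeInReads.get() + " ms in reads");
    }
}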

From source file:org.hyperic.hq.measurement.server.session.DataManagerImpl.java

public Collection<HighLowMetricValue> getRawData(Measurement m, long begin, long end,
        AtomicLong publishedInterval) {
    final long interval = m.getInterval();
    begin = TimingVoodoo.roundDownTime(begin, interval);
    end = TimingVoodoo.roundDownTime(end, interval);
    Collection<HighLowMetricValue> points;
    if (m.getTemplate().isAvailability()) {
        points = availabilityManager.getHistoricalAvailData(new Integer[] { m.getId() }, begin, end, interval,
                PageControl.PAGE_ALL, true);
        publishedInterval.set(interval);
    } else {
        points = getRawDataPoints(m, begin, end, publishedInterval);
    }
    return points;
}

From source file:org.hyperic.hq.measurement.server.session.DataManagerImpl.java

private StringBuilder getRawDataSql(Measurement m, long begin, long end, AtomicLong publishedInterval) {
    final String sql = new StringBuilder(128).append("SELECT value, timestamp FROM :table")
            .append(" WHERE timestamp BETWEEN ").append(begin).append(" AND ").append(end)
            .append(" AND measurement_id=").append(m.getId()).toString();
    final String[] tables = getDataTables(begin, end, false);
    if (tables.length == 1) {
        if (tables[0].equals(TAB_DATA_1H)) {
            publishedInterval.set(HOUR);
        } else if (tables[0].equals(TAB_DATA_6H)) {
            publishedInterval.set(HOUR * 6);
        } else if (tables[0].equals(TAB_DATA_1D)) {
            publishedInterval.set(HOUR * 24);
        }
    }
    final StringBuilder sqlBuf = new StringBuilder(128 * tables.length);
    for (int i = 0; i < tables.length; i++) {
        sqlBuf.append(sql.replace(":table", tables[i]));
        if (i < (tables.length - 1)) {
            sqlBuf.append(" UNION ALL ");
        }
    }
    return sqlBuf;
}
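
Both DataManagerImpl methods use the AtomicLong publishedInterval as a mutable out-parameter: Java cannot pass a long by reference, so the callee reports a secondary result through set. A bare-bones sketch of the pattern (method and variable names are illustrative):

import java.util.concurrent.atomic.AtomicLong;

public class OutParameterDemo {
    // the callee returns the quotient and reports the remainder through the holder
    static int divide(int dividend, int divisor, AtomicLong remainder) {
        remainder.set(dividend % divisor);
        return dividend / divisor;
    }

    public static void main(String[] args) {
        AtomicLong remainder = new AtomicLong();
        int quotient = divide(17, 5, remainder);
        System.out.println(quotient + " remainder " + remainder.get()); // 3 remainder 2
    }
}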

From source file:com.twitter.distributedlog.BKLogHandler.java

private void asyncGetLedgerListInternal(final Comparator<LogSegmentMetadata> comparator,
        final LogSegmentFilter segmentFilter, final Watcher watcher,
        final GenericCallback<List<LogSegmentMetadata>> finalCallback, final AtomicInteger numAttemptsLeft,
        final AtomicLong backoffMillis) {
    final Stopwatch stopwatch = Stopwatch.createStarted();
    try {
        if (LOG.isTraceEnabled()) {
            LOG.trace("Async getting ledger list for {}.", getFullyQualifiedName());
        }
        final GenericCallback<List<LogSegmentMetadata>> callback = new GenericCallback<List<LogSegmentMetadata>>() {
            @Override
            public void operationComplete(int rc, List<LogSegmentMetadata> result) {
                long elapsedMicros = stopwatch.stop().elapsed(TimeUnit.MICROSECONDS);
                if (KeeperException.Code.OK.intValue() != rc) {
                    getListStat.registerFailedEvent(elapsedMicros);
                } else {
                    if (LogSegmentFilter.DEFAULT_FILTER == segmentFilter) {
                        isFullListFetched.set(true);
                    }
                    getListStat.registerSuccessfulEvent(elapsedMicros);
                }
                finalCallback.operationComplete(rc, result);
            }
        };
        zooKeeperClient.get().getChildren(logMetadata.getLogSegmentsPath(), watcher,
                new AsyncCallback.Children2Callback() {
                    @Override
                    public void processResult(final int rc, final String path, final Object ctx,
                            final List<String> children, final Stat stat) {
                        if (KeeperException.Code.OK.intValue() != rc) {

                            if ((KeeperException.Code.CONNECTIONLOSS.intValue() == rc
                                    || KeeperException.Code.SESSIONEXPIRED.intValue() == rc
                                    || KeeperException.Code.SESSIONMOVED.intValue() == rc)
                                    && numAttemptsLeft.decrementAndGet() > 0) {
                                long backoffMs = backoffMillis.get();
                                backoffMillis.set(Math.min(conf.getZKRetryBackoffMaxMillis(), 2 * backoffMs));
                                scheduler.schedule(new Runnable() {
                                    @Override
                                    public void run() {
                                        asyncGetLedgerListInternal(comparator, segmentFilter, watcher,
                                                finalCallback, numAttemptsLeft, backoffMillis);
                                    }
                                }, backoffMs, TimeUnit.MILLISECONDS);
                                return;
                            }
                            callback.operationComplete(rc, null);
                            return;
                        }

                        if (LOG.isTraceEnabled()) {
                            LOG.trace("Got ledger list from {} : {}", logMetadata.getLogSegmentsPath(),
                                    children);
                        }

                        ledgerListWatchSet.set(true);
                        Set<String> segmentsReceived = new HashSet<String>();
                        segmentsReceived.addAll(segmentFilter.filter(children));
                        Set<String> segmentsAdded;
                        final Set<String> removedSegments = Collections.synchronizedSet(new HashSet<String>());
                        final Map<String, LogSegmentMetadata> addedSegments = Collections
                                .synchronizedMap(new HashMap<String, LogSegmentMetadata>());
                        Pair<Set<String>, Set<String>> segmentChanges = logSegmentCache.diff(segmentsReceived);
                        segmentsAdded = segmentChanges.getLeft();
                        removedSegments.addAll(segmentChanges.getRight());

                        if (segmentsAdded.isEmpty()) {
                            if (LOG.isTraceEnabled()) {
                                LOG.trace("No segments added for {}.", getFullyQualifiedName());
                            }

                            // update the cache before fetch
                            logSegmentCache.update(removedSegments, addedSegments);

                            List<LogSegmentMetadata> segmentList;
                            try {
                                segmentList = getCachedLogSegments(comparator);
                            } catch (UnexpectedException e) {
                                callback.operationComplete(KeeperException.Code.DATAINCONSISTENCY.intValue(),
                                        null);
                                return;
                            }
                            callback.operationComplete(KeeperException.Code.OK.intValue(), segmentList);
                            notifyUpdatedLogSegments(segmentList);
                            if (!removedSegments.isEmpty()) {
                                notifyOnOperationComplete();
                            }
                            return;
                        }

                        final AtomicInteger numChildren = new AtomicInteger(segmentsAdded.size());
                        final AtomicInteger numFailures = new AtomicInteger(0);
                        for (final String segment : segmentsAdded) {
                            metadataStore.getLogSegment(logMetadata.getLogSegmentPath(segment))
                                    .addEventListener(new FutureEventListener<LogSegmentMetadata>() {

                                        @Override
                                        public void onSuccess(LogSegmentMetadata result) {
                                            addedSegments.put(segment, result);
                                            complete();
                                        }

                                        @Override
                                        public void onFailure(Throwable cause) {
                                            // NONODE exception is possible in two cases
                                            // 1. A log segment was deleted by truncation between the call to getChildren and read
                                            // attempt on the znode corresponding to the segment
                                            // 2. In progress segment has been completed => inprogress ZNode does not exist
                                            if (cause instanceof KeeperException
                                                    && KeeperException.Code.NONODE == ((KeeperException) cause)
                                                            .code()) {
                                                removedSegments.add(segment);
                                                complete();
                                            } else {
                                                // fail fast
                                                if (1 == numFailures.incrementAndGet()) {
                                                    int rcToReturn = KeeperException.Code.SYSTEMERROR
                                                            .intValue();
                                                    if (cause instanceof KeeperException) {
                                                        rcToReturn = ((KeeperException) cause).code()
                                                                .intValue();
                                                    } else if (cause instanceof ZKException) {
                                                        rcToReturn = ((ZKException) cause)
                                                                .getKeeperExceptionCode().intValue();
                                                    }
                                                    // :( probably we need a dlog-related response code.
                                                    callback.operationComplete(rcToReturn, null);
                                                    return;
                                                }
                                            }
                                        }

                                        private void complete() {
                                            if (0 == numChildren.decrementAndGet() && numFailures.get() == 0) {
                                                // update the cache only when fetch completed
                                                logSegmentCache.update(removedSegments, addedSegments);
                                                List<LogSegmentMetadata> segmentList;
                                                try {
                                                    segmentList = getCachedLogSegments(comparator);
                                                } catch (UnexpectedException e) {
                                                    callback.operationComplete(
                                                            KeeperException.Code.DATAINCONSISTENCY.intValue(),
                                                            null);
                                                    return;
                                                }
                                                callback.operationComplete(KeeperException.Code.OK.intValue(),
                                                        segmentList);
                                                notifyUpdatedLogSegments(segmentList);
                                                notifyOnOperationComplete();
                                            }
                                        }
                                    });
                        }
                    }
                }, null);
    } catch (ZooKeeperClient.ZooKeeperConnectionException e) {
        getListStat.registerFailedEvent(stopwatch.stop().elapsed(TimeUnit.MICROSECONDS));
        finalCallback.operationComplete(KeeperException.Code.CONNECTIONLOSS.intValue(), null);
    } catch (InterruptedException e) {
        getListStat.registerFailedEvent(stopwatch.stop().elapsed(TimeUnit.MICROSECONDS));
        finalCallback.operationComplete(KeeperException.Code.CONNECTIONLOSS.intValue(), null);
    }
}
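
Here backoffMillis carries retry state across asynchronous attempts: each recoverable ZooKeeper failure doubles the delay, capped by conf.getZKRetryBackoffMaxMillis(), before the method reschedules itself. The doubling in isolation (the cap and seed values are illustrative):

import java.util.concurrent.atomic.AtomicLong;

public class BackoffDemo {
    public static void main(String[] args) {
        final long maxBackoffMillis = 30_000L;           // illustrative cap
        AtomicLong backoffMillis = new AtomicLong(100L); // illustrative seed
        for (int attempt = 1; attempt <= 5; attempt++) {
            long backoffMs = backoffMillis.get();
            System.out.println("attempt " + attempt + ": waiting " + backoffMs + " ms");
            // double the delay for the next retry, bounded by the cap
            backoffMillis.set(Math.min(maxBackoffMillis, 2 * backoffMs));
        }
    }
}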

From source file:org.apache.nifi.controller.repository.FileSystemRepository.java

private synchronized void initializeRepository() throws IOException {
    final Map<String, Path> realPathMap = new HashMap<>();
    final ExecutorService executor = Executors.newFixedThreadPool(containers.size());
    final List<Future<Long>> futures = new ArrayList<>();

    // Run through each of the containers. For each container, create the sections if necessary.
    // Then, we need to scan through the archived data so that we can determine what the oldest
    // archived data is, so that we know when we have to start aging data off.
    for (final Map.Entry<String, Path> container : containers.entrySet()) {
        final String containerName = container.getKey();
        final ContainerState containerState = containerStateMap.get(containerName);
        final Path containerPath = container.getValue();
        final boolean pathExists = Files.exists(containerPath);

        final Path realPath;
        if (pathExists) {
            realPath = containerPath.toRealPath();
        } else {
            realPath = Files.createDirectories(containerPath).toRealPath();
        }

        for (int i = 0; i < SECTIONS_PER_CONTAINER; i++) {
            Files.createDirectories(realPath.resolve(String.valueOf(i)));
        }

        realPathMap.put(containerName, realPath);

        // We need to scan the archive directories to find out the oldest timestamp so that we know whether or not we
        // will have to delete archived data based on time threshold. Scanning all of the directories can be very
        // expensive because of all of the disk accesses. So we do this in multiple threads. Since containers are
        // often unique to a disk, we just map 1 thread to each container.
        final Callable<Long> scanContainer = new Callable<Long>() {
            @Override
            public Long call() throws IOException {
                final AtomicLong oldestDateHolder = new AtomicLong(0L);

                // the path already exists, so scan the path to find any files and update maxIndex to the max of
                // all filenames seen.
                Files.walkFileTree(realPath, new SimpleFileVisitor<Path>() {
                    @Override
                    public FileVisitResult visitFileFailed(Path file, IOException exc) throws IOException {
                        LOG.warn("Content repository contains un-readable file or directory '"
                                + file.getFileName() + "'. Skipping. ", exc);
                        return FileVisitResult.SKIP_SUBTREE;
                    }

                    @Override
                    public FileVisitResult visitFile(final Path file, final BasicFileAttributes attrs)
                            throws IOException {
                        if (attrs.isDirectory()) {
                            return FileVisitResult.CONTINUE;
                        }

                        // Check if this is an 'archive' directory
                        final Path relativePath = realPath.relativize(file);
                        if (relativePath.getNameCount() > 3
                                && ARCHIVE_DIR_NAME.equals(relativePath.subpath(1, 2).toString())) {
                            final long lastModifiedTime = getLastModTime(file);

                            if (lastModifiedTime < oldestDateHolder.get()) {
                                oldestDateHolder.set(lastModifiedTime);
                            }
                            containerState.incrementArchiveCount();
                        }

                        return FileVisitResult.CONTINUE;
                    }
                });

                return oldestDateHolder.get();
            }
        };

        // If the path didn't exist to begin with, there's no archive directory, so don't bother scanning.
        if (pathExists) {
            futures.add(executor.submit(scanContainer));
        }
    }

    executor.shutdown();
    for (final Future<Long> future : futures) {
        try {
            final Long oldestDate = future.get();
            if (oldestDate < oldestArchiveDate.get()) {
                oldestArchiveDate.set(oldestDate);
            }
        } catch (final ExecutionException | InterruptedException e) {
            if (e.getCause() instanceof IOException) {
                throw (IOException) e.getCause();
            } else {
                throw new RuntimeException(e);
            }
        }
    }

    containers.clear();
    containers.putAll(realPathMap);
}
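
oldestDateHolder exists because locals captured by the anonymous SimpleFileVisitor must be (effectively) final, so a final AtomicLong stands in for a mutable long. Note that the holder above starts at 0, so for positive modification times the lastModifiedTime < oldestDateHolder.get() test never succeeds; the sketch below seeds the holder with Long.MAX_VALUE so minimum-tracking works in isolation (that seed is a deliberate change from the snippet, not the original code):

import java.io.IOException;
import java.nio.file.FileVisitResult;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.SimpleFileVisitor;
import java.nio.file.attribute.BasicFileAttributes;
import java.util.concurrent.atomic.AtomicLong;

public class OldestFileScan {
    public static void main(String[] args) throws IOException {
        // seeded with MAX_VALUE so any real timestamp replaces it
        final AtomicLong oldest = new AtomicLong(Long.MAX_VALUE);
        Files.walkFileTree(Paths.get("."), new SimpleFileVisitor<Path>() {
            @Override
            public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) {
                long mtime = attrs.lastModifiedTime().toMillis();
                if (mtime < oldest.get()) {
                    oldest.set(mtime); // single-threaded visitor, so get/set is safe
                }
                return FileVisitResult.CONTINUE;
            }
        });
        System.out.println("oldest mtime: " + oldest.get());
    }
}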

From source file:org.apache.accumulo.tserver.tablet.Tablet.java

public Tablet(final TabletServer tabletServer, final KeyExtent extent, final TabletResourceManager trm,
        TabletData data) throws IOException {

    this.tabletServer = tabletServer;
    this.extent = extent;
    this.tabletResources = trm;
    this.lastLocation = data.getLastLocation();
    this.lastFlushID = data.getFlushID();
    this.lastCompactID = data.getCompactID();
    this.splitCreationTime = data.getSplitTime();
    this.tabletTime = TabletTime.getInstance(data.getTime());
    this.persistedTime = tabletTime.getTime();
    this.logId = tabletServer.createLogId(extent);

    TableConfiguration tblConf = tabletServer.getTableConfiguration(extent);
    if (null == tblConf) {
        Tables.clearCache(tabletServer.getInstance());
        tblConf = tabletServer.getTableConfiguration(extent);
        requireNonNull(tblConf, "Could not get table configuration for " + extent.getTableId());
    }

    this.tableConfiguration = tblConf;

    // translate any volume changes
    VolumeManager fs = tabletServer.getFileSystem();
    boolean replicationEnabled = ReplicationConfigurationUtil.isEnabled(extent, this.tableConfiguration);
    TabletFiles tabletPaths = new TabletFiles(data.getDirectory(), data.getLogEntris(), data.getDataFiles());
    tabletPaths = VolumeUtil.updateTabletVolumes(tabletServer, tabletServer.getLock(), fs, extent, tabletPaths,
            replicationEnabled);

    // deal with relative path for the directory
    Path locationPath;
    if (tabletPaths.dir.contains(":")) {
        locationPath = new Path(tabletPaths.dir);
    } else {
        locationPath = tabletServer.getFileSystem().getFullPath(FileType.TABLE,
                extent.getTableId() + tabletPaths.dir);
    }
    this.location = locationPath;
    this.tabletDirectory = tabletPaths.dir;
    for (Entry<Long, List<FileRef>> entry : data.getBulkImported().entrySet()) {
        this.bulkImported.put(entry.getKey(), new CopyOnWriteArrayList<FileRef>(entry.getValue()));
    }
    setupDefaultSecurityLabels(extent);

    final List<LogEntry> logEntries = tabletPaths.logEntries;
    final SortedMap<FileRef, DataFileValue> datafiles = tabletPaths.datafiles;

    tableConfiguration.addObserver(configObserver = new ConfigurationObserver() {

        private void reloadConstraints() {
            log.debug("Reloading constraints for extent: " + extent);
            constraintChecker.set(new ConstraintChecker(tableConfiguration));
        }

        @Override
        public void propertiesChanged() {
            reloadConstraints();

            try {
                setupDefaultSecurityLabels(extent);
            } catch (Exception e) {
                log.error("Failed to reload default security labels for extent: " + extent.toString());
            }
        }

        @Override
        public void propertyChanged(String prop) {
            if (prop.startsWith(Property.TABLE_CONSTRAINT_PREFIX.getKey()))
                reloadConstraints();
            else if (prop.equals(Property.TABLE_DEFAULT_SCANTIME_VISIBILITY.getKey())) {
                try {
                    log.info("Default security labels changed for extent: " + extent.toString());
                    setupDefaultSecurityLabels(extent);
                } catch (Exception e) {
                    log.error("Failed to reload default security labels for extent: " + extent.toString());
                }
            }

        }

        @Override
        public void sessionExpired() {
            log.debug("Session expired, no longer updating per table props...");
        }

    });

    tableConfiguration.getNamespaceConfiguration().addObserver(configObserver);
    tabletMemory = new TabletMemory(this);

    // Force a load of any per-table properties
    configObserver.propertiesChanged();
    if (!logEntries.isEmpty()) {
        log.info("Starting Write-Ahead Log recovery for " + this.extent);
        final AtomicLong entriesUsedOnTablet = new AtomicLong(0);
        // track max time from walog entries without timestamps
        final AtomicLong maxTime = new AtomicLong(Long.MIN_VALUE);
        final CommitSession commitSession = getTabletMemory().getCommitSession();
        try {
            Set<String> absPaths = new HashSet<String>();
            for (FileRef ref : datafiles.keySet())
                absPaths.add(ref.path().toString());

            tabletServer.recover(this.getTabletServer().getFileSystem(), extent, tableConfiguration, logEntries,
                    absPaths, new MutationReceiver() {
                        @Override
                        public void receive(Mutation m) {
                            // LogReader.printMutation(m);
                            Collection<ColumnUpdate> muts = m.getUpdates();
                            for (ColumnUpdate columnUpdate : muts) {
                                if (!columnUpdate.hasTimestamp()) {
                                    // if it is not a user set timestamp, it must have been set
                                    // by the system
                                    maxTime.set(Math.max(maxTime.get(), columnUpdate.getTimestamp()));
                                }
                            }
                            getTabletMemory().mutate(commitSession, Collections.singletonList(m));
                            entriesUsedOnTablet.incrementAndGet();
                        }
                    });

            if (maxTime.get() != Long.MIN_VALUE) {
                tabletTime.useMaxTimeFromWALog(maxTime.get());
            }
            commitSession.updateMaxCommittedTime(tabletTime.getTime());

            if (entriesUsedOnTablet.get() == 0) {
                log.debug("No replayed mutations applied, removing unused entries for " + extent);
                MetadataTableUtil.removeUnusedWALEntries(getTabletServer(), extent, logEntries,
                        tabletServer.getLock());

                // No replication update to be made because the fact that this tablet didn't use any mutations
                // from the WAL implies nothing about use of this WAL by other tablets. Do nothing.

                logEntries.clear();
            } else if (ReplicationConfigurationUtil.isEnabled(extent,
                    tabletServer.getTableConfiguration(extent))) {
                // The logs are about to be re-used by this tablet, we need to record that they have data for this extent,
                // but that they may get more data. logEntries is not cleared which will cause the elements
                // in logEntries to be added to the currentLogs for this Tablet below.
                //
                // This update serves the same purpose as an update during a MinC. We know that the WAL was defined
                // (written when the WAL was opened) but this lets us know there are mutations written to this WAL
                // that could potentially be replicated. Because the Tablet is using this WAL, we can be sure that
                // the WAL isn't closed (WRT replication Status) and thus we're safe to update its progress.
                Status status = StatusUtil.openWithUnknownLength();
                for (LogEntry logEntry : logEntries) {
                    log.debug("Writing updated status to metadata table for " + logEntry.filename + " "
                            + ProtobufUtil.toString(status));
                    ReplicationTableUtil.updateFiles(tabletServer, extent, logEntry.filename, status);
                }
            }

        } catch (Throwable t) {
            if (tableConfiguration.getBoolean(Property.TABLE_FAILURES_IGNORE)) {
                log.warn("Error recovering from log files: ", t);
            } else {
                throw new RuntimeException(t);
            }
        }
        // make some closed references that represent the recovered logs
        currentLogs = new ConcurrentSkipListSet<DfsLogger>();
        for (LogEntry logEntry : logEntries) {
            currentLogs.add(new DfsLogger(tabletServer.getServerConfig(), logEntry.filename,
                    logEntry.getColumnQualifier().toString()));
        }

        log.info("Write-Ahead Log recovery complete for " + this.extent + " (" + entriesUsedOnTablet.get()
                + " mutations applied, " + getTabletMemory().getNumEntries() + " entries created)");
    }

    String contextName = tableConfiguration.get(Property.TABLE_CLASSPATH);
    if (contextName != null && !contextName.equals("")) {
        // initialize context classloader, instead of possibly waiting for it to initialize for a scan
        // TODO this could hang, causing other tablets to fail to load - ACCUMULO-1292
        AccumuloVFSClassLoader.getContextManager().getClassLoader(contextName);
    }

    // do this last after tablet is completely setup because it
    // could cause major compaction to start
    datafileManager = new DatafileManager(this, datafiles);

    computeNumEntries();

    getDatafileManager().removeFilesAfterScan(data.getScanFiles());

    // look for hints of a failure on the previous tablet server
    if (!logEntries.isEmpty() || needsMajorCompaction(MajorCompactionReason.NORMAL)) {
        // look for any temp files hanging around
        removeOldTemporaryFiles();
    }

    log.log(TLevel.TABLET_HIST, extent + " opened");
}
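
maxTime.set(Math.max(maxTime.get(), ...)) works above only as long as mutations are delivered to the MutationReceiver one at a time; under concurrent delivery the separate get and set could interleave and drop a maximum. Java 8's accumulateAndGet folds the comparison into one atomic step; a minimal sketch:

import java.util.concurrent.atomic.AtomicLong;

public class MaxTrackerDemo {
    public static void main(String[] args) {
        AtomicLong maxTime = new AtomicLong(Long.MIN_VALUE);
        long[] timestamps = { 1000L, 5000L, 3000L };
        for (long ts : timestamps) {
            // atomic replacement for maxTime.set(Math.max(maxTime.get(), ts))
            maxTime.accumulateAndGet(ts, Math::max);
        }
        System.out.println(maxTime.get()); // prints 5000
    }
}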