List of usage examples for java.util.concurrent.atomic.AtomicLong.addAndGet
public final long addAndGet(long delta)
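addAndGet atomically adds the given delta to the current value and returns the updated value, so it is safe to use as a shared counter across threads. Before the project examples below, here is a minimal self-contained sketch of that contract; it is not taken from any of the listed projects, and the class and variable names are made up for illustration.

import java.util.concurrent.atomic.AtomicLong;

public class AddAndGetDemo {
    public static void main(String[] args) throws InterruptedException {
        final AtomicLong totalBytes = new AtomicLong(0);

        // Two threads add to the same counter; addAndGet returns the value
        // *after* the addition, atomically.
        Runnable writer = () -> {
            for (int i = 0; i < 1_000; i++) {
                long updated = totalBytes.addAndGet(64);
                assert updated > 0;
            }
        };

        Thread t1 = new Thread(writer);
        Thread t2 = new Thread(writer);
        t1.start();
        t2.start();
        t1.join();
        t2.join();

        // 2 threads * 1_000 additions * 64 bytes each
        System.out.println(totalBytes.get()); // prints 128000
    }
}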
From source file: org.apache.hadoop.raid.RaidShell.java
/**
 * @param dfs
 * @param fileStat
 * @param cntMissingBlksPerStrp
 * @param conf
 * @param numNonRaidedMissingBlks
 * @param numStrpMissingBlksMap
 * @return true if the file is corrupt
 * @throws java.io.IOException
 */
public static boolean isFileCorrupt(final DistributedFileSystem dfs, final FileStatus fileStat,
        final boolean cntMissingBlksPerStrp, final Configuration conf, AtomicLong numNonRaidedMissingBlks,
        Map<String, AtomicLongArray> numStrpMissingBlksMap) throws IOException {
    if (fileStat == null) {
        return false;
    }
    Path filePath = fileStat.getPath();
    try {
        // corruptBlocksPerStripe:
        // map stripe # -> # of corrupt blocks in that stripe (data + parity)
        HashMap<Integer, Integer> corruptBlocksPerStripe = new LinkedHashMap<Integer, Integer>();
        boolean fileCorrupt = false;
        // Har checking requires one more RPC to namenode per file
        // skip it for performance.
        RaidInfo raidInfo = RaidUtils.getFileRaidInfo(fileStat, conf, true);
        if (raidInfo.codec == null) {
            raidInfo = RaidUtils.getFileRaidInfo(fileStat, conf, false);
        }
        if (raidInfo.codec == null) {
            // Couldn't find out the parity file, so the file is corrupt
            int count = collectNumCorruptBlocksInFile(dfs, filePath);
            if (cntMissingBlksPerStrp && numNonRaidedMissingBlks != null) {
                numNonRaidedMissingBlks.addAndGet(count);
            }
            return true;
        }

        if (raidInfo.codec.isDirRaid) {
            RaidUtils.collectDirectoryCorruptBlocksInStripe(conf, dfs, raidInfo, fileStat,
                    corruptBlocksPerStripe);
        } else {
            RaidUtils.collectFileCorruptBlocksInStripe(dfs, raidInfo, fileStat, corruptBlocksPerStripe);
        }

        final int maxCorruptBlocksPerStripe = raidInfo.parityBlocksPerStripe;

        for (Integer corruptBlocksInStripe : corruptBlocksPerStripe.values()) {
            if (corruptBlocksInStripe == null) {
                continue;
            }
            // detect if the file has any stripes which cannot be fixed by Raid
            if (LOG.isDebugEnabled()) {
                LOG.debug("file " + filePath.toString() + " has corrupt blocks per Stripe value "
                        + corruptBlocksInStripe);
            }
            if (!fileCorrupt) {
                if (corruptBlocksInStripe > maxCorruptBlocksPerStripe) {
                    fileCorrupt = true;
                }
            }
            if (cntMissingBlksPerStrp && numStrpMissingBlksMap != null) {
                numStrpMissingBlksMap.get(raidInfo.codec.id).incrementAndGet(corruptBlocksInStripe - 1);
            }
        }
        return fileCorrupt;
    } catch (SocketException e) {
        // Re-throw network-related exceptions.
        throw e;
    } catch (SocketTimeoutException e) {
        throw e;
    } catch (IOException e) {
        // re-throw local exceptions.
        if (e.getCause() != null && !(e.getCause() instanceof RemoteException)) {
            throw e;
        }
        LOG.error("While trying to check isFileCorrupt " + filePath + " got exception ", e);
        return true;
    }
}
From source file: com.twitter.distributedlog.auditor.DLAuditor.java
private long calculateLedgerSpaceUsage(BookKeeperClient bkc, final ExecutorService executorService)
        throws IOException {
    final AtomicLong totalBytes = new AtomicLong(0);
    final AtomicLong totalEntries = new AtomicLong(0);
    final AtomicLong numLedgers = new AtomicLong(0);

    LedgerManager lm = BookKeeperAccessor.getLedgerManager(bkc.get());
    final SettableFuture<Void> doneFuture = SettableFuture.create();
    final BookKeeper bk = bkc.get();

    BookkeeperInternalCallbacks.Processor<Long> collector = new BookkeeperInternalCallbacks.Processor<Long>() {
        @Override
        public void process(final Long lid, final AsyncCallback.VoidCallback cb) {
            numLedgers.incrementAndGet();
            executorService.submit(new Runnable() {
                @Override
                public void run() {
                    bk.asyncOpenLedgerNoRecovery(lid, BookKeeper.DigestType.CRC32,
                            conf.getBKDigestPW().getBytes(UTF_8),
                            new org.apache.bookkeeper.client.AsyncCallback.OpenCallback() {
                                @Override
                                public void openComplete(int rc, LedgerHandle lh, Object ctx) {
                                    final int cbRc;
                                    if (BKException.Code.OK == rc) {
                                        totalBytes.addAndGet(lh.getLength());
                                        totalEntries.addAndGet(lh.getLastAddConfirmed() + 1);
                                        cbRc = rc;
                                    } else {
                                        cbRc = BKException.Code.ZKException;
                                    }
                                    executorService.submit(new Runnable() {
                                        @Override
                                        public void run() {
                                            cb.processResult(cbRc, null, null);
                                        }
                                    });
                                }
                            }, null);
                }
            });
        }
    };

    AsyncCallback.VoidCallback finalCb = new AsyncCallback.VoidCallback() {
        @Override
        public void processResult(int rc, String path, Object ctx) {
            if (BKException.Code.OK == rc) {
                doneFuture.set(null);
            } else {
                doneFuture.setException(BKException.create(rc));
            }
        }
    };

    lm.asyncProcessLedgers(collector, finalCb, null, BKException.Code.OK, BKException.Code.ZKException);

    try {
        doneFuture.get();
        logger.info("calculated {} ledgers\n\ttotal bytes = {}\n\ttotal entries = {}",
                new Object[] { numLedgers.get(), totalBytes.get(), totalEntries.get() });
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new DLInterruptedException("Interrupted on calculating ledger space : ", e);
    } catch (ExecutionException e) {
        if (e.getCause() instanceof IOException) {
            throw (IOException) (e.getCause());
        } else {
            throw new IOException("Failed to calculate ledger space : ", e.getCause());
        }
    }
    return totalBytes.get();
}
From source file: org.apache.distributedlog.auditor.DLAuditor.java
private long calculateLedgerSpaceUsage(BookKeeperClient bkc, final ExecutorService executorService)
        throws IOException {
    final AtomicLong totalBytes = new AtomicLong(0);
    final AtomicLong totalEntries = new AtomicLong(0);
    final AtomicLong numLedgers = new AtomicLong(0);

    LedgerManager lm = BookKeeperAccessor.getLedgerManager(bkc.get());
    final CompletableFuture<Void> doneFuture = FutureUtils.createFuture();
    final BookKeeper bk = bkc.get();

    BookkeeperInternalCallbacks.Processor<Long> collector = new BookkeeperInternalCallbacks.Processor<Long>() {
        @Override
        public void process(final Long lid, final AsyncCallback.VoidCallback cb) {
            numLedgers.incrementAndGet();
            executorService.submit(new Runnable() {
                @Override
                public void run() {
                    bk.asyncOpenLedgerNoRecovery(lid, BookKeeper.DigestType.CRC32,
                            conf.getBKDigestPW().getBytes(UTF_8),
                            new org.apache.bookkeeper.client.AsyncCallback.OpenCallback() {
                                @Override
                                public void openComplete(int rc, LedgerHandle lh, Object ctx) {
                                    final int cbRc;
                                    if (BKException.Code.OK == rc) {
                                        totalBytes.addAndGet(lh.getLength());
                                        totalEntries.addAndGet(lh.getLastAddConfirmed() + 1);
                                        cbRc = rc;
                                    } else {
                                        cbRc = BKException.Code.ZKException;
                                    }
                                    executorService.submit(new Runnable() {
                                        @Override
                                        public void run() {
                                            cb.processResult(cbRc, null, null);
                                        }
                                    });
                                }
                            }, null);
                }
            });
        }
    };

    AsyncCallback.VoidCallback finalCb = new AsyncCallback.VoidCallback() {
        @Override
        public void processResult(int rc, String path, Object ctx) {
            if (BKException.Code.OK == rc) {
                doneFuture.complete(null);
            } else {
                doneFuture.completeExceptionally(BKException.create(rc));
            }
        }
    };

    lm.asyncProcessLedgers(collector, finalCb, null, BKException.Code.OK, BKException.Code.ZKException);

    try {
        doneFuture.get();
        logger.info("calculated {} ledgers\n\ttotal bytes = {}\n\ttotal entries = {}",
                new Object[] { numLedgers.get(), totalBytes.get(), totalEntries.get() });
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new DLInterruptedException("Interrupted on calculating ledger space : ", e);
    } catch (ExecutionException e) {
        if (e.getCause() instanceof IOException) {
            throw (IOException) (e.getCause());
        } else {
            throw new IOException("Failed to calculate ledger space : ", e.getCause());
        }
    }
    return totalBytes.get();
}
From source file: org.apache.hadoop.raid.DistBlockIntegrityMonitor.java
public static Job startOneJob(Worker newWorker, Priority pri, Set<String> jobFiles, long detectTime,
        AtomicLong numFilesSubmitted, AtomicLong lastCheckingTime, long maxPendingJobs)
        throws IOException, InterruptedException, ClassNotFoundException {
    if (lastCheckingTime != null) {
        lastCheckingTime.set(System.currentTimeMillis());
    }
    String startTimeStr = dateFormat.format(new Date());
    String jobName = newWorker.JOB_NAME_PREFIX + "." + newWorker.jobCounter + "." + pri + "-pri" + "."
            + startTimeStr;
    Job job = null;
    synchronized (jobFiles) {
        if (jobFiles.size() == 0) {
            return null;
        }
        newWorker.jobCounter++;
        synchronized (newWorker.jobIndex) {
            if (newWorker.jobIndex.size() >= maxPendingJobs) {
                // full
                return null;
            }
            job = newWorker.startJob(jobName, jobFiles, pri, detectTime);
        }
        numFilesSubmitted.addAndGet(jobFiles.size());
        jobFiles.clear();
    }
    return job;
}
From source file: io.pravega.segmentstore.server.writer.SegmentAggregatorTests.java
/**
 * Tests the flush() method with Append and StreamSegmentSealOperations.
 */
@Test
public void testSeal() throws Exception {
    // Add some appends and seal, and then flush together. Verify that everything got flushed in one go.
    final int appendCount = 1000;
    final WriterConfig config = WriterConfig.builder()
            .with(WriterConfig.FLUSH_THRESHOLD_BYTES, appendCount * 50) // Extra high length threshold.
            .with(WriterConfig.FLUSH_THRESHOLD_MILLIS, 1000L)
            .with(WriterConfig.MAX_FLUSH_SIZE_BYTES, 10000)
            .with(WriterConfig.MIN_READ_TIMEOUT_MILLIS, 10L).build();

    @Cleanup
    TestContext context = new TestContext(config);
    context.storage.create(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    context.segmentAggregator.initialize(TIMEOUT, executorService()).join();

    @Cleanup
    ByteArrayOutputStream writtenData = new ByteArrayOutputStream();

    // Accumulate some Appends
    AtomicLong outstandingSize = new AtomicLong();
    SequenceNumberCalculator sequenceNumbers = new SequenceNumberCalculator(context, outstandingSize);
    for (int i = 0; i < appendCount; i++) {
        // Add another operation and record its length.
        StorageOperation appendOp = generateAppendAndUpdateMetadata(i, SEGMENT_ID, context);
        outstandingSize.addAndGet(appendOp.getLength());
        context.segmentAggregator.add(appendOp);
        getAppendData(appendOp, writtenData, context);
        sequenceNumbers.record(appendOp);

        // Call flush() and verify that we haven't flushed anything (by design).
        FlushResult flushResult = context.segmentAggregator.flush(TIMEOUT, executorService()).join();
        Assert.assertEquals(String.format("Not expecting a flush. OutstandingSize=%s, Threshold=%d",
                outstandingSize, config.getFlushThresholdBytes()), 0, flushResult.getFlushedBytes());
        Assert.assertEquals("Not expecting any merged bytes in this test.", 0, flushResult.getMergedBytes());
    }

    Assert.assertFalse("Unexpected value returned by mustFlush() before adding StreamSegmentSealOperation.",
            context.segmentAggregator.mustFlush());

    // Generate and add a Seal Operation.
    StorageOperation sealOp = generateSealAndUpdateMetadata(SEGMENT_ID, context);
    context.segmentAggregator.add(sealOp);
    Assert.assertEquals(
            "Unexpected value returned by getLowestUncommittedSequenceNumber() after adding StreamSegmentSealOperation.",
            sequenceNumbers.getLowestUncommitted(),
            context.segmentAggregator.getLowestUncommittedSequenceNumber());
    Assert.assertTrue("Unexpected value returned by mustFlush() after adding StreamSegmentSealOperation.",
            context.segmentAggregator.mustFlush());

    // Call flush and verify that the entire Aggregator got flushed and the Seal got persisted to Storage.
    FlushResult flushResult = context.segmentAggregator.flush(TIMEOUT, executorService()).join();
    Assert.assertEquals("Expected the entire Aggregator to be flushed.", outstandingSize.get(),
            flushResult.getFlushedBytes());
    Assert.assertFalse("Unexpected value returned by mustFlush() after flushing.",
            context.segmentAggregator.mustFlush());
    Assert.assertEquals("Unexpected value returned by getLowestUncommittedSequenceNumber() after flushing.",
            Operation.NO_SEQUENCE_NUMBER, context.segmentAggregator.getLowestUncommittedSequenceNumber());

    // Verify data.
    byte[] expectedData = writtenData.toByteArray();
    byte[] actualData = new byte[expectedData.length];
    SegmentProperties storageInfo = context.storage
            .getStreamSegmentInfo(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    Assert.assertEquals("Unexpected number of bytes flushed to Storage.", expectedData.length,
            storageInfo.getLength());
    Assert.assertTrue("Segment is not sealed in storage post flush.", storageInfo.isSealed());
    Assert.assertTrue("Segment is not marked in metadata as sealed in storage post flush.",
            context.segmentAggregator.getMetadata().isSealedInStorage());
    context.storage.read(InMemoryStorage.newHandle(context.segmentAggregator.getMetadata().getName(), false),
            0, actualData, 0, actualData.length, TIMEOUT).join();
    Assert.assertArrayEquals("Unexpected data written to storage.", expectedData, actualData);
}
From source file: org.nanoframework.orm.jedis.sharded.RedisClientImpl.java
@Override
public long del(final String... keys) {
    if (keys.length == 0) {
        return 0;
    }
    ShardedJedis jedis = null;
    try {
        jedis = POOL.getJedis(config.getRedisType());
        final ShardedJedisPipeline pipeline = jedis.pipelined();
        final List<Response<Long>> responses = new ArrayList<>();
        for (String key : keys) {
            responses.add(pipeline.del(key));
        }
        pipeline.sync();

        final AtomicLong dels = new AtomicLong(0);
        if (!CollectionUtils.isEmpty(responses)) {
            responses.forEach(res -> dels.addAndGet(res.get()));
        }
        return dels.get();
    } catch (final Throwable e) {
        throw new RedisClientException(e.getMessage(), e);
    } finally {
        POOL.close(jedis);
    }
}
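In the del example above, the AtomicLong serves purely as a mutable accumulator inside a forEach lambda, since local variables captured by a lambda must be effectively final. A minimal sketch of just that pattern, with made-up stand-in values instead of real Redis pipeline responses:

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;

public class LambdaAccumulator {
    public static void main(String[] args) {
        // Stand-ins for the per-key DEL results returned by the pipeline above.
        List<Long> deletedCounts = Arrays.asList(1L, 0L, 2L, 1L);

        // A plain local long cannot be reassigned inside the lambda, so an
        // AtomicLong is a common way to sum values inside forEach.
        AtomicLong dels = new AtomicLong(0);
        deletedCounts.forEach(count -> dels.addAndGet(count));

        System.out.println(dels.get()); // prints 4
    }
}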
From source file: org.apache.pulsar.client.impl.BinaryProtoLookupService.java
private void getTopicsUnderNamespace(InetSocketAddress socketAddress, NamespaceName namespace, Backoff backoff,
        AtomicLong remainingTime, CompletableFuture<List<String>> topicsFuture, Mode mode) {
    client.getCnxPool().getConnection(socketAddress).thenAccept(clientCnx -> {
        long requestId = client.newRequestId();
        ByteBuf request = Commands.newGetTopicsOfNamespaceRequest(namespace.toString(), requestId, mode);

        clientCnx.newGetTopicsOfNamespace(request, requestId).thenAccept(topicsList -> {
            if (log.isDebugEnabled()) {
                log.debug("[namespace: {}] Success get topics list in request: {}", namespace.toString(),
                        requestId);
            }

            // do not keep partition part of topic name
            List<String> result = Lists.newArrayList();
            topicsList.forEach(topic -> {
                String filtered = TopicName.get(topic).getPartitionedTopicName();
                if (!result.contains(filtered)) {
                    result.add(filtered);
                }
            });

            topicsFuture.complete(result);
        }).exceptionally((e) -> {
            topicsFuture.completeExceptionally(e);
            return null;
        });
    }).exceptionally((e) -> {
        long nextDelay = Math.min(backoff.next(), remainingTime.get());
        if (nextDelay <= 0) {
            topicsFuture.completeExceptionally(new PulsarClientException.TimeoutException(
                    "Could not getTopicsUnderNamespace within configured timeout."));
            return null;
        }

        ((ScheduledExecutorService) executor).schedule(() -> {
            log.warn(
                    "[namespace: {}] Could not get connection while getTopicsUnderNamespace -- Will try again in {} ms",
                    namespace, nextDelay);
            remainingTime.addAndGet(-nextDelay);
            getTopicsUnderNamespace(socketAddress, namespace, backoff, remainingTime, topicsFuture, mode);
        }, nextDelay, TimeUnit.MILLISECONDS);
        return null;
    });
}
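Note that addAndGet also accepts negative deltas: the lookup code above calls remainingTime.addAndGet(-nextDelay) to burn down the remaining retry budget before rescheduling. A small sketch of that decrement pattern under the same idea; RetryBudget is not a Pulsar class, just a hypothetical illustration.

import java.util.concurrent.atomic.AtomicLong;

// Hypothetical retry-budget helper: each retry subtracts its backoff delay
// from the remaining time, and the caller gives up once the budget is spent.
class RetryBudget {
    private final AtomicLong remainingMillis;

    RetryBudget(long totalMillis) {
        this.remainingMillis = new AtomicLong(totalMillis);
    }

    // Consumes delayMillis from the budget; returns false when exhausted.
    boolean tryConsume(long delayMillis) {
        // addAndGet with a negative delta decrements atomically and
        // returns the value after the subtraction.
        return remainingMillis.addAndGet(-delayMillis) > 0;
    }
}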
From source file: io.pravega.segmentstore.server.writer.SegmentAggregatorTests.java
/**
 * Tests the flush() method only with Append operations.
 * Verifies both length-based and time-based flush triggers, as well as flushing rather large operations.
 */
@Test
public void testFlushAppend() throws Exception {
    final WriterConfig config = DEFAULT_CONFIG;
    final int appendCount = config.getFlushThresholdBytes() * 10;

    @Cleanup
    TestContext context = new TestContext(config);
    context.storage.create(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    context.segmentAggregator.initialize(TIMEOUT, executorService()).join();

    @Cleanup
    ByteArrayOutputStream writtenData = new ByteArrayOutputStream();
    AtomicLong outstandingSize = new AtomicLong(); // Number of bytes remaining to be flushed.
    SequenceNumberCalculator sequenceNumbers = new SequenceNumberCalculator(context, outstandingSize);

    // Part 1: flush triggered by accumulated size.
    for (int i = 0; i < appendCount; i++) {
        // Add another operation and record its length.
        StorageOperation appendOp = generateAppendAndUpdateMetadata(i, SEGMENT_ID, context);
        outstandingSize.addAndGet(appendOp.getLength());
        context.segmentAggregator.add(appendOp);
        getAppendData(appendOp, writtenData, context);
        sequenceNumbers.record(appendOp);

        boolean expectFlush = outstandingSize.get() >= config.getFlushThresholdBytes();
        Assert.assertEquals("Unexpected value returned by mustFlush() (size threshold).", expectFlush,
                context.segmentAggregator.mustFlush());
        Assert.assertEquals(
                "Unexpected value returned by getLowestUncommittedSequenceNumber() before flush (size threshold).",
                sequenceNumbers.getLowestUncommitted(),
                context.segmentAggregator.getLowestUncommittedSequenceNumber());

        // Call flush() and inspect the result.
        FlushResult flushResult = context.segmentAggregator.flush(TIMEOUT, executorService()).join();
        if (expectFlush) {
            AssertExtensions.assertGreaterThanOrEqual("Not enough bytes were flushed (size threshold).",
                    config.getFlushThresholdBytes(), flushResult.getFlushedBytes());
            outstandingSize.addAndGet(-flushResult.getFlushedBytes());
            Assert.assertEquals(
                    "Unexpected value returned by getLowestUncommittedSequenceNumber() after flush (size threshold).",
                    sequenceNumbers.getLowestUncommitted(),
                    context.segmentAggregator.getLowestUncommittedSequenceNumber());
        } else {
            Assert.assertEquals(String.format("Not expecting a flush. OutstandingSize=%s, Threshold=%d",
                    outstandingSize, config.getFlushThresholdBytes()), 0, flushResult.getFlushedBytes());
        }

        Assert.assertFalse("Unexpected value returned by mustFlush() after flush (size threshold).",
                context.segmentAggregator.mustFlush());
        Assert.assertEquals("Not expecting any merged bytes in this test.", 0, flushResult.getMergedBytes());
    }

    // Part 2: flush triggered by time.
    for (int i = 0; i < appendCount; i++) {
        // Add another operation and record its length.
        StorageOperation appendOp = generateAppendAndUpdateMetadata(i, SEGMENT_ID, context);
        outstandingSize.addAndGet(appendOp.getLength());
        context.segmentAggregator.add(appendOp);
        getAppendData(appendOp, writtenData, context);
        sequenceNumbers.record(appendOp);

        // Call flush() and inspect the result.
        context.increaseTime(config.getFlushThresholdTime().toMillis() + 1); // Force a flush by incrementing the time by a lot.
        Assert.assertTrue("Unexpected value returned by mustFlush() (time threshold).",
                context.segmentAggregator.mustFlush());
        Assert.assertEquals(
                "Unexpected value returned by getLowestUncommittedSequenceNumber() before flush (time threshold).",
                sequenceNumbers.getLowestUncommitted(),
                context.segmentAggregator.getLowestUncommittedSequenceNumber());

        FlushResult flushResult = context.segmentAggregator.flush(TIMEOUT, executorService()).join();

        // We are always expecting a flush.
        AssertExtensions.assertGreaterThan("Not enough bytes were flushed (time threshold).", 0,
                flushResult.getFlushedBytes());
        outstandingSize.addAndGet(-flushResult.getFlushedBytes());

        Assert.assertFalse("Unexpected value returned by mustFlush() after flush (time threshold).",
                context.segmentAggregator.mustFlush());
        Assert.assertEquals(
                "Unexpected value returned by getLowestUncommittedSequenceNumber() after flush (time threshold).",
                sequenceNumbers.getLowestUncommitted(),
                context.segmentAggregator.getLowestUncommittedSequenceNumber());
        Assert.assertEquals("Not expecting any merged bytes in this test.", 0, flushResult.getMergedBytes());
    }

    // Part 3: Transaction appends. This will force an internal loop inside flush() to do so repeatedly.
    final int transactionSize = 100;
    for (int i = 0; i < appendCount / 10; i++) {
        for (int j = 0; j < transactionSize; j++) {
            // Add another operation and record its length.
            StorageOperation appendOp = generateAppendAndUpdateMetadata(i, SEGMENT_ID, context);
            outstandingSize.addAndGet(appendOp.getLength());
            context.segmentAggregator.add(appendOp);
            getAppendData(appendOp, writtenData, context);
            sequenceNumbers.record(appendOp);
            Assert.assertEquals(
                    "Unexpected value returned by getLowestUncommittedSequenceNumber() before flush (Transaction appends).",
                    sequenceNumbers.getLowestUncommitted(),
                    context.segmentAggregator.getLowestUncommittedSequenceNumber());
        }

        // Call flush() and inspect the result.
        Assert.assertTrue("Unexpected value returned by mustFlush() (Transaction appends).",
                context.segmentAggregator.mustFlush());

        FlushResult flushResult = context.segmentAggregator.flush(TIMEOUT, executorService()).join();

        // We are always expecting a flush.
        AssertExtensions.assertGreaterThan("Not enough bytes were flushed (Transaction appends).", 0,
                flushResult.getFlushedBytes());
        outstandingSize.addAndGet(-flushResult.getFlushedBytes());

        Assert.assertFalse("Unexpected value returned by mustFlush() after flush (Transaction appends).",
                context.segmentAggregator.mustFlush());
        Assert.assertEquals(
                "Unexpected value returned by getLowestUncommittedSequenceNumber() after flush (Transaction appends).",
                sequenceNumbers.getLowestUncommitted(),
                context.segmentAggregator.getLowestUncommittedSequenceNumber());
        Assert.assertEquals("Not expecting any merged bytes in this test.", 0, flushResult.getMergedBytes());
    }

    // Part 4: large appends (larger than MaxFlushSize).
    Random random = new Random();
    for (int i = 0; i < appendCount; i++) {
        // Add another operation and record its length.
        byte[] largeAppendData = new byte[config.getMaxFlushSizeBytes() * 10 + 1];
        random.nextBytes(largeAppendData);
        StorageOperation appendOp = generateAppendAndUpdateMetadata(SEGMENT_ID, largeAppendData, context);
        outstandingSize.addAndGet(appendOp.getLength());
        context.segmentAggregator.add(appendOp);
        getAppendData(appendOp, writtenData, context);
        sequenceNumbers.record(appendOp);

        // Call flush() and inspect the result.
        context.increaseTime(config.getFlushThresholdTime().toMillis() + 1); // Force a flush by incrementing the time by a lot.
        Assert.assertTrue("Unexpected value returned by mustFlush() (large appends).",
                context.segmentAggregator.mustFlush());
        Assert.assertEquals(
                "Unexpected value returned by getLowestUncommittedSequenceNumber() before flush (large appends).",
                sequenceNumbers.getLowestUncommitted(),
                context.segmentAggregator.getLowestUncommittedSequenceNumber());

        FlushResult flushResult = context.segmentAggregator.flush(TIMEOUT, executorService()).join();

        // We are always expecting a flush.
        AssertExtensions.assertGreaterThan("Not enough bytes were flushed (large appends).", 0,
                flushResult.getFlushedBytes());
        outstandingSize.addAndGet(-flushResult.getFlushedBytes());

        Assert.assertFalse("Unexpected value returned by mustFlush() after flush (time threshold).",
                context.segmentAggregator.mustFlush());
        Assert.assertEquals(
                "Unexpected value returned by getLowestUncommittedSequenceNumber() after flush (large appends).",
                sequenceNumbers.getLowestUncommitted(),
                context.segmentAggregator.getLowestUncommittedSequenceNumber());
        Assert.assertEquals("Not expecting any merged bytes in this test (large appends).", 0,
                flushResult.getMergedBytes());
    }

    // Verify data.
    Assert.assertEquals("Not expecting leftover data not flushed.", 0, outstandingSize.get());
    byte[] expectedData = writtenData.toByteArray();
    byte[] actualData = new byte[expectedData.length];
    long storageLength = context.storage
            .getStreamSegmentInfo(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join()
            .getLength();
    Assert.assertEquals("Unexpected number of bytes flushed to Storage.", expectedData.length, storageLength);
    context.storage.read(readHandle(context.segmentAggregator.getMetadata().getName()), 0, actualData, 0,
            actualData.length, TIMEOUT).join();
    Assert.assertArrayEquals("Unexpected data written to storage.", expectedData, actualData);
}
From source file: org.nanoframework.orm.jedis.sharded.RedisClientImpl.java
@Override
public boolean smove(final String source, final String destination, final String member) {
    Assert.hasText(source);
    Assert.hasText(destination);
    Assert.hasText(member);
    ShardedJedis jedis = null;
    try {
        jedis = POOL.getJedis(config.getRedisType());
        final Collection<Jedis> allShards;
        if ((allShards = jedis.getAllShards()).size() == 1) {
            return isSuccess(allShards.iterator().next().smove(source, destination, member));
        } else if (allShards.size() > 1) {
            final AtomicLong val = new AtomicLong();
            allShards.parallelStream().forEach(shard -> {
                Pipeline pipeline = shard.pipelined();
                pipeline.sismember(source, member);
                Response<Long> response = pipeline.smove(source, destination, member);
                pipeline.sync();
                val.addAndGet(response.get());
            });

            if (val.get() > 0) {
                return true;
            }
        }
        return false;
    } catch (final Throwable e) {
        throw new RedisClientException(e.getMessage(), e);
    } finally {
        POOL.close(jedis);
    }
}
From source file: org.apache.hadoop.hbase.io.hfile.bucket.BucketAllocator.java
/**
 * Rebuild the allocator's data structures from a persisted map.
 * @param availableSpace capacity of cache
 * @param map A map stores the block key and BucketEntry(block's meta data
 *          like offset, length)
 * @param realCacheSize cached data size statistics for bucket cache
 * @throws BucketAllocatorException
 */
BucketAllocator(long availableSpace, Map<BlockCacheKey, BucketEntry> map, AtomicLong realCacheSize)
        throws BucketAllocatorException {
    this(availableSpace);

    // each bucket has an offset, sizeindex. probably the buckets are too big
    // in our default state. so what we do is reconfigure them according to what
    // we've found. we can only reconfigure each bucket once; if more than once,
    // we know there's a bug, so we just log the info, throw, and start again...
    boolean[] reconfigured = new boolean[buckets.length];
    for (Map.Entry<BlockCacheKey, BucketEntry> entry : map.entrySet()) {
        long foundOffset = entry.getValue().offset();
        int foundLen = entry.getValue().getLength();
        int bucketSizeIndex = -1;
        for (int i = 0; i < BUCKET_SIZES.length; ++i) {
            if (foundLen <= BUCKET_SIZES[i]) {
                bucketSizeIndex = i;
                break;
            }
        }
        if (bucketSizeIndex == -1) {
            throw new BucketAllocatorException("Can't match bucket size for the block with size " + foundLen);
        }
        int bucketNo = (int) (foundOffset / (long) BUCKET_CAPACITY);
        if (bucketNo < 0 || bucketNo >= buckets.length)
            throw new BucketAllocatorException("Can't find bucket " + bucketNo + ", total buckets="
                    + buckets.length + "; did you shrink the cache?");
        Bucket b = buckets[bucketNo];
        if (reconfigured[bucketNo] == true) {
            if (b.sizeIndex() != bucketSizeIndex)
                throw new BucketAllocatorException("Inconsistent allocation in bucket map;");
        } else {
            if (!b.isCompletelyFree())
                throw new BucketAllocatorException(
                        "Reconfiguring bucket " + bucketNo + " but it's already allocated; corrupt data");
            // Need to remove the bucket from whichever list it's currently in at
            // the moment...
            BucketSizeInfo bsi = bucketSizeInfos[bucketSizeIndex];
            BucketSizeInfo oldbsi = bucketSizeInfos[b.sizeIndex()];
            oldbsi.removeBucket(b);
            bsi.instantiateBucket(b);
            reconfigured[bucketNo] = true;
        }
        realCacheSize.addAndGet(foundLen);
        buckets[bucketNo].addAllocation(foundOffset);
        usedSize += buckets[bucketNo].itemAllocationSize();
        bucketSizeInfos[bucketSizeIndex].blockAllocated(b);
    }
}