Example usage for java.util.concurrent.atomic AtomicBoolean get

List of usage examples for java.util.concurrent.atomic AtomicBoolean get

Introduction

On this page you can find example usage for java.util.concurrent.atomic.AtomicBoolean.get().

Prototype

public final boolean get() 

Document

Returns the current value, with memory effects as specified by VarHandle#getVolatile.
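
As a quick, self-contained illustration (separate from the project examples in the Usage section below), here is a minimal sketch of get() observing a flag written by another thread; the class name StopFlagDemo is made up for this example.

import java.util.concurrent.atomic.AtomicBoolean;

public class StopFlagDemo {
    public static void main(String[] args) throws InterruptedException {
        final AtomicBoolean stopRequested = new AtomicBoolean(false);

        Thread worker = new Thread(() -> {
            long iterations = 0;
            // get() is a volatile read, so the set(true) performed by the main
            // thread below is guaranteed to become visible here.
            while (!stopRequested.get()) {
                iterations++;
            }
            System.out.println("stopped after " + iterations + " iterations");
        });

        worker.start();
        Thread.sleep(100);
        stopRequested.set(true); // volatile write, paired with the worker's get()
        worker.join();
    }
}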

Usage

From source file:ch.cyberduck.core.editor.AbstractEditorTest.java

@Test
public void testOpen() throws Exception {
    final AtomicBoolean t = new AtomicBoolean();
    final NullSession session = new NullSession(new Host(new TestProtocol())) {
        @Override
        @SuppressWarnings("unchecked")
        public <T> T _getFeature(final Class<T> type) {
            if (type.equals(Read.class)) {
                return (T) new Read() {
                    @Override
                    public InputStream read(final Path file, final TransferStatus status,
                            final ConnectionCallback callback) throws BackgroundException {
                        t.set(true);
                        return IOUtils.toInputStream("content", Charset.defaultCharset());
                    }

                    @Override
                    public boolean offset(final Path file) {
                        assertEquals(new Path("/f", EnumSet.of(Path.Type.file)), file);
                        return false;
                    }
                };
            }
            return super._getFeature(type);
        }
    };
    final AtomicBoolean e = new AtomicBoolean();
    final Path file = new Path("/f", EnumSet.of(Path.Type.file));
    file.attributes().setSize("content".getBytes().length);
    final AbstractEditor editor = new AbstractEditor(new Application("com.editor"),
            new StatelessSessionPool(new TestLoginConnectionService(), session, PathCache.empty(),
                    new DisabledTranscriptListener(), new DefaultVaultRegistry(new DisabledPasswordCallback())),
            file, new DisabledProgressListener()) {
        @Override
        protected void edit(final ApplicationQuitCallback quit, final FileWatcherListener listener)
                throws IOException {
            e.set(true);
        }

        @Override
        protected void watch(final Local local, final FileWatcherListener listener) throws IOException {
            //
        }
    };
    editor.open(new DisabledApplicationQuitCallback(), new DisabledTransferErrorCallback(),
            new DisabledFileWatcherListener()).run(session);
    assertTrue(t.get());
    assertNotNull(editor.getLocal());
    assertTrue(e.get());
    assertTrue(editor.getLocal().exists());
}

From source file:org.apache.tinkerpop.gremlin.groovy.jsr223.GremlinGroovyScriptEngineTest.java

@Test
public void shouldReloadClassLoaderWhileDoingEvalInSeparateThread() throws Exception {
    final AtomicBoolean fail = new AtomicBoolean(false);
    final AtomicInteger counter = new AtomicInteger(0);
    final CountDownLatch latch = new CountDownLatch(1);
    final AtomicReference<Color> color = new AtomicReference<>(Color.RED);

    final GremlinGroovyScriptEngine scriptEngine = new GremlinGroovyScriptEngine();

    try {
        scriptEngine.eval("Color.BLACK");
        fail("Should fail as class is not yet imported");
    } catch (ScriptException se) {
        // should get here as Color.BLACK is not imported yet.
        logger.info("Failed to execute Color.BLACK as expected.");
    }

    final Thread evalThread = new Thread(() -> {
        try {
            // execute scripts until the other thread releases this latch (i.e. after import)
            while (latch.getCount() == 1) {
                scriptEngine.eval("1+1");
                counter.incrementAndGet();
            }

            color.set((Color) scriptEngine.eval("Color.BLACK"));
        } catch (Exception se) {
            fail.set(true);
        }
    }, "test-reload-classloader-1");

    evalThread.start();

    // let the first thread execute a bit.
    Thread.sleep(1000);

    final Thread importThread = new Thread(() -> {
        logger.info("Importing java.awt.Color...");
        final Set<String> imports = new HashSet<String>() {
            {
                add("import java.awt.Color");
            }
        };
        scriptEngine.addImports(imports);
        latch.countDown();
    }, "test-reload-classloader-2");

    importThread.start();

    // block until both threads are done
    importThread.join();
    evalThread.join();

    assertEquals(Color.BLACK, color.get());
    assertThat(counter.get(), greaterThan(0));
    assertFalse(fail.get());
}

From source file:org.apache.bookkeeper.bookie.LedgerStorageCheckpointTest.java

@Test
public void testCheckPointForEntryLoggerWithMultipleActiveEntryLogs() throws Exception {
    File tmpDir = createTempDir("DiskCheck", "test");

    final ServerConfiguration conf = TestBKConfiguration.newServerConfiguration()
            .setMetadataServiceUri(zkUtil.getMetadataServiceUri()).setZkTimeout(5000)
            .setJournalDirName(tmpDir.getPath()).setLedgerDirNames(new String[] { tmpDir.getPath() })
            .setAutoRecoveryDaemonEnabled(false).setFlushInterval(3000)
            .setBookiePort(PortManager.nextFreePort())
            // entrylog per ledger is enabled
            .setEntryLogPerLedgerEnabled(true)
            .setLedgerStorageClass(MockInterleavedLedgerStorage.class.getName());

    Assert.assertEquals("Number of JournalDirs", 1, conf.getJournalDirs().length);
    // we know there is only one ledgerDir
    File ledgerDir = Bookie.getCurrentDirectories(conf.getLedgerDirs())[0];
    BookieServer server = new BookieServer(conf);
    server.start();
    ClientConfiguration clientConf = new ClientConfiguration();
    clientConf.setMetadataServiceUri(zkUtil.getMetadataServiceUri());
    final BookKeeper bkClient = new BookKeeper(clientConf);

    int numOfLedgers = 12;
    int numOfEntries = 100;
    byte[] dataBytes = "data".getBytes();
    AtomicBoolean receivedExceptionForAdd = new AtomicBoolean(false);
    LongStream.range(0, numOfLedgers).parallel().mapToObj((ledgerId) -> {
        LedgerHandle handle = null;
        try {
            handle = bkClient.createLedgerAdv(ledgerId, 1, 1, 1, DigestType.CRC32, "passwd".getBytes(), null);
        } catch (BKException | InterruptedException exc) {
            receivedExceptionForAdd.compareAndSet(false, true);
            LOG.error("Got Exception while trying to create LedgerHandle for ledgerId: " + ledgerId, exc);
        }
        return handle;
    }).forEach((writeHandle) -> {
        IntStream.range(0, numOfEntries).forEach((entryId) -> {
            try {
                writeHandle.addEntry(entryId, dataBytes);
            } catch (BKException | InterruptedException exc) {
                receivedExceptionForAdd.compareAndSet(false, true);
                LOG.error("Got Exception while trying to AddEntry of ledgerId: " + writeHandle.getId()
                        + " entryId: " + entryId, exc);
            }
        });
        try {
            writeHandle.close();
        } catch (BKException | InterruptedException e) {
            receivedExceptionForAdd.compareAndSet(false, true);
            LOG.error("Got Exception while trying to close writeHandle of ledgerId: " + writeHandle.getId(), e);
        }
    });

    Assert.assertFalse(
            "There shouldn't be any exceptions while creating writeHandle and adding entries to writeHandle",
            receivedExceptionForAdd.get());

    executorController.advance(Duration.ofMillis(conf.getFlushInterval()));
    // since we have waited for more than flushInterval, SyncThread should have checkpointed.
    // if entrylogperledger is not enabled, then we checkpoint only when currentLog in EntryLogger
    // is rotated. But if entrylogperledger is enabled, then we checkpoint for every flushInterval period
    File lastMarkFile = new File(ledgerDir, "lastMark");
    Assert.assertTrue("lastMark file must be existing, because checkpoint should have happened",
            lastMarkFile.exists());
    LogMark rolledLogMark = readLastMarkFile(lastMarkFile);
    Assert.assertNotEquals("rolledLogMark should not be zero, since checkpoint has happenend", 0,
            rolledLogMark.compare(new LogMark()));

    bkClient.close();
    // here we are calling shutdown, but MockInterleavedLedgerStorage shutdown/flush
    // methods are noop, so entrylogger is not flushed as part of this shutdown
    // here we are trying to simulate Bookie crash, but there is no way to
    // simulate bookie abrupt crash
    server.shutdown();

    // delete journal files and lastMark, to make sure that we are not reading from
    // Journal file
    File[] journalDirs = conf.getJournalDirs();
    for (File journalDir : journalDirs) {
        File journalDirectory = Bookie.getCurrentDirectory(journalDir);
        List<Long> journalLogsId = Journal.listJournalIds(journalDirectory, null);
        for (long journalId : journalLogsId) {
            File journalFile = new File(journalDirectory, Long.toHexString(journalId) + ".txn");
            journalFile.delete();
        }
    }

    // we know there is only one ledgerDir
    lastMarkFile = new File(ledgerDir, "lastMark");
    lastMarkFile.delete();

    // now we are restarting BookieServer
    conf.setLedgerStorageClass(InterleavedLedgerStorage.class.getName());
    server = new BookieServer(conf);
    server.start();
    BookKeeper newBKClient = new BookKeeper(clientConf);
    // since Bookie checkpointed successfully before shutdown/crash,
    // we should be able to read from entryLogs though journal is deleted

    AtomicBoolean receivedExceptionForRead = new AtomicBoolean(false);

    LongStream.range(0, numOfLedgers).parallel().forEach((ledgerId) -> {
        try {
            LedgerHandle lh = newBKClient.openLedger(ledgerId, DigestType.CRC32, "passwd".getBytes());
            Enumeration<LedgerEntry> entries = lh.readEntries(0, numOfEntries - 1);
            while (entries.hasMoreElements()) {
                LedgerEntry entry = entries.nextElement();
                byte[] readData = entry.getEntry();
                Assert.assertEquals("Ledger Entry Data should match", new String("data".getBytes()),
                        new String(readData));
            }
            lh.close();
        } catch (BKException | InterruptedException e) {
            receivedExceptionForRead.compareAndSet(false, true);
            LOG.error("Got Exception while trying to read entries of ledger, ledgerId: " + ledgerId, e);
        }
    });
    Assert.assertFalse("There shouldn't be any exceptions while creating readHandle and while reading"
            + "entries using readHandle", receivedExceptionForRead.get());

    newBKClient.close();
    server.shutdown();
}

From source file:com.microsoft.tfs.core.clients.versioncontrol.internal.WebServiceLayerLocalWorkspaces.java

@Override
public GetOperation[] undoPendingChanges(final String workspaceName, final String ownerName,
        final ItemSpec[] items, final AtomicReference<Failure[]> failures, final String[] itemAttributeFilters,
        final String[] itemPropertyFilters, final AtomicBoolean onlineOperation, final boolean deleteAdds,
        final AtomicReference<ChangePendedFlags> changePendedFlags) {
    onlineOperation.set(true);

    // set this to none for local workspaces, if the call reaches the server
    // the flag will get overwritten
    changePendedFlags.set(ChangePendedFlags.NONE);

    final Workspace localWorkspace = getLocalWorkspace(workspaceName, ownerName);

    if (localWorkspace != null) {
        final AtomicReference<GetOperation[]> toReturn = new AtomicReference<GetOperation[]>();

        final LocalWorkspaceTransaction transaction = new LocalWorkspaceTransaction(localWorkspace);
        try {
            final AtomicReference<Failure[]> delegateFailures = new AtomicReference<Failure[]>(new Failure[0]);
            final AtomicBoolean onlineOperationRequired = new AtomicBoolean(false);

            transaction.execute(new AllTablesTransaction() {
                @Override
                public void invoke(final LocalWorkspaceProperties wp, final WorkspaceVersionTable lv,
                        final LocalPendingChangesTable pc) {
                    toReturn.set(LocalDataAccessLayer.undoPendingChanges(localWorkspace, wp, lv, pc, items,
                            delegateFailures, onlineOperationRequired, itemPropertyFilters));

                    if (onlineOperationRequired.get()) {
                        transaction.abort();
                        toReturn.set(null);
                    }

                    /*
                     * we should check to see if we are going to cause an
                     * existing file conflict; if we are, abort the
                     * transaction - since we don't want to try and contact
                     * the server in an offline undo.
                     */
                    if (toReturn.get() != null) {
                        Map<String, GetOperation> localItemDictionary = null;

                        for (final GetOperation op : toReturn.get()) {
                            if (op.getItemType() == ItemType.FILE && op.getTargetLocalItem() != null
                                    && op.getTargetLocalItem().length() > 0
                                    && LocalPath.equals(op.getTargetLocalItem(),
                                            op.getCurrentLocalItem()) == false) {
                                final WorkspaceLocalItem item = lv.getByLocalItem(op.getTargetLocalItem());

                                if ((item == null || item.isDeleted())
                                        && new File(op.getTargetLocalItem()).exists()) {
                                    if (localItemDictionary == null) {
                                        localItemDictionary = new HashMap<String, GetOperation>();
                                        /*
                                         * we go through our list and keep
                                         * track of adds we are removing
                                         * this is for the shelve /move
                                         * case.
                                         */
                                        for (final GetOperation getOp : toReturn.get()) {
                                            if (getOp.getTargetLocalItem() != null
                                                    && getOp.getTargetLocalItem().length() > 0
                                                    && getOp.getItemType() == ItemType.FILE) {
                                                final GetOperation currentValue = localItemDictionary
                                                        .get(getOp.getTargetLocalItem());
                                                if (currentValue != null) {
                                                    // don't overwrite an
                                                    // add
                                                    if (currentValue.getChangeType().contains(ChangeType.ADD)) {
                                                        localItemDictionary.put(getOp.getTargetLocalItem(),
                                                                getOp);
                                                    }
                                                } else {
                                                    localItemDictionary.put(getOp.getTargetLocalItem(), getOp);
                                                }
                                            }
                                        }
                                    }

                                    final GetOperation existingItem = localItemDictionary
                                            .get(op.getTargetLocalItem());
                                    if (existingItem != null
                                            && existingItem.getChangeType().contains(ChangeType.ADD)) {
                                        /*
                                         * if we are going to be removing
                                         * this anyway don't worry
                                         */
                                        if (deleteAdds) {
                                            continue;
                                        }
                                    }

                                    if (existingItem == null
                                            || !tryMoveAddLocation(existingItem, localItemDictionary)) {
                                        throw new VersionControlException(MessageFormat.format(
                                                //@formatter:off
                                                Messages.getString(
                                                        "WebServiceLayerLocalWorkspaces.UndoItemExistsLocallyFormat"), //$NON-NLS-1$
                                                //@formatter:on
                                                (op.getCurrentLocalItem() != null
                                                        && op.getCurrentLocalItem().length() > 0)
                                                                ? op.getCurrentLocalItem()
                                                                : op.getTargetLocalItem(),
                                                op.getTargetLocalItem()));
                                    }
                                }
                            }
                        }
                    }
                }
            });

            if (null != toReturn.get()) {
                onlineOperation.set(false);
                failures.set(delegateFailures.get());
                return toReturn.get();
            }
        } finally {
            try {
                transaction.close();
            } catch (final IOException e) {
                throw new VersionControlException(e);
            }
        }

        final Workspace w = reconcileIfLocal(workspaceName, ownerName);

        // Lock the workspace which will receive the pending changes
        final WorkspaceLock lock = lockIfLocal(w);

        try {
            try {
                if (getServiceLevel().getValue() >= WebServiceLevel.TFS_2012_QU1.getValue()) {
                    final _Repository5Soap_UndoPendingChangesInLocalWorkspaceResponse response = getRepository5()
                            .undoPendingChangesInLocalWorkspace(workspaceName, ownerName,
                                    (_ItemSpec[]) WrapperUtils.unwrap(_ItemSpec.class, items),
                                    itemPropertyFilters, itemAttributeFilters,
                                    VersionControlConstants.MAX_SERVER_PATH_SIZE);

                    failures.set((Failure[]) WrapperUtils.wrap(Failure.class, response.getFailures()));
                    changePendedFlags.set(new ChangePendedFlags(response.getChangePendedFlags()));
                    toReturn.set((GetOperation[]) WrapperUtils.wrap(GetOperation.class,
                            response.getUndoPendingChangesInLocalWorkspaceResult()));
                } else {
                    final _Repository4Soap_UndoPendingChangesInLocalWorkspaceResponse response = getRepository4()
                            .undoPendingChangesInLocalWorkspace(workspaceName, ownerName,
                                    (_ItemSpec[]) WrapperUtils.unwrap(_ItemSpec.class, items),
                                    itemPropertyFilters, itemAttributeFilters);

                    failures.set((Failure[]) WrapperUtils.wrap(Failure.class, response.getFailures()));
                    changePendedFlags.set(new ChangePendedFlags(response.getChangePendedFlags()));
                    toReturn.set((GetOperation[]) WrapperUtils.wrap(GetOperation.class,
                            response.getUndoPendingChangesInLocalWorkspaceResult()));
                }
            } catch (final ProxyException e) {
                VersionControlExceptionMapper.map(e);
            }

            syncWorkingFoldersIfNecessary(w, changePendedFlags.get());
            syncPendingChangesIfLocal(w, toReturn.get(), itemPropertyFilters);

            // When a pending add is undone, the item on disk is not
            // touched; so we need to inform the scanner that the item is
            // invalidated so it is re-scanned. We'll invalidate the scanner
            // if we detect that we went to the server to undo a pending
            // add.
            if (null != toReturn.get()) {
                for (final GetOperation op : toReturn.get()) {
                    if (op.getChangeType().contains(ChangeType.ADD)) {
                        localWorkspace.getWorkspaceWatcher().markPathChanged(""); //$NON-NLS-1$
                        break;
                    }
                }
            }

            return toReturn.get();
        } finally {
            if (lock != null) {
                lock.close();
            }
        }
    } else {
        return super.undoPendingChanges(workspaceName, ownerName, items, failures, itemAttributeFilters,
                itemPropertyFilters, onlineOperation, deleteAdds, changePendedFlags);
    }
}

From source file:org.apache.tinkerpop.gremlin.groovy.engine.GremlinExecutorTest.java

@Test
public void shouldAllowConcurrentModificationOfGlobals() throws Exception {
    // this test simulates a scenario that likely shouldn't happen - where globals are modified by multiple
    // threads. Globals are typically created in a synchronized fashion, but it's possible that someone
    // could do something like this, and this test validates that concurrency exceptions don't occur as a
    // result
    final ExecutorService service = Executors.newFixedThreadPool(8, testingThreadFactory);
    final Bindings globals = new SimpleBindings();
    globals.put("g", -1);
    final GremlinExecutor gremlinExecutor = GremlinExecutor.build().globalBindings(globals).create();

    final AtomicBoolean failed = new AtomicBoolean(false);
    final int max = 512;
    final List<Pair<Integer, List<Integer>>> futures = Collections.synchronizedList(new ArrayList<>(max));
    IntStream.range(0, max).forEach(i -> {
        final int yValue = i * 2;
        final Bindings b = new SimpleBindings();
        b.put("x", i);
        b.put("y", yValue);
        final int zValue = i * -1;

        final String script = "z=" + zValue + ";[x,y,z,g]";
        try {
            service.submit(() -> {
                try {
                    // modify the global in a separate thread
                    gremlinExecutor.getGlobalBindings().put("g", i);
                    gremlinExecutor.getGlobalBindings().put(Integer.toString(i), i);
                    gremlinExecutor.getGlobalBindings().keySet().stream()
                            .filter(s -> i % 2 == 0 && !s.equals("g")).findFirst().ifPresent(globals::remove);
                    final List<Integer> result = (List<Integer>) gremlinExecutor.eval(script, b).get();
                    futures.add(Pair.with(i, result));
                } catch (Exception ex) {
                    failed.set(true);
                }
            });
        } catch (Exception ex) {
            throw new RuntimeException(ex);
        }
    });

    service.shutdown();
    assertThat(service.awaitTermination(60000, TimeUnit.MILLISECONDS), is(true));

    // likely a concurrency exception if it occurs - and if it does then we've messed up because that's what this
    // test is partially designed to protect against.
    assertThat(failed.get(), is(false));

    assertEquals(max, futures.size());
    futures.forEach(t -> {
        assertEquals(t.getValue0(), t.getValue1().get(0));
        assertEquals(t.getValue0() * 2, t.getValue1().get(1).intValue());
        assertEquals(t.getValue0() * -1, t.getValue1().get(2).intValue());
        assertThat(t.getValue1().get(3).intValue(), greaterThan(-1));
    });
}

From source file:com.indeed.lsmtree.recordcache.PersistentRecordCache.java

/**
 * Performs lookup for multiple keys and returns a streaming iterator to results.
 * Each element in the iterator is one of
 *  (1) an exception associated with a single lookup
 *  (2) a key value tuple
 *
 * @param keys      lookup keys
 * @param progress  (optional) an AtomicInteger for tracking progress
 * @param skipped   (optional) an AtomicInteger for tracking missing keys
 * @return          iterator of lookup results
 */
public Iterator<Either<Exception, P2<K, V>>> getStreaming(final @Nonnull Iterator<K> keys,
        final @Nullable AtomicInteger progress, final @Nullable AtomicInteger skipped) {
    log.info("starting store lookups");
    LongArrayList addressList = new LongArrayList();
    int notFound = 0;
    while (keys.hasNext()) {
        final K key = keys.next();
        final Long address;
        try {
            address = index.get(key);
        } catch (IOException e) {
            log.error("error", e);
            return Iterators.singletonIterator(Left.<Exception, P2<K, V>>of(new IndexReadException(e)));
        }
        if (address != null) {
            addressList.add(address);
        } else {
            notFound++;
        }
    }
    if (progress != null)
        progress.addAndGet(notFound);
    if (skipped != null)
        skipped.addAndGet(notFound);
    log.info("store lookups complete, sorting addresses");

    final long[] addresses = addressList.elements();
    Arrays.sort(addresses, 0, addressList.size());

    log.info("initializing store lookup iterator");
    final BlockingQueue<Runnable> taskQueue = new ArrayBlockingQueue<Runnable>(100);
    final Iterator<List<Long>> iterable = Iterators.partition(addressList.iterator(), 1000);
    final ExecutorService primerThreads = new ThreadPoolExecutor(10, 10, 0L, TimeUnit.MILLISECONDS, taskQueue,
            new NamedThreadFactory("store priming thread", true, log), new RejectedExecutionHandler() {
                @Override
                public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
                    try {
                        taskQueue.put(r);
                    } catch (InterruptedException e) {
                        log.error("error", e);
                        throw new RuntimeException(e);
                    }
                }
            });
    final BlockingQueue<List<Either<Exception, P2<K, V>>>> completionQueue = new ArrayBlockingQueue<List<Either<Exception, P2<K, V>>>>(
            10);
    final AtomicLong runningTasks = new AtomicLong(0);
    final AtomicBoolean taskSubmitterRunning = new AtomicBoolean(true);

    new Thread(new Runnable() {
        @Override
        public void run() {
            while (iterable.hasNext()) {
                runningTasks.incrementAndGet();
                final List<Long> addressesSublist = iterable.next();
                primerThreads.submit(new FutureTask<List<Either<Exception, P2<K, V>>>>(
                        new RecordLookupTask(addressesSublist)) {
                    @Override
                    protected void done() {
                        try {
                            final List<Either<Exception, P2<K, V>>> results = get();
                            if (progress != null) {
                                progress.addAndGet(results.size());
                            }
                            completionQueue.put(results);
                        } catch (InterruptedException e) {
                            log.error("error", e);
                            throw new RuntimeException(e);
                        } catch (ExecutionException e) {
                            log.error("error", e);
                            throw new RuntimeException(e);
                        }
                    }
                });
            }
            taskSubmitterRunning.set(false);
        }
    }, "RecordLookupTaskSubmitterThread").start();

    return new Iterator<Either<Exception, P2<K, V>>>() {

        Iterator<Either<Exception, P2<K, V>>> currentIterator;

        @Override
        public boolean hasNext() {
            if (currentIterator != null && currentIterator.hasNext())
                return true;
            while (taskSubmitterRunning.get() || runningTasks.get() > 0) {
                try {
                    final List<Either<Exception, P2<K, V>>> list = completionQueue.poll(1, TimeUnit.SECONDS);
                    if (list != null) {
                        log.debug("remaining: " + runningTasks.decrementAndGet());
                        currentIterator = list.iterator();
                        if (currentIterator.hasNext())
                            return true;
                    }
                } catch (InterruptedException e) {
                    log.error("error", e);
                    throw new RuntimeException(e);
                }
            }
            primerThreads.shutdown();
            return false;
        }

        @Override
        public Either<Exception, P2<K, V>> next() {
            return currentIterator.next();
        }

        @Override
        public void remove() {
            throw new UnsupportedOperationException();
        }
    };
}
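
The hasNext() loop above keeps polling the completion queue for as long as the submitter thread reports itself alive through taskSubmitterRunning.get() or tasks are still outstanding. Below is a minimal, self-contained sketch of that done-flag-plus-queue pattern using only the standard library; the names DoneFlagQueueDemo and producerRunning are illustrative and not part of the project above.

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

public class DoneFlagQueueDemo {
    public static void main(String[] args) throws InterruptedException {
        final BlockingQueue<Integer> results = new ArrayBlockingQueue<>(10);
        final AtomicBoolean producerRunning = new AtomicBoolean(true);

        Thread producer = new Thread(() -> {
            try {
                for (int i = 0; i < 5; i++) {
                    results.put(i);
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            } finally {
                producerRunning.set(false); // signals that no more results will arrive
            }
        });
        producer.start();

        // Keep draining while the producer is running or results are still queued;
        // producerRunning.get() plays the same role as taskSubmitterRunning.get() above.
        while (producerRunning.get() || !results.isEmpty()) {
            Integer item = results.poll(1, TimeUnit.SECONDS);
            if (item != null) {
                System.out.println("got " + item);
            }
        }
        producer.join();
    }
}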

From source file:org.apache.bookkeeper.bookie.CreateNewLogTest.java

@Test
public void testLockConsistency() throws Exception {
    ServerConfiguration conf = TestBKConfiguration.newServerConfiguration();

    conf.setLedgerDirNames(ledgerDirs);
    conf.setEntryLogFilePreAllocationEnabled(false);
    conf.setEntryLogPerLedgerEnabled(true);
    conf.setMaximumNumberOfActiveEntryLogs(5);

    CountDownLatch latch = new CountDownLatch(1);
    AtomicInteger count = new AtomicInteger(0);

    /*
     * Inject wait operation in 'getWritableLedgerDirsForNewLog' method of
     * ledgerDirsManager. getWritableLedgerDirsForNewLog will be called when
     * entryLogManager.createNewLog is called.
     */
    LedgerDirsManager ledgerDirsManager = new LedgerDirsManager(conf, conf.getLedgerDirs(),
            new DiskChecker(conf.getDiskUsageThreshold(), conf.getDiskUsageWarnThreshold())) {
        /*
         * When getWritableLedgerDirsForNewLog is called for the first time, it
         * will await on the 'latch' before calling super's
         * getWritableLedgerDirsForNewLog.
         */
        @Override
        public List<File> getWritableLedgerDirsForNewLog() throws NoWritableLedgerDirException {
            if (count.incrementAndGet() == 1) {
                try {
                    latch.await();
                } catch (InterruptedException e) {
                    LOG.error("Got InterruptedException while awaiting for latch countdown", e);
                }
            }
            return super.getWritableLedgerDirsForNewLog();
        }
    };

    EntryLogger el = new EntryLogger(conf, ledgerDirsManager);
    EntryLogManagerForEntryLogPerLedger entryLogManager = (EntryLogManagerForEntryLogPerLedger) el
            .getEntryLogManager();

    long firstLedgerId = 100L;
    AtomicBoolean newLogCreated = new AtomicBoolean(false);

    Assert.assertFalse("EntryLogManager cacheMap should not contain entry for firstLedgerId",
            entryLogManager.getCacheAsMap().containsKey(firstLedgerId));
    Assert.assertEquals("Value of the count should be 0", 0, count.get());
    /*
     * In a new thread, create a new log for 'firstLedgerId' and then set
     * 'newLogCreated' to true. Since this is the first createNewLog call,
     * it is going to be blocked until the latch is counted down to 0.
     */
    new Thread() {
        @Override
        public void run() {
            try {
                entryLogManager.createNewLog(firstLedgerId);
                newLogCreated.set(true);
            } catch (IOException e) {
                LOG.error("Got IOException while creating new log", e);
            }
        }
    }.start();

    /*
     * Wait until entry for 'firstLedgerId' is created in cacheMap. It will
     * be created because in the other thread createNewLog is called.
     */
    while (!entryLogManager.getCacheAsMap().containsKey(firstLedgerId)) {
        Thread.sleep(200);
    }
    Lock firstLedgersLock = entryLogManager.getLock(firstLedgerId);

    /*
     * since 'latch' has not been counted down, a new log should not be created
     * even after waiting for 2 secs.
     */
    Thread.sleep(2000);
    Assert.assertFalse("New log shouldn't have created", newLogCreated.get());

    /*
     * create MaximumNumberOfActiveEntryLogs of entrylogs and do cache
     * cleanup, so that the earliest entry from cache will be removed.
     */
    for (int i = 1; i <= conf.getMaximumNumberOfActiveEntryLogs(); i++) {
        entryLogManager.createNewLog(firstLedgerId + i);
    }
    entryLogManager.doEntryLogMapCleanup();
    Assert.assertFalse("Entry for that ledger shouldn't be there",
            entryLogManager.getCacheAsMap().containsKey(firstLedgerId));

    /*
     * now count down the latch, so that the other thread can make progress
     * with createNewLog; since this entry is evicted from the cache, the
     * newly created entrylog will be added to rotatedentrylogs.
     */
    latch.countDown();
    while (!newLogCreated.get()) {
        Thread.sleep(200);
    }
    while (entryLogManager.getRotatedLogChannels().size() < 1) {
        Thread.sleep(200);
    }

    /*
     * Entry for 'firstLedgerId' is removed from cache, but even in this
     * case when we get lock for the 'firstLedgerId' it should be the same
     * as we got earlier.
     */
    Lock lockForThatLedgerAfterRemoval = entryLogManager.getLock(firstLedgerId);
    Assert.assertEquals("For a given ledger lock should be the same before and after removal", firstLedgersLock,
            lockForThatLedgerAfterRemoval);
}

From source file:org.apache.pulsar.broker.service.persistent.PersistentTopic.java

protected boolean addReplicationCluster(String remoteCluster, PersistentTopic persistentTopic,
        ManagedCursor cursor, String localCluster) {
    AtomicBoolean isReplicatorStarted = new AtomicBoolean(true);
    replicators.computeIfAbsent(remoteCluster, r -> {
        try {
            return new PersistentReplicator(PersistentTopic.this, cursor, localCluster, remoteCluster,
                    brokerService);
        } catch (NamingException e) {
            isReplicatorStarted.set(false);
            log.error("[{}] Replicator startup failed due to partitioned-topic {}", topic, remoteCluster);
        }
        return null;
    });
    // clean up the replicator if startup failed
    if (!isReplicatorStarted.get()) {
        replicators.remove(remoteCluster);
    }
    return isReplicatorStarted.get();
}
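
The method above relies on computeIfAbsent not adding a mapping when the mapping function returns null, and uses the AtomicBoolean purely to record the failure so it can be checked afterwards with get(). A minimal, self-contained sketch of that idiom (the names ReplicatorMapDemo and startReplicator are made up for illustration):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicBoolean;

public class ReplicatorMapDemo {
    // stand-in for a constructor or factory that may fail
    static String startReplicator(String cluster) throws Exception {
        throw new Exception("startup failed for " + cluster);
    }

    public static void main(String[] args) {
        ConcurrentHashMap<String, String> replicators = new ConcurrentHashMap<>();
        AtomicBoolean isReplicatorStarted = new AtomicBoolean(true);

        replicators.computeIfAbsent("remote-cluster", cluster -> {
            try {
                return startReplicator(cluster);
            } catch (Exception e) {
                isReplicatorStarted.set(false);
                return null; // returning null means no mapping is created
            }
        });

        if (!isReplicatorStarted.get()) {
            // mirrors the cleanup in the example above; harmless if nothing was added
            replicators.remove("remote-cluster");
            System.out.println("replicator startup failed");
        }
    }
}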

From source file:org.apache.hadoop.yarn.applications.distributedshell.TestDistributedShell.java

public void testDSShell(boolean haveDomain) throws Exception {
    String[] args = { "--jar", APPMASTER_JAR, "--num_containers", "2", "--shell_command",
            Shell.WINDOWS ? "dir" : "ls", "--master_memory", "512", "--master_vcores", "2",
            "--container_memory", "128", "--container_vcores", "1" };
    if (haveDomain) {
        String[] domainArgs = { "--domain", "TEST_DOMAIN", "--view_acls", "reader_user reader_group",
                "--modify_acls", "writer_user writer_group", "--create" };
        List<String> argsList = new ArrayList<String>(Arrays.asList(args));
        argsList.addAll(Arrays.asList(domainArgs));
        args = argsList.toArray(new String[argsList.size()]);
    }

    LOG.info("Initializing DS Client");
    final Client client = new Client(new Configuration(yarnCluster.getConfig()));
    boolean initSuccess = client.init(args);
    Assert.assertTrue(initSuccess);
    LOG.info("Running DS Client");
    final AtomicBoolean result = new AtomicBoolean(false);
    Thread t = new Thread() {
        public void run() {
            try {
                result.set(client.run());
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }
    };
    t.start();

    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(new Configuration(yarnCluster.getConfig()));
    yarnClient.start();
    String hostName = NetUtils.getHostname();

    boolean verified = false;
    String errorMessage = "";
    while (!verified) {
        List<ApplicationReport> apps = yarnClient.getApplications();
        if (apps.size() == 0) {
            Thread.sleep(10);
            continue;
        }
        ApplicationReport appReport = apps.get(0);
        if (appReport.getHost().equals("N/A")) {
            Thread.sleep(10);
            continue;
        }
        errorMessage = "Expected host name to start with '" + hostName + "', was '" + appReport.getHost()
                + "'. Expected rpc port to be '-1', was '" + appReport.getRpcPort() + "'.";
        if (checkHostname(appReport.getHost()) && appReport.getRpcPort() == -1) {
            verified = true;
        }
        if (appReport.getYarnApplicationState() == YarnApplicationState.FINISHED) {
            break;
        }
    }
    Assert.assertTrue(errorMessage, verified);
    t.join();
    LOG.info("Client run completed. Result=" + result);
    Assert.assertTrue(result.get());

    if (timelineVersionWatcher.getTimelineVersion() == 1.5f) {
        long scanInterval = conf.getLong(
                YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_SCAN_INTERVAL_SECONDS,
                YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_SCAN_INTERVAL_SECONDS_DEFAULT);
        Path doneDir = new Path(YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_DONE_DIR_DEFAULT);
        // Wait till the data is moved to done dir, or timeout and fail
        while (true) {
            RemoteIterator<FileStatus> iterApps = fs.listStatusIterator(doneDir);
            if (iterApps.hasNext()) {
                break;
            }
            Thread.sleep(scanInterval * 2);
        }
    }

    TimelineDomain domain = null;
    if (haveDomain) {
        domain = yarnCluster.getApplicationHistoryServer().getTimelineStore().getDomain("TEST_DOMAIN");
        Assert.assertNotNull(domain);
        Assert.assertEquals("reader_user reader_group", domain.getReaders());
        Assert.assertEquals("writer_user writer_group", domain.getWriters());
    }
    TimelineEntities entitiesAttempts = yarnCluster.getApplicationHistoryServer().getTimelineStore()
            .getEntities(ApplicationMaster.DSEntity.DS_APP_ATTEMPT.toString(), null, null, null, null, null,
                    null, null, null, null);
    Assert.assertNotNull(entitiesAttempts);
    Assert.assertEquals(1, entitiesAttempts.getEntities().size());
    Assert.assertEquals(2, entitiesAttempts.getEntities().get(0).getEvents().size());
    Assert.assertEquals(entitiesAttempts.getEntities().get(0).getEntityType().toString(),
            ApplicationMaster.DSEntity.DS_APP_ATTEMPT.toString());
    if (haveDomain) {
        Assert.assertEquals(domain.getId(), entitiesAttempts.getEntities().get(0).getDomainId());
    } else {
        Assert.assertEquals("DEFAULT", entitiesAttempts.getEntities().get(0).getDomainId());
    }
    String currAttemptEntityId = entitiesAttempts.getEntities().get(0).getEntityId();
    ApplicationAttemptId attemptId = ApplicationAttemptId.fromString(currAttemptEntityId);
    NameValuePair primaryFilter = new NameValuePair(ApplicationMaster.APPID_TIMELINE_FILTER_NAME,
            attemptId.getApplicationId().toString());
    TimelineEntities entities = yarnCluster.getApplicationHistoryServer().getTimelineStore().getEntities(
            ApplicationMaster.DSEntity.DS_CONTAINER.toString(), null, null, null, null, null, primaryFilter,
            null, null, null);
    Assert.assertNotNull(entities);
    Assert.assertEquals(2, entities.getEntities().size());
    Assert.assertEquals(entities.getEntities().get(0).getEntityType().toString(),
            ApplicationMaster.DSEntity.DS_CONTAINER.toString());
    if (haveDomain) {
        Assert.assertEquals(domain.getId(), entities.getEntities().get(0).getDomainId());
    } else {
        Assert.assertEquals("DEFAULT", entities.getEntities().get(0).getDomainId());
    }
}

From source file:org.apache.hadoop.hbase.regionserver.TestHStore.java

@Test
public void testFlushBeforeCompletingScanWithFilter() throws IOException, InterruptedException {
    final AtomicBoolean timeToGoNextRow = new AtomicBoolean(false);
    final int expectedSize = 2;
    testFlushBeforeCompletingScan(new MyListHook() {
        @Override
        public void hook(int currentSize) {
            if (currentSize == expectedSize - 1) {
                try {
                    flushStore(store, id++);
                    timeToGoNextRow.set(true);
                } catch (IOException e) {
                    throw new RuntimeException(e);
                }
            }
        }
    }, new FilterBase() {
        @Override
        public Filter.ReturnCode filterKeyValue(Cell v) throws IOException {
            if (timeToGoNextRow.get()) {
                timeToGoNextRow.set(false);
                return ReturnCode.NEXT_ROW;
            } else {
                return ReturnCode.INCLUDE;
            }
        }
    }, expectedSize);
}