Example usage for java.util.concurrent.atomic AtomicBoolean set

List of usage examples for java.util.concurrent.atomic AtomicBoolean set

Introduction

On this page you can find example usage of java.util.concurrent.atomic.AtomicBoolean.set.

Prototype

public final void set(boolean newValue) 

Document

Sets the value to newValue, with memory effects as specified by VarHandle#setVolatile.
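
Below is a minimal, self-contained sketch (not taken from any of the projects listed under Usage) of the common signalling pattern: a worker thread spins on get() until another thread calls set(true), and the volatile memory effects described above guarantee the worker sees the update. Class and variable names are illustrative only.

import java.util.concurrent.atomic.AtomicBoolean;

public class AtomicBooleanSetExample {
    public static void main(String[] args) throws InterruptedException {
        final AtomicBoolean done = new AtomicBoolean(false);

        // The worker loops until another thread flips the flag.
        Thread worker = new Thread(() -> {
            while (!done.get()) {
                Thread.onSpinWait(); // busy-wait; a real worker would do useful work here
            }
            System.out.println("worker observed done == true, exiting");
        });

        worker.start();
        Thread.sleep(100); // let the worker spin briefly
        done.set(true);    // publish the new value with volatile semantics
        worker.join();
    }
}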

Usage

From source file:eu.europa.ec.markt.dss.validation102853.pades.PAdESSignature.java

private boolean hasDocumentTimestamp() {
    boolean levelReached;
    final PDFSignatureService pdfTimestampSignatureService = PdfObjFactory.getInstance()
            .newTimestampSignatureService();
    try {
        final AtomicBoolean atomicLevelReached = new AtomicBoolean(false);
        pdfTimestampSignatureService.validateSignatures(document.openStream(),
                new SignatureValidationCallback() {
                    @Override
                    public void validate(PdfDict catalog, PdfDict outerCatalog, X509Certificate signingCert,
                            Date signingDate, Certificate[] certs, PdfDict signatureDictionary,
                            PdfSignatureInfo pk) {
                        try {
                            final byte[] subFilters = signatureDictionary.get("SubFilter");
                            if (subFilters != null) {
                                String pdfSubFilter = new String(subFilters);
                                if (StringUtils.equals("/ETSI.RFC3161", pdfSubFilter)) {
                                    atomicLevelReached.set(true);
                                }
                            }
                        } catch (IOException e) {
                            throw new DSSException(e);
                        }
                    }
                });
        levelReached = atomicLevelReached.get();
    } catch (IOException e) {
        throw new DSSException(e);
    } catch (SignatureException e) {
        throw new DSSException(e);
    }
    return levelReached;
}

From source file:com.netflix.dyno.connectionpool.impl.lb.CircularListTest.java

@Test
public void testMultipleThreadsWithElementsRemoved() throws Exception {

    final AtomicBoolean stop = new AtomicBoolean(false);
    final CyclicBarrier barrier = new CyclicBarrier(5);
    final List<Future<Map<Integer, Integer>>> futures = new ArrayList<Future<Map<Integer, Integer>>>();

    for (int i = 0; i < 5; i++) {
        futures.add(threadPool.submit(new Callable<Map<Integer, Integer>>() {

            @Override
            public Map<Integer, Integer> call() throws Exception {

                barrier.await();

                TestWorker worker = new TestWorker();

                while (!stop.get()) {
                    worker.process();
                }

                return worker.map;
            }
        }));
    }

    Thread.sleep(200);

    List<Integer> newList = new ArrayList<Integer>(iList);

    final List<Integer> removedElements = new ArrayList<Integer>();
    removedElements.add(newList.remove(2));
    removedElements.add(newList.remove(5));
    removedElements.add(newList.remove(6));

    cList.swapWithList(newList);

    Thread.sleep(200);
    stop.set(true);

    Map<Integer, Integer> result = getTotalMap(futures);

    Map<Integer, Integer> subMap = CollectionUtils.filterKeys(result, new Predicate<Integer>() {

        @Override
        public boolean apply(Integer x) {
            return !removedElements.contains(x);
        }
    });

    checkValues(new ArrayList<Integer>(subMap.values()));
}

From source file:org.apache.hadoop.hdfs.TestFileConcurrentReader.java

private void runTestUnfinishedBlockCRCError(final boolean transferToAllowed, final SyncType syncType,
        final int writeSize, Configuration conf) throws IOException {
    conf.setBoolean(DFSConfigKeys.DFS_DATANODE_TRANSFERTO_ALLOWED_KEY, transferToAllowed);
    init(conf);

    final Path file = new Path("/block-being-written-to");
    final int numWrites = 2000;
    final AtomicBoolean writerDone = new AtomicBoolean(false);
    final AtomicBoolean writerStarted = new AtomicBoolean(false);
    final AtomicBoolean error = new AtomicBoolean(false);
    final FSDataOutputStream initialOutputStream = fileSystem.create(file);
    final Thread writer = new Thread(new Runnable() {
        private FSDataOutputStream outputStream = initialOutputStream;

        @Override
        public void run() {
            try {
                for (int i = 0; !error.get() && i < numWrites; i++) {
                    try {
                        final byte[] writeBuf = DFSTestUtil.generateSequentialBytes(i * writeSize, writeSize);
                        outputStream.write(writeBuf);
                        if (syncType == SyncType.SYNC) {
                            outputStream.hflush();
                        } else { // append
                            outputStream.close();
                            outputStream = fileSystem.append(file);
                        }
                        writerStarted.set(true);
                    } catch (IOException e) {
                        error.set(true);
                        LOG.error("error writing to file", e);
                    }
                }

                writerDone.set(true);
                outputStream.close();
            } catch (Exception e) {
                LOG.error("error in writer", e);

                throw new RuntimeException(e);
            }
        }
    });
    Thread tailer = new Thread(new Runnable() {
        @Override
        public void run() {
            try {
                long startPos = 0;
                while (!writerDone.get() && !error.get()) {
                    if (writerStarted.get()) {
                        try {
                            startPos = tailFile(file, startPos);
                        } catch (IOException e) {
                            LOG.error(String.format("error tailing file %s", file), e);

                            throw new RuntimeException(e);
                        }
                    }
                }
            } catch (RuntimeException e) {
                if (e.getCause() instanceof ChecksumException) {
                    error.set(true);
                }

                writer.interrupt();
                LOG.error("error in tailer", e);
                throw e;
            }
        }
    });

    writer.start();
    tailer.start();

    try {
        writer.join();
        tailer.join();

        assertFalse("error occurred, see log above", error.get());
    } catch (InterruptedException e) {
        LOG.info("interrupted waiting for writer or tailer to complete");

        Thread.currentThread().interrupt();
    }
    initialOutputStream.close();
}

From source file:org.apache.hadoop.hbase.client.TestAsyncGetMultiThread.java

@Test
public void test() throws IOException, InterruptedException, ExecutionException {
    int numThreads = 20;
    AtomicBoolean stop = new AtomicBoolean(false);
    ExecutorService executor = Executors.newFixedThreadPool(numThreads,
            Threads.newDaemonThreadFactory("TestAsyncGet-"));
    List<Future<?>> futures = new ArrayList<>();
    IntStream.range(0, numThreads).forEach(i -> futures.add(executor.submit(() -> {
        run(stop);
        return null;
    })));
    Collections.shuffle(Arrays.asList(SPLIT_KEYS), new Random(123));
    Admin admin = TEST_UTIL.getAdmin();
    for (byte[] splitPoint : SPLIT_KEYS) {
        admin.split(TABLE_NAME, splitPoint);
        for (HRegion region : TEST_UTIL.getHBaseCluster().getRegions(TABLE_NAME)) {
            region.compact(true);
        }
        Thread.sleep(5000);
        admin.balancer(true);
        Thread.sleep(5000);
        ServerName metaServer = TEST_UTIL.getHBaseCluster().getServerHoldingMeta();
        ServerName newMetaServer = TEST_UTIL.getHBaseCluster().getRegionServerThreads().stream()
                .map(t -> t.getRegionServer().getServerName()).filter(s -> !s.equals(metaServer)).findAny()
                .get();
        admin.move(HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes(),
                Bytes.toBytes(newMetaServer.getServerName()));
        Thread.sleep(5000);
    }
    stop.set(true);
    executor.shutdown();
    for (Future<?> future : futures) {
        future.get();
    }
}

From source file:com.android.tools.idea.tests.gui.gradle.GradleSyncTest.java

@Test
public void shouldUseLibrary() throws IOException {
    guiTest.importSimpleApplication();
    IdeFrameFixture ideFrame = guiTest.ideFrame();

    Project project = ideFrame.getProject();

    // Make sure the library was added.
    LibraryTable libraryTable = ProjectLibraryTable.getInstance(project);
    String libraryName = "org.apache.http.legacy-" + TestUtils.getLatestAndroidPlatform();
    Library library = libraryTable.getLibraryByName(libraryName);

    // Verify that the library has the right jar files
    VirtualFile[] jarFiles = library.getFiles(CLASSES);
    assertThat(jarFiles).asList().hasSize(1);
    VirtualFile jarFile = jarFiles[0];
    assertEquals("org.apache.http.legacy.jar", jarFile.getName());

    // Verify that the module depends on the library
    Module appModule = ideFrame.getModule("app");
    AtomicBoolean dependencyFound = new AtomicBoolean();
    new ReadAction() {
        @Override
        protected void run(@NotNull Result result) throws Throwable {
            ModifiableRootModel modifiableModel = ModuleRootManager.getInstance(appModule).getModifiableModel();
            try {
                for (OrderEntry orderEntry : modifiableModel.getOrderEntries()) {
                    if (orderEntry instanceof LibraryOrderEntry) {
                        LibraryOrderEntry libraryDependency = (LibraryOrderEntry) orderEntry;
                        if (libraryDependency.getLibrary() == library) {
                            dependencyFound.set(true);
                        }
                    }
                }
            } finally {
                modifiableModel.dispose();
            }
        }
    }.execute();
    assertTrue("Module app should depend on library '" + library.getName() + "'", dependencyFound.get());
}

From source file:org.apache.hadoop.hbase.master.procedure.TestMasterProcedureScheduler.java

@Test
public void testConcurrentCreateDelete() throws Exception {
    final MasterProcedureScheduler procQueue = queue;
    final TableName table = TableName.valueOf("testtb");
    final AtomicBoolean running = new AtomicBoolean(true);
    final AtomicBoolean failure = new AtomicBoolean(false);
    Thread createThread = new Thread() {
        @Override
        public void run() {
            try {
                TestTableProcedure proc = new TestTableProcedure(1, table,
                        TableProcedureInterface.TableOperationType.CREATE);
                while (running.get() && !failure.get()) {
                    if (procQueue.tryAcquireTableExclusiveLock(proc, table)) {
                        procQueue.releaseTableExclusiveLock(proc, table);
                    }
                }
            } catch (Throwable e) {
                LOG.error("create failed", e);
                failure.set(true);
            }
        }
    };

    Thread deleteThread = new Thread() {
        @Override
        public void run() {
            try {
                TestTableProcedure proc = new TestTableProcedure(2, table,
                        TableProcedureInterface.TableOperationType.DELETE);
                while (running.get() && !failure.get()) {
                    if (procQueue.tryAcquireTableExclusiveLock(proc, table)) {
                        procQueue.releaseTableExclusiveLock(proc, table);
                    }
                    procQueue.markTableAsDeleted(table);
                }
            } catch (Throwable e) {
                LOG.error("delete failed", e);
                failure.set(true);
            }
        }
    };

    createThread.start();
    deleteThread.start();
    for (int i = 0; i < 100 && running.get() && !failure.get(); ++i) {
        Thread.sleep(100);
    }
    running.set(false);
    createThread.join();
    deleteThread.join();
    assertEquals(false, failure.get());
}

From source file:org.apache.nifi.processors.csv.ParseCSVRecord.java

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    final FlowFile original = session.get();
    if (original == null) {
        return;
    }

    final AtomicBoolean lineFound = new AtomicBoolean(false);
    final Map<String, String> outputAttrs = new HashMap<>();

    session.read(original, new InputStreamCallback() {
        @Override
        public void process(InputStream inputStream) throws IOException {
            final String fromAttribute = context.getProperty(PROP_RECORD_FROM_ATTRIBUTE).getValue();

            String unparsedRecord;
            // data source is the attribute
            if (StringUtils.isNotBlank(fromAttribute)) {
                unparsedRecord = original.getAttribute(fromAttribute);
                if (StringUtils.isBlank(unparsedRecord)) {
                    // will be routed to failure at the end of the method implementation
                    return;
                }
            } else {
                // data source is the content
                // TODO expose the charset property?
                LineIterator iterator = IOUtils.lineIterator(inputStream, UTF_8);
                if (!iterator.hasNext()) {
                    return;
                }
                unparsedRecord = iterator.next();
            }

            lineFound.set(true);
            final String format = context.getProperty(PROP_FORMAT).getValue();
            final String delimiter = context.getProperty(PROP_DELIMITER).evaluateAttributeExpressions(original)
                    .getValue();
            final String schemaPrefix = context.getProperty(PROP_SCHEMA_ATTR_PREFIX)
                    .evaluateAttributeExpressions(original).getValue();
            final String valuePrefix = context.getProperty(PROP_VALUE_ATTR_PREFIX)
                    .evaluateAttributeExpressions(original).getValue();
            final boolean trimValues = context.getProperty(PROP_TRIM_VALUES).asBoolean();

            final CSVFormat csvFormat = buildFormat(format, delimiter, false, // this is a payload, not header anymore
                    null); // no custom header

            final CSVParser parser = csvFormat.parse(new StringReader(unparsedRecord));
            List<CSVRecord> records = parser.getRecords();
            if (records.size() > 1) {
                // TODO revisit for NiFi's native micro-batching
                throw new ProcessException("Multi-line entries not supported");
            }

            CSVRecord record = records.get(0);

            Map<String, String> originalAttrs = original.getAttributes();
            // filter delimited schema attributes only
            Map<String, String> schemaAttrs = new HashMap<>();
            for (String key : originalAttrs.keySet()) {
                if (key.startsWith(schemaPrefix)) {
                    schemaAttrs.put(key, originalAttrs.get(key));
                }
            }

            // put key/value pairs into attributes
            for (int i = 0; i < record.size(); i++) {
                String columnName = schemaAttrs.get(schemaPrefix + (i + 1)); // 1-based column numbering
                if (columnName == null) {
                    // 1-based column index
                    columnName = String.valueOf(i + 1);
                }
                // TODO indexed schemaless parsing vs auto-schema vs user-provided schema
                String columnValue = record.get(i);
                if (trimValues) {
                    columnValue = columnValue.trim();
                }
                String attrName = (StringUtils.isBlank(valuePrefix) ? "delimited.column." : valuePrefix)
                        + columnName;
                outputAttrs.put(attrName, columnValue);
            }
        }
    });

    if (lineFound.get()) {
        FlowFile ff = session.putAllAttributes(original, outputAttrs);
        session.transfer(ff, REL_SUCCESS);
    } else {
        session.transfer(original, REL_FAILURE);
    }
}

From source file:org.apache.tinkerpop.gremlin.groovy.engine.GremlinExecutorTest.java

@Test
public void shouldAllowVariableReuseAcrossThreads() throws Exception {
    final ExecutorService service = Executors.newFixedThreadPool(8, testingThreadFactory);
    final GremlinExecutor gremlinExecutor = GremlinExecutor.build().create();

    final AtomicBoolean failed = new AtomicBoolean(false);
    final int max = 512;
    final List<Pair<Integer, List<Integer>>> futures = Collections.synchronizedList(new ArrayList<>(max));
    IntStream.range(0, max).forEach(i -> {
        final int yValue = i * 2;
        final Bindings b = new SimpleBindings();
        b.put("x", i);
        b.put("y", yValue);
        final int zValue = i * -1;

        final String script = "z=" + zValue + ";[x,y,z]";
        try {
            service.submit(() -> {
                try {
                    final List<Integer> result = (List<Integer>) gremlinExecutor.eval(script, b).get();
                    futures.add(Pair.with(i, result));
                } catch (Exception ex) {
                    failed.set(true);
                }
            });
        } catch (Exception ex) {
            throw new RuntimeException(ex);
        }
    });

    service.shutdown();
    assertThat(service.awaitTermination(60000, TimeUnit.MILLISECONDS), is(true));

    // likely a concurrency exception if it occurs - and if it does then we've messed up because that's what this
    // test is partially designed to protect against.
    assertThat(failed.get(), is(false));

    assertEquals(max, futures.size());
    futures.forEach(t -> {
        assertEquals(t.getValue0(), t.getValue1().get(0));
        assertEquals(t.getValue0() * 2, t.getValue1().get(1).intValue());
        assertEquals(t.getValue0() * -1, t.getValue1().get(2).intValue());
    });
}

From source file:org.apache.hadoop.hdfs.qjournal.client.TestQJMWriteRead.java

@Test
public void testTailing() throws Exception {
    // Unlike the other unit test, numEdits here is constant as this is
    // a longer running test
    final int numEdits = 1000;
    final AtomicBoolean finishedProducing = new AtomicBoolean(false);
    final EditLogOutputStream out = qjm.startLogSegment(0);

    Callable<Void> producerThread = new Callable<Void>() {
        @Override
        public Void call() throws Exception {
            try {
                for (int i = 0; i < numEdits; i++) {
                    FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
                    // Set an increasing transaction id to verify correctness
                    op.setTransactionId(i);
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Writing " + op);
                    }

                    FSEditLogTestUtil.writeToStreams(op, out);

                    if (i % 50 == 0) {
                        Thread.sleep(100);
                        FSEditLogTestUtil.flushStreams(out);
                    }
                }

                FSEditLogTestUtil.flushStreams(out);
                FSEditLogTestUtil.closeStreams(out);
            } finally {
                // Let the consumer know that we have finished producing.
                finishedProducing.set(true);
            }
            return null;
        }
    };
    Callable<Void> consumerThread = new Callable<Void>() {
        @Override
        public Void call() throws Exception {
            List<EditLogInputStream> streams = Lists.newArrayList();
            qjm.selectInputStreams(streams, 0, true, false);
            EditLogInputStream in = streams.get(0);

            long numOps = 0;
            long maxTxId = -1;
            FSEditLogOp op;
            long lastPos = in.getPosition();
            do {
                op = in.readOp();
                if (op == null) { // If we've reached the end prematurely...
                    Thread.sleep(200);
                    LOG.info("Refreshing to " + lastPos);

                    in.refresh(lastPos, maxTxId); // Then refresh to last known good position
                } else {
                    long txId = op.getTransactionId();
                    if (txId > maxTxId) {
                        // Standby ingest contains similar logic: transactions
                        // with ids lower than what is already read are ignored.
                        numOps++;
                        maxTxId = txId;
                    }

                    // Remember the last known safe position that we can refresh to
                    lastPos = in.getPosition();
                }
            } while (op != null || !finishedProducing.get());
            Thread.sleep(1000);

            // finalize the segment, so we can read to the end
            qjm.finalizeLogSegment(0, numEdits - 1);

            // Once the producer is shut down, scan again from the last known good position
            // until the end of the ledger. This mirrors the Ingest logic (last
            // read when being quiesced).
            in.refresh(lastPos, maxTxId);
            do {
                op = in.readOp();
                if (op != null) {
                    long txId = op.getTransactionId();
                    if (txId > maxTxId) {
                        numOps++;
                        maxTxId = txId;
                    }
                }
            } while (op != null);

            assertEquals("Must have read " + numEdits + " edits", numEdits, numOps);
            assertEquals("Must end at txid = " + (numEdits - 1), numEdits - 1, maxTxId);
            return null;
        }
    };
    // Allow producer and consumer to run concurrently
    ExecutorService executor = Executors.newFixedThreadPool(2);
    Future<Void> producerFuture = executor.submit(producerThread);
    Future<Void> consumerFuture = executor.submit(consumerThread);

    // Calling a .get() on the future will rethrow any exceptions thrown in
    // the future.
    producerFuture.get();
    consumerFuture.get();
}

From source file:org.apache.nifi.processors.standard.SplitText.java

/**
 * Will split the incoming stream releasing all splits as FlowFile at once.
 */
@Override
public void onTrigger(ProcessContext context, ProcessSession processSession) throws ProcessException {
    FlowFile sourceFlowFile = processSession.get();
    if (sourceFlowFile == null) {
        return;
    }
    AtomicBoolean error = new AtomicBoolean();
    List<SplitInfo> computedSplitsInfo = new ArrayList<>();
    AtomicReference<SplitInfo> headerSplitInfoRef = new AtomicReference<>();
    processSession.read(sourceFlowFile, new InputStreamCallback() {
        @Override
        public void process(InputStream in) throws IOException {
            TextLineDemarcator demarcator = new TextLineDemarcator(in);
            SplitInfo splitInfo = null;
            long startOffset = 0;

            // Compute fragment representing the header (if available)
            long start = System.nanoTime();
            try {
                if (SplitText.this.headerLineCount > 0) {
                    splitInfo = SplitText.this.computeHeader(demarcator, startOffset,
                            SplitText.this.headerLineCount, null, null);
                    if ((splitInfo != null) && (splitInfo.lineCount < SplitText.this.headerLineCount)) {
                        error.set(true);
                        getLogger().error("Unable to split " + sourceFlowFile
                                + " due to insufficient amount of header lines. Required "
                                + SplitText.this.headerLineCount + " but was " + splitInfo.lineCount
                                + ". Routing to failure.");
                    }
                } else if (SplitText.this.headerMarker != null) {
                    splitInfo = SplitText.this.computeHeader(demarcator, startOffset, Long.MAX_VALUE,
                            SplitText.this.headerMarker.getBytes(StandardCharsets.UTF_8), null);
                }
                headerSplitInfoRef.set(splitInfo);
            } catch (IllegalStateException e) {
                error.set(true);
                getLogger().error(e.getMessage() + " Routing to failure.", e);
            }

            // Compute and collect fragments representing the individual splits
            if (!error.get()) {
                if (headerSplitInfoRef.get() != null) {
                    startOffset = headerSplitInfoRef.get().length;
                }
                long preAccumulatedLength = startOffset;
                while ((splitInfo = SplitText.this.nextSplit(demarcator, startOffset, SplitText.this.lineCount,
                        splitInfo, preAccumulatedLength)) != null) {
                    computedSplitsInfo.add(splitInfo);
                    startOffset += splitInfo.length;
                }
                long stop = System.nanoTime();
                if (getLogger().isDebugEnabled()) {
                    getLogger().debug("Computed splits in " + (stop - start) + " milliseconds.");
                }
            }
        }
    });

    if (error.get()) {
        processSession.transfer(sourceFlowFile, REL_FAILURE);
    } else {
        final String fragmentId = UUID.randomUUID().toString();
        List<FlowFile> splitFlowFiles = this.generateSplitFlowFiles(fragmentId, sourceFlowFile,
                headerSplitInfoRef.get(), computedSplitsInfo, processSession);
        final FlowFile originalFlowFile = FragmentAttributes.copyAttributesToOriginal(processSession,
                sourceFlowFile, fragmentId, splitFlowFiles.size());
        processSession.transfer(originalFlowFile, REL_ORIGINAL);
        if (!splitFlowFiles.isEmpty()) {
            processSession.transfer(splitFlowFiles, REL_SPLITS);
        }
    }
}