Example usage for java.util.concurrent.atomic AtomicBoolean get

List of usage examples for java.util.concurrent.atomic AtomicBoolean get

Introduction

On this page you can find example usages of java.util.concurrent.atomic AtomicBoolean get.

Prototype

public final boolean get() 

Document

Returns the current value, with memory effects as specified by VarHandle#getVolatile.
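
Because get() is a volatile read, a value published by one thread through set() is guaranteed to become visible to another thread that polls get(). Below is a minimal sketch of the resulting stop-flag idiom; the class and variable names are illustrative, not taken from the examples that follow.

import java.util.concurrent.atomic.AtomicBoolean;

public class StopFlagExample {
    public static void main(String[] args) throws InterruptedException {
        final AtomicBoolean running = new AtomicBoolean(true);

        Thread worker = new Thread(() -> {
            // Volatile read: guaranteed to observe the main thread's set(false).
            while (running.get()) {
                Thread.onSpinWait(); // placeholder for a unit of work
            }
        });
        worker.start();

        Thread.sleep(100);
        running.set(false); // volatile write: published to the worker's get()
        worker.join();
    }
}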

Usage

From source file:org.apache.hadoop.hdfs.TestFileConcurrentReader.java

private void runTestUnfinishedBlockCRCError(final boolean transferToAllowed, final SyncType syncType,
        final int writeSize, Configuration conf) throws IOException {
    conf.setBoolean(DFSConfigKeys.DFS_DATANODE_TRANSFERTO_ALLOWED_KEY, transferToAllowed);
    init(conf);

    final Path file = new Path("/block-being-written-to");
    final int numWrites = 2000;
    final AtomicBoolean writerDone = new AtomicBoolean(false);
    final AtomicBoolean writerStarted = new AtomicBoolean(false);
    final AtomicBoolean error = new AtomicBoolean(false);
    final FSDataOutputStream initialOutputStream = fileSystem.create(file);
    final Thread writer = new Thread(new Runnable() {
        private FSDataOutputStream outputStream = initialOutputStream;

        @Override
        public void run() {
            try {
                for (int i = 0; !error.get() && i < numWrites; i++) {
                    try {
                        final byte[] writeBuf = DFSTestUtil.generateSequentialBytes(i * writeSize, writeSize);
                        outputStream.write(writeBuf);
                        if (syncType == SyncType.SYNC) {
                            outputStream.hflush();
                        } else { // append
                            outputStream.close();
                            outputStream = fileSystem.append(file);
                        }
                        writerStarted.set(true);
                    } catch (IOException e) {
                        error.set(true);
                        LOG.error("error writing to file", e);
                    }
                }

                writerDone.set(true);
                outputStream.close();
            } catch (Exception e) {
                LOG.error("error in writer", e);

                throw new RuntimeException(e);
            }
        }
    });
    Thread tailer = new Thread(new Runnable() {
        @Override
        public void run() {
            try {
                long startPos = 0;
                while (!writerDone.get() && !error.get()) {
                    if (writerStarted.get()) {
                        try {
                            startPos = tailFile(file, startPos);
                        } catch (IOException e) {
                            LOG.error(String.format("error tailing file %s", file), e);

                            throw new RuntimeException(e);
                        }
                    }
                }
            } catch (RuntimeException e) {
                if (e.getCause() instanceof ChecksumException) {
                    error.set(true);
                }

                writer.interrupt();
                LOG.error("error in tailer", e);
                throw e;
            }
        }
    });

    writer.start();
    tailer.start();

    try {
        writer.join();
        tailer.join();

        assertFalse("error occurred, see log above", error.get());
    } catch (InterruptedException e) {
        LOG.info("interrupted waiting for writer or tailer to complete");

        Thread.currentThread().interrupt();
    }
    initialOutputStream.close();
}

From source file:org.apache.jackrabbit.oak.spi.blob.AbstractBlobStoreTest.java

@Test
public void testCloseStream() throws Exception {
    final AtomicBoolean closed = new AtomicBoolean();
    InputStream in = new InputStream() {
        @Override
        public void close() {
            closed.set(true);
        }

        @Override
        public int read() throws IOException {
            return -1;
        }
    };
    store.writeBlob(in);
    assertTrue(closed.get());
}
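
A pattern recurs throughout the test examples on this page (testCloseStream above, the CSV import and command-emulator tests below): a local variable captured by an anonymous class or lambda must be effectively final, so a final AtomicBoolean serves as a mutable boolean holder that the callback writes with set(true) and the test verifies afterwards with get(). A minimal self-contained sketch of the idiom follows; the Listener interface is hypothetical, standing in for the real callbacks.

import java.util.concurrent.atomic.AtomicBoolean;

public class CallbackFlagExample {
    // Hypothetical callback interface standing in for InputStream.close(),
    // RecordGroup.doProcess(), and similar callbacks in the examples.
    interface Listener {
        void onEvent();
    }

    static void fire(Listener listener) {
        listener.onEvent();
    }

    public static void main(String[] args) {
        // Captured locals must be effectively final, so the AtomicBoolean
        // acts as a mutable boolean holder the callback can write to.
        final AtomicBoolean called = new AtomicBoolean(false);
        fire(new Listener() {
            @Override
            public void onEvent() {
                called.set(true);
            }
        });
        if (!called.get()) {
            throw new AssertionError("listener was never invoked");
        }
    }
}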

From source file:com.asakusafw.testdriver.inprocess.InProcessJobExecutorTest.java

/**
 * Test method for executing an emulated command.
 */
@Test
public void executeCommand_simple() {
    AtomicBoolean call = new AtomicBoolean();
    MockCommandEmulator.callback(args -> {
        call.set(true);
        assertThat(args, contains("hello", "world"));
    });
    JobExecutor executor = new InProcessJobExecutor(context);
    try {
        executor.execute(command("mock", "hello", "world"), Collections.emptyMap());
    } catch (IOException e) {
        throw new AssertionError(e);
    }
    assertThat(call.get(), is(true));
}

From source file:com.microsoft.tfs.core.clients.versioncontrol.internal.localworkspace.BaselineFolderCollection.java

/**
 * Given a baseline file GUID and a target location on disk, copies the
 * baseline from the baseline store to the target location. (The target
 * location always receives a decompressed copy of the baseline, even if it
 * is stored compressed in the baseline folder.)
 *
 * @param workspace
 * @param baselineFolders
 * @param baselineFileGuid
 *        Baseline file GUID to copy
 * @param targetLocalItem
 *        Target location for the baseline file
 * @param baselineFileLength
 *        (optional) If provided, the uncompressed baseline length will be
 *        compared against this value and checked after decompression. If
 *        the values do not match, an exception will be thrown.
 * @param baselineHashValue
 *        (optional) If provided, the uncompressed baseline will be hashed
 *        and its hash compared to this value after decompression. If the
 *        values do not match, an exception will be thrown.
 */
public static void copyBaselineToTarget(final Workspace workspace, final List<BaselineFolder> baselineFolders,
        final byte[] baselineFileGuid, final String targetLocalItem, final long baselineFileLength,
        final byte[] baselineHashValue, final boolean symlink) {
    Check.notNullOrEmpty(targetLocalItem, "targetLocalItem"); //$NON-NLS-1$
    BaselineFolder.checkForValidBaselineFileGUID(baselineFileGuid);

    // Clear the target location.
    final File file = new File(targetLocalItem);
    file.delete();

    final AtomicBoolean outIsBaselineCompressed = new AtomicBoolean();
    final String baselineLocation = getBaselineLocation(workspace, baselineFolders, baselineFileGuid,
            outIsBaselineCompressed);

    if (null == baselineLocation) {
        // The baseline could not be located on disk.
        throw new MissingBaselineException(targetLocalItem);
    }

    String decompressedBaselineLocation = baselineLocation;

    try {
        byte[] decompressedHashValue = null;
        final boolean haveBaselineHashValue = null != baselineHashValue && 16 == baselineHashValue.length;

        MessageDigest md5Digest = null;
        if (haveBaselineHashValue) {
            md5Digest = MessageDigest.getInstance("MD5"); //$NON-NLS-1$
        }

        if (outIsBaselineCompressed.get()) {
            // The temporary file is created in the folder where the
            // compressed baseline currently exists. We use the temporary
            // file extension so that we can clean up the file later if we
            // happen to lose it.
            decompressedBaselineLocation = LocalPath.combine(LocalPath.getParent(baselineLocation),
                    GUID.newGUIDString()) + TMP_EXTENSION;

            // Decompress the baseline to a temporary file. Then move the
            // temporary file to the target location.
            final byte[] buffer = new byte[DECOMPRESSION_BUFFER_SIZE];

            InputStream inputStream = null;
            OutputStream outputStream = null;

            try {
                inputStream = new GZIPInputStream(new FileInputStream(baselineLocation));
                if (!symlink) {
                    outputStream = new FileOutputStream(decompressedBaselineLocation);
                }

                int bytesRead;
                while (true) {
                    bytesRead = inputStream.read(buffer, 0, buffer.length);

                    if (bytesRead < 0) {
                        break;
                    } else if (bytesRead == 0) {
                        continue;
                    }

                    if (null != md5Digest) {
                        md5Digest.update(buffer, 0, bytesRead);
                    }

                    if (symlink) {
                        final String targetLink = new String(buffer, 0, bytesRead);
                        FileSystemUtils.getInstance().createSymbolicLink(targetLink, targetLocalItem);
                    } else {
                        outputStream.write(buffer, 0, bytesRead);
                    }
                }

                if (null != md5Digest) {
                    decompressedHashValue = md5Digest.digest();
                }
            } finally {
                if (inputStream != null) {
                    IOUtils.closeSafely(inputStream);
                }
                if (outputStream != null) {
                    IOUtils.closeSafely(outputStream);
                }
            }
        }

        // First, check to see if the length of the file matches.
        if (-1 != baselineFileLength && baselineFileLength != new File(decompressedBaselineLocation).length()) {
            throw new CorruptBaselineException(targetLocalItem,
                    Messages.getString("BaselineFolderCollection.BaselineLengthDoesNotMatch")); //$NON-NLS-1$
        }

        if (null != md5Digest && null == decompressedHashValue && !symlink) {
            // Calculate the decompressed hash value for a raw file (.rw
            // extension) as we will not have gone through the streaming
            // logic above
            decompressedHashValue = HashUtils.hashFile(new File(decompressedBaselineLocation),
                    HashUtils.ALGORITHM_MD5);
        }

        if (haveBaselineHashValue && null != decompressedHashValue && 16 == decompressedHashValue.length) {
            if (!Arrays.equals(baselineHashValue, decompressedHashValue)) {
                throw new CorruptBaselineException(targetLocalItem,
                        Messages.getString("BaselineFolderCollection.BaselineHashValueDoesNotMatch")); //$NON-NLS-1$
            }
        }

        // Put the decompressed baseline at the target location. We've
        // verified its contents are correct.
        if (!symlink) {
            if (outIsBaselineCompressed.get()) {
                FileHelpers.rename(decompressedBaselineLocation, targetLocalItem);
            } else {
                FileCopyHelper.copy(decompressedBaselineLocation, targetLocalItem);
            }
        }
    } catch (final Exception ex) {
        // If the baseline is corrupt, delete it so we'll throw a missing
        // baseline exception next time. (This is not strictly necessary.)
        if (ex instanceof CorruptBaselineException && null != baselineLocation) {
            FileHelpers.deleteFileWithoutException(baselineLocation);
        }

        // Try not to leak a temp file on the way out if we're throwing.
        final File tempFile = new File(decompressedBaselineLocation);
        if (outIsBaselineCompressed.get() && null != decompressedBaselineLocation && tempFile.exists()) {
            FileHelpers.deleteFileWithoutException(decompressedBaselineLocation);
        }

        throw new VersionControlException(ex);
    }
}
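
copyBaselineToTarget above also shows AtomicBoolean in a single-threaded role: Java has no out-parameters, so getBaselineLocation reports whether the baseline is compressed by writing into an AtomicBoolean supplied by the caller, which the caller later reads with get(). A minimal sketch of that out-parameter idiom follows; the method and path names are hypothetical.

import java.util.concurrent.atomic.AtomicBoolean;

public class OutParameterExample {
    // Hypothetical lookup: returns a location and reports a second result
    // ("is it compressed?") through the caller-supplied AtomicBoolean,
    // mirroring getBaselineLocation(..., outIsBaselineCompressed) above.
    static String findBaseline(String guid, AtomicBoolean outCompressed) {
        outCompressed.set(guid.endsWith(".gz"));
        return "/baselines/" + guid;
    }

    public static void main(String[] args) {
        AtomicBoolean compressed = new AtomicBoolean();
        String location = findBaseline("abc123.gz", compressed);
        System.out.println(location + " compressed=" + compressed.get());
    }
}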

From source file:net.sourceforge.ganttproject.io.CsvImportTest.java

public void testIncompleteHeader() throws IOException {
    String header = "A, B";
    String data = "a1, b1";
    final AtomicBoolean wasCalled = new AtomicBoolean(false);
    GanttCSVOpen.RecordGroup recordGroup = new GanttCSVOpen.RecordGroup("ABC",
            ImmutableSet.<String>of("A", "B", "C"), // all fields
            ImmutableSet.<String>of("A", "B")) { // mandatory fields
        @Override
        protected boolean doProcess(CSVRecord record) {
            wasCalled.set(true);
            assertEquals("a1", record.get("A"));
            assertEquals("b1", record.get("B"));
            return true;
        }
    };
    GanttCSVOpen importer = new GanttCSVOpen(createSupplier(Joiner.on('\n').join(header, data)), recordGroup);
    importer.load();
    assertTrue(wasCalled.get());
}

From source file:biz.ganttproject.impex.csv.CsvImportTest.java

public void testBasic() throws Exception {
    String header = "A, B";
    String data = "a1, b1";
    final AtomicBoolean wasCalled = new AtomicBoolean(false);
    RecordGroup recordGroup = new RecordGroup("AB", ImmutableSet.<String>of("A", "B")) {
        @Override
        protected boolean doProcess(CSVRecord record) {
            if (!super.doProcess(record)) {
                return false;
            }
            wasCalled.set(true);
            assertEquals("a1", record.get("A"));
            assertEquals("b1", record.get("B"));
            return true;
        }
    };
    GanttCSVOpen importer = new GanttCSVOpen(createSupplier(Joiner.on('\n').join(header, data)), recordGroup);
    importer.load();
    assertTrue(wasCalled.get());
}

From source file:org.apache.nifi.processors.msgpack.MessagePackPack.java

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final ObjectMapper reader = new ObjectMapper();
    final ObjectMapper writer = new ObjectMapper(new MessagePackFactory());
    writer.setAnnotationIntrospector(new JsonArrayFormat());

    final AtomicBoolean failed = new AtomicBoolean(false);
    flowFile = session.write(flowFile, new StreamCallback() {
        @Override
        public void process(InputStream is, OutputStream os) throws IOException {
            try (final OutputStream msgpack = new BufferedOutputStream(os)) {
                final JsonNode json = reader.readTree(is);
                final byte[] bytes = writer.writeValueAsBytes(json);
                msgpack.write(bytes);
                msgpack.flush();
            } catch (JsonProcessingException e) {
                getLogger().error(e.getMessage(), e);
                failed.set(true);
            }
        }
    });

    if (failed.get()) {
        session.transfer(flowFile, REL_FAILURE);
        return;
    }

    flowFile = session.putAttribute(flowFile, CoreAttributes.MIME_TYPE.key(), MIME_TYPE);
    flowFile = session.putAttribute(flowFile, MIME_EXT_KEY, MIME_EXT);

    session.transfer(flowFile, REL_SUCCESS);
}

From source file:com.quartzdesk.executor.dao.AbstractDao.java

/**
 * Checks if the specified table exists in the specified schema and returns true if
 * it exists, false otherwise. This method tries to look up the table using both
 * lower-case and upper-case schema and table names because some databases seem to
 * require the names to be in upper-case (DB2, Oracle), whereas other databases require
 * the names to be in lower-case.
 *
 * @param session    a Hibernate session.
 * @param schemaName an optional schema name where to look for the table name.
 * @param tableName  a table name.
 * @return true if the table exists, false otherwise.
 */
public boolean tableExists(Session session, final String schemaName, final String tableName) {
    final AtomicBoolean tableExists = new AtomicBoolean(false);

    session.doWork(new Work() {
        @Override
        public void execute(Connection connection) throws SQLException {
            log.debug("Checking if table '{}' exists.", tableName);

            DatabaseMetaData metaData = connection.getMetaData();

            // 1. attempt - try schema and table name in lower-case (does not work in DB2 and Oracle)
            ResultSet res = metaData.getTables(null,
                    schemaName == null ? null : schemaName.toLowerCase(Locale.US),
                    tableName.toLowerCase(Locale.US), new String[] { "TABLE" });

            tableExists.set(res.next());
            DbUtils.close(res);

            if (tableExists.get()) {
                log.debug("Table '{}' exists.", tableName);
            } else {
                // 2. attempt - try schema and table name in upper-case (required for DB2 and Oracle)
                res = metaData.getTables(null, schemaName == null ? null : schemaName.toUpperCase(Locale.US),
                        tableName.toUpperCase(Locale.US), new String[] { "TABLE" });

                tableExists.set(res.next());
                DbUtils.close(res);

                if (tableExists.get()) {
                    log.debug("Table '{}' exists.", tableName);
                } else {
                    log.debug("Table '{}' does not exist.", tableName);
                }
            }
        }
    });

    return tableExists.get();
}

From source file:biz.ganttproject.impex.csv.CsvImportTest.java

public void testSkipEmptyLine() throws Exception {
    String header = "A, B";
    String data = "a1, b1";
    final AtomicBoolean wasCalled = new AtomicBoolean(false);
    RecordGroup recordGroup = new RecordGroup("AB", ImmutableSet.<String>of("A", "B")) {
        @Override
        protected boolean doProcess(CSVRecord record) {
            if (!super.doProcess(record)) {
                return false;
            }
            wasCalled.set(true);
            assertEquals("a1", record.get("A"));
            assertEquals("b1", record.get("B"));
            return true;
        }
    };
    GanttCSVOpen importer = new GanttCSVOpen(createSupplier(Joiner.on('\n').join(header, "", data)),
            recordGroup);
    importer.load();
    assertTrue(wasCalled.get());
}

From source file:ddf.catalog.resource.download.ReliableResourceDownloadManager.java

/**
 * @param resourceRequest
 *            the original @ResourceRequest to retrieve the resource
 * @param metacard
 *            the @Metacard associated with the resource being downloaded
 * @param retriever
 *            the @ResourceRetriever to be used to get the resource
 * @return the modified @ResourceResponse with the @ReliableResourceInputStream that the client
 *         should read from
 * @throws DownloadException
 */
public ResourceResponse download(ResourceRequest resourceRequest, Metacard metacard,
        ResourceRetriever retriever) throws DownloadException {

    if (metacard == null) {
        throw new DownloadException("Cannot download resource if metacard is null");
    } else if (StringUtils.isBlank(metacard.getId())) {
        throw new DownloadException("Metacard must have unique id.");
    } else if (retriever == null) {
        throw new DownloadException("Cannot download resource if retriever is null");
    } else if (resourceRequest == null) {
        throw new DownloadException("Cannot download resource if request is null");
    }

    try {
        resourceResponse = retriever.retrieveResource();
    } catch (ResourceNotFoundException | ResourceNotSupportedException | IOException e) {
        throw new DownloadException("Cannot download resource", e);
    }

    resourceResponse.getProperties().put(Metacard.ID, metacard.getId());
    // Sources do not create ResourceResponses with the original ResourceRequest, hence
    // it is added here because it will be needed for caching
    resourceResponse = new ResourceResponseImpl(resourceRequest, resourceResponse.getProperties(),
            resourceResponse.getResource());

    // TODO - this should be before retrieveResource() but eventPublisher requires a
    // resourceResponse and that resource response must have a resource request in it (to get
    // USER property)
    eventPublisher.postRetrievalStatus(resourceResponse, ProductRetrievalStatus.STARTED, metacard, null, 0L,
            downloadIdentifier);

    AtomicBoolean downloadStarted = new AtomicBoolean(Boolean.FALSE);
    ReliableResourceDownloader downloader = new ReliableResourceDownloader(downloaderConfig, downloadStarted,
            downloadIdentifier, resourceResponse, retriever);
    resourceResponse = downloader.setupDownload(metacard, downloadStatusInfo);

    // Start download in a separate thread so we can return the ResourceResponse with
    // the ReliableResourceInputStream available for the client to start reading from
    executor.submit(downloader);

    // Wait for download to get started before returning control to client
    Stopwatch stopwatch = Stopwatch.createStarted();
    while (!downloadStarted.get()) {
        try {
            Thread.sleep(10);
        } catch (InterruptedException e) {
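            // interrupt is swallowed here; the stopwatch check below bounds
            // the overall wait to ONE_SECOND_IN_MS regardless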
        }
        long elapsedTime = stopwatch.elapsed(TimeUnit.MILLISECONDS);
        if (elapsedTime > ONE_SECOND_IN_MS) {
            LOGGER.debug("downloadStarted still FALSE - elapsedTime = {}", elapsedTime);
            break;
        }
    }
    LOGGER.debug("elapsedTime = {}", stopwatch.elapsed(TimeUnit.MILLISECONDS));
    stopwatch.stop();

    return resourceResponse;
}