Example usage for java.util.concurrent.atomic AtomicReference set

Introduction

On this page you can find example usages of java.util.concurrent.atomic.AtomicReference.set, drawn from the source files listed below.

Prototype

public final void set(V newValue) 

Document

Sets the value to newValue, with memory effects as specified by VarHandle#setVolatile.
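
As a minimal, self-contained sketch (not taken from any of the projects below; the class and variable names are invented for illustration), set can be used to publish a value produced on one thread so that it is visible to readers on other threads:

import java.util.concurrent.atomic.AtomicReference;

public class AtomicReferenceSetExample {
    public static void main(String[] args) throws InterruptedException {
        // Shared holder; initially null.
        final AtomicReference<String> resultRef = new AtomicReference<>();

        Thread worker = new Thread(() -> {
            // set(..) performs a volatile-style write, so the stored value is
            // visible to any thread that subsequently calls get().
            resultRef.set("computed on " + Thread.currentThread().getName());
        });
        worker.start();
        worker.join();

        // Prints the value published by the worker thread.
        System.out.println(resultRef.get());
    }
}

The usage examples that follow apply the same idea: a final AtomicReference captures a value produced inside a callback, lambda, or worker thread so that the enclosing method can read it afterwards.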

Usage

From source file:de.hybris.platform.test.TransactionTest.java

@Test
public void testNestedTAError() throws Exception {

    final AtomicBoolean storeCalled = new AtomicBoolean();
    final AtomicBoolean commitCalled = new AtomicBoolean();
    final AtomicBoolean rollbackCalled = new AtomicBoolean();

    final Transaction transaction = new DefaultTransaction() {
        @Override
        public void rollback() throws TransactionException {
            rollbackCalled.set(true);
            super.rollback();
        }

        @Override
        public void commit() throws TransactionException {
            commitCalled.set(true);
            super.commit();
        }
    };
    transaction.enableDelayedStore(true);

    final EntityInstanceContext eCtx = new EntityInstanceContext() {

        @Override
        public ItemDeployment getItemDeployment() {
            return null;
        }

        @Override
        public PK getPK() {
            return PK.NULL_PK;
        }

        @Override
        public PersistencePool getPersistencePool() {
            return null;
        }

        @Override
        public void setPK(final PK pk) {
            // mock
        }
    };

    final EntityInstance mockEntity = new EntityInstance() {
        final EntityInstanceContext ctx = eCtx;

        @Override
        public PK ejbFindByPrimaryKey(final PK pkValue) throws YObjectNotFoundException {
            return null;
        }

        @Override
        public void ejbLoad() {
            // mock
        }

        @Override
        public void ejbRemove() {
            // mock
        }

        @Override
        public void ejbStore() {
            storeCalled.set(true);
            throw new IllegalArgumentException("let's rollback ;)");
        }

        @Override
        public EntityInstanceContext getEntityContext() {
            return ctx;
        }

        @Override
        public boolean needsStoring() {
            return true;
        }

        @Override
        public void setEntityContext(final EntityInstanceContext ctx) {
            // mock
        }

        @Override
        public void setNeedsStoring(final boolean needs) {
            // mock
        }

    };

    final ByteArrayOutputStream bos = new ByteArrayOutputStream();
    final PrintStream printstream = new PrintStream(bos);

    final PrintStream err = System.err;

    final AtomicReference<Title> itemRef = new AtomicReference<Title>();

    try {
        System.setErr(printstream);

        // outer TA
        transaction.execute(new TransactionBody() {
            @Override
            public Object execute() throws Exception {
                // inner TA
                transaction.execute(new TransactionBody() {
                    @Override
                    public Object execute() throws Exception {
                        itemRef.set(UserManager.getInstance().createTitle("T" + System.currentTimeMillis()));

                        // inject mock entity to call ejbStore upon -> throws exception
                        transaction.registerEntityInstance(mockEntity);

                        return null;
                    }

                });
                return null;
            }

        });
        fail("IllegalArgumentException expected");
    } catch (final IllegalArgumentException ex) {
        assertTrue(storeCalled.get());
        assertEquals("let's rollback ;)", ex.getMessage());

        assertFalse(transaction.isRunning());
        assertEquals(0, transaction.getOpenTransactionCount());
        assertNotNull(itemRef.get());
        assertFalse(itemRef.get().isAlive());

        final String errorLog = new String(bos.toByteArray());

        assertFalse(errorLog.contains("no transaction running"));
    } catch (final Exception e) {
        fail("unexpected error " + e.getMessage());
    } finally {
        System.setErr(err);
    }
}

From source file:org.dataconservancy.packaging.tool.integration.PackageGenerationTest.java

@Test
public void verifyRemediationTest() throws Exception {

    PackageState state = initializer.initialize(DCS_PROFILE);

    Set<URI> originalFileLocations = new HashSet<>();

    ipm2rdf.transformToNode(state.getPackageTree())
            .walk(node -> originalFileLocations.add(node.getFileInfo().getLocation()));

    // The package should contain two files:
    // - README
    // - READMÉ
    //
    // The file with the acute E will be remediated to a resource named 'README', which will collide with
    // an existing resource of the same name.

    // assert that our sample problem files are in the content to be packaged
    assertTrue(originalFileLocations.stream().anyMatch(uri -> uri.getPath().endsWith("README")));
    // 0x0301 is the UTF-16 encoding of the 'COMBINING ACUTE ACCENT' combining diacritic
    // 0x00c9 is the UTF-16 encoding of 'LATIN CAPITAL LETTER E WITH ACUTE'
    assertTrue(originalFileLocations.stream().anyMatch(uri -> (uri.getPath().endsWith("README" + '\u0301'))
            || (uri.getPath().endsWith("READM" + '\u00c9'))));

    OpenedPackage opened = packager.createPackage(state, folder.getRoot());

    AtomicBoolean foundIllegal = new AtomicBoolean(Boolean.FALSE);
    AtomicBoolean foundRemediated = new AtomicBoolean(Boolean.FALSE);
    AtomicReference<String> remediatedFilename = new AtomicReference<>();
    AtomicBoolean foundCollision = new AtomicBoolean(Boolean.FALSE);
    AtomicReference<String> collidingFilename = new AtomicReference<>();

    // Walk the generated package, and make sure that
    // 1. That a resource with illegal characters does not exist
    // 2. That a resource named 'README' does exist
    // 3. That a resource named after the SHA-1 hex of its identifier exists
    // 4. That those two resources originate from two different files in the original package content
    opened.getPackageTree().walk(node -> {
        if (node.getFileInfo() == null || !node.getFileInfo().isFile()) {
            return;
        }

        System.err.println(node.getFileInfo().getName());
        System.err.println("  " + node.getFileInfo().getLocation().toString());

        // this should not happen, because a file name with invalid characters should have
        // been remediated prior to being inserted into the package
        if (node.getFileInfo().getLocation().getPath().endsWith("README" + '\u0301')
                || node.getFileInfo().getLocation().getPath().endsWith("READM" + '\u00c9')) {
            foundIllegal.set(Boolean.TRUE);
        }

        if (node.getFileInfo().getLocation().getPath().endsWith(shaHex(node.getIdentifier().toString()))) {
            foundRemediated.set(Boolean.TRUE);
            remediatedFilename.set(node.getFileInfo().getName());
            // short circuit
            return;
        }

        if (node.getFileInfo().getName().equals("READMX") || node.getFileInfo().getName().equals("READM")) {
            foundCollision.set(Boolean.TRUE);
            collidingFilename.set(node.getFileInfo().getName());
        }
    });

    assertFalse(foundIllegal.get());
    assertTrue(foundCollision.get());
    assertTrue(foundRemediated.get());

    assertNotNull(remediatedFilename.get());
    assertNotNull(collidingFilename.get());
    assertNotEquals(remediatedFilename.get(), collidingFilename.get());

}

From source file:org.apache.nifi.processors.standard.CompressContent.java

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final ComponentLog logger = getLogger();
    final long sizeBeforeCompression = flowFile.getSize();
    final String compressionMode = context.getProperty(MODE).getValue();

    String compressionFormatValue = context.getProperty(COMPRESSION_FORMAT).getValue();
    if (compressionFormatValue.equals(COMPRESSION_FORMAT_ATTRIBUTE)) {
        final String mimeType = flowFile.getAttribute(CoreAttributes.MIME_TYPE.key());
        if (mimeType == null) {
            logger.error("No {} attribute exists for {}; routing to failure",
                    new Object[] { CoreAttributes.MIME_TYPE.key(), flowFile });
            session.transfer(flowFile, REL_FAILURE);
            return;
        }

        compressionFormatValue = compressionFormatMimeTypeMap.get(mimeType);
        if (compressionFormatValue == null) {
            logger.info(
                    "Mime Type of {} is '{}', which does not indicate a supported Compression Format; routing to success without decompressing",
                    new Object[] { flowFile, mimeType });
            session.transfer(flowFile, REL_SUCCESS);
            return;
        }
    }

    final String compressionFormat = compressionFormatValue;
    final AtomicReference<String> mimeTypeRef = new AtomicReference<>(null);
    final StopWatch stopWatch = new StopWatch(true);

    final String fileExtension;
    switch (compressionFormat.toLowerCase()) {
    case COMPRESSION_FORMAT_GZIP:
        fileExtension = ".gz";
        break;
    case COMPRESSION_FORMAT_LZMA:
        fileExtension = ".lzma";
        break;
    case COMPRESSION_FORMAT_XZ_LZMA2:
        fileExtension = ".xz";
        break;
    case COMPRESSION_FORMAT_BZIP2:
        fileExtension = ".bz2";
        break;
    case COMPRESSION_FORMAT_SNAPPY:
        fileExtension = ".snappy";
        break;
    case COMPRESSION_FORMAT_SNAPPY_FRAMED:
        fileExtension = ".sz";
        break;
    default:
        fileExtension = "";
        break;
    }

    try {
        flowFile = session.write(flowFile, new StreamCallback() {
            @Override
            public void process(final InputStream rawIn, final OutputStream rawOut) throws IOException {
                final OutputStream compressionOut;
                final InputStream compressionIn;

                final OutputStream bufferedOut = new BufferedOutputStream(rawOut, 65536);
                final InputStream bufferedIn = new BufferedInputStream(rawIn, 65536);

                try {
                    if (MODE_COMPRESS.equalsIgnoreCase(compressionMode)) {
                        compressionIn = bufferedIn;

                        switch (compressionFormat.toLowerCase()) {
                        case COMPRESSION_FORMAT_GZIP:
                            final int compressionLevel = context.getProperty(COMPRESSION_LEVEL).asInteger();
                            compressionOut = new GZIPOutputStream(bufferedOut, compressionLevel);
                            mimeTypeRef.set("application/gzip");
                            break;
                        case COMPRESSION_FORMAT_LZMA:
                            compressionOut = new LzmaOutputStream.Builder(bufferedOut).build();
                            mimeTypeRef.set("application/x-lzma");
                            break;
                        case COMPRESSION_FORMAT_XZ_LZMA2:
                            compressionOut = new XZOutputStream(bufferedOut, new LZMA2Options());
                            mimeTypeRef.set("application/x-xz");
                            break;
                        case COMPRESSION_FORMAT_SNAPPY:
                            compressionOut = new SnappyOutputStream(bufferedOut);
                            mimeTypeRef.set("application/x-snappy");
                            break;
                        case COMPRESSION_FORMAT_SNAPPY_FRAMED:
                            compressionOut = new SnappyFramedOutputStream(bufferedOut);
                            mimeTypeRef.set("application/x-snappy-framed");
                            break;
                        case COMPRESSION_FORMAT_BZIP2:
                        default:
                            mimeTypeRef.set("application/x-bzip2");
                            compressionOut = new CompressorStreamFactory()
                                    .createCompressorOutputStream(compressionFormat.toLowerCase(), bufferedOut);
                            break;
                        }
                    } else {
                        compressionOut = bufferedOut;
                        switch (compressionFormat.toLowerCase()) {
                        case COMPRESSION_FORMAT_LZMA:
                            compressionIn = new LzmaInputStream(bufferedIn, new Decoder());
                            break;
                        case COMPRESSION_FORMAT_XZ_LZMA2:
                            compressionIn = new XZInputStream(bufferedIn);
                            break;
                        case COMPRESSION_FORMAT_BZIP2:
                            // need this two-arg constructor to support concatenated streams
                            compressionIn = new BZip2CompressorInputStream(bufferedIn, true);
                            break;
                        case COMPRESSION_FORMAT_GZIP:
                            compressionIn = new GzipCompressorInputStream(bufferedIn, true);
                            break;
                        case COMPRESSION_FORMAT_SNAPPY:
                            compressionIn = new SnappyInputStream(bufferedIn);
                            break;
                        case COMPRESSION_FORMAT_SNAPPY_FRAMED:
                            compressionIn = new SnappyFramedInputStream(bufferedIn);
                            break;
                        default:
                            compressionIn = new CompressorStreamFactory()
                                    .createCompressorInputStream(compressionFormat.toLowerCase(), bufferedIn);
                        }
                    }
                } catch (final Exception e) {
                    closeQuietly(bufferedOut);
                    throw new IOException(e);
                }

                try (final InputStream in = compressionIn; final OutputStream out = compressionOut) {
                    final byte[] buffer = new byte[8192];
                    int len;
                    while ((len = in.read(buffer)) > 0) {
                        out.write(buffer, 0, len);
                    }
                    out.flush();
                }
            }
        });
        stopWatch.stop();

        final long sizeAfterCompression = flowFile.getSize();
        if (MODE_DECOMPRESS.equalsIgnoreCase(compressionMode)) {
            flowFile = session.removeAttribute(flowFile, CoreAttributes.MIME_TYPE.key());

            if (context.getProperty(UPDATE_FILENAME).asBoolean()) {
                final String filename = flowFile.getAttribute(CoreAttributes.FILENAME.key());
                if (filename.toLowerCase().endsWith(fileExtension)) {
                    flowFile = session.putAttribute(flowFile, CoreAttributes.FILENAME.key(),
                            filename.substring(0, filename.length() - fileExtension.length()));
                }
            }
        } else {
            flowFile = session.putAttribute(flowFile, CoreAttributes.MIME_TYPE.key(), mimeTypeRef.get());

            if (context.getProperty(UPDATE_FILENAME).asBoolean()) {
                final String filename = flowFile.getAttribute(CoreAttributes.FILENAME.key());
                flowFile = session.putAttribute(flowFile, CoreAttributes.FILENAME.key(),
                        filename + fileExtension);
            }
        }

        logger.info("Successfully {}ed {} using {} compression format; size changed from {} to {} bytes",
                new Object[] { compressionMode.toLowerCase(), flowFile, compressionFormat,
                        sizeBeforeCompression, sizeAfterCompression });
        session.getProvenanceReporter().modifyContent(flowFile, stopWatch.getDuration(TimeUnit.MILLISECONDS));
        session.transfer(flowFile, REL_SUCCESS);
    } catch (final ProcessException e) {
        logger.error("Unable to {} {} using {} compression format due to {}; routing to failure",
                new Object[] { compressionMode.toLowerCase(), flowFile, compressionFormat, e });
        session.transfer(flowFile, REL_FAILURE);
    }
}

From source file:brooklyn.util.internal.ssh.sshj.SshjTool.java

/**
 * Executes the script in the background (`nohup ... &`), and then executes other ssh commands to poll for the
 * stdout, stderr and exit code of that original process (which will each have been written to separate files).
 *
 * The polling is a "long poll". That is, it executes a long-running ssh command to retrieve the stdout, etc.
 * If that long-poll command fails, then we just execute another one to pick up from where it left off.
 * This means we do not need to execute many ssh commands (which are expensive), but can still return promptly
 * when the command completes.
 * 
 * Much of this was motivated by https://issues.apache.org/jira/browse/BROOKLYN-106, which is no longer
 * an issue. The retries (e.g. in the upload-script) are arguably overkill given that {@link #acquire(SshAction)}
 * will already retry. However, leaving this in place as it could prove useful when working with flakey
 * networks in the future.
 * 
 * TODO There are (probably) issues with this method when using {@link ShellTool#PROP_RUN_AS_ROOT}.
 * I (Aled) saw the .pid file having an owner of root:root, and a failure message in stderr of:
 *   -bash: line 3: /tmp/brooklyn-20150113-161203056-XMEo-move_install_dir_from_user_to_.pid: Permission denied
 */
protected int execScriptAsyncAndPoll(final Map<String, ?> props, final List<String> commands,
        final Map<String, ?> env) {
    return new ToolAbstractAsyncExecScript(props) {
        private int maxConsecutiveSshFailures = 3;
        private Duration maxDelayBetweenPolls = Duration.seconds(20);
        private Duration pollTimeout = getOptionalVal(props, PROP_EXEC_ASYNC_POLLING_TIMEOUT,
                Duration.FIVE_MINUTES);
        private int iteration = 0;
        private int consecutiveSshFailures = 0;
        private int stdoutCount = 0;
        private int stderrCount = 0;
        private Stopwatch timer;

        public int run() {
            timer = Stopwatch.createStarted();
            final String scriptContents = toScript(props, commands, env);
            if (LOG.isTraceEnabled())
                LOG.trace("Running shell command at {} as async script: {}", host, scriptContents);

            // Upload script; try repeatedly because have seen timeout intermittently on vcloud-director (BROOKLYN-106 related).
            boolean uploadSuccess = Repeater
                    .create("async script upload on " + SshjTool.this.toString() + " (for " + getSummary()
                            + ")")
                    .backoffTo(maxDelayBetweenPolls).limitIterationsTo(3).rethrowException()
                    .until(new Callable<Boolean>() {
                        @Override
                        public Boolean call() throws Exception {
                            iteration++;
                            if (LOG.isDebugEnabled()) {
                                String msg = "Uploading (iteration=" + iteration + ") for async script on "
                                        + SshjTool.this.toString() + " (for " + getSummary() + ")";
                                if (iteration == 1) {
                                    LOG.trace(msg);
                                } else {
                                    LOG.debug(msg);
                                }
                            }
                            copyToServer(ImmutableMap.of("permissions", "0700"), scriptContents.getBytes(),
                                    scriptPath);
                            return true;
                        }
                    }).run();

            if (!uploadSuccess) {
                // Unexpected! Should have either returned true or have rethrown the exception; should never get false.
                String msg = "Unexpected state: repeated failure for async script upload on "
                        + SshjTool.this.toString() + " (" + getSummary() + ")";
                LOG.warn(msg + "; rethrowing");
                throw new IllegalStateException(msg);
            }

            // Execute script asynchronously
            int execResult = asInt(acquire(new ShellAction(buildRunScriptCommand(), out, err, execTimeout)),
                    -1);
            if (execResult != 0)
                return execResult;

            // Long polling to get the status
            try {
                final AtomicReference<Integer> result = new AtomicReference<Integer>();
                boolean success = Repeater
                        .create("async script long-poll on " + SshjTool.this.toString() + " (for "
                                + getSummary() + ")")
                        .backoffTo(maxDelayBetweenPolls).limitTimeTo(execTimeout)
                        .until(new Callable<Boolean>() {
                            @Override
                            public Boolean call() throws Exception {
                                iteration++;
                                if (LOG.isDebugEnabled())
                                    LOG.debug("Doing long-poll (iteration=" + iteration
                                            + ") for async script to complete on " + SshjTool.this.toString()
                                            + " (for " + getSummary() + ")");
                                Integer exitstatus = longPoll();
                                result.set(exitstatus);
                                return exitstatus != null;
                            }
                        }).run();

                if (!success) {
                    // Timed out
                    String msg = "Timeout for async script to complete on " + SshjTool.this.toString() + " ("
                            + getSummary() + ")";
                    LOG.warn(msg + "; rethrowing");
                    throw new TimeoutException(msg);
                }

                return result.get();

            } catch (Exception e) {
                LOG.debug("Problem polling for async script on " + SshjTool.this.toString() + " (for "
                        + getSummary() + "); rethrowing after deleting temporary files", e);
                throw Exceptions.propagate(e);
            } finally {
                // Delete the temporary files created (and the `tail -c` commands that might have been left behind by long-polls).
                // Using pollTimeout so doesn't wait forever, but waits for a reasonable (configurable) length of time.
                // TODO also execute this if the `buildRunScriptCommand` fails, as that might have left files behind?
                try {
                    int execDeleteResult = asInt(
                            acquire(new ShellAction(deleteTemporaryFilesCommand(), out, err, pollTimeout)), -1);
                    if (execDeleteResult != 0) {
                        LOG.debug("Problem deleting temporary files of async script on "
                                + SshjTool.this.toString() + " (for " + getSummary() + "): exit status "
                                + execDeleteResult);
                    }
                } catch (Exception e) {
                    Exceptions.propagateIfFatal(e);
                    LOG.debug("Problem deleting temporary files of async script on " + SshjTool.this.toString()
                            + " (for " + getSummary() + "); continuing", e);
                }
            }
        }

        Integer longPoll() throws IOException {
            // Long-polling to get stdout, stderr + exit status of async task.
            // If our long-poll disconnects, we will just re-execute.
            // We wrap the stdout/stderr so that we can get the size count. 
            // If we disconnect, we will pick up from that char of the stream.
            // TODO Additional stdout/stderr written by buildLongPollCommand() could interfere, 
            //      causing us to miss some characters.
            Duration nextPollTimeout = Duration.min(pollTimeout,
                    Duration.millis(execTimeout.toMilliseconds() - timer.elapsed(TimeUnit.MILLISECONDS)));
            CountingOutputStream countingOut = (out == null) ? null : new CountingOutputStream(out);
            CountingOutputStream countingErr = (err == null) ? null : new CountingOutputStream(err);
            List<String> pollCommand = buildLongPollCommand(stdoutCount, stderrCount, nextPollTimeout);
            Duration sshJoinTimeout = nextPollTimeout.add(Duration.TEN_SECONDS);
            ShellAction action = new ShellAction(pollCommand, countingOut, countingErr, sshJoinTimeout);

            int longPollResult;
            try {
                longPollResult = asInt(acquire(action, 3, nextPollTimeout), -1);
            } catch (RuntimeTimeoutException e) {
                if (LOG.isDebugEnabled())
                    LOG.debug("Long-poll timed out on " + SshjTool.this.toString() + " (for " + getSummary()
                            + "): " + e);
                return null;
            }
            stdoutCount += (countingOut == null) ? 0 : countingOut.getCount();
            stderrCount += (countingErr == null) ? 0 : countingErr.getCount();

            if (longPollResult == 0) {
                if (LOG.isDebugEnabled())
                    LOG.debug("Long-poll succeeded (exit status 0) on " + SshjTool.this.toString() + " (for "
                            + getSummary() + ")");
                return longPollResult; // success

            } else if (longPollResult == -1) {
                // probably a connection failure; try again
                if (LOG.isDebugEnabled())
                    LOG.debug("Long-poll received exit status -1; will retry on " + SshjTool.this.toString()
                            + " (for " + getSummary() + ")");
                return null;

            } else if (longPollResult == 125) {
                // 125 is the special code for timeout in long-poll (see buildLongPollCommand).
                // However, there is a tiny chance that the underlying command might have returned that exact exit code!
                // Don't treat a timeout as a "consecutiveSshFailure".
                if (LOG.isDebugEnabled())
                    LOG.debug("Long-poll received exit status " + longPollResult
                            + "; most likely timeout; retrieving actual status on " + SshjTool.this.toString()
                            + " (for " + getSummary() + ")");
                return retrieveStatusCommand();

            } else {
                // want to double-check whether this is the exit-code from the async process, or
                // some unexpected failure in our long-poll command.
                if (LOG.isDebugEnabled())
                    LOG.debug("Long-poll received exit status " + longPollResult
                            + "; retrieving actual status on " + SshjTool.this.toString() + " (for "
                            + getSummary() + ")");
                Integer result = retrieveStatusCommand();
                if (result != null) {
                    return result;
                }
            }

            consecutiveSshFailures++;
            if (consecutiveSshFailures > maxConsecutiveSshFailures) {
                LOG.warn("Aborting on " + consecutiveSshFailures
                        + " consecutive ssh connection errors (return -1) when polling for async script to complete on "
                        + SshjTool.this.toString() + " (" + getSummary() + ")");
                return -1;
            } else {
                LOG.info("Retrying after ssh connection error when polling for async script to complete on "
                        + SshjTool.this.toString() + " (" + getSummary() + ")");
                return null;
            }
        }

        Integer retrieveStatusCommand() throws IOException {
            // want to double-check whether this is the exit-code from the async process, or
            // some unexpected failure in our long-poll command.
            ByteArrayOutputStream statusOut = new ByteArrayOutputStream();
            ByteArrayOutputStream statusErr = new ByteArrayOutputStream();
            int statusResult = asInt(
                    acquire(new ShellAction(buildRetrieveStatusCommand(), statusOut, statusErr, execTimeout)),
                    -1);

            if (statusResult == 0) {
                // The status we retrieved really is valid; return it.
                // TODO How to ensure no additional output in stdout/stderr when parsing below?
                String statusOutStr = new String(statusOut.toByteArray()).trim();
                if (Strings.isEmpty(statusOutStr)) {
                    // suggests not yet completed; will retry with long-poll
                    if (LOG.isDebugEnabled())
                        LOG.debug(
                                "Long-poll retrieved status directly; command successful but no result available on "
                                        + SshjTool.this.toString() + " (for " + getSummary() + ")");
                    return null;
                } else {
                    if (LOG.isDebugEnabled())
                        LOG.debug("Long-poll retrieved status directly; returning '" + statusOutStr + "' on "
                                + SshjTool.this.toString() + " (for " + getSummary() + ")");
                    int result = Integer.parseInt(statusOutStr);
                    return result;
                }

            } else if (statusResult == -1) {
                // probably a connection failure; try again with long-poll
                if (LOG.isDebugEnabled())
                    LOG.debug("Long-poll retrieving status directly received exit status -1; will retry on "
                            + SshjTool.this.toString() + " (for " + getSummary() + ")");
                return null;

            } else {
                if (out != null) {
                    out.write(toUTF8ByteArray(
                            "retrieving status failed with exit code " + statusResult + " (stdout follow)"));
                    out.write(statusOut.toByteArray());
                }
                if (err != null) {
                    err.write(toUTF8ByteArray(
                            "retrieving status failed with exit code " + statusResult + " (stderr follow)"));
                    err.write(statusErr.toByteArray());
                }

                if (LOG.isDebugEnabled())
                    LOG.debug("Long-poll retrieving status failed; returning " + statusResult + " on "
                            + SshjTool.this.toString() + " (for " + getSummary() + ")");
                return statusResult;
            }
        }
    }.run();
}

From source file:org.apache.pulsar.broker.admin.impl.PersistentTopicsBase.java

protected void internalCreateSubscription(String subscriptionName, MessageIdImpl messageId,
        boolean authoritative) {
    if (topicName.isGlobal()) {
        validateGlobalNamespaceOwnership(namespaceName);
    }
    messageId = messageId == null ? (MessageIdImpl) MessageId.earliest : messageId;
    log.info("[{}][{}] Creating subscription {} at message id {}", clientAppId(), topicName, subscriptionName,
            messageId);

    PartitionedTopicMetadata partitionMetadata = getPartitionedTopicMetadata(topicName, authoritative);

    try {
        if (partitionMetadata.partitions > 0) {
            // Create the subscription on each partition
            PulsarAdmin admin = pulsar().getAdminClient();

            CountDownLatch latch = new CountDownLatch(partitionMetadata.partitions);
            AtomicReference<Throwable> exception = new AtomicReference<>();
            AtomicInteger failureCount = new AtomicInteger(0);

            for (int i = 0; i < partitionMetadata.partitions; i++) {
                admin.persistentTopics().createSubscriptionAsync(topicName.getPartition(i).toString(),
                        subscriptionName, messageId).handle((result, ex) -> {
                            if (ex != null) {
                                int c = failureCount.incrementAndGet();
                                // fail the operation on unknown exception or if all the partitioned failed due to
                                // subscription-already-exist
                                if (c == partitionMetadata.partitions
                                        || !(ex instanceof PulsarAdminException.ConflictException)) {
                                    exception.set(ex);
                                }
                            }
                            latch.countDown();
                            return null;
                        });
            }

            latch.await();
            if (exception.get() != null) {
                throw exception.get();
            }
        } else {
            validateAdminAccessForSubscriber(subscriptionName, authoritative);

            PersistentTopic topic = (PersistentTopic) getOrCreateTopic(topicName);

            if (topic.getSubscriptions().containsKey(subscriptionName)) {
                throw new RestException(Status.CONFLICT, "Subscription already exists for topic");
            }

            PersistentSubscription subscription = (PersistentSubscription) topic
                    .createSubscription(subscriptionName, InitialPosition.Latest).get();
            subscription.resetCursor(PositionImpl.get(messageId.getLedgerId(), messageId.getEntryId())).get();
            log.info("[{}][{}] Successfully created subscription {} at message id {}", clientAppId(), topicName,
                    subscriptionName, messageId);
        }
    } catch (Throwable e) {
        Throwable t = e.getCause();
        log.warn("[{}] [{}] Failed to create subscription {} at message id {}", clientAppId(), topicName,
                subscriptionName, messageId, e);
        if (t instanceof SubscriptionInvalidCursorPosition) {
            throw new RestException(Status.PRECONDITION_FAILED,
                    "Unable to find position for position specified: " + t.getMessage());
        } else {
            throw new RestException(e);
        }
    }
}

From source file:org.gridgain.grid.kernal.managers.eventstorage.GridEventStorageManager.java

/**
 * @param p Grid event predicate.
 * @param nodes Collection of nodes.
 * @param timeout Maximum time to wait for result, if {@code 0}, then wait until result is received.
 * @return Collection of events.
 * @throws GridException Thrown in case of any errors.
 */
@SuppressWarnings({ "SynchronizationOnLocalVariableOrMethodParameter", "deprecation" })
private List<GridEvent> query(GridPredicate<? super GridEvent> p, Collection<? extends GridNode> nodes,
        long timeout) throws GridException {
    assert p != null;
    assert nodes != null;

    if (nodes.isEmpty()) {
        U.warn(log, "Failed to query events for empty nodes collection.");

        return Collections.emptyList();
    }

    GridIoManager ioMgr = ctx.io();

    final List<GridEvent> evts = new ArrayList<GridEvent>();

    final AtomicReference<Throwable> err = new AtomicReference<Throwable>(null);

    final Set<UUID> uids = new HashSet<UUID>();

    final Object qryMux = new Object();

    for (GridNode node : nodes)
        uids.add(node.id());

    GridLocalEventListener evtLsnr = new GridLocalEventListener() {
        @Override
        public void onEvent(GridEvent evt) {
            assert evt instanceof GridDiscoveryEvent;

            synchronized (qryMux) {
                uids.remove(((GridDiscoveryEvent) evt).eventNodeId());

                if (uids.isEmpty()) {
                    qryMux.notifyAll();
                }
            }
        }
    };

    GridMessageListener resLsnr = new GridMessageListener() {
        @SuppressWarnings("deprecation")
        @Override
        public void onMessage(UUID nodeId, Object msg) {
            assert nodeId != null;
            assert msg != null;

            if (!(msg instanceof GridEventStorageMessage)) {
                U.error(log, "Received unknown message: " + msg);

                return;
            }

            GridEventStorageMessage res = (GridEventStorageMessage) msg;

            synchronized (qryMux) {
                if (uids.remove(nodeId)) {
                    if (res.events() != null)
                        evts.addAll(res.events());
                } else
                    U.warn(log,
                            "Received duplicate response (ignoring) [nodeId=" + nodeId + ", msg=" + res + ']');

                if (res.exception() != null)
                    err.set(res.exception());

                if (uids.isEmpty() || err.get() != null)
                    qryMux.notifyAll();
            }
        }
    };

    String resTopic = TOPIC_EVENT.name(UUID.randomUUID());

    try {
        addLocalEventListener(evtLsnr, new int[] { EVT_NODE_LEFT, EVT_NODE_FAILED });

        ioMgr.addMessageListener(resTopic, resLsnr);

        GridByteArrayList serFilter = U.marshal(ctx.config().getMarshaller(), p);

        GridDeployment dep = ctx.deploy().deploy(p.getClass(), U.detectClassLoader(p.getClass()));

        if (dep == null)
            throw new GridDeploymentException("Failed to deploy event filter: " + p);

        Serializable msg = new GridEventStorageMessage(resTopic, serFilter, p.getClass().getName(),
                dep.classLoaderId(), dep.deployMode(), dep.sequenceNumber(), dep.userVersion(),
                dep.participants());

        ioMgr.send(nodes, TOPIC_EVENT, msg, PUBLIC_POOL);

        if (timeout == 0)
            timeout = Long.MAX_VALUE;

        long now = System.currentTimeMillis();

        // Account for overflow of long value.
        long endTime = now + timeout <= 0 ? Long.MAX_VALUE : now + timeout;

        long delta = timeout;

        Collection<UUID> uidsCp = null;

        synchronized (qryMux) {
            try {
                while (!uids.isEmpty() && err.get() == null && delta > 0) {
                    qryMux.wait(delta);

                    delta = endTime - System.currentTimeMillis();
                }
            } catch (InterruptedException e) {
                throw new GridException("Got interrupted while waiting for event query responses.", e);
            }

            if (err.get() != null)
                throw new GridException("Failed to query events due to exception on remote node.", err.get());

            if (!uids.isEmpty())
                uidsCp = new LinkedList<UUID>(uids);
        }

        // Outside of synchronization.
        if (uidsCp != null) {
            for (Iterator<UUID> iter = uidsCp.iterator(); iter.hasNext();)
                // Ignore nodes that have left the grid.
                if (ctx.discovery().node(iter.next()) == null)
                    iter.remove();

            if (!uidsCp.isEmpty())
                throw new GridException(
                        "Failed to receive event query response from following nodes: " + uidsCp);
        }
    } finally {
        ioMgr.removeMessageListener(resTopic, resLsnr);

        removeLocalEventListener(evtLsnr);
    }

    return evts;
}

From source file:org.apache.hadoop.hbase.master.HMaster.java

/**
 * Return the region and current deployment for the region containing
 * the given row. If the region cannot be found, returns null. If it
 * is found, but not currently deployed, the second element of the pair
 * may be null.
 */
Pair<HRegionInfo, ServerName> getTableRegionForRow(final TableName tableName, final byte[] rowKey)
        throws IOException {
    final AtomicReference<Pair<HRegionInfo, ServerName>> result = new AtomicReference<Pair<HRegionInfo, ServerName>>(
            null);

    MetaScannerVisitor visitor = new MetaScannerVisitorBase() {
        @Override
        public boolean processRow(Result data) throws IOException {
            if (data == null || data.size() <= 0) {
                return true;
            }
            Pair<HRegionInfo, ServerName> pair = HRegionInfo.getHRegionInfoAndServerName(data);
            if (pair == null) {
                return false;
            }
            if (!pair.getFirst().getTable().equals(tableName)) {
                return false;
            }
            result.set(pair);
            return true;
        }
    };

    MetaScanner.metaScan(conf, visitor, tableName, rowKey, 1);
    return result.get();
}

From source file:org.geoserver.wms.map.PDFGetMapTest.java

/**
 * Returns the last tiling pattern found during a render of the PDF document. Can be used to extract
 * one tiling pattern that gets actually used to render shapes (meant to be used against a document
 * that only has a single tiling pattern)
 *
 * @param pdfDocument
 * @return
 * @throws InvalidPasswordException
 * @throws IOException
 */
PDTilingPattern getTilingPattern(byte[] pdfDocument) throws InvalidPasswordException, IOException {
    // load the document using PDFBOX (iText is no good for parsing tiling patterns, mostly works
    // well for text and image extraction, spent a few hours trying to use it with no results)
    PDDocument doc = PDDocument.load(pdfDocument);
    PDPage page = doc.getPage(0);

    // use a graphics stream engine, it's the only thing I could find that parses the PDF
    // deep enough to allow catching the tiling pattern in parsed form 
    AtomicReference<PDTilingPattern> pattern = new AtomicReference<>();
    PDFStreamEngine engine = new PDFGraphicsStreamEngine(page) {

        @Override
        public void strokePath() throws IOException {
        }

        @Override
        public void shadingFill(COSName shadingName) throws IOException {
        }

        @Override
        public void moveTo(float x, float y) throws IOException {
        }

        @Override
        public void lineTo(float x, float y) throws IOException {
        }

        @Override
        public Point2D getCurrentPoint() throws IOException {
            return null;
        }

        @Override
        public void fillPath(int windingRule) throws IOException {
        }

        @Override
        public void fillAndStrokePath(int windingRule) throws IOException {
        }

        @Override
        public void endPath() throws IOException {
        }

        @Override
        public void drawImage(PDImage pdImage) throws IOException {
        }

        @Override
        public void curveTo(float x1, float y1, float x2, float y2, float x3, float y3) throws IOException {
        }

        @Override
        public void closePath() throws IOException {
        }

        @Override
        public void clip(int windingRule) throws IOException {
        }

        @Override
        public void appendRectangle(Point2D p0, Point2D p1, Point2D p2, Point2D p3) throws IOException {
        }
    };

    // setup the tiling pattern trap
    engine.addOperator(new SetNonStrokingColorN() {

        @Override
        public void process(Operator operator, List<COSBase> arguments) throws IOException {
            super.process(operator, arguments);

            PDColor color = context.getGraphicsState().getNonStrokingColor();
            if (context.getGraphicsState().getNonStrokingColorSpace() instanceof PDPattern) {
                PDPattern colorSpace = (PDPattern) context.getGraphicsState().getNonStrokingColorSpace();
                PDAbstractPattern ap = colorSpace.getPattern(color);
                if (ap instanceof PDTilingPattern) {
                    pattern.set((PDTilingPattern) ap);
                }
            }
        }
    });
    // run it
    engine.processPage(page);

    return pattern.get();
}

From source file:no.barentswatch.fiskinfo.BaseActivity.java

/**
 * Sends a request to BarentsWatch for the given service, which returns a
 * JSONArray on success.
 * 
 * @param service
 *            The service to call in the API.
 * @return A JSONArray containing the response from BarentsWatch if the
 *         request succeeded, null otherwise.
 */
public JSONArray authenticatedGetRequestToBarentswatchAPIService(final String service) {
    if (!userIsAuthenticated) {
        Log.e("FiskInfo",
                "This should never happen. User must be logged in before we fetch the users geodata subs");
        return null;
    }

    final AtomicReference<String> responseAsString = new AtomicReference<String>();
    Thread thread = new Thread(new Runnable() {
        @Override
        public void run() {
            try {
                CloseableHttpClient httpclient = HttpClients.createDefault();
                try {
                    String base_url = "https://www.barentswatch.no/api/v1/" + service;
                    List<NameValuePair> getParameters = new ArrayList<NameValuePair>(1);
                    getParameters
                            .add(new BasicNameValuePair("access_token", storedToken.getString("access_token")));
                    String paramsString = URLEncodedUtils.format(getParameters, "UTF-8");

                    HttpGet httpGet = new HttpGet(base_url + "?" + paramsString);
                    httpGet.addHeader(HTTP.CONTENT_TYPE, "application/json");

                    CloseableHttpResponse response = httpclient.execute(httpGet);
                    try {
                        responseAsString.set(EntityUtils.toString(response.getEntity()));
                    } finally {
                        response.close();
                    }

                } finally {
                    httpclient.close();
                }
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    });

    thread.start();
    try {
        thread.join();
    } catch (InterruptedException e) {
        e.printStackTrace();
    }
    String barentswatchResponse = responseAsString.get();
    if (barentswatchResponse == null || barentswatchResponse.trim().length() == 0) {
        return null;
    }
    try {
        JSONArray barentswatchAPIJSONResponse = new JSONArray(barentswatchResponse);
        return barentswatchAPIJSONResponse;
    } catch (JSONException e) {
        e.printStackTrace();
    }
    return null;
}