Example usage for javax.management OperationsException OperationsException

List of usage examples for javax.management OperationsException OperationsException

Introduction

On this page you can find example usage for javax.management OperationsException OperationsException.

Prototype

public OperationsException(String message) 

Document

Constructor that allows a specific error message to be specified.

Usage
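
Before the real-world examples, here is a minimal, self-contained sketch of the constructor in isolation; the stream name and helper method are hypothetical:

import javax.management.OperationsException;

public class OperationsExceptionExample {

    public static void main(String[] args) {
        try {
            ensureStreamDoesNotExist("demo-stream");
        } catch (OperationsException e) {
            // The message passed to the constructor is available via getMessage().
            System.err.println("Operation failed: " + e.getMessage());
        }
    }

    // Hypothetical validation step that fails, mirroring the checks in the examples below.
    private static void ensureStreamDoesNotExist(String streamPath) throws OperationsException {
        boolean streamExists = true; // pretend the stream already exists so the exception path is exercised
        if (streamExists) {
            throw new OperationsException("Target Stream already exists: " + streamPath);
        }
    }
}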

From source file: com.microsoft.azure.management.datalake.store.uploader.DataLakeStoreUploader.java

/**
 * Validates that the metadata is valid for a resume operation, and updates the internal segment states to match the server's state.
 * If any changes are made, the metadata will be saved to its canonical location.
 *
 * @param metadata The {@link UploadMetadata} to resume the upload from.
 * @throws Exception
 */
private void validateMetadataForResume(UploadMetadata metadata) throws Exception {
    validateMetadataMatchesLocalFile(metadata);

    //verify that the target stream does not already exist (in case we don't want to overwrite)
    if (!this.getParameters().isOverwrite() && frontEnd.streamExists(metadata.getTargetStreamPath())) {
        throw new OperationsException("Target Stream already exists");
    }

    //make sure we don't upload part of the file as binary, while the rest is non-binary (that's just asking for trouble)
    if (this.getParameters().isBinary() != metadata.isBinary()) {
        throw new OperationsException(MessageFormat.format(
                "Existing metadata was created for a {0}binary file while the current parameters requested a {1}binary upload.",
                metadata.isBinary() ? "" : "non-", this.getParameters().isBinary() ? "" : "non-"));
    }

    //check which segment streams already exist on the server and update the metadata accordingly (segments missing from the server, or with mismatched lengths, must be re-uploaded)
    for (UploadSegmentMetadata segment : metadata.getSegments()) {
        if (segment.getStatus() == SegmentUploadStatus.Complete) {
            int retryCount = 0;
            while (retryCount < SingleSegmentUploader.MAX_BUFFER_UPLOAD_ATTEMPT_COUNT) {
                retryCount++;
                try {
                    //verify that the stream exists and that the length is as expected
                    if (!frontEnd.streamExists(segment.getPath())) {
                        // this segment was marked as completed, but no target stream exists; it needs to be reuploaded
                        segment.setStatus(SegmentUploadStatus.Pending);
                    } else {
                        long remoteLength = frontEnd.getStreamLength(segment.getPath());
                        if (remoteLength != segment.getLength()) {
                            //the target stream has a different length than the input segment, which implies they are inconsistent; it needs to be reuploaded
                            segment.setStatus(SegmentUploadStatus.Pending);
                        }
                    }

                    break;
                } catch (Exception e) {
                    if (retryCount >= SingleSegmentUploader.MAX_BUFFER_UPLOAD_ATTEMPT_COUNT) {
                        throw new UploadFailedException(MessageFormat.format(
                                "Cannot validate metadata in order to resume due to the following exception retrieving file information: {0}",
                                e));
                    }

                    SingleSegmentUploader.waitForRetry(retryCount,
                            parameters.isUseSegmentBlockBackOffRetryStrategy());
                }
            }
        } else {
            //anything which is not in 'Completed' status needs to be reuploaded
            segment.setStatus(SegmentUploadStatus.Pending);
        }
    }
    metadata.save();
}

From source file: com.microsoft.azure.management.datalake.store.uploader.DataLakeStoreUploader.java

/**
 * Verifies that the metadata is valid for a fresh upload.
 *
 * @param metadata {@link UploadMetadata} to validate for a fresh upload.
 * @throws Exception
 */
private void validateMetadataForFreshUpload(UploadMetadata metadata) throws Exception {
    validateMetadataMatchesLocalFile(metadata);

    //verify that the target stream does not already exist (in case we don't want to overwrite)
    if (!this.getParameters().isOverwrite() && frontEnd.streamExists(metadata.getTargetStreamPath())) {
        throw new OperationsException("Target Stream already exists");
    }
}

From source file: com.microsoft.azure.management.datalake.store.uploader.DataLakeStoreUploader.java

/**
 * Verifies that the metadata is consistent with the local file information.
 *
 * @param metadata The {@link UploadMetadata} to check against the local file and the current upload parameters.
 * @throws OperationsException
 */
private void validateMetadataMatchesLocalFile(UploadMetadata metadata) throws OperationsException {
    if (!metadata.getTargetStreamPath().trim()
            .equalsIgnoreCase(this.getParameters().getTargetStreamPath().trim())) {
        throw new OperationsException("Metadata points to a different target stream than the input parameters");
    }

    //verify that it matches against local file (size, name)
    File metadataInputFileInfo = new File(metadata.getInputFilePath());
    File paramInputFileInfo = new File(this.getParameters().getInputFilePath());

    if (!paramInputFileInfo.toString().toLowerCase().equals(metadataInputFileInfo.toString().toLowerCase())) {
        throw new OperationsException("The metadata refers to different file than the one requested");
    }

    if (!metadataInputFileInfo.exists()) {
        throw new OperationsException("The metadata refers to a file that does not exist");
    }

    if (metadata.getFileLength() != metadataInputFileInfo.length()) {
        throw new OperationsException("The metadata's file information differs from the actual file");
    }
}

From source file: com.microsoft.azure.management.datalake.store.uploader.DataLakeStoreUploader.java

/**
 * Concatenates all the segments defined in the metadata into a single stream.
 *
 * @param metadata The {@link UploadMetadata} to determine the segments to concatenate
 * @throws Exception
 */
private void concatenateSegments(final UploadMetadata metadata) throws Exception {
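    //one remote stream path per segment; each validation task below fills in its own slot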
    final String[] inputPaths = new String[metadata.getSegmentCount()];

    //verify if target stream exists
    if (frontEnd.streamExists(metadata.getTargetStreamPath())) {
        if (this.getParameters().isOverwrite()) {
            frontEnd.deleteStream(metadata.getTargetStreamPath(), false);
        } else {
            throw new OperationsException("Target Stream already exists");
        }
    }

    //ensure all input streams exist and are of the expected length
    //ensure all segments in the metadata are marked as 'complete'
    final List<Exception> exceptions = new ArrayList<>();
    ExecutorService exec = Executors.newFixedThreadPool(this.getParameters().getThreadCount());
    for (int i = 0; i < metadata.getSegmentCount(); i++) {
        final int finalI = i;
        exec.submit(new Runnable() {
            @Override
            public void run() {
                try {
                    if (metadata.getSegments()[finalI].getStatus() != SegmentUploadStatus.Complete) {
                        throw new UploadFailedException(
                                "Cannot perform 'concatenate' operation because not all streams are fully uploaded.");
                    }

                    String remoteStreamPath = metadata.getSegments()[finalI].getPath();
                    int retryCount = 0;
                    long remoteLength = -1;

                    while (retryCount < SingleSegmentUploader.MAX_BUFFER_UPLOAD_ATTEMPT_COUNT) {
                        retryCount++;
                        try {
                            remoteLength = frontEnd.getStreamLength(remoteStreamPath);
                            break;
                        } catch (Exception e) {
                            if (retryCount >= SingleSegmentUploader.MAX_BUFFER_UPLOAD_ATTEMPT_COUNT) {
                                throw new UploadFailedException(MessageFormat.format(
                                        "Cannot perform 'concatenate' operation due to the following exception retrieving file information: {0}",
                                        e));
                            }

                            SingleSegmentUploader.waitForRetry(retryCount,
                                    parameters.isUseSegmentBlockBackOffRetryStrategy());
                        }
                    }

                    if (remoteLength != metadata.getSegments()[finalI].getLength()) {
                        throw new UploadFailedException(MessageFormat.format(
                                "Cannot perform 'concatenate' operation because segment {0} has an incorrect length (expected {1}, actual {2}).",
                                finalI, metadata.getSegments()[finalI].getLength(), remoteLength));
                    }

                    inputPaths[finalI] = remoteStreamPath;

                } catch (Exception ex) {
                    //collect any exceptions, whether generated above or thrown by the front end
                    synchronized (exceptions) {
                        exceptions.add(ex);
                    }
                }
            }
        });
    }

    exec.shutdown();

    try {
        exec.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); // effectively waits indefinitely (Long.MAX_VALUE nanoseconds is roughly 292 years)
    } catch (InterruptedException e) {
        //record the interruption so it surfaces as a failure below
        exceptions.add(e);
    }

    if (exceptions.size() > 0) {
        throw new AggregateUploadException("At least one concatenate test failed", exceptions.remove(0),
                exceptions);
    }

    //issue the command
    frontEnd.concatenate(metadata.getTargetStreamPath(), inputPaths);
}

From source file: org.apache.qpid.server.jmx.mbeans.QueueMBean.java

public TabularData viewMessages(long startPosition, long endPosition) throws IOException, JMException {
    if ((startPosition > endPosition) || (startPosition < 1)) {
        throw new OperationsException("From Index = " + startPosition + ", To Index = " + endPosition
                + "\n\"From Index\" should be greater than 0 and less than \"To Index\"");
    }

    if ((endPosition - startPosition) > Integer.MAX_VALUE) {
        throw new OperationsException(
                "Specified MessageID interval is too large. Intervals must be less than 2^31 in size");
    }

    List<QueueEntry> messages = getMessages(startPosition, endPosition);

    TabularDataSupport messageTable = new TabularDataSupport(MSG_LIST_DATA_TYPE);

    // Create the tabular list of message header contents
    long position = startPosition;

    for (QueueEntry queueEntry : messages) {
        ServerMessage serverMsg = queueEntry.getMessage();
        AMQMessageHeader header = serverMsg.getMessageHeader();
        String[] headerAttributes = { "reply-to = " + header.getReplyTo(), "propertyFlags = ",
                "ApplicationID = " + header.getAppId(), "ClusterID = ", "UserId = " + header.getUserId(),
                "JMSMessageID = " + header.getMessageId(), "JMSCorrelationID = " + header.getCorrelationId(),
                "JMSDeliveryMode = " + (serverMsg.isPersistent() ? "Persistent" : "Non_Persistent"),
                "JMSPriority = " + header.getPriority(), "JMSType = " + header.getType(),
                "JMSExpiration = " + (header.getExpiration() == 0 ? null
                        : FAST_DATE_FORMAT.format(header.getExpiration())),
                "JMSTimestamp = " + (header.getTimestamp() == 0 ? null
                        : FAST_DATE_FORMAT.format(header.getTimestamp())) };

        Object[] itemValues = new Object[] { serverMsg.getMessageNumber(), headerAttributes,
                serverMsg.getSize(), queueEntry.isRedelivered(), position, queueEntry.getDeliveryCount() };

        position++;

        CompositeData messageData = new CompositeDataSupport(MSG_DATA_TYPE,
                VIEW_MSGS_COMPOSITE_ITEM_NAMES_DESC_ARRAY, itemValues);
        messageTable.put(messageData);
    }

    return messageTable;

}

From source file: org.apache.qpid.server.jmx.mbeans.QueueMBean.java

public CompositeData viewMessageContent(long messageId) throws IOException, JMException {
    QueueEntry entry = getMessage(messageId);
    if (entry == null) {
        throw new OperationsException(
                "AMQMessage with message id = " + messageId + " is not in the " + _queue.getName());
    }

    ServerMessage serverMsg = entry.getMessage();
    final int bodySize = (int) serverMsg.getSize();

    byte[] msgContent = new byte[bodySize];

    ByteBuffer buf = ByteBuffer.wrap(msgContent);
    int stored = serverMsg.getContent(buf, 0);

    if (bodySize != stored) {
        LOGGER.error(String.format(
                "An unexpected amount of content was retrieved "
                        + "(expected %d, got %d bytes) when viewing content for message with ID %d "
                        + "on queue '%s' in virtual host '%s'",
                bodySize, stored, messageId, _queue.getName(), _vhostMBean.getName()));
    }

    AMQMessageHeader header = serverMsg.getMessageHeader();

    String mimeType = null, encoding = null;
    if (header != null) {
        mimeType = header.getMimeType();

        encoding = header.getEncoding();
    }

    Object[] itemValues = { messageId, mimeType, encoding, msgContent };

    return new CompositeDataSupport(MSG_CONTENT_TYPE, VIEW_MSG_COMPOSIT_ITEM_NAMES_ARRAY, itemValues);

}

From source file: org.apache.qpid.server.jmx.mbeans.QueueMBean.java

public void moveMessages(final long fromMessageId, final long toMessageId, String toQueue)
        throws IOException, JMException {
    if ((fromMessageId > toMessageId) || (fromMessageId < 1)) {
        throw new OperationsException(
                "\"From MessageId\" should be greater than 0 and less than \"To MessageId\"");
    }

    VirtualHost vhost = _queue.getParent(VirtualHost.class);
    final Queue destinationQueue = MBeanUtils.findQueueFromQueueName(vhost, toQueue);

    vhost.executeTransaction(new VirtualHost.TransactionalOperation() {
        public void withinTransaction(final VirtualHost.Transaction txn) {
            _queue.visit(new QueueEntryVisitor() {

                public boolean visit(final QueueEntry entry) {
                    final ServerMessage message = entry.getMessage();
                    if (message != null) {
                        final long messageId = message.getMessageNumber();

                        if ((messageId >= fromMessageId) && (messageId <= toMessageId)) {
                            txn.move(entry, destinationQueue);
                        }

                    }
                    return false;
                }
            });
        }
    });
}

From source file: org.apache.qpid.server.jmx.mbeans.QueueMBean.java

public void copyMessages(final long fromMessageId, final long toMessageId, String toQueue)
        throws IOException, JMException {
    if ((fromMessageId > toMessageId) || (fromMessageId < 1)) {
        throw new OperationsException(
                "\"From MessageId\" should be greater than 0 and less than \"To MessageId\"");
    }

    VirtualHost vhost = _queue.getParent(VirtualHost.class);
    final Queue destinationQueue = MBeanUtils.findQueueFromQueueName(vhost, toQueue);

    vhost.executeTransaction(new VirtualHost.TransactionalOperation() {
        public void withinTransaction(final VirtualHost.Transaction txn) {
            _queue.visit(new QueueEntryVisitor() {

                public boolean visit(final QueueEntry entry) {
                    final ServerMessage message = entry.getMessage();
                    if (message != null) {
                        final long messageId = message.getMessageNumber();

                        if ((messageId >= fromMessageId) && (messageId <= toMessageId)) {
                            txn.copy(entry, destinationQueue);
                        }

                    }
                    return false;
                }
            });
        }
    });
}

From source file: org.moe.cli.manager.SourceCocoaPodsManager.java

public IExecutor processCocoapods(String source, SpecObject spec, String packageName, String[] javaSource,
        String outputJar) throws IOException, OperationsException, InterruptedException, WrapNatJGenException {
    generatePodFile(spec);
    extractXCodeProject();
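    //a non-null message from buildPods indicates the pod build failed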
    String buildExitMessage = buildPods(spec);

    if (buildExitMessage != null) {
        throw new OperationsException(buildExitMessage);
    }
    List<File> dependenciesFramework = new ArrayList<>();

    List<String> exceptList = new ArrayList<>();
    exceptList.add(spec.getName());
    List<SpecObject> allUsedSpecs = getAllDepSpecs(spec, exceptList);
    allUsedSpecs.add(spec);

    for (SpecObject key : allUsedSpecs) {
        //for iphonesimulator
        File simDep = new File(JPod.getAbsolutePath(), "/build/Release-iphonesimulator/" + key.getName());
        File[] simFiles = simDep.listFiles();
        if (simFiles != null) {
            String nameRegexp = key.getName().replace("-", ".") + ".framework";
            for (File file : simFiles) {
                if (file.getName().matches(nameRegexp)) {
                    dependenciesFramework.add(file);
                }
            }
        }

        //for iphoneos
        File devDep = new File(JPod.getAbsolutePath(), "/build/Release-iphoneos/" + key.getName());
        File[] devFiles = devDep.listFiles();
        if (devFiles != null) {
            String nameRegexp = key.getName().replace("-", ".") + ".framework";
            for (File file : devFiles) {
                if (file.getName().matches(nameRegexp)) {
                    dependenciesFramework.add(file);
                }
            }
        }
    }

    IExecutor executor = null;
    if (dependenciesFramework.size() > 0) {

        Set<String> headerContent = new HashSet<String>();
        for (File fr : dependenciesFramework) {
            File headers = new File(fr, "Headers");
            if (headers.exists()) {
                headerContent.add(headers.getPath());
            }
        }

        String ldFlags = spec.getLdFlags();

        //find all bundles
        File destination = new File(JPod.getAbsolutePath(), String.format("/Pods/%s", spec.getName()));
        Set<String> bundleContent = new HashSet<String>();
        List<String> resources = spec.getResources();
        if (resources != null && resources.size() > 0) {
            for (String bundle : resources) {
                Set<String> bundleWildCard = getBundleResources(bundle, destination);
                bundleContent.addAll(bundleWildCard);
            }
        }

        Set<String> frameworkParam = new HashSet<String>();
        for (File fr : dependenciesFramework) {
            frameworkParam.add(fr.getAbsolutePath());

            int frameworkNameIdx = fr.getName().lastIndexOf(".");
            if (frameworkNameIdx >= 0) {
                ldFlags = ldFlags + "-framework " + fr.getName().substring(0, frameworkNameIdx) + ";";
            }
        }

        executor = new ThirdPartyFrameworkLinkExecutor(packageName, frameworkParam.toArray(new String[0]),
                javaSource, headerContent.toArray(new String[0]), bundleContent.toArray(new String[0]),
                outputJar, ldFlags);
    }

    return executor;
}