Example usage for java.lang Throwable Throwable

List of usage examples for java.lang Throwable Throwable

Introduction

On this page you can find examples showing how the java.lang.Throwable constructor Throwable(Throwable cause) is used in real-world projects.

Prototype

public Throwable(Throwable cause) 

Document

Constructs a new throwable with the specified cause and a detail message of (cause==null ? null : cause.toString()) (which typically contains the class and detail message of cause).
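
For orientation before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what the constructor does: the new throwable's detail message is derived from cause.toString(), and the original exception stays reachable through getCause().

public class ThrowableCauseDemo {
    public static void main(String[] args) {
        Throwable cause = new IllegalStateException("disk full");

        // Throwable(Throwable cause): the detail message becomes cause.toString()
        Throwable wrapped = new Throwable(cause);

        System.out.println(wrapped.getMessage());        // java.lang.IllegalStateException: disk full
        System.out.println(wrapped.getCause() == cause); // true
    }
}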

Usage

From source file:org.apache.nifi.web.StandardNiFiServiceFacade.java

@Override
public NodeDTO updateNode(NodeDTO nodeDTO) {
    final NiFiUser user = NiFiUserUtils.getNiFiUser();
    if (user == null) {
        throw new WebApplicationException(new Throwable("Unable to access details for current user."));
    }
    final String userDn = user.getIdentity();

    if (Node.Status.CONNECTING.name().equalsIgnoreCase(nodeDTO.getStatus())) {
        clusterManager.requestReconnection(nodeDTO.getNodeId(), userDn);
    } else if (Node.Status.DISCONNECTING.name().equalsIgnoreCase(nodeDTO.getStatus())) {
        clusterManager.requestDisconnection(nodeDTO.getNodeId(), userDn);
    } else {
        // handle primary
        final Boolean primary = nodeDTO.isPrimary();
        if (primary != null && primary) {
            clusterManager.setPrimaryNode(nodeDTO.getNodeId(), userDn);
        }
    }

    final String nodeId = nodeDTO.getNodeId();
    return dtoFactory.createNodeDTO(clusterManager.getNode(nodeId), clusterManager.getNodeEvents(nodeId),
            isPrimaryNode(nodeId));
}

From source file:com.krawler.esp.servlets.importICSServlet.java

private static boolean getICalFileFromURL(File file, String url, boolean deleteOlderAndCreateNew)
        throws ServiceException {
    boolean success = false;
    InputStream is = null;
    try {
        URL u = new URL(url);
        URLConnection uc = u.openConnection();
        is = uc.getInputStream();
        if (uc.getContentType().contains("text/calendar")) {
            if (deleteOlderAndCreateNew) {
                file.delete(); // delete the file in store as it is an older one
            }
            file.createNewFile();
            FileOutputStream fop = new FileOutputStream(file);
            byte[] b = new byte[4096];
            int count = 0;
            while ((count = is.read(b)) >= 0) {
                fop.write(b, 0, count);
            }
            fop.close();
            closeInputStream(is);
            success = true;
        } else {
            closeInputStream(is);
            throw ServiceException.FAILURE("Given calendar URL is not a valid internet calendar.",
                    new Throwable(url));
        }
    } catch (MalformedURLException ex) {
        throw ServiceException.FAILURE(KWLErrorMsgs.calURLEx, ex);
    } catch (FileNotFoundException ex) {
        throw ServiceException.FAILURE(KWLErrorMsgs.calFileEx, ex);
    } catch (IOException ex) {
        closeInputStream(is);
        throw ServiceException.FAILURE(KWLErrorMsgs.calIOEx, ex);
    } catch (Exception ex) {
        closeInputStream(is);
        throw ServiceException.FAILURE(KWLErrorMsgs.calIOEx, ex);
    }
    return success;
}

From source file:org.apache.jmeter.util.JMeterUtils.java

/**
 * Report an error through a dialog box.
 *
 * @param errorMsg - the error message.
 * @param titleMsg - title string
 */
public static void reportErrorToUser(String errorMsg, String titleMsg) {
    if (errorMsg == null) {
        errorMsg = "Unknown error - see log file";
        log.warn("Unknown error", new Throwable("errorMsg == null"));
    }
    GuiPackage instance = GuiPackage.getInstance();
    if (instance == null) {
        System.out.println(errorMsg);
        return; // Done
    }
    try {
        JOptionPane.showMessageDialog(instance.getMainFrame(), errorMsg, titleMsg, JOptionPane.ERROR_MESSAGE);
    } catch (HeadlessException e) {
        log.warn("reportErrorToUser(\"" + errorMsg + "\") caused", e);
    }
}
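
In the JMeter method above, log.warn("Unknown error", new Throwable("errorMsg == null")) never throws anything; the freshly constructed Throwable exists only so the log entry records the stack trace of the caller that passed a null message. A minimal standalone sketch of the same pattern, using java.util.logging so it needs no external dependencies (the class and messages are made up for illustration):

import java.util.logging.Level;
import java.util.logging.Logger;

public class StackTraceLoggingDemo {
    private static final Logger LOG = Logger.getLogger(StackTraceLoggingDemo.class.getName());

    static void handle(String input) {
        if (input == null) {
            // Nothing is thrown; the Throwable only captures where handle(null) was called from.
            LOG.log(Level.WARNING, "input == null, falling back to default", new Throwable("input == null"));
            input = "default";
        }
        // ... continue with a non-null input
    }

    public static void main(String[] args) {
        handle(null); // logs a WARNING together with the call stack of this line
    }
}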

From source file:alma.acs.nc.NCSubscriber.java

/**
 * This method manages the filtering capabilities used to control subscriptions.
 * <p>
 * A constraint evaluates to true when both of the following conditions are true:
 *   A member of the constraint's EventTypeSeq matches the message's event type.
 *   The constraint expression evaluates to true.
 * 
 * @return FilterID (see OMG NotificationService spec 3.2.4.1)
 * @throws AcsJCORBAProblemEx
 */
protected int addFilter(String eventTypeName) throws AcsJCORBAProblemEx {

    try {
        // Create the filter
        FilterFactory filterFactory = channel.default_filter_factory();
        Filter filter = filterFactory.create_filter(getFilterLanguage());

        // Information needed to construct the constraint expression object
        // (any domain, THE event type)
        // Note that TAO will internally convert the event type name 
        // to the expression "$type_name=='<our_eventTypeName>'", 
        // see orbsvcs/Notify/Notify_Constraint_Interpreter.cpp
        EventType[] t_info = { new EventType("*", eventTypeName) }; // The old Consumer class used 'getChannelDomain()' instead of "*"..?

        // Add constraint expression object to the filter
        String constraint_expr = ""; // no constraints other than the eventTypeName already given above
        ConstraintExp[] cexp = { new ConstraintExp(t_info, constraint_expr) };
        filter.add_constraints(cexp);

        // Add the filter to the proxy and return the filter ID
        int filterId = proxySupplier.add_filter(filter);

        if (logger.isLoggable(AcsLogLevel.DELOUSE)) {
            NcFilterInspector insp = new NcFilterInspector(proxySupplier,
                    channelName + "::" + clientName + "::ProxySupplier", logger);
            logger.log(AcsLogLevel.DELOUSE,
                    "Added filter for '" + eventTypeName + "'. Current " + insp.getFilterInfo());

            //            NcFilterInspector insp2 = new NcFilterInspector(
            //                  sharedConsumerAdmin, channelName + "::" + clientName + "::Admin", logger);
            //            logger.log(AcsLogLevel.DEBUG, "Admin filters: " + insp2.getFilterInfo());
        }
        return filterId;

    } catch (org.omg.CosNotifyFilter.InvalidGrammar e) {
        Throwable cause = new Throwable("'" + eventTypeName + "' filter is invalid for the '" + channelName
                + "' channel: " + e.getMessage());
        throw new alma.ACSErrTypeCommon.wrappers.AcsJCORBAProblemEx(cause);
    } catch (org.omg.CosNotifyFilter.InvalidConstraint e) {
        Throwable cause = new Throwable("'" + eventTypeName + "' filter is invalid for the '" + channelName
                + "' channel: " + e.getMessage());
        throw new alma.ACSErrTypeCommon.wrappers.AcsJCORBAProblemEx(cause);
    }

}

From source file:com.github.caldav4j.CalDAVCollection.java

/**
 * Uses the HTTP HEAD Method to check if the connection is possible.
 * @param httpClient HTTPClient to make the request
 * @return StatusCode
 * @throws CalDAV4JException when Status is not {@link CalDAVStatus#SC_OK}
 */
public int testConnection(HttpClient httpClient) throws CalDAV4JException {
    HttpHead method = new HttpHead(getCalendarCollectionRoot());

    HttpResponse response = null;
    try {
        response = httpClient.execute(getDefaultHttpHost(method.getURI()), method);
    } catch (Exception e) {
        throw new CalDAV4JException(e.getMessage(), new Throwable(e.getCause()));
    }

    switch (response.getStatusLine().getStatusCode()) {
    case CalDAVStatus.SC_OK:
        break;
    default:
        throw new BadStatusException(response.getStatusLine().getStatusCode(), method.getMethod(),
                getCalendarCollectionRoot());
    }
    return response.getStatusLine().getStatusCode();
}

From source file:com.vmware.photon.controller.deployer.xenon.workflow.DeploymentWorkflowService.java

private void migrateData(State currentState, List<VmService.State> managementVms,
        final String destinationProtocol) {
    Collection<DeploymentMigrationInformation> migrationInformation = HostUtils.getDeployerContext(this)
            .getDeploymentMigrationInformation();

    final AtomicInteger latch = new AtomicInteger(migrationInformation.size());
    final List<Throwable> errors = new BlockingArrayQueue<>();

    Set<InetSocketAddress> sourceServers = new HashSet<>();
    Set<InetSocketAddress> destinationServers = new HashSet<>();

    for (DeploymentMigrationInformation entry : migrationInformation) {
        if (sourceServers.size() == 0) {
            sourceServers.add(new InetSocketAddress(getHost().getPreferredAddress(), getHost().getPort()));
            for (VmService.State vm : managementVms) {
                destinationServers.add(new InetSocketAddress(vm.ipAddress, vm.deployerXenonPort));
            }
        }

        String factory = entry.factoryServicePath;
        if (!factory.endsWith("/")) {
            factory += "/";
        }

        CopyStateTaskService.State startState = new CopyStateTaskService.State();
        startState.sourceURIs = Collections.singletonList(getHost().getUri());
        startState.sourceFactoryLink = factory;
        startState.destinationURI = UriUtils.buildUri(destinationProtocol, managementVms.get(0).ipAddress,
                managementVms.get(0).deployerXenonPort, null, null);
        startState.destinationFactoryLink = factory;

        TaskUtils.startTaskAsync(this, CopyStateTaskFactoryService.SELF_LINK, startState,
                state -> TaskUtils.finalTaskStages.contains(state.taskState.stage),
                CopyStateTaskService.State.class, currentState.taskPollDelay,
                new FutureCallback<CopyStateTaskService.State>() {

                    @Override
                    public void onSuccess(@Nullable CopyStateTaskService.State result) {
                        switch (result.taskState.stage) {
                        case FINISHED:
                            break;
                        case FAILED:
                        case CANCELLED:
                            errors.add(new Throwable("service: " + result.documentSelfLink + " did not finish. "
                                    + result.taskState.failure.message));
                            break;
                        default:
                            errors.add(new Throwable("service: " + result.documentSelfLink
                                    + " ended in unexpected stage " + result.taskState.stage.name()));
                            break;
                        }

                        if (latch.decrementAndGet() == 0) {
                            if (!errors.isEmpty()) {
                                failTask(errors);
                            } else {
                                updateDeploymentServiceState(destinationServers, currentState,
                                        destinationProtocol);
                            }
                        }
                    }

                    @Override
                    public void onFailure(Throwable t) {
                        errors.add(t);
                        if (latch.decrementAndGet() == 0) {
                            failTask(errors);
                        }
                    }
                });
    }
}

From source file:com.eryansky.common.utils.SysUtils.java

public static String fillzero(String str, int len) {
     if (str == null) {
         return "";
     }
     if (str.length() > len) {
         Throwable throwable = new Throwable("String length exceeds the requested length; cannot pad with zeros!");
         throwable.printStackTrace();
     }

     while (str.length() < len) {
         str = "0" + str;
     }
     return str;
 }
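
Assuming the class compiles as shown, a call such as SysUtils.fillzero("42", 5) returns "00042". If the input is already longer than len, the method does not throw: it only prints the stack trace of the freshly constructed Throwable as a diagnostic and returns the string unchanged.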

From source file:org.apache.nifi.controller.repository.StandardProcessSession.java

private void rollback(final boolean penalize, final boolean rollbackCheckpoint) {
    if (LOG.isDebugEnabled()) {
        LOG.debug("{} session rollback called, FlowFile records are {} {}", this, loggableFlowfileInfo(),
                new Throwable("Stack Trace on rollback"));
    }

    deleteOnCommit.clear();

    final Map<FlowFile, InputStream> openStreamCopy = new HashMap<>(openInputStreams); // avoid ConcurrentModificationException by creating a copy of the List
    for (final Map.Entry<FlowFile, InputStream> entry : openStreamCopy.entrySet()) {
        final FlowFile flowFile = entry.getKey();
        final InputStream openStream = entry.getValue();

        LOG.debug("{} closing {} for {} due to session rollback", this, openStream, flowFile);
        try {
            openStream.close();
        } catch (final Exception e) {
            LOG.warn("{} Attempted to close {} for {} due to session rollback but close failed", this,
                    openStream, this.connectableDescription);
            LOG.warn("", e);
        }
    }

    try {
        claimCache.reset();
    } catch (IOException e1) {
        LOG.warn("{} Attempted to close Output Stream for {} due to session rollback but close failed", this,
                this.connectableDescription, e1);
    }

    final Set<StandardRepositoryRecord> recordsToHandle = new HashSet<>();
    recordsToHandle.addAll(records.values());
    if (rollbackCheckpoint) {
        final Checkpoint existingCheckpoint = this.checkpoint;
        this.checkpoint = null;
        if (existingCheckpoint != null && existingCheckpoint.records != null) {
            recordsToHandle.addAll(existingCheckpoint.records.values());
        }
    }

    resetWriteClaims();
    resetReadClaim();

    if (recordsToHandle.isEmpty()) {
        LOG.trace("{} was rolled back, but no events were performed by this ProcessSession", this);
        acknowledgeRecords();
        resetState();
        return;
    }

    for (final StandardRepositoryRecord record : recordsToHandle) {
        // remove the working claims if they are different than the originals.
        removeTemporaryClaim(record);
    }

    final Set<RepositoryRecord> abortedRecords = new HashSet<>();
    final Set<StandardRepositoryRecord> transferRecords = new HashSet<>();
    for (final StandardRepositoryRecord record : recordsToHandle) {
        if (record.isMarkedForAbort()) {
            decrementClaimCount(record.getWorkingClaim());
            if (record.getCurrentClaim() != null
                    && !record.getCurrentClaim().equals(record.getWorkingClaim())) {
                // if working & original claim are same, don't remove twice; we only want to remove the original
                // if it's different from the working. Otherwise, we remove two claimant counts. This causes
                // an issue if we only updated the flowfile attributes.
                decrementClaimCount(record.getCurrentClaim());
            }
            abortedRecords.add(record);
        } else {
            transferRecords.add(record);
        }
    }

    // Put the FlowFiles that are not marked for abort back to their original queues
    for (final StandardRepositoryRecord record : transferRecords) {
        if (record.getOriginal() != null) {
            final FlowFileQueue originalQueue = record.getOriginalQueue();
            if (originalQueue != null) {
                if (penalize) {
                    final long expirationEpochMillis = System.currentTimeMillis()
                            + context.getConnectable().getPenalizationPeriod(TimeUnit.MILLISECONDS);
                    final FlowFileRecord newFile = new StandardFlowFileRecord.Builder()
                            .fromFlowFile(record.getOriginal()).penaltyExpirationTime(expirationEpochMillis)
                            .build();
                    originalQueue.put(newFile);
                } else {
                    originalQueue.put(record.getOriginal());
                }
            }
        }
    }

    if (!abortedRecords.isEmpty()) {
        try {
            context.getFlowFileRepository().updateRepository(abortedRecords);
        } catch (final IOException ioe) {
            LOG.error("Unable to update FlowFile repository for aborted records due to {}", ioe.toString());
            if (LOG.isDebugEnabled()) {
                LOG.error("", ioe);
            }
        }
    }

    // If we have transient claims that need to be cleaned up, do so.
    final List<ContentClaim> transientClaims = recordsToHandle.stream()
            .flatMap(record -> record.getTransientClaims().stream()).collect(Collectors.toList());

    if (!transientClaims.isEmpty()) {
        final RepositoryRecord repoRecord = new TransientClaimRepositoryRecord(transientClaims);
        try {
            context.getFlowFileRepository().updateRepository(Collections.singletonList(repoRecord));
        } catch (final IOException ioe) {
            LOG.error("Unable to update FlowFile repository to cleanup transient claims due to {}",
                    ioe.toString());
            if (LOG.isDebugEnabled()) {
                LOG.error("", ioe);
            }
        }
    }

    final Connectable connectable = context.getConnectable();
    final StandardFlowFileEvent flowFileEvent = new StandardFlowFileEvent(connectable.getIdentifier());
    flowFileEvent.setBytesRead(bytesRead);
    flowFileEvent.setBytesWritten(bytesWritten);

    // update event repository
    try {
        context.getFlowFileEventRepository().updateRepository(flowFileEvent);
    } catch (final Exception e) {
        LOG.error("Failed to update FlowFileEvent Repository due to " + e);
        if (LOG.isDebugEnabled()) {
            LOG.error("", e);
        }
    }

    acknowledgeRecords();
    resetState();
}

From source file:org.apache.nifi.web.controller.ControllerFacade.java

/**
 * Submits a replay request for the specified event id.
 *
 * @param eventId event id
 * @return provenance event
 */
public ProvenanceEventDTO submitReplay(final Long eventId) {
    try {
        final NiFiUser user = NiFiUserUtils.getNiFiUser();
        if (user == null) {
            throw new WebApplicationException(new Throwable("Unable to access details for current user."));
        }

        // lookup the original event
        final ProvenanceEventRecord originalEvent = flowController.getProvenanceRepository().getEvent(eventId);
        if (originalEvent == null) {
            throw new ResourceNotFoundException("Unable to find the specified event.");
        }

        // authorize the replay
        authorizeReplay(originalEvent);

        // replay the flow file
        final ProvenanceEventRecord event = flowController.replayFlowFile(originalEvent, user);

        // convert the event record
        return createProvenanceEventDto(event, false);
    } catch (final IOException ioe) {
        throw new NiFiCoreException("An error occurred while getting the specified event.", ioe);
    }
}

From source file:brooklyn.entity.basic.AbstractEntity.java

@Override
public <T> void emit(Sensor<T> sensor, T val) {
    if (sensor instanceof AttributeSensor) {
        LOG.warn(
                "Strongly discouraged use of emit with attribute sensor " + sensor + " " + val
                        + "; use setAttribute instead!",
                new Throwable("location of discouraged attribute " + sensor + " emit"));
    }
    if (val instanceof SensorEvent) {
        LOG.warn(
                "Strongly discouraged use of emit with sensor event as value " + sensor + " " + val
                        + "; value should be unpacked!",
                new Throwable("location of discouraged event " + sensor + " emit"));
    }
    BrooklynLogging.log(LOG, BrooklynLogging.levelDebugOrTraceIfReadOnly(this),
            "Emitting sensor notification {} value {} on {}", sensor.getName(), val, this);
    emitInternal(sensor, val);
}