Example usage for java.util.concurrent Future cancel

Introduction

This page collects usage examples for the java.util.concurrent Future.cancel(boolean) method.

Prototype

boolean cancel(boolean mayInterruptIfRunning);

Document

Attempts to cancel execution of this task.
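
Before the project-level examples below, here is a minimal, self-contained sketch of the pattern most of them follow: submit a task, wait for its result with a timeout, and call cancel(true) if it takes too long. The executor, task body, and timeout used here are purely illustrative.

import java.util.concurrent.*;

public class FutureCancelDemo {
    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        Future<String> future = executor.submit(() -> {
            Thread.sleep(5_000); // simulated slow work; responds to interruption
            return "done";
        });
        try {
            System.out.println(future.get(1, TimeUnit.SECONDS));
        } catch (TimeoutException e) {
            // true => interrupt the worker thread if the task is already running
            boolean cancelled = future.cancel(true);
            System.out.println("cancelled=" + cancelled + ", isCancelled=" + future.isCancelled());
        } finally {
            executor.shutdownNow();
        }
    }
}

Passing true asks the executor to interrupt the thread running the task; passing false only prevents a task that has not yet started from ever running.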

Usage

From source file:org.apache.marmotta.platform.sparql.services.sparql.SparqlServiceImpl.java

@Override
@Deprecated
public void query(final QueryLanguage queryLanguage, final String query, final QueryResultWriter writer,
        final int timeoutInSeconds)
        throws MarmottaException, MalformedQueryException, QueryEvaluationException, TimeoutException {
    log.debug("executing SPARQL query:\n{}", query);
    Future<Boolean> future = executorService.submit(new Callable<Boolean>() {
        @Override
        public Boolean call() throws Exception {
            long start = System.currentTimeMillis();
            try {
                RepositoryConnection connection = sesameService.getConnection();
                try {
                    connection.begin();
                    Query sparqlQuery = connection.prepareQuery(queryLanguage, query,
                            configurationService.getBaseUri());

                    if (sparqlQuery instanceof TupleQuery) {
                        query((TupleQuery) sparqlQuery, (TupleQueryResultWriter) writer);
                    } else if (sparqlQuery instanceof BooleanQuery) {
                        query((BooleanQuery) sparqlQuery, (BooleanQueryResultWriter) writer);
                    } else if (sparqlQuery instanceof GraphQuery) {
                        query((GraphQuery) sparqlQuery, ((SPARQLGraphResultWriter) writer).getOutputStream(),
                                ((SPARQLGraphResultWriter) writer).getFormat());
                    } else {
                        connection.rollback();
                        throw new InvalidArgumentException(
                                "SPARQL query type " + sparqlQuery.getClass() + " not supported!");
                    }

                    connection.commit();
                } catch (Exception ex) {
                    connection.rollback();
                    throw ex;
                } finally {
                    connection.close();
                }
            } catch (RepositoryException e) {
                log.error("error while getting repository connection: {}", e);
                throw new MarmottaException("error while getting repository connection", e);
            } catch (QueryEvaluationException e) {
                log.error("error while evaluating query: {}", e.getMessage());
                throw new MarmottaException("error while evaluating query", e);
            }

            log.debug("SPARQL execution took {}ms", System.currentTimeMillis() - start);

            return Boolean.TRUE;
        }
    });

    try {
        future.get(timeoutInSeconds, TimeUnit.SECONDS);
    } catch (InterruptedException | TimeoutException e) {
        log.info("SPARQL query execution aborted due to timeout");
        future.cancel(true);
        throw new TimeoutException("SPARQL query execution aborted due to timeout (" + timeoutInSeconds + "s)");
    } catch (ExecutionException e) {
        log.info("SPARQL query execution aborted due to exception");
        log.debug("exception details", e);
        if (e.getCause() instanceof MarmottaException) {
            throw (MarmottaException) e.getCause();
        } else if (e.getCause() instanceof MalformedQueryException) {
            throw (MalformedQueryException) e.getCause();
        } else {
            throw new MarmottaException("unknown exception while evaluating SPARQL query", e.getCause());
        }
    }
}

From source file:com.mirth.connect.client.ui.browsers.message.MessageBrowser.java

/**
 * An action for when a row is selected in the table
 */
private void MessageListSelected(ListSelectionEvent evt) {
    if (!evt.getValueIsAdjusting()) {
        int row = getSelectedMessageIndex();

        if (row >= 0) {
            // Cancel all pretty printing tasks
            for (Future<Void> worker : prettyPrintWorkers) {
                worker.cancel(true);
            }
            prettyPrintWorkers.clear();

            parent.setVisibleTasks(parent.messageTasks, parent.messagePopupMenu, 6, 6, true);
            parent.setVisibleTasks(parent.messageTasks, parent.messagePopupMenu, 7, -1, isChannelDeployed);

            this.setCursor(Cursor.getPredefinedCursor(Cursor.WAIT_CURSOR));

            // Get the table node
            MessageBrowserTableNode messageNode = (MessageBrowserTableNode) messageTreeTable.getPathForRow(row)
                    .getLastPathComponent();

            if (messageNode.isNodeActive()) {
                // Get the messageId from the message node
                Long messageId = messageNode.getMessageId();
                // Get the metaDataId from the message node
                Integer metaDataId = messageNode.getMetaDataId();

                // Attempt to get the message from the message cache
                Message message = messageCache.get(messageId);
                List<Attachment> attachments = attachmentCache.get(messageId);

                // If the message is not in the cache, retrieve it from the server
                if (message == null) {
                    try {
                        message = parent.mirthClient.getMessageContent(channelId, messageId,
                                selectedMetaDataIds);
                        // If the message was not found (i.e. it may have been deleted during the request), clear the description and return
                        if (message == null || message.getConnectorMessages().size() == 0) {
                            clearDescription(
                                    "Could not retrieve message content. The message may have been deleted.");
                            this.setCursor(Cursor.getPredefinedCursor(Cursor.DEFAULT_CURSOR));
                            return;
                        }

                        attachments = parent.mirthClient.getAttachmentsByMessageId(channelId, messageId, false);
                    } catch (Throwable t) {
                        if (t.getMessage().contains("Java heap space")) {
                            parent.alertError(parent,
                                    "There was an out of memory error when trying to retrieve message content.\nIncrease your heap size and try again.");
                        } else if (t instanceof RequestAbortedException) {
                            // The client is no longer waiting for the message content request
                        } else {
                            parent.alertThrowable(parent, t);
                        }
                        this.setCursor(Cursor.getPredefinedCursor(Cursor.DEFAULT_CURSOR));
                        return;
                    }
                    // Add the retrieved message to the message cache
                    messageCache.put(messageId, message);
                    attachmentCache.put(messageId, attachments);
                }

                ConnectorMessage connectorMessage = message.getConnectorMessages().get(metaDataId);

                if (connectorMessage != null) {
                    // Update the message tabs
                    updateDescriptionMessages(connectorMessage);
                    // Update the mappings tab
                    updateDescriptionMappings(connectorMessage);
                    // Update the attachments tab
                    updateAttachmentsTable(messageId);
                    // Update the errors tab
                    updateDescriptionErrors(connectorMessage);
                    // Show relevant tabs. Not using errorCode here just in case for some reason there are errors even though errorCode is 0
                    updateDescriptionTabs(connectorMessage.getProcessingError() != null
                            || connectorMessage.getPostProcessorError() != null
                            || connectorMessage.getResponseError() != null, attachments.size() > 0);
                    updateMessageRadioGroup();

                    if (attachmentTable == null || attachmentTable.getSelectedRow() == -1
                            || descriptionTabbedPane.indexOfTab("Attachments") == -1) {
                        parent.setVisibleTasks(parent.messageTasks, parent.messagePopupMenu, 9, 10, false);
                    }
                }
            } else {
                clearDescription(null);
            }

            this.setCursor(Cursor.getPredefinedCursor(Cursor.DEFAULT_CURSOR));

        }
    }
}

From source file:raptor.engine.uci.UCIEngine.java

/**
 * Connects to the engine. After this method is invoked the engine name,
 * engine author, and options will be populated in this object.
 *
 * @return true if connection was successful, false otherwise.
 */
public boolean connect() {
    if (isConnected()) {
        return true;
    }

    resetConnectionState();
    Future<?> connectionTimeoutFuture = ThreadService.getInstance().scheduleOneShot(CONNECTION_TIMEOUT,
            new Runnable() {
                public void run() {
                    disconnect();
                }
            });

    try {
        long startTime = System.currentTimeMillis();

        if (parameters == null || parameters.length == 0) {
            process = new ProcessBuilder(processPath).directory(new File(new File(processPath).getParent()))
                    .start();
        } else {
            String[] args = new String[parameters.length + 1];
            args[0] = processPath;
            System.arraycopy(parameters, 0, args, 1, parameters.length);
            process = new ProcessBuilder(args).start();
        }
        in = new BufferedReader(new InputStreamReader(process.getInputStream()), 10000);
        out = new PrintWriter(process.getOutputStream());

        send("uci");

        String currentLine = null;
        while ((currentLine = readLine()) != null) {
            if (LOG.isDebugEnabled())
                LOG.debug(currentLine);
            if (currentLine.startsWith("id")) {
                parseIdLine(currentLine);
            } else if (currentLine.startsWith("option ")) {
                parseOptionLine(currentLine);
            } else if (currentLine.startsWith("uciok")) {
                break;
            } else {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Unknown response to uci, ignoring: " + currentLine);
                }
            }
        }

        UCIOption multiPv = new UCISpinner();
        multiPv.setName("MultiPV");
        multiPv.setValue(
                Raptor.getInstance().getPreferences().getString(PreferenceKeys.STOCKFISH_MOVES_TO_SUGGEST));
        setOption(multiPv);
        isReady();

        if (LOG.isDebugEnabled()) {
            LOG.debug("engineName=" + engineName + " engineAuthor=" + engineAuthor + " UCI_Chess960="
                    + supportsFischerRandom + " Options:\n" + nameToOptions.values() + " initialized in "
                    + (System.currentTimeMillis() - startTime) + "ms");
        }

        connectionTimeoutFuture.cancel(true);
        return true;
    } catch (Throwable t) {
        LOG.error("Error connecting to UCI Engine " + this, t);
        disconnect();
        return false;
    }
}
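
The connection watchdog above uses Raptor's ThreadService.scheduleOneShot. As a hedged sketch only, a rough standard-library equivalent could schedule the disconnect on a ScheduledExecutorService and cancel it once the handshake succeeds; the class and the handshake()/disconnect() methods below are invented placeholders.

import java.util.concurrent.*;

public class ConnectWatchdogSketch {
    private static final ScheduledExecutorService SCHEDULER =
            Executors.newSingleThreadScheduledExecutor();
    private static final long CONNECTION_TIMEOUT_MS = 10_000;

    public boolean connect() {
        // Watchdog: if connect() has not finished in time, force a disconnect.
        Future<?> watchdog = SCHEDULER.schedule(this::disconnect,
                CONNECTION_TIMEOUT_MS, TimeUnit.MILLISECONDS);
        try {
            handshake(); // placeholder for the real "uci" handshake
            watchdog.cancel(true); // success: the watchdog is no longer needed
            return true;
        } catch (Exception e) {
            disconnect();
            return false;
        }
    }

    private void handshake() throws Exception { /* ... */ }
    private void disconnect() { /* ... */ }
}

Here cancel(true) merely stops the scheduled disconnect from firing after a successful connect; if the handshake hangs, the watchdog still runs.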

From source file:org.apache.marmotta.kiwi.sparql.persistence.KiWiSparqlConnection.java

/**
 * Evaluate a statement pattern join or filter on the database by translating it into an appropriate SQL statement.
 * Copied and adapted from KiWiReasoningConnection.query()
 *
 * @param join the tuple expression (join, filter, or statement pattern) to translate
 * @param bindings initial variable bindings applied as additional query conditions (may be null)
 * @param dataset the dataset providing default and named graph restrictions (may be null)
 * @return a closeable iteration over the binding sets produced by the translated SQL query
 */
public CloseableIteration<BindingSet, SQLException> evaluateJoin(TupleExpr join, final BindingSet bindings,
        final Dataset dataset) throws SQLException, InterruptedException {
    Preconditions
            .checkArgument(join instanceof Join || join instanceof Filter || join instanceof StatementPattern
                    || join instanceof Distinct || join instanceof Slice || join instanceof Reduced);

    // some definitions
    String[] positions = new String[] { "subject", "predicate", "object", "context" };

    // collect all patterns in a list, using depth-first search over the join
    List<StatementPattern> patterns = new PatternCollector(join).patterns;

    LimitFinder limitFinder = new LimitFinder(join);
    long offset = limitFinder.offset;
    long limit = limitFinder.limit;

    boolean distinct = new DistinctFinder(join).distinct;

    // associate a name with each pattern; the names are used in the database query to refer to the triple
    // that matched this pattern and in the construction of variable names for the SQL query
    int patternCount = 0;
    final Map<StatementPattern, String> patternNames = new HashMap<StatementPattern, String>();
    for (StatementPattern p : patterns) {
        patternNames.put(p, "P" + (++patternCount));
    }

    // find all variables occurring in the patterns and create a map to map them to
    // field names in the database query; each variable will have one or several field names,
    // one for each pattern it occurs in; field names are constructed automatically by a counter
    // and the pattern name to ensure the name is a valid SQL identifier
    int variableCount = 0;

    // a map for the variable names; will look like { ?x -> "V1", ?y -> "V2", ... }
    final Map<Var, String> variableNames = new HashMap<>();

    // a map for mapping variables to field names; each variable might have one or more field names,
    // depending on the number of patterns it occurs in; will look like
    // { ?x -> ["P1_V1", "P2_V1"], ?y -> ["P2_V2"], ... }
    Map<Var, List<String>> queryVariables = new HashMap<>();
    Map<Var, List<String>> queryVariableIds = new HashMap<>();

    // a map for defining alternative context values for each variable used in the context part of a pattern
    Map<StatementPattern, List<Resource>> variableContexts = new HashMap<>();

    for (StatementPattern p : patterns) {
        // check graph restrictions in datasets (MARMOTTA-340)
        Resource[] contexts;
        Value contextValue = p.getContextVar() != null ? p.getContextVar().getValue() : null;

        Set<URI> graphs = null;
        boolean emptyGraph = false;

        if (dataset != null) {
            if (p.getScope() == StatementPattern.Scope.DEFAULT_CONTEXTS) {
                graphs = dataset.getDefaultGraphs();
                emptyGraph = graphs.isEmpty() && !dataset.getNamedGraphs().isEmpty();
            } else {
                graphs = dataset.getNamedGraphs();
                emptyGraph = graphs.isEmpty() && !dataset.getDefaultGraphs().isEmpty();
            }
        }

        if (emptyGraph) {
            // Search zero contexts
            return new EmptyIteration<BindingSet, SQLException>();
        } else if (graphs == null || graphs.isEmpty()) {
            if (contextValue != null) {
                contexts = new Resource[] { (Resource) contextValue };
            } else {
                contexts = new Resource[0];
            }
        } else if (contextValue != null) {
            if (graphs.contains(contextValue)) {
                contexts = new Resource[] { (Resource) contextValue };
            } else {
                // Statement pattern specifies a context that is not part of
                // the dataset
                return new EmptyIteration<BindingSet, SQLException>();
            }
        } else {
            contexts = new Resource[graphs.size()];
            int i = 0;
            for (URI graph : graphs) {
                URI context = null;
                if (!SESAME.NIL.equals(graph)) {
                    context = graph;
                }
                contexts[i++] = context;
            }
        }

        // build pattern
        Var[] fields = new Var[] { p.getSubjectVar(), p.getPredicateVar(), p.getObjectVar(),
                p.getContextVar() };
        for (int i = 0; i < fields.length; i++) {
            if (fields[i] != null && !fields[i].hasValue()) {
                Var v = fields[i];
                if (variableNames.get(v) == null) {
                    variableNames.put(v, "V" + (++variableCount));
                    queryVariables.put(v, new LinkedList<String>());
                    queryVariableIds.put(v, new LinkedList<String>());
                }
                String pName = patternNames.get(p);
                String vName = variableNames.get(v);
                if (hasNodeCondition(fields[i], join)) {
                    queryVariables.get(v).add(pName + "_" + positions[i] + "_" + vName);
                }
                queryVariableIds.get(v).add(pName + "." + positions[i]);
            }
        }

        // build an OR query for the value of the context variable
        if (contexts.length > 0) {
            variableContexts.put(p, Arrays.asList(contexts));
        }
    }

    // build the select clause by projecting for each query variable the first name
    StringBuilder selectClause = new StringBuilder();

    if (distinct) {
        selectClause.append("DISTINCT ");
    }

    final List<Var> selectVariables = new LinkedList<Var>();
    for (Iterator<Var> it = queryVariableIds.keySet().iterator(); it.hasNext();) {
        Var v = it.next();
        String projectedName = variableNames.get(v);
        String fromName = queryVariableIds.get(v).get(0);
        selectClause.append(fromName);
        selectClause.append(" as ");
        selectClause.append(projectedName);
        if (it.hasNext()) {
            selectClause.append(", ");
        }
        selectVariables.add(v);
    }

    // build the from-clause of the query; the from clause is constructed as follows:
    // 1. for each pattern P, there will be a "triples P" entry in the from clause
    // 2. for each variable V occurring in the subject, predicate, object, or context position of P
    //    that is constrained by a node condition, the nodes table is joined in as
    //    "INNER JOIN nodes AS P_<position>_V ON P.<position> = P_<position>_V.id"
    StringBuilder fromClause = new StringBuilder();
    for (Iterator<StatementPattern> it = patterns.iterator(); it.hasNext();) {
        StatementPattern p = it.next();
        String pName = patternNames.get(p);
        fromClause.append("triples " + pName);

        Var[] fields = new Var[] { p.getSubjectVar(), p.getPredicateVar(), p.getObjectVar(),
                p.getContextVar() };
        for (int i = 0; i < fields.length; i++) {
            if (fields[i] != null && !fields[i].hasValue() && hasNodeCondition(fields[i], join)) {
                String vName = variableNames.get(fields[i]);
                fromClause.append(" INNER JOIN nodes AS ");
                fromClause.append(pName + "_" + positions[i] + "_" + vName);
                fromClause.append(" ON " + pName + "." + positions[i] + " = ");
                fromClause.append(pName + "_" + positions[i] + "_" + vName + ".id ");
            }
        }

        if (it.hasNext()) {
            fromClause.append(",\n ");
        }
    }

    // build the where clause as follows:
    // 1. iterate over all patterns and, for each resource or literal field in subject,
    //    predicate, object, or context, set a query condition according to the
    //    nodes given in the pattern
    // 2. for each variable that has more than one occurrence, add a join condition
    // 3. for each variable in the initialBindings, add a condition to the where clause

    // list of where conditions that will later be connected by AND
    List<String> whereConditions = new LinkedList<String>();

    // 1. iterate over all patterns and, for each resource or literal field in subject,
    //    predicate, object, or context, set a query condition according to the
    //    nodes given in the pattern
    for (StatementPattern p : patterns) {
        String pName = patternNames.get(p);
        Var[] fields = new Var[] { p.getSubjectVar(), p.getPredicateVar(), p.getObjectVar(),
                p.getContextVar() };
        for (int i = 0; i < fields.length; i++) {
            // find node id of the resource or literal field and use it in the where clause
            // in this way we can avoid setting too many query parameters
            long nodeId = -1;
            if (fields[i] != null && fields[i].hasValue()) {
                Value v = valueFactory.convert(fields[i].getValue());
                if (v instanceof KiWiNode) {
                    nodeId = ((KiWiNode) v).getId();
                } else {
                    throw new IllegalArgumentException(
                            "the values in this query have not been created by the KiWi value factory");
                }

                if (nodeId >= 0) {
                    String condition = pName + "." + positions[i] + " = " + nodeId;
                    whereConditions.add(condition);
                }
            }
        }
    }

    // 2. for each variable that has more than one occurrence, add a join condition
    for (Var v : queryVariableIds.keySet()) {
        List<String> vNames = queryVariableIds.get(v);
        for (int i = 1; i < vNames.size(); i++) {
            String vName1 = vNames.get(i - 1);
            String vName2 = vNames.get(i);
            whereConditions.add(vName1 + " = " + vName2);
        }
    }

    // 3. for each variable in the initialBindings, add a condition to the where clause setting it
    //    to the node given as binding
    if (bindings != null) {
        for (String v : bindings.getBindingNames()) {
            for (Map.Entry<Var, List<String>> entry : queryVariableIds.entrySet()) {
                if (entry.getKey().getName() != null && entry.getKey().getName().equals(v)
                        && entry.getValue() != null && entry.getValue().size() > 0) {
                    List<String> vNames = entry.getValue();
                    String vName = vNames.get(0);
                    Value binding = valueFactory.convert(bindings.getValue(v));
                    if (binding instanceof KiWiNode) {
                        whereConditions.add(vName + " = " + ((KiWiNode) binding).getId());
                    } else {
                        throw new IllegalArgumentException(
                                "the values in this binding have not been created by the KiWi value factory");
                    }
                }
            }
        }
    }

    // 4. for each pattern, ensure that the matched triple is not marked as deleted
    for (StatementPattern p : patterns) {
        String pName = patternNames.get(p);
        whereConditions.add(pName + ".deleted = false");
    }

    // 5. for each filter condition, add a statement to the where clause
    List<ValueExpr> filters = new FilterCollector(join).filters;
    for (ValueExpr expr : filters) {
        whereConditions.add(evaluateExpression(expr, queryVariables, null));
    }

    // 6. for each context variable with a restricted list of contexts, we add a condition to the where clause
    //    of the form (V.id = R1.id OR V.id = R2.id ...)
    for (Map.Entry<StatementPattern, List<Resource>> vctx : variableContexts.entrySet()) {
        // the variable
        String varName = patternNames.get(vctx.getKey());

        // the string we are building
        StringBuilder cCond = new StringBuilder();
        cCond.append("(");
        for (Iterator<Resource> it = vctx.getValue().iterator(); it.hasNext();) {
            Value v = valueFactory.convert(it.next());
            if (v instanceof KiWiNode) {
                long nodeId = ((KiWiNode) v).getId();

                cCond.append(varName);
                cCond.append(".context = ");
                cCond.append(nodeId);

                if (it.hasNext()) {
                    cCond.append(" OR ");
                }
            } else {
                throw new IllegalArgumentException(
                        "the values in this query have not been created by the KiWi value factory");
            }

        }
        cCond.append(")");
        whereConditions.add(cCond.toString());
    }

    // construct the where clause
    StringBuilder whereClause = new StringBuilder();
    for (Iterator<String> it = whereConditions.iterator(); it.hasNext();) {
        whereClause.append(it.next());
        whereClause.append("\n ");
        if (it.hasNext()) {
            whereClause.append("AND ");
        }
    }

    // construct limit and offset
    StringBuilder limitClause = new StringBuilder();
    if (limit > 0) {
        limitClause.append("LIMIT ");
        limitClause.append(limit);
        limitClause.append(" ");
    }
    if (offset >= 0) {
        limitClause.append("OFFSET ");
        limitClause.append(offset);
        limitClause.append(" ");
    }

    // build the query string
    String queryString = "SELECT " + selectClause + "\n " + "FROM " + fromClause + "\n " + "WHERE "
            + whereClause + "\n " + limitClause;

    log.debug("original SPARQL syntax tree:\n {}", join);
    log.debug("constructed SQL query string:\n {}", queryString);
    log.debug("SPARQL -> SQL node variable mappings:\n {}", queryVariables);
    log.debug("SPARQL -> SQL ID variable mappings:\n {}", queryVariableIds);

    final PreparedStatement queryStatement = parent.getJDBCConnection().prepareStatement(queryString);
    if (parent.getDialect().isCursorSupported()) {
        queryStatement.setFetchSize(parent.getConfiguration().getCursorSize());
    }

    Future<ResultSet> queryFuture = executorService.submit(new Callable<ResultSet>() {
        @Override
        public ResultSet call() throws Exception {
            try {
                return queryStatement.executeQuery();
            } catch (SQLException ex) {
                if (Thread.interrupted()) {
                    log.info("SQL query execution cancelled; not returning result (Thread={})",
                            Thread.currentThread());
                    throw new InterruptedException("SPARQL query execution cancelled");
                } else {
                    throw ex;
                }
            }
        }
    });

    try {
        ResultSet result = queryFuture.get();

        ResultSetIteration<BindingSet> it = new ResultSetIteration<BindingSet>(result, true,
                new ResultTransformerFunction<BindingSet>() {
                    @Override
                    public BindingSet apply(ResultSet row) throws SQLException {
                        MapBindingSet resultRow = new MapBindingSet();

                        long[] nodeIds = new long[selectVariables.size()];
                        for (int i = 0; i < selectVariables.size(); i++) {
                            nodeIds[i] = row.getLong(variableNames.get(selectVariables.get(i)));
                        }
                        KiWiNode[] nodes = parent.loadNodesByIds(nodeIds);

                        for (int i = 0; i < selectVariables.size(); i++) {
                            Var v = selectVariables.get(i);
                            resultRow.addBinding(v.getName(), nodes[i]);
                        }

                        if (bindings != null) {
                            for (Binding binding : bindings) {
                                resultRow.addBinding(binding);
                            }
                        }
                        return resultRow;
                    }
                });

        // materialize result to avoid having more than one result set open at the same time
        return new CloseableIteratorIteration<BindingSet, SQLException>(Iterations.asList(it).iterator());
    } catch (InterruptedException | CancellationException e) {
        log.info("SPARQL query execution cancelled");
        queryFuture.cancel(true);
        queryStatement.cancel();
        queryStatement.close();

        throw new InterruptedException("SPARQL query execution cancelled");
    } catch (ExecutionException e) {
        log.error("error executing SPARQL query", e.getCause());
        if (e.getCause() instanceof SQLException) {
            throw (SQLException) e.getCause();
        } else if (e.getCause() instanceof InterruptedException) {
            throw (InterruptedException) e.getCause();
        } else {
            throw new SQLException("error executing SPARQL query", e);
        }
    }
}
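
The cancellation branch above cancels both the Future and the underlying PreparedStatement, because interrupting the submitting thread does not by itself abort a query that is already running inside the database. A stripped-down sketch of that combination, with invented class and method names and an assumed open Connection:

import java.sql.*;
import java.util.concurrent.*;

public class CancellableJdbcQuery {
    private final ExecutorService executorService = Executors.newSingleThreadExecutor();

    public ResultSet execute(Connection connection, String sql) throws SQLException, InterruptedException {
        final PreparedStatement statement = connection.prepareStatement(sql);
        Future<ResultSet> queryFuture = executorService.submit(statement::executeQuery);
        try {
            return queryFuture.get();
        } catch (InterruptedException | CancellationException e) {
            queryFuture.cancel(true);   // interrupt the worker thread
            statement.cancel();         // ask the JDBC driver to abort the running query
            statement.close();
            throw new InterruptedException("query execution cancelled");
        } catch (ExecutionException e) {
            if (e.getCause() instanceof SQLException) {
                throw (SQLException) e.getCause();
            }
            throw new SQLException("error executing query", e.getCause());
        }
    }
}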

From source file:org.neo4j.io.pagecache.PageCacheTest.java

@Test(timeout = SHORT_TIMEOUT_MILLIS)
public void retryMustResetCursorOffset() throws Exception {
    // The general idea here is that we have a page with a particular value in its 0th position.
    // We also have a thread that constantly writes to the middle of the page, so it modifies
    // the page but does not change the value in the 0th position. In principle, this makes it
    // possible for a reader to get an inconsistent view and be forced to retry.
    // We then check that every retry iteration will read the special value in the 0th position.
    // We repeat the experiment a couple of times to make sure we didn't succeed by chance.

    PageCache cache = getPageCache(fs, maxPages, pageCachePageSize, PageCacheTracer.NULL);
    final PagedFile pagedFile = cache.map(file("a"), filePageSize);
    final AtomicReference<Exception> caughtWriterException = new AtomicReference<>();
    final CountDownLatch startLatch = new CountDownLatch(1);
    final byte expectedByte = (byte) 13;

    try (PageCursor cursor = pagedFile.io(0, PF_SHARED_WRITE_LOCK)) {
        if (cursor.next()) {
            do {
                cursor.putByte(expectedByte);
            } while (cursor.shouldRetry());
        }
    }

    Runnable writer = () -> {
        while (!Thread.currentThread().isInterrupted()) {
            try (PageCursor cursor = pagedFile.io(0, PF_SHARED_WRITE_LOCK)) {
                if (cursor.next()) {
                    do {
                        cursor.setOffset(recordSize);
                        cursor.putByte((byte) 14);
                    } while (cursor.shouldRetry());
                }
                startLatch.countDown();
            } catch (IOException e) {
                caughtWriterException.set(e);
                throw new RuntimeException(e);
            }
        }
    };
    Future<?> writerFuture = executor.submit(writer);

    startLatch.await();

    for (int i = 0; i < 1000; i++) {
        try (PageCursor cursor = pagedFile.io(0, PF_SHARED_READ_LOCK)) {
            assertTrue(cursor.next());
            do {
                assertThat(cursor.getByte(), is(expectedByte));
            } while (cursor.shouldRetry());
        }
    }

    writerFuture.cancel(true);
    pagedFile.close();
}
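
The background writer above loops on Thread.currentThread().isInterrupted(), so writerFuture.cancel(true) is what eventually stops it. A minimal illustration of that cooperation, separate from the page-cache machinery and with an arbitrary sleep:

import java.util.concurrent.*;

public class StopWorkerViaCancel {
    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        // The loop only ends once cancel(true) sets the worker thread's interrupt flag.
        Future<?> workerFuture = executor.submit(() -> {
            while (!Thread.currentThread().isInterrupted()) {
                // repeated work would go here
            }
        });
        Thread.sleep(100);
        workerFuture.cancel(true); // interrupts the worker, ending the loop
        executor.shutdown();
    }
}

cancel(true) only delivers an interrupt; a task that never checks the flag and never blocks on an interruptible call will keep running regardless.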

From source file:org.apereo.portal.io.xml.JaxbPortalDataHandlerService.java

/**
 * Used by batch import and export to wait for queued tasks to complete. Handles fail-fast behavior
 * if any of the tasks threw and exception by canceling all queued futures and logging a summary of
 * the failures. All completed futures are removed from the queue.
 *
 * @param futures Queued futures to check for completeness
 * @param wait If true, wait for all futures to complete; if false, only check for already-completed futures
 * @return a list of futures that either threw exceptions or timed out
 */
protected List<FutureHolder<?>> waitForFutures(final Queue<? extends FutureHolder<?>> futures,
        final PrintWriter reportWriter, final File reportDirectory, final boolean wait)
        throws InterruptedException {

    final List<FutureHolder<?>> failedFutures = new LinkedList<FutureHolder<?>>();

    for (Iterator<? extends FutureHolder<?>> futuresItr = futures.iterator(); futuresItr.hasNext();) {
        final FutureHolder<?> futureHolder = futuresItr.next();

        //If waiting, or if not waiting but the future is already done, do the get
        final Future<?> future = futureHolder.getFuture();
        if (wait || (!wait && future.isDone())) {
            futuresItr.remove();

            try {
                //Don't bother doing a get() on canceled futures
                if (!future.isCancelled()) {
                    if (this.maxWait > 0) {
                        future.get(this.maxWait, this.maxWaitTimeUnit);
                    } else {
                        future.get();
                    }

                    reportWriter.printf(REPORT_FORMAT, "SUCCESS", futureHolder.getDescription(),
                            futureHolder.getExecutionTimeMillis());
                }
            } catch (CancellationException e) {
                //Ignore cancellation exceptions
            } catch (ExecutionException e) {
                logger.error("Failed: " + futureHolder);

                futureHolder.setError(e);
                failedFutures.add(futureHolder);
                reportWriter.printf(REPORT_FORMAT, "FAIL", futureHolder.getDescription(),
                        futureHolder.getExecutionTimeMillis());

                try {
                    final String dataReportName = SafeFilenameUtils.makeSafeFilename(
                            futureHolder.getDataType() + "_" + futureHolder.getDataName() + ".txt");
                    final File dataReportFile = new File(reportDirectory, dataReportName);
                    final PrintWriter dataReportWriter = new PrintWriter(
                            new BufferedWriter(new FileWriter(dataReportFile)));
                    try {
                        dataReportWriter.println(
                                "FAIL: " + futureHolder.getDataType() + " - " + futureHolder.getDataName());
                        dataReportWriter.println(
                                "--------------------------------------------------------------------------------");
                        e.getCause().printStackTrace(dataReportWriter);
                    } finally {
                        IOUtils.closeQuietly(dataReportWriter);
                    }
                } catch (Exception re) {
                    logger.warn("Failed to write error report for failed " + futureHolder
                            + ", logging root failure here", e.getCause());
                }
            } catch (TimeoutException e) {
                logger.warn("Failed: " + futureHolder);

                futureHolder.setError(e);
                failedFutures.add(futureHolder);
                future.cancel(true);
                reportWriter.printf(REPORT_FORMAT, "TIMEOUT", futureHolder.getDescription(),
                        futureHolder.getExecutionTimeMillis());
            }
        }
    }

    return failedFutures;
}
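
Stripped of the reporting logic, the loop above amounts to draining a queue of futures, doing a bounded get() on each, and cancelling the ones that time out. A hedged, self-contained sketch with invented names:

import java.util.*;
import java.util.concurrent.*;

public class FutureQueueSketch {
    /** Drain the queued futures; cancel and collect the ones that fail or time out. */
    static List<Future<?>> drain(Queue<Future<?>> futures, long maxWait, TimeUnit unit)
            throws InterruptedException {
        List<Future<?>> failed = new ArrayList<>();
        for (Iterator<Future<?>> it = futures.iterator(); it.hasNext();) {
            Future<?> future = it.next();
            it.remove();
            try {
                if (!future.isCancelled()) {
                    future.get(maxWait, unit);
                }
            } catch (CancellationException e) {
                // cancelled elsewhere; nothing to report
            } catch (ExecutionException e) {
                failed.add(future);
            } catch (TimeoutException e) {
                future.cancel(true); // give up on the slow task
                failed.add(future);
            }
        }
        return failed;
    }
}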

From source file:com.mobiperf.MeasurementScheduler.java

@SuppressWarnings("unchecked")
private void uploadResults() {
    MeasurementResult result;
    Future<MeasurementResult> future;
    JSONArray results = readResultsFromFile();

    synchronized (this.pendingTasks) {
        try {
            for (MeasurementTask task : this.pendingTasks.keySet()) {
                future = this.pendingTasks.get(task);
                if (future != null) {
                    sendStringMsg("Finished:\n" + task);
                    if (future.isDone()) {
                        try {
                            this.pendingTasks.remove(task);

                            if (!future.isCancelled()) {
                                result = future.get();
                            } else {
                                Logger.e("Task execution was canceled");
                                JSONObject cancelledResult = MeasurementJsonConvertor.encodeToJson(this
                                        .getFailureResult(task, new CancellationException("Task cancelled")));
                                results.put(cancelledResult);
                            }

                        } catch (InterruptedException e) {
                            Logger.e("Task execution interrupted", e);
                        } catch (ExecutionException e) {
                            if (e.getCause() instanceof MeasurementSkippedException) {
                                // Don't do anything with this - no need to report skipped measurements
                                sendStringMsg("Task skipped - " + e.getCause().toString() + "\n" + task);
                                Logger.i("Task skipped", e.getCause());
                            } else {
                                // Log the error
                                sendStringMsg("Task failed - " + e.getCause().toString() + "\n" + task);
                                Logger.e("Task execution failed", e.getCause());
                                // Was already sent
                                // finishedTasks.add(this.getFailureResult(task, e.getCause()));
                            }
                        } catch (CancellationException e) {
                            Logger.e("Task cancelled", e);
                        }
                    } else if (task.isPassedDeadline()) {
                        /*
                         * If a task has reached its deadline but has not been run, remove it and report
                         * failure
                         */
                        this.pendingTasks.remove(task);
                        future.cancel(true);
                        JSONObject cancelledResult = MeasurementJsonConvertor
                                .encodeToJson(this.getFailureResult(task,
                                        new RuntimeException("Deadline passed before execution")));
                        results.put(cancelledResult);
                    }
                }

                if (future == null) {
                    /*
                     * Tasks that are scheduled after deadline are put into pendingTasks with a null future.
                     */
                    this.pendingTasks.remove(task);
                    JSONObject cancelledResult = MeasurementJsonConvertor.encodeToJson(
                            this.getFailureResult(task, new RuntimeException("Task scheduled after deadline")));
                    results.put(cancelledResult);
                }
            }
        } catch (ConcurrentModificationException e) {
            /*
             * keySet is a synchronized view of the keys. However, changes during iteration will throw
             * ConcurrentModificationException. Since we have synchronized all changes to pendingTasks
             * this should not happen.
             */
            Logger.e("Pending tasks is changed during measurement upload");
        } catch (JSONException e) {
            e.printStackTrace();
        }
    }

    if (results.length() > 0) {
        try {
            this.checkin.uploadMeasurementResult(results, resourceCapManager);
        } catch (IOException e) {
            Logger.e("Error when uploading message");
        }
    }

    Logger.i("A total of " + results.length() + " results uploaded");
    Logger.i("A total of " + results.length() + " results are in the results list");
}

From source file:hudson.plugins.sshslaves.SSHLauncher.java

/**
 * {@inheritDoc}
 */
@Override
public synchronized void afterDisconnect(SlaveComputer slaveComputer, final TaskListener listener) {
    if (connection != null) {
        boolean connectionLost = reportTransportLoss(connection, listener);
        if (session != null) {
            // give the process 3 seconds to write out its dying message before we cut our losses
            // and give up on this process. If the slave process had a JVM crash, OOME, or any other
            // critical problem, this will allow us to capture that.
            // The exit code is also useful info to figure out why the process died.
            try {
                listener.getLogger().println(getSessionOutcomeMessage(session, connectionLost));
                session.getStdout().close();
                session.close();
            } catch (Throwable t) {
                t.printStackTrace(listener.error(Messages.SSHLauncher_ErrorWhileClosingConnection()));
            }
            session = null;
        }

        Slave n = slaveComputer.getNode();
        if (n != null && !connectionLost) {
            String workingDirectory = getWorkingDirectory(n);
            final String fileName = workingDirectory + "/slave.jar";
            Future<?> tidyUp = Computer.threadPoolForRemoting.submit(new Runnable() {
                public void run() {
                    // this would fail if the connection is already lost, so we want to check that.
                    // TODO: Connection class should expose whether it is still connected or not.

                    SFTPv3Client sftpClient = null;
                    try {
                        sftpClient = new SFTPv3Client(connection);
                        sftpClient.rm(fileName);
                    } catch (Exception e) {
                        if (sftpClient == null) {// system without SFTP
                            try {
                                connection.exec("rm " + fileName, listener.getLogger());
                            } catch (Error error) {
                                throw error;
                            } catch (Throwable x) {
                                x.printStackTrace(
                                        listener.error(Messages.SSHLauncher_ErrorDeletingFile(getTimestamp())));
                                // We ignore other Exception types
                            }
                        } else {
                            e.printStackTrace(
                                    listener.error(Messages.SSHLauncher_ErrorDeletingFile(getTimestamp())));
                        }
                    } finally {
                        if (sftpClient != null) {
                            sftpClient.close();
                        }
                    }
                }
            });
            try {
                // the delete is best effort only and if it takes longer than 60 seconds - or the launch 
                // timeout (if specified) - then we should just give up and leave the file there.
                tidyUp.get(launchTimeoutSeconds == null ? 60 : launchTimeoutSeconds, TimeUnit.SECONDS);
            } catch (InterruptedException e) {
                e.printStackTrace(listener.error(Messages.SSHLauncher_ErrorDeletingFile(getTimestamp())));
                // we should either re-apply our interrupt flag or propagate... we don't want to propagate, so...
                Thread.currentThread().interrupt();
            } catch (ExecutionException e) {
                e.printStackTrace(listener.error(Messages.SSHLauncher_ErrorDeletingFile(getTimestamp())));
            } catch (TimeoutException e) {
                e.printStackTrace(listener.error(Messages.SSHLauncher_ErrorDeletingFile(getTimestamp())));
            } finally {
                if (!tidyUp.isDone()) {
                    tidyUp.cancel(true);
                }
            }
        }

        PluginImpl.unregister(connection);
        cleanupConnection(listener);
    }
}
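
The tidy-up future above is a best-effort cleanup: wait a bounded amount of time, restore the interrupt flag if interrupted, and cancel whatever is still running in the finally block. A compact sketch of just that shape, with illustrative names and parameters:

import java.util.concurrent.*;

public class BestEffortCleanup {
    public static void tidyUp(ExecutorService pool, Runnable cleanup, long timeoutSeconds) {
        Future<?> task = pool.submit(cleanup);
        try {
            // Best effort only: wait a bounded amount of time, then give up.
            task.get(timeoutSeconds, TimeUnit.SECONDS);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // restore the interrupt flag
        } catch (ExecutionException | TimeoutException e) {
            // not critical; log and move on in real code
        } finally {
            if (!task.isDone()) {
                task.cancel(true);
            }
        }
    }
}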

From source file:org.apache.hadoop.hive.ql.metadata.Hive.java

/**
 * Given a source directory name of the load path, load all dynamically generated partitions
 * into the specified table and return a map from each dynamic partition specification to the
 * corresponding Partition object.
 * @param loadPath
 * @param tableName
 * @param partSpec
 * @param replace
 * @param numDP number of dynamic partitions
 * @param listBucketingEnabled
 * @param isAcid true if this is an ACID operation
 * @param txnId txnId, can be 0 unless isAcid == true
 * @return partition map details (PartitionSpec and Partition)
 * @throws HiveException
 */
public Map<Map<String, String>, Partition> loadDynamicPartitions(final Path loadPath, final String tableName,
        final Map<String, String> partSpec, final boolean replace, final int numDP,
        final boolean listBucketingEnabled, final boolean isAcid, final long txnId,
        final boolean hasFollowingStatsTask, final AcidUtils.Operation operation) throws HiveException {

    final Map<Map<String, String>, Partition> partitionsMap = Collections
            .synchronizedMap(new LinkedHashMap<Map<String, String>, Partition>());

    int poolSize = conf.getInt(ConfVars.HIVE_LOAD_DYNAMIC_PARTITIONS_THREAD_COUNT.varname, 1);
    final ExecutorService pool = Executors.newFixedThreadPool(poolSize,
            new ThreadFactoryBuilder().setDaemon(true).setNameFormat("load-dynamic-partitions-%d").build());

    // Get all valid partition paths and existing partitions for them (if any)
    final Table tbl = getTable(tableName);
    final Set<Path> validPartitions = getValidPartitionsInPath(numDP, loadPath);

    final int partsToLoad = validPartitions.size();
    final AtomicInteger partitionsLoaded = new AtomicInteger(0);

    final boolean inPlaceEligible = conf.getLong("fs.trash.interval", 0) <= 0
            && InPlaceUpdate.canRenderInPlace(conf) && !SessionState.getConsole().getIsSilent();
    final PrintStream ps = (inPlaceEligible) ? SessionState.getConsole().getInfoStream() : null;
    final SessionState parentSession = SessionState.get();

    final List<Future<Void>> futures = Lists.newLinkedList();
    try {
        // for each dynamically created DP directory, construct a full partition spec
        // and load the partition based on that
        final Map<Long, RawStore> rawStoreMap = new HashMap<Long, RawStore>();
        for (final Path partPath : validPartitions) {
            // generate a full partition specification
            final LinkedHashMap<String, String> fullPartSpec = Maps.newLinkedHashMap(partSpec);
            Warehouse.makeSpecFromName(fullPartSpec, partPath);
            futures.add(pool.submit(new Callable<Void>() {
                @Override
                public Void call() throws Exception {
                    try {
                        // move file would require session details (needCopy() invokes SessionState.get)
                        SessionState.setCurrentSessionState(parentSession);
                        LOG.info("New loading path = " + partPath + " with partSpec " + fullPartSpec);

                        // load the partition
                        Partition newPartition = loadPartition(partPath, tbl, fullPartSpec, replace, true,
                                listBucketingEnabled, false, isAcid, hasFollowingStatsTask);
                        partitionsMap.put(fullPartSpec, newPartition);

                        if (inPlaceEligible) {
                            synchronized (ps) {
                                InPlaceUpdate.rePositionCursor(ps);
                                partitionsLoaded.incrementAndGet();
                                InPlaceUpdate.reprintLine(ps, "Loaded : " + partitionsLoaded.get() + "/"
                                        + partsToLoad + " partitions.");
                            }
                        }
                        // Add embedded rawstore, so we can cleanup later to avoid memory leak
                        if (getMSC().isLocalMetaStore()) {
                            if (!rawStoreMap.containsKey(Thread.currentThread().getId())) {
                                rawStoreMap.put(Thread.currentThread().getId(),
                                        HiveMetaStore.HMSHandler.getRawStore());
                            }
                        }
                        return null;
                    } catch (Exception t) {
                        LOG.error("Exception when loading partition with parameters " + " partPath=" + partPath
                                + ", " + " table=" + tbl.getTableName() + ", " + " partSpec=" + fullPartSpec
                                + ", " + " replace=" + replace + ", " + " listBucketingEnabled="
                                + listBucketingEnabled + ", " + " isAcid=" + isAcid + ", "
                                + " hasFollowingStatsTask=" + hasFollowingStatsTask, t);
                        throw t;
                    }
                }
            }));
        }
        pool.shutdown();
        LOG.debug("Number of partitions to be added is " + futures.size());

        for (Future future : futures) {
            future.get();
        }

        for (RawStore rs : rawStoreMap.values()) {
            rs.shutdown();
        }
    } catch (InterruptedException | ExecutionException e) {
        LOG.debug("Cancelling " + futures.size() + " dynamic loading tasks");
        //cancel other futures
        for (Future future : futures) {
            future.cancel(true);
        }
        throw new HiveException("Exception when loading " + partsToLoad + " partitions in table "
                + tbl.getTableName() + " with loadPath=" + loadPath, e);
    }

    try {
        if (isAcid) {
            List<String> partNames = new ArrayList<>(partitionsMap.size());
            for (Partition p : partitionsMap.values()) {
                partNames.add(p.getName());
            }
            getMSC().addDynamicPartitions(txnId, tbl.getDbName(), tbl.getTableName(), partNames,
                    AcidUtils.toDataOperationType(operation));
        }
        LOG.info("Loaded " + partitionsMap.size() + " partitions");
        return partitionsMap;
    } catch (TException te) {
        throw new HiveException("Exception updating metastore for acid table " + tableName + " with partitions "
                + partitionsMap.values(), te);
    }
}
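
The error path above is a fail-fast pattern: as soon as one partition load fails, every other queued future is cancelled so the pool stops doing work whose results will be discarded. A minimal sketch of the same shape, without the Hive specifics (the pool size and names are arbitrary):

import java.util.*;
import java.util.concurrent.*;

public class CancelSiblingsOnFailure {
    public static void runAll(List<Callable<Void>> tasks) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        List<Future<Void>> futures = new ArrayList<>();
        try {
            for (Callable<Void> task : tasks) {
                futures.add(pool.submit(task));
            }
            pool.shutdown();
            for (Future<Void> future : futures) {
                future.get(); // propagates the first failure
            }
        } catch (InterruptedException | ExecutionException e) {
            for (Future<Void> future : futures) {
                future.cancel(true); // fail fast: stop the remaining work
            }
            throw e;
        } finally {
            pool.shutdownNow();
        }
    }
}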

From source file:org.apache.nifi.provenance.MiNiFiPersistentProvenanceRepository.java

/**
 * <p>
 * MUST be called with the write lock held.
 * </p>
 * <p>
 * Rolls over the data in the journal files, merging them into a single Provenance Event Log File, and
 * compressing as needed.
 *
 * @param force if true, will force a rollover regardless of whether or not data has been written
 * @throws IOException if unable to complete rollover
 */
private void rollover(final boolean force) throws IOException {
    if (!configuration.isAllowRollover()) {
        return;
    }

    // If this is the first time we're creating the out stream, or if we
    // have written something to the stream, then roll over
    if (force || recordsWrittenSinceRollover.get() > 0L || dirtyWriterCount.get() > 0) {
        final List<File> journalsToMerge = new ArrayList<>();
        for (final RecordWriter writer : writers) {
            if (!writer.isClosed()) {
                final File writerFile = writer.getFile();
                journalsToMerge.add(writerFile);
                try {
                    writer.close();
                } catch (final IOException ioe) {
                    logger.warn("Failed to close {} due to {}", writer, ioe.toString());
                    if (logger.isDebugEnabled()) {
                        logger.warn("", ioe);
                    }
                }
            }
        }

        if (logger.isDebugEnabled()) {
            if (journalsToMerge.isEmpty()) {
                logger.debug("No journals to merge; all RecordWriters were already closed");
            } else {
                logger.debug("Going to merge {} files for journals starting with ID {}", journalsToMerge.size(),
                        StringUtils.substringBefore(journalsToMerge.get(0).getName(), "."));
            }
        }

        // Choose a storage directory to store the merged file in.
        final long storageDirIdx = storageDirectoryIndex.getAndIncrement();
        final List<File> storageDirs = configuration.getStorageDirectories();
        final File storageDir = storageDirs.get((int) (storageDirIdx % storageDirs.size()));

        Future<?> future = null;
        if (!journalsToMerge.isEmpty()) {
            // Run the rollover logic in a background thread.
            final AtomicReference<Future<?>> futureReference = new AtomicReference<>();
            final int recordsWritten = recordsWrittenSinceRollover.getAndSet(0);
            final Runnable rolloverRunnable = new Runnable() {
                @Override
                public void run() {
                    try {
                        final File fileRolledOver;

                        try {
                            fileRolledOver = mergeJournals(journalsToMerge,
                                    getMergeFile(journalsToMerge, storageDir), eventReporter);
                        } catch (final IOException ioe) {
                            logger.error(
                                    "Failed to merge Journal Files {} into a Provenance Log File due to {}",
                                    journalsToMerge, ioe.toString());
                            logger.error("", ioe);
                            return;
                        }

                        if (fileRolledOver == null) {
                            logger.debug(
                                    "Couldn't merge journals. Will try again in 10 seconds. journalsToMerge: {}, storageDir: {}",
                                    journalsToMerge, storageDir);
                            return;
                        }
                        final File file = fileRolledOver;

                        // update our map of id to Path
                        // We need to make sure that another thread doesn't also update the map at the same time. We cannot
                        // use the write lock when purging old events, and we want to use the same approach here.
                        boolean updated = false;
                        final Long fileFirstEventId = Long
                                .valueOf(StringUtils.substringBefore(fileRolledOver.getName(), "."));
                        while (!updated) {
                            final SortedMap<Long, Path> existingPathMap = idToPathMap.get();
                            final SortedMap<Long, Path> newIdToPathMap = new TreeMap<>(new PathMapComparator());
                            newIdToPathMap.putAll(existingPathMap);
                            newIdToPathMap.put(fileFirstEventId, file.toPath());
                            updated = idToPathMap.compareAndSet(existingPathMap, newIdToPathMap);
                        }

                        logger.info("Successfully Rolled over Provenance Event file containing {} records",
                                recordsWritten);
                        rolloverCompletions.getAndIncrement();

                        // We have finished successfully. Cancel the future so that we don't run anymore
                        Future<?> future;
                        while ((future = futureReference.get()) == null) {
                            try {
                                Thread.sleep(10L);
                            } catch (final InterruptedException ie) {
                            }
                        }

                        future.cancel(false);
                    } catch (final Throwable t) {
                        logger.error("Failed to rollover Provenance repository due to {}", t.toString());
                        logger.error("", t);
                    }
                }
            };

            // We are going to schedule the future to run immediately and then repeat every 10 seconds. This allows us to keep retrying if we
            // fail for some reason. When we succeed, the Runnable will cancel itself.
            future = rolloverExecutor.scheduleWithFixedDelay(rolloverRunnable, 0, 10, TimeUnit.SECONDS);
            futureReference.set(future);
        }

        streamStartTime.set(System.currentTimeMillis());
        bytesWrittenSinceRollover.set(0);

        // We don't want to create new 'writers' until the number of unmerged journals falls below our threshold. So we wait
        // here before we repopulate the 'writers' member variable and release the lock.
        int journalFileCount = getJournalCount();
        long repoSize = getSize(getLogFiles(), 0L);
        final int journalCountThreshold = configuration.getJournalCount() * 5;
        final long sizeThreshold = (long) (configuration.getMaxStorageCapacity() * 1.1D); // do not go more than 10% over max capacity

        // check if we need to apply backpressure.
        // If we have too many journal files, or if the repo becomes too large, backpressure is necessary. Without it,
        // if the rate at which provenance events are registered exceeds the rate at which we can compress/merge them,
        // then eventually we will end up with all of the data stored in the 'journals' directory. This
        // would mean that the data would never even be accessible. In order to prevent this, if we exceed 110% of the configured
        // max capacity for the repo, or if we have 5 sets of journal files waiting to be merged, we will block here until
        // that is no longer the case.
        if (journalFileCount > journalCountThreshold || repoSize > sizeThreshold) {
            logger.warn("The rate of the dataflow is exceeding the provenance recording rate. "
                    + "Slowing down flow to accommodate. Currently, there are {} journal files ({} bytes) and "
                    + "threshold for blocking is {} ({} bytes)", journalFileCount, repoSize,
                    journalCountThreshold, sizeThreshold);
            eventReporter.reportEvent(Severity.WARNING, "Provenance Repository", "The rate of the dataflow is "
                    + "exceeding the provenance recording rate. Slowing down flow to accommodate");

            while (journalFileCount > journalCountThreshold || repoSize > sizeThreshold) {
                // if a shutdown happens while we are in this loop, kill the rollover thread and break
                if (this.closed.get()) {
                    if (future != null) {
                        future.cancel(true);
                    }

                    break;
                }

                if (repoSize > sizeThreshold) {
                    logger.debug(
                            "Provenance Repository has exceeded its size threshold; will trigger purging of oldest events");
                    purgeOldEvents();

                    journalFileCount = getJournalCount();
                    repoSize = getSize(getLogFiles(), 0L);
                    continue;
                } else {
                    // if we are constrained by the number of journal files rather than the size of the repo,
                    // then we will just sleep a bit because another thread is already actively merging the journals,
                    // due to the runnable that we scheduled above
                    try {
                        Thread.sleep(100L);
                    } catch (final InterruptedException ie) {
                    }
                }

                logger.debug(
                        "Provenance Repository is still behind. Keeping flow slowed down "
                                + "to accommodate. Currently, there are {} journal files ({} bytes) and "
                                + "threshold for blocking is {} ({} bytes)",
                        journalFileCount, repoSize, journalCountThreshold, sizeThreshold);

                journalFileCount = getJournalCount();
                repoSize = getSize(getLogFiles(), 0L);
            }

            logger.info(
                    "Provenance Repository has now caught up with rolling over journal files. Current number of "
                            + "journal files to be rolled over is {}",
                    journalFileCount);
        }

        // we've finished rolling over successfully. Create new writers and reset state.
        writers = createWriters(configuration, idGenerator.get());
        dirtyWriterCount.set(0);
        streamStartTime.set(System.currentTimeMillis());
        recordsWrittenSinceRollover.getAndSet(0);
    }
}
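
The rollover runnable above cancels its own ScheduledFuture once the merge succeeds; the future is handed to the runnable through an AtomicReference because it does not exist yet when the runnable is created. A condensed sketch of that self-cancelling retry pattern, with invented names:

import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicReference;

public class SelfCancellingRetry {
    public static void scheduleUntilSuccess(ScheduledExecutorService scheduler, Callable<Boolean> attempt) {
        final AtomicReference<Future<?>> futureReference = new AtomicReference<>();
        Runnable retryable = () -> {
            try {
                if (!attempt.call()) {
                    return; // failed this time; the scheduler retries after the delay
                }
                // Succeeded: wait until the ScheduledFuture has been published, then stop further runs.
                Future<?> self;
                while ((self = futureReference.get()) == null) {
                    Thread.sleep(10L);
                }
                self.cancel(false); // no interrupt needed; we are cancelling ourselves
            } catch (Exception e) {
                // log and let the next scheduled run retry
            }
        };
        Future<?> future = scheduler.scheduleWithFixedDelay(retryable, 0, 10, TimeUnit.SECONDS);
        futureReference.set(future);
    }
}

cancel(false) matches the original: the task is cancelling itself from inside run(), so there is nothing to gain from interrupting the thread it is currently running on.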
}