Example usage for java.lang.Thread.interrupted()

List of usage examples for java.lang.Thread.interrupted()

Introduction

On this page you can find example usages of java.lang.Thread.interrupted(), drawn from open-source projects.

Prototype

public static boolean interrupted() 

Document

Tests whether the current thread has been interrupted and clears the thread's interrupted status; a second consecutive call therefore returns false unless the thread has been interrupted again in the meantime.
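
A minimal, self-contained sketch (not taken from any of the projects below) illustrating the clear-on-read behaviour of Thread.interrupted():

public class InterruptedDemo {
    public static void main(String[] args) {
        // set the interrupt flag on the current thread
        Thread.currentThread().interrupt();

        // the first call reports the pending interrupt and clears the flag
        System.out.println(Thread.interrupted()); // prints: true

        // the flag is now cleared, so a second call returns false
        System.out.println(Thread.interrupted()); // prints: false

        // by contrast, Thread.currentThread().isInterrupted() reads the flag without clearing it
    }
}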

Usage

From source file:com.ngdata.hbaseindexer.indexer.FusionPipelineClient.java

protected synchronized Exception postJsonToPipelineWithRetry(String endpoint, List docs,
        ArrayList<String> mutable, Exception lastExc, int requestId) throws Exception {
    Exception retryAfterException = null;

    try {
        postJsonToPipeline(endpoint, docs, requestId);
        if (lastExc != null)
            log.info("Re-try request " + requestId + " to " + endpoint + " succeeded after seeing a "
                    + lastExc.getMessage());
    } catch (Exception exc) {
        log.warn("Failed to send request " + requestId + " to '" + endpoint + "' due to: " + exc);
        if (mutable.size() > 1) {
            // try another endpoint but update the cloned list to avoid re-hitting the one having an error
            if (log.isDebugEnabled())
                log.debug("Will re-try failed request " + requestId + " on next endpoint in the list");

            mutable.remove(endpoint);
            retryAfterException = exc;
        } else {
            // no other endpoints to try ... brief wait and then retry
            log.warn("No more endpoints available to try ... will retry to send request " + requestId + " to "
                    + endpoint + " after waiting 1 sec");
            try {
                Thread.sleep(1000);
            } catch (InterruptedException ignore) {
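                // sleep() clears the interrupt flag when it throws, so this interrupted() call is
                // effectively a no-op that documents the decision to swallow the interrupt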
                Thread.interrupted();
            }
            // note we want the exception to propagate from here up the stack since we re-tried and it didn't work
            postJsonToPipeline(endpoint, docs, requestId);
            log.info("Re-try request " + requestId + " to " + endpoint + " succeeded");
            retryAfterException = null; // return success condition
        }
    }

    return retryAfterException;
}

From source file:org.apache.hadoop.dfs.DataBlockScanner.java

public void run() {
    try {
        //Read last verification times
        if (!assignInitialVerificationTimes()) {
            return;
        }

        adjustThrottler();

        while (datanode.shouldRun && !Thread.interrupted()) {
            long now = System.currentTimeMillis();
            synchronized (this) {
                if (now >= (currentPeriodStart + scanPeriod)) {
                    startNewPeriod();
                }
            }
            if ((now - getEarliestScanTime()) >= scanPeriod) {
                verifyFirstBlock();
            } else {
                try {
                    Thread.sleep(1000);
                } catch (InterruptedException ignored) {
                }
            }
        }
        shutdown();
    } catch (RuntimeException e) {
        LOG.warn("RuntimeException during DataBlockScanner.run() : " + StringUtils.stringifyException(e));
        throw e;
    } finally {
        LOG.info("Exiting DataBlockScanner thread.");
    }
}

From source file:net.minecraftforge.fml.client.FMLClientHandler.java

@Override
public void queryUser(StartupQuery query) throws InterruptedException {
    if (query.getResult() == null) {
        client.displayGuiScreen(new GuiNotification(query));
    } else {
        client.displayGuiScreen(new GuiConfirmation(query));
    }

    if (query.isSynchronous()) {
        while (client.currentScreen instanceof GuiNotification) {
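            // turn a pending interrupt into an InterruptedException; interrupted() also clears the flag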
            if (Thread.interrupted())
                throw new InterruptedException();

            client.loadingScreen.displayLoadingString("");

            Thread.sleep(50);
        }

        client.loadingScreen.displayLoadingString(""); // make sure the blank screen is being drawn at the end
    }
}

From source file:org.apache.hadoop.hdfs.TestHFlush.java

@Test
public void testHFlushInterrupted() throws Exception {
    final int DATANODE_NUM = 2;
    final int fileLen = 6;
    byte[] fileContents = AppendTestUtil.initBuffer(fileLen);
    Configuration conf = new HdfsConfiguration();
    final Path p = new Path("/hflush-interrupted");

    System.out.println("p=" + p);

    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM).build();
    try {
        DistributedFileSystem fs = (DistributedFileSystem) cluster.getFileSystem();

        // create a new file.
        FSDataOutputStream stm = AppendTestUtil.createFile(fs, p, DATANODE_NUM);

        stm.write(fileContents, 0, 2);
        Thread.currentThread().interrupt();
        try {
            stm.hflush();
            // If we made it past the hflush(), then that means that the ack made it back
            // from the pipeline before we got to the wait() call. In that case we should
            // still have interrupted status.
            assertTrue(Thread.interrupted());
        } catch (InterruptedIOException ie) {
            System.out.println("Got expected exception during flush");
        }
        assertFalse(Thread.interrupted());

        // Try again to flush should succeed since we no longer have interrupt status
        stm.hflush();

        // Write some more data and flush
        stm.write(fileContents, 2, 2);
        stm.hflush();

        // Write some data and close while interrupted

        stm.write(fileContents, 4, 2);
        Thread.currentThread().interrupt();
        try {
            stm.close();
            // If we made it past the close(), then that means that the ack made it back
            // from the pipeline before we got to the wait() call. In that case we should
            // still have interrupted status.
            assertTrue(Thread.interrupted());
        } catch (InterruptedIOException ioe) {
            System.out.println("Got expected exception during close");
            // If we got the exception, we shouldn't have interrupted status anymore.
            assertFalse(Thread.interrupted());

            // Now do a successful close.
            stm.close();
        }

        // verify that entire file is good
        AppendTestUtil.checkFullFile(fs, p, 4, fileContents, "Failed to deal with thread interruptions", false);
    } finally {
        cluster.shutdown();
    }
}

From source file:LinkedTransferQueue.java

public void put(E e) throws InterruptedException {
    if (e == null) {
        throw new NullPointerException();
    }
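    // fail fast if the calling thread is already interrupted, clearing the flag in the process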
    if (Thread.interrupted()) {
        throw new InterruptedException();
    }
    xfer(e, NOWAIT, 0);
}

From source file:com.addthis.hydra.task.source.AbstractStreamFileDataSource.java

private boolean waitForInitialized() {
    boolean wasInterrupted = false;
    try {
        while (!localInitialized && !awaitUninterruptibly(initialized, 3, TimeUnit.SECONDS)
                && !shuttingDown.get()) {
            log.info(fileStatsToString("waiting for initialization"));
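            // poll and clear the interrupt flag; the finally block below restores it via
            // Thread.currentThread().interrupt() so callers still observe the interrupt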
            if (Thread.interrupted()) {
                wasInterrupted = true;
                log.info("interrupted while waiting for initialization; closing source then resuming wait");
                close();
            }
        }
        log.info(fileStatsToString("initialized"));
        localInitialized = true;
        return true;
    } finally {
        if (wasInterrupted) {
            Thread.currentThread().interrupt();
        }
    }
}

From source file:LinkedTransferQueue.java

public boolean offer(E e, long timeout, TimeUnit unit) throws InterruptedException {
    if (e == null) {
        throw new NullPointerException();
    }
    if (Thread.interrupted()) {
        throw new InterruptedException();
    }
    xfer(e, NOWAIT, 0);
    return true;
}

From source file:org.apache.marmotta.kiwi.sparql.persistence.KiWiSparqlConnection.java

/**
 * Evaluate a statement pattern join or filter on the database by translating it into an appropriate SQL statement.
 * Copied and adapted from KiWiReasoningConnection.query()
 *
 * @param join     the tuple expression (join, filter, or statement pattern) to translate into SQL
 * @param bindings initial bindings whose values are added as conditions to the generated WHERE clause
 * @param dataset  the dataset restricting the default and named graphs; may be null
 * @return a closeable iteration over the binding sets produced by the generated SQL query
 */
public CloseableIteration<BindingSet, SQLException> evaluateJoin(TupleExpr join, final BindingSet bindings,
        final Dataset dataset) throws SQLException, InterruptedException {
    Preconditions
            .checkArgument(join instanceof Join || join instanceof Filter || join instanceof StatementPattern
                    || join instanceof Distinct || join instanceof Slice || join instanceof Reduced);

    // some definitions
    String[] positions = new String[] { "subject", "predicate", "object", "context" };

    // collect all patterns in a list, using depth-first search over the join
    List<StatementPattern> patterns = new PatternCollector(join).patterns;

    long offset = new LimitFinder(join).offset;
    long limit = new LimitFinder(join).limit;

    boolean distinct = new DistinctFinder(join).distinct;

    // associate a name with each pattern; the names are used in the database query to refer to the triple
    // that matched this pattern and in the construction of variable names for the HQL query
    int patternCount = 0;
    final Map<StatementPattern, String> patternNames = new HashMap<StatementPattern, String>();
    for (StatementPattern p : patterns) {
        patternNames.put(p, "P" + (++patternCount));
    }

    // find all variables occurring in the patterns and create a map to map them to
    // field names in the database query; each variable will have one or several field names,
    // one for each pattern it occurs in; field names are constructed automatically by a counter
    // and the pattern name to ensure the name is a valid HQL identifier
    int variableCount = 0;

    // a map for the variable names; will look like { ?x -> "V1", ?y -> "V2", ... }
    final Map<Var, String> variableNames = new HashMap<>();

    // a map for mapping variables to field names; each variable might have one or more field names,
    // depending on the number of patterns it occurs in; will look like
    // { ?x -> ["P1_V1", "P2_V1"], ?y -> ["P2_V2"], ... }
    Map<Var, List<String>> queryVariables = new HashMap<>();
    Map<Var, List<String>> queryVariableIds = new HashMap<>();

    // a map for defining alternative context values for each variable used in the context part of a pattern
    Map<StatementPattern, List<Resource>> variableContexts = new HashMap<>();

    for (StatementPattern p : patterns) {
        // check graph restrictions in datasets (MARMOTTA-340)
        Resource[] contexts;
        Value contextValue = p.getContextVar() != null ? p.getContextVar().getValue() : null;

        Set<URI> graphs = null;
        boolean emptyGraph = false;

        if (dataset != null) {
            if (p.getScope() == StatementPattern.Scope.DEFAULT_CONTEXTS) {
                graphs = dataset.getDefaultGraphs();
                emptyGraph = graphs.isEmpty() && !dataset.getNamedGraphs().isEmpty();
            } else {
                graphs = dataset.getNamedGraphs();
                emptyGraph = graphs.isEmpty() && !dataset.getDefaultGraphs().isEmpty();
            }
        }

        if (emptyGraph) {
            // Search zero contexts
            return new EmptyIteration<BindingSet, SQLException>();
        } else if (graphs == null || graphs.isEmpty()) {
            if (contextValue != null) {
                contexts = new Resource[] { (Resource) contextValue };
            } else {
                contexts = new Resource[0];
            }
        } else if (contextValue != null) {
            if (graphs.contains(contextValue)) {
                contexts = new Resource[] { (Resource) contextValue };
            } else {
                // Statement pattern specifies a context that is not part of
                // the dataset
                return new EmptyIteration<BindingSet, SQLException>();
            }
        } else {
            contexts = new Resource[graphs.size()];
            int i = 0;
            for (URI graph : graphs) {
                URI context = null;
                if (!SESAME.NIL.equals(graph)) {
                    context = graph;
                }
                contexts[i++] = context;
            }
        }

        // build pattern
        Var[] fields = new Var[] { p.getSubjectVar(), p.getPredicateVar(), p.getObjectVar(),
                p.getContextVar() };
        for (int i = 0; i < fields.length; i++) {
            if (fields[i] != null && !fields[i].hasValue()) {
                Var v = fields[i];
                if (variableNames.get(v) == null) {
                    variableNames.put(v, "V" + (++variableCount));
                    queryVariables.put(v, new LinkedList<String>());
                    queryVariableIds.put(v, new LinkedList<String>());
                }
                String pName = patternNames.get(p);
                String vName = variableNames.get(v);
                if (hasNodeCondition(fields[i], join)) {
                    queryVariables.get(v).add(pName + "_" + positions[i] + "_" + vName);
                }
                queryVariableIds.get(v).add(pName + "." + positions[i]);
            }
        }

        // build an OR query for the value of the context variable
        if (contexts.length > 0) {
            variableContexts.put(p, Arrays.asList(contexts));
        }
    }

    // build the select clause by projecting for each query variable the first name
    StringBuilder selectClause = new StringBuilder();

    if (distinct) {
        selectClause.append("DISTINCT ");
    }

    final List<Var> selectVariables = new LinkedList<Var>();
    for (Iterator<Var> it = queryVariableIds.keySet().iterator(); it.hasNext();) {
        Var v = it.next();
        String projectedName = variableNames.get(v);
        String fromName = queryVariableIds.get(v).get(0);
        selectClause.append(fromName);
        selectClause.append(" as ");
        selectClause.append(projectedName);
        if (it.hasNext()) {
            selectClause.append(", ");
        }
        selectVariables.add(v);
    }

    // build the from-clause of the query; the from clause is constructed as follows:
    // 1. for each pattern P, there will be a "KiWiTriple P" in the from clause
    // 2. for each variable V in P occurring in
    //    - subject, there will be a "inner join P.subject as P_S_V" or "left outer join P.subject as P_S_V",
    //      depending on whether the "optional" parameter is false or true
    //    - property, there will be a "inner join P.property as P_P_V" or "left outer join p.property as P_P_V"
    //    - object, there will be a "inner join P.object as P_O_V" or "left outer join p.object as P_O_V"
    //    - context, there will be a "inner join P.context as P_C_V" or "left outer join p.context as P_C_V"
    StringBuilder fromClause = new StringBuilder();
    for (Iterator<StatementPattern> it = patterns.iterator(); it.hasNext();) {
        StatementPattern p = it.next();
        String pName = patternNames.get(p);
        fromClause.append("triples " + pName);

        Var[] fields = new Var[] { p.getSubjectVar(), p.getPredicateVar(), p.getObjectVar(),
                p.getContextVar() };
        for (int i = 0; i < fields.length; i++) {
            if (fields[i] != null && !fields[i].hasValue() && hasNodeCondition(fields[i], join)) {
                String vName = variableNames.get(fields[i]);
                fromClause.append(" INNER JOIN nodes AS ");
                fromClause.append(pName + "_" + positions[i] + "_" + vName);
                fromClause.append(" ON " + pName + "." + positions[i] + " = ");
                fromClause.append(pName + "_" + positions[i] + "_" + vName + ".id ");
            }
        }

        if (it.hasNext()) {
            fromClause.append(",\n ");
        }
    }

    // build the where clause as follows:
    // 1. iterate over all patterns and for each resource and literal field in subject,
    //    property, object, or context, and set a query condition according to the
    //    nodes given in the pattern
    // 2. for each variable that has more than one occurrences, add a join condition
    // 3. for each variable in the initialBindings, add a condition to the where clause

    // list of where conditions that will later be connected by AND
    List<String> whereConditions = new LinkedList<String>();

    // 1. iterate over all patterns and for each resource and literal field in subject,
    //    property, object, or context, and set a query condition according to the
    //    nodes given in the pattern
    for (StatementPattern p : patterns) {
        String pName = patternNames.get(p);
        Var[] fields = new Var[] { p.getSubjectVar(), p.getPredicateVar(), p.getObjectVar(),
                p.getContextVar() };
        for (int i = 0; i < fields.length; i++) {
            // find node id of the resource or literal field and use it in the where clause
            // in this way we can avoid setting too many query parameters
            long nodeId = -1;
            if (fields[i] != null && fields[i].hasValue()) {
                Value v = valueFactory.convert(fields[i].getValue());
                if (v instanceof KiWiNode) {
                    nodeId = ((KiWiNode) v).getId();
                } else {
                    throw new IllegalArgumentException(
                            "the values in this query have not been created by the KiWi value factory");
                }

                if (nodeId >= 0) {
                    String condition = pName + "." + positions[i] + " = " + nodeId;
                    whereConditions.add(condition);
                }
            }
        }
    }

    // 2. for each variable that has more than one occurrences, add a join condition
    for (Var v : queryVariableIds.keySet()) {
        List<String> vNames = queryVariableIds.get(v);
        for (int i = 1; i < vNames.size(); i++) {
            String vName1 = vNames.get(i - 1);
            String vName2 = vNames.get(i);
            whereConditions.add(vName1 + " = " + vName2);
        }
    }

    // 3. for each variable in the initialBindings, add a condition to the where clause setting it
    //    to the node given as binding
    if (bindings != null) {
        for (String v : bindings.getBindingNames()) {
            for (Map.Entry<Var, List<String>> entry : queryVariableIds.entrySet()) {
                if (entry.getKey().getName() != null && entry.getKey().getName().equals(v)
                        && entry.getValue() != null && entry.getValue().size() > 0) {
                    List<String> vNames = entry.getValue();
                    String vName = vNames.get(0);
                    Value binding = valueFactory.convert(bindings.getValue(v));
                    if (binding instanceof KiWiNode) {
                        whereConditions.add(vName + " = " + ((KiWiNode) binding).getId());
                    } else {
                        throw new IllegalArgumentException(
                                "the values in this binding have not been created by the KiWi value factory");
                    }
                }
            }
        }
    }

    // 4. for each pattern, ensure that the matched triple is not marked as deleted
    for (StatementPattern p : patterns) {
        String pName = patternNames.get(p);
        whereConditions.add(pName + ".deleted = false");
    }

    // 5. for each filter condition, add a statement to the where clause
    List<ValueExpr> filters = new FilterCollector(join).filters;
    for (ValueExpr expr : filters) {
        whereConditions.add(evaluateExpression(expr, queryVariables, null));
    }

    // 6. for each context variable with a restricted list of contexts, we add a condition to the where clause
    //    of the form (V.id = R1.id OR V.id = R2.id ...)
    for (Map.Entry<StatementPattern, List<Resource>> vctx : variableContexts.entrySet()) {
        // the variable
        String varName = patternNames.get(vctx.getKey());

        // the string we are building
        StringBuilder cCond = new StringBuilder();
        cCond.append("(");
        for (Iterator<Resource> it = vctx.getValue().iterator(); it.hasNext();) {
            Value v = valueFactory.convert(it.next());
            if (v instanceof KiWiNode) {
                long nodeId = ((KiWiNode) v).getId();

                cCond.append(varName);
                cCond.append(".context = ");
                cCond.append(nodeId);

                if (it.hasNext()) {
                    cCond.append(" OR ");
                }
            } else {
                throw new IllegalArgumentException(
                        "the values in this query have not been created by the KiWi value factory");
            }

        }
        cCond.append(")");
        whereConditions.add(cCond.toString());
    }

    // construct the where clause
    StringBuilder whereClause = new StringBuilder();
    for (Iterator<String> it = whereConditions.iterator(); it.hasNext();) {
        whereClause.append(it.next());
        whereClause.append("\n ");
        if (it.hasNext()) {
            whereClause.append("AND ");
        }
    }

    // construct limit and offset
    StringBuilder limitClause = new StringBuilder();
    if (limit > 0) {
        limitClause.append("LIMIT ");
        limitClause.append(limit);
        limitClause.append(" ");
    }
    if (offset >= 0) {
        limitClause.append("OFFSET ");
        limitClause.append(offset);
        limitClause.append(" ");
    }

    // build the query string
    String queryString = "SELECT " + selectClause + "\n " + "FROM " + fromClause + "\n " + "WHERE "
            + whereClause + "\n " + limitClause;

    log.debug("original SPARQL syntax tree:\n {}", join);
    log.debug("constructed SQL query string:\n {}", queryString);
    log.debug("SPARQL -> SQL node variable mappings:\n {}", queryVariables);
    log.debug("SPARQL -> SQL ID variable mappings:\n {}", queryVariableIds);

    final PreparedStatement queryStatement = parent.getJDBCConnection().prepareStatement(queryString);
    if (parent.getDialect().isCursorSupported()) {
        queryStatement.setFetchSize(parent.getConfiguration().getCursorSize());
    }

    Future<ResultSet> queryFuture = executorService.submit(new Callable<ResultSet>() {
        @Override
        public ResultSet call() throws Exception {
            try {
                return queryStatement.executeQuery();
            } catch (SQLException ex) {
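                // a statement cancelled from another thread surfaces as an SQLException; checking
                // (and clearing) the interrupt flag distinguishes cancellation from a genuine error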
                if (Thread.interrupted()) {
                    log.info("SQL query execution cancelled; not returning result (Thread={})",
                            Thread.currentThread());
                    throw new InterruptedException("SPARQL query execution cancelled");
                } else {
                    throw ex;
                }
            }
        }
    });

    try {
        ResultSet result = queryFuture.get();

        ResultSetIteration<BindingSet> it = new ResultSetIteration<BindingSet>(result, true,
                new ResultTransformerFunction<BindingSet>() {
                    @Override
                    public BindingSet apply(ResultSet row) throws SQLException {
                        MapBindingSet resultRow = new MapBindingSet();

                        long[] nodeIds = new long[selectVariables.size()];
                        for (int i = 0; i < selectVariables.size(); i++) {
                            nodeIds[i] = row.getLong(variableNames.get(selectVariables.get(i)));
                        }
                        KiWiNode[] nodes = parent.loadNodesByIds(nodeIds);

                        for (int i = 0; i < selectVariables.size(); i++) {
                            Var v = selectVariables.get(i);
                            resultRow.addBinding(v.getName(), nodes[i]);
                        }

                        if (bindings != null) {
                            for (Binding binding : bindings) {
                                resultRow.addBinding(binding);
                            }
                        }
                        return resultRow;
                    }
                });

        // materialize result to avoid having more than one result set open at the same time
        return new CloseableIteratorIteration<BindingSet, SQLException>(Iterations.asList(it).iterator());
    } catch (InterruptedException | CancellationException e) {
        log.info("SPARQL query execution cancelled");
        queryFuture.cancel(true);
        queryStatement.cancel();
        queryStatement.close();

        throw new InterruptedException("SPARQL query execution cancelled");
    } catch (ExecutionException e) {
        log.error("error executing SPARQL query", e.getCause());
        if (e.getCause() instanceof SQLException) {
            throw (SQLException) e.getCause();
        } else if (e.getCause() instanceof InterruptedException) {
            throw (InterruptedException) e.getCause();
        } else {
            throw new SQLException("error executing SPARQL query", e);
        }
    }
}

From source file:ai.grakn.kb.internal.computer.GraknSparkComputer.java

@SuppressWarnings("PMD.UnusedFormalParameter")
private Future<ComputerResult> submitWithExecutor(Executor exec) {
    jobGroupId = Integer.toString(ThreadLocalRandom.current().nextInt(Integer.MAX_VALUE));
    String jobDescription = this.vertexProgram == null ? this.mapReducers.toString()
            : this.vertexProgram + "+" + this.mapReducers;

    // Use different output locations
    this.sparkConfiguration.setProperty(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION,
            this.sparkConfiguration.getString(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION) + "/" + jobGroupId);

    updateConfigKeys(sparkConfiguration);

    final Future<ComputerResult> result = computerService.submit(() -> {
        final long startTime = System.currentTimeMillis();

        // apache and hadoop configurations that are used throughout the graph computer computation
        final org.apache.commons.configuration.Configuration graphComputerConfiguration = new HadoopConfiguration(
                this.sparkConfiguration);
        if (!graphComputerConfiguration.containsKey(Constants.SPARK_SERIALIZER)) {
            graphComputerConfiguration.setProperty(Constants.SPARK_SERIALIZER,
                    GryoSerializer.class.getCanonicalName());
        }
        graphComputerConfiguration.setProperty(Constants.GREMLIN_HADOOP_GRAPH_WRITER_HAS_EDGES,
                this.persist.equals(GraphComputer.Persist.EDGES));

        final Configuration hadoopConfiguration = ConfUtil.makeHadoopConfiguration(graphComputerConfiguration);

        final Storage fileSystemStorage = FileSystemStorage.open(hadoopConfiguration);
        final boolean inputFromHDFS = FileInputFormat.class.isAssignableFrom(
                hadoopConfiguration.getClass(Constants.GREMLIN_HADOOP_GRAPH_READER, Object.class));
        final boolean inputFromSpark = PersistedInputRDD.class.isAssignableFrom(
                hadoopConfiguration.getClass(Constants.GREMLIN_HADOOP_GRAPH_READER, Object.class));
        final boolean outputToHDFS = FileOutputFormat.class.isAssignableFrom(
                hadoopConfiguration.getClass(Constants.GREMLIN_HADOOP_GRAPH_WRITER, Object.class));
        final boolean outputToSpark = PersistedOutputRDD.class.isAssignableFrom(
                hadoopConfiguration.getClass(Constants.GREMLIN_HADOOP_GRAPH_WRITER, Object.class));
        final boolean skipPartitioner = graphComputerConfiguration
                .getBoolean(Constants.GREMLIN_SPARK_SKIP_PARTITIONER, false);
        final boolean skipPersist = graphComputerConfiguration
                .getBoolean(Constants.GREMLIN_SPARK_SKIP_GRAPH_CACHE, false);

        if (inputFromHDFS) {
            String inputLocation = Constants
                    .getSearchGraphLocation(hadoopConfiguration.get(Constants.GREMLIN_HADOOP_INPUT_LOCATION),
                            fileSystemStorage)
                    .orElse(null);
            if (null != inputLocation) {
                try {
                    graphComputerConfiguration.setProperty(Constants.MAPREDUCE_INPUT_FILEINPUTFORMAT_INPUTDIR,
                            FileSystem.get(hadoopConfiguration).getFileStatus(new Path(inputLocation)).getPath()
                                    .toString());
                    hadoopConfiguration.set(Constants.MAPREDUCE_INPUT_FILEINPUTFORMAT_INPUTDIR,
                            FileSystem.get(hadoopConfiguration).getFileStatus(new Path(inputLocation)).getPath()
                                    .toString());
                } catch (final IOException e) {
                    throw new IllegalStateException(e.getMessage(), e);
                }
            }
        }

        final InputRDD inputRDD;
        final OutputRDD outputRDD;
        final boolean filtered;
        try {
            inputRDD = InputRDD.class.isAssignableFrom(
                    hadoopConfiguration.getClass(Constants.GREMLIN_HADOOP_GRAPH_READER, Object.class))
                            ? hadoopConfiguration.getClass(Constants.GREMLIN_HADOOP_GRAPH_READER,
                                    InputRDD.class, InputRDD.class).newInstance()
                            : InputFormatRDD.class.newInstance();
            outputRDD = OutputRDD.class.isAssignableFrom(
                    hadoopConfiguration.getClass(Constants.GREMLIN_HADOOP_GRAPH_WRITER, Object.class))
                            ? hadoopConfiguration.getClass(Constants.GREMLIN_HADOOP_GRAPH_WRITER,
                                    OutputRDD.class, OutputRDD.class).newInstance()
                            : OutputFormatRDD.class.newInstance();

            // if the input class can filter on load, then set the filters
            if (inputRDD instanceof InputFormatRDD
                    && GraphFilterAware.class.isAssignableFrom(hadoopConfiguration.getClass(
                            Constants.GREMLIN_HADOOP_GRAPH_READER, InputFormat.class, InputFormat.class))) {
                GraphFilterAware.storeGraphFilter(graphComputerConfiguration, hadoopConfiguration,
                        this.graphFilter);
                filtered = false;
            } else if (inputRDD instanceof GraphFilterAware) {
                ((GraphFilterAware) inputRDD).setGraphFilter(this.graphFilter);
                filtered = false;
            } else
                filtered = this.graphFilter.hasFilter();
        } catch (final InstantiationException | IllegalAccessException e) {
            throw new IllegalStateException(e.getMessage(), e);
        }

        // create the spark context from the graph computer configuration
        final JavaSparkContext sparkContext = new JavaSparkContext(Spark.create(hadoopConfiguration));
        final Storage sparkContextStorage = SparkContextStorage.open();

        sparkContext.setJobGroup(jobGroupId, jobDescription);

        GraknSparkMemory memory = null;
        // delete output location
        final String outputLocation = hadoopConfiguration.get(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION, null);
        if (null != outputLocation) {
            if (outputToHDFS && fileSystemStorage.exists(outputLocation)) {
                fileSystemStorage.rm(outputLocation);
            }
            if (outputToSpark && sparkContextStorage.exists(outputLocation)) {
                sparkContextStorage.rm(outputLocation);
            }
        }

        // the Spark application name will always be set by SparkContextStorage,
        // thus, INFO the name to make it easier to debug
        logger.debug(Constants.GREMLIN_HADOOP_SPARK_JOB_PREFIX
                + (null == this.vertexProgram ? "No VertexProgram" : this.vertexProgram) + "["
                + this.mapReducers + "]");

        // add the project jars to the cluster
        this.loadJars(hadoopConfiguration, sparkContext);
        updateLocalConfiguration(sparkContext, hadoopConfiguration);

        // create a message-passing friendly rdd from the input rdd
        boolean partitioned = false;
        JavaPairRDD<Object, VertexWritable> loadedGraphRDD = inputRDD.readGraphRDD(graphComputerConfiguration,
                sparkContext);

        // if there are vertex or edge filters, filter the loaded graph rdd prior to partitioning and persisting
        if (filtered) {
            this.logger.debug("Filtering the loaded graphRDD: " + this.graphFilter);
            loadedGraphRDD = GraknSparkExecutor.applyGraphFilter(loadedGraphRDD, this.graphFilter);
        }
        // if the loaded graph RDD is already partitioned use that partitioner,
        // else partition it with HashPartitioner
        if (loadedGraphRDD.partitioner().isPresent()) {
            this.logger.debug("Using the existing partitioner associated with the loaded graphRDD: "
                    + loadedGraphRDD.partitioner().get());
        } else {
            if (!skipPartitioner) {
                final Partitioner partitioner = new HashPartitioner(
                        this.workersSet ? this.workers : loadedGraphRDD.partitions().size());
                this.logger.debug("Partitioning the loaded graphRDD: " + partitioner);
                loadedGraphRDD = loadedGraphRDD.partitionBy(partitioner);
                partitioned = true;
                assert loadedGraphRDD.partitioner().isPresent();
            } else {
                // no easy way to test this with a test case
                assert skipPartitioner == !loadedGraphRDD.partitioner().isPresent();

                this.logger.debug("Partitioning has been skipped for the loaded graphRDD via "
                        + Constants.GREMLIN_SPARK_SKIP_PARTITIONER);
            }
        }
        // if the loaded graphRDD was already partitioned previously,
        // then this coalesce/repartition will not take place
        if (this.workersSet) {
            // ensures that the loaded graphRDD does not have more partitions than workers
            if (loadedGraphRDD.partitions().size() > this.workers) {
                loadedGraphRDD = loadedGraphRDD.coalesce(this.workers);
            } else {
                // ensures that the loaded graphRDD does not have less partitions than workers
                if (loadedGraphRDD.partitions().size() < this.workers) {
                    loadedGraphRDD = loadedGraphRDD.repartition(this.workers);
                }
            }
        }
        // persist the vertex program loaded graph as specified by configuration
        // or else use default cache() which is MEMORY_ONLY
        if (!skipPersist && (!inputFromSpark || partitioned || filtered)) {
            loadedGraphRDD = loadedGraphRDD.persist(StorageLevel.fromString(
                    hadoopConfiguration.get(Constants.GREMLIN_SPARK_GRAPH_STORAGE_LEVEL, "MEMORY_ONLY")));
        }
        // final graph with view
        // (for persisting and/or mapReducing -- may be null and thus, possible to save space/time)
        JavaPairRDD<Object, VertexWritable> computedGraphRDD = null;
        try {
            ////////////////////////////////
            // process the vertex program //
            ////////////////////////////////
            if (null != this.vertexProgram) {
                memory = new GraknSparkMemory(this.vertexProgram, this.mapReducers, sparkContext);
                /////////////////
                // if there is a registered VertexProgramInterceptor, use it to bypass the GraphComputer semantics
                if (graphComputerConfiguration
                        .containsKey(Constants.GREMLIN_HADOOP_VERTEX_PROGRAM_INTERCEPTOR)) {
                    try {
                        final GraknSparkVertexProgramInterceptor<VertexProgram> interceptor = (GraknSparkVertexProgramInterceptor) Class
                                .forName(graphComputerConfiguration
                                        .getString(Constants.GREMLIN_HADOOP_VERTEX_PROGRAM_INTERCEPTOR))
                                .newInstance();
                        computedGraphRDD = interceptor.apply(this.vertexProgram, loadedGraphRDD, memory);
                    } catch (final ClassNotFoundException | IllegalAccessException | InstantiationException e) {
                        throw new IllegalStateException(e.getMessage());
                    }
                } else {
                    // standard GraphComputer semantics
                    // get a configuration that will be propagated to all workers
                    final HadoopConfiguration vertexProgramConfiguration = new HadoopConfiguration();
                    this.vertexProgram.storeState(vertexProgramConfiguration);
                    // set up the vertex program and wire up configurations
                    this.vertexProgram.setup(memory);
                    JavaPairRDD<Object, ViewIncomingPayload<Object>> viewIncomingRDD = null;
                    memory.broadcastMemory(sparkContext);
                    // execute the vertex program
                    while (true) {
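                        // stop iterating the vertex program if this thread has been interrupted
                        // (interrupted() clears the flag before the Spark jobs are cancelled)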
                        if (Thread.interrupted()) {
                            sparkContext.cancelAllJobs();
                            throw new TraversalInterruptedException();
                        }
                        memory.setInExecute(true);
                        viewIncomingRDD = GraknSparkExecutor.executeVertexProgramIteration(loadedGraphRDD,
                                viewIncomingRDD, memory, graphComputerConfiguration,
                                vertexProgramConfiguration);
                        memory.setInExecute(false);
                        if (this.vertexProgram.terminate(memory)) {
                            break;
                        } else {
                            memory.incrIteration();
                            memory.broadcastMemory(sparkContext);
                        }
                    }
                    // if the graph will be continued to be used (persisted or mapreduced),
                    // then generate a view+graph
                    if ((null != outputRDD && !this.persist.equals(Persist.NOTHING))
                            || !this.mapReducers.isEmpty()) {
                        computedGraphRDD = GraknSparkExecutor.prepareFinalGraphRDD(loadedGraphRDD,
                                viewIncomingRDD, this.vertexProgram.getVertexComputeKeys());
                        assert null != computedGraphRDD && computedGraphRDD != loadedGraphRDD;
                    } else {
                        // ensure that the computedGraphRDD was not created
                        assert null == computedGraphRDD;
                    }
                }
                /////////////////
                memory.complete(); // drop all transient memory keys
                // write the computed graph to the respective output (rdd or output format)
                if (null != outputRDD && !this.persist.equals(Persist.NOTHING)) {
                    // the logic holds that a computeGraphRDD must be created at this point
                    assert null != computedGraphRDD;

                    outputRDD.writeGraphRDD(graphComputerConfiguration, computedGraphRDD);
                }
            }

            final boolean computedGraphCreated = computedGraphRDD != null && computedGraphRDD != loadedGraphRDD;
            if (!computedGraphCreated) {
                computedGraphRDD = loadedGraphRDD;
            }

            final Memory.Admin finalMemory = null == memory ? new MapMemory() : new MapMemory(memory);

            //////////////////////////////
            // process the map reducers //
            //////////////////////////////
            if (!this.mapReducers.isEmpty()) {
                // create a mapReduceRDD for executing the map reduce jobs on
                JavaPairRDD<Object, VertexWritable> mapReduceRDD = computedGraphRDD;
                if (computedGraphCreated && !outputToSpark) {
                    // drop all the edges of the graph as they are not used in mapReduce processing
                    mapReduceRDD = computedGraphRDD.mapValues(vertexWritable -> {
                        vertexWritable.get().dropEdges(Direction.BOTH);
                        return vertexWritable;
                    });
                    // if there is only one MapReduce to execute, don't bother wasting the clock cycles.
                    if (this.mapReducers.size() > 1) {
                        mapReduceRDD = mapReduceRDD.persist(StorageLevel.fromString(hadoopConfiguration
                                .get(Constants.GREMLIN_SPARK_GRAPH_STORAGE_LEVEL, "MEMORY_ONLY")));
                    }
                }

                for (final MapReduce mapReduce : this.mapReducers) {
                    // execute the map reduce job
                    final HadoopConfiguration newApacheConfiguration = new HadoopConfiguration(
                            graphComputerConfiguration);
                    mapReduce.storeState(newApacheConfiguration);
                    // map
                    final JavaPairRDD mapRDD = GraknSparkExecutor.executeMap(mapReduceRDD, mapReduce,
                            newApacheConfiguration);
                    // combine
                    final JavaPairRDD combineRDD = mapReduce.doStage(MapReduce.Stage.COMBINE)
                            ? GraknSparkExecutor.executeCombine(mapRDD, newApacheConfiguration)
                            : mapRDD;
                    // reduce
                    final JavaPairRDD reduceRDD = mapReduce.doStage(MapReduce.Stage.REDUCE)
                            ? GraknSparkExecutor.executeReduce(combineRDD, mapReduce, newApacheConfiguration)
                            : combineRDD;
                    // write the map reduce output back to disk and computer result memory
                    if (null != outputRDD) {
                        mapReduce.addResultToMemory(finalMemory, outputRDD.writeMemoryRDD(
                                graphComputerConfiguration, mapReduce.getMemoryKey(), reduceRDD));
                    }
                }
                // if the mapReduceRDD is not simply the computed graph, unpersist the mapReduceRDD
                if (computedGraphCreated && !outputToSpark) {
                    assert loadedGraphRDD != computedGraphRDD;
                    assert mapReduceRDD != computedGraphRDD;
                    mapReduceRDD.unpersist();
                } else {
                    assert mapReduceRDD == computedGraphRDD;
                }
            }

            // unpersist the loaded graph if it will not be used again (no PersistedInputRDD)
            // if the graphRDD was loaded from Spark, but then partitioned or filtered, it's a different RDD
            if (!inputFromSpark || partitioned || filtered) {
                loadedGraphRDD.unpersist();
            }
            // unpersist the computed graph if it will not be used again (no PersistedOutputRDD)
            // if the computed graph is the loadedGraphRDD because it was not mutated and not-unpersisted,
            // then don't unpersist the computedGraphRDD/loadedGraphRDD
            if ((!outputToSpark || this.persist.equals(GraphComputer.Persist.NOTHING))
                    && computedGraphCreated) {
                computedGraphRDD.unpersist();
            }
            // delete any file system or rdd data if persist nothing
            if (null != outputLocation && this.persist.equals(GraphComputer.Persist.NOTHING)) {
                if (outputToHDFS) {
                    fileSystemStorage.rm(outputLocation);
                }
                if (outputToSpark) {
                    sparkContextStorage.rm(outputLocation);
                }
            }
            // update runtime and return the newly computed graph
            finalMemory.setRuntime(System.currentTimeMillis() - startTime);
            // clear properties that should not be propagated in an OLAP chain
            graphComputerConfiguration.clearProperty(Constants.GREMLIN_HADOOP_GRAPH_FILTER);
            graphComputerConfiguration.clearProperty(Constants.GREMLIN_HADOOP_VERTEX_PROGRAM_INTERCEPTOR);
            graphComputerConfiguration.clearProperty(Constants.GREMLIN_SPARK_SKIP_GRAPH_CACHE);
            graphComputerConfiguration.clearProperty(Constants.GREMLIN_SPARK_SKIP_PARTITIONER);
            return new DefaultComputerResult(InputOutputHelper.getOutputGraph(graphComputerConfiguration,
                    this.resultGraph, this.persist), finalMemory.asImmutable());
        } catch (Exception e) {
            // So it throws the same exception as tinker does
            throw new RuntimeException(e);
        }
    });
    computerService.shutdown();
    return result;
}

From source file:ir.rasen.charsoo.controller.image_loader.core.LoadAndDisplayImageTask.java

/** @return <b>true</b> - if current task was interrupted; <b>false</b> - otherwise */
private boolean isTaskInterrupted() {
    if (Thread.interrupted()) {
        L.d(LOG_TASK_INTERRUPTED, memoryCacheKey);
        return true;
    }
    return false;
}