Example usage for java.lang.Thread.interrupted()

List of usage examples for java.lang.Thread.interrupted()

Introduction

This page lists example usages of java.lang.Thread.interrupted().

Prototype

public static boolean interrupted() 

Document

Tests whether the current thread has been interrupted. The interrupted status of the thread is cleared by this method.
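
Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects listed under Usage) of the two behaviors the examples below rely on: polling the flag in a worker loop until an interrupt arrives, and the fact that the call clears the flag so a second check returns false.

public class InterruptedDemo {

    public static void main(String[] args) throws InterruptedException {
        Thread worker = new Thread(() -> {
            // typical worker loop: keep going until this thread is interrupted
            while (!Thread.interrupted()) {
                // ... do a unit of work ...
            }
            // the call that returned true above already cleared the flag,
            // so checking again here reports false
            System.out.println("still interrupted? " + Thread.interrupted());
        });

        worker.start();
        worker.interrupt(); // request cancellation; the worker observes it on its next check
        worker.join();
    }
}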

Usage

From source file:net.sf.xfd.provider.PublicProvider.java

@Nullable
@Override
public ParcelFileDescriptor openFile(@NonNull Uri uri, @NonNull String requestedMode, CancellationSignal signal)
        throws FileNotFoundException {
    String path = uri.getPath();

    assertAbsolute(path);

    final int readableMode = ParcelFileDescriptor.parseMode(requestedMode);

    if (signal != null) {
        final Thread theThread = Thread.currentThread();

        signal.setOnCancelListener(theThread::interrupt);
    }

    path = canonString(path);

    if (!path.equals(uri.getPath())) {
        uri = uri.buildUpon().path(path).build();
    }

    try {
        if (!checkAccess(uri, requestedMode)) {
            return null;
        }

        final OS rooted = base.getOS();

        if (rooted == null) {
            throw new FileNotFoundException("Failed to open " + uri.getPath() + ": unable to acquire access");
        }

        int openFlags;

        if ((readableMode & MODE_READ_ONLY) == readableMode) {
            openFlags = OS.O_RDONLY;
        } else if ((readableMode & MODE_WRITE_ONLY) == readableMode) {
            openFlags = OS.O_WRONLY;
        } else {
            openFlags = OS.O_RDWR;
        }

        if (signal == null) {
            openFlags |= NativeBits.O_NONBLOCK;
        }

        //noinspection WrongConstant
        @Fd
        int fd = rooted.open(path, openFlags, 0);

        return ParcelFileDescriptor.adoptFd(fd);
    } catch (IOException e) {
        throw new FileNotFoundException("Unable to open " + uri.getPath() + ": " + e.getMessage());
    } finally {
        if (signal != null) {
            signal.setOnCancelListener(null);
        }

        // clear any interrupt the CancellationSignal's listener may have set, so the flag does not leak past this call
        Thread.interrupted();
    }
}

From source file:LinkedTransferQueue.java

public E poll(long timeout, TimeUnit unit) throws InterruptedException {
    Object e = xfer(null, TIMEOUT, unit.toNanos(timeout));
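    // a null result with the interrupt flag set (which interrupted() also clears) is reported as an InterruptedException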
    if (e != null || !Thread.interrupted()) {
        return cast(e);
    }
    throw new InterruptedException();
}

From source file:edu.unc.lib.dl.services.BatchIngestTask.java

@Override
public void run() {
    startTime = System.currentTimeMillis();
    if (ingestProperties.getSubmitterGroups() != null) {
        GroupsThreadStore.storeGroups(new AccessGroupSet(ingestProperties.getSubmitterGroups()));
        log.debug("Groups loaded to thread from in run: " + GroupsThreadStore.getGroupString());
    } else {
        GroupsThreadStore.clearStore();
    }
    while (this.state != STATE.FINISHED) {
        log.debug("Batch ingest: state=" + this.state.name() + ", dir=" + this.getBaseDir().getName());
        if (Thread.interrupted()) {
            log.debug("halting ingest task due to interrupt, in run method");
            this.halting = true;
        }
        if (this.halting && (this.state != STATE.SEND_MESSAGES && this.state != STATE.CLEANUP)) {
            log.debug("Halting this batch ingest task: state=" + this.state + ", dir=" + this.getBaseDir());
            break; // stop immediately as long as not sending msgs or cleaning up.
        }
        try {
            try {
                switch (this.state) {
                case CHECK:
                    checkDestination();
                    break;
                case INGEST: // ingest the next foxml file, until none left
                    ingestNextObject();
                    break;
                case INGEST_WAIT: // poll for last ingested pid (and fedora availability)
                    waitForLastIngest();
                    break;
                case INGEST_VERIFY_CHECKSUMS: // match local checksums against those generated by Fedora
                    verifyLastIngestChecksums();
                    break;
                case CONTAINER_UPDATES: // update parent container object
                    updateNextContainer();
                    break;
                case SEND_MESSAGES: // send cdr JMS and email for this AIP ingest
                    sendIngestMessages();
                    break;
                case CLEANUP:
                    this.finishedTime = System.currentTimeMillis();
                    this.ingestProperties.setFinishedTime(this.finishedTime);
                    this.ingestProperties.setStartTime(this.startTime);
                    this.ingestProperties.save();
                    GroupsThreadStore.clearStore();
                    deleteDataFiles();
                    handleFinishedDir();
                    this.state = STATE.FINISHED;
                    break;
                }
            } catch (RuntimeException e) {
                throw fail("Unexpected RuntimeException", e);
            }
        } catch (BatchFailedException e) {
            log.error("Batch Ingest Task failed: " + e.getLocalizedMessage(), e);
            return;
        }
    }
}

From source file:it.anyplace.sync.bep.BlockExchangeConnectionHandler.java

private void startMessageListenerService() {
    inExecutorService.submit(new Runnable() {

        @Override
        public void run() {
            try {
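                // keep receiving messages until this listener thread is interrupted (interrupted() also clears the flag)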
                while (!Thread.interrupted()) {
                    final Pair<BlockExchageProtos.MessageType, GeneratedMessage> message = receiveMessage();
                    logger.debug("received message type = {} {}", message.getLeft(),
                            getIdForMessage(message.getRight()));
                    logger.trace("received message = {}", message.getRight());
                    messageProcessingService.submit(new Runnable() {
                        @Override
                        public void run() {
                            logger.debug("processing message type = {} {}", message.getLeft(),
                                    getIdForMessage(message.getRight()));
                            try {
                                switch (message.getLeft()) {
                                case INDEX:
                                    eventBus.post(new IndexMessageReceivedEvent((Index) message.getValue()));
                                    break;
                                case INDEX_UPDATE:
                                    eventBus.post(new IndexUpdateMessageReceivedEvent(
                                            (IndexUpdate) message.getValue()));
                                    break;
                                case REQUEST:
                                    eventBus.post(
                                            new RequestMessageReceivedEvent((Request) message.getValue()));
                                    break;
                                case RESPONSE:
                                    eventBus.post(
                                            new ResponseMessageReceivedEvent((Response) message.getValue()));
                                    break;
                                case PING:
                                    logger.debug("ping message received");
                                    break;
                                case CLOSE:
                                    logger.info("received close message = {}", message.getValue());
                                    closeBg();
                                    break;
                                case CLUSTER_CONFIG: {
                                    checkArgument(clusterConfigInfo == null,
                                            "received cluster config message twice!");
                                    clusterConfigInfo = new ClusterConfigInfo();
                                    ClusterConfig clusterConfig = (ClusterConfig) message.getValue();
                                    for (Folder folder : firstNonNull(clusterConfig.getFoldersList(),
                                            Collections.<Folder>emptyList())) {
                                        ClusterConfigFolderInfo.Builder builder = ClusterConfigFolderInfo
                                                .newBuilder().setFolder(folder.getId())
                                                .setLabel(folder.getLabel());
                                        Map<String, Device> devicesById = Maps.uniqueIndex(
                                                firstNonNull(folder.getDevicesList(),
                                                        Collections.<Device>emptyList()),
                                                new Function<Device, String>() {
                                                    @Override
                                                    public String apply(Device input) {
                                                        return hashDataToDeviceIdString(
                                                                input.getId().toByteArray());
                                                    }
                                                });
                                        Device otherDevice = devicesById.get(address.getDeviceId()),
                                                ourDevice = devicesById.get(configuration.getDeviceId());
                                        if (otherDevice != null) {
                                            builder.setAnnounced(true);
                                        }
                                        final ClusterConfigFolderInfo folderInfo;
                                        if (ourDevice != null) {
                                            folderInfo = builder.setShared(true).build();
                                            logger.info("folder shared from device = {} folder = {}",
                                                    address.getDeviceId(), folderInfo);
                                            if (!configuration.getFolderNames()
                                                    .contains(folderInfo.getFolder())) {
                                                configuration.edit().addFolders(new FolderInfo(
                                                        folderInfo.getFolder(), folderInfo.getLabel()));
                                                logger.info("new folder shared = {}", folderInfo);
                                                eventBus.post(new NewFolderSharedEvent() {
                                                    @Override
                                                    public String getFolder() {
                                                        return folderInfo.getFolder();
                                                    }

                                                });
                                            }
                                        } else {
                                            folderInfo = builder.build();
                                            logger.info("folder not shared from device = {} folder = {}",
                                                    address.getDeviceId(), folderInfo);
                                        }
                                        clusterConfigInfo.putFolderInfo(folderInfo);
                                        configuration.edit()
                                                .addPeers(
                                                        Iterables
                                                                .filter(Iterables.transform(
                                                                        firstNonNull(folder.getDevicesList(),
                                                                                Collections
                                                                                        .<Device>emptyList()),
                                                                        new Function<Device, DeviceInfo>() {
                                                                            @Override
                                                                            public DeviceInfo apply(
                                                                                    Device device) {
                                                                                String deviceId = hashDataToDeviceIdString(
                                                                                        device.getId()
                                                                                                .toByteArray()),
                                                                                        name = device.hasName()
                                                                                                ? device.getName()
                                                                                                : null;
                                                                                return new DeviceInfo(deviceId,
                                                                                        name);
                                                                            }
                                                                        }), new Predicate<DeviceInfo>() {
                                                                            @Override
                                                                            public boolean apply(DeviceInfo s) {
                                                                                return !equal(s.getDeviceId(),
                                                                                        configuration
                                                                                                .getDeviceId());
                                                                            }
                                                                        }));
                                    }
                                    configuration.edit().persistLater();
                                    eventBus.post(new ClusterConfigMessageProcessedEvent(clusterConfig));
                                }
                                    break;
                                }
                            } catch (Exception ex) {
                                if (messageProcessingService.isShutdown()) {
                                    return;
                                }
                                logger.error("error processing message", ex);
                                closeBg();
                                throw ex;
                            }
                        }
                    });
                }
            } catch (Exception ex) {
                if (inExecutorService.isShutdown()) {
                    return;
                }
                logger.error("error receiving message", ex);
                closeBg();
            }
        }
    });
}

From source file:net.pms.io.ThreadedProcessWrapper.java

/**
 * Runs a process with the given command {@link List}.
 *
 * @param command an array of {@link String} used to build the command line.
 * @param timeoutMS the process timeout in milliseconds after which the
 *            process is terminated. Use zero for no timeout, but be aware
 *            of the <a href=
 *            "https://web.archive.org/web/20121201070147/http://kylecartmell.com/?p=9"
 *            >pitfalls</a>
 * @param terminateTimeoutMS the timeout in milliseconds to wait for each
 *            termination attempt.
 * @return The {@link ProcessWrapperResult} from running the process.
 * @throws IllegalArgumentException If {@code command} is {@code null} or
 *             empty.
 */
@Nonnull
public Future<R> runProcess(@Nonnull final List<String> command, final long timeoutMS,
        final long terminateTimeoutMS) {
    if (command == null || command.isEmpty()) {
        throw new IllegalArgumentException("command can't be null or empty");
    }
    final String executableName;
    if (isNotBlank(command.get(0))) {
        Path executable = Paths.get(command.get(0)).getFileName();
        if (executable != null) {
            executableName = executable.toString();
        } else {
            executableName = command.get(0);
        }
    } else {
        executableName = command.get(0);
    }
    final int threadId = PROCESS_COUNTER.getAndIncrement();

    Callable<R> callable = new Callable<R>() {

        @Override
        public R call() throws InterruptedException {
            boolean manageProcess = timeoutMS > 0;
            ProcessBuilder processBuilder = new ProcessBuilder(command);
            processBuilder.redirectErrorStream(true);
            if (LOGGER.isTraceEnabled()) {
                //XXX: Replace with String.join() in Java 8
                LOGGER.trace("Executing \"{}\"", StringUtils.join(command, " "));
            }
            Process process;
            try {
                process = processBuilder.start();
            } catch (IOException e) {
                LOGGER.debug("IOException when trying to start \"{}\" process: {}", executableName,
                        e.getMessage());
                LOGGER.trace("", e);
                return consumer.createResult(null, Integer.MIN_VALUE, e);
            }
            Future<T> output = consumer.consume(process.getInputStream(),
                    "TPW \"" + executableName + "\" consumer " + threadId);
            if (manageProcess) {
                Services.processManager().addProcess(process, executableName, timeoutMS, terminateTimeoutMS);
            }
            int exitCode = Integer.MIN_VALUE;
            boolean interrupted = false;
            boolean shutdown = false;
            do {
                interrupted = false;
                try {
                    exitCode = process.waitFor();
                } catch (InterruptedException e) {
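                    // the InterruptedException cleared the flag; interrupted() notes (and clears) any interrupt that has arrived since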
                    interrupted = Thread.interrupted();
                    if (!shutdown) {
                        if (manageProcess) {
                            Services.processManager().shutdownProcess(process, executableName);
                            manageProcess = false;
                        } else {
                            Services.processManager().addProcess(process, executableName, 0,
                                    terminateTimeoutMS);
                        }
                        shutdown = true;
                    }
                }
            } while (interrupted);
            if (manageProcess) {
                Services.processManager().removeProcess(process, executableName);
            }
            try {
                return consumer.createResult(output.get(), exitCode, null);
            } catch (ExecutionException e) {
                Throwable cause = e.getCause() != null ? e.getCause() : e;
                LOGGER.error("ExecutionException in \"{}\" consumer, no output will be returned: {}",
                        executableName, cause.getMessage());
                LOGGER.trace("", e);
                return consumer.createResult(null, exitCode, cause);
            }
        }
    };
    FutureTask<R> result = new FutureTask<R>(callable);
    Thread runner = new Thread(result, "TPW \"" + executableName + "\" " + threadId);
    runner.start();
    return result;
}

From source file:canreg.client.dataentry.Import.java

/**
 *
 * @param task
 * @param doc
 * @param map
 * @param file
 * @param server
 * @param io
 * @return
 * @throws java.sql.SQLException
 * @throws java.rmi.RemoteException
 * @throws canreg.server.database.RecordLockedException
 */
public static boolean importFile(Task<Object, String> task, Document doc,
        List<canreg.client.dataentry.Relation> map, File file, CanRegServerInterface server, ImportOptions io)
        throws SQLException, RemoteException, SecurityException, RecordLockedException {
    //public static boolean importFile(canreg.client.gui.management.CanReg4MigrationInternalFrame.MigrationTask task, Document doc, List<canreg.client.dataentry.Relation> map, File file, CanRegServerInterface server, ImportOptions io) throws SQLException, RemoteException, SecurityException, RecordLockedException {
    boolean success = false;

    Set<String> noNeedToLookAtPatientVariables = new TreeSet<String>();

    noNeedToLookAtPatientVariables.add(io.getPatientIDVariableName());
    noNeedToLookAtPatientVariables.add(io.getPatientRecordIDVariableName());

    String firstNameVariableName = io.getFirstNameVariableName();
    String sexVariableName = io.getSexVariableName();

    CSVParser parser = null;
    CSVFormat format = CSVFormat.DEFAULT.withFirstRecordAsHeader().withDelimiter(io.getSeparator());

    int linesToRead = io.getMaxLines();

    HashMap mpCodes = new HashMap();

    int numberOfLinesRead = 0;

    Map<String, Integer> nameSexTable = server.getNameSexTables();

    try {
        //            FileInputStream fis = new FileInputStream(file);
        //           BufferedReader bsr = new BufferedReader(new InputStreamReader(fis, io.getFileCharset()));

        // Logger.getLogger(Import.class.getName()).log(Level.CONFIG, "Name of the character encoding {0}");
        int numberOfRecordsInFile = canreg.common.Tools.numberOfLinesInFile(file.getAbsolutePath());

        if (linesToRead > 0) {
            linesToRead = Math.min(numberOfRecordsInFile, linesToRead);
        } else {
            linesToRead = numberOfRecordsInFile;
        }

        parser = CSVParser.parse(file, io.getFileCharset(), format);

        for (CSVRecord csvRecord : parser) {
            numberOfLinesRead++;
            // We allow for null tasks...
            boolean needToSavePatientAgain = true;
            int patientDatabaseRecordID = -1;

            if (task != null) {
                task.firePropertyChange("progress", (numberOfLinesRead - 1) * 100 / linesToRead,
                        (numberOfLinesRead) * 100 / linesToRead);
            }

            // Build patient part
            Patient patient = new Patient();
            for (int i = 0; i < map.size(); i++) {
                Relation rel = map.get(i);
                if (rel.getDatabaseTableVariableID() >= 0
                        && rel.getDatabaseTableName().equalsIgnoreCase("patient")) {
                    if (rel.getFileColumnNumber() < csvRecord.size()) {
                        if (rel.getVariableType().equalsIgnoreCase("Number")) {
                            if (csvRecord.get(rel.getFileColumnNumber()).length() > 0) {
                                try {
                                    patient.setVariable(rel.getDatabaseVariableName(),
                                            Integer.parseInt(csvRecord.get(rel.getFileColumnNumber())));
                                } catch (NumberFormatException ex) {
                                    Logger.getLogger(Import.class.getName()).log(Level.SEVERE,
                                            "Number format error in line: " + (numberOfLinesRead + 1 + 1)
                                                    + ". ",
                                            ex);
                                    success = false;
                                }
                            }
                        } else {
                            patient.setVariable(rel.getDatabaseVariableName(),
                                    StringEscapeUtils.unescapeCsv(csvRecord.get(rel.getFileColumnNumber())));
                        }
                    } else {
                        Logger.getLogger(Import.class.getName()).log(Level.INFO,
                                "Something wrong with patient part of line " + numberOfLinesRead + ".",
                                new Exception("Error in line: " + numberOfLinesRead + ". Can't find field: "
                                        + rel.getDatabaseVariableName()));
                    }
                }
            }
            // debugOut(patient.toString());

            // Build tumour part
            Tumour tumour = new Tumour();
            for (canreg.client.dataentry.Relation rel : map) {
                if (rel.getDatabaseTableVariableID() >= 0
                        && rel.getDatabaseTableName().equalsIgnoreCase("tumour")) {
                    if (rel.getFileColumnNumber() < csvRecord.size()) {
                        if (rel.getVariableType().equalsIgnoreCase("Number")) {
                            if (csvRecord.get(rel.getFileColumnNumber()).length() > 0) {
                                try {
                                    tumour.setVariable(rel.getDatabaseVariableName(),
                                            Integer.parseInt(csvRecord.get(rel.getFileColumnNumber())));
                                } catch (NumberFormatException ex) {
                                    Logger.getLogger(Import.class.getName()).log(Level.SEVERE,
                                            "Number format error in line: " + (numberOfLinesRead + 1 + 1)
                                                    + ". ",
                                            ex);
                                    success = false;
                                }
                            }
                        } else {
                            tumour.setVariable(rel.getDatabaseVariableName(),
                                    StringEscapeUtils.unescapeCsv(csvRecord.get(rel.getFileColumnNumber())));
                        }
                    } else {
                        Logger.getLogger(Import.class.getName()).log(Level.INFO,
                                "Something wrong with tumour part of line " + numberOfLinesRead + ".",
                                new Exception("Error in line: " + numberOfLinesRead + ". Can't find field: "
                                        + rel.getDatabaseVariableName()));
                    }
                }
            }

            // Build source part
            Set<Source> sources = Collections.synchronizedSet(new LinkedHashSet<Source>());
            Source source = new Source();
            for (canreg.client.dataentry.Relation rel : map) {
                if (rel.getDatabaseTableVariableID() >= 0
                        && rel.getDatabaseTableName().equalsIgnoreCase(Globals.SOURCE_TABLE_NAME)) {
                    if (rel.getFileColumnNumber() < csvRecord.size()) {
                        if (rel.getVariableType().equalsIgnoreCase("Number")) {
                            if (csvRecord.get(rel.getFileColumnNumber()).length() > 0) {
                                try {
                                    source.setVariable(rel.getDatabaseVariableName(),
                                            Integer.parseInt(csvRecord.get(rel.getFileColumnNumber())));
                                } catch (NumberFormatException ex) {
                                    Logger.getLogger(Import.class.getName()).log(Level.SEVERE,
                                            "Number format error in line: " + (numberOfLinesRead + 1 + 1)
                                                    + ". ",
                                            ex);
                                    success = false;
                                }
                            }
                        } else {
                            source.setVariable(rel.getDatabaseVariableName(),
                                    StringEscapeUtils.unescapeCsv(csvRecord.get(rel.getFileColumnNumber())));
                        }
                    } else {
                        Logger.getLogger(Import.class.getName()).log(Level.INFO,
                                "Something wrong with source part of line " + numberOfLinesRead + ".",
                                new Exception("Error in line: " + numberOfLinesRead + ". Can't find field: "
                                        + rel.getDatabaseVariableName()));
                    }

                }
            }
            sources.add(source);
            tumour.setSources(sources);

            // debugOut(tumour.toString());
            // add patient to the database
            Object patientID = patient.getVariable(io.getPatientIDVariableName());
            Object patientRecordID = patient.getVariable(io.getPatientRecordIDVariableName());

            if (patientID == null) {
                // save the record to get the new patientID;
                patientDatabaseRecordID = server.savePatient(patient);
                patient = (Patient) server.getRecord(patientDatabaseRecordID, Globals.PATIENT_TABLE_NAME,
                        false);
                patientID = patient.getVariable(io.getPatientIDVariableName());
                patientRecordID = patient.getVariable(io.getPatientRecordIDVariableName());
            }

            if (io.isDataFromPreviousCanReg()) {
                // set update date for the patient the same as for the tumour
                Object updateDate = tumour.getVariable(io.getTumourUpdateDateVariableName());
                patient.setVariable(io.getPatientUpdateDateVariableName(), updateDate);

                // Set the patientID the same as the tumourID initially
                // Object tumourSequence = tumour.getVariable(io.getTumourSequenceVariableName());
                Object tumourSequence = "1";

                String tumourSequenceString = tumourSequence + "";
                while (tumourSequenceString.length() < Globals.ADDITIONAL_DIGITS_FOR_PATIENT_RECORD) {
                    tumourSequenceString = "0" + tumourSequenceString;
                }
                patientRecordID = patientID + "" + tumourSequenceString;

                // If this is a multiple primary tumour...
                String mpCodeString = (String) tumour.getVariable(io.getMultiplePrimaryVariableName());
                if (mpCodeString != null && mpCodeString.length() > 0) {
                    patientID = lookUpPatientID(mpCodeString, patientID, mpCodes);

                    // rebuild sequenceNumber
                    Tumour[] tumours = new Tumour[0];
                    try {
                        tumours = CanRegClientApp.getApplication()
                                .getTumourRecordsBasedOnPatientID(patientID + "", false);
                    } catch (DistributedTableDescriptionException ex) {
                        Logger.getLogger(Import.class.getName()).log(Level.SEVERE, null, ex);
                    } catch (UnknownTableException ex) {
                        Logger.getLogger(Import.class.getName()).log(Level.SEVERE, null, ex);
                    }

                    tumourSequenceString = (tumours.length + 1) + "";
                    while (tumourSequenceString.length() < Globals.ADDITIONAL_DIGITS_FOR_PATIENT_RECORD) {
                        tumourSequenceString = "0" + tumourSequenceString;
                    }

                    patientRecordID = patientID + "" + tumourSequenceString;
                    Patient[] oldPatients = null;
                    try {
                        oldPatients = CanRegClientApp.getApplication().getPatientRecordsByID((String) patientID,
                                false);
                    } catch (RemoteException ex) {
                        Logger.getLogger(Import.class.getName()).log(Level.SEVERE, null, ex);
                    } catch (SecurityException ex) {
                        Logger.getLogger(Import.class.getName()).log(Level.SEVERE, null, ex);
                    } catch (DistributedTableDescriptionException ex) {
                        Logger.getLogger(Import.class.getName()).log(Level.SEVERE, null, ex);
                    } catch (RecordLockedException ex) {
                        Logger.getLogger(Import.class.getName()).log(Level.SEVERE, null, ex);
                    } catch (SQLException ex) {
                        Logger.getLogger(Import.class.getName()).log(Level.SEVERE, null, ex);
                    } catch (UnknownTableException ex) {
                        Logger.getLogger(Import.class.getName()).log(Level.SEVERE, null, ex);
                    }
                    for (Patient oldPatient : oldPatients) {
                        if (!Tools.newRecordContainsNewInfo(patient, oldPatient,
                                noNeedToLookAtPatientVariables)) {
                            needToSavePatientAgain = false;
                            patient = oldPatient;
                            patientRecordID = oldPatient.getVariable(io.getPatientRecordIDVariableName());
                        }
                    }
                }

                Object tumourID = patientRecordID + "" + tumourSequenceString;
                //
                patient.setVariable(io.getPatientIDVariableName(), patientID);
                tumour.setVariable(io.getTumourIDVariablename(), tumourID);
                // And store the record ID

                patient.setVariable(io.getPatientRecordIDVariableName(), patientRecordID);

                // Set the patient ID number on the tumour
                tumour.setVariable(io.getPatientIDTumourTableVariableName(), patientID);
                tumour.setVariable(io.getPatientRecordIDTumourTableVariableName(), patientRecordID);

                // Set the deprecated flag to 0 - no obsolete records from CR4
                tumour.setVariable(io.getObsoleteTumourFlagVariableName(), "0");
                patient.setVariable(io.getObsoletePatientFlagVariableName(), "0");

            }

            // Set the name in the firstName database
            String sex = (String) patient.getVariable(sexVariableName);
            if (sex != null && sex.length() > 0) {
                Integer sexCode = Integer.parseInt(sex);
                String firstNames = (String) patient.getVariable(firstNameVariableName);
                if (firstNames != null) {
                    String[] firstNamesArray = firstNames.split(" ");
                    for (String firstName : firstNamesArray) {
                        if (firstName != null && firstName.trim().length() > 0) {
                            // here we use the locale specific toUpperCase
                            Integer registeredSexCode = nameSexTable.get(firstName);
                            if (registeredSexCode == null) {
                                NameSexRecord nsr = new NameSexRecord();
                                nsr.setName(firstName);
                                nsr.setSex(sexCode);

                                server.saveNameSexRecord(nsr, false);

                                nameSexTable.put(firstName, sexCode);
                            } else if (registeredSexCode != sexCode) {
                                if (registeredSexCode != 9) {
                                    sexCode = 9;
                                    NameSexRecord nsr = new NameSexRecord();
                                    nsr.setName(firstName);
                                    nsr.setSex(sexCode);
                                    server.saveNameSexRecord(nsr, true);
                                    nameSexTable.remove(firstName);
                                    nameSexTable.put(firstName, sexCode);
                                }
                            }
                        }
                    }
                }
            }

            if (needToSavePatientAgain) {
                if (patientDatabaseRecordID > 0) {
                    server.editPatient(patient);
                } else {
                    patientDatabaseRecordID = server.savePatient(patient);
                }
            }
            if (patient != null && tumour != null) {
                String icd10 = (String) tumour.getVariable(io.getICD10VariableName());
                if (icd10 == null || icd10.trim().length() == 0) {
                    ConversionResult[] conversionResult = canreg.client.CanRegClientApp.getApplication()
                            .performConversions(Converter.ConversionName.ICDO3toICD10, patient, tumour);
                    tumour.setVariable(io.getICD10VariableName(), conversionResult[0].getValue());
                }
                String iccc = (String) tumour.getVariable(io.getICCCVariableName());
                if (iccc == null || iccc.trim().length() == 0) {
                    ConversionResult[] conversionResult = canreg.client.CanRegClientApp.getApplication()
                            .performConversions(Converter.ConversionName.ICDO3toICCC3, patient, tumour);
                    tumour.setVariable(io.getICCCVariableName(), conversionResult[0].getValue());
                }
            }
            if (tumour.getVariable(io.getPatientIDTumourTableVariableName()) == null) {
                tumour.setVariable(io.getPatientIDTumourTableVariableName(), patientID);
            }

            if (tumour.getVariable(io.getPatientRecordIDTumourTableVariableName()) == null) {
                tumour.setVariable(io.getPatientRecordIDTumourTableVariableName(), patientRecordID);
            }

            int tumourDatabaseIDNumber = server.saveTumour(tumour);

            if (Thread.interrupted()) {
                //We've been interrupted: no more importing.
                throw new InterruptedException();
            }
        }
        task.firePropertyChange("finished", null, null);
        success = true;
    } catch (IOException ex) {
        Logger.getLogger(Import.class.getName()).log(Level.SEVERE,
                "Error in line: " + (numberOfLinesRead + 1 + 1) + ". ", ex);
        success = false;
    } catch (NumberFormatException ex) {
        Logger.getLogger(Import.class.getName()).log(Level.SEVERE,
                "Error in line: " + (numberOfLinesRead + 1 + 1) + ". ", ex);
        success = false;
    } catch (InterruptedException ex) {
        Logger.getLogger(Import.class.getName()).log(Level.INFO,
                "Interupted on line: " + (numberOfLinesRead + 1) + ". ", ex);
        success = true;
    } catch (IndexOutOfBoundsException ex) {
        Logger.getLogger(Import.class.getName()).log(Level.SEVERE,
                "Error in line: " + (numberOfLinesRead + 1 + 1) + ". ", ex);
        success = false;
    } catch (SQLException ex) {
        Logger.getLogger(Import.class.getName()).log(Level.SEVERE,
                "Error in line: " + (numberOfLinesRead + 1 + 1) + ". ", ex);
        success = false;
    } finally {
        if (parser != null) {
            try {
                parser.close();
            } catch (IOException ex) {
                Logger.getLogger(Import.class.getName()).log(Level.SEVERE, null, ex);
            }
        }
    }

    return success;
}

From source file:org.apache.hadoop.mapreduce.v2.app.job.impl.TestJobImpl.java

@Test(timeout = 20000)
public void testKilledDuringKillAbort() throws Exception {
    Configuration conf = new Configuration();
    conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
    // not initializing dispatcher to avoid potential race condition between
    // the dispatcher thread & test thread - see MAPREDUCE-6831
    AsyncDispatcher dispatcher = new AsyncDispatcher();

    OutputCommitter committer = new StubbedOutputCommitter() {
        @Override
        public synchronized void abortJob(JobContext jobContext, State state) throws IOException {
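            // simulate a hanging abort: keep waiting until this committer thread is interrupted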
            while (!Thread.interrupted()) {
                try {
                    wait();
                } catch (InterruptedException e) {
                }
            }
        }
    };
    CommitterEventHandler commitHandler = createCommitterEventHandler(dispatcher, committer);
    commitHandler.init(conf);
    commitHandler.start();

    JobImpl job = createStubbedJob(conf, dispatcher, 2, null);
    JobId jobId = job.getID();
    job.handle(new JobEvent(jobId, JobEventType.JOB_INIT));
    assertJobState(job, JobStateInternal.INITED);
    job.handle(new JobStartEvent(jobId));
    assertJobState(job, JobStateInternal.SETUP);

    job.handle(new JobEvent(jobId, JobEventType.JOB_KILL));
    assertJobState(job, JobStateInternal.KILL_ABORT);

    job.handle(new JobEvent(jobId, JobEventType.JOB_KILL));
    assertJobState(job, JobStateInternal.KILLED);
    dispatcher.stop();
    commitHandler.stop();
}

From source file:net.pms.io.SimpleProcessWrapper.java

/**
 * Runs a process with the given command {@link List}.
 *
 * @param command an array of {@link String} used to build the command line.
 * @param timeoutMS the process timeout in milliseconds after which the
 *            process is terminated. Use zero for no timeout, but be aware
 *            of the <a href=
 *            "https://web.archive.org/web/20121201070147/http://kylecartmell.com/?p=9"
 *            >pitfalls</a>
 * @param terminateTimeoutMS the timeout in milliseconds to wait for each
 *            termination attempt.
 * @return The {@link ProcessWrapperResult} from running the process.
 * @throws InterruptedException If the operation is interrupted.
 * @throws IllegalArgumentException If {@code command} is {@code null} or
 *             empty.
 */
@Nonnull
public R runProcess(@Nonnull List<String> command, long timeoutMS, long terminateTimeoutMS)
        throws InterruptedException {
    if (command == null || command.isEmpty()) {
        throw new IllegalArgumentException("command can't be null or empty");
    }
    final String executableName;
    if (isNotBlank(command.get(0))) {
        Path executable = Paths.get(command.get(0)).getFileName();
        if (executable != null) {
            executableName = executable.toString();
        } else {
            executableName = command.get(0);
        }
    } else {
        executableName = command.get(0);
    }
    boolean manageProcess = timeoutMS > 0;
    ProcessBuilder processBuilder = new ProcessBuilder(command);
    processBuilder.redirectErrorStream(true);
    if (LOGGER.isTraceEnabled()) {
        //XXX: Replace with String.join() in Java 8
        LOGGER.trace("Executing \"{}\"", StringUtils.join(command, " "));
    }
    Process process;
    try {
        process = processBuilder.start();
    } catch (IOException e) {
        LOGGER.debug("IOException when trying to start \"{}\" process: {}", executableName, e.getMessage());
        LOGGER.trace("", e);
        return consumer.createResult(null, Integer.MIN_VALUE, e);
    }
    Future<T> output = consumer.consume(process.getInputStream(), "SPW \"" + executableName + "\" consumer");
    if (manageProcess) {
        Services.processManager().addProcess(process, command.get(0), timeoutMS, terminateTimeoutMS);
    }
    int exitCode = Integer.MIN_VALUE;
    boolean interrupted = false;
    boolean shutdown = false;
    do {
        interrupted = false;
        try {
            exitCode = process.waitFor();
        } catch (InterruptedException e) {
            interrupted = Thread.interrupted();
            if (!shutdown) {
                if (manageProcess) {
                    Services.processManager().shutdownProcess(process, executableName);
                    manageProcess = false;
                } else {
                    Services.processManager().addProcess(process, executableName, 0, terminateTimeoutMS);
                }
                shutdown = true;
            }
        }
    } while (interrupted);
    if (manageProcess) {
        Services.processManager().removeProcess(process, command.get(0));
    }
    try {
        return consumer.createResult(output.get(), exitCode, null);
    } catch (ExecutionException e) {
        Throwable cause = e.getCause() != null ? e.getCause() : e;
        LOGGER.error("ExecutionException in \"{}\" consumer, no output will be returned: {}", executableName,
                cause.getMessage());
        LOGGER.trace("", e);
        return consumer.createResult(null, exitCode, cause);
    }
}

From source file:grakn.core.server.session.computer.GraknSparkComputer.java

@SuppressWarnings("PMD.UnusedFormalParameter")
private Future<ComputerResult> submitWithExecutor() {
    jobGroupId = Integer.toString(ThreadLocalRandom.current().nextInt(Integer.MAX_VALUE));
    String jobDescription = this.vertexProgram == null ? this.mapReducers.toString()
            : this.vertexProgram + "+" + this.mapReducers;

    // Use different output locations
    this.sparkConfiguration.setProperty(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION,
            this.sparkConfiguration.getString(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION) + "/" + jobGroupId);

    updateConfigKeys(sparkConfiguration);

    final Future<ComputerResult> result = computerService.submit(() -> {
        final long startTime = System.currentTimeMillis();

        //////////////////////////////////////////////////
        /////// PROCESS SHIM AND SYSTEM PROPERTIES ///////
        //////////////////////////////////////////////////
        final String shimService = KryoSerializer.class.getCanonicalName()
                .equals(this.sparkConfiguration.getString(Constants.SPARK_SERIALIZER, null))
                        ? UnshadedKryoShimService.class.getCanonicalName()
                        : HadoopPoolShimService.class.getCanonicalName();
        this.sparkConfiguration.setProperty(KryoShimServiceLoader.KRYO_SHIM_SERVICE, shimService);
        ///////////
        final StringBuilder params = new StringBuilder();
        this.sparkConfiguration.getKeys().forEachRemaining(key -> {
            if (KEYS_PASSED_IN_JVM_SYSTEM_PROPERTIES.contains(key)) {
                params.append(" -D").append("tinkerpop.").append(key).append("=")
                        .append(this.sparkConfiguration.getProperty(key));
                System.setProperty("tinkerpop." + key, this.sparkConfiguration.getProperty(key).toString());
            }
        });
        if (params.length() > 0) {
            this.sparkConfiguration.setProperty(SparkLauncher.EXECUTOR_EXTRA_JAVA_OPTIONS,
                    (this.sparkConfiguration.getString(SparkLauncher.EXECUTOR_EXTRA_JAVA_OPTIONS, "")
                            + params.toString()).trim());
            this.sparkConfiguration.setProperty(SparkLauncher.DRIVER_EXTRA_JAVA_OPTIONS,
                    (this.sparkConfiguration.getString(SparkLauncher.DRIVER_EXTRA_JAVA_OPTIONS, "")
                            + params.toString()).trim());
        }
        KryoShimServiceLoader.applyConfiguration(this.sparkConfiguration);
        //////////////////////////////////////////////////
        //////////////////////////////////////////////////
        //////////////////////////////////////////////////
        // apache and hadoop configurations that are used throughout the graph computer computation
        final org.apache.commons.configuration.Configuration graphComputerConfiguration = new HadoopConfiguration(
                this.sparkConfiguration);
        if (!graphComputerConfiguration.containsKey(Constants.SPARK_SERIALIZER)) {
            graphComputerConfiguration.setProperty(Constants.SPARK_SERIALIZER,
                    KryoSerializer.class.getCanonicalName());
            if (!graphComputerConfiguration.containsKey(Constants.SPARK_KRYO_REGISTRATOR)) {
                graphComputerConfiguration.setProperty(Constants.SPARK_KRYO_REGISTRATOR,
                        GryoRegistrator.class.getCanonicalName());
            }
        }
        graphComputerConfiguration.setProperty(Constants.GREMLIN_HADOOP_GRAPH_WRITER_HAS_EDGES,
                this.persist.equals(GraphComputer.Persist.EDGES));

        final Configuration hadoopConfiguration = ConfUtil.makeHadoopConfiguration(graphComputerConfiguration);

        final Storage fileSystemStorage = FileSystemStorage.open(hadoopConfiguration);
        final boolean inputFromHDFS = FileInputFormat.class.isAssignableFrom(
                hadoopConfiguration.getClass(Constants.GREMLIN_HADOOP_GRAPH_READER, Object.class));
        final boolean inputFromSpark = PersistedInputRDD.class.isAssignableFrom(
                hadoopConfiguration.getClass(Constants.GREMLIN_HADOOP_GRAPH_READER, Object.class));
        final boolean outputToHDFS = FileOutputFormat.class.isAssignableFrom(
                hadoopConfiguration.getClass(Constants.GREMLIN_HADOOP_GRAPH_WRITER, Object.class));
        final boolean outputToSpark = PersistedOutputRDD.class.isAssignableFrom(
                hadoopConfiguration.getClass(Constants.GREMLIN_HADOOP_GRAPH_WRITER, Object.class));
        final boolean skipPartitioner = graphComputerConfiguration
                .getBoolean(Constants.GREMLIN_SPARK_SKIP_PARTITIONER, false);
        final boolean skipPersist = graphComputerConfiguration
                .getBoolean(Constants.GREMLIN_SPARK_SKIP_GRAPH_CACHE, false);

        if (inputFromHDFS) {
            String inputLocation = Constants
                    .getSearchGraphLocation(hadoopConfiguration.get(Constants.GREMLIN_HADOOP_INPUT_LOCATION),
                            fileSystemStorage)
                    .orElse(null);
            if (null != inputLocation) {
                try {
                    graphComputerConfiguration.setProperty(Constants.MAPREDUCE_INPUT_FILEINPUTFORMAT_INPUTDIR,
                            FileSystem.get(hadoopConfiguration).getFileStatus(new Path(inputLocation)).getPath()
                                    .toString());
                    hadoopConfiguration.set(Constants.MAPREDUCE_INPUT_FILEINPUTFORMAT_INPUTDIR,
                            FileSystem.get(hadoopConfiguration).getFileStatus(new Path(inputLocation)).getPath()
                                    .toString());
                } catch (final IOException e) {
                    throw new IllegalStateException(e.getMessage(), e);
                }
            }
        }

        final InputRDD inputRDD;
        final OutputRDD outputRDD;
        final boolean filtered;
        try {
            inputRDD = InputRDD.class.isAssignableFrom(
                    hadoopConfiguration.getClass(Constants.GREMLIN_HADOOP_GRAPH_READER, Object.class))
                            ? hadoopConfiguration.getClass(Constants.GREMLIN_HADOOP_GRAPH_READER,
                                    InputRDD.class, InputRDD.class).newInstance()
                            : InputFormatRDD.class.newInstance();
            outputRDD = OutputRDD.class.isAssignableFrom(
                    hadoopConfiguration.getClass(Constants.GREMLIN_HADOOP_GRAPH_WRITER, Object.class))
                            ? hadoopConfiguration.getClass(Constants.GREMLIN_HADOOP_GRAPH_WRITER,
                                    OutputRDD.class, OutputRDD.class).newInstance()
                            : OutputFormatRDD.class.newInstance();

            // if the input class can filter on load, then set the filters
            if (inputRDD instanceof InputFormatRDD
                    && GraphFilterAware.class.isAssignableFrom(hadoopConfiguration.getClass(
                            Constants.GREMLIN_HADOOP_GRAPH_READER, InputFormat.class, InputFormat.class))) {
                GraphFilterAware.storeGraphFilter(graphComputerConfiguration, hadoopConfiguration,
                        this.graphFilter);
                filtered = false;
            } else if (inputRDD instanceof GraphFilterAware) {
                ((GraphFilterAware) inputRDD).setGraphFilter(this.graphFilter);
                filtered = false;
            } else
                filtered = this.graphFilter.hasFilter();
        } catch (final InstantiationException | IllegalAccessException e) {
            throw new IllegalStateException(e.getMessage(), e);
        }

        // create the spark context from the graph computer configuration
        final JavaSparkContext sparkContext = new JavaSparkContext(Spark.create(hadoopConfiguration));
        final Storage sparkContextStorage = SparkContextStorage.open();

        sparkContext.setJobGroup(jobGroupId, jobDescription);

        GraknSparkMemory memory = null;
        // delete output location
        final String outputLocation = hadoopConfiguration.get(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION, null);
        if (null != outputLocation) {
            if (outputToHDFS && fileSystemStorage.exists(outputLocation)) {
                fileSystemStorage.rm(outputLocation);
            }
            if (outputToSpark && sparkContextStorage.exists(outputLocation)) {
                sparkContextStorage.rm(outputLocation);
            }
        }

        // the Spark application name will always be set by SparkContextStorage,
        // thus, INFO the name to make it easier to debug
        logger.debug(Constants.GREMLIN_HADOOP_SPARK_JOB_PREFIX
                + (null == this.vertexProgram ? "No VertexProgram" : this.vertexProgram) + "["
                + this.mapReducers + "]");

        // add the project jars to the cluster
        this.loadJars(hadoopConfiguration, sparkContext);
        updateLocalConfiguration(sparkContext, hadoopConfiguration);

        // create a message-passing friendly rdd from the input rdd
        boolean partitioned = false;
        JavaPairRDD<Object, VertexWritable> loadedGraphRDD = inputRDD.readGraphRDD(graphComputerConfiguration,
                sparkContext);

        // if there are vertex or edge filters, filter the loaded graph rdd prior to partitioning and persisting
        if (filtered) {
            this.logger.debug("Filtering the loaded graphRDD: " + this.graphFilter);
            loadedGraphRDD = GraknSparkExecutor.applyGraphFilter(loadedGraphRDD, this.graphFilter);
        }
        // if the loaded graph RDD is already partitioned use that partitioner,
        // else partition it with HashPartitioner
        if (loadedGraphRDD.partitioner().isPresent()) {
            this.logger.debug("Using the existing partitioner associated with the loaded graphRDD: "
                    + loadedGraphRDD.partitioner().get());
        } else {
            if (!skipPartitioner) {
                final Partitioner partitioner = new HashPartitioner(
                        this.workersSet ? this.workers : loadedGraphRDD.partitions().size());
                this.logger.debug("Partitioning the loaded graphRDD: " + partitioner);
                loadedGraphRDD = loadedGraphRDD.partitionBy(partitioner);
                partitioned = true;
                assert loadedGraphRDD.partitioner().isPresent();
            } else {
                // no easy way to test this with a test case
                assert skipPartitioner == !loadedGraphRDD.partitioner().isPresent();

                this.logger.debug("Partitioning has been skipped for the loaded graphRDD via "
                        + Constants.GREMLIN_SPARK_SKIP_PARTITIONER);
            }
        }
        // if the loaded graphRDD was already partitioned previous,
        // then this coalesce/repartition will not take place
        if (this.workersSet) {
            // ensures that the loaded graphRDD does not have more partitions than workers
            if (loadedGraphRDD.partitions().size() > this.workers) {
                loadedGraphRDD = loadedGraphRDD.coalesce(this.workers);
            } else {
                // ensures that the loaded graphRDD does not have less partitions than workers
                if (loadedGraphRDD.partitions().size() < this.workers) {
                    loadedGraphRDD = loadedGraphRDD.repartition(this.workers);
                }
            }
        }
        // persist the vertex program loaded graph as specified by configuration
        // or else use default cache() which is MEMORY_ONLY
        if (!skipPersist && (!inputFromSpark || partitioned || filtered)) {
            loadedGraphRDD = loadedGraphRDD.persist(StorageLevel.fromString(
                    hadoopConfiguration.get(Constants.GREMLIN_SPARK_GRAPH_STORAGE_LEVEL, "MEMORY_ONLY")));
        }
        // final graph with view
        // (for persisting and/or mapReducing -- may be null and thus, possible to save space/time)
        JavaPairRDD<Object, VertexWritable> computedGraphRDD = null;
        try {
            ////////////////////////////////
            // process the vertex program //
            ////////////////////////////////
            if (null != this.vertexProgram) {
                memory = new GraknSparkMemory(this.vertexProgram, this.mapReducers, sparkContext);
                /////////////////
                // if there is a registered VertexProgramInterceptor, use it to bypass the GraphComputer semantics
                if (graphComputerConfiguration
                        .containsKey(Constants.GREMLIN_HADOOP_VERTEX_PROGRAM_INTERCEPTOR)) {
                    try {
                        final GraknSparkVertexProgramInterceptor<VertexProgram> interceptor = (GraknSparkVertexProgramInterceptor) Class
                                .forName(graphComputerConfiguration
                                        .getString(Constants.GREMLIN_HADOOP_VERTEX_PROGRAM_INTERCEPTOR))
                                .newInstance();
                        computedGraphRDD = interceptor.apply(this.vertexProgram, loadedGraphRDD, memory);
                    } catch (final ClassNotFoundException | IllegalAccessException | InstantiationException e) {
                        throw new IllegalStateException(e.getMessage());
                    }
                } else {
                    // standard GraphComputer semantics
                    // get a configuration that will be propagated to all workers
                    final HadoopConfiguration vertexProgramConfiguration = new HadoopConfiguration();
                    this.vertexProgram.storeState(vertexProgramConfiguration);
                    // set up the vertex program and wire up configurations
                    this.vertexProgram.setup(memory);
                    JavaPairRDD<Object, ViewIncomingPayload<Object>> viewIncomingRDD = null;
                    memory.broadcastMemory(sparkContext);
                    // execute the vertex program
                    while (true) {
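                        // Thread.interrupted() tests and clears the current thread's interrupt status,
                        // so a cancellation request is observed exactly once: the running Spark jobs are
                        // cancelled and the interrupt is surfaced as a TraversalInterruptedException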
                        if (Thread.interrupted()) {
                            sparkContext.cancelAllJobs();
                            throw new TraversalInterruptedException();
                        }
                        memory.setInExecute(true);
                        viewIncomingRDD = GraknSparkExecutor.executeVertexProgramIteration(loadedGraphRDD,
                                viewIncomingRDD, memory, graphComputerConfiguration,
                                vertexProgramConfiguration);
                        memory.setInExecute(false);
                        if (this.vertexProgram.terminate(memory)) {
                            break;
                        } else {
                            memory.incrIteration();
                            memory.broadcastMemory(sparkContext);
                        }
                    }
                    // if the graph will be continued to be used (persisted or mapreduced),
                    // then generate a view+graph
                    if ((null != outputRDD && !this.persist.equals(Persist.NOTHING))
                            || !this.mapReducers.isEmpty()) {
                        computedGraphRDD = GraknSparkExecutor.prepareFinalGraphRDD(loadedGraphRDD,
                                viewIncomingRDD, this.vertexProgram.getVertexComputeKeys());
                        assert null != computedGraphRDD && computedGraphRDD != loadedGraphRDD;
                    } else {
                        // ensure that the computedGraphRDD was not created
                        assert null == computedGraphRDD;
                    }
                }
                /////////////////
                memory.complete(); // drop all transient memory keys
                // write the computed graph to the respective output (rdd or output format)
                if (null != outputRDD && !this.persist.equals(Persist.NOTHING)) {
                    // the logic holds that a computedGraphRDD must be created at this point
                    assert null != computedGraphRDD;

                    outputRDD.writeGraphRDD(graphComputerConfiguration, computedGraphRDD);
                }
            }

            final boolean computedGraphCreated = computedGraphRDD != null && computedGraphRDD != loadedGraphRDD;
            if (!computedGraphCreated) {
                computedGraphRDD = loadedGraphRDD;
            }

            final Memory.Admin finalMemory = null == memory ? new MapMemory() : new MapMemory(memory);

            //////////////////////////////
            // process the map reducers //
            //////////////////////////////
            if (!this.mapReducers.isEmpty()) {
                // create a mapReduceRDD for executing the map reduce jobs on
                JavaPairRDD<Object, VertexWritable> mapReduceRDD = computedGraphRDD;
                if (computedGraphCreated && !outputToSpark) {
                    // drop all the edges of the graph as they are not used in mapReduce processing
                    mapReduceRDD = computedGraphRDD.mapValues(vertexWritable -> {
                        vertexWritable.get().dropEdges(Direction.BOTH);
                        return vertexWritable;
                    });
                    // if there is only one MapReduce to execute, don't bother wasting the clock cycles.
                    if (this.mapReducers.size() > 1) {
                        mapReduceRDD = mapReduceRDD.persist(StorageLevel.fromString(hadoopConfiguration
                                .get(Constants.GREMLIN_SPARK_GRAPH_STORAGE_LEVEL, "MEMORY_ONLY")));
                    }
                }

                for (final MapReduce mapReduce : this.mapReducers) {
                    // execute the map reduce job
                    final HadoopConfiguration newApacheConfiguration = new HadoopConfiguration(
                            graphComputerConfiguration);
                    mapReduce.storeState(newApacheConfiguration);
                    // map
                    final JavaPairRDD mapRDD = GraknSparkExecutor.executeMap(mapReduceRDD, mapReduce,
                            newApacheConfiguration);
                    // combine
                    final JavaPairRDD combineRDD = mapReduce.doStage(MapReduce.Stage.COMBINE)
                            ? GraknSparkExecutor.executeCombine(mapRDD, newApacheConfiguration)
                            : mapRDD;
                    // reduce
                    final JavaPairRDD reduceRDD = mapReduce.doStage(MapReduce.Stage.REDUCE)
                            ? GraknSparkExecutor.executeReduce(combineRDD, mapReduce, newApacheConfiguration)
                            : combineRDD;
                    // write the map reduce output back to disk and computer result memory
                    if (null != outputRDD) {
                        mapReduce.addResultToMemory(finalMemory, outputRDD.writeMemoryRDD(
                                graphComputerConfiguration, mapReduce.getMemoryKey(), reduceRDD));
                    }
                }
                // if the mapReduceRDD is not simply the computed graph, unpersist the mapReduceRDD
                if (computedGraphCreated && !outputToSpark) {
                    assert loadedGraphRDD != computedGraphRDD;
                    assert mapReduceRDD != computedGraphRDD;
                    mapReduceRDD.unpersist();
                } else {
                    assert mapReduceRDD == computedGraphRDD;
                }
            }

            // unpersist the loaded graph if it will not be used again (no PersistedInputRDD)
            // if the graphRDD was loaded from Spark, but then partitioned or filtered, it's a different RDD
            if (!inputFromSpark || partitioned || filtered) {
                loadedGraphRDD.unpersist();
            }
            // unpersist the computed graph if it will not be used again (no PersistedOutputRDD)
            // if the computed graph is the loadedGraphRDD because it was not mutated and not-unpersisted,
            // then don't unpersist the computedGraphRDD/loadedGraphRDD
            if ((!outputToSpark || this.persist.equals(GraphComputer.Persist.NOTHING))
                    && computedGraphCreated) {
                computedGraphRDD.unpersist();
            }
            // delete any file system or rdd data if persist nothing
            if (null != outputLocation && this.persist.equals(GraphComputer.Persist.NOTHING)) {
                if (outputToHDFS) {
                    fileSystemStorage.rm(outputLocation);
                }
                if (outputToSpark) {
                    sparkContextStorage.rm(outputLocation);
                }
            }
            // update runtime and return the newly computed graph
            finalMemory.setRuntime(System.currentTimeMillis() - startTime);
            // clear properties that should not be propagated in an OLAP chain
            graphComputerConfiguration.clearProperty(Constants.GREMLIN_HADOOP_GRAPH_FILTER);
            graphComputerConfiguration.clearProperty(Constants.GREMLIN_HADOOP_VERTEX_PROGRAM_INTERCEPTOR);
            graphComputerConfiguration.clearProperty(Constants.GREMLIN_SPARK_SKIP_GRAPH_CACHE);
            graphComputerConfiguration.clearProperty(Constants.GREMLIN_SPARK_SKIP_PARTITIONER);
            return new DefaultComputerResult(InputOutputHelper.getOutputGraph(graphComputerConfiguration,
                    this.resultGraph, this.persist), finalMemory.asImmutable());
        } catch (Exception e) {
            // So it throws the same exception as TinkerPop does
            throw new RuntimeException(e);
        }
    });
    computerService.shutdown();
    return result;
}
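
The interruption handling above boils down to a small, reusable pattern: poll Thread.interrupted() at the top of each iteration of a long-running loop, cancel any outstanding work, and surface the interrupt as an exception. The sketch below is illustrative only; the CancellableLoop class and runUntilDone method are made-up names and are not part of the Grakn or TinkerPop code shown here.

public final class CancellableLoop {

    // Runs one unit of work per iteration until the supplier reports completion,
    // honouring cooperative cancellation via the thread's interrupt flag.
    public static void runUntilDone(Runnable iteration, java.util.function.BooleanSupplier done)
            throws InterruptedException {
        while (!done.getAsBoolean()) {
            // Thread.interrupted() tests AND clears the flag, so the interrupt is
            // reported exactly once and handed on to the caller as an exception.
            if (Thread.interrupted()) {
                throw new InterruptedException("loop cancelled");
            }
            iteration.run();
        }
    }

    public static void main(String[] args) throws Exception {
        Thread worker = new Thread(() -> {
            try {
                runUntilDone(() -> { /* one unit of work */ }, () -> false);
            } catch (InterruptedException e) {
                System.out.println("worker stopped: " + e.getMessage());
            }
        });
        worker.start();
        Thread.sleep(100);
        worker.interrupt(); // request cooperative cancellation
        worker.join();
    }
}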

From source file:org.apache.solr.client.solrj.impl.ConcurrentUpdateSolrClient.java

public synchronized void blockUntilFinished() {
    lock = new CountDownLatch(1);
    try {
        synchronized (runners) {

            // NOTE: if the executor is shut down, runners may never become empty (a scheduled task may never be run,
            // which means it would never remove itself from the runners list). This is why we don't wait forever
            // and periodically check whether the scheduler is shutting down.
            while (!runners.isEmpty()) {
                try {
                    runners.wait(250);
                } catch (InterruptedException e) {
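                    // clear the interrupt status (Thread.interrupted() tests and resets it) so the
                    // next runners.wait(250) is not woken again immediately; the interrupt is
                    // deliberately swallowed here and the loop keeps draining the queue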
                    Thread.interrupted();
                }

                if (scheduler.isShutdown())
                    break;

                // Need to check if the queue is empty before really considering this is finished (SOLR-4260)
                int queueSize = queue.size();
                if (queueSize > 0 && runners.isEmpty()) {
                    // TODO: can this still happen?
                    log.warn("No more runners, but queue still has " + queueSize
                            + " adding more runners to process remaining requests on queue");
                    addRunner();
                }
            }
        }
    } finally {
        lock.countDown();
        lock = null;
    }
}
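
The call to Thread.interrupted() in the catch block above differs from Thread.currentThread().isInterrupted(): the static method tests and clears the interrupt flag, while the instance method only reads it. That is why the loop can keep calling runners.wait(250) after an interrupt without being woken up again immediately. A minimal sketch of the two behaviours (the InterruptDemo class name is made up for illustration):

public final class InterruptDemo {
    public static void main(String[] args) {
        // set this thread's interrupt flag
        Thread.currentThread().interrupt();

        // isInterrupted() only reads the flag, so it stays set across calls
        System.out.println(Thread.currentThread().isInterrupted()); // true
        System.out.println(Thread.currentThread().isInterrupted()); // true

        // interrupted() reads AND clears the flag, so the second call sees false
        System.out.println(Thread.interrupted()); // true
        System.out.println(Thread.interrupted()); // false

        // if the interrupt should not be swallowed, restore the flag for callers
        Thread.currentThread().interrupt();
        if (Thread.interrupted()) {
            Thread.currentThread().interrupt();
        }
    }
}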