Example usage for org.apache.hadoop.io IOUtils cleanup

Introduction

This page lists example usages of org.apache.hadoop.io.IOUtils.cleanup collected from open-source projects.

Prototype

@Deprecated
public static void cleanup(Log log, java.io.Closeable... closeables) 

Document

Close the Closeable objects and ignore any Throwable or null pointers.
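The call is typically made from a finally block or an error-handling path, where a failure in close() should be logged (at debug level) rather than allowed to mask an exception already in flight. Below is a minimal sketch of that pattern, assuming a commons-logging Log and an illustrative file path; note that recent Hadoop releases deprecate this overload in favor of cleanupWithLogger, which takes an SLF4J Logger instead.

import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.IOUtils;

public class CleanupExample {
    private static final Log LOG = LogFactory.getLog(CleanupExample.class);

    public static void readFile(String path) throws IOException {
        InputStream in = null;
        try {
            in = new FileInputStream(path);
            // ... read from the stream ...
        } finally {
            // close() failures are logged and swallowed; null arguments are
            // skipped, so this is safe even if the stream was never opened.
            IOUtils.cleanup(LOG, in);
        }
    }
}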

Usage

From source file:org.apache.tajo.util.history.HistoryWriter.java

License:Apache License

@Override
public void serviceStop() throws Exception {
    if (stopped.getAndSet(true)) {
        return;
    }

    for (WriterHolder eachWriter : taskWriters.values()) {
        IOUtils.cleanup(LOG, eachWriter);
    }

    taskWriters.clear();
    writerThread.interrupt();

    IOUtils.cleanup(LOG, querySummaryWriter);

    if (historyCleaner != null) {
        historyCleaner.doStop();
    }
    super.serviceStop();
}
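Here cleanup runs on the shutdown path: each per-task writer and the query summary writer is closed with any close-time failure logged and swallowed, so one broken writer cannot abort the rest of serviceStop.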

From source file:org.apache.tajo.util.metrics.reporter.MetricsFileScheduledReporter.java

License:Apache License

@Override
protected void afterInit() {
    String fileName = metricsProperties.get(metricsPropertyKey + "filename");
    if (fileName == null) {
        LOG.warn("No " + metricsPropertyKey + "filename property in tajo-metrics.properties");
        return;
    }

    try {
        File file = new File(fileName);
        File parentFile = file.getParentFile();
        if (parentFile != null && !parentFile.exists()) {
            if (!parentFile.mkdirs()) {
                LOG.warn("Can't create dir for tajo metrics:" + parentFile.getAbsolutePath());
            }
        }
        this.output = new FileOutputStream(fileName, true);
        this.setDateFormat(null);
    } catch (FileNotFoundException e) {
        LOG.warn("Can't open metrics file:" + fileName);
        IOUtils.cleanup(LOG, this);
    }
}
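When the metrics file cannot be opened, the reporter passes itself to cleanup; since it is itself a Closeable, this releases whatever was partially initialized without risking a second exception from the error path.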

From source file:org.apache.tez.engine.common.shuffle.impl.Fetcher.java

License:Apache License

/**
 * The crux of the matter...
 * 
 * @param host {@link MapHost} from which we need to  
 *              shuffle available map-outputs.
 */
@VisibleForTesting
protected void copyFromHost(MapHost host) throws IOException {
    // Get completed maps on 'host'
    List<TezTaskAttemptID> maps = scheduler.getMapsForHost(host);

    // Sanity check to catch hosts with only 'OBSOLETE' maps, 
    // especially at the tail of large jobs
    if (maps.size() == 0) {
        return;
    }

    if (LOG.isDebugEnabled()) {
        LOG.debug("Fetcher " + id + " going to fetch from " + host + " for: " + maps);
    }

    // List of maps to be fetched yet
    Set<TezTaskAttemptID> remaining = new HashSet<TezTaskAttemptID>(maps);

    // Construct the url and connect
    DataInputStream input;
    boolean connectSucceeded = false;

    try {
        URL url = getMapOutputURL(host, maps);
        HttpURLConnection connection = openConnection(url);

        // generate hash of the url
        String msgToEncode = SecureShuffleUtils.buildMsgFrom(url);
        String encHash = SecureShuffleUtils.hashFromString(msgToEncode, jobTokenSecret);

        // put url hash into http header
        connection.addRequestProperty(SecureShuffleUtils.HTTP_HEADER_URL_HASH, encHash);
        // set the read timeout
        connection.setReadTimeout(readTimeout);
        // put shuffle version into http header
        connection.addRequestProperty(ShuffleHeader.HTTP_HEADER_NAME, ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
        connection.addRequestProperty(ShuffleHeader.HTTP_HEADER_VERSION,
                ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
        connect(connection, connectionTimeout);
        connectSucceeded = true;
        input = new DataInputStream(connection.getInputStream());

        // Validate response code
        int rc = connection.getResponseCode();
        if (rc != HttpURLConnection.HTTP_OK) {
            throw new IOException("Got invalid response code " + rc + " from " + url + ": "
                    + connection.getResponseMessage());
        }
        // get the shuffle version
        if (!ShuffleHeader.DEFAULT_HTTP_HEADER_NAME
                .equals(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_NAME))
                || !ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION
                        .equals(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_VERSION))) {
            throw new IOException("Incompatible shuffle response version");
        }
        // get the replyHash which is HMac of the encHash we sent to the server
        String replyHash = connection.getHeaderField(SecureShuffleUtils.HTTP_HEADER_REPLY_URL_HASH);
        if (replyHash == null) {
            throw new IOException("security validation of TT Map output failed");
        }
        LOG.debug("url=" + msgToEncode + ";encHash=" + encHash + ";replyHash=" + replyHash);
        // verify that replyHash is HMac of encHash
        SecureShuffleUtils.verifyReply(replyHash, encHash, jobTokenSecret);
        LOG.info("for url=" + msgToEncode + " sent hash and receievd reply");
    } catch (IOException ie) {
        ioErrs.increment(1);
        LOG.warn("Failed to connect to " + host + " with " + remaining.size() + " map outputs", ie);

        // If connect did not succeed, just mark all the maps as failed,
        // indirectly penalizing the host
        if (!connectSucceeded) {
            for (TezTaskAttemptID left : remaining) {
                scheduler.copyFailed(left, host, connectSucceeded);
            }
        } else {
            // If we got a read error at this stage, it implies there was a problem
            // with the first map, typically lost map. So, penalize only that map
            // and add the rest
            TezTaskAttemptID firstMap = maps.get(0);
            scheduler.copyFailed(firstMap, host, connectSucceeded);
        }

        // Add back all the remaining maps, WITHOUT marking them as failed
        for (TezTaskAttemptID left : remaining) {
            scheduler.putBackKnownMapOutput(host, left);
        }

        return;
    }

    try {
        // Loop through available map-outputs and fetch them
        // On any error, failedTasks is not null and we exit
        // after putting back the remaining maps to the 
        // yet_to_be_fetched list and marking the failed tasks.
        TezTaskAttemptID[] failedTasks = null;
        while (!remaining.isEmpty() && failedTasks == null) {
            failedTasks = copyMapOutput(host, input, remaining);
        }

        if (failedTasks != null && failedTasks.length > 0) {
            LOG.warn("copyMapOutput failed for tasks " + Arrays.toString(failedTasks));
            for (TezTaskAttemptID left : failedTasks) {
                scheduler.copyFailed(left, host, true);
            }
        }

        IOUtils.cleanup(LOG, input);

        // Sanity check
        if (failedTasks == null && !remaining.isEmpty()) {
            throw new IOException(
                    "server didn't return all expected map outputs: " + remaining.size() + " left.");
        }
    } finally {
        for (TezTaskAttemptID left : remaining) {
            scheduler.putBackKnownMapOutput(host, left);
        }
    }
}

From source file:org.apache.tez.engine.common.shuffle.impl.Fetcher.java

License:Apache License

private void shuffleToMemory(MapHost host, MapOutput mapOutput, InputStream input, int decompressedLength,
        int compressedLength) throws IOException {
    IFileInputStream checksumIn = new IFileInputStream(input, compressedLength, job);

    input = checksumIn;

    // Are map-outputs compressed?
    if (codec != null) {
        decompressor.reset();
        input = codec.createInputStream(input, decompressor);
    }

    // Copy map-output into an in-memory buffer
    byte[] shuffleData = mapOutput.getMemory();

    try {
        IOUtils.readFully(input, shuffleData, 0, shuffleData.length);
        metrics.inputBytes(shuffleData.length);
        reporter.progress();
        LOG.info("Read " + shuffleData.length + " bytes from map-output for " + mapOutput.getMapId());
    } catch (IOException ioe) {
        // Close the streams
        IOUtils.cleanup(LOG, input);

        // Re-throw
        throw ioe;
    }

}
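This is the close-and-rethrow pattern: on a read failure the decorated input stream (the checksum wrapper plus the optional decompression wrapper) is closed via cleanup, which never throws, and the original IOException is then propagated unchanged.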

From source file:org.apache.tez.runtime.library.common.shuffle.impl.Fetcher.java

License:Apache License

/**
 * The crux of the matter...
 * 
 * @param host {@link MapHost} from which we need to  
 *              shuffle available map-outputs.
 */
@VisibleForTesting
protected void copyFromHost(MapHost host) throws IOException {
    // Get completed maps on 'host'
    List<InputAttemptIdentifier> srcAttempts = scheduler.getMapsForHost(host);

    // Sanity check to catch hosts with only 'OBSOLETE' maps, 
    // especially at the tail of large jobs
    if (srcAttempts.size() == 0) {
        return;
    }

    if (LOG.isDebugEnabled()) {
        LOG.debug("Fetcher " + id + " going to fetch from " + host + " for: " + srcAttempts);
    }

    // List of maps to be fetched yet
    remaining = new LinkedHashSet<InputAttemptIdentifier>(srcAttempts);

    // Construct the url and connect
    DataInputStream input;
    boolean connectSucceeded = false;

    try {
        URL url = getMapOutputURL(host, srcAttempts);
        HttpURLConnection connection = openConnection(url);

        // generate hash of the url
        String msgToEncode = SecureShuffleUtils.buildMsgFrom(url);
        String encHash = SecureShuffleUtils.hashFromString(msgToEncode, jobTokenSecret);

        // put url hash into http header
        connection.addRequestProperty(SecureShuffleUtils.HTTP_HEADER_URL_HASH, encHash);
        // set the read timeout
        connection.setReadTimeout(readTimeout);
        // put shuffle version into http header
        connection.addRequestProperty(ShuffleHeader.HTTP_HEADER_NAME, ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
        connection.addRequestProperty(ShuffleHeader.HTTP_HEADER_VERSION,
                ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
        connect(connection, connectionTimeout);
        connectSucceeded = true;
        input = new DataInputStream(connection.getInputStream());

        // Validate response code
        int rc = connection.getResponseCode();
        if (rc != HttpURLConnection.HTTP_OK) {
            throw new IOException("Got invalid response code " + rc + " from " + url + ": "
                    + connection.getResponseMessage());
        }
        // get the shuffle version
        if (!ShuffleHeader.DEFAULT_HTTP_HEADER_NAME
                .equals(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_NAME))
                || !ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION
                        .equals(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_VERSION))) {
            throw new IOException("Incompatible shuffle response version");
        }
        // get the replyHash which is HMac of the encHash we sent to the server
        String replyHash = connection.getHeaderField(SecureShuffleUtils.HTTP_HEADER_REPLY_URL_HASH);
        if (replyHash == null) {
            throw new IOException("security validation of TT Map output failed");
        }
        LOG.debug("url=" + msgToEncode + ";encHash=" + encHash + ";replyHash=" + replyHash);
        // verify that replyHash is HMac of encHash
        SecureShuffleUtils.verifyReply(replyHash, encHash, jobTokenSecret);
        LOG.info("for url=" + msgToEncode + " sent hash and receievd reply");
    } catch (IOException ie) {
        ioErrs.increment(1);
        LOG.warn("Failed to connect to " + host + " with " + remaining.size() + " map outputs", ie);

        // If connect did not succeed, just mark all the maps as failed,
        // indirectly penalizing the host
        if (!connectSucceeded) {
            for (InputAttemptIdentifier left : remaining) {
                scheduler.copyFailed(left, host, connectSucceeded);
            }
        } else {
            // If we got a read error at this stage, it implies there was a problem
            // with the first map, typically lost map. So, penalize only that map
            // and add the rest
            InputAttemptIdentifier firstMap = srcAttempts.get(0);
            scheduler.copyFailed(firstMap, host, connectSucceeded);
        }

        // Add back all the remaining maps, WITHOUT marking them as failed
        for (InputAttemptIdentifier left : remaining) {
            // TODO Should the first one be skipped ?
            scheduler.putBackKnownMapOutput(host, left);
        }

        return;
    }

    try {
        // Loop through available map-outputs and fetch them
        // On any error, failedTasks is not null and we exit
        // after putting back the remaining maps to the 
        // yet_to_be_fetched list and marking the failed tasks.
        InputAttemptIdentifier[] failedTasks = null;
        while (!remaining.isEmpty() && failedTasks == null) {
            failedTasks = copyMapOutput(host, input);
        }

        if (failedTasks != null && failedTasks.length > 0) {
            LOG.warn("copyMapOutput failed for tasks " + Arrays.toString(failedTasks));
            for (InputAttemptIdentifier left : failedTasks) {
                scheduler.copyFailed(left, host, true);
            }
        }

        IOUtils.cleanup(LOG, input);

        // Sanity check
        if (failedTasks == null && !remaining.isEmpty()) {
            throw new IOException(
                    "server didn't return all expected map outputs: " + remaining.size() + " left.");
        }
    } finally {
        for (InputAttemptIdentifier left : remaining) {
            scheduler.putBackKnownMapOutput(host, left);
        }
    }
}

From source file:org.apache.tez.runtime.library.common.shuffle.impl.Fetcher.java

License:Apache License

private void shuffleToMemory(MapHost host, MapOutput mapOutput, InputStream input, int decompressedLength,
        int compressedLength) throws IOException {
    IFileInputStream checksumIn = new IFileInputStream(input, compressedLength, ifileReadAhead,
            ifileReadAheadLength);

    input = checksumIn;

    // Are map-outputs compressed?
    if (codec != null) {
        decompressor.reset();
        input = codec.createInputStream(input, decompressor);
    }

    // Copy map-output into an in-memory buffer
    byte[] shuffleData = mapOutput.getMemory();

    try {
        IOUtils.readFully(input, shuffleData, 0, shuffleData.length);
        metrics.inputBytes(shuffleData.length);
        LOG.info("Read " + shuffleData.length + " bytes from map-output for "
                + mapOutput.getAttemptIdentifier());
    } catch (IOException ioe) {
        // Close the streams
        IOUtils.cleanup(LOG, input);

        // Re-throw
        throw ioe;
    }

}

From source file:org.apache.tez.runtime.library.common.shuffle.ShuffleUtils.java

License:Apache License

public static void shuffleToMemory(byte[] shuffleData, InputStream input, int decompressedLength,
        int compressedLength, CompressionCodec codec, boolean ifileReadAhead, int ifileReadAheadLength, Log LOG,
        String identifier) throws IOException {
    try {
        IFile.Reader.readToMemory(shuffleData, input, compressedLength, codec, ifileReadAhead,
                ifileReadAheadLength);
        // metrics.inputBytes(shuffleData.length);
        LOG.info("Read " + shuffleData.length + " bytes from input for " + identifier);
    } catch (IOException ioe) {
        // Close the streams
        IOUtils.cleanup(LOG, input);
        // Re-throw
        throw ioe;
    }
}

From source file:org.apache.tez.runtime.library.shuffle.common.Fetcher.java

License:Apache License

@Override
public FetchResult call() throws Exception {
    if (srcAttempts.size() == 0) {
        return new FetchResult(host, port, partition, srcAttempts);
    }

    for (InputAttemptIdentifier in : srcAttempts) {
        pathToAttemptMap.put(in.getPathComponent(), in);
    }

    remaining = new LinkedHashSet<InputAttemptIdentifier>(srcAttempts);

    HttpURLConnection connection;
    try {
        connection = connectToShuffleHandler(host, port, partition, srcAttempts);
    } catch (IOException e) {
        // ioErrs.increment(1);
        // If connect did not succeed, just mark all the maps as failed,
        // indirectly penalizing the host
        for (Iterator<InputAttemptIdentifier> leftIter = remaining.iterator(); leftIter.hasNext();) {
            fetcherCallback.fetchFailed(host, leftIter.next(), true);
        }
        return new FetchResult(host, port, partition, remaining);
    }

    DataInputStream input;

    try {
        input = new DataInputStream(connection.getInputStream());
        validateConnectionResponse(connection, url, msgToEncode, encHash);
    } catch (IOException e) {
        // ioErrs.increment(1);
        // If we got a read error at this stage, it implies there was a problem
        // with the first map, typically lost map. So, penalize only that map
        // and add the rest
        InputAttemptIdentifier firstAttempt = srcAttempts.get(0);
        LOG.warn("Fetch Failure from host while connecting: " + host + ", attempt: " + firstAttempt
                + " Informing ShuffleManager: ", e);
        fetcherCallback.fetchFailed(host, firstAttempt, false);
        return new FetchResult(host, port, partition, remaining);
    }

    // By this point, the connection is setup and the response has been
    // validated.

    // Loop through available map-outputs and fetch them
    // On any error, failedInputs is not null and we exit
    // after putting back the remaining maps to the
    // yet_to_be_fetched list and marking the failed tasks.
    InputAttemptIdentifier[] failedInputs = null;
    while (!remaining.isEmpty() && failedInputs == null) {
        failedInputs = fetchInputs(input);
    }

    if (failedInputs != null && failedInputs.length > 0) {
        LOG.warn("copyInputs failed for tasks " + Arrays.toString(failedInputs));
        for (InputAttemptIdentifier left : failedInputs) {
            fetcherCallback.fetchFailed(host, left, false);
        }
    }

    IOUtils.cleanup(LOG, input);

    // Sanity check
    if (failedInputs == null && !remaining.isEmpty()) {
        throw new IOException("server didn't return all expected map outputs: " + remaining.size() + " left.");
    }

    return new FetchResult(host, port, partition, remaining);

}

From source file:org.apache.tez.runtime.library.shuffle.common.ShuffleUtils.java

License:Apache License

@SuppressWarnings("resource")
public static void shuffleToMemory(MemoryFetchedInput fetchedInput, InputStream input, int decompressedLength,
        int compressedLength, CompressionCodec codec, boolean ifileReadAhead, int ifileReadAheadLength, Log LOG)
        throws IOException {
    IFileInputStream checksumIn = new IFileInputStream(input, compressedLength, ifileReadAhead,
            ifileReadAheadLength);

    input = checksumIn;

    // Are map-outputs compressed?
    if (codec != null) {
        Decompressor decompressor = CodecPool.getDecompressor(codec);
        decompressor.reset();
        input = codec.createInputStream(input, decompressor);
    }
    // Copy map-output into an in-memory buffer
    byte[] shuffleData = fetchedInput.getBytes();

    try {
        IOUtils.readFully(input, shuffleData, 0, shuffleData.length);
        // metrics.inputBytes(shuffleData.length);
        LOG.info("Read " + shuffleData.length + " bytes from input for "
                + fetchedInput.getInputAttemptIdentifier());
    } catch (IOException ioe) {
        // Close the streams
        IOUtils.cleanup(LOG, input);
        // Re-throw
        throw ioe;
    }
}

From source file:Qn3.TotalOrderPartitioner.java

License:Apache License

/**
 * Read the cut points from the given IFile.
 * @param fs The file system
 * @param p The path to read
 * @param keyClass The map output key class
 * @param job The job config
 * @throws IOException
 */
// matching key types enforced by passing in
@SuppressWarnings("unchecked") // map output key class
private K[] readPartitions(FileSystem fs, Path p, Class<K> keyClass, Configuration conf) throws IOException {
    SequenceFile.Reader reader = new SequenceFile.Reader(fs, p, conf);
    ArrayList<K> parts = new ArrayList<K>();
    K key = ReflectionUtils.newInstance(keyClass, conf);
    NullWritable value = NullWritable.get();
    try {
        while (reader.next(key, value)) {
            parts.add(key);
            key = ReflectionUtils.newInstance(keyClass, conf);
        }
        reader.close();
        reader = null;
    } finally {
        IOUtils.cleanup(LOG, reader);
    }
    return parts.toArray((K[]) Array.newInstance(keyClass, parts.size()));
}
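The close-then-null idiom above closes the reader exactly once: on success it is closed explicitly and the reference set to null, so the cleanup call in the finally block is a no-op; on failure the finally block closes it without masking the original exception. On Java 7 and later, try-with-resources gives the same guarantee more directly. A minimal sketch of an equivalent method under the same assumptions as the original, with the one difference that a close() failure after a read error is attached as a suppressed exception rather than swallowed:

@SuppressWarnings("unchecked")
private K[] readPartitions(FileSystem fs, Path p, Class<K> keyClass, Configuration conf) throws IOException {
    ArrayList<K> parts = new ArrayList<K>();
    // The reader is closed on both the success and the failure path.
    try (SequenceFile.Reader reader = new SequenceFile.Reader(fs, p, conf)) {
        K key = ReflectionUtils.newInstance(keyClass, conf);
        NullWritable value = NullWritable.get();
        while (reader.next(key, value)) {
            parts.add(key);
            key = ReflectionUtils.newInstance(keyClass, conf);
        }
    }
    return parts.toArray((K[]) Array.newInstance(keyClass, parts.size()));
}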