Example usage for org.apache.hadoop.io IOUtils cleanup

Introduction

On this page you can find example usage of org.apache.hadoop.io.IOUtils.cleanup.

Prototype

@Deprecated
public static void cleanup(Log log, java.io.Closeable... closeables) 

Document

Close the Closeable objects and ignore any Throwable or null pointers.
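As the @Deprecated annotation in the prototype indicates, this Log-based overload has been superseded; recent Hadoop releases provide IOUtils.cleanupWithLogger(Logger, Closeable...) as the slf4j-based replacement. Before the full examples below, here is a minimal sketch of the typical pattern (the class name and file path are illustrative, not taken from any of the source files):

import java.io.FileInputStream;
import java.io.IOException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.IOUtils;

public class CleanupExample {
    private static final Log LOG = LogFactory.getLog(CleanupExample.class);

    public static void main(String[] args) throws IOException {
        FileInputStream in = null;
        try {
            in = new FileInputStream("/etc/hosts");
            // ... use the stream ...
        } finally {
            // Close failures are logged (at debug level when LOG is non-null)
            // rather than thrown, so cleanup is safe to call in a finally block.
            // Null elements in the varargs list are skipped.
            IOUtils.cleanup(LOG, in);
        }
    }
}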

Usage

From source file: io.hops.tensorflow.ApplicationMaster.java

License: Apache License

/**
 * Dump out contents of $CWD and the environment to stdout for debugging
 */
private void dumpOutDebugInfo() {
    LOG.info("Dump debug output");
    Map<String, String> envs = System.getenv();
    for (Map.Entry<String, String> env : envs.entrySet()) {
        LOG.info("System env: key=" + env.getKey() + ", val=" + env.getValue());
        System.out.println("System env: key=" + env.getKey() + ", val=" + env.getValue());
    }

    BufferedReader buf = null;
    try {
        String lines = Shell.WINDOWS ? Shell.execCommand("cmd", "/c", "dir") : Shell.execCommand("ls", "-al");
        buf = new BufferedReader(new StringReader(lines));
        String line = "";
        while ((line = buf.readLine()) != null) {
            LOG.info("System CWD content: " + line);
            System.out.println("System CWD content: " + line);
        }
    } catch (IOException e) {
        e.printStackTrace();
    } finally {
        IOUtils.cleanup(LOG, buf);
    }
}

From source file: it.crs4.pydoop.mapreduce.pipes.TaskLog.java

License: Apache License

private static LogFileDetail getLogFileDetail(TaskAttemptID taskid, LogName filter, boolean isCleanup)
        throws IOException {
    File indexFile = getIndexFile(taskid, isCleanup);
    BufferedReader fis = new BufferedReader(new InputStreamReader(
            SecureIOUtils.openForRead(indexFile, obtainLogDirOwner(taskid), null), Charsets.UTF_8));
    //the format of the index file is
    //LOG_DIR: <the dir where the task logs are really stored>
    //stdout:<start-offset in the stdout file> <length>
    //stderr:<start-offset in the stderr file> <length>
    //syslog:<start-offset in the syslog file> <length>
    LogFileDetail l = new LogFileDetail();
    String str = null;
    try {
        str = fis.readLine();
        if (str == null) { // the file doesn't have anything
            throw new IOException("Index file for the log of " + taskid + " doesn't exist.");
        }
        l.location = str.substring(str.indexOf(LogFileDetail.LOCATION) + LogFileDetail.LOCATION.length());
        // special cases are the debugout and profile.out files. They are
        // guaranteed
        // to be associated with each task attempt since jvm reuse is disabled
        // when profiling/debugging is enabled
        if (filter.equals(LogName.DEBUGOUT) || filter.equals(LogName.PROFILE)) {
            l.length = new File(l.location, filter.toString()).length();
            l.start = 0;
            fis.close();
            return l;
        }
        str = fis.readLine();
        while (str != null) {
            // look for the exact line containing the logname
            if (str.contains(filter.toString())) {
                str = str.substring(filter.toString().length() + 1);
                String[] startAndLen = str.split(" ");
                l.start = Long.parseLong(startAndLen[0]);
                l.length = Long.parseLong(startAndLen[1]);
                break;
            }
            str = fis.readLine();
        }
        fis.close();
        fis = null;
    } finally {
        IOUtils.cleanup(LOG, fis);
    }
    return l;
}

From source file: it.crs4.pydoop.mapreduce.pipes.TaskLog.java

License: Apache License

private static synchronized void writeToIndexFile(String logLocation, boolean isCleanup) throws IOException {
    // To ensure atomicity of updates to index file, write to temporary index
    // file first and then rename.
    File tmpIndexFile = getTmpIndexFile(currentTaskid, isCleanup);

    BufferedOutputStream bos = new BufferedOutputStream(SecureIOUtils.createForWrite(tmpIndexFile, 0644));
    DataOutputStream dos = new DataOutputStream(bos);
    //the format of the index file is
    //LOG_DIR: <the dir where the task logs are really stored>
    //STDOUT: <start-offset in the stdout file> <length>
    //STDERR: <start-offset in the stderr file> <length>
    //SYSLOG: <start-offset in the syslog file> <length>
    try {
        dos.writeBytes(LogFileDetail.LOCATION + logLocation + "\n" + LogName.STDOUT.toString() + ":");
        dos.writeBytes(Long.toString(prevOutLength) + " ");
        dos.writeBytes(Long.toString(new File(logLocation, LogName.STDOUT.toString()).length() - prevOutLength)
                + "\n" + LogName.STDERR + ":");
        dos.writeBytes(Long.toString(prevErrLength) + " ");
        dos.writeBytes(Long.toString(new File(logLocation, LogName.STDERR.toString()).length() - prevErrLength)
                + "\n" + LogName.SYSLOG.toString() + ":");
        dos.writeBytes(Long.toString(prevLogLength) + " ");
        dos.writeBytes(Long.toString(new File(logLocation, LogName.SYSLOG.toString()).length() - prevLogLength)
                + "\n");
        dos.close();
        dos = null;
    } finally {
        IOUtils.cleanup(LOG, dos);
    }

    File indexFile = getIndexFile(currentTaskid, isCleanup);
    Path indexFilePath = new Path(indexFile.getAbsolutePath());
    Path tmpIndexFilePath = new Path(tmpIndexFile.getAbsolutePath());

    if (localFS == null) {// set localFS once
        localFS = FileSystem.getLocal(new Configuration());
    }
    localFS.rename(tmpIndexFilePath, indexFilePath);
}

From source file: nl.basjes.hadoop.io.compress.TestSplittableCodecSeams.java

License: Apache License

/**
 * Write the specified number of records to a file in the test dir using the given codec.
 * Records are simply lines of random ASCII.
 */
private static Path writeSplitTestFile(final Configuration conf,
        final Class<? extends SplittableCompressionCodec> codecClass, final long records,
        final int recordLength, final int trailingSizeJitter, final int randomizeEveryNChars)
        throws IOException {

    RAND.setSeed(1); // Make the tests reproducible

    final FileSystem fs = FileSystem.getLocal(conf);
    final SplittableCompressionCodec codec = ReflectionUtils.newInstance(codecClass, conf);

    final Path wd = new Path(new Path(System.getProperty("test.build.data", "/tmp")).makeQualified(fs.getUri(),
            fs.getWorkingDirectory()), codec.getClass().getSimpleName());

    final Path file = new Path(wd,
            "test-" + records + "-" + recordLength + "-" + trailingSizeJitter + codec.getDefaultExtension());
    DataOutputStream out = null;
    final Compressor cmp = CodecPool.getCompressor(codec);
    try {
        out = new DataOutputStream(codec.createOutputStream(fs.create(file, true), cmp));

        for (long seq = 1; seq <= records; ++seq) {
            final String line = randomGibberish(
                    recordLength + (trailingSizeJitter > 0 ? RAND.nextInt(trailingSizeJitter) : 0),
                    randomizeEveryNChars) + "\n";
            // There must be a simpler way to output ASCII instead of 2-byte Unicode
            out.writeBytes(new String(line.getBytes("UTF-8"), "US-ASCII"));
        }
    } finally {
        IOUtils.cleanup(LOG, out);
        CodecPool.returnCompressor(cmp);
    }
    return file;
}

From source file: org.apache.ambari.servicemonitor.probes.JTClusterStatusProbe.java

License: Apache License

/**
 * Make a {@link JTClusterOps#getClusterStatus(boolean)} call with
 * the parameter set to false (non-detailed).
 * @param livePing is the ping live: true for live; false for boot time
 * @return a ProbeStatus that reports success iff the cluster status operation returns
 */
@Override
public ProbeStatus ping(boolean livePing) {
    ProbeStatus status = new ProbeStatus();
    JTClusterOps clusterOps = new JTClusterOps();
    try {
        InetSocketAddress addr = MonitorUtils.getURIAddress(jturi);
        clusterOps.connect(addr, conf);
        clusterStatus = clusterOps.getClusterStatus(false);
        if (LOG.isDebugEnabled()) {
            LOG.debug("JT state = " + clusterStatus.getJobTrackerState());
            LOG.debug("Active trackers = " + clusterStatus.getTaskTrackers());
            LOG.debug("Blacklisted trackers = " + clusterStatus.getBlacklistedTrackers());
        }
        status.succeed(this);
        status.setMessage(getName() + " is in state " + clusterStatus.getJobTrackerState());
    } catch (IOException e) {
        status.fail(this, new IOException(getName() + " : " + e, e));
        LOG.debug("Failure to probe " + getName());
    } finally {
        IOUtils.cleanup(LOG, clusterOps);
    }
    return status;
}

From source file: org.apache.blur.manager.results.BlurResultIterableMultiple.java

License: Apache License

@Override
public void close() throws IOException {
    for (BlurResultIterable it : results) {
        IOUtils.cleanup(LOG, it);
    }
}

From source file: org.apache.blur.manager.writer.BlurIndexSimpleWriter.java

License: Apache License

private synchronized void openWriter() {
    IOUtils.cleanup(LOG, _indexImporter);
    BlurIndexWriter writer = _writer.get();
    if (writer != null) {
        try {
            writer.close(false);
        } catch (IOException e) {
            LOG.error("Unknown error while trying to close the writer, ["
                    + _shardContext.getTableContext().getTable() + "] Shard [" + _shardContext.getShard() + "]",
                    e);
        }
        _writer.set(null);
    }
    _writerOpener = getWriterOpener(_shardContext);
    _writerOpener.start();
}

From source file: org.apache.blur.MiniCluster.java

License: Apache License

public void stopControllers() {
    IOUtils.cleanup(LOG, controllers.toArray(new Closeable[] {}));
}

From source file: org.apache.blur.MiniCluster.java

License: Apache License

public void stopShards() {
    IOUtils.cleanup(LOG, shards.toArray(new Closeable[] {}));
}

From source file: org.apache.falcon.replication.FilteredCopyListingTest.java

License: Apache License

private static void mkdirs(String path) throws Exception {
    FileSystem fileSystem = null;
    try {
        fileSystem = FileSystem.getLocal(new Configuration());
        fileSystem.mkdirs(new Path(path));
        recordInExpectedValues(path);
    } finally {
        IOUtils.cleanup(null, fileSystem);
    }
}