Example usage for org.apache.hadoop.fs FileSystem closeAll

Introduction

On this page you can find example usages of org.apache.hadoop.fs.FileSystem.closeAll(), collected from open source projects.

Prototype

public static void closeAll() throws IOException 

Document

Close all cached FileSystem instances.
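
As a minimal, hedged sketch of the typical pattern (it is not taken from any of the projects below; the class name and path are placeholders), closeAll() is normally called once, after all work against cached FileSystem instances has finished:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CloseAllSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        try {
            // FileSystem.get() returns a cached instance keyed by scheme, authority and user
            FileSystem fs = FileSystem.get(conf);
            fs.exists(new Path("/tmp")); // placeholder path; any FileSystem operation goes here
        } finally {
            // Close every cached FileSystem instance; cached references must not be used afterwards
            FileSystem.closeAll();
        }
    }
}

Several of the examples below follow the same idea: they call closeAll() in a finally block or from a shutdown hook (often after setting fs.automatic.close to false), or call it in test setup and teardown to clear the FileSystem cache between test runs.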

Usage

From source file:com.asakusafw.runtime.stage.AbstractCleanupStageClient.java

License:Apache License

@Override
protected int execute(String[] args) throws IOException, InterruptedException {
    Configuration conf = getConf();
    Path path = getPath(conf);
    FileSystem fileSystem = FileSystem.get(path.toUri(), conf);
    String info = MessageFormat.format("batchId={0}, flowId={1}, executionId={2}, operationId={3}, path={4}", //$NON-NLS-1$
            getBatchId(), getFlowId(), getExecutionId(), getOperationId(), path);
    try {
        LOG.info(MessageFormat.format("Searching for cleanup target: {0}", info));
        long start = System.currentTimeMillis();
        if (RuntimeContext.get().isSimulation()) {
            LOG.info(MessageFormat.format(
                    "Skip deleting cleanup target because current execution is in simulation mode: {0}", info));
        } else {
            FileStatus stat = fileSystem.getFileStatus(path);
            if (stat == null) {
                throw new FileNotFoundException(path.toString());
            }
            LOG.info(MessageFormat.format("Start deleting cleanup target: {0}", info));
            if (fileSystem.delete(path, true) == false) {
                throw new IOException("FileSystem.delete() returned false");
            }
        }
        long end = System.currentTimeMillis();
        LOG.info(MessageFormat.format("Finish deleting cleanup target: {0}, elapsed={1}ms", info, end - start));
        return 0;
    } catch (FileNotFoundException e) {
        LOG.warn(MessageFormat.format("Cleanup target is missing: {0}", info));
        return 0;
    } catch (IOException e) {
        LOG.warn(MessageFormat.format("Failed to delete cleanup target: {0}", info), e);
        return 1;
    } finally {
        FileSystem.closeAll();
    }
}

From source file:com.asakusafw.testdriver.FileSystemCleaner.java

License:Apache License

private static void clean() {
    try {
        FileSystem.closeAll();
    } catch (IOException e) {
        LOG.warn("error occurred while cleaning up Hadoop file systems", e);
    }
}

From source file:com.facebook.presto.hadoop.TestHadoopFileSystemCache.java

License:Apache License

@Test
public void testCache() throws Exception {
    HadoopFileSystemCache.initialize();

    FileSystem.closeAll();

    Configuration conf = new Configuration();
    URI uri = URI.create("file:///");

    FileSystem fs1 = FileSystem.get(uri, conf);
    FileSystem fs2 = FileSystem.get(uri, conf);
    assertSame(fs2, fs1);

    FileSystem.closeAll();

    FileSystem fs3 = FileSystem.get(uri, conf);
    assertNotSame(fs3, fs1);
}

From source file:com.google.cloud.hadoop.fs.gcs.GoogleHadoopFileSystemIntegrationTest.java

License:Open Source License

@Before
public void clearFileSystemCache() throws IOException {
    FileSystem.closeAll();
}

From source file:com.google.mr4c.hadoop.MR4CGenericOptionsParser.java

License:Open Source License

/**
  * This should be called after tests to get rid of FileSystem side effects
 */
public static void cleanup() throws IOException {
    // GenericOptionsParser will cache FileSystem objects based on the config it is updating
    FileSystem.closeAll();
}

From source file:com.panguso.lc.analysis.format.Logcenter.java

License:Open Source License

@Override
public int run(String[] args) throws Exception {
    context = new ClassPathXmlApplicationContext("applicationContext.xml");
    Properties prop = context.getBean("configProperties", Properties.class);
    // String time = new DateTime().toString("yyyyMMddHH");

    // hadoop.lib=/application/format/lib/
    // hadoop.conf=/application/format/conf/
    // hadoop.src=/log/src/
    // hadoop.dest=/log/dest/
    // hadoop.archive=/log/archive/
    libPath = prop.getProperty("hadoop.lib");
    confPath = prop.getProperty("hadoop.conf");
    srcPath = prop.getProperty("hadoop.src");
    destPath = prop.getProperty("hadoop.dest");
    archivePath = prop.getProperty("hadoop.archive");
    Configuration conf = getConf();
    logger.info("libPath=" + libPath);
    logger.info("confPath=" + confPath);
    logger.info("srcPath=" + srcPath);
    logger.info("destPath=" + destPath);
    logger.info("archivePath=" + archivePath);

    FileSystem fs = FileSystem.get(conf);
    // --jar
    FileStatus[] fJars = fs.listStatus(new Path(libPath));
    for (FileStatus fileStatus : fJars) {
        String jar = libPath + fileStatus.getPath().getName();
        DistributedCache.addFileToClassPath(new Path(jar), conf, FileSystem.get(conf));
    }
    // -- configuration files
    FileStatus[] fProp = fs.listStatus(new Path(confPath));
    for (FileStatus fileStatus : fProp) {
        DistributedCache.addArchiveToClassPath(new Path(confPath + fileStatus.getPath().getName()), conf,
                FileSystem.get(conf));
    }
    FileStatus[] fDirs = fs.listStatus(new Path(srcPath));
    if (fDirs != null && fDirs.length > 0) {
        for (FileStatus file : fDirs) {
            // dir
            String currentTime = file.getPath().getName();
            String srcPathWithTime = srcPath + currentTime + "/";
            String destPathWithTime = destPath + currentTime + "/";
            String archPathWithTime = archivePath + currentTime + "/";
            // skip directories that have already been processed successfully
            if (analysisService.isSuccessful(currentTime)) {
                continue;
            }

            // set up and run the format job for this directory

            // clear any previous output for this directory
            fs.delete(new Path(destPathWithTime), true);

            // optionally verify that the input path exists:
            // if (!fs.exists(new Path(srcPathWithTime))) {
            // logger.warn("outPath does not exist,inputPath=" +
            // srcPathWithTime);
            // analysisService.saveFailureJob(job.getJobName(),
            // currentTime);
            // return -1;
            // }
            // replace ";" with ":" in the job classpath entries
            Job job = new Job(conf);
            String jars = job.getConfiguration().get("mapred.job.classpath.files");
            job.getConfiguration().set("mapred.job.classpath.files", jars.replace(";", ":"));
            logger.info("current dir=" + currentTime);
            job.setJobName("format_" + currentTime);

            job.setJarByClass(Logcenter.class);
            job.setMapperClass(FormatAnalysisMapper.class);
            job.setReducerClass(FormatAnalysisReducer.class);
            job.setCombinerClass(FormatAnalysisReducer.class);
            job.setOutputKeyClass(Text.class);
            job.setOutputValueClass(Text.class);
            job.setOutputFormatClass(TextOutputFormat.class);
            // job.setNumReduceTasks(0);
            FileInputFormat.addInputPath(job, new Path(srcPathWithTime));
            FileOutputFormat.setOutputPath(job, new Path(destPathWithTime));

            // run the job and wait for completion
            boolean result = false;
            try {
                result = job.waitForCompletion(true);
            } catch (FileAlreadyExistsException e) {
                logger.warn(e.getMessage(), e);
            }
            if (!result) {
                logger.warn("job execute failure!");
                analysisService.saveFailureJob(job.getJobName(), currentTime);
                continue;
                // return -1;
            }

            // archive the processed input directory
            fs.delete(new Path(archPathWithTime), true);
            fs.rename(new Path(srcPathWithTime), new Path(archPathWithTime));
            analysisService.saveSuccessJob(job.getJobName(), currentTime);
        }
    }

    FileSystem.closeAll();
    return 0;
}

From source file:com.rim.logdriver.sawmill.Sawmill.java

License:Apache License

public void run(String[] args) {
    if (args.length < 1) {
        System.out.println("Usage: " + this.getClass().getSimpleName() + " <config.properties>");
        System.exit(1);
    }

    LOG.info("Starting {}", Sawmill.class.getSimpleName());

    // First arg is the config
    String configFile = args[0];

    // Load configuration.
    Properties conf = new Properties();
    try {
        conf.load(new FileInputStream(configFile));
    } catch (FileNotFoundException e) {
        LOG.error("Config file not found.", e);
        System.exit(1);
    } catch (Throwable t) {
        LOG.error("Error reading config file.", t);
        System.exit(1);
    }

    // Parse the configuration.

    // Load in any Hadoop config files.
    Configuration hConf = new Configuration();
    {
        String[] hadoopConfs = Configs.hadoopConfigPaths.getArray(conf);
        for (String confPath : hadoopConfs) {
            hConf.addResource(new Path(confPath));
        }
        // Also, don't shut down my FileSystem automatically!!!
        hConf.setBoolean("fs.automatic.close", false);
        for (Entry<Object, Object> e : System.getProperties().entrySet()) {
            if (e.getValue() instanceof Integer) {
                hConf.setInt(e.getKey().toString(), (Integer) e.getValue());
            } else if (e.getValue() instanceof Long) {
                hConf.setLong(e.getKey().toString(), (Long) e.getValue());
            } else {
                hConf.set(e.getKey().toString(), e.getValue().toString());
            }
        }
    }

    // Ensure that UserGroupInformation is set up, and knows if security is
    // enabled.
    UserGroupInformation.setConfiguration(hConf);

    // Kerberos credentials. If these are not present, then it just won't try to
    // authenticate.
    String kerbConfPrincipal = Configs.kerberosPrincipal.get(conf);
    String kerbKeytab = Configs.kerberosKeytab.get(conf);
    Authenticator.getInstance().setKerbConfPrincipal(kerbConfPrincipal);
    Authenticator.getInstance().setKerbKeytab(kerbKeytab);

    // Check out the number of threads for workers, and create the threadpools
    // for both workers and stats updates.
    int threadCount = Configs.threadpoolSize.getInteger(conf);
    final ScheduledExecutorService executor = Executors.newScheduledThreadPool(threadCount);

    // Get the MBean server
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();

    // Set up the Mina Exception Monitor
    ExceptionMonitor.setInstance(new ExceptionLoggerExceptionMonitor());

    // For each port->output mapping, create a path (listener, queue, worker).
    // List<DataPath> paths = new ArrayList<DataPath>();
    final List<IoAcceptor> acceptors = new ArrayList<IoAcceptor>();
    final List<Writer> writers = new ArrayList<Writer>();
    {
        String[] pathStrings = Configs.paths.getArray(conf);
        for (String p : pathStrings) {
            Properties pathConf = Util.subProperties(conf, "path." + p);

            String name = Configs.name.get(pathConf);
            if (name == null) {
                LOG.info("Path has no name.  Using {}", p);
                name = p;
            }
            LOG.info("[{}] Configuring path {}", name, name);

            // Check the properties for this specific instance
            Integer maxLineLength = Configs.tcpMaxLineLength.getInteger(pathConf);
            if (maxLineLength == null) {
                maxLineLength = Configs.defaultTcpMaxLineLength.getInteger(conf);
            }
            LOG.info("[{}] Maximum line length is {}", name, maxLineLength);

            InetAddress bindAddress = null;
            try {
                String address = Configs.bindAddress.get(pathConf);
                bindAddress = InetAddress.getByName(address);
            } catch (UnknownHostException e) {
                LOG.error("[{}] Error getting bindAddress from string {}",
                        new Object[] { name, pathConf.getProperty("bindAddress") }, e);
            }

            Integer port = Configs.port.getInteger(pathConf);
            if (port == null) {
                LOG.error("[{}] Port not set.  Skipping this path.", name);
                continue;
            }

            int queueLength = Configs.queueCapacity.getInteger(pathConf);

            // Set up the actual processing chain
            IoAcceptor acceptor = new NioSocketAcceptor();
            SocketSessionConfig sessionConfig = (SocketSessionConfig) acceptor.getSessionConfig();
            sessionConfig.setReuseAddress(true);
            acceptors.add(acceptor);

            String charsetName = Configs.charset.getString(pathConf);
            Charset charset = null;
            try {
                charset = Charset.forName(charsetName);
            } catch (UnsupportedCharsetException e) {
                LOG.error("[{}] Charset '{}' is not supported.  Defaulting to UTF-8.", name, charsetName);
                charset = Charset.forName("UTF-8");
            }
            LOG.info("[{}] Using character set {}", name, charset.displayName());
            TextLineCodecFactory textLineCodecFactory = new TextLineCodecFactory(charset, LineDelimiter.UNIX,
                    LineDelimiter.AUTO);
            textLineCodecFactory.setDecoderMaxLineLength(maxLineLength);
            acceptor.getFilterChain().addLast("textLineCodec", new ProtocolCodecFilter(textLineCodecFactory));

            int numBuckets = Configs.outputBuckets.getInteger(pathConf);
            if (numBuckets > 1) {
                // Set up multiple writers for one MultiEnqueueHandler
                @SuppressWarnings("unchecked")
                BlockingQueue<String>[] queues = new BlockingQueue[numBuckets];

                for (int i = 0; i < numBuckets; i++) {
                    BlockingQueue<String> queue = new ArrayBlockingQueue<String>(queueLength);
                    queues[i] = queue;

                    // Set up the processor on the other end.
                    Writer writer = new Writer();
                    writer.setName(name);
                    writer.setConfig(pathConf);
                    writer.setHadoopConf(hConf);
                    writer.setQueue(queue);
                    writer.init();

                    // Set up MBean for the Writer
                    {
                        ObjectName mbeanName = null;
                        try {
                            mbeanName = new ObjectName(Writer.class.getPackage().getName() + ":type="
                                    + Writer.class.getSimpleName() + " [" + i + "]" + ",name=" + name);
                        } catch (MalformedObjectNameException e) {
                            LOG.error("[{}] Error creating MBean name.", name, e);
                        } catch (NullPointerException e) {
                            LOG.error("[{}] Error creating MBean name.", name, e);
                        }
                        try {
                            mbs.registerMBean(writer, mbeanName);
                        } catch (InstanceAlreadyExistsException e) {
                            LOG.error("[{}] Error registering MBean name.", name, e);
                        } catch (MBeanRegistrationException e) {
                            LOG.error("[{}] Error registering MBean name.", name, e);
                        } catch (NotCompliantMBeanException e) {
                            LOG.error("[{}] Error registering MBean name.", name, e);
                        }
                    }

                    executor.scheduleWithFixedDelay(writer, 0, 100, TimeUnit.MILLISECONDS);
                    writers.add(writer);
                }

                MultiEnqueueHandler handler = new MultiEnqueueHandler(queues);
                acceptor.setHandler(handler);

                // Set up MBean for the MultiEnqueueHandler
                {
                    ObjectName mbeanName = null;
                    try {
                        mbeanName = new ObjectName(MultiEnqueueHandler.class.getPackage().getName() + ":type="
                                + MultiEnqueueHandler.class.getSimpleName() + ",name=" + name);
                    } catch (MalformedObjectNameException e) {
                        LOG.error("[{}] Error creating MBean name.", name, e);
                    } catch (NullPointerException e) {
                        LOG.error("[{}] Error creating MBean name.", name, e);
                    }
                    try {
                        mbs.registerMBean(handler, mbeanName);
                    } catch (InstanceAlreadyExistsException e) {
                        LOG.error("[{}] Error registering MBean name.", name, e);
                    } catch (MBeanRegistrationException e) {
                        LOG.error("[{}] Error registering MBean name.", name, e);
                    } catch (NotCompliantMBeanException e) {
                        LOG.error("[{}] Error registering MBean name.", name, e);
                    }
                }
            } else {
                BlockingQueue<String> queue = new ArrayBlockingQueue<String>(queueLength);

                // Set up the processor on the other end.
                Writer writer = new Writer();
                writer.setName(name);
                writer.setConfig(pathConf);
                writer.setHadoopConf(hConf);
                writer.setQueue(queue);
                writer.init();

                // Set up MBean for the Writer
                {
                    ObjectName mbeanName = null;
                    try {
                        mbeanName = new ObjectName(Writer.class.getPackage().getName() + ":type="
                                + Writer.class.getSimpleName() + ",name=" + name);
                    } catch (MalformedObjectNameException e) {
                        LOG.error("[{}] Error creating MBean name.", name, e);
                    } catch (NullPointerException e) {
                        LOG.error("[{}] Error creating MBean name.", name, e);
                    }
                    try {
                        mbs.registerMBean(writer, mbeanName);
                    } catch (InstanceAlreadyExistsException e) {
                        LOG.error("[{}] Error registering MBean name.", name, e);
                    } catch (MBeanRegistrationException e) {
                        LOG.error("[{}] Error registering MBean name.", name, e);
                    } catch (NotCompliantMBeanException e) {
                        LOG.error("[{}] Error registering MBean name.", name, e);
                    }
                }

                executor.scheduleWithFixedDelay(writer, 0, 100, TimeUnit.MILLISECONDS);
                writers.add(writer);

                EnqueueHandler handler = new EnqueueHandler(queue);
                acceptor.setHandler(handler);

                // Set up MBean for the EnqueueHandler
                {
                    ObjectName mbeanName = null;
                    try {
                        mbeanName = new ObjectName(EnqueueHandler.class.getPackage().getName() + ":type="
                                + EnqueueHandler.class.getSimpleName() + ",name=" + name);
                    } catch (MalformedObjectNameException e) {
                        LOG.error("[{}] Error creating MBean name.", name, e);
                    } catch (NullPointerException e) {
                        LOG.error("[{}] Error creating MBean name.", name, e);
                    }
                    try {
                        mbs.registerMBean(handler, mbeanName);
                    } catch (InstanceAlreadyExistsException e) {
                        LOG.error("[{}] Error registering MBean name.", name, e);
                    } catch (MBeanRegistrationException e) {
                        LOG.error("[{}] Error registering MBean name.", name, e);
                    } catch (NotCompliantMBeanException e) {
                        LOG.error("[{}] Error registering MBean name.", name, e);
                    }
                }
            }

            acceptor.getSessionConfig().setReadBufferSize(Configs.tcpReadBufferSize.getInteger(pathConf));
            acceptor.getSessionConfig().setIdleTime(IdleStatus.BOTH_IDLE, 5);

            while (true) {
                try {
                    acceptor.bind(new InetSocketAddress(bindAddress, port));
                } catch (IOException e) {
                    LOG.error("Error binding to {}:{}.  Retrying...", bindAddress, port);

                    try {
                        Thread.sleep(2000);
                    } catch (InterruptedException e1) {
                        // nothing
                    }

                    continue;
                }

                break;
            }

        }
    }

    // Register a shutdown hook..
    Runtime.getRuntime().addShutdownHook(new Thread() {
        public void run() {
            LOG.info("Shutting down");

            LOG.info("Unbinding and disposing of all IoAcceptors");
            for (IoAcceptor acceptor : acceptors) {
                acceptor.unbind();
                acceptor.dispose(true);
            }

            LOG.info("Shutting down worker threadpools.  This could take a little while.");
            executor.shutdown();
            try {
                executor.awaitTermination(10, TimeUnit.MINUTES);
            } catch (InterruptedException e) {
                LOG.error("Interrupted waiting for writer threadpool termination.", e);
            }
            if (!executor.isTerminated()) {
                LOG.error("Threadpool did not terminate cleanly.");
            }

            LOG.info("Cleaning out any remaining messages from the queues.");
            List<Thread> threads = new ArrayList<Thread>();
            for (final Writer writer : writers) {
                Runnable r = new Runnable() {
                    @Override
                    public void run() {
                        try {
                            writer.runAndClose();
                        } catch (Throwable t) {
                            LOG.error("Error shutting down writer [{}]", writer.getName(), t);
                        }
                    }
                };
                Thread t = new Thread(r);
                t.setDaemon(false);
                t.start();
                threads.add(t);
            }

            for (Thread t : threads) {
                try {
                    t.join();
                } catch (InterruptedException e) {
                    LOG.error("Interrupted waiting for thread to finish.");
                }
            }

            LOG.info("Closing filesystems.");
            try {
                FileSystem.closeAll();
            } catch (Throwable t) {
                LOG.error("Error closing filesystems.", t);
            }

            LOG.info("Finished shutting down cleanly.");
        }
    });
}

From source file:com.streamsets.pipeline.stage.destination.hdfs.BaseHdfsTargetIT.java

License:Apache License

@BeforeClass
public static void setUpClass() throws Exception {
    //setting some dummy kerberos settings to be able to test a mis-setting
    System.setProperty("java.security.krb5.realm", "foo");
    System.setProperty("java.security.krb5.kdc", "localhost:0");

    File minidfsDir = new File("target/minidfs").getAbsoluteFile();
    if (!minidfsDir.exists()) {
        Assert.assertTrue(minidfsDir.mkdirs());
    }
    Set<PosixFilePermission> set = new HashSet<PosixFilePermission>();
    set.add(PosixFilePermission.OWNER_EXECUTE);
    set.add(PosixFilePermission.OWNER_READ);
    set.add(PosixFilePermission.OWNER_WRITE);
    set.add(PosixFilePermission.OTHERS_READ);
    java.nio.file.Files.setPosixFilePermissions(minidfsDir.toPath(), set);
    System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, minidfsDir.getPath());
    Configuration conf = new HdfsConfiguration();
    conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".hosts", "*");
    conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".groups", "*");
    fooUgi = UserGroupInformation.createUserForTesting("foo", new String[] { "all" });
    EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
    FileSystem.closeAll();
    miniDFS = new MiniDFSCluster.Builder(conf).build();
    miniDFS.getFileSystem().setPermission(new Path("/"), FsPermission.createImmutable((short) 0777));
}

From source file:com.streamsets.pipeline.stage.destination.hdfs.metadataexecutor.HdfsMetadataExecutorIT.java

License:Apache License

@BeforeClass
public static void setUpClass() throws Exception {
    // Conf dir
    new File(confDir).mkdirs();

    //setting some dummy kerberos settings to be able to test a mis-setting
    System.setProperty("java.security.krb5.realm", "foo");
    System.setProperty("java.security.krb5.kdc", "localhost:0");

    File minidfsDir = new File(baseDir, "minidfs").getAbsoluteFile();
    if (!minidfsDir.exists()) {
        Assert.assertTrue(minidfsDir.mkdirs());
    }
    Set<PosixFilePermission> set = new HashSet<>();
    set.add(PosixFilePermission.OWNER_EXECUTE);
    set.add(PosixFilePermission.OWNER_READ);
    set.add(PosixFilePermission.OWNER_WRITE);
    set.add(PosixFilePermission.OTHERS_READ);
    java.nio.file.Files.setPosixFilePermissions(minidfsDir.toPath(), set);
    System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, minidfsDir.getPath());
    Configuration conf = new HdfsConfiguration();
    conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".hosts", "*");
    conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".groups", "*");
    conf.set("dfs.namenode.acls.enabled", "true");
    fooUgi = UserGroupInformation.createUserForTesting("foo", new String[] { "all" });
    EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
    FileSystem.closeAll();
    miniDFS = new MiniDFSCluster.Builder(conf).build();
    miniDFS.getFileSystem().setPermission(new Path("/"), FsPermission.createImmutable((short) 0777));
    fs = miniDFS.getFileSystem();
    writeConfiguration(miniDFS.getConfiguration(0), confDir + "core-site.xml");
    writeConfiguration(miniDFS.getConfiguration(0), confDir + "hdfs-site.xml");
}

From source file:com.uber.hoodie.io.TestHoodieCommitArchiveLog.java

License:Apache License

@AfterClass
public static void cleanUp() throws Exception {
    // Need to closeAll to clear FileSystem.Cache, required because DFS and LocalFS used in the
    // same JVM
    FileSystem.closeAll();

    if (hdfsTestService != null) {
        hdfsTestService.stop();
        dfsCluster.shutdown();
    }
}