Example usage for org.apache.hadoop.fs Path getParent

Introduction

This page shows example usages of org.apache.hadoop.fs.Path#getParent.

Prototype

public Path getParent() 

Document

Returns the parent of a path or null if at root.
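
For reference, a minimal sketch of the method's behavior (the paths below are illustrative):

import org.apache.hadoop.fs.Path;

Path p = new Path("/user/hive/warehouse");
Path parent = p.getParent();            // yields the Path "/user/hive"
Path grandparent = parent.getParent();  // yields the Path "/user"

// At the root there is no parent, so guard against null before dereferencing:
Path root = new Path("/");
if (root.getParent() == null) {
    System.out.println("reached the filesystem root");
}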

Usage

From source file:com.rapleaf.ramhdfs.RamFileSystem.java

License:Apache License

@Override
public boolean mkdirs(Path f) throws IOException {
    if (f == null) {
        throw new IllegalArgumentException("mkdirs path arg is null");
    }
    Path parent = f.getParent();
    FileObject p2f = pathToFileObject(f);
    if (isDirectory(p2f)) {
        return true;
    }
    if (parent != null) {
        FileObject parent2f = pathToFileObject(parent);
        if (parent2f != null && parent2f.exists() && !isDirectory(parent2f)) {
            throw new FileAlreadyExistsException("Parent path is not a directory: " + parent);
        }
    }
    return (parent == null || mkdirs(parent)) && (createDirectory(p2f) || isDirectory(p2f));
}
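
Note how getParent() drives the recursion here: mkdirs walks up via f.getParent() until it returns null at the root, fails fast if any ancestor exists as a regular file, and creates the missing directories as the recursion unwinds.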

From source file:com.redsqirl.workflow.server.connect.HDFSInterface.java

License:Open Source License

/**
 * Copy from local fs to HDFS
 * 
 * @param local_path path on the local file system to copy from
 * @param hdfs_path destination path on HDFS
 * @return Error message, or null on success
 * @throws RemoteException
 */
@Override
public String copyFromLocal(String local_path, String hdfs_path) throws RemoteException {
    String error = null;
    Path localP = new Path(local_path), hdfsP = new Path(hdfs_path);
    File failFile = new File(localP.getParent().toString(), "." + localP.getName() + ".crc");
    try {
        FileChecker hChO = new FileChecker(new File(local_path));
        if (hChO.exists()) {
            FileSystem fs = NameNodeVar.getFS();
            if (failFile.exists()) {
                failFile.delete();
            }
            fs.copyFromLocalFile(false, localP, hdfsP);
        } else {
            error = LanguageManagerWF.getText("HdfsInterface.ouputexists");
        }

    } catch (IOException e) {
        logger.error(e.getMessage());
        error = LanguageManagerWF.getText("HdfsInterface.errormove", new Object[] { e.getMessage() });
    }
    if (error != null) {
        if (failFile.exists()) {
            failFile.delete();
        }
        logger.debug(error);
    }
    return error;
}

From source file:com.redsqirl.workflow.server.connect.HDFSInterface.java

License:Open Source License

public String copyToLocal(String hdfs_path, String local_path, boolean writtableByAll) throws RemoteException {
    String error = null;
    Path localP = new Path(local_path), hdfsP = new Path(hdfs_path);
    File failFile = new File(localP.getParent().toString(), "." + localP.getName() + ".crc");
    try {
        FileChecker hChN = new FileChecker(new File(local_path));
        HdfsFileChecker hChO = new HdfsFileChecker(hdfsP);
        if (!hChN.exists() && hChO.exists()) {
            FileSystem fs = NameNodeVar.getFS();
            if (failFile.exists()) {
                failFile.delete();
            }
            fs.copyToLocalFile(false, hdfsP, localP);
            if (writtableByAll) {
                new File(local_path).setWritable(true, false);
            }
        } else {
            error = LanguageManagerWF.getText("HdfsInterface.ouputexists");
        }

    } catch (IOException e) {
        logger.error(e.getMessage());
        error = LanguageManagerWF.getText("HdfsInterface.errormove", new Object[] { e.getMessage() });
    }
    if (error != null) {
        logger.warn(error);
        if (failFile.exists()) {
            failFile.delete();
        }
    }
    return error;
}
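
In both copy methods, localP.getParent() resolves the directory that holds the local file so the ".<name>.crc" checksum sidecar that Hadoop writes next to it can be deleted before the copy and on failure.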

From source file:com.redsqirl.workflow.server.connect.jdbc.JdbcStore.java

License:Open Source License

public static String writePassword(String connectionName, JdbcDetails details) {
    String passwordPathStr = "/user/" + System.getProperty("user.name") + "/.redsqirl/jdbc_password/password_"
            + connectionName;
    Path passwordPath = new Path(passwordPathStr);

    try {
        FileSystem fileSystem = NameNodeVar.getFS();
        if (fileSystem.exists(passwordPath)) {
            BufferedReader br = new BufferedReader(new InputStreamReader(fileSystem.open(passwordPath)));
            String line = br.readLine();
            if (line == null || !line.equals(details.getPassword())) {
                fileSystem.delete(passwordPath, false);
            }
            br.close();
        }
        if (!fileSystem.exists(passwordPath) && details.getPassword() != null) {
            if (!fileSystem.exists(passwordPath.getParent())) {
                fileSystem.mkdirs(passwordPath.getParent());
                fileSystem.setPermission(passwordPath.getParent(), new FsPermission("700"));
            }
            FSDataOutputStream out = fileSystem.create(passwordPath);
            out.write(details.getPassword().getBytes());
            out.close();
            fileSystem.setPermission(passwordPath, new FsPermission("400"));
        }
    } catch (Exception e) {
        logger.error(e, e);
    }
    return passwordPathStr;
}
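
passwordPath.getParent() is used twice: first to test whether the jdbc_password directory exists, then to create it and lock it down to mode 700 before the password file itself is written with mode 400.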

From source file:com.rim.logdriver.LockedFs.java

License:Apache License

public void move(Configuration conf, String[] from, String to) throws IOException {
    FileSystem fs = FileSystem.get(conf);

    List<FileStatus> fromList = new ArrayList<FileStatus>();
    for (String s : from) {
        FileStatus[] statuses = fs.globStatus(new Path(s));
        if (statuses == null) {
            continue;
        }
        for (FileStatus status : statuses) {
            fromList.add(status);
        }
    }

    Path toPath = new Path(to);
    Boolean toExists = fs.exists(toPath);
    FileStatus toFileStatus = null;
    if (toExists) {
        toFileStatus = fs.getFileStatus(toPath);
    }

    // If there is no from, that's a problem.
    if (fromList.isEmpty()) {
        throw new IOException("No input files found");
    }

    // If the to exists, and is a file, that's a problem too.
    if (toExists && !toFileStatus.isDir()) {
        throw new IOException("Destination file exists:" + to);
    }

    // If the destination exists, and is a directory, then ensure that none of
    // the from list names will clash with existing contents of the directory.
    if (toExists && toFileStatus.isDir()) {
        for (FileStatus fromStatus : fromList) {
            String name = fromStatus.getPath().getName();
            if (fs.exists(new Path(toPath, name))) {
                throw new IOException("Destination file exists:" + to + "/" + name);
            }
        }
    }

    // If the destination doesn't exist, but it ends with a slash, then create
    // it as a directory.
    if (!toExists && to.endsWith("/")) {
        fs.mkdirs(toPath);
        toFileStatus = fs.getFileStatus(toPath);
        toExists = true;
    }

    // If the destination doesn't exist, and there is more than one 'from', then
    // create a directory.
    if (!toExists && fromList.size() > 1) {
        fs.mkdirs(toPath);
        toFileStatus = fs.getFileStatus(toPath);
    }

    // If there was only one from, then just rename it to to
    if (fromList.size() == 1) {
        fs.mkdirs(toPath.getParent());
        fs.rename(fromList.get(0).getPath(), toPath);
    }

    // If there was more than one from, then for each file in the from list,
    // move it to the to directory.
    if (fromList.size() > 1) {
        for (FileStatus fromStatus : fromList) {
            String name = fromStatus.getPath().getName();
            fs.rename(fromStatus.getPath(), new Path(toPath, name));
        }
    }
}
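
In the single-source case, fs.mkdirs(toPath.getParent()) ensures the destination's parent directory exists before the rename, since a rename fails when the target's parent is missing.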

From source file:com.savy3.nonequijoin.MapOutputSampler.java

License:Apache License

/**
 * Driver for InputSampler MapReduce Job
 */
public static void runMap(Job job, Path sampleInputPath)
        throws IOException, IllegalStateException, ClassNotFoundException, InterruptedException {
    LOG.info("Running a MapReduce Job on Sample Input File" + sampleInputPath.toString());

    Configuration conf = new Configuration();
    conf.setBoolean("mapreduce.job.ubertask.enable", true);
    conf.set("numSamples", "" + (job.getNumReduceTasks() - 1));
    Job sampleJob = new Job(conf);
    sampleJob.setMapperClass(job.getMapperClass());
    sampleJob.setReducerClass(SampleKeyReducer.class);
    sampleJob.setJarByClass(job.getMapperClass());
    sampleJob.setMapOutputKeyClass(job.getMapOutputKeyClass());
    sampleJob.setMapOutputValueClass(job.getMapOutputValueClass());
    sampleJob.setOutputKeyClass(job.getMapOutputKeyClass());
    sampleJob.setOutputValueClass(NullWritable.class);
    sampleJob.setInputFormatClass(SequenceFileInputFormat.class);
    sampleJob.setOutputFormatClass(SequenceFileOutputFormat.class);

    SequenceFileInputFormat.addInputPath(sampleJob, sampleInputPath);
    FileSystem fs = FileSystem.get(conf);

    Path out = new Path(sampleInputPath.getParent(), "mapOut");
    fs.delete(out, true);

    SequenceFileOutputFormat.setOutputPath(sampleJob, out);

    sampleJob.waitForCompletion(true);

    LOG.info("Sample MapReduce Job Output File" + out.toString());

    Path partFile = new Path(out, "part-r-00000");
    Path tmpFile = new Path("/_tmp");
    fs.delete(tmpFile, true);
    fs.rename(partFile, tmpFile);
    fs.delete(sampleInputPath.getParent(), true);
    fs.rename(new Path("/_tmp"), sampleInputPath.getParent());

    LOG.info("Sample partitioning file cpied to location " + sampleInputPath.getParent().toString());
}
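
sampleInputPath.getParent() serves as the working directory throughout: the sample job writes to a sibling "mapOut" directory, and at the end the parent is deleted and replaced by the renamed partition file.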

From source file:com.shopzilla.hadoop.repl.commands.completers.HDFSFileNameCompletor.java

License:Apache License

@Override
public int complete(final String buffer, final int cursor, final List candidates) {
    try {

        if (buffer == null) {
            return 0;
        }

        final String translated = buffer;
        final Path f = new Path(root, translated);
        final Path dir;

        if (translated.endsWith(File.separator)) {
            dir = f;
        } else {
            dir = f.getParent();
        }

        final Path[] entries = (dir == null) ? new Path[0] : listFiles(dir);

        return matchFiles(buffer, translated, entries, candidates);
    } catch (final Exception ex) {
        // Don't do anything
        return 0;
    } finally {
        sortFileNames(candidates);
    }
}
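
If the buffer already ends with a separator it names the directory to list; otherwise f.getParent() yields the directory whose entries are matched against the partial file name, the usual shape of a file name completer.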

From source file:com.splicemachine.storage.HNIOFileSystem.java

License:Apache License

@Override
public void concat(Path target, Path... sources) throws IOException {
    org.apache.hadoop.fs.Path[] srcPaths = new org.apache.hadoop.fs.Path[sources.length];
    for (int i = 0; i < sources.length; i++) {
        srcPaths[i] = new org.apache.hadoop.fs.Path(sources[i].getParent().toString(),
                sources[i].getFileName().toString());
    }
    org.apache.hadoop.fs.Path targetPath = new org.apache.hadoop.fs.Path(target.getParent().toString(),
            target.getFileName().toString());

    if (isDistributedFS) {
        fs.concat(targetPath, srcPaths);
    } else {
        for (org.apache.hadoop.fs.Path src : srcPaths) {
            fs.copyFromLocalFile(true, false, src, targetPath);
        }
    }
}
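
Each source is rebuilt as an org.apache.hadoop.fs.Path from its parent directory plus its file name, bridging the surrounding Path API to Hadoop's before calling fs.concat (with a copy-based fallback on non-distributed file systems).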

From source file:com.splout.db.engine.MySQLOutputFormat.java

License:Apache License

public void initPartition(int partition, Path local) throws IOException {

    Path mysqlDb = new Path(local.getParent(), partition + "");

    LOG.info("Initializing SQL connection [" + partition + "]");
    try {
        PortLock portLock = PortUtils.getNextAvailablePort(EmbeddedMySQLConfig.DEFAULT_PORT);

        EmbeddedMySQL mySQL = null;
        EmbeddedMySQLConfig config = null;
        HashMap<String, Object> customConfig = new HashMap<String, Object>();

        // Fix the memory used for indexing. The most important parameters are myisam_sort_buffer_size
        // and key_buffer_size. See http://dev.mysql.com/doc/refman/5.0/en/server-system-variables.html
        long totalMem = getConf().getLong(GLOBAL_MEMORY_AVAILABLE_FOR_INDEXING, 100 * 1024 * 1024);
        double shareForSortBuffer = 0.9;
        customConfig.put("myisam_sort_buffer_size", (long) (shareForSortBuffer * totalMem));
        customConfig.put("key_buffer_size", (long) ((1 - shareForSortBuffer) * totalMem));
        customConfig.put("myisam_max_sort_file_size", 9223372036854775807l);

        try {
            File mysqlDir = new File(mysqlDb.toString());
            LOG.info("Going to instantiate a MySQLD in: " + mysqlDir + ", port [" + portLock.getPort()
                    + "] (partition: " + partition + ")");

            config = new EmbeddedMySQLConfig(portLock.getPort(), EmbeddedMySQLConfig.DEFAULT_USER,
                    EmbeddedMySQLConfig.DEFAULT_PASS, mysqlDir, customConfig);

            mySQL = new EmbeddedMySQL(config);
            mySQL.start(true);
        } catch (Exception e) {
            throw e;
        } finally {
            portLock.release();
        }

        mySQLs.put(partition, mySQL);

        // MySQL is successfully started at this point, or an Exception would have been thrown.
        Class.forName(EmbeddedMySQL.DRIVER);
        Connection conn = DriverManager.getConnection(config.getLocalJDBCConnection(GENERATED_DB_NAME),
                config.getUser(), config.getPass());
        conn.setAutoCommit(false);
        connCache.put(partition, conn);
        Statement st = conn.createStatement();

        // Init transaction
        for (String sql : getPreSQL()) {
            LOG.info("Executing: " + sql);
            st.execute(sql);
        }
        st.execute("BEGIN");
        st.close();

        Map<String, PreparedStatement> stMap = new HashMap<String, PreparedStatement>();
        stCache.put(partition, stMap);
    } catch (Exception e) {
        throw new IOException(e);
    }
}
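
local.getParent() places each partition's embedded MySQL data directory next to the local output path, one sibling directory per partition number.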

From source file:com.splunk.shuttl.archiver.filesystem.HadoopFileSystemArchive.java

License:Apache License

/**
 * Do NOT call nor override this method outside this class. It's meant to be
 * private but is package private for testing purposes. If you want to expose
 * this method make it public or protected!
 */
/* package private */ void move(Path src, Path dst) throws IOException {
    hadoopFileSystem.mkdirs(dst.getParent());
    hadoopFileSystem.rename(src, dst);
}
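
Creating dst.getParent() before the rename is the usual idiom for moving a file to a destination whose directory may not exist yet.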