Example usage for org.apache.hadoop.fs FileSystem copyToLocalFile

List of usage examples for org.apache.hadoop.fs FileSystem copyToLocalFile

Introduction

On this page you can find example usages of org.apache.hadoop.fs.FileSystem copyToLocalFile.

Prototype

public void copyToLocalFile(boolean delSrc, Path src, Path dst) throws IOException 

Document

Copy a file from a remote filesystem to the local one. The delSrc flag indicates whether the source file should be deleted after the copy.
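
Before the longer, real-world examples below, here is a minimal, self-contained sketch of the call. The paths and the default Configuration are hypothetical placeholders; adjust them for your cluster and local machine.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CopyToLocalExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        // Hypothetical locations: a remote (e.g. HDFS) source and a local destination.
        Path src = new Path("/user/demo/input/data.txt");
        Path dst = new Path("/tmp/data.txt");

        // delSrc = false keeps the remote file; pass true to delete it after the copy.
        fs.copyToLocalFile(false, src, dst);
    }
}

Passing true as the first argument turns the copy into a move: the remote file is removed once it has been written locally, which is how the first example below cleans up its temporary HDFS folder.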

Usage

From source file:com.blackberry.logtools.LogTools.java

License:Apache License

public void runPigLocal(Map<String, String> params, String out, String tmp, final boolean quiet,
        final boolean silent, Configuration conf, String queue_name, String additional_jars, File pig_tmp,
        ArrayList<String> D_options, String PIG_DIR, FileSystem fs)
        throws IllegalArgumentException, IOException {
    //Create a temp directory on the local filesystem to hold the data to sort
    final File local_tmp = Files.createTempDir();
    local_tmp.deleteOnExit();

    Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
        @Override
        public void run() {
            try {
                logConsole(quiet, silent, warn, "Deleting tmp files in local tmp");
                delete(local_tmp);
            } catch (IOException e) {
                // TODO Auto-generated catch block
                e.printStackTrace();
            }
        }
    }));

    //Set input parameter for pig job
    params.put("tmpdir", local_tmp.toString() + "/" + tmp);

    //Check whether out is '-', meaning write to stdout
    String pigout;
    if (out.equals("-")) {
        params.put("out", local_tmp + "/" + tmp + "/final");
        pigout = local_tmp + "/" + tmp + "/final";
    } else {
        params.put("out", local_tmp + "/" + StringEscapeUtils.escapeJava(out));
        pigout = StringEscapeUtils.escapeJava(out);
    }

    //Copy the tmp folder from HDFS to the local tmp directory, and delete the remote folder
    fs.copyToLocalFile(true, new Path(tmp), new Path(local_tmp + "/" + tmp));

    try {
        logConsole(quiet, silent, info, "Running PIG Command");
        conf.set("mapred.job.queue.name", queue_name);
        conf.set("pig.additional.jars", additional_jars);
        conf.set("pig.exec.reducers.bytes.per.reducer", Integer.toString(100 * 1000 * 1000));
        conf.set("pig.logfile", pig_tmp.toString());
        conf.set("hadoopversion", "23");
        //PIG temp directory set to be able to delete all temp files/directories
        conf.set("pig.temp.dir", local_tmp.getAbsolutePath());

        //Setting output separator for logdriver
        String DEFAULT_OUTPUT_SEPARATOR = "\t";
        Charset UTF_8 = Charset.forName("UTF-8");
        String outputSeparator = conf.get("logdriver.output.field.separator", DEFAULT_OUTPUT_SEPARATOR);
        byte[] bytes = outputSeparator.getBytes(UTF_8);
        if (bytes.length != 1) {
            System.err.println(
                    ";******************** The output separator must be a single byte in UTF-8. ******************** ");
            System.exit(1);
        }
        conf.set("logdriver.output.field.separator", Byte.toString(bytes[0]));

        dOpts(D_options, silent, out, conf);

        PigServer pigServer = new PigServer(ExecType.LOCAL, conf);
        UserGroupInformation.setConfiguration(new Configuration(false));
        pigServer.registerScript(PIG_DIR + "/formatAndSortLocal.pg", params);
    } catch (Exception e) {
        e.printStackTrace();
        System.exit(1);
    }

    logConsole(quiet, silent, warn, "PIG Job Completed.");

    if (out.equals("-")) {
        System.out.println(";#################### DATA RESULTS ####################");
        try {
            File results = new File(pigout);
            String[] resultList = results.list();

            //Find the files in the directory, open and printout results
            for (int i = 0; i < resultList.length; i++) {
                if (resultList[i].contains("part-") && !resultList[i].contains(".crc")) {
                    BufferedReader br = new BufferedReader(
                            new FileReader(new File(pigout + "/" + resultList[i])));
                    String line;
                    line = br.readLine();
                    while (line != null) {
                        System.out.println(line);
                        line = br.readLine();
                    }
                    br.close();
                }
            }
            System.out.println(";#################### END OF RESULTS ####################");
        } catch (IOException e) {
            e.printStackTrace();
            System.exit(1);
        }
    } else {
        fs.copyFromLocalFile(new Path(local_tmp + "/" + StringEscapeUtils.escapeJava(out)), new Path(pigout));
        System.out.println(
                ";#################### Done. Search results are in " + pigout + " ####################");
    }
}

From source file:com.nexr.pyhive.hive.udf.PyUDF.java

License:Apache License

private void loadPyObjects(String name) throws HiveException {
    if (!funcSet.contains(name)) {
        try {

            /**
             * import package
             */
            String load = "import marshal" + "\n" + "import types" + "\n";
            pi.exec(load);

            FileSystem fs = FileSystem.get(UDFUtils.getConf());

            boolean srcDel = false;
            Path varsrc = UDFUtils.getVarPath(name);
            if (fs.exists(varsrc)) {
                Path vardst = getVarLocalPath(name);
                fs.copyToLocalFile(srcDel, varsrc, vardst);

                /**
                 * loading variable
                 */
                load = String.format("file = open('%s','r')\n" + "dic = marshal.load(file)\n"
                        + "locals().update(dic)\n" + "file.close()\n", vardst.toString());
                pi.exec(load);
            }

            Path funcsrc = UDFUtils.getFuncPath(name);
            Path funcdst = getFuncLocalPath(name);
            fs.copyToLocalFile(srcDel, funcsrc, funcdst);

            /**
             * loading function
             */
            load = String.format(
                    "file = open('%s','r')" + "\n" + "code = marshal.loads(file.read())\n"
                            + "%s = types.FunctionType(code,globals(),'%s')\n" + "file.close()\n",
                    funcdst.toString(), name, name);
            pi.exec(load);
        } catch (Exception e) {
            throw new HiveException(e);
        }
        funcSet.add(name);
    }
}

From source file:com.nexr.rhive.hive.udf.RUDF.java

License:Apache License

private void loadRObjects(String name) throws HiveException {
    if (!funcSet.contains(name)) {
        try {
            FileSystem fs = FileSystem.get(conf);

            boolean srcDel = false;
            Path src = UDFUtils.getPath(name);
            Path dst = getLocalPath(name);
            fs.copyToLocalFile(srcDel, src, dst);

            String dataFilePath = dst.toString();
            getConnection().eval(String.format("load(file=\"%s\")", dataFilePath));

        } catch (Exception e) {
            throw new HiveException(e);
        }

        funcSet.add(name);
    }
}

From source file:com.redsqirl.workflow.server.connect.HDFSInterface.java

License:Open Source License

public String copyToLocal(String hdfs_path, String local_path, boolean writtableByAll) throws RemoteException {
    String error = null;
    Path localP = new Path(local_path), hdfsP = new Path(hdfs_path);
    File failFile = new File(localP.getParent().toString(), "." + localP.getName() + ".crc");
    try {
        FileChecker hChN = new FileChecker(new File(local_path));
        HdfsFileChecker hChO = new HdfsFileChecker(hdfsP);
        if (!hChN.exists() && hChO.exists()) {
            FileSystem fs = NameNodeVar.getFS();
            if (failFile.exists()) {
                failFile.delete();
            }
            fs.copyToLocalFile(false, hdfsP, localP);
            if (writtableByAll) {
                new File(local_path).setWritable(true, false);
            }
        } else {
            error = LanguageManagerWF.getText("HdfsInterface.ouputexists");
        }

    } catch (IOException e) {
        logger.error(e.getMessage());
        error = LanguageManagerWF.getText("HdfsInterface.errormove", new Object[] { e.getMessage() });
    }
    if (error != null) {
        logger.warn(error);
        if (failFile.exists()) {
            failFile.delete();
        }
    }
    return error;
}

From source file:com.yahoo.druid.hadoop.DruidRecordReader.java

License:Apache License

private void getSegmentFiles(String pathStr, File dir, FileSystem fs) throws IOException {
    if (!dir.exists() && !dir.mkdirs()) {
        throw new IOException(dir + " does not exist and creation failed");
    }

    final File tmpDownloadFile = File.createTempFile("dataSegment", ".zip");
    if (tmpDownloadFile.exists() && !tmpDownloadFile.delete()) {
        logger.warn("Couldn't clear out temporary file [%s]", tmpDownloadFile);
    }

    try {
        final Path inPath = new Path(pathStr);
        fs.copyToLocalFile(false, inPath, new Path(tmpDownloadFile.toURI()));

        long size = 0L;
        try (final ZipInputStream zipInputStream = new ZipInputStream(
                new BufferedInputStream(new FileInputStream(tmpDownloadFile)))) {
            final byte[] buffer = new byte[1 << 13];
            for (ZipEntry entry = zipInputStream.getNextEntry(); entry != null; entry = zipInputStream
                    .getNextEntry()) {
                final String fileName = entry.getName();
                try (final FileOutputStream fos = new FileOutputStream(
                        dir.getAbsolutePath() + File.separator + fileName)) {
                    for (int len = zipInputStream.read(buffer); len >= 0; len = zipInputStream.read(buffer)) {
                        size += len;
                        fos.write(buffer, 0, len);
                    }
                }
            }
        }
    } finally {
        if (tmpDownloadFile.exists() && !tmpDownloadFile.delete()) {
            logger.warn("Temporary download file could not be deleted [%s]", tmpDownloadFile);
        }
    }
}

From source file:edu.mit.jwi.data.FileProvider.java

License:Creative Commons License

/**
 * Make a File from a Hadoop Path.
 * 
 * Adapted from
 * http://stackoverflow.com/questions/3444313/how-to-convert-a-hadoop-path-
 * object-into-a-java-file-object
 * 
 * @param hdfsPath the HDFS path whose files are copied locally
 * @param conf the Hadoop configuration used to resolve the filesystem
 * @author Mauro Pelucchi
 * @since JWI 2.3.3-hadoop
 * @return a local temporary directory containing the copied files, or null on failure
 */
public static File MakeFileFromPath(Path hdfsPath, Configuration conf) {
    try {
        FileSystem fs = FileSystem.get(hdfsPath.toUri(), conf);
        File tempFolder = File.createTempFile(hdfsPath.getName(), "");
        if (!(tempFolder.delete())) {
            throw new IOException("Could not delete temp file: " + tempFolder.getAbsolutePath());
        }

        if (!(tempFolder.mkdir())) {
            throw new IOException("Could not create temp directory: " + tempFolder.getAbsolutePath());
        }
        FileStatus[] status = fs.listStatus(hdfsPath);
        for (int i = 0; i < status.length; i++) {
            System.out.println("------------------------------------");
            if (status[i].isFile()) {
                System.out.println(status[i].getPath());
                fs.copyToLocalFile(false, status[i].getPath(), new Path(tempFolder.getAbsolutePath()));
                //System.out.println(ReadFileFromHdfs(fs, status[i].getPath()));
            }
        }

        tempFolder.deleteOnExit();

        File[] files = tempFolder.listFiles();
        for (int i = 0; i < files.length; i++) {
            System.out.println("------------------------------------");
            System.out.println(files[i].getPath());
            //System.out.println(ReadFile(files[i].getPath()));
            if (files[i].getName().startsWith(".")) {
                System.out.println("Delete --> " + files[i].getPath());
                if (!(files[i].delete())) {
                    throw new IOException("Could not delete temp file: " + files[i].getAbsolutePath());
                }
            }
        }

        return tempFolder;
    } catch (Exception e) {
        e.printStackTrace();
    }
    return null;
}

From source file:hydrograph.engine.spark.datasource.utils.AWSS3Util.java

License:Apache License

public void upload(RunFileTransferEntity runFileTransferEntity) {
    log.debug("Start AWSS3Util upload");
    int retryAttempt = 0;
    int i;

    java.nio.file.Path inputfile = new File(runFileTransferEntity.getLocalPath()).toPath();
    String keyName = inputfile.getFileName().toString();
    log.info("keyName is: " + keyName);
    log.info("bucket name is:" + runFileTransferEntity.getBucketName());
    log.info("Folder Name is" + runFileTransferEntity.getFolder_name_in_bucket());

    String amazonFileUploadLocationOriginal = null;
    FileInputStream stream = null;
    File filecheck = new File(runFileTransferEntity.getLocalPath());
    if (runFileTransferEntity.getFailOnError())
        if (!(filecheck.isFile() || filecheck.isDirectory())
                && !(runFileTransferEntity.getLocalPath().contains("hdfs://"))) {
            Log.error("Invalid local path.Please provide valid path");
            throw new AWSUtilException("Invalid local path");
        }

    if (runFileTransferEntity.getRetryAttempt() == 0)
        retryAttempt = 1;
    else
        retryAttempt = runFileTransferEntity.getRetryAttempt();

    //Tracks whether the transfer succeeded; checked after the retry loop.
    boolean done = false;

    for (i = 0; i < retryAttempt; i++) {
        log.info("connection attempt: " + (i + 1));
        try {
            AmazonS3 s3Client = null;
            ClientConfiguration clientConf = new ClientConfiguration();
            clientConf.setProtocol(Protocol.HTTPS);
            if (runFileTransferEntity.getCrediationalPropertiesFile() == null) {
                BasicAWSCredentials creds = new BasicAWSCredentials(runFileTransferEntity.getAccessKeyID(),
                        runFileTransferEntity.getSecretAccessKey());
                s3Client = AmazonS3ClientBuilder.standard().withClientConfiguration(clientConf)
                        .withRegion(runFileTransferEntity.getRegion())
                        .withCredentials(new AWSStaticCredentialsProvider(creds)).build();
            } else {
                File securityFile = new File(runFileTransferEntity.getCrediationalPropertiesFile());
                PropertiesCredentials creds = new PropertiesCredentials(securityFile);

                s3Client = AmazonS3ClientBuilder.standard().withClientConfiguration(clientConf)
                        .withRegion(runFileTransferEntity.getRegion())
                        .withCredentials(new AWSStaticCredentialsProvider(creds)).build();
            }

            String s3folderName = null;
            String filepath = runFileTransferEntity.getFolder_name_in_bucket();
            log.debug("file path name" + filepath);
            s3folderName = filepath;

            if (s3folderName != null && !s3folderName.trim().equals("")) {
                amazonFileUploadLocationOriginal = runFileTransferEntity.getBucketName() + "/" + s3folderName;
            } else {
                amazonFileUploadLocationOriginal = runFileTransferEntity.getBucketName();
            }

            File f = new File(runFileTransferEntity.getLocalPath());

            if (runFileTransferEntity.getLocalPath().contains("hdfs://")) {
                log.debug("Provided HDFS local path ");
                String inputPath = runFileTransferEntity.getLocalPath();
                String s1 = inputPath.substring(7, inputPath.length());
                String s2 = s1.substring(0, s1.indexOf("/"));
                File file = new File("/tmp");
                if (!file.exists())
                    file.mkdir();
                Configuration conf = new Configuration();
                conf.set("fs.defaultFS", "hdfs://" + s2);
                FileSystem hdfsFileSystem = FileSystem.get(conf);
                Path local = new Path("/tmp");
                String s = inputPath.substring(7, inputPath.length());
                String hdfspath = s.substring(s.indexOf("/"), s.length());
                Path hdfs = new Path(hdfspath);
                ObjectMetadata objectMetadata = new ObjectMetadata();
                if (runFileTransferEntity.getEncoding() != null)
                    objectMetadata.setContentEncoding(runFileTransferEntity.getEncoding());
                File dir = new File(hdfspath);
                if (hdfsFileSystem.isDirectory(new Path(hdfspath))) {
                    InputStream is = null;
                    OutputStream os = null;
                    String localDirectory = hdfspath.substring(hdfspath.lastIndexOf("/") + 1);
                    FileStatus[] fileStatus = hdfsFileSystem
                            .listStatus(new Path(runFileTransferEntity.getLocalPath()));
                    Path[] paths = FileUtil.stat2Paths(fileStatus);
                    File dirs = null;

                    try {
                        String folderName = hdfspath.substring(hdfspath.lastIndexOf("/") + 1);

                        DateFormat df = new SimpleDateFormat("dd-MM-yyyy");
                        String dateWithoutTime = df.format(new Date()).toString();
                        Random ran = new Random();
                        String tempFolder = "ftp_sftp_" + System.nanoTime() + "_" + ran.nextInt(1000);
                        dirs = new File("/tmp/" + tempFolder);

                        boolean success = dirs.mkdirs();
                        for (Path files : paths) {
                            is = hdfsFileSystem.open(files);
                            os = new BufferedOutputStream(new FileOutputStream(dirs + "/" + files.getName()));
                            org.apache.hadoop.io.IOUtils.copyBytes(is, os, conf);
                        }

                        for (File files : dirs.listFiles()) {

                            if (files.isFile()) {
                                s3Client.putObject(new PutObjectRequest(
                                        amazonFileUploadLocationOriginal + "/" + folderName, files.getName(),
                                        files));
                            }

                        }
                    }

                    catch (IOException e) {
                        Log.error("IOException occured while transfering the file", e);
                    } finally {
                        org.apache.hadoop.io.IOUtils.closeStream(is);
                        org.apache.hadoop.io.IOUtils.closeStream(os);
                        if (dirs != null) {

                            FileUtils.deleteDirectory(dirs);
                        }

                    }

                } else {
                    hdfsFileSystem.copyToLocalFile(false, hdfs, local);
                    stream = new FileInputStream("/tmp/" + f.getName());
                    File S3file = new File("/tmp/" + f.getName());

                    PutObjectRequest putObjectRequest = new PutObjectRequest(amazonFileUploadLocationOriginal,
                            keyName, S3file);
                    PutObjectResult result = s3Client.putObject(putObjectRequest);
                }
            } else {

                ObjectMetadata objectMetadata = new ObjectMetadata();
                if (runFileTransferEntity.getEncoding() != null)
                    objectMetadata.setContentEncoding(runFileTransferEntity.getEncoding());

                if (Files.isDirectory(inputfile)) {

                    File fileloc = new File(inputfile.toAbsolutePath().toString());
                    String folderName = new File(runFileTransferEntity.getLocalPath()).getName();
                    for (File files : fileloc.listFiles()) {

                        if (files.isFile()) {
                            PutObjectRequest putObjectRequest = new PutObjectRequest(
                                    amazonFileUploadLocationOriginal + "/" + folderName, files.getName(),
                                    files);

                            PutObjectResult result = s3Client.putObject(putObjectRequest);
                        }

                    }

                } else {
                    PutObjectRequest putObjectRequest = null;
                    File file = new File(runFileTransferEntity.getLocalPath());
                    stream = new FileInputStream(runFileTransferEntity.getLocalPath());
                    putObjectRequest = new PutObjectRequest(amazonFileUploadLocationOriginal, keyName, file);
                    PutObjectResult result = s3Client.putObject(putObjectRequest);
                }
            }

        }

        catch (AmazonServiceException e) {
            if (e.getStatusCode() == 403 || e.getStatusCode() == 404) {
                if (runFileTransferEntity.getFailOnError())
                    Log.error("Incorrect details provided.Please provide valid details", e);
                throw new AWSUtilException("Incorrect details provided");

            }

            {
                try {
                    Thread.sleep(runFileTransferEntity.getRetryAfterDuration());
                } catch (Exception e1) {
                    Log.error("Exception occured while sleeping the thread");
                }
                continue;
            }

        } catch (Exception e) {
            log.error("error while transferring file", e);
            try {
                Thread.sleep(runFileTransferEntity.getRetryAfterDuration());
            } catch (Exception e1) {
                Log.error("Exception occured while sleeping the thread");
            }
            continue;
        } catch (Error err) {
            Log.error("Error occured while uploading the file", err);
            throw new AWSUtilException(err);
        }
        done = true;
        break;
    }
    if (runFileTransferEntity.getFailOnError() && !done) {
        log.error("File transfer failed");
        throw new AWSUtilException("File transfer failed");
    } else if (!done) {
        log.error("File transfer failed but mentioned fail on error as false");
    }
    if (i == runFileTransferEntity.getRetryAttempt()) {
        if (runFileTransferEntity.getFailOnError())
            throw new AWSUtilException("File transfer failed");
    }
    log.debug("Finished AWSS3Util upload");
}

From source file:hydrograph.engine.spark.datasource.utils.FTPUtil.java

License:Apache License

public void upload(RunFileTransferEntity runFileTransferEntity) {
    log.debug("Start FTPUtil upload");

    FTPClient ftpClient = new FTPClient();
    ftpClient.enterLocalPassiveMode();
    ftpClient.setBufferSize(1024000);

    int retryAttempt = runFileTransferEntity.getRetryAttempt();
    int attemptCount = 1;
    int i = 0;

    InputStream inputStream = null;
    boolean login = false;
    File filecheck = new File(runFileTransferEntity.getInputFilePath());
    log.info("input file name" + filecheck.getName());
    if (runFileTransferEntity.getFailOnError()) {
        if (!(filecheck.isFile() || filecheck.isDirectory())
                && !(runFileTransferEntity.getInputFilePath().contains("hdfs://"))) {
            log.error("Invalid input file path. Please provide valid input file path.");
            throw new FTPUtilException("Invalid input file path");
        }
    }

    boolean done = false;
    for (i = 0; i < retryAttempt; i++) {
        try {
            log.info("Connection attempt: " + (i + 1));
            if (runFileTransferEntity.getTimeOut() != 0)
                ftpClient.setConnectTimeout(runFileTransferEntity.getTimeOut());
            if (runFileTransferEntity.getEncoding() != null)
                ftpClient.setControlEncoding(runFileTransferEntity.getEncoding());
            log.debug("connection details: " + "/n" + "Username: " + runFileTransferEntity.getUserName() + "/n"
                    + "HostName " + runFileTransferEntity.getHostName() + "/n" + "Portno"
                    + runFileTransferEntity.getPortNo());
            ftpClient.connect(runFileTransferEntity.getHostName(), runFileTransferEntity.getPortNo());
            login = ftpClient.login(runFileTransferEntity.getUserName(), runFileTransferEntity.getPassword());
            if (!login) {
                log.error("Invalid FTP details provided. Please provide correct FTP details.");
                throw new FTPUtilException("Invalid FTP details");
            }
            ftpClient.enterLocalPassiveMode();
            ftpClient.setFileType(FTP.BINARY_FILE_TYPE);
            if (runFileTransferEntity.getInputFilePath().contains("hdfs://")) {
                log.debug("Processing for HDFS input file path");
                String inputPath = runFileTransferEntity.getInputFilePath();

                String s1 = inputPath.substring(7, inputPath.length());

                String s2 = s1.substring(0, s1.indexOf("/"));

                int index = runFileTransferEntity.getInputFilePath()
                        .replaceAll(Matcher.quoteReplacement("\\"), "/").lastIndexOf('/');

                String file_name = runFileTransferEntity.getInputFilePath().substring(index + 1);

                File f = new File("/tmp");
                if (!f.exists())
                    f.mkdir();
                Configuration conf = new Configuration();
                conf.set("fs.defaultFS", "hdfs://" + s2);
                FileSystem hdfsFileSystem = FileSystem.get(conf);
                Path local = new Path("/tmp");
                String s = inputPath.substring(7, inputPath.length());
                String hdfspath = s.substring(s.indexOf("/"), s.length());
                File dir = new File(hdfspath);
                Random ran = new Random();
                String tempFolder = "ftp_sftp_" + System.nanoTime() + "_" + ran.nextInt(1000);
                File dirs = new File("/tmp/" + tempFolder);
                boolean success = dirs.mkdirs();
                if (hdfsFileSystem.isDirectory(new Path(hdfspath))) {
                    log.debug("Provided HDFS input path is for directory.");
                    InputStream is = null;
                    OutputStream os = null;
                    String localDirectory = hdfspath.substring(hdfspath.lastIndexOf("/") + 1);
                    FileStatus[] fileStatus = hdfsFileSystem
                            .listStatus(new Path(runFileTransferEntity.getInputFilePath()));
                    Path[] paths = FileUtil.stat2Paths(fileStatus);
                    try {
                        String folderName = hdfspath.substring(hdfspath.lastIndexOf("/") + 1);
                        Path hdfs = new Path(hdfspath);
                        for (Path file : paths) {
                            is = hdfsFileSystem.open(file);
                            os = new BufferedOutputStream(
                                    new FileOutputStream(dirs + "" + File.separatorChar + file.getName()));
                            IOUtils.copyBytes(is, os, conf);
                        }
                        ftpClient.changeWorkingDirectory(runFileTransferEntity.getOutFilePath()
                                .replaceAll(Matcher.quoteReplacement("\\"), "/"));
                        ftpClient.removeDirectory(folderName);
                        ftpClient.makeDirectory(folderName);
                        ftpClient.changeWorkingDirectory(runFileTransferEntity.getOutFilePath().replaceAll(
                                Matcher.quoteReplacement("\\"), "/") + File.separatorChar + folderName);
                        for (File files : dirs.listFiles()) {

                            if (files.isFile())
                                ftpClient.storeFile(files.getName().toString(),
                                        new BufferedInputStream(new FileInputStream(files)));

                        }
                    } catch (IOException e) {
                        log.error("Failed while doing FTP file", e);
                        //throw e;
                    } finally {
                        IOUtils.closeStream(is);
                        IOUtils.closeStream(os);
                        if (dirs != null) {
                            FileUtils.deleteDirectory(dirs);
                        }
                    }
                } else {
                    try {
                        Path hdfs = new Path(hdfspath);
                        hdfsFileSystem.copyToLocalFile(false, hdfs, local);
                        inputStream = new FileInputStream("/tmp/" + file_name);
                        ftpClient.storeFile(file_name, new BufferedInputStream(inputStream));
                    } catch (Exception e) {
                        log.error("Failed while doing FTP file", e);
                        throw new FTPUtilException("Failed while doing FTP file", e);
                    } finally {
                        FileUtils.deleteDirectory(dirs);
                    }
                }
            } else {
                java.nio.file.Path file = new File(runFileTransferEntity.getInputFilePath()).toPath();
                if (Files.isDirectory(file)) {
                    log.debug("Provided input file path is for directory");
                    File dir = new File(runFileTransferEntity.getInputFilePath());
                    String folderName = new File(runFileTransferEntity.getInputFilePath()).getName();
                    ftpClient.changeWorkingDirectory(runFileTransferEntity.getOutFilePath()
                            .replaceAll(Matcher.quoteReplacement("\\"), "/"));
                    try {
                        ftpClient.removeDirectory(folderName);
                    } catch (IOException e) {
                        log.error("Failed while doing FTP file", e);
                        throw new FTPUtilException("Failed while doing FTP file", e);
                    }
                    ftpClient.makeDirectory(folderName);

                    ftpClient.changeWorkingDirectory(runFileTransferEntity.getOutFilePath()
                            .replaceAll(Matcher.quoteReplacement("\\"), "/") + "/" + folderName);
                    for (File files : dir.listFiles()) {

                        if (files.isFile())
                            ftpClient.storeFile(files.getName().toString(),
                                    new BufferedInputStream(new FileInputStream(files)));
                    }
                } else {

                    inputStream = new FileInputStream(runFileTransferEntity.getInputFilePath());
                    ftpClient.changeWorkingDirectory(runFileTransferEntity.getOutFilePath()
                            .replaceAll(Matcher.quoteReplacement("\\"), "/"));
                    int index = runFileTransferEntity.getInputFilePath()
                            .replaceAll(Matcher.quoteReplacement("\\"), "/").lastIndexOf('/');
                    String file_name = runFileTransferEntity.getInputFilePath().substring(index + 1);
                    ftpClient.storeFile(file_name, new BufferedInputStream(inputStream));
                }

            }
        } catch (Exception e) {
            log.error("Failed while doing FTP file", e);
            if (!login && runFileTransferEntity.getFailOnError()) {
                throw new FTPUtilException("Invalid FTP details");
            }
            try {
                Thread.sleep(runFileTransferEntity.getRetryAfterDuration());
            } catch (Exception e1) {
                log.error("Failed while sleeping for retry duration", e1);
            }
            continue;
        } finally {
            try {
                if (inputStream != null)
                    inputStream.close();
            } catch (IOException ioe) {
                //Ignore failures while closing the input stream.
            }
        }
        done = true;
        break;
    }

    try {
        if (ftpClient != null) {
            ftpClient.logout();
            ftpClient.disconnect();

        }
    } catch (Exception e) {
        log.error("Failed while clossing the connection", e);
    } catch (Error e) {
        log.error("Failed while clossing the connection", e);
        //throw new RuntimeException(e);
    }

    if (runFileTransferEntity.getFailOnError() && !done) {
        log.error("File transfer failed");
        throw new FTPUtilException("File transfer failed");
    } else if (!done) {
        log.error("File transfer failed but mentioned fail on error as false");
    }
    log.debug("Finished FTPUtil upload");
}

From source file:hydrograph.engine.spark.datasource.utils.SFTPUtil.java

License:Apache License

public void upload(RunFileTransferEntity runFileTransferEntity) {
    log.debug("Start SFTPUtil upload");
    JSch jsch = new JSch();
    Session session = null;
    Channel channel = null;
    ChannelSftp sftpChannel = null;
    ZipInputStream zip = null;
    FileInputStream fin = null;
    int retryAttempt = 0;
    int i;
    boolean done = false;
    File filecheck = new File(runFileTransferEntity.getInputFilePath());
    if (runFileTransferEntity.getFailOnError())
        if (!(filecheck.isFile() || filecheck.isDirectory())
                && !(runFileTransferEntity.getInputFilePath().contains("hdfs://"))) {
            log.error("invalid input file path,Please provide valid file path");
            throw new SFTPUtilException("Invalid input file path");
        }

    if (runFileTransferEntity.getRetryAttempt() == 0)
        retryAttempt = 1;
    else
        retryAttempt = runFileTransferEntity.getRetryAttempt();

    for (i = 0; i < retryAttempt; i++) {

        try {
            log.info("connection attempt: " + (i + 1));
            if (runFileTransferEntity.getPrivateKeyPath() != null) {
                jsch.addIdentity(runFileTransferEntity.getPrivateKeyPath());
            }
            log.debug("connection details: " + "/n" + "Username: " + runFileTransferEntity.getUserName() + "/n"
                    + "HostName " + runFileTransferEntity.getHostName() + "/n" + "Portno"
                    + runFileTransferEntity.getPortNo());
            session = jsch.getSession(runFileTransferEntity.getUserName(), runFileTransferEntity.getHostName(),
                    runFileTransferEntity.getPortNo());
            session.setConfig("PreferredAuthentications", "publickey,keyboard-interactive,password");
            session.setConfig("StrictHostKeyChecking", "no");
            if (runFileTransferEntity.getPassword() != null) {
                session.setPassword(runFileTransferEntity.getPassword());
            }
            if (runFileTransferEntity.getTimeOut() > 0) {

                session.setTimeout(runFileTransferEntity.getTimeOut());
            }

            session.connect();
            channel = session.openChannel("sftp");
            channel.connect();
            sftpChannel = (ChannelSftp) channel;
            sftpChannel.setFilenameEncoding(runFileTransferEntity.getEncoding());
            sftpChannel
                    .cd(runFileTransferEntity.getOutFilePath().replaceAll(Matcher.quoteReplacement("\\"), "/"));

            if (runFileTransferEntity.getInputFilePath().contains("hdfs://")) {
                log.debug("in hdfs file system transfer");
                String inputPath = runFileTransferEntity.getInputFilePath();
                File f = new File("/tmp");
                if (!f.exists())
                    f.mkdir();
                String s1 = inputPath.substring(7, inputPath.length());
                String s2 = s1.substring(0, s1.indexOf("/"));
                Configuration conf = new Configuration();
                conf.set("fs.defaultFS", "hdfs://" + s2);
                FileSystem hdfsFileSystem = FileSystem.get(conf);
                Path local = new Path("/tmp");
                String s = inputPath.substring(7, inputPath.length());
                String hdfspath = s.substring(s.indexOf("/"), s.length());

                File dir = new File(hdfspath);
                if (hdfsFileSystem.isDirectory(new Path(hdfspath))) {
                    log.debug("in hdfs file system folder path");
                    InputStream is = null;
                    OutputStream os = null;
                    String localDirectory = hdfspath.substring(hdfspath.lastIndexOf("/") + 1);
                    FileStatus[] fileStatus = hdfsFileSystem
                            .listStatus(new Path(runFileTransferEntity.getInputFilePath()));
                    Path[] paths = FileUtil.stat2Paths(fileStatus);
                    File dirs = null;

                    try {
                        String folderName = hdfspath.substring(hdfspath.lastIndexOf("/") + 1);

                        DateFormat df = new SimpleDateFormat("dd-MM-yyyy");
                        String dateWithoutTime = df.format(new Date()).toString();
                        java.util.Random ran = new Random();
                        String tempFolder = "ftp_sftp_" + System.nanoTime() + "_" + ran.nextInt(1000);
                        dirs = new File("/tmp/" + tempFolder);
                        boolean success = dirs.mkdirs();
                        for (Path file : paths) {

                            is = hdfsFileSystem.open(file);
                            os = new BufferedOutputStream(
                                    new FileOutputStream(dirs + "" + File.separatorChar + file.getName()));
                            IOUtils.copyBytes(is, os, conf);
                        }
                        try {

                            sftpChannel.cd(folderName);
                        } catch (SftpException e) {
                            sftpChannel.mkdir(folderName);
                            sftpChannel.cd(folderName);
                        }
                        for (File files : dirs.listFiles()) {

                            if (files.isFile()) {

                                sftpChannel.put(new BufferedInputStream(new FileInputStream(files)),
                                        files.getName());

                            }

                        }
                    }

                    catch (IOException e) {
                        log.error("error while transferring file", e);
                        throw new SFTPUtilException(e);
                    } finally {
                        IOUtils.closeStream(is);
                        IOUtils.closeStream(os);
                        if (dirs != null) {

                            FileUtils.deleteDirectory(dirs);
                        }

                    }

                } else {
                    log.debug("File transfer in normal mode");
                    Path hdfs = new Path(hdfspath);
                    hdfsFileSystem.copyToLocalFile(false, hdfs, local);
                    int index = inputPath.replaceAll(Matcher.quoteReplacement("\\"), "/").lastIndexOf('/');
                    String file_name = runFileTransferEntity.getInputFilePath().substring(index + 1);
                    fin = new FileInputStream("/tmp/" + file_name);
                    sftpChannel.cd(runFileTransferEntity.getOutFilePath()
                            .replaceAll(Matcher.quoteReplacement("\\"), "/"));
                    sftpChannel.put(new BufferedInputStream(fin), file_name);
                    i = i + 1;
                    fin.close();
                }

            } else {
                java.nio.file.Path file = new File(runFileTransferEntity.getInputFilePath()).toPath();
                if (Files.isDirectory(file)) {
                    log.debug("Folder transfer in SFTP");
                    File f = new File(file.toAbsolutePath().toString());
                    String folderName = new File(runFileTransferEntity.getInputFilePath()).getName();
                    sftpChannel.cd(runFileTransferEntity.getOutFilePath()
                            .replaceAll(Matcher.quoteReplacement("\\"), "/"));
                    try {

                        sftpChannel.cd(folderName);
                    } catch (SftpException e) {
                        log.error(
                                "could not change to the directory because the folder was not found, so creating a new directory");
                        sftpChannel.cd(runFileTransferEntity.getOutFilePath()
                                .replaceAll(Matcher.quoteReplacement("\\"), "/"));
                        sftpChannel.mkdir(folderName);
                        sftpChannel.cd(folderName);
                    }

                    for (File files : f.listFiles()) {

                        if (files.isFile())
                            sftpChannel.put(new BufferedInputStream(new FileInputStream(files)),
                                    files.getName());

                    }

                } else {
                    int index = runFileTransferEntity.getInputFilePath()
                            .replaceAll(Matcher.quoteReplacement("\\"), "/").lastIndexOf('/');
                    String file_name = runFileTransferEntity.getInputFilePath().substring(index + 1);
                    fin = new FileInputStream(runFileTransferEntity.getInputFilePath());
                    sftpChannel.cd(runFileTransferEntity.getOutFilePath()
                            .replaceAll(Matcher.quoteReplacement("\\"), "/"));
                    sftpChannel.put(new BufferedInputStream(fin), file_name);
                    fin.close();
                }
            }
        } catch (JSchException e) {
            if (e.getMessage().compareTo("Auth fail") == 0) {
                log.error("authentication error,please provide valid details", e);
                if (runFileTransferEntity.getFailOnError())
                    throw new SFTPUtilException(e.getMessage());
            } else {
                log.error("error while transfering the file and retrying ", e);
                try {
                    Thread.sleep(runFileTransferEntity.getRetryAfterDuration());
                } catch (Exception e1) {
                    log.error("sleep duration for re attemp exception", e1);
                }
                continue;
            }

        } catch (Exception e) {
            log.error("Error while transfering the file", e);
            try {
                Thread.sleep(runFileTransferEntity.getRetryAfterDuration());
            } catch (Exception e1) {
                log.error("exception while sleep thread", e);
            }
            continue;
        } finally {
            try {
                if (fin != null)
                    fin.close();
            } catch (IOException ioe) {
                log.error("error while closing input stream ");
            }
        }

        done = true;
        break;

    }

    if (sftpChannel != null) {
        sftpChannel.disconnect();
    }
    if (channel != null) {
        channel.disconnect();
    }
    if (session != null) {
        session.disconnect();
    }
    if (runFileTransferEntity.getFailOnError() && !done) {
        log.error("File transfer failed");
        throw new SFTPUtilException("File transfer failed");
    } else if (!done) {
        log.error("File transfer failed but mentioned fail on error as false");
    }
    log.debug("Fininished SFTPUtil upload");
}

From source file:inflater.runner.RunInflater.java

License:MIT License

@Override
public int run(String[] args) throws IOException, ClassNotFoundException, InterruptedException, ParseException {
    if (args.length < 2) {
        return -1;
    }
    if (conf == null) {
        conf = new Configuration();
    }

    GiraphConfiguration giraphConf = new GiraphConfiguration(getConf());
    giraphConf.addResource(new Path("giraph-site.xml"));

    GiraphJob job = new GiraphJob(giraphConf, giraphConf.getComputationName());

    Path inputLocal = new Path(args[0]);
    Path outputLocal = new Path(args[1]);

    // Copy the input file from the local file system to HDFS
    FileSystem fs = FileSystem.get(giraphConf);
    inputHDFS = new Path(fs.getHomeDirectory(),
            "Giraph Source" + File.separator + inputLocal.getName() + File.separator + inputLocal.getName());
    inputHDFS = fs.makeQualified(inputHDFS);

    outputHDFS = new Path(fs.getHomeDirectory(),
            "Giraph Source" + File.separator + inputLocal.getName() + File.separator + "output");
    outputHDFS = fs.makeQualified(outputHDFS);

    fs.copyFromLocalFile(false, true, inputLocal, inputHDFS);

    // Delete output path because Hadoop cannot override it.
    if (fs.exists(outputHDFS))
        fs.delete(outputHDFS, true);

    FileOutputFormat.setOutputPath(job.getInternalJob(), outputHDFS);
    GiraphFileInputFormat.addVertexInputPath(giraphConf, inputHDFS);
    new GiraphConfigurationValidator<>(giraphConf).validateConfiguration();
    boolean result = job.run(true);
    if (result) {
        fs.copyToLocalFile(false, new Path(outputHDFS, "part-m-00000"), outputLocal);
    }
    return result ? 0 : -1;
}