Example usage for org.apache.hadoop.fs FileSystem getUri

Introduction

On this page you can find example usage for org.apache.hadoop.fs FileSystem getUri.

Prototype

public abstract URI getUri();

Document

Returns a URI which identifies this FileSystem.
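
A minimal, self-contained sketch of the call is shown first; the class name and the example path are placeholders for illustration and are not taken from the projects listed under Usage.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class GetUriExample {
    public static void main(String[] args) throws Exception {
        // Load the default Hadoop configuration (core-site.xml etc. from the classpath).
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        // getUri() identifies this FileSystem, e.g. hdfs://namenode:8020 or file:///
        URI uri = fs.getUri();
        System.out.println("Default FileSystem URI: " + uri);

        // A common use of the URI: qualify a path against this file system,
        // as several of the examples below do with Path.makeQualified(fs.getUri(), ...).
        Path qualified = new Path("/tmp/example.txt").makeQualified(uri, fs.getWorkingDirectory());
        System.out.println("Qualified path: " + qualified);
    }
}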

Usage

From source file:org.apache.helix.provisioning.yarn.AppLauncher.java

License:Apache License

public boolean launch() throws Exception {
    LOG.info("Running Client");
    yarnClient.start();

    // Get a new application id
    YarnClientApplication app = yarnClient.createApplication();
    GetNewApplicationResponse appResponse = app.getNewApplicationResponse();
    // TODO get min/max resource capabilities from RM and change memory ask if needed
    // If we do not have min/max, we may not be able to correctly request
    // the required resources from the RM for the app master
    // Memory ask has to be a multiple of min and less than max.
    // Dump out information about cluster capability as seen by the resource manager
    int maxMem = appResponse.getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capabililty of resources in this cluster " + maxMem);

    // set the application name
    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    _appId = appContext.getApplicationId();
    _appMasterConfig.setAppId(_appId.getId());
    String appName = _applicationSpec.getAppName();
    _appMasterConfig.setAppName(appName);
    _appMasterConfig.setApplicationSpecFactory(_applicationSpecFactory.getClass().getCanonicalName());
    appContext.setApplicationName(appName);

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

    LOG.info("Copy Application archive file from local filesystem and add to local environment");
    // Copy the application master jar to the filesystem
    // Create a local resource to point to the destination jar path
    FileSystem fs = FileSystem.get(_conf);

    // get packages for each component packages
    Map<String, URI> packages = new HashMap<String, URI>();
    packages.put(AppMasterConfig.AppEnvironment.APP_MASTER_PKG.toString(), appMasterArchive.toURI());
    packages.put(AppMasterConfig.AppEnvironment.APP_SPEC_FILE.toString(), _yamlConfigFile.toURI());
    for (String serviceName : _applicationSpec.getServices()) {
        packages.put(serviceName, _applicationSpec.getServicePackage(serviceName));
    }
    Map<String, Path> hdfsDest = new HashMap<String, Path>();
    Map<String, String> classpathMap = new HashMap<String, String>();
    for (String name : packages.keySet()) {
        URI uri = packages.get(name);
        Path dst = copyToHDFS(fs, name, uri);
        hdfsDest.put(name, dst);
        String classpath = generateClasspathAfterExtraction(name, new File(uri));
        classpathMap.put(name, classpath);
        _appMasterConfig.setClasspath(name, classpath);
        String serviceMainClass = _applicationSpec.getServiceMainClass(name);
        if (serviceMainClass != null) {
            _appMasterConfig.setMainClass(name, serviceMainClass);
        }
    }

    // Get YAML files describing all workflows to immediately start
    Map<String, URI> workflowFiles = new HashMap<String, URI>();
    List<TaskConfig> taskConfigs = _applicationSpec.getTaskConfigs();
    if (taskConfigs != null) {
        for (TaskConfig taskConfig : taskConfigs) {
            URI configUri = taskConfig.getYamlURI();
            if (taskConfig.name != null && configUri != null) {
                workflowFiles.put(taskConfig.name, taskConfig.getYamlURI());
            }
        }
    }

    // set local resources for the application master
    // local files or archives as needed
    // In this scenario, the jar file for the application master is part of the local resources
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
    LocalResource appMasterPkg = setupLocalResource(fs,
            hdfsDest.get(AppMasterConfig.AppEnvironment.APP_MASTER_PKG.toString()));
    LocalResource appSpecFile = setupLocalResource(fs,
            hdfsDest.get(AppMasterConfig.AppEnvironment.APP_SPEC_FILE.toString()));
    localResources.put(AppMasterConfig.AppEnvironment.APP_MASTER_PKG.toString(), appMasterPkg);
    localResources.put(AppMasterConfig.AppEnvironment.APP_SPEC_FILE.toString(), appSpecFile);
    for (String name : workflowFiles.keySet()) {
        URI uri = workflowFiles.get(name);
        Path dst = copyToHDFS(fs, name, uri);
        LocalResource taskLocalResource = setupLocalResource(fs, dst);
        localResources.put(AppMasterConfig.AppEnvironment.TASK_CONFIG_FILE.toString() + "_" + name,
                taskLocalResource);
    }

    // Set local resource info into app master container launch context
    amContainer.setLocalResources(localResources);

    // Set the necessary security tokens as needed
    // amContainer.setContainerTokens(containerToken);

    // Add AppMaster.jar location to classpath
    // At some point we should not be required to add
    // the hadoop specific classpaths to the env.
    // It should be provided out of the box.
    // For now setting all required classpaths including
    // the classpath to "." for the application jar
    StringBuilder classPathEnv = new StringBuilder(Environment.CLASSPATH.$()).append(File.pathSeparatorChar)
            .append("./*").append(File.pathSeparatorChar);
    classPathEnv.append(classpathMap.get(AppMasterConfig.AppEnvironment.APP_MASTER_PKG.toString()));
    for (String c : _conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
            YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) {
        classPathEnv.append(File.pathSeparatorChar);
        classPathEnv.append(c.trim());
    }
    classPathEnv.append(File.pathSeparatorChar).append("./log4j.properties");

    // add the runtime classpath needed for tests to work
    if (_conf.getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) {
        classPathEnv.append(':');
        classPathEnv.append(System.getProperty("java.class.path"));
    }
    LOG.info("\n\n Setting the classpath to launch AppMaster:\n\n");
    // Set the env variables to be setup in the env where the application master will be run
    Map<String, String> env = new HashMap<String, String>(_appMasterConfig.getEnv());
    env.put("CLASSPATH", classPathEnv.toString());

    amContainer.setEnvironment(env);

    // Set the necessary command to execute the application master
    Vector<CharSequence> vargs = new Vector<CharSequence>(30);

    // Set java executable command
    LOG.info("Setting up app master launch command");
    vargs.add(Environment.JAVA_HOME.$() + "/bin/java");
    int amMemory = 4096;
    // Set Xmx based on am memory size
    vargs.add("-Xmx" + amMemory + "m");
    // Set class name
    vargs.add(AppMasterLauncher.class.getCanonicalName());
    // Set params for Application Master
    // vargs.add("--num_containers " + String.valueOf(numContainers));

    vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stdout");
    vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stderr");

    // Get final command
    StringBuilder command = new StringBuilder();
    for (CharSequence str : vargs) {
        command.append(str).append(" ");
    }

    LOG.info("Completed setting up app master command " + command.toString());
    List<String> commands = new ArrayList<String>();
    commands.add(command.toString());
    amContainer.setCommands(commands);

    // Set up resource type requirements
    // For now, only memory is supported so we set memory requirements
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(amMemory);
    appContext.setResource(capability);

    // Service data is a binary blob that can be passed to the application
    // Not needed in this scenario
    // amContainer.setServiceData(serviceData);

    // Setup security tokens
    if (UserGroupInformation.isSecurityEnabled()) {
        Credentials credentials = new Credentials();
        String tokenRenewer = _conf.get(YarnConfiguration.RM_PRINCIPAL);
        if (tokenRenewer == null || tokenRenewer.length() == 0) {
            throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer");
        }

        // For now, only getting tokens for the default file-system.
        final Token<?> tokens[] = fs.addDelegationTokens(tokenRenewer, credentials);
        if (tokens != null) {
            for (Token<?> token : tokens) {
                LOG.info("Got dt for " + fs.getUri() + "; " + token);
            }
        }
        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        ByteBuffer fsTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
        amContainer.setTokens(fsTokens);
    }

    appContext.setAMContainerSpec(amContainer);

    // Set the priority for the application master
    Priority pri = Records.newRecord(Priority.class);
    int amPriority = 0;
    // TODO - what is the range for priority? how to decide?
    pri.setPriority(amPriority);
    appContext.setPriority(pri);

    String amQueue = "default";
    // Set the queue to which this application is to be submitted in the RM
    appContext.setQueue(amQueue);

    LOG.info("Submitting application to YARN Resource Manager");

    ApplicationId applicationId = yarnClient.submitApplication(appContext);

    LOG.info("Submitted application with applicationId:" + applicationId);

    return true;
}

From source file:org.apache.hive.hcatalog.mapreduce.TestHCatMultiOutputFormat.java

License:Apache License

@BeforeClass
public static void setup() throws Exception {
    System.clearProperty("mapred.job.tracker");
    String testDir = System.getProperty("test.tmp.dir", "./");
    testDir = testDir + "/test_multitable_" + Math.abs(new Random().nextLong()) + "/";
    workDir = new File(new File(testDir).getCanonicalPath());
    FileUtil.fullyDelete(workDir);
    workDir.mkdirs();

    warehousedir = new Path(System.getProperty("test.warehouse.dir"));

    HiveConf metastoreConf = new HiveConf();
    metastoreConf.setVar(HiveConf.ConfVars.METASTOREWAREHOUSE, warehousedir.toString());

    // Run hive metastore server
    msPort = MetaStoreUtils.startMetaStore(metastoreConf);
    // LocalJobRunner does not work with mapreduce OutputCommitter. So need
    // to use MiniMRCluster. MAPREDUCE-2350
    Configuration conf = new Configuration(true);
    conf.set("yarn.scheduler.capacity.root.queues", "default");
    conf.set("yarn.scheduler.capacity.root.default.capacity", "100");

    FileSystem fs = FileSystem.get(conf);
    System.setProperty("hadoop.log.dir", new File(workDir, "/logs").getAbsolutePath());
    mrCluster = new MiniMRCluster(1, fs.getUri().toString(), 1, null, null, new JobConf(conf));
    mrConf = mrCluster.createJobConf();

    initializeSetup();

    warehousedir.getFileSystem(conf).mkdirs(warehousedir);
}

From source file:org.apache.hive.jdbc.miniHS2.MiniHS2.java

License:Apache License

private MiniHS2(HiveConf hiveConf, MiniClusterType miniClusterType, boolean useMiniKdc, String serverPrincipal,
        String serverKeytab, boolean isMetastoreRemote, boolean usePortsFromConf, String authType, boolean isHA)
        throws Exception {
    // Always use localhost for hostname as some tests like SSL CN validation ones
    // are tied to localhost being present in the certificate name
    super(hiveConf, "localhost",
            (usePortsFromConf ? hiveConf.getIntVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_PORT)
                    : MetaStoreUtils.findFreePort()),
            (usePortsFromConf ? hiveConf.getIntVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_HTTP_PORT)
                    : MetaStoreUtils.findFreePort()));
    this.miniClusterType = miniClusterType;
    this.useMiniKdc = useMiniKdc;
    this.serverPrincipal = serverPrincipal;
    this.isMetastoreRemote = isMetastoreRemote;
    baseDir = Files.createTempDir();
    localFS = FileSystem.getLocal(hiveConf);
    FileSystem fs;

    if (miniClusterType != MiniClusterType.DFS_ONLY) {
        // Initialize dfs
        dfs = ShimLoader.getHadoopShims().getMiniDfs(hiveConf, 4, true, null, isHA);
        fs = dfs.getFileSystem();
        String uriString = WindowsPathUtil.getHdfsUriString(fs.getUri().toString());

        // Initialize the execution engine based on cluster type
        switch (miniClusterType) {
        case TEZ:
            mr = ShimLoader.getHadoopShims().getMiniTezCluster(hiveConf, 4, uriString);
            break;
        case LLAP:
            if (usePortsFromConf) {
                hiveConf.setBoolean("minillap.usePortsFromConf", true);
            }
            llapCluster = LlapItUtils.startAndGetMiniLlapCluster(hiveConf, null, null);

            mr = ShimLoader.getHadoopShims().getMiniTezCluster(hiveConf, 4, uriString);
            break;
        case MR:
            mr = ShimLoader.getHadoopShims().getMiniMrCluster(hiveConf, 4, uriString, 1);
            break;
        default:
            throw new IllegalArgumentException("Unsupported cluster type " + miniClusterType);
        }
        // store the config in system properties
        mr.setupConfiguration(getHiveConf());
        baseDfsDir = new Path(new Path(fs.getUri()), "/base");
    } else {
        // This is DFS only mode, just initialize the dfs root directory.
        fs = FileSystem.getLocal(hiveConf);
        baseDfsDir = new Path("file://" + baseDir.toURI().getPath());
    }
    if (useMiniKdc) {
        hiveConf.setVar(ConfVars.HIVE_SERVER2_KERBEROS_PRINCIPAL, serverPrincipal);
        hiveConf.setVar(ConfVars.HIVE_SERVER2_KERBEROS_KEYTAB, serverKeytab);
        hiveConf.setVar(ConfVars.HIVE_SERVER2_AUTHENTICATION, authType);
    }
    String metaStoreURL = "jdbc:derby:" + baseDir.getAbsolutePath() + File.separator + "test_metastore-"
            + hs2Counter.incrementAndGet() + ";create=true";

    fs.mkdirs(baseDfsDir);
    Path wareHouseDir = new Path(baseDfsDir, "warehouse");
    // Create warehouse with 777, so that user impersonation has no issues.
    FileSystem.mkdirs(fs, wareHouseDir, FULL_PERM);

    fs.mkdirs(wareHouseDir);
    setWareHouseDir(wareHouseDir.toString());
    System.setProperty(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname, metaStoreURL);
    hiveConf.setVar(HiveConf.ConfVars.METASTORECONNECTURLKEY, metaStoreURL);
    if (!usePortsFromConf) {
        // reassign a new port, just in case if one of the MR services grabbed the last one
        setBinaryPort(MetaStoreUtils.findFreePort());
    }
    hiveConf.setVar(ConfVars.HIVE_SERVER2_THRIFT_BIND_HOST, getHost());
    hiveConf.setIntVar(ConfVars.HIVE_SERVER2_THRIFT_PORT, getBinaryPort());
    hiveConf.setIntVar(ConfVars.HIVE_SERVER2_THRIFT_HTTP_PORT, getHttpPort());

    Path scratchDir = new Path(baseDfsDir, "scratch");
    // Create root scratchdir with write all, so that user impersonation has no issues.
    Utilities.createDirsWithPermission(hiveConf, scratchDir, WRITE_ALL_PERM, true);
    System.setProperty(HiveConf.ConfVars.SCRATCHDIR.varname, scratchDir.toString());
    hiveConf.setVar(ConfVars.SCRATCHDIR, scratchDir.toString());

    String localScratchDir = baseDir.getPath() + File.separator + "scratch";
    System.setProperty(HiveConf.ConfVars.LOCALSCRATCHDIR.varname, localScratchDir);
    hiveConf.setVar(ConfVars.LOCALSCRATCHDIR, localScratchDir);
}

From source file:org.apache.impala.common.FileSystemUtil.java

License:Apache License

/**
 * Returns true if path p1 and path p2 are in the same encryption zone in HDFS.
 * Returns false if they are in different encryption zones or if either of the paths
 * are not on HDFS.
 */
private static boolean arePathsInSameHdfsEncryptionZone(FileSystem fs, Path p1, Path p2) throws IOException {
    // Only distributed file systems have encryption zones.
    if (!isDistributedFileSystem(p1) || !isDistributedFileSystem(p2))
        return false;
    HdfsAdmin hdfsAdmin = new HdfsAdmin(fs.getUri(), CONF);
    EncryptionZone z1 = hdfsAdmin.getEncryptionZoneForPath(p1);
    EncryptionZone z2 = hdfsAdmin.getEncryptionZoneForPath(p2);
    if (z1 == null && z2 == null)
        return true;
    if (z1 == null || z2 == null)
        return false;
    return z1.equals(z2);
}

From source file:org.apache.ivory.service.SharedLibraryHostingService.java

License:Apache License

public static void pushLibsToHDFS(String path, Cluster cluster, PathFilter pathFilter) throws IOException {
    Configuration conf = ClusterHelper.getConfiguration(cluster);
    FileSystem fs = FileSystem.get(conf);
    String localPaths = StartupProperties.get().getProperty("system.lib.location");
    assert localPaths != null && !localPaths.isEmpty() : "Invalid value for system.lib.location";
    if (!new File(localPaths).isDirectory()) {
        LOG.warn(localPaths + " configured for system.lib.location doesn't contain any valid libs");
        return;
    }
    for (File localFile : new File(localPaths).listFiles()) {
        Path clusterFile = new Path(path, localFile.getName());
        if (!pathFilter.accept(clusterFile))
            continue;

        if (fs.exists(clusterFile)) {
            FileStatus fstat = fs.getFileStatus(clusterFile);
            if (fstat.getLen() == localFile.length() && fstat.getModificationTime() == localFile.lastModified())
                continue;
        }
        fs.copyFromLocalFile(false, true, new Path(localFile.getAbsolutePath()), clusterFile);
        fs.setTimes(clusterFile, localFile.lastModified(), System.currentTimeMillis());
        LOG.info("Copied " + localFile.getAbsolutePath() + " to " + path + " in " + fs.getUri());
    }
}

From source file:org.apache.kylin.dict.DictionaryManager.java

License:Apache License

private String unpackDataSet(String tempHDFSDir, String dataSetName) throws IOException {

    InputStream in = this.getClass().getResourceAsStream("/org/apache/kylin/dict/" + dataSetName + ".txt");
    if (in == null) // data set resource not found
        return null;

    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    IOUtils.copy(in, buf);
    in.close();
    byte[] bytes = buf.toByteArray();

    Path tmpDataSetPath = new Path(
            tempHDFSDir + "/dict/temp_dataset/" + dataSetName + "_" + bytes.length + ".txt");

    FileSystem fs = HadoopUtil.getFileSystem(tempHDFSDir);
    boolean writtenNewFile = false;
    if (fs.exists(tmpDataSetPath) == false || fs.getFileStatus(tmpDataSetPath).getLen() != bytes.length) {
        fs.mkdirs(tmpDataSetPath.getParent());
        FSDataOutputStream out = fs.create(tmpDataSetPath);
        IOUtils.copy(new ByteArrayInputStream(bytes), out);
        out.close();
        writtenNewFile = true;
    }

    String qualifiedPath = tmpDataSetPath.makeQualified(fs.getUri(), new Path("/")).toString();
    if (writtenNewFile)
        logger.info("Dictionary temp data set file written to " + qualifiedPath);
    return qualifiedPath;
}

From source file:org.apache.kylin.job.cube.GarbageCollectionStep.java

License:Apache License

private void dropHdfsPathOnCluster(List<String> oldHdfsPaths, FileSystem fileSystem) throws IOException {
    if (oldHdfsPaths != null && oldHdfsPaths.size() > 0) {
        logger.debug("Drop HDFS path on FileSystem: " + fileSystem.getUri());
        output.append("Drop HDFS path on FileSystem: \"" + fileSystem.getUri() + "\" \n");
        for (String path : oldHdfsPaths) {
            if (path.endsWith("*"))
                path = path.substring(0, path.length() - 1);

            Path oldPath = new Path(path);
            if (fileSystem.exists(oldPath)) {
                fileSystem.delete(oldPath, true);
                logger.debug("Dropped HDFS path: " + path);
                output.append("Dropped HDFS path  \"" + path + "\" \n");
            } else {
                logger.debug("HDFS path not exists: " + path);
                output.append("HDFS path not exists: \"" + path + "\" \n");
            }// w  w  w.j  av  a2  s  .co  m
        }
    }
}

From source file:org.apache.kylin.job.tools.DeployCoprocessorCLI.java

License:Apache License

public static Path uploadCoprocessorJar(String localCoprocessorJar, FileSystem fileSystem,
        Set<String> oldJarPaths) throws IOException {
    Path uploadPath = null;
    File localCoprocessorFile = new File(localCoprocessorJar);

    // check existing jars
    if (oldJarPaths == null) {
        oldJarPaths = new HashSet<String>();
    }
    Path coprocessorDir = getCoprocessorHDFSDir(fileSystem, KylinConfig.getInstanceFromEnv());
    for (FileStatus fileStatus : fileSystem.listStatus(coprocessorDir)) {
        if (fileStatus.getLen() == localCoprocessorFile.length()
                && fileStatus.getModificationTime() == localCoprocessorFile.lastModified()) {
            uploadPath = fileStatus.getPath();
            break;
        }
        String filename = fileStatus.getPath().toString();
        if (filename.endsWith(".jar")) {
            oldJarPaths.add(filename);
        }
    }

    // upload if not existing
    if (uploadPath == null) {
        // figure out a unique new jar file name
        Set<String> oldJarNames = new HashSet<String>();
        for (String path : oldJarPaths) {
            oldJarNames.add(new Path(path).getName());
        }
        String baseName = getBaseFileName(localCoprocessorJar);
        String newName = null;
        int i = 0;
        while (newName == null) {
            newName = baseName + "-" + (i++) + ".jar";
            if (oldJarNames.contains(newName))
                newName = null;
        }

        // upload
        uploadPath = new Path(coprocessorDir, newName);
        FileInputStream in = null;
        FSDataOutputStream out = null;
        try {
            in = new FileInputStream(localCoprocessorFile);
            out = fileSystem.create(uploadPath);
            IOUtils.copy(in, out);
        } finally {
            IOUtils.closeQuietly(in);
            IOUtils.closeQuietly(out);
        }

        fileSystem.setTimes(uploadPath, localCoprocessorFile.lastModified(), -1);

    }

    uploadPath = uploadPath.makeQualified(fileSystem.getUri(), null);
    return uploadPath;
}

From source file:org.apache.kylin.storage.hbase.steps.HDFSPathGarbageCollectionStep.java

License:Apache License

private void dropHdfsPathOnCluster(List<String> oldHdfsPaths, FileSystem fileSystem) throws IOException {
    if (oldHdfsPaths != null && oldHdfsPaths.size() > 0) {
        logger.debug("Drop HDFS path on FileSystem: " + fileSystem.getUri());
        output.append("Drop HDFS path on FileSystem: \"" + fileSystem.getUri() + "\" \n");
        for (String path : oldHdfsPaths) {
            if (path.endsWith("*"))
                path = path.substring(0, path.length() - 1);

            Path oldPath = new Path(path);
            if (fileSystem.exists(oldPath)) {
                fileSystem.delete(oldPath, true);
                logger.debug("HDFS path " + path + " is dropped.");
                output.append("HDFS path " + path + " is dropped.\n");
            } else {
                logger.debug("HDFS path " + path + " not exists.");
                output.append("HDFS path " + path + " not exists.\n");
            }//w ww .ja v  a2  s.com
            // If hbase was deployed on another cluster, the job dir is empty and should be dropped,
            // because of rowkey_stats and hfile dirs are both dropped.
            if (fileSystem.listStatus(oldPath.getParent()).length == 0) {
                Path emptyJobPath = new Path(JobBuilderSupport.getJobWorkingDir(config, getJobId()));
                if (fileSystem.exists(emptyJobPath)) {
                    fileSystem.delete(emptyJobPath, true);
                    logger.debug("HDFS path " + emptyJobPath + " is empty and dropped.");
                    output.append("HDFS path " + emptyJobPath + " is empty and dropped.\n");
                }
            }
        }
    }
}

From source file:org.apache.kylin.storage.hbase.util.DeployCoprocessorCLI.java

License:Apache License

public synchronized static Path uploadCoprocessorJar(String localCoprocessorJar, FileSystem fileSystem,
        Set<String> oldJarPaths) throws IOException {
    Path uploadPath = null;
    File localCoprocessorFile = new File(localCoprocessorJar);

    // check existing jars
    if (oldJarPaths == null) {
        oldJarPaths = new HashSet<String>();
    }
    Path coprocessorDir = getCoprocessorHDFSDir(fileSystem, KylinConfig.getInstanceFromEnv());
    for (FileStatus fileStatus : fileSystem.listStatus(coprocessorDir)) {
        if (isSame(localCoprocessorFile, fileStatus)) {
            uploadPath = fileStatus.getPath();
            break;
        }
        String filename = fileStatus.getPath().toString();
        if (filename.endsWith(".jar")) {
            oldJarPaths.add(filename);
        }
    }

    // upload if not existing
    if (uploadPath == null) {
        // figure out a unique new jar file name
        Set<String> oldJarNames = new HashSet<String>();
        for (String path : oldJarPaths) {
            oldJarNames.add(new Path(path).getName());
        }
        String baseName = getBaseFileName(localCoprocessorJar);
        String newName = null;
        int i = 0;
        while (newName == null) {
            newName = baseName + "-" + (i++) + ".jar";
            if (oldJarNames.contains(newName))
                newName = null;
        }

        // upload
        uploadPath = new Path(coprocessorDir, newName);
        FileInputStream in = null;
        FSDataOutputStream out = null;
        try {
            in = new FileInputStream(localCoprocessorFile);
            out = fileSystem.create(uploadPath);
            IOUtils.copy(in, out);
        } finally {
            IOUtils.closeQuietly(in);
            IOUtils.closeQuietly(out);
        }

        fileSystem.setTimes(uploadPath, localCoprocessorFile.lastModified(), -1);

    }

    uploadPath = uploadPath.makeQualified(fileSystem.getUri(), null);
    return uploadPath;
}