Example usage for org.apache.hadoop.fs FileSystem getScheme

List of usage examples for org.apache.hadoop.fs FileSystem getScheme

Introduction

On this page you can find example usages for org.apache.hadoop.fs FileSystem getScheme.

Prototype

public String getScheme() 

Document

Return the protocol scheme for this FileSystem.
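
The base FileSystem implementation throws UnsupportedOperationException from getScheme() when a subclass does not override it, so callers invoke it on concrete filesystems and branch on the returned scheme name. A minimal sketch of typical usage (the HDFS URI and class name below are hypothetical):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SchemeExample {
    public static void main(String[] args) throws Exception {
        // Resolve the FileSystem backing a path (hypothetical URI).
        FileSystem fs = new Path("hdfs://namenode:8020/data").getFileSystem(new Configuration());

        // getScheme() returns the bare scheme name, e.g. "hdfs" or "file",
        // without any "://" suffix.
        String scheme = fs.getScheme();

        if ("hdfs".equalsIgnoreCase(scheme)) {
            System.out.println("Writing to HDFS");
        } else {
            System.out.println("Writing to a '" + scheme + "' filesystem");
        }
    }
}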

Usage

From source file:cascading.flow.tez.planner.Hadoop2TezFlowStepJob.java

License:Open Source License
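
This step job stages files for Tez and uses getScheme() to detect a local staging filesystem so the staging directory can also be created on local disk. Note that getScheme() returns only the bare scheme name (typically "file" for the local filesystem), so a prefix check against "file:/" may not match as intended.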

private Path prepareEnsureStagingDir(TezConfiguration workingConf) throws IOException {
    String stepStagingPath = createStepStagingPath();

    workingConf.set(TezConfiguration.TEZ_AM_STAGING_DIR, stepStagingPath);

    Path stagingDir = new Path(stepStagingPath);
    FileSystem fileSystem = FileSystem.get(workingConf);

    stagingDir = fileSystem.makeQualified(stagingDir);

    TokenCache.obtainTokensForNamenodes(new Credentials(), new Path[] { stagingDir }, workingConf);

    TezClientUtils.ensureStagingDirExists(workingConf, stagingDir);

    if (fileSystem.getScheme().startsWith("file:/"))
        new File(stagingDir.toUri()).mkdirs();

    return stagingDir;
}

From source file:com.datatorrent.contrib.hive.HiveOperator.java

License:Apache License
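
This operator obtains a new FileSystem instance for the store path and, if its scheme is not "hdfs", records a " local" qualifier for later use.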

/**
 * Override this method to change the FileSystem instance that is used by the
 * operator.
 *
 * @return A FileSystem object.
 * @throws IOException
 */
protected FileSystem getHDFSInstance() throws IOException {
    FileSystem tempFS = FileSystem.newInstance(new Path(store.filepath).toUri(), new Configuration());
    if (!tempFS.getScheme().equalsIgnoreCase("hdfs")) {
        localString = " local";
    }
    return tempFS;
}

From source file:com.thinkbiganalytics.kylo.hadoop.FileSystemUtil.java

License:Apache License
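
Each supplied FileSystem is registered in the Hadoop configuration under the fs.&lt;scheme&gt;.impl key derived from its getScheme() value, so the configuration can later instantiate it by scheme.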

/**
 * Adds Hadoop {@link FileSystem} classes to the Hadoop configuration.
 */
public static void registerFileSystems(@Nonnull final Iterable<FileSystem> fileSystems,
        @Nonnull final Configuration conf) {
    for (final FileSystem fs : fileSystems) {
        try {
            final String scheme = fs.getScheme();
            final Class clazz = fs.getClass();
            log.debug("Found {} FileSystem using class: {}", scheme, clazz);
            conf.setClass("fs." + scheme + ".impl", clazz, FileSystem.class);
        } catch (final Exception e) {
            log.warn("Cannot load FileSystem using class: {}: {}", fs.getClass().getName(), e, e);
        }
    }
}

From source file:com.uber.hoodie.common.table.log.HoodieLogFormatWriter.java

License:Apache License
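
This log writer passes the scheme to StorageSchemes.isAppendSupported() to decide between appending to an existing log file and rolling over to a new one.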

/**
 * @param fs                    the FileSystem used to create or append to the log file
 * @param logFile               the Hoodie log file being written
 * @param bufferSize            the buffer size for the output stream
 * @param replication           the replication factor for newly created files
 * @param sizeThreshold         the file size threshold that triggers a rollover
 * @param logWriteToken         the write token for the current log file
 * @param rolloverLogWriteToken the write token used when rolling over
 */
HoodieLogFormatWriter(FileSystem fs, HoodieLogFile logFile, Integer bufferSize, Short replication,
        Long sizeThreshold, String logWriteToken, String rolloverLogWriteToken)
        throws IOException, InterruptedException {
    this.fs = fs;
    this.logFile = logFile;
    this.sizeThreshold = sizeThreshold;
    this.bufferSize = bufferSize;
    this.replication = replication;
    this.logWriteToken = logWriteToken;
    this.rolloverLogWriteToken = rolloverLogWriteToken;
    Path path = logFile.getPath();
    if (fs.exists(path)) {
        boolean isAppendSupported = StorageSchemes.isAppendSupported(fs.getScheme());
        if (isAppendSupported) {
            log.info(logFile + " exists. Appending to existing file");
            try {
                this.output = fs.append(path, bufferSize);
            } catch (RemoteException e) {
                log.warn("Remote Exception, attempting to handle or recover lease", e);
                handleAppendExceptionOrRecoverLease(path, e);
            } catch (IOException ioe) {
                if (ioe.getMessage().toLowerCase().contains("not supported")) {
                    // may still happen if scheme is viewfs.
                    isAppendSupported = false;
                } else {
                    throw ioe;
                }
            }
        }
        if (!isAppendSupported) {
            this.logFile = logFile.rollOver(fs, rolloverLogWriteToken);
            log.info("Append not supported.. Rolling over to " + logFile);
            createNewFile();
        }
    } else {
        log.info(logFile + " does not exist. Create a new file");
        // Block size does not matter as we will always manually autoflush
        createNewFile();
    }
}

From source file:eu.stratosphere.yarn.Client.java

License:Apache License
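
Before uploading its artifacts, the YARN client checks the default filesystem's scheme and warns when it resolves to the local filesystem, since that usually indicates a misconfigured Hadoop setup.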

public void run(String[] args) throws Exception {

    if (UserGroupInformation.isSecurityEnabled()) {
        throw new RuntimeException("Stratosphere YARN client does not have security support right now. "
                + "File a bug, we will fix it asap");
    }
    //Utils.logFilesInCurrentDirectory(LOG);
    //
    //   Command Line Options
    //
    Options options = new Options();
    options.addOption(VERBOSE);
    options.addOption(STRATOSPHERE_CONF_DIR);
    options.addOption(STRATOSPHERE_JAR);
    options.addOption(JM_MEMORY);
    options.addOption(TM_MEMORY);
    options.addOption(TM_CORES);
    options.addOption(CONTAINER);
    options.addOption(GEN_CONF);
    options.addOption(QUEUE);
    options.addOption(QUERY);
    options.addOption(SHIP_PATH);

    CommandLineParser parser = new PosixParser();
    CommandLine cmd = null;
    try {
        cmd = parser.parse(options, args);
    } catch (MissingOptionException moe) {
        System.out.println(moe.getMessage());
        printUsage();
        System.exit(1);
    }

    if (System.getProperty("log4j.configuration") == null) {
        Logger root = Logger.getRootLogger();
        root.removeAllAppenders();
        PatternLayout layout = new PatternLayout("%d{HH:mm:ss,SSS} %-5p %-60c %x - %m%n");
        ConsoleAppender appender = new ConsoleAppender(layout, "System.err");
        root.addAppender(appender);
        if (cmd.hasOption(VERBOSE.getOpt())) {
            root.setLevel(Level.DEBUG);
            LOG.debug("CLASSPATH: " + System.getProperty("java.class.path"));
        } else {
            root.setLevel(Level.INFO);
        }
    }

    // Jar Path
    Path localJarPath;
    if (cmd.hasOption(STRATOSPHERE_JAR.getOpt())) {
        String userPath = cmd.getOptionValue(STRATOSPHERE_JAR.getOpt());
        if (!userPath.startsWith("file://")) {
            userPath = "file://" + userPath;
        }
        localJarPath = new Path(userPath);
    } else {
        localJarPath = new Path(
                "file://" + Client.class.getProtectionDomain().getCodeSource().getLocation().getPath());
    }

    if (cmd.hasOption(GEN_CONF.getOpt())) {
        LOG.info("Placing default configuration in current directory");
        File outFile = generateDefaultConf(localJarPath);
        LOG.info("File written to " + outFile.getAbsolutePath());
        System.exit(0);
    }

    // Conf Path 
    Path confPath = null;
    String confDirPath = "";
    if (cmd.hasOption(STRATOSPHERE_CONF_DIR.getOpt())) {
        confDirPath = cmd.getOptionValue(STRATOSPHERE_CONF_DIR.getOpt()) + "/";
        File confFile = new File(confDirPath + CONFIG_FILE_NAME);
        if (!confFile.exists()) {
            LOG.fatal("Unable to locate configuration file in " + confFile);
            System.exit(1);
        }
        confPath = new Path(confFile.getAbsolutePath());
    } else {
        System.out.println("No configuration file has been specified");

        // no configuration path given.
        // -> see if there is one in the current directory
        File currDir = new File(".");
        File[] candidates = currDir.listFiles(new FilenameFilter() {
            @Override
            public boolean accept(final File dir, final String name) {
                return name != null && name.endsWith(".yaml");
            }
        });
        if (candidates == null || candidates.length == 0) {
            System.out.println(
                    "No configuration file has been found in current directory.\n" + "Copying default.");
            File outFile = generateDefaultConf(localJarPath);
            confPath = new Path(outFile.toURI());
        } else {
            if (candidates.length > 1) {
                System.out.println("Multiple .yaml configuration files were found in the current directory\n"
                        + "Please specify one explicitly");
                System.exit(1);
            } else if (candidates.length == 1) {
                confPath = new Path(candidates[0].toURI());
            }
        }
    }
    List<File> shipFiles = new ArrayList<File>();
    // path to directory to ship
    if (cmd.hasOption(SHIP_PATH.getOpt())) {
        String shipPath = cmd.getOptionValue(SHIP_PATH.getOpt());
        File shipDir = new File(shipPath);
        if (shipDir.isDirectory()) {
            shipFiles = new ArrayList<File>(Arrays.asList(shipDir.listFiles(new FilenameFilter() {
                @Override
                public boolean accept(File dir, String name) {
                    return !(name.equals(".") || name.equals(".."));
                }
            })));
        } else {
            LOG.warn("Ship directory is not a directory!");
        }
    }
    boolean hasLog4j = false;
    //check if there is a log4j file
    if (confDirPath.length() > 0) {
        File l4j = new File(confDirPath + "/log4j.properties");
        if (l4j.exists()) {
            shipFiles.add(l4j);
            hasLog4j = true;
        }
    }

    // queue
    String queue = "default";
    if (cmd.hasOption(QUEUE.getOpt())) {
        queue = cmd.getOptionValue(QUEUE.getOpt());
    }

    // JobManager Memory
    int jmMemory = 512;
    if (cmd.hasOption(JM_MEMORY.getOpt())) {
        jmMemory = Integer.valueOf(cmd.getOptionValue(JM_MEMORY.getOpt()));
    }

    // Task Managers memory
    int tmMemory = 1024;
    if (cmd.hasOption(TM_MEMORY.getOpt())) {
        tmMemory = Integer.valueOf(cmd.getOptionValue(TM_MEMORY.getOpt()));
    }

    // Task Managers vcores
    int tmCores = 1;
    if (cmd.hasOption(TM_CORES.getOpt())) {
        tmCores = Integer.valueOf(cmd.getOptionValue(TM_CORES.getOpt()));
    }
    Utils.getStratosphereConfiguration(confPath.toUri().getPath());
    int jmPort = GlobalConfiguration.getInteger(ConfigConstants.JOB_MANAGER_IPC_PORT_KEY, 0);
    if (jmPort == 0) {
        LOG.warn("Unable to find job manager port in configuration!");
        jmPort = ConfigConstants.DEFAULT_JOB_MANAGER_IPC_PORT;
    }
    conf = Utils.initializeYarnConfiguration();

    // initialize HDFS
    LOG.info("Copy App Master jar from local filesystem and add to local environment");
    // Copy the application master jar to the filesystem 
    // Create a local resource to point to the destination jar path 
    final FileSystem fs = FileSystem.get(conf);

    if (fs.getScheme().startsWith("file")) {
        LOG.warn("The file system scheme is '" + fs.getScheme() + "'. This indicates that the "
                + "specified Hadoop configuration path is wrong and the sytem is using the default Hadoop configuration values."
                + "The Stratosphere YARN client needs to store its files in a distributed file system");
    }

    // Create yarnClient
    final YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(conf);
    yarnClient.start();

    // Query cluster for metrics
    if (cmd.hasOption(QUERY.getOpt())) {
        showClusterMetrics(yarnClient);
    }
    if (!cmd.hasOption(CONTAINER.getOpt())) {
        LOG.fatal("Missing required argument " + CONTAINER.getOpt());
        printUsage();
        yarnClient.stop();
        System.exit(1);
    }

    // TM Count
    final int taskManagerCount = Integer.valueOf(cmd.getOptionValue(CONTAINER.getOpt()));

    System.out.println("Using values:");
    System.out.println("\tContainer Count = " + taskManagerCount);
    System.out.println("\tJar Path = " + localJarPath.toUri().getPath());
    System.out.println("\tConfiguration file = " + confPath.toUri().getPath());
    System.out.println("\tJobManager memory = " + jmMemory);
    System.out.println("\tTaskManager memory = " + tmMemory);
    System.out.println("\tTaskManager cores = " + tmCores);

    // Create application via yarnClient
    YarnClientApplication app = yarnClient.createApplication();
    GetNewApplicationResponse appResponse = app.getNewApplicationResponse();
    Resource maxRes = appResponse.getMaximumResourceCapability();
    if (tmMemory > maxRes.getMemory() || tmCores > maxRes.getVirtualCores()) {
        LOG.fatal("The cluster does not have the requested resources for the TaskManagers available!\n"
                + "Maximum Memory: " + maxRes.getMemory() + ", Maximum Cores: " + tmCores);
        yarnClient.stop();
        System.exit(1);
    }
    if (jmMemory > maxRes.getMemory()) {
        LOG.fatal("The cluster does not have the requested resources for the JobManager available!\n"
                + "Maximum Memory: " + maxRes.getMemory());
        yarnClient.stop();
        System.exit(1);
    }
    int totalMemoryRequired = jmMemory + tmMemory * taskManagerCount;
    ClusterResourceDescription freeClusterMem = getCurrentFreeClusterResources(yarnClient);
    if (freeClusterMem.totalFreeMemory < totalMemoryRequired) {
        LOG.fatal("This YARN session requires " + totalMemoryRequired + "MB of memory in the cluster. "
                + "There are currently only " + freeClusterMem.totalFreeMemory + "MB available.");
        yarnClient.stop();
        System.exit(1);
    }
    if (tmMemory > freeClusterMem.containerLimit) {
        LOG.fatal("The requested amount of memory for the TaskManagers (" + tmMemory + "MB) is more than "
                + "the largest possible YARN container: " + freeClusterMem.containerLimit);
        yarnClient.stop();
        System.exit(1);
    }
    if (jmMemory > freeClusterMem.containerLimit) {
        LOG.fatal("The requested amount of memory for the JobManager (" + jmMemory + "MB) is more than "
                + "the largest possible YARN container: " + freeClusterMem.containerLimit);
        yarnClient.stop();
        System.exit(1);
    }

    // respect custom JVM options in the YAML file
    final String javaOpts = GlobalConfiguration.getString(ConfigConstants.STRATOSPHERE_JVM_OPTIONS, "");

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

    String amCommand = "$JAVA_HOME/bin/java" + " -Xmx" + Utils.calculateHeapSize(jmMemory) + "M " + javaOpts;
    if (hasLog4j) {
        amCommand += " -Dlog.file=\"" + ApplicationConstants.LOG_DIR_EXPANSION_VAR
                + "/jobmanager-log4j.log\" -Dlog4j.configuration=file:log4j.properties";
    }
    amCommand += " eu.stratosphere.yarn.ApplicationMaster" + " " + " 1>"
            + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/jobmanager-stdout.log" + " 2>"
            + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/jobmanager-stderr.log";
    amContainer.setCommands(Collections.singletonList(amCommand));

    System.err.println("amCommand=" + amCommand);

    // Set-up ApplicationSubmissionContext for the application
    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    final ApplicationId appId = appContext.getApplicationId();

    // Setup jar for ApplicationMaster
    LocalResource appMasterJar = Records.newRecord(LocalResource.class);
    LocalResource stratosphereConf = Records.newRecord(LocalResource.class);
    Path remotePathJar = Utils.setupLocalResource(conf, fs, appId.toString(), localJarPath, appMasterJar,
            fs.getHomeDirectory());
    Path remotePathConf = Utils.setupLocalResource(conf, fs, appId.toString(), confPath, stratosphereConf,
            fs.getHomeDirectory());
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>(2);
    localResources.put("stratosphere.jar", appMasterJar);
    localResources.put("stratosphere-conf.yaml", stratosphereConf);

    // setup security tokens (code from apache storm)
    final Path[] paths = new Path[3 + shipFiles.size()];
    StringBuffer envShipFileList = new StringBuffer();
    // upload ship files
    for (int i = 0; i < shipFiles.size(); i++) {
        File shipFile = shipFiles.get(i);
        LocalResource shipResources = Records.newRecord(LocalResource.class);
        Path shipLocalPath = new Path("file://" + shipFile.getAbsolutePath());
        paths[3 + i] = Utils.setupLocalResource(conf, fs, appId.toString(), shipLocalPath, shipResources,
                fs.getHomeDirectory());
        localResources.put(shipFile.getName(), shipResources);

        envShipFileList.append(paths[3 + i]);
        if (i + 1 < shipFiles.size()) {
            envShipFileList.append(',');
        }
    }

    paths[0] = remotePathJar;
    paths[1] = remotePathConf;
    paths[2] = new Path(fs.getHomeDirectory(), ".stratosphere/" + appId.toString() + "/");
    FsPermission permission = new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL);
    fs.setPermission(paths[2], permission); // set permission for path.
    Utils.setTokensFor(amContainer, paths, this.conf);

    amContainer.setLocalResources(localResources);
    fs.close();

    // Setup CLASSPATH for ApplicationMaster
    Map<String, String> appMasterEnv = new HashMap<String, String>();
    Utils.setupEnv(conf, appMasterEnv);
    // set configuration values
    appMasterEnv.put(Client.ENV_TM_COUNT, String.valueOf(taskManagerCount));
    appMasterEnv.put(Client.ENV_TM_CORES, String.valueOf(tmCores));
    appMasterEnv.put(Client.ENV_TM_MEMORY, String.valueOf(tmMemory));
    appMasterEnv.put(Client.STRATOSPHERE_JAR_PATH, remotePathJar.toString());
    appMasterEnv.put(Client.ENV_APP_ID, appId.toString());
    appMasterEnv.put(Client.ENV_CLIENT_HOME_DIR, fs.getHomeDirectory().toString());
    appMasterEnv.put(Client.ENV_CLIENT_SHIP_FILES, envShipFileList.toString());
    appMasterEnv.put(Client.ENV_CLIENT_USERNAME, UserGroupInformation.getCurrentUser().getShortUserName());

    amContainer.setEnvironment(appMasterEnv);

    // Set up resource type requirements for ApplicationMaster
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(jmMemory);
    capability.setVirtualCores(1);

    appContext.setApplicationName("Stratosphere"); // application name
    appContext.setAMContainerSpec(amContainer);
    appContext.setResource(capability);
    appContext.setQueue(queue);

    // file that we write into the conf/ dir containing the jobManager address.
    final File addrFile = new File(confDirPath + CliFrontend.JOBMANAGER_ADDRESS_FILE);

    Runtime.getRuntime().addShutdownHook(new Thread() {
        @Override
        public void run() {
            try {
                LOG.info("Killing the Stratosphere-YARN application.");
                yarnClient.killApplication(appId);
                LOG.info("Deleting files in " + paths[2]);
                FileSystem shutFS = FileSystem.get(conf);
                shutFS.delete(paths[2], true); // delete conf and jar file.
                shutFS.close();
            } catch (Exception e) {
                LOG.warn("Exception while killing the YARN application", e);
            }
            try {
                addrFile.delete();
            } catch (Exception e) {
                LOG.warn("Exception while deleting the jobmanager address file", e);
            }
            LOG.info("YARN Client is shutting down");
            yarnClient.stop();
        }
    });

    LOG.info("Submitting application master " + appId);
    yarnClient.submitApplication(appContext);
    ApplicationReport appReport = yarnClient.getApplicationReport(appId);
    YarnApplicationState appState = appReport.getYarnApplicationState();
    boolean told = false;
    char[] el = { '/', '|', '\\', '-' };
    int i = 0;
    while (appState != YarnApplicationState.FINISHED && appState != YarnApplicationState.KILLED
            && appState != YarnApplicationState.FAILED) {
        if (!told && appState == YarnApplicationState.RUNNING) {
            System.err
                    .println("Stratosphere JobManager is now running on " + appReport.getHost() + ":" + jmPort);
            System.err.println("JobManager Web Interface: " + appReport.getTrackingUrl());
            // write jobmanager connect information

            PrintWriter out = new PrintWriter(addrFile);
            out.println(appReport.getHost() + ":" + jmPort);
            out.close();
            addrFile.setReadable(true, false); // readable for all.
            told = true;
        }
        if (!told) {
            System.err.print(el[i++] + "\r");
            if (i == el.length) {
                i = 0;
            }
            Thread.sleep(500); // wait for the application to switch to RUNNING
        } else {
            Thread.sleep(5000);
        }

        appReport = yarnClient.getApplicationReport(appId);
        appState = appReport.getYarnApplicationState();
    }

    LOG.info("Application " + appId + " finished with" + " state " + appState + " at "
            + appReport.getFinishTime());
    if (appState == YarnApplicationState.FAILED || appState == YarnApplicationState.KILLED) {
        LOG.warn("Application failed. Diagnostics " + appReport.getDiagnostics());
    }

}

From source file:gobblin.util.filesystem.FileSystemInstrumentation.java

License:Apache License
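
This decorator seeds both scheme arguments of its superclass constructor from the wrapped filesystem's getScheme().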

public FileSystemInstrumentation(FileSystem underlying) {
    super(underlying.getScheme(), underlying.getScheme());
    this.underlyingFs = underlying;
}

From source file:gobblin.util.filesystem.InstrumentedFileSystem.java

License:Apache License
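
Similar to the previous example, the underlying filesystem's scheme is forwarded to the superclass constructor alongside the caller-supplied scheme.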

public InstrumentedFileSystem(String scheme, FileSystem underlyingFileSystem) {
    super(scheme, underlyingFileSystem.getScheme());
    this.underlyingFs = underlyingFileSystem;
}

From source file:io.druid.indexer.JobHelper.java

License:Apache License
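
After pushing the zipped segment, the output filesystem's scheme selects the segment load spec type: hdfs, s3/s3n, or file.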

public static DataSegment serializeOutIndex(final DataSegment segmentTemplate,
        final Configuration configuration, final Progressable progressable, final TaskAttemptID taskAttemptID,
        final File mergedBase, final Path segmentBasePath) throws IOException {
    final FileSystem outputFS = FileSystem.get(segmentBasePath.toUri(), configuration);
    final Path tmpPath = new Path(segmentBasePath, String.format("index.zip.%d", taskAttemptID.getId()));
    final AtomicLong size = new AtomicLong(0L);
    final DataPusher zipPusher = (DataPusher) RetryProxy.create(DataPusher.class, new DataPusher() {
        @Override
        public long push() throws IOException {
            try (OutputStream outputStream = outputFS.create(tmpPath, true, DEFAULT_FS_BUFFER_SIZE,
                    progressable)) {
                size.set(zipAndCopyDir(mergedBase, outputStream, progressable));
                outputStream.flush();
            } catch (IOException | RuntimeException exception) {
                log.error(exception, "Exception in retry loop");
                throw exception;
            }
            return -1;
        }
    }, RetryPolicies.exponentialBackoffRetry(NUM_RETRIES, SECONDS_BETWEEN_RETRIES, TimeUnit.SECONDS));
    zipPusher.push();
    log.info("Zipped %,d bytes to [%s]", size.get(), tmpPath.toUri());

    final Path finalIndexZipFilePath = new Path(segmentBasePath, "index.zip");
    final URI indexOutURI = finalIndexZipFilePath.toUri();
    final ImmutableMap<String, Object> loadSpec;
    // TODO: Make this a part of Pushers or Pullers
    switch (outputFS.getScheme()) {
    case "hdfs":
        loadSpec = ImmutableMap.<String, Object>of("type", "hdfs", "path", indexOutURI.toString());
        break;
    case "s3":
    case "s3n":
        loadSpec = ImmutableMap.<String, Object>of("type", "s3_zip", "bucket", indexOutURI.getHost(), "key",
                indexOutURI.getPath().substring(1) // remove the leading "/"
        );
        break;
    case "file":
        loadSpec = ImmutableMap.<String, Object>of("type", "local", "path", indexOutURI.getPath());
        break;
    default:
        throw new IAE("Unknown file system scheme [%s]", outputFS.getScheme());
    }
    final DataSegment finalSegment = segmentTemplate.withLoadSpec(loadSpec).withSize(size.get())
            .withBinaryVersion(SegmentUtils.getVersionFromDir(mergedBase));

    if (!renameIndexFiles(outputFS, tmpPath, finalIndexZipFilePath)) {
        throw new IOException(String.format("Unable to rename [%s] to [%s]", tmpPath.toUri().toString(),
                finalIndexZipFilePath.toUri().toString()));
    }
    writeSegmentDescriptor(outputFS, finalSegment, new Path(segmentBasePath, "descriptor.json"), progressable);
    return finalSegment;
}

From source file:io.druid.indexer.JobHelper.java

License:Apache License
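
Segment output paths on HDFS use colon-free, basic ISO timestamps, so the scheme is compared against "hdfs" to pick the naming convention.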

public static Path makeSegmentOutputPath(Path basePath, FileSystem fileSystem, String dataSource,
        String version, Interval interval, int partitionNum) {
    Path outputPath = new Path(prependFSIfNullScheme(fileSystem, basePath), "./" + dataSource);
    if ("hdfs".equals(fileSystem.getScheme())) {
        outputPath = new Path(outputPath,
                String.format("./%s_%s", interval.getStart().toString(ISODateTimeFormat.basicDateTime()),
                        interval.getEnd().toString(ISODateTimeFormat.basicDateTime())));
        outputPath = new Path(outputPath, version.replace(":", "_"));
    } else {
        outputPath = new Path(outputPath,
                String.format("./%s_%s", interval.getStart().toString(), interval.getEnd().toString()));
        outputPath = new Path(outputPath, String.format("./%s", version));
    }
    outputPath = new Path(outputPath, Integer.toString(partitionNum));
    return outputPath;
}

From source file:io.druid.storage.hdfs.HdfsDataSegmentFinder.java

License:Apache License
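
While scanning the working directory for segment descriptors, the finder logs the filesystem's scheme alongside its URI.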

@Override
public Set<DataSegment> findSegments(String workingDirPathStr, boolean updateDescriptor)
        throws SegmentLoadingException {
    final Set<DataSegment> segments = Sets.newHashSet();
    final Path workingDirPath = new Path(workingDirPathStr);
    FileSystem fs;
    try {
        fs = workingDirPath.getFileSystem(config);

        log.info(fs.getScheme());
        log.info("FileSystem URI:" + fs.getUri().toString());

        if (!fs.exists(workingDirPath)) {
            throw new SegmentLoadingException("Working directory [%s] doesn't exist.", workingDirPath);
        }

        if (!fs.isDirectory(workingDirPath)) {
            throw new SegmentLoadingException("Working directory [%s] is not a directory!?", workingDirPath);
        }

        final RemoteIterator<LocatedFileStatus> it = fs.listFiles(workingDirPath, true);
        while (it.hasNext()) {
            final LocatedFileStatus locatedFileStatus = it.next();
            final Path path = locatedFileStatus.getPath();
            if (path.getName().endsWith("descriptor.json")) {
                final Path indexZip;
                final String[] descriptorParts = path.getName().split("_");
                if (descriptorParts.length == 2 && descriptorParts[1].equals("descriptor.json")
                        && org.apache.commons.lang.StringUtils.isNumeric(descriptorParts[0])) {
                    indexZip = new Path(path.getParent(),
                            StringUtils.format("%s_index.zip", descriptorParts[0]));
                } else {
                    indexZip = new Path(path.getParent(), "index.zip");
                }
                if (fs.exists(indexZip)) {
                    final DataSegment dataSegment = mapper.readValue(fs.open(path), DataSegment.class);
                    log.info("Found segment [%s] located at [%s]", dataSegment.getIdentifier(), indexZip);

                    final Map<String, Object> loadSpec = dataSegment.getLoadSpec();
                    final String pathWithoutScheme = indexZip.toUri().getPath();

                    if (!loadSpec.get("type").equals(HdfsStorageDruidModule.SCHEME)
                            || !loadSpec.get("path").equals(pathWithoutScheme)) {
                        loadSpec.put("type", HdfsStorageDruidModule.SCHEME);
                        loadSpec.put("path", pathWithoutScheme);
                        if (updateDescriptor) {
                            log.info("Updating loadSpec in descriptor.json at [%s] with new path [%s]", path,
                                    pathWithoutScheme);
                            mapper.writeValue(fs.create(path, true), dataSegment);
                        }
                    }
                    segments.add(dataSegment);
                } else {
                    throw new SegmentLoadingException(
                            "index.zip didn't exist at [%s] while descripter.json exists!?", indexZip);
                }
            }
        }
    } catch (IOException e) {
        throw new SegmentLoadingException(e, "Problems interacting with filesystem[%s].", workingDirPath);
    }

    return segments;
}