Example usage for org.apache.hadoop.fs Path getFileSystem

List of usage examples for org.apache.hadoop.fs Path getFileSystem

Introduction

On this page you can find usage examples of org.apache.hadoop.fs.Path#getFileSystem.

Prototype

public FileSystem getFileSystem(Configuration conf) throws IOException 

Document

Return the FileSystem that owns this Path.
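
Before the collected examples, here is a minimal, self-contained sketch of the call. The class name and the /tmp path are illustrative only (they do not come from the examples below): the snippet resolves the FileSystem that owns a Path and checks whether the path exists.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class GetFileSystemExample {
    public static void main(String[] args) throws IOException {
        // fs.defaultFS in the configuration decides which FileSystem owns the path
        // (local file system, HDFS, S3A, ...).
        Configuration conf = new Configuration();
        Path path = new Path("/tmp/example.txt"); // illustrative path
        FileSystem fs = path.getFileSystem(conf);
        System.out.println("exists: " + fs.exists(path));
    }
}

Note that getFileSystem declares IOException, so callers either propagate it or wrap it, as the examples below do.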

Usage

From source file:com.asakusafw.operation.tools.directio.file.DataSourceParameter.java

License:Apache License

/**
 * Returns the Hadoop file system for the path.
 * @param path the target path
 * @return the corresponding file system object
 */
public org.apache.hadoop.fs.FileSystem getHadoopFileSystem(org.apache.hadoop.fs.Path path) {
    try {
        return path.getFileSystem(getConfiguration());
    } catch (IOException e) {
        throw new CommandConfigurationException(
                MessageFormat.format("error occurred while resolving Hadoop path: {0} ({1})", path,
                        Optional.ofNullable(e.getMessage()).orElseGet(() -> e.toString())),
                e);
    }
}

From source file:com.asakusafw.operation.tools.directio.file.FileMakeDirectoryCommand.java

License:Apache License

private void mkdir(PrintWriter writer, Path path) {
    LOG.debug("mkdir: {}", path);
    try {
        FileSystem fs = path.getFileSystem(dataSourceParameter.getConfiguration());
        if (stat(fs, path).filter(FileStatus::isDirectory).isPresent()) {
            verboseParameter.printf(writer, "already exists: %s%n", path);
        } else {
            if (fs.mkdirs(path) == false
                    && stat(fs, path).filter(s -> s.isDirectory() == false).isPresent() == false) {
                throw new CommandException(MessageFormat.format("cannot create directory: {0}", path));
            }
            verboseParameter.printf(writer, "create directory: %s%n", path);
        }
    } catch (IOException e) {
        throw new CommandException(MessageFormat.format("error occurred while creating directory: {0}", path),
                e);
    }
}

From source file:com.asakusafw.runtime.directio.hadoop.DirectIoTransactionEditor.java

License:Apache License

/**
 * Returns the corresponded transaction information to the execution ID.
 * @param executionId target ID
 * @return the corresponding transaction information, or {@code null} if it does not exist
 * @throws IOException if failed to obtain information
 * @throws IllegalArgumentException if some parameters were {@code null}
 */
public TransactionInfo get(String executionId) throws IOException {
    if (executionId == null) {
        throw new IllegalArgumentException("executionId must not be null"); //$NON-NLS-1$
    }
    Path path = HadoopDataSourceUtil.getTransactionInfoPath(getConf(), executionId);
    try {
        FileStatus status = path.getFileSystem(getConf()).getFileStatus(path);
        return toInfoObject(status);
    } catch (FileNotFoundException e) {
        return null;
    }
}

From source file:com.asakusafw.runtime.directio.hadoop.DirectIoTransactionEditor.java

License:Apache License

private TransactionInfo toInfoObject(FileStatus stat) throws IOException {
    assert stat != null;
    Path path = stat.getPath();
    String executionId = HadoopDataSourceUtil.getTransactionInfoExecutionId(path);
    long timestamp = stat.getModificationTime();
    List<String> comment = new ArrayList<>();
    Path commitMarkPath = HadoopDataSourceUtil.getCommitMarkPath(getConf(), executionId);
    FileSystem fs = path.getFileSystem(getConf());
    boolean committed = fs.exists(commitMarkPath);
    try (FSDataInputStream input = fs.open(path);
            Scanner scanner = new Scanner(new InputStreamReader(input, HadoopDataSourceUtil.COMMENT_CHARSET))) {
        while (scanner.hasNextLine()) {
            comment.add(scanner.nextLine());
        }
    } catch (IOException e) {
        comment.add(e.toString());
    }

    return new TransactionInfo(executionId, timestamp, committed, comment);
}

From source file:com.asakusafw.runtime.directio.hadoop.DirectIoTransactionEditor.java

License:Apache License

private boolean doApply(String executionId) throws IOException, InterruptedException {
    assert executionId != null;
    Path transactionInfo = HadoopDataSourceUtil.getTransactionInfoPath(getConf(), executionId);
    Path commitMark = HadoopDataSourceUtil.getCommitMarkPath(getConf(), executionId);
    FileSystem fs = commitMark.getFileSystem(getConf());
    if (fs.exists(transactionInfo) == false) {
        return false;
    }
    boolean succeed = true;
    if (fs.exists(commitMark) == false) {
        // FIXME cleanup
        return false;
    }
    DirectDataSourceRepository repo = getRepository();
    for (String containerPath : repo.getContainerPaths()) {
        String datasourceId = repo.getRelatedId(containerPath);
        try {
            DirectDataSource datasource = repo.getRelatedDataSource(containerPath);
            OutputTransactionContext context = HadoopDataSourceUtil.createContext(executionId, datasourceId);
            datasource.commitTransactionOutput(context);
            datasource.cleanupTransactionOutput(context);
        } catch (IOException e) {
            succeed = false;
            LOG.error(MessageFormat.format("Failed to apply transaction (datastoreId={0}, executionId={1})",
                    datasourceId, executionId), e);
        }
    }
    if (succeed) {
        LOG.info(MessageFormat.format("Deleting commit mark (executionId={0}, path={1})", executionId,
                commitMark));
        try {
            if (fs.delete(commitMark, true) == false) {
                LOG.warn(MessageFormat.format("Failed to delete commit mark (executionId={0}, path={1})",
                        executionId, commitMark));
            } else if (fs.delete(transactionInfo, true) == false) {
                LOG.warn(MessageFormat.format("Failed to delete transaction info (executionId={0}, path={1})",
                        executionId, transactionInfo));
            }
        } catch (FileNotFoundException e) {
            LOG.warn(MessageFormat.format("Failed to delete commit mark (executionId={0}, path={1})",
                    executionId, commitMark), e);
        }
        return true;
    } else {
        throw new IOException(MessageFormat.format("Failed to apply this transaction (executionId={0});"
                + " if you want to ignore this transaction, please abort this.", executionId));
    }
}

From source file:com.asakusafw.runtime.directio.hadoop.DirectIoTransactionEditor.java

License:Apache License

private boolean doAbort(String executionId) throws IOException, InterruptedException {
    assert executionId != null;
    Path transactionInfo = HadoopDataSourceUtil.getTransactionInfoPath(getConf(), executionId);
    Path commitMark = HadoopDataSourceUtil.getCommitMarkPath(getConf(), executionId);
    FileSystem fs = commitMark.getFileSystem(getConf());
    if (fs.exists(transactionInfo) == false) {
        return false;
    }
    boolean succeed = true;
    if (fs.exists(commitMark)) {
        LOG.info(MessageFormat.format("Deleting commit mark (executionId={0}, path={1})", executionId,
                commitMark));
        if (fs.delete(commitMark, true) == false) {
            succeed = false;
            LOG.warn(MessageFormat.format("Failed to delete commit mark (executionId={0}, path={1})",
                    executionId, commitMark));
        }
    }
    DirectDataSourceRepository repo = getRepository();
    for (String containerPath : repo.getContainerPaths()) {
        String datasourceId = repo.getRelatedId(containerPath);
        try {
            DirectDataSource datasource = repo.getRelatedDataSource(containerPath);
            OutputTransactionContext context = HadoopDataSourceUtil.createContext(executionId, datasourceId);
            datasource.cleanupTransactionOutput(context);
        } catch (IOException e) {
            succeed = false;
            LOG.error(MessageFormat.format("Failed to abort transaction (datastoreId={0}, executionId={1})",
                    datasourceId, executionId), e);
        }
    }
    if (succeed) {
        LOG.info(MessageFormat.format("Deleting transaction info (executionId={0}, path={1})", executionId,
                transactionInfo));
        try {
            if (fs.delete(transactionInfo, true) == false) {
                LOG.warn(MessageFormat.format("Failed to delete transaction info (executionId={0}, path={1})",
                        executionId, transactionInfo));
            }
        } catch (FileNotFoundException e) {
            LOG.warn(MessageFormat.format("Failed to delete transaction info (executionId={0}, path={1})",
                    executionId, transactionInfo), e);
        }
        return true;
    } else {
        throw new IOException(MessageFormat.format(
                "Failed to abort this transaction (executionId={0});"
                        + " if you want to ignore this transaction, please delete {1} manually.",
                executionId, transactionInfo));
    }
}

From source file:com.asakusafw.runtime.directio.hadoop.DirectIoTransactionEditorTest.java

License:Apache License

private void indoubt(String executionId) throws IOException, InterruptedException {
    Path txPath = HadoopDataSourceUtil.getTransactionInfoPath(conf, executionId);
    Path cmPath = HadoopDataSourceUtil.getCommitMarkPath(conf, executionId);
    FileSystem fs = txPath.getFileSystem(conf);
    fs.create(txPath).close();
    fs.create(cmPath).close();
    int index = 0;
    for (String path : repo.getContainerPaths()) {
        String id = repo.getRelatedId(path);
        DirectDataSource ds = repo.getRelatedDataSource(path);
        OutputTransactionContext txContext = HadoopDataSourceUtil.createContext(executionId, id);
        OutputAttemptContext aContext = new OutputAttemptContext(txContext.getTransactionId(),
                String.valueOf(index), txContext.getOutputId(), new Counter());

        ds.setupTransactionOutput(txContext);
        ds.setupAttemptOutput(aContext);
        try (ModelOutput<StringBuilder> output = ds.openOutput(aContext,
                SimpleDataDefinition.newInstance(StringBuilder.class, new MockFormat()), "", executionId,
                new Counter())) {
            output.write(new StringBuilder("Hello, world!"));
        }
        ds.commitAttemptOutput(aContext);
        ds.cleanupAttemptOutput(aContext);

        index++;
    }
}

From source file:com.asakusafw.runtime.directio.hadoop.HadoopDataSourceProfile.java

License:Apache License

/**
 * Creates a new instance.
 * @param conf the current configuration
 * @param id the ID of this datasource
 * @param contextPath the logical context path
 * @param fileSystemPath the mapping target path
 * @param temporaryPath the temporary root path
 * @throws IOException if failed to create profile
 * @throws IllegalArgumentException if some parameters were {@code null}
 */
public HadoopDataSourceProfile(Configuration conf, String id, String contextPath, Path fileSystemPath,
        Path temporaryPath) throws IOException {
    this.id = id;
    this.contextPath = contextPath;
    this.fileSystemPath = fileSystemPath;
    this.temporaryPath = temporaryPath;
    this.fileSystem = fileSystemPath.getFileSystem(conf);
    this.localFileSystem = FileSystem.getLocal(conf);
}

From source file:com.asakusafw.runtime.directio.hadoop.HadoopDataSourceProfile.java

License:Apache License

/**
 * Converts the {@link DirectDataSourceProfile} into this profile.
 * @param profile target profile
 * @param conf Hadoop configuration
 * @return the converted profile
 * @throws IOException if failed to convert
 * @throws IllegalArgumentException if some parameters were {@code null}
 */
public static HadoopDataSourceProfile convert(DirectDataSourceProfile profile, Configuration conf)
        throws IOException {
    if (profile == null) {
        throw new IllegalArgumentException("profile must not be null"); //$NON-NLS-1$
    }
    if (conf == null) {
        throw new IllegalArgumentException("conf must not be null"); //$NON-NLS-1$
    }
    Map<String, String> attributes = new HashMap<>(profile.getAttributes());
    Path fsPath = takeFsPath(profile, attributes, conf);
    if (fsPath == null) {
        throw new IOException(MessageFormat.format(
                "The directio configuration \"{0} ({1})\" does not have \"{2}\"", profile.getId(),
                profile.getPath().isEmpty() ? ROOT_REPRESENTATION : profile.getPath(), fqn(profile, KEY_PATH)));
    }
    Path tempPath = takeTempPath(profile, attributes, conf, fsPath);
    FileSystem fileSystem = fsPath.getFileSystem(conf);
    FileSystem tempFs = tempPath.getFileSystem(conf);
    if (getFsIdentity(fileSystem).equals(getFsIdentity(tempFs)) == false) {
        throw new IOException(MessageFormat.format(
                "The directio target and temporary path must be on same file system ({0}={1} <=> {2}={3})",
                fqn(profile, KEY_PATH), fsPath, fqn(profile, KEY_TEMP), tempPath));
    }
    fsPath = fsPath.makeQualified(fileSystem);
    tempPath = tempPath.makeQualified(fileSystem);
    HadoopDataSourceProfile result = new HadoopDataSourceProfile(conf, profile.getId(), profile.getPath(),
            fsPath, tempPath);
    long minFragment = takeMinFragment(profile, attributes, conf);
    result.setMinimumFragmentSize(minFragment);
    long prefFragment = takePrefFragment(profile, attributes, conf);
    result.setPreferredFragmentSize(prefFragment);
    result.setOutputStaging(takeBoolean(profile, attributes, KEY_OUTPUT_STAGING, DEFAULT_OUTPUT_STAGING));
    result.setOutputStreaming(takeBoolean(profile, attributes, KEY_OUTPUT_STREAMING, DEFAULT_OUTPUT_STREAMING));
    result.setSplitBlocks(takeBoolean(profile, attributes, KEY_SPLIT_BLOCKS, DEFAULT_SPLIT_BLOCKS));
    result.setCombineBlocks(takeBoolean(profile, attributes, KEY_COMBINE_BLOCKS, DEFAULT_COMBINE_BLOCKS));
    result.setKeepAliveInterval(takeKeepAliveInterval(profile, attributes, conf));

    if (attributes.isEmpty() == false) {
        throw new IOException(MessageFormat.format("Unknown attributes in \"{0}\": {1}", profile.getId(),
                new TreeSet<>(attributes.keySet())));
    }
    return result;
}

From source file:com.asakusafw.runtime.directio.hadoop.HadoopDataSourceUtil.java

License:Apache License

/**
 * Returns all transaction info files.
 * @param conf the current configuration
 * @return the transaction info files
 * @throws IOException if failed to find files by I/O error
 * @throws IllegalArgumentException if some parameters were {@code null}
 */
public static Collection<FileStatus> findAllTransactionInfoFiles(Configuration conf) throws IOException {
    if (conf == null) {
        throw new IllegalArgumentException("conf must not be null"); //$NON-NLS-1$
    }
    Path dir = getTransactionInfoDir(conf);
    FileSystem fs = dir.getFileSystem(conf);
    FileStatus[] statusArray;
    try {
        statusArray = fs.listStatus(dir);
    } catch (FileNotFoundException e) {
        statusArray = null;
        if (LOG.isDebugEnabled()) {
            LOG.debug(MessageFormat.format("Target file is not found: {0}", dir), e); //$NON-NLS-1$
        }
    }
    if (statusArray == null || statusArray.length == 0) {
        return Collections.emptyList();
    }
    Collection<FileStatus> results = new ArrayList<>();
    for (FileStatus stat : statusArray) {
        if (getTransactionInfoExecutionId(stat.getPath()) != null) {
            results.add(stat);
        }
    }
    return results;
}