Example usage for org.apache.hadoop.fs.Path.SEPARATOR

Introduction

This page collects usage examples for org.apache.hadoop.fs.Path.SEPARATOR.

Prototype

public static final String SEPARATOR

Document

The directory separator, a slash.
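
Path.SEPARATOR is always the forward slash, whatever the host operating system, which is why the examples below use it (rather than the platform-dependent java.io.File.separator) to build HDFS paths. A minimal sketch of typical use; the directory and file names are illustrative:

import org.apache.hadoop.fs.Path;

public class PathSeparatorExample {
    public static void main(String[] args) {
        // Join path segments with the HDFS separator ("/" on every platform).
        String dataDir = "/user/hive/warehouse";
        String fileName = "part-00000";
        String fullPath = dataDir + Path.SEPARATOR + fileName;
        System.out.println(fullPath); // /user/hive/warehouse/part-00000

        // Path normalizes the string form; getName() returns the last component.
        Path p = new Path(fullPath);
        System.out.println(p.getName()); // part-00000
    }
}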

Usage

From source file:org.apache.pig.tez.TestTezJobExecution.java

License:Apache License

@Test
public void testUnionParallelRoundRobinBatchSize() throws IOException {
    String input = TEST_DIR + Path.SEPARATOR + "input1";
    String output = TEST_DIR + Path.SEPARATOR + "output1";
    Util.createInputFile(pigServer.getPigContext(), input, new String[] { "1", "1", "1", "2", "2", "2" });
    String query = "A = LOAD '" + input + "';" + "B = LOAD '" + input + "';" + "C = UNION A, B PARALLEL 2;"
            + "STORE C into '" + output + "';";
    pigServer.getPigContext().getProperties()
            .setProperty(RoundRobinPartitioner.PIG_ROUND_ROBIN_PARTITIONER_BATCH_SIZE, "3");
    pigServer.registerQuery(query);
    String part0 = FileUtils.readFileToString(new File(output + Path.SEPARATOR + "part-v002-o000-r-00000"));
    String part1 = FileUtils.readFileToString(new File(output + Path.SEPARATOR + "part-v002-o000-r-00001"));
    assertEquals("1\n1\n1\n1\n1\n1\n", part0);
    assertEquals("2\n2\n2\n2\n2\n2\n", part1);
}
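
With RoundRobinPartitioner batching records in groups of three and the union running with PARALLEL 2, each load's three "1" records go to the first partition as one batch and its three "2" records to the second, so the two part files contain six "1"s and six "2"s respectively, which is what the assertions verify.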

From source file:org.apache.ranger.audit.destination.HDFSAuditDestination.java

License:Apache License

synchronized private PrintWriter getLogFileStream() throws Exception {
    closeFileIfNeeded();

    // Either there is no open log file or the previous one has been rolled over
    if (logWriter == null) {
        Date currentTime = new Date();
        // Create a new file
        String fileName = MiscUtil.replaceTokens(logFileNameFormat, currentTime.getTime());
        String parentFolder = MiscUtil.replaceTokens(logFolder, currentTime.getTime());
        Configuration conf = createConfiguration();

        String fullPath = parentFolder + Path.SEPARATOR + fileName;
        String defaultPath = fullPath;
        URI uri = URI.create(fullPath);
        FileSystem fileSystem = FileSystem.get(uri, conf);

        Path hdfPath = new Path(fullPath);
        logger.info("Checking whether log file exists. hdfPath=" + fullPath + ", UGI="
                + MiscUtil.getUGILoginUser());
        int i = 0;
        // If the file already exists, insert a numeric suffix before the
        // extension (e.g. audit.log -> audit.1.log) until an unused name is found
        while (fileSystem.exists(hdfPath)) {
            i++;
            int lastDot = defaultPath.lastIndexOf('.');
            String baseName = defaultPath.substring(0, lastDot);
            String extension = defaultPath.substring(lastDot);
            fullPath = baseName + "." + i + extension;
            hdfPath = new Path(fullPath);
            logger.info("Checking whether log file exists. hdfPath=" + fullPath);
        }
        logger.info("Log file doesn't exists. Will create and use it. hdfPath=" + fullPath);
        // Create parent folders
        createParents(hdfPath, fileSystem);

        // Create the file to write
        logger.info("Creating new log file. hdfPath=" + fullPath);
        FSDataOutputStream ostream = fileSystem.create(hdfPath);
        logWriter = new PrintWriter(ostream);
        fileCreateTime = new Date();
        currentFileName = fullPath;
    }
    return logWriter;
}

From source file:org.apache.ranger.audit.provider.hdfs.HdfsLogDestination.java

License:Apache License

private void openFile() {
    mLogger.debug("==> HdfsLogDestination.openFile()");

    closeFile();

    mNextRolloverTime = MiscUtil.getNextRolloverTime(mNextRolloverTime, (mRolloverIntervalSeconds * 1000L));

    long startTime = MiscUtil.getRolloverStartTime(mNextRolloverTime, (mRolloverIntervalSeconds * 1000L));

    mHdfsFilename = MiscUtil.replaceTokens(mDirectory + Path.SEPARATOR + mFile, startTime);

    FSDataOutputStream ostream = null;
    FileSystem fileSystem = null;
    Path pathLogfile = null;
    Configuration conf = null;
    boolean bOverwrite = false;

    try {
        mLogger.debug("HdfsLogDestination.openFile(): opening file " + mHdfsFilename);

        URI uri = URI.create(mHdfsFilename);

        // TODO: mechanism to XA-HDFS plugin to disable auditing of access checks to the current HDFS file

        conf = createConfiguration();
        pathLogfile = new Path(mHdfsFilename);
        fileSystem = FileSystem.get(uri, conf);

        try {
            if (fileSystem.exists(pathLogfile)) { // file already exists. either append to the file or write to a new file
                if (mIsAppend) {
                    mLogger.info("HdfsLogDestination.openFile(): opening file for append " + mHdfsFilename);

                    ostream = fileSystem.append(pathLogfile);
                } else {
                    mHdfsFilename = getNewFilename(mHdfsFilename, fileSystem);
                    pathLogfile = new Path(mHdfsFilename);
                }
            }

            // if file does not exist or if mIsAppend==false, create the file
            if (ostream == null) {
                mLogger.info("HdfsLogDestination.openFile(): opening file for write " + mHdfsFilename);

                createParents(pathLogfile, fileSystem);
                ostream = fileSystem.create(pathLogfile, bOverwrite);
            }
        } catch (IOException excp) {
            // append may not be supported by the filesystem; or the file might already be open by another application. Try a different filename
            String failedFilename = mHdfsFilename;

            mHdfsFilename = getNewFilename(mHdfsFilename, fileSystem);
            pathLogfile = new Path(mHdfsFilename);

            mLogger.info("HdfsLogDestination.openFile(): failed in opening file " + failedFilename
                    + ". Will try opening " + mHdfsFilename);
        }

        if (ostream == null) {
            mLogger.info("HdfsLogDestination.openFile(): opening file for write " + mHdfsFilename);

            createParents(pathLogfile, fileSystem);
            ostream = fileSystem.create(pathLogfile, bOverwrite);
        }
    } catch (Throwable ex) {
        mLogger.warn("HdfsLogDestination.openFile() failed", ex);
        //      } finally {
        // TODO: unset the property set above to exclude auditing of logfile opening
        //        System.setProperty(hdfsCurrentFilenameProperty, null);
    }

    mWriter = createWriter(ostream);

    if (mWriter != null) {
        mLogger.debug("HdfsLogDestination.openFile(): opened file " + mHdfsFilename);

        mFsDataOutStream = ostream;
        mNextFlushTime = System.currentTimeMillis() + (mFlushIntervalSeconds * 1000L);
        mLastOpenFailedTime = 0;
    } else {
        mLogger.warn("HdfsLogDestination.openFile(): failed to open file for write " + mHdfsFilename);

        mHdfsFilename = null;
        mLastOpenFailedTime = System.currentTimeMillis();
    }

    mLogger.debug("<== HdfsLogDestination.openFile(" + mHdfsFilename + ")");
}

From source file:org.apache.ranger.plugin.store.file.BaseFileStore.java

License:Apache License

protected String getServiceDefFile(Long id) {
    String filePath = dataDir + Path.SEPARATOR + FILE_PREFIX_SERVICE_DEF + id + FILE_SUFFIX_JSON;

    return filePath;
}

From source file:org.apache.ranger.plugin.store.file.BaseFileStore.java

License:Apache License

protected String getServiceFile(Long id) {
    String filePath = dataDir + Path.SEPARATOR + FILE_PREFIX_SERVICE + id + FILE_SUFFIX_JSON;

    return filePath;
}

From source file:org.apache.ranger.plugin.store.file.BaseFileStore.java

License:Apache License

protected String getPolicyFile(Long serviceId, Long policyId) {
    String filePath = dataDir + Path.SEPARATOR + FILE_PREFIX_POLICY + serviceId + "-" + policyId
            + FILE_SUFFIX_JSON;

    return filePath;
}

From source file:org.apache.ranger.plugin.store.file.FileStoreUtil.java

License:Apache License

public String getDataFile(String filePrefix, Long id) {
    String filePath = dataDir + Path.SEPARATOR + filePrefix + id + FILE_SUFFIX_JSON;

    return filePath;
}

From source file:org.apache.ranger.plugin.store.file.FileStoreUtil.java

License:Apache License

public String getDataFile(String filePrefix, Long parentId, Long objectId) {
    String filePath = dataDir + Path.SEPARATOR + filePrefix + parentId + "-" + objectId + FILE_SUFFIX_JSON;

    return filePath;
}
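
The two FileStoreUtil overloads generalize the BaseFileStore helpers above: the file-type prefix (service definition, service, or policy) becomes a parameter instead of being baked into a dedicated method, while the path layout stays the same.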

From source file:org.apache.reef.io.data.output.TaskOutputStreamProviderHDFS.java

License:Apache License

/**
 * Create a file output stream with the given name.
 * The path of the file on HDFS is 'outputPath/name/taskId'.
 *
 * @param name name of the created output stream.
 *             It is used as the name of the directory if the created output stream is a file output stream.
 * @return OutputStream to a file on HDFS. The path of the file is 'outputPath/name/taskId'.
 * @throws java.io.IOException
 */
@Override
public DataOutputStream create(final String name) throws IOException {
    final String directoryPath = outputPath + Path.SEPARATOR + name;
    if (!fs.exists(new Path(directoryPath))) {
        fs.mkdirs(new Path(directoryPath));
    }
    return fs.create(new Path(directoryPath + Path.SEPARATOR + getTaskId()));
}

From source file:org.apache.sentry.hdfs.HMSPaths.java

License:Apache License

public HMSPaths(String[] pathPrefixes) {
    boolean rootPrefix = false;
    // Copy the array to avoid external modification
    this.prefixes = Arrays.copyOf(pathPrefixes, pathPrefixes.length);
    for (String pathPrefix : pathPrefixes) {
        rootPrefix = rootPrefix || pathPrefix.equals(Path.SEPARATOR);
    }
    if (rootPrefix && pathPrefixes.length > 1) {
        throw new IllegalArgumentException("Root is a path prefix, there cannot be other path prefixes");
    }
    root = Entry.createRoot(rootPrefix);
    if (!rootPrefix) {
        for (String pathPrefix : pathPrefixes) {
            root.createPrefix(getPathElements(pathPrefix));
        }
    }

    authzObjToEntries = new TreeMap<String, Set<Entry>>(String.CASE_INSENSITIVE_ORDER);
    LOG.info("Sentry managed prefixes: " + prefixes.toString());
}
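
A short sketch of the constructor's contract, derived from the code above (the warehouse path is illustrative):

// Prefixes are the rooted subtrees that Sentry manages.
HMSPaths paths = new HMSPaths(new String[] { "/user/hive/warehouse" });

// Root ("/") may only appear alone; mixing it with other prefixes
// throws IllegalArgumentException:
// new HMSPaths(new String[] { Path.SEPARATOR, "/tmp" });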