Example usage for org.apache.hadoop.fs Path SEPARATOR

Introduction

On this page you can find usage examples for org.apache.hadoop.fs Path.SEPARATOR.

Prototype

public static final String SEPARATOR = "/";

Document

The directory separator, a slash.
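
For orientation, here is a minimal, self-contained sketch of how the constant is typically used. The directory and file names are hypothetical, chosen only for illustration; the point is that Path.SEPARATOR is always a forward slash regardless of the local operating system (unlike java.io.File.separator), and that the two-argument Path constructor can often replace manual concatenation.

import org.apache.hadoop.fs.Path;

public class PathSeparatorExample {
    public static void main(String[] args) {
        // Hypothetical path components, for illustration only.
        String baseDir = "/user/hadoop/output";
        String fileName = "part-00000";

        // Path.SEPARATOR is the String "/"; Path.SEPARATOR_CHAR is the char '/'.
        Path byConcatenation = new Path(baseDir + Path.SEPARATOR + fileName);

        // Equivalent result via the two-argument constructor, which inserts
        // the separator itself.
        Path byConstructor = new Path(baseDir, fileName);

        System.out.println(byConcatenation); // /user/hadoop/output/part-00000
        System.out.println(byConstructor);   // /user/hadoop/output/part-00000
    }
}

Most of the examples below build paths by concatenating strings with Path.SEPARATOR; the constructor form is a common alternative when no intermediate string value is needed.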

Usage

From source file: com.datasalt.pangool.solr.SolrRecordWriter.java

License: Apache License

static String relativePathForZipEntry(final String rawPath, final String baseName, final String root) {
    String relativePath = rawPath.replaceFirst(Pattern.quote(root), "");
    LOG.info(String.format("RawPath %s, baseName %s, root %s, first %s", rawPath, baseName, root,
            relativePath));

    if (relativePath.startsWith(Path.SEPARATOR)) {
        relativePath = relativePath.substring(1);
    }
    LOG.info(String.format("RawPath %s, baseName %s, root %s, post leading slash %s", rawPath, baseName, root,
            relativePath));
    if (relativePath.isEmpty()) {
        LOG.warn(String.format("No data after root (%s) removal from raw path %s", root, rawPath));
        return baseName;
    }
    // Construct the path that will be written to the zip file, including
    // removing any leading '/' characters
    String inZipPath = baseName + Path.SEPARATOR_CHAR + relativePath;

    LOG.info(String.format("RawPath %s, baseName %s, root %s, inZip 1 %s", rawPath, baseName, root, inZipPath));
    if (inZipPath.startsWith(Path.SEPARATOR)) {
        inZipPath = inZipPath.substring(1);
    }
    LOG.info(String.format("RawPath %s, baseName %s, root %s, inZip 2 %s", rawPath, baseName, root, inZipPath));

    return inZipPath;

}

From source file: com.datatorrent.common.util.AsyncFSStorageAgent.java

License: Apache License

public void copyToHDFS(final int operatorId, final long windowId) throws IOException {
    if (this.localBasePath == null) {
        throw new AssertionError("save() was not called before copyToHDFS");
    }
    String operatorIdStr = String.valueOf(operatorId);
    File directory = new File(localBasePath, operatorIdStr);
    String window = Long.toHexString(windowId);
    Path lPath = new Path(path + Path.SEPARATOR + operatorIdStr + Path.SEPARATOR + TMP_FILE);
    File srcFile = new File(directory, String.valueOf(windowId));
    FSDataOutputStream stream = null;
    boolean stateSaved = false;
    try {
        // Create the temporary file with the OVERWRITE flag to avoid dangling-lease
        // issues and an exception if the file already exists
        stream = fileContext.create(lPath, EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
                Options.CreateOpts.CreateParent.createParent());
        InputStream in = null;
        try {
            in = new FileInputStream(srcFile);
            IOUtils.copyBytes(in, stream, conf, false);
        } finally {
            IOUtils.closeStream(in);
        }
        stateSaved = true;
    } catch (Throwable t) {
        logger.debug("while saving {} {}", operatorId, window, t);
        stateSaved = false;
        throw Throwables.propagate(t);
    } finally {
        try {
            if (stream != null) {
                stream.close();
            }
        } catch (IOException ie) {
            stateSaved = false;
            throw new RuntimeException(ie);
        } finally {
            if (stateSaved) {
                fileContext.rename(lPath,
                        new Path(path + Path.SEPARATOR + operatorIdStr + Path.SEPARATOR + window),
                        Options.Rename.OVERWRITE);
            }
            FileUtil.fullyDelete(srcFile);
        }
    }
}

From source file: com.datatorrent.common.util.FSStorageAgent.java

License: Apache License

@SuppressWarnings("ThrowFromFinallyBlock")
@Override
public void save(Object object, int operatorId, long windowId) throws IOException {
    String operatorIdStr = String.valueOf(operatorId);
    Path lPath = new Path(path + Path.SEPARATOR + operatorIdStr + Path.SEPARATOR + TMP_FILE);
    String window = Long.toHexString(windowId);
    boolean stateSaved = false;
    FSDataOutputStream stream = null;
    try {
        stream = fileContext.create(lPath, EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
                Options.CreateOpts.CreateParent.createParent());
        store(stream, object);
        stateSaved = true;
    } catch (Throwable t) {
        logger.debug("while saving {} {}", operatorId, window, t);
        stateSaved = false;
        DTThrowable.rethrow(t);
    } finally {
        try {
            if (stream != null) {
                stream.close();
            }
        } catch (IOException ie) {
            stateSaved = false;
            throw new RuntimeException(ie);
        } finally {
            if (stateSaved) {
                logger.debug("Saving {}: {}", operatorId, window);
                fileContext.rename(lPath,
                        new Path(path + Path.SEPARATOR + operatorIdStr + Path.SEPARATOR + window),
                        Options.Rename.OVERWRITE);
            }
        }
    }
}

From source file: com.datatorrent.common.util.FSStorageAgent.java

License: Apache License

@Override
public Object load(int operatorId, long windowId) throws IOException {
    Path lPath = new Path(
            path + Path.SEPARATOR + String.valueOf(operatorId) + Path.SEPARATOR + Long.toHexString(windowId));
    logger.debug("Loading: {}", lPath);

    FSDataInputStream stream = fileContext.open(lPath);
    try {
        return retrieve(stream);
    } finally {
        stream.close();
    }
}

From source file: com.datatorrent.common.util.FSStorageAgent.java

License: Apache License

@Override
public void delete(int operatorId, long windowId) throws IOException {
    Path lPath = new Path(
            path + Path.SEPARATOR + String.valueOf(operatorId) + Path.SEPARATOR + Long.toHexString(windowId));
    logger.debug("Deleting: {}", lPath);

    fileContext.delete(lPath, false);
}

From source file: com.datatorrent.common.util.FSStorageAgent.java

License: Apache License

@Override
public long[] getWindowIds(int operatorId) throws IOException {
    Path lPath = new Path(path + Path.SEPARATOR + String.valueOf(operatorId));

    RemoteIterator<FileStatus> fileStatusRemoteIterator = fileContext.listStatus(lPath);
    if (!fileStatusRemoteIterator.hasNext()) {
        throw new IOException("Storage Agent has not saved anything yet!");
    }
    List<Long> lwindows = Lists.newArrayList();
    do {
        FileStatus fileStatus = fileStatusRemoteIterator.next();
        String name = fileStatus.getPath().getName();
        if (name.equals(TMP_FILE)) {
            continue;
        }
        lwindows.add(
                STATELESS_CHECKPOINT_WINDOW_ID.equals(name) ? Stateless.WINDOW_ID : Long.parseLong(name, 16));
    } while (fileStatusRemoteIterator.hasNext());
    long[] windowIds = new long[lwindows.size()];
    for (int i = 0; i < windowIds.length; i++) {
        windowIds[i] = lwindows.get(i);
    }
    return windowIds;
}

From source file: com.datatorrent.contrib.hive.HiveOperator.java

License: Apache License

private String processHiveFile(FilePartitionMapping tuple) {
    String filename = tuple.getFilename();
    ArrayList<String> partition = tuple.getPartition();
    String command = null;
    String filepath = store.getFilepath() + Path.SEPARATOR + filename;
    logger.debug("processing {} filepath", filepath);
    int numPartitions = partition.size();
    try {
        if (fs.exists(new Path(filepath))) {
            if (numPartitions > 0) {
                StringBuilder partitionString = new StringBuilder(
                        hivePartitionColumns.get(0) + "='" + partition.get(0) + "'");
                int i = 0;
                while (i < numPartitions) {
                    i++;
                    if (i == numPartitions) {
                        break;
                    }
                    partitionString.append(",").append(hivePartitionColumns.get(i)).append("='")
                            .append(partition.get(i)).append("'");
                }
                if (i < hivePartitionColumns.size()) {
                    partitionString.append(",").append(hivePartitionColumns.get(i));
                }
                command = "load data" + localString + " inpath '" + filepath + "' into table " + tablename
                        + " PARTITION" + "( " + partitionString + " )";
            } else {
                command = "load data" + localString + " inpath '" + filepath + "' into table " + tablename;
            }
        }
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
    logger.debug("command is {}", command);
    return command;
}

From source file: com.datatorrent.lib.io.block.BlockWriter.java

License: Apache License

/**
 * Initializes the filePath based on the application path.
 */
@Override
public void setup(Context.OperatorContext context) {
    filePath = context.getValue(Context.DAGContext.APPLICATION_PATH) + Path.SEPARATOR + blocksDirectory;
    super.setup(context);
}

From source file: com.datatorrent.lib.io.fs.AbstractFileOutputOperator.java

License: Open Source License

@Override
public void setup(Context.OperatorContext context) {
    LOG.debug("setup initiated");
    rollingFile = maxLength < Long.MAX_VALUE;

    //Get the required file system instance.
    try {
        fs = getFSInstance();
    } catch (IOException ex) {
        throw new RuntimeException(ex);
    }

    if (replication <= 0) {
        replication = fs.getDefaultReplication(new Path(filePath));
    }

    LOG.debug("FS class {}", fs.getClass());

    //When an entry is removed from the cache, removal listener is notified and it closes the output stream.
    RemovalListener<String, FSDataOutputStream> removalListener = new RemovalListener<String, FSDataOutputStream>() {
        @Override
        public void onRemoval(RemovalNotification<String, FSDataOutputStream> notification) {
            FSDataOutputStream value = notification.getValue();
            if (value != null) {
                try {
                    LOG.debug("closing {}", notification.getKey());
                    value.close();
                } catch (IOException e) {
                    throw new RuntimeException(e);
                }
            }
        }
    };

    //Define cache
    CacheLoader<String, FSDataOutputStream> loader = new CacheLoader<String, FSDataOutputStream>() {
        @Override
        public FSDataOutputStream load(String filename) {
            String partFileName = getPartFileNamePri(filename);
            Path lfilepath = new Path(filePath + Path.SEPARATOR + partFileName);

            FSDataOutputStream fsOutput;

            boolean sawThisFileBefore = endOffsets.containsKey(filename);

            try {
                if (fs.exists(lfilepath)) {
                    if (sawThisFileBefore) {
                        FileStatus fileStatus = fs.getFileStatus(lfilepath);
                        MutableLong endOffset = endOffsets.get(filename);

                        if (endOffset != null) {
                            endOffset.setValue(fileStatus.getLen());
                        } else {
                            endOffsets.put(filename, new MutableLong(fileStatus.getLen()));
                        }

                        fsOutput = fs.append(lfilepath);
                        LOG.debug("appending to {}", lfilepath);
                    }
                    //We never saw this file before and we don't want to append
                    else {
                        //If the file is rolling we need to delete all its parts.
                        if (rollingFile) {
                            int part = 0;

                            while (true) {
                                Path seenPartFilePath = new Path(
                                        filePath + Path.SEPARATOR + getPartFileName(filename, part));
                                if (!fs.exists(seenPartFilePath)) {
                                    break;
                                }

                                fs.delete(seenPartFilePath, true);
                                part = part + 1;
                            }

                            fsOutput = fs.create(lfilepath, (short) replication);
                        }
                        //Not rolling is easy, just delete the file and create it again.
                        else {
                            fs.delete(lfilepath, true);
                            fsOutput = fs.create(lfilepath, (short) replication);
                        }
                    }
                } else {
                    fsOutput = fs.create(lfilepath, (short) replication);
                }

                //Get the end offset of the file.

                LOG.info("opened: {}", fs.getFileStatus(lfilepath).getPath());
                return fsOutput;
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        }
    };

    streamsCache = CacheBuilder.newBuilder().maximumSize(maxOpenFiles).removalListener(removalListener)
            .build(loader);

    try {
        LOG.debug("File system class: {}", fs.getClass());
        LOG.debug("end-offsets {}", endOffsets);

        //Restore the files in case they were corrupted and the operator was restarted.
        Path writerPath = new Path(filePath);
        if (fs.exists(writerPath)) {
            for (String seenFileName : endOffsets.keySet()) {
                String seenFileNamePart = getPartFileNamePri(seenFileName);
                LOG.debug("seenFileNamePart: {}", seenFileNamePart);
                Path seenPartFilePath = new Path(filePath + Path.SEPARATOR + seenFileNamePart);
                if (fs.exists(seenPartFilePath)) {
                    LOG.debug("file exists {}", seenFileNamePart);
                    long offset = endOffsets.get(seenFileName).longValue();
                    FSDataInputStream inputStream = fs.open(seenPartFilePath);
                    FileStatus status = fs.getFileStatus(seenPartFilePath);

                    if (status.getLen() != offset) {
                        LOG.info("file corrupted {} {} {}", seenFileNamePart, offset, status.getLen());
                        byte[] buffer = new byte[COPY_BUFFER_SIZE];

                        Path tmpFilePath = new Path(
                                filePath + Path.SEPARATOR + seenFileNamePart + TMP_EXTENSION);
                        FSDataOutputStream fsOutput = fs.create(tmpFilePath, (short) replication);
                        while (inputStream.getPos() < offset) {
                            long remainingBytes = offset - inputStream.getPos();
                            int bytesToWrite = remainingBytes < COPY_BUFFER_SIZE ? (int) remainingBytes
                                    : COPY_BUFFER_SIZE;
                            int bytesRead = inputStream.read(buffer, 0, bytesToWrite);
                            if (bytesRead == -1) {
                                break;
                            }
                            fsOutput.write(buffer, 0, bytesRead);
                        }

                        flush(fsOutput);
                        fsOutput.close();
                        inputStream.close();

                        FileContext fileContext = FileContext.getFileContext(fs.getUri());
                        LOG.debug("temp file path {}, rolling file path {}", tmpFilePath.toString(),
                                status.getPath().toString());
                        fileContext.rename(tmpFilePath, status.getPath(), Options.Rename.OVERWRITE);
                    } else {
                        inputStream.close();
                    }
                }
            }
        }

        //delete the left over future rolling files produced from the previous crashed instance
        //of this operator.
        if (rollingFile) {
            for (String seenFileName : endOffsets.keySet()) {
                try {
                    Integer part = openPart.get(seenFileName).getValue() + 1;

                    while (true) {
                        Path seenPartFilePath = new Path(
                                filePath + Path.SEPARATOR + getPartFileName(seenFileName, part));
                        if (!fs.exists(seenPartFilePath)) {
                            break;
                        }

                        fs.delete(seenPartFilePath, true);
                        part = part + 1;
                    }

                    Path seenPartFilePath = new Path(filePath + Path.SEPARATOR
                            + getPartFileName(seenFileName, openPart.get(seenFileName).intValue()));

                    //Handle the case when restoring to a checkpoint where the current rolling file
                    //already has a length greater than max length.
                    if (fs.getFileStatus(seenPartFilePath).getLen() > maxLength) {
                        LOG.debug("rotating file at setup.");
                        rotate(seenFileName);
                    }
                } catch (IOException | ExecutionException e) {
                    throw new RuntimeException(e);
                }
            }
        }

        LOG.debug("setup completed");
        LOG.debug("end-offsets {}", endOffsets);
    } catch (IOException e) {
        throw new RuntimeException(e);
    }

    this.context = context;
    lastTimeStamp = System.currentTimeMillis();

    fileCounters.setCounter(Counters.TOTAL_BYTES_WRITTEN, new MutableLong());
    fileCounters.setCounter(Counters.TOTAL_TIME_ELAPSED, new MutableLong());
}

From source file: com.datatorrent.lib.io.fs.FileMergerTest.java

License: Apache License

@AfterClass
public static void cleanup() {
    try {
        FileUtils.deleteDirectory(new File("target" + Path.SEPARATOR + FileMergerTest.class.getName()));
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}