Example usage for org.apache.hadoop.fs Path toUri

List of usage examples for org.apache.hadoop.fs Path toUri

Introduction

On this page you can find usage examples for org.apache.hadoop.fs.Path.toUri().

Prototype

public URI toUri() 

Document

Convert this Path to a URI.
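For orientation, here is a minimal, self-contained sketch of the method (the hdfs://namenode:8020/tmp/data path is illustrative, not taken from the examples below):

import java.net.URI;

import org.apache.hadoop.fs.Path;

public class PathToUriExample {
    public static void main(String[] args) {
        // Build a Path and convert it to a URI.
        Path path = new Path("hdfs://namenode:8020/tmp/data");
        URI uri = path.toUri();

        // The URI exposes the scheme, authority, and path components separately.
        System.out.println(uri.getScheme());    // hdfs
        System.out.println(uri.getAuthority()); // namenode:8020
        System.out.println(uri.getPath());      // /tmp/data
    }
}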

Usage

From source file:azkaban.viewer.hdfs.HdfsBrowserServlet.java

License:Apache License

private void handleFsDisplay(String user, HttpServletRequest req, HttpServletResponse resp, Session session)
        throws IOException, ServletException, IllegalArgumentException, IllegalStateException {
    FileSystem fs = null;
    try {
        fs = getFileSystem(user);
    } catch (HadoopSecurityManagerException e) {
        errorPage(user, req, resp, session, "Cannot get FileSystem.");
        return;
    }

    Path path = getPath(req);
    if (logger.isDebugEnabled()) {
        logger.debug("path: '" + path.toString() + "'");
    }

    try {
        if (!fs.exists(path)) {
            errorPage(user, req, resp, session, path.toUri().getPath() + " does not exist.");
            fs.close();
            return;
        }
    } catch (IOException ioe) {
        logger.error("Got exception while checking for existence of path '" + path + "'", ioe);
        errorPage(user, req, resp, session,
                path.toUri().getPath() + " Encountered error while trying to detect if path '" + path
                        + "' exists. Reason: " + ioe.getMessage());
        fs.close();
        return;
    }

    if (fs.isFile(path)) {
        displayFilePage(fs, user, req, resp, session, path);
    } else if (fs.getFileStatus(path).isDir()) {
        displayDirPage(fs, user, req, resp, session, path);
    } else {
        errorPage(user, req, resp, session,
                "It exists, it is not a file, and it is not a directory, what " + "is it precious?");
    }
    fs.close();
}
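Note the pattern used throughout this servlet: path.toUri().getPath() extracts only the path component of the URI, so messages shown to the user read /user/foo rather than the fully qualified hdfs://host:port/user/foo form that path.toString() can produce.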

From source file:azkaban.viewer.hdfs.HdfsBrowserServlet.java

License:Apache License

private void handleAjaxAction(String username, HttpServletRequest request, HttpServletResponse response,
        Session session) throws ServletException, IOException {
    Map<String, Object> ret = new HashMap<String, Object>();
    FileSystem fs = null;
    try {
        try {
            fs = getFileSystem(username);
        } catch (HadoopSecurityManagerException e) {
            errorAjax(response, ret, "Cannot get FileSystem.");
            return;
        }

        String ajaxName = getParam(request, "ajax");
        Path path = null;
        if (!hasParam(request, "path")) {
            errorAjax(response, ret, "Missing parameter 'path'.");
            return;
        }

        path = new Path(getParam(request, "path"));
        if (!fs.exists(path)) {
            errorAjax(response, ret, path.toUri().getPath() + " does not exist.");
            return;
        }

        if (ajaxName.equals("fetchschema")) {
            handleAjaxFetchSchema(fs, request, ret, session, path);
        } else if (ajaxName.equals("fetchfile")) {
            // Note: fetchFile writes directly to the output stream. Thus, we need
            // to make sure we do not write to the output stream once this call
            // returns.
            ret = null;
            handleAjaxFetchFile(fs, request, response, session, path);
        } else {
            ret.put("error", "Unknown AJAX action " + ajaxName);
        }

        if (ret != null) {
            this.writeJSON(response, ret);
        }
    } finally {
        // getFileSystem() may have failed, leaving fs null; guard before closing.
        if (fs != null) {
            fs.close();
        }
    }
}

From source file:azkaban.viewer.hdfs.ParquetFileViewer.java

License:Apache License

@Override
public Set<Capability> getCapabilities(FileSystem fs, Path path) throws AccessControlException {
    if (logger.isDebugEnabled()) {
        logger.debug("Parquet file path: " + path.toUri().getPath());
    }

    AvroParquetReader<GenericRecord> parquetReader = null;
    try {
        parquetReader = new AvroParquetReader<GenericRecord>(path);
    } catch (IOException e) {
        if (logger.isDebugEnabled()) {
            logger.debug(path.toUri().getPath() + " is not a Parquet file.");
            logger.debug("Error in opening Parquet file: " + e.getLocalizedMessage());
        }
        return EnumSet.noneOf(Capability.class);
    } finally {
        try {
            if (parquetReader != null) {
                parquetReader.close();
            }
        } catch (IOException e) {
            logger.error(e);
        }
    }
    return EnumSet.of(Capability.READ, Capability.SCHEMA);
}
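Here getCapabilities probes whether the file is Parquet simply by attempting to open it with AvroParquetReader; a failed open is logged at debug level and reported as an empty capability set rather than an error.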

From source file:azkaban.viewer.hdfs.ParquetFileViewer.java

License:Apache License

@Override
public void displayFile(FileSystem fs, Path path, OutputStream outputStream, int startLine, int endLine)
        throws IOException {
    if (logger.isDebugEnabled()) {
        logger.debug("Display Parquet file: " + path.toUri().getPath());
    }

    JsonGenerator json = null;
    AvroParquetReader<GenericRecord> parquetReader = null;
    try {
        parquetReader = new AvroParquetReader<GenericRecord>(path);

        // Initialize JsonGenerator.
        json = new JsonFactory().createJsonGenerator(outputStream, JsonEncoding.UTF8);
        json.useDefaultPrettyPrinter();

        // Declare the avroWriter encoder that will be used to output the records
        // as JSON but don't construct them yet because we need the first record
        // in order to get the Schema.
        DatumWriter<GenericRecord> avroWriter = null;
        Encoder encoder = null;

        long endTime = System.currentTimeMillis() + STOP_TIME;
        int line = 1;
        while (line <= endLine && System.currentTimeMillis() <= endTime) {
            GenericRecord record = parquetReader.read();
            if (record == null) {
                break;
            }

            if (avroWriter == null) {
                Schema schema = record.getSchema();
                avroWriter = new GenericDatumWriter<GenericRecord>(schema);
                encoder = EncoderFactory.get().jsonEncoder(schema, json);
            }

            if (line >= startLine) {
                String recordStr = "\n\nRecord " + line + ":\n";
                outputStream.write(recordStr.getBytes("UTF-8"));
                avroWriter.write(record, encoder);
                encoder.flush();
            }
            ++line;
        }
    } catch (IOException e) {
        outputStream.write(("Error in displaying Parquet file: " + e.getLocalizedMessage()).getBytes("UTF-8"));
        throw e;
    } catch (Throwable t) {
        logger.error(t.getMessage());
        return;
    } finally {
        if (json != null) {
            json.close();
        }
        // The reader may be null if the constructor above threw; guard before closing.
        if (parquetReader != null) {
            parquetReader.close();
        }
    }
}

From source file:azkaban.viewer.hdfs.ParquetFileViewer.java

License:Apache License

@Override
public String getSchema(FileSystem fs, Path path) {
    String schema = null;
    AvroParquetReader<GenericRecord> parquetReader = null;
    try {
        parquetReader = new AvroParquetReader<GenericRecord>(path);
        GenericRecord record = parquetReader.read();
        if (record == null) {
            return null;
        }
        Schema avroSchema = record.getSchema();
        AvroSchemaConverter converter = new AvroSchemaConverter();
        schema = converter.convert(avroSchema).toString();
    } catch (IOException e) {
        logger.warn("Cannot get schema for file: " + path.toUri().getPath());
        return null;
    } finally {
        // Close the reader so the underlying stream is not leaked.
        if (parquetReader != null) {
            try {
                parquetReader.close();
            } catch (IOException e) {
                logger.error(e);
            }
        }
    }

    return schema;
}
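Note that converter.convert(avroSchema) yields a Parquet MessageType, so the string returned here is the schema in Parquet notation rather than Avro JSON.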

From source file:azkaban.web.pages.HdfsBrowserServlet.java

License:Apache License

@Override
protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {

    String prefix = req.getContextPath() + req.getServletPath();
    String fsPath = req.getRequestURI().substring(prefix.length());
    if (fsPath.length() == 0)
        fsPath = "/";

    if (logger.isDebugEnabled())
        logger.debug("path=" + fsPath);

    Path path = new Path(fsPath);
    if (!_fs.exists(path))
        throw new IllegalArgumentException(path.toUri().getPath() + " does not exist.");
    else if (_fs.isFile(path))
        displayFile(req, resp, path);
    else if (_fs.getFileStatus(path).isDir())
        displayDir(req, resp, path);
    else
        throw new IllegalStateException(
                "It exists, it is not a file, and it is not a directory, what is it precious?");

}

From source file:azkaban.webapp.servlet.hdfsviewer.AvroFileViewer.java

License:Apache License

@Override
public Set<Capability> getCapabilities(FileSystem fs, Path path) {
    if (logger.isDebugEnabled())
        logger.debug("path:" + path.toUri().getPath());

    DataFileStream<Object> avroDataStream = null;
    try {
        avroDataStream = getAvroDataStream(fs, path);
        Schema schema = avroDataStream.getSchema();
        return (schema != null) ? EnumSet.of(Capability.READ, Capability.SCHEMA)
                : EnumSet.noneOf(Capability.class);
    } catch (IOException e) {
        if (logger.isDebugEnabled()) {
            logger.debug(path.toUri().getPath() + " is not an avro file.");
            logger.debug("Error in getting avro schema: " + e.getLocalizedMessage());
        }
        return EnumSet.noneOf(Capability.class);
    } finally {
        try {
            if (avroDataStream != null) {
                avroDataStream.close();
            }
        } catch (IOException e) {
            logger.error(e);
        }
    }
}

From source file:azkaban.webapp.servlet.hdfsviewer.AvroFileViewer.java

License:Apache License

@Override
public String getSchema(FileSystem fs, Path path) {
    if (logger.isDebugEnabled())
        logger.debug("path:" + path.toUri().getPath());

    DataFileStream<Object> avroDataStream = null;
    try {
        avroDataStream = getAvroDataStream(fs, path);
        Schema schema = avroDataStream.getSchema();
        return schema.toString();
    } catch (IOException e) {
        if (logger.isDebugEnabled()) {
            logger.debug(path.toUri().getPath() + " is not an avro file.");
            logger.debug("Error in getting avro schema: " + e.getLocalizedMessage());
        }
        return null;
    } finally {
        try {
            if (avroDataStream != null) {
                avroDataStream.close();
            }
        } catch (IOException e) {
            logger.error(e);
        }
    }
}

From source file:azkaban.webapp.servlet.hdfsviewer.ParquetFileViewer.java

License:Apache License

@Override
public Set<Capability> getCapabilities(FileSystem fs, Path path) {
    if (logger.isDebugEnabled()) {
        logger.debug("Parquet file path: " + path.toUri().getPath());
    }

    AvroParquetReader<GenericRecord> parquetReader = null;
    try {
        parquetReader = new AvroParquetReader<GenericRecord>(path);
    } catch (IOException e) {
        if (logger.isDebugEnabled()) {
            logger.debug(path.toUri().getPath() + " is not a Parquet file.");
            logger.debug("Error in opening Parquet file: " + e.getLocalizedMessage());
        }
        return EnumSet.noneOf(Capability.class);
    } finally {
        try {
            if (parquetReader != null) {
                parquetReader.close();
            }
        } catch (IOException e) {
            logger.error(e);
        }
    }
    return EnumSet.of(Capability.READ, Capability.SCHEMA);
}

From source file:babel.prep.datedcorpus.DatedLangFilesOutputFormat.java

License:Apache License

public RecordWriter<Text, Text> getBaseRecordWriter(final FileSystem fs, JobConf job, String name,
        final Progressable progress) throws IOException {
    final Path dumpFile = new Path(FileOutputFormat.getOutputPath(job), name);

    // Get the old copy out of the way
    if (fs.exists(dumpFile)) {
        fs.delete(dumpFile, true);
    } else {
        fs.mkdirs(dumpFile.getParent());
    }

    return new RecordWriter<Text, Text>() {
        public synchronized void write(Text key, Text versText) throws IOException {
            try {
                BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(
                        new FileOutputStream(new File(dumpFile.toUri()), true), DEFAULT_CHARSET));

                writer.write(versText.toString());
                writer.close();
            } catch (Exception e) {
                throw new RuntimeException("Error writing page versions: " + e.toString());
            }
        }

        public synchronized void close(Reporter reporter) throws IOException {
        }
    };
}
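This last example passes dumpFile.toUri() to the java.io.File constructor, which only accepts file: URIs; the conversion therefore works only when the job's output path is on the local file system. A minimal sketch of that behavior, with illustrative paths:

import java.io.File;

import org.apache.hadoop.fs.Path;

public class PathToFileExample {
    public static void main(String[] args) {
        // Works: the Path carries a file: URI, which java.io.File accepts.
        File ok = new File(new Path("file:///tmp/output/part-00000").toUri());
        System.out.println(ok.getAbsolutePath());

        // Fails: java.io.File cannot address hdfs: URIs.
        try {
            new File(new Path("hdfs://namenode:8020/tmp/x").toUri());
        } catch (IllegalArgumentException e) {
            System.out.println("Not a local path: " + e);
        }
    }
}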