Example usage for org.apache.commons.vfs FileSystemManager getSchemes

Introduction

On this page you can find example usage for org.apache.commons.vfs FileSystemManager getSchemes.

Prototype

public String[] getSchemes();

Document

Get the schemes currently available.
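
A minimal sketch of calling this method, assuming the default manager obtained via VFS.getManager() (the output depends on which providers happen to be registered):

import org.apache.commons.vfs.FileSystemException;
import org.apache.commons.vfs.FileSystemManager;
import org.apache.commons.vfs.VFS;

public class ListSchemes {
    public static void main(String[] args) throws FileSystemException {
        // Obtain the default VFS manager and print every scheme it knows about.
        FileSystemManager manager = VFS.getManager();
        for (String scheme : manager.getSchemes()) {
            System.out.println(scheme);
        }
    }
}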

Usage

From source file: org.carbondata.processing.csvload.DataGraphExecuter.java

/**
 * Executes the Kettle graph generated from the given graph file.
 *
 * @throws DataLoadingException if graph execution cannot be started or fails
 */

private void execute(String graphFilePath, List<String> measureColumns, SchemaInfo schemaInfo)
        throws DataLoadingException {

    // This method validates both the fact and dimension CSV files.

    isCheckPointNeeded(graphFilePath, schemaInfo);
    initKettleEnv();
    TransMeta transMeta = null;
    try {
        transMeta = new TransMeta(graphFilePath);
        transMeta.setFilename(graphFilePath);
        trans = new Trans(transMeta);
        if (!schemaInfo.isAutoAggregateRequest()) {
            // Register HDFS as a file system type with VFS to make HadoopFileInputMeta work
            boolean hdfsReadMode = model.getCsvFilePath() != null && model.getCsvFilePath().startsWith("hdfs:");
            if (hdfsReadMode) {
                try {
                    FileSystemManager fsm = KettleVFS.getInstance().getFileSystemManager();
                    if (fsm instanceof DefaultFileSystemManager) {
                        if (!Arrays.asList(fsm.getSchemes()).contains("hdfs")
                                && !((DefaultFileSystemManager) fsm).hasProvider("hdfs")) {
                            ((DefaultFileSystemManager) fsm).addProvider("hdfs", new HDFSFileProvider());
                        }
                    }
                } catch (FileSystemException e) {
                    if (!e.getMessage().contains("Multiple providers registered for URL scheme")) {
                        LOGGER.error(e, e.getMessage());
                    }
                }
            }

            trans.setVariable("modifiedDimNames", model.getDimTables());
            trans.setVariable("csvInputFilePath", model.getCsvFilePath());
            trans.setVariable("dimFileLocDir", model.getDimCSVDirLoc());
            if (hdfsReadMode) {
                trans.addParameterDefinition("vfs.hdfs.dfs.client.read.shortcircuit", "true", "");
                trans.addParameterDefinition("vfs.hdfs.dfs.domain.socket.path",
                        "/var/lib/hadoop-hdfs-new/dn_socket", "");
                trans.addParameterDefinition("vfs.hdfs.dfs.block.local-path-access.user", "hadoop,root", "");
                trans.addParameterDefinition("vfs.hdfs.io.file.buffer.size", "5048576", "");
            }
            List<StepMeta> stepsMeta = trans.getTransMeta().getSteps();
            StringBuilder builder = new StringBuilder();
            StringBuilder measuresInCSVFile = new StringBuilder();
            processCsvInputMeta(measureColumns, stepsMeta, builder, measuresInCSVFile);
            processGetFileNamesMeta(stepsMeta);

            processHadoopFileInputMeta(measureColumns, stepsMeta, builder, measuresInCSVFile);
        }
        setGraphLogLevel();
        trans.execute(null);
        LOGGER.info("Graph execution is started " + graphFilePath);
        trans.waitUntilFinished();
        LOGGER.info("Graph execution is finished.");
    } catch (KettleXMLException e) {
        LOGGER.error(e, "Unable to start execution of graph " + e.getMessage());
        throw new DataLoadingException("Unable to start execution of graph ", e);

    } catch (KettleException e) {
        LOGGER.error(e, "Unable to start execution of graph " + e.getMessage());
        throw new DataLoadingException("Unable to start execution of graph ", e);
    } catch (Throwable e) {
        LOGGER.error(e, "Unable to start execution of graph " + e.getMessage());
        throw new DataLoadingException("Unable to start execution of graph ", e);
    }

    // Don't change the logic of creating the key
    String key = model.getSchemaName() + '/' + model.getCubeName() + '_' + model.getTableName();

    if (trans.getErrors() > 0) {
        LOGGER.error("Graph Execution had errors");
        throw new DataLoadingException("Internal Errors");
    } else if (null != BadRecordslogger.hasBadRecord(key)) {
        LOGGER.error("Graph Execution is partcially success");
        throw new DataLoadingException(DataProcessorConstants.BAD_REC_FOUND,
                "Graph Execution is partcially success");
    } else {
        LOGGER.info("Graph execution task is over with No error.");
    }
    LoggingRegistry instance = LoggingRegistry.getInstance();
    Map<String, LoggingObjectInterface> map = instance.getMap();
    if (null != map) {
        for (Entry<String, LoggingObjectInterface> entry : map.entrySet()) {
            instance.removeIncludingChildren(entry.getKey());
        }
    }

    map = null;
    XMLHandlerCache.getInstance().clear();
    trans.cleanup();
    trans.eraseParameters();
    trans.killAll();
    trans = null;
}
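
The guard above — consulting getSchemes() before calling addProvider() — is how this code avoids the "Multiple providers registered for URL scheme" error when a provider may already be installed. A condensed sketch of that pattern; the scheme name "myscheme" is a placeholder, and UrlFileProvider merely stands in for whatever provider actually handles the scheme:

import java.util.Arrays;

import org.apache.commons.vfs.FileSystemException;
import org.apache.commons.vfs.FileSystemManager;
import org.apache.commons.vfs.VFS;
import org.apache.commons.vfs.impl.DefaultFileSystemManager;
import org.apache.commons.vfs.provider.url.UrlFileProvider;

public class RegisterProviderIfAbsent {
    public static void main(String[] args) throws FileSystemException {
        FileSystemManager fsm = VFS.getManager();
        // Only the DefaultFileSystemManager implementation exposes addProvider().
        if (fsm instanceof DefaultFileSystemManager
                && !Arrays.asList(fsm.getSchemes()).contains("myscheme")) {
            // "myscheme" / UrlFileProvider are placeholders for a real scheme/provider pair.
            ((DefaultFileSystemManager) fsm).addProvider("myscheme", new UrlFileProvider());
        }
    }
}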

From source file: org.pentaho.amazon.AmazonSpoonPlugin.java

public void onStart(LifeEventHandler arg0) throws LifecycleException {
    try {
        // Register S3 as a file system type with VFS
        FileSystemManager fsm = KettleVFS.getInstance().getFileSystemManager();
        if (fsm instanceof DefaultFileSystemManager) {
            if (!Arrays.asList(fsm.getSchemes()).contains(S3_SCHEME)) {
                ((DefaultFileSystemManager) fsm).addProvider(S3_SCHEME, new S3FileProvider());
            }
        }
    } catch (FileSystemException e) {
        log.logError(BaseMessages.getString(PKG, "AmazonSpoonPlugin.StartupError.FailedToLoadS3Driver"));
    }

    VfsFileChooserDialog dialog = Spoon.getInstance().getVfsFileChooserDialog(null, null);
    Spoon.getInstance().getVfsFileChooserDialog(null, null)
            .addVFSUIPanel(new S3VfsFileChooserDialog(dialog, null, null));
}

From source file: org.pentaho.di.core.hadoop.HadoopSpoonPlugin.java

public void onStart(LifeEventHandler arg0) throws LifecycleException {
    try {
        // Register HDFS as a file system type with VFS
        FileSystemManager fsm = KettleVFS.getInstance().getFileSystemManager();
        if (fsm instanceof DefaultFileSystemManager) {
            if (!Arrays.asList(fsm.getSchemes()).contains(HDFS_SCHEME)) {
                ((DefaultFileSystemManager) fsm).addProvider(HDFS_SCHEME, new HDFSFileProvider());
            }
        }
    } catch (FileSystemException e) {
        log.logError(BaseMessages.getString(PKG, "HadoopSpoonPlugin.StartupError.FailedToLoadHdfsDriver"));
    }

    VfsFileChooserDialog dialog = Spoon.getInstance().getVfsFileChooserDialog(null, null);
    dialog.addVFSUIPanel(new HadoopVfsFileChooserDialog(dialog, null, null));
}

From source file: org.pentaho.di.core.vfs.KettleVFS.java

public static FileObject getFileObject(String vfsFilename, VariableSpace space, FileSystemOptions fsOptions)
        throws KettleFileException {
    try {
        FileSystemManager fsManager = getInstance().getFileSystemManager();

        // We have one problem with VFS: if the file is in a subdirectory of the current one: somedir/somefile
        // In that case, VFS doesn't parse the file correctly.
        // We need to put file: in front of it to make it work.
        // However, how are we going to verify this?
        //
        // We are going to see if the filename starts with one of the known protocols like file: zip: ram: smb: jar: etc.
        // If not, we are going to assume it's a file.
        //
        boolean relativeFilename = true;
        String[] schemes = fsManager.getSchemes();
        for (int i = 0; i < schemes.length && relativeFilename; i++) {
            if (vfsFilename.startsWith(schemes[i] + ":")) {
                relativeFilename = false;
                // We have a VFS URL, load any options for the file system driver
                fsOptions = buildFsOptions(space, fsOptions, vfsFilename, schemes[i]);
            }
        }

        String filename;
        if (vfsFilename.startsWith("\\\\")) {
            File file = new File(vfsFilename);
            filename = file.toURI().toString();
        } else {
            if (relativeFilename) {
                File file = new File(vfsFilename);
                filename = file.getAbsolutePath();
            } else {
                filename = vfsFilename;
            }
        }

        FileObject fileObject = null;

        if (fsOptions != null) {
            fileObject = fsManager.resolveFile(filename, fsOptions);
        } else {
            fileObject = fsManager.resolveFile(filename);
        }

        return fileObject;
    } catch (IOException e) {
        throw new KettleFileException(
                "Unable to get VFS File object for filename '" + vfsFilename + "' : " + e.getMessage());
    }
}
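
The scheme test in this method can be exercised on its own. A small sketch, assuming the default manager and using "ram:/example.txt" as an arbitrary test name, that classifies a filename the same way:

import org.apache.commons.vfs.FileSystemException;
import org.apache.commons.vfs.FileSystemManager;
import org.apache.commons.vfs.VFS;

public class SchemeCheck {
    public static void main(String[] args) throws FileSystemException {
        FileSystemManager manager = VFS.getManager();
        String name = "ram:/example.txt"; // arbitrary test value
        // A name is a VFS URL if it starts with any registered scheme plus ':'.
        boolean isVfsUrl = false;
        for (String scheme : manager.getSchemes()) {
            if (name.startsWith(scheme + ":")) {
                isVfsUrl = true;
                break;
            }
        }
        System.out.println(name + (isVfsUrl ? " -> VFS URL" : " -> treated as a local file"));
    }
}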

From source file: uk.ac.liv.shaman.vfsme.CommonVFSMediaAdaptor.java

public String toHTML(String pre) throws IOException {
    URI u = getURI();
    FileSystemManager m = VFS.getManager();
    m.getSchemes(); // return value unused in the original source
    String su = u.toString();
    //      if (pre.equals("iso:"))
    //         su = su.replaceFirst("file:", "");
    String uri = pre + su + "!";

    System.out.println("VFS Open: " + uri);
    FileObject o = m.resolveFile(uri);
    StringBuilder sb = new StringBuilder(5000);
    sb.append("<html><head><style type=\"text/css\">\n" + "body {\n"
            + "   color: black; background-color: white;\n"
            + "   font-size: 14pts;   /* Mozilla: 16 for proportional, 13 for fixed */\n"
            + "   padding: 10px;}\n" + "\n" + "a:link { color: blue; }\n" + "a:visited { color: magenta; }\n"
            + "a:hover { color: red; }\n" + "a:active { color: red; }\n" + "\n" + "a:link, a:visited, \n"
            + "a:active, a:hover {\n" + "   text-decoration: underline;\n" + "}\n" + "\n" + "p {\n"
            + "   margin-top: 10px;\n" + "}\n" + "text { padding: 5px; }\n" + "\n"
            + "pre { font-family: monospace; }\n" + "\n\n"
            + "h1 { font-size: 24pt; font-weight: bold; margin: 10px 0px; }\n"
            + "h2 { font-size: 18pt; font-weight: bold; margin: 9px 0px; }\n"
            + "h3 { font-size: 14pt; font-weight: bold; margin: 7px 0px; }\n"
            + "h4 { font-size: 12pt; font-weight: bold; margin: 6px 0px; }\n"
            + "h5 { font-size: 10pt; font-weight: bold; margin: 5px 0px; }\n"
            + "h6 { font-size:  9pt; font-weight: bold; margin: 5px 0px; }\n" + "" + "" + "</style>");
    sb.append("<title>").append("Contents of the archive").append(u.getPath()).append("</title>");

    sb.append("\t<base href='").append(u).append("!/'>\n");
    sb.append("</head>\n");
    sb.append("<body>\n");
    sb.append("<h2>").append("Contents of the archive").append(u.getPath()).append("</h2>");

    sb.append("\n<pre><table width='90%'>\n");

    // headers.  click to sort
    sb.append("<tr><span Behavior='ScriptSpan' script='event tableSort <node>'  title='Sort table'>");
    sb.append(
            "<th align='left'>File / <b>Directory<th align='right'>Size<th align='right'>Last Modified</b></span>\n");

    processChild(o, m, sb);
    sb.append("</table>\n</body></html>\n");

    return sb.toString();
}
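
The layered URI built above (scheme prefix + base URI + "!") is how Commons VFS addresses entries inside an archive. A small sketch of the same idea, assuming a local archive at /tmp/example.zip, that lists the entries at the archive root:

import org.apache.commons.vfs.FileObject;
import org.apache.commons.vfs.FileSystemException;
import org.apache.commons.vfs.FileSystemManager;
import org.apache.commons.vfs.VFS;

public class LayeredUriExample {
    public static void main(String[] args) throws FileSystemException {
        FileSystemManager manager = VFS.getManager();
        // "zip:" layered over a local file URI; "!" marks the archive root.
        FileObject root = manager.resolveFile("zip:file:///tmp/example.zip!/");
        for (FileObject child : root.getChildren()) {
            System.out.println(child.getName().getBaseName());
        }
    }
}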