Example usage for org.apache.hadoop.fs Path SEPARATOR_CHAR

Introduction

This page collects example usages of org.apache.hadoop.fs.Path.SEPARATOR_CHAR drawn from open-source projects.

Prototype

public static final char SEPARATOR_CHAR = '/';

Document

The directory separator, a slash, as a character.
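
As a quick illustration (a minimal sketch, not drawn from the projects below), the char form is convenient for concatenating path segments or appending a single separator to a StringBuilder, while Path.SEPARATOR is the same separator as a String:

import org.apache.hadoop.fs.Path;

public class SeparatorCharDemo {
    public static void main(String[] args) {
        // Concatenating the char yields the familiar slash-delimited layout.
        String joined = "data" + Path.SEPARATOR_CHAR + "2024" + Path.SEPARATOR_CHAR + "part-00000";
        System.out.println(joined); // data/2024/part-00000

        // Path.SEPARATOR holds the same separator as a String ("/").
        System.out.println(Path.SEPARATOR.charAt(0) == Path.SEPARATOR_CHAR); // true
    }
}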

Usage

From source file:com.alibaba.jstorm.hdfs.spout.DirLock.java

License:Apache License

private static Path getDirLockFile(Path dir) {
    return new Path(dir.toString() + Path.SEPARATOR_CHAR + DIR_LOCK_FILE);
}

From source file:com.cloudera.cdk.data.filesystem.FileSystemMetadataProvider.java

License:Apache License

/**
 * Returns the correct dataset path for the given name and root directory.
 *
 * @param root A Path
 * @param name A String dataset name
 * @return the correct dataset Path
 */
private static Path pathForDataset(Path root, String name) {
    Preconditions.checkArgument(name != null, "Dataset name cannot be null");

    // Why replace '.' here? Is this a namespacing hack?
    return new Path(root, name.replace('.', Path.SEPARATOR_CHAR));
}
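
For context, the replace('.', Path.SEPARATOR_CHAR) call maps a dotted dataset name onto a nested directory layout. A small sketch with hypothetical values (not part of the CDK source):

import org.apache.hadoop.fs.Path;

public class PathForDatasetSketch {
    public static void main(String[] args) {
        // Hypothetical root and dataset name, chosen only to show the dot-to-separator mapping.
        Path root = new Path("/data/repo");
        Path datasetPath = new Path(root, "logs.daily".replace('.', Path.SEPARATOR_CHAR));
        System.out.println(datasetPath); // /data/repo/logs/daily
    }
}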

From source file:com.cloudera.cdk.data.filesystem.PathConversion.java

License:Apache License

public Path fromKey(StorageKey key) {
    final StringBuilder pathBuilder = new StringBuilder();
    final List<FieldPartitioner> partitioners = key.getPartitionStrategy().getFieldPartitioners();

    for (int i = 0; i < partitioners.size(); i++) {
        final FieldPartitioner fp = partitioners.get(i);
        if (i != 0) {
            pathBuilder.append(Path.SEPARATOR_CHAR);
        }
        @SuppressWarnings("unchecked")
        String dirname = dirnameForValue(fp, key.get(i));
        pathBuilder.append(dirname);
    }

    return new Path(pathBuilder.toString());
}
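
Assuming dirnameForValue renders each partition field as a name=value segment (an assumption about the CDK convention, not shown in this snippet), the loop produces a relative path such as year=2014/month=03. The same joining pattern with plain strings:

import org.apache.hadoop.fs.Path;

public class PartitionPathSketch {
    public static void main(String[] args) {
        // Hypothetical, pre-rendered partition segments; the real code derives them
        // from the StorageKey via dirnameForValue.
        String[] segments = { "year=2014", "month=03", "day=17" };
        StringBuilder pathBuilder = new StringBuilder();
        for (int i = 0; i < segments.length; i++) {
            if (i != 0) {
                pathBuilder.append(Path.SEPARATOR_CHAR); // separator only between segments
            }
            pathBuilder.append(segments[i]);
        }
        System.out.println(new Path(pathBuilder.toString())); // year=2014/month=03/day=17
    }
}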

From source file:com.cloudera.cdk.data.hcatalog.HiveUtils.java

License:Apache License

/**
 * Returns the correct dataset path for the given name and root directory.
 *
 * @param root A Path
 * @param name A String dataset name
 * @return the correct dataset Path
 */
static Path pathForDataset(Path root, String name) {
    Preconditions.checkArgument(name != null, "Dataset name cannot be null");

    // Why replace '.' here? Is this a namespacing hack?
    return new Path(root, name.replace('.', Path.SEPARATOR_CHAR));
}

From source file:com.cloudera.flume.collector.CollectorSink.java

License:Apache License

CollectorSink(Context ctx, final String logdir, final String filename, final long millis, final Tagger tagger,
        long checkmillis, AckListener ackDest) {
    this.ackDest = ackDest;
    this.roller = new RollSink(ctx, "collectorSink", new TimeTrigger(tagger, millis), checkmillis) {
        @Override
        public EventSink newSink(Context ctx) throws IOException {
            String tag = tagger.newTag();
            String path = logdir + Path.SEPARATOR_CHAR;
            EventSink dfs = new EscapedCustomDfsSink(path, filename + tag);
            return new RollDetectDeco(dfs, tag);
        }
    };

    long initMs = FlumeConfiguration.get().getInsistentOpenInitBackoff();
    long cumulativeMaxMs = FlumeConfiguration.get().getFailoverMaxCumulativeBackoff();
    long maxMs = FlumeConfiguration.get().getFailoverMaxSingleBackoff();
    BackoffPolicy backoff1 = new CumulativeCappedExponentialBackoff(initMs, maxMs, cumulativeMaxMs);
    BackoffPolicy backoff2 = new CumulativeCappedExponentialBackoff(initMs, maxMs, cumulativeMaxMs);

    // the collector snk has ack checking logic, retry and reopen logic, and
    // needs an extra mask before rolling, writing to disk and forwarding acks
    // (roll detect).

    // { ackChecksumChecker => insistentAppend => stubbornAppend =>
    // insistentOpen => mask("rolltag") => roll(xx) { rollDetect =>
    // escapedCustomDfs } }
    EventSink tmp = new MaskDecorator<EventSink>(roller, "rolltag");
    tmp = new InsistentOpenDecorator<EventSink>(tmp, backoff1);
    tmp = new StubbornAppendSink<EventSink>(tmp);
    tmp = new InsistentAppendDecorator<EventSink>(tmp, backoff2);
    snk = new AckChecksumChecker<EventSink>(tmp, accum);
}

From source file:com.datasalt.pangool.solr.SolrRecordWriter.java

License:Apache License

/**
 * Write a file to a zip output stream, removing leading path name components from the actual file name when creating
 * the zip file entry.
 * 
 * The entry placed in the zip file is <code>baseName</code>/ <code>relativePath</code>, where
 * <code>relativePath</code> is constructed by removing a leading <code>root</code> from the path for
 * <code>itemToZip</code>.
 * 
 * If <code>itemToZip</code> is an empty directory, it is ignored. If <code>itemToZip</code> is a directory, the
 * contents of the directory are added recursively.
 * 
 * @param zos
 *          The zip output stream
 * @param baseName
 *          The base name to use for the file name entry in the zip file
 * @param root
 *          The path to remove from <code>itemToZip</code> to make a relative path name
 * @param itemToZip
 *          The path to the file to be added to the zip file
 * @return the number of entries added
 * @throws IOException
 */
static public int zipDirectory(final Configuration conf, final ZipOutputStream zos, final String baseName,
        final String root, final Path itemToZip) throws IOException {
    LOG.info(String.format("zipDirectory: %s %s %s", baseName, root, itemToZip));
    LocalFileSystem localFs = FileSystem.getLocal(conf);
    int count = 0;

    final FileStatus itemStatus = localFs.getFileStatus(itemToZip);
    if (itemStatus.isDir()) {
        final FileStatus[] statai = localFs.listStatus(itemToZip);

        // Add a directory entry to the zip file
        final String zipDirName = relativePathForZipEntry(itemToZip.toUri().getPath(), baseName, root);
        final ZipEntry dirZipEntry = new ZipEntry(zipDirName + Path.SEPARATOR_CHAR);
        LOG.info(String.format("Adding directory %s to zip", zipDirName));
        zos.putNextEntry(dirZipEntry);
        zos.closeEntry();
        count++;

        if (statai == null || statai.length == 0) {
            LOG.info(String.format("Skipping empty directory %s", itemToZip));
            return count;
        }
        for (FileStatus status : statai) {
            count += zipDirectory(conf, zos, baseName, root, status.getPath());
        }
        LOG.info(String.format("Wrote %d entries for directory %s", count, itemToZip));
        return count;
    }

    final String inZipPath = relativePathForZipEntry(itemToZip.toUri().getPath(), baseName, root);

    if (inZipPath.length() == 0) {
        LOG.warn(String.format("Skipping empty zip file path for %s (%s %s)", itemToZip, root, baseName));
        return 0;
    }

    // Include empty files in case a placeholder entry is needed
    FSDataInputStream in = null;
    try {
        in = localFs.open(itemToZip);
        final ZipEntry ze = new ZipEntry(inZipPath);
        ze.setTime(itemStatus.getModificationTime());
        // Entry comments clutter the zip file when inspecting it
        // ze.setComment(itemToZip.toString());
        zos.putNextEntry(ze);

        IOUtils.copyBytes(in, zos, conf, false);
        zos.closeEntry();
        LOG.info(String.format("Wrote %d entries for file %s", count, itemToZip));
        return 1;
    } finally {
        in.close();
    }

}
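
One detail worth noting: the trailing Path.SEPARATOR_CHAR appended to zipDirName is what marks the entry as a directory in the ZIP format. A minimal standalone sketch with a hypothetical entry name, using only java.util.zip:

import java.io.ByteArrayOutputStream;
import java.util.zip.ZipEntry;
import java.util.zip.ZipOutputStream;

import org.apache.hadoop.fs.Path;

public class ZipDirEntrySketch {
    public static void main(String[] args) throws Exception {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (ZipOutputStream zos = new ZipOutputStream(bytes)) {
            // A name ending in the separator is read back as a directory entry by zip tools.
            ZipEntry dir = new ZipEntry("conf" + Path.SEPARATOR_CHAR);
            zos.putNextEntry(dir);
            zos.closeEntry();
            System.out.println(dir.isDirectory()); // true
        }
    }
}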

From source file:com.datasalt.pangool.solr.SolrRecordWriter.java

License:Apache License

static String relativePathForZipEntry(final String rawPath, final String baseName, final String root) {
    String relativePath = rawPath.replaceFirst(Pattern.quote(root.toString()), "");
    LOG.info(
            String.format("RawPath %s, baseName %s, root %s, first %s", rawPath, baseName, root, relativePath));

    if (relativePath.startsWith(Path.SEPARATOR)) {
        relativePath = relativePath.substring(1);
    }
    LOG.info(String.format("RawPath %s, baseName %s, root %s, post leading slash %s", rawPath, baseName, root,
            relativePath));
    if (relativePath.isEmpty()) {
        LOG.warn(String.format("No data after root (%s) removal from raw path %s", root, rawPath));
        return baseName;
    }
    // Construct the path that will be written to the zip file, including
    // removing any leading '/' characters
    String inZipPath = baseName + Path.SEPARATOR_CHAR + relativePath;

    LOG.info(String.format("RawPath %s, baseName %s, root %s, inZip 1 %s", rawPath, baseName, root, inZipPath));
    if (inZipPath.startsWith(Path.SEPARATOR)) {
        inZipPath = inZipPath.substring(1);
    }
    LOG.info(String.format("RawPath %s, baseName %s, root %s, inZip 2 %s", rawPath, baseName, root, inZipPath));

    return inZipPath;

}
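
To make the transformation concrete, here is the same sequence of steps run on hypothetical inputs (not taken from the Pangool tests): the root prefix is stripped, the leading slash removed, and the base name prepended with Path.SEPARATOR_CHAR:

import java.util.regex.Pattern;

import org.apache.hadoop.fs.Path;

public class RelativeZipPathSketch {
    public static void main(String[] args) {
        // Hypothetical arguments mirroring relativePathForZipEntry(rawPath, baseName, root).
        String rawPath = "/tmp/build/solr/conf/schema.xml";
        String baseName = "solr-home";
        String root = "/tmp/build";

        String relativePath = rawPath.replaceFirst(Pattern.quote(root), "");
        if (relativePath.startsWith(Path.SEPARATOR)) {
            relativePath = relativePath.substring(1);
        }
        String inZipPath = baseName + Path.SEPARATOR_CHAR + relativePath;
        System.out.println(inZipPath); // solr-home/solr/conf/schema.xml
    }
}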

From source file:com.intel.ssg.dcst.panthera.parse.SkinDriver.java

License:Apache License

/**
 * Compile a new query, but potentially reset taskID counter.  Not resetting task counter
 * is useful for generating re-entrant QL queries.
 * @param command  The HiveQL query to compile
 * @param resetTaskIds Resets taskID counter if true.
 * @return 0 for ok
 */
public int compile(String command, boolean resetTaskIds) {
    PerfLogger perfLogger = PerfLogger.getPerfLogger();
    perfLogger.PerfLogBegin(LOG, PerfLogger.COMPILE);

    //holder for parent command type/string when executing reentrant queries
    QueryState queryState = new QueryState();

    if (plan != null) {
        close();
        plan = null;
    }

    if (resetTaskIds) {
        TaskFactory.resetId();
    }
    saveSession(queryState);

    try {
        try {
            command = new VariableSubstitution().substitute(conf, command);
            ctx = new Context(conf);
            ctx.setTryCount(getTryCount());
            ctx.setCmd(command);
            ctx.setHDFSCleanup(true);

            perfLogger.PerfLogBegin(LOG, PerfLogger.PARSE);
            // use SqlParseDriver directly: the SQL parser is tried first, then the Hive parser if the
            // SQL parser fails.
            SqlParseDriver pd = new SqlParseDriver(conf);
            ASTNode tree = pd.parse(command, ctx);
            tree = ParseUtils.findRootNonNullToken(tree);
            perfLogger.PerfLogEnd(LOG, PerfLogger.PARSE);

            perfLogger.PerfLogBegin(LOG, PerfLogger.ANALYZE);
            BaseSemanticAnalyzer sem = SemanticAnalyzerFactory.get(conf, tree);
            List<HiveSemanticAnalyzerHook> saHooks = getHooks(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK,
                    HiveSemanticAnalyzerHook.class);

            // Do semantic analysis and plan generation
            if (saHooks != null) {
                HiveSemanticAnalyzerHookContext hookCtx = new HiveSemanticAnalyzerHookContextImpl();
                hookCtx.setConf(conf);
                for (HiveSemanticAnalyzerHook hook : saHooks) {
                    tree = hook.preAnalyze(hookCtx, tree);
                }
                sem.analyze(tree, ctx);
                hookCtx.update(sem);
                for (HiveSemanticAnalyzerHook hook : saHooks) {
                    hook.postAnalyze(hookCtx, sem.getRootTasks());
                }
            } else {
                sem.analyze(tree, ctx);
            }

            LOG.info("Semantic Analysis Completed");

            // validate the plan
            sem.validate();
            perfLogger.PerfLogEnd(LOG, PerfLogger.ANALYZE);

            plan = new QueryPlan(command, sem, perfLogger.getStartTime(PerfLogger.DRIVER_RUN));

            // test Only - serialize the query plan and deserialize it
            if ("true".equalsIgnoreCase(System.getProperty("test.serialize.qplan"))) {

                String queryPlanFileName = ctx.getLocalScratchDir(true) + Path.SEPARATOR_CHAR + "queryplan.xml";
                LOG.info("query plan = " + queryPlanFileName);
                queryPlanFileName = new Path(queryPlanFileName).toUri().getPath();

                // serialize the queryPlan
                FileOutputStream fos = new FileOutputStream(queryPlanFileName);
                Utilities.serializeObject(plan, fos);
                fos.close();

                // deserialize the queryPlan
                FileInputStream fis = new FileInputStream(queryPlanFileName);
                QueryPlan newPlan = Utilities.deserializeObject(fis);
                fis.close();

                // Use the deserialized plan
                plan = newPlan;
            }

            // initialize FetchTask right here
            if (plan.getFetchTask() != null) {
                plan.getFetchTask().initialize(conf, plan, null);
            }

            // get the output schema
            schema = getSchema(sem, conf);

            // do the authorization check
            if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED)) {
                try {
                    perfLogger.PerfLogBegin(LOG, PerfLogger.DO_AUTHORIZATION);
                    doAuthorization(sem);
                } catch (AuthorizationException authExp) {
                    errorMessage = "Authorization failed:" + authExp.getMessage()
                            + ". Use show grant to get more details.";
                    console.printError(errorMessage);
                    return 403;
                } finally {
                    perfLogger.PerfLogEnd(LOG, PerfLogger.DO_AUTHORIZATION);
                }
            }

            // restore state after we're done executing a specific query

            return 0;
        } catch (SemanticException semanticException) {
            command = new VariableSubstitution().substitute(conf, command);
            ctx = new Context(conf);
            ctx.setTryCount(getTryCount());
            ctx.setCmd(command);
            ctx.setHDFSCleanup(true);

            perfLogger.PerfLogBegin(LOG, PerfLogger.PARSE);

            // Parse again: the SemanticException may have come from the SQL parser, so retry with the
            // Hive parser. On this second pass only the Hive parser is used, since the SQL parser has
            // already been tried.
            SqlParseDriver pd = new SqlParseDriver(conf);
            ASTNode tree = null;
            try {
                tree = pd.parse(command, ctx, true);
            } catch (ParseException parseException) {
                //
                // If re-parsing also throws a ParseException, the Hive parser cannot handle the query
                // even though the SQL parser could, so rethrow the original SemanticException.
                //
                throw semanticException;
            }

            try {
                tree = ParseUtils.findRootNonNullToken(tree);
                perfLogger.PerfLogEnd(LOG, PerfLogger.PARSE);

                perfLogger.PerfLogBegin(LOG, PerfLogger.ANALYZE);
                BaseSemanticAnalyzer sem = SemanticAnalyzerFactory.get(conf, tree);
                List<HiveSemanticAnalyzerHook> saHooks = getHooks(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK,
                        HiveSemanticAnalyzerHook.class);

                // Do semantic analysis and plan generation
                if (saHooks != null) {
                    HiveSemanticAnalyzerHookContext hookCtx = new HiveSemanticAnalyzerHookContextImpl();
                    hookCtx.setConf(conf);
                    for (HiveSemanticAnalyzerHook hook : saHooks) {
                        tree = hook.preAnalyze(hookCtx, tree);
                    }
                    sem.analyze(tree, ctx);
                    hookCtx.update(sem);
                    for (HiveSemanticAnalyzerHook hook : saHooks) {
                        hook.postAnalyze(hookCtx, sem.getRootTasks());
                    }
                } else {
                    sem.analyze(tree, ctx);
                }

                LOG.info("Semantic Analysis Completed");

                // validate the plan
                sem.validate();
                perfLogger.PerfLogEnd(LOG, PerfLogger.ANALYZE);

                plan = new QueryPlan(command, sem, perfLogger.getStartTime(PerfLogger.DRIVER_RUN));

                // test Only - serialize the query plan and deserialize it
                if ("true".equalsIgnoreCase(System.getProperty("test.serialize.qplan"))) {

                    String queryPlanFileName = ctx.getLocalScratchDir(true) + Path.SEPARATOR_CHAR
                            + "queryplan.xml";
                    LOG.info("query plan = " + queryPlanFileName);
                    queryPlanFileName = new Path(queryPlanFileName).toUri().getPath();

                    // serialize the queryPlan
                    FileOutputStream fos = new FileOutputStream(queryPlanFileName);
                    Utilities.serializeObject(plan, fos);
                    fos.close();

                    // deserialize the queryPlan
                    FileInputStream fis = new FileInputStream(queryPlanFileName);
                    QueryPlan newPlan = Utilities.deserializeObject(fis);
                    fis.close();

                    // Use the deserialized plan
                    plan = newPlan;
                }

                // initialize FetchTask right here
                if (plan.getFetchTask() != null) {
                    plan.getFetchTask().initialize(conf, plan, null);
                }

                // get the output schema
                schema = getSchema(sem, conf);

                //do the authorization check
                if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED)) {
                    try {
                        perfLogger.PerfLogBegin(LOG, PerfLogger.DO_AUTHORIZATION);
                        doAuthorization(sem);
                    } catch (AuthorizationException authExp) {
                        errorMessage = "Authorization failed:" + authExp.getMessage()
                                + ". Use show grant to get more details.";
                        console.printError(errorMessage);
                        return 403;
                    } finally {
                        perfLogger.PerfLogEnd(LOG, PerfLogger.DO_AUTHORIZATION);
                    }
                }

                //restore state after we're done executing a specific query

                return 0;
            } catch (SemanticException hiveSemanticEx) {
                //
                // If the Hive parser also produces a SemanticException, rethrow the original exception
                // from the first pass: that one may have come from either the SQL parser or the Hive
                // parser, whereas hiveSemanticEx can only have come from the Hive parser.
                //
                throw semanticException;
            }
        }
    } catch (SqlParseException e) {
        errorMessage = "FAILED: SQL Parse Error: " + e.getMessage();
        SQLState = ErrorMsg.findSQLState(e.getMessage());
        console.printError(errorMessage, "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
        return (15);
    } catch (SqlXlateException e) {
        errorMessage = "FAILED: SQL AST Translate Error: " + e.getMessage();
        SQLState = ErrorMsg.findSQLState(e.getMessage());
        console.printError(errorMessage, "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
        return (14);
    } catch (Exception e) {
        ErrorMsg error = ErrorMsg.getErrorMsg(e.getMessage());
        errorMessage = "FAILED: " + e.getClass().getSimpleName();
        if (error != ErrorMsg.GENERIC_ERROR) {
            errorMessage += " [Error " + error.getErrorCode() + "]:";
        }

        // HIVE-4889
        if ((e instanceof IllegalArgumentException) && e.getMessage() == null && e.getCause() != null) {
            errorMessage += " " + e.getCause().getMessage();
        } else {
            errorMessage += " " + e.getMessage();
        }

        SQLState = error.getSQLState();
        downstreamError = e;
        console.printError(errorMessage, "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
        return error.getErrorCode();
    } finally {
        perfLogger.PerfLogEnd(LOG, PerfLogger.COMPILE);
        restoreSession(queryState);
    }
}

From source file:com.intel.ssg.dcst.panthera.ql.SkinDriver.java

License:Apache License

/**
 * Compile a new query, but potentially reset taskID counter.  Not resetting task counter
 * is useful for generating re-entrant QL queries.
 * @param command  The HiveQL query to compile
 * @param resetTaskIds Resets taskID counter if true.
 * @return 0 for ok
 */
public int compile(String command, boolean resetTaskIds) {
    PerfLogger perfLogger = PerfLogger.getPerfLogger();
    perfLogger.PerfLogBegin(LOG, PerfLogger.COMPILE);

    //holder for parent command type/string when executing reentrant queries
    QueryState queryState = new QueryState();

    if (plan != null) {
        close();
        plan = null;
    }

    if (resetTaskIds) {
        TaskFactory.resetId();
    }
    saveSession(queryState);

    try {
        command = new VariableSubstitution().substitute(conf, command);
        ctx = new Context(conf);
        ctx.setTryCount(getTryCount());
        ctx.setCmd(command);
        ctx.setHDFSCleanup(true);

        perfLogger.PerfLogBegin(LOG, PerfLogger.PARSE);
        // use SqlParseDriver directly; it decides whether the query runs through Hive or Panthera.
        SqlParseDriver pd = new SqlParseDriver(conf);
        ASTNode tree = pd.parse(command, ctx);
        tree = ParseUtils.findRootNonNullToken(tree);
        perfLogger.PerfLogEnd(LOG, PerfLogger.PARSE);

        perfLogger.PerfLogBegin(LOG, PerfLogger.ANALYZE);
        BaseSemanticAnalyzer sem = SemanticAnalyzerFactory.get(conf, tree);
        List<HiveSemanticAnalyzerHook> saHooks = getHooks(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK,
                HiveSemanticAnalyzerHook.class);

        // Do semantic analysis and plan generation
        if (saHooks != null) {
            HiveSemanticAnalyzerHookContext hookCtx = new HiveSemanticAnalyzerHookContextImpl();
            hookCtx.setConf(conf);
            for (HiveSemanticAnalyzerHook hook : saHooks) {
                tree = hook.preAnalyze(hookCtx, tree);
            }
            sem.analyze(tree, ctx);
            hookCtx.update(sem);
            for (HiveSemanticAnalyzerHook hook : saHooks) {
                hook.postAnalyze(hookCtx, sem.getRootTasks());
            }
        } else {
            sem.analyze(tree, ctx);
        }

        LOG.info("Semantic Analysis Completed");

        // validate the plan
        sem.validate();
        perfLogger.PerfLogEnd(LOG, PerfLogger.ANALYZE);

        plan = new QueryPlan(command, sem, perfLogger.getStartTime(PerfLogger.DRIVER_RUN));

        // test Only - serialize the query plan and deserialize it
        if ("true".equalsIgnoreCase(System.getProperty("test.serialize.qplan"))) {

            String queryPlanFileName = ctx.getLocalScratchDir(true) + Path.SEPARATOR_CHAR + "queryplan.xml";
            LOG.info("query plan = " + queryPlanFileName);
            queryPlanFileName = new Path(queryPlanFileName).toUri().getPath();

            // serialize the queryPlan
            FileOutputStream fos = new FileOutputStream(queryPlanFileName);
            Utilities.serializeObject(plan, fos);
            fos.close();

            // deserialize the queryPlan
            FileInputStream fis = new FileInputStream(queryPlanFileName);
            QueryPlan newPlan = Utilities.deserializeObject(fis);
            fis.close();

            // Use the deserialized plan
            plan = newPlan;
        }

        // initialize FetchTask right here
        if (plan.getFetchTask() != null) {
            plan.getFetchTask().initialize(conf, plan, null);
        }

        // get the output schema
        schema = getSchema(sem, conf);

        // do the authorization check
        if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED)) {
            try {
                perfLogger.PerfLogBegin(LOG, PerfLogger.DO_AUTHORIZATION);
                doAuthorization(sem);
            } catch (AuthorizationException authExp) {
                errorMessage = "Authorization failed:" + authExp.getMessage()
                        + ". Use show grant to get more details.";
                console.printError(errorMessage);
                return 403;
            } finally {
                perfLogger.PerfLogEnd(LOG, PerfLogger.DO_AUTHORIZATION);
            }
        }

        // restore state after we're done executing a specific query

        return 0;

    } catch (HiveParseException e) {
        errorMessage = "FAILED: SQL Parse Error: " + e.getMessage() + "\nQuery is not supported.";
        SQLState = ErrorMsg.findSQLState(e.getMessage());
        console.printError(errorMessage, "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
        return (16);
    } catch (SqlParseException e) {
        errorMessage = "FAILED: SQL Parse Error: " + e.getMessage();
        SQLState = ErrorMsg.findSQLState(e.getMessage());
        console.printError(errorMessage, "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
        return (15);
    } catch (SqlXlateException e) {
        errorMessage = "FAILED: SQL AST Translate Error: " + e.getMessage();
        SQLState = ErrorMsg.findSQLState(e.getMessage());
        console.printError(errorMessage, "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
        return (14);
    } catch (Exception e) {
        ErrorMsg error = ErrorMsg.getErrorMsg(e.getMessage());
        errorMessage = "FAILED: " + e.getClass().getSimpleName();
        if (error != ErrorMsg.GENERIC_ERROR) {
            errorMessage += " [Error " + error.getErrorCode() + "]:";
        }

        // HIVE-4889
        if ((e instanceof IllegalArgumentException) && e.getMessage() == null && e.getCause() != null) {
            errorMessage += " " + e.getCause().getMessage();
        } else {
            errorMessage += " " + e.getMessage();
        }

        SQLState = error.getSQLState();
        downstreamError = e;
        console.printError(errorMessage, "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
        return error.getErrorCode();
    } finally {
        perfLogger.PerfLogEnd(LOG, PerfLogger.COMPILE);
        restoreSession(queryState);
    }
}

From source file:com.pinterest.hdfsbackup.distcp.DistCp.java

License:Apache License

static private boolean isAncestorPath(String x, String y) {
    if (!y.startsWith(x)) {
        return false;
    }
    final int len = x.length();
    return y.length() == len || y.charAt(len) == Path.SEPARATOR_CHAR;
}
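
The charAt(len) == Path.SEPARATOR_CHAR check is what prevents a false positive when one path is merely a string prefix of another. A small self-contained sketch of the same logic (the original method is package-private in DistCp, so it is reproduced here for illustration):

import org.apache.hadoop.fs.Path;

public class AncestorCheckSketch {
    static boolean isAncestorPath(String x, String y) {
        if (!y.startsWith(x)) {
            return false;
        }
        final int len = x.length();
        // Either the paths are identical, or the character after the prefix must be a
        // separator; "/foo" is an ancestor of "/foo/bar" but not of "/foobar".
        return y.length() == len || y.charAt(len) == Path.SEPARATOR_CHAR;
    }

    public static void main(String[] args) {
        System.out.println(isAncestorPath("/foo", "/foo/bar")); // true
        System.out.println(isAncestorPath("/foo", "/foobar"));  // false
    }
}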