Usage examples for org.apache.hadoop.fs.Path#hashCode()
@Override public int hashCode()
From source file: oracle.kv.hadoop.hive.table.TableHiveInputSplit.java
License: Open Source License
@Override public int hashCode() { int hc = 0;//from w w w. j a v a 2s .co m final Path filePath = getPath(); if (filePath != null) { hc = filePath.hashCode(); } return hc + v2Split.hashCode(); }
From source file: org.apache.jena.hadoop.rdf.io.input.util.RdfIOUtils.java
License: Apache License
/** * Selects a seed for use in generating blank node identifiers * //from w w w .j ava 2 s . com * @param context * Job Context * @param path * File path * @return Seed */ public static UUID getSeed(JobContext context, Path path) { // This is to ensure that blank node allocation policy is constant when // subsequent MapReduce jobs need that String jobId = context.getJobID().toString(); if (jobId == null) { jobId = String.valueOf(System.currentTimeMillis()); LOGGER.warn( "Job ID was not set, using current milliseconds of {}. Sequence of MapReduce jobs must carefully handle blank nodes.", jobId); } if (!context.getConfiguration().getBoolean(RdfIOConstants.GLOBAL_BNODE_IDENTITY, false)) { // Using normal file scoped blank node allocation LOGGER.debug("Generating Blank Node Seed from Job Details (ID={}, Input Path={})", jobId, path); // Form a reproducible seed for the run return new UUID(jobId.hashCode(), path.hashCode()); } else { // Using globally scoped blank node allocation LOGGER.warn( "Using globally scoped blank node allocation policy from Job Details (ID={}) - this is unsafe if your RDF inputs did not originate from a previous job", jobId); return new UUID(jobId.hashCode(), 0); } }