List of usage examples for com.google.common.hash.Hashing.sha1()
public static HashFunction sha1()
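Hashing.sha1() returns a stateless, thread-safe HashFunction. Before the examples below, a minimal sketch of the common call patterns (class name hypothetical). Note that recent Guava releases mark sha1() @Deprecated because SHA-1 is no longer collision-resistant, recommending sha256() for new code, so these examples are mostly of interop or historical interest:

import com.google.common.hash.HashCode;
import com.google.common.hash.HashFunction;
import com.google.common.hash.Hasher;
import com.google.common.hash.Hashing;
import java.nio.charset.StandardCharsets;

public class Sha1Demo {
    public static void main(String[] args) {
        HashFunction sha1 = Hashing.sha1();

        // One-shot hashing of a string or a byte array.
        HashCode fromString = sha1.hashString("hello", StandardCharsets.UTF_8);
        HashCode fromBytes = sha1.hashBytes(new byte[] { 1, 2, 3 });

        // Incremental hashing via a Hasher, for multi-part input.
        Hasher hasher = sha1.newHasher();
        hasher.putString("hello", StandardCharsets.UTF_8);
        hasher.putLong(42L);
        HashCode combined = hasher.hash();

        // HashCode.toString() is lowercase hex, e.g. aaf4c61ddcc5e8a2...
        System.out.println(fromString);
        System.out.println(fromBytes);
        System.out.println(combined);
    }
}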
From source file:com.facebook.buck.parser.ParallelDaemonicParserState.java
@SuppressWarnings({ "rawtypes", "unchecked" })
private TargetNode<?> createTargetNode(BuckEventBus eventBus, Cell cell, Path buildFile, BuildTarget target,
        Map<String, Object> rawNode, TargetNodeListener nodeListener) {
    BuildRuleType buildRuleType = parseBuildRuleTypeFromRawRule(cell, rawNode);

    // Because of the way that the parser works, we know this can never return null.
    Description<?> description = cell.getDescription(buildRuleType);

    if (target.isFlavored()) {
        if (description instanceof Flavored) {
            if (!((Flavored) description).hasFlavors(ImmutableSet.copyOf(target.getFlavors()))) {
                throw new HumanReadableException("Unrecognized flavor in target %s while parsing %s%s.",
                        target, UnflavoredBuildTarget.BUILD_TARGET_PREFIX,
                        MorePaths.pathWithUnixSeparators(target.getBasePath().resolve(cell.getBuildFileName())));
            }
        } else {
            LOG.warn("Target %s (type %s) must implement the Flavored interface "
                    + "before we can check if it supports flavors: %s",
                    target.getUnflavoredBuildTarget(), buildRuleType, target.getFlavors());
            throw new HumanReadableException(
                    "Target %s (type %s) does not currently support flavors (tried %s)",
                    target.getUnflavoredBuildTarget(), buildRuleType, target.getFlavors());
        }
    }

    Cell targetCell = cell.getCell(target);
    BuildRuleFactoryParams factoryParams = new BuildRuleFactoryParams(targetCell.getFilesystem(),
            target.withoutCell(),
            new FilesystemBackedBuildFileTree(cell.getFilesystem(), cell.getBuildFileName()),
            targetCell.isEnforcingBuckPackageBoundaries());
    Object constructorArg = description.createUnpopulatedConstructorArg();
    try {
        ImmutableSet.Builder<BuildTarget> declaredDeps = ImmutableSet.builder();
        ImmutableSet.Builder<BuildTargetPattern> visibilityPatterns = ImmutableSet.builder();
        try (SimplePerfEvent.Scope scope = SimplePerfEvent.scope(eventBus,
                PerfEventId.of("MarshalledConstructorArg"), "target", target)) {
            marshaller.populate(targetCell.getCellRoots(), targetCell.getFilesystem(), factoryParams,
                    constructorArg, declaredDeps, visibilityPatterns, rawNode);
        }
        try (SimplePerfEvent.Scope scope = SimplePerfEvent.scope(eventBus,
                PerfEventId.of("CreatedTargetNode"), "target", target)) {
            Hasher hasher = Hashing.sha1().newHasher();
            hasher.putString(BuckVersion.getVersion(), UTF_8);
            JsonObjectHashing.hashJsonObject(hasher, rawNode);
            synchronized (this) {
                targetsCornucopia.put(target.getUnflavoredBuildTarget(), target);
            }
            TargetNode<?> node = new TargetNode(hasher.hash(), description, constructorArg, typeCoercerFactory,
                    factoryParams, declaredDeps.build(), visibilityPatterns.build(), targetCell.getCellRoots());
            nodeListener.onCreate(buildFile, node);
            return node;
        }
    } catch (NoSuchBuildTargetException | TargetNode.InvalidSourcePathInputException e) {
        throw new HumanReadableException(e);
    } catch (ConstructorArgMarshalException e) {
        throw new HumanReadableException("%s: %s", target, e.getMessage());
    } catch (IOException e) {
        throw new HumanReadableException(e.getMessage(), e);
    }
}
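The SHA-1 here fingerprints the target node: the Buck version plus a canonical hash of the raw rule attributes. JsonObjectHashing's actual traversal is recursive and type-aware; a minimal sketch of the underlying idea, order-independent map hashing, under that assumption (helper name hypothetical):

import com.google.common.hash.Hasher;
import java.nio.charset.StandardCharsets;
import java.util.Map;
import java.util.TreeMap;

class RawNodeHashing {
    // Sort keys so that two maps with the same entries hash identically
    // regardless of their iteration order.
    static void hashMap(Hasher hasher, Map<String, Object> rawNode) {
        for (Map.Entry<String, Object> entry : new TreeMap<>(rawNode).entrySet()) {
            hasher.putString(entry.getKey(), StandardCharsets.UTF_8);
            hasher.putString(String.valueOf(entry.getValue()), StandardCharsets.UTF_8);
        }
    }
}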
From source file:org.jooby.assets.SvgSprites.java
private String sha1(final File dir, final File sprite, final File css) throws IOException {
    try (Stream<Path> stream = Files.walk(dir.toPath())) {
        Hasher sha1 = Hashing.sha1().newHasher();
        stream.filter(p -> !Files.isDirectory(p))
                .forEach(p -> Try.run(() -> sha1.putBytes(Files.readAllBytes(p))));
        if (sprite.exists()) {
            sha1.putBytes(Files.readAllBytes(sprite.toPath()));
        }
        if (css.exists()) {
            sha1.putBytes(Files.readAllBytes(css.toPath()));
        }
        return BaseEncoding.base16().encode(sha1.hash().asBytes()).toLowerCase();
    }
}
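One caveat: Files.walk's encounter order is filesystem-dependent, so the digest above may differ across platforms for identical trees. A hypothetical hardening that sorts the paths first (not part of the jooby source; a plain loop also avoids jooby's Try wrapper for checked exceptions):

import com.google.common.hash.Hasher;
import com.google.common.hash.Hashing;
import com.google.common.io.BaseEncoding;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

class TreeSha1 {
    // Sorting makes the digest stable regardless of directory iteration order.
    static String sha1(Path dir) throws IOException {
        Hasher sha1 = Hashing.sha1().newHasher();
        try (Stream<Path> stream = Files.walk(dir)) {
            List<Path> files = stream.filter(p -> !Files.isDirectory(p))
                    .sorted()
                    .collect(Collectors.toList());
            for (Path p : files) {
                sha1.putBytes(Files.readAllBytes(p));
            }
        }
        return BaseEncoding.base16().encode(sha1.hash().asBytes()).toLowerCase();
    }
}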
From source file:com.facebook.buck.java.DefaultJavaLibraryRule.java
/**
 * Creates a Hasher containing the ABI keys of the dependencies.
 * @param rulesWithAbiToConsider a sorted set containing the dependencies whose ABI key will be
 *     added to the hasher.
 * @return a Hasher containing the ABI keys of the dependencies.
 */
private Hasher createHasherWithAbiKeyForDeps(SortedSet<JavaLibraryRule> rulesWithAbiToConsider) {
    Hasher hasher = Hashing.sha1().newHasher();
    for (JavaLibraryRule ruleWithAbiToConsider : rulesWithAbiToConsider) {
        if (ruleWithAbiToConsider == this) {
            continue;
        }
        Sha1HashCode abiKey = ruleWithAbiToConsider.getAbiKey();
        hasher.putUnencodedChars(abiKey.getHash());
    }
    return hasher;
}
From source file:org.apache.druid.metadata.IndexerSQLMetadataStorageCoordinator.java
@Nullable
private SegmentIdentifier allocatePendingSegment(final Handle handle, final String dataSource,
        final String sequenceName, final Interval interval, final String maxVersion) throws IOException {
    final CheckExistingSegmentIdResult result = checkAndGetExistingSegmentId(
            handle.createQuery(StringUtils.format(
                    "SELECT payload FROM %s WHERE "
                            + "dataSource = :dataSource AND "
                            + "sequence_name = :sequence_name AND "
                            + "start = :start AND "
                            + "%2$send%2$s = :end",
                    dbTables.getPendingSegmentsTable(), connector.getQuoteString())),
            interval, sequenceName, null,
            Pair.of("dataSource", dataSource),
            Pair.of("sequence_name", sequenceName),
            Pair.of("start", interval.getStart().toString()),
            Pair.of("end", interval.getEnd().toString()));

    if (result.found) {
        // The found existing segment identifier can be null if its interval doesn't match with the given interval
        return result.segmentIdentifier;
    }

    final SegmentIdentifier newIdentifier = createNewSegment(handle, dataSource, interval, maxVersion);
    if (newIdentifier == null) {
        return null;
    }

    // SELECT -> INSERT can fail due to races; callers must be prepared to retry.
    // Avoiding ON DUPLICATE KEY since it's not portable.
    // Avoiding try/catch since it may cause inadvertent transaction-splitting.

    // UNIQUE key for the row, ensuring we don't have more than one segment per sequence per interval.
    // Using a single column instead of (sequence_name, sequence_prev_id) as some MySQL storage engines
    // have difficulty with large unique keys (see https://github.com/apache/incubator-druid/issues/2319)
    final String sequenceNamePrevIdSha1 = BaseEncoding.base16()
            .encode(Hashing.sha1().newHasher()
                    .putBytes(StringUtils.toUtf8(sequenceName))
                    .putByte((byte) 0xff)
                    .putLong(interval.getStartMillis())
                    .putLong(interval.getEndMillis())
                    .hash()
                    .asBytes());

    // always insert empty previous sequence id
    insertToMetastore(handle, newIdentifier, dataSource, interval, "", sequenceName, sequenceNamePrevIdSha1);

    log.info("Allocated pending segment [%s] for sequence[%s] in DB", newIdentifier.getIdentifierAsString(),
            sequenceName);

    return newIdentifier;
}
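The encoded digest gives a fixed-width 40-character value that fits in a single UNIQUE column. The 0xff byte never occurs in UTF-8 output, so it unambiguously terminates the variable-length sequence name before the fixed-width interval bounds. The pattern isolated as a sketch (class and method names hypothetical):

import com.google.common.hash.Hashing;
import com.google.common.io.BaseEncoding;
import java.nio.charset.StandardCharsets;

class SequenceKeys {
    // 0xff is invalid in UTF-8, so it cleanly separates the name from the bounds;
    // distinct (name, interval) pairs can never produce the same input bytes.
    static String sha1Key(String sequenceName, long startMillis, long endMillis) {
        return BaseEncoding.base16().encode(
                Hashing.sha1().newHasher()
                        .putBytes(sequenceName.getBytes(StandardCharsets.UTF_8))
                        .putByte((byte) 0xff)
                        .putLong(startMillis)
                        .putLong(endMillis)
                        .hash()
                        .asBytes());
    }
}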
From source file:com.android.build.gradle.internal.transforms.DexTransform.java
/**
 * Returns the hash of a file.
 *
 * If the file is a folder, it's a hash of its path. If the file is a file, then
 * it's a hash of the file itself.
 *
 * @param file the file to hash
 */
@NonNull
private static String getFileHash(@NonNull File file) throws IOException {
    HashCode hashCode;
    HashFunction hashFunction = Hashing.sha1();
    if (file.isDirectory()) {
        hashCode = hashFunction.hashString(file.getPath(), Charsets.UTF_16LE);
    } else {
        hashCode = Files.hash(file, hashFunction);
    }
    return hashCode.toString();
}
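Files.hash(File, HashFunction) from com.google.common.io has since been deprecated in favor of the ByteSource API. A sketch of the same method against current Guava (class name hypothetical):

import com.google.common.base.Charsets;
import com.google.common.hash.HashCode;
import com.google.common.hash.HashFunction;
import com.google.common.hash.Hashing;
import com.google.common.io.Files;
import java.io.File;
import java.io.IOException;

class FileHashSketch {
    static String getFileHash(File file) throws IOException {
        HashFunction hashFunction = Hashing.sha1();
        HashCode hashCode = file.isDirectory()
                ? hashFunction.hashString(file.getPath(), Charsets.UTF_16LE)
                // asByteSource(...).hash(...) replaces the deprecated Files.hash
                : Files.asByteSource(file).hash(hashFunction);
        return hashCode.toString();
    }
}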
From source file:com.facebook.buck.util.ProjectFilesystem.java
public String computeSha1(Path pathRelativeToProjectRoot) throws IOException {
    Path fileToHash = getPathForRelativePath(pathRelativeToProjectRoot);
    return Hashing.sha1().hashBytes(java.nio.file.Files.readAllBytes(fileToHash)).toString();
}
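hashBytes(readAllBytes(...)) materializes the whole file in memory. For large files, Guava's HashingInputStream computes the digest while streaming; a hypothetical variant of the same method:

import com.google.common.hash.Hashing;
import com.google.common.hash.HashingInputStream;
import com.google.common.io.ByteStreams;
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;

class StreamingSha1 {
    // Hashes while reading, so the file never needs to fit in memory.
    static String computeSha1(Path fileToHash) throws IOException {
        try (InputStream in = Files.newInputStream(fileToHash);
                HashingInputStream hashingIn = new HashingInputStream(Hashing.sha1(), in)) {
            ByteStreams.exhaust(hashingIn);
            return hashingIn.hash().toString();
        }
    }
}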
From source file:org.jaqpot.core.service.data.ConjoinerService.java
public String createHashedIdentifier(String name, String units, String conditions) {
    HashFunction hf = Hashing.sha1();
    StringBuilder b = new StringBuilder();
    b.append(name == null ? "" : name);
    b.append(units == null ? "" : units);
    b.append(conditions == null ? "" : conditions);
    HashCode hc = hf.newHasher().putString(b.toString(), Charsets.US_ASCII).hash();
    return hc.toString().toUpperCase();
}
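Because the three fields are concatenated with no separator, ("ab", "c", "") and ("a", "bc", "") hash to the same identifier. Where that matters, a hypothetical variant with explicit field separators (not part of the jaqpot source):

import com.google.common.base.Charsets;
import com.google.common.hash.Hasher;
import com.google.common.hash.Hashing;

class HashedIdentifiers {
    // A 0x00 byte between fields keeps ("ab", "c") and ("a", "bc") distinct.
    static String createHashedIdentifier(String name, String units, String conditions) {
        Hasher hasher = Hashing.sha1().newHasher();
        hasher.putString(name == null ? "" : name, Charsets.US_ASCII).putByte((byte) 0);
        hasher.putString(units == null ? "" : units, Charsets.US_ASCII).putByte((byte) 0);
        hasher.putString(conditions == null ? "" : conditions, Charsets.US_ASCII);
        return hasher.hash().toString().toUpperCase();
    }
}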
From source file:com.facebook.buck.io.filesystem.impl.FakeProjectFilesystem.java
/** Does not support symlinks. */
@Override
public Sha1HashCode computeSha1(Path pathRelativeToProjectRootOrJustAbsolute) throws IOException {
    if (!exists(pathRelativeToProjectRootOrJustAbsolute)) {
        throw new NoSuchFileException(pathRelativeToProjectRootOrJustAbsolute.toString());
    }
    // Because this class is a fake, the file contents may not be available as a stream, so we load
    // all of the contents into memory as a byte[] and then hash them.
    byte[] fileContents = getFileBytes(pathRelativeToProjectRootOrJustAbsolute);
    HashCode hashCode = Hashing.sha1().newHasher().putBytes(fileContents).hash();
    return Sha1HashCode.fromHashCode(hashCode);
}
From source file:org.glowroot.central.repo.TraceDao.java
public void store(String agentId, Trace trace) throws Exception {
    String traceId = trace.getId();
    Trace.Header priorHeader = trace.getUpdate() ? readHeader(agentId, traceId) : null;
    Trace.Header header = trace.getHeader();
    List<String> agentRollupIds = agentDao.readAgentRollupIds(agentId);
    List<ResultSetFuture> futures = Lists.newArrayList();
    List<Trace.SharedQueryText> sharedQueryTexts = Lists.newArrayList();
    for (Trace.SharedQueryText sharedQueryText : trace.getSharedQueryTextList()) {
        String fullTextSha1 = sharedQueryText.getFullTextSha1();
        if (fullTextSha1.isEmpty()) {
            String fullText = sharedQueryText.getFullText();
            if (fullText.length() > 2 * StorageConfig.TRACE_QUERY_TEXT_TRUNCATE) {
                fullTextSha1 = Hashing.sha1().hashString(fullText, Charsets.UTF_8).toString();
                futures.addAll(fullQueryTextDao.store(agentId, fullTextSha1, fullText));
                for (int i = 1; i < agentRollupIds.size(); i++) {
                    futures.addAll(fullQueryTextDao.updateCheckTTL(agentRollupIds.get(i), fullTextSha1));
                }
                sharedQueryTexts.add(Trace.SharedQueryText.newBuilder()
                        .setTruncatedText(fullText.substring(0, StorageConfig.TRACE_QUERY_TEXT_TRUNCATE))
                        .setTruncatedEndText(fullText.substring(
                                fullText.length() - StorageConfig.TRACE_QUERY_TEXT_TRUNCATE, fullText.length()))
                        .setFullTextSha1(fullTextSha1)
                        .build());
            } else {
                sharedQueryTexts.add(sharedQueryText);
            }
        } else {
            futures.addAll(fullQueryTextDao.updateTTL(agentId, fullTextSha1));
            for (int i = 1; i < agentRollupIds.size(); i++) {
                futures.addAll(fullQueryTextDao.updateCheckTTL(agentRollupIds.get(i), fullTextSha1));
            }
            sharedQueryTexts.add(sharedQueryText);
        }
    }
    // wait for success before proceeding in order to ensure cannot end up with orphaned
    // fullTextSha1
    MoreFutures.waitForAll(futures);
    futures.clear();
    int adjustedTTL = AggregateDao.getAdjustedTTL(getTTL(), header.getCaptureTime(), clock);
    for (String agentRollupId : agentRollupIds) {
        if (!agentRollupId.equals(agentId)) {
            BoundStatement boundStatement = insertCheck.bind();
            int i = 0;
            boundStatement.setString(i++, agentRollupId);
            boundStatement.setString(i++, agentId);
            boundStatement.setString(i++, traceId);
            boundStatement.setInt(i++, adjustedTTL);
            futures.add(session.executeAsync(boundStatement));
        }
        if (header.getSlow()) {
            BoundStatement boundStatement = insertOverallSlowPoint.bind();
            bindSlowPoint(boundStatement, agentRollupId, agentId, traceId, header, adjustedTTL, true);
            futures.add(session.executeAsync(boundStatement));
            boundStatement = insertTransactionSlowPoint.bind();
            bindSlowPoint(boundStatement, agentRollupId, agentId, traceId, header, adjustedTTL, false);
            futures.add(session.executeAsync(boundStatement));
            boundStatement = insertOverallSlowCount.bind();
            bindCount(boundStatement, agentRollupId, agentId, traceId, header, adjustedTTL, true);
            futures.add(session.executeAsync(boundStatement));
            boundStatement = insertTransactionSlowCount.bind();
            bindCount(boundStatement, agentRollupId, agentId, traceId, header, adjustedTTL, false);
            futures.add(session.executeAsync(boundStatement));
            if (priorHeader != null) {
                boundStatement = deletePartialOverallSlowPoint.bind();
                bind(boundStatement, agentRollupId, agentId, traceId, priorHeader, true);
                futures.add(session.executeAsync(boundStatement));
                boundStatement = deletePartialTransactionSlowPoint.bind();
                bind(boundStatement, agentRollupId, agentId, traceId, priorHeader, false);
                futures.add(session.executeAsync(boundStatement));
                boundStatement = deletePartialOverallSlowCount.bind();
                bind(boundStatement, agentRollupId, agentId, traceId, priorHeader, true);
                futures.add(session.executeAsync(boundStatement));
                boundStatement = deletePartialTransactionSlowCount.bind();
                bind(boundStatement, agentRollupId, agentId, traceId, priorHeader, false);
                futures.add(session.executeAsync(boundStatement));
            }
        }
        // seems unnecessary to insert error info for partial traces
        // and this avoids having to clean up partial trace data when trace is complete
        if (header.hasError() && !header.getPartial()) {
            BoundStatement boundStatement = insertOverallErrorMessage.bind();
            bindErrorMessage(boundStatement, agentRollupId, agentId, traceId, header, adjustedTTL, true);
            futures.add(session.executeAsync(boundStatement));
            boundStatement = insertTransactionErrorMessage.bind();
            bindErrorMessage(boundStatement, agentRollupId, agentId, traceId, header, adjustedTTL, false);
            futures.add(session.executeAsync(boundStatement));
            boundStatement = insertOverallErrorPoint.bind();
            bindErrorPoint(boundStatement, agentRollupId, agentId, traceId, header, adjustedTTL, true);
            futures.add(session.executeAsync(boundStatement));
            boundStatement = insertTransactionErrorPoint.bind();
            bindErrorPoint(boundStatement, agentRollupId, agentId, traceId, header, adjustedTTL, false);
            futures.add(session.executeAsync(boundStatement));
            boundStatement = insertOverallErrorCount.bind();
            bindCount(boundStatement, agentRollupId, agentId, traceId, header, adjustedTTL, true);
            futures.add(session.executeAsync(boundStatement));
            boundStatement = insertTransactionErrorCount.bind();
            bindCount(boundStatement, agentRollupId, agentId, traceId, header, adjustedTTL, false);
            futures.add(session.executeAsync(boundStatement));
        }
        for (Trace.Attribute attributeName : header.getAttributeList()) {
            traceAttributeNameDao.store(agentRollupId, header.getTransactionType(), attributeName.getName(),
                    futures);
        }
    }
    BoundStatement boundStatement = insertHeader.bind();
    int i = 0;
    boundStatement.setString(i++, agentId);
    boundStatement.setString(i++, traceId);
    boundStatement.setBytes(i++, ByteBuffer.wrap(header.toByteArray()));
    boundStatement.setInt(i++, adjustedTTL);
    futures.add(session.executeAsync(boundStatement));
    int index = 0;
    for (Trace.Entry entry : trace.getEntryList()) {
        boundStatement = insertEntry.bind();
        i = 0;
        boundStatement.setString(i++, agentId);
        boundStatement.setString(i++, traceId);
        boundStatement.setInt(i++, index++);
        boundStatement.setInt(i++, entry.getDepth());
        boundStatement.setLong(i++, entry.getStartOffsetNanos());
        boundStatement.setLong(i++, entry.getDurationNanos());
        boundStatement.setBool(i++, entry.getActive());
        if (entry.hasQueryEntryMessage()) {
            boundStatement.setToNull(i++);
            boundStatement.setInt(i++, entry.getQueryEntryMessage().getSharedQueryTextIndex());
            boundStatement.setString(i++, Strings.emptyToNull(entry.getQueryEntryMessage().getPrefix()));
            boundStatement.setString(i++, Strings.emptyToNull(entry.getQueryEntryMessage().getSuffix()));
        } else {
            // message is empty for trace entries added using addErrorEntry()
            boundStatement.setString(i++, Strings.emptyToNull(entry.getMessage()));
            boundStatement.setToNull(i++);
            boundStatement.setToNull(i++);
            boundStatement.setToNull(i++);
        }
        List<Trace.DetailEntry> detailEntries = entry.getDetailEntryList();
        if (detailEntries.isEmpty()) {
            boundStatement.setToNull(i++);
        } else {
            boundStatement.setBytes(i++, Messages.toByteBuffer(detailEntries));
        }
        List<StackTraceElement> location = entry.getLocationStackTraceElementList();
        if (location.isEmpty()) {
            boundStatement.setToNull(i++);
        } else {
            boundStatement.setBytes(i++, Messages.toByteBuffer(location));
        }
        if (entry.hasError()) {
            boundStatement.setBytes(i++, ByteBuffer.wrap(entry.getError().toByteArray()));
        } else {
            boundStatement.setToNull(i++);
        }
        boundStatement.setInt(i++, adjustedTTL);
        futures.add(session.executeAsync(boundStatement));
    }
    index = 0;
    for (Trace.SharedQueryText sharedQueryText : sharedQueryTexts) {
        boundStatement = insertSharedQueryText.bind();
        i = 0;
        boundStatement.setString(i++, agentId);
        boundStatement.setString(i++, traceId);
        boundStatement.setInt(i++, index++);
        String fullText = sharedQueryText.getFullText();
        if (fullText.isEmpty()) {
            boundStatement.setString(i++, sharedQueryText.getTruncatedText());
            boundStatement.setString(i++, sharedQueryText.getTruncatedEndText());
            boundStatement.setString(i++, sharedQueryText.getFullTextSha1());
        } else {
            boundStatement.setString(i++, fullText);
            boundStatement.setToNull(i++);
            boundStatement.setToNull(i++);
        }
        boundStatement.setInt(i++, adjustedTTL);
        futures.add(session.executeAsync(boundStatement));
    }
    if (trace.hasMainThreadProfile()) {
        boundStatement = insertMainThreadProfile.bind();
        bindThreadProfile(boundStatement, agentId, traceId, trace.getMainThreadProfile(), adjustedTTL);
        futures.add(session.executeAsync(boundStatement));
    }
    if (trace.hasAuxThreadProfile()) {
        boundStatement = insertAuxThreadProfile.bind();
        bindThreadProfile(boundStatement, agentId, traceId, trace.getAuxThreadProfile(), adjustedTTL);
        futures.add(session.executeAsync(boundStatement));
    }
    futures.addAll(transactionTypeDao.store(agentRollupIds, header.getTransactionType()));
    MoreFutures.waitForAll(futures);
}
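Here the SHA-1 acts as a content-addressed key: query texts longer than twice the truncation limit are stored once under their digest, and every trace entry references them by that digest. The idea in isolation, as a minimal in-memory sketch (class and field names hypothetical):

import com.google.common.base.Charsets;
import com.google.common.hash.Hashing;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

class FullTextStore {
    private final Map<String, String> textBySha1 = new ConcurrentHashMap<>();

    // Store the text once under its SHA-1 and hand back the key;
    // identical texts naturally deduplicate to the same entry.
    String intern(String fullText) {
        String sha1 = Hashing.sha1().hashString(fullText, Charsets.UTF_8).toString();
        textBySha1.putIfAbsent(sha1, fullText);
        return sha1;
    }

    String lookup(String sha1) {
        return textBySha1.get(sha1);
    }
}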
From source file:com.android.tools.lint.psi.extract.ExtractPsi.java
private static void writeCheckSumFiles(@NonNull File file) throws IOException {
    byte[] bytes = Files.toByteArray(file);
    Files.write(Hashing.md5().hashBytes(bytes).toString(), new File(file.getPath() + ".md5"), UTF_8);
    Files.write(Hashing.sha1().hashBytes(bytes).toString(), new File(file.getPath() + ".sha1"), UTF_8);
}
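The Files.write(CharSequence, File, Charset) overload used here has since been deprecated in Guava in favor of the CharSink API. A sketch of the same routine against current Guava (class name hypothetical):

import com.google.common.hash.Hashing;
import com.google.common.io.Files;
import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

class ChecksumFiles {
    // Writes sibling .md5 and .sha1 files containing hex digests of the file.
    static void writeCheckSumFiles(File file) throws IOException {
        byte[] bytes = Files.toByteArray(file);
        Files.asCharSink(new File(file.getPath() + ".md5"), StandardCharsets.UTF_8)
                .write(Hashing.md5().hashBytes(bytes).toString());
        Files.asCharSink(new File(file.getPath() + ".sha1"), StandardCharsets.UTF_8)
                .write(Hashing.sha1().hashBytes(bytes).toString());
    }
}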