List of usage examples for com.google.common.hash Hashing md5
public static HashFunction md5()
From source file:org.eclipse.che.api.vfs.server.impl.memory.MemoryVirtualFile.java
/**
 * Walks the subtree rooted at this virtual item and collects an MD5 digest for
 * every regular file found, paired with the file's path relative to this root.
 *
 * Called on a plain file (not a folder) this returns an empty iterator.
 * MD5 is used here purely as a content checksum, not for security.
 *
 * @return lazy iterator of (hex digest, relative path) pairs
 * @throws ServerException if any file in the subtree cannot be read
 */
@Override
public LazyIterator<Pair<String, String>> countMd5Sums() throws ServerException {
    checkExist();
    if (isFile()) {
        // A single file has no subtree to enumerate.
        return LazyIterator.emptyIterator();
    }
    final List<Pair<String, String>> digests = new ArrayList<>();
    // Length of this root's path plus the separator; used to relativize child paths.
    final int prefixLength = getPath().length() + 1;
    final HashFunction md5 = Hashing.md5();
    // The visitor callback cannot throw checked exceptions, so failures are
    // stashed here and rethrown after the traversal completes.
    final ValueHolder<ServerException> failure = new ValueHolder<>();
    accept(new VirtualFileVisitor() {
        @Override
        public void visit(final VirtualFile current) {
            try {
                if (current.isFile()) {
                    try (InputStream in = current.getContent().getStream()) {
                        final String hex =
                                ByteSource.wrap(ByteStreams.toByteArray(in)).hash(md5).toString();
                        digests.add(Pair.of(hex, current.getPath().substring(prefixLength)));
                    } catch (ForbiddenException e) {
                        throw new ServerException(e.getServiceError());
                    } catch (IOException e) {
                        throw new ServerException(e);
                    }
                } else {
                    // Recurse into folders by re-dispatching this visitor on each child.
                    final LazyIterator<VirtualFile> children =
                            current.getChildren(VirtualFileFilter.ALL);
                    while (children.hasNext()) {
                        children.next().accept(this);
                    }
                }
            } catch (ServerException e) {
                failure.set(e);
            }
        }
    });
    final ServerException error = failure.get();
    if (error != null) {
        throw error;
    }
    return LazyIterator.fromList(digests);
}
From source file:org.hawkular.alerts.engine.impl.PartitionManagerImpl.java
/**
 * Distribute triggers on nodes using a consistent hashing strategy.
 * This strategy allows to scale and minimize changes and re-distribution when cluster changes.
 *
 * @param entries a list of entries to distribute; must not be null
 * @param buckets a table of nodes; must not be null or empty
 * @return a map of entries distributed across nodes
 * @throws IllegalArgumentException if {@code entries} is null or {@code buckets} is empty
 */
public Map<PartitionEntry, Integer> calculatePartition(List<PartitionEntry> entries,
        Map<Integer, Integer> buckets) {
    if (entries == null) {
        throw new IllegalArgumentException("entries must be not null");
    }
    if (isEmpty(buckets)) {
        // Bug fix: this message previously said "entries must be not null",
        // which mislabeled the failing argument. Sibling calculateNewEntry()
        // already uses the correct wording.
        throw new IllegalArgumentException("buckets must be not null");
    }
    HashFunction md5 = Hashing.md5();
    int numBuckets = buckets.size();
    Map<PartitionEntry, Integer> newPartition = new HashMap<>();
    for (PartitionEntry entry : entries) {
        // consistentHash keeps most entries on their previous bucket when
        // numBuckets changes, minimizing re-distribution on cluster resize.
        newPartition.put(entry,
                buckets.get(Hashing.consistentHash(md5.hashInt(entry.hashCode()), numBuckets)));
    }
    return newPartition;
}
From source file:net.revelc.code.formatter.FormatterMojo.java
/**
 * Computes the MD5 digest of the given string as a lowercase hex string,
 * encoding the text with this formatter's configured charset first.
 *
 * @param str the text to hash
 * @return the MD5 digest rendered in hexadecimal
 * @throws UnsupportedEncodingException if the configured encoding is not supported
 */
private String md5hash(String str) throws UnsupportedEncodingException {
    final byte[] raw = str.getBytes(this.encoding);
    return Hashing.md5().hashBytes(raw).toString();
}
From source file:org.hawkular.alerts.engine.impl.PartitionManagerImpl.java
/** * Distribute a new entry across buckets using a consistent hashing strategy. * * @param newEntry the new entry to distribute * @param buckets a table of nodes/*from ww w. j a va 2 s .com*/ * @return a code of the node which the new entry is placed */ public Integer calculateNewEntry(PartitionEntry newEntry, Map<Integer, Integer> buckets) { if (newEntry == null) { throw new IllegalArgumentException("newEntry must be not null"); } if (isEmpty(buckets)) { throw new IllegalArgumentException("buckets must be not null"); } HashFunction md5 = Hashing.md5(); int numBuckets = buckets.size(); return buckets.get(Hashing.consistentHash(md5.hashInt(newEntry.hashCode()), numBuckets)); }
From source file:org.mycore.frontend.cli.MCRIFS2Commands.java
/**
 * Verifies and (unless check_only) repairs the MCRFSNODES database entry for a
 * single file that lives in an IFS2 content store.
 *
 * Flow, as visible in the code below:
 *  1. Derive the storage id from the file's absolute path relative to storage_base.
 *  2. Look up the existing "F"-type MCRFSNODES row for (owner, store, storageid);
 *     missing row + check_only => return, non-unique row => return with error log.
 *  3. Recompute content type id (from the stream header), size and MD5 of the
 *     file on disk (MD5 is a checksum here, not a security measure).
 *  4. If size and MD5 both match the stored values, nothing to fix => return;
 *     otherwise log warnings and, unless check_only, merge a rebuilt MCRFSNODES
 *     row (creating a fresh node id when no row existed) and commit.
 *
 * NOTE(review): on lookup failure the broad catch only prints the stack trace
 * and execution falls through with foundEntry == false — presumably intentional
 * so the entry gets recreated; confirm before refactoring. The transaction
 * begun at the top is committed/rolled back only in the final try/catch.
 *
 * @param node          the file on disk to check
 * @param content_store id of the content store the file belongs to
 * @param derivate_id   owner (derivate) id of the file
 * @param storage_base  filesystem prefix stripped to obtain the storage id
 * @param check_only    when true, only report inconsistencies; do not write
 */
private static void fixFileEntry(File node, String content_store, String derivate_id, String storage_base, boolean check_only) { LOGGER.debug("fixFileEntry : name = " + node.getName()); String storageid = node.getAbsolutePath().substring(storage_base.length()).replace("\\", "/"); LOGGER.debug("fixFileEntry : storageid = " + storageid); String id = ""; String md5_old = ""; long size_old = 0; boolean foundEntry = false; MCRSession mcrSession = MCRSessionMgr.getCurrentSession(); boolean transactionActive = mcrSession.isTransactionActive(); if (!transactionActive) { mcrSession.beginTransaction(); } EntityManager em = MCREntityManagerProvider.getCurrentEntityManager(); try { CriteriaBuilder cb = em.getCriteriaBuilder(); CriteriaQuery<MCRFSNODES> query = cb.createQuery(MCRFSNODES.class); Root<MCRFSNODES> nodes = query.from(MCRFSNODES.class); try { MCRFSNODES fsNode = em.createQuery(query.where(cb.equal(nodes.get(MCRFSNODES_.owner), derivate_id), cb.equal(nodes.get(MCRFSNODES_.storeid), content_store), cb.equal(nodes.get(MCRFSNODES_.storageid), storageid), cb.equal(nodes.get(MCRFSNODES_.type), "F"))).getSingleResult(); LOGGER.debug("Found file entry for " + storageid); foundEntry = true; id = fsNode.getId(); md5_old = fsNode.getMd5(); size_old = fsNode.getSize(); em.detach(fsNode); } catch (NoResultException e) { LOGGER.error("Can't find file entry for " + storageid); if (check_only) return; } catch (NonUniqueResultException e) { LOGGER.error("Non unique file entry for " + storageid); return; } } catch (Exception e) { e.printStackTrace(); } // check fctid, size and MD5 of the file String fctid = ""; String md5 = ""; try { MCRContentInputStream cis = new MCRContentInputStream(new FileInputStream(node)); byte[] header = cis.getHeader(); fctid = MCRFileContentTypeFactory.detectType(node.getName(), header).getID(); cis.close(); md5 = Files.hash(node, Hashing.md5()).toString(); } catch (MCRException | IOException e1) { e1.printStackTrace(); 
return; } long size = node.length(); LOGGER.debug("size old : " + Long.toString(size_old) + " <--> size : " + Long.toString(size)); LOGGER.debug("MD5 old : " + md5_old + " <--> MD5 : " + md5); if (size_old == size && md5_old.equals(md5)) { return; } if (foundEntry && size_old != size) { LOGGER.warn("Wrong file size for " + storageid + " : " + size_old + " <-> " + size); } if (foundEntry && !md5.equals(md5_old)) { LOGGER.warn("Wrong file md5 for " + storageid + " : " + md5_old + " <-> " + md5); } if (check_only) return; // fix entry LOGGER.info("Fix entry for file " + storageid); if (!foundEntry) { MCRFileMetadataManager fmmgr = MCRFileMetadataManager.instance(); id = fmmgr.createNodeID(); } String pid = null; try { pid = getParentID(node, derivate_id); } catch (NoResultException e1) { LOGGER.error("Can't find parent id of directory for file " + storageid); } catch (NonUniqueResultException e1) { LOGGER.error("The directory entry for " + derivate_id + " and " + node.getParentFile().getName() + " is not unique!"); return; } try { MCRFSNODES mcrfsnodes = new MCRFSNODES(); mcrfsnodes.setId(id); mcrfsnodes.setPid(pid); mcrfsnodes.setType("F"); mcrfsnodes.setOwner(derivate_id); mcrfsnodes.setName(node.getName()); mcrfsnodes.setSize(size); mcrfsnodes.setDate(new Date(node.lastModified())); mcrfsnodes.setStoreid(content_store); mcrfsnodes.setStorageid(storageid); mcrfsnodes.setFctid(fctid); mcrfsnodes.setMd5(md5); em.merge(mcrfsnodes); mcrSession.commitTransaction(); LOGGER.debug("Entry " + node.getName() + " fixed."); } catch (PersistenceException pe) { mcrSession.rollbackTransaction(); pe.printStackTrace(); } catch (Exception e) { e.printStackTrace(); } }
From source file:org.apache.hadoop.hive.ql.hooks.LineageLogger.java
/** * Generate query string md5 hash./*w w w . j a va 2s. c o m*/ */ private String getQueryHash(String queryStr) { Hasher hasher = Hashing.md5().newHasher(); hasher.putString(queryStr); return hasher.hash().toString(); }
From source file:net.sourceforge.fenixedu.presentationTier.Action.person.ExternalAppsDA.java
/**
 * Renders the service-agreement page. Exposes the agreement HTML together
 * with its MD5 checksum (over the UTF-8 bytes) as request attributes so the
 * JSP can submit the checksum back for change detection, and disables the
 * portal layout for this view.
 */
public ActionForward showServiceAgreement(ActionMapping mapping, ActionForm actionForm, HttpServletRequest request,
        HttpServletResponse response) throws Exception {
    final String serviceAgreementHtml = getServiceAgreementHtml();
    final String checksum =
            Hashing.md5().newHasher().putString(serviceAgreementHtml, Charsets.UTF_8).hash().toString();
    request.setAttribute("serviceAgreement", serviceAgreementHtml);
    request.setAttribute("serviceAgreementChecksum", checksum);
    PortalLayoutInjector.skipLayoutOn(request);
    return new ActionForward(null, "/auth/showServiceAgreement.jsp", false, "");
}
From source file:org.fabrician.enabler.DockerContainer.java
/**
 * Builds a ProcessWrapper that runs the given command inside the Docker
 * container via {@code docker exec -d}, optionally prefixed with sudo.
 * The pid file name is derived from an MD5 of the full command line
 * (base64url-encoded) so each distinct command gets its own pid file.
 *
 * @param execCmd the command to execute inside the container
 * @return a process wrapper for the detached exec invocation
 * @throws Exception if the wrapper cannot be created (logged and rethrown)
 */
public ProcessWrapper getExecCmdProcessWrapper(String execCmd) throws Exception {
    String command = "docker exec -d " + dockerContainerTag + " " + execCmd;
    if (useSudo) {
        command = "sudo " + command;
    }
    final File workDir = new File(getWorkDir());
    final HashCode digest = Hashing.md5().newHasher().putString(command, Charsets.UTF_8).hash();
    final String cmd_pid = "_cmd_" + BaseEncoding.base64Url().encode(digest.asBytes()) + ".pid";
    try {
        final String engineOS = getEngineProperty(EngineProperties.OS);
        return getProcessWrapper(command, workDir, engineOS + cmd_pid);
    } catch (Exception ex) {
        getEngineLogger().log(Level.SEVERE,
                "while getting a process wrapper for 'docker exec' command [" + execCmd + "]", ex);
        throw ex;
    }
}
From source file:com.android.sdklib.devices.DeviceManager.java
/** * Returns the hardware properties defined in * {@link AvdManager#HARDWARE_INI} as a {@link Map}. * * This is intended to be dumped in the config.ini and already contains * the device name, manufacturer and device hash. * * @param d The {@link Device} from which to derive the hardware properties. * @return A {@link Map} of hardware properties. *//*from w w w .j ava 2 s . c om*/ @NonNull public static Map<String, String> getHardwareProperties(@NonNull Device d) { Map<String, String> props = getHardwareProperties(d.getDefaultState()); for (State s : d.getAllStates()) { if (s.getKeyState().equals(KeyboardState.HIDDEN)) { props.put("hw.keyboard.lid", getBooleanVal(true)); } } HashFunction md5 = Hashing.md5(); Hasher hasher = md5.newHasher(); ArrayList<String> keys = new ArrayList<String>(props.keySet()); Collections.sort(keys); for (String key : keys) { if (key != null) { hasher.putString(key, Charsets.UTF_8); String value = props.get(key); hasher.putString(value == null ? "null" : value, Charsets.UTF_8); } } // store the hash method for potential future compatibility String hash = "MD5:" + hasher.hash().toString(); props.put(AvdManager.AVD_INI_DEVICE_HASH_V2, hash); props.remove(AvdManager.AVD_INI_DEVICE_HASH_V1); props.put(AvdManager.AVD_INI_DEVICE_NAME, d.getId()); props.put(AvdManager.AVD_INI_DEVICE_MANUFACTURER, d.getManufacturer()); return props; }
From source file:org.apache.impala.analysis.Vertex.java
/**
 * Computes an MD5 hash of the query string, used as a stable query identifier.
 *
 * @param queryStr the query text to hash
 * @return the MD5 digest as a lowercase hex string
 */
private String getQueryHash(String queryStr) {
    Hasher hasher = Hashing.md5().newHasher();
    // putUnencodedChars replaces the deprecated charset-less
    // putString(CharSequence) overload with an identical digest.
    hasher.putUnencodedChars(queryStr);
    return hasher.hash().toString();
}