List of usage examples for org.apache.hadoop.util.StringUtils.formatPercent
public static String formatPercent(double fraction, int decimalPlaces)
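formatPercent renders a fraction (e.g. 0.1 for 10%) as a percentage string with the requested number of decimal places. Before the real-world examples below, here is a minimal standalone sketch; the hit/request counts are made up for illustration, and the exact rounding of the output may vary between Hadoop versions:

import org.apache.hadoop.util.StringUtils;

public class FormatPercentDemo {
    public static void main(String[] args) {
        long hits = 75;      // hypothetical cache hits
        long requests = 200; // hypothetical total requests

        // Guard against division by zero, as the examples below do,
        // then format the hit ratio with two decimal places.
        String hitRatio = (requests == 0)
                ? "0.00"
                : StringUtils.formatPercent((double) hits / requests, 2);

        System.out.println("hitRatio=" + hitRatio); // e.g. 37.50%
    }
}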
From source file:com.inclouds.hbase.rowcache.RowCache.java
License:Open Source License
/**
 * Start co-processor - cache.
 *
 * @param cfg the cfg
 * @throws IOException Signals that an I/O exception has occurred.
 */
public void start(Configuration cfg) throws IOException {
    // Get all config from Configuration object
    // Start - load cache
    this.config = cfg;
    synchronized (RowCache.class) {
        if (rowCache != null)
            return;
        final CacheConfiguration ccfg = ConfigHelper.getCacheConfiguration(cfg);
        // set cache name
        ccfg.setCacheName("row-cache");
        long maxMemory = cfg.getLong(ROWCACHE_MAXMEMORY, DEFAULT_MAX_MEMORY);
        ccfg.setMaxMemory(maxMemory);
        LOG.info("[row-cache] Setting max memory to " + maxMemory);
        long maxItems = cfg.getLong(ROWCACHE_MAXITEMS, DEFAULT_MAXITEMS);
        if (maxItems > Integer.MAX_VALUE - 1) {
            maxItems = Integer.MAX_VALUE - 1;
            LOG.warn("[row-cache] Max items is too large " + maxItems);
        } else {
            LOG.info("[row-cache] Setting max items to " + maxItems);
        }
        LOG.info("[row-cache] Direct memory buffer size set to " + StringUtils.byteDesc(RowCache.ioBufferSize));
        ccfg.setBucketNumber((int) maxItems);
        String codecName = cfg.get(ROWCACHE_COMPRESSION, DEFAULT_COMPRESSION);
        ccfg.setCodecType(CodecType.valueOf(codecName.toUpperCase()));
        LOG.info("[row-cache] compression codec=" + codecName);
        isPersistentCache = Boolean.parseBoolean(cfg.get(ROWCACHE_PERSISTENT, DEFAULT_PERSISTENT));
        LOG.info("[row-cache] persistent=" + isPersistentCache);
        String[] dataRoots = getDataRoots(cfg.get(ROWCACHE_CACHE_DATA_ROOTS));
        if (isPersistentCache && dataRoots == null) {
            dataRoots = getHDFSRoots(cfg);
            if (dataRoots == null) {
                LOG.warn("Data roots are not defined for Row Cache. Set persistent mode to false.");
                isPersistentCache = false;
            }
        }
        // TODO - compression
        CacheManager manager = CacheManager.getInstance();
        try {
            if (isPersistentCache) {
                RawFSConfiguration storeConfig = new RawFSConfiguration();
                storeConfig.setDiskStoreImplementation(RawFSStore.class);
                storeConfig.setStoreName(ccfg.getCacheName());
                storeConfig.setDbDataStoreRoots(dataRoots);
                storeConfig.setPersistenceMode(PersistenceMode.ONDEMAND);
                storeConfig.setDbSnapshotInterval(15);
                ccfg.setDataStoreConfiguration(storeConfig);
                // Load cache data
                rowCache = manager.getCache(ccfg, null);
            } else {
                rowCache = manager.getCache(ccfg, new ProgressListener() {
                    @Override
                    public void canceled() {
                        LOG.info("Canceled");
                    }

                    @Override
                    public void error(Throwable t, boolean aborted) {
                        LOG.error("Aborted=" + aborted, t);
                    }

                    @Override
                    public void finished() {
                        LOG.info("Finished loading cache");
                    }

                    @Override
                    public void progress(long done, long total) {
                        LOG.info("Loaded " + done + " out of " + total);
                    }

                    @Override
                    public void started() {
                        LOG.info("Started loading scan cache data from "
                                + ccfg.getDiskStoreConfiguration().getDbDataStoreRoots());
                    }
                });
            }
        } catch (Throwable ex) {
            throw new IOException(ex);
        }
        LOG.info("[row-cache] coprocessor started ");
        RowCache.instance = this;
        Runnable r = new Runnable() {
            public void run() {
                LOG.info("[row-cache] Stats thread started. ");
                while (true) {
                    try {
                        Thread.sleep(STATS_INTERVAL);
                    } catch (InterruptedException e) {
                    }
                    long lastR = lastRequests;
                    long lastH = lastHits;
                    long requests = rowCache.getTotalRequestCount();
                    long hits = rowCache.getHitCount();
                    if (requests != lastRequests) {
                        // Log only if new data
                        LOG.info("[L1-OFFHEAP]: accesses=" + requests + " hits=" + hits + " hitRatio="
                                + ((requests == 0) ? "0.00"
                                        : StringUtils.formatPercent((double) hits / requests, 2) + "%"
                                                + " Last period: accesses=" + (requests - lastR) + " hits="
                                                + (hits - lastH) + " hitRatio="
                                                + (((requests - lastR) == 0) ? "0.00"
                                                        : StringUtils.formatPercent(
                                                                (double) (hits - lastH) / (requests - lastR), 2)))
                                + "%" + " maxMemory=" + StringUtils.byteDesc(rowCache.getMemoryLimit())
                                + " allocatedMemory=" + StringUtils.byteDesc(rowCache.getAllocatedMemorySize())
                                + " freeMemory="
                                + StringUtils.byteDesc(rowCache.getMemoryLimit() - rowCache.getAllocatedMemorySize())
                                + " totalItems=" + rowCache.size() + " evicted=" + rowCache.getEvictedCount());
                        lastRequests = requests;
                        lastHits = hits;
                    }
                }
            }
        };
        statThread = new Thread(r, "BigBaseRowCache.StatisticsThread");
        statThread.start();
        // Register shutdown hook
        registerShutdownHook();
    }
}
From source file:com.kakao.hbase.manager.command.MC.java
License:Apache License
private String getRegionInfo(byte[] regionName) {
    return "Table: " + regionTableMap.get(regionName) + ", RS: " + regionRSMap.get(regionName) + ", Locality: "
            + (regionLocalityMap.get(regionName) == null ? "null"
                    : StringUtils.formatPercent(regionLocalityMap.get(regionName), 2))
            + ", SizeMB: " + regionSizeMap.get(regionName);
}
From source file:com.koda.integ.hbase.blockcache.OffHeapBlockCache.java
License:Open Source License
/**
 * Log stats.
 */
protected void logStats() {
    // Log size
    long totalSize = getCurrentSize();
    long freeSize = getMaxSize() - totalSize;
    OffHeapBlockCache.LOG.info("[BLOCK CACHE]: " + "total=" + StringUtils.byteDesc(totalSize) + ", "
            + "free=" + StringUtils.byteDesc(freeSize) + ", "
            + "max=" + StringUtils.byteDesc(getMaxSize()) + ", "
            + "blocks=" + size() + ", "
            + "accesses=" + stats.getRequestCount() + ", "
            + "hits=" + stats.getHitCount() + ", "
            + "hitRatio="
            + (stats.getRequestCount() > 0 ? StringUtils.formatPercent(stats.getHitRatio(), 2) : "0.00") + "%, "
            + "cachingAccesses=" + stats.getRequestCachingCount() + ", "
            + "cachingHits=" + stats.getHitCachingCount() + ", "
            + "cachingHitsRatio="
            + (stats.getRequestCachingCount() > 0 ? StringUtils.formatPercent(stats.getHitCachingRatio(), 2)
                    : "0.00") + "%, "
            + "evicted=" + getEvictedCount());
}
From source file:com.koda.integ.hbase.blockcache.OffHeapBlockCache.java
License:Open Source License
protected void logStatsOffHeap() {
    // Log size
    long totalSize = offHeapCache.getTotalAllocatedMemorySize();
    long maxSize = offHeapCache.getMemoryLimit();
    long freeSize = maxSize - totalSize;
    OffHeapBlockCache.LOG.info("[L2-OFFHEAP] : " + "total=" + StringUtils.byteDesc(totalSize) + ", "
            + "free=" + StringUtils.byteDesc(freeSize) + ", "
            + "max=" + StringUtils.byteDesc(maxSize) + ", "
            + "blocks=" + offHeapCache.size() + ", "
            + "accesses=" + offHeapStats.getRequestCount() + ", "
            + "hits=" + offHeapStats.getHitCount() + ", "
            + "hitRatio="
            + (offHeapStats.getRequestCount() > 0 ? StringUtils.formatPercent(offHeapStats.getHitRatio(), 2)
                    : "0.00") + "%, "
            + "cachingAccesses=" + offHeapStats.getRequestCachingCount() + ", "
            + "cachingHits=" + offHeapStats.getHitCachingCount() + ", "
            + "cachingHitsRatio="
            + (offHeapStats.getRequestCachingCount() > 0
                    ? StringUtils.formatPercent(offHeapStats.getHitCachingRatio(), 2) : "0.00") + "%, "
            + "evicted=" + offHeapCache.getEvictedCount());
}
From source file:com.koda.integ.hbase.blockcache.OffHeapBlockCache.java
License:Open Source License
protected void logStatsOffHeapExt() {
    // Log size
    long totalSize = extStorageCache.getAllocatedMemorySize();
    long maxSize = extStorageCache.getMemoryLimit();
    long freeSize = maxSize - totalSize;
    OffHeapBlockCache.LOG.info("[L3-OFFHEAP] : " + "total=" + StringUtils.byteDesc(totalSize) + ", "
            + "free=" + StringUtils.byteDesc(freeSize) + ", "
            + "max=" + StringUtils.byteDesc(maxSize) + ", "
            + "refs=" + extStorageCache.size() + ", "
            + "accesses=" + extRefStats.getRequestCount() + ", "
            + "hits=" + extRefStats.getHitCount() + ", "
            + "hitRatio="
            + (extRefStats.getRequestCount() > 0 ? StringUtils.formatPercent(extRefStats.getHitRatio(), 2)
                    : "0.00") + "%, "
            + "cachingAccesses=" + extRefStats.getRequestCachingCount() + ", "
            + "cachingHits=" + extRefStats.getHitCachingCount() + ", "
            + "cachingHitsRatio="
            + (extRefStats.getRequestCachingCount() > 0
                    ? StringUtils.formatPercent(offHeapStats.getHitCachingRatio(), 2) : "0.00") + "%, "
            + "evicted=" + extStorageCache.getEvictedCount());
}
From source file:com.koda.integ.hbase.blockcache.OffHeapBlockCache.java
License:Open Source License
protected void logStatsOnHeap() {
    if (onHeapEnabled() == false)
        return;
    // Log size
    long totalSize = onHeapCache.getCurrentSize();
    long maxSize = onHeapCache.getMaxSize();
    long freeSize = maxSize - totalSize;
    OnHeapBlockCache.LOG.info("[L2-HEAP] : " + "total=" + StringUtils.byteDesc(totalSize) + ", "
            + "free=" + StringUtils.byteDesc(freeSize) + ", "
            + "max=" + StringUtils.byteDesc(maxSize) + ", "
            + "blocks=" + onHeapCache.size() + ", "
            + "accesses=" + onHeapStats.getRequestCount() + ", "
            + "hits=" + onHeapStats.getHitCount() + ", "
            + "hitRatio="
            + (onHeapStats.getRequestCount() > 0 ? StringUtils.formatPercent(onHeapStats.getHitRatio(), 2)
                    : "0.00") + "%, "
            + "cachingAccesses=" + onHeapStats.getRequestCachingCount() + ", "
            + "cachingHits=" + onHeapStats.getHitCachingCount() + ", "
            + "cachingHitsRatio="
            + (onHeapStats.getRequestCachingCount() > 0
                    ? StringUtils.formatPercent(onHeapStats.getHitCachingRatio(), 2) : "0.00") + "%, "
            + "evicted=" + onHeapCache.getEvictedCount());
}
From source file:com.koda.integ.hbase.blockcache.OffHeapBlockCache.java
License:Open Source License
protected void logStatsExternal() {
    if (storage == null)
        return;
    // Log size
    long totalSize = storage.size();
    long maxSize = storage.getMaxStorageSize();
    long freeSize = maxSize - totalSize;
    OffHeapBlockCache.LOG.info("[L3-DISK] : " + "total=" + StringUtils.byteDesc(totalSize) + ", "
            + "free=" + StringUtils.byteDesc(freeSize) + ", "
            + "max=" + StringUtils.byteDesc(maxSize) + ", "
            + "accesses=" + extStats.getRequestCount() + ", "
            + "hits=" + extStats.getHitCount() + ", "
            + "hitRatio="
            + (extStats.getRequestCount() > 0 ? StringUtils.formatPercent(extStats.getHitRatio(), 2) : "0.00")
            + "%, "
            + "cachingAccesses=" + extStats.getRequestCachingCount() + ", "
            + "cachingHits=" + extStats.getHitCachingCount() + ", "
            + "cachingHitsRatio="
            + (extStats.getRequestCachingCount() > 0
                    ? StringUtils.formatPercent(extStats.getHitCachingRatio(), 2) : "0.00") + "%, ");
    // "\nFATAL READS=" + fatalExternalReads.get());
}
From source file:com.koda.integ.hbase.blockcache.OffHeapBlockCacheOld.java
License:Open Source License
/**
 * Log stats.
 */
public void logStats() {
    if (!LOG.isDebugEnabled())
        return;
    // Log size
    long totalSize = getCurrentSize();
    long freeSize = maxSize - totalSize;
    OffHeapBlockCacheOld.LOG.debug("LRU Stats: " + "total=" + StringUtils.byteDesc(totalSize) + ", "
            + "free=" + StringUtils.byteDesc(freeSize) + ", "
            + "max=" + StringUtils.byteDesc(this.maxSize) + ", "
            + "blocks=" + size() + ", "
            + "accesses=" + stats.getRequestCount() + ", "
            + "hits=" + stats.getHitCount() + ", "
            + "hitRatio=" + StringUtils.formatPercent(stats.getHitRatio(), 2) + "%, "
            + "cachingAccesses=" + stats.getRequestCachingCount() + ", "
            + "cachingHits=" + stats.getHitCachingCount() + ", "
            + "cachingHitsRatio=" + StringUtils.formatPercent(stats.getHitCachingRatio(), 2) + "%, "
            + "evicted=" + getEvictedCount());
}
From source file:com.koda.integ.hbase.blockcache.OnHeapBlockCache.java
License:Open Source License
/**
 * Log stats.
 */
public void logStats() {
    if (!LOG.isDebugEnabled())
        return;
    // Log size
    long totalSize = heapSize();
    long freeSize = maxSize - totalSize;
    OnHeapBlockCache.LOG.debug("Stats: " + "total=" + StringUtils.byteDesc(totalSize) + ", "
            + "free=" + StringUtils.byteDesc(freeSize) + ", "
            + "max=" + StringUtils.byteDesc(this.maxSize) + ", "
            + "blocks=" + size() + ", "
            + "accesses=" + stats.getRequestCount() + ", "
            + "hits=" + stats.getHitCount() + ", "
            + "hitRatio="
            + (stats.getHitCount() == 0 ? "0" : (StringUtils.formatPercent(stats.getHitRatio(), 2) + ", ")) + ", "
            + "cachingAccesses=" + stats.getRequestCachingCount() + ", "
            + "cachingHits=" + stats.getHitCachingCount() + ", "
            + "cachingHitsRatio="
            + (stats.getHitCachingCount() == 0 ? "0"
                    : (StringUtils.formatPercent(stats.getHitCachingRatio(), 2) + ", ")) + ", "
            + "evictions=" + stats.getEvictionCount() + ", "
            + "evicted=" + stats.getEvictedCount() + ", "
            + "evictedPerRun=" + stats.evictedPerEviction());
}
From source file:io.hops.erasure_coding.MapReduceEncoder.java
License:Apache License
/**
 * Checks if the map-reduce job has completed.
 *
 * @return true if the job completed, false otherwise.
 * @throws java.io.IOException
 */
public boolean checkComplete() throws IOException {
    JobID jobID = runningJob.getID();
    if (runningJob.isComplete()) {
        // delete job directory
        final String jobdir = jobconf.get(JOB_DIR_LABEL);
        if (jobdir != null) {
            final Path jobpath = new Path(jobdir);
            jobpath.getFileSystem(jobconf).delete(jobpath, true);
        }
        if (runningJob.isSuccessful()) {
            LOG.info("Job Complete(Succeeded): " + jobID);
        } else {
            LOG.info("Job Complete(Failed): " + jobID);
        }
        cleanUp();
        return true;
    } else {
        String report = (" job " + jobID + " map " + StringUtils.formatPercent(runningJob.mapProgress(), 0)
                + " reduce " + StringUtils.formatPercent(runningJob.reduceProgress(), 0));
        if (!report.equals(lastReport)) {
            LOG.info(report);
            lastReport = report;
        }
        TaskCompletionEvent[] events = runningJob.getTaskCompletionEvents(jobEventCounter);
        jobEventCounter += events.length;
        for (TaskCompletionEvent event : events) {
            if (event.getTaskStatus() == TaskCompletionEvent.Status.FAILED) {
                LOG.info(" Job " + jobID + " " + event.toString());
            }
        }
        return false;
    }
}