List of usage examples for org.apache.commons.io FileUtils ONE_MB
public static final long ONE_MB: the number of bytes in one megabyte (1,048,576).
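Before the project examples below, here is a minimal, self-contained sketch of the typical use of the constant: converting a raw byte count to megabytes and expressing a size threshold in whole megabytes. The class name, file argument and the 10 MB limit are illustrative only and are not taken from the projects listed below.

import java.io.File;

import org.apache.commons.io.FileUtils;

public class OneMbUsageSketch {

    // Illustrative threshold: 10 MB expressed in bytes.
    private static final long MAX_SIZE_BYTES = 10 * FileUtils.ONE_MB;

    public static void main(String[] args) {
        File file = new File(args[0]);
        long sizeBytes = file.length();

        // Convert bytes to megabytes; cast to double to keep the fractional part.
        double sizeMb = (double) sizeBytes / FileUtils.ONE_MB;
        System.out.printf("%s is %.2f MB%n", file.getName(), sizeMb);

        if (sizeBytes > MAX_SIZE_BYTES) {
            System.out.println("File exceeds the 10 MB limit.");
        }
    }
}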
From source file:org.alfresco.repo.content.caching.quota.StandardQuotaStrategy.java
/**
 * Lifecycle method. Should be called immediately after constructing objects of this type
 * (e.g. by the Spring framework's application context).
 */
public void init() {
    if (log.isDebugEnabled()) {
        log.debug("Starting quota strategy.");
    }
    PropertyCheck.mandatory(this, "cleaner", cleaner);
    PropertyCheck.mandatory(this, "cache", cache);

    if (maxUsageBytes < (10 * FileUtils.ONE_MB)) {
        if (log.isWarnEnabled()) {
            log.warn("Low maxUsageBytes of " + maxUsageBytes + "bytes - did you mean to specify in MB?");
        }
    }

    loadDiskUsage();

    // Set the time to start the normal clean
    lastCleanupStart.set(System.currentTimeMillis() - normalCleanThresholdSec);

    // Run the cleaner thread so that it can update the disk usage more accurately.
    signalCleanerStart("quota (init)");
}
From source file:org.alfresco.repo.content.caching.quota.StandardQuotaStrategy.java
public void setMaxUsageMB(long maxUsageMB) {
    setMaxUsageBytes(maxUsageMB * FileUtils.ONE_MB);
}
From source file:org.alfresco.repo.content.caching.quota.StandardQuotaStrategy.java
public double getCurrentUsageMB() {
    return (double) getCurrentUsageBytes() / FileUtils.ONE_MB;
}
From source file:org.alfresco.repo.content.caching.quota.StandardQuotaStrategy.java
public long getMaxUsageMB() {
    return maxUsageBytes / FileUtils.ONE_MB;
}
From source file:org.alfresco.repo.content.caching.quota.StandardQuotaStrategy.java
protected long getMaxFileSizeBytes() {
    return maxFileSizeMB * FileUtils.ONE_MB;
}
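The StandardQuotaStrategy accessors above show the conversion pattern in both directions: sizes are held in bytes internally and multiplied or divided by FileUtils.ONE_MB only at the MB-facing methods. Note that getMaxUsageMB uses integer division, which truncates any partial megabyte, while getCurrentUsageMB casts to double first to keep the fraction. A tiny sketch of that difference (class name and values are illustrative, not part of the Alfresco source):

import org.apache.commons.io.FileUtils;

public class OneMbConversionSketch {
    public static void main(String[] args) {
        long bytes = 1_572_864L; // 1.5 MB expressed in bytes

        // Integer division truncates toward zero: 1_572_864 / 1_048_576 == 1.
        long wholeMegabytes = bytes / FileUtils.ONE_MB;

        // Casting to double first keeps the fractional part: 1.5.
        double exactMegabytes = (double) bytes / FileUtils.ONE_MB;

        System.out.println("Whole MB: " + wholeMegabytes);
        System.out.println("Exact MB: " + exactMegabytes);
    }
}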
From source file:org.alfresco.repo.content.caching.quota.StandardQuotaStrategyTest.java
@SuppressWarnings("unchecked") @Test// w w w . ja va2 s . c om public void largeContentCacheFilesAreNotKeptOnDisk() throws IOException { quota.setMaxFileSizeMB(3); writeSingleFileInMB(1); writeSingleFileInMB(2); writeSingleFileInMB(3); writeSingleFileInMB(4); List<File> files = new ArrayList<File>(findCacheFiles()); assertEquals(3, files.size()); Collections.sort(files, SizeFileComparator.SIZE_COMPARATOR); assertEquals(1, files.get(0).length() / FileUtils.ONE_MB); assertEquals(2, files.get(1).length() / FileUtils.ONE_MB); assertEquals(3, files.get(2).length() / FileUtils.ONE_MB); }
From source file:org.apache.eagle.jpm.analyzer.mr.suggestion.MapReduceSpillProcessor.java
@Override
public Result.ProcessorResult process(MapReduceAnalyzerEntity jobAnalysisEntity) {
    StringBuilder sb = new StringBuilder();
    List<String> optSettings = new ArrayList<>();
    String setting;

    long outputRecords = 0L; // Map output records
    long spillRecords = 0L;  // Spilled Records

    try {
        outputRecords = context.getJob().getMapCounters()
                .getCounterValue(JobCounters.CounterName.MAP_OUTPUT_RECORDS);
        spillRecords = context.getJob().getMapCounters()
                .getCounterValue(JobCounters.CounterName.SPILLED_RECORDS);

        if (outputRecords < spillRecords) {
            sb.append("Total map output records: ").append(outputRecords);
            sb.append(" Total map spilled records: ").append(spillRecords).append(". Please set");

            long minMapSpillMemBytes = context.getMinMapSpillMemBytes();
            double spillPercent = context.getJobconf().getDouble(MAP_SORT_SPILL_PERCENT, 0.8);

            if (minMapSpillMemBytes > 512 * FileUtils.ONE_MB * spillPercent) {
                if (Math.abs(1.0 - spillPercent) > 0.001) {
                    setting = String.format("-D%s=1", MAP_SORT_SPILL_PERCENT);
                    sb.append(" ").append(setting);
                    optSettings.add(setting);
                }
            } else {
                minMapSpillMemBytes /= spillPercent;
            }

            long minMapSpillMemMB = (minMapSpillMemBytes / FileUtils.ONE_MB + 10) / 10 * 10;
            if (minMapSpillMemMB >= 2047) {
                sb.append("\nPlease reduce the block size of the input files and make sure they are splittable.");
            } else {
                setting = String.format("-D%s=%s", IO_SORT_MB, minMapSpillMemMB);
                sb.append(" ").append(setting);
                optSettings.add(setting);

                long heapSize = getMaxHeapSize(context.getJobconf().get(MAP_JAVA_OPTS));
                if (heapSize < 3 * minMapSpillMemMB) {
                    long expectedHeapSizeMB = (minMapSpillMemMB * 3 + 1024) / 1024 * 1024;
                    setting = String.format("-D%s=-Xmx%sM", MAP_JAVA_OPTS, expectedHeapSizeMB);
                    sb.append(" ").append(setting);
                    optSettings.add(setting);
                }
            }
            sb.append(" to avoid spilled records.\n");
        }

        long reduceInputRecords = context.getJob().getReduceCounters()
                .getCounterValue(JobCounters.CounterName.REDUCE_INPUT_RECORDS);
        spillRecords = context.getJob().getReduceCounters()
                .getCounterValue(JobCounters.CounterName.SPILLED_RECORDS);

        if (reduceInputRecords < spillRecords) {
            sb.append("Please add more memory (mapreduce.reduce.java.opts) to avoid spilled records.");
            sb.append(" Total Reduce input records: ").append(reduceInputRecords);
            sb.append(" Total Spilled Records: ").append(spillRecords);
            sb.append("\n");
        }

        if (sb.length() > 0) {
            return new Result.ProcessorResult(Result.RuleType.SPILL, Result.ResultLevel.INFO, sb.toString(),
                    optSettings);
        }
    } catch (NullPointerException e) {
        // When the job has failed there may be no counters, so just ignore it.
    }
    return null;
}
From source file:org.apache.eagle.jpm.analyzer.mr.suggestion.MapReduceTaskNumProcessor.java
private String analyzeReduceTaskNum(List<String> optSettings) {
    StringBuilder sb = new StringBuilder();
    long numReduces = context.getNumReduces();
    if (numReduces > 0) {
        long avgReduceTime = context.getAvgReduceTimeInSec();
        long avgShuffleTime = context.getAvgShuffleTimeInSec();
        long avgShuffleBytes = context.getJob().getReduceCounters()
                .getCounterValue(JobCounters.CounterName.REDUCE_SHUFFLE_BYTES) / numReduces;
        long avgReduceOutput = context.getJob().getReduceCounters()
                .getCounterValue(JobCounters.CounterName.HDFS_BYTES_WRITTEN) / numReduces;
        long avgReduceTotalTime = avgShuffleTime + avgReduceTime;

        long suggestReduces = 0;
        StringBuilder tmpsb = new StringBuilder();

        String avgShuffleDisplaySize = bytesToHumanReadable(avgShuffleBytes);
        if (avgShuffleBytes < 256 * FileUtils.ONE_MB && avgReduceTotalTime < 300
                && avgReduceOutput < 256 * FileUtils.ONE_MB && numReduces > 1) {
            tmpsb.append("average reduce input bytes is: ").append(avgShuffleDisplaySize).append(", ");
            suggestReduces = getReduceNum(avgShuffleBytes, avgReduceOutput, avgReduceTime);
        } else if (avgShuffleBytes > 10 * FileUtils.ONE_GB && avgReduceTotalTime > 1800) {
            tmpsb.append("average reduce input bytes is: ").append(avgShuffleDisplaySize).append(", ");
            suggestReduces = getReduceNum(avgShuffleBytes, avgReduceOutput, avgReduceTime);
        }

        if (avgReduceTotalTime < 60 && numReduces > 1) {
            tmpsb.append("average reduce time is only ").append(avgReduceTotalTime).append(" seconds, ");
            if (suggestReduces == 0) {
                suggestReduces = getReduceNum(avgShuffleBytes, avgReduceOutput, avgReduceTime);
            }
        } else if (avgReduceTotalTime > 3600 && avgReduceTime > 1800) {
            tmpsb.append("average reduce time is ").append(avgReduceTotalTime).append(" seconds, ");
            if (suggestReduces == 0) {
                suggestReduces = getReduceNum(avgShuffleBytes, avgReduceOutput, avgReduceTime);
            }
        }

        String avgReduceOutputDisplaySize = bytesToHumanReadable(avgReduceOutput);
        if (avgReduceOutput < 10 * FileUtils.ONE_MB && avgReduceTime < 300
                && avgShuffleBytes < 2 * FileUtils.ONE_GB && numReduces > 1) {
            tmpsb.append(" average reduce output is only ").append(avgReduceOutputDisplaySize).append(", ");
            if (suggestReduces == 0) {
                suggestReduces = getReduceNum(avgShuffleBytes, avgReduceOutput, avgReduceTime);
            }
        } else if (avgReduceOutput > 10 * FileUtils.ONE_GB && avgReduceTime > 1800) {
            tmpsb.append(" average reduce output is ").append(avgReduceOutputDisplaySize).append(", ");
            if (suggestReduces == 0) {
                suggestReduces = getReduceNum(avgShuffleBytes, avgReduceOutput, avgReduceTime);
            }
        }

        if (suggestReduces > 0) {
            sb.append("Best practice: ").append(tmpsb.toString()).append("please consider ");
            if (suggestReduces > numReduces) {
                sb.append("increasing the ");
            } else {
                sb.append("decreasing the ");
            }
            String setting = String.format("-D%s=%s", NUM_REDUCES, suggestReduces);
            sb.append("reducer number. You could try ").append(setting).append("\n");
            optSettings.add(setting);
        }
    }
    return sb.toString();
}
From source file:org.apache.eagle.jpm.analyzer.mr.suggestion.MapReduceTaskNumProcessor.java
private String analyzeMapTaskNum(List<String> optSettings) {
    StringBuilder sb = new StringBuilder();

    long numMaps = context.getNumMaps();
    long avgMapTime = context.getAvgMapTimeInSec();
    long avgMapInput = context.getJob().getMapCounters()
            .getCounterValue(JobCounters.CounterName.HDFS_BYTES_READ) / numMaps;
    String avgMapInputDisplaySize = bytesToHumanReadable(avgMapInput);

    if (avgMapInput < 5 * FileUtils.ONE_MB && avgMapTime < 30 && numMaps > 1) {
        sb.append("Best practice: average map input bytes only have ").append(avgMapInputDisplaySize);
        sb.append(". Please reduce the number of mappers by merging input files.\n");
    } else if (avgMapInput > FileUtils.ONE_GB) {
        sb.append("Best practice: average map input bytes have ").append(avgMapInputDisplaySize);
        sb.append(". Please increase the number of mappers by using splittable compression, a container file format or a smaller block size.\n");
    }

    if (avgMapTime < 10 && numMaps > 1) {
        sb.append("Best practice: average map time only have ").append(avgMapTime);
        sb.append(" seconds. Please reduce the number of mappers by merging input files or by using a larger block size.\n");
    } else if (avgMapTime > 600 && avgMapInput < FileUtils.ONE_GB) {
        sb.append("Best practice: average map time is ").append(avgMapTime);
        sb.append(" seconds. Please increase the number of mappers by using splittable compression, a container file format or a smaller block size.\n");
    }

    return sb.toString();
}
From source file:org.apache.hadoop.hbase.master.cleaner.TestLogsCleaner.java
private void createFiles(FileSystem fs, Path parentDir, int numOfFiles) throws IOException {
    for (int i = 0; i < numOfFiles; i++) {
        // size of each file is 1M, 2M, or 3M
        int xMega = 1 + ThreadLocalRandom.current().nextInt(1, 4);
        try (FSDataOutputStream fsdos = fs.create(new Path(parentDir, "file-" + i))) {
            byte[] M = RandomUtils.nextBytes(Math.toIntExact(FileUtils.ONE_MB * xMega));
            fsdos.write(M);
        }
    }
}