Example usage for org.apache.commons.io FileUtils ONE_MB

Introduction

This page lists example usages of the org.apache.commons.io.FileUtils.ONE_MB constant, collected from open-source projects.

Prototype

long ONE_MB

Document

The number of bytes in a megabyte.
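
FileUtils.ONE_MB is a public static final long with the value 1,048,576 (1024 * 1024). Below is a minimal sketch of the two idioms that recur in the examples on this page, dividing a byte count by ONE_MB to get megabytes and multiplying a megabyte count by ONE_MB to get bytes (class and variable names here are illustrative):

import org.apache.commons.io.FileUtils;

public class OneMbExample {
    public static void main(String[] args) {
        long sizeInBytes = 3_145_728L;                        // 3 MB expressed in bytes
        long wholeMegabytes = sizeInBytes / FileUtils.ONE_MB; // 3
        long fiftyMbInBytes = 50 * FileUtils.ONE_MB;          // 52,428,800
        System.out.println(wholeMegabytes + " MB, limit = " + fiftyMbInBytes + " bytes");
    }
}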

Usage

From source file: org.apache.sling.tracer.internal.TracerLogServlet.java

Multiplies a cache size configured in megabytes by FileUtils.ONE_MB to set the maximum weight, in bytes, of a Guava cache.

public TracerLogServlet(BundleContext context, int cacheSizeInMB, long cacheDurationInSecs,
        boolean compressionEnabled, boolean gzipResponse) {
    super(LABEL, "Sling Tracer", "Sling", null);
    this.compressRecording = compressionEnabled;
    this.cacheDurationInSecs = cacheDurationInSecs;
    this.cacheSizeInMB = cacheSizeInMB;
    this.gzipResponse = compressionEnabled && gzipResponse;
    this.cache = CacheBuilder.newBuilder().maximumWeight(cacheSizeInMB * FileUtils.ONE_MB)
            .weigher(new Weigher<String, JSONRecording>() {
                @Override
                public int weigh(@Nonnull String key, @Nonnull JSONRecording value) {
                    return value.size();
                }
            }).expireAfterAccess(cacheDurationInSecs, TimeUnit.SECONDS).recordStats().build();
    register(context);
}

From source file: org.apache.spark.SparkMetricsAggregator.java

Divides per-executor memory and excess memory by FileUtils.ONE_MB to aggregate resource usage and waste in MB-seconds.

@Override
public void aggregate(HadoopApplicationData data) {
    long resourceUsed = 0;
    long resourceWasted = 0;
    SparkApplicationData applicationData = (SparkApplicationData) data;

    long perExecutorMem = MemoryFormatUtils
            .stringToBytes(applicationData.getEnvironmentData().getSparkProperty(SPARK_EXECUTOR_MEMORY, "0"));

    Iterator<String> executorIds = applicationData.getExecutorData().getExecutors().iterator();

    while (executorIds.hasNext()) {
        String executorId = executorIds.next();
        SparkExecutorData.ExecutorInfo executorInfo = applicationData.getExecutorData()
                .getExecutorInfo(executorId);
        // store resourceUsed in MB-seconds
        resourceUsed += (executorInfo.duration / Statistics.SECOND_IN_MS) * (perExecutorMem / FileUtils.ONE_MB);
        // maxMem is the maximum available storage memory
        // memUsed is how much storage memory is used
        // any memory beyond memUsed plus the wastage buffer (50%) is considered wasted
        long excessMemory = (long) (executorInfo.maxMem
                - (executorInfo.memUsed * (1.0 + _storageMemWastageBuffer)));
        if (excessMemory > 0) {
            resourceWasted += (executorInfo.duration / Statistics.SECOND_IN_MS)
                    * (excessMemory / FileUtils.ONE_MB);
        }
    }

    _hadoopAggregatedData.setResourceUsed(resourceUsed);
    _hadoopAggregatedData.setResourceWasted(resourceWasted);
    // TODO: to find a way to calculate the delay
    _hadoopAggregatedData.setTotalDelay(0L);
}
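
The aggregation above combines runtime and memory into MB-seconds. A distilled, standalone sketch of that conversion (values and variable names are illustrative, not taken from the Spark aggregator):

// duration in milliseconds and memory in bytes, combined into MB-seconds
long durationMs = 120_000L;                      // 2 minutes
long memoryBytes = 4L * 1024 * FileUtils.ONE_MB; // 4 GB
long mbSeconds = (durationMs / 1000) * (memoryBytes / FileUtils.ONE_MB);
// mbSeconds == 120 * 4096 == 491,520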

From source file: org.apache.storm.daemon.logviewer.utils.LogFileDownloader.java

Divides a log file's length by FileUtils.ONE_MB to record the download size, in megabytes, in a metrics histogram.

/**
 * Checks authorization for the log file and download.
 *
 * @param fileName file to download
 * @param user username
 * @param isDaemon true if the file is a daemon log, false if the file is a worker log
 * @return a Response which lets browsers download that file.
 */
public Response downloadFile(String fileName, String user, boolean isDaemon) throws IOException {
    Path rootDir = isDaemon ? daemonLogRoot : logRoot;
    Path rawFile = rootDir.resolve(fileName);
    Path file = rawFile.toAbsolutePath().normalize();
    if (!file.startsWith(rootDir) || !rawFile.normalize().toString().equals(rawFile.toString())) {
        //Ensure filename doesn't contain ../ parts 
        return LogviewerResponseBuilder.buildResponsePageNotFound();
    }
    if (isDaemon && Paths.get(fileName).getNameCount() != 1) {
        //Prevent daemon log reads from pathing into worker logs
        return LogviewerResponseBuilder.buildResponsePageNotFound();
    }

    if (file.toFile().exists()) {
        if (isDaemon || resourceAuthorizer.isUserAllowedToAccessFile(user, fileName)) {
            fileDownloadSizeDistMb.update(Math.round((double) file.toFile().length() / FileUtils.ONE_MB));
            return LogviewerResponseBuilder.buildDownloadFile(file.toFile(), numFileDownloadExceptions);
        } else {
            return LogviewerResponseBuilder.buildResponseUnauthorizedUser(user);
        }
    } else {
        return LogviewerResponseBuilder.buildResponsePageNotFound();
    }
}

From source file: org.apache.usergrid.services.assets.data.AWSBinaryStore.java

Multiplies a maximum upload size configured in megabytes by FileUtils.ONE_MB to enforce a byte limit during a multipart S3 upload.

@Override
public void write(final UUID appId, final Entity entity, InputStream inputStream) throws Exception {

    String uploadFileName = AssetUtils.buildAssetKey(appId, entity);
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    long written = IOUtils.copyLarge(inputStream, baos, 0, FIVE_MB);

    byte[] data = baos.toByteArray();

    InputStream awsInputStream = new ByteArrayInputStream(data);

    final Map<String, Object> fileMetadata = AssetUtils.getFileMetadata(entity);
    fileMetadata.put(AssetUtils.LAST_MODIFIED, System.currentTimeMillis());

    String mimeType = AssetMimeHandler.get().getMimeType(entity, data);

    Boolean overSizeLimit = false;

    EntityManager em = emf.getEntityManager(appId);

    if (written < FIVE_MB) { // total smaller than 5mb

        ObjectMetadata om = new ObjectMetadata();
        om.setContentLength(written);
        om.setContentType(mimeType);
        PutObjectResult result = null;
        result = getS3Client().putObject(bucketName, uploadFileName, awsInputStream, om);

        String md5sum = Hex.encodeHexString(Base64.decodeBase64(result.getContentMd5()));
        String eTag = result.getETag();

        fileMetadata.put(AssetUtils.CONTENT_LENGTH, written);

        if (md5sum != null)
            fileMetadata.put(AssetUtils.CHECKSUM, md5sum);
        fileMetadata.put(AssetUtils.E_TAG, eTag);

        em.update(entity);

    } else { // bigger than 5mb... dump 5 mb tmp files and upload from them
        written = 0; // reset to 0; nothing has actually been uploaded yet
        int partNumber = 1;
        int firstByte = 0;
        Boolean isFirstChunck = true;
        List<PartETag> partETags = new ArrayList<PartETag>();

        //get the s3 client in order to initialize the multipart request
        getS3Client();
        InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName,
                uploadFileName);
        InitiateMultipartUploadResult initResponse = getS3Client().initiateMultipartUpload(initRequest);

        InputStream firstChunck = new ByteArrayInputStream(data);
        PushbackInputStream chunckableInputStream = new PushbackInputStream(inputStream, 1);

        // determine the maximum file size allowed, defaulting to 50 MB
        long maxSizeBytes = 50 * FileUtils.ONE_MB;
        String maxSizeMbString = properties.getProperty("usergrid.binary.max-size-mb", "50");
        if (StringUtils.isNumeric(maxSizeMbString)) {
            maxSizeBytes = Long.parseLong(maxSizeMbString) * FileUtils.ONE_MB;
        }

        // always allow files up to 5mb
        if (maxSizeBytes < 5 * FileUtils.ONE_MB) {
            maxSizeBytes = 5 * FileUtils.ONE_MB;
        }

        while (-1 != (firstByte = chunckableInputStream.read())) {
            long partSize = 0;
            chunckableInputStream.unread(firstByte);
            File tempFile = File.createTempFile(
                    entity.getUuid().toString().concat("-part").concat(String.valueOf(partNumber)), "tmp");

            tempFile.deleteOnExit();
            OutputStream os = null;
            try {
                os = new BufferedOutputStream(new FileOutputStream(tempFile.getAbsolutePath()));

                if (isFirstChunck == true) {
                    partSize = IOUtils.copyLarge(firstChunck, os, 0, (FIVE_MB));
                    isFirstChunck = false;
                } else {
                    partSize = IOUtils.copyLarge(chunckableInputStream, os, 0, (FIVE_MB));
                }
                written += partSize;

                if (written > maxSizeBytes) {
                    overSizeLimit = true;
                    logger.error("OVERSIZED FILE ({}). STARTING ABORT", written);
                    break;
                    //set flag here and break out of loop to run abort
                }
            } finally {
                IOUtils.closeQuietly(os);
            }

            FileInputStream chunk = new FileInputStream(tempFile);

            Boolean isLastPart = -1 == (firstByte = chunckableInputStream.read());
            if (!isLastPart)
                chunckableInputStream.unread(firstByte);

            UploadPartRequest uploadRequest = new UploadPartRequest().withUploadId(initResponse.getUploadId())
                    .withBucketName(bucketName).withKey(uploadFileName).withInputStream(chunk)
                    .withPartNumber(partNumber).withPartSize(partSize).withLastPart(isLastPart);
            partETags.add(getS3Client().uploadPart(uploadRequest).getPartETag());
            partNumber++;
        }

        //check for flag here then abort.
        if (overSizeLimit) {

            AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest(bucketName,
                    uploadFileName, initResponse.getUploadId());

            ListMultipartUploadsRequest listRequest = new ListMultipartUploadsRequest(bucketName);

            MultipartUploadListing listResult = getS3Client().listMultipartUploads(listRequest);

            //update the entity with the error.
            try {
                logger.error("starting update of entity due to oversized asset");
                fileMetadata.put("error", "Asset size is larger than max size of " + maxSizeBytes);
                em.update(entity);
            } catch (Exception e) {
                logger.error("Error updating entity with error message", e);
            }

            int timesIterated = 20;
            //loop and abort all the multipart uploads
            while (listResult.getMultipartUploads().size() != 0 && timesIterated > 0) {

                getS3Client().abortMultipartUpload(abortRequest);
                Thread.sleep(1000);
                timesIterated--;
                listResult = getS3Client().listMultipartUploads(listRequest);
                if (logger.isDebugEnabled()) {
                    logger.debug("Files that haven't been aborted are: {}",
                            listResult.getMultipartUploads().listIterator().toString());
                }

            }
            if (timesIterated == 0) {
                logger.error("Files parts that couldn't be aborted in 20 seconds are:");
                Iterator<MultipartUpload> multipartUploadIterator = listResult.getMultipartUploads().iterator();
                while (multipartUploadIterator.hasNext()) {
                    logger.error(multipartUploadIterator.next().getKey());
                }
            }
        } else {
            CompleteMultipartUploadRequest request = new CompleteMultipartUploadRequest(bucketName,
                    uploadFileName, initResponse.getUploadId(), partETags);
            CompleteMultipartUploadResult amazonResult = getS3Client().completeMultipartUpload(request);
            fileMetadata.put(AssetUtils.CONTENT_LENGTH, written);
            fileMetadata.put(AssetUtils.E_TAG, amazonResult.getETag());
            em.update(entity);
        }
    }
}

From source file: org.apache.usergrid.services.assets.data.GoogleBinaryStore.java

Applies the same megabytes-to-bytes conversion to cap uploads to Google Cloud Storage.

@Override
public void write(UUID appId, Entity entity, InputStream inputStream) throws Exception {

    getService();

    final AtomicLong writtenSize = new AtomicLong();

    final int chunkSize = 1024; // one KB

    // determine the maximum file size allowed, defaulting to 50 MB
    long maxSizeBytes = 50 * FileUtils.ONE_MB;
    String maxSizeMbString = properties.getProperty("usergrid.binary.max-size-mb", "50");
    if (StringUtils.isNumeric(maxSizeMbString)) {
        maxSizeBytes = Long.parseLong(maxSizeMbString) * FileUtils.ONE_MB;
    }

    byte[] firstData = new byte[chunkSize];
    int firstSize = inputStream.read(firstData);
    writtenSize.addAndGet(firstSize);

    // from the first sample chunk, determine the content type
    final String contentType = AssetMimeHandler.get().getMimeType(entity, firstData);

    // Convert to the Google Cloud Storage Blob
    final BlobId blobId = BlobId.of(bucketName, AssetUtils.buildAssetKey(appId, entity));
    final BlobInfo blobInfo = BlobInfo.newBuilder(blobId).setContentType(contentType).build();

    // always allow files up to 5mb
    if (maxSizeBytes < 5 * FileUtils.ONE_MB) {
        maxSizeBytes = 5 * FileUtils.ONE_MB;
    }

    EntityManager em = entityManagerFactory.getEntityManager(appId);
    Map<String, Object> fileMetadata = AssetUtils.getFileMetadata(entity);

    // directly upload files that are smaller than the chunk size
    if (writtenSize.get() < chunkSize) {

        // Upload to Google cloud Storage
        instance.create(blobInfo, firstData);

    } else {

        WriteChannel writer = instance.writer(blobInfo);

        // write the initial sample data used to determine file type
        writer.write(ByteBuffer.wrap(firstData, 0, firstData.length));

        // start writing remaining chunks from the stream
        byte[] buffer = new byte[chunkSize];
        int limit;
        while ((limit = inputStream.read(buffer)) >= 0) {

            writtenSize.addAndGet(limit);
            if (writtenSize.get() > maxSizeBytes) {
                try {
                    fileMetadata.put("error", "Asset size is larger than max size of " + maxSizeBytes);
                    em.update(entity);

                } catch (Exception e) {
                    logger.error("Error updating entity with error message", e);
                }
                return;
            }

            try {
                writer.write(ByteBuffer.wrap(buffer, 0, limit));

            } catch (Exception ex) {
                logger.error("Error writing chunk to Google Cloud Storage for asset ");
            }
        }

        writer.close();
    }

    fileMetadata.put(AssetUtils.CONTENT_LENGTH, writtenSize.get());
    fileMetadata.put(AssetUtils.LAST_MODIFIED, System.currentTimeMillis());
    fileMetadata.put(AssetUtils.E_TAG, RandomStringUtils.randomAlphanumeric(10));
    fileMetadata.put(AssetUtils.CONTENT_TYPE, contentType);

    try {
        em.update(entity);
    } catch (Exception e) {
        throw new IOException("Unable to update entity filedata", e);
    }

}

From source file: org.apache.usergrid.services.assets.data.LocalFileBinaryStore.java

Uses FileUtils.ONE_MB * 5 as the default read length.

@Override
public InputStream read(UUID appId, Entity entity) throws IOException {
    return read(appId, entity, 0, FileUtils.ONE_MB * 5);
}
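
The read above defaults to a 5 MB range without hard-coding the byte count. A minimal sketch of the same idiom as named constants (constant names are illustrative, not from the source):

// size limits expressed as multiples of FileUtils.ONE_MB
private static final long DEFAULT_READ_LENGTH = 5 * FileUtils.ONE_MB; // 5 MB
private static final long MAX_ASSET_SIZE = 50 * FileUtils.ONE_MB;     // 50 MB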

From source file: org.candlepin.audit.HornetqContextListener.java

Converts queue and page sizes configured in megabytes into the byte values HornetQ's address settings expect.

public void contextInitialized(Injector injector) {

    org.candlepin.common.config.Configuration candlepinConfig = injector
            .getInstance(org.candlepin.common.config.Configuration.class);

    if (hornetqServer == null) {
        Configuration config = new ConfigurationImpl();

        HashSet<TransportConfiguration> transports = new HashSet<TransportConfiguration>();
        transports.add(new TransportConfiguration(InVMAcceptorFactory.class.getName()));
        config.setAcceptorConfigurations(transports);

        // alter the default pass to silence log output
        config.setClusterUser(null);
        config.setClusterPassword(null);

        // in vm, who needs security?
        config.setSecurityEnabled(false);

        config.setJournalType(JournalType.NIO);

        config.setCreateBindingsDir(true);
        config.setCreateJournalDir(true);

        String baseDir = candlepinConfig.getString(ConfigProperties.HORNETQ_BASE_DIR);

        config.setBindingsDirectory(new File(baseDir, "bindings").toString());
        config.setJournalDirectory(new File(baseDir, "journal").toString());
        config.setLargeMessagesDirectory(new File(baseDir, "largemsgs").toString());
        config.setPagingDirectory(new File(baseDir, "paging").toString());

        Map<String, AddressSettings> settings = new HashMap<String, AddressSettings>();
        AddressSettings pagingConfig = new AddressSettings();

        String addressPolicyString = candlepinConfig.getString(ConfigProperties.HORNETQ_ADDRESS_FULL_POLICY);
        long maxQueueSizeInMb = candlepinConfig.getInt(ConfigProperties.HORNETQ_MAX_QUEUE_SIZE);
        long maxPageSizeInMb = candlepinConfig.getInt(ConfigProperties.HORNETQ_MAX_PAGE_SIZE);

        AddressFullMessagePolicy addressPolicy = null;
        if (addressPolicyString.equals("PAGE")) {
            addressPolicy = AddressFullMessagePolicy.PAGE;
        } else if (addressPolicyString.equals("BLOCK")) {
            addressPolicy = AddressFullMessagePolicy.BLOCK;
        } else {
            throw new IllegalArgumentException("Unknown HORNETQ_ADDRESS_FULL_POLICY: " + addressPolicyString
                    + " . Please use one of: PAGE, BLOCK");
        }

        // Paging sizes need to be converted to bytes
        pagingConfig.setMaxSizeBytes(maxQueueSizeInMb * FileUtils.ONE_MB);
        if (addressPolicy == AddressFullMessagePolicy.PAGE) {
            pagingConfig.setPageSizeBytes(maxPageSizeInMb * FileUtils.ONE_MB);
        }
        pagingConfig.setAddressFullMessagePolicy(addressPolicy);
        //Enable for all the queues
        settings.put("#", pagingConfig);
        config.setAddressesSettings(settings);

        int maxScheduledThreads = candlepinConfig.getInt(ConfigProperties.HORNETQ_MAX_SCHEDULED_THREADS);
        int maxThreads = candlepinConfig.getInt(ConfigProperties.HORNETQ_MAX_THREADS);
        if (maxThreads != -1) {
            config.setThreadPoolMaxSize(maxThreads);
        }

        if (maxScheduledThreads != -1) {
            config.setScheduledThreadPoolMaxSize(maxScheduledThreads);
        }

        /**
         * Anything up to LARGE_MSG_SIZE may need to be written to the journal,
         * so the buffer size must be set accordingly.
         *
         * If the buffer size were smaller than LARGE_MSG_SIZE, we could get exceptions such as:
         * Can't write records bigger than the bufferSize(XXXYYY) on the journal
         */
        int largeMsgSize = candlepinConfig.getInt(ConfigProperties.HORNETQ_LARGE_MSG_SIZE);
        config.setJournalBufferSize_AIO(largeMsgSize);
        config.setJournalBufferSize_NIO(largeMsgSize);

        hornetqServer = new EmbeddedHornetQ();
        hornetqServer.setConfiguration(config);
    }
    try {
        hornetqServer.start();
        log.info("Hornetq server started");
    } catch (Exception e) {
        log.error("Failed to start hornetq message server:", e);
        throw new RuntimeException(e);
    }

    cleanupOldQueues();

    List<String> listeners = getHornetqListeners(candlepinConfig);

    eventSource = injector.getInstance(EventSource.class);
    for (int i = 0; i < listeners.size(); i++) {
        try {
            Class<?> clazz = this.getClass().getClassLoader().loadClass(listeners.get(i));
            eventSource.registerListener((EventListener) injector.getInstance(clazz));
        } catch (Exception e) {
            log.warn("Unable to register listener " + listeners.get(i), e);
        }
    }

    // Initialize the Event sink AFTER the internal server has been
    // created and started.
    EventSink sink = injector.getInstance(EventSink.class);
    try {
        sink.initialize();
    } catch (Exception e) {
        log.error("Failed to initialize EventSink:", e);
        throw new RuntimeException(e);
    }
}

From source file: org.jclouds.examples.blobstore.BlobUploader.java

Divides the cumulative uploaded byte count by FileUtils.ONE_MB to report upload progress in megabytes.

@Override
public void run() {
    /**
     * Instantiate the ThreadLocal variables when this thread runs for the first time.
     * Instantiating this in the constructor will not work (different thread).
     */
    if (blobStore.get() == null) {
        // It is usually a good idea to include the currentThread when logging parallel tasks.
        System.out.println("Creating connection for thread " + Thread.currentThread());
        /**
         * In some cases, especially when running very large jobs with many parallel threads, some connections will
         * break. In that case, we need to be able to obtain a new connection (and socket) to the service, which is
         * why this is factored out.
         */
        resetBlobstore(username, password, provider, region);
    }

    if (container.get() == null) {
        container.set(UUID.randomUUID().toString());
        Location location = getOnlyElement(blobStore.get().listAssignableLocations());
        blobStore.get().createContainerInLocation(location, container.get());

        System.out.println("Created container " + container.get() + " for thread " + Thread.currentThread()
                + " in " + location.toString());
    }

    // The md5 as returned by the service, and as calculated locally.
    String md5Local;
    String md5Remote;
    Blob blob;

    try {
        md5Local = BaseEncoding.base16().encode(Files.hash(file, Hashing.md5()).asBytes()).toLowerCase();
    } catch (java.io.IOException e) {
        e.printStackTrace();
        /**
         * The file is no longer available on the local FS.
         * In some application cases, you might also want to retry this instead of finishing the unit of work.
         */
        return;
    }

    ByteSourcePayload bsp = new ByteSourcePayload(Files.asByteSource(file));

    /**
     * Uploading a file over a network is an inherently fragile operation. Over thousands of files, especially in
     * highly parallel jobs that tax upload bandwidth, a small percent of uploads are guaranteed to fail.
     */
    do {
        System.out.println("Uploading " + file.getName() + " ; " + FileUtils.sizeOf(file));
        blob = blobStore.get().blobBuilder(file.getName()).payload(bsp).build();
        md5Remote = blobStore.get().putBlob(container.get(), blob).toLowerCase();
        if (md5Local.equals(md5Remote)) {
            long total = BlobUploaderMain.bytesUploaded.addAndGet(FileUtils.sizeOf(file));
            System.out.println("Uploaded MB: " + (int) total / FileUtils.ONE_MB + "MB ; "
                    + (int) ((float) BlobUploaderMain.bytesUploaded.get() / BlobUploaderMain.totalBytes) * 100
                    + "%");
            bsp.release();
            return;
        } else {
            System.out.printf("md5 mismatch %s vs %s, retrying %s", md5Local, md5Remote, file.getName());
        }
    } while (true);
}

From source file: org.jclouds.examples.blobstore.BlobUploaderMain.java

Divides the total size of a directory by FileUtils.ONE_MB when printing how much data will be uploaded.

public static void main(String[] args) throws IOException {

    OptionParser parser = new OptionParser();
    parser.accepts("directory").withRequiredArg().required().ofType(String.class);
    parser.accepts("provider").withRequiredArg().required().ofType(String.class);
    parser.accepts("username").withRequiredArg().required().ofType(String.class);
    parser.accepts("password").withRequiredArg().required().ofType(String.class);
    parser.accepts("region").withRequiredArg().required().ofType(String.class);
    parser.accepts("threads").withRequiredArg().ofType(Integer.TYPE).describedAs("number of parallel threads");
    OptionSet options = null;

    try {
        options = parser.parse(args);
    } catch (OptionException e) {
        System.out.println(e.getLocalizedMessage());
        parser.printHelpOn(System.out);
        return;
    }

    if (options.has("threads")) {
        numThreads = Integer.valueOf(options.valueOf("threads").toString());
    }

    File rootDir = new File((String) options.valueOf("directory"));
    Collection<File> files = FileUtils.listFiles(rootDir, CanReadFileFilter.CAN_READ, TrueFileFilter.TRUE);
    totalBytes = FileUtils.sizeOfDirectory(rootDir);

    System.out.println("Uploading " + rootDir.getName() + " " + totalBytes / FileUtils.ONE_MB + "MB");

    ExecutorService executor = Executors.newFixedThreadPool(numThreads);

    for (File f : files) {
        BlobUploader b = new BlobUploader((String) options.valueOf("username"),
                (String) options.valueOf("password"), (String) options.valueOf("provider"),
                (String) options.valueOf("region"), f);
        executor.execute(b);
    }
    executor.shutdown();

    try {
        executor.awaitTermination(1, TimeUnit.DAYS);
    } catch (InterruptedException e) {
        e.printStackTrace();
    }
}

From source file: org.kuali.student.git.importer.ReportBlobSizePerBranch.java

Divides a BigDecimal byte counter by FileUtils.ONE_MB to report blob sizes in megabytes.

private static BigDecimal getMB(BigDecimal counter) {
    return counter.divide(new BigDecimal(FileUtils.ONE_MB));
}
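
A hypothetical call to the helper above, converting 3 MB worth of bytes back to megabytes:

BigDecimal threeMbInBytes = new BigDecimal(3 * FileUtils.ONE_MB);
System.out.println(getMB(threeMbInBytes)); // prints 3

Because 1,048,576 is a power of two, the exact quotient always has a terminating decimal expansion, so this particular divide cannot hit the ArithmeticException that BigDecimal.divide raises for non-terminating results.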