Example usage for org.apache.commons.io FileUtils writeByteArrayToFile

Introduction

This page collects example usages of org.apache.commons.io.FileUtils.writeByteArrayToFile from open source projects.

Prototype

public static void writeByteArrayToFile(File file, byte[] data) throws IOException 

Document

Writes a byte array to a file, creating the file if it does not exist.
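
A minimal, self-contained sketch of typical usage follows; the file path and payload are illustrative and not taken from the projects listed below. Note that the method creates missing parent directories and overwrites any existing file content, and that commons-io 2.1+ also offers an overload with a trailing boolean append parameter.

import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.apache.commons.io.FileUtils;

public class WriteByteArrayToFileExample {
    public static void main(String[] args) throws IOException {
        // Illustrative payload and target path; parent directories are created if missing.
        byte[] data = "hello, commons-io".getBytes(StandardCharsets.UTF_8);
        File target = new File("build/tmp/example.bin");
        FileUtils.writeByteArrayToFile(target, data);

        // Read the bytes back to verify the round trip.
        byte[] readBack = FileUtils.readFileToByteArray(target);
        System.out.println(readBack.length + " bytes written and read back");
    }
}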

Usage

From source file: org.aludratest.impl.log4testing.LocalFileAttachmentLog.java

/** Constructs a new file-based attachment log for the given attachment. The contents of the attachment are immediately written
 * to a temporary file, which is automatically deleted on VM exit.
 *
 * @param attachment Attachment to create an attachment log for.
 * 
 * @throws IOException If the attachment could not be written to a temporary file (e.g. disk full). */
public LocalFileAttachmentLog(Attachment attachment) throws IOException {
    super(attachment.getLabel(), attachment.getFileExtension());
    localFile = File.createTempFile("aludraTestAttachment", attachment.getFileExtension());
    FileUtils.writeByteArrayToFile(localFile, attachment.getFileData());
    localFile.deleteOnExit();
}

From source file: org.anarres.lzo.LzopStreamTest.java

public void testAlgorithm(LzoAlgorithm algorithm, byte[] orig) throws IOException {
    for (long flags : FLAGS) {
        try {
            LzoCompressor compressor = LzoLibrary.getInstance().newCompressor(algorithm, null);
            LOG.info("Compressing " + orig.length + " bytes using " + algorithm);

            // LOG.info("Original:   " + Arrays.toString(orig));

            ByteArrayOutputStream os = new ByteArrayOutputStream();
            LzopOutputStream cs = new LzopOutputStream(os, compressor, 256, flags);
            cs.write(orig);
            cs.close();

            // LOG.info("Compressed: OK.");

            FileUtils.writeByteArrayToFile(new File("temp.lzo"), os.toByteArray());

            // LzoDecompressor decompressor = LzoLibrary.getInstance().newDecompressor(algorithm, null);

            ByteArrayInputStream is = new ByteArrayInputStream(os.toByteArray());
            LzopInputStream us = new LzopInputStream(is);
            DataInputStream ds = new DataInputStream(us);
            byte[] uncompressed = new byte[orig.length];
            ds.readFully(uncompressed);

            // LOG.info("Output:     OK.");
            // LOG.info("Output:     " + Arrays.toString(uncompressed));

            assertArrayEquals(orig, uncompressed);
        } finally {
            System.out.flush();
            System.err.flush();
        }
    }
}

From source file: org.apache.cloudstack.storage.configdrive.ConfigDriveBuilder.java

static void createFileInTempDirAnAppendOpenStackMetadataToJsonObject(String tempDirName, JsonObject metaData,
        String dataType, String fileName, String content) {
    if (StringUtils.isBlank(dataType)) {
        return;
    }
    //create folder
    File typeFolder = new File(tempDirName + ConfigDrive.cloudStackConfigDriveName + dataType);
    if (!typeFolder.exists() && !typeFolder.mkdirs()) {
        throw new CloudRuntimeException("Failed to create folder: " + typeFolder);
    }
    if (StringUtils.isNotBlank(content)) {
        File file = new File(typeFolder, fileName + ".txt");
        try {
            if (fileName.equals(USERDATA_FILE)) {
                // User Data is passed as a base64 encoded string
                FileUtils.writeByteArrayToFile(file, Base64.decodeBase64(content));
            } else {
                FileUtils.write(file, content, com.cloud.utils.StringUtils.getPreferredCharset());
            }
        } catch (IOException ex) {
            throw new CloudRuntimeException("Failed to create file ", ex);
        }
    }

    //now write the file to the OpenStack directory
    buildOpenStackMetaData(metaData, dataType, fileName, content);
}

From source file: org.apache.drill.exec.store.parquet.TestParquetScan.java

@Test
public void testSuccessFile() throws Exception {
    final byte[] bytes = Resources.toByteArray(Resources.getResource("tpch/nation.parquet"));

    final Path rootPath = dirTestWatcher.getRootDir().toPath();
    final File scanFile = rootPath.resolve("nation_test_parquet_scan").toFile();
    final File successFile = rootPath.resolve("_SUCCESS").toFile();
    final File logsFile = rootPath.resolve("_logs").toFile();

    FileUtils.writeByteArrayToFile(scanFile, bytes);
    successFile.createNewFile();
    logsFile.createNewFile();

    testBuilder().sqlQuery("select count(*) c from dfs.nation_test_parquet_scan where 1 = 1").unOrdered()
            .baselineColumns("c").baselineValues(25L).build().run();
}

From source file: org.apache.druid.segment.SegmentUtilsTest.java

@Test
public void testVersionBin() throws Exception {
    File dir = tempFolder.newFolder();
    FileUtils.writeByteArrayToFile(new File(dir, "version.bin"), Ints.toByteArray(9));
    Assert.assertEquals(9, SegmentUtils.getVersionFromDir(dir));
}

From source file: org.apache.falcon.regression.TestngListener.java

private void takeScreenShot(ITestResult result) throws IOException {
    String logs = Config.getProperty("log.capture.location", OSUtil.concat("target", "surefire-reports"));
    if (BaseUITestClass.getDriver() != null) {
        byte[] scrFile = ((TakesScreenshot) BaseUITestClass.getDriver()).getScreenshotAs(OutputType.BYTES);
        String params = Arrays.toString(result.getParameters());
        params = params.replaceAll("[<>\":\\\\/\\|\\?\\*]", ""); //remove <>:"/\|?*
        String filename = OSUtil.concat(logs, "screenshots", String.format("%s.%s(%s).png",
                result.getTestClass().getRealClass().getSimpleName(), result.getName(), params));
        LOGGER.info("Saving screenshot to: " + filename);
        FileUtils.writeByteArrayToFile(new File(filename), scrFile);
    }
}

From source file: org.apache.felix.ipojo.manipulation.ManipulatedClassLoader.java

public void dump() throws IOException {
    File outer = new File(DUMP_BASEDIR, name.replace(".", "/") + ".class");
    FileUtils.writeByteArrayToFile(outer, clazz);
    for (String name : inner.keySet()) {
        File file = new File(DUMP_BASEDIR, name.replace(".", "/") + ".class");
        FileUtils.writeByteArrayToFile(file, inner.get(name));
    }
}

From source file: org.apache.flink.runtime.blob.BlobCacheCorruptionTest.java

/**
 * Checks that the GET operation fails when the downloaded file (from {@link BlobServer} or HA store)
 * is corrupt, i.e. its content's hash does not match the {@link BlobKey}'s hash.
 *
 * @param jobId
 *       job ID or <tt>null</tt> if job-unrelated
 * @param blobType
 *       whether the BLOB should become permanent or transient
 * @param corruptOnHAStore
 *       whether the file should be corrupt in the HA store (<tt>true</tt>; requires
 *       <tt>highAvailability</tt> to be set) or on the {@link BlobServer}'s local store
 *       (<tt>false</tt>)
 * @param config
 *       blob server configuration (including HA settings like {@link HighAvailabilityOptions#HA_STORAGE_PATH}
 *       and {@link HighAvailabilityOptions#HA_CLUSTER_ID}) used to set up <tt>blobStore</tt>
 * @param blobStore
 *       shared HA blob store to use
 * @param expectedException
 *       expected exception rule to use
 */
private static void testGetFailsFromCorruptFile(@Nullable JobID jobId, BlobKey.BlobType blobType,
        boolean corruptOnHAStore, Configuration config, BlobStore blobStore,
        ExpectedException expectedException) throws IOException {

    assertTrue("corrupt HA file requires a HA setup", !corruptOnHAStore || blobType == PERMANENT_BLOB);

    Random rnd = new Random();

    try (BlobServer server = new BlobServer(config, blobStore);
            BlobCacheService cache = new BlobCacheService(config,
                    corruptOnHAStore ? blobStore : new VoidBlobStore(),
                    new InetSocketAddress("localhost", server.getPort()))) {

        server.start();

        byte[] data = new byte[2000000];
        rnd.nextBytes(data);

        // put content addressable (like libraries)
        BlobKey key = put(server, jobId, data, blobType);
        assertNotNull(key);

        // change server/HA store file contents to make sure that GET requests fail
        byte[] data2 = Arrays.copyOf(data, data.length);
        data2[0] ^= 1;
        if (corruptOnHAStore) {
            File tmpFile = Files.createTempFile("blob", ".jar").toFile();
            try {
                FileUtils.writeByteArrayToFile(tmpFile, data2);
                blobStore.put(tmpFile, jobId, key);
            } finally {
                //noinspection ResultOfMethodCallIgnored
                tmpFile.delete();
            }

            // delete local (correct) file on server to make sure that the GET request does not
            // fall back to downloading the file from the BlobServer's local store
            File blobFile = server.getStorageLocation(jobId, key);
            assertTrue(blobFile.delete());
        } else {
            File blobFile = server.getStorageLocation(jobId, key);
            assertTrue(blobFile.exists());
            FileUtils.writeByteArrayToFile(blobFile, data2);
        }

        // issue a GET request that fails
        expectedException.expect(IOException.class);
        expectedException.expectCause(CoreMatchers.allOf(instanceOf(IOException.class),
                hasProperty("message", containsString("data corruption"))));

        get(cache, jobId, key);
    }
}

From source file: org.apache.flink.runtime.blob.BlobCachePutTest.java

/**
 * [FLINK-6020]
 * Tests that concurrent put operations will only upload the file once to the {@link BlobStore}
 * and that the files are not corrupt at any time.
 *
 * @param jobId
 *       job ID to use (or <tt>null</tt> if job-unrelated)
 * @param blobType
 *       whether the BLOB should become permanent or transient
 */
private void testConcurrentPutOperations(@Nullable final JobID jobId, final BlobKey.BlobType blobType)
        throws IOException, InterruptedException, ExecutionException {
    final Configuration config = new Configuration();
    config.setString(BlobServerOptions.STORAGE_DIRECTORY, temporaryFolder.newFolder().getAbsolutePath());

    final BlobStore blobStoreServer = mock(BlobStore.class);
    final BlobStore blobStoreCache = mock(BlobStore.class);

    int concurrentPutOperations = 2;
    int dataSize = 1024;

    final CountDownLatch countDownLatch = new CountDownLatch(concurrentPutOperations);
    final byte[] data = new byte[dataSize];

    final List<Path> jars;
    if (blobType == PERMANENT_BLOB) {
        // implement via JAR file upload instead:
        File tmpFile = temporaryFolder.newFile();
        FileUtils.writeByteArrayToFile(tmpFile, data);
        jars = Collections.singletonList(new Path(tmpFile.getAbsolutePath()));
    } else {
        jars = null;
    }

    Collection<CompletableFuture<BlobKey>> allFutures = new ArrayList<>(concurrentPutOperations);

    ExecutorService executor = Executors.newFixedThreadPool(concurrentPutOperations);

    try (final BlobServer server = new BlobServer(config, blobStoreServer);
            final BlobCacheService cache = new BlobCacheService(config, blobStoreCache,
                    new InetSocketAddress("localhost", server.getPort()))) {

        server.start();

        // for highAvailability
        final InetSocketAddress serverAddress = new InetSocketAddress("localhost", server.getPort());
        // uploading HA BLOBs works on BlobServer only (and, for now, via the BlobClient)

        for (int i = 0; i < concurrentPutOperations; i++) {
            final Supplier<BlobKey> callable;
            if (blobType == PERMANENT_BLOB) {
                // cannot use a blocking stream here (upload only possible via files)
                callable = () -> {
                    try {
                        List<PermanentBlobKey> keys = BlobClient.uploadFiles(serverAddress, config, jobId,
                                jars);
                        assertEquals(1, keys.size());
                        BlobKey uploadedKey = keys.get(0);
                        // check the uploaded file's contents (concurrently)
                        verifyContents(server, jobId, uploadedKey, data);
                        return uploadedKey;
                    } catch (IOException e) {
                        throw new CompletionException(new FlinkException("Could not upload blob.", e));
                    }
                };

            } else {
                callable = () -> {
                    try {
                        BlockingInputStream inputStream = new BlockingInputStream(countDownLatch, data);
                        BlobKey uploadedKey = put(cache, jobId, inputStream, blobType);
                        // check the uploaded file's contents (concurrently)
                        verifyContents(server, jobId, uploadedKey, data);
                        return uploadedKey;
                    } catch (IOException e) {
                        throw new CompletionException(new FlinkException("Could not upload blob.", e));
                    }
                };
            }
            CompletableFuture<BlobKey> putFuture = CompletableFuture.supplyAsync(callable, executor);

            allFutures.add(putFuture);
        }

        FutureUtils.ConjunctFuture<Collection<BlobKey>> conjunctFuture = FutureUtils.combineAll(allFutures);

        // wait until all operations have completed and check that no exception was thrown
        Collection<BlobKey> blobKeys = conjunctFuture.get();

        Iterator<BlobKey> blobKeyIterator = blobKeys.iterator();

        assertTrue(blobKeyIterator.hasNext());

        BlobKey blobKey = blobKeyIterator.next();

        // make sure that all blob keys are the same
        while (blobKeyIterator.hasNext()) {
            // check for unique BlobKey, but should have same hash
            verifyKeyDifferentHashEquals(blobKey, blobKeyIterator.next());
        }

        // check the uploaded file's contents
        verifyContents(server, jobId, blobKey, data);

        // check that we only uploaded the file once to the blob store
        if (blobType == PERMANENT_BLOB) {
            verify(blobStoreServer, times(1)).put(any(File.class), eq(jobId), eq(blobKey));
        } else {
            // can't really verify much in the other cases other than that the put operations should
            // work and not corrupt files
            verify(blobStoreServer, times(0)).put(any(File.class), eq(jobId), eq(blobKey));
        }
        // caches must not access the blob store (they are not allowed to write there)
        verify(blobStoreCache, times(0)).put(any(File.class), eq(jobId), eq(blobKey));
    } finally {
        executor.shutdownNow();
    }
}

From source file: org.apache.flink.runtime.blob.BlobServerCorruptionTest.java

/**
 * Checks that the GET operation fails when the downloaded file (from the HA store)
 * is corrupt, i.e. its content's hash does not match the {@link BlobKey}'s hash.
 *
 * @param config
 *       blob server configuration (including HA settings like {@link HighAvailabilityOptions#HA_STORAGE_PATH}
 *       and {@link HighAvailabilityOptions#HA_CLUSTER_ID}) used to set up <tt>blobStore</tt>
 * @param blobStore
 *       shared HA blob store to use
 * @param expectedException
 *       expected exception rule to use
 */
public static void testGetFailsFromCorruptFile(Configuration config, BlobStore blobStore,
        ExpectedException expectedException) throws IOException {

    Random rnd = new Random();
    JobID jobId = new JobID();

    try (BlobServer server = new BlobServer(config, blobStore)) {

        server.start();

        byte[] data = new byte[2000000];
        rnd.nextBytes(data);

        // put content addressable (like libraries)
        BlobKey key = put(server, jobId, data, PERMANENT_BLOB);
        assertNotNull(key);

        // delete the local file to make sure that the GET request downloads from the HA store
        File blobFile = server.getStorageLocation(jobId, key);
        assertTrue(blobFile.delete());

        // change HA store file contents to make sure that GET requests fail
        byte[] data2 = Arrays.copyOf(data, data.length);
        data2[0] ^= 1;
        File tmpFile = Files.createTempFile("blob", ".jar").toFile();
        try {
            FileUtils.writeByteArrayToFile(tmpFile, data2);
            blobStore.put(tmpFile, jobId, key);
        } finally {
            //noinspection ResultOfMethodCallIgnored
            tmpFile.delete();
        }

        // issue a GET request that fails
        expectedException.expect(IOException.class);
        expectedException.expectMessage("data corruption");

        get(server, jobId, key);
    }
}