Example usage for org.apache.commons.io FileUtils writeByteArrayToFile

Introduction

On this page you can find example usage for org.apache.commons.io FileUtils.writeByteArrayToFile.

Prototype

public static void writeByteArrayToFile(File file, byte[] data) throws IOException 

Document

Writes a byte array to a file, creating the file if it does not exist. If the file already exists, its content is overwritten.
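
A minimal sketch of a direct call, for orientation before the longer examples; the file name and byte payload are illustrative placeholders, not taken from the sources below:

import java.io.File;
import java.io.IOException;

import org.apache.commons.io.FileUtils;

public class WriteByteArrayToFileExample {
    public static void main(String[] args) throws IOException {
        byte[] data = { 0x01, 0x02, 0x03, 0x04 };
        // Creates output.bin (and, in current Commons IO versions, any missing
        // parent directories) and writes the bytes; an existing file is overwritten.
        FileUtils.writeByteArrayToFile(new File("output.bin"), data);
    }
}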

Usage

From source file: org.apache.flink.runtime.blob.BlobServerGetTest.java

/**
 * [FLINK-6020] Tests that concurrent get operations don't concurrently access the BlobStore to
 * download a blob.
 *
 * @param jobId
 *       job ID to use (or <tt>null</tt> if job-unrelated)
 * @param blobType
 *       whether the BLOB should become permanent or transient
 */
private void testConcurrentGetOperations(@Nullable final JobID jobId, final BlobKey.BlobType blobType)
        throws IOException, InterruptedException, ExecutionException {
    final Configuration config = new Configuration();
    config.setString(BlobServerOptions.STORAGE_DIRECTORY, temporaryFolder.newFolder().getAbsolutePath());

    final BlobStore blobStore = mock(BlobStore.class);

    final int numberConcurrentGetOperations = 3;
    final List<CompletableFuture<File>> getOperations = new ArrayList<>(numberConcurrentGetOperations);

    final byte[] data = { 1, 2, 3, 4, 99, 42 };

    doAnswer(new Answer() {
        @Override
        public Object answer(InvocationOnMock invocation) throws Throwable {
            File targetFile = (File) invocation.getArguments()[2];

            FileUtils.writeByteArrayToFile(targetFile, data);

            return null;
        }
    }).when(blobStore).get(any(JobID.class), any(BlobKey.class), any(File.class));

    final ExecutorService executor = Executors.newFixedThreadPool(numberConcurrentGetOperations);

    try (final BlobServer server = new BlobServer(config, blobStore)) {

        server.start();

        // upload data first
        final BlobKey blobKey = put(server, jobId, data, blobType);

        // now try accessing it concurrently (only HA mode will be able to retrieve it from HA store!)
        if (blobType == PERMANENT_BLOB) {
            // remove local copy so that a transfer from HA store takes place
            assertTrue(server.getStorageLocation(jobId, blobKey).delete());
        }
        for (int i = 0; i < numberConcurrentGetOperations; i++) {
            CompletableFuture<File> getOperation = CompletableFuture.supplyAsync(() -> {
                try {
                    File file = get(server, jobId, blobKey);
                    // check that we have read the right data
                    validateGetAndClose(new FileInputStream(file), data);
                    return file;
                } catch (IOException e) {
                    throw new CompletionException(
                            new FlinkException("Could not read blob for key " + blobKey + '.', e));
                }
            }, executor);

            getOperations.add(getOperation);
        }

        CompletableFuture<Collection<File>> filesFuture = FutureUtils.combineAll(getOperations);
        filesFuture.get();
    } finally {
        executor.shutdownNow();
    }
}

From source file: org.apache.flink.runtime.blob.BlobServerPutTest.java

/**
 * Helper to choose the right {@link BlobServer#putTransient} method.
 *
 * @param blobType
 *       whether the BLOB should become permanent or transient
 *
 * @return blob key for the uploaded data
 */
static BlobKey put(BlobService service, @Nullable JobID jobId, byte[] data, BlobKey.BlobType blobType)
        throws IOException {
    if (blobType == PERMANENT_BLOB) {
        if (service instanceof BlobServer) {
            return ((BlobServer) service).putPermanent(jobId, data);
        } else {
            // implement via JAR file upload instead:
            File tmpFile = Files.createTempFile("blob", ".jar").toFile();
            try {
                FileUtils.writeByteArrayToFile(tmpFile, data);
                InetSocketAddress serverAddress = new InetSocketAddress("localhost", service.getPort());
                // uploading HA BLOBs works on BlobServer only (and, for now, via the BlobClient)
                Configuration clientConfig = new Configuration();
                List<Path> jars = Collections.singletonList(new Path(tmpFile.getAbsolutePath()));
                List<PermanentBlobKey> keys = BlobClient.uploadFiles(serverAddress, clientConfig, jobId, jars);
                assertEquals(1, keys.size());
                return keys.get(0);
            } finally {
                //noinspection ResultOfMethodCallIgnored
                tmpFile.delete();
            }
        }
    } else if (jobId == null) {
        return service.getTransientBlobService().putTransient(data);
    } else {
        return service.getTransientBlobService().putTransient(jobId, data);
    }
}

From source file: org.apache.flume.tools.PasswordObfuscator.java

/**
 * Encodes the originalText and writes it to a file
 * @param originalText the text to be encoded
 * @param outputFile the file where the encoded bytes will be written
 * @throws java.io.IOException if the file could not be written
 */
public static void encodeToFile(String originalText, String outputFile) throws IOException {
    File output = new File(outputFile);
    byte[] cipherBytes = encode(originalText);
    FileUtils.writeByteArrayToFile(output, cipherBytes);
}

From source file: org.apache.fop.render.pdf.BasePDFTest.java

/**
 * Convert a test FO file to PDF.
 * @param foFile the FO file
 * @param ua the preconfigured user agent
 * @param dumpPdfFile if true, dumps the generated PDF file to a file name (foFile).pdf
 * @return the generated PDF data
 * @throws Exception if the conversion fails
 */
protected byte[] convertFO(File foFile, FOUserAgent ua, boolean dumpPdfFile) throws Exception {
    ByteArrayOutputStream baout = new ByteArrayOutputStream();
    Fop fop = fopFactory.newFop(MimeConstants.MIME_PDF, ua, baout);
    Transformer transformer = tFactory.newTransformer();
    Source src = new StreamSource(foFile);
    SAXResult res = new SAXResult(fop.getDefaultHandler());
    try {
        transformer.transform(src, res);
        final byte[] result = baout.toByteArray();
        if (dumpPdfFile) {
            final File outFile = new File(foFile.getParentFile(), foFile.getName() + ".pdf");
            FileUtils.writeByteArrayToFile(outFile, result);
        }
        return result;
    } catch (TransformerException e) {
        throw extractOriginalException(e);
    }
}

From source file: org.apache.geode.distributed.internal.ClusterConfigurationService.java

/**
 * Add jar information into the shared configuration and save the jars in the file system used
 * when deploying jars.
 * 
 * @return true on success
 */
public boolean addJarsToThisLocator(String[] jarNames, byte[][] jarBytes, String[] groups) {
    boolean success = true;
    lockSharedConfiguration();
    try {
        if (groups == null) {
            groups = new String[] { ClusterConfigurationService.CLUSTER_CONFIG };
        }
        Region<String, Configuration> configRegion = getConfigurationRegion();
        for (String group : groups) {
            Configuration configuration = configRegion.get(group);

            if (configuration == null) {
                configuration = new Configuration(group);
                createConfigDirIfNecessary(group);
            }

            String groupDir = FilenameUtils.concat(configDirPath, group);
            for (int i = 0; i < jarNames.length; i++) {
                String filePath = FilenameUtils.concat(groupDir, jarNames[i]);
                File jarFile = new File(filePath);
                try {
                    FileUtils.writeByteArrayToFile(jarFile, jarBytes[i]);
                } catch (IOException e) {
                    logger.info(e);
                }
            }

            // update the record after writing the jars to the file system, since the listener
            // will need the jars on file to upload to other locators. Need to update the jars
            // using a new copy of the Configuration so that the change listener will pick up the jar
            // name changes.
            Configuration configurationCopy = new Configuration(configuration);
            configurationCopy.addJarNames(jarNames);
            configRegion.put(group, configurationCopy);
        }
    } catch (Exception e) {
        success = false;
        logger.info(e.getMessage(), e);
    } finally {
        unlockSharedConfiguration();
    }
    return success;
}

From source file: org.apache.geode.distributed.internal.ClusterConfigurationService.java

public void downloadJarFromOtherLocators(String groupName, String jarName) throws Exception {
    logger.info("Getting Jar files from other locators");
    DM dm = cache.getDistributionManager();
    DistributedMember me = cache.getMyId();
    Set<DistributedMember> locators = new HashSet<>(dm.getAllHostedLocatorsWithSharedConfiguration().keySet());
    locators.remove(me);

    createConfigDirIfNecessary(groupName);

    byte[] jarBytes = locators.stream()
            .map((DistributedMember locator) -> downloadJarFromLocator(locator, groupName, jarName))
            .filter(Objects::nonNull).findFirst().orElseThrow(() -> new IllegalStateException(
                    "No locators have a deployed jar named " + jarName + " in " + groupName));

    File jarToWrite = getPathToJarOnThisLocator(groupName, jarName).toFile();
    FileUtils.writeByteArrayToFile(jarToWrite, jarBytes);
}

From source file: org.apache.geode.distributed.internal.SharedConfiguration.java

/**
 * Gets the jar files from existing locators in the system.
 */
private void getAllJarsFromOtherLocators() throws Exception {
    logger.info("Getting Jar files from other locators");
    DM dm = cache.getDistributionManager();
    DistributedMember me = cache.getMyId();
    Set<DistributedMember> locators = new HashSet<DistributedMember>(
            dm.getAllHostedLocatorsWithSharedConfiguration().keySet());
    locators.remove(me);
    String[] jarNames = null;
    byte[][] jarBytes = null;

    if (locators.isEmpty()) {
        logger.info("No other locators present");
        return;
    }
    ResultCollector<?, List<Object>> rc = (ResultCollector<?, List<Object>>) CliUtil
            .executeFunction(getAllJarsFunction, null, locators);

    List<Object> results = rc.getResult();
    for (Object result : results) {
        if (result != null) {
            if (!(result instanceof Exception)) {
                Object[] jars = (Object[]) result;
                jarNames = (String[]) jars[0];
                jarBytes = (byte[][]) jars[1];
                break;
            }
        }
    }

    if (jarNames != null && jarBytes != null) {
        Map<String, Integer> jarIndex = new HashMap<String, Integer>();

        for (int i = 0; i < jarNames.length; i++) {
            String jarName = jarNames[i];
            jarIndex.put(jarName, i);
        }

        Map<String, Configuration> entireConfiguration = getEntireConfiguration();
        Set<String> groups = entireConfiguration.keySet();

        for (String group : groups) {
            Configuration config = entireConfiguration.get(group);
            Set<String> groupJarNames = config.getJarNames();
            String groupDirPath = FilenameUtils.concat(configDirPath, group);

            for (String groupJarName : groupJarNames) {
                Integer index = jarIndex.get(groupJarName);

                if (index != null) {
                    String jarFilePath = FilenameUtils.concat(groupDirPath, groupJarName);
                    byte[] jarData = jarBytes[index.intValue()];

                    try {
                        FileUtils.writeByteArrayToFile(new File(jarFilePath), jarData);
                    } catch (IOException e) {
                        logger.info(e.getMessage(), e);
                    }
                } else {
                    //This should NEVER happen
                    logger.error("JarFile {} not delivered.", groupJarName);
                }
            }
        }
    } else {
        logger.info("No deployed jars found on other locators.");
    }
}

From source file: org.apache.geode.distributed.internal.SharedConfiguration.java

/**
 * Writes the given jar files to the target directory.
 * @param dirPath target directory where the jar files are to be written
 * @param jarNames Array containing the name of the jar files.
 * @param jarBytes Array of byte arrays for the jar files.
 */
private void writeJarFiles(final String dirPath, final String[] jarNames, final byte[][] jarBytes) {
    for (int i = 0; i < jarNames.length; i++) {
        String filePath = FilenameUtils.concat(dirPath, jarNames[i]);
        File jarFile = new File(filePath);
        try {
            FileUtils.writeByteArrayToFile(jarFile, jarBytes[i]);
        } catch (IOException e) {
            logger.info(e);
        }
    }
}

From source file: org.apache.geode.internal.DeployedJarJUnitTest.java

private byte[] givenInvalidJarBytes() throws IOException {
    byte[] invalidJarBytes = "INVALID JAR CONTENT".getBytes();
    FileUtils.writeByteArrayToFile(jarFile, invalidJarBytes);

    return invalidJarBytes;
}

From source file: org.apache.geode.management.internal.cli.commands.ClusterConfigurationDUnitTest.java

private void createAsyncEventQueue(final String queueName) throws Exception {
    String queueCommandsJarName = "testEndToEndSC-QueueCommands.jar";
    final File jarFile = temporaryFolder.newFile(queueCommandsJarName);
    ClassBuilder classBuilder = new ClassBuilder();
    byte[] jarBytes = classBuilder.createJarFromClassContent("com/qcdunit/QueueCommandsDUnitTestListener",
            "package com.qcdunit;" + "import java.util.List; import java.util.Properties;"
                    + "import org.apache.geode.internal.cache.xmlcache.Declarable2; import org.apache.geode.cache.asyncqueue.AsyncEvent;"
                    + "import org.apache.geode.cache.asyncqueue.AsyncEventListener;"
                    + "public class QueueCommandsDUnitTestListener implements Declarable2, AsyncEventListener {"
                    + "Properties props;"
                    + "public boolean processEvents(List<AsyncEvent> events) { return true; }"
                    + "public void close() {}"
                    + "public void init(final Properties props) {this.props = props;}"
                    + "public Properties getConfig() {return this.props;}}");

    FileUtils.writeByteArrayToFile(jarFile, jarBytes);
    CommandStringBuilder csb = new CommandStringBuilder(CliStrings.DEPLOY);
    csb.addOption(CliStrings.JAR, jarFile.getAbsolutePath());
    gfsh.executeAndVerifyCommand(csb.getCommandString());

    csb = new CommandStringBuilder(CliStrings.CREATE_ASYNC_EVENT_QUEUE);
    csb.addOption(CliStrings.CREATE_ASYNC_EVENT_QUEUE__ID, queueName);
    csb.addOption(CliStrings.CREATE_ASYNC_EVENT_QUEUE__LISTENER, "com.qcdunit.QueueCommandsDUnitTestListener");
    csb.addOption(CliStrings.CREATE_ASYNC_EVENT_QUEUE__BATCH_SIZE, "100");
    csb.addOption(CliStrings.CREATE_ASYNC_EVENT_QUEUE__BATCHTIMEINTERVAL, "200");
    csb.addOption(CliStrings.CREATE_ASYNC_EVENT_QUEUE__DISPATCHERTHREADS, "4");
    csb.addOption(CliStrings.CREATE_ASYNC_EVENT_QUEUE__ENABLEBATCHCONFLATION, "true");
    csb.addOption(CliStrings.CREATE_ASYNC_EVENT_QUEUE__DISKSYNCHRONOUS, "true");
    csb.addOption(CliStrings.CREATE_ASYNC_EVENT_QUEUE__FORWARD_EXPIRATION_DESTROY, "false");
    csb.addOption(CliStrings.CREATE_ASYNC_EVENT_QUEUE__MAXIMUM_QUEUE_MEMORY, "1000");
    csb.addOption(CliStrings.CREATE_ASYNC_EVENT_QUEUE__ORDERPOLICY, GatewaySender.OrderPolicy.KEY.toString());
    csb.addOption(CliStrings.CREATE_ASYNC_EVENT_QUEUE__PERSISTENT, "true");
    csb.addOption(CliStrings.CREATE_ASYNC_EVENT_QUEUE__PARALLEL, "true");

    gfsh.executeAndVerifyCommand(csb.getCommandString());
}