Example usage for java.util.concurrent.atomic AtomicReference set


Introduction

This page collects usage examples for java.util.concurrent.atomic.AtomicReference.set, drawn from real open-source projects.

Prototype

public final void set(V newValue) 


Document

Sets the value to newValue, with memory effects as specified by VarHandle#setVolatile.
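
Before the real-world examples below, here is a minimal, self-contained sketch of set (the class name and values are invented for illustration): a worker thread publishes a result through an AtomicReference, and set's volatile memory effects make the value safely visible to the reading thread.

import java.util.concurrent.atomic.AtomicReference;

public class AtomicReferenceSetExample {
    public static void main(String[] args) throws InterruptedException {
        // Holds the result produced by the worker thread; starts as null.
        final AtomicReference<String> result = new AtomicReference<>();

        Thread worker = new Thread(() -> {
            // set(...) writes with volatile memory effects, so this value (and
            // everything written before it) becomes visible to other threads.
            result.set("computed value");
        });

        worker.start();
        worker.join();

        // After join(), the main thread reliably observes the published value.
        System.out.println(result.get()); // prints "computed value"
    }
}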

Usage

From source file:org.cloudifysource.shell.installer.LocalhostGridAgentBootstrapper.java
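
In this snippet, set publishes the GridServiceAgent discovered on the local machine from inside a polling predicate, so the enclosing method can return it once the wait completes.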

private GridServiceAgent waitForAgent(final Admin admin, final boolean existingAgent, final long timeout,
        final TimeUnit timeunit) throws InterruptedException, TimeoutException, CLIException {

    final AtomicReference<GridServiceAgent> agentOnLocalhost = new AtomicReference<GridServiceAgent>();

    createConditionLatch(timeout, timeunit).waitFor(new ConditionLatch.Predicate() {

        /**
         * {@inheritDoc}
         */
        @Override
        public boolean isDone() throws CLIException, InterruptedException {

            boolean isDone = false;
            for (final GridServiceAgent agent : admin.getGridServiceAgents()) {
                if (checkAgent(agent)) {
                    agentOnLocalhost.set(agent);
                    isDone = true;
                    break;
                }
            }
            if (!isDone) {
                if (existingAgent) {
                    logger.fine("Looking for an existing agent running on local machine");
                } else {
                    logger.fine("Waiting for the agent on the local machine to start.");
                }
                publishEvent(null);
            }
            return isDone;
        }

        private boolean checkAgent(final GridServiceAgent agent) {
            final String agentNicAddress = agent.getMachine().getHostAddress();
            final String agentLookupGroups = getLookupGroups(agent);
            final boolean checkLookupGroups = lookupGroups != null && lookupGroups.equals(agentLookupGroups);
            final boolean checkNicAddress = nicAddress != null && agentNicAddress.equals(nicAddress)
                    || IsLocalCloudUtils.isThisMyIpAddress(agentNicAddress);
            if (verbose) {
                String message = "Discovered agent nic-address=" + agentNicAddress + " lookup-groups="
                        + agentLookupGroups + ". ";
                if (!checkLookupGroups) {
                    message += "Ignoring agent. Filter lookupGroups='" + lookupGroups
                            + "', agent LookupGroups='" + agentLookupGroups + "'";
                }
                if (!checkNicAddress) {
                    message += "Ignoring agent. Filter nicAddress='" + nicAddress
                            + "' or local address, agent nicAddress='" + agentNicAddress + "'";
                }
                publishEvent(message);
            }
            return checkLookupGroups && checkNicAddress;
        }

        private String getLookupGroups(final VirtualMachineAware component) {

            final String prefix = "-Dcom.gs.jini_lus.groups=";
            return getCommandLineArgumentRemovePrefix(component, prefix);
        }

        private String getCommandLineArgumentRemovePrefix(final VirtualMachineAware component,
                final String prefix) {
            final String[] commandLineArguments = component.getVirtualMachine().getDetails()
                    .getInputArguments();
            String requiredArg = null;
            for (final String arg : commandLineArguments) {
                if (arg.startsWith(prefix)) {
                    requiredArg = arg;
                }
            }

            if (requiredArg != null) {
                return requiredArg.substring(prefix.length());
            }
            return null;
        }
    });

    return agentOnLocalhost.get();
}

From source file:com.spotify.docker.client.DefaultDockerClientTest.java
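
Here, set captures the image ID reported through a build ProgressHandler callback so the test can assert that it matches the ID returned by build.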

@Test
public void testBuildImageId() throws Exception {
    final String dockerDirectory = Resources.getResource("dockerDirectory").getPath();
    final AtomicReference<String> imageIdFromMessage = new AtomicReference<>();

    final String returnedImageId = sut.build(Paths.get(dockerDirectory), "test", new ProgressHandler() {
        @Override
        public void progress(ProgressMessage message) throws DockerException {
            final String imageId = message.buildImageId();
            if (imageId != null) {
                imageIdFromMessage.set(imageId);
            }
        }
    });

    assertThat(returnedImageId, is(imageIdFromMessage.get()));
}

From source file:edu.rit.flick.genetics.FastFileDeflator.java
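
Here, set stores the cleanup shutdown-hook thread in a holder created before the hook exists, letting the deflation thread deregister the hook after a normal completion.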

@Override
public File deflate(final Configuration configuration, final File fileIn, final File fileOut) {
    assert fileIn.exists();

    try {
        // Deflate to Directory
        final String outputDirectoryPath = fileOut.getPath()
                .replaceAll("." + Files.getFileExtension(fileOut.getPath()), FLICK_FAST_FILE_TMP_DIR_SUFFIX);

        final File tmpOutputDirectory = new File(outputDirectoryPath);
        if (tmpOutputDirectory.exists())
            FileUtils.deleteDirectory(tmpOutputDirectory);
        tmpOutputDirectory.mkdirs();

        final AtomicReference<Thread> cleanHookAtomic = new AtomicReference<Thread>();

        // Deflate Fast file to a temporary directory
        final Thread deflateToDirectoryThread = new Thread(() -> {
            try {
                // Deflate Fast file to a temporary directory
                deflateToDirectory(fileIn, tmpOutputDirectory);

                // Remove unused buffer space
                removeUnusedBufferSpace(outputDirectoryPath);

                // Compress Directory to a zip file
                deflateToFile(tmpOutputDirectory, fileOut);

                Runtime.getRuntime().removeShutdownHook(cleanHookAtomic.get());
            } catch (final Exception e) {
                if (!interrupted)
                    System.err.println(e.getMessage());
            }
        }, "Default_Deflation_Thread");

        // Make cleaning hook
        final Thread cleanHook = new Thread(() -> {
            interrupted = true;
            configuration.setFlag(VERBOSE_FLAG, false);
            configuration.setFlag(DELETE_FLAG, false);
            try {
                if (deflateToDirectoryThread.isAlive())
                    deflateToDirectoryThread.interrupt();

                // Remove unused buffer space
                removeUnusedBufferSpace(outputDirectoryPath);

                // Delete files that were not able to be processed
                FileUtils.deleteQuietly(tmpOutputDirectory);
                System.out.println();
            } catch (final IOException | InterruptedException e) {
                e.printStackTrace();
            }
        }, "Deflation_Cleaning_Thread");

        cleanHookAtomic.set(cleanHook);

        Runtime.getRuntime().addShutdownHook(cleanHook);

        deflateToDirectoryThread.start();
        deflateToDirectoryThread.join();

    } catch (final IOException | InterruptedException e) {
        e.printStackTrace();
    }

    return fileOut;
}

From source file:org.apache.solr.handler.dataimport.processor.XPathEntityProcessor.java
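
As the first comment notes, the AtomicReference here serves only as a mutable holder: the publisher thread records any parsing exception with set so the consuming iterator can decide whether to abort, skip, or log.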

private Iterator<Map<String, Object>> getRowIterator(final Reader data, final String s) {
    // Nothing atomic about it. I just needed a StrongReference.
    final AtomicReference<Exception> exp = new AtomicReference<Exception>();
    final BlockingQueue<Map<String, Object>> blockingQueue = new ArrayBlockingQueue<Map<String, Object>>(
            blockingQueueSize);
    final AtomicBoolean isEnd = new AtomicBoolean(false);
    final AtomicBoolean throwExp = new AtomicBoolean(true);
    publisherThread = new Thread() {
        @Override
        public void run() {
            try {
                xpathReader.streamRecords(data, new XPathRecordReader.Handler() {
                    @Override
                    @SuppressWarnings("unchecked")
                    public void handle(Map<String, Object> record, String xpath) {
                        if (isEnd.get()) {
                            throwExp.set(false);
                            // To end the streaming; otherwise the parsing will go on forever
                            // even though the consumer has gone away
                            throw new RuntimeException("BREAK");
                        }
                        Map<String, Object> row;
                        try {
                            row = readRow(record, xpath);
                        } catch (final Exception e) {
                            isEnd.set(true);
                            return;
                        }
                        offer(row);
                    }
                });
            } catch (final Exception e) {
                if (throwExp.get())
                    exp.set(e);
            } finally {
                closeIt(data);
                if (!isEnd.get()) {
                    offer(END_MARKER);
                }
            }
        }

        private void offer(Map<String, Object> row) {
            try {
                while (!blockingQueue.offer(row, blockingQueueTimeOut, blockingQueueTimeOutUnits)) {
                    if (isEnd.get())
                        return;
                    LOG.debug("Timeout elapsed writing records.  Perhaps buffer size should be increased.");
                }
            } catch (final InterruptedException e) {
                return;
            } finally {
                synchronized (this) {
                    notifyAll();
                }
            }
        }
    };

    publisherThread.start();

    return new Iterator<Map<String, Object>>() {
        private Map<String, Object> lastRow;
        int count = 0;

        @Override
        public boolean hasNext() {
            return !isEnd.get();
        }

        @Override
        public Map<String, Object> next() {
            Map<String, Object> row;

            do {
                try {
                    row = blockingQueue.poll(blockingQueueTimeOut, blockingQueueTimeOutUnits);
                    if (row == null) {
                        LOG.debug("Timeout elapsed reading records.");
                    }
                } catch (final InterruptedException e) {
                    LOG.debug("Caught InterruptedException while waiting for row.  Aborting.");
                    isEnd.set(true);
                    return null;
                }
            } while (row == null);

            if (row == END_MARKER) {
                isEnd.set(true);
                if (exp.get() != null) {
                    String msg = "Parsing failed for xml, url:" + s + " rows processed in this xml:" + count;
                    if (lastRow != null)
                        msg += " last row in this xml:" + lastRow;
                    if (ABORT.equals(onError)) {
                        wrapAndThrow(SEVERE, exp.get(), msg);
                    } else if (SKIP.equals(onError)) {
                        wrapAndThrow(DataImportHandlerException.SKIP, exp.get());
                    } else {
                        LOG.warn(msg, exp.get());
                    }
                }
                return null;
            }
            count++;
            return lastRow = row;
        }

        @Override
        public void remove() {
            /*no op*/
        }
    };

}

From source file:com.geodan.ngr.serviceintegration.CSWTransformer.java
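
Here, set fills a response holder with the final JSON string before it is logged and written to the browser.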

/**
 * Generates a JSON response and sends that back to the browser. The JSON can contain records or errors.
 *
 * @param rw      used to send a response back to the browser
 * @param wmsList   containing all the results from the request
 * @param callback   used as a header for the JSON
 * @param resulttype if LONG_RESULT then insert extra field 'description' into JSON response
 */
private void getResponse(PrintWriter rw, List<WMSResource> wmsList, String callback, ResultType resulttype) {

    String wmsResponse = "";
    String errorResponse = "";
    AtomicReference<String> response = new AtomicReference<String>("");

    // Used to indicate that a valid response is found
    boolean responseFound = false;

    wmsResponse += " \"records\": [";
    for (int i = 0; i < wmsList.size(); i++) {
        WMSResource wms = wmsList.get(i);

        // If the name and url are null then there is an error.
        // Otherwise, a valid response was found and a valid JSON response is generated

        if (wms.getName() != null && wms.getUrl() != null && !wms.getName().equals("")
                && !wms.getUrl().equals("")) {

            responseFound = true;

            wmsResponse += "{\"wmsurl\": \"" + wms.getUrl() + "\", \"name\": \"" + wms.getName()
                    + "\",  \"title\": \"" + wms.getTitle();
            if (resulttype == ResultType.LONG_RESULT) {
                // insert abstract into resulting response
                wmsResponse += "\", \"description\": \"" + jsEscape(wms.getDescription());
            }
            wmsResponse += "\"}";
            if ((i + 1) < wmsList.size() && wmsList.get(i + 1).getError() == null) {
                wmsResponse += ", ";
            } else {
                wmsResponse += "]}}";
            }
        } else {
            errorResponse = wms.getError();
        }
    }

    // If a valid response is found then place the records in the
    // response else place an error in the response
    if (responseFound) {
        response.set("{ \"response\": {" + wmsResponse);
    } else {
        response.set("{ \"response\": {" + errorResponse);
    }

    log.debug(response.get());

    // The callback is optional; if it is absent, the response is written without wrapping parentheses.
    if (callback != null && !callback.equals("")) {
        rw.println(callback + "(" + response.get() + ")");

    } else {
        rw.println(response.get());
    }
    rw.flush();
}

From source file:cl.gisred.android.RepartoActivity.java
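
Here, set records an error message inside an asynchronous applyEdits callback so that it can be displayed from the UI thread.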

public void enviarDatos() {
    final AtomicReference<String> resp = new AtomicReference<>("");

    for (RepartoClass rep : arrayDatos) {

        final RepartoClass repActual = rep;
        Map<String, Object> objectMap = new HashMap<>();

        objectMap.put("nis", rep.getNis());
        objectMap.put("valor_captura", rep.getCodigo());
        objectMap.put("empresa", empresa);
        objectMap.put("modulo", modulo);

        Point oUbicacion = new Point(rep.getX(), rep.getY());

        Graphic newFeatureGraphic = new Graphic(oUbicacion, null, objectMap);
        Graphic[] adds = { newFeatureGraphic };
        LyReparto.applyEdits(adds, null, null, new CallbackListener<FeatureEditResult[][]>() {
            @Override
            public void onCallback(FeatureEditResult[][] featureEditResults) {
                if (featureEditResults[0] != null) {
                    if (featureEditResults[0][0] != null && featureEditResults[0][0].isSuccess()) {

                        runOnUiThread(new Runnable() {

                            @Override
                            public void run() {
                                deleteData(repActual.getId());
                            }
                        });
                    }
                }
            }

            @Override
            public void onError(Throwable throwable) {
                resp.set("Error al ingresar: " + throwable.getLocalizedMessage());

                runOnUiThread(new Runnable() {

                    @Override
                    public void run() {
                        Toast.makeText(RepartoActivity.this, resp.get(), Toast.LENGTH_SHORT).show();
                    }
                });
            }
        });
    }
}

From source file:com.sysunite.weaver.nifi.GetXMLNodes.java
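
Here, set hands the stream of extracted XML nodes out of a session.read callback to the enclosing onTrigger method.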

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {

    final FlowFile flowFileIn = session.get();

    if (flowFileIn == null) {
        return;
    }

    //System.out.println("i am here!!!");

    final AtomicReference<InputStream> savedNodes = new AtomicReference<>();

    // read the incoming FlowFile
    session.read(flowFileIn, new InputStreamCallback() {

        @Override
        public void process(InputStream isIn) throws IOException {

            try {

                String contents = IOUtils.toString(isIn);

                XML xml = new XMLDocument(contents);

                String aPath = context.getProperty(PROP_XPATH).getValue();

                StringBuffer buffer = new StringBuffer();
                buffer.append("<root>");

                for (XML ibr : xml.nodes(aPath)) {
                    buffer.append(ibr.toString());
                }

                buffer.append("</root>");

                InputStream is = new ByteArrayInputStream(buffer.toString().getBytes()); // no extra parameters, since those cause 'too much output to process'

                savedNodes.set(is);

            } catch (final Exception e) {
                // Parsing failed; leave savedNodes unset.
                System.out.println(e.getMessage());
            }

        }

    });

    session.remove(flowFileIn);

    try {

        String contents = IOUtils.toString(savedNodes.get());

        XML xml = new XMLDocument(contents);

        String aPath = context.getProperty(PROP_XPATH).getValue();
        String[] parts = aPath.split("/");

        int size = parts.length;
        String lastNode = parts[size - 1]; // e.g. "FunctionalPhysicalObject"

        for (XML node : xml.nodes("root/" + lastNode)) {

            FlowFile flowfileOut = session.create();

            InputStream data = new ByteArrayInputStream(node.toString().getBytes());
            flowfileOut = session.importFrom(data, flowfileOut);

            session.transfer(flowfileOut, MY_RELATIONSHIP);
            session.commit();

        }

    } catch (IOException e) {
        System.out.println("w00t");// + e.getMessage());
    } catch (IllegalArgumentException e) {
        System.out.println(e.getMessage());
    } catch (FlowFileHandlingException e) {
        System.out.println(e.getMessage());
    }

}

From source file:com.amazon.carbonado.repo.tupl.TuplRepositoryBuilder.java
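
Here, set publishes the fully constructed Repository through the rootReference parameter just before the method returns it.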

@Override
public Repository build(AtomicReference<Repository> rootReference)
        throws ConfigurationException, RepositoryException {
    if (mIndexSupport) {
        // Wrap TuplRepository with IndexedRepository.

        // Temporarily set to false to avoid infinite recursion.
        mIndexSupport = false;
        try {
            IndexedRepositoryBuilder ixBuilder = new IndexedRepositoryBuilder();
            ixBuilder.setWrappedRepository(this);
            ixBuilder.setMaster(isMaster());
            ixBuilder.setIndexRepairEnabled(mIndexRepairEnabled);
            ixBuilder.setIndexRepairThrottle(mIndexThrottle);
            ixBuilder.setStrictTriggers(mLockUpgradeRule == null || mLockUpgradeRule == LockUpgradeRule.STRICT);
            return ixBuilder.build(rootReference);
        } finally {
            mIndexSupport = true;
        }
    }

    assertReady();

    Log log;
    if (mBaseFile == null) {
        log = null;
    } else {
        log = LogFactory.getLog(TuplRepository.class);
    }

    Database db = mDatabase;
    if (db == null) {
        LogEventListener listener = null;

        if (log != null || mPanicHandler != null) {
            listener = new LogEventListener(log, mName, mPanicHandler);
            mConfig.eventListener(listener);
        }

        try {
            db = Database.open(mConfig);
        } catch (IOException e) {
            throw new TuplExceptionTransformer(null).toRepositoryException(e);
        }

        if (listener != null) {
            listener.setDatabase(db);
        }
    }

    Repository repo = new TuplRepository(mName, mMaster, getTriggerFactories(), rootReference, db, log);

    rootReference.set(repo);
    return repo;
}

From source file:com.spotify.docker.client.DefaultDockerClientTest.java
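
As in the earlier Docker client test, set captures the image ID from the progress stream, this time for a build that names an explicit Dockerfile path.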

@Test
public void testBuildImageIdPathToDockerFile() throws Exception {
    final String dockerDirectory = Resources.getResource("dockerDirectory").getPath();
    final AtomicReference<String> imageIdFromMessage = new AtomicReference<>();

    final String returnedImageId = sut.build(Paths.get(dockerDirectory), "test", "innerDir/innerDockerfile",
            new ProgressHandler() {
                @Override
                public void progress(ProgressMessage message) throws DockerException {
                    final String imageId = message.buildImageId();
                    if (imageId != null) {
                        imageIdFromMessage.set(imageId);
                    }
                }
            });

    assertThat(returnedImageId, is(imageIdFromMessage.get()));
}

From source file:io.pravega.segmentstore.server.writer.SegmentAggregatorTests.java
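
Here, set records the intentionally injected exception inside a write interceptor so that the test loop knows whether the next flush is expected to fail.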

/**
 * Tests the ability of the SegmentAggregator to reconcile AppendOperations (Cached/NonCached).
 */
@Test
public void testReconcileAppends() throws Exception {
    final WriterConfig config = DEFAULT_CONFIG;
    final int appendCount = 1000;
    final int failEvery = 3;

    @Cleanup
    TestContext context = new TestContext(config);
    context.storage.create(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    context.segmentAggregator.initialize(TIMEOUT, executorService()).join();

    // The writes always succeed, but every few times we return some random error, indicating that they didn't.
    AtomicInteger writeCount = new AtomicInteger();
    AtomicReference<Exception> setException = new AtomicReference<>();
    context.storage.setWriteInterceptor((segmentName, offset, data, length, storage) -> {
        if (writeCount.incrementAndGet() % failEvery == 0) {
            // Time to wreak some havoc.
            return storage.write(writeHandle(segmentName), offset, data, length, TIMEOUT).thenAccept(v -> {
                IntentionalException ex = new IntentionalException(
                        String.format("S=%s,O=%d,L=%d", segmentName, offset, length));
                setException.set(ex);
                throw ex;
            });
        } else {
            setException.set(null);
            return null;
        }
    });

    @Cleanup
    ByteArrayOutputStream writtenData = new ByteArrayOutputStream();

    for (int i = 0; i < appendCount; i++) {
        // Add another operation and record its length.
        StorageOperation appendOp = generateAppendAndUpdateMetadata(i, SEGMENT_ID, context);
        context.segmentAggregator.add(appendOp);
        getAppendData(appendOp, writtenData, context);
    }

    context.increaseTime(config.getFlushThresholdTime().toMillis() + 1); // Force a flush by incrementing the time by a lot.
    while (context.segmentAggregator.mustFlush()) {
        // Call flush() and inspect the result.
        FlushResult flushResult = null;

        try {
            flushResult = context.segmentAggregator.flush(TIMEOUT, executorService()).get(TIMEOUT.toMillis(),
                    TimeUnit.MILLISECONDS);
            Assert.assertNull("An exception was expected, but none was thrown.", setException.get());
            Assert.assertNotNull("No FlushResult provided.", flushResult);
        } catch (Exception ex) {
            if (setException.get() != null) {
                Assert.assertEquals("Unexpected exception thrown.", setException.get(),
                        ExceptionHelpers.getRealException(ex));
            } else {
                // Only expecting a BadOffsetException after our own injected exception.
                Throwable realEx = ExceptionHelpers.getRealException(ex);
                Assert.assertTrue("Unexpected exception thrown: " + realEx,
                        realEx instanceof BadOffsetException);
            }
        }

        // Check flush result.
        if (flushResult != null) {
            AssertExtensions.assertGreaterThan("Not enough bytes were flushed (time threshold).", 0,
                    flushResult.getFlushedBytes());
            Assert.assertEquals("Not expecting any merged bytes in this test.", 0,
                    flushResult.getMergedBytes());
        }

        context.increaseTime(config.getFlushThresholdTime().toMillis() + 1); // Force a flush by incrementing the time by a lot.
    }

    // Verify data.
    byte[] expectedData = writtenData.toByteArray();
    byte[] actualData = new byte[expectedData.length];
    long storageLength = context.storage
            .getStreamSegmentInfo(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join()
            .getLength();
    Assert.assertEquals("Unexpected number of bytes flushed to Storage.", expectedData.length, storageLength);
    context.storage.read(readHandle(context.segmentAggregator.getMetadata().getName()), 0, actualData, 0,
            actualData.length, TIMEOUT).join();

    Assert.assertArrayEquals("Unexpected data written to storage.", expectedData, actualData);
}