Example usage for java.util.concurrent.atomic AtomicReference get

Introduction

On this page, you can find example usages of java.util.concurrent.atomic.AtomicReference.get().

Prototype

public final V get() 

Document

Returns the current value, with memory effects as specified by VarHandle#getVolatile.
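
Before the real-world examples below, here is a minimal, self-contained sketch of the method in isolation (the class and variable names are illustrative, not taken from any of the projects below). get() performs a volatile read, so a value published with set() in one thread is visible to a later get() in another:

import java.util.concurrent.atomic.AtomicReference;

public class AtomicReferenceGetExample {
    public static void main(String[] args) throws InterruptedException {
        // Shared holder; get() reads the current value with volatile semantics.
        AtomicReference<String> latest = new AtomicReference<>("initial");

        Thread writer = new Thread(() -> latest.set("updated"));
        writer.start();
        writer.join();

        // join() establishes happens-before, so this reliably prints "updated".
        System.out.println(latest.get());
    }
}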

Usage

From source file:com.heliosdecompiler.helios.tasks.DecompileAndSaveTask.java
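This task shares an AtomicReference<Transformer> between the SWT UI thread and a worker thread: radio-button listeners set the chosen decompiler, and after a wait()/notify() handshake on the reference itself, the worker reads the selection with get().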

@Override
public void run() {
    File file = FileChooserUtil.chooseSaveLocation(Settings.LAST_DIRECTORY.get().asString(),
            Collections.singletonList("zip"));
    if (file == null)
        return;
    if (file.exists()) {
        boolean delete = SWTUtil.promptForYesNo(Constants.REPO_NAME + " - Overwrite existing file",
                "The selected file already exists. Overwrite?");
        if (!delete) {
            return;
        }
    }

    AtomicReference<Transformer> transformer = new AtomicReference<>();

    Display display = Display.getDefault();
    display.asyncExec(() -> {
        Shell shell = new Shell(Display.getDefault());
        FillLayout layout = new FillLayout();
        layout.type = SWT.VERTICAL;
        shell.setLayout(layout);
        Transformer.getAllTransformers(t -> {
            return t instanceof Decompiler || t instanceof Disassembler;
        }).forEach(t -> {
            Button button = new Button(shell, SWT.RADIO);
            button.setText(t.getName());
            button.addSelectionListener(new SelectionAdapter() {
                @Override
                public void widgetSelected(SelectionEvent e) {
                    transformer.set(t);
                }
            });
        });
        Button ok = new Button(shell, SWT.NONE);
        ok.setText("OK");
        ok.addSelectionListener(new SelectionAdapter() {
            @Override
            public void widgetSelected(SelectionEvent e) {
                shell.close();
                shell.dispose();
                synchronized (transformer) {
                    transformer.notify();
                }
            }
        });
        shell.pack();
        SWTUtil.center(shell);
        shell.open();
    });

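    // Block until the OK button's listener calls notify() on the transformer reference.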
    synchronized (transformer) {
        try {
            transformer.wait();
        } catch (InterruptedException e) {
            ExceptionHandler.handle(e);
        }
    }

    FileOutputStream fileOutputStream = null;
    ZipOutputStream zipOutputStream = null;

    try {
        file.createNewFile();
        fileOutputStream = new FileOutputStream(file);
        zipOutputStream = new ZipOutputStream(fileOutputStream);
        Set<String> written = new HashSet<>();
        for (Pair<String, String> pair : data) {
            LoadedFile loadedFile = Helios.getLoadedFile(pair.getValue0());
            if (loadedFile != null) {
                String innerName = pair.getValue1();
                byte[] bytes = loadedFile.getAllData().get(innerName);
                if (bytes != null) {
                    if (loadedFile.getClassNode(pair.getValue1()) != null) {
                        StringBuilder buffer = new StringBuilder();
                        transformer.get().transform(loadedFile.getClassNode(pair.getValue1()), bytes, buffer);
                        String name = innerName.substring(0, innerName.length() - 6) + ".java";
                        if (written.add(name)) {
                            zipOutputStream.putNextEntry(new ZipEntry(name));
                            zipOutputStream.write(buffer.toString().getBytes(StandardCharsets.UTF_8));
                            zipOutputStream.closeEntry();
                        } else {
                            SWTUtil.showMessage("Duplicate entry occured: " + name);
                        }
                    } else {
                        if (written.add(pair.getValue1())) {
                            zipOutputStream.putNextEntry(new ZipEntry(pair.getValue1()));
                            zipOutputStream.write(loadedFile.getAllData().get(pair.getValue1()));
                            zipOutputStream.closeEntry();
                        } else {
                            SWTUtil.showMessage("Duplicate entry occured: " + pair.getValue1());
                        }
                    }
                }
            }
        }
    } catch (Exception e) {
        ExceptionHandler.handle(e);
    } finally {
        IOUtils.closeQuietly(zipOutputStream);
        IOUtils.closeQuietly(fileOutputStream);
    }
}

From source file:de.hybris.platform.jdbcwrapper.DataSourceFactoryTest.java
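Here a wrapped DataSourceFactory captures the created JDBCConnectionPool in an AtomicReference<WeakReference<JDBCConnectionPool>>; once data source creation fails as intended, get() retrieves the pool so the test can assert it was shut down.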

@Test
public void testPoolShutdownAfterError() {
    final Tenant t = Registry.getCurrentTenantNoFallback();
    final Map<String, String> params = new HashMap<String, String>(
            t.getMasterDataSource().getConnectionParameters());
    // make it fail on connect by messing up the user name
    params.put(SystemSpecificParams.DB_USERNAME, "FooDosntExist");

    final DataSourceFactory dataSourceFactory = t.getMasterDataSource().getDataSourceFactory();

    final AtomicReference<WeakReference<JDBCConnectionPool>> poolRef = new AtomicReference<WeakReference<JDBCConnectionPool>>();

    final DataSourceFactory f = new DataSourceFactory() {

        @Override
        public HybrisDataSource createJNDIDataSource(final String id, final Tenant tenant,
                final String jndiName, final boolean readOnly) {
            throw new UnsupportedOperationException();
        }

        @Override
        public HybrisDataSource createDataSource(final String id, final Tenant tenant,
                final Map<String, String> connectionParams, final boolean readOnly) {
            throw new UnsupportedOperationException();
        }

        @Override
        public JDBCConnectionPool createConnectionPool(final HybrisDataSource dataSource,
                final Config poolConfig) {
            final JDBCConnectionPool ret = dataSourceFactory.createConnectionPool(dataSource, poolConfig);
            poolRef.set(new WeakReference<JDBCConnectionPool>(ret));
            return ret;
        }

        @Override
        public Connection wrapConnection(final HybrisDataSource wrappedDataSource,
                final Connection rawConnection) {
            return dataSourceFactory.wrapConnection(wrappedDataSource, rawConnection);
        }

        @Override
        public Statement wrapStatement(final Connection wrappedConnection, final Statement rawStatement) {
            return dataSourceFactory.wrapStatement(wrappedConnection, rawStatement);
        }

        @Override
        public PreparedStatement wrapPreparedStatement(final Connection wrappedConnection,
                final PreparedStatement rawStatement, final String query) {
            return dataSourceFactory.wrapPreparedStatement(wrappedConnection, rawStatement, query);
        }

        @Override
        public ResultSet wrapResultSet(final Statement wrappedStatement, final ResultSet rawResultSet) {
            return dataSourceFactory.wrapResultSet(wrappedStatement, rawResultSet);
        }
    };

    HybrisDataSource ds = null;
    try {
        TestUtils.disableFileAnalyzer("DataSource creation should throw exception");
        ds = new DataSourceImpl(t, "DummyDS", params, false, f);
    } catch (final Exception e) {
        // fine so far - now check how the pool behaves
        assertPoolIsShutDown(poolRef.get(), 30);
    } finally {
        TestUtils.enableFileAnalyzer();
    }
    if (ds != null) {
        // in case data source creation did not fail as expected, we must be sure to clean up
        ds.destroy();
        fail("data source creation was supposed to fail but did not");
    }
}

From source file:com.dmbstream.android.service.RESTMusicService.java
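An AtomicReference<Boolean> acts as a cancellation flag set from a CancellableTask callback; the retry loop checks it with get() to decide whether to rethrow the IOException instead of retrying. (An AtomicBoolean would arguably be the more idiomatic type for a simple flag.)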

private HttpResponse executeWithRetry(Context context, String url, HttpParams requestParams,
        List<String> parameterNames, List<Object> parameterValues, List<Header> headers,
        ProgressListener progressListener, CancellableTask task) throws IOException {
    Log.i(TAG, "Using URL " + url);

    final AtomicReference<Boolean> cancelled = new AtomicReference<Boolean>(false);
    int attempts = 0;
    while (true) {
        attempts++;
        HttpContext httpContext = new BasicHttpContext();
        final HttpPost request = new HttpPost(url);

        if (task != null) {
            // Attempt to abort the HTTP request if the task is cancelled.
            task.setOnCancelListener(new CancellableTask.OnCancelListener() {
                @Override
                public void onCancel() {
                    cancelled.set(true);
                    request.abort();
                }
            });
        }

        if (parameterNames != null) {
            List<NameValuePair> params = new ArrayList<NameValuePair>();
            for (int i = 0; i < parameterNames.size(); i++) {
                params.add(
                        new BasicNameValuePair(parameterNames.get(i), String.valueOf(parameterValues.get(i))));
            }
            request.setEntity(new UrlEncodedFormEntity(params, Encoding.UTF_8.name()));
        }

        if (requestParams != null) {
            request.setParams(requestParams);
            Log.d(TAG, "Socket read timeout: " + HttpConnectionParams.getSoTimeout(requestParams) + " ms.");
        }

        if (headers != null) {
            for (Header header : headers) {
                request.addHeader(header);
            }
        }

        // Add default headers to identify this app
        request.addHeader("Content-Type", "application/json");
        request.addHeader("X-ApiKey", ApiConstants.instance().getApiKey());
        request.addHeader("User-Agent", ApiConstants.instance().getAppName());

        String userToken = Util.getUserToken(context);
        if (!ValidationHelper.isNullOrWhitespace(userToken))
            request.addHeader(BasicScheme.authenticate(new UsernamePasswordCredentials(userToken, ""),
                    Encoding.UTF_8.name(), false));
        else
            Log.w(TAG, "No usertoken was specified for the request.");

        try {
            HttpResponse response = httpClient.execute(request, httpContext);
            return response;
        } catch (IOException x) {
            request.abort();
            if (attempts >= HTTP_REQUEST_MAX_ATTEMPTS || cancelled.get()) {
                throw x;
            }
            if (progressListener != null) {
                String msg = context.getResources().getString(R.string.music_service_retry, attempts,
                        HTTP_REQUEST_MAX_ATTEMPTS - 1);
                progressListener.updateProgress(msg);
            }
            Log.w(TAG, "Got IOException (" + attempts + "), will retry", x);
            increaseTimeouts(requestParams);
            Util.sleepQuietly(2000L);
        }
    }
}

From source file:org.elasticsearch.xpack.ml.integration.MlJobIT.java
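This test uses AtomicReference instances to collect the recreation response, a possible ResponseException, and any IOException raised by concurrently running delete threads; the assertions afterwards read the captured values back with get().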

public void testDelete_multipleRequest() throws Exception {
    String jobId = "delete-job-mulitple-times";
    createFarequoteJob(jobId);

    ConcurrentMapLong<Response> responses = ConcurrentCollections.newConcurrentMapLong();
    ConcurrentMapLong<ResponseException> responseExceptions = ConcurrentCollections.newConcurrentMapLong();
    AtomicReference<IOException> ioe = new AtomicReference<>();
    AtomicInteger recreationGuard = new AtomicInteger(0);
    AtomicReference<Response> recreationResponse = new AtomicReference<>();
    AtomicReference<ResponseException> recreationException = new AtomicReference<>();

    Runnable deleteJob = () -> {
        try {
            boolean forceDelete = randomBoolean();
            String url = MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId;
            if (forceDelete) {
                url += "?force=true";
            }
            Response response = client().performRequest("delete", url);
            responses.put(Thread.currentThread().getId(), response);
        } catch (ResponseException re) {
            responseExceptions.put(Thread.currentThread().getId(), re);
        } catch (IOException e) {
            ioe.set(e);
        }

        // Immediately after the first deletion finishes, recreate the job.  This should pick up
        // race conditions where another delete request deletes part of the newly created job.
        if (recreationGuard.getAndIncrement() == 0) {
            try {
                recreationResponse.set(createFarequoteJob(jobId));
            } catch (ResponseException re) {
                recreationException.set(re);
            } catch (IOException e) {
                ioe.set(e);
            }
        }
    };

    // The idea is to hit the situation where one request waits for
    // the other to complete. This is difficult to schedule but
    // hopefully it will happen in CI
    int numThreads = 5;
    Thread[] threads = new Thread[numThreads];
    for (int i = 0; i < numThreads; i++) {
        threads[i] = new Thread(deleteJob);
    }
    for (int i = 0; i < numThreads; i++) {
        threads[i].start();
    }
    for (int i = 0; i < numThreads; i++) {
        threads[i].join();
    }

    if (ioe.get() != null) {
        // This looks redundant but the check is done so we can
        // print the exception's error message
        assertNull(ioe.get().getMessage(), ioe.get());
    }

    assertEquals(numThreads, responses.size() + responseExceptions.size());

    // 404s are ok as it means the job had already been deleted.
    for (ResponseException re : responseExceptions.values()) {
        assertEquals(re.getMessage(), 404, re.getResponse().getStatusLine().getStatusCode());
    }

    for (Response response : responses.values()) {
        assertEquals(responseEntityToString(response), 200, response.getStatusLine().getStatusCode());
    }

    assertNotNull(recreationResponse.get());
    assertEquals(responseEntityToString(recreationResponse.get()), 200,
            recreationResponse.get().getStatusLine().getStatusCode());

    if (recreationException.get() != null) {
        assertNull(recreationException.get().getMessage(), recreationException.get());
    }

    try {
        // The idea of the code above is that the deletion is sufficiently time-consuming that
        // all threads enter the deletion call before the first one exits it.  Usually this happens,
        // but in the case that it does not, the job that is recreated may get deleted.
        // It is not an error if the job does not exist, but the following assertions
        // will fail in that case.
        client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId);

        // Check that the job aliases exist.  These are the last thing to be deleted when a job is deleted, so
        // if there's been a race between deletion and recreation these are what will be missing.
        String aliases = getAliases();

        assertThat(aliases, containsString("\"" + AnomalyDetectorsIndex.jobResultsAliasedName(jobId)
                + "\":{\"filter\":{\"term\":{\"job_id\":{\"value\":\"" + jobId + "\",\"boost\":1.0}}}}"));
        assertThat(aliases, containsString("\"" + AnomalyDetectorsIndex.resultsWriteAlias(jobId) + "\":{}"));

    } catch (ResponseException missingJobException) {
        // The job does not exist
        assertThat(missingJobException.getResponse().getStatusLine().getStatusCode(), equalTo(404));

        // The job aliases should be deleted
        String aliases = getAliases();
        assertThat(aliases, not(containsString("\"" + AnomalyDetectorsIndex.jobResultsAliasedName(jobId)
                + "\":{\"filter\":{\"term\":{\"job_id\":{\"value\":\"" + jobId + "\",\"boost\":1.0}}}}")));
        assertThat(aliases,
                not(containsString("\"" + AnomalyDetectorsIndex.resultsWriteAlias(jobId) + "\":{}")));
    }

    assertEquals(numThreads, recreationGuard.get());
}

From source file:it.anyplace.sync.bep.BlockPuller.java
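An AtomicReference<Exception> carries failures from the event-bus listener thread back to the caller: the listener stores any exception with set(), and checkError() rethrows whatever get() returns.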

public FileDownloadObserver pullBlocks(FileBlocks fileBlocks) throws InterruptedException {
    logger.info("pulling file = {}", fileBlocks);
    checkArgument(connectionHandler.hasFolder(fileBlocks.getFolder()),
            "supplied connection handler %s will not share folder %s", connectionHandler,
            fileBlocks.getFolder());
    final Object lock = new Object();
    final AtomicReference<Exception> error = new AtomicReference<>();
    final Object listener = new Object() {
        @Subscribe
        public void handleResponseMessageReceivedEvent(ResponseMessageReceivedEvent event) {
            synchronized (lock) {
                try {
                    if (!requestIds.contains(event.getMessage().getId())) {
                        return;
                    }
                    checkArgument(equal(event.getMessage().getCode(), ErrorCode.NO_ERROR),
                            "received error response, code = %s", event.getMessage().getCode());
                    byte[] data = event.getMessage().getData().toByteArray();
                    String hash = BaseEncoding.base16().encode(Hashing.sha256().hashBytes(data).asBytes());
                    blockCache.pushBlock(data);
                    if (missingHashes.remove(hash)) {
                        blocksByHash.put(hash, data);
                        logger.debug("aquired block, hash = {}", hash);
                        lock.notify();
                    } else {
                        logger.warn("received not-needed block, hash = {}", hash);
                    }
                } catch (Exception ex) {
                    error.set(ex);
                    lock.notify();
                }
            }
        }
    };
    FileDownloadObserver fileDownloadObserver = new FileDownloadObserver() {

        private long getReceivedData() {
            return blocksByHash.size() * BLOCK_SIZE;
        }

        private long getTotalData() {
            return (blocksByHash.size() + missingHashes.size()) * BLOCK_SIZE;
        }

        @Override
        public double getProgress() {
            return isCompleted() ? 1d : getReceivedData() / ((double) getTotalData());
        }

        @Override
        public String getProgressMessage() {
            return (Math.round(getProgress() * 1000d) / 10d) + "% "
                    + FileUtils.byteCountToDisplaySize(getReceivedData()) + " / "
                    + FileUtils.byteCountToDisplaySize(getTotalData());
        }

        @Override
        public boolean isCompleted() {
            return missingHashes.isEmpty();
        }

        @Override
        public void checkError() {
            if (error.get() != null) {
                throw new RuntimeException(error.get());
            }
        }

        @Override
        public double waitForProgressUpdate() throws InterruptedException {
            if (!isCompleted()) {
                synchronized (lock) {
                    checkError();
                    lock.wait();
                    checkError();
                }
            }
            return getProgress();
        }

        @Override
        public InputStream getInputStream() {
            checkArgument(missingHashes.isEmpty(), "pull failed, some blocks are still missing");
            List<byte[]> blockList = Lists
                    .newArrayList(Lists.transform(hashList, Functions.forMap(blocksByHash)));
            return new SequenceInputStream(Collections
                    .enumeration(Lists.transform(blockList, new Function<byte[], ByteArrayInputStream>() {
                        @Override
                        public ByteArrayInputStream apply(byte[] data) {
                            return new ByteArrayInputStream(data);
                        }
                    })));
        }

        @Override
        public void close() {
            missingHashes.clear();
            hashList.clear();
            blocksByHash.clear();
            try {
                connectionHandler.getEventBus().unregister(listener);
            } catch (Exception ex) {
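                // ignore: the listener may already have been unregistered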
            }
            if (closeConnection) {
                connectionHandler.close();
            }
        }
    };
    try {
        synchronized (lock) {
            hashList.addAll(Lists.transform(fileBlocks.getBlocks(), new Function<BlockInfo, String>() {
                @Override
                public String apply(BlockInfo block) {
                    return block.getHash();
                }
            }));
            missingHashes.addAll(hashList);
            for (String hash : missingHashes) {
                byte[] block = blockCache.pullBlock(hash);
                if (block != null) {
                    blocksByHash.put(hash, block);
                    missingHashes.remove(hash);
                }
            }
            connectionHandler.getEventBus().register(listener);
            for (BlockInfo block : fileBlocks.getBlocks()) {
                if (missingHashes.contains(block.getHash())) {
                    int requestId = Math.abs(new Random().nextInt());
                    requestIds.add(requestId);
                    connectionHandler.sendMessage(Request.newBuilder().setId(requestId)
                            .setFolder(fileBlocks.getFolder()).setName(fileBlocks.getPath())
                            .setOffset(block.getOffset()).setSize(block.getSize())
                            .setHash(ByteString.copyFrom(BaseEncoding.base16().decode(block.getHash())))
                            .build());
                    logger.debug("sent request for block, hash = {}", block.getHash());
                }
            }
            return fileDownloadObserver;
        }
    } catch (Exception ex) {
        fileDownloadObserver.close();
        throw ex;
    }
}

From source file:com.jeremydyer.nifi.ObjectDetectionProcessor.java
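Because the cropping happens inside an OutputStreamCallback passed to session.write(), the cropped Mat is handed back through an AtomicReference; after the detection loop, get() determines whether the cropped image or the original is passed on to the recursive child detections.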

final public Mat detectObjects(final ProcessSession session, FlowFile original, final JSONObject dd,
        final Mat image) {

    CascadeClassifier objectDetector = new CascadeClassifier(dd.getString("opencv_xml_cascade_path"));
    MatOfRect objectDetections = new MatOfRect();
    objectDetector.detectMultiScale(image, objectDetections);
    //getLogger().error("Detected " + objectDetections.toArray().length + " " + dd.getString("name") + " objects in the input flowfile");

    final AtomicReference<Mat> croppedImageReference = new AtomicReference<>();

    int counter = 0;
    for (int i = 0; i < objectDetections.toArray().length; i++) {
        final Rect rect = objectDetections.toArray()[i];
        FlowFile detection = session.write(session.create(original), new OutputStreamCallback() {
            @Override
            public void process(OutputStream outputStream) throws IOException {

                Mat croppedImage = null;

                //Should the image be cropped? If so there is no need to draw bounds because that would be the same as the cropping
                if (dd.getBoolean("crop")) {
                    Rect rectCrop = new Rect(rect.x, rect.y, rect.width, rect.height);
                    croppedImage = new Mat(image, rectCrop);
                    MatOfByte updatedImage = new MatOfByte();
                    Imgcodecs.imencode(".jpg", croppedImage, updatedImage);
                    croppedImageReference.set(croppedImage);
                    outputStream.write(updatedImage.toArray());
                } else {
                    //Should the image have a border drawn around it?
                    if (dd.getBoolean("drawBounds")) {
                        Mat imageWithBorder = image.clone();
                        Imgproc.rectangle(imageWithBorder, new Point(rect.x, rect.y),
                                new Point(rect.x + rect.width, rect.y + rect.height),
                                new Scalar(255, 255, 255));
                        MatOfByte updatedImage = new MatOfByte();
                        Imgcodecs.imencode(".jpg", imageWithBorder, updatedImage);
                        outputStream.write(updatedImage.toArray());
                    } else {
                        MatOfByte updatedImage = new MatOfByte();
                        Imgcodecs.imencode(".jpg", image, updatedImage);
                        outputStream.write(updatedImage.toArray());
                    }
                }

            }
        });

        Map<String, String> atts = new HashMap<>();
        atts.put("object.detection.name", dd.getString("name"));
        atts.put("object.detection.id", new Long(System.currentTimeMillis() + counter).toString());

        counter++;

        detection = session.putAllAttributes(detection, atts);
        session.transfer(detection, REL_OBJECT_DETECTED);
    }

    Mat childResponse = null;

    if (croppedImageReference.get() != null) {
        childResponse = croppedImageReference.get();
    } else {
        childResponse = image;
    }

    if (dd.has("children")) {
        JSONArray children = dd.getJSONArray("children");
        if (children != null) {

            for (int i = 0; i < children.length(); i++) {
                JSONObject ddd = children.getJSONObject(i);
                childResponse = detectObjects(session, original, ddd, childResponse);
            }
        }
    }

    return childResponse;
}

From source file:org.apache.bookkeeper.mledger.impl.ManagedCursorImpl.java
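An AtomicReference<ManagedLedgerException> records the first read failure via compareAndSet(null, mle), and both callbacks consult get() to decide whether to complete the read or fail it with the stored exception.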

/**
 * Asynchronously replays the given positions: (a) before reading, filters out already-acked messages;
 * (b) reads the remaining entries asynchronously and hands them to the given ReadEntriesCallback;
 * (c) returns all already-acked messages that were not replayed, so the caller (Dispatcher) can remove
 * them from its replay list and will not try to replay them again.
 */
@Override
public Set<? extends Position> asyncReplayEntries(final Set<? extends Position> positions,
        ReadEntriesCallback callback, Object ctx) {
    List<Entry> entries = Lists.newArrayListWithExpectedSize(positions.size());
    if (positions.isEmpty()) {
        callback.readEntriesComplete(entries, ctx);
    }

    // filters out messages which are already acknowledged
    Set<Position> alreadyAcknowledgedPositions = Sets.newHashSet();
    lock.readLock().lock();
    try {
        positions.stream()
                .filter(position -> individualDeletedMessages.contains((PositionImpl) position)
                        || ((PositionImpl) position).compareTo(markDeletePosition) < 0)
                .forEach(alreadyAcknowledgedPositions::add);
    } finally {
        lock.readLock().unlock();
    }

    final int totalValidPositions = positions.size() - alreadyAcknowledgedPositions.size();
    final AtomicReference<ManagedLedgerException> exception = new AtomicReference<>();
    ReadEntryCallback cb = new ReadEntryCallback() {
        int pendingCallbacks = totalValidPositions;

        @Override
        public synchronized void readEntryComplete(Entry entry, Object ctx) {
            if (exception.get() != null) {
                // if there is already a failure for a different position, we should release the entry straight away
                // and not add it to the list
                entry.release();
                if (--pendingCallbacks == 0) {
                    callback.readEntriesFailed(exception.get(), ctx);
                }
            } else {
                entries.add(entry);
                if (--pendingCallbacks == 0) {
                    callback.readEntriesComplete(entries, ctx);
                }
            }
        }

        @Override
        public synchronized void readEntryFailed(ManagedLedgerException mle, Object ctx) {
            log.warn("[{}][{}] Error while replaying entries", ledger.getName(), name, mle);
            if (exception.compareAndSet(null, mle)) {
                // release the entries just once, any further read success will release the entry straight away
                entries.forEach(Entry::release);
            }
            if (--pendingCallbacks == 0) {
                callback.readEntriesFailed(exception.get(), ctx);
            }
        }
    };

    positions.stream().filter(position -> !alreadyAcknowledgedPositions.contains(position))
            .forEach(p -> ledger.asyncReadEntry((PositionImpl) p, cb, ctx));

    return alreadyAcknowledgedPositions;
}

From source file:org.elasticsearch.client.sniff.SnifferTests.java
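The mocked Scheduler stores the last submitted Future and Sniffer.Task in AtomicReferences so that, once the sniff rounds finish, the test can get() them to wait for and assert on the final round.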

/**
 * Test multiple sniffing rounds by mocking the {@link Scheduler} as well as the {@link HostsSniffer}.
 * Simulates the ordinary behaviour of {@link Sniffer} when sniffing on failure is not enabled.
 * The {@link CountingHostsSniffer} doesn't make any network connection, but may throw an exception or return no hosts, which
 * makes it possible to verify that errors are properly handled and don't affect subsequent runs and their scheduling.
 * The {@link Scheduler} implementation submits tasks rather than scheduling them, so it doesn't respect the requested sniff
 * delays, while still making it possible to assert that the delays requested for each run and the following one are the expected values.
 */
public void testOrdinarySniffRounds() throws Exception {
    final long sniffInterval = randomLongBetween(1, Long.MAX_VALUE);
    long sniffAfterFailureDelay = randomLongBetween(1, Long.MAX_VALUE);
    RestClient restClient = mock(RestClient.class);
    CountingHostsSniffer hostsSniffer = new CountingHostsSniffer();
    final int iters = randomIntBetween(30, 100);
    final Set<Future<?>> futures = new CopyOnWriteArraySet<>();
    final CountDownLatch completionLatch = new CountDownLatch(1);
    final AtomicInteger runs = new AtomicInteger(iters);
    final ExecutorService executor = Executors.newSingleThreadExecutor();
    final AtomicReference<Future<?>> lastFuture = new AtomicReference<>();
    final AtomicReference<Sniffer.Task> lastTask = new AtomicReference<>();
    Scheduler scheduler = new Scheduler() {
        @Override
        public Future<?> schedule(Sniffer.Task task, long delayMillis) {
            assertEquals(sniffInterval, task.nextTaskDelay);
            int numberOfRuns = runs.getAndDecrement();
            if (numberOfRuns == iters) {
                //the first call is to schedule the first sniff round from the Sniffer constructor, with delay 0
                assertEquals(0L, delayMillis);
                assertEquals(sniffInterval, task.nextTaskDelay);
            } else {
                //all of the subsequent times "schedule" is called with delay set to the configured sniff interval
                assertEquals(sniffInterval, delayMillis);
                assertEquals(sniffInterval, task.nextTaskDelay);
                if (numberOfRuns == 0) {
                    completionLatch.countDown();
                    return null;
                }
            }
            //we submit rather than scheduling to make the test quick and not depend on time
            Future<?> future = executor.submit(task);
            futures.add(future);
            if (numberOfRuns == 1) {
                lastFuture.set(future);
                lastTask.set(task);
            }
            return future;
        }

        @Override
        public void shutdown() {
            //the executor is closed externally, shutdown is tested separately
        }
    };
    try {
        new Sniffer(restClient, hostsSniffer, scheduler, sniffInterval, sniffAfterFailureDelay);
        assertTrue("timeout waiting for sniffing rounds to be completed",
                completionLatch.await(1000, TimeUnit.MILLISECONDS));
        assertEquals(iters, futures.size());
        //the last future is the only one that may not be completed yet, as the count down happens
        //while scheduling the next round which is still part of the execution of the runnable itself.
        assertTrue(lastTask.get().hasStarted());
        lastFuture.get().get();
        for (Future<?> future : futures) {
            assertTrue(future.isDone());
            future.get();
        }
    } finally {
        executor.shutdown();
        assertTrue(executor.awaitTermination(1000, TimeUnit.MILLISECONDS));
    }
    int totalRuns = hostsSniffer.runs.get();
    assertEquals(iters, totalRuns);
    int setHostsRuns = totalRuns - hostsSniffer.failures.get() - hostsSniffer.emptyList.get();
    verify(restClient, times(setHostsRuns)).setHosts(Matchers.<HttpHost>anyVararg());
    verifyNoMoreInteractions(restClient);
}

From source file:io.pravega.segmentstore.server.writer.SegmentAggregatorTests.java
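An AtomicReference<IntentionalException> remembers the exception most recently produced by the error injectors; each flush attempt resets it with set(null) and then inspects get() to distinguish an expected injected failure from an unexpected one.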

/**
 * Tests the flush() method with Append and StreamSegmentSealOperations when there are Storage errors.
 */
@Test
public void testSealWithStorageErrors() throws Exception {
    // Add some appends and seal, and then flush together. Verify that everything got flushed in one go.
    final int appendCount = 1000;
    final WriterConfig config = WriterConfig.builder()
            .with(WriterConfig.FLUSH_THRESHOLD_BYTES, appendCount * 50) // Extra high length threshold.
            .with(WriterConfig.FLUSH_THRESHOLD_MILLIS, 1000L).with(WriterConfig.MAX_FLUSH_SIZE_BYTES, 10000)
            .with(WriterConfig.MIN_READ_TIMEOUT_MILLIS, 10L).build();

    @Cleanup
    TestContext context = new TestContext(config);
    context.storage.create(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    context.segmentAggregator.initialize(TIMEOUT, executorService()).join();

    @Cleanup
    ByteArrayOutputStream writtenData = new ByteArrayOutputStream();

    // Part 1: flush triggered by accumulated size.
    for (int i = 0; i < appendCount; i++) {
        // Add another operation and record its length (not bothering with flushing here; testFlushSeal() covers that).
        StorageOperation appendOp = generateAppendAndUpdateMetadata(i, SEGMENT_ID, context);
        context.segmentAggregator.add(appendOp);
        getAppendData(appendOp, writtenData, context);
    }

    // Generate and add a Seal Operation.
    StorageOperation sealOp = generateSealAndUpdateMetadata(SEGMENT_ID, context);
    context.segmentAggregator.add(sealOp);

    // Have the writes fail every few attempts with a well known exception.
    AtomicBoolean generateSyncException = new AtomicBoolean(true);
    AtomicBoolean generateAsyncException = new AtomicBoolean(true);
    AtomicReference<IntentionalException> setException = new AtomicReference<>();
    Supplier<Exception> exceptionSupplier = () -> {
        IntentionalException ex = new IntentionalException(Long.toString(context.timer.getElapsedMillis()));
        setException.set(ex);
        return ex;
    };
    context.storage.setSealSyncErrorInjector(
            new ErrorInjector<>(count -> generateSyncException.getAndSet(false), exceptionSupplier));
    context.storage.setSealAsyncErrorInjector(
            new ErrorInjector<>(count -> generateAsyncException.getAndSet(false), exceptionSupplier));

    // Call flush and verify that the entire Aggregator got flushed and the Seal got persisted to Storage.
    int attemptCount = 4;
    for (int i = 0; i < attemptCount; i++) {
        // Repeat a number of times, at least once should work.
        setException.set(null);
        try {
            FlushResult flushResult = context.segmentAggregator.flush(TIMEOUT, executorService()).join();
            Assert.assertNull("An exception was expected, but none was thrown.", setException.get());
            Assert.assertNotNull("No FlushResult provided.", flushResult);
        } catch (Exception ex) {
            if (setException.get() != null) {
                Assert.assertEquals("Unexpected exception thrown.", setException.get(),
                        ExceptionHelpers.getRealException(ex));
            } else {
                // Not expecting any exception this time.
                throw ex;
            }
        }

        if (!generateAsyncException.get() && !generateSyncException.get() && setException.get() == null) {
            // We are done. We got at least one through.
            break;
        }
    }

    // Verify data.
    byte[] expectedData = writtenData.toByteArray();
    byte[] actualData = new byte[expectedData.length];
    SegmentProperties storageInfo = context.storage
            .getStreamSegmentInfo(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    Assert.assertEquals("Unexpected number of bytes flushed to Storage.", expectedData.length,
            storageInfo.getLength());
    Assert.assertTrue("Segment is not sealed in storage post flush.", storageInfo.isSealed());
    Assert.assertTrue("Segment is not marked in metadata as sealed in storage post flush.",
            context.segmentAggregator.getMetadata().isSealedInStorage());
    context.storage.read(readHandle(context.segmentAggregator.getMetadata().getName()), 0, actualData, 0,
            actualData.length, TIMEOUT).join();
    Assert.assertArrayEquals("Unexpected data written to storage.", expectedData, actualData);
}

From source file:com.alibaba.wasp.master.FMaster.java
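The MetaScannerVisitor callback stores the matching (EntityGroupInfo, ServerName) pair into an AtomicReference as the meta table is scanned, and the method simply returns result.get() when the scan completes.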

/**
 * Looks up the entity group and hosting server for the given row of the given table.
 *
 * @param tableName
 * @param rowKey
 * @return the matching (EntityGroupInfo, ServerName) pair, or null if none was found
 * @throws java.io.IOException
 */
public Pair<EntityGroupInfo, ServerName> getTableEntityGroupForRow(final byte[] tableName, final byte[] rowKey)
        throws IOException {
    final AtomicReference<Pair<EntityGroupInfo, ServerName>> result = new AtomicReference<Pair<EntityGroupInfo, ServerName>>(
            null);

    MetaScannerVisitor visitor = new MetaScannerVisitorBase() {
        @Override
        public boolean processRow(Result data) throws IOException {
            if (data == null || data.size() <= 0) {
                return true;
            }
            Pair<EntityGroupInfo, ServerName> pair = EntityGroupInfo.getEntityGroupInfoAndServerName(data);
            if (pair == null) {
                return false;
            }
            if (!Bytes.equals(pair.getFirst().getTableName(), tableName)) {
                return false;
            }
            result.set(pair);
            return true;
        }
    };

    FMetaScanner.metaScan(conf, visitor, tableName, rowKey, 1);
    return result.get();
}