Example usage for java.util.concurrent.atomic AtomicReference set

Introduction

On this page you can find usage examples for java.util.concurrent.atomic AtomicReference set.

Prototype

public final void set(V newValue) 

Document

Sets the value to newValue, with memory effects as specified by VarHandle#setVolatile.
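
Before the project examples below, here is a minimal, self-contained sketch (not taken from any of the listed projects) illustrating the volatile-write behaviour described above: a value written with set on one thread is visible to another thread that later reads the reference.

import java.util.concurrent.atomic.AtomicReference;

public class AtomicReferenceSetExample {

    public static void main(String[] args) throws InterruptedException {
        // reference shared between the writer thread and the main thread
        final AtomicReference<String> message = new AtomicReference<>("initial");

        Thread writer = new Thread(new Runnable() {
            @Override
            public void run() {
                // set(...) performs a volatile write, so the new value is
                // visible to any thread that subsequently reads the reference
                message.set("updated by writer");
            }
        });

        writer.start();
        writer.join();

        // get() performs a volatile read and observes the writer's value
        System.out.println(message.get()); // prints "updated by writer"
    }
}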

Usage

From source file:org.apache.flink.streaming.connectors.kafka.KafkaConsumerTestBase.java

/**
 * This test ensures that when explicitly set to start from the latest record, the consumer
 * ignores the "auto.offset.reset" behaviour as well as any committed group offsets in Kafka.
 */
public void runStartFromLatestOffsets() throws Exception {
    // 50 records written to each of 3 partitions before launching a latest-starting consuming job
    final int parallelism = 3;
    final int recordsInEachPartition = 50;

    // each partition will be written an extra 200 records
    final int extraRecordsInEachPartition = 200;

    // all already existing data in the topic, before the consuming topology has started, should be ignored
    final String topicName = writeSequence("testStartFromLatestOffsetsTopic", recordsInEachPartition,
            parallelism, 1);

    // the committed offsets should be ignored
    KafkaTestEnvironment.KafkaOffsetHandler kafkaOffsetHandler = kafkaServer.createOffsetHandler();
    kafkaOffsetHandler.setCommittedOffset(topicName, 0, 23);
    kafkaOffsetHandler.setCommittedOffset(topicName, 1, 31);
    kafkaOffsetHandler.setCommittedOffset(topicName, 2, 43);

    // job names for the topologies for writing and consuming the extra records
    final String consumeExtraRecordsJobName = "Consume Extra Records Job";
    final String writeExtraRecordsJobName = "Write Extra Records Job";

    // serialization / deserialization schemas for writing and consuming the extra records
    final TypeInformation<Tuple2<Integer, Integer>> resultType = TypeInformation
            .of(new TypeHint<Tuple2<Integer, Integer>>() {
            });

    final KeyedSerializationSchema<Tuple2<Integer, Integer>> serSchema = new KeyedSerializationSchemaWrapper<>(
            new TypeInformationSerializationSchema<>(resultType, new ExecutionConfig()));

    final KeyedDeserializationSchema<Tuple2<Integer, Integer>> deserSchema = new KeyedDeserializationSchemaWrapper<>(
            new TypeInformationSerializationSchema<>(resultType, new ExecutionConfig()));

    // setup and run the latest-consuming job
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.createRemoteEnvironment("localhost",
            flinkPort);
    env.getConfig().disableSysoutLogging();
    env.setParallelism(parallelism);

    final Properties readProps = new Properties();
    readProps.putAll(standardProps);
    readProps.setProperty("auto.offset.reset", "earliest"); // this should be ignored

    FlinkKafkaConsumerBase<Tuple2<Integer, Integer>> latestReadingConsumer = kafkaServer.getConsumer(topicName,
            deserSchema, readProps);
    latestReadingConsumer.setStartFromLatest();

    env.addSource(latestReadingConsumer).setParallelism(parallelism)
            .flatMap(new FlatMapFunction<Tuple2<Integer, Integer>, Object>() {
                @Override
                public void flatMap(Tuple2<Integer, Integer> value, Collector<Object> out) throws Exception {
                    if (value.f1 - recordsInEachPartition < 0) {
                        throw new RuntimeException(
                                "test failed; consumed a record that was previously written: " + value);
                    }
                }
            }).setParallelism(1).addSink(new DiscardingSink<>());

    final AtomicReference<Throwable> error = new AtomicReference<>();
    Thread consumeThread = new Thread(new Runnable() {
        @Override
        public void run() {
            try {
                env.execute(consumeExtraRecordsJobName);
            } catch (Throwable t) {
                if (!(t.getCause() instanceof JobCancellationException)) {
                    error.set(t);
                }
            }
        }
    });
    consumeThread.start();

    // wait until the consuming job has started, to be extra safe
    JobManagerCommunicationUtils.waitUntilJobIsRunning(flink.getLeaderGateway(timeout),
            consumeExtraRecordsJobName);

    // setup the extra records writing job
    final StreamExecutionEnvironment env2 = StreamExecutionEnvironment.createRemoteEnvironment("localhost",
            flinkPort);

    DataStream<Tuple2<Integer, Integer>> extraRecordsStream = env2
            .addSource(new RichParallelSourceFunction<Tuple2<Integer, Integer>>() {

                private boolean running = true;

                @Override
                public void run(SourceContext<Tuple2<Integer, Integer>> ctx) throws Exception {
                    int count = recordsInEachPartition; // the extra records should start from the last written value
                    int partition = getRuntimeContext().getIndexOfThisSubtask();

                    while (running && count < recordsInEachPartition + extraRecordsInEachPartition) {
                        ctx.collect(new Tuple2<>(partition, count));
                        count++;
                    }
                }

                @Override
                public void cancel() {
                    running = false;
                }
            }).setParallelism(parallelism);

    kafkaServer.produceIntoKafka(extraRecordsStream, topicName, serSchema, readProps, null);

    try {
        env2.execute(writeExtraRecordsJobName);
    } catch (Exception e) {
        throw new RuntimeException("Writing extra records failed", e);
    }

    // cancel the consume job after all extra records are written
    JobManagerCommunicationUtils.cancelCurrentJob(flink.getLeaderGateway(timeout), consumeExtraRecordsJobName);
    consumeThread.join();

    kafkaOffsetHandler.close();
    deleteTestTopic(topicName);

    // check whether the consuming thread threw any test errors;
    // test will fail here if the consume job had incorrectly read any records other than the extra records
    final Throwable consumerError = error.get();
    if (consumerError != null) {
        throw new Exception("Exception in the consuming thread", consumerError);
    }
}

From source file:com.twitter.distributedlog.lock.TestZKSessionLock.java

private void testLockWhenSiblingUseDifferentLockId(long timeout, final boolean isUnlock) throws Exception {
    String lockPath = "/test-lock-when-sibling-use-different-lock-id-" + timeout + "-" + isUnlock + "-"
            + System.currentTimeMillis();
    String clientId0 = "client-id-0";
    String clientId1 = "client-id-1";

    createLockPath(zkc.get(), lockPath);

    final ZKSessionLock lock0_0 = new ZKSessionLock(zkc0, lockPath, clientId0, lockStateExecutor);
    final ZKSessionLock lock0_1 = new ZKSessionLock(zkc0, lockPath, clientId0, lockStateExecutor);
    final ZKSessionLock lock1 = new ZKSessionLock(zkc, lockPath, clientId1, lockStateExecutor);

    lock0_0.tryLock(Long.MAX_VALUE, TimeUnit.MILLISECONDS);

    // lock1 waits for the lock ownership.
    final CountDownLatch lock1DoneLatch = new CountDownLatch(1);
    Thread lock1Thread = new Thread(new Runnable() {
        @Override
        public void run() {
            try {
                lock1.tryLock(Long.MAX_VALUE, TimeUnit.MILLISECONDS);
                lock1DoneLatch.countDown();
            } catch (LockingException e) {
                logger.error("Failed on locking lock1 : ", e);
            }
        }
    }, "lock1-thread");
    lock1Thread.start();

    // check lock1 is waiting for lock0_0
    List<String> children = awaitWaiters(2, zkc, lockPath);

    assertEquals(2, children.size());
    assertEquals(State.CLAIMED, lock0_0.getLockState());
    assertEquals(lock0_0.getLockId(), Await.result(asyncParseClientID(zkc0.get(), lockPath, children.get(0))));
    awaitState(State.WAITING, lock1);
    assertEquals(lock1.getLockId(), Await.result(asyncParseClientID(zkc.get(), lockPath, children.get(1))));

    final CountDownLatch lock0DoneLatch = new CountDownLatch(1);
    final AtomicReference<String> ownerFromLock0 = new AtomicReference<String>(null);
    Thread lock0Thread = null;
    if (timeout == 0) {
        try {
            lock0_1.tryLock(0, TimeUnit.MILLISECONDS);
            fail("Should fail on locking if sibling is using differnt lock id.");
        } catch (OwnershipAcquireFailedException oafe) {
            assertEquals(clientId0, oafe.getCurrentOwner());
        }
        assertEquals(State.CLOSED, lock0_1.getLockState());
        children = getLockWaiters(zkc, lockPath);
        assertEquals(2, children.size());
        assertEquals(State.CLAIMED, lock0_0.getLockState());
        assertEquals(lock0_0.getLockId(),
                Await.result(asyncParseClientID(zkc0.get(), lockPath, children.get(0))));
        assertEquals(State.WAITING, lock1.getLockState());
        assertEquals(lock1.getLockId(), Await.result(asyncParseClientID(zkc.get(), lockPath, children.get(1))));
    } else {
        lock0Thread = new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    lock0_1.tryLock(Long.MAX_VALUE, TimeUnit.MILLISECONDS);
                    if (isUnlock) {
                        lock0DoneLatch.countDown();
                    }
                } catch (OwnershipAcquireFailedException oafe) {
                    if (!isUnlock) {
                        ownerFromLock0.set(oafe.getCurrentOwner());
                        lock0DoneLatch.countDown();
                    }
                } catch (LockingException le) {
                    logger.error("Failed on locking lock0_1 : ", le);
                }
            }
        }, "lock0-thread");
        lock0Thread.start();

        // check lock1 and lock0_1 are waiting for lock0_0
        children = awaitWaiters(3, zkc, lockPath);

        assertEquals(3, children.size());
        assertEquals(State.CLAIMED, lock0_0.getLockState());
        assertEquals(lock0_0.getLockId(),
                Await.result(asyncParseClientID(zkc0.get(), lockPath, children.get(0))));
        awaitState(State.WAITING, lock1);
        assertEquals(lock1.getLockId(), Await.result(asyncParseClientID(zkc.get(), lockPath, children.get(1))));
        awaitState(State.WAITING, lock0_1);
        assertEquals(lock0_1.getLockId(),
                Await.result(asyncParseClientID(zkc0.get(), lockPath, children.get(2))));
    }

    if (isUnlock) {
        lock0_0.unlock();
    } else {
        ZooKeeperClientUtils.expireSession(zkc0, zkServers, sessionTimeoutMs);
    }

    lock1DoneLatch.await();
    lock1Thread.join();

    // check the state of lock0_0
    if (isUnlock) {
        assertEquals(State.CLOSED, lock0_0.getLockState());
    } else {
        assertEquals(State.EXPIRED, lock0_0.getLockState());
    }

    if (timeout == 0) {
        children = getLockWaiters(zkc, lockPath);
        assertEquals(1, children.size());
        assertEquals(State.CLAIMED, lock1.getLockState());
        assertEquals(lock1.getLockId(), Await.result(asyncParseClientID(zkc.get(), lockPath, children.get(0))));
    } else {
        assertNotNull(lock0Thread);
        if (!isUnlock) {
            // both lock0_0 and lock0_1 would be expired
            lock0DoneLatch.await();
            lock0Thread.join();

            assertEquals(clientId0, ownerFromLock0.get());
            assertEquals(State.CLOSED, lock0_1.getLockState());

            children = getLockWaiters(zkc, lockPath);
            assertEquals(1, children.size());
            assertEquals(State.CLAIMED, lock1.getLockState());
            assertEquals(lock1.getLockId(),
                    Await.result(asyncParseClientID(zkc.get(), lockPath, children.get(0))));
        } else {
            children = getLockWaiters(zkc, lockPath);
            assertEquals(2, children.size());
            assertEquals(State.CLAIMED, lock1.getLockState());
            assertEquals(lock1.getLockId(),
                    Await.result(asyncParseClientID(zkc.get(), lockPath, children.get(0))));
            assertEquals(State.WAITING, lock0_1.getLockState());
            assertEquals(lock0_1.getLockId(),
                    Await.result(asyncParseClientID(zkc0.get(), lockPath, children.get(1))));
        }
    }

    lock1.unlock();

    if (timeout != 0 && isUnlock) {
        lock0DoneLatch.await();
        lock0Thread.join();

        children = getLockWaiters(zkc, lockPath);
        assertEquals(1, children.size());
        assertEquals(State.CLAIMED, lock0_1.getLockState());
        assertEquals(lock0_1.getLockId(),
                Await.result(asyncParseClientID(zkc0.get(), lockPath, children.get(0))));
    }
}

From source file:it.anyplace.sync.bep.BlockPusher.java

public FileUploadObserver pushFile(final DataSource dataSource, @Nullable FileInfo fileInfo,
        final String folder, final String path) {
    checkArgument(connectionHandler.hasFolder(folder),
            "supplied connection handler %s will not share folder %s", connectionHandler, folder);
    checkArgument(fileInfo == null || equal(fileInfo.getFolder(), folder));
    checkArgument(fileInfo == null || equal(fileInfo.getPath(), path));
    try {
        final ExecutorService monitoringProcessExecutorService = Executors.newCachedThreadPool();
        final long fileSize = dataSource.getSize();
        final Set<String> sentBlocks = Sets.newConcurrentHashSet();
        final AtomicReference<Exception> uploadError = new AtomicReference<>();
        final AtomicBoolean isCompleted = new AtomicBoolean(false);
        final Object updateLock = new Object();
        final Object listener = new Object() {
            @Subscribe
            public void handleRequestMessageReceivedEvent(RequestMessageReceivedEvent event) {
                BlockExchageProtos.Request request = event.getMessage();
                if (equal(request.getFolder(), folder) && equal(request.getName(), path)) {
                    try {
                        final String hash = BaseEncoding.base16().encode(request.getHash().toByteArray());
                        logger.debug("handling block request = {}:{}-{} ({})", request.getName(),
                                request.getOffset(), request.getSize(), hash);
                        byte[] data = dataSource.getBlock(request.getOffset(), request.getSize(), hash);
                        checkNotNull(data, "data not found for hash = %s", hash);
                        final Future future = connectionHandler.sendMessage(
                                Response.newBuilder().setCode(BlockExchageProtos.ErrorCode.NO_ERROR)
                                        .setData(ByteString.copyFrom(data)).setId(request.getId()).build());
                        monitoringProcessExecutorService.submit(new Runnable() {
                            @Override
                            public void run() {
                                try {
                                    future.get();
                                    sentBlocks.add(hash);
                                    synchronized (updateLock) {
                                        updateLock.notifyAll();
                                    }
                                    //TODO retry on error, register error and throw on watcher
                                } catch (InterruptedException ex) {
                                    //return and do nothing
                                } catch (ExecutionException ex) {
                                    uploadError.set(ex);
                                    synchronized (updateLock) {
                                        updateLock.notifyAll();
                                    }
                                }
                            }
                        });
                    } catch (Exception ex) {
                        logger.error("error handling block request", ex);
                        connectionHandler.sendMessage(Response.newBuilder()
                                .setCode(BlockExchageProtos.ErrorCode.GENERIC).setId(request.getId()).build());
                        uploadError.set(ex);
                        synchronized (updateLock) {
                            updateLock.notifyAll();
                        }
                    }
                }
            }
        };
        connectionHandler.getEventBus().register(listener);
        logger.debug("send index update for file = {}", path);
        final Object indexListener = new Object() {

            @Subscribe
            public void handleIndexRecordAquiredEvent(IndexHandler.IndexRecordAquiredEvent event) {
                if (equal(event.getFolder(), folder)) {
                    for (FileInfo fileInfo : event.getNewRecords()) {
                        if (equal(fileInfo.getPath(), path)
                                && equal(fileInfo.getHash(), dataSource.getHash())) { //TODO check not invalid
                            //                                sentBlocks.addAll(dataSource.getHashes());
                            isCompleted.set(true);
                            synchronized (updateLock) {
                                updateLock.notifyAll();
                            }
                        }
                    }
                }
            }
        };
        if (indexHandler != null) {
            indexHandler.getEventBus().register(indexListener);
        }
        final IndexUpdate indexUpdate = sendIndexUpdate(folder,
                BlockExchageProtos.FileInfo.newBuilder().setName(path).setSize(fileSize)
                        .setType(BlockExchageProtos.FileInfoType.FILE).addAllBlocks(dataSource.getBlocks()),
                fileInfo == null ? null : fileInfo.getVersionList()).getRight();
        final FileUploadObserver messageUploadObserver = new FileUploadObserver() {
            @Override
            public void close() {
                logger.debug("closing upload process");
                try {
                    connectionHandler.getEventBus().unregister(listener);
                    monitoringProcessExecutorService.shutdown();
                    if (indexHandler != null) {
                        indexHandler.getEventBus().unregister(indexListener);
                    }
                } catch (Exception ex) {
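                    // intentionally ignored: failures while unregistering listeners
                    // or shutting down the executor during close are not relevant here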
                }
                if (closeConnection && connectionHandler != null) {
                    connectionHandler.close();
                }
                if (indexHandler != null) {
                    FileInfo fileInfo = indexHandler.pushRecord(indexUpdate.getFolder(),
                            Iterables.getOnlyElement(indexUpdate.getFilesList()));
                    logger.info("sent file info record = {}", fileInfo);
                }
            }

            @Override
            public double getProgress() {
                return isCompleted() ? 1d : sentBlocks.size() / ((double) dataSource.getHashes().size());
            }

            @Override
            public String getProgressMessage() {
                return (Math.round(getProgress() * 1000d) / 10d) + "% " + sentBlocks.size() + "/"
                        + dataSource.getHashes().size();
            }

            @Override
            public boolean isCompleted() {
                //                    return sentBlocks.size() == dataSource.getHashes().size();
                return isCompleted.get();
            }

            @Override
            public double waitForProgressUpdate() throws InterruptedException {
                synchronized (updateLock) {
                    updateLock.wait();
                }
                if (uploadError.get() != null) {
                    throw new RuntimeException(uploadError.get());
                }
                return getProgress();
            }

            @Override
            public DataSource getDataSource() {
                return dataSource;
            }

        };
        return messageUploadObserver;
    } catch (Exception ex) {
        throw new RuntimeException(ex);
    }
}

From source file:org.apache.distributedlog.lock.TestZKSessionLock.java

private void testLockWhenSiblingUseDifferentLockId(long timeout, final boolean isUnlock) throws Exception {
    String lockPath = "/test-lock-when-sibling-use-different-lock-id-" + timeout + "-" + isUnlock + "-"
            + System.currentTimeMillis();
    String clientId0 = "client-id-0";
    String clientId1 = "client-id-1";

    createLockPath(zkc.get(), lockPath);

    final ZKSessionLock lock0_0 = new ZKSessionLock(zkc0, lockPath, clientId0, lockStateExecutor);
    final ZKSessionLock lock0_1 = new ZKSessionLock(zkc0, lockPath, clientId0, lockStateExecutor);
    final ZKSessionLock lock1 = new ZKSessionLock(zkc, lockPath, clientId1, lockStateExecutor);

    lock0_0.tryLock(Long.MAX_VALUE, TimeUnit.MILLISECONDS);

    // lock1 waits for the lock ownership.
    final CountDownLatch lock1DoneLatch = new CountDownLatch(1);
    Thread lock1Thread = new Thread(new Runnable() {
        @Override
        public void run() {
            try {
                lock1.tryLock(Long.MAX_VALUE, TimeUnit.MILLISECONDS);
                lock1DoneLatch.countDown();
            } catch (LockingException e) {
                logger.error("Failed on locking lock1 : ", e);
            }
        }
    }, "lock1-thread");
    lock1Thread.start();

    // check lock1 is waiting for lock0_0
    List<String> children = awaitWaiters(2, zkc, lockPath);

    assertEquals(2, children.size());
    assertEquals(State.CLAIMED, lock0_0.getLockState());
    assertEquals(lock0_0.getLockId(),
            Utils.ioResult(asyncParseClientID(zkc0.get(), lockPath, children.get(0))));
    awaitState(State.WAITING, lock1);
    assertEquals(lock1.getLockId(), Utils.ioResult(asyncParseClientID(zkc.get(), lockPath, children.get(1))));

    final CountDownLatch lock0DoneLatch = new CountDownLatch(1);
    final AtomicReference<String> ownerFromLock0 = new AtomicReference<String>(null);
    Thread lock0Thread = null;
    if (timeout == 0) {
        try {
            lock0_1.tryLock(0, TimeUnit.MILLISECONDS);
            fail("Should fail on locking if sibling is using differnt lock id.");
        } catch (OwnershipAcquireFailedException oafe) {
            assertEquals(clientId0, oafe.getCurrentOwner());
        }
        assertEquals(State.CLOSED, lock0_1.getLockState());
        children = getLockWaiters(zkc, lockPath);
        assertEquals(2, children.size());
        assertEquals(State.CLAIMED, lock0_0.getLockState());
        assertEquals(lock0_0.getLockId(),
                Utils.ioResult(asyncParseClientID(zkc0.get(), lockPath, children.get(0))));
        assertEquals(State.WAITING, lock1.getLockState());
        assertEquals(lock1.getLockId(),
                Utils.ioResult(asyncParseClientID(zkc.get(), lockPath, children.get(1))));
    } else {
        lock0Thread = new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    lock0_1.tryLock(Long.MAX_VALUE, TimeUnit.MILLISECONDS);
                    if (isUnlock) {
                        lock0DoneLatch.countDown();
                    }
                } catch (OwnershipAcquireFailedException oafe) {
                    if (!isUnlock) {
                        ownerFromLock0.set(oafe.getCurrentOwner());
                        lock0DoneLatch.countDown();
                    }
                } catch (LockingException le) {
                    logger.error("Failed on locking lock0_1 : ", le);
                }
            }
        }, "lock0-thread");
        lock0Thread.start();

        // check lock1 and lock0_1 are waiting for lock0_0
        children = awaitWaiters(3, zkc, lockPath);

        assertEquals(3, children.size());
        assertEquals(State.CLAIMED, lock0_0.getLockState());
        assertEquals(lock0_0.getLockId(),
                Utils.ioResult(asyncParseClientID(zkc0.get(), lockPath, children.get(0))));
        awaitState(State.WAITING, lock1);
        assertEquals(lock1.getLockId(),
                Utils.ioResult(asyncParseClientID(zkc.get(), lockPath, children.get(1))));
        awaitState(State.WAITING, lock0_1);
        assertEquals(lock0_1.getLockId(),
                Utils.ioResult(asyncParseClientID(zkc0.get(), lockPath, children.get(2))));
    }

    if (isUnlock) {
        lock0_0.unlock();
    } else {
        ZooKeeperClientUtils.expireSession(zkc0, zkServers, sessionTimeoutMs);
    }

    lock1DoneLatch.await();
    lock1Thread.join();

    // check the state of lock0_0
    if (isUnlock) {
        assertEquals(State.CLOSED, lock0_0.getLockState());
    } else {
        assertEquals(State.EXPIRED, lock0_0.getLockState());
    }

    if (timeout == 0) {
        children = getLockWaiters(zkc, lockPath);
        assertEquals(1, children.size());
        assertEquals(State.CLAIMED, lock1.getLockState());
        assertEquals(lock1.getLockId(),
                Utils.ioResult(asyncParseClientID(zkc.get(), lockPath, children.get(0))));
    } else {
        assertNotNull(lock0Thread);
        if (!isUnlock) {
            // both lock0_0 and lock0_1 would be expired
            lock0DoneLatch.await();
            lock0Thread.join();

            assertEquals(clientId0, ownerFromLock0.get());
            assertEquals(State.CLOSED, lock0_1.getLockState());

            children = getLockWaiters(zkc, lockPath);
            assertEquals(1, children.size());
            assertEquals(State.CLAIMED, lock1.getLockState());
            assertEquals(lock1.getLockId(),
                    Utils.ioResult(asyncParseClientID(zkc.get(), lockPath, children.get(0))));
        } else {
            children = getLockWaiters(zkc, lockPath);
            assertEquals(2, children.size());
            assertEquals(State.CLAIMED, lock1.getLockState());
            assertEquals(lock1.getLockId(),
                    Utils.ioResult(asyncParseClientID(zkc.get(), lockPath, children.get(0))));
            assertEquals(State.WAITING, lock0_1.getLockState());
            assertEquals(lock0_1.getLockId(),
                    Utils.ioResult(asyncParseClientID(zkc0.get(), lockPath, children.get(1))));
        }
    }

    lock1.unlock();

    if (timeout != 0 && isUnlock) {
        lock0DoneLatch.await();
        lock0Thread.join();

        children = getLockWaiters(zkc, lockPath);
        assertEquals(1, children.size());
        assertEquals(State.CLAIMED, lock0_1.getLockState());
        assertEquals(lock0_1.getLockId(),
                Utils.ioResult(asyncParseClientID(zkc0.get(), lockPath, children.get(0))));
    }
}

From source file:android.webkit.cts.WebViewTest.java

private Picture waitForPictureToHaveColor(int color, final TestPictureListener listener) throws Throwable {
    final int MAX_ON_NEW_PICTURE_ITERATIONS = 5;
    final AtomicReference<Picture> pictureRef = new AtomicReference<Picture>();
    for (int i = 0; i < MAX_ON_NEW_PICTURE_ITERATIONS; i++) {
        final int oldCallCount = listener.callCount;
        runTestOnUiThread(new Runnable() {
            @Override
            public void run() {
                pictureRef.set(mWebView.capturePicture());
            }
        });
        if (isPictureFilledWithColor(pictureRef.get(), color))
            break;
        new PollingCheck(TEST_TIMEOUT) {
            @Override
            protected boolean check() {
                return listener.callCount > oldCallCount;
            }
        }.run();
    }
    return pictureRef.get();
}

From source file:org.dspace.app.rest.WorkflowItemRestRepositoryIT.java

@Test
/**
 * Test the creation of a workflowitem by POSTing a workspaceitem (as a uri-list) to the workflowitems collection
 * endpoint. This corresponds to the deposit action done by the submitter.
 *
 * @throws Exception
 */
public void createWorkflowItemTest() throws Exception {
    context.turnOffAuthorisationSystem();

    // hold the id of the created workflow item
    AtomicReference<Integer> idRef = new AtomicReference<Integer>();
    try {
        //** GIVEN **
        //1. A community with one collection.
        parentCommunity = CommunityBuilder.createCommunity(context).withName("Parent Community").build();
        Collection col1 = CollectionBuilder.createCollection(context, parentCommunity).withName("Collection 1")
                .withWorkflowGroup(1, admin).build();

        //2. create a normal user to use as submitter
        EPerson submitter = EPersonBuilder.createEPerson(context).withEmail("submitter@example.com")
                .withPassword("dspace").build();

        context.setCurrentUser(submitter);

        //3. a workspace item
        WorkspaceItem wsitem = WorkspaceItemBuilder.createWorkspaceItem(context, col1)
                .withTitle("Submission Item").withIssueDate("2017-10-17").build();

        context.restoreAuthSystemState();

        // get the submitter auth token
        String authToken = getAuthToken(submitter.getEmail(), "dspace");

        // submit the workspaceitem to start the workflow
        getClient(authToken).perform(post(BASE_REST_SERVER_URL + "/api/workflow/workflowitems")
                .content("/api/submission/workspaceitems/" + wsitem.getID()).contentType(textUriContentType))
                .andExpect(status().isCreated())
                .andExpect(jsonPath("$",
                        WorkflowItemMatcher.matchItemWithTitleAndDateIssued(null, "Submission Item",
                                "2017-10-17")))
                .andDo(result -> idRef.set(read(result.getResponse().getContentAsString(), "$.id")));

        // check that the workflowitem is persisted
        getClient(authToken).perform(get("/api/workflow/workflowitems/" + idRef.get()))
                .andExpect(status().isOk()).andExpect(jsonPath("$", Matchers.is(WorkflowItemMatcher
                        .matchItemWithTitleAndDateIssued(null, "Submission Item", "2017-10-17"))));
    } finally {
        // remove the workflowitem if any
        WorkflowItemBuilder.deleteWorkflowItem(idRef.get());
    }
}

From source file:com.alibaba.wasp.master.FMaster.java

/**
 * @see com.alibaba.wasp.protobuf.generated.MasterAdminProtos.MasterAdminService.BlockingInterface#getEntityGroupWithScan(com.google.protobuf.RpcController,
 *      com.alibaba.wasp.protobuf.generated.MasterAdminProtos.GetEntityGroupWithScanRequest)
 */
@Override
public GetEntityGroupWithScanResponse getEntityGroupWithScan(RpcController controller,
        GetEntityGroupWithScanRequest request) throws ServiceException {
    byte[] tableNameOrEntityGroupName = request.getTableNameOrEntityGroupName().toByteArray();
    Pair<EntityGroupInfo, ServerName> pair;
    try {
        pair = FMetaReader.getEntityGroup(conf, tableNameOrEntityGroupName);
        if (pair == null) {
            final AtomicReference<Pair<EntityGroupInfo, ServerName>> result = new AtomicReference<Pair<EntityGroupInfo, ServerName>>(
                    null);
            final String encodedName = Bytes.toString(tableNameOrEntityGroupName);
            MetaScannerVisitor visitor = new MetaScannerVisitorBase() {
                @Override
                public boolean processRow(org.apache.hadoop.hbase.client.Result data) throws IOException {
                    EntityGroupInfo info = EntityGroupInfo.getEntityGroupInfo(data);
                    if (info == null) {
                        LOG.warn("No serialized EntityGroupInfo in " + data);
                        return true;
                    }
                    if (!encodedName.equals(info.getEncodedName())) {
                        return true;
                    }
                    ServerName sn = EntityGroupInfo.getServerName(data);
                    result.set(new Pair<EntityGroupInfo, ServerName>(info, sn));
                    return false; // found the entityGroup, stop
                }
            };

            FMetaScanner.metaScan(conf, visitor);
            pair = result.get();
        }
        GetEntityGroupWithScanResponse.Builder builder = GetEntityGroupWithScanResponse.newBuilder();
        if (pair != null) {
            if (pair.getFirst() != null) {
                builder.setEgInfo(pair.getFirst().convert());
            }
            if (pair.getSecond() != null) {
                builder.setServerName(pair.getSecond().convert());
            }
        }
        return builder.build();
    } catch (Exception e) {
        LOG.error("Failed getEntityGroupWithScan.", e);
        throw new ServiceException(e);
    }
}

From source file:com.datamelt.nifi.processors.ExecuteRuleEngine.java

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    // map used to store the attribute name and its value from the content of the flow file
    final Map<String, String> propertyMap = new HashMap<>();

    // get a logger instance
    final ComponentLog logger = getLogger();

    // holds the header row from the flow file content, if present
    final AtomicReference<HeaderRow> header = new AtomicReference<>();

    AtomicBoolean error = new AtomicBoolean();

    // get the flow file
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    // list of rows from splitting the original flow file content
    ArrayList<RuleEngineRow> flowFileRows = new ArrayList<RuleEngineRow>();

    // list of rows containing the detailed results of the ruleengine
    ArrayList<RuleEngineRow> flowFileDetails = new ArrayList<RuleEngineRow>();

    boolean headerPresent = context.getProperty(ATTRIBUTE_HEADER_PRESENT).getValue().equals("true");

    // put the name of the ruleengine zip file in the list of properties
    propertyMap.put(PROPERTY_RULEENGINE_ZIPFILE_NAME,
            context.getProperty(ATTRIBUTE_RULEENGINE_ZIPFILE).getValue());

    final int batchSize = Integer.parseInt(context.getProperty(BATCH_SIZE_NAME).getValue());

    // read flow file into input stream
    session.read(flowFile, new InputStreamCallback() {
        public void process(InputStream in) throws IOException {
            try {
                // iterator over the lines from the input stream
                LineIterator iterator = IOUtils.lineIterator(in, "utf-8");

                // check if configuration indicates that a header row is present in the flow file content
                if (headerPresent) {
                    logger.debug("configuration indicates a header row is present in flow file content");

                    // if there is at least one row of data and the header is not defined yet
                    if (iterator.hasNext() && header.get() == null) {
                        // set the header from the content
                        header.set(new HeaderRow(iterator.nextLine(), separator));
                    }
                }
                // if no header row is present in the flow file content
                else {
                    logger.debug("configuration indicates no header row is present in flow file content");

                    // use the header from the field names
                    header.set(headerFromFieldNames);
                }

                // loop over all rows of data
                while (iterator.hasNext()) {
                    // we handle the error per row of data
                    error.set(false);

                    // get a row to process
                    String row = iterator.nextLine();

                    // check that we have data
                    if (row != null && !row.trim().equals("")) {
                        RowFieldCollection rowFieldCollection = null;
                        try {
                            rowFieldCollection = getRowFieldCollection(row, header.get());

                            logger.debug("RowFieldCollection header contains: "
                                    + rowFieldCollection.getHeader().getNumberOfFields() + " fields");
                            logger.debug("RowFieldCollection contains: "
                                    + rowFieldCollection.getNumberOfFields() + " fields");

                            // run the ruleengine with the given data from the flow file
                            logger.debug("running business ruleengine...");

                            // run the business logic/rules against the data
                            ruleEngine.run("flowfile", rowFieldCollection);

                            // add some debugging output that might be useful
                            logger.debug("number of rulegroups: " + ruleEngine.getNumberOfGroups());
                            logger.debug(
                                    "number of rulegroups passed: " + ruleEngine.getNumberOfGroupsPassed());
                            logger.debug(
                                    "number of rulegroups failed: " + ruleEngine.getNumberOfGroupsFailed());
                            logger.debug(
                                    "number of rulegroups skipped: " + ruleEngine.getNumberOfGroupsSkipped());
                            logger.debug("number of rules: " + ruleEngine.getNumberOfRules());
                            logger.debug("number of rules passed: " + ruleEngine.getNumberOfRulesPassed());
                            logger.debug("number of rules failed: " + ruleEngine.getNumberOfRulesFailed());
                            logger.debug("number of actions: " + ruleEngine.getNumberOfActions());

                            // add some properties of the ruleengine execution to the map
                            addRuleEngineProperties(propertyMap);
                        } catch (Exception ex) {
                            error.set(true);
                            logger.error(ex.getMessage(), ex);
                        }

                        // if no error occurred we save the data for the creation of the flow files
                        if (!error.get()) {
                            // process only if the collection of fields was changed by
                            // a ruleengine action. this means the data was updated so
                            // we will have to re-write/re-create the flow file content.
                            if (rowFieldCollection.isCollectionUpdated()) {
                                // put an indicator that the data was modified by the ruleengine
                                propertyMap.put(PROPERTY_RULEENGINE_CONTENT_MODIFIED, "true");

                                logger.debug(
                                        "data was modified - updating flow file content with ruleengine results");

                                // the RuleEngineRow instance will contain the row of data and the map of properties
                                // and will later be used when the flow files are created
                                flowFileRows
                                        .add(new RuleEngineRow(getResultRow(rowFieldCollection), propertyMap));
                            } else {
                                // put an indicator that the data was NOT modified by the ruleengine
                                propertyMap.put(PROPERTY_RULEENGINE_CONTENT_MODIFIED, "false");

                                logger.debug("data was not modified - using original content");

                                // the RuleEngineRow instance will contain the row of data and the map of properties
                                // and will later be used when the flow files are created
                                flowFileRows.add(new RuleEngineRow(row, propertyMap));
                            }

                            if (flowFileRows.size() >= batchSize) {
                                // generate flow files from the individual rows
                                List<FlowFile> splitFlowFiles = generateFlowFileSplits(context, session,
                                        flowFileRows, header.get(), headerPresent);
                                // transfer all individual rows to success relationship
                                if (splitFlowFiles.size() > 0) {
                                    session.transfer(splitFlowFiles, SUCCESS);
                                }
                            }

                            // if the user configured detailed results 
                            if (context.getProperty(ATTRIBUTE_OUTPUT_DETAILED_RESULTS).getValue()
                                    .equals("true")) {
                                // get the configured output type
                                String outputType = context.getProperty(ATTRIBUTE_OUTPUT_DETAILED_RESULTS_TYPE)
                                        .getValue();
                                logger.debug("configuration set to output detailed results with type ["
                                        + outputType + "]");

                                // we need to create a flow file only, if the ruleengine results are according to the output type settings
                                if (outputType.equals(OUTPUT_TYPE_ALL_GROUPS_ALL_RULES)
                                        || (outputType.equals(OUTPUT_TYPE_FAILED_GROUPS_ALL_RULES)
                                                && ruleEngine.getNumberOfGroupsFailed() > 0)
                                        || (outputType.equals(OUTPUT_TYPE_FAILED_GROUPS_FAILED_RULES)
                                                && ruleEngine.getNumberOfGroupsFailed() > 0)
                                        || (outputType.equals(OUTPUT_TYPE_FAILED_GROUPS_PASSED_RULES)
                                                && ruleEngine.getNumberOfGroupsFailed() > 0)
                                        || (outputType.equals(OUTPUT_TYPE_PASSED_GROUPS_ALL_RULES)
                                                && ruleEngine.getNumberOfGroupsPassed() > 0)
                                        || (outputType.equals(OUTPUT_TYPE_PASSED_GROUPS_FAILED_RULES)
                                                && ruleEngine.getNumberOfGroupsPassed() > 0
                                                || (outputType.equals(OUTPUT_TYPE_PASSED_GROUPS_PASSED_RULES)
                                                        && ruleEngine.getNumberOfGroupsPassed() > 0))) {
                                    // create the content for the flow file
                                    String content = getFlowFileRuleEngineDetailsContent(header.get(),
                                            headerPresent, outputType, row);

                                    // add results to the list
                                    flowFileDetails.add(new RuleEngineRow(content, propertyMap));

                                    if (flowFileDetails.size() >= batchSize) {
                                        List<FlowFile> detailsFlowFiles = generateFlowFilesRuleEngineDetails(
                                                context, session, flowFileDetails, header.get(), headerPresent);
                                        // transfer all individual rows to detailed relationship
                                        if (detailsFlowFiles.size() > 0) {
                                            session.transfer(detailsFlowFiles, DETAILED_RESULTS);
                                        }
                                    }
                                }
                            }
                            // clear the collections of ruleengine results
                            ruleEngine.getRuleExecutionCollection().clear();
                        }
                        // if we have an error we create a flow file from the current row of data and send it to the failure relationship
                        else {
                            FlowFile failureFlowFile = generateFailureFlowFile(context, session, row,
                                    header.get(), headerPresent);
                            session.transfer(failureFlowFile, FAILURE);
                        }
                    }
                }

                LineIterator.closeQuietly(iterator);
            } catch (Exception ex) {
                ex.printStackTrace();
                logger.error("error running the business ruleengine", ex);
            }
        }
    });

    // generate flow files from the individual rows
    List<FlowFile> splitFlowFiles = generateFlowFileSplits(context, session, flowFileRows, header.get(),
            headerPresent);

    // generate flow files for the detailed ruleengine results
    List<FlowFile> detailsFlowFiles = generateFlowFilesRuleEngineDetails(context, session, flowFileDetails,
            header.get(), headerPresent);

    // transfer the original flow file
    session.transfer(flowFile, ORIGINAL);

    // transfer all individual rows to success relationship
    if (splitFlowFiles.size() > 0) {
        session.transfer(splitFlowFiles, SUCCESS);
    }

    // transfer all detailed result rows to the detailed results relationship
    if (detailsFlowFiles.size() > 0) {
        session.transfer(detailsFlowFiles, DETAILED_RESULTS);
    }
}

From source file:org.apache.bookkeeper.mledger.impl.ManagedLedgerTest.java

/**
 * It verifies that asyncRead times out if it doesn't receive a response from the bk-client within the configured timeout.
 *
 * @throws Exception
 */
@Test
public void testManagedLedgerWithReadEntryTimeOut() throws Exception {
    ManagedLedgerConfig config = new ManagedLedgerConfig().setReadEntryTimeoutSeconds(1);
    ManagedLedgerImpl ledger = (ManagedLedgerImpl) factory.open("timeout_ledger_test", config);

    BookKeeper bk = mock(BookKeeper.class);
    doNothing().when(bk).asyncCreateLedger(anyInt(), anyInt(), anyInt(), any(), any(), any(), any(), any());
    AtomicReference<ManagedLedgerException> responseException1 = new AtomicReference<>();
    CountDownLatch latch1 = new CountDownLatch(1);

    CompletableFuture<LedgerEntries> entriesFuture = new CompletableFuture<>();
    ReadHandle ledgerHandle = mock(ReadHandle.class);
    doReturn(entriesFuture).when(ledgerHandle).readAsync(PositionImpl.earliest.getLedgerId(),
            PositionImpl.earliest.getEntryId());

    // (1) test read-timeout for: ManagedLedger.asyncReadEntry(..)
    ledger.asyncReadEntry(ledgerHandle, PositionImpl.earliest, new ReadEntryCallback() {
        @Override
        public void readEntryComplete(Entry entry, Object ctx) {
            responseException1.set(null);
            latch1.countDown();
        }

        @Override
        public void readEntryFailed(ManagedLedgerException exception, Object ctx) {
            responseException1.set(exception);
            latch1.countDown();
        }
    }, null);
    ledger.asyncCreateLedger(bk, config, null, new CreateCallback() {
        @Override
        public void createComplete(int rc, LedgerHandle lh, Object ctx) {
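            // no-op callback: this test only exercises read timeouts, so the ledger creation result is ignored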

        }
    }, Collections.emptyMap());
    latch1.await(config.getReadEntryTimeoutSeconds() + 2, TimeUnit.SECONDS);
    assertNotNull(responseException1.get());
    assertEquals(responseException1.get().getMessage(),
            BKException.getMessage(BKException.Code.TimeoutException));

    // (2) test read-timeout for: ManagedLedger.asyncReadEntry(..)
    CountDownLatch latch2 = new CountDownLatch(1);
    AtomicReference<ManagedLedgerException> responseException2 = new AtomicReference<>();
    PositionImpl readPositionRef = PositionImpl.earliest;
    ManagedCursorImpl cursor = new ManagedCursorImpl(bk, config, ledger, "cursor1");
    OpReadEntry opReadEntry = OpReadEntry.create(cursor, readPositionRef, 1, new ReadEntriesCallback() {

        @Override
        public void readEntriesComplete(List<Entry> entries, Object ctx) {
            latch2.countDown();
        }

        @Override
        public void readEntriesFailed(ManagedLedgerException exception, Object ctx) {
            responseException2.set(exception);
            latch2.countDown();
        }

    }, null);
    ledger.asyncReadEntry(ledgerHandle, PositionImpl.earliest.getEntryId(), PositionImpl.earliest.getEntryId(),
            false, opReadEntry, null);
    latch2.await(config.getReadEntryTimeoutSeconds() + 2, TimeUnit.SECONDS);
    assertNotNull(responseException2.get());
    assertEquals(responseException2.get().getMessage(),
            BKException.getMessage(BKException.Code.TimeoutException));

    ledger.close();
}