Example usage for java.util.concurrent.atomic.AtomicBoolean.get()

List of usage examples for java.util.concurrent.atomic.AtomicBoolean.get()

Introduction

On this page you can find example usage for java.util.concurrent.atomic.AtomicBoolean.get().

Prototype

public final boolean get() 

Document

Returns the current value, with memory effects as specified by VarHandle#getVolatile.
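
A minimal, self-contained sketch (not taken from any of the projects below) showing the common pattern: one thread sets the flag and another observes the change through get()'s volatile-read semantics.

import java.util.concurrent.atomic.AtomicBoolean;

public class AtomicBooleanGetExample {
    public static void main(String[] args) throws InterruptedException {
        final AtomicBoolean done = new AtomicBoolean(false);

        final Thread worker = new Thread(() -> {
            // ... perform some work, then publish completion ...
            done.set(true); // volatile write
        });
        worker.start();

        // get() is a volatile read, so the worker's write above is guaranteed
        // to become visible here; spin until it does.
        while (!done.get()) {
            Thread.onSpinWait(); // requires Java 9+
        }
        System.out.println("done = " + done.get()); // prints: done = true
        worker.join();
    }
}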

Usage

From source file:org.apache.tinkerpop.gremlin.structure.IoTest.java

@Test
@FeatureRequirement(featureClass = Graph.Features.EdgeFeatures.class, feature = Graph.Features.EdgeFeatures.FEATURE_ADD_EDGES)
@FeatureRequirement(featureClass = Graph.Features.VertexFeatures.class, feature = Graph.Features.VertexFeatures.FEATURE_ADD_VERTICES)
@FeatureRequirement(featureClass = VertexPropertyFeatures.class, feature = FEATURE_STRING_VALUES)
@FeatureRequirement(featureClass = EdgePropertyFeatures.class, feature = EdgePropertyFeatures.FEATURE_DOUBLE_VALUES)
@FeatureRequirement(featureClass = Graph.Features.VertexFeatures.class, feature = Graph.Features.VertexFeatures.FEATURE_MULTI_PROPERTIES)
@FeatureRequirement(featureClass = Graph.Features.VertexFeatures.class, feature = Graph.Features.VertexFeatures.FEATURE_META_PROPERTIES)
public void shouldReadWriteVertexMultiPropsNoEdgesToGraphSON() throws Exception {
    final Vertex v1 = g.addVertex("name", "marko", "name", "mark", "acl", "rw");
    v1.property("propsSquared", 123, "x", "a", "y", "b");
    final Vertex v2 = g.addVertex();
    v1.addEdge("friends", v2, "weight", 0.5d);

    try (final ByteArrayOutputStream os = new ByteArrayOutputStream()) {
        final GraphSONWriter writer = g.io().graphSONWriter().create();
        writer.writeVertex(os, v1);

        final AtomicBoolean called = new AtomicBoolean(false);
        final GraphSONReader reader = g.io().graphSONReader().create();
        try (final ByteArrayInputStream bais = new ByteArrayInputStream(os.toByteArray())) {
            reader.readVertex(bais, detachedVertex -> {
                assertEquals(v1.id().toString(), detachedVertex.id().toString()); // lossy
                assertEquals(v1.label(), detachedVertex.label());
                assertEquals(4, StreamFactory.stream(detachedVertex.iterators().propertyIterator()).count());
                assertEquals("a", detachedVertex.property("propsSquared").value("x"));
                assertEquals("b", detachedVertex.property("propsSquared").value("y"));
                assertEquals(2,
                        StreamFactory.stream(detachedVertex.iterators().propertyIterator("name")).count());
                assertTrue(StreamFactory.stream(detachedVertex.iterators().propertyIterator("name"))
                        .allMatch(p -> p.key().equals("name")
                                && (p.value().equals("marko") || p.value().equals("mark"))));
                assertEquals(v1.value("acl"), detachedVertex.value("acl").toString());
                called.set(true);
                return mock(Vertex.class);
            });
        }
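        // get() verifies that the read callback above actually ran and flipped the flag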
        assertTrue(called.get());
    }
}

From source file:org.apache.tinkerpop.gremlin.structure.IoTest.java

@Test
@FeatureRequirement(featureClass = Graph.Features.EdgeFeatures.class, feature = Graph.Features.EdgeFeatures.FEATURE_ADD_EDGES)
@FeatureRequirement(featureClass = Graph.Features.VertexFeatures.class, feature = Graph.Features.VertexFeatures.FEATURE_ADD_VERTICES)
@FeatureRequirement(featureClass = VertexPropertyFeatures.class, feature = FEATURE_STRING_VALUES)
@FeatureRequirement(featureClass = EdgePropertyFeatures.class, feature = EdgePropertyFeatures.FEATURE_DOUBLE_VALUES)
@FeatureRequirement(featureClass = Graph.Features.VertexFeatures.class, feature = Graph.Features.VertexFeatures.FEATURE_MULTI_PROPERTIES)
@FeatureRequirement(featureClass = Graph.Features.VertexFeatures.class, feature = Graph.Features.VertexFeatures.FEATURE_META_PROPERTIES)
public void shouldReadWriteVertexMultiPropsNoEdgesToGryo() throws Exception {
    final Vertex v1 = g.addVertex("name", "marko", "name", "mark", "acl", "rw");
    v1.property("propsSquared", 123, "x", "a", "y", "b");
    final Vertex v2 = g.addVertex();
    v1.addEdge("friends", v2, "weight", 0.5d);

    try (final ByteArrayOutputStream os = new ByteArrayOutputStream()) {
        final GryoWriter writer = g.io().gryoWriter().create();
        writer.writeVertex(os, v1);

        final AtomicBoolean called = new AtomicBoolean(false);
        final GryoReader reader = g.io().gryoReader().workingDirectory(File.separator + "tmp").create();
        try (final ByteArrayInputStream bais = new ByteArrayInputStream(os.toByteArray())) {
            reader.readVertex(bais, detachedVertex -> {
                assertEquals(v1.id(), detachedVertex.id());
                assertEquals(v1.label(), detachedVertex.label());
                assertEquals(4, StreamFactory.stream(detachedVertex.iterators().propertyIterator()).count());
                assertEquals("a", detachedVertex.property("propsSquared").value("x"));
                assertEquals("b", detachedVertex.property("propsSquared").value("y"));
                assertEquals(2,
                        StreamFactory.stream(detachedVertex.iterators().propertyIterator("name")).count());
                assertTrue(StreamFactory.stream(detachedVertex.iterators().propertyIterator("name"))
                        .allMatch(p -> p.key().equals("name")
                                && (p.value().equals("marko") || p.value().equals("mark"))));
                assertEquals(v1.value("acl"), detachedVertex.value("acl").toString());
                called.set(true);
                return mock(Vertex.class);
            });
        }
        assertTrue(called.get());
    }
}

From source file:com.spectralogic.ds3client.integration.GetJobManagement_Test.java

private void doReadJobWithJobStarter(final ReadJobStarter readJobStarter) throws IOException,
        URISyntaxException, NoSuchMethodException, IllegalAccessException, InvocationTargetException {
    final String tempPathPrefix = null;
    final Path tempDirectory = Files.createTempDirectory(Paths.get("."), tempPathPrefix);

    try {
        final String DIR_NAME = "largeFiles/";
        final String FILE_NAME = "lesmis.txt";

        final Path objPath = ResourceUtils.loadFileResource(DIR_NAME + FILE_NAME);
        final long bookSize = Files.size(objPath);
        final Ds3Object obj = new Ds3Object(FILE_NAME, bookSize);

        final Ds3ClientShim ds3ClientShim = new Ds3ClientShim((Ds3ClientImpl) client);

        final int maxNumBlockAllocationRetries = 1;
        final int maxNumObjectTransferAttempts = 3;
        final Ds3ClientHelpers ds3ClientHelpers = Ds3ClientHelpers.wrap(ds3ClientShim,
                maxNumBlockAllocationRetries, maxNumObjectTransferAttempts);

        final Ds3ClientHelpers.Job readJob = readJobStarter.startReadJob(ds3ClientHelpers, BUCKET_NAME,
                Arrays.asList(obj));

        final AtomicBoolean dataTransferredEventReceived = new AtomicBoolean(false);
        final AtomicBoolean objectCompletedEventReceived = new AtomicBoolean(false);
        final AtomicBoolean checksumEventReceived = new AtomicBoolean(false);
        final AtomicBoolean metadataEventReceived = new AtomicBoolean(false);
        final AtomicBoolean waitingForChunksEventReceived = new AtomicBoolean(false);
        final AtomicBoolean failureEventReceived = new AtomicBoolean(false);
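        // Each listener below flips its flag; the get() assertions after transfer() verify which events fired.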

        readJob.attachDataTransferredListener(new DataTransferredListener() {
            @Override
            public void dataTransferred(final long size) {
                dataTransferredEventReceived.set(true);
                assertEquals(bookSize, size);
            }
        });
        readJob.attachObjectCompletedListener(new ObjectCompletedListener() {
            @Override
            public void objectCompleted(final String name) {
                objectCompletedEventReceived.set(true);
            }
        });
        readJob.attachChecksumListener(new ChecksumListener() {
            @Override
            public void value(final BulkObject obj, final ChecksumType.Type type, final String checksum) {
                checksumEventReceived.set(true);
                assertEquals("69+JXWeZuzl2HFTM6Lbo8A==", checksum);
            }
        });
        readJob.attachMetadataReceivedListener(new MetadataReceivedListener() {
            @Override
            public void metadataReceived(final String filename, final Metadata metadata) {
                metadataEventReceived.set(true);
            }
        });
        readJob.attachWaitingForChunksListener(new WaitingForChunksListener() {
            @Override
            public void waiting(final int secondsToWait) {
                waitingForChunksEventReceived.set(true);
            }
        });
        readJob.attachFailureEventListener(new FailureEventListener() {
            @Override
            public void onFailure(final FailureEvent failureEvent) {
                failureEventReceived.set(true);
            }
        });

        readJob.transfer(new FileObjectGetter(tempDirectory));

        final File originalFile = ResourceUtils.loadFileResource(DIR_NAME + FILE_NAME).toFile();
        final File fileCopiedFromBP = Paths.get(tempDirectory.toString(), FILE_NAME).toFile();
        assertTrue(FileUtils.contentEquals(originalFile, fileCopiedFromBP));

        assertTrue(dataTransferredEventReceived.get());
        assertTrue(objectCompletedEventReceived.get());
        assertTrue(checksumEventReceived.get());
        assertTrue(metadataEventReceived.get());
        assertFalse(waitingForChunksEventReceived.get());
        assertFalse(failureEventReceived.get());
    } finally {
        FileUtils.deleteDirectory(tempDirectory.toFile());
    }
}

From source file:org.apache.nifi.processors.csv.ParseCSVRecord.java

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    final FlowFile original = session.get();
    if (original == null) {
        return;
    }

    final AtomicBoolean lineFound = new AtomicBoolean(false);
    final Map<String, String> outputAttrs = new HashMap<>();

    session.read(original, new InputStreamCallback() {
        @Override
        public void process(InputStream inputStream) throws IOException {
            final String fromAttribute = context.getProperty(PROP_RECORD_FROM_ATTRIBUTE).getValue();

            String unparsedRecord;
            // data source is the attribute
            if (StringUtils.isNotBlank(fromAttribute)) {
                unparsedRecord = original.getAttribute(fromAttribute);
                if (StringUtils.isBlank(unparsedRecord)) {
                    // will be routed to failure at the end of the method implementation
                    return;
                }
            } else {
                // data source is the content
                // TODO expose the charset property?
                LineIterator iterator = IOUtils.lineIterator(inputStream, UTF_8);
                if (!iterator.hasNext()) {
                    return;
                }
                unparsedRecord = iterator.next();
            }

            lineFound.set(true);
            final String format = context.getProperty(PROP_FORMAT).getValue();
            final String delimiter = context.getProperty(PROP_DELIMITER).evaluateAttributeExpressions(original)
                    .getValue();
            final String schemaPrefix = context.getProperty(PROP_SCHEMA_ATTR_PREFIX)
                    .evaluateAttributeExpressions(original).getValue();
            final String valuePrefix = context.getProperty(PROP_VALUE_ATTR_PREFIX)
                    .evaluateAttributeExpressions(original).getValue();
            final boolean trimValues = context.getProperty(PROP_TRIM_VALUES).asBoolean();

            final CSVFormat csvFormat = buildFormat(format, delimiter, false, // payload row, no longer a header
                    null); // no custom header

            final CSVParser parser = csvFormat.parse(new StringReader(unparsedRecord));
            List<CSVRecord> records = parser.getRecords();
            if (records.size() > 1) {
                // TODO revisit for NiFi's native micro-batching
                throw new ProcessException("Multi-line entries not supported");
            }

            CSVRecord record = records.get(0);

            Map<String, String> originalAttrs = original.getAttributes();
            // filter delimited schema attributes only
            Map<String, String> schemaAttrs = new HashMap<>();
            for (String key : originalAttrs.keySet()) {
                if (key.startsWith(schemaPrefix)) {
                    schemaAttrs.put(key, originalAttrs.get(key));
                }
            }

            // put key/value pairs into attributes
            for (int i = 0; i < record.size(); i++) {
                String columnName = schemaAttrs.get(schemaPrefix + (i + 1)); // 1-based column numbering
                if (columnName == null) {
                    // 1-based column index
                    columnName = String.valueOf(i + 1);
                }
                // TODO indexed schemaless parsing vs auto-schema vs user-provided schema
                String columnValue = record.get(i);
                if (trimValues) {
                    columnValue = columnValue.trim();
                }
                String attrName = (StringUtils.isBlank(valuePrefix) ? "delimited.column." : valuePrefix)
                        + columnName;
                outputAttrs.put(attrName, columnValue);
            }
        }
    });

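    // lineFound was set inside the read callback; get() decides how to route the FlowFile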
    if (lineFound.get()) {
        FlowFile ff = session.putAllAttributes(original, outputAttrs);
        session.transfer(ff, REL_SUCCESS);
    } else {
        session.transfer(original, REL_FAILURE);
    }
}

From source file:org.apache.tinkerpop.gremlin.structure.IoTest.java

@Test
@FeatureRequirement(featureClass = Graph.Features.EdgeFeatures.class, feature = Graph.Features.EdgeFeatures.FEATURE_ADD_EDGES)
@FeatureRequirement(featureClass = Graph.Features.VertexFeatures.class, feature = Graph.Features.VertexFeatures.FEATURE_ADD_VERTICES)
@FeatureRequirement(featureClass = EdgePropertyFeatures.class, feature = EdgePropertyFeatures.FEATURE_SERIALIZABLE_VALUES)
public void shouldSupportUUIDInGraphSON() throws Exception {
    final UUID id = UUID.randomUUID();
    final Vertex v1 = g.addVertex(T.label, "person");
    final Vertex v2 = g.addVertex(T.label, "person");
    final Edge e = v1.addEdge("friend", v2, "uuid", id);

    try (final ByteArrayOutputStream os = new ByteArrayOutputStream()) {
        final GraphSONWriter writer = g.io().graphSONWriter()
                .mapper(g.io().graphSONMapper().embedTypes(true).create()).create();
        writer.writeEdge(os, e);

        final AtomicBoolean called = new AtomicBoolean(false);
        final GraphSONReader reader = g.io().graphSONReader()
                .mapper(g.io().graphSONMapper().embedTypes(true).create()).create();
        try (final ByteArrayInputStream bais = new ByteArrayInputStream(os.toByteArray())) {
            reader.readEdge(bais, detachedEdge -> {
                assertEquals(e.id(),
                        graphProvider.reconstituteGraphSONIdentifier(Edge.class, detachedEdge.id()));
                assertEquals(v1.id(), graphProvider.reconstituteGraphSONIdentifier(Vertex.class,
                        detachedEdge.iterators().vertexIterator(Direction.OUT).next().id()));
                assertEquals(v2.id(), graphProvider.reconstituteGraphSONIdentifier(Vertex.class,
                        detachedEdge.iterators().vertexIterator(Direction.IN).next().id()));
                assertEquals(v1.label(), detachedEdge.iterators().vertexIterator(Direction.OUT).next().label());
                assertEquals(v2.label(), detachedEdge.iterators().vertexIterator(Direction.IN).next().label());
                assertEquals(e.label(), detachedEdge.label());
                assertEquals(e.keys().size(),
                        StreamFactory.stream(detachedEdge.iterators().propertyIterator()).count());
                assertEquals(id, detachedEdge.value("uuid"));

                called.set(true);

                return null;
            });
        }

        assertTrue(called.get());
    }
}

From source file:com.splout.db.integration.TestMultiThreadedQueryAndDeploy.java

@Test
@Ignore // Causes some non-deterministic problems, to be analyzed
public void test() throws Throwable {
    FileUtils.deleteDirectory(new File(TMP_FOLDER));
    new File(TMP_FOLDER).mkdirs();

    createSploutEnsemble(N_QNODES, N_DNODES);
    String[] qNodeAddresses = new String[N_QNODES];
    for (int i = 0; i < N_QNODES; i++) {
        qNodeAddresses[i] = getqNodes().get(i).getAddress();
    }

    final SploutClient client = new SploutClient(qNodeAddresses);
    final Tablespace testTablespace = createTestTablespace(N_DNODES);
    final Random random = new Random(SEED);
    final AtomicBoolean failed = new AtomicBoolean(false);
    final AtomicInteger iteration = new AtomicInteger(0);
    final Set<Integer> iterationsSeen = new HashSet<Integer>();

    deployIteration(0, random, client, testTablespace);

    for (QNode qnode : getqNodes()) {
        // Make sure all QNodes are aware of the first deploy
        // There might be some delay as they have to receive notifications via Hazelcast etc
        long waitedSoFar = 0;
        QueryStatus status = null;
        SploutClient perQNodeClient = new SploutClient(qnode.getAddress());
        do {
            status = perQNodeClient.query(TABLESPACE, "0", "SELECT * FROM " + TABLE + ";", null);
            Thread.sleep(100);
            waitedSoFar += 100;
            if (waitedSoFar > 5000) {
                throw new AssertionError("Waiting too much on a test condition");
            }
        } while (status == null || status.getError() != null);
        log.info("QNode [" + qnode.getAddress() + "] is ready to serve deploy 0.");
    }

    try {
        // Business logic here
        ExecutorService service = Executors.newFixedThreadPool(N_THREADS);

        // These threads will continuously perform queries and check that the results are consistent.
        // They will also count how many deploys have happened since the beginning.
        for (int i = 0; i < N_THREADS; i++) {
            service.submit(new Runnable() {
                @Override
                public void run() {
                    try {
                        while (true) {
                            int randomDNode = Math.abs(random.nextInt()) % N_DNODES;
                            QueryStatus status = client.query(TABLESPACE, (randomDNode * 10) + "",
                                    "SELECT * FROM " + TABLE + ";", null);
                            log.info("Query status -> " + status);
                            assertEquals(1, status.getResult().size());
                            Map<String, Object> jsonResult = (Map<String, Object>) status.getResult().get(0);
                            Integer seenIteration = (Integer) jsonResult.get("iteration");
                            synchronized (iterationsSeen) {
                                iterationsSeen.add(seenIteration);
                            }
                            assertTrue(seenIteration <= iteration.get());
                            assertEquals(randomDNode, jsonResult.get("dnode"));
                            Thread.sleep(100);
                        }
                    } catch (InterruptedException ie) {
                        // Bye bye
                        log.info("Bye bye!");
                    } catch (Throwable e) {
                        e.printStackTrace();
                        failed.set(true);
                    }
                }
            });
        }

        final SploutConfiguration config = SploutConfiguration.getTestConfig();
        final int iterationsToPerform = config.getInt(QNodeProperties.VERSIONS_PER_TABLESPACE) + 5;
        for (int i = 0; i < iterationsToPerform; i++) {
            iteration.incrementAndGet();
            log.info("Deploy iteration: " + iteration.get());
            deployIteration(iteration.get(), random, client, testTablespace);

            new TestUtils.NotWaitingForeverCondition() {
                @Override
                public boolean endCondition() {
                    synchronized (iterationsSeen) {
                        return iterationsSeen.size() == (iteration.get() + 1);
                    }
                }
            }.waitAtMost(5000);
        }

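        // failed is set by any query thread that threw an unexpected exception; get() surfaces it here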
        assertEquals(false, failed.get());

        service.shutdownNow(); // will interrupt all threads
        while (!service.isTerminated()) {
            Thread.sleep(100);
        }

        CoordinationStructures coord = TestUtils.getCoordinationStructures(config);
        assertNotNull(coord.getCopyVersionsBeingServed().get(TABLESPACE));

        // Assert that there is only MAX_VERSIONS versions of the tablespace (due to old version cleanup)
        new TestUtils.NotWaitingForeverCondition() {

            @Override
            public boolean endCondition() {
                QNodeHandler handler = (QNodeHandler) qNodes.get(0).getHandler();
                int seenVersions = 0;
                for (Map.Entry<TablespaceVersion, Tablespace> tablespaceVersion : handler.getContext()
                        .getTablespaceVersionsMap().entrySet()) {
                    if (tablespaceVersion.getKey().getTablespace().equals(TABLESPACE)) {
                        seenVersions++;
                    }
                }
                return seenVersions <= config.getInt(QNodeProperties.VERSIONS_PER_TABLESPACE);
            }
        }.waitAtMost(5000);
    } finally {
        closeSploutEnsemble();
        FileUtils.deleteDirectory(new File(TMP_FOLDER));
    }
}

From source file:com.spectralogic.ds3client.integration.GetJobManagement_Test.java

@Test
public void createReadJobWithBigFile() throws IOException, URISyntaxException, NoSuchMethodException,
        IllegalAccessException, InvocationTargetException {
    final String tempPathPrefix = null;
    final Path tempDirectory = Files.createTempDirectory(Paths.get("."), tempPathPrefix);

    try {
        final String DIR_NAME = "largeFiles/";
        final String FILE_NAME = "lesmis-copies.txt";

        final Path objPath = ResourceUtils.loadFileResource(DIR_NAME + FILE_NAME);
        final long bookSize = Files.size(objPath);
        final Ds3Object obj = new Ds3Object(FILE_NAME, bookSize);

        final Ds3ClientShim ds3ClientShim = new Ds3ClientShim((Ds3ClientImpl) client);

        final int maxNumBlockAllocationRetries = 1;
        final int maxNumObjectTransferAttempts = 3;
        final Ds3ClientHelpers ds3ClientHelpers = Ds3ClientHelpers.wrap(ds3ClientShim,
                maxNumBlockAllocationRetries, maxNumObjectTransferAttempts);

        final Ds3ClientHelpers.Job readJob = ds3ClientHelpers.startReadJob(BUCKET_NAME, Arrays.asList(obj));

        final AtomicBoolean dataTransferredEventReceived = new AtomicBoolean(false);
        final AtomicBoolean objectCompletedEventReceived = new AtomicBoolean(false);
        final AtomicBoolean checksumEventReceived = new AtomicBoolean(false);
        final AtomicBoolean metadataEventReceived = new AtomicBoolean(false);
        final AtomicBoolean waitingForChunksEventReceived = new AtomicBoolean(false);
        final AtomicBoolean failureEventReceived = new AtomicBoolean(false);

        readJob.attachDataTransferredListener(new DataTransferredListener() {
            @Override
            public void dataTransferred(final long size) {
                dataTransferredEventReceived.set(true);
                assertEquals(bookSize, size);
            }
        });
        readJob.attachObjectCompletedListener(new ObjectCompletedListener() {
            @Override
            public void objectCompleted(final String name) {
                objectCompletedEventReceived.set(true);
            }
        });
        readJob.attachChecksumListener(new ChecksumListener() {
            @Override
            public void value(final BulkObject obj, final ChecksumType.Type type, final String checksum) {
                checksumEventReceived.set(true);
                assertEquals("0feqCQBgdtmmgGs9pB/Huw==", checksum);
            }
        });
        readJob.attachMetadataReceivedListener(new MetadataReceivedListener() {
            @Override
            public void metadataReceived(final String filename, final Metadata metadata) {
                metadataEventReceived.set(true);
            }
        });
        readJob.attachWaitingForChunksListener(new WaitingForChunksListener() {
            @Override
            public void waiting(final int secondsToWait) {
                waitingForChunksEventReceived.set(true);
            }
        });
        readJob.attachFailureEventListener(new FailureEventListener() {
            @Override
            public void onFailure(final FailureEvent failureEvent) {
                failureEventReceived.set(true);
            }
        });

        final GetJobSpectraS3Response jobSpectraS3Response = ds3ClientShim
                .getJobSpectraS3(new GetJobSpectraS3Request(readJob.getJobId()));

        assertThat(jobSpectraS3Response.getMasterObjectListResult(), is(notNullValue()));

        readJob.transfer(new FileObjectGetter(tempDirectory));

        final File originalFile = ResourceUtils.loadFileResource(DIR_NAME + FILE_NAME).toFile();
        final File fileCopiedFromBP = Paths.get(tempDirectory.toString(), FILE_NAME).toFile();
        assertTrue(FileUtils.contentEquals(originalFile, fileCopiedFromBP));

        assertTrue(dataTransferredEventReceived.get());
        assertTrue(objectCompletedEventReceived.get());
        assertTrue(checksumEventReceived.get());
        assertTrue(metadataEventReceived.get());
        assertFalse(waitingForChunksEventReceived.get());
        assertFalse(failureEventReceived.get());
    } finally {
        FileUtils.deleteDirectory(tempDirectory.toFile());
    }
}

From source file:com.splout.db.integration.TestMultiThreadedFailover.java

@Test
public void test() throws Throwable {
    FileUtils.deleteDirectory(new File(TMP_FOLDER));
    new File(TMP_FOLDER).mkdirs();

    createSploutEnsemble(N_QNODES, N_DNODES);
    String[] qNodeAddresses = new String[N_QNODES];
    for (int i = 0; i < N_QNODES; i++) {
        qNodeAddresses[i] = getqNodes().get(i).getAddress();
    }

    final SploutClient client = new SploutClient(qNodeAddresses);
    final Tablespace testTablespace = createTestTablespace(N_DNODES);
    final Random random = new Random(SEED);
    final AtomicBoolean failed = new AtomicBoolean(false);

    deployIteration(0, random, client, testTablespace);

    for (QNode qnode : getqNodes()) {
        // Make sure all QNodes are aware of the first deploy
        // There might be some delay as they have to receive notifications via
        // Hazelcast etc
        long waitedSoFar = 0;
        QueryStatus status = null;
        SploutClient perQNodeClient = new SploutClient(qnode.getAddress());
        do {
            status = perQNodeClient.query(TABLESPACE, "0", "SELECT * FROM " + TABLE + ";", null);
            Thread.sleep(100);
            waitedSoFar += 100;
            if (waitedSoFar > 5000) {
                throw new AssertionError("Waiting too much on a test condition");
            }
        } while (status == null || status.getError() != null);
        log.info("QNode [" + qnode.getAddress() + "] is ready to serve deploy 0.");
    }

    try {
        // Business logic here
        ExecutorService service = Executors.newFixedThreadPool(N_THREADS);

        // This is the "mother-fucker" thread.
        // It will bring DNodes down on purpose.
        // And then bring them up again.
        service.submit(new Runnable() {

            @Override
            public void run() {

                while (true) {
                    try {
                        Thread.sleep(1000);
                        log.info("Time to kill some DNode...");
                        int whichOne = (int) (Math.random() * getdNodes().size());
                        getdNodes().get(whichOne).testCommand(TestCommands.SHUTDOWN.toString());
                        Thread.sleep(1000);
                        log.info("Time to bring the DNode back to life...");
                        getdNodes().get(whichOne).testCommand(TestCommands.RESTART.toString());
                    } catch (InterruptedException e) {
                        log.info("MFT - Bye bye!");
                    } catch (DNodeException e) {
                        failed.set(true);
                        e.printStackTrace();
                        throw new RuntimeException(e);
                    } catch (TException e) {
                        failed.set(true);
                        e.printStackTrace();
                        throw new RuntimeException(e);
                    }
                }
            }

        });

        // These threads will continuously perform queries and check that the
        // results are consistent.
        for (int i = 0; i < N_THREADS; i++) {
            service.submit(new Runnable() {
                @SuppressWarnings("unchecked")
                @Override
                public void run() {
                    try {
                        while (true) {
                            int randomDNode = Math.abs(random.nextInt()) % N_DNODES;
                            QueryStatus status = client.query(TABLESPACE, ((randomDNode * 10) - 1) + "",
                                    "SELECT * FROM " + TABLE + ";", null);
                            log.info("Query status -> " + status);
                            assertEquals(1, status.getResult().size());
                            Map<String, Object> jsonResult = (Map<String, Object>) status.getResult().get(0);
                            assertEquals(randomDNode, jsonResult.get("dnode"));
                            Thread.sleep(100);
                        }
                    } catch (InterruptedException ie) {
                        // Bye bye
                        log.info("Bye bye!");
                    } catch (Throwable e) {
                        e.printStackTrace();
                        failed.set(true);
                    }
                }
            });
        }

        Thread.sleep(15000);

        assertEquals(false, failed.get());

    } finally {
        closeSploutEnsemble();
        FileUtils.deleteDirectory(new File(TMP_FOLDER));
    }
}

From source file:org.apache.geode.internal.cache.OplogJUnitTest.java

/**
 * Tests that directory stats are correctly updated in the case of a single directory (for bug 37531).
 */
@Test
public void testPersist1DirStats() {
    final AtomicBoolean freezeRoller = new AtomicBoolean();
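    // freezeRoller doubles as a wait/notify monitor and a flag: the observer spins on get() until the test thread sets it and calls notifyAll()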
    CacheObserver old = CacheObserverHolder.setInstance(new CacheObserverAdapter() {
        private volatile boolean didBeforeCall = false;

        @Override
        public void beforeGoingToCompact() {
            this.didBeforeCall = true;
            synchronized (freezeRoller) {
                if (!assertDone) {
                    try {
                        // Here, we are not allowing the Roller thread to roll the old oplog into htree
                        while (!freezeRoller.get()) {
                            freezeRoller.wait();
                        }
                        freezeRoller.set(false);
                    } catch (InterruptedException e) {
                        fail("interrupted");
                    }
                }
            }
        }

        @Override
        public void afterHavingCompacted() {
            if (this.didBeforeCall) {
                this.didBeforeCall = false;
                LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = false;
                // assertTrue("Assert failure for DSpaceUsage in afterHavingCompacted ",
                // diskSpaceUsageStats() == calculatedDiskSpaceUsageStats());
                // what is the point of this assert?
                checkDiskStats();
            }
        }
    });
    try {
        final int MAX_OPLOG_SIZE = 500;
        diskProps.setMaxOplogSize(MAX_OPLOG_SIZE);
        diskProps.setPersistBackup(true);
        diskProps.setRolling(true);
        diskProps.setSynchronous(true);
        diskProps.setOverflow(false);
        diskProps.setDiskDirsAndSizes(new File[] { dirs[0] }, new int[] { 4000 });
        final byte[] val = new byte[200];
        region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, diskProps, Scope.LOCAL);
        LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = true;
        region.put("key1", val);
        // Disk space should have changed due to 1 put
        // assertTrue("stats did not increase after put 1 ", diskSpaceUsageStats() ==
        // calculatedDiskSpaceUsageStats());
        checkDiskStats();
        region.put("key2", val);
        // assertTrue("stats did not increase after put 2", diskSpaceUsageStats() ==
        // calculatedDiskSpaceUsageStats());
        checkDiskStats();
        // This put will cause a switch as max-oplog size (500) will be exceeded (600)
        region.put("key3", val);
        synchronized (freezeRoller) {
            // assertTrue("current disk space usage with Roller thread in wait and put key3 done is
            // incorrect " + diskSpaceUsageStats() + " " + calculatedDiskSpaceUsageStats(),
            // diskSpaceUsageStats()== calculatedDiskSpaceUsageStats());
            checkDiskStats();
            assertDone = true;
            freezeRoller.set(true);
            freezeRoller.notifyAll();
        }

        region.close();
        closeDown();
        // Stop rolling to get accurate estimates:
        diskProps.setRolling(false);

        region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, diskProps, Scope.LOCAL);

        // On recreating the region after closing, old Oplog file gets rolled into htree
        // "Disk space usage zero when region recreated"
        checkDiskStats();
        region.put("key4", val);
        // assertTrue("stats did not increase after put 4", diskSpaceUsageStats() ==
        // calculatedDiskSpaceUsageStats());
        checkDiskStats();
        region.put("key5", val);
        // assertTrue("stats did not increase after put 5", diskSpaceUsageStats() ==
        // calculatedDiskSpaceUsageStats());
        checkDiskStats();
        assertDone = false;
        LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = true;
        region.put("key6", val);
        // again we expect a switch in oplog here
        synchronized (freezeRoller) {
            // assertTrue("current disk space usage with Roller thread in wait and put key6 done is
            // incorrect", diskSpaceUsageStats()== calculatedDiskSpaceUsageStats());
            checkDiskStats();
            assertDone = true;
            freezeRoller.set(true);
            freezeRoller.notifyAll();
        }
        region.close();
    } catch (Exception e) {
        e.printStackTrace();
        fail("Test failed due to exception" + e);
    } finally {
        LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = false;
        CacheObserverHolder.setInstance(old);
        synchronized (freezeRoller) {
            assertDone = true;
            freezeRoller.set(true);
            freezeRoller.notifyAll();
        }
    }
}

From source file:com.gemstone.gemfire.internal.cache.OplogJUnitTest.java

/**
 * Tests that directory stats are correctly updated in the case of a single directory
 * (for bug 37531).
 */
@Test
public void testPersist1DirStats() {
    final AtomicBoolean freezeRoller = new AtomicBoolean();
    CacheObserver old = CacheObserverHolder.setInstance(new CacheObserverAdapter() {
        private volatile boolean didBeforeCall = false;

        @Override
        public void beforeGoingToCompact() {
            this.didBeforeCall = true;
            synchronized (freezeRoller) {
                if (!assertDone) {
                    try {
                        // Here, we are not allowing the Roller thread to roll the old oplog into htree
                        while (!freezeRoller.get()) {
                            freezeRoller.wait();
                        }
                        freezeRoller.set(false);
                    } catch (InterruptedException e) {
                        fail("interrupted");
                    }
                }
            }
        }

        @Override
        public void afterHavingCompacted() {
            if (this.didBeforeCall) {
                this.didBeforeCall = false;
                LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = false;
                //assertTrue("Assert failure for DSpaceUsage in afterHavingCompacted ", diskSpaceUsageStats() == calculatedDiskSpaceUsageStats());
                // what is the point of this assert?
                checkDiskStats();
            }
        }
    });
    try {
        final int MAX_OPLOG_SIZE = 500;
        diskProps.setMaxOplogSize(MAX_OPLOG_SIZE);
        diskProps.setPersistBackup(true);
        diskProps.setRolling(true);
        diskProps.setSynchronous(true);
        diskProps.setOverflow(false);
        diskProps.setDiskDirsAndSizes(new File[] { dirs[0] }, new int[] { 4000 });
        final byte[] val = new byte[200];
        region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, diskProps, Scope.LOCAL);
        LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = true;
        region.put("key1", val);
        // Disk space should have changed due to 1 put
        //assertTrue("stats did not increase after put 1 ", diskSpaceUsageStats() == calculatedDiskSpaceUsageStats());
        checkDiskStats();
        region.put("key2", val);
        //assertTrue("stats did not increase after put 2", diskSpaceUsageStats() == calculatedDiskSpaceUsageStats());
        checkDiskStats();
        // This put will cause a switch as max-oplog size (500) will be exceeded (600)
        region.put("key3", val);
        synchronized (freezeRoller) {
            //assertTrue("current disk space usage with Roller thread in wait and put key3 done is incorrect " +  diskSpaceUsageStats() + " " + calculatedDiskSpaceUsageStats(), diskSpaceUsageStats()== calculatedDiskSpaceUsageStats());
            checkDiskStats();
            assertDone = true;
            freezeRoller.set(true);
            freezeRoller.notifyAll();
        }

        region.close();
        closeDown();
        // Stop rolling to get accurate estimates:
        diskProps.setRolling(false);

        region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, diskProps, Scope.LOCAL);

        // On recreating the region after closing, old Oplog file gets rolled into htree
        // "Disk space usage zero when region recreated"
        checkDiskStats();
        region.put("key4", val);
        //assertTrue("stats did not increase after put 4", diskSpaceUsageStats() == calculatedDiskSpaceUsageStats());
        checkDiskStats();
        region.put("key5", val);
        //assertTrue("stats did not increase after put 5", diskSpaceUsageStats() == calculatedDiskSpaceUsageStats());
        checkDiskStats();
        assertDone = false;
        LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = true;
        region.put("key6", val);
        // again we expect a switch in oplog here
        synchronized (freezeRoller) {
            //assertTrue("current disk space usage with Roller thread in wait and put key6 done is incorrect", diskSpaceUsageStats()== calculatedDiskSpaceUsageStats());
            checkDiskStats();
            assertDone = true;
            freezeRoller.set(true);
            freezeRoller.notifyAll();
        }
        region.close();
    } catch (Exception e) {
        e.printStackTrace();
        fail("Test failed due to exception" + e);
    } finally {
        LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = false;
        CacheObserverHolder.setInstance(old);
        synchronized (freezeRoller) {
            assertDone = true;
            freezeRoller.set(true);
            freezeRoller.notifyAll();
        }
    }
}