Example usage for the java.util.concurrent.atomic.AtomicBoolean constructor AtomicBoolean(boolean)

List of usage examples for the java.util.concurrent.atomic.AtomicBoolean constructor AtomicBoolean(boolean)

Introduction

On this page you can find example usages of the java.util.concurrent.atomic.AtomicBoolean constructor AtomicBoolean(boolean).

Prototype

public AtomicBoolean(boolean initialValue) 

Document

Creates a new AtomicBoolean with the given initial value.
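The project examples below illustrate typical uses of this constructor. As a warm-up, here is a minimal, self-contained sketch of constructing an AtomicBoolean with an explicit initial value and flipping it atomically (the class name AtomicBooleanConstructorDemo is illustrative, not taken from any project below):

import java.util.concurrent.atomic.AtomicBoolean;

public class AtomicBooleanConstructorDemo {
    public static void main(String[] args) {
        // Explicit initial value; false also happens to be the default of the no-arg constructor.
        AtomicBoolean started = new AtomicBoolean(false);

        // compareAndSet only flips the flag if it still holds the expected value,
        // so two concurrent callers cannot both "win".
        if (started.compareAndSet(false, true)) {
            System.out.println("first caller starts the work");
        }

        System.out.println("current value: " + started.get()); // true
    }
}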

Usage

From source file:ca.psiphon.PsiphonTunnel.java

private PsiphonTunnel(HostService hostService) {
    mHostService = hostService;
    mVpnMode = new AtomicBoolean(false);
    mTunFd = new AtomicReference<ParcelFileDescriptor>();
    mLocalSocksProxyPort = new AtomicInteger(0);
    mRoutingThroughTunnel = new AtomicBoolean(false);
    mIsWaitingForNetworkConnectivity = new AtomicBoolean(false);
}
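The constructor above uses AtomicBoolean(false) to initialize several thread-safe state flags. A minimal sketch of that flag-field pattern, with hypothetical class and method names that are not part of PsiphonTunnel:

import java.util.concurrent.atomic.AtomicBoolean;

// Sketch only: class and method names are illustrative.
class TunnelState {
    private final AtomicBoolean vpnMode = new AtomicBoolean(false);
    private final AtomicBoolean waitingForNetworkConnectivity = new AtomicBoolean(false);

    void onVpnEstablished() {
        vpnMode.set(true); // the write is immediately visible to every thread
    }

    void onNetworkLost() {
        waitingForNetworkConnectivity.set(true);
    }

    void onNetworkRestored() {
        waitingForNetworkConnectivity.set(false);
    }

    boolean shouldRouteThroughVpn() {
        // Unlike a plain boolean field, these reads never see a stale value.
        return vpnMode.get() && !waitingForNetworkConnectivity.get();
    }
}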

From source file:edu.umn.msi.tropix.common.jobqueue.test.IntegrationTestBase.java

protected boolean pollJob(final Job job, final JobQueueContext context) throws InterruptedException {
    final CountDownLatch completeLatch = new CountDownLatch(1), postprocessLatch = new CountDownLatch(1);
    final AtomicBoolean previouslyLaunchedFileTransfer = new AtomicBoolean(false);
    class Listener implements JobUpdateListener {
        private boolean finishedProperly = false;

        public void jobComplete(final Ticket ticket, final boolean finishedProperly, final Status finalStatus) {
            this.finishedProperly = finishedProperly;
            completeLatch.countDown();
        }

        public void update(final Ticket ticket, final Status status) {
            final QueueStage stage = QueueStage.fromStatusUpdateList(status);
            LOG.info("Queue stage is [" + stage.getStageEnumerationValue().getValue() + "]");
            if (stage.getStageEnumerationValue() == StageEnumeration.Postprocessed && startTransfer()) {
                postprocessLatch.countDown();
            }
        }

        private boolean startTransfer() {
            final boolean wasFalse = previouslyLaunchedFileTransfer.getAndSet(true);
            return !wasFalse;
        }

    }
    final Listener listener = new Listener();
    dListener.setJobUpdateListener(listener);
    jobPoller.pollJob(job);

    postprocessLatch.await();
    final FileJobQueueContext fileContext = (FileJobQueueContext) context;
    final int numResults = fileContext.getNumResults();
    final TransferResource[] results = new TransferResource[numResults];
    for (int i = 0; i < numResults; i++) {
        final StorageData storageData = getStorageData();
        getResults().add(storageData);
        results[i] = storageData.prepareUploadResource();
    }
    fileContext.getResults(results, null);
    completeLatch.await();
    jobCompletedProperly = listener.finishedProperly;
    return jobCompletedProperly;
}
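The startTransfer() helper above relies on getAndSet(true) so that the file transfer is launched at most once, no matter how many status updates arrive concurrently. A stripped-down sketch of that run-once guard (class and method names are illustrative):

import java.util.concurrent.atomic.AtomicBoolean;

public class RunOnceDemo {
    private static final AtomicBoolean transferLaunched = new AtomicBoolean(false);

    // getAndSet returns the previous value atomically, so only the first caller sees false.
    static boolean startTransfer() {
        return !transferLaunched.getAndSet(true);
    }

    public static void main(String[] args) {
        System.out.println(startTransfer()); // true  - the first caller launches the transfer
        System.out.println(startTransfer()); // false - every later caller skips it
    }
}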

From source file:it.anyplace.sync.client.SyncthingClient.java

private BlockExchangeConnectionHandler openConnection(DeviceAddress deviceAddress) throws Exception {
    final BlockExchangeConnectionHandler connectionHandler = new BlockExchangeConnectionHandler(configuration,
            deviceAddress);
    connectionHandler.setIndexHandler(indexHandler);
    connectionHandler.getEventBus().register(indexHandler);
    connectionHandler.getEventBus().register(devicesHandler);
    final AtomicBoolean shouldRestartForNewFolder = new AtomicBoolean(false);
    connectionHandler.getEventBus().register(new Object() {
        @Subscribe
        public void handleConnectionClosedEvent(BlockExchangeConnectionHandler.ConnectionClosedEvent event) {
            connections.remove(connectionHandler);
            synchronized (pool) {
                pool.remove(connectionHandler);
            }
        }

        @Subscribe
        public void handleNewFolderSharedEvent(BlockExchangeConnectionHandler.NewFolderSharedEvent event) {
            shouldRestartForNewFolder.set(true);
        }
    });
    connectionHandler.connect();
    connections.add(connectionHandler);
    if (shouldRestartForNewFolder.get()) {
        logger.info("restart connection for new folder shared");
        connectionHandler.close();
        return openConnection(deviceAddress);
    } else {
        return connectionHandler;
    }
}

From source file:com.netflix.spinnaker.orca.clouddriver.tasks.providers.aws.AmazonImageTagger.java

/**
 * Return true iff the tags on the current machine image match the desired tags.
 */
@Override
public boolean areImagesTagged(Collection<Image> targetImages, Stage stage) {
    Collection<MatchedImage> matchedImages = findImages(
            targetImages.stream().map(targetImage -> targetImage.imageName).collect(Collectors.toList()),
            stage);

    AtomicBoolean isUpserted = new AtomicBoolean(true);
    for (Image targetImage : targetImages) {
        targetImage.regions.forEach(region -> {
            MatchedImage matchedImage = matchedImages.stream()
                    .filter(m -> m.imageName.equals(targetImage.imageName)).findFirst().orElse(null);

            if (matchedImage == null) {
                isUpserted.set(false);
                return;
            }

            List<String> imagesForRegion = matchedImage.amis.get(region);
            imagesForRegion.forEach(image -> {
                Map<String, String> allImageTags = matchedImage.tagsByImageId.get(image);
                targetImage.tags.entrySet().forEach(entry -> {
                    // assert tag equality
                    isUpserted
                            .set(isUpserted.get() && entry.getValue().equals(allImageTags.get(entry.getKey())));
                });
            });
        });
    }

    return isUpserted.get();
}
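Here the AtomicBoolean works around the rule that lambdas may only capture (effectively) final local variables: a plain boolean could not be reassigned inside the nested forEach calls. A minimal sketch of that accumulator idiom, with made-up tag data:

import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;

public class LambdaAccumulatorDemo {
    public static void main(String[] args) {
        List<String> expectedTags = List.of("team=platform", "release=1.2"); // illustrative data
        List<String> actualTags = List.of("team=platform");

        AtomicBoolean allPresent = new AtomicBoolean(true);
        expectedTags.forEach(tag ->
                // A plain "boolean allPresent" could not be reassigned inside this lambda.
                allPresent.set(allPresent.get() && actualTags.contains(tag)));

        System.out.println("all tags present: " + allPresent.get()); // false
    }
}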

From source file:org.apache.nifi.processors.msgpack.MessagePackPack.java

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final ObjectMapper reader = new ObjectMapper();
    final ObjectMapper writer = new ObjectMapper(new MessagePackFactory());
    writer.setAnnotationIntrospector(new JsonArrayFormat());

    final AtomicBoolean failed = new AtomicBoolean(false);
    flowFile = session.write(flowFile, new StreamCallback() {
        @Override
        public void process(InputStream is, OutputStream os) throws IOException {
            try (final OutputStream msgpack = new BufferedOutputStream(os)) {
                final JsonNode json = reader.readTree(is);
                final byte[] bytes = writer.writeValueAsBytes(json);
                msgpack.write(bytes);
                msgpack.flush();
            } catch (JsonProcessingException e) {
                getLogger().error(e.getMessage(), e);
                failed.set(true);
            }
        }
    });

    if (failed.get()) {
        session.transfer(flowFile, REL_FAILURE);
        return;
    }

    flowFile = session.putAttribute(flowFile, CoreAttributes.MIME_TYPE.key(), MIME_TYPE);
    flowFile = session.putAttribute(flowFile, MIME_EXT_KEY, MIME_EXT);

    session.transfer(flowFile, REL_SUCCESS);
}
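The failed flag lets the StreamCallback report a problem back to onTrigger, where the exception itself cannot usefully propagate across the session.write boundary. A minimal sketch of that callback-failure idiom, using an illustrative callback instead of the NiFi API:

import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Consumer;

public class CallbackFailureFlagDemo {
    // Stand-in for an API that runs a callback and handles its exceptions internally.
    static void runWithCallback(Consumer<String> callback) {
        callback.accept("not-a-number");
    }

    public static void main(String[] args) {
        AtomicBoolean failed = new AtomicBoolean(false);

        runWithCallback(input -> {
            try {
                Integer.parseInt(input);
            } catch (NumberFormatException e) {
                failed.set(true); // record the failure for the code after the callback returns
            }
        });

        System.out.println(failed.get() ? "route to failure" : "route to success");
    }
}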

From source file:com.opinionlab.woa.WallOfAwesome.java

private static Handler<RoutingContext> makeDownloadRoute() {
    return routingContext -> EXECUTOR.execute(() -> {
        try {
            final HttpServerResponse response = routingContext.response();
            final AtomicBoolean first = new AtomicBoolean(true);
            response.putHeader("Content-Type", "text/plain");
            response.putHeader("Content-Disposition", "inline;filename=awesome.txt");

            response.setChunked(true);
            response.write("BEGIN AWESOME\n\n");
            AwesomeImap.fetchAwesome().forEach(awesome -> {
                if (!first.get()) {
                    response.write("\n\n---\n\n");
                } else {
                    first.set(false);
                }

                response.write(new ST(AWESOME_TEMPLATE).add("awesome", awesome).render());
            });
            response.write("\n\nEND AWESOME");
            response.end();
        } catch (Throwable t) {
            LOGGER.error("Unable to fetch messages.", t);
        }
    });
}
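The first flag above handles the "no separator before the first item" case inside a forEach lambda. A minimal sketch of that idiom, compacted here to a single getAndSet(false) call and using made-up data:

import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;

public class FirstItemSeparatorDemo {
    public static void main(String[] args) {
        List<String> items = List.of("alpha", "beta", "gamma"); // illustrative data
        StringBuilder out = new StringBuilder();

        AtomicBoolean first = new AtomicBoolean(true);
        items.forEach(item -> {
            // getAndSet(false) reads and clears the flag in one atomic step.
            if (!first.getAndSet(false)) {
                out.append(" --- "); // separator before every item except the first
            }
            out.append(item);
        });

        System.out.println(out); // alpha --- beta --- gamma
    }
}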

From source file:io.restassured.path.xml.XmlPathObjectDeserializationTest.java

@Test
public void xml_path_supports_custom_deserializer_using_static_configuration() {
    // Given
    final AtomicBoolean customDeserializerUsed = new AtomicBoolean(false);

    XmlPath.config = XmlPathConfig.xmlPathConfig().defaultObjectDeserializer(new XmlPathObjectDeserializer() {
        public <T> T deserialize(ObjectDeserializationContext ctx) {
            customDeserializerUsed.set(true);
            final String xml = ctx.getDataToDeserialize().asString();
            final Greeting greeting = new Greeting();
            greeting.setFirstName(StringUtils.substringBetween(xml, "<firstName>", "</firstName>"));
            greeting.setLastName(StringUtils.substringBetween(xml, "<lastName>", "</lastName>"));
            return (T) greeting;
        }
    });

    // When
    try {
        final XmlPath xmlPath = new XmlPath(COOL_GREETING);
        final Greeting greeting = xmlPath.getObject("", Greeting.class);

        // Then
        assertThat(greeting.getFirstName(), equalTo("John"));
        assertThat(greeting.getLastName(), equalTo("Doe"));
        assertThat(customDeserializerUsed.get(), is(true));
    } finally {
        XmlPath.reset();
    }
}
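In this test the AtomicBoolean simply records that the custom deserializer callback was actually invoked, so the test can assert on it afterwards. A minimal sketch of that verification idiom, with an illustrative callback interface in place of the REST Assured API:

import java.util.concurrent.atomic.AtomicBoolean;

public class CallbackInvokedFlagDemo {
    // Illustrative stand-in for a pluggable deserializer hook.
    interface Deserializer {
        String deserialize(String raw);
    }

    static String parseWith(String raw, Deserializer deserializer) {
        return deserializer.deserialize(raw);
    }

    public static void main(String[] args) {
        AtomicBoolean customDeserializerUsed = new AtomicBoolean(false);

        String result = parseWith("<greeting/>", raw -> {
            customDeserializerUsed.set(true); // proves the hook was actually called
            return "Greeting";
        });

        // The real test asserts on this with Hamcrest; a plain check suffices here.
        if (!customDeserializerUsed.get()) {
            throw new AssertionError("custom deserializer was not used for: " + result);
        }
        System.out.println("custom deserializer was used");
    }
}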

From source file:com.jayway.restassured.path.xml.XmlPathObjectDeserializationTest.java

@Test
public void xml_path_supports_custom_deserializer_using_static_configuration() {
    // Given
    final AtomicBoolean customDeserializerUsed = new AtomicBoolean(false);

    XmlPath.config = xmlPathConfig().defaultObjectDeserializer(new XmlPathObjectDeserializer() {
        public <T> T deserialize(ObjectDeserializationContext ctx) {
            customDeserializerUsed.set(true);
            final String xml = ctx.getDataToDeserialize().asString();
            final Greeting greeting = new Greeting();
            greeting.setFirstName(StringUtils.substringBetween(xml, "<firstName>", "</firstName>"));
            greeting.setLastName(StringUtils.substringBetween(xml, "<lastName>", "</lastName>"));
            return (T) greeting;
        }
    });

    // When
    try {
        final XmlPath xmlPath = new XmlPath(COOL_GREETING);
        final Greeting greeting = xmlPath.getObject("", Greeting.class);

        // Then
        assertThat(greeting.getFirstName(), equalTo("John"));
        assertThat(greeting.getLastName(), equalTo("Doe"));
        assertThat(customDeserializerUsed.get(), is(true));
    } finally {
        XmlPath.reset();
    }
}

From source file:com.sixt.service.framework.kafka.messaging.KafkaIntegrationTest.java

@Ignore("long running test")
@Test
public void partitionAssignmentChange() throws InterruptedException {
    ServiceProperties serviceProperties = new ServiceProperties();
    serviceProperties.initialize(new String[] {}); // Reads environment variables set by DockerComposeHelper

    // Topics are created with 3 partitions - see docker-compose-integrationtest.yml
    Topic ping = new Topic("ping");
    Topic pong = new Topic("pong");

    Producer producer = new ProducerFactory(serviceProperties).createProducer();

    final AtomicBoolean produceMessages = new AtomicBoolean(true);
    final AtomicInteger sentMessages = new AtomicInteger(0);

    final AtomicInteger receivedMessagesConsumer1 = new AtomicInteger(0);
    final CountDownLatch firstMessageProcessedConsumer1 = new CountDownLatch(1);

    final AtomicInteger receivedMessagesConsumer2 = new AtomicInteger(0);
    final CountDownLatch firstMessageProcessedConsumer2 = new CountDownLatch(1);

    final AtomicInteger receivedMessagesConsumer3 = new AtomicInteger(0);
    final CountDownLatch firstMessageProcessedConsumer3 = new CountDownLatch(1);

    // Produce messages until test tells producer to stop.
    ExecutorService producerExecutor = Executors.newSingleThreadExecutor();
    producerExecutor.submit(new Runnable() {
        @Override
        public void run() {
            OrangeContext context = new OrangeContext();
            Sleeper sleeper = new Sleeper();

            try {
                while (produceMessages.get()) {
                    String key = RandomStringUtils.randomAscii(5);
                    SayHelloToCmd payload = SayHelloToCmd.newBuilder().setName(key).build();

                    Message request = Messages.requestFor(ping, pong, key, payload, context);

                    producer.send(request);
                    sentMessages.incrementAndGet();

                    sleeper.sleepNoException(250);
                }
            } catch (Throwable t) {
                logger.error("Exception in producer loop", t);
            }
        }
    });

    // Start first consumer. It should get all 3 partitions assigned.
    Consumer consumer1 = consumerFactoryWithHandler(serviceProperties, SayHelloToCmd.class,
            new MessageHandler<SayHelloToCmd>() {
                @Override
                public void onMessage(Message<SayHelloToCmd> message, OrangeContext context) {
                    receivedMessagesConsumer1.incrementAndGet();
                    firstMessageProcessedConsumer1.countDown();
                }
            }).consumerForTopic(ping, new DiscardFailedMessages());

    // wait until consumer 1 is up.
    firstMessageProcessedConsumer1.await();
    Thread.sleep(5000); // consume some messages

    // Now, start second processor. It should get at least one partition assigned.
    Consumer consumer2 = consumerFactoryWithHandler(serviceProperties, SayHelloToCmd.class,
            new MessageHandler<SayHelloToCmd>() {
                @Override
                public void onMessage(Message<SayHelloToCmd> message, OrangeContext context) {
                    receivedMessagesConsumer2.incrementAndGet();
                    firstMessageProcessedConsumer2.countDown();
                }
            }).consumerForTopic(ping, new DiscardFailedMessages());

    // wait until the second consumer is up.
    firstMessageProcessedConsumer2.await();
    Thread.sleep(5000); // let both consumers run a bit

    brutallyKillConsumer("pool-14-thread-1"); // consumer2 thread, HACKY: if this is too brittle, change the test to shutdown()

    // Need to wait a bit longer while Kafka "restabilizes the group" after consumer 2 was killed.
    // -> Consumer 1 should now get all three partitions back again.
    Thread.sleep(30000); // must be > than max.poll.interval.ms

    // Now, start third processor. It should get at least one partition assigned.
    Consumer consumer3 = consumerFactoryWithHandler(serviceProperties, SayHelloToCmd.class,
            new MessageHandler<SayHelloToCmd>() {
                @Override
                public void onMessage(Message<SayHelloToCmd> message, OrangeContext context) {
                    receivedMessagesConsumer3.incrementAndGet();
                    firstMessageProcessedConsumer3.countDown();
                }
            }).consumerForTopic(ping, new DiscardFailedMessages());
    firstMessageProcessedConsumer3.await();
    Thread.sleep(5000);

    // Now shut down the first consumer.
    consumer1.shutdown();
    Thread.sleep(10000);

    // Stop the producer.
    produceMessages.set(false);
    producer.shutdown();
    producerExecutor.shutdown();

    Thread.sleep(3000); // give the remaining consumer the chance to consume all messages
    consumer3.shutdown(); // no assignment any longer

    // Finally, the assertions:
    int receivedMessagesTotal = receivedMessagesConsumer1.get() + receivedMessagesConsumer2.get()
            + receivedMessagesConsumer3.get();
    assertEquals(sentMessages.get(), receivedMessagesTotal);

    assertTrue(receivedMessagesConsumer1.get() > 0);
    assertTrue(receivedMessagesConsumer2.get() > 0);
    assertTrue(receivedMessagesConsumer3.get() > 0);
}
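The produceMessages flag above is a cooperative stop signal: the producer loop re-reads it on every iteration and exits once the test flips it to false. A minimal sketch of that idiom, without the Kafka dependencies and with the send replaced by a counter increment:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;

public class StopFlagDemo {
    public static void main(String[] args) throws InterruptedException {
        AtomicBoolean produceMessages = new AtomicBoolean(true);
        AtomicInteger sentMessages = new AtomicInteger(0);

        ExecutorService producerExecutor = Executors.newSingleThreadExecutor();
        producerExecutor.submit(() -> {
            while (produceMessages.get()) {     // re-read the flag on every iteration
                sentMessages.incrementAndGet(); // stand-in for producer.send(request)
            }
        });

        Thread.sleep(100);          // let the worker run briefly
        produceMessages.set(false); // cooperative shutdown signal, visible to the worker thread
        producerExecutor.shutdown();
        producerExecutor.awaitTermination(1, TimeUnit.SECONDS);

        System.out.println("messages sent before stop: " + sentMessages.get());
    }
}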

From source file:com.qubole.quark.planner.parser.SqlQueryParser.java

public SqlQueryParserResult parse(String sql) throws SQLException {
    DataSourceSchema dataSource = this.context.getDefaultDataSource();
    final AtomicBoolean foundNonQuarkScan = new AtomicBoolean(false);
    final ImmutableSet.Builder<DataSourceSchema> dsBuilder = new ImmutableSet.Builder<>();
    try {
        final SqlKind kind = getSqlParser(sql).parseQuery().getKind();
        SqlQueryParserResult result = new SqlQueryParserResult(stripNamespace(sql, dataSource), dataSource,
                kind, null, false);
        RelNode relNode = parseInternal(sql);
        final RelVisitor relVisitor = new RelVisitor() {
            @Override
            public void visit(RelNode node, int ordinal, RelNode parent) {
                if (node instanceof QuarkViewScan) {
                    visitQuarkViewScan((QuarkViewScan) node);
                } else if (node instanceof QuarkTileScan) {
                    visitQuarkTileScan((QuarkTileScan) node);
                } else if (node instanceof TableScan) {
                    visitNonQuarkScan((TableScan) node);
                }
                super.visit(node, ordinal, parent);
            }

            private void visitNonQuarkScan(TableScan node) {
                foundNonQuarkScan.set(true);
                final String schemaName = node.getTable().getQualifiedName().get(0);
                CalciteSchema schema = CalciteSchema.from(getRootSchma()).getSubSchema(schemaName, false);
                dsBuilder.addAll(getDrivers(schema));
            }

            private void visitQuarkTileScan(QuarkTileScan node) {
                QuarkTile quarkTile = node.getQuarkTile();
                CalciteCatalogReader calciteCatalogReader = new CalciteCatalogReader(
                        CalciteSchema.from(getRootSchma()), false, context.getDefaultSchemaPath(),
                        getTypeFactory());
                CalciteSchema tileSchema = calciteCatalogReader.getTable(quarkTile.tableName)
                        .unwrap(CalciteSchema.class);
                dsBuilder.addAll(getDrivers(tileSchema));
            }

            private void visitQuarkViewScan(QuarkViewScan node) {
                QuarkTable table = node.getQuarkTable();
                if (table instanceof QuarkViewTable) {
                    final CalciteSchema tableSchema = ((QuarkViewTable) table).getBackupTableSchema();
                    dsBuilder.addAll(getDrivers(tableSchema));
                }
            }

            private ImmutableSet<DataSourceSchema> getDrivers(CalciteSchema tableSchema) {
                final ImmutableSet.Builder<DataSourceSchema> dsBuilder = new ImmutableSet.Builder<>();
                SchemaPlus tableSchemaPlus = tableSchema.plus();
                while (tableSchemaPlus != null) {
                    Schema schema = CalciteSchema.from(tableSchemaPlus).schema;
                    if (schema instanceof DataSourceSchema) {
                        dsBuilder.add((DataSourceSchema) schema);
                    }
                    tableSchemaPlus = tableSchemaPlus.getParentSchema();
                }
                return dsBuilder.build();
            }

        };

        relVisitor.go(relNode);

        ImmutableSet<DataSourceSchema> dataSources = dsBuilder.build();

        if (!foundNonQuarkScan.get() && dataSources.size() == 1) {
            /**
             * Check if query is completely optimized for a data source
             */
            final DataSourceSchema newDataSource = dataSources.asList().get(0);
            final SqlDialect dialect = newDataSource.getDataSource().getSqlDialect();
            final String parsedSql = getParsedSql(relNode, dialect);
            result = new SqlQueryParserResult(parsedSql, newDataSource, kind, relNode, true);
        } else if (foundNonQuarkScan.get() && dataSources.size() == 1) {
            /**
             * Check if it is not optimized
             */
            final DataSourceSchema newDataSource = dataSources.asList().get(0);
            final String stripNamespace = stripNamespace(sql, newDataSource);
            result = new SqlQueryParserResult(stripNamespace, newDataSource, kind, relNode, true);
        } else if (this.context.isUnitTestMode()) {
            String parsedSql = getParsedSql(relNode,
                    new SqlDialect(SqlDialect.DatabaseProduct.UNKNOWN, "UNKNOWN", null, true));
            result = new SqlQueryParserResult(parsedSql, null, kind, relNode, true);
        } else if (dataSources.size() > 1) {
            /**
             * Check if it's partially optimized, i.e., table scans of multiple data sources
             * are found in the RelNode. We currently do not support multiple data sources.
             */
            throw new SQLException("Federation between data sources is not allowed", "0A001");
        } else if (dataSources.isEmpty()) {
            throw new SQLException("No dataSource found for query", "3D001");
        }
        return result;
    } catch (SQLException e) {
        throw e;
    } catch (Exception e) {
        throw new SQLException(e);
    }
}
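foundNonQuarkScan is flipped from inside the anonymous RelVisitor and only inspected after the traversal has finished, which works because the final AtomicBoolean can be both captured by the inner class and mutated. A minimal sketch of that visitor-flag idiom, with illustrative types in place of Calcite's RelNode and RelVisitor:

import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Consumer;

public class VisitorFlagDemo {
    // Illustrative stand-in for a relational-plan node.
    interface Node {
        String kind();
    }

    static void visitAll(List<Node> nodes, Consumer<Node> visitor) {
        nodes.forEach(visitor);
    }

    public static void main(String[] args) {
        Node viewScan = () -> "QuarkViewScan";
        Node tableScan = () -> "TableScan";
        List<Node> plan = List.of(viewScan, tableScan);

        AtomicBoolean foundNonQuarkScan = new AtomicBoolean(false);
        visitAll(plan, node -> {
            if ("TableScan".equals(node.kind())) {
                foundNonQuarkScan.set(true); // remembered after the traversal returns
            }
        });

        System.out.println("query fully optimized: " + !foundNonQuarkScan.get());
    }
}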