Example usage for java.util.concurrent.atomic.AtomicInteger.get()

Introduction

This page collects example usages of java.util.concurrent.atomic.AtomicInteger.get(), drawn from open source projects.

Prototype

public final int get() 

Document

Returns the current value, with memory effects as specified by VarHandle#getVolatile.
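
As a quick, self-contained illustration of the documented behavior (this snippet is not taken from the projects below), one thread performs a series of increments and another reads the result with get():

import java.util.concurrent.atomic.AtomicInteger;

public class AtomicIntegerGetExample {
    public static void main(String[] args) throws InterruptedException {
        final AtomicInteger counter = new AtomicInteger(0);

        // Writer thread: performs ten increments.
        Thread writer = new Thread(() -> {
            for (int i = 0; i < 10; i++) {
                counter.incrementAndGet();
            }
        });
        writer.start();
        writer.join();

        // get() returns the current value; the volatile read (together with the join above)
        // guarantees the writer's increments are visible here.
        System.out.println("current value = " + counter.get()); // prints 10
    }
}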

Usage

From source file:com.btoddb.fastpersitentqueue.FpqIT.java

@Test
public void testThreading() throws Exception {
    final int numEntries = 1000;
    final int numPushers = 4;
    final int numPoppers = 4;
    final int entrySize = 1000;
    fpq1.setMaxTransactionSize(2000);
    final int popBatchSize = 100;
    fpq1.setMaxMemorySegmentSizeInBytes(10000000);
    fpq1.setMaxJournalFileSize(10000000);
    fpq1.setMaxJournalDurationInMs(30000);
    fpq1.setFlushPeriodInMs(1000);
    fpq1.setNumberOfFlushWorkers(4);

    final Random pushRand = new Random(1000L);
    final Random popRand = new Random(1000000L);
    final AtomicInteger pusherFinishCount = new AtomicInteger();
    final AtomicInteger numPops = new AtomicInteger();
    final AtomicLong counter = new AtomicLong();
    final AtomicLong pushSum = new AtomicLong();
    final AtomicLong popSum = new AtomicLong();

    fpq1.init();

    ExecutorService execSrvc = Executors.newFixedThreadPool(numPushers + numPoppers);

    Set<Future> futures = new HashSet<Future>();

    // start pushing
    for (int i = 0; i < numPushers; i++) {
        Future future = execSrvc.submit(new Runnable() {
            @Override
            public void run() {
                for (int i = 0; i < numEntries; i++) {
                    try {
                        long x = counter.getAndIncrement();
                        pushSum.addAndGet(x);
                        ByteBuffer bb = ByteBuffer.wrap(new byte[entrySize]);
                        bb.putLong(x);

                        fpq1.beginTransaction();
                        fpq1.push(bb.array());
                        fpq1.commit();
                        if ((x + 1) % 500 == 0) {
                            System.out.println("pushed ID = " + x);
                        }
                        Thread.sleep(pushRand.nextInt(5));
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                }
                pusherFinishCount.incrementAndGet();
            }
        });
        futures.add(future);
    }

    // start popping
    for (int i = 0; i < numPoppers; i++) {
        Future future = execSrvc.submit(new Runnable() {
            @Override
            public void run() {
                while (pusherFinishCount.get() < numPushers || !fpq1.isEmpty()) {
                    try {
                        fpq1.beginTransaction();
                        try {
                            Collection<FpqEntry> entries = fpq1.pop(popBatchSize);
                            if (null == entries) {
                                Thread.sleep(100);
                                continue;
                            }

                            for (FpqEntry entry : entries) {
                                ByteBuffer bb = ByteBuffer.wrap(entry.getData());
                                popSum.addAndGet(bb.getLong());
                                if (entry.getId() % 500 == 0) {
                                    System.out.println("popped ID = " + entry.getId());
                                }
                            }
                            numPops.addAndGet(entries.size());
                            fpq1.commit();
                            entries.clear();
                        } finally {
                            if (fpq1.isTransactionActive()) {
                                fpq1.rollback();
                            }
                        }
                        Thread.sleep(popRand.nextInt(10));
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                }
            }
        });
        futures.add(future);
    }

    boolean finished = false;
    while (!finished) {
        try {
            for (Future f : futures) {
                f.get();
            }
            finished = true;
        } catch (InterruptedException e) {
            // ignore
            Thread.interrupted();
        }
    }

    assertThat(numPops.get(), is(numEntries * numPushers));
    assertThat(fpq1.getNumberOfEntries(), is(0L));
    assertThat(pushSum.get(), is(popSum.get()));
    assertThat(fpq1.getMemoryMgr().getNumberOfActiveSegments(), is(1));
    assertThat(fpq1.getMemoryMgr().getSegments(), hasSize(1));
    assertThat(fpq1.getJournalMgr().getJournalFiles().entrySet(), hasSize(1));
    assertThat(FileUtils.listFiles(fpq1.getPagingDirectory(), TrueFileFilter.INSTANCE, TrueFileFilter.INSTANCE),
            is(empty()));
    assertThat(
            FileUtils.listFiles(fpq1.getJournalDirectory(), TrueFileFilter.INSTANCE, TrueFileFilter.INSTANCE),
            hasSize(1));
}
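
The popper threads above call pusherFinishCount.get() in their loop condition to detect when every pusher is done and the queue has drained. A stripped-down sketch of that coordination pattern, with a plain BlockingQueue standing in for the FPQ (the class name and the simulated work are illustrative, not part of the original test):

import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicInteger;

public class CompletionFlagSketch {
    public static void main(String[] args) throws Exception {
        final int numProducers = 4;
        final AtomicInteger producersFinished = new AtomicInteger();
        final BlockingQueue<Integer> queue = new LinkedBlockingQueue<>();
        ExecutorService pool = Executors.newCachedThreadPool();

        for (int i = 0; i < numProducers; i++) {
            pool.submit(() -> {
                for (int j = 0; j < 100; j++) {
                    queue.add(j);
                }
                producersFinished.incrementAndGet(); // signal "this producer is done"
            });
        }

        pool.submit(() -> {
            // Keep draining until every producer has reported done AND the queue is empty,
            // mirroring the loop condition used by the popper threads in the test above.
            while (producersFinished.get() < numProducers || !queue.isEmpty()) {
                Integer item = queue.poll();
                if (item == null) {
                    Thread.yield();
                }
            }
            System.out.println("consumer finished draining");
        });

        pool.shutdown();
        pool.awaitTermination(10, TimeUnit.SECONDS);
    }
}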

From source file:com.couchbase.client.core.endpoint.query.QueryHandlerTest.java

@Test
public void shouldDecodeOneRowResponseWithQuotesInClientIdAndResults() throws Exception {
    String expectedClientIdWithQuotes = "ThisIsA\\\"Client\\\"Id";

    String response = Resources.read("with_escaped_quotes.json", this.getClass());
    HttpResponse responseHeader = new DefaultHttpResponse(HttpVersion.HTTP_1_1,
            new HttpResponseStatus(200, "OK"));
    HttpContent responseChunk = new DefaultLastHttpContent(Unpooled.copiedBuffer(response, CharsetUtil.UTF_8));

    GenericQueryRequest requestMock = mock(GenericQueryRequest.class);
    queue.add(requestMock);
    channel.writeInbound(responseHeader, responseChunk);
    latch.await(1, TimeUnit.SECONDS);
    assertEquals(1, firedEvents.size());
    GenericQueryResponse inbound = (GenericQueryResponse) firedEvents.get(0);

    final AtomicInteger invokeCounter1 = new AtomicInteger();
    assertResponse(inbound, true, ResponseStatus.SUCCESS, FAKE_REQUESTID, expectedClientIdWithQuotes, "success",
            FAKE_SIGNATURE, new Action1<ByteBuf>() {
                @Override
                public void call(ByteBuf buf) {
                    invokeCounter1.incrementAndGet();
                    String response = buf.toString(CharsetUtil.UTF_8);
                    try {
                        Map found = mapper.readValue(response, Map.class);
                        assertEquals(12, found.size());
                        assertEquals("San Francisco", found.get("city"));
                        assertEquals("United States", found.get("country"));
                        Map geo = (Map) found.get("geo");
                        assertNotNull(geo);
                        assertEquals(3, geo.size());
                        assertEquals("ROOFTOP", geo.get("accuracy"));
                        //TODO check the quote in the result
                    } catch (IOException e) {
                        assertFalse(true);
                    }
                }
            }, new Action1<ByteBuf>() {
                @Override
                public void call(ByteBuf buf) {
                    fail("no error expected");
                }
            }, expectedMetricsCounts(0, 1));
    assertEquals(1, invokeCounter1.get());
}
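
invokeCounter1 above is incremented inside the row callback and then read with get() to assert that the callback ran exactly once. The same idea in isolation, using a made-up callback API rather than the Couchbase query handler:

import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Consumer;

public class CallbackCountSketch {
    // Hypothetical method that invokes a callback once per result row.
    static void forEachRow(String[] rows, Consumer<String> callback) {
        for (String row : rows) {
            callback.accept(row);
        }
    }

    public static void main(String[] args) {
        final AtomicInteger invocations = new AtomicInteger();

        forEachRow(new String[] { "row-1" }, row -> {
            invocations.incrementAndGet();
            System.out.println("got " + row);
        });

        // get() is read afterwards to verify the callback fired exactly once.
        if (invocations.get() != 1) {
            throw new AssertionError("expected exactly one invocation, got " + invocations.get());
        }
    }
}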

From source file:com.vmware.admiral.adapter.docker.service.DockerAdapterService.java

@SuppressWarnings("unchecked")
private void processCreateContainer(RequestContext context, int retriesCount) {
    AssertUtil.assertNotEmpty(context.containerState.names, "containerState.names");

    String fullImageName = DockerImage.fromImageName(context.containerDescription.image).toString();

    CommandInput createCommandInput = new CommandInput(context.commandInput)
            .withProperty(DOCKER_CONTAINER_IMAGE_PROP_NAME, fullImageName)
            .withProperty(DOCKER_CONTAINER_TTY_PROP_NAME, true)
            .withProperty(DOCKER_CONTAINER_OPEN_STDIN_PROP_NAME, true)
            .withPropertyIfNotNull(DOCKER_CONTAINER_COMMAND_PROP_NAME,
                    CommandUtil.spread(context.containerDescription.command))
            .withProperty(DOCKER_CONTAINER_NAME_PROP_NAME, context.containerState.names.get(0))
            .withPropertyIfNotNull(DOCKER_CONTAINER_ENV_PROP_NAME, context.containerState.env)
            .withPropertyIfNotNull(DOCKER_CONTAINER_USER_PROP_NAME, context.containerDescription.user)
            .withPropertyIfNotNull(DOCKER_CONTAINER_ENTRYPOINT_PROP_NAME,
                    context.containerDescription.entryPoint)
            .withPropertyIfNotNull(DOCKER_CONTAINER_HOSTNAME_PROP_NAME, context.containerDescription.hostname)
            .withPropertyIfNotNull(DOCKER_CONTAINER_DOMAINNAME_PROP_NAME,
                    context.containerDescription.domainName)
            .withPropertyIfNotNull(DOCKER_CONTAINER_WORKING_DIR_PROP_NAME,
                    context.containerDescription.workingDir);

    Map<String, Object> hostConfig = getOrAddMap(createCommandInput, DOCKER_CONTAINER_HOST_CONFIG_PROP_NAME);

    hostConfig.put(MEMORY_SWAP_PROP_NAME, context.containerDescription.memorySwapLimit);

    hostConfig.put(MEMORY_PROP_NAME, context.containerState.memoryLimit);
    hostConfig.put(CPU_SHARES_PROP_NAME, context.containerState.cpuShares);

    // TODO Can't limit the storage? https://github.com/docker/docker/issues/3804

    hostConfig.put(DNS_PROP_NAME, context.containerDescription.dns);
    hostConfig.put(DNS_SEARCH_PROP_NAME, context.containerDescription.dnsSearch);
    hostConfig.put(EXTRA_HOSTS_PROP_NAME, context.containerState.extraHosts);

    // the volumes are added as binds property
    hostConfig.put(BINDS_PROP_NAME, filterVolumeBindings(context.containerState.volumes));
    hostConfig.put(VOLUME_DRIVER, context.containerDescription.volumeDriver);
    hostConfig.put(CAP_ADD_PROP_NAME, context.containerDescription.capAdd);
    hostConfig.put(CAP_DROP_PROP_NAME, context.containerDescription.capDrop);
    hostConfig.put(NETWORK_MODE_PROP_NAME, context.containerDescription.networkMode);
    hostConfig.put(LINKS_PROP_NAME, context.containerState.links);
    hostConfig.put(PRIVILEGED_PROP_NAME, context.containerDescription.privileged);
    hostConfig.put(PID_MODE_PROP_NAME, context.containerDescription.pidMode);

    if (context.containerDescription.publishAll != null) {
        hostConfig.put(PUBLISH_ALL, context.containerDescription.publishAll);
    }

    // Mapping properties from containerState to the docker config:
    hostConfig.put(VOLUMES_FROM_PROP_NAME, context.containerState.volumesFrom);

    // Add the first container network to avoid the container being connected to the default network.
    // Other container networks will be added after the container is created.
    // Docker APIs fail if more than one network is added to the container when it is created.
    if (context.containerState.networks != null && !context.containerState.networks.isEmpty()) {
        createNetworkConfig(createCommandInput, context.containerState.networks.entrySet().iterator().next());
    }

    if (context.containerState.ports != null) {
        addPortBindings(createCommandInput, context.containerState.ports);
    }

    if (context.containerDescription.logConfig != null) {
        addLogConfiguration(createCommandInput, context.containerDescription.logConfig);
    }

    if (context.containerDescription.restartPolicy != null) {
        Map<String, Object> restartPolicy = new HashMap<>();
        restartPolicy.put(RESTART_POLICY_NAME_PROP_NAME, context.containerDescription.restartPolicy);
        if (context.containerDescription.maximumRetryCount != null
                && context.containerDescription.maximumRetryCount != 0) {
            restartPolicy.put(RESTART_POLICY_RETRIES_PROP_NAME, context.containerDescription.maximumRetryCount);
        }
        hostConfig.put(RESTART_POLICY_PROP_NAME, restartPolicy);
    }

    if (context.containerState.volumes != null) {
        Map<String, Object> volumeMap = new HashMap<>();
        for (String volume : context.containerState.volumes) {
            // docker expects each volume to be mapped to an empty object (an empty map)
            // where the key is the container_path (second element in the volume string)
            String containerPart = VolumeBinding.fromString(volume).getContainerPart();
            volumeMap.put(containerPart, Collections.emptyMap());
        }

        createCommandInput.withProperty(DOCKER_CONTAINER_VOLUMES_PROP_NAME, volumeMap);
    }

    if (context.containerDescription.device != null) {
        List<?> devices = Arrays.stream(context.containerDescription.device)
                .map(deviceStr -> DockerDevice.fromString(deviceStr).toMap()).collect(Collectors.toList());

        hostConfig.put(DEVICES_PROP_NAME, devices);
    }

    // copy custom properties
    if (context.containerState.customProperties != null) {
        for (Map.Entry<String, String> customProperty : context.containerState.customProperties.entrySet()) {
            createCommandInput.withProperty(customProperty.getKey(), customProperty.getValue());
        }
    }

    if (ContainerHostUtil.isVicHost(context.computeState)) {
        // VIC requires several mandatory elements, add them
        addVicRequiredConfig(createCommandInput);
    }

    AtomicInteger retryCount = new AtomicInteger(retriesCount);
    ensurePropertyExists((retryCountProperty) -> {
        context.executor.createContainer(createCommandInput, (o, ex) -> {
            if (ex != null) {
                if (shouldTryCreateFromLocalImage(context.containerDescription)) {
                    logInfo("Unable to create container using local image. Will be fetched from a remote "
                            + "location...");
                    context.containerDescription.customProperties
                            .put(DOCKER_CONTAINER_CREATE_USE_LOCAL_IMAGE_WITH_PRIORITY, "false");
                    processContainerDescription(context);
                } else if (RETRIABLE_HTTP_STATUSES.contains(o.getStatusCode())
                        && retryCount.getAndIncrement() < retryCountProperty) {
                    logWarning("Provisioning for container %s failed with %s. Retries left %d",
                            context.containerState.names.get(0), Utils.toString(ex),
                            retryCountProperty - retryCount.get());
                    processCreateContainer(context, retryCount.get());
                } else {
                    fail(context.request, o, ex);
                }
            } else {
                handleExceptions(context.request, context.operation, () -> {
                    Map<String, Object> body = o.getBody(Map.class);
                    context.containerState.id = (String) body.get(DOCKER_CONTAINER_ID_PROP_NAME);
                    processCreatedContainer(context);
                });
            }
        });
    });
}
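
Here retryCount wraps the retriesCount argument so the asynchronous completion handler can both advance it with getAndIncrement() and report the remaining attempts via retryCountProperty - retryCount.get(). A simplified sketch of that bounded-retry pattern, with a made-up flaky operation standing in for the Docker call:

import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicInteger;

public class BoundedRetrySketch {
    private static final int MAX_RETRIES = 3;

    // Stand-in for a call that may fail transiently.
    static boolean flakyOperation() {
        return ThreadLocalRandom.current().nextInt(4) == 0;
    }

    static void runWithRetries(AtomicInteger retryCount) {
        if (flakyOperation()) {
            System.out.println("succeeded after " + retryCount.get() + " retries");
        } else if (retryCount.getAndIncrement() < MAX_RETRIES) {
            // get() reports how many attempts remain before giving up.
            System.out.println("failed, retries left: " + (MAX_RETRIES - retryCount.get()));
            runWithRetries(retryCount); // retry, like the recursive processCreateContainer call above
        } else {
            System.out.println("giving up after " + MAX_RETRIES + " retries");
        }
    }

    public static void main(String[] args) {
        runWithRetries(new AtomicInteger(0));
    }
}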

From source file:org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.java

/**
 * Create an HRegion with the result of a WAL split and test we only see the
 * good edits.
 * @throws Exception
 */
@Test
public void testReplayEditsWrittenIntoWAL() throws Exception {
    final TableName tableName = TableName.valueOf("testReplayEditsWrittenIntoWAL");
    final MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
    final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
    final Path basedir = FSUtils.getTableDir(hbaseRootDir, tableName);
    deleteDir(basedir);

    final HTableDescriptor htd = createBasic3FamilyHTD(tableName);
    HRegion region2 = HBaseTestingUtility.createRegionAndWAL(hri, hbaseRootDir, this.conf, htd);
    HBaseTestingUtility.closeRegionAndWAL(region2);
    final WAL wal = createWAL(this.conf, hbaseRootDir, logName);
    final byte[] rowName = tableName.getName();
    final byte[] regionName = hri.getEncodedNameAsBytes();

    // Add 1k to each family.
    final int countPerFamily = 1000;
    Set<byte[]> familyNames = new HashSet<byte[]>();
    NavigableMap<byte[], Integer> scopes = new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);
    for (byte[] fam : htd.getFamiliesKeys()) {
        scopes.put(fam, 0);
    }
    for (HColumnDescriptor hcd : htd.getFamilies()) {
        addWALEdits(tableName, hri, rowName, hcd.getName(), countPerFamily, ee, wal, htd, mvcc, scopes);
        familyNames.add(hcd.getName());
    }

    // Add a cache flush, shouldn't have any effect
    wal.startCacheFlush(regionName, familyNames);
    wal.completeCacheFlush(regionName);

    // Add an edit to another family, should be skipped.
    WALEdit edit = new WALEdit();
    long now = ee.currentTime();
    edit.add(new KeyValue(rowName, Bytes.toBytes("another family"), rowName, now, rowName));
    wal.append(hri, new WALKey(hri.getEncodedNameAsBytes(), tableName, now, mvcc, scopes), edit, true);

    // Delete the c family to verify deletes make it over.
    edit = new WALEdit();
    now = ee.currentTime();
    edit.add(new KeyValue(rowName, Bytes.toBytes("c"), null, now, KeyValue.Type.DeleteFamily));
    wal.append(hri, new WALKey(hri.getEncodedNameAsBytes(), tableName, now, mvcc, scopes), edit, true);

    // Sync.
    wal.sync();
    // Make a new conf and a new fs for the splitter to run on so we can take
    // over old wal.
    final Configuration newConf = HBaseConfiguration.create(this.conf);
    User user = HBaseTestingUtility.getDifferentUser(newConf, ".replay.wal.secondtime");
    user.runAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            runWALSplit(newConf);
            FileSystem newFS = FileSystem.get(newConf);
            // 100k seems to make for about 4 flushes during HRegion#initialize.
            newConf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024 * 100);
            // Make a new wal for new region.
            WAL newWal = createWAL(newConf, hbaseRootDir, logName);
            final AtomicInteger flushcount = new AtomicInteger(0);
            try {
                final HRegion region = new HRegion(basedir, newWal, newFS, newConf, hri, htd, null) {
                    @Override
                    protected FlushResult internalFlushcache(final WAL wal, final long myseqid,
                            final Collection<Store> storesToFlush, MonitoredTask status,
                            boolean writeFlushWalMarker) throws IOException {
                        LOG.info("InternalFlushCache Invoked");
                        FlushResult fs = super.internalFlushcache(wal, myseqid, storesToFlush,
                                Mockito.mock(MonitoredTask.class), writeFlushWalMarker);
                        flushcount.incrementAndGet();
                        return fs;
                    }
                };
                // The seq id this region has opened up with
                long seqid = region.initialize();

                // The mvcc readpoint from inserting data.
                long writePoint = mvcc.getWritePoint();

                // We flushed during init.
                assertTrue("Flushcount=" + flushcount.get(), flushcount.get() > 0);
                assertTrue((seqid - 1) == writePoint);

                Get get = new Get(rowName);
                Result result = region.get(get);
                // Make sure we only see the good edits
                assertEquals(countPerFamily * (htd.getFamilies().size() - 1), result.size());
                region.close();
            } finally {
                newWal.close();
            }
            return null;
        }
    });
}

From source file:org.apache.druid.indexing.kafka.supervisor.KafkaSupervisor.java

/**
 * This method does two things:
 * 1. Makes sure the checkpoint information in the taskGroup is consistent with that of the tasks; if not, kills the
 * inconsistent tasks.
 * 2. Truncates the checkpoints in the taskGroup for which segments have already been published, so that any newly
 * created tasks for the taskGroup start indexing from after the latest published offsets.
 */
private void verifyAndMergeCheckpoints(final TaskGroup taskGroup) {
    final int groupId = taskGroup.groupId;
    final List<Pair<String, TreeMap<Integer, Map<Integer, Long>>>> taskSequences = new ArrayList<>();
    final List<ListenableFuture<TreeMap<Integer, Map<Integer, Long>>>> futures = new ArrayList<>();
    final List<String> taskIds = new ArrayList<>();

    for (String taskId : taskGroup.taskIds()) {
        final ListenableFuture<TreeMap<Integer, Map<Integer, Long>>> checkpointsFuture = taskClient
                .getCheckpointsAsync(taskId, true);
        taskIds.add(taskId);
        futures.add(checkpointsFuture);
    }

    try {
        List<TreeMap<Integer, Map<Integer, Long>>> futuresResult = Futures.successfulAsList(futures)
                .get(futureTimeoutInSeconds, TimeUnit.SECONDS);

        for (int i = 0; i < futuresResult.size(); i++) {
            final TreeMap<Integer, Map<Integer, Long>> checkpoints = futuresResult.get(i);
            final String taskId = taskIds.get(i);
            if (checkpoints == null) {
                try {
                    // catch the exception in failed futures
                    futures.get(i).get();
                } catch (Exception e) {
                    log.error(e, "Problem while getting checkpoints for task [%s], killing the task", taskId);
                    killTask(taskId);
                    taskGroup.tasks.remove(taskId);
                }
            } else if (checkpoints.isEmpty()) {
                log.warn("Ignoring task [%s], as it probably has not started running yet", taskId);
            } else {
                taskSequences.add(new Pair<>(taskId, checkpoints));
            }
        }
    } catch (Exception e) {
        throw new RuntimeException(e);
    }

    final KafkaDataSourceMetadata latestDataSourceMetadata = (KafkaDataSourceMetadata) indexerMetadataStorageCoordinator
            .getDataSourceMetadata(dataSource);
    final boolean hasValidOffsetsFromDb = latestDataSourceMetadata != null
            && latestDataSourceMetadata.getKafkaPartitions() != null
            && ioConfig.getTopic().equals(latestDataSourceMetadata.getKafkaPartitions().getTopic());
    final Map<Integer, Long> latestOffsetsFromDb;
    if (hasValidOffsetsFromDb) {
        latestOffsetsFromDb = latestDataSourceMetadata.getKafkaPartitions().getPartitionOffsetMap();
    } else {
        latestOffsetsFromDb = null;
    }

    // order tasks of this taskGroup by the latest sequenceId
    taskSequences.sort((o1, o2) -> o2.rhs.firstKey().compareTo(o1.rhs.firstKey()));

    final Set<String> tasksToKill = new HashSet<>();
    final AtomicInteger earliestConsistentSequenceId = new AtomicInteger(-1);
    int taskIndex = 0;

    while (taskIndex < taskSequences.size()) {
        TreeMap<Integer, Map<Integer, Long>> taskCheckpoints = taskSequences.get(taskIndex).rhs;
        String taskId = taskSequences.get(taskIndex).lhs;
        if (earliestConsistentSequenceId.get() == -1) {
            // find the first replica task with earliest sequenceId consistent with datasource metadata in the metadata
            // store
            if (taskCheckpoints.entrySet().stream()
                    .anyMatch(sequenceCheckpoint -> sequenceCheckpoint.getValue().entrySet().stream()
                            .allMatch(partitionOffset -> Longs.compare(partitionOffset.getValue(),
                                    latestOffsetsFromDb == null ? partitionOffset.getValue()
                                            : latestOffsetsFromDb.getOrDefault(partitionOffset.getKey(),
                                                    partitionOffset.getValue())) == 0)
                            && earliestConsistentSequenceId.compareAndSet(-1, sequenceCheckpoint.getKey()))
                    || (pendingCompletionTaskGroups.getOrDefault(groupId, EMPTY_LIST).size() > 0
                            && earliestConsistentSequenceId.compareAndSet(-1, taskCheckpoints.firstKey()))) {
                final SortedMap<Integer, Map<Integer, Long>> latestCheckpoints = new TreeMap<>(
                        taskCheckpoints.tailMap(earliestConsistentSequenceId.get()));
                log.info("Setting taskGroup sequences to [%s] for group [%d]", latestCheckpoints, groupId);
                taskGroup.sequenceOffsets.clear();
                taskGroup.sequenceOffsets.putAll(latestCheckpoints);
            } else {
                log.debug("Adding task [%s] to kill list, checkpoints[%s], latestoffsets from DB [%s]", taskId,
                        taskCheckpoints, latestOffsetsFromDb);
                tasksToKill.add(taskId);
            }
        } else {
            // check consistency with taskGroup sequences
            if (taskCheckpoints.get(taskGroup.sequenceOffsets.firstKey()) == null
                    || !(taskCheckpoints.get(taskGroup.sequenceOffsets.firstKey())
                            .equals(taskGroup.sequenceOffsets.firstEntry().getValue()))
                    || taskCheckpoints.tailMap(taskGroup.sequenceOffsets.firstKey())
                            .size() != taskGroup.sequenceOffsets.size()) {
                log.debug("Adding task [%s] to kill list, checkpoints[%s], taskgroup checkpoints [%s]", taskId,
                        taskCheckpoints, taskGroup.sequenceOffsets);
                tasksToKill.add(taskId);
            }
        }
        taskIndex++;
    }

    if ((tasksToKill.size() > 0 && tasksToKill.size() == taskGroup.tasks.size()) || (taskGroup.tasks.size() == 0
            && pendingCompletionTaskGroups.getOrDefault(groupId, EMPTY_LIST).size() == 0)) {
        // Killing all tasks, or no task left in the group?
        // Clear state about the taskGroup so that the latest offset information is fetched from the metadata store.
        log.warn("Clearing task group [%d] information as no valid tasks are left in the group", groupId);
        taskGroups.remove(groupId);
        partitionGroups.get(groupId).replaceAll((partition, offset) -> NOT_SET);
    }

    taskSequences.stream().filter(taskIdSequences -> tasksToKill.contains(taskIdSequences.lhs))
            .forEach(sequenceCheckpoint -> {
                log.warn(
                        "Killing task [%s], as its checkpoints [%s] are not consistent with group checkpoints[%s] or latest "
                                + "persisted offsets in metadata store [%s]",
                        sequenceCheckpoint.lhs, sequenceCheckpoint.rhs, taskGroup.sequenceOffsets,
                        latestOffsetsFromDb);
                killTask(sequenceCheckpoint.lhs);
                taskGroup.tasks.remove(sequenceCheckpoint.lhs);
            });
}
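
earliestConsistentSequenceId above starts at -1, is set at most once through compareAndSet(-1, ...) while the candidate checkpoints are scanned, and is then read back with get(). A compact sketch of that set-once sentinel pattern (the candidate list and the filter are made up for illustration):

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

public class SetOnceSentinelSketch {
    public static void main(String[] args) {
        final AtomicInteger firstMatch = new AtomicInteger(-1); // -1 means "not found yet"
        List<Integer> candidateSequenceIds = Arrays.asList(7, 3, 3, 9);

        // compareAndSet(-1, id) succeeds only for the first candidate that passes the check,
        // so later matches leave the sentinel untouched.
        candidateSequenceIds.stream()
                .filter(id -> id % 2 == 1) // stand-in for the real consistency check
                .forEach(id -> firstMatch.compareAndSet(-1, id));

        if (firstMatch.get() == -1) {
            System.out.println("no consistent sequence id found");
        } else {
            System.out.println("earliest consistent sequence id = " + firstMatch.get());
        }
    }
}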

From source file:net.sourceforge.pmd.ant.internal.PMDTaskImpl.java

private void doTask() {
    setupClassLoader();

    // Setup RuleSetFactory and validate RuleSets
    final ResourceLoader rl = setupResourceLoader();
    RuleSetFactory ruleSetFactory = RulesetsFactoryUtils.getRulesetFactory(configuration, rl);

    try {
        // This is just used to validate and display rules. Each thread will create its own ruleset
        String ruleSets = configuration.getRuleSets();
        if (StringUtils.isNotBlank(ruleSets)) {
            // Substitute env variables/properties
            configuration.setRuleSets(project.replaceProperties(ruleSets));
        }
        RuleSets rules = ruleSetFactory.createRuleSets(configuration.getRuleSets());
        logRulesUsed(rules);
    } catch (RuleSetNotFoundException e) {
        throw new BuildException(e.getMessage(), e);
    }

    if (configuration.getSuppressMarker() != null) {
        project.log("Setting suppress marker to be " + configuration.getSuppressMarker(), Project.MSG_VERBOSE);
    }

    // Start the Formatters
    for (Formatter formatter : formatters) {
        project.log("Sending a report to " + formatter, Project.MSG_VERBOSE);
        formatter.start(project.getBaseDir().toString());
    }

    // log("Setting Language Version " + languageVersion.getShortName(),
    // Project.MSG_VERBOSE);

    // TODO Do we really need all this in a loop over each FileSet? Seems
    // like a lot of redundancy
    RuleContext ctx = new RuleContext();
    Report errorReport = new Report();
    final AtomicInteger reportSize = new AtomicInteger();
    final String separator = System.getProperty("file.separator");

    for (FileSet fs : filesets) {
        List<DataSource> files = new LinkedList<>();
        DirectoryScanner ds = fs.getDirectoryScanner(project);
        String[] srcFiles = ds.getIncludedFiles();
        for (String srcFile : srcFiles) {
            File file = new File(ds.getBasedir() + separator + srcFile);
            files.add(new FileDataSource(file));
        }

        final String inputPaths = ds.getBasedir().getPath();
        configuration.setInputPaths(inputPaths);

        Renderer logRenderer = new AbstractRenderer("log", "Logging renderer") {
            @Override
            public void start() {
                // Nothing to do
            }

            @Override
            public void startFileAnalysis(DataSource dataSource) {
                project.log("Processing file " + dataSource.getNiceFileName(false, inputPaths),
                        Project.MSG_VERBOSE);
            }

            @Override
            public void renderFileReport(Report r) {
                int size = r.size();
                if (size > 0) {
                    reportSize.addAndGet(size);
                }
            }

            @Override
            public void end() {
                // Nothing to do
            }

            @Override
            public String defaultFileExtension() {
                return null;
            } // not relevant
        };
        List<Renderer> renderers = new ArrayList<>(formatters.size() + 1);
        renderers.add(logRenderer);
        for (Formatter formatter : formatters) {
            renderers.add(formatter.getRenderer());
        }
        try {
            PMD.processFiles(configuration, ruleSetFactory, files, ctx, renderers);
        } catch (RuntimeException pmde) {
            handleError(ctx, errorReport, pmde);
        }
    }

    int problemCount = reportSize.get();
    project.log(problemCount + " problems found", Project.MSG_VERBOSE);

    for (Formatter formatter : formatters) {
        formatter.end(errorReport);
    }

    if (failuresPropertyName != null && problemCount > 0) {
        project.setProperty(failuresPropertyName, String.valueOf(problemCount));
        project.log("Setting property " + failuresPropertyName + " to " + problemCount, Project.MSG_VERBOSE);
    }

    if (failOnRuleViolation && problemCount > maxRuleViolations) {
        throw new BuildException(
                "Stopping build since PMD found " + problemCount + " rule violations in the code");
    }
}

From source file:org.apache.hadoop.hbase.regionserver.wal.TestHLogSplit.java

@Test(timeout = 300000)
public void testTerminationAskedByReporter() throws IOException, CorruptedLogFileException {
    generateHLogs(1, 10, -1);
    FileStatus logfile = fs.listStatus(HLOGDIR)[0];
    fs.initialize(fs.getUri(), conf);

    final AtomicInteger count = new AtomicInteger();

    CancelableProgressable localReporter = new CancelableProgressable() {
        @Override
        public boolean progress() {
            count.getAndIncrement();
            return false;
        }
    };

    FileSystem spiedFs = Mockito.spy(fs);
    Mockito.doAnswer(new Answer<FSDataInputStream>() {
        public FSDataInputStream answer(InvocationOnMock invocation) throws Throwable {
            Thread.sleep(1500); // Sleep a while to let the report status be invoked
            return (FSDataInputStream) invocation.callRealMethod();
        }
    }).when(spiedFs).open(Mockito.<Path>any(), Mockito.anyInt());

    try {
        conf.setInt("hbase.splitlog.report.period", 1000);
        boolean ret = HLogSplitter.splitLogFile(HBASEDIR, logfile, spiedFs, conf, localReporter, null, null,
                null);
        assertFalse("Log splitting should have failed", ret);
        assertTrue(count.get() > 0);
    } catch (IOException e) {
        fail("There shouldn't be any exception but: " + e.toString());
    } finally {
        // reset it back to its default value
        conf.setInt("hbase.splitlog.report.period", 59000);
    }
}

From source file:org.dasein.cloud.azure.tests.network.AzureIpAddressSupportTest.java

@Test
public void stopForwardToServerShouldPostCorrectRequest() throws CloudException, InternalException {
    final AtomicInteger putCount = new AtomicInteger(0);
    new MockUp<CloseableHttpClient>() {
        @Mock(invocations = 2)
        public CloseableHttpResponse execute(Invocation inv, HttpUriRequest request) throws IOException {
            if (request.getMethod().equals("GET")) {
                DaseinObjectToXmlEntity<PersistentVMRoleModel> daseinEntity = new DaseinObjectToXmlEntity<PersistentVMRoleModel>(
                        createPersistentVMRoleModelWithEndpoint());
                assertGet(request, EXPECTED_URL,
                        new Header[] { new BasicHeader("x-ms-version", "2012-03-01") });
                return getHttpResponseMock(getStatusLineMock(HttpServletResponse.SC_OK), daseinEntity,
                        new Header[] { new BasicHeader("x-ms-request-id", UUID.randomUUID().toString()) });
            } else if (request.getMethod().equals("PUT")) {
                putCount.incrementAndGet();
                PersistentVMRoleModel persistentVMRoleModel = createPersistentVMRoleModelWithoutEndpoint();
                // set an empty list, otherwise the unitils assertion fails because one side is null while the other is an empty list
                persistentVMRoleModel.getConfigurationSets().get(0)
                        .setInputEndpoints(new ArrayList<PersistentVMRoleModel.InputEndpoint>());
                assertPut(request, EXPECTED_URL, new Header[] { new BasicHeader("x-ms-version", "2012-03-01") },
                        persistentVMRoleModel);
                return getHttpResponseMock(getStatusLineMock(HttpServletResponse.SC_ACCEPTED), null,
                        new Header[] { new BasicHeader("x-ms-request-id", UUID.randomUUID().toString()) });
            } else {
                throw new IOException("Request is not mocked");
            }
        }
    };
    String ruleId = new AzureRuleIdParts(VM_ID, Protocol.TCP.toString(), String.valueOf(PRIVATE_PORT))
            .toProviderId();
    ipAddressSupport.stopForwardToServer(ruleId, VM_ID);
    assertEquals("PUT count doesn't match", 1, putCount.get());
}

From source file:org.dataconservancy.packaging.tool.integration.PackageGenerationTest.java

@Test
public void complexPropertiesTest() throws Exception {
    PackageState initialState = initializer.initialize(DCS_PROFILE);

    OpenedPackage opened = packager.createPackage(initialState, folder.getRoot());

    DomainProfileService profileService = profileServiceFactory
            .getProfileService(opened.getPackageState().getDomainObjectRDF());

    Property creator1 = new Property(bop.getHasCreator());
    Property creator1_name = new Property(bop.getName());
    creator1_name.setStringValue("Fred");
    Property creator1_mbox = new Property(bop.getMbox());
    creator1_mbox.setStringValue("fred@mertz.org");
    creator1.setComplexValue(Arrays.asList(creator1_name, creator1_mbox));

    Property creator2 = new Property(bop.getHasCreator());
    Property creator2_name = new Property(bop.getName());
    creator2_name.setStringValue("Ethel");
    Property creator2_mbox = new Property(bop.getMbox());
    creator2_mbox.setStringValue("ethel@mertz.org");
    creator2.setComplexValue(Arrays.asList(creator2_name, creator2_mbox));

    AtomicInteger collectionCount = new AtomicInteger(0);

    /* Add two creators to each collection */
    opened.getPackageTree().walk(node -> {
        if (node.getNodeType().getDomainTypes().contains(URI.create(NS_DCS_ONTOLOGY_BOM + "Collection"))) {
            collectionCount.incrementAndGet();
            profileService.addProperty(node, creator1);
            profileService.addProperty(node, creator2);
        }
    });

    OpenedPackage afterSaveAndReopen = packager.createPackage(opened.getPackageState(), folder.getRoot());

    Set<String> initialObjects = initialState.getDomainObjectRDF().listObjects().filterKeep(RDFNode::isLiteral)
            .mapWith(RDFNode::asLiteral).mapWith(Literal::getString).toSet();
    Set<String> openedObjects = opened.getPackageState().getDomainObjectRDF().listObjects()
            .filterKeep(RDFNode::isLiteral).mapWith(RDFNode::asLiteral).mapWith(Literal::getString).toSet();
    Set<String> afterSaveAndReopenObjects = afterSaveAndReopen.getPackageState().getDomainObjectRDF()
            .listObjects().filterKeep(RDFNode::isLiteral).mapWith(RDFNode::asLiteral)
            .mapWith(Literal::getString).toSet();
    Set<String> afterSaveAndReopenCustodialObjects = custodialDomainObjects(afterSaveAndReopen).listObjects()
            .filterKeep(RDFNode::isLiteral).mapWith(RDFNode::asLiteral).mapWith(Literal::getString).toSet();

    assertFalse(initialObjects.contains(creator1_name.getStringValue()));
    assertTrue(openedObjects.contains(creator1_name.getStringValue()));
    assertTrue(openedObjects.contains(creator2_name.getStringValue()));
    assertEquals(2 * collectionCount.get(),
            opened.getPackageState().getDomainObjectRDF()
                    .listStatements(null,
                            opened.getPackageState().getDomainObjectRDF()
                                    .getProperty(creator1.getPropertyType().getDomainPredicate().toString()),

                            (RDFNode) null)
                    .toSet().size());
    assertTrue(afterSaveAndReopenObjects.contains(creator1_name.getStringValue()));
    assertTrue(afterSaveAndReopenObjects.contains(creator2_name.getStringValue()));
    assertTrue(afterSaveAndReopenCustodialObjects.contains(creator1_name.getStringValue()));
    assertTrue(afterSaveAndReopenCustodialObjects.contains(creator2_name.getStringValue()));

    assertNotEquals(domainObjectSizes(initialState), domainObjectSizes(opened.getPackageState()));
    assertEquals(domainObjectSizes(opened.getPackageState()),
            domainObjectSizes(afterSaveAndReopen.getPackageState()));

    Model custodialAfterSaveAndReopen = custodialDomainObjects(afterSaveAndReopen);

    assertEquals(afterSaveAndReopen.getPackageState().getDomainObjectRDF().listStatements().toSet().size(),
            custodialAfterSaveAndReopen.listStatements().toSet().size());
}