Example usage for java.util.concurrent ConcurrentHashMap newKeySet

Introduction

This page lists example usages of java.util.concurrent.ConcurrentHashMap.newKeySet, collected from open-source projects.

Prototype

public static <K> KeySetView<K, Boolean> newKeySet() 

Document

Creates a new Set backed by a ConcurrentHashMap from the given type to Boolean.TRUE.
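
Before the project examples below, a minimal self-contained sketch of the basic pattern may help: newKeySet() returns a thread-safe Set view that supports concurrent adds and membership checks without external locking. The class name, element type, and data here are illustrative, not taken from any of the projects below.

import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

public class NewKeySetSketch {
    public static void main(String[] args) throws InterruptedException {
        // Thread-safe Set backed by a ConcurrentHashMap; no external locking needed.
        Set<String> seen = ConcurrentHashMap.newKeySet();

        // add() returns true only for the first insertion of a given element,
        // which makes the set useful for concurrent de-duplication.
        Runnable worker = () -> {
            for (String id : new String[] { "a", "b", "a" }) {
                if (seen.add(id)) {
                    System.out.println(Thread.currentThread().getName() + " first saw " + id);
                }
            }
        };

        Thread t1 = new Thread(worker);
        Thread t2 = new Thread(worker);
        t1.start();
        t2.start();
        t1.join();
        t2.join();

        System.out.println("Distinct ids: " + seen);
    }
}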

Usage

From source file:com.github.ansell.shp.SHPDump.java

public static void main(String... args) throws Exception {
    final OptionParser parser = new OptionParser();

    final OptionSpec<Void> help = parser.accepts("help").forHelp();
    final OptionSpec<File> input = parser.accepts("input").withRequiredArg().ofType(File.class).required()
            .describedAs("The input SHP file");
    final OptionSpec<File> output = parser.accepts("output").withRequiredArg().ofType(File.class).required()
            .describedAs("The output directory to use for debugging files");
    final OptionSpec<String> outputPrefix = parser.accepts("prefix").withRequiredArg().ofType(String.class)
            .defaultsTo("shp-debug").describedAs("The output prefix to use for debugging files");
    final OptionSpec<File> outputMappingTemplate = parser.accepts("output-mapping").withRequiredArg()
            .ofType(File.class).describedAs("The output mapping template file if it needs to be generated.");
    final OptionSpec<Integer> resolution = parser.accepts("resolution").withRequiredArg().ofType(Integer.class)
            .defaultsTo(2048).describedAs("The output image file resolution");
    final OptionSpec<String> format = parser.accepts("format").withRequiredArg().ofType(String.class)
            .defaultsTo("png").describedAs("The output image format");
    final OptionSpec<String> removeIfEmpty = parser.accepts("remove-if-empty").withRequiredArg()
            .ofType(String.class).describedAs(
                    "The name of an attribute to remove if its value is empty before outputting the resulting shapefile. Use multiple times to specify multiple fields to check");

    OptionSet options = null;

    try {
        options = parser.parse(args);
    } catch (final OptionException e) {
        System.out.println(e.getMessage());
        parser.printHelpOn(System.out);
        throw e;
    }

    if (options.has(help)) {
        parser.printHelpOn(System.out);
        return;
    }

    final Path inputPath = input.value(options).toPath();
    if (!Files.exists(inputPath)) {
        throw new FileNotFoundException("Could not find input SHP file: " + inputPath.toString());
    }

    final Path outputPath = output.value(options).toPath();
    if (!Files.exists(outputPath)) {
        throw new FileNotFoundException("Output directory does not exist: " + outputPath.toString());
    }

    final Path outputMappingPath = options.has(outputMappingTemplate)
            ? outputMappingTemplate.value(options).toPath()
            : null;
    if (options.has(outputMappingTemplate) && Files.exists(outputMappingPath)) {
        throw new FileNotFoundException(
                "Output mapping template file already exists: " + outputMappingPath.toString());
    }

    final Set<String> filterFields = ConcurrentHashMap.newKeySet();
    if (options.has(removeIfEmpty)) {
        for (String nextFilterField : removeIfEmpty.values(options)) {
            System.out.println("Will filter field if empty value found: " + nextFilterField);
            filterFields.add(nextFilterField);
        }
    }

    if (!filterFields.isEmpty()) {
        System.out.println("Full set of filter fields: " + filterFields);
    }

    final String prefix = outputPrefix.value(options);

    FileDataStore store = FileDataStoreFinder.getDataStore(inputPath.toFile());

    if (store == null) {
        throw new RuntimeException("Could not read the given input as an ESRI Shapefile: "
                + inputPath.toAbsolutePath().toString());
    }

    for (String typeName : new LinkedHashSet<>(Arrays.asList(store.getTypeNames()))) {
        System.out.println("");
        System.out.println("Type: " + typeName);
        SimpleFeatureSource featureSource = store.getFeatureSource(typeName);
        SimpleFeatureType schema = featureSource.getSchema();

        Name outputSchemaName = new NameImpl(schema.getName().getNamespaceURI(),
                schema.getName().getLocalPart().replace(" ", "").replace("%20", ""));
        System.out.println("Replacing name on schema: " + schema.getName() + " with " + outputSchemaName);
        SimpleFeatureType outputSchema = SHPUtils.changeSchemaName(schema, outputSchemaName);

        List<String> attributeList = new ArrayList<>();
        for (AttributeDescriptor attribute : schema.getAttributeDescriptors()) {
            System.out.println("Attribute: " + attribute.getName().toString());
            attributeList.add(attribute.getName().toString());
        }
        CsvSchema csvSchema = CSVUtil.buildSchema(attributeList);

        SimpleFeatureCollection collection = featureSource.getFeatures();
        int featureCount = 0;
        Path nextCSVFile = outputPath.resolve(prefix + ".csv");
        Path nextSummaryCSVFile = outputPath
                .resolve(prefix + "-" + outputSchema.getTypeName() + "-Summary.csv");
        List<SimpleFeature> outputFeatureList = new CopyOnWriteArrayList<>();

        try (SimpleFeatureIterator iterator = collection.features();
                Writer bufferedWriter = Files.newBufferedWriter(nextCSVFile, StandardCharsets.UTF_8,
                        StandardOpenOption.CREATE_NEW);
                SequenceWriter csv = CSVUtil.newCSVWriter(bufferedWriter, csvSchema);) {
            List<String> nextLine = new ArrayList<>();
            while (iterator.hasNext()) {
                SimpleFeature feature = iterator.next();
                featureCount++;
                if (featureCount <= 2) {
                    System.out.println("");
                    System.out.println(feature.getIdentifier());
                } else if (featureCount % 100 == 0) {
                    System.out.print(".");
                }
                boolean filterThisFeature = false;
                for (AttributeDescriptor attribute : schema.getAttributeDescriptors()) {
                    String featureString = Optional.ofNullable(feature.getAttribute(attribute.getName()))
                            .orElse("").toString();
                    nextLine.add(featureString);
                    if (filterFields.contains(attribute.getName().toString())
                            && featureString.trim().isEmpty()) {
                        filterThisFeature = true;
                    }
                    if (featureString.length() > 100) {
                        featureString = featureString.substring(0, 100) + "...";
                    }
                    if (featureCount <= 2) {
                        System.out.print(attribute.getName() + "=");
                        System.out.println(featureString);
                    }
                }
                if (!filterThisFeature) {
                    outputFeatureList.add(SHPUtils.changeSchemaName(feature, outputSchema));
                    csv.write(nextLine);
                }
                nextLine.clear();
            }
        }
        try (Reader csvReader = Files.newBufferedReader(nextCSVFile, StandardCharsets.UTF_8);
                Writer summaryOutput = Files.newBufferedWriter(nextSummaryCSVFile, StandardCharsets.UTF_8,
                        StandardOpenOption.CREATE_NEW);
                final Writer mappingWriter = options.has(outputMappingTemplate)
                        ? Files.newBufferedWriter(outputMappingPath)
                        : NullWriter.NULL_WRITER) {
            CSVSummariser.runSummarise(csvReader, summaryOutput, mappingWriter,
                    CSVSummariser.DEFAULT_SAMPLE_COUNT, false);
        }
        if (featureCount > 100) {
            System.out.println("");
        }
        System.out.println("");
        System.out.println("Feature count: " + featureCount);

        SimpleFeatureCollection outputCollection = new ListFeatureCollection(outputSchema, outputFeatureList);
        Path outputShapefilePath = outputPath.resolve(prefix + "-" + outputSchema.getTypeName() + "-dump");
        if (!Files.exists(outputShapefilePath)) {
            Files.createDirectory(outputShapefilePath);
        }
        SHPUtils.writeShapefile(outputCollection, outputShapefilePath);

        // Create ZIP file from the contents to keep the subfiles together
        Path outputShapefileZipPath = outputPath
                .resolve(prefix + "-" + outputSchema.getTypeName() + "-dump.zip");
        try (final OutputStream out = Files.newOutputStream(outputShapefileZipPath,
                StandardOpenOption.CREATE_NEW);
                final ZipOutputStream zip = new ZipOutputStream(out, StandardCharsets.UTF_8);) {
            Files.list(outputShapefilePath).forEachOrdered(Unchecked.consumer(e -> {
                zip.putNextEntry(new ZipEntry(e.getFileName().toString()));
                Files.copy(e, zip);
                zip.closeEntry();
            }));
        }

        try (final OutputStream outputStream = Files.newOutputStream(
                outputPath.resolve(prefix + "." + format.value(options)), StandardOpenOption.CREATE_NEW);) {
            MapContent map = new MapContent();
            map.setTitle(prefix + "-" + outputSchema.getTypeName());
            Style style = SLD.createSimpleStyle(featureSource.getSchema());
            Layer layer = new FeatureLayer(new CollectionFeatureSource(outputCollection), style);
            map.addLayer(layer);
            SHPUtils.renderImage(map, outputStream, resolution.value(options), format.value(options));
        }
    }
}

From source file:Main.java

public static <T> Set<T> toConcurrentSet(Iterable<T> iterable) {
    Set<T> temp = ConcurrentHashMap.newKeySet();
    iterable.forEach(temp::add);
    return temp;
}
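
A hypothetical call site for this helper, assuming java.util.Arrays is imported (the input data is illustrative):

    Set<String> ids = toConcurrentSet(Arrays.asList("a", "b", "b"));
    System.out.println(ids); // thread-safe set; duplicates collapse, e.g. [a, b]

Note that ConcurrentHashMap.newKeySet() does not permit null elements, so the iterable must not contain null.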

From source file:org.apache.asterix.transaction.management.resource.PersistentLocalResourceRepository.java

public void setReplicationManager(IReplicationManager replicationManager) {
    this.replicationManager = replicationManager;
    isReplicationEnabled = replicationManager.isReplicationEnabled();

    if (isReplicationEnabled) {
        filesToBeReplicated = new HashSet<>();
        nodeInactivePartitions = ConcurrentHashMap.newKeySet();
    }
}

From source file:org.apache.bookkeeper.mledger.impl.OffloadPrefixTest.java

@Test
public void testOffloadConflict() throws Exception {
    Set<Pair<Long, UUID>> deleted = ConcurrentHashMap.newKeySet();
    CompletableFuture<Set<Long>> errorLedgers = new CompletableFuture<>();
    Set<Pair<Long, UUID>> failedOffloads = ConcurrentHashMap.newKeySet();

    MockLedgerOffloader offloader = new MockLedgerOffloader() {
        @Override
        public CompletableFuture<Void> offload(ReadHandle ledger, UUID uuid,
                Map<String, String> extraMetadata) {
            return errorLedgers.thenCompose((errors) -> {
                if (errors.remove(ledger.getId())) {
                    failedOffloads.add(Pair.of(ledger.getId(), uuid));
                    CompletableFuture<Void> future = new CompletableFuture<>();
                    future.completeExceptionally(new Exception("Some kind of error"));
                    return future;
                } else {
                    return super.offload(ledger, uuid, extraMetadata);
                }
            });
        }

        @Override
        public CompletableFuture<Void> deleteOffloaded(long ledgerId, UUID uuid,
                Map<String, String> offloadDriverMetadata) {
            deleted.add(Pair.of(ledgerId, uuid));
            return super.deleteOffloaded(ledgerId, uuid, offloadDriverMetadata);
        }
    };
    ManagedLedgerConfig config = new ManagedLedgerConfig();
    config.setMaxEntriesPerLedger(10);
    config.setMinimumRolloverTime(0, TimeUnit.SECONDS);
    config.setRetentionTime(10, TimeUnit.MINUTES);
    config.setLedgerOffloader(offloader);
    ManagedLedgerImpl ledger = (ManagedLedgerImpl) factory.open("my_test_ledger", config);

    for (int i = 0; i < 15; i++) {
        String content = "entry-" + i;
        ledger.addEntry(content.getBytes());
    }

    Set<Long> errorSet = ConcurrentHashMap.newKeySet();
    errorSet.add(ledger.getLedgersInfoAsList().get(0).getLedgerId());
    errorLedgers.complete(errorSet);

    try {
        ledger.offloadPrefix(ledger.getLastConfirmedEntry());
    } catch (ManagedLedgerException e) {
        // expected
    }
    Assert.assertTrue(errorSet.isEmpty());
    Assert.assertEquals(failedOffloads.size(), 1);
    Assert.assertEquals(deleted.size(), 0);

    long expectedFailedLedger = ledger.getLedgersInfoAsList().get(0).getLedgerId();
    UUID expectedFailedUUID = new UUID(ledger.getLedgersInfoAsList().get(0).getOffloadContext().getUidMsb(),
            ledger.getLedgersInfoAsList().get(0).getOffloadContext().getUidLsb());
    Assert.assertEquals(failedOffloads.stream().findFirst().get(),
            Pair.of(expectedFailedLedger, expectedFailedUUID));
    Assert.assertFalse(ledger.getLedgersInfoAsList().get(0).getOffloadContext().getComplete());

    // try offload again
    ledger.offloadPrefix(ledger.getLastConfirmedEntry());

    Assert.assertEquals(failedOffloads.size(), 1);
    Assert.assertEquals(deleted.size(), 1);
    Assert.assertEquals(deleted.stream().findFirst().get(), Pair.of(expectedFailedLedger, expectedFailedUUID));
    UUID successUUID = new UUID(ledger.getLedgersInfoAsList().get(0).getOffloadContext().getUidMsb(),
            ledger.getLedgersInfoAsList().get(0).getOffloadContext().getUidLsb());
    Assert.assertFalse(successUUID.equals(expectedFailedUUID));
    Assert.assertTrue(ledger.getLedgersInfoAsList().get(0).getOffloadContext().getComplete());
}

From source file:org.apache.bookkeeper.mledger.impl.OffloadPrefixTest.java

@Test
public void testOffloadDelete() throws Exception {
    Set<Pair<Long, UUID>> deleted = ConcurrentHashMap.newKeySet();
    CompletableFuture<Set<Long>> errorLedgers = new CompletableFuture<>();
    Set<Pair<Long, UUID>> failedOffloads = ConcurrentHashMap.newKeySet();

    MockLedgerOffloader offloader = new MockLedgerOffloader();
    ManagedLedgerConfig config = new ManagedLedgerConfig();
    config.setMaxEntriesPerLedger(10);
    config.setMinimumRolloverTime(0, TimeUnit.SECONDS);
    config.setRetentionTime(0, TimeUnit.MINUTES);
    config.setLedgerOffloader(offloader);
    ManagedLedgerImpl ledger = (ManagedLedgerImpl) factory.open("my_test_ledger", config);
    ManagedCursor cursor = ledger.openCursor("foobar");
    for (int i = 0; i < 15; i++) {
        String content = "entry-" + i;
        ledger.addEntry(content.getBytes());
    }

    Assert.assertEquals(ledger.getLedgersInfoAsList().size(), 2);
    ledger.offloadPrefix(ledger.getLastConfirmedEntry());
    Assert.assertEquals(ledger.getLedgersInfoAsList().size(), 2);

    Assert.assertEquals(
            ledger.getLedgersInfoAsList().stream().filter(e -> e.getOffloadContext().getComplete()).count(), 1);
    Assert.assertTrue(ledger.getLedgersInfoAsList().get(0).getOffloadContext().getComplete());
    long firstLedger = ledger.getLedgersInfoAsList().get(0).getLedgerId();
    long secondLedger = ledger.getLedgersInfoAsList().get(1).getLedgerId();

    cursor.markDelete(ledger.getLastConfirmedEntry());
    assertEventuallyTrue(() -> ledger.getLedgersInfoAsList().size() == 1);
    Assert.assertEquals(ledger.getLedgersInfoAsList().get(0).getLedgerId(), secondLedger);

    assertEventuallyTrue(() -> offloader.deletedOffloads().contains(firstLedger));
}

From source file:org.apache.bookkeeper.mledger.impl.OffloadPrefixTest.java

@Test
public void testOffloadDeleteIncomplete() throws Exception {
    Set<Pair<Long, UUID>> deleted = ConcurrentHashMap.newKeySet();
    CompletableFuture<Set<Long>> errorLedgers = new CompletableFuture<>();
    Set<Pair<Long, UUID>> failedOffloads = ConcurrentHashMap.newKeySet();

    MockLedgerOffloader offloader = new MockLedgerOffloader() {
        @Override
        public CompletableFuture<Void> offload(ReadHandle ledger, UUID uuid,
                Map<String, String> extraMetadata) {
            return super.offload(ledger, uuid, extraMetadata).thenCompose((res) -> {
                CompletableFuture<Void> f = new CompletableFuture<>();
                f.completeExceptionally(new Exception("Fail after offload occurred"));
                return f;
            });
        }
    };
    ManagedLedgerConfig config = new ManagedLedgerConfig();
    config.setMaxEntriesPerLedger(10);
    config.setMinimumRolloverTime(0, TimeUnit.SECONDS);
    config.setRetentionTime(0, TimeUnit.MINUTES);
    config.setLedgerOffloader(offloader);
    ManagedLedgerImpl ledger = (ManagedLedgerImpl) factory.open("my_test_ledger", config);
    ManagedCursor cursor = ledger.openCursor("foobar");
    for (int i = 0; i < 15; i++) {
        String content = "entry-" + i;
        ledger.addEntry(content.getBytes());
    }

    Assert.assertEquals(ledger.getLedgersInfoAsList().size(), 2);
    try {
        ledger.offloadPrefix(ledger.getLastConfirmedEntry());
    } catch (ManagedLedgerException mle) {
        // expected
    }

    Assert.assertEquals(ledger.getLedgersInfoAsList().size(), 2);

    Assert.assertEquals(
            ledger.getLedgersInfoAsList().stream().filter(e -> e.getOffloadContext().getComplete()).count(), 0);
    Assert.assertEquals(
            ledger.getLedgersInfoAsList().stream().filter(e -> e.getOffloadContext().hasUidMsb()).count(), 1);
    Assert.assertTrue(ledger.getLedgersInfoAsList().get(0).getOffloadContext().hasUidMsb());

    long firstLedger = ledger.getLedgersInfoAsList().get(0).getLedgerId();
    long secondLedger = ledger.getLedgersInfoAsList().get(1).getLedgerId();

    cursor.markDelete(ledger.getLastConfirmedEntry());
    assertEventuallyTrue(() -> ledger.getLedgersInfoAsList().size() == 1);
    Assert.assertEquals(ledger.getLedgersInfoAsList().get(0).getLedgerId(), secondLedger);

    assertEventuallyTrue(() -> offloader.deletedOffloads().contains(firstLedger));
}

From source file:org.apache.solr.cloud.ZkController.java

public void publishAndWaitForDownStates() throws KeeperException, InterruptedException {

    publishNodeAsDown(getNodeName());

    Set<String> collectionsWithLocalReplica = ConcurrentHashMap.newKeySet();
    for (SolrCore core : cc.getCores()) {
        collectionsWithLocalReplica.add(core.getCoreDescriptor().getCloudDescriptor().getCollectionName());
    }

    CountDownLatch latch = new CountDownLatch(collectionsWithLocalReplica.size());
    for (String collectionWithLocalReplica : collectionsWithLocalReplica) {
        zkStateReader.registerCollectionStateWatcher(collectionWithLocalReplica,
                (liveNodes, collectionState) -> {
                    boolean foundStates = true;
                    for (SolrCore core : cc.getCores()) {
                        if (core.getCoreDescriptor().getCloudDescriptor().getCollectionName()
                                .equals(collectionWithLocalReplica)) {
                            Replica replica = collectionState.getReplica(
                                    core.getCoreDescriptor().getCloudDescriptor().getCoreNodeName());
                            if (replica.getState() != Replica.State.DOWN) {
                                foundStates = false;
                            }
                        }
                    }

                    if (foundStates && collectionsWithLocalReplica.remove(collectionWithLocalReplica)) {
                        latch.countDown();
                    }
                    return foundStates;
                });
    }

    boolean allPublishedDown = latch.await(WAIT_DOWN_STATES_TIMEOUT_SECONDS, TimeUnit.SECONDS);
    if (!allPublishedDown) {
        log.warn("Timed out waiting to see all nodes published as DOWN in our cluster state.");
    }
}

From source file:org.eclipse.smarthome.core.thing.internal.CommunicationManager.java

@Reference(cardinality = ReferenceCardinality.MULTIPLE, policy = ReferencePolicy.DYNAMIC)
protected void addProfileFactory(ProfileFactory profileFactory) {
    this.profileFactories.put(profileFactory, ConcurrentHashMap.newKeySet());
}

From source file:org.openhab.io.neeo.internal.NeeoDeviceKeys.java

/**
 * Refreshes the keys from the brain
 *
 * @throws IOException Signals that an I/O exception has occurred.
 */
void refresh() throws IOException {
    try (HttpRequest request = new HttpRequest()) {
        logger.debug("Getting existing device mappings from {}{}", brainUrl, NeeoConstants.PROJECTS_HOME);
        final HttpResponse resp = request.sendGetCommand(brainUrl + NeeoConstants.PROJECTS_HOME);
        if (resp.getHttpCode() != HttpStatus.OK_200) {
            throw resp.createException();
        }

        uidToKey.clear();

        final JsonParser parser = new JsonParser();
        final JsonObject root = parser.parse(resp.getContent()).getAsJsonObject();
        for (Map.Entry<String, JsonElement> room : root.getAsJsonObject("rooms").entrySet()) {
            final JsonObject roomObj = (JsonObject) room.getValue();
            for (Map.Entry<String, JsonElement> dev : roomObj.getAsJsonObject("devices").entrySet()) {
                final JsonObject devObj = (JsonObject) dev.getValue();
                final String key = devObj.get("key").getAsString();

                final JsonObject det = devObj.getAsJsonObject("details");
                final String adapterName = det.get("adapterName").getAsString();

                NeeoThingUID thingUID = null;
                try {
                    thingUID = new NeeoThingUID(adapterName);
                } catch (IllegalArgumentException e) {
                    logger.debug("Invalid UID (probably not an openhab thing): {} for key {}", adapterName,
                            key);
                }

                if (thingUID != null) {
                    final Set<String> newMap = ConcurrentHashMap.newKeySet();
                    final Set<String> uidKeys = uidToKey.putIfAbsent(thingUID, newMap);
                    (uidKeys == null ? newMap : uidKeys).add(key);
                }
            }
        }
    }
}

From source file:org.openhab.io.neeo.internal.NeeoDeviceKeys.java

/**
 * Adds the NEEO device key to the relationship with the UID
 *
 * @param uid the non-null uid
 * @param key the non-empty key
 */
public void put(NeeoThingUID uid, String key) {
    Objects.requireNonNull(uid, "uid cannot be null");
    NeeoUtil.requireNotEmpty(key, "key cannot be empty");

    final Set<String> newMap = ConcurrentHashMap.newKeySet();
    final Set<String> uidKeys = uidToKey.putIfAbsent(uid, newMap);
    (uidKeys == null ? newMap : uidKeys).add(key);
}