Example usage for java.util.concurrent.atomic AtomicInteger getAndIncrement

List of usage examples for java.util.concurrent.atomic AtomicInteger getAndIncrement

Introduction

On this page you can find example usages of java.util.concurrent.atomic.AtomicInteger#getAndIncrement().

Prototype

public final int getAndIncrement() 

Document

Atomically increments the current value, with memory effects as specified by VarHandle#getAndAdd.
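
Before the project examples below, here is a minimal, self-contained sketch (not taken from any of the projects listed) illustrating the method's contract: getAndIncrement() atomically adds 1 and returns the value the counter held before the increment, so concurrent callers never lose an update.

import java.util.concurrent.atomic.AtomicInteger;

public class GetAndIncrementDemo {
    public static void main(String[] args) throws InterruptedException {
        final AtomicInteger counter = new AtomicInteger(0);

        // returns the previous value (0); afterwards the counter holds 1
        int previous = counter.getAndIncrement();
        System.out.println("returned: " + previous + ", current: " + counter.get());

        // the update is atomic, so concurrent increments are never lost
        Thread[] workers = new Thread[4];
        for (int i = 0; i < workers.length; i++) {
            workers[i] = new Thread(() -> {
                for (int j = 0; j < 1_000; j++) {
                    counter.getAndIncrement();
                }
            });
            workers[i].start();
        }
        for (Thread worker : workers) {
            worker.join();
        }

        // 1 (from the call above) + 4 * 1_000 = 4_001
        System.out.println("final value: " + counter.get());
    }
}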

Usage

From source file:org.deeplearning4j.models.embeddings.loader.WordVectorSerializer.java

/**
 * This method loads a full w2v model, previously saved with the writeFullModel() call.
 *
 * Deprecation note: Please consider using readWord2VecModel() or loadStaticModel() instead.
 *
 * @param path - path to previously stored w2v json model
 * @return - Word2Vec instance
 */
@Deprecated
public static Word2Vec loadFullModel(@NonNull String path) throws FileNotFoundException {
    /*
    // TODO: implementation is in process
    We need to restore:
             1. WeightLookupTable, including syn0 and syn1 matrices
             2. VocabCache + mark it as SPECIAL, to avoid accidental word removals
     */
    BasicLineIterator iterator = new BasicLineIterator(new File(path));

    // first 3 lines should be processed separately
    String confJson = iterator.nextSentence();
    log.info("Word2Vec conf. JSON: " + confJson);
    VectorsConfiguration configuration = VectorsConfiguration.fromJson(confJson);

    // actually we don't need expTable, since it produces exact results on subsequent runs unless the expTable size is modified
    String eTable = iterator.nextSentence();
    double[] expTable;

    String nTable = iterator.nextSentence();
    if (configuration.getNegative() > 0) {
        // TODO: we probably should parse negTable, but it's not required until vocab changes are introduced. A predefined vocab will produce the exact same nTable; the same goes for expTable.
    }

    /*
        Since we're restoring the vocab from a previously serialized model, we can expect minWordFrequency to have already been applied to its vocabulary, so it should NOT be truncated.
        That's why I'm setting minWordFrequency to the configuration value, but marking each word as SPECIAL to avoid truncation.
     */
    VocabularyHolder holder = new VocabularyHolder.Builder()
            .minWordFrequency(configuration.getMinWordFrequency())
            .hugeModelExpected(configuration.isHugeModelExpected())
            .scavengerActivationThreshold(configuration.getScavengerActivationThreshold())
            .scavengerRetentionDelay(configuration.getScavengerRetentionDelay()).build();

    AtomicInteger counter = new AtomicInteger(0);
    AbstractCache<VocabWord> vocabCache = new AbstractCache.Builder<VocabWord>().build();
    while (iterator.hasNext()) {
        //    log.info("got line: " + iterator.nextSentence());
        String wordJson = iterator.nextSentence();
        VocabularyWord word = VocabularyWord.fromJson(wordJson);
        word.setSpecial(true);

        VocabWord vw = new VocabWord(word.getCount(), word.getWord());
        vw.setIndex(counter.getAndIncrement());

        vw.setIndex(word.getHuffmanNode().getIdx());
        vw.setCodeLength(word.getHuffmanNode().getLength());
        vw.setPoints(arrayToList(word.getHuffmanNode().getPoint(), word.getHuffmanNode().getLength()));
        vw.setCodes(arrayToList(word.getHuffmanNode().getCode(), word.getHuffmanNode().getLength()));

        vocabCache.addToken(vw);
        vocabCache.addWordToIndex(vw.getIndex(), vw.getLabel());
        vocabCache.putVocabWord(vw.getWord());
    }

    // at this moment vocab is restored, and it's time to rebuild Huffman tree
    // since word counters are equal, the Huffman tree will be equal too
    //holder.updateHuffmanCodes();

    // we definitely don't need UNK word in this scenario

    //        holder.transferBackToVocabCache(vocabCache, false);

    // now, it's time to transfer syn0/syn1/syn1Neg values
    InMemoryLookupTable lookupTable = (InMemoryLookupTable) new InMemoryLookupTable.Builder()
            .negative(configuration.getNegative()).useAdaGrad(configuration.isUseAdaGrad())
            .lr(configuration.getLearningRate()).cache(vocabCache).vectorLength(configuration.getLayersSize())
            .build();

    // we create all arrays
    lookupTable.resetWeights(true);

    iterator.reset();

    // we should skip 3 lines from file
    iterator.nextSentence();
    iterator.nextSentence();
    iterator.nextSentence();

    // now, for each word from vocabHolder we'll just transfer actual values
    while (iterator.hasNext()) {
        String wordJson = iterator.nextSentence();
        VocabularyWord word = VocabularyWord.fromJson(wordJson);

        // syn0 transfer
        INDArray syn0 = lookupTable.getSyn0().getRow(vocabCache.indexOf(word.getWord()));
        syn0.assign(Nd4j.create(word.getSyn0()));

        // syn1 transfer
        // syn1 values are normally accessed via tree points, but since our goal is just deserialization, we can push them row by row
        INDArray syn1 = lookupTable.getSyn1().getRow(vocabCache.indexOf(word.getWord()));
        syn1.assign(Nd4j.create(word.getSyn1()));

        // syn1Neg transfer
        if (configuration.getNegative() > 0) {
            INDArray syn1Neg = lookupTable.getSyn1Neg().getRow(vocabCache.indexOf(word.getWord()));
            syn1Neg.assign(Nd4j.create(word.getSyn1Neg()));
        }
    }

    Word2Vec vec = new Word2Vec.Builder(configuration).vocabCache(vocabCache).lookupTable(lookupTable)
            .resetModel(false).build();

    vec.setModelUtils(new BasicModelUtils());

    return vec;
}

From source file:org.deeplearning4j.models.embeddings.loader.WordVectorSerializer.java

/**
 * This method loads a Word2Vec model from one of the following formats:
 * 1) Binary model, either compressed or not, like the well-known Google model
 * 2) Popular CSV word2vec text format
 * 3) DL4j compressed format
 *
 * Please note: if extended data isn't available, only weights will be loaded instead.
 *
 * @param file model file to load
 * @param extendedModel if TRUE, we'll try to load HS states & Huffman tree info; if FALSE, only weights will be loaded
 * @return restored Word2Vec instance
 */
public static Word2Vec readWord2VecModel(@NonNull File file, boolean extendedModel) {
    InMemoryLookupTable<VocabWord> lookupTable = new InMemoryLookupTable<>();
    AbstractCache<VocabWord> vocabCache = new AbstractCache<>();
    Word2Vec vec;
    INDArray syn0 = null;
    VectorsConfiguration configuration = new VectorsConfiguration();

    if (!file.exists() || !file.isFile())
        throw new ND4JIllegalStateException("File [" + file.getAbsolutePath() + "] doesn't exist");

    int originalFreq = Nd4j.getMemoryManager().getOccasionalGcFrequency();
    boolean originalPeriodic = Nd4j.getMemoryManager().isPeriodicGcActive();

    if (originalPeriodic)
        Nd4j.getMemoryManager().togglePeriodicGc(false);

    Nd4j.getMemoryManager().setOccasionalGcFrequency(50000);

    // try to load zip format
    try {
        if (extendedModel) {
            log.debug("Trying full model restoration...");
            // this method just loads full compressed model

            if (originalPeriodic)
                Nd4j.getMemoryManager().togglePeriodicGc(true);

            Nd4j.getMemoryManager().setOccasionalGcFrequency(originalFreq);

            return readWord2Vec(file);
        } else {
            log.debug("Trying simplified model restoration...");

            File tmpFileSyn0 = File.createTempFile("word2vec", "syn");
            File tmpFileConfig = File.createTempFile("word2vec", "config");
            // we don't need full model, so we go directly to syn0 file

            ZipFile zipFile = new ZipFile(file);
            ZipEntry syn = zipFile.getEntry("syn0.txt");
            InputStream stream = zipFile.getInputStream(syn);

            Files.copy(stream, Paths.get(tmpFileSyn0.getAbsolutePath()), StandardCopyOption.REPLACE_EXISTING);

            // now we're restoring configuration saved earlier
            ZipEntry config = zipFile.getEntry("config.json");
            if (config != null) {
                stream = zipFile.getInputStream(config);

                StringBuilder builder = new StringBuilder();
                try (BufferedReader reader = new BufferedReader(new InputStreamReader(stream))) {
                    String line;
                    while ((line = reader.readLine()) != null) {
                        builder.append(line);
                    }
                }

                configuration = VectorsConfiguration.fromJson(builder.toString().trim());
            }

            ZipEntry ve = zipFile.getEntry("frequencies.txt");
            if (ve != null) {
                stream = zipFile.getInputStream(ve);
                AtomicInteger cnt = new AtomicInteger(0);
                try (BufferedReader reader = new BufferedReader(new InputStreamReader(stream))) {
                    String line;
                    while ((line = reader.readLine()) != null) {
                        String[] split = line.split(" ");
                        VocabWord word = new VocabWord(Double.valueOf(split[1]), decodeB64(split[0]));
                        word.setIndex(cnt.getAndIncrement());
                        word.incrementSequencesCount(Long.valueOf(split[2]));

                        vocabCache.addToken(word);
                        vocabCache.addWordToIndex(word.getIndex(), word.getLabel());

                        Nd4j.getMemoryManager().invokeGcOccasionally();
                    }
                }
            }

            List<INDArray> rows = new ArrayList<>();
            // basically read up everything, call vstack and then return model
            try (Reader reader = new CSVReader(tmpFileSyn0)) {
                AtomicInteger cnt = new AtomicInteger(0);
                while (reader.hasNext()) {
                    Pair<VocabWord, float[]> pair = reader.next();
                    VocabWord word = pair.getFirst();
                    INDArray vector = Nd4j.create(pair.getSecond());

                    if (ve != null) {
                        if (syn0 == null)
                            syn0 = Nd4j.create(vocabCache.numWords(), vector.length());

                        syn0.getRow(cnt.getAndIncrement()).assign(vector);
                    } else {
                        rows.add(vector);

                        vocabCache.addToken(word);
                        vocabCache.addWordToIndex(word.getIndex(), word.getLabel());
                    }

                    Nd4j.getMemoryManager().invokeGcOccasionally();
                }
            } catch (Exception e) {
                throw new RuntimeException(e);
            } finally {
                if (originalPeriodic)
                    Nd4j.getMemoryManager().togglePeriodicGc(true);

                Nd4j.getMemoryManager().setOccasionalGcFrequency(originalFreq);
            }

            if (syn0 == null && vocabCache.numWords() > 0)
                syn0 = Nd4j.vstack(rows);

            if (syn0 == null) {
                log.error("Can't build syn0 table");
                throw new DL4JInvalidInputException("Can't build syn0 table");
            }

            lookupTable = new InMemoryLookupTable.Builder<VocabWord>().cache(vocabCache)
                    .vectorLength(syn0.columns()).useHierarchicSoftmax(false).useAdaGrad(false).build();

            lookupTable.setSyn0(syn0);

            try {
                tmpFileSyn0.delete();
                tmpFileConfig.delete();
            } catch (Exception e) {
                //
            }
        }
    } catch (Exception e) {
        // let's try to load this file as a CSV file
        try {
            log.debug("Trying CSV model restoration...");

            Pair<InMemoryLookupTable, VocabCache> pair = loadTxt(file);
            lookupTable = pair.getFirst();
            vocabCache = (AbstractCache<VocabWord>) pair.getSecond();
        } catch (Exception ex) {
            // we fall back to trying the binary model instead
            try {
                log.debug("Trying binary model restoration...");

                if (originalPeriodic)
                    Nd4j.getMemoryManager().togglePeriodicGc(true);

                Nd4j.getMemoryManager().setOccasionalGcFrequency(originalFreq);

                vec = loadGoogleModel(file, true, true);
                return vec;
            } catch (Exception ey) {
                // try to load without linebreaks
                try {
                    if (originalPeriodic)
                        Nd4j.getMemoryManager().togglePeriodicGc(true);

                    Nd4j.getMemoryManager().setOccasionalGcFrequency(originalFreq);

                    vec = loadGoogleModel(file, true, false);
                    return vec;
                } catch (Exception ez) {
                    throw new RuntimeException(
                            "Unable to guess input file format. Please use corresponding loader directly");
                }
            }
        }
    }

    Word2Vec.Builder builder = new Word2Vec.Builder(configuration).lookupTable(lookupTable).useAdaGrad(false)
            .vocabCache(vocabCache).layerSize(lookupTable.layerSize())

            // we don't use hs here, because model is incomplete
            .useHierarchicSoftmax(false).resetModel(false);

    /*
    Trying to restore TokenizerFactory & TokenPreProcessor
     */

    TokenizerFactory factory = getTokenizerFactory(configuration);
    if (factory != null)
        builder.tokenizerFactory(factory);

    vec = builder.build();

    return vec;
}

From source file:com.spectralogic.ds3client.integration.GetJobManagement_Test.java

@Test
public void testGetJobWithUserSuppliedBlobStrategy() throws IOException, InterruptedException {
    final String tempPathPrefix = null;
    final Path tempDirectory = Files.createTempDirectory(Paths.get("."), tempPathPrefix);
    final String fileName = "beowulf.txt";

    try {
        final List<Ds3Object> objects = Lists.newArrayList(new Ds3Object(fileName));

        final GetBulkJobSpectraS3Request getBulkJobSpectraS3Request = new GetBulkJobSpectraS3Request(
                BUCKET_NAME, objects);

        final GetBulkJobSpectraS3Response getBulkJobSpectraS3Response = client
                .getBulkJobSpectraS3(getBulkJobSpectraS3Request);

        final MasterObjectList masterObjectList = getBulkJobSpectraS3Response.getMasterObjectList();

        final EventDispatcher eventDispatcher = new EventDispatcherImpl(new SameThreadEventRunner());

        final AtomicInteger numChunkAllocationAttempts = new AtomicInteger(0);

        final TransferStrategyBuilder transferStrategyBuilder = new TransferStrategyBuilder()
                .withDs3Client(client).withMasterObjectList(masterObjectList)
                .withChannelBuilder(new FileObjectGetter(tempDirectory))
                .withRangesForBlobs(PartialObjectHelpers.mapRangesToBlob(masterObjectList.getObjects(),
                        PartialObjectHelpers.getPartialObjectsRanges(objects)))
                .withBlobStrategy(new UserSuppliedPutBlobStrategy(client, masterObjectList, eventDispatcher,
                        new MaxChunkAttemptsRetryBehavior(5),
                        new ClientDefinedChunkAttemptRetryDelayBehavior(1, eventDispatcher), new Monitorable() {
                            @Override
                            public void monitor() {
                                numChunkAllocationAttempts.getAndIncrement();
                            }
                        }));

        final TransferStrategy transferStrategy = transferStrategyBuilder.makeGetTransferStrategy();

        transferStrategy.transfer();

        final Collection<File> filesInTempDirectory = FileUtils.listFiles(tempDirectory.toFile(), null, false);

        for (final File file : filesInTempDirectory) {
            assertEquals(fileName, file.getName());
        }

        assertEquals(1, numChunkAllocationAttempts.get());
    } finally {
        FileUtils.deleteDirectory(tempDirectory.toFile());
    }
}

From source file:org.mule.test.integration.routing.MessageChunkingTestCase.java

@Test
public void testMessageChunkingObject() throws Exception {
    final AtomicInteger messagePartsCount = new AtomicInteger(0);
    final Latch chunkingReceiverLatch = new Latch();
    final SimpleSerializableObject simpleSerializableObject = new SimpleSerializableObject("Test String", true,
            99);

    // find number of chunks
    final int parts = (int) Math
            .ceil((SerializationUtils.serialize(simpleSerializableObject).length / (double) 2));

    // Listen to events fired by the ChunkingReceiver service
    muleContext.registerListener(new FunctionalTestNotificationListener() {
        @Override
        public void onNotification(ServerNotification notification) {
            // Not strictly necessary to test for this as when we register the
            // listener we supply the ComponentName as the subscription filter
            assertEquals("ChunkingObjectReceiver", notification.getResourceIdentifier());
            // Test that we have received all chunks in the correct order
            Object reply = ((FunctionalTestNotification) notification).getEventContext().getMessage()
                    .getPayload();
            // Check if Object is of Correct Type
            assertTrue(reply instanceof SimpleSerializableObject);
            SimpleSerializableObject replySimpleSerializableObject = (SimpleSerializableObject) reply;
            // Check that Contents are Identical
            assertEquals(simpleSerializableObject.b, replySimpleSerializableObject.b);
            assertEquals(simpleSerializableObject.i, replySimpleSerializableObject.i);
            assertEquals(simpleSerializableObject.s, replySimpleSerializableObject.s);
            chunkingReceiverLatch.countDown();
        }
    }, "ChunkingObjectReceiver");

    // Listen to Message Notifications on the Chunking receiver so we can
    // determine how many message parts have been received
    muleContext.registerListener(new EndpointMessageNotificationListener<EndpointMessageNotification>() {
        @Override
        public void onNotification(EndpointMessageNotification notification) {
            if (notification.getAction() == EndpointMessageNotification.MESSAGE_RECEIVED) {
                messagePartsCount.getAndIncrement();
            }
            assertEquals("ChunkingObjectReceiver", notification.getResourceIdentifier());
        }
    }, "ChunkingObjectReceiver");

    MuleClient client = new MuleClient(muleContext);
    client.dispatch("vm://inbound.object.channel", simpleSerializableObject, null);
    // Wait for the message to be received and tested (in the listener above)
    assertTrue(chunkingReceiverLatch.await(20L, TimeUnit.SECONDS));
    // Ensure we processed expected number of message parts
    assertEquals(parts, messagePartsCount.get());
}

From source file:com.ezdi.rtf.testRTFParser.RTFObjDataParser.java

private byte[] handleEmbeddedPOIFS(InputStream is, Metadata metadata, AtomicInteger unknownFilenameCount)
        throws IOException {

    byte[] ret = null;
    try (NPOIFSFileSystem fs = new NPOIFSFileSystem(is)) {

        DirectoryNode root = fs.getRoot();

        if (root == null) {
            return ret;
        }

        if (root.hasEntry("Package")) {
            Entry ooxml = root.getEntry("Package");
            TikaInputStream stream = TikaInputStream.get(new DocumentInputStream((DocumentEntry) ooxml));

            ByteArrayOutputStream out = new ByteArrayOutputStream();

            IOUtils.copy(stream, out);
            ret = out.toByteArray();
        } else {
            // try poifs
            POIFSDocumentType type = POIFSDocumentType.detectType(root);
            if (type == POIFSDocumentType.OLE10_NATIVE) {
                try {
                    // Try to un-wrap the OLE10Native record:
                    Ole10Native ole = Ole10Native.createFromEmbeddedOleObject(root);
                    ret = ole.getDataBuffer();
                } catch (Ole10NativeException ex) {
                    // Not a valid OLE10Native record, skip it
                }
            } else if (type == POIFSDocumentType.COMP_OBJ) {

                DocumentEntry contentsEntry;
                try {
                    contentsEntry = (DocumentEntry) root.getEntry("CONTENTS");
                } catch (FileNotFoundException ioe) {
                    contentsEntry = (DocumentEntry) root.getEntry("Contents");
                }

                try (DocumentInputStream inp = new DocumentInputStream(contentsEntry)) {
                    ret = new byte[contentsEntry.getSize()];
                    inp.readFully(ret);
                }
            } else {

                ByteArrayOutputStream out = new ByteArrayOutputStream();
                is.reset();
                IOUtils.copy(is, out);
                ret = out.toByteArray();
                metadata.set(Metadata.RESOURCE_NAME_KEY,
                        "file_" + unknownFilenameCount.getAndIncrement() + "." + type.getExtension());
                metadata.set(Metadata.CONTENT_TYPE, type.getType().toString());
            }
        }
    }
    return ret;
}

From source file:uk.ac.ebi.ep.base.search.EnzymeFinder.java

/**
 * Builds filters - species, compounds, diseases - from a result list.
 *
 * @param searchResults the result list, which will be modified by setting
 * the relevant filters.
 */
private void buildFilters(SearchResults searchResults) {
    //  String[] commonSpecie = {"HUMAN", "MOUSE", "RAT", "Fruit fly", "WORM", "Yeast", "ECOLI"};
    // CommonSpecies [] commonSpecie = {"Homo sapiens","Mus musculus","Rattus norvegicus", "Drosophila melanogaster","Saccharomyces cerevisiae"};
    // List<String> commonSpecieList = Arrays.asList(commonSpecie);
    List<String> commonSpecieList = new ArrayList<>();
    for (CommonSpecies commonSpecies : CommonSpecies.values()) {
        commonSpecieList.add(commonSpecies.getScientificName());
    }

    Map<Integer, Species> priorityMapper = new TreeMap<>();

    AtomicInteger key = new AtomicInteger(50);
    AtomicInteger customKey = new AtomicInteger(6);

    for (Species sp : uniqueSpecies) {

        if (commonSpecieList.contains(sp.getScientificname().split("\\(")[0].trim())) {
            // HUMAN, MOUSE, RAT, Fly, WORM, Yeast, ECOLI 
            // "Homo sapiens","Mus musculus","Rattus norvegicus", "Drosophila melanogaster","WORM","Saccharomyces cerevisiae","ECOLI"
            if (sp.getScientificname().equalsIgnoreCase(CommonSpecies.HUMAN.getScientificName())) {
                priorityMapper.put(1, sp);
            } else if (sp.getScientificname().equalsIgnoreCase(CommonSpecies.MOUSE.getScientificName())) {
                priorityMapper.put(2, sp);
            } else if (sp.getScientificname().equalsIgnoreCase(CommonSpecies.RAT.getScientificName())) {
                priorityMapper.put(3, sp);
            } else if (sp.getScientificname().equalsIgnoreCase(CommonSpecies.FRUIT_FLY.getScientificName())) {
                priorityMapper.put(4, sp);
            } else if (sp.getScientificname().equalsIgnoreCase(CommonSpecies.WORM.getScientificName())) {
                priorityMapper.put(5, sp);
            } else if (sp.getScientificname().equalsIgnoreCase(CommonSpecies.ECOLI.getScientificName())) {
                priorityMapper.put(6, sp);
            } else if (sp.getScientificname().split("\\(")[0].trim()
                    .equalsIgnoreCase(CommonSpecies.BAKER_YEAST.getScientificName())) {
                priorityMapper.put(customKey.getAndIncrement(), sp);

            }
        } else {

            priorityMapper.put(key.getAndIncrement(), sp);

        }
    }

    List<Species> speciesFilters = new LinkedList<>();
    priorityMapper.entrySet().stream().forEach((map) -> {
        speciesFilters.add(map.getValue());
    });

    SearchFilters filters = new SearchFilters();
    filters.setSpecies(speciesFilters);
    filters.setCompounds(compoundFilters.stream().distinct().collect(Collectors.toList()));

    filters.setDiseases(diseaseFilters.stream().distinct().collect(Collectors.toList()));
    filters.setEcNumbers(ecNumberFilters.stream().distinct().collect(Collectors.toList()));
    searchResults.setSearchfilters(filters);
}

From source file:org.apache.tika.parser.rtf.RTFObjDataParser.java

private byte[] handleEmbeddedPOIFS(InputStream is, Metadata metadata, AtomicInteger unknownFilenameCount)
        throws IOException {

    byte[] ret = null;
    try (NPOIFSFileSystem fs = new NPOIFSFileSystem(is)) {

        DirectoryNode root = fs.getRoot();

        if (root == null) {
            return ret;
        }

        if (root.hasEntry("Package")) {
            Entry ooxml = root.getEntry("Package");
            TikaInputStream stream = TikaInputStream.get(new DocumentInputStream((DocumentEntry) ooxml));

            ByteArrayOutputStream out = new ByteArrayOutputStream();

            IOUtils.copy(stream, out);
            ret = out.toByteArray();
        } else {
            // try poifs
            POIFSDocumentType type = POIFSDocumentType.detectType(root);
            if (type == POIFSDocumentType.OLE10_NATIVE) {
                try {
                    // Try to un-wrap the OLE10Native record:
                    Ole10Native ole = Ole10Native.createFromEmbeddedOleObject(root);
                    ret = ole.getDataBuffer();
                } catch (Ole10NativeException ex) {
                    // Not a valid OLE10Native record, skip it
                }
            } else if (type == POIFSDocumentType.COMP_OBJ) {

                DocumentEntry contentsEntry;
                try {
                    contentsEntry = (DocumentEntry) root.getEntry("CONTENTS");
                } catch (FileNotFoundException ioe) {
                    contentsEntry = (DocumentEntry) root.getEntry("Contents");
                }

                try (DocumentInputStream inp = new DocumentInputStream(contentsEntry)) {
                    ret = new byte[contentsEntry.getSize()];
                    inp.readFully(ret);
                }
            } else {

                ByteArrayOutputStream out = new ByteArrayOutputStream();
                is.reset();
                IOUtils.copy(is, out);
                ret = out.toByteArray();
                metadata.set(Metadata.RESOURCE_NAME_KEY,
                        "file_" + unknownFilenameCount.getAndIncrement() + "." + type.getExtension());
                metadata.set(Metadata.CONTENT_TYPE, type.getType().toString());
            }
        }
    }
    return ret;
}

From source file:org.apache.synapse.transport.nhttp.ClientHandler.java

/**
 * Record a connection in the active connection records.
 *
 * @param conn connection to be recorded.
 */
private void recordConnection(NHttpClientConnection conn) {
    if (conn instanceof HttpInetConnection) {
        HttpInetConnection inetConnection = (HttpInetConnection) conn;
        // first we try to get the connection with the host_address:port key
        AtomicInteger connections = openConnections
                .get(inetConnection.getRemoteAddress().getHostName() + ":" + inetConnection.getRemotePort());
        // if we fail, try to get the connection with the ip_address:port key
        if (connections == null) {
            connections = openConnections.get(
                    inetConnection.getRemoteAddress().getHostAddress() + ":" + inetConnection.getRemotePort());
        }

        lock.lock();
        try {
            if (connections == null) {
                connections = new AtomicInteger();
                if (inetConnection.getRemoteAddress().getHostName() != null) {
                    openConnections.put(inetConnection.getRemoteAddress().getHostName() + ":"
                            + inetConnection.getRemotePort(), connections);
                } else {
                    openConnections.put(inetConnection.getRemoteAddress().getHostAddress() + ":"
                            + inetConnection.getRemotePort(), connections);
                }
            }
        } finally {
            lock.unlock();
        }
        connections.getAndIncrement();
    }
}

From source file:com.ethlo.geodata.importer.file.FileIpLookupImporter.java

@Override
public long importData() throws IOException {
    final Map.Entry<Date, File> ipDataFile = super.fetchResource(DataType.IP, url);
    final AtomicInteger count = new AtomicInteger(0);

    final File csvFile = ipDataFile.getValue();
    final long total = IoUtils.lineCount(csvFile);
    final ProgressListener prg = new ProgressListener(
            l -> publish(new DataLoadedEvent(this, DataType.IP, Operation.IMPORT, l, total)));

    final IpLookupImporter ipLookupImporter = new IpLookupImporter(csvFile);

    final JsonFactory f = new JsonFactory();
    f.enable(JsonGenerator.Feature.ESCAPE_NON_ASCII);
    f.disable(JsonGenerator.Feature.AUTO_CLOSE_TARGET);
    final ObjectMapper mapper = new ObjectMapper(f);

    final byte newLine = (byte) "\n".charAt(0);

    logger.info("Writing IP data to file {}", getFile().getAbsolutePath());
    try (final OutputStream out = new BufferedOutputStream(new FileOutputStream(getFile()))) {
        ipLookupImporter.processFile(entry -> {
            final String strGeoNameId = findMapValue(entry, "geoname_id", "represented_country_geoname_id",
                    "registered_country_geoname_id");
            final String strGeoNameCountryId = findMapValue(entry, "represented_country_geoname_id",
                    "registered_country_geoname_id");
            final Long geonameId = strGeoNameId != null ? Long.parseLong(strGeoNameId) : null;
            final Long geonameCountryId = strGeoNameCountryId != null ? Long.parseLong(strGeoNameCountryId)
                    : null;
            if (geonameId != null) {
                final SubnetUtils u = new SubnetUtils(entry.get("network"));
                final long lower = UnsignedInteger
                        .fromIntBits(InetAddresses
                                .coerceToInteger(InetAddresses.forString(u.getInfo().getLowAddress())))
                        .longValue();
                final long upper = UnsignedInteger
                        .fromIntBits(InetAddresses
                                .coerceToInteger(InetAddresses.forString(u.getInfo().getHighAddress())))
                        .longValue();
                final Map<String, Object> paramMap = new HashMap<>(5);
                paramMap.put("geoname_id", geonameId);
                paramMap.put("geoname_country_id", geonameCountryId);
                paramMap.put("first", lower);
                paramMap.put("last", upper);

                try {
                    mapper.writeValue(out, paramMap);
                    out.write(newLine);
                } catch (IOException exc) {
                    throw new DataAccessResourceFailureException(exc.getMessage(), exc);
                }
            }

            if (count.get() % 100_000 == 0) {
                logger.info("Processed {}", count.get());
            }

            count.getAndIncrement();

            prg.update();
        });
    }

    return total;
}

From source file:net.sf.jasperreports.engine.fill.JRFillCrosstab.java

protected int getChunkIndex() {
    JRFillContext fillerContext = filler.getFillContext();
    AtomicInteger counter = (AtomicInteger) fillerContext.getFillCache(FILL_CACHE_KEY_CROSSTAB_CHUNK_COUNTER);
    if (counter == null) {
        // we just need a mutable integer, there's no actual concurrency here
        counter = new AtomicInteger();
        fillerContext.setFillCache(FILL_CACHE_KEY_CROSSTAB_CHUNK_COUNTER, counter);
    }

    int chunkIndex = counter.getAndIncrement();
    return chunkIndex;
}