Example usage for java.util.concurrent.atomic AtomicInteger getAndIncrement

Introduction

On this page you can find example usages of java.util.concurrent.atomic.AtomicInteger.getAndIncrement() drawn from open-source projects.

Prototype

public final int getAndIncrement() 

Document

Atomically increments the current value, with memory effects as specified by VarHandle#getAndAdd, and returns the previous value.
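
A minimal, self-contained sketch (not taken from any of the projects below; the class name is arbitrary) illustrating the two properties the examples rely on: getAndIncrement() returns the value before the increment, and the update is atomic across threads.

import java.util.concurrent.atomic.AtomicInteger;

public class GetAndIncrementDemo {
    public static void main(String[] args) throws InterruptedException {
        AtomicInteger counter = new AtomicInteger(0);

        // getAndIncrement() returns the old value, then increments.
        int previous = counter.getAndIncrement();
        System.out.println(previous);      // prints 0
        System.out.println(counter.get()); // prints 1

        // The update is atomic, so concurrent callers never lose an increment.
        Runnable task = () -> {
            for (int i = 0; i < 1000; i++) {
                counter.getAndIncrement();
            }
        };
        Thread t1 = new Thread(task);
        Thread t2 = new Thread(task);
        t1.start();
        t2.start();
        t1.join();
        t2.join();

        System.out.println(counter.get()); // prints 2001
    }
}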

Usage

From source file: org.jasig.ssp.util.importer.job.staging.PostgresStagingTableWriter.java

@Override
public void write(final List<? extends RawItem> items) {

    NamedParameterJdbcTemplate jdbcTemplate = new NamedParameterJdbcTemplate(dataSource);
    String fileName = items.get(0).getResource().getFilename();
    final String[] tableName = fileName.split("\\.");

    Integer batchStart = (Integer) (stepExecution.getExecutionContext().get("batchStart") == null ? null
            : stepExecution.getExecutionContext().get("batchStart"));
    Integer batchStop = (Integer) (stepExecution.getExecutionContext().get("batchStop") == null ? null
            : stepExecution.getExecutionContext().get("batchStop"));
    Object currentEntity = stepExecution.getExecutionContext().get("currentEntity");

    if (currentEntity == null || !currentEntity.equals(tableName[0])) {
        batchStart = 0;
        batchStop = items.size() - 1;
        currentEntity = tableName[0];
        stepExecution.getExecutionContext().put("currentEntity", currentEntity);
        stepExecution.getExecutionContext().put("batchStart", batchStart);
        stepExecution.getExecutionContext().put("batchStop", batchStop);
    } else {
        batchStart = batchStop + 1;
        batchStop = (Integer) batchStart + items.size() - 1;
        stepExecution.getExecutionContext().put("batchStart", batchStart);
        stepExecution.getExecutionContext().put("batchStop", batchStop);
    }

    RawItem firstItem = items.get(0);
    Resource firstItemResource = firstItem.getResource();

    if (currentResource == null || !(this.currentResource.equals(firstItemResource))) {
        this.orderedHeaders = writeHeader(firstItem);
        this.currentResource = firstItemResource;
    }

    StringBuilder insertSql = new StringBuilder();
    insertSql.append("INSERT INTO stg_" + tableName[0] + " (batch_id,");
    StringBuilder valuesSqlBuilder = new StringBuilder();
    valuesSqlBuilder.append(" VALUES (?,");
    for (String header : this.orderedHeaders) {
        insertSql.append(header).append(",");
        valuesSqlBuilder.append("?").append(",");
    }
    insertSql.setLength(insertSql.length() - 1); // trim comma
    valuesSqlBuilder.setLength(valuesSqlBuilder.length() - 1); // trim comma
    insertSql.append(")");
    valuesSqlBuilder.append(");");
    insertSql.append(valuesSqlBuilder);

    final AtomicInteger batchStartRef = new AtomicInteger(batchStart);
    final String sql = insertSql.toString();

    jdbcTemplate.getJdbcOperations().execute(sql, new PreparedStatementCallback() {
        @Override
        public Object doInPreparedStatement(PreparedStatement ps) throws SQLException, DataAccessException {
            for (RawItem item : items) {
                final List<Object> paramsForLog = new ArrayList(orderedHeaders.length);
                int counter = 1;
                paramsForLog.add(batchStartRef.get());
                StatementCreatorUtils.setParameterValue(ps, counter, SqlTypeValue.TYPE_UNKNOWN,
                        batchStartRef.getAndIncrement());
                counter++;
                for (String header : orderedHeaders) {
                    final Map<String, String> record = item.getRecord();
                    String value = record.get(header);
                    final Integer sqlType = metadataRepository.getRepository().getColumnMetadataRepository()
                            .getColumnMetadata(new ColumnReference(tableName[0], header)).getJavaSqlType();
                    paramsForLog.add(value);
                    StatementCreatorUtils.setParameterValue(ps, counter, sqlType, value);
                    counter++;
                }
                sayQuery(sql, paramsForLog);
                ps.addBatch();
            }
            return ps.executeBatch();
        }
    });

    batchStart = batchStartRef.get();

    say("******CHUNK POSTGRES******");
}

From source file: org.fcrepo.camel.processor.LdnProcessor.java

/**
 * Process the Fedora message
 *
 * @param exchange the current camel message exchange
 */
public void process(final Exchange exchange) throws IOException, NoSuchHeaderException {
    final Message in = exchange.getIn();
    final Model model = createDefaultModel();
    final Model newModel = createDefaultModel();
    final Resource resource = createResource(getSubjectUri(exchange));
    final Resource event = createResource("");
    final AtomicInteger counter = new AtomicInteger();
    final ByteArrayOutputStream serializedGraph = new ByteArrayOutputStream();

    read(model, in.getBody(InputStream.class),
            contentTypeToLang(parse(in.getHeader(CONTENT_TYPE, String.class)).getMimeType()));

    newModel.add(createStatement(event, used, resource));
    model.listObjectsOfProperty(resource, wasGeneratedBy).forEachRemaining(obj -> {
        if (obj.isResource()) {
            obj.asResource().listProperties().forEachRemaining(stmt -> {
                newModel.add(createStatement(event, stmt.getPredicate(), stmt.getObject()));
            });
        }
    });
    model.listObjectsOfProperty(resource, wasAttributedTo).forEachRemaining(obj -> {
        final Resource agent = createResource("#agent" + Integer.toString(counter.getAndIncrement()));
        if (obj.isResource()) {
            obj.asResource().listProperties().forEachRemaining(stmt -> {
                newModel.add(createStatement(agent, stmt.getPredicate(), stmt.getObject()));
            });
        }
        newModel.add(createStatement(event, wasAssociatedWith, agent));
    });

    write(serializedGraph, newModel, JSONLD);
    in.setBody(serializedGraph.toString("UTF-8"));
    in.setHeader(HTTP_METHOD, "POST");
    in.setHeader(CONTENT_TYPE, "application/ld+json");
}

From source file: com.gs.obevo.db.apps.reveng.AbstractDdlReveng.java

private void revengMain(AquaRevengArgs args) {
    String schema = args.getDbSchema();
    File file = args.getInputPath();
    boolean generateBaseline = args.isGenerateBaseline();
    File outputDir = args.getOutputPath();

    MutableList<ChangeEntry> changeEntries = Lists.mutable.empty();

    final MutableList<String> dataLines;
    if (file.isFile()) {
        dataLines = FileUtilsCobra.readLines(file);
    } else {
        dataLines = ArrayAdapter.adapt(file.listFiles()).select(new Predicate<File>() {
            @Override
            public boolean accept(File file) {
                return file.isFile();
            }
        }).flatCollect(new Function<File, Iterable<String>>() {
            @Override
            public Iterable<String> valueOf(File file) {
                return FileUtilsCobra.readLines(file);
            }
        });
    }

    dataLines.forEachWithIndex(new ObjectIntProcedure<String>() {
        @Override
        public void value(String line, int i) {
            if (line.startsWith("--------------------") && dataLines.get(i + 1).startsWith("-- DDL Statements")
                    && dataLines.get(i + 2).startsWith("--------------------")) {
                dataLines.set(i, "");
                dataLines.set(i + 1, "");
                dataLines.set(i + 2, "");
            } else if (line.startsWith("--------------------")
                    && dataLines.get(i + 2).startsWith("-- DDL Statements")
                    && dataLines.get(i + 4).startsWith("--------------------")) {
                dataLines.set(i, "");
                dataLines.set(i + 1, "");
                dataLines.set(i + 2, "");
                dataLines.set(i + 3, "");
                dataLines.set(i + 4, "");
            } else if (line.startsWith("-- DDL Statements for ")) {
                dataLines.set(i, "");
            }
        }
    });

    String data = dataLines.makeString(SystemUtils.LINE_SEPARATOR);

    MutableList<String> entries = stringSplitter.valueOf(data);

    String candidateObject = "UNKNOWN";
    ChangeType candidateObjectType = UnclassifiedChangeType.INSTANCE;

    int selfOrder = 0;
    int objectOrder = 0;

    // Find object names
    MutableSet<String> objectNames = Sets.mutable.empty();
    for (String candidateLine : entries) {
        candidateLine = StringUtils.stripStart(candidateLine, "\r\n \t");

        if (StringUtils.isNotBlank(candidateLine) && Predicates.noneOf(skipPredicates).accept(candidateLine)) {
            candidateLine = candidateLine.replaceAll(schema + "\\.dbo\\.", ""); // sybase ASE
            candidateLine = candidateLine.replaceAll("'dbo\\.", "'"); // sybase ASE
            candidateLine = candidateLine.replaceAll("\"" + schema + "\\s*\"\\.", ""); // DB2
            candidateLine = candidateLine.replaceAll(schema + "\\.", ""); // alternate DB2 for views
            candidateLine = removeQuotesFromProcxmode(candidateLine); // sybase ASE

            RevengPattern chosenRevengPattern = null;
            String secondaryName = null;
            for (RevengPattern revengPattern : revengPatterns) {
                RevengPatternOutput patternMatch = revengPattern.evaluate(candidateLine);
                if (patternMatch != null) {
                    System.out.println("OBJECT NAME " + patternMatch.getPrimaryName());
                    objectNames.add(patternMatch.getPrimaryName());
                    chosenRevengPattern = revengPattern;
                    candidateObject = patternMatch.getPrimaryName();
                    if (patternMatch.getSecondaryName() != null) {
                        secondaryName = patternMatch.getSecondaryName();
                    }
                    candidateObjectType = platform.getChangeType(revengPattern.getChangeType());
                    objectOrder = 0;
                    break;
                }
            }
        }
    }

    MutableMap<String, AtomicInteger> countByObject = Maps.mutable.empty();

    for (String candidateLine : entries) {
        try {

            candidateLine = StringUtils.stripStart(candidateLine, "\r\n \t");

            if (StringUtils.isNotBlank(candidateLine)
                    && Predicates.noneOf(skipPredicates).accept(candidateLine)) {
                for (String objectName : objectNames) {
                    candidateLine = candidateLine.replaceAll(schema + "\\s*\\." + objectName, objectName); // sybase ASE
                    candidateLine = candidateLine.replaceAll(
                            schema.toLowerCase() + "\\s*\\." + objectName.toLowerCase(),
                            objectName.toLowerCase()); // sybase ASE
                }
                candidateLine = candidateLine.replaceAll(schema + "\\.dbo\\.", ""); // sybase ASE
                candidateLine = candidateLine.replaceAll("'dbo\\.", "'"); // sybase ASE
                candidateLine = candidateLine.replaceAll("\"" + schema + "\\s*\"\\.", ""); // DB2
                candidateLine = candidateLine.replaceAll(schema + "\\.", ""); // alternate DB2 for views
                candidateLine = removeQuotesFromProcxmode(candidateLine); // sybase ASE

                RevengPattern chosenRevengPattern = null;
                String secondaryName = null;
                for (RevengPattern revengPattern : revengPatterns) {
                    RevengPatternOutput patternMatch = revengPattern.evaluate(candidateLine);
                    if (patternMatch != null) {
                        chosenRevengPattern = revengPattern;
                        candidateObject = patternMatch.getPrimaryName();
                        if (patternMatch.getSecondaryName() != null) {
                            secondaryName = patternMatch.getSecondaryName();
                        }
                        candidateObjectType = platform.getChangeType(revengPattern.getChangeType());
                        objectOrder = 0;
                        break;
                    }
                }

                AtomicInteger objectOrder2 = countByObject.getIfAbsentPut(candidateObject,
                        new Function0<AtomicInteger>() {
                            @Override
                            public AtomicInteger value() {
                                return new AtomicInteger(0);
                            }
                        });

                if (secondaryName == null) {
                    secondaryName = "change" + objectOrder2.getAndIncrement();
                }
                RevEngDestination destination = new RevEngDestination(schema, candidateObjectType,
                        candidateObject, false);

                String annotation = chosenRevengPattern != null ? chosenRevengPattern.getAnnotation() : null;
                MutableList<Function<String, LineParseOutput>> postProcessSqls = chosenRevengPattern != null
                        ? chosenRevengPattern.getPostProcessSqls()
                        : Lists.mutable.<Function<String, LineParseOutput>>empty();

                for (Function<String, LineParseOutput> postProcessSql : postProcessSqls) {
                    LineParseOutput lineParseOutput = postProcessSql.valueOf(candidateLine);
                    candidateLine = lineParseOutput.getLineOutput();
                }

                ChangeEntry change = new ChangeEntry(destination, candidateLine + "\nGO", secondaryName,
                        annotation, selfOrder++);

                postProcessChange.value(change, candidateLine);

                changeEntries.add(change);
            }
        } catch (RuntimeException e) {
            throw new RuntimeException("Failed parsing on statement " + candidateLine, e);
        }
    }

    new RevengWriter().write(platform, changeEntries, outputDir, generateBaseline,
            RevengWriter.defaultShouldOverwritePredicate(), args.getDbHost(), args.getDbPort(),
            args.getDbServer());
}

From source file: org.dataconservancy.packaging.tool.impl.generator.DomainObjectResourceBuilderTest.java

@Test
public void singleResourcePerDomainObject() throws Exception {
    PackageModelBuilderState state = bootstrap3();
    state.assembler = spy(new FunctionalAssemblerMock(folder.getRoot()));

    DomainObjectResourceBuilder underTest = new DomainObjectResourceBuilder();

    underTest.init(state);

    AtomicInteger reservedResources = new AtomicInteger(0);
    doAnswer(invocation -> {
        InputStream domainObjectGraphIn = invocation.getArgumentAt(1, InputStream.class);
        assertNotNull(domainObjectGraphIn);
        Model domainObjectGraph = ModelFactory.createDefaultModel().read(domainObjectGraphIn, "foo", "TTL");
        assertEquals(1, domainObjectGraph.listSubjects().toList().size());
        reservedResources.getAndIncrement();
        return null;
    }).when(state.assembler).putResource(any(), any());

    state.tree.walk(n -> underTest.visitNode(n, state));

    assertEquals(3, reservedResources.get());
}

From source file: com.github.cherimojava.data.mongo.io._DeEncoding.java

@Test
public void saveDrop() {
    PrimitiveEntity pe = factory.create(PrimitiveEntity.class);
    factory.create(PrimitiveEntity.class).setString("don't delete").save();
    pe.setString("413").save();
    final AtomicInteger count = new AtomicInteger(0);
    MongoCollection<PrimitiveEntity> coll = getCollection(PrimitiveEntity.class);
    coll.find(PrimitiveEntity.class).forEach(new Block<Entity>() {
        @Override
        public void apply(Entity entity) {
            count.getAndIncrement();
        }
    });
    assertEquals(2, count.get());
    pe.drop();

    count.compareAndSet(2, 0);
    coll.find(PrimitiveEntity.class).forEach(new Block<Entity>() {
        @Override
        public void apply(Entity entity) {
            count.getAndIncrement();
        }
    });
    assertEquals(1, count.get());
}

From source file: org.wildfly.security.tool.FileSystemRealmCommand.java

/**
 * Handles input being given as a descriptor file
 *
 * @throws Exception Exception to be handled by Elytron Tool
 */
private void parseDescriptorFile(String file) throws Exception {
    Path path = Paths.get(file);
    if (!path.toFile().exists()) {
        errorHandler(ElytronToolMessages.msg.fileNotFound(file));
    }

    Descriptor descriptor = new Descriptor();
    AtomicInteger count = new AtomicInteger(1);
    try (Stream<String> stream = Files.lines(path)) {
        stream.forEach(line -> {
            if (line.equals("")) {
                findMissingRequiredValuesAndSetValues(count.intValue(), descriptor);
                copyAddResetDescriptor(descriptor);
                count.getAndIncrement();
            } else {
                // Since Windows URIs have a colon, only split at first occurrence
                String[] parts = line.split(":", 2);
                String option = parts[0];
                String arg = parts[1];
                switch (option) {
                case USERS_FILE_PARAM:
                    descriptor.setUsersFile(arg);
                    break;
                case ROLES_FILE_PARAM:
                    descriptor.setRolesFile(arg);
                    break;
                case OUTPUT_LOCATION_PARAM:
                    descriptor.setOutputLocation(arg);
                    break;
                case FILESYSTEM_REALM_NAME_PARAM:
                    descriptor.setFileSystemRealmName(arg);
                    break;
                case SECURITY_DOMAIN_NAME_PARAM:
                    descriptor.setSecurityDomainName(arg);
                    break;
                }
            }
        });
    } catch (IOException e) {
        errorHandler(e);
    }
    int currentCount = count.intValue();
    findMissingRequiredValuesAndSetValues(currentCount, descriptor);
    copyAddResetDescriptor(descriptor);
    if (summaryMode) {
        printDescriptorBlocks(currentCount);
    }
    count.getAndIncrement();
}

From source file: org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.java

/**
 * runTest implementation
 * @param length file length
 * @param killPos killing positions in ascending order
 * @param dnIndex DN index to kill when meets killing positions
 * @param tokenExpire wait token to expire when kill a DN
 * @throws Exception
 */
private void runTest(final int length, final int[] killPos, final int[] dnIndex, final boolean tokenExpire)
        throws Exception {
    if (killPos[0] <= FLUSH_POS) {
        LOG.warn("killPos=" + Arrays.toString(killPos) + " <= FLUSH_POS=" + FLUSH_POS + ", length=" + length
                + ", dnIndex=" + Arrays.toString(dnIndex));
        return; //skip test
    }
    Preconditions.checkArgument(length > killPos[0], "length=%s <= killPos=%s", length, killPos);
    Preconditions.checkArgument(killPos.length == dnIndex.length);

    final Path p = new Path(dir,
            "dn" + Arrays.toString(dnIndex) + "len" + length + "kill" + Arrays.toString(killPos));
    final String fullPath = p.toString();
    LOG.info("fullPath=" + fullPath);

    if (tokenExpire) {
        final NameNode nn = cluster.getNameNode();
        final BlockManager bm = nn.getNamesystem().getBlockManager();
        final BlockTokenSecretManager sm = bm.getBlockTokenSecretManager();

        // set a short token lifetime (1 second)
        SecurityTestUtil.setBlockTokenLifetime(sm, 1000L);
    }

    final AtomicInteger pos = new AtomicInteger();
    final FSDataOutputStream out = dfs.create(p);
    final DFSStripedOutputStream stripedOut = (DFSStripedOutputStream) out.getWrappedStream();

    long firstGS = -1; // first GS of this block group which never proceeds blockRecovery
    long oldGS = -1; // the old GS before bumping
    List<Long> gsList = new ArrayList<>();
    final List<DatanodeInfo> killedDN = new ArrayList<>();
    int numKilled = 0;
    for (; pos.get() < length;) {
        final int i = pos.getAndIncrement();
        if (numKilled < killPos.length && i == killPos[numKilled]) {
            assertTrue(firstGS != -1);
            final long gs = getGenerationStamp(stripedOut);
            if (numKilled == 0) {
                assertEquals(firstGS, gs);
            } else {
                //TODO: implement hflush/hsync and verify gs strict greater than oldGS
                assertTrue(gs >= oldGS);
            }
            oldGS = gs;

            if (tokenExpire) {
                DFSTestUtil.flushInternal(stripedOut);
                waitTokenExpires(out);
            }

            killedDN.add(killDatanode(cluster, stripedOut, dnIndex[numKilled], pos));
            numKilled++;
        }

        write(out, i);

        if (i % BLOCK_GROUP_SIZE == FLUSH_POS) {
            firstGS = getGenerationStamp(stripedOut);
            oldGS = firstGS;
        }
        if (i > 0 && (i + 1) % BLOCK_GROUP_SIZE == 0) {
            gsList.add(oldGS);
        }
    }
    gsList.add(oldGS);
    out.close();
    assertEquals(dnIndex.length, numKilled);

    StripedFileTestUtil.waitBlockGroupsReported(dfs, fullPath, numKilled);

    cluster.triggerBlockReports();
    StripedFileTestUtil.checkData(dfs, p, length, killedDN, gsList);
}

From source file: org.apache.hadoop.hdfs.TestWriteStripedFileWithFailure.java

/**
 * Test writing a file with shutting down some DNs (data DNs or parity DNs or both).
 * @param fileLength file length
 * @param dataDNFailureNum the shutdown number of data DNs
 * @param parityDNFailureNum the shutdown number of parity DNs
 * @throws IOException
 */
private void writeFileWithDNFailure(int fileLength, int dataDNFailureNum, int parityDNFailureNum)
        throws IOException {
    String fileType = fileLength < (blockSize * dataBlocks) ? "smallFile" : "largeFile";
    String src = "/dnFailure_" + dataDNFailureNum + "_" + parityDNFailureNum + "_" + fileType;
    LOG.info("writeFileWithDNFailure: file = " + src + ", fileType = " + fileType + ", dataDNFailureNum = "
            + dataDNFailureNum + ", parityDNFailureNum = " + parityDNFailureNum);

    Path srcPath = new Path(src);
    final AtomicInteger pos = new AtomicInteger();
    final FSDataOutputStream out = fs.create(srcPath);
    final DFSStripedOutputStream stripedOut = (DFSStripedOutputStream) out.getWrappedStream();

    int[] dataDNFailureIndices = StripedFileTestUtil.randomArray(0, dataBlocks, dataDNFailureNum);
    Assert.assertNotNull(dataDNFailureIndices);
    int[] parityDNFailureIndices = StripedFileTestUtil.randomArray(dataBlocks, dataBlocks + parityBlocks,
            parityDNFailureNum);
    Assert.assertNotNull(parityDNFailureIndices);

    int[] failedDataNodes = new int[dataDNFailureNum + parityDNFailureNum];
    System.arraycopy(dataDNFailureIndices, 0, failedDataNodes, 0, dataDNFailureIndices.length);
    System.arraycopy(parityDNFailureIndices, 0, failedDataNodes, dataDNFailureIndices.length,
            parityDNFailureIndices.length);

    final int killPos = fileLength / 2;
    for (; pos.get() < fileLength;) {
        final int i = pos.getAndIncrement();
        if (i == killPos) {
            for (int failedDn : failedDataNodes) {
                StripedFileTestUtil.killDatanode(cluster, stripedOut, failedDn, pos);
            }
        }
        write(out, i);
    }
    out.close();

    // make sure the expected number of Datanode have been killed
    int dnFailureNum = dataDNFailureNum + parityDNFailureNum;
    Assert.assertEquals(cluster.getDataNodes().size(), numDNs - dnFailureNum);

    byte[] smallBuf = new byte[1024];
    byte[] largeBuf = new byte[fileLength + 100];
    final byte[] expected = StripedFileTestUtil.generateBytes(fileLength);
    StripedFileTestUtil.verifyLength(fs, srcPath, fileLength);
    StripedFileTestUtil.verifySeek(fs, srcPath, fileLength);
    StripedFileTestUtil.verifyStatefulRead(fs, srcPath, fileLength, expected, smallBuf);
    StripedFileTestUtil.verifyPread(fs, srcPath, fileLength, expected, largeBuf);

    // delete the file
    fs.delete(srcPath, true);
}

From source file: com.googlecode.msidor.springframework.integration.channel.ConcurentOrderedMultiQueueChannel.java

/**
 * Adds message to the queue.
 * This method blocks if there is no space.
 * @param message to be added
 * @param timeout after which the method awakes from waiting for free space (0 for no timeout)
 * @return true if message was successfully added to queue
 */
@Override
protected boolean doSend(Message<?> message, long timeout) {
    Assert.notNull(message, "'message' must not be null");

    log.trace("Sending message " + message);

    long nanos = TimeUnit.MILLISECONDS.toNanos(timeout);
    int c = -1;
    final ReentrantLock lock = this.objectLock;
    final AtomicInteger count = this.count;
    try {
        //lock the object exclusively
        lock.lockInterruptibly();

        while (count.get() == totalCapacity) {
            //if timeout was set and has elapsed
            if (nanos <= 0 && timeout > 0)
                return false;

            //wait for notification when any message has been handled
            if (timeout > 0) {
                nanos = notFull.awaitNanos(nanos);
            } else {
                notFull.await();
            }
        }

        //add message to the queue
        addMessage(message);

        c = count.getAndIncrement();

        //if there is still some space notify any other potentially dormant producer thread
        if (c + 1 < totalCapacity)
            notFull.signal();

    } catch (InterruptedException e) {
        log.trace("Lock interrupted by other thread");
    } finally {
        //notify potentially dormant consumer thread that there is a message to handle 
        newMessagesToCheck.signal();
        lock.unlock();
    }

    return true;
}

From source file: com.metamx.emitter.core.EmitterTest.java

@Test
public void testBatchSplitting() throws Exception {
    final byte[] big = new byte[500 * 1024];
    for (int i = 0; i < big.length; i++) {
        big[i] = 'x';
    }
    final String bigString = new String(big);
    final List<UnitEvent> events = Arrays.asList(new UnitEvent(bigString, 1), new UnitEvent(bigString, 2),
            new UnitEvent(bigString, 3), new UnitEvent(bigString, 4));
    final AtomicInteger counter = new AtomicInteger();
    emitter = manualFlushEmitterWithBatchSizeAndBufferSize(1024 * 1024, 5 * 1024 * 1024);
    Assert.assertEquals(0, emitter.getBufferedSize());

    httpClient.setGoHandler(new GoHandler() {
        @Override
        public <Intermediate, Final> ListenableFuture<Final> go(Request request,
                HttpResponseHandler<Intermediate, Final> handler) throws Exception {
            Assert.assertEquals(new URL(TARGET_URL), request.getUrl());
            Assert.assertEquals(ImmutableList.of("application/json"),
                    request.getHeaders().get(HttpHeaders.Names.CONTENT_TYPE));
            Assert.assertEquals(
                    String.format("[%s,%s]\n",
                            jsonMapper.writeValueAsString(events.get(counter.getAndIncrement())),
                            jsonMapper.writeValueAsString(events.get(counter.getAndIncrement()))),
                    request.getContent().toString(Charsets.UTF_8));
            Assert.assertTrue("handler is a StatusResponseHandler", handler instanceof StatusResponseHandler);

            return Futures.immediateFuture((Final) okResponse());
        }
    }.times(3));

    for (UnitEvent event : events) {
        emitter.emit(event);
    }
    Assert.assertEquals(jsonMapper.writeValueAsString(events).length() - events.size() - 1,
            emitter.getBufferedSize());

    emitter.flush();
    waitForEmission(emitter);
    Assert.assertEquals(0, emitter.getBufferedSize());
    closeNoFlush(emitter);
    Assert.assertTrue(httpClient.succeeded());
}