Example usage for java.util.concurrent.atomic AtomicInteger set

List of usage examples for java.util.concurrent.atomic AtomicInteger set

Introduction

This page collects real-world usage examples for java.util.concurrent.atomic.AtomicInteger.set, drawn from open-source projects.

Prototype

public final void set(int newValue) 

Document

Sets the value to newValue, with memory effects as specified by VarHandle#setVolatile.
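
Before the longer examples below, a minimal self-contained sketch (class and variable names are illustrative) showing the call in isolation: set performs an unconditional volatile write and returns nothing, in contrast to compareAndSet, which writes only when the current value matches an expected one.

import java.util.concurrent.atomic.AtomicInteger;

public class AtomicIntegerSetDemo {
    public static void main(String[] args) {
        AtomicInteger counter = new AtomicInteger(5);
        counter.set(0); // unconditional volatile write, no return value
        System.out.println(counter.get()); // prints 0

        // compareAndSet, by contrast, only writes when the expected value matches.
        boolean swapped = counter.compareAndSet(0, 42);
        System.out.println(swapped + " " + counter.get()); // prints "true 42"
    }
}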

Usage

From source file: org.apache.hadoop.hbase.client.TestAdmin1.java

void splitTest(byte[] splitPoint, byte[][] familyNames, int[] rowCounts, int numVersions, int blockSize)
        throws Exception {
    TableName tableName = TableName.valueOf("testForceSplit");
    StringBuilder sb = new StringBuilder();
    // Add tail to String so can see better in logs where a test is running.
    for (int i = 0; i < rowCounts.length; i++) {
        sb.append("_").append(Integer.toString(rowCounts[i]));
    }
    assertFalse(admin.tableExists(tableName));
    try (final Table table = TEST_UTIL.createTable(tableName, familyNames, numVersions, blockSize);
            final RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) {

        int rowCount = 0;
        byte[] q = new byte[0];

        // insert rows into column families. The number of rows that have values
        // in a specific column family is decided by rowCounts[familyIndex]
        for (int index = 0; index < familyNames.length; index++) {
            ArrayList<Put> puts = new ArrayList<Put>(rowCounts[index]);
            for (int i = 0; i < rowCounts[index]; i++) {
                byte[] k = Bytes.toBytes(i);
                Put put = new Put(k);
                put.addColumn(familyNames[index], q, k);
                puts.add(put);
            }
            table.put(puts);

            if (rowCount < rowCounts[index]) {
                rowCount = rowCounts[index];
            }
        }

        // get the initial layout (should just be one region)
        List<HRegionLocation> m = locator.getAllRegionLocations();
        LOG.info("Initial regions (" + m.size() + "): " + m);
        assertTrue(m.size() == 1);

        // Verify row count
        Scan scan = new Scan();
        ResultScanner scanner = table.getScanner(scan);
        int rows = 0;
        for (@SuppressWarnings("unused") Result result : scanner) {
            rows++;
        }
        scanner.close();
        assertEquals(rowCount, rows);

        // Have an outstanding scan going on to make sure we can scan over splits.
        scan = new Scan();
        scanner = table.getScanner(scan);
        // Scan first row so we are into first region before split happens.
        scanner.next();

        // Split the table
        this.admin.split(tableName, splitPoint);

        final AtomicInteger count = new AtomicInteger(0);
        Thread t = new Thread("CheckForSplit") {
            @Override
            public void run() {
                for (int i = 0; i < 45; i++) {
                    try {
                        sleep(1000);
                    } catch (InterruptedException e) {
                        continue;
                    }
                    // check again
                    List<HRegionLocation> regions = null;
                    try {
                        regions = locator.getAllRegionLocations();
                    } catch (IOException e) {
                        e.printStackTrace();
                    }
                    if (regions == null)
                        continue;
                    count.set(regions.size());
                    if (count.get() >= 2) {
                        LOG.info("Found: " + regions);
                        break;
                    }
                    LOG.debug("Cycle waiting on split");
                }
                LOG.debug("CheckForSplit thread exited, current region count: " + count.get());
            }
        };
        t.setPriority(Thread.NORM_PRIORITY - 2);
        t.start();
        t.join();

        // Verify row count
        rows = 1; // We counted one row above.
        for (@SuppressWarnings("unused") Result result : scanner) {
            rows++;
            if (rows > rowCount) {
                scanner.close();
                assertTrue("Scanned more than expected (" + rowCount + ")", false);
            }
        }
        scanner.close();
        assertEquals(rowCount, rows);

        List<HRegionLocation> regions = null;
        try {
            regions = locator.getAllRegionLocations();
        } catch (IOException e) {
            e.printStackTrace();
        }
        assertEquals(2, regions.size());
        if (splitPoint != null) {
            // make sure the split point matches our explicit configuration
            assertEquals(Bytes.toString(splitPoint),
                    Bytes.toString(regions.get(0).getRegionInfo().getEndKey()));
            assertEquals(Bytes.toString(splitPoint),
                    Bytes.toString(regions.get(1).getRegionInfo().getStartKey()));
            LOG.debug("Properly split on " + Bytes.toString(splitPoint));
        } else {
            if (familyNames.length > 1) {
                int splitKey = Bytes.toInt(regions.get(0).getRegionInfo().getEndKey());
                // check if splitKey is based on the largest column family
                // in terms of its store size
                int deltaForLargestFamily = Math.abs(rowCount / 2 - splitKey);
                LOG.debug("SplitKey=" + splitKey + "&deltaForLargestFamily=" + deltaForLargestFamily + ", r="
                        + regions.get(0).getRegionInfo());
                for (int index = 0; index < familyNames.length; index++) {
                    int delta = Math.abs(rowCounts[index] / 2 - splitKey);
                    if (delta < deltaForLargestFamily) {
                        assertTrue("Delta " + delta + " for family " + index + " should be at least "
                                + "deltaForLargestFamily " + deltaForLargestFamily, false);
                    }
                }
            }
        }
        TEST_UTIL.deleteTable(tableName);
    }
}
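
The polling thread above publishes the latest region count with count.set(regions.size()) so the main thread can read it after join(). A stripped-down sketch of that publish-then-join pattern, with the region lookup simulated and illustrative names:

import java.util.concurrent.atomic.AtomicInteger;

public class SplitWatcherSketch {
    public static void main(String[] args) throws InterruptedException {
        final AtomicInteger count = new AtomicInteger(0);

        // The worker publishes each observation with set(); the volatile write
        // plus the happens-before edge of join() makes the final value visible.
        Thread watcher = new Thread("CheckForSplit") {
            @Override
            public void run() {
                for (int regions = 1; regions <= 3; regions++) {
                    count.set(regions); // publish the latest observed value
                    if (count.get() >= 2) {
                        break; // stop polling once the condition is met
                    }
                }
            }
        };
        watcher.start();
        watcher.join();

        System.out.println("last observed region count: " + count.get());
    }
}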

From source file: io.pravega.segmentstore.server.containers.StreamSegmentMapperTests.java

/**
 * Tests the ability of getOrAssignStreamSegmentId to handle the TooManyActiveSegmentsException.
 */
@Test
public void testGetOrAssignStreamSegmentIdWithMetadataLimit() throws Exception {
    final String segmentName = "Segment";
    final String transactionName = StreamSegmentNameUtils.getTransactionNameFromId(segmentName,
            UUID.randomUUID());

    HashSet<String> storageSegments = new HashSet<>();
    storageSegments.add(segmentName);
    storageSegments.add(transactionName);

    @Cleanup
    TestContext context = new TestContext();
    setupStorageGetHandler(context, storageSegments,
            name -> new StreamSegmentInformation(name, 0, false, false, new ImmutableDate()));

    // 1. Verify the behavior when even after the retry we still cannot map.
    AtomicInteger exceptionCounter = new AtomicInteger();
    AtomicBoolean cleanupInvoked = new AtomicBoolean();

    // We use 'containerId' as a proxy for the exception id (to make sure we collect the right one).
    context.operationLog.addHandler = op -> FutureHelpers
            .failedFuture(new TooManyActiveSegmentsException(exceptionCounter.incrementAndGet(), 0));
    Supplier<CompletableFuture<Void>> noOpCleanup = () -> {
        if (!cleanupInvoked.compareAndSet(false, true)) {
            return FutureHelpers.failedFuture(new AssertionError("Cleanup invoked multiple times."));
        }
        return CompletableFuture.completedFuture(null);
    };
    val mapper1 = new StreamSegmentMapper(context.metadata, context.operationLog, context.stateStore,
            noOpCleanup, context.storage, executorService());
    AssertExtensions.assertThrows(
            "Unexpected outcome when trying to map a segment name to a full metadata that cannot be cleaned.",
            () -> mapper1.getOrAssignStreamSegmentId(segmentName, TIMEOUT),
            ex -> ex instanceof TooManyActiveSegmentsException
                    && ((TooManyActiveSegmentsException) ex).getContainerId() == exceptionCounter.get());
    Assert.assertEquals("Unexpected number of attempts to map.", 2, exceptionCounter.get());
    Assert.assertTrue("Cleanup was not invoked.", cleanupInvoked.get());

    // Now with a transaction.
    exceptionCounter.set(0);
    cleanupInvoked.set(false);
    AssertExtensions.assertThrows(
            "Unexpected outcome when trying to map a segment name to a full metadata that cannot be cleaned.",
            () -> mapper1.getOrAssignStreamSegmentId(transactionName, TIMEOUT),
            ex -> ex instanceof TooManyActiveSegmentsException
                    && ((TooManyActiveSegmentsException) ex).getContainerId() == exceptionCounter.get());
    Assert.assertEquals("Unexpected number of attempts to map.", 2, exceptionCounter.get());
    Assert.assertTrue("Cleanup was not invoked.", cleanupInvoked.get());

    // 2. Verify the behavior when the first call fails, but the second one succeeds.
    exceptionCounter.set(0);
    cleanupInvoked.set(false);
    Supplier<CompletableFuture<Void>> workingCleanup = () -> {
        if (!cleanupInvoked.compareAndSet(false, true)) {
            return FutureHelpers.failedFuture(new AssertionError("Cleanup invoked multiple times."));
        }

        setupOperationLog(context); // Setup the OperationLog to function correctly.
        return CompletableFuture.completedFuture(null);
    };

    val mapper2 = new StreamSegmentMapper(context.metadata, context.operationLog, context.stateStore,
            workingCleanup, context.storage, executorService());
    long id = mapper2.getOrAssignStreamSegmentId(segmentName, TIMEOUT).join();
    Assert.assertEquals("Unexpected number of attempts to map.", 1, exceptionCounter.get());
    Assert.assertTrue("Cleanup was not invoked.", cleanupInvoked.get());
    Assert.assertNotEquals("No valid SegmentId assigned.", ContainerMetadata.NO_STREAM_SEGMENT_ID, id);
}
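
Between the test phases the counters are reset with set(0) rather than replaced with new objects, because the handlers registered earlier capture references to the existing instances. A condensed sketch of that reset pattern (names are illustrative):

import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;

public class CounterResetSketch {
    public static void main(String[] args) {
        AtomicInteger exceptionCounter = new AtomicInteger();
        AtomicBoolean cleanupInvoked = new AtomicBoolean();

        // Phase 1: the code under test bumps the shared counters.
        exceptionCounter.incrementAndGet();
        cleanupInvoked.set(true);
        System.out.println("phase 1 attempts: " + exceptionCounter.get());

        // Reset with set() so already-registered callbacks keep observing
        // the same instances in phase 2.
        exceptionCounter.set(0);
        cleanupInvoked.set(false);

        // Phase 2 starts from a clean slate.
        exceptionCounter.incrementAndGet();
        System.out.println("phase 2 attempts: " + exceptionCounter.get());
    }
}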

From source file: org.apache.directory.studio.test.integration.ui.BrowserTest.java

/**
 * Test for DIRSTUDIO-1121.
 *
 * Verify input is set only once when entry is selected.
 */
@Test
public void testSetInputOnlyOnce() throws Exception {
    browserViewBot.selectEntry("DIT", "Root DSE", "ou=system", "ou=users");
    browserViewBot.expandEntry("DIT", "Root DSE", "ou=system", "ou=users");

    // verify link-with-editor is enabled
    assertTrue(BrowserUIPlugin.getDefault().getPreferenceStore()
            .getBoolean(BrowserUIConstants.PREFERENCE_BROWSER_LINK_WITH_EDITOR));

    // setup counter and listener to record entry editor input changes
    final AtomicInteger counter = new AtomicInteger();
    UIThreadRunnable.syncExec(new VoidResult() {
        public void run() {
            try {
                IEditorPart editor = PlatformUI.getWorkbench().getActiveWorkbenchWindow().getActivePage()
                        .getActiveEditor();
                editor.addPropertyListener(new IPropertyListener() {
                    @Override
                    public void propertyChanged(Object source, int propId) {
                        if (source instanceof EntryEditor && propId == BrowserUIConstants.INPUT_CHANGED) {
                            counter.incrementAndGet();
                        }
                    }
                });
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }
    });

    // select 3 different entries; selecting the same one twice should not set the input again
    browserViewBot.selectEntry("DIT", "Root DSE", "ou=system", "ou=users", "uid=user.1");
    browserViewBot.selectEntry("DIT", "Root DSE", "ou=system", "ou=users", "uid=user.1");
    browserViewBot.selectEntry("DIT", "Root DSE", "ou=system", "ou=users", "uid=user.2");
    browserViewBot.selectEntry("DIT", "Root DSE", "ou=system", "ou=users", "uid=user.2");
    browserViewBot.selectEntry("DIT", "Root DSE", "ou=system", "ou=users", "uid=user.3");
    browserViewBot.selectEntry("DIT", "Root DSE", "ou=system", "ou=users", "uid=user.3");

    // verify that input was only set 3 times.
    assertEquals("Only 3 input changes expected.", 3, counter.get());

    // reset counter
    counter.set(0);

    // use navigation history to go back and forth, each step should set input only once
    studioBot.navigationHistoryBack();
    browserViewBot.waitUntilEntryIsSelected("uid=user.2");
    studioBot.navigationHistoryBack();
    browserViewBot.waitUntilEntryIsSelected("uid=user.1");
    studioBot.navigationHistoryForward();
    browserViewBot.waitUntilEntryIsSelected("uid=user.2");
    studioBot.navigationHistoryForward();
    browserViewBot.waitUntilEntryIsSelected("uid=user.3");

    // verify that input was only set 4 times.
    assertEquals("Only 4 input changes expected.", 4, counter.get());
}

From source file: org.apache.nifi.processors.csv.ExtractCSVHeader.java

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    final FlowFile original = session.get();
    if (original == null) {
        return;
    }

    final AtomicBoolean lineFound = new AtomicBoolean(false);
    final Map<String, String> attrs = new HashMap<>();

    final AtomicInteger headerLength = new AtomicInteger(0);

    session.read(original, new InputStreamCallback() {
        @Override
        public void process(InputStream inputStream) throws IOException {
            // TODO expose the charset property?
            LineIterator iterator = IOUtils.lineIterator(inputStream, UTF_8);
            if (iterator.hasNext()) {
                lineFound.set(true);
                final String header = iterator.nextLine();

                final String format = context.getProperty(PROP_FORMAT).getValue();
                final String delimiter = context.getProperty(PROP_DELIMITER)
                        .evaluateAttributeExpressions(original).getValue();
                final String prefix = context.getProperty(PROP_SCHEMA_ATTR_PREFIX)
                        .evaluateAttributeExpressions(original).getValue();

                attrs.put(prefix + ATTR_HEADER_ORIGINAL, header);
                // TODO validate delimiter in the callback first
                final CSVFormat csvFormat = buildFormat(format, delimiter, true, // we assume first line is the header
                        null); // no custom header
                final CSVParser parser = csvFormat.parse(new StringReader(header));
                final Map<String, Integer> headers = parser.getHeaderMap();
                final int columnCount = headers.size();
                attrs.put(prefix + ATTR_HEADER_COLUMN_COUNT, String.valueOf(columnCount));
                for (Map.Entry<String, Integer> h : headers.entrySet()) {
                    // CSV columns are 1-based in Excel
                    attrs.put(prefix + (h.getValue() + 1), h.getKey());
                }

                // strip the header and send to the 'content' relationship
                if (StringUtils.isNotBlank(header)) {
                    int hLength = header.length();
                    // move past the new line if there are more lines
                    if (original.getSize() > hLength + 1) {
                        hLength++;
                    }
                    headerLength.set(hLength);
                }
            }
        }
    });

    if (lineFound.get()) {
        FlowFile ff = session.putAllAttributes(original, attrs);

        int offset = headerLength.get();
        if (offset > 0) {
            FlowFile contentOnly = session.clone(ff, offset, original.getSize() - offset);
            session.transfer(contentOnly, REL_CONTENT);
        }

        session.transfer(ff, REL_ORIGINAL);
    } else {
        session.transfer(original, REL_FAILURE);
    }
}
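
Here the AtomicInteger is less about concurrency than about mutability: locals referenced from the anonymous InputStreamCallback must be effectively final, so headerLength.set(...) is the only way for the callback to hand a primitive back to the enclosing method. A sketch of that holder pattern, with a hypothetical readFirstLine standing in for session.read(...):

import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Consumer;

public class CallbackCaptureSketch {
    // Hypothetical stand-in for an API that only delivers data to a callback.
    static void readFirstLine(Consumer<String> callback) {
        callback.accept("id,name,price");
    }

    public static void main(String[] args) {
        // Effectively-final holder the callback can write into with set().
        final AtomicInteger headerLength = new AtomicInteger(0);

        readFirstLine(line -> headerLength.set(line.length()));

        System.out.println("header length: " + headerLength.get());
    }
}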

From source file: org.nd4j.linalg.api.test.NDArrayTests.java

@Test
public void testVectorDimension() {
    INDArray test = Nd4j.create(Nd4j.linspace(1, 4, 4).data(), new int[] { 2, 2 });
    final AtomicInteger count = new AtomicInteger(0);
    // row wise
    test.iterateOverDimension(1, new SliceOp() {

        /**
         * Operates on an ndarray slice
         *
         * @param nd the result to operate on
         */
        @Override
        public void operate(INDArray nd) {
            INDArray test = nd;
            if (count.get() == 0) {
                INDArray firstDimension = Nd4j.create(new float[] { 1, 2 }, new int[] { 2 });
                assertEquals(firstDimension, test);
            } else {
                INDArray firstDimension = Nd4j.create(new float[] { 3, 4 }, new int[] { 2 });
                assertEquals(firstDimension, test);

            }

            count.incrementAndGet();
        }

    }, false);

    count.set(0);

    //columnwise
    test.iterateOverDimension(0, new SliceOp() {

        /**
         * Operates on an ndarray slice
         *
         * @param nd the result to operate on
         */
        @Override
        public void operate(INDArray nd) {
            log.info("Operator " + nd);
            INDArray test = nd;
            if (count.get() == 0) {
                INDArray firstDimension = Nd4j.create(new float[] { 1, 3 }, new int[] { 2 });
                assertEquals(firstDimension, test);
            } else {
                INDArray firstDimension = Nd4j.create(new float[] { 2, 4 }, new int[] { 2 });
                assertEquals(firstDimension, test);
                firstDimension.data().destroy();

            }

            count.incrementAndGet();
        }

    }, false);

    test.data().destroy();

}

From source file: org.talend.designer.core.generic.utils.ComponentsUtils.java

/**
 * DOC ycbai Comment method "loadParametersFromForm".
 * <p>
 * Get element parameters of <code>element</code> from <code>form</code>.
 * 
 * @param node optional, used if there is a component setting up the properties
 * @param element
 * @param category
 * @param form
 * @return parameters list
 */
private static List<ElementParameter> getParametersFromForm(IElement element, EComponentCategory category,
        ComponentProperties rootProperty, ComponentProperties compProperties, String parentPropertiesPath,
        Form form, Widget parentWidget, AtomicInteger lastRowNum) {
    List<ElementParameter> elementParameters = new ArrayList<>();
    List<String> parameterNames = new ArrayList<>();
    EComponentCategory compCategory = category;
    if (compCategory == null) {
        compCategory = EComponentCategory.BASIC;
    }
    AtomicInteger lastRN = lastRowNum;
    if (lastRN == null) {
        lastRN = new AtomicInteger();
    }
    if (form == null) {
        return elementParameters;
    }
    ComponentProperties componentProperties = compProperties;
    if (componentProperties == null) {
        componentProperties = (ComponentProperties) form.getProperties();
    }
    if (element instanceof INode) {
        INode node = (INode) element;
        // FIXME - this should be able to be removed TUP-4053
        // Set the properties only one time to get the top-level properties object
        if (node.getComponentProperties() == null) {
            node.setComponentProperties(componentProperties);
        }
    }

    // Don't use Value Evaluator here.
    componentProperties.setValueEvaluator(null);

    // Have to initialize for the messages
    Collection<Widget> formWidgets = form.getWidgets();
    for (Widget widget : formWidgets) {
        NamedThing widgetProperty = widget.getContent();

        String propertiesPath = getPropertiesPath(parentPropertiesPath, null);
        if (widgetProperty instanceof Form) {
            Form subForm = (Form) widgetProperty;
            ComponentProperties subProperties = (ComponentProperties) subForm.getProperties();
            // Reset properties path
            if (!isSameComponentProperties(componentProperties, widgetProperty)) {
                propertiesPath = getPropertiesPath(parentPropertiesPath, subProperties.getName());
            }
            elementParameters.addAll(getParametersFromForm(element, compCategory, rootProperty, subProperties,
                    propertiesPath, subForm, widget, lastRN));
            continue;
        }

        GenericElementParameter param = new GenericElementParameter(element, rootProperty, form, widget,
                getComponentService());
        String parameterName = propertiesPath.concat(param.getName());
        param.setName(parameterName);
        param.setCategory(compCategory);
        param.setShow(
                parentWidget == null ? widget.isVisible() : parentWidget.isVisible() && widget.isVisible());
        int rowNum = 0;
        if (widget.getOrder() != 1) {
            rowNum = lastRN.get();
        } else {
            rowNum = widget.getRow();
            if (parentWidget != null) {
                rowNum += parentWidget.getRow();
            }
            rowNum = rowNum + lastRN.get();
        }
        param.setNumRow(rowNum);
        lastRN.set(rowNum);
        // handle form...

        EParameterFieldType fieldType = getFieldType(widget, widgetProperty);
        param.setFieldType(fieldType != null ? fieldType : EParameterFieldType.TEXT);
        if (widgetProperty instanceof PresentationItem) {
            param.setValue(widgetProperty.getDisplayName());
        } else if (widgetProperty instanceof Property) {
            Property property = (Property) widgetProperty;
            param.setRequired(property.isRequired());
            if (fieldType != null && fieldType.equals(EParameterFieldType.TABLE)) {
                Object value = property.getValue();
                if (value == null) {
                    param.setValue(new ArrayList<Map<String, Object>>());
                } else {
                    param.setValue(value);
                }
                param.setSupportContext(false);
            } else {
                param.setValue(getParameterValue(element, property));
                param.setSupportContext(isSupportContext(property));
            }
            // TCOMP-96
            param.setContext(getConnectionType(property));
            List<?> values = property.getPossibleValues();
            if (values != null) {
                param.setPossibleValues(values);
                List<String> possVals = new ArrayList<>();
                List<String> possValsDisplay = new ArrayList<>();
                for (Object obj : values) {
                    if (obj instanceof NamedThing) {
                        NamedThing nal = (NamedThing) obj;
                        possVals.add(nal.getName());
                        possValsDisplay.add(nal.getDisplayName());
                    } else {
                        possVals.add(String.valueOf(obj));
                        possValsDisplay.add(String.valueOf(obj));
                    }
                }
                param.setListItemsDisplayName(possValsDisplay.toArray(new String[0]));
                param.setListItemsDisplayCodeName(possValsDisplay.toArray(new String[0]));
                param.setListItemsValue(possVals.toArray(new String[0]));
            }
            if (fieldType != null && fieldType.equals(EParameterFieldType.TABLE)) {
                List<ElementParameter> possVals = new ArrayList<>();
                List<String> codeNames = new ArrayList<>();
                List<String> possValsDisplay = new ArrayList<>();
                for (Property curChildProp : property.getChildren()) {
                    EParameterFieldType currentField = EParameterFieldType.TEXT;

                    ElementParameter newParam = new ElementParameter(element);
                    newParam.setName(curChildProp.getName());
                    newParam.setFilter(null);
                    newParam.setDisplayName(""); //$NON-NLS-1$
                    newParam.setFieldType(currentField);
                    newParam.setContext(null);
                    newParam.setShowIf(null);
                    newParam.setNotShowIf(null);
                    newParam.setReadOnlyIf(null);
                    newParam.setNotReadOnlyIf(null);
                    newParam.setNoContextAssist(false);
                    newParam.setRaw(false);
                    newParam.setReadOnly(false);
                    newParam.setValue(curChildProp.getDefaultValue());
                    possVals.add(newParam);
                    if (isPrevColumnList(curChildProp)) {
                        // temporary code while waiting for TCOMP-143
                        newParam.setFieldType(EParameterFieldType.PREV_COLUMN_LIST);
                        newParam.setListItemsDisplayName(new String[0]);
                        newParam.setListItemsDisplayCodeName(new String[0]);
                        newParam.setListItemsValue(new String[0]);
                        newParam.setListRepositoryItems(new String[0]);
                        newParam.setListItemsShowIf(new String[0]);
                        newParam.setListItemsNotShowIf(new String[0]);
                        newParam.setListItemsNotReadOnlyIf(new String[0]);
                        newParam.setListItemsReadOnlyIf(new String[0]);
                    }
                    if (curChildProp.getType().equals(Property.Type.BOOLEAN)) {
                        newParam.setFieldType(EParameterFieldType.CHECK);
                        newParam.setValue(Boolean.valueOf(curChildProp.getDefaultValue()));
                    }
                    codeNames.add(curChildProp.getName());
                    possValsDisplay.add(curChildProp.getDisplayName());
                }
                param.setListItemsDisplayName(possValsDisplay.toArray(new String[0]));
                param.setListItemsDisplayCodeName(codeNames.toArray(new String[0]));
                param.setListItemsValue(possVals.toArray(new ElementParameter[0]));
                String[] listItemsShowIf = new String[property.getChildren().size()];
                String[] listItemsNotShowIf = new String[property.getChildren().size()];
                param.setListItemsShowIf(listItemsShowIf);
                param.setListItemsNotShowIf(listItemsNotShowIf);

            }
        }
        param.setReadOnly(false);
        param.setSerialized(true);
        param.setDynamicSettings(true);
        // Avoid adding duplicate parameters.
        if (!parameterNames.contains(parameterName)) {
            elementParameters.add(param);
            parameterNames.add(parameterName);
        }
    }
    return elementParameters;
}
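
In this example the AtomicInteger lastRowNum is not shared between threads at all; it acts as a mutable in/out parameter, since Java cannot pass an int by reference. Each recursive call reads the last assigned row with get() and writes the new one back with set(). A minimal sketch of that pattern (names are illustrative):

import java.util.concurrent.atomic.AtomicInteger;

public class RowNumberSketch {
    // lastRowNum works as an in/out parameter across recursive calls.
    static void assignRows(int depth, AtomicInteger lastRowNum) {
        if (depth == 0) {
            return;
        }
        int rowNum = lastRowNum.get() + 1; // continue after the previous row
        System.out.println("assigned row " + rowNum);
        lastRowNum.set(rowNum); // write back so the caller sees the progress
        assignRows(depth - 1, lastRowNum);
    }

    public static void main(String[] args) {
        AtomicInteger lastRowNum = new AtomicInteger();
        assignRows(3, lastRowNum);
        System.out.println("final row: " + lastRowNum.get());
    }
}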

From source file: org.apache.hadoop.hbase.client.TestAdmin.java

void splitTest(byte[] splitPoint, byte[][] familyNames, int[] rowCounts, int numVersions, int blockSize)
        throws Exception {
    TableName tableName = TableName.valueOf("testForceSplit");
    StringBuilder sb = new StringBuilder();
    // Add tail to String so can see better in logs where a test is running.
    for (int i = 0; i < rowCounts.length; i++) {
        sb.append("_").append(Integer.toString(rowCounts[i]));
    }
    assertFalse(admin.tableExists(tableName));
    final HTable table = TEST_UTIL.createTable(tableName, familyNames, numVersions, blockSize);

    int rowCount = 0;
    byte[] q = new byte[0];

    // insert rows into column families. The number of rows that have values
    // in a specific column family is decided by rowCounts[familyIndex]
    for (int index = 0; index < familyNames.length; index++) {
        ArrayList<Put> puts = new ArrayList<Put>(rowCounts[index]);
        for (int i = 0; i < rowCounts[index]; i++) {
            byte[] k = Bytes.toBytes(i);
            Put put = new Put(k);
            put.add(familyNames[index], q, k);
            puts.add(put);
        }
        table.put(puts);

        if (rowCount < rowCounts[index]) {
            rowCount = rowCounts[index];
        }
    }

    // get the initial layout (should just be one region)
    Map<HRegionInfo, ServerName> m = table.getRegionLocations();
    LOG.info("Initial regions (" + m.size() + "): " + m);
    assertTrue(m.size() == 1);

    // Verify row count
    Scan scan = new Scan();
    ResultScanner scanner = table.getScanner(scan);
    int rows = 0;
    for (@SuppressWarnings("unused") Result result : scanner) {
        rows++;
    }
    scanner.close();
    assertEquals(rowCount, rows);

    // Have an outstanding scan going on to make sure we can scan over splits.
    scan = new Scan();
    scanner = table.getScanner(scan);
    // Scan first row so we are into first region before split happens.
    scanner.next();

    // Split the table
    this.admin.split(tableName.getName(), splitPoint);

    final AtomicInteger count = new AtomicInteger(0);
    Thread t = new Thread("CheckForSplit") {
        public void run() {
            for (int i = 0; i < 45; i++) {
                try {
                    sleep(1000);
                } catch (InterruptedException e) {
                    continue;
                }
                // check again
                Map<HRegionInfo, ServerName> regions = null;
                try {
                    regions = table.getRegionLocations();
                } catch (IOException e) {
                    e.printStackTrace();
                }
                if (regions == null)
                    continue;
                count.set(regions.size());
                if (count.get() >= 2) {
                    LOG.info("Found: " + regions);
                    break;
                }
                LOG.debug("Cycle waiting on split");
            }
            LOG.debug("CheckForSplit thread exited, current region count: " + count.get());
        }
    };
    t.setPriority(Thread.NORM_PRIORITY - 2);
    t.start();
    t.join();

    // Verify row count
    rows = 1; // We counted one row above.
    for (@SuppressWarnings("unused") Result result : scanner) {
        rows++;
        if (rows > rowCount) {
            scanner.close();
            assertTrue("Scanned more than expected (" + rowCount + ")", false);
        }
    }
    scanner.close();
    assertEquals(rowCount, rows);

    Map<HRegionInfo, ServerName> regions = null;
    try {
        regions = table.getRegionLocations();
    } catch (IOException e) {
        e.printStackTrace();
    }
    assertEquals(2, regions.size());
    Set<HRegionInfo> hRegionInfos = regions.keySet();
    HRegionInfo[] r = hRegionInfos.toArray(new HRegionInfo[hRegionInfos.size()]);
    if (splitPoint != null) {
        // make sure the split point matches our explicit configuration
        assertEquals(Bytes.toString(splitPoint), Bytes.toString(r[0].getEndKey()));
        assertEquals(Bytes.toString(splitPoint), Bytes.toString(r[1].getStartKey()));
        LOG.debug("Properly split on " + Bytes.toString(splitPoint));
    } else {
        if (familyNames.length > 1) {
            int splitKey = Bytes.toInt(r[0].getEndKey());
            // check if splitKey is based on the largest column family
            // in terms of its store size
            int deltaForLargestFamily = Math.abs(rowCount / 2 - splitKey);
            LOG.debug(
                    "SplitKey=" + splitKey + "&deltaForLargestFamily=" + deltaForLargestFamily + ", r=" + r[0]);
            for (int index = 0; index < familyNames.length; index++) {
                int delta = Math.abs(rowCounts[index] / 2 - splitKey);
                if (delta < deltaForLargestFamily) {
                    assertTrue(
                            "Delta " + delta + " for family " + index
                                    + " should be at least deltaForLargestFamily " + deltaForLargestFamily,
                            false);
                }
            }
        }
    }
    TEST_UTIL.deleteTable(tableName);
    table.close();
}

From source file: org.apache.bookkeeper.mledger.impl.ManagedLedgerTest.java

@Test
public void testManagedLedgerWithCreateLedgerTimeOut() throws Exception {
    ManagedLedgerConfig config = new ManagedLedgerConfig().setMetadataOperationsTimeoutSeconds(3);
    ManagedLedgerImpl ledger = (ManagedLedgerImpl) factory.open("timeout_ledger_test", config);

    BookKeeper bk = mock(BookKeeper.class);
    doNothing().when(bk).asyncCreateLedger(anyInt(), anyInt(), anyInt(), any(), any(), any(), any(), any());
    AtomicInteger response = new AtomicInteger(0);
    CountDownLatch latch = new CountDownLatch(1);
    ledger.asyncCreateLedger(bk, config, null, new CreateCallback() {
        @Override
        public void createComplete(int rc, LedgerHandle lh, Object ctx) {
            response.set(rc);
            latch.countDown();
        }
    }, Collections.emptyMap());

    latch.await(config.getMetadataOperationsTimeoutSeconds() + 2, TimeUnit.SECONDS);
    assertEquals(response.get(), BKException.Code.TimeoutException);

    ledger.close();
}
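
The callback thread stores the return code with response.set(rc) and then releases the latch, so the test thread knows the value is in place once await() returns. A self-contained sketch of that capture-and-signal pattern (the return code is simulated):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class AsyncResultSketch {
    public static void main(String[] args) throws InterruptedException {
        AtomicInteger response = new AtomicInteger(0);
        CountDownLatch latch = new CountDownLatch(1);

        new Thread(() -> {
            int rc = -23; // stand-in for the callback's real return code
            response.set(rc); // store the result for the waiting thread
            latch.countDown(); // signal that the result is ready
        }).start();

        if (latch.await(5, TimeUnit.SECONDS)) {
            System.out.println("rc = " + response.get());
        }
    }
}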

From source file: org.loklak.api.server.SearchServlet.java

@Override
protected void doGet(final HttpServletRequest request, final HttpServletResponse response)
        throws ServletException, IOException {
    final RemoteAccess.Post post = RemoteAccess.evaluate(request);
    try {

        // manage DoS
        if (post.isDoS_blackout()) {
            response.sendError(503, "your (" + post.getClientHost() + ") request frequency is too high");
            return;
        }

        // check call type
        boolean jsonExt = request.getServletPath().endsWith(".json");
        boolean rssExt = request.getServletPath().endsWith(".rss");
        boolean txtExt = request.getServletPath().endsWith(".txt");

        // evaluate get parameter
        String callback = post.get("callback", "");
        boolean jsonp = callback != null && callback.length() > 0;
        boolean minified = post.get("minified", false);
        String query = post.get("q", "");
        if (query == null || query.length() == 0)
            query = post.get("query", "");
        query = CharacterCoding.html2unicode(query).replaceAll("\\+", " ");
        final long timeout = (long) post.get("timeout", DAO.getConfig("search.timeout", 2000));
        final int count = post.isDoS_servicereduction() ? 10
                : Math.min(post.get("count", post.get("maximumRecords", 100)),
                        post.isLocalhostAccess() ? 10000 : 1000);
        String source = post.isDoS_servicereduction() ? "cache" : post.get("source", "all"); // possible values: cache, backend, twitter, all
        int limit = post.get("limit", 100);
        String[] fields = post.get("fields", new String[0], ",");
        int timezoneOffset = post.get("timezoneOffset", 0);
        if (query.indexOf("id:") >= 0 && ("all".equals(source) || "twitter".equals(source)))
            source = "cache"; // id's cannot be retrieved from twitter with the scrape-api (yet), only from the cache
        final String ordername = post.get("order", Timeline.Order.CREATED_AT.getMessageFieldName());
        final Timeline.Order order = Timeline.parseOrder(ordername);

        // create tweet timeline
        final Timeline tl = new Timeline(order);
        Map<String, List<Map.Entry<String, Long>>> aggregations = null;
        final QueryEntry.Tokens tokens = new QueryEntry.Tokens(query);

        final AtomicInteger cache_hits = new AtomicInteger(0), count_backend = new AtomicInteger(0),
                count_twitter_all = new AtomicInteger(0), count_twitter_new = new AtomicInteger(0);
        final boolean backend_push = DAO.getConfig("backend.push.enabled", false);

        if ("all".equals(source)) {
            // start all targets for search concurrently
            final int timezoneOffsetf = timezoneOffset;
            final String queryf = query;
            final long start = System.currentTimeMillis();

            // start a scraper
            Thread scraperThread = tokens.raw.length() == 0 ? null : new Thread() {
                public void run() {
                    final String scraper_query = tokens.translate4scraper();
                    DAO.log(request.getServletPath() + " scraping with query: " + scraper_query);
                    Timeline twitterTl = DAO.scrapeTwitter(post, scraper_query, order, timezoneOffsetf, true,
                            timeout, true);
                    count_twitter_new.set(twitterTl.size());
                    tl.putAll(QueryEntry.applyConstraint(twitterTl, tokens, false)); // pre-localized results are not filtered with location constraint any more 
                    tl.setScraperInfo(twitterTl.getScraperInfo());
                    post.recordEvent("twitterscraper_time", System.currentTimeMillis() - start);
                }
            };
            if (scraperThread != null)
                scraperThread.start();

            // start a local search
            Thread localThread = queryf == null || queryf.length() == 0 ? null : new Thread() {
                public void run() {
                    DAO.SearchLocalMessages localSearchResult = new DAO.SearchLocalMessages(queryf, order,
                            timezoneOffsetf, count, 0);
                    post.recordEvent("cache_time", System.currentTimeMillis() - start);
                    cache_hits.set(localSearchResult.timeline.getHits());
                    tl.putAll(localSearchResult.timeline);
                }
            };
            if (localThread != null)
                localThread.start();

            // start a backend search, but only if backend_push == true or result from scraper is too bad
            boolean start_backend_thread = false;
            if (backend_push)
                start_backend_thread = true;
            else {
                // wait now for termination of scraper thread and local search
                // to evaluate how many results are available
                if (scraperThread != null)
                    try {
                        scraperThread.join(Math.max(10000, timeout - System.currentTimeMillis() + start));
                    } catch (InterruptedException e) {
                    }
                if (localThread != null)
                    try {
                        localThread.join(Math.max(100, timeout - System.currentTimeMillis() + start));
                    } catch (InterruptedException e) {
                    }
                localThread = null;
                scraperThread = null;
                if (tl.size() < count)
                    start_backend_thread = true;
            }
            Thread backendThread = tokens.original.length() == 0 || !start_backend_thread ? null
                    : new Thread() {
                        public void run() {
                            Timeline backendTl = DAO.searchBackend(tokens.original, order, count,
                                    timezoneOffsetf, "cache", timeout);
                            if (backendTl != null) {
                                tl.putAll(QueryEntry.applyConstraint(backendTl, tokens, true));
                                count_backend.set(tl.size());
                                // TODO: read and aggregate aggregations from backend as well
                            }
                            post.recordEvent("backend_time", System.currentTimeMillis() - start);
                        }
                    };
            if (backendThread != null)
                backendThread.start();

            // wait for termination of all threads
            if (scraperThread != null)
                try {
                    scraperThread.join(Math.max(10000, timeout - System.currentTimeMillis() + start));
                } catch (InterruptedException e) {
                }
            if (localThread != null)
                try {
                    localThread.join(Math.max(100, timeout - System.currentTimeMillis() + start));
                } catch (InterruptedException e) {
                }
            if (backendThread != null)
                try {
                    backendThread.join(Math.max(100, timeout - System.currentTimeMillis() + start));
                } catch (InterruptedException e) {
                }

        } else if ("twitter".equals(source) && tokens.raw.length() > 0) {
            final long start = System.currentTimeMillis();
            final String scraper_query = tokens.translate4scraper();
            DAO.log(request.getServletPath() + " scraping with query: " + scraper_query);
            Timeline twitterTl = DAO.scrapeTwitter(post, scraper_query, order, timezoneOffset, true, timeout,
                    true);
            count_twitter_new.set(twitterTl.size());
            tl.putAll(QueryEntry.applyConstraint(twitterTl, tokens, false)); // pre-localized results are not filtered with location constraint any more 
            tl.setScraperInfo(twitterTl.getScraperInfo());
            post.recordEvent("twitterscraper_time", System.currentTimeMillis() - start);
            // in this case we use all tweets, not only the latest ones, because it may happen that there are no new ones, and that is not what the user expects

        } else if ("cache".equals(source)) {
            final long start = System.currentTimeMillis();
            DAO.SearchLocalMessages localSearchResult = new DAO.SearchLocalMessages(query, order,
                    timezoneOffset, count, limit, fields);
            cache_hits.set(localSearchResult.timeline.getHits());
            tl.putAll(localSearchResult.timeline);
            aggregations = localSearchResult.aggregations;
            post.recordEvent("cache_time", System.currentTimeMillis() - start);

        } else if ("backend".equals(source) && query.length() > 0) {
            final long start = System.currentTimeMillis();
            Timeline backendTl = DAO.searchBackend(query, order, count, timezoneOffset, "cache", timeout);
            if (backendTl != null) {
                tl.putAll(QueryEntry.applyConstraint(backendTl, tokens, true));
                tl.setScraperInfo(backendTl.getScraperInfo());
                // TODO: read and aggregate aggregations from backend as well
                count_backend.set(tl.size());
            }
            post.recordEvent("backend_time", System.currentTimeMillis() - start);

        }

        final long start = System.currentTimeMillis();
        // check the latest user_ids
        DAO.announceNewUserId(tl);

        // reduce the list to the wanted number of results if we have more
        tl.reduceToMaxsize(count);

        if (post.isDoS_servicereduction() && !RemoteAccess.isSleepingForClient(post.getClientHost())) {
            RemoteAccess.sleep(post.getClientHost(), 2000);
        }

        // create json or xml according to path extension
        int shortlink_iflinkexceedslength = (int) DAO.getConfig("shortlink.iflinkexceedslength", 500L);
        String shortlink_urlstub = DAO.getConfig("shortlink.urlstub", "http://localhost:9000");
        if (jsonExt) {
            post.setResponse(response, jsonp ? "application/javascript" : "application/json");
            // generate json
            Map<String, Object> m = new LinkedHashMap<String, Object>();
            Map<String, Object> metadata = new LinkedHashMap<String, Object>();
            if (!minified) {
                m.put("readme_0",
                        "THIS JSON IS THE RESULT OF YOUR SEARCH QUERY - THERE IS NO WEB PAGE WHICH SHOWS THE RESULT!");
                m.put("readme_1",
                        "loklak.org is the framework for a message search system, not the portal, read: http://loklak.org/about.html#notasearchportal");
                m.put("readme_2",
                        "This is supposed to be the back-end of a search portal. For the api, see http://loklak.org/api.html");
                m.put("readme_3",
                        "Parameters q=(query), source=(cache|backend|twitter|all), callback=p for jsonp, maximumRecords=(message count), minified=(true|false)");
            }
            metadata.put("itemsPerPage", Integer.toString(count));
            metadata.put("count", Integer.toString(tl.size()));
            metadata.put("count_twitter_all", count_twitter_all.get());
            metadata.put("count_twitter_new", count_twitter_new.get());
            metadata.put("count_backend", count_backend.get());
            metadata.put("count_cache", cache_hits.get());
            metadata.put("hits", Math.max(cache_hits.get(), tl.size()));
            if (order == Timeline.Order.CREATED_AT)
                metadata.put("period", tl.period());
            metadata.put("query", query);
            metadata.put("client", post.getClientHost());
            metadata.put("time", System.currentTimeMillis() - post.getAccessTime());
            metadata.put("servicereduction", post.isDoS_servicereduction() ? "true" : "false");
            if (tl.getScraperInfo().length() > 0)
                metadata.put("scraperInfo", tl.getScraperInfo());
            m.put("search_metadata", metadata);
            List<Object> statuses = new ArrayList<>();
            try {
                for (MessageEntry t : tl) {
                    UserEntry u = tl.getUser(t);
                    if (DAO.getConfig("flag.fixunshorten", false))
                        t.setText(TwitterScraper
                                .unshorten(t.getText(shortlink_iflinkexceedslength, shortlink_urlstub)));
                    statuses.add(t.toMap(u, true, shortlink_iflinkexceedslength, shortlink_urlstub));
                }
            } catch (ConcurrentModificationException e) {
                // late incoming messages from concurrent peer retrieval may cause this
                // we silently do nothing here and return what we listed so far
            }
            m.put("statuses", statuses);

            // aggregations
            Map<String, Object> agg = new LinkedHashMap<String, Object>();
            if (aggregations != null) {
                for (Map.Entry<String, List<Map.Entry<String, Long>>> aggregation : aggregations.entrySet()) {
                    Map<String, Object> facet = new LinkedHashMap<>();
                    for (Map.Entry<String, Long> a : aggregation.getValue()) {
                        if (a.getValue().equals(query))
                            continue; // we omit obvious terms that cannot be used for faceting, like search for "#abc" -> most hashtag is "#abc"
                        facet.put(a.getKey(), a.getValue());
                    }
                    agg.put(aggregation.getKey(), facet);
                }
            }
            m.put("aggregations", agg);

            // write json
            response.setCharacterEncoding("UTF-8");
            PrintWriter sos = response.getWriter();
            if (jsonp)
                sos.print(callback + "(");
            sos.print(minified ? new ObjectMapper().writer().writeValueAsString(m)
                    : new ObjectMapper().writerWithDefaultPrettyPrinter().writeValueAsString(m));
            if (jsonp)
                sos.println(");");
            sos.println();
        } else if (rssExt) {
            response.setCharacterEncoding("UTF-8");
            post.setResponse(response, "application/rss+xml;charset=utf-8");
            // generate xml
            RSSMessage channel = new RSSMessage();
            channel.setPubDate(new Date());
            channel.setTitle("RSS feed for Twitter search for " + query);
            channel.setDescription("");
            channel.setLink("");
            RSSFeed feed = new RSSFeed(tl.size());
            feed.setChannel(channel);
            try {
                for (MessageEntry t : tl) {
                    UserEntry u = tl.getUser(t);
                    RSSMessage m = new RSSMessage();
                    m.setLink(t.getStatusIdUrl().toExternalForm());
                    m.setAuthor(u.getName() + " @" + u.getScreenName());
                    m.setTitle(u.getName() + " @" + u.getScreenName());
                    m.setDescription(t.getText(shortlink_iflinkexceedslength, shortlink_urlstub));
                    m.setPubDate(t.getCreatedAt());
                    m.setGuid(t.getIdStr());
                    feed.addMessage(m);
                }
            } catch (ConcurrentModificationException e) {
                // late incoming messages from concurrent peer retrieval may cause this
                // we silently do nothing here and return what we listed so far
            }
            String rss = feed.toString();
            //System.out.println("feed has " + feed.size() + " entries");

            // write xml
            response.getOutputStream().write(UTF8.getBytes(rss));
        } else if (txtExt) {
            post.setResponse(response, "text/plain");
            final StringBuilder buffer = new StringBuilder(1000);
            try {
                for (MessageEntry t : tl) {
                    UserEntry u = tl.getUser(t);
                    buffer.append(t.getCreatedAt()).append(" ").append(u.getScreenName()).append(": ")
                            .append(t.getText(shortlink_iflinkexceedslength, shortlink_urlstub)).append('\n');
                }
            } catch (ConcurrentModificationException e) {
                // late incoming messages from concurrent peer retrieval may cause this
                // we silently do nothing here and return what we listed so far
            }
            response.getOutputStream().write(UTF8.getBytes(buffer.toString()));
        }
        post.recordEvent("result_count", tl.size());
        post.recordEvent("postprocessing_time", System.currentTimeMillis() - start);
        Map<String, Object> hits = new LinkedHashMap<>();
        hits.put("count_twitter_all", count_twitter_all.get());
        hits.put("count_twitter_new", count_twitter_new.get());
        hits.put("count_backend", count_backend.get());
        hits.put("cache_hits", cache_hits.get());
        post.recordEvent("hits", hits);
        DAO.log(request.getServletPath() + "?" + request.getQueryString() + " -> " + tl.size()
                + " records returned, " + count_twitter_new.get() + " new");
        post.finalize();
    } catch (Throwable e) {
        Log.getLog().warn(e.getMessage(), e);
        //e.printStackTrace();
    }
}