Example usage for org.apache.commons.collections IteratorUtils toList

Introduction

On this page you can find example usages of org.apache.commons.collections.IteratorUtils.toList.

Prototype

public static List toList(Iterator iterator) 

Document

Gets a list based on an iterator.
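
Before the real-world examples below, here is a minimal, self-contained sketch of the call; the class name ToListExample and the sample strings are illustrative only. Note that in commons-collections 3.x the method returns a raw List, so a typed reference needs an unchecked cast.

import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

import org.apache.commons.collections.IteratorUtils;

public class ToListExample {
    public static void main(String[] args) {
        Iterator<String> it = Arrays.asList("apple", "banana", "cherry").iterator();

        // toList drains the iterator into a new ArrayList.
        @SuppressWarnings("unchecked")
        List<String> values = IteratorUtils.toList(it);

        System.out.println(values);       // [apple, banana, cherry]
        System.out.println(it.hasNext()); // false - the iterator is exhausted
    }
}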

Usage

From source file:com.uber.hoodie.common.table.log.avro.AvroLogAppenderTest.java
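
In this test, toList drains the iterator returned by AvroLogReader.readBlock so the record count of each block can be asserted, even after the log has been corrupted by a partially written commit.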

@Test
public void testAppendAndReadOnCorruptedLog() throws IOException, URISyntaxException, InterruptedException {
    HoodieLogAppendConfig logConfig = HoodieLogAppendConfig.newBuilder().onPartitionPath(partitionPath)
            .withLogFileExtension(HoodieLogFile.DELTA_EXTENSION).withFileId("test-fileid1")
            .withBaseCommitTime("100").withSchema(SchemaTestUtil.getSimpleSchema()).withFs(fs).build();
    RollingAvroLogAppender logAppender = new RollingAvroLogAppender(logConfig);
    long size1 = logAppender.getCurrentSize();
    logAppender.append(SchemaTestUtil.generateTestRecords(0, 100).iterator());
    logAppender.close();

    // Append an arbitrary byte[] to the end of the log (mimics a partially written commit)
    assertTrue(fs.exists(logConfig.getLogFile().getPath()));
    fs = FileSystem.get(fs.getConf());
    FSDataOutputStream outputStream = fs.append(logConfig.getLogFile().getPath(), logConfig.getBufferSize());
    outputStream.write("something-random".getBytes());
    outputStream.flush();
    outputStream.close();

    logAppender = new RollingAvroLogAppender(logConfig);
    long size2 = logAppender.getCurrentSize();
    logAppender.append(SchemaTestUtil.generateTestRecords(100, 100).iterator());
    logAppender.close();

    AvroLogReader logReader = new AvroLogReader(logConfig.getLogFile(), fs, logConfig.getSchema());

    // Try to grab the middle block here
    List<GenericRecord> secondBatch = IteratorUtils.toList(logReader.readBlock(size1));
    assertEquals("Stream collect should return 100 records", 100, secondBatch.size());

    // Try to grab the last block here
    List<GenericRecord> lastBatch = IteratorUtils.toList(logReader.readBlock(size2));
    assertEquals("Stream collect should return 100 records", 100, lastBatch.size());
}

From source file:com.adobe.cq.wcm.core.components.extension.contentfragment.internal.models.v1.ContentFragmentImpl.java
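
Here toList materializes the content fragment's associated-content iterator into the list that is lazily cached and returned.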

@Nullable
@Override
public List<Resource> getAssociatedContent() {
    if (fragment != null && associatedContentList == null) {
        associatedContentList = IteratorUtils.toList(fragment.getAssociatedContent());
    }
    return associatedContentList;
}

From source file:eu.europa.ec.fisheries.uvms.reporting.service.util.ReportDeserializer.java
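
Here toList turns the Jackson JsonNode element iterator into a list so each group-by criterion can be read by index.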

private void addGroupCriteria(JsonNode groupBy, List<FilterDTO> filterDTOList, Long reportId, JsonParser jp) {

    if (groupBy != null) {
        List list = IteratorUtils.toList(groupBy.elements());
        for (int i = 0; i < list.size(); i++) {
            String code = ((JsonNode) list.get(i)).get("code").asText();
            JsonNode valueNode = ((JsonNode) list.get(i)).get("values");
            List<GroupCriteriaType> groupCriteriaList = null;
            if (valueNode != null) {
                List<String> strings = ((ObjectMapper) jp.getCodec()).convertValue(valueNode, List.class);
                groupCriteriaList = GroupCriteriaFilterMapper.INSTANCE
                        .mapGroupCriteriaTypeListToStringList(strings);
            }
            filterDTOList.add(new CriteriaFilterDTO(code, groupCriteriaList, i + 1, reportId));
        }
    }
}

From source file:com.orientechnologies.orient.graph.blueprints.GraphTest.java
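
Each composite-index query returns an Iterable&lt;Vertex&gt;; toList drains its iterator so the number of matching vertices can be asserted.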

@Test
public void testCompositeKey() {

    OrientGraphNoTx graph = new OrientGraphNoTx("memory:testComposite");

    try {
        graph.createVertexType("Account");

        graph.command(new OCommandSQL("create property account.description STRING")).execute();
        graph.command(new OCommandSQL("create property account.namespace STRING")).execute();
        graph.command(new OCommandSQL("create property account.name STRING")).execute();
        graph.command(new OCommandSQL("create index account.composite on account (name, namespace) unique"))
                .execute();

        graph.addVertex("class:account",
                new Object[] { "name", "foo", "namespace", "bar", "description", "foobar" });
        graph.addVertex("class:account",
                new Object[] { "name", "foo", "namespace", "baz", "description", "foobaz" });

        Iterable<Vertex> vertices = graph
                .command(new OCommandSQL("select from index:account.composite where key = [ 'foo', 'baz' ]"))
                .execute();

        List list = IteratorUtils.toList(vertices.iterator());

        Assert.assertEquals(1, list.size());

        vertices = graph.getVertices("account.composite", new Object[] { "foo", "baz" });

        list = IteratorUtils.toList(vertices.iterator());

        Assert.assertEquals(1, list.size());

        vertices = graph.getVertices("account.composite", new OCompositeKey("foo", "baz"));

        list = IteratorUtils.toList(vertices.iterator());

        Assert.assertEquals(1, list.size());

        graph.getVertices("account.composite", new OCompositeKey("foo", "baz"));

    } finally {
        graph.drop();
    }
}

From source file:com.uber.hoodie.common.table.log.avro.AvroLogAppenderTest.java
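
Here toList collects the merged record iterator produced by CompositeAvroLogReader across two log file versions.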

@Test
public void testCompositeAvroLogReader() throws IOException, URISyntaxException, InterruptedException {
    // Set a small threshold so that every block is a new version
    HoodieLogAppendConfig logConfig = HoodieLogAppendConfig.newBuilder().onPartitionPath(partitionPath)
            .withLogFileExtension(HoodieLogFile.DELTA_EXTENSION).withFileId("test-fileid1")
            .withBaseCommitTime("100").withSchema(SchemaTestUtil.getSimpleSchema()).withSizeThreshold(500)
            .withFs(fs).build();

    RollingAvroLogAppender logAppender = new RollingAvroLogAppender(logConfig);
    long size1 = logAppender.getCurrentSize();
    List<IndexedRecord> input1 = SchemaTestUtil.generateTestRecords(0, 100);
    logAppender.append(input1.iterator());
    logAppender.close();

    // Need to rebuild config to set the latest version as path
    logConfig = HoodieLogAppendConfig.newBuilder().onPartitionPath(partitionPath)
            .withLogFileExtension(HoodieLogFile.DELTA_EXTENSION).withFileId("test-fileid1")
            .withBaseCommitTime("100").withSchema(SchemaTestUtil.getSimpleSchema()).withSizeThreshold(500)
            .withFs(fs).build();
    logAppender = new RollingAvroLogAppender(logConfig);
    long size2 = logAppender.getCurrentSize();
    List<IndexedRecord> input2 = SchemaTestUtil.generateTestRecords(100, 100);
    logAppender.append(input2.iterator());
    logAppender.close();

    logConfig = HoodieLogAppendConfig.newBuilder().onPartitionPath(partitionPath)
            .withLogFileExtension(HoodieLogFile.DELTA_EXTENSION).withFileId("test-fileid1")
            .withBaseCommitTime("100").withSchema(SchemaTestUtil.getSimpleSchema()).withSizeThreshold(500)
            .withFs(fs).build();
    List<HoodieLogFile> allLogFiles = FSUtils
            .getAllLogFiles(fs, partitionPath, logConfig.getLogFile().getFileId(),
                    HoodieLogFile.DELTA_EXTENSION, logConfig.getLogFile().getBaseCommitTime())
            .collect(Collectors.toList());
    assertEquals("", 2, allLogFiles.size());

    SortedMap<Integer, List<Long>> offsets = Maps.newTreeMap();
    offsets.put(1, Lists.newArrayList(size1));
    offsets.put(2, Lists.newArrayList(size2));
    CompositeAvroLogReader reader = new CompositeAvroLogReader(partitionPath,
            logConfig.getLogFile().getFileId(), logConfig.getLogFile().getBaseCommitTime(), fs,
            logConfig.getSchema(), HoodieLogFile.DELTA_EXTENSION);
    Iterator<GenericRecord> results = reader.readBlocks(offsets);
    List<GenericRecord> totalBatch = IteratorUtils.toList(results);
    assertEquals("Stream collect should return all 200 records", 200, totalBatch.size());
    input1.addAll(input2);
    assertEquals("CompositeAvroLogReader should return 200 records from 2 versions", input1, totalBatch);
}

From source file:edu.cornell.mannlib.vitro.webapp.controller.individual.IndividualRdfAssemblerTest.java
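
A one-line helper that converts a Jena statement iterator into a List of Statements.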

@SuppressWarnings("unchecked")
private List<Statement> statementList(OntModel m) {
    return IteratorUtils.toList(m.listStatements());
}

From source file:com.orientechnologies.orient.graph.blueprints.GraphTest.java
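
The same setup as testCompositeKey above, but the final lookup passes three keys to a two-field composite index, which is expected to throw IllegalArgumentException.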

@Test(expected = IllegalArgumentException.class)
public void testCompositeExceptionKey() {

    OrientGraphNoTx graph = new OrientGraphNoTx("memory:testComposite");

    try {
        graph.createVertexType("Account");

        graph.command(new OCommandSQL("create property account.description STRING")).execute();
        graph.command(new OCommandSQL("create property account.namespace STRING")).execute();
        graph.command(new OCommandSQL("create property account.name STRING")).execute();
        graph.command(new OCommandSQL("create index account.composite on account (name, namespace) unique"))
                .execute();

        graph.addVertex("class:account",
                new Object[] { "name", "foo", "namespace", "bar", "description", "foobar" });
        graph.addVertex("class:account",
                new Object[] { "name", "foo", "namespace", "baz", "description", "foobaz" });

        Iterable<Vertex> vertices = graph
                .command(new OCommandSQL("select from index:account.composite where key = [ 'foo', 'baz' ]"))
                .execute();

        List list = IteratorUtils.toList(vertices.iterator());

        Assert.assertEquals(1, list.size());

        graph.getVertices("account.composite", new Object[] { "foo", "baz", "bar" });

    } finally {
        graph.drop();
    }
}

From source file:com.xpn.xwiki.internal.plugin.rightsmanager.ReferenceUserIteratorTest.java
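
toList drains the ReferenceUserIterator so containsInAnyOrder can verify that the two users are resolved exactly once, even though the two groups reference each other.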

@Test
public void getMembersWhenGroupIsLooping() throws Exception {
    setUpBaseMocks();

    DocumentReference groupReference = new DocumentReference("groupwiki", "XWiki", "grouppage");
    XWikiDocument document = mock(XWikiDocument.class);
    when(document.isNew()).thenReturn(false);
    when(document.getDocumentReference()).thenReturn(groupReference);
    when(xwiki.getDocument(groupReference, this.xwikiContext)).thenReturn(document);

    List<BaseObject> memberObjects = new ArrayList<>();
    BaseObject bo = mock(BaseObject.class);
    when(bo.getStringValue("member")).thenReturn("XWiki.othergroup");
    memberObjects.add(bo);
    bo = mock(BaseObject.class);
    when(bo.getStringValue("member")).thenReturn("XWiki.userpage");
    memberObjects.add(bo);
    when(document.getXObjects(new EntityReference("XWikiGroups", EntityType.DOCUMENT,
            new EntityReference("XWiki", EntityType.SPACE)))).thenReturn(memberObjects);

    DocumentReference userpageReference = new DocumentReference("groupwiki", "XWiki", "userpage");
    setUpUserPageMocks(userpageReference);
    when(this.resolver.resolve("XWiki.userpage", groupReference)).thenReturn(userpageReference);

    DocumentReference otherGroupReference = new DocumentReference("groupwiki", "XWiki", "othergroup");
    document = mock(XWikiDocument.class);
    when(document.isNew()).thenReturn(false);
    when(document.getDocumentReference()).thenReturn(otherGroupReference);
    when(xwiki.getDocument(otherGroupReference, this.xwikiContext)).thenReturn(document);

    memberObjects = new ArrayList<>();
    bo = mock(BaseObject.class);
    when(bo.getStringValue("member")).thenReturn("XWiki.grouppage");
    memberObjects.add(bo);
    bo = mock(BaseObject.class);
    when(bo.getStringValue("member")).thenReturn("XWiki.anotheruser");
    memberObjects.add(bo);
    when(document.getXObjects(new EntityReference("XWikiGroups", EntityType.DOCUMENT,
            new EntityReference("XWiki", EntityType.SPACE)))).thenReturn(memberObjects);

    DocumentReference anotheruserReference = new DocumentReference("groupwiki", "XWiki", "anotheruser");
    setUpUserPageMocks(anotheruserReference);
    when(this.resolver.resolve("XWiki.anotheruser", otherGroupReference)).thenReturn(anotheruserReference);

    when(this.resolver.resolve("XWiki.grouppage", otherGroupReference)).thenReturn(groupReference);
    when(this.resolver.resolve("XWiki.othergroup", groupReference)).thenReturn(otherGroupReference);

    Iterator<DocumentReference> iterator = new ReferenceUserIterator(groupReference, this.resolver,
            this.execution);

    assertThat((List<DocumentReference>) IteratorUtils.toList(iterator),
            containsInAnyOrder(userpageReference, anotheruserReference));
}

From source file:com.splicemachine.stream.StreamableRDDTest.java
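
Each Callable drains one StreamListener's iterator via toList, so the three concurrent result streams are collected into memory in parallel.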

@Test
public void testConcurrentQueries() throws StandardException, ExecutionException, InterruptedException {
    final StreamListener<ExecRow> sl1 = new StreamListener<>();
    final StreamListener<ExecRow> sl2 = new StreamListener<>();
    final StreamListener<ExecRow> sl3 = new StreamListener<>();
    HostAndPort hostAndPort = server.getHostAndPort();
    server.register(sl1);
    server.register(sl2);
    server.register(sl3);

    List<Tuple2<ExecRow, ExecRow>> manyRows = new ArrayList<>();
    for (int i = 0; i < 100000; ++i) {
        manyRows.add(new Tuple2<ExecRow, ExecRow>(getExecRow(i, 1), getExecRow(i, 2)));
    }

    JavaPairRDD<ExecRow, ExecRow> rdd = SpliceSpark.getContext().parallelizePairs(manyRows, 12);
    final StreamableRDD srdd1 = new StreamableRDD(rdd.values(), sl1.getUuid(), hostAndPort.getHostText(),
            hostAndPort.getPort());
    final StreamableRDD srdd2 = new StreamableRDD(rdd.values().map(new Function<ExecRow, ExecRow>() {
        @Override
        public ExecRow call(ExecRow o) throws Exception {
            o.getColumn(1).setValue(0);
            return o;
        }
    }), sl2.getUuid(), hostAndPort.getHostText(), hostAndPort.getPort());
    final StreamableRDD srdd3 = new StreamableRDD(rdd.values(), sl3.getUuid(), hostAndPort.getHostText(),
            hostAndPort.getPort());
    for (final StreamableRDD srdd : Arrays.asList(srdd1, srdd2, srdd3)) {
        new Thread() {
            @Override
            public void run() {
                try {
                    srdd.submit();
                } catch (Exception e) {
                    throw new RuntimeException(e);
                }

            }
        }.start();
    }
    // We collect them asynchronously into memory so we are able to iterate over them at the same time. Otherwise
    // tasks for the third RDD might be blocked by tasks in other RDDs, and we are not consuming elements from the
    // other iterators so they can become unblocked.
    ExecutorService executor = Executors.newFixedThreadPool(3);
    Future<List<ExecRow>> future1 = executor.submit(new Callable<List<ExecRow>>() {
        @Override
        public List<ExecRow> call() throws Exception {
            return IteratorUtils.toList(sl1.getIterator());
        }
    });
    Future<List<ExecRow>> future2 = executor.submit(new Callable<List<ExecRow>>() {
        @Override
        public List<ExecRow> call() throws Exception {
            return IteratorUtils.toList(sl2.getIterator());
        }
    });
    Future<List<ExecRow>> future3 = executor.submit(new Callable<List<ExecRow>>() {
        @Override
        public List<ExecRow> call() throws Exception {
            return IteratorUtils.toList(sl3.getIterator());
        }
    });
    Iterator<ExecRow> it1 = future1.get().iterator();
    Iterator<ExecRow> it2 = future2.get().iterator();
    Iterator<ExecRow> it3 = future3.get().iterator();
    int count = 0;
    while (it1.hasNext()) {
        ExecRow r1 = it1.next();
        ExecRow r2 = it2.next();
        ExecRow r3 = it3.next();
        count++;
        assertNotNull(r1);
        assertNotNull(r2);
        assertNotNull(r3);
        assertEquals(0, r2.getColumn(1).getInt());
        assertEquals(r1.getColumn(1), r3.getColumn(1));
        assertEquals(r1.getColumn(2), r2.getColumn(2));
    }
    assertEquals(100000, count);
}

From source file:com.discovery.darchrow.tools.jsonlib.JsonUtil.java
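
In this utility, toList converts an Iterator argument into a Collection that JSONArray.fromObject accepts.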

/**
 * Converts a Bean or Map (or any other supported object) to JSON.
 *
 * @param obj
 *            the obj
 * @param jsonConfig
 *            the json config
 * @return the jSON
 * @see net.sf.json.JSONArray#fromObject(Object, JsonConfig)
 * @see net.sf.json.JSONObject#fromObject(Object, JsonConfig)
 * @see net.sf.json.util.JSONUtils#isArray(Object)
 * @see java.lang.Class#isEnum()
 * @see net.sf.json.JsonConfig#registerJsonValueProcessor(Class, JsonValueProcessor)
 * @see org.apache.commons.collections.IteratorUtils#toList(Iterator)
 * @see org.apache.commons.collections.IteratorUtils#toList(Iterator, int)
 */
public static JSON toJSON(Object obj, JsonConfig jsonConfig) {
    if (null == jsonConfig) {
        jsonConfig = DEFAULT_JSON_CONFIG;
    }

    // obj instanceof Collection || obj instanceof Object[]
    if (JSONUtils.isArray(obj) || //
            obj instanceof Enum || // obj.getClass().isEnum(); JSONObject.fromObject rejects Enums ("'object' is an Enum. Use JSONArray instead")
            obj instanceof Iterator) {

        if (obj instanceof Iterator) {
            Collection<?> list = IteratorUtils.toList((Iterator<?>) obj);
            obj = list;
        }
        //Accepts JSON formatted strings, arrays, Collections and Enums.
        return JSONArray.fromObject(obj, jsonConfig);
    }
    //Accepts JSON formatted strings, Maps, DynaBeans and JavaBeans.
    return JSONObject.fromObject(obj, jsonConfig);
}
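
The @see tags above also reference the two-argument overload, toList(Iterator, int), which accepts an estimated size used to pre-size the backing ArrayList. A minimal sketch, assuming only the commons-collections 3.x API; the class name and the estimate of 3 are illustrative:

import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

import org.apache.commons.collections.IteratorUtils;

public class ToListSizedExample {
    public static void main(String[] args) {
        Iterator<Integer> it = Arrays.asList(1, 2, 3).iterator();

        // The second argument is only a sizing hint for the backing
        // ArrayList; it does not limit how many elements are read.
        @SuppressWarnings("unchecked")
        List<Integer> numbers = IteratorUtils.toList(it, 3);

        System.out.println(numbers); // [1, 2, 3]
    }
}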