Example usage for org.apache.commons.lang ArrayUtils toPrimitive

List of usage examples for org.apache.commons.lang ArrayUtils toPrimitive

Introduction

On this page you can find example usage for org.apache.commons.lang ArrayUtils toPrimitive.

Prototype

public static boolean[] toPrimitive(Boolean[] array) 

Document

Converts an array of object Booleans to primitives.
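
Note that the prototype listed above is the Boolean[] overload; the examples below mostly use the Integer[] overload, which behaves the same way: each wrapper element is unboxed, a null input array is returned as null, and a null element raises a NullPointerException unless the two-argument overload with a default value is used. A minimal standalone sketch of both overloads (not taken from the examples below):

import org.apache.commons.lang.ArrayUtils;

public class ToPrimitiveDemo {
    public static void main(String[] args) {
        // Boolean[] -> boolean[]
        Boolean[] boxedFlags = { Boolean.TRUE, Boolean.FALSE };
        boolean[] flags = ArrayUtils.toPrimitive(boxedFlags);

        // Integer[] -> int[]
        Integer[] boxedIds = { 1, 2, 3 };
        int[] ids = ArrayUtils.toPrimitive(boxedIds);

        // a null element would throw a NullPointerException; the two-argument
        // overload substitutes a default value instead
        Integer[] withNull = { 1, null, 3 };
        int[] safeIds = ArrayUtils.toPrimitive(withNull, -1);

        System.out.println(flags.length + " " + ids.length + " " + safeIds[1]); // 2 3 -1
    }
}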

Usage

From source file:org.acoustid.data.sql.SQLTrackData.java

@Override
public void loadMBIDs(List<Track> tracks) throws SQLException {
    if (tracks.isEmpty()) {
        return;
    }
    Map<Integer, Track> trackMap = new HashMap<Integer, Track>();
    for (Track track : tracks) {
        trackMap.put(track.getId(), track);
    }
    int[] trackIds = ArrayUtils.toPrimitive(trackMap.keySet().toArray(new Integer[0]));
    String sql = "SELECT track_id, mbid FROM track_mbid WHERE track_id IN "
            + DataUtils.encodeIntArray(trackIds, '(', ')') + " ORDER BY track_id, mbid";
    PreparedStatement statement = getConnection().prepareStatement(sql);
    ResultSet rs = statement.executeQuery();
    while (rs.next()) {
        int trackId = rs.getInt("track_id");
        Track track = trackMap.get(trackId);
        if (track.getMBIDs() == null) {
            track.setMBIDs(new ArrayList<String>(1));
        }
        track.getMBIDs().add(rs.getString("mbid"));
    }
    statement.close();
}
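
The example above converts the key set of a Map<Integer, Track> into an int[] so the track ids can be embedded directly in the SQL IN clause (safe here only because the values are ints, not user-supplied strings). A minimal sketch of that key-set conversion in isolation, with a hypothetical map:

import java.util.HashMap;
import java.util.Map;

import org.apache.commons.lang.ArrayUtils;

public class KeySetToPrimitiveDemo {
    public static void main(String[] args) {
        Map<Integer, String> trackMap = new HashMap<Integer, String>();
        trackMap.put(10, "first");
        trackMap.put(20, "second");

        // toArray(new Integer[0]) produces a typed Integer[], which toPrimitive unboxes
        int[] trackIds = ArrayUtils.toPrimitive(trackMap.keySet().toArray(new Integer[0]));
        System.out.println(trackIds.length); // 2
    }
}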

From source file:org.alfresco.repo.virtual.page.PageCollatorTest.java

private int[] createMergedPage(int skip, int count, List<Integer> s1, Integer[] s2) {
    return createMergedPage(skip, count, s1, ArrayUtils.toPrimitive(s2));
}

From source file:org.alfresco.repo.virtual.page.PageCollatorTest.java

private int[] createMergedPage(int skip, int count, List<Integer> s1, int[] s2) {
    // use the typed toArray(new Integer[0]) form; casting the no-argument toArray(),
    // which returns Object[], to Integer[] would fail at runtime
    int[] s1Primitive = (s1 == null || s1.isEmpty()) ? new int[] {}
            : ArrayUtils.toPrimitive(s1.toArray(new Integer[0]));

    return createMergedPage(skip, count, s1Primitive, s2);
}

From source file:org.alfresco.repo.virtual.page.PageCollatorTest.java

private void assertEqualPages(String message, int[] expected, List<Integer> page) {
    assertEqualPages(message, expected, ArrayUtils.toPrimitive(page.toArray(new Integer[0])));
}
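
These test helpers show the usual List<Integer> to int[] idiom: copy the list into a typed Integer[] with toArray(new Integer[0]), then unbox with toPrimitive. Casting the no-argument toArray(), which returns Object[], to Integer[] would fail at runtime, which is why the typed form is used. A minimal sketch of the idiom, assuming nothing beyond the JDK and Commons Lang:

import java.util.Arrays;
import java.util.List;

import org.apache.commons.lang.ArrayUtils;

public class ListToIntArrayDemo {
    public static void main(String[] args) {
        List<Integer> values = Arrays.asList(4, 8, 15);
        // typed toArray call first, then unbox to a primitive array
        int[] primitives = ArrayUtils.toPrimitive(values.toArray(new Integer[0]));
        System.out.println(Arrays.toString(primitives)); // [4, 8, 15]
    }
}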

From source file:org.apache.carbondata.core.datastore.chunk.reader.dimension.v3.CompressedDimensionChunkFileBasedReaderV3.java

/**
 * Below method will be used to read the dimension column data from the carbon data file
 * Steps for reading:
 * 1. Get the length of the data to be read
 * 2. Allocate the direct buffer
 * 3. read the data from file
 * 4. Get the data chunk object from data read
 * 5. Create the raw chunk object and fill the details
 *
 * @param fileReader          reader for reading the column from carbon data file
 * @param blockletColumnIndex blocklet index of the column in carbon data file
 * @return dimension raw chunk
 */
public DimensionRawColumnChunk readRawDimensionChunk(FileHolder fileReader, int blockletColumnIndex)
        throws IOException {
    // get the current dimension offset
    long currentDimensionOffset = dimensionChunksOffset.get(blockletColumnIndex);
    int length = 0;
    // to calculate the length of the data to be read:
    // for every column except the last, subtract the current column's offset from the
    // next column's offset to get the total length.
    // for the last column, use lastDimensionOffsets (the end position of the last
    // dimension) and subtract the current dimension offset from it
    if (dimensionChunksOffset.size() - 1 == blockletColumnIndex) {
        length = (int) (lastDimensionOffsets - currentDimensionOffset);
    } else {
        length = (int) (dimensionChunksOffset.get(blockletColumnIndex + 1) - currentDimensionOffset);
    }
    ByteBuffer buffer = null;
    // read the data from carbon data file
    synchronized (fileReader) {
        buffer = fileReader.readByteBuffer(filePath, currentDimensionOffset, length);
    }
    // get the data chunk which will have all the details about the data pages
    DataChunk3 dataChunk = CarbonUtil.readDataChunk3(buffer, 0, length);
    // creating a raw chunks instance and filling all the details
    DimensionRawColumnChunk rawColumnChunk = new DimensionRawColumnChunk(blockletColumnIndex, buffer, 0, length,
            this);
    int numberOfPages = dataChunk.getPage_length().size();
    byte[][] maxValueOfEachPage = new byte[numberOfPages][];
    byte[][] minValueOfEachPage = new byte[numberOfPages][];
    int[] eachPageLength = new int[numberOfPages];
    for (int i = 0; i < minValueOfEachPage.length; i++) {
        maxValueOfEachPage[i] = dataChunk.getData_chunk_list().get(i).getMin_max().getMax_values().get(0)
                .array();
        minValueOfEachPage[i] = dataChunk.getData_chunk_list().get(i).getMin_max().getMin_values().get(0)
                .array();
        eachPageLength[i] = dataChunk.getData_chunk_list().get(i).getNumberOfRowsInpage();
    }
    rawColumnChunk.setDataChunkV3(dataChunk);
    rawColumnChunk.setFileHolder(fileReader);
    rawColumnChunk.setPagesCount(dataChunk.getPage_length().size());
    rawColumnChunk.setMaxValues(maxValueOfEachPage);
    rawColumnChunk.setMinValues(minValueOfEachPage);
    rawColumnChunk.setRowCount(eachPageLength);
    rawColumnChunk.setLengths(
            ArrayUtils.toPrimitive(dataChunk.page_length.toArray(new Integer[dataChunk.page_length.size()])));
    rawColumnChunk.setOffsets(
            ArrayUtils.toPrimitive(dataChunk.page_offset.toArray(new Integer[dataChunk.page_offset.size()])));
    return rawColumnChunk;
}
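
The length computation above follows a simple rule: for every column except the last, the chunk length is the next column's offset minus the current one; for the last column it is the end-of-dimensions offset minus the current one. The page lengths and offsets are then copied out of the Thrift-generated List<Integer> fields with toPrimitive. A small illustrative sketch of the offset arithmetic with made-up numbers (the names below are hypothetical, not part of the CarbonData API):

import java.util.Arrays;
import java.util.List;

public class ChunkLengthDemo {
    public static void main(String[] args) {
        // hypothetical chunk start offsets and the end position of the last chunk
        List<Long> chunkOffsets = Arrays.asList(100L, 250L, 400L);
        long lastOffset = 550L;

        int[] lengths = new int[chunkOffsets.size()];
        for (int i = 0; i < chunkOffsets.size(); i++) {
            long end = (i == chunkOffsets.size() - 1) ? lastOffset : chunkOffsets.get(i + 1);
            lengths[i] = (int) (end - chunkOffsets.get(i));
        }
        System.out.println(Arrays.toString(lengths)); // [150, 150, 150]
    }
}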

From source file:org.apache.carbondata.core.datastore.chunk.reader.dimension.v3.CompressedDimensionChunkFileBasedReaderV3.java

/**
 * Below method will be used to read multiple dimension columns' data as a group
 * and divide it into dimension raw chunk objects
 * Steps for reading:
 * 1. Get the length of the data to be read
 * 2. Allocate the direct buffer
 * 3. read the data from file
 * 4. Get the data chunk object from file for each column
 * 5. Create the raw chunk object and fill the details for each column
 * 6. increment the offset of the data
 *
 * @param fileReader
 *        reader which will be used to read the dimension columns data from file
 * @param startBlockletColumnIndex
 *        blocklet index of the first dimension column
 * @param endBlockletColumnIndex
 *        blocklet index of the last dimension column
 * @return DimensionRawColumnChunk array
 */
protected DimensionRawColumnChunk[] readRawDimensionChunksInGroup(FileHolder fileReader,
        int startBlockletColumnIndex, int endBlockletColumnIndex) throws IOException {
    // to calculate the length of the data to be read, subtract the start column's
    // offset from the offset of (end column + 1) to get the total length.
    long currentDimensionOffset = dimensionChunksOffset.get(startBlockletColumnIndex);
    ByteBuffer buffer = null;
    // read the data from carbon data file
    synchronized (fileReader) {
        buffer = fileReader.readByteBuffer(filePath, currentDimensionOffset,
                (int) (dimensionChunksOffset.get(endBlockletColumnIndex + 1) - currentDimensionOffset));
    }
    // create raw chunk for each dimension column
    DimensionRawColumnChunk[] dimensionDataChunks = new DimensionRawColumnChunk[endBlockletColumnIndex
            - startBlockletColumnIndex + 1];
    int index = 0;
    int runningLength = 0;
    for (int i = startBlockletColumnIndex; i <= endBlockletColumnIndex; i++) {
        int currentLength = (int) (dimensionChunksOffset.get(i + 1) - dimensionChunksOffset.get(i));
        dimensionDataChunks[index] = new DimensionRawColumnChunk(i, buffer, runningLength, currentLength, this);
        DataChunk3 dataChunk = CarbonUtil.readDataChunk3(buffer, runningLength, dimensionChunksLength.get(i));
        int numberOfPages = dataChunk.getPage_length().size();
        byte[][] maxValueOfEachPage = new byte[numberOfPages][];
        byte[][] minValueOfEachPage = new byte[numberOfPages][];
        int[] eachPageLength = new int[numberOfPages];
        for (int j = 0; j < minValueOfEachPage.length; j++) {
            maxValueOfEachPage[j] = dataChunk.getData_chunk_list().get(j).getMin_max().getMax_values().get(0)
                    .array();
            minValueOfEachPage[j] = dataChunk.getData_chunk_list().get(j).getMin_max().getMin_values().get(0)
                    .array();
            eachPageLength[j] = dataChunk.getData_chunk_list().get(j).getNumberOfRowsInpage();
        }
        dimensionDataChunks[index].setDataChunkV3(dataChunk);
        dimensionDataChunks[index].setFileHolder(fileReader);
        dimensionDataChunks[index].setPagesCount(dataChunk.getPage_length().size());
        dimensionDataChunks[index].setMaxValues(maxValueOfEachPage);
        dimensionDataChunks[index].setMinValues(minValueOfEachPage);
        dimensionDataChunks[index].setRowCount(eachPageLength);
        dimensionDataChunks[index].setLengths(ArrayUtils
                .toPrimitive(dataChunk.page_length.toArray(new Integer[dataChunk.page_length.size()])));
        dimensionDataChunks[index].setOffsets(ArrayUtils
                .toPrimitive(dataChunk.page_offset.toArray(new Integer[dataChunk.page_offset.size()])));
        runningLength += currentLength;
        index++;
    }
    return dimensionDataChunks;
}

From source file:org.apache.carbondata.core.datastore.chunk.reader.measure.v3.CompressedMeasureChunkFileBasedReaderV3.java

/**
 * Below method will be used to read the measure column data from the carbon data file
 * 1. Get the length of the data to be read
 * 2. Allocate the direct buffer
 * 3. read the data from file
 * 4. Get the data chunk object from data read
 * 5. Create the raw chunk object and fill the details
 *
 * @param fileReader          reader for reading the column from carbon data file
 * @param blockletColumnIndex blocklet index of the column in carbon data file
 * @return measure raw chunk
 */
@Override
public MeasureRawColumnChunk readRawMeasureChunk(FileHolder fileReader, int blockletColumnIndex)
        throws IOException {
    int dataLength = 0;
    // to calculate the length of the data to be read:
    // for every column except the last, subtract the current column's offset from the
    // next column's offset to get the total length.
    // for the last column, use measureOffsets (the end position of the last measure
    // column) and subtract the current column's offset from it
    if (measureColumnChunkOffsets.size() - 1 == blockletColumnIndex) {
        dataLength = (int) (measureOffsets - measureColumnChunkOffsets.get(blockletColumnIndex));
    } else {
        dataLength = (int) (measureColumnChunkOffsets.get(blockletColumnIndex + 1)
                - measureColumnChunkOffsets.get(blockletColumnIndex));
    }
    ByteBuffer buffer = null;
    // read the data from carbon data file
    synchronized (fileReader) {
        buffer = fileReader.readByteBuffer(filePath, measureColumnChunkOffsets.get(blockletColumnIndex),
                dataLength);
    }
    // get the data chunk which will have all the details about the data pages
    DataChunk3 dataChunk = CarbonUtil.readDataChunk3(buffer, 0,
            measureColumnChunkLength.get(blockletColumnIndex));
    // creating a raw chunks instance and filling all the details
    MeasureRawColumnChunk rawColumnChunk = new MeasureRawColumnChunk(blockletColumnIndex, buffer, 0, dataLength,
            this);
    int numberOfPages = dataChunk.getPage_length().size();
    byte[][] maxValueOfEachPage = new byte[numberOfPages][];
    byte[][] minValueOfEachPage = new byte[numberOfPages][];
    int[] eachPageLength = new int[numberOfPages];
    for (int i = 0; i < minValueOfEachPage.length; i++) {
        maxValueOfEachPage[i] = dataChunk.getData_chunk_list().get(i).getMin_max().getMax_values().get(0)
                .array();
        minValueOfEachPage[i] = dataChunk.getData_chunk_list().get(i).getMin_max().getMin_values().get(0)
                .array();
        eachPageLength[i] = dataChunk.getData_chunk_list().get(i).getNumberOfRowsInpage();
    }
    rawColumnChunk.setDataChunkV3(dataChunk);
    rawColumnChunk.setFileReader(fileReader);
    rawColumnChunk.setPagesCount(dataChunk.getPage_length().size());
    rawColumnChunk.setMaxValues(maxValueOfEachPage);
    rawColumnChunk.setMinValues(minValueOfEachPage);
    rawColumnChunk.setRowCount(eachPageLength);
    rawColumnChunk.setLengths(
            ArrayUtils.toPrimitive(dataChunk.page_length.toArray(new Integer[dataChunk.page_length.size()])));
    rawColumnChunk.setOffsets(
            ArrayUtils.toPrimitive(dataChunk.page_offset.toArray(new Integer[dataChunk.page_offset.size()])));
    return rawColumnChunk;
}

From source file:org.apache.carbondata.core.datastore.chunk.reader.measure.v3.CompressedMeasureChunkFileBasedReaderV3.java

/**
 * Below method will be used to read multiple measure columns' data as a group
 * and divide it into measure raw chunk objects
 * Steps for reading:
 * 1. Get the length of the data to be read
 * 2. Allocate the direct buffer
 * 3. read the data from file
 * 4. Get the data chunk object from file for each column
 * 5. Create the raw chunk object and fill the details for each column
 * 6. increment the offset of the data
 *
 * @param fileReader
 *        reader which will be used to read the measure columns data from file
 * @param startColumnBlockletIndex
 *        blocklet index of the first measure column
 * @param endColumnBlockletIndex
 *        blocklet index of the last measure column
 * @return MeasureRawColumnChunk array
 */
protected MeasureRawColumnChunk[] readRawMeasureChunksInGroup(FileHolder fileReader,
        int startColumnBlockletIndex, int endColumnBlockletIndex) throws IOException {
    // to calculate the length of the data to be read, subtract the start column's
    // offset from the offset of (end column + 1) to get the total length.
    long currentMeasureOffset = measureColumnChunkOffsets.get(startColumnBlockletIndex);
    ByteBuffer buffer = null;
    // read the data from carbon data file
    synchronized (fileReader) {
        buffer = fileReader.readByteBuffer(filePath, currentMeasureOffset,
                (int) (measureColumnChunkOffsets.get(endColumnBlockletIndex + 1) - currentMeasureOffset));
    }
    // create raw chunk for each measure column
    MeasureRawColumnChunk[] measureDataChunk = new MeasureRawColumnChunk[endColumnBlockletIndex
            - startColumnBlockletIndex + 1];
    int runningLength = 0;
    int index = 0;
    for (int i = startColumnBlockletIndex; i <= endColumnBlockletIndex; i++) {
        int currentLength = (int) (measureColumnChunkOffsets.get(i + 1) - measureColumnChunkOffsets.get(i));
        MeasureRawColumnChunk measureRawColumnChunk = new MeasureRawColumnChunk(i, buffer, runningLength,
                currentLength, this);
        DataChunk3 dataChunk = CarbonUtil.readDataChunk3(buffer, runningLength,
                measureColumnChunkLength.get(i));

        int numberOfPages = dataChunk.getPage_length().size();
        byte[][] maxValueOfEachPage = new byte[numberOfPages][];
        byte[][] minValueOfEachPage = new byte[numberOfPages][];
        int[] eachPageLength = new int[numberOfPages];
        for (int j = 0; j < minValueOfEachPage.length; j++) {
            maxValueOfEachPage[j] = dataChunk.getData_chunk_list().get(j).getMin_max().getMax_values().get(0)
                    .array();
            minValueOfEachPage[j] = dataChunk.getData_chunk_list().get(j).getMin_max().getMin_values().get(0)
                    .array();
            eachPageLength[j] = dataChunk.getData_chunk_list().get(j).getNumberOfRowsInpage();
        }
        measureRawColumnChunk.setDataChunkV3(dataChunk);
        measureRawColumnChunk.setFileReader(fileReader);
        measureRawColumnChunk.setPagesCount(dataChunk.getPage_length().size());
        measureRawColumnChunk.setMaxValues(maxValueOfEachPage);
        measureRawColumnChunk.setMinValues(minValueOfEachPage);
        measureRawColumnChunk.setRowCount(eachPageLength);
        measureRawColumnChunk.setLengths(ArrayUtils
                .toPrimitive(dataChunk.page_length.toArray(new Integer[dataChunk.page_length.size()])));
        measureRawColumnChunk.setOffsets(ArrayUtils
                .toPrimitive(dataChunk.page_offset.toArray(new Integer[dataChunk.page_offset.size()])));
        measureDataChunk[index] = measureRawColumnChunk;
        runningLength += currentLength;
        index++;
    }
    return measureDataChunk;
}

From source file:org.apache.carbondata.core.reader.CarbonDeleteFilesDataReader.java

/**
 * Returns all deleted records from all specified delta files
 *
 * @param deltaFiles
 * @return
 * @throws Exception
 */
public int[] getDeleteDataFromAllFiles(List<String> deltaFiles, String blockletId) throws Exception {

    List<Future<DeleteDeltaBlockDetails>> taskSubmitList = new ArrayList<>();
    ExecutorService executorService = Executors.newFixedThreadPool(thread_pool_size);
    for (final String deltaFile : deltaFiles) {
        taskSubmitList.add(executorService.submit(new Callable<DeleteDeltaBlockDetails>() {
            @Override
            public DeleteDeltaBlockDetails call() throws IOException {
                CarbonDeleteDeltaFileReaderImpl deltaFileReader = new CarbonDeleteDeltaFileReaderImpl(deltaFile,
                        FileFactory.getFileType(deltaFile));
                return deltaFileReader.readJson();
            }
        }));
    }
    try {
        executorService.shutdown();
        executorService.awaitTermination(30, TimeUnit.MINUTES);
    } catch (InterruptedException e) {
        LOGGER.error("Error while reading the delete delta files : " + e.getMessage());
    }

    Set<Integer> result = new TreeSet<Integer>();
    for (int i = 0; i < taskSubmitList.size(); i++) {
        try {
            List<DeleteDeltaBlockletDetails> blockletDetails = taskSubmitList.get(i).get().getBlockletDetails();
            result.addAll(blockletDetails
                    .get(blockletDetails.indexOf(new DeleteDeltaBlockletDetails(blockletId))).getDeletedRows());
        } catch (Throwable e) {
            LOGGER.error(e.getMessage());
            throw new Exception(e.getMessage());
        }
    }
    return ArrayUtils.toPrimitive(result.toArray(new Integer[result.size()]));

}
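
Because the deleted row ids are collected in a TreeSet, the resulting array is already sorted and free of duplicates before toPrimitive unboxes it. A minimal standalone sketch of that pattern:

import java.util.Arrays;
import java.util.Set;
import java.util.TreeSet;

import org.apache.commons.lang.ArrayUtils;

public class SortedDistinctToPrimitiveDemo {
    public static void main(String[] args) {
        Set<Integer> deletedRows = new TreeSet<Integer>(Arrays.asList(9, 3, 9, 1));
        // TreeSet sorts and deduplicates; toPrimitive unboxes the result
        int[] rows = ArrayUtils.toPrimitive(deletedRows.toArray(new Integer[deletedRows.size()]));
        System.out.println(Arrays.toString(rows)); // [1, 3, 9]
    }
}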

From source file:org.apache.carbondata.core.scan.filter.FilterUtil.java

private static List<Integer> prepareExcludeFilterMembers(Dictionary forwardDictionary,
        List<Integer> includeSurrogates) throws FilterUnsupportedException {
    DictionaryChunksWrapper dictionaryWrapper;
    RoaringBitmap bitMapOfSurrogates = RoaringBitmap
            .bitmapOf(ArrayUtils.toPrimitive(includeSurrogates.toArray(new Integer[includeSurrogates.size()])));
    dictionaryWrapper = forwardDictionary.getDictionaryChunks();
    List<Integer> excludeFilterList = new ArrayList<Integer>(includeSurrogates.size());
    int surrogateCount = 0;
    while (dictionaryWrapper.hasNext()) {
        dictionaryWrapper.next();
        ++surrogateCount;
        if (!bitMapOfSurrogates.contains(surrogateCount)) {
            excludeFilterList.add(surrogateCount);
        }
    }
    return excludeFilterList;
}
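
Here toPrimitive feeds RoaringBitmap.bitmapOf, which takes a varargs int parameter, so the List<Integer> of include surrogates has to be unboxed first. A minimal sketch of that step on its own, assuming the org.roaringbitmap library is on the classpath:

import java.util.Arrays;
import java.util.List;

import org.apache.commons.lang.ArrayUtils;
import org.roaringbitmap.RoaringBitmap;

public class BitmapFromListDemo {
    public static void main(String[] args) {
        List<Integer> includeSurrogates = Arrays.asList(3, 7, 42);
        // unbox to int[] because bitmapOf takes varargs ints
        RoaringBitmap bitmap = RoaringBitmap
                .bitmapOf(ArrayUtils.toPrimitive(includeSurrogates.toArray(new Integer[0])));
        System.out.println(bitmap.contains(7)); // true
        System.out.println(bitmap.contains(8)); // false
    }
}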