Example usage for org.apache.commons.lang3 ArrayUtils toPrimitive

Introduction

This page lists example usages of org.apache.commons.lang3 ArrayUtils.toPrimitive.

Prototype

public static boolean[] toPrimitive(final Boolean[] array) 

Document

Converts an array of object Booleans to primitives.

This method returns null for a null input array.
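
A minimal standalone sketch of this overload (the class name ToPrimitiveDemo is illustrative, not from the examples below):

import org.apache.commons.lang3.ArrayUtils;

public class ToPrimitiveDemo {
    public static void main(String[] args) {
        Boolean[] boxed = { Boolean.TRUE, Boolean.FALSE, Boolean.TRUE };
        // each Boolean object is unboxed to its primitive value
        boolean[] primitives = ArrayUtils.toPrimitive(boxed);
        System.out.println(primitives.length); // 3
        // a null input array yields null rather than an exception
        System.out.println(ArrayUtils.toPrimitive((Boolean[]) null)); // null
    }
}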

Usage

From source file:org.apache.carbondata.core.scan.executor.util.RestructureUtil.java

/**
 * Below method will be used to prepare the measure info object.
 * Properties extracted from the query measures and the current
 * block measures will be set on it.
 *
 * @param blockExecutionInfo
 * @param queryMeasures        measures present in query
 * @param currentBlockMeasures current block measures
 * @return measures present in the block
 */
public static List<QueryMeasure> createMeasureInfoAndGetCurrentBlockQueryMeasures(
        BlockExecutionInfo blockExecutionInfo, List<QueryMeasure> queryMeasures,
        List<CarbonMeasure> currentBlockMeasures) {
    MeasureInfo measureInfo = new MeasureInfo();
    List<QueryMeasure> presentMeasure = new ArrayList<>(queryMeasures.size());
    int numberOfMeasureInQuery = queryMeasures.size();
    List<Integer> measureOrdinalList = new ArrayList<>(numberOfMeasureInQuery);
    Object[] defaultValues = new Object[numberOfMeasureInQuery];
    boolean[] measureExistsInCurrentBlock = new boolean[numberOfMeasureInQuery];
    int index = 0;
    for (QueryMeasure queryMeasure : queryMeasures) {
        // if the query measure exists in the current block measures,
        // then mark the measure as existing;
        // otherwise add the measure's default value
        for (CarbonMeasure carbonMeasure : currentBlockMeasures) {
            if (carbonMeasure.getColumnId().equals(queryMeasure.getMeasure().getColumnId())) {
                QueryMeasure currentBlockMeasure = new QueryMeasure(carbonMeasure.getColName());
                carbonMeasure.getColumnSchema().setDataType(queryMeasure.getMeasure().getDataType());
                carbonMeasure.getColumnSchema().setPrecision(queryMeasure.getMeasure().getPrecision());
                carbonMeasure.getColumnSchema().setScale(queryMeasure.getMeasure().getScale());
                carbonMeasure.getColumnSchema().setDefaultValue(queryMeasure.getMeasure().getDefaultValue());
                currentBlockMeasure.setMeasure(carbonMeasure);
                currentBlockMeasure.setQueryOrder(queryMeasure.getQueryOrder());
                presentMeasure.add(currentBlockMeasure);
                measureOrdinalList.add(carbonMeasure.getOrdinal());
                measureExistsInCurrentBlock[index] = true;
                break;
            }
        }
        if (!measureExistsInCurrentBlock[index]) {
            defaultValues[index] = getMeasureDefaultValue(queryMeasure.getMeasure().getColumnSchema(),
                    queryMeasure.getMeasure().getDefaultValue());
            blockExecutionInfo.setRestructuredBlock(true);
        }
        index++;
    }
    int[] measureOrdinals = ArrayUtils
            .toPrimitive(measureOrdinalList.toArray(new Integer[measureOrdinalList.size()]));
    measureInfo.setDefaultValues(defaultValues);
    measureInfo.setMeasureOrdinals(measureOrdinals);
    measureInfo.setMeasureExists(measureExistsInCurrentBlock);
    blockExecutionInfo.setMeasureInfo(measureInfo);
    return presentMeasure;
}
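
The recurring idiom in these examples converts a List<Integer> into an int[] by way of toArray plus toPrimitive. A minimal standalone sketch of just that step (class and variable names are illustrative):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.apache.commons.lang3.ArrayUtils;

public class ListToIntArrayDemo {
    public static void main(String[] args) {
        List<Integer> ordinals = new ArrayList<>(Arrays.asList(3, 1, 2));
        // List<Integer> -> Integer[] -> int[]
        int[] primitive = ArrayUtils.toPrimitive(ordinals.toArray(new Integer[ordinals.size()]));
        System.out.println(Arrays.toString(primitive)); // [3, 1, 2]
    }
}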

From source file:org.apache.carbondata.processing.merger.CarbonCompactionUtil.java

/**
 * This method will return the updated cardinality according to the master schema
 *
 * @param columnCardinalityMap
 * @param carbonTable
 * @param updatedColumnSchemaList
 * @return the updated cardinality array
 */
public static int[] updateColumnSchemaAndGetCardinality(Map<String, Integer> columnCardinalityMap,
        CarbonTable carbonTable, List<ColumnSchema> updatedColumnSchemaList) {
    List<CarbonDimension> masterDimensions = carbonTable
            .getDimensionByTableName(carbonTable.getFactTableName());
    List<Integer> updatedCardinalityList = new ArrayList<>(columnCardinalityMap.size());
    for (CarbonDimension dimension : masterDimensions) {
        Integer value = columnCardinalityMap.get(dimension.getColumnId());
        if (null == value) {
            updatedCardinalityList.add(getDimensionDefaultCardinality(dimension));
        } else {
            updatedCardinalityList.add(value);
        }
        updatedColumnSchemaList.add(dimension.getColumnSchema());
    }
    // add measures to the column schema list
    List<CarbonMeasure> masterSchemaMeasures = carbonTable
            .getMeasureByTableName(carbonTable.getFactTableName());
    for (CarbonMeasure measure : masterSchemaMeasures) {
        updatedColumnSchemaList.add(measure.getColumnSchema());
    }
    return ArrayUtils.toPrimitive(updatedCardinalityList.toArray(new Integer[updatedCardinalityList.size()]));
}

From source file:org.apache.carbondata.processing.newflow.sort.impl.SortPreparatorIterator.java

public SortPreparatorIterator(Iterator<CarbonRowBatch> iterator, DataField[] dataFields) {
    this.iterator = iterator;
    List<Integer> dictIndexes = new ArrayList<>();
    List<Integer> nonDictIndexes = new ArrayList<>();
    List<Integer> msrIndexes = new ArrayList<>();
    for (int i = 0; i < dataFields.length; i++) {
        if (dataFields[i].getColumn().isDimesion()) {
            if (dataFields[i].hasDictionaryEncoding()) {
                dictIndexes.add(i);
            } else {
                nonDictIndexes.add(i);
            }
        } else {
            msrIndexes.add(i);
        }
    }
    dictionaryFieldIndexes = ArrayUtils.toPrimitive(dictIndexes.toArray(new Integer[dictIndexes.size()]));
    nonDictionaryFieldIndexes = ArrayUtils
            .toPrimitive(nonDictIndexes.toArray(new Integer[nonDictIndexes.size()]));
    measueFieldIndexes = ArrayUtils.toPrimitive(msrIndexes.toArray(new Integer[msrIndexes.size()]));
}

From source file:org.apache.carbondata.processing.store.writer.AbstractFactDataWriter.java

public AbstractFactDataWriter(CarbonDataWriterVo dataWriterVo) {
    this.dataWriterVo = dataWriterVo;
    this.blockletInfoList = new ArrayList<BlockletInfoColumnar>(CarbonCommonConstants.CONSTANT_SIZE_TEN);
    blockIndexInfoList = new ArrayList<>();
    // get max file size
    CarbonProperties propInstance = CarbonProperties.getInstance();
    // if blocksize=2048, then 2048*1024*1024 will be beyond the range of int
    this.fileSizeInBytes = (long) dataWriterVo.getTableBlocksize()
            * CarbonCommonConstants.BYTE_TO_KB_CONVERSION_FACTOR
            * CarbonCommonConstants.BYTE_TO_KB_CONVERSION_FACTOR;
    this.spaceReservedForBlockMetaSize = Integer
            .parseInt(propInstance.getProperty(CarbonCommonConstants.CARBON_BLOCK_META_RESERVED_SPACE,
                    CarbonCommonConstants.CARBON_BLOCK_META_RESERVED_SPACE_DEFAULT));
    this.dataBlockSize = fileSizeInBytes - (fileSizeInBytes * spaceReservedForBlockMetaSize) / 100;
    LOGGER.info("Total file size: " + fileSizeInBytes + " and dataBlock Size: " + dataBlockSize);

    this.executorService = Executors.newFixedThreadPool(1);
    executorServiceSubmitList = new ArrayList<>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
    // in case of compaction we will pass the cardinality.
    this.localCardinality = dataWriterVo.getColCardinality();
    CarbonTable carbonTable = CarbonMetadata.getInstance().getCarbonTable(
            dataWriterVo.getDatabaseName() + CarbonCommonConstants.UNDERSCORE + dataWriterVo.getTableName());
    carbonTablePath = CarbonStorePath.getCarbonTablePath(dataWriterVo.getStoreLocation(),
            carbonTable.getCarbonTableIdentifier());
    // TODO: We should delete the levelmetadata file after reading it here,
    // so that only the data loading flow needs to read from the cardinality file.
    if (null == this.localCardinality) {
        this.localCardinality = CarbonMergerUtil
                .getCardinalityFromLevelMetadata(dataWriterVo.getStoreLocation(), dataWriterVo.getTableName());
        List<Integer> cardinalityList = new ArrayList<Integer>();
        thriftColumnSchemaList = getColumnSchemaListAndCardinality(cardinalityList, localCardinality,
                dataWriterVo.getWrapperColumnSchemaList());
        localCardinality = ArrayUtils.toPrimitive(cardinalityList.toArray(new Integer[cardinalityList.size()]));
    } else { // for compaction case
        List<Integer> cardinalityList = new ArrayList<Integer>();
        thriftColumnSchemaList = getColumnSchemaListAndCardinality(cardinalityList, localCardinality,
                dataWriterVo.getWrapperColumnSchemaList());
    }
    this.numberCompressor = new NumberCompressor(Integer.parseInt(CarbonProperties.getInstance().getProperty(
            CarbonCommonConstants.BLOCKLET_SIZE, CarbonCommonConstants.BLOCKLET_SIZE_DEFAULT_VAL)));
    this.dataChunksOffsets = new ArrayList<>();
    this.dataChunksLength = new ArrayList<>();
    blockletMetadata = new ArrayList<BlockletInfo3>();
    blockletIndex = new ArrayList<>();
}

From source file:org.apache.carbondata.processing.util.CarbonDataProcessorUtil.java

/**
 * Prepares a boolean[] that maps whether each dimension is a no-dictionary column.
 */
public static boolean[] getNoDictionaryMapping(DataField[] fields) {
    List<Boolean> noDictionaryMapping = new ArrayList<Boolean>();
    for (DataField field : fields) {
        // for complex types we need to break the loop
        if (field.getColumn().isComplex()) {
            break;
        }

        if (!field.hasDictionaryEncoding() && field.getColumn().isDimesion()) {
            noDictionaryMapping.add(true);
        } else if (field.getColumn().isDimesion()) {
            noDictionaryMapping.add(false);
        }
    }
    return ArrayUtils.toPrimitive(noDictionaryMapping.toArray(new Boolean[noDictionaryMapping.size()]));
}

From source file:org.apache.carbondata.processing.util.CarbonDataProcessorUtil.java

/**
 * Prepares a boolean[] that maps whether each dimension uses an inverted index.
 */
public static boolean[] getIsUseInvertedIndex(DataField[] fields) {
    List<Boolean> isUseInvertedIndexList = new ArrayList<Boolean>();
    for (DataField field : fields) {
        if (field.getColumn().isUseInvertedIndex() && field.getColumn().isDimesion()) {
            isUseInvertedIndexList.add(true);
        } else if (field.getColumn().isDimesion()) {
            isUseInvertedIndexList.add(false);
        }
    }
    return ArrayUtils.toPrimitive(isUseInvertedIndexList.toArray(new Boolean[isUseInvertedIndexList.size()]));
}
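
One caveat for the two Boolean-based helpers above: toPrimitive throws a NullPointerException if the array contains a null element, so the lists built here must never hold nulls. A minimal sketch of that failure mode (values are hypothetical):

import org.apache.commons.lang3.ArrayUtils;

public class ToPrimitiveNullElementDemo {
    public static void main(String[] args) {
        Boolean[] withNull = { Boolean.TRUE, null };
        try {
            ArrayUtils.toPrimitive(withNull); // a null element cannot be unboxed
        } catch (NullPointerException e) {
            System.out.println("null elements are rejected");
        }
    }
}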

From source file:org.apache.carbondata.scan.collector.impl.DictionaryBasedResultCollector.java

/**
 * This method will add a record (both key and value) to the list object.
 * It keeps track of how many records are processed, to handle the limit scenario.
 */
@Override
public List<Object[]> collectData(AbstractScannedResult scannedResult, int batchSize) {

    List<Object[]> listBasedResult = new ArrayList<>(batchSize);
    boolean isMsrsPresent = measureDatatypes.length > 0;

    QueryDimension[] queryDimensions = tableBlockExecutionInfos.getQueryDimensions();
    List<Integer> dictionaryIndexes = new ArrayList<Integer>();
    for (int i = 0; i < queryDimensions.length; i++) {
        if (queryDimensions[i].getDimension().hasEncoding(Encoding.DICTIONARY)
                || queryDimensions[i].getDimension().hasEncoding(Encoding.DIRECT_DICTIONARY)) {
            dictionaryIndexes.add(queryDimensions[i].getDimension().getOrdinal());
        }
    }
    int[] primitive = ArrayUtils.toPrimitive(dictionaryIndexes.toArray(new Integer[dictionaryIndexes.size()]));
    Arrays.sort(primitive);
    int[] actualIndexInSurrogateKey = new int[dictionaryIndexes.size()];
    int index = 0;
    for (int i = 0; i < queryDimensions.length; i++) {
        if (queryDimensions[i].getDimension().hasEncoding(Encoding.DICTIONARY)
                || queryDimensions[i].getDimension().hasEncoding(Encoding.DIRECT_DICTIONARY)) {
            actualIndexInSurrogateKey[index++] = Arrays.binarySearch(primitive,
                    queryDimensions[i].getDimension().getOrdinal());
        }
    }

    QueryMeasure[] queryMeasures = tableBlockExecutionInfos.getQueryMeasures();
    Map<Integer, GenericQueryType> complexDimensionInfoMap = tableBlockExecutionInfos
            .getComlexDimensionInfoMap();
    boolean[] dictionaryEncodingArray = CarbonUtil.getDictionaryEncodingArray(queryDimensions);
    boolean[] directDictionaryEncodingArray = CarbonUtil.getDirectDictionaryEncodingArray(queryDimensions);
    boolean[] complexDataTypeArray = CarbonUtil.getComplexDataTypeArray(queryDimensions);
    int dimSize = queryDimensions.length;
    boolean isDimensionsExist = dimSize > 0;
    int[] order = new int[dimSize + queryMeasures.length];
    for (int i = 0; i < dimSize; i++) {
        order[i] = queryDimensions[i].getQueryOrder();
    }
    for (int i = 0; i < queryMeasures.length; i++) {
        order[i + dimSize] = queryMeasures[i].getQueryOrder();
    }
    // scan the record and add to list
    int rowCounter = 0;
    int dictionaryColumnIndex = 0;
    int noDictionaryColumnIndex = 0;
    int complexTypeColumnIndex = 0;
    int[] surrogateResult;
    String[] noDictionaryKeys;
    byte[][] complexTypeKeyArray;
    while (scannedResult.hasNext() && rowCounter < batchSize) {
        Object[] row = new Object[dimSize + queryMeasures.length];
        if (isDimensionsExist) {
            surrogateResult = scannedResult.getDictionaryKeyIntegerArray();
            noDictionaryKeys = scannedResult.getNoDictionaryKeyStringArray();
            complexTypeKeyArray = scannedResult.getComplexTypeKeyArray();
            dictionaryColumnIndex = 0;
            noDictionaryColumnIndex = 0;
            complexTypeColumnIndex = 0;
            for (int i = 0; i < dimSize; i++) {
                if (!dictionaryEncodingArray[i]) {
                    row[order[i]] = DataTypeUtil.getDataBasedOnDataType(
                            noDictionaryKeys[noDictionaryColumnIndex++],
                            queryDimensions[i].getDimension().getDataType());
                } else if (directDictionaryEncodingArray[i]) {
                    DirectDictionaryGenerator directDictionaryGenerator = DirectDictionaryKeyGeneratorFactory
                            .getDirectDictionaryGenerator(queryDimensions[i].getDimension().getDataType());
                    if (directDictionaryGenerator != null) {
                        row[order[i]] = directDictionaryGenerator.getValueFromSurrogate(
                                surrogateResult[actualIndexInSurrogateKey[dictionaryColumnIndex++]]);
                    }
                } else if (complexDataTypeArray[i]) {
                    row[order[i]] = complexDimensionInfoMap.get(queryDimensions[i].getDimension().getOrdinal())
                            .getDataBasedOnDataTypeFromSurrogates(
                                    ByteBuffer.wrap(complexTypeKeyArray[complexTypeColumnIndex++]));
                } else {
                    row[order[i]] = surrogateResult[actualIndexInSurrogateKey[dictionaryColumnIndex++]];
                }
            }

        } else {
            scannedResult.incrementCounter();
        }
        if (isMsrsPresent) {
            Object[] msrValues = new Object[measureDatatypes.length];
            fillMeasureData(msrValues, 0, scannedResult);
            for (int i = 0; i < msrValues.length; i++) {
                row[order[i + dimSize]] = msrValues[i];
            }
        }
        listBasedResult.add(row);
        rowCounter++;
    }
    return listBasedResult;
}

From source file:org.apache.carbondata.scan.executor.impl.AbstractQueryExecutor.java

/**
 * Below method will be used to get the block execution info which is
 * required to execute any block  based on query model
 *
 * @param queryModel query model from user query
 * @param blockIndex block index
 * @return block execution info
 * @throws QueryExecutionException any failure during block info creation
 */
protected BlockExecutionInfo getBlockExecutionInfoForBlock(QueryModel queryModel, AbstractIndex blockIndex,
        int startBlockletIndex, int numberOfBlockletToScan) throws QueryExecutionException {
    BlockExecutionInfo blockExecutionInfo = new BlockExecutionInfo();
    SegmentProperties segmentProperties = blockIndex.getSegmentProperties();
    List<CarbonDimension> tableBlockDimensions = segmentProperties.getDimensions();
    KeyGenerator blockKeyGenerator = segmentProperties.getDimensionKeyGenerator();

    // below is to get only those dimensions in the query which are present
    // in the table block
    List<QueryDimension> updatedQueryDimension = RestructureUtil.getUpdatedQueryDimension(
            queryModel.getQueryDimension(), tableBlockDimensions, segmentProperties.getComplexDimensions());
    // TODO add complex dimension children
    int[] maskByteRangesForBlock = QueryUtil.getMaskedByteRange(updatedQueryDimension, blockKeyGenerator);
    int[] maskedByte = QueryUtil.getMaskedByte(blockKeyGenerator.getKeySizeInBytes(), maskByteRangesForBlock);
    blockExecutionInfo.setStartBlockletIndex(startBlockletIndex);
    blockExecutionInfo.setNumberOfBlockletToScan(numberOfBlockletToScan);
    blockExecutionInfo.setQueryDimensions(
            updatedQueryDimension.toArray(new QueryDimension[updatedQueryDimension.size()]));
    blockExecutionInfo.setQueryMeasures(
            queryModel.getQueryMeasures().toArray(new QueryMeasure[queryModel.getQueryMeasures().size()]));
    blockExecutionInfo.setDataBlock(blockIndex);
    blockExecutionInfo.setBlockKeyGenerator(blockKeyGenerator);
    // adding aggregation info for query
    blockExecutionInfo.setAggregatorInfo(getAggregatorInfoForBlock(queryModel, blockIndex));
    // adding query statistics list to record the statistics
    blockExecutionInfo.setStatisticsRecorder(queryProperties.queryStatisticsRecorder);
    // setting the limit
    blockExecutionInfo.setLimit(queryModel.getLimit());
    // setting whether detail query or not
    blockExecutionInfo.setDetailQuery(queryModel.isDetailQuery());
    // setting whether raw record query or not
    blockExecutionInfo.setRawRecordDetailQuery(queryModel.isForcedDetailRawQuery());
    // setting the masked byte of the block which will be
    // used to unpack the older block keys
    blockExecutionInfo.setMaskedByteForBlock(maskedByte);
    // total number of dimension blocks
    blockExecutionInfo
            .setTotalNumberDimensionBlock(segmentProperties.getDimensionOrdinalToBlockMapping().size());
    blockExecutionInfo
            .setTotalNumberOfMeasureBlock(segmentProperties.getMeasuresOrdinalToBlockMapping().size());
    blockExecutionInfo.setComplexDimensionInfoMap(QueryUtil.getComplexDimensionsMap(updatedQueryDimension,
            segmentProperties.getDimensionOrdinalToBlockMapping(),
            segmentProperties.getEachComplexDimColumnValueSize(), queryProperties.columnToDictionayMapping,
            queryProperties.complexFilterDimension));
    // to check whether older block key update is required or not
    blockExecutionInfo.setFixedKeyUpdateRequired(
            !blockKeyGenerator.equals(queryProperties.keyStructureInfo.getKeyGenerator()));
    IndexKey startIndexKey = null;
    IndexKey endIndexKey = null;
    if (null != queryModel.getFilterExpressionResolverTree()) {
        // loading the filter executer tree for filter evaluation
        blockExecutionInfo.setFilterExecuterTree(
                FilterUtil.getFilterExecuterTree(queryModel.getFilterExpressionResolverTree(),
                        segmentProperties, blockExecutionInfo.getComlexDimensionInfoMap()));
        List<IndexKey> listOfStartEndKeys = new ArrayList<IndexKey>(2);
        FilterUtil.traverseResolverTreeAndGetStartAndEndKey(segmentProperties,
                queryModel.getAbsoluteTableIdentifier(), queryModel.getFilterExpressionResolverTree(),
                listOfStartEndKeys);
        startIndexKey = listOfStartEndKeys.get(0);
        endIndexKey = listOfStartEndKeys.get(1);
    } else {
        try {
            startIndexKey = FilterUtil.prepareDefaultStartIndexKey(segmentProperties);
            endIndexKey = FilterUtil.prepareDefaultEndIndexKey(segmentProperties);
        } catch (KeyGenException e) {
            throw new QueryExecutionException(e);
        }
    }
    blockExecutionInfo
            .setFileType(FileFactory.getFileType(queryModel.getAbsoluteTableIdentifier().getStorePath()));
    //setting the start index key of the block node
    blockExecutionInfo.setStartKey(startIndexKey);
    //setting the end index key of the block node
    blockExecutionInfo.setEndKey(endIndexKey);
    // expression dimensions
    List<CarbonDimension> expressionDimensions = new ArrayList<CarbonDimension>(
            CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
    // expression measure
    List<CarbonMeasure> expressionMeasures = new ArrayList<CarbonMeasure>(
            CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
    // setting all the dimension chunk indexes to be read from file
    int numberOfElementToConsider = 0;
    int[] dimensionsBlockIndexes = QueryUtil.getDimensionsBlockIndexes(updatedQueryDimension,
            segmentProperties.getDimensionOrdinalToBlockMapping(), expressionDimensions,
            queryProperties.complexFilterDimension);
    if (dimensionsBlockIndexes.length > 0) {
        numberOfElementToConsider = dimensionsBlockIndexes[dimensionsBlockIndexes.length
                - 1] == segmentProperties.getBlockTodimensionOrdinalMapping().size() - 1
                        ? dimensionsBlockIndexes.length - 1
                        : dimensionsBlockIndexes.length;
        blockExecutionInfo.setAllSelectedDimensionBlocksIndexes(CarbonUtil.getRangeIndex(dimensionsBlockIndexes,
                numberOfElementToConsider, CarbonCommonConstants.NUMBER_OF_COLUMN_READ_IN_IO));
    } else {
        blockExecutionInfo.setAllSelectedDimensionBlocksIndexes(new int[0][0]);
    }

    int[] measureBlockIndexes = QueryUtil.getMeasureBlockIndexes(queryModel.getQueryMeasures(),
            expressionMeasures, segmentProperties.getMeasuresOrdinalToBlockMapping(),
            queryProperties.filterMeasures);
    if (measureBlockIndexes.length > 0) {

        numberOfElementToConsider = measureBlockIndexes[measureBlockIndexes.length - 1] == segmentProperties
                .getMeasures().size() - 1 ? measureBlockIndexes.length - 1 : measureBlockIndexes.length;
        // setting all the measure chunk indexes to be read from file
        blockExecutionInfo.setAllSelectedMeasureBlocksIndexes(CarbonUtil.getRangeIndex(measureBlockIndexes,
                numberOfElementToConsider, CarbonCommonConstants.NUMBER_OF_COLUMN_READ_IN_IO));
    } else {
        blockExecutionInfo.setAllSelectedMeasureBlocksIndexes(new int[0][0]);
    }
    // setting the key structure info which will be required
    // to update the older block key with new key generator
    blockExecutionInfo.setKeyStructureInfo(queryProperties.keyStructureInfo);
    // setting the size of fixed key column (dictionary column)
    blockExecutionInfo.setFixedLengthKeySize(getKeySize(updatedQueryDimension, segmentProperties));
    Set<Integer> dictionaryColumnBlockIndex = new HashSet<Integer>();
    List<Integer> noDictionaryColumnBlockIndex = new ArrayList<Integer>();
    // get the block index to be read from file for query dimension
    // for both dictionary columns and no dictionary columns
    QueryUtil.fillQueryDimensionsBlockIndexes(updatedQueryDimension,
            segmentProperties.getDimensionOrdinalToBlockMapping(), dictionaryColumnBlockIndex,
            noDictionaryColumnBlockIndex);
    int[] queryDictionaryColumnBlockIndexes = ArrayUtils
            .toPrimitive(dictionaryColumnBlockIndex.toArray(new Integer[dictionaryColumnBlockIndex.size()]));
    // need to sort the dictionary column block indexes, as for all dimension
    // columns the key will be filled based on key order
    Arrays.sort(queryDictionaryColumnBlockIndexes);
    blockExecutionInfo.setDictionaryColumnBlockIndex(queryDictionaryColumnBlockIndexes);
    // setting the no dictionary column block indexes
    blockExecutionInfo.setNoDictionaryBlockIndexes(ArrayUtils.toPrimitive(
            noDictionaryColumnBlockIndex.toArray(new Integer[noDictionaryColumnBlockIndex.size()])));
    // setting column id to dictionary mapping
    blockExecutionInfo.setColumnIdToDcitionaryMapping(queryProperties.columnToDictionayMapping);
    // setting each column value size
    blockExecutionInfo.setEachColumnValueSize(segmentProperties.getEachDimColumnValueSize());
    blockExecutionInfo
            .setComplexColumnParentBlockIndexes(getComplexDimensionParentBlockIndexes(updatedQueryDimension));
    try {
        // to set the column group and its key structure info, which will be
        // used for getting the column group column data in case of the final
        // row and in case of dimension aggregation
        blockExecutionInfo.setColumnGroupToKeyStructureInfo(
                QueryUtil.getColumnGroupKeyStructureInfo(updatedQueryDimension, segmentProperties));
    } catch (KeyGenException e) {
        throw new QueryExecutionException(e);
    }
    return blockExecutionInfo;
}
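
In the dictionary block index conversion above, the indexes start in a HashSet, whose toArray order is unspecified, which is why the resulting int[] is sorted before use. A minimal sketch of that Set-to-sorted-int[] step (names are illustrative):

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

import org.apache.commons.lang3.ArrayUtils;

public class SetToSortedIntArrayDemo {
    public static void main(String[] args) {
        Set<Integer> blockIndexes = new HashSet<>(Arrays.asList(7, 2, 5));
        int[] sorted = ArrayUtils.toPrimitive(blockIndexes.toArray(new Integer[blockIndexes.size()]));
        Arrays.sort(sorted); // HashSet iteration order is unspecified
        System.out.println(Arrays.toString(sorted)); // [2, 5, 7]
    }
}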

From source file:org.apache.carbondata.scan.executor.impl.AbstractQueryExecutor.java

/**
 * This method will be used to get the fixed key length size, which will
 * be used to create a row from a column chunk.
 *
 * @param queryDimension    query dimension
 * @param blockMetadataInfo block metadata info
 * @return key size
 */
private int getKeySize(List<QueryDimension> queryDimension, SegmentProperties blockMetadataInfo) {
    List<Integer> fixedLengthDimensionOrdinal = new ArrayList<Integer>(
            CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
    int counter = 0;
    while (counter < queryDimension.size()) {
        if (queryDimension.get(counter).getDimension().numberOfChild() > 0) {
            counter += queryDimension.get(counter).getDimension().numberOfChild();
            continue;
        } else if (!CarbonUtil.hasEncoding(queryDimension.get(counter).getDimension().getEncoder(),
                Encoding.DICTIONARY)) {
            counter++;
        } else {
            fixedLengthDimensionOrdinal.add(queryDimension.get(counter).getDimension().getKeyOrdinal());
            counter++;
        }
    }
    int[] dictionaryColumnOrdinal = ArrayUtils
            .toPrimitive(fixedLengthDimensionOrdinal.toArray(new Integer[fixedLengthDimensionOrdinal.size()]));
    if (dictionaryColumnOrdinal.length > 0) {
        return blockMetadataInfo.getFixedLengthKeySplitter().getKeySizeByBlock(dictionaryColumnOrdinal);
    }
    return 0;
}

From source file:org.apache.carbondata.scan.executor.util.QueryUtil.java

/**
 * Below method will be used to get the dimension block indexes in the file
 * based on the query dimensions
 *
 * @param queryDimensions                query dimension
 * @param dimensionOrdinalToBlockMapping mapping of dimension block in file to query dimension
 * @return block index of file
 */
public static int[] getDimensionsBlockIndexes(List<QueryDimension> queryDimensions,
        Map<Integer, Integer> dimensionOrdinalToBlockMapping, List<CarbonDimension> customAggregationDimension,
        Set<CarbonDimension> filterDimensions) {
    // using a set, as columns in a row group will point to the same block
    Set<Integer> dimensionBlockIndex = new HashSet<Integer>();
    Set<Integer> filterDimensionOrdinal = getFilterDimensionOrdinal(filterDimensions);
    int blockIndex = 0;
    for (int i = 0; i < queryDimensions.size(); i++) {
        if (!filterDimensionOrdinal.contains(queryDimensions.get(i).getDimension().getOrdinal())) {
            blockIndex = dimensionOrdinalToBlockMapping.get(queryDimensions.get(i).getDimension().getOrdinal());
            dimensionBlockIndex.add(blockIndex);
            if (queryDimensions.get(i).getDimension().numberOfChild() > 0) {
                addChildrenBlockIndex(dimensionBlockIndex, queryDimensions.get(i).getDimension());
            }
        }
    }
    for (int i = 0; i < customAggregationDimension.size(); i++) {
        blockIndex = dimensionOrdinalToBlockMapping.get(customAggregationDimension.get(i).getOrdinal());
        // not adding the children dimensions, as dimension aggregation
        // is not pushed down in case of complex dimensions
        dimensionBlockIndex.add(blockIndex);
    }
    int[] dimensionIndex = ArrayUtils
            .toPrimitive(dimensionBlockIndex.toArray(new Integer[dimensionBlockIndex.size()]));
    Arrays.sort(dimensionIndex);
    return dimensionIndex;
}