Example usage for org.apache.commons.lang ArrayUtils toPrimitive

List of usage examples for org.apache.commons.lang ArrayUtils toPrimitive

Introduction

On this page you can find usage examples for org.apache.commons.lang ArrayUtils toPrimitive, taken from real-world source files.

Prototype

public static boolean[] toPrimitive(Boolean[] array) 

Document

Converts an array of object Booleans (Boolean[]) to an array of primitive booleans (boolean[]). ArrayUtils.toPrimitive is overloaded for every wrapper type; the examples below use the Integer[] and Long[] overloads in the same way.
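
A minimal, self-contained sketch (assuming Commons Lang 2.x is on the classpath; the class name and values here are purely illustrative) showing both the documented Boolean[] overload and the List-to-primitive-array pattern that recurs in the examples below:

    import java.util.Arrays;
    import java.util.List;

    import org.apache.commons.lang.ArrayUtils;

    public class ToPrimitiveExample {
        public static void main(String[] args) {
            // Documented overload: Boolean[] -> boolean[]
            Boolean[] boxedFlags = { Boolean.TRUE, Boolean.FALSE, Boolean.TRUE };
            boolean[] flags = ArrayUtils.toPrimitive(boxedFlags);

            // The pattern used repeatedly in the examples below:
            // unbox a List<Integer> into an int[] via toArray(new Integer[0]).
            List<Integer> columns = Arrays.asList(1, 2, 3);
            int[] columnMap = ArrayUtils.toPrimitive(columns.toArray(new Integer[0]));

            System.out.println(Arrays.toString(flags));     // [true, false, true]
            System.out.println(Arrays.toString(columnMap)); // [1, 2, 3]
        }
    }

Note that toPrimitive throws a NullPointerException if any element of the boxed array is null; the two-argument overloads, such as toPrimitive(Boolean[] array, boolean valueForNull), substitute a default value instead.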

Usage

From source file: org.apache.hadoop.hive.ql.exec.vector.VectorGroupByOperator.java

private void setupGroupingSets() {

    groupingSetsPresent = conf.isGroupingSetsPresent();
    if (!groupingSetsPresent) {
        groupingSets = null;
        groupingSetsPosition = -1;
        groupingSetsDummyVectorExpression = null;
        allGroupingSetsOverrideIsNulls = null;
        return;
    }

    groupingSets = ArrayUtils.toPrimitive(conf.getListGroupingSets().toArray(new Integer[0]));
    groupingSetsPosition = conf.getGroupingSetPosition();

    allGroupingSetsOverrideIsNulls = new boolean[groupingSets.length][];

    int pos = 0;
    for (int groupingSet : groupingSets) {

        // Create the mapping corresponding to the grouping set

        // Assume all columns are null, except the dummy column is always non-null.
        boolean[] groupingSetsOverrideIsNull = new boolean[keyExpressions.length];
        Arrays.fill(groupingSetsOverrideIsNull, true);
        groupingSetsOverrideIsNull[groupingSetsPosition] = false;

        // Add keys of this grouping set.
        FastBitSet bitset = GroupByOperator.groupingSet2BitSet(groupingSet, groupingSetsPosition);
        for (int keyPos = bitset.nextClearBit(0); keyPos < groupingSetsPosition; keyPos = bitset
                .nextClearBit(keyPos + 1)) {
            groupingSetsOverrideIsNull[keyPos] = false;
        }

        allGroupingSetsOverrideIsNulls[pos] = groupingSetsOverrideIsNull;
        pos++;
    }

    // The last key column is the dummy grouping set id.
    //
    // Figure out which (scratch) column was used so we can overwrite the dummy id.

    groupingSetsDummyVectorExpression = (ConstantVectorExpression) keyExpressions[groupingSetsPosition];
}

From source file: org.apache.hadoop.hive.ql.io.mapred.NetCDFInputFormat.java

private NetCDFInfo getNetCDFInfo(Path file, FileSystem fs, JobConf job) {

    //traverse header and return chunk start and size arrays
    NetCDFInfo result = new NetCDFInfo();//library call

    NetcdfFile ncFile;
    Variable v;
    ncFile = null;
    try {
        ncFile = NetcdfDataset.openFile(file.toString(), null);

        v = ncFile.findVariable("rsut");
        //List<Variable> vs = ncFile.getVariables();
        //v = vs.get(vs.size()-1);

        //LOG.info("Variable is "+ v.getFullName());
        result.fileSize = ncFile.vfileSize;
        result.recStart = ncFile.vrecStart;
        Long[] metaArray = v.reallyReadMeta().toArray(new Long[(int) (ncFile.vnumRecs)]);
        result.chunkStarts = ArrayUtils.toPrimitive(metaArray);
        //result.chunkSizes = nc.chunkSizes;
        result.numRecs = ncFile.vnumRecs;
        result.recSize = ncFile.vrecSize;
        result.smallRecSize = ncFile.vsmallRecSize;
        //result.shape = v.shape;

    } catch (Exception e) {
        LOG.info("Bad... " + e);
    }
    try {
        if (ncFile != null)
            ncFile.close();
    } catch (Exception e) {
        LOG.info("Bad2... " + e);
    }

    return result;
}

From source file: org.apache.hadoop.hive.ql.io.mapreduce.lib.input.NetCDFInputFormat.java

private NetCDFInfo getNetCDFInfo(Path file, FileSystem fs) {

    //traverse header and return chunk start and size arrays
    NetCDFInfo result = new NetCDFInfo();//library call

    NetcdfFile ncFile;
    Variable v;
    ncFile = null;
    try {
        ncFile = NetcdfDataset.openFile(file.toString(), null);

        v = ncFile.findVariable("rsut");
        //List<Variable> vs = ncFile.getVariables();
        //v = vs.get(vs.size()-1);

        //LOG.info("Variable is "+ v.getFullName());
        result.fileSize = ncFile.vfileSize;
        result.recStart = ncFile.vrecStart;
        Long[] metaArray = v.reallyReadMeta().toArray(new Long[(int) (ncFile.vnumRecs)]);
        result.chunkStarts = ArrayUtils.toPrimitive(metaArray);
        //result.chunkSizes = nc.chunkSizes;
        result.numRecs = ncFile.vnumRecs;
        result.recSize = ncFile.vrecSize;
        result.smallRecSize = ncFile.vsmallRecSize;
        //result.shape = v.shape;

    } catch (Exception e) {
        LOG.info("Bad... " + e);
    }
    try {
        if (ncFile != null)
            ncFile.close();
    } catch (Exception e) {
        LOG.info("Bad2... " + e);
    }

    return result;
}

From source file: org.apache.hadoop.hive.ql.optimizer.physical.Vectorizer.java

public static Operator<? extends OperatorDesc> vectorizeTableScanOperator(
        Operator<? extends OperatorDesc> tableScanOp, VectorizationContext vContext) throws HiveException {
    TableScanDesc tableScanDesc = (TableScanDesc) tableScanOp.getConf();
    VectorTableScanDesc vectorTableScanDesc = new VectorTableScanDesc();
    tableScanDesc.setVectorDesc(vectorTableScanDesc);
    vectorTableScanDesc.setProjectedOutputColumns(
            ArrayUtils.toPrimitive(vContext.getProjectedColumns().toArray(new Integer[0])));
    return tableScanOp;
}

From source file: org.apache.hadoop.hive.ql.optimizer.physical.Vectorizer.java

private static VectorPTFInfo createVectorPTFInfo(Operator<? extends OperatorDesc> ptfOp, PTFDesc ptfDesc,
        VectorizationContext vContext) throws HiveException {

    PartitionedTableFunctionDef funcDef = ptfDesc.getFuncDef();

    ArrayList<ColumnInfo> outputSignature = ptfOp.getSchema().getSignature();
    final int outputSize = outputSignature.size();

    VectorPTFDesc vectorPTFDesc = (VectorPTFDesc) ptfDesc.getVectorDesc();

    boolean isPartitionOrderBy = vectorPTFDesc.getIsPartitionOrderBy();
    ExprNodeDesc[] orderExprNodeDescs = vectorPTFDesc.getOrderExprNodeDescs();
    ExprNodeDesc[] partitionExprNodeDescs = vectorPTFDesc.getPartitionExprNodeDescs();
    String[] evaluatorFunctionNames = vectorPTFDesc.getEvaluatorFunctionNames();

    final int evaluatorCount = evaluatorFunctionNames.length;
    WindowFrameDef[] evaluatorWindowFrameDefs = vectorPTFDesc.getEvaluatorWindowFrameDefs();
    List<ExprNodeDesc>[] evaluatorInputExprNodeDescLists = vectorPTFDesc.getEvaluatorInputExprNodeDescLists();

    /*
     * Output columns.
     */
    int[] outputColumnMap = new int[outputSize];
    for (int i = 0; i < evaluatorCount; i++) {
        ColumnInfo colInfo = outputSignature.get(i);
        TypeInfo typeInfo = colInfo.getType();
        final int outputColumnNum;
        outputColumnNum = vContext.allocateScratchColumn(typeInfo);
        outputColumnMap[i] = outputColumnNum;
    }
    for (int i = evaluatorCount; i < outputSize; i++) {
        ColumnInfo colInfo = outputSignature.get(i);
        outputColumnMap[i] = vContext.getInputColumnIndex(colInfo.getInternalName());
    }

    /*
     * Partition and order by.
     */

    int[] partitionColumnMap;
    Type[] partitionColumnVectorTypes;
    VectorExpression[] partitionExpressions;

    if (!isPartitionOrderBy) {
        partitionColumnMap = null;
        partitionColumnVectorTypes = null;
        partitionExpressions = null;
    } else {
        final int partitionKeyCount = partitionExprNodeDescs.length;
        partitionColumnMap = new int[partitionKeyCount];
        partitionColumnVectorTypes = new Type[partitionKeyCount];
        partitionExpressions = new VectorExpression[partitionKeyCount];

        for (int i = 0; i < partitionKeyCount; i++) {
            VectorExpression partitionExpression = vContext.getVectorExpression(partitionExprNodeDescs[i]);
            String typeName = partitionExpression.getOutputType();
            typeName = VectorizationContext.mapTypeNameSynonyms(typeName);
            TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString(typeName);
            Type columnVectorType = VectorizationContext.getColumnVectorTypeFromTypeInfo(typeInfo);
            partitionColumnVectorTypes[i] = columnVectorType;
            partitionColumnMap[i] = partitionExpression.getOutputColumn();
            partitionExpressions[i] = partitionExpression;
        }
    }

    final int orderKeyCount = orderExprNodeDescs.length;
    int[] orderColumnMap = new int[orderKeyCount];
    Type[] orderColumnVectorTypes = new Type[orderKeyCount];
    VectorExpression[] orderExpressions = new VectorExpression[orderKeyCount];
    for (int i = 0; i < orderKeyCount; i++) {
        VectorExpression orderExpression = vContext.getVectorExpression(orderExprNodeDescs[i]);
        String typeName = orderExpression.getOutputType();
        typeName = VectorizationContext.mapTypeNameSynonyms(typeName);
        TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString(typeName);
        Type columnVectorType = VectorizationContext.getColumnVectorTypeFromTypeInfo(typeInfo);
        orderColumnVectorTypes[i] = columnVectorType;
        orderColumnMap[i] = orderExpression.getOutputColumn();
        orderExpressions[i] = orderExpression;
    }

    ArrayList<Integer> keyInputColumns = new ArrayList<Integer>();
    ArrayList<Integer> nonKeyInputColumns = new ArrayList<Integer>();
    determineKeyAndNonKeyInputColumnMap(outputColumnMap, isPartitionOrderBy, orderColumnMap, partitionColumnMap,
            evaluatorCount, keyInputColumns, nonKeyInputColumns);
    int[] keyInputColumnMap = ArrayUtils.toPrimitive(keyInputColumns.toArray(new Integer[0]));
    int[] nonKeyInputColumnMap = ArrayUtils.toPrimitive(nonKeyInputColumns.toArray(new Integer[0]));

    VectorExpression[] evaluatorInputExpressions = new VectorExpression[evaluatorCount];
    Type[] evaluatorInputColumnVectorTypes = new Type[evaluatorCount];
    for (int i = 0; i < evaluatorCount; i++) {
        String functionName = evaluatorFunctionNames[i];
        WindowFrameDef windowFrameDef = evaluatorWindowFrameDefs[i];
        SupportedFunctionType functionType = VectorPTFDesc.supportedFunctionsMap.get(functionName);

        List<ExprNodeDesc> exprNodeDescList = evaluatorInputExprNodeDescLists[i];
        VectorExpression inputVectorExpression;
        final Type columnVectorType;
        if (exprNodeDescList != null) {

            // Validation has limited evaluatorInputExprNodeDescLists to size 1.
            ExprNodeDesc exprNodeDesc = exprNodeDescList.get(0);

            // Determine input vector expression using the VectorizationContext.
            inputVectorExpression = vContext.getVectorExpression(exprNodeDesc);

            TypeInfo typeInfo = exprNodeDesc.getTypeInfo();
            PrimitiveCategory primitiveCategory = ((PrimitiveTypeInfo) typeInfo).getPrimitiveCategory();
            columnVectorType = VectorizationContext.getColumnVectorTypeFromTypeInfo(typeInfo);
        } else {
            inputVectorExpression = null;
            columnVectorType = ColumnVector.Type.NONE;
        }

        evaluatorInputExpressions[i] = inputVectorExpression;
        evaluatorInputColumnVectorTypes[i] = columnVectorType;
    }

    VectorPTFInfo vectorPTFInfo = new VectorPTFInfo();

    vectorPTFInfo.setOutputColumnMap(outputColumnMap);

    vectorPTFInfo.setPartitionColumnMap(partitionColumnMap);
    vectorPTFInfo.setPartitionColumnVectorTypes(partitionColumnVectorTypes);
    vectorPTFInfo.setPartitionExpressions(partitionExpressions);

    vectorPTFInfo.setOrderColumnMap(orderColumnMap);
    vectorPTFInfo.setOrderColumnVectorTypes(orderColumnVectorTypes);
    vectorPTFInfo.setOrderExpressions(orderExpressions);

    vectorPTFInfo.setEvaluatorInputExpressions(evaluatorInputExpressions);
    vectorPTFInfo.setEvaluatorInputColumnVectorTypes(evaluatorInputColumnVectorTypes);

    vectorPTFInfo.setKeyInputColumnMap(keyInputColumnMap);
    vectorPTFInfo.setNonKeyInputColumnMap(nonKeyInputColumnMap);

    return vectorPTFInfo;
}

From source file: org.apache.hadoop.hive.ql.plan.VectorPTFDesc.java

public static int[] getStreamingColumnMap(VectorPTFEvaluatorBase[] evaluators) {
    final int evaluatorCount = evaluators.length;
    ArrayList<Integer> streamingColumns = new ArrayList<Integer>();
    for (int i = 0; i < evaluatorCount; i++) {
        final VectorPTFEvaluatorBase evaluator = evaluators[i];
        if (evaluator.streamsResult()) {
            streamingColumns.add(evaluator.getOutputColumnNum());
        }
    }
    return ArrayUtils.toPrimitive(streamingColumns.toArray(new Integer[0]));
}

From source file: org.apache.hadoop.hive.ql.udf.ptf.WindowingTableFunction.java

@SuppressWarnings("rawtypes")
@Override
public Iterator<Object> iterator(PTFPartitionIterator<Object> pItr) throws HiveException {
    WindowTableFunctionDef wTFnDef = (WindowTableFunctionDef) getTableDef();
    ArrayList<Object> output = new ArrayList<Object>();
    List<?>[] outputFromPivotFunctions = new List<?>[wTFnDef.getWindowFunctions().size()];
    ArrayList<Integer> wFnsWithWindows = new ArrayList<Integer>();
    PTFPartition iPart = pItr.getPartition();

    int i = 0;
    for (WindowFunctionDef wFn : wTFnDef.getWindowFunctions()) {
        boolean processWindow = processWindow(wFn.getWindowFrame());
        pItr.reset();
        if (!processWindow && !wFn.isPivotResult()) {
            Object out = evaluateFunctionOnPartition(wFn, iPart);
            output.add(out);
        } else if (wFn.isPivotResult()) {
            GenericUDAFEvaluator streamingEval = wFn.getWFnEval().getWindowingEvaluator(wFn.getWindowFrame());
            if (streamingEval != null && streamingEval instanceof ISupportStreamingModeForWindowing) {
                ISupportStreamingModeForWindowing strEval = (ISupportStreamingModeForWindowing) streamingEval;
                if (strEval.getRowsRemainingAfterTerminate() == 0) {
                    wFn.setWFnEval(streamingEval);
                    if (wFn.getOI() instanceof ListObjectInspector) {
                        ListObjectInspector listOI = (ListObjectInspector) wFn.getOI();
                        wFn.setOI(listOI.getListElementObjectInspector());
                    }
                    output.add(null);
                    wFnsWithWindows.add(i);
                } else {
                    outputFromPivotFunctions[i] = (List) evaluateFunctionOnPartition(wFn, iPart);
                    output.add(null);
                }
            } else {
                outputFromPivotFunctions[i] = (List) evaluateFunctionOnPartition(wFn, iPart);
                output.add(null);
            }
        } else {
            output.add(null);
            wFnsWithWindows.add(i);
        }
        i++;
    }

    for (i = 0; i < iPart.getOutputOI().getAllStructFieldRefs().size(); i++) {
        output.add(null);
    }

    if (wTFnDef.getRankLimit() != -1) {
        rnkLimitDef = new RankLimit(wTFnDef.getRankLimit(), wTFnDef.getRankLimitFunction(),
                wTFnDef.getWindowFunctions());
    }

    return new WindowingIterator(iPart, output, outputFromPivotFunctions,
            ArrayUtils.toPrimitive(wFnsWithWindows.toArray(new Integer[wFnsWithWindows.size()])));
}

From source file: org.apache.hadoop.mapred.NetCDFInputFormat.java

private NetCDFInfo getNetCDFInfo(Path file, FileSystem fs, JobConf job) {
    //traverse header and return chunk start and size arrays
    NetCDFInfo result = new NetCDFInfo();//library call

    NetcdfFile ncFile;
    Variable v;
    ncFile = null;
    try {
        //if( file == null ){
        //System.out.println( "[SAMAN] NetCDFInputFormat.getNetCDFInfo  file is null" );
        //LOG.info( "[SAMAN] NetCDFInputFormat.getNetCDFInfo  file is null" );
        //}else{
        //System.out.println( "[SAMAN] NetCDFInputFormat.getNetCDFInfo file is " + file.toString() );      
        //LOG.info( "[SAMAN] NetCDFInputFormat.getNetCDFInfo  file is null" );
        //}   
        ncFile = NetcdfDataset.openFile(file.toString(), null);

        v = ncFile.findVariable("rsut");
        //List<Variable> vs = ncFile.getVariables();
        //v = vs.get(vs.size()-1);

        //LOG.info("Variable is "+ v.getFullName());
        result.fileSize = ncFile.vfileSize;
        result.recStart = ncFile.vrecStart;
        Long[] metaArray = v.reallyReadMeta().toArray(new Long[(int) (ncFile.vnumRecs)]);
        result.chunkStarts = ArrayUtils.toPrimitive(metaArray);
        //result.chunkSizes = nc.chunkSizes;
        result.numRecs = ncFile.vnumRecs;
        result.recSize = ncFile.vrecSize;
        result.smallRecSize = ncFile.vsmallRecSize;
        //result.shape = v.shape;

    } catch (Exception e) {
        LOG.info("Bad... " + e);
        System.out.println("Bad... " + e);
    }
    try {
        if (ncFile != null)
            ncFile.close();
    } catch (Exception e) {
        LOG.info("Bad2... " + e);
        System.out.println("Bad2... " + e);
    }

    return result;
}

From source file: org.apache.hadoop.mapred.NetCDFInputFormatAllToMemoryPruneInMemoryNoMultiSplit.java

private NetCDFInfo getNetCDFInfo(Path file, FileSystem fs, JobConf job) {
    //traverse header and return chunk start and size arrays
    NetCDFInfo result = new NetCDFInfo();//library call

    NetcdfFile ncFile;
    Variable v;
    Variable time;
    Variable lat;
    Variable lon;
    ncFile = null;
    try {
        //if( file == null ){
        //System.out.println( "[SAMAN] NetCDFInputFormat.getNetCDFInfo  file is null" );
        //LOG.info( "[SAMAN] NetCDFInputFormat.getNetCDFInfo  file is null" );
        //}else{
        //System.out.println( "[SAMAN] NetCDFInputFormat.getNetCDFInfo file is " + file.toString() );
        //LOG.info( "[SAMAN] NetCDFInputFormat.getNetCDFInfo  file is null" );
        //}
        ncFile = NetcdfDataset.openFile(file.toString(), null);

        v = ncFile.findVariable("rsut");
        time = ncFile.findVariable("time");
        lat = ncFile.findVariable("lat");
        lon = ncFile.findVariable("lon");

        //List<Variable> vs = ncFile.getVariables();
        //v = vs.get(vs.size()-1);

        //LOG.info("Variable is "+ v.getFullName());
        result.fileSize = ncFile.vfileSize;
        result.recStart = ncFile.vrecStart;
        Long[] metaArray = v.reallyReadMeta().toArray(new Long[(int) (ncFile.vnumRecs)]);
        result.chunkStarts = ArrayUtils.toPrimitive(metaArray);
        //result.chunkSizes = nc.chunkSizes;
        result.numRecs = ncFile.vnumRecs;
        result.recSize = ncFile.vrecSize;
        result.smallRecSize = ncFile.vsmallRecSize;
        result.timeLength = (int) (time.getSize());
        result.latLength = (int) (lat.getSize());
        result.lonLength = (int) (lon.getSize());
        //result.shape = v.shape;

    } catch (Exception e) {
        LOG.info("Bad... " + e);
        System.out.println("Bad... " + e);
    }
    try {
        if (ncFile != null)
            ncFile.close();
    } catch (Exception e) {
        LOG.info("Bad2... " + e);
        System.out.println("Bad2... " + e);
    }

    return result;
}

From source file: org.apache.hadoop.mapred.NetCDFInputFormatDynamicReadSize.java

private NetCDFInfo getNetCDFInfo(Path file, FileSystem fs, JobConf job) {
    //traverse header and return chunk start and size arrays
    NetCDFInfo result = new NetCDFInfo();//library call

    NetcdfFile ncFile;
    Variable v;
    ncFile = null;
    try {
        //if( file == null ){
        //System.out.println( "[SAMAN] NetCDFInputFormat.getNetCDFInfo  file is null" );
        //LOG.info( "[SAMAN] NetCDFInputFormat.getNetCDFInfo  file is null" );
        //}else{
        //System.out.println( "[SAMAN] NetCDFInputFormat.getNetCDFInfo file is " + file.toString() );
        //LOG.info( "[SAMAN] NetCDFInputFormat.getNetCDFInfo  file is null" );
        //}
        ncFile = NetcdfDataset.openFile(file.toString(), null);

        v = ncFile.findVariable("rsut");
        //List<Variable> vs = ncFile.getVariables();
        //v = vs.get(vs.size()-1);

        //LOG.info("Variable is "+ v.getFullName());
        result.fileSize = ncFile.vfileSize;
        result.recStart = ncFile.vrecStart;
        Long[] metaArray = v.reallyReadMeta().toArray(new Long[(int) (ncFile.vnumRecs)]);
        result.chunkStarts = ArrayUtils.toPrimitive(metaArray);
        //result.chunkSizes = nc.chunkSizes;
        result.numRecs = ncFile.vnumRecs;
        result.recSize = ncFile.vrecSize;
        result.smallRecSize = ncFile.vsmallRecSize;
        //result.shape = v.shape;

    } catch (Exception e) {
        LOG.info("Bad... " + e);
        System.out.println("Bad... " + e);
    }
    try {
        if (ncFile != null)
            ncFile.close();
    } catch (Exception e) {
        LOG.info("Bad2... " + e);
        System.out.println("Bad2... " + e);
    }

    return result;
}