Example usage for org.apache.commons.lang ArrayUtils addAll

Introduction

On this page you can find example usage for org.apache.commons.lang ArrayUtils addAll.

Prototype

public static double[] addAll(double[] array1, double[] array2) 

Document

Adds all the elements of the given arrays into a new array. ArrayUtils.addAll is overloaded for Object[] and for every primitive array type; if one argument is null, a copy of the other array is returned, and if both are null the result is null. The examples below use the Object[] and byte[] overloads.
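
For quick reference, here is a minimal, self-contained sketch of the call; the class name and values are illustrative rather than taken from the examples below.

import java.util.Arrays;

import org.apache.commons.lang.ArrayUtils;

public class AddAllSketch {
    public static void main(String[] args) {
        // Object[] overload: the allocated array takes its component type from
        // the first argument, so the result can be cast back to String[].
        String[] merged = (String[]) ArrayUtils.addAll(new String[] { "a", "b" }, new String[] { "c" });
        System.out.println(Arrays.toString(merged)); // [a, b, c]

        // Primitive overload matching the prototype above.
        double[] nums = ArrayUtils.addAll(new double[] { 1.0 }, new double[] { 2.0, 3.0 });
        System.out.println(Arrays.toString(nums)); // [1.0, 2.0, 3.0]

        // Null handling: a null argument is treated as empty, so a copy of the
        // other array is returned; addAll(null, null) yields null.
        double[] copy = ArrayUtils.addAll(null, new double[] { 4.0 });
        System.out.println(Arrays.toString(copy)); // [4.0]
    }
}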

Usage

From source file:org.apache.phoenix.end2end.AlterTableWithViewsIT.java

public void assertTableDefinition(Connection conn, String tableName, PTableType tableType,
        String parentTableName, int sequenceNumber, int columnCount, int baseColumnCount, String... columnNames)
        throws Exception {
    int delta = isMultiTenant ? 1 : 0;
    String[] cols;
    if (isMultiTenant) {
        cols = (String[]) ArrayUtils.addAll(new String[] { "TENANT_ID" }, columnNames);
    } else {
        cols = columnNames;
    }
    AlterMultiTenantTableWithViewsIT.assertTableDefinition(conn, tableName, tableType, parentTableName,
            sequenceNumber, columnCount + delta,
            baseColumnCount == QueryConstants.BASE_TABLE_BASE_COLUMN_COUNT ? baseColumnCount
                    : baseColumnCount + delta,
            cols);
}
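
Note the (String[]) cast above: the Object[] overload of addAll is declared to return Object[], but the array it allocates takes its component type from the first argument, so the cast back to the more specific type succeeds.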

From source file:org.apache.reef.io.network.group.impl.task.OperatorTopologyStructImpl.java

/**
 * Receive data from all children as a single byte array.
 * Messages from children are simply byte-concatenated.
 * This method is currently used only by the Gather operator.
 *
 * @return gathered data as a byte array
 */
@Override
public byte[] recvFromChildren() {
    LOG.entering("OperatorTopologyStructImpl", "recvFromChildren", getQualifiedName());
    for (final NodeStruct child : children) {
        childrenToRcvFrom.add(child.getId());
    }

    byte[] retVal = new byte[0];
    while (!childrenToRcvFrom.isEmpty()) {
        LOG.finest(getQualifiedName() + "Waiting for some child to send data");
        final NodeStruct child = nodesWithDataTakeUnsafe();
        final byte[] receivedVal = recvFromNodeCheckBigMsg(child,
                ReefNetworkGroupCommProtos.GroupCommMessage.Type.Gather);

        if (receivedVal != null) {
            retVal = ArrayUtils.addAll(retVal, receivedVal);
        }
        childrenToRcvFrom.remove(child.getId());
    }

    LOG.exiting("OperatorTopologyStructImpl", "recvFromChildren", getQualifiedName());
    return retVal;
}
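
As a design note, each ArrayUtils.addAll call here allocates a fresh array and re-copies everything gathered so far, so receiving from many children costs quadratic time in the total payload size; accumulating the chunks in a java.io.ByteArrayOutputStream and concatenating once would avoid the repeated copying.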

From source file:org.apache.spark.simr.Simr.java

public void startDriver() {
    String master_url = "simr://" + conf.get("simr_tmp_dir") + "/" + DRIVERURL;
    String out_dir = conf.get("simr_out_dir");

    String main_class = conf.get("simr_main_class");
    String rest_args = conf.get("simr_rest_args");

    // Replace %spark_url% in params with actual driver location simr://some/hdfs/path
    String[] main_class_args = rest_args.replaceAll("\\%spark_url\\%", master_url).split(" ");

    String[] server_args = new String[] {
            conf.get("simr_tmp_dir") + "/" + RELAYURL, // HDFS location of RelayServer URI
            getLocalIP(), // IP of the host running this driver
            master_url, // SIMR URI, which points to driver
            out_dir, // Location on HDFS to dump driver's stdout and stderr
            main_class // Class to run
    };
    server_args = (String[]) ArrayUtils.addAll(server_args, main_class_args);

    try {
        org.apache.spark.simr.RelayServer.main(server_args);
    } catch (Exception ex) {
        System.out.println(ex);
    }
}

From source file:org.apache.sqoop.test.utils.ParametrizedUtils.java

/**
 * Merge two objects into an array.
 *
 * This method will flatten any argument that is already an array, e.g.:
 *
 * mergeObjects(1, 2) = [1, 2]
 * mergeObjects([1], 2) = [1, 2]
 * mergeObjects(1, [2]) = [1, 2]
 * mergeObjects([1], [2]) = [1, 2]
 *
 * @param o1 first object, or array of objects, to merge
 * @param o2 second object, or array of objects, to merge
 * @return a single flattened array containing the elements of both arguments
 */
public static Object[] mergeObjects(Object o1, Object o2) {
    if (!o1.getClass().isArray() && !o2.getClass().isArray()) {
        return new Object[] { o1, o2 };
    }

    Object[] a1 = toArray(o1);
    Object[] a2 = toArray(o2);

    return ArrayUtils.addAll(a1, a2);
}
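
The toArray helper used here is a private method defined elsewhere in the class and not shown on this page. A minimal sketch consistent with the javadoc above (an assumption, not the actual Sqoop implementation) could be:

private static Object[] toArray(Object o) {
    // Arrays pass through unchanged; any other object is wrapped in a
    // one-element array, which produces the flattening shown in the javadoc.
    return o.getClass().isArray() ? (Object[]) o : new Object[] { o };
}

Note that this sketch would fail for primitive arrays such as int[], which cannot be cast to Object[]; the test utility presumably only ever receives object arrays.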

From source file:org.apache.storm.command.ShellSubmission.java

public static void main(String[] args) throws Exception {
    if (args.length <= 1) {
        LOG.error("Arguments should be of the form: <path_to_jar> [argument...]");
        System.exit(-1);
    }
    Map<String, Object> conf = ConfigUtils.readStormConfig();
    try (NimbusClient client = NimbusClient.getConfiguredClient(conf)) {
        NimbusSummary ns = client.getClient().getLeader();
        String host = ns.get_host();
        int port = ns.get_port();
        String jarPath = StormSubmitter.submitJar(conf, args[0]);
        String[] newArgs = (String[]) ArrayUtils.addAll(Arrays.copyOfRange(args, 1, args.length),
                new String[] { host, String.valueOf(port), jarPath });
        ServerUtils.execCommand(newArgs);
    }
}

From source file:org.apache.sysml.runtime.controlprogram.caching.FrameObject.java

/**
 * Creates a new array which contains the schema of the current
 * frame object concatenated with the schema of the passed frame object.
 *
 * @param fo frame object
 * @return schema of value types
 */
public ValueType[] mergeSchemas(FrameObject fo) {
    return (ValueType[]) ArrayUtils.addAll(
            (_schema != null) ? _schema : UtilFunctions.nCopies((int) getNumColumns(), ValueType.STRING),
            (fo._schema != null) ? fo._schema
                    : UtilFunctions.nCopies((int) fo.getNumColumns(), ValueType.STRING));
}

From source file:org.apache.sysml.runtime.matrix.data.FrameBlock.java

/**
 * Appends a set of columns of value type DOUBLE at the end of the frame
 * in order to avoid repeated allocation with appendColumns. The given
 * array is wrapped but not copied and hence might be updated in the future.
 *
 * @param cols 2d array of doubles
 */
public void appendColumns(double[][] cols) {
    int ncol = cols.length;
    boolean empty = (_schema == null);
    ValueType[] tmpSchema = UtilFunctions.nCopies(ncol, ValueType.DOUBLE);
    Array[] tmpData = new Array[ncol];
    for (int j = 0; j < ncol; j++)
        tmpData[j] = new DoubleArray(cols[j]);
    _colnames = empty ? null
            : (String[]) ArrayUtils.addAll(getColumnNames(), createColNames(getNumColumns(), ncol)); //before schema modification
    _schema = empty ? tmpSchema : (ValueType[]) ArrayUtils.addAll(_schema, tmpSchema);
    _coldata = empty ? tmpData : (Array[]) ArrayUtils.addAll(_coldata, tmpData);
    _numRows = cols[0].length;
}

From source file:org.apache.sysml.runtime.matrix.data.FrameBlock.java

/**
 * Appends the given argument frameblock 'that' to this frameblock by 
 * creating a deep copy to prevent side effects. For cbind, the frames
 * are appended column-wise (same number of rows), while for rbind the 
 * frames are appended row-wise (same number of columns).   
 *
 * @param that frame block to append to current frame block
 * @param ret frame block to return, can be null
 * @param cbind if true, column append
 * @return frame block
 * @throws DMLRuntimeException if DMLRuntimeException occurs
 */
public FrameBlock appendOperations(FrameBlock that, FrameBlock ret, boolean cbind) throws DMLRuntimeException {
    if (cbind) //COLUMN APPEND
    {
        //sanity check row dimension mismatch
        if (getNumRows() != that.getNumRows()) {
            throw new DMLRuntimeException("Incompatible number of rows for cbind: " + that.getNumRows()
                    + " (expected: " + getNumRows() + ")");
        }

        //allocate output frame
        if (ret == null)
            ret = new FrameBlock();
        ret._numRows = _numRows;

        //concatenate schemas (w/ deep copy to prevent side effects)
        ret._schema = (ValueType[]) ArrayUtils.addAll(_schema, that._schema);
        ret._colnames = (String[]) ArrayUtils.addAll(getColumnNames(), that.getColumnNames());
        ret._colmeta = (ColumnMetadata[]) ArrayUtils.addAll(_colmeta, that._colmeta);

        //concatenate column data (w/ deep copy to prevent side effects)
        ret._coldata = (Array[]) ArrayUtils.addAll(_coldata, that._coldata);
        for (int i = 0; i < ret._coldata.length; i++)
            ret._coldata[i] = ret._coldata[i].clone();
    } else //ROW APPEND
    {
        //sanity check column dimension mismatch
        if (getNumColumns() != that.getNumColumns()) {
            throw new DMLRuntimeException("Incompatible number of columns for rbind: " + that.getNumColumns()
                    + " (expected: " + getNumColumns() + ")");
        }

        //allocate output frame (incl deep copy schema)
        if (ret == null)
            ret = new FrameBlock();
        ret._numRows = _numRows;
        ret._schema = _schema.clone();
        ret._colnames = (_colnames != null) ? _colnames.clone() : null;

        //concatenate data (deep copy first, append second)
        ret._coldata = new Array[_coldata.length];
        for (int j = 0; j < _coldata.length; j++)
            ret._coldata[j] = _coldata[j].clone();
        Iterator<Object[]> iter = that.getObjectRowIterator();
        while (iter.hasNext())
            ret.appendRow(iter.next());
    }

    return ret;
}

From source file:org.apache.sysml.runtime.util.UtilFunctions.java

public static ValueType[] copyOf(ValueType[] schema1, ValueType[] schema2) {
    return (ValueType[]) ArrayUtils.addAll(schema1, schema2);
}

From source file:org.apache.tajo.storage.v2.CSVFileScanner.java

private void page() throws IOException {
    // Index initialization
    currentIdx = 0;

    // Buffer size set
    if (isSplittable() && fragmentable() < DEFAULT_BUFFER_SIZE) {
        bufSize = (int) fragmentable();
    }

    if (this.tail == null || this.tail.length == 0) {
        this.pageStart = getFilePosition();
        this.prevTailLen = 0;
    } else {
        this.pageStart = getFilePosition() - this.tail.length;
        this.prevTailLen = this.tail.length;
    }

    // Read
    int rbyte;
    buf = new byte[bufSize];
    rbyte = is.read(buf);

    if (rbyte < 0) {
        eof = true; // EOF
        return;
    }

    if (prevTailLen == 0) {
        tail = new byte[0];
        tuples = BytesUtils.splitPreserveAllTokens(buf, rbyte, (char) LF);
    } else {
        byte[] lastRow = ArrayUtils.addAll(tail, buf);
        tuples = BytesUtils.splitPreserveAllTokens(lastRow, rbyte + tail.length, (char) LF);
        tail = null;
    }

    // Check tail
    if ((char) buf[rbyte - 1] != LF) {
        if ((fragmentable() < 1 || rbyte != bufSize)) {
            int lineFeedPos = 0;
            byte[] temp = new byte[DEFAULT_BUFFER_SIZE];

            // read byte-by-byte until the next line feed
            while ((temp[lineFeedPos] = (byte) is.read()) != (byte) LF) {
                if (temp[lineFeedPos] < 0) {
                    break; // -1 from read() signals end of stream
                }
                lineFeedPos++;
            }

            tuples[tuples.length - 1] = ArrayUtils.addAll(tuples[tuples.length - 1],
                    ArrayUtils.subarray(temp, 0, lineFeedPos));
            validIdx = tuples.length;
        } else {
            tail = tuples[tuples.length - 1];
            validIdx = tuples.length - 1;
        }
    } else {
        tail = new byte[0];
        validIdx = tuples.length - 1;
    }

    if (!isCompress())
        makeTupleOffset();
}