Example usage for java.nio DoubleBuffer wrap

Introduction

This page collects usage examples for java.nio.DoubleBuffer.wrap.

Prototype

public static DoubleBuffer wrap(double[] array) 

Document

Creates a new double buffer by wrapping the given double array.
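
The returned buffer is backed by the given array: its capacity and limit equal array.length, its position is zero, and changes made through the buffer are visible in the array (and the other way around). A companion overload, wrap(double[] array, int offset, int length), wraps only a sub-range. A minimal sketch of this behavior:

    double[] data = { 1.0, 2.0, 3.0 };
    DoubleBuffer buf = DoubleBuffer.wrap(data);

    buf.put(0, 42.0);                         // absolute put writes through to data[0]
    System.out.println(data[0]);              // 42.0
    System.out.println(buf.hasArray());       // true: the buffer is array-backed
    System.out.println(buf.array() == data);  // true: array() returns the wrapped array itself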

Usage

From source file:Main.java

public static void main(String[] args) {
    DoubleBuffer bb = DoubleBuffer.wrap(new double[] { 98765, 98765 });

    bb.rewind();

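    // array() returns the array that was wrapped, so this prints [98765.0, 98765.0]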
    System.out.println(Arrays.toString(bb.array()));
}

From source file:ffx.potential.nonbonded.SpatialDensityRegion.java

/**
 * <p>
 * Constructor for SpatialDensityRegion.</p>
 *
 * @param gX an int.
 * @param gY an int.
 * @param gZ an int.
 * @param grid an array of double.
 * @param basisSize an int.
 * @param nSymm an int.
 * @param minWork an int.
 * @param threadCount an int.
 * @param crystal a {@link ffx.crystal.Crystal} object.
 * @param atoms an array of {@link ffx.potential.bonded.Atom} objects.
 * @param coordinates an array of double.
 */
public SpatialDensityRegion(int gX, int gY, int gZ, double grid[], int basisSize, int nSymm, int minWork,
        int threadCount, Crystal crystal, Atom atoms[], double coordinates[][][]) {
    this(gX, gY, gZ, basisSize, nSymm, minWork, threadCount, crystal, atoms, coordinates);
    this.grid = grid;
    if (grid != null) {
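        // wrap() does not copy: gridBuffer is a view over the same storage as grid.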
        gridBuffer = DoubleBuffer.wrap(grid);
    }
}

From source file:edu.iu.daal_cov.COVDaalCollectiveMapper.java

private void runCOV(List<String> trainingDataFiles, Configuration conf, Context context) throws IOException {

    //set thread number used in DAAL
    LOG.info("The default value of thread numbers in DAAL: " + Environment.getNumberOfThreads());
    Environment.setNumberOfThreads(numThreads);
    LOG.info("The current value of thread numbers in DAAL: " + Environment.getNumberOfThreads());

    ts_start = System.currentTimeMillis();

    ts1 = System.currentTimeMillis();
    // extracting points from csv files
    List<double[]> pointArrays = COVUtil.loadPoints(trainingDataFiles, pointsPerFile, vectorSize, conf,
            harpThreads);
    ts2 = System.currentTimeMillis();
    load_time += (ts2 - ts1);

    // converting data to Numeric Table
    ts1 = System.currentTimeMillis();

    long nFeature = vectorSize;
    long nLabel = 1;
    long totalLengthFeature = 0;

    long[] array_startP_feature = new long[pointArrays.size()];
    double[][] array_data_feature = new double[pointArrays.size()][];

    for (int k = 0; k < pointArrays.size(); k++) {
        array_data_feature[k] = pointArrays.get(k);
        array_startP_feature[k] = totalLengthFeature;
        totalLengthFeature += pointArrays.get(k).length;
    }

    long featuretableSize = totalLengthFeature / nFeature;

    //initializing Numeric Table

    NumericTable featureArray_daal = new HomogenNumericTable(daal_Context, Double.class, nFeature,
            featuretableSize, NumericTable.AllocationFlag.DoAllocate);

    int row_idx_feature = 0;
    int row_len_feature = 0;

    for (int k = 0; k < pointArrays.size(); k++) {
        row_len_feature = (array_data_feature[k].length) / (int) nFeature;
        //release data from Java side to native side
        ((HomogenNumericTable) featureArray_daal).releaseBlockOfRows(row_idx_feature, row_len_feature,
                DoubleBuffer.wrap(array_data_feature[k]));
        row_idx_feature += row_len_feature;
    }

    ts2 = System.currentTimeMillis();
    convert_time += (ts2 - ts1);

    Table<ByteArray> partialResultTable = new Table<>(0, new ByteArrPlus());

    computeOnLocalNode(featureArray_daal, partialResultTable);
    if (this.isMaster()) {
        computeOnMasterNode(partialResultTable);
        HomogenNumericTable covariance = (HomogenNumericTable) result.get(ResultId.covariance);
        HomogenNumericTable mean = (HomogenNumericTable) result.get(ResultId.mean);
        Service.printNumericTable("Covariance matrix:", covariance);
        Service.printNumericTable("Mean vector:", mean);
    }

    daal_Context.dispose();

    ts_end = System.currentTimeMillis();
    total_time = (ts_end - ts_start);

    LOG.info("Total Execution Time of Cov: " + total_time);
    LOG.info("Loading Data Time of Cov: " + load_time);
    LOG.info("Computation Time of Cov: " + compute_time);
    LOG.info("Comm Time of Cov: " + comm_time);
    LOG.info("DataType Convert Time of Cov: " + convert_time);
    LOG.info("Misc Time of Cov: " + (total_time - load_time - compute_time - comm_time - convert_time));
}

From source file:edu.iu.daal_mom.MOMDaalCollectiveMapper.java

private void runMOM(List<String> trainingDataFiles, Configuration conf, Context context) throws IOException {

    //set thread number used in DAAL
    LOG.info("The default value of thread numbers in DAAL: " + Environment.getNumberOfThreads());
    Environment.setNumberOfThreads(numThreads);
    LOG.info("The current value of thread numbers in DAAL: " + Environment.getNumberOfThreads());

    ts_start = System.currentTimeMillis();

    ts1 = System.currentTimeMillis();
    // extracting points from csv files
    List<double[]> pointArrays = MOMUtil.loadPoints(trainingDataFiles, pointsPerFile, vectorSize, conf,
            harpThreads);
    ts2 = System.currentTimeMillis();
    load_time += (ts2 - ts1);

    // converting data to Numeric Table
    ts1 = System.currentTimeMillis();
    long nFeature = vectorSize;
    long nLabel = 1;
    long totalLengthFeature = 0;

    long[] array_startP_feature = new long[pointArrays.size()];
    double[][] array_data_feature = new double[pointArrays.size()][];

    for (int k = 0; k < pointArrays.size(); k++) {
        array_data_feature[k] = pointArrays.get(k);
        array_startP_feature[k] = totalLengthFeature;
        totalLengthFeature += pointArrays.get(k).length;
    }

    long featuretableSize = totalLengthFeature / nFeature;

    //initializing Numeric Table
    NumericTable featureArray_daal = new HomogenNumericTable(daal_Context, Double.class, nFeature,
            featuretableSize, NumericTable.AllocationFlag.DoAllocate);

    int row_idx_feature = 0;
    int row_len_feature = 0;

    for (int k = 0; k < pointArrays.size(); k++) {
        row_len_feature = (array_data_feature[k].length) / (int) nFeature;
        //release data from Java side to native side
        ((HomogenNumericTable) featureArray_daal).releaseBlockOfRows(row_idx_feature, row_len_feature,
                DoubleBuffer.wrap(array_data_feature[k]));
        row_idx_feature += row_len_feature;
    }
    ts2 = System.currentTimeMillis();
    convert_time += (ts2 - ts1);

    Table<ByteArray> partialResultTable = new Table<>(0, new ByteArrPlus());

    computeOnLocalNode(featureArray_daal, partialResultTable);
    if (this.isMaster()) {
        computeOnMasterNode(partialResultTable);
        printResults(result);

    }

    daal_Context.dispose();

    ts_end = System.currentTimeMillis();
    total_time = (ts_end - ts_start);

    LOG.info("Total Execution Time of MOM: " + total_time);
    LOG.info("Loading Data Time of MOM: " + load_time);
    LOG.info("Computation Time of MOM: " + compute_time);
    LOG.info("Comm Time of MOM: " + comm_time);
    LOG.info("DataType Convert Time of MOM: " + convert_time);
    LOG.info("Misc Time of MOM: " + (total_time - load_time - compute_time - comm_time - convert_time));
}

From source file:edu.iu.daal_qr.QRDaalCollectiveMapper.java

private void runQR(List<String> trainingDataFiles, Configuration conf, Context context) throws IOException {

    ts_start = System.currentTimeMillis();

    //set thread number used in DAAL
    LOG.info("The default value of thread numbers in DAAL: " + Environment.getNumberOfThreads());
    Environment.setNumberOfThreads(numThreads);
    LOG.info("The current value of thread numbers in DAAL: " + Environment.getNumberOfThreads());

    ts1 = System.currentTimeMillis();
    // extracting points from csv files
    List<double[]> pointArrays = QRUtil.loadPoints(trainingDataFiles, pointsPerFile, vectorSize, conf,
            harpThreads);

    ts2 = System.currentTimeMillis();
    load_time += (ts2 - ts1);

    // converting data to Numeric Table
    ts1 = System.currentTimeMillis();

    long nFeature = vectorSize;
    long nLabel = 1;
    long totalLengthFeature = 0;

    long[] array_startP_feature = new long[pointArrays.size()];
    double[][] array_data_feature = new double[pointArrays.size()][];

    for (int k = 0; k < pointArrays.size(); k++) {
        array_data_feature[k] = pointArrays.get(k);
        array_startP_feature[k] = totalLengthFeature;
        totalLengthFeature += pointArrays.get(k).length;
    }

    long featuretableSize = totalLengthFeature / nFeature;

    //initializing Numeric Table

    NumericTable featureArray_daal = new HomogenNumericTable(daal_Context, Double.class, nFeature,
            featuretableSize, NumericTable.AllocationFlag.DoAllocate);

    int row_idx_feature = 0;
    int row_len_feature = 0;

    for (int k = 0; k < pointArrays.size(); k++) {
        row_len_feature = (array_data_feature[k].length) / (int) nFeature;
        //release data from Java side to native side
        ((HomogenNumericTable) featureArray_daal).releaseBlockOfRows(row_idx_feature, row_len_feature,
                DoubleBuffer.wrap(array_data_feature[k]));
        row_idx_feature += row_len_feature;
    }

    ts2 = System.currentTimeMillis();
    convert_time += (ts2 - ts1);

    qrStep1Local = new DistributedStep1Local(daal_Context, Float.class, Method.defaultDense);
    qrStep1Local.input.set(InputId.data, featureArray_daal);
    DistributedStep1LocalPartialResult pres = qrStep1Local.compute();
    dataFromStep1ForStep2 = pres.get(PartialResultId.outputOfStep1ForStep2);
    dataFromStep1ForStep3 = pres.get(PartialResultId.outputOfStep1ForStep3);

    ts1 = System.currentTimeMillis();

    Table<ByteArray> partialStep12 = new Table<>(0, new ByteArrPlus());
    partialStep12
            .addPartition(new Partition<>(this.getSelfID(), serializePartialResult(dataFromStep1ForStep2)));
    System.out
            .println("number of partition in partialresult before reduce :" + partialStep12.getNumPartitions());
    boolean reduceStatus = false;
    reduceStatus = this.reduce("nn", "sync-partialresult", partialStep12, this.getMasterID());

    if (!reduceStatus) {
        System.out.println("reduce not successful");
    } else {
        System.out.println("reduce successful");
    }

    System.out
            .println("number of partition in partialresult after reduce :" + partialStep12.getNumPartitions());

    ts2 = System.currentTimeMillis();
    comm_time += (ts2 - ts1);

    Table<ByteArray> partialStep32 = new Table<>(0, new ByteArrPlus());
    System.out.println(
            "number of partition in partialstep32 before broadcast :" + partialStep32.getNumPartitions());

    System.out.println("self id : " + this.getSelfID());

    if (this.isMaster()) {

        qrStep2Master = new DistributedStep2Master(daal_Context, Float.class, Method.defaultDense);

        System.out.println("this is a master node");
        int[] pid = partialStep12.getPartitionIDs().toIntArray();

        ts1 = System.currentTimeMillis();
        for (int j = 0; j < pid.length; j++) {
            try {
                System.out.println("pid : " + pid[j]);
                qrStep2Master.input.add(DistributedStep2MasterInputId.inputOfStep2FromStep1, pid[j],
                        deserializePartialResult(partialStep12.getPartition(pid[j]).get()));
            } catch (Exception e) {
                System.out.println("Fail to deserilize partialResultTable" + e.toString());
                e.printStackTrace();
            }
        }

        ts2 = System.currentTimeMillis();
        comm_time += (ts2 - ts1);

        ts1 = System.currentTimeMillis();
        DistributedStep2MasterPartialResult presStep2 = qrStep2Master.compute();

        inputForStep3FromStep2 = presStep2.get(DistributedPartialResultCollectionId.outputOfStep2ForStep3);

        for (int j = 0; j < pid.length; j++) {
            partialStep32.addPartition(
                    new Partition<>(j, serializePartialResult((DataCollection) inputForStep3FromStep2.get(j))));
        }

        Result result = qrStep2Master.finalizeCompute();
        R = result.get(ResultId.matrixR);

        ts2 = System.currentTimeMillis();
        compute_time += (ts2 - ts1);
    }

    boolean isSuccess = broadcast("main", "broadcast-partialStep32", partialStep32, 0, false);
    if (isSuccess) {
        System.out.println("broadcast successful");
    } else {
        System.out.println("broadcast not successful");
    }

    System.out.println(
            "number of partition in partialstep32 after broadcast :" + partialStep32.getNumPartitions());

    qrStep3Local = new DistributedStep3Local(daal_Context, Float.class, Method.defaultDense);
    qrStep3Local.input.set(DistributedStep3LocalInputId.inputOfStep3FromStep1, dataFromStep1ForStep3);

    ts1 = System.currentTimeMillis();

    try {
        qrStep3Local.input.set(DistributedStep3LocalInputId.inputOfStep3FromStep2,
                deserializePartialResult(partialStep32.getPartition(this.getSelfID()).get()));
    } catch (Exception e) {
        System.out.println("Fail to deserilize partialResultTable" + e.toString());
        e.printStackTrace();
    }

    ts2 = System.currentTimeMillis();
    comm_time += (ts2 - ts1);

    ts1 = System.currentTimeMillis();
    qrStep3Local.compute();
    Result result = qrStep3Local.finalizeCompute();

    ts2 = System.currentTimeMillis();
    compute_time += (ts2 - ts1);

    Qi = result.get(ResultId.matrixQ);
    System.out.println("number of rows" + Qi.getNumberOfRows());
    System.out.println("number of columns" + Qi.getNumberOfColumns());

    Table<ByteArray> resultNT = new Table<>(0, new ByteArrPlus());

    Service.printNumericTable("Orthogonal matrix Q (10 first vectors):", Qi, 10);
    if (this.isMaster()) {
        Service.printNumericTable("Triangular matrix R:", R);
    }

    ts_end = System.currentTimeMillis();
    total_time = (ts_end - ts_start);

    LOG.info("Total Execution Time of QR: " + total_time);
    LOG.info("Loading Data Time of QR: " + load_time);
    LOG.info("Computation Time of QR: " + compute_time);
    LOG.info("Comm Time of QR: " + comm_time);
    LOG.info("DataType Convert Time of QR: " + convert_time);
    LOG.info("Misc Time of QR: " + (total_time - load_time - compute_time - comm_time - convert_time));

}

From source file:edu.iu.daal_linreg.LinRegDaalCollectiveMapper.java

private void runLinReg(List<String> trainingDataFiles, Configuration conf, Context context) throws IOException {

    ts_start = System.currentTimeMillis();

    ts1 = System.currentTimeMillis();
    // extracting points from csv files
    List<List<double[]>> pointArrays = LinRegUtil.loadPoints(trainingDataFiles, pointsPerFile, vectorSize,
            nDependentVariables, conf, harpThreads);
    List<double[]> featurePoints = new LinkedList<>();
    for (int i = 0; i < pointArrays.size(); i++) {
        featurePoints.add(pointArrays.get(i).get(0));
    }
    List<double[]> labelPoints = new LinkedList<>();
    for (int i = 0; i < pointArrays.size(); i++) {
        labelPoints.add(pointArrays.get(i).get(1));
    }

    ts2 = System.currentTimeMillis();
    load_time += (ts2 - ts1);

    // converting data to Numeric Table
    ts1 = System.currentTimeMillis();

    long nFeature = vectorSize;
    long nLabel = nDependentVariables;
    long totalLengthFeature = 0;
    long totalLengthLabel = 0;

    long[] array_startP_feature = new long[pointArrays.size()];
    double[][] array_data_feature = new double[pointArrays.size()][];
    long[] array_startP_label = new long[labelPoints.size()];
    double[][] array_data_label = new double[labelPoints.size()][];

    for (int k = 0; k < featurePoints.size(); k++) {
        array_data_feature[k] = featurePoints.get(k);
        array_startP_feature[k] = totalLengthFeature;
        totalLengthFeature += featurePoints.get(k).length;
    }

    for (int k = 0; k < labelPoints.size(); k++) {
        array_data_label[k] = labelPoints.get(k);
        array_startP_label[k] = totalLengthLabel;
        totalLengthLabel += labelPoints.get(k).length;
    }

    long featuretableSize = totalLengthFeature / nFeature;
    long labeltableSize = totalLengthLabel / nLabel;

    //initializing Numeric Table

    NumericTable featureArray_daal = new HomogenNumericTable(daal_Context, Double.class, nFeature,
            featuretableSize, NumericTable.AllocationFlag.DoAllocate);
    NumericTable labelArray_daal = new HomogenNumericTable(daal_Context, Double.class, nLabel, labeltableSize,
            NumericTable.AllocationFlag.DoAllocate);

    int row_idx_feature = 0;
    int row_len_feature = 0;

    for (int k = 0; k < featurePoints.size(); k++) {
        row_len_feature = (array_data_feature[k].length) / (int) nFeature;
        //release data from Java side to native side
        ((HomogenNumericTable) featureArray_daal).releaseBlockOfRows(row_idx_feature, row_len_feature,
                DoubleBuffer.wrap(array_data_feature[k]));
        row_idx_feature += row_len_feature;
    }

    int row_idx_label = 0;
    int row_len_label = 0;

    for (int k = 0; k < labelPoints.size(); k++) {
        row_len_label = (array_data_label[k].length) / (int) nLabel;
        //release data from Java side to native side
        ((HomogenNumericTable) labelArray_daal).releaseBlockOfRows(row_idx_label, row_len_label,
                DoubleBuffer.wrap(array_data_label[k]));
        row_idx_label += row_len_label;
    }

    ts2 = System.currentTimeMillis();
    convert_time += (ts2 - ts1);

    Service.printNumericTable("featureArray_daal", featureArray_daal, 5,
            featureArray_daal.getNumberOfColumns());
    Service.printNumericTable("labelArray_daal", labelArray_daal, 5, labelArray_daal.getNumberOfColumns());

    Table<ByteArray> partialResultTable = new Table<>(0, new ByteArrPlus());

    trainModel(featureArray_daal, labelArray_daal, partialResultTable);
    if (this.isMaster()) {
        testModel(testFilePath, conf);
        printResults(testGroundTruth, predictionResult, conf);
    }

    daal_Context.dispose();

    ts_end = System.currentTimeMillis();
    total_time = (ts_end - ts_start);

    LOG.info("Total Execution Time of LinReg: " + total_time);
    LOG.info("Loading Data Time of LinReg: " + load_time);
    LOG.info("Computation Time of LinReg: " + compute_time);
    LOG.info("Comm Time of LinReg: " + comm_time);
    LOG.info("DataType Convert Time of LinReg: " + convert_time);
    LOG.info("Misc Time of LinReg: " + (total_time - load_time - compute_time - comm_time - convert_time));
}

From source file:edu.iu.daal_ridgereg.RidgeRegDaalCollectiveMapper.java

private void runRidgeReg(List<String> trainingDataFiles, Configuration conf, Context context)
        throws IOException {

    ts_start = System.currentTimeMillis();

    ts1 = System.currentTimeMillis();
    // extracting points from csv files
    List<List<double[]>> pointArrays = RidgeRegUtil.loadPoints(trainingDataFiles, pointsPerFile, vectorSize,
            nDependentVariables, conf, harpThreads);
    List<double[]> featurePoints = new LinkedList<>();
    for (int i = 0; i < pointArrays.size(); i++) {
        featurePoints.add(pointArrays.get(i).get(0));
    }
    List<double[]> labelPoints = new LinkedList<>();
    for (int i = 0; i < pointArrays.size(); i++) {
        labelPoints.add(pointArrays.get(i).get(1));
    }

    ts2 = System.currentTimeMillis();
    load_time += (ts2 - ts1);

    // converting data to Numeric Table
    ts1 = System.currentTimeMillis();

    long nFeature = vectorSize;
    long nLabel = nDependentVariables;
    long totalLengthFeature = 0;
    long totalLengthLabel = 0;

    long[] array_startP_feature = new long[pointArrays.size()];
    double[][] array_data_feature = new double[pointArrays.size()][];
    long[] array_startP_label = new long[labelPoints.size()];
    double[][] array_data_label = new double[labelPoints.size()][];

    for (int k = 0; k < featurePoints.size(); k++) {
        array_data_feature[k] = featurePoints.get(k);
        array_startP_feature[k] = totalLengthFeature;
        totalLengthFeature += featurePoints.get(k).length;
    }

    for (int k = 0; k < labelPoints.size(); k++) {
        array_data_label[k] = labelPoints.get(k);
        array_startP_label[k] = totalLengthLabel;
        totalLengthLabel += labelPoints.get(k).length;
    }

    long featuretableSize = totalLengthFeature / nFeature;
    long labeltableSize = totalLengthLabel / nLabel;

    //initializing Numeric Table

    NumericTable featureArray_daal = new HomogenNumericTable(daal_Context, Double.class, nFeature,
            featuretableSize, NumericTable.AllocationFlag.DoAllocate);
    NumericTable labelArray_daal = new HomogenNumericTable(daal_Context, Double.class, nLabel, labeltableSize,
            NumericTable.AllocationFlag.DoAllocate);

    int row_idx_feature = 0;
    int row_len_feature = 0;

    for (int k = 0; k < featurePoints.size(); k++) {
        row_len_feature = (array_data_feature[k].length) / (int) nFeature;
        //release data from Java side to native side
        ((HomogenNumericTable) featureArray_daal).releaseBlockOfRows(row_idx_feature, row_len_feature,
                DoubleBuffer.wrap(array_data_feature[k]));
        row_idx_feature += row_len_feature;
    }

    int row_idx_label = 0;
    int row_len_label = 0;

    for (int k = 0; k < labelPoints.size(); k++) {
        row_len_label = (array_data_label[k].length) / (int) nLabel;
        //release data from Java side to native side
        ((HomogenNumericTable) labelArray_daal).releaseBlockOfRows(row_idx_label, row_len_label,
                DoubleBuffer.wrap(array_data_label[k]));
        row_idx_label += row_len_label;
    }

    ts2 = System.currentTimeMillis();
    convert_time += (ts2 - ts1);

    Table<ByteArray> partialResultTable = new Table<>(0, new ByteArrPlus());

    trainModel(featureArray_daal, labelArray_daal, partialResultTable);
    if (this.isMaster()) {
        testModel(testFilePath, conf);
        printResults(testGroundTruth, predictionResult, conf);
    }

    daal_Context.dispose();

    ts_end = System.currentTimeMillis();
    total_time = (ts_end - ts_start);

    LOG.info("Total Execution Time of RidgeReg: " + total_time);
    LOG.info("Loading Data Time of RidgeReg: " + load_time);
    LOG.info("Computation Time of RidgeReg: " + compute_time);
    LOG.info("Comm Time of RidgeReg: " + comm_time);
    LOG.info("DataType Convert Time of RidgeReg: " + convert_time);
    LOG.info("Misc Time of RidgeReg: " + (total_time - load_time - compute_time - comm_time - convert_time));
}

From source file:edu.iu.daal_nn.NNDaalCollectiveMapper.java

private void runNN(List<String> trainingDataFiles, Configuration conf, Context context) throws IOException {

    ts_start = System.currentTimeMillis();

    ts1 = System.currentTimeMillis();

    // extracting points from csv files
    List<List<double[]>> pointArrays = NNUtil.loadPoints(trainingDataFiles, pointsPerFile, vectorSize, conf,
            harpThreads);
    List<double[]> featurePoints = new LinkedList<>();
    for (int i = 0; i < pointArrays.size(); i++) {
        featurePoints.add(pointArrays.get(i).get(0));
    }
    List<double[]> labelPoints = new LinkedList<>();
    for (int i = 0; i < pointArrays.size(); i++) {
        labelPoints.add(pointArrays.get(i).get(1));
    }

    ts2 = System.currentTimeMillis();
    load_time += (ts2 - ts1);

    // converting data to Numeric Table
    ts1 = System.currentTimeMillis();

    long nFeature = vectorSize;
    long nLabel = 1;
    long totalLengthFeature = 0;
    long totalLengthLabel = 0;

    long[] array_startP_feature = new long[featurePoints.size()];
    double[][] array_data_feature = new double[featurePoints.size()][];
    long[] array_startP_label = new long[labelPoints.size()];
    double[][] array_data_label = new double[labelPoints.size()][];

    for (int k = 0; k < featurePoints.size(); k++) {
        array_data_feature[k] = featurePoints.get(k);
        array_startP_feature[k] = totalLengthFeature;
        totalLengthFeature += featurePoints.get(k).length;
    }

    for (int k = 0; k < labelPoints.size(); k++) {
        array_data_label[k] = labelPoints.get(k);
        array_startP_label[k] = totalLengthLabel;
        totalLengthLabel += labelPoints.get(k).length;
    }

    long featuretableSize = totalLengthFeature / nFeature;
    long labeltableSize = totalLengthLabel / nLabel;

    //initializing Numeric Table

    NumericTable featureArray_daal = new HomogenNumericTable(daal_Context, Double.class, nFeature,
            featuretableSize, NumericTable.AllocationFlag.DoAllocate);
    NumericTable labelArray_daal = new HomogenNumericTable(daal_Context, Double.class, nLabel, labeltableSize,
            NumericTable.AllocationFlag.DoAllocate);

    int row_idx_feature = 0;
    int row_len_feature = 0;

    for (int k = 0; k < featurePoints.size(); k++) {
        row_len_feature = (array_data_feature[k].length) / (int) nFeature;
        //release data from Java side to native side
        ((HomogenNumericTable) featureArray_daal).releaseBlockOfRows(row_idx_feature, row_len_feature,
                DoubleBuffer.wrap(array_data_feature[k]));
        row_idx_feature += row_len_feature;
    }

    int row_idx_label = 0;
    int row_len_label = 0;

    for (int k = 0; k < labelPoints.size(); k++) {
        row_len_label = (array_data_label[k].length) / (int) nLabel;
        //release data from Java side to native side
        ((HomogenNumericTable) labelArray_daal).releaseBlockOfRows(row_idx_label, row_len_label,
                DoubleBuffer.wrap(array_data_label[k]));
        row_idx_label += row_len_label;
    }

    featureTensorInit = Service.readTensorFromNumericTable(daal_Context, featureArray_daal, true);
    labelTensorInit = Service.readTensorFromNumericTable(daal_Context, labelArray_daal, true);
    System.out.println("tensor size : " + featureTensorInit.getSize());
    System.out.println("tensor size : " + labelTensorInit.getSize());

    ts2 = System.currentTimeMillis();
    convert_time += (ts2 - ts1);

    initializeNetwork(featureTensorInit, labelTensorInit);
    trainModel(featureTensorInit, labelTensorInit);
    if (this.isMaster()) {
        testModel(conf);
        printResults(conf);
    }

    daal_Context.dispose();

    ts_end = System.currentTimeMillis();
    total_time = (ts_end - ts_start);

    LOG.info("Total Execution Time of NN: " + total_time);
    LOG.info("Loading Data Time of NN: " + load_time);
    LOG.info("Computation Time of NN: " + compute_time);
    LOG.info("Comm Time of NN: " + comm_time);
    LOG.info("DataType Convert Time of NN: " + convert_time);
    LOG.info("Misc Time of NN: " + (total_time - load_time - compute_time - comm_time - convert_time));

}

From source file:edu.iu.daal_naive.NaiveDaalCollectiveMapper.java

private void runNaive(List<String> trainingDataFiles, Configuration conf, Context context) throws IOException {

    ts1 = System.currentTimeMillis();

    // extracting points from csv files
    List<List<double[]>> pointArrays = NaiveUtil.loadPoints(trainingDataFiles, pointsPerFile, vectorSize, conf,
            harpThreads);

    List<double[]> featurePoints = new LinkedList<>();
    List<double[]> labelPoints = new LinkedList<>();

    //divide data into chunks for harp to daal conversion
    List<double[]> ConvertPoints = new LinkedList<>();

    // long total_point_dataSize = 
    for (int i = 0; i < pointArrays.size(); i++) {

        for (int j = 0; j < pointArrays.get(i).size() - 1; j++)
            featurePoints.add(pointArrays.get(i).get(j));

        labelPoints.add(pointArrays.get(i).get(pointArrays.get(i).size() - 1));
    }

    int total_point = featurePoints.size();
    long total_train_size = total_point * vectorSize * 8;
    // long convert_unit_size = 2*1024*1024*1024; //2GB each conversion container  
    long convert_unit_size = 250 * 1024 * 1024; //250MB each conversion container
    int point_per_conversion = (int) (convert_unit_size / (vectorSize * 8));
    // int num_conversion = (total_point + (point_per_conversion - 1))/point_per_conversion;
    // aggregate points 
    int convert_p = 0;
    int convert_pos = 0;
    while (total_point > 0) {
        convert_p = (point_per_conversion > total_point) ? total_point : point_per_conversion;
        total_point -= convert_p;

        double[] convert_data = new double[convert_p * vectorSize];
        for (int j = 0; j < convert_p; j++) {
            System.arraycopy(featurePoints.get(convert_pos + j), 0, convert_data, j * vectorSize, vectorSize);
        }

        ConvertPoints.add(convert_data);
        convert_pos += convert_p;
    }

    testData = getNumericTableHDFS(daal_Context, conf, testFilePath, vectorSize, num_test);

    ts2 = System.currentTimeMillis();
    load_time += (ts2 - ts1);

    // start effective execution (exclude loading time)
    ts_start = System.currentTimeMillis();

    // converting data to Numeric Table
    ts1 = System.currentTimeMillis();

    long nFeature = vectorSize;
    long nLabel = 1;
    long totalLengthFeature = 0;
    long totalLengthLabel = 0;

    long[] array_startP_feature = new long[ConvertPoints.size()];
    double[][] array_data_feature = new double[ConvertPoints.size()][];
    long[] array_startP_label = new long[labelPoints.size()];
    double[][] array_data_label = new double[labelPoints.size()][];

    for (int k = 0; k < ConvertPoints.size(); k++) {
        array_data_feature[k] = ConvertPoints.get(k);
        array_startP_feature[k] = totalLengthFeature;
        totalLengthFeature += ConvertPoints.get(k).length;
    }

    for (int k = 0; k < labelPoints.size(); k++) {
        array_data_label[k] = labelPoints.get(k);
        array_startP_label[k] = totalLengthLabel;
        totalLengthLabel += labelPoints.get(k).length;
    }

    long featuretableSize = totalLengthFeature / nFeature;
    long labeltableSize = totalLengthLabel / nLabel;

    //initializing Numeric Table
    NumericTable featureArray_daal = new HomogenNumericTable(daal_Context, Double.class, nFeature,
            featuretableSize, NumericTable.AllocationFlag.DoAllocate);
    NumericTable labelArray_daal = new HomogenNumericTable(daal_Context, Double.class, nLabel, labeltableSize,
            NumericTable.AllocationFlag.DoAllocate);

    int row_idx_feature = 0;
    int row_len_feature = 0;

    for (int k = 0; k < ConvertPoints.size(); k++) {
        row_len_feature = (array_data_feature[k].length) / (int) nFeature;
        //release data from Java side to native side
        ((HomogenNumericTable) featureArray_daal).releaseBlockOfRows(row_idx_feature, row_len_feature,
                DoubleBuffer.wrap(array_data_feature[k]));
        row_idx_feature += row_len_feature;
    }

    int row_idx_label = 0;
    int row_len_label = 0;

    for (int k = 0; k < labelPoints.size(); k++) {
        row_len_label = (array_data_label[k].length) / (int) nLabel;
        //release data from Java side to native side
        ((HomogenNumericTable) labelArray_daal).releaseBlockOfRows(row_idx_label, row_len_label,
                DoubleBuffer.wrap(array_data_label[k]));
        row_idx_label += row_len_label;
    }

    ts2 = System.currentTimeMillis();
    convert_time += (ts2 - ts1);

    Table<ByteArray> partialResultTable = new Table<>(0, new ByteArrPlus());

    trainModel(featureArray_daal, labelArray_daal, partialResultTable);
    if (this.isMaster()) {
        testModel(testFilePath, conf);
        printResults(testGroundTruth, predictionResult, conf);
    }

    this.barrier("naive", "testmodel-sync");

    daal_Context.dispose();

    ts_end = System.currentTimeMillis();
    total_time = (ts_end - ts_start);

    LOG.info("Loading Data Time of Naive: " + load_time);
    LOG.info("Total Execution Time of Naive: " + total_time);
    LOG.info("Computation Time of Naive: " + compute_time);
    LOG.info("Comm Time of Naive: " + comm_time);
    LOG.info("DataType Convert Time of Naive: " + convert_time);
    LOG.info("Misc Time of Naive: " + (total_time - compute_time - comm_time - convert_time));
}

From source file:ffx.potential.nonbonded.ReciprocalSpace.java

private double initConvolution() {

    /**
     * Store the current reciprocal space grid dimensions.
     */
    int fftXCurrent = fftX;
    int fftYCurrent = fftY;
    int fftZCurrent = fftZ;

    double density = forceField.getDouble(ForceFieldDouble.PME_MESH_DENSITY, 1.2);

    int nX = forceField.getInteger(ForceFieldInteger.PME_GRID_X, -1);
    if (nX < 2) {
        nX = (int) Math.floor(crystal.a * density) + 1;
        if (nX % 2 != 0) {
            nX += 1;
        }
        while (!Complex.preferredDimension(nX)) {
            nX += 2;
        }
    }
    int nY = forceField.getInteger(ForceFieldInteger.PME_GRID_Y, -1);
    if (nY < 2) {
        nY = (int) Math.floor(crystal.b * density) + 1;
        if (nY % 2 != 0) {
            nY += 1;
        }
        while (!Complex.preferredDimension(nY)) {
            nY += 2;
        }
    }
    int nZ = forceField.getInteger(ForceFieldInteger.PME_GRID_Z, -1);
    if (nZ < 2) {
        nZ = (int) Math.floor(crystal.c * density) + 1;
        if (nZ % 2 != 0) {
            nZ += 1;
        }
        while (!Complex.preferredDimension(nZ)) {
            nZ += 2;
        }
    }

    fftX = nX;
    fftY = nY;
    fftZ = nZ;

    /**
     * Populate the matrix that fractionalizes multipoles.
     */
    transformMultipoleMatrix();
    /**
     * Populate the matrix that convert fractional potential components into
     * orthogonal Cartesian coordinates.
     */
    transformFieldMatrix();
    /**
     * Compute the Cartesian to fractional matrix.
     */
    for (int i = 0; i < 3; i++) {
        a[0][i] = fftX * crystal.A[i][0];
        a[1][i] = fftY * crystal.A[i][1];
        a[2][i] = fftZ * crystal.A[i][2];
    }

    fftSpace = fftX * fftY * fftZ * 2;
    boolean dimChanged = fftX != fftXCurrent || fftY != fftYCurrent || fftZ != fftZCurrent;

    switch (fftMethod) {
    case PJ:
        if (pjFFT3D == null || dimChanged) {
            pjFFT3D = new Complex3DParallel(fftX, fftY, fftZ, fftTeam, recipSchedule);
            if (splineGrid == null || splineGrid.length < fftSpace) {
                splineGrid = new double[fftSpace];
            }
            splineBuffer = DoubleBuffer.wrap(splineGrid);
        }
        pjFFT3D.setRecip(generalizedInfluenceFunction());
        cudaFFT3D = null;
        clFFT3D = null;
        gpuThread = null;
        break;
    case CUDA:
        if (cudaFFT3D == null || dimChanged) {
            if (cudaFFT3D != null) {
                cudaFFT3D.free();
            }
            cudaFFT3D = new Complex3DCuda(fftX, fftY, fftZ);
            gpuThread = new Thread(cudaFFT3D);
            gpuThread.setPriority(Thread.MAX_PRIORITY);
            gpuThread.start();
            splineBuffer = cudaFFT3D.getDoubleBuffer();
        }
        cudaFFT3D.setRecip(generalizedInfluenceFunction());
        pjFFT3D = null;
        clFFT3D = null;
        break;
    case OPENCL:
        if (clFFT3D == null || dimChanged) {
            if (clFFT3D != null) {
                clFFT3D.free();
            }
            clFFT3D = new Complex3DOpenCL(fftX, fftY, fftZ);
            gpuThread = new Thread(clFFT3D);
            gpuThread.setPriority(Thread.MAX_PRIORITY);
            gpuThread.start();
            splineBuffer = clFFT3D.getDoubleBuffer();
        }
        clFFT3D.setRecip(generalizedInfluenceFunction());
        pjFFT3D = null;
        cudaFFT3D = null;
        break;
    }

    switch (gridMethod) {
    case SPATIAL:
        if (spatialDensityRegion == null || dimChanged) {
            spatialDensityRegion = new SpatialDensityRegion(fftX, fftY, fftZ, splineGrid, bSplineOrder, nSymm,
                    10, threadCount, crystal, atoms, coordinates);
            if (fftMethod != FFTMethod.PJ) {
                spatialDensityRegion.setGridBuffer(splineBuffer);
            }
        } else {
            spatialDensityRegion.setCrystal(crystal, fftX, fftY, fftZ);
            spatialDensityRegion.coordinates = coordinates;
        }
        break;
    case ROW:
        if (rowRegion == null || dimChanged) {
            rowRegion = new RowRegion(fftX, fftY, fftZ, splineGrid, bSplineOrder, nSymm, threadCount, crystal,
                    atoms, coordinates);
            if (fftMethod != FFTMethod.PJ) {
                rowRegion.setGridBuffer(splineBuffer);
            }
        } else {
            rowRegion.setCrystal(crystal, fftX, fftY, fftZ);
            rowRegion.coordinates = coordinates;
        }

        break;
    case SLICE:
    default:
        if (sliceRegion == null || dimChanged) {
            sliceRegion = new SliceRegion(fftX, fftY, fftZ, splineGrid, bSplineOrder, nSymm, threadCount,
                    crystal, atoms, coordinates);
            if (fftMethod != FFTMethod.PJ) {
                sliceRegion.setGridBuffer(splineBuffer);
            }
        } else {
            sliceRegion.setCrystal(crystal, fftX, fftY, fftZ);
            sliceRegion.coordinates = coordinates;
        }
    }
    return density;
}