Example usage for org.apache.hadoop.conf Configuration setDouble

List of usage examples for org.apache.hadoop.conf Configuration setDouble

Introduction

This page collects example usages of the org.apache.hadoop.conf.Configuration method setDouble.

Prototype

public void setDouble(String name, double value) 

Document

Set the value of the name property to a double.
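
For orientation before the full driver examples below, here is a minimal round-trip sketch; the property name similarity.threshold is invented for illustration. setDouble stores the value under the given property name, and getDouble(name, defaultValue) reads it back, returning the default only when the property is unset.

import org.apache.hadoop.conf.Configuration;

public class SetDoubleExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();

        // Store a double-valued property (illustrative name).
        conf.setDouble("similarity.threshold", 0.75);

        // Read it back; 0.0 would only be returned if the property were unset.
        double threshold = conf.getDouble("similarity.threshold", 0.0);
        System.out.println("threshold = " + threshold);
    }
}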

Usage

From source file:clustering.inverted_index.Driver.java

License:Apache License

@Override
public int run(String[] args) throws Exception {
    if (args.length < 2) {
        System.err.printf("usage: %s tf_idf_result_dir output_dir" + "[decimal_number] [pruning_threshold]\n",
                getClass().getSimpleName());
        System.exit(1);
    }

    Path normDir = new Path(args[1] + "/normed");
    Path resultDir = new Path(args[1] + "/result");

    Configuration conf = getConf();
    conf = MapReduceUtils.initConf(conf);

    if (args.length > 2) {
        conf.setInt("deci.number", Integer.valueOf(args[2]));
    } else {
        conf.setInt("deci.number", 4);
    }

    if (args.length > 3) {
        conf.setBoolean("pruning", true);
        conf.setDouble("pruning.threshold", Double.valueOf(args[3]));
    } else {
        conf.setBoolean("pruning", false);
    }

    JobControl jobControl = new JobControl("inverted-index jobs");

    /* step 1, normalize the vector length of each document */

    Job job1 = Job.getInstance(conf, "tf idf normalizer job");
    job1.setJarByClass(Driver.class);

    FileInputFormat.addInputPath(job1, new Path(args[0]));
    job1.setInputFormatClass(KeyValueTextInputFormat.class);

    job1.setMapperClass(Mapper.class);

    job1.setReducerClass(NormalizerReducer.class);
    job1.setOutputKeyClass(Text.class);
    job1.setOutputValueClass(Text.class);

    FileOutputFormat.setOutputPath(job1, normDir);

    ControlledJob controlledJob1 = new ControlledJob(conf);
    controlledJob1.setJob(job1);
    jobControl.addJob(controlledJob1);

    /* step 2, calculate inverted index */

    Job job2 = Job.getInstance(conf, "inverted index job");
    job2.setJarByClass(Driver.class);

    FileInputFormat.addInputPath(job2, normDir);

    job2.setInputFormatClass(KeyValueTextInputFormat.class);

    job2.setMapperClass(Mapper.class);

    job2.setReducerClass(InvertedIndexReducer.class);
    job2.setOutputKeyClass(Text.class);
    job2.setOutputValueClass(Text.class);

    FileOutputFormat.setOutputPath(job2, resultDir);

    ControlledJob controlledJob2 = new ControlledJob(conf);
    controlledJob2.setJob(job2);
    controlledJob2.addDependingJob(controlledJob1);
    jobControl.addJob(controlledJob2);

    MapReduceUtils.runJobs(jobControl);

    return job2.waitForCompletion(true) ? 0 : 1;
}
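
The driver above only writes the pruning and pruning.threshold properties into the Configuration; the consuming reducer is not shown on this page. A minimal reducer-side sketch of reading those properties back in setup() might look like the following (the class name PruningAwareReducer is hypothetical, not the project's InvertedIndexReducer):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class PruningAwareReducer extends Reducer<Text, Text, Text, Text> {
    private boolean pruning;
    private double pruningThreshold;

    @Override
    protected void setup(Context context) {
        Configuration conf = context.getConfiguration();
        // Mirror the driver's setBoolean/setDouble calls; the defaults apply
        // only when the driver left these properties unset.
        pruning = conf.getBoolean("pruning", false);
        pruningThreshold = conf.getDouble("pruning.threshold", 0.0);
    }
}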

From source file:clustering.mst.Driver.java

License:Apache License

@Override
public int run(String[] args) throws Exception {
    if (args.length < 3) {
        System.err.printf("usage: %s similarity_result_dir document_count_file output_dir "
                + "[cluster_threshold] [reduce_number] [compression]\n", getClass().getSimpleName());
        System.exit(1);
    }

    Path step1_OutputDir = new Path(args[2] + "/step1");
    Path resultDir = new Path(args[2] + "/result");

    URI docCntFile = new URI(args[1] + "/part-r-00000#docCnt");

    Configuration conf = getConf();
    conf = MapReduceUtils.initConf(conf);

    if (args.length > 3) {
        conf.setDouble("final.threshold", Double.valueOf(args[3]));
    } else {
        conf.setDouble("final.threshold", 0.2d);
    }
    if (args.length > 4) {
        conf.setInt("reduce.task.num", Integer.valueOf(args[4]));
    } else {
        conf.setInt("reduce.task.num", 5);
    }

    JobControl jobControl = new JobControl("mst jobs");

    /* step 1, split and calculate the child msts */

    Job childJob = Job.getInstance(conf, "mst child job");
    childJob.setJarByClass(Driver.class);

    childJob.addCacheFile(docCntFile);

    if (args.length > 5 && args[5].equals("0")) {
        FileInputFormat.addInputPath(childJob, new Path(args[0]));
        childJob.setInputFormatClass(KeyValueTextInputFormat.class);
    } else {
        SequenceFileInputFormat.addInputPath(childJob, new Path(args[0]));
        childJob.setInputFormatClass(SequenceFileAsTextInputFormat.class);
    }

    FileOutputFormat.setOutputPath(childJob, step1_OutputDir);

    childJob.setMapperClass(ChildMapper.class);
    childJob.setMapOutputKeyClass(DoubleWritable.class);
    childJob.setMapOutputValueClass(Text.class);

    childJob.setPartitionerClass(ChildPartitioner.class);

    childJob.setReducerClass(ChildReducer.class);
    childJob.setNumReduceTasks(conf.getInt("reduce.task.num", 1));
    childJob.setOutputKeyClass(DoubleWritable.class);
    childJob.setOutputValueClass(Text.class);

    ControlledJob controlledChildJob = new ControlledJob(conf);
    controlledChildJob.setJob(childJob);
    jobControl.addJob(controlledChildJob);

    /* step 2, merge step 1's output and calculate final mst */

    Job finalJob = Job.getInstance(conf, "mst final job");
    finalJob.setJarByClass(FinalReducer.class);

    finalJob.addCacheFile(docCntFile);

    FileInputFormat.addInputPath(finalJob, step1_OutputDir);
    finalJob.setInputFormatClass(KeyValueTextInputFormat.class);

    finalJob.setMapperClass(FinalMapper.class);
    finalJob.setMapOutputKeyClass(DoubleWritable.class);
    finalJob.setMapOutputValueClass(Text.class);

    finalJob.setReducerClass(FinalReducer.class);
    finalJob.setOutputKeyClass(IntWritable.class);
    finalJob.setOutputValueClass(IntWritable.class);

    FileOutputFormat.setOutputPath(finalJob, resultDir);

    ControlledJob finalControlledJob = new ControlledJob(conf);
    finalControlledJob.setJob(finalJob);
    finalControlledJob.addDependingJob(controlledChildJob);
    jobControl.addJob(finalControlledJob);

    // run jobs

    MapReduceUtils.runJobs(jobControl);

    return finalJob.waitForCompletion(true) ? 0 : 1;
}

From source file:clustering.tf_idf.WorkflowDriver.java

License:Apache License

@Override
public int run(String[] args) throws Exception {
    if (args.length < 2) {
        System.err.printf("usage: %s simhash_result_dir output_dir " + "[gname_weight]\n",
                getClass().getSimpleName());
        System.exit(1);
    }

    String docCntDir = args[1] + "/docCnt";
    String step1_outputDir = args[1] + "/step1";
    String step2_outputDir = args[1] + "/step2";
    String step3_outputDir = args[1] + "/result";

    Configuration conf = getConf();
    conf = initConf(conf);

    JobControl jobControl = new JobControl("tf-idf jobs");

    /* pre step, count documents number in the corpus */
    DocCntDriver docCntDriver = new DocCntDriver();
    String[] preJobArgs = new String[2];
    preJobArgs[0] = args[0];
    preJobArgs[1] = docCntDir;

    Job preJob = docCntDriver.configJob(preJobArgs);

    ControlledJob controlledPreJob = new ControlledJob(conf);
    controlledPreJob.setJob(preJob);
    jobControl.addJob(controlledPreJob);

    /* step 1, calculate term count of each document */
    TermCntDriver termCntDriver = new TermCntDriver();
    String[] job1Args = new String[2];
    job1Args[0] = args[0];
    job1Args[1] = step1_outputDir;
    Job job1 = termCntDriver.configJob(job1Args);

    ControlledJob controlledJob1 = new ControlledJob(conf);
    controlledJob1.setJob(job1);
    jobControl.addJob(controlledJob1);

    /* step 2, calculate the term frequency of each document */
    TermFreqDriver termFreqDriver = new TermFreqDriver();

    String gnameWeight = args.length > 2 ? args[2] : "1.0";
    conf.setDouble("gname.weight", Double.valueOf(gnameWeight));

    String[] job2Args = args.length > 2 ? new String[3] : new String[2];
    job2Args[0] = step1_outputDir;
    job2Args[1] = step2_outputDir;
    if (args.length > 2) {
        job2Args[2] = args[2];
    }
    Job job2 = termFreqDriver.configJob(job2Args);

    ControlledJob controlledJob2 = new ControlledJob(conf);
    controlledJob2.setJob(job2);
    controlledJob2.addDependingJob(controlledJob1);
    jobControl.addJob(controlledJob2);

    /* step 3, calculate tf_idf */
    TF_IDF_Driver tf_idf_driver = new TF_IDF_Driver();
    String[] job3Args = new String[3];
    job3Args[0] = docCntDir;
    job3Args[1] = step2_outputDir;
    job3Args[2] = step3_outputDir;
    Job job3 = tf_idf_driver.configJob(job3Args);

    ControlledJob controlledJob3 = new ControlledJob(conf);
    controlledJob3.setJob(job3);
    controlledJob3.addDependingJob(controlledJob2);
    controlledJob3.addDependingJob(controlledPreJob);

    jobControl.addJob(controlledJob3);

    // run jobs
    runJobs(jobControl);

    return job3.waitForCompletion(true) ? 0 : 1;
}

From source file:com.splicemachine.test.SpliceTestPlatformConfig.java

License:Apache License

public static Configuration create(String hbaseRootDirUri, Integer masterPort, Integer masterInfoPort,
        Integer regionServerPort, Integer regionServerInfoPort, Integer derbyPort, boolean failTasksRandomly) {

    Configuration config = HConfiguration.unwrapDelegate();

    config.set(SQLConfiguration.STORAGE_FACTORY_HOME, hbaseRootDirUri);

    //
    // Coprocessors
    //
    config.set("hbase.coprocessor.regionserver.classes", getRegionServerCoprocessorsAsString());
    config.set("hbase.coprocessor.region.classes", getRegionCoprocessorsAsString());
    config.set("hbase.coprocessor.master.classes", getMasterCoprocessorsAsString());

    //
    // Networking
    //
    config.set("hbase.zookeeper.quorum", "127.0.0.1:2181");
    config.setInt("hbase.master.port", masterPort);
    config.setInt("hbase.master.info.port", masterInfoPort);
    config.setInt("hbase.regionserver.port", regionServerPort);
    config.setInt("hbase.regionserver.info.port", regionServerInfoPort);
    config.setInt("hbase.master.jmx.port", HConfiguration.DEFAULT_JMX_BIND_PORT); // this is set because the HBase master and regionserver are running on the same machine and in the same JVM
    config.setInt(SQLConfiguration.NETWORK_BIND_PORT, derbyPort);
    config.setClass(DefaultStoreEngine.DEFAULT_COMPACTOR_CLASS_KEY, SpliceDefaultCompactor.class,
            Compactor.class);
    // config.setClass(ConsistencyControlUtils.MVCC_IMPL, SIMultiVersionConsistencyControl.class, ConsistencyControl.class);
    config.setClass(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY, SpliceDefaultCompactionPolicy.class,
            CompactionPolicy.class);

    //
    // Networking -- interfaces
    //
    // force use of loop back interface on MacOSX, else don't set it
    //        if (System.getProperty("os.name").contains("Mac") ) {
    //            String interfaceName = "lo0";
    //            config.set("hbase.zookeeper.dns.interface", interfaceName);
    //            config.set("hbase.master.dns.interface", interfaceName);
    //            config.set("hbase.regionserver.dns.interface", interfaceName);
    //        }

    //
    // File System
    //
    config.set("fs.defaultFS", "file:///"); // MapR Hack, tells it local filesystem // fs.default.name is deprecated
    config.set(FileSystem.FS_DEFAULT_NAME_KEY, "file:///");
    config.setDouble("yarn.nodemanager.resource.io-spindles", 2.0);
    config.set("fs.default.name", "file:///");
    config.set("yarn.nodemanager.container-executor.class",
            "org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor");

    // Must allow Cygwin instance to config its own rootURI
    if (!"CYGWIN".equals(hbaseRootDirUri)) {
        config.set("hbase.rootdir", hbaseRootDirUri);
    }

    //
    // Threads, timeouts
    //
    config.setLong("hbase.rpc.timeout", MINUTES.toMillis(2));
    config.setLong("hbase.client.scanner.timeout.period", MINUTES.toMillis(2)); // hbase.regionserver.lease.period is deprecated
    config.setLong("hbase.client.operation.timeout", MINUTES.toMillis(2));
    config.setLong("hbase.regionserver.handler.count", 200);
    config.setLong("hbase.regionserver.msginterval", 1000);
    config.setLong("hbase.master.event.waiting.time", 20);
    config.setLong("hbase.master.lease.thread.wakefrequency", SECONDS.toMillis(3));
    //        config.setBoolean("hbase.master.loadbalance.bytable",true);
    config.setInt("hbase.balancer.period", 5000);

    config.setLong("hbase.server.thread.wakefrequency", SECONDS.toMillis(1));
    config.setLong("hbase.client.pause", 100);

    //
    // Compaction Controls
    //
    config.setLong("hbase.hstore.compaction.min", 5); // min number of eligible files before we compact
    config.setLong("hbase.hstore.compaction.max", 10); // max files to be selected for a single minor compaction
    config.setLong("hbase.hstore.compaction.min.size", 16 * MiB); // store files smaller than this will always be eligible for minor compaction.  HFiles this size or larger are evaluated by hbase.hstore.compaction.ratio to determine if they are eligible
    config.setLong("hbase.hstore.compaction.max.size", 248 * MiB); // store files larger than this will be excluded from compaction
    config.setFloat("hbase.hstore.compaction.ratio", 1.25f); // default is 1.2f, at one point we had this set to 0.25f and 25f (which was likely a typo)

    //
    // Memstore, store files, splits
    //
    config.setLong(HConstants.HREGION_MAX_FILESIZE, 32 * MiB); // hbase.hregion.max.filesize
    config.setLong("hbase.hregion.memstore.flush.size", 128 * MiB); // was 512 MiB
    config.setLong("hbase.hregion.memstore.block.multiplier", 4);
    config.setFloat("hbase.regionserver.global.memstore.size", 0.25f); // set mem store to 25% of heap
    config.setLong("hbase.hstore.blockingStoreFiles", 20);
    //        config.set("hbase.regionserver.region.split.policy", "org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy"); // change default split policy.  this makes more sense for a standalone/single regionserver

    // Support SI
    //config.setClass(HConstants.MVCC_IMPL, SIMultiVersionConsistencyControl.class, ConsistencyControl.class);

    //
    // HFile
    //
    config.setInt("hfile.index.block.max.size", 16 * 1024); // 16KiB
    config.setFloat("hfile.block.cache.size", 0.25f); // set block cache to 25% of heap
    config.setFloat("io.hfile.bloom.error.rate", (float) 0.005);
    config.setBoolean(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, true); // hfile.block.bloom.cacheonwrite
    //config.set("hbase.master.hfilecleaner.plugins", getHFileCleanerAsString());
    config.set("hbase.master.hfilecleaner.plugins", getHFileCleanerAsString());
    //
    // Misc
    //
    config.set("hbase.cluster.distributed", "true"); // don't start zookeeper for us
    config.set("hbase.master.distributed.log.splitting", "false"); // TODO: explain why we are setting this

    // AWS Credentials for test...
    //

    config.set(ACCESS_KEY, "AKIAJ6HBMCK5ALHVBFPQ");
    config.set(SECRET_KEY, "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");

    //
    // Splice
    //

    config.setLong("splice.ddl.drainingWait.maximum", SECONDS.toMillis(15)); // wait 15 seconds before bailing on bad ddl statements
    config.setLong("splice.ddl.maxWaitSeconds", 120000);
    //
    // Snapshots
    //
    config.setBoolean("hbase.snapshot.enabled", true);

    HConfiguration.reloadConfiguration(config);
    return HConfiguration.unwrapDelegate();
}

From source file:edu.iu.ccd.CCDLauncher.java

License:Apache License

private Job configureCCDJob(Path inputDir, int r, double lambda, int numIterations, int numMapTasks,
        int numThreadsPerWorker, int numModelSlices, Path modelDir, Path outputDir, String testFilePath,
        Configuration configuration, int jobID) throws IOException, URISyntaxException {
    configuration.setInt(Constants.R, r);
    configuration.setDouble(Constants.LAMBDA, lambda);
    configuration.setInt(Constants.NUM_ITERATIONS, numIterations);
    configuration.setInt(Constants.NUM_THREADS, numThreadsPerWorker);
    System.out.println("Model Dir Path: " + modelDir.toString());
    configuration.set(Constants.MODEL_DIR, modelDir.toString());
    configuration.setInt(Constants.NUM_MODEL_SLICES, numModelSlices);
    configuration.set(Constants.TEST_FILE_PATH, testFilePath);
    Job job = Job.getInstance(configuration, "ccd_job_" + jobID);
    JobConf jobConf = (JobConf) job.getConfiguration();
    jobConf.set("mapreduce.framework.name", "map-collective");
    jobConf.setNumMapTasks(numMapTasks);
    jobConf.setInt("mapreduce.job.max.split.locations", 10000);
    FileInputFormat.setInputPaths(job, inputDir);
    FileOutputFormat.setOutputPath(job, outputDir);
    job.setInputFormatClass(MultiFileInputFormat.class);
    job.setJarByClass(CCDLauncher.class);
    job.setMapperClass(CCDMPCollectiveMapper.class);
    job.setNumReduceTasks(0);
    return job;
}

From source file:edu.iu.daal_als.ALSDaalLauncher.java

License:Apache License

/**
 * Launches ALS workers.
 */
@Override
public int run(String[] args) throws Exception {

    /* Put shared libraries into the distributed cache */
    Configuration conf = this.getConf();

    Initialize init = new Initialize(conf, args);

    /* Put shared libraries into the distributed cache */
    init.loadDistributedLibs();

    // load args
    init.loadSysArgs();

    conf.setInt(HarpDAALConstants.NUM_FACTOR, Integer.parseInt(args[init.getSysArgNum()]));
    conf.setDouble(Constants.ALPHA, Double.parseDouble(args[init.getSysArgNum() + 1]));
    conf.setDouble(Constants.LAMBDA, Double.parseDouble(args[init.getSysArgNum() + 2]));
    conf.set(HarpDAALConstants.TEST_FILE_PATH, args[init.getSysArgNum() + 3]);

    // launch job
    System.out.println("Starting Job");
    long perJobSubmitTime = System.currentTimeMillis();
    System.out.println(
            "Start Job#" + " " + new SimpleDateFormat("HH:mm:ss.SSS").format(Calendar.getInstance().getTime()));

    Job alsJob = init.createJob("alsJob", ALSDaalLauncher.class, ALSDaalCollectiveMapper.class);

    // finish job
    boolean jobSuccess = alsJob.waitForCompletion(true);
    System.out.println(
            "End Job#" + " " + new SimpleDateFormat("HH:mm:ss.SSS").format(Calendar.getInstance().getTime()));
    System.out.println(
            "| Job#" + " Finished in " + (System.currentTimeMillis() - perJobSubmitTime) + " miliseconds |");
    if (!jobSuccess) {
        alsJob.killJob();
        System.out.println("alsJob failed");
    }

    return 0;
}

From source file:edu.iu.daal_ar.Aprior.ARDaalLauncher.java

License:Apache License

/**
 * Launches all the tasks in order.
 */
@Override
public int run(String[] args) throws Exception {

    /* Put shared libraries into the distributed cache */
    Configuration conf = this.getConf();

    Initialize init = new Initialize(conf, args);

    /* Put shared libraries into the distributed cache */
    init.loadDistributedLibs();

    // load args
    init.loadSysArgs();

    //load app args
    conf.setInt(HarpDAALConstants.FILE_DIM, Integer.parseInt(args[init.getSysArgNum()]));
    conf.setDouble(Constants.MIN_SUPPORT, Double.parseDouble(args[init.getSysArgNum() + 1]));
    conf.setDouble(Constants.MIN_CONFIDENCE, Double.parseDouble(args[init.getSysArgNum() + 2]));

    // launch job
    System.out.println("Starting Job");
    long perJobSubmitTime = System.currentTimeMillis();
    System.out.println(
            "Start Job#" + " " + new SimpleDateFormat("HH:mm:ss.SSS").format(Calendar.getInstance().getTime()));

    Job arbatchJob = init.createJob("arbatchJob", ARDaalLauncher.class, ARDaalCollectiveMapper.class);

    // finish job
    boolean jobSuccess = arbatchJob.waitForCompletion(true);
    System.out.println(
            "End Job#" + " " + new SimpleDateFormat("HH:mm:ss.SSS").format(Calendar.getInstance().getTime()));
    System.out.println(
            "| Job#" + " Finished in " + (System.currentTimeMillis() - perJobSubmitTime) + " miliseconds |");
    if (!jobSuccess) {
        arbatchJob.killJob();
        System.out.println("ArBatchJob Job failed");
    }

    return 0;
}

From source file:edu.iu.daal_kernel_func.LinCSRBatch.LinCSRDaalLauncher.java

License:Apache License

/**
 * Launches all the tasks in order.
 */
@Override
public int run(String[] args) throws Exception {

    /* Put shared libraries into the distributed cache */
    Configuration conf = this.getConf();

    Initialize init = new Initialize(conf, args);

    /* Put shared libraries into the distributed cache */
    init.loadDistributedLibs();

    // load args
    init.loadSysArgs();

    //load app args
    conf.setDouble(Constants.K, Double.parseDouble(args[init.getSysArgNum()]));
    conf.setDouble(Constants.B, Double.parseDouble(args[init.getSysArgNum() + 1]));
    conf.set(Constants.RIGHT_FILE_PATH, args[init.getSysArgNum() + 2]);

    // launch job
    System.out.println("Starting Job");
    long perJobSubmitTime = System.currentTimeMillis();
    System.out.println(
            "Start Job#" + " " + new SimpleDateFormat("HH:mm:ss.SSS").format(Calendar.getInstance().getTime()));

    Job kernellinJob = init.createJob("kernellinJob", LinCSRDaalLauncher.class,
            LinCSRDaalCollectiveMapper.class);

    // finish job
    boolean jobSuccess = kernellinJob.waitForCompletion(true);
    System.out.println(
            "End Job#" + " " + new SimpleDateFormat("HH:mm:ss.SSS").format(Calendar.getInstance().getTime()));
    System.out.println(
            "| Job#" + " Finished in " + (System.currentTimeMillis() - perJobSubmitTime) + " miliseconds |");
    if (!jobSuccess) {
        kernellinJob.killJob();
        System.out.println("kernellinJob failed");
    }

    return 0;
}

From source file:edu.iu.daal_kernel_func.LinDenseBatch.LinDenseDaalLauncher.java

License:Apache License

/**
 * Launches all the tasks in order.
 */
@Override
public int run(String[] args) throws Exception {

    /* Put shared libraries into the distributed cache */
    Configuration conf = this.getConf();

    Initialize init = new Initialize(conf, args);

    /* Put shared libraries into the distributed cache */
    init.loadDistributedLibs();

    // load args
    init.loadSysArgs();

    //load app args
    conf.setInt(HarpDAALConstants.FEATURE_DIM, Integer.parseInt(args[init.getSysArgNum()]));
    conf.setInt(HarpDAALConstants.FILE_DIM, Integer.parseInt(args[init.getSysArgNum() + 1]));
    conf.setDouble(Constants.K, Double.parseDouble(args[init.getSysArgNum() + 2]));
    conf.setDouble(Constants.B, Double.parseDouble(args[init.getSysArgNum() + 3]));
    conf.set(Constants.RIGHT_FILE_PATH, args[init.getSysArgNum() + 4]);

    // launch job
    System.out.println("Starting Job");
    long perJobSubmitTime = System.currentTimeMillis();
    System.out.println(
            "Start Job#" + " " + new SimpleDateFormat("HH:mm:ss.SSS").format(Calendar.getInstance().getTime()));

    Job kernellinJob = init.createJob("kernellinJob", LinDenseDaalLauncher.class,
            LinDenseDaalCollectiveMapper.class);

    // finish job
    boolean jobSuccess = kernellinJob.waitForCompletion(true);
    System.out.println(
            "End Job#" + " " + new SimpleDateFormat("HH:mm:ss.SSS").format(Calendar.getInstance().getTime()));
    System.out.println(
            "| Job#" + " Finished in " + (System.currentTimeMillis() - perJobSubmitTime) + " miliseconds |");
    if (!jobSuccess) {
        kernellinJob.killJob();
        System.out.println("kernellinJob failed");
    }

    return 0;
}

From source file:edu.iu.daal_kernel_func.RbfCSRBatch.RbfCSRDaalLauncher.java

License:Apache License

/**
 * Launches all the tasks in order.
 */
@Override
public int run(String[] args) throws Exception {

    /* Put shared libraries into the distributed cache */
    Configuration conf = this.getConf();

    Initialize init = new Initialize(conf, args);

    /* Put shared libraries into the distributed cache */
    init.loadDistributedLibs();

    // load args
    init.loadSysArgs();

    //load app args
    conf.setDouble(Constants.SIGMA, Double.parseDouble(args[init.getSysArgNum()]));
    conf.set(Constants.RIGHT_FILE_PATH, args[init.getSysArgNum() + 1]);

    // launch job
    System.out.println("Starting Job");
    long perJobSubmitTime = System.currentTimeMillis();
    System.out.println(
            "Start Job#" + " " + new SimpleDateFormat("HH:mm:ss.SSS").format(Calendar.getInstance().getTime()));

    Job kernelRbfJob = init.createJob("kernelRbfJob", RbfCSRDaalLauncher.class,
            RbfCSRDaalCollectiveMapper.class);

    // finish job
    boolean jobSuccess = kernelRbfJob.waitForCompletion(true);
    System.out.println(
            "End Job#" + " " + new SimpleDateFormat("HH:mm:ss.SSS").format(Calendar.getInstance().getTime()));
    System.out.println(
            "| Job#" + " Finished in " + (System.currentTimeMillis() - perJobSubmitTime) + " miliseconds |");
    if (!jobSuccess) {
        kernelRbfJob.killJob();
        System.out.println("kernelRbfJob failed");
    }

    return 0;
}