Example usage for org.apache.hadoop.conf.Configuration: Configuration(Configuration other)

Introduction

On this page you can find example usages of the org.apache.hadoop.conf.Configuration copy constructor, Configuration(Configuration other), collected from open-source projects.

Prototype

@SuppressWarnings("unchecked")
public Configuration(Configuration other) 

Document

A new configuration with the same settings cloned from another.
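
A minimal, self-contained sketch of what this copy constructor is for (class name and host are illustrative, not taken from the examples below): clone a shared base configuration so that per-job overrides do not mutate the original.

import org.apache.hadoop.conf.Configuration;

public class CopyConstructorDemo {
    public static void main(String[] args) {
        Configuration base = new Configuration(); // loads core-default.xml and core-site.xml
        base.set("fs.defaultFS", "hdfs://namenode:8020"); // illustrative host

        // Clone via the copy constructor; changes to jobConf stay local to it.
        Configuration jobConf = new Configuration(base);
        jobConf.set("mapreduce.job.reduces", "4");

        System.out.println(base.get("mapreduce.job.reduces")); // prints null: base is unchanged
    }
}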

Usage

From source file:MStress_Client.java

License:Open Source License

public static void main(String args[]) {
    parseOptions(args);
    int result = 0;

    try {
        // true --> load the default resources (core-default.xml, core-site.xml)
        Configuration conf = new Configuration(true);
        String confSet = "hdfs://" + dfsServer_ + ":" + dfsPort_;
        conf.set("fs.default.name", confSet); // deprecated key; fs.defaultFS in current Hadoop
        conf.set("fs.trash.interval", "0"); // disable trash so deletes are immediate
        InetSocketAddress inet = new InetSocketAddress(dfsServer_, dfsPort_);
        dfsClient_ = new DFSClient(inet, conf);

        if (parsePlanFile() < 0) {
            System.exit(-1);
        }

        if (testName_.equals("create")) {
            result = createDFSPaths();
        } else if (testName_.equals("stat")) {
            result = statDFSPaths();
        } else if (testName_.equals("readdir")) {
            result = listDFSPaths();
        } else if (testName_.equals("delete")) {
            result = removeDFSPaths();
        } else {
            System.out.printf("Error: unrecognized test '%s'\n", testName_);
            System.exit(-1);
        }
    } catch (IOException e) {
        e.printStackTrace();
        System.exit(-1);
    }

    if (result != 0) {
        System.exit(-1);
    }

}

From source file:$.ApplicationTest.java

@Test
public void testApplication() throws Exception {
    try {
        LocalMode lma = LocalMode.newInstance();
        Configuration conf = new Configuration(false); // false --> do not load default resources
        conf.addResource(this.getClass().getResourceAsStream("/META-INF/properties.xml"));
        lma.prepareDAG(new Application(), conf);
        LocalMode.Controller lc = lma.getController();
        lc.run(10000); // runs for 10 seconds and quits
    } catch (ConstraintViolationException e) {
        Assert.fail("constraint violations: " + e.getConstraintViolations());
    }
}

From source file:accumulo.AccumuloStuff.java

License:Apache License

private static void setCoreSite(MiniAccumuloClusterImpl cluster) throws Exception {
    File csFile = new File(cluster.getConfig().getConfDir(), "core-site.xml");
    if (csFile.exists())
        throw new RuntimeException(csFile + " already exists");

    Configuration coreSite = new Configuration(false); // start empty; only the key set below is written
    coreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
    try (OutputStream out = new BufferedOutputStream(new FileOutputStream(csFile))) {
        coreSite.writeXml(out);
    }
}

From source file:at.illecker.hadoop.rootbeer.examples.matrixmultiplication.cpu.MatrixMultiplicationCpu.java

License:Apache License

@Override
public int run(String[] strings) throws Exception {
    addOption("numRowsA", "nra", "Number of rows of the first input matrix", true);
    addOption("numColsA", "nca", "Number of columns of the first input matrix", true);
    addOption("numRowsB", "nrb", "Number of rows of the second input matrix", true);
    addOption("numColsB", "ncb", "Number of columns of the second input matrix", true);
    addOption("debug", "db", "Enable debugging (true|false)", false);

    Map<String, List<String>> argMap = parseArguments(strings);
    if (argMap == null) {
        return -1;
    }

    int numRowsA = Integer.parseInt(getOption("numRowsA"));
    int numColsA = Integer.parseInt(getOption("numColsA"));
    int numRowsB = Integer.parseInt(getOption("numRowsB"));
    int numColsB = Integer.parseInt(getOption("numColsB"));
    boolean isDebugging = Boolean.parseBoolean(getOption("debug"));

    LOG.info("numRowsA: " + numRowsA);
    LOG.info("numColsA: " + numColsA);
    LOG.info("numRowsB: " + numRowsB);
    LOG.info("numColsB: " + numColsB);
    LOG.info("isDebugging: " + isDebugging);
    LOG.info("outputPath: " + OUTPUT_DIR);

    if (numColsA != numRowsB) {
        throw new CardinalityException(numColsA, numRowsB);
    }

    Configuration conf = new Configuration(getConf()); // copy constructor: clone the Tool's configuration

    // Create random DistributedRowMatrix
    // use constant seeds to get reproducible results
    // Matrix A is stored transposed
    DistributedRowMatrix.createRandomDistributedRowMatrix(conf, numRowsA, numColsA, new Random(42L),
            MATRIX_A_TRANSPOSED_PATH, true);
    DistributedRowMatrix.createRandomDistributedRowMatrix(conf, numRowsB, numColsB, new Random(1337L),
            MATRIX_B_PATH, false);

    // Load DistributedRowMatrix a and b
    DistributedRowMatrix aTransposed = new DistributedRowMatrix(MATRIX_A_TRANSPOSED_PATH, OUTPUT_DIR, numRowsA,
            numColsA);
    aTransposed.setConf(conf);

    DistributedRowMatrix b = new DistributedRowMatrix(MATRIX_B_PATH, OUTPUT_DIR, numRowsB, numColsB);
    b.setConf(conf);

    // MatrixMultiply all within a new MapReduce job
    long startTime = System.currentTimeMillis();
    DistributedRowMatrix c = aTransposed.multiplyMapReduce(b, MATRIX_C_PATH, false, true, 0, isDebugging);
    System.out.println("MatrixMultiplicationCpu using Hadoop finished in "
            + (System.currentTimeMillis() - startTime) / 1000.0 + " seconds");

    // Verification
    // Overwrite matrix A, NOT transposed for verification check
    DistributedRowMatrix.createRandomDistributedRowMatrix(conf, numRowsA, numColsA, new Random(42L),
            MATRIX_A_PATH, false);
    DistributedRowMatrix a = new DistributedRowMatrix(MATRIX_A_PATH, OUTPUT_DIR, numRowsA, numColsA);
    a.setConf(conf);

    DistributedRowMatrix d = a.multiplyJava(b, MATRIX_D_PATH);
    if (c.verify(d)) {
        System.out.println("Verify PASSED!");
    } else {
        System.out.println("Verify FAILED!");
    }

    if (isDebugging) {
        System.out.println("Matrix A:");
        a.printDistributedRowMatrix();
        System.out.println("Matrix A transposed:");
        aTransposed.printDistributedRowMatrix();
        System.out.println("Matrix B:");
        b.printDistributedRowMatrix();
        System.out.println("Matrix C:");
        c.printDistributedRowMatrix();
        System.out.println("Matrix D:");
        d.printDistributedRowMatrix();

        printOutput(conf);
    }
    return 0;
}

From source file:at.illecker.hadoop.rootbeer.examples.matrixmultiplication.gpu.MatrixMultiplicationGpu.java

License:Apache License

@Override
public int run(String[] strings) throws Exception {
    addOption("numRowsA", "nra", "Number of rows of the first input matrix", true);
    addOption("numColsA", "nca", "Number of columns of the first input matrix", true);
    addOption("numRowsB", "nrb", "Number of rows of the second input matrix", true);
    addOption("numColsB", "ncb", "Number of columns of the second input matrix", true);
    addOption("tileWidth", "tw", "TileWidth denotes the size of a submatrix", true);
    addOption("debug", "db", "Enable debugging (true|false)", false);

    Map<String, List<String>> argMap = parseArguments(strings);
    if (argMap == null) {
        return -1;
    }

    int numRowsA = Integer.parseInt(getOption("numRowsA"));
    int numColsA = Integer.parseInt(getOption("numColsA"));
    int numRowsB = Integer.parseInt(getOption("numRowsB"));
    int numColsB = Integer.parseInt(getOption("numColsB"));

    // TILE_WIDTH = 32
    // --> 32 * 32 = 1024 threads matches the block size
    int tileWidth = Integer.parseInt(getOption("tileWidth"));
    boolean isDebugging = Boolean.parseBoolean(getOption("debug"));

    LOG.info("numRowsA: " + numRowsA);
    LOG.info("numColsA: " + numColsA);
    LOG.info("numRowsB: " + numRowsB);
    LOG.info("numColsB: " + numColsB);
    LOG.info("tileWidth: " + tileWidth);
    LOG.info("isDebugging: " + isDebugging);
    LOG.info("outputPath: " + OUTPUT_DIR);

    if (numColsA != numRowsB) {
        throw new CardinalityException(numColsA, numRowsB);
    }

    Configuration conf = new Configuration(getConf()); // copy constructor: clone the Tool's configuration

    // Create random DistributedRowMatrix
    // use constant seeds to get reproducible results
    // Matrix A is stored transposed
    DistributedRowMatrix.createRandomDistributedRowMatrix(conf, numRowsA, numColsA, new Random(42L),
            MATRIX_A_TRANSPOSED_PATH, true);
    DistributedRowMatrix.createRandomDistributedRowMatrix(conf, numRowsB, numColsB, new Random(1337L),
            MATRIX_B_PATH, false);

    // Load DistributedRowMatrix a and b
    DistributedRowMatrix aTransposed = new DistributedRowMatrix(MATRIX_A_TRANSPOSED_PATH, OUTPUT_DIR, numRowsA,
            numColsA);
    aTransposed.setConf(conf);

    DistributedRowMatrix b = new DistributedRowMatrix(MATRIX_B_PATH, OUTPUT_DIR, numRowsB, numColsB);
    b.setConf(conf);

    // MatrixMultiply all within a new MapReduce job
    long startTime = System.currentTimeMillis();
    DistributedRowMatrix c = aTransposed.multiplyMapReduce(b, MATRIX_C_PATH, true, true, tileWidth,
            isDebugging);
    System.out.println("MatrixMultiplicationGpu using Hadoop finished in "
            + (System.currentTimeMillis() - startTime) / 1000.0 + " seconds");

    // Verification
    // Overwrite matrix A, NOT transposed for verification check
    DistributedRowMatrix.createRandomDistributedRowMatrix(conf, numRowsA, numColsA, new Random(42L),
            MATRIX_A_PATH, false);
    DistributedRowMatrix a = new DistributedRowMatrix(MATRIX_A_PATH, OUTPUT_DIR, numRowsA, numColsA);
    a.setConf(conf);

    DistributedRowMatrix d = a.multiplyJava(b, MATRIX_D_PATH);
    if (c.verify(d)) {
        System.out.println("Verify PASSED!");
    } else {
        System.out.println("Verify FAILED!");
    }

    if (isDebugging) {
        System.out.println("Matrix A:");
        a.printDistributedRowMatrix();
        System.out.println("Matrix A transposed:");
        aTransposed.printDistributedRowMatrix();
        System.out.println("Matrix B:");
        b.printDistributedRowMatrix();
        System.out.println("Matrix C:");
        c.printDistributedRowMatrix();
        System.out.println("Matrix D:");
        d.printDistributedRowMatrix();

        printOutput(conf);
    }
    return 0;
}

From source file:azkaban.AzkabanCommonModule.java

License:Apache License

@Inject
@Provides
@Singleton
public Configuration createHadoopConfiguration() {
    final String hadoopConfDirPath = requireNonNull(this.props.get(HADOOP_CONF_DIR_PATH));

    final File hadoopConfDir = new File(hadoopConfDirPath);
    checkArgument(hadoopConfDir.exists() && hadoopConfDir.isDirectory());

    final Configuration hadoopConf = new Configuration(false); // start empty; add only the site files below
    hadoopConf.addResource(new org.apache.hadoop.fs.Path(hadoopConfDirPath, "core-site.xml"));
    hadoopConf.addResource(new org.apache.hadoop.fs.Path(hadoopConfDirPath, "hdfs-site.xml"));
    hadoopConf.set("fs.hdfs.impl", org.apache.hadoop.hdfs.DistributedFileSystem.class.getName());
    return hadoopConf;
}

From source file:azkaban.jobtype.HadoopConfigurationInjector.java

License:Apache License

/**
 * Writes out the XML configuration file that will be injected by the client
 * as a configuration resource.
 * <p>
 * This file will include a series of links injected by Azkaban as well as
 * any job properties that begin with the designated injection prefix.
 *
 * @param props The Azkaban properties
 * @param workingDir The Azkaban job working directory
 */
public static void prepareResourcesToInject(Props props, String workingDir) {
    try {
        Configuration conf = new Configuration(false);

        // First, inject a series of Azkaban links. These are equivalent to
        // CommonJobProperties.[EXECUTION,WORKFLOW,JOB,JOBEXEC,ATTEMPT]_LINK
        addHadoopProperties(props);

        // Next, automatically inject any properties that begin with the
        // designated injection prefix.
        Map<String, String> confProperties = props.getMapByPrefix(INJECT_PREFIX);

        for (Map.Entry<String, String> entry : confProperties.entrySet()) {
            String confKey = entry.getKey().replace(INJECT_PREFIX, "");
            String confVal = entry.getValue();
            conf.set(confKey, confVal);
        }

        // Now write out the configuration file to inject.
        File file = getConfFile(props, workingDir, INJECT_FILE);
        try (OutputStream xmlOut = new FileOutputStream(file)) {
            conf.writeXml(xmlOut);
        }
    } catch (Throwable e) {
        _logger.error("Encountered error while preparing the Hadoop configuration resource file", e);
    }
}

From source file:azkaban.jobtype.StatsUtils.java

License:Apache License

public static Properties getJobConf(RunningJob runningJob) {
    try {
        Path path = new Path(runningJob.getJobFile());
        Configuration conf = new Configuration(false);
        FileSystem fs = FileSystem.get(new Configuration());
        InputStream in = fs.open(path);
        conf.addResource(in);
        return getJobConf(conf);
    } catch (FileNotFoundException e) {
        logger.warn("Job conf not found.");
    } catch (IOException e) {
        logger.warn("Error while retrieving job conf: " + e.getMessage());
    }
    return null;
}