Example usage for org.apache.hadoop.conf Configuration getBoolean

List of usage examples for org.apache.hadoop.conf Configuration getBoolean

Introduction

This page shows example usage of org.apache.hadoop.conf.Configuration.getBoolean.

Prototype

public boolean getBoolean(String name, boolean defaultValue) 

Document

Get the value of the name property as a boolean. If no such property is specified, or if the specified value is not a valid boolean, then defaultValue is returned.
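Below is a minimal, self-contained sketch of the call. The property names (example.feature.enabled, example.feature.missing) and the class name GetBooleanExample are hypothetical, chosen only for illustration: the first lookup returns the value that was set, and the second falls back to the supplied default because the property is absent.

import org.apache.hadoop.conf.Configuration;

public class GetBooleanExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Hypothetical property name used only for illustration.
        conf.setBoolean("example.feature.enabled", true);

        // The property was set above, so this returns true.
        boolean enabled = conf.getBoolean("example.feature.enabled", false);

        // This property was never set, so the default (false) is returned.
        boolean missing = conf.getBoolean("example.feature.missing", false);

        System.out.println(enabled + " " + missing); // prints: true false
    }
}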

Usage

From source file:com.talis.hadoop.rdf.merge.IndexMerge.java

License:Apache License

public int run(String[] args) throws Exception {

    Configuration configuration = getConf();

    boolean useCompression = configuration.getBoolean(Constants.OPTION_USE_COMPRESSION,
            Constants.OPTION_USE_COMPRESSION_DEFAULT);
    if (useCompression) {
        configuration.setBoolean("mapred.compress.map.output", true);
        configuration.set("mapred.output.compression.type", "BLOCK");
        configuration.set("mapred.map.output.compression.codec", "org.apache.hadoop.io.compress.GzipCodec");
    }

    boolean overrideOutput = configuration.getBoolean(Constants.OPTION_OVERRIDE_OUTPUT,
            Constants.OPTION_OVERRIDE_OUTPUT_DEFAULT);
    FileSystem fs = FileSystem.get(new Path(args[1]).toUri(), configuration);
    if (overrideOutput) {
        fs.delete(new Path(args[1]), true);
    }

    Job job = new Job(configuration);
    job.setJobName(JOB_NAME);
    job.setJarByClass(getClass());

    Path input = new Path(args[0]);
    Path output = new Path(args[1]);
    FileInputFormat.addInputPath(job, input);
    FileOutputFormat.setOutputPath(job, output);

    job.setMapperClass(Mapper.class);
    job.setReducerClass(IndexMergeReducer.class);

    job.setInputFormatClass(TextInputFormat.class);
    job.setOutputKeyClass(LongWritable.class);
    job.setOutputValueClass(Text.class);
    job.setOutputFormatClass(TextOutputFormat.class);

    job.setNumReduceTasks(1);

    if (LOG.isDebugEnabled())
        Utils.log(job, LOG);

    return job.waitForCompletion(true) ? 0 : -1;
}

From source file:com.talis.hadoop.rdf.solr.QuadsIndexer.java

License:Apache License

public int run(String[] args) throws Exception {

    Configuration configuration = getConf();

    boolean useCompression = configuration.getBoolean(Constants.OPTION_USE_COMPRESSION,
            Constants.OPTION_USE_COMPRESSION_DEFAULT);
    if (useCompression) {
        configuration.setBoolean("mapred.compress.map.output", true);
        configuration.set("mapred.output.compression.type", "BLOCK");
        configuration.set("mapred.map.output.compression.codec", "org.apache.hadoop.io.compress.GzipCodec");
    }

    boolean overrideOutput = configuration.getBoolean(Constants.OPTION_OVERRIDE_OUTPUT,
            Constants.OPTION_OVERRIDE_OUTPUT_DEFAULT);
    FileSystem outputFs = FileSystem.get(new Path(args[1]).toUri(), configuration);
    if (overrideOutput) {
        outputFs.delete(new Path(args[1]), true);
    }

    Job job = new Job(configuration);
    job.setJobName(JOB_NAME);
    job.setJarByClass(getClass());

    int shards = -1;
    boolean compressOutput = false;

    Path input = new Path(args[0]);
    Path output = new Path(args[1]);
    Path solrConfig = new Path(args[2]);
    FileInputFormat.addInputPath(job, input);
    FileOutputFormat.setOutputPath(job, output);

    if (shards > 0) {
        job.setNumReduceTasks(shards);
    }

    job.setMapperClass(Mapper.class);
    job.setInputFormatClass(SequenceFileInputFormat.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(QuadArrayWritable.class);

    job.setReducerClass(SolrReducer.class);
    SolrDocumentConverter.setSolrDocumentConverter(LiteralsIndexer.class, job.getConfiguration());

    job.setOutputFormatClass(SolrOutputFormat.class);

    String zipName = "solr.zip";
    FileSystem solrConfigFs = FileSystem.get(solrConfig.toUri(), configuration);
    final URI baseZipUrl = solrConfigFs.getUri().resolve(solrConfig.toString() + '#' + zipName);
    DistributedCache.addCacheArchive(baseZipUrl, job.getConfiguration());
    job.getConfiguration().set(SolrOutputFormat.SETUP_OK, solrConfig.toString());
    SolrOutputFormat.setOutputZipFormat(compressOutput, job.getConfiguration());

    if (LOG.isDebugEnabled())
        Utils.log(job, LOG);

    return job.waitForCompletion(true) ? 0 : -1;
}

From source file:com.taobao.adfs.database.MysqlServerController.java

License:Apache License

/**
 * Needs to avoid a timeout of the sub-process. Note: innobackupex has been modified by jiwan@taobao.com
 */
public String getData(Configuration conf, Lock writeLock) throws Throwable {
    // get settings
    setMysqlDefaultConf(conf);
    String dataPathLocal = Utilities.getNormalPath(conf.get("mysql.server.data.path", "."));
    String dataPathRemote = Utilities.getNormalPath(conf.get("mysql.server.backup.data.path", "."));
    String remoteHost = conf.get("mysql.server.backup.host", "localhost");
    String mysqlConfPathLocal = dataPathLocal + "/my.cnf";
    Utilities.mkdirsInRemote(remoteHost, dataPathRemote, true);

    String mysqlServerPid = startServer(conf);

    // generate command line
    // note: innobackupex has been modified by jiwan@taobao.com
    String cmdLine = "innobackupex";
    cmdLine += " --user=root";
    cmdLine += " --password=" + conf.get("mysql.server.password", "root");
    cmdLine += " --defaults-file=" + mysqlConfPathLocal;
    cmdLine += " --socket=" + conf.get(mysqlConfKeyPrefix + "mysqld.socket");
    cmdLine += " --no-lock";// it will save 2s for no needing to unlock tables
    cmdLine += " --suspend-at-end";
    cmdLine += " --stream=tar";
    cmdLine += " " + dataPathLocal;
    cmdLine += "|gzip|";
    if (Utilities.isLocalHost(remoteHost))
        cmdLine += " bash -c";
    else
        cmdLine += "ssh " + remoteHost;
    if (conf.getBoolean("mysql.server.backup.decompress", true))
        cmdLine += " \"tar -zixC " + dataPathRemote + "\"";
    else
        cmdLine += " cat >" + dataPathRemote + "/backup.tar.gz";
    Utilities.logInfo(logger, "Command=", cmdLine);

    // run command line
    Process process = Utilities.runCommand(new String[] { "/bin/bash", "-c", cmdLine },
            conf.get("mysql.server.bin.path"), null);

    // Read the stderr stream to get the backup status, and write to the stdin stream to control it.
    // 0. innobackupex has copied all data (but has not stopped monitoring the log)
    // 1. innobackupex suspends
    // 2. HotBackup finds that innobackupex has suspended
    // 3. HotBackup notifies the master server to stop the write service
    // 4. HotBackup notifies the master server to forward write requests to the slave server
    // 5. HotBackup signals innobackupex to continue the backup work
    // 6. HotBackup resumes and exits
    // 7. HotBackup notifies the master server to restart the write service
    // 8. all write requests to the master server are forwarded to the slave server
    // 9. the slave server applies the backup data and the new requests
    BufferedWriter stdInputWriter = null;
    BufferedReader stdErrorReader = null;

    String line = null;
    boolean backupSuccessful = false;
    try {
        stdInputWriter = new BufferedWriter(new OutputStreamWriter(process.getOutputStream()));
        stdErrorReader = new BufferedReader(new InputStreamReader(process.getErrorStream()));

        while ((line = stdErrorReader.readLine()) != null) {
            Utilities.logInfo(logger, line);
            if (line.contains("WAIT_UNTIL_PARENT_PROCESS_SIGNAL")) {
                Utilities.logInfo(logger, "prepare to block write request for backuping mysql server data");
                writeLock.lock();
                Utilities.logInfo(logger, "already blocked write request for backuping mysql server data");
                // notify xtrabackup to complete the backup
                stdInputWriter.append("SIGNAL_CHILD_PROCESS_TO_CONTINUE\n");
                stdInputWriter.flush();
            }
            if (line.contains("completed OK!") && !line.contains("prints")) {
                backupSuccessful = true;
                Utilities.logInfo(logger, "complete backup.");
                break;
            }
        }
    } finally {
        if (stdInputWriter != null)
            stdInputWriter.close();
        if (stdErrorReader != null)
            stdErrorReader.close();
        process.destroy();
    }

    // wait until files are created
    for (int i = 0; i < 100; ++i) {
        try {
            Utilities.lsInRemote(remoteHost, dataPathRemote + "/xtrabackup_checkpoints");
            Utilities.lsInRemote(remoteHost, dataPathRemote + "/xtrabackup_logfile");
            Utilities.lsInRemote(remoteHost, dataPathRemote + "/ibdata1");
            break;
        } catch (Throwable t) {
            if (i == 99)
                backupSuccessful = false;
            else
                Utilities.sleepAndProcessInterruptedException(100, logger);
        }
    }

    if (!backupSuccessful)
        throw new IOException("fail to backup mysql server data");

    if (mysqlServerPid.isEmpty() && conf.getBoolean("mysql.server.restore", false))
        stopServer(conf);

    return dataPathRemote;
}

From source file:com.taobao.adfs.database.MysqlServerController.java

License:Apache License

/**
 * Needs to avoid a timeout of the sub-process. Note: innobackupex has been modified by jiwan@taobao.com
 * @throws Throwable
 */
public String setData(Configuration conf) throws Throwable {
    // get settings
    saveMysqlConf(conf);
    String dataPathLocal = Utilities.getNormalPath(conf.get("mysql.server.data.path", "."));
    String mysqlConfPathLocal = dataPathLocal + "/my.cnf";

    String mysqlServerPid = stopServer(conf);

    // generate command line
    // example: innobackupex --apply-log --user=root --password=root --defaults-file=/etc/mysql/my.cnf /var/lib/mysql
    // note: innobackupex-1.5.1 has been modified by jiwan@taobao.com
    String cmdLine = "innobackupex";
    cmdLine += " --apply-log";
    cmdLine += " --user=root";
    cmdLine += " --password=" + conf.get("mysql.server.password", "root");
    cmdLine += " --defaults-file=" + mysqlConfPathLocal;
    cmdLine += " --socket=" + getMysqlConf(conf, "mysqld.socket");
    cmdLine += " " + dataPathLocal;
    Utilities.logInfo(logger, "Command=", cmdLine);

    // run command line
    Process process = Utilities.runCommand(new String[] { "/bin/bash", "-c", cmdLine },
            conf.get("mysql.server.bin.path"), null);

    BufferedWriter stdInputWriter = null;
    BufferedReader stdErrorReader = null;

    String line = null;
    boolean restoreSuccessful = false;
    try {
        stdInputWriter = new BufferedWriter(new OutputStreamWriter(process.getOutputStream()));
        stdErrorReader = new BufferedReader(new InputStreamReader(process.getErrorStream()));
        while ((line = stdErrorReader.readLine()) != null) {
            Utilities.logInfo(logger, line);
            if (line.contains("completed OK!") && !line.contains("prints")) {
                restoreSuccessful = true;
                Utilities.logInfo(logger, "complete apply log.");
                break;
            }
        }
    } finally {
        if (stdInputWriter != null)
            stdInputWriter.close();
        if (stdErrorReader != null)
            stdErrorReader.close();
        process.destroy();
    }

    if (!restoreSuccessful)
        throw new IOException("fail to restore database");

    if (!mysqlServerPid.isEmpty() && conf.getBoolean("mysql.server.restore", false))
        startServer(conf);

    return dataPathLocal;
}

From source file:com.taobao.adfs.database.MysqlServerController.java

License:Apache License

public void formatData(Configuration conf) throws Throwable {
    Utilities.logInfo(logger, "mysql server is formatting");
    setMysqlDefaultConf(conf);
    // stop mysql server and initialize data
    String mysqlServerPid = stopServer(conf);
    backupData(conf);
    String mysqlDataPath = Utilities.getNormalPath(conf.get("mysql.server.data.path", "."));
    Utilities.mkdirs(mysqlDataPath, true);
    String commandForCreateMysqlData = "mysql_install_db";
    commandForCreateMysqlData += " --force";
    commandForCreateMysqlData += " --general_log";
    commandForCreateMysqlData += " --no-defaults";
    commandForCreateMysqlData += " --basedir=" + getMysqlConf(conf, "mysqld.basedir");
    commandForCreateMysqlData += " --datadir=" + mysqlDataPath;
    Utilities.logInfo(logger, "mysql server is installing new data, command=", commandForCreateMysqlData);
    Utilities.runCommand(commandForCreateMysqlData, 0, conf.get("mysql.server.bin.path"), null);
    Utilities.logInfo(logger, "mysql server has installed new data");

    // start mysql server and set access control
    startServer(conf);
    String commandForSetMysqlAccess = "mysql -uroot";
    commandForSetMysqlAccess += " --socket=" + getMysqlConf(conf, "mysqld.socket");
    // commandForSetMysqlServerPassword += " password '" + conf.get("mysql.password", "root") + "'";
    commandForSetMysqlAccess += " --execute=\"";
    commandForSetMysqlAccess += "use mysql;delete from user;grant all privileges on *.* to 'root'@'%' identified by 'root';flush privileges;";
    commandForSetMysqlAccess += "\"";
    Utilities.logInfo(logger, "mysql server is setting privileges, command=", commandForSetMysqlAccess);
    Utilities.runCommand(commandForSetMysqlAccess, 0, conf.get("mysql.server.bin.path"),
            getMysqlLibPath(conf.get("mysql.server.bin.path")));
    Utilities.logInfo(logger, "mysql server has set privileges");

    // create database
    try {
        createDatabase(conf);
    } catch (Throwable t) {
        int retryIndex = conf.getInt("mysql.server.format.retry.index", 0);
        if (retryIndex >= conf.getInt("mysql.server.format.retry.max", 3))
            throw new IOException(t);
        conf.setInt("mysql.server.format.retry.index", ++retryIndex);
        Utilities.logError(logger, "mysql server fails to create database, retryIndex=", retryIndex, t);
        formatData(conf);
        return;
    }

    // restore mysql server status before format
    if (mysqlServerPid.isEmpty() && conf.getBoolean("mysql.server.restore", false))
        stopServer(conf);
    Utilities.logInfo(logger, "mysql server is formatted");
}

From source file:com.taobao.adfs.distributed.DistributedServer.java

License:Apache License

public static void stopServer(Configuration conf) throws Throwable {
    conf = new Configuration(conf);
    String serverName = getServerName(conf);
    conf.set("distributed.server.name", serverName);
    if (conf.getBoolean("distributed.data.format", false))
        configLogger(conf);
    else {
        conf.set("distributed.logger.levels",
                "org.apache.zookeeper.ZooKeeper=warn,org.apache.zookeeper.ClientCnxn=warn");
        Utilities.setLoggerLevel(conf, null);
    }
    Utilities.logInfo(logger, serverName, " is stopping");

    // try to request distributed server to stop
    conf.setInt("ipc.client.connect.max.retries", 0);
    new DistributedMonitor(conf).stop();
    // kill process if distributed server is still running
    String[] includes = new String[] { serverName.replace("localhost", "127.0.0.1"), "java" };
    String[] fields = new String[] { "4", "7" };
    List<String> addressList = Utilities.getListenAddressList(includes, null, fields);
    for (String address : addressList) {
        if (address.split(",").length < 2)
            continue;
        String distributedServerPid = address.split(",")[1];
        if (distributedServerPid.split("/").length < 2)
            continue;
        distributedServerPid = distributedServerPid.split("/")[0];
        if (distributedServerPid.equals(Utilities.getPidString()))
            continue;
        Utilities.killProcess(distributedServerPid);
        Utilities.logInfo(logger, serverName, " with pid=", distributedServerPid, " is killed");
        break;
    }
    // kill all other processes which specified the data path in the command line
    String commandForKillRelativePids = "ps -ef| grep -v grep|grep " + getDataPath(conf) + "|awk '{print $2}'";
    String subProcessPids = Utilities.runCommand(commandForKillRelativePids, null, null, null).replaceAll("\n",
            ",");
    if (!subProcessPids.isEmpty()) {
        if (subProcessPids.charAt(subProcessPids.length() - 1) == ',')
            subProcessPids = subProcessPids.substring(0, subProcessPids.length() - 1);
        for (String pid : subProcessPids.split(",")) {
            if (pid.equals(Utilities.getPidString()))
                continue;
            Utilities.killProcess(pid);
            Utilities.logInfo(logger, serverName, "'s sub process with pid=", pid, " is killed");
        }
    }
    Utilities.logInfo(logger, serverName, " has been stopped");
}

From source file:com.taobao.adfs.distributed.rpc.Client.java

License:Apache License

/**
 * Construct an IPC client whose values are of the given {@link Writable} class.
 */
public Client(Class<? extends Writable> valueClass, Configuration conf, SocketFactory factory) {
    this.valueClass = valueClass;
    this.maxIdleTime = conf.getInt("ipc.client.connection.maxidletime", 3600000); // 3600s
    this.maxRetriesForIOException = conf.getInt("ipc.client.connect.max.retries.ioexception", 0);
    this.maxRetriesForSocketTimeoutException = conf
            .getInt("ipc.client.connect.max.retries.sockettimeoutexception", 0);
    this.tcpNoDelay = conf.getBoolean("ipc.client.tcpnodelay", true);
    this.pingInterval = getPingInterval(conf);
    if (LOG.isDebugEnabled()) {
        LOG.debug("The ping interval is" + this.pingInterval + "ms.");
    }
    this.conf = conf;
    this.socketFactory = factory;
    this.connectionNumberPerAddress = conf.getInt("ipc.client.connection.number.per.address", 1);
    if (LOG.isDebugEnabled()) {
        LOG.debug("ipc.client.connection.number.per.address=" + connectionNumberPerAddress);
    }
}

From source file:com.taobao.adfs.distributed.rpc.Server.java

License:Apache License

/**
 * Constructs a server listening on the named port and address. Parameters passed must be of the named class.
 * The <code>handlerCount</code> determines the number of handler threads that will be used to process calls.
 */
protected Server(String bindAddress, int port, Class<? extends Writable> paramClass, int handlerCount,
        Configuration conf, String serverName) throws IOException {
    this.bindAddress = bindAddress;
    this.conf = conf;
    this.port = port;
    this.paramClass = paramClass;
    this.handlerCount = handlerCount;
    this.socketSendBufferSize = 0;
    this.maxQueueSize = handlerCount * MAX_QUEUE_SIZE_PER_HANDLER;
    this.callQueue = new LinkedBlockingQueue<Call>(maxQueueSize);
    this.readThreads = conf.getInt("ipc.server.read.threadpool.size", 40);
    this.maxIdleTime = 2 * conf.getInt("ipc.client.connection.maxidletime", 1000);
    this.maxConnectionsToNuke = conf.getInt("ipc.client.kill.max", 10);
    this.thresholdIdleConnections = conf.getInt("ipc.client.idlethreshold", 4000);

    // Start the listener here and let it bind to the port
    listener = new Listener();
    this.port = listener.getAddress().getPort();
    this.tcpNoDelay = conf.getBoolean("ipc.server.tcpnodelay", false);

    // Create the responder here
    responder = new Responder();
}

From source file:com.tuplejump.calliope.hadoop.cql3.CqlConfigHelper.java

License:Apache License

public static Boolean getMultiRangeInputSplit(Configuration conf) {
    return conf.getBoolean(MULTIRANGE_INPUT_SPLIT, false);
}

From source file:com.twitter.ambrose.hive.AmbroseHivePreHook.java

License:Apache License

/**
 * Waits <tt>ambrose.wf.between.sleep.seconds</tt> seconds before processing the
 * next statement (workflow) in the submitted script.
 *
 * @param hookContext
 * @param reporter
 * @param queryId
 */
private void waitBetween(HookContext hookContext, EmbeddedAmbroseHiveProgressReporter reporter,
        String queryId) {

    Configuration conf = hookContext.getConf();
    boolean justStarted = conf.getBoolean(SCRIPT_STARTED_PARAM, true);
    if (justStarted) {
        conf.setBoolean(SCRIPT_STARTED_PARAM, false);
    } else {
        // sleeping between workflows
        int sleepTimeMs = conf.getInt(WF_BETWEEN_SLEEP_SECS_PARAM, 10);
        try {

            LOG.info("One workflow complete, sleeping for " + sleepTimeMs
                    + " sec(s) before moving to the next one if exists. Hit ctrl-c to exit.");
            Thread.sleep(sleepTimeMs * 1000L);

            //send progressbar reset event
            Map<WorkflowProgressField, String> eventData = Maps.newHashMapWithExpectedSize(1);
            eventData.put(WorkflowProgressField.workflowProgress, "0");
            reporter.pushEvent(queryId, new Event.WorkflowProgressEvent(eventData));

            reporter.saveEventStack();
            reporter.reset();
        } catch (InterruptedException e) {
            LOG.warn("Sleep interrupted", e);
        }
    }
}