Example usage for org.apache.hadoop.conf Configuration setInt

List of usage examples for org.apache.hadoop.conf Configuration setInt

Introduction

On this page you can find example usage for org.apache.hadoop.conf Configuration setInt.

Prototype

public void setInt(String name, int value) 

Document

Set the value of the name property to an int.
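
A minimal, self-contained sketch of the prototype above (not taken from any of the projects listed under Usage; the property names are hypothetical): setInt stores the value under the given name, and the matching getInt reads it back, falling back to a supplied default for keys that were never set.

import org.apache.hadoop.conf.Configuration;

public class SetIntExample {
    public static void main(String[] args) {
        // Start from an empty configuration (no default resources loaded).
        Configuration conf = new Configuration(false);

        // setInt stores the int under the given property name (internally as a String).
        conf.setInt("example.retry.count", 3);

        // getInt parses it back; the second argument is the default for absent keys.
        int retries = conf.getInt("example.retry.count", 1);
        System.out.println("example.retry.count = " + retries); // prints 3

        // A key that was never set falls back to the supplied default.
        int timeout = conf.getInt("example.timeout.ms", 5000);
        System.out.println("example.timeout.ms = " + timeout); // prints 5000
    }
}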

Usage

From source file: com.cloudera.llama.server.TestClientNotifier.java

License: Apache License

@Test
public void testRetryWithRecovery() throws Exception {
    Configuration conf = new Configuration(false);
    conf.setInt(nsConf.getPropertyName(ServerConfiguration.CLIENT_NOTIFIER_HEARTBEAT_KEY), 10000);
    conf.setInt(nsConf.getPropertyName(ServerConfiguration.CLIENT_NOTIFIER_RETRY_INTERVAL_KEY), 200);
    conf.setInt(nsConf.getPropertyName(ServerConfiguration.CLIENT_NOTIFIER_MAX_RETRIES_KEY), 2);
    conf.setInt(nsConf.getPropertyName(ServerConfiguration.TRANSPORT_TIMEOUT_KEY), 50);
    ServerConfiguration sConf = new NSServerConfiguration(conf);
    UUID clientId = UUID.randomUUID();
    UUID handle = UUID.randomUUID();
    MyClientRegistry cr = new MyClientRegistry(sConf, clientId, handle, notificationServer.getAddressHost(),
            notificationServer.getAddressPort());
    ClientNotifier cn = new ClientNotifier(sConf, new HostnameOnlyNodeMapper(), cr, null);
    notificationServer.delayResponse = 100;
    try {
        cn.start();
        cn.registerClientForHeartbeats(handle);

        LlamaAMEventImpl event = new LlamaAMEventImpl();
        event.addReservation(TestUtils.createPlacedReservation(TestUtils.createReservation(true),
                PlacedReservation.Status.ALLOCATED));
        cn.onEvent(event);
        Thread.sleep(100); //adding 50ms extra
        Assert.assertEquals(1, cr.clientCallerCalls.get());
        Assert.assertFalse(cr.maxFailures);
        cr.clientCallerCalls.set(0);
        notificationServer.delayResponse = 0;
        notificationServer.notifications.clear();
        Thread.sleep(250); //adding 50ms extra
        Assert.assertEquals(1, cr.clientCallerCalls.get());
        Assert.assertEquals(1, notificationServer.notifications.size());
        Assert.assertFalse(notificationServer.notifications.get(0).isHeartbeat());
        Assert.assertFalse(cr.maxFailures);
    } finally {
        cn.stop();
    }
}

From source file: com.cloudera.oryx.als.computation.iterate.row.RowStep.java

License: Open Source License

@Override
protected MRPipeline createPipeline() throws IOException {

    IterationState iterationState = getIterationState();
    String iterationKey = iterationState.getIterationKey();
    boolean x = iterationState.isComputingX();
    int lastIteration = iterationState.getIteration() - 1;
    Store store = Store.get();

    JobStepConfig config = getConfig();
    String instanceDir = config.getInstanceDir();
    int generationID = config.getGenerationID();

    if (store.exists(Namespaces.getInstanceGenerationPrefix(instanceDir, generationID) + "X/", false)) {
        // Actually, looks like whole computation of X/Y finished -- just proceed
        return null;
    }

    // Take the opportunity to clean out iteration before last, if computing X
    if (x) {
        String lastLastIterationKey = Namespaces.getIterationsPrefix(instanceDir, generationID)
                + (lastIteration - 1) + '/';
        if (store.exists(lastLastIterationKey, false)) {
            log.info("Deleting old iteration data from {}", lastLastIterationKey);
            store.recursiveDelete(lastLastIterationKey);
        }
    }

    String yKey;
    if (x) {
        yKey = Namespaces.getIterationsPrefix(instanceDir, generationID) + lastIteration + "/Y/";
    } else {
        yKey = iterationKey + "X/";
    }

    String xKey = iterationKey + (x ? "X/" : "Y/");
    String rKey = Namespaces.getTempPrefix(instanceDir, generationID) + (x ? "userVectors/" : "itemVectors/");

    if (!validOutputPath(xKey)) {
        return null;
    }

    MRPipeline p = createBasicPipeline(RowReduceFn.class);
    Configuration conf = p.getConfiguration();
    conf.set(Y_KEY_KEY, yKey);

    String tempKey = Namespaces.getTempPrefix(instanceDir, generationID);
    String popularKey = tempKey + (x ? "popularItemsByUserPartition/" : "popularUsersByItemPartition/");
    conf.set(POPULAR_KEY, popularKey);

    String testPrefix = Namespaces.getInstanceGenerationPrefix(instanceDir, generationID) + "test/";
    conf.set(MAP_KEY, testPrefix);

    YState yState = new YState(ALSTypes.DENSE_ROW_MATRIX); // Shared Y-Matrix state

    GroupingOptions opts = groupingOptions();
    PCollection<MatrixRow> matrix = PTables.asPTable(p.read(input(rKey, ALSTypes.SPARSE_ROW_MATRIX)))
            .groupByKey(opts).parallelDo("rowReduce", new RowReduceFn(yState), ALSTypes.DENSE_ROW_MATRIX)
            .write(output(xKey));

    if (!x) {
        // Configure and perform convergence sampling
        int modulus = chooseConvergenceSamplingModulus(opts);
        conf.setInt(CONVERGENCE_SAMPLING_MODULUS_KEY, modulus);

        matrix.parallelDo("asPair", MatrixRow.AS_PAIR, Avros.tableOf(Avros.longs(), ALSTypes.FLOAT_ARRAY))
                .parallelDo("convergenceSample", new ConvergenceSampleFn(yState), Avros.strings())
                .write(compressedTextOutput(p.getConfiguration(), iterationKey + "Yconvergence"));
    }

    if (x && ConfigUtils.getDefaultConfig().getDouble("model.test-set-fraction") > 0.0
            && store.exists(testPrefix, false)) {
        PCollection<Double> aps = matrix
                .parallelDo("asPair", MatrixRow.AS_PAIR, Avros.tableOf(Avros.longs(), ALSTypes.FLOAT_ARRAY))
                .parallelDo("computeAP", new ComputeUserAPFn(yState), Avros.doubles());
        Mean meanAveragePrecision = new Mean();
        for (double ap : aps.materialize()) {
            meanAveragePrecision.increment(ap);
        }
        log.info("Mean average precision: {}", meanAveragePrecision.getResult());

        File tempMAPFile = File.createTempFile("MAP", ".txt");
        tempMAPFile.deleteOnExit();
        Files.write(Double.toString(meanAveragePrecision.getResult()), tempMAPFile, Charsets.UTF_8);
        store.upload(iterationKey + "MAP", tempMAPFile, false);
        IOUtils.delete(tempMAPFile);
    }

    return p;
}

From source file: com.cloudera.oryx.computation.common.JobStep.java

License: Open Source License

/**
 * Creates a new {@link MRPipeline} instance that contains common configuration
 * settings.
 *
 * @return a new {@link MRPipeline} instance, suitably configured
 */
protected final MRPipeline createBasicPipeline(Class<?> jarClass) throws IOException {
    Configuration conf = OryxConfiguration.get(getConf());

    conf.setBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS, true);
    conf.setClass(MRJobConfig.MAP_OUTPUT_COMPRESS_CODEC, SnappyCodec.class, CompressionCodec.class);

    conf.setBoolean("mapred.output.compress", true);
    conf.set("mapred.output.compression.type", "BLOCK");
    conf.setClass("mapred.output.compression.codec", SnappyCodec.class, CompressionCodec.class);
    // Set old-style equivalents for Avro/Crunch's benefit
    conf.set("avro.output.codec", "snappy");

    conf.setBoolean(MRJobConfig.MAP_SPECULATIVE, true);
    conf.setBoolean(MRJobConfig.REDUCE_SPECULATIVE, true);
    conf.setBoolean(TTConfig.TT_OUTOFBAND_HEARBEAT, true);
    conf.setInt(MRJobConfig.JVM_NUMTASKS_TORUN, -1);

    //conf.setBoolean("crunch.disable.deep.copy", true);
    // Giving one mapper a lot of data can cause issues in some stages, so default to disable this
    conf.setBoolean("crunch.disable.combine.file", true);

    Config appConfig = ConfigUtils.getDefaultConfig();

    conf.set("crunch.tmp.dir", appConfig.getString("computation-layer.tmp-dir"));

    int mapMemoryMB = appConfig.getInt("computation-layer.mapper-memory-mb");
    log.info("Mapper memory: {}", mapMemoryMB);
    int mapHeapMB = (int) (mapMemoryMB / 1.3); // Matches Hadoop's default
    log.info("Mappers have {}MB heap and can access {}MB RAM", mapHeapMB, mapMemoryMB);
    if (conf.get(MRJobConfig.MAP_JAVA_OPTS) != null) {
        log.info("Overriding previous setting of {}, which was '{}'", MRJobConfig.MAP_JAVA_OPTS,
                conf.get(MRJobConfig.MAP_JAVA_OPTS));
    }
    conf.set(MRJobConfig.MAP_JAVA_OPTS,
            "-Xmx" + mapHeapMB + "m -XX:+UseCompressedOops -XX:+UseParallelGC -XX:+UseParallelOldGC");
    log.info("Set {} to '{}'", MRJobConfig.MAP_JAVA_OPTS, conf.get(MRJobConfig.MAP_JAVA_OPTS));
    // See comment below on CM
    conf.setInt("mapreduce.map.java.opts.max.heap", mapHeapMB);

    int reduceMemoryMB = appConfig.getInt("computation-layer.reducer-memory-mb");
    log.info("Reducer memory: {}", reduceMemoryMB);
    if (isHighMemoryStep()) {
        reduceMemoryMB *= appConfig.getInt("computation-layer.worker-high-memory-factor");
        log.info("Increasing {} to {} for high-memory step", MRJobConfig.REDUCE_MEMORY_MB, reduceMemoryMB);
    }
    conf.setInt(MRJobConfig.REDUCE_MEMORY_MB, reduceMemoryMB);

    int reduceHeapMB = (int) (reduceMemoryMB / 1.3); // Matches Hadoop's default
    log.info("Reducers have {}MB heap and can access {}MB RAM", reduceHeapMB, reduceMemoryMB);
    if (conf.get(MRJobConfig.REDUCE_JAVA_OPTS) != null) {
        log.info("Overriding previous setting of {}, which was '{}'", MRJobConfig.REDUCE_JAVA_OPTS,
                conf.get(MRJobConfig.REDUCE_JAVA_OPTS));
    }
    conf.set(MRJobConfig.REDUCE_JAVA_OPTS,
            "-Xmx" + reduceHeapMB + "m -XX:+UseCompressedOops -XX:+UseParallelGC -XX:+UseParallelOldGC");
    log.info("Set {} to '{}'", MRJobConfig.REDUCE_JAVA_OPTS, conf.get(MRJobConfig.REDUCE_JAVA_OPTS));
    // I see this in CM but not in Hadoop docs; probably won't hurt as it's supposed to result in
    // -Xmx appended to opts above, which is at worst redundant
    conf.setInt("mapreduce.reduce.java.opts.max.heap", reduceHeapMB);

    conf.setInt("yarn.scheduler.capacity.minimum-allocation-mb", 128);
    conf.setInt("yarn.app.mapreduce.am.resource.mb", 384);

    // Pass total config state
    conf.set(CONFIG_SERIALIZATION_KEY, ConfigUtils.getDefaultConfig().root().render());

    // Make sure to set any args to conf above this line!

    setConf(conf);

    Job job = Job.getInstance(conf);

    // Basic File IO settings
    FileInputFormat.setMaxInputSplitSize(job, 1L << 28); // ~268MB
    SequenceFileOutputFormat.setOutputCompressionType(job, SequenceFile.CompressionType.BLOCK);
    FileOutputFormat.setCompressOutput(job, true);
    FileOutputFormat.setOutputCompressorClass(job, SnappyCodec.class);

    log.info("Created pipeline configuration {}", job.getConfiguration());

    return new MRPipeline(jarClass, getCustomJobName(), job.getConfiguration());
}

From source file: com.cloudera.sa.hbase.to.hdfs.utils.NMapInputFormat.java

License: Apache License

public static void setNumMapTasks(Configuration conf, int numTasks) {
    conf.setInt(NMAPS_KEY, numTasks);
}

From source file: com.cloudera.sqoop.hbase.HBaseTestCase.java

License: Apache License

@Override
@Before
public void setUp() {
    try {
        HBaseTestCase.recordTestBuildDataProperty();
        String hbaseDir = new File(workDir, "hbase").getAbsolutePath();
        String hbaseRoot = "file://" + hbaseDir;
        Configuration hbaseConf = HBaseConfiguration.create();
        hbaseConf.set(HConstants.HBASE_DIR, hbaseRoot);
        //Hbase 0.90 does not have HConstants.ZOOKEEPER_CLIENT_PORT
        hbaseConf.setInt("hbase.zookeeper.property.clientPort", 21818);
        hbaseConf.set(HConstants.ZOOKEEPER_QUORUM, "0.0.0.0");
        hbaseConf.setInt("hbase.master.info.port", -1);
        hbaseConf.setInt("hbase.zookeeper.property.maxClientCnxns", 500);
        String zookeeperDir = new File(workDir, "zk").getAbsolutePath();
        int zookeeperPort = 21818;
        zookeeperCluster = new MiniZooKeeperCluster();
        Method m;
        Class<?> zkParam[] = { Integer.TYPE };
        try {
            m = MiniZooKeeperCluster.class.getDeclaredMethod("setDefaultClientPort", zkParam);
        } catch (NoSuchMethodException e) {
            m = MiniZooKeeperCluster.class.getDeclaredMethod("setClientPort", zkParam);
        }
        m.invoke(zookeeperCluster, new Object[] { new Integer(zookeeperPort) });
        zookeeperCluster.startup(new File(zookeeperDir));
        hbaseCluster = new MiniHBaseCluster(hbaseConf, 1);
        HMaster master = hbaseCluster.getMaster();
        Object serverName = master.getServerName();

        String hostAndPort;
        if (serverName instanceof String) {
            System.out.println("Server name is string, using HServerAddress.");
            m = HMaster.class.getDeclaredMethod("getMasterAddress", new Class<?>[] {});
            Class<?> clazz = Class.forName("org.apache.hadoop.hbase.HServerAddress");
            /*
             * Call method to get server address
             */
            Object serverAddr = clazz.cast(m.invoke(master, new Object[] {}));
            //returns the address as hostname:port
            hostAndPort = serverAddr.toString();
        } else {
            System.out.println("ServerName is org.apache.hadoop.hbase.ServerName," + "using getHostAndPort()");
            Class<?> clazz = Class.forName("org.apache.hadoop.hbase.ServerName");
            m = clazz.getDeclaredMethod("getHostAndPort", new Class<?>[] {});
            hostAndPort = m.invoke(serverName, new Object[] {}).toString();
        }
        hbaseConf.set("hbase.master", hostAndPort);
        hbaseTestUtil = new HBaseTestingUtility(hbaseConf);
        hbaseTestUtil.setZkCluster(zookeeperCluster);
        hbaseCluster.startMaster();
        super.setUp();
    } catch (Throwable e) {
        throw new RuntimeException(e);
    }
}

From source file: com.cloudera.sqoop.mapreduce.MySQLDumpImportJob.java

License: Apache License

/**
 * Configure the inputformat to use for the job.
 */
protected void configureInputFormat(Job job, String tableName, String tableClassName, String splitByCol)
        throws ClassNotFoundException, IOException {

    if (null == tableName) {
        LOG.error("mysqldump-based import cannot support free-form query imports.");
        LOG.error("Do not use --direct and --query together for MySQL.");
        throw new IOException("null tableName for MySQLDumpImportJob.");
    }

    ConnManager mgr = getContext().getConnManager();
    String username = options.getUsername();
    if (null == username || username.length() == 0) {
        DBConfiguration.configureDB(job.getConfiguration(), mgr.getDriverClass(), options.getConnectString());
    } else {
        DBConfiguration.configureDB(job.getConfiguration(), mgr.getDriverClass(), options.getConnectString(),
                username, options.getPassword());
    }

    String[] colNames = options.getColumns();
    if (null == colNames) {
        colNames = mgr.getColumnNames(tableName);
    }

    String[] sqlColNames = null;
    if (null != colNames) {
        sqlColNames = new String[colNames.length];
        for (int i = 0; i < colNames.length; i++) {
            sqlColNames[i] = mgr.escapeColName(colNames[i]);
        }
    }

    // It's ok if the where clause is null in DBInputFormat.setInput.
    String whereClause = options.getWhereClause();

    // We can't set the class properly in here, because we may not have the
    // jar loaded in this JVM. So we start by calling setInput() with
    // DBWritable and then overriding the string manually.

    // Note that mysqldump also does *not* want a quoted table name.
    DataDrivenDBInputFormat.setInput(job, DBWritable.class, tableName, whereClause,
            mgr.escapeColName(splitByCol), sqlColNames);

    Configuration conf = job.getConfiguration();
    conf.setInt(MySQLUtils.OUTPUT_FIELD_DELIM_KEY, options.getOutputFieldDelim());
    conf.setInt(MySQLUtils.OUTPUT_RECORD_DELIM_KEY, options.getOutputRecordDelim());
    conf.setInt(MySQLUtils.OUTPUT_ENCLOSED_BY_KEY, options.getOutputEnclosedBy());
    conf.setInt(MySQLUtils.OUTPUT_ESCAPED_BY_KEY, options.getOutputEscapedBy());
    conf.setBoolean(MySQLUtils.OUTPUT_ENCLOSE_REQUIRED_KEY, options.isOutputEncloseRequired());
    String[] extraArgs = options.getExtraArgs();
    if (null != extraArgs) {
        conf.setStrings(MySQLUtils.EXTRA_ARGS_KEY, extraArgs);
    }

    LOG.debug("Using InputFormat: " + inputFormatClass);
    job.setInputFormatClass(getInputFormatClass());
}

From source file: com.cloudera.sqoop.mapreduce.MySQLExportJob.java

License: Apache License

@Override
/**
 * Configure the inputformat to use for the job.
 */
protected void configureInputFormat(Job job, String tableName, String tableClassName, String splitByCol)
        throws ClassNotFoundException, IOException {

    // Configure the delimiters, etc.
    Configuration conf = job.getConfiguration();
    conf.setInt(MySQLUtils.OUTPUT_FIELD_DELIM_KEY, options.getOutputFieldDelim());
    conf.setInt(MySQLUtils.OUTPUT_RECORD_DELIM_KEY, options.getOutputRecordDelim());
    conf.setInt(MySQLUtils.OUTPUT_ENCLOSED_BY_KEY, options.getOutputEnclosedBy());
    conf.setInt(MySQLUtils.OUTPUT_ESCAPED_BY_KEY, options.getOutputEscapedBy());
    conf.setBoolean(MySQLUtils.OUTPUT_ENCLOSE_REQUIRED_KEY, options.isOutputEncloseRequired());
    String[] extraArgs = options.getExtraArgs();
    if (null != extraArgs) {
        conf.setStrings(MySQLUtils.EXTRA_ARGS_KEY, extraArgs);
    }

    ConnManager mgr = context.getConnManager();
    String username = options.getUsername();
    if (null == username || username.length() == 0) {
        DBConfiguration.configureDB(job.getConfiguration(), mgr.getDriverClass(), options.getConnectString());
    } else {
        DBConfiguration.configureDB(job.getConfiguration(), mgr.getDriverClass(), options.getConnectString(),
                username, options.getPassword());
    }

    String[] colNames = options.getColumns();
    if (null == colNames) {
        colNames = mgr.getColumnNames(tableName);
    }

    String[] sqlColNames = null;
    if (null != colNames) {
        sqlColNames = new String[colNames.length];
        for (int i = 0; i < colNames.length; i++) {
            sqlColNames[i] = mgr.escapeColName(colNames[i]);
        }
    }

    // Note that mysqldump also does *not* want a quoted table name.
    DataDrivenDBInputFormat.setInput(job, DBWritable.class, tableName, null, null, sqlColNames);

    // Configure the actual InputFormat to use. 
    super.configureInputFormat(job, tableName, tableClassName, splitByCol);
}

From source file: com.codefollower.lealone.omid.tso.TransactionClient.java

License: Open Source License

/**
 * Main class for Client taking from two to seven arguments<br>
 * -host for server<br>
 * -port for server<br>
 * -number of message (default is 256)<br>
 * -MAX_IN_FLIGHT
 * @param args
 * @throws Exception
 */
public static void main(String[] args) throws Exception {
    // Print usage if no argument is specified.
    if (args.length < 2 || args.length > 7) {
        System.err.println("Usage: " + TransactionClient.class.getSimpleName()
                + " <host> <port> [<number of messages>] [<MAX_IN_FLIGHT>] [<connections>] [<pause>] [<% reads>]");
        return;
    }

    // Parse options.
    String host = args[0];
    int port = Integer.parseInt(args[1]);
    int nbMessage;

    if (args.length >= 3) {
        nbMessage = Integer.parseInt(args[2]);
    } else {
        nbMessage = 256;
    }
    int inflight = 10;
    if (args.length >= 4) {
        inflight = Integer.parseInt(args[3]);
    }

    int runs = 1;
    if (args.length >= 5) {
        runs = Integer.parseInt(args[4]);
    }

    boolean pauseClient = false;
    if (args.length >= 6) {
        pauseClient = Boolean.parseBoolean(args[5]);
    }

    float percentRead = 0;
    if (args.length >= 7) {
        percentRead = Float.parseFloat(args[6]);
    }

    // *** Start the Netty configuration ***

    // Start client with Nb of active threads = 3 as maximum.
    ChannelFactory factory = new NioClientSocketChannelFactory(Executors.newCachedThreadPool(),
            Executors.newCachedThreadPool(), 30);

    // Create the global ChannelGroup
    ChannelGroup channelGroup = new DefaultChannelGroup(TransactionClient.class.getName());

    List<ClientHandler> handlers = new ArrayList<ClientHandler>();

    Configuration conf = HBaseConfiguration.create();
    conf.set("tso.host", host);
    conf.setInt("tso.port", port);
    conf.setInt("tso.executor.threads", 10);

    for (int i = 0; i < runs; ++i) {
        // Create the associated Handler
        ClientHandler handler = new ClientHandler(conf, nbMessage, inflight, pauseClient, percentRead);

        // *** Start the Netty running ***

        System.out.println("PARAM MAX_ROW: " + ClientHandler.MAX_ROW);
        System.out.println("PARAM DB_SIZE: " + ClientHandler.DB_SIZE);
        System.out.println("pause " + pauseClient);
        System.out.println("readPercent " + percentRead);

        handlers.add(handler);

        if ((i - 1) % 20 == 0)
            Thread.sleep(1000);
    }

    // Wait for the Traffic to finish
    for (ClientHandler handler : handlers) {
        boolean result = handler.waitForAll();
        System.out.println("Result: " + result);
    }

    // *** Start the Netty shutdown ***

    // Now close all channels
    System.out.println("close channelGroup");
    channelGroup.close().awaitUninterruptibly();
    // Now release resources
    System.out.println("close external resources");
    factory.releaseExternalResources();
}

From source file: com.codefollower.lealone.omid.tso.TSOTestBase.java

License: Open Source License

public static void setupClient() throws IOException {

    // *** Start the Netty configuration ***

    Configuration conf = HBaseConfiguration.create();
    conf.set("tso.host", "localhost");
    conf.setInt("tso.port", 1234);

    // Start client with Nb of active threads = 3 as maximum.
    channelFactory = new NioClientSocketChannelFactory(Executors.newCachedThreadPool(),
            Executors.newCachedThreadPool(), 3);
    // Create the bootstrap
    // Create the global ChannelGroup
    channelGroup = new DefaultChannelGroup(TransactionClient.class.getName());
    // Create the associated Handler
    clientHandler = new TestClientHandler(conf);

    // *** Start the Netty running ***

    System.out.println("PARAM MAX_ROW: " + ClientHandler.MAX_ROW);
    System.out.println("PARAM DB_SIZE: " + ClientHandler.DB_SIZE);

    // Connect to the server, wait for the connection and get back the channel
    clientHandler.await();

    // Second client handler
    secondClientHandler = new TestClientHandler(conf);

    // *** Start the Netty running ***

    System.out.println("PARAM MAX_ROW: " + ClientHandler.MAX_ROW);
    System.out.println("PARAM DB_SIZE: " + ClientHandler.DB_SIZE);
}

From source file: com.dalabs.droop.util.password.CryptoFileLoader.java

License: Apache License

@Override
public void cleanUpConfiguration(Configuration configuration) {
    // Usage of Configuration#unset would be much better here, sadly
    // this particular API is not available in Hadoop 0.20 and < 1.2.0
    // that we are still supporting. Hence we are overriding the configs
    // with default values.
    configuration.set(PROPERTY_CRYPTO_PASSPHRASE, "REMOVED");
    configuration.set(PROPERTY_CRYPTO_SALT, DEFAULT_SALT);
    configuration.setInt(PROPERTY_CRYPTO_KEY_LEN, DEFAULT_KEY_LEN);
    configuration.setInt(PROPERTY_CRYPTO_ITERATIONS, DEFAULT_ITERATIONS);
}