Example usage for org.apache.hadoop.io BooleanWritable BooleanWritable

List of usage examples for org.apache.hadoop.io BooleanWritable BooleanWritable

Introduction

On this page you can find example usage for org.apache.hadoop.io BooleanWritable BooleanWritable.

Prototype

public BooleanWritable(boolean value) 
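
A minimal, self-contained sketch of the constructor in use, round-tripping the value through Hadoop's standard Writable serialization:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.io.BooleanWritable;

public class BooleanWritableDemo {
    public static void main(String[] args) throws IOException {
        // Construct with the one-argument constructor shown above.
        BooleanWritable w = new BooleanWritable(true);
        System.out.println(w.get()); // true

        // Serialize, then deserialize into a fresh instance.
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        w.write(new DataOutputStream(bos));

        BooleanWritable copy = new BooleanWritable(false);
        copy.readFields(new DataInputStream(new ByteArrayInputStream(bos.toByteArray())));
        System.out.println(copy.get()); // true
    }
}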

Usage

From source file:item_profile_job.ItemProfileMapper.java

License:Open Source License

/**
 *  The map method takes a text file (#user,#item,#vote) as input. For each
 *  line of the file it reads the item and the vote; if the vote is positive
 *  it writes (item, true) to HDFS, otherwise it writes (item, false).
 *   @param key the byte offset of the line currently being read from the file
 *   @param value the line that was read
 *   @param output the Hadoop context that receives the results
 *   @throws IOException
 *   @throws InterruptedException
 */
public void map(LongWritable key, Text value, Context output) throws IOException, InterruptedException {

    String line = value.toString();
    StringTokenizer st = new StringTokenizer(line, split);
    if (st.hasMoreTokens()) {
        @SuppressWarnings("unused")
        int user = Integer.parseInt(st.nextToken());
        int item = Integer.parseInt(st.nextToken());
        int voto = Integer.parseInt(st.nextToken());
        Boolean bool_vote = null;

        if (voto == 1) {
            bool_vote = true;
        } else if (voto == -1) {
            bool_vote = false;
        }

        // Only write when the vote was recognized: unboxing a null Boolean
        // in new BooleanWritable(bool_vote) would throw a NullPointerException.
        if (bool_vote != null) {
            output.write(new IntWritable(item), new BooleanWritable(bool_vote));
        }
    }
}
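
A quick hypothetical trace of this mapper (assuming the field split is "," so each line parses as user,item,vote):

// "1,42,1"  -> output.write(new IntWritable(42), new BooleanWritable(true))
// "2,42,-1" -> output.write(new IntWritable(42), new BooleanWritable(false))
// "3,7,0"   -> no output (the vote is neither 1 nor -1)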

From source file:jadoop.HadoopGridTaskRunner.java

License:Open Source License

/**
 * The map function that executes the command line task and outputs the
 * return code, standard output and standard error.
 *
 * The input to this method is a key and an array of Text objects. The key
 * matches the key provided when the HadoopGridTask was added to the
 * HadoopGridJob. The entries in the array are as follows:
 * <UL>
 * <LI>[0] - capStdOut: [true or false] If true, then all output to standard
 * output by the execution of the command should be captured and returned as
 * discussed below. If false, then standard output is not captured. This
 * will be the first element of the ArrayWritable.
 * <LI>[1] - capStdErr: [true or false] If true, then all output to standard
 * error by the execution of the command should be captured and returned as
 * discussed below. If false, then standard error is not captured. This will
 * be the second element in ArrayWritable.
 * <LI>[2] - timeout: The amount of time (in ms) that the task has to
 * execute. If the command does not complete before the timeout it is
 * terminated.
 * <LI>[3] - command: The command to be executed. Any arguments to the
 * command will be contained in subsequent entries.
 * <LI>[4]... - arguments to the command. These would be the individual
 * command line arguments if typed in at the command prompt.
 * </UL>
 * 
 * <p>
 * For example: if standard output of an execution of the cal command for
 * June 2015 were to be captured the array entries would be:
 * <UL>
 * <LI>[0] - true
 * <LI>[1] - false
 * <LI>[2] - 1000
 * <LI>[3] - cal
 * <LI>[4] - 6
 * <LI>[5] - 2015
 * </UL>
 * 
 * <p>
 * Entries 0-2 are used as flags; the remaining entries are converted to an
 * array of Strings and used as the argument in a call to
 * Runtime.getRuntime().exec() to run the command.
 * 
 * <p>
 * The key generated for the Mapper result will be the same key passed into
 * the mapper. The results generated for the Mapper will be a MapWritable
 * object with the following key/value pairs:
 * <UL>
 * <LI>EV,value : the value is a ByteWritable containing the exit value
 * generated by the process created by the call to the Runtime.exec method.
 * <LI>TO,value : the value will be a BooleanWritable indicating if the
 * task timed out (true) or not (false).
 * <LI>SO,value : the value is a Text containing the output written to
 * standard output by the executed program.
 * <LI>SE,value : the value is a Text containing the output written to
 * standard error by the executed program.
 * </UL>
 * 
 * @param key
 *            a key that identifies this task. This will match the key
 *            provided in the HadoopGridTask object.
 * @param value
 *            the flags and command line as described above.
 * @param context
 *            a Hadoop Context object provided by the Hadoop system.
 * 
 * @throws InterruptedException
 *             if there is a problem writing the task result to the context.
 * @throws IOException
 *             if there is a problem writing the task result to the context.
 */
public void map(Text key, TextArrayWritable value, Context context) throws IOException, InterruptedException {

    String[] mapVal = value.toStrings();

    boolean capStdOutput = Boolean.parseBoolean(mapVal[0]);
    boolean capStdErr = Boolean.parseBoolean(mapVal[1]);
    long timeout = Long.parseLong(mapVal[2]);

    // Build the command.
    String[] cmdInput = new String[mapVal.length - 3];
    System.arraycopy(mapVal, 3, cmdInput, 0, cmdInput.length);

    StringBuilder stdOutputStr = new StringBuilder();
    StringBuilder errOutputStr = new StringBuilder();
    byte exitValue = 0;
    boolean timedout = false;

    try {
        // Executes the command.
        Process p = Runtime.getRuntime().exec(cmdInput);

        long start = System.currentTimeMillis();
        long cur = System.currentTimeMillis();
        boolean done = false;
        while (!timedout && !done) {
            Thread.sleep(PROCSSESS_POLL_DELAY);

            /*
             * Check if the process has finished. If it has, the exit value
             * will come back, if not it throws an exception.
             */
            try {
                exitValue = (byte) p.exitValue();
                done = true;
            } catch (IllegalThreadStateException e) {
                // process not done yet, keep going...
            }

            cur = System.currentTimeMillis();
            long elapsedTime = (cur - start);
            timedout = (elapsedTime >= timeout);

            // Keep long running tasks alive with hadoop.
            context.setStatus("Running for: " + elapsedTime + " ms.");
        }

        // Capture standard output generated by the command.
        if (capStdOutput) {
            BufferedReader stdOutputPrg = new BufferedReader(new InputStreamReader(p.getInputStream()));
            while (stdOutputPrg.ready()) {
                stdOutputStr.append(stdOutputPrg.readLine());
                stdOutputStr.append("\n");
            }
        }

        // Capture standard error generated by the command
        if (capStdErr) {
            BufferedReader stdErrPrg = new BufferedReader(new InputStreamReader(p.getErrorStream()));
            while (stdErrPrg.ready()) {
                errOutputStr.append(stdErrPrg.readLine());
                errOutputStr.append("\n");
            }
        }

        if (timedout) {
            p.destroy(); // kill the process.
            exitValue = -1;
        }
    } catch (Exception e) {
        /*
         * If an exception occurs put the message and stack trace on the end
         * of the standard error returned.
         */
        errOutputStr.append("\n" + e.getMessage() + "\n");
        for (StackTraceElement frame : e.getStackTrace()) {
            errOutputStr.append(frame.toString() + "\n");
        }
        exitValue = -1;
    }

    // Put the results into the context that is returned from this mapper.
    Text evKey = new Text("EV");
    Text toKey = new Text("TO");
    Text soKey = new Text("SO");
    Text seKey = new Text("SE");

    ByteWritable bwExitVal = new ByteWritable(exitValue);
    BooleanWritable bwTimeout = new BooleanWritable(timedout);
    Text tStdOutputStr = new Text(stdOutputStr.toString().trim());
    Text tErrOutputStr = new Text(errOutputStr.toString().trim());

    MapWritable mw = new MapWritable();
    mw.put(evKey, bwExitVal);
    mw.put(toKey, bwTimeout);
    mw.put(soKey, tStdOutputStr);
    mw.put(seKey, tErrOutputStr);

    context.write(key, mw);
}
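
For the cal example in the Javadoc above, the input value could be assembled as follows. This is a hypothetical sketch: it assumes jadoop's TextArrayWritable accepts a Text[] the way Hadoop's ArrayWritable subclasses typically do; the actual constructor may differ.

Text[] entries = new Text[] {
        new Text("true"),  // [0] capStdOut: capture standard output
        new Text("false"), // [1] capStdErr: do not capture standard error
        new Text("1000"),  // [2] timeout in ms
        new Text("cal"),   // [3] the command itself
        new Text("6"),     // [4] first argument: the month
        new Text("2015")   // [5] second argument: the year
};
TextArrayWritable value = new TextArrayWritable(entries); // hypothetical constructor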

From source file:main.okapi.cf.eval.RankEvaluationComputation.java

License:Apache License

public void sampleIrrelevantEdges(Vertex<CfLongId, FloatMatrixWritable, BooleanWritable> vertex) {
    if (vertex.getId().isUser()) { // only users
        Iterable<Edge<CfLongId, BooleanWritable>> edges = vertex.getEdges();
        HashSet<CfLongId> relevant = new HashSet<CfLongId>();
        for (Edge<CfLongId, BooleanWritable> e : edges) {
            relevant.add(e.getTargetVertexId());
        }
        for (int i = 0; i < numberSamples; i++) {
            CfLongId random = getRandomItemId(relevant);
            vertex.addEdge(EdgeFactory.create(random, new BooleanWritable(false)));
        }
    }
}

From source file:net.mooncloud.hadoop.hive.ql.udf.UDFRSAVerify.java

License:Apache License

public BooleanWritable evaluate(Text n, Text sign, Text publicKey) {
    if (n == null || sign == null || publicKey == null) {
        return null;
    }

    try {
        byte[] publicKeybytes = new byte[publicKey.getLength()];
        System.arraycopy(publicKey.getBytes(), 0, publicKeybytes, 0, publicKey.getLength());
        byte[] publicKeydecoded = Base64.decodeBase64(publicKeybytes);

        byte[] signbytes = new byte[sign.getLength()];
        System.arraycopy(sign.getBytes(), 0, signbytes, 0, sign.getLength());
        byte[] signdecoded = Base64.decodeBase64(signbytes);

        // Copy n's valid bytes as well; Text.getBytes() can return a backing
        // array that is longer than getLength().
        byte[] nbytes = new byte[n.getLength()];
        System.arraycopy(n.getBytes(), 0, nbytes, 0, n.getLength());
        result = new BooleanWritable(RSAUtils.verify(nbytes, publicKeydecoded, signdecoded));
    } catch (Exception e) {
        e.printStackTrace();
    }
    return result;
}

From source file:net.mooncloud.hadoop.hive.ql.udf.UDFRSAVerify.java

License:Apache License

public BooleanWritable evaluate(BytesWritable b, BytesWritable sign, BytesWritable publicKey) {
    if (b == null || sign == null || publicKey == null) {
        return null;
    }

    try {
        // copyBytes() trims each backing array to its valid length.
        result = new BooleanWritable(RSAUtils.verify(b.copyBytes(), publicKey.copyBytes(), sign.copyBytes()));
    } catch (Exception e) {
        e.printStackTrace();
    }
    return result;
}

From source file:oracle.kv.hadoop.hive.table.TableBooleanObjectInspector.java

License:Open Source License

@Override
public Object getPrimitiveWritableObject(Object o) {
    return o == null ? null : new BooleanWritable(get(o));
}

From source file:org.apache.avro.hadoop.io.TestAvroDatumConverterFactory.java

License:Apache License

@Test
public void testConvertBooleanWritable() {
    AvroDatumConverter<BooleanWritable, Boolean> converter = mFactory.create(BooleanWritable.class);
    assertEquals(true, converter.convert(new BooleanWritable(true)).booleanValue());
}

From source file:org.apache.flink.hadoopcompatibility.mapred.record.datatypes.DefaultFlinkTypeConverter.java

License:Apache License

@SuppressWarnings("unchecked")
private <T> T convert(Record flinkType, int pos, Class<T> hadoopType) {
    if (hadoopType == LongWritable.class) {
        return (T) new LongWritable((flinkType.getField(pos, LongValue.class)).getValue());
    }
    if (hadoopType == org.apache.hadoop.io.Text.class) {
        return (T) new Text((flinkType.getField(pos, StringValue.class)).getValue());
    }
    if (hadoopType == org.apache.hadoop.io.IntWritable.class) {
        return (T) new IntWritable((flinkType.getField(pos, IntValue.class)).getValue());
    }
    if (hadoopType == org.apache.hadoop.io.FloatWritable.class) {
        return (T) new FloatWritable((flinkType.getField(pos, FloatValue.class)).getValue());
    }
    if (hadoopType == org.apache.hadoop.io.DoubleWritable.class) {
        return (T) new DoubleWritable((flinkType.getField(pos, DoubleValue.class)).getValue());
    }
    if (hadoopType == org.apache.hadoop.io.BooleanWritable.class) {
        return (T) new BooleanWritable((flinkType.getField(pos, BooleanValue.class)).getValue());
    }
    if (hadoopType == org.apache.hadoop.io.ByteWritable.class) {
        return (T) new ByteWritable((flinkType.getField(pos, ByteValue.class)).getValue());
    }

    throw new RuntimeException(
            "Unable to convert Flink type (" + flinkType.getClass().getCanonicalName() + ") to Hadoop.");
}

From source file:org.apache.giraph.aggregators.BooleanAndAggregator.java

License:Apache License

@Override
public BooleanWritable createInitialValue() {
    return new BooleanWritable(true);
}

From source file:org.apache.giraph.aggregators.BooleanOrAggregator.java

License:Apache License

@Override
public BooleanWritable createInitialValue() {
    return new BooleanWritable(false);
}
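
The two initial values are the identity elements of their operators: true is the identity for AND (true && x == x) and false is the identity for OR (false || x == x), so an empty aggregation does not bias the result. A sketch of the matching aggregate step, as an assumption about the implementation rather than Giraph's verbatim source:

@Override
public void aggregate(BooleanWritable value) {
    // AND variant; the OR aggregator would combine with || instead.
    getAggregatedValue().set(getAggregatedValue().get() && value.get());
}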