Example usage for org.apache.hadoop.io.IntWritable.get()

Introduction

This page collects example usages of org.apache.hadoop.io.IntWritable.get() from open-source projects.

Prototype

public int get() 

Document

Return the value of this IntWritable.
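
A minimal, self-contained sketch (not drawn from the projects below) showing get() alongside set() and the Writable serialization round-trip in which get() is typically used:

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.IntWritable;

public class IntWritableGetDemo {
    public static void main(String[] args) throws Exception {
        IntWritable iw = new IntWritable(42);
        System.out.println(iw.get()); // 42 -- read the boxed int back

        iw.set(7); // replace the value in place
        System.out.println(iw.get()); // 7

        // get() commonly follows deserialization via readFields():
        DataOutputBuffer out = new DataOutputBuffer();
        iw.write(out); // serialize the current value
        DataInputBuffer in = new DataInputBuffer();
        in.reset(out.getData(), out.getLength());
        IntWritable copy = new IntWritable();
        copy.readFields(in); // deserialize into a fresh instance
        System.out.println(copy.get()); // 7
    }
}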

Usage

From source file: org.apache.ranger.authorization.hive.udf.RangerUdfMask.java

License: Apache License

int getCharArg(ObjectInspector[] arguments, int index, int defaultValue) {
    int ret = defaultValue;

    ObjectInspector arg = (arguments != null && arguments.length > index) ? arguments[index] : null;

    if (arg != null) {
        if (arg instanceof WritableConstantIntObjectInspector) {
            IntWritable value = ((WritableConstantIntObjectInspector) arg).getWritableConstantValue();

            if (value != null) {
                ret = value.get();
            }
        } else if (arg instanceof WritableConstantLongObjectInspector) {
            LongWritable value = ((WritableConstantLongObjectInspector) arg).getWritableConstantValue();

            if (value != null) {
                ret = (int) value.get();
            }
        } else if (arg instanceof WritableConstantShortObjectInspector) {
            ShortWritable value = ((WritableConstantShortObjectInspector) arg).getWritableConstantValue();

            if (value != null) {
                ret = value.get();
            }
        } else if (arg instanceof ConstantObjectInspector) {
            Object value = ((ConstantObjectInspector) arg).getWritableConstantValue();

            if (value != null) {
                String strValue = value.toString();

                if (strValue != null && strValue.length() > 0) {
                    ret = strValue.charAt(0);
                }
            }
        }
    }

    return ret;
}

From source file: org.apache.ranger.authorization.hive.udf.RangerUdfMask.java

License: Apache License

int getIntArg(ObjectInspector[] arguments, int index, int defaultValue) {
    int ret = defaultValue;

    ObjectInspector arg = (arguments != null && arguments.length > index) ? arguments[index] : null;

    if (arg != null) {
        if (arg instanceof WritableConstantIntObjectInspector) {
            IntWritable value = ((WritableConstantIntObjectInspector) arg).getWritableConstantValue();

            if (value != null) {
                ret = value.get();
            }
        } else if (arg instanceof WritableConstantLongObjectInspector) {
            LongWritable value = ((WritableConstantLongObjectInspector) arg).getWritableConstantValue();

            if (value != null) {
                ret = (int) value.get();
            }
        } else if (arg instanceof WritableConstantShortObjectInspector) {
            ShortWritable value = ((WritableConstantShortObjectInspector) arg).getWritableConstantValue();

            if (value != null) {
                ret = value.get();
            }
        } else if (arg instanceof ConstantObjectInspector) {
            Object value = ((ConstantObjectInspector) arg).getWritableConstantValue();

            if (value != null) {
                String strValue = value.toString();

                if (strValue != null && strValue.length() > 0) {
                    ret = Integer.parseInt(value.toString());
                }
            }
        }
    }

    return ret;
}
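
For context, a sketch of how such constant ObjectInspectors arise and what getIntArg() extracts from them. This assumes Hive's PrimitiveObjectInspectorFactory API (0.13+); the class name is illustrative and not part of the Ranger source:

import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableConstantIntObjectInspector;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
import org.apache.hadoop.io.IntWritable;

public class ConstantIntArgSketch {
    public static void main(String[] args) {
        // Hive passes a literal UDF argument (e.g. the 4 in mask(col, 4))
        // as a constant ObjectInspector; build one the same way here.
        ObjectInspector constFour = PrimitiveObjectInspectorFactory
                .getPrimitiveWritableConstantObjectInspector(TypeInfoFactory.intTypeInfo,
                        new IntWritable(4));

        // The same extraction getIntArg() performs in its int branch:
        IntWritable value = ((WritableConstantIntObjectInspector) constFour)
                .getWritableConstantValue();
        System.out.println(value.get()); // 4
    }
}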

From source file: org.apache.sysml.runtime.matrix.sort.ReadWithZeros.java

License: Apache License

public boolean readNextKeyValuePairs(DoubleWritable readKey, IntWritable readValue) throws IOException {
    boolean ret = true;

    try {
        if (contain0s && justFound0) {
            readKey.set(keyAfterZero.get());
            readValue.set(valueAfterZero.get());
            contain0s = false;
        } else {
            readKey.readFields(currentStream);
            readValue.readFields(currentStream);
        }
    } catch (EOFException e) {
        // case in which zero is the maximum value in the matrix. 
        // The zero value from the last entry is not present in the input sorted matrix, but needs to be accounted for.
        if (contain0s && !justFound0) {
            justFound0 = true;
            readKey.set(0);
            readValue.set((int) numZeros);
        } else {
            ret = false;
        }
    }

    if (contain0s && !justFound0 && readKey.get() >= 0) {
        justFound0 = true;
        keyAfterZero.set(readKey.get());
        valueAfterZero.set(readValue.get());
        readKey.set(0);
        readValue.set((int) numZeros);
    }

    return ret;
}

From source file: org.apache.sysml.runtime.util.MapReduceTool.java

License: Apache License

public static double[] pickValueWeight(String dir, MetaDataNumItemsByEachReducer metadata, double p,
        boolean average) throws IOException {
    long[] counts = metadata.getNumItemsArray();
    long[] ranges = new long[counts.length];
    ranges[0] = counts[0];
    for (int i = 1; i < counts.length; i++)
        ranges[i] = ranges[i - 1] + counts[i];

    long total = ranges[ranges.length - 1];

    // do averaging only if it is asked for and the total weight is even
    average = average && (total % 2 == 0);

    int currentPart = 0;
    double cum_weight = 0;
    long pos = (long) Math.ceil(total * p);
    while (ranges[currentPart] < pos) {
        currentPart++;
        cum_weight += ranges[currentPart];
    }
    int offset;
    if (currentPart > 0)
        offset = (int) (pos - ranges[currentPart - 1] - 1);
    else
        offset = (int) pos - 1;

    Path path = new Path(dir);
    FileSystem fs = IOUtilFunctions.getFileSystem(path);
    FileStatus[] files = fs.listStatus(path);
    Path fileToRead = null;
    for (FileStatus file : files)
        if (file.getPath().toString().endsWith(Integer.toString(currentPart))) {
            fileToRead = file.getPath();
            break;
        }

    if (fileToRead == null)
        throw new RuntimeException("cannot read partition " + currentPart);

    int buffsz = 64 * 1024;
    DoubleWritable readKey = new DoubleWritable();
    IntWritable readValue = new IntWritable();
    FSDataInputStream currentStream = null;
    double ret = -1;
    try {
        currentStream = fs.open(fileToRead, buffsz);

        boolean contain0s = false;
        long numZeros = 0;
        if (currentPart == metadata.getPartitionOfZero()) {
            contain0s = true;
            numZeros = metadata.getNumberOfZero();
        }
        ReadWithZeros reader = new ReadWithZeros(currentStream, contain0s, numZeros);

        int numRead = 0;
        while (numRead <= offset) {
            reader.readNextKeyValuePairs(readKey, readValue);
            numRead += readValue.get();
            cum_weight += readValue.get();
        }

        ret = readKey.get();
        if (average) {
            if (numRead <= offset + 1) {
                reader.readNextKeyValuePairs(readKey, readValue);
                cum_weight += readValue.get();
                ret = (ret + readKey.get()) / 2;
            }
        }
    } finally {
        IOUtilFunctions.closeSilently(currentStream);
    }
    return new double[] { ret, (average ? -1 : readValue.get()), (average ? -1 : cum_weight) };
}

From source file: org.apache.tajo.engine.function.hiveudf.HiveUDFtest.java

License: Apache License

public Text evaluate(Text str, IntWritable num) {
    String origin = str.toString();

    for (int i = 0; i < num.get() - 1; i++) {
        origin += origin;
    }

    return new Text(origin);
}

From source file: org.apache.tez.engine.common.shuffle.impl.ShuffleScheduler.java

License: Apache License

public synchronized void copyFailed(TezTaskAttemptID mapId, MapHost host, boolean readError) {
    host.penalize();
    int failures = 1;
    if (failureCounts.containsKey(mapId)) {
        IntWritable x = failureCounts.get(mapId);
        x.set(x.get() + 1);
        failures = x.get();
    } else {
        failureCounts.put(mapId, new IntWritable(1));
    }
    String hostname = host.getHostName();
    if (hostFailures.containsKey(hostname)) {
        IntWritable x = hostFailures.get(hostname);
        x.set(x.get() + 1);
    } else {
        hostFailures.put(hostname, new IntWritable(1));
    }
    if (failures >= abortFailureLimit) {
        try {
            throw new IOException(failures + " failures downloading " + mapId);
        } catch (IOException ie) {
            reporter.reportException(ie);
        }
    }

    checkAndInformJobTracker(failures, mapId, readError);

    checkReducerHealth();

    long delay = (long) (INITIAL_PENALTY * Math.pow(PENALTY_GROWTH_RATE, failures));

    penalties.add(new Penalty(host, delay));

    failedShuffleCounter.increment(1);
}

From source file: org.apache.tez.runtime.library.common.shuffle.impl.ShuffleScheduler.java

License: Apache License

public synchronized void copyFailed(InputAttemptIdentifier srcAttempt, MapHost host, boolean readError) {
    host.penalize();
    int failures = 1;
    if (failureCounts.containsKey(srcAttempt)) {
        IntWritable x = failureCounts.get(srcAttempt);
        x.set(x.get() + 1);
        failures = x.get();
    } else {
        failureCounts.put(srcAttempt, new IntWritable(1));
    }
    String hostname = host.getHostName();
    if (hostFailures.containsKey(hostname)) {
        IntWritable x = hostFailures.get(hostname);
        x.set(x.get() + 1);
    } else {
        hostFailures.put(hostname, new IntWritable(1));
    }
    if (failures >= abortFailureLimit) {
        IOException ioe = new IOException(failures + " failures downloading "
                + TezRuntimeUtils.getTaskAttemptIdentifier(inputContext.getSourceVertexName(),
                        srcAttempt.getInputIdentifier().getSrcTaskIndex(), srcAttempt.getAttemptNumber()));
        ioe.fillInStackTrace();
        shuffle.reportException(ioe);
    }

    checkAndInformJobTracker(failures, srcAttempt, readError);

    checkReducerHealth();

    long delay = (long) (INITIAL_PENALTY * Math.pow(PENALTY_GROWTH_RATE, failures));

    penalties.add(new Penalty(host, delay));

    failedShuffleCounter.increment(1);
}

From source file: org.apache.tez.runtime.library.common.shuffle.orderedgrouped.ShuffleScheduler.java

License: Apache License

public synchronized void copyFailed(InputAttemptIdentifier srcAttempt, MapHost host, boolean readError,
        boolean connectError) {
    host.penalize();
    int failures = 1;
    if (failureCounts.containsKey(srcAttempt)) {
        IntWritable x = failureCounts.get(srcAttempt);
        x.set(x.get() + 1);
        failures = x.get();
    } else {
        failureCounts.put(srcAttempt, new IntWritable(1));
    }
    String hostPort = host.getHostIdentifier();
    // TODO TEZ-922 hostFailures isn't really used for anything. Factor it into error
    // reporting / potential blacklisting of hosts.
    if (hostFailures.containsKey(hostPort)) {
        IntWritable x = hostFailures.get(hostPort);
        x.set(x.get() + 1);
    } else {
        hostFailures.put(hostPort, new IntWritable(1));
    }
    if (failures >= abortFailureLimit) {
        // This task has seen too many fetch failures - report it as failed. The
        // AM may retry it if max failures has not been reached.

        // Between the task and the AM - someone needs to determine who is at
        // fault. If there's enough errors seen on the task, before the AM informs
        // it about source failure, the task considers itself to have failed and
        // allows the AM to re-schedule it.
        IOException ioe = new IOException(failures + " failures downloading "
                + TezRuntimeUtils.getTaskAttemptIdentifier(inputContext.getSourceVertexName(),
                        srcAttempt.getInputIdentifier().getInputIndex(), srcAttempt.getAttemptNumber()));
        ioe.fillInStackTrace();
        // Shuffle knows how to deal with failures post shutdown via the onFailure hook
        shuffle.reportException(ioe);
    }

    failedShuffleCounter.increment(1);
    checkAndInformAM(failures, srcAttempt, readError, connectError);

    checkReducerHealth();

    long delay = (long) (INITIAL_PENALTY * Math.pow(PENALTY_GROWTH_RATE, failures));

    penalties.add(new Penalty(host, delay));
}
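
All three ShuffleScheduler variants above use IntWritable as a cheap mutable counter inside a map, updating it in place with set(get() + 1) rather than re-boxing an Integer on every increment. A minimal sketch of that idiom (names are illustrative, not taken from Tez):

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.io.IntWritable;

public class MutableCounterSketch {
    private final Map<String, IntWritable> failureCounts = new HashMap<>();

    // Increment the per-host counter, creating it on the first failure.
    int recordFailure(String host) {
        IntWritable x = failureCounts.get(host);
        if (x == null) {
            x = new IntWritable(0);
            failureCounts.put(host, x);
        }
        x.set(x.get() + 1); // mutate in place; no new object per update
        return x.get();
    }

    public static void main(String[] args) {
        MutableCounterSketch counters = new MutableCounterSketch();
        counters.recordFailure("host-a");
        System.out.println(counters.recordFailure("host-a")); // 2
    }
}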

From source file: org.apache.tez.runtime.library.common.sort.impl.TestTezMerger.java

License: Apache License

private void merge(List<Path> pathList, int mergeFactor, RawComparator rc) throws Exception {
    //Merge datasets
    TezMerger merger = new TezMerger();
    TezRawKeyValueIterator records = merger.merge(defaultConf, localFs, IntWritable.class, LongWritable.class,
            null, false, 0, 1024, pathList.toArray(new Path[pathList.size()]), true, mergeFactor,
            new Path(workDir, "tmp_" + System.nanoTime()), ((rc == null) ? comparator : rc), new Reporter(),
            null, null, null, new Progress());

    //Verify the merged data is correct
    Map<Integer, Integer> dataMap = Maps.newHashMap();
    int pk = -1;
    while (records.next()) {
        DataInputBuffer key = records.getKey();
        DataInputBuffer value = records.getValue();

        IntWritable k = new IntWritable();
        k.readFields(key);
        LongWritable v = new LongWritable();
        v.readFields(value);

        if (records.isSameKey()) {
            LOG.info("\tSame Key : key=" + k.get() + ", val=" + v.get());
            //More than one key should be present in the source data
            assertTrue(verificationDataSet.get(k.get()).size() > 1);
            //Ensure this is same as the previous key we saw
            assertTrue("previousKey=" + pk + ", current=" + k.get(), pk == k.get());
        } else {
            LOG.info("key=" + k.get() + ", val=" + v.get());
        }
        pk = k.get();

        int keyCount = (dataMap.containsKey(k.get())) ? (dataMap.get(k.get()) + 1) : 1;
        dataMap.put(k.get(), keyCount);
    }

    //Verify if the number of distinct entries is the same in source and the test
    assertTrue(
            "dataMap=" + dataMap.keySet().size() + ", verificationSet=" + verificationDataSet.keySet().size(),
            dataMap.keySet().size() == verificationDataSet.keySet().size());

    //Verify with source data
    for (Integer key : verificationDataSet.keySet()) {
        assertTrue(
                "Data size for " + key + " not matching with source; dataSize:" + dataMap.get(key).intValue()
                        + ", source:" + verificationDataSet.get(key).size(),
                dataMap.get(key).intValue() == verificationDataSet.get(key).size());
    }

    //Verify if every key has the same number of repeated items in the source dataset as well
    for (Map.Entry<Integer, Integer> entry : dataMap.entrySet()) {
        assertTrue(entry.getKey() + "", verificationDataSet.get(entry.getKey()).size() == entry.getValue());
    }

    LOG.info("******************");
    verificationDataSet.clear();
}

From source file: org.apache.tez.runtime.library.common.sort.impl.TestTezMerger.java

License: Apache License

/**
 * Generate data set for ifile.  Create repeated keys if needed.
 *
 * @param keyCount    approximate number of keys to be created
 * @param repeatCount number of times a key should be repeated
 * @return the generated data set of key/value pairs
 */
static TreeMultimap<Integer, Long> createDataForIFile(int keyCount, int repeatCount) {
    TreeMultimap<Integer, Long> dataSet = TreeMultimap.create();
    Random rnd = new Random();
    for (int i = 0; i < keyCount; i++) {
        if (repeatCount > 0 && (rnd.nextInt(keyCount) % 2 == 0)) {
            //repeat this key
            for (int j = 0; j < repeatCount; j++) {
                IntWritable key = new IntWritable(rnd.nextInt(keyCount));
                LongWritable value = new LongWritable(System.nanoTime());
                dataSet.put(key.get(), value.get());
            }
            i += repeatCount;
            LOG.info("Repeated key count=" + (repeatCount));
        } else {
            IntWritable key = new IntWritable(rnd.nextInt(keyCount));
            LongWritable value = new LongWritable(System.nanoTime());
            dataSet.put(key.get(), value.get());
        }
    }
    for (Integer key : dataSet.keySet()) {
        for (Long value : dataSet.get(key)) {
            LOG.info("Key=" + key + ", val=" + value);
        }
    }
    LOG.info("=============");
    return dataSet;
}