Example usage for org.apache.hadoop.io ArrayWritable get

Introduction

This page collects example usages of org.apache.hadoop.io.ArrayWritable#get() drawn from open-source projects.

Prototype

public Writable[] get() 
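
Before the project snippets below, here is a minimal, self-contained sketch (class and variable names are illustrative, not taken from any of the projects below): get() returns the backing Writable[], and each element must be cast to its concrete type before use.

import org.apache.hadoop.io.ArrayWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Writable;

public class ArrayWritableGetExample {
    public static void main(String[] args) {
        // An ArrayWritable is typed by the element class it carries.
        ArrayWritable values = new ArrayWritable(IntWritable.class,
                new Writable[] { new IntWritable(1), new IntWritable(2), new IntWritable(3) });

        // get() hands back the stored Writable[]; cast each element
        // to its concrete type to read the underlying value.
        for (Writable w : values.get()) {
            System.out.println(((IntWritable) w).get());
        }
    }
}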

Usage

From source file: io.apigee.lembos.mapreduce.converters.input.ArrayWritableConverter.java

License: Apache License

/**
 * Takes in an {@link ArrayWritable} and returns a {@link Scriptable} array.
 *
 * @param scope the JavaScript scope
 * @param writable the value to convert
 *
 * @return the {@link Scriptable} array equivalent
 */
@Override
public Object toJavaScript(final Scriptable scope, final ArrayWritable writable) {
    final Writable[] entries = writable.get();
    final Object[] jsArrayEntries = new Object[entries.length];

    for (int i = 0; i < entries.length; i++) {
        jsArrayEntries[i] = ConversionUtils.writableToJS(entries[i], scope);
    }

    return JavaScriptUtils.asArray(scope, jsArrayEntries);
}
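
The pattern to note: the converter iterates over the Writable[] returned by get() and converts each element individually, so nested Writable types are handled by ConversionUtils.writableToJS before the results are wrapped into a single JavaScript array.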

From source file: org.apache.carbondata.hive.CarbonHiveRecordReader.java

License: Apache License

@Override
public boolean next(Void aVoid, ArrayWritable value) throws IOException {
    if (carbonIterator.hasNext()) {
        Object obj = readSupport.readRow(carbonIterator.next());
        recordReaderCounter++;
        Writable[] objArray = (Writable[]) obj;
        Writable[] sysArray = new Writable[value.get().length];
        if (columnIds != null && columnIds.length > 0 && objArray.length == columnIds.length) {
            for (int i = 0; i < columnIds.length; i++) {
                sysArray[columnIds[i]] = objArray[i];
            }
            value.set(sysArray);
        } else {
            value.set(objArray);
        }
        return true;
    } else {
        return false;
    }
}
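
Here get() is used only for its length: the incoming value determines the width of the full output row, and columnIds maps each projected column back to its position in that row. When no projection applies, the deserialized row is stored as-is.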

From source file: org.apache.carbondata.hive.CarbonObjectInspector.java

License: Apache License

@Override
public Object getStructFieldData(final Object data, final StructField fieldRef) {
    if (data == null) {
        return null;
    }

    if (data instanceof ArrayWritable) {
        final ArrayWritable arr = (ArrayWritable) data;
        return arr.get()[((StructFieldImpl) fieldRef).getIndex()];
    }

    boolean isArray = !(data instanceof List);
    int listSize = isArray ? ((Object[]) data).length : ((List) data).size();
    int fieldID = fieldRef.getFieldID();
    return fieldID >= listSize ? null : (isArray ? ((Object[]) data)[fieldID] : ((List) data).get(fieldID));
}
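
When the row is backed by an ArrayWritable, the field lookup is a plain index into the array returned by get(); the List/Object[] branch is a fallback for rows that have already been deserialized into plain Java containers.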

From source file: org.apache.carbondata.hive.CarbonObjectInspector.java

License: Apache License

@Override
public List<Object> getStructFieldsDataAsList(final Object data) {
    if (data == null) {
        return null;
    }

    if (data instanceof ArrayWritable) {
        final ArrayWritable arr = (ArrayWritable) data;
        final Object[] arrWritable = arr.get();
        return new ArrayList<Object>(Arrays.asList(arrWritable));
    }

    throw new UnsupportedOperationException("Cannot inspect " + data.getClass().getCanonicalName());
}
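
Arrays.asList returns a fixed-size view of the Writable[] from get(), so the result is copied into a fresh ArrayList to hand callers a mutable list.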

From source file: org.apache.carbondata.hive.TestCarbonSerDe.java

License: Apache License

private void deserializeAndSerializeLazySimple(final CarbonHiveSerDe serDe, final ArrayWritable t)
        throws SerDeException {

    // Get the row structure
    final StructObjectInspector oi = (StructObjectInspector) serDe.getObjectInspector();

    // Deserialize
    final Object row = serDe.deserialize(t);
    Assert.assertEquals("deserialization gives the wrong object class", row.getClass(), ArrayWritable.class);
    Assert.assertEquals("size correct after deserialization", serDe.getSerDeStats().getRawDataSize(),
            t.get().length);
    Assert.assertEquals("deserialization gives the wrong object", t, row);

    // Serialize
    final ArrayWritable serializedArr = (ArrayWritable) serDe.serialize(row, oi);
    Assert.assertEquals("size correct after serialization", serDe.getSerDeStats().getRawDataSize(),
            serializedArr.get().length);
    Assert.assertTrue("serialized object should be equal to starting object",
            arrayWritableEquals(t, serializedArr));
}
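
Both size assertions compare the SerDe's reported raw data size against get().length, i.e. the number of top-level fields in the row, before and after the round trip.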

From source file: org.apache.carbondata.hive.TestCarbonSerDe.java

License: Apache License

private static boolean arrayWritableEquals(final ArrayWritable a1, final ArrayWritable a2) {
    final Writable[] a1Arr = a1.get();
    final Writable[] a2Arr = a2.get();

    if (a1Arr.length != a2Arr.length) {
        return false;
    }

    for (int i = 0; i < a1Arr.length; ++i) {
        if (a1Arr[i] instanceof ArrayWritable) {
            if (!(a2Arr[i] instanceof ArrayWritable)) {
                return false;
            }
            if (!arrayWritableEquals((ArrayWritable) a1Arr[i], (ArrayWritable) a2Arr[i])) {
                return false;
            }
        } else {
            if (!a1Arr[i].equals(a2Arr[i])) {
                return false;
            }
        }

    }
    return true;
}

From source file: org.apache.hama.bsp.TestCheckpoint.java

License: Apache License

private static void checkSuperstepMsgCount(PeerSyncClient syncClient,
        @SuppressWarnings("rawtypes") BSPPeer bspTask, BSPJob job, long step, long count) {

    ArrayWritable writableVal = new ArrayWritable(LongWritable.class);

    boolean result = syncClient.getInformation(
            syncClient.constructKey(job.getJobID(), "checkpoint", "" + bspTask.getPeerIndex()), writableVal);

    assertTrue(result);

    LongWritable superstepNo = (LongWritable) writableVal.get()[0];
    LongWritable msgCount = (LongWritable) writableVal.get()[1];

    assertEquals(step, superstepNo.get());
    assertEquals(count, msgCount.get());
}
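
The checkpoint value is read positionally: in this test's convention, get()[0] holds the superstep number and get()[1] the message count.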

From source file: org.apache.hama.bsp.TestZooKeeper.java

License: Apache License

@Test
public void testClearZKNodes() throws IOException, KeeperException, InterruptedException {
    final ZooKeeperSyncServerImpl server = new ZooKeeperSyncServerImpl();
    boolean done = false;
    try {
        server.init(configuration);
        ExecutorService executorService = Executors.newCachedThreadPool();
        executorService.submit(new Runnable() {
            @Override
            public void run() {
                try {
                    server.start();
                } catch (Exception e) {
                    e.printStackTrace();
                }
            }
        });

        executorService.awaitTermination(10, TimeUnit.SECONDS);

        String bspRoot = "/bsp";

        ZooKeeperSyncClientImpl peerClient = (ZooKeeperSyncClientImpl) SyncServiceFactory
                .getPeerSyncClient(configuration);

        ZKSyncBSPMasterClient masterClient = (ZKSyncBSPMasterClient) SyncServiceFactory
                .getMasterSyncClient(configuration);

        masterClient.init(configuration);

        Thread.sleep(100);

        Log.info("Created master and client sync clients");

        assertTrue(masterClient.hasKey(bspRoot));

        Log.info("BSP root exists");

        BSPJobID jobID = new BSPJobID("test1", 1);
        masterClient.registerJob(jobID.toString());
        TaskID taskId1 = new TaskID(jobID, 1);
        TaskID taskId2 = new TaskID(jobID, 2);

        TaskAttemptID task1 = new TaskAttemptID(taskId1, 1);
        TaskAttemptID task2 = new TaskAttemptID(taskId2, 1);

        int zkPort = BSPNetUtils.getFreePort(21815);
        configuration.setInt(Constants.PEER_PORT, zkPort);
        peerClient.init(configuration, jobID, task1);

        peerClient.registerTask(jobID, "hamanode1", 5000L, task1);
        peerClient.registerTask(jobID, "hamanode2", 5000L, task2);

        peerClient.storeInformation(peerClient.constructKey(jobID, "info", "level2"), new IntWritable(5), true,
                null);

        String[] names = peerClient.getAllPeerNames(task1.getJobID());

        Log.info("Found child count = " + names.length);

        assertEquals(2, names.length);

        Log.info("Passed the child count test");

        masterClient.addKey(masterClient.constructKey(jobID, "peer", "1"), true, null);
        masterClient.addKey(masterClient.constructKey(jobID, "peer", "2"), true, null);

        String[] peerChild = masterClient.getChildKeySet(masterClient.constructKey(jobID, "peer"), null);
        Log.info("Found child count = " + peerChild.length);

        assertEquals(2, peerChild.length);

        Log.info(" Peer name " + peerChild[0]);
        Log.info(" Peer name " + peerChild[1]);

        Log.info("Passed the child key set test");

        masterClient.deregisterJob(jobID.toString());
        Log.info(masterClient.constructKey(jobID));

        Thread.sleep(200);

        assertEquals(false, masterClient.hasKey(masterClient.constructKey(jobID)));

        Log.info("Passed the key presence test");

        boolean result = masterClient.getInformation(masterClient.constructKey(jobID, "info", "level3"),
                new IntWritable());

        assertEquals(false, result);

        Writable[] writableArr = new Writable[2];
        writableArr[0] = new LongWritable(3L);
        writableArr[1] = new LongWritable(5L);
        ArrayWritable arrWritable = new ArrayWritable(LongWritable.class);
        arrWritable.set(writableArr);
        masterClient.storeInformation(masterClient.constructKey(jobID, "info", "level3"), arrWritable, true,
                null);

        ArrayWritable valueHolder = new ArrayWritable(LongWritable.class);

        boolean getResult = masterClient.getInformation(masterClient.constructKey(jobID, "info", "level3"),
                valueHolder);

        assertTrue(getResult);

        assertEquals(arrWritable.get()[0], valueHolder.get()[0]);
        assertEquals(arrWritable.get()[1], valueHolder.get()[1]);

        Log.info("Passed array writable test");
        done = true;

    } catch (Exception e) {
        e.printStackTrace();

    } finally {
        server.stopServer();
    }
    assertEquals(true, done);
}
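
Note the round-trip check near the end: because ArrayWritable provides no equals() of its own, the stored and retrieved values are compared element by element through get().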

From source file: org.elasticsearch.hadoop.hive.EsSerDe.java

License: Apache License

@SuppressWarnings("unchecked")
static Object hiveFromWritable(TypeInfo type, Writable data, FieldAlias alias, boolean IS_ES_10) {
    if (data == null || data instanceof NullWritable) {
        return null;
    }

    switch (type.getCategory()) {
    case LIST: {// or ARRAY
        ListTypeInfo listType = (ListTypeInfo) type;
        TypeInfo listElementType = listType.getListElementTypeInfo();

        ArrayWritable aw = (ArrayWritable) data;

        List<Object> list = new ArrayList<Object>();
        for (Writable writable : aw.get()) {
            list.add(hiveFromWritable(listElementType, writable, alias, IS_ES_10));
        }

        return list;
    }

    case MAP: {
        MapTypeInfo mapType = (MapTypeInfo) type;
        Map<Writable, Writable> mw = (Map<Writable, Writable>) data;

        Map<Object, Object> map = new LinkedHashMap<Object, Object>();

        for (Entry<Writable, Writable> entry : mw.entrySet()) {
            map.put(hiveFromWritable(mapType.getMapKeyTypeInfo(), entry.getKey(), alias, IS_ES_10),
                    hiveFromWritable(mapType.getMapValueTypeInfo(), entry.getValue(), alias, IS_ES_10));
        }

        return map;
    }
    case STRUCT: {
        StructTypeInfo structType = (StructTypeInfo) type;
        List<String> names = structType.getAllStructFieldNames();
        List<TypeInfo> info = structType.getAllStructFieldTypeInfos();

        // return just the values
        List<Object> struct = new ArrayList<Object>();

        MapWritable map = (MapWritable) data;
        Text reuse = new Text();
        for (int index = 0; index < names.size(); index++) {
            String esAlias = alias.toES(names.get(index));
            if (IS_ES_10) {
                // check for multi-level alias
                Writable result = map;
                for (String level : StringUtils.tokenize(esAlias, ".")) {
                    reuse.set(level);
                    result = ((MapWritable) result).get(reuse);
                    if (result == null) {
                        break;
                    }
                }
                struct.add(hiveFromWritable(info.get(index), result, alias, IS_ES_10));
            } else {
                reuse.set(alias.toES(names.get(index)));
                struct.add(hiveFromWritable(info.get(index), map.get(reuse), alias, IS_ES_10));
            }
        }
        return struct;
    }

    case UNION: {
        throw new UnsupportedOperationException("union not yet supported");
    }

    case PRIMITIVE:
    default:
        // return as is
        return data;
    }
}
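
In the LIST case, get() drives the recursion: each element of the Writable[] is converted with the list's element TypeInfo, so arbitrarily nested arrays unwind naturally.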

From source file: org.huahinframework.core.io.Value.java

License: Apache License

/**
 * {@inheritDoc}
 */
@Override
public String toString() {
    StringBuilder sb = new StringBuilder();
    for (Writable w : writableMap.values()) {
        if (w instanceof ArrayWritable) {
            ArrayWritable aw = (ArrayWritable) w;
            for (Writable x : aw.get()) {
                sb.append(x.toString()).append(StringUtil.TAB);
            }
        } else if (w instanceof MapWritable) {
            MapWritable mw = (MapWritable) w;
            for (Entry<Writable, Writable> entry : mw.entrySet()) {
                sb.append(entry.getKey().toString()).append(StringUtil.TAB)
                        .append(entry.getValue().toString()).append(StringUtil.TAB);
            }
        } else {
            sb.append(w.toString()).append("\t");
        }
    }
    return sb.substring(0, sb.length() - 1);
}
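
toString() flattens every value, including the elements of any ArrayWritable returned by get(), into a single tab-separated line; the final substring call trims the trailing tab.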