List of usage examples for org.apache.hadoop.io.BytesWritable.setCapacity
public void setCapacity(int new_cap)
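setCapacity(int new_cap) resizes the writable's backing byte array while preserving the existing bytes up to the new capacity; the logical length is unchanged unless the new capacity is smaller, in which case the length is truncated to match. Before the per-project examples, here is a minimal standalone sketch of the capacity/length distinction they all rely on (the class name SetCapacityDemo is ours, not Hadoop's):

import org.apache.hadoop.io.BytesWritable;

public class SetCapacityDemo {
    public static void main(String[] args) {
        BytesWritable b = new BytesWritable(new byte[] { 3, 5 }); // length 2, capacity 2
        b.setCapacity(11);                        // grows the backing array, preserving the old bytes
        System.out.println(b.getLength());        // 2  (logical size is unchanged)
        System.out.println(b.getBytes().length);  // 11 (getBytes() returns the full backing array)
        b.setCapacity(b.getLength());             // trim so getBytes().length == getLength()
        System.out.println(b.getBytes().length);  // 2
    }
}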
From source file:com.dasasian.chok.lucene.integration.LuceneClientTest.java
License:Apache License
@Test
public void testGetBinaryDetails() throws Exception {
    File index = temporaryFolder.newFolder("indexWithBinaryData");
    File indexShard = new File(index, "binaryShard");
    if (!indexShard.mkdirs()) {
        throw new RuntimeException("Unable to create directory " + indexShard.getAbsolutePath());
    }
    String textFieldName = "textField";
    String binaryFieldName = "binaryField";
    String textFieldContent = "sample text";
    byte[] bytesFieldContent = new byte[] { 1, 2, 3 };
    IndexWriter indexWriter = new IndexWriter(FSDirectory.open(indexShard),
            new StandardAnalyzer(Version.LUCENE_30), true, MaxFieldLength.UNLIMITED);
    Document document = new Document();
    document.add(new Field(binaryFieldName, bytesFieldContent, Store.YES));
    document.add(new Field(textFieldName, textFieldContent, Store.NO, Index.ANALYZED));
    indexWriter.addDocument(document);
    indexWriter.close(true);

    DeployClient deployClient = new DeployClient(miniCluster.getProtocol());
    IndexState indexState = deployClient.addIndex(index.getName(), index.getAbsolutePath(), 1).joinDeployment();
    assertEquals(IndexState.DEPLOYED, indexState);

    LuceneClient client = new LuceneClient(miniCluster.createInteractionProtocol());
    final Query query = new QueryParser(Version.LUCENE_30, "", new KeywordAnalyzer())
            .parse(textFieldName + ": " + textFieldContent);
    final Hits hits = client.search(query, new String[] { index.getName() }, 10);
    assertNotNull(hits);
    assertEquals(1, hits.getHits().size());

    final Hit hit = hits.getHits().get(0);
    final MapWritable details = client.getDetails(hit);
    final Set<Writable> keySet = details.keySet();
    assertEquals(1, keySet.size());
    final Writable writable = details.get(new Text(binaryFieldName));
    assertNotNull(writable);
    assertThat(writable, instanceOf(BytesWritable.class));
    BytesWritable bytesWritable = (BytesWritable) writable;
    // getBytes() returns the full backing array, so trim capacity to the valid length first
    bytesWritable.setCapacity(bytesWritable.getLength());
    assertArrayEquals(bytesFieldContent, bytesWritable.getBytes());
    client.close();
}
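The setCapacity(bytesWritable.getLength()) call above shrinks the backing array to exactly the valid bytes so that getBytes() can be compared directly with assertArrayEquals. A non-mutating alternative, sketched under the assumption of a Hadoop release that ships BytesWritable.copyBytes() (Hadoop 2.x and later):

// Copy only the valid bytes instead of trimming the writable in place.
byte[] exact = java.util.Arrays.copyOf(bytesWritable.getBytes(), bytesWritable.getLength());
assertArrayEquals(bytesFieldContent, exact);
// or, where copyBytes() is available:
assertArrayEquals(bytesFieldContent, bytesWritable.copyBytes());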
From source file:com.lucidworks.hadoop.utils.ZipFileRecordReader.java
License:Apache License
/**
 * Each ZipEntry is decompressed and readied for the Mapper. If the
 * ZipFileInputFormat has been set to Lenient (not the default), certain
 * exceptions will be gracefully ignored to prevent a larger job from failing.
 */
@Override
public boolean next(Text key, BytesWritable value) throws IOException {
    ZipEntry entry = null;
    try {
        entry = zip.getNextEntry();
    } catch (Throwable e) {
        if (!ZipFileInputFormat.getLenient()) {
            throw new RuntimeException(e);
        }
    }
    // Sanity check
    if (entry == null) {
        processed = true;
        return false;
    }
    // Filename
    key.set(new Text(entry.getName()));
    byte[] bufferOut = null;
    int cummulativeBytesRead = 0;
    while (true) {
        int bytesRead = 0;
        byte[] bufferIn = new byte[8192];
        try {
            bytesRead = zip.read(bufferIn, 0, bufferIn.length);
        } catch (Throwable e) {
            if (!ZipFileInputFormat.getLenient()) {
                throw new RuntimeException(e);
            }
            return false;
        }
        if (bytesRead > 0) {
            byte[] tmp = head(bufferIn, bytesRead);
            if (cummulativeBytesRead == 0) {
                bufferOut = tmp;
            } else {
                bufferOut = add(bufferOut, tmp);
            }
            cummulativeBytesRead += bytesRead;
        } else {
            break;
        }
    }
    try {
        zip.closeEntry();
    } catch (IOException e) {
        if (!ZipFileInputFormat.getLenient()) {
            throw new RuntimeException(e);
        }
    }
    // Uncompressed contents
    if (bufferOut != null) {
        value.setCapacity(bufferOut.length);
        value.set(bufferOut, 0, bufferOut.length);
    } else {
        // Returning false here would stop the reader from processing any further
        // records, so log the empty entry and continue instead.
        log.warn("bufferOut is null for " + key);
    }
    return true;
}
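Calling value.setCapacity(bufferOut.length) before value.set(...) gives the writable an exact-fit buffer; left to grow on its own, BytesWritable.setSize over-allocates by roughly 1.5x when it must expand, which wastes memory on large zip entries. A minimal sketch of that idiom (loadEntryBytes() is a hypothetical stand-in for the read loop above):

BytesWritable value = new BytesWritable();
byte[] payload = loadEntryBytes();      // hypothetical helper standing in for the zip-read loop
value.setCapacity(payload.length);      // exact-fit allocation, avoids the ~1.5x growth heuristic
value.set(payload, 0, payload.length);  // afterwards getBytes().length == getLength()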
From source file:crunch.MaxTemperature.java
License:Apache License
@Test
public void test() throws IOException {
    // vv BytesWritableTest
    BytesWritable b = new BytesWritable(new byte[] { 3, 5 });
    byte[] bytes = serialize(b);
    assertThat(StringUtils.byteToHexString(bytes), is("000000020305"));
    // ^^ BytesWritableTest
    // vv BytesWritableTest-Capacity
    b.setCapacity(11);
    assertThat(b.getLength(), is(2));
    assertThat(b.getBytes().length, is(11));
    // ^^ BytesWritableTest-Capacity
}
From source file:io.aos.hdfs.BytesWritableTest.java
License:Apache License
@Test
public void test() throws IOException {
    // vv BytesWritableTest
    BytesWritable b = new BytesWritable(new byte[] { 3, 5 });
    byte[] bytes = serialize(b);
    assertThat(StringUtils.byteToHexString(bytes), is("000000020305"));
    // ^^ BytesWritableTest
    // vv BytesWritableTest-Capacity
    b.setCapacity(11);
    assertThat(b.getLength(), is(2));
    assertThat(b.getBytes().length, is(11));
    // ^^ BytesWritableTest-Capacity
}
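Both book tests above grow the capacity and show that the logical length stays at 2 while the backing array becomes 11 bytes. The complementary case, shrinking below the current length, truncates the logical size as well; a short sketch of that behavior (our addition, in the same Hamcrest style):

BytesWritable b = new BytesWritable(new byte[] { 3, 5, 7 });
b.setCapacity(1);                      // capacity < length, so the length is cut to 1
assertThat(b.getLength(), is(1));
assertThat(b.getBytes().length, is(1));
assertThat(b.getBytes()[0], is((byte) 3)); // surviving bytes are preserved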
From source file:net.sf.katta.integrationTest.lib.lucene.LuceneClientTest.java
License:Apache License
@Test
public void testGetBinaryDetails() throws Exception {
    File index = _temporaryFolder.newFolder("indexWithBinaryData");
    String textFieldName = "textField";
    String binaryFieldName = "binaryField";
    String textFieldContent = "sample text";
    byte[] bytesFieldContent = new byte[] { 1, 2, 3 };
    IndexWriter indexWriter = new IndexWriter(FSDirectory.open(index),
            new StandardAnalyzer(Version.LUCENE_35), true, MaxFieldLength.UNLIMITED);
    Document document = new Document();
    document.add(new Field(binaryFieldName, bytesFieldContent, Store.YES));
    document.add(new Field(textFieldName, textFieldContent, Store.NO, Index.ANALYZED));
    indexWriter.addDocument(document);
    indexWriter.optimize();
    indexWriter.close();

    DeployClient deployClient = new DeployClient(_miniCluster.getProtocol());
    IndexState indexState = deployClient.addIndex(index.getName(), index.getParentFile().getAbsolutePath(), 1)
            .joinDeployment();
    assertEquals(IndexState.DEPLOYED, indexState);

    ILuceneClient client = new LuceneClient(_miniCluster.getZkConfiguration());
    final Query query = new QueryParser(Version.LUCENE_35, "", new KeywordAnalyzer())
            .parse(textFieldName + ": " + textFieldContent);
    final Hits hits = client.search(query, new String[] { index.getName() }, 10);
    assertNotNull(hits);
    assertEquals(1, hits.getHits().size());

    final Hit hit = hits.getHits().get(0);
    final MapWritable details = client.getDetails(hit);
    final Set<Writable> keySet = details.keySet();
    assertEquals(1, keySet.size());
    final Writable writable = details.get(new Text(binaryFieldName));
    assertNotNull(writable);
    assertThat(writable, instanceOf(BytesWritable.class));
    BytesWritable bytesWritable = (BytesWritable) writable;
    // getBytes() returns the full backing array, so trim capacity to the valid length first
    bytesWritable.setCapacity(bytesWritable.getLength());
    assertArrayEquals(bytesFieldContent, bytesWritable.getBytes());
    client.close();
}
From source file:voldemort.store.readonly.mr.utils.HadoopUtils.java
License:Apache License
/**
 * Tag the BytesWritable with an integer at the END.
 */
public static void appendTag(BytesWritable writable, int tag) {
    int size = writable.getLength();
    if (writable.getCapacity() < size + 4) {
        // BytesWritable preserves the old values when the capacity grows
        writable.setCapacity(size + 4);
    }
    ByteUtils.writeInt(writable.getBytes(), tag, size);
    writable.setSize(size + 4);
}
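appendTag grows the buffer only when needed (setCapacity preserves the existing bytes), writes the 4-byte tag at the old end, and then extends the logical size over it. A hypothetical counterpart for reading the tag back; readTag is our name, not Voldemort's, and the sketch uses java.nio.ByteBuffer under the assumption that ByteUtils.writeInt stores the int big-endian (ByteBuffer's default byte order):

public static int readTag(BytesWritable writable) {
    int size = writable.getLength();
    // The tag occupies the last four bytes that appendTag wrote.
    return java.nio.ByteBuffer.wrap(writable.getBytes(), size - 4, 4).getInt();
}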