List of usage examples for org.apache.hadoop.hdfs DistributedFileSystem concat
@Override public void concat(Path trg, Path[] psrcs) throws IOException
From source file: org.apache.phoenix.end2end.IndexScrutinyToolIT.java
License: Apache License
/** * Tests that with the output to file option set, the scrutiny tool outputs invalid rows to file *//*from ww w . java2 s .c om*/ @Test public void testOutputInvalidRowsToFile() throws Exception { insertOneValid_OneBadVal_OneMissingTarget(); String[] argValues = getArgValues(schemaName, dataTableName, indexTableName, System.currentTimeMillis(), 10L, SourceTable.DATA_TABLE_SOURCE, true, OutputFormat.FILE, null); runScrutiny(argValues); // check the output files Path outputPath = CsvBulkImportUtil.getOutputPath(new Path(outputDir), dataTableFullName); DistributedFileSystem fs = getUtility().getDFSCluster().getFileSystem(); List<Path> paths = Lists.newArrayList(); Path firstPart = null; for (FileStatus outputFile : fs.listStatus(outputPath)) { if (outputFile.getPath().getName().startsWith("part")) { if (firstPart == null) { firstPart = outputFile.getPath(); } else { paths.add(outputFile.getPath()); } } } if (dataTableDdl.contains("SALT_BUCKETS")) { fs.concat(firstPart, paths.toArray(new Path[0])); } Path outputFilePath = firstPart; assertTrue(fs.exists(outputFilePath)); FSDataInputStream fsDataInputStream = fs.open(outputFilePath); BufferedReader reader = new BufferedReader(new InputStreamReader(fsDataInputStream)); TreeSet<String> lines = Sets.newTreeSet(); try { String line = null; while ((line = reader.readLine()) != null) { lines.add(line); } } finally { IOUtils.closeQuietly(reader); IOUtils.closeQuietly(fsDataInputStream); } Iterator<String> lineIterator = lines.iterator(); assertEquals("[2, name-2, " + new Timestamp(testTime).toString() + ", 95123]\t[2, name-2, " + new Timestamp(testTime).toString() + ", 9999]", lineIterator.next()); assertEquals("[3, name-3, " + new Timestamp(testTime).toString() + ", 95123]\tTarget row not found", lineIterator.next()); }