Example usage for java.io.IOError IOError(Throwable)

List of usage examples for the java.io.IOError(Throwable) constructor

Introduction

On this page you can find example usage of the java.io.IOError(Throwable) constructor.

Prototype

public IOError(Throwable cause) 

Document

Constructs a new instance of IOError with the specified cause.
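
The common pattern, repeated throughout the usage examples below, is to catch a checked IOException and rethrow it wrapped in an unchecked IOError. A minimal sketch (the file name example.txt is only illustrative):

import java.io.FileReader;
import java.io.IOError;
import java.io.IOException;

public class IOErrorExample {
    public static void main(String[] args) {
        try (FileReader reader = new FileReader("example.txt")) {
            // Work with the reader; any failure surfaces as an IOException
            System.out.println(reader.read());
        } catch (IOException e) {
            // Wrap the checked exception in an unchecked IOError and rethrow
            throw new IOError(e);
        }
    }
}

Because IOError extends Error rather than Exception, callers are not forced to declare or catch it; it signals a serious, usually unrecoverable I/O failure.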

Usage

From source file:com.bigdata.dastor.streaming.StreamOut.java

/**
 * Split out files for all tables on disk locally for each range and then stream them to the target endpoint.
 */
public static void transferRanges(InetAddress target, String tableName, Collection<Range> ranges,
        Runnable callback) {
    assert ranges.size() > 0;

    logger.debug("Beginning transfer process to " + target + " for ranges " + StringUtils.join(ranges, ", "));

    /*
     * (1) dump all the memtables to disk.
     * (2) anticompaction -- split out the keys in the range specified
     * (3) transfer the data.
    */
    try {
        Table table = Table.open(tableName);
        logger.info("Flushing memtables for " + tableName + "...");
        for (Future f : table.flush()) {
            try {
                f.get();
            } catch (InterruptedException e) {
                throw new RuntimeException(e);
            } catch (ExecutionException e) {
                throw new RuntimeException(e);
            }
        }
        logger.info("Performing anticompaction ...");
        /* Get the list of files that need to be streamed */
        transferSSTables(target, table.forceAntiCompaction(ranges, target), tableName); // SSTR GC deletes the file when done
    } catch (IOException e) {
        throw new IOError(e);
    } finally {
        StreamOutManager.remove(target);
    }
    if (callback != null)
        callback.run();
}

From source file:nlp.wikiforia.App.java

/**
 * Application entrypoint
 * @param args input arguments
 */
public static void main(String[] args) {
    Logger logger = LoggerFactory.getLogger(App.class);

    logger.info("Wikiforia v1.2.1 by Marcus Klang");

    Options options = new Options();
    options.addOption(index);
    options.addOption(pages);
    options.addOption(threads);
    options.addOption(batch);
    options.addOption(output);
    options.addOption(lang);
    options.addOption(hadoop);
    options.addOption(gzip);
    options.addOption(testDecompression);
    options.addOption(filterNs);
    options.addOption(outputFormatOption);

    CommandLineParser parser = new PosixParser();
    try {
        CommandLine cmdline = parser.parse(options, args);

        File indexPath = null, pagesPath, outputPath;
        int batchsize = 100;
        int numThreads = Runtime.getRuntime().availableProcessors();
        String outputFormat = OUTPUT_FORMAT_DEFAULT;

        //Read batch size
        if (cmdline.hasOption(batch.getOpt())) {
            batchsize = Integer.parseInt(cmdline.getOptionValue(batch.getOpt()));
        }

        //Read num threads
        if (cmdline.hasOption(threads.getOpt())) {
            numThreads = Integer.parseInt(cmdline.getOptionValue(threads.getOpt()));
        }

        //Output format
        if (cmdline.hasOption(outputFormatOption.getOpt())) {
            outputFormat = cmdline.getOptionValue(outputFormatOption.getOpt());
        }

        //Read required paths
        pagesPath = new File(cmdline.getOptionValue(pages.getOpt()));
        outputPath = new File(cmdline.getOptionValue(output.getOpt()));

        //Create output directories if they do not exist
        if (!outputPath.getAbsoluteFile().getParentFile().getAbsoluteFile().exists()) {
            if (!outputPath.getParentFile().getAbsoluteFile().mkdirs()) {
                throw new IOError(new IOException(
                        "Failed to create directories for " + outputPath.getParentFile().getAbsolutePath()));
            }
        }

        //Try to automatically select an index file if one was not explicitly given
        if (!cmdline.hasOption(index.getOpt())) {
            //try to automatically identify if there is an index file
            if (pagesPath.getAbsolutePath().toLowerCase().endsWith("-multistream.xml.bz2")) {
                int pos = pagesPath.getAbsolutePath().lastIndexOf("-multistream.xml.bz2");
                indexPath = new File(
                        pagesPath.getAbsolutePath().substring(0, pos) + "-multistream-index.txt.bz2");
                if (!indexPath.exists())
                    indexPath = null;
            }
        } else {
            indexPath = new File(cmdline.getOptionValue(index.getOpt()));
        }

        //Validation
        if (!pagesPath.exists()) {
            logger.error("pages with absolute filepath {} could not be found.", pagesPath.getAbsolutePath());
            return;
        }

        if (indexPath != null && !indexPath.exists()) {
            logger.error("Could not find index file {}.", indexPath.getAbsolutePath());
            logger.error("Skipping index and continuing with singlestream parsing (no threaded decompression)");
            indexPath = null;
        }

        String langId;
        if (cmdline.hasOption(lang.getOpt())) {
            langId = cmdline.getOptionValue(lang.getOpt());
        } else {
            Pattern langmatcher = Pattern.compile("([a-z]{2})wiki-");
            Matcher matcher = langmatcher.matcher(pagesPath.getName());
            if (matcher.find()) {
                langId = matcher.group(1).toLowerCase();
            } else {
                logger.error("Could not find a suitable language, will default to English");
                langId = "en";
            }
        }

        ArrayList<Filter<WikipediaPage>> filters = new ArrayList<Filter<WikipediaPage>>();
        if (cmdline.hasOption(filterNs.getOpt())) {
            String optionValue = cmdline.getOptionValue(filterNs.getOpt());
            final TreeSet<Integer> ns = new TreeSet<Integer>();
            for (String s : optionValue.split(",")) {
                ns.add(Integer.parseInt(s));
            }

            if (ns.size() > 0) {
                filters.add(new Filter<WikipediaPage>() {
                    @Override
                    protected boolean accept(WikipediaPage item) {
                        return ns.contains(item.getNamespace());
                    }

                    @Override
                    public String toString() {
                        return String.format("Namespace filter { namespaces: %s }", StringUtils.join(ns, ","));
                    }
                });
            }
        }

        TemplateConfig config;
        Class<? extends TemplateConfig> configClazz = LangFactory.get(langId);
        if (configClazz != null) {
            try {
                config = configClazz.newInstance();
            } catch (InstantiationException e) {
                throw new RuntimeException(e);
            } catch (IllegalAccessException e) {
                throw new RuntimeException(e);
            }
        } else {
            config = new EnglishConfig();
            logger.error(
                    "language {} is not yet supported and will be defaulted to a English setting for Sweble.",
                    langId);
            langId = "en";
        }

        if (cmdline.hasOption(hadoop.getOpt())) {
            if (outputPath.exists()) {
                logger.error("The target location already exists, please remove before using the tool!");
                System.exit(1);
            } else {
                int splitsize = 64000000;
                if (cmdline.hasOption(App.splitsize.getOpt())) {
                    splitsize = Integer.parseInt(cmdline.getOptionValue(App.splitsize.getOpt()));
                }

                hadoopConvert(config, indexPath, pagesPath, outputPath, numThreads, batchsize, splitsize,
                        cmdline.hasOption(gzip.getOpt()), filters);
            }
        } else {
            if (cmdline.hasOption(testDecompression.getOpt())) {
                test(config, indexPath, pagesPath, numThreads, batchsize);
            } else {
                convert(config, indexPath, pagesPath, outputPath, numThreads, batchsize, filters, outputFormat);
            }
        }

    } catch (ParseException e) {
        System.out.println(e.getMessage());
        HelpFormatter writer = new HelpFormatter();
        writer.printHelp("wikiforia", options);
    }
}

From source file:com.netflix.aegisthus.input.readers.SSTableRecordReader.java

@SuppressWarnings("rawtypes")
@Override
public void initialize(InputSplit inputSplit, TaskAttemptContext ctx) throws IOException, InterruptedException {
    AegSplit split = (AegSplit) inputSplit;

    start = split.getStart();
    //TODO: This has a side effect of setting compressionmetadata. remove this.
    InputStream is = split.getInput(ctx.getConfiguration());
    if (split.isCompressed()) {
        end = split.getCompressionMetadata().getDataLength();
    } else {
        end = split.getEnd();
    }
    outputFile = ctx.getConfiguration().getBoolean("aegsithus.debug.file", false);
    filename = split.getPath().toUri().toString();

    LOG.info(String.format("File: %s", split.getPath().toUri().getPath()));
    LOG.info("Start: " + start);
    LOG.info("End: " + end);
    if (ctx instanceof TaskInputOutputContext) {
        context = (TaskInputOutputContext) ctx;
    }

    try {
        scanner = new SSTableScanner(new DataInputStream(is), split.getConvertors(), end,
                Descriptor.fromFilename(filename).version);
        if (ctx.getConfiguration().get("aegisthus.maxcolsize") != null) {
            scanner.setMaxColSize(ctx.getConfiguration().getLong("aegisthus.maxcolsize", -1L));
            LOG.info(String.format("aegisthus.maxcolsize - %d",
                    ctx.getConfiguration().getLong("aegisthus.maxcolsize", -1L)));
        }
        scanner.skipUnsafe(start);
        this.pos = start;
    } catch (IOException e) {
        throw new IOError(e);
    }
}

From source file:com.bigdata.dastor.gms.FailureDetector.java

/**
 * Dump the inter arrival times for examination if necessary.
 */
public void dumpInterArrivalTimes() {
    try {
        FileOutputStream fos = new FileOutputStream("/var/tmp/output-" + System.currentTimeMillis() + ".dat",
                true);
        fos.write(toString().getBytes());
        fos.close();
    } catch (IOException e) {
        throw new IOError(e);
    }
}

From source file:org.apache.cassandra.gms.FailureDetector.java

/**
 * Dump the inter arrival times for examination if necessary.
 */
public void dumpInterArrivalTimes() {
    OutputStream os = null;
    try {
        File file = File.createTempFile("failuredetector-", ".dat");
        os = new BufferedOutputStream(new FileOutputStream(file, true));
        os.write(toString().getBytes());
    } catch (IOException e) {
        throw new IOError(e);
    } finally {
        FileUtils.closeQuietly(os);
    }
}

From source file:org.apache.cassandra.service.RowRepairResolver.java

/**
 * For each row version, compare with resolved (the superset of all row versions);
 * if it is missing anything, send a mutation to the endpoint it came from.
 */
public static void maybeScheduleRepairs(ColumnFamily resolved, String table, DecoratedKey key,
        List<ColumnFamily> versions, List<InetAddress> endpoints) {
    for (int i = 0; i < versions.size(); i++) {
        ColumnFamily diffCf = ColumnFamily.diff(versions.get(i), resolved);
        if (diffCf == null) // no repair needs to happen
            continue;

        // create and send the row mutation message based on the diff
        RowMutation rowMutation = new RowMutation(table, key.key);
        rowMutation.add(diffCf);
        Message repairMessage;
        try {
            repairMessage = rowMutation.getMessage(Gossiper.instance.getVersion(endpoints.get(i)));
        } catch (IOException e) {
            throw new IOError(e);
        }
        MessagingService.instance().sendOneWay(repairMessage, endpoints.get(i));
    }
}

From source file:org.apache.cassandra.io.sstable.SSTable.java

/**
 * We use a ReferenceQueue to manage deleting files that have been compacted
 * and for which no more SSTable references exist.  But this is not guaranteed
 * to run for each such file because of the semantics of the JVM gc.  So,
 * we write a marker to `compactedFilename` when a file is compacted;
 * if such a marker exists on startup, the file should be removed.
 *
 * @return true if the file was deleted
 */
public static boolean deleteIfCompacted(String dataFilename) {
    if (new File(compactedFilename(dataFilename)).exists()) {
        try {
            FileUtils.deleteWithConfirm(new File(dataFilename));
            FileUtils.deleteWithConfirm(new File(SSTable.indexFilename(dataFilename)));
            FileUtils.deleteWithConfirm(new File(SSTable.filterFilename(dataFilename)));
            FileUtils.deleteWithConfirm(new File(SSTable.compactedFilename(dataFilename)));
        } catch (IOException e) {
            throw new IOError(e);
        }
        logger.info("Deleted " + dataFilename);
        return true;
    }
    return false;
}

From source file:ca.mcgill.cs.crown.data.WiktionaryReader.java

public List<LexicalEntry> loadFromPreprocessed(File preprocessedFile) {
    List<JSONObject> rawEntries = new ArrayList<JSONObject>(500_000);
    for (String line : new LineReader(preprocessedFile)) {
        try {
            JSONObject rawEntry = new JSONObject(line);
            rawEntries.add(rawEntry);
        } catch (JSONException je) {
            throw new IOError(je);
        }
    }
    return convertToEntries(rawEntries);
}

From source file:com.nbt.world.NBTFileBranch.java

protected Cache<File, Region> createRegionCache() {
    return new Cache<File, Region>() {
        @Override
        public Region apply(File key) {
            try {
                return new NBTRegion(key);
            } catch (IOException e) {
                // TODO: don't be lazy
                throw new IOError(e);
            }
        }
    };
}

From source file:org.apache.cassandra.streaming.StreamOut.java

/**
 * Split out files for all tables on disk locally for each range and then stream them to the target endpoint.
 */
public static void transferRangesForRequest(StreamOutSession session, Collection<Range> ranges,
        OperationType type) {
    assert ranges.size() > 0;

    logger.info("Beginning transfer to {}", session.getHost());
    logger.debug("Ranges are {}", StringUtils.join(ranges, ","));

    try {
        Table table = flushSSTable(session.table);
        // send the matching portion of every sstable in the keyspace
        List<PendingFile> pending = createPendingFiles(table.getAllSSTables(), ranges, type);
        session.addFilesToStream(pending);
        session.begin();
    } catch (IOException e) {
        throw new IOError(e);
    }
}