List of usage examples for org.apache.commons.io.LineIterator#hasNext

Signature: public boolean hasNext()

Indicates whether the underlying Reader has more lines to read.

From source file: fr.treeptik.cloudunit.docker.JSONClient.java
public JsonResponse sendGet(URI uri) throws IOException { StringBuilder builder = new StringBuilder(); CloseableHttpClient httpclient = build(); HttpGet httpGet = new HttpGet(uri); HttpResponse response = httpclient.execute(httpGet); LineIterator iterator = IOUtils.lineIterator(response.getEntity().getContent(), "UTF-8"); while (iterator.hasNext()) { builder.append(iterator.nextLine()); }//ww w .j av a2s. c o m JsonResponse jsonResponse = new JsonResponse(response.getStatusLine().getStatusCode(), builder.toString(), null); return jsonResponse; }
From source file:com.sangupta.httptools.DownloadUrlCommand.java
@Override public void run() { File file = new File(this.urlFile); if (file == null || !file.exists()) { System.out.println("URL file cannot be found."); return;//w ww . j av a2 s .c o m } if (!file.isFile()) { System.out.println("URL file does not represent a valid file."); return; } if (this.numThreads <= 0 || this.numThreads > 50) { System.out.println("Number of assigned threads should be between 1 and 50"); return; } outputDir = new File(this.outputFolder); if (outputDir.exists() && !outputDir.isDirectory()) { System.out.println("Output folder does not represent a valid directory"); return; } if (!outputDir.exists()) { outputDir.mkdirs(); } // try and parse and read all URLs int line = 1; try { LineIterator iterator = FileUtils.lineIterator(file); while (iterator.hasNext()) { ++line; String readURL = iterator.next(); createURLTask(readURL); } } catch (IOException e) { System.out.println("Unable to read URLs from the file at line: " + line); return; } // all set - create number of threads // and start fetching ExecutorService service = Executors.newFixedThreadPool(this.numThreads); final long start = System.currentTimeMillis(); for (Runnable runnable : this.downloadTasks) { service.submit(runnable); } // intialize some variables this.numTasks = this.downloadTasks.size(); this.downloadTasks.clear(); if (this.numTasks > 1000) { this.splitFolders = true; } // shutdown shutdownAndAwaitTermination(service); final long end = System.currentTimeMillis(); // everything done System.out.println(this.downloadTasks.size() + " urls downloaded in " + (end - start) + " millis."); }
From source file:fr.gael.dhus.server.http.webapp.search.controller.SearchController.java
/** * Provides the openSearch description file via /search/description API. * * @param res response// www . j a v a2s . com * @throws IOException if file description cannot be accessed */ @PreAuthorize("hasRole('ROLE_SEARCH')") @RequestMapping(value = "/description") public void search(HttpServletResponse res) throws IOException { String url = configurationManager.getServerConfiguration().getExternalUrl(); if (url != null && url.endsWith("/")) { url = url.substring(0, url.length() - 1); } String long_name = configurationManager.getNameConfiguration().getLongName(); String short_name = configurationManager.getNameConfiguration().getShortName(); String contact_mail = configurationManager.getSupportConfiguration().getMail(); InputStream is = ClassLoader.getSystemResourceAsStream(DESCRIPTION_FILE); if (is == null) { throw new IOException("Cannot find \"" + DESCRIPTION_FILE + "\" OpenSearch description file."); } LineIterator li = IOUtils.lineIterator(is, "UTF-8"); try (ServletOutputStream os = res.getOutputStream()) { while (li.hasNext()) { String line = li.next(); // Last line? -> the iterator eats LF if (li.hasNext()) { line = line + "\n"; } line = line.replace("[dhus_server]", url); if (long_name != null) { line = line.replace("[dhus_long_name]", long_name); } if (short_name != null) { line = line.replace("[dhus_short_name]", short_name); } if (contact_mail != null) { line = line.replace("[dhus_contact_mail]", contact_mail); } os.write(line.getBytes()); } } finally { IOUtils.closeQuietly(is); LineIterator.closeQuietly(li); } }
From source file:eu.eexcess.diversityasurement.wikipedia.RDFCategoryExtractor.java
public void build() throws IOException { LineIterator categoryEntryIterator = new LineIterator(new FileReader(categoryListing)); statistics.startTimeStamp = System.currentTimeMillis(); statistics.linesInFile = getTotalNumberOfLines(categoryListing.getAbsoluteFile()); while (categoryEntryIterator.hasNext()) { statistics.linesTotal++;//from w ww.j av a 2 s . c o m ParentChildCategoryGlue tuple = parseRelatedCategoryTuple(categoryEntryIterator.nextLine()); if (null != tuple) { statistics.linesConsidered++; try { collector.takeTuple(tuple.parent, tuple.child); } catch (Throwable e) { } } else { statistics.linesSkipped++; } if (0 == (statistics.linesTotal % printStatsEvery)) { logStatistics(); } } statistics.endTimeStamp = System.currentTimeMillis(); logStatistics(); }
From source file:it.geosolutions.tools.io.file.MultiPropertyFile.java
/** * Process the file.//from ww w . j av a2 s . c o m * The return value tells if the processing was successful. * Even in a case of a failed parsing, the valid properties will be accessibile. * <br/><br/> * At the end of the read procedure the InputStream will be closed. * * @return true if the parsing was successful. */ public boolean read() { properties = new HashMap<String, Object>(); boolean ret = true; LineIterator it = null; try { in = getIS(); it = IOUtils.lineIterator(in, "UTF-8"); while (it.hasNext()) { String line = it.nextLine(); if (line.trim().length() == 0) // empty line continue; if (line.startsWith("#")) // comment line continue; int idx = line.indexOf("="); if (idx == -1) { LOGGER.warn("Missing '=' in line: [" + line + "]" + (file == null ? "" : " in file " + file)); ret = false; continue; } String key = line.substring(0, idx); String value = line.substring(idx + 1); putValue(key, value); } return ret; } catch (IOException ex) { LOGGER.error( "Error processing input" + (file == null ? "" : (" file " + file)) + ": " + ex.getMessage(), ex); return false; } finally { IOUtils.closeQuietly(in); } }
From source file:de.tudarmstadt.ukp.dkpro.keyphrases.bookindexing.evaluation.phrasematch.LineReader.java
@Override public List<String> getListOfStrings(JCas jcas) throws AnalysisEngineProcessException { List<String> goldList = new ArrayList<String>(); LineIterator lineIterator; try {//from w w w . j a va 2 s.c o m lineIterator = FileUtils.lineIterator(new File(getPath(getDocumentBaseName(jcas))), encoding); } catch (IOException e) { throw new AnalysisEngineProcessException(new Throwable(e)); } try { while (lineIterator.hasNext()) { String line = lineIterator.nextLine().trim(); if (!line.isEmpty()) { if (lowercase) line = line.toLowerCase(); goldList.add(line); } } } finally { LineIterator.closeQuietly(lineIterator); } return goldList; }
From source file:com.opengamma.bbg.replay.BloombergRefDataCollector.java
/**
 * Reads the Bloomberg field names from the configured fields file.
 * Blank lines and lines whose first character is '#' are treated as
 * comments and skipped.
 *
 * @return the set of field names found in the file
 * @throws OpenGammaRuntimeException if the file cannot be read
 */
private Set<String> loadFields() {
    final Set<String> result = Sets.newHashSet();
    final LineIterator lines;
    try {
        lines = FileUtils.lineIterator(_fieldsFile);
    } catch (IOException ex) {
        throw new OpenGammaRuntimeException("IOException when reading " + _fieldsFile, ex);
    }
    try {
        while (lines.hasNext()) {
            final String field = lines.nextLine();
            // keep only non-blank, non-comment lines
            if (!StringUtils.isBlank(field) && field.charAt(0) != '#') {
                result.add(field);
            }
        }
    } finally {
        LineIterator.closeQuietly(lines);
    }
    return result;
}
From source file:com.norconex.collector.http.crawler.HttpCrawler.java
private void queueStartURLs(ICrawlDataStore crawlDataStore) { // Queue regular start urls String[] startURLs = getCrawlerConfig().getStartURLs(); if (startURLs != null) { for (int i = 0; i < startURLs.length; i++) { String startURL = startURLs[i]; executeQueuePipeline(new HttpCrawlData(startURL, 0), crawlDataStore); }//ww w . j ava 2 s .c o m } // Queue start urls define in one or more seed files String[] urlsFiles = getCrawlerConfig().getUrlsFiles(); if (urlsFiles != null) { for (int i = 0; i < urlsFiles.length; i++) { String urlsFile = urlsFiles[i]; LineIterator it = null; try { it = IOUtils.lineIterator(new FileInputStream(urlsFile), CharEncoding.UTF_8); while (it.hasNext()) { String startURL = it.nextLine(); executeQueuePipeline(new HttpCrawlData(startURL, 0), crawlDataStore); } } catch (IOException e) { throw new CollectorException("Could not process URLs file: " + urlsFile, e); } finally { LineIterator.closeQuietly(it); ; } } } }
From source file:com.adobe.acs.tools.tag_maker.impl.TagMakerServlet.java
private InputStream stripLineEnds(InputStream is, String charset, char chartoStrip) throws IOException { log.debug("Stripping [ {} ] from the end of lines.", chartoStrip); final ByteArrayOutputStream baos = new ByteArrayOutputStream(); final PrintStream printStream = new PrintStream(baos); final LineIterator lineIterator = IOUtils.lineIterator(is, charset); while (lineIterator.hasNext()) { String line = StringUtils.stripToNull(lineIterator.next()); if (line != null) { line = StringUtils.stripEnd(line, String.valueOf(chartoStrip)); printStream.println(line);//from w ww . j a v a 2s. co m } } return new ByteArrayInputStream(baos.toByteArray()); }
From source file:eu.eexcess.sourceselection.redde.indexer.topterm.TopTermToWNDomain.java
TreeNode<String> inflateDomainTree() throws FileNotFoundException { LineIterator iterator = new LineIterator(new FileReader(wordnetCSVTreeFile)); String[] currentBranch = new String[5]; currentBranch[0] = rootNodeName;/*from w ww . ja va2 s .co m*/ while (iterator.hasNext()) { // read current node and store its parents String line = iterator.nextLine(); String[] tokensInLine = line.split(tokenDelimiter); int depth = -1; for (int i = 0; i < tokensInLine.length; i++) { tokensInLine[i] = tokensInLine[i].trim(); if (!tokensInLine[i].isEmpty()) { depth = i; currentBranch[1 + depth] = tokensInLine[i]; } } // clear tail for (int tail = depth + 2; tail < currentBranch.length; tail++) { currentBranch[tail] = null; } // reconstruct and append the missing branch according to the // current tree ValueTreeNode<String> branch = null; for (int branchDepth = currentBranch.length; branchDepth > 0; branchDepth--) { String nodeName = currentBranch[branchDepth - 1]; if (nodeName == null) { continue; } Set<TreeNode<String>> result = new HashSet<TreeNode<String>>(); ValueTreeNode.findFirstNode(nodeName, wnDomainTree, result); TreeNode<String> nodeInTree = null; if (result.iterator().hasNext()) { nodeInTree = result.iterator().next(); } // if node tree -> add branch to tree if (nodeInTree != null) { if (branch != null) { nodeInTree.addChild(branch); branch = null; } break; // if node ! tree -> reconstruct the branch until the mount // point is clear } else { ValueTreeNode<String> newParent = new ValueTreeNode<String>(); newParent.setName(nodeName); if (branch != null) { newParent.addChild(branch); } branch = newParent; } } } iterator.close(); return wnDomainTree; }