List of usage examples for java.util.concurrent.atomic.AtomicLong.set
public final void set(long newValue)
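The examples below are taken from real projects. As a quick orientation first, here is a minimal standalone sketch (not from any of the projects below) of what set() does: it unconditionally stores the new value with volatile-write semantics, returns nothing, and performs no compare step (use getAndSet or compareAndSet when you need the old value or a conditional update).

    import java.util.concurrent.atomic.AtomicLong;

    public class AtomicLongSetBasics {
        public static void main(String[] args) {
            AtomicLong counter = new AtomicLong(10);

            counter.set(25);                        // unconditional write, nothing returned
            System.out.println(counter.get());      // 25

            long previous = counter.getAndSet(99);  // related method: write and return the old value
            System.out.println(previous);           // 25
            System.out.println(counter.get());      // 99
        }
    }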
From source file: org.apache.hadoop.ipc.DecayRpcScheduler.java

    /**
     * Decay the stored counts for each user and clean as necessary.
     * This method should be called periodically in order to keep
     * counts current.
     */
    private void decayCurrentCounts() {
        try {
            long totalDecayedCount = 0;
            long totalRawCount = 0;
            Iterator<Map.Entry<Object, List<AtomicLong>>> it = callCounts.entrySet().iterator();

            while (it.hasNext()) {
                Map.Entry<Object, List<AtomicLong>> entry = it.next();
                AtomicLong decayedCount = entry.getValue().get(0);
                AtomicLong rawCount = entry.getValue().get(1);

                // Compute the next value by reducing it by the decayFactor
                totalRawCount += rawCount.get();
                long currentValue = decayedCount.get();
                long nextValue = (long) (currentValue * decayFactor);
                totalDecayedCount += nextValue;
                decayedCount.set(nextValue);

                if (nextValue == 0) {
                    // We will clean up unused keys here. An interesting optimization
                    // might be to have an upper bound on keyspace in callCounts and only
                    // clean once we pass it.
                    it.remove();
                }
            }

            // Update the total so that we remain in sync
            totalDecayedCallCount.set(totalDecayedCount);
            totalRawCallCount.set(totalRawCount);

            // Now refresh the cache of scheduling decisions
            recomputeScheduleCache();

            // Update average response time with decay
            updateAverageResponseTime(true);
        } catch (Exception ex) {
            LOG.error("decayCurrentCounts exception: " + ExceptionUtils.getFullStackTrace(ex));
            throw ex;
        }
    }
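A minimal, self-contained sketch of the read-scale-set pattern used above, with an assumed decay factor and a hypothetical per-user counter map. Note that get() followed by set() is not a single atomic step; like the scheduler above, this relies on the decay pass being driven from a single periodic thread.

    import java.util.Iterator;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.atomic.AtomicLong;

    public class DecaySketch {
        private static final double DECAY_FACTOR = 0.5;  // assumed value for illustration
        private final Map<String, AtomicLong> counts = new ConcurrentHashMap<>();

        void record(String user) {
            counts.computeIfAbsent(user, u -> new AtomicLong()).incrementAndGet();
        }

        // Intended to run from one scheduled thread, mirroring decayCurrentCounts() above.
        void decay() {
            Iterator<Map.Entry<String, AtomicLong>> it = counts.entrySet().iterator();
            while (it.hasNext()) {
                AtomicLong count = it.next().getValue();
                long next = (long) (count.get() * DECAY_FACTOR);
                count.set(next);      // plain volatile write, no CAS loop
                if (next == 0) {
                    it.remove();      // drop idle keys once they decay to zero
                }
            }
        }
    }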
From source file: org.apache.blur.store.hdfs.HdfsDirectory.java

    protected TimerTask reportOnBlockLocality() {
        final Counter totalHdfsBlock = _metricsGroup.totalHdfsBlock;
        final Counter localHdfsBlock = _metricsGroup.localHdfsBlock;
        final AtomicLong prevTotalCount = new AtomicLong();
        final AtomicLong prevLocalCount = new AtomicLong();
        return new TimerTask() {
            @Override
            public void run() {
                try {
                    long[] counts = runReport();
                    long total = counts[0];
                    long local = counts[1];
                    long prevTotal = prevTotalCount.get();
                    long prevLocal = prevLocalCount.get();
                    totalHdfsBlock.inc(total - prevTotal);
                    localHdfsBlock.inc(local - prevLocal);
                    prevTotalCount.set(total);
                    prevLocalCount.set(local);
                } catch (Exception e) {
                    LOG.error("Unknown error.", e);
                }
            }
        };
    }
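A minimal sketch of the same delta-reporting pattern, independent of the Blur metrics classes: an AtomicLong remembers the last observed cumulative value between timer runs, and set() records the new baseline after the delta has been published. Class and method names here are illustrative, not from the Blur source.

    import java.util.Timer;
    import java.util.TimerTask;
    import java.util.concurrent.atomic.AtomicLong;

    public class DeltaReporter {
        private final AtomicLong prevTotal = new AtomicLong();

        TimerTask reportTask(AtomicLong cumulativeCounter) {
            return new TimerTask() {
                @Override
                public void run() {
                    long total = cumulativeCounter.get();
                    long delta = total - prevTotal.get();
                    System.out.println("delta since last run: " + delta);
                    prevTotal.set(total);   // remember the new baseline for the next run
                }
            };
        }

        public static void main(String[] args) throws InterruptedException {
            AtomicLong cumulative = new AtomicLong();
            Timer timer = new Timer();
            timer.schedule(new DeltaReporter().reportTask(cumulative), 0, 200);
            cumulative.addAndGet(42);       // simulate work being counted elsewhere
            Thread.sleep(500);
            timer.cancel();
        }
    }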
From source file: org.apache.nifi.processors.standard.AbstractExecuteSQL.java

    @Override
    public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
        FlowFile fileToProcess = null;
        if (context.hasIncomingConnection()) {
            fileToProcess = session.get();

            // If we have no FlowFile, and all incoming connections are self-loops then we can continue on.
            // However, if we have no FlowFile and we have connections coming from other Processors, then
            // we know that we should run only if we have a FlowFile.
            if (fileToProcess == null && context.hasNonLoopConnection()) {
                return;
            }
        }

        final List<FlowFile> resultSetFlowFiles = new ArrayList<>();

        final ComponentLog logger = getLogger();

        final Integer queryTimeout = context.getProperty(QUERY_TIMEOUT).asTimePeriod(TimeUnit.SECONDS).intValue();
        final Integer maxRowsPerFlowFile = context.getProperty(MAX_ROWS_PER_FLOW_FILE)
                .evaluateAttributeExpressions().asInteger();
        final Integer outputBatchSizeField = context.getProperty(OUTPUT_BATCH_SIZE).evaluateAttributeExpressions()
                .asInteger();
        final int outputBatchSize = outputBatchSizeField == null ? 0 : outputBatchSizeField;

        SqlWriter sqlWriter = configureSqlWriter(session, context, fileToProcess);

        final String selectQuery;
        if (context.getProperty(SQL_SELECT_QUERY).isSet()) {
            selectQuery = context.getProperty(SQL_SELECT_QUERY).evaluateAttributeExpressions(fileToProcess)
                    .getValue();
        } else {
            // If the query is not set, then an incoming flow file is required, and expected to contain a valid SQL select query.
            // If there is no incoming connection, onTrigger will not be called as the processor will fail when scheduled.
            final StringBuilder queryContents = new StringBuilder();
            session.read(fileToProcess, in -> queryContents.append(IOUtils.toString(in, Charset.defaultCharset())));
            selectQuery = queryContents.toString();
        }

        int resultCount = 0;
        try (final Connection con = dbcpService
                .getConnection(fileToProcess == null ? Collections.emptyMap() : fileToProcess.getAttributes());
                final PreparedStatement st = con.prepareStatement(selectQuery)) {
            st.setQueryTimeout(queryTimeout); // timeout in seconds

            if (fileToProcess != null) {
                JdbcCommon.setParameters(st, fileToProcess.getAttributes());
            }
            logger.debug("Executing query {}", new Object[] { selectQuery });

            int fragmentIndex = 0;
            final String fragmentId = UUID.randomUUID().toString();

            final StopWatch executionTime = new StopWatch(true);

            boolean hasResults = st.execute();

            long executionTimeElapsed = executionTime.getElapsed(TimeUnit.MILLISECONDS);

            boolean hasUpdateCount = st.getUpdateCount() != -1;

            while (hasResults || hasUpdateCount) {
                // getMoreResults() and execute() return false to indicate that the result of the statement is just a number and not a ResultSet
                if (hasResults) {
                    final AtomicLong nrOfRows = new AtomicLong(0L);

                    try {
                        final ResultSet resultSet = st.getResultSet();
                        do {
                            final StopWatch fetchTime = new StopWatch(true);

                            FlowFile resultSetFF;
                            if (fileToProcess == null) {
                                resultSetFF = session.create();
                            } else {
                                resultSetFF = session.create(fileToProcess);
                                resultSetFF = session.putAllAttributes(resultSetFF, fileToProcess.getAttributes());
                            }

                            try {
                                resultSetFF = session.write(resultSetFF, out -> {
                                    try {
                                        nrOfRows.set(sqlWriter.writeResultSet(resultSet, out, getLogger(), null));
                                    } catch (Exception e) {
                                        throw (e instanceof ProcessException) ? (ProcessException) e
                                                : new ProcessException(e);
                                    }
                                });

                                long fetchTimeElapsed = fetchTime.getElapsed(TimeUnit.MILLISECONDS);

                                // set attributes
                                final Map<String, String> attributesToAdd = new HashMap<>();
                                attributesToAdd.put(RESULT_ROW_COUNT, String.valueOf(nrOfRows.get()));
                                attributesToAdd.put(RESULT_QUERY_DURATION,
                                        String.valueOf(executionTimeElapsed + fetchTimeElapsed));
                                attributesToAdd.put(RESULT_QUERY_EXECUTION_TIME, String.valueOf(executionTimeElapsed));
                                attributesToAdd.put(RESULT_QUERY_FETCH_TIME, String.valueOf(fetchTimeElapsed));
                                attributesToAdd.put(RESULTSET_INDEX, String.valueOf(resultCount));
                                attributesToAdd.putAll(sqlWriter.getAttributesToAdd());
                                resultSetFF = session.putAllAttributes(resultSetFF, attributesToAdd);
                                sqlWriter.updateCounters(session);

                                // if fragmented ResultSet, determine if we should keep this fragment; set fragment attributes
                                if (maxRowsPerFlowFile > 0) {
                                    // if row count is zero and this is not the first fragment, drop it instead of committing it.
                                    if (nrOfRows.get() == 0 && fragmentIndex > 0) {
                                        session.remove(resultSetFF);
                                        break;
                                    }

                                    resultSetFF = session.putAttribute(resultSetFF, FRAGMENT_ID, fragmentId);
                                    resultSetFF = session.putAttribute(resultSetFF, FRAGMENT_INDEX,
                                            String.valueOf(fragmentIndex));
                                }

                                logger.info("{} contains {} records; transferring to 'success'",
                                        new Object[] { resultSetFF, nrOfRows.get() });

                                // Report a FETCH event if there was an incoming flow file, or a RECEIVE event otherwise
                                if (context.hasIncomingConnection()) {
                                    session.getProvenanceReporter().fetch(resultSetFF,
                                            "Retrieved " + nrOfRows.get() + " rows",
                                            executionTimeElapsed + fetchTimeElapsed);
                                } else {
                                    session.getProvenanceReporter().receive(resultSetFF,
                                            "Retrieved " + nrOfRows.get() + " rows",
                                            executionTimeElapsed + fetchTimeElapsed);
                                }
                                resultSetFlowFiles.add(resultSetFF);

                                // If we've reached the batch size, send out the flow files
                                if (outputBatchSize > 0 && resultSetFlowFiles.size() >= outputBatchSize) {
                                    session.transfer(resultSetFlowFiles, REL_SUCCESS);
                                    session.commit();
                                    resultSetFlowFiles.clear();
                                }

                                fragmentIndex++;
                            } catch (Exception e) {
                                // Remove the result set flow file and propagate the exception
                                session.remove(resultSetFF);
                                if (e instanceof ProcessException) {
                                    throw (ProcessException) e;
                                } else {
                                    throw new ProcessException(e);
                                }
                            }
                        } while (maxRowsPerFlowFile > 0 && nrOfRows.get() == maxRowsPerFlowFile);

                        // If we are splitting results but not outputting batches, set count on all FlowFiles
                        if (outputBatchSize == 0 && maxRowsPerFlowFile > 0) {
                            for (int i = 0; i < resultSetFlowFiles.size(); i++) {
                                resultSetFlowFiles.set(i, session.putAttribute(resultSetFlowFiles.get(i),
                                        FRAGMENT_COUNT, Integer.toString(fragmentIndex)));
                            }
                        }
                    } catch (final SQLException e) {
                        throw new ProcessException(e);
                    }

                    resultCount++;
                }

                // are there anymore result sets?
                try {
                    hasResults = st.getMoreResults(Statement.CLOSE_CURRENT_RESULT);
                    hasUpdateCount = st.getUpdateCount() != -1;
                } catch (SQLException ex) {
                    hasResults = false;
                    hasUpdateCount = false;
                }
            }

            // Transfer any remaining files to SUCCESS
            session.transfer(resultSetFlowFiles, REL_SUCCESS);
            resultSetFlowFiles.clear();

            // If we had at least one result then it's OK to drop the original file, but if we had no results then
            // pass the original flow file down the line to trigger downstream processors
            if (fileToProcess != null) {
                if (resultCount > 0) {
                    session.remove(fileToProcess);
                } else {
                    fileToProcess = session.write(fileToProcess,
                            out -> sqlWriter.writeEmptyResultSet(out, getLogger()));
                    fileToProcess = session.putAttribute(fileToProcess, RESULT_ROW_COUNT, "0");
                    fileToProcess = session.putAttribute(fileToProcess, CoreAttributes.MIME_TYPE.key(),
                            sqlWriter.getMimeType());
                    session.transfer(fileToProcess, REL_SUCCESS);
                }
            } else if (resultCount == 0) {
                // If we had no inbound FlowFile, no exceptions, and the SQL generated no result sets
                // (Insert/Update/Delete statements only), then generate an empty output FlowFile
                FlowFile resultSetFF = session.create();

                resultSetFF = session.write(resultSetFF, out -> sqlWriter.writeEmptyResultSet(out, getLogger()));
                resultSetFF = session.putAttribute(resultSetFF, RESULT_ROW_COUNT, "0");
                resultSetFF = session.putAttribute(resultSetFF, CoreAttributes.MIME_TYPE.key(),
                        sqlWriter.getMimeType());
                session.transfer(resultSetFF, REL_SUCCESS);
            }
        } catch (final ProcessException | SQLException e) {
            // If we had at least one result then it's OK to drop the original file, but if we had no results then
            // pass the original flow file down the line to trigger downstream processors
            if (fileToProcess == null) {
                // This can happen if any exceptions occur while setting up the connection, statement, etc.
                logger.error("Unable to execute SQL select query {} due to {}. No FlowFile to route to failure",
                        new Object[] { selectQuery, e });
                context.yield();
            } else {
                if (context.hasIncomingConnection()) {
                    logger.error("Unable to execute SQL select query {} for {} due to {}; routing to failure",
                            new Object[] { selectQuery, fileToProcess, e });
                    fileToProcess = session.penalize(fileToProcess);
                } else {
                    logger.error("Unable to execute SQL select query {} due to {}; routing to failure",
                            new Object[] { selectQuery, e });
                    context.yield();
                }
                session.transfer(fileToProcess, REL_FAILURE);
            }
        }
    }
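The NiFi processor above calls nrOfRows.set(...) inside the session.write lambda because a lambda can only capture effectively final local variables; the AtomicLong serves as a mutable box that carries the row count back out of the callback. A stripped-down sketch of just that idiom, with a hypothetical callback API and writeRows helper standing in for the NiFi classes:

    import java.io.ByteArrayOutputStream;
    import java.io.OutputStream;
    import java.util.concurrent.atomic.AtomicLong;
    import java.util.function.Consumer;

    public class LambdaCounterSketch {
        // Stand-in for a callback-style API such as ProcessSession.write(flowFile, callback).
        static void withOutput(Consumer<OutputStream> callback) {
            callback.accept(new ByteArrayOutputStream());
        }

        static long writeRows(OutputStream out) {
            return 3L;                                    // placeholder for real result-set serialization
        }

        public static void main(String[] args) {
            final AtomicLong nrOfRows = new AtomicLong(0L);   // mutable box visible inside the lambda
            withOutput(out -> {
                long written = writeRows(out);                // hypothetical helper returning a row count
                nrOfRows.set(written);                        // publish the count to the enclosing scope
            });
            System.out.println("rows written: " + nrOfRows.get());
        }
    }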
From source file: sx.blah.discord.handle.impl.obj.Channel.java

    @Override
    public MessageHistory getMessageHistory(int messageCount) {
        if (messageCount <= messages.size()) { // we already have all of the wanted messages in the cache
            return new MessageHistory(messages.values().stream().sorted(new MessageComparator(true))
                    .limit(messageCount).collect(Collectors.toList()));
        } else {
            List<IMessage> retrieved = new ArrayList<>(messageCount);
            AtomicLong lastMessage = new AtomicLong(DiscordUtils.getSnowflakeFromTimestamp(Instant.now()));
            int chunkSize = messageCount < MESSAGE_CHUNK_COUNT ? messageCount : MESSAGE_CHUNK_COUNT;

            while (retrieved.size() < messageCount) { // while we dont have messageCount messages
                IMessage[] chunk = getHistory(lastMessage.get(), chunkSize);

                if (chunk.length == 0)
                    break;

                lastMessage.set(chunk[chunk.length - 1].getLongID());
                Collections.addAll(retrieved, chunk);
            }

            return new MessageHistory(
                    retrieved.size() > messageCount ? retrieved.subList(0, messageCount) : retrieved);
        }
    }
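Here the AtomicLong is used as a mutable cursor for paging backwards through message history; set() advances the cursor to the last ID of each fetched chunk. A self-contained sketch of that cursor pattern, with a fake page source in place of the Discord API:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.atomic.AtomicLong;

    public class PagingCursorSketch {
        // Hypothetical page source: returns up to 'limit' IDs strictly below 'beforeId', empty when exhausted.
        static long[] fetchBefore(long beforeId, int limit) {
            if (beforeId <= 0) {
                return new long[0];
            }
            int n = (int) Math.min(limit, beforeId);
            long[] page = new long[n];
            for (int i = 0; i < n; i++) {
                page[i] = beforeId - 1 - i;
            }
            return page;
        }

        public static void main(String[] args) {
            AtomicLong cursor = new AtomicLong(10);       // start paging from the newest known ID
            List<Long> collected = new ArrayList<>();
            while (collected.size() < 7) {
                long[] chunk = fetchBefore(cursor.get(), 3);
                if (chunk.length == 0) {
                    break;
                }
                cursor.set(chunk[chunk.length - 1]);      // advance the cursor past this chunk
                for (long id : chunk) {
                    collected.add(id);
                }
            }
            System.out.println(collected);                // [9, 8, 7, 6, 5, 4, 3, 2, 1]; caller trims to 7
        }
    }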
From source file: org.apache.bookkeeper.bookie.InterleavedLedgerStorageTest.java

    @Test
    public void testShellCommands() throws Exception {
        interleavedStorage.flush();
        interleavedStorage.shutdown();
        final Pattern entryPattern = Pattern
                .compile("entry (?<entry>\\d+)\t:\t((?<na>N/A)|\\(log:(?<logid>\\d+), pos: (?<pos>\\d+)\\))");

        class Metadata {
            final Pattern keyPattern = Pattern.compile("master key +: ([0-9a-f])");
            final Pattern sizePattern = Pattern.compile("size +: (\\d+)");
            final Pattern entriesPattern = Pattern.compile("entries +: (\\d+)");
            final Pattern isFencedPattern = Pattern.compile("isFenced +: (\\w+)");

            public String masterKey;
            public long size = -1;
            public long entries = -1;
            public boolean foundFenced = false;

            void check(String s) {
                Matcher keyMatcher = keyPattern.matcher(s);
                if (keyMatcher.matches()) {
                    masterKey = keyMatcher.group(1);
                    return;
                }

                Matcher sizeMatcher = sizePattern.matcher(s);
                if (sizeMatcher.matches()) {
                    size = Long.valueOf(sizeMatcher.group(1));
                    return;
                }

                Matcher entriesMatcher = entriesPattern.matcher(s);
                if (entriesMatcher.matches()) {
                    entries = Long.valueOf(entriesMatcher.group(1));
                    return;
                }

                Matcher isFencedMatcher = isFencedPattern.matcher(s);
                if (isFencedMatcher.matches()) {
                    Assert.assertEquals("true", isFencedMatcher.group(1));
                    foundFenced = true;
                    return;
                }
            }

            void validate(long foundEntries) {
                Assert.assertTrue(entries >= numWrites * entriesPerWrite);
                Assert.assertEquals(entries, foundEntries);
                Assert.assertTrue(foundFenced);
                Assert.assertNotEquals(-1, size);
            }
        }

        final Metadata foundMetadata = new Metadata();

        AtomicLong curEntry = new AtomicLong(0);
        AtomicLong someEntryLogger = new AtomicLong(-1);
        BookieShell shell = new BookieShell(LedgerIdFormatter.LONG_LEDGERID_FORMATTER,
                EntryFormatter.STRING_FORMATTER) {
            @Override
            void printInfoLine(String s) {
                Matcher matcher = entryPattern.matcher(s);
                System.out.println(s);
                if (matcher.matches()) {
                    assertEquals(Long.toString(curEntry.get()), matcher.group("entry"));

                    if (matcher.group("na") == null) {
                        String logId = matcher.group("logid");
                        Assert.assertNotEquals(matcher.group("logid"), null);
                        Assert.assertNotEquals(matcher.group("pos"), null);
                        Assert.assertTrue((curEntry.get() % entriesPerWrite) == 0);
                        Assert.assertTrue(curEntry.get() <= numWrites * entriesPerWrite);

                        if (someEntryLogger.get() == -1) {
                            someEntryLogger.set(Long.valueOf(logId));
                        }
                    } else {
                        Assert.assertEquals(matcher.group("logid"), null);
                        Assert.assertEquals(matcher.group("pos"), null);
                        Assert.assertTrue(((curEntry.get() % entriesPerWrite) != 0)
                                || ((curEntry.get() >= (entriesPerWrite * numWrites))));
                    }

                    curEntry.incrementAndGet();
                } else {
                    foundMetadata.check(s);
                }
            }
        };
        shell.setConf(conf);

        int res = shell.run(new String[] { "ledger", "-m", "0" });
        Assert.assertEquals(0, res);
        Assert.assertTrue(curEntry.get() >= numWrites * entriesPerWrite);
        foundMetadata.validate(curEntry.get());

        // Should pass consistency checker
        res = shell.run(new String[] { "localconsistencycheck" });
        Assert.assertEquals(0, res);

        // Remove a logger
        EntryLogger entryLogger = new EntryLogger(conf);
        entryLogger.removeEntryLog(someEntryLogger.get());

        // Should fail consistency checker
        res = shell.run(new String[] { "localconsistencycheck" });
        Assert.assertEquals(1, res);
    }
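In the test above, someEntryLogger.set(...) is guarded by a get() == -1 check so that only the first matching log ID is captured. That check-then-set is fine while the shell output is parsed on a single thread, but it is not atomic; the atomic equivalent, if several threads could race for the capture, is compareAndSet. A minimal sketch of both variants, using -1 as an assumed "not captured yet" sentinel:

    import java.util.concurrent.atomic.AtomicLong;

    public class FirstValueCapture {
        private final AtomicLong firstSeen = new AtomicLong(-1);   // -1 means "not captured yet"

        // Single-threaded variant, as in the BookKeeper test: check, then set.
        void captureSingleThreaded(long value) {
            if (firstSeen.get() == -1) {
                firstSeen.set(value);
            }
        }

        // Thread-safe variant: only the first caller wins the CAS.
        void captureConcurrent(long value) {
            firstSeen.compareAndSet(-1, value);
        }

        public static void main(String[] args) {
            FirstValueCapture capture = new FirstValueCapture();
            capture.captureConcurrent(7);
            capture.captureConcurrent(9);                          // ignored; 7 was captured first
            System.out.println(capture.firstSeen.get());           // 7
        }
    }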
From source file: org.apache.hadoop.hbase.client.SpeculativeMutater.java

    public Boolean mutate(final long waitToSendFailover, final long waitToSendFailoverWithException,
            final HBaseTableFunction<Void> function, final HTableInterface primaryTable,
            final Collection<HTableInterface> failoverTables, final AtomicLong lastPrimaryFail,
            final int waitTimeFromLastPrimaryFail) {
        ExecutorCompletionService<Boolean> exeS = new ExecutorCompletionService<Boolean>(exe);
        ArrayList<Callable<Boolean>> callables = new ArrayList<Callable<Boolean>>();

        final AtomicBoolean isPrimarySuccess = new AtomicBoolean(false);
        final long startTime = System.currentTimeMillis();
        final long lastPrimaryFinalFail = lastPrimaryFail.get();

        if (System.currentTimeMillis() - lastPrimaryFinalFail > 5000) {
            callables.add(new Callable<Boolean>() {
                public Boolean call() throws Exception {
                    try {
                        LOG.info(" --- CallingPrimary.1:" + isPrimarySuccess.get() + ", "
                                + (System.currentTimeMillis() - startTime));
                        function.call(primaryTable);
                        LOG.info(" --- CallingPrimary.2:" + isPrimarySuccess.get() + ", "
                                + (System.currentTimeMillis() - startTime));
                        isPrimarySuccess.set(true);
                        return true;
                    } catch (java.io.InterruptedIOException e) {
                        Thread.currentThread().interrupt();
                    } catch (Exception e) {
                        lastPrimaryFail.set(System.currentTimeMillis());
                        Thread.currentThread().interrupt();
                    }
                    return null;
                }
            });
        }

        for (final HTableInterface failoverTable : failoverTables) {
            callables.add(new Callable<Boolean>() {
                public Boolean call() throws Exception {
                    long waitToRequest = (System.currentTimeMillis() - lastPrimaryFinalFail > 5000)
                            ? waitToSendFailover - (System.currentTimeMillis() - startTime)
                            : waitToSendFailoverWithException - (System.currentTimeMillis() - startTime);

                    LOG.info(" --- waitToRequest:" + waitToRequest + ","
                            + (System.currentTimeMillis() - lastPrimaryFinalFail) + ","
                            + (waitToSendFailover - (System.currentTimeMillis() - startTime)) + ","
                            + (waitToSendFailoverWithException - (System.currentTimeMillis() - startTime)));

                    if (waitToRequest > 0) {
                        Thread.sleep(waitToRequest);
                    }
                    LOG.info(" --- isPrimarySuccess.get():" + isPrimarySuccess.get());
                    if (isPrimarySuccess.get() == false) {
                        LOG.info(" --- CallingFailOver.1:" + isPrimarySuccess.get() + ", "
                                + (System.currentTimeMillis() - startTime));
                        function.call(failoverTable);
                        LOG.info(" --- CallingFailOver.2:" + isPrimarySuccess.get() + ", "
                                + (System.currentTimeMillis() - startTime));
                    }
                    return false;
                }
            });
        }
        try {
            for (Callable<Boolean> call : callables) {
                exeS.submit(call);
            }
            Boolean result = exeS.take().get();
            return result;
        } catch (InterruptedException e) {
            e.printStackTrace();
            LOG.error(e);
        } catch (ExecutionException e) {
            e.printStackTrace();
            LOG.error(e);
        }
        return null;
    }
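The notable use of set() above is lastPrimaryFail.set(System.currentTimeMillis()): an AtomicLong shared across calls records when the primary last failed, and later reads compare against it to decide whether to give the primary a chance or go straight to failover. A small sketch of that circuit-breaker-style pattern; the class, method names, and the 5-second cooldown (mirroring the hard-coded 5000 ms above) are illustrative.

    import java.util.concurrent.atomic.AtomicLong;

    public class PrimaryCooldown {
        private static final long COOLDOWN_MS = 5000;             // mirrors the 5000 ms check above
        private final AtomicLong lastPrimaryFail = new AtomicLong(0);

        boolean primaryAllowed() {
            return System.currentTimeMillis() - lastPrimaryFail.get() > COOLDOWN_MS;
        }

        void callPrimary(Runnable primary) {
            if (!primaryAllowed()) {
                return;                                           // still cooling down; use the failover path instead
            }
            try {
                primary.run();
            } catch (RuntimeException e) {
                lastPrimaryFail.set(System.currentTimeMillis());  // remember the failure time for later callers
                throw e;
            }
        }
    }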
From source file: org.apereo.portal.io.xml.JaxbPortalDataHandlerService.java

    @Override
    public void exportAllDataOfType(Set<String> typeIds, File directory, BatchExportOptions options) {
        final Queue<ExportFuture<?>> exportFutures = new ConcurrentLinkedQueue<ExportFuture<?>>();
        final boolean failOnError = options != null ? options.isFailOnError() : true;

        //Determine the parent directory to log to
        final File logDirectory = determineLogDirectory(options, "export");

        //Setup reporting file
        final File exportReport = new File(logDirectory, "data-export.txt");
        final PrintWriter reportWriter;
        try {
            reportWriter = new PrintWriter(new BufferedWriter(new FileWriter(exportReport)));
        } catch (IOException e) {
            throw new RuntimeException("Failed to create FileWriter for: " + exportReport, e);
        }

        try {
            for (final String typeId : typeIds) {
                final List<FutureHolder<?>> failedFutures = new LinkedList<FutureHolder<?>>();

                final File typeDir = new File(directory, typeId);
                logger.info("Adding all data of type {} to export queue: {}", typeId, typeDir);

                reportWriter.println(typeId + "," + typeDir);

                final Iterable<? extends IPortalData> dataForType = this.getPortalData(typeId);
                for (final IPortalData data : dataForType) {
                    final String dataId = data.getDataId();

                    //Check for completed futures on every iteration, needed to fail as fast as possible on an import exception
                    final List<FutureHolder<?>> newFailed = waitForFutures(exportFutures, reportWriter,
                            logDirectory, false);
                    failedFutures.addAll(newFailed);

                    final AtomicLong exportTime = new AtomicLong(-1);

                    //Create export task
                    Callable<Object> task = new CallableWithoutResult() {
                        @Override
                        protected void callWithoutResult() {
                            exportTime.set(System.nanoTime());
                            try {
                                exportData(typeId, dataId, typeDir);
                            } finally {
                                exportTime.set(System.nanoTime() - exportTime.get());
                            }
                        }
                    };

                    //Submit the export task
                    final Future<?> exportFuture = this.importExportThreadPool.submit(task);

                    //Add the future for tracking
                    final ExportFuture futureHolder = new ExportFuture(exportFuture, typeId, dataId, exportTime);
                    exportFutures.offer(futureHolder);
                }

                final List<FutureHolder<?>> newFailed = waitForFutures(exportFutures, reportWriter, logDirectory,
                        true);
                failedFutures.addAll(newFailed);

                reportWriter.flush();

                if (failOnError && !failedFutures.isEmpty()) {
                    throw new RuntimeException(failedFutures.size() + " " + typeId + " entities failed to export.\n"
                            + "\tPer entity exception logs and a full report can be found in " + logDirectory);
                }
            }
        } catch (InterruptedException e) {
            throw new RuntimeException("Interrupted while waiting for entities to export", e);
        } finally {
            IOUtils.closeQuietly(reportWriter);
        }
    }
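The exportTime AtomicLong above starts at -1 ("not run yet"), is set to the start nanoTime when the task begins, and is overwritten with the elapsed time in the finally block, so the submitting thread can later read the duration out of the holder object. A compact sketch of that timing idiom; TaskTimer and the sleeping Runnable are illustrative stand-ins for the portal export task.

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;
    import java.util.concurrent.atomic.AtomicLong;

    public class TaskTimer {
        public static void main(String[] args) throws Exception {
            ExecutorService pool = Executors.newSingleThreadExecutor();
            final AtomicLong elapsedNanos = new AtomicLong(-1);   // -1 means the task has not started yet

            Future<?> future = pool.submit(() -> {
                elapsedNanos.set(System.nanoTime());              // temporarily holds the start timestamp
                try {
                    Thread.sleep(50);                             // stand-in for the real export/import work
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                } finally {
                    elapsedNanos.set(System.nanoTime() - elapsedNanos.get());  // now holds the duration
                }
            });

            future.get();                                         // wait for completion before reading the time
            System.out.println("task took " + elapsedNanos.get() / 1_000_000 + " ms");
            pool.shutdown();
        }
    }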
From source file: org.apereo.portal.io.xml.JaxbPortalDataHandlerService.java

    @Override
    public void importDataDirectory(File directory, String pattern, final BatchImportOptions options) {
        if (!directory.exists()) {
            throw new IllegalArgumentException("The specified directory '" + directory + "' does not exist");
        }

        //Create the file filter to use when searching for files to import
        final FileFilter fileFilter;
        if (pattern != null) {
            fileFilter = new AntPatternFileFilter(true, false, pattern, this.dataFileExcludes);
        } else {
            fileFilter = new AntPatternFileFilter(true, false, this.dataFileIncludes, this.dataFileExcludes);
        }

        //Determine the parent directory to log to
        final File logDirectory = determineLogDirectory(options, "import");

        //Setup reporting file
        final File importReport = new File(logDirectory, "data-import.txt");
        final PrintWriter reportWriter;
        try {
            reportWriter = new PrintWriter(new PeriodicFlushingBufferedWriter(500, new FileWriter(importReport)));
        } catch (IOException e) {
            throw new RuntimeException("Failed to create FileWriter for: " + importReport, e);
        }

        //Convert directory to URI String to provide better logging output
        final URI directoryUri = directory.toURI();
        final String directoryUriStr = directoryUri.toString();
        IMPORT_BASE_DIR.set(directoryUriStr);
        try {
            //Scan the specified directory for files to import
            logger.info("Scanning for files to Import from: {}", directory);
            final PortalDataKeyFileProcessor fileProcessor = new PortalDataKeyFileProcessor(this.dataKeyTypes,
                    options);
            this.directoryScanner.scanDirectoryNoResults(directory, fileFilter, fileProcessor);
            final long resourceCount = fileProcessor.getResourceCount();
            logger.info("Found {} files to Import from: {}", resourceCount, directory);

            //See if the import should fail on error
            final boolean failOnError = options != null ? options.isFailOnError() : true;

            //Map of files to import, grouped by type
            final ConcurrentMap<PortalDataKey, Queue<Resource>> dataToImport = fileProcessor.getDataToImport();

            //Import the data files
            for (final PortalDataKey portalDataKey : this.dataKeyImportOrder) {
                final Queue<Resource> files = dataToImport.remove(portalDataKey);
                if (files == null) {
                    continue;
                }

                final Queue<ImportFuture<?>> importFutures = new LinkedList<ImportFuture<?>>();
                final List<FutureHolder<?>> failedFutures = new LinkedList<FutureHolder<?>>();

                final int fileCount = files.size();
                logger.info("Importing {} files of type {}", fileCount, portalDataKey);
                reportWriter.println(portalDataKey + "," + fileCount);

                while (!files.isEmpty()) {
                    final Resource file = files.poll();

                    //Check for completed futures on every iteration, needed to fail as fast as possible on an import exception
                    final List<FutureHolder<?>> newFailed = waitForFutures(importFutures, reportWriter,
                            logDirectory, false);
                    failedFutures.addAll(newFailed);

                    final AtomicLong importTime = new AtomicLong(-1);

                    //Create import task
                    final Callable<Object> task = new CallableWithoutResult() {
                        @Override
                        protected void callWithoutResult() {
                            IMPORT_BASE_DIR.set(directoryUriStr);
                            importTime.set(System.nanoTime());
                            try {
                                importData(file, portalDataKey);
                            } finally {
                                importTime.set(System.nanoTime() - importTime.get());
                                IMPORT_BASE_DIR.remove();
                            }
                        }
                    };

                    //Submit the import task
                    final Future<?> importFuture = this.importExportThreadPool.submit(task);

                    //Add the future for tracking
                    importFutures.offer(new ImportFuture(importFuture, file, portalDataKey, importTime));
                }

                //Wait for all of the imports of this type to complete
                final List<FutureHolder<?>> newFailed = waitForFutures(importFutures, reportWriter, logDirectory,
                        true);
                failedFutures.addAll(newFailed);

                if (failOnError && !failedFutures.isEmpty()) {
                    throw new RuntimeException(
                            failedFutures.size() + " " + portalDataKey + " entities failed to import.\n\n"
                                    + "\tPer entity exception logs and a full report can be found in "
                                    + logDirectory + "\n");
                }

                reportWriter.flush();
            }

            if (!dataToImport.isEmpty()) {
                throw new IllegalStateException(
                        "The following PortalDataKeys are not listed in the dataTypeImportOrder List: "
                                + dataToImport.keySet());
            }

            logger.info("For a detailed report on the data import see " + importReport);
        } catch (InterruptedException e) {
            throw new RuntimeException("Interrupted while waiting for entities to import", e);
        } finally {
            IOUtils.closeQuietly(reportWriter);
            IMPORT_BASE_DIR.remove();
        }
    }
From source file: org.jasig.portal.io.xml.JaxbPortalDataHandlerService.java

    @Override
    public void importData(File directory, String pattern, final BatchImportOptions options) {
        if (!directory.exists()) {
            throw new IllegalArgumentException("The specified directory '" + directory + "' does not exist");
        }

        //Create the file filter to use when searching for files to import
        final FileFilter fileFilter;
        if (pattern != null) {
            fileFilter = new AntPatternFileFilter(true, false, pattern, this.dataFileExcludes);
        } else {
            fileFilter = new AntPatternFileFilter(true, false, this.dataFileIncludes, this.dataFileExcludes);
        }

        //Determine the parent directory to log to
        final File logDirectory = determineLogDirectory(options, "import");

        //Setup reporting file
        final File importReport = new File(logDirectory, "data-import.txt");
        final PrintWriter reportWriter;
        try {
            reportWriter = new PrintWriter(new PeriodicFlushingBufferedWriter(500, new FileWriter(importReport)));
        } catch (IOException e) {
            throw new RuntimeException("Failed to create FileWriter for: " + importReport, e);
        }

        //Convert directory to URI String to provide better logging output
        final URI directoryUri = directory.toURI();
        final String directoryUriStr = directoryUri.toString();
        IMPORT_BASE_DIR.set(directoryUriStr);
        try {
            //Scan the specified directory for files to import
            logger.info("Scanning for files to Import from: {}", directory);
            final PortalDataKeyFileProcessor fileProcessor = new PortalDataKeyFileProcessor(this.dataKeyTypes,
                    options);
            this.directoryScanner.scanDirectoryNoResults(directory, fileFilter, fileProcessor);
            final long resourceCount = fileProcessor.getResourceCount();
            logger.info("Found {} files to Import from: {}", resourceCount, directory);

            //See if the import should fail on error
            final boolean failOnError = options != null ? options.isFailOnError() : true;

            //Map of files to import, grouped by type
            final ConcurrentMap<PortalDataKey, Queue<Resource>> dataToImport = fileProcessor.getDataToImport();

            //Import the data files
            for (final PortalDataKey portalDataKey : this.dataKeyImportOrder) {
                final Queue<Resource> files = dataToImport.remove(portalDataKey);
                if (files == null) {
                    continue;
                }

                final Queue<ImportFuture<?>> importFutures = new LinkedList<ImportFuture<?>>();
                final List<FutureHolder<?>> failedFutures = new LinkedList<FutureHolder<?>>();

                final int fileCount = files.size();
                logger.info("Importing {} files of type {}", fileCount, portalDataKey);
                reportWriter.println(portalDataKey + "," + fileCount);

                while (!files.isEmpty()) {
                    final Resource file = files.poll();

                    //Check for completed futures on every iteration, needed to fail as fast as possible on an import exception
                    final List<FutureHolder<?>> newFailed = waitForFutures(importFutures, reportWriter,
                            logDirectory, false);
                    failedFutures.addAll(newFailed);

                    final AtomicLong importTime = new AtomicLong(-1);

                    //Create import task
                    final Callable<Object> task = new CallableWithoutResult() {
                        @Override
                        protected void callWithoutResult() {
                            IMPORT_BASE_DIR.set(directoryUriStr);
                            importTime.set(System.nanoTime());
                            try {
                                importData(file, portalDataKey);
                            } finally {
                                importTime.set(System.nanoTime() - importTime.get());
                                IMPORT_BASE_DIR.remove();
                            }
                        }
                    };

                    //Submit the import task
                    final Future<?> importFuture = this.importExportThreadPool.submit(task);

                    //Add the future for tracking
                    importFutures.offer(new ImportFuture(importFuture, file, portalDataKey, importTime));
                }

                //Wait for all of the imports of this type to complete
                final List<FutureHolder<?>> newFailed = waitForFutures(importFutures, reportWriter, logDirectory,
                        true);
                failedFutures.addAll(newFailed);

                if (failOnError && !failedFutures.isEmpty()) {
                    throw new RuntimeException(
                            failedFutures.size() + " " + portalDataKey + " entities failed to import.\n\n"
                                    + "\tPer entity exception logs and a full report can be found in "
                                    + logDirectory + "\n");
                }

                reportWriter.flush();
            }

            if (!dataToImport.isEmpty()) {
                throw new IllegalStateException(
                        "The following PortalDataKeys are not listed in the dataTypeImportOrder List: "
                                + dataToImport.keySet());
            }

            logger.info("For a detailed report on the data import see " + importReport);
        } catch (InterruptedException e) {
            throw new RuntimeException("Interrupted while waiting for entities to import", e);
        } finally {
            IOUtils.closeQuietly(reportWriter);
            IMPORT_BASE_DIR.remove();
        }
    }
From source file: org.apache.tinkerpop.gremlin.structure.TransactionTest.java

    @Test
    @org.junit.Ignore("Ignoring this test for now. Perhaps it will have relelvance later. see - https://github.org/apache/tinkerpop/tinkerpop3/issues/31")
    @FeatureRequirement(featureClass = Graph.Features.VertexFeatures.class, feature = Graph.Features.VertexFeatures.FEATURE_ADD_VERTICES)
    @FeatureRequirement(featureClass = Graph.Features.GraphFeatures.class, feature = Graph.Features.GraphFeatures.FEATURE_TRANSACTIONS)
    public void shouldSupportTransactionIsolationWithSeparateThreads() throws Exception {
        // one thread modifies the graph and a separate thread reads before the transaction is committed.
        // the expectation is that the changes in the transaction are isolated to the thread that made the change
        // and the second thread should not see the change until commit() in the first thread.
        final CountDownLatch latchCommit = new CountDownLatch(1);
        final CountDownLatch latchFirstRead = new CountDownLatch(1);
        final CountDownLatch latchSecondRead = new CountDownLatch(1);

        final Thread threadMod = new Thread() {
            @Override
            public void run() {
                graph.addVertex();

                latchFirstRead.countDown();

                try {
                    latchCommit.await();
                } catch (InterruptedException ie) {
                    throw new RuntimeException(ie);
                }

                graph.tx().commit();

                latchSecondRead.countDown();
            }
        };

        threadMod.start();

        final AtomicLong beforeCommitInOtherThread = new AtomicLong(0);
        final AtomicLong afterCommitInOtherThreadButBeforeRollbackInCurrentThread = new AtomicLong(0);
        final AtomicLong afterCommitInOtherThread = new AtomicLong(0);
        final Thread threadRead = new Thread() {
            @Override
            public void run() {
                try {
                    latchFirstRead.await();
                } catch (InterruptedException ie) {
                    throw new RuntimeException(ie);
                }

                // reading vertex before tx from other thread is committed...should have zero vertices
                beforeCommitInOtherThread.set(IteratorUtils.count(graph.vertices()));

                latchCommit.countDown();

                try {
                    latchSecondRead.await();
                } catch (InterruptedException ie) {
                    throw new RuntimeException(ie);
                }

                // tx in other thread is committed...should have one vertex. rollback first to start a new tx
                // to get a fresh read given the commit
                afterCommitInOtherThreadButBeforeRollbackInCurrentThread.set(IteratorUtils.count(graph.vertices()));
                graph.tx().rollback();
                afterCommitInOtherThread.set(IteratorUtils.count(graph.vertices()));
            }
        };

        threadRead.start();

        threadMod.join();
        threadRead.join();

        assertEquals(0L, beforeCommitInOtherThread.get());
        assertEquals(0L, afterCommitInOtherThreadButBeforeRollbackInCurrentThread.get());
        assertEquals(1L, afterCommitInOtherThread.get());
    }
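This test leans on the memory-visibility side of set(): it is a volatile write, so the counts recorded by the reader thread are safely visible to the test thread that calls get() after join(). A minimal sketch of carrying a result out of a worker thread the same way:

    import java.util.concurrent.atomic.AtomicLong;

    public class CrossThreadResult {
        public static void main(String[] args) throws InterruptedException {
            final AtomicLong observed = new AtomicLong(0);

            Thread worker = new Thread(() -> {
                long result = 40 + 2;      // stand-in for counting vertices, rows, etc.
                observed.set(result);      // volatile write: safely published to other threads
            });

            worker.start();
            worker.join();                 // join() also establishes happens-before; the volatile
                                           // set()/get() pair on the AtomicLong publishes the value as well

            System.out.println(observed.get());   // 42
        }
    }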