List of usage examples for java.util.concurrent.atomic.AtomicLong.get()
public final long get()
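get() returns the current value; the read has the memory semantics of a volatile read, so it observes the most recent value written by any thread without extra locking. A minimal, self-contained sketch:

import java.util.concurrent.atomic.AtomicLong;

public class AtomicLongGetExample {
    public static void main(String[] args) throws InterruptedException {
        final AtomicLong counter = new AtomicLong(0);

        // A writer thread publishes increments; get() in the main thread
        // observes them without any additional synchronization.
        Thread writer = new Thread(() -> {
            for (int i = 0; i < 1_000; i++) {
                counter.incrementAndGet();
            }
        });
        writer.start();
        writer.join();

        System.out.println(counter.get()); // prints 1000
    }
}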
From source file: org.apereo.portal.io.xml.JaxbPortalDataHandlerService.java
@Override
public void exportAllDataOfType(Set<String> typeIds, File directory, BatchExportOptions options) {
    final Queue<ExportFuture<?>> exportFutures = new ConcurrentLinkedQueue<ExportFuture<?>>();
    final boolean failOnError = options != null ? options.isFailOnError() : true;

    //Determine the parent directory to log to
    final File logDirectory = determineLogDirectory(options, "export");

    //Setup reporting file
    final File exportReport = new File(logDirectory, "data-export.txt");
    final PrintWriter reportWriter;
    try {
        reportWriter = new PrintWriter(new BufferedWriter(new FileWriter(exportReport)));
    } catch (IOException e) {
        throw new RuntimeException("Failed to create FileWriter for: " + exportReport, e);
    }

    try {
        for (final String typeId : typeIds) {
            final List<FutureHolder<?>> failedFutures = new LinkedList<FutureHolder<?>>();

            final File typeDir = new File(directory, typeId);
            logger.info("Adding all data of type {} to export queue: {}", typeId, typeDir);

            reportWriter.println(typeId + "," + typeDir);

            final Iterable<? extends IPortalData> dataForType = this.getPortalData(typeId);
            for (final IPortalData data : dataForType) {
                final String dataId = data.getDataId();

                //Check for completed futures on every iteration, needed to fail as fast as possible on an export exception
                final List<FutureHolder<?>> newFailed = waitForFutures(exportFutures, reportWriter, logDirectory, false);
                failedFutures.addAll(newFailed);

                final AtomicLong exportTime = new AtomicLong(-1);

                //Create export task
                Callable<Object> task = new CallableWithoutResult() {
                    @Override
                    protected void callWithoutResult() {
                        exportTime.set(System.nanoTime());
                        try {
                            exportData(typeId, dataId, typeDir);
                        } finally {
                            exportTime.set(System.nanoTime() - exportTime.get());
                        }
                    }
                };

                //Submit the export task
                final Future<?> exportFuture = this.importExportThreadPool.submit(task);

                //Add the future for tracking
                final ExportFuture futureHolder = new ExportFuture(exportFuture, typeId, dataId, exportTime);
                exportFutures.offer(futureHolder);
            }

            final List<FutureHolder<?>> newFailed = waitForFutures(exportFutures, reportWriter, logDirectory, true);
            failedFutures.addAll(newFailed);

            reportWriter.flush();

            if (failOnError && !failedFutures.isEmpty()) {
                throw new RuntimeException(failedFutures.size() + " " + typeId + " entities failed to export.\n"
                        + "\tPer entity exception logs and a full report can be found in " + logDirectory);
            }
        }
    } catch (InterruptedException e) {
        throw new RuntimeException("Interrupted while waiting for entities to export", e);
    } finally {
        IOUtils.closeQuietly(reportWriter);
    }
}
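The exportTime cell above is a common AtomicLong timing idiom: it starts at -1 ("not started"), the worker stores System.nanoTime() when the task begins, and the finally block overwrites that with the elapsed nanoseconds, so code holding the ExportFuture can distinguish all three states through get(). A stripped-down sketch of the same idiom (the pool and the sleeping task are illustrative stand-ins, not from the portal source):

import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicLong;

public class TaskTimingSketch {
    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newSingleThreadExecutor();
        final AtomicLong taskTime = new AtomicLong(-1); // -1 means "not started yet"

        Future<?> future = pool.submit(() -> {
            taskTime.set(System.nanoTime()); // running: holds the start timestamp
            try {
                TimeUnit.MILLISECONDS.sleep(50); // stand-in for real work
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            } finally {
                taskTime.set(System.nanoTime() - taskTime.get()); // done: holds elapsed nanos
            }
        });

        future.get();
        System.out.printf("task took %d ms%n", TimeUnit.NANOSECONDS.toMillis(taskTime.get()));
        pool.shutdown();
    }
}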
From source file: org.apache.hadoop.hbase.master.TestDistributedLogSplitting.java
private void waitForCounter(AtomicLong ctr, long oldval, long newval, long timems) {
    long curt = System.currentTimeMillis();
    long endt = curt + timems;
    while (curt < endt) {
        if (ctr.get() == oldval) {
            Thread.yield();
            curt = System.currentTimeMillis();
        } else {
            assertEquals(newval, ctr.get());
            return;
        }
    }
    assertTrue(false);
}
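This helper busy-waits on ctr.get() until the counter leaves its old value or the timeout expires. The same polling idiom outside a test framework might look like the following sketch (names and the sleeping back-off are illustrative):

import java.util.concurrent.atomic.AtomicLong;

public class CounterWaitSketch {
    /** Polls ctr.get() until it leaves oldval, or gives up after timeoutMs. */
    static boolean waitForChange(AtomicLong ctr, long oldval, long timeoutMs)
            throws InterruptedException {
        final long deadline = System.currentTimeMillis() + timeoutMs;
        while (System.currentTimeMillis() < deadline) {
            if (ctr.get() != oldval) {
                return true; // another thread advanced the counter
            }
            Thread.sleep(1); // brief back-off instead of burning CPU
        }
        return false; // timed out; the counter never moved
    }

    public static void main(String[] args) throws InterruptedException {
        final AtomicLong ctr = new AtomicLong(0);
        Thread worker = new Thread(() -> {
            try {
                Thread.sleep(100); // simulate work before the counter moves
            } catch (InterruptedException ignored) {
            }
            ctr.incrementAndGet();
        });
        worker.start();
        System.out.println(waitForChange(ctr, 0, 1_000)); // prints true
        worker.join();
    }
}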
From source file: com.netflix.genie.web.tasks.node.DiskCleanupTaskUnitTests.java
/**
 * Make sure we can run successfully when runAsUser is false for the system.
 *
 * @throws IOException on error
 * @throws GenieException on error
 */
@Test
public void canRunWithoutSudo() throws IOException, GenieException {
    final JobsProperties jobsProperties = new JobsProperties();
    jobsProperties.getUsers().setRunAsUserEnabled(false);

    // Create some random junk file that should be ignored
    this.tmpJobDir.newFile(UUID.randomUUID().toString());

    final DiskCleanupProperties properties = new DiskCleanupProperties();
    final Calendar cal = TaskUtils.getMidnightUTC();
    TaskUtils.subtractDaysFromDate(cal, properties.getRetention());
    final Date threshold = cal.getTime();

    final String job1Id = UUID.randomUUID().toString();
    final String job2Id = UUID.randomUUID().toString();
    final String job3Id = UUID.randomUUID().toString();
    final String job4Id = UUID.randomUUID().toString();
    final String job5Id = UUID.randomUUID().toString();

    final Job job1 = Mockito.mock(Job.class);
    Mockito.when(job1.getStatus()).thenReturn(JobStatus.INIT);
    final Job job2 = Mockito.mock(Job.class);
    Mockito.when(job2.getStatus()).thenReturn(JobStatus.RUNNING);
    final Job job3 = Mockito.mock(Job.class);
    Mockito.when(job3.getStatus()).thenReturn(JobStatus.SUCCEEDED);
    Mockito.when(job3.getFinished()).thenReturn(Optional.of(new Date(threshold.getTime() - 1)));
    final Job job4 = Mockito.mock(Job.class);
    Mockito.when(job4.getStatus()).thenReturn(JobStatus.FAILED);
    Mockito.when(job4.getFinished()).thenReturn(Optional.of(threshold));

    this.createJobDir(job1Id);
    this.createJobDir(job2Id);
    this.createJobDir(job3Id);
    this.createJobDir(job4Id);
    this.createJobDir(job5Id);

    final TaskScheduler scheduler = Mockito.mock(TaskScheduler.class);
    final Resource jobDir = Mockito.mock(Resource.class);
    Mockito.when(jobDir.exists()).thenReturn(true);
    Mockito.when(jobDir.getFile()).thenReturn(this.tmpJobDir.getRoot());
    final JobSearchService jobSearchService = Mockito.mock(JobSearchService.class);
    final Registry registry = Mockito.mock(Registry.class);
    final AtomicLong numberOfDeletedJobDirs = new AtomicLong();
    Mockito.when(registry.gauge(Mockito.eq("genie.tasks.diskCleanup.numberDeletedJobDirs.gauge"),
            Mockito.any(AtomicLong.class))).thenReturn(numberOfDeletedJobDirs);
    final AtomicLong numberOfDirsUnableToDelete = new AtomicLong();
    Mockito.when(registry.gauge(Mockito.eq("genie.tasks.diskCleanup.numberDirsUnableToDelete.gauge"),
            Mockito.any(AtomicLong.class))).thenReturn(numberOfDirsUnableToDelete);
    final Counter unableToGetJobCounter = Mockito.mock(Counter.class);
    Mockito.when(registry.counter("genie.tasks.diskCleanup.unableToGetJobs.rate"))
            .thenReturn(unableToGetJobCounter);
    final Counter unabledToDeleteJobsDir = Mockito.mock(Counter.class);
    Mockito.when(registry.counter("genie.tasks.diskCleanup.unableToDeleteJobsDir.rate"))
            .thenReturn(unabledToDeleteJobsDir);
    Mockito.when(jobSearchService.getJob(job1Id)).thenReturn(job1);
    Mockito.when(jobSearchService.getJob(job2Id)).thenReturn(job2);
    Mockito.when(jobSearchService.getJob(job3Id)).thenReturn(job3);
    Mockito.when(jobSearchService.getJob(job4Id)).thenReturn(job4);
    Mockito.when(jobSearchService.getJob(job5Id)).thenThrow(new GenieServerException("blah"));

    final DiskCleanupTask task = new DiskCleanupTask(properties, scheduler, jobDir, jobSearchService,
            jobsProperties, Mockito.mock(Executor.class), registry);
    Assert.assertThat(numberOfDeletedJobDirs.get(), Matchers.is(0L));
    Assert.assertThat(numberOfDirsUnableToDelete.get(), Matchers.is(0L));
    task.run();
    Assert.assertThat(numberOfDeletedJobDirs.get(), Matchers.is(1L));
    Assert.assertThat(numberOfDirsUnableToDelete.get(), Matchers.is(1L));
    Assert.assertTrue(new File(jobDir.getFile(), job1Id).exists());
    Assert.assertTrue(new File(jobDir.getFile(), job2Id).exists());
    Assert.assertFalse(new File(jobDir.getFile(), job3Id).exists());
    Assert.assertTrue(new File(jobDir.getFile(), job4Id).exists());
    Assert.assertTrue(new File(jobDir.getFile(), job5Id).exists());
}
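The two AtomicLongs here stand in for registry gauges: the task under test mutates them while running, and the test reads them back with get() before and after run(). A framework-free sketch of that gauge-style pattern (the Cleaner class is invented for illustration):

import java.util.concurrent.atomic.AtomicLong;

public class GaugeSketch {
    /** Component that reports progress through gauge cells it is handed. */
    static final class Cleaner {
        private final AtomicLong deleted;
        private final AtomicLong unableToDelete;

        Cleaner(AtomicLong deleted, AtomicLong unableToDelete) {
            this.deleted = deleted;
            this.unableToDelete = unableToDelete;
        }

        void run() {
            deleted.incrementAndGet();        // pretend one directory was removed
            unableToDelete.incrementAndGet(); // ...and one could not be
        }
    }

    public static void main(String[] args) {
        AtomicLong deleted = new AtomicLong();
        AtomicLong unableToDelete = new AtomicLong();

        System.out.println("before: " + deleted.get() + "/" + unableToDelete.get()); // 0/0
        new Cleaner(deleted, unableToDelete).run();
        System.out.println("after:  " + deleted.get() + "/" + unableToDelete.get()); // 1/1
    }
}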
From source file: io.druid.server.namespace.cache.NamespaceExtractionCacheManagerExecutorsTest.java
@Test(timeout = 50_000)
public void testRepeatSubmission() throws ExecutionException, InterruptedException {
    final int repeatCount = 5;
    final long delay = 5;
    final AtomicLong ranCount = new AtomicLong(0L);
    final long totalRunCount;
    final long start;
    final CountDownLatch latch = new CountDownLatch(repeatCount);
    try {
        final URIExtractionNamespace namespace = new URIExtractionNamespace("ns", tmpFile.toURI(),
                new URIExtractionNamespace.ObjectMapperFlatDataParser(
                        URIExtractionNamespaceTest.registerTypes(new ObjectMapper())),
                new Period(delay), null);
        start = System.currentTimeMillis();
        final String cacheId = UUID.randomUUID().toString();
        ListenableFuture<?> future = manager.schedule(namespace, factory, new Runnable() {
            @Override
            public void run() {
                try {
                    manager.getPostRunnable(namespace, factory, cacheId).run();
                    ranCount.incrementAndGet();
                } finally {
                    latch.countDown();
                }
            }
        }, cacheId);
        latch.await();
        long minEnd = start + ((repeatCount - 1) * delay);
        long end = System.currentTimeMillis();
        Assert.assertTrue(String.format("Didn't wait long enough between runs. Expected more than %d was %d",
                minEnd - start, end - start), minEnd < end);
    } finally {
        lifecycle.stop();
    }
    totalRunCount = ranCount.get();
    Thread.sleep(50);
    Assert.assertEquals(totalRunCount, ranCount.get(), 1);
}
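The tail of the test is the AtomicLong-specific part: it snapshots ranCount.get(), sleeps, and reads again to check that the counter has (nearly) stopped advancing once the lifecycle is stopped. The same stop-detection idiom in isolation (the scheduled task is an illustrative stand-in):

import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicLong;

public class StopDetectionSketch {
    public static void main(String[] args) throws InterruptedException {
        AtomicLong runs = new AtomicLong();
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        scheduler.scheduleAtFixedRate(runs::incrementAndGet, 0, 5, TimeUnit.MILLISECONDS);

        Thread.sleep(100);          // let the task fire a few times
        scheduler.shutdown();
        scheduler.awaitTermination(1, TimeUnit.SECONDS);

        long snapshot = runs.get(); // first read
        Thread.sleep(50);
        // second read: the counter must not have advanced after shutdown
        if (runs.get() != snapshot) {
            throw new AssertionError("task still running after shutdown");
        }
        System.out.println("task ran " + snapshot + " times, then stopped");
    }
}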
From source file: org.apereo.portal.io.xml.JaxbPortalDataHandlerService.java
@Override
public void importDataDirectory(File directory, String pattern, final BatchImportOptions options) {
    if (!directory.exists()) {
        throw new IllegalArgumentException("The specified directory '" + directory + "' does not exist");
    }

    //Create the file filter to use when searching for files to import
    final FileFilter fileFilter;
    if (pattern != null) {
        fileFilter = new AntPatternFileFilter(true, false, pattern, this.dataFileExcludes);
    } else {
        fileFilter = new AntPatternFileFilter(true, false, this.dataFileIncludes, this.dataFileExcludes);
    }

    //Determine the parent directory to log to
    final File logDirectory = determineLogDirectory(options, "import");

    //Setup reporting file
    final File importReport = new File(logDirectory, "data-import.txt");
    final PrintWriter reportWriter;
    try {
        reportWriter = new PrintWriter(new PeriodicFlushingBufferedWriter(500, new FileWriter(importReport)));
    } catch (IOException e) {
        throw new RuntimeException("Failed to create FileWriter for: " + importReport, e);
    }

    //Convert directory to URI String to provide better logging output
    final URI directoryUri = directory.toURI();
    final String directoryUriStr = directoryUri.toString();
    IMPORT_BASE_DIR.set(directoryUriStr);
    try {
        //Scan the specified directory for files to import
        logger.info("Scanning for files to Import from: {}", directory);
        final PortalDataKeyFileProcessor fileProcessor = new PortalDataKeyFileProcessor(this.dataKeyTypes, options);
        this.directoryScanner.scanDirectoryNoResults(directory, fileFilter, fileProcessor);
        final long resourceCount = fileProcessor.getResourceCount();
        logger.info("Found {} files to Import from: {}", resourceCount, directory);

        //See if the import should fail on error
        final boolean failOnError = options != null ? options.isFailOnError() : true;

        //Map of files to import, grouped by type
        final ConcurrentMap<PortalDataKey, Queue<Resource>> dataToImport = fileProcessor.getDataToImport();

        //Import the data files
        for (final PortalDataKey portalDataKey : this.dataKeyImportOrder) {
            final Queue<Resource> files = dataToImport.remove(portalDataKey);
            if (files == null) {
                continue;
            }

            final Queue<ImportFuture<?>> importFutures = new LinkedList<ImportFuture<?>>();
            final List<FutureHolder<?>> failedFutures = new LinkedList<FutureHolder<?>>();

            final int fileCount = files.size();
            logger.info("Importing {} files of type {}", fileCount, portalDataKey);
            reportWriter.println(portalDataKey + "," + fileCount);

            while (!files.isEmpty()) {
                final Resource file = files.poll();

                //Check for completed futures on every iteration, needed to fail as fast as possible on an import exception
                final List<FutureHolder<?>> newFailed = waitForFutures(importFutures, reportWriter, logDirectory, false);
                failedFutures.addAll(newFailed);

                final AtomicLong importTime = new AtomicLong(-1);

                //Create import task
                final Callable<Object> task = new CallableWithoutResult() {
                    @Override
                    protected void callWithoutResult() {
                        IMPORT_BASE_DIR.set(directoryUriStr);
                        importTime.set(System.nanoTime());
                        try {
                            importData(file, portalDataKey);
                        } finally {
                            importTime.set(System.nanoTime() - importTime.get());
                            IMPORT_BASE_DIR.remove();
                        }
                    }
                };

                //Submit the import task
                final Future<?> importFuture = this.importExportThreadPool.submit(task);

                //Add the future for tracking
                importFutures.offer(new ImportFuture(importFuture, file, portalDataKey, importTime));
            }

            //Wait for all of the imports of this type to complete
            final List<FutureHolder<?>> newFailed = waitForFutures(importFutures, reportWriter, logDirectory, true);
            failedFutures.addAll(newFailed);

            if (failOnError && !failedFutures.isEmpty()) {
                throw new RuntimeException(
                        failedFutures.size() + " " + portalDataKey + " entities failed to import.\n\n"
                                + "\tPer entity exception logs and a full report can be found in " + logDirectory + "\n");
            }

            reportWriter.flush();
        }

        if (!dataToImport.isEmpty()) {
            throw new IllegalStateException(
                    "The following PortalDataKeys are not listed in the dataTypeImportOrder List: "
                            + dataToImport.keySet());
        }

        logger.info("For a detailed report on the data import see " + importReport);
    } catch (InterruptedException e) {
        throw new RuntimeException("Interrupted while waiting for entities to import", e);
    } finally {
        IOUtils.closeQuietly(reportWriter);
        IMPORT_BASE_DIR.remove();
    }
}
From source file: org.apache.hadoop.hbase.regionserver.wal.TestHLogSplit.java
/**
 * Simulates splitting a WAL out from under a regionserver that is still trying to write it. Ensures we do not
 * lose edits.
 * @throws IOException
 * @throws InterruptedException
 */
@Test(timeout = 300000)
public void testLogCannotBeWrittenOnceParsed() throws IOException, InterruptedException {
    final AtomicLong counter = new AtomicLong(0);
    AtomicBoolean stop = new AtomicBoolean(false);
    // Region we'll write edits to and then later examine to make sure they all made it in.
    final String region = REGIONS.get(0);
    Thread zombie = new ZombieLastLogWriterRegionServer(this.conf, counter, stop, region);
    try {
        long startCount = counter.get();
        zombie.start();
        // Wait till writer starts going.
        while (startCount == counter.get())
            Threads.sleep(1);
        // Give it a second to write a few appends.
        Threads.sleep(1000);
        final Configuration conf2 = HBaseConfiguration.create(this.conf);
        final User robber = User.createUserForTesting(conf2, ROBBER, GROUP);
        int count = robber.runAs(new PrivilegedExceptionAction<Integer>() {
            @Override
            public Integer run() throws Exception {
                FileSystem fs = FileSystem.get(conf2);
                int expectedFiles = fs.listStatus(HLOGDIR).length;
                HLogSplitter.split(HBASEDIR, HLOGDIR, OLDLOGDIR, fs, conf2);
                Path[] logfiles = getLogForRegion(HBASEDIR, TABLE_NAME, region);
                assertEquals(expectedFiles, logfiles.length);
                int count = 0;
                for (Path logfile : logfiles) {
                    count += countHLog(logfile, fs, conf2);
                }
                return count;
            }
        });
        LOG.info("zombie=" + counter.get() + ", robber=" + count);
        assertTrue("The log file could have at most 1 extra log entry, but can't have less. Zombie could write "
                + counter.get() + " and logfile had only " + count,
                counter.get() == count || counter.get() + 1 == count);
    } finally {
        stop.set(true);
        zombie.interrupt();
        Threads.threadDumpingIsAlive(zombie);
    }
}
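counter.get() is read twice for different purposes: the spin loop detects that the zombie writer has made progress, and the final assertion treats the counter as ground truth for how many edits were written, allowing one in-flight edit of slack. A toy version of the write-then-verify pattern, with a queue standing in for the WAL (all names are illustrative):

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;

public class WriterCountSketch {
    public static void main(String[] args) throws InterruptedException {
        final Queue<Long> log = new ConcurrentLinkedQueue<>();
        final AtomicLong written = new AtomicLong(0);
        final AtomicBoolean stop = new AtomicBoolean(false);

        // Writer appends entries and bumps the counter after each append.
        Thread writer = new Thread(() -> {
            while (!stop.get()) {
                log.add(written.get());
                written.incrementAndGet();
            }
        });
        writer.start();

        // Wait until the writer has demonstrably made progress.
        while (written.get() == 0) {
            Thread.sleep(1);
        }
        stop.set(true);
        writer.join();

        long counted = log.size();
        // Once the writer has fully stopped, counter and recovered count agree exactly.
        if (written.get() != counted) {
            throw new AssertionError("wrote " + written.get() + " but recovered " + counted);
        }
        System.out.println("recovered all " + counted + " entries");
    }
}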
From source file: org.apache.nifi.processors.standard.QueryDatabaseTable.java
@Override
public void onTrigger(final ProcessContext context, final ProcessSessionFactory sessionFactory)
        throws ProcessException {
    ProcessSession session = sessionFactory.createSession();
    final List<FlowFile> resultSetFlowFiles = new ArrayList<>();

    final ComponentLog logger = getLogger();

    final DBCPService dbcpService = context.getProperty(DBCP_SERVICE).asControllerService(DBCPService.class);
    final DatabaseAdapter dbAdapter = dbAdapters.get(context.getProperty(DB_TYPE).getValue());
    final String tableName = context.getProperty(TABLE_NAME).evaluateAttributeExpressions().getValue();
    final String columnNames = context.getProperty(COLUMN_NAMES).evaluateAttributeExpressions().getValue();
    final String maxValueColumnNames = context.getProperty(MAX_VALUE_COLUMN_NAMES)
            .evaluateAttributeExpressions().getValue();
    final Integer fetchSize = context.getProperty(FETCH_SIZE).evaluateAttributeExpressions().asInteger();
    final Integer maxRowsPerFlowFile = context.getProperty(MAX_ROWS_PER_FLOW_FILE)
            .evaluateAttributeExpressions().asInteger();
    final Integer maxFragments = context.getProperty(MAX_FRAGMENTS).isSet()
            ? context.getProperty(MAX_FRAGMENTS).evaluateAttributeExpressions().asInteger()
            : 0;
    final boolean convertNamesForAvro = context.getProperty(NORMALIZE_NAMES_FOR_AVRO).asBoolean();

    final Map<String, String> maxValueProperties = getDefaultMaxValueProperties(context.getProperties());

    final StateManager stateManager = context.getStateManager();
    final StateMap stateMap;
    try {
        stateMap = stateManager.getState(Scope.CLUSTER);
    } catch (final IOException ioe) {
        getLogger().error("Failed to retrieve observed maximum values from the State Manager. Will not perform "
                + "query until this is accomplished.", ioe);
        context.yield();
        return;
    }

    // Make a mutable copy of the current state property map. This will be updated by the result row callback,
    // and eventually set as the current state map (after the session has been committed)
    final Map<String, String> statePropertyMap = new HashMap<>(stateMap.toMap());

    //If an initial max value for column(s) has been specified using properties, and this column is not in the state manager, sync them to the state property map
    for (final Map.Entry<String, String> maxProp : maxValueProperties.entrySet()) {
        String maxPropKey = maxProp.getKey().toLowerCase();
        String fullyQualifiedMaxPropKey = getStateKey(tableName, maxPropKey);
        if (!statePropertyMap.containsKey(fullyQualifiedMaxPropKey)) {
            String newMaxPropValue;
            // If we can't find the value at the fully-qualified key name, it is possible (under a previous scheme)
            // the value has been stored under a key that is only the column name. Fall back to check the column name,
            // but store the new initial max value under the fully-qualified key.
            if (statePropertyMap.containsKey(maxPropKey)) {
                newMaxPropValue = statePropertyMap.get(maxPropKey);
            } else {
                newMaxPropValue = maxProp.getValue();
            }
            statePropertyMap.put(fullyQualifiedMaxPropKey, newMaxPropValue);
        }
    }

    List<String> maxValueColumnNameList = StringUtils.isEmpty(maxValueColumnNames)
            ? null
            : Arrays.asList(maxValueColumnNames.split("\\s*,\\s*"));
    final String selectQuery = getQuery(dbAdapter, tableName, columnNames, maxValueColumnNameList,
            statePropertyMap);
    final StopWatch stopWatch = new StopWatch(true);
    final String fragmentIdentifier = UUID.randomUUID().toString();

    try (final Connection con = dbcpService.getConnection(); final Statement st = con.createStatement()) {

        if (fetchSize != null && fetchSize > 0) {
            try {
                st.setFetchSize(fetchSize);
            } catch (SQLException se) {
                // Not all drivers support this, just log the error (at debug level) and move on
                logger.debug("Cannot set fetch size to {} due to {}",
                        new Object[] { fetchSize, se.getLocalizedMessage() }, se);
            }
        }

        String jdbcURL = "DBCPService";
        try {
            DatabaseMetaData databaseMetaData = con.getMetaData();
            if (databaseMetaData != null) {
                jdbcURL = databaseMetaData.getURL();
            }
        } catch (SQLException se) {
            // Ignore and use default JDBC URL. This shouldn't happen unless the driver doesn't implement getMetaData() properly
        }

        final Integer queryTimeout = context.getProperty(QUERY_TIMEOUT).evaluateAttributeExpressions()
                .asTimePeriod(TimeUnit.SECONDS).intValue();
        st.setQueryTimeout(queryTimeout); // timeout in seconds
        try {
            logger.debug("Executing query {}", new Object[] { selectQuery });
            final ResultSet resultSet = st.executeQuery(selectQuery);
            int fragmentIndex = 0;

            while (true) {
                final AtomicLong nrOfRows = new AtomicLong(0L);

                FlowFile fileToProcess = session.create();
                try {
                    fileToProcess = session.write(fileToProcess, out -> {
                        // Max values will be updated in the state property map by the callback
                        final MaxValueResultSetRowCollector maxValCollector = new MaxValueResultSetRowCollector(
                                tableName, statePropertyMap, dbAdapter);
                        try {
                            nrOfRows.set(JdbcCommon.convertToAvroStream(resultSet, out, tableName,
                                    maxValCollector, maxRowsPerFlowFile, convertNamesForAvro));
                        } catch (SQLException | RuntimeException e) {
                            throw new ProcessException(
                                    "Error during database query or conversion of records to Avro.", e);
                        }
                    });
                } catch (ProcessException e) {
                    // Add flowfile to results before rethrowing so it will be removed from session in outer catch
                    resultSetFlowFiles.add(fileToProcess);
                    throw e;
                }

                if (nrOfRows.get() > 0) {
                    // set attribute how many rows were selected
                    fileToProcess = session.putAttribute(fileToProcess, RESULT_ROW_COUNT,
                            String.valueOf(nrOfRows.get()));
                    fileToProcess = session.putAttribute(fileToProcess, RESULT_TABLENAME, tableName);
                    if (maxRowsPerFlowFile > 0) {
                        fileToProcess = session.putAttribute(fileToProcess, "fragment.identifier",
                                fragmentIdentifier);
                        fileToProcess = session.putAttribute(fileToProcess, "fragment.index",
                                String.valueOf(fragmentIndex));
                    }

                    logger.info("{} contains {} Avro records; transferring to 'success'",
                            new Object[] { fileToProcess, nrOfRows.get() });
                    session.getProvenanceReporter().receive(fileToProcess, jdbcURL,
                            stopWatch.getElapsed(TimeUnit.MILLISECONDS));
                    resultSetFlowFiles.add(fileToProcess);
                } else {
                    // If there were no rows returned, don't send the flowfile
                    session.remove(fileToProcess);
                    context.yield();
                    break;
                }

                fragmentIndex++;
                if (maxFragments > 0 && fragmentIndex >= maxFragments) {
                    break;
                }
            }

            for (int i = 0; i < resultSetFlowFiles.size(); i++) {
                // Add maximum values as attributes
                for (Map.Entry<String, String> entry : statePropertyMap.entrySet()) {
                    // Get just the column name from the key
                    String key = entry.getKey();
                    String colName = key
                            .substring(key.lastIndexOf(NAMESPACE_DELIMITER) + NAMESPACE_DELIMITER.length());
                    resultSetFlowFiles.set(i, session.putAttribute(resultSetFlowFiles.get(i),
                            "maxvalue." + colName, entry.getValue()));
                }

                //set count on all FlowFiles
                if (maxRowsPerFlowFile > 0) {
                    resultSetFlowFiles.set(i, session.putAttribute(resultSetFlowFiles.get(i),
                            "fragment.count", Integer.toString(fragmentIndex)));
                }
            }
        } catch (final SQLException e) {
            throw e;
        }

        session.transfer(resultSetFlowFiles, REL_SUCCESS);
    } catch (final ProcessException | SQLException e) {
        logger.error("Unable to execute SQL select query {} due to {}", new Object[] { selectQuery, e });
        if (!resultSetFlowFiles.isEmpty()) {
            session.remove(resultSetFlowFiles);
        }
        context.yield();
    } finally {
        session.commit();
        try {
            // Update the state
            stateManager.setState(statePropertyMap, Scope.CLUSTER);
        } catch (IOException ioe) {
            getLogger().error("{} failed to update State Manager, maximum observed values will not be recorded",
                    new Object[] { this, ioe });
        }
    }
}
From source file: org.apache.hadoop.hbase.regionserver.wal.TestWALReplay.java
/**
 * Tests for hbase-2727.
 * @throws Exception
 * @see https://issues.apache.org/jira/browse/HBASE-2727
 */
@Test
public void test2727() throws Exception {
    // Test being able to have > 1 set of edits in the recovered.edits directory.
    // Ensure edits are replayed properly.
    final TableName tableName = TableName.valueOf("test2727");
    HRegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
    Path basedir = FSUtils.getTableDir(hbaseRootDir, tableName);
    deleteDir(basedir);

    HTableDescriptor htd = createBasic3FamilyHTD(tableName);
    HRegion region2 = HRegion.createHRegion(hri, hbaseRootDir, this.conf, htd);
    HRegion.closeHRegion(region2);
    final byte[] rowName = tableName.getName();

    HLog wal1 = createWAL(this.conf);
    // Add 1k to each family.
    final int countPerFamily = 1000;
    final AtomicLong sequenceId = new AtomicLong(1);
    for (HColumnDescriptor hcd : htd.getFamilies()) {
        addWALEdits(tableName, hri, rowName, hcd.getName(), countPerFamily, ee, wal1, htd, sequenceId);
    }
    wal1.close();
    runWALSplit(this.conf);

    HLog wal2 = createWAL(this.conf);
    // Add 1k to each family.
    for (HColumnDescriptor hcd : htd.getFamilies()) {
        addWALEdits(tableName, hri, rowName, hcd.getName(), countPerFamily, ee, wal2, htd, sequenceId);
    }
    wal2.close();
    runWALSplit(this.conf);

    HLog wal3 = createWAL(this.conf);
    try {
        HRegion region = HRegion.openHRegion(this.conf, this.fs, hbaseRootDir, hri, htd, wal3);
        long seqid = region.getOpenSeqNum();
        // The region opens with sequenceId 1. With 6k edits, its sequence number reaches 6k + 1.
        // When opened, this region would apply 6k edits, and increment the sequenceId by 1
        assertTrue(seqid > sequenceId.get());
        assertEquals(seqid - 1, sequenceId.get());

        LOG.debug("region.getOpenSeqNum(): " + region.getOpenSeqNum() + ", wal3.id: " + sequenceId.get());

        // TODO: Scan all.
        region.close();
    } finally {
        wal3.closeAndDelete();
    }
}
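sequenceId is a single AtomicLong threaded through every addWALEdits call so all edits, across both WALs, draw from one monotonically increasing sequence; afterwards get() reports the highest id handed out. A stripped-down sketch of such a shared sequence generator (names are illustrative):

import java.util.concurrent.atomic.AtomicLong;

public class SequenceIdSketch {
    /** Appends 'count' edits, stamping each with the next sequence id. */
    static void addEdits(String family, int count, AtomicLong sequenceId) {
        for (int i = 0; i < count; i++) {
            long seq = sequenceId.incrementAndGet();
            // a real WAL would persist (family, seq, payload) here
        }
    }

    public static void main(String[] args) {
        final AtomicLong sequenceId = new AtomicLong(1);
        for (String family : new String[] { "a", "b", "c" }) {
            addEdits(family, 1000, sequenceId);
        }
        // get() now reports the last id issued: 1 + 3 * 1000
        System.out.println("highest sequence id: " + sequenceId.get()); // 3001
    }
}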
From source file: org.apache.bookkeeper.metadata.etcd.Etcd64bitIdGeneratorTest.java
/**
 * Test generating ids in parallel and ensure there are no duplicated ids.
 */
@Test
public void testGenerateIdParallel() throws Exception {
    final int numThreads = 10;
    @Cleanup("shutdown")
    ExecutorService executor = Executors.newFixedThreadPool(numThreads);

    final int numIds = 10000;
    final AtomicLong totalIds = new AtomicLong(numIds);
    final Set<Long> ids = Collections.newSetFromMap(new ConcurrentHashMap<>());
    final RateLimiter limiter = RateLimiter.create(1000);
    final CompletableFuture<Void> doneFuture = new CompletableFuture<>();
    for (int i = 0; i < numThreads; i++) {
        executor.submit(() -> {
            Client client = Client.builder().endpoints(etcdContainer.getClientEndpoint()).build();
            Etcd64bitIdGenerator gen = new Etcd64bitIdGenerator(client.getKVClient(), scope);

            AtomicBoolean running = new AtomicBoolean(true);

            while (running.get()) {
                limiter.acquire();

                GenericCallbackFuture<Long> genFuture = new GenericCallbackFuture<>();
                gen.generateLedgerId(genFuture);

                genFuture.thenAccept(lid -> {
                    boolean duplicatedFound = !(ids.add(lid));
                    if (duplicatedFound) {
                        running.set(false);
                        doneFuture.completeExceptionally(
                                new IllegalStateException("Duplicated id " + lid + " generated : " + ids));
                        return;
                    } else {
                        if (totalIds.decrementAndGet() <= 0) {
                            running.set(false);
                            doneFuture.complete(null);
                        }
                    }
                }).exceptionally(cause -> {
                    running.set(false);
                    doneFuture.completeExceptionally(cause);
                    return null;
                });
            }
        });
    }

    FutureUtils.result(doneFuture);
    assertTrue(totalIds.get() <= 0);
    assertTrue(ids.size() >= numIds);
}
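totalIds runs the work budget in reverse: it starts at the target count, each worker calls decrementAndGet() per generated id, and whichever worker drives it to zero or below completes the future; the final get() confirms the budget was exhausted. A condensed sketch of that countdown pattern with an in-memory generator replacing etcd:

import java.util.Set;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicLong;

public class CountdownBudgetSketch {
    public static void main(String[] args) throws Exception {
        final int numThreads = 4;
        final int numIds = 10_000;
        final AtomicLong remaining = new AtomicLong(numIds);
        final Set<Long> ids = ConcurrentHashMap.newKeySet();
        final AtomicLong nextId = new AtomicLong(); // stand-in for the etcd-backed generator
        final CompletableFuture<Void> done = new CompletableFuture<>();

        ExecutorService executor = Executors.newFixedThreadPool(numThreads);
        for (int i = 0; i < numThreads; i++) {
            executor.submit(() -> {
                while (!done.isDone()) {
                    long id = nextId.incrementAndGet();
                    if (!ids.add(id)) {
                        done.completeExceptionally(new IllegalStateException("duplicate id " + id));
                        return;
                    }
                    if (remaining.decrementAndGet() <= 0) {
                        done.complete(null); // this worker consumed the last unit of budget
                        return;
                    }
                }
            });
        }

        done.get(30, TimeUnit.SECONDS);
        executor.shutdownNow();
        System.out.println("remaining=" + remaining.get() + ", unique ids=" + ids.size());
        // remaining.get() <= 0 and ids.size() >= numIds, mirroring the test's assertions
    }
}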