List of usage examples for the java.util.concurrent.atomic.AtomicLong constructor
public AtomicLong(long initialValue)
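Creates a new AtomicLong with the given initial value. Before the project examples below, here is a minimal, self-contained sketch of the constructor in isolation (class and variable names are illustrative, not taken from any of the projects):

import java.util.concurrent.atomic.AtomicLong;

public class AtomicLongBasics {
    public static void main(String[] args) {
        // Start the counter at the given initial value.
        AtomicLong counter = new AtomicLong(0L);

        counter.incrementAndGet();          // atomically adds 1, value is now 1
        counter.addAndGet(41L);             // atomically adds 41, value is now 42
        System.out.println(counter.get());  // prints 42
    }
}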
From source file:org.apache.nifi.processors.standard.AbstractQueryDatabaseTable.java
@Override
public void onTrigger(final ProcessContext context, final ProcessSessionFactory sessionFactory)
        throws ProcessException {
    // Fetch the column/table info once
    if (!setupComplete.get()) {
        super.setup(context);
    }
    ProcessSession session = sessionFactory.createSession();
    final List<FlowFile> resultSetFlowFiles = new ArrayList<>();

    final ComponentLog logger = getLogger();

    final DBCPService dbcpService = context.getProperty(DBCP_SERVICE).asControllerService(DBCPService.class);
    final DatabaseAdapter dbAdapter = dbAdapters.get(context.getProperty(DB_TYPE).getValue());
    final String tableName = context.getProperty(TABLE_NAME).evaluateAttributeExpressions().getValue();
    final String columnNames = context.getProperty(COLUMN_NAMES).evaluateAttributeExpressions().getValue();
    final String sqlQuery = context.getProperty(SQL_QUERY).evaluateAttributeExpressions().getValue();
    final String maxValueColumnNames = context.getProperty(MAX_VALUE_COLUMN_NAMES)
            .evaluateAttributeExpressions().getValue();
    final String customWhereClause = context.getProperty(WHERE_CLAUSE).evaluateAttributeExpressions()
            .getValue();
    final Integer fetchSize = context.getProperty(FETCH_SIZE).evaluateAttributeExpressions().asInteger();
    final Integer maxRowsPerFlowFile = context.getProperty(MAX_ROWS_PER_FLOW_FILE)
            .evaluateAttributeExpressions().asInteger();
    final Integer outputBatchSizeField = context.getProperty(OUTPUT_BATCH_SIZE).evaluateAttributeExpressions()
            .asInteger();
    final int outputBatchSize = outputBatchSizeField == null ? 0 : outputBatchSizeField;
    final Integer maxFragments = context.getProperty(MAX_FRAGMENTS).isSet()
            ? context.getProperty(MAX_FRAGMENTS).evaluateAttributeExpressions().asInteger()
            : 0;

    SqlWriter sqlWriter = configureSqlWriter(session, context);

    final StateManager stateManager = context.getStateManager();
    final StateMap stateMap;
    try {
        stateMap = stateManager.getState(Scope.CLUSTER);
    } catch (final IOException ioe) {
        getLogger().error("Failed to retrieve observed maximum values from the State Manager. Will not perform "
                + "query until this is accomplished.", ioe);
        context.yield();
        return;
    }

    // Make a mutable copy of the current state property map. This will be updated by the result row callback,
    // and eventually set as the current state map (after the session has been committed)
    final Map<String, String> statePropertyMap = new HashMap<>(stateMap.toMap());

    // If an initial max value for column(s) has been specified using properties, and this column is not in the
    // state manager, sync them to the state property map
    for (final Map.Entry<String, String> maxProp : maxValueProperties.entrySet()) {
        String maxPropKey = maxProp.getKey().toLowerCase();
        String fullyQualifiedMaxPropKey = getStateKey(tableName, maxPropKey, dbAdapter);
        if (!statePropertyMap.containsKey(fullyQualifiedMaxPropKey)) {
            String newMaxPropValue;
            // If we can't find the value at the fully-qualified key name, it is possible (under a previous scheme)
            // the value has been stored under a key that is only the column name. Fall back to check the column name,
            // but store the new initial max value under the fully-qualified key.
            if (statePropertyMap.containsKey(maxPropKey)) {
                newMaxPropValue = statePropertyMap.get(maxPropKey);
            } else {
                newMaxPropValue = maxProp.getValue();
            }
            statePropertyMap.put(fullyQualifiedMaxPropKey, newMaxPropValue);
        }
    }

    List<String> maxValueColumnNameList = StringUtils.isEmpty(maxValueColumnNames)
            ? null
            : Arrays.asList(maxValueColumnNames.split("\\s*,\\s*"));
    final String selectQuery = getQuery(dbAdapter, tableName, sqlQuery, columnNames, maxValueColumnNameList,
            customWhereClause, statePropertyMap);
    final StopWatch stopWatch = new StopWatch(true);
    final String fragmentIdentifier = UUID.randomUUID().toString();

    try (final Connection con = dbcpService.getConnection(Collections.emptyMap());
            final Statement st = con.createStatement()) {

        if (fetchSize != null && fetchSize > 0) {
            try {
                st.setFetchSize(fetchSize);
            } catch (SQLException se) {
                // Not all drivers support this, just log the error (at debug level) and move on
                logger.debug("Cannot set fetch size to {} due to {}",
                        new Object[] { fetchSize, se.getLocalizedMessage() }, se);
            }
        }

        String jdbcURL = "DBCPService";
        try {
            DatabaseMetaData databaseMetaData = con.getMetaData();
            if (databaseMetaData != null) {
                jdbcURL = databaseMetaData.getURL();
            }
        } catch (SQLException se) {
            // Ignore and use default JDBC URL. This shouldn't happen unless the driver doesn't implement getMetaData() properly
        }

        final Integer queryTimeout = context.getProperty(QUERY_TIMEOUT).evaluateAttributeExpressions()
                .asTimePeriod(TimeUnit.SECONDS).intValue();
        st.setQueryTimeout(queryTimeout); // timeout in seconds

        if (logger.isDebugEnabled()) {
            logger.debug("Executing query {}", new Object[] { selectQuery });
        }
        try (final ResultSet resultSet = st.executeQuery(selectQuery)) {
            int fragmentIndex = 0;
            // Max values will be updated in the state property map by the callback
            final MaxValueResultSetRowCollector maxValCollector = new MaxValueResultSetRowCollector(tableName,
                    statePropertyMap, dbAdapter);

            while (true) {
                final AtomicLong nrOfRows = new AtomicLong(0L);
                FlowFile fileToProcess = session.create();
                try {
                    fileToProcess = session.write(fileToProcess, out -> {
                        try {
                            nrOfRows.set(sqlWriter.writeResultSet(resultSet, out, getLogger(), maxValCollector));
                        } catch (Exception e) {
                            throw new ProcessException("Error during database query or conversion of records.", e);
                        }
                    });
                } catch (ProcessException e) {
                    // Add flowfile to results before rethrowing so it will be removed from session in outer catch
                    resultSetFlowFiles.add(fileToProcess);
                    throw e;
                }

                if (nrOfRows.get() > 0) {
                    // set attributes
                    final Map<String, String> attributesToAdd = new HashMap<>();
                    attributesToAdd.put(RESULT_ROW_COUNT, String.valueOf(nrOfRows.get()));
                    attributesToAdd.put(RESULT_TABLENAME, tableName);

                    if (maxRowsPerFlowFile > 0) {
                        attributesToAdd.put(FRAGMENT_ID, fragmentIdentifier);
                        attributesToAdd.put(FRAGMENT_INDEX, String.valueOf(fragmentIndex));
                    }

                    attributesToAdd.putAll(sqlWriter.getAttributesToAdd());
                    fileToProcess = session.putAllAttributes(fileToProcess, attributesToAdd);
                    sqlWriter.updateCounters(session);

                    logger.info("{} contains {} records; transferring to 'success'",
                            new Object[] { fileToProcess, nrOfRows.get() });

                    session.getProvenanceReporter().receive(fileToProcess, jdbcURL,
                            stopWatch.getElapsed(TimeUnit.MILLISECONDS));
                    resultSetFlowFiles.add(fileToProcess);

                    // If we've reached the batch size, send out the flow files
                    if (outputBatchSize > 0 && resultSetFlowFiles.size() >= outputBatchSize) {
                        session.transfer(resultSetFlowFiles, REL_SUCCESS);
                        session.commit();
                        resultSetFlowFiles.clear();
                    }
                } else {
                    // If there were no rows returned, don't send the flowfile
                    session.remove(fileToProcess);
                    // If no rows and this was first FlowFile, yield
                    if (fragmentIndex == 0) {
                        context.yield();
                    }
                    break;
                }

                fragmentIndex++;
                if (maxFragments > 0 && fragmentIndex >= maxFragments) {
                    break;
                }

                // If we aren't splitting up the data into flow files or fragments, then the result set has been
                // entirely fetched so don't loop back around
                if (maxFragments == 0 && maxRowsPerFlowFile == 0) {
                    break;
                }

                // If we are splitting up the data into flow files, don't loop back around if we've gotten all results
                if (maxRowsPerFlowFile > 0 && nrOfRows.get() < maxRowsPerFlowFile) {
                    break;
                }
            }

            // Apply state changes from the Max Value tracker
            maxValCollector.applyStateChanges();

            // Even though the maximum value and total count are known at this point, to maintain consistent behavior
            // if Output Batch Size is set, do not store the attributes
            if (outputBatchSize == 0) {
                for (int i = 0; i < resultSetFlowFiles.size(); i++) {
                    // Add maximum values as attributes
                    for (Map.Entry<String, String> entry : statePropertyMap.entrySet()) {
                        // Get just the column name from the key
                        String key = entry.getKey();
                        String colName = key
                                .substring(key.lastIndexOf(NAMESPACE_DELIMITER) + NAMESPACE_DELIMITER.length());
                        resultSetFlowFiles.set(i, session.putAttribute(resultSetFlowFiles.get(i),
                                "maxvalue." + colName, entry.getValue()));
                    }

                    // set count on all FlowFiles
                    if (maxRowsPerFlowFile > 0) {
                        resultSetFlowFiles.set(i, session.putAttribute(resultSetFlowFiles.get(i),
                                FRAGMENT_COUNT, Integer.toString(fragmentIndex)));
                    }
                }
            }
        } catch (final SQLException e) {
            throw e;
        }

        session.transfer(resultSetFlowFiles, REL_SUCCESS);
    } catch (final ProcessException | SQLException e) {
        logger.error("Unable to execute SQL select query {} due to {}", new Object[] { selectQuery, e });
        if (!resultSetFlowFiles.isEmpty()) {
            session.remove(resultSetFlowFiles);
        }
        context.yield();
    } finally {
        session.commit();
        try {
            // Update the state
            stateManager.setState(statePropertyMap, Scope.CLUSTER);
        } catch (IOException ioe) {
            getLogger().error("{} failed to update State Manager, maximum observed values will not be recorded",
                    new Object[] { this, ioe });
        }
    }
}
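In the example above, nrOfRows is created with new AtomicLong(0L) so the row count produced inside the session.write(...) lambda can be read after the callback returns; a captured local long could not be reassigned from inside the lambda. A stripped-down sketch of that pattern, where write(...) is a hypothetical stand-in for NiFi's session.write(...):

import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Consumer;

public class CallbackCountSketch {
    // Hypothetical stand-in for an API that hands us an output sink via a callback.
    static void write(Consumer<StringBuilder> callback) {
        callback.accept(new StringBuilder());
    }

    public static void main(String[] args) {
        final AtomicLong nrOfRows = new AtomicLong(0L);
        write(out -> {
            long written = 3;        // pretend the writer reported 3 rows
            nrOfRows.set(written);   // publish the count to the enclosing scope
        });
        System.out.println("rows written: " + nrOfRows.get()); // prints 3
    }
}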
From source file:com.bosscs.spark.commons.utils.Utils.java
/**
 * Cast number type.
 *
 * @param object the object
 * @param clazz the clazz
 * @return object
 */
public static Object castNumberType(Object object, Class clazz) {
    if (Number.class.isAssignableFrom(clazz)) {
        // AtomicInteger, AtomicLong, BigDecimal, BigInteger, Byte, Double, Float, Integer, Long, Short
        if (Double.class.isAssignableFrom(clazz)) {
            return ((Number) object).doubleValue();
        } else if (Long.class.isAssignableFrom(clazz)) {
            return ((Number) object).longValue();
        } else if (Float.class.isAssignableFrom(clazz)) {
            return ((Number) object).floatValue();
        } else if (Integer.class.isAssignableFrom(clazz)) {
            return ((Number) object).intValue();
        } else if (Short.class.isAssignableFrom(clazz)) {
            return ((Number) object).shortValue();
        } else if (Byte.class.isAssignableFrom(clazz)) {
            return ((Number) object).byteValue();
        } else if (BigInteger.class.isAssignableFrom(clazz)) {
            return BigInteger.valueOf(((Number) object).longValue());
        } else if (BigDecimal.class.isAssignableFrom(clazz)) {
            return BigDecimal.valueOf(((Number) object).longValue());
        } else if (AtomicLong.class.isAssignableFrom(clazz)) {
            return new AtomicLong(((Number) object).longValue());
        } else if (AtomicInteger.class.isAssignableFrom(clazz)) {
            return new AtomicInteger(((Number) object).intValue());
        }
    }
    throw new ClassCastException("it is not a Number Type" + object.getClass() + "|" + clazz);
}
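Here new AtomicLong(((Number) object).longValue()) builds an AtomicLong from any Number when the requested target class is AtomicLong. A hypothetical call site might look like the sketch below; it assumes the Utils class shown above is on the classpath:

import java.util.concurrent.atomic.AtomicLong;
import com.bosscs.spark.commons.utils.Utils; // the class shown in the example above

public class CastNumberTypeDemo {
    public static void main(String[] args) {
        // Request an AtomicLong built from an ordinary Integer.
        AtomicLong al = (AtomicLong) Utils.castNumberType(Integer.valueOf(42), AtomicLong.class);
        System.out.println(al.get()); // prints 42
    }
}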
From source file:io.druid.server.namespace.cache.NamespaceExtractionCacheManagerExecutorsTest.java
@Test(timeout = 50_000)
public void testRunCount() throws InterruptedException, ExecutionException {
    final Lifecycle lifecycle = new Lifecycle();
    final NamespaceExtractionCacheManager onHeap;
    final AtomicLong runCount = new AtomicLong(0);
    final CountDownLatch latch = new CountDownLatch(1);
    try {
        onHeap = new OnHeapNamespaceExtractionCacheManager(lifecycle,
                new ConcurrentHashMap<String, Function<String, String>>(), new NoopServiceEmitter(),
                ImmutableMap.<Class<? extends ExtractionNamespace>, ExtractionNamespaceFunctionFactory<?>>of(
                        URIExtractionNamespace.class,
                        new URIExtractionNamespaceFunctionFactory(
                                ImmutableMap.<String, SearchableVersionedDataFinder>of("file",
                                        new LocalFileTimestampVersionFinder()))));
        final URIExtractionNamespace namespace = new URIExtractionNamespace("ns", tmpFile.toURI(),
                new URIExtractionNamespace.ObjectMapperFlatDataParser(
                        URIExtractionNamespaceTest.registerTypes(new ObjectMapper())),
                new Period(1l), null);
        final String cacheId = UUID.randomUUID().toString();
        ListenableFuture<?> future = onHeap.schedule(namespace, factory, new Runnable() {
            @Override
            public void run() {
                manager.getPostRunnable(namespace, factory, cacheId).run();
                latch.countDown();
                runCount.incrementAndGet();
            }
        }, cacheId);
        latch.await();
        Thread.sleep(20);
    } finally {
        lifecycle.stop();
    }
    onHeap.waitForServiceToEnd(1_000, TimeUnit.MILLISECONDS);
    Assert.assertTrue(runCount.get() > 5);
}
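The test counts executions of a scheduled task with runCount.incrementAndGet() and reads the total from the test thread with runCount.get(); AtomicLong makes that cross-thread handoff safe. A reduced sketch of the same idea using a plain ScheduledExecutorService rather than Druid's cache manager:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

public class RunCountSketch {
    public static void main(String[] args) throws InterruptedException {
        final AtomicLong runCount = new AtomicLong(0);
        ScheduledExecutorService pool = Executors.newSingleThreadScheduledExecutor();
        // Increment the shared counter once per millisecond from the scheduler thread.
        pool.scheduleAtFixedRate(runCount::incrementAndGet, 0, 1, TimeUnit.MILLISECONDS);

        Thread.sleep(100);
        pool.shutdownNow();
        // Reading from the main thread is safe; the exact value depends on timing.
        System.out.println("task ran " + runCount.get() + " times");
    }
}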
From source file:fr.gouv.vitam.mdbes.QueryBench.java
/**
 * Prepare the parse
 *
 * @param request
 * @throws InvalidParseOperationException
 */
public void prepareParse(String request) throws InvalidParseOperationException {
    final JsonNode rootNode = JsonHandler.getFromString(request);
    if (rootNode.isMissingNode()) {
        throw new InvalidParseOperationException("The current Node is missing(empty): RequestRoot");
    }
    // take model
    JsonNode node = ((ArrayNode) rootNode).remove(0);
    model = node.get(MODEL).asText();
    // level are described as array entries, each being single element (no name)
    int level = 0;
    for (final JsonNode jlevel : rootNode) {
        context.cpts.put(CPTLEVEL + level, new AtomicLong(0));
        // now parse sub element as single command/value
        analyzeVary(jlevel, level);
        level++;
    }
}
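Each nesting level gets its own counter: context.cpts.put(CPTLEVEL + level, new AtomicLong(0)) registers a fresh AtomicLong per level that can later be incremented concurrently. A minimal sketch of per-key counters kept in a map (names are illustrative):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;

public class PerLevelCountersSketch {
    public static void main(String[] args) {
        Map<String, AtomicLong> cpts = new ConcurrentHashMap<>();
        // Register one counter per level, starting at zero.
        for (int level = 0; level < 3; level++) {
            cpts.put("cptLevel" + level, new AtomicLong(0));
        }
        // Later, any thread can bump a level's counter without extra locking.
        cpts.get("cptLevel1").incrementAndGet();
        cpts.get("cptLevel1").incrementAndGet();
        System.out.println(cpts); // e.g. {cptLevel0=0, cptLevel1=2, cptLevel2=0} (order may vary)
    }
}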
From source file:org.apache.hadoop.hbase.client.transactional.TransactionalAggregationClient.java
/**
 * It gives the row count, by summing up the individual results obtained from
 * regions. In case the qualifier is null, FirstKeyValueFilter is used to
 * optimised the operation. In case qualifier is provided, I can't use the
 * filter as it may set the flag to skip to next row, but the value read is
 * not of the given filter: in this case, this particular row will not be
 * counted ==> an error.
 * @param table
 * @param ci
 * @param scan
 * @return <R, S>
 * @throws Throwable
 */
public <R, S, P extends Message, Q extends Message, T extends Message> long rowCount(final long transactionId,
        final TransactionalTable table, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan)
        throws Throwable {
    byte[] currentBeginKey = scan.getStartRow();
    HRegionInfo currentRegion = table.getRegionLocation(currentBeginKey).getRegionInfo();
    com.google.protobuf.ByteString regionName = ByteString.copyFromUtf8(currentRegion.getRegionNameAsString());
    final TransactionalAggregateRequest requestArg = validateArgAndGetPB(regionName, transactionId, scan, ci,
            true);

    class RowNumCallback implements Batch.Callback<Long> {
        private final AtomicLong rowCountL = new AtomicLong(0);

        public long getRowNumCount() {
            return rowCountL.get();
        }

        @Override
        public void update(byte[] region, byte[] row, Long result) {
            rowCountL.addAndGet(result.longValue());
        }
    }

    RowNumCallback rowNum = new RowNumCallback();
    table.coprocessorService(TrxRegionService.class, scan.getStartRow(), scan.getStopRow(),
            new Batch.Call<TrxRegionService, Long>() {
                @Override
                public Long call(TrxRegionService instance) throws IOException {
                    ServerRpcController controller = new ServerRpcController();
                    BlockingRpcCallback<TransactionalAggregateResponse> rpcCallback =
                            new BlockingRpcCallback<TransactionalAggregateResponse>();
                    instance.getRowNum(controller, requestArg, rpcCallback);
                    TransactionalAggregateResponse response = rpcCallback.get();
                    if (controller.failedOnException()) {
                        throw controller.getFailedOn();
                    }
                    byte[] bytes = getBytesFromResponse(response.getFirstPart(0));
                    ByteBuffer bb = ByteBuffer.allocate(8).put(bytes);
                    bb.rewind();
                    return bb.getLong();
                }
            }, rowNum);
    return rowNum.getRowNumCount();
}
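The RowNumCallback above sums per-region partial counts with rowCountL.addAndGet(result), since update(...) may be invoked from several threads as regions respond. A minimal sketch of that accumulation pattern, detached from HBase (the list stands in for per-region results):

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;

public class PartialSumSketch {
    public static void main(String[] args) {
        final AtomicLong total = new AtomicLong(0);
        // Pretend each element is the row count reported by one region.
        List<Long> partialCounts = Arrays.asList(10L, 25L, 7L);
        // In the real code this runs from a callback that may be called concurrently.
        partialCounts.parallelStream().forEach(total::addAndGet);
        System.out.println("total rows: " + total.get()); // prints 42
    }
}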
From source file:de.tudarmstadt.lt.seg.app.Segmenter.java
private void run_parallel() throws Exception {
    InputStream in = System.in;
    if (!"-".equals(_filename_in))
        in = new FileInputStream(_filename_in);
    Stream<String> liter = new BufferedReader(new InputStreamReader(in, Charset.defaultCharset())).lines();

    ThreadLocal<ISentenceSplitter> sentenceSplitter = ThreadLocal.withInitial(() -> {
        try {
            return newSentenceSplitter();
        } catch (ClassNotFoundException | InstantiationException | IllegalAccessException e) {
            throw new RuntimeException(e);
        }
    });
    ThreadLocal<ITokenizer> tokenizer = ThreadLocal.withInitial(() -> {
        try {
            return newTokenizer();
        } catch (ClassNotFoundException | InstantiationException | IllegalAccessException e) {
            throw new RuntimeException(e);
        }
    });

    final PrintWriter[] w = new PrintWriter[_parallelism];
    // init writers
    for (int i = 0; i < _parallelism; i++) {
        OutputStream out = System.out;
        if (!"-".equals(_filename_out)) {
            out = new FileOutputStream(String.format("%s_%d", _filename_out, i));
        }
        w[i] = new PrintWriter(new OutputStreamWriter(out, Charset.defaultCharset()));
    }

    BlockingQueue<Runnable> queue = new ArrayBlockingQueue<Runnable>(_parallelism * 2, true);
    ExecutorService es = new ThreadPoolExecutor(_parallelism, _parallelism, 0L, TimeUnit.MILLISECONDS, queue);

    AtomicLong lc = new AtomicLong(0);
    liter.forEach((line) -> {
        // don't try to submit new threads, wait until the thread queue has some capacity again
        while (queue.remainingCapacity() == 0)
            try {
                Thread.sleep(10);
            } catch (InterruptedException e) {
                /**/
            }
        es.submit(() -> {
            final long docid = lc.incrementAndGet();
            if (docid % 1000 == 0)
                System.err.format("Processing line %d ('%s')%n", docid, _filename_in);
            final int w_i = (int) (docid % _parallelism);
            split_and_tokenize(new StringReader(line.trim()), String.format("%s:%d", _filename_in, docid),
                    sentenceSplitter.get(), tokenizer.get(), _level_filter, _level_normalize, _merge_types,
                    _merge_tokens, _separator_sentence, _separator_token, _separator_desc, w[w_i]);
        });
    });
    es.shutdown();
    es.awaitTermination(Integer.MAX_VALUE, TimeUnit.DAYS);

    // TODO: the stream parallelism version does not work because it submits too many threads at once
    // AtomicLong lc = new AtomicLong(0);
    // ForkJoinPool forkJoinPool = new ForkJoinPool(_parallelism);
    // forkJoinPool.submit(() ->
    //     liter.parallel().forEach((line) -> {
    //         final long docid = lc.incrementAndGet();
    //         if (docid % 1000 == 0)
    //             System.err.format("Processing line %d ('%s')%n", docid, _filename_in);
    //         String l = line.replace("\\t", "\t").replace("\\n", "\n");
    //         split_and_tokenize(new StringReader(l), String.format("%s:%d", _filename_in, docid),
    //                 sentenceSplitter.get(), tokenizer.get(), _level_filter, _level_normalize, _merge_types,
    //                 _merge_tokens, _separator_sentence, _separator_token, _separator_desc, w);
    //     })).get();
}
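Here lc = new AtomicLong(0) hands out a unique, increasing document id via lc.incrementAndGet() to every task submitted to the thread pool, and the id also selects an output writer. A condensed sketch of that numbering pattern (input lines and pool size are made up):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import java.util.stream.Stream;

public class LineNumberingSketch {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService es = Executors.newFixedThreadPool(4);
        AtomicLong lc = new AtomicLong(0);
        Stream.of("a", "b", "c", "d").forEach(line ->
            es.submit(() -> {
                long docid = lc.incrementAndGet();     // unique id per submitted line
                int writerIndex = (int) (docid % 4);   // e.g. pick one of 4 writers
                System.out.println("doc " + docid + " -> writer " + writerIndex + ": " + line);
            }));
        es.shutdown();
        es.awaitTermination(1, TimeUnit.MINUTES);
    }
}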
From source file:edu.tum.cs.vis.model.util.algorithm.ACCUM.java
/**
 * Diffuse the curvatures across the mesh
 *
 * @param m
 *            the main model
 * @param curvatures
 *            map assigning a curvature to each vertex of the model
 * @param sigma
 *            smoothing sigma
 */
private static void diffuse_curv(Model m, HashMap<Vertex, Curvature> curvatures, float sigma) {
    int nv = m.getVertices().size();

    float invsigma2 = (float) (1.0f / Math.pow(sigma, 2));

    Vertex[] cflt = new Vertex[nv];
    // TODO #pragma omp parallel
    {
        // Thread-local flags
        Map<Vertex, Long> flags = new HashMap<Vertex, Long>(nv);
        AtomicLong flag_curr = new AtomicLong(0);
        // TODO #pragma omp for
        ACCUM accumCurv = new AccumCurv();
        for (int i = 0; i < nv; i++) {
            cflt[i] = new Vertex(0, 0, 0);
            diffuse_vert_field(m, curvatures, flags, flag_curr, accumCurv, i, invsigma2, cflt[i]);
        }

        // TODO #pragma omp for
        for (int i = 0; i < nv; i++) {
            Vertex v = m.getVertices().get(i);
            Curvature c = curvatures.get(v);
            Vector3f pdir[] = new Vector3f[] { c.getPrincipleDirectionMax(), c.getPrincipleDirectionMin() };
            float k[] = new float[] { c.getCurvatureMax(), c.getCurvatureMin() };
            diagonalize_curv(c.getPrincipleDirectionMax(), c.getPrincipleDirectionMin(), cflt[i].x, cflt[i].y,
                    cflt[i].z, v.getNormalVector(), pdir, k);
            c.setPrincipleDirectionMax(pdir[0]);
            c.setPrincipleDirectionMin(pdir[1]);
            c.setCurvatureMax(k[0]);
            c.setCurvatureMin(k[1]);
        }
    } // #pragma omp parallel
}
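In this example flag_curr is used less as a concurrency primitive than as a mutable long holder: diffuse_vert_field(...) can advance the shared flag value on each call and the caller sees the update. A tiny sketch of that holder pattern (markVisited is a hypothetical helper, not part of the model code):

import java.util.concurrent.atomic.AtomicLong;

public class MutableFlagSketch {
    // The helper advances the shared flag; callers observe the updated value.
    static void markVisited(AtomicLong flagCurr) {
        long newFlag = flagCurr.incrementAndGet();
        System.out.println("current flag is now " + newFlag);
    }

    public static void main(String[] args) {
        AtomicLong flagCurr = new AtomicLong(0);
        for (int i = 0; i < 3; i++) {
            markVisited(flagCurr); // prints 1, 2, 3
        }
    }
}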
From source file:com.github.rinde.datgen.pdptw.DatasetGenerator.java
Dataset<GeneratedScenario> doGenerate() {
    final ListeningExecutorService service = MoreExecutors
            .listeningDecorator(Executors.newFixedThreadPool(builder.numThreads));
    final Dataset<GeneratedScenario> dataset = Dataset.naturalOrder();
    final List<ScenarioCreator> jobs = new ArrayList<>();

    final RandomGenerator rng = new MersenneTwister(builder.randomSeed);
    final Map<GeneratorSettings, IdSeedGenerator> rngMap = new LinkedHashMap<>();

    for (final Long urgency : builder.urgencyLevels) {
        for (final Double scale : builder.scaleLevels) {
            for (final Entry<TimeSeriesType, Collection<Range<Double>>> dynLevel : builder.dynamismLevels
                    .asMap().entrySet()) {

                final int reps = builder.numInstances * dynLevel.getValue().size();
                final long urg = urgency * 60 * 1000L;

                // The office hours is the period in which new orders are accepted,
                // it is defined as [0,officeHoursLength).
                final long officeHoursLength;
                if (urg < halfDiagTT) {
                    officeHoursLength = builder.scenarioLengthMs - twoDiagTT - PICKUP_DURATION
                            - DELIVERY_DURATION;
                } else {
                    officeHoursLength = builder.scenarioLengthMs - urg - oneAndHalfDiagTT - PICKUP_DURATION
                            - DELIVERY_DURATION;
                }

                final int numOrders = DoubleMath.roundToInt(scale * numOrdersPerScale,
                        RoundingMode.UNNECESSARY);

                final ImmutableMap.Builder<String, String> props = ImmutableMap.builder();
                props.put("expected_num_orders", Integer.toString(numOrders));
                props.put("pickup_duration", Long.toString(PICKUP_DURATION));
                props.put("delivery_duration", Long.toString(DELIVERY_DURATION));
                props.put("width_height", String.format("%1.1fx%1.1f", AREA_WIDTH, AREA_WIDTH));

                // TODO store this in TimeSeriesType?
                final RangeSet<Double> rset = TreeRangeSet.create();
                for (final Range<Double> r : dynLevel.getValue()) {
                    rset.add(r);
                }
                // createTimeSeriesGenerator(dynLevel.getKey(), officeHoursLength,
                // numOrders, numOrdersPerScale, props);
                final GeneratorSettings set = GeneratorSettings.builder()
                        .setDayLength(builder.scenarioLengthMs).setOfficeHours(officeHoursLength)
                        .setTimeSeriesType(dynLevel.getKey())
                        .setDynamismRangeCenters(builder.dynamismRangeMap.subRangeMap(rset.span()))
                        .setUrgency(urg).setScale(scale).setNumOrders(numOrders)
                        .setProperties(props.build()).build();

                final IdSeedGenerator isg = new IdSeedGenerator(rng.nextLong());
                rngMap.put(set, isg);

                for (int i = 0; i < reps; i++) {
                    final LocationGenerator lg = Locations.builder().min(0d).max(AREA_WIDTH).buildUniform();
                    final TimeSeriesGenerator tsg2 = createTimeSeriesGenerator(dynLevel.getKey(),
                            officeHoursLength, numOrders, numOrdersPerScale,
                            ImmutableMap.<String, String>builder());
                    final ScenarioGenerator gen = createGenerator(officeHoursLength, urg, scale, tsg2,
                            set.getDynamismRangeCenters(), lg, builder, numOrdersPerScale);

                    jobs.add(ScenarioCreator.create(isg.next(), set, gen));
                }
            }
        }
    }

    final AtomicLong currentJobs = new AtomicLong(0L);
    final AtomicLong datasetSize = new AtomicLong(0L);

    LOGGER.info(" - Submitting " + jobs.size() + " Jobs");
    for (final ScenarioCreator job : jobs) {
        submitJob(currentJobs, service, job, builder.numInstances, dataset, rngMap, datasetSize);
    }

    final long targetSize = builder.numInstances * builder.dynamismLevels.values().size()
            * builder.scaleLevels.size() * builder.urgencyLevels.size();
    while (datasetSize.get() < targetSize || dataset.size() < targetSize) {
        try {
            // LOGGER.info(" - Waiting, current size ==" + dataset.size());
            Thread.sleep(THREAD_SLEEP_DURATION);
        } catch (final InterruptedException e) {
            throw new IllegalStateException(e);
        }
    }

    LOGGER.info(" - Shutdown Service, Awaiting Termination");
    service.shutdown();
    try {
        service.awaitTermination(1L, TimeUnit.HOURS);
    } catch (final InterruptedException e) {
        throw new IllegalStateException(e);
    }
    LOGGER.info(" - Returning dataset");
    return dataset;
}
From source file:com.facebook.presto.accumulo.tools.RewriteIndex.java
private void flushDeleteEntries(Connector connector, AccumuloTable table, long start, BatchWriter indexWriter,
        Multimap<ByteBuffer, Mutation> queryIndexEntries, Map<ByteBuffer, RowStatus> rowIdStatuses)
        throws MutationsRejectedException, TableNotFoundException {
    if (queryIndexEntries.size() > 0) {
        setRowIdStatuses(connector, table, start, queryIndexEntries, rowIdStatuses);

        AtomicLong numDeleteRows = new AtomicLong(0);
        ImmutableList.Builder<Mutation> builder = ImmutableList.builder();
        queryIndexEntries.asMap().entrySet().forEach(entry -> {
            if (rowIdStatuses.get(entry.getKey()) == RowStatus.ABSENT) {
                builder.addAll(entry.getValue());
                numDeleteRows.incrementAndGet();
            }
        });
        List<Mutation> deleteMutations = builder.build();

        numDeletedIndexEntries += deleteMutations.size();

        if (!dryRun) {
            indexWriter.addMutations(deleteMutations);
        }
    }
}
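numDeleteRows is an AtomicLong rather than a plain long because it is incremented inside the forEach lambda, where a captured local variable must be effectively final. A small sketch of that idiom (the status list is made up):

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;

public class LambdaCounterSketch {
    public static void main(String[] args) {
        List<String> statuses = Arrays.asList("ABSENT", "PRESENT", "ABSENT");
        AtomicLong numDeleteRows = new AtomicLong(0);
        // A plain long could not be incremented here: captured locals must be effectively final.
        statuses.forEach(status -> {
            if ("ABSENT".equals(status)) {
                numDeleteRows.incrementAndGet();
            }
        });
        System.out.println("rows to delete: " + numDeleteRows.get()); // prints 2
    }
}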
From source file:org.apache.hadoop.hbase.regionserver.wal.TestHLog.java
/**
 * Test new HDFS-265 sync.
 * @throws Exception
 */
@Test
public void Broken_testSync() throws Exception {
    TableName tableName = TableName.valueOf(getName());
    // First verify that using streams all works.
    Path p = new Path(dir, getName() + ".fsdos");
    FSDataOutputStream out = fs.create(p);
    out.write(tableName.getName());
    Method syncMethod = null;
    try {
        syncMethod = out.getClass().getMethod("hflush", new Class<?>[] {});
    } catch (NoSuchMethodException e) {
        try {
            syncMethod = out.getClass().getMethod("sync", new Class<?>[] {});
        } catch (NoSuchMethodException ex) {
            fail("This version of Hadoop supports neither Syncable.sync() " + "nor Syncable.hflush().");
        }
    }
    syncMethod.invoke(out, new Object[] {});
    FSDataInputStream in = fs.open(p);
    assertTrue(in.available() > 0);
    byte[] buffer = new byte[1024];
    int read = in.read(buffer);
    assertEquals(tableName.getName().length, read);
    out.close();
    in.close();

    HLog wal = HLogFactory.createHLog(fs, dir, "hlogdir", conf);
    final AtomicLong sequenceId = new AtomicLong(1);
    final int total = 20;

    HLog.Reader reader = null;
    try {
        HRegionInfo info = new HRegionInfo(tableName, null, null, false);
        HTableDescriptor htd = new HTableDescriptor();
        htd.addFamily(new HColumnDescriptor(tableName.getName()));
        for (int i = 0; i < total; i++) {
            WALEdit kvs = new WALEdit();
            kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), tableName.getName()));
            wal.append(info, tableName, kvs, System.currentTimeMillis(), htd, sequenceId);
        }
        // Now call sync and try reading. Opening a Reader before you sync just
        // gives you EOFE.
        wal.sync();
        // Open a Reader.
        Path walPath = ((FSHLog) wal).computeFilename();
        reader = HLogFactory.createReader(fs, walPath, conf);
        int count = 0;
        HLog.Entry entry = new HLog.Entry();
        while ((entry = reader.next(entry)) != null)
            count++;
        assertEquals(total, count);
        reader.close();
        // Add test that checks to see that an open of a Reader works on a file
        // that has had a sync done on it.
        for (int i = 0; i < total; i++) {
            WALEdit kvs = new WALEdit();
            kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), tableName.getName()));
            wal.append(info, tableName, kvs, System.currentTimeMillis(), htd, sequenceId);
        }
        reader = HLogFactory.createReader(fs, walPath, conf);
        count = 0;
        while ((entry = reader.next(entry)) != null)
            count++;
        assertTrue(count >= total);
        reader.close();
        // If I sync, should see double the edits.
        wal.sync();
        reader = HLogFactory.createReader(fs, walPath, conf);
        count = 0;
        while ((entry = reader.next(entry)) != null)
            count++;
        assertEquals(total * 2, count);
        // Now do a test that ensures stuff works when we go over block boundary,
        // especially that we return good length on file.
        final byte[] value = new byte[1025 * 1024]; // Make a 1M value.
        for (int i = 0; i < total; i++) {
            WALEdit kvs = new WALEdit();
            kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), value));
            wal.append(info, tableName, kvs, System.currentTimeMillis(), htd, sequenceId);
        }
        // Now I should have written out lots of blocks. Sync then read.
        wal.sync();
        reader = HLogFactory.createReader(fs, walPath, conf);
        count = 0;
        while ((entry = reader.next(entry)) != null)
            count++;
        assertEquals(total * 3, count);
        reader.close();
        // Close it and ensure that closed, Reader gets right length also.
        wal.close();
        reader = HLogFactory.createReader(fs, walPath, conf);
        count = 0;
        while ((entry = reader.next(entry)) != null)
            count++;
        assertEquals(total * 3, count);
        reader.close();
    } finally {
        if (wal != null)
            wal.closeAndDelete();
        if (reader != null)
            reader.close();
    }
}
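The test creates sequenceId = new AtomicLong(1) once and passes it to every wal.append(...) call, so the log can hand out increasing sequence numbers from a single shared counter even when appends come from several handlers. The core idea in isolation (append here is a hypothetical stand-in, not the HBase API):

import java.util.concurrent.atomic.AtomicLong;

public class SequenceIdSketch {
    // Shared generator: every caller gets a distinct, increasing id.
    static final AtomicLong SEQUENCE_ID = new AtomicLong(1);

    static long append(String edit) {
        long seq = SEQUENCE_ID.getAndIncrement();
        System.out.println("seq " + seq + ": " + edit);
        return seq;
    }

    public static void main(String[] args) {
        append("row-1");
        append("row-2");
        append("row-3"); // prints seq 1, 2, 3
    }
}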