List of usage examples for java.util.concurrent.atomic.AtomicInteger.set
public final void set(int newValue)
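Sets the value to newValue, unconditionally overwriting the current value. Before the real-world examples below, here is a minimal sketch (class and variable names are illustrative, not from any of the source files) showing the basic pattern: set() writes a new value with volatile semantics, so it becomes immediately visible to other threads, and unlike compareAndSet() it never reads the previous value.

    import java.util.concurrent.atomic.AtomicInteger;

    // Minimal illustrative sketch: set(int) unconditionally overwrites the
    // current value with volatile-write semantics, so the new value is
    // immediately visible to other threads.
    public class AtomicIntegerSetDemo {
        public static void main(String[] args) throws InterruptedException {
            final AtomicInteger status = new AtomicInteger(0);

            Thread worker = new Thread(() -> {
                // Publish a result code to other threads; unlike incrementAndGet()
                // or compareAndSet(), set() does not depend on the previous value.
                status.set(200);
            });
            worker.start();
            worker.join();

            System.out.println("status = " + status.get()); // prints: status = 200
        }
    }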
From source file:com.android.sdklib.repository.legacy.remote.internal.DownloadCache.java
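In this example, the optional AtomicInteger out-parameter is used to report the HTTP status code back to the caller via outStatusCode.set(code).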
    /**
     * Download, cache and return as an in-memory byte stream.
     * The download is only done if the server returns 200/OK.
     * On success, store an info file next to the download with
     * a few headers.
     * <p>
     * This method deletes the cached file and the info file ONLY if it
     * attempted a download and it failed to complete. It doesn't erase
     * anything if there's no download because the server returned a 404
     * or 304 or similar.
     *
     * @return An in-memory byte buffer input stream for the downloaded
     *         and locally cached file, or null if nothing was downloaded
     *         (including if it was a 304 Not-Modified status code.)
     */
    @Nullable
    private InputStream downloadAndCache(@NonNull String urlString, @NonNull ITaskMonitor monitor,
            @NonNull File cached, @NonNull File info, @Nullable Header[] headers,
            @Nullable AtomicInteger outStatusCode) throws IOException {
        InputStream is = null;
        OutputStream os = null;

        int inc = 65536;
        int curr = 0;
        byte[] result = new byte[inc];

        try {
            Pair<InputStream, URLConnection> r =
                    openUrl(urlString, true /*needsMarkResetSupport*/, monitor, headers);

            is = r.getFirst();
            URLConnection connection = r.getSecond();

            if (DEBUG) {
                String message = null;
                if (connection instanceof HttpURLConnection) {
                    message = ((HttpURLConnection) connection).getResponseMessage();
                }
                System.out.println(String.format("%s : fetch: %s => %s", //$NON-NLS-1$
                        urlString, headers == null ? "" : Arrays.toString(headers), //$NON-NLS-1$
                        message));
            }

            int code = connection instanceof HttpURLConnection
                    ? ((HttpURLConnection) connection).getResponseCode()
                    : 200;
            if (outStatusCode != null) {
                outStatusCode.set(code);
            }

            if (code != HttpStatus.SC_OK) {
                // Only a 200 response code makes sense here.
                // Even the other 20x codes should not apply, e.g. no content or partial
                // content are not statuses we want to handle and should never happen.
                // (see http://www.w3.org/Protocols/rfc2616/rfc2616-sec6.html#sec6.1.1 for list)
                return null;
            }

            os = mFileOp.newFileOutputStream(cached);

            int n;
            while ((n = is.read(result, curr, result.length - curr)) != -1) {
                if (os != null && n > 0) {
                    os.write(result, curr, n);
                }

                curr += n;

                if (os != null && curr > MAX_SMALL_FILE_SIZE) {
                    // If the file size exceeds our "small file size" threshold,
                    // stop caching. We don't want to fill the disk.
                    try {
                        os.close();
                    } catch (IOException ignore) {
                    }
                    try {
                        cached.delete();
                        info.delete();
                    } catch (SecurityException ignore) {
                    }
                    os = null;
                }

                if (curr == result.length) {
                    byte[] temp = new byte[curr + inc];
                    System.arraycopy(result, 0, temp, 0, curr);
                    result = temp;
                }
            }

            // Close the output stream, signaling it was stored properly.
            if (os != null) {
                try {
                    os.close();
                    os = null;

                    if (connection instanceof HttpURLConnection) {
                        saveInfo(urlString, (HttpURLConnection) connection, info);
                    }
                } catch (IOException ignore) {
                }
            }

            return new ByteArrayInputStream(result, 0, curr);

        } finally {
            if (is != null) {
                try {
                    is.close();
                } catch (IOException ignore) {
                }
            }

            if (os != null) {
                try {
                    os.close();
                } catch (IOException ignore) {
                }

                // If we get here with the output stream not null, it means there
                // was an issue and we don't want to keep that file. We'll try to
                // delete it.
                try {
                    mFileOp.delete(cached);
                    mFileOp.delete(info);
                } catch (SecurityException ignore) {
                }
            }
        }
    }
From source file:org.sakaiproject.tool.assessment.facade.ItemHashUtil.java
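This batch job uses set(...) to seed item counts computed inside a Hibernate callback (totalItems, totalItemsNeedingBackfill), since the lambdas involved can only capture effectively final locals.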
    /**
     * Bit of a hack to allow reuse between {@link ItemFacadeQueries} and {@link PublishedItemFacadeQueries}.
     * Arguments are rather arbitrary extension points to support what we happen to <em>know</em> are the differences
     * between item and published item processing, as well as the common utilities/service dependencies.
     *
     * @param batchSize
     * @param hqlQueries
     * @param concreteType
     * @param hashAndAssignCallback
     * @param hibernateTemplate
     * @return
     */
    BackfillItemHashResult backfillItemHashes(int batchSize, Map<String, String> hqlQueries,
            Class<? extends ItemDataIfc> concreteType, Function<ItemDataIfc, ItemDataIfc> hashAndAssignCallback,
            HibernateTemplate hibernateTemplate) {

        final long startTime = System.currentTimeMillis();
        log.debug("Hash backfill starting for items of type [" + concreteType.getSimpleName() + "]");

        if (batchSize <= 0) {
            batchSize = 100;
        }
        final int flushSize = batchSize;

        final AtomicInteger totalItems = new AtomicInteger(0);
        final AtomicInteger totalItemsNeedingBackfill = new AtomicInteger(0);
        final AtomicInteger batchNumber = new AtomicInteger(0);
        final AtomicInteger recordsRead = new AtomicInteger(0);
        final AtomicInteger recordsUpdated = new AtomicInteger(0);
        final Map<Long, Throwable> hashingErrors = new TreeMap<>();
        final Map<Integer, Throwable> otherErrors = new TreeMap<>();
        final List<Long> batchElapsedTimes = new ArrayList<>();
        // always needed as *printable* average per-batch timing value, so just store as string. and cache at this
        // scope b/c we sometimes need to print a single calculation multiple times, e.g. in last batch and
        // at method exit
        final AtomicReference<String> currentAvgBatchElapsedTime = new AtomicReference<>("0.00");
        final AtomicBoolean areMoreItems = new AtomicBoolean(true);

        // Get the item totals up front since a) we know any questions created while the job is running will be
        // assigned hashes and thus won't need to be handled by the job and b) makes bookkeeping within the job much
        // easier
        hibernateTemplate.execute(session -> {
            session.setDefaultReadOnly(true);
            totalItems.set(countItems(hqlQueries, session));
            totalItemsNeedingBackfill.set(countItemsNeedingHashBackfill(hqlQueries, session));
            log.debug("Hash backfill required for [" + totalItemsNeedingBackfill + "] of [" + totalItems
                    + "] items of type [" + concreteType.getSimpleName() + "]");
            return null;
        });

        while (areMoreItems.get()) {
            long batchStartTime = System.currentTimeMillis();
            batchNumber.getAndIncrement();
            final AtomicInteger itemsHashedInBatch = new AtomicInteger(0);
            final AtomicInteger itemsReadInBatch = new AtomicInteger(0);
            final AtomicReference<Throwable> failure = new AtomicReference<>(null);

            // Idea here is a) avoid very long running transactions and b) avoid reading all items into memory
            // and c) avoid weirdness, e.g. duplicate results, when paginating complex hibernate objects. So
            // there's a per-batch transaction, and each batch re-runs the same two item lookup queries, one to
            // get the list of IDs for the next page of items, and one to resolve those IDs to items
            try {
                new TransactionTemplate(transactionManager, requireNewTransaction()).execute(status -> {
                    hibernateTemplate.execute(session -> {
                        List<ItemDataIfc> itemsInBatch = null;
                        try { // resource cleanup block
                            session.setFlushMode(FlushMode.MANUAL);
                            try { // initial read block (failures here are fatal)
                                // set up the actual result set for this batch of items.
                                // use error count to skip over failed items
                                final List<Long> itemIds = itemIdsNeedingHashBackfill(hqlQueries, flushSize,
                                        hashingErrors.size(), session);
                                itemsInBatch = itemsById(itemIds, hqlQueries, session);
                            } catch (RuntimeException e) {
                                // Panic on failure to read counts and/or the actual items in the batch.
                                // Otherwise would potentially loop indefinitely since this design has no way to
                                // skip this page of results.
                                log.error("Failed to read batch of hashable items. Giving up at record ["
                                        + recordsRead + "] of [" + totalItemsNeedingBackfill + "] Type: ["
                                        + concreteType.getSimpleName() + "]", e);
                                areMoreItems.set(false); // force overall loop to exit
                                throw e; // force txn to give up
                            }

                            for (ItemDataIfc item : itemsInBatch) {
                                recordsRead.getAndIncrement();
                                itemsReadInBatch.getAndIncrement();

                                // Assign the item's hash/es
                                try {
                                    log.debug("Backfilling hash for item [" + recordsRead + "] of ["
                                            + totalItemsNeedingBackfill + "] Type: ["
                                            + concreteType.getSimpleName() + "] ID: [" + item.getItemId() + "]");
                                    hashAndAssignCallback.apply(item);
                                    itemsHashedInBatch.getAndIncrement();
                                } catch (Throwable t) {
                                    // Failures considered ignorable here... probably some unexpected item state
                                    // that prevented hash calculation.
                                    //
                                    // Re the log statement... yes, the caller probably logs exceptions, but likely
                                    // without stack traces, and we'd like to advertise failures as quickly as
                                    // possible, so we go ahead and emit an error log here.
                                    log.error("Item hash calculation failed for item [" + recordsRead + "] of ["
                                            + totalItemsNeedingBackfill + "] Type: ["
                                            + concreteType.getSimpleName() + "] ID: ["
                                            + (item == null ? "?" : item.getItemId()) + "]", t);
                                    hashingErrors.put(item.getItemId(), t);
                                }
                            }

                            if (itemsHashedInBatch.get() > 0) {
                                session.flush();
                                recordsUpdated.getAndAdd(itemsHashedInBatch.get());
                            }

                            areMoreItems.set(itemsInBatch.size() >= flushSize);
                        } finally {
                            quietlyClear(session); // potentially very large, so clear aggressively
                        }
                        return null;
                    }); // end session
                    return null;
                }); // end transaction
            } catch (Throwable t) {
                // We're still in the loop over all batches, but something caused the current batch (and its
                // transaction) to exit abnormally. Logging of both success and failure cases is quite detailed,
                // and needs the same timing calcs, so is consolidated into the 'finally' block below.
                failure.set(t);
                otherErrors.put(batchNumber.get(), t);
            } finally {
                // Detailed batch-level reporting
                final long batchElapsed = (System.currentTimeMillis() - batchStartTime);
                batchElapsedTimes.add(batchElapsed);
                currentAvgBatchElapsedTime.set(new DecimalFormat("#.00")
                        .format(batchElapsedTimes.stream().collect(Collectors.averagingLong(l -> l))));
                if (failure.get() == null) {
                    log.debug("Item hash backfill batch flushed to database. Type: ["
                            + concreteType.getSimpleName() + "] Batch number: [" + batchNumber
                            + "] Items attempted in batch: [" + itemsReadInBatch
                            + "] Items succeeded in batch: [" + itemsHashedInBatch
                            + "] Total items attempted: [" + recordsRead + "] Total items succeeded: ["
                            + recordsUpdated + "] Total attemptable items: [" + totalItemsNeedingBackfill
                            + "] Elapsed batch time: [" + batchElapsed + "ms] Avg time/batch: ["
                            + currentAvgBatchElapsedTime + "ms]");
                } else {
                    // yes, caller probably logs exceptions later, but probably without stack traces, and we'd
                    // like to advertise failures as quickly as possible, so we go ahead and emit an error log
                    // here.
                    log.error("Item hash backfill failed. Type: [" + concreteType.getSimpleName()
                            + "] Batch number: [" + batchNumber + "] Items attempted in batch: ["
                            + itemsReadInBatch + "] Items flushable (but failed) in batch: ["
                            + itemsHashedInBatch + "] Total items attempted: [" + recordsRead
                            + "] Total items succeeded: [" + recordsUpdated + "] Total attemptable items: ["
                            + totalItemsNeedingBackfill + "] Elapsed batch time: [" + batchElapsed
                            + "ms] Avg time/batch: [" + currentAvgBatchElapsedTime + "ms]", failure.get());
                }
            }
        } // end loop over all batches

        final long elapsedTime = System.currentTimeMillis() - startTime;
        log.debug("Hash backfill completed for items of type [" + concreteType.getSimpleName()
                + "]. Total items attempted: [" + recordsRead + "] Total items succeeded: [" + recordsUpdated
                + "] Target attemptable items: [" + totalItemsNeedingBackfill + "] Total elapsed time: ["
                + elapsedTime + "ms] Total batches: [" + batchNumber + "] Avg time/batch: ["
                + currentAvgBatchElapsedTime + "ms]");

        return new BackfillItemHashResult(elapsedTime, totalItems.get(), totalItemsNeedingBackfill.get(),
                recordsRead.get(), recordsUpdated.get(), flushSize, hashingErrors, otherErrors);
    }
From source file:org.apache.hadoop.hbase.client.TestAsyncSingleRequestRpcRetryingCaller.java
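Between the two requests, the test resets the shared call counter with count.set(0) so the mocked locator injects its error again.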
    @Test
    public void testLocateError() throws IOException, InterruptedException, ExecutionException {
        AtomicBoolean errorTriggered = new AtomicBoolean(false);
        AtomicInteger count = new AtomicInteger(0);
        HRegionLocation loc = CONN.getRegionLocator(TABLE_NAME).getRegionLocation(ROW).get();
        AsyncRegionLocator mockedLocator = new AsyncRegionLocator(CONN, AsyncConnectionImpl.RETRY_TIMER) {

            @Override
            CompletableFuture<HRegionLocation> getRegionLocation(TableName tableName, byte[] row,
                    RegionLocateType locateType, long timeoutNs) {
                if (tableName.equals(TABLE_NAME)) {
                    CompletableFuture<HRegionLocation> future = new CompletableFuture<>();
                    if (count.getAndIncrement() == 0) {
                        errorTriggered.set(true);
                        future.completeExceptionally(new RuntimeException("Inject error!"));
                    } else {
                        future.complete(loc);
                    }
                    return future;
                } else {
                    return super.getRegionLocation(tableName, row, locateType, timeoutNs);
                }
            }

            @Override
            void updateCachedLocation(HRegionLocation loc, Throwable exception) {
            }
        };
        try (AsyncConnectionImpl mockedConn = new AsyncConnectionImpl(CONN.getConfiguration(), CONN.registry,
                CONN.registry.getClusterId().get(), User.getCurrent()) {

            @Override
            AsyncRegionLocator getLocator() {
                return mockedLocator;
            }
        }) {
            RawAsyncTable table = mockedConn.getRawTableBuilder(TABLE_NAME)
                    .setRetryPause(100, TimeUnit.MILLISECONDS).setMaxRetries(5).build();
            table.put(new Put(ROW).addColumn(FAMILY, QUALIFIER, VALUE)).get();
            assertTrue(errorTriggered.get());
            errorTriggered.set(false);
            count.set(0);
            Result result = table.get(new Get(ROW).addColumn(FAMILY, QUALIFIER)).get();
            assertArrayEquals(VALUE, result.getValue(FAMILY, QUALIFIER));
            assertTrue(errorTriggered.get());
        }
    }
From source file:org.jenkinsci.remoting.protocol.ProtocolStackTest.java
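Here an AtomicInteger models a state machine: compareAndSet advances through the expected initialization sequence, while set(-2) and set(-3) flag layers that should never have started.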
    @Test
    public void initSequenceFailure() throws IOException {
        Logger logger = Logger.getLogger(ProtocolStack.class.getName());
        CapturingHandler handler = new CapturingHandler();
        assertThat(logger.isLoggable(Level.FINEST), is(false));
        Level oldLevel = logger.getLevel();
        logger.addHandler(handler);
        try {
            logger.setLevel(Level.FINEST);
            assertThat(logger.isLoggable(Level.FINEST), is(true));

            final AtomicInteger state = new AtomicInteger();
            try {
                ProtocolStack.on(new NetworkLayer(selector) {

                    @Override
                    protected void write(@NonNull ByteBuffer data) throws IOException {
                    }

                    @Override
                    public void start() throws IOException {
                        state.compareAndSet(0, 1);
                    }

                    @Override
                    public void doCloseSend() throws IOException {
                    }

                    @Override
                    public void doCloseRecv() {
                    }

                    @Override
                    public boolean isSendOpen() {
                        return true;
                    }
                }).filter(new FilterLayer() {

                    @Override
                    public void start() throws IOException {
                        state.compareAndSet(1, 2);
                        throw new IOException("boom");
                    }

                    @Override
                    public void onRecv(@NonNull ByteBuffer data) throws IOException {
                    }

                    @Override
                    public void doSend(@NonNull ByteBuffer data) throws IOException {
                    }
                }).filter(new FilterLayer() {

                    @Override
                    public void start() throws IOException {
                        state.set(-2);
                    }

                    @Override
                    public void onRecv(@NonNull ByteBuffer data) throws IOException {
                    }

                    @Override
                    public void doSend(@NonNull ByteBuffer data) throws IOException {
                    }

                    @Override
                    public void onRecvClosed(IOException cause) throws IOException {
                        state.compareAndSet(2, 3);
                        super.onRecvClosed(cause);
                    }
                }).named("initSeq").build(new ApplicationLayer<Void>() {

                    @Override
                    public Void get() {
                        return null;
                    }

                    @Override
                    public void onRead(@NonNull ByteBuffer data) throws IOException {
                    }

                    @Override
                    public void start() throws IOException {
                        state.set(-3);
                    }

                    @Override
                    public void onReadClosed(IOException cause) throws IOException {
                        state.compareAndSet(3, 4);
                    }

                    @Override
                    public boolean isReadOpen() {
                        return true;
                    }
                });
                fail("Expecting IOException");
            } catch (IOException e) {
                assertThat(e.getMessage(), is("boom"));
            }
            assertThat(handler.logRecords,
                    contains(
                            allOf(hasProperty("message", is("[{0}] Initializing")),
                                    hasProperty("parameters", is(new Object[] { "initSeq" }))),
                            allOf(hasProperty("message", is("[{0}] Starting")),
                                    hasProperty("parameters", is(new Object[] { "initSeq" }))),
                            allOf(hasProperty("message", is("[{0}] Start failure")),
                                    hasProperty("parameters", is(new Object[] { "initSeq" })),
                                    hasProperty("thrown", hasProperty("message", is("boom"))))));
            assertThat("Init in sequence", state.get(), is(4));
        } finally {
            logger.removeHandler(handler);
            logger.setLevel(oldLevel);
        }
    }
From source file:languages.TabFile.java
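Worker threads record the best match found so far with maxCorr.set(...) and maxIndex.set(...). Note that the get-then-set pair is not performed atomically, so two threads can interleave between the comparison and the update.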
    /**
     * Returns the value that has the highest {@link #stringCorrelation} with
     * the given {@link String}.
     *
     * @param column       The column to look for values.
     * @param value        The {@link String} to be compared. Only values with equal
     *                     length as {@code value} are returned due to the way
     *                     {@link #stringCorrelation} works.
     * @param ignoredWords Words to be filtered out before doing the comparison.
     * @return The value in the specified column that has the highest
     *         correlation.
     */
    public String getValueWithHighestCorrelation(int column, String value, List<String> ignoredWords) {
        ArrayList<Thread> threads = new ArrayList<>();
        AtomicInteger currentIndex = new AtomicInteger(0);
        AtomicInteger maxIndex = new AtomicInteger(-1);
        AtomicDouble maxCorr = new AtomicDouble(-1);
        List<String> ignoredWordsCopy = new ArrayList<>(ignoredWords);

        // Split all entries up that contain a space
        List<String> stringsToSplit = new ArrayList<>();

        // Find words to split
        for (String word : ignoredWordsCopy) {
            if (word.contains(" ")) {
                stringsToSplit.add(word);
            }
        }

        // Actually do the splitting
        for (String word : stringsToSplit) {
            ignoredWordsCopy.remove(word);
            ignoredWordsCopy.addAll(Arrays.asList(word.split(" ")));
        }

        for (int i = 0; i < AppConfig.getParallelThreadCount(); i++) {
            threads.add(new Thread(() -> {
                int index = currentIndex.getAndIncrement();
                while (index < getRowCount()) {
                    if (value.length() == getValueAt(index, column).length()
                            && !ignoredWordsCopy.contains(getValueAt(index, column))
                            && !HangmanSolver.currentWordContainsWrongChar(getValueAt(index, column))) {
                        double corr = stringCorrelation(value, getValueAt(index, column));
                        if (corr > maxCorr.get()) {
                            maxCorr.set(corr);
                            maxIndex.set(index);
                        }
                    }

                    // Grab the next index
                    index = currentIndex.getAndIncrement();
                }
            }));
            threads.get(i).start();
        }

        // Wait for threads
        for (int i = 0; i < AppConfig.getParallelThreadCount(); i++) {
            try {
                threads.get(i).join();
            } catch (InterruptedException e) {
                FOKLogger.log(TabFile.class.getName(), Level.SEVERE, "An error occurred", e);
            }
        }

        return getValueAt(maxIndex.get(), column);
    }
From source file:org.testdwr.plain.Test.java
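The first scheduled task uses set(...) to snapshot listener counters after one second so the second task, a second later, can compare against those snapshots.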
    public Verify checkScriptSessionListener(final JavascriptFunction progress1,
            final JavascriptFunction progress2) {
        final ServerContext serverContext = ServerContextFactory.get();
        final String testPage = serverContext.getContextPath() + "/checkSession.html";

        Verify verify = new Verify();

        final int createdBefore = TestScriptSessionListener.created;
        final int createdBefore2 = Test2ScriptSessionListener.created;
        final int destroyedBefore = TestScriptSessionListener.destroyed;
        final int destroyedBefore2 = Test2ScriptSessionListener.destroyed;

        // At least one test window is open ...
        verify.isTrue("createdBefore > 0", createdBefore > 0);
        verify.isTrue("createdBefore2 > 0", createdBefore2 > 0);

        // Open a new window
        Window.open(testPage, "checkSession");

        // We'll fill these in in the first cron, and use them in the second
        final AtomicInteger createdMid = new AtomicInteger();
        final AtomicInteger createdMid2 = new AtomicInteger();
        final AtomicInteger destroyedMid = new AtomicInteger();
        final AtomicInteger destroyedMid2 = new AtomicInteger();

        // Give it a second to open, check counters and close it
        ScheduledThreadPoolExecutor executorService = serverContext.getContainer()
                .getBean(ScheduledThreadPoolExecutor.class);
        executorService.schedule(new Runnable() {
            public void run() {
                createdMid.set(TestScriptSessionListener.created);
                createdMid2.set(Test2ScriptSessionListener.created);
                destroyedMid.set(TestScriptSessionListener.destroyed);
                destroyedMid2.set(Test2ScriptSessionListener.destroyed);

                Verify verify1 = new Verify();
                verify1.isTrue("createdMid > createdBefore", createdMid.intValue() > createdBefore);
                verify1.isTrue("createdMid2 > createdBefore2", createdMid2.intValue() > createdBefore2);
                verify1.equals("destroyedMid == destroyedBefore", destroyedMid.intValue(), destroyedBefore);
                verify1.equals("destroyedMid2 == destroyedBefore2", destroyedMid2.intValue(), destroyedBefore2);

                // Find it and close it
                Browser.withPage(testPage, new Runnable() {
                    public void run() {
                        Window.close();
                    }
                });

                progress1.executeAndClose(verify1);
            }
        }, 1, TimeUnit.SECONDS);

        // Give it 2 seconds to open and be closed then check counters again
        executorService.schedule(new Runnable() {
            public void run() {
                int createdAfter = TestScriptSessionListener.created;
                int createdAfter2 = Test2ScriptSessionListener.created;
                int destroyedAfter = TestScriptSessionListener.destroyed;
                int destroyedAfter2 = Test2ScriptSessionListener.destroyed;

                Verify verify2 = new Verify();
                verify2.equals("createdAfter == createdMid", createdAfter, createdMid.intValue());
                verify2.equals("createdAfter2 == createdMid2", createdAfter2, createdMid2.intValue());
                verify2.isTrue("destroyedAfter > destroyedMid", destroyedAfter > destroyedMid.intValue());
                verify2.isTrue("destroyedAfter2 > destroyedMid2", destroyedAfter2 > destroyedMid2.intValue());

                progress2.executeAndClose(verify2);
            }
        }, 2, TimeUnit.SECONDS);

        return verify;
    }
From source file:org.openconcerto.sql.model.SQLRowValuesListFetcher.java
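In this fetcher, fieldIndex.set(stop) advances a cursor over the select fields from inside an anonymous ITransformer, where a plain int local could not be reassigned.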
    private final List<SQLRowValues> fetch(final boolean merge) {
        final SQLSelect req = this.getReq();
        // getName() would take 5% of ResultSetHandler.handle()
        final List<FieldRef> selectFields = req.getSelectFields();
        final int selectFieldsSize = selectFields.size();
        final List<String> selectFieldsNames = new ArrayList<String>(selectFieldsSize);
        for (final FieldRef f : selectFields)
            selectFieldsNames.add(f.getField().getName());
        final SQLTable table = getGraph().getTable();

        // create a flat list of the graph nodes, we just need the table, field count and the index
        // in this list of its linked table, eg for CPI -> LOCAL -> BATIMENT -> SITE :
        // <LOCAL,2,0>, <BATIMENT,2,0>, <SITE,5,1>, <CPI,4,0>
        final int graphSize = this.getGraph().getGraph().size();
        final List<GraphNode> l = new ArrayList<GraphNode>(graphSize);
        // check field names only once since each row has the same fields
        final AtomicInteger fieldIndex = new AtomicInteger(0);
        walk(0, new ITransformer<State<Integer>, Integer>() {
            @Override
            public Integer transformChecked(State<Integer> input) {
                final int index = l.size();
                final GraphNode node = new GraphNode(input);
                final int stop = fieldIndex.get() + node.getFieldCount();
                for (int i = fieldIndex.get(); i < stop; i++) {
                    if (i >= selectFieldsSize)
                        throw new IllegalStateException("Fields were removed from the select");
                    final FieldRef field = selectFields.get(i);
                    if (!node.getTable().equals(field.getTableRef().getTable()))
                        throw new IllegalStateException("Select field not in " + node + " : " + field);
                }
                fieldIndex.set(stop);
                l.add(node);
                // used by link index of GraphNode
                return index;
            }
        });
        // otherwise walk() would already have thrown an exception
        assert fieldIndex.get() <= selectFieldsSize;
        if (fieldIndex.get() != selectFieldsSize) {
            throw new IllegalStateException(
                    "Fields have been added to the select (which is useless, since only fields specified by rows are returned) : "
                            + selectFields.subList(fieldIndex.get(), selectFieldsSize));
        }
        assert l.size() == graphSize : "All nodes weren't explored once : " + l.size() + " != " + graphSize
                + "\n" + this.getGraph().printGraph();

        // if we wanted to use the cache, we'd need to copy the returned list and its items (i.e.
        // deepCopy()), since we modify them afterwards. Or perhaps include the code after this line
        // into the result set handler.
        final IResultSetHandler rsh = new IResultSetHandler(new RSH(selectFieldsNames, l), false);
        @SuppressWarnings("unchecked")
        final List<SQLRowValues> res = (List<SQLRowValues>) table.getBase().getDataSource()
                .execute(req.asString(), rsh, false);
        // e.g. list of batiment pointing to site
        final List<SQLRowValues> merged = merge && this.fetchReferents() ? merge(res) : res;
        if (this.grafts.size() > 0) {
            for (final Entry<Path, Map<Path, SQLRowValuesListFetcher>> graftPlaceEntry : this.grafts
                    .entrySet()) {
                // e.g. BATIMENT
                final Path graftPlace = graftPlaceEntry.getKey();
                final Path mapPath = Path.get(graftPlace.getLast());
                // list of BATIMENT to only fetch what's necessary
                final Set<Number> ids = new HashSet<Number>();
                // byRows is common to all grafts to support CPI -> LOCAL -> BATIMENT and RECEPTEUR
                // -> LOCAL -> BATIMENT (ie avoid duplicate LOCAL)
                // CollectionMap since the same row can be in multiple index of merged, e.g. when
                // fetching *BATIMENT* -> SITE each site will be repeated as many times as it has
                // children and if we want their DOSSIER they must be grafted on each line.
                final ListMap<Tuple2<Path, Number>, SQLRowValues> byRows = createCollectionMap();
                for (final SQLRowValues vals : merged) {
                    // can be empty when grafting on optional row
                    for (final SQLRowValues graftPlaceVals : vals.followPath(graftPlace,
                            CreateMode.CREATE_NONE, false)) {
                        ids.add(graftPlaceVals.getIDNumber());
                        byRows.add(Tuple2.create(mapPath, graftPlaceVals.getIDNumber()), graftPlaceVals);
                    }
                }
                assert ids.size() == byRows.size();
                for (final Entry<Path, SQLRowValuesListFetcher> e : graftPlaceEntry.getValue().entrySet()) {
                    // e.g BATIMENT <- LOCAL <- CPI
                    final Path descendantPath = e.getKey();
                    assert descendantPath.getFirst() == graftPlace.getLast() : descendantPath + " != "
                            + graftPlace;
                    final SQLRowValuesListFetcher graft = e.getValue();

                    final SQLSelect toRestore = graft.frozen;
                    graft.frozen = new SQLSelect(graft.getReq())
                            .andWhere(new Where(graft.getGraph().getTable().getKey(), ids));
                    // don't merge then...
                    final List<SQLRowValues> referentVals = graft.fetch(false);
                    graft.frozen = toRestore;
                    // ...but now
                    this.merge(merged, referentVals, byRows, descendantPath);
                }
            }
        }
        return merged;
    }
From source file:org.talend.component.core.utils.ComponentsUtils.java
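Here lastRN.set(rowNum) propagates the last used row number back to the caller, using the AtomicInteger argument as a mutable out-parameter across recursive calls.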
    /**
     * DOC ycbai Comment method "loadParametersFromForm".
     * <p>
     * Get element parameters of <code>element</code> from <code>form</code>.
     *
     * @param node optional, used if there is a component setting up the properties
     * @param element
     * @param category
     * @param form
     * @return parameters list
     */
    public static List<ElementParameter> getParametersFromForm(IElement element, EComponentCategory category,
            ComponentProperties compProperties, String parentPropertiesPath, Form form, Widget parentWidget,
            AtomicInteger lastRowNum) {
        List<ElementParameter> elementParameters = new ArrayList<>();
        EComponentCategory compCategory = category;
        if (compCategory == null) {
            compCategory = EComponentCategory.BASIC;
        }
        AtomicInteger lastRN = lastRowNum;
        if (lastRN == null) {
            lastRN = new AtomicInteger();
        }
        if (form == null) {
            return elementParameters;
        }
        ComponentProperties componentProperties = compProperties;
        if (componentProperties == null) {
            componentProperties = form.getComponentProperties();
        }
        if (element instanceof INode) {
            INode node = (INode) element;
            // Set the properties only one time to get the top-level properties object
            if (node.getComponentProperties() == null) {
                node.setComponentProperties(componentProperties);
            }
        }
        // Have to initialize for the messages
        componentProperties.getProperties();

        List<Widget> formWidgets = form.getWidgets();
        for (Widget widget : formWidgets) {
            NamedThing[] widgetProperties = widget.getProperties();
            NamedThing widgetProperty = widgetProperties[0];

            String propertiesPath = getPropertiesPath(parentPropertiesPath, null);
            if (widgetProperty instanceof Form) {
                Form subForm = (Form) widgetProperty;
                ComponentProperties subProperties = subForm.getComponentProperties();
                // Reset properties path
                if (!isSameComponentProperties(componentProperties, widgetProperty)) {
                    propertiesPath = getPropertiesPath(parentPropertiesPath, subProperties.getName());
                }
                elementParameters.addAll(getParametersFromForm(element, compCategory, subProperties,
                        propertiesPath, subForm, widget, lastRN));
                continue;
            }

            GenericElementParameter param = new GenericElementParameter(element, componentProperties, widget,
                    getComponentService());
            String parameterName = propertiesPath.concat(param.getName());
            param.setName(parameterName);
            param.setCategory(compCategory);
            param.setRepositoryValue(parameterName);
            param.setShow(parentWidget == null ? widget.isVisible()
                    : parentWidget.isVisible() && widget.isVisible());
            int rowNum = 0;
            if (widget.getOrder() != 1) {
                rowNum = lastRN.get();
            } else {
                rowNum = widget.getRow();
                if (parentWidget != null) {
                    rowNum += parentWidget.getRow();
                }
                rowNum = rowNum + lastRN.get();
            }
            param.setNumRow(rowNum);
            lastRN.set(rowNum);
            // handle form...
            SchemaElement se = null;
            if (widgetProperty instanceof SchemaElement) {
                se = (SchemaElement) widgetProperties[0];
                param.setContext(EConnectionType.FLOW_MAIN.getName());
            }
            EParameterFieldType fieldType = getFieldType(widget, widgetProperty, se);
            param.setFieldType(fieldType != null ? fieldType : EParameterFieldType.TEXT);
            if (se == null) {
                param.setValue(widgetProperty.getDisplayName());
            } else {
                se = componentProperties.getProperty(se.getName());
                param.setRequired(se.isRequired());
                param.setValue(componentProperties.getValue(se));
                param.setSupportContext(isSupportContext(se));
                List<?> values = se.getPossibleValues();
                if (values != null) {
                    param.setPossibleValues(values);
                    List<String> possVals = new ArrayList<>();
                    List<String> possValsDisplay = new ArrayList<>();
                    for (Object obj : values) {
                        if (obj instanceof NameAndLabel) {
                            NameAndLabel nal = (NameAndLabel) obj;
                            possVals.add(nal.getName());
                            possValsDisplay.add(nal.getLabel());
                        } else {
                            possVals.add(String.valueOf(obj));
                            possValsDisplay.add(String.valueOf(obj));
                        }
                    }
                    param.setListItemsDisplayName(possValsDisplay.toArray(new String[0]));
                    param.setListItemsDisplayCodeName(possValsDisplay.toArray(new String[0]));
                    param.setListItemsValue(possVals.toArray(new String[0]));
                }
            }
            // if (widgetProperty instanceof PresentationItem) {
            //     param.setValue(null);
            // }
            param.setReadOnly(false);
            param.setSerialized(true);
            // Set param context when multiple schema
            if (EParameterFieldType.SCHEMA_TYPE.equals(param.getFieldType())) {
                String propertyName = componentProperties.getName();
                if (IComponentConstants.SCHEMA_FLOW.equals(propertyName)) {
                    param.setContext(EConnectionType.FLOW_MAIN.getDefaultMenuName().toUpperCase());
                } else if (IComponentConstants.SCHEMA_REJECT.equals(propertyName)) {
                    param.setContext(EConnectionType.REJECT.getName());
                }
            }
            elementParameters.add(param);
        }
        return elementParameters;
    }
From source file:org.jdesktop.swingworker.AccumulativeRunnable.java
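The property-change listener records each progress update with lastProgressValue.set(...), and done() verifies the final recorded value reached 100.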
    public final void test6493680() throws Exception {
        final AtomicInteger lastProgressValue = new AtomicInteger(-1);
        final Exchanger<Boolean> exchanger = new Exchanger<Boolean>();

        class Test {
            private final AtomicInteger lastProgressValue = new AtomicInteger(-1);
            private final Exchanger<Boolean> exchanger = new Exchanger<Boolean>();

            boolean test() throws Exception {
                TestSwingWorker swingWorker = new TestSwingWorker();
                swingWorker.addPropertyChangeListener(new PropertyChangeListener() {
                    public void propertyChange(PropertyChangeEvent evt) {
                        if ("progress" == evt.getPropertyName()) {
                            lastProgressValue.set((Integer) evt.getNewValue());
                        }
                    }
                });

                swingWorker.execute();
                return exchanger.exchange(true);
            }

            class TestSwingWorker extends SwingWorker<Void, Void> {
                @Override
                protected Void doInBackground() throws Exception {
                    for (int i = 0; i <= 100; i++) {
                        Thread.sleep(1);
                        setProgress(i);
                    }
                    return null;
                }

                @Override
                protected void done() {
                    boolean isPassed = (lastProgressValue.get() == 100);
                    try {
                        exchanger.exchange(isPassed);
                    } catch (Exception ignore) {
                    }
                }
            }
        }
        /*
         * because timing is involved in this bug we will run the test
         * NUMBER_OF_TRIES times.
         * the test passes if it does not fail once.
         */
        final int NUMBER_OF_TRIES = 50;
        for (int i = 0; i < NUMBER_OF_TRIES; i++) {
            assertTrue((new Test()).test());
        }
    }
From source file:org.apache.flume.channel.kafka.TestKafkaChannel.java
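When a transaction is rolled back without retry, rolledBackCount.set(eventsLocalSize) records how many events were discarded so the other threads' loop condition accounts for them.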
    private List<Event> pullEvents(final KafkaChannel channel, ExecutorCompletionService<Void> submitterSvc,
            final int total, final boolean testRollbacks, final boolean retryAfterRollback) {
        final List<Event> eventsPulled = Collections.synchronizedList(new ArrayList<Event>(50));
        final CyclicBarrier barrier = new CyclicBarrier(5);
        final AtomicInteger counter = new AtomicInteger(0);
        final AtomicInteger rolledBackCount = new AtomicInteger(0);
        final AtomicBoolean startedGettingEvents = new AtomicBoolean(false);
        final AtomicBoolean rolledBack = new AtomicBoolean(false);
        for (int k = 0; k < 5; k++) {
            final int index = k;
            submitterSvc.submit(new Callable<Void>() {
                @Override
                public Void call() throws Exception {
                    Transaction tx = null;
                    final List<Event> eventsLocal = Lists.newLinkedList();
                    int takenByThisThread = 0;
                    channel.registerThread();
                    Thread.sleep(1000);
                    barrier.await();
                    while (counter.get() < (total - rolledBackCount.get())) {
                        if (tx == null) {
                            tx = channel.getTransaction();
                            tx.begin();
                        }
                        try {
                            Event e = channel.take();
                            if (e != null) {
                                startedGettingEvents.set(true);
                                eventsLocal.add(e);
                            } else {
                                if (testRollbacks && index == 4 && (!rolledBack.get())
                                        && startedGettingEvents.get()) {
                                    tx.rollback();
                                    tx.close();
                                    tx = null;
                                    rolledBack.set(true);
                                    final int eventsLocalSize = eventsLocal.size();
                                    eventsLocal.clear();
                                    if (!retryAfterRollback) {
                                        rolledBackCount.set(eventsLocalSize);
                                        return null;
                                    }
                                } else {
                                    tx.commit();
                                    tx.close();
                                    tx = null;
                                    eventsPulled.addAll(eventsLocal);
                                    counter.getAndAdd(eventsLocal.size());
                                    eventsLocal.clear();
                                }
                            }
                        } catch (Exception ex) {
                            eventsLocal.clear();
                            if (tx != null) {
                                tx.rollback();
                                tx.close();
                            }
                            tx = null;
                            ex.printStackTrace();
                        }
                    }
                    // Close txn.
                    return null;
                }
            });
        }
        return eventsPulled;
    }