List of usage examples for the java.util.concurrent ExecutorCompletionService constructor
public ExecutorCompletionService(Executor executor)
From source file: org.apache.hadoop.hbase.client.transactional.TransactionManager.java
/**
 * Prepare to commit a transaction (phase 1 of the two-phase commit protocol).
 *
 * Fans the prepare request out concurrently on a fresh CompletionService
 * backed by the shared {@code threadPool}, then collects the individual
 * prepare votes and folds them into a single status code. In batch mode
 * (batchRegionServer + MVCC) one request is issued per region server;
 * otherwise one request is issued per participating region.
 *
 * @param transactionState the transaction being prepared
 * @return commitStatusCode (see {@link TransactionalRegionInterface})
 * @throws IOException
 * @throws CommitUnsuccessfulException if any prepare task fails outright
 */
public int prepareCommit(final TransactionState transactionState)
        throws CommitUnsuccessfulException, IOException {
    if (LOG.isTraceEnabled())
        LOG.trace("Enter prepareCommit, txid: " + transactionState.getTransactionId());
    if (batchRegionServer && (TRANSACTION_ALGORITHM == AlgorithmType.MVCC)) {
        // Batch path: one prepare RPC per region server, covering all of that
        // server's participating regions at once.
        boolean allReadOnly = true;
        int loopCount = 0;
        if (transactionState.islocalTransaction()) {
            if (LOG.isTraceEnabled())
                LOG.trace("TransactionManager.prepareCommit local transaction "
                        + transactionState.getTransactionId());
        } else if (LOG.isTraceEnabled())
            LOG.trace("TransactionManager.prepareCommit global transaction "
                    + transactionState.getTransactionId());
        // (need one CompletionService per request for thread safety, can share pool of threads
        CompletionService<Integer> compPool = new ExecutorCompletionService<Integer>(threadPool);
        try {
            ServerName servername;
            List<TransactionRegionLocation> regionList;
            Map<ServerName, List<TransactionRegionLocation>> locations =
                    new HashMap<ServerName, List<TransactionRegionLocation>>();
            // Group the participating regions by their hosting server.
            for (TransactionRegionLocation location : transactionState.getParticipatingRegions()) {
                servername = location.getServerName();
                if (!locations.containsKey(servername)) {
                    regionList = new ArrayList<TransactionRegionLocation>();
                    locations.put(servername, regionList);
                } else {
                    regionList = locations.get(servername);
                }
                regionList.add(location);
            }
            // Submit one prepare task per server; any region of the server is
            // used as the callable's anchor location.
            for (final Map.Entry<ServerName, List<TransactionRegionLocation>> entry : locations
                    .entrySet()) {
                loopCount++;
                compPool.submit(new TransactionManagerCallable(transactionState,
                        entry.getValue().iterator().next(), connection) {
                    public Integer call() throws CommitUnsuccessfulException, IOException {
                        return doPrepareX(entry.getValue(), transactionState.getTransactionId());
                    }
                });
            }
        } catch (Exception e) {
            throw new CommitUnsuccessfulException(e);
        }
        // loop to retrieve replies
        int commitError = 0;
        try {
            // Tally the votes; TM_COMMIT_* are the per-region prepare vote
            // constants declared elsewhere in this class.
            for (int loopIndex = 0; loopIndex < loopCount; loopIndex++) {
                Integer canCommit = compPool.take().get();
                switch (canCommit) {
                case TM_COMMIT_TRUE:
                    allReadOnly = false;
                    break;
                case TM_COMMIT_READ_ONLY:
                    break;
                case TM_COMMIT_FALSE_CONFLICT:
                    commitError = TransactionalReturn.COMMIT_CONFLICT;
                    break;
                case TM_COMMIT_FALSE:
                    // Commit conflict takes precedence
                    if (commitError != TransactionalReturn.COMMIT_CONFLICT)
                        commitError = TransactionalReturn.COMMIT_UNSUCCESSFUL;
                    break;
                default:
                    LOG.error("Unexpected value of canCommit in prepareCommit (during completion processing): "
                            + canCommit);
                    commitError = TransactionalReturn.COMMIT_UNSUCCESSFUL;
                    ;
                }
            }
            // Second round: re-drive prepare individually for any regions the
            // first round flagged for retry.
            loopCount = 0;
            if (transactionState.getRegionsRetryCount() > 0) {
                for (TransactionRegionLocation location : transactionState.getRetryRegions()) {
                    loopCount++;
                    compPool.submit(new TransactionManagerCallable(transactionState, location,
                            connection) {
                        public Integer call() throws CommitUnsuccessfulException, IOException {
                            return doPrepareX(location.getRegionInfo().getRegionName(),
                                    transactionState.getTransactionId(), location);
                        }
                    });
                }
                transactionState.clearRetryRegions();
            }
            // Tally the retry-round votes (same scheme as above).
            for (int loopIndex = 0; loopIndex < loopCount; loopIndex++) {
                Integer canCommit = compPool.take().get();
                switch (canCommit) {
                case TM_COMMIT_TRUE:
                    allReadOnly = false;
                    break;
                case TM_COMMIT_READ_ONLY:
                    break;
                case TM_COMMIT_FALSE_CONFLICT:
                    commitError = TransactionalReturn.COMMIT_CONFLICT;
                    break;
                case TM_COMMIT_FALSE:
                    // Commit conflict takes precedence
                    if (commitError != TransactionalReturn.COMMIT_CONFLICT)
                        commitError = TransactionalReturn.COMMIT_UNSUCCESSFUL;
                    break;
                default:
                    commitError = TransactionalReturn.COMMIT_UNSUCCESSFUL;
                    ;
                }
            }
        } catch (Exception e) {
            throw new CommitUnsuccessfulException(e);
        }
        if (commitError != 0)
            return commitError;
        return allReadOnly ? TransactionalReturn.COMMIT_OK_READ_ONLY : TransactionalReturn.COMMIT_OK;
    } else {
        // Per-region path: one prepare RPC per participating region.
        boolean allReadOnly = true;
        int loopCount = 0;
        ServerName servername;
        List<TransactionRegionLocation> regionList;
        Map<ServerName, List<TransactionRegionLocation>> locations = null;
        if (transactionState.islocalTransaction()) {
            if (LOG.isTraceEnabled())
                LOG.trace("TransactionManager.prepareCommit local transaction "
                        + transactionState.getTransactionId());
        } else if (LOG.isTraceEnabled())
            LOG.trace("TransactionManager.prepareCommit global transaction "
                    + transactionState.getTransactionId());
        // (need one CompletionService per request for thread safety, can share pool of threads
        CompletionService<Integer> compPool = new ExecutorCompletionService<Integer>(threadPool);
        try {
            // The locations map is only maintained for metrics accounting.
            if (batchRSMetricsFlag)
                locations = new HashMap<ServerName, List<TransactionRegionLocation>>();
            for (TransactionRegionLocation location : transactionState.getParticipatingRegions()) {
                if (batchRSMetricsFlag) {
                    servername = location.getServerName();
                    if (!locations.containsKey(servername)) {
                        regionList = new ArrayList<TransactionRegionLocation>();
                        locations.put(servername, regionList);
                    } else {
                        regionList = locations.get(servername);
                    }
                    regionList.add(location);
                }
                loopCount++;
                // Capture per-iteration finals for the anonymous callable.
                final TransactionRegionLocation myLocation = location;
                final byte[] regionName = location.getRegionInfo().getRegionName();
                compPool.submit(new TransactionManagerCallable(transactionState, location,
                        connection) {
                    public Integer call() throws IOException, CommitUnsuccessfulException {
                        return doPrepareX(regionName, transactionState.getTransactionId(),
                                myLocation);
                    }
                });
            }
            if (batchRSMetricsFlag) {
                // Accumulate region / region-server call counts and periodically
                // dump the ratio histogram to the log.
                this.regions += transactionState.getParticipatingRegions().size();
                this.regionServers += locations.size();
                String rsToRegion = locations.size() + " RS / "
                        + transactionState.getParticipatingRegions().size() + " Regions";
                if (batchRSMetrics.get(rsToRegion) == null) {
                    batchRSMetrics.put(rsToRegion, 1L);
                } else {
                    batchRSMetrics.put(rsToRegion, batchRSMetrics.get(rsToRegion) + 1);
                }
                if (metricsCount >= 10000) {
                    metricsCount = 0;
                    if (LOG.isInfoEnabled())
                        LOG.info("---------------------- BatchRS metrics ----------------------");
                    if (LOG.isInfoEnabled())
                        LOG.info("Number of total Region calls: " + this.regions);
                    if (LOG.isInfoEnabled())
                        LOG.info("Number of total RegionServer calls: " + this.regionServers);
                    if (LOG.isInfoEnabled())
                        LOG.info("---------------- Total number of calls by ratio: ------------");
                    for (Map.Entry<String, Long> entry : batchRSMetrics.entrySet()) {
                        if (LOG.isInfoEnabled())
                            LOG.info(entry.getKey() + ": " + entry.getValue());
                    }
                    if (LOG.isInfoEnabled())
                        LOG.info("-------------------------------------------------------------");
                }
                metricsCount++;
            }
        } catch (Exception e) {
            LOG.error("exception in prepareCommit (during submit to pool): " + e);
            throw new CommitUnsuccessfulException(e);
        }
        // loop to retrieve replies
        int commitError = 0;
        try {
            for (int loopIndex = 0; loopIndex < loopCount; loopIndex++) {
                int canCommit = compPool.take().get();
                switch (canCommit) {
                case TM_COMMIT_TRUE:
                    allReadOnly = false;
                    break;
                case TM_COMMIT_READ_ONLY:
                    break;
                case TM_COMMIT_FALSE_CONFLICT:
                    commitError = TransactionalReturn.COMMIT_CONFLICT;
                    break;
                case TM_COMMIT_FALSE:
                    // Commit conflict takes precedence
                    if (commitError != TransactionalReturn.COMMIT_CONFLICT)
                        commitError = TransactionalReturn.COMMIT_UNSUCCESSFUL;
                    break;
                default:
                    LOG.error("Unexpected value of canCommit in prepareCommit (during completion processing): "
                            + canCommit);
                    commitError = TransactionalReturn.COMMIT_UNSUCCESSFUL;
                    ;
                }
            }
        } catch (Exception e) {
            LOG.error("exception in prepareCommit (during completion processing): " + e);
            throw new CommitUnsuccessfulException(e);
        }
        if (commitError != 0)
            return commitError;
        //Before replying prepare success, check for DDL transaction.
        //If prepare already has errors (commitError != 0), an abort is automatically
        //triggered by TM which would take care of ddl abort.
        //if prepare is success upto this point, DDL operation needs to check if any
        //drop table requests were recorded as part of phase 0. If any drop table
        //requests is recorded, then those tables need to disabled as part of prepare.
        if (transactionState.hasDDLTx()) {
            //if tables were created, then nothing else needs to be done.
            //if tables were recorded dropped, then they need to be disabled.
            //Disabled tables will ultimately be deleted in commit phase.
            ArrayList<String> createList = new ArrayList<String>(); //This list is ignored.
            ArrayList<String> dropList = new ArrayList<String>();
            ArrayList<String> truncateList = new ArrayList<String>();
            StringBuilder state = new StringBuilder();
            try {
                tmDDL.getRow(transactionState.getTransactionId(), state, createList, dropList,
                        truncateList);
            } catch (Exception e) {
                LOG.error("exception in doPrepare getRow: " + e);
                if (LOG.isTraceEnabled())
                    LOG.trace("exception in doPrepare getRow: txID: "
                            + transactionState.getTransactionId());
                state.append("INVALID"); //to avoid processing further down this path.
                commitError = TransactionalReturn.COMMIT_UNSUCCESSFUL;
            }
            //Return if error at this point.
            if (commitError != 0)
                return commitError;
            if (state.toString().equals("VALID") && dropList.size() > 0) {
                Iterator<String> di = dropList.iterator();
                while (di.hasNext()) {
                    try {
                        //physical drop of table from hbase.
                        disableTable(transactionState, di.next());
                    } catch (Exception e) {
                        if (LOG.isTraceEnabled())
                            LOG.trace("exception in doPrepare disableTable: txID: "
                                    + transactionState.getTransactionId());
                        LOG.error("exception in doCommit, Step : DeleteTable: " + e);
                        //Any error at this point should be considered prepareCommit as unsuccessful.
                        //Retry logic can be added only if it is retryable error: TODO.
                        commitError = TransactionalReturn.COMMIT_UNSUCCESSFUL;
                        break;
                    }
                }
            }
        }
        if (commitError != 0)
            return commitError;
        return allReadOnly ? TransactionalReturn.COMMIT_OK_READ_ONLY : TransactionalReturn.COMMIT_OK;
    }
}
From source file: org.apache.nifi.cluster.manager.impl.WebClusterManager.java
/** * Drains the node responses off of the socket to ensure that the socket is appropriately cleaned-up. * * @param nodeResponses the collection of node responses *///w w w. ja v a2 s. co m private void drainResponses(final Collection<NodeResponse> nodeResponses) { // fail fast if nothing to do if (nodeResponses.isEmpty()) { return; } final ExecutorService executorService = Executors .newFixedThreadPool(properties.getClusterManagerProtocolThreads()); final CompletionService<Void> completionService = new ExecutorCompletionService<>(executorService); for (final NodeResponse nodeResponse : nodeResponses) { // if we received a response, then clear out the response data if (!nodeResponse.hasThrowable()) { completionService.submit(new Runnable() { @Override public void run() { try { try (final OutputStream drain = new OutputStream() { @Override public void write(final int b) { /* drain response */ } }) { ((StreamingOutput) nodeResponse.getResponse().getEntity()).write(drain); } } catch (final IOException | WebApplicationException ex) { logger.info("Failed clearing out non-client response buffer due to: " + ex, ex); } } }, null); } } executorService.shutdown(); }
From source file: org.apache.nifi.cluster.manager.impl.WebClusterManager.java
/** * A helper method to disconnect nodes that returned unsuccessful HTTP responses because of a replicated request. Disconnection requests are sent concurrently. * *//* w ww . j a va 2s. c om*/ private void disconnectNodes(final Set<NodeResponse> nodeResponses, final String explanation) { // return fast if nothing to do if (nodeResponses == null || nodeResponses.isEmpty()) { return; } final ExecutorService executorService = Executors .newFixedThreadPool(properties.getClusterManagerProtocolThreads()); final CompletionService<Void> completionService = new ExecutorCompletionService<>(executorService); for (final NodeResponse nodeResponse : nodeResponses) { completionService.submit(new Runnable() { @Override public void run() { final NodeIdentifier nodeId = nodeResponse.getNodeId(); final int responseStatus = nodeResponse.getStatus(); final URI requestUri = nodeResponse.getRequestUri(); final StringBuilder msgBuilder = new StringBuilder(); msgBuilder.append("Requesting disconnection for node ").append(nodeId) .append(" for request URI ").append(requestUri); if (nodeResponse.hasThrowable()) { msgBuilder.append(" because manager encountered exception when issuing request: ") .append(nodeResponse.getThrowable()); // log stack trace anytime we have a throwable ((NiFiLog) logger).getWrappedLog().info(msgBuilder.toString(), nodeResponse.getThrowable()); addEvent(nodeId, "Manager encountered exception when issuing request for URI " + requestUri); addBulletin(nodeId, Severity.ERROR, "Manager encountered exception when issuing request for URI " + requestUri + "; node will be disconnected"); } else { msgBuilder.append(" because HTTP response status was ").append(responseStatus); logger.info(msgBuilder.toString()); addEvent(nodeId, "HTTP response status was unsuccessful (" + responseStatus + ") for request URI " + requestUri); addBulletin(nodeId, Severity.ERROR, "HTTP response status was unsuccessful (" + responseStatus + ") for request URI " + requestUri); } 
requestDisconnectionQuietly(nodeId, explanation); } }, null); } executorService.shutdown(); }