Usage examples for java.util.concurrent.atomic.AtomicBoolean.get()
public final boolean get()
Returns the current value. The read has volatile memory semantics: a value written with set() in one thread is visible to a subsequent get() in another.
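Before the examples, a minimal self-contained sketch of that contract (class and variable names are illustrative):

import java.util.concurrent.atomic.AtomicBoolean;

public class GetDemo {
  public static void main(String[] args) throws InterruptedException {
    // get() is a volatile read, so the worker's update is visible here.
    AtomicBoolean done = new AtomicBoolean(false);
    Thread worker = new Thread(() -> done.set(true));
    worker.start();
    worker.join();
    System.out.println(done.get()); // prints: true
  }
}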
From source file:com.cloudera.impala.service.CatalogOpExecutor.java
/**
 * Creates any new partitions required as a result of an INSERT statement and refreshes
 * the table metadata after every INSERT statement. Any new partitions will inherit
 * their cache configuration from the parent table. That is, if the parent is cached,
 * new partitions created will also be cached and will be put in the same pool as the
 * parent.
 * If the insert touched any pre-existing partitions that were cached, a request to
 * watch the associated cache directives will be submitted. This will result in an
 * async table refresh once the cache request completes.
 * Updates the lastDdlTime of the table if new partitions were created.
 */
public TUpdateCatalogResponse updateCatalog(TUpdateCatalogRequest update) throws ImpalaException {
  TUpdateCatalogResponse response = new TUpdateCatalogResponse();
  // Only update metastore for Hdfs tables.
  Table table = getExistingTable(update.getDb_name(), update.getTarget_table());
  if (!(table instanceof HdfsTable)) {
    throw new InternalException("Unexpected table type: " + update.getTarget_table());
  }

  // Collects the cache directive IDs of any cached table/partitions that were
  // targeted. A watch on these cache directives is submitted to the TableLoadingMgr
  // and the table will be refreshed asynchronously after all cache directives
  // complete.
  List<Long> cacheDirIds = Lists.<Long>newArrayList();

  // If the table is cached, get its cache pool name. New partitions will inherit
  // this property.
  String cachePoolName = null;
  Long cacheDirId = HdfsCachingUtil.getCacheDirIdFromParams(table.getMetaStoreTable().getParameters());
  if (cacheDirId != null) {
    cachePoolName = HdfsCachingUtil.getCachePool(cacheDirId);
    if (table.getNumClusteringCols() == 0) cacheDirIds.add(cacheDirId);
  }

  TableName tblName = new TableName(table.getDb().getName(), table.getName());
  AtomicBoolean addedNewPartition = new AtomicBoolean(false);

  if (table.getNumClusteringCols() > 0) {
    // Set of all partition names targeted by the insert that need to be created
    // in the Metastore (partitions that do not currently exist in the catalog).
    // In the BE, we don't currently distinguish between which targeted partitions are
    // new and which already exist, so initialize the set with all targeted partition
    // names and remove the ones that are found to exist.
    Set<String> partsToCreate = Sets.newHashSet(update.getCreated_partitions());
    for (HdfsPartition partition : ((HdfsTable) table).getPartitions()) {
      // Skip the dummy default partition.
      if (partition.getId() == ImpalaInternalServiceConstants.DEFAULT_PARTITION_ID) {
        continue;
      }
      // TODO: In the BE we build partition names without a trailing char. In the FE we
      // build partition names with a trailing char. We should make this consistent.
      String partName = partition.getPartitionName() + "/";
      // Attempt to remove this partition name from partsToCreate. If remove
      // returns true, it indicates the partition already exists.
      if (partsToCreate.remove(partName) && partition.isMarkedCached()) {
        // The partition was targeted by the insert and is also cached. Since data
        // was written to the partition, a watch needs to be placed on the cache
        // directive so the TableLoadingMgr can perform an async refresh once
        // all data becomes cached.
        cacheDirIds.add(HdfsCachingUtil
            .getCacheDirIdFromParams(partition.getMetaStorePartition().getParameters()));
      }
      if (partsToCreate.size() == 0) break;
    }

    if (!partsToCreate.isEmpty()) {
      SettableFuture<Void> allFinished = SettableFuture.create();
      AtomicInteger numPartitions = new AtomicInteger(partsToCreate.size());
      // Add all partitions to the metastore.
      for (String partName : partsToCreate) {
        Preconditions.checkState(partName != null && !partName.isEmpty());
        CreatePartitionRunnable rbl = new CreatePartitionRunnable(tblName, partName,
            cachePoolName, addedNewPartition, allFinished, numPartitions, cacheDirIds);
        executor_.execute(rbl);
      }
      try {
        // Will throw if any operation calls setException.
        allFinished.get();
      } catch (Exception e) {
        throw new InternalException("Error updating metastore", e);
      }
    }
  }

  if (addedNewPartition.get()) {
    MetaStoreClient msClient = catalog_.getMetaStoreClient();
    try {
      // Operate on a copy of msTbl to prevent our cached msTbl becoming inconsistent
      // if the alteration fails in the metastore.
      org.apache.hadoop.hive.metastore.api.Table msTbl = table.getMetaStoreTable().deepCopy();
      updateLastDdlTime(msTbl, msClient);
    } catch (Exception e) {
      throw new InternalException("Error updating lastDdlTime", e);
    } finally {
      msClient.release();
    }
  }

  // Submit the watch request for the given cache directives.
  if (!cacheDirIds.isEmpty()) catalog_.watchCacheDirs(cacheDirIds, tblName.toThrift());

  response.setResult(new TCatalogUpdateResult());
  response.getResult().setCatalog_service_id(JniCatalog.getServiceId());
  response.getResult().setStatus(new TStatus(TStatusCode.OK, new ArrayList<String>()));
  // Perform an incremental refresh to load new/modified partitions and files.
  Table refreshedTbl = catalog_.reloadTable(tblName.toThrift());
  response.getResult().setUpdated_catalog_object(TableToTCatalogObject(refreshedTbl));
  response.getResult().setVersion(response.getResult().getUpdated_catalog_object().getCatalog_version());
  return response;
}
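The example above shares addedNewPartition across CreatePartitionRunnable tasks and reads it once after all of them have finished. A distilled sketch of that pattern, with hypothetical names and a plain thread pool standing in for Impala's executor_:

import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicBoolean;

public class AnyTaskSucceeded {
  public static void main(String[] args) throws InterruptedException {
    ExecutorService executor = Executors.newFixedThreadPool(4);
    AtomicBoolean addedSomething = new AtomicBoolean(false);
    for (int i = 0; i < 4; i++) {
      final int n = i;
      executor.execute(() -> {
        if (n % 2 == 0) {           // stand-in for "this task created a partition"
          addedSomething.set(true); // safe to call from any worker thread
        }
      });
    }
    executor.shutdown();
    executor.awaitTermination(1, TimeUnit.MINUTES);
    if (addedSomething.get()) {     // read once, after all tasks completed
      System.out.println("at least one task made a change");
    }
  }
}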
From source file:com.cloudera.whirr.cm.server.impl.CmServerImpl.java
@Override
@CmServerCommandMethod(name = "client")
public boolean getServiceConfigs(final CmServerCluster cluster, final File directory) throws CmServerException {
  final AtomicBoolean executed = new AtomicBoolean(false);
  try {
    if (isProvisioned(cluster)) {
      logger.logOperation("GetConfig", new CmServerLogSyncCommand() {
        @Override
        public void execute() throws IOException {
          for (ApiService apiService : apiResourceRootV3.getClustersResource()
              .getServicesResource(getName(cluster)).readServices(DataView.SUMMARY)) {
            CmServerServiceType type = CmServerServiceType.valueOfId(apiService.getType());
            if (type.equals(CmServerServiceType.HDFS) || type.equals(CmServerServiceType.MAPREDUCE)
                || type.equals(CmServerServiceType.YARN) || type.equals(CmServerServiceType.HBASE)
                || versionApi >= 4 && type.equals(CmServerServiceType.HIVE)
                || versionApi >= 5 && type.equals(CmServerServiceType.SOLR)) {
              ZipInputStream configInputZip = null;
              try {
                InputStreamDataSource configInput = apiResourceRootV3.getClustersResource()
                    .getServicesResource(getName(cluster)).getClientConfig(apiService.getName());
                if (configInput != null) {
                  configInputZip = new ZipInputStream(configInput.getInputStream());
                  ZipEntry configInputZipEntry = null;
                  while ((configInputZipEntry = configInputZip.getNextEntry()) != null) {
                    String configFile = configInputZipEntry.getName();
                    if (configFile.contains(File.separator)) {
                      configFile = configFile.substring(configFile.lastIndexOf(File.separator),
                          configFile.length());
                    }
                    directory.mkdirs();
                    BufferedWriter configOutput = null;
                    try {
                      int read;
                      configOutput = new BufferedWriter(new FileWriter(new File(directory, configFile)));
                      while (configInputZip.available() > 0) {
                        if ((read = configInputZip.read()) != -1) {
                          configOutput.write(read);
                        }
                      }
                    } finally {
                      // guard against a failed FileWriter construction
                      if (configOutput != null) {
                        configOutput.close();
                      }
                    }
                  }
                }
              } finally {
                if (configInputZip != null) {
                  configInputZip.close();
                }
              }
              executed.set(true);
            }
          }
        }
      });
    }
  } catch (Exception e) {
    throw new CmServerException("Failed to get cluster config", e);
  }
  return executed.get();
}
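Here executed is the only channel through which the anonymous CmServerLogSyncCommand can report back, since locals captured by an anonymous class must be (effectively) final. A reduced sketch of that callback-result pattern, with made-up names:

import java.util.concurrent.atomic.AtomicBoolean;

public class CallbackResult {
  interface Command { void execute(); }

  static void runLogged(Command c) { c.execute(); }

  public static void main(String[] args) {
    // An AtomicBoolean serves as a writable boolean holder for the callback.
    final AtomicBoolean executed = new AtomicBoolean(false);
    runLogged(new Command() {
      @Override public void execute() { executed.set(true); }
    });
    System.out.println(executed.get()); // true if the callback ran
  }
}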
From source file:com.parse.ParseObject.java
/**
 * This saves all of the objects and files reachable from the given object. It does its work in
 * multiple waves, saving as many as possible in each wave. If there's ever an error, it just
 * gives up, sets error, and returns NO.
 */
private static Task<Void> deepSaveAsync(final Object object, final String sessionToken) {
  Set<ParseObject> objects = new HashSet<>();
  Set<ParseFile> files = new HashSet<>();
  collectDirtyChildren(object, objects, files);

  // This has to happen separately from everything else because ParseUser.save() is
  // special-cased to work for lazy users, but new users can't be created by
  // ParseMultiCommand's regular save.
  Set<ParseUser> users = new HashSet<>();
  for (ParseObject o : objects) {
    if (o instanceof ParseUser) {
      ParseUser user = (ParseUser) o;
      if (user.isLazy()) {
        users.add((ParseUser) o);
      }
    }
  }
  objects.removeAll(users);

  // objects will need to wait for files to be complete since they may be nested children.
  final AtomicBoolean filesComplete = new AtomicBoolean(false);
  List<Task<Void>> tasks = new ArrayList<>();
  for (ParseFile file : files) {
    tasks.add(file.saveAsync(sessionToken, null, null));
  }
  Task<Void> filesTask = Task.whenAll(tasks).continueWith(new Continuation<Void, Void>() {
    @Override
    public Void then(Task<Void> task) throws Exception {
      filesComplete.set(true);
      return null;
    }
  });

  // objects will need to wait for users to be complete since they may be nested children.
  final AtomicBoolean usersComplete = new AtomicBoolean(false);
  tasks = new ArrayList<>();
  for (final ParseUser user : users) {
    tasks.add(user.saveAsync(sessionToken));
  }
  Task<Void> usersTask = Task.whenAll(tasks).continueWith(new Continuation<Void, Void>() {
    @Override
    public Void then(Task<Void> task) throws Exception {
      usersComplete.set(true);
      return null;
    }
  });

  final Capture<Set<ParseObject>> remaining = new Capture<>(objects);
  Task<Void> objectsTask = Task.forResult(null).continueWhile(new Callable<Boolean>() {
    @Override
    public Boolean call() throws Exception {
      return remaining.get().size() > 0;
    }
  }, new Continuation<Void, Task<Void>>() {
    @Override
    public Task<Void> then(Task<Void> task) throws Exception {
      // Partition the objects into two sets: those that can be saved immediately,
      // and those that rely on other objects to be created first.
      final List<ParseObject> current = new ArrayList<>();
      final Set<ParseObject> nextBatch = new HashSet<>();
      for (ParseObject obj : remaining.get()) {
        if (obj.canBeSerialized()) {
          current.add(obj);
        } else {
          nextBatch.add(obj);
        }
      }
      remaining.set(nextBatch);

      if (current.size() == 0 && filesComplete.get() && usersComplete.get()) {
        // We do cycle-detection when building the list of objects passed to this function, so
        // this should never get called. But we should check for it anyway, so that we get an
        // exception instead of an infinite loop.
        throw new RuntimeException("Unable to save a ParseObject with a relation to a cycle.");
      }

      // Package all save commands together
      if (current.size() == 0) {
        return Task.forResult(null);
      }

      return enqueueForAll(current, new Continuation<Void, Task<Void>>() {
        @Override
        public Task<Void> then(Task<Void> toAwait) throws Exception {
          return saveAllAsync(current, sessionToken, toAwait);
        }
      });
    }
  });

  return Task.whenAll(Arrays.asList(filesTask, usersTask, objectsTask));
}
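filesComplete and usersComplete are flipped inside task continuations and later polled to decide whether a stalled save indicates a cycle. A simplified sketch of a continuation flipping a completion flag, using CompletableFuture in place of Parse's Task/Continuation API (illustrative only):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicBoolean;

public class CompletionFlag {
  public static void main(String[] args) {
    AtomicBoolean filesComplete = new AtomicBoolean(false);
    CompletableFuture<Void> files = CompletableFuture
        .runAsync(() -> { /* upload files here */ })
        .thenRun(() -> filesComplete.set(true)); // continuation flips the flag
    files.join();
    // Another stage can consult the flag to decide whether waiting is still useful.
    System.out.println("files complete? " + filesComplete.get());
  }
}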
From source file:de.hybris.platform.test.TransactionTest.java
@Test
public void testNestedTAError() throws Exception {
  final AtomicBoolean storeCalled = new AtomicBoolean();
  final AtomicBoolean commitCalled = new AtomicBoolean();
  final AtomicBoolean rollbackCalled = new AtomicBoolean();

  final Transaction transaction = new DefaultTransaction() {
    @Override
    public void rollback() throws TransactionException {
      rollbackCalled.set(true);
      super.rollback();
    }

    @Override
    public void commit() throws TransactionException {
      commitCalled.set(true);
      super.commit();
    }
  };
  transaction.enableDelayedStore(true);

  final EntityInstanceContext eCtx = new EntityInstanceContext() {
    @Override
    public ItemDeployment getItemDeployment() {
      return null;
    }

    @Override
    public PK getPK() {
      return PK.NULL_PK;
    }

    @Override
    public PersistencePool getPersistencePool() {
      return null;
    }

    @Override
    public void setPK(final PK pk) {
      // mock
    }
  };

  final EntityInstance mockEntity = new EntityInstance() {
    final EntityInstanceContext ctx = eCtx;

    @Override
    public PK ejbFindByPrimaryKey(final PK pkValue) throws YObjectNotFoundException {
      return null;
    }

    @Override
    public void ejbLoad() {
      // mock
    }

    @Override
    public void ejbRemove() {
      // mock
    }

    @Override
    public void ejbStore() {
      storeCalled.set(true);
      throw new IllegalArgumentException("let's rollback ;)");
    }

    @Override
    public EntityInstanceContext getEntityContext() {
      return ctx;
    }

    @Override
    public boolean needsStoring() {
      return true;
    }

    @Override
    public void setEntityContext(final EntityInstanceContext ctx) {
      // mock
    }

    @Override
    public void setNeedsStoring(final boolean needs) {
      // mock
    }
  };

  final ByteArrayOutputStream bos = new ByteArrayOutputStream();
  final PrintStream printstream = new PrintStream(bos);
  final PrintStream err = System.err;
  final AtomicReference<Title> itemRef = new AtomicReference<Title>();
  try {
    System.setErr(printstream);
    // outer TA
    transaction.execute(new TransactionBody() {
      @Override
      public Object execute() throws Exception {
        // inner TA
        transaction.execute(new TransactionBody() {
          @Override
          public Object execute() throws Exception {
            itemRef.set(UserManager.getInstance().createTitle("T" + System.currentTimeMillis()));
            // inject mock entity to call ejbStore upon -> throws exception
            transaction.registerEntityInstance(mockEntity);
            return null;
          }
        });
        return null;
      }
    });
    fail("IllegalArgumentException expected");
  } catch (final IllegalArgumentException ex) {
    assertTrue(storeCalled.get());
    assertEquals("let's rollback ;)", ex.getMessage());
    assertFalse(transaction.isRunning());
    assertEquals(0, transaction.getOpenTransactionCount());
    assertNotNull(itemRef.get());
    assertFalse(itemRef.get().isAlive());
    final String errorLog = new String(bos.toByteArray());
    assertFalse(errorLog.contains("no transaction running"));
  } catch (final Exception e) {
    fail("unexpected error " + e.getMessage());
  } finally {
    System.setErr(err);
  }
}
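In tests like this one, AtomicBoolean.get() is a convenient way to assert that a callback actually fired. A minimal version of that verification pattern (plain Java stands in for the JUnit assertions used above):

import java.util.concurrent.atomic.AtomicBoolean;

public class ListenerTest {
  interface Listener { void onEvent(); }

  public static void main(String[] args) {
    AtomicBoolean called = new AtomicBoolean(false);
    Listener l = () -> called.set(true);
    l.onEvent();
    // In a JUnit test this would be assertTrue(called.get()).
    if (!called.get()) throw new AssertionError("listener was never invoked");
  }
}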
From source file:com.machinepublishers.jbrowserdriver.JBrowserDriver.java
private String launchProcess(final Settings settings, final PortGroup portGroup) {
  final AtomicBoolean ready = new AtomicBoolean();
  final AtomicReference<String> logPrefix = new AtomicReference<String>("");
  new Thread(new Runnable() {
    @Override
    public void run() {
      List<String> myArgs = new ArrayList<String>();
      myArgs.add(settings.javaBinary() == null ? JAVA_BIN : settings.javaBinary());
      myArgs.addAll(inheritedArgs);
      if (!settings.customClasspath()) {
        myArgs.addAll(classpathArgs.get());
      }
      if (settings.javaExportModules()) {
        myArgs.add("-XaddExports:javafx.web/com.sun.webkit.network=ALL-UNNAMED");
        myArgs.add("-XaddExports:javafx.web/com.sun.webkit.network.about=ALL-UNNAMED");
        myArgs.add("-XaddExports:javafx.web/com.sun.webkit.network.data=ALL-UNNAMED");
        myArgs.add("-XaddExports:java.base/sun.net.www.protocol.http=ALL-UNNAMED");
        myArgs.add("-XaddExports:java.base/sun.net.www.protocol.https=ALL-UNNAMED");
        myArgs.add("-XaddExports:java.base/sun.net.www.protocol.file=ALL-UNNAMED");
        myArgs.add("-XaddExports:java.base/sun.net.www.protocol.ftp=ALL-UNNAMED");
        myArgs.add("-XaddExports:java.base/sun.net.www.protocol.jar=ALL-UNNAMED");
        myArgs.add("-XaddExports:java.base/sun.net.www.protocol.mailto=ALL-UNNAMED");
        myArgs.add("-XaddExports:javafx.graphics/com.sun.glass.ui=ALL-UNNAMED");
        myArgs.add("-XaddExports:javafx.web/com.sun.javafx.webkit=ALL-UNNAMED");
        myArgs.add("-XaddExports:javafx.web/com.sun.webkit=ALL-UNNAMED");
      }
      myArgs.add("-Djava.io.tmpdir=" + tmpDir.getAbsolutePath());
      myArgs.add("-Djava.rmi.server.hostname=" + settings.host());
      myArgs.addAll(settings.javaOptions());
      myArgs.add(JBrowserDriverServer.class.getName());
      myArgs.add(Long.toString(portGroup.child));
      myArgs.add(Long.toString(portGroup.parent));
      myArgs.add(Long.toString(portGroup.parentAlt));
      try {
        new ProcessExecutor().addListener(new ProcessListener() {
          @Override
          public void afterStart(Process proc, ProcessExecutor executor) {
            process.set(proc);
          }
        }).redirectOutput(new LogOutputStream() {
          boolean done = false;

          @Override
          protected void processLine(String line) {
            if (line != null && !line.isEmpty()) {
              if (!done) {
                synchronized (ready) {
                  if (line.startsWith("ready on ports ")) {
                    String[] parts = line.substring("ready on ports ".length()).split("/");
                    actualPortGroup.set(new PortGroup(Integer.parseInt(parts[0]),
                        Integer.parseInt(parts[1]), Integer.parseInt(parts[2])));
                    logPrefix.set(new StringBuilder().append("[Instance ")
                        .append(sessionIdCounter.incrementAndGet()).append("][Port ")
                        .append(actualPortGroup.get().child).append("]").toString());
                    ready.set(true);
                    ready.notifyAll();
                    done = true;
                  } else {
                    log(settings.logger(), logPrefix.get(), line);
                  }
                }
              } else {
                log(settings.logger(), logPrefix.get(), line);
              }
            }
          }
        }).redirectError(new LogOutputStream() {
          @Override
          protected void processLine(String line) {
            log(settings.logger(), logPrefix.get(), line);
          }
        }).destroyOnExit().command(myArgs).execute();
      } catch (Throwable t) {
        Util.handleException(t);
      }
      synchronized (ready) {
        ready.set(true);
        ready.notifyAll();
      }
    }
  }).start();
  synchronized (ready) {
    while (!ready.get()) {
      try {
        ready.wait();
        break;
      } catch (InterruptedException e) {
      }
    }
  }
  return logPrefix.get();
}
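Here the ready flag doubles as the monitor object for wait()/notifyAll(). A stripped-down sketch of that ready-gate pattern (names invented); note that this sketch keeps looping on ready.get() around wait(), without the break above, which fully guards against spurious wakeups:

import java.util.concurrent.atomic.AtomicBoolean;

public class ReadyGate {
  public static void main(String[] args) throws InterruptedException {
    final AtomicBoolean ready = new AtomicBoolean(false);
    new Thread(() -> {
      // ... perform startup work ...
      synchronized (ready) {
        ready.set(true);
        ready.notifyAll();
      }
    }).start();
    synchronized (ready) {
      while (!ready.get()) { // re-check the flag after every wakeup
        ready.wait();
      }
    }
    System.out.println("subprocess is ready");
  }
}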
From source file:com.datamelt.nifi.processors.ExecuteRuleEngine.java
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
  // map used to store the attribute name and its value from the content of the flow file
  final Map<String, String> propertyMap = new HashMap<>();
  // get a logger instance
  final ComponentLog logger = getLogger();
  // holds the header row from the content, if present
  final AtomicReference<HeaderRow> header = new AtomicReference<>();
  AtomicBoolean error = new AtomicBoolean();

  // get the flow file
  FlowFile flowFile = session.get();
  if (flowFile == null) {
    return;
  }

  // list of rows from splitting the original flow file content
  ArrayList<RuleEngineRow> flowFileRows = new ArrayList<RuleEngineRow>();
  // list of rows containing the detailed results of the ruleengine
  ArrayList<RuleEngineRow> flowFileDetails = new ArrayList<RuleEngineRow>();

  boolean headerPresent = context.getProperty(ATTRIBUTE_HEADER_PRESENT).getValue().equals("true");

  // put the name of the ruleengine zip file in the list of properties
  propertyMap.put(PROPERTY_RULEENGINE_ZIPFILE_NAME, context.getProperty(ATTRIBUTE_RULEENGINE_ZIPFILE).getValue());

  final int batchSize = Integer.parseInt(context.getProperty(BATCH_SIZE_NAME).getValue());

  // read flow file into input stream
  session.read(flowFile, new InputStreamCallback() {
    public void process(InputStream in) throws IOException {
      try {
        // iterator over the lines from the input stream
        LineIterator iterator = IOUtils.lineIterator(in, "utf-8");

        // check if configuration indicates that a header row is present in the flow file content
        if (headerPresent) {
          logger.debug("configuration indicates a header row is present in flow file content");
          // if there is at least one row of data and the header is not defined yet
          if (iterator.hasNext() && header.get() == null) {
            // set the header from the content
            header.set(new HeaderRow(iterator.nextLine(), separator));
          }
        }
        // if no header row is present in the flow file content
        else {
          logger.debug("configuration indicates no header row is present in flow file content");
          // use the header from the field names
          header.set(headerFromFieldNames);
        }

        // loop over all rows of data
        while (iterator.hasNext()) {
          // we handle the error per row of data
          error.set(false);
          // get a row to process
          String row = iterator.nextLine();
          // check that we have data
          if (row != null && !row.trim().equals("")) {
            RowFieldCollection rowFieldCollection = null;
            try {
              rowFieldCollection = getRowFieldCollection(row, header.get());

              logger.debug("RowFieldCollection header contains: "
                  + rowFieldCollection.getHeader().getNumberOfFields() + " fields");
              logger.debug("RowFieldCollection contains: "
                  + rowFieldCollection.getNumberOfFields() + " fields");

              // run the ruleengine with the given data from the flow file
              logger.debug("running business ruleengine...");

              // run the business logic/rules against the data
              ruleEngine.run("flowfile", rowFieldCollection);

              // add some debugging output that might be useful
              logger.debug("number of rulegroups: " + ruleEngine.getNumberOfGroups());
              logger.debug("number of rulegroups passed: " + ruleEngine.getNumberOfGroupsPassed());
              logger.debug("number of rulegroups failed: " + ruleEngine.getNumberOfGroupsFailed());
              logger.debug("number of rulegroups skipped: " + ruleEngine.getNumberOfGroupsSkipped());
              logger.debug("number of rules: " + ruleEngine.getNumberOfRules());
              logger.debug("number of rules passed: " + ruleEngine.getNumberOfRulesPassed());
              logger.debug("number of rules failed: " + ruleEngine.getNumberOfRulesFailed());
              logger.debug("number of actions: " + ruleEngine.getNumberOfActions());

              // add some properties of the ruleengine execution to the map
              addRuleEngineProperties(propertyMap);
            } catch (Exception ex) {
              error.set(true);
              logger.error(ex.getMessage(), ex);
            }

            // if no error occurred we save the data for the creation of the flow files
            if (!error.get()) {
              // process only if the collection of fields was changed by
              // a ruleengine action. this means the data was updated so
              // we will have to re-write/re-create the flow file content.
              if (rowFieldCollection.isCollectionUpdated()) {
                // put an indicator that the data was modified by the ruleengine
                propertyMap.put(PROPERTY_RULEENGINE_CONTENT_MODIFIED, "true");
                logger.debug("data was modified - updating flow file content with ruleengine results");
                // the RuleEngineRow instance will contain the row of data and the map of properties
                // and will later be used when the flow files are created
                flowFileRows.add(new RuleEngineRow(getResultRow(rowFieldCollection), propertyMap));
              } else {
                // put an indicator that the data was NOT modified by the ruleengine
                propertyMap.put(PROPERTY_RULEENGINE_CONTENT_MODIFIED, "false");
                logger.debug("data was not modified - using original content");
                // the RuleEngineRow instance will contain the row of data and the map of properties
                // and will later be used when the flow files are created
                flowFileRows.add(new RuleEngineRow(row, propertyMap));
              }

              if (flowFileRows.size() >= batchSize) {
                // generate flow files from the individual rows
                List<FlowFile> splitFlowFiles = generateFlowFileSplits(context, session, flowFileRows,
                    header.get(), headerPresent);
                // transfer all individual rows to success relationship
                if (splitFlowFiles.size() > 0) {
                  session.transfer(splitFlowFiles, SUCCESS);
                }
              }

              // if the user configured detailed results
              if (context.getProperty(ATTRIBUTE_OUTPUT_DETAILED_RESULTS).getValue().equals("true")) {
                // get the configured output type
                String outputType = context.getProperty(ATTRIBUTE_OUTPUT_DETAILED_RESULTS_TYPE).getValue();
                logger.debug("configuration set to output detailed results with type [" + outputType + "]");

                // we need to create a flow file only, if the ruleengine results are according to the output type settings
                if (outputType.equals(OUTPUT_TYPE_ALL_GROUPS_ALL_RULES)
                    || (outputType.equals(OUTPUT_TYPE_FAILED_GROUPS_ALL_RULES)
                        && ruleEngine.getNumberOfGroupsFailed() > 0)
                    || (outputType.equals(OUTPUT_TYPE_FAILED_GROUPS_FAILED_RULES)
                        && ruleEngine.getNumberOfGroupsFailed() > 0)
                    || (outputType.equals(OUTPUT_TYPE_FAILED_GROUPS_PASSED_RULES)
                        && ruleEngine.getNumberOfGroupsFailed() > 0)
                    || (outputType.equals(OUTPUT_TYPE_PASSED_GROUPS_ALL_RULES)
                        && ruleEngine.getNumberOfGroupsPassed() > 0)
                    || (outputType.equals(OUTPUT_TYPE_PASSED_GROUPS_FAILED_RULES)
                        && ruleEngine.getNumberOfGroupsPassed() > 0
                    || (outputType.equals(OUTPUT_TYPE_PASSED_GROUPS_PASSED_RULES)
                        && ruleEngine.getNumberOfGroupsPassed() > 0))) {
                  // create the content for the flow file
                  String content = getFlowFileRuleEngineDetailsContent(header.get(), headerPresent,
                      outputType, row);
                  // add results to the list
                  flowFileDetails.add(new RuleEngineRow(content, propertyMap));

                  if (flowFileDetails.size() >= batchSize) {
                    List<FlowFile> detailsFlowFiles = generateFlowFilesRuleEngineDetails(context, session,
                        flowFileDetails, header.get(), headerPresent);
                    // transfer all individual rows to detailed relationship
                    if (detailsFlowFiles.size() > 0) {
                      session.transfer(detailsFlowFiles, DETAILED_RESULTS);
                    }
                  }
                }
              }

              // clear the collections of ruleengine results
              ruleEngine.getRuleExecutionCollection().clear();
            }
            // if we have an error we create a flow file from the current row of data and send it to the failure relationship
            else {
              FlowFile failureFlowFile = generateFailureFlowFile(context, session, row, header.get(),
                  headerPresent);
              session.transfer(failureFlowFile, FAILURE);
            }
          }
        }

        LineIterator.closeQuietly(iterator);
      } catch (Exception ex) {
        ex.printStackTrace();
        logger.error("error running the business ruleengine", ex);
      }
    }
  });

  // generate flow files from the remaining individual rows
  List<FlowFile> splitFlowFiles = generateFlowFileSplits(context, session, flowFileRows, header.get(),
      headerPresent);
  // generate flow files from the remaining detailed results
  List<FlowFile> detailsFlowFiles = generateFlowFilesRuleEngineDetails(context, session, flowFileDetails,
      header.get(), headerPresent);

  // transfer the original flow file
  session.transfer(flowFile, ORIGINAL);

  // transfer all individual rows to the success relationship
  if (splitFlowFiles.size() > 0) {
    session.transfer(splitFlowFiles, SUCCESS);
  }

  // transfer all detailed results to the detailed results relationship
  if (detailsFlowFiles.size() > 0) {
    session.transfer(detailsFlowFiles, DETAILED_RESULTS);
  }
}
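In the processor above, error is reset before each row and checked after the rule engine runs, routing each row to a success or failure path. A compact sketch of that per-item error-flag pattern, with Integer.parseInt standing in for the rule engine call:

import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;

public class PerRowErrorFlag {
  public static void main(String[] args) {
    AtomicBoolean error = new AtomicBoolean();
    for (String row : List.of("1", "2", "oops", "4")) {
      error.set(false);              // reset per row
      try {
        Integer.parseInt(row);       // stand-in for the rule engine run
      } catch (NumberFormatException ex) {
        error.set(true);
      }
      System.out.println(row + " -> " + (error.get() ? "failure" : "success"));
    }
  }
}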
From source file:it.anyplace.sync.bep.BlockPusher.java
public FileUploadObserver pushFile(final DataSource dataSource, @Nullable FileInfo fileInfo,
    final String folder, final String path) {
  checkArgument(connectionHandler.hasFolder(folder),
      "supplied connection handler %s will not share folder %s", connectionHandler, folder);
  checkArgument(fileInfo == null || equal(fileInfo.getFolder(), folder));
  checkArgument(fileInfo == null || equal(fileInfo.getPath(), path));
  try {
    final ExecutorService monitoringProcessExecutorService = Executors.newCachedThreadPool();
    final long fileSize = dataSource.getSize();
    final Set<String> sentBlocks = Sets.newConcurrentHashSet();
    final AtomicReference<Exception> uploadError = new AtomicReference<>();
    final AtomicBoolean isCompleted = new AtomicBoolean(false);
    final Object updateLock = new Object();
    final Object listener = new Object() {
      @Subscribe
      public void handleRequestMessageReceivedEvent(RequestMessageReceivedEvent event) {
        BlockExchageProtos.Request request = event.getMessage();
        if (equal(request.getFolder(), folder) && equal(request.getName(), path)) {
          try {
            final String hash = BaseEncoding.base16().encode(request.getHash().toByteArray());
            logger.debug("handling block request = {}:{}-{} ({})", request.getName(),
                request.getOffset(), request.getSize(), hash);
            byte[] data = dataSource.getBlock(request.getOffset(), request.getSize(), hash);
            checkNotNull(data, "data not found for hash = %s", hash);
            final Future future = connectionHandler.sendMessage(
                Response.newBuilder().setCode(BlockExchageProtos.ErrorCode.NO_ERROR)
                    .setData(ByteString.copyFrom(data)).setId(request.getId()).build());
            monitoringProcessExecutorService.submit(new Runnable() {
              @Override
              public void run() {
                try {
                  future.get();
                  sentBlocks.add(hash);
                  synchronized (updateLock) {
                    updateLock.notifyAll();
                  }
                  //TODO retry on error, register error and throw on watcher
                } catch (InterruptedException ex) {
                  //return and do nothing
                } catch (ExecutionException ex) {
                  uploadError.set(ex);
                  synchronized (updateLock) {
                    updateLock.notifyAll();
                  }
                }
              }
            });
          } catch (Exception ex) {
            logger.error("error handling block request", ex);
            connectionHandler.sendMessage(Response.newBuilder()
                .setCode(BlockExchageProtos.ErrorCode.GENERIC).setId(request.getId()).build());
            uploadError.set(ex);
            synchronized (updateLock) {
              updateLock.notifyAll();
            }
          }
        }
      }
    };
    connectionHandler.getEventBus().register(listener);
    logger.debug("send index update for file = {}", path);
    final Object indexListener = new Object() {
      @Subscribe
      public void handleIndexRecordAquiredEvent(IndexHandler.IndexRecordAquiredEvent event) {
        if (equal(event.getFolder(), folder)) {
          for (FileInfo fileInfo : event.getNewRecords()) {
            if (equal(fileInfo.getPath(), path) && equal(fileInfo.getHash(), dataSource.getHash())) {
              //TODO check not invalid
              // sentBlocks.addAll(dataSource.getHashes());
              isCompleted.set(true);
              synchronized (updateLock) {
                updateLock.notifyAll();
              }
            }
          }
        }
      }
    };
    if (indexHandler != null) {
      indexHandler.getEventBus().register(indexListener);
    }
    final IndexUpdate indexUpdate = sendIndexUpdate(folder,
        BlockExchageProtos.FileInfo.newBuilder().setName(path).setSize(fileSize)
            .setType(BlockExchageProtos.FileInfoType.FILE).addAllBlocks(dataSource.getBlocks()),
        fileInfo == null ? null : fileInfo.getVersionList()).getRight();
    final FileUploadObserver messageUploadObserver = new FileUploadObserver() {
      @Override
      public void close() {
        logger.debug("closing upload process");
        try {
          connectionHandler.getEventBus().unregister(listener);
          monitoringProcessExecutorService.shutdown();
          if (indexHandler != null) {
            indexHandler.getEventBus().unregister(indexListener);
          }
        } catch (Exception ex) {
        }
        if (closeConnection && connectionHandler != null) {
          connectionHandler.close();
        }
        if (indexHandler != null) {
          FileInfo fileInfo = indexHandler.pushRecord(indexUpdate.getFolder(),
              Iterables.getOnlyElement(indexUpdate.getFilesList()));
          logger.info("sent file info record = {}", fileInfo);
        }
      }

      @Override
      public double getProgress() {
        return isCompleted() ? 1d : sentBlocks.size() / ((double) dataSource.getHashes().size());
      }

      @Override
      public String getProgressMessage() {
        return (Math.round(getProgress() * 1000d) / 10d) + "% " + sentBlocks.size() + "/"
            + dataSource.getHashes().size();
      }

      @Override
      public boolean isCompleted() {
        // return sentBlocks.size() == dataSource.getHashes().size();
        return isCompleted.get();
      }

      @Override
      public double waitForProgressUpdate() throws InterruptedException {
        synchronized (updateLock) {
          updateLock.wait();
        }
        if (uploadError.get() != null) {
          throw new RuntimeException(uploadError.get());
        }
        return getProgress();
      }

      @Override
      public DataSource getDataSource() {
        return dataSource;
      }
    };
    return messageUploadObserver;
  } catch (Exception ex) {
    throw new RuntimeException(ex);
  }
}
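isCompleted.get() backs both the lock-free isCompleted() status query and the wait loop around the upload. A reduced sketch of an observer built around that pairing (hypothetical class and method names):

import java.util.concurrent.atomic.AtomicBoolean;

public class UploadObserver {
  private final AtomicBoolean completed = new AtomicBoolean(false);
  private final Object updateLock = new Object();

  void markCompleted() {
    completed.set(true);
    synchronized (updateLock) { updateLock.notifyAll(); }
  }

  boolean isCompleted() { return completed.get(); } // lock-free status query

  void awaitCompletion() throws InterruptedException {
    synchronized (updateLock) {
      while (!completed.get()) { updateLock.wait(); }
    }
  }

  public static void main(String[] args) throws InterruptedException {
    UploadObserver o = new UploadObserver();
    new Thread(o::markCompleted).start();
    o.awaitCompletion();
    System.out.println(o.isCompleted()); // true
  }
}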
From source file:com.google.dart.java2dart.SyntaxTranslator.java
@Override
public boolean visit(org.eclipse.jdt.core.dom.ClassInstanceCreation node) {
  IMethodBinding binding = node.resolveConstructorBinding();
  String signature = JavaUtils.getJdtSignature(binding);
  TypeName typeNameNode = (TypeName) translate(node.getType());
  final List<Expression> arguments = translateArguments(binding, node.arguments());
  final ClassDeclaration innerClass;
  {
    AnonymousClassDeclaration anoDeclaration = node.getAnonymousClassDeclaration();
    if (anoDeclaration != null) {
      ITypeBinding superclass = anoDeclaration.resolveBinding().getSuperclass();
      signature = superclass.getKey() + StringUtils.substringAfter(signature, ";");
      String name = typeNameNode.getName().getName().replace('.', '_');
      name = name + "_" + context.generateTechnicalAnonymousClassIndex();
      innerClass = declareInnerClass(binding, anoDeclaration, name, ArrayUtils.EMPTY_STRING_ARRAY);
      typeNameNode = typeName(name);
      // prepare enclosing type
      final ITypeBinding enclosingTypeBinding = getEnclosingTypeBinding(node);
      final SimpleIdentifier enclosingTypeRef;
      final AtomicBoolean addEnclosingTypeRef = new AtomicBoolean();
      {
        if (enclosingTypeBinding != null) {
          enclosingTypeRef = identifier(enclosingTypeBinding.getName() + "_this");
          // add enclosing class references
          innerClass.accept(new RecursiveASTVisitor<Void>() {
            @Override
            public Void visitMethodInvocation(MethodInvocation node) {
              if (node.getTarget() == null) {
                IMethodBinding methodBinding = (IMethodBinding) context.getNodeBinding(node);
                if (methodBinding != null && methodBinding.getDeclaringClass() == enclosingTypeBinding) {
                  addEnclosingTypeRef.set(true);
                  node.setTarget(enclosingTypeRef);
                }
              }
              return super.visitMethodInvocation(node);
            }

            @Override
            public Void visitSimpleIdentifier(SimpleIdentifier node) {
              if (!(node.getParent() instanceof PropertyAccess)
                  && !(node.getParent() instanceof PrefixedIdentifier)) {
                Object binding = context.getNodeBinding(node);
                if (binding instanceof IVariableBinding) {
                  IVariableBinding variableBinding = (IVariableBinding) binding;
                  if (variableBinding.isField()
                      && variableBinding.getDeclaringClass() == enclosingTypeBinding) {
                    addEnclosingTypeRef.set(true);
                    replaceNode(node.getParent(), node, propertyAccess(enclosingTypeRef, node));
                  }
                }
              }
              return super.visitSimpleIdentifier(node);
            }
          });
        } else {
          enclosingTypeRef = null;
        }
      }
      // declare referenced final variables XXX
      final String finalName = name;
      anoDeclaration.accept(new ASTVisitor() {
        final Set<org.eclipse.jdt.core.dom.IVariableBinding> addedParameters = Sets.newHashSet();
        final List<FormalParameter> constructorParameters = Lists.newArrayList();
        int index;

        @Override
        public void endVisit(AnonymousClassDeclaration node) {
          if (!constructorParameters.isEmpty()) {
            // add parameters to the existing "inner" constructor XXX
            for (ClassMember classMember : innerClass.getMembers()) {
              if (classMember instanceof ConstructorDeclaration) {
                ConstructorDeclaration innerConstructor = (ConstructorDeclaration) classMember;
                innerConstructor.getParameters().getParameters().addAll(constructorParameters);
                return;
              }
            }
            // create new "inner" constructor
            innerClass.getMembers().add(index, constructorDeclaration(identifier(finalName), null,
                formalParameterList(constructorParameters), null));
          }
          super.endVisit(node);
        }

        @Override
        public void endVisit(SimpleName node) {
          IBinding nameBinding = node.resolveBinding();
          if (nameBinding instanceof org.eclipse.jdt.core.dom.IVariableBinding) {
            org.eclipse.jdt.core.dom.IVariableBinding variableBinding =
                (org.eclipse.jdt.core.dom.IVariableBinding) nameBinding;
            org.eclipse.jdt.core.dom.MethodDeclaration enclosingMethod = getEnclosingMethod(node);
            if (!variableBinding.isField() && enclosingMethod != null
                && variableBinding.getDeclaringMethod() != enclosingMethod.resolveBinding()
                && addedParameters.add(variableBinding)) {
              TypeName parameterTypeName = translateTypeName(variableBinding.getType());
              String parameterName = variableBinding.getName();
              SimpleIdentifier parameterNameNode = identifier(parameterName);
              innerClass.getMembers().add(index++,
                  fieldDeclaration(parameterTypeName, variableDeclaration(parameterNameNode)));
              constructorParameters.add(fieldFormalParameter(null, null, parameterNameNode));
              arguments.add(parameterNameNode);
              context.putReference(parameterNameNode, variableBinding, null);
            }
          }
          super.endVisit(node);
        }

        @Override
        public boolean visit(AnonymousClassDeclaration node) {
          if (addEnclosingTypeRef.get()) {
            TypeName parameterTypeName = translateTypeName(enclosingTypeBinding);
            innerClass.getMembers().add(index++, fieldDeclaration(false, Keyword.FINAL,
                parameterTypeName, variableDeclaration(enclosingTypeRef)));
            constructorParameters.add(fieldFormalParameter(null, null, enclosingTypeRef));
            arguments.add(thisExpression());
          }
          return super.visit(node);
        }
      });
    } else {
      innerClass = null;
    }
  }
  InstanceCreationExpression creation = instanceCreationExpression(Keyword.NEW, typeNameNode, null, arguments);
  context.putNodeBinding(creation, binding);
  context.putAnonymousDeclaration(creation, innerClass);
  context.getConstructorDescription(binding).instanceCreations.add(creation);
  return done(creation);
}
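addEnclosingTypeRef is written deep inside AST visitors and read after the traversal to decide whether to add a constructor parameter. A tiny sketch of that record-during-traversal pattern, using a forEach over a list in place of the JDT visitor:

import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;

public class VisitorFlag {
  public static void main(String[] args) {
    final AtomicBoolean sawNegative = new AtomicBoolean(false);
    // A visitor-style traversal records a fact; the flag is read after the walk.
    List.of(3, -1, 7).forEach(n -> {
      if (n < 0) sawNegative.set(true);
    });
    if (sawNegative.get()) {
      System.out.println("traversal found a negative value");
    }
  }
}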
From source file:org.apache.hadoop.hive.metastore.HiveMetaStore.java
/**
 * Start threads outside of the thrift service, such as the compactor threads.
 * @param conf Hive configuration object
 */
private static void startMetaStoreThreads(final HiveConf conf, final Lock startLock,
    final Condition startCondition, final AtomicBoolean startedServing) {
  // A thread is spun up to start these other threads. That's because we can't start them
  // until after the TServer has started, but once TServer.serve is called we aren't given back
  // control.
  Thread t = new Thread() {
    @Override
    public void run() {
      // This is a massive hack. The compactor threads have to access packages in ql (such as
      // AcidInputFormat). ql depends on metastore so we can't directly access those. To deal
      // with this the compactor thread classes have been put in ql and they are instantiated here
      // dynamically. This is not ideal but it avoids a massive refactoring of Hive packages.
      //
      // Wrap the start of the threads in a catch Throwable loop so that any failures
      // don't doom the rest of the metastore.
      startLock.lock();
      try {
        JvmPauseMonitor pauseMonitor = new JvmPauseMonitor(conf);
        pauseMonitor.start();
      } catch (Throwable t) {
        LOG.warn("Could not initiate the JvmPauseMonitor thread." + " GCs and Pauses may not be "
            + "warned upon.", t);
      }

      try {
        // Per the javadocs on Condition, do not depend on the condition alone as a start gate
        // since spurious wake ups are possible.
        while (!startedServing.get())
          startCondition.await();
        startCompactorInitiator(conf);
        startCompactorWorkers(conf);
        startCompactorCleaner(conf);
        startHouseKeeperService(conf);
      } catch (Throwable e) {
        LOG.error("Failure when starting the compactor, compactions may not happen, "
            + StringUtils.stringifyException(e));
      } finally {
        startLock.unlock();
      }

      ReplChangeManager.scheduleCMClearer(conf);
    }
  };
  t.setDaemon(true);
  t.setName("Metastore threads starter thread");
  t.start();
}
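startedServing is re-checked in a loop around Condition.await(), exactly as the comment advises. A self-contained sketch of that Lock/Condition start gate (names are illustrative):

import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.*;

public class StartGate {
  public static void main(String[] args) throws InterruptedException {
    Lock startLock = new ReentrantLock();
    Condition startCondition = startLock.newCondition();
    AtomicBoolean startedServing = new AtomicBoolean(false);

    Thread helper = new Thread(() -> {
      startLock.lock();
      try {
        // Re-check the predicate: await() can wake spuriously.
        while (!startedServing.get()) startCondition.await();
        System.out.println("server is up; starting background threads");
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      } finally {
        startLock.unlock();
      }
    });
    helper.start();

    startLock.lock();
    try {
      startedServing.set(true);  // flip the flag under the lock
      startCondition.signalAll();
    } finally {
      startLock.unlock();
    }
    helper.join();
  }
}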
From source file:divconq.tool.release.Main.java
@Override
public void run(Scanner scan, ApiSession api) {
  Path relpath = null;
  Path gitpath = null;
  Path wikigitpath = null;

  XElement fldset = Hub.instance.getConfig().selectFirst("CommandLine/Settings");
  if (fldset != null) {
    relpath = Paths.get(fldset.getAttribute("ReleasePath"));
    gitpath = Paths.get(fldset.getAttribute("GitPath"));
    wikigitpath = Paths.get(fldset.getAttribute("WikiGitPath"));
  }

  boolean running = true;
  while (running) {
    try {
      System.out.println();
      System.out.println("-----------------------------------------------");
      System.out.println(" Release Builder Menu");
      System.out.println("-----------------------------------------------");
      System.out.println("0) Exit");
      if (relpath != null)
        System.out.println("1) Build release package from Settings File");
      System.out.println("2) Build custom release package [under construction]");
      System.out.println("4) Pack the .jar files");
      if (gitpath != null)
        System.out.println("5) Copy Source to GitHub folder");
      System.out.println("6) Update AWWW");

      String opt = scan.nextLine();
      Long mopt = StringUtil.parseInt(opt);
      if (mopt == null)
        continue;

      switch (mopt.intValue()) {
      case 0:
        running = false;
        break;

      case 1: {
        ReleasesHelper releases = new ReleasesHelper();
        if (!releases.init(relpath))
          break;

        System.out.println("Select a release to build");
        System.out.println("0) None");
        List<String> rnames = releases.names();
        for (int i = 0; i < rnames.size(); i++)
          System.out.println((i + 1) + ") " + rnames.get(i));
        System.out.println("Option #: ");

        opt = scan.nextLine();
        mopt = StringUtil.parseInt(opt);
        if ((mopt == null) || (mopt == 0))
          break;

        XElement relchoice = releases.get(mopt.intValue() - 1);
        if (relchoice == null) {
          System.out.println("Invalid option");
          break;
        }

        PackagesHelper availpackages = new PackagesHelper();
        availpackages.init();

        InstallHelper inst = new InstallHelper();
        if (!inst.init(availpackages, relchoice))
          break;

        XElement prindesc = availpackages.get(inst.prinpackage);
        XElement prininst = prindesc.find("Install");
        if (prininst == null) {
          System.out.println("Principle package: " + inst.prinpackagenm
              + " cannot be released directly, it must be part of another package.");
          break;
        }

        String relvers = prindesc.getAttribute("Version");
        System.out.println("Building release version " + relvers);
        if (prindesc.hasAttribute("LastVersion"))
          System.out.println("Previous release version " + prindesc.getAttribute("LastVersion"));

        String rname = relchoice.getAttribute("Name");
        Path destpath = relpath.resolve(rname + "/" + rname + "-" + relvers + "-bin.zip");
        if (Files.exists(destpath)) {
          System.out.println("Version " + relvers + " already exists, overwrite? (y/n): ");
          if (!scan.nextLine().toLowerCase().startsWith("y"))
            break;
          Files.delete(destpath);
        }

        System.out.println("Preparing zip files");

        AtomicBoolean errored = new AtomicBoolean();
        Path tempfolder = FileUtil.allocateTempFolder2();
        ListStruct ignorepaths = new ListStruct();
        Set<String> nolongerdepends = new HashSet<>();
        Set<String> dependson = new HashSet<>();

        // put all the release files into a temp folder
        inst.instpkgs.forEach(pname -> {
          availpackages.get(pname).selectAll("DependsOn").stream()
              .filter(doel -> !doel.hasAttribute("Option") || inst.relopts.contains(doel.getAttribute("Option")))
              .forEach(doel -> {
                // copy all libraries we rely on
                doel.selectAll("Library").forEach(libel -> {
                  dependson.add(libel.getAttribute("File"));
                  Path src = Paths.get("./lib/" + libel.getAttribute("File"));
                  Path dest = tempfolder.resolve("lib/" + libel.getAttribute("File"));
                  try {
                    Files.createDirectories(dest.getParent());
                    if (Files.notExists(dest))
                      Files.copy(src, dest, StandardCopyOption.COPY_ATTRIBUTES);
                  } catch (Exception x) {
                    errored.set(true);
                    System.out.println("Unable to copy file: " + src);
                  }
                });
                // copy all files we rely on
                doel.selectAll("File").forEach(libel -> {
                  Path src = Paths.get("./" + libel.getAttribute("Path"));
                  Path dest = tempfolder.resolve(libel.getAttribute("Path"));
                  try {
                    Files.createDirectories(dest.getParent());
                    if (Files.notExists(dest))
                      Files.copy(src, dest, StandardCopyOption.COPY_ATTRIBUTES);
                  } catch (Exception x) {
                    errored.set(true);
                    System.out.println("Unable to copy file: " + src);
                  }
                });
                // copy all folders we rely on
                doel.selectAll("Folder").forEach(libel -> {
                  Path src = Paths.get("./" + libel.getAttribute("Path"));
                  Path dest = tempfolder.resolve(libel.getAttribute("Path"));
                  try {
                    Files.createDirectories(dest.getParent());
                  } catch (Exception x) {
                    errored.set(true);
                    System.out.println("Unable to copy file: " + src);
                  }
                  OperationResult cres = FileUtil.copyFileTree(src, dest);
                  if (cres.hasErrors())
                    errored.set(true);
                });
              });

          availpackages.get(pname).selectAll("IgnorePaths/Ignore")
              .forEach(doel -> ignorepaths.addItem(doel.getAttribute("Path")));

          // NoLongerDependsOn functionally currently only applies to libraries
          availpackages.get(pname).selectAll("NoLongerDependsOn/Library")
              .forEach(doel -> nolongerdepends.add(doel.getAttribute("File")));

          // copy the released packages folders
          Path src = Paths.get("./packages/" + pname);
          Path dest = tempfolder.resolve("packages/" + pname);
          try {
            Files.createDirectories(dest.getParent());
          } catch (Exception x) {
            errored.set(true);
            System.out.println("Unable to copy file: " + src);
          }

          // we may wish to enhance filter to allow .JAR sometimes, but this is meant to
          // prevent copying of packages/pname/lib/abc.lib.jar files
          OperationResult cres = FileUtil.copyFileTree(src, dest, path -> !path.toString().endsWith(".jar"));
          if (cres.hasErrors())
            errored.set(true);

          // copy the released packages libraries
          Path libsrc = Paths.get("./packages/" + pname + "/lib");
          Path libdest = tempfolder.resolve("lib");
          if (Files.exists(libsrc)) {
            cres = FileUtil.copyFileTree(libsrc, libdest);
            if (cres.hasErrors())
              errored.set(true);
          }
        });

        if (errored.get()) {
          System.out.println("Error with assembling package");
          break;
        }

        // copy the principle config
        Path csrc = Paths.get("./packages/" + inst.prinpackage + "/config");
        Path cdest = tempfolder.resolve("config/" + inst.prinpackagenm);
        if (Files.exists(csrc)) {
          Files.createDirectories(cdest);
          OperationResult cres = FileUtil.copyFileTree(csrc, cdest);
          if (cres.hasErrors()) {
            System.out.println("Error with prepping config");
            break;
          }
        }

        boolean configpassed = true;

        // copy packages with config = true
        for (XElement pkg : relchoice.selectAll("Package")) {
          if (!"true".equals(pkg.getAttribute("Config")))
            break;

          String pname = pkg.getAttribute("Name");
          int pspos = pname.lastIndexOf('/');
          String pnm = (pspos != -1) ? pname.substring(pspos + 1) : pname;

          csrc = Paths.get("./packages/" + pname + "/config");
          cdest = tempfolder.resolve("config/" + pnm);
          if (Files.exists(csrc)) {
            Files.createDirectories(cdest);
            OperationResult cres = FileUtil.copyFileTree(csrc, cdest);
            if (cres.hasErrors()) {
              System.out.println("Error with prepping extra config");
              configpassed = false;
              break;
            }
          }
        }

        if (!configpassed)
          break;

        // also copy installer config if being used
        if (inst.includeinstaller) {
          csrc = Paths.get("./packages/dc/dcInstall/config");
          cdest = tempfolder.resolve("config/dcInstall");
          if (Files.exists(csrc)) {
            Files.createDirectories(cdest);
            OperationResult cres = FileUtil.copyFileTree(csrc, cdest);
            if (cres.hasErrors()) {
              System.out.println("Error with prepping install config");
              break;
            }
          }
        }

        // write out the deployed file
        RecordStruct deployed = new RecordStruct();
        deployed.setField("Version", relvers);
        deployed.setField("PackageFolder", relpath.resolve(rname));
        deployed.setField("PackagePrefix", rname);

        OperationResult d1res = IOUtil.saveEntireFile(tempfolder.resolve("config/deployed.json"),
            deployed.toPrettyString());
        if (d1res.hasErrors()) {
          System.out.println("Error with prepping deployed");
          break;
        }

        RecordStruct deployment = new RecordStruct();
        deployment.setField("Version", relvers);
        if (prindesc.hasAttribute("LastVersion"))
          deployment.setField("DependsOn", prindesc.getAttribute("LastVersion"));
        deployment.setField("UpdateMessage",
            "This update is complete, you may accept this update as runnable.");

        nolongerdepends.removeAll(dependson);
        ListStruct deletefiles = new ListStruct();
        nolongerdepends.forEach(fname -> deletefiles.addItem("lib/" + fname));
        deployment.setField("DeleteFiles", deletefiles);
        deployment.setField("IgnorePaths", ignorepaths);

        d1res = IOUtil.saveEntireFile(tempfolder.resolve("deployment.json"),
            deployment.toPrettyString());
        if (d1res.hasErrors()) {
          System.out.println("Error with prepping deployment");
          break;
        }

        // write env file
        d1res = IOUtil.saveEntireFile(tempfolder.resolve("env.bat"),
            "set mem=" + relchoice.getAttribute("Memory", "2048") + "\r\n"
                + "SET project=" + inst.prinpackagenm + "\r\n"
                + "SET service=" + relchoice.getAttribute("Service", inst.prinpackagenm) + "\r\n"
                + "SET servicename=" + relchoice.getAttribute("ServiceName", inst.prinpackagenm + " Service") + "\r\n");
        if (d1res.hasErrors()) {
          System.out.println("Error with prepping env");
          break;
        }

        System.out.println("Packing Release file.");

        Path relbin = relpath.resolve(rname + "/" + rname + "-" + relvers + "-bin.zip");
        if (Files.notExists(relbin.getParent()))
          Files.createDirectories(relbin.getParent());

        ZipArchiveOutputStream zipout = new ZipArchiveOutputStream(relbin.toFile());
        try {
          Files.walkFileTree(tempfolder, EnumSet.of(FileVisitOption.FOLLOW_LINKS), Integer.MAX_VALUE,
              new SimpleFileVisitor<Path>() {
                @Override
                public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
                  ZipArchiveEntry entry = new ZipArchiveEntry(tempfolder.relativize(file).toString());
                  entry.setSize(Files.size(file));
                  zipout.putArchiveEntry(entry);
                  zipout.write(Files.readAllBytes(file));
                  zipout.closeArchiveEntry();
                  return FileVisitResult.CONTINUE;
                }
              });
        } catch (IOException x) {
          System.out.println("Error building zip: " + x);
        }
        zipout.close();

        System.out.println("Release file written");
        FileUtil.deleteDirectory(tempfolder);
        break;
      } // end case 1

      case 3: {
        System.out.println("Note these utilities are only good from the main console,");
        System.out.println("if you are using a remote connection then the encryption will");
        System.out.println("not work as expected. [we do not have access the master keys]");
        System.out.println();
        Foreground.utilityMenu(scan);
        break;
      }

      case 4: {
        System.out.println("Packing jar library files.");

        String[] packlist = new String[] { "divconq.core", "divconq.interchange", "divconq.web",
            "divconq.tasks", "divconq.tasks.api", "ncc.uploader.api", "ncc.uploader.core",
            "ncc.workflow", "sd.core" };
        String[] packnames = new String[] { "dcCore", "dcInterchange", "dcWeb", "dcTasks", "dcTasksApi",
            "nccUploaderApi", "nccUploader", "nccWorkflow", "sd/sdBackend" };

        for (int i = 0; i < packlist.length; i++) {
          String lib = packlist[i];
          String pname = packnames[i];

          Path relbin = Paths.get("./ext/" + lib + ".jar");
          Path srcbin = Paths.get("./" + lib + "/bin");
          Path packbin = Paths.get("./packages/" + pname + "/lib/" + lib + ".jar");

          if (Files.notExists(relbin.getParent()))
            Files.createDirectories(relbin.getParent());
          Files.deleteIfExists(relbin);

          ZipArchiveOutputStream zipout = new ZipArchiveOutputStream(relbin.toFile());
          try {
            Files.walkFileTree(srcbin, EnumSet.of(FileVisitOption.FOLLOW_LINKS), Integer.MAX_VALUE,
                new SimpleFileVisitor<Path>() {
                  @Override
                  public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
                    ZipArchiveEntry entry = new ZipArchiveEntry(srcbin.relativize(file).toString());
                    entry.setSize(Files.size(file));
                    zipout.putArchiveEntry(entry);
                    zipout.write(Files.readAllBytes(file));
                    zipout.closeArchiveEntry();
                    return FileVisitResult.CONTINUE;
                  }
                });
          } catch (IOException x) {
            System.out.println("Error building zip: " + x);
          }
          zipout.close();

          Files.copy(relbin, packbin, StandardCopyOption.REPLACE_EXISTING);
        }

        System.out.println("Done");
        break;
      }

      case 5: {
        System.out.println("Copying Source Files");
        System.out.println("Cleaning folders");

        OperationResult or = FileUtil.deleteDirectory(gitpath.resolve("divconq.core/src/main/java"));
        if (or.hasErrors()) {
          System.out.println("Error deleting files");
          break;
        }
        or = FileUtil.deleteDirectory(gitpath.resolve("divconq.core/src/main/resources"));
        if (or.hasErrors()) {
          System.out.println("Error deleting files");
          break;
        }
        or = FileUtil.deleteDirectory(gitpath.resolve("divconq.interchange/src/main/java"));
        if (or.hasErrors()) {
          System.out.println("Error deleting files");
          break;
        }
        or = FileUtil.deleteDirectory(gitpath.resolve("divconq.tasks/src/main/java"));
        if (or.hasErrors()) {
          System.out.println("Error deleting files");
          break;
        }
        or = FileUtil.deleteDirectory(gitpath.resolve("divconq.tasks.api/src/main/java"));
        if (or.hasErrors()) {
          System.out.println("Error deleting files");
          break;
        }
        or = FileUtil.deleteDirectory(gitpath.resolve("divconq.web/src/main/java"));
        if (or.hasErrors()) {
          System.out.println("Error deleting files");
          break;
        }
        or = FileUtil.deleteDirectory(gitpath.resolve("packages"));
        if (or.hasErrors()) {
          System.out.println("Error deleting files");
          break;
        }
        or = FileUtil.deleteDirectoryContent(wikigitpath, ".git");
        if (or.hasErrors()) {
          System.out.println("Error deleting wiki files");
          break;
        }

        System.out.println("Copying folders");

        System.out.println("Copy tree ./divconq.core/src");
        or = FileUtil.copyFileTree(Paths.get("./divconq.core/src/divconq"),
            gitpath.resolve("divconq.core/src/main/java/divconq"), new Predicate<Path>() {
              @Override
              public boolean test(Path file) {
                return file.getFileName().toString().endsWith(".java");
              }
            });
        if (or.hasErrors()) {
          System.out.println("Error copying files");
          break;
        }
        or = FileUtil.copyFileTree(Paths.get("./divconq.core/src/org"),
            gitpath.resolve("divconq.core/src/main/java/org"), new Predicate<Path>() {
              @Override
              public boolean test(Path file) {
                return file.getFileName().toString().endsWith(".java");
              }
            });
        if (or.hasErrors()) {
          System.out.println("Error copying files");
          break;
        }
        or = FileUtil.copyFileTree(Paths.get("./divconq.core/src/localize"),
            gitpath.resolve("divconq.core/src/main/resources/localize"), new Predicate<Path>() {
              @Override
              public boolean test(Path file) {
                return file.getFileName().toString().endsWith(".xml");
              }
            });
        if (or.hasErrors()) {
          System.out.println("Error copying files");
          break;
        }

        System.out.println("Copy tree ./divconq.interchange/src");
        or = FileUtil.copyFileTree(Paths.get("./divconq.interchange/src"),
            gitpath.resolve("divconq.interchange/src/main/java"));
        if (or.hasErrors()) {
          System.out.println("Error copying files");
          break;
        }

        System.out.println("Copy tree ./divconq.tasks/src");
        or = FileUtil.copyFileTree(Paths.get("./divconq.tasks/src"),
            gitpath.resolve("divconq.tasks/src/main/java"));
        if (or.hasErrors()) {
          System.out.println("Error copying files");
          break;
        }

        System.out.println("Copy tree ./divconq.tasks.api/src");
        or = FileUtil.copyFileTree(Paths.get("./divconq.tasks.api/src"),
            gitpath.resolve("divconq.tasks.api/src/main/java"));
        if (or.hasErrors()) {
          System.out.println("Error copying files");
          break;
        }

        System.out.println("Copy tree ./divconq.web/src");
        or = FileUtil.copyFileTree(Paths.get("./divconq.web/src"),
            gitpath.resolve("divconq.web/src/main/java"));
        if (or.hasErrors()) {
          System.out.println("Error copying files");
          break;
        }

        System.out.println("Copy tree ./packages/dcCore");
        or = FileUtil.copyFileTree(Paths.get("./packages/dcCore"), gitpath.resolve("packages/dcCore"));
        if (or.hasErrors()) {
          System.out.println("Error copying files");
          break;
        }

        System.out.println("Copy tree ./packages/dcCorePublic");
        or = FileUtil.copyFileTree(Paths.get("./packages/dcCorePublic"),
            gitpath.resolve("packages/dcCorePublic"));
        if (or.hasErrors()) {
          System.out.println("Error copying files");
          break;
        }

        System.out.println("Copy tree ./packages/dcInterchange");
        or = FileUtil.copyFileTree(Paths.get("./packages/dcInterchange"),
            gitpath.resolve("packages/dcInterchange"));
        if (or.hasErrors()) {
          System.out.println("Error copying files");
          break;
        }

        System.out.println("Copy tree ./packages/dcTasks");
        or = FileUtil.copyFileTree(Paths.get("./packages/dcTasks"), gitpath.resolve("packages/dcTasks"));
        if (or.hasErrors()) {
          System.out.println("Error copying files");
          break;
        }

        System.out.println("Copy tree ./packages/dcTasksApi");
        or = FileUtil.copyFileTree(Paths.get("./packages/dcTasksApi"),
            gitpath.resolve("packages/dcTasksApi"));
        if (or.hasErrors()) {
          System.out.println("Error copying files");
          break;
        }

        System.out.println("Copy tree ./packages/dcTasksWeb");
        or = FileUtil.copyFileTree(Paths.get("./packages/dcTasksWeb"),
            gitpath.resolve("packages/dcTasksWeb"));
        if (or.hasErrors()) {
          System.out.println("Error copying files");
          break;
        }

        System.out.println("Copy tree ./packages/dcTest");
        or = FileUtil.copyFileTree(Paths.get("./packages/dcTest"), gitpath.resolve("packages/dcTest"));
        if (or.hasErrors()) {
          System.out.println("Error copying files");
          break;
        }

        System.out.println("Copy tree ./packages/dcWeb");
        or = FileUtil.copyFileTree(Paths.get("./packages/dcWeb"), gitpath.resolve("packages/dcWeb"));
        if (or.hasErrors()) {
          System.out.println("Error copying files");
          break;
        }

        System.out.println("Copy tree ./divconq.wiki/public");
        or = FileUtil.copyFileTree(Paths.get("./divconq.wiki/public"), wikigitpath);
        if (or.hasErrors()) {
          System.out.println("Error copying files");
          break;
        }

        System.out.println("Copying files");
        Files.copy(Paths.get("./README.md"), gitpath.resolve("README.md"),
            StandardCopyOption.REPLACE_EXISTING, StandardCopyOption.COPY_ATTRIBUTES);
        Files.copy(Paths.get("./RELEASE_NOTES.md"), gitpath.resolve("RELEASE_NOTES.md"),
            StandardCopyOption.REPLACE_EXISTING, StandardCopyOption.COPY_ATTRIBUTES);
        Files.copy(Paths.get("./NOTICE.txt"), gitpath.resolve("NOTICE.txt"),
            StandardCopyOption.REPLACE_EXISTING, StandardCopyOption.COPY_ATTRIBUTES);
        Files.copy(Paths.get("./LICENSE.txt"), gitpath.resolve("LICENSE.txt"),
            StandardCopyOption.REPLACE_EXISTING, StandardCopyOption.COPY_ATTRIBUTES);

        System.out.println("Done");
        break;
      }

      case 6: {
        System.out.println("Are you sure you want to update AWWW Server? (y/n): ");
        if (!scan.nextLine().toLowerCase().startsWith("y"))
          break;

        ReleasesHelper releases = new ReleasesHelper();
        if (!releases.init(relpath))
          break;

        XElement relchoice = releases.get("AWWWServer");
        if (relchoice == null) {
          System.out.println("Invalid option");
          break;
        }

        PackagesHelper availpackages = new PackagesHelper();
        availpackages.init();

        InstallHelper inst = new InstallHelper();
        if (!inst.init(availpackages, relchoice))
          break;

        ServerHelper ssh = new ServerHelper();
        if (!ssh.init(relchoice.find("SSH")))
          break;

        ChannelSftp sftp = null;
        try {
          Channel channel = ssh.session().openChannel("sftp");
          channel.connect();
          sftp = (ChannelSftp) channel;

          // go to routines folder
          sftp.cd("/usr/local/bin/dc/AWWWServer");

          FileRepositoryBuilder builder = new FileRepositoryBuilder();
          Repository repository = builder.setGitDir(new File(".git")).findGitDir() // scan up the file system tree
              .build();

          String lastsync = releases.getData("AWWWServer").getFieldAsString("LastCommitSync");

          RevWalk rw = new RevWalk(repository);
          ObjectId head1 = repository.resolve(Constants.HEAD);
          RevCommit commit1 = rw.parseCommit(head1);

          releases.getData("AWWWServer").setField("LastCommitSync", head1.name());

          ObjectId rev2 = repository.resolve(lastsync);
          RevCommit parent = rw.parseCommit(rev2);
          //RevCommit parent2 = rw.parseCommit(parent.getParent(0).getId());

          DiffFormatter df = new DiffFormatter(DisabledOutputStream.INSTANCE);
          df.setRepository(repository);
          df.setDiffComparator(RawTextComparator.DEFAULT);
          df.setDetectRenames(true);

          // list oldest first or change types are all wrong!!
          List<DiffEntry> diffs = df.scan(parent.getTree(), commit1.getTree());

          for (DiffEntry diff : diffs) {
            String gnpath = diff.getNewPath();
            String gopath = diff.getOldPath();
            Path npath = Paths.get("./" + gnpath);
            Path opath = Paths.get("./" + gopath);

            if (diff.getChangeType() == ChangeType.DELETE) {
              if (inst.containsPathExtended(opath)) {
                System.out.println("- " + diff.getChangeType().name() + " - " + opath);
                try {
                  sftp.rm(opath.toString());
                  System.out.println("deleted!!");
                } catch (SftpException x) {
                  System.out.println(" !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!");
                  System.out.println("Sftp Error: " + x);
                  System.out.println(" !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!");
                }
              } else {
                System.out.println("/ " + diff.getChangeType().name() + " - " + gopath
                    + " !!!!!!!!!!!!!!!!!!!!!!!!!");
              }
            } else if ((diff.getChangeType() == ChangeType.ADD)
                || (diff.getChangeType() == ChangeType.MODIFY)
                || (diff.getChangeType() == ChangeType.COPY)) {
              if (inst.containsPathExtended(npath)) {
                System.out.println("+ " + diff.getChangeType().name() + " - " + npath);
                try {
                  ssh.makeDirSftp(sftp, npath.getParent());
                  sftp.put(npath.toString(), npath.toString(), ChannelSftp.OVERWRITE);
                  sftp.chmod(npath.endsWith(".sh") ? 484 : 420, npath.toString()); // 644 octal = 420 dec, 744 octal = 484 dec
                  System.out.println("uploaded!!");
                } catch (SftpException x) {
                  System.out.println(" !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!");
                  System.out.println("Sftp Error: " + x);
                  System.out.println(" !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!");
                }
              } else {
                System.out.println("> " + diff.getChangeType().name() + " - " + gnpath
                    + " !!!!!!!!!!!!!!!!!!!!!!!!!");
              }
            } else if (diff.getChangeType() == ChangeType.RENAME) {
              // remove the old
              if (inst.containsPathExtended(opath)) {
                System.out.println("- " + diff.getChangeType().name() + " - " + opath);
                try {
                  sftp.rm(opath.toString());
                  System.out.println("deleted!!");
                } catch (SftpException x) {
                  System.out.println(" !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!");
                  System.out.println("Sftp Error: " + x);
                  System.out.println(" !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!");
                }
              } else {
                System.out.println("/ " + diff.getChangeType().name() + " - " + gopath
                    + " !!!!!!!!!!!!!!!!!!!!!!!!!");
              }
              // add the new path
              if (inst.containsPathExtended(npath)) {
                System.out.println("+ " + diff.getChangeType().name() + " - " + npath);
                try {
                  ssh.makeDirSftp(sftp, npath.getParent());
                  sftp.put(npath.toString(), npath.toString(), ChannelSftp.OVERWRITE);
                  sftp.chmod(npath.endsWith(".sh") ? 484 : 420, npath.toString()); // 644 octal = 420 dec, 744 octal = 484 dec
                  System.out.println("uploaded!!");
                } catch (SftpException x) {
                  System.out.println(" !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!");
                  System.out.println("Sftp Error: " + x);
                  System.out.println(" !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!");
                }
              } else {
                System.out.println("> " + diff.getChangeType().name() + " - " + gnpath
                    + " !!!!!!!!!!!!!!!!!!!!!!!!!");
              }
            } else {
              System.out.println("??????????????????????????????????????????????????????????");
              System.out.println(": " + diff.getChangeType().name() + " - " + gnpath
                  + " ?????????????????????????");
              System.out.println("??????????????????????????????????????????????????????????");
            }
          }

          rw.dispose();
          repository.close();
          releases.saveData();
        } catch (JSchException x) {
          System.out.println("Sftp Error: " + x);
        } finally {
          // guard against a failed channel open leaving sftp null
          if (sftp != null && sftp.isConnected())
            sftp.exit();
          ssh.close();
        }

        break;
      }

      case 7: {
        Path sfolder = Paths.get("/Work/Projects/awww-current/dairy-graze/poly");
        Path dfolder = Paths.get("/Work/Projects/awww-current/dairy-graze/poly-js");

        Files.list(sfolder).forEach(file -> {
          String fname = file.getFileName().toString();
          if (!fname.endsWith(".xml"))
            return;

          FuncResult<XElement> lres = XmlReader.loadFile(file, false);
          if (lres.isEmptyResult()) {
            System.out.println("Unable to parse: " + file);
            return;
          }

          String zc = fname.substring(5, 8);
          String code = "zipsData['" + zc + "'] = ";
          XElement root = lres.getResult();

          /*
           * <polyline1 lng="-90.620897" lat="45.377447"/>
           * <polyline1 lng="-90.619327" lat="45.3805"/>
           *
           * [-71.196845,41.67757],[-71.120168,41.496831],[-71.317338,41.474923],[-71.196845,41.67757]
           */
          ListStruct center = new ListStruct();
          ListStruct cords = new ListStruct();
          ListStruct currentPoly = null;
          //String currentName = null;

          for (XElement child : root.selectAll("*")) {
            String cname = child.getName();

            if (cname.startsWith("marker")) {
              // not always accurate
              if (center.isEmpty())
                center.addItem(Struct.objectToDecimal(child.getAttribute("lng")),
                    Struct.objectToDecimal(child.getAttribute("lat")));

              currentPoly = new ListStruct();
              cords.addItem(new ListStruct(currentPoly));
              continue;
            }

            /*
            if (cname.startsWith("info")) {
              System.out.println("areas: " + child.getAttribute("areas"));
              continue;
            }
            */

            if (!cname.startsWith("polyline"))
              continue;

            if (currentPoly == null) {
              //if (!cname.equals(currentName)) {
              //if (currentName == null) {
              //  currentName = cname;
              //  System.out.println("new poly: " + cname);
              currentPoly = new ListStruct();
              cords.addItem(new ListStruct(currentPoly));
            }

            currentPoly.addItem(new ListStruct(Struct.objectToDecimal(child.getAttribute("lng")),
                Struct.objectToDecimal(child.getAttribute("lat"))));
          }

          RecordStruct feat = new RecordStruct().withField("type", "Feature")
              .withField("id", "zip" + zc)
              .withField("properties", new RecordStruct().withField("name", "Prefix " + zc).withField("alias", zc))
              .withField("geometry", new RecordStruct().withField("type", "MultiPolygon")
                  .withField("coordinates", cords));

          RecordStruct entry = new RecordStruct().withField("code", zc).withField("geo", feat)
              .withField("center", center);

          IOUtil.saveEntireFile2(dfolder.resolve("us-zips-" + zc + ".js"),
              code + entry.toPrettyString() + ";");
        });

        break;
      }
      }
    } catch (Exception x) {
      System.out.println("CLI error: " + x);
    }
  }
}
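In case 1 above, errored lets the forEach lambdas report copy failures to the surrounding method, again because lambdas can only capture effectively final locals. A small sketch of that pattern over a directory stream (the paths and the failing operation are invented):

import java.nio.file.*;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.stream.Stream;

public class CopyErrors {
  public static void main(String[] args) throws Exception {
    AtomicBoolean errored = new AtomicBoolean(false);
    try (Stream<Path> files = Files.list(Paths.get("."))) {
      files.forEach(p -> {
        try {
          p.toRealPath(); // stand-in for a copy operation that may throw
        } catch (Exception x) {
          errored.set(true); // the AtomicBoolean itself is effectively final
        }
      });
    }
    if (errored.get()) System.out.println("at least one file failed");
  }
}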