Usage examples for org.apache.commons.io.FileUtils#moveDirectory, collected from open-source projects
public static void moveDirectory(File srcDir, File destDir) throws IOException
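Before the project-specific examples, a minimal self-contained sketch of the call itself (hypothetical /tmp paths; assumes commons-io 2.x on the classpath). Per the commons-io documentation, moveDirectory renames the directory when the platform allows it and otherwise falls back to a copy followed by a delete, and it fails when the destination directory already exists, so the sketch clears the destination first.

import java.io.File;
import java.io.IOException;

import org.apache.commons.io.FileUtils;

public class MoveDirectorySketch {
  public static void main(String[] args) throws IOException {
    File srcDir = new File("/tmp/example-src");   // hypothetical source directory
    File destDir = new File("/tmp/example-dest"); // must not exist when moveDirectory runs
    FileUtils.forceMkdir(srcDir);                 // ensure there is something to move
    FileUtils.deleteQuietly(destDir);             // moveDirectory throws if destDir exists
    FileUtils.moveDirectory(srcDir, destDir);     // rename if possible, else copy then delete
  }
}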
From source file:de.uzk.hki.da.cb.RestructureAction.java
static void makeRepOfSIPContent(Path objectPath, Path dataPath, String repName) throws IOException {
  final String A = "a";
  final String DATA_TMP = WorkArea.DATA + UNDERSCORE;
  if (isNotSet(repName))
    throw new IllegalArgumentException("rep name not set");
  if (isNotSet(dataPath))
    throw new IllegalArgumentException("data path not set");
  if (isNotSet(objectPath))
    throw new IllegalArgumentException("object path not set");
  FileUtils.moveDirectory(dataPath.toFile(), Path.makeFile(objectPath, DATA_TMP));
  dataPath.toFile().mkdirs();
  FileUtils.moveDirectory(Path.makeFile(objectPath, DATA_TMP), Path.makeFile(dataPath, repName + A));
}
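Note the two-step dance: the SIP's data directory is moved aside to a temporary data_ folder under objectPath, an empty data directory is recreated, and the saved contents are then moved back under it as the first representation (repName + "a"). The detour is needed because moveDirectory fails when the destination exists, and here the final destination sits inside the original source path.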
From source file:com.github.robozonky.installer.RoboZonkyInstallerListener.java
private static CommandLinePart prepareStonky() {
  try {
    final String host = Variables.GOOGLE_CALLBACK_HOST.getValue(DATA);
    final String port = Variables.GOOGLE_CALLBACK_PORT.getValue(DATA);
    final CommandLinePart cli = new CommandLinePart();
    if (Boolean.valueOf(Variables.IS_STONKY_ENABLED.getValue(DATA))) {
      cli.setProperty(GOOGLE_CALLBACK_HOST.getKey(), host);
      cli.setProperty(GOOGLE_CALLBACK_PORT.getKey(), port);
      // reuse the same code for this as we do in CLI
      LOGGER.debug("Preparing Google credentials.");
      final String username = Variables.ZONKY_USERNAME.getValue(DATA);
      final GoogleCredentialsFeature google = new GoogleCredentialsFeature(username);
      google.setHost(host);
      google.setPort(Integer.parseInt(port));
      google.runGoogleCredentialCheck();
      LOGGER.debug("Credential check over.");
      // copy credentials to the correct directory
      final File source = GOOGLE_LOCAL_FOLDER.getValue().map(File::new).filter(File::isDirectory)
          .orElseThrow(() -> new IllegalStateException("Google credentials folder is not proper."));
      final File target = new File(INSTALL_PATH, source.getName());
      LOGGER.debug("Will copy {} to {}.", source, target);
      FileUtils.moveDirectory(source, target);
    }
    return cli;
  } catch (final Exception ex) {
    throw new IllegalStateException("Failed configuring Google account.", ex);
  }
}
From source file:com.xiaomi.linden.core.search.MultiLindenCoreImpl.java
private Response deleteIndex(LindenIndexRequest request) throws IOException {
  // Only INDEX_NAME division type supports index delete
  if (multiIndexStrategy instanceof TimeLimitMultiIndexStrategy
      || multiIndexStrategy instanceof DocNumLimitMultiIndexStrategy) {
    return ResponseUtils.buildFailedResponse("Index delete is not supported in current multi-index core");
  }
  if (request.getIndexName() == null) {
    return ResponseUtils.buildFailedResponse("Index name is not set in index delete request.");
  }
  String fullIndexName = MultiIndexStrategy.MULTI_INDEX_PREFIX_NAME + request.getIndexName();
  if (lindenCoreMap.containsKey(fullIndexName)) {
    LindenCore core = lindenCoreMap.remove(fullIndexName);
    if (core != null) {
      core.close();
    }
    if (config.getIndexType() != LindenConfig.IndexType.RAM) {
      String dir = FilenameUtils.concat(baseIndexDir, fullIndexName);
      String destDir = FilenameUtils.concat(baseIndexDir, "delete_" + fullIndexName);
      if (new File(dir).exists()) {
        FileUtils.moveDirectory(new File(dir), new File(destDir));
      }
    }
    return ResponseUtils.SUCCESS;
  }
  return ResponseUtils.buildFailedResponse("Index " + request.getIndexName() + " is not found.");
}
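Worth noting: instead of deleting the closed index in place, the directory is renamed to a delete_-prefixed sibling via moveDirectory, so the delete request returns quickly and the data stays on disk for later cleanup or recovery (presumably by a separate sweep; this listing only shows the rename).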
From source file:com.github.ambry.store.DiskReformatter.java
/**
 * Uses {@link StoreCopier} to convert all the partitions on the given disk (D).
 * 1. Copies one partition on D to a scratch space
 * 2. Using {@link StoreCopier}, performs copies of all other partitions on D using D as a staging area. When a
 *    partition is completely copied and verified, the original is replaced by the copy.
 * 3. Copies the partition in the scratch space back onto D.
 * 4. Deletes the folder in the scratch space
 * @param diskMountPath the mount path of the disk to reformat
 * @param scratch the scratch space to use
 * @throws Exception
 */
public void reformat(String diskMountPath, File scratch) throws Exception {
  if (!scratch.exists()) {
    throw new IllegalArgumentException("Scratch space " + scratch + " does not exist");
  }
  List<ReplicaId> replicasOnDisk = new ArrayList<>();
  // populate the replicas on disk
  List<? extends ReplicaId> replicaIds = clusterMap.getReplicaIds(dataNodeId);
  for (ReplicaId replicaId : replicaIds) {
    if (replicaId.getDiskId().getMountPath().equals(diskMountPath)) {
      replicasOnDisk.add(replicaId);
    }
  }
  if (replicasOnDisk.size() == 0) {
    throw new IllegalArgumentException("There are no replicas on " + diskMountPath + " of " + dataNodeId);
  }
  replicasOnDisk.sort(Comparator.comparingLong(ReplicaId::getCapacityInBytes));
  logger.info("Found {} on {}", replicasOnDisk, diskMountPath);
  // move the last replica id (the largest one) to scratch space
  ReplicaId toMove = replicasOnDisk.get(replicasOnDisk.size() - 1);
  String partIdString = toMove.getPartitionId().toString();
  File scratchSrc = new File(toMove.getReplicaPath());
  File scratchTmp = new File(scratch, partIdString + RELOCATION_IN_PROGRESS_SUFFIX);
  File scratchTgt = new File(scratch, partIdString + RELOCATED_DIR_NAME_SUFFIX);
  if (scratchTmp.exists()) {
    throw new IllegalStateException(scratchTmp + " already exists");
  }
  if (scratchTgt.exists()) {
    throw new IllegalStateException(scratchTgt + " already exists");
  }
  ensureNotInUse(scratchSrc, toMove.getCapacityInBytes());
  logger.info("Moving {} to {}", scratchSrc, scratchTgt);
  FileUtils.moveDirectory(scratchSrc, scratchTmp);
  if (!scratchTmp.renameTo(scratchTgt)) {
    throw new IllegalStateException("Could not rename " + scratchTmp + " to " + scratchTgt);
  }
  // reformat each store, except the one moved, one by one
  for (int i = 0; i < replicasOnDisk.size() - 1; i++) {
    ReplicaId replicaId = replicasOnDisk.get(i);
    partIdString = replicaId.getPartitionId().toString();
    File src = new File(replicaId.getReplicaPath());
    File tgt = new File(replicaId.getMountPath(), partIdString + UNDER_REFORMAT_DIR_NAME_SUFFIX);
    logger.info("Copying {} to {}", src, tgt);
    copy(partIdString, src, tgt, replicaId.getCapacityInBytes());
    logger.info("Deleting {}", src);
    Utils.deleteFileOrDirectory(src);
    logger.info("Renaming {} to {}", tgt, src);
    if (!tgt.renameTo(src)) {
      throw new IllegalStateException("Could not rename " + tgt + " to " + src);
    }
    logger.info("Done reformatting {}", replicaId);
  }
  // reformat the moved store
  logger.info("Copying {} to {}", scratchTgt, scratchSrc);
  copy(toMove.getPartitionId().toString(), scratchTgt, scratchSrc, toMove.getCapacityInBytes());
  logger.info("Deleting {}", scratchTgt);
  Utils.deleteFileOrDirectory(scratchTgt);
  logger.info("Done reformatting {}", toMove);
  logger.info("Done reformatting disk {}", diskMountPath);
}
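This example is careful about crash-consistency: moveDirectory copies across file systems and is therefore not atomic, so the store is first moved into the scratch space under an in-progress suffix and only flipped to its final relocated name with File.renameTo (atomic on the same file system) once the move has fully completed. A crash mid-move leaves an in-progress directory behind rather than one that looks fully relocated.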
From source file:com.linkedin.pinot.core.data.manager.realtime.RealtimeSegmentDataManager.java
public RealtimeSegmentDataManager(final RealtimeSegmentZKMetadata segmentMetadata,
    final AbstractTableConfig tableConfig, InstanceZKMetadata instanceMetadata,
    RealtimeTableDataManager realtimeResourceManager, final String resourceDataDir, final ReadMode mode,
    final Schema schema, final ServerMetrics serverMetrics) throws Exception {
  super();
  this.schema = schema;
  this.extractor = (PlainFieldExtractor) FieldExtractorFactory.getPlainFieldExtractor(schema);
  this.serverMetrics = serverMetrics;
  this.segmentName = segmentMetadata.getSegmentName();
  this.tableName = tableConfig.getTableName();
  IndexingConfig indexingConfig = tableConfig.getIndexingConfig();
  if (indexingConfig.getSortedColumn().isEmpty()) {
    LOGGER.info("RealtimeDataResourceZKMetadata contains no information about sorted column for segment {}",
        segmentName);
    this.sortedColumn = null;
  } else {
    String firstSortedColumn = indexingConfig.getSortedColumn().get(0);
    if (this.schema.hasColumn(firstSortedColumn)) {
      LOGGER.info("Setting sorted column name: {} from RealtimeDataResourceZKMetadata for segment {}",
          firstSortedColumn, segmentName);
      this.sortedColumn = firstSortedColumn;
    } else {
      LOGGER.warn(
          "Sorted column name: {} from RealtimeDataResourceZKMetadata is not existed in schema for segment {}.",
          firstSortedColumn, segmentName);
      this.sortedColumn = null;
    }
  }
  // inverted index columns
  invertedIndexColumns = indexingConfig.getInvertedIndexColumns();
  this.segmentMetatdaZk = segmentMetadata;
  // create and init stream provider config
  // TODO : ideally resourceMetatda should create and give back a streamProviderConfig
  this.kafkaStreamProviderConfig = new KafkaHighLevelStreamProviderConfig();
  this.kafkaStreamProviderConfig.init(tableConfig, instanceMetadata, schema);
  segmentLogger = LoggerFactory.getLogger(RealtimeSegmentDataManager.class.getName() + "_" + segmentName
      + "_" + kafkaStreamProviderConfig.getStreamName());
  segmentLogger.info("Created segment data manager with Sorted column:{}, invertedIndexColumns:{}",
      sortedColumn, invertedIndexColumns);
  segmentEndTimeThreshold = start + kafkaStreamProviderConfig.getTimeThresholdToFlushSegment();
  this.resourceDir = new File(resourceDataDir);
  this.resourceTmpDir = new File(resourceDataDir, "_tmp");
  if (!resourceTmpDir.exists()) {
    resourceTmpDir.mkdirs();
  }
  // create and init stream provider
  final String tableName = tableConfig.getTableName();
  this.kafkaStreamProvider = StreamProviderFactory.buildStreamProvider();
  this.kafkaStreamProvider.init(kafkaStreamProviderConfig, tableName, serverMetrics);
  this.kafkaStreamProvider.start();
  this.tableStreamName = tableName + "_" + kafkaStreamProviderConfig.getStreamName();
  // lets create a new realtime segment
  segmentLogger.info("Started kafka stream provider");
  realtimeSegment = new RealtimeSegmentImpl(schema, kafkaStreamProviderConfig.getSizeThresholdToFlushSegment(),
      tableName, segmentMetadata.getSegmentName(), kafkaStreamProviderConfig.getStreamName(), serverMetrics);
  realtimeSegment.setSegmentMetadata(segmentMetadata, this.schema);
  notifier = realtimeResourceManager;
  segmentStatusTask = new TimerTask() {
    @Override
    public void run() {
      computeKeepIndexing();
    }
  };
  // start the indexing thread
  indexingThread = new Thread(new Runnable() {
    @Override
    public void run() {
      // continue indexing until criteria is met
      boolean notFull = true;
      long exceptionSleepMillis = 50L;
      segmentLogger.info("Starting to collect rows");
      do {
        GenericRow row = null;
        try {
          row = kafkaStreamProvider.next();
          row = extractor.transform(row);
          if (row != null) {
            notFull = realtimeSegment.index(row);
            exceptionSleepMillis = 50L;
          }
        } catch (Exception e) {
          segmentLogger.warn("Caught exception while indexing row, sleeping for {} ms, row contents {}",
              exceptionSleepMillis, row, e);
          // Sleep for a short time as to avoid filling the logs with exceptions too quickly
          Uninterruptibles.sleepUninterruptibly(exceptionSleepMillis, TimeUnit.MILLISECONDS);
          exceptionSleepMillis = Math.min(60000L, exceptionSleepMillis * 2);
        } catch (Error e) {
          segmentLogger.error("Caught error in indexing thread", e);
          throw e;
        }
      } while (notFull && keepIndexing && (!isShuttingDown));
      if (isShuttingDown) {
        segmentLogger.info("Shutting down indexing thread!");
        return;
      }
      try {
        int numErrors, numConversions, numNulls, numNullCols;
        if ((numErrors = extractor.getTotalErrors()) > 0) {
          serverMetrics.addMeteredTableValue(tableStreamName, ServerMeter.ROWS_WITH_ERRORS, (long) numErrors);
        }
        Map<String, Integer> errorCount = extractor.getError_count();
        for (String column : errorCount.keySet()) {
          if ((numErrors = errorCount.get(column)) > 0) {
            segmentLogger.warn("Column {} had {} rows with errors", column, numErrors);
          }
        }
        if ((numConversions = extractor.getTotalConversions()) > 0) {
          serverMetrics.addMeteredTableValue(tableStreamName, ServerMeter.ROWS_NEEDING_CONVERSIONS,
              (long) numConversions);
          segmentLogger.info("{} rows needed conversions ", numConversions);
        }
        if ((numNulls = extractor.getTotalNulls()) > 0) {
          serverMetrics.addMeteredTableValue(tableStreamName, ServerMeter.ROWS_WITH_NULL_VALUES,
              (long) numNulls);
          segmentLogger.info("{} rows had null columns", numNulls);
        }
        if ((numNullCols = extractor.getTotalNullCols()) > 0) {
          serverMetrics.addMeteredTableValue(tableStreamName, ServerMeter.COLUMNS_WITH_NULL_VALUES,
              (long) numNullCols);
          segmentLogger.info("{} columns had null values", numNullCols);
        }
        segmentLogger.info("Indexing threshold reached, proceeding with index conversion");
        // kill the timer first
        segmentStatusTask.cancel();
        updateCurrentDocumentCountMetrics();
        segmentLogger.info("Indexed {} raw events, current number of docs = {}",
            realtimeSegment.getRawDocumentCount(), realtimeSegment.getSegmentMetadata().getTotalDocs());
        File tempSegmentFolder = new File(resourceTmpDir, "tmp-" + String.valueOf(System.currentTimeMillis()));
        // lets convert the segment now
        RealtimeSegmentConverter converter = new RealtimeSegmentConverter(realtimeSegment,
            tempSegmentFolder.getAbsolutePath(), schema, segmentMetadata.getTableName(),
            segmentMetadata.getSegmentName(), sortedColumn, invertedIndexColumns);
        segmentLogger.info("Trying to build segment");
        final long buildStartTime = System.nanoTime();
        converter.build();
        final long buildEndTime = System.nanoTime();
        segmentLogger.info("Built segment in {} ms",
            TimeUnit.MILLISECONDS.convert((buildEndTime - buildStartTime), TimeUnit.NANOSECONDS));
        File destDir = new File(resourceDataDir, segmentMetadata.getSegmentName());
        FileUtils.deleteQuietly(destDir);
        FileUtils.moveDirectory(tempSegmentFolder.listFiles()[0], destDir);
        FileUtils.deleteQuietly(tempSegmentFolder);
        long segStartTime = realtimeSegment.getMinTime();
        long segEndTime = realtimeSegment.getMaxTime();
        TimeUnit timeUnit = schema.getTimeFieldSpec().getOutgoingGranularitySpec().getTimeType();
        Configuration configuration = new PropertyListConfiguration();
        configuration.setProperty(IndexLoadingConfigMetadata.KEY_OF_LOADING_INVERTED_INDEX,
            invertedIndexColumns);
        IndexLoadingConfigMetadata configMetadata = new IndexLoadingConfigMetadata(configuration);
        IndexSegment segment = Loaders.IndexSegment
            .load(new File(resourceDir, segmentMetatdaZk.getSegmentName()), mode, configMetadata);
        segmentLogger.info("Committing Kafka offsets");
        boolean commitSuccessful = false;
        try {
          kafkaStreamProvider.commit();
          commitSuccessful = true;
          kafkaStreamProvider.shutdown();
          segmentLogger.info("Successfully committed Kafka offsets, consumer release requested.");
        } catch (Throwable e) {
          // If we got here, it means that either the commit or the shutdown failed. Considering that the
          // KafkaConsumerManager delays shutdown and only adds the consumer to be released in a deferred way, this
          // likely means that writing the Kafka offsets failed.
          //
          // The old logic (mark segment as done, then commit offsets and shutdown the consumer immediately) would die
          // in a terrible way, leaving the consumer open and causing us to only get half the records from that point
          // on. In this case, because we keep the consumer open for a little while, we should be okay if the
          // controller reassigns us a new segment before the consumer gets released. Hopefully by the next time that
          // we get to committing the offsets, the transient ZK failure that caused the write to fail will not
          // happen again and everything will be good.
          //
          // Several things can happen:
          // - The controller reassigns us a new segment before we release the consumer (KafkaConsumerManager will
          //   keep the consumer open for about a minute, which should be enough time for the controller to reassign
          //   us a new segment) and the next time we close the segment the offsets commit successfully; we're good.
          // - The controller reassigns us a new segment, but after we released the consumer (if the controller was
          //   down or there was a ZK failure on writing the Kafka offsets but not the Helix state). We lose whatever
          //   data was in this segment. Not good.
          // - The server crashes after this comment and before we mark the current segment as done; if the Kafka
          //   offsets didn't get written, then when the server restarts it'll start consuming the current segment
          //   from the previously committed offsets; we're good.
          // - The server crashes after this comment, the Kafka offsets were written but the segment wasn't marked as
          //   done in Helix, but we got a failure (or not) on the commit; we lose whatever data was in this segment
          //   if we restart the server (not good). If we manually mark the segment as done in Helix by editing the
          //   state in ZK, everything is good, we'll consume a new segment that starts from the correct offsets.
          //
          // This is still better than the previous logic, which would have these failure modes:
          // - Consumer was left open and the controller reassigned us a new segment; consume only half the events
          //   (because there are two consumers and Kafka will try to rebalance partitions between those two)
          // - We got a segment assigned to us before we got around to committing the offsets, reconsume the data that
          //   we got in this segment again, as we're starting consumption from the previously committed offset (eg.
          //   duplicate data).
          //
          // This is still not very satisfactory, which is why this part is due for a redesign.
          //
          // Assuming you got here because the realtime offset commit metric has fired, check the logs to determine
          // which of the above scenarios happened. If you're in one of the good scenarios, then there's nothing to
          // do. If you're not, then based on how critical it is to get those rows back, then your options are:
          // - Wipe the realtime table and reconsume everything (mark the replica as disabled so that clients don't
          //   see query results from partially consumed data, then re-enable it when this replica has caught up)
          // - Accept that those rows are gone in this replica and move on (they'll be replaced by good offline data
          //   soon anyway)
          // - If there's a replica that has consumed properly, you could shut it down, copy its segments onto this
          //   replica, assign a new consumer group id to this replica, rename the copied segments and edit their
          //   metadata to reflect the new consumer group id, copy the Kafka offsets from the shutdown replica onto
          //   the new consumer group id and then restart both replicas. This should get you the missing rows.
          segmentLogger.error("FATAL: Exception committing or shutting down consumer commitSuccessful={}",
              commitSuccessful, e);
          serverMetrics.addMeteredTableValue(tableName, ServerMeter.REALTIME_OFFSET_COMMIT_EXCEPTIONS, 1L);
          if (!commitSuccessful) {
            kafkaStreamProvider.shutdown();
          }
        }
        try {
          segmentLogger.info("Marking current segment as completed in Helix");
          RealtimeSegmentZKMetadata metadataToOverwrite = new RealtimeSegmentZKMetadata();
          metadataToOverwrite.setTableName(segmentMetadata.getTableName());
          metadataToOverwrite.setSegmentName(segmentMetadata.getSegmentName());
          metadataToOverwrite.setSegmentType(SegmentType.OFFLINE);
          metadataToOverwrite.setStatus(Status.DONE);
          metadataToOverwrite.setStartTime(segStartTime);
          metadataToOverwrite.setEndTime(segEndTime);
          metadataToOverwrite.setTotalRawDocs(realtimeSegment.getSegmentMetadata().getTotalDocs());
          metadataToOverwrite.setTimeUnit(timeUnit);
          notifier.notifySegmentCommitted(metadataToOverwrite, segment);
          segmentLogger.info(
              "Completed write of segment completion to Helix, waiting for controller to assign a new segment");
        } catch (Exception e) {
          if (commitSuccessful) {
            segmentLogger.error(
                "Offsets were committed to Kafka but we were unable to mark this segment as completed in Helix. Manually mark the segment as completed in Helix; restarting this instance will result in data loss.",
                e);
          } else {
            segmentLogger.warn(
                "Caught exception while marking segment as completed in Helix. Offsets were not written, restarting the instance should be safe.",
                e);
          }
        }
      } catch (Exception e) {
        segmentLogger.error("Caught exception in the realtime indexing thread", e);
      }
    }
  });
  indexingThread.start();
  serverMetrics.addValueToTableGauge(tableName, ServerGauge.SEGMENT_COUNT, 1L);
  segmentLogger.debug("scheduling keepIndexing timer check");
  // start a schedule timer to keep track of the segment
  TimerService.timer.schedule(segmentStatusTask, ONE_MINUTE_IN_MILLSEC, ONE_MINUTE_IN_MILLSEC);
  segmentLogger.info("finished scheduling keepIndexing timer check");
}
From source file:dpfmanager.shell.modules.threading.core.ThreadingService.java
private void moveServerFolder(Long uuid, String internal) {
  try {
    File dest = new File(internal + "input");
    File src = new File(DPFManagerProperties.getServerDir() + "/" + uuid);
    if (src.exists() && src.isDirectory()) {
      FileUtils.moveDirectory(src, dest);
    }
  } catch (Exception e) {
    context.send(BasicConfig.MODULE_MESSAGE, new ExceptionMessage("Exception in remove server folder", e));
  }
}
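Note that only the source is validated here (it exists and is a directory); if dest already exists, moveDirectory will throw (a FileExistsException in recent commons-io releases), and the failure surfaces only through the generic exception message, which incidentally says "remove" rather than "move".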
From source file:com.linkedin.pinot.core.data.manager.realtime.HLRealtimeSegmentDataManager.java
public HLRealtimeSegmentDataManager(final RealtimeSegmentZKMetadata segmentMetadata,
    final AbstractTableConfig tableConfig, InstanceZKMetadata instanceMetadata,
    final RealtimeTableDataManager realtimeTableDataManager, final String resourceDataDir, final ReadMode mode,
    final Schema schema, final ServerMetrics serverMetrics) throws Exception {
  super();
  _realtimeTableDataManager = realtimeTableDataManager;
  final String segmentVersionStr = tableConfig.getIndexingConfig().getSegmentFormatVersion();
  _segmentVersion = SegmentVersion.fromStringOrDefault(segmentVersionStr);
  this.schema = schema;
  this.extractor = (PlainFieldExtractor) FieldExtractorFactory.getPlainFieldExtractor(schema);
  this.serverMetrics = serverMetrics;
  this.segmentName = segmentMetadata.getSegmentName();
  this.tableName = tableConfig.getTableName();
  IndexingConfig indexingConfig = tableConfig.getIndexingConfig();
  if (indexingConfig.getSortedColumn().isEmpty()) {
    LOGGER.info("RealtimeDataResourceZKMetadata contains no information about sorted column for segment {}",
        segmentName);
    this.sortedColumn = null;
  } else {
    String firstSortedColumn = indexingConfig.getSortedColumn().get(0);
    if (this.schema.hasColumn(firstSortedColumn)) {
      LOGGER.info("Setting sorted column name: {} from RealtimeDataResourceZKMetadata for segment {}",
          firstSortedColumn, segmentName);
      this.sortedColumn = firstSortedColumn;
    } else {
      LOGGER.warn(
          "Sorted column name: {} from RealtimeDataResourceZKMetadata is not existed in schema for segment {}.",
          firstSortedColumn, segmentName);
      this.sortedColumn = null;
    }
  }
  // inverted index columns
  invertedIndexColumns = indexingConfig.getInvertedIndexColumns();
  if (sortedColumn != null && !invertedIndexColumns.contains(sortedColumn)) {
    invertedIndexColumns.add(sortedColumn);
  }
  this.segmentMetatdaZk = segmentMetadata;
  // create and init stream provider config
  // TODO : ideally resourceMetatda should create and give back a streamProviderConfig
  this.kafkaStreamProviderConfig = new KafkaHighLevelStreamProviderConfig();
  this.kafkaStreamProviderConfig.init(tableConfig, instanceMetadata, schema);
  segmentLogger = LoggerFactory.getLogger(HLRealtimeSegmentDataManager.class.getName() + "_" + segmentName
      + "_" + kafkaStreamProviderConfig.getStreamName());
  segmentLogger.info("Created segment data manager with Sorted column:{}, invertedIndexColumns:{}",
      sortedColumn, invertedIndexColumns);
  segmentEndTimeThreshold = start + kafkaStreamProviderConfig.getTimeThresholdToFlushSegment();
  this.resourceDir = new File(resourceDataDir);
  this.resourceTmpDir = new File(resourceDataDir, "_tmp");
  if (!resourceTmpDir.exists()) {
    resourceTmpDir.mkdirs();
  }
  // create and init stream provider
  final String tableName = tableConfig.getTableName();
  this.kafkaStreamProvider = StreamProviderFactory.buildStreamProvider();
  this.kafkaStreamProvider.init(kafkaStreamProviderConfig, tableName, serverMetrics);
  this.kafkaStreamProvider.start();
  this.tableStreamName = tableName + "_" + kafkaStreamProviderConfig.getStreamName();
  // lets create a new realtime segment
  segmentLogger.info("Started kafka stream provider");
  realtimeSegment = new RealtimeSegmentImpl(schema, kafkaStreamProviderConfig.getSizeThresholdToFlushSegment(),
      tableName, segmentMetadata.getSegmentName(), kafkaStreamProviderConfig.getStreamName(), serverMetrics,
      invertedIndexColumns);
  realtimeSegment.setSegmentMetadata(segmentMetadata, this.schema);
  notifier = realtimeTableDataManager;
  segmentStatusTask = new TimerTask() {
    @Override
    public void run() {
      computeKeepIndexing();
    }
  };
  // start the indexing thread
  indexingThread = new Thread(new Runnable() {
    @Override
    public void run() {
      // continue indexing until criteria is met
      boolean notFull = true;
      long exceptionSleepMillis = 50L;
      segmentLogger.info("Starting to collect rows");
      do {
        GenericRow row = null;
        try {
          row = kafkaStreamProvider.next();
          if (row != null) {
            row = extractor.transform(row);
            notFull = realtimeSegment.index(row);
            exceptionSleepMillis = 50L;
          }
        } catch (Exception e) {
          segmentLogger.warn("Caught exception while indexing row, sleeping for {} ms, row contents {}",
              exceptionSleepMillis, row, e);
          // Sleep for a short time as to avoid filling the logs with exceptions too quickly
          Uninterruptibles.sleepUninterruptibly(exceptionSleepMillis, TimeUnit.MILLISECONDS);
          exceptionSleepMillis = Math.min(60000L, exceptionSleepMillis * 2);
        } catch (Error e) {
          segmentLogger.error("Caught error in indexing thread", e);
          throw e;
        }
      } while (notFull && keepIndexing && (!isShuttingDown));
      if (isShuttingDown) {
        segmentLogger.info("Shutting down indexing thread!");
        return;
      }
      try {
        int numErrors, numConversions, numNulls, numNullCols;
        if ((numErrors = extractor.getTotalErrors()) > 0) {
          serverMetrics.addMeteredTableValue(tableStreamName, ServerMeter.ROWS_WITH_ERRORS, (long) numErrors);
        }
        Map<String, Integer> errorCount = extractor.getErrorCount();
        for (String column : errorCount.keySet()) {
          if ((numErrors = errorCount.get(column)) > 0) {
            segmentLogger.warn("Column {} had {} rows with errors", column, numErrors);
          }
        }
        if ((numConversions = extractor.getTotalConversions()) > 0) {
          serverMetrics.addMeteredTableValue(tableStreamName, ServerMeter.ROWS_NEEDING_CONVERSIONS,
              (long) numConversions);
          segmentLogger.info("{} rows needed conversions ", numConversions);
        }
        if ((numNulls = extractor.getTotalNulls()) > 0) {
          serverMetrics.addMeteredTableValue(tableStreamName, ServerMeter.ROWS_WITH_NULL_VALUES,
              (long) numNulls);
          segmentLogger.info("{} rows had null columns", numNulls);
        }
        if ((numNullCols = extractor.getTotalNullCols()) > 0) {
          serverMetrics.addMeteredTableValue(tableStreamName, ServerMeter.COLUMNS_WITH_NULL_VALUES,
              (long) numNullCols);
          segmentLogger.info("{} columns had null values", numNullCols);
        }
        segmentLogger.info("Indexing threshold reached, proceeding with index conversion");
        // kill the timer first
        segmentStatusTask.cancel();
        updateCurrentDocumentCountMetrics();
        segmentLogger.info("Indexed {} raw events, current number of docs = {}",
            realtimeSegment.getRawDocumentCount(), realtimeSegment.getSegmentMetadata().getTotalDocs());
        File tempSegmentFolder = new File(resourceTmpDir, "tmp-" + String.valueOf(System.currentTimeMillis()));
        // lets convert the segment now
        RealtimeSegmentConverter converter = new RealtimeSegmentConverter(realtimeSegment,
            tempSegmentFolder.getAbsolutePath(), schema, segmentMetadata.getTableName(),
            segmentMetadata.getSegmentName(), sortedColumn, invertedIndexColumns);
        segmentLogger.info("Trying to build segment");
        final long buildStartTime = System.nanoTime();
        converter.build(_segmentVersion);
        final long buildEndTime = System.nanoTime();
        segmentLogger.info("Built segment in {} ms",
            TimeUnit.MILLISECONDS.convert((buildEndTime - buildStartTime), TimeUnit.NANOSECONDS));
        File destDir = new File(resourceDataDir, segmentMetadata.getSegmentName());
        FileUtils.deleteQuietly(destDir);
        FileUtils.moveDirectory(tempSegmentFolder.listFiles()[0], destDir);
        FileUtils.deleteQuietly(tempSegmentFolder);
        long segStartTime = realtimeSegment.getMinTime();
        long segEndTime = realtimeSegment.getMaxTime();
        TimeUnit timeUnit = schema.getTimeFieldSpec().getOutgoingGranularitySpec().getTimeType();
        IndexSegment segment = Loaders.IndexSegment.load(
            new File(resourceDir, segmentMetatdaZk.getSegmentName()), mode,
            realtimeTableDataManager.getIndexLoadingConfigMetadata());
        segmentLogger.info("Committing Kafka offsets");
        boolean commitSuccessful = false;
        try {
          kafkaStreamProvider.commit();
          commitSuccessful = true;
          kafkaStreamProvider.shutdown();
          segmentLogger.info("Successfully committed Kafka offsets, consumer release requested.");
        } catch (Throwable e) {
          // If we got here, it means that either the commit or the shutdown failed. Considering that the
          // KafkaConsumerManager delays shutdown and only adds the consumer to be released in a deferred way, this
          // likely means that writing the Kafka offsets failed.
          //
          // The old logic (mark segment as done, then commit offsets and shutdown the consumer immediately) would die
          // in a terrible way, leaving the consumer open and causing us to only get half the records from that point
          // on. In this case, because we keep the consumer open for a little while, we should be okay if the
          // controller reassigns us a new segment before the consumer gets released. Hopefully by the next time that
          // we get to committing the offsets, the transient ZK failure that caused the write to fail will not
          // happen again and everything will be good.
          //
          // Several things can happen:
          // - The controller reassigns us a new segment before we release the consumer (KafkaConsumerManager will
          //   keep the consumer open for about a minute, which should be enough time for the controller to reassign
          //   us a new segment) and the next time we close the segment the offsets commit successfully; we're good.
          // - The controller reassigns us a new segment, but after we released the consumer (if the controller was
          //   down or there was a ZK failure on writing the Kafka offsets but not the Helix state). We lose whatever
          //   data was in this segment. Not good.
          // - The server crashes after this comment and before we mark the current segment as done; if the Kafka
          //   offsets didn't get written, then when the server restarts it'll start consuming the current segment
          //   from the previously committed offsets; we're good.
          // - The server crashes after this comment, the Kafka offsets were written but the segment wasn't marked as
          //   done in Helix, but we got a failure (or not) on the commit; we lose whatever data was in this segment
          //   if we restart the server (not good). If we manually mark the segment as done in Helix by editing the
          //   state in ZK, everything is good, we'll consume a new segment that starts from the correct offsets.
          //
          // This is still better than the previous logic, which would have these failure modes:
          // - Consumer was left open and the controller reassigned us a new segment; consume only half the events
          //   (because there are two consumers and Kafka will try to rebalance partitions between those two)
          // - We got a segment assigned to us before we got around to committing the offsets, reconsume the data that
          //   we got in this segment again, as we're starting consumption from the previously committed offset (eg.
          //   duplicate data).
          //
          // This is still not very satisfactory, which is why this part is due for a redesign.
          //
          // Assuming you got here because the realtime offset commit metric has fired, check the logs to determine
          // which of the above scenarios happened. If you're in one of the good scenarios, then there's nothing to
          // do. If you're not, then based on how critical it is to get those rows back, then your options are:
          // - Wipe the realtime table and reconsume everything (mark the replica as disabled so that clients don't
          //   see query results from partially consumed data, then re-enable it when this replica has caught up)
          // - Accept that those rows are gone in this replica and move on (they'll be replaced by good offline data
          //   soon anyway)
          // - If there's a replica that has consumed properly, you could shut it down, copy its segments onto this
          //   replica, assign a new consumer group id to this replica, rename the copied segments and edit their
          //   metadata to reflect the new consumer group id, copy the Kafka offsets from the shutdown replica onto
          //   the new consumer group id and then restart both replicas. This should get you the missing rows.
          segmentLogger.error("FATAL: Exception committing or shutting down consumer commitSuccessful={}",
              commitSuccessful, e);
          serverMetrics.addMeteredTableValue(tableName, ServerMeter.REALTIME_OFFSET_COMMIT_EXCEPTIONS, 1L);
          if (!commitSuccessful) {
            kafkaStreamProvider.shutdown();
          }
        }
        try {
          segmentLogger.info("Marking current segment as completed in Helix");
          RealtimeSegmentZKMetadata metadataToOverwrite = new RealtimeSegmentZKMetadata();
          metadataToOverwrite.setTableName(segmentMetadata.getTableName());
          metadataToOverwrite.setSegmentName(segmentMetadata.getSegmentName());
          metadataToOverwrite.setSegmentType(SegmentType.OFFLINE);
          metadataToOverwrite.setStatus(Status.DONE);
          metadataToOverwrite.setStartTime(segStartTime);
          metadataToOverwrite.setEndTime(segEndTime);
          metadataToOverwrite.setTotalRawDocs(realtimeSegment.getSegmentMetadata().getTotalDocs());
          metadataToOverwrite.setTimeUnit(timeUnit);
          notifier.notifySegmentCommitted(metadataToOverwrite, segment);
          segmentLogger.info(
              "Completed write of segment completion to Helix, waiting for controller to assign a new segment");
        } catch (Exception e) {
          if (commitSuccessful) {
            segmentLogger.error(
                "Offsets were committed to Kafka but we were unable to mark this segment as completed in Helix. Manually mark the segment as completed in Helix; restarting this instance will result in data loss.",
                e);
          } else {
            segmentLogger.warn(
                "Caught exception while marking segment as completed in Helix. Offsets were not written, restarting the instance should be safe.",
                e);
          }
        }
      } catch (Exception e) {
        segmentLogger.error("Caught exception in the realtime indexing thread", e);
      }
    }
  });
  indexingThread.start();
  serverMetrics.addValueToTableGauge(tableName, ServerGauge.SEGMENT_COUNT, 1L);
  segmentLogger.debug("scheduling keepIndexing timer check");
  // start a schedule timer to keep track of the segment
  TimerService.timer.schedule(segmentStatusTask, ONE_MINUTE_IN_MILLSEC, ONE_MINUTE_IN_MILLSEC);
  segmentLogger.info("finished scheduling keepIndexing timer check");
}
From source file:com.ephesoft.gxt.foldermanager.server.FolderManagerServiceImpl.java
@Override
public List<String> cutFiles(List<String> cutFilesList, String currentFolderPath) throws UIException {
  List<String> resultList = new ArrayList<String>();
  for (String filePath : cutFilesList) {
    File srcFile = new File(filePath);
    String fileName = srcFile.getName();
    if (srcFile.exists()) {
      try {
        String newPathName = currentFolderPath + File.separator + srcFile.getName();
        File newFile = new File(newPathName);
        if (!newFile.exists()) {
          if (srcFile.isFile()) {
            FileUtils.moveFile(srcFile, newFile);
          } else {
            FileUtils.moveDirectory(srcFile, newFile);
          }
        } else {
          resultList.add(fileName);
          // throw new UIException(FolderManagementMessages.CANNOT_COMPLETE_CUT_PASTE_OPERATION_AS_THE_FILE_FOLDER
          //     + fileName + FolderManagementMessages.ALREADY_EXISTS);
        }
      } catch (IOException e) {
        throw new UIException(
            FolderManagementMessages.EXCEPTION_OCCURRED_WHILE_CUT_PASTE_OPERATION_COULD_NOT_COMPLETE_OPERATION);
      }
    } else {
      resultList.add(srcFile.getName());
      // throw new UIException(FolderManagementMessages.EXCEPTION_OCCURRED_WHILE_CUT_PASTE_OPERATION_FILE_NOT_FOUND
      //     + FolderManagementConstants.QUOTES + srcFile.getName() + FolderManagementConstants.QUOTES);
    }
  }
  return resultList;
}
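The file-versus-directory branch above is common enough that commons-io also ships FileUtils.moveToDirectory(src, destDir, createDestDir), which dispatches to moveFile or moveDirectory internally and moves the source into the destination folder. A minimal sketch with hypothetical paths:

import java.io.File;
import java.io.IOException;

import org.apache.commons.io.FileUtils;

public class MoveToDirectorySketch {
  public static void main(String[] args) throws IOException {
    File src = new File("/tmp/example-item");        // hypothetical path; may be a file or a directory
    File destFolder = new File("/tmp/example-dest"); // created if absent (third argument)
    FileUtils.touch(src);                            // create a placeholder so the move has something to act on
    FileUtils.moveToDirectory(src, destFolder, true); // ends up at /tmp/example-dest/example-item
  }
}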
From source file:com.taobao.android.builder.tools.diff.DiffResExtractor.java
/**
 * assets : compared entry-by-entry between the current apk and the base apk
 * res : taken from diffResFiles and compared against the base apk
 *
 * @param diffResFiles
 * @param currentApk
 * @param baseApk
 * @param fullResDir
 * @param destDir
 * @throws IOException
 */
public static void extractDiff(Set<String> diffResFiles, File currentApk, File baseApk, File fullResDir,
    File destDir) throws IOException {
  if (!currentApk.exists() || !baseApk.exists() || !fullResDir.exists()) {
    return;
  }
  FileUtils.deleteDirectory(destDir);
  destDir.mkdirs();
  File tmpFolder = new File(destDir.getParentFile(), "tmp-diffRes");
  FileUtils.deleteDirectory(tmpFolder);
  tmpFolder.mkdirs();
  File apkDir = new File(tmpFolder, "newApkDir");
  File baseApkDir = new File(tmpFolder, "baseApkDir");
  ZipUtils.unzip(currentApk, apkDir.getAbsolutePath());
  ZipUtils.unzip(baseApk, baseApkDir.getAbsolutePath());
  // compare res and assets
  Collection<File> files = FileUtils.listFiles(apkDir, TrueFileFilter.INSTANCE, TrueFileFilter.INSTANCE);
  int basePathLength = apkDir.getAbsolutePath().length();
  List<String> diffResPath = new ArrayList<String>();
  // assets
  for (File file : files) {
    String relativePath = file.getAbsolutePath().substring(basePathLength);
    if (!relativePath.startsWith("/assets/")) {
      continue;
    }
    File baseFile = new File(baseApkDir, relativePath);
    if (!baseFile.exists()) {
      diffResPath.add(relativePath);
      continue;
    }
    if (!MD5Util.getFileMD5(file).equals(MD5Util.getFileMD5(baseFile))) {
      File rawFile = new File(apkDir, relativePath);
      FileUtils.copyFile(rawFile, new File(destDir, relativePath));
    }
  }
  // res
  for (String diffFile : diffResFiles) {
    File baseFile = new File(baseApkDir, diffFile);
    File currentFile = new File(apkDir, diffFile);
    if (baseFile.exists() && currentFile.exists()
        && MD5Util.getFileMD5(baseFile).equals(MD5Util.getFileMD5(currentFile))) {
      continue;
    }
    // copy file
    File rawFile = new File(fullResDir, diffFile);
    if (rawFile.exists()) {
      FileUtils.copyFile(rawFile, new File(destDir, diffFile));
    }
  }
  // ensure resources.arsc will be generated even when only assets changed
  File assetsDir = new File(destDir, "assets");
  File resDir = new File(destDir, "res");
  if (assetsDir.exists() && !resDir.exists()) {
    File valuesDir = new File(resDir, "values");
    FileUtils.forceMkdir(valuesDir);
    File stringsFile = new File(valuesDir, "strings.xml");
    UUID uuid = UUID.randomUUID();
    FileUtils.writeStringToFile(stringsFile, String.format(
        "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<resources>\n <string name=\"%s\">%s</string>\n</resources>\n",
        uuid, uuid), "UTF-8", false);
  }
  final Pattern densityOnlyPattern = Pattern.compile("[a-zA-Z]+-[a-zA-Z]+dpi");
  if (resDir.exists()) {
    File[] resDirs = resDir.listFiles();
    if (resDirs != null) {
      for (File file : resDirs) {
        Matcher m = densityOnlyPattern.matcher(file.getName());
        if (m.matches()) {
          FileUtils.moveDirectory(file, new File(file.getAbsolutePath() + "-v4"));
        }
      }
    }
  }
}
From source file:aurelienribon.gdxsetupui.ProjectSetup.java
private void move(File base, String path1, String path2) throws IOException {
  if (path1.equals(path2))
    return;
  File file1 = new File(base, FilenameUtils.normalize(path1));
  File file2 = new File(base, FilenameUtils.normalize(path2));
  FileUtils.deleteQuietly(file2);
  if (file1.isDirectory())
    FileUtils.moveDirectory(file1, file2);
  else
    FileUtils.moveFile(file1, file2);
}