List of usage examples for org.apache.commons.configuration Configuration setProperty
void setProperty(String key, Object value);
From source file:at.salzburgresearch.kmt.zkconfig.ZookeeperConfigurationTest.java
/**
 * Verifies that int values (both primitive and boxed) survive a round trip
 * through ZookeeperConfiguration.setProperty / getInt / getInteger.
 */
@Test
public void testInt() throws Exception {
    Configuration config = new ZookeeperConfiguration(zkConnection, 5000, "/test");
    final String key = UUID.randomUUID().toString();
    final Random random = new Random();
    final int val1 = random.nextInt();
    final Integer val2 = random.nextInt();

    // A freshly generated random key must start out unset.
    assertThat(config.getProperty(key), nullValue());

    config.setProperty(key, val1);
    assertEquals(val1, config.getInt(key));
    // Integer.valueOf replaces the deprecated `new Integer(...)` boxing constructor.
    assertEquals(Integer.valueOf(val1), config.getInteger(key, val2));

    config.setProperty(key, val2);
    assertEquals(val2.intValue(), config.getInt(key));
    assertEquals(val2, config.getInteger(key, val1));
}
From source file:at.salzburgresearch.kmt.zkconfig.ZookeeperConfigurationTest.java
/**
 * Round-trips long values (primitive and boxed) through the Zookeeper-backed
 * configuration and exercises both getLong overloads.
 */
@Test
public void testLong() throws Exception {
    Configuration config = new ZookeeperConfiguration(zkConnection, 5000, "/test");
    final String propertyKey = UUID.randomUUID().toString();
    final Random rnd = new Random();
    final long primitive = rnd.nextLong();
    final Long boxed = rnd.nextLong();

    // Nothing may be stored under a freshly generated key yet.
    assertThat(config.getProperty(propertyKey), nullValue());

    config.setProperty(propertyKey, primitive);
    assertEquals(primitive, config.getLong(propertyKey));
    assertEquals(Long.valueOf(primitive), config.getLong(propertyKey, boxed));

    config.setProperty(propertyKey, boxed);
    assertEquals(boxed.longValue(), config.getLong(propertyKey));
    assertEquals(boxed, config.getLong(propertyKey, Long.valueOf(primitive)));
}
From source file:at.salzburgresearch.kmt.zkconfig.ZookeeperConfigurationTest.java
/**
 * Verifies that float values (both primitive and boxed) survive a round trip
 * through ZookeeperConfiguration.setProperty / getFloat.
 */
@Test
public void testFloat() throws Exception {
    Configuration config = new ZookeeperConfiguration(zkConnection, 5000, "/test");
    final String key = UUID.randomUUID().toString();
    final Random random = new Random();
    final float val1 = random.nextFloat();
    final Float val2 = random.nextFloat();

    // A freshly generated random key must start out unset.
    assertThat(config.getProperty(key), nullValue());

    config.setProperty(key, val1);
    assertEquals(val1, config.getFloat(key), 10e-6);
    // Float.valueOf replaces the deprecated `new Float(...)` boxing constructor.
    assertEquals(Float.valueOf(val1), config.getFloat(key, val2));

    config.setProperty(key, val2);
    assertEquals(val2, config.getFloat(key), 10e-6);
    assertEquals(val2, config.getFloat(key, Float.valueOf(val1)));
}
From source file:at.salzburgresearch.kmt.zkconfig.ZookeeperConfigurationTest.java
/**
 * Round-trips double values (primitive and boxed) through the Zookeeper-backed
 * configuration and exercises both getDouble overloads.
 */
@Test
public void testDouble() throws Exception {
    Configuration config = new ZookeeperConfiguration(zkConnection, 5000, "/test");
    final String propertyKey = UUID.randomUUID().toString();
    final Random rnd = new Random();
    final double primitive = rnd.nextDouble();
    final Double boxed = rnd.nextDouble();

    // Nothing may be stored under a freshly generated key yet.
    assertThat(config.getProperty(propertyKey), nullValue());

    config.setProperty(propertyKey, primitive);
    assertEquals(primitive, config.getDouble(propertyKey), 10e-6);
    assertEquals(Double.valueOf(primitive), config.getDouble(propertyKey, boxed));

    config.setProperty(propertyKey, boxed);
    assertEquals(boxed, config.getDouble(propertyKey), 10e-6);
    assertEquals(boxed, config.getDouble(propertyKey, Double.valueOf(primitive)));
}
From source file:com.linkedin.pinot.perf.PerfBenchmarkDriver.java
public void startServer() throws Exception { if (!conf.shouldStartServer()) { LOGGER.info("Skipping start server step. Assumes server is already started"); return;//from w w w. j a v a 2 s . co m } Configuration serverConfiguration = new PropertiesConfiguration(); serverConfiguration.addProperty(CommonConstants.Server.CONFIG_OF_INSTANCE_DATA_DIR.toString(), serverInstanceDataDir); serverConfiguration.addProperty(CommonConstants.Server.CONFIG_OF_INSTANCE_SEGMENT_TAR_DIR.toString(), serverInstanceSegmentTarDir); serverConfiguration.setProperty("instanceId", serverInstanceName); HelixServerStarter helixServerStarter = new HelixServerStarter(clusterName, zkAddress, serverConfiguration); }
From source file:com.linkedin.pinot.tools.perf.PerfBenchmarkDriver.java
/**
 * Brings up a Pinot server instance in the Helix cluster for the benchmark
 * run, unless the driver configuration says a server is already running.
 *
 * @throws Exception if the Helix server cannot be started
 */
private void startServer() throws Exception {
    if (!_conf.shouldStartServer()) {
        LOGGER.info("Skipping start server step. Assumes server is already started.");
        return;
    }
    Configuration serverConf = new PropertiesConfiguration();
    serverConf.addProperty(CommonConstants.Server.CONFIG_OF_INSTANCE_DATA_DIR, _serverInstanceDataDir);
    serverConf.addProperty(CommonConstants.Server.CONFIG_OF_INSTANCE_SEGMENT_TAR_DIR, _serverInstanceSegmentTarDir);
    serverConf.setProperty(CommonConstants.Server.CONFIG_OF_SEGMENT_FORMAT_VERSION, _segmentFormatVersion);
    serverConf.setProperty("instanceId", _serverInstanceName);
    LOGGER.info("Starting server instance: {}", _serverInstanceName);
    // Construction registers the server with Helix; the reference is discarded.
    new HelixServerStarter(_clusterName, _zkAddress, serverConf);
}
From source file:net.juniper.titan.controller.TitanController.java
private Configuration getTitanConf() { Configuration conf = new BaseConfiguration(); conf.setProperty("index.search.backend", "elasticsearch"); conf.setProperty("index.search.directory", "/tmp/searchindex"); conf.setProperty("index.search.hostname", "127.0.0.1"); conf.setProperty("index.search.client-only", "false"); conf.setProperty("storage.backend", "cassandrathrift"); conf.setProperty("storage.hostname", "10.81.53.213"); conf.setProperty("cache.db-cache", true); conf.setProperty("cache.db-cache-clean-wait", 20); conf.setProperty("cache.db-cache-time", 180000); conf.setProperty("cache.db-cache-size", 0.25); conf.setProperty("index.search.backend", "elasticsearch"); conf.setProperty("index.search.hostname", "10.81.53.213"); conf.setProperty("index.search.directory", "/tmp/searchindex"); conf.setProperty("index.search.elasticsearch.client-only", false); return conf;//from ww w. j a v a2 s . co m }
From source file:com.linkedin.pinot.core.data.manager.realtime.RealtimeSegmentDataManager.java
/**
 * Creates a data manager for a single realtime segment.
 *
 * On construction this:
 *  1. resolves the sorted column and inverted-index columns from the table's
 *     indexing config (falling back to null / logging when absent),
 *  2. creates and starts a Kafka high-level stream provider,
 *  3. creates an in-memory RealtimeSegmentImpl and spawns an indexing thread
 *     that consumes rows until the size threshold is reached (or indexing is
 *     stopped), then converts the segment to its on-disk format, loads it
 *     back, commits Kafka offsets, and notifies Helix that the segment is
 *     done,
 *  4. schedules a timer task that periodically re-evaluates whether to keep
 *     indexing.
 *
 * NOTE(review): reproduced from a code-listing page; the original extraction
 * broke two log-message string literals across lines — they have been rejoined
 * here on the assumption the upstream source had them on one line (confirm
 * against the original file). `start` and `segmentEndTimeThreshold` are fields
 * declared outside this view.
 */
public RealtimeSegmentDataManager(final RealtimeSegmentZKMetadata segmentMetadata,
        final AbstractTableConfig tableConfig, InstanceZKMetadata instanceMetadata,
        RealtimeTableDataManager realtimeResourceManager, final String resourceDataDir, final ReadMode mode,
        final Schema schema, final ServerMetrics serverMetrics) throws Exception {
    super();
    this.schema = schema;
    this.extractor = (PlainFieldExtractor) FieldExtractorFactory.getPlainFieldExtractor(schema);
    this.serverMetrics = serverMetrics;
    this.segmentName = segmentMetadata.getSegmentName();
    this.tableName = tableConfig.getTableName();
    IndexingConfig indexingConfig = tableConfig.getIndexingConfig();
    if (indexingConfig.getSortedColumn().isEmpty()) {
        LOGGER.info("RealtimeDataResourceZKMetadata contains no information about sorted column for segment {}",
                segmentName);
        this.sortedColumn = null;
    } else {
        // Only the first configured sorted column is honored, and only if it
        // actually exists in the schema.
        String firstSortedColumn = indexingConfig.getSortedColumn().get(0);
        if (this.schema.hasColumn(firstSortedColumn)) {
            LOGGER.info("Setting sorted column name: {} from RealtimeDataResourceZKMetadata for segment {}",
                    firstSortedColumn, segmentName);
            this.sortedColumn = firstSortedColumn;
        } else {
            LOGGER.warn(
                    "Sorted column name: {} from RealtimeDataResourceZKMetadata is not existed in schema for segment {}.",
                    firstSortedColumn, segmentName);
            this.sortedColumn = null;
        }
    }
    // inverted index columns
    invertedIndexColumns = indexingConfig.getInvertedIndexColumns();
    this.segmentMetatdaZk = segmentMetadata;
    // create and init stream provider config
    // TODO : ideally resourceMetatda should create and give back a streamProviderConfig
    this.kafkaStreamProviderConfig = new KafkaHighLevelStreamProviderConfig();
    this.kafkaStreamProviderConfig.init(tableConfig, instanceMetadata, schema);
    segmentLogger = LoggerFactory.getLogger(RealtimeSegmentDataManager.class.getName() + "_" + segmentName
            + "_" + kafkaStreamProviderConfig.getStreamName());
    segmentLogger.info("Created segment data manager with Sorted column:{}, invertedIndexColumns:{}",
            sortedColumn, invertedIndexColumns);
    segmentEndTimeThreshold = start + kafkaStreamProviderConfig.getTimeThresholdToFlushSegment();
    this.resourceDir = new File(resourceDataDir);
    this.resourceTmpDir = new File(resourceDataDir, "_tmp");
    if (!resourceTmpDir.exists()) {
        resourceTmpDir.mkdirs();
    }
    // create and init stream provider
    final String tableName = tableConfig.getTableName();
    this.kafkaStreamProvider = StreamProviderFactory.buildStreamProvider();
    this.kafkaStreamProvider.init(kafkaStreamProviderConfig, tableName, serverMetrics);
    this.kafkaStreamProvider.start();
    this.tableStreamName = tableName + "_" + kafkaStreamProviderConfig.getStreamName();
    // lets create a new realtime segment
    segmentLogger.info("Started kafka stream provider");
    realtimeSegment = new RealtimeSegmentImpl(schema, kafkaStreamProviderConfig.getSizeThresholdToFlushSegment(),
            tableName, segmentMetadata.getSegmentName(), kafkaStreamProviderConfig.getStreamName(), serverMetrics);
    realtimeSegment.setSegmentMetadata(segmentMetadata, this.schema);
    notifier = realtimeResourceManager;
    segmentStatusTask = new TimerTask() {
        @Override
        public void run() {
            computeKeepIndexing();
        }
    };
    // start the indexing thread
    indexingThread = new Thread(new Runnable() {
        @Override
        public void run() {
            // continue indexing until criteria is met
            boolean notFull = true;
            long exceptionSleepMillis = 50L;
            segmentLogger.info("Starting to collect rows");
            do {
                GenericRow row = null;
                try {
                    row = kafkaStreamProvider.next();
                    row = extractor.transform(row);
                    if (row != null) {
                        notFull = realtimeSegment.index(row);
                        exceptionSleepMillis = 50L;
                    }
                } catch (Exception e) {
                    segmentLogger.warn(
                            "Caught exception while indexing row, sleeping for {} ms, row contents {}",
                            exceptionSleepMillis, row, e);
                    // Sleep for a short time as to avoid filling the logs with exceptions too quickly
                    Uninterruptibles.sleepUninterruptibly(exceptionSleepMillis, TimeUnit.MILLISECONDS);
                    // Exponential backoff, capped at one minute.
                    exceptionSleepMillis = Math.min(60000L, exceptionSleepMillis * 2);
                } catch (Error e) {
                    segmentLogger.error("Caught error in indexing thread", e);
                    throw e;
                }
            } while (notFull && keepIndexing && (!isShuttingDown));
            if (isShuttingDown) {
                segmentLogger.info("Shutting down indexing thread!");
                return;
            }
            try {
                // Report extractor statistics (errors, conversions, nulls) as metrics.
                int numErrors, numConversions, numNulls, numNullCols;
                if ((numErrors = extractor.getTotalErrors()) > 0) {
                    serverMetrics.addMeteredTableValue(tableStreamName, ServerMeter.ROWS_WITH_ERRORS,
                            (long) numErrors);
                }
                Map<String, Integer> errorCount = extractor.getError_count();
                for (String column : errorCount.keySet()) {
                    if ((numErrors = errorCount.get(column)) > 0) {
                        segmentLogger.warn("Column {} had {} rows with errors", column, numErrors);
                    }
                }
                if ((numConversions = extractor.getTotalConversions()) > 0) {
                    serverMetrics.addMeteredTableValue(tableStreamName, ServerMeter.ROWS_NEEDING_CONVERSIONS,
                            (long) numConversions);
                    segmentLogger.info("{} rows needed conversions ", numConversions);
                }
                if ((numNulls = extractor.getTotalNulls()) > 0) {
                    serverMetrics.addMeteredTableValue(tableStreamName, ServerMeter.ROWS_WITH_NULL_VALUES,
                            (long) numNulls);
                    segmentLogger.info("{} rows had null columns", numNulls);
                }
                if ((numNullCols = extractor.getTotalNullCols()) > 0) {
                    serverMetrics.addMeteredTableValue(tableStreamName, ServerMeter.COLUMNS_WITH_NULL_VALUES,
                            (long) numNullCols);
                    segmentLogger.info("{} columns had null values", numNullCols);
                }
                segmentLogger.info("Indexing threshold reached, proceeding with index conversion");
                // kill the timer first
                segmentStatusTask.cancel();
                updateCurrentDocumentCountMetrics();
                segmentLogger.info("Indexed {} raw events, current number of docs = {}",
                        realtimeSegment.getRawDocumentCount(),
                        realtimeSegment.getSegmentMetadata().getTotalDocs());
                File tempSegmentFolder = new File(resourceTmpDir,
                        "tmp-" + String.valueOf(System.currentTimeMillis()));
                // lets convert the segment now
                RealtimeSegmentConverter converter = new RealtimeSegmentConverter(realtimeSegment,
                        tempSegmentFolder.getAbsolutePath(), schema, segmentMetadata.getTableName(),
                        segmentMetadata.getSegmentName(), sortedColumn, invertedIndexColumns);
                segmentLogger.info("Trying to build segment");
                final long buildStartTime = System.nanoTime();
                converter.build();
                final long buildEndTime = System.nanoTime();
                segmentLogger.info("Built segment in {} ms",
                        TimeUnit.MILLISECONDS.convert((buildEndTime - buildStartTime), TimeUnit.NANOSECONDS));
                // Atomically swap the converted segment into the resource data dir.
                File destDir = new File(resourceDataDir, segmentMetadata.getSegmentName());
                FileUtils.deleteQuietly(destDir);
                FileUtils.moveDirectory(tempSegmentFolder.listFiles()[0], destDir);
                FileUtils.deleteQuietly(tempSegmentFolder);
                long segStartTime = realtimeSegment.getMinTime();
                long segEndTime = realtimeSegment.getMaxTime();
                TimeUnit timeUnit = schema.getTimeFieldSpec().getOutgoingGranularitySpec().getTimeType();
                // Reload the on-disk segment with the inverted-index loading config.
                Configuration configuration = new PropertyListConfiguration();
                configuration.setProperty(IndexLoadingConfigMetadata.KEY_OF_LOADING_INVERTED_INDEX,
                        invertedIndexColumns);
                IndexLoadingConfigMetadata configMetadata = new IndexLoadingConfigMetadata(configuration);
                IndexSegment segment = Loaders.IndexSegment
                        .load(new File(resourceDir, segmentMetatdaZk.getSegmentName()), mode, configMetadata);
                segmentLogger.info("Committing Kafka offsets");
                boolean commitSuccessful = false;
                try {
                    kafkaStreamProvider.commit();
                    commitSuccessful = true;
                    kafkaStreamProvider.shutdown();
                    segmentLogger.info("Successfully committed Kafka offsets, consumer release requested.");
                } catch (Throwable e) {
                    // If we got here, it means that either the commit or the shutdown failed. Considering that the
                    // KafkaConsumerManager delays shutdown and only adds the consumer to be released in a deferred
                    // way, this likely means that writing the Kafka offsets failed.
                    //
                    // The old logic (mark segment as done, then commit offsets and shutdown the consumer
                    // immediately) would die in a terrible way, leaving the consumer open and causing us to only
                    // get half the records from that point on. In this case, because we keep the consumer open for
                    // a little while, we should be okay if the controller reassigns us a new segment before the
                    // consumer gets released. Hopefully by the next time that we get to committing the offsets, the
                    // transient ZK failure that caused the write to fail will not happen again and everything will
                    // be good.
                    //
                    // Several things can happen:
                    // - The controller reassigns us a new segment before we release the consumer
                    //   (KafkaConsumerManager will keep the consumer open for about a minute, which should be
                    //   enough time for the controller to reassign us a new segment) and the next time we close the
                    //   segment the offsets commit successfully; we're good.
                    // - The controller reassigns us a new segment, but after we released the consumer (if the
                    //   controller was down or there was a ZK failure on writing the Kafka offsets but not the
                    //   Helix state). We lose whatever data was in this segment. Not good.
                    // - The server crashes after this comment and before we mark the current segment as done; if
                    //   the Kafka offsets didn't get written, then when the server restarts it'll start consuming
                    //   the current segment from the previously committed offsets; we're good.
                    // - The server crashes after this comment, the Kafka offsets were written but the segment
                    //   wasn't marked as done in Helix, but we got a failure (or not) on the commit; we lose
                    //   whatever data was in this segment if we restart the server (not good). If we manually mark
                    //   the segment as done in Helix by editing the state in ZK, everything is good, we'll consume
                    //   a new segment that starts from the correct offsets.
                    //
                    // This is still better than the previous logic, which would have these failure modes:
                    // - Consumer was left open and the controller reassigned us a new segment; consume only half
                    //   the events (because there are two consumers and Kafka will try to rebalance partitions
                    //   between those two)
                    // - We got a segment assigned to us before we got around to committing the offsets, reconsume
                    //   the data that we got in this segment again, as we're starting consumption from the
                    //   previously committed offset (eg. duplicate data).
                    //
                    // This is still not very satisfactory, which is why this part is due for a redesign.
                    //
                    // Assuming you got here because the realtime offset commit metric has fired, check the logs to
                    // determine which of the above scenarios happened. If you're in one of the good scenarios, then
                    // there's nothing to do. If you're not, then based on how critical it is to get those rows
                    // back, then your options are:
                    // - Wipe the realtime table and reconsume everything (mark the replica as disabled so that
                    //   clients don't see query results from partially consumed data, then re-enable it when this
                    //   replica has caught up)
                    // - Accept that those rows are gone in this replica and move on (they'll be replaced by good
                    //   offline data soon anyway)
                    // - If there's a replica that has consumed properly, you could shut it down, copy its segments
                    //   onto this replica, assign a new consumer group id to this replica, rename the copied
                    //   segments and edit their metadata to reflect the new consumer group id, copy the Kafka
                    //   offsets from the shutdown replica onto the new consumer group id and then restart both
                    //   replicas. This should get you the missing rows.
                    segmentLogger.error(
                            "FATAL: Exception committing or shutting down consumer commitSuccessful={}",
                            commitSuccessful, e);
                    serverMetrics.addMeteredTableValue(tableName, ServerMeter.REALTIME_OFFSET_COMMIT_EXCEPTIONS,
                            1L);
                    if (!commitSuccessful) {
                        kafkaStreamProvider.shutdown();
                    }
                }
                try {
                    segmentLogger.info("Marking current segment as completed in Helix");
                    RealtimeSegmentZKMetadata metadataToOverwrite = new RealtimeSegmentZKMetadata();
                    metadataToOverwrite.setTableName(segmentMetadata.getTableName());
                    metadataToOverwrite.setSegmentName(segmentMetadata.getSegmentName());
                    metadataToOverwrite.setSegmentType(SegmentType.OFFLINE);
                    metadataToOverwrite.setStatus(Status.DONE);
                    metadataToOverwrite.setStartTime(segStartTime);
                    metadataToOverwrite.setEndTime(segEndTime);
                    metadataToOverwrite.setTotalRawDocs(realtimeSegment.getSegmentMetadata().getTotalDocs());
                    metadataToOverwrite.setTimeUnit(timeUnit);
                    notifier.notifySegmentCommitted(metadataToOverwrite, segment);
                    segmentLogger.info(
                            "Completed write of segment completion to Helix, waiting for controller to assign a new segment");
                } catch (Exception e) {
                    if (commitSuccessful) {
                        segmentLogger.error(
                                "Offsets were committed to Kafka but we were unable to mark this segment as completed in Helix. Manually mark the segment as completed in Helix; restarting this instance will result in data loss.",
                                e);
                    } else {
                        segmentLogger.warn(
                                "Caught exception while marking segment as completed in Helix. Offsets were not written, restarting the instance should be safe.",
                                e);
                    }
                }
            } catch (Exception e) {
                segmentLogger.error("Caught exception in the realtime indexing thread", e);
            }
        }
    });
    indexingThread.start();
    serverMetrics.addValueToTableGauge(tableName, ServerGauge.SEGMENT_COUNT, 1L);
    segmentLogger.debug("scheduling keepIndexing timer check");
    // start a schedule timer to keep track of the segment
    TimerService.timer.schedule(segmentStatusTask, ONE_MINUTE_IN_MILLSEC, ONE_MINUTE_IN_MILLSEC);
    segmentLogger.info("finished scheduling keepIndexing timer check");
}
From source file:edu.kit.dama.staging.adapters.DefaultStorageVirtualizationAdapter.java
/**
 * Create the destination folder for the ingest. This folder is located
 * within the storage virtualization system. For this very basic adapter it
 * will be a folder with a fixed scheme telling when the object was
 * uploaded by whom and which transfer id it had. The folder will be
 * generated as follows:
 *
 * <i>archiveURL</i>/<i>pathPattern</i>/SHA1(pTransferId) where
 * <i>pathPattern</i> allows the use of variables like $year, $month, $day
 * and $owner and pTransferId is the numeric id of the transfer.
 *
 * @param pTransferId The transfer id as it comes from the ingest
 * information entity.
 * @param pContext The authorization context whose user/group ids replace the
 * $owner/$group pattern variables and form the repository context.
 *
 * @return An AbstractFile representing the destination for the final
 * ingest, or null if the destination could not be created or is not
 * readable/writeable.
 */
private AbstractFile createDestination(String pTransferId, IAuthorizationContext pContext) {
    if (pTransferId == null) {
        // transfer id is part of the destination, so it must not be null
        throw new IllegalArgumentException("Argument 'pTransferId' must not be 'null'");
    }
    String sUrl = archiveUrl.toString();
    if (pathPattern != null) {
        Calendar c = Calendar.getInstance();
        int year = c.get(Calendar.YEAR);
        // NOTE(review): Calendar.MONTH is zero-based (January == 0); confirm
        // the path pattern is meant to contain 0-11 rather than 1-12.
        int month = c.get(Calendar.MONTH);
        int day = c.get(Calendar.DAY_OF_MONTH);
        String dynPath = pathPattern;
        dynPath = dynPath.replaceAll(Pattern.quote(YEAR_PATTERN), Integer.toString(year))
                .replaceAll(Pattern.quote(MONTH_PATTERN), Integer.toString(month))
                .replaceAll(Pattern.quote(DAY_PATTERN), Integer.toString(day));
        if (dynPath.contains(OWNER_PATTERN) || dynPath.contains(GROUP_PATTERN)) {
            // owner/group should be replaced by pattern definition
            if (pContext == null) {
                // uploader is 'null' but we need it for replacement
                throw new IllegalArgumentException(
                        "Argument 'pOwner' must not be 'null' if pattern contains element '" + OWNER_PATTERN
                                + "' or '" + GROUP_PATTERN + "'");
            } else {
                // everything is fine
                LOGGER.debug("Replacing owner/group pattern with values from context '{}'", pContext);
                dynPath = dynPath
                        .replaceAll(Pattern.quote(OWNER_PATTERN),
                                Matcher.quoteReplacement(pContext.getUserId().getStringRepresentation()))
                        .replaceAll(Pattern.quote(GROUP_PATTERN),
                                Matcher.quoteReplacement(pContext.getGroupId().getStringRepresentation()));
            }
        }
        LOGGER.debug("Appending pattern-based path '{}' to base destination '{}'", new Object[] { dynPath, sUrl });
        sUrl += "/" + dynPath;
    }
    // finally, create abstract file and return
    AbstractFile result;
    try {
        if (!sUrl.endsWith("/")) {
            sUrl += "/";
        }
        LOGGER.debug("Appending SHA1-hashed transfer ID '{}' to current destination '{}'.",
                new Object[] { pTransferId, sUrl });
        sUrl += CryptUtil.stringToSHA1(pTransferId);
        LOGGER.debug("Preparing destination at {}.", sUrl);
        result = new AbstractFile(new URL(sUrl));
        Configuration config = result.getConfiguration();
        String context = pContext.getUserId().getStringRepresentation() + " "
                + pContext.getGroupId().getStringRepresentation();
        LOGGER.debug("Adding repository context {} to custom access protocol configuration.", context);
        config.setProperty("repository.context", context);
        result = new AbstractFile(new URL(sUrl), config);
        // check if destination exists and create it if required
        if (result.exists()) {
            LOGGER.info("Destination at '{}' already exists.", sUrl);
        } else {
            // try to create destination
            result = AbstractFile.createDirectory(result);
        }
        // check destination
        if (result != null) {
            // destination could be obtained
            result.clearCachedValues();
            if (result.isReadable() && result.isWriteable()) {
                // everything is fine...return result
                return result;
            } else {
                // destination cannot be accessed
                LOGGER.error("Destination '{}' exists but is not read- or writeable", sUrl);
                result = null;
            }
        } else {
            LOGGER.warn("No result obtained from directory creation.");
        }
    } catch (MalformedURLException mue) {
        LOGGER.error("Failed to create valid destination URL for '" + sUrl + "' and transferId " + pTransferId,
                mue);
        result = null;
    } catch (AdalapiException ae) {
        LOGGER.error("Failed to check/create destination for '" + sUrl + "'", ae);
        result = null;
    }
    return result;
}
From source file:com.evolveum.midpoint.init.StartupConfiguration.java
@Override public Configuration getConfiguration(String componentName) { if (null == componentName) { throw new IllegalArgumentException("NULL argument"); }/*from w w w.j a v a 2s .c o m*/ Configuration sub = config.subset(componentName); // Insert replacement for relative path to midpoint.home else clean // replace if (getMidpointHome() != null) { sub.addProperty(MIDPOINT_HOME, getMidpointHome()); } else { @SuppressWarnings("unchecked") Iterator<String> i = sub.getKeys(); while (i.hasNext()) { String key = i.next(); sub.setProperty(key, sub.getString(key).replace("${" + MIDPOINT_HOME + "}/", "")); sub.setProperty(key, sub.getString(key).replace("${" + MIDPOINT_HOME + "}", "")); } } if (LOGGER.isDebugEnabled()) { LOGGER.debug("Configuration for {} :", componentName); @SuppressWarnings("unchecked") Iterator<String> i = sub.getKeys(); while (i.hasNext()) { String key = i.next(); LOGGER.debug(" {} = {}", key, sub.getString(key)); } } return sub; }