List of usage examples for the org.apache.commons.configuration.plist.PropertyListConfiguration no-argument constructor:
public PropertyListConfiguration()
From source file:com.linkedin.pinot.controller.helix.core.rebalance.ReplicaGroupRebalanceStrategyTest.java
@Test public void testReplicaGroupRebalanceStrategy() throws Exception { Configuration rebalanceUserConfig = new PropertyListConfiguration(); rebalanceUserConfig.setProperty(RebalanceUserConfigConstants.DRYRUN, false); int numInstancesPerPartition = 3; ReplicaGroupStrategyConfig replicaGroupStrategyConfig = new ReplicaGroupStrategyConfig(); replicaGroupStrategyConfig.setNumInstancesPerPartition(numInstancesPerPartition); replicaGroupStrategyConfig.setMirrorAssignmentAcrossReplicaGroups(true); String tableNameWithType = TableNameBuilder.OFFLINE.tableNameWithType(TABLE_NAME); TableConfig tableConfig = _helixResourceManager.getTableConfig(TABLE_NAME, CommonConstants.Helix.TableType.OFFLINE); tableConfig.getValidationConfig().setReplicaGroupStrategyConfig(replicaGroupStrategyConfig); tableConfig.getValidationConfig().setSegmentAssignmentStrategy("ReplicaGroupSegmentAssignmentStrategy"); tableConfig.getValidationConfig().setReplication("2"); _helixResourceManager.setExistingTableConfig(tableConfig, tableNameWithType, CommonConstants.Helix.TableType.OFFLINE); // Test rebalancing after migration from non-replica to replica group table _helixResourceManager.rebalanceTable(TABLE_NAME, CommonConstants.Helix.TableType.OFFLINE, rebalanceUserConfig);/*from www .j av a 2 s. 
c om*/ Assert.assertTrue(validateTableLevelReplicaGroupRebalance()); Assert.assertTrue(validateNumSegments(INITIAL_NUM_SEGMENTS)); // Upload 10 more segments and validate the segment assignment addNewSegments(); while (!allSegmentsPushedToIdealState(TABLE_NAME, INITIAL_NUM_SEGMENTS + 10)) { Thread.sleep(100); } Assert.assertTrue(validateTableLevelReplicaGroupRebalance()); // Clean up new segments removeNewSegments(); while (!allSegmentsPushedToIdealState(TABLE_NAME, INITIAL_NUM_SEGMENTS)) { Thread.sleep(100); } // Test replace _helixAdmin.removeInstanceTag(getHelixClusterName(), "Server_localhost_0", OFFLINE_TENENT_NAME); _helixAdmin.addInstanceTag(getHelixClusterName(), "Server_localhost_a", OFFLINE_TENENT_NAME); _helixResourceManager.rebalanceTable(TABLE_NAME, CommonConstants.Helix.TableType.OFFLINE, rebalanceUserConfig); Assert.assertTrue(validateTableLevelReplicaGroupRebalance()); Assert.assertTrue(validateNumSegments(INITIAL_NUM_SEGMENTS)); // Upload 10 more segments and validate the segment assignment addNewSegments(); while (!allSegmentsPushedToIdealState(TABLE_NAME, INITIAL_NUM_SEGMENTS + 10)) { Thread.sleep(100); } Assert.assertTrue(validateTableLevelReplicaGroupRebalance()); // Test replace again _helixAdmin.removeInstanceTag(getHelixClusterName(), "Server_localhost_a", OFFLINE_TENENT_NAME); _helixAdmin.addInstanceTag(getHelixClusterName(), "Server_localhost_0", OFFLINE_TENENT_NAME); _helixResourceManager.rebalanceTable(TABLE_NAME, CommonConstants.Helix.TableType.OFFLINE, rebalanceUserConfig); Assert.assertTrue(validateTableLevelReplicaGroupRebalance()); Assert.assertTrue(validateNumSegments(INITIAL_NUM_SEGMENTS + 10)); // Clean up new segments removeNewSegments(); while (!allSegmentsPushedToIdealState(TABLE_NAME, INITIAL_NUM_SEGMENTS)) { Thread.sleep(100); } // Test adding servers to each replica group _helixAdmin.addInstanceTag(getHelixClusterName(), "Server_localhost_a", OFFLINE_TENENT_NAME); _helixAdmin.addInstanceTag(getHelixClusterName(), 
"Server_localhost_b", OFFLINE_TENENT_NAME); _helixAdmin.addInstanceTag(getHelixClusterName(), "Server_localhost_c", OFFLINE_TENENT_NAME); _helixAdmin.addInstanceTag(getHelixClusterName(), "Server_localhost_d", OFFLINE_TENENT_NAME); int targetNumInstancePerPartition = 5; int targetNumReplicaGroup = 2; updateTableConfig(targetNumInstancePerPartition, targetNumReplicaGroup); _helixResourceManager.rebalanceTable(TABLE_NAME, CommonConstants.Helix.TableType.OFFLINE, rebalanceUserConfig); Assert.assertTrue(validateTableLevelReplicaGroupRebalance()); Assert.assertTrue(validateNumSegments(INITIAL_NUM_SEGMENTS)); // Test removing servers to each replica group _helixAdmin.removeInstanceTag(getHelixClusterName(), "Server_localhost_a", OFFLINE_TENENT_NAME); _helixAdmin.removeInstanceTag(getHelixClusterName(), "Server_localhost_d", OFFLINE_TENENT_NAME); targetNumInstancePerPartition = 4; targetNumReplicaGroup = 2; updateTableConfig(targetNumInstancePerPartition, targetNumReplicaGroup); _helixResourceManager.rebalanceTable(TABLE_NAME, CommonConstants.Helix.TableType.OFFLINE, rebalanceUserConfig); Assert.assertTrue(validateTableLevelReplicaGroupRebalance()); Assert.assertTrue(validateNumSegments(INITIAL_NUM_SEGMENTS)); // Upload 10 more segments and validate the segment assignment addNewSegments(); while (!allSegmentsPushedToIdealState(TABLE_NAME, INITIAL_NUM_SEGMENTS + 10)) { Thread.sleep(100); } Assert.assertTrue(validateTableLevelReplicaGroupRebalance()); // Clean up new segments removeNewSegments(); while (!allSegmentsPushedToIdealState(TABLE_NAME, INITIAL_NUM_SEGMENTS)) { Thread.sleep(100); } // Test removing two more servers to each replica group with force run _helixAdmin.removeInstanceTag(getHelixClusterName(), "Server_localhost_b", OFFLINE_TENENT_NAME); _helixAdmin.removeInstanceTag(getHelixClusterName(), "Server_localhost_c", OFFLINE_TENENT_NAME); targetNumInstancePerPartition = 3; targetNumReplicaGroup = 2; updateTableConfig(targetNumInstancePerPartition, 
targetNumReplicaGroup); _helixResourceManager.rebalanceTable(TABLE_NAME, CommonConstants.Helix.TableType.OFFLINE, rebalanceUserConfig); Assert.assertTrue(validateTableLevelReplicaGroupRebalance()); Assert.assertTrue(validateNumSegments(INITIAL_NUM_SEGMENTS)); // Test adding a replica group _helixAdmin.addInstanceTag(getHelixClusterName(), "Server_localhost_a", OFFLINE_TENENT_NAME); _helixAdmin.addInstanceTag(getHelixClusterName(), "Server_localhost_b", OFFLINE_TENENT_NAME); _helixAdmin.addInstanceTag(getHelixClusterName(), "Server_localhost_c", OFFLINE_TENENT_NAME); targetNumInstancePerPartition = 3; targetNumReplicaGroup = 3; updateTableConfig(targetNumInstancePerPartition, targetNumReplicaGroup); _helixResourceManager.rebalanceTable(TABLE_NAME, CommonConstants.Helix.TableType.OFFLINE, rebalanceUserConfig); Assert.assertTrue(validateTableLevelReplicaGroupRebalance()); Assert.assertTrue(validateNumSegments(INITIAL_NUM_SEGMENTS)); // Upload 10 more segments and validate the segment assignment addNewSegments(); while (!allSegmentsPushedToIdealState(TABLE_NAME, INITIAL_NUM_SEGMENTS + 10)) { Thread.sleep(100); } Assert.assertTrue(validateTableLevelReplicaGroupRebalance()); // Clean up segments removeNewSegments(); while (!allSegmentsPushedToIdealState(TABLE_NAME, INITIAL_NUM_SEGMENTS)) { Thread.sleep(100); } // Test removing a replica group _helixAdmin.removeInstanceTag(getHelixClusterName(), "Server_localhost_0", OFFLINE_TENENT_NAME); _helixAdmin.removeInstanceTag(getHelixClusterName(), "Server_localhost_1", OFFLINE_TENENT_NAME); _helixAdmin.removeInstanceTag(getHelixClusterName(), "Server_localhost_2", OFFLINE_TENENT_NAME); targetNumInstancePerPartition = 3; targetNumReplicaGroup = 2; updateTableConfig(targetNumInstancePerPartition, targetNumReplicaGroup); _helixResourceManager.rebalanceTable(TABLE_NAME, CommonConstants.Helix.TableType.OFFLINE, rebalanceUserConfig); Assert.assertTrue(validateTableLevelReplicaGroupRebalance()); 
Assert.assertTrue(validateNumSegments(INITIAL_NUM_SEGMENTS)); }
From source file:edu.isi.wings.portal.controllers.DomainController.java
private PropertyListConfiguration getUserConfiguration() { this.userdir = this.config.getUserDir(); this.userConfigFile = userdir + "/user.properties"; // Create userConfigFile if it doesn't exist File cfile = new File(userConfigFile); if (!cfile.exists()) { if (!cfile.getParentFile().exists() && !cfile.getParentFile().mkdirs()) { System.err.println("Cannot create config file directory : " + cfile.getParent()); return null; }// w ww . j a v a 2s .co m createDefaultUserConfig(userConfigFile); } // Load properties from configFile PropertyListConfiguration config = new PropertyListConfiguration(); try { config.load(userConfigFile); } catch (Exception e) { e.printStackTrace(); } return config; }
From source file:com.linkedin.pinot.core.data.manager.realtime.RealtimeSegmentDataManager.java
/**
 * Creates the manager for one consuming realtime segment: configures the
 * Kafka stream provider from the table config, creates the in-memory
 * realtime segment, and starts (a) a background indexing thread that
 * consumes rows until the segment is full, converts it to an offline
 * segment on disk and commits it to Helix, and (b) a once-a-minute timer
 * that re-evaluates whether indexing should continue.
 *
 * @param segmentMetadata          ZK metadata of the segment to consume
 * @param tableConfig              table config (indexing + stream settings)
 * @param instanceMetadata         this server instance's ZK metadata
 * @param realtimeResourceManager  notified when the segment is committed
 * @param resourceDataDir          on-disk directory for this table's segments
 * @param mode                     read mode used when reloading the built segment
 * @param schema                   table schema used for extraction/indexing
 * @param serverMetrics            sink for consumption/conversion metrics
 */
public RealtimeSegmentDataManager(final RealtimeSegmentZKMetadata segmentMetadata,
    final AbstractTableConfig tableConfig, InstanceZKMetadata instanceMetadata,
    RealtimeTableDataManager realtimeResourceManager, final String resourceDataDir, final ReadMode mode,
    final Schema schema, final ServerMetrics serverMetrics) throws Exception {
  super();
  this.schema = schema;
  this.extractor = (PlainFieldExtractor) FieldExtractorFactory.getPlainFieldExtractor(schema);
  this.serverMetrics = serverMetrics;
  this.segmentName = segmentMetadata.getSegmentName();
  this.tableName = tableConfig.getTableName();
  // Only the first configured sorted column is used, and only if it exists in the schema.
  IndexingConfig indexingConfig = tableConfig.getIndexingConfig();
  if (indexingConfig.getSortedColumn().isEmpty()) {
    LOGGER.info("RealtimeDataResourceZKMetadata contains no information about sorted column for segment {}",
        segmentName);
    this.sortedColumn = null;
  } else {
    String firstSortedColumn = indexingConfig.getSortedColumn().get(0);
    if (this.schema.hasColumn(firstSortedColumn)) {
      LOGGER.info("Setting sorted column name: {} from RealtimeDataResourceZKMetadata for segment {}",
          firstSortedColumn, segmentName);
      this.sortedColumn = firstSortedColumn;
    } else {
      LOGGER.warn(
          "Sorted column name: {} from RealtimeDataResourceZKMetadata is not existed in schema for segment {}.",
          firstSortedColumn, segmentName);
      this.sortedColumn = null;
    }
  }
  // inverted index columns
  invertedIndexColumns = indexingConfig.getInvertedIndexColumns();
  this.segmentMetatdaZk = segmentMetadata;
  // create and init stream provider config
  // TODO : ideally resourceMetatda should create and give back a streamProviderConfig
  this.kafkaStreamProviderConfig = new KafkaHighLevelStreamProviderConfig();
  this.kafkaStreamProviderConfig.init(tableConfig, instanceMetadata, schema);
  // Per-segment logger so log lines are attributable to this consumer.
  segmentLogger = LoggerFactory.getLogger(RealtimeSegmentDataManager.class.getName() + "_" + segmentName + "_"
      + kafkaStreamProviderConfig.getStreamName());
  segmentLogger.info("Created segment data manager with Sorted column:{}, invertedIndexColumns:{}", sortedColumn,
      invertedIndexColumns);
  // Time-based flush deadline for this segment.
  segmentEndTimeThreshold = start + kafkaStreamProviderConfig.getTimeThresholdToFlushSegment();
  this.resourceDir = new File(resourceDataDir);
  this.resourceTmpDir = new File(resourceDataDir, "_tmp");
  if (!resourceTmpDir.exists()) {
    resourceTmpDir.mkdirs();
  }
  // create and init stream provider
  final String tableName = tableConfig.getTableName();
  this.kafkaStreamProvider = StreamProviderFactory.buildStreamProvider();
  this.kafkaStreamProvider.init(kafkaStreamProviderConfig, tableName, serverMetrics);
  this.kafkaStreamProvider.start();
  this.tableStreamName = tableName + "_" + kafkaStreamProviderConfig.getStreamName();
  // lets create a new realtime segment
  segmentLogger.info("Started kafka stream provider");
  realtimeSegment = new RealtimeSegmentImpl(schema, kafkaStreamProviderConfig.getSizeThresholdToFlushSegment(),
      tableName, segmentMetadata.getSegmentName(), kafkaStreamProviderConfig.getStreamName(), serverMetrics);
  realtimeSegment.setSegmentMetadata(segmentMetadata, this.schema);
  notifier = realtimeResourceManager;
  // Periodic check that decides whether to keep consuming (scheduled at the bottom).
  segmentStatusTask = new TimerTask() {
    @Override
    public void run() {
      computeKeepIndexing();
    }
  };
  // start the indexing thread
  indexingThread = new Thread(new Runnable() {
    @Override
    public void run() {
      // continue indexing until criteria is met
      boolean notFull = true;
      long exceptionSleepMillis = 50L;
      segmentLogger.info("Starting to collect rows");
      do {
        GenericRow row = null;
        try {
          row = kafkaStreamProvider.next();
          row = extractor.transform(row);
          if (row != null) {
            notFull = realtimeSegment.index(row);
            // Reset the exponential backoff after a successful index.
            exceptionSleepMillis = 50L;
          }
        } catch (Exception e) {
          segmentLogger.warn("Caught exception while indexing row, sleeping for {} ms, row contents {}",
              exceptionSleepMillis, row, e);
          // Sleep for a short time as to avoid filling the logs with exceptions too quickly
          Uninterruptibles.sleepUninterruptibly(exceptionSleepMillis, TimeUnit.MILLISECONDS);
          // Exponential backoff, capped at one minute.
          exceptionSleepMillis = Math.min(60000L, exceptionSleepMillis * 2);
        } catch (Error e) {
          segmentLogger.error("Caught error in indexing thread", e);
          throw e;
        }
      } while (notFull && keepIndexing && (!isShuttingDown));
      if (isShuttingDown) {
        segmentLogger.info("Shutting down indexing thread!");
        return;
      }
      try {
        // Report extraction statistics gathered while consuming.
        int numErrors, numConversions, numNulls, numNullCols;
        if ((numErrors = extractor.getTotalErrors()) > 0) {
          serverMetrics.addMeteredTableValue(tableStreamName, ServerMeter.ROWS_WITH_ERRORS, (long) numErrors);
        }
        Map<String, Integer> errorCount = extractor.getError_count();
        for (String column : errorCount.keySet()) {
          if ((numErrors = errorCount.get(column)) > 0) {
            segmentLogger.warn("Column {} had {} rows with errors", column, numErrors);
          }
        }
        if ((numConversions = extractor.getTotalConversions()) > 0) {
          serverMetrics.addMeteredTableValue(tableStreamName, ServerMeter.ROWS_NEEDING_CONVERSIONS,
              (long) numConversions);
          segmentLogger.info("{} rows needed conversions ", numConversions);
        }
        if ((numNulls = extractor.getTotalNulls()) > 0) {
          serverMetrics.addMeteredTableValue(tableStreamName, ServerMeter.ROWS_WITH_NULL_VALUES, (long) numNulls);
          segmentLogger.info("{} rows had null columns", numNulls);
        }
        if ((numNullCols = extractor.getTotalNullCols()) > 0) {
          serverMetrics.addMeteredTableValue(tableStreamName, ServerMeter.COLUMNS_WITH_NULL_VALUES,
              (long) numNullCols);
          segmentLogger.info("{} columns had null values", numNullCols);
        }
        segmentLogger.info("Indexing threshold reached, proceeding with index conversion");
        // kill the timer first
        segmentStatusTask.cancel();
        updateCurrentDocumentCountMetrics();
        segmentLogger.info("Indexed {} raw events, current number of docs = {}",
            realtimeSegment.getRawDocumentCount(), realtimeSegment.getSegmentMetadata().getTotalDocs());
        File tempSegmentFolder = new File(resourceTmpDir, "tmp-" + String.valueOf(System.currentTimeMillis()));
        // lets convert the segment now
        RealtimeSegmentConverter converter = new RealtimeSegmentConverter(realtimeSegment,
            tempSegmentFolder.getAbsolutePath(), schema, segmentMetadata.getTableName(),
            segmentMetadata.getSegmentName(), sortedColumn, invertedIndexColumns);
        segmentLogger.info("Trying to build segment");
        final long buildStartTime = System.nanoTime();
        converter.build();
        final long buildEndTime = System.nanoTime();
        segmentLogger.info("Built segment in {} ms",
            TimeUnit.MILLISECONDS.convert((buildEndTime - buildStartTime), TimeUnit.NANOSECONDS));
        // Atomically move the built segment into the table's data directory.
        File destDir = new File(resourceDataDir, segmentMetadata.getSegmentName());
        FileUtils.deleteQuietly(destDir);
        FileUtils.moveDirectory(tempSegmentFolder.listFiles()[0], destDir);
        FileUtils.deleteQuietly(tempSegmentFolder);
        long segStartTime = realtimeSegment.getMinTime();
        long segEndTime = realtimeSegment.getMaxTime();
        TimeUnit timeUnit = schema.getTimeFieldSpec().getOutgoingGranularitySpec().getTimeType();
        // Reload the converted segment with the inverted-index loading config.
        Configuration configuration = new PropertyListConfiguration();
        configuration.setProperty(IndexLoadingConfigMetadata.KEY_OF_LOADING_INVERTED_INDEX, invertedIndexColumns);
        IndexLoadingConfigMetadata configMetadata = new IndexLoadingConfigMetadata(configuration);
        IndexSegment segment = Loaders.IndexSegment
            .load(new File(resourceDir, segmentMetatdaZk.getSegmentName()), mode, configMetadata);
        segmentLogger.info("Committing Kafka offsets");
        boolean commitSuccessful = false;
        try {
          kafkaStreamProvider.commit();
          commitSuccessful = true;
          kafkaStreamProvider.shutdown();
          segmentLogger.info("Successfully committed Kafka offsets, consumer release requested.");
        } catch (Throwable e) {
          // If we got here, it means that either the commit or the shutdown failed. Considering that the
          // KafkaConsumerManager delays shutdown and only adds the consumer to be released in a deferred way, this
          // likely means that writing the Kafka offsets failed.
          //
          // The old logic (mark segment as done, then commit offsets and shutdown the consumer immediately) would die
          // in a terrible way, leaving the consumer open and causing us to only get half the records from that point
          // on. In this case, because we keep the consumer open for a little while, we should be okay if the
          // controller reassigns us a new segment before the consumer gets released. Hopefully by the next time that
          // we get to committing the offsets, the transient ZK failure that caused the write to fail will not
          // happen again and everything will be good.
          //
          // Several things can happen:
          // - The controller reassigns us a new segment before we release the consumer (KafkaConsumerManager will
          //   keep the consumer open for about a minute, which should be enough time for the controller to reassign
          //   us a new segment) and the next time we close the segment the offsets commit successfully; we're good.
          // - The controller reassigns us a new segment, but after we released the consumer (if the controller was
          //   down or there was a ZK failure on writing the Kafka offsets but not the Helix state). We lose whatever
          //   data was in this segment. Not good.
          // - The server crashes after this comment and before we mark the current segment as done; if the Kafka
          //   offsets didn't get written, then when the server restarts it'll start consuming the current segment
          //   from the previously committed offsets; we're good.
          // - The server crashes after this comment, the Kafka offsets were written but the segment wasn't marked as
          //   done in Helix, but we got a failure (or not) on the commit; we lose whatever data was in this segment
          //   if we restart the server (not good). If we manually mark the segment as done in Helix by editing the
          //   state in ZK, everything is good, we'll consume a new segment that starts from the correct offsets.
          //
          // This is still better than the previous logic, which would have these failure modes:
          // - Consumer was left open and the controller reassigned us a new segment; consume only half the events
          //   (because there are two consumers and Kafka will try to rebalance partitions between those two)
          // - We got a segment assigned to us before we got around to committing the offsets, reconsume the data that
          //   we got in this segment again, as we're starting consumption from the previously committed offset (eg.
          //   duplicate data).
          //
          // This is still not very satisfactory, which is why this part is due for a redesign.
          //
          // Assuming you got here because the realtime offset commit metric has fired, check the logs to determine
          // which of the above scenarios happened. If you're in one of the good scenarios, then there's nothing to
          // do. If you're not, then based on how critical it is to get those rows back, then your options are:
          // - Wipe the realtime table and reconsume everything (mark the replica as disabled so that clients don't
          //   see query results from partially consumed data, then re-enable it when this replica has caught up)
          // - Accept that those rows are gone in this replica and move on (they'll be replaced by good offline data
          //   soon anyway)
          // - If there's a replica that has consumed properly, you could shut it down, copy its segments onto this
          //   replica, assign a new consumer group id to this replica, rename the copied segments and edit their
          //   metadata to reflect the new consumer group id, copy the Kafka offsets from the shutdown replica onto
          //   the new consumer group id and then restart both replicas. This should get you the missing rows.
          segmentLogger.error("FATAL: Exception committing or shutting down consumer commitSuccessful={}",
              commitSuccessful, e);
          serverMetrics.addMeteredTableValue(tableName, ServerMeter.REALTIME_OFFSET_COMMIT_EXCEPTIONS, 1L);
          if (!commitSuccessful) {
            kafkaStreamProvider.shutdown();
          }
        }
        try {
          segmentLogger.info("Marking current segment as completed in Helix");
          // Overwrite this segment's ZK metadata to mark it DONE/OFFLINE with its final stats.
          RealtimeSegmentZKMetadata metadataToOverwrite = new RealtimeSegmentZKMetadata();
          metadataToOverwrite.setTableName(segmentMetadata.getTableName());
          metadataToOverwrite.setSegmentName(segmentMetadata.getSegmentName());
          metadataToOverwrite.setSegmentType(SegmentType.OFFLINE);
          metadataToOverwrite.setStatus(Status.DONE);
          metadataToOverwrite.setStartTime(segStartTime);
          metadataToOverwrite.setEndTime(segEndTime);
          metadataToOverwrite.setTotalRawDocs(realtimeSegment.getSegmentMetadata().getTotalDocs());
          metadataToOverwrite.setTimeUnit(timeUnit);
          notifier.notifySegmentCommitted(metadataToOverwrite, segment);
          segmentLogger.info(
              "Completed write of segment completion to Helix, waiting for controller to assign a new segment");
        } catch (Exception e) {
          if (commitSuccessful) {
            segmentLogger.error(
                "Offsets were committed to Kafka but we were unable to mark this segment as completed in Helix. Manually mark the segment as completed in Helix; restarting this instance will result in data loss.",
                e);
          } else {
            segmentLogger.warn(
                "Caught exception while marking segment as completed in Helix. Offsets were not written, restarting the instance should be safe.",
                e);
          }
        }
      } catch (Exception e) {
        segmentLogger.error("Caught exception in the realtime indexing thread", e);
      }
    }
  });
  indexingThread.start();
  serverMetrics.addValueToTableGauge(tableName, ServerGauge.SEGMENT_COUNT, 1L);
  segmentLogger.debug("scheduling keepIndexing timer check");
  // start a schedule timer to keep track of the segment
  TimerService.timer.schedule(segmentStatusTask, ONE_MINUTE_IN_MILLSEC, ONE_MINUTE_IN_MILLSEC);
  segmentLogger.info("finished scheduling keepIndexing timer check");
}
From source file:edu.isi.wings.portal.controllers.DomainController.java
/**
 * Writes the current user's domain registry to a plist file.
 *
 * @param file destination path for the plist
 * @return true on a successful save, false if saving failed
 */
private boolean saveUserConfig(String file) {
  PropertyListConfiguration plist = new PropertyListConfiguration();
  plist.addProperty("user.domain", this.domain.getDomainName());
  // The "(-1)" suffix opens a new "domain" node; the following keys attach to it.
  for (DomainInfo info : this.user_domains.values()) {
    plist.addProperty("user.domains.domain(-1).name", info.getName());
    plist.addProperty("user.domains.domain.dir", info.getDirectory());
    if (info.isLegacy()) {
      plist.addProperty("user.domains.domain.legacy", info.isLegacy());
    } else {
      plist.addProperty("user.domains.domain.url", info.getUrl());
    }
  }
  try {
    plist.save(file);
    return true;
  } catch (ConfigurationException e) {
    e.printStackTrace();
    return false;
  }
}
From source file:edu.isi.wings.portal.classes.Config.java
private PropertyListConfiguration getPortalConfiguration(HttpServletRequest request) { ServletContext app = request.getSession().getServletContext(); this.configFile = app.getInitParameter("config.file"); if (this.configFile == null) { String home = System.getProperty("user.home"); if (home != null && !home.equals("")) this.configFile = home + File.separator + ".wings" + File.separator + "portal.properties"; else//from ww w . j av a 2 s . c o m this.configFile = "/etc/wings/portal.properties"; } // Create configFile if it doesn't exist (portal.properties) File cfile = new File(this.configFile); if (!cfile.exists()) { if (!cfile.getParentFile().mkdirs()) { System.err.println("Cannot create config file directory : " + cfile.getParent()); return null; } createDefaultPortalConfig(request); } // Load properties from configFile PropertyListConfiguration props = new PropertyListConfiguration(); try { props.load(this.configFile); } catch (Exception e) { e.printStackTrace(); } return props; }
From source file:edu.isi.wings.portal.classes.config.Config.java
public PropertyListConfiguration getPortalConfiguration(HttpServletRequest request) { ServletContext app = request.getSession().getServletContext(); this.configFile = app.getInitParameter("config.file"); if (this.configFile == null) { String home = System.getProperty("user.home"); if (home != null && !home.equals("")) this.configFile = home + File.separator + ".wings" + File.separator + "portal.properties"; else/*from w ww .j a v a2 s. c o m*/ this.configFile = "/etc/wings/portal.properties"; } // Create configFile if it doesn't exist (portal.properties) File cfile = new File(this.configFile); if (!cfile.exists()) { if (!cfile.getParentFile().mkdirs()) { System.err.println("Cannot create config file directory : " + cfile.getParent()); return null; } if (request != null) createDefaultPortalConfig(request); } // Load properties from configFile PropertyListConfiguration props = new PropertyListConfiguration(); try { props.load(this.configFile); } catch (Exception e) { e.printStackTrace(); } return props; }
From source file:edu.isi.wings.portal.classes.config.Config.java
private void createDefaultPortalConfig(HttpServletRequest request) { String server = request.getScheme() + "://" + request.getServerName() + ":" + request.getServerPort(); String storageDir = null;//from w w w . j a va 2 s . com String home = System.getProperty("user.home"); if (home != null && !home.equals("")) storageDir = home + File.separator + ".wings" + File.separator + "storage"; else storageDir = System.getProperty("java.io.tmpdir") + File.separator + "wings" + File.separator + "storage"; if (!new File(storageDir).mkdirs()) System.err.println("Cannot create storage directory: " + storageDir); PropertyListConfiguration config = new PropertyListConfiguration(); config.addProperty("storage.local", storageDir); config.addProperty("storage.tdb", storageDir + File.separator + "TDB"); config.addProperty("server", server); File loc1 = new File("/usr/bin/dot"); File loc2 = new File("/usr/local/bin/dot"); config.addProperty("graphviz", loc2.exists() ? loc2.getAbsolutePath() : loc1.getAbsolutePath()); config.addProperty("ontology.data", ontdirurl + "/data.owl"); config.addProperty("ontology.component", ontdirurl + "/component.owl"); config.addProperty("ontology.workflow", ontdirurl + "/workflow.owl"); config.addProperty("ontology.execution", ontdirurl + "/execution.owl"); config.addProperty("ontology.resource", ontdirurl + "/resource.owl"); this.addEngineConfig(config, new ExeEngine("Local", LocalExecutionEngine.class.getCanonicalName(), ExeEngine.Type.BOTH)); this.addEngineConfig(config, new ExeEngine("Distributed", DistributedExecutionEngine.class.getCanonicalName(), ExeEngine.Type.BOTH)); /*this.addEngineConfig(config, new ExeEngine("OODT", OODTExecutionEngine.class.getCanonicalName(), ExeEngine.Type.PLAN)); this.addEngineConfig(config, new ExeEngine("Pegasus", PegasusExecutionEngine.class.getCanonicalName(), ExeEngine.Type.PLAN));*/ try { config.save(this.configFile); } catch (Exception e) { e.printStackTrace(); } }
From source file:edu.isi.wings.portal.classes.domains.Domain.java
/**
 * Serializes this domain's full definition (engines, workflow/execution/data
 * URL mappings, component libraries, and permissions) to the domain's plist
 * config file, creating the domain directory first if needed.
 *
 * @return true if the config file was saved, false on a save failure
 */
public boolean saveDomain() {
  PropertyListConfiguration config = new PropertyListConfiguration();
  config.addProperty("name", this.domainName);
  config.addProperty("useSharedTripleStore", this.useSharedTripleStore);
  config.addProperty("executions.engine.plan", this.planEngine);
  config.addProperty("executions.engine.step", this.stepEngine);
  // setUrlMapProp writes a url/map pair for each library location.
  this.setUrlMapProp(config, "workflows.library", this.templateLibrary);
  this.setUrlMapProp(config, "workflows.prefix", this.newTemplateDirectory);
  this.setUrlMapProp(config, "executions.library", this.executionLibrary);
  this.setUrlMapProp(config, "executions.prefix", this.newExecutionDirectory);
  this.setUrlMapProp(config, "data.ontology", this.dataOntology);
  this.setUrlMapProp(config, "data.library", this.dataLibrary);
  config.addProperty("data.library.storage", this.dataLibrary.getStorageDirectory());
  config.addProperty("components.namespace", this.componentLibraryNamespace);
  this.setUrlMapProp(config, "components.abstract", this.abstractComponentLibrary);
  config.addProperty("components.concrete", this.concreteComponentLibrary.getName());
  // "(-1)" opens a new "library" node per iteration; the plain keys that
  // follow attach to that most-recently-created node, so the order of these
  // addProperty calls is significant.
  for (DomainLibrary clib : this.concreteComponentLibraries) {
    config.addProperty("components.libraries.library(-1).url", clib.getUrl());
    config.addProperty("components.libraries.library.map", clib.getMapping());
    config.addProperty("components.libraries.library.name", clib.getName());
    config.addProperty("components.libraries.library.storage", clib.getStorageDirectory());
  }
  // Same "(-1)" new-node pattern for one "permission" node per user.
  for (Permission permission : this.permissions) {
    config.addProperty("permissions.permission(-1).userid", permission.getUserid());
    config.addProperty("permissions.permission.canRead", permission.canRead());
    config.addProperty("permissions.permission.canWrite", permission.canWrite());
    config.addProperty("permissions.permission.canExecute", permission.canExecute());
  }
  // Ensure the domain directory exists before saving alongside it.
  if (this.domainDirectory != null) {
    File domdir = new File(this.domainDirectory);
    if (!domdir.exists() && !domdir.mkdirs())
      System.err.println("Could not create domain directory: " + this.domainDirectory);
  }
  try {
    config.save(this.domainConfigFile);
    return true;
  } catch (ConfigurationException e) {
    e.printStackTrace();
  }
  return false;
}
From source file:org.apache.james.mpt.host.JamesImapHostSystem.java
/**
 * Builds the minimal user-repository configuration for the IMAP test host:
 * only the administrator account id is set.
 */
private HierarchicalConfiguration userRepositoryConfiguration() {
  PropertyListConfiguration config = new PropertyListConfiguration();
  config.addProperty("administratorId", "imapuser");
  return config;
}
From source file:org.apache.james.user.ldap.ReadOnlyUsersLDAPRepositoryTest.java
/**
 * Builds the read-only LDAP users-repository configuration for the test,
 * pointing at the embedded LDAP container. Keys use the XML-attribute
 * syntax ("[@name]") expected by the repository's configure step; commas in
 * DN values are escaped so they are not treated as list separators.
 */
private HierarchicalConfiguration ldapRepositoryConfiguration() throws ConfigurationException {
  PropertyListConfiguration config = new PropertyListConfiguration();
  String[][] attributes = {
      { "[@ldapHost]", ldapContainer.getLdapHost() },
      { "[@principal]", "cn=admin\\,dc=james\\,dc=org" },
      { "[@credentials]", ADMIN_PASSWORD },
      { "[@userBase]", "ou=People\\,dc=james\\,dc=org" },
      { "[@userIdAttribute]", "uid" },
      { "[@userObjectClass]", "inetOrgPerson" },
      { "[@maxRetries]", "4" },
      { "[@retryStartInterval]", "0" },
      { "[@retryMaxInterval]", "8" },
      { "[@retryIntervalScale]", "1000" } };
  for (String[] attribute : attributes) {
    config.addProperty(attribute[0], attribute[1]);
  }
  return config;
}