List of usage examples for org.apache.commons.lang BooleanUtils toBoolean
public static boolean toBoolean(String str)
Converts a String to a boolean (optimised for performance).
'true', 'on' or 'yes' (case insensitive) will return true; otherwise, false is returned.
From source file:com.edgenius.wiki.webapp.servlet.InstallServlet.java
private void createTables(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException { boolean confirmed = BooleanUtils.toBoolean(request.getParameter("confirmed")); String dbType = request.getParameter("dbType"); String connectType = request.getParameter("connectType"); String jndi = request.getParameter("jndi"); String dbname = request.getParameter("dbname"); String dbSchema = request.getParameter("dbschema"); String host = request.getParameter("host"); String username = request.getParameter("username"); String password = request.getParameter("password"); String userDBUrl = request.getParameter("userDBUrl"); String driverType = request.getParameter("driverType"); boolean connonly = "true".equals(request.getParameter("connonly")) ? true : false; boolean urlEdited = BooleanUtils.toBoolean(request.getParameter("urlEdited")); //echo back value request.setAttribute("dbType", dbType); request.setAttribute("connectType", connectType); request.setAttribute("jndi", jndi); request.setAttribute("host", host); request.setAttribute("dbname", dbname); request.setAttribute("dbschema", dbSchema); request.setAttribute("userDBUrl", userDBUrl); request.setAttribute("username", username); request.setAttribute("password", password); request.setAttribute("urlEdited", urlEdited); request.setAttribute("driverType", driverType); DBLoader loader = new DBLoader(); ConnectionProxy con = null;/*from ww w . j a v a 2 s . 
co m*/ try { //put all connection information into a Server object, and let loader to decide what kind Connect Server connServer = new Server(); connServer.setDbType(dbType); connServer.setDbConnectType(connectType); if (connectType.equalsIgnoreCase(Server.CONN_TYPE_JDBC)) { if (!urlEdited) { userDBUrl = loader.getURL(dbType, driverType, host, dbname, false); } connServer.setDbUrl(userDBUrl); connServer.setDbSchema(dbSchema); } else { //JDNI won't be blank so far if (!jndi.startsWith(Server.JNDI_PREFIX)) { if (jndi.startsWith("/")) jndi = Server.JNDI_PREFIX + jndi; else jndi = Server.JNDI_PREFIX + "/" + jndi; } connServer.setDbJNDI(jndi); } connServer.setDbUsername(username); connServer.setDbPassword(password); con = loader.getConnection(dbType, connServer); if (con == null) { request.setAttribute("error", "Unable to connect database."); request.getRequestDispatcher("/WEB-INF/pages/install/createtables.jsp").forward(request, response); return; } if (connonly) { //success init DB if (StringUtils.equalsIgnoreCase(Server.CONN_TYPE_DS, connectType)) { //Datasource updateServerPropertiesJNDI(request, dbType, jndi); } else { //JDBC updateServerPropertiesJDBC(request, dbType, dbSchema, loader.getDriver(dbType, driverType), username, password, userDBUrl); } response.sendRedirect("install?step=admin"); return; } //pre-check if tables exist or not if (confirmed || !loader.isTableExist(con)) { loader.resetTable(dbType, con); //success init DB if (StringUtils.equalsIgnoreCase(Server.CONN_TYPE_DS, connectType)) { //Datasource updateServerPropertiesJNDI(request, dbType, jndi); } else { //JDBC updateServerPropertiesJDBC(request, dbType, dbSchema, loader.getDriver(dbType, driverType), username, password, userDBUrl); } response.sendRedirect("install?step=admin"); } else { //return message, need user confirm request.setAttribute("existed", true); request.setAttribute("message", "Tables already exist, do you want to reset them?"); 
request.getRequestDispatcher("/WEB-INF/pages/install/createtables.jsp").forward(request, response); } } catch (DriverNotFoundException e) { log.error("Driver not found ", e); request.setAttribute("error", "No suitable database driver found. Please copy corresponding driver to your web server library directory."); request.getRequestDispatcher("/WEB-INF/pages/install/createtables.jsp").forward(request, response); } catch (SQLException e) { log.error("SQL error {}", e.getNextException(), e); request.setAttribute("error", "Exception:" + e.toString()); request.getRequestDispatcher("/WEB-INF/pages/install/createtables.jsp").forward(request, response); } catch (Exception e) { log.error("Unable complete table initialize task", e); request.setAttribute("error", "Unable create tables, please retry or create them manually."); request.getRequestDispatcher("/WEB-INF/pages/install/createtables.jsp").forward(request, response); } finally { if (con != null) con.close(); } }
From source file:com.wineaccess.winepermit.WinePermitHelper.java
private static void validatePermit(List<PermitModel> permit, String wineryId) { Map<String, String> permitNumberPOMap = new ConcurrentHashMap<String, String>(); Set<String> dtcPermitNumberSet = new HashSet<String>(); /*Map<String,String> permitNumberDOMap = new ConcurrentHashMap<String,String>(); Set<String> dtcPermitNumberSetDB = new HashSet<String>();*/ if (permit == null || permit.isEmpty()) { response.addError(new WineaccessError(SystemErrorCode.PERMIT_NO_SELECTED_ALT_STATES_ERROR_WINE, SystemErrorCode.PERMIT_NO_SELECTED_ALT_STATES_ERROR_WINE_TEXT)); } else {// w ww. ja v a2 s .c o m Boolean isValidWineryPermit = false; for (PermitModel permitModel : permit) { String permitdurationInMonths = permitModel.getPermitDurationInMonths(); Date permitStartDate = permitModel.getDtcPermitStartDate(); Date permitEndDate = permitModel.getDtcPermitEndDate(); String dtcPermitNumber = permitModel.getDtcPermitNumber(); Long masterDataId = (permitModel.getMasterDataId() != null) ? Long.parseLong(permitModel.getMasterDataId()) : null; if (masterDataId == null) { response.addError(new WineaccessError(SystemErrorCode.NO_WINE_PERMITS_ERROR, SystemErrorCode.NO_WINE_PERMITS_ERROR_TEXT)); } else { MasterData masterData = MasterDataRepository.getMasterDataById(masterDataId); if (masterData == null || !masterData.getMasterDataType().getName() .equals(MasterDataTypeEnum.WineryLicencePermit.name())) { response.addError(new WineaccessError(SystemErrorCode.PERMIT_INVALID_MASTER_DATA_WINE, SystemErrorCode.PERMIT_INVALID_MASTER_DATA_WINE_TEXT)); } } if (StringUtils.isNotBlank(dtcPermitNumber) && StringUtils.isNotBlank(permitdurationInMonths) && BooleanUtils.toBoolean(permitModel.getIsSelected())) { isValidWineryPermit = true; } if (StringUtils.isNotBlank(permitdurationInMonths) && permitStartDate != null && permitEndDate != null) { Integer diff = WineryPermitHelper.getMonthsDifference(permitStartDate, permitEndDate); if (Integer.parseInt(permitdurationInMonths) != diff) { 
response.addError(new WineaccessError(SystemErrorCode.PERMIT_INVALID_DURATION_WINE, SystemErrorCode.PERMIT_INVALID_DURATION_WINE_TEXT)); } } if (StringUtils.isNotBlank(dtcPermitNumber) && permitModel.getMasterDataId() != null) { permitNumberPOMap.put(permitModel.getMasterDataId(), dtcPermitNumber); dtcPermitNumberSet.add(dtcPermitNumber); } } if (!permitNumberPOMap.isEmpty()) { //Set<String> dtcPermitNumberSet = permitNumberPOMap. if (dtcPermitNumberSet.size() != permitNumberPOMap.size()) { response.addError(new WineaccessError(SystemErrorCode.PERMIT_DUPLICATE_DTC_PERMIT_NUMBER_WINE, SystemErrorCode.PERMIT_DUPLICATE_DTC_PERMIT_NUMBER_WINE_TEXT)); } /*else{ List<Object[]> dtcPermitNumberFromDB = WineryPermitRepository.findDTCPermitNumberByWineryId(Long.valueOf(wineryId)); if(dtcPermitNumberFromDB!=null && !dtcPermitNumberFromDB.isEmpty()){ for(Object[] obj:dtcPermitNumberFromDB){ permitNumberDOMap.put((String)obj[0], (String)obj[1]); dtcPermitNumberSetDB.add((String)obj[1]); } } if(dtcPermitNumberSetDB!=null && !dtcPermitNumberSetDB.isEmpty()){ for(String DTCPermitNumber:dtcPermitNumberSetDB) { if(Collections.frequency(permitNumberPOMap, DTCPermitNumber)!=0) { response.addError(new WineaccessError(SystemErrorCode.PERMIT_DUPLICATE_DTC_PERMIT_NUMBER,SystemErrorCode.PERMIT_DUPLICATE_DTC_PERMIT_NUMBER_TEXT)); } } } }*/ } if (!isValidWineryPermit) { response.addError(new WineaccessError(SystemErrorCode.PERMIT_INVALID_PERMIT_DURATION_WINE, SystemErrorCode.PERMIT_INVALID_PERMIT_DURATION_WINE_TEXT)); } } }
From source file:com.redhat.rhn.frontend.xmlrpc.kickstart.profile.ProfileHandler.java
private boolean md5cryptRootPw(List<Map> options) { for (Map m : options) { if ("md5_crypt_rootpw".equals(m.get("name"))) { return BooleanUtils.toBoolean((String) m.get("arguments")); }/* ww w . j a v a 2 s. c om*/ } return false; }
From source file:jp.primecloud.auto.process.puppet.PuppetComponentProcess.java
/**
 * Builds the variable map handed to Puppet manifest generation for a single
 * instance of a component. Starts from a copy of {@code rootMap} and adds the
 * instance-, config-, platform- and volume/disk-related entities, plus the
 * access IPs of all currently running instances.
 *
 * @param componentNo component being processed; selects matching configs and volumes/disks
 * @param context     process context; supplies the running instance numbers
 * @param start       NOTE(review): not referenced in this body — presumably
 *                    kept for overrides/signature compatibility; confirm
 * @param instanceNo  instance to build the map for
 * @param rootMap     base map that is copied, never mutated
 * @return the populated template variable map
 */
protected Map<String, Object> createInstanceMap(Long componentNo, ComponentProcessContext context, boolean start,
        Long instanceNo, Map<String, Object> rootMap) {
    Map<String, Object> map = new HashMap<String, Object>(rootMap);

    // Instance
    Instance instance = instanceDao.read(instanceNo);
    map.put("instance", instance);

    // PuppetInstance
    PuppetInstance puppetInstance = puppetInstanceDao.read(instanceNo);
    map.put("puppetInstance", puppetInstance);

    // InstanceConfig: keep only the configs that belong to this component.
    List<InstanceConfig> instanceConfigs = instanceConfigDao.readByInstanceNo(instanceNo);
    Map<String, Object> configs = new HashMap<String, Object>();
    for (InstanceConfig instanceConfig : instanceConfigs) {
        if (componentNo.equals(instanceConfig.getComponentNo())) {
            configs.put(instanceConfig.getConfigName(), instanceConfig.getConfigValue());
        }
    }
    map.put("instanceConfigs", configs);

    // Platform
    Platform platform = platformDao.read(instance.getPlatformNo());
    map.put("platform", platform);

    // Platform-specific instance and volume/disk entities; the volume/disk
    // entry is only added when one is associated with this component/instance.
    // TODO CLOUD BRANCHING
    if (PCCConstant.PLATFORM_TYPE_AWS.equals(platform.getPlatformType())) {
        // AwsInstance
        AwsInstance awsInstance = awsInstanceDao.read(instanceNo);
        map.put("awsInstance", awsInstance);
        // AwsVolume
        AwsVolume awsVolume = awsVolumeDao.readByComponentNoAndInstanceNo(componentNo, instanceNo);
        if (awsVolume != null) {
            map.put("awsVolume", awsVolume);
        }
    } else if (PCCConstant.PLATFORM_TYPE_CLOUDSTACK.equals(platform.getPlatformType())) {
        // CloudStackInstance
        CloudstackInstance cloudstackInstance = cloudstackInstanceDao.read(instanceNo);
        map.put("cloudstackInstance", cloudstackInstance);
        // CloudStackVolume
        CloudstackVolume cloudstackVolume = cloudstackVolumeDao.readByComponentNoAndInstanceNo(componentNo,
                instanceNo);
        if (cloudstackVolume != null) {
            map.put("cloudstackVolume", cloudstackVolume);
        }
    } else if (PCCConstant.PLATFORM_TYPE_VMWARE.equals(platform.getPlatformType())) {
        // VmwareInstance
        VmwareInstance vmwareInstance = vmwareInstanceDao.read(instanceNo);
        map.put("vmwareInstance", vmwareInstance);
        // VmwareDisk
        VmwareDisk vmwareDisk = vmwareDiskDao.readByComponentNoAndInstanceNo(componentNo, instanceNo);
        if (vmwareDisk != null) {
            map.put("vmwareDisk", vmwareDisk);
        }
    } else if (PCCConstant.PLATFORM_TYPE_NIFTY.equals(platform.getPlatformType())) {
        // NiftyInstance
        NiftyInstance niftyInstance = niftyInstanceDao.read(instanceNo);
        map.put("niftyInstance", niftyInstance);
        // NiftyVolume
        NiftyVolume niftyVolume = niftyVolumeDao.readByComponentNoAndInstanceNo(componentNo, instanceNo);
        if (niftyVolume != null) {
            map.put("niftyVolume", niftyVolume);
        }
    } else if (PCCConstant.PLATFORM_TYPE_VCLOUD.equals(platform.getPlatformType())) {
        // VcloudInstance
        VcloudInstance vcloudInstance = vcloudInstanceDao.read(instanceNo);
        map.put("vcloudInstance", vcloudInstance);
        // VcloudDisk: the DAO only looks up by component, so scan the
        // component's disks for the one attached to this instance.
        VcloudDisk vcloudDisk = null;
        List<VcloudDisk> vcloudDisks = vcloudDiskDao.readByComponentNo(componentNo);
        for (VcloudDisk tmpVcloudDisk : vcloudDisks) {
            if (tmpVcloudDisk.getInstanceNo().equals(instanceNo)) {
                vcloudDisk = tmpVcloudDisk;
                break;
            }
        }
        if (vcloudDisk != null) {
            map.put("vcloudDisk", vcloudDisk);
        }
    } else if (PCCConstant.PLATFORM_TYPE_AZURE.equals(platform.getPlatformType())) {
        // AzureInstance
        AzureInstance azureInstance = azureInstanceDao.read(instanceNo);
        map.put("azureInstance", azureInstance);
        // AzureDisk
        AzureDisk azureDisk = azureDiskDao.readByComponentNoAndInstanceNo(componentNo, instanceNo);
        if (azureDisk != null) {
            map.put("azureDisk", azureDisk);
        }
    } else if (PCCConstant.PLATFORM_TYPE_OPENSTACK.equals(platform.getPlatformType())) {
        // OpenstackInstance
        OpenstackInstance openstackInstance = openstackInstanceDao.read(instanceNo);
        map.put("openstackInstance", openstackInstance);
        // OpenstackVolume
        OpenstackVolume openstackVolume = openstackVolumeDao.readByComponentNoAndInstanceNo(componentNo,
                instanceNo);
        if (openstackVolume != null) {
            map.put("openstackVolume", openstackVolume);
        }
    }

    // Access IPs: for each running instance, pick which IP this instance
    // should use to reach it. (Original comments were garbled non-English;
    // intent below is inferred from the code — confirm.)
    List<Instance> runningInstances = instanceDao.readInInstanceNos(context.getRunningInstanceNos());
    Map<String, String> accessIps = new HashMap<String, String>();
    for (Instance runningInstance : runningInstances) {
        // Default: the public IP.
        String accessIp = runningInstance.getPublicIp();
        if (instance.getPlatformNo().equals(runningInstance.getPlatformNo())) {
            // Same platform: several platforms switch to the private IP.
            // TODO CLOUD BRANCHING
            if (PCCConstant.PLATFORM_TYPE_AWS.equals(platform.getPlatformType())) {
                PlatformAws platformAws = platformAwsDao.read(runningInstance.getPlatformNo());
                if (BooleanUtils.isFalse(platformAws.getVpc())) {
                    // Non-VPC AWS: use the private IP.
                    accessIp = runningInstance.getPrivateIp();
                }
            } else if (PCCConstant.PLATFORM_TYPE_CLOUDSTACK.equals(platform.getPlatformType())) {
                // CloudStack: keep the public IP (redundant reassignment kept).
                accessIp = runningInstance.getPublicIp();
            } else if (PCCConstant.PLATFORM_TYPE_VMWARE.equals(platform.getPlatformType())) {
                // VMware: private IP.
                accessIp = runningInstance.getPrivateIp();
            } else if (PCCConstant.PLATFORM_TYPE_NIFTY.equals(platform.getPlatformType())) {
                // Nifty: private IP.
                accessIp = runningInstance.getPrivateIp();
            } else if (PCCConstant.PLATFORM_TYPE_VCLOUD.equals(platform.getPlatformType())) {
                // vCloud: private IP.
                accessIp = runningInstance.getPrivateIp();
            } else if (PCCConstant.PLATFORM_TYPE_AZURE.equals(platform.getPlatformType())) {
                // Azure: private IP.
                accessIp = runningInstance.getPrivateIp();
            } else if (PCCConstant.PLATFORM_TYPE_OPENSTACK.equals(platform.getPlatformType())) {
                // OpenStack: keep the public IP (redundant reassignment kept).
                accessIp = runningInstance.getPublicIp();
            }
        }
        accessIps.put(runningInstance.getInstanceNo().toString(), accessIp);
    }
    map.put("accessIps", accessIps);

    // Global flag read from configuration; exposed to the template as-is.
    boolean unDetachVolume = BooleanUtils.toBoolean(Config.getProperty("unDetachVolume"));
    map.put("unDetachVolume", unDetachVolume);

    return map;
}
From source file:com.vmware.bdd.cli.commands.ClusterCommands.java
/**
 * CLI command "cluster resize". Two mutually exclusive modes:
 * scale out/in (instanceNum &gt; 0 with cpu/mem == 0) or scale up/down
 * (instanceNum == 0 with cpuNumPerNode and/or memCapacityMbPerNode &gt; 0).
 * Mixing the modes, supplying no positive value, or negative values are all
 * reported as command failures.
 */
@CliCommand(value = "cluster resize", help = "Resize a cluster")
public void resizeCluster(
        @CliOption(key = { "name" }, mandatory = true, help = "The cluster name") final String name,
        @CliOption(key = {
                "nodeGroup" }, mandatory = true, help = "The node group name") final String nodeGroup,
        @CliOption(key = {
                "instanceNum" }, mandatory = false, unspecifiedDefaultValue = "0", help = "The new instance number, should be larger than 0") final int instanceNum,
        @CliOption(key = {
                "cpuNumPerNode" }, mandatory = false, unspecifiedDefaultValue = "0", help = "The number of vCPU for the nodes in this group") final int cpuNumber,
        @CliOption(key = {
                "memCapacityMbPerNode" }, mandatory = false, unspecifiedDefaultValue = "0", help = "The number of memory size in Mb for the nodes in this group") final long memory,
        @CliOption(key = {
                "force" }, mandatory = false, specifiedDefaultValue = "true", unspecifiedDefaultValue = "false", help = "Ignore errors during resizing cluster") final Boolean force,
        @CliOption(key = {
                "skipVcRefresh" }, mandatory = false, help = "flag to skip refreshing VC resources") final Boolean skipVcRefresh) {
    // Valid combinations: scale out/in XOR scale up/down.
    if ((instanceNum > 0 && cpuNumber == 0 && memory == 0)
            || (instanceNum == 0 && (cpuNumber > 0 || memory > 0))) {
        try {
            ClusterRead cluster = restClient.get(name, false);
            if (cluster == null) {
                CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_RESIZE,
                        Constants.OUTPUT_OP_RESULT_FAIL, "cluster " + name + " does not exist.");
                return;
            }
            // Verify the target node group exists in the cluster.
            // (A former check disallowing zookeeper node-group scale-out was
            // removed — TODO emma: do not check as client do not know who is
            // Zookeeper.)
            List<NodeGroupRead> ngs = cluster.getNodeGroups();
            boolean found = false;
            for (NodeGroupRead ng : ngs) {
                if (ng.getName().equals(nodeGroup)) {
                    found = true;
                    break;
                }
            }
            if (!found) {
                CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_RESIZE,
                        Constants.OUTPUT_OP_RESULT_FAIL, "node group " + nodeGroup + " does not exist.");
                return;
            }
            TaskRead taskRead = null;
            if (instanceNum > 0) {
                // Scale out/in: pass force + skipVcRefresh as query params.
                Map<String, String> queryStrings = new HashMap<String, String>();
                queryStrings.put(Constants.FORCE_CLUSTER_OPERATION_KEY, force.toString());
                queryStrings.put(Constants.REST_PARAM_SKIP_REFRESH_VC,
                        Boolean.toString(BooleanUtils.toBoolean(skipVcRefresh)));
                restClient.resize(name, nodeGroup, instanceNum, queryStrings);
            } else if (cpuNumber > 0 || memory > 0) {
                // Scale up/down requires the cluster services to be running.
                if (!cluster.getStatus().isActiveServiceStatus()) {
                    CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_RESIZE,
                            Constants.OUTPUT_OP_RESULT_FAIL, "Cluster must be in 'RUNNING' state to scale up/down");
                    return;
                }
                ResourceScale resScale = new ResourceScale(name, nodeGroup, cpuNumber, memory);
                taskRead = restClient.scale(resScale);
            }
            CommandsUtils.printCmdSuccess(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_RESULT_RESIZE);
            // Only the scale up/down path yields a task to report on.
            if (taskRead != null) {
                System.out.println();
                printScaleReport(taskRead, name, nodeGroup);
            }
        } catch (CliRestException e) {
            CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_RESIZE,
                    Constants.OUTPUT_OP_RESULT_FAIL, e.getMessage());
        }
    } else {
        // Explain exactly why the parameter combination is invalid.
        if (instanceNum > 0 && (cpuNumber > 0 || memory > 0)) {
            CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_RESIZE,
                    Constants.OUTPUT_OP_RESULT_FAIL,
                    "Can not scale out/in and scale up/down at the same time, you have to run those commands separately");
        } else if (instanceNum == 0 && cpuNumber == 0 && memory == 0) {
            CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_RESIZE,
                    Constants.OUTPUT_OP_RESULT_FAIL,
                    "You must specify one positive value for instanceNum/cpuNumPerNode/memCapacityMbPerNode");
        } else {
            // Collect every negative parameter for the error message.
            List<String> invalidParams = new ArrayList<String>();
            if (instanceNum < 0) {
                invalidParams.add("instanceNum=" + instanceNum);
            }
            if (cpuNumber < 0) {
                invalidParams.add("cpuNumPerNode=" + cpuNumber);
            }
            if (memory < 0) {
                invalidParams.add("memCapacityMbPerNode=" + memory);
            }
            CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_RESIZE,
                    Constants.OUTPUT_OP_RESULT_FAIL,
                    Constants.INVALID_VALUE + " " + StringUtils.join(invalidParams, ", "));
        }
    }
}
From source file:com.edgenius.wiki.installation.UpgradeServiceImpl.java
@SuppressWarnings("unused") private void up3000To3100() throws Exception { log.info("Version 3.0 to 3.1 is upgarding"); String root = DataRoot.getDataRoot(); if (FileUtil.exist(root + Server.FILE)) { Server server = new Server(); Properties prop = FileUtil.loadProperties(root + Server.FILE); server.syncFrom(prop);//w w w . j a v a 2s . c o m if (server.getMqServerEmbedded() == null || BooleanUtils.toBoolean(server.getMqServerEmbedded())) { //embedded if (!server.getMqServerUrl().startsWith("tcp://")) { server.setMqServerUrl( "tcp://" + server.getMqServerUrl() + "?wireFormat.maxInactivityDuration=0"); server.syncTo(prop); prop.store(FileUtil.getFileOutputStream(root + Server.FILE), "save by system program"); } } } //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // database - remove all quartz tables - we don't backup Exportable job(backup and remove space) - it is not perfect but not big issue. if (FileUtil.exist(root + Server.FILE)) { Server server = new Server(); Properties prop = FileUtil.loadProperties(root + Server.FILE); server.syncFrom(prop); String dbType = server.getDbType(); String migrateSQL = dbType + "-3000-3100.sql"; DBLoader loader = new DBLoader(); ConnectionProxy con = loader.getConnection(dbType, server.getDbUrl(), server.getDbSchema(), server.getDbUsername(), server.getDbPassword()); loader.runSQLFile(dbType, migrateSQL, con); //reload quartz table log.info("Initialize quartz tables for system..."); Statement stat = con.createStatement(); Statement dropStat = con.createStatement(); List<String> lines = loader.loadSQLFile(dbType, dbType + "-quartz.sql"); for (String sql : lines) { sql = sql.replaceAll("\n", " ").trim(); if (sql.toLowerCase().startsWith("drop ")) { try { dropStat.execute(sql); } catch (Exception e) { log.error("Drop operation failed...." + sql); } continue; } stat.addBatch(sql); } stat.executeBatch(); dropStat.close(); stat.close(); con.close(); } }
From source file:com.oneops.transistor.ws.rest.TransistorRestController.java
@RequestMapping(value = "platforms/{platformId}/deployments/scaledown", method = RequestMethod.POST) @ResponseBody/*from ww w. ja v a 2s . c o m*/ public CmsDeployment scaleDown(@PathVariable long platformId, @RequestBody Map<String, String> paramMap, @RequestHeader(value = "X-Cms-User", required = false) String userId) { int scaleDownBy = 0; int minComputesInEachCloud = 3; boolean ensureEvenScale = true; if (paramMap != null) { scaleDownBy = NumberUtils.toInt(paramMap.get("scaleDownBy"), 0); minComputesInEachCloud = NumberUtils.toInt(paramMap.get("minComputesInEachCloud"), 3); if (paramMap.get("ensureEvenScale") != null) { ensureEvenScale = BooleanUtils.toBoolean(paramMap.get("ensureEvenScale")); } } if (scaleDownBy < 1) { throw new TransistorException(CmsError.TRANSISTOR_EXCEPTION, "scaleDownBy value must be greater than 0"); } try { if (userId == null) userId = "oneops-system"; return baProcessor.scaleDown(platformId, scaleDownBy, minComputesInEachCloud, ensureEvenScale, userId); } catch (Exception te) { logger.error("Error while submitting scale-down deployment: ", te); throw te; } }
From source file:com.vmware.bdd.cli.commands.ClusterCommands.java
private void resumeCreateCluster(final String name, Boolean skipVcRefresh) { Map<String, String> queryStrings = new HashMap<String, String>(); queryStrings.put(Constants.QUERY_ACTION_KEY, Constants.QUERY_ACTION_RESUME); queryStrings.put(Constants.REST_PARAM_SKIP_REFRESH_VC, Boolean.toString(BooleanUtils.toBoolean(skipVcRefresh))); try {// w ww .j av a 2s .c om restClient.actionOps(name, queryStrings); CommandsUtils.printCmdSuccess(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_RESULT_RESUME); } catch (CliRestException e) { CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_RESUME, Constants.OUTPUT_OP_RESULT_FAIL, e.getMessage()); } }
From source file:jp.primecloud.auto.process.puppet.PuppetComponentProcess.java
protected void stopVolume(Long componentNo, Long instanceNo) { // ???????/*from www . j a v a 2 s . c o m*/ boolean unDetachVolume = BooleanUtils.toBoolean(Config.getProperty("unDetachVolume")); if (unDetachVolume) { return; } Instance instance = instanceDao.read(instanceNo); Platform platform = platformDao.read(instance.getPlatformNo()); // TODO CLOUD BRANCHING if (PCCConstant.PLATFORM_TYPE_AWS.equals(platform.getPlatformType())) { stopAwsVolume(componentNo, instanceNo); } else if (PCCConstant.PLATFORM_TYPE_CLOUDSTACK.equals(platform.getPlatformType())) { stopCloudStackVolume(componentNo, instanceNo); } else if (PCCConstant.PLATFORM_TYPE_VMWARE.equals(platform.getPlatformType())) { stopVmwareDisk(componentNo, instanceNo); } else if (PCCConstant.PLATFORM_TYPE_VCLOUD.equals(platform.getPlatformType())) { stopVcloudDisk(componentNo, instanceNo); } else if (PCCConstant.PLATFORM_TYPE_AZURE.equals(platform.getPlatformType())) { stopAzureDisk(componentNo, instanceNo); } else if (PCCConstant.PLATFORM_TYPE_OPENSTACK.equals(platform.getPlatformType())) { stopOpenstackVolume(componentNo, instanceNo); } else if (PCCConstant.PLATFORM_TYPE_NIFTY.equals(platform.getPlatformType())) { stopNiftyVolume(componentNo, instanceNo); } }
From source file:com.redhat.rhn.manager.system.SystemManager.java
/** * Tests whether or not a given server can be entitled with a specific entitlement * @param serverId The Id of the server in question * @param ent The entitlement to test// www. j a v a 2 s . c o m * @return Returns true or false depending on whether or not the server can be * entitled to the passed in entitlement. */ public static boolean canEntitleServer(Long serverId, Entitlement ent) { if (log.isDebugEnabled()) { log.debug("canEntitleServer.serverId: " + serverId + " ent: " + ent.getHumanReadableLabel()); } Map<String, Object> in = new HashMap<String, Object>(); in.put("sid", serverId); in.put("entitlement", ent.getLabel()); Map<String, Integer> out = new HashMap<String, Integer>(); out.put("retval", new Integer(Types.NUMERIC)); CallableMode m = ModeFactory.getCallableMode("System_queries", "can_entitle_server"); Map<String, Object> result = m.execute(in, out); boolean retval = BooleanUtils.toBoolean(((Long) result.get("retval")).intValue()); log.debug("canEntitleServer.returning: " + retval); return retval; }