Example usage for org.apache.commons.configuration PropertiesConfiguration setProperty

List of usage examples for org.apache.commons.configuration PropertiesConfiguration setProperty

Introduction

On this page you can find example usages of org.apache.commons.configuration PropertiesConfiguration setProperty.

Prototype

public void setProperty(String key, Object value) 

Document

Sets a new value for the specified property.
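
Before the full examples below, here is a minimal, self-contained sketch of the common pattern they follow (the file path, keys, and values are placeholders, and the properties file is assumed to already exist): load a properties file, write values with setProperty, and persist them with save(). Unlike addProperty, setProperty replaces any value already stored under the key.

import org.apache.commons.configuration.ConfigurationException;
import org.apache.commons.configuration.PropertiesConfiguration;

public class SetPropertyExample {
    public static void main(String[] args) {
        try {
            // Load the properties file (placeholder path; assumed to exist)
            PropertiesConfiguration config = new PropertiesConfiguration("example.properties");

            // Write a default only if the key is not present yet
            if (config.getProperty("app.name") == null) {
                config.setProperty("app.name", "ExampleApp");
            }

            // setProperty overwrites any existing value for this key
            config.setProperty("app.timeoutSeconds", 30);

            // Persist the changes back to the file
            config.save();
        } catch (ConfigurationException ex) {
            ex.printStackTrace();
        }
    }
}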

Usage

From source file:com.jorge.db.Reportes.java

public Reportes() {
    try {
        PropertiesConfiguration config = new PropertiesConfiguration("./quijotelu/Reportes.properties");
        if (config.getProperty("reporte.facturasNoAutorizadas") == null) {
            config.setProperty("reporte.facturasNoAutorizadas",
                    "SELECT f.CODIGO,f.NUMERO,f.FECHA,f.TOTAL,f.DOCUMENTO,f.RAZON_SOCIAL,f.MAIL,(SELECT e.ESTADO FROM ELE_DOCUMENTO_ELECTRONICO e where e.CODIGO=f.CODIGO and e.NUMERO=f.NUMERO) as estado FROM V_INFO_FACTURA_MAESTRO f where CODIGO||NUMERO not in (SELECT e.codigo||e.numero FROM ELE_DOCUMENTO_ELECTRONICO e where e.ESTADO='AUTORIZADO') and FECHA like ?");
            config.save();
        }
        if (config.getProperty("reporte.facturasAutorizadas") == null) {
            config.setProperty("reporte.facturasAutorizadas",
                    "SELECT f.CODIGO,f.NUMERO,f.FECHA,f.TOTAL,f.DOCUMENTO,f.RAZON_SOCIAL,f.MAIL,FUN_CLAVE_ACCESO(f.FECHA,f.CODIGO,f.NUMERO) as xml,FUN_CLAVE_ACCESO(f.FECHA,f.CODIGO,f.NUMERO) as pdf FROM ELE_DOCUMENTO_ELECTRONICO e INNER JOIN V_INFO_FACTURA_MAESTRO f ON f.CODIGO  = e.CODIGO AND f.NUMERO = e.NUMERO where e.ESTADO='AUTORIZADO' and FECHA like ?");
            config.save();
        }
        if (config.getProperty("reporte.retencionesNoAutorizadas") == null) {
            config.setProperty("reporte.retencionesNoAutorizadas",
                    "SELECT f.CODIGO,f.NUMERO,f.FECHA,f.DOCUMENTO,f.RAZON_SOCIAL,f.MAIL,(SELECT e.ESTADO FROM ELE_DOCUMENTO_ELECTRONICO e where e.CODIGO=f.CODIGO and e.NUMERO=f.NUMERO) as estado FROM V_INFO_RETENCION_MAESTRO f where CODIGO||NUMERO not in (SELECT e.codigo||e.numero FROM ELE_DOCUMENTO_ELECTRONICO e where e.ESTADO='AUTORIZADO') and FECHA like ?");
            config.save();
        }
        if (config.getProperty("reporte.retencionesAutorizadas") == null) {
            config.setProperty("reporte.retencionesAutorizadas",
                    "SELECT f.CODIGO,f.NUMERO,f.FECHA,f.DOCUMENTO,f.RAZON_SOCIAL,f.MAIL,FUN_CLAVE_ACCESO(f.FECHA,f.CODIGO,f.NUMERO) as xml,FUN_CLAVE_ACCESO(f.FECHA,f.CODIGO,f.NUMERO) as pdf FROM ELE_DOCUMENTO_ELECTRONICO e INNER JOIN V_INFO_RETENCION_MAESTRO f ON f.CODIGO  = e.CODIGO AND f.NUMERO = e.NUMERO where e.ESTADO='AUTORIZADO' and FECHA like ?");
            config.save();
        }
        if (config.getProperty("reporte.notasCreditoNoAutorizadas") == null) {
            config.setProperty("reporte.notasCreditoNoAutorizadas",
                    "SELECT f.CODIGO,f.NUMERO,f.FECHA,f.TOTAL_MODIFICADO,f.DOCUMENTO,f.RAZON_SOCIAL,f.MAIL,(SELECT e.ESTADO FROM ELE_DOCUMENTO_ELECTRONICO e where e.CODIGO=f.CODIGO and e.NUMERO=f.NUMERO) as estado FROM V_INFO_NOTA_CREDITO_MAESTRO f where CODIGO||NUMERO not in (SELECT e.codigo||e.numero FROM ELE_DOCUMENTO_ELECTRONICO e where e.ESTADO='AUTORIZADO') and FECHA like ?");
            config.save();
        }
        if (config.getProperty("reporte.notasCreditoAutorizadas") == null) {
            config.setProperty("reporte.notasCreditoAutorizadas",
                    "SELECT f.CODIGO,f.NUMERO,f.FECHA,f.TOTAL_MODIFICADO,f.DOCUMENTO,f.RAZON_SOCIAL,f.MAIL,FUN_CLAVE_ACCESO(f.FECHA,f.CODIGO,f.NUMERO) as xml,FUN_CLAVE_ACCESO(f.FECHA,f.CODIGO,f.NUMERO) as pdf FROM ELE_DOCUMENTO_ELECTRONICO e INNER JOIN V_INFO_NOTA_CREDITO_MAESTRO f ON f.CODIGO  = e.CODIGO AND f.NUMERO = e.NUMERO where e.ESTADO='AUTORIZADO' and FECHA like ?");
            config.save();
        }
        if (config.getProperty("reporte.guiasRemisionNoAutorizadas") == null) {
            config.setProperty("reporte.guiasRemisionNoAutorizadas",
                    "SELECT d.CODIGO,d.NUMERO,d.FECHA,d.DIRECCION_PARTIDA,d.RAZON_SOCIAL_TRANSPORTISTA,d.DOCUMENTO,d.PLACA,(SELECT e.ESTADO FROM ELE_DOCUMENTO_ELECTRONICO e where e.CODIGO=d.CODIGO and e.NUMERO=d.NUMERO) as estado FROM V_INFO_GUIA_REMISION d where d.CODIGO||d.NUMERO not in (SELECT e.codigo||e.numero FROM ELE_DOCUMENTO_ELECTRONICO e where e.ESTADO='AUTORIZADO') and FECHA like ?");
            config.save();
        }
        if (config.getProperty("reporte.guiasRemisionAutorizadas") == null) {
            config.setProperty("reporte.guiasRemisionAutorizadas",
                    "SELECT f.CODIGO,f.NUMERO,f.FECHA,f.PLACA,f.DOCUMENTO,f.RAZON_SOCIAL_TRANSPORTISTA,f.DIRECCION_PARTIDA,FUN_CLAVE_ACCESO(f.FECHA,f.CODIGO,f.NUMERO) as xml,FUN_CLAVE_ACCESO(f.FECHA,f.CODIGO,f.NUMERO) as pdf FROM ELE_DOCUMENTO_ELECTRONICO e INNER JOIN V_INFO_GUIA_REMISION f ON f.CODIGO  = e.CODIGO AND f.NUMERO = e.NUMERO where e.ESTADO='AUTORIZADO' and FECHA like ?");
            config.save();
        }
        if (config.getProperty("reporte.autorizarFacturas") == null) {
            config.setProperty("reporte.autorizarFacturas",
                    "SELECT f.CODIGO,f.NUMERO FROM V_INFO_FACTURA_MAESTRO f where f.CODIGO||f.NUMERO not in (SELECT e.codigo||e.numero FROM ELE_DOCUMENTO_ELECTRONICO e where e.ESTADO='AUTORIZADO') and f.FECHA like ?");
            config.save();
        }
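        // With the default list delimiter (','), the commas in the SQL cause getProperty()
        // to return a List; its toString() is wrapped in '[' and ']', which are stripped here.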
        facturasNoAutorizadas = config.getProperty("reporte.facturasNoAutorizadas").toString().replace("[", "")
                .replace("]", "");
        facturasAutorizadas = config.getProperty("reporte.facturasAutorizadas").toString().replace("[", "")
                .replace("]", "");
        retencionesNoAutorizadas = config.getProperty("reporte.retencionesNoAutorizadas").toString()
                .replace("[", "").replace("]", "");
        retencionesAutorizadas = config.getProperty("reporte.retencionesAutorizadas").toString()
                .replace("[", "").replace("]", "");
        notasCreditoNoAutorizadas = config.getProperty("reporte.notasCreditoNoAutorizadas").toString()
                .replace("[", "").replace("]", "");
        notasCreditoAutorizadas = config.getProperty("reporte.notasCreditoAutorizadas").toString()
                .replace("[", "").replace("]", "");
        guiasRemisionNoAutorizadas = config.getProperty("reporte.guiasRemisionNoAutorizadas").toString()
                .replace("[", "").replace("]", "");
        guiasRemisionAutorizadas = config.getProperty("reporte.guiasRemisionAutorizadas").toString()
                .replace("[", "").replace("]", "");
        autorizarFacturas = config.getProperty("reporte.autorizarFacturas").toString().replace("[", "")
                .replace("]", "");
    } catch (ConfigurationException ex) {
        Logger.getLogger(Reportes.class.getName()).log(Level.SEVERE, null, ex);
    }
}

From source file:com.runwaysdk.configuration.FlattenedProfileConfigurationTest.java

@Test
public void testActuallyUsingFlattenedProfile() {
    // Change a property so we know we're actually using the flattened properties
    // and not the unflattened ones
    try {
        PropertiesConfiguration tprops = new PropertiesConfiguration(
                new File(baseDir + "/target/test-classes/flat/terraframe.properties"));
        String oldValue = tprops.getString("deploy.appname");

        tprops.setProperty("deploy.appname", "Actually Using Flattened Profile");
        tprops.save();

        try {
            CommonProperties.dumpInstance();
            String appName = CommonProperties.getDeployAppName();

            assertEquals("Actually Using Flattened Profile", appName);
        } finally {
            tprops.setProperty("deploy.appname", oldValue);
            tprops.save();

        }
    } catch (ConfigurationException e) {
        throw new RunwayConfigurationException(e);
    }

    CommonProperties.dumpInstance();
}

From source file:gov.nih.nci.cacisweb.action.SecureFTPAddAction.java

@Override
public String execute() throws Exception {
    log.debug("execute() - START");
    String secureFTPPropertyFileLocation = CaCISUtil
            .getProperty(CaCISWebConstants.COM_PROPERTY_NAME_SECFTP_PROPERTIES_FILE_LOCATION);
    String secureFTPKeystoreLocation = CaCISUtil.getPropertyFromPropertiesFile(secureFTPPropertyFileLocation,
            CaCISUtil.getProperty(CaCISWebConstants.COM_PROPERTY_NAME_SECFTP_TRUSTSTORE_LOCATION_PROP_NAME));
    String secureFTPKeystorePassword = CaCISUtil.getPropertyFromPropertiesFile(secureFTPPropertyFileLocation,
            CaCISUtil.getProperty(CaCISWebConstants.COM_PROPERTY_NAME_SECFTP_TRUSTSTORE_PASSWORD_PROP_NAME));
    try {
        CaCISUtil caCISUtil = new CaCISUtil();
        KeyStore keystore = caCISUtil.getKeystore(secureFTPKeystoreLocation,
                CaCISWebConstants.COM_KEYSTORE_TYPE_JKS, secureFTPKeystorePassword);

        if (keystore.containsAlias(secureFTPBean.getCertificateAlias())) {
            log.error(getText("secureFTPBean.duplicateKey"));
            addFieldError("secureFTPBean.certificateAlias", getText("secureFTPBean.duplicateKey"));
        }

        if (StringUtils.contains(secureFTPBean.getCertificateAlias(), "ftps")) {
            if (StringUtils.isBlank(secureFTPBean.getCertificateFileName())) {
                log.error(getText("secureFTPBean.certificateRequired"));
                addFieldError("secureFTPBean.certificateFileName",
                        getText("secureFTPBean.certificateRequired"));
                caCISUtil.releaseKeystore();
                return INPUT;
            } else {
                caCISUtil.releaseKeystore();
                FileInputStream certificateStream = new FileInputStream(secureFTPBean.getCertificate());

                CertificateFactory cf = CertificateFactory.getInstance("X.509");
                java.security.cert.Certificate cert = cf.generateCertificate(certificateStream);
                // Add the certificate
                keystore.setCertificateEntry(secureFTPBean.getCertificateAlias(), cert);

                // Save the new keystore contents
                FileOutputStream out = new FileOutputStream(new File(secureFTPKeystoreLocation));
                keystore.store(out, secureFTPKeystorePassword.toCharArray());
                out.close();
            }
        }

        // add the new entry to the FTP configuration properties file
        PropertiesConfiguration config = new PropertiesConfiguration(
                CaCISUtil.getProperty(CaCISWebConstants.COM_PROPERTY_NAME_SECFTP_CONFIG_FILE_LOCATION));
        config.setProperty(secureFTPBean.getCertificateAlias(), "");
        config.save();
    } catch (KeystoreInstantiationException kie) {
        log.error(kie.getMessage());
        addActionError(getText("exception.keystoreInstantiation"));
        return ERROR;
    } catch (CertificateException ce) {
        log.error(CaCISUtil.getStackTrace(ce));
        addActionError(getText("exception.certification"));
        return INPUT;
    }
    addActionMessage(getText("secureFTPBean.addCertificateSuccessful"));
    log.debug("execute() - END");
    return SUCCESS;
}

From source file:com.linkedin.pinot.core.query.scheduler.MultiLevelPriorityQueueTest.java

@Test
public void testTakeWithLimits() throws OutOfCapacityError, BrokenBarrierException, InterruptedException {
    // Test that take() will not return a query if that group is already using hardLimit resources
    PropertiesConfiguration conf = new PropertiesConfiguration();
    conf.setProperty(ResourceManager.QUERY_WORKER_CONFIG_KEY, 40);
    conf.setProperty(ResourceManager.QUERY_RUNNER_CONFIG_KEY, 10);
    conf.setProperty(ResourceLimitPolicy.TABLE_THREADS_SOFT_LIMIT, 20);
    conf.setProperty(ResourceLimitPolicy.TABLE_THREADS_HARD_LIMIT, 80);
    PolicyBasedResourceManager rm = new PolicyBasedResourceManager(conf);
    MultiLevelPriorityQueue queue = createQueue(conf, rm);
    queue.put(createQueryRequest(groupOne, metrics));
    queue.put(createQueryRequest(groupOne, metrics));
    queue.put(createQueryRequest(groupTwo, metrics));
    // group one has higher priority but it's above the soft thread limit
    TestSchedulerGroup testGroupOne = groupFactory.groupMap.get(groupOne);
    TestSchedulerGroup testGroupTwo = groupFactory.groupMap.get(groupTwo);

    testGroupOne.addReservedThreads(rm.getTableThreadsSoftLimit() + 1);
    QueueReader reader = new QueueReader(queue);
    reader.startAndWaitForRead();
    assertEquals(reader.readQueries.size(), 1);
    assertEquals(reader.readQueries.poll().getSchedulerGroup().name(), groupTwo);

    // add one more groupTwo request
    queue.put(createQueryRequest(groupTwo, metrics));
    reader = new QueueReader(queue);
    reader.startAndWaitForRead();
    assertEquals(reader.readQueries.size(), 1);
    assertEquals(reader.readQueries.poll().getSchedulerGroup().name(), groupTwo);

    // add one more groupTwo request and set groupTwo's reserved threads higher than groupOne's
    queue.put(createQueryRequest(groupTwo, metrics));
    testGroupTwo.addReservedThreads(testGroupOne.totalReservedThreads() + 1);
    reader = new QueueReader(queue);
    reader.startAndWaitForRead();
    assertEquals(reader.readQueries.size(), 1);
    assertEquals(reader.readQueries.poll().getSchedulerGroup().name(), groupOne);

    // set groupOne above hard limit
    testGroupOne.addReservedThreads(rm.getTableThreadsHardLimit());
    reader = new QueueReader(queue);
    reader.startAndWaitForRead();
    assertEquals(reader.readQueries.size(), 1);
    assertEquals(reader.readQueries.poll().getSchedulerGroup().name(), groupTwo);

    // all groups above hard limit
    queue.put(createQueryRequest(groupTwo, metrics));
    queue.put(createQueryRequest(groupTwo, metrics));
    queue.put(createQueryRequest(groupOne, metrics));
    testGroupTwo.addReservedThreads(rm.getTableThreadsHardLimit());
    reader = new QueueReader(queue);
    reader.startAndWaitForQueueWakeup();
    assertEquals(reader.readQueries.size(), 0);
    // try again
    sleepForQueueWakeup(queue);
    assertEquals(reader.readQueries.size(), 0);

    // now release groupTwo's reserved threads (i.e., a query finished)
    testGroupTwo.releasedReservedThreads(testGroupTwo.totalReservedThreads());
    sleepForQueueWakeup(queue);
    assertEquals(reader.readQueries.size(), 1);
}

From source file:com.jorge.propiedades.General.java

public General() {
    try {
        PropertiesConfiguration config = new PropertiesConfiguration("./quijotelu/General.properties");
        if (config.getProperty("general.BaseDatos") == null) {
            /*
             Database connection type:
             oracle, sqlserver
             */
            config.setProperty("general.BaseDatos", "oracle");
            config.save();
        }
        if (config.getProperty("general.Publicidad") == null) {
            config.setProperty("general.Publicidad", "si");
            config.save();
        }
        if (config.getProperty("general.Nombre") == null) {
            config.setProperty("general.Nombre", "QuijoteLu");
            config.save();
        }
        BaseDatos = (String) config.getProperty("general.BaseDatos");
        Publicidad = (String) config.getProperty("general.Publicidad");
        Nombre = (String) config.getProperty("general.Nombre");

    } catch (ConfigurationException ex) {
        Logger.getLogger(General.class.getName()).log(Level.SEVERE, null, ex);
    }
}

From source file:com.linkedin.pinot.core.query.scheduler.PrioritySchedulerTest.java

@Test(enabled = false)
public void testOutOfCapacityResponse() throws Exception {
    PropertiesConfiguration conf = new PropertiesConfiguration();
    conf.setProperty(ResourceLimitPolicy.TABLE_THREADS_HARD_LIMIT, 5);
    conf.setProperty(MultiLevelPriorityQueue.MAX_PENDING_PER_GROUP_KEY, 1);
    TestPriorityScheduler scheduler = TestPriorityScheduler.create(conf);
    scheduler.start();
    List<ListenableFuture<byte[]>> results = new ArrayList<>();
    results.add(scheduler.submit(createServerQueryRequest("1", metrics)));
    TestSchedulerGroup group = TestPriorityScheduler.groupFactory.groupMap.get("1");
    group.addReservedThreads(10);
    group.addLast(createQueryRequest("1", metrics));
    results.add(scheduler.submit(createServerQueryRequest("1", metrics)));
    DataTable dataTable = DataTableFactory.getDataTable(results.get(1).get());
    assertTrue(dataTable.getMetadata().containsKey(
            DataTable.EXCEPTION_METADATA_KEY + QueryException.SERVER_OUT_OF_CAPACITY_ERROR.getErrorCode()));
    scheduler.stop();
}

From source file:com.splout.db.common.SploutConfiguration.java

/**
 * Get the Splout configuration using a two-level configuration: defaults + custom overrides.
 */
public static SploutConfiguration get(String rootDir) {
    SploutConfiguration properties = new SploutConfiguration();

    PropertiesConfiguration config = load(rootDir, SPLOUT_PROPERTIES, false);
    if (config != null) {
        properties.addConfiguration(config);
    }
    config = load(rootDir, SPLOUT_PROPERTIES + ".default", true);
    properties.addConfiguration(config);

    // The following lines replace the default "localhost" with the local IP for convenience:
    String myIp = "localhost";

    try {
        Collection<InetAddress> iNetAddresses = GetIPAddresses.getAllLocalIPs();
        // but only if there is Internet connectivity!
        if (iNetAddresses != null) {
            Iterator<InetAddress> it = iNetAddresses.iterator();
            if (it.hasNext()) {
                InetAddress address = it.next();
                if (address.getHostAddress() != null) {
                    myIp = address.getHostAddress();
                }
            }
        }
    } catch (IOException e) {
        throw new RuntimeException(e);
    }

    if (config.getString(QNodeProperties.HOST) != null
            && config.getString(QNodeProperties.HOST).equals("localhost")) {
        config.setProperty(QNodeProperties.HOST, myIp);
    }

    if (config.getString(DNodeProperties.HOST) != null
            && config.getString(DNodeProperties.HOST).equals("localhost")) {
        config.setProperty(DNodeProperties.HOST, myIp);
    }

    return properties;
}

From source file:com.linkedin.pinot.core.query.scheduler.PrioritySchedulerTest.java

@Test
public void testStartStopQueries() throws ExecutionException, InterruptedException, IOException {
    TestPriorityScheduler scheduler = TestPriorityScheduler.create();
    scheduler.start();

    PropertiesConfiguration conf = new PropertiesConfiguration();
    conf.setProperty(ResourceLimitPolicy.TABLE_THREADS_HARD_LIMIT, 5);
    conf.setProperty(MultiLevelPriorityQueue.MAX_PENDING_PER_GROUP_KEY, 5);
    List<ListenableFuture<byte[]>> results = new ArrayList<>();
    results.add(scheduler.submit(createServerQueryRequest("1", metrics)));
    TestSchedulerGroup group = TestPriorityScheduler.groupFactory.groupMap.get("1");
    group.addReservedThreads(10);
    group.addLast(createQueryRequest("1", metrics));
    results.add(scheduler.submit(createServerQueryRequest("1", metrics)));

    scheduler.stop();
    long queueWakeTimeMicros = ((MultiLevelPriorityQueue) scheduler.getQueue()).getWakeupTimeMicros();
    long sleepTimeMs = queueWakeTimeMicros >= 1000 ? queueWakeTimeMicros / 1000 + 10 : 10;
    Thread.sleep(sleepTimeMs);
    int hasServerShuttingDownError = 0;
    for (ListenableFuture<byte[]> result : results) {
        DataTable table = DataTableFactory.getDataTable(result.get());
        hasServerShuttingDownError += table.getMetadata().containsKey(
                DataTable.EXCEPTION_METADATA_KEY + QueryException.SERVER_SCHEDULER_DOWN_ERROR.getErrorCode())
                        ? 1
                        : 0;
    }
    assertTrue(hasServerShuttingDownError > 0);
}

From source file:com.linkedin.pinot.core.query.scheduler.PrioritySchedulerTest.java

@Test
public void testMultiThreaded() throws InterruptedException {
    // add queries from multiple threads and verify that all of them are executed
    PropertiesConfiguration conf = new PropertiesConfiguration();
    conf.setProperty(ResourceManager.QUERY_WORKER_CONFIG_KEY, 60);
    conf.setProperty(ResourceManager.QUERY_RUNNER_CONFIG_KEY, 20);
    conf.setProperty(ResourceLimitPolicy.THREADS_PER_QUERY_PCT, 50);
    conf.setProperty(ResourceLimitPolicy.TABLE_THREADS_HARD_LIMIT, 60);
    conf.setProperty(ResourceLimitPolicy.TABLE_THREADS_SOFT_LIMIT, 40);
    conf.setProperty(MultiLevelPriorityQueue.MAX_PENDING_PER_GROUP_KEY, 10);

    final TestPriorityScheduler scheduler = TestPriorityScheduler.create(conf);
    scheduler.start();
    final Random random = new Random();
    final ConcurrentLinkedQueue<ListenableFuture<byte[]>> results = new ConcurrentLinkedQueue<>();
    final int numThreads = 3;
    final int queriesPerThread = 10;
    numQueries = new CountDownLatch(numThreads * queriesPerThread);

    for (int i = 0; i < numThreads; i++) {
        final int index = i;
        new Thread(new Runnable() {
            @Override
            public void run() {
                for (int j = 0; j < queriesPerThread; j++) {
                    results.add(scheduler.submit(createServerQueryRequest(Integer.toString(index), metrics)));
                    Uninterruptibles.sleepUninterruptibly(random.nextInt(100), TimeUnit.MILLISECONDS);
                }
            }
        }).start();
    }
    numQueries.await();
    scheduler.stop();
}

From source file:com.linkedin.pinot.core.query.scheduler.PrioritySchedulerTest.java

@Test
public void testOneQuery()
        throws InterruptedException, ExecutionException, IOException, BrokenBarrierException {
    PropertiesConfiguration conf = new PropertiesConfiguration();
    conf.setProperty(ResourceLimitPolicy.THREADS_PER_QUERY_PCT, 50);
    conf.setProperty(ResourceLimitPolicy.TABLE_THREADS_HARD_LIMIT, 40);
    conf.setProperty(ResourceLimitPolicy.TABLE_THREADS_SOFT_LIMIT, 20);
    useBarrier = true;
    startupBarrier = new CyclicBarrier(2);
    validationBarrier = new CyclicBarrier(2);

    TestPriorityScheduler scheduler = TestPriorityScheduler.create(conf);
    int totalPermits = scheduler.getRunningQueriesSemaphore().availablePermits();
    scheduler.start();
    ListenableFuture<byte[]> result = scheduler.submit(createServerQueryRequest("1", metrics));
    startupBarrier.await();
    TestSchedulerGroup group = TestPriorityScheduler.groupFactory.groupMap.get("1");
    assertEquals(group.numRunning(), 1);
    assertEquals(group.getThreadsInUse(), 1);
    // The total number of threads allocated for query execution depends on the underlying
    // platform (number of cores). The scheduler will assign up to, but not exceeding, the total
    // number of segments. On servers with fewer cores, this can assign only 1 thread (fewer than the total segments)
    assertTrue(group.totalReservedThreads() <= 2 /* 2: numSegments in request*/);
    validationBarrier.await();
    byte[] resultData = result.get();
    DataTable table = DataTableFactory.getDataTable(resultData);
    assertEquals(table.getMetadata().get("table"), "1");
    // verify that accounting is handled right
    assertEquals(group.numPending(), 0);
    assertEquals(group.getThreadsInUse(), 0);
    assertEquals(group.totalReservedThreads(), 0);
    // -1 because we expect that 1 permit is blocked by the scheduler main thread
    assertEquals(scheduler.getRunningQueriesSemaphore().availablePermits(), totalPermits - 1);
    scheduler.stop();
}