Example usage for org.apache.commons.lang ArrayUtils addAll

Introduction

On this page you can find example usage for org.apache.commons.lang ArrayUtils addAll.

Prototype

public static double[] addAll(double[] array1, double[] array2) 

Document

Adds all the elements of the given arrays into a new array.
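
Before the project examples, here is a minimal, self-contained sketch of the concatenation behaviour described above, using a made-up class name and values for illustration; addAll is also overloaded for Object[] and the other primitive array types.

import java.util.Arrays;

import org.apache.commons.lang.ArrayUtils;

public class AddAllDemo {
    public static void main(String[] args) {
        double[] first = { 1.0, 2.0 };
        double[] second = { 3.0, 4.0 };

        // Concatenates both inputs into a new array; neither input array is modified.
        double[] joined = ArrayUtils.addAll(first, second);

        System.out.println(Arrays.toString(joined)); // prints [1.0, 2.0, 3.0, 4.0]
    }
}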

Usage

From source file:gda.hrpd.pmac.EpicsCVScanController.java

private double[] getCount() {
    double[] y1 = ArrayUtils.EMPTY_DOUBLE_ARRAY;
    try {
        // logger.info("gets MAC1Y");
        y1 = getMAC1Y();
        // logger.info("gets MAC1Y DONE");
    } catch (TimeoutException e) {
        logger.error("Timeout while gets MAC1Y", e);
        e.printStackTrace();
    } catch (CAException e) {
        logger.error("CAException while gets MAC1Y", e);
        e.printStackTrace();
    } catch (InterruptedException e) {
        logger.error("InterruptedException while gets MAC1Y", e);
    }
    double[] y2 = ArrayUtils.EMPTY_DOUBLE_ARRAY;
    try {
        // logger.info("gets MAC2Y");
        y2 = getMAC2Y();
        // logger.info("gets MAC2Y DONE");
    } catch (TimeoutException e) {
        logger.error("Timeout while gets MAC2Y", e);
        e.printStackTrace();
    } catch (CAException e) {
        logger.error("CAException while gets MAC2Y", e);
        e.printStackTrace();
    } catch (InterruptedException e) {
        logger.error("InterruptedException while gets MAC2Y", e);
    }
    double[] y3 = ArrayUtils.EMPTY_DOUBLE_ARRAY;
    try {
        // logger.info("gets MAC3Y");
        y3 = getMAC3Y();
        // logger.info("gets MAC3Y DONE");
    } catch (TimeoutException e) {
        logger.error("Timeout while gets MAC3Y", e);
        e.printStackTrace();
    } catch (CAException e) {
        logger.error("CAException while gets MAC3Y", e);
        e.printStackTrace();
    } catch (InterruptedException e) {
        logger.error("InterruptedException while gets MAC3Y", e);
    }
    double[] y4 = ArrayUtils.EMPTY_DOUBLE_ARRAY;
    try {
        // logger.info("gets MAC4Y");
        y4 = getMAC4Y();
        // logger.info("gets MAC4Y DONE");
    } catch (TimeoutException e) {
        logger.error("Timeout while gets MAC4Y", e);
        e.printStackTrace();
    } catch (CAException e) {
        logger.error("CAException while gets MAC4Y", e);
        e.printStackTrace();
    } catch (InterruptedException e) {
        logger.error("InterruptedException while gets MAC4Y", e);
    }
    double[] y5 = ArrayUtils.EMPTY_DOUBLE_ARRAY;
    try {
        // logger.info("gets MAC5Y");
        y5 = getMAC5Y();
        // logger.info("gets MAC5Y DONE");
    } catch (TimeoutException e) {
        logger.error("Timeout while gets MAC5Y", e);
        e.printStackTrace();
    } catch (CAException e) {
        logger.error("CAException while gets MAC5Y", e);
        e.printStackTrace();
    } catch (InterruptedException e) {
        logger.error("InterruptedException while gets MAC5Y", e);
    }
    return ArrayUtils.subarray(
            ArrayUtils.addAll(ArrayUtils.addAll(ArrayUtils.addAll(ArrayUtils.addAll(y1, y2), y3), y4), y5),
            16500, 305000);
}

From source file:com.linkedin.cubert.analyzer.physical.AggregateRewriter.java

private JsonNode createBlockgenForMV(ObjectNode programNode, ObjectNode cubeOperatorNode,
        Pair<ObjectNode, ObjectNode> bgInfo, String mvName, String mvPath,

        String[] mvColumns) throws AggregateRewriteException {
    String bgFactPath = null;

    String[] partitionKeys = null;
    String[] pivotKeys = null;
    String[] shufflePivotKeys = null;
    String mvInputPath = mvPath + "/avro";

    if (lineage.isBlockgenByIndex(bgInfo.getSecond())) {
        partitionKeys = JsonUtils.asArray(bgInfo.getFirst().get("partitionKeys"));

        String indexName = bgInfo.getFirst().get("index").getTextValue();
        ObjectNode jobNode = lineage.getOperatorJobNode(bgInfo.getSecond());

        // This should include BLOCK_ID, else assert.
        shufflePivotKeys = JsonUtils.asArray(((ObjectNode) (jobNode.get("shuffle"))).get("pivotKeys"));
        String indexPath = lineage.traceIndexPath(jobNode, indexName);
        System.out.println("Traced blockgen index " + indexName + " path as" + indexPath);
        System.out.println("job node = " + jobNode.toString());

        bgFactPath = indexPath;

    } else {
        bgFactPath = lineage.getDatedPathRoot((ArrayNode) (bgInfo.getSecond().get("path")));
        partitionKeys = JsonUtils.asArray(bgInfo.getSecond().get("partitionKeys"));
        pivotKeys = JsonUtils.asArray(bgInfo.getSecond().get("pivotKeys"));
        shufflePivotKeys = (String[]) ArrayUtils.addAll(new String[] { "BLOCK_ID" }, pivotKeys);
    }

    ArrayNode cacheIndexNode = JsonUtils.createArrayNode();
    cacheIndexNode.add(RewriteUtils.createObjectNode("name", mvName + "_fact_index", "path", bgFactPath));
    JsonNode mapNode = JsonUtils.makeJson(
            String.format("[{'input' : {'name':'%s', 'type': 'AVRO', 'path':['%s']}}]", mvName, mvInputPath));

    System.out.println("Blockgen partition keys = " + Arrays.toString(partitionKeys));
    ObjectNode blockIndexJoin = RewriteUtils.createObjectNode("operator", "BLOCK_INDEX_JOIN", "input", mvName,
            "output", mvName, "index", mvName + "_fact_index", "partitionKeys",
            JsonUtils.createArrayNode(partitionKeys));

    ObjectNode mapperNode = (ObjectNode) (((ArrayNode) mapNode).get(0));
    mapperNode.put("operators", JsonUtils.createArrayNode());
    ((ArrayNode) (mapperNode.get("operators"))).add(blockIndexJoin);

    JsonNode shuffleNode = RewriteUtils.createObjectNode("name", mvName, "type", "SHUFFLE", "partitionKeys",
            JsonUtils.createArrayNode(new String[] { "BLOCK_ID" }), "pivotKeys",
            JsonUtils.createArrayNode(shufflePivotKeys));

    JsonNode reduceOpNode = RewriteUtils.createObjectNode("operator", "CREATE_BLOCK", "input", mvName, "output",
            mvName + "_blockgen", "blockgenType", "BY_INDEX", "index", mvName + "_fact_index", "indexPath",
            bgFactPath, "partitionKeys", JsonUtils.createArrayNode(new String[] { "BLOCK_ID" }), "pivotKeys",
            JsonUtils.createArrayNode(shufflePivotKeys), "originalPartitionKeys",
            JsonUtils.createArrayNode(partitionKeys));

    ArrayNode reduceNode = JsonUtils.createArrayNode();
    reduceNode.add(reduceOpNode);

    ObjectNode outputNode = RewriteUtils.createObjectNode("name", mvName + "_blockgen", "path",
            mvPath + "/blockgen", "type", "RUBIX", "params",
            RewriteUtils.createObjectNode("overwrite", "true"));

    ObjectNode jobNode = RewriteUtils.createObjectNode("name", "BLOCKGEN FOR MV", "map", mapNode, "shuffle",
            shuffleNode, "reduce", reduceNode, "output", outputNode);
    jobNode.put("cacheIndex", cacheIndexNode);
    jobNode.put("reducers", 100);
    System.out.println("JOB json = " + jobNode.toString());
    return jobNode;

}
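
Note that the BLOCK_ID concatenation above casts the result of addAll back to String[]. In Commons Lang 2.x the object-array overload is declared as addAll(Object[], Object[]) and returns Object[], while the returned array takes the component type of the first argument, so the cast succeeds at runtime; Commons Lang 3 replaces this with a generic <T> T[] addAll(T[] array1, T... array2) that needs no cast. A minimal sketch of the idiom, with hypothetical key names:

import org.apache.commons.lang.ArrayUtils;

public class AddAllCastDemo {
    public static void main(String[] args) {
        String[] head = { "BLOCK_ID" };
        String[] pivotKeys = { "memberId", "day" }; // hypothetical pivot keys

        // The lang 2.x signature returns Object[], so callers cast back to String[].
        String[] shufflePivotKeys = (String[]) ArrayUtils.addAll(head, pivotKeys);

        System.out.println(String.join(",", shufflePivotKeys)); // BLOCK_ID,memberId,day
    }
}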

From source file:com.oneops.inductor.WorkOrderExecutor.java

/**
 * Removes the remote work order after remote execution
 *
 * @param wo      remote work order to be removed.
 * @param keyFile file to be used for executing the remote ssh
 * @param pr      the process runner.
 */
private void removeRemoteWorkOrder(CmsWorkOrderSimple wo, String keyFile, ProcessRunner pr) {
    String user = ONEOPS_USER;
    String comments = "";
    if (!isDebugEnabled(wo)) {
        // clear the workorder files
        String logKey = getLogKey(wo);
        String host = getWorkOrderHost(wo, getLogKey(wo));
        String port = "22";
        if (host.contains(":")) {
            String[] parts = host.split(":");
            host = parts[0];
            port = parts[1];
            logger.info("using port from " + config.getIpAttribute());
        }

        String remoteCmd = "rm " + getRemoteFileName(wo);
        String[] cmd = (String[]) ArrayUtils.addAll(sshCmdLine,
                new String[] { keyFile, "-p " + port, user + "@" + host, remoteCmd });
        logger.info(logKey + " ### EXEC: " + user + "@" + host + " " + remoteCmd);
        ProcessResult result = pr.executeProcessRetry(
                new ExecutionContext(wo, cmd, getLogKey(wo), getRetryCountForWorkOrder(wo)));
        if (result.getResultCode() != 0) {
            // Not throwing exceptions, Should be ok if we are not able to
            // remove remote wo.
            logger.error(logKey + comments);
        } else {
            logger.info("removed remote workorder");
        }
    } else {
        logger.info("debug enabled, not removing remote workorders");

    }
}

From source file:au.org.ala.biocache.dao.SearchDAOImpl.java

/**
 * Note - this method extracts from CASSANDRA rather than the Index.
 */
public Map<String, Integer> writeResultsToStream(DownloadRequestParams downloadParams, OutputStream out, int i,
        boolean includeSensitive, DownloadDetailsDTO dd) throws Exception {

    int resultsCount = 0;
    Map<String, Integer> uidStats = new HashMap<String, Integer>();
    //stores the remaining limit for data resources that have a download limit
    Map<String, Integer> downloadLimit = new HashMap<String, Integer>();

    try {
        SolrQuery solrQuery = initSolrQuery(downloadParams, false, null);
        //ensure that the qa facet is being ordered alphabetically so that the order is consistent.
        boolean getAssertionsFromFacets = "all".equals(downloadParams.getQa());
        if (getAssertionsFromFacets) {
            //set the order for the facet to be based on the index - this will force the assertions to be returned in the same order each time
            //based on alphabetical sort.  The number of QA's may change between searches so we can't guarantee that the order won't change
            solrQuery.add("f.assertions.facet.sort", "index");
        }
        formatSearchQuery(downloadParams);
        //add context information
        updateQueryContext(downloadParams);
        logger.info("search query: " + downloadParams.getFormattedQuery());
        solrQuery.setQuery(buildSpatialQueryString(downloadParams));
        //Only the fields specified below will be included in the results from the SOLR Query
        solrQuery.setFields("row_key", "institution_uid", "collection_uid", "data_resource_uid",
                "data_provider_uid");

        String dFields = downloadParams.getFields();

        if (includeSensitive) {
            //include raw latitude and longitudes
            dFields = dFields
                    .replaceFirst("decimalLatitude.p", "decimalLatitude,decimalLongitude,decimalLatitude.p")
                    .replaceFirst(",locality,", ",locality,sensitive_locality,");
        }

        StringBuilder sb = new StringBuilder(dFields);
        if (downloadParams.getExtra().length() > 0)
            sb.append(",").append(downloadParams.getExtra());
        StringBuilder qasb = new StringBuilder();

        QueryResponse qr = runSolrQuery(solrQuery, downloadParams.getFq(), 0, 0, "_docid_", "asc");
        dd.setTotalRecords(qr.getResults().getNumFound());
        //get the assertion facets to add them to the download fields
        List<FacetField> facets = qr.getFacetFields();
        for (FacetField facet : facets) {
            if (facet.getName().equals("assertions") && facet.getValueCount() > 0) {

                for (FacetField.Count facetEntry : facet.getValues()) {
                    //System.out.println("facet: " + facetEntry.getName());
                    if (qasb.length() > 0)
                        qasb.append(",");
                    qasb.append(facetEntry.getName());
                }
            } else if (facet.getName().equals("data_resource_uid") && checkDownloadLimits) {
                //populate the download limit
                initDownloadLimits(downloadLimit, facet);
            }
        }

        //Write the header line
        String qas = qasb.toString();

        String[] fields = sb.toString().split(",");
        String[] qaFields = qas.equals("") ? new String[] {} : qas.split(",");
        String[] qaTitles = downloadFields.getHeader(qaFields, false);
        String[] titles = downloadFields.getHeader(fields, true);
        String[] header = org.apache.commons.lang3.ArrayUtils.addAll(titles, qaTitles);
        //Create the Writer that will be used to format the records
        //construct correct RecordWriter based on the supplied fileType
        final au.org.ala.biocache.RecordWriter rw = downloadParams.getFileType().equals("csv")
                ? new CSVRecordWriter(out, header, downloadParams.getSep(), downloadParams.getEsc())
                : new ShapeFileRecordWriter(downloadParams.getFile(), out,
                        (String[]) ArrayUtils.addAll(fields, qaFields));

        if (rw instanceof ShapeFileRecordWriter) {
            dd.setHeaderMap(((ShapeFileRecordWriter) rw).getHeaderMappings());
        }

        //download the records that have limits first...
        if (downloadLimit.size() > 0) {
            String[] originalFq = downloadParams.getFq();
            StringBuilder fqBuilder = new StringBuilder("-(");
            for (String dr : downloadLimit.keySet()) {
                //add another fq to the search for data_resource_uid                    
                downloadParams.setFq((String[]) ArrayUtils.add(originalFq, "data_resource_uid:" + dr));
                resultsCount = downloadRecords(downloadParams, rw, downloadLimit, uidStats, fields, qaFields,
                        resultsCount, dr, includeSensitive, dd);
                if (fqBuilder.length() > 2)
                    fqBuilder.append(" OR ");
                fqBuilder.append("data_resource_uid:").append(dr);
            }
            fqBuilder.append(")");
            //now include the rest of the data resources
            //add extra fq for the remaining records
            downloadParams.setFq((String[]) ArrayUtils.add(originalFq, fqBuilder.toString()));
            resultsCount = downloadRecords(downloadParams, rw, downloadLimit, uidStats, fields, qaFields,
                    resultsCount, null, includeSensitive, dd);
        } else {
            //download all at once
            downloadRecords(downloadParams, rw, downloadLimit, uidStats, fields, qaFields, resultsCount, null,
                    includeSensitive, dd);
        }
        rw.finalise();

    } catch (SolrServerException ex) {
        logger.error("Problem communicating with SOLR server. " + ex.getMessage(), ex);
    }

    return uidStats;
}

From source file:com.datatorrent.stram.cli.DTCli.java

private List<Completer> defaultCompleters() {
    Map<String, CommandSpec> commands = new TreeMap<String, CommandSpec>();

    commands.putAll(logicalPlanChangeCommands);
    commands.putAll(connectedCommands);
    commands.putAll(globalCommands);

    List<Completer> completers = new LinkedList<Completer>();
    for (Map.Entry<String, CommandSpec> entry : commands.entrySet()) {
        String command = entry.getKey();
        CommandSpec cs = entry.getValue();
        List<Completer> argCompleters = new LinkedList<Completer>();
        argCompleters.add(new StringsCompleter(command));
        Arg[] args = (Arg[]) ArrayUtils.addAll(cs.requiredArgs, cs.optionalArgs);
        if (args != null) {
            if (cs instanceof OptionsCommandSpec) {
                // ugly hack because jline cannot dynamically change completer while user types
                if (args[0] instanceof FileArg || args[0] instanceof VarArg) {
                    for (int i = 0; i < 10; i++) {
                        argCompleters.add(new MyFileNameCompleter());
                    }
                }
            } else {
                for (Arg arg : args) {
                    if (arg instanceof FileArg || arg instanceof VarArg) {
                        argCompleters.add(new MyFileNameCompleter());
                    } else if (arg instanceof CommandArg) {
                        argCompleters.add(new StringsCompleter(commands.keySet().toArray(new String[] {})));
                    } else {
                        argCompleters.add(MyNullCompleter.INSTANCE);
                    }
                }
            }
        }

        completers.add(new ArgumentCompleter(argCompleters));
    }

    List<Completer> argCompleters = new LinkedList<Completer>();
    Set<String> set = new TreeSet<String>();
    set.addAll(aliases.keySet());
    set.addAll(macros.keySet());
    argCompleters.add(new StringsCompleter(set.toArray(new String[] {})));
    for (int i = 0; i < 10; i++) {
        argCompleters.add(new MyFileNameCompleter());
    }
    completers.add(new ArgumentCompleter(argCompleters));
    return completers;
}

From source file:com.novartis.opensource.yada.util.QueryUtils.java

/**
 * This method uses a variety of variables from {@code yq} to determine the
 * number of jdbc positional parameters which must be included in the SQL
 * {@code in} clause in the stored query. Once the parameter count is
 * determined, the original SQL is amended with additional jdbc parameter
 * placeholders (i.e., {@code ?}), and the amended SQL is stored in the
 * {@code yq}.
 * 
 * @param yq
 *          the query being processed
 * @param row
 *          the index of the list of value lists in the query containing the
 *          data to evaluate
 * @return modified SQL code
 * @deprecated as of 7.1.0
 */
@Deprecated
public String processInColumns(YADAQuery yq, int row) {
    String[] inColumns = yq.getIns();
    String coreSql = yq.getYADACode();
    LinkedHashMap<String, String[]> newData = new LinkedHashMap<>(); // to be i.e., YADA_1:[],YADA_2:[]
    if (inColumns.length > 0) {
        String[] columns = yq.getParameterizedColumns();
        Map<String, String[]> data = yq.getDataRow(row);
        char[] dataTypes = yq.getDataTypes(row);
        Matcher matcher;

        l.debug("Processing inColumns [" + StringUtils.join(inColumns, ",") + "]");
        for (String in : inColumns) {
            int colIndex = -1, j = 0;
            String inCol = in.toUpperCase(); // TODO case sensitivity

            // get the index of the 'incolumn' in the 'JDBCcolumns' array
            l.debug("Looking for column [" + inCol + "] in columns array " + ArrayUtils.toString(columns));
            while (j < columns.length && colIndex != j) {
                if (inCol.contains(columns[j])) {
                    colIndex = j;
                    l.debug("Found column [" + inCol + "] at index [" + String.valueOf(colIndex)
                            + "] of columns array.");
                    break;
                }
                j++;
            }

            // get the value list associated to the column in the data hash
            String colName = "";
            String[] inData = null;
            int inLen = 0;
            if (data.containsKey(columns[colIndex])) // JSONParams
            {
                colName = columns[colIndex];
                if (data.get(colName).length == 1) {
                    inData = data.get(colName)[0].split(",");
                    for (int m = 0; m < columns.length; m++) {
                        if (columns[m].equals(colName)) {
                            // add the new data for the column
                            newData.put(colName, inData);
                        } else {
                            // add the existing data for the column
                            newData.put(columns[m], data.get(columns[m]));
                        }
                        // add data row
                        yq.getData().set(row, newData);
                    }
                    yq.getData().set(row, newData);
                } else
                    inData = data.get(colName);
                l.debug("Splitting in args [" + data.get(colName) + "]");
            } else
            // Standard Params
            {

                // Get an array of keys to compare and potentially manipulate
                String[] colNames = new String[data.size()];
                int k = 0;
                for (String col : data.keySet()) {
                    colNames[k] = col;
                    k++;
                }

                // if colNames and columns array are of equal size,
                // then there is no param value manipulation required
                if (colNames.length == columns.length) {
                    colName = QueryUtils.YADA_COLUMN + (colIndex + 1);
                    inData = data.get(colName);
                } else
                // there is a length discrepancy
                {
                    for (int m = colIndex; m < colNames.length; m++) {
                        if (m == colIndex) // it's the first index
                            inData = data.get(colNames[m]);
                        else
                            // further indexes must build aggregate array
                            inData = (String[]) ArrayUtils.addAll(inData, data.get(colNames[m]));
                    }

                    for (int m = 0; m < columns.length; m++) {
                        if (m == columns.length - 1) {
                            // it's the last index, so add the aggregate inData array
                            newData.put(colNames[m], inData);
                        } else {
                            // not the last index, add the existing array
                            newData.put(colNames[m], data.get(colNames[m]));
                        }
                        // add data row
                        yq.getData().set(row, newData);
                    }
                }
                l.debug("Setting IN args [" + ArrayUtils.toString(inData) + "]");
            }
            if (inData != null) {
                inLen = inData.length;
            }

            if (inLen > 1) // there's an aggregate of multiple values
            {
                l.debug("Length of value list [" + String.valueOf(inLen) + "]");
                l.debug("Getting data type of [" + columns[colIndex] + "]");
                char dt = dataTypes[colIndex];
                String dtStr = "?" + String.valueOf(dt);

                // generate the new parameter string with data type markers
                String[] pList = new String[inLen];
                for (int k = 0; k < inLen; k++) {
                    pList[k] = dtStr;
                }
                String pListStr = StringUtils.join(pList, ",");
                l.debug("New parameter list [" + pListStr + "]");

                // add additional parameters to coreSql
                String rx = "(.+)(" + inCol + "\\s+in\\s+\\(\\" + dtStr + "\\))(.*)";
                String repl = inCol + " IN (" + pListStr + ")";
                String sql = coreSql.replaceAll(NEWLINE, " ");
                l.debug("Attempting to replace part of [" + sql + "] with [" + repl + "]");
                matcher = Pattern.compile(rx, Pattern.CASE_INSENSITIVE).matcher(sql);
                if (matcher.matches()) {
                    coreSql = matcher.group(1) + repl + matcher.group(3);
                }
                l.debug("Matched clause in coreSql [" + matcher.toString() + "]");
            } // end current incolumn processing
        } // end all incolumn processing
    }
    // reset datatype and param count with new coreSql
    yq.addDataTypes(row, this.getDataTypes(coreSql));
    yq.addParamCount(row, yq.getDataTypes(row).length);

    return coreSql;
}

From source file:com.abssh.util.GenericDao.java

/**
 * Finds a page of results matching the given property filters and Hibernate criterions.
 *
 * @date 2012-04-06
 * @param page the page request; must not be null
 * @param filters the property filters to convert into criterions
 * @param criterions additional Hibernate criterions to apply
 * @return the page populated with the matching results
 */
@SuppressWarnings("unchecked")
public Page<T> findPage(final Page<T> page, final List<PropertyFilter> filters, final Criterion... criterions) {
    Assert.notNull(page, "page should not be null!");

    Criterion[] cs = (Criterion[]) ArrayUtils.addAll(buildPropertyFilterCriterions(filters), criterions);
    Criteria c = createCriteria(cs);

    if (page.isAutoCount()) {
        int totalCount = countCriteriaResult(c);
        page.setTotalCount(totalCount);
    }
    if (page != null && page.getPageSize() > 0 && page.getTotalPages() < page.getPageNo()) {
        page.setPageNo(1L);
    }

    setPageParameter(c, page);
    List<T> result = c.list();
    page.setResult(result);

    return page;
}

From source file:com.abssh.util.GenericDao.java

/**
 * Finds a page of results matching the given property filters and Hibernate criterions,
 * eagerly fetching the named associations.
 *
 * @date 2012-04-06
 * @param page the page request; must not be null
 * @param filters the property filters to convert into criterions
 * @param fetchObject association paths to fetch with a JOIN
 * @param criterions additional Hibernate criterions to apply
 * @return the page populated with the matching results
 */
@SuppressWarnings("unchecked")
public Page<T> findPage(final Page<T> page, final List<PropertyFilter> filters, String[] fetchObject,
        final Criterion... criterions) {
    Assert.notNull(page, "page should not be null!");

    Criterion[] cs = (Criterion[]) ArrayUtils.addAll(buildPropertyFilterCriterions(filters), criterions);
    Criteria c = createCriteria(cs);

    if (page.isAutoCount()) {
        int totalCount = countCriteriaResult(c);
        page.setTotalCount(totalCount);
    }
    if (page != null && page.getPageSize() > 0 && page.getTotalPages() < page.getPageNo()) {
        page.setPageNo(1L);
    }

    setPageParameter(c, page);
    if (fetchObject != null) {
        for (int i = 0; i < fetchObject.length; i++) {
            c.setFetchMode(fetchObject[i], FetchMode.JOIN);
        }
    }
    List<T> result = c.list();
    page.setResult(result);

    return page;
}

From source file:com.datatorrent.stram.cli.ApexCli.java

private List<Completer> defaultCompleters() {
    Map<String, CommandSpec> commands = new TreeMap<>();

    commands.putAll(logicalPlanChangeCommands);
    commands.putAll(connectedCommands);
    commands.putAll(globalCommands);

    List<Completer> completers = new LinkedList<>();
    for (Map.Entry<String, CommandSpec> entry : commands.entrySet()) {
        String command = entry.getKey();
        CommandSpec cs = entry.getValue();
        List<Completer> argCompleters = new LinkedList<>();
        argCompleters.add(new StringsCompleter(command));
        Arg[] args = (Arg[]) ArrayUtils.addAll(cs.requiredArgs, cs.optionalArgs);
        if (args != null) {
            if (cs instanceof OptionsCommandSpec) {
                // ugly hack because jline cannot dynamically change completer while user types
                if (args[0] instanceof FileArg || args[0] instanceof VarArg) {
                    for (int i = 0; i < 10; i++) {
                        argCompleters.add(new MyFileNameCompleter());
                    }
                }
            } else {
                for (Arg arg : args) {
                    if (arg instanceof FileArg || arg instanceof VarArg) {
                        argCompleters.add(new MyFileNameCompleter());
                    } else if (arg instanceof CommandArg) {
                        argCompleters.add(new StringsCompleter(commands.keySet().toArray(new String[] {})));
                    } else {
                        argCompleters.add(MyNullCompleter.INSTANCE);
                    }
                }
            }
        }

        completers.add(new ArgumentCompleter(argCompleters));
    }

    List<Completer> argCompleters = new LinkedList<>();
    Set<String> set = new TreeSet<>();
    set.addAll(aliases.keySet());
    set.addAll(macros.keySet());
    argCompleters.add(new StringsCompleter(set.toArray(new String[] {})));
    for (int i = 0; i < 10; i++) {
        argCompleters.add(new MyFileNameCompleter());
    }
    completers.add(new ArgumentCompleter(argCompleters));
    return completers;
}

From source file:de.tudarmstadt.ukp.dkpro.core.corenlp.CoreNlpParserTest.java

private JCas runTestWithPosTagger(String aLanguage, String aVariant, String aText, Object... aExtraParams)
        throws Exception {
    AssumeResource.assumeResource(CoreNlpParser.class, "de/tudarmstadt/ukp/dkpro/core/stanfordnlp", "parser",
            aLanguage, aVariant);

    AggregateBuilder aggregate = new AggregateBuilder();

    aggregate.add(createEngineDescription(CoreNlpPosTagger.class));

    Object[] params = new Object[] { CoreNlpParser.PARAM_VARIANT, aVariant, CoreNlpParser.PARAM_PRINT_TAGSET,
            true, CoreNlpParser.PARAM_WRITE_CONSTITUENT, true, CoreNlpParser.PARAM_WRITE_DEPENDENCY, true,
            CoreNlpParser.PARAM_WRITE_PENN_TREE, true, CoreNlpParser.PARAM_WRITE_POS, false };
    params = ArrayUtils.addAll(params, aExtraParams);
    aggregate.add(createEngineDescription(CoreNlpParser.class, params));

    return TestRunner.runTest(aggregate.createAggregateDescription(), aLanguage, aText);
}