Example usage for org.apache.commons.lang ArrayUtils addAll

List of usage examples for org.apache.commons.lang ArrayUtils addAll

Introduction

On this page you can find example usage of org.apache.commons.lang ArrayUtils.addAll.

Prototype

public static double[] addAll(double[] array1, double[] array2) 

Document

Adds all the elements of the given arrays into a new array. ArrayUtils.addAll is overloaded for Object[] and for every primitive array type; besides the double[] form shown above, the examples below use the Object[], byte[], and long[] variants.
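
Before the project excerpts, here is a minimal, self-contained sketch of the two forms seen most often below: the Object[] overload, whose result is cast back to the concrete element type, and a primitive overload matching the prototype above. The class name AddAllDemo and the field/key names are made up purely for illustration.

import java.util.Arrays;

import org.apache.commons.lang.ArrayUtils;

public class AddAllDemo {
    public static void main(String[] args) {
        // Object[] overload: the joined array takes the component type of the
        // first argument, so casting back to String[] is safe here.
        String[] fields = { "id", "name" };
        String[] keys = { "pk" };
        String[] combined = (String[]) ArrayUtils.addAll(fields, keys);
        System.out.println(Arrays.toString(combined)); // [id, name, pk]

        // Primitive overload matching the double[] prototype above.
        double[] left = { 1.0, 2.0 };
        double[] right = { 3.0 };
        System.out.println(Arrays.toString(ArrayUtils.addAll(left, right))); // [1.0, 2.0, 3.0]

        // A null argument is treated as empty; the result is still a new array.
        String[] copy = (String[]) ArrayUtils.addAll(fields, null);
        System.out.println(Arrays.toString(copy)); // [id, name]
    }
}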

Usage

From source file:org.apache.geode.management.internal.security.LogNoPasswordTest.java

@Test
public void testPasswordInLogs() throws Exception {
    Properties properties = new Properties();
    properties.setProperty(LOG_LEVEL, "debug");
    properties.setProperty(SECURITY_MANAGER, MySecurityManager.class.getName());
    MemberVM locator = lsRule.startLocatorVM(0, properties);
    gfsh.secureConnectAndVerify(locator.getHttpPort(), GfshShellConnectionRule.PortType.http, "any", PASSWORD);
    gfsh.executeAndVerifyCommand("list members");

    // scan all locator log files to find any occurrences of password
    File[] serverLogFiles = locator.getWorkingDir().listFiles(file -> file.toString().endsWith(".log"));
    File[] gfshLogFiles = gfsh.getWorkingDir().listFiles(file -> file.toString().endsWith(".log"));

    File[] logFiles = (File[]) ArrayUtils.addAll(serverLogFiles, gfshLogFiles);

    for (File logFile : logFiles) {
        Scanner scanner = new Scanner(logFile);
        while (scanner.hasNextLine()) {
            String line = scanner.nextLine();
            assertThat(line).describedAs("File: %s, Line: %s", logFile.getAbsolutePath(), line)
                    .doesNotContain(PASSWORD);
        }
    }
}

From source file:org.apache.gora.cassandra.serializers.AvroSerializer.java

/**
 * {@inheritDoc}
 *
 * @param dataStore the data store the query is executed against
 * @param query     the query to execute
 * @return the matching entries wrapped in a result set
 */
@Override
public Result execute(DataStore dataStore, Query query) throws GoraException {
    try {
        List<Object> objectArrayList = new ArrayList<>();
        String[] fields = query.getFields();
        if (fields != null) {
            fields = (String[]) ArrayUtils.addAll(fields, mapping.getAllKeys());
        } else {
            fields = mapping.getAllFieldsIncludingKeys();
        }
        CassandraResultSet<K, T> cassandraResult = new CassandraResultSet<>(dataStore, query);
        String cqlQuery = CassandraQueryFactory.getExecuteQuery(mapping, query, objectArrayList, fields);
        ResultSet results;
        SimpleStatement statement;
        if (objectArrayList.size() == 0) {
            statement = new SimpleStatement(cqlQuery);
        } else {
            statement = new SimpleStatement(cqlQuery, objectArrayList.toArray());
        }
        if (readConsistencyLevel != null) {
            statement.setConsistencyLevel(ConsistencyLevel.valueOf(readConsistencyLevel));
        }
        results = client.getSession().execute(statement);
        Iterator<Row> iterator = results.iterator();
        ColumnDefinitions definitions = results.getColumnDefinitions();
        T obj;
        K keyObject;
        CassandraKey cassandraKey = mapping.getCassandraKey();
        while (iterator.hasNext()) {
            AbstractGettableData row = (AbstractGettableData) iterator.next();
            obj = cassandraDataStore.newPersistent();
            keyObject = cassandraDataStore.newKey();
            populateValuesToPersistent(row, definitions, obj, fields);
            if (cassandraKey != null) {
                populateValuesToPersistent(row, definitions, (PersistentBase) keyObject,
                        cassandraKey.getFieldNames());
            } else {
                Field key = mapping.getInlinedDefinedPartitionKey();
                keyObject = (K) getValue(row, definitions.getType(key.getColumnName()), key.getColumnName(),
                        null);
            }
            cassandraResult.addResultElement(keyObject, obj);
        }
        return cassandraResult;
    } catch (Exception e) {
        throw new GoraException(e);
    }
}

From source file:org.apache.gora.cassandra.serializers.NativeSerializer.java

/**
 * {@inheritDoc}
 *
 * @param dataStore the data store the query is executed against
 * @param query     the query to execute
 * @return the matching entries wrapped in a result set
 */
@Override
public org.apache.gora.query.Result execute(DataStore dataStore, Query query) throws GoraException {
    try {
        List<Object> objectArrayList = new ArrayList<>();
        String[] fields = query.getFields();
        if (fields != null) {
            fields = (String[]) ArrayUtils.addAll(fields, mapping.getAllKeys());
        } else {
            fields = mapping.getAllFieldsIncludingKeys();
        }
        CassandraResultSet<K, T> cassandraResult = new CassandraResultSet<>(dataStore, query);
        String cqlQuery = CassandraQueryFactory.getExecuteQuery(mapping, query, objectArrayList, fields);
        ResultSet results;
        if (objectArrayList.size() == 0) {
            results = client.getSession().execute(cqlQuery);
        } else {
            results = client.getSession().execute(cqlQuery, objectArrayList.toArray());
        }
        Result<T> objects = mapper.map(results);
        Iterator iterator = objects.iterator();
        while (iterator.hasNext()) {
            T result = (T) iterator.next();
            K key = getKey(result);
            cassandraResult.addResultElement(key, result);
        }
        return cassandraResult;
    } catch (Exception e) {
        throw new GoraException(e);
    }
}

From source file:org.apache.gora.cassandra.store.CassandraStore.java

@Override
public T get(K key, String[] fields) {
    CassandraQuery<K, T> query = new CassandraQuery<K, T>();
    query.setDataStore(this);
    query.setKeyRange(key, key);

    if (fields == null) {
        fields = this.getFields();
    }
    // Generating UnionFields
    ArrayList<String> unionFields = new ArrayList<String>();
    for (String field : fields) {
        Field schemaField = this.fieldMap.get(field);
        Type type = schemaField.schema().getType();
        if (type.getName().equals("UNION".toLowerCase())) {
            unionFields.add(field + UNION_COL_SUFIX);
        }
    }

    String[] arr = unionFields.toArray(new String[unionFields.size()]);
    String[] both = (String[]) ArrayUtils.addAll(fields, arr);

    query.setFields(both);

    query.setLimit(1);
    Result<K, T> result = execute(query);
    boolean hasResult = false;
    try {
        hasResult = result.next();
    } catch (Exception e) {
        e.printStackTrace();
    }
    return hasResult ? result.get() : null;
}

From source file:org.apache.hadoop.gateway.identityasserter.common.filter.CommonIdentityAssertionFilter.java

/**
 * @param mappedGroups groups derived from the configured group mapping (may be null)
 * @param groups       groups already asserted for the user (may be null)
 * @return the combined array, or whichever argument is non-null when the other is null
 */
private String[] combineGroupMappings(String[] mappedGroups, String[] groups) {
    if (mappedGroups != null && groups != null) {
        return (String[]) ArrayUtils.addAll(mappedGroups, groups);
    } else {
        return groups != null ? groups : mappedGroups;
    }
}

From source file:org.apache.hadoop.hbase.index.covered.example.TestEndToEndCoveredIndexing.java

/**
 * Similar to {@link #testExceedVersionsOutOfOrderPut()}, but mingles deletes and puts.
 * @throws Exception on failure
 */
@Test
public void testExceedVersionsOutOfOrderUpdates() throws Exception {
    HTable primary = createSetupTables(fam1);

    // setup the data to store
    long ts1 = 1, ts2 = 2, ts3 = 3, ts4 = 4, ts5 = 5, ts6 = 6;
    byte[] value4 = Bytes.toBytes("val4"), value5 = Bytes.toBytes("val5"), value6 = Bytes.toBytes("val6");
    // values for the other column to index
    byte[] v1_1 = ArrayUtils.addAll(value1, Bytes.toBytes("_otherCol")),
            v3_1 = ArrayUtils.addAll(value3, Bytes.toBytes("_otherCol")),
            v5_1 = ArrayUtils.addAll(value5, Bytes.toBytes("_otherCol")),
            v6_1 = ArrayUtils.addAll(value6, Bytes.toBytes("_otherCol"));

    // make some puts to the primary table
    Put p = new Put(row1);
    p.add(FAM, indexed_qualifer, ts1, value1);
    p.add(FAM2, indexed_qualifer, ts1, v1_1);
    primary.put(p);
    primary.flushCommits();

    p = new Put(row1);
    p.add(FAM, indexed_qualifer, ts3, value3);
    p.add(FAM2, indexed_qualifer, ts3, v3_1);
    primary.put(p);
    primary.flushCommits();

    p = new Put(row1);
    p.add(FAM, indexed_qualifer, ts5, value5);
    p.add(FAM2, indexed_qualifer, ts5, v5_1);
    primary.put(p);
    primary.flushCommits();

    p = new Put(row1);
    p.add(FAM, indexed_qualifer, ts6, value6);
    p.add(FAM2, indexed_qualifer, ts6, v6_1);
    primary.put(p);
    primary.flushCommits();

    /*
     * now we have definitely exceeded the number of versions visible to a usual client of the
     * primary table, so we should try doing a put 'back in time' and make sure that has the correct
     * index values and cleanup
     */
    p = new Put(row1);
    p.add(FAM, indexed_qualifer, ts2, value2);
    primary.put(p);
    primary.flushCommits();

    // read the index for the expected values
    HTable index1 = new HTable(UTIL.getConfiguration(), fam1.getTable());

    // do a raw scan of everything in the table
    if (LOG.isDebugEnabled()) {
        Scan s = new Scan();
        s.setRaw(true);
        ResultScanner scanner = index1.getScanner(s);
        for (Result r : scanner) {
            LOG.debug("Found row:" + r);
        }
        scanner.close();
    }

    // build the expected kvs
    List<Pair<byte[], CoveredColumn>> pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
    pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
    pairs.add(new Pair<byte[], CoveredColumn>(v1_1, col2));

    // check the value1 should be present at the earliest timestamp
    List<KeyValue> expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts1, pairs);
    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts1, value1, value2);

    // and value1 should be removed at ts2 (even though it came later)
    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, Collections.<KeyValue>emptyList(), ts1, ts2 + 1,
            value1, value2); // timestamp + 1 since it is an exclusive end timestamp

    // late added column should be there just fine at its timestamp
    pairs.clear();
    pairs.add(new Pair<byte[], CoveredColumn>(value2, col1));
    pairs.add(new Pair<byte[], CoveredColumn>(v1_1, col2));
    expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts2, pairs);
    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts2, value2);

    // and check that the late entry also removes itself at the next timestamp up
    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, Collections.<KeyValue>emptyList(), ts3, value2,
            value3);

    // -----------------------------------------------
    // Check Delete intermingled
    // -----------------------------------------------

    // verify that the old row is there
    pairs.clear();
    pairs.add(new Pair<byte[], CoveredColumn>(value3, col1));
    pairs.add(new Pair<byte[], CoveredColumn>(v3_1, col2));
    expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts3, pairs);
    // scan from the start key forward (should only include [value3][v3_1])
    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts3, expected.get(0).getKey(), value4);

    // then do a delete of just one of the indexed columns. This should insert a delete for just
    // the single value, then a put and a later corresponding put in the past for the new value
    Delete d = new Delete(row1);
    d.deleteColumn(FAM2, indexed_qualifer, ts3);
    primary.delete(d);

    // we shouldn't find that entry, but we should find [value3][v1_1] since that is the next entry
    // back in time from the current one
    pairs.clear();
    pairs.add(new Pair<byte[], CoveredColumn>(value3, col1));
    pairs.add(new Pair<byte[], CoveredColumn>(v1_1, col2));
    expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts3, pairs);
    // it should be re-written at 3
    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts3, value3, value4);

    // but we shouldn't find it at ts5 since it should be covered again
    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, Collections.<KeyValue>emptyList(), ts5, value3,
            value4);

    // now remove all the older columns in FAM2 at 4
    d = new Delete(row1);
    d.deleteColumns(FAM2, indexed_qualifer, ts4);
    primary.delete(d);

    // we shouldn't find that entry, but we should find [value3][null] since deleteColumns removes
    // all the entries for that column
    pairs.clear();
    pairs.add(new Pair<byte[], CoveredColumn>(value3, col1));
    pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2));
    expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts4, pairs);
    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts4, value3, value4);

    // same as above, but now do it at 3 (one earlier)
    d = new Delete(row1);
    d.deleteColumns(FAM2, indexed_qualifer, ts3);
    primary.delete(d);

    // we shouldn't find that entry, but we should find [value3][null] since deleteColumns removes
    // all the entries for that column
    pairs.clear();
    pairs.add(new Pair<byte[], CoveredColumn>(value3, col1));
    pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2));
    expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts3, pairs);
    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts3, value3, value4);

    // -----------------------------------------------
    // then we should have the rest of the inserts at their appropriate timestamps. Everything else
    // should be exactly the same, except we shouldn't see ts0 anymore at ts2
    // -----------------------------------------------

    // check the entry at 5
    pairs.clear();
    pairs.add(new Pair<byte[], CoveredColumn>(value5, col1));
    pairs.add(new Pair<byte[], CoveredColumn>(v5_1, col2));
    expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts5, pairs);
    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts5, value5);

    // check the entry at 6
    pairs.clear();
    pairs.add(new Pair<byte[], CoveredColumn>(value6, col1));
    pairs.add(new Pair<byte[], CoveredColumn>(v6_1, col2));
    expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts6, pairs);
    IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts6, value5);

    // cleanup
    closeAndCleanupTables(primary, index1);
}

From source file:org.apache.hadoop.hbase.ipc.RWQueueRpcExecutor.java

public RWQueueRpcExecutor(final String name, final int handlerCount, final int numQueues, final float readShare,
        final float scanShare, final int maxQueueLength, final Configuration conf, final Abortable abortable,
        final Class<? extends BlockingQueue> readQueueClass, Object... readQueueInitArgs) {
    this(name, calcNumWriters(handlerCount, readShare), calcNumReaders(handlerCount, readShare),
            calcNumWriters(numQueues, readShare), calcNumReaders(numQueues, readShare), scanShare,
            LinkedBlockingQueue.class, new Object[] { maxQueueLength }, readQueueClass,
            ArrayUtils.addAll(new Object[] { maxQueueLength }, readQueueInitArgs));
}

From source file:org.apache.hadoop.hbase.master.procedure.MasterProcedureSchedulerPerformanceEvaluation.java

private void setupOperations() throws Exception {
    // Create set of operations based on --ops_type command line argument.
    final ProcedureFactory[] tableOps = new ProcedureFactory[numTables];
    for (int i = 0; i < numTables; ++i) {
        tableOps[i] = new TableProcedureFactory(TableName.valueOf("testTableLock-" + i));
    }

    final ProcedureFactory[] regionOps = new ProcedureFactory[numTables * regionsPerTable];
    for (int i = 0; i < numTables; ++i) {
        for (int j = 0; j < regionsPerTable; ++j) {
            regionOps[i * regionsPerTable + j] = new RegionProcedureFactory(new HRegionInfo(
                    ((TableProcedureFactory) tableOps[i]).tableName, Bytes.toBytes(j), Bytes.toBytes(j + 1)));
        }
    }

    if (ops_type.equals("table")) {
        System.out.println("Operations: table only");
        ops = tableOps;
    } else if (ops_type.equals("region")) {
        System.out.println("Operations: region only");
        ops = regionOps;
    } else if (ops_type.equals("both")) {
        System.out.println("Operations: both (table + region)");
        ops = (ProcedureFactory[]) ArrayUtils.addAll(tableOps, regionOps);
    } else {
        throw new Exception("-ops_type should be one of table/region/both.");
    }
}

From source file:org.apache.hadoop.mapred.JobLocalizer.java

/**
 * Download the parts of the distributed cache that are private.
 * @param conf the job's configuration
 * @throws IOException if a cache object cannot be localized
 * @return the sizes of the localized files followed by the sizes of the archives
 */
public static long[] downloadPrivateCache(Configuration conf) throws IOException {
    long[] fileSizes = downloadPrivateCacheObjects(conf, DistributedCache.getCacheFiles(conf),
            DistributedCache.getLocalCacheFiles(conf), DistributedCache.getFileTimestamps(conf),
            TrackerDistributedCacheManager.getFileVisibilities(conf), false);

    long[] archiveSizes = downloadPrivateCacheObjects(conf, DistributedCache.getCacheArchives(conf),
            DistributedCache.getLocalCacheArchives(conf), DistributedCache.getArchiveTimestamps(conf),
            TrackerDistributedCacheManager.getArchiveVisibilities(conf), true);

    // The order here matters - it has to match order of cache files
    // in TaskDistributedCacheManager.
    return ArrayUtils.addAll(fileSizes, archiveSizes);
}

From source file:org.apache.hadoop.mapreduce.v2.TestMRJobs.java

@Test(timeout = 120000)
public void testContainerRollingLog() throws IOException, InterruptedException, ClassNotFoundException {
    if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
        LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test.");
        return;
    }

    final SleepJob sleepJob = new SleepJob();
    final JobConf sleepConf = new JobConf(mrCluster.getConfig());
    sleepConf.set(MRJobConfig.MAP_LOG_LEVEL, Level.ALL.toString());
    final long userLogKb = 4;
    sleepConf.setLong(MRJobConfig.TASK_USERLOG_LIMIT, userLogKb);
    sleepConf.setInt(MRJobConfig.TASK_LOG_BACKUPS, 3);
    sleepConf.set(MRJobConfig.MR_AM_LOG_LEVEL, Level.ALL.toString());
    final long amLogKb = 7;
    sleepConf.setLong(MRJobConfig.MR_AM_LOG_KB, amLogKb);
    sleepConf.setInt(MRJobConfig.MR_AM_LOG_BACKUPS, 7);
    sleepJob.setConf(sleepConf);

    final Job job = sleepJob.createJob(1, 0, 1L, 100, 0L, 0);
    job.setJarByClass(SleepJob.class);
    job.addFileToClassPath(APP_JAR); // The AppMaster jar itself.
    job.waitForCompletion(true);
    final JobId jobId = TypeConverter.toYarn(job.getJobID());
    final ApplicationId appID = jobId.getAppId();
    int pollElapsed = 0;
    while (true) {
        Thread.sleep(1000);
        pollElapsed += 1000;
        if (TERMINAL_RM_APP_STATES
                .contains(mrCluster.getResourceManager().getRMContext().getRMApps().get(appID).getState())) {
            break;
        }
        if (pollElapsed >= 60000) {
            LOG.warn("application did not reach terminal state within 60 seconds");
            break;
        }
    }
    Assert.assertEquals(RMAppState.FINISHED,
            mrCluster.getResourceManager().getRMContext().getRMApps().get(appID).getState());

    // Job finished, verify logs
    //

    final String appIdStr = appID.toString();
    final String appIdSuffix = appIdStr.substring("application_".length(), appIdStr.length());
    final String containerGlob = "container_" + appIdSuffix + "_*_*";
    final String syslogGlob = appIdStr + Path.SEPARATOR + containerGlob + Path.SEPARATOR
            + TaskLog.LogName.SYSLOG;
    int numAppMasters = 0;
    int numMapTasks = 0;

    String user = UserGroupInformation.getCurrentUser().getUserName();
    String userFolder;
    try {
        MessageDigest digest = MessageDigest
                .getInstance(mrCluster.getResourceManager().getRMContext().getUserFolderHashAlgo());
        byte[] userBytes = user.getBytes(StandardCharsets.UTF_8);
        byte[] hashBase = ArrayUtils.addAll(userBytes, mrCluster.getResourceManager().getRMContext().getSeed());
        byte[] hash = digest.digest(hashBase);
        userFolder = Base64.encodeBase64URLSafeString(hash);
    } catch (NoSuchAlgorithmException ex) {
        LOG.error("error while creating userFolder random string", ex);
        throw new Error("error while creating userFolder random string", ex);
    }

    for (int i = 0; i < NUM_NODE_MGRS; i++) {
        final Configuration nmConf = mrCluster.getNodeManager(i).getConfig();
        for (String logDir : nmConf.getTrimmedStrings(YarnConfiguration.NM_LOG_DIRS)) {
            Path userLogDir = new Path(logDir, userFolder);
            final Path absSyslogGlob = new Path(userLogDir + Path.SEPARATOR + syslogGlob);
            LOG.info("Checking for glob: " + absSyslogGlob);
            final FileStatus[] syslogs = localFs.globStatus(absSyslogGlob);
            for (FileStatus slog : syslogs) {
                boolean foundAppMaster = job.isUber();
                final Path containerPathComponent = slog.getPath().getParent();
                if (!foundAppMaster) {
                    final ContainerId cid = ContainerId.fromString(containerPathComponent.getName());
                    foundAppMaster = ((cid.getContainerId() & ContainerId.CONTAINER_ID_BITMASK) == 1);
                }

                final FileStatus[] sysSiblings = localFs
                        .globStatus(new Path(containerPathComponent, TaskLog.LogName.SYSLOG + "*"));
                // sort to ensure for i > 0 sysSiblings[i] == "syslog.i"
                Arrays.sort(sysSiblings);

                if (foundAppMaster) {
                    numAppMasters++;
                } else {
                    numMapTasks++;
                }

                if (foundAppMaster) {
                    Assert.assertSame("Unexpected number of AM sylog* files",
                            sleepConf.getInt(MRJobConfig.MR_AM_LOG_BACKUPS, 0) + 1, sysSiblings.length);
                    Assert.assertTrue("AM syslog.1 length kb should be >= " + amLogKb,
                            sysSiblings[1].getLen() >= amLogKb * 1024);
                } else {
                    Assert.assertSame("Unexpected number of MR task sylog* files",
                            sleepConf.getInt(MRJobConfig.TASK_LOG_BACKUPS, 0) + 1, sysSiblings.length);
                    Assert.assertTrue("MR syslog.1 length kb should be >= " + userLogKb,
                            sysSiblings[1].getLen() >= userLogKb * 1024);
                }
            }
        }
    }
    // Make sure we checked non-empty set
    //
    Assert.assertEquals("No AppMaster log found!", 1, numAppMasters);
    if (sleepConf.getBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false)) {
        Assert.assertEquals("MapTask log with uber found!", 0, numMapTasks);
    } else {
        Assert.assertEquals("No MapTask log found!", 1, numMapTasks);
    }
}