List of usage examples for org.apache.commons.lang ArrayUtils indexOf
public static int indexOf(boolean[] array, boolean valueToFind)
Finds the index of the given value in the array. Returns ArrayUtils.INDEX_NOT_FOUND (-1) when the value is not found or the array is null. The method is overloaded for every primitive array type and for Object[]; most of the examples below use the Object[] and String[] overloads.
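Before the project examples, here is a minimal, self-contained demo of the common overloads (the values are illustrative):

import org.apache.commons.lang.ArrayUtils;

public class IndexOfDemo {
    public static void main(String[] args) {
        boolean[] flags = { false, true, true };
        System.out.println(ArrayUtils.indexOf(flags, true));    // 1
        System.out.println(ArrayUtils.indexOf(flags, true, 2)); // 2 (search starts at index 2)

        String[] names = { "a", "b", "c" };
        System.out.println(ArrayUtils.indexOf(names, "b"));           // 1
        System.out.println(ArrayUtils.indexOf(names, "z"));           // -1 == ArrayUtils.INDEX_NOT_FOUND
        System.out.println(ArrayUtils.indexOf((Object[]) null, "a")); // -1, a null array never contains the value
    }
}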
From source file:org.ambraproject.admin.service.impl.AdminServiceImpl.java
@Transactional
@Override
public String[] deleteVolumes(final String journalKey, final String... volumeUris) {
    //volumes are lazy, so we have to access them in a session
    return (String[]) hibernateTemplate.execute(new HibernateCallback() {
        @Override
        public Object doInHibernate(Session session) throws HibernateException, SQLException {
            Journal journal = (Journal) session.createCriteria(Journal.class)
                    .add(Restrictions.eq("journalKey", journalKey)).uniqueResult();
            if (journal == null) {
                throw new IllegalArgumentException("No such journal: " + journalKey);
            }
            List<String> deletedVolumes = new ArrayList<String>(volumeUris.length);
            Iterator<Volume> iterator = journal.getVolumes().iterator();
            while (iterator.hasNext()) {
                Volume volume = iterator.next();
                if (ArrayUtils.indexOf(volumeUris, volume.getVolumeUri()) != -1) {
                    iterator.remove();
                    session.delete(volume);
                    deletedVolumes.add(volume.getVolumeUri());
                }
            }
            session.update(journal);
            return deletedVolumes.toArray(new String[deletedVolumes.size()]);
        }
    });
}
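The pattern above, a varargs String array used as a delete set while iterating with Iterator.remove(), distills to a minimal sketch (the class and method names here are hypothetical):

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import org.apache.commons.lang.ArrayUtils;

public class MembershipFilterSketch {
    // Removes every entry whose key appears in keysToDelete, returning the removed keys.
    static List<String> removeMatching(List<String> entries, String... keysToDelete) {
        List<String> removed = new ArrayList<String>();
        for (Iterator<String> it = entries.iterator(); it.hasNext();) {
            String key = it.next();
            // indexOf returns -1 when the key is absent from the array
            if (ArrayUtils.indexOf(keysToDelete, key) != -1) {
                it.remove();
                removed.add(key);
            }
        }
        return removed;
    }
}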
From source file:org.apache.hadoop.hbase.master.balancer.TestBaseLoadBalancer.java
@Test
public void testClusterRegionLocations() {
    // tests whether region locations are handled correctly in Cluster
    List<ServerName> servers = getListOfServerNames(randomServers(10, 10));
    List<HRegionInfo> regions = randomRegions(101);
    Map<ServerName, List<HRegionInfo>> clusterState = new HashMap<ServerName, List<HRegionInfo>>();
    assignRegions(regions, servers, clusterState);

    // mock block locality for some regions
    RegionLocationFinder locationFinder = mock(RegionLocationFinder.class);
    // block locality: region:0  => {server:0}
    //                 region:1  => {server:0, server:1}
    //                 region:42 => {server:4, server:9, server:5}
    when(locationFinder.getTopBlockLocations(regions.get(0)))
            .thenReturn(Lists.newArrayList(servers.get(0)));
    when(locationFinder.getTopBlockLocations(regions.get(1)))
            .thenReturn(Lists.newArrayList(servers.get(0), servers.get(1)));
    when(locationFinder.getTopBlockLocations(regions.get(42)))
            .thenReturn(Lists.newArrayList(servers.get(4), servers.get(9), servers.get(5)));
    when(locationFinder.getTopBlockLocations(regions.get(43)))
            .thenReturn(Lists.newArrayList(ServerName.valueOf("foo", 0, 0))); // this server does not exist in clusterStatus

    BaseLoadBalancer.Cluster cluster = new Cluster(null, clusterState, null, locationFinder, null);

    int r0 = ArrayUtils.indexOf(cluster.regions, regions.get(0)); // this is ok, it is just a test
    int r1 = ArrayUtils.indexOf(cluster.regions, regions.get(1));
    int r10 = ArrayUtils.indexOf(cluster.regions, regions.get(10));
    int r42 = ArrayUtils.indexOf(cluster.regions, regions.get(42));
    int r43 = ArrayUtils.indexOf(cluster.regions, regions.get(43));

    int s0 = cluster.serversToIndex.get(servers.get(0).getHostAndPort());
    int s1 = cluster.serversToIndex.get(servers.get(1).getHostAndPort());
    int s4 = cluster.serversToIndex.get(servers.get(4).getHostAndPort());
    int s5 = cluster.serversToIndex.get(servers.get(5).getHostAndPort());
    int s9 = cluster.serversToIndex.get(servers.get(9).getHostAndPort());

    // region 0 locations
    assertEquals(1, cluster.regionLocations[r0].length);
    assertEquals(s0, cluster.regionLocations[r0][0]);

    // region 1 locations
    assertEquals(2, cluster.regionLocations[r1].length);
    assertEquals(s0, cluster.regionLocations[r1][0]);
    assertEquals(s1, cluster.regionLocations[r1][1]);

    // region 10 locations
    assertEquals(0, cluster.regionLocations[r10].length);

    // region 42 locations
    assertEquals(3, cluster.regionLocations[r42].length);
    assertEquals(s4, cluster.regionLocations[r42][0]);
    assertEquals(s9, cluster.regionLocations[r42][1]);
    assertEquals(s5, cluster.regionLocations[r42][2]);

    // region 43 locations
    assertEquals(1, cluster.regionLocations[r43].length);
    assertEquals(-1, cluster.regionLocations[r43][0]);
}
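ArrayUtils.indexOf on an Object[] matches by equals(), which is what lets the test map HRegionInfo instances back to their positions in cluster.regions. A minimal sketch of that object-to-index mapping (the data here is hypothetical):

import org.apache.commons.lang.ArrayUtils;

public class ObjectIndexSketch {
    public static void main(String[] args) {
        String[] regions = { "region-a", "region-b", "region-c" };
        // Matching uses equals(), not reference identity, so an equal
        // but distinct instance still resolves to the same index.
        System.out.println(ArrayUtils.indexOf(regions, new String("region-b"))); // 1
        System.out.println(ArrayUtils.indexOf(regions, "missing"));              // -1
    }
}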
From source file:org.apache.hadoop.hive.metastore.hbase.PartitionKeyComparator.java
public PartitionKeyComparator(String names, String types, List<Range> ranges, List<Operator> ops) {
    super(null);
    this.names = names;
    this.types = types;
    this.ranges = ranges;
    this.ops = ops;
    serdeProps = new Properties();
    serdeProps.setProperty(serdeConstants.LIST_COLUMNS, "dbName,tableName," + names);
    serdeProps.setProperty(serdeConstants.LIST_COLUMN_TYPES, "string,string," + types);

    this.nativeRanges = new ArrayList<NativeRange>(this.ranges.size());
    for (int i = 0; i < ranges.size(); i++) {
        Range range = ranges.get(i);
        NativeRange nativeRange = new NativeRange();
        nativeRanges.add(i, nativeRange);
        nativeRange.pos = Arrays.asList(names.split(",")).indexOf(range.keyName);
        TypeInfo expectedType = TypeInfoUtils.getTypeInfoFromTypeString(types.split(",")[nativeRange.pos]);
        ObjectInspector outputOI = TypeInfoUtils.getStandardWritableObjectInspectorFromTypeInfo(expectedType);
        nativeRange.start = null;
        if (range.start != null) {
            Converter converter = ObjectInspectorConverters
                    .getConverter(PrimitiveObjectInspectorFactory.javaStringObjectInspector, outputOI);
            nativeRange.start = (Comparable) converter.convert(range.start.value);
        }
        nativeRange.end = null;
        if (range.end != null) {
            Converter converter = ObjectInspectorConverters
                    .getConverter(PrimitiveObjectInspectorFactory.javaStringObjectInspector, outputOI);
            nativeRange.end = (Comparable) converter.convert(range.end.value);
        }
    }

    this.nativeOps = new ArrayList<NativeOperator>(this.ops.size());
    for (int i = 0; i < ops.size(); i++) {
        Operator op = ops.get(i);
        NativeOperator nativeOp = new NativeOperator();
        nativeOps.add(i, nativeOp);
        nativeOp.pos = ArrayUtils.indexOf(names.split(","), op.keyName);
        TypeInfo expectedType = TypeInfoUtils.getTypeInfoFromTypeString(types.split(",")[nativeOp.pos]);
        ObjectInspector outputOI = TypeInfoUtils.getStandardWritableObjectInspectorFromTypeInfo(expectedType);
        Converter converter = ObjectInspectorConverters
                .getConverter(PrimitiveObjectInspectorFactory.javaStringObjectInspector, outputOI);
        nativeOp.val = (Comparable) converter.convert(op.val);
    }
}
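Both loops resolve a key name to its column position in a comma-separated header, once via List.indexOf and once via ArrayUtils.indexOf; the two are interchangeable for this purpose. A minimal sketch (the column names are hypothetical):

import org.apache.commons.lang.ArrayUtils;

public class ColumnPositionSketch {
    public static void main(String[] args) {
        String names = "dbName,tableName,ds,hr";
        // position of a partition key within the comma-separated column list
        System.out.println(ArrayUtils.indexOf(names.split(","), "ds")); // 2
        System.out.println(ArrayUtils.indexOf(names.split(","), "no")); // -1 for an unknown key
    }
}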
From source file:org.apache.kylin.cube.model.CubeDesc.java
private TblColRef initDimensionColRef(DimensionDesc dim, String colName) {
    TblColRef col = model.findColumn(dim.getTable(), colName);

    // for backward compatibility
    if (KylinVersion.isBefore200(getVersion())) {
        // always use FK instead of PK; an FK can be shared by more than one lookup table
        JoinDesc join = dim.getJoin();
        if (join != null) {
            int idx = ArrayUtils.indexOf(join.getPrimaryKeyColumns(), col);
            if (idx >= 0) {
                col = join.getForeignKeyColumns()[idx];
            }
        }
    }

    return initDimensionColRef(col);
}
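The PK-to-FK swap works because getPrimaryKeyColumns() and getForeignKeyColumns() are parallel arrays: find the index in one, read the counterpart at the same index in the other. A minimal sketch of the idiom (the arrays are hypothetical):

import org.apache.commons.lang.ArrayUtils;

public class ParallelArraySketch {
    public static void main(String[] args) {
        // parallel arrays: pk[i] joins to fk[i]
        String[] pk = { "DIM.ID", "DIM.CODE" };
        String[] fk = { "FACT.DIM_ID", "FACT.DIM_CODE" };

        String col = "DIM.CODE";
        int idx = ArrayUtils.indexOf(pk, col);
        if (idx >= 0) {
            col = fk[idx]; // swap to the foreign-key side
        }
        System.out.println(col); // FACT.DIM_CODE
    }
}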
From source file:org.apache.kylin.cube.model.v1_4_0.CubeDesc.java
private void initDimensionColumns() {
    for (DimensionDesc dim : dimensions) {
        JoinDesc join = dim.getJoin();

        // init dimension columns
        ArrayList<TblColRef> dimCols = Lists.newArrayList();
        String[] colStrs = dim.getColumn();

        if (colStrs == null && dim.isDerived() || ArrayUtils.contains(colStrs, "{FK}")) {
            // special case when the column is omitted
            for (TblColRef col : join.getForeignKeyColumns()) {
                dimCols.add(initDimensionColRef(col));
            }
        } else {
            // normal case
            if (colStrs == null || colStrs.length == 0)
                throw new IllegalStateException("Dimension column must not be blank " + dim);

            for (String colStr : colStrs) {
                dimCols.add(initDimensionColRef(dim, colStr));
            }

            // fill back column ref in hierarchy
            if (dim.isHierarchy()) {
                for (int i = 0; i < dimCols.size(); i++)
                    dim.getHierarchy()[i].setColumnRef(dimCols.get(i));
            }
        }

        TblColRef[] dimColArray = (TblColRef[]) dimCols.toArray(new TblColRef[dimCols.size()]);
        dim.setColumnRefs(dimColArray);

        // init derived columns
        TblColRef[] hostCols = dimColArray;
        if (dim.isDerived()) {
            String[] derived = dim.getDerived();
            String[][] split = splitDerivedColumnAndExtra(derived);
            String[] derivedNames = split[0];
            String[] derivedExtra = split[1];
            TblColRef[] derivedCols = new TblColRef[derivedNames.length];
            for (int i = 0; i < derivedNames.length; i++) {
                derivedCols[i] = initDimensionColRef(dim, derivedNames[i]);
            }
            initDerivedMap(hostCols, DeriveType.LOOKUP, dim, derivedCols, derivedExtra);
        }

        // PK-FK derive the other side
        if (join != null) {
            TblColRef[] fk = join.getForeignKeyColumns();
            TblColRef[] pk = join.getPrimaryKeyColumns();

            allColumns.addAll(Arrays.asList(fk));
            allColumns.addAll(Arrays.asList(pk));

            for (int i = 0; i < fk.length; i++) {
                int find = ArrayUtils.indexOf(hostCols, fk[i]);
                if (find >= 0) {
                    TblColRef derivedCol = initDimensionColRef(pk[i]);
                    initDerivedMap(hostCols[find], DeriveType.PK_FK, dim, derivedCol);
                }
            }
            /** disabled: we don't need FK to be derived from PK
            for (int i = 0; i < pk.length; i++) {
                int find = ArrayUtils.indexOf(hostCols, pk[i]);
                if (find >= 0) {
                    TblColRef derivedCol = initDimensionColRef(fk[i]);
                    initDerivedMap(hostCols[find], DeriveType.PK_FK, dim, derivedCol);
                }
            }
            */
        }
    }
}
From source file:org.apache.kylin.cube.model.v1_4_0.CubeDesc.java
private TblColRef initDimensionColRef(DimensionDesc dim, String colName) {
    TblColRef ref = model.findColumn(dim.getTable(), colName);

    // always use FK instead of PK; an FK can be shared by more than one lookup table
    JoinDesc join = dim.getJoin();
    if (join != null) {
        int idx = ArrayUtils.indexOf(join.getPrimaryKeyColumns(), ref);
        if (idx >= 0) {
            ref = join.getForeignKeyColumns()[idx];
        }
    }

    return initDimensionColRef(ref);
}
From source file:org.apache.kylin.dict.DictionaryManager.java
/**
 * Decide a dictionary's source data, leveraging the PK-FK relationship.
 */
public TblColRef decideSourceData(DataModelDesc model, TblColRef col) {
    // Note: an FK on the fact table is supported by scanning the related PK on the lookup table

    // FK on fact table with an inner join: use the PK from the lookup table instead
    if (!model.isFactTable(col.getTable()))
        return col;

    // find a lookup table that the col joins as FK
    for (TableRef lookup : model.getLookupTables()) {
        JoinDesc lookupJoin = model.getJoinByPKSide(lookup);
        int find = ArrayUtils.indexOf(lookupJoin.getForeignKeyColumns(), col);
        if (find < 0)
            continue;

        // make sure the joins are all inner up to the root
        if (isAllInnerJoinsToRoot(model, lookupJoin))
            return lookupJoin.getPrimaryKeyColumns()[find];
    }

    return col;
}
From source file:org.apache.mahout.classifier.df.mapreduce.partial.PartialSequentialBuilder.java
@Override
protected boolean runJob(Job job) throws IOException, InterruptedException {
    Configuration conf = job.getConfiguration();

    // retrieve the splits
    TextInputFormat input = new TextInputFormat();
    List<InputSplit> splits = input.getSplits(job);

    int nbSplits = splits.size();
    log.debug("Nb splits : {}", nbSplits);

    InputSplit[] sorted = new InputSplit[nbSplits];
    splits.toArray(sorted);
    Builder.sortSplits(sorted);

    int numTrees = Builder.getNbTrees(conf); // total number of trees

    TaskAttemptContext task = new TaskAttemptContext(conf, new TaskAttemptID());

    firstOutput = new MockContext(new Step1Mapper(), conf, task.getTaskAttemptID(), numTrees);

    /* first instance id in hadoop's order */
    //int[] firstIds = new int[nbSplits];
    /* partitions' sizes in hadoop order */
    int[] sizes = new int[nbSplits];

    // to compute firstIds, process the splits in file order
    long slowest = 0; // duration of slowest map
    int firstId = 0;
    for (InputSplit split : splits) {
        int hp = ArrayUtils.indexOf(sorted, split); // hadoop's partition

        RecordReader<LongWritable, Text> reader = input.createRecordReader(split, task);
        reader.initialize(split, task);

        Step1Mapper mapper = new MockStep1Mapper(getTreeBuilder(), dataset, getSeed(), hp, nbSplits, numTrees);

        long time = System.currentTimeMillis();

        //firstIds[hp] = firstId;

        while (reader.nextKeyValue()) {
            mapper.map(reader.getCurrentKey(), reader.getCurrentValue(), firstOutput);
            firstId++;
            sizes[hp]++;
        }

        mapper.cleanup(firstOutput);

        time = System.currentTimeMillis() - time;
        log.info("Duration : {}", DFUtils.elapsedTime(time));

        if (time > slowest) {
            slowest = time;
        }
    }

    log.info("Longest duration : {}", DFUtils.elapsedTime(slowest));
    return true;
}
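Here ArrayUtils.indexOf recovers each split's rank within the sorted array, which serves as its Hadoop partition number. A minimal sketch of computing ranks this way (the split names are hypothetical):

import java.util.Arrays;
import org.apache.commons.lang.ArrayUtils;

public class RankSketch {
    public static void main(String[] args) {
        String[] inFileOrder = { "split-c", "split-a", "split-b" };
        String[] sorted = inFileOrder.clone();
        Arrays.sort(sorted);

        for (String split : inFileOrder) {
            // a split's index in the sorted copy is its partition number
            System.out.println(split + " -> " + ArrayUtils.indexOf(sorted, split));
        }
        // split-c -> 2, split-a -> 0, split-b -> 1
    }
}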
From source file:org.apache.mahout.df.builder.DefaultTreeBuilderTest.java
/**
 * make sure that DefaultTreeBuilder.randomAttributes() returns the correct number of attributes
 * that have not been selected yet
 *
 * @throws Exception
 */
public void testRandomAttributes() throws Exception {
    Random rng = RandomUtils.getRandom();
    int nbAttributes = rng.nextInt(100) + 1;
    boolean[] selected = new boolean[nbAttributes];

    for (int nloop = 0; nloop < 100; nloop++) {
        Arrays.fill(selected, false);

        // randomly select some attributes
        int nbSelected = rng.nextInt(nbAttributes - 1);
        for (int index = 0; index < nbSelected; index++) {
            int attr;
            do {
                attr = rng.nextInt(nbAttributes);
            } while (selected[attr]);

            selected[attr] = true;
        }

        int m = rng.nextInt(nbAttributes);

        int[] attrs = DefaultTreeBuilder.randomAttributes(rng, selected, m);

        assertEquals(Math.min(m, nbAttributes - nbSelected), attrs.length);

        for (int attr : attrs) {
            // the attribute should not be already selected
            assertFalse("an attribute has already been selected", selected[attr]);

            // each attribute should be in the range [0, nbAttributes)
            assertTrue(attr >= 0);
            assertTrue(attr < nbAttributes);

            // each attribute should appear only once
            assertEquals(ArrayUtils.indexOf(attrs, attr), ArrayUtils.lastIndexOf(attrs, attr));
        }
    }
}
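The last assertion is a compact uniqueness test: a value occurs exactly once in an array iff indexOf and lastIndexOf agree on its position. A minimal sketch (the array is hypothetical):

import org.apache.commons.lang.ArrayUtils;

public class UniquenessSketch {
    // true iff value occurs exactly once in attrs
    static boolean occursOnce(int[] attrs, int value) {
        int first = ArrayUtils.indexOf(attrs, value);
        return first != -1 && first == ArrayUtils.lastIndexOf(attrs, value);
    }

    public static void main(String[] args) {
        int[] attrs = { 3, 7, 3, 9 };
        System.out.println(occursOnce(attrs, 7)); // true
        System.out.println(occursOnce(attrs, 3)); // false: duplicated
        System.out.println(occursOnce(attrs, 5)); // false: absent
    }
}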
From source file:org.apache.mahout.df.data.Dataset.java
/**
 * Returns the code used to represent the label value in the data
 *
 * @param label
 *          label's value to code
 * @return label's code, or -1 if the label is unknown
 */
public int labelCode(String label) {
    return ArrayUtils.indexOf(labels, label);
}
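Because ArrayUtils.indexOf returns -1 for a missing element, labelCode doubles as an unknown-label check. A minimal usage sketch (the labels are hypothetical):

import org.apache.commons.lang.ArrayUtils;

public class LabelCodeSketch {
    public static void main(String[] args) {
        String[] labels = { "no", "yes" };
        System.out.println(ArrayUtils.indexOf(labels, "yes"));   // 1
        System.out.println(ArrayUtils.indexOf(labels, "maybe")); // -1: unknown label
    }
}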