List of usage examples for com.google.common.collect Iterables toArray
static <T> T[] toArray(Iterable<? extends T> iterable, Class<T> type)
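A minimal, self-contained sketch of how this overload is typically called (the class and variable names here are illustrative, not taken from the projects below):

import com.google.common.collect.Iterables;
import java.util.Arrays;
import java.util.List;

public class ToArrayDemo {
    public static void main(String[] args) {
        // Any Iterable works; a List is used here for brevity.
        List<String> names = Arrays.asList("alpha", "beta", "gamma");
        // Copies the elements into a freshly allocated String[] of the right size.
        String[] array = Iterables.toArray(names, String.class);
        System.out.println(Arrays.toString(array)); // prints [alpha, beta, gamma]
    }
}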
From source file:org.onosproject.net.flow.impl.FlowRuleManager.java
@Override
public void removeFlowRulesById(ApplicationId id) {
    checkPermission(FLOWRULE_WRITE);
    removeFlowRules(Iterables.toArray(getFlowRulesById(id), FlowRule.class));
}
From source file:com.yahoo.druid.hadoop.HiveDatasourceInputFormat.java
@Override
public InputSplit[] getSplits(JobConf jobConf, int numSplits) throws IOException {
    logger.info("checkPost #5");
    String overlordUrl = jobConf.get(CONF_DRUID_OVERLORD_HOSTPORT);
    Preconditions.checkArgument(overlordUrl != null && !overlordUrl.isEmpty(),
            CONF_DRUID_OVERLORD_HOSTPORT + " not defined");
    logger.info("druid overlord url = " + overlordUrl);

    String schemaStr = jobConf.get(CONF_DRUID_SCHEMA);
    Preconditions.checkArgument(schemaStr != null && !schemaStr.isEmpty(),
            "schema undefined, provide " + CONF_DRUID_SCHEMA);
    logger.info("schema = " + schemaStr);

    DatasourceIngestionSpec ingestionSpec = HadoopDruidIndexerConfig.JSON_MAPPER.readValue(schemaStr,
            DatasourceIngestionSpec.class);
    String segmentsStr = getSegmentsToLoad(ingestionSpec.getDataSource(), ingestionSpec.getIntervals(),
            overlordUrl);
    logger.info("segments list received from overlord = " + segmentsStr);

    List<DataSegment> segmentsList = HadoopDruidIndexerConfig.JSON_MAPPER.readValue(segmentsStr,
            new TypeReference<List<DataSegment>>() {
            });
    VersionedIntervalTimeline<String, DataSegment> timeline = new VersionedIntervalTimeline<>(
            Ordering.natural());
    for (DataSegment segment : segmentsList) {
        timeline.add(segment.getInterval(), segment.getVersion(), segment.getShardSpec().createChunk(segment));
    }
    final List<TimelineObjectHolder<String, DataSegment>> timeLineSegments = timeline
            .lookup(ingestionSpec.getIntervals().get(0));
    final List<WindowedDataSegment> windowedSegments = new ArrayList<>();
    for (TimelineObjectHolder<String, DataSegment> holder : timeLineSegments) {
        for (PartitionChunk<DataSegment> chunk : holder.getObject()) {
            windowedSegments.add(new WindowedDataSegment(chunk.getObject(), holder.getInterval()));
        }
    }
    jobConf.set(CONF_INPUT_SEGMENTS, HadoopDruidIndexerConfig.JSON_MAPPER.writeValueAsString(windowedSegments));

    segmentsStr = Preconditions.checkNotNull(jobConf.get(CONF_INPUT_SEGMENTS), "No segments found to read");
    List<WindowedDataSegment> segments = HadoopDruidIndexerConfig.JSON_MAPPER.readValue(segmentsStr,
            new TypeReference<List<WindowedDataSegment>>() {
            });
    if (segments == null || segments.size() == 0) {
        throw new ISE("No segments found to read");
    }
    logger.info("segments to read " + segmentsStr);

    long maxSize = numSplits;
    if (maxSize > 0) {
        // combining is to happen, let us sort the segments list by size so that
        // they are combined appropriately
        Collections.sort(segments, new Comparator<WindowedDataSegment>() {
            @Override
            public int compare(WindowedDataSegment s1, WindowedDataSegment s2) {
                return Long.compare(s1.getSegment().getSize(), s2.getSegment().getSize());
            }
        });
    }

    List<InputSplit> splits = Lists.newArrayList();
    List<WindowedDataSegment> list = new ArrayList<>();
    long size = 0;

    // JobConf dummyConf = new JobConf();
    Job job = new Job(jobConf);
    JobContext jobContext = ShimLoader.getHadoopShims().newJobContext(job);
    Path[] paths = org.apache.hadoop.mapreduce.lib.input.FileInputFormat.getInputPaths(jobContext);
    logger.info("dummyPath : " + paths);
    jobConf.set("druid.hive.dummyfilename", paths[0].toString());

    InputFormat fio = supplier.get();
    for (WindowedDataSegment segment : segments) {
        if (size + segment.getSegment().getSize() > maxSize && size > 0) {
            splits.add(toDataSourceSplit(list, fio, jobConf, paths[0]));
            list = Lists.newArrayList();
            size = 0;
        }
        list.add(segment);
        size += segment.getSegment().getSize();
    }
    if (list.size() > 0) {
        splits.add(toDataSourceSplit(list, fio, jobConf, paths[0]));
    }

    logger.info("Number of splits: " + splits.size());
    for (InputSplit split : splits) {
        logger.info(split.getClass().getName());
        for (String location : split.getLocations()) {
            logger.info(location);
        }
    }
    return Iterables.toArray(splits, InputSplit.class);
}
From source file:org.artifactory.rest.util.StorageInfoHelper.java
private void addStorageInfoProperties(RestBaseStorageInfo storageInfo) {
    if (!isIncludeProperties()) {
        return;
    }
    // Outside the loop since we want Jackson to parse it as an empty list if there aren't any properties
    storageInfo.properties = Maps.newTreeMap();
    Properties propertiesAnnotatingItem = repositoryService.getProperties(itemInfo.getRepoPath());
    if (propertiesAnnotatingItem != null && !propertiesAnnotatingItem.isEmpty()) {
        for (String propertyName : propertiesAnnotatingItem.keySet()) {
            storageInfo.properties.put(propertyName,
                    Iterables.toArray(propertiesAnnotatingItem.get(propertyName), String.class));
        }
    }
}
From source file:net.citizensnpcs.api.command.CommandContext.java
public static Location parseLocation(Location currentLocation, String flag) throws CommandException {
    boolean denizen = flag.startsWith("l@");
    String[] parts = Iterables.toArray(LOCATION_SPLITTER.split(flag.replaceFirst("l@", "")), String.class);
    if (parts.length > 0) {
        String worldName = currentLocation != null ? currentLocation.getWorld().getName() : "";
        double x = 0, y = 0, z = 0;
        float yaw = 0F, pitch = 0F;
        switch (parts.length) {
        case 6:
            if (denizen) {
                worldName = parts[5].replaceFirst("w@", "");
            } else
                pitch = Float.parseFloat(parts[5]);
        case 5:
            if (denizen) {
                pitch = Float.parseFloat(parts[4]);
            } else
                yaw = Float.parseFloat(parts[4]);
        case 4:
            if (denizen && parts.length > 4) {
                yaw = Float.parseFloat(parts[3]);
            } else
                worldName = parts[3].replaceFirst("w@", "");
        case 3:
            x = Double.parseDouble(parts[0]);
            y = Double.parseDouble(parts[1]);
            z = Double.parseDouble(parts[2]);
            break;
        default:
            throw new CommandException(CommandMessages.INVALID_SPAWN_LOCATION);
        }
        World world = Bukkit.getWorld(worldName);
        if (world == null)
            throw new CommandException(CommandMessages.INVALID_SPAWN_LOCATION);
        return new Location(world, x, y, z, yaw, pitch);
    } else {
        Player search = Bukkit.getPlayerExact(flag);
        if (search == null)
            throw new CommandException(CommandMessages.PLAYER_NOT_FOUND_FOR_SPAWN);
        return search.getLocation();
    }
}
From source file:org.schedoscope.export.ftp.outputformat.FtpUploadOutputFormat.java
private static String[] setCSVHeader(Configuration conf) throws IOException {
    HCatSchema schema = HCatInputFormat.getTableSchema(conf);
    return Iterables.toArray(schema.getFieldNames(), String.class);
}
From source file:org.blip.workflowengine.transferobject.ModifiablePropertyNode.java
@Override
public PropertyNode add(final String key, final Long value, final Collection<Attribute> attributes) {
    return internalAdd(true, key, value, Iterables.toArray(attributes, Attribute.class));
}
From source file:org.apache.lens.cli.commands.LensFactCommands.java
/**
 * Drop storage from fact.
 *
 * @param tablepair the tablepair
 * @return the string
 */
@CliCommand(value = "fact drop storage", help = "drop a storage from fact")
public String dropStorageFromFact(@CliOption(key = { "", "table" }, mandatory = true,
        help = "<table-name> <storage-name>") String tablepair) {
    Iterable<String> parts = Splitter.on(' ').trimResults().omitEmptyStrings().split(tablepair);
    String[] pair = Iterables.toArray(parts, String.class);
    if (pair.length != 2) {
        return "Syntax error, please try in following " + "format. fact drop storage <table> <storage>";
    }
    APIResult result = getClient().dropStorageFromFact(pair[0], pair[1]);
    if (result.getStatus() == APIResult.Status.SUCCEEDED) {
        return "Fact table storage removal successful";
    } else {
        return "Fact table storage removal failed";
    }
}
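The example above pairs Splitter with Iterables.toArray, a common idiom when a lazily split string has to be handed to code that expects an array. A rough standalone sketch of that idiom (the input string and class name are invented for illustration):

import com.google.common.base.Splitter;
import com.google.common.collect.Iterables;

public class SplitPairDemo {
    public static void main(String[] args) {
        // Splitter returns an Iterable<String>, so Iterables.toArray bridges it to a String[].
        Iterable<String> parts = Splitter.on(' ').trimResults().omitEmptyStrings()
                .split("  fact_table   my_storage ");
        String[] pair = Iterables.toArray(parts, String.class);
        System.out.println(pair.length);               // 2
        System.out.println(pair[0] + " / " + pair[1]); // fact_table / my_storage
    }
}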
From source file:brooklyn.util.task.Tasks.java
public static Task<List<?>> parallel(Iterable<? extends TaskAdaptable<?>> tasks) {
    return parallel(asTasks(Iterables.toArray(tasks, TaskAdaptable.class)));
}
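The Brooklyn helper above converts an Iterable of tasks into an array before delegating to an array-based overload. A small sketch of the same bridge against a hypothetical varargs method (join here is invented; it merely stands in for an API that accepts varargs):

import com.google.common.collect.Iterables;
import java.util.Arrays;
import java.util.List;

public class VarargsBridgeDemo {
    // Hypothetical varargs method, standing in for an array/varargs-based overload.
    static String join(CharSequence... parts) {
        return String.join("-", parts);
    }

    public static void main(String[] args) {
        List<String> parts = Arrays.asList("a", "b", "c");
        // The Iterable is converted to a CharSequence[] so it can satisfy the varargs parameter.
        System.out.println(join(Iterables.toArray(parts, CharSequence.class))); // a-b-c
    }
}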
From source file:org.apache.jackrabbit.oak.plugins.index.lucene.LuceneIndexMBeanImpl.java
private String[] determineIndexedPaths(IndexSearcher searcher, final int maxLevel, int maxPathCount)
        throws IOException {
    Set<String> paths = Sets.newHashSet();
    int startDepth = getStartDepth(searcher, maxLevel);
    if (startDepth < 0) {
        return createMsg("startDepth cannot be determined after search for upto maxLevel [" + maxLevel + "]");
    }

    SearchContext sc = new SearchContext(searcher, maxLevel, maxPathCount);
    List<LuceneDoc> docs = getDocsAtLevel(startDepth, sc);
    int maxPathLimitBreachedAtLevel = -1;

    topLevel: for (LuceneDoc doc : docs) {
        TreeTraverser<LuceneDoc> traverser = new TreeTraverser<LuceneDoc>() {
            @Override
            public Iterable<LuceneDoc> children(@Nonnull LuceneDoc root) {
                //Break at maxLevel
                if (root.depth >= maxLevel) {
                    return Collections.emptyList();
                }
                return root.getChildren();
            }
        };

        for (LuceneDoc node : traverser.breadthFirstTraversal(doc)) {
            if (paths.size() < maxPathCount) {
                paths.add(node.path);
            } else {
                maxPathLimitBreachedAtLevel = node.depth;
                break topLevel;
            }
        }
    }

    if (maxPathLimitBreachedAtLevel < 0) {
        return Iterables.toArray(paths, String.class);
    }

    //If max limit for path is reached then we can safely
    //say about includedPaths upto depth = level at which limit reached - 1
    //As for that level we know *all* the path roots
    Set<String> result = Sets.newHashSet();
    int safeDepth = maxPathLimitBreachedAtLevel - 1;
    if (safeDepth > 0) {
        for (String path : paths) {
            int pathDepth = PathUtils.getDepth(path);
            if (pathDepth == safeDepth) {
                result.add(path);
            }
        }
    }
    return Iterables.toArray(result, String.class);
}
From source file:gov.nih.nci.firebird.selenium2.pages.user.MyAccountPageHelper.java
public void checkSponsorDelegateInformationDisplayed(FirebirdUser user) {
    page.getPersonTag().getHelper().verifyPersonInformationIsDisplayed(user.getPerson());
    Organization[] sponsors = Iterables.toArray(user.getSponsorDelegateOrganizations(), Organization.class);
    page.getSponsorSection().getHelper().verifySponsorDelegatesAreDisplayed(sponsors);
    checkSelectedRolesDisplayed(UserRoleType.SPONSOR_DELEGATE);
}