List of usage examples for com.google.gson.stream.JsonReader.hasNext()
public boolean hasNext() throws IOException
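Before the collected examples, a minimal sketch of the typical hasNext() loop: hasNext() returns true while the current array or object has more elements, so it is always paired with beginArray()/endArray() or beginObject()/endObject(), and every name read with nextName() must be followed by consuming or skipping its value. The JSON input and the "name" field below are hypothetical, chosen only for illustration.

import com.google.gson.stream.JsonReader;
import java.io.IOException;
import java.io.StringReader;

public class HasNextExample {
    public static void main(String[] args) throws IOException {
        // hypothetical input: an array of objects with a "name" field
        String json = "[{\"name\":\"a\"},{\"name\":\"b\"}]";
        try (JsonReader reader = new JsonReader(new StringReader(json))) {
            reader.beginArray();
            while (reader.hasNext()) {       // true while the array has more elements
                reader.beginObject();
                while (reader.hasNext()) {   // true while the object has more name/value pairs
                    String name = reader.nextName();
                    if (name.equals("name")) {
                        System.out.println(reader.nextString());
                    } else {
                        reader.skipValue();  // always consume or skip the value
                    }
                }
                reader.endObject();
            }
            reader.endArray();
        }
    }
}

The examples that follow apply this same pattern to real parsers, type adapters, and streaming readers.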
From source file: org.apache.airavata.workflow.core.parser.JsonWorkflowParser.java
License: Apache License
private List<OutPort> readApplicationOutputs(JsonReader jsonReader) throws IOException, ParserException {
    List<OutPort> outPorts = new ArrayList<>();
    PortModel portModel;
    OutPort outPort;
    String name;
    JsonToken peek = jsonReader.peek();
    if (peek == JsonToken.NULL) {
        jsonReader.nextNull();
    } else if (peek == JsonToken.BEGIN_ARRAY) {
        jsonReader.beginArray();
        while (jsonReader.hasNext()) {
            portModel = new PortModel();
            outPort = new OutPortImpl(portModel);
            jsonReader.beginObject();
            while (jsonReader.hasNext()) {
                name = jsonReader.nextName();
                if (name.equals(NAME)) {
                    portModel.setName(jsonReader.nextString());
                } else if (name.equals(ID)) {
                    portModel.setPortId(jsonReader.nextString());
                } else if (name.equals(DATATYPE)) {
                    jsonReader.skipValue();
                } else if (name.equals(DEFAULT_VALUE)) {
                    jsonReader.skipValue(); // can outputs have default values?
                } else if (name.equals(DESCRIPTION)) {
                    portModel.setDescription(jsonReader.nextString());
                } else {
                    jsonReader.skipValue();
                }
            }
            jsonReader.endObject();
            outPorts.add(outPort);
        }
        jsonReader.endArray();
    } else {
        throw new ParserException("Error! reading application outputs, expected " + getTokenString(JsonToken.NULL)
                + " or " + getTokenString(JsonToken.BEGIN_ARRAY) + " but found " + getTokenString(peek));
    }
    return outPorts;
}
From source file: org.apache.airavata.workflow.core.parser.JsonWorkflowParser.java
License: Apache License
private void readPosition(JsonReader jsonReader) throws IOException {
    JsonToken peek = jsonReader.peek();
    if (peek == JsonToken.NULL) {
        jsonReader.nextNull();
    } else if (peek == JsonToken.BEGIN_OBJECT) {
        jsonReader.beginObject();
        while (jsonReader.hasNext()) {
            // skip position data.
            jsonReader.nextName();
            jsonReader.skipValue();
        }
        jsonReader.endObject();
    } else {
        jsonReader.skipValue();
    }
}
From source file: org.apache.airavata.workflow.core.parser.JsonWorkflowParser.java
License: Apache License
private void readProperties(JsonReader jsonReader) throws IOException {
    JsonToken peek = jsonReader.peek();
    if (peek == JsonToken.NULL) {
        jsonReader.nextNull();
    } else if (peek == JsonToken.BEGIN_OBJECT) {
        jsonReader.beginObject();
        while (jsonReader.hasNext()) {
            // TODO: Read and use property values
            String name = jsonReader.nextName();
            jsonReader.skipValue();
        }
        jsonReader.endObject();
    } else {
        jsonReader.skipValue();
    }
}
From source file: org.apache.hadoop.dynamodb.importformat.ImportInputFormat.java
License: Open Source License
/**
 * An example manifest file looks like
 *
 * {"name":"DynamoDB-export","version":3, "entries":[
 * {"url":"s3://path/to/object/92dd1414-a049-4c68-88fb-a23acd44907e","mandatory":true},
 * {"url":"s3://path/to/object/ba3f3535-7aa1-4f97-a530-e72938bf4b76","mandatory":true} ]}
 */
// @formatter:on
private List<InputSplit> parseManifest(FileSystem fs, Path manifestPath, JobConf job) throws IOException {
    List<InputSplit> splits = null;
    FSDataInputStream fp = fs.open(manifestPath);
    JsonReader reader = new JsonReader(new InputStreamReader(fp, Charsets.UTF_8));
    reader.beginObject();
    while (reader.hasNext()) {
        String name = reader.nextName();
        switch (name) {
        case VERSION_JSON_KEY:
            job.set(DynamoDBConstants.EXPORT_FORMAT_VERSION, String.valueOf(reader.nextInt()));
            break;
        case ENTRIES_JSON_KEY:
            splits = readEntries(reader, job);
            break;
        default:
            log.info("Skipping a JSON key in the manifest file: " + name);
            reader.skipValue();
            break;
        }
    }
    reader.endObject();
    if (splits == null) {
        return Collections.emptyList();
    }
    return splits;
}
From source file: org.apache.hadoop.dynamodb.importformat.ImportInputFormat.java
License: Open Source License
/**
 * This method retrieves the URLs of all S3 files and generates input splits by combining
 * multiple S3 URLs into one split.
 *
 * @return a list of input splits. The length of this list may not be exactly the same as
 * <code>numSplits</code>. For example, if numSplits is larger than MAX_NUM_SPLITS or the number
 * of S3 files, then numSplits is ignored. Furthermore, not all input splits contain the same
 * number of S3 files. For example, with five S3 files {s1, s2, s3, s4, s5} and numSplits = 3,
 * this method returns a list of three input splits: {s1, s2}, {s3, s4} and {s5}.
 */
private List<InputSplit> readEntries(JsonReader reader, JobConf job) throws IOException {
    List<Path> paths = new ArrayList<Path>();
    Gson gson = DynamoDBUtil.getGson();
    reader.beginArray();
    while (reader.hasNext()) {
        ExportManifestEntry entry = gson.fromJson(reader, ExportManifestEntry.class);
        paths.add(new Path(entry.url));
    }
    reader.endArray();
    log.info("Number of S3 files: " + paths.size());
    if (paths.size() == 0) {
        return Collections.emptyList();
    }
    int filesPerSplit = (int) Math.ceil((double) (paths.size()) / Math.min(MAX_NUM_SPLITS, paths.size()));
    int numSplits = (int) Math.ceil((double) (paths.size()) / filesPerSplit);
    long[] fileMaxLengths = new long[filesPerSplit];
    Arrays.fill(fileMaxLengths, Long.MAX_VALUE / filesPerSplit);
    long[] fileStarts = new long[filesPerSplit];
    Arrays.fill(fileStarts, 0);
    List<InputSplit> splits = new ArrayList<InputSplit>(numSplits);
    for (int i = 0; i < numSplits; i++) {
        int start = filesPerSplit * i;
        int end = filesPerSplit * (i + 1);
        if (i == (numSplits - 1)) {
            end = paths.size();
        }
        Path[] pathsInOneSplit = paths.subList(start, end).toArray(new Path[end - start]);
        CombineFileSplit combineFileSplit = new CombineFileSplit(job, pathsInOneSplit, fileStarts,
                fileMaxLengths, new String[0]);
        splits.add(combineFileSplit);
    }
    return splits;
}
From source file: org.apache.hadoop.fs.http.client.ContentSummary.java
License: Apache License
public static TypeAdapter adapter() {
    return new TypeAdapter<ContentSummary>() {
        @Override
        public void write(JsonWriter out, ContentSummary value) throws IOException {
            /* not implemented */
        }

        @Override
        public ContentSummary read(JsonReader in) throws IOException {
            ContentSummary instance = null;
            in.setLenient(true);
            if (in.peek() == JsonToken.BEGIN_OBJECT) {
                in.beginObject();
                if (in.nextName().equalsIgnoreCase("ContentSummary")) {
                    String name;
                    in.beginObject();
                    instance = new ContentSummary();
                    while (in.hasNext()) {
                        name = in.nextName();
                        if (name.equalsIgnoreCase("directoryCount")) {
                            instance.directoryCount = in.nextInt();
                        } else if (name.equalsIgnoreCase("fileCount")) {
                            instance.fileCount = in.nextInt();
                        } else if (name.equalsIgnoreCase("length")) {
                            instance.length = in.nextInt();
                        } else if (name.equalsIgnoreCase("quota")) {
                            instance.quota = in.nextInt();
                        } else if (name.equalsIgnoreCase("spaceConsumed")) {
                            instance.spaceConsumed = in.nextInt();
                        } else if (name.equalsIgnoreCase("spaceQuota")) {
                            instance.spaceQuota = in.nextInt();
                        }
                    }
                    in.endObject();
                }
                in.endObject();
            }
            return instance;
        }
    };
}
From source file: org.apache.hadoop.fs.http.client.FileStatus.java
License: Apache License
public static TypeAdapter adapter() {
    return new TypeAdapter<FileStatus>() {
        @Override
        public void write(JsonWriter out, FileStatus value) throws IOException {
            /* not implemented */
        }

        @Override
        public FileStatus read(JsonReader in) throws IOException {
            FileStatus instance = null;
            in.setLenient(true);
            if (in.peek() == JsonToken.BEGIN_OBJECT) {
                in.beginObject();
                if (in.nextName().equalsIgnoreCase("FileStatus")) {
                    String name;
                    in.beginObject();
                    instance = new FileStatus();
                    while (in.hasNext()) {
                        name = in.nextName();
                        if (name.equalsIgnoreCase("accessTime")) {
                            instance.accessTime = in.nextLong();
                        } else if (name.equalsIgnoreCase("blockSize")) {
                            instance.blockSize = in.nextInt();
                        } else if (name.equalsIgnoreCase("length")) {
                            instance.length = in.nextLong();
                        } else if (name.equalsIgnoreCase("modificationTime")) {
                            instance.modTime = in.nextLong();
                        } else if (name.equalsIgnoreCase("replication")) {
                            instance.replication = in.nextInt();
                        } else if (name.equalsIgnoreCase("group")) {
                            instance.group = in.nextString();
                        } else if (name.equalsIgnoreCase("owner")) {
                            instance.owner = in.nextString();
                        } else if (name.equalsIgnoreCase("pathSuffix")) {
                            instance.suffix = in.nextString();
                        } else if (name.equalsIgnoreCase("permission")) {
                            instance.permission = in.nextString();
                        } else if (name.equalsIgnoreCase("type")) {
                            instance.type = FileType.valueOf(in.nextString());
                        }
                    }
                    in.endObject();
                }
                in.endObject();
            }
            return instance;
        }
    };
}
From source file: org.apache.jclouds.oneandone.rest.util.ServerFirewallPolicyAdapter.java
License: Apache License
@Override
public List<T> read(JsonReader reader) throws IOException {
    List<ServerFirewallPolicy> list = new ArrayList<ServerFirewallPolicy>();
    if (reader.peek() == JsonToken.BEGIN_OBJECT) {
        Type mapType = new TypeToken<Map<String, Object>>() {
        }.getType();
        Map<String, String> jsonMap = gson.fromJson(reader, mapType);
        ServerFirewallPolicy inning = ServerFirewallPolicy.create(jsonMap.get("id"), jsonMap.get("name"));
        list.add(inning);
    } else if (reader.peek() == JsonToken.BEGIN_ARRAY) {
        reader.beginArray();
        while (reader.hasNext()) {
            Type mapType = new TypeToken<Map<String, Object>>() {
            }.getType();
            Map<String, String> jsonMap = gson.fromJson(reader, mapType);
            ServerFirewallPolicy inning = ServerFirewallPolicy.create(jsonMap.get("id"), jsonMap.get("name"));
            list.add(inning);
        }
        reader.endArray();
    } else {
        reader.skipValue();
    }
    return (List<T>) list;
}
From source file: org.apache.jclouds.oneandone.rest.util.SnapshotAdapter.java
License: Apache License
@Override
public List<T> read(JsonReader reader) throws IOException {
    List<Snapshot> list = new ArrayList<Snapshot>();
    if (reader.peek() == JsonToken.BEGIN_OBJECT) {
        Type mapType = new TypeToken<Map<String, Object>>() {
        }.getType();
        Map<String, String> jsonMap = gson.fromJson(reader, mapType);
        Snapshot inning = Snapshot.create(jsonMap.get("id"), jsonMap.get("creation_date"),
                jsonMap.get("deletion_date"));
        list.add(inning);
    } else if (reader.peek() == JsonToken.BEGIN_ARRAY) {
        reader.beginArray();
        while (reader.hasNext()) {
            Type mapType = new TypeToken<Map<String, Object>>() {
            }.getType();
            Map<String, String> jsonMap = gson.fromJson(reader, mapType);
            Snapshot inning = Snapshot.create(jsonMap.get("id"), jsonMap.get("creation_date"),
                    jsonMap.get("deletion_date"));
            list.add(inning);
        }
        reader.endArray();
    } else {
        reader.skipValue();
    }
    return (List<T>) list;
}
From source file: org.apache.nifi.toolkit.zkmigrator.ZooKeeperMigrator.java
License: Apache License
void writeZooKeeper(InputStream zkData, AuthMode authMode, byte[] authData, boolean ignoreSource,
        boolean useExistingACL) throws IOException, ExecutionException, InterruptedException {
    // ensure that the chroot path exists
    ZooKeeper zooKeeperRoot = getZooKeeper(Joiner.on(',').join(zooKeeperEndpointConfig.getServers()),
            authMode, authData);
    ensureNodeExists(zooKeeperRoot, zooKeeperEndpointConfig.getPath(), CreateMode.PERSISTENT);
    closeZooKeeper(zooKeeperRoot);
    ZooKeeper zooKeeper = getZooKeeper(zooKeeperEndpointConfig.getConnectString(), authMode, authData);
    JsonReader jsonReader = new JsonReader(new BufferedReader(new InputStreamReader(zkData)));
    Gson gson = new GsonBuilder().create();
    jsonReader.beginArray();
    // determine source ZooKeeperEndpointConfig for this data
    final ZooKeeperEndpointConfig sourceZooKeeperEndpointConfig = gson.fromJson(jsonReader,
            ZooKeeperEndpointConfig.class);
    LOGGER.info("Source data was obtained from ZooKeeper: {}", sourceZooKeeperEndpointConfig);
    Preconditions.checkArgument(
            !Strings.isNullOrEmpty(sourceZooKeeperEndpointConfig.getConnectString())
                    && !Strings.isNullOrEmpty(sourceZooKeeperEndpointConfig.getPath())
                    && sourceZooKeeperEndpointConfig.getServers() != null
                    && sourceZooKeeperEndpointConfig.getServers().size() > 0,
            "Source ZooKeeper %s from %s is invalid", sourceZooKeeperEndpointConfig, zkData);
    Preconditions.checkArgument(
            Collections.disjoint(zooKeeperEndpointConfig.getServers(),
                    sourceZooKeeperEndpointConfig.getServers())
                    || !zooKeeperEndpointConfig.getPath().equals(sourceZooKeeperEndpointConfig.getPath())
                    || ignoreSource,
            "Source ZooKeeper config %s for the data provided can not contain the same server and path as the configured destination ZooKeeper config %s",
            sourceZooKeeperEndpointConfig, zooKeeperEndpointConfig);
    // stream through each node read from the json input
    final Stream<DataStatAclNode> stream = StreamSupport
            .stream(new Spliterators.AbstractSpliterator<DataStatAclNode>(0, 0) {
                @Override
                public boolean tryAdvance(Consumer<? super DataStatAclNode> action) {
                    try {
                        // stream each DataStatAclNode from configured json file
                        synchronized (jsonReader) {
                            if (jsonReader.hasNext()) {
                                action.accept(gson.fromJson(jsonReader, DataStatAclNode.class));
                                return true;
                            } else {
                                return false;
                            }
                        }
                    } catch (IOException e) {
                        throw new RuntimeException("unable to read nodes from json", e);
                    }
                }
            }, false);
    final List<CompletableFuture<Stat>> writeFutures = stream.parallel().map(node -> {
        /*
         * create stage to determine the acls that should be applied to the node.
         * this stage will be used to initialize the chain
         */
        final CompletableFuture<List<ACL>> determineACLStage = CompletableFuture
                .supplyAsync(() -> determineACLs(node, authMode, useExistingACL));
        /*
         * create stage to apply acls to nodes and transform node to DataStatAclNode object
         */
        final Function<List<ACL>, CompletableFuture<DataStatAclNode>> transformNodeStage = acls -> CompletableFuture
                .supplyAsync(() -> transformNode(node, acls));
        /*
         * create stage to ensure that nodes exist for the entire path of the zookeeper node; must be
         * invoked after the transformNode stage to ensure that the node will exist after path migration
         */
        final Function<DataStatAclNode, CompletionStage<String>> ensureNodeExistsStage = dataStatAclNode -> CompletableFuture
                .supplyAsync(() -> ensureNodeExists(zooKeeper, dataStatAclNode.getPath(),
                        dataStatAclNode.getEphemeralOwner() == 0 ? CreateMode.PERSISTENT : CreateMode.EPHEMERAL));
        /*
         * create stage that waits for both the transformNode and ensureNodeExists stages to complete,
         * and also makes the given transformed node available to the next stage
         */
        final BiFunction<String, DataStatAclNode, DataStatAclNode> combineEnsureNodeAndTransferNodeStage = (u,
                dataStatAclNode) -> dataStatAclNode;
        /*
         * create stage to transmit the node to the destination zookeeper endpoint; must be invoked after
         * the node has been transformed and its path has been created (or already exists) in the
         * destination zookeeper
         */
        final Function<DataStatAclNode, CompletionStage<Stat>> transmitNodeStage = dataStatNode -> CompletableFuture
                .supplyAsync(() -> transmitNode(zooKeeper, dataStatNode));
        /*
         * submit the stages chained together in the proper order to perform the processing on the given node
         */
        final CompletableFuture<DataStatAclNode> dataStatAclNodeCompletableFuture = determineACLStage
                .thenCompose(transformNodeStage);
        return dataStatAclNodeCompletableFuture.thenCompose(ensureNodeExistsStage)
                .thenCombine(dataStatAclNodeCompletableFuture, combineEnsureNodeAndTransferNodeStage)
                .thenCompose(transmitNodeStage);
    }).collect(Collectors.toList());
    CompletableFuture<Void> allWritesFuture = CompletableFuture
            .allOf(writeFutures.toArray(new CompletableFuture[writeFutures.size()]));
    final CompletableFuture<List<Stat>> finishedWrites = allWritesFuture
            .thenApply(v -> writeFutures.stream().map(CompletableFuture::join).collect(Collectors.toList()));
    final List<Stat> writesDone = finishedWrites.get();
    if (LOGGER.isInfoEnabled()) {
        final int writeCount = writesDone.size();
        LOGGER.info("{} {} transferred to {}", writeCount, writeCount == 1 ? "node" : "nodes",
                zooKeeperEndpointConfig);
    }
    jsonReader.close();
    closeZooKeeper(zooKeeper);
}