Example usage for com.google.common.io Closeables close

Introduction

On this page you can find example usage for com.google.common.io.Closeables.close.

Prototype

public static void close(@Nullable Closeable closeable, boolean swallowIOException) throws IOException 

Document

Closes a Closeable, with control over whether an IOException may be thrown.
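
For reference, a minimal sketch of the idiom this parameter supports (the class and file names are illustrative, not from any of the sources below): the boolean tracks whether the try block failed, so an IOException from close() is swallowed only when it would otherwise mask the primary exception.

import com.google.common.io.Closeables;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;

public class CloseablesIdiom {
    static String readFirstLine(File file) throws IOException {
        BufferedReader reader = new BufferedReader(new FileReader(file));
        boolean threw = true;
        try {
            String line = reader.readLine();
            threw = false; // no exception escaped the try block
            return line;
        } finally {
            // Swallows a close() failure only if the try block already threw,
            // so the primary exception is never masked by the secondary one.
            Closeables.close(reader, threw);
        }
    }
}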

Usage

From source file:com.google.dart.compiler.SystemLibrariesReader.java

private String getSource(Reader reader) {
    String srcCode = null;
    boolean failed = true;
    try {
        srcCode = CharStreams.toString(reader);
        failed = false;
    } catch (IOException e) {
        e.printStackTrace();
    } finally {
        try {
            Closeables.close(reader, failed);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
    return srcCode;
}

From source file:io.druid.firehose.kafka.KafkaEightSimpleConsumerFirehoseFactory.java

@Override
public FirehoseV2 connect(final ByteBufferInputRowParser firehoseParser, Object lastCommit) throws IOException {
    final Map<Integer, Long> lastOffsets = loadOffsetFromPreviousMetaData(lastCommit);

    for (Integer partition : partitionIdList) {
        final KafkaSimpleConsumer kafkaSimpleConsumer = new KafkaSimpleConsumer(feed, partition, clientId,
                brokerList, earliest);
        Long startOffset = lastOffsets.get(partition);
        PartitionConsumerWorker worker = new PartitionConsumerWorker(feed, kafkaSimpleConsumer, partition,
                startOffset == null ? 0 : startOffset);
        consumerWorkers.add(worker);
    }

    final LinkedBlockingQueue<BytesMessageWithOffset> messageQueue = new LinkedBlockingQueue<BytesMessageWithOffset>(
            queueBufferLength);
    log.info("Kicking off all consumers");
    for (PartitionConsumerWorker worker : consumerWorkers) {
        worker.go(messageQueue);
    }
    log.info("All consumer started");

    return new FirehoseV2() {
        private Map<Integer, Long> lastOffsetPartitions;
        private volatile boolean stopped;
        private volatile BytesMessageWithOffset msg = null;
        private volatile InputRow row = null;

        {
            lastOffsetPartitions = Maps.newHashMap();
            lastOffsetPartitions.putAll(lastOffsets);
        }

        @Override
        public void start() throws Exception {
            nextMessage();
        }

        @Override
        public boolean advance() {
            if (stopped) {
                return false;
            }

            nextMessage();
            return true;
        }

        private void nextMessage() {
            try {
                row = null;
                while (row == null) {
                    if (msg != null) {
                        lastOffsetPartitions.put(msg.getPartition(), msg.offset());
                    }

                    msg = messageQueue.take();

                    final byte[] message = msg.message();
                    row = message == null ? null : firehoseParser.parse(ByteBuffer.wrap(message));
                }
            } catch (InterruptedException e) {
                // Let the caller decide whether to stop or continue when the thread is interrupted.
                log.warn(e, "Thread interrupted while taking from queue, propagating the interrupt");
                Thread.currentThread().interrupt();
            }
        }

        @Override
        public InputRow currRow() {
            if (stopped) {
                return null;
            }
            return row;
        }

        @Override
        public Committer makeCommitter() {
            final Map<Integer, Long> offsets = Maps.newHashMap(lastOffsetPartitions);

            return new Committer() {
                @Override
                public Object getMetadata() {
                    return offsets;
                }

                @Override
                public void run() {

                }
            };
        }

        @Override
        public void close() throws IOException {
            log.info("Stopping kafka 0.8 simple firehose");
            stopped = true;
            for (PartitionConsumerWorker t : consumerWorkers) {
                Closeables.close(t, true);
            }
        }
    };
}

From source file:com.android.xml.AndroidManifest.java

/**
 * Returns whether the versionCode attribute is set in a given manifest.
 * @param manifestFile the manifest to check
 * @return true if the versionCode attribute is present and its value is not empty.
 * @throws XPathExpressionException
 * @throws StreamException If any error happens when reading the manifest.
 */
public static boolean hasVersionCode(IAbstractFile manifestFile)
        throws XPathExpressionException, StreamException {
    XPath xPath = AndroidXPathFactory.newXPath();

    InputStream is = null;
    try {
        is = manifestFile.getContents();
        Object result = xPath.evaluate("/" + NODE_MANIFEST + "/@" + AndroidXPathFactory.DEFAULT_NS_PREFIX + ":"
                + ATTRIBUTE_VERSIONCODE, new InputSource(is), XPathConstants.NODE);

        if (result != null) {
            Node node = (Node) result;
            if (!node.getNodeValue().isEmpty()) {
                return true;
            }
        }
    } finally {
        try {
            Closeables.close(is, true /* swallowIOException */);
        } catch (IOException e) {
            // cannot happen
        }
    }

    return false;
}

From source file:org.apache.mahout.clustering.evaluation.RepresentativePointsDriver.java

/**
 * Run the job using supplied arguments as a sequential process
 *
 * @param conf
 *          the Configuration to use
 * @param clusteredPointsIn
 *          the directory pathname for input points
 * @param stateIn
 *          the directory pathname for input state
 * @param stateOut
 *          the directory pathname for output state
 * @param measure
 *          the DistanceMeasure to use
 */
private static void runIterationSeq(Configuration conf, Path clusteredPointsIn, Path stateIn, Path stateOut,
        DistanceMeasure measure) throws IOException {

    Map<Integer, List<VectorWritable>> repPoints = RepresentativePointsMapper.getRepresentativePoints(conf,
            stateIn);
    Map<Integer, WeightedVectorWritable> mostDistantPoints = Maps.newHashMap();
    FileSystem fs = FileSystem.get(clusteredPointsIn.toUri(), conf);
    for (Pair<IntWritable, WeightedVectorWritable> record : new SequenceFileDirIterable<IntWritable, WeightedVectorWritable>(
            clusteredPointsIn, PathType.LIST, PathFilters.logsCRCFilter(), null, true, conf)) {
        RepresentativePointsMapper.mapPoint(record.getFirst(), record.getSecond(), measure, repPoints,
                mostDistantPoints);
    }
    int part = 0;
    SequenceFile.Writer writer = new SequenceFile.Writer(fs, conf, new Path(stateOut, "part-m-" + part++),
            IntWritable.class, VectorWritable.class);
    try {
        for (Entry<Integer, List<VectorWritable>> entry : repPoints.entrySet()) {
            for (VectorWritable vw : entry.getValue()) {
                writer.append(new IntWritable(entry.getKey()), vw);
            }
        }
    } finally {
        Closeables.close(writer, false);
    }
    writer = new SequenceFile.Writer(fs, conf, new Path(stateOut, "part-m-" + part++), IntWritable.class,
            VectorWritable.class);
    try {
        for (Map.Entry<Integer, WeightedVectorWritable> entry : mostDistantPoints.entrySet()) {
            writer.append(new IntWritable(entry.getKey()), new VectorWritable(entry.getValue().getVector()));
        }
    } finally {
        Closeables.close(writer, false);
    }
}

From source file:org.jclouds.examples.rackspace.cloudblockstorage.CreateVolumeAndAttach.java

/**
 * Always close your service when you're done with it.
 *
 * Note that closing quietly like this is not necessary in Java 7.
 * You would use try-with-resources in the main method instead.
 */
public void close() throws IOException {
    Closeables.close(cinderApi, true);
    Closeables.close(computeService.getContext(), true);
}
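
As the comment above notes, on Java 7 and later the same cleanup can be written with try-with-resources instead of Closeables.close. A minimal sketch, assuming CinderApi implements Closeable (as it does in current jclouds releases); the provider id and credentials are placeholders:

import java.io.IOException;

import org.jclouds.ContextBuilder;
import org.jclouds.openstack.cinder.v1.CinderApi;

public class CreateVolumeWithTryWithResources {
    public static void main(String[] args) throws IOException {
        // try-with-resources closes the API automatically, even if the body
        // throws, so no explicit close() or Closeables.close call is needed.
        try (CinderApi cinderApi = ContextBuilder.newBuilder("rackspace-cloudblockstorage-us")
                .credentials("username", "apiKey")
                .buildApi(CinderApi.class)) {
            // ... create and attach volumes with cinderApi ...
        }
    }
}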

From source file:org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreBlobStore.java

@Override
public int readBlob(String encodedBlobId, long pos, byte[] buff, int off, int length) throws IOException {
    // This is inefficient, as repeated calls for the same blobId each open a new stream.
    // Instead, clients should access the stream directly from the DataRecord, by
    // special-casing a BlobStore which implements DataStore.
    InputStream stream = getInputStream(encodedBlobId);
    boolean threw = true;
    try {
        ByteStreams.skipFully(stream, pos);
        int readCount = stream.read(buff, off, length);
        threw = false;
        return readCount;
    } finally {
        Closeables.close(stream, threw);
    }
}

From source file:com.android.sdklib.internal.project.ProjectPropertiesWorkingCopy.java

/**
 * Saves the property file, using UTF-8 encoding.
 * @throws IOException
 * @throws StreamException
 */
public synchronized void save() throws IOException, StreamException {
    IAbstractFile toSave = mProjectFolder.getFile(mType.getFilename());

    // Write the whole file into a byte array before dumping it to disk. This is
    // so that, if the file already exists, it is not truncated before the new
    // content is ready.
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    OutputStreamWriter writer = new OutputStreamWriter(baos, SdkConstants.INI_CHARSET);

    if (toSave.exists()) {
        InputStream contentStream = toSave.getContents();
        InputStreamReader isr = null;
        BufferedReader reader = null;

        try {
            contentStream = toSave.getContents();
            //noinspection IOResourceOpenedButNotSafelyClosed
            isr = new InputStreamReader(contentStream, SdkConstants.INI_CHARSET);
            //noinspection IOResourceOpenedButNotSafelyClosed
            reader = new BufferedReader(isr);

            // Since we're reading the existing file and replacing values with new ones, or
            // skipping removed values, we need to record which properties have been visited,
            // so that we can figure out later which new properties need to be added at the
            // end of the file.
            Set<String> visitedProps = new HashSet<String>();

            String line = null;
            while ((line = reader.readLine()) != null) {
                // check if this is a line containing a property.
                if (!line.isEmpty() && line.charAt(0) != '#') {

                    Matcher m = PATTERN_PROP.matcher(line);
                    if (m.matches()) {
                        String key = m.group(1);
                        String value = m.group(2);

                        // record the prop
                        visitedProps.add(key);

                        // check if this property must be removed.
                        if (mType.isRemovedProperty(key)) {
                            value = null;
                        } else if (mProperties.containsKey(key)) { // if the property still exists.
                            // put the new value.
                            value = mProperties.get(key);
                        } else {
                            // property doesn't exist. Check if it's a known property.
                            // if it's a known one, we'll remove it, otherwise, leave it untouched.
                            if (mType.isKnownProperty(key)) {
                                value = null;
                            }
                        }

                        // if the value is still valid, write it down.
                        if (value != null) {
                            writeValue(writer, key, value, false /*addComment*/);
                        }
                    } else {
                        // the line was wrong, let's just ignore it so that it's removed from the
                        // file.
                    }
                } else {
                    // non-property line: just write the line in the output as-is.
                    writer.append(line).append('\n');
                }
            }

            // now add the new properties.
            for (Entry<String, String> entry : mProperties.entrySet()) {
                if (!visitedProps.contains(entry.getKey())) {
                    String value = entry.getValue();
                    if (value != null) {
                        writeValue(writer, entry.getKey(), value, true /*addComment*/);
                    }
                }
            }
        } finally {
            try {
                Closeables.close(reader, true /* swallowIOException */);
            } catch (IOException e) {
                // cannot happen
            }
            try {
                Closeables.close(isr, true /* swallowIOException */);
            } catch (IOException e) {
                // cannot happen
            }
            try {
                Closeables.close(contentStream, true /* swallowIOException */);
            } catch (IOException e) {
                // cannot happen
            }
        }

    } else {
        // new file, just write it all

        // write the header (can be null, for example for PropertyType.LEGACY_BUILD)
        if (mType.getHeader() != null) {
            writer.write(mType.getHeader());
        }

        // write the properties.
        for (Entry<String, String> entry : mProperties.entrySet()) {
            String value = entry.getValue();
            if (value != null) {
                writeValue(writer, entry.getKey(), value, true /*addComment*/);
            }
        }
    }

    writer.flush();

    // now put the content in the file.
    OutputStream filestream = toSave.getOutputStream();
    filestream.write(baos.toByteArray());
    filestream.flush();
    filestream.close();
}

From source file:com.replaymod.replaystudio.replay.ZipReplayFile.java

@Override
public void close() throws IOException {
    if (zipFile != null) {
        zipFile.close();
    }
    for (OutputStream out : outputStreams.values()) {
        Closeables.close(out, true);
    }
    outputStreams.clear();

    changedEntries.clear();
    removedEntries.clear();
    delete(tmpFiles);
}

From source file:org.kitesdk.data.spi.filesystem.SchemaManager.java

/**
 * Writes the schema and returns a URI pointing to the schema file it writes.
 * The URI can be used in the Hive metastore or other tools.
 *
 * @param schema the schema to write
 * @return A URI pointing to the written schema
 */
public URI writeSchema(Schema schema) {

    Path previousPath = newestFile();

    // If the previous schema is identical to the current update,
    // simply keep the previous one.
    if (previousPath != null) {

        Schema previousSchema = loadSchema(previousPath);

        if (schema.equals(previousSchema)) {

            return rootFileSystem.makeQualified(previousPath).toUri();
        }
    }

    // Ensure all previous schemas are compatible with the new one.
    // This is necessary because with Avro schema evolution,
    // it is possible for all schemas to be compatible with their
    // immediate predecessor but not with a further ancestor.
    Map<Integer, Schema> schemas = getSchemas();

    for (Schema oldSchema : schemas.values()) {
        if (!SchemaValidationUtil.canRead(oldSchema, schema)) {
            throw new IncompatibleSchemaException(
                    "Schema cannot read data " + "written using existing schema. Schema: "
                            + schema.toString(true) + "\nPrevious schema: " + oldSchema.toString(true));
        }
    }

    Path schemaPath = null;

    if (previousPath == null) {
        schemaPath = new Path(schemaDirectory, "1.avsc");
    } else {

        String previousName = previousPath.getName().substring(0, previousPath.getName().indexOf('.'));

        int i = Integer.parseInt(previousName) + 1;

        schemaPath = new Path(schemaDirectory, Integer.toString(i) + ".avsc");
    }

    FSDataOutputStream outputStream = null;
    boolean threw = true;
    try {
        outputStream = rootFileSystem.create(schemaPath, false);
        outputStream.write(schema.toString(true).getBytes(Charsets.UTF_8));
        outputStream.flush();
        threw = false;
    } catch (IOException e) {
        throw new DatasetIOException("Unable to save schema file: " + schemaPath, e);
    } finally {
        try {
            Closeables.close(outputStream, threw);
        } catch (IOException e) {
            throw new DatasetIOException("Cannot close", e);
        }
    }

    return rootFileSystem.makeQualified(schemaPath).toUri();
}

From source file:com.hmaimi.jodis.RoundRobinJedisPool.java

@Override
public void close() {
    try {
        Closeables.close(watcher, true);
    } catch (IOException e) {
        // cannot happen: swallowIOException is true
    }
    if (closeCurator) {
        curatorClient.close();
    }
    List<PooledObject> pools = this.pools;
    this.pools = ImmutableList.of();
    for (PooledObject pool : pools) {
        pool.pool.close();
    }
}