Example usage for java.io DataInputStream close

Introduction

This page collects example usages of java.io.DataInputStream.close(), taken from open-source projects.

Prototype

public void close() throws IOException 

Document

Closes this input stream and releases any system resources associated with the stream.
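
Before the project examples below, here is a minimal sketch of the call in isolation (data.bin is a placeholder file name). Since Java 7, DataInputStream is AutoCloseable, so close() is usually invoked implicitly through try-with-resources; an explicit close() call mainly matters when the stream's lifetime cannot be scoped to a single block.

import java.io.DataInputStream;
import java.io.FileInputStream;
import java.io.IOException;

public class CloseExample {
    public static void main(String[] args) throws IOException {
        // try-with-resources: close() runs automatically, even if readInt() throws
        try (DataInputStream in = new DataInputStream(new FileInputStream("data.bin"))) {
            System.out.println(in.readInt());
        }

        // equivalent explicit form: close() in a finally block
        DataInputStream in2 = new DataInputStream(new FileInputStream("data.bin"));
        try {
            System.out.println(in2.readInt());
        } finally {
            in2.close(); // releases the underlying file descriptor
        }
    }
}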

Usage

From source file:com.sky.drovik.player.media.DiskCache.java

private void loadIndex() {
    final String indexFilePath = getIndexFilePath();
    try {
        // Open the input stream.
        final FileInputStream fileInput = new FileInputStream(indexFilePath);
        final BufferedInputStream bufferedInput = new BufferedInputStream(fileInput, 1024);
        final DataInputStream dataInput = new DataInputStream(bufferedInput);

        // Read the header.
        final int magic = dataInput.readInt();
        final int version = dataInput.readInt();
        boolean valid = true;
        if (magic != INDEX_HEADER_MAGIC) {
            Log.e(TAG, "Index file appears to be corrupt (" + magic + " != " + INDEX_HEADER_MAGIC + "), "
                    + indexFilePath);
            valid = false;
        }
        if (valid && version != INDEX_HEADER_VERSION) {
            // Future versions can implement upgrade in this case.
            Log.e(TAG, "Index file version " + version + " not supported");
            valid = false;
        }
        if (valid) {
            mTailChunk = dataInput.readShort();
        }

        // Read the entries.
        if (valid) {
            // Parse the index file body into the in-memory map.
            final int numEntries = dataInput.readInt();
            mIndexMap = new LongSparseArray<Record>(numEntries);
            synchronized (mIndexMap) {
                for (int i = 0; i < numEntries; ++i) {
                    final long key = dataInput.readLong();
                    final int chunk = dataInput.readShort();
                    final int offset = dataInput.readInt();
                    final int size = dataInput.readInt();
                    final int sizeOnDisk = dataInput.readInt();
                    final long timestamp = dataInput.readLong();
                    mIndexMap.append(key, new Record(chunk, offset, size, sizeOnDisk, timestamp));
                }
            }
        }

        dataInput.close();
        if (!valid) {
            deleteAll();
        }

    } catch (FileNotFoundException e) {
        // If the file does not exist the cache is empty, so just continue.
    } catch (IOException e) {
        Log.e(TAG, "Unable to read the index file " + indexFilePath);
    } finally {
        if (mIndexMap == null) {
            mIndexMap = new LongSparseArray<Record>();
        }
    }
}
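
Note that in this example dataInput.close() sits on the success path only: if one of the read calls throws IOException, the catch block logs the error but the stream is never closed. A minimal sketch of the same idea with try-with-resources, which closes the stream on every path (readIndexMagic is a hypothetical helper, not part of DiskCache):

import java.io.BufferedInputStream;
import java.io.DataInputStream;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;

// Hypothetical helper: reads the index header magic, returning -1 if the
// file is missing or unreadable. try-with-resources guarantees close()
// runs even when readInt() throws.
static int readIndexMagic(String indexFilePath) {
    try (DataInputStream dataInput = new DataInputStream(
            new BufferedInputStream(new FileInputStream(indexFilePath), 1024))) {
        return dataInput.readInt();
    } catch (FileNotFoundException e) {
        return -1; // no index file yet: treat the cache as empty
    } catch (IOException e) {
        return -1; // unreadable index
    }
}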

From source file:com.intel.xdk.device.Device.java

public void getRemoteDataWithID(String requestUrl, String requestMethod, String requestBody, int uuid,
        CallbackContext callbackContext) {
    try {
        URL url = new URL(requestUrl);
        connection = (HttpURLConnection) url.openConnection();

        connection.setDoInput(true);
        connection.setDoOutput(true);
        connection.setUseCaches(false);

        connection.setRequestMethod(requestMethod);

        //Write requestBody
        DataOutputStream outputStream = new DataOutputStream(connection.getOutputStream());
        outputStream.writeBytes(requestBody);
        outputStream.writeBytes("&uuid=" + uuid);
        outputStream.flush();
        outputStream.close();

        //Get response code and response message
        int responseCode = connection.getResponseCode();
        String responseMessage = connection.getResponseMessage();

        //Get response Message
        DataInputStream inputStream = new DataInputStream(connection.getInputStream());
        if (responseCode == 200) {
            String temp;
            String responseBody = "";
            while ((temp = inputStream.readLine()) != null) {
                responseBody += temp;
            }
            callbackContext.success(uuid + ", " + responseBody);
            //String js = "javascript:" + successCallback + "(" + uuid + ", '" + responseBody + "');";
            //injectJS(js);
        } else {
            callbackContext.error(uuid + ", Failed to get the response, response code: " + responseCode
                    + ", response message: " + responseMessage);
            //String js = "javascript:" + errorCallback + "(" + uuid + ", '" + "Fail to get the response" + "');";
            //injectJS(js);
        }

        inputStream.close();
    } catch (IOException e) {
        Log.d("request", e.getMessage());
    }
}
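
A caveat on the example above: DataInputStream.readLine() has been deprecated since JDK 1.1 because it does not convert bytes to characters correctly. A minimal sketch of the usual replacement, wrapping the raw stream in a BufferedReader (readBody is a hypothetical helper; the charset assumes a UTF-8 response):

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;

// Hypothetical helper: drains an InputStream into a String, line by line.
// BufferedReader.readLine() replaces the deprecated DataInputStream.readLine().
static String readBody(InputStream raw) throws IOException {
    StringBuilder body = new StringBuilder();
    try (BufferedReader reader = new BufferedReader(
            new InputStreamReader(raw, StandardCharsets.UTF_8))) {
        String line;
        while ((line = reader.readLine()) != null) {
            body.append(line);
        }
    }
    return body.toString();
}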

From source file:org.apache.hadoop.fs.CopyOfTestDFSIO.java

private void analyzeResult(FileSystem fs, TestType testType, long execTime, String resFileName)
        throws IOException {
    Path reduceFile = getReduceFilePath(testType);
    long tasks = 0;
    long size = 0;
    long time = 0;
    float rate = 0;
    float sqrate = 0;
    DataInputStream in = null;
    BufferedReader lines = null;
    StringBuffer IOTime = new StringBuffer(500);
    try {

        in = new DataInputStream(fs.open(reduceFile));
        lines = new BufferedReader(new InputStreamReader(in));
        String line;
        while ((line = lines.readLine()) != null) {
            StringTokenizer tokens = new StringTokenizer(line, " \t\n\r\f%");
            String attr = tokens.nextToken();
            if (attr.endsWith(":tasks"))
                tasks = Long.parseLong(tokens.nextToken());
            else if (attr.endsWith(":size"))
                size = Long.parseLong(tokens.nextToken());
            else if (attr.endsWith(":time"))
                time = Long.parseLong(tokens.nextToken());
            else if (attr.endsWith(":rate"))
                rate = Float.parseFloat(tokens.nextToken());
            else if (attr.endsWith(":sqrate"))
                sqrate = Float.parseFloat(tokens.nextToken());
            else if (attr.contains(":EachIOtime")) {
                IOTime.append("\n");
                IOTime.append(line.split(":")[0] + "\t" + line.split(":")[2]);
            }
        }
    } finally {
        if (in != null)
            in.close();
        if (lines != null)
            lines.close();
    }

    double med = rate / 1000 / tasks;
    double stdDev = Math.sqrt(Math.abs(sqrate / 1000 / tasks - med * med));
    String resultLines[] = { "----- TestDFSIO ----- : " + testType,
            "           Date & time: " + new Date(System.currentTimeMillis()),
            "       Number of files: " + tasks, "Total MBytes processed: " + toMB(size),
            "file.blocksize: " + config.get("file.blocksize"),
            "dfs.replication: " + config.get("dfs.replication"),
            "     Throughput mb/sec: " + size * 1000.0 / (time * MEGA), "Average IO rate mb/sec: " + med,
            " IO rate std deviation: " + stdDev, "    Test exec time sec: " + (float) execTime / 1000,
            IOTime.toString() };

    PrintStream res = null;
    try {
        res = new PrintStream(new FileOutputStream(new File(resFileName), true));
        for (int i = 0; i < resultLines.length; i++) {
            LOG.info(resultLines[i]);
            res.println(resultLines[i]);
        }
    } finally {
        if (res != null)
            res.close();
    }
}

From source file:org.apache.giraph.partition.DiskBackedOnlineComputePartitionStore.java

/**
 * Used by a partition that is hot and has no in-memory edges under
 * OnlineCompute mode.
 *
 * @param partition partition whose edges should be loaded
 * @param id partition id
 * @param numVertices number of vertices whose out-edges are read from disk
 * @return the partition with its edges loaded
 */
private Partition<I, V, E, M> loadPartitionEdges(Partition<I, V, E, M> partition, Integer id, int numVertices) {
    try {
        File file = new File(getEdgesPath(id));
        DataInputStream inputStream = new DataInputStream(new BufferedInputStream(new FileInputStream(file)));
        //         LOG.info("loading edges for in memory partition" + id + " from"
        //               + getEdgesPath(id));
        for (int i = 0; i < numVertices; ++i) {
            readOutEdges(inputStream, partition);
        }
        //         LOG.info("read edges finished for in memory partition " + id + " from"
        //               + getEdgesPath(id));
        inputStream.close();
        /*
         * If the graph is static, keep the file around.
         */
        if (!isStaticGraph) {
            file.delete();
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
    return partition;
}

From source file:org.apache.giraph.ooc.DiskBackedPartitionStore.java

/**
 * Load messages for a given partition for the current superstep to memory.
 *
 * @param partitionId Id of the partition to load the messages for
 * @throws IOException
 */
private void loadMessages(int partitionId) throws IOException {
    // Messages for current superstep
    if (currentMessageStore != null && !conf.getOutgoingMessageClasses().useMessageCombiner()) {
        checkState(!currentMessageStore.hasMessagesForPartition(partitionId), "loadMessages: partition "
                + partitionId + " is on disk, " + "but its message store is in memory (impossible)");
        // First, reading the message store for the partition if there is any
        File file = new File(getMessagesPath(partitionId, serviceWorker.getSuperstep()));
        if (file.exists()) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("loadMessages: loading message store of partition " + partitionId);
            }
            FileInputStream filein = new FileInputStream(file);
            BufferedInputStream bufferin = new BufferedInputStream(filein);
            DataInputStream inputStream = new DataInputStream(bufferin);
            currentMessageStore.readFieldsForPartition(inputStream, partitionId);
            inputStream.close();
            checkState(file.delete(), "loadMessages: failed to delete %s.", file.getAbsolutePath());
        }

        messageBufferRWLock.writeLock().lock();
        Pair<Integer, List<VertexIdMessages<I, Writable>>> pendingMessages = pendingCurrentMessages
                .remove(partitionId);
        messageBufferRWLock.writeLock().unlock();

        // Second, reading message buffers (incoming messages in previous
        // superstep)
        file = new File(getPendingMessagesBufferPath(partitionId, serviceWorker.getSuperstep()));
        if (file.exists()) {
            FileInputStream filein = new FileInputStream(file);
            BufferedInputStream bufferin = new BufferedInputStream(filein);
            DataInputStream inputStream = new DataInputStream(bufferin);
            while (true) {
                int type;
                try {
                    type = inputStream.readInt();
                } catch (EOFException e) {
                    // Reached end of file, so all the records are read.
                    break;
                }
                SerializedMessageClass messageClass = SerializedMessageClass.values()[type];
                VertexIdMessages<I, Writable> vim;
                switch (messageClass) {
                case BYTE_ARRAY_VERTEX_ID_MESSAGES:
                    vim = new ByteArrayVertexIdMessages<>(conf.createOutgoingMessageValueFactory());
                    vim.setConf(conf);
                    break;
                case BYTE_ARRAY_ONE_MESSAGE_TO_MANY_IDS:
                    vim = new ByteArrayOneMessageToManyIds<>(conf.createOutgoingMessageValueFactory());
                    vim.setConf(conf);
                    break;
                default:
                    throw new IllegalStateException("loadMessages: unsupported " + "serialized message type!");
                }
                vim.readFields(inputStream);
                currentMessageStore.addPartitionMessages(partitionId, vim);
            }
            inputStream.close();
            checkState(file.delete(), "loadMessages: failed to delete %s", file.getAbsolutePath());
        }

        // Third, applying message buffers already in memory
        if (pendingMessages != null) {
            for (VertexIdMessages<I, Writable> vim : pendingMessages.getRight()) {
                currentMessageStore.addPartitionMessages(partitionId, vim);
            }
        }
        currentMessagesOnDisk.put(partitionId, false);
    }
}

From source file:org.apache.giraph.worker.BspServiceSource.java

/**
 * Load saved partitions in multiple threads.
 * @param superstep superstep to load
 * @param partitions list of partitions to load
 */
private void loadCheckpointVertices(final long superstep, List<Integer> partitions) {
    int numThreads = Math.min(GiraphConstants.NUM_CHECKPOINT_IO_THREADS.get(getConfiguration()),
            partitions.size());

    final Queue<Integer> partitionIdQueue = new ConcurrentLinkedQueue<>(partitions);

    final CompressionCodec codec = new CompressionCodecFactory(getConfiguration())
            .getCodec(new Path(GiraphConstants.CHECKPOINT_COMPRESSION_CODEC.get(getConfiguration())));

    long t0 = System.currentTimeMillis();

    CallableFactory<Void> callableFactory = new CallableFactory<Void>() {
        @Override
        public Callable<Void> newCallable(int callableId) {
            return new Callable<Void>() {

                @Override
                public Void call() throws Exception {
                    while (!partitionIdQueue.isEmpty()) {
                        Integer partitionId = partitionIdQueue.poll();
                        if (partitionId == null) {
                            break;
                        }
                        Path path = getSavedCheckpoint(superstep,
                                "_" + partitionId + CheckpointingUtils.CHECKPOINT_VERTICES_POSTFIX);

                        FSDataInputStream compressedStream = getFs().open(path);

                        DataInputStream stream = codec == null ? compressedStream
                                : new DataInputStream(codec.createInputStream(compressedStream));

                        Partition<I, V, E> partition = getConfiguration().createPartition(partitionId,
                                getContext());

                        partition.readFields(stream);

                        getPartitionStore().addPartition(partition);

                        stream.close();
                    }
                    return null;
                }

            };
        }
    };

    ProgressableUtils.getResultsWithNCallables(callableFactory, numThreads, "load-vertices-%d", getContext());

    LOG.info("Loaded checkpoint in " + (System.currentTimeMillis() - t0) + " ms, using " + numThreads
            + " threads");
}

From source file:org.apache.giraph.worker.BspServiceSource.java

@Override
public VertexEdgeCount loadCheckpoint(long superstep) {
    Path metadataFilePath = getSavedCheckpoint(superstep, CheckpointingUtils.CHECKPOINT_METADATA_POSTFIX);

    Path checkpointFilePath = getSavedCheckpoint(superstep, CheckpointingUtils.CHECKPOINT_DATA_POSTFIX);
    // Algorithm:
    // Examine all the partition owners and load the ones
    // that match my hostname and id from the master designated checkpoint
    // prefixes.
    try {
        DataInputStream metadataStream = getFs().open(metadataFilePath);

        int partitions = metadataStream.readInt();
        List<Integer> partitionIds = new ArrayList<>(partitions);
        for (int i = 0; i < partitions; i++) {
            int partitionId = metadataStream.readInt();
            partitionIds.add(partitionId);
        }

        loadCheckpointVertices(superstep, partitionIds);

        getContext().progress();

        metadataStream.close();

        DataInputStream checkpointStream = getFs().open(checkpointFilePath);
        workerContext.readFields(checkpointStream);

        // Load global stats and superstep classes
        GlobalStats globalStats = new GlobalStats();
        SuperstepClasses superstepClasses = new SuperstepClasses();
        String finalizedCheckpointPath = getSavedCheckpointBasePath(superstep)
                + CheckpointingUtils.CHECKPOINT_FINALIZED_POSTFIX;
        DataInputStream finalizedStream = getFs().open(new Path(finalizedCheckpointPath));
        globalStats.readFields(finalizedStream);
        superstepClasses.readFields(finalizedStream);
        getConfiguration().updateSuperstepClasses(superstepClasses);
        getServerData().resetMessageStores();

        for (int i = 0; i < partitions; i++) {
            int partitionId = checkpointStream.readInt();
            getServerData().getCurrentMessageStore().readFieldsForPartition(checkpointStream, partitionId);
        }

        List<Writable> w2wMessages = (List<Writable>) WritableUtils.readList(checkpointStream);
        getServerData().getCurrentWorkerToWorkerMessages().addAll(w2wMessages);

        checkpointStream.close();

        if (LOG.isInfoEnabled()) {
            LOG.info(
                    "loadCheckpoint: Loaded " + workerGraphPartitioner.getPartitionOwners().size() + " total.");
        }

        // Communication service needs to setup the connections prior to
        // processing vertices

        workerClient.setup(getConfiguration().authenticate());

        return new VertexEdgeCount(globalStats.getVertexCount(), globalStats.getEdgeCount());

    } catch (IOException e) {
        throw new RuntimeException("loadCheckpoint: Failed for superstep=" + superstep, e);
    }
}

From source file:com.serenegiant.media.TLMediaEncoder.java

private void checkLastSequence() {
    if (DEBUG)
        Log.v(TAG, "checkLastSequence:");
    int sequence = -1;
    MediaFormat configFormat = null;
    try {
        final DataInputStream in = openInputStream(mBaseDir, mType, 0);
        if (in != null)
            try {
                // read MediaFormat data for MediaCodec and for MediaMuxer
                readHeader(in);
                configFormat = asMediaFormat(in.readUTF()); // for MediaCodec
                in.readUTF(); // for MediaMuxer
                // search for the last sequence
                // this is not an efficient implementation for large intermediate files;
                // e.g. it may be better to split into multiple files, one per sequence,
                // or into two files: one for the control block and one for the raw bit stream.
                final TLMediaFrameHeader header = new TLMediaFrameHeader();
                for (; mIsRunning;) {
                    readHeader(in, header);
                    in.skipBytes(header.size);
                    sequence = Math.max(sequence, header.sequence);
                }
            } finally {
                in.close();
            }
    } catch (Exception e) {
        // ignore
    }
    mSequence = sequence;
    mConfigFormat = configFormat;
    if (sequence < 0) {
        // if intermediate files do not exist or are invalid, remove them and re-create the intermediate directory
        delete(mBaseDir);
        mBaseDir.mkdirs();
    }
    if (DEBUG)
        Log.v(TAG, "checkLastSequence:finished. sequence=" + sequence);
}

From source file:com.datatorrent.stram.StreamingContainerManager.java

/**
 * Get the instance for the given application. If the application directory contains a checkpoint, the state will be restored.
 *
 * @param rh recovery handler used to restore any checkpointed state
 * @param dag logical plan of the application
 * @param enableEventRecording whether event recording should be enabled
 * @return instance of {@link StreamingContainerManager}
 * @throws IOException
 */
public static StreamingContainerManager getInstance(RecoveryHandler rh, LogicalPlan dag,
        boolean enableEventRecording) throws IOException {
    try {
        CheckpointState checkpointedState = (CheckpointState) rh.restore();
        StreamingContainerManager scm;
        if (checkpointedState == null) {
            scm = new StreamingContainerManager(dag, enableEventRecording, new SystemClock());
        } else {
            // find better way to support final transient members
            PhysicalPlan plan = checkpointedState.physicalPlan;
            plan.getLogicalPlan().setAttribute(LogicalPlan.APPLICATION_ATTEMPT_ID,
                    dag.getAttributes().get(LogicalPlan.APPLICATION_ATTEMPT_ID));
            scm = new StreamingContainerManager(checkpointedState, enableEventRecording);
            for (Field f : plan.getClass().getDeclaredFields()) {
                if (f.getType() == PlanContext.class) {
                    f.setAccessible(true);
                    try {
                        f.set(plan, scm);
                    } catch (Exception e) {
                        throw new RuntimeException("Failed to set " + f, e);
                    }
                    f.setAccessible(false);
                }
            }
            DataInputStream logStream = rh.getLog();
            scm.journal.replay(logStream);
            logStream.close();

            // restore checkpoint info
            plan.syncCheckpoints(scm.vars.windowStartMillis, scm.clock.getTime());
            scm.committedWindowId = scm.updateCheckpoints(true);

            // at this point the physical plan has been fully restored
            // populate container agents for existing containers
            for (PTContainer c : plan.getContainers()) {
                if (c.getExternalId() != null) {
                    LOG.debug("Restore container agent {} for {}", c.getExternalId(), c);
                    StreamingContainerAgent sca = new StreamingContainerAgent(c,
                            scm.newStreamingContainerContext(c), scm);
                    scm.containers.put(c.getExternalId(), sca);
                } else {
                    LOG.debug("Requesting new resource for {}", c.toIdStateString());
                    scm.requestContainer(c);
                }
            }
        }
        scm.recoveryHandler = rh;
        scm.checkpoint();
        return scm;
    } catch (IOException e) {
        throw new IllegalStateException("Failed to read checkpointed state", e);
    }
}

From source file:org.apache.hadoop.hive.ql.exec.tez.DynamicPartitionPruner.java

@SuppressWarnings("deprecation")
@VisibleForTesting
protected String processPayload(ByteBuffer payload, String sourceName) throws SerDeException, IOException {

    DataInputStream in = new DataInputStream(new ByteBufferBackedInputStream(payload));
    try {
        String columnName = in.readUTF();

        LOG.info("Source of event: " + sourceName);

        List<SourceInfo> infos = this.sourceInfoMap.get(sourceName);
        if (infos == null) {
            throw new IllegalStateException("no source info for event source: " + sourceName);
        }

        SourceInfo info = null;
        for (SourceInfo si : infos) {
            if (columnName.equals(si.columnName)) {
                info = si;
                break;
            }
        }

        if (info == null) {
            throw new IllegalStateException("no source info for column: " + columnName);
        }

        if (info.skipPruning.get()) {
            // Marked as skipped previously. Don't bother processing the rest of the payload.
        } else {
            boolean skip = in.readBoolean();
            if (skip) {
                info.skipPruning.set(true);
            } else {
                while (payload.hasRemaining()) {
                    writable.readFields(in);

                    Object row = info.deserializer.deserialize(writable);

                    Object value = info.soi.getStructFieldData(row, info.field);
                    value = ObjectInspectorUtils.copyToStandardObject(value, info.fieldInspector);

                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Adding: " + value + " to list of required partitions");
                    }
                    info.values.add(value);
                }
            }
        }
    } finally {
        if (in != null) {
            in.close();
        }
    }
    return sourceName;
}