Example usage for java.io DataInputStream close

Introduction

This page collects example usages of the java.io.DataInputStream.close() method, drawn from open-source projects.

Prototype

public void close() throws IOException 

Document

Closes this input stream and releases any system resources associated with the stream.
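
Since DataInputStream implements Closeable, close() is most often invoked implicitly through try-with-resources (Java 7+) rather than called by hand. A minimal sketch, assuming a hypothetical file data.bin:

import java.io.*;

public class CloseExample {
    public static void main(String[] args) {
        try (DataInputStream in = new DataInputStream(
                new BufferedInputStream(new FileInputStream("data.bin")))) {
            System.out.println(in.readInt()); // read one 4-byte big-endian int
        } catch (IOException e) {
            e.printStackTrace();
        } // in.close() runs automatically here, even if readInt() throws
    }
}

Most of the examples below predate try-with-resources and instead close the stream explicitly, typically in a finally block.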

Usage

From source file:com.polyvi.xface.extension.advancedfiletransfer.FileUploader.java

/**
 * Performs the handshake with the server to obtain the resource id.
 *
 * @return true on success, false on failure
 */
private boolean handleShake() {
    HttpURLConnection httpConnection = null;
    DataInputStream dataInputStream = null;
    String souceid = mFileTransferRecorder.getSourceId(mFilePath, "" + mUploadFileSize);
    try {
        httpConnection = getHttpConnection(mServer);
        // Set the request headers
        httpConnection.setRequestProperty("Charset", "UTF-8");
        httpConnection.setRequestProperty("Content-Type", "application/x-www-form-urlencoded");
        httpConnection.setRequestProperty("ACTIONNAME", ACTION_NAME_HAND);
        httpConnection.setRequestProperty("RESOURCEID", souceid);
        httpConnection.setRequestProperty("FILENAME", getUploadFileName());
        httpConnection.setRequestProperty("FILESIZE", "" + mUploadFileSize);
        if (HttpURLConnection.HTTP_OK == httpConnection.getResponseCode()) {
            // The response has the form RESOURCEID=...;BFFORE=...
            dataInputStream = new DataInputStream(httpConnection.getInputStream());
            // Parse the response
            handleResponse(dataInputStream.readLine());
            // Save the source id
            setSourceId(souceid);
        } else {
            onError(INVALID_URL_ERR);
        }
    } catch (Exception e) {
        onError(INVALID_URL_ERR);
        e.printStackTrace();
        return false;
    } finally {
        if (null != httpConnection) {
            httpConnection.disconnect();
            httpConnection = null;
        }
        // Close the input stream
        try {
            if (null != dataInputStream) {
                dataInputStream.close();
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
    return true;
}
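
Note on the example above: DataInputStream.readLine() has been deprecated since JDK 1.1 because it does not properly convert bytes to characters. A minimal sketch of the replacement suggested by the JDK deprecation note, wrapping the underlying stream in a BufferedReader (the readOneLine name is ours):

import java.io.*;
import java.nio.charset.StandardCharsets;

static String readOneLine(InputStream in) throws IOException {
    BufferedReader reader = new BufferedReader(
            new InputStreamReader(in, StandardCharsets.UTF_8));
    return reader.readLine(); // decodes bytes to characters correctly
}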

From source file:com.MainFiles.Functions.java

public static String GetSequenceNumber() {
    ClassImportantValues cl = new ClassImportantValues();

    String strcountFile = COUNT_FILE;
    File file = new File(strcountFile);
    FileLock flock = null;
    String strPaddedNumber = "";
    try {

        if (file.exists()) {
            FileInputStream fstream = new FileInputStream(strcountFile);

            // Read the file containing the next sequence number
            DataInputStream in = new DataInputStream(fstream);
            BufferedReader br = new BufferedReader(new InputStreamReader(in));
            String strLine = "";
            strLine = br.readLine();
            in.close();

            int intCurrentNum = Integer.parseInt(strLine);
            intCurrentNum += 1;

            strPaddedNumber = String.format("%06d", intCurrentNum);

            // Now overwrite the file
            FileOutputStream fos = new FileOutputStream(file, false);
            fos.write(strPaddedNumber.getBytes());
            fos.close();

        } // end if file.exists()
        else { // create the file if it doesn't exist

            file.createNewFile();
            try (FileOutputStream fos = new FileOutputStream(file, false)) {
                // write 1234 to the file to begin the counter
                fos.write("1234".getBytes());
            }
            strPaddedNumber = String.format("%06d", 1);
        }

        return strPaddedNumber;

    } catch (IOException | NumberFormatException ex) {
        System.out.println("Error GetSequenceNumber : " + ex.getMessage() + StackTraceWriter(ex));
        return null;
    }

}
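
For comparison, here is a minimal sketch of the same read-increment-rewrite cycle using try-with-resources, so the DataInputStream chain and the output stream are closed automatically (the nextSequenceNumber name is ours):

static String nextSequenceNumber(File file) throws IOException {
    String line;
    try (DataInputStream in = new DataInputStream(new FileInputStream(file));
         BufferedReader br = new BufferedReader(new InputStreamReader(in))) {
        line = br.readLine(); // both streams are closed when this block exits
    }
    String padded = String.format("%06d", Integer.parseInt(line) + 1);
    try (FileOutputStream fos = new FileOutputStream(file, false)) {
        fos.write(padded.getBytes());
    }
    return padded;
}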

From source file:org.apache.giraph.ooc.DiskBackedPartitionStore.java

/**
 * Load a partition from disk. It deletes the files after the load,
 * except for the edges, if the graph is static.
 *
 * @param meta meta partition to load the partition of
 * @return The partition
 * @throws IOException
 */
@SuppressWarnings("unchecked")
private Partition<I, V, E> loadPartition(MetaPartition meta) throws IOException {
    Integer partitionId = meta.getId();
    long numVertices = meta.getVertexCount();
    Partition<I, V, E> partition = conf.createPartition(partitionId, context);

    // Vertices
    File file = new File(getVerticesPath(partitionId));
    if (LOG.isDebugEnabled()) {
        LOG.debug("loadPartition: loading partition vertices " + partition.getId() + " from "
                + file.getAbsolutePath());
    }

    FileInputStream filein = new FileInputStream(file);
    BufferedInputStream bufferin = new BufferedInputStream(filein);
    DataInputStream inputStream = new DataInputStream(bufferin);
    for (int i = 0; i < numVertices; ++i) {
        Vertex<I, V, E> vertex = conf.createVertex();
        readVertexData(inputStream, vertex);
        partition.putVertex(vertex);
    }
    inputStream.close();
    checkState(file.delete(), "loadPartition: failed to delete %s", file.getAbsolutePath());

    // Edges
    file = new File(getEdgesPath(partitionId));

    if (LOG.isDebugEnabled()) {
        LOG.debug("loadPartition: loading partition edges " + partition.getId() + " from "
                + file.getAbsolutePath());
    }

    filein = new FileInputStream(file);
    bufferin = new BufferedInputStream(filein);
    inputStream = new DataInputStream(bufferin);
    for (int i = 0; i < numVertices; ++i) {
        readOutEdges(inputStream, partition);
    }
    inputStream.close();
    // If the graph is static and it is not INPUT_SUPERSTEP, keep the file
    // around.
    if (!conf.isStaticGraph() || serviceWorker.getSuperstep() == BspServiceWorker.INPUT_SUPERSTEP) {
        checkState(file.delete(), "loadPartition: failed to delete %s", file.getAbsolutePath());
    }

    // Load message for the current superstep
    loadMessages(partitionId);

    // Input vertex buffers
    // First, applying vertex buffers on disk (since they came earlier)
    Integer numBuffers = numPendingInputVerticesOnDisk.remove(partitionId);
    if (numBuffers != null) {
        file = new File(getPendingVerticesBufferPath(partitionId));
        if (LOG.isDebugEnabled()) {
            LOG.debug("loadPartition: loading " + numBuffers + " input vertex " + "buffers of partition "
                    + partitionId + " from " + file.getAbsolutePath());
        }
        filein = new FileInputStream(file);
        bufferin = new BufferedInputStream(filein);
        inputStream = new DataInputStream(bufferin);
        for (int i = 0; i < numBuffers; ++i) {
            ExtendedDataOutput extendedDataOutput = WritableUtils.readExtendedDataOutput(inputStream, conf);
            partition.addPartitionVertices(new VertexIterator<I, V, E>(extendedDataOutput, conf));
        }
        inputStream.close();
        checkState(file.delete(), "loadPartition: failed to delete %s", file.getAbsolutePath());
    }
    // Second, applying vertex buffers already in memory
    Pair<Integer, List<ExtendedDataOutput>> vertexPair;
    vertexBufferRWLock.writeLock().lock();
    vertexPair = pendingInputVertices.remove(partitionId);
    vertexBufferRWLock.writeLock().unlock();
    if (vertexPair != null) {
        for (ExtendedDataOutput extendedDataOutput : vertexPair.getRight()) {
            partition.addPartitionVertices(new VertexIterator<I, V, E>(extendedDataOutput, conf));
        }
    }

    // Edge store
    if (serviceWorker.getSuperstep() == BspServiceWorker.INPUT_SUPERSTEP) {
        checkState(hasEdgeStoreOnDisk.containsKey(partitionId),
                "loadPartition: partition is written to disk in INPUT_SUPERSTEP, "
                        + "but it is not clear whether its edge store is on disk or not " + "(impossible)");

        if (hasEdgeStoreOnDisk.remove(partitionId)) {
            file = new File(getEdgeStorePath(partitionId));
            if (LOG.isDebugEnabled()) {
                LOG.debug("loadPartition: loading edge store of partition " + partitionId + " from "
                        + file.getAbsolutePath());
            }
            filein = new FileInputStream(file);
            bufferin = new BufferedInputStream(filein);
            inputStream = new DataInputStream(bufferin);
            edgeStore.readPartitionEdgeStore(partitionId, inputStream);
            inputStream.close();
            checkState(file.delete(), "loadPartition: failed to delete %s", file.getAbsolutePath());
        }

        // Input edge buffers
        // First, applying edge buffers on disk (since they came earlier)
        numBuffers = numPendingInputEdgesOnDisk.remove(partitionId);
        if (numBuffers != null) {
            file = new File(getPendingEdgesBufferPath(partitionId));
            if (LOG.isDebugEnabled()) {
                LOG.debug("loadPartition: loading " + numBuffers + " input edge " + "buffers of partition "
                        + partitionId + " from " + file.getAbsolutePath());
            }
            filein = new FileInputStream(file);
            bufferin = new BufferedInputStream(filein);
            inputStream = new DataInputStream(bufferin);
            for (int i = 0; i < numBuffers; ++i) {
                VertexIdEdges<I, E> vertexIdEdges = new ByteArrayVertexIdEdges<I, E>();
                vertexIdEdges.setConf(conf);
                vertexIdEdges.readFields(inputStream);
                edgeStore.addPartitionEdges(partitionId, vertexIdEdges);
            }
            inputStream.close();
            checkState(file.delete(), "loadPartition: failed to delete %s", file.getAbsolutePath());
        }
        // Second, applying edge buffers already in memory
        Pair<Integer, List<VertexIdEdges<I, E>>> edgePair = null;
        edgeBufferRWLock.writeLock().lock();
        edgePair = pendingInputEdges.remove(partitionId);
        edgeBufferRWLock.writeLock().unlock();
        if (edgePair != null) {
            for (VertexIdEdges<I, E> vertexIdEdges : edgePair.getRight()) {
                edgeStore.addPartitionEdges(partitionId, vertexIdEdges);
            }
        }
    }
    return partition;
}

From source file:org.apache.giraph.master.BspServiceMaster.java

/**
 * Read the finalized checkpoint file and associated metadata files for the
 * checkpoint.  Modifies the {@link PartitionOwner} objects to get the
 * checkpoint prefixes.  It is an optimization to prevent all workers from
 * searching all the files.  Also read in the aggregator data from the
 * finalized checkpoint file and set it.
 *
 * @param superstep Checkpoint set to examine.
 * @throws IOException
 * @throws InterruptedException
 * @throws KeeperException
 * @return Collection of generated partition owners.
 */
private Collection<PartitionOwner> prepareCheckpointRestart(long superstep)
        throws IOException, KeeperException, InterruptedException {
    List<PartitionOwner> partitionOwners = new ArrayList<>();
    FileSystem fs = getFs();
    String finalizedCheckpointPath = getSavedCheckpointBasePath(superstep)
            + CheckpointingUtils.CHECKPOINT_FINALIZED_POSTFIX;
    LOG.info("Loading checkpoint from " + finalizedCheckpointPath);
    DataInputStream finalizedStream = fs.open(new Path(finalizedCheckpointPath));
    GlobalStats globalStats = new GlobalStats();
    globalStats.readFields(finalizedStream);
    updateCounters(globalStats);
    SuperstepClasses superstepClasses = SuperstepClasses.createToRead(getConfiguration());
    superstepClasses.readFields(finalizedStream);
    getConfiguration().updateSuperstepClasses(superstepClasses);
    int prefixFileCount = finalizedStream.readInt();

    String checkpointFile = finalizedStream.readUTF();
    for (int i = 0; i < prefixFileCount; ++i) {
        int mrTaskId = finalizedStream.readInt();

        DataInputStream metadataStream = fs.open(
                new Path(checkpointFile + "." + mrTaskId + CheckpointingUtils.CHECKPOINT_METADATA_POSTFIX));
        long partitions = metadataStream.readInt();
        WorkerInfo worker = getWorkerInfoById(mrTaskId);
        for (long p = 0; p < partitions; ++p) {
            int partitionId = metadataStream.readInt();
            PartitionOwner partitionOwner = new BasicPartitionOwner(partitionId, worker);
            partitionOwners.add(partitionOwner);
            LOG.info("prepareCheckpointRestart partitionId=" + partitionId + " assigned to " + partitionOwner);
        }
        metadataStream.close();
    }
    // Ordering appears to be important: as of right now we rely on this ordering
    // in WorkerGraphPartitioner
    Collections.sort(partitionOwners, new Comparator<PartitionOwner>() {
        @Override
        public int compare(PartitionOwner p1, PartitionOwner p2) {
            return Integer.compare(p1.getPartitionId(), p2.getPartitionId());
        }
    });

    globalCommHandler.getAggregatorHandler().readFields(finalizedStream);
    aggregatorTranslation.readFields(finalizedStream);
    masterCompute.readFields(finalizedStream);
    finalizedStream.close();

    return partitionOwners;
}

From source file:org.apache.hadoop.hdfs.server.namenode.AvatarNode.java

private void verifyFailoverTestData() throws IOException {
    if (!enableTestFramework) {
        LOG.info("Failover: Test framework - disabled");
        return;
    }
    String fsck = "";
    LOG.info("Failover: Test framework - verification - starting...");
    AvatarFailoverSnapshot snapshot = new AvatarFailoverSnapshot();
    File snapshotFile = getSnapshotFile(confg, false);
    if (snapshotFile == null)
        return;
    DataInputStream in = new DataInputStream(new BufferedInputStream(new FileInputStream(snapshotFile)));
    try {
        snapshot.readFields(in);
        if (in.readBoolean()) {
            LOG.info("Failover: Test framework - found fsck data");
            fsck = Text.readString(in);
        }
    } finally {
        in.close();
    }

    LOG.info("Failover: Test framework - verifying open files: found "
            + snapshot.getOpenFilesInfo().getOpenFiles().size() + " files in the test snapshot");
    verifyOpenFiles(snapshot.getOpenFilesInfo());

    LOG.info("Failover: Test framework - verifying closed files: found " + snapshot.getSampledFiles().size()
            + " files in the test snapshot");
    for (FileStatusExtended stat : snapshot.getSampledFiles()) {
        verifySnapshotSampledFile(stat);
    }

    LOG.info("Failover: Test framework - verification - succeeded");
    this.oldPrimaryFsck = fsck;
}

From source file:com.cloudmaster.cmp.util.AlarmSystem.transfer.HttpSender.java

public ResponseObject send(TransportObject object) throws Exception {
    ResponseObject rs = new ResponseObject();
    ByteArrayOutputStream bOs = null;
    DataOutputStream dOs = null;
    DataInputStream dIs = null;
    HttpClient client;
    PostMethod meth = null;
    byte[] rawData;
    try {
        bOs = new ByteArrayOutputStream();
        dOs = new DataOutputStream(bOs);
        object.toStream(dOs);
        bOs.flush();
        rawData = bOs.toByteArray();

        client = new HttpClient();
        client.setConnectionTimeout(this.timeout);
        client.setTimeout(this.datatimeout);
        client.setHttpConnectionFactoryTimeout(this.timeout);

        meth = new PostMethod(object.getValue(SERVER_URL));
        // meth = new UTF8PostMethod(url);
        meth.getParams().setParameter(HttpMethodParams.HTTP_CONTENT_CHARSET, ENCODING);
        // meth.addParameter(SERVER_ARGS, new String(rawData,"UTF-8"));

        // meth.setRequestBody(new String(rawData));
        // meth.setRequestBody(new String(rawData,"UTF-8"));

        byte[] base64Array = Base64.encode(rawData).getBytes();
        meth.setRequestBody(new String(base64Array));

        // System.out.println(new String(rawData));

        client.getParams().setParameter(HttpMethodParams.RETRY_HANDLER,
                new DefaultHttpMethodRetryHandler(1, false));
        client.executeMethod(meth);

        dIs = new DataInputStream(meth.getResponseBodyAsStream());

        if (meth.getStatusCode() == HttpStatus.SC_OK) {

            Header errHeader = meth.getResponseHeader(HDR_ERROR);

            if (errHeader != null) {
                rs.setError(meth.getResponseBodyAsString());
                return rs;
            }

            rs = ResponseObject.fromStream(dIs);

            return rs;
        } else {
            meth.releaseConnection();
            throw new IOException("Connection failure: " + meth.getStatusLine().toString());
        }
    } finally {
        if (meth != null) {
            meth.releaseConnection();
        }
        if (bOs != null) {
            bOs.close();
        }
        if (dOs != null) {
            dOs.close();
        }
        if (dIs != null) {
            dIs.close();
        }
    }
}
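
A side note on the finally block above: FilterInputStream.close() (which DataInputStream inherits) closes the stream it wraps, and the same holds on the output side, so closing dOs would already close the underlying bOs; closing each stream separately is defensive rather than required. A minimal sketch of the chaining, with a hypothetical data.bin:

static void closeChain() throws IOException {
    DataInputStream in = new DataInputStream(
            new BufferedInputStream(new FileInputStream("data.bin")));
    in.close(); // also closes the BufferedInputStream and FileInputStream it wraps
}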

From source file:com.chinamobile.bcbsp.bspcontroller.JobInProgress.java

/**
 * Create/manage tasks.
 * @throws IOException
 *         exceptions while handling split files
 */
public void initStaffs() throws IOException {
    if (staffsInited) {
        return;
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("numBSPStaffs: " + numBSPStaffs);
    }
    // read the input split info from HDFS
    // Path sysDir = new Path(this.controller.getSystemDir());
    // FileSystem fs = sysDir.getFileSystem(conf);
    BSPFileSystem bspfs = new BSPFileSystemImpl(controller, conf);
    // DataInputStream splitFile = fs.open(new Path(conf
    // .get(Constants.USER_BC_BSP_JOB_SPLIT_FILE)));
    DataInputStream splitFile = bspfs
            .open(new BSPHdfsImpl().newPath(conf.get(Constants.USER_BC_BSP_JOB_SPLIT_FILE)));
    RawSplit[] splits;
    try {
        splits = BSPJobClient.readSplitFile(splitFile);
    } finally {
        splitFile.close();
    }
    // adjust number of map staffs to actual number of splits
    this.staffs = new StaffInProgress[numBSPStaffs];
    for (int i = 0; i < numBSPStaffs; i++) {
        if (i < splits.length) {
            // this staff will load data from DFS
            staffs[i] = new StaffInProgress(getJobID(), this.jobFile.toString(), this.controller, this.conf,
                    this, i, splits[i]);
        } else {
            // Create a disabled split; this only happens in Hash.
            RawSplit split = new RawSplit();
            split.setClassName("no");
            split.setDataLength(0);
            split.setBytes("no".getBytes(), 0, 2);
            split.setLocations(new String[] { "no" });
            // this staff will not load data from DFS
            staffs[i] = new StaffInProgress(getJobID(), this.jobFile.toString(), this.controller, this.conf,
                    this, i, split);
        }
    }
    // Update job status
    this.status.setRunState(JobStatus.RUNNING);
    this.status.setState(State.RUNNING);
    staffsInited = true;
    /* Zhicheng Liu added */
    this.staffSlowCount = new int[this.staffs.length];
    LOG.debug("Job is initialized.");
}

From source file:org.apache.geode.internal.cache.tier.sockets.HandShake.java

public Properties readCredential(DataInputStream dis, DataOutputStream dos, DistributedSystem system)
        throws GemFireSecurityException, IOException {

    Properties credentials = null;
    boolean requireAuthentication = securityService.isClientSecurityRequired();
    try {
        byte secureMode = dis.readByte();
        throwIfMissingRequiredCredentials(requireAuthentication, secureMode != CREDENTIALS_NONE);
        if (secureMode == CREDENTIALS_NORMAL) {
            this.appSecureMode = CREDENTIALS_NORMAL;
            /*
             * if (requireAuthentication) { credentials = DataSerializer.readProperties(dis); } else {
             * DataSerializer.readProperties(dis); // ignore the credentials }
             */
        } else if (secureMode == CREDENTIALS_DHENCRYPT) {
            this.appSecureMode = CREDENTIALS_DHENCRYPT;
            boolean sendAuthentication = dis.readBoolean();
            InternalLogWriter securityLogWriter = (InternalLogWriter) system.getSecurityLogWriter();
            // Get the symmetric encryption algorithm to be used
            // String skAlgo = DataSerializer.readString(dis);
            this.clientSKAlgo = DataSerializer.readString(dis);
            // Get the public key of the other side
            byte[] keyBytes = DataSerializer.readByteArray(dis);
            byte[] challenge = null;
            // PublicKey pubKey = null;
            if (requireAuthentication) {
                // Generate PublicKey from encoded form
                X509EncodedKeySpec x509KeySpec = new X509EncodedKeySpec(keyBytes);
                KeyFactory keyFact = KeyFactory.getInstance("DH");
                this.clientPublicKey = keyFact.generatePublic(x509KeySpec);

                // Send the public key to other side
                keyBytes = dhPublicKey.getEncoded();
                challenge = new byte[64];
                random.nextBytes(challenge);

                // If the server has to also authenticate itself then
                // sign the challenge from client.
                if (sendAuthentication) {
                    // Get the challenge string from client
                    byte[] clientChallenge = DataSerializer.readByteArray(dis);
                    if (privateKeyEncrypt == null) {
                        throw new AuthenticationFailedException(
                                LocalizedStrings.HandShake_SERVER_PRIVATE_KEY_NOT_AVAILABLE_FOR_CREATING_SIGNATURE
                                        .toLocalizedString());
                    }
                    // Sign the challenge from client and send it to the client
                    Signature sig = Signature.getInstance(privateKeySignAlgo);
                    sig.initSign(privateKeyEncrypt);
                    sig.update(clientChallenge);
                    byte[] signedBytes = sig.sign();
                    dos.writeByte(REPLY_OK);
                    DataSerializer.writeByteArray(keyBytes, dos);
                    // DataSerializer.writeString(privateKeyAlias, dos);
                    DataSerializer.writeString(privateKeySubject, dos);
                    DataSerializer.writeByteArray(signedBytes, dos);
                    securityLogWriter.fine("HandShake: sent the signed client challenge");
                } else {
                    // These two lines should not be moved before the if{} statement in
                    // a common block for both if...then...else parts. This is to handle
                    // the case when an AuthenticationFailedException is thrown by the
                    // if...then part when sending the signature.
                    dos.writeByte(REPLY_OK);
                    DataSerializer.writeByteArray(keyBytes, dos);
                }
                // Now send the server challenge
                DataSerializer.writeByteArray(challenge, dos);
                securityLogWriter.fine("HandShake: sent the public key and challenge");
                dos.flush();

                // Read and decrypt the credentials
                byte[] encBytes = DataSerializer.readByteArray(dis);
                Cipher c = getDecryptCipher(this.clientSKAlgo, this.clientPublicKey);
                byte[] credentialBytes = decryptBytes(encBytes, c);
                ByteArrayInputStream bis = new ByteArrayInputStream(credentialBytes);
                DataInputStream dinp = new DataInputStream(bis);
                // credentials = DataSerializer.readProperties(dinp);//Hitesh: we don't send in handshake
                // now
                byte[] challengeRes = DataSerializer.readByteArray(dinp);
                // Check the challenge string
                if (!Arrays.equals(challenge, challengeRes)) {
                    throw new AuthenticationFailedException(
                            LocalizedStrings.HandShake_MISMATCH_IN_CHALLENGE_BYTES_MALICIOUS_CLIENT
                                    .toLocalizedString());
                }
                dinp.close();
            } else {
                if (sendAuthentication) {
                    // Read and ignore the client challenge
                    DataSerializer.readByteArray(dis);
                }
                dos.writeByte(REPLY_AUTH_NOT_REQUIRED);
                dos.flush();
            }
        }
    } catch (IOException ex) {
        throw ex;
    } catch (GemFireSecurityException ex) {
        throw ex;
    } catch (Exception ex) {
        throw new AuthenticationFailedException(
                LocalizedStrings.HandShake_FAILURE_IN_READING_CREDENTIALS.toLocalizedString(), ex);
    }
    return credentials;
}
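
One detail worth noting in the example above: dinp wraps a ByteArrayInputStream, and closing a ByteArrayInputStream has no effect (its javadoc says so explicitly), so dinp.close() releases no system resources here; it is harmless hygiene. A minimal sketch:

byte[] data = {0, 0, 0, 7};
DataInputStream dinp = new DataInputStream(new ByteArrayInputStream(data));
int n = dinp.readInt(); // reads the int 7
dinp.close();           // no-op for byte-array-backed streams; no OS resources involved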

From source file:org.apache.hadoop.mapreduce.task.reduce.Fetcher.java

/**
 * The crux of the matter...
 * 
 * @param host {@link MapHost} from which we need to  
 *              shuffle available map-outputs.
 */
@VisibleForTesting
protected void copyFromHost(MapHost host) throws IOException {
    // reset retryStartTime for a new host
    retryStartTime = 0;
    // Get completed maps on 'host'
    List<TaskAttemptID> maps = scheduler.getMapsForHost(host);

    // Sanity check to catch hosts with only 'OBSOLETE' maps, 
    // especially at the tail of large jobs
    if (maps.size() == 0) {
        return;
    }

    if (LOG.isDebugEnabled()) {
        LOG.debug("Fetcher " + id + " going to fetch from " + host + " for: " + maps);
    }

    // List of maps to be fetched yet
    Set<TaskAttemptID> remaining = new HashSet<TaskAttemptID>(maps);

    // Construct the url and connect
    URL url = getMapOutputURL(host, maps);
    DataInputStream input = openShuffleUrl(host, remaining, url);
    if (input == null) {
        return;
    }

    try {
        // Loop through available map-outputs and fetch them
        // On any error, failedTasks is not null and we exit
        // after putting back the remaining maps to the 
        // yet_to_be_fetched list and marking the failed tasks.
        TaskAttemptID[] failedTasks = null;
        while (!remaining.isEmpty() && failedTasks == null) {
            try {
                failedTasks = copyMapOutput(host, input, remaining, fetchRetryEnabled);
            } catch (IOException e) {
                IOUtils.cleanup(LOG, input);
                //
                // Setup connection again if disconnected by NM
                connection.disconnect();
                // Get map output from remaining tasks only.
                url = getMapOutputURL(host, remaining);
                input = openShuffleUrl(host, remaining, url);
                if (input == null) {
                    return;
                }
            }
        }

        if (failedTasks != null && failedTasks.length > 0) {
            LOG.warn("copyMapOutput failed for tasks " + Arrays.toString(failedTasks));
            scheduler.hostFailed(host.getHostName());
            for (TaskAttemptID left : failedTasks) {
                scheduler.copyFailed(left, host, true, false);
            }
        }

        // Sanity check
        if (failedTasks == null && !remaining.isEmpty()) {
            throw new IOException(
                    "server didn't return all expected map outputs: " + remaining.size() + " left.");
        }
        input.close();
        input = null;
    } finally {
        if (input != null) {
            IOUtils.cleanup(LOG, input);
            input = null;
        }
        for (TaskAttemptID left : remaining) {
            scheduler.putBackKnownMapOutput(host, left);
        }
    }
}
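
The input.close(); input = null; sequence in the try block above is what lets the finally block tell the success path from the failure path: only a stream that was not closed normally is still non-null and gets cleaned up. A minimal sketch of the idiom, with openStream() and process() as hypothetical helpers (IOUtils.cleanup is Hadoop's quiet-close utility, as used in the example):

DataInputStream input = openStream();
try {
    process(input);
    input.close();  // success path: close eagerly...
    input = null;   // ...and null out so finally skips it
} finally {
    if (input != null) {             // still non-null only on the failure path
        IOUtils.cleanup(LOG, input); // close quietly without masking the original exception
    }
}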

From source file:com.android.leanlauncher.LauncherTransitionable.java

private static void readConfiguration(Context context, LocaleConfiguration configuration) {
    DataInputStream in = null;
    try {
        in = new DataInputStream(context.openFileInput(LauncherFiles.LAUNCHER_PREFERENCES));
        configuration.locale = in.readUTF();
        configuration.mcc = in.readInt();
        configuration.mnc = in.readInt();
    } catch (FileNotFoundException e) {
        // Ignore
    } catch (IOException e) {
        // Ignore
    } finally {
        if (in != null) {
            try {
                in.close();
            } catch (IOException e) {
                // Ignore
            }
        }
    }
}