Example usage for java.io DataInputStream readInt

List of usage examples for java.io DataInputStream readInt

Introduction

On this page you can find example usage of java.io DataInputStream readInt.

Prototype

public final int readInt() throws IOException 

Document

Reads four input bytes and returns an int value. See the general contract of the readInt method of DataInput.
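
As a quick, self-contained illustration (the class name here is hypothetical, not taken from the listings below), the following sketch writes an int with DataOutputStream and reads it back with readInt; readInt consumes exactly four bytes in big-endian order and throws EOFException if the stream ends first.

import java.io.*;

public class ReadIntExample {
    public static void main(String[] args) throws IOException {
        // Write one 32-bit int: four bytes, big-endian (0x00 0x00 0x00 0x2A).
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(buf);
        out.writeInt(42);
        out.flush();

        // Read the same four bytes back as a single int.
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(buf.toByteArray()));
        System.out.println(in.readInt()); // prints 42
    }
}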

Usage

From source file: org.apache.hadoop.hdfs.AvatarClient.java

/**
 * Get the checksum of a file.
 * @param src The file path
 * @return The checksum 
 */
public static MD5MD5CRC32FileChecksum getFileChecksum(String src, AvatarProtocol namenode,
        SocketFactory socketFactory, int socketTimeout) throws IOException {
    //get all block locations
    final List<LocatedBlock> locatedblocks = callGetBlockLocations(namenode, src, 0, Long.MAX_VALUE)
            .getLocatedBlocks();
    final DataOutputBuffer md5out = new DataOutputBuffer();
    int bytesPerCRC = 0;
    long crcPerBlock = 0;

    //get block checksum for each block
    for (int i = 0; i < locatedblocks.size(); i++) {
        LocatedBlock lb = locatedblocks.get(i);
        final Block block = lb.getBlock();
        final DatanodeInfo[] datanodes = lb.getLocations();

        //try each datanode location of the block
        final int timeout = 3000 * datanodes.length + socketTimeout;
        boolean done = false;
        for (int j = 0; !done && j < datanodes.length; j++) {
            //connect to a datanode
            final Socket sock = socketFactory.createSocket();
            NetUtils.connect(sock, NetUtils.createSocketAddr(datanodes[j].getName()), timeout);
            sock.setSoTimeout(timeout);

            DataOutputStream out = new DataOutputStream(
                    new BufferedOutputStream(NetUtils.getOutputStream(sock), DataNode.SMALL_BUFFER_SIZE));
            DataInputStream in = new DataInputStream(NetUtils.getInputStream(sock));

            // get block MD5
            try {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("write to " + datanodes[j].getName() + ": "
                            + DataTransferProtocol.OP_BLOCK_CHECKSUM + ", block=" + block);
                }
                out.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
                out.write(DataTransferProtocol.OP_BLOCK_CHECKSUM);
                out.writeLong(block.getBlockId());
                out.writeLong(block.getGenerationStamp());
                out.flush();

                final short reply = in.readShort();
                if (reply != DataTransferProtocol.OP_STATUS_SUCCESS) {
                    throw new IOException("Bad response " + reply + " for block " + block + " from datanode "
                            + datanodes[j].getName());
                }

                //read byte-per-checksum
                final int bpc = in.readInt();
                if (i == 0) { //first block
                    bytesPerCRC = bpc;
                } else if (bpc != bytesPerCRC) {
                    throw new IOException(
                            "Byte-per-checksum not matched: bpc=" + bpc + " but bytesPerCRC=" + bytesPerCRC);
                }

                //read crc-per-block
                final long cpb = in.readLong();
                if (locatedblocks.size() > 1 && i == 0) {
                    crcPerBlock = cpb;
                }

                //read md5
                final MD5Hash md5 = MD5Hash.read(in);
                md5.write(md5out);

                done = true;

                if (LOG.isDebugEnabled()) {
                    if (i == 0) {
                        LOG.debug("set bytesPerCRC=" + bytesPerCRC + ", crcPerBlock=" + crcPerBlock);
                    }
                    LOG.debug("got reply from " + datanodes[j].getName() + ": md5=" + md5);
                }
            } catch (IOException ie) {
                LOG.warn("src=" + src + ", datanodes[" + j + "].getName()=" + datanodes[j].getName(), ie);
            } finally {
                IOUtils.closeStream(in);
                IOUtils.closeStream(out);
                IOUtils.closeSocket(sock);
            }
        }

        if (!done) {
            throw new IOException("Fail to get block MD5 for " + block);
        }
    }

    //compute file MD5
    final MD5Hash fileMD5 = MD5Hash.digest(md5out.getData());
    return new MD5MD5CRC32FileChecksum(bytesPerCRC, crcPerBlock, fileMD5);
}
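
In the listing above, readInt parses the bytes-per-checksum field of the datanode's OP_BLOCK_CHECKSUM reply; the reads imply the reply layout: a status short, a bytes-per-CRC int, a CRCs-per-block long, and the block's MD5 digest. The jxtadoop and DFSClient variants later on this page parse the same reply layout over different transports.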

From source file: org.apache.jxtadoop.hdfs.DFSClient.java

/**
 * Get the checksum of a file.
 * @param src The file path
 * @return The checksum 
 */
public static MD5MD5CRC32FileChecksum getFileChecksum(String src, ClientProtocol namenode,
        SocketFactory socketFactory, int socketTimeout) throws IOException {
    //get all block locations
    final List<LocatedBlock> locatedblocks = callGetBlockLocations(namenode, src, 0, Long.MAX_VALUE)
            .getLocatedBlocks();
    final DataOutputBuffer md5out = new DataOutputBuffer();
    int bytesPerCRC = 0;
    long crcPerBlock = 0;

    //get block checksum for each block
    for (int i = 0; i < locatedblocks.size(); i++) {
        LocatedBlock lb = locatedblocks.get(i);
        final Block block = lb.getBlock();
        final DatanodeInfo[] datanodes = lb.getLocations();

        //try each datanode location of the block
        final int timeout = 3000 * datanodes.length + socketTimeout;
        boolean done = false;
        for (int j = 0; !done && j < datanodes.length; j++) {
            //connect to a datanode
            /*final Socket sock = socketFactory.createSocket();
            NetUtils.connect(sock, 
                 NetUtils.createSocketAddr(datanodes[j].getName()),
                 timeout);
            sock.setSoTimeout(timeout);*/
            JxtaSocket jsock = DFSClient.getDfsClient().getDfsClientPeer()
                    .getInfoSocket(datanodes[j].getName());
            // jsock.setSoTimeout(timeout);
            jsock.setSoTimeout(Integer.parseInt(conf.get("hadoop.p2p.info.timeout")));

            /*DataOutputStream out = new DataOutputStream(
                new BufferedOutputStream(NetUtils.getOutputStream(jsock), 
                             DataNode.SMALL_BUFFER_SIZE));
            DataInputStream in = new DataInputStream(NetUtils.getInputStream(jsock));*/
            DataOutputStream out = new DataOutputStream(new BufferedOutputStream(jsock.getOutputStream()));
            DataInputStream in = new DataInputStream(jsock.getInputStream());

            // get block MD5
            try {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("write to " + datanodes[j].getName() + ": "
                            + DataTransferProtocol.OP_BLOCK_CHECKSUM + ", block=" + block);
                }
                out.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
                out.write(DataTransferProtocol.OP_BLOCK_CHECKSUM);
                out.writeLong(block.getBlockId());
                out.writeLong(block.getGenerationStamp());
                out.flush();

                final short reply = in.readShort();
                if (reply != DataTransferProtocol.OP_STATUS_SUCCESS) {
                    throw new IOException("Bad response " + reply + " for block " + block + " from datanode "
                            + datanodes[j].getName());
                }

                //read byte-per-checksum
                final int bpc = in.readInt();
                if (i == 0) { //first block
                    bytesPerCRC = bpc;
                } else if (bpc != bytesPerCRC) {
                    throw new IOException(
                            "Byte-per-checksum not matched: bpc=" + bpc + " but bytesPerCRC=" + bytesPerCRC);
                }

                //read crc-per-block
                final long cpb = in.readLong();
                if (locatedblocks.size() > 1 && i == 0) {
                    crcPerBlock = cpb;
                }

                //read md5
                final MD5Hash md5 = MD5Hash.read(in);
                md5.write(md5out);

                done = true;

                if (LOG.isDebugEnabled()) {
                    if (i == 0) {
                        LOG.debug("set bytesPerCRC=" + bytesPerCRC + ", crcPerBlock=" + crcPerBlock);
                    }
                    LOG.debug("got reply from " + datanodes[j].getName() + ": md5=" + md5);
                }
            } catch (IOException ie) {
                LOG.warn("src=" + src + ", datanodes[" + j + "].getName()=" + datanodes[j].getName(), ie);
            } finally {
                IOUtils.closeStream(in);
                IOUtils.closeStream(out);
                IOUtils.closeSocket(jsock);
            }
        }

        if (!done) {
            throw new IOException("Fail to get block MD5 for " + block);
        }
    }

    //compute file MD5
    final MD5Hash fileMD5 = MD5Hash.digest(md5out.getData());
    return new MD5MD5CRC32FileChecksum(bytesPerCRC, crcPerBlock, fileMD5);
}

From source file: org.alfresco.repo.search.impl.lucene.index.IndexInfo.java

/**
 * Get the deletions for a given index (there is no check whether they should be
 * applied; that is up to the calling layer).
 *
 * @param id String
 * @param fileName String
 * @return Set<String>
 * @throws IOException
 */
private Set<String> getDeletions(String id, String fileName) throws IOException {
    if (id == null) {
        throw new IndexerException("\"null\" is not a valid identifier for a transaction");
    }
    // Check state
    Set<String> deletions = new HashSet<String>();
    File location = new File(indexDirectory, id).getCanonicalFile();
    File file = new File(location, fileName).getCanonicalFile();
    if (!file.exists()) {
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("No deletions for " + id);
        }
        return Collections.<String>emptySet();
    }
    DataInputStream is = new DataInputStream(new BufferedInputStream(new FileInputStream(file)));
    int size = is.readInt();
    for (int i = 0; i < size; i++) {
        String ref = is.readUTF();
        deletions.add(ref);
    }
    is.close();
    if (s_logger.isDebugEnabled()) {
        s_logger.debug("There are " + deletions.size() + " deletions for " + id);
    }
    return deletions;

}
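
The on-disk format read above is simply a count followed by that many UTF-encoded strings. For context, a writer producing such a file could look like the following minimal sketch (the method name is hypothetical and not part of IndexInfo; the usual java.io imports are assumed):

private void writeDeletions(File file, Set<String> deletions) throws IOException {
    DataOutputStream os = new DataOutputStream(new BufferedOutputStream(new FileOutputStream(file)));
    try {
        os.writeInt(deletions.size()); // count, read back above with readInt
        for (String ref : deletions) {
            os.writeUTF(ref); // entries, read back with readUTF
        }
    } finally {
        os.close();
    }
}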

From source file: org.apache.geode.internal.cache.tier.sockets.HandShake.java

/**
 * This assumes that authentication is the last piece of info in handshake
 */
public void writeCredentials(DataOutputStream dos, DataInputStream dis, Properties p_credentials,
        boolean isNotification, DistributedMember member, HeapDataOutputStream heapdos)
        throws IOException, GemFireSecurityException {

    if (p_credentials == null) {
        // No credentials indicator
        heapdos.writeByte(CREDENTIALS_NONE);
        heapdos.flush();
        dos.write(heapdos.toByteArray());
        dos.flush();
        return;
    }

    if (dhSKAlgo == null || dhSKAlgo.length() == 0) {
        // Normal credentials without encryption indicator
        heapdos.writeByte(CREDENTIALS_NORMAL);
        DataSerializer.writeProperties(p_credentials, heapdos);
        heapdos.flush();
        dos.write(heapdos.toByteArray());
        dos.flush();
        return;
    }

    try {
        InternalLogWriter securityLogWriter = (InternalLogWriter) this.system.getSecurityLogWriter();
        securityLogWriter.fine("HandShake: using Diffie-Hellman key exchange with algo " + dhSKAlgo);
        boolean requireAuthentication = (certificateFilePath != null && certificateFilePath.length() > 0);
        if (requireAuthentication) {
            securityLogWriter.fine("HandShake: server authentication using digital " + "signature required");
        }
        // Credentials with encryption indicator
        heapdos.writeByte(CREDENTIALS_DHENCRYPT);
        heapdos.writeBoolean(requireAuthentication);
        // Send the symmetric encryption algorithm name
        DataSerializer.writeString(dhSKAlgo, heapdos);
        // Send the DH public key
        byte[] keyBytes = dhPublicKey.getEncoded();
        DataSerializer.writeByteArray(keyBytes, heapdos);
        byte[] clientChallenge = null;
        if (requireAuthentication) {
            // Authentication of server should be with the client supplied
            // challenge
            clientChallenge = new byte[64];
            random.nextBytes(clientChallenge);
            DataSerializer.writeByteArray(clientChallenge, heapdos);
        }
        heapdos.flush();
        dos.write(heapdos.toByteArray());
        dos.flush();

        // Expect the alias and signature in the reply
        byte acceptanceCode = dis.readByte();
        if (acceptanceCode != REPLY_OK && acceptanceCode != REPLY_AUTH_NOT_REQUIRED) {
            // Ignore the useless data
            dis.readByte();
            dis.readInt();
            if (!isNotification) {
                DataSerializer.readByteArray(dis);
            }
            readMessage(dis, dos, acceptanceCode, member);
        } else if (acceptanceCode == REPLY_OK) {
            // Get the public key of the other side
            keyBytes = DataSerializer.readByteArray(dis);
            if (requireAuthentication) {
                String subject = DataSerializer.readString(dis);
                byte[] signatureBytes = DataSerializer.readByteArray(dis);
                if (!certificateMap.containsKey(subject)) {
                    throw new AuthenticationFailedException(
                            LocalizedStrings.HandShake_HANDSHAKE_FAILED_TO_FIND_PUBLIC_KEY_FOR_SERVER_WITH_SUBJECT_0
                                    .toLocalizedString(subject));
                }

                // Check the signature with the public key
                X509Certificate cert = (X509Certificate) certificateMap.get(subject);
                Signature sig = Signature.getInstance(cert.getSigAlgName());
                sig.initVerify(cert);
                sig.update(clientChallenge);
                // Check the challenge string
                if (!sig.verify(signatureBytes)) {
                    throw new AuthenticationFailedException(
                            "Mismatch in client " + "challenge bytes. Malicious server?");
                }
                securityLogWriter
                        .fine("HandShake: Successfully verified the " + "digital signature from server");
            }

            byte[] challenge = DataSerializer.readByteArray(dis);
            X509EncodedKeySpec x509KeySpec = new X509EncodedKeySpec(keyBytes);
            KeyFactory keyFact = KeyFactory.getInstance("DH");
            // PublicKey pubKey = keyFact.generatePublic(x509KeySpec);
            this.clientPublicKey = keyFact.generatePublic(x509KeySpec);

            HeapDataOutputStream hdos = new HeapDataOutputStream(Version.CURRENT);
            try {
                DataSerializer.writeProperties(p_credentials, hdos);
                // Also add the challenge string
                DataSerializer.writeByteArray(challenge, hdos);

                // byte[] encBytes = encrypt.doFinal(hdos.toByteArray());
                byte[] encBytes = encryptBytes(hdos.toByteArray(),
                        getEncryptCipher(dhSKAlgo, this.clientPublicKey));
                DataSerializer.writeByteArray(encBytes, dos);
            } finally {
                hdos.close();
            }
        }
    } catch (IOException ex) {
        throw ex;
    } catch (GemFireSecurityException ex) {
        throw ex;
    } catch (Exception ex) {
        throw new AuthenticationFailedException("HandShake failed in Diffie-Hellman key exchange", ex);
    }
    dos.flush();
}

From source file: org.apache.geode.internal.cache.tier.sockets.HandShake.java

/**
 * This method writes what the readCredential() method expects to read (note the
 * use of the singular "credential"). It is similar to writeCredentials(), except
 * that it doesn't write credential-properties.
 */
public byte writeCredential(DataOutputStream dos, DataInputStream dis, String authInit, boolean isNotification,
        DistributedMember member, HeapDataOutputStream heapdos) throws IOException, GemFireSecurityException {

    if (!this.multiuserSecureMode && (authInit == null || authInit.length() == 0)) {
        // No credentials indicator
        heapdos.writeByte(CREDENTIALS_NONE);
        heapdos.flush();
        dos.write(heapdos.toByteArray());
        dos.flush();
        return -1;
    }

    if (dhSKAlgo == null || dhSKAlgo.length() == 0) {
        // Normal credentials without encryption indicator
        heapdos.writeByte(CREDENTIALS_NORMAL);
        this.appSecureMode = CREDENTIALS_NORMAL;
        // DataSerializer.writeProperties(p_credentials, heapdos);
        heapdos.flush();
        dos.write(heapdos.toByteArray());
        dos.flush();
        return -1;
    }
    byte acceptanceCode = -1;
    try {
        InternalLogWriter securityLogWriter = (InternalLogWriter) this.system.getSecurityLogWriter();
        securityLogWriter.fine("HandShake: using Diffie-Hellman key exchange with algo " + dhSKAlgo);
        boolean requireAuthentication = (certificateFilePath != null && certificateFilePath.length() > 0);
        if (requireAuthentication) {
            securityLogWriter.fine("HandShake: server authentication using digital " + "signature required");
        }
        // Credentials with encryption indicator
        heapdos.writeByte(CREDENTIALS_DHENCRYPT);
        this.appSecureMode = CREDENTIALS_DHENCRYPT;
        heapdos.writeBoolean(requireAuthentication);
        // Send the symmetric encryption algorithm name
        DataSerializer.writeString(dhSKAlgo, heapdos);
        // Send the DH public key
        byte[] keyBytes = dhPublicKey.getEncoded();
        DataSerializer.writeByteArray(keyBytes, heapdos);
        byte[] clientChallenge = null;
        if (requireAuthentication) {
            // Authentication of server should be with the client supplied
            // challenge
            clientChallenge = new byte[64];
            random.nextBytes(clientChallenge);
            DataSerializer.writeByteArray(clientChallenge, heapdos);
        }
        heapdos.flush();
        dos.write(heapdos.toByteArray());
        dos.flush();

        // Expect the alias and signature in the reply
        acceptanceCode = dis.readByte();
        if (acceptanceCode != REPLY_OK && acceptanceCode != REPLY_AUTH_NOT_REQUIRED) {
            // Ignore the useless data
            dis.readByte();
            dis.readInt();
            if (!isNotification) {
                DataSerializer.readByteArray(dis);
            }
            readMessage(dis, dos, acceptanceCode, member);
        } else if (acceptanceCode == REPLY_OK) {
            // Get the public key of the other side
            keyBytes = DataSerializer.readByteArray(dis);
            if (requireAuthentication) {
                String subject = DataSerializer.readString(dis);
                byte[] signatureBytes = DataSerializer.readByteArray(dis);
                if (!certificateMap.containsKey(subject)) {
                    throw new AuthenticationFailedException(
                            LocalizedStrings.HandShake_HANDSHAKE_FAILED_TO_FIND_PUBLIC_KEY_FOR_SERVER_WITH_SUBJECT_0
                                    .toLocalizedString(subject));
                }

                // Check the signature with the public key
                X509Certificate cert = (X509Certificate) certificateMap.get(subject);
                Signature sig = Signature.getInstance(cert.getSigAlgName());
                sig.initVerify(cert);
                sig.update(clientChallenge);
                // Check the challenge string
                if (!sig.verify(signatureBytes)) {
                    throw new AuthenticationFailedException(
                            "Mismatch in client " + "challenge bytes. Malicious server?");
                }
                securityLogWriter
                        .fine("HandShake: Successfully verified the " + "digital signature from server");
            }

            // Read server challenge bytes
            byte[] serverChallenge = DataSerializer.readByteArray(dis);
            X509EncodedKeySpec x509KeySpec = new X509EncodedKeySpec(keyBytes);
            KeyFactory keyFact = KeyFactory.getInstance("DH");
            // PublicKey pubKey = keyFact.generatePublic(x509KeySpec);
            this.clientPublicKey = keyFact.generatePublic(x509KeySpec);

            HeapDataOutputStream hdos = new HeapDataOutputStream(Version.CURRENT);
            try {
                // Add the challenge string
                DataSerializer.writeByteArray(serverChallenge, hdos);
                // byte[] encBytes = encrypt.doFinal(hdos.toByteArray());
                byte[] encBytes = encryptBytes(hdos.toByteArray(),
                        getEncryptCipher(dhSKAlgo, this.clientPublicKey));
                DataSerializer.writeByteArray(encBytes, dos);
            } finally {
                hdos.close();
            }
        }
    } catch (IOException ex) {
        throw ex;
    } catch (GemFireSecurityException ex) {
        throw ex;
    } catch (Exception ex) {
        throw new AuthenticationFailedException("HandShake failed in Diffie-Hellman key exchange", ex);
    }
    dos.flush();
    return acceptanceCode;
}
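
In both HandShake listings above, dis.readInt() appears only in the rejection path: when the acceptance code is neither REPLY_OK nor REPLY_AUTH_NOT_REQUIRED, the client drains the remainder of the refusal payload (a byte, an int, and, for non-notification connections, a byte array) so the stream is positioned correctly before readMessage reads the failure details.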

From source file: org.nd4j.linalg.factory.Nd4j.java

/**
 * Read in an ndarray from a data input stream
 *
 * @param dis the data input stream to read from
 * @return the ndarray
 * @throws IOException
 */
public static IComplexNDArray readComplex(DataInputStream dis) throws IOException {
    int dimensions = dis.readInt();
    int[] shape = new int[dimensions];
    int[] stride = new int[dimensions];

    for (int i = 0; i < dimensions; i++)
        shape[i] = dis.readInt();
    for (int i = 0; i < dimensions; i++)
        stride[i] = dis.readInt();
    String dataType = dis.readUTF();

    String type = dis.readUTF();

    if (!type.equals("complex"))
        throw new IllegalArgumentException("Trying to read in a real ndarray");

    if (dataType.equals("double")) {
        double[] data = ArrayUtil.readDouble(ArrayUtil.prod(shape), dis);
        return createComplex(data, shape, stride, 0);
    }

    double[] data = ArrayUtil.read(ArrayUtil.prod(shape), dis);
    return createComplex(data, shape, stride, 0);
}
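
readComplex implies the serialized header layout: a dimension count, the shape ints, the stride ints, and two UTF tags. A hypothetical writer for that header (a sketch only, not Nd4j's actual serializer; writing of the element data is elided) might look like:

public static void writeComplexHeader(IComplexNDArray arr, DataOutputStream dos) throws IOException {
    dos.writeInt(arr.shape().length); // dimensions, matched by the first readInt
    for (int s : arr.shape())
        dos.writeInt(s); // shape
    for (int s : arr.stride())
        dos.writeInt(s); // stride
    dos.writeUTF("double"); // data type tag
    dos.writeUTF("complex"); // array kind tag checked by readComplex
    // ...element data follows
}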

From source file: org.apache.geode.internal.cache.tier.sockets.HandShake.java

/**
 * Client-side handshake with a Server
 */
public ServerQueueStatus handshakeWithServer(Connection conn, ServerLocation location,
        CommunicationMode communicationMode) throws IOException, AuthenticationRequiredException,
        AuthenticationFailedException, ServerRefusedConnectionException {
    try {
        ServerQueueStatus serverQStatus = null;
        Socket sock = conn.getSocket();
        DataOutputStream dos = new DataOutputStream(sock.getOutputStream());
        final InputStream in = sock.getInputStream();
        DataInputStream dis = new DataInputStream(in);
        DistributedMember member = getIDForSocket(sock);
        // if running in a loner system, use the new port number in the ID to
        // help differentiate from other clients
        DM dm = ((InternalDistributedSystem) this.system).getDistributionManager();
        InternalDistributedMember idm = dm.getDistributionManagerId();
        synchronized (idm) {
            if (idm.getPort() == 0 && dm instanceof LonerDistributionManager) {
                int port = sock.getLocalPort();
                ((LonerDistributionManager) dm).updateLonerPort(port);
                updateProxyID(dm.getDistributionManagerId());
            }
        }
        if (communicationMode.isWAN()) {
            this.credentials = getCredentials(member);
        }
        byte intermediateAcceptanceCode = write(dos, dis, communicationMode, REPLY_OK, this.clientReadTimeout,
                null, this.credentials, member, false);

        String authInit = this.system.getProperties().getProperty(SECURITY_CLIENT_AUTH_INIT);
        if (!communicationMode.isWAN() && intermediateAcceptanceCode != REPLY_AUTH_NOT_REQUIRED
                && (authInit != null && authInit.length() != 0)) {
            location.compareAndSetRequiresCredentials(true);
        }
        // Read the acceptance code
        byte acceptanceCode = dis.readByte();
        if (acceptanceCode == (byte) 21 && !(sock instanceof SSLSocket)) {
            // This is likely the case of server setup with SSL and client not using
            // SSL
            throw new AuthenticationRequiredException(
                    LocalizedStrings.HandShake_SERVER_EXPECTING_SSL_CONNECTION.toLocalizedString());
        }
        if (acceptanceCode == REPLY_SERVER_IS_LOCATOR) {
            throw new GemFireConfigException("Improperly configured client detected.  " + "Server at "
                    + location + " is actually a locator.  Use addPoolLocator to configure locators.");
        }

        // Successful handshake for GATEWAY_TO_GATEWAY mode sets the peer version in connection
        if (communicationMode.isWAN() && !(acceptanceCode == REPLY_EXCEPTION_AUTHENTICATION_REQUIRED
                || acceptanceCode == REPLY_EXCEPTION_AUTHENTICATION_FAILED)) {
            short wanSiteVersion = Version.readOrdinal(dis);
            conn.setWanSiteVersion(wanSiteVersion);
            // establish a versioned stream for the other site, if necessary
            if (wanSiteVersion < Version.CURRENT_ORDINAL) {
                dis = new VersionedDataInputStream(dis, Version.fromOrdinalOrCurrent(wanSiteVersion));
            }
        }

        // No need to check for return value since DataInputStream already throws
        // EOFException in case of EOF
        byte epType = dis.readByte();
        int qSize = dis.readInt();

        // Read the server member
        member = readServerMember(dis);
        serverQStatus = new ServerQueueStatus(epType, qSize, member);

        // Read the message (if any)
        readMessage(dis, dos, acceptanceCode, member);

        // Read delta-propagation property value from server.
        // [sumedh] Static variable below? Client can connect to different
        // DSes with different values of this. It should be a member variable.
        if (!communicationMode.isWAN() && currentClientVersion.compareTo(Version.GFE_61) >= 0) {
            deltaEnabledOnServer = dis.readBoolean();
        }

        // validate that the remote side has a different distributed system id.
        if (communicationMode.isWAN() && Version.GFE_66.compareTo(conn.getWanSiteVersion()) <= 0
                && currentClientVersion.compareTo(Version.GFE_66) >= 0) {
            int remoteDistributedSystemId = in.read();
            int localDistributedSystemId = ((InternalDistributedSystem) system).getDistributionManager()
                    .getDistributedSystemId();
            if (localDistributedSystemId >= 0 && localDistributedSystemId == remoteDistributedSystemId) {
                throw new GatewayConfigurationException(
                        "Remote WAN site's distributed system id " + remoteDistributedSystemId
                                + " matches this sites distributed system id " + localDistributedSystemId);
            }
        }
        // Read the PDX registry size from the remote side
        if (communicationMode.isWAN() && Version.GFE_80.compareTo(conn.getWanSiteVersion()) <= 0
                && currentClientVersion.compareTo(Version.GFE_80) >= 0) {
            int remotePdxSize = dis.readInt();
            serverQStatus.setPdxSize(remotePdxSize);
        }

        return serverQStatus;
    } catch (IOException ex) {
        CancelCriterion stopper = this.system.getCancelCriterion();
        stopper.checkCancelInProgress(null);
        throw ex;
    }
}

From source file: org.apache.hadoop.hdfs.DFSClient.java

/**
 * Get the checksum of a file.
 * @param src The file path
 * @return The checksum 
 */
public static MD5MD5CRC32FileChecksum getFileChecksum(String src, ClientProtocol namenode,
        SocketFactory socketFactory, int socketTimeout) throws IOException {
    //get all block locations
    LocatedBlocks blockLocations = callGetBlockLocations(namenode, src, 0, Long.MAX_VALUE);
    if (null == blockLocations) {
        throw new FileNotFoundException("File does not exist: " + src);
    }
    List<LocatedBlock> locatedblocks = blockLocations.getLocatedBlocks();
    final DataOutputBuffer md5out = new DataOutputBuffer();
    int bytesPerCRC = 0;
    long crcPerBlock = 0;
    boolean refetchBlocks = false;
    int lastRetriedIndex = -1;

    //get block checksum for each block
    for (int i = 0; i < locatedblocks.size(); i++) {
        if (refetchBlocks) { // refetch to get fresh tokens
            blockLocations = callGetBlockLocations(namenode, src, 0, Long.MAX_VALUE);
            if (null == blockLocations) {
                throw new FileNotFoundException("File does not exist: " + src);
            }
            locatedblocks = blockLocations.getLocatedBlocks();
            refetchBlocks = false;
        }
        LocatedBlock lb = locatedblocks.get(i);
        final Block block = lb.getBlock();
        final DatanodeInfo[] datanodes = lb.getLocations();

        //try each datanode location of the block
        final int timeout = (socketTimeout > 0)
                ? (socketTimeout + HdfsConstants.READ_TIMEOUT_EXTENSION * datanodes.length)
                : 0;

        boolean done = false;
        for (int j = 0; !done && j < datanodes.length; j++) {
            Socket sock = null;
            DataOutputStream out = null;
            DataInputStream in = null;

            try {
                //connect to a datanode
                sock = socketFactory.createSocket();
                NetUtils.connect(sock, NetUtils.createSocketAddr(datanodes[j].getName()), timeout);
                sock.setSoTimeout(timeout);

                out = new DataOutputStream(
                        new BufferedOutputStream(NetUtils.getOutputStream(sock), DataNode.SMALL_BUFFER_SIZE));
                in = new DataInputStream(NetUtils.getInputStream(sock));

                if (LOG.isDebugEnabled()) {
                    LOG.debug("write to " + datanodes[j].getName() + ": "
                            + DataTransferProtocol.OP_BLOCK_CHECKSUM + ", block=" + block);
                }

                // get block MD5
                out.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
                out.write(DataTransferProtocol.OP_BLOCK_CHECKSUM);
                out.writeLong(block.getBlockId());
                out.writeLong(block.getGenerationStamp());
                lb.getBlockToken().write(out);
                out.flush();

                final short reply = in.readShort();
                if (reply != DataTransferProtocol.OP_STATUS_SUCCESS) {
                    if (reply == DataTransferProtocol.OP_STATUS_ERROR_ACCESS_TOKEN && i > lastRetriedIndex) {
                        if (LOG.isDebugEnabled()) {
                            LOG.debug("Got access token error in response to OP_BLOCK_CHECKSUM " + "for file "
                                    + src + " for block " + block + " from datanode " + datanodes[j].getName()
                                    + ". Will retry the block once.");
                        }
                        lastRetriedIndex = i;
                        done = true; // actually it's not done; but we'll retry
                        i--; // repeat at i-th block
                        refetchBlocks = true;
                        break;
                    } else {
                        throw new IOException("Bad response " + reply + " for block " + block
                                + " from datanode " + datanodes[j].getName());
                    }
                }

                //read byte-per-checksum
                final int bpc = in.readInt();
                if (i == 0) { //first block
                    bytesPerCRC = bpc;
                } else if (bpc != bytesPerCRC) {
                    throw new IOException(
                            "Byte-per-checksum not matched: bpc=" + bpc + " but bytesPerCRC=" + bytesPerCRC);
                }

                //read crc-per-block
                final long cpb = in.readLong();
                if (locatedblocks.size() > 1 && i == 0) {
                    crcPerBlock = cpb;
                }

                //read md5
                final MD5Hash md5 = MD5Hash.read(in);
                md5.write(md5out);

                done = true;

                if (LOG.isDebugEnabled()) {
                    if (i == 0) {
                        LOG.debug("set bytesPerCRC=" + bytesPerCRC + ", crcPerBlock=" + crcPerBlock);
                    }
                    LOG.debug("got reply from " + datanodes[j].getName() + ": md5=" + md5);
                }
            } catch (IOException ie) {
                LOG.warn("src=" + src + ", datanodes[" + j + "].getName()=" + datanodes[j].getName(), ie);
            } finally {
                IOUtils.closeStream(in);
                IOUtils.closeStream(out);
                IOUtils.closeSocket(sock);
            }
        }

        if (!done) {
            throw new IOException("Fail to get block MD5 for " + block);
        }
    }

    //compute file MD5
    final MD5Hash fileMD5 = MD5Hash.digest(md5out.getData());
    return new MD5MD5CRC32FileChecksum(bytesPerCRC, crcPerBlock, fileMD5);
}

From source file: org.ramadda.repository.database.DatabaseManager.java

/**
 * Load a database dump (.rdb) file into the database, optionally dropping the
 * existing tables first.
 *
 * @param file path of the dump file to load
 * @param doDrop if true, drop the existing tables before loading
 *
 * @throws Exception on any read or SQL error
 */
public void loadRdbFile(String file, boolean doDrop) throws Exception {

    DataInputStream dis = new DataInputStream(new FileInputStream(file));
    XmlEncoder encoder = new XmlEncoder();
    String tableXml = readString(dis);
    List<TableInfo> tableInfos = (List<TableInfo>) encoder.toObject(tableXml);
    System.err.println("# table infos:" + tableInfos.size());
    Hashtable<String, TableInfo> tables = new Hashtable<String, TableInfo>();
    StringBuffer sql = new StringBuffer();
    StringBuffer drop = new StringBuffer();
    for (TableInfo tableInfo : tableInfos) {
        tables.put(tableInfo.getName(), tableInfo);
        drop.append("drop table " + tableInfo.getName() + ";\n");
        sql.append("CREATE TABLE " + tableInfo.getName() + "  (\n");
        for (int i = 0; i < tableInfo.getColumns().size(); i++) {
            ColumnInfo column = tableInfo.getColumns().get(i);
            if (i > 0) {
                sql.append(",\n");
            }
            sql.append(column.getName());
            sql.append(" ");
            int type = column.getType();

            if (type == ColumnInfo.TYPE_TIMESTAMP) {
                sql.append("ramadda.datetime");
            } else if (type == ColumnInfo.TYPE_VARCHAR) {
                sql.append("varchar(" + column.getSize() + ")");
            } else if (type == ColumnInfo.TYPE_INTEGER) {
                sql.append("int");
            } else if (type == ColumnInfo.TYPE_DOUBLE) {
                sql.append("ramadda.double");
            } else if (type == ColumnInfo.TYPE_BIGINT) {
                sql.append("ramadda.bigint");
            } else if (type == ColumnInfo.TYPE_SMALLINT) {
                sql.append("int");
            } else if (type == ColumnInfo.TYPE_CLOB) {
                sql.append(convertType("clob", column.getSize()));
            } else if (type == ColumnInfo.TYPE_BLOB) {
                sql.append(convertType("blob", column.getSize()));
            } else if (type == ColumnInfo.TYPE_UNKNOWN) {
                //                    sql.append(convertType("blob", column.getSize()));
            } else {
                throw new IllegalStateException("Unknown column type:" + type);
            }
        }
        sql.append(");\n");
        for (IndexInfo indexInfo : tableInfo.getIndices()) {
            sql.append("CREATE INDEX " + indexInfo.getName() + " ON " + tableInfo.getName() + " ("
                    + indexInfo.getColumnName() + ");\n");
        }
    }

    //        System.err.println(drop);
    //        System.err.println(sql);

    //TODO: 
    if (doDrop) {
        loadSql(drop.toString(), true, false);
    }
    loadSql(convertSql(sql.toString()), false, true);

    TableInfo tableInfo = null;
    int rows = 0;
    Connection connection = getConnection();
    try {
        while (true) {
            int what = dis.readInt();
            if (what == DUMPTAG_TABLE) {
                String tableName = readString(dis);
                tableInfo = tables.get(tableName);
                if (tableInfo == null) {
                    throw new IllegalArgumentException("No table:" + tableName);
                }
                if (tableInfo.statement == null) {
                    String insert = SqlUtil.makeInsert(tableInfo.getName(), tableInfo.getColumnNames());
                    tableInfo.statement = connection.prepareStatement(insert);
                }
                System.err.println("importing table:" + tableInfo.getName());

                continue;
            }
            if (what == DUMPTAG_END) {
                break;
            }
            if (what != DUMPTAG_ROW) {
                throw new IllegalArgumentException("Unkown tag:" + what);
            }

            rows++;
            if ((rows % 1000) == 0) {
                System.err.println("rows:" + rows);
            }

            Object[] values = new Object[tableInfo.getColumns().size()];
            int colCnt = 0;
            for (ColumnInfo columnInfo : tableInfo.getColumns()) {
                int type = columnInfo.getType();
                if (type == ColumnInfo.TYPE_TIMESTAMP) {
                    long dttm = dis.readLong();
                    values[colCnt++] = new Date(dttm);
                } else if (type == ColumnInfo.TYPE_VARCHAR) {
                    String s = readString(dis);
                    if ((s != null) && (s.length() > 5000)) {
                        //A hack for old dbs
                        if (tableInfo.getName().equals("metadata")) {
                            s = s.substring(0, 4999);
                            System.err.println("clipping: " + tableInfo.getName() + "." + columnInfo.getName());
                        }

                    }
                    values[colCnt++] = s;
                } else if (type == ColumnInfo.TYPE_INTEGER) {
                    values[colCnt++] = new Integer(dis.readInt());
                } else if (type == ColumnInfo.TYPE_DOUBLE) {
                    values[colCnt++] = new Double(dis.readDouble());
                } else if (type == ColumnInfo.TYPE_CLOB) {
                    values[colCnt++] = readString(dis);
                } else if (type == ColumnInfo.TYPE_BLOB) {
                    values[colCnt++] = readString(dis);
                } else if (type == ColumnInfo.TYPE_BIGINT) {
                    long v = dis.readLong();
                    values[colCnt++] = new Long(v);
                } else if (type == ColumnInfo.TYPE_SMALLINT) {
                    short v = dis.readShort();
                    values[colCnt++] = new Short(v);
                } else if (type == ColumnInfo.TYPE_UNKNOWN) {
                } else {
                    throw new IllegalArgumentException(
                            "Unknown type for table" + tableInfo.getName() + " " + type);
                }
            }
            setValues(tableInfo.statement, values);
            tableInfo.statement.addBatch();
            tableInfo.batchCnt++;
            if (tableInfo.batchCnt > 1000) {
                tableInfo.batchCnt = 0;
                tableInfo.statement.executeBatch();

            }
        }

        //Now finish up the batch
        for (TableInfo ti : tableInfos) {
            if (ti.batchCnt > 0) {
                ti.batchCnt = 0;
                ti.statement.executeBatch();
            }
        }
    } finally {
        IOUtil.close(dis);
        closeConnection(connection);
    }

    System.err.println("imported " + rows + " rows");

}
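
The dump layout that loadRdbFile consumes is implied by its reads: an XML-encoded table description string, then a stream of int tags in which DUMPTAG_TABLE precedes a table name, DUMPTAG_ROW precedes one row of typed column values, and DUMPTAG_END closes the stream. A minimal sketch of the writer-side framing (assuming a writeString counterpart to readString; the per-column value encoding is elided):

private void writeRdbTable(DataOutputStream dos, TableInfo tableInfo, List<Object[]> rows) throws Exception {
    dos.writeInt(DUMPTAG_TABLE); // tag matched by dis.readInt() in loadRdbFile
    writeString(dos, tableInfo.getName());
    for (Object[] row : rows) {
        dos.writeInt(DUMPTAG_ROW);
        // ...write each value according to its ColumnInfo type
    }
    // after all tables: dos.writeInt(DUMPTAG_END);
}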

From source file: org.apache.hadoop.hdfs.server.namenode.IngestLocal.java

/**
 * Continue to ingest transaction logs until the currentState is 
 * no longer INGEST. If lastScan is set to true, then we process 
 * till the end of the file and return.
 */
int ingestFSEdits(File fname, DataInputStream in, int logVersion) throws IOException {
    FSNamesystem fsNamesys = FSNamesystem.getFSNamesystem();
    FSDirectory fsDir = fsNamesys.dir;
    int numEdits = 0;
    String clientName = null;
    String clientMachine = null;
    String path = null;
    int numOpAdd = 0, numOpClose = 0, numOpDelete = 0, numOpRename = 0, numOpSetRepl = 0, numOpMkDir = 0,
            numOpSetPerm = 0, numOpSetOwner = 0, numOpSetGenStamp = 0, numOpTimes = 0, numOpOther = 0;
    long startTime = FSNamesystem.now();
    boolean error = false;
    boolean quitAfterScan = false;

    while (running && !quitAfterScan) {

        // if the application requested that we make a final pass over 
        // the transaction log, then we remember it here. We close and
        // reopen the file to ensure that we can see all the data in the
        // file, one reason being that NFS has open-to-close cache
        // coherency and the edit log could be stored in NFS.
        //
        if (lastScan) {
            LOG.info("Ingest: Starting last scan of transaction log " + fname);
            quitAfterScan = true;
            fp.close();
            rp = new RandomAccessFile(fname, "r");
            fp = new FileInputStream(rp.getFD()); // open for reads
            fc = rp.getChannel();

            // discard older buffers and start a fresh one.
            fc.position(currentPosition);
            in = new DataInputStream(fp);
        }

        //
        // Verify that the signature of the file matches. This is important in the
        // case when the Primary NN was configured to write transactions to
        // devices (local and NFS) and the Primary had encountered errors
        // to the NFS device and has continued writing transactions to its
        // device only. In this case, the rollEditLog() RPC would return the
        // modtime of the edits file of the Primary's local device and will
        // not match with the timestamp of our local log from where we are
        // ingesting.
        //
        CheckpointSignature signature = getLastCheckpointSignature();
        if (signature != null) {
            long localtime = fname.lastModified();

            LOG.info("editLog : " + fname.getPath());
            LOG.info("editLog.lastModifiedTime : " + localtime);

            if (localtime == signature.editsTime) {
                LOG.debug("Ingest: Matched modification time of edits log. ");
            } else if (localtime < signature.editsTime) {
                LOG.info("Ingest: Timestamp of transaction log on local machine is " + localtime
                        + " and on remote namenode is " + signature.editsTime);
                String msg = "Ingest: Timestamp of transaction log on local machine is "
                        + DATE_FORM.format(new Date(localtime)) + " and on remote namenode is "
                        + DATE_FORM.format(new Date(signature.editsTime));
                LOG.info(msg);
                throw new IOException(msg);
            } else {
                LOG.info("Ingest: Timestamp of transaction log on local machine is " + localtime
                        + " and on remote namenode is " + signature.editsTime);
                String msg = "Ingest: Timestamp of transaction log on localmachine is "
                        + DATE_FORM.format(new Date(localtime)) + " and on remote namenode is "
                        + DATE_FORM.format(new Date(signature.editsTime)) + ". But this can never happen.";
                LOG.info(msg);
                throw new IOException(msg);
            }
        }

        //
        // Process all existing transactions till end of file
        //
        while (running) {
            currentPosition = fc.position(); // record the current file offset.

            try {
                long timestamp = 0;
                long mtime = 0;
                long atime = 0;
                long blockSize = 0;
                byte opcode = -1;
                error = false;
                try {
                    opcode = in.readByte();
                    if (opcode == OP_INVALID) {
                        FSNamesystem.LOG.debug("Ingest: Invalid opcode, reached end of log "
                                + "Number of transactions found " + numEdits);
                        break; // No more transactions.
                    }
                } catch (EOFException e) {
                    break; // No more transactions.
                }
                switch (opcode) {
                case OP_ADD:
                case OP_CLOSE: {
                    // versions > 0 support per file replication
                    // get name and replication
                    int length = in.readInt();
                    if (-7 == logVersion && length != 3 || -17 < logVersion && logVersion < -7 && length != 4
                            || logVersion <= -17 && length != 5) {
                        throw new IOException("Ingest: Incorrect data format." + " logVersion is " + logVersion
                                + " but writables.length is " + length + ". ");
                    }
                    path = FSImage.readString(in);
                    short replication = readShort(in);
                    mtime = readLong(in);
                    if (logVersion <= -17) {
                        atime = readLong(in);
                    }
                    if (logVersion < -7) {
                        blockSize = readLong(in);
                    }
                    // get blocks
                    Block blocks[] = null;
                    if (logVersion <= -14) {
                        blocks = readBlocks(in);

                    } else {
                        BlockTwo oldblk = new BlockTwo();
                        int num = in.readInt();
                        blocks = new Block[num];
                        for (int i = 0; i < num; i++) {
                            oldblk.readFields(in);
                            blocks[i] = new Block(oldblk.blkid, oldblk.len, Block.GRANDFATHER_GENERATION_STAMP);
                        }
                    }

                    // Older versions of HDFS do not store the block size in the inode.
                    // If the file has more than one block, use the size of the
                    // first block as the blocksize. Otherwise use the default
                    // block size.
                    if (-8 <= logVersion && blockSize == 0) {
                        if (blocks.length > 1) {
                            blockSize = blocks[0].getNumBytes();
                        } else {
                            long first = ((blocks.length == 1) ? blocks[0].getNumBytes() : 0);
                            blockSize = Math.max(fsNamesys.getDefaultBlockSize(), first);
                        }
                    }

                    PermissionStatus permissions = fsNamesys.getUpgradePermission();
                    if (logVersion <= -11) {
                        permissions = PermissionStatus.read(in);
                    }

                    // clientname, clientMachine and block locations of last block.
                    if (opcode == OP_ADD && logVersion <= -12) {
                        clientName = FSImage.readString(in);
                        clientMachine = FSImage.readString(in);
                        if (-13 <= logVersion) {
                            readDatanodeDescriptorArray(in);
                        }
                    } else {
                        clientName = "";
                        clientMachine = "";
                    }

                    // The open lease transaction re-creates a file if necessary.
                    // Delete the file if it already exists.
                    if (FSNamesystem.LOG.isDebugEnabled()) {
                        FSNamesystem.LOG.debug(opcode + ": " + path + " numblocks : " + blocks.length
                                + " clientHolder " + clientName + " clientMachine " + clientMachine);
                    }

                    fsDir.unprotectedDelete(path, mtime);

                    // add to the file tree
                    INodeFile node = (INodeFile) fsDir.unprotectedAddFile(path, permissions, blocks,
                            replication, mtime, atime, blockSize);
                    if (opcode == OP_ADD) {
                        numOpAdd++;
                        //
                        // Replace current node with a INodeUnderConstruction.
                        // Recreate in-memory lease record.
                        //
                        INodeFileUnderConstruction cons = new INodeFileUnderConstruction(
                                node.getLocalNameBytes(), node.getReplication(), node.getModificationTime(),
                                node.getPreferredBlockSize(), node.getBlocks(), node.getPermissionStatus(),
                                clientName, clientMachine, null);
                        fsDir.replaceNode(path, node, cons);
                        fsNamesys.leaseManager.addLease(cons.clientName, path);
                    }
                    break;
                }
                case OP_SET_REPLICATION: {
                    numOpSetRepl++;
                    path = FSImage.readString(in);
                    short replication = readShort(in);
                    fsDir.unprotectedSetReplication(path, replication, null);
                    break;
                }
                case OP_RENAME: {
                    numOpRename++;
                    int length = in.readInt();
                    if (length != 3) {
                        throw new IOException("Ingest: Incorrect data format. " + "Mkdir operation.");
                    }
                    String s = FSImage.readString(in);
                    String d = FSImage.readString(in);
                    timestamp = readLong(in);
                    FileStatus dinfo = fsDir.getFileInfo(d);
                    fsDir.unprotectedRenameTo(s, d, timestamp);
                    fsNamesys.changeLease(s, d, dinfo);
                    break;
                }
                case OP_DELETE: {
                    numOpDelete++;
                    int length = in.readInt();
                    if (length != 2) {
                        throw new IOException("Ingest: Incorrect data format. " + "delete operation.");
                    }
                    path = FSImage.readString(in);
                    timestamp = readLong(in);
                    fsDir.unprotectedDelete(path, timestamp);
                    break;
                }
                case OP_MKDIR: {
                    numOpMkDir++;
                    PermissionStatus permissions = fsNamesys.getUpgradePermission();
                    int length = in.readInt();
                    if (-17 < logVersion && length != 2 || logVersion <= -17 && length != 3) {
                        throw new IOException("Ingest: Incorrect data format. " + "Mkdir operation.");
                    }
                    path = FSImage.readString(in);
                    timestamp = readLong(in);

                    // The disk format stores atimes for directories as well.
                    // However, currently this is not being updated/used because of
                    // performance reasons.
                    if (logVersion <= -17) {
                        atime = readLong(in);
                    }

                    if (logVersion <= -11) {
                        permissions = PermissionStatus.read(in);
                    }
                    fsDir.unprotectedMkdir(path, permissions, timestamp);
                    break;
                }
                case OP_SET_GENSTAMP: {
                    numOpSetGenStamp++;
                    long lw = in.readLong();
                    fsDir.namesystem.setGenerationStamp(lw);
                    break;
                }
                case OP_DATANODE_ADD: {
                    numOpOther++;
                    FSImage.DatanodeImage nodeimage = new FSImage.DatanodeImage();
                    nodeimage.readFields(in);
                    // Datanodes are not persistent any more.
                    break;
                }
                case OP_DATANODE_REMOVE: {
                    numOpOther++;
                    DatanodeID nodeID = new DatanodeID();
                    nodeID.readFields(in);

                    //Datanodes are not persistent any more.
                    break;
                }
                case OP_SET_PERMISSIONS: {
                    numOpSetPerm++;
                    if (logVersion > -11)
                        throw new IOException(
                                "Ingest: Unexpected opcode " + opcode + " for version " + logVersion);
                    fsDir.unprotectedSetPermission(FSImage.readString(in), FsPermission.read(in));
                    break;
                }
                case OP_SET_OWNER: {
                    numOpSetOwner++;
                    if (logVersion > -11)
                        throw new IOException(
                                "Ingest: Unexpected opcode " + opcode + " for version " + logVersion);
                    fsDir.unprotectedSetOwner(FSImage.readString(in), FSImage.readString_EmptyAsNull(in),
                            FSImage.readString_EmptyAsNull(in));
                    break;
                }
                case OP_SET_NS_QUOTA: {
                    if (logVersion > -16) {
                        throw new IOException(
                                "Ingest: Unexpected opcode " + opcode + " for version " + logVersion);
                    }
                    fsDir.unprotectedSetQuota(FSImage.readString(in), readLongWritable(in),
                            FSConstants.QUOTA_DONT_SET);
                    break;
                }
                case OP_CLEAR_NS_QUOTA: {
                    if (logVersion > -16) {
                        throw new IOException(
                                "Ingest: Unexpected opcode " + opcode + " for version " + logVersion);
                    }
                    fsDir.unprotectedSetQuota(FSImage.readString(in), FSConstants.QUOTA_RESET,
                            FSConstants.QUOTA_DONT_SET);
                    break;
                }

                case OP_SET_QUOTA:
                    fsDir.unprotectedSetQuota(FSImage.readString(in), readLongWritable(in),
                            readLongWritable(in));

                    break;

                case OP_TIMES: {
                    numOpTimes++;
                    int length = in.readInt();
                    if (length != 3) {
                        throw new IOException("Ingest: Incorrect data format. " + "times operation.");
                    }
                    path = FSImage.readString(in);
                    mtime = readLong(in);
                    atime = readLong(in);
                    fsDir.unprotectedSetTimes(path, mtime, atime, true);
                    break;
                }
                default: {
                    throw new IOException("Ingest: Never seen opcode " + opcode);
                }
                }
                numEdits++;
                LOG.info("Ingest: Processed transaction from " + fname + " opcode " + opcode + " file offset "
                        + currentPosition);
            } // end try ---
            catch (IOException e) {
                error = true; // if we haven't reached eof, then error.
                break;
            }
        } // while (running)-----

        // if we failed to read the entire transaction from disk, 
        // then roll back to the offset where there was a last good 
        // read, sleep for sometime for new transaction to
        // appear in the file and then continue;
        //
        if (error || running) {

            // discard older buffers and start a fresh one.
            fc.position(currentPosition);
            in = new DataInputStream(fp);

            if (error) {
                LOG.info("Ingest: Incomplete transaction record at offset " + fc.position()
                        + " but the file is of size " + fc.size() + ". Continuing....");
            }

            if (running && !lastScan) {
                try {
                    Thread.sleep(1000); // sleep for a second
                } catch (InterruptedException e) {
                    // break out of waiting if we receive an interrupt.
                }
            }
        }
    } //while (running && !quitAfterScan)-------------
    LOG.info("Ingest: Edits file " + fname.getName() + " numedits " + numEdits + " loaded in "
            + (FSNamesystem.now() - startTime) / 1000 + " seconds.");

    // If the last Scan was completed, then stop the Ingest thread.
    if (lastScan && quitAfterScan) {
        LOG.info("Ingest: lastScan completed.");
        running = false;
    }
    return numEdits; // total transactions consumed
}
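
In this listing readInt plays two roles: it reads the length field (the count of writables) that prefixes OP_ADD/OP_CLOSE, OP_RENAME, OP_DELETE, OP_MKDIR, and OP_TIMES records, and it reads the block count in the older (logVersion > -14) block-list format.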