Example usage for java.io DataInputStream readLong

List of usage examples for java.io DataInputStream readLong

Introduction

In this page you can find the example usage for java.io DataInputStream readLong.

Prototype

public final long readLong() throws IOException 

Source Link

Document

See the general contract of the readLong method of DataInput.

Usage

From source file:org.apache.hadoop.hdfs.AvatarClient.java

/**
 * Gets the MD5-of-MD5-of-CRC32 checksum of a file: for each block, a
 * per-block checksum is requested from one of the block's datanodes over
 * the data-transfer protocol; the concatenated block MD5s are then hashed
 * again to produce the file checksum.
 *
 * @param src The file path
 * @param namenode protocol handle used to look up the file's block locations
 * @param socketFactory factory used to open the socket to each datanode
 * @param socketTimeout base socket timeout in milliseconds; extended by
 *        3 seconds per replica when connecting
 * @return The checksum
 * @throws IOException if a block's checksum cannot be obtained from any
 *         of its replicas
 */
public static MD5MD5CRC32FileChecksum getFileChecksum(String src, AvatarProtocol namenode,
        SocketFactory socketFactory, int socketTimeout) throws IOException {
    //get all block locations
    final List<LocatedBlock> locatedblocks = callGetBlockLocations(namenode, src, 0, Long.MAX_VALUE)
            .getLocatedBlocks();
    // accumulates the MD5 of every block, in block order; digested again at the end
    final DataOutputBuffer md5out = new DataOutputBuffer();
    int bytesPerCRC = 0;
    long crcPerBlock = 0;

    //get block checksum for each block
    for (int i = 0; i < locatedblocks.size(); i++) {
        LocatedBlock lb = locatedblocks.get(i);
        final Block block = lb.getBlock();
        final DatanodeInfo[] datanodes = lb.getLocations();

        //try each datanode location of the block; allow 3s extra per replica
        final int timeout = 3000 * datanodes.length + socketTimeout;
        boolean done = false;
        for (int j = 0; !done && j < datanodes.length; j++) {
            //connect to a datanode
            final Socket sock = socketFactory.createSocket();
            NetUtils.connect(sock, NetUtils.createSocketAddr(datanodes[j].getName()), timeout);
            sock.setSoTimeout(timeout);

            DataOutputStream out = new DataOutputStream(
                    new BufferedOutputStream(NetUtils.getOutputStream(sock), DataNode.SMALL_BUFFER_SIZE));
            DataInputStream in = new DataInputStream(NetUtils.getInputStream(sock));

            // get block MD5 — the request/response field order below is fixed
            // by the data-transfer protocol and must not be reordered
            try {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("write to " + datanodes[j].getName() + ": "
                            + DataTransferProtocol.OP_BLOCK_CHECKSUM + ", block=" + block);
                }
                out.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
                out.write(DataTransferProtocol.OP_BLOCK_CHECKSUM);
                out.writeLong(block.getBlockId());
                out.writeLong(block.getGenerationStamp());
                out.flush();

                final short reply = in.readShort();
                if (reply != DataTransferProtocol.OP_STATUS_SUCCESS) {
                    throw new IOException("Bad response " + reply + " for block " + block + " from datanode "
                            + datanodes[j].getName());
                }

                //read byte-per-checksum; every block must agree with the first
                final int bpc = in.readInt();
                if (i == 0) { //first block
                    bytesPerCRC = bpc;
                } else if (bpc != bytesPerCRC) {
                    throw new IOException(
                            "Byte-per-checksum not matched: bpc=" + bpc + " but bytesPerCRC=" + bytesPerCRC);
                }

                //read crc-per-block; only recorded for multi-block files
                // (single-block files keep crcPerBlock == 0)
                final long cpb = in.readLong();
                if (locatedblocks.size() > 1 && i == 0) {
                    crcPerBlock = cpb;
                }

                //read md5 of this block and append it to the running buffer
                final MD5Hash md5 = MD5Hash.read(in);
                md5.write(md5out);

                done = true;

                if (LOG.isDebugEnabled()) {
                    if (i == 0) {
                        LOG.debug("set bytesPerCRC=" + bytesPerCRC + ", crcPerBlock=" + crcPerBlock);
                    }
                    LOG.debug("got reply from " + datanodes[j].getName() + ": md5=" + md5);
                }
            } catch (IOException ie) {
                // a failed replica is only logged; the next replica is tried
                LOG.warn("src=" + src + ", datanodes[" + j + "].getName()=" + datanodes[j].getName(), ie);
            } finally {
                IOUtils.closeStream(in);
                IOUtils.closeStream(out);
                IOUtils.closeSocket(sock);
            }
        }

        if (!done) {
            throw new IOException("Fail to get block MD5 for " + block);
        }
    }

    //compute file MD5 over the concatenated block MD5s
    final MD5Hash fileMD5 = MD5Hash.digest(md5out.getData());
    return new MD5MD5CRC32FileChecksum(bytesPerCRC, crcPerBlock, fileMD5);
}

From source file:org.apache.jxtadoop.hdfs.DFSClient.java

/**
 * Gets the MD5-of-MD5-of-CRC32 checksum of a file over a JXTA peer-to-peer
 * transport: for each block, a per-block checksum is requested from one of
 * the block's datanodes via a {@code JxtaSocket}; the concatenated block
 * MD5s are then hashed again to produce the file checksum.
 *
 * @param src The file path
 * @param namenode protocol handle used to look up the file's block locations
 * @param socketFactory unused here — the JXTA peer supplies the socket
 *        (kept for signature compatibility with the TCP variant)
 * @param socketTimeout unused here — the timeout is taken from the
 *        {@code hadoop.p2p.info.timeout} configuration key instead
 * @return The checksum
 * @throws IOException if a block's checksum cannot be obtained from any
 *         of its replicas
 */
public static MD5MD5CRC32FileChecksum getFileChecksum(String src, ClientProtocol namenode,
        SocketFactory socketFactory, int socketTimeout) throws IOException {
    //get all block locations
    final List<LocatedBlock> locatedblocks = callGetBlockLocations(namenode, src, 0, Long.MAX_VALUE)
            .getLocatedBlocks();
    // accumulates the MD5 of every block, in block order; digested again at the end
    final DataOutputBuffer md5out = new DataOutputBuffer();
    int bytesPerCRC = 0;
    long crcPerBlock = 0;

    //get block checksum for each block
    for (int i = 0; i < locatedblocks.size(); i++) {
        LocatedBlock lb = locatedblocks.get(i);
        final Block block = lb.getBlock();
        final DatanodeInfo[] datanodes = lb.getLocations();

        //try each datanode location of the block
        // NOTE(review): `timeout` is computed but unused below — the JXTA
        // socket timeout comes from configuration instead; confirm intent
        final int timeout = 3000 * datanodes.length + socketTimeout;
        boolean done = false;
        for (int j = 0; !done && j < datanodes.length; j++) {
            //connect to a datanode
            /*final Socket sock = socketFactory.createSocket();
            NetUtils.connect(sock, 
                 NetUtils.createSocketAddr(datanodes[j].getName()),
                 timeout);
            sock.setSoTimeout(timeout);*/
            JxtaSocket jsock = DFSClient.getDfsClient().getDfsClientPeer()
                    .getInfoSocket(datanodes[j].getName());
            // jsock.setSoTimeout(timeout);
            jsock.setSoTimeout(Integer.parseInt(conf.get("hadoop.p2p.info.timeout")));

            /*DataOutputStream out = new DataOutputStream(
                new BufferedOutputStream(NetUtils.getOutputStream(jsock), 
                             DataNode.SMALL_BUFFER_SIZE));
            DataInputStream in = new DataInputStream(NetUtils.getInputStream(jsock));*/
            DataOutputStream out = new DataOutputStream(new BufferedOutputStream(jsock.getOutputStream()));
            DataInputStream in = new DataInputStream(jsock.getInputStream());

            // get block MD5 — the request/response field order below is fixed
            // by the data-transfer protocol and must not be reordered
            try {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("write to " + datanodes[j].getName() + ": "
                            + DataTransferProtocol.OP_BLOCK_CHECKSUM + ", block=" + block);
                }
                out.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
                out.write(DataTransferProtocol.OP_BLOCK_CHECKSUM);
                out.writeLong(block.getBlockId());
                out.writeLong(block.getGenerationStamp());
                out.flush();

                final short reply = in.readShort();
                if (reply != DataTransferProtocol.OP_STATUS_SUCCESS) {
                    throw new IOException("Bad response " + reply + " for block " + block + " from datanode "
                            + datanodes[j].getName());
                }

                //read byte-per-checksum; every block must agree with the first
                final int bpc = in.readInt();
                if (i == 0) { //first block
                    bytesPerCRC = bpc;
                } else if (bpc != bytesPerCRC) {
                    throw new IOException(
                            "Byte-per-checksum not matched: bpc=" + bpc + " but bytesPerCRC=" + bytesPerCRC);
                }

                //read crc-per-block; only recorded for multi-block files
                final long cpb = in.readLong();
                if (locatedblocks.size() > 1 && i == 0) {
                    crcPerBlock = cpb;
                }

                //read md5 of this block and append it to the running buffer
                final MD5Hash md5 = MD5Hash.read(in);
                md5.write(md5out);

                done = true;

                if (LOG.isDebugEnabled()) {
                    if (i == 0) {
                        LOG.debug("set bytesPerCRC=" + bytesPerCRC + ", crcPerBlock=" + crcPerBlock);
                    }
                    LOG.debug("got reply from " + datanodes[j].getName() + ": md5=" + md5);
                }
            } catch (IOException ie) {
                // a failed replica is only logged; the next replica is tried
                LOG.warn("src=" + src + ", datanodes[" + j + "].getName()=" + datanodes[j].getName(), ie);
            } finally {
                IOUtils.closeStream(in);
                IOUtils.closeStream(out);
                IOUtils.closeSocket(jsock);
            }
        }

        if (!done) {
            throw new IOException("Fail to get block MD5 for " + block);
        }
    }

    //compute file MD5 over the concatenated block MD5s
    final MD5Hash fileMD5 = MD5Hash.digest(md5out.getData());
    return new MD5MD5CRC32FileChecksum(bytesPerCRC, crcPerBlock, fileMD5);
}

From source file:org.apache.jackrabbit.core.persistence.bundle.util.BundleBinding.java

/**
 * Checks a <code>PropertyState</code> from the data input stream.
 * <p>
 * The serialized layout read here is: a packed int (high 16 bits = mod
 * count, low 16 bits = property type), a multi-valued flag, a definition
 * id, a value count, then <code>count</code> values whose encoding depends
 * on the property type. Each read is wrapped in its own try/catch so a
 * truncated or corrupt record is reported and <code>false</code> is
 * returned instead of propagating an exception.
 *
 * @param in the input stream
 * @return <code>true</code> if the data is valid;
 *         <code>false</code> otherwise.
 */
public boolean checkPropertyState(DataInputStream in) {
    int type;
    try {
        type = in.readInt();
        // The high 16 bits hold the mod count; extract with a mask (&).
        // The previous code OR'ed with 0xffff, which always produced -1.
        short modCount = (short) ((type >> 16) & 0xffff);
        type &= 0xffff;
        log.debug("  PropertyType: " + PropertyType.nameFromValue(type));
        log.debug("  ModCount: " + modCount);
    } catch (IOException e) {
        log.error("Error while reading property type: " + e);
        return false;
    }
    try {
        boolean isMV = in.readBoolean();
        log.debug("  MultiValued: " + isMV);
    } catch (IOException e) {
        log.error("Error while reading multivalued: " + e);
        return false;
    }
    try {
        String definitionId = in.readUTF();
        log.debug("  DefinitionId: " + definitionId);
    } catch (IOException e) {
        log.error("Error while reading definition id: " + e);
        return false;
    }

    int count;
    try {
        count = in.readInt();
        log.debug("  num values: " + count);
    } catch (IOException e) {
        log.error("Error while reading number of values: " + e);
        return false;
    }
    for (int i = 0; i < count; i++) {
        switch (type) {
        case PropertyType.BINARY:
            int size;
            try {
                size = in.readInt();
                log.debug("  binary size: " + size);
            } catch (IOException e) {
                log.error("Error while reading size of binary: " + e);
                return false;
            }
            if (size == BINARY_IN_DATA_STORE) {
                try {
                    String s = in.readUTF();
                    // truncate log output to the first 80 characters
                    // (substring(80) would have kept the tail instead)
                    if (s.length() > 80) {
                        s = s.substring(0, 80) + "...";
                    }
                    log.debug("  global data store id: " + s);
                } catch (IOException e) {
                    log.error("Error while reading blob id: " + e);
                    return false;
                }
            } else if (size == BINARY_IN_BLOB_STORE) {
                try {
                    String s = in.readUTF();
                    log.debug("  blobid: " + s);
                } catch (IOException e) {
                    log.error("Error while reading blob id: " + e);
                    return false;
                }
            } else {
                // short values into memory
                byte[] data = new byte[size];
                try {
                    in.readFully(data);
                    log.debug("  binary: " + data.length + " bytes");
                } catch (IOException e) {
                    log.error("Error while reading inlined binary: " + e);
                    return false;
                }
            }
            break;
        case PropertyType.DOUBLE:
            try {
                double d = in.readDouble();
                log.debug("  double: " + d);
            } catch (IOException e) {
                log.error("Error while reading double value: " + e);
                return false;
            }
            break;
        case PropertyType.LONG:
            try {
                // read as long, not double: a double would silently lose
                // precision for values above 2^53
                long l = in.readLong();
                log.debug("  long: " + l);
            } catch (IOException e) {
                log.error("Error while reading long value: " + e);
                return false;
            }
            break;
        case PropertyType.BOOLEAN:
            try {
                boolean b = in.readBoolean();
                log.debug("  boolean: " + b);
            } catch (IOException e) {
                log.error("Error while reading boolean value: " + e);
                return false;
            }
            break;
        case PropertyType.NAME:
            try {
                Name name = readQName(in);
                log.debug("  name: " + name);
            } catch (IOException e) {
                log.error("Error while reading name value: " + e);
                return false;
            }
            break;
        case PropertyType.REFERENCE:
            try {
                UUID uuid = readUUID(in);
                log.debug("  reference: " + uuid);
            } catch (IOException e) {
                log.error("Error while reading reference value: " + e);
                return false;
            }
            break;
        default:
            // because writeUTF(String) has a size limit of 64k,
            // Strings are serialized as <length><byte[]>
            int len;
            try {
                len = in.readInt();
                log.debug("  size of string value: " + len);
            } catch (IOException e) {
                log.error("Error while reading size of string value: " + e);
                return false;
            }
            try {
                byte[] bytes = new byte[len];
                in.readFully(bytes);
                String s = new String(bytes, "UTF-8");
                // truncate log output to the first 80 characters
                if (s.length() > 80) {
                    s = s.substring(0, 80) + "...";
                }
                log.debug("  string: " + s);
            } catch (IOException e) {
                log.error("Error while reading string value: " + e);
                return false;
            }
        }
    }
    return true;
}

From source file:org.apache.jackrabbit.core.persistence.bundle.util.BundleBinding.java

/**
 * Checks a <code>PropertyState</code> from the data input stream.
 *
 * @param in the input stream/*  w w  w .ja  v  a  2 s.co m*/
 * @return <code>true</code> if the data is valid;
 *         <code>false</code> otherwise.
 */
public boolean checkPropertyState(DataInputStream in) {
    int type;
    try {
        type = in.readInt();
        short modCount = (short) ((type >> 16) | 0xffff);
        type &= 0xffff;
        log.debug("  PropertyType: " + PropertyType.nameFromValue(type));
        log.debug("  ModCount: " + modCount);
    } catch (IOException e) {
        log.error("Error while reading property type: " + e);
        return false;
    }
    try {
        boolean isMV = in.readBoolean();
        log.debug("  MultiValued: " + isMV);
    } catch (IOException e) {
        log.error("Error while reading multivalued: " + e);
        return false;
    }
    try {
        String defintionId = in.readUTF();
        log.debug("  DefinitionId: " + defintionId);
    } catch (IOException e) {
        log.error("Error while reading definition id: " + e);
        return false;
    }

    int count;
    try {
        count = in.readInt();
        log.debug("  num values: " + count);
    } catch (IOException e) {
        log.error("Error while reading number of values: " + e);
        return false;
    }
    for (int i = 0; i < count; i++) {
        switch (type) {
        case PropertyType.BINARY:
            int size;
            try {
                size = in.readInt();
                log.debug("  binary size: " + size);
            } catch (IOException e) {
                log.error("Error while reading size of binary: " + e);
                return false;
            }
            if (size == BINARY_IN_DATA_STORE) {
                try {
                    String s = in.readUTF();
                    // truncate log output
                    if (s.length() > 80) {
                        s = s.substring(80) + "...";
                    }
                    log.debug("  global data store id: " + s);
                } catch (IOException e) {
                    log.error("Error while reading blob id: " + e);
                    return false;
                }
            } else if (size == BINARY_IN_BLOB_STORE) {
                try {
                    String s = in.readUTF();
                    log.debug("  blobid: " + s);
                } catch (IOException e) {
                    log.error("Error while reading blob id: " + e);
                    return false;
                }
            } else {
                // short values into memory
                byte[] data = new byte[size];
                try {
                    in.readFully(data);
                    log.debug("  binary: " + data.length + " bytes");
                } catch (IOException e) {
                    log.error("Error while reading inlined binary: " + e);
                    return false;
                }
            }
            break;
        case PropertyType.DOUBLE:
            try {
                double d = in.readDouble();
                log.debug("  double: " + d);
            } catch (IOException e) {
                log.error("Error while reading double value: " + e);
                return false;
            }
            break;
        case PropertyType.DECIMAL:
            try {
                BigDecimal d = readDecimal(in);
                log.debug("  decimal: " + d);
            } catch (IOException e) {
                log.error("Error while reading decimal value: " + e);
                return false;
            }
            break;
        case PropertyType.LONG:
            try {
                double l = in.readLong();
                log.debug("  long: " + l);
            } catch (IOException e) {
                log.error("Error while reading long value: " + e);
                return false;
            }
            break;
        case PropertyType.BOOLEAN:
            try {
                boolean b = in.readBoolean();
                log.debug("  boolean: " + b);
            } catch (IOException e) {
                log.error("Error while reading boolean value: " + e);
                return false;
            }
            break;
        case PropertyType.NAME:
            try {
                Name name = readQName(in);
                log.debug("  name: " + name);
            } catch (IOException e) {
                log.error("Error while reading name value: " + e);
                return false;
            }
            break;
        case PropertyType.WEAKREFERENCE:
        case PropertyType.REFERENCE:
            try {
                NodeId id = readID(in);
                log.debug("  reference: " + id);
            } catch (IOException e) {
                log.error("Error while reading reference value: " + e);
                return false;
            }
            break;
        default:
            // because writeUTF(String) has a size limit of 64k,
            // Strings are serialized as <length><byte[]>
            int len;
            try {
                len = in.readInt();
                log.debug("  size of string value: " + len);
            } catch (IOException e) {
                log.error("Error while reading size of string value: " + e);
                return false;
            }
            try {
                byte[] bytes = new byte[len];
                in.readFully(bytes);
                String s = new String(bytes, "UTF-8");
                // truncate log output
                if (s.length() > 80) {
                    s = s.substring(80) + "...";
                }
                log.debug("  string: " + s);
            } catch (IOException e) {
                log.error("Error while reading string value: " + e);
                return false;
            }
        }
    }
    return true;
}

From source file:org.apache.jackrabbit.core.persistence.bundle.util.BundleBinding.java

/**
 * Checks a <code>PropertyState</code> from the data input stream.
 * <p>
 * The serialized layout read here is: a packed int (high 16 bits = mod
 * count, low 16 bits = property type), a multi-valued flag, a definition
 * id, a value count, then <code>count</code> values whose encoding depends
 * on the property type. Each read is wrapped in its own try/catch so a
 * truncated or corrupt record is reported and <code>false</code> is
 * returned instead of propagating an exception.
 *
 * @param in the input stream
 * @return <code>true</code> if the data is valid;
 *         <code>false</code> otherwise.
 */
public boolean checkPropertyState(DataInputStream in) {
    int type;
    try {
        type = in.readInt();
        // The high 16 bits hold the mod count; extract with a mask (&).
        // The previous code OR'ed with 0xffff, which always produced -1.
        short modCount = (short) ((type >> 16) & 0xffff);
        type &= 0xffff;
        log.debug("  PropertyType: " + PropertyType.nameFromValue(type));
        log.debug("  ModCount: " + modCount);
    } catch (IOException e) {
        log.error("Error while reading property type: " + e);
        return false;
    }
    try {
        boolean isMV = in.readBoolean();
        log.debug("  MultiValued: " + isMV);
    } catch (IOException e) {
        log.error("Error while reading multivalued: " + e);
        return false;
    }
    try {
        String definitionId = in.readUTF();
        log.debug("  DefinitionId: " + definitionId);
    } catch (IOException e) {
        log.error("Error while reading definition id: " + e);
        return false;
    }

    int count;
    try {
        count = in.readInt();
        log.debug("  num values: " + count);
    } catch (IOException e) {
        log.error("Error while reading number of values: " + e);
        return false;
    }
    for (int i = 0; i < count; i++) {
        switch (type) {
        case PropertyType.BINARY:
            int size;
            try {
                size = in.readInt();
                log.debug("  binary size: " + size);
            } catch (IOException e) {
                log.error("Error while reading size of binary: " + e);
                return false;
            }
            if (size == BINARY_IN_DATA_STORE) {
                try {
                    String s = in.readUTF();
                    // truncate log output to the first 80 characters
                    // (substring(80) would have kept the tail instead)
                    if (s.length() > 80) {
                        s = s.substring(0, 80) + "...";
                    }
                    log.debug("  global data store id: " + s);
                } catch (IOException e) {
                    log.error("Error while reading blob id: " + e);
                    return false;
                }
            } else if (size == BINARY_IN_BLOB_STORE) {
                try {
                    String s = in.readUTF();
                    log.debug("  blobid: " + s);
                } catch (IOException e) {
                    log.error("Error while reading blob id: " + e);
                    return false;
                }
            } else {
                // short values into memory
                byte[] data = new byte[size];
                try {
                    in.readFully(data);
                    log.debug("  binary: " + data.length + " bytes");
                } catch (IOException e) {
                    log.error("Error while reading inlined binary: " + e);
                    return false;
                }
            }
            break;
        case PropertyType.DOUBLE:
            try {
                double d = in.readDouble();
                log.debug("  double: " + d);
            } catch (IOException e) {
                log.error("Error while reading double value: " + e);
                return false;
            }
            break;
        case PropertyType.DECIMAL:
            try {
                BigDecimal d = readDecimal(in);
                log.debug("  decimal: " + d);
            } catch (IOException e) {
                log.error("Error while reading decimal value: " + e);
                return false;
            }
            break;
        case PropertyType.LONG:
            try {
                // read as long, not double: a double would silently lose
                // precision for values above 2^53
                long l = in.readLong();
                log.debug("  long: " + l);
            } catch (IOException e) {
                log.error("Error while reading long value: " + e);
                return false;
            }
            break;
        case PropertyType.BOOLEAN:
            try {
                boolean b = in.readBoolean();
                log.debug("  boolean: " + b);
            } catch (IOException e) {
                log.error("Error while reading boolean value: " + e);
                return false;
            }
            break;
        case PropertyType.NAME:
            try {
                Name name = readQName(in);
                log.debug("  name: " + name);
            } catch (IOException e) {
                log.error("Error while reading name value: " + e);
                return false;
            }
            break;
        case PropertyType.WEAKREFERENCE:
        case PropertyType.REFERENCE:
            try {
                UUID uuid = readUUID(in);
                log.debug("  reference: " + uuid);
            } catch (IOException e) {
                log.error("Error while reading reference value: " + e);
                return false;
            }
            break;
        default:
            // because writeUTF(String) has a size limit of 64k,
            // Strings are serialized as <length><byte[]>
            int len;
            try {
                len = in.readInt();
                log.debug("  size of string value: " + len);
            } catch (IOException e) {
                log.error("Error while reading size of string value: " + e);
                return false;
            }
            try {
                byte[] bytes = new byte[len];
                in.readFully(bytes);
                String s = new String(bytes, "UTF-8");
                // truncate log output to the first 80 characters
                if (s.length() > 80) {
                    s = s.substring(0, 80) + "...";
                }
                log.debug("  string: " + s);
            } catch (IOException e) {
                log.error("Error while reading string value: " + e);
                return false;
            }
        }
    }
    return true;
}

From source file:org.apache.hadoop.hdfs.DFSClient.java

/**
 * Gets the MD5-of-MD5-of-CRC32 checksum of a file: for each block, a
 * per-block checksum is requested from one of the block's datanodes over
 * the data-transfer protocol; the concatenated block MD5s are then hashed
 * again to produce the file checksum. If a datanode rejects the request
 * with an access-token error, the block locations (and their tokens) are
 * refetched once and that block is retried.
 *
 * @param src The file path
 * @param namenode protocol handle used to look up the file's block locations
 * @param socketFactory factory used to open the socket to each datanode
 * @param socketTimeout base socket timeout in milliseconds; extended per
 *        replica when positive, 0 means no timeout
 * @return The checksum
 * @throws FileNotFoundException if the file does not exist
 * @throws IOException if a block's checksum cannot be obtained from any
 *         of its replicas
 */
public static MD5MD5CRC32FileChecksum getFileChecksum(String src, ClientProtocol namenode,
        SocketFactory socketFactory, int socketTimeout) throws IOException {
    //get all block locations
    LocatedBlocks blockLocations = callGetBlockLocations(namenode, src, 0, Long.MAX_VALUE);
    if (null == blockLocations) {
        throw new FileNotFoundException("File does not exist: " + src);
    }
    List<LocatedBlock> locatedblocks = blockLocations.getLocatedBlocks();
    // accumulates the MD5 of every block, in block order; digested again at the end
    final DataOutputBuffer md5out = new DataOutputBuffer();
    int bytesPerCRC = 0;
    long crcPerBlock = 0;
    boolean refetchBlocks = false;
    // index of the last block retried after a token error; guards against
    // retrying the same block more than once
    int lastRetriedIndex = -1;

    //get block checksum for each block
    for (int i = 0; i < locatedblocks.size(); i++) {
        if (refetchBlocks) { // refetch to get fresh tokens
            blockLocations = callGetBlockLocations(namenode, src, 0, Long.MAX_VALUE);
            if (null == blockLocations) {
                throw new FileNotFoundException("File does not exist: " + src);
            }
            locatedblocks = blockLocations.getLocatedBlocks();
            refetchBlocks = false;
        }
        LocatedBlock lb = locatedblocks.get(i);
        final Block block = lb.getBlock();
        final DatanodeInfo[] datanodes = lb.getLocations();

        //try each datanode location of the block; 0 disables the timeout
        final int timeout = (socketTimeout > 0)
                ? (socketTimeout + HdfsConstants.READ_TIMEOUT_EXTENSION * datanodes.length)
                : 0;

        boolean done = false;
        for (int j = 0; !done && j < datanodes.length; j++) {
            // declared outside try so the finally block can close them even
            // when connect/stream setup fails partway through
            Socket sock = null;
            DataOutputStream out = null;
            DataInputStream in = null;

            try {
                //connect to a datanode
                sock = socketFactory.createSocket();
                NetUtils.connect(sock, NetUtils.createSocketAddr(datanodes[j].getName()), timeout);
                sock.setSoTimeout(timeout);

                out = new DataOutputStream(
                        new BufferedOutputStream(NetUtils.getOutputStream(sock), DataNode.SMALL_BUFFER_SIZE));
                in = new DataInputStream(NetUtils.getInputStream(sock));

                if (LOG.isDebugEnabled()) {
                    LOG.debug("write to " + datanodes[j].getName() + ": "
                            + DataTransferProtocol.OP_BLOCK_CHECKSUM + ", block=" + block);
                }

                // get block MD5 — the request/response field order below is
                // fixed by the data-transfer protocol and must not be reordered
                out.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
                out.write(DataTransferProtocol.OP_BLOCK_CHECKSUM);
                out.writeLong(block.getBlockId());
                out.writeLong(block.getGenerationStamp());
                lb.getBlockToken().write(out);
                out.flush();

                final short reply = in.readShort();
                if (reply != DataTransferProtocol.OP_STATUS_SUCCESS) {
                    if (reply == DataTransferProtocol.OP_STATUS_ERROR_ACCESS_TOKEN && i > lastRetriedIndex) {
                        if (LOG.isDebugEnabled()) {
                            LOG.debug("Got access token error in response to OP_BLOCK_CHECKSUM " + "for file "
                                    + src + " for block " + block + " from datanode " + datanodes[j].getName()
                                    + ". Will retry the block once.");
                        }
                        lastRetriedIndex = i;
                        done = true; // actually it's not done; but we'll retry
                        i--; // repeat at i-th block
                        refetchBlocks = true;
                        break;
                    } else {
                        throw new IOException("Bad response " + reply + " for block " + block
                                + " from datanode " + datanodes[j].getName());
                    }
                }

                //read byte-per-checksum; every block must agree with the first
                final int bpc = in.readInt();
                if (i == 0) { //first block
                    bytesPerCRC = bpc;
                } else if (bpc != bytesPerCRC) {
                    throw new IOException(
                            "Byte-per-checksum not matched: bpc=" + bpc + " but bytesPerCRC=" + bytesPerCRC);
                }

                //read crc-per-block; only recorded for multi-block files
                final long cpb = in.readLong();
                if (locatedblocks.size() > 1 && i == 0) {
                    crcPerBlock = cpb;
                }

                //read md5 of this block and append it to the running buffer
                final MD5Hash md5 = MD5Hash.read(in);
                md5.write(md5out);

                done = true;

                if (LOG.isDebugEnabled()) {
                    if (i == 0) {
                        LOG.debug("set bytesPerCRC=" + bytesPerCRC + ", crcPerBlock=" + crcPerBlock);
                    }
                    LOG.debug("got reply from " + datanodes[j].getName() + ": md5=" + md5);
                }
            } catch (IOException ie) {
                // a failed replica is only logged; the next replica is tried
                LOG.warn("src=" + src + ", datanodes[" + j + "].getName()=" + datanodes[j].getName(), ie);
            } finally {
                IOUtils.closeStream(in);
                IOUtils.closeStream(out);
                IOUtils.closeSocket(sock);
            }
        }

        if (!done) {
            throw new IOException("Fail to get block MD5 for " + block);
        }
    }

    //compute file MD5 over the concatenated block MD5s
    final MD5Hash fileMD5 = MD5Hash.digest(md5out.getData());
    return new MD5MD5CRC32FileChecksum(bytesPerCRC, crcPerBlock, fileMD5);
}

From source file:org.ramadda.repository.database.DatabaseManager.java

/**
 * _more_/*from   ww w . ja  va 2  s  .c om*/
 *
 * @param file _more_
 * @param doDrop _more_
 *
 * @throws Exception _more_
 */
public void loadRdbFile(String file, boolean doDrop) throws Exception {

    DataInputStream dis = new DataInputStream(new FileInputStream(file));
    XmlEncoder encoder = new XmlEncoder();
    String tableXml = readString(dis);
    List<TableInfo> tableInfos = (List<TableInfo>) encoder.toObject(tableXml);
    System.err.println("# table infos:" + tableInfos.size());
    Hashtable<String, TableInfo> tables = new Hashtable<String, TableInfo>();
    StringBuffer sql = new StringBuffer();
    StringBuffer drop = new StringBuffer();
    for (TableInfo tableInfo : tableInfos) {
        tables.put(tableInfo.getName(), tableInfo);
        drop.append("drop table " + tableInfo.getName() + ";\n");
        sql.append("CREATE TABLE " + tableInfo.getName() + "  (\n");
        for (int i = 0; i < tableInfo.getColumns().size(); i++) {
            ColumnInfo column = tableInfo.getColumns().get(i);
            if (i > 0) {
                sql.append(",\n");
            }
            sql.append(column.getName());
            sql.append(" ");
            int type = column.getType();

            if (type == ColumnInfo.TYPE_TIMESTAMP) {
                sql.append("ramadda.datetime");
            } else if (type == ColumnInfo.TYPE_VARCHAR) {
                sql.append("varchar(" + column.getSize() + ")");
            } else if (type == ColumnInfo.TYPE_INTEGER) {
                sql.append("int");
            } else if (type == ColumnInfo.TYPE_DOUBLE) {
                sql.append("ramadda.double");
            } else if (type == ColumnInfo.TYPE_BIGINT) {
                sql.append("ramadda.bigint");
            } else if (type == ColumnInfo.TYPE_SMALLINT) {
                sql.append("int");
            } else if (type == ColumnInfo.TYPE_CLOB) {
                sql.append(convertType("clob", column.getSize()));
            } else if (type == ColumnInfo.TYPE_BLOB) {
                sql.append(convertType("blob", column.getSize()));
            } else if (type == ColumnInfo.TYPE_UNKNOWN) {
                //                    sql.append(convertType("blob", column.getSize()));
            } else {
                throw new IllegalStateException("Unknown column type:" + type);
            }
        }
        sql.append(");\n");
        for (IndexInfo indexInfo : tableInfo.getIndices()) {
            sql.append("CREATE INDEX " + indexInfo.getName() + " ON " + tableInfo.getName() + " ("
                    + indexInfo.getColumnName() + ");\n");
        }
    }

    //        System.err.println(drop);
    //        System.err.println(sql);

    //TODO: 
    if (doDrop) {
        loadSql(drop.toString(), true, false);
    }
    loadSql(convertSql(sql.toString()), false, true);

    TableInfo tableInfo = null;
    int rows = 0;
    Connection connection = getConnection();
    try {
        while (true) {
            int what = dis.readInt();
            if (what == DUMPTAG_TABLE) {
                String tableName = readString(dis);
                tableInfo = tables.get(tableName);
                if (tableInfo == null) {
                    throw new IllegalArgumentException("No table:" + tableName);
                }
                if (tableInfo.statement == null) {
                    String insert = SqlUtil.makeInsert(tableInfo.getName(), tableInfo.getColumnNames());
                    tableInfo.statement = connection.prepareStatement(insert);
                }
                System.err.println("importing table:" + tableInfo.getName());

                continue;
            }
            if (what == DUMPTAG_END) {
                break;
            }
            if (what != DUMPTAG_ROW) {
                throw new IllegalArgumentException("Unkown tag:" + what);
            }

            rows++;
            if ((rows % 1000) == 0) {
                System.err.println("rows:" + rows);
            }

            Object[] values = new Object[tableInfo.getColumns().size()];
            int colCnt = 0;
            for (ColumnInfo columnInfo : tableInfo.getColumns()) {
                int type = columnInfo.getType();
                if (type == ColumnInfo.TYPE_TIMESTAMP) {
                    long dttm = dis.readLong();
                    values[colCnt++] = new Date(dttm);
                } else if (type == ColumnInfo.TYPE_VARCHAR) {
                    String s = readString(dis);
                    if ((s != null) && (s.length() > 5000)) {
                        //A hack for old dbs
                        if (tableInfo.getName().equals("metadata")) {
                            s = s.substring(0, 4999);
                            System.err.println("clipping: " + tableInfo.getName() + "." + columnInfo.getName());
                        }

                    }
                    values[colCnt++] = s;
                } else if (type == ColumnInfo.TYPE_INTEGER) {
                    values[colCnt++] = new Integer(dis.readInt());
                } else if (type == ColumnInfo.TYPE_DOUBLE) {
                    values[colCnt++] = new Double(dis.readDouble());
                } else if (type == ColumnInfo.TYPE_CLOB) {
                    values[colCnt++] = readString(dis);
                } else if (type == ColumnInfo.TYPE_BLOB) {
                    values[colCnt++] = readString(dis);
                } else if (type == ColumnInfo.TYPE_BIGINT) {
                    long v = dis.readLong();
                    values[colCnt++] = new Long(v);
                } else if (type == ColumnInfo.TYPE_SMALLINT) {
                    short v = dis.readShort();
                    values[colCnt++] = new Short(v);
                } else if (type == ColumnInfo.TYPE_UNKNOWN) {
                } else {
                    throw new IllegalArgumentException(
                            "Unknown type for table" + tableInfo.getName() + " " + type);
                }
            }
            setValues(tableInfo.statement, values);
            tableInfo.statement.addBatch();
            tableInfo.batchCnt++;
            if (tableInfo.batchCnt > 1000) {
                tableInfo.batchCnt = 0;
                tableInfo.statement.executeBatch();

            }
        }

        //Now finish up the batch
        for (TableInfo ti : tableInfos) {
            if (ti.batchCnt > 0) {
                ti.batchCnt = 0;
                ti.statement.executeBatch();
            }
        }
    } finally {
        IOUtil.close(dis);
        closeConnection(connection);
    }

    System.err.println("imported " + rows + " rows");

}

From source file:com.codename1.impl.android.AndroidImplementation.java

/**
 * Reads the push messages queued in the private file
 * "CN1$AndroidPendingNotifications" and returns their user-visible bodies.
 *
 * @param type unused; kept for interface compatibility
 * @param a    the Android context used to open the private file
 * @return the pending message bodies, or null when the file is missing,
 *         empty, or cannot be read
 */
public static String[] getPendingPush(String type, Context a) {
    InputStream i = null;
    try {
        i = a.openFileInput("CN1$AndroidPendingNotifications");
        if (i == null) {
            return null;
        }
        DataInputStream is = new DataInputStream(i);
        int count = is.readByte();
        Vector<String> v = new Vector<String>();
        for (int iter = 0; iter < count; iter++) {
            boolean hasType = is.readBoolean();
            String actualType = null;
            if (hasType) {
                actualType = is.readUTF();
            }

            final String t;
            final String b;
            if ("99".equals(actualType)) {
                // Rich push: the payload is a query string carrying the
                // real type and body (plus category/image, unused here).
                Map<String, String> vals = splitQuery(is.readUTF());
                t = vals.get("type");
                b = vals.get("body");
            } else {
                t = actualType;
                b = is.readUTF();
            }
            // Timestamp field: read it to keep the stream position correct
            // even though the value is not used here.
            is.readLong();
            if (t != null && ("3".equals(t) || "6".equals(t))) {
                // body is split on ';' and only the first segment kept
                v.add(b.split(";")[0]);
            } else if (t != null && "4".equals(t)) {
                // body is split on ';' and only the second segment kept
                v.add(b.split(";")[1]);
            } else if (t != null && "2".equals(t)) {
                // type "2" entries produce no visible message
                continue;
            } else if (t != null && "101".equals(t)) {
                // strip the leading token up to the first space
                v.add(b.substring(b.indexOf(" ") + 1));
            } else {
                v.add(b);
            }
        }
        return v.toArray(new String[v.size()]);

    } catch (Exception ex) {
        ex.printStackTrace();
    } finally {
        try {
            if (i != null) {
                i.close();
            }
        } catch (IOException ignored) {
        }
    }
    return null;
}

From source file:com.codename1.impl.android.AndroidImplementation.java

/**
 * Replays the push messages queued in the private file
 * "CN1$AndroidPendingNotifications" through the given callback (each
 * delivered serially on the EDT), then deletes the file.
 *
 * @param c the push callback to invoke; when null this is a no-op
 * @param a the Android context used to open/delete the private file
 */
public static void firePendingPushes(final PushCallback c, final Context a) {
    if (c == null) {
        return;
    }
    InputStream i = null;
    try {
        i = a.openFileInput("CN1$AndroidPendingNotifications");
        if (i == null) {
            return;
        }
        DataInputStream is = new DataInputStream(i);
        int count = is.readByte();
        for (int iter = 0; iter < count; iter++) {
            boolean hasType = is.readBoolean();
            String actualType = null;
            if (hasType) {
                actualType = is.readUTF();
            }
            final String t;
            final String b;
            final String category;
            final String image;
            if ("99".equals(actualType)) {
                // Rich push: the payload is a query string carrying the
                // real type, body, category and image.
                Map<String, String> vals = splitQuery(is.readUTF());
                t = vals.get("type");
                b = vals.get("body");
                category = vals.get("category");
                image = vals.get("image");
            } else {
                t = actualType;
                b = is.readUTF();
                category = null;
                image = null;
            }
            // Timestamp field: read it to keep the stream position correct
            // even though the value is not used here.
            is.readLong();
            Display.getInstance().callSerially(new Runnable() {
                @Override
                public void run() {
                    Display.getInstance().setProperty("pendingPush", "true");
                    Display.getInstance().setProperty("pushType", t);
                    initPushContent(b, image, t, category, a);
                    if (t != null && ("3".equals(t) || "6".equals(t))) {
                        // renamed from "a" to avoid shadowing the Context
                        String[] parts = b.split(";");
                        c.push(parts[0]);
                        c.push(parts[1]);
                    } else if (t != null && ("101".equals(t))) {
                        c.push(b.substring(b.indexOf(" ") + 1));
                    } else {
                        c.push(b);
                    }
                    Display.getInstance().setProperty("pendingPush", null);
                }
            });
        }
        a.deleteFile("CN1$AndroidPendingNotifications");
    } catch (IOException err) {
        // Swallowed intentionally: openFileInput throws when the file does
        // not exist, which simply means there are no pending pushes.
    } finally {
        if (i != null) {
            try {
                i.close();
            } catch (IOException ignored) {
            }
        }
    }
}

From source file:org.apache.hadoop.hdfs.server.namenode.IngestLocal.java

/**
 * Continue to ingest transaction logs until the currentState is
 * no longer INGEST. If lastScan is set to true, then we process
 * till the end of the file and return.
 *
 * @param fname      the local edits file being ingested; its modification
 *                   time is checked against the last checkpoint signature
 * @param in         stream over the edits file; may be re-created from the
 *                   instance's file channel on retry or on the last scan
 * @param logVersion on-disk layout version of the edits log (negative;
 *                   more negative values denote newer formats)
 * @return the total number of transactions consumed
 * @throws IOException if the local log's timestamp does not match the
 *                     primary's signature, or a record is malformed
 */
int ingestFSEdits(File fname, DataInputStream in, int logVersion) throws IOException {
    FSNamesystem fsNamesys = FSNamesystem.getFSNamesystem();
    FSDirectory fsDir = fsNamesys.dir;
    int numEdits = 0;
    String clientName = null;
    String clientMachine = null;
    String path = null;
    // Per-opcode counters; incremented for bookkeeping but not reported here.
    int numOpAdd = 0, numOpClose = 0, numOpDelete = 0, numOpRename = 0, numOpSetRepl = 0, numOpMkDir = 0,
            numOpSetPerm = 0, numOpSetOwner = 0, numOpSetGenStamp = 0, numOpTimes = 0, numOpOther = 0;
    long startTime = FSNamesystem.now();
    boolean error = false;
    boolean quitAfterScan = false;

    while (running && !quitAfterScan) {

        // if the application requested that we make a final pass over 
        // the transaction log, then we remember it here. We close and
        // reopen the file to ensure that we can see all the data in the
        // file, one reason being that NFS has open-to-close cache
        // coherency and the edit log could be stored in NFS.
        //
        if (lastScan) {
            LOG.info("Ingest: Starting last scan of transaction log " + fname);
            quitAfterScan = true;
            fp.close();
            rp = new RandomAccessFile(fname, "r");
            fp = new FileInputStream(rp.getFD()); // open for reads
            fc = rp.getChannel();

            // discard older buffers and start a fresh one.
            fc.position(currentPosition);
            in = new DataInputStream(fp);
        }

        //
        // Verify that signature of file matches. This is important in the
        // case when the Primary NN was configured to write transactions to
        // devices (local and NFS) and the Primary had encountered errors
        // to the NFS device and has continued writing transactions to its
        // device only. In this case, the rollEditLog() RPC would return the
        // modtime of the edits file of the Primary's local device and will
        // not match with the timestamp of our local log from where we are
        // ingesting.
        //
        CheckpointSignature signature = getLastCheckpointSignature();
        if (signature != null) {
            long localtime = fname.lastModified();

            LOG.info("editLog : " + fname.getPath());
            LOG.info("editLog.lastModifiedTime : " + localtime);

            if (localtime == signature.editsTime) {
                LOG.debug("Ingest: Matched modification time of edits log. ");
            } else if (localtime < signature.editsTime) {
                LOG.info("Ingest: Timestamp of transaction log on local machine is " + localtime
                        + " and on remote namenode is " + signature.editsTime);
                String msg = "Ingest: Timestamp of transaction log on local machine is "
                        + DATE_FORM.format(new Date(localtime)) + " and on remote namenode is "
                        + DATE_FORM.format(new Date(signature.editsTime));
                LOG.info(msg);
                throw new IOException(msg);
            } else {
                LOG.info("Ingest: Timestamp of transaction log on local machine is " + localtime
                        + " and on remote namenode is " + signature.editsTime);
                String msg = "Ingest: Timestamp of transaction log on localmachine is "
                        + DATE_FORM.format(new Date(localtime)) + " and on remote namenode is "
                        + DATE_FORM.format(new Date(signature.editsTime)) + ". But this can never happen.";
                LOG.info(msg);
                throw new IOException(msg);
            }
        }

        //
        // Process all existing transactions till end of file
        //
        while (running) {
            currentPosition = fc.position(); // record the current file offset.

            try {
                long timestamp = 0;
                long mtime = 0;
                long atime = 0;
                long blockSize = 0;
                byte opcode = -1;
                error = false;
                try {
                    opcode = in.readByte();
                    if (opcode == OP_INVALID) {
                        FSNamesystem.LOG.debug("Ingest: Invalid opcode, reached end of log "
                                + "Number of transactions found " + numEdits);
                        break; // No more transactions.
                    }
                } catch (EOFException e) {
                    break; // No more transactions.
                }
                switch (opcode) {
                case OP_ADD:
                case OP_CLOSE: {
                    // versions > 0 support per file replication
                    // get name and replication
                    int length = in.readInt();
                    // the expected record length varies by log format version
                    if (-7 == logVersion && length != 3 || -17 < logVersion && logVersion < -7 && length != 4
                            || logVersion <= -17 && length != 5) {
                        throw new IOException("Ingest: Incorrect data format." + " logVersion is " + logVersion
                                + " but writables.length is " + length + ". ");
                    }
                    path = FSImage.readString(in);
                    short replication = readShort(in);
                    mtime = readLong(in);
                    if (logVersion <= -17) {
                        atime = readLong(in);
                    }
                    if (logVersion < -7) {
                        blockSize = readLong(in);
                    }
                    // get blocks
                    Block blocks[] = null;
                    if (logVersion <= -14) {
                        blocks = readBlocks(in);

                    } else {
                        // older formats store blocks in the BlockTwo layout
                        BlockTwo oldblk = new BlockTwo();
                        int num = in.readInt();
                        blocks = new Block[num];
                        for (int i = 0; i < num; i++) {
                            oldblk.readFields(in);
                            blocks[i] = new Block(oldblk.blkid, oldblk.len, Block.GRANDFATHER_GENERATION_STAMP);
                        }
                    }

                    // Older versions of HDFS does not store the block size in inode.
                    // If the file has more than one block, use the size of the
                    // first block as the blocksize. Otherwise use the default
                    // block size.
                    if (-8 <= logVersion && blockSize == 0) {
                        if (blocks.length > 1) {
                            blockSize = blocks[0].getNumBytes();
                        } else {
                            long first = ((blocks.length == 1) ? blocks[0].getNumBytes() : 0);
                            blockSize = Math.max(fsNamesys.getDefaultBlockSize(), first);
                        }
                    }

                    PermissionStatus permissions = fsNamesys.getUpgradePermission();
                    if (logVersion <= -11) {
                        permissions = PermissionStatus.read(in);
                    }

                    // clientname, clientMachine and block locations of last block.
                    if (opcode == OP_ADD && logVersion <= -12) {
                        clientName = FSImage.readString(in);
                        clientMachine = FSImage.readString(in);
                        if (-13 <= logVersion) {
                            readDatanodeDescriptorArray(in);
                        }
                    } else {
                        clientName = "";
                        clientMachine = "";
                    }

                    // The open lease transaction re-creates a file if necessary.
                    // Delete the file if it already exists.
                    if (FSNamesystem.LOG.isDebugEnabled()) {
                        FSNamesystem.LOG.debug(opcode + ": " + path + " numblocks : " + blocks.length
                                + " clientHolder " + clientName + " clientMachine " + clientMachine);
                    }

                    fsDir.unprotectedDelete(path, mtime);

                    // add to the file tree
                    INodeFile node = (INodeFile) fsDir.unprotectedAddFile(path, permissions, blocks,
                            replication, mtime, atime, blockSize);
                    if (opcode == OP_ADD) {
                        numOpAdd++;
                        //
                        // Replace current node with a INodeUnderConstruction.
                        // Recreate in-memory lease record.
                        //
                        INodeFileUnderConstruction cons = new INodeFileUnderConstruction(
                                node.getLocalNameBytes(), node.getReplication(), node.getModificationTime(),
                                node.getPreferredBlockSize(), node.getBlocks(), node.getPermissionStatus(),
                                clientName, clientMachine, null);
                        fsDir.replaceNode(path, node, cons);
                        fsNamesys.leaseManager.addLease(cons.clientName, path);
                    }
                    break;
                }
                case OP_SET_REPLICATION: {
                    numOpSetRepl++;
                    path = FSImage.readString(in);
                    short replication = readShort(in);
                    fsDir.unprotectedSetReplication(path, replication, null);
                    break;
                }
                case OP_RENAME: {
                    numOpRename++;
                    int length = in.readInt();
                    if (length != 3) {
                        throw new IOException("Ingest: Incorrect data format. " + "Mkdir operation.");
                    }
                    String s = FSImage.readString(in);
                    String d = FSImage.readString(in);
                    timestamp = readLong(in);
                    FileStatus dinfo = fsDir.getFileInfo(d);
                    fsDir.unprotectedRenameTo(s, d, timestamp);
                    fsNamesys.changeLease(s, d, dinfo);
                    break;
                }
                case OP_DELETE: {
                    numOpDelete++;
                    int length = in.readInt();
                    if (length != 2) {
                        throw new IOException("Ingest: Incorrect data format. " + "delete operation.");
                    }
                    path = FSImage.readString(in);
                    timestamp = readLong(in);
                    fsDir.unprotectedDelete(path, timestamp);
                    break;
                }
                case OP_MKDIR: {
                    numOpMkDir++;
                    PermissionStatus permissions = fsNamesys.getUpgradePermission();
                    int length = in.readInt();
                    if (-17 < logVersion && length != 2 || logVersion <= -17 && length != 3) {
                        throw new IOException("Ingest: Incorrect data format. " + "Mkdir operation.");
                    }
                    path = FSImage.readString(in);
                    timestamp = readLong(in);

                    // The disk format stores atimes for directories as well.
                    // However, currently this is not being updated/used because of
                    // performance reasons.
                    if (logVersion <= -17) {
                        atime = readLong(in);
                    }

                    if (logVersion <= -11) {
                        permissions = PermissionStatus.read(in);
                    }
                    fsDir.unprotectedMkdir(path, permissions, timestamp);
                    break;
                }
                case OP_SET_GENSTAMP: {
                    numOpSetGenStamp++;
                    long lw = in.readLong();
                    fsDir.namesystem.setGenerationStamp(lw);
                    break;
                }
                case OP_DATANODE_ADD: {
                    numOpOther++;
                    FSImage.DatanodeImage nodeimage = new FSImage.DatanodeImage();
                    nodeimage.readFields(in);
                    // Datanodes are not persistent any more; the record is
                    // consumed only to advance the stream.
                    break;
                }
                case OP_DATANODE_REMOVE: {
                    numOpOther++;
                    DatanodeID nodeID = new DatanodeID();
                    nodeID.readFields(in);

                    //Datanodes are not persistent any more.
                    break;
                }
                case OP_SET_PERMISSIONS: {
                    numOpSetPerm++;
                    if (logVersion > -11)
                        throw new IOException(
                                "Ingest: Unexpected opcode " + opcode + " for version " + logVersion);
                    fsDir.unprotectedSetPermission(FSImage.readString(in), FsPermission.read(in));
                    break;
                }
                case OP_SET_OWNER: {
                    numOpSetOwner++;
                    if (logVersion > -11)
                        throw new IOException(
                                "Ingest: Unexpected opcode " + opcode + " for version " + logVersion);
                    fsDir.unprotectedSetOwner(FSImage.readString(in), FSImage.readString_EmptyAsNull(in),
                            FSImage.readString_EmptyAsNull(in));
                    break;
                }
                case OP_SET_NS_QUOTA: {
                    if (logVersion > -16) {
                        throw new IOException(
                                "Ingest: Unexpected opcode " + opcode + " for version " + logVersion);
                    }
                    fsDir.unprotectedSetQuota(FSImage.readString(in), readLongWritable(in),
                            FSConstants.QUOTA_DONT_SET);
                    break;
                }
                case OP_CLEAR_NS_QUOTA: {
                    if (logVersion > -16) {
                        throw new IOException(
                                "Ingest: Unexpected opcode " + opcode + " for version " + logVersion);
                    }
                    fsDir.unprotectedSetQuota(FSImage.readString(in), FSConstants.QUOTA_RESET,
                            FSConstants.QUOTA_DONT_SET);
                    break;
                }

                case OP_SET_QUOTA:
                    // NOTE(review): unlike the other cases, no counter is
                    // incremented for OP_SET_QUOTA.
                    fsDir.unprotectedSetQuota(FSImage.readString(in), readLongWritable(in),
                            readLongWritable(in));

                    break;

                case OP_TIMES: {
                    numOpTimes++;
                    int length = in.readInt();
                    if (length != 3) {
                        throw new IOException("Ingest: Incorrect data format. " + "times operation.");
                    }
                    path = FSImage.readString(in);
                    mtime = readLong(in);
                    atime = readLong(in);
                    fsDir.unprotectedSetTimes(path, mtime, atime, true);
                    break;
                }
                default: {
                    throw new IOException("Ingest: Never seen opcode " + opcode);
                }
                }
                numEdits++;
                LOG.info("Ingest: Processed transaction from " + fname + " opcode " + opcode + " file offset "
                        + currentPosition);
            } // try ---
            catch (IOException e) {
                error = true; // if we haven't reached eof, then error.
                break;
            }
        } // while (running)-----

        // if we failed to read the entire transaction from disk, 
        // then roll back to the offset where there was a last good 
        // read, sleep for sometime for new transaction to
        // appear in the file and then continue;
        //
        if (error || running) {

            // discard older buffers and start a fresh one.
            fc.position(currentPosition);
            in = new DataInputStream(fp);

            if (error) {
                LOG.info("Ingest: Incomplete transaction record at offset " + fc.position()
                        + " but the file is of size " + fc.size() + ". Continuing....");
            }

            if (running && !lastScan) {
                try {
                    Thread.sleep(1000); // sleep for a second
                } catch (InterruptedException e) {
                    // break out of waiting if we receive an interrupt.
                }
            }
        }
    } //while (running && !quitAfterScan)-------------
    LOG.info("Ingest: Edits file " + fname.getName() + " numedits " + numEdits + " loaded in "
            + (FSNamesystem.now() - startTime) / 1000 + " seconds.");

    // If the last Scan was completed, then stop the Ingest thread.
    if (lastScan && quitAfterScan) {
        LOG.info("Ingest: lastScan completed.");
        running = false;
    }
    return numEdits; // total transactions consumed
}