Example usage for java.io DataInputStream readInt

Introduction

This page collects usage examples for the java.io DataInputStream readInt method, drawn from open-source projects.

Prototype

public final int readInt() throws IOException 

Document

See the general contract of the readInt method of DataInput: it reads four input bytes and returns the int they encode in big-endian order, throwing EOFException if the stream ends before four bytes are available.
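
Before the project examples below, here is a minimal, self-contained sketch of the round trip (the class name and value are illustrative): an int written with DataOutputStream.writeInt is read back with readInt.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class ReadIntDemo {
    public static void main(String[] args) throws IOException {
        // Serialize an int; DataOutputStream writes it as four big-endian bytes
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(bytes)) {
            out.writeInt(42);
        }

        // readInt consumes those four bytes and reassembles the value
        try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
            int value = in.readInt();
            System.out.println("readInt returned " + value); // prints 42
        }
    }
}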

Usage

From source file:net.timewalker.ffmq4.storage.data.impl.AbstractBlockBasedDataStore.java

/**
 * Run an integrity check on the store files and fix them as necessary
 * @throws DataStoreException if the files could not be fixed
 */
protected void integrityCheck() throws DataStoreException {
    try {
        //========================
        // 1 - Check files sizes
        //========================
        // -- Allocation table
        long atFileSize = allocationTableRandomAccessFile.length();
        if (atFileSize < AT_HEADER_SIZE + AT_BLOCK_SIZE) /* Should have at least one entry */
            throw new DataStoreException(
                    "Allocation table is truncated : " + allocationTableFile.getAbsolutePath());

        // Read some header fields
        FileInputStream inFile = new FileInputStream(allocationTableFile);
        DataInputStream in = new DataInputStream(new BufferedInputStream(inFile, 16384));
        int blockCount = in.readInt();
        int blockSize = in.readInt();
        int firstBlock = in.readInt();
        // Fix AT size
        long expectedATFileSize = AT_HEADER_SIZE + AT_BLOCK_SIZE * (long) blockCount;
        if (atFileSize != expectedATFileSize) {
            log.error("[" + descriptor.getName() + "] Allocation table has an invalid size (actual:"
                    + atFileSize + ",expected:" + expectedATFileSize + "), fixing.");
            allocationTableRandomAccessFile.setLength(expectedATFileSize);
        }
        // Fix data size
        long dataFileSize = dataRandomAccessFile.length();
        long expectedDataFileSize = (long) blockSize * blockCount;
        if (dataFileSize != expectedDataFileSize) {
            log.error("[" + descriptor.getName() + "] Data file has an invalid size (actual:" + dataFileSize
                    + ",expected:" + expectedDataFileSize + "), fixing.");
            dataRandomAccessFile.setLength(expectedDataFileSize);
        }

        //============================
        // 2 - Check allocation table
        //============================
        // Read the AT into memory
        byte[] flags = new byte[blockCount];
        int[] allocatedSize = new int[blockCount];
        int[] previousBlock = new int[blockCount];
        int[] nextBlock = new int[blockCount];
        int blocksInUse = 0;
        int msgCount = 0;
        for (int n = 0; n < blockCount; n++) {
            flags[n] = in.readByte();
            allocatedSize[n] = in.readInt();
            previousBlock[n] = in.readInt();
            nextBlock[n] = in.readInt();
            if (allocatedSize[n] != -1) {
                blocksInUse++;
                if ((flags[n] & FLAG_START_BLOCK) > 0)
                    msgCount++;
            }
        }
        in.close();
        log.debug("[" + descriptor.getName() + "] Blocks in use before fix : " + blocksInUse);
        log.debug("[" + descriptor.getName() + "] Messages count before fix : " + msgCount);

        // Fix first block index
        boolean changed = false;
        if (firstBlock < -1 || firstBlock >= blockCount) {
            log.error("[" + descriptor.getName() + "] Invalid allocation table first block index (" + firstBlock
                    + "), guessing new one ...");
            firstBlock = guessFirstBlockIndex(blockCount, allocatedSize, nextBlock);
            log.debug("[" + descriptor.getName() + "] Guessed first block index : " + firstBlock);
            changed = true;
        }

        // Recover table
        if (msgCount == 0) {
            if (firstBlock == -1) {
                // Table is empty, cleanup dirty entries
                changed = changed
                        || cleanupEmptyBlocks(blockCount, flags, allocatedSize, previousBlock, nextBlock);
            } else {
                log.error("[" + descriptor.getName() + "] First block index should be -1, clearing ...");
                firstBlock = -1;
                changed = true;
            }
        } else {
            if (firstBlock == -1) {
                log.error("[" + descriptor.getName() + "] Invalid first block index, guessing value ...");
                firstBlock = guessFirstBlockIndex(blockCount, allocatedSize, nextBlock);
                log.debug("[" + descriptor.getName() + "] Guessed first block index : " + firstBlock);
                changed = true;
            }

            changed = changed || fixBlocks(blockCount, blockSize, firstBlock, flags, allocatedSize,
                    previousBlock, nextBlock);
            changed = changed || cleanupEmptyBlocks(blockCount, flags, allocatedSize, previousBlock, nextBlock);
        }

        // Update the allocation file table
        if (changed) {
            // Re-compute size
            msgCount = 0;
            blocksInUse = 0;
            for (int n = 0; n < blockCount; n++) {
                if (allocatedSize[n] != -1) {
                    blocksInUse++;
                    if ((flags[n] & FLAG_START_BLOCK) > 0)
                        msgCount++;
                }
            }
            log.debug("[" + descriptor.getName() + "] Blocks in use after fix : " + blocksInUse);
            log.debug("[" + descriptor.getName() + "] Messages count after fix : " + msgCount);

            log.debug("[" + descriptor.getName() + "] Allocation table was altered, saving ...");
            allocationTableRandomAccessFile.seek(AT_HEADER_FIRSTBLOCK_OFFSET);
            allocationTableRandomAccessFile.writeInt(firstBlock);
            for (int n = 0; n < blockCount; n++) {
                byte[] allocationBlock = new byte[AT_BLOCK_SIZE];

                // Regroup I/O to improve performance
                allocationBlock[AB_FLAGS_OFFSET] = flags[n];
                allocationBlock[AB_ALLOCSIZE_OFFSET] = (byte) ((allocatedSize[n] >>> 24) & 0xFF);
                allocationBlock[AB_ALLOCSIZE_OFFSET + 1] = (byte) ((allocatedSize[n] >>> 16) & 0xFF);
                allocationBlock[AB_ALLOCSIZE_OFFSET + 2] = (byte) ((allocatedSize[n] >>> 8) & 0xFF);
                allocationBlock[AB_ALLOCSIZE_OFFSET + 3] = (byte) ((allocatedSize[n] >>> 0) & 0xFF);
                allocationBlock[AB_PREVBLOCK_OFFSET] = (byte) ((previousBlock[n] >>> 24) & 0xFF);
                allocationBlock[AB_PREVBLOCK_OFFSET + 1] = (byte) ((previousBlock[n] >>> 16) & 0xFF);
                allocationBlock[AB_PREVBLOCK_OFFSET + 2] = (byte) ((previousBlock[n] >>> 8) & 0xFF);
                allocationBlock[AB_PREVBLOCK_OFFSET + 3] = (byte) ((previousBlock[n] >>> 0) & 0xFF);
                allocationBlock[AB_NEXTBLOCK_OFFSET] = (byte) ((nextBlock[n] >>> 24) & 0xFF);
                allocationBlock[AB_NEXTBLOCK_OFFSET + 1] = (byte) ((nextBlock[n] >>> 16) & 0xFF);
                allocationBlock[AB_NEXTBLOCK_OFFSET + 2] = (byte) ((nextBlock[n] >>> 8) & 0xFF);
                allocationBlock[AB_NEXTBLOCK_OFFSET + 3] = (byte) ((nextBlock[n] >>> 0) & 0xFF);

                allocationTableRandomAccessFile.seek(AT_HEADER_SIZE + n * AT_BLOCK_SIZE);
                allocationTableRandomAccessFile.write(allocationBlock);
            }
            allocationTableRandomAccessFile.getFD().sync();
        } else
            log.debug("[" + descriptor.getName() + "] Allocation table was not altered");
    } catch (IOException e) {
        throw new DataStoreException("Cannot check/fix store integrity : " + e);
    }
}

From source file:com.cohesionforce.dis.BinaryConverter.java

public void run() {
    int count = 0;

    System.out.println("Opening file to convert: " + inputFile);
    FileInputStream fis;
    try {
        fis = new FileInputStream(inputFile);
    } catch (FileNotFoundException e1) {
        e1.printStackTrace();
        return;
    }

    DataInputStream dis = new DataInputStream(fis);
    startWriters();

    System.out.println("Starting to convert PDUs");

    while (done == false) {
        byte buffer[] = new byte[MAX_PDU_SIZE];

        byte pduType;
        try {
            pduType = dis.readByte();
            int pduSize = dis.readInt();
            int skip = dis.read(buffer, 0, 19);
            assert (skip == 19);
            int numberRead = dis.read(buffer, 0, pduSize);
            assert (numberRead == pduSize);
            ++count;

            // Convert the byte array to an object
            Object object;
            object = unmarshaller.getPdu(buffer);
            if (object != null) {
                logPdu(object, pduType);
            }
        } catch (EOFException e) {
            done = true;
        } catch (IOException e) {
            e.printStackTrace();
        } catch (Exception e) {
            done = true;
            e.printStackTrace();
        }

        if (count % 100000 == 0) {
            System.out.println("Converted " + count + " PDUs");
        }
    } // end loop
    try {
        dis.close();
    } catch (IOException e1) {
        e1.printStackTrace();
    }
    System.out.print("Waiting on writers to clear their queues");

    boolean emptyQueue = false;
    while (!emptyQueue) {
        emptyQueue = true;
        for (LogWriter<?> writer : writers) {
            // If any queue is not empty, sleep and check again
            if (!writer.getQueue().isEmpty()) {
                try {
                    emptyQueue = false;
                    System.out.print(".");
                    Thread.sleep(1000);
                    break;
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
            }
        }
    }

    System.out.println("");
    System.out.println("PDUs converted: " + count);
    System.out.println("Shutting down logging threads");
    threadGroup.interrupt();
    int tries = 0;
    while (threadGroup.activeCount() > 0 && tries < 10) {
        try {
            Thread.sleep(2000);
        } catch (InterruptedException e) {
        }
        ++tries;
    }
    System.out.println("Completed logging threads shutdown");

}

From source file:org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore.java

private void loadRMSequentialNumberState(RMState rmState) throws Exception {
    byte[] seqData = getDataWithRetries(dtSequenceNumberPath, false);
    if (seqData != null) {
        ByteArrayInputStream seqIs = new ByteArrayInputStream(seqData);
        DataInputStream seqIn = new DataInputStream(seqIs);

        try {
            rmState.rmSecretManagerState.dtSequenceNumber = seqIn.readInt();
        } finally {
            seqIn.close();
        }
    }
}

From source file:org.carbondata.processing.util.LevelSortIndexWriterThread.java

private MemberSortModel[] getLevelData() throws IOException {
    DataInputStream fileChannel = null;
    long currPositionIndex = 0;
    long size = 0;
    ByteBuffer buffer = null;

    // CHECKSTYLE:OFF
    boolean enableEncoding = Boolean
            .valueOf(CarbonProperties.getInstance().getProperty(CarbonCommonConstants.ENABLE_BASE64_ENCODING,
                    CarbonCommonConstants.ENABLE_BASE64_ENCODING_DEFAULT));
    // CHECKSTYLE:ON
    try {
        fileChannel = FileFactory.getDataInputStream(levelFilePath, FileFactory.getFileType(levelFilePath));
        CarbonFile memberFile = FileFactory.getCarbonFile(levelFilePath,
                FileFactory.getFileType(levelFilePath));
        size = memberFile.getSize() - 4;
        long skipSize = size;
        long actualSkipSize = 0;
        while (actualSkipSize != size) {
            actualSkipSize += fileChannel.skip(skipSize);
            skipSize = skipSize - actualSkipSize;
        }
        maxSurrogate = fileChannel.readInt();
    } catch (IOException e) {
        LOGGER.error(e, "problem while reading the level file");
        throw e;
    } finally {
        CarbonUtil.closeStreams(fileChannel);
    }

    try {
        fileChannel = FileFactory.getDataInputStream(levelFilePath, FileFactory.getFileType(levelFilePath));
        // CHECKSTYLE:OFF
        buffer = ByteBuffer.allocate((int) size);
        // CHECKSTYLE:ON
        fileChannel.readFully(buffer.array());
        buffer.rewind();
    } catch (IOException e) {
        LOGGER.error(e, "problem while reading the level file");
        throw e;
    } finally {
        CarbonUtil.closeStreams(fileChannel);
    }
    minSurrogate = buffer.getInt();
    MemberSortModel[] surogateKeyArrays = new MemberSortModel[maxSurrogate - minSurrogate + 1];
    int surrogateKeyIndex = minSurrogate;
    currPositionIndex += 4;
    int current = 0;

    while (currPositionIndex < size) {
        int len = buffer.getInt();
        // CHECKSTYLE:OFF
        // CHECKSTYLE:ON
        currPositionIndex += 4;
        byte[] rowBytes = new byte[len];
        buffer.get(rowBytes);
        currPositionIndex += len;
        String memberName = null;// CHECKSTYLE:OFF
        if (!memberDataType.equals(DataType.STRING)) {
            if (enableEncoding) {
                memberName = new String(Base64.decodeBase64(rowBytes), Charset.defaultCharset());
            } else {
                memberName = new String(rowBytes, Charset.defaultCharset());
            }
            surogateKeyArrays[current] = new MemberSortModel(surrogateKeyIndex, memberName, null,
                    memberDataType);
        } else {
            if (enableEncoding) {
                rowBytes = Base64.decodeBase64(rowBytes);
            }
            surogateKeyArrays[current] = new MemberSortModel(surrogateKeyIndex, null, rowBytes, memberDataType);
        }
        surrogateKeyIndex++;
        current++;
    }
    return surogateKeyArrays;
}

From source file:org.apache.hadoop.hdfs.server.datanode.DataXceiver.java

/**
 * Write a block to disk.
 * 
 * @param in The stream to read from
 * @throws IOException
 */
private void writeBlock(DataInputStream in) throws IOException {
    DatanodeInfo srcDataNode = null;
    LOG.debug("writeBlock receive buf size " + s.getReceiveBufferSize() + " tcp no delay " + s.getTcpNoDelay());
    //
    // Read in the header
    //
    Block block = new Block(in.readLong(), dataXceiverServer.estimateBlockSize, in.readLong());
    LOG.info("Receiving block " + block + " src: " + remoteAddress + " dest: " + localAddress);
    int pipelineSize = in.readInt(); // num of datanodes in entire pipeline
    boolean isRecovery = in.readBoolean(); // is this part of recovery?
    String client = Text.readString(in); // working on behalf of this client
    boolean hasSrcDataNode = in.readBoolean(); // is src node info present
    if (hasSrcDataNode) {
        srcDataNode = new DatanodeInfo();
        srcDataNode.readFields(in);
    }
    int numTargets = in.readInt();
    if (numTargets < 0) {
        throw new IOException("Mislabelled incoming datastream.");
    }
    DatanodeInfo targets[] = new DatanodeInfo[numTargets];
    for (int i = 0; i < targets.length; i++) {
        DatanodeInfo tmp = new DatanodeInfo();
        tmp.readFields(in);
        targets[i] = tmp;
    }
    Token<BlockTokenIdentifier> accessToken = new Token<BlockTokenIdentifier>();
    accessToken.readFields(in);
    DataOutputStream replyOut = null; // stream to prev target
    replyOut = new DataOutputStream(NetUtils.getOutputStream(s, datanode.socketWriteTimeout));
    if (datanode.isBlockTokenEnabled) {
        try {
            datanode.blockTokenSecretManager.checkAccess(accessToken, null, block,
                    BlockTokenSecretManager.AccessMode.WRITE);
        } catch (InvalidToken e) {
            try {
                if (client.length() != 0) {
                    replyOut.writeShort((short) DataTransferProtocol.OP_STATUS_ERROR_ACCESS_TOKEN);
                    Text.writeString(replyOut, datanode.dnRegistration.getName());
                    replyOut.flush();
                }
                throw new IOException("Access token verification failed, for client " + remoteAddress
                        + " for OP_WRITE_BLOCK for block " + block);
            } finally {
                IOUtils.closeStream(replyOut);
            }
        }
    }

    DataOutputStream mirrorOut = null; // stream to next target
    DataInputStream mirrorIn = null; // reply from next target
    Socket mirrorSock = null; // socket to next target
    BlockReceiver blockReceiver = null; // responsible for data handling
    String mirrorNode = null; // the name:port of next target
    String firstBadLink = ""; // first datanode that failed in connection setup
    short mirrorInStatus = (short) DataTransferProtocol.OP_STATUS_SUCCESS;
    try {
        // open a block receiver and check if the block does not exist
        blockReceiver = new BlockReceiver(block, in, s.getRemoteSocketAddress().toString(),
                s.getLocalSocketAddress().toString(), isRecovery, client, srcDataNode, datanode);

        //
        // Open network conn to backup machine, if 
        // appropriate
        //
        if (targets.length > 0) {
            InetSocketAddress mirrorTarget = null;
            // Connect to backup machine
            mirrorNode = targets[0].getName();
            mirrorTarget = NetUtils.createSocketAddr(mirrorNode);
            mirrorSock = datanode.newSocket();
            try {
                int timeoutValue = datanode.socketTimeout + (HdfsConstants.READ_TIMEOUT_EXTENSION * numTargets);
                int writeTimeout = datanode.socketWriteTimeout
                        + (HdfsConstants.WRITE_TIMEOUT_EXTENSION * numTargets);
                NetUtils.connect(mirrorSock, mirrorTarget, timeoutValue);
                mirrorSock.setSoTimeout(timeoutValue);
                mirrorSock.setSendBufferSize(DEFAULT_DATA_SOCKET_SIZE);
                mirrorOut = new DataOutputStream(new BufferedOutputStream(
                        NetUtils.getOutputStream(mirrorSock, writeTimeout), SMALL_BUFFER_SIZE));
                mirrorIn = new DataInputStream(NetUtils.getInputStream(mirrorSock));

                // Write header: Copied from DFSClient.java!
                mirrorOut.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
                mirrorOut.write(DataTransferProtocol.OP_WRITE_BLOCK);
                mirrorOut.writeLong(block.getBlockId());
                mirrorOut.writeLong(block.getGenerationStamp());
                mirrorOut.writeInt(pipelineSize);
                mirrorOut.writeBoolean(isRecovery);
                Text.writeString(mirrorOut, client);
                mirrorOut.writeBoolean(hasSrcDataNode);
                if (hasSrcDataNode) { // pass src node information
                    srcDataNode.write(mirrorOut);
                }
                mirrorOut.writeInt(targets.length - 1);
                for (int i = 1; i < targets.length; i++) {
                    targets[i].write(mirrorOut);
                }
                accessToken.write(mirrorOut);

                blockReceiver.writeChecksumHeader(mirrorOut);
                mirrorOut.flush();

                // read connect ack (only for clients, not for replication req)
                if (client.length() != 0) {
                    mirrorInStatus = mirrorIn.readShort();
                    firstBadLink = Text.readString(mirrorIn);
                    if (LOG.isDebugEnabled() || mirrorInStatus != DataTransferProtocol.OP_STATUS_SUCCESS) {
                        LOG.info("Datanode " + targets.length + " got response for connect ack "
                                + " from downstream datanode with firstbadlink as " + firstBadLink);
                    }
                }

            } catch (IOException e) {
                if (client.length() != 0) {
                    replyOut.writeShort((short) DataTransferProtocol.OP_STATUS_ERROR);
                    Text.writeString(replyOut, mirrorNode);
                    replyOut.flush();
                }
                IOUtils.closeStream(mirrorOut);
                mirrorOut = null;
                IOUtils.closeStream(mirrorIn);
                mirrorIn = null;
                IOUtils.closeSocket(mirrorSock);
                mirrorSock = null;
                if (client.length() > 0) {
                    throw e;
                } else {
                    LOG.info(datanode.dnRegistration + ":Exception transfering block " + block + " to mirror "
                            + mirrorNode + ". continuing without the mirror.\n"
                            + StringUtils.stringifyException(e));
                }
            }
        }

        // send connect ack back to source (only for clients)
        if (client.length() != 0) {
            if (LOG.isDebugEnabled() || mirrorInStatus != DataTransferProtocol.OP_STATUS_SUCCESS) {
                LOG.info("Datanode " + targets.length + " forwarding connect ack to upstream firstbadlink is "
                        + firstBadLink);
            }
            replyOut.writeShort(mirrorInStatus);
            Text.writeString(replyOut, firstBadLink);
            replyOut.flush();
        }

        // receive the block and mirror to the next target
        String mirrorAddr = (mirrorSock == null) ? null : mirrorNode;
        blockReceiver.receiveBlock(mirrorOut, mirrorIn, replyOut, mirrorAddr, null, targets.length);

        // if this write is for a replication request (and not
        // from a client), then confirm block. For client-writes,
        // the block is finalized in the PacketResponder.
        if (client.length() == 0) {
            datanode.notifyNamenodeReceivedBlock(block, DataNode.EMPTY_DEL_HINT);
            LOG.info("Received block " + block + " src: " + remoteAddress + " dest: " + localAddress
                    + " of size " + block.getNumBytes());
        }

        if (datanode.blockScanner != null) {
            datanode.blockScanner.addBlock(block);
        }

    } catch (IOException ioe) {
        LOG.info("writeBlock " + block + " received exception " + ioe);
        throw ioe;
    } finally {
        // close all opened streams
        IOUtils.closeStream(mirrorOut);
        IOUtils.closeStream(mirrorIn);
        IOUtils.closeStream(replyOut);
        IOUtils.closeSocket(mirrorSock);
        IOUtils.closeStream(blockReceiver);
    }
}

From source file:com.trigger_context.Main_Service.java

public void senderSync(DataInputStream in, DataOutputStream out, String folder) {
    String tfolder = folder + (folder.charAt(folder.length() - 1) == '/' ? "" : "/");
    File f = new File(folder);
    File file[] = f.listFiles();
    // noti(file.toString(),"");
    String md5 = null;
    HashMap<String, File> hm = new HashMap<String, File>();

    HashSet<String> A = new HashSet<String>();
    for (File element : file) {
        hm.put(md5 = calculateMD5(element), element);
        A.add(md5);
    }
    // noti(hm.toString(),"");
    int numB = 0;
    try {
        numB = in.readInt();
    } catch (IOException e) {
        // TODO Auto-generated catch block
        noti("error reading 1st int in sendersync", "");
        e.printStackTrace();
    }
    HashSet<String> B = new HashSet<String>();
    for (int i = 0; i < numB; i++) {
        try {
            B.add(in.readUTF());
        } catch (IOException e1) {
            noti("error in readins md5", "");
            e1.printStackTrace();
        }
    }
    HashSet<String> aMb = new HashSet<String>(A);
    aMb.removeAll(B);
    int l1 = aMb.size();
    try {
        out.writeInt(l1);
    } catch (IOException e) {
        // TODO Auto-generated catch block
        noti("error in writing 1st int", "");
        e.printStackTrace();
    }
    Iterator<String> itr = aMb.iterator();
    while (itr.hasNext()) {
        f = hm.get(itr.next());
        sendFile(out, f.getPath());
    }
    HashSet<String> bMa = new HashSet<String>(B);
    bMa.removeAll(A);
    int l2 = bMa.size();
    try {
        out.writeInt(l2);
    } catch (IOException e) {
        // TODO Auto-generated catch block
        noti("error in writing 2nd int", "");
        e.printStackTrace();
    }
    itr = bMa.iterator();
    while (itr.hasNext()) {
        md5 = itr.next();
        try {
            out.writeUTF(md5);
        } catch (IOException e) {
            // TODO Auto-generated catch block
            noti("error in sending md5", "");
            e.printStackTrace();
        }
        recvFile(in, folder);
    }
}
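
The method above uses readInt as a count prefix: the peer first sends how many MD5 strings follow, and the receiver then issues that many readUTF calls. A minimal sketch of just that receiving step, under the assumed wire format of an int count followed by that many strings written with writeUTF (the method name is illustrative):

// Assumes: import java.io.DataInputStream; import java.io.IOException;
//          import java.util.HashSet; import java.util.Set;
static Set<String> readStringSet(DataInputStream in) throws IOException {
    int count = in.readInt();               // how many entries the peer will send
    Set<String> entries = new HashSet<>(count);
    for (int i = 0; i < count; i++) {
        entries.add(in.readUTF());          // each entry was written with writeUTF
    }
    return entries;
}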

From source file:net.minecraftforge.fml.repackage.com.nothome.delta.GDiffPatcher.java

/**
 * Patches to an output stream.
 */
public void patch(SeekableSource source, InputStream patch, OutputStream out) throws IOException {

    DataOutputStream outOS = new DataOutputStream(out);
    DataInputStream patchIS = new DataInputStream(patch);

    // the magic string is 'd1 ff d1 ff' + the version number
    if (patchIS.readUnsignedByte() != 0xd1 || patchIS.readUnsignedByte() != 0xff
            || patchIS.readUnsignedByte() != 0xd1 || patchIS.readUnsignedByte() != 0xff
            || patchIS.readUnsignedByte() != 0x04) {

        throw new PatchException("magic string not found, aborting!");
    }

    while (true) {
        int command = patchIS.readUnsignedByte();
        if (command == EOF)
            break;
        int length;
        int offset;

        if (command <= DATA_MAX) {
            append(command, patchIS, outOS);
            continue;
        }

        switch (command) {
        case DATA_USHORT: // ushort, n bytes following; append
            length = patchIS.readUnsignedShort();
            append(length, patchIS, outOS);
            break;
        case DATA_INT: // int, n bytes following; append
            length = patchIS.readInt();
            append(length, patchIS, outOS);
            break;
        case COPY_USHORT_UBYTE:
            offset = patchIS.readUnsignedShort();
            length = patchIS.readUnsignedByte();
            copy(offset, length, source, outOS);
            break;
        case COPY_USHORT_USHORT:
            offset = patchIS.readUnsignedShort();
            length = patchIS.readUnsignedShort();
            copy(offset, length, source, outOS);
            break;
        case COPY_USHORT_INT:
            offset = patchIS.readUnsignedShort();
            length = patchIS.readInt();
            copy(offset, length, source, outOS);
            break;
        case COPY_INT_UBYTE:
            offset = patchIS.readInt();
            length = patchIS.readUnsignedByte();
            copy(offset, length, source, outOS);
            break;
        case COPY_INT_USHORT:
            offset = patchIS.readInt();
            length = patchIS.readUnsignedShort();
            copy(offset, length, source, outOS);
            break;
        case COPY_INT_INT:
            offset = patchIS.readInt();
            length = patchIS.readInt();
            copy(offset, length, source, outOS);
            break;
        case COPY_LONG_INT:
            long loffset = patchIS.readLong();
            length = patchIS.readInt();
            copy(loffset, length, source, outOS);
            break;
        default:
            throw new IllegalStateException("command " + command);
        }
    }
    outOS.flush();
}
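
In the patch loop above, readInt supplies the wider operand sizes: DATA_INT reads an int length, and the COPY_*_INT commands read int offsets and lengths. A stripped-down sketch of the same dispatch idea, with two purely hypothetical opcodes (APPEND_INT: an int length followed by that many literal bytes; COPY_INT_INT: an int offset and an int length):

// Assumes: import java.io.DataInputStream; import java.io.IOException;
static void applyCommands(DataInputStream in) throws IOException {
    final int EOF_CMD = 0, APPEND_INT = 1, COPY_INT_INT = 2; // hypothetical opcodes
    while (true) {
        int command = in.readUnsignedByte();
        if (command == EOF_CMD)
            break;
        switch (command) {
        case APPEND_INT: {
            int length = in.readInt();       // number of literal bytes that follow
            byte[] data = new byte[length];
            in.readFully(data);              // consume them (a real patcher would write them out)
            break;
        }
        case COPY_INT_INT: {
            int offset = in.readInt();       // position to copy from in the source
            int length = in.readInt();       // number of bytes to copy
            // a real patcher would copy source[offset, offset + length) to the output here
            break;
        }
        default:
            throw new IllegalStateException("unknown command " + command);
        }
    }
}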

From source file:voldemort.VoldemortAdminTool.java

private static Iterator<Pair<ByteArray, Versioned<byte[]>>> readEntriesBinary(File inputDir, String storeName)
        throws IOException {
    File inputFile = new File(inputDir, storeName + ".entries");
    if (!inputFile.exists()) {
        throw new FileNotFoundException("File " + inputFile.getAbsolutePath() + " does not exist!");
    }
    final DataInputStream dis = new DataInputStream(new BufferedInputStream(new FileInputStream(inputFile)));

    return new AbstractIterator<Pair<ByteArray, Versioned<byte[]>>>() {

        @Override
        protected Pair<ByteArray, Versioned<byte[]>> computeNext() {
            try {
                int length = dis.readInt();
                byte[] keyBytes = new byte[length];
                ByteUtils.read(dis, keyBytes);
                length = dis.readInt();
                byte[] versionBytes = new byte[length];
                ByteUtils.read(dis, versionBytes);
                length = dis.readInt();
                byte[] valueBytes = new byte[length];
                ByteUtils.read(dis, valueBytes);

                ByteArray key = new ByteArray(keyBytes);
                VectorClock version = new VectorClock(versionBytes);
                Versioned<byte[]> value = new Versioned<byte[]>(valueBytes, version);

                return new Pair<ByteArray, Versioned<byte[]>>(key, value);
            } catch (EOFException e) {
                try {
                    dis.close();
                } catch (IOException ie) {
                    ie.printStackTrace();
                }
                return endOfData();
            } catch (IOException e) {
                try {
                    dis.close();
                } catch (IOException ie) {
                    ie.printStackTrace();
                }
                throw new VoldemortException("Error reading from input file ", e);
            }
        }
    };
}
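
The Voldemort reader above is a textbook length-prefixed layout: each field is stored as an int length written by writeInt, followed by exactly that many bytes. A minimal sketch of the same pattern for a hypothetical file of single-field records, using readFully so a short read surfaces as an EOFException rather than a silently truncated payload:

// Assumes: import java.io.*; import java.util.ArrayList; import java.util.List;
static List<byte[]> readLengthPrefixedRecords(File file) throws IOException {
    List<byte[]> records = new ArrayList<>();
    try (DataInputStream in = new DataInputStream(
            new BufferedInputStream(new FileInputStream(file)))) {
        while (true) {
            int length;
            try {
                length = in.readInt();      // 4-byte big-endian length prefix
            } catch (EOFException end) {
                break;                      // clean end of file between records
            }
            byte[] payload = new byte[length];
            in.readFully(payload);          // EOFException here means a truncated record
            records.add(payload);
        }
    }
    return records;
}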

From source file:org.apache.hadoop.hdfs.server.namenode.IngestLocal.java

/**
 * Load an edit log, and continue applying the changes to the in-memory 
 * structure. This is where we ingest transactions into the standby.
 */
private int loadFSEdits(File edits) throws IOException {
    FSNamesystem fsNamesys = FSNamesystem.getFSNamesystem();
    FSDirectory fsDir = fsNamesys.dir;
    int numEdits = 0;
    int logVersion = 0;
    String clientName = null;
    String clientMachine = null;
    String path = null;
    int numOpAdd = 0, numOpClose = 0, numOpDelete = 0, numOpRename = 0, numOpSetRepl = 0, numOpMkDir = 0,
            numOpSetPerm = 0, numOpSetOwner = 0, numOpSetGenStamp = 0, numOpTimes = 0, numOpOther = 0;
    long startTime = FSNamesystem.now();

    LOG.info("Ingest: Consuming transactions from file " + edits + " of size " + edits.length());
    rp = new RandomAccessFile(edits, "r");
    fp = new FileInputStream(rp.getFD()); // open for reads
    fc = rp.getChannel();

    DataInputStream in = new DataInputStream(fp);
    try {
        // Read log file version. Could be missing. 
        in.mark(4);
        // If edits log is greater than 2G, available method will return negative
        // numbers, so we avoid having to call available
        boolean available = true;
        try {
            logVersion = in.readByte();
        } catch (EOFException e) {
            available = false;
        }
        if (available) {
            fc.position(0); // reset
            in = new DataInputStream(fp);
            logVersion = in.readInt();
            if (logVersion != FSConstants.LAYOUT_VERSION) // future version
                throw new IOException("Ingest: Unexpected version of the file system log file: " + logVersion
                        + ". Current version = " + FSConstants.LAYOUT_VERSION + ".");
        }
        assert logVersion <= Storage.LAST_UPGRADABLE_LAYOUT_VERSION : "Unsupported version " + logVersion;
        currentPosition = fc.position();
        numEdits = ingestFSEdits(edits, in, logVersion); // continue to ingest 
    } finally {
        LOG.info("Ingest: Closing transactions file " + edits);
        fp.close();
    }
    LOG.info("Ingest: Edits file " + edits.getName() + " of size " + edits.length() + " edits # " + numEdits
            + " loaded in " + (FSNamesystem.now() - startTime) / 1000 + " seconds.");

    if (LOG.isDebugEnabled()) {
        LOG.debug("Ingest: numOpAdd = " + numOpAdd + " numOpClose = " + numOpClose + " numOpDelete = "
                + numOpDelete + " numOpRename = " + numOpRename + " numOpSetRepl = " + numOpSetRepl
                + " numOpMkDir = " + numOpMkDir + " numOpSetPerm = " + numOpSetPerm + " numOpSetOwner = "
                + numOpSetOwner + " numOpSetGenStamp = " + numOpSetGenStamp + " numOpTimes = " + numOpTimes
                + " numOpOther = " + numOpOther);
    }

    if (logVersion != FSConstants.LAYOUT_VERSION) // other version
        numEdits++; // save this image asap
    return numEdits;
}

From source file:org.cloudata.core.commitlog.CommitLogClient.java

public void open() throws UnmatchedLogException, CommitLogInterruptedException, IOException {
    pipeKey = generateUniqueKey();

    if (LOG.isDebugEnabled()) {
        String msg = "";
        for (String addr : ipAddressList) {
            msg = msg + addr + ", ";
        }
        LOG.debug("Open new pipe with timeout[" + this.timeout + "], key [" + pipeKey + "], -> " + msg);
    }

    String ret = null;
    Socket s = null;
    try {
        sc = SocketChannel.open();

        s = sc.socket();
        s.setTcpNoDelay(true);
        s.setSoTimeout(timeout);

        int addressIndex = 0;
        if (verifiedAddressIndex >= 0) {
            addressIndex = verifiedAddressIndex;
        }
        LOG.debug("open pipe to " + pipeAddressList[addressIndex]);
        sc.connect(pipeAddressList[addressIndex]);
    } catch (ClosedByInterruptException e) {
        internalClose();
        throw new CommitLogInterruptedException();
    } catch (IOException e) {
        throw new CommitLogShutDownException(e);
    }

    try {
        byte[] body = buildBody();

        currentStream = new DataOutputStream(new BufferedOutputStream(s.getOutputStream(), 8192));
        checkInputStream = new DataInputStream(s.getInputStream());
        currentStream.write(MAGIC_KEY);
        currentStream.writeInt(0);
        currentStream.writeInt(body.length);
        currentStream.write(body);
        currentStream.flush();

        DataInputStream dis = new DataInputStream(s.getInputStream());
        byte[] key = new byte[MAGIC_KEY.length];

        if (dis.read(key) < 0) {
            throw new IOException("Fail to establish pipe connection to " + getPipeAddressListStr());
        }

        if (!Arrays.equals(MAGIC_KEY, key)) {
            throw new IOException(
                    "Fail to establish pipe connection to " + getPipeAddressList() + "due to wrong magic key");
        }
        int opCode = dis.readInt();
        int replyLength = dis.readInt();

        body = new byte[replyLength];
        dis.read(body);
        ret = new String(body);
    } catch (ClosedByInterruptException e) {
        internalClose();
        throw new CommitLogInterruptedException();
    } catch (IOException e) {
        LOG.warn("Fail to establish pipe to " + getPipeAddressListStr() + " due to " + e, e);
        internalClose();
        throw new IOException("Fail to establish pipe connection to " + getPipeAddressListStr(), e);
    }

    if (!ret.equals(Constants.PIPE_CONNECTED)) {
        internalClose();

        if (ret.equals("Log files are unmatched among commit log servers")) {
            throw new UnmatchedLogException("Log files are unmatched among commit log servers");
        }

        throw new IOException(
                "Fail to establish pipe connection to " + getPipeAddressListStr() + ". response : " + ret);
    } else {
        closed = false;
    }
}