Example usage for java.nio ByteBuffer rewind

List of usage examples for java.nio ByteBuffer rewind

Introduction

On this page you can find example usage for java.nio ByteBuffer rewind.

Prototype

public final Buffer rewind() 

Source Link

Document

Rewinds this buffer.

Usage

From source file:org.apache.hadoop.yarn.server.resourcemanager.security.TestDelegationTokenRenewer.java

// Verifies that an expiring HDFS delegation token handed in at app submission
// is replaced by a freshly obtained token, and that an NM receives the new
// token through the system-credentials payload of its heartbeat response.
@Test(timeout = 20000)
public void testReplaceExpiringDelegationToken() throws Exception {
    conf.setBoolean(YarnConfiguration.RM_PROXY_USER_PRIVILEGES_ENABLED, true);
    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
    UserGroupInformation.setConfiguration(conf);

    // create Token1:
    Text userText1 = new Text("user1");
    DelegationTokenIdentifier dtId1 = new DelegationTokenIdentifier(userText1, new Text("renewer1"), userText1);
    // set max date to 0 to simulate an expiring token;
    dtId1.setMaxDate(0);
    final Token<DelegationTokenIdentifier> token1 = new Token<DelegationTokenIdentifier>(dtId1.getBytes(),
            "password1".getBytes(), dtId1.getKind(), new Text("service1"));

    // create token2 -- the replacement the stubbed renewer below will hand out
    Text userText2 = new Text("user2");
    DelegationTokenIdentifier dtId2 = new DelegationTokenIdentifier(userText1, new Text("renewer2"), userText2);
    final Token<DelegationTokenIdentifier> expectedToken = new Token<DelegationTokenIdentifier>(
            dtId2.getBytes(), "password2".getBytes(), dtId2.getKind(), new Text("service2"));

    // RM whose renewer always "obtains" expectedToken instead of contacting a
    // real NameNode, so the replacement path is deterministic.
    final MockRM rm = new TestSecurityMockRM(conf, null) {
        @Override
        protected DelegationTokenRenewer createDelegationTokenRenewer() {
            return new DelegationTokenRenewer() {
                @Override
                protected Token<?>[] obtainSystemTokensForUser(String user, final Credentials credentials)
                        throws IOException {
                    credentials.addToken(expectedToken.getService(), expectedToken);
                    return new Token<?>[] { expectedToken };
                }
            };
        }
    };
    rm.start();
    Credentials credentials = new Credentials();
    credentials.addToken(userText1, token1);

    RMApp app = rm.submitApp(200, "name", "user", new HashMap<ApplicationAccessType, String>(), false,
            "default", 1, credentials);

    // wait for the initial expiring hdfs token to be removed from allTokens
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
        public Boolean get() {
            return rm.getRMContext().getDelegationTokenRenewer().getAllTokens().get(token1) == null;
        }
    }, 1000, 20000);

    // wait for the initial expiring hdfs token to be removed from appTokens
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
        public Boolean get() {
            return !rm.getRMContext().getDelegationTokenRenewer().getDelegationTokens().contains(token1);
        }
    }, 1000, 20000);

    // wait for the new retrieved hdfs token.
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
        public Boolean get() {
            return rm.getRMContext().getDelegationTokenRenewer().getDelegationTokens().contains(expectedToken);
        }
    }, 1000, 20000);

    // check nm can retrieve the token
    final MockNM nm1 = new MockNM("127.0.0.1:1234", 15120, rm.getResourceTrackerService());
    nm1.registerNode();
    NodeHeartbeatResponse response = nm1.nodeHeartbeat(true);
    ByteBuffer tokenBuffer = response.getSystemCredentialsForApps().get(app.getApplicationId());
    Assert.assertNotNull(tokenBuffer);
    Credentials appCredentials = new Credentials();
    DataInputByteBuffer buf = new DataInputByteBuffer();
    // rewind before deserializing: the buffer's position is unspecified here,
    // and readTokenStorageStream consumes from the current position.
    tokenBuffer.rewind();
    buf.reset(tokenBuffer);
    appCredentials.readTokenStorageStream(buf);
    Assert.assertTrue(appCredentials.getAllTokens().contains(expectedToken));
}

From source file:org.apache.hadoop.yarn.server.resourcemanager.security.TestDelegationTokenRenewer.java

// Verifies that after an RM restart with recovery enabled, a token whose
// renewal fails with InvalidToken is replaced by a freshly obtained token,
// and that an NM receives the updated token via heartbeat.
@Test
public void testRMRestartWithExpiredToken() throws Exception {
    Configuration yarnConf = new YarnConfiguration();
    yarnConf.setBoolean(YarnConfiguration.RM_PROXY_USER_PRIVILEGES_ENABLED, true);
    yarnConf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
    yarnConf.setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
    FileSystem fs;
    Path tmpDir;
    fs = FileSystem.get(new Configuration());
    tmpDir = new Path(new File("target", this.getClass().getSimpleName() + "-tmpDir").getAbsolutePath());
    fs.delete(tmpDir, true);
    fs.mkdirs(tmpDir);
    try {
        // NOTE(review): the state-store settings below go into 'conf' while
        // the RMs are constructed with 'yarnConf' -- confirm the two objects
        // are shared (or that this is intentional).
        conf.set(YarnConfiguration.FS_RM_STATE_STORE_URI, tmpDir.toString());
        conf.set(YarnConfiguration.RM_STORE, FileSystemRMStateStore.class.getName());
        UserGroupInformation.setConfiguration(yarnConf);

        // create Token1: the token submitted with the app, which the restarted
        // RM's renewer will reject as invalid.
        Text userText1 = new Text("user1");
        DelegationTokenIdentifier dtId1 = new DelegationTokenIdentifier(userText1, new Text("renewer1"),
                userText1);
        final Token<DelegationTokenIdentifier> originalToken = new Token<>(dtId1.getBytes(),
                "password1".getBytes(), dtId1.getKind(), new Text("service1"));
        Credentials credentials = new Credentials();
        credentials.addToken(userText1, originalToken);

        MockRM rm1 = new TestSecurityMockRM(yarnConf);
        rm1.start();
        RMApp app = rm1.submitApp(200, "name", "user", new HashMap<ApplicationAccessType, String>(), false,
                "default", 1, credentials);

        // create token2 -- the replacement handed out after the restart
        Text userText2 = new Text("user1");
        DelegationTokenIdentifier dtId2 = new DelegationTokenIdentifier(userText1, new Text("renewer2"),
                userText2);
        final Token<DelegationTokenIdentifier> updatedToken = new Token<DelegationTokenIdentifier>(
                dtId2.getBytes(), "password2".getBytes(), dtId2.getKind(), new Text("service2"));
        final AtomicBoolean firstRenewInvoked = new AtomicBoolean(false);
        final AtomicBoolean secondRenewInvoked = new AtomicBoolean(false);
        // Second RM: fails renewal of the recovered originalToken and serves
        // updatedToken from obtainSystemTokensForUser instead.
        MockRM rm2 = new TestSecurityMockRM(yarnConf) {
            @Override
            protected DelegationTokenRenewer createDelegationTokenRenewer() {
                return new DelegationTokenRenewer() {

                    @Override
                    protected void renewToken(final DelegationTokenToRenew dttr) throws IOException {

                        if (dttr.token.equals(updatedToken)) {
                            secondRenewInvoked.set(true);
                            super.renewToken(dttr);
                        } else if (dttr.token.equals(originalToken)) {
                            firstRenewInvoked.set(true);
                            throw new InvalidToken("Failed to renew");
                        } else {
                            throw new IOException("Unexpected");
                        }
                    }

                    @Override
                    protected Token<?>[] obtainSystemTokensForUser(String user, final Credentials credentials)
                            throws IOException {
                        credentials.addToken(updatedToken.getService(), updatedToken);
                        return new Token<?>[] { updatedToken };
                    }
                };
            }
        };

        // simulating restart the rm
        rm2.start();

        // check nm can retrieve the token
        final MockNM nm1 = new MockNM("127.0.0.1:1234", 15120, rm2.getResourceTrackerService());
        nm1.registerNode();
        NodeHeartbeatResponse response = nm1.nodeHeartbeat(true);
        ByteBuffer tokenBuffer = response.getSystemCredentialsForApps().get(app.getApplicationId());
        Assert.assertNotNull(tokenBuffer);
        Credentials appCredentials = new Credentials();
        DataInputByteBuffer buf = new DataInputByteBuffer();
        // rewind before deserializing so readTokenStorageStream starts at
        // the beginning of the serialized credentials.
        tokenBuffer.rewind();
        buf.reset(tokenBuffer);
        appCredentials.readTokenStorageStream(buf);
        Assert.assertTrue(firstRenewInvoked.get() && secondRenewInvoked.get());
        Assert.assertTrue(appCredentials.getAllTokens().contains(updatedToken));
    } finally {
        fs.delete(tmpDir, true);
    }
}

From source file:org.apache.bookkeeper.bookie.Bookie.java

/**
 * Add an entry to a ledger as specified by handle.
 *
 * @param handle descriptor of the ledger the entry is appended to
 * @param entry  buffer holding the entry payload
 * @param cb     callback invoked when the journal write completes
 * @param ctx    opaque context passed through to the callback
 * @throws IOException     on storage failure
 * @throws BookieException on bookie-level failure
 */
private void addEntryInternal(LedgerDescriptor handle, ByteBuffer entry, WriteCallback cb, Object ctx)
        throws IOException, BookieException {
    long ledgerId = handle.getLedgerId();
    // Reset position so addEntry sees the entry from its start regardless of
    // the buffer's incoming position.
    entry.rewind();
    long entryId = handle.addEntry(entry);

    // addEntry may have advanced the position; rewind again so remaining()
    // counts the full entry and the journal reads it from the beginning.
    entry.rewind();
    writeBytes.add(entry.remaining());

    LOG.trace("Adding {}@{}", entryId, ledgerId);
    journal.logAddEntry(entry, cb, ctx);
}

From source file:org.openhab.binding.keba.handler.KeContactHandler.java

// Writes the given buffer to the datagram channel once the selector reports
// it writable. On any send failure the channel is closed and the connection
// is treated as lost.
protected void onWritable(ByteBuffer buffer, DatagramChannel theChannel) {
    lock.lock();
    try {

        SelectionKey theSelectionKey = theChannel.keyFor(selector);

        if (theSelectionKey != null) {

            // selectNow() is non-blocking; synchronize on the selector because
            // it is shared with other handler callbacks.
            synchronized (selector) {
                try {
                    selector.selectNow();
                } catch (IOException e) {
                    logger.error("An exception occurred while selecting: {}", e.getMessage());
                }
            }

            Iterator<SelectionKey> it = selector.selectedKeys().iterator();
            while (it.hasNext()) {
                SelectionKey selKey = it.next();
                it.remove();
                // Only act on the key belonging to this channel.
                if (selKey.isValid() && selKey.isWritable() && selKey == theSelectionKey) {

                    boolean error = false;
                    // Send the buffer from the start, whatever its position is.
                    buffer.rewind();

                    try {
                        // NOTE(review): new String(buffer.array()) uses the
                        // platform default charset -- confirm that is intended.
                        logger.debug("Sending '{}' on the channel '{}'->'{}'",
                                new Object[] { new String(buffer.array()), theChannel.getLocalAddress(),
                                        theChannel.getRemoteAddress() });
                        theChannel.write(buffer);
                    } catch (NotYetConnectedException e) {
                        updateStatus(ThingStatus.OFFLINE, ThingStatusDetail.COMMUNICATION_ERROR,
                                "The remote host is not yet connected");
                        error = true;
                    } catch (ClosedChannelException e) {
                        updateStatus(ThingStatus.OFFLINE, ThingStatusDetail.COMMUNICATION_ERROR,
                                "The connection to the remote host is closed");
                        error = true;
                    } catch (IOException e) {
                        updateStatus(ThingStatus.OFFLINE, ThingStatusDetail.COMMUNICATION_ERROR,
                                "An IO exception occurred");
                        error = true;
                    }

                    // Any send error invalidates the channel: close it and
                    // notify the reconnect logic.
                    if (error) {
                        logger.debug("Disconnecting '{}' because of a socket error",
                                getThing().getUID().toString());
                        try {
                            theChannel.close();
                        } catch (IOException e) {
                            logger.warn("An exception occurred while closing the channel '{}': {}",
                                    datagramChannel, e.getMessage());
                        }

                        onConnectionLost();

                    }
                }
            }
        }
    } finally {
        lock.unlock();
    }
}

From source file:org.carbondata.processing.util.LevelSortIndexWriterThread.java

/**
 * Reads the level file and builds one {@code MemberSortModel} per surrogate
 * key in the range {@code [minSurrogate, maxSurrogate]}.
 *
 * <p>The file layout (as read here) is: a 4-byte {@code minSurrogate},
 * followed by length-prefixed member rows, with a trailing 4-byte
 * {@code maxSurrogate} at the very end of the file.
 *
 * @return members in surrogate-key order
 * @throws IOException if the level file cannot be read
 */
private MemberSortModel[] getLevelData() throws IOException {
    DataInputStream fileChannel = null;
    long currPositionIndex = 0;
    long size = 0;
    ByteBuffer buffer = null;

    // CHECKSTYLE:OFF
    boolean enableEncoding = Boolean
            .valueOf(CarbonProperties.getInstance().getProperty(CarbonCommonConstants.ENABLE_BASE64_ENCODING,
                    CarbonCommonConstants.ENABLE_BASE64_ENCODING_DEFAULT));
    // CHECKSTYLE:ON
    try {
        // First pass: skip to the last 4 bytes of the file, which hold maxSurrogate.
        fileChannel = FileFactory.getDataInputStream(levelFilePath, FileFactory.getFileType(levelFilePath));
        CarbonFile memberFile = FileFactory.getCarbonFile(levelFilePath,
                FileFactory.getFileType(levelFilePath));
        size = memberFile.getSize() - 4;
        long actualSkipSize = 0;
        // skip() may skip fewer bytes than requested, so loop until the whole
        // distance is covered. The remaining distance is (size - actualSkipSize);
        // the previous code subtracted the cumulative skipped count from the
        // previous request, which under-skips when skip() returns short more
        // than once.
        while (actualSkipSize != size) {
            actualSkipSize += fileChannel.skip(size - actualSkipSize);
        }
        maxSurrogate = fileChannel.readInt();
    } catch (IOException e) {
        LOGGER.error(e, "problem while reading the level file");
        throw e;
    } finally {
        CarbonUtil.closeStreams(fileChannel);
    }

    try {
        // Second pass: read everything except the trailing maxSurrogate.
        fileChannel = FileFactory.getDataInputStream(levelFilePath, FileFactory.getFileType(levelFilePath));
        // CHECKSTYLE:OFF
        buffer = ByteBuffer.allocate((int) size);
        // CHECKSTYLE:ON
        // readFully guarantees the backing array is completely filled.
        fileChannel.readFully(buffer.array());
        buffer.rewind();
    } catch (IOException e) {
        LOGGER.error(e, "problem while reading the level file");
        throw e;
    } finally {
        CarbonUtil.closeStreams(fileChannel);
    }
    minSurrogate = buffer.getInt();
    MemberSortModel[] surogateKeyArrays = new MemberSortModel[maxSurrogate - minSurrogate + 1];
    int surrogateKeyIndex = minSurrogate;
    currPositionIndex += 4;
    int current = 0;

    // Each row is a 4-byte length followed by that many payload bytes.
    while (currPositionIndex < size) {
        int len = buffer.getInt();
        currPositionIndex += 4;
        byte[] rowBytes = new byte[len];
        buffer.get(rowBytes);
        currPositionIndex += len;
        String memberName = null;
        if (!memberDataType.equals(DataType.STRING)) {
            // Non-string members are stored as text (optionally base64).
            if (enableEncoding) {
                memberName = new String(Base64.decodeBase64(rowBytes), Charset.defaultCharset());
            } else {
                memberName = new String(rowBytes, Charset.defaultCharset());
            }
            surogateKeyArrays[current] = new MemberSortModel(surrogateKeyIndex, memberName, null,
                    memberDataType);
        } else {
            // String members keep the raw (decoded) bytes.
            if (enableEncoding) {
                rowBytes = Base64.decodeBase64(rowBytes);
            }
            surogateKeyArrays[current] = new MemberSortModel(surrogateKeyIndex, null, rowBytes, memberDataType);
        }
        surrogateKeyIndex++;
        current++;
    }
    return surogateKeyArrays;
}

From source file:com.servoy.j2db.util.Utils.java

/**
 * Writes the given text content to a file, encoded with the supplied charset.
 *
 * @param f       target file; nothing is written when {@code null}
 * @param content text to write
 * @param charset charset used to encode the content
 * @return {@code true} when all bytes were written, {@code false} on any error
 *         or when {@code f} is {@code null}
 */
public static boolean writeTXTFile(File f, String content, Charset charset) {
    if (f != null) {
        FileOutputStream fos = null;
        try {
            fos = new FileOutputStream(f);
            FileChannel fc = fos.getChannel();
            ByteBuffer bb = charset.encode(content);
            // A single FileChannel.write() call is not guaranteed to drain
            // the buffer; loop until every encoded byte has been written.
            while (bb.hasRemaining()) {
                fc.write(bb);
            }
            return true;
        } catch (Exception e) {
            Debug.error("Error writing txt file: " + f, e); //$NON-NLS-1$
        } finally {
            closeOutputStream(fos);
        }
    }
    return false;
}

From source file:com.servoy.j2db.util.Utils.java

/**
 * Reads a whole text file and decodes it with the supplied charset.
 *
 * @param f       file to read; may be {@code null} or non-existent
 * @param charset charset used to decode the bytes
 * @return the decoded content, or {@code null} when the file is missing or
 *         an error occurs
 */
public static String getTXTFileContent(File f, Charset charset) {
    if (f != null /* && f.exists() */) {
        if (Thread.currentThread().isInterrupted()) {
            Thread.interrupted(); // reset interrupted flag of current thread, FileChannel.read() will throw an exception for it.
        }
        FileInputStream fis = null;
        try {
            int length = (int) f.length();
            if (f.exists()) {
                fis = new FileInputStream(f);
                FileChannel fc = fis.getChannel();
                ByteBuffer bb = ByteBuffer.allocate(length);
                // A single read() may return before the buffer is full; keep
                // reading until EOF or the buffer is filled.
                while (bb.hasRemaining() && fc.read(bb) != -1) {
                    // keep reading
                }
                // flip() limits decoding to the bytes actually read; the old
                // rewind() decoded the full capacity, including trailing NULs
                // after a short read.
                bb.flip();
                CharBuffer cb = charset.decode(bb);
                return cb.toString();
            }
        } catch (Exception e) {
            Debug.error("Error reading txt file: " + f, e); //$NON-NLS-1$
        } finally {
            closeInputStream(fis);
        }
    }
    return null;
}

From source file:org.openhab.binding.keba.handler.KeContactP20Handler.java

// Writes the given buffer to the datagram channel once the selector reports
// it writable. On any send failure the channel is closed and the connection
// is treated as lost.
protected void onWritable(ByteBuffer buffer, DatagramChannel theChannel) {
    lock.lock();
    try {

        SelectionKey theSelectionKey = theChannel.keyFor(selector);

        if (theSelectionKey != null) {

            // selectNow() is non-blocking; synchronize on the selector because
            // it is shared with other handler callbacks.
            synchronized (selector) {
                try {
                    selector.selectNow();
                } catch (IOException e) {
                    logger.error("An exception occurred while selecting: {}", e.getMessage());
                }
            }

            Iterator<SelectionKey> it = selector.selectedKeys().iterator();
            while (it.hasNext()) {
                // The iterator is already typed; the old (SelectionKey) cast
                // was redundant.
                SelectionKey selKey = it.next();
                it.remove();
                // Only act on the key belonging to this channel.
                if (selKey.isValid() && selKey.isWritable() && selKey == theSelectionKey) {

                    boolean error = false;
                    // Send the buffer from the start, whatever its position is.
                    buffer.rewind();

                    try {
                        // NOTE(review): new String(buffer.array()) uses the
                        // platform default charset -- confirm that is intended.
                        logger.debug("Sending '{}' on the channel '{}'->'{}'",
                                new Object[] { new String(buffer.array()), theChannel.getLocalAddress(),
                                        theChannel.getRemoteAddress() });
                        theChannel.write(buffer);
                    } catch (NotYetConnectedException e) {
                        updateStatus(ThingStatus.OFFLINE, ThingStatusDetail.COMMUNICATION_ERROR,
                                "The remote host is not yet connected");
                        error = true;
                    } catch (ClosedChannelException e) {
                        updateStatus(ThingStatus.OFFLINE, ThingStatusDetail.COMMUNICATION_ERROR,
                                "The connection to the remote host is closed");
                        error = true;
                    } catch (IOException e) {
                        updateStatus(ThingStatus.OFFLINE, ThingStatusDetail.COMMUNICATION_ERROR,
                                "An IO exception occurred");
                        error = true;
                    }

                    // Any send error invalidates the channel: close it and
                    // notify the reconnect logic.
                    if (error) {
                        logger.debug("Disconnecting '{}' because of a socket error",
                                getThing().getUID().toString());
                        try {
                            theChannel.close();
                        } catch (IOException e) {
                            logger.warn("An exception occurred while closing the channel '{}': {}",
                                    datagramChannel, e.getMessage());
                        }

                        onConnectionLost();

                    }
                }
            }
        }
    } finally {
        lock.unlock();
    }
}

From source file:org.apache.bookkeeper.client.BookieWriteLedgerTest.java

/**
 * LedgerHandleAdv out of order writers with ensemble changes.
 * Verify that entry that was written to old ensemble will be
 * written to new ensemble too after ensemble change.
 *
 * @throws Exception
 */
@Test
public void testLedgerHandleAdvOutOfOrderWriteAndFrocedEnsembleChange() throws Exception {
    // Create a ledger
    long ledgerId = 0xABCDEF;
    SyncObj syncObj1 = new SyncObj();
    ByteBuffer entry;
    lh = bkc.createLedgerAdv(ledgerId, 3, 3, 3, digestType, ledgerPassword, null);
    entry = ByteBuffer.allocate(4);
    // Add entries 0-4
    for (int i = 0; i < 5; i++) {
        // rewind before each putInt so the 4-byte buffer is reused from
        // position 0 for every entry.
        entry.rewind();
        entry.putInt(rng.nextInt(maxInt));
        lh.addEntry(i, entry.array());
    }

    // Add 10 as Async Entry, which goes to first ensemble
    ByteBuffer entry1 = ByteBuffer.allocate(4);
    entry1.putInt(rng.nextInt(maxInt));
    lh.asyncAddEntry(10, entry1.array(), 0, entry1.capacity(), this, syncObj1);

    // Make sure entry-10 goes to the bookies and gets response.
    java.util.Queue<PendingAddOp> myPendingAddOps = Whitebox.getInternalState(lh, "pendingAddOps");
    PendingAddOp addOp = null;
    boolean pendingAddOpReceived = false;

    // Busy-wait until the pending op for entry 10 completes.
    // NOTE(review): peek() can return null when the queue is empty, which
    // would NPE below -- confirm entry 10 is always queued by this point.
    while (!pendingAddOpReceived) {
        addOp = myPendingAddOps.peek();
        if (addOp.entryId == 10 && addOp.completed) {
            pendingAddOpReceived = true;
        } else {
            Thread.sleep(1000);
        }
    }

    CountDownLatch sleepLatch1 = new CountDownLatch(1);
    List<BookieSocketAddress> ensemble;

    ensemble = lh.getLedgerMetadata().getAllEnsembles().entrySet().iterator().next().getValue();

    // Put all 3 bookies to sleep and start 3 new ones
    sleepBookie(ensemble.get(0), sleepLatch1);
    sleepBookie(ensemble.get(1), sleepLatch1);
    sleepBookie(ensemble.get(2), sleepLatch1);
    startNewBookie();
    startNewBookie();
    startNewBookie();

    // Original bookies are in sleep, new bookies added.
    // Now add entries 5-9 which forces ensemble changes
    // So at this point entries 0-4, 10 went to first
    // ensemble, 5-9 will go to new ensemble.
    for (int i = 5; i < 10; i++) {
        entry.rewind();
        entry.putInt(rng.nextInt(maxInt));
        lh.addEntry(i, entry.array());
    }

    // Wakeup all 3 bookies that went to sleep
    sleepLatch1.countDown();

    // Wait for all entries to be acknowledged for the first ledger
    synchronized (syncObj1) {
        while (syncObj1.counter < 1) {
            syncObj1.wait();
        }
        assertEquals(BKException.Code.OK, syncObj1.rc);
    }

    // Close write handle
    lh.close();

    // Open read handle
    lh = bkc.openLedger(ledgerId, digestType, ledgerPassword);

    // Make sure to read all 10 entries.
    for (int i = 0; i < 11; i++) {
        lh.readEntries(i, i);
    }
    lh.close();
    bkc.deleteLedger(ledgerId);
}

From source file:com.servoy.j2db.util.Utils.java

/**
 * Reads up to {@code size} bytes from the start of a file.
 *
 * @param f    file to read; may be {@code null} or non-existent
 * @param size number of bytes to read; clamped to the file length when
 *             negative or larger than the file
 * @return an array of {@code size} bytes, or {@code null} when the file is
 *         missing or an error occurs
 */
public static byte[] readFile(File f, long size) {
    if (f != null && f.exists()) {

        FileInputStream fis = null;
        try {
            int length = (int) f.length();
            fis = new FileInputStream(f);
            FileChannel fc = fis.getChannel();
            // Clamp the requested size to the actual file length.
            if (size > length || size < 0)
                size = length;
            ByteBuffer bb = ByteBuffer.allocate((int) size);
            // A single read() may return before the buffer is full; keep
            // reading until EOF or the buffer is filled.
            while (bb.hasRemaining() && fc.read(bb) != -1) {
                // keep reading
            }
            bb.rewind();
            byte[] bytes = null;
            if (bb.hasArray()) {
                // The heap buffer's backing array already holds the data.
                bytes = bb.array();
            } else {
                bytes = new byte[(int) size];
                bb.get(bytes, 0, (int) size);
            }
            return bytes;
        } catch (Exception e) {
            Debug.error("Error reading file: " + f, e); //$NON-NLS-1$
        } finally {
            try {
                if (fis != null)
                    fis.close();
            } catch (Exception ex) {
                // best-effort close; nothing useful to do on failure
            }
        }
    }
    return null;
}