Example usage for org.apache.hadoop.util Time monotonicNow

Introduction

On this page you can find example usages of org.apache.hadoop.util.Time#monotonicNow().

Prototype

public static long monotonicNow() 

Document

Returns the current time from some arbitrary time base in the past, counting in milliseconds; it is not affected by settimeofday or similar system clock changes.
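
Before the usage listings below, here is a minimal, self-contained sketch (not taken from any of the projects below) of the typical pattern: take two readings of Time.monotonicNow() and subtract them to get an elapsed duration in milliseconds. The absolute values are meaningless because the time base is arbitrary; only differences are useful. It assumes hadoop-common is on the classpath; the class name and sleep duration are illustrative only.

import org.apache.hadoop.util.Time;

public class MonotonicNowExample {
    public static void main(String[] args) throws InterruptedException {
        // Reading from the monotonic clock: milliseconds since an arbitrary
        // point in the past, unaffected by settimeofday or similar system
        // clock changes.
        long start = Time.monotonicNow();

        Thread.sleep(250); // stand-in for the work being timed

        long elapsedMs = Time.monotonicNow() - start;
        System.out.println("Elapsed: " + elapsedMs + " ms");
    }
}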

Usage

From source file:BlockReaderFactory.java

License:Apache License

/**
 * Request file descriptors from a DomainPeer.
 *
 * @param peer   The peer to use for communication.
 * @param slot   If non-null, the shared memory slot to associate with the 
 *               new ShortCircuitReplica.
 *
 * @return  A ShortCircuitReplica object if we could communicate with the
 *          datanode; null, otherwise. 
 * @throws  IOException If we encountered an I/O exception while communicating
 *          with the datanode.
 */
private ShortCircuitReplicaInfo requestFileDescriptors(DomainPeer peer, Slot slot) throws IOException {
    ShortCircuitCache cache = clientContext.getShortCircuitCache();
    final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(peer.getOutputStream()));
    SlotId slotId = slot == null ? null : slot.getSlotId();
    new Sender(out).requestShortCircuitFds(block, token, slotId, 1);
    DataInputStream in = new DataInputStream(peer.getInputStream());
    BlockOpResponseProto resp = BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
    DomainSocket sock = peer.getDomainSocket();
    switch (resp.getStatus()) {
    case SUCCESS:
        byte buf[] = new byte[1];
        FileInputStream fis[] = new FileInputStream[2];
        sock.recvFileInputStreams(fis, buf, 0, buf.length);
        ShortCircuitReplica replica = null;
        try {
            ExtendedBlockId key = new ExtendedBlockId(block.getBlockId(), block.getBlockPoolId());
            replica = new ShortCircuitReplica(key, fis[0], fis[1], cache, Time.monotonicNow(), slot);
        } catch (IOException e) {
            // This indicates an error reading from disk, or a format error.  Since
            // it's not a socket communication problem, we return null rather than
            // throwing an exception.
            LOG.warn(this + ": error creating ShortCircuitReplica.", e);
            return null;
        } finally {
            if (replica == null) {
                IOUtils.cleanup(DFSClient.LOG, fis[0], fis[1]);
            }
        }
        return new ShortCircuitReplicaInfo(replica);
    case ERROR_UNSUPPORTED:
        if (!resp.hasShortCircuitAccessVersion()) {
            LOG.warn("short-circuit read access is disabled for " + "DataNode " + datanode + ".  reason: "
                    + resp.getMessage());
            clientContext.getDomainSocketFactory().disableShortCircuitForPath(pathInfo.getPath());
        } else {
            LOG.warn("short-circuit read access for the file " + fileName + " is disabled for DataNode "
                    + datanode + ".  reason: " + resp.getMessage());
        }
        return null;
    case ERROR_ACCESS_TOKEN:
        String msg = "access control error while " + "attempting to set up short-circuit access to " + fileName
                + resp.getMessage();
        if (LOG.isDebugEnabled()) {
            LOG.debug(this + ":" + msg);
        }
        return new ShortCircuitReplicaInfo(new InvalidToken(msg));
    default:
        LOG.warn(this + ": unknown response code " + resp.getStatus()
                + " while attempting to set up short-circuit access. " + resp.getMessage());
        clientContext.getDomainSocketFactory().disableShortCircuitForPath(pathInfo.getPath());
        return null;
    }
}

From source file:TestDFSUtilClient.java

License:Apache License

@Test
public void testString2Bytes() {
    final long numLoop = 100000000;
    final String str = "testString2Bytes";
    byte[] b = null;
    long start = Time.monotonicNow();
    for (int i = 0; i < numLoop; i++) {
        b = DFSUtilClient.string2Bytes(str);
    }
    long end = Time.monotonicNow();
    System.out.println(new String(b, StandardCharsets.UTF_8));
    System.out.println("Elapsed time: " + (end - start));

    start = Time.monotonicNow();
    for (int i = 0; i < numLoop; i++) {
        b = str.getBytes(StandardCharsets.UTF_8);
    }
    end = Time.monotonicNow();
    System.out.println(new String(b, StandardCharsets.UTF_8));
    System.out.println("Elapsed time: " + (end - start));

    start = Time.monotonicNow();
    for (int i = 0; i < numLoop; i++) {
        b = DFSUtilClient.string2Bytes(str);
    }
    end = Time.monotonicNow();
    System.out.println(new String(b, StandardCharsets.UTF_8));
    System.out.println("Elapsed time: " + (end - start));

    start = Time.monotonicNow();
    for (int i = 0; i < numLoop; i++) {
        b = str.getBytes(StandardCharsets.UTF_8);
    }
    end = Time.monotonicNow();
    System.out.println(new String(b, StandardCharsets.UTF_8));
    System.out.println("Elapsed time: " + (end - start));
}

From source file:TestDFSUtilClient.java

License:Apache License

@Test
public void testBytes2String() {
    final long numLoop = 100000000;
    final byte[] bytes = "testBytes2String".getBytes(StandardCharsets.UTF_8);
    String str = null;
    long start = Time.monotonicNow();
    for (int i = 0; i < numLoop; i++) {
        str = DFSUtilClient.bytes2String(bytes);
    }
    long end = Time.monotonicNow();
    System.out.println(str);
    System.out.println("Elapsed time: " + (end - start));

    start = Time.monotonicNow();
    for (int i = 0; i < numLoop; i++) {
        str = new String(bytes, StandardCharsets.UTF_8);
    }
    end = Time.monotonicNow();
    System.out.println(str);
    System.out.println("Elapsed time: " + (end - start));

    start = Time.monotonicNow();
    for (int i = 0; i < numLoop; i++) {
        str = DFSUtilClient.bytes2String(bytes);
    }
    end = Time.monotonicNow();
    System.out.println(str);
    System.out.println("Elapsed time: " + (end - start));

    start = Time.monotonicNow();
    for (int i = 0; i < numLoop; i++) {
        str = new String(bytes, StandardCharsets.UTF_8);
    }
    end = Time.monotonicNow();
    System.out.println(str);
    System.out.println("Elapsed time: " + (end - start));
}

From source file:com.cloudera.CacheTool.java

License:Apache License

public static void main(String[] args) throws Exception {
    conf = new Configuration();
    conf.addResource(new Path("/home/james/hdfs-conf/hdfs-site.xml"));
    conf.addResource(new Path("/home/james/hdfs-conf/core-site.xml"));
    URI uri = FileSystem.getDefaultUri(conf);
    final FileSystem fs = FileSystem.get(uri, conf);

    for (int i = 0; i < 8000; i += 10) {
        final int i_copy = i;
        pool.submit(new Runnable() {
            public void run() {
                for (int j = 0; j < 10; j++) {
                    try {
                        createFile(fs, new Path("/home/james/large" + (i_copy + j)), 1024 * 1024);
                    } catch (IOException ioe) {
                        System.out.println(ioe);
                    }
                }
            }
        });
    }
    pool.shutdown();
    pool.awaitTermination(1, TimeUnit.DAYS);

    long start = Time.monotonicNow();
    Random r = new Random(0);
    for (int i = 0; i < 100; i++) {
        FSDataInputStream fdis = fs.open(new Path("/home/james/large" + r.nextInt(8000)), 512);
        byte[] buffer = new byte[512];

        for (int j = 0; j < 100; j++) {
            int offset = r.nextInt(1024 * 1024 - 511);
            fdis.read(offset, buffer, 0, 512);
        }
    }
    System.out.println("Time taken for 10000 random 512 byte reads: " + (Time.monotonicNow() - start) / 1000.0);

}

From source file:com.mellanox.r4h.DFSClient.java

License:Apache License

void updateLastLeaseRenewal() {
    synchronized (filesBeingWritten) {
        if (filesBeingWritten.isEmpty()) {
            return;
        }
        lastLeaseRenewal = Time.monotonicNow();
    }
}

From source file:com.mellanox.r4h.DFSClient.java

License:Apache License

/**
 * Renew leases.
 * 
 * @return true if lease was renewed. May return false if this
 *         client has been closed or has no files open.
 **/
boolean renewLease() throws IOException {
    if (clientRunning && !isFilesBeingWrittenEmpty()) {
        try {
            namenode.renewLease(clientName);
            updateLastLeaseRenewal();
            return true;
        } catch (IOException e) {
            // Abort if the lease has already expired.
            final long elapsed = Time.monotonicNow() - getLastLeaseRenewal();
            if (elapsed > HdfsConstants.LEASE_HARDLIMIT_PERIOD) {
                LOG.warn("Failed to renew lease for " + clientName + " for " + (elapsed / 1000)
                        + " seconds (>= hard-limit =" + (HdfsConstants.LEASE_HARDLIMIT_PERIOD / 1000)
                        + " seconds.) " + "Closing all files being written ...", e);
                closeAllFilesBeingWritten(true);
            } else {
                // Let the lease renewer handle it and retry.
                throw e;
            }
        }
    }
    return false;
}

From source file:com.mellanox.r4h.DFSClient.java

License:Apache License

/**
 * Get server default values for a number of configuration params.
 *
 * @see ClientProtocol#getServerDefaults()
 */
public FsServerDefaults getServerDefaults() throws IOException {
    long now = Time.monotonicNow();
    if ((serverDefaults == null) || (now - serverDefaultsLastUpdate > SERVER_DEFAULTS_VALIDITY_PERIOD)) {
        serverDefaults = namenode.getServerDefaults();
        serverDefaultsLastUpdate = now;
    }
    assert serverDefaults != null;
    return serverDefaults;
}

From source file:com.mellanox.r4h.DFSOutputStream.java

License:Apache License

private void completeFile(ExtendedBlock last) throws IOException {
    long localstart = Time.monotonicNow();
    long localTimeout = 400;
    boolean fileComplete = false;
    int retries = dfsClient.getConf().getnBlockWriteLocateFollowingRetry();
    while (!fileComplete) {
        fileComplete = dfsClient.namenode.complete(src, dfsClient.clientName, last, fileId);
        if (!fileComplete) {
            final int hdfsTimeout = dfsClient.getHdfsTimeout();
            if (!dfsClient.clientRunning
                    || (hdfsTimeout > 0 && localstart + hdfsTimeout < Time.monotonicNow())) {
                String msg = "Unable to close file because dfsclient "
                        + " was unable to contact the HDFS servers." + " clientRunning "
                        + dfsClient.clientRunning + " hdfsTimeout " + hdfsTimeout;
                DFSClient.LOG.info(msg);
                throw new IOException(msg);
            }
            try {
                if (retries == 0) {
                    throw new IOException("Unable to close file because the last block"
                            + " does not have enough number of replicas.");
                }
                retries--;
                Thread.sleep(localTimeout);
                localTimeout *= 2;
                if (Time.monotonicNow() - localstart > 5000) {
                    DFSClient.LOG.info("Could not complete " + src + " retrying...");
                }
            } catch (InterruptedException ie) {
                DFSClient.LOG.warn("Caught exception ", ie);
            }
        }
    }
}

From source file:com.mellanox.r4h.LeaseRenewer.java

License:Apache License

/** Is the empty period longer than the grace period? */
private synchronized boolean isRenewerExpired() {
    return emptyTime != Long.MAX_VALUE && Time.monotonicNow() - emptyTime > gracePeriod;
}

From source file:com.mellanox.r4h.LeaseRenewer.java

License:Apache License

/** Close a file. */
void closeFile(final long inodeId, final DFSClient dfsc) {
    dfsc.removeFileBeingWritten(inodeId);

    synchronized (this) {
        if (dfsc.isFilesBeingWrittenEmpty()) {
            dfsclients.remove(dfsc);
        }
        // update emptyTime if necessary
        if (emptyTime == Long.MAX_VALUE) {
            for (DFSClient c : dfsclients) {
                if (!c.isFilesBeingWrittenEmpty()) {
                    // found a non-empty file-being-written map
                    return;
                }
            }
            // discover the first time that all file-being-written maps are empty.
            emptyTime = Time.monotonicNow();
        }
    }
}