List of usage examples for org.apache.hadoop.hdfs.DFSConfigKeys#DFS_BLOCK_SIZE_DEFAULT
Declared as: long DFS_BLOCK_SIZE_DEFAULT
To view the source code for org.apache.hadoop.hdfs.DFSConfigKeys#DFS_BLOCK_SIZE_DEFAULT, click the Source Link.
From source file: com.bigstep.datalake.DLFileSystem.java
License: Apache License
@Override public long getDefaultBlockSize() { return getConf().getLongBytes(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT); }
From source file: net.arp7.HdfsPerfTest.WriteFileParameters.java
License: Apache License
/** * Initialize some write parameters from the configuration. * * @param conf/*from ww w . ja v a2s . c om*/ */ private void initDefaultsFromConfiguration(Configuration conf) { blockSize = conf.getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT); replication = conf.getLong(DFSConfigKeys.DFS_REPLICATION_KEY, DFSConfigKeys.DFS_REPLICATION_DEFAULT); }
From source file: org.kaaproject.kaa.server.flume.sink.hdfs.BucketWriter.java
License: Apache License
/** * doOpen() must only be called by open(). *//*w ww . ja v a 2s . c o m*/ private void doOpen(long serial) throws IOException { if ((filePath == null) || (writer == null)) { throw new IOException("Invalid file settings"); } Configuration config = new Configuration(); // disable FileSystem JVM shutdown hook config.setBoolean("fs.automatic.close", false); long blockSize = DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT; if (defaultBlockSize > 0) { blockSize = defaultBlockSize; } config.set(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, "" + blockSize); // Hadoop is not thread safe when doing certain RPC operations, // including getFileSystem(), when running under Kerberos. // open() must be called by one thread at a time in the JVM. // NOTE: tried synchronizing on the underlying Kerberos principal previously // which caused deadlocks. See FLUME-1231. synchronized (staticLock) { try { bucketPath = filePath + "." + serial; // Need to get reference to FS using above config before underlying // writer does in order to avoid shutdown hook & IllegalStateExceptions fileSystem = new Path(bucketPath).getFileSystem(config); String currentBucket = bucketPath + IN_USE_EXT; LOG.debug("Creating " + currentBucket); writer.open(currentBucket); } catch (Exception ex) { sinkCounter.incrementConnectionFailedCount(); if (ex instanceof IOException) { throw (IOException) ex; } else { throw Throwables.propagate(ex); } } } sinkCounter.incrementConnectionCreatedCount(); resetCounters(); // if time-based rolling is enabled, schedule the roll if (rollInterval > 0) { Callable<Void> action = new Callable<Void>() { @Override public Void call() throws Exception { LOG.debug("Rolling file ({}): Roll scheduled after {} sec elapsed.", bucketPath + IN_USE_EXT, rollInterval); try { close(); } catch (Throwable throwable) { //NOSONAR LOG.error("Unexpected error", throwable); } return null; } }; timedRollFuture = timedRollerPool.schedule(action, rollInterval, TimeUnit.SECONDS); } isOpen = true; }