List of usage examples for java.util.concurrent.locks ReentrantReadWriteLock readLock
public ReentrantReadWriteLock.ReadLock readLock()
From source file:org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationImpl.java
public ApplicationImpl(Dispatcher dispatcher, String user, ApplicationId appId, Credentials credentials, Context context, String userFolder, int x509Version, long jwtExpiration) { this.dispatcher = dispatcher; this.user = user; this.userFolder = userFolder; this.appId = appId; this.credentials = credentials; this.aclsManager = context.getApplicationACLsManager(); this.context = context; this.x509Version = new AtomicInteger(x509Version); this.jwtExpiration = new AtomicLong(jwtExpiration); ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); readLock = lock.readLock(); writeLock = lock.writeLock();/*from w ww. j a va 2 s . co m*/ stateMachine = stateMachineFactory.make(this); }
From source file:org.apache.hadoop.yarn.server.nodemanager.DirectoryCollection.java
/** * Create collection for the directories specified. Users must specify the * maximum percentage of disk utilization allowed and the minimum amount of * free space that must be available for the dir to be used. If either check * fails the dir is removed from the good dirs list. * /* w ww . ja v a 2 s . c om*/ * @param dirs * directories to be monitored * @param utilizationPercentageCutOffHigh * percentage of disk that can be used before the dir is taken out of * the good dirs list * @param utilizationPercentageCutOffLow * percentage of disk that can be used when the dir is moved from * the bad dirs list to the good dirs list * @param utilizationSpaceCutOff * minimum space, in MB, that must be available on the disk for the * dir to be marked as good * */ public DirectoryCollection(String[] dirs, float utilizationPercentageCutOffHigh, float utilizationPercentageCutOffLow, long utilizationSpaceCutOff) { localDirs = new CopyOnWriteArrayList<>(dirs); errorDirs = new CopyOnWriteArrayList<>(); fullDirs = new CopyOnWriteArrayList<>(); ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); this.readLock = lock.readLock(); this.writeLock = lock.writeLock(); diskUtilizationPercentageCutoffHigh = Math.max(0.0F, Math.min(100.0F, utilizationPercentageCutOffHigh)); diskUtilizationPercentageCutoffLow = Math.max(0.0F, Math.min(diskUtilizationPercentageCutoffHigh, utilizationPercentageCutOffLow)); diskUtilizationSpaceCutoff = utilizationSpaceCutOff < 0 ? 0 : utilizationSpaceCutOff; dirsChangeListeners = Collections.newSetFromMap(new ConcurrentHashMap<DirsChangeListener, Boolean>()); }
From source file:org.openbel.framework.api.DefaultKamCacheService.java
/** * Creates a default KAM cache service with a supplied {@link KamStore}. * /*from w w w . jav a 2 s . com*/ * @param kamStore {@link KamStore} * @throws InvalidArgument Thrown if {@code kamStore} is null */ public DefaultKamCacheService(KamStore kamStore) { if (kamStore == null) { throw new InvalidArgument("kamStore", kamStore); } this.kamStore = kamStore; kamMap = new HashMap<String, Kam>(); unfltrdMap = new HashMap<KamInfo, String>(); fltrdMap = new HashMap<FilteredKAMKey, String>(); final ReentrantReadWriteLock rwlock = new ReentrantReadWriteLock(true); read = rwlock.readLock(); write = rwlock.writeLock(); execSvc = newFixedThreadPool(CONCURRENT_LOAD, new _ThreadFactory()); }
From source file:org.apache.hadoop.hbase.quotas.FileArchiverNotifierImpl.java
/**
 * Captures the connection, configuration, filesystem, and table this
 * notifier operates on, and builds the read/write lock pair that guards the
 * notifier's internal state.
 */
public FileArchiverNotifierImpl(Connection conn, Configuration conf, FileSystem fs, TableName tn) {
    this.conn = conn;
    this.conf = conf;
    this.fs = fs;
    this.tn = tn;
    ReentrantReadWriteLock rwLock = new ReentrantReadWriteLock();
    readLock = rwLock.readLock();
    writeLock = rwLock.writeLock();
}
From source file:org.apache.hadoop.yarn.server.resourcemanager.ResourceTrackerService.java
/**
 * Wires the tracker service to its RM collaborators (node list, liveliness
 * monitor, token secret managers) and creates the read/write lock pair used
 * to guard service state.
 */
public ResourceTrackerService(RMContext rmContext, NodesListManager nodesListManager,
        NMLivelinessMonitor nmLivelinessMonitor, RMContainerTokenSecretManager containerTokenSecretManager,
        NMTokenSecretManagerInRM nmTokenSecretManager) {
    super(ResourceTrackerService.class.getName());
    this.rmContext = rmContext;
    this.nodesListManager = nodesListManager;
    this.nmLivelinessMonitor = nmLivelinessMonitor;
    this.containerTokenSecretManager = containerTokenSecretManager;
    this.nmTokenSecretManager = nmTokenSecretManager;
    ReentrantReadWriteLock rwLock = new ReentrantReadWriteLock();
    this.readLock = rwLock.readLock();
    this.writeLock = rwLock.writeLock();
}
From source file:org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerImpl.java
public RMContainerImpl(Container container, ApplicationAttemptId appAttemptId, NodeId nodeId, String user, RMContext rmContext, long creationTime, String nodeLabelExpression) { this.stateMachine = stateMachineFactory.make(this); this.containerId = container.getId(); this.nodeId = nodeId; this.container = container; this.appAttemptId = appAttemptId; this.user = user; this.creationTime = creationTime; this.rmContext = rmContext; this.eventHandler = rmContext.getDispatcher().getEventHandler(); this.containerAllocationExpirer = rmContext.getContainerAllocationExpirer(); this.isAMContainer = false; this.resourceRequests = null; this.nodeLabelExpression = nodeLabelExpression; this.lastConfirmedResource = container.getResource(); ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); this.readLock = lock.readLock(); this.writeLock = lock.writeLock(); saveNonAMContainerMetaInfo = rmContext.getYarnConfiguration().getBoolean( YarnConfiguration.APPLICATION_HISTORY_SAVE_NON_AM_CONTAINER_META_INFO, YarnConfiguration.DEFAULT_APPLICATION_HISTORY_SAVE_NON_AM_CONTAINER_META_INFO); rmContext.getRMApplicationHistoryWriter().containerStarted(this); // If saveNonAMContainerMetaInfo is true, store system metrics for all // containers. If false, and if this container is marked as the AM, metrics // will still be published for this container, but that calculation happens // later.//w w w. j a va 2s. co m if (saveNonAMContainerMetaInfo) { rmContext.getSystemMetricsPublisher().containerCreated(this, this.creationTime); } }
From source file:org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler.java
/**
 * Construct the service.
 *
 * @param name service name
 */
public AbstractYarnScheduler(String name) {
    super(name);
    // One lock instance backs both views so max-allocation reads and
    // updates synchronize against each other.
    ReentrantReadWriteLock rwLock = new ReentrantReadWriteLock();
    this.maxAllocReadLock = rwLock.readLock();
    this.maxAllocWriteLock = rwLock.writeLock();
}
From source file:org.alfresco.encryption.AlfrescoKeyStoreImpl.java
/**
 * Creates an empty key store holder: initializes the active and backup key
 * maps and the read/write lock pair protecting them.
 */
public AlfrescoKeyStoreImpl() {
    ReentrantReadWriteLock rwLock = new ReentrantReadWriteLock();
    readLock = rwLock.readLock();
    writeLock = rwLock.writeLock();
    this.keys = new KeyMap();
    this.backupKeys = new KeyMap();
}
From source file:org.apache.zeppelin.interpreter.InterpreterSetting.java
public InterpreterSetting() { ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); this.id = generateId(); interpreterGroupReadLock = lock.readLock(); interpreterGroupWriteLock = lock.writeLock(); }
From source file:org.soaplab.services.storage.FileStorage.java
/**
 * Returns either the read or the write side of the per-directory
 * {@link ReentrantReadWriteLock} for {@code jobDir}, lazily creating the
 * lock on first use. The method is {@code synchronized}, so concurrent
 * callers can never create two locks for the same directory.
 *
 * NOTE(review): the original comment claimed that locks with no queued
 * threads are pruned once the map holds more than 100 entries; that pruning
 * is not performed in this method — confirm it happens elsewhere.
 *
 * @param jobDir directory whose lock is requested
 * @param read {@code true} for the read lock, {@code false} for the write lock
 * @return the requested lock side
 */
private synchronized Lock getLock(String jobDir, boolean read) {
    // computeIfAbsent replaces the manual get / null-check / put dance with
    // a single lookup-or-create map operation.
    ReentrantReadWriteLock lock = locks.computeIfAbsent(jobDir, dir -> new ReentrantReadWriteLock());
    return read ? lock.readLock() : lock.writeLock();
}