List of usage examples for java.util.concurrent.locks.Lock#unlock()
void unlock();
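Before the examples, a minimal sketch of the standard acquire/release idiom (not taken from any of the source files below, names are illustrative): lock() is called before the try block, so the unlock() in the finally block only runs when the lock is actually held.

    import java.util.concurrent.locks.Lock;
    import java.util.concurrent.locks.ReentrantLock;

    public class UnlockIdiom {
        private final Lock lock = new ReentrantLock();
        private int counter;

        public void increment() {
            lock.lock();        // acquire before entering the try block
            try {
                counter++;      // critical section
            } finally {
                lock.unlock();  // always release, even if the body throws
            }
        }
    }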
From source file:gridool.db.sql.ParallelSQLExecJob.java
private static String invokeCsvOutputReduce(final Connection conn, final String reduceQuery,
        final String outputTableName, final ReadWriteLock rwlock, final Timings timings) throws GridException {
    File colDir = GridUtils.getWorkDir(true);
    final File outFile = new File(colDir, outputTableName + ".csv");
    final CsvWriter writer = new CsvWriter(outFile);
    final MutableBoolean first = new MutableBoolean(true);
    final MutableLong ansGenStart = new MutableLong(-1L);
    final ResultSetHandler rsh = new ResultSetHandler() {
        public Object handle(ResultSet rs) throws SQLException {
            if (first.getBoolean()) {
                ansGenStart.setValue(System.currentTimeMillis());
                first.setBoolean(false);
            }
            int numRows = writer.writeAll(rs, MONETDB_NULL_STRING, false);
            if (LOG.isInfoEnabled()) {
                LOG.info("Result row count: " + numRows);
            }
            return null;
        }
    };
    if (LOG.isInfoEnabled()) {
        LOG.info("Executing a Reduce SQL query: \n" + reduceQuery);
    }
    final Lock rlock = rwlock.readLock();
    try {
        rlock.lock();
        conn.setReadOnly(true);
        JDBCUtils.query(conn, reduceQuery, rsh);
    } catch (SQLException e) {
        String errmsg = "failed running a reduce query: " + reduceQuery;
        LOG.error(errmsg, e);
        try {
            conn.rollback();
        } catch (SQLException rbe) {
            LOG.warn("Rollback failed", rbe);
        }
        throw new GridException(errmsg, e);
    } finally {
        rlock.unlock();
        writer.close();
    }
    long answerGenStart = ansGenStart.getValue();
    long answerGenTime = (answerGenStart == -1L) ? 0L : System.currentTimeMillis() - answerGenStart;
    timings.setAnswerGenerationTime(answerGenTime);
    if (!outFile.exists()) {
        throw new IllegalStateException("Output file does not exist:" + outFile.getAbsolutePath());
    }
    return outFile.getAbsolutePath();
}
From source file:gridool.db.sql.ParallelSQLExecJob.java
private static String invokeStringOutputReduce(final Connection conn, final String reduceQuery,
        final ReadWriteLock rwlock) throws GridException {
    final ResultSetHandler rsh = new ResultSetHandler() {
        public String handle(ResultSet rs) throws SQLException {
            if (rs.next()) {
                String firstResult = rs.getString(1);
                return firstResult;
            }
            return null;
        }
    };
    if (LOG.isInfoEnabled()) {
        LOG.info("Executing a Reduce SQL query: \n" + reduceQuery);
    }
    final String result;
    final Lock rlock = rwlock.readLock();
    try {
        rlock.lock();
        conn.setReadOnly(true);
        result = (String) JDBCUtils.query(conn, reduceQuery, rsh);
    } catch (SQLException e) {
        String errmsg = "failed running a reduce query: " + reduceQuery;
        LOG.error(errmsg, e);
        try {
            conn.rollback();
        } catch (SQLException rbe) {
            LOG.warn("Rollback failed", rbe);
        }
        throw new GridException(errmsg, e);
    } finally {
        rlock.unlock();
    }
    return result;
}
From source file:org.rhq.core.pc.PluginContainer.java
private void releaseLock(Lock lock) {
    if (lock != null) {
        lock.unlock();
    }
}
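A null-safe release helper like the one above pairs naturally with an acquire method that may return null. A minimal, self-contained sketch of that pattern (hypothetical names, not part of PluginContainer):

    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.locks.Lock;
    import java.util.concurrent.locks.ReentrantLock;

    public class NullSafeRelease {
        private final Lock lock = new ReentrantLock();

        // Returns the lock if it was acquired within the timeout, or null otherwise.
        private Lock tryAcquire(long seconds) throws InterruptedException {
            return lock.tryLock(seconds, TimeUnit.SECONDS) ? lock : null;
        }

        private void releaseLock(Lock acquired) {
            if (acquired != null) {
                acquired.unlock();
            }
        }

        public void doWork() throws InterruptedException {
            Lock acquired = tryAcquire(5);
            try {
                if (acquired != null) {
                    // guarded work here
                }
            } finally {
                releaseLock(acquired); // unlock() only runs when the lock was actually obtained
            }
        }
    }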
From source file:org.springframework.integration.jdbc.lock.JdbcLockRegistryDifferentClientTests.java
@Test
public void testOnlyOneLock() throws Exception {
    for (int i = 0; i < 100; i++) {
        final List<String> locked = new ArrayList<String>();
        final CountDownLatch latch = new CountDownLatch(20);
        ExecutorService pool = Executors.newFixedThreadPool(6);
        ArrayList<Callable<Boolean>> tasks = new ArrayList<Callable<Boolean>>();
        for (int j = 0; j < 20; j++) {
            final DefaultLockRepository client = new DefaultLockRepository(this.dataSource);
            client.afterPropertiesSet();
            this.context.getAutowireCapableBeanFactory().autowireBean(client);
            Callable<Boolean> task = () -> {
                Lock lock = new JdbcLockRegistry(client).obtain("foo");
                try {
                    if (locked.isEmpty() && lock.tryLock()) {
                        if (locked.isEmpty()) {
                            locked.add("done");
                            return true;
                        }
                    }
                } finally {
                    try {
                        lock.unlock();
                    } catch (Exception e) {
                        // ignore
                    }
                    latch.countDown();
                }
                return false;
            };
            tasks.add(task);
        }
        logger.info("Starting: " + i);
        pool.invokeAll(tasks);
        assertTrue(latch.await(10, TimeUnit.SECONDS));
        // eventually they both get the lock and release it
        assertEquals(1, locked.size());
        assertTrue(locked.contains("done"));
    }
}
From source file:DemandCache.java
/**
 * Performs an access operation on a cache item, causing it to live longer in the cache
 *
 * @param key The key of the item to access
 * @param weight The weight of the access--higher weight will result in a more persistent cache
 *        item. 1-10 are supported. A guideline is that the cache item will survive longer by
 *        {@link #getHalfLife()} <code>weight</code>.
 * @see #ACCESS_GET
 * @see #ACCESS_SET
 */
public void access(K key, int weight) {
    Lock lock = theLock.readLock();
    lock.lock();
    try {
        CacheValue value = theCache.get(key);
        if (value != null)
            _access(value, weight);
    } finally {
        lock.unlock();
    }
}
From source file:com.mastfrog.netty.http.client.CookieStore.java
@Override
public Iterator<Cookie> iterator() {
    Lock readLock = lock.readLock();
    List<Cookie> cks = new ArrayList<Cookie>();
    readLock.lock();
    try {
        cks.addAll(cookies);
    } finally {
        readLock.unlock();
    }
    Collections.sort(cks);
    return cks.iterator();
}
From source file:com.cloudera.oryx.ml.serving.als.model.ALSServingModel.java
void setUserVector(String user, float[] vector) {
    Preconditions.checkNotNull(vector);
    Preconditions.checkArgument(vector.length == features);
    Lock lock = xLock.writeLock();
    lock.lock();
    try {
        if (X.put(user, vector) == null) {
            // User was actually new
            recentNewUsers.add(user);
        }
    } finally {
        lock.unlock();
    }
}
From source file:com.cip.crane.agent.sheduler.KillTaskThread.java
@Override
public void run() {
    Lock lock = LockHelper.getLock(jobInstanceId);
    try {
        lock.lock();
        ScheduleConf conf = (ScheduleConf) cs.getConf(localIp, jobInstanceId);
        ScheduleStatus status = (ScheduleStatus) cs.getStatus(localIp, jobInstanceId);
        LOGGER.info("KILL TASK THREAD|jobInstanceId:" + jobInstanceId + "| ip:" + localIp
                + " |conf:" + conf + " |status:" + status);
        if (conf != null && status != null) {
            killTask(localIp, conf, status);
        }
    } catch (Exception e) {
        LOGGER.error(e, e);
    } finally {
        lock.unlock();
    }
}
From source file:org.apache.lucene.gdata.search.index.IndexController.java
private void createNewDeleteAllEntriesTask(final ServerBaseFeed feed) {
    checkDestroyed();
    checkInitialized();
    if (LOG.isInfoEnabled())
        LOG.info("Deleting all entries for feed dispatch new IndexDocumentBuilder -- " + feed.getId());
    String serviceName = feed.getServiceConfig().getName();
    ServiceIndex bean = this.indexerMap.get(serviceName);
    if (bean == null)
        throw new RuntimeException("no indexer for service " + serviceName + " registered");
    Lock lock = bean.getLock();
    lock.lock();
    try {
        IndexDocumentBuilder<IndexDocument> callable = new IndexFeedDeleteTask(feed.getId());
        sumbitTask(callable, bean.getIndexer());
    } finally {
        lock.unlock();
    }
}
From source file:com.mycollab.module.ecm.service.impl.DriveInfoServiceImpl.java
@Override
public void saveOrUpdateDriveInfo(@CacheKey DriveInfo driveInfo) {
    Integer sAccountId = driveInfo.getSaccountid();
    DriveInfoExample ex = new DriveInfoExample();
    ex.createCriteria().andSaccountidEqualTo(sAccountId);
    Lock lock = DistributionLockUtil.getLock("ecm-service" + sAccountId);
    try {
        if (lock.tryLock(15, TimeUnit.SECONDS)) {
            if (driveInfoMapper.countByExample(ex) > 0) {
                driveInfo.setId(null);
                driveInfoMapper.updateByExampleSelective(driveInfo, ex);
            } else {
                driveInfoMapper.insert(driveInfo);
            }
        }
    } catch (Exception e) {
        LOG.error("Error while save drive info " + BeanUtility.printBeanObj(driveInfo), e);
    } finally {
        DistributionLockUtil.removeLock("ecm-service" + sAccountId);
        lock.unlock();
    }
}
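The example above calls unlock() in the finally block even when tryLock(15, TimeUnit.SECONDS) returned false; for a standard ReentrantLock that would throw IllegalMonitorStateException (the behaviour of the distributed lock returned by DistributionLockUtil depends on its implementation). A minimal sketch of the guarded variant, using a plain ReentrantLock purely for illustration:

    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.locks.Lock;
    import java.util.concurrent.locks.ReentrantLock;

    public class GuardedTryLock {
        private final Lock lock = new ReentrantLock();

        public void update() throws InterruptedException {
            // Remember whether the lock was actually obtained before unlocking it.
            boolean acquired = lock.tryLock(15, TimeUnit.SECONDS);
            try {
                if (acquired) {
                    // perform the update while holding the lock
                }
            } finally {
                if (acquired) {
                    lock.unlock(); // unlock only when tryLock() succeeded
                }
            }
        }
    }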