Example usage for java.util.concurrent.locks ReentrantLock ReentrantLock

Introduction

On this page you can find example usage for java.util.concurrent.locks ReentrantLock ReentrantLock.

Prototype

public ReentrantLock() 

Document

Creates an instance of ReentrantLock.
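
A minimal, self-contained sketch of the canonical pattern for a lock created with this constructor: acquire with lock(), do the work, and release in a finally block so the lock is freed even if the critical section throws. The Counter class below is illustrative only and is not taken from any of the projects listed on this page.

import java.util.concurrent.locks.ReentrantLock;

public class Counter {

    private final ReentrantLock lock = new ReentrantLock();
    private long value;

    public void increment() {
        lock.lock();           // blocks until the lock is available
        try {
            value++;           // critical section
        } finally {
            lock.unlock();     // always release, even if the body throws
        }
    }

    public boolean tryIncrement() {
        if (!lock.tryLock()) { // non-blocking attempt; gives up if another thread holds the lock
            return false;
        }
        try {
            value++;
            return true;
        } finally {
            lock.unlock();
        }
    }
}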

Usage

From source file:org.wso2.carbon.mediation.initializer.persistence.MediationPersistenceManager.java

protected Lock getLock(AxisConfiguration axisConfig) {
    Parameter p = axisConfig.getParameter(ServiceBusConstants.SYNAPSE_CONFIG_LOCK);
    if (p != null) {
        return (Lock) p.getValue();
    } else {
        log.warn(ServiceBusConstants.SYNAPSE_CONFIG_LOCK + " is null, creating a new lock");
        Lock lock = new ReentrantLock();
        try {
            axisConfig.addParameter(ServiceBusConstants.SYNAPSE_CONFIG_LOCK, lock);
            return lock;
        } catch (AxisFault axisFault) {
            log.error("Error while setting " + ServiceBusConstants.SYNAPSE_CONFIG_LOCK);
        }
    }

    return null;
}
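
A note on the example above: the getParameter/addParameter sequence is a check-then-act, so two threads calling getLock at the same time could each create and register a different lock unless the surrounding framework already serializes configuration changes. Where that matters, a concurrent map keyed by the configuration object creates the lock atomically; the FastLocalCacheProvider example further down this page uses the same idiom. The sketch below is a hedged alternative with invented names and is not part of the original class or of the Axis2 API.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

// Hypothetical registry: one lock per configuration object, created atomically.
public class ConfigLockRegistry {

    private final ConcurrentMap<Object, Lock> locks = new ConcurrentHashMap<>();

    public Lock getLock(Object axisConfig) {
        // computeIfAbsent guarantees that every caller sees the same Lock
        // instance for a given key, even when several threads race to create it.
        return locks.computeIfAbsent(axisConfig, cfg -> new ReentrantLock());
    }
}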

From source file:com.funambol.pushlistener.service.taskexecutor.ScheduledTaskExecutor.java

/**
 * Plans an execution of the given TaskWrapper (the execution will be performed as soon
 * as a thread is available and the delay expires).
 * <p>IMPORTANT NOTE: the caller must lock the task before calling this method
 * using getHandlingTaskLock to obtain a lock instance.
 * @return true if the task execution has been planned to be performed as soon as
 *         possible, false otherwise
 *         (the same task or another equal task is waiting for its execution or
 *         is already running)
 * @param newTask the task to execute
 * @param delay the delay in the execution
 */
private boolean submitTaskWrapper(TaskWrapper newTask, long delay) {

    if (newTask == null) {
        throw new IllegalArgumentException("Task must not be null");
    }

    ScheduledFuture future = null;
    TaskWrapper task = null;

    //
    // The caller must lock the task
    //
    synchronized (taskFutures) {
        future = (ScheduledFuture) taskFutures.get(newTask);
        task = (TaskWrapper) taskFutures.getKey(future);
    }

    //
    // A null task means that taskFutures does not contain this task yet
    // (first execution?)
    //
    if (task == null) {
        task = newTask;
    } else {
        //
        // There is already an equal task in the taskFutures map. Try to
        // queue a new execution of it.
        //
        boolean queued = false;

        queued = task.queueNewExecution();

        if (queued) {
            if (log.isTraceEnabled()) {
                log.trace("Execution of '" + task + "' queued");
            }
            return false;
        }

    }

    //
    // Use the execution lock to prevent the task from running before it
    // has been put in the taskFutures map.
    // See TaskWrapper.execute
    //
    Lock taskExecutionLock = new ReentrantLock();
    taskExecutionLock.lock();
    try {
        task.setExecutionLock(taskExecutionLock);

        future = schedule(task, delay, TimeUnit.MILLISECONDS);

        synchronized (taskFutures) {
            taskFutures.put(task, future);
        }
    } finally {
        taskExecutionLock.unlock();
    }
    return true;
}
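
The execution lock used above is a hand-off gate: the scheduling thread creates a ReentrantLock, holds it while it registers the ScheduledFuture in the bookkeeping map, and only releases it afterwards, while TaskWrapper.execute (per the comment in the code) acquires the same lock before running, so the task cannot start until the bookkeeping is complete. The condensed sketch below shows that shape with invented names (scheduler, futures, taskId); it is an illustration of the pattern under those assumptions, not the pushlistener API.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantLock;

public class HandOffScheduler {

    private final ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(2);
    private final Map<String, ScheduledFuture<?>> futures = new ConcurrentHashMap<>();

    public void schedule(String taskId, Runnable body, long delayMillis) {
        ReentrantLock executionLock = new ReentrantLock();
        executionLock.lock();                  // hold the gate while bookkeeping happens
        try {
            ScheduledFuture<?> future = scheduler.schedule(() -> {
                executionLock.lock();          // the task waits here until bookkeeping is done
                executionLock.unlock();        // the gate is only needed once
                body.run();
            }, delayMillis, TimeUnit.MILLISECONDS);

            futures.put(taskId, future);       // registered before the task is allowed to start
        } finally {
            executionLock.unlock();            // open the gate
        }
    }
}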

From source file:org.exoplatform.social.core.storage.impl.ActivityStreamStorageImpl.java

private void manageRefList(UpdateContext context, ActivityEntity activityEntity, ActivityRefType type,
        boolean mustCheck) throws NodeNotFoundException {

    AtomicBoolean newYearMonthday = new AtomicBoolean(false);
    if (context.getAdded() != null) {
        for (Identity identity : context.getAdded()) {
            IdentityEntity identityEntity = identityStorage._findIdentityEntity(identity.getProviderId(),
                    identity.getRemoteId());

            if (mustCheck) {
                // avoid adding the activity back to a stream where it already exists
                if (isExistingActivityRef(identityEntity, activityEntity))
                    continue;
            }

            ActivityRefListEntity listRef = type.refsOf(identityEntity);
            //keep number
            Integer oldNumberOfStream = listRef.getNumber();

            newYearMonthday.set(false);
            ActivityRef ref = null;

            // Make sure the YearMonthDay path doesn't throw an ADD_PROPERTY exception.

            final ReentrantLock lock = new ReentrantLock();
            try {
                lock.lock();
                newYearMonthday.set(false);
                ref = listRef.getOrCreated(activityEntity, newYearMonthday);
                if (newYearMonthday.get()) {
                    StorageUtils.persist();
                }
            } catch (ChromatticException ex) {
                Throwable throwable = ex.getCause();
                if (throwable instanceof ItemExistsException || throwable instanceof InvalidItemStateException
                        || throwable instanceof PathNotFoundException) {
                    LOG.warn("Probably YearMonthDay path was created by another session");
                    LOG.debug(ex.getMessage(), ex);
                } else {
                    LOG.warn("Probably YearMonthDay path was created by another session", ex);
                    LOG.debug(ex.getMessage(), ex);
                }
                return;
            } finally {
                lock.unlock();
            }

            //LOG.info("manageRefList()::BEFORE");
            //printDebug(listRef, activityEntity.getLastUpdated());
            if (ref.getName() == null) {
                ref.setName(activityEntity.getName());
            }

            if (ref.getLastUpdated() == null) {
                ref.setLastUpdated(activityEntity.getLastUpdated());
            }

            ref.setActivityEntity(activityEntity);

            Integer newNumberOfStream = listRef.getNumber();
            //If activity is hidden, we must decrease the number of activity references
            HidableEntity hidableActivity = _getMixin(activityEntity, HidableEntity.class, true);
            if (hidableActivity.getHidden() && (newNumberOfStream > oldNumberOfStream)) {
                ref.getDay().desc();
            }

            //LOG.info("manageRefList()::AFTER");
            //printDebug(listRef, activityEntity.getLastUpdated());
        }
    }

    if (context.getRemoved() != null) {

        for (Identity identity : context.getRemoved()) {
            IdentityEntity identityEntity = identityStorage._findIdentityEntity(identity.getProviderId(),
                    identity.getRemoteId());

            ActivityRefListEntity listRef = type.refsOf(identityEntity);
            listRef.remove(activityEntity);
        }
    }
}

From source file:org.apache.solr.core.SolrCore.java

/**
 * Creates a new core and registers it in the list of cores. If a core with the
 * same name already exists, it will be stopped and replaced by this one.
 *
 * @param dataDir
 *          the index directory
 * @param config
 *          a solr config instance
 * @param schema
 *          a solr schema instance
 *
 * @since solr 1.3
 */
public SolrCore(String name, String dataDir, SolrConfig config, IndexSchema schema,
        NamedList configSetProperties, CoreDescriptor coreDescriptor, UpdateHandler updateHandler,
        IndexDeletionPolicyWrapper delPolicy, SolrCore prev) {

    assert ObjectReleaseTracker.track(searcherExecutor); // ensure that in unclean shutdown tests we still close this

    this.coreDescriptor = Objects.requireNonNull(coreDescriptor, "coreDescriptor cannot be null");
    setName(name);
    MDCLoggingContext.setCore(this);

    resourceLoader = config.getResourceLoader();
    this.solrConfig = config;
    this.configSetProperties = configSetProperties;
    // Initialize the metrics manager
    this.coreMetricManager = initCoreMetricManager(config);

    if (updateHandler == null) {
        directoryFactory = initDirectoryFactory();
        solrCoreState = new DefaultSolrCoreState(directoryFactory);
    } else {
        solrCoreState = updateHandler.getSolrCoreState();
        directoryFactory = solrCoreState.getDirectoryFactory();
        isReloaded = true;
    }

    this.dataDir = initDataDir(dataDir, config, coreDescriptor);
    this.ulogDir = initUpdateLogDir(coreDescriptor);

    log.info("[{}] Opening new SolrCore at [{}], dataDir=[{}]", logid, resourceLoader.getInstancePath(),
            this.dataDir);

    checkVersionFieldExistsInSchema(schema, coreDescriptor);

    SolrMetricManager metricManager = this.coreDescriptor.getCoreContainer().getMetricManager();

    // initialize searcher-related metrics
    newSearcherCounter = metricManager.counter(coreMetricManager.getRegistryName(), "new",
            Category.SEARCHER.toString());
    newSearcherTimer = metricManager.timer(coreMetricManager.getRegistryName(), "time",
            Category.SEARCHER.toString(), "new");
    newSearcherWarmupTimer = metricManager.timer(coreMetricManager.getRegistryName(), "warmup",
            Category.SEARCHER.toString(), "new");
    newSearcherMaxReachedCounter = metricManager.counter(coreMetricManager.getRegistryName(), "maxReached",
            Category.SEARCHER.toString(), "new");
    newSearcherOtherErrorsCounter = metricManager.counter(coreMetricManager.getRegistryName(), "errors",
            Category.SEARCHER.toString(), "new");

    // Initialize JMX
    this.infoRegistry = initInfoRegistry(name, config);
    infoRegistry.put("fieldCache", new SolrFieldCacheMBean());

    initSchema(config, schema);

    this.maxWarmingSearchers = config.maxWarmingSearchers;
    this.slowQueryThresholdMillis = config.slowQueryThresholdMillis;

    booleanQueryMaxClauseCount();

    final CountDownLatch latch = new CountDownLatch(1);

    try {

        initListeners();

        this.snapshotMgr = initSnapshotMetaDataManager();
        this.solrDelPolicy = initDeletionPolicy(delPolicy);

        this.codec = initCodec(solrConfig, this.schema);

        memClassLoader = new MemClassLoader(PluginBag.RuntimeLib.getLibObjects(this,
                solrConfig.getPluginInfos(PluginBag.RuntimeLib.class.getName())), getResourceLoader());
        initIndex(prev != null);

        initWriters();
        qParserPlugins.init(createInstances(QParserPlugin.standardPlugins), this);
        valueSourceParsers.init(ValueSourceParser.standardValueSourceParsers, this);
        transformerFactories.init(TransformerFactory.defaultFactories, this);
        loadSearchComponents();
        updateProcessors.init(Collections.emptyMap(), this);

        // Processors initialized before the handlers
        updateProcessorChains = loadUpdateProcessorChains();
        reqHandlers = new RequestHandlers(this);
        reqHandlers.initHandlersFromConfig(solrConfig);

        statsCache = initStatsCache();

        // cause the executor to stall so firstSearcher events won't fire
        // until after inform() has been called for all components.
        // searchExecutor must be single-threaded for this to work
        searcherExecutor.submit(() -> {
            latch.await();
            return null;
        });

        this.updateHandler = initUpdateHandler(updateHandler);

        initSearcher(prev);

        // Initialize the RestManager
        restManager = initRestManager();

        // Finally tell anyone who wants to know
        resourceLoader.inform(resourceLoader);
        resourceLoader.inform(this); // last call before the latch is released.
    } catch (Throwable e) {
        // release the latch, otherwise we block trying to do the close. This
        // should be fine, since counting down on a latch of 0 is still fine
        latch.countDown();
        if (e instanceof OutOfMemoryError) {
            throw (OutOfMemoryError) e;
        }

        try {
            // close down the searcher and any other resources, if they exist, as this
            // is not recoverable
            close();
        } catch (Throwable t) {
            if (t instanceof OutOfMemoryError) {
                throw (OutOfMemoryError) t;
            }
            log.error("Error while closing", t);
        }

        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e.getMessage(), e);
    } finally {
        // allow firstSearcher events to fire and make sure it is released
        latch.countDown();
    }

    infoRegistry.put("core", this);

    // register any SolrInfoMBeans SolrResourceLoader initialized
    //
    // this must happen after the latch is released, because a JMX server impl may
    // choose to block on registering until properties can be fetched from an MBean,
    // and a SolrCoreAware MBean may have properties that depend on getting a Searcher
    // from the core.
    resourceLoader.inform(infoRegistry);

    // Allow the directory factory to register MBeans as well
    for (SolrInfoMBean bean : directoryFactory.offerMBeans()) {
        log.debug("Registering JMX bean [{}] from directory factory.", bean.getName());
        // Not worried about concurrency, so no reason to use putIfAbsent
        if (infoRegistry.containsKey(bean.getName())) {
            log.debug("Ignoring JMX bean [{}] due to name conflict.", bean.getName());
        } else {
            infoRegistry.put(bean.getName(), bean);
        }
    }

    // seed version buckets with max from index during core initialization ... requires a searcher!
    seedVersionBuckets();

    bufferUpdatesIfConstructing(coreDescriptor);

    this.ruleExpiryLock = new ReentrantLock();
    this.snapshotDelLock = new ReentrantLock();

    registerConfListener();

    assert ObjectReleaseTracker.track(this);
}

From source file:la2launcher.MainFrame.java

private void processValidation(boolean full) {
    final long initTime = new Date().getTime();
    final String patcherUrl = "http://" + updateHost + "/hf/updater.lst.la2";//new ArrayBlockingQueue<Runnable>(10000)
    final ThreadPoolExecutor tpe = new ThreadPoolExecutor(5, 5, 1, TimeUnit.HOURS,
            new ArrayBlockingQueue<Runnable>(10000));
    tpe.execute(new Runnable() {
        @Override
        public void run() {
            jTextArea2.setText("");
            try {
                if (full) {
                    jTextArea2.setText(jTextArea2.getText() + "\r\n?  ");
                } else {
                    jTextArea2.setText(jTextArea2.getText() + "\r\n?  system");
                }
                File patcher = File.createTempFile("la2", "la2");
                patcher.deleteOnExit();
                File patcherExt = File.createTempFile("la2", "la2");
                patcherExt.deleteOnExit();
                FileOutputStream fos = new FileOutputStream(patcher);
                CloseableHttpClient httpclient = HttpClients.createDefault();
                HttpGet httpGet = new HttpGet(patcherUrl);
                CloseableHttpResponse response1 = httpclient.execute(httpGet);

                HttpEntity entity1 = response1.getEntity();
                copyStream(entity1.getContent(), fos, null);
                response1.close();
                fos.close();
                jTextArea2.setText(jTextArea2.getText()
                        + "\r\n??  ? ?: " + patcherUrl);

                fixBzip2File(patcher);
                jTextArea2.setText(jTextArea2.getText() + "\r\n ?");

                BZip2CompressorInputStream bz = new BZip2CompressorInputStream(new FileInputStream(patcher));
                OutputStream pout = new FileOutputStream(patcherExt);
                copyStream(bz, pout, new CopyListener() {
                    @Override
                    public void transfered(int n) {
                        bytesRecieved += n;
                        bytesRecievedTotal += n;
                    }
                });
                pout.close();
                bz.close();
                jTextArea2.setText(jTextArea2.getText() + "\r\n? ?");

                if (full) {
                    jTextArea2.setText(jTextArea2.getText() + "\r\n  ");
                } else {
                    jTextArea2.setText(jTextArea2.getText()
                            + "\r\n     system");
                }

                DefaultTableModel model = (DefaultTableModel) jTable2.getModel();
                model.setRowCount(0);

                int filesCount = scanSumFilesCount(patcherExt, full);
                jProgressBar1.setMinimum(0);
                jProgressBar1.setMaximum(filesCount);
                jProgressBar1.setValue(0);
                jLabel4.setText("0/" + filesCount);
                scanSumFile(patcherExt, new SumHandler() {

                    private ReentrantLock lock = new ReentrantLock();

                    @Override
                    public void handle(MDNamePair pair) {
                        try {
                            jProgressBar1.setIndeterminate(false);
                            //lock.unlock();
                            tpe.execute(new Runnable() {
                                @Override
                                public void run() {
                                    try {
                                        lock.lock();
                                        //printMsg(pair.filename);
                                        String crc = digest(new File(gamePath + pair.filename));
                                        //printMsg("    : " + pair.crc);
                                        //printMsg("   ? ? : " + crc);
                                        if (!pair.crc.equals(crc)) {
                                            DefaultTableModel dtm = (DefaultTableModel) jTable2.getModel();
                                            dtm.addRow(new Object[] { pair.filename, false });
                                        }
                                        jProgressBar1.setValue(jProgressBar1.getValue() + 1);
                                        jLabel4.setText(jProgressBar1.getValue() + "/" + filesCount);
                                        lock.unlock();
                                    } catch (NoSuchAlgorithmException ex) {
                                        Logger.getLogger(MainFrame.class.getName()).log(Level.SEVERE, null, ex);
                                    } catch (IOException ex) {
                                        Logger.getLogger(MainFrame.class.getName()).log(Level.SEVERE, null, ex);
                                    } finally {
                                        //if (lock.isLocked()) lock.unlock();
                                    }
                                }
                            });
                        } finally {
                            //if (lock.isLocked()) lock.unlock();
                        }
                    }
                }, full);
            } catch (IOException ex) {
                Logger.getLogger(MainFrame.class.getName()).log(Level.SEVERE, null, ex);
            }
        }
    });

    jButton5.setEnabled(false);
    jButton6.setEnabled(false);
    jButton7.setEnabled(false);
    jButton8.setEnabled(false);
    jButton10.setEnabled(false);
    jProgressBar1.setIndeterminate(true);
    new Thread() {
        @Override
        public void run() {
            do {
                long millis = new Date().getTime();
                try {
                    sleep(300);
                } catch (InterruptedException ex) {
                    Logger.getLogger(MainFrame.class.getName()).log(Level.SEVERE, null, ex);
                }
                millis = new Date().getTime() - millis;
                BigDecimal totBig = new BigDecimal(bytesRecievedTotal / (1024 * 1024.0));
                totBig = totBig.setScale(2, BigDecimal.ROUND_CEILING);
                jLabel5.setText("?: " + (bytesRecieved / millis) + "KB/s. : "
                        + totBig + " MB");
                bytesRecieved = 0;
            } while (tpe.getActiveCount() > 0);
            tpe.shutdown();
            jButton5.setEnabled(true);
            jButton6.setEnabled(true);
            jButton7.setEnabled(true);
            jButton8.setEnabled(true);
            jButton10.setEnabled(true);
            jProgressBar1.setIndeterminate(false);
            printMsg("  " + (new Date().getTime() - initTime)
                    + " ?.");
        }
    }.start();
}

From source file:org.commonjava.maven.galley.cache.infinispan.FastLocalCacheProvider.java

private ReentrantLock getTransferLock(final ConcreteResource resource) {
    final Transfer transfer = getTransfer(resource);
    return transferLocks.computeIfAbsent(transfer, tran -> new ReentrantLock());
}
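
The one-liner above is worth noting: computeIfAbsent both creates the per-transfer lock on first use and guarantees that concurrent callers receive the same instance, which is exactly what a lock registry needs. The sketch below shows how such a per-key lock is typically consumed; the PerKeyLocks class and withLock method are invented for illustration and are not Galley API.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.Supplier;

// Illustrative helper: serialize work per key using locks created on demand.
public class PerKeyLocks<K> {

    private final ConcurrentMap<K, ReentrantLock> locks = new ConcurrentHashMap<>();

    public <T> T withLock(K key, Supplier<T> action) {
        ReentrantLock lock = locks.computeIfAbsent(key, k -> new ReentrantLock());
        lock.lock();
        try {
            return action.get();   // at most one thread per key runs here at a time
        } finally {
            lock.unlock();
        }
    }
}

A caller would wrap its critical section, for example String checksum = locks.withLock(path, () -> computeChecksum(path)), where computeChecksum stands in for whatever per-resource work needs serializing. Note that locks accumulate in the map, so a long-lived registry would also need an eviction strategy.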

From source file:org.apache.openjpa.kernel.QueryImpl.java

public synchronized void startLocking() {
    if (_lock == null) {
        _lock = new ReentrantLock();
    }
}

From source file:org.openanzo.datasource.nodecentric.internal.NodeCentricDatasource.java

/**
 * Begin database transaction
 * 
 * Note: Database already in transaction
 * 
 * @param connection
 *            {@link Connection} to underlying database
 * @param needsWrite
 *            if true, tables will be locked if needed
 * @param needsTransaction
 *            TODO
 * @throws AnzoException
 *             {@link ExceptionConstants.RDB#ALREADY_IN_RDB_TRANSACTION} if this connection is already with a transaction
 * @throws AnzoException
 *             {@link ExceptionConstants.RDB#FAILED_START_RDB_TRANSACTION} if there was a problem setting autoCommit to false
 */
public void begin(Connection connection, boolean needsWrite, boolean needsTransaction) throws AnzoException {
    long start = 0;
    if (stats.isEnabled()) {
        start = System.currentTimeMillis();
        stats.getBeginUse().increment();
    }
    try {
        ReentrantLock lock = connectionLocks.get(connection);
        if (lock == null) {
            lock = new ReentrantLock();
            connectionLocks.put(connection, lock);
        }
        if (lock.isLocked()) {
            throw new AnzoException(ExceptionConstants.RDB.ALREADY_IN_RDB_TRANSACTION);
        }
        lock.lock();
        if (lock.getHoldCount() == 1 && needsTransaction) {
            try {
                connection.setAutoCommit(false);
            } catch (SQLException e) {
                lock.unlock();
                log.error(LogUtils.RDB_MARKER, "Error starting jdbc transaction", e);
                throw new AnzoRuntimeException(ExceptionConstants.RDB.FAILED_START_RDB_TRANSACTION, e);
            }
            try {
                lockTable(connection, needsWrite);
            } catch (AnzoException e) {
                try {
                    connection.setAutoCommit(false);
                } catch (SQLException sqle) {
                    log.error(LogUtils.RDB_MARKER, "Error aborting jdbc transaction", sqle);
                }
                lock.unlock();
                throw e;
            }

        }
    } finally {
        if (stats.isEnabled()) {
            stats.getBeginDuration().addTime((System.currentTimeMillis() - start));
        }
    }
}

From source file:org.openanzo.datasource.nodecentric.internal.NodeCentricDatasource.java

/**
 * Abort database transaction
 * 
 * Note: Database already in transaction
 * 
 * @param connection
 *            {@link Connection} to underlying database
 * @param needsWrite
 *            if true, tables will be locked if needed
 * @param needsTransaction
 *            TODO
 * @throws AnzoException
 *             {@link ExceptionConstants.RDB#DIDNT_START_RDB_TRANSACTION} if this thread didn't start the transaction
 * @throws AnzoException
 *             {@link ExceptionConstants.RDB#FAILED_ROLLBACK_RDB_TRANSACTION} if there was a problem rolling back the connection
 */
protected void abort(Connection connection, boolean needsWrite, boolean needsTransaction) throws AnzoException {
    long start = 0;
    if (stats.isEnabled()) {
        start = System.currentTimeMillis();
        stats.getAbortUse().increment();
    }
    try {
        ReentrantLock lock = connectionLocks.get(connection);
        if (lock == null) {
            lock = new ReentrantLock();
            connectionLocks.put(connection, lock);
        }
        if (lock.isLocked()) {
            if (lock.isHeldByCurrentThread()) {
                try {
                    if (needsTransaction) {
                        ArrayList<AnzoException> exceptions = null;
                        try {
                            if (!connection.isClosed()) {
                                try {
                                    connection.rollback();
                                    connection.setAutoCommit(true);
                                } catch (SQLException e) {
                                    log.error(LogUtils.RDB_MARKER, "Error rolling back transaction", e);
                                    exceptions = new ArrayList<AnzoException>();
                                    exceptions.add(new AnzoException(
                                            ExceptionConstants.RDB.FAILED_ROLLBACK_RDB_TRANSACTION, e));
                                }
                                try {
                                    unlockTable(connection, needsWrite);
                                } catch (AnzoException ae) {
                                    log.error(LogUtils.RDB_MARKER, "Error unlocking table", ae);
                                    if (exceptions == null) {
                                        exceptions = new ArrayList<AnzoException>();
                                    }
                                    exceptions.add(ae);
                                }
                            }
                        } catch (SQLException e) {
                            log.error(LogUtils.RDB_MARKER, "Error rolling back jdbc transaction", e);
                            exceptions = new ArrayList<AnzoException>();
                            exceptions.add(new AnzoException(
                                    ExceptionConstants.RDB.FAILED_ROLLBACK_RDB_TRANSACTION, e));
                        }

                        if (exceptions != null && exceptions.size() > 0) {
                            throw new CompoundAnzoException(exceptions,
                                    ExceptionConstants.RDB.FAILED_ROLLBACK_RDB_TRANSACTION);
                        }
                    }
                } finally {
                    lock.unlock();
                    nodeLayout.clearUncommittedCache();
                }
            } else {
                throw new AnzoException(ExceptionConstants.RDB.DIDNT_START_RDB_TRANSACTION);
            }
        }
    } finally {
        if (stats.isEnabled()) {
            stats.getAbortDuration().addTime((System.currentTimeMillis() - start));
        }
    }
}

From source file:org.openanzo.datasource.nodecentric.internal.NodeCentricDatasource.java

/**
 * Commit database transaction
 * 
 * Note: Database already in transaction
 * 
 * @param connection
 *            {@link Connection} to underlying database
 * @param needsWrite
 *            if true, tables will be locked if needed
 * @param needsTransaction
 *            TODO
 * @throws AnzoException
 *             {@link ExceptionConstants.RDB#NOT_IN_RDB_TRANSACTION} if connection isn't in a transaction
 * @throws AnzoException
 *             {@link ExceptionConstants.RDB#DIDNT_START_RDB_TRANSACTION} if this thread didn't start the transaction
 * @throws AnzoException
 *             {@link ExceptionConstants.RDB#FAILED_COMMIT_RDB_TRANSACTION} if there was a problem committing the connection
 */
public void commit(Connection connection, boolean needsWrite, boolean needsTransaction) throws AnzoException {
    long start = 0;
    if (stats.isEnabled()) {
        start = System.currentTimeMillis();
        stats.getCommitUse().increment();
    }
    try {
        ReentrantLock lock = connectionLocks.get(connection);
        if (lock == null) {
            lock = new ReentrantLock();
            connectionLocks.put(connection, lock);
        }
        if (lock.isLocked()) {
            if (lock.isHeldByCurrentThread()) {
                try {
                    if (needsTransaction) {
                        ArrayList<AnzoException> exceptions = null;
                        try {
                            connection.commit();
                            connection.setAutoCommit(true);
                        } catch (SQLException e) {
                            log.error(LogUtils.RDB_MARKER, "Error committing jdbc transaction", e);
                            exceptions = new ArrayList<AnzoException>();
                            exceptions.add(
                                    new AnzoException(ExceptionConstants.RDB.FAILED_COMMIT_RDB_TRANSACTION, e));
                        }
                        try {
                            unlockTable(connection, needsWrite);
                        } catch (AnzoException ae) {
                            log.error(LogUtils.RDB_MARKER, "Error unlocking tables", ae);
                            if (exceptions == null) {
                                exceptions = new ArrayList<AnzoException>();
                            }
                            exceptions.add(ae);
                        }
                        if (exceptions != null && exceptions.size() > 0) {
                            throw new CompoundAnzoException(exceptions,
                                    ExceptionConstants.RDB.FAILED_COMMIT_RDB_TRANSACTION);
                        }
                    }
                } finally {
                    lock.unlock();
                    nodeLayout.clearUncommittedCache();
                }
            } else {
                throw new AnzoException(ExceptionConstants.RDB.DIDNT_START_RDB_TRANSACTION);
            }
        } else {
            throw new AnzoException(ExceptionConstants.RDB.NOT_IN_RDB_TRANSACTION);
        }
    } finally {
        if (stats.isEnabled()) {
            stats.getCommitDuration().addTime((System.currentTimeMillis() - start));
        }
    }
}
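
The last three examples lean on ReentrantLock introspection: isLocked() to detect a transaction already in progress, isHeldByCurrentThread() to reject commits and aborts issued from a thread that did not start the transaction, and getHoldCount() to run the expensive setup or teardown only on the outermost, reentrant acquisition. The compact sketch below shows that guard style with invented names; it is a simplified illustration, not the NodeCentricDatasource implementation.

import java.util.concurrent.locks.ReentrantLock;

// Illustrative guard object built around the introspection methods used above.
public class TransactionGuard {

    private final ReentrantLock lock = new ReentrantLock();

    public void begin() {
        if (lock.isLocked() && !lock.isHeldByCurrentThread()) {
            throw new IllegalStateException("another thread owns this transaction");
        }
        lock.lock();
        if (lock.getHoldCount() == 1) {
            // outermost acquisition: do the real setup exactly once
            System.out.println("starting transaction");
        }
    }

    public void end() {
        if (!lock.isHeldByCurrentThread()) {
            throw new IllegalStateException("this thread did not start the transaction");
        }
        try {
            if (lock.getHoldCount() == 1) {
                // outermost release: do the real teardown exactly once
                System.out.println("finishing transaction");
            }
        } finally {
            lock.unlock();
        }
    }
}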