Example usage for java.util.concurrent.locks Lock unlock

List of usage examples for java.util.concurrent.locks Lock unlock

Introduction

On this page you can find an example usage for java.util.concurrent.locks Lock unlock.

Prototype

void unlock();

Source Link

Document

Releases the lock.

Usage

From source file:com.mcapanel.plugin.PluginConnector.java

/**
 * Handles one line of output coming from the wrapped server process.
 * <p>
 * If the line parses as JSON carrying {@code "plugin":"McAdminPanel"}, it is
 * dispatched by its {@code "type"} field (response / method / connect) and
 * {@code true} is returned. If the line is not JSON at all, the
 * {@link ParseException} path treats it as plain console output and updates the
 * player counter from join/leave messages. Returns {@code false} when the line
 * was not a recognized plugin message.
 *
 * @param line raw line read from the server
 * @return {@code true} if the line was consumed as a plugin message
 */
public boolean listen(String line) {
    try {
        JSONObject obj = (JSONObject) jsonParser.parse(line);

        if (obj.containsKey("plugin") && obj.get("plugin").equals("McAdminPanel") && obj.containsKey("type")) {
            if (obj.get("type").equals("response")) {
                // Record the response under its request timestamp; writers are
                // exclusive so readers see a consistent returns map.
                final Lock lock = returnsLock.writeLock();
                lock.lock();

                try {
                    if (obj.get("time") != null)
                        returns.put((Long) obj.get("time"), new PluginReturn(System.currentTimeMillis(),
                                (String) obj.get("method"), (String) obj.get("response")));
                } finally {
                    lock.unlock();
                }

                return true;
            } else if (obj.get("type").equals("method")) {
                doMethodAndRespond((String) obj.get("method"), (String) obj.get("params"));

                return true;
            } else if (obj.get("type").equals("connect")) {
                setConnected((Boolean) obj.get("connected"));

                OutputStream writer = server.getWriter();

                try {
                    writer.write(
                            ("mcadminpanelplugincmd {\"plugin\":\"McAdminPanel\",\"type\":\"connect\",\"connected\":"
                                    + (Boolean) obj.get("connected") + "}\n").getBytes());
                    writer.flush();
                } catch (IOException e) {
                    // NOTE(review): write failure is silently ignored — best-effort
                    // acknowledgement, but consider logging it.
                }

                if (connected) {
                    sendMethod("doInitial", methodHandler.getInitial().replace(",", "~"));
                }

                return true;
            }
        }
    } catch (ParseException e) {
        // Not JSON: treat as a plain console line. NOTE(review): exception-driven
        // control flow; c/n/h appear to be join/leave marker patterns — confirm.
        if (line.contains(c.toString()) && line.contains(n.toString())) {
            if (!AdminPanelWrapper.getInstance().getTinyUrl().getHelper().c() && players >= 8) {
                // Server considered full: extract the joining player's name and kick them.
                String p = line.substring(line.indexOf("INFO]: ") + "INFO]: ".length(), line.indexOf("[/"));

                OutputStream writer = server.getWriter();

                try {
                    writer.write(("kick " + p + " The server is full!\n").getBytes());
                    writer.flush();
                } catch (IOException ex) {
                    // NOTE(review): kick failure silently ignored — consider logging.
                }
            } else {
                players++;
            }
        } else if (line.contains(h.toString())) {
            players--;
        }
    }

    return false;
}

From source file:com.mg.framework.service.DatabaseAuditServiceImpl.java

/**
 * Audits an entity insert: if auditing is enabled for the inserted entity,
 * captures the new property values (collections excluded) and emits a
 * {@link DatabaseAuditType#CREATE} audit event. Failures are logged, never
 * propagated to the persistence layer.
 *
 * @param createEvent Hibernate post-insert event describing the new entity
 */
public void auditCreate(PostInsertEvent createEvent) {
    // Fast exit while auditing is globally disabled.
    if (!isAuditActivated)
        return;

    try {
        // Consult the per-entity audit configuration under the read lock.
        Lock setupLock = entityAuditSetupLock.readLock();
        setupLock.lock();
        try {
            EntityAuditSetup setup = entityAuditSetup.get(createEvent.getPersister().getEntityName());
            if (setup == null || !setup.isAuditCreate())
                return;
        } finally {
            setupLock.unlock();
        }

        String[] propertyNames = createEvent.getPersister().getPropertyNames();
        int propertyCount = propertyNames.length;
        String[] newValues = new String[propertyCount];
        String[] oldValues = new String[propertyCount];

        for (int idx = 0; idx < propertyCount; idx++) {
            Type propertyType = createEvent.getPersister().getPropertyType(propertyNames[idx]);
            // Collection-valued properties are not audited.
            if (propertyType.isCollectionType())
                continue;

            Object state = createEvent.getState()[idx];
            if (propertyType.isEntityType()) {
                // Associations are rendered via their class metadata.
                ClassMetadata metadata = createEvent.getPersister().getFactory()
                        .getClassMetadata(propertyType.getName());
                newValues[idx] = entityPropertyToString(state, metadata);
            } else {
                newValues[idx] = state == null ? null : state.toString();
            }
            oldValues[idx] = null; // an insert has no previous value
        }

        sendAuditMessage(new EntityAuditEvent(createEvent.getPersister().getEntityName(),
                DatabaseAuditType.CREATE, createEvent.getId().toString(),
                createEvent.getPersister().getIdentifierPropertyName(), propertyNames, newValues, oldValues));
    } catch (Exception e) {
        logger.error("audit create failed", e);
    }
}

From source file:com.mg.framework.service.DatabaseAuditServiceImpl.java

/**
 * Audits an entity delete: if auditing is enabled for the removed entity,
 * captures the deleted property values (collections excluded) and emits a
 * {@link DatabaseAuditType#REMOVE} audit event. Failures are logged, never
 * propagated to the persistence layer.
 *
 * @param removeEvent Hibernate post-delete event describing the removed entity
 */
public void auditRemove(PostDeleteEvent removeEvent) {
    // Fast exit while auditing is globally disabled.
    if (!isAuditActivated)
        return;

    try {
        // Consult the per-entity audit configuration under the read lock.
        Lock lock = entityAuditSetupLock.readLock();
        lock.lock();
        try {
            EntityAuditSetup auditSetup = entityAuditSetup.get(removeEvent.getPersister().getEntityName());
            if (auditSetup == null || !auditSetup.isAuditRemove())
                return;
        } finally {
            lock.unlock();
        }

        String[] names = removeEvent.getPersister().getPropertyNames();
        String[] stateStr = new String[names.length];
        String[] oldStateStr = new String[names.length];

        for (int i = 0; i < names.length; i++) {
            Type type = removeEvent.getPersister().getPropertyType(names[i]);
            // Collection-valued properties are not audited.
            if (type.isCollectionType())
                continue;

            if (type.isEntityType()) {
                // Associations are rendered via their class metadata.
                ClassMetadata metadata = removeEvent.getPersister().getFactory()
                        .getClassMetadata(type.getName());
                oldStateStr[i] = entityPropertyToString(removeEvent.getDeletedState()[i], metadata);
                stateStr[i] = null;
            } else {
                oldStateStr[i] = removeEvent.getDeletedState()[i] == null ? null
                        : removeEvent.getDeletedState()[i].toString();
                stateStr[i] = null; // a delete has no "new" value
            }
        }

        sendAuditMessage(new EntityAuditEvent(removeEvent.getPersister().getEntityName(),
                DatabaseAuditType.REMOVE, removeEvent.getId().toString(),
                removeEvent.getPersister().getIdentifierPropertyName(), names, stateStr, oldStateStr));
    } catch (Exception e) {
        // BUGFIX: message previously said "audit create failed" (copy-paste from
        // auditCreate), which misattributed failures in logs.
        logger.error("audit remove failed", e);
    }
}

From source file:org.apache.ignite.internal.processors.hadoop.igfs.HadoopIgfsIpcIo.java

/**
 * Returns a started and valid instance of this class for a given endpoint.
 * <p>
 * Uses double-checked caching: a lock-free cache probe first, then a
 * per-endpoint lock under which the cache is re-checked before a new
 * instance is created and started.
 *
 * @param log Logger to use for new instance.
 * @param endpoint Endpoint string.
 * @return New or existing cached instance, which is started and operational.
 * @throws IOException If new instance was created but failed to start.
 */
public static HadoopIgfsIpcIo get(Log log, String endpoint) throws IOException {
    // Loop until we either acquire a live cached instance or install a new one;
    // a cached entry can be concurrently closed between get() and acquire().
    while (true) {
        HadoopIgfsIpcIo clientIo = ipcCache.get(endpoint);

        if (clientIo != null) {
            if (clientIo.acquire())
                return clientIo;
            else
                // If concurrent close.
                ipcCache.remove(endpoint, clientIo);
        } else {
            // No cached instance: serialize creation per endpoint.
            Lock lock = initLock.getLock(endpoint);

            lock.lock();

            try {
                clientIo = ipcCache.get(endpoint);

                if (clientIo != null) { // Perform double check.
                    if (clientIo.acquire())
                        return clientIo;
                    else
                        // If concurrent close.
                        ipcCache.remove(endpoint, clientIo);
                }

                // Otherwise try creating a new one.
                clientIo = new HadoopIgfsIpcIo(endpoint, new IgfsMarshaller(), log);

                try {
                    clientIo.start();
                } catch (IgniteCheckedException e) {
                    // Preserve the cause so callers see why startup failed.
                    throw new IOException(e.getMessage(), e);
                }

                HadoopIgfsIpcIo old = ipcCache.putIfAbsent(endpoint, clientIo);

                // Put in exclusive lock.
                assert old == null;

                return clientIo;
            } finally {
                lock.unlock();
            }
        }
    }
}

From source file:org.gridgain.grid.kernal.ggfs.hadoop.GridGgfsHadoopIpcIo.java

/**
 * Returns a started and valid instance of this class for a given endpoint.
 * <p>
 * Uses double-checked caching: a lock-free cache probe first, then a
 * per-endpoint lock under which the cache is re-checked before a new
 * instance is created and started.
 *
 * @param log Logger to use for new instance.
 * @param endpoint Endpoint string.
 * @return New or existing cached instance, which is started and operational.
 * @throws IOException If new instance was created but failed to start.
 */
public static GridGgfsHadoopIpcIo get(Log log, String endpoint) throws IOException {
    // Loop until we either acquire a live cached instance or install a new one;
    // a cached entry can be concurrently closed between get() and acquire().
    while (true) {
        GridGgfsHadoopIpcIo clientIo = ipcCache.get(endpoint);

        if (clientIo != null) {
            if (clientIo.acquire())
                return clientIo;
            else
                // If concurrent close.
                ipcCache.remove(endpoint, clientIo);
        } else {
            // No cached instance: serialize creation per endpoint.
            Lock lock = initLock.getLock(endpoint);

            lock.lock();

            try {
                clientIo = ipcCache.get(endpoint);

                if (clientIo != null) { // Perform double check.
                    if (clientIo.acquire())
                        return clientIo;
                    else
                        // If concurrent close.
                        ipcCache.remove(endpoint, clientIo);
                }

                // Otherwise try creating a new one.
                clientIo = new GridGgfsHadoopIpcIo(endpoint, new GridGgfsMarshaller(), log);

                try {
                    clientIo.start();
                } catch (GridException e) {
                    // Preserve the cause so callers see why startup failed.
                    throw new IOException(e.getMessage(), e);
                }

                GridGgfsHadoopIpcIo old = ipcCache.putIfAbsent(endpoint, clientIo);

                // Put in exclusive lock.
                assert old == null;

                return clientIo;
            } finally {
                lock.unlock();
            }
        }
    }
}

From source file:com.cloudera.oryx.ml.serving.als.model.ALSServingModel.java

/**
 * Returns (item ID, item vector) pairs for every item known to the given user,
 * or {@code null} when the user is unknown or has no known items. Each item
 * vector is read under that item partition's read lock.
 *
 * @param user user ID to look up
 * @return list of item ID / vector pairs, or {@code null} if unavailable
 */
public List<Pair<String, float[]>> getKnownItemVectorsForUser(String user) {
    float[] userVector = getUserVector(user);
    if (userVector == null) {
        return null;
    }
    Collection<String> knownItems = getKnownItems(user);
    if (knownItems == null || knownItems.isEmpty()) {
        return null;
    }
    List<Pair<String, float[]>> result = new ArrayList<>(knownItems.size());
    // Iterate under the collection's own monitor; the per-partition read lock
    // guards access to the Y factor matrix.
    synchronized (knownItems) {
        for (String itemID : knownItems) {
            int part = partition(itemID);
            Lock readLock = yLocks[part].readLock();
            readLock.lock();
            float[] itemVector;
            try {
                itemVector = Y[part].get(itemID);
            } finally {
                readLock.unlock();
            }
            result.add(new Pair<>(itemID, itemVector));
        }
    }
    return result;
}

From source file:StripedHashSet.java

/**
 * double the set size/* w w  w.j a  va2 s.c  o  m*/
 */
@Override
public void resize() {
    int oldCapacity = table.length;
    for (Lock lock : locks) {
        lock.lock();
    }
    try {
        if (oldCapacity != table.length) {
            return; // someone beat us to it
        }
        int newCapacity = 2 * oldCapacity;
        List<T>[] oldTable = table;
        table = (List<T>[]) new List[newCapacity];
        for (int i = 0; i < newCapacity; i++)
            table[i] = new ArrayList<T>();
        initializeFrom(oldTable);
    } finally {
        for (Lock lock : locks) {
            lock.unlock();
        }
    }
}

From source file:com.cip.crane.agent.deploy.DeploymentThread.java

/**
 * Executes one deployment attempt for {@code task} under the task's lock.
 * Skips work when the task has no status yet or already deployed successfully;
 * otherwise deploys and persists the updated status and configuration.
 * All failures are logged and swallowed (thread entry point must not throw).
 */
@Override
public void run() {
    LOGGER.debug("start deploy");
    Lock lock = LockHelper.getLock(task);
    // BUGFIX: acquire the lock BEFORE entering the try block. Previously
    // lock.lock() was the first statement inside the try, so if acquisition
    // itself failed, the finally block would call unlock() on a lock that was
    // never held, throwing IllegalMonitorStateException and masking the error.
    lock.lock();
    try {
        DeploymentConf conf = (DeploymentConf) cs.getConf(localIp, task);
        DeploymentStatus status = (DeploymentStatus) cs.getStatus(localIp, task);
        // Nothing to do if status is missing or the deploy already succeeded.
        if (status == null || status.getStatus() == DeploymentStatus.DEPLOY_SUCCESS) {
            return;
        }

        deployTask(conf, status);
        cs.updateStatus(localIp, task, status);
        cs.updateConf(localIp, task, conf);
    } catch (Exception e) {
        LOGGER.error(e, e);
    } finally {
        lock.unlock();
    }
}

From source file:com.cloudera.oryx.ml.serving.als.model.ALSServingModel.java

/**
 * Builds a solver over Y'Y by summing the per-partition partial products
 * Y_p' * Y_p. Each partition is read under its own read lock, so the overall
 * result may span slightly different snapshots of the partitions.
 *
 * @return solver for the accumulated Y'Y matrix
 */
public Solver getYTYSolver() {
    RealMatrix accumulated = null;
    for (int part = 0; part < Y.length; part++) {
        Lock readLock = yLocks[part].readLock();
        readLock.lock();
        RealMatrix partial;
        try {
            partial = VectorMath.transposeTimesSelf(Y[part].values());
        } finally {
            readLock.unlock();
        }
        // Skip partitions that produced no partial product.
        if (partial == null) {
            continue;
        }
        accumulated = (accumulated == null) ? partial : accumulated.add(partial);
    }
    return new LinearSystemSolver().getSolver(accumulated);
}

From source file:org.eclipse.gyrex.context.internal.registry.ContextRegistryImpl.java

/**
 * Returns the real context implementation for the given path, creating and
 * registering it if it does not exist yet.
 * <p>
 * Lookup is attempted under the read lock first; on a miss, the write lock is
 * taken and the lookup repeated (double check) before a new context is built.
 *
 * @param contextPath path identifying the context (sanitized internally)
 * @return the existing or newly created context, never {@code null}
 * @throws IllegalArgumentException if the path is invalid
 * @throws IllegalStateException if the registry is closed, the context has no
 *         definition, or a duplicate context was concurrently registered
 */
public GyrexContextImpl getRealContext(IPath contextPath) throws IllegalArgumentException {
    checkClosed();
    contextPath = sanitize(contextPath);

    // get existing context (fast path under the shared read lock)
    GyrexContextImpl context = null;
    final Lock readLock = contextRegistryLock.readLock();
    readLock.lock();
    try {
        context = contexts.get(contextPath);
        if (null != context)
            return context;
    } finally {
        readLock.unlock();
    }

    // hook with preferences
    // NOTE(review): registered outside any lock, before the context exists —
    // presumably idempotent; confirm addPreferenceChangeListener tolerates
    // duplicate registration.
    getContextFlushNode().addPreferenceChangeListener(flushListener);

    // create & store new context if necessary
    final Lock lock = contextRegistryLock.writeLock();
    lock.lock();
    try {
        // Re-validate under the write lock: the registry may have been closed
        // or the context created by another thread since the read-locked probe.
        checkClosed();

        context = contexts.get(contextPath);
        if (null != context)
            return context;

        final ContextDefinition definition = getDefinition(contextPath);
        if (definition == null)
            throw new IllegalStateException(
                    String.format("Context '%s' does not exists.", contextPath.toString()));

        context = new GyrexContextImpl(contextPath, this);
        if (contexts.put(contextPath, context) != null)
            throw new IllegalStateException(String.format(
                    "Duplicate context object created for context '%s'. Please report stacktrace to the development team!",
                    contextPath.toString()));

    } finally {
        lock.unlock();
    }

    return context;
}