Example usage for java.util.concurrent ConcurrentHashMap containsKey

Introduction

On this page you can find example usages of java.util.concurrent.ConcurrentHashMap.containsKey, collected from open-source projects.

Prototype

public boolean containsKey(Object key) 

Document

Tests if the specified object is a key in this table.
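
containsKey returns true if and only if the given object equals a key currently in the map; like Hashtable but unlike HashMap, ConcurrentHashMap rejects a null key with a NullPointerException. A minimal, self-contained sketch of the call (class and variable names are illustrative):

import java.util.concurrent.ConcurrentHashMap;

public class ContainsKeyDemo {
    public static void main(String[] args) {
        ConcurrentHashMap<String, Integer> counts = new ConcurrentHashMap<>();
        counts.put("alpha", 1);

        // reads do not lock the table and see all previously completed updates
        System.out.println(counts.containsKey("alpha")); // true
        System.out.println(counts.containsKey("beta"));  // false

        // counts.containsKey(null) would throw NullPointerException
    }
}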

Usage

From source file: org.starnub.starnubserver.connections.player.session.PlayerSession.java

public boolean hasSubPermission(String basePermission, String subPermission, boolean checkWildCards) {
    if (PERMISSIONS.containsKey("*") && checkWildCards) {
        return true;
    }
    ConcurrentHashMap<String, ArrayList<String>> concurrentHashMap = PERMISSIONS.get(basePermission);
    if (concurrentHashMap == null) {
        return false;
    }
    if (checkWildCards) {
        return concurrentHashMap.containsKey("*") || concurrentHashMap.containsKey(subPermission);
    } else {
        return false;
    }
}

From source file: cn.leancloud.diamond.client.impl.DefaultDiamondSubscriber.java

public boolean containDataId(String dataId, String group) {
    if (null == group) {
        group = Constants.DEFAULT_GROUP;
    }
    ConcurrentHashMap<String, CacheData> cacheDatas = this.cache.get(dataId);
    if (null == cacheDatas) {
        return false;
    }
    return cacheDatas.containsKey(group);
}

From source file: hu.sztaki.lpds.pgportal.portlets.workflow.RealWorkflowPortlet.java

/**
 * Displaying Workflow configuration interface
 */
public void doConfigure(ActionRequest request, ActionResponse response) throws PortletException {
    // Querying logged user
    String userID;
    if (request.getParameter("adminuser") == null)
        userID = request.getRemoteUser();
    else
        userID = request.getParameter("adminuser");

    if (request.getParameter("pcwkf") != null) {
        String workflowName = request.getParameter("workflow");
        if (!workflowName.equals(request.getPortletSession().getAttribute("cworkflow")))
            NewWorkflowUtil.fromWorkflow(request, response);
        String graphName = PortalCacheService.getInstance().getUser(userID).getWorkflow(workflowName).getGraf();
        if (!graphName.equals(request.getParameter("pgraf")))
            RealWorkflowUtil.changeGraph(request, response);
    }

    //Over the Quota
    if (UserQuotaUtils.getInstance().userQuotaIsFull(userID)) {
        setRequestAttribute(request.getPortletSession(), "msg", "portal.RealWorkflowPortlet.quotaisoverfull");
        return;
    }
    //Session query
    PortletSession ps = request.getPortletSession();

    //Available resource configuration query
    try {
        if (ps.getAttribute("resources", PortletSession.APPLICATION_SCOPE) == null) {
            ResourceConfigurationFace rc = (ResourceConfigurationFace) InformationBase.getI()
                    .getServiceClient("resourceconfigure", "portal");
            List<Middleware> tmp_r = rc.get();
            ps.setAttribute("resources", tmp_r, ps.APPLICATION_SCOPE);
            ps.setAttribute("pub_resources", tmp_r, ps.APPLICATION_SCOPE);
        }
    } catch (Exception ex) {
        ex.printStackTrace();
    }

    if (request.getParameter("workflow") != null) {
        ps.setAttribute("cworkflow1", request.getParameter("workflow"), ps.APPLICATION_SCOPE);
        request.getPortletSession().setAttribute("cworkflow", request.getParameter("workflow"));
    }
    workflow = request.getParameter("workflow");

    setRequestAttribute(request.getPortletSession(), "graphs",
            PortalCacheService.getInstance().getUser(userID).getAbstactWorkflows());
    setRequestAttribute(request.getPortletSession(), "templates",
            PortalCacheService.getInstance().getUser(userID).getTemplateWorkflows());

    Hashtable hsh = new Hashtable();
    hsh.put("url", PortalCacheService.getInstance().getUser(userID).getWorkflow(workflow).getWfsID());
    ServiceType st = InformationBase.getI().getService("wfs", "portal", hsh, new Vector());
    try {
        PortalWfsClient pc = (PortalWfsClient) Class.forName(st.getClientObject()).newInstance();
        pc.setServiceURL(st.getServiceUrl());
        pc.setServiceID(st.getServiceID());
        ComDataBean tmp = new ComDataBean();
        tmp.setPortalID(PropertyLoader.getInstance().getProperty("service.url"));
        tmp.setUserID(userID);
        tmp.setWorkflowID(workflow);
        if (PortalCacheService.getInstance().getUser(userID).getWorkflow(workflow).getWorkflowType()
                .equals("multinode")
                || PortalCacheService.getInstance().getUser(userID).getWorkflow(workflow).getWorkflowType()
                        .equals("singlenode")) {
            PortalCacheService.getInstance().getUser(userID)
                    .setConfiguringWorkflowWFProp(pc.getWorkflowConfigData(tmp), pc.getWorkflowProps(tmp));
        } else {
            PortalCacheService.getInstance().getUser(userID)
                    .setConfiguringWorkflow(pc.getWorkflowConfigData(tmp));
        }
        setRequestAttribute(request.getPortletSession(), "jobs",
                PortalCacheService.getInstance().getUser(userID).getConfiguringWorkflow());
        Vector ltmp = new Vector();
        for (int i = 0; i < PortalCacheService.getInstance().getUser(userID).getConfiguringWorkflow()
                .size(); i++) {
            // replace special characters...
            String jobtxt = new String(((JobPropertyBean) PortalCacheService.getInstance().getUser(userID)
                    .getConfiguringWorkflow().get(i)).getTxt());
            ((JobPropertyBean) PortalCacheService.getInstance().getUser(userID).getConfiguringWorkflow().get(i))
                    .setTxt(replaceTextS(jobtxt));
            // inputs
            for (int j = 0; j < ((JobPropertyBean) PortalCacheService.getInstance().getUser(userID)
                    .getConfiguringWorkflow().get(i)).getInputs().size(); j++) {
                PortDataBean ptmp = (PortDataBean) ((JobPropertyBean) PortalCacheService.getInstance()
                        .getUser(userID).getConfiguringWorkflow().get(i)).getInputs().get(j);
                // replace special characters...
                ptmp.setTxt(replaceTextS(ptmp.getTxt()));
                if (!ptmp.getPrejob().equals("")) {
                    for (int k = 0; k < PortalCacheService.getInstance().getUser(userID)
                            .getConfiguringWorkflow().size(); k++) {
                        if (((JobPropertyBean) PortalCacheService.getInstance().getUser(userID)
                                .getConfiguringWorkflow().get(k)).getName().equals(ptmp.getPrejob())) {
                            for (int z = 0; z < ((JobPropertyBean) PortalCacheService.getInstance()
                                    .getUser(userID).getConfiguringWorkflow().get(k)).getOutputs()
                                            .size(); z++) {
                                if (ptmp.getPreoutput()
                                        .equals("" + ((PortDataBean) ((JobPropertyBean) PortalCacheService
                                                .getInstance().getUser(userID).getConfiguringWorkflow().get(k))
                                                        .getOutputs().get(z)).getSeq())) {
                                    long x = ((PortDataBean) ((JobPropertyBean) PortalCacheService.getInstance()
                                            .getUser(userID).getConfiguringWorkflow().get(k)).getOutputs()
                                                    .get(z)).getX();
                                    long y = ((PortDataBean) ((JobPropertyBean) PortalCacheService.getInstance()
                                            .getUser(userID).getConfiguringWorkflow().get(k)).getOutputs()
                                                    .get(z)).getY();
                                    ltmp.add(new LineCoord("" + ptmp.getX(), "" + ptmp.getY(), "" + x, "" + y));
                                }
                            }
                        }
                    }
                }
            }
            // outputs
            for (int jo = 0; jo < ((JobPropertyBean) PortalCacheService.getInstance().getUser(userID)
                    .getConfiguringWorkflow().get(i)).getOutputs().size(); jo++) {
                PortDataBean ptmpo = (PortDataBean) ((JobPropertyBean) PortalCacheService.getInstance()
                        .getUser(userID).getConfiguringWorkflow().get(i)).getOutputs().get(jo);
                // replace special characters...
                ptmpo.setTxt(replaceTextS(ptmpo.getTxt()));
            }
        }
        setRequestAttribute(request.getPortletSession(), "lineList", ltmp);
    } catch (Exception e) {
        e.printStackTrace();
    }

    String storageURL = PortalCacheService.getInstance().getUser(userID).getWorkflow(workflow).getStorageID();
    if (storageURL == null) {
        st = InformationBase.getI().getService("storage", "portal", new Hashtable(), new Vector());
        storageURL = st.getServiceUrl();
    }

    setRequestAttribute(request.getPortletSession(), "storageID", storageURL);
    setRequestAttribute(request.getPortletSession(), "userID", userID);
    setRequestAttribute(request.getPortletSession(), "portalID",
            PropertyLoader.getInstance().getProperty("service.url"));
    setRequestAttribute(request.getPortletSession(), "wrkdata",
            PortalCacheService.getInstance().getUser(userID).getWorkflow(workflow));

    setRequestAttribute(request.getPortletSession(), "grafs", Sorter.getInstance()
            .sortFromValues(PortalCacheService.getInstance().getUser(userID).getAbstactWorkflows()));
    setRequestAttribute(request.getPortletSession(), "awkfs", Sorter.getInstance()
            .sortFromValues(PortalCacheService.getInstance().getUser(userID).getTemplateWorkflows()));
    //If workflow instance exists, the graph is not exchangeable.
    String enablecgraf = "";
    if (!PortalCacheService.getInstance().getUser(userID).getWorkflow(workflow).getAllRuntimeInstance()
            .isEmpty()) {//AllWorkflow
        ConcurrentHashMap h = PortalCacheService.getInstance().getUser(userID).getWorkflow(workflow)
                .getAllRuntimeInstance();
        if (h.size() == 1 && !(h.containsKey("AllWorkflow") || h.containsKey("allworkflow"))) {
            //"allworkflow".equalsIgnoreCase(runtimeID)
            enablecgraf = "disabled";
        } else if (h.size() > 1) {
            enablecgraf = "disabled";
        }
        // System.out.println("Modify graf LOCK:" + enablecgraf + " wfs:" + PortalCacheService.getInstance().getUser(userID).getWorkflow(workflow).getAllRuntimeInstance());

    }
    setRequestAttribute(request.getPortletSession(), "enablecgraf", enablecgraf);
    //
    // set configure ID
    String confID = userID + String.valueOf(System.currentTimeMillis());
    setRequestAttribute(request.getPortletSession(), "confID", confID);
    //
    doList(request, response);
    request.setAttribute("jsp",
            "/jsp/workflow/"
                    + PortalCacheService.getInstance().getUser(userID).getWorkflow(workflow).getWorkflowType()
                    + "/configure.jsp");
    setRequestAttribute(request.getPortletSession(), "navigatepage",
            "/jsp/workflow/"
                    + PortalCacheService.getInstance().getUser(userID).getWorkflow(workflow).getWorkflowType()
                    + "/configure.jsp");

}
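
The double containsKey check near the end (h.containsKey("AllWorkflow") || h.containsKey("allworkflow")) is needed because ConcurrentHashMap compares keys case-sensitively via equals. Normalizing the key once when it is inserted would reduce this to a single lookup; an illustrative sketch (runtimeID and instance are hypothetical names):

h.put(runtimeID.toLowerCase(), instance); // store under a normalized key...
boolean wholeWorkflow = h.containsKey("allworkflow"); // ...so one lookup suffices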

From source file: org.apache.ambari.controller.Clusters.java

private void validateClusterDefinition(String clusterName, ClusterDefinition cdef) throws Exception {
    /*
     * Check if name is not empty or null
     */
    if (cdef.getName() == null || cdef.getName().equals("")) {
        String msg = "Cluster Name must be specified and must be non-empty string";
        throw new WebApplicationException((new ExceptionResponse(msg, Response.Status.BAD_REQUEST)).get());
    }

    if (!cdef.getName().equals(clusterName)) {
        String msg = "Cluster Name specified in URL and cluster definition are not same";
        throw new WebApplicationException((new ExceptionResponse(msg, Response.Status.BAD_REQUEST)).get());
    }

    if (cdef.getNodes() == null || cdef.getNodes().equals("")) {
        String msg = "Cluster node range must be specified and must be non-empty string";
        throw new WebApplicationException((new ExceptionResponse(msg, Response.Status.BAD_REQUEST)).get());
    }

    if (cdef.getStackName() == null || cdef.getStackName().equals("")) {
        String msg = "Cluster stack must be specified and must be non-empty string";
        throw new WebApplicationException((new ExceptionResponse(msg, Response.Status.BAD_REQUEST)).get());
    }

    if (cdef.getStackRevision() == null || cdef.getStackRevision().equals("")) {
        String msg = "Cluster stack revision must be specified";
        throw new WebApplicationException((new ExceptionResponse(msg, Response.Status.BAD_REQUEST)).get());
    }

    /*
     * Check if the cluster stack and its parents exist
     * getStack would throw exception if it does not find the stack
     */
    Stack bp = stacks.getStack(cdef.getStackName(), Integer.parseInt(cdef.getStackRevision()));
    while (bp.getParentName() != null) {
        bp = stacks.getStack(bp.getParentName(), bp.getParentRevision());
    }

    /*
     * Check if nodes requested for cluster are not already allocated to other clusters
     */
    ConcurrentHashMap<String, Node> all_nodes = nodes.getNodes();
    List<String> cluster_node_range = new ArrayList<String>();
    cluster_node_range.addAll(getHostnamesFromRangeExpressions(cdef.getNodes()));
    List<String> preallocatedhosts = new ArrayList<String>();
    for (String n : cluster_node_range) {
        if (all_nodes.containsKey(n) && (all_nodes.get(n).getNodeState().getClusterName() != null
                || all_nodes.get(n).getNodeState().getAllocatedToCluster())) {
            /* 
             * Following check is for a very specific case 
             * When controller starts w/ no persistent data in data store, it adds default clusters
             * and down the road restart recovery code re-validates the cluster definition when
             * it finds nodes already allocated. 
            if (all_nodes.get(n).getNodeState().getClusterName() != null && 
            all_nodes.get(n).getNodeState().getClusterName().equals(clusterName)) { 
            continue; 
            } */
            preallocatedhosts.add(n);
        }
    }

    if (!preallocatedhosts.isEmpty()) {
        String msg = "Some of the nodes specified for the cluster roles are allocated to other cluster: ["
                + preallocatedhosts + "]";
        throw new WebApplicationException((new ExceptionResponse(msg, Response.Status.CONFLICT)).get());
    }

    /*
     * Check if all the nodes explicitly specified in the RoleToNodesMap belong the cluster node range specified 
     */
    if (cdef.getRoleToNodesMap() != null) {
        List<String> nodes_specified_using_role_association = new ArrayList<String>();
        for (RoleToNodes e : cdef.getRoleToNodesMap()) {
            List<String> hosts = getHostnamesFromRangeExpressions(e.getNodes());
            nodes_specified_using_role_association.addAll(hosts);
            // TODO: Remove any duplicate nodes from nodes_specified_using_role_association
        }

        nodes_specified_using_role_association.removeAll(cluster_node_range);
        if (!nodes_specified_using_role_association.isEmpty()) {
            String msg = "Some nodes explicityly associated with roles using RoleToNodesMap do not belong in the "
                    + "golbal node range specified for the cluster : [" + nodes_specified_using_role_association
                    + "]";
            throw new WebApplicationException((new ExceptionResponse(msg, Response.Status.BAD_REQUEST)).get());
        }
    }

}
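
Note that the loop above calls containsKey and then get on the same key, which costs two lookups; on a map other threads may be mutating, the entry can also vanish between the two calls. A single get with a null check is both cheaper and race-free. A sketch of the loop body rewritten that way, assuming the same Node API:

Node node = all_nodes.get(n); // one lookup instead of containsKey + get
if (node != null && (node.getNodeState().getClusterName() != null
        || node.getNodeState().getAllocatedToCluster())) {
    preallocatedhosts.add(n);
}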

From source file: org.objectweb.proactive.extensions.dataspaces.vfs.VFSSpacesMountManagerImpl.java

/**
 * Mounts the first available VFS file system on the given dataspace
 * @param spaceInfo space information
 * @throws FileSystemException if no file system could be mounted
 */
private void mountFirstAvailableFileSystem(final SpaceInstanceInfo spaceInfo) throws FileSystemException {

    final DataSpacesURI mountingPoint = spaceInfo.getMountingPoint();

    try {
        writeLock.lock();
        if (!mountedSpaces.containsKey(mountingPoint)) {
            mountedSpaces.put(mountingPoint, new ConcurrentHashMap<String, FileObject>());
        }
        ConcurrentHashMap<String, FileObject> fileSystems = mountedSpaces.get(mountingPoint);

        if (spaceInfo.getUrls().size() == 0) {
            throw new IllegalStateException("Empty Space configuration");
        }

        DataSpacesURI spacePart = mountingPoint.getSpacePartOnly();
        ArrayList<String> urls = new ArrayList<String>(spaceInfo.getUrls());
        if (urls.size() == 1) {
            urls.add(0, Utils.getLocalAccessURL(urls.get(0), spaceInfo.getPath(), spaceInfo.getHostname()));
        }

        logger.debug("[VFSMountManager] Request mounting VFS root list : " + urls);

        try {
            VFSMountManagerHelper.mountAny(urls, fileSystems);

            if (!accessibleFileObjectUris.containsKey(mountingPoint)) {
                LinkedHashSet<String> srl = new LinkedHashSet<String>();
                accessibleFileObjectUris.put(mountingPoint, srl);
            }

            LinkedHashSet<String> srl = accessibleFileObjectUris.get(mountingPoint);

            for (String uri : urls) {
                if (fileSystems.containsKey(uri)) {
                    srl.add(uri);
                }
            }
            if (srl.isEmpty()) {
                throw new IllegalStateException("Invalid empty size list when trying to mount " + urls
                        + " mounted map content is " + fileSystems);
            }
            accessibleFileObjectUris.put(mountingPoint, srl);

            if (logger.isDebugEnabled())
                logger.debug(
                        String.format("[VFSMountManager] Mounted space: %s (access URL: %s)", spacePart, srl));

            mountedSpaces.put(mountingPoint, fileSystems);

        } catch (org.apache.commons.vfs.FileSystemException e) {
            mountedSpaces.remove(mountingPoint);
            throw new FileSystemException("An error occurred while trying to mount " + spaceInfo.getName(), e);
        }
    } finally {
        writeLock.unlock();
    }
}
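
The containsKey-then-put step at the top of this method is only safe because it runs under writeLock. On Java 8 and later, computeIfAbsent expresses the same "create the inner map on first use" step as one atomic call; a sketch with the same field names, assuming mountedSpaces is itself a ConcurrentHashMap:

ConcurrentHashMap<String, FileObject> fileSystems = mountedSpaces
        .computeIfAbsent(mountingPoint, k -> new ConcurrentHashMap<String, FileObject>());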

From source file: org.apache.geode.cache.client.internal.PoolImpl.java

private void authenticateOnAllServers(Op op) {
    if (this.multiuserSecureModeEnabled && ((AbstractOp) op).needsUserId()) {
        UserAttributes userAttributes = UserAttributes.userAttributes.get();
        if (userAttributes != null) {
            ConcurrentHashMap<ServerLocation, Long> map = userAttributes.getServerToId();

            if (this.queueManager == null) {
                throw new SubscriptionNotEnabledException();
            }
            Connection primary = this.queueManager.getAllConnectionsNoWait().getPrimary();
            if (primary != null && !map.containsKey(primary.getServer())) {
                Long userId = (Long) AuthenticateUserOp.executeOn(primary.getServer(), this,
                        userAttributes.getCredentials());
                if (userId != null) {
                    map.put(primary.getServer(), userId);
                }
            }

            List<Connection> backups = this.queueManager.getAllConnectionsNoWait().getBackups();
            for (int i = 0; i < backups.size(); i++) {
                Connection conn = backups.get(i);
                if (!map.containsKey(conn.getServer())) {
                    Long userId = (Long) AuthenticateUserOp.executeOn(conn.getServer(), this,
                            userAttributes.getCredentials());
                    if (userId != null) {
                        map.put(conn.getServer(), userId);
                    }
                }
            }
        } else {
            throw new UnsupportedOperationException(
                    LocalizedStrings.MultiUserSecurityEnabled_USE_POOL_API.toLocalizedString());
        }
    }
}

From source file: com.taobao.gecko.service.impl.BaseRemotingController.java

public Map<Connection, ResponseCommand> invokeToGroupAllConnections(final String group,
        final RequestCommand command, final long time, final TimeUnit timeUnit)
        throws InterruptedException, NotifyRemotingException {
    if (group == null) {
        throw new NotifyRemotingException("Null group");
    }
    if (command == null) {
        throw new NotifyRemotingException("Null command");
    }
    final List<Connection> connections = this.remotingContext.getConnectionsByGroup(group);

    if (connections != null && connections.size() > 0) {
        final long now = System.currentTimeMillis();
        final CountDownLatch countDownLatch = new CountDownLatch(connections.size());
        final ConcurrentHashMap<Connection, ResponseCommand> resultMap = new ConcurrentHashMap<Connection, ResponseCommand>();
        final GroupAllConnectionRequestCallBack requestCallBack = new GroupAllConnectionRequestCallBack(null,
                countDownLatch, TimeUnit.MILLISECONDS.convert(time, timeUnit), now, resultMap);

        for (final Connection conn : connections) {
            final DefaultConnection connection = (DefaultConnection) conn;
            if (connection.isConnected()) {
                try {
                    connection.addRequestCallBack(command.getOpaque(), requestCallBack);
                    requestCallBack.addWriteFuture(connection, connection.asyncSend(command));
                } catch (final Throwable e) {
                    requestCallBack.onResponse(group,
                            this.createCommErrorResponseCommand(command.getRequestHeader(), e.getMessage()),
                            connection);
                }
            } else {
                requestCallBack.onResponse(group,
                        this.createCommErrorResponseCommand(command.getRequestHeader(), ""),
                        connection);
            }
        }
        if (!countDownLatch.await(time, timeUnit)) {
            for (final Connection conn : connections) {
                if (!resultMap.containsKey(conn)) {
                    if (resultMap.putIfAbsent(conn, this.createTimeoutCommand(command.getRequestHeader(),
                            conn.getRemoteSocketAddress())) == null) {
                        requestCallBack.cancelWrite(conn);
                        // drop the pending callback so it cannot fire after the timeout response is installed
                        ((DefaultConnection) conn).removeRequestCallBack(command.getOpaque());
                    }
                }
            }
        }
        return resultMap;
    } else {
        return null;
    }
}
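
The timeout handling above is the canonical check-then-act idiom for ConcurrentHashMap: containsKey is only a cheap pre-filter, and putIfAbsent makes the final decision atomically, so two threads can never both install a timeout response for the same connection. Reduced to its minimal form (names are illustrative):

if (!map.containsKey(key)) {              // fast path; may race with other writers
    if (map.putIfAbsent(key, fallback) == null) {
        // only the thread whose putIfAbsent returned null performs the follow-up work
    }
}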

From source file: com.alibaba.napoli.gecko.service.impl.BaseRemotingController.java

public Map<Connection, ResponseCommand> invokeToGroupAllConnections(final String group,
        final RequestCommand command, final long time, final TimeUnit timeUnit)
        throws InterruptedException, NotifyRemotingException {
    if (group == null) {
        throw new NotifyRemotingException("Null group");
    }
    if (command == null) {
        throw new NotifyRemotingException("Null command");
    }
    final List<Connection> connections = this.remotingContext.getConnectionsByGroup(group);

    if (connections != null && connections.size() > 0) {
        final long now = System.currentTimeMillis();
        final CountDownLatch countDownLatch = new CountDownLatch(connections.size());
        final ConcurrentHashMap<Connection, ResponseCommand> resultMap = new ConcurrentHashMap<Connection, ResponseCommand>();
        final GroupAllConnectionRequestCallBack requestCallBack = new GroupAllConnectionRequestCallBack(null,
                countDownLatch, TimeUnit.MILLISECONDS.convert(time, timeUnit), now, resultMap);

        for (final Connection conn : connections) {
            final DefaultConnection connection = (DefaultConnection) conn;
            if (connection.isConnected()) {
                try {
                    connection.addRequestCallBack(command.getOpaque(), requestCallBack);
                    requestCallBack.addWriteFuture(connection, connection.asyncSend(command));
                } catch (final Throwable e) {
                    requestCallBack.onResponse(group,
                            this.createCommErrorResponseCommand(command.getRequestHeader(), e.getMessage()),
                            connection);
                }
            } else {
                requestCallBack.onResponse(group,
                        this.createCommErrorResponseCommand(command.getRequestHeader(), "?"),
                        connection);
            }
        }
        if (!countDownLatch.await(time, timeUnit)) {
            for (final Connection conn : connections) {
                if (!resultMap.containsKey(conn)) {
                    if (resultMap.putIfAbsent(conn, this.createTimeoutCommand(command.getRequestHeader(),
                            conn.getRemoteSocketAddress())) == null) {
                        requestCallBack.cancelWrite(conn);
                        // drop the pending callback so it cannot fire after the timeout response is installed
                        ((DefaultConnection) conn).removeRequestCallBack(command.getOpaque());
                    }
                }
            }
        }
        return resultMap;
    } else {
        return null;
    }
}

From source file: org.apache.ambari.controller.Clusters.java

private synchronized void updateClusterNodesReservation(String clusterName, ClusterDefinition clsDef)
        throws Exception {

    ConcurrentHashMap<String, Node> all_nodes = nodes.getNodes();
    List<String> cluster_node_range = new ArrayList<String>();
    cluster_node_range.addAll(getHostnamesFromRangeExpressions(clsDef.getNodes()));

    /*
     * Reserve the nodes as specified in the node range expressions
     * -- throw exception, if any nodes are pre-associated with other cluster
     */
    List<String> nodes_currently_allocated_to_cluster = new ArrayList<String>();
    for (Node n : nodes.getNodes().values()) {
        if (n.getNodeState().getClusterName() != null
                && n.getNodeState().getClusterName().equals(clusterName)) {
            nodes_currently_allocated_to_cluster.add(n.getName());
        }
    }

    List<String> nodes_to_allocate = new ArrayList<String>(cluster_node_range);
    nodes_to_allocate.removeAll(nodes_currently_allocated_to_cluster);
    List<String> nodes_to_deallocate = new ArrayList<String>(nodes_currently_allocated_to_cluster);
    nodes_to_deallocate.removeAll(cluster_node_range);

    /*
     * Check for any nodes that are allocated to other cluster
     */
    List<String> preallocatedhosts = new ArrayList<String>();
    for (String n : nodes_to_allocate) {
        if (all_nodes.containsKey(n) && (all_nodes.get(n).getNodeState().getClusterName() != null
                || all_nodes.get(n).getNodeState().getAllocatedToCluster())) {
            preallocatedhosts.add(n);
        }
    }

    /* 
     * Throw exception, if some of the hosts are already allocated to other cluster
     */
    if (!preallocatedhosts.isEmpty()) {
        /*
         * TODO: Return invalid request code and return list of preallocated nodes as a part of
         *       response element
         */
        String msg = "Some of the nodes specified for the cluster roles are allocated to other cluster: ["
                + preallocatedhosts + "]";
        throw new WebApplicationException((new ExceptionResponse(msg, Response.Status.CONFLICT)).get());
    }

    /*
     * Allocate nodes to given cluster
     */
    for (String node_name : nodes_to_allocate) {
        if (all_nodes.containsKey(node_name)) {
            // Set the cluster name in the node 
            synchronized (all_nodes.get(node_name)) {
                all_nodes.get(node_name).reserveNodeForCluster(clusterName, true);
            }
        } else {
            Date epoch = new Date(0);
            nodes.checkAndUpdateNode(node_name, epoch);
            Node node = nodes.getNode(node_name);
            /*
             * TODO: Set agentInstalled = true, unless controller uses SSH to setup the agent
             */
            node.reserveNodeForCluster(clusterName, true);
        }
    }

    /*
     * deallocate nodes from a given cluster
     * TODO: Node agent would asynchronously clean up the node and notify it through heartbeat which 
     * would reset the clusterID associated with node
     */
    for (String node_name : nodes_to_deallocate) {
        if (all_nodes.containsKey(node_name)) {
            synchronized (all_nodes.get(node_name)) {
                all_nodes.get(node_name).releaseNodeFromCluster();
            }
        }
    }
}

From source file: spade.utility.BitcoinTools.java

public void writeBlocksToCSV(int startIndex, int endIndex) {
    int lastBlockId = -1;
    final BitcoinTools bitcoinTools = new BitcoinTools();

    String pattern = "#.##";
    DecimalFormat decimalFormat = new DecimalFormat(pattern);

    final ConcurrentHashMap<Integer, Block> blockMap = new ConcurrentHashMap<Integer, Block>();
    final AtomicInteger currentBlock = new AtomicInteger(startIndex);
    final int stopIndex = endIndex;
    final int totalThreads = Runtime.getRuntime().availableProcessors();

    class BlockFetcher implements Runnable {

        public void run() {

            while (true) {
                if (blockMap.size() > totalThreads * 5) { // throttle: cap how many prefetched blocks are held in memory
                    try {
                        Thread.sleep(100);
                        continue;
                    } catch (Exception exception) {
                        // interrupted while throttling; loop and re-check the buffer
                    }
                }

                int blockToFetch = currentBlock.getAndIncrement();
                try {
                    blockMap.put(blockToFetch, bitcoinTools.getBlock(blockToFetch));
                } catch (JSONException exception) {
                    Bitcoin.log(Level.SEVERE, "Block " + blockToFetch + " has invalid json. Redownloading.",
                            exception);
                    try {
                        blockMap.put(blockToFetch, bitcoinTools.getBlock(blockToFetch));
                    } catch (JSONException ex) {
                        Bitcoin.log(Level.SEVERE, "Block " + blockToFetch + " couldn't be included in CSV.",
                                ex);
                    }
                }
                if (blockToFetch >= stopIndex) {
                    break;
                }
            }
        }
    }

    ArrayList<Thread> workers = new ArrayList<Thread>();
    for (int i = 0; i < totalThreads; i++) {
        Thread th = new Thread(new BlockFetcher());
        workers.add(th);
        th.start();
    }

    int percentageCompleted = 0;

    for (int i = startIndex; i < endIndex; i++) {

        try {

            Block block;
            // spin-wait until one of the fetcher threads has published block i
            while (!blockMap.containsKey(i)) {
            }
            block = blockMap.get(i);
            blockMap.remove(i);
            lastBlockId = writeBlockToCSV(block, lastBlockId);

            if ((((i - startIndex + 1) * 100) / (endIndex - startIndex)) > percentageCompleted) {
                Runtime rt = Runtime.getRuntime();
                long totalMemory = rt.totalMemory() / 1024 / 1024;
                long freeMemory = rt.freeMemory() / 1024 / 1024;
                long usedMemory = totalMemory - freeMemory;
                System.out.print("| Cores: " + rt.availableProcessors() + " | Threads: " + totalThreads
                        + " | Heap (MB) - total: " + totalMemory + ", %age free: "
                        + (freeMemory * 100) / totalMemory + " | At Block: " + (i - startIndex + 1) + " / "
                        + (endIndex - startIndex) + " | Percentage Completed: " + percentageCompleted
                        // + " |\r");
                        + " |\n");
            }

            percentageCompleted = ((i - startIndex + 1) * 100) / (endIndex - startIndex);

        } catch (IOException ex) {
            Bitcoin.log(Level.SEVERE, "Unexpected IOException. Stopping CSV creation.", ex);
            break;
        }
    }

    for (int i = 0; i < totalThreads; i++) {
        try {
            workers.get(i).interrupt();
            workers.get(i).join();
        } catch (InterruptedException exception) {
        }
    }

    System.out.println("\n\ndone with creating CSVes!");
}
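
The empty while (!blockMap.containsKey(i)) loop in the writer thread spins at full speed until a fetcher publishes block i. Polling containsKey is correct here, but a short back-off reclaims the CPU while preserving the index order the writer needs; one possible rewrite of that wait (the sleep length is arbitrary):

Block block;
// remove(i) combines the containsKey/get/remove triple into one atomic call
while ((block = blockMap.remove(i)) == null) {
    try {
        Thread.sleep(10); // back off instead of spinning
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        return;
    }
}
lastBlockId = writeBlockToCSV(block, lastBlockId);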