Example usage for java.util.concurrent.locks Lock unlock

Introduction

On this page you can find example usage for java.util.concurrent.locks Lock unlock.

Prototype

void unlock();

Document

Releases the lock.
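
Every example below calls unlock() from a finally block so that the lock is released even when the guarded code throws. A minimal sketch of the canonical idiom (class and field names here are illustrative, not taken from the sources below):

import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

public class Counter {
    private final Lock lock = new ReentrantLock();
    private int count;

    public void increment() {
        lock.lock(); // acquire before the try: if lock() fails, there is nothing to release
        try {
            count++; // critical section
        } finally {
            lock.unlock(); // always runs, even if the critical section throws
        }
    }
}

ReentrantLock, the most common implementation, throws IllegalMonitorStateException when unlock() is called by a thread that does not hold the lock, which is why the acquisition sits before the try rather than inside it.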

Usage

From source file:com.funambol.pushlistener.service.taskexecutor.ScheduledTaskExecutor.java

/**
 * Called when a ScheduledTask ends its execution. See afterExecute.
 */
protected void afterScheduledTaskExecution(Runnable r, Throwable t) {
    super.afterExecute(r, t);

    ScheduledTaskWrapper scheduledTask = null;
    Lock handlingTaskLock = null;

    if (r instanceof ScheduledFuture) {
        ScheduledFuture scheduledFuture = (ScheduledFuture) r;
        synchronized (scheduledFutures) {
            scheduledTask = (ScheduledTaskWrapper) scheduledFutures.getKey(scheduledFuture);

            if (scheduledTask != null) {
                handlingTaskLock = getHandlingTaskLock(scheduledTask.getId());
                handlingTaskLock.lock();
            }
        }
        //
        // Bear in mind that here the scheduledTask could be null if the scheduledFuture
        // has been cancelled and removed from the scheduledFutures map.
        //
        if (log.isTraceEnabled()) {
            if (scheduledTask == null) {
                log.trace("Scheduled task null for: " + r + ". Is it cancelled ? "
                        + scheduledFuture.isCancelled());
            }
        }

        try {
            if (scheduledFuture.isDone()) {
                scheduledFuture.get();
            }
        } catch (InterruptedException ie) {
            // interrupted while waiting for the task result; nothing to do here
        } catch (ExecutionException ee) {
            //
            // This is done to retrieve the possible exception thrown by the
            // task
            //
            Throwable realThrowable = ee.getCause();

            if (scheduledTask != null) {

                log.error("Task '" + scheduledTask + "' throws an uncaught exception. "
                        + "The task will be rescheduled", realThrowable);
                try {
                    scheduledTask.shutdown();
                } catch (TaskException ex) {
                    log.error("Error shutting down scheduled task '" + scheduledTask + "'", ex);
                }

                scheduledTask.setState(ScheduledTaskWrapper.State.SHUTDOWN);

                synchronized (scheduledFutures) {
                    //
                    // Any time we remove the scheduledTask from scheduledFutures,
                    // we try to remove the scheduledFuture from the queue. This
                    // is not really needed because after a while this is performed
                    // automatically but in this way we keep scheduledFutures and
                    // the queue in sync
                    //
                    if (scheduledFuture instanceof Runnable) {
                        super.remove((Runnable) scheduledFuture);
                    }
                    scheduledFutures.remove(scheduledTask);
                }

                //
                // The task will be rescheduled using the period as delay
                // because otherwise a new execution is performed asap
                //
                scheduleTask(scheduledTask, scheduledTask.getPeriod());

            } else {
                log.error("Uncaught exception thrown by: " + scheduledFuture
                        + ". This ScheduledFuture seems not relative to a ScheduleTask"
                        + " so nothing will be rescheduled (it could be about "
                        + " to an already cancelled task)", realThrowable);
            }

        } catch (CancellationException ce) {
            // the task was cancelled; nothing to handle here
        } finally {
            if (handlingTaskLock != null) {
                handlingTaskLock.unlock();
            }
            handlingScheduledExecutionTimeInformation(scheduledTask);
            LogContext.clear();
        }
    }
}
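
In this example the lock is only acquired when the task lookup succeeds, so the finally block null-checks before unlocking; calling unlock() on a lock the thread never acquired fails (ReentrantLock throws IllegalMonitorStateException). A minimal sketch of this conditional-acquire shape, with hypothetical names:

import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

public class ConditionalLocking {
    private final Lock taskLock = new ReentrantLock();

    public void process(Runnable work, boolean needsGuard) {
        Lock held = null;
        if (needsGuard) {          // acquire only when this call must be guarded
            held = taskLock;
            held.lock();
        }
        try {
            work.run();
        } finally {
            if (held != null) {    // unlock only what was actually locked
                held.unlock();
            }
        }
    }
}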

From source file:org.ng200.openolympus.services.TestingService.java

private void checkVerdict(final Verdict verdict, final SolutionJudge judge, final List<Path> testFiles,
        final BigDecimal maximumScore, final Properties properties) throws ExecutionException {
    if (this.dataProvider == null) {
        throw new IllegalStateException("Shared data provider is null!");
    }

    final Lock lock = verdict.getSolution().getTask().readLock();
    lock.lock();

    try {
        TestingService.logger.info("Scheduling verdict {} for testing.", verdict.getId());

        final JPPFJob job = new JPPFJob();
        job.setDataProvider(this.dataProvider);

        job.setName("Check verdict " + verdict.getId());

        final int priority = (int) ((verdict.isViewableWhenContestRunning() ? (Integer.MAX_VALUE / 2) : 0)
                - verdict.getId());
        job.getSLA().setMaxNodes(1);
        job.getSLA().setPriority(priority);
        job.getSLA().setDispatchExpirationSchedule(new JPPFSchedule(60000L));
        job.getSLA().setMaxDispatchExpirations(3);

        TaskContainer taskContainer = taskContainerCache
                .getTaskContainerForTask(verdict.getSolution().getTask());

        Thread.currentThread().setContextClassLoader(
                new URLClassLoader(taskContainer.getClassLoaderURLs().toArray(new URL[0]),
                        Thread.currentThread().getContextClassLoader()));

        job.add(new JacksonSerializationDelegatingTask<>(
                new VerdictCheckingTask(judge, testFiles, maximumScore, properties),
                taskContainer.getClassLoaderURLs()));

        job.setBlocking(true);

        jppfClient.registerClassLoader(taskContainer.getClassLoader(), job.getUuid());
        this.jppfClient.submitJob(job);
        @SuppressWarnings("unchecked")
        final org.jppf.node.protocol.Task<String> task = (org.jppf.node.protocol.Task<String>) job
                .awaitResults().get(0);

        if (task.getThrowable() != null) {
            throw task.getThrowable();
        }

        ObjectMapper objectMapper = JacksonSerializationFactory.createObjectMapper();

        final JsonTaskExecutionResult<Pair<SolutionJudge, SolutionResult>> checkingResult = ((JacksonSerializationDelegatingTask<Pair<SolutionJudge, SolutionResult>, VerdictCheckingTask>) job
                .awaitResults().get(0)).getResultOrThrowable();

        if (checkingResult.getError() != null) {
            throw checkingResult.getError();
        }

        final SolutionResult result = checkingResult.getResult().getSecond();

        verdict.setScore(result.getScore());
        verdict.setMemoryPeak(result.getMemoryPeak());
        verdict.setCpuTime(Duration.ofMillis(result.getCpuTime()));
        verdict.setRealTime(Duration.ofMillis(result.getRealTime()));

        verdict.setStatus(result.getResult());
        switch (result.getResult()) {
        case OK:
        case TIME_LIMIT:
        case MEMORY_LIMIT:
        case OUTPUT_LIMIT:
        case PRESENTATION_ERROR:
        case WRONG_ANSWER:
        case RUNTIME_ERROR:
            break;
        case INTERNAL_ERROR:
            result.getErrorMessages()
                    .forEach((stage, message) -> this.internalErrors.put(this.internalErrorCounter++,
                            new Pair<String, String>(verdict.getSolution().getTask().getName(), message)));
            break;
        case SECURITY_VIOLATION:
            verdict.setUnauthorisedSyscall(result.getUnauthorisedSyscall());
            break;
        case COMPILE_ERROR:
            final String message = result.getErrorMessages().values().stream()
                    .collect(Collectors.joining("\n"));
            verdict.setAdditionalInformation(
                    HtmlUtils.htmlEscape(message.substring(0, Math.min(128, message.length()))));
            break;
        case WAITING:
            throw new IllegalStateException("Judge returned result \"waiting\".");
        }

    } catch (final Throwable throwable) {
        verdict.setStatus(SolutionResult.Result.INTERNAL_ERROR);
        throw new RuntimeException("Couldn't run solution: ", throwable);
    } finally {
        lock.unlock();

        verdict.setTested(true);
        if (verdict.getStatus() == SolutionResult.Result.WAITING) {
            verdict.setStatus(SolutionResult.Result.INTERNAL_ERROR);
            TestingService.logger.error(
                    "Judge for task {} did not set the result status to an acceptable value: got WAITING instead.",
                    verdict.getSolution().getTask().getId());
        }
        this.solutionService.saveVerdict(verdict);
    }
}
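
Here the solution's task exposes a read lock, so several verdicts for the same task can be checked concurrently while any writer is excluded. A minimal sketch of the read/write pairing, with hypothetical names, assuming a ReentrantReadWriteLock backs the task:

import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class TaskState {
    private final ReadWriteLock rwLock = new ReentrantReadWriteLock();
    private String config = "initial";

    public String read() {
        rwLock.readLock().lock(); // many readers may hold this at once
        try {
            return config;
        } finally {
            rwLock.readLock().unlock();
        }
    }

    public void write(String newConfig) {
        rwLock.writeLock().lock(); // exclusive: blocks readers and other writers
        try {
            config = newConfig;
        } finally {
            rwLock.writeLock().unlock();
        }
    }
}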

From source file:com.alibaba.wasp.master.AssignmentManager.java

@Override
public void nodeDeleted(final String path) {
    if (path.startsWith(watcher.assignmentZNode)) {
        int wi = Math.abs(path.hashCode() % zkEventWorkers.length);
        zkEventWorkers[wi].submit(new Runnable() {
            @Override
            public void run() {
                String entityGroupName = ZKAssign.getEntityGroupName(watcher, path);
                Lock lock = locker.acquireLock(entityGroupName);
                try {
                    EntityGroupState egState = entityGroupStates.getEntityGroupTransitionState(entityGroupName);
                    if (egState == null)
                        return;

                    EntityGroupInfo entityGroupInfo = egState.getEntityGroup();
                    if (egState.isSplit()) {
                        LOG.debug("Ephemeral node deleted, entityGroupserver crashed?, "
                                + "clearing from EGIT; egState=" + egState);
                        entityGroupOffline(egState.getEntityGroup());
                    } else {
                        LOG.debug("The znode of entityGroup " + entityGroupInfo.getEntityGroupNameAsString()
                                + " has been deleted.");
                        if (egState.isOpened()) {
                            ServerName serverName = egState.getServerName();
                            entityGroupOnline(entityGroupInfo, serverName);
                            LOG.info("The master has opened the entityGroup "
                                    + entityGroupInfo.getEntityGroupNameAsString() + " that was online on "
                                    + serverName);
                            if (getZKTable()
                                    .isDisablingOrDisabledTable(entityGroupInfo.getTableNameAsString())) {
                                LOG.debug("Opened entityGroup " + entityGroupInfo.getEntityGroupNameAsString()
                                        + " but " + "this table is disabled, triggering close of entityGroup");
                                unassign(entityGroupInfo);
                            }
                        }
                    }
                } finally {
                    lock.unlock();
                }
            }
        });
    }
}
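
The locker.acquireLock(entityGroupName) call used above returns a per-key lock that is already acquired, so work on different entity groups (or regions, in the HBase examples below) runs in parallel while work on the same key is serialized, and the caller only has to unlock() in finally. A minimal sketch of such a keyed-lock registry, with hypothetical names (unlike the real implementations, this sketch never evicts idle locks):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

public class KeyedLocker {
    private final ConcurrentMap<String, Lock> locks = new ConcurrentHashMap<>();

    // Returns the lock for this key, already acquired.
    // The caller must call unlock() on it in a finally block.
    public Lock acquireLock(String key) {
        Lock lock = locks.computeIfAbsent(key, k -> new ReentrantLock());
        lock.lock();
        return lock;
    }
}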

From source file:org.apache.hadoop.hbase.master.AssignmentManager.java

/**
 * Process shutdown server removing any assignments.
 * @param sn Server that went down.
 * @return list of regions in transition on this server
 */
public List<HRegionInfo> processServerShutdown(final ServerName sn) {
    // Clean out any existing assignment plans for this server
    synchronized (this.regionPlans) {
        for (Iterator<Map.Entry<String, RegionPlan>> i = this.regionPlans.entrySet().iterator(); i.hasNext();) {
            Map.Entry<String, RegionPlan> e = i.next();
            ServerName otherSn = e.getValue().getDestination();
            // The name will be null if the region is planned for a random assign.
            if (otherSn != null && otherSn.equals(sn)) {
                // Use iterator's remove else we'll get CME
                i.remove();
            }
        }
    }
    List<HRegionInfo> regions = regionStates.serverOffline(watcher, sn);
    for (Iterator<HRegionInfo> it = regions.iterator(); it.hasNext();) {
        HRegionInfo hri = it.next();
        String encodedName = hri.getEncodedName();

        // We need a lock on the region as we could update it
        Lock lock = locker.acquireLock(encodedName);
        try {
            RegionState regionState = regionStates.getRegionTransitionState(encodedName);
            if (regionState == null || (regionState.getServerName() != null && !regionState.isOnServer(sn))
                    || !(regionState.isFailedClose() || regionState.isOffline()
                            || regionState.isPendingOpenOrOpening())) {
                LOG.info("Skip " + regionState + " since it is not opening/failed_close"
                        + " on the dead server any more: " + sn);
                it.remove();
            } else {
                try {
                    // Delete the ZNode if exists
                    ZKAssign.deleteNodeFailSilent(watcher, hri);
                } catch (KeeperException ke) {
                    server.abort("Unexpected ZK exception deleting node " + hri, ke);
                }
                if (tableStateManager.isTableState(hri.getTable(), ZooKeeperProtos.Table.State.DISABLED,
                        ZooKeeperProtos.Table.State.DISABLING)) {
                    regionStates.regionOffline(hri);
                    it.remove();
                    continue;
                }
                // Mark the region offline and assign it again by SSH
                regionStates.updateRegionState(hri, State.OFFLINE);
            }
        } finally {
            lock.unlock();
        }
    }
    return regions;
}

From source file:org.apache.hadoop.hbase.master.AssignmentManager.java

/**
 * Process failover of new master for region <code>encodedRegionName</code>
 * up in zookeeper.
 * @param encodedRegionName Region to process failover for.
 * @param regionInfo If null we'll go get it from meta table.
 * @return True if we processed <code>regionInfo</code> as a RIT.
 * @throws KeeperException
 * @throws IOException
 */
boolean processRegionInTransition(final String encodedRegionName, final HRegionInfo regionInfo)
        throws KeeperException, IOException {
    // We need a lock here to ensure that we will not put the same region twice
    // It has no reason to be a lock shared with the other operations.
    // We can do the lock on the region only, instead of a global lock: what we want to ensure
    // is that we don't have two threads working on the same region.
    Lock lock = locker.acquireLock(encodedRegionName);
    try {
        Stat stat = new Stat();
        byte[] data = ZKAssign.getDataAndWatch(watcher, encodedRegionName, stat);
        if (data == null)
            return false;
        RegionTransition rt;
        try {
            rt = RegionTransition.parseFrom(data);
        } catch (DeserializationException e) {
            LOG.warn("Failed parse znode data", e);
            return false;
        }
        HRegionInfo hri = regionInfo;
        if (hri == null) {
            // The region info is not passed in. We will try to find the region
            // from region states map/meta based on the encoded region name. But we
            // may not be able to find it. This is valid for online merge that
            // the region may have not been created if the merge is not completed.
            // Therefore, it is not in meta at master recovery time.
            hri = regionStates.getRegionInfo(rt.getRegionName());
            EventType et = rt.getEventType();
            if (hri == null && et != EventType.RS_ZK_REGION_MERGING
                    && et != EventType.RS_ZK_REQUEST_REGION_MERGE) {
                LOG.warn("Couldn't find the region in recovering " + rt);
                return false;
            }
        }
        return processRegionsInTransition(rt, hri, stat.getVersion());
    } finally {
        lock.unlock();
    }
}

From source file:jenkins.plugins.git.AbstractGitSCMSource.java

@NonNull
@Override
protected void retrieve(@NonNull final SCMHeadObserver observer, @NonNull TaskListener listener)
        throws IOException, InterruptedException {
    String cacheEntry = getCacheEntry();
    Lock cacheLock = getCacheLock(cacheEntry);
    cacheLock.lock();
    try {
        File cacheDir = getCacheDir(cacheEntry);
        Git git = Git.with(listener, new EnvVars(EnvVars.masterEnvVars)).in(cacheDir);
        GitClient client = git.getClient();
        client.addDefaultCredentials(getCredentials());
        if (!client.hasGitRepo()) {
            listener.getLogger().println("Creating git repository in " + cacheDir);
            client.init();
        }
        String remoteName = getRemoteName();
        listener.getLogger().println("Setting " + remoteName + " to " + getRemote());
        client.setRemoteUrl(remoteName, getRemote());
        listener.getLogger().println("Fetching " + remoteName + "...");
        List<RefSpec> refSpecs = getRefSpecs();
        client.fetch(remoteName, refSpecs.toArray(new RefSpec[refSpecs.size()]));
        listener.getLogger().println("Pruning stale remotes...");
        final Repository repository = client.getRepository();
        try {
            client.prune(new RemoteConfig(repository.getConfig(), remoteName));
        } catch (UnsupportedOperationException e) {
            e.printStackTrace(listener.error("Could not prune stale remotes"));
        } catch (URISyntaxException e) {
            e.printStackTrace(listener.error("Could not prune stale remotes"));
        }
        listener.getLogger().println("Getting remote branches...");
        SCMSourceCriteria branchCriteria = getCriteria();
        RevWalk walk = new RevWalk(repository);
        try {
            walk.setRetainBody(false);
            for (Branch b : client.getRemoteBranches()) {
                if (!b.getName().startsWith(remoteName + "/")) {
                    continue;
                }
                final String branchName = StringUtils.removeStart(b.getName(), remoteName + "/");
                listener.getLogger().println("Checking branch " + branchName);
                if (isExcluded(branchName)) {
                    continue;
                }
                if (branchCriteria != null) {
                    RevCommit commit = walk.parseCommit(b.getSHA1());
                    final long lastModified = TimeUnit.SECONDS.toMillis(commit.getCommitTime());
                    final RevTree tree = commit.getTree();
                    SCMSourceCriteria.Probe probe = new SCMSourceCriteria.Probe() {
                        @Override
                        public String name() {
                            return branchName;
                        }

                        @Override
                        public long lastModified() {
                            return lastModified;
                        }

                        @Override
                        public boolean exists(@NonNull String path) throws IOException {
                            TreeWalk tw = TreeWalk.forPath(repository, path, tree);
                            try {
                                return tw != null;
                            } finally {
                                if (tw != null) {
                                    tw.release();
                                }
                            }
                        }
                    };
                    if (branchCriteria.isHead(probe, listener)) {
                        listener.getLogger().println("Met criteria");
                    } else {
                        listener.getLogger().println("Does not meet criteria");
                        continue;
                    }
                }
                SCMHead head = new SCMHead(branchName);
                SCMRevision hash = new SCMRevisionImpl(head, b.getSHA1String());
                observer.observe(head, hash);
                if (!observer.isObserving()) {
                    return;
                }
            }
        } finally {
            walk.dispose();
        }

        listener.getLogger().println("Done.");
    } finally {
        cacheLock.unlock();
    }
}

From source file:org.marketcetera.marketdata.core.provider.AbstractMarketDataProvider.java

@Override
public void requestMarketData(MarketDataRequestToken inRequestToken) {
    if (!isRunning()) {
        throw new MarketDataProviderNotAvailable();
    }
    Set<MarketDataRequestAtom> atoms = explodeRequest(inRequestToken.getRequest());
    totalRequests += atoms.size();
    SLF4JLoggerProxy.debug(this, "Received market data request {}, exploded to {}", //$NON-NLS-1$
            inRequestToken, atoms);
    Lock marketdataRequestLock = marketdataLock.writeLock();
    try {
        marketdataRequestLock.lockInterruptibly();
    } catch (InterruptedException e) {
        org.marketcetera.marketdata.core.Messages.UNABLE_TO_ACQUIRE_LOCK.error(this);
        stop();
        throw new MarketDataRequestFailed(e);
    }
    SLF4JLoggerProxy.trace(this, "Acquired lock"); //$NON-NLS-1$
    try {
        mapRequestToInstruments(inRequestToken);
        for (MarketDataRequestAtom atom : atoms) {
            if (requestsByAtom.containsKey(atom)) {
                SLF4JLoggerProxy.debug(this, "Already requested {}, adding to reference count", atom);
                Instrument snapshotInstrument = instrumentsBySymbol.get(atom.getSymbol());
                if (snapshotInstrument == null) {
                    SLF4JLoggerProxy.warn(this, "Symbol {} not yet mapped, cannot send snapshot",
                            atom.getSymbol());
                } else {
                    Event snapshotEvent = getSnapshot(snapshotInstrument, atom.getContent());
                    if (snapshotEvent instanceof HasEventType) {
                        HasEventType eventTypeSnapshot = (HasEventType) snapshotEvent;
                        eventTypeSnapshot.setEventType(EventType.SNAPSHOT_FINAL);
                    }
                    if (snapshotEvent != null) {
                        SLF4JLoggerProxy.debug(this, "Sending snapshot: {}", snapshotEvent);
                        if (inRequestToken.getSubscriber() != null) {
                            inRequestToken.getSubscriber().publishTo(snapshotEvent);
                        }
                    } else {
                        SLF4JLoggerProxy.debug(this, "No snapshot for {}", atom);
                    }
                }
                requestsByAtom.put(atom, inRequestToken);
                requestsBySymbol.put(atom.getSymbol(), inRequestToken);
            } else {
                Capability requiredCapability = necessaryCapabilities.get(atom.getContent());
                if (requiredCapability == null) {
                    org.marketcetera.marketdata.core.Messages.UNKNOWN_MARKETDATA_CONTENT.error(this,
                            atom.getContent());
                    throw new UnsupportedOperationException(
                            org.marketcetera.marketdata.core.Messages.UNKNOWN_MARKETDATA_CONTENT
                                    .getText(atom.getContent()));
                }
                Set<Capability> capabilities = getCapabilities();
                if (!capabilities.contains(requiredCapability)) {
                    org.marketcetera.marketdata.core.Messages.UNSUPPORTED_MARKETDATA_CONTENT.error(this,
                            atom.getContent(), capabilities.toString());
                    throw new MarketDataRequestFailed(new I18NBoundMessage2P(
                            org.marketcetera.marketdata.core.Messages.UNSUPPORTED_MARKETDATA_CONTENT,
                            atom.getContent(), capabilities.toString()));
                }
                requestsByAtom.put(atom, inRequestToken);
                requestsBySymbol.put(atom.getSymbol(), inRequestToken);
                SLF4JLoggerProxy.debug(this, "Requesting {}", atom);
                doMarketDataRequest(inRequestToken.getRequest(), atom);
            }
        }
    } catch (Exception e) {
        try {
            cancelMarketDataRequest(inRequestToken);
        } catch (Exception ignored) {
        }
        org.marketcetera.marketdata.core.Messages.MARKETDATA_REQUEST_FAILED.warn(this, e);
        if (e instanceof MarketDataException) {
            throw (MarketDataException) e;
        }
        throw new MarketDataRequestFailed(e);
    } finally {
        marketdataRequestLock.unlock();
        SLF4JLoggerProxy.trace(this, "Lock released"); //$NON-NLS-1$
    }
}
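
Note the shape around lockInterruptibly(): the acquisition sits in its own try/catch, and only after it succeeds does the second try begin, with unlock() in its finally. If the acquisition were inside that same try, an interrupt during locking would still reach the finally and attempt to unlock a lock that was never acquired. A minimal sketch, with hypothetical names:

import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

public class InterruptibleWorker {
    private final Lock lock = new ReentrantLock();

    public void doWork() throws InterruptedException {
        // Acquire first, on its own: if lockInterruptibly() is interrupted,
        // the lock was never obtained and must not be unlocked.
        lock.lockInterruptibly();
        try {
            // ... guarded work ...
        } finally {
            lock.unlock(); // reached only when the acquisition succeeded
        }
    }
}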

From source file:org.openhab.binding.neeo.internal.handler.NeeoBrainHandler.java

/**
 * Initializes the bridge by connecting to the configuration ip address and parsing the results. Properties will be
 * set and the thing will go online.
 */
private void initializeTask() {
    final Lock writerLock = stateLock.writeLock();
    writerLock.lock();
    try {
        NeeoUtil.checkInterrupt();

        final NeeoBrainConfig config = getBrainConfig();
        final String ipAddress = config.getIpAddress();
        if (ipAddress == null || StringUtils.isEmpty(ipAddress)) {
            updateStatus(ThingStatus.OFFLINE, ThingStatusDetail.CONFIGURATION_ERROR,
                    "Brain IP Address must be specified");
            return;
        }
        final NeeoBrainApi api = new NeeoBrainApi(ipAddress);
        final NeeoBrain brain = api.getBrain();
        final String brainId = getNeeoBrainId();

        NeeoUtil.checkInterrupt();
        neeoBrainApi = api;

        final Map<String, String> properties = new HashMap<>();
        addProperty(properties, "Name", brain.getName());
        addProperty(properties, "Version", brain.getVersion());
        addProperty(properties, "Label", brain.getLabel());
        addProperty(properties, "Is Configured", String.valueOf(brain.isConfigured()));
        addProperty(properties, "Key", brain.getKey());
        addProperty(properties, "AirKey", brain.getAirkey());
        addProperty(properties, "Last Change", String.valueOf(brain.getLastChange()));
        updateProperties(properties);

        if (config.isEnableForwardActions()) {
            NeeoUtil.checkInterrupt();

            forwardActionServlet = new NeeoForwardActionsServlet(scheduler,
                    new NeeoForwardActionsServlet.Callback() {
                        @Override
                        public void post(String json) {
                            triggerChannel(NeeoConstants.CHANNEL_BRAIN_FOWARDACTIONS, json);

                            final NeeoAction action = gson.fromJson(json, NeeoAction.class);

                            for (final Thing child : getThing().getThings()) {
                                final ThingHandler th = child.getHandler();
                                if (th instanceof NeeoRoomHandler) {
                                    ((NeeoRoomHandler) th).processAction(action);
                                }
                            }
                        }

                    }, config.getForwardChain());

            NeeoUtil.checkInterrupt();
            try {
                servletPath = NeeoConstants.WEBAPP_FORWARDACTIONS.replace("{brainid}", brainId);

                httpService.registerServlet(servletPath, forwardActionServlet, new Hashtable<>(),
                        httpService.createDefaultHttpContext());

                final URL callbackURL = createCallbackUrl(brainId, config);
                if (callbackURL == null) {
                    logger.debug(
                            "Unable to create a callback URL because there is no primary address specified (please set the primary address in the configuration)");
                } else {
                    final URL url = new URL(callbackURL, servletPath);
                    api.registerForwardActions(url);
                }
            } catch (NamespaceException | ServletException e) {
                logger.debug("Error registering forward actions to {}: {}", servletPath, e.getMessage(), e);
            }
        }

        NeeoUtil.checkInterrupt();
        updateStatus(ThingStatus.ONLINE);
        NeeoUtil.checkInterrupt();
        if (config.getCheckStatusInterval() > 0) {
            NeeoUtil.cancel(checkStatus.getAndSet(scheduler.scheduleWithFixedDelay(() -> {
                try {
                    NeeoUtil.checkInterrupt();
                    checkStatus(ipAddress);
                } catch (InterruptedException e) {
                    // do nothing - we were interrupted and should stop
                }
            }, config.getCheckStatusInterval(), config.getCheckStatusInterval(), TimeUnit.SECONDS)));
        }
    } catch (IOException e) {
        logger.debug("Exception occurred connecting to brain: {}", e.getMessage(), e);
        updateStatus(ThingStatus.OFFLINE, ThingStatusDetail.COMMUNICATION_ERROR,
                "Exception occurred connecting to brain: " + e.getMessage());
    } catch (InterruptedException e) {
        logger.debug("Initializtion was interrupted", e);
        updateStatus(ThingStatus.OFFLINE, ThingStatusDetail.HANDLER_INITIALIZING_ERROR,
                "Initialization was interrupted");
    } finally {
        writerLock.unlock();
    }
}

From source file:org.elasticsoftware.elasticactors.cluster.LocalActorSystemInstance.java

/**
 * Distribute the shards over the list of physical nodes
 *
 * @param nodes
 */
public void distributeShards(List<PhysicalNode> nodes, ShardDistributionStrategy strategy) throws Exception {
    final boolean initializing = initialized.compareAndSet(false, true);
    // see if this was the first time, if so we need to initialize the ActorSystem
    if (initializing) {
        logger.info(format("Initializing ActorSystem [%s]", getName()));
    }

    NodeSelector nodeSelector = nodeSelectorFactory.create(nodes);
    // fetch all writelocks
    final Lock[] writeLocks = new Lock[shardLocks.length];
    for (int j = 0; j < shardLocks.length; j++) {
        writeLocks[j] = shardLocks[j].writeLock();
    }
    // store the id's of the new local shard in order to generate the events later
    final List<Integer> newLocalShards = new ArrayList<>(shards.length);
    // this is for reporting the number of shards per node
    final List<String> nodeCount = new ArrayList<>(shards.length);

    // assume we are stable until the resharding process tells us otherwise
    boolean stable = true;

    try {
        for (Lock writeLock : writeLocks) {
            writeLock.lock();
        }

        for (int i = 0; i < configuration.getNumberOfShards(); i++) {
            ShardKey shardKey = new ShardKey(configuration.getName(), i);
            PhysicalNode node = nodeSelector.getPrimary(shardKey.toString());
            nodeCount.add(node.getId());
            if (node.isLocal()) {
                // this instance should start owning the shard now
                final ActorShard currentShard = shards[i];
                if (currentShard == null || !currentShard.getOwningNode().isLocal()) {
                    String owningNodeId = currentShard != null ? currentShard.getOwningNode().getId()
                            : "<No Node>";
                    logger.info(format("I will own %s", shardKey.toString()));
                    // destroy the current remote shard instance
                    if (currentShard != null) {
                        currentShard.destroy();
                    }
                    // create a new local shard and swap it
                    LocalActorShard newShard = new LocalActorShard(node, this, i, shardAdapters[i].myRef,
                            localMessageQueueFactory, shardActorCacheManager);

                    shards[i] = newShard;
                    try {
                        // register with the strategy to wait for shard to be released
                        strategy.registerWaitForRelease(newShard, node);
                    } catch (Exception e) {
                        logger.error(format(
                                "IMPORTANT: waiting on release of shard %s from node %s failed,  ElasticActors cluster is unstable. Please check all nodes",
                                shardKey, owningNodeId), e);
                        stable = false;
                    } finally {
                        // add it to the new local shards
                        newLocalShards.add(i);
                        // initialize
                        // newShard.init();
                        // start owning the scheduler shard (this will start sending messages, but everything is blocked so it should be no problem)
                        scheduler.registerShard(newShard.getKey());
                    }
                } else {
                    // we own the shard already, no change needed
                    logger.info(format("I already own %s", shardKey.toString()));
                }
            } else {
                // the shard will be managed by another node
                final ActorShard currentShard = shards[i];
                if (currentShard == null || currentShard.getOwningNode().isLocal()) {
                    logger.info(format("%s will own %s", node, shardKey));
                    try {
                        // destroy the current local shard instance
                        if (currentShard != null) {
                            // stop owning the scheduler shard
                            scheduler.unregisterShard(currentShard.getKey());
                            currentShard.destroy();
                            strategy.signalRelease(currentShard, node);
                        }
                    } catch (Exception e) {
                        logger.error(format(
                                "IMPORTANT: signalling release of shard %s to node %s failed, ElasticActors cluster is unstable. Please check all nodes",
                                shardKey, node), e);
                        stable = false;
                    } finally {
                        // create a new remote shard and swap it
                        RemoteActorShard newShard = new RemoteActorShard(node, this, i, shardAdapters[i].myRef,
                                remoteMessageQueueFactory);
                        shards[i] = newShard;
                        // initialize
                        newShard.init();
                    }
                } else {
                    // shard was already remote
                    logger.info(format("%s will own %s", node, shardKey));
                }
            }
        }
        // now we have released all local shards, wait for the new local shards to become available
        if (!strategy.waitForReleasedShards(10, TimeUnit.SECONDS)) {
            // timeout while waiting for the shards
            stable = false;
        }
    } finally {
        // unlock all
        for (Lock writeLock : writeLocks) {
            writeLock.unlock();
        }

        this.stable.set(stable);
    }
    // This needs to happen after we initialize the shards as services expect the system to be initialized and
    // should be allowed to send messages to shards
    if (initializing) {
        // initialize the services
        Set<String> serviceActors = configuration.getServices();
        if (serviceActors != null && !serviceActors.isEmpty()) {
            // initialize the service actors in the context
            for (String elasticActorEntry : serviceActors) {
                localNodeAdapter.sendMessage(null, localNodeAdapter.myRef,
                        new ActivateActorMessage(getName(), elasticActorEntry, ActorType.SERVICE));
            }
        }
    }
    // print out the shard distribution here
    Map<String, Long> collect = nodeCount.stream().collect(groupingBy(Function.identity(), counting()));
    SortedMap<String, Long> sortedNodes = new TreeMap<>(collect);
    logger.info("Cluster shard mapping summary:");
    for (Map.Entry<String, Long> entry : sortedNodes.entrySet()) {
        logger.info(format("\t%s has %d shards assigned", entry.getKey(), entry.getValue()));
    }
    // now we need to generate the events for the new local shards (if any)
    logger.info(format("Generating ACTOR_SHARD_INITIALIZED events for %d new shards", newLocalShards.size()));
    for (Integer newLocalShard : newLocalShards) {
        this.actorSystemEventListenerService.generateEvents(shardAdapters[newLocalShard],
                ACTOR_SHARD_INITIALIZED);
    }
}
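
This example takes the write lock of every shard before redistributing and releases them all in the finally, so no message is routed against a half-updated shard table. A minimal sketch of the acquire-all/release-all shape, with hypothetical names (every thread must take the locks in the same order to avoid deadlock):

import java.util.List;
import java.util.concurrent.locks.Lock;

public final class MultiLock {
    // Acquires every lock, runs the action, then releases every lock.
    public static void withAll(List<Lock> locks, Runnable action) {
        for (Lock lock : locks) {
            lock.lock();
        }
        try {
            action.run();
        } finally {
            for (int i = locks.size() - 1; i >= 0; i--) {
                locks.get(i).unlock(); // release in reverse order by convention
            }
        }
    }
}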

From source file:org.apache.hadoop.hbase.master.AssignmentManager.java

@Override
public void nodeDeleted(final String path) {
    if (path.startsWith(watcher.assignmentZNode)) {
        final String regionName = ZKAssign.getRegionName(watcher, path);
        zkEventWorkersSubmit(new RegionRunnable() {
            @Override
            public String getRegionName() {
                return regionName;
            }

            @Override
            public void run() {
                Lock lock = locker.acquireLock(regionName);
                try {
                    RegionState rs = regionStates.getRegionTransitionState(regionName);
                    if (rs == null) {
                        rs = regionStates.getRegionState(regionName);
                        if (rs == null || !rs.isMergingNew()) {
                            // MergingNew is an offline state
                            return;
                        }
                    }

                    HRegionInfo regionInfo = rs.getRegion();
                    String regionNameStr = regionInfo.getRegionNameAsString();
                    LOG.debug("Znode " + regionNameStr + " deleted, state: " + rs);

                    boolean disabled = getTableStateManager().isTableState(regionInfo.getTable(),
                            ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING);

                    ServerName serverName = rs.getServerName();
                    if (serverManager.isServerOnline(serverName)) {
                        if (rs.isOnServer(serverName) && (rs.isOpened() || rs.isSplitting())) {
                            regionOnline(regionInfo, serverName);
                            if (disabled) {
                                // if server is offline, no hurt to unassign again
                                LOG.info("Opened " + regionNameStr
                                        + "but this table is disabled, triggering close of region");
                                unassign(regionInfo);
                            }
                        } else if (rs.isMergingNew()) {
                            synchronized (regionStates) {
                                String p = regionInfo.getEncodedName();
                                PairOfSameType<HRegionInfo> regions = mergingRegions.get(p);
                                if (regions != null) {
                                    onlineMergingRegion(disabled, regions.getFirst(), serverName);
                                    onlineMergingRegion(disabled, regions.getSecond(), serverName);
                                }
                            }
                        }
                    }
                } finally {
                    lock.unlock();
                }
            }

            private void onlineMergingRegion(boolean disabled, final HRegionInfo hri,
                    final ServerName serverName) {
                RegionState regionState = regionStates.getRegionState(hri);
                if (regionState != null && regionState.isMerging() && regionState.isOnServer(serverName)) {
                    regionOnline(regionState.getRegion(), serverName);
                    if (disabled) {
                        unassign(hri);
                    }
                }
            }
        });
    }
}