Example usage for org.apache.commons.lang.mutable MutableInt decrement

List of usage examples for org.apache.commons.lang.mutable MutableInt decrement

Introduction

On this page you can find example usages of org.apache.commons.lang.mutable MutableInt decrement.

Prototype

public void decrement() 

Source Link

Document

Decrements the value.

Usage

From source file:gobblin.source.extractor.extract.google.GoogleAnalyticsUnsampledExtractorTest.java

/**
 * Builds a {@link GoogleAnalyticsUnsampledExtractor} whose Google Analytics client is fully
 * mocked. The mocked report request "completes" only on its 10th execution; earlier executions
 * either return an empty report or, when {@code throwException} is set, throw a RuntimeException.
 *
 * @param status         report creation status the final (completed) response reports
 * @param wuState        work unit state handed to the extractor under test
 * @param throwException whether polls before completion should fail with a dummy exception
 */
private GoogleAnalyticsUnsampledExtractor setup(final ReportCreationStatus status, WorkUnitState wuState,
        final boolean throwException) throws IOException {
    Extractor delegate = mock(Extractor.class);

    // Wire up the Analytics -> Management -> UnsampledReports -> Get call chain with mocks.
    gaService = mock(Analytics.class);
    Management management = mock(Management.class);
    when(gaService.management()).thenReturn(management);
    UnsampledReports reports = mock(UnsampledReports.class);
    when(management.unsampledReports()).thenReturn(reports);
    getReq = mock(Get.class);
    when(reports.get(anyString(), anyString(), anyString(), anyString())).thenReturn(getReq);

    // Completion happens exactly when this counter reaches zero.
    final MutableInt remainingPolls = new MutableInt(10);
    when(getReq.execute()).then(new Answer<UnsampledReport>() {
        @Override
        public UnsampledReport answer(InvocationOnMock invocation) throws Throwable {
            remainingPolls.decrement();
            if (remainingPolls.intValue() != 0) {
                // Still polling: optionally simulate a transient failure.
                if (throwException) {
                    throw new RuntimeException("Dummy exception.");
                }
                return new UnsampledReport();
            }
            // Final poll: hand back a completed report pointing at the expected Drive file.
            DriveDownloadDetails details = new DriveDownloadDetails();
            details.setDocumentId(EXPECTED_FILE_ID);

            UnsampledReport completed = new UnsampledReport();
            completed.setStatus(status.name()).setDownloadType(DOWNLOAD_TYPE_GOOGLE_DRIVE)
                    .setDriveDownloadDetails(details);
            return completed;
        }
    });

    return new GoogleAnalyticsUnsampledExtractor<>(wuState, delegate, gaService);
}

From source file:gobblin.source.extractor.extract.google.GoogleDriveFsHelperTest.java

/**
 * Verifies that {@code ls()} keeps following {@code nextPageToken} until the listing is
 * exhausted: one initial request plus one follow-up request per paginated page.
 */
public void testPagination() throws IOException, FileBasedHelperException {
    State state = new State();
    state.appendToSetProp(GoogleDriveFileSystem.PAGE_SIZE, Integer.toString(1));

    GoogleDriveFsHelper fsHelper = new GoogleDriveFsHelper(state, client, Closer.create());

    // All the fluent setters on the list request return the request itself.
    List listRequest = mock(List.class);
    when(files.list()).thenReturn(listRequest);
    when(listRequest.setPageSize(anyInt())).thenReturn(listRequest);
    when(listRequest.setFields(anyString())).thenReturn(listRequest);
    when(listRequest.setQ(anyString())).thenReturn(listRequest);
    when(listRequest.setPageToken(anyString())).thenReturn(listRequest);

    final int paginatedCalls = 5;
    final MutableInt remainingPages = new MutableInt(paginatedCalls);
    final File file = new File();
    file.setId("testId");
    file.setModifiedTime(new DateTime(System.currentTimeMillis()));

    // Hand out a nextPageToken for the first paginatedCalls responses, then stop.
    when(listRequest.execute()).thenAnswer(new Answer<FileList>() {

        @Override
        public FileList answer(InvocationOnMock invocation) throws Throwable {
            FileList page = new FileList();
            page.setFiles(ImmutableList.of(file));
            if (remainingPages.intValue() > 0) {
                page.setNextPageToken("token");
                remainingPages.decrement();
            }
            return page;
        }
    });

    fsHelper.ls("test");

    // Initial call + one per page token handed out above.
    verify(listRequest, times(1 + paginatedCalls)).execute();
}

From source file:io.druid.indexing.overlord.RemoteTaskRunner.java

/**
 * Starts the runner: subscribes to worker create/update/delete events in ZooKeeper, blocks
 * until the initial set of known workers has been processed, then schedules task-status
 * cleanup for workers that disappeared while the overlord was not running.
 *
 * <p>{@code waitingFor} counts outstanding startup work: it starts at 1 for the cache's
 * INITIALIZED event, and is incremented once per CHILD_ADDED seen during the initial sync.
 * Every completion decrements it under {@code waitingForMonitor} and notifies, so the main
 * thread can re-check the count.
 */
@LifecycleStart
public void start() {
    try {
        if (started) {
            return; // idempotent: a second start() is a no-op
        }

        final MutableInt waitingFor = new MutableInt(1); // 1 = the INITIALIZED event itself
        final Object waitingForMonitor = new Object();

        // Add listener for creation/deletion of workers
        workerPathCache.getListenable().addListener(new PathChildrenCacheListener() {
            @Override
            public void childEvent(CuratorFramework client, final PathChildrenCacheEvent event)
                    throws Exception {
                final Worker worker;
                switch (event.getType()) {
                case CHILD_ADDED:
                    worker = jsonMapper.readValue(event.getData().getData(), Worker.class);
                    synchronized (waitingForMonitor) {
                        waitingFor.increment();
                    }
                    Futures.addCallback(addWorker(worker), new FutureCallback<ZkWorker>() {
                        @Override
                        public void onSuccess(ZkWorker zkWorker) {
                            synchronized (waitingForMonitor) {
                                waitingFor.decrement();
                                waitingForMonitor.notifyAll();
                            }
                        }

                        @Override
                        public void onFailure(Throwable throwable) {
                            // Count down even on failure so start() cannot wait forever.
                            synchronized (waitingForMonitor) {
                                waitingFor.decrement();
                                waitingForMonitor.notifyAll();
                            }
                        }
                    });
                    break;
                case CHILD_UPDATED:
                    worker = jsonMapper.readValue(event.getData().getData(), Worker.class);
                    updateWorker(worker);
                    break;

                case CHILD_REMOVED:
                    worker = jsonMapper.readValue(event.getData().getData(), Worker.class);
                    removeWorker(worker);
                    break;
                case INITIALIZED:
                    synchronized (waitingForMonitor) {
                        waitingFor.decrement();
                        waitingForMonitor.notifyAll();
                    }
                    // FIX: explicit break. Previously this case fell through into
                    // "default: break;" — behaviorally identical, but fragile if a new
                    // case were ever inserted between them.
                    break;
                default:
                    break;
                }
            }
        });
        workerPathCache.start(PathChildrenCache.StartMode.POST_INITIALIZED_EVENT);
        synchronized (waitingForMonitor) {
            while (waitingFor.intValue() > 0) {
                waitingForMonitor.wait();
            }
        }
        // Schedule cleanup for task status of the workers that might have disconnected while overlord was not running
        List<String> workers;
        try {
            workers = cf.getChildren().forPath(indexerZkConfig.getStatusPath());
        } catch (KeeperException.NoNodeException e) {
            // statusPath doesn't exist yet; can occur if no middleManagers have started.
            workers = ImmutableList.of();
        }
        for (String worker : workers) {
            if (!zkWorkers.containsKey(worker) && cf.checkExists()
                    .forPath(JOINER.join(indexerZkConfig.getAnnouncementsPath(), worker)) == null) {
                scheduleTasksCleanupForWorker(worker,
                        cf.getChildren().forPath(JOINER.join(indexerZkConfig.getStatusPath(), worker)));
            }
        }

        started = true;
    } catch (Exception e) {
        throw Throwables.propagate(e);
    }
}

From source file:org.alfresco.repo.policy.BehaviourFilterImpl.java

/**
 * Re-enables a class-level behaviour for the current transaction. The per-class disable
 * count is decremented but never taken below zero; if the class was never disabled in
 * this transaction, the call is a no-op.
 */
@Override
public void enableBehaviour(QName className) {
    ParameterCheck.mandatory("className", className);

    if (logger.isDebugEnabled()) {
        logger.debug("Behaviour: ENABLE (" + AlfrescoTransactionSupport.getTransactionId() + "): " + className);
    }

    // Global filter count always drops (floored at zero by the 'false' flag).
    TransactionalResourceHelper.decrementCount(KEY_FILTER_COUNT, false);

    if (!TransactionalResourceHelper.isResourcePresent(KEY_CLASS_FILTERS)) {
        // Nothing was disabled in this transaction
        return;
    }
    Map<ClassFilter, MutableInt> classFilters = TransactionalResourceHelper.getMap(KEY_CLASS_FILTERS);
    MutableInt filterNumber = null;
    for (Map.Entry<ClassFilter, MutableInt> entry : classFilters.entrySet()) {
        if (entry.getKey().getClassName().equals(className)) {
            filterNumber = entry.getValue();
            break;
        }
    }
    if (filterNumber == null) {
        // This class was never disabled
        return;
    }
    if (filterNumber.intValue() > 0) {
        filterNumber.decrement(); // never go below zero
    }

    if (logger.isDebugEnabled()) {
        logger.debug("   Now: " + filterNumber);
    }
}

From source file:org.alfresco.repo.policy.BehaviourFilterImpl.java

/**
 * Re-enables a class-level behaviour for a specific node in the current transaction.
 * The per-node, per-class disable count is decremented but never taken below zero;
 * if nothing matching was disabled, the call is a no-op.
 */
@Override
public void enableBehaviour(NodeRef nodeRef, QName className) {
    ParameterCheck.mandatory("nodeRef", nodeRef);
    ParameterCheck.mandatory("className", className);

    if (logger.isDebugEnabled()) {
        logger.debug("Behaviour: ENABLE (" + AlfrescoTransactionSupport.getTransactionId() + "): " + nodeRef
                + "/" + className);
    }

    // Global filter count always drops (floored at zero by the 'false' flag).
    TransactionalResourceHelper.decrementCount(KEY_FILTER_COUNT, false);

    if (!TransactionalResourceHelper.isResourcePresent(KEY_INSTANCE_CLASS_FILTERS)) {
        // Nothing was disabled in this transaction
        return;
    }
    nodeRef = tenantService.getName(nodeRef);

    Map<NodeRef, Map<QName, MutableInt>> instanceClassFilters = TransactionalResourceHelper
            .getMap(KEY_INSTANCE_CLASS_FILTERS);
    Map<QName, MutableInt> classFilters = instanceClassFilters.get(nodeRef);
    if (classFilters == null) {
        // No classes were disabled for this node instance
        return;
    }
    MutableInt disableCount = classFilters.get(className);
    if (disableCount == null) {
        // This class was never disabled for this node
        return;
    }
    if (disableCount.intValue() > 0) {
        disableCount.decrement(); // never go below zero
    }

    if (logger.isDebugEnabled()) {
        logger.debug("   Now: " + disableCount);
    }
}

From source file:org.alfresco.repo.policy.BehaviourFilterImpl.java

/**
 * Re-enables all behaviours for a specific node in the current transaction. The per-node
 * disable count is decremented but never taken below zero; if the node was never disabled,
 * the call is a no-op.
 */
@Override
public void enableBehaviour(NodeRef nodeRef) {
    ParameterCheck.mandatory("nodeRef", nodeRef);

    if (logger.isDebugEnabled()) {
        logger.debug("Behaviour: ENABLE (" + AlfrescoTransactionSupport.getTransactionId() + "): " + nodeRef
                + "/ALL");
    }

    // Global filter count always drops (floored at zero by the 'false' flag).
    TransactionalResourceHelper.decrementCount(KEY_FILTER_COUNT, false);

    if (!TransactionalResourceHelper.isResourcePresent(KEY_INSTANCE_FILTERS)) {
        // Nothing was disabled in this transaction
        return;
    }
    nodeRef = tenantService.getName(nodeRef);

    Map<NodeRef, MutableInt> instanceFilters = TransactionalResourceHelper.getMap(KEY_INSTANCE_FILTERS);
    MutableInt disableCount = instanceFilters.get(nodeRef);
    if (disableCount == null) {
        // This node instance was never disabled
        return;
    }
    if (disableCount.intValue() > 0) {
        disableCount.decrement(); // never go below zero
    }

    if (logger.isDebugEnabled()) {
        logger.debug("   Now:" + disableCount);
    }
}

From source file:org.alfresco.repo.transaction.TransactionalResourceHelper.java

/**
 * Decrement a count value for a named key
 * /*from w w  w.j  av a2s.c  o  m*/
 * @param resourceKey               the key to count against
 * @param allowNegative             <tt>true</tt> to allow negative values otherwise zero will be the floor
 * @return                          the newly-decremented value (negative, if allowed)
 */
public static final int decrementCount(Object resourceKey, boolean allowNegative) {
    MutableInt counter = (MutableInt) AlfrescoTransactionSupport.getResource(resourceKey);
    if (counter == null) {
        counter = new MutableInt(0);
        AlfrescoTransactionSupport.bindResource(resourceKey, counter);
    }
    if (counter.intValue() > 0 || allowNegative) {
        counter.decrement();
    }
    return counter.intValue();
}

From source file:org.apache.druid.indexing.overlord.RemoteTaskRunner.java

/**
 * Starts the runner: registers a listener for worker create/update/delete events, waits for
 * the path cache's initial sync (INITIALIZED) to complete, then schedules periodic blacklist
 * cleanup and creates the provisioning service.
 *
 * waitingFor counts outstanding startup work: it begins at 1 for the INITIALIZED event and is
 * incremented once per CHILD_ADDED processed during the initial sync; every completion
 * decrements it under waitingForMonitor and notifies, so the main thread can re-check.
 */
@Override
@LifecycleStart
public void start() {
    // Guard against concurrent or repeated starts.
    if (!lifecycleLock.canStart()) {
        return;
    }
    try {
        final MutableInt waitingFor = new MutableInt(1);
        final Object waitingForMonitor = new Object();

        // Add listener for creation/deletion of workers
        workerPathCache.getListenable().addListener(new PathChildrenCacheListener() {
            @Override
            public void childEvent(CuratorFramework client, final PathChildrenCacheEvent event)
                    throws Exception {
                final Worker worker;
                switch (event.getType()) {
                case CHILD_ADDED:
                    worker = jsonMapper.readValue(event.getData().getData(), Worker.class);
                    synchronized (waitingForMonitor) {
                        waitingFor.increment();
                    }
                    Futures.addCallback(addWorker(worker), new FutureCallback<ZkWorker>() {
                        @Override
                        public void onSuccess(ZkWorker zkWorker) {
                            synchronized (waitingForMonitor) {
                                waitingFor.decrement();
                                waitingForMonitor.notifyAll();
                            }
                        }

                        @Override
                        public void onFailure(Throwable throwable) {
                            // Count down even on failure so start() cannot wait forever.
                            synchronized (waitingForMonitor) {
                                waitingFor.decrement();
                                waitingForMonitor.notifyAll();
                            }
                        }
                    });
                    break;
                case CHILD_UPDATED:
                    worker = jsonMapper.readValue(event.getData().getData(), Worker.class);
                    updateWorker(worker);
                    break;

                case CHILD_REMOVED:
                    worker = jsonMapper.readValue(event.getData().getData(), Worker.class);
                    removeWorker(worker);
                    break;
                case INITIALIZED:
                    // Schedule cleanup for task status of the workers that might have disconnected while overlord was not running
                    List<String> workers;
                    try {
                        workers = cf.getChildren().forPath(indexerZkConfig.getStatusPath());
                    } catch (KeeperException.NoNodeException e) {
                        // statusPath doesn't exist yet; can occur if no middleManagers have started.
                        workers = ImmutableList.of();
                    }
                    for (String workerId : workers) {
                        final String workerAnnouncePath = JOINER.join(indexerZkConfig.getAnnouncementsPath(),
                                workerId);
                        final String workerStatusPath = JOINER.join(indexerZkConfig.getStatusPath(), workerId);
                        if (!zkWorkers.containsKey(workerId)
                                && cf.checkExists().forPath(workerAnnouncePath) == null) {
                            try {
                                scheduleTasksCleanupForWorker(workerId,
                                        cf.getChildren().forPath(workerStatusPath));
                            } catch (Exception e) {
                                // Best-effort: log and continue with the remaining workers.
                                log.warn(e,
                                        "Could not schedule cleanup for worker[%s] during startup (maybe someone removed the status znode[%s]?). Skipping.",
                                        workerId, workerStatusPath);
                            }
                        }
                    }
                    // Initial sync is done: release the thread blocked in start().
                    synchronized (waitingForMonitor) {
                        waitingFor.decrement();
                        waitingForMonitor.notifyAll();
                    }
                    break;
                case CONNECTION_SUSPENDED:
                case CONNECTION_RECONNECTED:
                case CONNECTION_LOST:
                    // do nothing
                }
            }
        });
        workerPathCache.start(PathChildrenCache.StartMode.POST_INITIALIZED_EVENT);
        // Block until INITIALIZED has been handled and all in-flight addWorker calls finished.
        synchronized (waitingForMonitor) {
            while (waitingFor.intValue() > 0) {
                waitingForMonitor.wait();
            }
        }

        ScheduledExecutors.scheduleAtFixedRate(cleanupExec, Period.ZERO.toStandardDuration(),
                config.getWorkerBlackListCleanupPeriod().toStandardDuration(), () -> checkBlackListedNodes());

        provisioningService = provisioningStrategy.makeProvisioningService(this);
        lifecycleLock.started();
    } catch (Exception e) {
        throw Throwables.propagate(e);
    } finally {
        // Always exit the start phase, whether started() was reached or an exception escaped.
        lifecycleLock.exitStart();
    }
}

From source file:org.apache.hadoop.hbase.client.ZKAsyncRegistry.java

/**
 * Marks one pending region lookup as finished; when the last outstanding lookup completes
 * (counter reaches zero), fulfills the future with the accumulated locations.
 */
private static void tryComplete(MutableInt remaining, HRegionLocation[] locs,
        CompletableFuture<RegionLocations> future) {
    remaining.decrement();
    if (remaining.intValue() <= 0) {
        future.complete(new RegionLocations(locs));
    }
}

From source file:org.apache.hadoop.hive.llap.tezplugins.LlapTaskSchedulerService.java

private void unregisterPendingPreemption(String host) {
    writeLock.lock();//from ww w  . j  a  va 2s . co  m
    try {
        pendingPreemptions.decrementAndGet();
        if (metrics != null) {
            metrics.decrPendingPreemptionTasksCount();
        }
        MutableInt val = pendingPreemptionsPerHost.get(host);
        Preconditions.checkNotNull(val);
        val.decrement();
        // Not bothering with removing the entry. There's a limited number of hosts, and a good
        // chance that the entry will make it back in when the AM is used for a long duration.
    } finally {
        writeLock.unlock();
    }
}