Example usage for org.apache.commons.lang3.mutable MutableObject setValue

List of usage examples for org.apache.commons.lang3.mutable MutableObject setValue

Introduction

On this page you can find example usage for org.apache.commons.lang3.mutable MutableObject setValue.

Prototype

@Override
public void setValue(final T value) 

Document

Sets the value.
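
Before the project examples below, here is a minimal, self-contained sketch of the method (the class and variable names are illustrative and not taken from the listed projects): setValue replaces the value currently wrapped by the MutableObject, and getValue reads it back. This is why the wrapper is often used to capture a result from inside an anonymous class or lambda, where local variables must be effectively final.

import org.apache.commons.lang3.mutable.MutableObject;

public class MutableObjectSetValueExample {
    public static void main(String[] args) {
        // A new MutableObject starts out holding null.
        MutableObject<String> holder = new MutableObject<>();

        // setValue replaces whatever the holder currently contains.
        holder.setValue("hello");
        System.out.println(holder.getValue()); // prints: hello

        // Common pattern (also used in the examples below): capture a value
        // produced inside a lambda or anonymous class, where local variables
        // must be effectively final.
        final MutableObject<Integer> captured = new MutableObject<>();
        Runnable task = () -> captured.setValue(42);
        task.run();
        System.out.println(captured.getValue()); // prints: 42
    }
}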

Usage

From source file:fr.duminy.jbackup.core.JBackupImplTest.java

@Test
public void testRestore_withCancellable() throws Throwable {
    // prepare test
    final Path archive = tempFolder.newFolder().toPath().resolve("archive.zip");
    final Path targetDirectory = tempFolder.newFolder().toPath();
    final BackupConfiguration config = createConfiguration();
    final MutableBoolean taskStarted = new MutableBoolean(false);
    final Task mockRestoreTask = createAlwaysWaitingTask(Task.class, taskStarted);
    final MutableObject<Cancellable> actualCancellable = new MutableObject<>();
    final MutableObject<TaskListener> actualListener = new MutableObject<>();
    JBackupImpl jBackup = spy(new JBackupImpl() {
        @Override
        Task createRestoreTask(BackupConfiguration config, Path archive, Path targetDirectory,
                TaskListener listener, Cancellable cancellable) {
            actualListener.setValue(listener);
            actualCancellable.setValue(cancellable);
            return mockRestoreTask;
        }
    });

    // test
    try {
        Future<Void> future = jBackup.restore(config, archive, targetDirectory);

        // wait until the task is actually started
        waitTaskStarted(taskStarted, actualCancellable);

        assertThat(actualCancellable.getValue()).as("cancellable").isNotNull();
        assertThat(actualCancellable.getValue().isCancelled()).as("cancelled").isFalse();

        future.cancel(true);
        assertThat(actualCancellable.getValue().isCancelled()).as("cancelled").isTrue();
    } finally {
        jBackup.shutdown(null);
    }

    // assertions
    InOrder inOrder = inOrder(mockRestoreTask, jBackup);
    inOrder.verify(jBackup, times(1)).restore(eq(config), eq(archive), eq(targetDirectory)); // called above
    inOrder.verify(jBackup, times(1)).createRestoreTask(eq(config), eq(archive), eq(targetDirectory),
            eq(actualListener.getValue()), eq(actualCancellable.getValue()));
    inOrder.verify(mockRestoreTask, times(1)).call();
    inOrder.verify(jBackup, times(1)).shutdown(isNull(TerminationListener.class)); // called above
    inOrder.verifyNoMoreInteractions();
}

From source file:com.spotify.reaper.unit.service.SegmentRunnerTest.java

@Test
public void timeoutTest() throws InterruptedException, ReaperException, ExecutionException {
    final AppContext context = new AppContext();
    context.storage = new MemoryStorage();
    RepairUnit cf = context.storage
            .addRepairUnit(new RepairUnit.Builder("reaper", "reaper", Sets.newHashSet("reaper")));
    RepairRun run = context.storage.addRepairRun(
            new RepairRun.Builder("reaper", cf.getId(), DateTime.now(), 0.5, 1, RepairParallelism.PARALLEL));
    context.storage.addRepairSegments(Collections.singleton(
            new RepairSegment.Builder(run.getId(), new RingRange(BigInteger.ONE, BigInteger.ZERO), cf.getId())),
            run.getId());
    final long segmentId = context.storage.getNextFreeSegment(run.getId()).get().getId();

    final ExecutorService executor = Executors.newSingleThreadExecutor();
    final MutableObject<Future<?>> future = new MutableObject<>();

    context.jmxConnectionFactory = new JmxConnectionFactory() {
        @Override
        public JmxProxy connect(final Optional<RepairStatusHandler> handler, String host) {
            JmxProxy jmx = mock(JmxProxy.class);
            when(jmx.getClusterName()).thenReturn("reaper");
            when(jmx.isConnectionAlive()).thenReturn(true);
            when(jmx.tokenRangeToEndpoint(anyString(), any(RingRange.class)))
                    .thenReturn(Lists.newArrayList(""));
            when(jmx.triggerRepair(any(BigInteger.class), any(BigInteger.class), anyString(),
                    Matchers.<RepairParallelism>any(), Sets.newHashSet(anyString())))
                            .then(new Answer<Integer>() {
                                @Override
                                public Integer answer(InvocationOnMock invocation) {
                                    assertEquals(RepairSegment.State.NOT_STARTED,
                                            context.storage.getRepairSegment(segmentId).get().getState());
                                    future.setValue(executor.submit(new Runnable() {
                                        @Override
                                        public void run() {
                                            handler.get().handle(1, ActiveRepairService.Status.STARTED,
                                                    "Repair command 1 has started");
                                            assertEquals(RepairSegment.State.RUNNING, context.storage
                                                    .getRepairSegment(segmentId).get().getState());
                                        }
                                    }));
                                    return 1;
                                }
                            });

            return jmx;
        }
    };
    RepairRunner rr = mock(RepairRunner.class);
    RepairUnit ru = mock(RepairUnit.class);
    SegmentRunner sr = new SegmentRunner(context, segmentId, Collections.singleton(""), 100, 0.5,
            RepairParallelism.PARALLEL, "reaper", ru, rr);
    sr.run();

    future.getValue().get();
    executor.shutdown();

    assertEquals(RepairSegment.State.NOT_STARTED, context.storage.getRepairSegment(segmentId).get().getState());
    assertEquals(1, context.storage.getRepairSegment(segmentId).get().getFailCount());
}

From source file:com.spotify.reaper.unit.service.SegmentRunnerTest.java

@Test
public void failureTest() throws InterruptedException, ReaperException, ExecutionException {
    final IStorage storage = new MemoryStorage();
    RepairUnit cf = storage
            .addRepairUnit(new RepairUnit.Builder("reaper", "reaper", Sets.newHashSet("reaper")));
    RepairRun run = storage.addRepairRun(
            new RepairRun.Builder("reaper", cf.getId(), DateTime.now(), 0.5, 1, RepairParallelism.PARALLEL));
    storage.addRepairSegments(Collections.singleton(
            new RepairSegment.Builder(run.getId(), new RingRange(BigInteger.ONE, BigInteger.ZERO), cf.getId())),
            run.getId());
    final long segmentId = storage.getNextFreeSegment(run.getId()).get().getId();

    final ExecutorService executor = Executors.newSingleThreadExecutor();
    final MutableObject<Future<?>> future = new MutableObject<>();

    AppContext context = new AppContext();
    context.storage = storage;
    context.jmxConnectionFactory = new JmxConnectionFactory() {
        @Override
        public JmxProxy connect(final Optional<RepairStatusHandler> handler, String host) {
            JmxProxy jmx = mock(JmxProxy.class);
            when(jmx.getClusterName()).thenReturn("reaper");
            when(jmx.isConnectionAlive()).thenReturn(true);
            when(jmx.tokenRangeToEndpoint(anyString(), any(RingRange.class)))
                    .thenReturn(Lists.newArrayList(""));
            when(jmx.triggerRepair(any(BigInteger.class), any(BigInteger.class), anyString(),
                    Matchers.<RepairParallelism>any(), Sets.newHashSet(anyString())))
                            .then(new Answer<Integer>() {
                                @Override
                                public Integer answer(InvocationOnMock invocation) {
                                    assertEquals(RepairSegment.State.NOT_STARTED,
                                            storage.getRepairSegment(segmentId).get().getState());
                                    future.setValue(executor.submit(new Runnable() {
                                        @Override
                                        public void run() {
                                            handler.get().handle(1, ActiveRepairService.Status.STARTED,
                                                    "Repair command 1 has started");
                                            assertEquals(RepairSegment.State.RUNNING,
                                                    storage.getRepairSegment(segmentId).get().getState());
                                            handler.get().handle(1, ActiveRepairService.Status.SESSION_FAILED,
                                                    "Repair command 1 has failed");
                                            assertEquals(RepairSegment.State.NOT_STARTED,
                                                    storage.getRepairSegment(segmentId).get().getState());
                                            handler.get().handle(1, ActiveRepairService.Status.FINISHED,
                                                    "Repair command 1 has finished");
                                            assertEquals(RepairSegment.State.NOT_STARTED,
                                                    storage.getRepairSegment(segmentId).get().getState());
                                        }
                                    }));

                                    return 1;
                                }
                            });

            return jmx;
        }
    };
    RepairRunner rr = mock(RepairRunner.class);
    RepairUnit ru = mock(RepairUnit.class);
    SegmentRunner sr = new SegmentRunner(context, segmentId, Collections.singleton(""), 1000, 0.5,
            RepairParallelism.PARALLEL, "reaper", ru, rr);
    sr.run();

    future.getValue().get();
    executor.shutdown();

    assertEquals(RepairSegment.State.NOT_STARTED, storage.getRepairSegment(segmentId).get().getState());
    assertEquals(1, storage.getRepairSegment(segmentId).get().getFailCount());
}

From source file:com.spotify.reaper.unit.service.SegmentRunnerTest.java

@Test
public void successTest() throws InterruptedException, ReaperException, ExecutionException {
    final IStorage storage = new MemoryStorage();
    RepairUnit cf = storage
            .addRepairUnit(new RepairUnit.Builder("reaper", "reaper", Sets.newHashSet("reaper")));
    RepairRun run = storage.addRepairRun(
            new RepairRun.Builder("reaper", cf.getId(), DateTime.now(), 0.5, 1, RepairParallelism.PARALLEL));
    storage.addRepairSegments(Collections.singleton(
            new RepairSegment.Builder(run.getId(), new RingRange(BigInteger.ONE, BigInteger.ZERO), cf.getId())),
            run.getId());
    final long segmentId = storage.getNextFreeSegment(run.getId()).get().getId();

    final ExecutorService executor = Executors.newSingleThreadExecutor();
    final MutableObject<Future<?>> future = new MutableObject<>();

    AppContext context = new AppContext();
    context.storage = storage;
    context.jmxConnectionFactory = new JmxConnectionFactory() {
        @Override
        public JmxProxy connect(final Optional<RepairStatusHandler> handler, String host) {
            JmxProxy jmx = mock(JmxProxy.class);
            when(jmx.getClusterName()).thenReturn("reaper");
            when(jmx.isConnectionAlive()).thenReturn(true);
            when(jmx.tokenRangeToEndpoint(anyString(), any(RingRange.class)))
                    .thenReturn(Lists.newArrayList(""));
            when(jmx.triggerRepair(any(BigInteger.class), any(BigInteger.class), anyString(),
                    Matchers.<RepairParallelism>any(), Sets.newHashSet(anyString())))
                            .then(new Answer<Integer>() {
                                @Override
                                public Integer answer(InvocationOnMock invocation) {
                                    assertEquals(RepairSegment.State.NOT_STARTED,
                                            storage.getRepairSegment(segmentId).get().getState());
                                    future.setValue(executor.submit(new Runnable() {
                                        @Override
                                        public void run() {
                                            handler.get().handle(1, ActiveRepairService.Status.STARTED,
                                                    "Repair command 1 has started");
                                            assertEquals(RepairSegment.State.RUNNING,
                                                    storage.getRepairSegment(segmentId).get().getState());
                                            // report about an unrelated repair. Shouldn't affect anything.
                                            handler.get().handle(2, ActiveRepairService.Status.SESSION_FAILED,
                                                    "Repair command 2 has failed");
                                            handler.get().handle(1, ActiveRepairService.Status.SESSION_SUCCESS,
                                                    "Repair session succeeded in command 1");
                                            assertEquals(RepairSegment.State.DONE,
                                                    storage.getRepairSegment(segmentId).get().getState());
                                            handler.get().handle(1, ActiveRepairService.Status.FINISHED,
                                                    "Repair command 1 has finished");
                                            assertEquals(RepairSegment.State.DONE,
                                                    storage.getRepairSegment(segmentId).get().getState());
                                        }
                                    }));
                                    return 1;
                                }
                            });

            return jmx;
        }
    };
    RepairRunner rr = mock(RepairRunner.class);
    RepairUnit ru = mock(RepairUnit.class);
    SegmentRunner sr = new SegmentRunner(context, segmentId, Collections.singleton(""), 1000, 0.5,
            RepairParallelism.PARALLEL, "reaper", ru, rr);
    sr.run();

    future.getValue().get();
    executor.shutdown();

    assertEquals(RepairSegment.State.DONE, storage.getRepairSegment(segmentId).get().getState());
    assertEquals(0, storage.getRepairSegment(segmentId).get().getFailCount());
}

From source file:com.romeikat.datamessie.core.processing.task.documentProcessing.DocumentsProcessingTask.java

private void prepareForNextIteration(final TaskExecution taskExecution,
        final MutableObject<LocalDate> downloadedDate, final List<Document> documentsToProcess)
        throws TaskCancelledException {
    // No documents to process due to an error while loading
    final boolean errorOccurred = documentsToProcess == null;
    if (errorOccurred) {
        // In case of an error, wait and continue with same downloaded date
        sessionProvider.closeStatelessSession();
        taskExecution.checkpoint(pause);

        // Next download date to be processed is the same
        return;
    }

    // No documents to process for that downloaded date
    final boolean noDocumentsToProcess = documentsToProcess.isEmpty();
    if (noDocumentsToProcess) {
        // Determine next downloaded date
        final LocalDate previousDownloadDate = downloadedDate.getValue();
        final LocalDate nextDownloadedDate = getNextDownloadedDate(previousDownloadDate);

        // Current date is reached
        final boolean isCurrentDate = previousDownloadDate.equals(nextDownloadedDate);
        if (isCurrentDate) {
            // Pause
            sessionProvider.closeStatelessSession();
            taskExecution.checkpoint(pause);
            // Next downloaded date to be processed is the same
        }
        // Current date is not yet reached
        else {
            // Next downloaded date to be processed is the next day
            downloadedDate.setValue(nextDownloadedDate);
        }
        return;
    }

    // No more documents to process for that downloaded date
    final boolean noMoreDocumentsToProcess = documentsToProcess.size() < batchSize;
    if (noMoreDocumentsToProcess) {
        // Increase download date
        // Determine next downloaded date
        final LocalDate previousDownloadDate = downloadedDate.getValue();
        final LocalDate nextDownloadedDate = getNextDownloadedDate(previousDownloadDate);

        // Current date is reached
        final boolean isCurrentDate = previousDownloadDate.equals(nextDownloadedDate);
        if (isCurrentDate) {
            // Pause
            sessionProvider.closeStatelessSession();
            taskExecution.checkpoint(pause);
            // Next downloaded date to be processed is the same
        }
        // Current date is not yet reached
        else {
            // Next downloaded date to be processed is the next day
            downloadedDate.setValue(nextDownloadedDate);
        }
    }
}

From source file:nl.b3p.viewer.config.services.GeoService.java

/**
 * Returns the layer with the given name in this server. The first layer in
 * a depth-first tree traversal with the name is returned. If a child has
 * the same name as its parent, the child is returned.
 * @param layerName the layer name to search for
 * @return the Layer or null if not found
 */
public Layer getLayer(final String layerName) {
    loadLayerTree();

    if (layerName == null || topLayer == null) {
        return null;
    }

    final MutableObject<Layer> layer = new MutableObject<>(null);

    topLayer.accept(new Layer.Visitor() {
        @Override
        public boolean visit(Layer l) {
            if (StringUtils.equals(l.getName(), layerName)) {
                layer.setValue(l);
                return false;
            }
            return true;
        }
    });

    return layer.getValue();
}

From source file:objenome.util.ClassUtils.java

/**
 * Get an {@link Iterable} that can iterate over a class hierarchy in ascending (subclass to superclass) order.
 *
 * @param type the type to get the class hierarchy from
 * @param interfacesBehavior switch indicating whether to include or exclude interfaces
 * @return Iterable an Iterable over the class hierarchy of the given class
 * @since 3.2
 */
public static Iterable<Class<?>> hierarchy(Class<?> type, Interfaces interfacesBehavior) {
    Iterable<Class<?>> classes = () -> {
        MutableObject<Class<?>> next = new MutableObject<>(type);
        return new Iterator<Class<?>>() {

            @Override
            public boolean hasNext() {
                return next.getValue() != null;
            }

            @Override
            public Class<?> next() {
                Class<?> result = next.getValue();
                next.setValue(result.getSuperclass());
                return result;
            }

            @Override
            public void remove() {
                throw new UnsupportedOperationException();
            }

        };
    };
    if (interfacesBehavior != Interfaces.INCLUDE) {
        return classes;
    }
    return () -> {
        Set<Class<?>> seenInterfaces = new HashSet<>();
        Iterator<Class<?>> wrapped = classes.iterator();

        return new Iterator<Class<?>>() {
            Iterator<Class<?>> interfaces = Collections.<Class<?>>emptySet().iterator();

            @Override
            public boolean hasNext() {
                return interfaces.hasNext() || wrapped.hasNext();
            }

            @Override
            public Class<?> next() {
                if (interfaces.hasNext()) {
                    Class<?> nextInterface = interfaces.next();
                    seenInterfaces.add(nextInterface);
                    return nextInterface;
                }
                Class<?> nextSuperclass = wrapped.next();
                Set<Class<?>> currentInterfaces = new LinkedHashSet<>();
                walkInterfaces(currentInterfaces, nextSuperclass);
                interfaces = currentInterfaces.iterator();
                return nextSuperclass;
            }

            private void walkInterfaces(Set<Class<?>> addTo, Class<?> c) {
                for (Class<?> iface : c.getInterfaces()) {
                    if (!seenInterfaces.contains(iface)) {
                        addTo.add(iface);
                    }
                    walkInterfaces(addTo, iface);
                }
            }

            @Override
            public void remove() {
                throw new UnsupportedOperationException();
            }

        };
    };
}

From source file:org.apache.asterix.app.translator.QueryTranslator.java

public void handleCreateDatasetStatement(AqlMetadataProvider metadataProvider, Statement stmt,
        IHyracksClientConnection hcc) throws AsterixException, Exception {
    MutableObject<ProgressState> progress = new MutableObject<>(ProgressState.NO_PROGRESS);
    DatasetDecl dd = (DatasetDecl) stmt;
    String dataverseName = getActiveDataverse(dd.getDataverse());
    String datasetName = dd.getName().getValue();
    DatasetType dsType = dd.getDatasetType();
    String itemTypeDataverseName = getActiveDataverse(dd.getItemTypeDataverse());
    String itemTypeName = dd.getItemTypeName().getValue();
    String metaItemTypeDataverseName = getActiveDataverse(dd.getMetaItemTypeDataverse());
    String metaItemTypeName = dd.getMetaItemTypeName().getValue();
    Identifier ngNameId = dd.getNodegroupName();
    String nodegroupName = getNodeGroupName(ngNameId, dd, dataverseName);
    String compactionPolicy = dd.getCompactionPolicy();
    Map<String, String> compactionPolicyProperties = dd.getCompactionPolicyProperties();
    boolean defaultCompactionPolicy = compactionPolicy == null;
    boolean temp = dd.getDatasetDetailsDecl().isTemp();

    MetadataTransactionContext mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
    boolean bActiveTxn = true;
    metadataProvider.setMetadataTxnContext(mdTxnCtx);

    MetadataLockManager.INSTANCE.createDatasetBegin(dataverseName, itemTypeDataverseName,
            itemTypeDataverseName + "." + itemTypeName, metaItemTypeDataverseName,
            metaItemTypeDataverseName + "." + metaItemTypeName, nodegroupName, compactionPolicy,
            dataverseName + "." + datasetName, defaultCompactionPolicy);
    Dataset dataset = null;
    try {

        IDatasetDetails datasetDetails = null;
        Dataset ds = MetadataManager.INSTANCE.getDataset(metadataProvider.getMetadataTxnContext(),
                dataverseName, datasetName);
        if (ds != null) {
            if (dd.getIfNotExists()) {
                MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
                return;
            } else {
                throw new AlgebricksException("A dataset with this name " + datasetName + " already exists.");
            }
        }
        Datatype dt = MetadataManager.INSTANCE.getDatatype(metadataProvider.getMetadataTxnContext(),
                itemTypeDataverseName, itemTypeName);
        if (dt == null) {
            throw new AlgebricksException(": type " + itemTypeName + " could not be found.");
        }
        String ngName = ngNameId != null ? ngNameId.getValue()
                : configureNodegroupForDataset(dd, dataverseName, mdTxnCtx);

        if (compactionPolicy == null) {
            compactionPolicy = GlobalConfig.DEFAULT_COMPACTION_POLICY_NAME;
            compactionPolicyProperties = GlobalConfig.DEFAULT_COMPACTION_POLICY_PROPERTIES;
        } else {
            validateCompactionPolicy(compactionPolicy, compactionPolicyProperties, mdTxnCtx, false);
        }
        switch (dd.getDatasetType()) {
        case INTERNAL:
            IAType itemType = dt.getDatatype();
            if (itemType.getTypeTag() != ATypeTag.RECORD) {
                throw new AlgebricksException("Dataset type has to be a record type.");
            }

            IAType metaItemType = null;
            if (metaItemTypeDataverseName != null && metaItemTypeName != null) {
                metaItemType = metadataProvider.findType(metaItemTypeDataverseName, metaItemTypeName);
            }
            if (metaItemType != null && metaItemType.getTypeTag() != ATypeTag.RECORD) {
                throw new AlgebricksException("Dataset meta type has to be a record type.");
            }
            ARecordType metaRecType = (ARecordType) metaItemType;

            List<List<String>> partitioningExprs = ((InternalDetailsDecl) dd.getDatasetDetailsDecl())
                    .getPartitioningExprs();
            List<Integer> keySourceIndicators = ((InternalDetailsDecl) dd.getDatasetDetailsDecl())
                    .getKeySourceIndicators();
            boolean autogenerated = ((InternalDetailsDecl) dd.getDatasetDetailsDecl()).isAutogenerated();
            ARecordType aRecordType = (ARecordType) itemType;
            List<IAType> partitioningTypes = ValidateUtil.validatePartitioningExpressions(aRecordType,
                    metaRecType, partitioningExprs, keySourceIndicators, autogenerated);

            List<String> filterField = ((InternalDetailsDecl) dd.getDatasetDetailsDecl()).getFilterField();
            if (filterField != null) {
                ValidateUtil.validateFilterField(aRecordType, filterField);
            }
            if (compactionPolicy == null && filterField != null) {
                // If the dataset has a filter and the user didn't specify a merge
                // policy, then we will pick the
                // correlated-prefix as the default merge policy.
                compactionPolicy = GlobalConfig.DEFAULT_FILTERED_DATASET_COMPACTION_POLICY_NAME;
                compactionPolicyProperties = GlobalConfig.DEFAULT_COMPACTION_POLICY_PROPERTIES;
            }
            datasetDetails = new InternalDatasetDetails(InternalDatasetDetails.FileStructure.BTREE,
                    InternalDatasetDetails.PartitioningStrategy.HASH, partitioningExprs, partitioningExprs,
                    keySourceIndicators, partitioningTypes, autogenerated, filterField, temp);
            break;
        case EXTERNAL:
            String adapter = ((ExternalDetailsDecl) dd.getDatasetDetailsDecl()).getAdapter();
            Map<String, String> properties = ((ExternalDetailsDecl) dd.getDatasetDetailsDecl()).getProperties();

            datasetDetails = new ExternalDatasetDetails(adapter, properties, new Date(),
                    ExternalDatasetTransactionState.COMMIT);
            break;
        default:
            throw new AsterixException("Unknown datatype " + dd.getDatasetType());
        }

        // #. initialize DatasetIdFactory if it is not initialized.
        if (!DatasetIdFactory.isInitialized()) {
            DatasetIdFactory.initialize(MetadataManager.INSTANCE.getMostRecentDatasetId());
        }

        // #. add a new dataset with PendingAddOp
        dataset = new Dataset(dataverseName, datasetName, itemTypeDataverseName, itemTypeName,
                metaItemTypeDataverseName, metaItemTypeName, ngName, compactionPolicy,
                compactionPolicyProperties, datasetDetails, dd.getHints(), dsType,
                DatasetIdFactory.generateDatasetId(), IMetadataEntity.PENDING_ADD_OP);
        MetadataManager.INSTANCE.addDataset(metadataProvider.getMetadataTxnContext(), dataset);

        if (dd.getDatasetType() == DatasetType.INTERNAL) {
            Dataverse dataverse = MetadataManager.INSTANCE
                    .getDataverse(metadataProvider.getMetadataTxnContext(), dataverseName);
            JobSpecification jobSpec = DatasetOperations.createDatasetJobSpec(dataverse, datasetName,
                    metadataProvider);

            // #. make metadataTxn commit before calling runJob.
            MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
            bActiveTxn = false;
            progress.setValue(ProgressState.ADDED_PENDINGOP_RECORD_TO_METADATA);

            // #. runJob
            JobUtils.runJob(hcc, jobSpec, true);

            // #. begin new metadataTxn
            mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
            bActiveTxn = true;
            metadataProvider.setMetadataTxnContext(mdTxnCtx);
        }

        // #. add a new dataset with PendingNoOp after deleting the dataset with PendingAddOp
        MetadataManager.INSTANCE.dropDataset(metadataProvider.getMetadataTxnContext(), dataverseName,
                datasetName);
        dataset.setPendingOp(IMetadataEntity.PENDING_NO_OP);
        MetadataManager.INSTANCE.addDataset(metadataProvider.getMetadataTxnContext(), dataset);
        MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
    } catch (Exception e) {
        if (bActiveTxn) {
            abort(e, e, mdTxnCtx);
        }

        if (progress.getValue() == ProgressState.ADDED_PENDINGOP_RECORD_TO_METADATA) {

            // #. execute compensation operations
            // remove the index in NC
            // [Notice]
            // As long as we updated (and committed) the metadata, we should remove any effect of the job
            // because an exception occurred during runJob.
            mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
            bActiveTxn = true;
            metadataProvider.setMetadataTxnContext(mdTxnCtx);
            CompiledDatasetDropStatement cds = new CompiledDatasetDropStatement(dataverseName, datasetName);
            try {
                JobSpecification jobSpec = DatasetOperations.createDropDatasetJobSpec(cds, metadataProvider);
                MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
                bActiveTxn = false;
                JobUtils.runJob(hcc, jobSpec, true);
            } catch (Exception e2) {
                e.addSuppressed(e2);
                if (bActiveTxn) {
                    abort(e, e2, mdTxnCtx);
                }
            }

            // remove the record from the metadata.
            mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
            metadataProvider.setMetadataTxnContext(mdTxnCtx);
            try {
                MetadataManager.INSTANCE.dropDataset(metadataProvider.getMetadataTxnContext(), dataverseName,
                        datasetName);
                MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
            } catch (Exception e2) {
                e.addSuppressed(e2);
                abort(e, e2, mdTxnCtx);
                throw new IllegalStateException("System is inconsistent state: pending dataset(" + dataverseName
                        + "." + datasetName + ") couldn't be removed from the metadata", e);
            }
        }

        throw e;
    } finally {
        MetadataLockManager.INSTANCE.createDatasetEnd(dataverseName, itemTypeDataverseName,
                itemTypeDataverseName + "." + itemTypeName, metaItemTypeDataverseName,
                metaItemTypeDataverseName + "." + metaItemTypeName, nodegroupName, compactionPolicy,
                dataverseName + "." + datasetName, defaultCompactionPolicy);
    }
}

From source file:org.apache.asterix.app.translator.QueryTranslator.java

public void handleDatasetDropStatement(AqlMetadataProvider metadataProvider, Statement stmt,
        IHyracksClientConnection hcc) throws Exception {
    DropDatasetStatement stmtDelete = (DropDatasetStatement) stmt;
    String dataverseName = getActiveDataverse(stmtDelete.getDataverseName());
    String datasetName = stmtDelete.getDatasetName().getValue();
    MutableObject<ProgressState> progress = new MutableObject<>(ProgressState.NO_PROGRESS);
    MutableObject<MetadataTransactionContext> mdTxnCtx = new MutableObject<>(
            MetadataManager.INSTANCE.beginTransaction());
    MutableBoolean bActiveTxn = new MutableBoolean(true);
    metadataProvider.setMetadataTxnContext(mdTxnCtx.getValue());
    MetadataLockManager.INSTANCE.dropDatasetBegin(dataverseName, dataverseName + "." + datasetName);
    List<JobSpecification> jobsToExecute = new ArrayList<>();
    try {
        Dataset ds = MetadataManager.INSTANCE.getDataset(mdTxnCtx.getValue(), dataverseName, datasetName);
        if (ds == null) {
            if (stmtDelete.getIfExists()) {
                MetadataManager.INSTANCE.commitTransaction(mdTxnCtx.getValue());
                return;
            } else {
                throw new AlgebricksException("There is no dataset with this name " + datasetName
                        + " in dataverse " + dataverseName + ".");
            }
        }

        doDropDataset(ds, datasetName, metadataProvider, mdTxnCtx, jobsToExecute, dataverseName, bActiveTxn,
                progress, hcc);
        MetadataManager.INSTANCE.commitTransaction(mdTxnCtx.getValue());
    } catch (Exception e) {
        if (bActiveTxn.booleanValue()) {
            abort(e, e, mdTxnCtx.getValue());
        }

        if (progress.getValue() == ProgressState.ADDED_PENDINGOP_RECORD_TO_METADATA) {
            // #. execute compensation operations
            // remove all the indexes in NC
            try {
                for (JobSpecification jobSpec : jobsToExecute) {
                    JobUtils.runJob(hcc, jobSpec, true);
                }
            } catch (Exception e2) {
                // do not throw the exception, since the metadata still needs to be compensated.
                e.addSuppressed(e2);
            }

            // remove the record from the metadata.
            mdTxnCtx.setValue(MetadataManager.INSTANCE.beginTransaction());
            metadataProvider.setMetadataTxnContext(mdTxnCtx.getValue());
            try {
                MetadataManager.INSTANCE.dropDataset(metadataProvider.getMetadataTxnContext(), dataverseName,
                        datasetName);
                MetadataManager.INSTANCE.commitTransaction(mdTxnCtx.getValue());
            } catch (Exception e2) {
                e.addSuppressed(e2);
                abort(e, e2, mdTxnCtx.getValue());
                throw new IllegalStateException("System is inconsistent state: pending dataset(" + dataverseName
                        + "." + datasetName + ") couldn't be removed from the metadata", e);
            }
        }

        throw e;
    } finally {
        MetadataLockManager.INSTANCE.dropDatasetEnd(dataverseName, dataverseName + "." + datasetName);
    }
}

From source file:org.apache.asterix.app.translator.QueryTranslator.java

protected void doDropDataset(Dataset ds, String datasetName, AqlMetadataProvider metadataProvider,
        MutableObject<MetadataTransactionContext> mdTxnCtx, List<JobSpecification> jobsToExecute,
        String dataverseName, MutableBoolean bActiveTxn, MutableObject<ProgressState> progress,
        IHyracksClientConnection hcc) throws Exception {
    Map<FeedConnectionId, Pair<JobSpecification, Boolean>> disconnectJobList = new HashMap<>();
    if (ds.getDatasetType() == DatasetType.INTERNAL) {
        // prepare job spec(s) that would disconnect any active feeds involving the dataset.
        IActiveEntityEventsListener[] activeListeners = ActiveJobNotificationHandler.INSTANCE
                .getEventListeners();
        for (IActiveEntityEventsListener listener : activeListeners) {
            if (listener.isEntityUsingDataset(dataverseName, datasetName)) {
                throw new AsterixException(
                        "Can't drop dataset since it is connected to active entity: " + listener.getEntityId());
            }
        }

        // #. prepare jobs to drop the dataset and the indexes in NC
        List<Index> indexes = MetadataManager.INSTANCE.getDatasetIndexes(mdTxnCtx.getValue(), dataverseName,
                datasetName);
        for (int j = 0; j < indexes.size(); j++) {
            if (indexes.get(j).isSecondaryIndex()) {
                CompiledIndexDropStatement cds = new CompiledIndexDropStatement(dataverseName, datasetName,
                        indexes.get(j).getIndexName());
                jobsToExecute.add(IndexOperations.buildDropSecondaryIndexJobSpec(cds, metadataProvider, ds));
            }
        }
        CompiledDatasetDropStatement cds = new CompiledDatasetDropStatement(dataverseName, datasetName);
        jobsToExecute.add(DatasetOperations.createDropDatasetJobSpec(cds, metadataProvider));

        // #. mark the existing dataset as PendingDropOp
        MetadataManager.INSTANCE.dropDataset(mdTxnCtx.getValue(), dataverseName, datasetName);
        MetadataManager.INSTANCE.addDataset(mdTxnCtx.getValue(),
                new Dataset(dataverseName, datasetName, ds.getItemTypeDataverseName(), ds.getItemTypeName(),
                        ds.getMetaItemTypeDataverseName(), ds.getMetaItemTypeName(), ds.getNodeGroupName(),
                        ds.getCompactionPolicy(), ds.getCompactionPolicyProperties(), ds.getDatasetDetails(),
                        ds.getHints(), ds.getDatasetType(), ds.getDatasetId(),
                        IMetadataEntity.PENDING_DROP_OP));

        MetadataManager.INSTANCE.commitTransaction(mdTxnCtx.getValue());
        bActiveTxn.setValue(false);
        progress.setValue(ProgressState.ADDED_PENDINGOP_RECORD_TO_METADATA);

        // # disconnect the feeds
        for (Pair<JobSpecification, Boolean> p : disconnectJobList.values()) {
            JobUtils.runJob(hcc, p.first, true);
        }

        // #. run the jobs
        for (JobSpecification jobSpec : jobsToExecute) {
            JobUtils.runJob(hcc, jobSpec, true);
        }

        mdTxnCtx.setValue(MetadataManager.INSTANCE.beginTransaction());
        bActiveTxn.setValue(true);
        metadataProvider.setMetadataTxnContext(mdTxnCtx.getValue());
    } else {
        // External dataset
        ExternalDatasetsRegistry.INSTANCE.removeDatasetInfo(ds);
        // #. prepare jobs to drop the dataset and the indexes in NC
        List<Index> indexes = MetadataManager.INSTANCE.getDatasetIndexes(mdTxnCtx.getValue(), dataverseName,
                datasetName);
        for (int j = 0; j < indexes.size(); j++) {
            if (ExternalIndexingOperations.isFileIndex(indexes.get(j))) {
                CompiledIndexDropStatement cds = new CompiledIndexDropStatement(dataverseName, datasetName,
                        indexes.get(j).getIndexName());
                jobsToExecute.add(IndexOperations.buildDropSecondaryIndexJobSpec(cds, metadataProvider, ds));
            } else {
                CompiledIndexDropStatement cds = new CompiledIndexDropStatement(dataverseName, datasetName,
                        indexes.get(j).getIndexName());
                jobsToExecute
                        .add(ExternalIndexingOperations.buildDropFilesIndexJobSpec(cds, metadataProvider, ds));
            }
        }

        // #. mark the existing dataset as PendingDropOp
        MetadataManager.INSTANCE.dropDataset(mdTxnCtx.getValue(), dataverseName, datasetName);
        MetadataManager.INSTANCE.addDataset(mdTxnCtx.getValue(),
                new Dataset(dataverseName, datasetName, ds.getItemTypeDataverseName(), ds.getItemTypeName(),
                        ds.getNodeGroupName(), ds.getCompactionPolicy(), ds.getCompactionPolicyProperties(),
                        ds.getDatasetDetails(), ds.getHints(), ds.getDatasetType(), ds.getDatasetId(),
                        IMetadataEntity.PENDING_DROP_OP));

        MetadataManager.INSTANCE.commitTransaction(mdTxnCtx.getValue());
        bActiveTxn.setValue(false);
        progress.setValue(ProgressState.ADDED_PENDINGOP_RECORD_TO_METADATA);

        // #. run the jobs
        for (JobSpecification jobSpec : jobsToExecute) {
            JobUtils.runJob(hcc, jobSpec, true);
        }
        if (!indexes.isEmpty()) {
            ExternalDatasetsRegistry.INSTANCE.removeDatasetInfo(ds);
        }
        mdTxnCtx.setValue(MetadataManager.INSTANCE.beginTransaction());
        bActiveTxn.setValue(true);
        metadataProvider.setMetadataTxnContext(mdTxnCtx.getValue());
    }

    // #. finally, delete the dataset.
    MetadataManager.INSTANCE.dropDataset(mdTxnCtx.getValue(), dataverseName, datasetName);
    // Drop the associated nodegroup
    String nodegroup = ds.getNodeGroupName();
    if (!nodegroup.equalsIgnoreCase(MetadataConstants.METADATA_DEFAULT_NODEGROUP_NAME)) {
        MetadataManager.INSTANCE.dropNodegroup(mdTxnCtx.getValue(), dataverseName + ":" + datasetName);
    }
}