Example usage for org.apache.commons.lang3.mutable MutableObject getValue

Introduction

On this page you can find example usages of org.apache.commons.lang3.mutable.MutableObject#getValue.

Prototype

@Override
public T getValue() 

Document

Gets the value.
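
Before the full examples, a minimal self-contained sketch of the getValue()/setValue() contract (the class name MutableObjectGetValueExample is illustrative, not part of the library):

import org.apache.commons.lang3.mutable.MutableObject;

public class MutableObjectGetValueExample {
    public static void main(String[] args) {
        // Wrap an initial value, then read it back with getValue().
        MutableObject<String> holder = new MutableObject<>("initial");
        System.out.println(holder.getValue()); // prints "initial"

        // Replace the wrapped value; getValue() reflects the change.
        holder.setValue("updated");
        System.out.println(holder.getValue()); // prints "updated"

        // A holder created without a value returns null until one is set.
        MutableObject<Integer> empty = new MutableObject<>();
        System.out.println(empty.getValue()); // prints "null"
    }
}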

Usage

From source file:org.apache.asterix.app.translator.QueryTranslator.java

public void handleCreateDatasetStatement(AqlMetadataProvider metadataProvider, Statement stmt,
        IHyracksClientConnection hcc) throws AsterixException, Exception {
    MutableObject<ProgressState> progress = new MutableObject<>(ProgressState.NO_PROGRESS);
    DatasetDecl dd = (DatasetDecl) stmt;
    String dataverseName = getActiveDataverse(dd.getDataverse());
    String datasetName = dd.getName().getValue();
    DatasetType dsType = dd.getDatasetType();
    String itemTypeDataverseName = getActiveDataverse(dd.getItemTypeDataverse());
    String itemTypeName = dd.getItemTypeName().getValue();
    String metaItemTypeDataverseName = getActiveDataverse(dd.getMetaItemTypeDataverse());
    String metaItemTypeName = dd.getMetaItemTypeName().getValue();
    Identifier ngNameId = dd.getNodegroupName();
    String nodegroupName = getNodeGroupName(ngNameId, dd, dataverseName);
    String compactionPolicy = dd.getCompactionPolicy();
    Map<String, String> compactionPolicyProperties = dd.getCompactionPolicyProperties();
    boolean defaultCompactionPolicy = compactionPolicy == null;
    boolean temp = dd.getDatasetDetailsDecl().isTemp();

    MetadataTransactionContext mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
    boolean bActiveTxn = true;
    metadataProvider.setMetadataTxnContext(mdTxnCtx);

    MetadataLockManager.INSTANCE.createDatasetBegin(dataverseName, itemTypeDataverseName,
            itemTypeDataverseName + "." + itemTypeName, metaItemTypeDataverseName,
            metaItemTypeDataverseName + "." + metaItemTypeName, nodegroupName, compactionPolicy,
            dataverseName + "." + datasetName, defaultCompactionPolicy);
    Dataset dataset = null;
    try {

        IDatasetDetails datasetDetails = null;
        Dataset ds = MetadataManager.INSTANCE.getDataset(metadataProvider.getMetadataTxnContext(),
                dataverseName, datasetName);
        if (ds != null) {
            if (dd.getIfNotExists()) {
                MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
                return;
            } else {
                throw new AlgebricksException("A dataset with this name " + datasetName + " already exists.");
            }
        }
        Datatype dt = MetadataManager.INSTANCE.getDatatype(metadataProvider.getMetadataTxnContext(),
                itemTypeDataverseName, itemTypeName);
        if (dt == null) {
            throw new AlgebricksException(": type " + itemTypeName + " could not be found.");
        }
        String ngName = ngNameId != null ? ngNameId.getValue()
                : configureNodegroupForDataset(dd, dataverseName, mdTxnCtx);

        if (compactionPolicy == null) {
            compactionPolicy = GlobalConfig.DEFAULT_COMPACTION_POLICY_NAME;
            compactionPolicyProperties = GlobalConfig.DEFAULT_COMPACTION_POLICY_PROPERTIES;
        } else {
            validateCompactionPolicy(compactionPolicy, compactionPolicyProperties, mdTxnCtx, false);
        }
        switch (dd.getDatasetType()) {
        case INTERNAL:
            IAType itemType = dt.getDatatype();
            if (itemType.getTypeTag() != ATypeTag.RECORD) {
                throw new AlgebricksException("Dataset type has to be a record type.");
            }

            IAType metaItemType = null;
            if (metaItemTypeDataverseName != null && metaItemTypeName != null) {
                metaItemType = metadataProvider.findType(metaItemTypeDataverseName, metaItemTypeName);
            }
            if (metaItemType != null && metaItemType.getTypeTag() != ATypeTag.RECORD) {
                throw new AlgebricksException("Dataset meta type has to be a record type.");
            }
            ARecordType metaRecType = (ARecordType) metaItemType;

            List<List<String>> partitioningExprs = ((InternalDetailsDecl) dd.getDatasetDetailsDecl())
                    .getPartitioningExprs();
            List<Integer> keySourceIndicators = ((InternalDetailsDecl) dd.getDatasetDetailsDecl())
                    .getKeySourceIndicators();
            boolean autogenerated = ((InternalDetailsDecl) dd.getDatasetDetailsDecl()).isAutogenerated();
            ARecordType aRecordType = (ARecordType) itemType;
            List<IAType> partitioningTypes = ValidateUtil.validatePartitioningExpressions(aRecordType,
                    metaRecType, partitioningExprs, keySourceIndicators, autogenerated);

            List<String> filterField = ((InternalDetailsDecl) dd.getDatasetDetailsDecl()).getFilterField();
            if (filterField != null) {
                ValidateUtil.validateFilterField(aRecordType, filterField);
            }
            if (compactionPolicy == null && filterField != null) {
                // If the dataset has a filter and the user didn't specify a merge
                // policy, then we will pick the
                // correlated-prefix as the default merge policy.
                compactionPolicy = GlobalConfig.DEFAULT_FILTERED_DATASET_COMPACTION_POLICY_NAME;
                compactionPolicyProperties = GlobalConfig.DEFAULT_COMPACTION_POLICY_PROPERTIES;
            }
            datasetDetails = new InternalDatasetDetails(InternalDatasetDetails.FileStructure.BTREE,
                    InternalDatasetDetails.PartitioningStrategy.HASH, partitioningExprs, partitioningExprs,
                    keySourceIndicators, partitioningTypes, autogenerated, filterField, temp);
            break;
        case EXTERNAL:
            String adapter = ((ExternalDetailsDecl) dd.getDatasetDetailsDecl()).getAdapter();
            Map<String, String> properties = ((ExternalDetailsDecl) dd.getDatasetDetailsDecl()).getProperties();

            datasetDetails = new ExternalDatasetDetails(adapter, properties, new Date(),
                    ExternalDatasetTransactionState.COMMIT);
            break;
        default:
            throw new AsterixException("Unknown datatype " + dd.getDatasetType());
        }

        // #. initialize DatasetIdFactory if it is not initialized.
        if (!DatasetIdFactory.isInitialized()) {
            DatasetIdFactory.initialize(MetadataManager.INSTANCE.getMostRecentDatasetId());
        }

        // #. add a new dataset with PendingAddOp
        dataset = new Dataset(dataverseName, datasetName, itemTypeDataverseName, itemTypeName,
                metaItemTypeDataverseName, metaItemTypeName, ngName, compactionPolicy,
                compactionPolicyProperties, datasetDetails, dd.getHints(), dsType,
                DatasetIdFactory.generateDatasetId(), IMetadataEntity.PENDING_ADD_OP);
        MetadataManager.INSTANCE.addDataset(metadataProvider.getMetadataTxnContext(), dataset);

        if (dd.getDatasetType() == DatasetType.INTERNAL) {
            Dataverse dataverse = MetadataManager.INSTANCE
                    .getDataverse(metadataProvider.getMetadataTxnContext(), dataverseName);
            JobSpecification jobSpec = DatasetOperations.createDatasetJobSpec(dataverse, datasetName,
                    metadataProvider);

            // #. make metadataTxn commit before calling runJob.
            MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
            bActiveTxn = false;
            progress.setValue(ProgressState.ADDED_PENDINGOP_RECORD_TO_METADATA);

            // #. runJob
            JobUtils.runJob(hcc, jobSpec, true);

            // #. begin new metadataTxn
            mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
            bActiveTxn = true;
            metadataProvider.setMetadataTxnContext(mdTxnCtx);
        }

        // #. add a new dataset with PendingNoOp after deleting the dataset with PendingAddOp
        MetadataManager.INSTANCE.dropDataset(metadataProvider.getMetadataTxnContext(), dataverseName,
                datasetName);
        dataset.setPendingOp(IMetadataEntity.PENDING_NO_OP);
        MetadataManager.INSTANCE.addDataset(metadataProvider.getMetadataTxnContext(), dataset);
        MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
    } catch (Exception e) {
        if (bActiveTxn) {
            abort(e, e, mdTxnCtx);
        }

        if (progress.getValue() == ProgressState.ADDED_PENDINGOP_RECORD_TO_METADATA) {

            // #. execute compensation operations
            // remove the index in NC
            // [Notice]
            // As long as we updated(and committed) metadata, we should remove any effect of the job
            // because an exception occurs during runJob.
            mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
            bActiveTxn = true;
            metadataProvider.setMetadataTxnContext(mdTxnCtx);
            CompiledDatasetDropStatement cds = new CompiledDatasetDropStatement(dataverseName, datasetName);
            try {
                JobSpecification jobSpec = DatasetOperations.createDropDatasetJobSpec(cds, metadataProvider);
                MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
                bActiveTxn = false;
                JobUtils.runJob(hcc, jobSpec, true);
            } catch (Exception e2) {
                e.addSuppressed(e2);
                if (bActiveTxn) {
                    abort(e, e2, mdTxnCtx);
                }
            }

            // remove the record from the metadata.
            mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
            metadataProvider.setMetadataTxnContext(mdTxnCtx);
            try {
                MetadataManager.INSTANCE.dropDataset(metadataProvider.getMetadataTxnContext(), dataverseName,
                        datasetName);
                MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
            } catch (Exception e2) {
                e.addSuppressed(e2);
                abort(e, e2, mdTxnCtx);
                throw new IllegalStateException("System is inconsistent state: pending dataset(" + dataverseName
                        + "." + datasetName + ") couldn't be removed from the metadata", e);
            }
        }

        throw e;
    } finally {
        MetadataLockManager.INSTANCE.createDatasetEnd(dataverseName, itemTypeDataverseName,
                itemTypeDataverseName + "." + itemTypeName, metaItemTypeDataverseName,
                metaItemTypeDataverseName + "." + metaItemTypeName, nodegroupName, compactionPolicy,
                dataverseName + "." + datasetName, defaultCompactionPolicy);
    }
}
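
The example above advances a MutableObject<ProgressState> as the statement makes progress and reads it back in the catch block to decide how much compensation to run. A minimal sketch of that pattern, assuming a hypothetical ProgressStateSketch class and a simplified ProgressState enum:

import org.apache.commons.lang3.mutable.MutableObject;

public class ProgressStateSketch {
    enum ProgressState { NO_PROGRESS, ADDED_PENDINGOP_RECORD_TO_METADATA }

    public static void main(String[] args) {
        MutableObject<ProgressState> progress = new MutableObject<>(ProgressState.NO_PROGRESS);
        try {
            // ... add the pending metadata record ...
            progress.setValue(ProgressState.ADDED_PENDINGOP_RECORD_TO_METADATA);
            throw new RuntimeException("job failed"); // simulate a later failure
        } catch (RuntimeException e) {
            // Only compensate if the pending record actually made it into metadata.
            if (progress.getValue() == ProgressState.ADDED_PENDINGOP_RECORD_TO_METADATA) {
                System.out.println("running compensation: drop pending record");
            }
        }
    }
}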

From source file:org.apache.asterix.app.translator.QueryTranslator.java

public void handleDatasetDropStatement(AqlMetadataProvider metadataProvider, Statement stmt,
        IHyracksClientConnection hcc) throws Exception {
    DropDatasetStatement stmtDelete = (DropDatasetStatement) stmt;
    String dataverseName = getActiveDataverse(stmtDelete.getDataverseName());
    String datasetName = stmtDelete.getDatasetName().getValue();
    MutableObject<ProgressState> progress = new MutableObject<>(ProgressState.NO_PROGRESS);
    MutableObject<MetadataTransactionContext> mdTxnCtx = new MutableObject<>(
            MetadataManager.INSTANCE.beginTransaction());
    MutableBoolean bActiveTxn = new MutableBoolean(true);
    metadataProvider.setMetadataTxnContext(mdTxnCtx.getValue());
    MetadataLockManager.INSTANCE.dropDatasetBegin(dataverseName, dataverseName + "." + datasetName);
    List<JobSpecification> jobsToExecute = new ArrayList<>();
    try {
        Dataset ds = MetadataManager.INSTANCE.getDataset(mdTxnCtx.getValue(), dataverseName, datasetName);
        if (ds == null) {
            if (stmtDelete.getIfExists()) {
                MetadataManager.INSTANCE.commitTransaction(mdTxnCtx.getValue());
                return;
            } else {
                throw new AlgebricksException("There is no dataset with this name " + datasetName
                        + " in dataverse " + dataverseName + ".");
            }
        }

        doDropDataset(ds, datasetName, metadataProvider, mdTxnCtx, jobsToExecute, dataverseName, bActiveTxn,
                progress, hcc);
        MetadataManager.INSTANCE.commitTransaction(mdTxnCtx.getValue());
    } catch (Exception e) {
        if (bActiveTxn.booleanValue()) {
            abort(e, e, mdTxnCtx.getValue());
        }

        if (progress.getValue() == ProgressState.ADDED_PENDINGOP_RECORD_TO_METADATA) {
            // #. execute compensation operations
            // remove the all indexes in NC
            try {
                for (JobSpecification jobSpec : jobsToExecute) {
                    JobUtils.runJob(hcc, jobSpec, true);
                }
            } catch (Exception e2) {
                // do no throw exception since still the metadata needs to be compensated.
                e.addSuppressed(e2);
            }

            // remove the record from the metadata.
            mdTxnCtx.setValue(MetadataManager.INSTANCE.beginTransaction());
            metadataProvider.setMetadataTxnContext(mdTxnCtx.getValue());
            try {
                MetadataManager.INSTANCE.dropDataset(metadataProvider.getMetadataTxnContext(), dataverseName,
                        datasetName);
                MetadataManager.INSTANCE.commitTransaction(mdTxnCtx.getValue());
            } catch (Exception e2) {
                e.addSuppressed(e2);
                abort(e, e2, mdTxnCtx.getValue());
                throw new IllegalStateException("System is inconsistent state: pending dataset(" + dataverseName
                        + "." + datasetName + ") couldn't be removed from the metadata", e);
            }
        }

        throw e;
    } finally {
        MetadataLockManager.INSTANCE.dropDatasetEnd(dataverseName, dataverseName + "." + datasetName);
    }
}
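
Here the metadata transaction context itself is passed as a MutableObject so that doDropDataset can install a fresh transaction handle that the caller then commits. A minimal sketch of that out-parameter pattern, with hypothetical names (TxnHandleSketch, doWork) and a String standing in for the transaction context:

import org.apache.commons.lang3.mutable.MutableBoolean;
import org.apache.commons.lang3.mutable.MutableObject;

public class TxnHandleSketch {
    static void doWork(MutableObject<String> txnRef, MutableBoolean active) {
        // Pretend we committed the caller's transaction and opened a new one.
        active.setValue(false);
        txnRef.setValue("txn-2");
        active.setValue(true);
    }

    public static void main(String[] args) {
        MutableObject<String> txnRef = new MutableObject<>("txn-1");
        MutableBoolean active = new MutableBoolean(true);
        doWork(txnRef, active);
        // The caller observes the handle the callee installed.
        System.out.println(txnRef.getValue()); // prints "txn-2"
    }
}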

From source file:org.apache.asterix.app.translator.QueryTranslator.java

protected void doDropDataset(Dataset ds, String datasetName, AqlMetadataProvider metadataProvider,
        MutableObject<MetadataTransactionContext> mdTxnCtx, List<JobSpecification> jobsToExecute,
        String dataverseName, MutableBoolean bActiveTxn, MutableObject<ProgressState> progress,
        IHyracksClientConnection hcc) throws Exception {
    Map<FeedConnectionId, Pair<JobSpecification, Boolean>> disconnectJobList = new HashMap<>();
    if (ds.getDatasetType() == DatasetType.INTERNAL) {
        // prepare job spec(s) that would disconnect any active feeds involving the dataset.
        IActiveEntityEventsListener[] activeListeners = ActiveJobNotificationHandler.INSTANCE
                .getEventListeners();
        for (IActiveEntityEventsListener listener : activeListeners) {
            if (listener.isEntityUsingDataset(dataverseName, datasetName)) {
                throw new AsterixException(
                        "Can't drop dataset since it is connected to active entity: " + listener.getEntityId());
            }
        }

        // #. prepare jobs to drop the datatset and the indexes in NC
        List<Index> indexes = MetadataManager.INSTANCE.getDatasetIndexes(mdTxnCtx.getValue(), dataverseName,
                datasetName);
        for (int j = 0; j < indexes.size(); j++) {
            if (indexes.get(j).isSecondaryIndex()) {
                CompiledIndexDropStatement cds = new CompiledIndexDropStatement(dataverseName, datasetName,
                        indexes.get(j).getIndexName());
                jobsToExecute.add(IndexOperations.buildDropSecondaryIndexJobSpec(cds, metadataProvider, ds));
            }
        }
        CompiledDatasetDropStatement cds = new CompiledDatasetDropStatement(dataverseName, datasetName);
        jobsToExecute.add(DatasetOperations.createDropDatasetJobSpec(cds, metadataProvider));

        // #. mark the existing dataset as PendingDropOp
        MetadataManager.INSTANCE.dropDataset(mdTxnCtx.getValue(), dataverseName, datasetName);
        MetadataManager.INSTANCE.addDataset(mdTxnCtx.getValue(),
                new Dataset(dataverseName, datasetName, ds.getItemTypeDataverseName(), ds.getItemTypeName(),
                        ds.getMetaItemTypeDataverseName(), ds.getMetaItemTypeName(), ds.getNodeGroupName(),
                        ds.getCompactionPolicy(), ds.getCompactionPolicyProperties(), ds.getDatasetDetails(),
                        ds.getHints(), ds.getDatasetType(), ds.getDatasetId(),
                        IMetadataEntity.PENDING_DROP_OP));

        MetadataManager.INSTANCE.commitTransaction(mdTxnCtx.getValue());
        bActiveTxn.setValue(false);
        progress.setValue(ProgressState.ADDED_PENDINGOP_RECORD_TO_METADATA);

        // # disconnect the feeds
        for (Pair<JobSpecification, Boolean> p : disconnectJobList.values()) {
            JobUtils.runJob(hcc, p.first, true);
        }

        // #. run the jobs
        for (JobSpecification jobSpec : jobsToExecute) {
            JobUtils.runJob(hcc, jobSpec, true);
        }

        mdTxnCtx.setValue(MetadataManager.INSTANCE.beginTransaction());
        bActiveTxn.setValue(true);
        metadataProvider.setMetadataTxnContext(mdTxnCtx.getValue());
    } else {
        // External dataset
        ExternalDatasetsRegistry.INSTANCE.removeDatasetInfo(ds);
        // #. prepare jobs to drop the datatset and the indexes in NC
        List<Index> indexes = MetadataManager.INSTANCE.getDatasetIndexes(mdTxnCtx.getValue(), dataverseName,
                datasetName);
        for (int j = 0; j < indexes.size(); j++) {
            if (ExternalIndexingOperations.isFileIndex(indexes.get(j))) {
                CompiledIndexDropStatement cds = new CompiledIndexDropStatement(dataverseName, datasetName,
                        indexes.get(j).getIndexName());
                jobsToExecute.add(IndexOperations.buildDropSecondaryIndexJobSpec(cds, metadataProvider, ds));
            } else {
                CompiledIndexDropStatement cds = new CompiledIndexDropStatement(dataverseName, datasetName,
                        indexes.get(j).getIndexName());
                jobsToExecute
                        .add(ExternalIndexingOperations.buildDropFilesIndexJobSpec(cds, metadataProvider, ds));
            }
        }

        // #. mark the existing dataset as PendingDropOp
        MetadataManager.INSTANCE.dropDataset(mdTxnCtx.getValue(), dataverseName, datasetName);
        MetadataManager.INSTANCE.addDataset(mdTxnCtx.getValue(),
                new Dataset(dataverseName, datasetName, ds.getItemTypeDataverseName(), ds.getItemTypeName(),
                        ds.getNodeGroupName(), ds.getCompactionPolicy(), ds.getCompactionPolicyProperties(),
                        ds.getDatasetDetails(), ds.getHints(), ds.getDatasetType(), ds.getDatasetId(),
                        IMetadataEntity.PENDING_DROP_OP));

        MetadataManager.INSTANCE.commitTransaction(mdTxnCtx.getValue());
        bActiveTxn.setValue(false);
        progress.setValue(ProgressState.ADDED_PENDINGOP_RECORD_TO_METADATA);

        // #. run the jobs
        for (JobSpecification jobSpec : jobsToExecute) {
            JobUtils.runJob(hcc, jobSpec, true);
        }
        if (!indexes.isEmpty()) {
            ExternalDatasetsRegistry.INSTANCE.removeDatasetInfo(ds);
        }
        mdTxnCtx.setValue(MetadataManager.INSTANCE.beginTransaction());
        bActiveTxn.setValue(true);
        metadataProvider.setMetadataTxnContext(mdTxnCtx.getValue());
    }

    // #. finally, delete the dataset.
    MetadataManager.INSTANCE.dropDataset(mdTxnCtx.getValue(), dataverseName, datasetName);
    // Drop the associated nodegroup
    String nodegroup = ds.getNodeGroupName();
    if (!nodegroup.equalsIgnoreCase(MetadataConstants.METADATA_DEFAULT_NODEGROUP_NAME)) {
        MetadataManager.INSTANCE.dropNodegroup(mdTxnCtx.getValue(), dataverseName + ":" + datasetName);
    }
}

From source file:org.apache.asterix.metadata.entities.Dataset.java

/**
 * Drop this dataset
 *
 * @param metadataProvider
 *            metadata provider that can be used to get metadata info and runtimes
 * @param mdTxnCtx
 *            the transaction context
 * @param jobsToExecute
 *            a list of jobs to be executed as part of the drop operation
 * @param bActiveTxn
 *            whether the metadata transaction is ongoing
 * @param progress
 *            a mutable progress state used for error handling during the drop operation
 * @param hcc
 *            a client connection to hyracks master for job execution
 * @throws Exception
 *             if an error occur during the drop process or if the dataset can't be dropped for any reason
 */
public void drop(MetadataProvider metadataProvider, MutableObject<MetadataTransactionContext> mdTxnCtx,
        List<JobSpecification> jobsToExecute, MutableBoolean bActiveTxn, MutableObject<ProgressState> progress,
        IHyracksClientConnection hcc) throws Exception {
    Map<FeedConnectionId, Pair<JobSpecification, Boolean>> disconnectJobList = new HashMap<>();
    if (getDatasetType() == DatasetType.INTERNAL) {
        // prepare job spec(s) that would disconnect any active feeds involving the dataset.
        IActiveEntityEventsListener[] activeListeners = ActiveJobNotificationHandler.INSTANCE
                .getEventListeners();
        for (IActiveEntityEventsListener listener : activeListeners) {
            if (listener.isEntityUsingDataset(this)) {
                throw new CompilationException(ErrorCode.COMPILATION_CANT_DROP_ACTIVE_DATASET,
                        RecordUtil.toFullyQualifiedName(dataverseName, datasetName),
                        listener.getEntityId().toString());
            }
        }
        // #. prepare jobs to drop the datatset and the indexes in NC
        List<Index> indexes = MetadataManager.INSTANCE.getDatasetIndexes(mdTxnCtx.getValue(), dataverseName,
                datasetName);
        for (int j = 0; j < indexes.size(); j++) {
            if (indexes.get(j).isSecondaryIndex()) {
                jobsToExecute.add(IndexUtil.buildDropIndexJobSpec(indexes.get(j), metadataProvider, this));
            }
        }
        Index primaryIndex = MetadataManager.INSTANCE.getIndex(mdTxnCtx.getValue(), dataverseName, datasetName,
                datasetName);
        jobsToExecute.add(DatasetUtil.createDropDatasetJobSpec(this, primaryIndex, metadataProvider));
        // #. mark the existing dataset as PendingDropOp
        MetadataManager.INSTANCE.dropDataset(mdTxnCtx.getValue(), dataverseName, datasetName);
        MetadataManager.INSTANCE.addDataset(mdTxnCtx.getValue(),
                new Dataset(dataverseName, datasetName, getItemTypeDataverseName(), getItemTypeName(),
                        getMetaItemTypeDataverseName(), getMetaItemTypeName(), getNodeGroupName(),
                        getCompactionPolicy(), getCompactionPolicyProperties(), getDatasetDetails(), getHints(),
                        getDatasetType(), getDatasetId(), MetadataUtil.PENDING_DROP_OP));

        MetadataManager.INSTANCE.commitTransaction(mdTxnCtx.getValue());
        bActiveTxn.setValue(false);
        progress.setValue(ProgressState.ADDED_PENDINGOP_RECORD_TO_METADATA);

        // # disconnect the feeds
        for (Pair<JobSpecification, Boolean> p : disconnectJobList.values()) {
            JobUtils.runJob(hcc, p.first, true);
        }

        // #. run the jobs
        for (JobSpecification jobSpec : jobsToExecute) {
            JobUtils.runJob(hcc, jobSpec, true);
        }

        mdTxnCtx.setValue(MetadataManager.INSTANCE.beginTransaction());
        bActiveTxn.setValue(true);
        metadataProvider.setMetadataTxnContext(mdTxnCtx.getValue());
    } else {
        // External dataset
        ExternalDatasetsRegistry.INSTANCE.removeDatasetInfo(this);
        // #. prepare jobs to drop the datatset and the indexes in NC
        List<Index> indexes = MetadataManager.INSTANCE.getDatasetIndexes(mdTxnCtx.getValue(), dataverseName,
                datasetName);
        for (int j = 0; j < indexes.size(); j++) {
            if (ExternalIndexingOperations.isFileIndex(indexes.get(j))) {
                jobsToExecute.add(IndexUtil.buildDropIndexJobSpec(indexes.get(j), metadataProvider, this));
            } else {
                jobsToExecute.add(DatasetUtil.buildDropFilesIndexJobSpec(metadataProvider, this));
            }
        }

        // #. mark the existing dataset as PendingDropOp
        MetadataManager.INSTANCE.dropDataset(mdTxnCtx.getValue(), dataverseName, datasetName);
        MetadataManager.INSTANCE.addDataset(mdTxnCtx.getValue(),
                new Dataset(dataverseName, datasetName, getItemTypeDataverseName(), getItemTypeName(),
                        getNodeGroupName(), getCompactionPolicy(), getCompactionPolicyProperties(),
                        getDatasetDetails(), getHints(), getDatasetType(), getDatasetId(),
                        MetadataUtil.PENDING_DROP_OP));

        MetadataManager.INSTANCE.commitTransaction(mdTxnCtx.getValue());
        bActiveTxn.setValue(false);
        progress.setValue(ProgressState.ADDED_PENDINGOP_RECORD_TO_METADATA);

        // #. run the jobs
        for (JobSpecification jobSpec : jobsToExecute) {
            JobUtils.runJob(hcc, jobSpec, true);
        }
        if (!indexes.isEmpty()) {
            ExternalDatasetsRegistry.INSTANCE.removeDatasetInfo(this);
        }
        mdTxnCtx.setValue(MetadataManager.INSTANCE.beginTransaction());
        bActiveTxn.setValue(true);
        metadataProvider.setMetadataTxnContext(mdTxnCtx.getValue());
    }

    // #. finally, delete the dataset.
    MetadataManager.INSTANCE.dropDataset(mdTxnCtx.getValue(), dataverseName, datasetName);
    // Drop the associated nodegroup
    String nodegroup = getNodeGroupName();
    if (!nodegroup.equalsIgnoreCase(MetadataConstants.METADATA_DEFAULT_NODEGROUP_NAME)) {
        MetadataManager.INSTANCE.dropNodegroup(mdTxnCtx.getValue(), dataverseName + ":" + datasetName);
    }
}

From source file:org.apache.bookkeeper.replication.AuditorPlacementPolicyCheckTest.java

@Test
public void testPlacementPolicyCheckWithBookiesFromDifferentRacks() throws Exception {
    int numOfBookies = 5;
    List<BookieSocketAddress> bookieAddresses = new ArrayList<BookieSocketAddress>();
    BookieSocketAddress bookieAddress;
    RegistrationManager regManager = driver.getRegistrationManager();

    // all the numOfBookies (5) are going to be in different racks
    for (int i = 0; i < numOfBookies; i++) {
        bookieAddress = new BookieSocketAddress("98.98.98." + i, 2181);
        StaticDNSResolver.addNodeToRack(bookieAddress.getHostName(), "/rack" + (i));
        bookieAddresses.add(bookieAddress);
        regManager.registerBookie(bookieAddress.toString(), false);
    }

    LedgerManagerFactory mFactory = driver.getLedgerManagerFactory();
    LedgerManager lm = mFactory.newLedgerManager();
    int ensembleSize = 5;
    int writeQuorumSize = 4;
    int ackQuorumSize = 2;
    int minNumRacksPerWriteQuorumConfValue = 4;
    Collections.shuffle(bookieAddresses);

    // closed ledger
    LedgerMetadata initMeta = LedgerMetadataBuilder.create().withEnsembleSize(ensembleSize)
            .withWriteQuorumSize(writeQuorumSize).withAckQuorumSize(ackQuorumSize)
            .newEnsembleEntry(0L, bookieAddresses).withClosedState().withLastEntryId(100).withLength(10000)
            .withDigestType(DigestType.DUMMY).withPassword(new byte[0]).build();
    lm.createLedgerMetadata(1L, initMeta).get();

    Collections.shuffle(bookieAddresses);
    ensembleSize = 4;
    // closed ledger with multiple segments
    initMeta = LedgerMetadataBuilder.create().withEnsembleSize(ensembleSize)
            .withWriteQuorumSize(writeQuorumSize).withAckQuorumSize(ackQuorumSize)
            .newEnsembleEntry(0L, bookieAddresses.subList(0, 4))
            .newEnsembleEntry(20L, bookieAddresses.subList(1, 5))
            .newEnsembleEntry(60L, bookieAddresses.subList(0, 4)).withClosedState().withLastEntryId(100)
            .withLength(10000).withDigestType(DigestType.DUMMY).withPassword(new byte[0]).build();
    lm.createLedgerMetadata(2L, initMeta).get();

    Collections.shuffle(bookieAddresses);
    // non-closed ledger
    initMeta = LedgerMetadataBuilder.create().withEnsembleSize(ensembleSize)
            .withWriteQuorumSize(writeQuorumSize).withAckQuorumSize(ackQuorumSize)
            .newEnsembleEntry(0L, bookieAddresses.subList(0, 4)).withDigestType(DigestType.DUMMY)
            .withPassword(new byte[0]).build();
    lm.createLedgerMetadata(3L, initMeta).get();

    Collections.shuffle(bookieAddresses);
    // non-closed ledger with multiple segments
    initMeta = LedgerMetadataBuilder.create().withEnsembleSize(ensembleSize)
            .withWriteQuorumSize(writeQuorumSize).withAckQuorumSize(ackQuorumSize)
            .newEnsembleEntry(0L, bookieAddresses.subList(0, 4))
            .newEnsembleEntry(20L, bookieAddresses.subList(1, 5))
            .newEnsembleEntry(60L, bookieAddresses.subList(0, 4)).withDigestType(DigestType.DUMMY)
            .withPassword(new byte[0]).build();
    lm.createLedgerMetadata(4L, initMeta).get();

    ServerConfiguration servConf = new ServerConfiguration(bsConfs.get(0));
    servConf.setMinNumRacksPerWriteQuorum(minNumRacksPerWriteQuorumConfValue);
    setServerConfigProperties(servConf);
    MutableObject<Auditor> auditorRef = new MutableObject<Auditor>();
    try {
        TestStatsLogger statsLogger = startAuditorAndWaitForPlacementPolicyCheck(servConf, auditorRef);
        Gauge<? extends Number> ledgersNotAdheringToPlacementPolicyGuage = statsLogger
                .getGauge(ReplicationStats.NUM_LEDGERS_NOT_ADHERING_TO_PLACEMENT_POLICY);
        /*
         * since all of the bookies are in different racks, there shouldn't be any ledger not adhering
         * to placement policy.
         */
        assertEquals("NUM_LEDGERS_NOT_ADHERING_TO_PLACEMENT_POLICY guage value", 0,
                ledgersNotAdheringToPlacementPolicyGuage.getSample());
    } finally {
        Auditor auditor = auditorRef.getValue();
        if (auditor != null) {
            auditor.close();
        }
    }
}
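
The test hands a MutableObject<Auditor> to the helper so that whatever Auditor the helper starts can still be closed in the finally block. A minimal sketch of that hand-back-for-cleanup pattern, using hypothetical names (ResourceRefSketch, startAndCheck) and AutoCloseable in place of Auditor:

import org.apache.commons.lang3.mutable.MutableObject;

public class ResourceRefSketch {
    static void startAndCheck(MutableObject<AutoCloseable> ref) throws Exception {
        AutoCloseable resource = () -> System.out.println("closed");
        ref.setValue(resource); // hand the resource back before doing more work
        // ... further checks that might throw ...
    }

    public static void main(String[] args) throws Exception {
        MutableObject<AutoCloseable> ref = new MutableObject<>();
        try {
            startAndCheck(ref);
        } finally {
            // Clean up whatever the helper managed to create, if anything.
            AutoCloseable resource = ref.getValue();
            if (resource != null) {
                resource.close();
            }
        }
    }
}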

From source file:org.apache.bookkeeper.replication.AuditorPlacementPolicyCheckTest.java

@Test
public void testPlacementPolicyCheckWithLedgersNotAdheringToPlacementPolicy() throws Exception {
    int numOfBookies = 5;
    int numOfLedgersNotAdheringToPlacementPolicy = 0;
    List<BookieSocketAddress> bookieAddresses = new ArrayList<BookieSocketAddress>();
    RegistrationManager regManager = driver.getRegistrationManager();

    for (int i = 0; i < numOfBookies; i++) {
        BookieSocketAddress bookieAddress = new BookieSocketAddress("98.98.98." + i, 2181);
        bookieAddresses.add(bookieAddress);
        regManager.registerBookie(bookieAddress.toString(), false);
    }

    // only three racks
    StaticDNSResolver.addNodeToRack(bookieAddresses.get(0).getHostName(), "/rack1");
    StaticDNSResolver.addNodeToRack(bookieAddresses.get(1).getHostName(), "/rack2");
    StaticDNSResolver.addNodeToRack(bookieAddresses.get(2).getHostName(), "/rack3");
    StaticDNSResolver.addNodeToRack(bookieAddresses.get(3).getHostName(), "/rack1");
    StaticDNSResolver.addNodeToRack(bookieAddresses.get(4).getHostName(), "/rack2");

    LedgerManagerFactory mFactory = driver.getLedgerManagerFactory();
    LedgerManager lm = mFactory.newLedgerManager();
    int ensembleSize = 5;
    int writeQuorumSize = 3;
    int ackQuorumSize = 2;
    int minNumRacksPerWriteQuorumConfValue = 3;

    /*
     * this closed ledger doesn't adhere to placement policy because there are only
     * 3 racks, and the ensembleSize is 5.
     */
    LedgerMetadata initMeta = LedgerMetadataBuilder.create().withEnsembleSize(ensembleSize)
            .withWriteQuorumSize(writeQuorumSize).withAckQuorumSize(ackQuorumSize)
            .newEnsembleEntry(0L, bookieAddresses).withClosedState().withLastEntryId(100).withLength(10000)
            .withDigestType(DigestType.DUMMY).withPassword(new byte[0]).build();
    lm.createLedgerMetadata(1L, initMeta).get();
    numOfLedgersNotAdheringToPlacementPolicy++;

    /*
     * this is non-closed ledger, so it shouldn't count as ledger not
     * adhering to placement policy
     */
    initMeta = LedgerMetadataBuilder.create().withEnsembleSize(ensembleSize)
            .withWriteQuorumSize(writeQuorumSize).withAckQuorumSize(ackQuorumSize)
            .newEnsembleEntry(0L, bookieAddresses).withDigestType(DigestType.DUMMY).withPassword(new byte[0])
            .build();
    lm.createLedgerMetadata(2L, initMeta).get();

    ServerConfiguration servConf = new ServerConfiguration(bsConfs.get(0));
    servConf.setMinNumRacksPerWriteQuorum(minNumRacksPerWriteQuorumConfValue);
    setServerConfigProperties(servConf);
    MutableObject<Auditor> auditorRef = new MutableObject<Auditor>();
    try {
        TestStatsLogger statsLogger = startAuditorAndWaitForPlacementPolicyCheck(servConf, auditorRef);
        Gauge<? extends Number> ledgersNotAdheringToPlacementPolicyGuage = statsLogger
                .getGauge(ReplicationStats.NUM_LEDGERS_NOT_ADHERING_TO_PLACEMENT_POLICY);
        assertEquals("NUM_LEDGERS_NOT_ADHERING_TO_PLACEMENT_POLICY guage value",
                numOfLedgersNotAdheringToPlacementPolicy, ledgersNotAdheringToPlacementPolicyGuage.getSample());
    } finally {
        Auditor auditor = auditorRef.getValue();
        if (auditor != null) {
            auditor.close();
        }
    }
}

From source file:org.apache.bookkeeper.replication.AuditorPlacementPolicyCheckTest.java

@Test
public void testPlacementPolicyCheckWithLedgersNotAdheringToPolicyWithMultipleSegments() throws Exception {
    int numOfBookies = 7;
    int numOfLedgersNotAdheringToPlacementPolicy = 0;
    List<BookieSocketAddress> bookieAddresses = new ArrayList<BookieSocketAddress>();
    RegistrationManager regManager = driver.getRegistrationManager();

    for (int i = 0; i < numOfBookies; i++) {
        BookieSocketAddress bookieAddress = new BookieSocketAddress("98.98.98." + i, 2181);
        bookieAddresses.add(bookieAddress);
        regManager.registerBookie(bookieAddress.toString(), false);
    }

    // only three racks
    StaticDNSResolver.addNodeToRack(bookieAddresses.get(0).getHostName(), "/rack1");
    StaticDNSResolver.addNodeToRack(bookieAddresses.get(1).getHostName(), "/rack2");
    StaticDNSResolver.addNodeToRack(bookieAddresses.get(2).getHostName(), "/rack3");
    StaticDNSResolver.addNodeToRack(bookieAddresses.get(3).getHostName(), "/rack4");
    StaticDNSResolver.addNodeToRack(bookieAddresses.get(4).getHostName(), "/rack1");
    StaticDNSResolver.addNodeToRack(bookieAddresses.get(5).getHostName(), "/rack2");
    StaticDNSResolver.addNodeToRack(bookieAddresses.get(6).getHostName(), "/rack3");

    LedgerManagerFactory mFactory = driver.getLedgerManagerFactory();
    LedgerManager lm = mFactory.newLedgerManager();
    int ensembleSize = 5;
    int writeQuorumSize = 5;
    int ackQuorumSize = 2;
    int minNumRacksPerWriteQuorumConfValue = 4;

    /*
     * this closed ledger in each writeQuorumSize (5), there would be
     * atleast minNumRacksPerWriteQuorumConfValue (4) racks. So it wont be
     * counted as ledgers not adhering to placement policy.
     */
    LedgerMetadata initMeta = LedgerMetadataBuilder.create().withEnsembleSize(ensembleSize)
            .withWriteQuorumSize(writeQuorumSize).withAckQuorumSize(ackQuorumSize)
            .newEnsembleEntry(0L, bookieAddresses.subList(0, 5))
            .newEnsembleEntry(20L, bookieAddresses.subList(1, 6)).withClosedState().withLastEntryId(100)
            .withLength(10000).withDigestType(DigestType.DUMMY).withPassword(new byte[0]).build();
    lm.createLedgerMetadata(1L, initMeta).get();

    /*
     * for the second segment bookies are from /rack1, /rack2 and /rack3,
     * which is < minNumRacksPerWriteQuorumConfValue (4). So it is not
     * adhering to placement policy.
     *
     * also for the third segment are from /rack1, /rack2 and /rack3, which
     * is < minNumRacksPerWriteQuorumConfValue (4). So it is not adhering to
     * placement policy.
     *
     * Though there are multiple segments are not adhering to placement
     * policy, it should be counted as single ledger.
     */
    initMeta = LedgerMetadataBuilder.create().withEnsembleSize(ensembleSize)
            .withWriteQuorumSize(writeQuorumSize).withAckQuorumSize(ackQuorumSize)
            .newEnsembleEntry(0L, bookieAddresses.subList(0, 5))
            .newEnsembleEntry(20L,
                    Arrays.asList(bookieAddresses.get(0), bookieAddresses.get(1), bookieAddresses.get(2),
                            bookieAddresses.get(4), bookieAddresses.get(5)))
            .newEnsembleEntry(40L,
                    Arrays.asList(bookieAddresses.get(0), bookieAddresses.get(1), bookieAddresses.get(2),
                            bookieAddresses.get(4), bookieAddresses.get(6)))
            .withClosedState().withLastEntryId(100).withLength(10000).withDigestType(DigestType.DUMMY)
            .withPassword(new byte[0]).build();
    lm.createLedgerMetadata(2L, initMeta).get();
    numOfLedgersNotAdheringToPlacementPolicy++;

    ServerConfiguration servConf = new ServerConfiguration(bsConfs.get(0));
    servConf.setMinNumRacksPerWriteQuorum(minNumRacksPerWriteQuorumConfValue);
    setServerConfigProperties(servConf);
    MutableObject<Auditor> auditorRef = new MutableObject<Auditor>();
    try {
        TestStatsLogger statsLogger = startAuditorAndWaitForPlacementPolicyCheck(servConf, auditorRef);
        Gauge<? extends Number> ledgersNotAdheringToPlacementPolicyGuage = statsLogger
                .getGauge(ReplicationStats.NUM_LEDGERS_NOT_ADHERING_TO_PLACEMENT_POLICY);
        assertEquals("NUM_LEDGERS_NOT_ADHERING_TO_PLACEMENT_POLICY gauge value",
                numOfLedgersNotAdheringToPlacementPolicy, ledgersNotAdheringToPlacementPolicyGuage.getSample());
    } finally {
        Auditor auditor = auditorRef.getValue();
        if (auditor != null) {
            auditor.close();
        }
    }
}

From source file:org.apache.distributedlog.BKLogSegmentWriter.java

private void closeLedgerOnClose(final boolean abort, final MutableObject<Throwable> throwExc,
        final CompletableFuture<Void> closePromise) {
    // close the log segment if it isn't in error state, so all the outstanding addEntry(s) will callback.
    if (null == throwExc.getValue() && !isLogSegmentInError()) {
        // Synchronous closing the ledger handle, if we couldn't close a ledger handle successfully.
        // we should throw the exception to #closeToFinalize, so it would fail completing a log segment.
        entryWriter.asyncClose(new CloseCallback() {
            @Override
            public void closeComplete(int rc, LedgerHandle lh, Object ctx) {
                if (BKException.Code.OK != rc && BKException.Code.LedgerClosedException != rc) {
                    if (!abort) {
                        throwExc.setValue(new IOException("Failed to close ledger for "
                                + fullyQualifiedLogSegment + " : " + BKException.getMessage(rc)));
                    }
                }
                completeClosePromise(abort, throwExc, closePromise);
            }
        }, null);
    } else {
        completeClosePromise(abort, throwExc, closePromise);
    }
}

From source file:org.apache.distributedlog.BKLogSegmentWriter.java

private void completeClosePromise(final boolean abort, final MutableObject<Throwable> throwExc,
        final CompletableFuture<Void> closePromise) {
    // If add entry failed because of closing ledger above, we don't need to fail the close operation
    if (!abort && null == throwExc.getValue() && shouldFailCompleteLogSegment()) {
        throwExc.setValue(
                new BKTransmitException("Closing an errored stream : ", transmitResultUpdater.get(this)));
    }

    if (null == throwExc.getValue()) {
        FutureUtils.complete(closePromise, null);
    } else {
        FutureUtils.completeExceptionally(closePromise, throwExc.getValue());
    }
}
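
These two methods record the first failure in a MutableObject<Throwable> and only complete the close promise normally if getValue() is still null. A minimal sketch of that first-error pattern, with hypothetical names (FirstErrorSketch, step):

import java.util.concurrent.CompletableFuture;
import org.apache.commons.lang3.mutable.MutableObject;

public class FirstErrorSketch {
    static void step(boolean fail, MutableObject<Throwable> firstError) {
        // Record only the first failure; later failures are ignored here.
        if (fail && firstError.getValue() == null) {
            firstError.setValue(new IllegalStateException("step failed"));
        }
    }

    public static void main(String[] args) {
        MutableObject<Throwable> firstError = new MutableObject<>();
        CompletableFuture<Void> promise = new CompletableFuture<>();

        step(false, firstError);
        step(true, firstError);

        // Complete or fail the promise depending on whether an error was recorded.
        if (firstError.getValue() == null) {
            promise.complete(null);
        } else {
            promise.completeExceptionally(firstError.getValue());
        }
        System.out.println(promise.isCompletedExceptionally()); // prints "true"
    }
}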

From source file:org.apache.rya.rdftriplestore.inference.InferenceEngine.java

/**
 * Queries for all items that are in a list of the form:
 * <pre>
 *     <:A> ?x _:bnode1 .
 *     _:bnode1 rdf:first <:B> .
 *     _:bnode1 rdf:rest _:bnode2 .
 *     _:bnode2 rdf:first <:C> .
 *     _:bnode2 rdf:rest rdf:nil .
 * </pre>
 * Where {@code :_bnode1} represents the first item in the list and
 * {@code ?x} is some restriction on {@code <:A>}. This will return the
 * list of resources, {@code [<:B>, <:C>]}.
 * @param firstItem the first item in the list.
 * @return the {@link List} of {@link Resource}s.
 * @throws QueryEvaluationException
 */
private List<Resource> getList(final URI firstItem) throws QueryEvaluationException {
    URI head = firstItem;
    final List<Resource> list = new ArrayList<>();
    // Go through and find all bnodes that are part of the defined list.
    while (!RDF.NIL.equals(head)) {
        // rdf.first will point to a type item that is in the list.
        ryaDaoQueryWrapper.queryFirst(head, RDF.FIRST, null, new RDFHandlerBase() {
            @Override
            public void handleStatement(final Statement statement) throws RDFHandlerException {
                // The object found in the query represents a type
                // that should be included in the list.
                final URI object = (URI) statement.getObject();
                list.add(object);
            }
        });
        final MutableObject<URI> headHolder = new MutableObject<>();
        // rdf.rest will point to the next bnode that's part of the list.
        ryaDaoQueryWrapper.queryFirst(head, RDF.REST, null, new RDFHandlerBase() {
            @Override
            public void handleStatement(final Statement statement) throws RDFHandlerException {
                // This object is the next bnode head to look for.
                final URI object = (URI) statement.getObject();
                headHolder.setValue(object);
            }
        });
        // As long as we get a new head there are more bnodes that are part
        // of the list. Keep going until we reach rdf.nil.
        if (headHolder.getValue() != null) {
            head = headHolder.getValue();
        } else {
            head = RDF.NIL;
        }
    }
    return list;
}
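
getList uses a MutableObject<URI> holder because locals captured by the anonymous RDFHandlerBase must be effectively final; the callback writes into the holder and the loop reads it back with getValue(). A minimal sketch of that callback-capture pattern, with hypothetical names (CallbackCaptureSketch, forEachItem):

import java.util.List;
import java.util.function.Consumer;
import org.apache.commons.lang3.mutable.MutableObject;

public class CallbackCaptureSketch {
    static void forEachItem(List<String> items, Consumer<String> handler) {
        items.forEach(handler); // stand-in for a query API that invokes a handler
    }

    public static void main(String[] args) {
        // The holder reference is effectively final, so the lambda may capture it,
        // while the value inside can still be replaced from within the callback.
        MutableObject<String> lastSeen = new MutableObject<>();
        forEachItem(List.of("a", "b", "c"), item -> lastSeen.setValue(item));
        System.out.println(lastSeen.getValue()); // prints "c"
    }
}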