Example usage for org.apache.commons.digester3 Digester setNamespaceAware

List of usage examples for org.apache.commons.digester3 Digester setNamespaceAware

Introduction

On this page you can find example usages of org.apache.commons.digester3 Digester setNamespaceAware.

Prototype

public void setNamespaceAware(boolean namespaceAware) 

Source Link

Document

Set the "namespace aware" flag for parsers we create.

Usage

From source file:com.dotosoft.dot4command.config.xml.XmlConfigParser.java

/**
 * <p>Builds the <code>Digester</code> instance used for parsing.
 * Note that a fresh instance is configured on every call.</p>
 *
 * @return a configured Digester instance.
 */
public Digester getDigester() {
    RuleSet rules = getRuleSet();
    Digester parser = new Digester();
    // Namespace awareness follows from whether the rule set declares a namespace URI.
    parser.setNamespaceAware(rules.getNamespaceURI() != null);
    parser.setUseContextClassLoader(getUseContextClassLoader());
    parser.setValidating(false);
    parser.addRuleSet(rules);
    return parser;
}

From source file:br.univali.celine.scorm.model.cam.ContentPackageReader20043rd.java

/**
 * Parses a SCORM content package manifest from the given stream.
 * The stream is not closed by this method.
 */
public ContentPackage read(java.io.InputStream stream) throws Exception {
    // Create a Digester instance
    Digester d = new Digester();
    // Original comment (Portuguese): "desconsidera todos os namespaces !!!"
    // ("disregards all namespaces"). NOTE(review): setNamespaceAware(true)
    // actually makes the underlying parser namespace-aware; presumably the
    // intent is that, with no rule namespace URI configured, rules match
    // element local names regardless of namespace — confirm against addRules.
    d.setNamespaceAware(true);

    // Prime the digester stack with an object for rules to
    // operate on. Note that it is quite common for "this"
    // to be the object pushed.
    ContentPackage manifest = new ContentPackage(this);
    d.push(manifest);
    // Add rules to the digester that will be triggered while
    // parsing occurs.
    addRules(d);

    // Process the input file.
    d.parse(stream);

    // Let the populated manifest perform its post-parse wiring.
    manifest.finalization();

    return manifest;
}

From source file:eu.scape_project.planning.xml.PlanParser.java

/**
 * Imports the XML representation of plans from the given input stream.
 *
 * <p>Parsing is schema-validated; after parsing, object references that the
 * XML only encodes indirectly (value/scale links, experiment uploads,
 * recommendation alternatives) are re-established on each plan.</p>
 *
 * @param in
 *            the input stream to read from
 * @return list of read plans
 * @throws PlatoException
 *             if the plan cannot be parsed
 */
public List<Plan> importProjects(final InputStream in) throws PlatoException {
    try {
        // Build a validating parser wired to the plan schemas.
        SAXParser parser = validatingParserFactory.getValidatingParser();
        parser.setProperty(ValidatingParserFactory.JAXP_SCHEMA_SOURCE, PlanXMLConstants.PLAN_SCHEMAS);

        Digester digester = new Digester(parser);

        // Resolve schema references to bundled copies rather than fetching them remotely.
        SchemaResolver schemaResolver = new SchemaResolver();

        schemaResolver
                .addSchemaLocation(PlanXMLConstants.PLATO_SCHEMA_URI, PlanXMLConstants.PLATO_SCHEMA_LOCATION)
                .addSchemaLocation(PlanXMLConstants.PAP_SCHEMA_URI, PlanXMLConstants.PAP_SCHEMA_LOCATION)
                .addSchemaLocation(PlanXMLConstants.TAVERNA_SCHEMA_URI,
                        PlanXMLConstants.TAVERNA_SCHEMA_LOCATION);

        digester.setEntityResolver(schemaResolver);
        digester.setErrorHandler(new StrictErrorHandler());
        digester.setNamespaceAware(true);
        digester.push(this);

        PlanParser.addRules(digester);

        digester.setUseContextClassLoader(true);
        plans = new ArrayList<Plan>();

        // finally parse the XML representation with all created rules
        digester.parse(in);

        for (Plan plan : plans) {
            String projectName = plan.getPlanProperties().getName();
            if ((projectName == null) || projectName.isEmpty()) {
                throw new PlatoException("Could not find any project data.");
            }
            /*
             * establish links from values to scales. For all(!)
             * alternatives: An alternative could have be discarded
             * after some measurements have already been added.
             */
            plan.getTree().initValues(plan.getAlternativesDefinition().getAlternatives(),
                    plan.getSampleRecordsDefinition().getRecords().size(), true);
            /*
             * establish references of Experiment.uploads
             */
            HashMap<String, SampleObject> records = new HashMap<String, SampleObject>();
            for (SampleObject record : plan.getSampleRecordsDefinition().getRecords()) {
                records.put(record.getShortName(), record);
            }
            for (Alternative alt : plan.getAlternativesDefinition().getAlternatives()) {
                // instanceof already rejects null, so no separate null check is needed
                if (alt.getExperiment() instanceof ExperimentWrapper) {
                    alt.setExperiment(((ExperimentWrapper) alt.getExperiment()).getExperiment(records));
                }
            }

            // CHECK NUMERIC TRANSFORMER THRESHOLDS
            for (Leaf l : plan.getTree().getRoot().getAllLeaves()) {
                eu.scape_project.planning.model.transform.Transformer t = l.getTransformer();
                if (t instanceof NumericTransformer) {
                    NumericTransformer nt = (NumericTransformer) t;
                    if (!nt.checkOrder()) {
                        // local, single-threaded accumulation: StringBuilder suffices
                        StringBuilder sb = new StringBuilder("NUMERICTRANSFORMER THRESHOLD ERROR ");
                        sb.append(l.getName()).append("::NUMERICTRANSFORMER:: ");
                        sb.append(nt.getThreshold1()).append(" ").append(nt.getThreshold2()).append(" ")
                                .append(nt.getThreshold3()).append(" ").append(nt.getThreshold4())
                                .append(" ").append(nt.getThreshold5());
                        log.error(sb.toString());
                    }
                }
            }

            /*
             * establish references to selected alternative
             */
            HashMap<String, Alternative> alternatives = new HashMap<String, Alternative>();
            for (Alternative alt : plan.getAlternativesDefinition().getAlternatives()) {
                alternatives.put(alt.getName(), alt);
            }
            if (plan.getRecommendation() instanceof RecommendationWrapper) {
                plan.setRecommendation(
                        ((RecommendationWrapper) plan.getRecommendation()).getRecommendation(alternatives));
            }
            if ((plan.getPlanProperties().getState() == PlanState.ANALYSED)
                    && ((plan.getRecommendation() == null)
                            || (plan.getRecommendation().getAlternative() == null))) {
                /*
                 * This project is NOT completely analysed
                 */
                plan.getPlanProperties().setState(PlanState.valueOf(PlanState.ANALYSED.getValue() - 1));
            }
        }
    } catch (Exception e) {
        // boundary method: wrap any parse/post-processing failure, preserving the cause
        throw new PlatoException("Failed to import plans.", e);
    }

    return plans;
}

From source file:org.gbif.metadata.eml.EmlFactory.java

/**
 * Uses rule based parsing to read the EML XML and build the EML model.
 * Note the following:
 * <ul>
 * <li>Metadata provider rules are omitted on the assumption that the provider is the same as the creator</li>
 * <li>Contact rules are omitted on the assumption that contacts are covered by the creator and associated parties</li>
 * <li>Publisher rules are omitted on the assumption the publisher is covered by the creator and associated parties</li>
 * </ul>
 *
 * @param xml To read. Note this will be closed before returning
 *
 * @return The EML populated
 *
 * @throws IOException  If the Stream cannot be read from
 * @throws SAXException If the XML is not well formed
 * @throws ParserConfigurationException If the underlying SAX parser cannot be configured
 */
public static Eml build(InputStream xml) throws IOException, SAXException, ParserConfigurationException {
    Digester digester = new Digester();
    digester.setNamespaceAware(true);

    // push the EML object onto the stack
    Eml eml = new Eml();
    digester.push(eml);

    // add the rules

    // language as xml:lang attribute
    digester.addCallMethod("eml", "setMetadataLanguage", 1);
    digester.addCallParam("eml", 0, "xml:lang");
    // guid as packageId attribute
    digester.addCallMethod("eml", "setPackageId", 1);
    digester.addCallParam("eml", 0, "packageId");

    // alternative ids
    digester.addCallMethod("eml/dataset/alternateIdentifier", "addAlternateIdentifier", 1);
    digester.addCallParam("eml/dataset/alternateIdentifier", 0);

    // title together with language
    digester.addCallMethod("eml/dataset/title", "setTitle", 2);
    digester.addCallParam("eml/dataset/title", 0);
    digester.addCallParam("eml/dataset/title", 1, "xml:lang");

    digester.addBeanPropertySetter("eml/dataset/language", "language");

    // descriptions, broken into multiple paragraphs
    digester.addCallMethod("eml/dataset/abstract/para", "addDescriptionPara", 1);
    digester.addCallParam("eml/dataset/abstract/para", 0);

    digester.addBeanPropertySetter("eml/dataset/additionalInfo/para", "additionalInfo");
    // intellectualRights is captured as a DOM node so markup inside it is preserved
    digester.addRule("eml/dataset/intellectualRights/para", new NodeCreateRule(Node.ELEMENT_NODE));
    digester.addSetNext("eml/dataset/intellectualRights/para", "parseIntellectualRights");
    digester.addCallMethod("eml/dataset/methods/methodStep/description/para", "addMethodStep", 1);
    digester.addCallParam("eml/dataset/methods/methodStep/description/para", 0);
    digester.addBeanPropertySetter("eml/dataset/methods/sampling/studyExtent/description/para", "studyExtent");
    digester.addBeanPropertySetter("eml/dataset/methods/sampling/samplingDescription/para",
            "sampleDescription");
    digester.addBeanPropertySetter("eml/dataset/methods/qualityControl/description/para", "qualityControl");
    digester.addBeanPropertySetter("eml/dataset/distribution/online/url", "distributionUrl");
    digester.addBeanPropertySetter("eml/dataset/purpose/para", "purpose");
    digester.addBeanPropertySetter("eml/dataset/maintenance/description/para", "updateFrequencyDescription");
    digester.addCallMethod("eml/dataset/maintenance/maintenanceUpdateFrequency", "setUpdateFrequency", 1);
    digester.addCallParam("eml/dataset/maintenance/maintenanceUpdateFrequency", 0);
    // GBIF-specific metadata lives under additionalMetadata/metadata/gbif
    digester.addCallMethod("eml/additionalMetadata/metadata/gbif/citation", "setCitation", 2);
    digester.addCallParam("eml/additionalMetadata/metadata/gbif/citation", 0);
    digester.addCallParam("eml/additionalMetadata/metadata/gbif/citation", 1, "identifier");
    digester.addCallMethod("eml/additionalMetadata/metadata/gbif/specimenPreservationMethod",
            "addSpecimenPreservationMethod", 1);
    digester.addCallParam("eml/additionalMetadata/metadata/gbif/specimenPreservationMethod", 0);
    digester.addBeanPropertySetter("eml/additionalMetadata/metadata/gbif/resourceLogoUrl", "logoUrl");
    digester.addBeanPropertySetter("eml/additionalMetadata/metadata/gbif/hierarchyLevel", "hierarchyLevel");
    digester.addCallMethod("eml/dataset/pubDate", "setPubDateAsString", 1);
    digester.addCallParam("eml/dataset/pubDate", 0);

    digester.addCallMethod("eml/additionalMetadata/metadata/gbif/dateStamp", "setDateStamp", 1);
    digester.addCallParam("eml/additionalMetadata/metadata/gbif/dateStamp", 0);

    // agents and compound sub-sections are registered by dedicated helpers
    addAgentRules(digester, "eml/dataset/creator", "addCreator");
    addAgentRules(digester, "eml/dataset/metadataProvider", "addMetadataProvider");
    addAgentRules(digester, "eml/dataset/contact", "addContact");
    addAgentRules(digester, "eml/dataset/associatedParty", "addAssociatedParty");
    addKeywordRules(digester);
    addBibliographicCitations(digester);
    addGeographicCoverageRules(digester);
    addTemporalCoverageRules(digester);
    addLivingTimePeriodRules(digester);
    addFormationPeriodRules(digester);
    addTaxonomicCoverageRules(digester);
    addProjectRules(digester);
    addCollectionRules(digester);
    addPhysicalDataRules(digester);
    addJGTICuratorialIUnit(digester);

    // now parse and return the EML
    try {
        digester.parse(xml);
    } finally {
        // the contract promises the stream is closed before returning
        xml.close();
    }

    return eml;
}