Example usage for java.io StringWriter flush

Introduction

This page collects example usages of java.io.StringWriter.flush(), drawn from open source projects.

Prototype

public void flush() 

Document

Flush the stream.
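
StringWriter writes into an in-memory StringBuffer, so its flush() is effectively a no-op; the examples below still call it, which costs nothing and keeps the code correct should the Writer implementation ever change. A minimal sketch:

import java.io.StringWriter;

public class StringWriterFlushDemo {
    public static void main(String[] args) {
        StringWriter sw = new StringWriter();
        sw.write("hello, ");
        sw.append("world");
        sw.flush();                        // no-op: the characters are already in the in-memory buffer
        System.out.println(sw.toString()); // prints: hello, world
    }
}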

Usage

From source file: de.quist.samy.remocon.RemoteSession.java

private String getRegistrationPayload(String ip) throws IOException {
    StringWriter writer = new StringWriter();
    writer.append((char) 0x64);
    writer.append((char) 0x00);
    writeBase64Text(writer, ip);
    writeBase64Text(writer, uniqueId);
    writeBase64Text(writer, applicationName);
    writer.flush();
    return writer.toString();
}

From source file: nl.ordina.bag.etl.xml.XMLMessageBuilder.java

public String handle(JAXBElement<T> e, NamespacePrefixMapper namespacePrefixMapper) throws JAXBException {
    if (e == null)
        return null;
    StringWriter result = new StringWriter();
    Marshaller marshaller = context.createMarshaller();
    marshaller.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, Boolean.TRUE);
    marshaller.setProperty("com.sun.xml.bind.namespacePrefixMapper", namespacePrefixMapper);
    marshaller.marshal(e, result);
    result.flush();
    return result.toString();
}
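
A caveat on the snippet above: "com.sun.xml.bind.namespacePrefixMapper" is a property of the JAXB reference implementation; to the best of my knowledge, other providers (EclipseLink MOXy, for example) reject it with a PropertyException, so the setProperty call may need guarding when the JAXB provider is not fixed.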

From source file: org.ejbca.ui.web.protocol.StoreServletBase.java

private void printInfo(HttpServletRequest req, HttpServletResponse resp) throws IOException {
    final StringWriter sw = new StringWriter();
    final PrintWriter pw = new HtmlPrintWriter(sw);
    printInfo(this.certCache.getRootCertificates(), "", pw, req.getRequestURL().toString());
    pw.flush();
    pw.close();
    sw.flush();
    returnInfoPage(resp, sw.toString());
    sw.close();
}
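
The example above illustrates the wrapping pattern: when a StringWriter sits under another Writer (here an HtmlPrintWriter), it is the wrapper that may buffer, so flushing the wrapper before reading the result is the step that matters. A distilled sketch, with a plain PrintWriter standing in for the example's HtmlPrintWriter:

import java.io.PrintWriter;
import java.io.StringWriter;

static String renderPage() {
    StringWriter sw = new StringWriter();
    PrintWriter pw = new PrintWriter(sw); // stands in for the example's HtmlPrintWriter
    pw.println("<html>...</html>");
    pw.flush();           // the wrapper, not the StringWriter, is what may buffer
    return sw.toString(); // StringWriter.close() has no effect, so closing is optional
}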

From source file: nl.ordina.bag.etl.xml.XMLMessageBuilder.java

public String handle(T object, NamespacePrefixMapper namespacePrefixMapper) throws JAXBException {
    if (object == null)
        return null;
    StringWriter result = new StringWriter();
    Marshaller marshaller = context.createMarshaller();
    marshaller.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, Boolean.TRUE);
    marshaller.setProperty("com.sun.xml.bind.namespacePrefixMapper", namespacePrefixMapper);
    marshaller.marshal(object, result);
    result.flush();
    return result.toString();
}

From source file: org.unitime.timetable.events.EventEmail.java

private String message() throws IOException, TemplateException {
    Configuration cfg = new Configuration(Configuration.VERSION_2_3_0);
    cfg.setClassForTemplateLoading(EventEmail.class, "");
    cfg.setLocale(Localization.getJavaLocale());
    cfg.setOutputEncoding("utf-8");
    Template template = cfg.getTemplate("confirmation.ftl");
    Map<String, Object> input = new HashMap<String, Object>();
    input.put("msg", MESSAGES);
    input.put("const", CONSTANTS);
    input.put("subject", subject());
    input.put("event", event());
    input.put("operation", request().getOperation() == null ? "NONE" : request().getOperation().name());
    if (response().hasCreatedMeetings())
        input.put("created", EventInterface.getMultiMeetings(response().getCreatedMeetings(), true));
    if (response().hasDeletedMeetings())
        input.put("deleted", EventInterface.getMultiMeetings(response().getDeletedMeetings(), true));
    if (response().hasCancelledMeetings())
        input.put("cancelled", EventInterface.getMultiMeetings(response().getCancelledMeetings(), true));
    if (response().hasUpdatedMeetings())
        input.put("updated", EventInterface.getMultiMeetings(response().getUpdatedMeetings(), true));
    if (request().hasMessage())
        input.put("message", request().getMessage());
    if (request().getEvent().getId() != null) {
        if (event().hasMeetings())
            input.put("meetings", EventInterface.getMultiMeetings(event().getMeetings(), true));
        else
            input.put("meetings", new TreeSet<MultiMeetingInterface>());
    }
    input.put("version", MESSAGES.pageVersion(Constants.getVersion(), Constants.getReleaseDate()));
    input.put("ts", new Date());
    input.put("link", ApplicationProperty.UniTimeUrl.value());
    input.put("sessionId", iRequest.getSessionId());

    StringWriter s = new StringWriter();
    template.process(input, new PrintWriter(s));
    s.flush();
    s.close();

    return s.toString();
}

From source file: org.apache.sqoop.submission.spark.SparkSubmissionEngine.java

/**
 * {@inheritDoc}
 */
@Override
public boolean submit(JobRequest sparkJobRequest) {

    // This additional configuration setup has to be done on each submission
    // (as in the MR engine)
    SparkJobRequest request = (SparkJobRequest) sparkJobRequest;

    // Clone global configuration
    // TODO(jackh): check whether 'final' is needed - probably added by IntelliJ while refactoring conf (from run() in map()) to configuration
    final Configuration configuration = new Configuration(globalConfiguration);

    // Serialize driver context into job configuration
    for (Map.Entry<String, String> entry : request.getDriverContext()) {
        if (entry.getValue() == null) {
            LOG.warn("Ignoring null driver context value for key " + entry.getKey());
            continue;
        }
        configuration.set(entry.getKey(), entry.getValue());
    }

    // Serialize connector context as a sub namespace
    for (Map.Entry<String, String> entry : request.getConnectorContext(Direction.FROM)) {
        if (entry.getValue() == null) {
            LOG.warn("Ignoring null connector context value for key " + entry.getKey());
            continue;
        }
        configuration.set(MRJobConstants.PREFIX_CONNECTOR_FROM_CONTEXT + entry.getKey(), entry.getValue());
    }

    for (Map.Entry<String, String> entry : request.getConnectorContext(Direction.TO)) {
        if (entry.getValue() == null) {
            LOG.warn("Ignoring null connector context value for key " + entry.getKey());
            continue;
        }
        configuration.set(MRJobConstants.PREFIX_CONNECTOR_TO_CONTEXT + entry.getKey(), entry.getValue());
    }

    // Promote all required jars to the job
    configuration.set("tmpjars", StringUtils.join(request.getJars(), ","));

    try {
        Job job = new Job(configuration);

        // Adding link, job and connector schema configurations to the MapReduce configuration object instead of the
        // Hadoop credentials cache. This is because Hadoop, for security reasons, does not serialize the credentials
        // cache for sending over the wire (only the Configuration object is serialized, while the credentials cache
        // resides in the JobConf object).
        // Adding this configuration information to the Configuration object and sending over the wire is a security
        // issue that must be addressed later.

        // from and to link configs
        MRConfigurationUtils.setConnectorLinkConfigUnsafe(Direction.FROM, job.getConfiguration(),
                request.getConnectorLinkConfig(Direction.FROM));
        MRConfigurationUtils.setConnectorLinkConfigUnsafe(Direction.TO, job.getConfiguration(),
                request.getConnectorLinkConfig(Direction.TO));

        // from and to job configs
        MRConfigurationUtils.setConnectorJobConfigUnsafe(Direction.FROM, job.getConfiguration(),
                request.getJobConfig(Direction.FROM));
        MRConfigurationUtils.setConnectorJobConfigUnsafe(Direction.TO, job.getConfiguration(),
                request.getJobConfig(Direction.TO));

        // driver config
        MRConfigurationUtils.setDriverConfig(job, request.getDriverConfig());

        // from and to connector configs
        MRConfigurationUtils.setConnectorSchemaUnsafe(Direction.FROM, job.getConfiguration(),
                request.getJobSubmission().getFromSchema());
        MRConfigurationUtils.setConnectorSchemaUnsafe(Direction.TO, job.getConfiguration(),
                request.getJobSubmission().getToSchema());

        // Retaining to minimize change to existing functioning code
        MRConfigurationUtils.setConnectorLinkConfig(Direction.FROM, job,
                request.getConnectorLinkConfig(Direction.FROM));
        MRConfigurationUtils.setConnectorLinkConfig(Direction.TO, job,
                request.getConnectorLinkConfig(Direction.TO));
        MRConfigurationUtils.setConnectorJobConfig(Direction.FROM, job, request.getJobConfig(Direction.FROM));
        MRConfigurationUtils.setConnectorJobConfig(Direction.TO, job, request.getJobConfig(Direction.TO));
        MRConfigurationUtils.setConnectorSchema(Direction.FROM, job,
                request.getJobSubmission().getFromSchema());
        MRConfigurationUtils.setConnectorSchema(Direction.TO, job, request.getJobSubmission().getToSchema());

        if (request.getJobName() != null) {
            job.setJobName("Sqoop: " + request.getJobName());
        } else {
            job.setJobName("Sqoop job with id: " + request.getJobId());
        }

        job.setInputFormatClass(request.getInputFormatClass());

        job.setOutputFormatClass(request.getOutputFormatClass());
        job.setOutputKeyClass(request.getOutputKeyClass());
        job.setOutputValueClass(request.getOutputValueClass());

        // Form the initial RDD from the Hadoop configuration object set up above
        JavaPairRDD<SqoopSplit, SqoopSplit> initRDD = sc.newAPIHadoopRDD(job.getConfiguration(),
                SqoopInputFormatSpark.class, SqoopSplit.class, SqoopSplit.class);

        // For debugging - check size of initial RDD; remove in production
        int numPartitions = initRDD.partitions().size();

        // Create SparkMapTrigger object and use it to trigger mapToPair()
        ConfigurationWrapper wrappedConf = new ConfigurationWrapper(job.getConfiguration());
        SparkMapTrigger sparkMapTriggerObj = new SparkMapTrigger(initRDD, wrappedConf);
        JavaPairRDD<IntermediateDataFormat<Object>, Integer> mappedRDD = sparkMapTriggerObj.triggerSparkMap();

        // Add reduce phase/any transformation code here
        // For debugging - check size of RDD before partitioning; remove in production
        numPartitions = mappedRDD.partitions().size();

        // Default to the mapped RDD so the write below still works when no loader count is specified
        // (initializing to null here would cause a NullPointerException at the partitions() call below)
        JavaPairRDD<IntermediateDataFormat<Object>, Integer> repartitionedRDD = mappedRDD;

        // Get number of loaders, if specified
        if (request.getLoaders() != null) {
            long numLoaders = request.getLoaders();
            long numExtractors = (request.getExtractors() != null) ? (request.getExtractors())
                    : (job.getConfiguration().getLong(MRJobConstants.JOB_ETL_EXTRACTOR_NUM, 10));

            if (numLoaders > numExtractors) {
                // Repartition the RDD: yields evenly balanced partitions but has a shuffle cost
                repartitionedRDD = mappedRDD.repartition(request.getLoaders());
            } else if (numLoaders < numExtractors) {
                // Use coalesce() in this case. Shuffle tradeoff: turning shuffle on will give us evenly balanced partitions
                // leading to an optimum write time but will incur network costs; shuffle off rids us of the network cost
                // but might lead to sub-optimal write performance if the partitioning by the InputFormat was skewed in the
                // first place
                repartitionedRDD = mappedRDD.coalesce(request.getLoaders(), false);
            } else {
                // Do not do any repartitioning/coalescing if loaders were specified but were equal to extractors
                // Check if this statement incurs any cost
                repartitionedRDD = mappedRDD;
            }
        }

        // For debugging - check size of RDD after partitioning; remove in production
        numPartitions = repartitionedRDD.partitions().size();

        // Calls the OutputFormat for writing
        //mappedRDD.saveAsNewAPIHadoopDataset(job.getConfiguration());
        repartitionedRDD.saveAsNewAPIHadoopDataset(job.getConfiguration());

        // Data transfer completed successfully if here
        request.getJobSubmission().setStatus(SubmissionStatus.SUCCEEDED);

        return true;

    } catch (Exception e) {
        SubmissionError error = new SubmissionError();
        error.setErrorSummary(e.toString());
        StringWriter writer = new StringWriter();
        e.printStackTrace(new PrintWriter(writer));
        writer.flush();
        error.setErrorDetails(writer.toString());

        request.getJobSubmission().setError(error);
        LOG.error("Error in submitting job", e);
        return false;
    }

}
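
The catch block above uses a common idiom: rendering an exception's stack trace into a String by pointing printStackTrace at a PrintWriter over a StringWriter. The idiom in isolation:

import java.io.PrintWriter;
import java.io.StringWriter;

static String stackTraceAsString(Throwable t) {
    StringWriter sw = new StringWriter();
    PrintWriter pw = new PrintWriter(sw);
    t.printStackTrace(pw);
    pw.flush();           // make sure the PrintWriter has handed everything to sw
    return sw.toString();
}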

From source file: org.wso2.carbon.mdm.mobileservices.windows.SyncmlParserTest.java

public String convertToString(Document doc) throws TransformerException {

    DOMSource domSource = new DOMSource(doc);
    StringWriter stringWriter = new StringWriter();
    StreamResult streamResult = new StreamResult(stringWriter);
    TransformerFactory transformerFactory = TransformerFactory.newInstance();
    Transformer transformer = transformerFactory.newTransformer();
    transformer.setOutputProperty(OutputKeys.ENCODING, "UTF-8");
    transformer.setOutputProperty(OutputKeys.INDENT, "yes");
    transformer.transform(domSource, streamResult);
    stringWriter.flush();
    return stringWriter.toString();
}
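
In the snippet above the flush() is purely defensive: Transformer.transform() completes the write before returning, and the StringWriter already holds the full document in memory, so toString() is the call that actually extracts the result.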

From source file: org.apache.olingo.client.core.op.AbstractODataBinder.java

@Override
public ODataEntity getODataEntity(final Entry resource, final URI defaultBaseURI) {
    if (LOG.isDebugEnabled()) {
        final StringWriter writer = new StringWriter();
        client.getSerializer().entry(resource, writer);
        writer.flush();
        LOG.debug("EntryResource -> ODataEntity:\n{}", writer.toString());
    }

    final URI base = defaultBaseURI == null ? resource.getBaseURI() : defaultBaseURI;

    final ODataEntity entity = resource.getSelfLink() == null
            ? client.getObjectFactory().newEntity(resource.getType())
            : client.getObjectFactory().newEntity(resource.getType(),
                    URIUtils.getURI(base, resource.getSelfLink().getHref()));

    if (StringUtils.isNotBlank(resource.getETag())) {
        entity.setETag(resource.getETag());
    }

    if (resource.getEditLink() != null) {
        entity.setEditLink(URIUtils.getURI(base, resource.getEditLink().getHref()));
    }

    for (Link link : resource.getAssociationLinks()) {
        entity.addLink(client.getObjectFactory().newAssociationLink(link.getTitle(), base, link.getHref()));
    }

    for (Link link : resource.getNavigationLinks()) {
        final Entry inlineEntry = link.getInlineEntry();
        final Feed inlineFeed = link.getInlineFeed();

        if (inlineEntry == null && inlineFeed == null) {
            entity.addLink(
                    client.getObjectFactory().newEntityNavigationLink(link.getTitle(), base, link.getHref()));
        } else if (inlineFeed == null) {
            entity.addLink(client.getObjectFactory().newInlineEntity(link.getTitle(), base, link.getHref(),
                    getODataEntity(inlineEntry,
                            inlineEntry.getBaseURI() == null ? base : inlineEntry.getBaseURI())));
        } else {
            entity.addLink(client.getObjectFactory().newInlineEntitySet(link.getTitle(), base, link.getHref(),
                    getODataEntitySet(inlineFeed,
                            inlineFeed.getBaseURI() == null ? base : inlineFeed.getBaseURI())));
        }
    }

    for (Link link : resource.getMediaEditLinks()) {
        entity.addLink(client.getObjectFactory().newMediaEditLink(link.getTitle(), base, link.getHref()));
    }

    for (ODataOperation operation : resource.getOperations()) {
        operation.setTarget(URIUtils.getURI(base, operation.getTarget()));
        entity.getOperations().add(operation);
    }

    if (resource.isMediaEntry()) {
        entity.setMediaEntity(true);
        entity.setMediaContentSource(resource.getMediaContentSource());
        entity.setMediaContentType(resource.getMediaContentType());
    }

    for (Property property : resource.getProperties()) {
        entity.getProperties().add(getODataProperty(property));
    }

    return entity;
}

From source file: org.nuxeo.automation.scripting.blockly.converter.Chains2Blockly.java

public String convertXML(InputStream xmlChains) throws IOException {
    Element root = convert(xmlChains);
    OutputFormat format = OutputFormat.createPrettyPrint();
    StringWriter out = new StringWriter();
    XMLWriter writer = new XMLWriter(out, format);
    writer.write(root);
    out.flush();
    return out.getBuffer().toString();
}

From source file: org.nuxeo.ecm.webengine.model.Template.java

public String render() {
    StringWriter w = new StringWriter();
    try {
        ctx.render(script(), args, w);
    } catch (Exception e) {
        if (getRootCause(e) instanceof SocketException) {
            log.debug("Output socket closed: failed to write response");
        } else {
            throw WebException.wrap("Failed to write response", e);
        }
    }
    try {
        w.flush();
    } catch (Exception e) {
        if (getRootCause(e) instanceof SocketException) {
            log.debug("Output socket closed: failed to flush response");
        } else {
            throw WebException.wrap("Failed to flush response", e);
        }
    }
    return w.getBuffer().toString();
}