Example usage for java.io Writer append

List of usage examples for java.io Writer append

Introduction

This page presents example usages of the java.io.Writer.append method.

Prototype

public Writer append(char c) throws IOException 

Document

Appends the specified character to this writer.
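
For orientation, here is a minimal, self-contained sketch (not taken from the projects below; the class name and values are illustrative). Because append returns the Writer itself, calls can be chained, a pattern several of the examples below rely on.

import java.io.IOException;
import java.io.StringWriter;
import java.io.Writer;

public class WriterAppendExample {
    public static void main(String[] args) throws IOException {
        // StringWriter is a java.io.Writer backed by an in-memory buffer,
        // so the result is easy to inspect without touching the file system.
        Writer writer = new StringWriter();

        // append(char) writes a single character and returns this writer,
        // which allows chaining.
        writer.append('i').append('d').append('\t');

        // The append(CharSequence) overload, also used by some examples below.
        writer.append("42").append('\n');

        writer.flush();
        System.out.print(writer); // prints "id", a tab, "42" and a newline
    }
}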

Usage

From source file:org.apache.hadoop.hbase.mob.filecompactions.PartitionedMobFileCompactor.java

/**
 * Compacts the del file in a batch.
 * @param request The compaction request.
 * @param delFiles The del files.
 * @return The path of new del file after merging.
 * @throws IOException
 */
private Path compactDelFilesInBatch(PartitionedMobFileCompactionRequest request, List<StoreFile> delFiles)
        throws IOException {
    // create a scanner for the del files.
    StoreScanner scanner = createScanner(delFiles, ScanType.COMPACT_RETAIN_DELETES);
    Writer writer = null;
    Path filePath = null;
    try {
        writer = MobUtils.createDelFileWriter(conf, fs, column,
                MobUtils.formatDate(new Date(request.selectionTime)), tempPath, Long.MAX_VALUE,
                column.getCompactionCompression(), HConstants.EMPTY_START_ROW, compactionCacheConfig);
        filePath = writer.getPath();
        List<Cell> cells = new ArrayList<Cell>();
        boolean hasMore = false;
        ScannerContext scannerContext = ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build();
        do {
            hasMore = scanner.next(cells, scannerContext);
            for (Cell cell : cells) {
                // TODO remove this after the new code is introduced.
                KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
                writer.append(kv);
            }
            cells.clear();
        } while (hasMore);
    } finally {
        scanner.close();
        if (writer != null) {
            try {
                writer.close();
            } catch (IOException e) {
                LOG.error("Failed to close the writer of the file " + filePath, e);
            }
        }
    }
    // commit the new del file
    Path path = MobUtils.commitFile(conf, fs, filePath, mobFamilyDir, compactionCacheConfig);
    // archive the old del files
    try {
        MobUtils.removeMobFiles(conf, fs, tableName, mobTableDir, column.getName(), delFiles);
    } catch (IOException e) {
        LOG.error("Failed to archive the old del files " + delFiles, e);
    }
    return path;
}

From source file:org.structr.csv.ToCsvFunction.java

public static void writeCsv(final List list, final Writer out, final String propertyView,
        final List<String> properties, final char quoteChar, final char delimiterChar,
        final String recordSeparator, final boolean includeHeader, final boolean localizeHeader,
        final String headerLocalizationDomain, final Locale locale) throws IOException {

    final StringBuilder row = new StringBuilder();

    if (includeHeader) {

        row.setLength(0);

        boolean isFirstCol = true;

        if (propertyView != null) {

            final Object obj = list.get(0);

            if (obj instanceof GraphObject) {
                for (PropertyKey key : ((GraphObject) obj).getPropertyKeys(propertyView)) {
                    String value = key.dbName();
                    if (localizeHeader) {
                        try {
                            value = LocalizeFunction.getLocalization(locale, value, headerLocalizationDomain);
                        } catch (FrameworkException fex) {
                            logger.warn("to_csv(): Exception", fex);
                        }
                    }

                    isFirstCol = appendColumnString(row, value, isFirstCol, quoteChar, delimiterChar);
                }
            } else {
                row.append(
                        "Error: Object is not of type GraphObject, can not determine properties of view for header row");
            }

        } else if (properties != null) {

            for (final String colName : properties) {
                String value = colName;
                if (localizeHeader) {
                    try {
                        value = LocalizeFunction.getLocalization(locale, value, headerLocalizationDomain);
                    } catch (FrameworkException fex) {
                        logger.warn("to_csv(): Exception", fex);
                    }
                }

                isFirstCol = appendColumnString(row, value, isFirstCol, quoteChar, delimiterChar);
            }
        }

        out.append(row).append(recordSeparator).flush();

    }

    for (final Object obj : list) {

        row.setLength(0);

        boolean isFirstCol = true;

        if (propertyView != null) {

            if (obj instanceof GraphObject) {

                for (PropertyKey key : ((GraphObject) obj).getPropertyKeys(propertyView)) {

                    final Object value = ((GraphObject) obj).getProperty(key);
                    isFirstCol = appendColumnString(row, value, isFirstCol, quoteChar, delimiterChar);
                }
            } else {
                row.append("Error: Object is not of type GraphObject, can not determine properties of object");
            }

        } else if (properties != null) {

            if (obj instanceof GraphObject) {

                final GraphObject castedObj = (GraphObject) obj;

                for (final String colName : properties) {
                    final PropertyKey key = StructrApp.key(obj.getClass(), colName);
                    final Object value = castedObj.getProperty(key);
                    isFirstCol = appendColumnString(row, value, isFirstCol, quoteChar, delimiterChar);
                }
            } else if (obj instanceof Map) {

                final Map castedObj = (Map) obj;

                for (final String colName : properties) {
                    final Object value = castedObj.get(colName);
                    isFirstCol = appendColumnString(row, value, isFirstCol, quoteChar, delimiterChar);
                }

            }
        }

        // Replace \r and \n so we don't get multi-line CSV (the replacement needs four backslashes because of regex escaping)
        final String rowWithoutRecordSeparator = row.toString().replaceAll("\n", "\\\\n").replaceAll("\r",
                "\\\\r");

        out.append(rowWithoutRecordSeparator).append(recordSeparator).flush();
    }

}

From source file:org.easyrec.store.dao.core.impl.ItemAssocDAOMysqlImpl.java

private void writeItemAssoc(Writer writer, ItemAssocVO<Integer, Integer> itemAssoc) throws IOException {
    SimpleDateFormat sqlDateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
    writer.append(itemAssoc.getTenant().toString());
    writer.append('\t');

    writer.append(itemAssoc.getItemFrom().getItem().toString());
    writer.append('\t');

    writer.append(itemAssoc.getItemFrom().getType().toString());
    writer.append('\t');

    writer.append(itemAssoc.getAssocType().toString());
    writer.append('\t');

    writer.append(itemAssoc.getAssocValue().toString());
    writer.append('\t');

    writer.append(itemAssoc.getItemTo().getItem().toString());
    writer.append('\t');

    writer.append(itemAssoc.getItemTo().getType().toString());
    writer.append('\t');

    writer.append(itemAssoc.getSourceType().toString());
    writer.append('\t');

    String sourceInfo = itemAssoc.getSourceInfo();
    sourceInfo = sourceInfo.replace("\\", "\\\\");
    sourceInfo = sourceInfo.replace("\0", "\\0");
    sourceInfo = sourceInfo.replace("\b", "\\b");
    sourceInfo = sourceInfo.replace("\n", "\\n");
    sourceInfo = sourceInfo.replace("\r", "\\r");
    sourceInfo = sourceInfo.replace("\t", "\\t");
    writer.append(sourceInfo);
    writer.append('\t');

    writer.append(itemAssoc.getViewType().toString());
    writer.append('\t');

    String isActive = "1";
    if (itemAssoc.isActive() != null)
        isActive = itemAssoc.isActive() ? "1" : "0";
    writer.append(isActive);
    writer.append('\t');

    String changeDate = sqlDateFormat.format(itemAssoc.getChangeDate());
    writer.append(changeDate);
    writer.append('\t');

    writer.append('\n');
}

From source file:org.apache.sqoop.orm.ClassWriter.java

/**
 * Generate the ORM code for the class.
 */
public void generate(String invalidIdentifierPrefix) throws IOException {
    Map<String, Integer> columnTypes = getColumnTypes();

    String[] colNames = getColumnNames(columnTypes);

    // Translate all the column names into names that are safe to
    // use as identifiers.
    String[] cleanedColNames = cleanColNames(colNames, invalidIdentifierPrefix);
    Set<String> uniqColNames = new HashSet<String>();
    for (int i = 0; i < colNames.length; i++) {
        String identifier = cleanedColNames[i];

        // Name can't be blank
        if (identifier.isEmpty()) {
            throw new IllegalArgumentException("We found column without column "
                    + "name. Please verify that you've entered all column names "
                    + "in your query if using free form query import (consider "
                    + "adding clause AS if you're using column transformation)");
        }

        // Guarantee uniq col identifier
        if (uniqColNames.contains(identifier)) {
            throw new IllegalArgumentException(
                    "Duplicate Column identifier " + "specified: '" + identifier + "'");
        }
        uniqColNames.add(identifier);

        // Make sure the col->type mapping holds for the
        // new identifier name, too.
        String col = colNames[i];
        Integer type = columnTypes.get(col);
        if (type == null) {
            // Column doesn't have a type, which means it is an illegal column name.
            throw new IllegalArgumentException("Column name '" + col + "' not in table");
        }
        columnTypes.put(identifier, type);
    }

    // Check that all explicitly mapped columns are present in result set
    Properties mapping = options.getMapColumnJava();
    if (mapping != null && !mapping.isEmpty()) {
        for (Object column : mapping.keySet()) {
            if (!uniqColNames.contains((String) column)) {
                throw new IllegalArgumentException(
                        "No column by the name " + column + "found while importing data");
            }
        }
    }

    // The db write() method may use column names in a different
    // order. If this is set in the options, pull it out here and
    // make sure we format the column names to identifiers in the same way
    // as we do for the ordinary column list.
    String[] dbWriteColNames = options.getDbOutputColumns();
    String[] cleanedDbWriteColNames = null;
    if (null == dbWriteColNames) {
        cleanedDbWriteColNames = cleanedColNames;
    } else {
        cleanedDbWriteColNames = cleanColNames(dbWriteColNames);
    }

    if (LOG.isDebugEnabled()) {
        LOG.debug("selected columns:");
        for (String col : cleanedColNames) {
            LOG.debug("  " + col);
        }

        if (cleanedDbWriteColNames != cleanedColNames) {
            // dbWrite() has a different set of columns than the rest of the
            // generators.
            LOG.debug("db write column order:");
            for (String dbCol : cleanedDbWriteColNames) {
                LOG.debug("  " + dbCol);
            }
        }
    }

    // Generate the Java code.
    StringBuilder sb = generateClassForColumns(columnTypes, cleanedColNames, cleanedDbWriteColNames);

    // Write this out to a file in the jar output directory.
    // We'll move it to the user-visible CodeOutputDir after compiling.
    String codeOutDir = options.getJarOutputDir();

    // Get the class name to generate, which includes package components.
    String className = new TableClassName(options).getClassForTable(tableName);
    // Convert the '.' characters to '/' characters.
    String sourceFilename = className.replace('.', File.separatorChar) + ".java";
    String filename = codeOutDir + sourceFilename;

    if (LOG.isDebugEnabled()) {
        LOG.debug("Writing source file: " + filename);
        LOG.debug("Table name: " + tableName);
        StringBuilder sbColTypes = new StringBuilder();
        for (String col : colNames) {
            Integer colType = columnTypes.get(col);
            sbColTypes.append(col + ":" + colType + ", ");
        }
        String colTypeStr = sbColTypes.toString();
        LOG.debug("Columns: " + colTypeStr);
        LOG.debug("sourceFilename is " + sourceFilename);
    }

    compileManager.addSourceFile(sourceFilename);

    // Create any missing parent directories.
    File file = new File(filename);
    File dir = file.getParentFile();
    if (null != dir && !dir.exists()) {
        boolean mkdirSuccess = dir.mkdirs();
        if (!mkdirSuccess) {
            LOG.debug("Could not create directory tree for " + dir);
        }
    }

    OutputStream ostream = null;
    Writer writer = null;
    try {
        ostream = new FileOutputStream(filename);
        writer = new OutputStreamWriter(ostream);
        writer.append(sb.toString());
    } finally {
        if (null != writer) {
            try {
                writer.close();
            } catch (IOException ioe) {
                // ignored because we're closing.
            }
        }

        if (null != ostream) {
            try {
                ostream.close();
            } catch (IOException ioe) {
                // ignored because we're closing.
            }
        }
    }
}

From source file:pltag.parser.Lexicon.java

protected void extractFamilyLexicon(boolean writeToDisk) {
    try {
        Writer unlexSizeWriter = writeToDisk ? IOUtils.openOutEasy("family_size_lexicon.txt") : null;
        Collection<String> keyset = new ArrayList<String>(noOfTrees.keySet());
        for (String key : keyset) {
            if (!key.contains("LEXEME")) {
                noOfTrees.remove(key);
                continue;
            }
            Integer frequency = noOfTrees.get(key);
            String val = frequency.toString();
            if (unlexSizeWriter != null)
                unlexSizeWriter.append(val).append("\t").append(key).append("\n");
            if (frequency < 5) {
                noOfTrees.remove(key);
            } else if (frequency >= 100) {
                String[] info = key.split("\t");
                lexEntriesTree.put(info[0], "1\t" + info[1]);
            }
        }
        if (unlexSizeWriter != null)
            unlexSizeWriter.close();
    } catch (IOException e) {
        LogInfo.error(e);
    }
}

From source file:com.emc.ecs.sync.storage.CasStorageTest.java

private List<String> createTestClips(FPPool pool, int maxBlobSize, int thisMany, Writer summaryWriter)
        throws Exception {
    ExecutorService service = Executors.newFixedThreadPool(CAS_THREADS);

    System.out.print("Creating clips");

    List<String> clipIds = Collections.synchronizedList(new ArrayList<String>());
    List<String> summaries = Collections.synchronizedList(new ArrayList<String>());
    for (int clipIdx = 0; clipIdx < thisMany; clipIdx++) {
        service.submit(new ClipWriter(pool, clipIds, maxBlobSize, summaries));
    }

    service.shutdown();
    service.awaitTermination(CAS_SETUP_WAIT_MINUTES, TimeUnit.MINUTES);
    service.shutdownNow();

    Collections.sort(summaries);
    for (String summary : summaries) {
        summaryWriter.append(summary);
    }

    System.out.println();

    return clipIds;
}

From source file:org.netbeans.util.source.minify.MinifyUtil.java

public MinifyFileResult compressXml(String inputFilename, String outputFilename, MinifyProperty minifyProperty)
        throws IOException {
    InputStreamReader in = null;
    Writer out = null;
    MinifyFileResult minifyFileResult = new MinifyFileResult();
    try {
        File inputFile = new File(inputFilename);
        File outputFile = new File(outputFilename);
        in = new InputStreamReader(new FileInputStream(inputFile), minifyProperty.getCharset());
        minifyFileResult.setInputFileSize(inputFile.length());

        XmlCompressor compressor = new XmlCompressor();
        compressor.setRemoveIntertagSpaces(true);
        compressor.setRemoveComments(true);
        compressor.setEnabled(true);

        String output = compressor.compress(fromStream(in));//out, minifyProperty.getLineBreakPosition());

        in.close();
        in = null;

        out = new OutputStreamWriter(new FileOutputStream(outputFile), minifyProperty.getCharset());
        out.write(output);

        out.flush();
        minifyFileResult.setOutputFileSize(outputFile.length());
        if (minifyProperty.isAppendLogToFile()) {
            out.append("\n<!--Size: " + minifyFileResult.getInputFileSize() + "=>"
                    + minifyFileResult.getOutputFileSize() + "Bytes " + "\n Saved "
                    + minifyFileResult.getSavedPercentage() + "%-->");
        }
        out.flush();

    } finally {
        IOUtils.closeQuietly(in);
        IOUtils.closeQuietly(out);
    }
    return minifyFileResult;
}

From source file:org.jamwiki.migrate.MediaWikiXmlExporter.java

/**
 *
 */
private void writePages(Writer writer, String virtualWiki, List<String> topicNames, boolean excludeHistory)
        throws DataAccessException, IOException, MigrationException {
    // note that effort is being made to re-use temporary objects as this
    // code can generate an OOM "GC overhead limit exceeded" with HUGE (500MB) topics
    // since the garbage collector ends up being invoked excessively.
    TopicVersion topicVersion;
    Topic topic;
    WikiUser user;
    // choose 100,000 as an arbitrary default
    int maxRevisions = (Environment.getIntValue(Environment.PROP_MAX_TOPIC_VERSION_EXPORT) > 0)
            ? Environment.getIntValue(Environment.PROP_MAX_TOPIC_VERSION_EXPORT)
            : 100000;
    int revisionsRetrieved = 0;
    List<Integer> topicVersionIds;
    Map<String, String> textAttributes = new LinkedHashMap<String, String>();
    textAttributes.put("xml:space", "preserve");
    for (String topicName : topicNames) {
        topicVersionIds = new ArrayList<Integer>();
        topic = WikiBase.getDataHandler().lookupTopic(virtualWiki, topicName, false);
        if (topic == null) {
            throw new MigrationException(
                    "Failure while exporting: topic " + virtualWiki + ':' + topicName + " does not exist");
        }
        writer.append("\n<page>");
        writer.append('\n');
        XMLUtil.buildTag(writer, "title", topic.getName(), true);
        writer.append('\n');
        XMLUtil.buildTag(writer, "ns", topic.getNamespace().getId());
        writer.append('\n');
        XMLUtil.buildTag(writer, "id", topic.getTopicId());
        if (excludeHistory || (maxRevisions - revisionsRetrieved) <= 1) {
            // only include the most recent version
            topicVersionIds.add(topic.getCurrentVersionId());
        } else {
            // FIXME - changes sorted newest-to-oldest, should be reverse
            Pagination pagination = new Pagination(maxRevisions - revisionsRetrieved, 0);
            List<RecentChange> changes = WikiBase.getDataHandler().getTopicHistory(topic, pagination, true);
            revisionsRetrieved += changes.size();
            for (int i = (changes.size() - 1); i >= 0; i--) {
                topicVersionIds.add(changes.get(i).getTopicVersionId());
            }
        }
        for (int topicVersionId : topicVersionIds) {
            topicVersion = WikiBase.getDataHandler().lookupTopicVersion(topicVersionId);
            writer.append("\n<revision>");
            writer.append('\n');
            XMLUtil.buildTag(writer, "id", topicVersion.getTopicVersionId());
            writer.append('\n');
            XMLUtil.buildTag(writer, "timestamp", this.parseJAMWikiTimestamp(topicVersion.getEditDate()), true);
            writer.append("\n<contributor>");
            user = (topicVersion.getAuthorId() != null)
                    ? WikiBase.getDataHandler().lookupWikiUser(topicVersion.getAuthorId())
                    : null;
            if (user != null) {
                writer.append('\n');
                XMLUtil.buildTag(writer, "username", user.getUsername(), true);
                writer.append('\n');
                XMLUtil.buildTag(writer, "id", user.getUserId());
            } else if (Utilities.isIpAddress(topicVersion.getAuthorDisplay())) {
                writer.append('\n');
                XMLUtil.buildTag(writer, "ip", topicVersion.getAuthorDisplay(), true);
            } else {
                writer.append('\n');
                XMLUtil.buildTag(writer, "username", topicVersion.getAuthorDisplay(), true);
            }
            writer.append("\n</contributor>");
            writer.append('\n');
            if (topicVersion.getEditType() == TopicVersion.EDIT_MINOR) {
                XMLUtil.buildTag(writer, "minor", "", true);
                writer.append('\n');
            }
            XMLUtil.buildTag(writer, "comment", topicVersion.getEditComment(), true);
            writer.append('\n');
            textAttributes.put("bytes", Long.toString(topicVersion.getVersionContent().getBytes().length));
            XMLUtil.buildTag(writer, "text", topicVersion.getVersionContent(), textAttributes, true);
            writer.append("\n</revision>");
            // explicitly null out temp variables to improve garbage collection and
            // avoid OOM "GC overhead limit exceeded" errors on HUGE (500MB) topics
            topicVersion = null;
            user = null;
        }
        writer.append("\n</page>");
    }
}

From source file:org.netbeans.util.source.minify.MinifyUtil.java

public MinifyFileResult compressJavaScript(String inputFilename, String outputFilename,
        MinifyProperty minifyProperty) throws IOException {
    Reader in = null;
    Writer out = null;
    MinifyFileResult minifyFileResult = new MinifyFileResult();
    try {
        File inputFile = new File(inputFilename);
        File outputFile = new File(outputFilename);
        in = new InputStreamReader(new FileInputStream(inputFile), minifyProperty.getCharset());
        minifyFileResult.setInputFileSize(inputFile.length());

        JavaScriptCompressor compressor = new JavaScriptCompressor(in,
                new MinifyUtil.CompressorErrorReporter());
        in.close();
        in = null;

        out = new OutputStreamWriter(new FileOutputStream(outputFile), minifyProperty.getCharset());
        compressor.compress(out, minifyProperty.getLineBreakPosition(), minifyProperty.isJsObfuscate(),
                minifyProperty.getVerbose(), minifyProperty.isPreserveSemicolon(),
                minifyProperty.getDisableOptimizations());
        out.flush();
        minifyFileResult.setOutputFileSize(outputFile.length());
        if (minifyProperty.isAppendLogToFile()) {
            out.append("\n/*Size: " + minifyFileResult.getInputFileSize() + "->"
                    + minifyFileResult.getOutputFileSize() + "Bytes " + "\n Saved "
                    + minifyFileResult.getSavedPercentage() + "%*/");
        }
        out.flush();

    } finally {
        IOUtils.closeQuietly(in);
        IOUtils.closeQuietly(out);
    }
    return minifyFileResult;
}

From source file:com.redhat.rhn.frontend.taglibs.ListDisplayTag.java

private void renderBoundsVariables(Writer out) throws IOException {
    StringBuilder target = new StringBuilder();
    // pagination formvars
    renderHidden(target, "lower", String.valueOf(getPageList().getStart()));

    PaginationUtil putil = new PaginationUtil(getPageList().getStart(), getPageList().getEnd(),
            getPageList().getEnd() - getPageList().getStart() + 1, getPageList().getTotalSize());

    renderHidden(target, FIRST_LOWER, putil.getFirstLower());
    renderHidden(target, PREV_LOWER, putil.getPrevLower());
    renderHidden(target, NEXT_LOWER, putil.getNextLower());
    renderHidden(target, LAST_LOWER, putil.getLastLower());
    out.append(target.toString());
}