Example usage for org.jfree.util Log.error

Introduction

This page collects example usages of org.jfree.util Log.error from open-source projects.

Prototype

public static void error(final Object message, final Exception e) 

Document

A convenience method for logging an 'error' message.
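
Before the project examples, here is a minimal, self-contained sketch of the call pattern (the class name ConfigLoader and the file name config.properties are invented for illustration, not taken from any example below):

import java.io.FileReader;
import java.io.IOException;

import org.jfree.util.Log;

public class ConfigLoader {
    public void load() {
        try {
            // Any I/O work that can fail at runtime
            new FileReader("config.properties").close();
        } catch (IOException e) {
            // Pass a human-readable message plus the caught exception
            Log.error("Could not read config.properties", e);
        }
    }
}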

Usage

From source file:de.csw.ontology.XWikiTextEnhancer.java

/**
 * The enhanced text contains links to the Lucene search page of the xWiki
 * system. The search terms are related to the annotated phrase.
 */
public String enhance(String text) {
    CSWGermanAnalyzer ga = new CSWGermanAnalyzer();
    TokenStream ts = null;
    StringBuilder result = new StringBuilder();

    initializeLinkIndex(text);

    try {
        Reader r = new BufferedReader(new StringReader(text));

        ts = ga.tokenStream("", r);

        CharTermAttribute charTermAttribute = ts.addAttribute(CharTermAttribute.class);
        OffsetAttribute offsetAttribute = ts.addAttribute(OffsetAttribute.class);
        TypeAttribute typeAttribute = ts.addAttribute(TypeAttribute.class);

        String term;
        int lastEndIndex = 0;

        while (ts.incrementToken()) {

            result.append(text.substring(lastEndIndex, offsetAttribute.startOffset()));
            term = String.copyValueOf(charTermAttribute.buffer(), 0, charTermAttribute.length());

            if (typeAttribute.type().equals(ConceptFilter.CONCEPT_TYPE) && isAnnotatable(offsetAttribute)) {
                log.debug("Annotating concept: " + term);
                annotateWithSearch(result,
                        text.substring(offsetAttribute.startOffset(), offsetAttribute.endOffset()), term);
            } else {
                result.append(text.substring(offsetAttribute.startOffset(), offsetAttribute.endOffset()));
            }

            lastEndIndex = offsetAttribute.endOffset();
        }
        result.append(text.subSequence(lastEndIndex, text.length()));
    } catch (IOException e) {
        Log.error("Error while processing the page content", e);
    }

    ga.close();
    return result.toString();
}

From source file:com.ah.bo.hiveap.HiveApImageInfo.java

@Transient
public void setReleaseData(String releaseData) {
    this.releaseData = releaseData;
    try {
        Date date = rlDataFormat.parse(releaseData);
        this.releaseTime = date.getTime();
    } catch (Exception e) {
        Log.error("HOS Image release data\" + releaseData + \" parse error", e);
    }
}

From source file:com.pedra.core.setup.CoreSystemSetup.java

private File getProjectdataUpdateDirectory(String relativeUpdateDirectory) {
    if (relativeUpdateDirectory == null) {
        relativeUpdateDirectory = "";
    }
    String projectdataUpdateFolderProperty = Config.getString("projectdata.update.folder",
            "/pedracore/import/versions");
    projectdataUpdateFolderProperty = projectdataUpdateFolderProperty + relativeUpdateDirectory;
    File projectdataUpdateFolder = null;
    try {
        projectdataUpdateFolder = new File(getClass().getResource(projectdataUpdateFolderProperty).toURI());
    } catch (final URISyntaxException e) {
        Log.error("error finding project data update directory[" + projectdataUpdateFolderProperty + "]", e);
        return null;
    }
    if (!projectdataUpdateFolder.exists()) {
        Log.warn("project data update directory [" + projectdataUpdateFolderProperty + "] does not exist");
        return null;
    } else if (ArrayUtils.isEmpty(projectdataUpdateFolder.listFiles())) {
        Log.info("Project datad update directory[" + projectdataUpdateFolderProperty + "] is empty");
    }
    return projectdataUpdateFolder;
}

From source file:com.redoute.datamap.dao.impl.PictureDAO.java

private void updateFilePicture(String id, String value) {
    Picture pic = findPictureByKey(id);
    pic.setBase64(value);
    try {
        if (DAOUtil.isEmpty(pic.getLocalPath())) {
            pic.setLocalPath(pictureFileHelper.createLocalPath(pic));
            updatePicture(id, "localpath", pic.getLocalPath());
        }
        pictureFileHelper.save(pic, true);
    } catch (HTML5CanvasURLParsingException e) {
        Log.error("Unable to update picture " + pic, e);
    }
}

From source file:org.jenkinsci.plugins.GitLabSecurityRealm.java

private String extractToken(String content) {

    try {
        ObjectMapper mapper = new ObjectMapper();
        JsonNode jsonTree = mapper.readTree(content);
        JsonNode node = jsonTree.get("access_token");
        if (node != null) {
            return node.asText();
        }
    } catch (JsonProcessingException e) {
        Log.error(e.getMessage(), e);
    } catch (IOException e) {
        Log.error(e.getMessage(), e);
    }
    return null;
}

From source file:com.digitalpebble.behemoth.mahout.SparseVectorsFromBehemoth.java

public int run(String[] args) throws Exception {
    DefaultOptionBuilder obuilder = new DefaultOptionBuilder();
    ArgumentBuilder abuilder = new ArgumentBuilder();
    GroupBuilder gbuilder = new GroupBuilder();

    Option inputDirOpt = DefaultOptionCreator.inputOption().create();

    Option outputDirOpt = DefaultOptionCreator.outputOption().create();

    Option minSupportOpt = obuilder.withLongName("minSupport")
            .withArgument(abuilder.withName("minSupport").withMinimum(1).withMaximum(1).create())
            .withDescription("(Optional) Minimum Support. Default Value: 2").withShortName("s").create();

    Option typeNameOpt = obuilder.withLongName("typeToken").withRequired(false)
            .withArgument(abuilder.withName("typeToken").withMinimum(1).withMaximum(1).create())
            .withDescription("The annotation type for Tokens").withShortName("t").create();

    Option featureNameOpt = obuilder.withLongName("featureName").withRequired(false)
            .withArgument(abuilder.withName("featureName").withMinimum(1).withMaximum(1).create())
            .withDescription(
                    "The name of the feature containing the token values, uses the text if unspecified")
            .withShortName("f").create();

    Option analyzerNameOpt = obuilder.withLongName("analyzerName")
            .withArgument(abuilder.withName("analyzerName").withMinimum(1).withMaximum(1).create())
            .withDescription("The class name of the analyzer").withShortName("a").create();

    Option chunkSizeOpt = obuilder.withLongName("chunkSize")
            .withArgument(abuilder.withName("chunkSize").withMinimum(1).withMaximum(1).create())
            .withDescription("The chunkSize in MegaBytes. 100-10000 MB").withShortName("chunk").create();

    Option weightOpt = obuilder.withLongName("weight").withRequired(false)
            .withArgument(abuilder.withName("weight").withMinimum(1).withMaximum(1).create())
            .withDescription("The kind of weight to use. Currently TF or TFIDF").withShortName("wt").create();

    Option minDFOpt = obuilder.withLongName("minDF").withRequired(false)
            .withArgument(abuilder.withName("minDF").withMinimum(1).withMaximum(1).create())
            .withDescription("The minimum document frequency.  Default is 1").withShortName("md").create();

    Option maxDFPercentOpt = obuilder.withLongName("maxDFPercent").withRequired(false)
            .withArgument(abuilder.withName("maxDFPercent").withMinimum(1).withMaximum(1).create())
            .withDescription(
                    "The max percentage of docs for the DF.  Can be used to remove really high frequency terms."
                            + " Expressed as an integer between 0 and 100. Default is 99.  If maxDFSigma is also set, it will override this value.")
            .withShortName("x").create();

    Option maxDFSigmaOpt = obuilder.withLongName("maxDFSigma").withRequired(false)
            .withArgument(abuilder.withName("maxDFSigma").withMinimum(1).withMaximum(1).create())
            .withDescription(
                    "What portion of the tf (tf-idf) vectors to be used, expressed in times the standard deviation (sigma) of the document frequencies of these vectors."
                            + "  Can be used to remove really high frequency terms."
                            + " Expressed as a double value. Good value to be specified is 3.0. In case the value is less then 0 no vectors "
                            + "will be filtered out. Default is -1.0.  Overrides maxDFPercent")
            .withShortName("xs").create();

    Option minLLROpt = obuilder.withLongName("minLLR").withRequired(false)
            .withArgument(abuilder.withName("minLLR").withMinimum(1).withMaximum(1).create())
            .withDescription("(Optional)The minimum Log Likelihood Ratio(Float)  Default is "
                    + LLRReducer.DEFAULT_MIN_LLR)
            .withShortName("ml").create();

    Option numReduceTasksOpt = obuilder.withLongName("numReducers")
            .withArgument(abuilder.withName("numReducers").withMinimum(1).withMaximum(1).create())
            .withDescription("(Optional) Number of reduce tasks. Default Value: 1").withShortName("nr")
            .create();

    Option powerOpt = obuilder.withLongName("norm").withRequired(false)
            .withArgument(abuilder.withName("norm").withMinimum(1).withMaximum(1).create())
            .withDescription(
                    "The norm to use, expressed as either a float or \"INF\" if you want to use the Infinite norm.  "
                            + "Must be greater or equal to 0.  The default is not to normalize")
            .withShortName("n").create();

    Option logNormalizeOpt = obuilder.withLongName("logNormalize").withRequired(false)
            .withDescription("(Optional) Whether output vectors should be logNormalize. If set true else false")
            .withShortName("lnorm").create();

    Option maxNGramSizeOpt = obuilder.withLongName("maxNGramSize").withRequired(false)
            .withArgument(abuilder.withName("ngramSize").withMinimum(1).withMaximum(1).create())
            .withDescription("(Optional) The maximum size of ngrams to create"
                    + " (2 = bigrams, 3 = trigrams, etc) Default Value:1")
            .withShortName("ng").create();

    Option sequentialAccessVectorOpt = obuilder.withLongName("sequentialAccessVector").withRequired(false)
            .withDescription(
                    "(Optional) Whether output vectors should be SequentialAccessVectors. If set true else false")
            .withShortName("seq").create();

    Option namedVectorOpt = obuilder.withLongName("namedVector").withRequired(false)
            .withDescription("(Optional) Whether output vectors should be NamedVectors. If set true else false")
            .withShortName("nv").create();

    Option overwriteOutput = obuilder.withLongName("overwrite").withRequired(false)
            .withDescription("If set, overwrite the output directory").withShortName("ow").create();

    Option labelMDOpt = obuilder.withLongName("labelMDKey").withRequired(false)
            .withArgument(abuilder.withName("label_md_key").create())
            .withDescription("Document metadata holding the label").withShortName("label").create();

    Option helpOpt = obuilder.withLongName("help").withDescription("Print out help").withShortName("h")
            .create();

    Group group = gbuilder.withName("Options").withOption(minSupportOpt).withOption(typeNameOpt)
            .withOption(featureNameOpt).withOption(analyzerNameOpt).withOption(chunkSizeOpt)
            .withOption(outputDirOpt).withOption(inputDirOpt).withOption(minDFOpt).withOption(maxDFSigmaOpt)
            .withOption(maxDFPercentOpt).withOption(weightOpt).withOption(powerOpt).withOption(minLLROpt)
            .withOption(numReduceTasksOpt).withOption(maxNGramSizeOpt).withOption(overwriteOutput)
            .withOption(helpOpt).withOption(sequentialAccessVectorOpt).withOption(namedVectorOpt)
            .withOption(logNormalizeOpt).withOption(labelMDOpt).create();
    CommandLine cmdLine = null;
    try {
        Parser parser = new Parser();
        parser.setGroup(group);
        parser.setHelpOption(helpOpt);
        cmdLine = parser.parse(args);

        if (cmdLine.hasOption(helpOpt)) {
            CommandLineUtil.printHelp(group);
            return -1;
        }

        if (!cmdLine.hasOption(inputDirOpt)) {
            CommandLineUtil.printHelp(group);
            return -1;
        }

        if (!cmdLine.hasOption(outputDirOpt)) {
            CommandLineUtil.printHelp(group);
            return -1;
        }

    } catch (OptionException e) {
        log.error("Exception", e);
        CommandLineUtil.printHelp(group);
        return -1;
    }

    Path inputDir = new Path((String) cmdLine.getValue(inputDirOpt));
    Path outputDir = new Path((String) cmdLine.getValue(outputDirOpt));

    int chunkSize = 100;
    if (cmdLine.hasOption(chunkSizeOpt)) {
        chunkSize = Integer.parseInt((String) cmdLine.getValue(chunkSizeOpt));
    }
    int minSupport = 2;
    if (cmdLine.hasOption(minSupportOpt)) {
        String minSupportString = (String) cmdLine.getValue(minSupportOpt);
        minSupport = Integer.parseInt(minSupportString);
    }

    int maxNGramSize = 1;

    if (cmdLine.hasOption(maxNGramSizeOpt)) {
        try {
            maxNGramSize = Integer.parseInt(cmdLine.getValue(maxNGramSizeOpt).toString());
        } catch (NumberFormatException ex) {
            log.warn("Could not parse ngram size option");
        }
    }
    log.info("Maximum n-gram size is: {}", maxNGramSize);

    if (cmdLine.hasOption(overwriteOutput)) {
        HadoopUtil.delete(getConf(), outputDir);
    }

    float minLLRValue = LLRReducer.DEFAULT_MIN_LLR;
    if (cmdLine.hasOption(minLLROpt)) {
        minLLRValue = Float.parseFloat(cmdLine.getValue(minLLROpt).toString());
    }
    log.info("Minimum LLR value: {}", minLLRValue);

    int reduceTasks = 1;
    if (cmdLine.hasOption(numReduceTasksOpt)) {
        reduceTasks = Integer.parseInt(cmdLine.getValue(numReduceTasksOpt).toString());
    }
    log.info("Number of reduce tasks: {}", reduceTasks);

    Class<? extends Analyzer> analyzerClass = DefaultAnalyzer.class;
    if (cmdLine.hasOption(analyzerNameOpt)) {
        String className = cmdLine.getValue(analyzerNameOpt).toString();
        analyzerClass = Class.forName(className).asSubclass(Analyzer.class);
        // try instantiating it, b/c there isn't any point in setting it
        // if you can't instantiate it
        ClassUtils.instantiateAs(analyzerClass, Analyzer.class);
    }

    String type = null;
    String featureName = "";
    if (cmdLine.hasOption(typeNameOpt)) {
        type = cmdLine.getValue(typeNameOpt).toString();
        Object tempFN = cmdLine.getValue(featureNameOpt);
        if (tempFN != null) {
            featureName = tempFN.toString();
            log.info("Getting tokens from " + type + "." + featureName.toString());
        } else
            log.info("Getting tokens from " + type);
    }

    boolean processIdf;

    if (cmdLine.hasOption(weightOpt)) {
        String wString = cmdLine.getValue(weightOpt).toString();
        if ("tf".equalsIgnoreCase(wString)) {
            processIdf = false;
        } else if ("tfidf".equalsIgnoreCase(wString)) {
            processIdf = true;
        } else {
            throw new OptionException(weightOpt);
        }
    } else {
        processIdf = true;
    }

    int minDf = 1;
    if (cmdLine.hasOption(minDFOpt)) {
        minDf = Integer.parseInt(cmdLine.getValue(minDFOpt).toString());
    }
    int maxDFPercent = 99;
    if (cmdLine.hasOption(maxDFPercentOpt)) {
        maxDFPercent = Integer.parseInt(cmdLine.getValue(maxDFPercentOpt).toString());
    }
    double maxDFSigma = -1.0;
    if (cmdLine.hasOption(maxDFSigmaOpt)) {
        maxDFSigma = Double.parseDouble(cmdLine.getValue(maxDFSigmaOpt).toString());
    }

    float norm = PartialVectorMerger.NO_NORMALIZING;
    if (cmdLine.hasOption(powerOpt)) {
        String power = cmdLine.getValue(powerOpt).toString();
        if ("INF".equals(power)) {
            norm = Float.POSITIVE_INFINITY;
        } else {
            norm = Float.parseFloat(power);
        }
    }

    boolean logNormalize = false;
    if (cmdLine.hasOption(logNormalizeOpt)) {
        logNormalize = true;
    }

    String labelMDKey = null;
    if (cmdLine.hasOption(labelMDOpt)) {
        labelMDKey = cmdLine.getValue(labelMDOpt).toString();
    }

    Configuration conf = getConf();
    Path tokenizedPath = new Path(outputDir, DocumentProcessor.TOKENIZED_DOCUMENT_OUTPUT_FOLDER);

    // annotation type defined : tokenize using the annotations
    if (type != null) {
        BehemothDocumentProcessor.tokenizeDocuments(inputDir, type, featureName, tokenizedPath);
    }
    // no annotation type defined : rely on Lucene's analysers
    else {
        BehemothDocumentProcessor.tokenizeDocuments(inputDir, analyzerClass, tokenizedPath, conf);
    }
    boolean sequentialAccessOutput = false;
    if (cmdLine.hasOption(sequentialAccessVectorOpt)) {
        sequentialAccessOutput = true;
    }

    boolean namedVectors = false;
    if (cmdLine.hasOption(namedVectorOpt)) {
        namedVectors = true;
    }
    boolean shouldPrune = maxDFSigma >= 0.0;
    String tfDirName = shouldPrune ? DictionaryVectorizer.DOCUMENT_VECTOR_OUTPUT_FOLDER + "-toprune"
            : DictionaryVectorizer.DOCUMENT_VECTOR_OUTPUT_FOLDER;

    try {
        if (!processIdf) {
            DictionaryVectorizer.createTermFrequencyVectors(tokenizedPath, outputDir, tfDirName, conf,
                    minSupport, maxNGramSize, minLLRValue, norm, logNormalize, reduceTasks, chunkSize,
                    sequentialAccessOutput, namedVectors);
        } else {
            DictionaryVectorizer.createTermFrequencyVectors(tokenizedPath, outputDir, tfDirName, conf,
                    minSupport, maxNGramSize, minLLRValue, -1.0f, false, reduceTasks, chunkSize,
                    sequentialAccessOutput, namedVectors);
        }
        Pair<Long[], List<Path>> docFrequenciesFeatures = null;
        // Should document frequency features be processed
        if (shouldPrune || processIdf) {
            docFrequenciesFeatures = TFIDFConverter.calculateDF(new Path(outputDir, tfDirName), outputDir, conf,
                    chunkSize);
        }

        long maxDF = maxDFPercent; // if we are pruning by std dev, then
                                   // this will get changed
        if (shouldPrune) {
            Path dfDir = new Path(outputDir, TFIDFConverter.WORDCOUNT_OUTPUT_FOLDER);
            Path stdCalcDir = new Path(outputDir, HighDFWordsPruner.STD_CALC_DIR);

            // Calculate the standard deviation
            double stdDev = BasicStats.stdDevForGivenMean(dfDir, stdCalcDir, 0.0, conf);
            long vectorCount = docFrequenciesFeatures.getFirst()[1];
            maxDF = (int) (100.0 * maxDFSigma * stdDev / vectorCount);

            // Prune the term frequency vectors
            Path tfDir = new Path(outputDir, tfDirName);
            Path prunedTFDir = new Path(outputDir, DictionaryVectorizer.DOCUMENT_VECTOR_OUTPUT_FOLDER);
            Path prunedPartialTFDir = new Path(outputDir,
                    DictionaryVectorizer.DOCUMENT_VECTOR_OUTPUT_FOLDER + "-partial");
            if (processIdf) {
                HighDFWordsPruner.pruneVectors(tfDir, prunedTFDir, prunedPartialTFDir, maxDF, conf,
                        docFrequenciesFeatures, -1.0f, false, reduceTasks);
            } else {
                HighDFWordsPruner.pruneVectors(tfDir, prunedTFDir, prunedPartialTFDir, maxDF, conf,
                        docFrequenciesFeatures, norm, logNormalize, reduceTasks);
            }
            HadoopUtil.delete(new Configuration(conf), tfDir);
        }
        if (processIdf) {
            TFIDFConverter.processTfIdf(new Path(outputDir, DictionaryVectorizer.DOCUMENT_VECTOR_OUTPUT_FOLDER),
                    outputDir, conf, docFrequenciesFeatures, minDf, maxDF, norm, logNormalize,
                    sequentialAccessOutput, namedVectors, reduceTasks);
        }

        // dump labels?
        if (labelMDKey != null) {
            conf.set(BehemothDocumentProcessor.MD_LABEL, labelMDKey);
            BehemothDocumentProcessor.dumpLabels(inputDir, new Path(outputDir, "labels"), conf);
        }
    } catch (RuntimeException e) {
        Log.error("Exception caught", e);
        return -1;
    }

    return 0;
}

From source file:com.evolveum.midpoint.model.impl.controller.ModelController.java

@Override
public void importObjectsFromFile(File input, ImportOptionsType options, Task task,
        OperationResult parentResult) throws FileNotFoundException {
    OperationResult result = parentResult.createSubresult(IMPORT_OBJECTS_FROM_FILE);
    FileInputStream fis;
    try {
        fis = new FileInputStream(input);
    } catch (FileNotFoundException e) {
        String msg = "Error reading from file " + input + ": " + e.getMessage();
        result.recordFatalError(msg, e);
        throw e;
    }
    try {
        importObjectsFromStream(fis, options, task, parentResult);
    } catch (RuntimeException e) {
        result.recordFatalError(e);
        throw e;
    } finally {
        try {
            fis.close();
        } catch (IOException e) {
            Log.error("Error closing file " + input + ": " + e.getMessage(), e);
        }
    }
    result.computeStatus();
}

From source file:org.egov.eis.web.controller.reports.EmployeeAssignmentReportPDFController.java

@RequestMapping(value = "/reports/employeeassignments/pdf", method = RequestMethod.GET)
public @ResponseBody ResponseEntity<byte[]> generateEmployeeAssignmentsPDF(final HttpServletRequest request,
        @RequestParam("code") final String code, @RequestParam("name") final String name,
        @RequestParam("departmentId") final Long departmentId,
        @RequestParam("designationId") final Long designationId,
        @RequestParam("positionId") final Long positionId,
        @RequestParam("contentType") final String contentType, @RequestParam("date") final Date date,
        final HttpSession session, final Model model) throws DocumentException {
    final EmployeeAssignmentSearch employeeAssignmentSearch = new EmployeeAssignmentSearch();
    employeeAssignmentSearch.setEmployeeCode(code);
    employeeAssignmentSearch.setEmployeeName(name);
    employeeAssignmentSearch.setDepartment(departmentId);
    employeeAssignmentSearch.setDesignation(designationId);
    employeeAssignmentSearch.setPosition(positionId);
    employeeAssignmentSearch.setAssignmentDate(date);

    final List<Employee> employeeList = assignmentService.searchEmployeeAssignments(employeeAssignmentSearch);
    final StringBuilder searchCriteria = new StringBuilder();
    searchCriteria.append("Employee Assignment Report as on ");
    if (employeeAssignmentSearch.getAssignmentDate() != null)
        searchCriteria.append(DateUtils.getDefaultFormattedDate(employeeAssignmentSearch.getAssignmentDate()));
    if (StringUtils.isNotBlank(employeeAssignmentSearch.getEmployeeName()))
        searchCriteria.append(", Employee Name : ").append(employeeAssignmentSearch.getEmployeeName())
                .append("");
    if (StringUtils.isNotBlank(employeeAssignmentSearch.getEmployeeCode()))
        searchCriteria.append(", Employee Code : ").append(employeeAssignmentSearch.getEmployeeCode())
                .append(" ");
    if (employeeAssignmentSearch.getDepartment() != null) {
        final Department department = departmentService
                .getDepartmentById(employeeAssignmentSearch.getDepartment());
        searchCriteria.append(" for Department : ").append(department.getName()).append(" ");
    }
    if (employeeAssignmentSearch.getDesignation() != null) {
        final Designation designation = designationService
                .getDesignationById(employeeAssignmentSearch.getDesignation());
        searchCriteria.append(" and Designation : ").append(designation.getName()).append(" ");
    }
    if (employeeAssignmentSearch.getPosition() != null) {
        final Position position = positionMasterService.getPositionById(employeeAssignmentSearch.getPosition());
        searchCriteria.append(" and Position : ").append(position.getName()).append(" ");
    }

    String searchString = StringUtils.EMPTY;
    if (searchCriteria.toString().endsWith(" "))
        searchString = searchCriteria.substring(0, searchCriteria.length() - 1);

    final List<EmployeeAssignmentSearch> searchResult = new ArrayList<EmployeeAssignmentSearch>();
    Map<String, String> tempAssignments = null;
    EmployeeAssignmentSearch empAssignmentSearch = null;
    int maxTempAssignments = 0;
    for (final Employee employee : employeeList) {
        int index = 0;
        tempAssignments = new HashMap<String, String>();
        empAssignmentSearch = new EmployeeAssignmentSearch();
        empAssignmentSearch.setEmployeeCode(employee.getCode());
        empAssignmentSearch.setEmployeeName(employee.getName());
        for (final Assignment assignment : employee.getAssignments())
            if (assignment.getPrimary()) {
                empAssignmentSearch.setDepartmentName(assignment.getDepartment().getName());
                empAssignmentSearch.setDesignationName(assignment.getDesignation().getName());
                empAssignmentSearch.setPositionName(assignment.getPosition().getName());
                empAssignmentSearch.setDateRange(DateUtils.getDefaultFormattedDate(assignment.getFromDate())
                        + " - " + DateUtils.getDefaultFormattedDate(assignment.getToDate()));
            } else {
                tempAssignments.put("department_" + String.valueOf(index),
                        assignment.getDepartment().getName());
                tempAssignments.put("designation_" + String.valueOf(index),
                        assignment.getDesignation().getName());
                tempAssignments.put("position_" + String.valueOf(index), assignment.getPosition().getName());
                tempAssignments.put("daterange_" + String.valueOf(index),
                        DateUtils.getDefaultFormattedDate(assignment.getFromDate()) + " - "
                                + DateUtils.getDefaultFormattedDate(assignment.getToDate()));
                index++;
            }
        empAssignmentSearch.setTempPositionDetails(tempAssignments);
        searchResult.add(empAssignmentSearch);
        if (employee.getAssignments().size() >= maxTempAssignments)
            maxTempAssignments = employee.getAssignments().size();

    }
    JasperPrint jasperPrint;
    ByteArrayOutputStream outputBytes = null;
    try {
        jasperPrint = generateEmployeeAssignmentReport(searchResult, maxTempAssignments, searchString);
        outputBytes = new ByteArrayOutputStream(MB);
        JasperExportManager.exportReportToPdfStream(jasperPrint, outputBytes);
    } catch (final Exception e) {
        Log.error("Error while generating employee assignment report ", e);
    }
    final ReportOutput reportOutput = new ReportOutput();
    reportOutput.setReportOutputData(outputBytes.toByteArray());
    final HttpHeaders headers = new HttpHeaders();
    if (contentType.equalsIgnoreCase("pdf")) {
        reportOutput.setReportFormat(ReportFormat.PDF);
        headers.setContentType(MediaType.parseMediaType("application/pdf"));
        headers.add("content-disposition", "inline;filename=EmployeeAssignment.pdf");
    } else {
        reportOutput.setReportFormat(ReportFormat.XLS);
        headers.setContentType(MediaType.parseMediaType("application/vnd.ms-excel"));
        headers.add("content-disposition", "inline;filename=EmployeeAssignment.xls");
    }
    return new ResponseEntity<byte[]>(reportOutput.getReportOutputData(), headers, HttpStatus.CREATED);
}

From source file:org.intermine.web.struts.InitialiserPlugin.java

/**
 * Load the bag queries that describe how objects should be uniquely identified
 */
private BagQueryConfig loadBagQueries(ServletContext servletContext, ObjectStore os, Properties webProperties) {
    BagQueryConfig bagQueryConfig = null;
    InputStream is = servletContext.getResourceAsStream("/WEB-INF/bag-queries.xml");
    if (is != null) {
        try {
            bagQueryConfig = BagQueryHelper.readBagQueryConfig(os.getModel(), is);
        } catch (Exception e) {
            Log.error("Error loading class bag queries. ", e);
            blockingErrorKeys.put("errors.init.bagqueries", e.getMessage());
        }
        InputStream isBag = getClass().getClassLoader().getResourceAsStream("extraBag.properties");
        Properties bagProperties = new Properties();
        if (isBag != null) {
            try {
                bagProperties.load(isBag);
                bagQueryConfig.setConnectField(bagProperties.getProperty("extraBag.connectField"));
                bagQueryConfig.setExtraConstraintClassName(bagProperties.getProperty("extraBag.className"));
                bagQueryConfig.setConstrainField(bagProperties.getProperty("extraBag.constrainField"));
            } catch (IOException e) {
                Log.error("Error loading extraBag.properties. ", e);
                blockingErrorKeys.put("errors.init.extrabagloading", null);
            }
        } else {
            LOG.error("Could not find extraBag.properties file");
            blockingErrorKeys.put("errors.init.extrabag", null);
        }
    } else {
        // can use defaults so just log a warning
        LOG.warn("No custom bag queries found - using default query");
    }
    return bagQueryConfig;
}

From source file:org.jenkinsci.plugins.viewer.XPathConfig.java

/**
 * Returns the xml block from the given file using the given xpath expression.
 *
 * @return the xml block
 */
@SuppressWarnings("rawtypes")
public Element getXmlBlock(File xmlFile) {
    if (StringUtils.isEmpty(this.getXpath())) {
        return null;
    }

    try {
        Document dom = new SAXReader().read(xmlFile);

        List nodes = dom.selectNodes(this.xpath);

        if (nodes.size() > 0) {
            return (Element) nodes.get(0);
        }
    } catch (Exception e) {
        Log.error("Exception getting xml block from config.xml: ", e);
    }

    return null;
}