Example usage for java.io FileNotFoundException toString

List of usage examples for java.io FileNotFoundException toString

Introduction

In this page you can find the example usage for java.io FileNotFoundException toString.

Prototype

public String toString() 

Source Link

Document

Returns a short description of this throwable.

Usage

From source file:org.kuali.kfs.module.ld.batch.service.impl.LaborScrubberProcess.java

/**
 * This will process a group of origin entries. The COBOL code was refactored a lot to get this, so there isn't a
 * 1-to-1 section of COBOL relating to this.
 */
protected void processGroup() {
    scrubCostShareAmount = KualiDecimal.ZERO;
    unitOfWork = new UnitOfWorkInfo();

    FileReader INPUT_GLE_FILE = null;
    BufferedReader INPUT_GLE_FILE_br = null;
    PrintStream OUTPUT_GLE_FILE_ps = null;
    PrintStream OUTPUT_ERR_FILE_ps = null;
    PrintStream OUTPUT_EXP_FILE_ps = null;
    try {
        INPUT_GLE_FILE = new FileReader(inputFile);
    } catch (FileNotFoundException e) {
        throw new RuntimeException("Unable to find input file: " + inputFile, e);
    }
    try {
        OUTPUT_GLE_FILE_ps = new PrintStream(validFile);
        OUTPUT_ERR_FILE_ps = new PrintStream(errorFile);
        OUTPUT_EXP_FILE_ps = new PrintStream(expiredFile);
    } catch (IOException e) {
        // Release anything that was already opened; previously the input reader
        // (and any partially-opened output streams) leaked on this path.
        closeQuietly(OUTPUT_ERR_FILE_ps);
        closeQuietly(OUTPUT_GLE_FILE_ps);
        closeQuietly(INPUT_GLE_FILE);
        throw new RuntimeException("Problem opening output files", e);
    }

    INPUT_GLE_FILE_br = new BufferedReader(INPUT_GLE_FILE);
    LOG.debug("Starting Scrubber Process process group...");

    int lineNumber = 0;   // physical line in the input file (for error reporting)
    int loadedCount = 0;  // non-blank records parsed

    LedgerSummaryReport laborLedgerSummaryReport = new LedgerSummaryReport();
    LaborOriginEntry unscrubbedEntry = new LaborOriginEntry();
    List<Message> tmperrors = new ArrayList<Message>();
    try {
        String currentLine = INPUT_GLE_FILE_br.readLine();

        while (currentLine != null) {
            boolean saveErrorTransaction = false;
            boolean saveValidTransaction = false;
            LaborOriginEntry scrubbedEntry = new LaborOriginEntry();
            try {
                lineNumber++;

                // Blank lines are skipped entirely; they are not counted as loaded records.
                if (!StringUtils.isEmpty(currentLine) && !StringUtils.isBlank(currentLine.trim())) {
                    unscrubbedEntry = new LaborOriginEntry();
                    tmperrors = unscrubbedEntry.setFromTextFileForBatch(currentLine, lineNumber);
                    loadedCount++;

                    // just test entry with the entry loaded above
                    scrubberReport.incrementUnscrubbedRecordsRead();
                    List<Message> transactionErrors = new ArrayList<Message>();

                    // This is done so if the code modifies this row, then saves it, it will be an insert,
                    // and it won't touch the original. The Scrubber never modifies input rows/groups.
                    unscrubbedEntry.setGroup(null);
                    unscrubbedEntry.setVersionNumber(null);
                    unscrubbedEntry.setEntryId(null);
                    saveErrorTransaction = false;
                    saveValidTransaction = false;

                    // Build a scrubbed entry (Labor has more fields than GL)
                    buildScrubbedEntry(unscrubbedEntry, scrubbedEntry);

                    // For Labor Scrubber
                    boolean laborIndicator = true;
                    laborLedgerSummaryReport.summarizeEntry(unscrubbedEntry);

                    try {
                        tmperrors.addAll(scrubberValidator.validateTransaction(unscrubbedEntry, scrubbedEntry,
                                universityRunDate, laborIndicator, laborAccountingCycleCachingService));
                    } catch (Exception e) {
                        // A validator blow-up is recorded as a fatal message on this record
                        // rather than aborting the whole run.
                        transactionErrors.add(
                                new Message(e.toString() + " occurred for this record.", Message.TYPE_FATAL));
                        saveValidTransaction = false;
                    }
                    transactionErrors.addAll(tmperrors);

                    // Expired account? Expired/closed-account records go to the expired
                    // file in addition to normal valid/error routing.
                    Account unscrubbedEntryAccount = laborAccountingCycleCachingService.getAccount(
                            unscrubbedEntry.getChartOfAccountsCode(), unscrubbedEntry.getAccountNumber());
                    if (ObjectUtils.isNotNull(unscrubbedEntry.getAccount())
                            && (scrubberValidator.isAccountExpired(unscrubbedEntryAccount, universityRunDate)
                                    || unscrubbedEntryAccount.isClosed())) {
                        // Make a copy of it so OJB doesn't just update the row in the original
                        // group. It needs to make a new one in the expired group
                        LaborOriginEntry expiredEntry = new LaborOriginEntry(scrubbedEntry);

                        createOutputEntry(expiredEntry, OUTPUT_EXP_FILE_ps);
                        scrubberReport.incrementExpiredAccountFound();
                    }

                    if (!isFatal(transactionErrors)) {
                        saveValidTransaction = true;

                        // See if unit of work has changed
                        if (!unitOfWork.isSameUnitOfWork(scrubbedEntry)) {
                            // Generate offset for last unit of work
                            unitOfWork = new UnitOfWorkInfo(scrubbedEntry);
                        }
                        KualiDecimal transactionAmount = scrubbedEntry.getTransactionLedgerEntryAmount();
                        ParameterEvaluator offsetFiscalPeriods = /*REFACTORME*/SpringContext
                                .getBean(ParameterEvaluatorService.class)
                                .getParameterEvaluator(ScrubberStep.class,
                                        GeneralLedgerConstants.GlScrubberGroupRules.OFFSET_FISCAL_PERIOD_CODES,
                                        scrubbedEntry.getUniversityFiscalPeriodCode());
                        BalanceType scrubbedEntryBalanceType = laborAccountingCycleCachingService
                                .getBalanceType(scrubbedEntry.getFinancialBalanceTypeCode());
                        // Accumulate the running offset amount for this unit of work:
                        // debits add, credits subtract.
                        if (scrubbedEntryBalanceType.isFinancialOffsetGenerationIndicator()
                                && offsetFiscalPeriods.evaluationSucceeds()) {
                            if (scrubbedEntry.isDebit()) {
                                unitOfWork.offsetAmount = unitOfWork.offsetAmount.add(transactionAmount);
                            } else {
                                unitOfWork.offsetAmount = unitOfWork.offsetAmount.subtract(transactionAmount);
                            }
                        }

                        // The sub account type code will only exist if there is a valid sub account
                        // TODO: GLConstants.getSpaceSubAccountTypeCode();
                        // NOTE(review): subAccountTypeCode is computed but never used below —
                        // the lookup is kept in case the caching call is relied on for warm-up;
                        // confirm before removing.
                        String subAccountTypeCode = "  ";

                        A21SubAccount scrubbedEntryA21SubAccount = laborAccountingCycleCachingService
                                .getA21SubAccount(scrubbedEntry.getChartOfAccountsCode(),
                                        scrubbedEntry.getAccountNumber(), scrubbedEntry.getSubAccountNumber());
                        if (ObjectUtils.isNotNull(scrubbedEntryA21SubAccount)) {
                            subAccountTypeCode = scrubbedEntryA21SubAccount.getSubAccountTypeCode();
                        }

                        // Non-fatal warnings on an otherwise valid record still go to the report.
                        if (transactionErrors.size() > 0) {
                            this.laborMainReportWriterService.writeError(unscrubbedEntry, transactionErrors);
                        }
                    } else {
                        // Error transaction
                        saveErrorTransaction = true;
                        this.laborMainReportWriterService.writeError(unscrubbedEntry, transactionErrors);
                    }

                    if (saveValidTransaction) {
                        scrubbedEntry.setTransactionScrubberOffsetGenerationIndicator(false);
                        createOutputEntry(scrubbedEntry, OUTPUT_GLE_FILE_ps);
                        scrubberReport.incrementScrubbedRecordWritten();
                    }

                    if (saveErrorTransaction) {
                        // Make a copy of it so OJB doesn't just update the row in the original
                        // group. It needs to make a new one in the error group
                        // NOTE(review): errorEntry is built but the raw currentLine is what is
                        // written — looks like dead code or a latent bug; confirm intent.
                        LaborOriginEntry errorEntry = new LaborOriginEntry(unscrubbedEntry);
                        errorEntry.setTransactionScrubberOffsetGenerationIndicator(false);
                        createOutputEntry(currentLine, OUTPUT_ERR_FILE_ps);
                        scrubberReport.incrementErrorRecordWritten();
                    }
                }
                currentLine = INPUT_GLE_FILE_br.readLine();

            } catch (IOException ioe) {
                // catch here again, it should be from postSingleEntryIntoLaborLedger
                LOG.error("processGroup() stopped due to: " + ioe.getMessage() + " on line number : "
                        + loadedCount, ioe);
                throw new RuntimeException("processGroup() stopped due to: " + ioe.getMessage()
                        + " on line number : " + loadedCount, ioe);
            }
        }

        this.laborMainReportWriterService.writeStatisticLine("UNSCRUBBED RECORDS READ              %,9d",
                scrubberReport.getNumberOfUnscrubbedRecordsRead());
        this.laborMainReportWriterService.writeStatisticLine("SCRUBBED RECORDS WRITTEN             %,9d",
                scrubberReport.getNumberOfScrubbedRecordsWritten());
        this.laborMainReportWriterService.writeStatisticLine("ERROR RECORDS WRITTEN                %,9d",
                scrubberReport.getNumberOfErrorRecordsWritten());
        this.laborMainReportWriterService.writeStatisticLine("TOTAL OUTPUT RECORDS WRITTEN         %,9d",
                scrubberReport.getTotalNumberOfRecordsWritten());
        this.laborMainReportWriterService.writeStatisticLine("EXPIRED ACCOUNTS FOUND               %,9d",
                scrubberReport.getNumberOfExpiredAccountsFound());

        laborLedgerSummaryReport.writeReport(this.laborLedgerReportWriterService);
    } catch (IOException ioe) {
        LOG.error("processGroup() stopped due to: " + ioe.getMessage(), ioe);
        throw new RuntimeException("processGroup() stopped due to: " + ioe.getMessage(), ioe);
    } finally {
        // Always release file handles — previously these were closed only on the
        // happy path and leaked whenever any exception above was thrown.
        closeQuietly(INPUT_GLE_FILE_br);
        closeQuietly(INPUT_GLE_FILE);
        closeQuietly(OUTPUT_GLE_FILE_ps);
        closeQuietly(OUTPUT_ERR_FILE_ps);
        closeQuietly(OUTPUT_EXP_FILE_ps);
    }
}

/**
 * Closes the given resource, logging (rather than propagating) any failure.
 * Null-safe so it can be called unconditionally from finally blocks.
 *
 * @param resource resource to close; may be null
 */
private void closeQuietly(java.io.Closeable resource) {
    if (resource != null) {
        try {
            resource.close();
        } catch (IOException e) {
            LOG.warn("Unable to close resource", e);
        }
    }
}

From source file:org.etudes.jforum.view.admin.ImportExportAction.java

/**
 * Copies the file at the input path to the file at the output path.
 *
 * @param inputurl
 *            - path of the source file
 * @param outputurl
 *            - path of the destination file
 * @throws Exception
 */
public void createFile(String inputurl, String outputurl) throws Exception {
    // try-with-resources replaces the old manual finally: close() failures on the
    // output stream are no longer silently swallowed (a failed close can mean the
    // copied data never reached disk), and the streams can no longer leak.
    try (FileInputStream in = new FileInputStream(new File(inputurl));
            FileOutputStream out = new FileOutputStream(new File(outputurl))) {
        int len;
        byte buf[] = new byte[102400]; // 100 KiB copy buffer
        while ((len = in.read(buf)) > 0) {
            out.write(buf, 0, len);
        }
    } catch (FileNotFoundException e) {
        // Original contract preserved: a missing source/destination file is
        // logged and the method returns normally instead of propagating.
        if (logger.isErrorEnabled())
            logger.error(e.toString());
    }
    // Other IOExceptions propagate naturally (the old `catch (IOException e)
    // { throw e; }` was a no-op and has been removed).
}

From source file:org.apache.hadoop.hive.ql.exec.DDLTask.java

/**
 * Write a list of indexes to a file.
 *
 * @param db
 *          The database in question.
 * @param showIndexes
 *          These are the indexes we're interested in.
 * @return Returns 0 when execution succeeds and above 0 if it fails.
 * @throws HiveException
 *           Throws this exception if an unexpected error occurs.
 */
private int showIndexes(Hive db, ShowIndexesDesc showIndexes) throws HiveException {
    // get the indexes for the table and populate the output
    String tableName = showIndexes.getTableName();
    Table tbl = null;
    List<Index> indexes = null;

    tbl = db.getTable(tableName);

    // -1 means "all indexes" for this metastore call.
    indexes = db.getIndexes(tbl.getDbName(), tbl.getTableName(), (short) -1);

    // In case the query is served by HiveServer2, don't pad it with spaces,
    // as HiveServer2 output is consumed by JDBC/ODBC clients.
    boolean isOutputPadded = !SessionState.get().isHiveServerQuery();

    // write the results in the file
    DataOutputStream outStream = getOutputStream(showIndexes.getResFile());
    try {
        if (showIndexes.isFormatted()) {
            // column headers
            outStream.write(MetaDataFormatUtils.getIndexColumnsHeader().getBytes(StandardCharsets.UTF_8));
            outStream.write(terminator);
            outStream.write(terminator);
        }

        for (Index index : indexes) {
            outStream.write(MetaDataFormatUtils.getIndexInformation(index, isOutputPadded)
                    .getBytes(StandardCharsets.UTF_8));
        }
    } catch (IOException e) {
        // Covers FileNotFoundException too — the two previous catch blocks were
        // identical, and FileNotFoundException is a subclass of IOException.
        LOG.info("show indexes: " + stringifyException(e));
        // Preserve the cause so the original stack trace isn't lost.
        throw new HiveException(e.toString(), e);
    } catch (Exception e) {
        throw new HiveException(e.toString(), e);
    } finally {
        IOUtils.closeStream(outStream);
    }

    return 0;
}

From source file:org.apache.hadoop.hive.ql.exec.DDLTask.java

/**
 * Writes the current locks to the result file in the new (db lock manager) format.
 *
 * @param showLocks descriptor carrying db/table/partition filters and the result file
 * @param lm the lock manager; must be a {@link DbLockManager}
 * @return 0 on success, 1 when writing the result file fails
 * @throws HiveException on an unexpected error while fetching or dumping locks
 */
private int showLocksNewFormat(ShowLocksDesc showLocks, HiveLockManager lm) throws HiveException {

    DbLockManager lockMgr;
    if (!(lm instanceof DbLockManager)) {
        throw new RuntimeException("New lock format only supported with db lock manager.");
    }
    lockMgr = (DbLockManager) lm;

    String dbName = showLocks.getDbName();
    String tblName = showLocks.getTableName();
    Map<String, String> partSpec = showLocks.getPartSpec();
    // A table without an explicit db defaults to the session's current database.
    if (dbName == null && tblName != null) {
        dbName = SessionState.get().getCurrentDatabase();
    }

    ShowLocksRequest rqst = new ShowLocksRequest();
    rqst.setDbname(dbName);
    rqst.setTablename(tblName);
    if (partSpec != null) {
        List<String> keyList = new ArrayList<String>();
        List<String> valList = new ArrayList<String>();
        // Iterate over entries without mutating the map: the previous code called
        // partSpec.remove(partKey) while iterating keySet(), which throws
        // ConcurrentModificationException for any spec with more than one entry.
        for (Map.Entry<String, String> part : partSpec.entrySet()) {
            keyList.add(part.getKey());
            valList.add(part.getValue());
        }
        String partName = FileUtils.makePartName(keyList, valList);
        rqst.setPartname(partName);
    }

    ShowLocksResponse rsp = lockMgr.getLocks(rqst);

    // write the results in the file
    DataOutputStream os = getOutputStream(showLocks.getResFile());
    try {
        dumpLockInfo(os, rsp);
    } catch (IOException e) {
        // Covers FileNotFoundException too; log prefix fixed from the
        // copy/pasted "show function:".
        LOG.warn("show locks: " + stringifyException(e));
        return 1;
    } catch (Exception e) {
        // Preserve the cause so the original stack trace isn't lost.
        throw new HiveException(e.toString(), e);
    } finally {
        IOUtils.closeStream(os);
    }
    return 0;
}

From source file:org.apache.hadoop.hive.ql.exec.DDLTask.java

/**
 * Write a list of the current locks to a file.
 *
 * @param db
 *          The database in question.
 * @param showLocks
 *          the locks we're interested in.
 * @return Returns 0 when execution succeeds and above 0 if it fails.
 * @throws HiveException
 *           Throws this exception if an unexpected error occurs.
 */
private int showLocks(Hive db, ShowLocksDesc showLocks) throws HiveException {
    Context ctx = driverContext.getCtx();
    HiveTxnManager txnManager = ctx.getHiveTxnManager();
    HiveLockManager lockMgr = txnManager.getLockManager();

    // Newer transaction managers render locks in a different format; delegate.
    if (txnManager.useNewShowLocksFormat())
        return showLocksNewFormat(showLocks, lockMgr);

    boolean isExt = showLocks.isExt();
    if (lockMgr == null) {
        throw new HiveException("show Locks LockManager not specified");
    }

    // write the results in the file
    DataOutputStream outStream = getOutputStream(showLocks.getResFile());
    try {
        List<HiveLock> locks = null;

        if (showLocks.getTableName() == null) {
            // No table filter: fetch all locks.
            // TODO should be doing security check here.  Users should not be
            // able to see each other's locks.
            locks = lockMgr.getLocks(false, isExt);
        } else {
            // Fetch locks only for the named table (and optional partition spec).
            locks = lockMgr.getLocks(
                    HiveLockObject.createFrom(db, showLocks.getTableName(), showLocks.getPartSpec()), true,
                    isExt);
        }

        // Sort by lock-object name; for equal names, EXCLUSIVE sorts before SHARED.
        Collections.sort(locks, new Comparator<HiveLock>() {

            @Override
            public int compare(HiveLock o1, HiveLock o2) {
                int cmp = o1.getHiveLockObject().getName().compareTo(o2.getHiveLockObject().getName());
                if (cmp == 0) {
                    if (o1.getHiveLockMode() == o2.getHiveLockMode()) {
                        return cmp;
                    }
                    // EXCLUSIVE locks occur before SHARED locks
                    if (o1.getHiveLockMode() == HiveLockMode.EXCLUSIVE) {
                        return -1;
                    }
                    return +1;
                }
                return cmp;
            }

        });

        Iterator<HiveLock> locksIter = locks.iterator();

        // One line per lock: "<name><separator><mode>", with extended lock data
        // (query id/time/mode/string) appended when -ext was requested.
        while (locksIter.hasNext()) {
            HiveLock lock = locksIter.next();
            outStream.writeBytes(lock.getHiveLockObject().getDisplayName());
            outStream.write(separator);
            outStream.writeBytes(lock.getHiveLockMode().toString());
            if (isExt) {
                HiveLockObjectData lockData = lock.getHiveLockObject().getData();
                if (lockData != null) {
                    outStream.write(terminator);
                    outStream.writeBytes("LOCK_QUERYID:" + lockData.getQueryId());
                    outStream.write(terminator);
                    outStream.writeBytes("LOCK_TIME:" + lockData.getLockTime());
                    outStream.write(terminator);
                    outStream.writeBytes("LOCK_MODE:" + lockData.getLockMode());
                    outStream.write(terminator);
                    outStream.writeBytes("LOCK_QUERYSTRING:" + lockData.getQueryStr());
                }
            }
            outStream.write(terminator);
        }
    } catch (FileNotFoundException e) {
        // NOTE(review): "show function:" looks like a copy/paste from the
        // show-functions handler — confirm and fix the log prefix.
        LOG.warn("show function: " + stringifyException(e));
        return 1;
    } catch (IOException e) {
        LOG.warn("show function: " + stringifyException(e));
        return 1;
    } catch (Exception e) {
        throw new HiveException(e.toString(), e);
    } finally {
        IOUtils.closeStream(outStream);
    }
    return 0;
}

From source file:com.sentaroh.android.SMBExplorer.SMBExplorerMain.java

/**
 * Builds the profile-list adapter from either an external file (when sdcard is
 * true, read from path fp) or the app's private profile file, and installs it
 * on the profile ListView. Read failures are reported via a dialog and the
 * list falls back to the local entries only.
 *
 * @param sdcard true to read profiles from the external file at fp
 * @param fp path of the external profile file (ignored when sdcard is false)
 * @return the adapter that was set on the profile ListView
 */
private ProfileListAdapter createProfileList(boolean sdcard, String fp) {

    ProfileListAdapter pfl = null;
    BufferedReader br = null;

    sendDebugLogMsg(1, "I", "Create profilelist");

    List<ProfileListItem> lcl = createLocalProfileEntry();

    List<ProfileListItem> rem = new ArrayList<ProfileListItem>();

    boolean error = false;

    try {
        if (sdcard) {
            File sf = new File(fp);

            if (sf.exists()) {
                // NOTE(review): FileReader uses the platform charset here, while the
                // internal file below is read as UTF-8 — confirm this is intended.
                br = new BufferedReader(new FileReader(fp));
            } else {
                commonDlg.showCommonDialog(false, "E",
                        String.format(getString(R.string.msgs_local_file_list_create_nfound), fp), "", null);
                error = true;
            }

        } else {
            InputStream in = openFileInput(SMBEXPLORER_PROFILE_NAME);
            br = new BufferedReader(new InputStreamReader(in, "UTF-8"));
        }
        if (!error) {
            String pl;
            String[] alp;
            // One profile per line; each line parses into exactly 8 fields.
            while ((pl = br.readLine()) != null) {
                alp = parseProfileString(pl);
                rem.add(new ProfileListItem(alp[0], alp[1], alp[2], alp[3], alp[4], alp[5], alp[6], alp[7],
                        false));
            }
        }
    } catch (FileNotFoundException e) {
        e.printStackTrace();
        sendDebugLogMsg(0, "E", e.toString());
        commonDlg.showCommonDialog(false, "E", getString(R.string.msgs_exception), e.toString(), null);
        error = true;
    } catch (IOException e) {
        e.printStackTrace();
        sendDebugLogMsg(0, "E", e.toString());
        commonDlg.showCommonDialog(false, "E", getString(R.string.msgs_exception), e.toString(), null);
        error = true;
    } finally {
        // Fix: the reader was previously closed only on the happy path and
        // leaked whenever reading threw or an error was flagged.
        if (br != null) {
            try {
                br.close();
            } catch (IOException ignored) {
                // nothing useful can be done if close fails
            }
        }
    }
    Collections.sort(rem);
    lcl.addAll(rem);
    if (lcl.size() == 0)
        lcl.add(new ProfileListItem("", "No profiles", "I", "", "", "", "", "", false));
    // profileListView = (ListView)findViewById(android.R.id.list);
    pfl = new ProfileListAdapter(this, R.layout.profile_list_view_item, lcl);
    profileListView.setAdapter(pfl);
    pfl.setNotifyOnChange(true);

    return pfl;
}