Example usage for org.apache.commons.io FilenameUtils getFullPath

List of usage examples for org.apache.commons.io FilenameUtils getFullPath

Introduction

On this page you can find an example usage for org.apache.commons.io FilenameUtils getFullPath.

Prototype

public static String getFullPath(String filename) 

Source Link

Document

Gets the full path from a full filename, which is the prefix + path.

Usage

From source file:edu.cornell.med.icb.goby.modes.SplitTranscriptsMode.java

/**
 * Perform the split transcripts mode.
 *
 * <p>Two-pass algorithm: first pass reads every FastX entry to build the
 * gene/transcript relationship tables; second phase assigns each transcript
 * to an output bucket (at most {@code maxEntriesPerFile} per file) and then
 * writes each gzipped output file one at a time.
 *
 * @throws IOException error reading / writing
 */
@Override
public void execute() throws IOException {
    // Load the gene to transcripts file
    if (!config.validate()) {
        throw new IOException("Invalid SplitTranscripts configuration");
    }
    final GeneTranscriptRelationships gtr = new GeneTranscriptRelationships();
    final IndexedIdentifier transcriptIdents = new IndexedIdentifier();
    final Int2ObjectMap<MutableString> transcriptIndexToIdMap = new Int2ObjectOpenHashMap<MutableString>();
    final List<FastXEntry> fastxEntries = new LinkedList<FastXEntry>();
    //
    // Pass through the file once to collect the transcript - gene relationships
    //
    int entryCount = 0;
    try {
        for (final FastXEntry entry : new FastXReader(config.getInputFile())) {
            entryCount++;
            // parseHeader populates the transcriptHeader map used below
            parseHeader(entry.getEntryHeader());
            final MutableString transcriptId = transcriptHeader.get("transcriptId");
            final MutableString geneId = transcriptHeader.get("geneId");

            final int transcriptIndex = transcriptIdents.registerIdentifier(transcriptId);
            gtr.addRelationship(geneId, transcriptIndex);

            transcriptIndexToIdMap.put(transcriptIndex, transcriptId);

            // entries are cloned so they survive past the reader's iteration
            fastxEntries.add(entry.clone());
        }
    } catch (CloneNotSupportedException e) {
        LOG.error("Couldn't clone for some reason", e);
        throw new GobyRuntimeException("Couldn't clone for some reason", e);
    }

    LOG.info("Loading map of genes-transcripts complete.");

    //
    // Scan through the transcript-gene relationships to determine which
    // transcript id goes into which file
    //
    final Int2IntMap transcriptIndex2FileIndex = new Int2IntOpenHashMap();
    final String configOutputFilename = config.getOutputBase() + ".config";
    final String configOutputPath = FilenameUtils.getFullPath(configOutputFilename);
    if (StringUtils.isNotBlank(configOutputPath)) {
        LOG.info("Creating output directory: " + configOutputPath);
        FileUtils.forceMkdir(new File(configOutputPath));
    }

    PrintWriter configOutput = null;
    try {
        configOutput = new PrintWriter(configOutputFilename);
        configOutput.println("Ensembl Gene ID\tEnsembl Transcript ID");

        final Int2IntMap fileIndex2NumberOfEntries = new Int2IntOpenHashMap();
        fileIndex2NumberOfEntries.defaultReturnValue(0);
        // -1 marks "not yet assigned to a file"
        transcriptIndex2FileIndex.defaultReturnValue(-1);

        final int initialNumberOfFiles = getNumberOfFiles(gtr, transcriptIndex2FileIndex);

        for (int geneIndex = 0; geneIndex < gtr.getNumberOfGenes(); geneIndex++) {
            final MutableString geneId = gtr.getGeneId(geneIndex);
            final IntSet transcriptIndices = gtr.getTranscriptSet(geneIndex);
            int fileNum = 0;

            for (final int transcriptIndex : transcriptIndices) {
                if (transcriptIndex2FileIndex.get(transcriptIndex) != -1) {
                    LOG.warn("Skipping repeated transcriptIndex: " + transcriptIndex);
                    continue;
                }
                final int maxEntriesPerFile = config.getMaxEntriesPerFile();
                final int numberOfEntriesInOriginalBucket = fileIndex2NumberOfEntries.get(fileNum);
                // When a bucket exceeds maxEntriesPerFile, overflow entries are
                // shifted into a new set of files (offset by initialNumberOfFiles).
                final int adjustedFileIndex = fileNum
                        + initialNumberOfFiles * (numberOfEntriesInOriginalBucket / maxEntriesPerFile);

                transcriptIndex2FileIndex.put(transcriptIndex, adjustedFileIndex);
                fileIndex2NumberOfEntries.put(fileNum, fileIndex2NumberOfEntries.get(fileNum) + 1);
                final MutableString transcriptId = transcriptIndexToIdMap.get(transcriptIndex);
                configOutput.printf("%s\t%s%n", geneId, transcriptId);

                fileNum++;
            }
        }
    } finally {
        IOUtils.closeQuietly(configOutput);
    }

    final int numFiles = getFileIndices(transcriptIndex2FileIndex).size();
    if (LOG.isInfoEnabled()) {
        LOG.info(NumberFormat.getInstance().format(entryCount) + " entries will be written to " + numFiles
                + " files");
        final int maxEntriesPerFile = config.getMaxEntriesPerFile();
        if (maxEntriesPerFile < Integer.MAX_VALUE) {
            LOG.info("Each file will contain at most " + maxEntriesPerFile + " entries");
        }
    }

    // formatter for uniquely numbering files each with the same number of digits
    final NumberFormat fileNumberFormatter = getNumberFormatter(numFiles - 1);

    final ProgressLogger progressLogger = new ProgressLogger();
    progressLogger.expectedUpdates = entryCount;
    progressLogger.itemsName = "entries";
    progressLogger.start();

    // Write each file one at a time rather than in the order they appear in the input file
    // to avoid the issue of having too many streams open at the same or continually opening
    // and closing streams which is quite costly.  We could store the gene/transcripts in
    // memory and then just write the files at the end but that could be worse.
    for (final int fileIndex : getFileIndices(transcriptIndex2FileIndex)) {
        final String filename = config.getOutputBase() + "." + fileNumberFormatter.format(fileIndex) + ".fa.gz";
        PrintStream printStream = null;
        try {
            // each file is compressed
            printStream = new PrintStream(new GZIPOutputStream(new FileOutputStream(filename)));

            //
            // Read through the input file get the actual sequence information
            //
            final Iterator<FastXEntry> entries = fastxEntries.iterator();
            while (entries.hasNext()) {
                final FastXEntry entry = entries.next();
                parseHeader(entry.getEntryHeader());
                final MutableString transcriptId = transcriptHeader.get("transcriptId");
                final MutableString geneId = transcriptHeader.get("geneId");
                final int transcriptIndex = transcriptIdents.getInt(transcriptId);
                final int transcriptFileIndex = transcriptIndex2FileIndex.get(transcriptIndex);
                if (transcriptFileIndex == fileIndex) {
                    printStream.print(entry.getHeaderSymbol());
                    printStream.print(transcriptId);
                    printStream.print(" gene:");
                    printStream.println(geneId);
                    printStream.println(entry.getEntrySansHeader());
                    // remove written entries so later passes scan fewer items
                    entries.remove();
                    progressLogger.lightUpdate();
                }
            }
        } finally {
            IOUtils.closeQuietly(printStream);
        }
    }

    assert progressLogger.count == entryCount : "Some entries were not processed!";
    progressLogger.done();
}

From source file:MSUmpire.PSMDataStructure.LCMSID.java

/**
 * Reads a serialized {@code LCMSID} from the "&lt;dir&gt;/&lt;basename&gt;[_tag]_LCMSID.serFS"
 * file derived from the given file path, using FST deserialization.
 *
 * @param filepath path whose directory and base name locate the serialized file
 * @param tag      optional tag inserted into the file name; prefixed with "_" when non-empty
 * @return the deserialized LCMSID, or {@code null} if the file does not exist
 *         or deserialization fails (failure is logged, not rethrown)
 * @throws Exception declared for caller compatibility; read failures are caught and logged
 */
private static LCMSID FS_Read(String filepath, String tag) throws Exception {
    if (!tag.equals("")) {
        tag = "_" + tag;
    }
    // Build the serialized-file path once instead of repeating the same
    // concatenation three times (keeps the copies from drifting apart).
    final String serPath = FilenameUtils.getFullPath(filepath) + FilenameUtils.getBaseName(filepath) + tag
            + "_LCMSID.serFS";
    if (!new File(serPath).exists()) {
        return null;
    }
    try {
        Logger.getRootLogger().info("Reading ID results from file:" + serPath + "...");

        FileInputStream fileIn = new FileInputStream(serPath);
        FSTObjectInput in = new FSTObjectInput(fileIn);
        LCMSID lcmsid = (LCMSID) in.readObject(LCMSID.class);
        in.close();
        fileIn.close();
        return lcmsid;

    } catch (Exception ex) {
        Logger.getRootLogger().info("Reading LCMSID FS results failed.");
        Logger.getRootLogger().error(ExceptionUtils.getStackTrace(ex));
        return null;
    }
}

From source file:MSUmpire.PSMDataStructure.LCMSID.java

/**
 * Serializes this {@code LCMSID} to "&lt;dir&gt;/&lt;basename&gt;[_tag]_LCMSID.serFS"
 * using FST serialization.
 *
 * @param filepath path whose directory and base name locate the output file
 * @param tag      optional tag inserted into the file name; prefixed with "_" when non-empty
 * @return {@code true} on success, {@code false} if writing failed (failure is logged)
 */
private boolean FSWrite(String filepath, String tag) {
    try {
        if (!tag.equals("")) {
            tag = "_" + tag;
        }
        // Build the output path once instead of repeating the concatenation.
        final String serPath = FilenameUtils.getFullPath(filepath) + FilenameUtils.getBaseName(filepath) + tag
                + "_LCMSID.serFS";
        Logger.getRootLogger().info("Writing ID results to file:" + serPath + "...");
        FileOutputStream fout = new FileOutputStream(serPath, false);
        FSTObjectOutput out = new FSTObjectOutput(fout);
        // shrink transient structures before serialization
        ReduceMemoryUsage();
        out.writeObject(this, LCMSID.class);
        out.close();
        fout.close();
    } catch (Exception ex) {
        Logger.getRootLogger().error(ExceptionUtils.getStackTrace(ex));
        return false;
    }
    return true;
}

From source file:au.com.redboxresearchdata.fascinator.harvester.MintJsonHarvester.java

/**
 * Replaces the value stored under {@code key} with the directory portion of its
 * current value plus {@code appendage}.
 *
 * @param harvest   harvest configuration object being updated
 * @param key       key whose value holds a file path
 * @param appendage value appended after the directory portion of the current path
 * @throws HarvesterException if the key lookup or update fails
 */
private void appendToFullPathOfHarvestKeyValue(JsonObject harvest, String key, String appendage)
        throws HarvesterException {
    final String currentValue = getHarvestKeyValue(harvest, key);
    // current rules config path may have been updated - ensure only the current path is used
    final String currentDirectory = FilenameUtils.getFullPath(currentValue);
    updateHarvestFileKeyValue(harvest, key, currentDirectory, appendage);
}

From source file:eu.mrbussy.pdfsplitter.Application.java

/**
 * Split the given PDF file into multiple files using pages.
 * //from   w  ww  .  ja va 2  s . c  o  m
 * @param filename
 *            - Name of the PDF to split
 * @param useSubFolder
 *            - Use a separate folder to place the files in.
 */
/**
 * Split the given PDF file into multiple files using pages.
 *
 * @param file
 *            - Name of the PDF to split
 * @param useSubFolder
 *            - Use a separate folder to place the files in.
 */
public static void SplitFile(File file, boolean useSubFolder) {
    PdfReader reader = null;
    String format = null;

    if (useSubFolder)
        format = "%1$s%2$s%4$s%2$s_%%03d.%3$s";
    else
        format = "%1$s%2$s_%%03d.%3$s";

    // Resolve the absolute path once instead of three times.
    final String absolutePath = file.getAbsolutePath();
    final String splitFile = String.format(format, FilenameUtils.getFullPath(absolutePath),
            FilenameUtils.getBaseName(absolutePath), FilenameUtils.getExtension(absolutePath),
            IOUtils.DIR_SEPARATOR);

    FileInputStream input = null;
    try {
        input = new FileInputStream(file);
        reader = new PdfReader(input);

        // Hoist the page count: it is invariant over the loop, and the
        // explicit "> 0" guard was redundant with the loop condition.
        final int pageCount = reader.getNumberOfPages();
        for (int pageNum = 1; pageNum <= pageCount; pageNum++) {
            // Format the per-page file name once and reuse it for the log line.
            final String filename = String.format(splitFile, pageNum);
            System.out.println(filename);
            // NOTE(review): page size is always taken from page 1, not pageNum —
            // looks intentional for uniform output, but worth confirming.
            Document document = new Document(reader.getPageSizeWithRotation(1));
            PdfCopy writer = new PdfCopy(document, new FileOutputStream(filename));
            document.open();
            // Copy the page from the original
            PdfImportedPage page = writer.getImportedPage(reader, pageNum);
            writer.addPage(page);
            document.close();
            writer.close();
        }
    } catch (Exception ex) {
        // TODO Implement exception handling
        ex.printStackTrace(System.err);
    } finally {
        if (reader != null)
            // Always close the stream
            reader.close();
        // The original leaked this FileInputStream; close it explicitly.
        IOUtils.closeQuietly(input);
    }
}

From source file:com.egreen.tesla.server.api.component.Component.java

/**
 *
 * Copy Components .html,.css,.js files to Folder
 *
 * @param inputStream// ww w .  j a va2s .  co  m
 * @param fileName
 * @throws IOException
 */
/**
 * Copy Components .html,.css,.js files to Folder.
 *
 * <p>Writes the given stream to {@code component_base + "/" + fileName},
 * creating parent directories as needed. Names without a file extension
 * (i.e. directory entries) are skipped. The input stream is always closed.
 *
 * @param inputStream stream with the entry contents; closed before returning
 * @param fileName    entry name relative to the component base directory
 * @throws IOException if reading, writing, or closing fails
 */
private void saveEntry(final InputStream inputStream, String fileName) throws IOException {
    try {
        final String FileName = component_base + "/" + fileName;
        // Compute the extension once; an empty extension means a directory entry.
        final String extension = FilenameUtils.getExtension(FileName);
        if (extension != null && !extension.isEmpty()) {
            final File fileDir = new File(FilenameUtils.getFullPath(FileName));
            fileDir.mkdirs();
            final File file1 = new File(FileName);
            file1.createNewFile();
            // try-with-resources replaces the verbose close-and-rethrow finally
            // block; the pointless "catch (IOException e) { throw e; }" is gone.
            try (OutputStream outputStream = new FileOutputStream(file1)) {
                final byte[] bytes = new byte[1024];
                int read;
                while ((read = inputStream.read(bytes)) != -1) {
                    outputStream.write(bytes, 0, read);
                }
            }
        }
    } finally {
        // Match the original contract: the input stream is closed on every path,
        // and a failure to close propagates as IOException.
        inputStream.close();
    }
}

From source file:it.drwolf.ridire.session.async.Mapper.java

/**
 * Extracts a crawled resource from an ARC archive, writes it as a gzipped
 * file, derives a plain-text version, and (for Italian text, when PoS
 * tagging is enabled) runs PoS tagging to count words. The resource's
 * processing status is set to FINISHED or PROCESSING_ERROR at the end.
 *
 * @param f             ARC archive file containing the crawled resource
 * @param cr            crawled-resource record (offset, digest); updated in place
 * @param entityManager persistence context passed through to helper steps
 */
private void createArchivedResource(File f, CrawledResource cr, EntityManager entityManager) {
    String posEnabled = this.em.find(Parameter.class, Parameter.POS_ENABLED.getKey()).getValue();
    File resourceDir;
    int status = Parameter.FINISHED;
    try {
        // Resource dir sits next to the archive; "__<n>" suffixes are stripped
        // from the archive path first.
        resourceDir = new File(FilenameUtils.getFullPath(f.getCanonicalPath().replaceAll("__\\d+", ""))
                + JobMapperMonitor.RESOURCESDIR);
        if (!resourceDir.exists()) {
            FileUtils.forceMkdir(resourceDir);
        }
        // Seek directly to this resource's record inside the ARC archive.
        ArchiveReader reader = ArchiveReaderFactory.get(f);
        ARCRecord record = (ARCRecord) reader.get(cr.getOffset());
        record.skipHttpHeader();
        byte[] buf = new byte[Mapper.BUFLENGTH];
        int count = 0;
        String resourceFile = cr.getDigest() + ".gz";
        // Copy the raw record body into a gzipped file named by its digest.
        GZIPOutputStream baos = new GZIPOutputStream(new FileOutputStream(new File(resourceDir, resourceFile)));
        while ((count = record.read(buf)) != -1) {
            baos.write(buf, 0, count);
        }
        baos.finish();
        baos.close();
        reader.close();
        // Derive the plain-text version, then drop the intermediate gzip.
        StringWithEncoding cleanText = this.createPlainTextResource(f, cr, entityManager);
        this.removeGZippedResource(resourceDir, resourceFile);
        String plainTextFileName = cr.getDigest() + ".txt";
        // Only keep text produced by the trusted cleaners (ALCHEMY/READABILITY)
        // and only when it is non-blank.
        if (cleanText != null && cleanText.getString() != null && cleanText.getString().trim().length() > 0
                && cleanText.getCleaner() != null && (cleanText.getCleaner().equals(Mapper.ALCHEMY)
                        || cleanText.getCleaner().equals(Mapper.READABILITY))) {
            cr.setCleaner(cleanText.getCleaner());
            File plainTextFile = new File(resourceDir, plainTextFileName);
            FileUtils.writeStringToFile(plainTextFile, cleanText.getString(), cleanText.getEncoding());
            cr.setExtractedTextHash(MD5DigestCreator.getMD5Digest(plainTextFile));
            // language detection
            String language = this.detectLanguage(cleanText.getString());
            cr.setLanguage(language);
            if (language != null && language.equalsIgnoreCase(Mapper.ITALIAN) && posEnabled != null
                    && posEnabled.equalsIgnoreCase("true")) {
                // PoS tag if it's an italian text
                String posTagResourceFileName = this.createPoSTagResource(plainTextFile, entityManager,
                        cleanText.getEncoding());
                if (posTagResourceFileName != null) {
                    Integer wordsNumber = Mapper.countWordsFromPoSTagResource(posTagResourceFileName);
                    cr.setWordsNumber(wordsNumber);
                }
            }
        }
    } catch (Exception e) {
        // Any failure marks the resource as errored rather than aborting the batch.
        status = Parameter.PROCESSING_ERROR;
        e.printStackTrace();
    }
    cr.setProcessed(status);
}

From source file:MSUmpire.DIA.TargetMatchScoring.java

/**
 * Formats one tab-separated result row for a match/peak-group pair.
 *
 * @param match  the library match supplying peptide-ion columns
 * @param target the scored peak group supplying the score columns
 * @param type   row label: "Target", "Target_Decoy", "ID", or "ID_Decoy"
 * @param rank   1-based rank of this hit within its list
 * @return one newline-terminated TSV row
 */
private static String formatMatchLine(UmpireSpecLibMatch match, PeakGroupScore target, String type, int rank) {
    return match.pepIonID.ModSequence + "\t" + match.pepIonID.PredictRTString() + "\t"
            + match.pepIonID.NeutralPrecursorMz() + "\t" + type + "\t" + target.PrecursorRT + "\t"
            + target.MSlevel + "\t" + target.ApexDeltaScore + "\t" + target.AveCorrScore + "\t"
            + target.SpecDotProduct + "\t" + target.SpecCorrelation + "\t" + target.ContrastAngle
            + "\t" + target.PPMScore + "\t" + target.RTOverlapScore + "\t" + target.FragIntAvgScore
            + "\t" + target.PrecursorCorr + "\t" + target.NoMatchB + "\t" + target.NoMatchY + "\t"
            + target.RTDiff + "\t" + target.PrecursorPPM + "\t" + target.SumCorrScore + "\t"
            + target.SumCorrPPMScore + "\t" + target.PrecursorIsoPattern + "\t"
            + target.PrecursorCentralRank + "\t" + rank + "\t" + target.UmpireScore + "\t"
            + target.MixtureModelLocalProb + "\n";
}

/**
 * Writes the target re-extraction alignment results as a TSV (.xls) file next
 * to {@code Filename}. Rows cover target and decoy hits for both the library
 * target matches and the library ID matches. Failures are logged, not rethrown.
 *
 * @throws IOException declared for caller compatibility; errors are caught and logged
 */
private void ExportAlignmentResult() throws IOException {
    // try-with-resources: the original leaked the FileWriter if a write failed.
    try (FileWriter writer = new FileWriter(FilenameUtils.getFullPath(Filename) + "/"
            + FilenameUtils.getBaseName(Filename) + "_" + LibID + "_TargetReExtraction.xls")) {
        writer.write(
                "Modseq\tPredictRT\tPrecursorMz\tType\tClusterRT\tMS level\tApexDeltaScore\tCorrScore\tSpecDotProduct\tSpecCorrelation\tSpecConstrastAngle\tPPMScore\tRTOverlap\tIntScore\tMS1Corr\tNoB\tNoY\tRTdiff\tPrcursorPPM\tSumCorrScore\tSumCorrPPMScore\tPrecursorIsoPattern\tPrecursorCentralRank\tRank\tU-Score\tProbability\n");
        for (UmpireSpecLibMatch match : libTargetMatches) {
            int rank = 1;
            for (PeakGroupScore target : match.TargetHits) {
                writer.write(formatMatchLine(match, target, "Target", rank++));
            }
            rank = 1; // decoy hits are ranked independently of the target hits
            for (PeakGroupScore target : match.DecoyHits) {
                writer.write(formatMatchLine(match, target, "Target_Decoy", rank++));
            }
        }
        for (UmpireSpecLibMatch match : libIDMatches) {
            int rank = 1;
            for (PeakGroupScore target : match.TargetHits) {
                writer.write(formatMatchLine(match, target, "ID", rank++));
            }
            // NOTE: rank is deliberately NOT reset here — the original numbering
            // continues the ID_Decoy ranks after the ID target hits.
            for (PeakGroupScore target : match.DecoyHits) {
                writer.write(formatMatchLine(match, target, "ID_Decoy", rank++));
            }
        }
    } catch (Exception e) {
        Logger.getRootLogger().error(ExceptionUtils.getStackTrace(e));
    }
}

From source file:net.bpelunit.framework.control.deploy.ode.ODEDeployer.java

/**
 * Resolves the deployment archive's location relative to the test directory.
 *
 * @param pathToTest base directory of the test
 * @return the archive path: test dir joined with the archive's directory and file name
 */
private String getArchiveLocation(String pathToTest) {
    // Resolve the archive's directory against the test path, then re-attach its file name.
    final String archiveDirectory = FilenameUtils.getFullPath(fArchive);
    final String resolvedDirectory = FilenameUtils.concat(pathToTest, archiveDirectory);
    final String archiveFileName = FilenameUtils.getName(fArchive);
    return FilenameUtils.concat(resolvedDirectory, archiveFileName);
}

From source file:gov.ca.cwds.rest.util.jni.CmsPKCompressor.java

/**
 * Builds a {@link File} from the directory and name components of the given path.
 *
 * @param file path string to decompose
 * @return a File combining the path's directory portion and its file name
 */
private File createFile(String file) {
    final String directory = FilenameUtils.getFullPath(file);
    final String name = FilenameUtils.getName(file);
    return new File(directory, name); // NOSONAR
}