Example usage for org.apache.commons.io FilenameUtils separatorsToUnix

Introduction

On this page you can find example usage for org.apache.commons.io FilenameUtils.separatorsToUnix.

Prototype

public static String separatorsToUnix(String path) 

Document

Converts all separators to the Unix separator of forward slash.
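
Before the examples, a minimal self-contained sketch of the method's behaviour (the demo class name is ours, for illustration only):

import org.apache.commons.io.FilenameUtils;

public class SeparatorsToUnixDemo {
    public static void main(String[] args) {
        // Backslash separators are converted to forward slashes:
        System.out.println(FilenameUtils.separatorsToUnix("C:\\pentaho\\system\\plugin"));
        // -> C:/pentaho/system/plugin

        // Paths that already use forward slashes are returned unchanged:
        System.out.println(FilenameUtils.separatorsToUnix("/opt/pentaho/system"));
        // -> /opt/pentaho/system

        // A null input yields null:
        System.out.println(FilenameUtils.separatorsToUnix(null));
        // -> null
    }
}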

Usage

From source file:pt.webdetails.cpf.repository.pentaho.PentahoLegacySolutionAccess.java

private IBasicFile asBasicFile(final ISolutionFile file) {
    return new IBasicFile() {

        public InputStream getContents() throws IOException {
            return new ByteArrayInputStream(file.getData());
        }

        public String getExtension() {
            return RepositoryHelper.getExtension(file.getFileName());
        }

        public String getFullPath() {
            return FilenameUtils.separatorsToUnix(file.getFullPath());
        }

        public String getName() {
            return file.getFileName();
        }

        public String getPath() {
            return RepositoryHelper.relativizePath(basePath,
                    RepositoryHelper.appendPath(getSolutionPath(file), file.getFileName()), true);
        }

        public boolean isDirectory() {
            return file.isDirectory();
        }

        private String getSolutionPath(ISolutionFile file) {
            if (file.isRoot()) {
                return "/";
            }
            return FilenameUtils.separatorsToUnix(file.getSolutionPath());
        }

        public String toString() {
            return getFullPath();
        }
    };
}

From source file:pt.webdetails.cpf.repository.pentaho.PentahoLegacySolutionAccess.java

@Override
public boolean hasAccess(String filePath, FileAccess access) {
    filePath = getPath(filePath);
    ISolutionFile file = getRepository().getSolutionFile(filePath, toResourceAction(access));
    if (file == null) {
        return false;
    } else if (SecurityHelper.canHaveACLS(file)
            && (file.retrieveParent() != null && !StringUtils.startsWith(file.getSolutionPath(), "system"))) {
        // has been checked
        return true;
    } else {
        if (!SecurityHelper.canHaveACLS(file)) {
            logger.warn("hasAccess: " + file.getExtension() + " extension not in acl-files.");
            // not declared in pentaho.xml:/pentaho-system/acl-files
            // try parent: folders have acl enabled unless in system
            ISolutionFile parent = file.retrieveParent();
            if (parent instanceof IAclSolutionFile) {
                return SecurityHelper.hasAccess((IAclSolutionFile) parent, toResourceAction(access),
                        userSession);
            }
        }
        logger.warn("hasAccess: Unable to check access control for " + filePath
                + " using default access settings.");
        if (StringUtils.startsWith(FilenameUtils.separatorsToUnix(file.getSolutionPath()), "system/")
                && !isAcceptedPluginFile(filePath, userSession)) {
            return SecurityHelper.isPentahoAdministrator(userSession);
        }
        switch (access) {
        case EXECUTE:
        case READ:
            return true;
        default:
            return SecurityHelper.isPentahoAdministrator(userSession);
        }
    }
}

From source file:pt.webdetails.cpf.repository.pentaho.SystemPluginResourceAccess.java

@Override
protected File getFile(String path) {
    if (path != null && path.startsWith("/system/")) { //XXX - review ...
        String[] sections = path.split("/");
        String sysPluginDir = sections[1] + "/" + sections[2];
        String baseString = FilenameUtils.separatorsToUnix(basePath.toString());
        if (baseString.indexOf(sysPluginDir) != -1
                && (baseString.lastIndexOf(sysPluginDir) + sysPluginDir.length() == baseString.length())) {
            path = path.replaceFirst("/.*?/.*?/", "/");
        } else if (baseString.indexOf(sysPluginDir) == -1) {
            String systemPath = StringUtils.substringBeforeLast(basePath.getAbsolutePath(), "system");
            systemPath = systemPath + sysPluginDir;
            path = path.replaceFirst("/.*?/.*?/", "/");
            return new File(systemPath, path);
        }
    }
    return StringUtils.isEmpty(path) ? basePath : new File(basePath, path);
}
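
Note: the indexOf/lastIndexOf arithmetic in the first branch is simply an ends-with test; that condition could be written more directly as baseString.endsWith(sysPluginDir).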

From source file:pt.webdetails.cpf.repository.pentaho.unified.UnifiedRepositoryAccess.java

protected IBasicFile asBasicFile(final RepositoryFile file, final String path) {
    final String relativePath = (path == null) ? relativizePath(file.getPath()) : path;
    return new IBasicFile() {

        public InputStream getContents() {
            try {
                return UnifiedRepositoryAccess.this.getFileInputStream(relativePath);
            } catch (IOException e) {
                return null;
            }
        }

        public String getName() {
            return file.getName();
        }

        public String getFullPath() {
            return FilenameUtils.separatorsToUnix(file.getPath());
        }

        public String getPath() {
            return relativePath;
        }

        public String getExtension() {
            return RepositoryHelper.getExtension(getName());
        }

        public boolean isDirectory() {
            return file.isFolder();
        }

    };
}

From source file:pt.webdetails.cpf.Util.java

public static String joinPath(String... paths) {
    List<String> normalizedPaths = new LinkedList<String>();
    for (String path : paths) {
        normalizedPaths.add(FilenameUtils.separatorsToUnix(path));
    }

    return RepositoryHelper.joinPaths(normalizedPaths);
}

From source file:pt.webdetails.cpk.elements.impl.DashboardElement.java

protected void callCDE(Map<String, Map<String, Object>> bloatedMap)
        throws UnsupportedEncodingException, IOException {

    //String path =  pluginUtils.getPluginRelativeDirectory( element.getLocation(), true );
    String path = CpkEngine.getInstance().getEnvironment().getPluginUtils()
            .getPluginRelativeDirectory(this.getLocation(), true);

    //ServletRequest wrapper = (HttpServletRequest) bloatedMap.get( "path" ).get( "httprequest" );
    HttpServletResponse response = (HttpServletResponse) bloatedMap.get("path").get("httpresponse");
    response.setContentType(MimeTypes.HTML);
    OutputStream out = response.getOutputStream();

    //String root = wrapper.getScheme() + "://" + wrapper.getServerName() + ":" + wrapper.getServerPort();

    Map<String, Object> params = new HashMap<String, Object>();
    Map<String, Object> requestParams = bloatedMap.get("request");

    path = FilenameUtils.separatorsToUnix(path);

    params.put("solution", "system");
    params.put("path", path);
    if (requestParams.containsKey("mode") && requestParams.get("mode").equals("preview")) {
        params.put("file", this.getName() + "_tmp.cdfde");
    } else {
        params.put("file", this.getName() + ".wcdf");
    }
    params.put("absolute", "false");
    params.put("inferScheme", "false");
    //params.put( "root", root );
    //PluginUtils.copyParametersFromProvider( params, requestParams );
    Iterator<String> it = requestParams.keySet().iterator();
    while (it.hasNext()) {
        String name = it.next();
        params.put(name, requestParams.get(name));
    }

    if (requestParams.containsKey("mode") && requestParams.get("mode").equals("edit")) {
        redirectToCdeEditor(response, params);
        return;
    }
    try {
        InterPluginBroker.run(params, out);
    } catch (Exception e) {
        e.printStackTrace();
    }
}

From source file:se.nbis.sftpsquid.SftpSquid.java

/**
 * Remove superfluous parts of the path, such as double /.
 *
 * @param path the path to normalize
 * @return the normalized path, with Unix-style separators
 */
private String normalizePath(String path) {
    String normalized = FilenameUtils.normalize(path);
    return FilenameUtils.separatorsToUnix(normalized); // In case we run on windows
}
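
As a quick illustration of what the two calls accomplish together (output shown for a Unix host; on Windows, normalize() emits backslashes, which separatorsToUnix() then converts):

import org.apache.commons.io.FilenameUtils;

public class NormalizePathDemo {
    public static void main(String[] args) {
        // normalize() collapses double slashes and resolves "." and ".." segments:
        String normalized = FilenameUtils.normalize("/upload//data/../files/report.txt");
        // -> "/upload/files/report.txt" (with '\' separators on Windows)

        // separatorsToUnix() then guarantees forward slashes on any platform:
        System.out.println(FilenameUtils.separatorsToUnix(normalized));
        // -> /upload/files/report.txt
    }
}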

From source file:snake.server.PathUtils.java

public static ArrayList<PathDescriptor> getRelativePathDescriptors(File folder, boolean useSystemCreationTime) {
    ArrayList<PathDescriptor> descriptors = new ArrayList<PathDescriptor>();
    Iterator<File> filesIter = FileUtils.iterateFiles(folder, TrueFileFilter.INSTANCE, TrueFileFilter.INSTANCE);
    Path folderPath = folder.toPath();
    while (filesIter.hasNext()) {
        File file = filesIter.next();
        Path filePath = file.toPath().toAbsolutePath();
        Path relativePath = folderPath.relativize(filePath);

        PathDescriptor descriptor = new PathDescriptor();
        descriptor.relative_path = FilenameUtils.separatorsToUnix(relativePath.toString());
        descriptor.status = PathDescriptor.Status.Modified;
        descriptor.statusTimePoint = new Date();
        descriptor.lastModified = new Date(file.lastModified());
        if (useSystemCreationTime) {
            BasicFileAttributes attributes = null;
            try {
                attributes = Files.readAttributes(filePath, BasicFileAttributes.class);
                descriptor.lastCreationTime = new Date(attributes.creationTime().toMillis());
            } catch (Exception e) {
                descriptor.lastCreationTime = descriptor.statusTimePoint;
            }
        } else {
            descriptor.lastCreationTime = descriptor.statusTimePoint;
        }
        descriptors.add(descriptor);
    }
    return descriptors;
}

From source file:uk.bl.wa.indexer.WARCIndexer.java

/**
 * This extracts metadata from the ArchiveRecord and creates a suitable SolrRecord.
 * Removes the text field if the flag is set.
 *
 * @param archiveName name of the source archive (WARC/ARC) file
 * @param record the archive record to process
 * @param isTextIncluded whether extracted text should be kept in the output
 * @return the populated SolrRecord, or null if the record was filtered out
 * @throws IOException
 */
public SolrRecord extract(String archiveName, ArchiveRecord record, boolean isTextIncluded) throws IOException {
    final long start = System.nanoTime();
    ArchiveRecordHeader header = record.getHeader();
    SolrRecord solr = solrFactory.createRecord(archiveName, header);

    if (!header.getHeaderFields().isEmpty()) {
        if (header.getHeaderFieldKeys().contains(HEADER_KEY_TYPE)) {
            log.debug("Looking at " + header.getHeaderValue(HEADER_KEY_TYPE));

            if (!checkRecordType((String) header.getHeaderValue(HEADER_KEY_TYPE))) {
                return null;
            }
            // Store WARC record type:
            solr.setField(SolrFields.SOLR_RECORD_TYPE, (String) header.getHeaderValue(HEADER_KEY_TYPE));

            //Store WARC-Record-ID
            solr.setField(SolrFields.WARC_KEY_ID, (String) header.getHeaderValue(HEADER_KEY_ID));
            solr.setField(SolrFields.WARC_IP, (String) header.getHeaderValue(HEADER_KEY_IP));

        } else {
            // else we're processing ARCs so nothing to filter and no
            // revisits
            solr.setField(SolrFields.SOLR_RECORD_TYPE, "arc");
        }

        if (header.getUrl() == null)
            return null;

        // Get the URL:
        String targetUrl = Normalisation.sanitiseWARCHeaderValue(header.getUrl());

        // Strip down very long URLs to avoid
        // "org.apache.commons.httpclient.URIException: Created (escaped)
        // uuri > 2083"
        // Trac #2271: replace string-splitting with URI-based methods.
        if (targetUrl.length() > 2000)
            targetUrl = targetUrl.substring(0, 2000);

        log.debug(
                "Current heap usage: " + FileUtils.byteCountToDisplaySize(Runtime.getRuntime().totalMemory()));
        log.debug("Processing " + targetUrl + " from " + archiveName);

        // Check the filters:
        if (!this.checkProtocol(targetUrl))
            return null;
        if (!this.checkUrl(targetUrl))
            return null;
        if (!this.checkExclusionFilter(targetUrl))
            return null;

        // -----------------------------------------------------
        // Add user supplied Archive-It Solr fields and values:
        // -----------------------------------------------------
        solr.setField(SolrFields.INSTITUTION, WARCIndexerCommand.institution);
        solr.setField(SolrFields.COLLECTION, WARCIndexerCommand.collection);
        solr.setField(SolrFields.COLLECTION_ID, WARCIndexerCommand.collection_id);

        // --- Basic headers ---

        // Basic metadata:
        solr.setField(SolrFields.SOURCE_FILE, archiveName);
        solr.setField(SolrFields.SOURCE_FILE_OFFSET, "" + header.getOffset());
        String filePath = header.getReaderIdentifier(); // Full path of the file

        //Will convert windows path to linux path. Linux paths will not be modified.
        String linuxFilePath = FilenameUtils.separatorsToUnix(filePath);
        solr.setField(SolrFields.SOURCE_FILE_PATH, linuxFilePath);

        byte[] url_md5digest = md5
                .digest(Normalisation.sanitiseWARCHeaderValue(header.getUrl()).getBytes("UTF-8"));
        // Note: despite the name, this is a Base64 encoding of the MD5 digest:
        String url_md5hex = Base64.encodeBase64String(url_md5digest);
        solr.setField(SolrFields.SOLR_URL, Normalisation.sanitiseWARCHeaderValue(header.getUrl()));
        if (addNormalisedURL) {
            solr.setField(SolrFields.SOLR_URL_NORMALISED, Normalisation.canonicaliseURL(targetUrl));
        }

        // Get the length, but beware, this value also includes the HTTP headers (i.e. it is the payload_length):
        long content_length = header.getLength();

        // Also pull out the file extension, if any:
        String resourceName = parseResourceName(targetUrl);
        solr.addField(SolrFields.RESOURCE_NAME, resourceName);
        solr.addField(SolrFields.CONTENT_TYPE_EXT, parseExtension(resourceName));

        // Add URL-based fields:
        URI saneURI = parseURL(solr, targetUrl);

        // Prepare crawl date information:
        String waybackDate = (header.getDate().replaceAll("[^0-9]", ""));
        Date crawlDate = getWaybackDate(waybackDate);

        // Store the dates:
        solr.setField(SolrFields.CRAWL_DATE, formatter.format(crawlDate));
        solr.setField(SolrFields.CRAWL_YEAR, getYearFromDate(crawlDate));

        // Use the current value as the waybackDate:
        solr.setField(SolrFields.WAYBACK_DATE, waybackDate);

        Instrument.timeRel("WARCIndexer.extract#total", "WARCIndexer.extract#archeaders", start);

        // -----------------------------------------------------
        // Now consume record and HTTP headers (only)
        // -----------------------------------------------------

        InputStream tikainput = null;

        // Only parse HTTP headers for HTTP URIs
        if (targetUrl.startsWith("http")) {
            // Parse HTTP headers:
            String statusCode = null;
            if (record instanceof WARCRecord) {
                statusCode = this.processWARCHeaders(record, header, targetUrl, solr);
                tikainput = record;
            } else if (record instanceof ARCRecord) {
                ARCRecord arcr = (ARCRecord) record;
                statusCode = "" + arcr.getStatusCode();
                this.processHeaders(solr, statusCode, arcr.getHttpHeaders(), targetUrl);
                arcr.skipHttpHeader();
                tikainput = arcr;
            } else {
                log.error("FAIL! Unsupported archive record type.");
                return solr;
            }

            solr.setField(SolrFields.SOLR_STATUS_CODE, statusCode);

            // Skip recording non-content URLs (i.e. 2xx responses only please):
            if (!checkResponseCode(statusCode)) {
                log.debug("Skipping this record based on status code " + statusCode + ": " + targetUrl);
                return null;
            }
        } else {
            log.info("Skipping header parsing as URL does not start with 'http'");
        }

        // -----------------------------------------------------
        // Headers have been processed, payload ready to cache:
        // -----------------------------------------------------

        // Update the content_length based on what's available
        // (tikainput is only assigned for http(s) records above):
        content_length = tikainput.available();

        // Record the length:
        solr.setField(SolrFields.CONTENT_LENGTH, "" + content_length);

        // Create an appropriately cached version of the payload, to allow analysis.
        final long hashStreamStart = System.nanoTime();
        HashedCachedInputStream hcis = new HashedCachedInputStream(header, tikainput, content_length);
        tikainput = hcis.getInputStream();
        String hash = hcis.getHash();
        Instrument.timeRel("WARCIndexer.extract#total", "WARCIndexer.extract#hashstreamwrap", hashStreamStart);

        // Use an ID that ensures every URL+timestamp gets a separate
        // record:
        String id = waybackDate + "/" + url_md5hex;

        // Set these last:
        solr.setField(SolrFields.ID, id);
        solr.setField(SolrFields.HASH, hash);

        // -----------------------------------------------------
        // Apply any annotations:
        // -----------------------------------------------------
        if (ant != null) {
            try {
                ant.applyAnnotations(saneURI, solr.getSolrDocument());
            } catch (URISyntaxException e) {
                e.printStackTrace();
                log.error("Failed to annotate " + saneURI + " : " + e);
            }
        }

        // -----------------------------------------------------
        // WARC revisit record handling:
        // -----------------------------------------------------

        // If this is a revisit record, we should just return an update to the crawl_dates (when using hashUrlId)
        if (WARCConstants.WARCRecordType.revisit.name()
                .equalsIgnoreCase((String) header.getHeaderValue(HEADER_KEY_TYPE))) {
            solr.removeField(SolrFields.CONTENT_LENGTH); // It is 0 and would mess with statistics
            //Copy content_type_served to content_type (no tika/droid for revisits)
            solr.addField(SolrFields.SOLR_CONTENT_TYPE,
                    (String) solr.getFieldValue(SolrFields.CONTENT_TYPE_SERVED));
            return solr;
        }

        // -----------------------------------------------------
        // Payload duplication has been checked, ready to parse:
        // -----------------------------------------------------

        final long analyzeStart = System.nanoTime();

        // Mark the start of the payload, with a readLimit corresponding to
        // the payload size:
        tikainput.mark((int) content_length);

        // Pass on to other extractors as required, resetting the stream before each:
        this.wpa.analyse(archiveName, header, tikainput, solr, content_length);
        Instrument.timeRel("WARCIndexer.extract#total", "WARCIndexer.extract#analyzetikainput", analyzeStart);

        // Clear up the caching of the payload:
        hcis.cleanup();

        // -----------------------------------------------------
        // Payload analysis complete, now performing text analysis:
        // -----------------------------------------------------

        this.txa.analyse(solr);

        // Remove the Text Field if required
        if (!isTextIncluded) {
            solr.removeField(SolrFields.SOLR_EXTRACTED_TEXT);
        } else {
            // Otherwise, decide whether to store or both store and index
            // the text:
            if (!storeText) {
                // Copy the text into the indexed (but not stored) field:
                solr.setField(SolrFields.SOLR_EXTRACTED_TEXT_NOT_STORED,
                        (String) solr.getField(SolrFields.SOLR_EXTRACTED_TEXT).getFirstValue());
                // Take the text out of the original (stored) field.
                solr.removeField(SolrFields.SOLR_EXTRACTED_TEXT);
            }
        }
    }
    Instrument.timeRel("WARCIndexerCommand.parseWarcFiles#solrdocCreation", "WARCIndexer.extract#total", start);
    String servedType = "" + solr.getField(SolrFields.CONTENT_TYPE_SERVED);
    Instrument.timeRel("WARCIndexer#content_types",
            "WARCIndexer#" + (servedType.contains(";") ? servedType.split(";")[0] : servedType), start);
    Instrument.timeRel("WARCIndexer#content_types", start);
    return solr;
}