Example usage for org.apache.commons.io Charsets UTF_8

List of usage examples for org.apache.commons.io Charsets UTF_8

Introduction

In this page you can find the example usage for org.apache.commons.io Charsets UTF_8.

Prototype

Charset UTF_8

To view the source code for org.apache.commons.io Charsets UTF_8, click the Source Link below.

Click Source Link

Document

Eight-bit Unicode Transformation Format (UTF-8).

Usage

From source file:org.apache.hadoop.hdfs.server.namenode.FSDirMkdirOp.java

/**
 * Creates a single directory entry named {@code localName} under {@code existing}.
 *
 * @param fsd the directory tree to mutate
 * @param existing resolved path of the (existing) parent directory
 * @param localName name of the directory to create
 * @param perm permissions for the new directory
 * @return the INodesInPath extended with the new directory, or null if the
 *         mkdir did not take effect
 * @throws IOException on namesystem errors
 */
private static INodesInPath createSingleDirectory(FSDirectory fsd, INodesInPath existing, String localName,
        PermissionStatus perm) throws IOException {
    existing = unprotectedMkdir(fsd, IDsGeneratorFactory.getInstance().getUniqueINodeID(), existing,
            localName.getBytes(Charsets.UTF_8), perm, null, now());
    if (existing == null) {
        return null;
    }

    // Directory creation also counts towards FilesCreated
    // to match count of FilesDeleted metric.
    NameNode.getNameNodeMetrics().incrFilesCreated();

    // getPath() is only needed for the debug message, so compute it lazily.
    if (NameNode.stateChangeLog.isDebugEnabled()) {
        NameNode.stateChangeLog.debug("mkdirs: created directory " + existing.getPath());
    }
    return existing;
}

From source file:org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.java

/**
 * Returns a directory listing for {@code srcArg}, resuming after the entry
 * named by {@code startAfterArg}, inside a Hops transactional handler that
 * acquires the required inode/block/ACE locks.
 *
 * @param fsd the directory tree to read
 * @param srcArg path to list (may be in reserved /.reserved form)
 * @param startAfterArg name of the last entry of the previous listing page
 * @param needLocation whether block locations should be included
 * @return the listing, or whatever {@code getListing} returns for the path
 * @throws DirectoryListingStartAfterNotFoundException if a reserved-form
 *         startAfter cannot be resolved (e.g. the inode was deleted)
 * @throws IOException on other namesystem errors
 */
static DirectoryListing getListingInt(final FSDirectory fsd, final String srcArg, byte[] startAfterArg,
        final boolean needLocation) throws IOException {

    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(srcArg);
    String startAfterString = new String(startAfterArg, Charsets.UTF_8);
    final String src = fsd.resolvePath(srcArg, pathComponents);

    // Get file name when startAfter is an INodePath
    if (FSDirectory.isReservedName(startAfterString)) {
        byte[][] startAfterComponents = FSDirectory.getPathComponentsForReservedPath(startAfterString);
        try {
            // Resolve the reserved inode path back to a regular path and keep
            // only its last component as the resume key.
            String tmp = fsd.resolvePath(src, startAfterComponents, fsd);
            byte[][] regularPath = INode.getPathComponents(tmp);
            startAfterArg = regularPath[regularPath.length - 1];
        } catch (IOException e) {
            // Possibly the inode is deleted
            throw new DirectoryListingStartAfterNotFoundException("Can't find startAfter " + startAfterString);
        }
    }

    // Effectively-final copy so the anonymous handler below can capture it.
    final byte[] startAfter = startAfterArg;

    HopsTransactionalRequestHandler getListingHandler = new HopsTransactionalRequestHandler(
            HDFSOperationType.GET_LISTING, src) {
        @Override
        public void acquireLock(TransactionLocks locks) throws IOException {
            LockFactory lf = LockFactory.getInstance();
            // Read-lock the path plus its immediate children; quota
            // attributes are not needed for a listing.
            INodeLock il = lf
                    .getINodeLock(INodeLockType.READ, INodeResolveType.PATH_AND_IMMEDIATE_CHILDREN, src)
                    .setNameNodeID(fsd.getFSNamesystem().getNameNode().getId())
                    .setActiveNameNodes(
                            fsd.getFSNamesystem().getNameNode().getActiveNameNodes().getActiveNodes())
                    .skipReadingQuotaAttr(true);
            locks.add(il);
            if (needLocation) {
                // Block locations were requested, so block metadata must be
                // locked as well.
                locks.add(lf.getBlockLock()).add(lf.getBlockRelated(BLK.RE, BLK.ER, BLK.CR, BLK.UC, BLK.CA));
            }
            locks.add(lf.getAcesLock());
        }

        @Override
        public Object performTask() throws IOException {
            FSPermissionChecker pc = fsd.getPermissionChecker();
            final INodesInPath iip = fsd.getINodesInPath(src, true);
            final boolean isSuperUser = pc.isSuperUser();
            if (fsd.isPermissionEnabled()) {
                // Listing a directory requires READ_EXECUTE on it; for a
                // non-directory (or missing) target only traversal is checked.
                if (iip.getLastINode() != null && iip.getLastINode().isDirectory()) {
                    fsd.checkPathAccess(pc, iip, FsAction.READ_EXECUTE);
                } else {
                    fsd.checkTraverse(pc, iip);
                }
            }

            return getListing(fsd, iip, src, startAfter, needLocation, isSuperUser);
        }
    };
    return (DirectoryListing) getListingHandler.handle();
}

From source file:org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.java

/**
 * Returns a directory listing for {@code srcArg}, resuming after the entry
 * named by {@code startAfter}. Non-transactional variant: permission checks
 * run inline on the resolved path.
 *
 * @param fsd the directory tree to read
 * @param srcArg path to list (may be in reserved /.reserved form)
 * @param startAfter name of the last entry of the previous listing page
 * @param needLocation whether block locations should be included
 * @return the listing, or whatever {@code getListing} returns for the path
 * @throws DirectoryListingStartAfterNotFoundException if a reserved-form
 *         startAfter cannot be resolved (e.g. the inode was deleted)
 * @throws IOException on other namesystem errors
 */
static DirectoryListing getListingInt(FSDirectory fsd, final String srcArg, byte[] startAfter,
        boolean needLocation) throws IOException {
    FSPermissionChecker pc = fsd.getPermissionChecker();
    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(srcArg);
    final String startAfterString = new String(startAfter, Charsets.UTF_8);
    final String src = fsd.resolvePath(pc, srcArg, pathComponents);
    final INodesInPath iip = fsd.getINodesInPath(src, true);

    // Get file name when startAfter is an INodePath
    if (FSDirectory.isReservedName(startAfterString)) {
        byte[][] startAfterComponents = FSDirectory.getPathComponentsForReservedPath(startAfterString);
        try {
            // Resolve the reserved inode path back to a regular path and keep
            // only its last component as the resume key.
            String tmp = FSDirectory.resolvePath(src, startAfterComponents, fsd);
            byte[][] regularPath = INode.getPathComponents(tmp);
            startAfter = regularPath[regularPath.length - 1];
        } catch (IOException e) {
            // Possibly the inode is deleted
            throw new DirectoryListingStartAfterNotFoundException("Can't find startAfter " + startAfterString);
        }
    }

    // Without permission checking every caller is treated as superuser.
    boolean isSuperUser = true;
    if (fsd.isPermissionEnabled()) {
        // Listing a directory requires READ_EXECUTE on it; for a
        // non-directory (or missing) target only traversal is checked.
        if (iip.getLastINode() != null && iip.getLastINode().isDirectory()) {
            fsd.checkPathAccess(pc, iip, FsAction.READ_EXECUTE);
        } else {
            fsd.checkTraverse(pc, iip);
        }
        isSuperUser = pc.isSuperUser();
    }
    return getListing(fsd, iip, src, startAfter, needLocation, isSuperUser);
}

From source file:org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.java

/**
 * Add the given filename to the fs./*from www .j  av  a 2  s  .  c  om*/
 * @return the new INodesInPath instance that contains the new INode
 */
private static INodesInPath addFile(FSDirectory fsd, INodesInPath existing, String localName,
        PermissionStatus permissions, short replication, long preferredBlockSize, String clientName,
        String clientMachine) throws IOException {

    long modTime = now();
    INodeFile newNode = newINodeFile(fsd.allocateNewInodeId(), permissions, modTime, modTime, replication,
            preferredBlockSize);
    newNode.setLocalName(localName.getBytes(Charsets.UTF_8));
    newNode.toUnderConstruction(clientName, clientMachine);

    INodesInPath newiip;
    fsd.writeLock();
    try {
        newiip = fsd.addINode(existing, newNode);
    } finally {
        fsd.writeUnlock();
    }
    if (newiip == null) {
        NameNode.stateChangeLog.info("DIR* addFile: failed to add " + existing.getPath() + "/" + localName);
        return null;
    }

    if (NameNode.stateChangeLog.isDebugEnabled()) {
        NameNode.stateChangeLog.debug("DIR* addFile: " + localName + " is added");
    }
    return newiip;
}

From source file:org.apache.hadoop.hdfs.server.namenode.TestGetBlockLocations.java

/**
 * Builds a mock-backed FSNamesystem containing a single test file under "/"
 * for block-location tests.
 */
private static FSNamesystem setupFileSystem() throws IOException {
    final Configuration conf = new Configuration();
    conf.setLong(DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, 1L);

    // Only the edit-log accessor of the image is used, so mock both.
    final FSImage image = mock(FSImage.class);
    final FSEditLog editlog = mock(FSEditLog.class);
    when(image.getEditLog()).thenReturn(editlog);
    final FSNamesystem fsn = new FSNamesystem(conf, image, true);

    final FSDirectory fsd = fsn.getFSDirectory();
    final INodesInPath rootPath = fsd.getINodesInPath("/", true);
    final PermissionStatus perm = new PermissionStatus("hdfs", "supergroup",
            FsPermission.createImmutable((short) 0x1ff));
    final INodeFile file = new INodeFile(MOCK_INODE_ID, FILE_NAME.getBytes(Charsets.UTF_8), perm, 1, 1,
            new BlockInfo[] {}, (short) 1, DFS_BLOCK_SIZE_DEFAULT);
    fsn.getFSDirectory().addINode(rootPath, file);
    return fsn;
}

From source file:org.apache.hadoop.http.HtmlQuoting.java

/**
 * Does the given string need to be quoted?
 * @param str the string to check/*from w  ww .j ava2  s. co  m*/
 * @return does the string contain any of the active html characters?
 */
public static boolean needsQuoting(String str) {
    if (str == null) {
        return false;
    }
    byte[] bytes = str.getBytes(Charsets.UTF_8);
    return needsQuoting(bytes, 0, bytes.length);
}

From source file:org.apache.hadoop.http.HtmlQuoting.java

/**
 * Quote the given item to make it html-safe.
 * @param item the string to quote/*  w  w w. jav a 2  s . c  o  m*/
 * @return the quoted string
 */
public static String quoteHtmlChars(String item) {
    if (item == null) {
        return null;
    }
    byte[] bytes = item.getBytes(Charsets.UTF_8);
    if (needsQuoting(bytes, 0, bytes.length)) {
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        try {
            quoteHtmlChars(buffer, bytes, 0, bytes.length);
            return buffer.toString("UTF-8");
        } catch (IOException ioe) {
            // Won't happen, since it is a bytearrayoutputstream
            return null;
        }
    } else {
        return item;
    }
}

From source file:org.apache.hadoop.lib.wsrs.JSONMapProvider.java

@Override
public void writeTo(Map map, Class<?> aClass, Type type, Annotation[] annotations, MediaType mediaType,
        MultivaluedMap<String, Object> stringObjectMultivaluedMap, OutputStream outputStream)
        throws IOException, WebApplicationException {
    // Serialize the map as JSON straight onto the response stream (UTF-8),
    // terminated by ENTER, and flush so the bytes are actually emitted.
    final Writer out = new OutputStreamWriter(outputStream, Charsets.UTF_8);
    JSONObject.writeJSONString(map, out);
    out.write(ENTER);
    out.flush();
}

From source file:org.apache.hadoop.lib.wsrs.JSONProvider.java

@Override
public void writeTo(JSONStreamAware jsonStreamAware, Class<?> aClass, Type type, Annotation[] annotations,
        MediaType mediaType, MultivaluedMap<String, Object> stringObjectMultivaluedMap,
        OutputStream outputStream) throws IOException, WebApplicationException {
    // Stream the entity's JSON representation to the response (UTF-8),
    // terminated by ENTER, and flush so the bytes are actually emitted.
    final Writer out = new OutputStreamWriter(outputStream, Charsets.UTF_8);
    jsonStreamAware.writeJSONString(out);
    out.write(ENTER);
    out.flush();
}

From source file:org.apache.hadoop.record.compiler.CGenerator.java

/**
 * Generate C code. This method only creates the requested file(s)
 * and spits-out file-level elements (such as include statements etc.);
 * record-level code is generated by JRecord.
 */
@Override
void genCode(String name, ArrayList<JFile> ilist, ArrayList<JRecord> rlist, String destDir,
        ArrayList<String> options) throws IOException {
    // Relocate the output under destDir, keeping only the base file name.
    name = new File(destDir, (new File(name)).getName()).getAbsolutePath();
    // Header-guard token, shared by the #ifndef/#define/#endif lines.
    final String guard = StringUtils.toUpperCase(name).replace('.', '_');
    try (Writer cc = new FileWriterWithEncoding(name + ".c", Charsets.UTF_8);
            Writer hh = new FileWriterWithEncoding(name + ".h", Charsets.UTF_8)) {
        hh.write("#ifndef __" + guard + "__\n");
        hh.write("#define __" + guard + "__\n");
        hh.write("#include \"recordio.h\"\n");
        for (JFile included : ilist) {
            hh.write("#include \"" + included.getName() + ".h\"\n");
        }

        cc.write("#include \"" + name + ".h\"\n");

        // NOTE(review): per-record code generation (JRecord#genCppCode) is
        // intentionally not invoked here.

        hh.write("#endif //" + guard + "__\n");
    }
}