Example usage for org.apache.hadoop.fs FileSystem get

Introduction

This page collects example usages of org.apache.hadoop.fs.FileSystem.get(Configuration).

Prototype

public static FileSystem get(Configuration conf) throws IOException 

Document

Returns the configured FileSystem implementation.
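
Before the collected examples, here is a minimal, self-contained sketch of the call. It is illustrative only: the path and file contents are arbitrary placeholders, and whether the returned object is a local or a distributed file system depends on the fs.defaultFS setting loaded into the Configuration.

import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileSystemGetExample {
    public static void main(String[] args) throws IOException {
        // Loads core-site.xml/hdfs-site.xml from the classpath; fs.defaultFS decides
        // which FileSystem implementation (local, HDFS, ...) this call returns.
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        // Hypothetical scratch path used only for this sketch.
        Path path = new Path("/tmp/filesystem-get-example.txt");

        // Write a small file through whichever FileSystem implementation was configured.
        try (FSDataOutputStream out = fs.create(path, true)) {
            out.write("hello".getBytes(StandardCharsets.UTF_8));
        }

        // Read it back.
        try (FSDataInputStream in = fs.open(path)) {
            byte[] buf = new byte[5];
            in.readFully(buf);
            System.out.println(new String(buf, StandardCharsets.UTF_8));
        }

        fs.delete(path, false);
    }
}

Note that FileSystem.get(conf) normally returns a cached instance shared by all callers with the same scheme, authority, and user, which is why some of the examples below close the returned FileSystem in a try-with-resources block while others leave it open.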

Usage

From source file: co.cask.cdap.data2.increment.hbase94.IncrementSummingScannerTest.java

License: Apache License

private HRegion createRegion(String tableName, byte[] family) throws Exception {
    HTableDescriptor htd = new HTableDescriptor(tableName);
    HColumnDescriptor cfd = new HColumnDescriptor(family);
    cfd.setMaxVersions(Integer.MAX_VALUE);
    htd.addFamily(cfd);
    htd.addCoprocessor(IncrementHandler.class.getName());
    Path tablePath = new Path("/tmp/" + tableName);
    Path hlogPath = new Path("/tmp/hlog-" + tableName);
    Path oldPath = new Path("/tmp/.oldLogs-" + tableName);
    Configuration hConf = conf;
    FileSystem fs = FileSystem.get(hConf);
    assertTrue(fs.mkdirs(tablePath));
    HLog hlog = new HLog(fs, hlogPath, oldPath, hConf);
    return new HRegion(tablePath, hlog, fs, hConf, new HRegionInfo(Bytes.toBytes(tableName)), htd,
            new MockRegionServerServices());
}

From source file: co.cask.cdap.data2.increment.hbase96.IncrementSummingScannerTest.java

License: Apache License

static HRegion createRegion(Configuration hConf, CConfiguration cConf, TableId tableId, HColumnDescriptor cfd)
        throws Exception {
    HBaseTableUtil tableUtil = new HBaseTableUtilFactory(cConf).get();
    HTableDescriptorBuilder htd = tableUtil.buildHTableDescriptor(tableId);
    cfd.setMaxVersions(Integer.MAX_VALUE);
    cfd.setKeepDeletedCells(true);
    htd.addFamily(cfd);
    htd.addCoprocessor(IncrementHandler.class.getName());

    HTableDescriptor desc = htd.build();
    String tableName = desc.getNameAsString();
    Path tablePath = new Path("/tmp/" + tableName);
    Path hlogPath = new Path("/tmp/hlog-" + tableName);
    FileSystem fs = FileSystem.get(hConf);
    assertTrue(fs.mkdirs(tablePath));
    HLog hLog = HLogFactory.createHLog(fs, hlogPath, tableName, hConf);
    HRegionInfo regionInfo = new HRegionInfo(desc.getTableName());
    HRegionFileSystem regionFS = HRegionFileSystem.createRegionOnFileSystem(hConf, fs, tablePath, regionInfo);
    return new HRegion(regionFS, hLog, hConf, desc, new MockRegionServerServices(hConf, null));
}

From source file: co.cask.cdap.hive.stream.StreamRecordReader.java

License: Apache License

StreamRecordReader(InputSplit split, JobConf conf) throws IOException {
    this.inputSplit = (StreamInputSplit) split;
    this.events = Lists.newArrayListWithCapacity(1);
    this.reader = createReader(FileSystem.get(conf), inputSplit);
    this.readFilter = new TimeRangeReadFilter(inputSplit.getStartTime(), inputSplit.getEndTime());
}

From source file: co.cask.cdap.internal.app.runtime.batch.distributed.MapReduceContainerHelper.java

License: Apache License

/**
 * Gets the MapReduce framework URI based on the {@code mapreduce.application.framework.path} setting.
 *
 * @param hConf the job configuration
 * @return the framework URI or {@code null} if not present or if the URI in the config is invalid.
 */
@Nullable
public static URI getFrameworkURI(Configuration hConf) {
    String framework = hConf.get(MRJobConfig.MAPREDUCE_APPLICATION_FRAMEWORK_PATH);
    if (framework == null) {
        return null;
    }

    try {
        // Parse the path. It can contain a '#' fragment representing the localized file name.
        URI uri = new URI(framework);
        String linkName = uri.getFragment();

        // The following resolution logic is copied from JobSubmitter in MR.
        FileSystem fs = FileSystem.get(hConf);
        Path frameworkPath = fs.makeQualified(new Path(uri.getScheme(), uri.getAuthority(), uri.getPath()));
        FileContext fc = FileContext.getFileContext(frameworkPath.toUri(), hConf);
        frameworkPath = fc.resolvePath(frameworkPath);
        uri = frameworkPath.toUri();

        // If there is no localized name (URI fragment), use the last part of the URI path as the name.
        if (linkName == null) {
            linkName = uri.getPath();
            int idx = linkName.lastIndexOf('/');
            if (idx >= 0) {
                linkName = linkName.substring(idx + 1);
            }
        }
        return new URI(uri.getScheme(), uri.getAuthority(), uri.getPath(), null, linkName);
    } catch (URISyntaxException e) {
        LOG.warn("Failed to parse {} as a URI. MapReduce framework path is not used. Check the setting for {}.",
                framework, MRJobConfig.MAPREDUCE_APPLICATION_FRAMEWORK_PATH, e);
    } catch (IOException e) {
        LOG.warn("Failed to resolve {} URI. MapReduce framework path is not used. Check the setting for {}.",
                framework, MRJobConfig.MAPREDUCE_APPLICATION_FRAMEWORK_PATH, e);
    }
    return null;
}

From source file: co.cask.cdap.operations.hdfs.HDFSInfo.java

License: Apache License

@Override
public String getWebURL() {
    try {
        if (HAUtil.isHAEnabled(conf, getNameService())) {
            URL haWebURL = getHAWebURL();
            if (haWebURL != null) {
                return haWebURL.toString();
            }
        } else {
            try (FileSystem fs = FileSystem.get(conf)) {
                URL webUrl = rpcToHttpAddress(fs.getUri());
                if (webUrl != null) {
                    return webUrl.toString();
                }
            }
        }
        lastCollectFailed = false;
    } catch (Exception e) {
        // TODO: remove once CDAP-7887 is fixed
        if (!lastCollectFailed) {
            LOG.warn(
                    "Error in determining HDFS URL. Web URL of HDFS will not be available in HDFS operational stats.",
                    e);
        }
        lastCollectFailed = true;
    }
    return null;
}

From source file: co.cask.cdap.operations.hdfs.HDFSNodes.java

License: Apache License

private List<String> getNameNodes() throws IOException {
    List<String> namenodes = new ArrayList<>();
    if (!HAUtil.isHAEnabled(conf, getNameService())) {
        try (FileSystem fs = FileSystem.get(conf)) {
            return Collections.singletonList(fs.getUri().toString());
        }
    }
    String nameService = getNameService();
    for (String nnId : DFSUtil.getNameNodeIds(conf, nameService)) {
        namenodes.add(DFSUtil.getNamenodeServiceAddr(conf, nameService, nnId));
    }
    return namenodes;
}

From source file: co.cask.cdap.operations.hdfs.HDFSStorage.java

License: Apache License

@Nullable
private DistributedFileSystem createDFS() throws IOException {
    FileSystem fs = FileSystem.get(conf);
    if (!(fs instanceof DistributedFileSystem)) {
        LOG.debug("The filesystem configured on this cluster is {}, which is not {}. "
                + "HDFS storage stats will not be reported.",
                fs.getClass().getName(), DistributedFileSystem.class.getName());
        return null;
    }
    return (DistributedFileSystem) fs;
}

From source file: co.cask.cdap.security.TokenSecureStoreUpdater.java

License: Apache License

/**
 * Gets the Hadoop FileSystem from LocationFactory.
 * TODO: copied from Twill 0.6 YarnUtils for CDAP-5350. Remove after this fix is moved to Twill.
 */
private static FileSystem getFileSystem(LocationFactory locationFactory, Configuration config)
        throws IOException {
    LOG.debug("getFileSystem(): locationFactory is a {}", locationFactory.getClass());
    if (locationFactory instanceof HDFSLocationFactory) {
        return ((HDFSLocationFactory) locationFactory).getFileSystem();
    }
    if (locationFactory instanceof ForwardingLocationFactory) {
        return getFileSystem(((ForwardingLocationFactory) locationFactory).getDelegate(), config);
    }
    // CDAP-5350: For encrypted file systems, FileContext does not acquire the KMS delegation token
    // Since we know we are in Yarn, it is safe to get the FileSystem directly, bypassing LocationFactory.
    if (locationFactory instanceof FileContextLocationFactory) {
        return FileSystem.get(config);
    }
    return null;
}

From source file: co.cask.cdap.template.etl.batch.ETLMapReduceTest.java

License: Apache License

@Test
public void testFiletoTPFS() throws Exception {
    String filePath = "file:///tmp/test/text.txt";
    String testData = "String for testing purposes.";

    Path textFile = new Path(filePath);
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    FSDataOutputStream writeData = fs.create(textFile);
    writeData.write(testData.getBytes());
    writeData.flush();
    writeData.close();

    ETLStage source = new ETLStage("File", ImmutableMap.<String, String>builder()
            .put(Properties.File.FILESYSTEM, "Text").put(Properties.File.PATH, filePath).build());

    ETLStage sink = new ETLStage("TPFSAvro",
            ImmutableMap.of(Properties.TimePartitionedFileSetDataset.SCHEMA,
                    FileBatchSource.DEFAULT_SCHEMA.toString(),
                    Properties.TimePartitionedFileSetDataset.TPFS_NAME, "fileSink"));
    ETLBatchConfig etlConfig = new ETLBatchConfig("* * * * *", source, sink, Lists.<ETLStage>newArrayList());
    AdapterConfig adapterConfig = new AdapterConfig("", TEMPLATE_ID.getId(), GSON.toJsonTree(etlConfig));
    Id.Adapter adapterId = Id.Adapter.from(NAMESPACE, "testFileAdapter");
    AdapterManager manager = createAdapter(adapterId, adapterConfig);

    manager.start();
    manager.waitForOneRunToFinish(2, TimeUnit.MINUTES);
    manager.stop();

    DataSetManager<TimePartitionedFileSet> fileSetManager = getDataset("fileSink");
    TimePartitionedFileSet fileSet = fileSetManager.get();
    List<GenericRecord> records = readOutput(fileSet, FileBatchSource.DEFAULT_SCHEMA);
    Assert.assertEquals(1, records.size());
    Assert.assertEquals(testData, records.get(0).get("body").toString());
    fileSet.close();
}

From source file: co.cask.hydrator.action.ftp.FTPCopyAction.java

License: Apache License

@Override
public void run(ActionContext context) throws Exception {
    Path destination = new Path(config.getDestDirectory());
    FileSystem fileSystem = FileSystem.get(new Configuration());
    destination = fileSystem.makeQualified(destination);
    if (!fileSystem.exists(destination)) {
        fileSystem.mkdirs(destination);
    }

    FTPClient ftp;
    if ("ftp".equals(config.getProtocol().toLowerCase())) {
        ftp = new FTPClient();
    } else {
        ftp = new FTPSClient();
    }
    ftp.setControlKeepAliveTimeout(5);
    // UNIX type server
    FTPClientConfig ftpConfig = new FTPClientConfig();
    // Set any additional parameters required for the FTP connection,
    // for example ftpConfig.setServerTimeZoneId("Pacific/Pitcairn")
    ftp.configure(ftpConfig);
    try {
        ftp.connect(config.getHost(), config.getPort());
        ftp.enterLocalPassiveMode();
        String replyString = ftp.getReplyString();
        LOG.info("Connected to server {} and port {} with reply from connect as {}.", config.getHost(),
                config.getPort(), replyString);

        // Check the reply code for actual success
        int replyCode = ftp.getReplyCode();

        if (!FTPReply.isPositiveCompletion(replyCode)) {
            ftp.disconnect();
            throw new RuntimeException(String.format("FTP server refused connection with code %s and reply %s.",
                    replyCode, replyString));
        }

        if (!ftp.login(config.getUserName(), config.getPassword())) {
            LOG.error("login command reply code {}, {}", ftp.getReplyCode(), ftp.getReplyString());
            ftp.logout();
            throw new RuntimeException(String.format(
                    "Login to the FTP server %s and port %s failed. " + "Please check user name and password.",
                    config.getHost(), config.getPort()));
        }

        FTPFile[] ftpFiles = ftp.listFiles(config.getSrcDirectory());
        LOG.info("listFiles command reply code: {}, {}.", ftp.getReplyCode(), ftp.getReplyString());
        // Check the reply code from the listFiles call.
        // If it is "522 Data connections must be encrypted", the data channel also needs to be encrypted.
        if (ftp.getReplyCode() == 522 && "sftp".equalsIgnoreCase(config.getProtocol())) {
            // encrypt data channel and listFiles again
            ((FTPSClient) ftp).execPROT("P");
            LOG.info("Attempting command listFiles on encrypted data channel.");
            ftpFiles = ftp.listFiles(config.getSrcDirectory());
        }
        for (FTPFile file : ftpFiles) {
            String source = config.getSrcDirectory() + "/" + file.getName();

            LOG.info("Current file {}, source {}", file.getName(), source);
            if (config.getExtractZipFiles() && file.getName().endsWith(".zip")) {
                copyZip(ftp, source, fileSystem, destination);
            } else {
                Path destinationPath = fileSystem.makeQualified(new Path(destination, file.getName()));
                LOG.debug("Downloading {} to {}", file.getName(), destinationPath.toString());
                try (OutputStream output = fileSystem.create(destinationPath)) {
                    InputStream is = ftp.retrieveFileStream(source);
                    ByteStreams.copy(is, output);
                }
            }
            if (!ftp.completePendingCommand()) {
                LOG.error("Error completing command.");
            }
        }
        ftp.logout();
    } finally {
        if (ftp.isConnected()) {
            try {
                ftp.disconnect();
            } catch (Throwable e) {
                LOG.error("Failure to disconnect the ftp connection.", e);
            }
        }
    }
}