Example usage for java.net URI getAuthority

List of usage examples for java.net URI getAuthority

Introduction

On this page you can find example usage for java.net.URI.getAuthority().

Prototype

public String getAuthority() 

Document

Returns the decoded authority component of this URI.
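
Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what getAuthority() returns for a typical URI, and how the decoded result differs from getRawAuthority() when the authority contains percent-escapes:

import java.net.URI;

public class GetAuthorityDemo {
    public static void main(String[] args) {
        // The authority is everything between "//" and the path: [user-info@]host[:port]
        URI uri = URI.create("hdfs://namenode.example.com:8020/warehouse/myTable/ds=1");
        System.out.println(uri.getScheme());    // hdfs
        System.out.println(uri.getAuthority()); // namenode.example.com:8020
        System.out.println(uri.getPath());      // /warehouse/myTable/ds=1

        // getAuthority() decodes percent-escaped octets; getRawAuthority() leaves them as-is
        URI escaped = URI.create("http://user%40corp@host.example.com/");
        System.out.println(escaped.getAuthority());    // user@corp@host.example.com
        System.out.println(escaped.getRawAuthority()); // user%40corp@host.example.com
    }
}

Note that getAuthority() returns null when the URI has no authority component, which is why the StatisticsFileSystem example below checks name.getAuthority() != null before rebuilding its URI.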

Usage

From source file: org.apache.hadoop.hive.ql.exec.DDLTask.java

private int unarchive(Hive db, AlterTableSimpleDesc simpleDesc) throws HiveException, URISyntaxException {

    Table tbl = db.getTable(simpleDesc.getTableName());

    // Means user specified a table, not a partition
    if (simpleDesc.getPartSpec() == null) {
        throw new HiveException("UNARCHIVE is for partitions only");
    }

    if (tbl.getTableType() != TableType.MANAGED_TABLE) {
        throw new HiveException("UNARCHIVE can only be performed on managed tables");
    }

    Map<String, String> partSpec = simpleDesc.getPartSpec();
    PartSpecInfo partSpecInfo = PartSpecInfo.create(tbl, partSpec);
    List<Partition> partitions = db.getPartitions(tbl, partSpec);

    int partSpecLevel = partSpec.size();

    Path originalDir = null;

    // When we have a partial partition specification we must assume the partitions
    // lie in the standard place - if they were in custom locations, putting
    // them into one archive would involve a massive amount of copying.
    // In the full partition specification case we allow custom locations
    // to keep backward compatibility.
    if (partitions.isEmpty()) {
        throw new HiveException("No partition matches the specification");
    } else if (partSpecInfo.values.size() != tbl.getPartCols().size()) {
        // for partial specifications we need partitions to follow the scheme
        for (Partition p : partitions) {
            if (partitionInCustomLocation(tbl, p)) {
                String message = String.format(
                        "UNARCHIVE cannot run for partition " + "groups with custom locations like %s",
                        p.getLocation());
                throw new HiveException(message);
            }
        }
        originalDir = partSpecInfo.createPath(tbl);
    } else {
        Partition p = partitions.get(0);
        if (ArchiveUtils.isArchived(p)) {
            originalDir = new Path(getOriginalLocation(p));
        } else {
            originalDir = new Path(p.getLocation());
        }
    }

    URI originalUri = ArchiveUtils.addSlash(originalDir.toUri());
    Path intermediateArchivedDir = new Path(originalDir.getParent(),
            originalDir.getName() + INTERMEDIATE_ARCHIVED_DIR_SUFFIX);
    Path intermediateExtractedDir = new Path(originalDir.getParent(),
            originalDir.getName() + INTERMEDIATE_EXTRACTED_DIR_SUFFIX);
    boolean recovery = false;
    if (pathExists(intermediateArchivedDir) || pathExists(intermediateExtractedDir)) {
        recovery = true;
        console.printInfo("Starting recovery after failed UNARCHIVE");
    }

    for (Partition p : partitions) {
        checkArchiveProperty(partSpecLevel, recovery, p);
    }

    String archiveName = "data.har";
    FileSystem fs = null;
    try {
        fs = originalDir.getFileSystem(conf);
    } catch (IOException e) {
        throw new HiveException(e);
    }

    // assume the archive is in the original dir, check if it exists
    Path archivePath = new Path(originalDir, archiveName);
    URI archiveUri = archivePath.toUri();
    ArchiveUtils.HarPathHelper harHelper = new ArchiveUtils.HarPathHelper(conf, archiveUri, originalUri);
    URI sourceUri = harHelper.getHarUri(originalUri);
    Path sourceDir = new Path(sourceUri.getScheme(), sourceUri.getAuthority(), sourceUri.getPath());

    if (!pathExists(intermediateArchivedDir) && !pathExists(archivePath)) {
        throw new HiveException("Haven't found any archive where it should be");
    }

    Path tmpPath = driverContext.getCtx().getExternalTmpPath(originalDir);

    try {
        fs = tmpPath.getFileSystem(conf);
    } catch (IOException e) {
        throw new HiveException(e);
    }

    // Clarification of terms:
    // - The originalDir directory is the original directory of the partitions'
    //   files. It now contains an archived version of those files,
    //   e.g. hdfs:/warehouse/myTable/ds=1/
    // - The source directory is the directory containing all the files that
    //   should be in the partitions. e.g. har:/warehouse/myTable/ds=1/myTable.har/
    //   Note the har:/ scheme

    // Steps:
    // 1. Extract the archive in a temporary folder
    // 2. Move the extracted files to an intermediate dir in the same parent
    //    dir as originalLocation. Call the new dir intermediate-extracted.
    // 3. Rename the original partitions dir to an intermediate dir. Call the
    //    renamed dir intermediate-archive
    // 4. Rename intermediate-extracted to the original partitions dir
    // 5. Change the metadata
    // 6. Delete the archived partition files in intermediate-archive

    if (!pathExists(intermediateExtractedDir) && !pathExists(intermediateArchivedDir)) {
        try {

            // Copy the files out of the archive into the temporary directory
            String copySource = sourceDir.toString();
            String copyDest = tmpPath.toString();
            List<String> args = new ArrayList<String>();
            args.add("-cp");
            args.add(copySource);
            args.add(copyDest);

            console.printInfo("Copying " + copySource + " to " + copyDest);
            FileSystem srcFs = FileSystem.get(sourceDir.toUri(), conf);
            srcFs.initialize(sourceDir.toUri(), conf);

            FsShell fss = new FsShell(conf);
            int ret = 0;
            try {
                ret = ToolRunner.run(fss, args.toArray(new String[0]));
            } catch (Exception e) {
                e.printStackTrace();
                throw new HiveException(e);
            }

            if (ret != 0) {
                throw new HiveException("Error while copying files from archive, return code=" + ret);
            } else {
                console.printInfo("Successfully Copied " + copySource + " to " + copyDest);
            }

            console.printInfo("Moving " + tmpPath + " to " + intermediateExtractedDir);
            if (fs.exists(intermediateExtractedDir)) {
                throw new HiveException(
                        "Invalid state: the intermediate extracted " + "directory already exists.");
            }
            fs.rename(tmpPath, intermediateExtractedDir);
        } catch (Exception e) {
            throw new HiveException(e);
        }
    }

    // At this point, we know that the extracted files are in the intermediate
    // extracted dir, or in the original directory.

    if (!pathExists(intermediateArchivedDir)) {
        try {
            console.printInfo("Moving " + originalDir + " to " + intermediateArchivedDir);
            fs.rename(originalDir, intermediateArchivedDir);
        } catch (IOException e) {
            throw new HiveException(e);
        }
    } else {
        console.printInfo(intermediateArchivedDir + " already exists. "
                + "Assuming it contains the archived version of the partition");
    }

    // If there is a failure from here to until when the metadata is changed,
    // the partition will be empty or throw errors on read.

    // If the original location exists here, then it must contain the extracted
    // files, because in the previous step we moved the previous original location
    // (containing the archived version of the files) to intermediateArchivedDir
    if (!pathExists(originalDir)) {
        try {
            console.printInfo("Moving " + intermediateExtractedDir + " to " + originalDir);
            fs.rename(intermediateExtractedDir, originalDir);
        } catch (IOException e) {
            throw new HiveException(e);
        }
    } else {
        console.printInfo(originalDir + " already exists. "
                + "Assuming it contains the extracted files in the partition");
    }

    for (Partition p : partitions) {
        setUnArchived(p);
        try {
            db.alterPartition(simpleDesc.getTableName(), p, null);
        } catch (InvalidOperationException e) {
            throw new HiveException(e);
        }
    }

    // If a failure happens here, the intermediate archive files won't be
    // deleted. The user will need to call unarchive again to clear those up.
    if (pathExists(intermediateArchivedDir)) {
        deleteDir(intermediateArchivedDir);
    }

    if (recovery) {
        console.printInfo("Recovery after UNARCHIVE succeeded");
    }

    return 0;
}
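
In the method above, getAuthority() appears where the har source directory is rebuilt piece by piece: new Path(sourceUri.getScheme(), sourceUri.getAuthority(), sourceUri.getPath()). Below is a standalone sketch of that decompose-and-rebuild idiom; the har-style URI is illustrative (in DDLTask the real one comes from ArchiveUtils.HarPathHelper), while the three-argument Path constructor is the standard Hadoop one:

import java.net.URI;

import org.apache.hadoop.fs.Path;

public class HarPathSketch {
    public static void main(String[] args) {
        // Illustrative har URI; in DDLTask it is produced by harHelper.getHarUri(originalUri)
        URI sourceUri = URI.create("har://hdfs-namenode:8020/warehouse/myTable/ds=1/data.har");
        Path sourceDir = new Path(sourceUri.getScheme(),    // "har"
                sourceUri.getAuthority(),                   // "hdfs-namenode:8020"
                sourceUri.getPath());                       // "/warehouse/myTable/ds=1/data.har"
        System.out.println(sourceDir); // har://hdfs-namenode:8020/warehouse/myTable/ds=1/data.har
    }
}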

From source file: com.cloud.hypervisor.xenserver.resource.CitrixResourceBase.java

public Network getNetwork(final Connection conn, final NicTO nic) throws XenAPIException, XmlRpcException {
    final String name = nic.getName();
    final XsLocalNetwork network = getNativeNetworkForTraffic(conn, nic.getType(), name);
    if (network == null) {
        s_logger.error("Network is not configured on the backend for nic " + nic.toString());
        throw new CloudRuntimeException(
                "Network for the backend is not configured correctly for network broadcast domain: "
                        + nic.getBroadcastUri());
    }
    final URI uri = nic.getBroadcastUri();
    final BroadcastDomainType type = nic.getBroadcastType();
    if (uri != null && uri.toString().contains("untagged")) {
        return network.getNetwork();
    } else if (uri != null && type == BroadcastDomainType.Vlan) {
        assert BroadcastDomainType.getSchemeValue(uri) == BroadcastDomainType.Vlan;
        final long vlan = Long.parseLong(BroadcastDomainType.getValue(uri));
        return enableVlanNetwork(conn, vlan, network);
    } else if (type == BroadcastDomainType.Native || type == BroadcastDomainType.LinkLocal
            || type == BroadcastDomainType.Vsp) {
        return network.getNetwork();
    } else if (uri != null && type == BroadcastDomainType.Vswitch) {
        final String header = uri.toString()
                .substring(Networks.BroadcastDomainType.Vswitch.scheme().length() + "://".length());
        if (header.startsWith("vlan")) {
            _isOvs = true;
            return setupvSwitchNetwork(conn);
        } else {
            return findOrCreateTunnelNetwork(conn, getOvsTunnelNetworkName(uri.getAuthority()));
        }
    } else if (type == BroadcastDomainType.Storage) {
        if (uri == null) {
            return network.getNetwork();
        } else {
            final long vlan = Long.parseLong(BroadcastDomainType.getValue(uri));
            return enableVlanNetwork(conn, vlan, network);
        }
    } else if (type == BroadcastDomainType.Lswitch) {
        // Nicira Logical Switch
        return network.getNetwork();
    } else if (uri != null && type == BroadcastDomainType.Pvlan) {
        assert BroadcastDomainType.getSchemeValue(uri) == BroadcastDomainType.Pvlan;
        // should we consider moving this NetUtils method to
        // BroadcastDomainType?
        final long vlan = Long.parseLong(NetUtils.getPrimaryPvlanFromUri(uri));
        return enableVlanNetwork(conn, vlan, network);
    }

    throw new CloudRuntimeException(
            "Unable to support this type of network broadcast domain: " + nic.getBroadcastUri());
}
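
In the Vswitch branch above, getAuthority() extracts the tunnel network name from the broadcast URI before it is passed to getOvsTunnelNetworkName(). Here is a standalone sketch of that parsing step using java.net.URI alone; the vswitch:// value is illustrative, not taken from CloudStack:

import java.net.URI;

public class BroadcastUriSketch {
    public static void main(String[] args) {
        // Illustrative broadcast URI; in CitrixResourceBase it comes from nic.getBroadcastUri()
        URI uri = URI.create("vswitch://tunnel-net-7");
        String header = uri.toString().substring("vswitch".length() + "://".length());
        if (header.startsWith("vlan")) {
            System.out.println("vlan-backed vswitch network");
        } else {
            // The authority component carries the tunnel network identifier
            System.out.println("tunnel network: " + uri.getAuthority()); // tunnel-net-7
        }
    }
}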

From source file: de.zib.sfs.StatisticsFileSystem.java

@Override
public synchronized void initialize(URI name, Configuration conf) throws IOException {
    if (this.initialized) {
        LOG.warn("Ignoring attempt to re-initialize file system.");
        return;
    }

    super.initialize(name, conf);
    setConf(conf);

    String hostname = System.getProperty("de.zib.sfs.hostname");
    if (hostname == null) {
        LOG.warn("'de.zib.sfs.hostname' not set, did the agent start properly?");

        // Obtain hostname, preferably via executing hostname
        Process hostnameProcess = Runtime.getRuntime().exec("hostname");
        try {
            int exitCode = hostnameProcess.waitFor();
            if (exitCode != 0) {
                LOG.warn("'hostname' returned " + exitCode + ", using $HOSTNAME instead.");
                hostname = System.getenv("HOSTNAME");
            } else {
                try (BufferedReader reader = new BufferedReader(
                        new InputStreamReader(hostnameProcess.getInputStream()))) {

                    StringBuilder hostnameBuilder = new StringBuilder();
                    String line = "";
                    while ((line = reader.readLine()) != null) {
                        hostnameBuilder.append(line);
                    }
                    hostname = hostnameBuilder.toString();
                }
            }
        } catch (InterruptedException e) {
            LOG.warn("Error executing 'hostname', using $HOSTNAME instead.", e);
            hostname = System.getenv("HOSTNAME");
        }

        System.setProperty("de.zib.sfs.hostname", hostname);
    }

    if (LOG.isDebugEnabled()) {
        LOG.debug("Running on " + hostname + ".");
    }

    if (System.getProperty("de.zib.sfs.pid") == null) {
        LOG.warn("'de.zib.sfs.pid' not set, did the agent start properly?");

        // use negative random number to indicate it's no real PID
        int pid = -new Random().nextInt(Integer.MAX_VALUE);
        System.setProperty("de.zib.sfs.pid", Integer.toString(pid));
    }

    if (System.getProperty("de.zib.sfs.key") == null) {
        LOG.warn("'de.zib.sfs.key' not set, did the agent start properly?");
        System.setProperty("de.zib.sfs.key", "sfs");
    }

    if (System.getProperty("de.zib.sfs.timeBin.duration") == null) {
        LOG.warn("'de.zib.sfs.timeBin.duration' not set, did the agent start properly?");
        System.setProperty("de.zib.sfs.timeBin.duration", "1000");
    }

    if (System.getProperty("de.zib.sfs.timeBin.cacheSize") == null) {
        LOG.warn("'de.zib.sfs.timeBin.cacheSize' not set, did the agent start properly?");
        System.setProperty("de.zib.sfs.timeBin.cacheSize", "30");
    }

    if (System.getProperty("de.zib.sfs.output.directory") == null) {
        LOG.warn("'de.zib.sfs.output.directory' not set, did the agent start properly?");
        System.setProperty("de.zib.sfs.output.directory", getConf().get(SFS_OUTPUT_DIRECTORY_KEY, "/tmp"));
    }

    if (System.getProperty("de.zib.sfs.output.format") == null) {
        LOG.warn("'de.zib.sfs.output.format' not set, did the agent start properly?");
        OutputFormat outputFormat = OutputFormat
                .valueOf(getConf().get(SFS_OUTPUT_FORMAT_KEY, OutputFormat.BB.name()).toUpperCase());
        System.setProperty("de.zib.sfs.output.format", outputFormat.name());
    }

    if (System.getProperty("de.zib.sfs.traceFds") == null) {
        LOG.warn("'de.zib.sfs.traceFds' not set, did the agent start properly?");
        System.setProperty("de.zib.sfs.traceFds",
                getConf().getBoolean(SFS_TRACE_FDS_KEY, false) ? "true" : "false");
    }

    LiveOperationStatisticsAggregator.instance.initialize();
    if (LOG.isDebugEnabled()) {
        LOG.debug("Initialized file system statistics aggregator.");
    }

    // Obtain the file system class we want to wrap
    String wrappedFSClassName = getConf().get(SFS_WRAPPED_FS_CLASS_NAME_KEY);
    if (wrappedFSClassName == null) {
        throw new RuntimeException(SFS_WRAPPED_FS_CLASS_NAME_KEY + " not specified");
    }
    this.wrappedFSScheme = getConf().get(SFS_WRAPPED_FS_SCHEME_KEY);
    if (this.wrappedFSScheme == null) {
        throw new RuntimeException(SFS_WRAPPED_FS_SCHEME_KEY + " not specified");
    }

    Class<?> wrappedFSClass;
    try {
        wrappedFSClass = Class.forName(wrappedFSClassName);
    } catch (Exception e) {
        throw new RuntimeException("Error obtaining class '" + wrappedFSClassName + "'", e);
    }

    // Figure out what kind of file system we are wrapping.
    if (wrappedFSClassName.startsWith("org.apache.hadoop")
            || wrappedFSClassName.startsWith("org.xtreemfs.common.clients.hadoop")) {
        try {
            // Wrap Hadoop file system directly.
            this.wrappedFS = wrappedFSClass.asSubclass(FileSystem.class).newInstance();
        } catch (Exception e) {
            throw new RuntimeException("Error instantiating Hadoop class '" + wrappedFSClassName + "'", e);
        }
    } else {
        throw new RuntimeException("Unsupported file system class '" + wrappedFSClassName + "'");
    }

    if (LOG.isDebugEnabled()) {
        LOG.debug("Wrapping file system '" + this.wrappedFS.getClass().getName() + "' with scheme '"
                + this.wrappedFSScheme + "' as '" + getScheme() + "'.");
        LOG.debug("You can change it by setting '" + SFS_WRAPPED_FS_CLASS_NAME_KEY + "'.");
    }

    if (name.getAuthority() != null) {
        this.fileSystemUri = URI.create(getScheme() + "://" + name.getAuthority() + "/");
    } else {
        this.fileSystemUri = URI.create(getScheme() + ":///");
    }

    // Finally initialize the wrapped file system with the unwrapped name.
    URI wrappedFSUri = replaceUriScheme(name, getScheme(), this.wrappedFSScheme);
    if (LOG.isDebugEnabled()) {
        LOG.debug("Initializing wrapped file system with URI '" + wrappedFSUri + "'.");
    }
    this.wrappedFS.initialize(wrappedFSUri, conf);

    // Add shutdown hook that closes this file system
    Runtime.getRuntime().addShutdownHook(new Thread() {
        @Override
        public void run() {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Running shutdown hook.");
            }

            try {
                StatisticsFileSystem.this.close(true);
            } catch (IOException e) {
                LOG.error("Could not close file system.", e);
            }
        }
    });

    String instrumentationSkip = getConf().get(SFS_INSTRUMENTATION_SKIP_KEY);
    if (instrumentationSkip != null) {
        this.skipRead = instrumentationSkip.contains("r");
        this.skipWrite = instrumentationSkip.contains("w");
        this.skipOther = instrumentationSkip.contains("o");
    }

    this.initialized = true;
}
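
In initialize() above, name.getAuthority() is used to rebuild the file system URI under the wrapping scheme while keeping the host:port part, falling back to scheme:/// when the incoming URI has no authority. A self-contained sketch of that rebuild; the sfs scheme and URI value are illustrative:

import java.net.URI;

public class FsUriRebuildSketch {
    public static void main(String[] args) {
        String scheme = "sfs"; // illustrative; StatisticsFileSystem takes this from getScheme()
        URI name = URI.create("sfs://namenode.example.com:8020/user/data");

        // Keep host:port if present, otherwise produce an authority-less URI
        URI fileSystemUri = name.getAuthority() != null
                ? URI.create(scheme + "://" + name.getAuthority() + "/")
                : URI.create(scheme + ":///");
        System.out.println(fileSystemUri); // sfs://namenode.example.com:8020/
    }
}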