Example usage for java.net.URI.getAuthority()

List of usage examples for java.net.URI.getAuthority()

Introduction

On this page you can find example usage for java.net.URI.getAuthority().

Prototype

public String getAuthority() 

Document

Returns the decoded authority component of this URI.
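
A minimal standalone sketch of what the method returns (the URL below is a made-up example): the authority component spans the user-info, host, and port, and is null for URIs that have no authority part at all.

import java.net.URI;

public class GetAuthorityDemo {
    public static void main(String[] args) {
        // Authority = user-info + host + port.
        URI uri = URI.create("http://user:secret@example.com:8080/docs?x=1#top");

        System.out.println(uri.getAuthority()); // user:secret@example.com:8080
        System.out.println(uri.getHost());      // example.com
        System.out.println(uri.getPort());      // 8080

        // A URI without an authority component ("//...") yields null.
        System.out.println(URI.create("mailto:a@b.com").getAuthority()); // null
    }
}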

Usage

From source file:org.apache.camel.component.avro.AvroConfiguration.java

public void parseURI(URI uri, Map<String, Object> parameters, AvroComponent component) throws Exception {
    transport = uri.getScheme();

    if ((!AVRO_HTTP_TRANSPORT.equalsIgnoreCase(transport))
            && (!AVRO_NETTY_TRANSPORT.equalsIgnoreCase(transport))) {
        throw new IllegalArgumentException("Unrecognized Avro IPC transport: " + protocol + " for uri: " + uri);
    }

    setHost(uri.getHost());
    setPort(uri.getPort());

    if ((uri.getPath() != null) && (StringUtils.indexOf(uri.getPath(), AVRO_MESSAGE_NAME_SEPARATOR) != -1)) {
        String path = StringUtils.substringAfter(uri.getPath(), AVRO_MESSAGE_NAME_SEPARATOR);
        if (!path.contains(AVRO_MESSAGE_NAME_SEPARATOR)) {
            setMessageName(path);
        } else {
            throw new IllegalArgumentException("Unrecognized Avro message name: " + path + " for uri: " + uri);
        }
    }

    setUriAuthority(uri.getAuthority());
}

From source file:com.microsoft.tfs.client.common.ui.dialogs.connect.ACSCredentialsDialog.java

protected URI getSignInURI(final URI serverSigninURL) {
    String query = serverSigninURL.getQuery();

    if (query.indexOf("protocol=") < 0) //$NON-NLS-1$
    {
        query += "&protocol=javascriptnotify"; //$NON-NLS-1$
    }

    if (query.indexOf("force=") < 0) //$NON-NLS-1$
    {
        query += "&force=1"; //$NON-NLS-1$
    }

    if (query.indexOf("compact=") < 0) //$NON-NLS-1$
    {
        query += "&compact=1"; //$NON-NLS-1$
    }

    return URIUtils.newURI(serverSigninURL.getScheme(), serverSigninURL.getAuthority(),
            serverSigninURL.getPath(), query, serverSigninURL.getFragment());
}

From source file:fr.gael.dhus.olingo.ODataClient.java

/**
 * Creates an ODataClient for the given service
 * and credentials (HTTP Basic authentication).
 *
 * @param url a URL to an OData service;
 *    it does not have to be the root service URL.
 *    This parameter must follow this syntax:
 *    {@code odata://hostname:port/path/...}
 * @param username Username
 * @param password Password
 * 
 * @throws URISyntaxException when the {@code url} parameter is invalid.
 * @throws IOException when the ODataClient fails to contact the server
 *    at {@code url}.
 * @throws ODataException when no OData service has been found at the
 *    given URL.
 */
public ODataClient(String url, String username, String password)
        throws URISyntaxException, IOException, ODataException {
    this.username = username;
    this.password = password;

    // Find the service root URL and retrieve the Entity Data Model (EDM).
    URI uri = new URI(url);
    String metadata = "/$metadata";

    URI svc = null;
    Edm edm = null;

    String[] pathSegments = uri.getPath().split("/");
    StringBuilder sb = new StringBuilder();

    // Try each path prefix as a candidate service root URL.
    for (int i = 1; i < pathSegments.length; i++) {
        sb.append('/').append(pathSegments[i]).append(metadata);
        svc = new URI(uri.getScheme(), uri.getAuthority(), sb.toString(), null, null);
        sb.delete(sb.length() - metadata.length(), sb.length());

        // Test if `svc` is the service root URL.
        try {
            InputStream content = execute(svc.toString(), ContentType.APPLICATION_XML, "GET");

            edm = EntityProvider.readMetadata(content, false);
            svc = new URI(uri.getScheme(), uri.getAuthority(), sb.toString(), null, null);

            break;
        } catch (HttpException | EntityProviderException e) {
            LOGGER.debug("URL not root " + svc, e);
        }
    }

    // No OData service was found at the given URL.
    if (svc == null || edm == null)
        throw new ODataException("No service found at " + url);

    this.serviceRoot = svc;
    this.serviceEDM = edm;
    this.uriParser = RuntimeDelegate.getUriParser(edm);
}

From source file:com.streamsets.pipeline.stage.origin.hdfs.cluster.ClusterHdfsSource.java

private void validateHadoopFS(List<ConfigIssue> issues) {
    boolean validHadoopFsUri = true;
    hadoopConf = getHadoopConfiguration(issues);
    String hdfsUriInConf;
    if (conf.hdfsUri != null && !conf.hdfsUri.isEmpty()) {
        hadoopConf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, conf.hdfsUri);
    } else {
        hdfsUriInConf = hadoopConf.get(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY);
        if (hdfsUriInConf == null) {
            issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(),
                    ClusterHdfsConfigBean.CLUSTER_HDFS_CONFIG_BEAN_PREFIX + "hdfsUri", Errors.HADOOPFS_19));
            return;
        } else {
            conf.hdfsUri = hdfsUriInConf;
        }
    }
    if (conf.hdfsUri.contains("://")) {
        try {
            URI uri = new URI(conf.hdfsUri);
            if (!"hdfs".equals(uri.getScheme())) {
                issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(),
                        ClusterHdfsConfigBean.CLUSTER_HDFS_CONFIG_BEAN_PREFIX + "hdfsUri", Errors.HADOOPFS_12,
                        conf.hdfsUri, uri.getScheme()));
                validHadoopFsUri = false;
            } else if (uri.getAuthority() == null) {
                issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(),
                        ClusterHdfsConfigBean.CLUSTER_HDFS_CONFIG_BEAN_PREFIX + "hdfsUri", Errors.HADOOPFS_13,
                        conf.hdfsUri));
                validHadoopFsUri = false;
            }
        } catch (Exception ex) {
            issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(),
                    ClusterHdfsConfigBean.CLUSTER_HDFS_CONFIG_BEAN_PREFIX + "hdfsUri", Errors.HADOOPFS_22,
                    conf.hdfsUri, ex.getMessage(), ex));
            validHadoopFsUri = false;
        }
    } else {
        issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(),
                ClusterHdfsConfigBean.CLUSTER_HDFS_CONFIG_BEAN_PREFIX + "hdfsUri", Errors.HADOOPFS_02,
                conf.hdfsUri));
        validHadoopFsUri = false;
    }

    StringBuilder logMessage = new StringBuilder();
    try {
        loginUgi = HadoopSecurityUtil.getLoginUser(hadoopConf);
        if (conf.hdfsKerberos) {
            logMessage.append("Using Kerberos");
            if (loginUgi.getAuthenticationMethod() != UserGroupInformation.AuthenticationMethod.KERBEROS) {
                issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(),
                        ClusterHdfsConfigBean.CLUSTER_HDFS_CONFIG_BEAN_PREFIX + "hdfsKerberos",
                        Errors.HADOOPFS_00, loginUgi.getAuthenticationMethod(),
                        UserGroupInformation.AuthenticationMethod.KERBEROS));
            }
        } else {
            logMessage.append("Using Simple");
            hadoopConf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
                    UserGroupInformation.AuthenticationMethod.SIMPLE.name());
        }
        if (validHadoopFsUri) {
            getUGI().doAs(new PrivilegedExceptionAction<Void>() {
                @Override
                public Void run() throws Exception {
                    try (FileSystem fs = getFileSystemForInitDestroy()) { //to trigger the close
                    }
                    return null;
                }
            });
        }
    } catch (Exception ex) {
        LOG.info("Error connecting to FileSystem: " + ex, ex);
        issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), null, Errors.HADOOPFS_11,
                conf.hdfsUri, String.valueOf(ex), ex));
    }
    LOG.info("Authentication Config: " + logMessage);
}

From source file:org.gridgain.grid.ggfs.hadoop.v2.GridGgfsHadoopFileSystem.java

/**
 * Convert path using the given new URI.
 *
 * @param path Old path.
 * @param newUri New URI.
 * @return New path.
 */
private Path convertPath(Path path, URI newUri) {
    assert newUri != null;

    if (path != null) {
        URI pathUri = path.toUri();

        try {
            return new Path(new URI(pathUri.getScheme() != null ? newUri.getScheme() : null,
                    pathUri.getAuthority() != null ? newUri.getAuthority() : null, pathUri.getPath(), null,
                    null));
        } catch (URISyntaxException e) {
            throw new GridRuntimeException(
                    "Failed to construct secondary file system path from the primary file " + "system path: "
                            + path,
                    e);
        }
    } else
        return null;
}

From source file:com.buaa.cfs.fs.AbstractFileSystem.java

/**
 * Check that a Path belongs to this FileSystem.
 * <p>
 * If the path is a fully qualified URI, then its scheme and authority must match those of this file system.
 * Otherwise the path must be a slash-relative name.
 *
 * @throws InvalidPathException if the path is invalid
 */
public void checkPath(Path path) {
    URI uri = path.toUri();
    String thatScheme = uri.getScheme();
    String thatAuthority = uri.getAuthority();
    if (thatScheme == null) {
        if (thatAuthority == null) {
            if (path.isUriPathAbsolute()) {
                return;
            }
            throw new InvalidPathException("relative paths not allowed:" + path);
        } else {
            throw new InvalidPathException("Path without scheme with non-null authority:" + path);
        }
    }
    String thisScheme = this.getUri().getScheme();
    String thisHost = this.getUri().getHost();
    String thatHost = uri.getHost();

    // Schemes and hosts must match.
    // Allow for null Authority for file:///
    if (!thisScheme.equalsIgnoreCase(thatScheme) || (thisHost != null && !thisHost.equalsIgnoreCase(thatHost))
            || (thisHost == null && thatHost != null)) {
        throw new InvalidPathException("Wrong FS: " + path + ", expected: " + this.getUri());
    }

    // Ports must match, unless this FS instance is using the default port, in
    // which case the port may be omitted from the given URI
    int thisPort = this.getUri().getPort();
    int thatPort = uri.getPort();
    if (thatPort == -1) { // -1 => defaultPort of Uri scheme
        thatPort = this.getUriDefaultPort();
    }
    if (thisPort != thatPort) {
        throw new InvalidPathException("Wrong FS: " + path + ", expected: " + this.getUri());
    }
}
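
Two java.net.URI behaviors that checkPath() relies on can be confirmed directly: a file:/// URI carries a null authority, and an absent port is reported as -1 (which the code above maps to the filesystem's default port). A short sketch, using a made-up host name:

import java.net.URI;

public class DefaultPortDemo {
    public static void main(String[] args) {
        // file:/// URIs carry no authority at all ...
        URI local = URI.create("file:///tmp/data");
        System.out.println(local.getAuthority()); // null

        // ... and an absent port is reported as -1, which checkPath()
        // above maps to the filesystem's default port.
        URI noPort = URI.create("hdfs://namenode.example/user");
        System.out.println(noPort.getPort()); // -1
    }
}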

From source file:org.apache.hadoop.hive.ql.Context.java

/**
 * Gets a temporary staging directory related to a path.
 * If a path already contains a staging directory, then it returns the current directory; otherwise
 * it creates the directory if needed.
 *
 * @param inputPath URI of the temporary directory
 * @param mkdir Create the directory if True.
 * @return A temporary path.
 */
private Path getStagingDir(Path inputPath, boolean mkdir) {
    final URI inputPathUri = inputPath.toUri();
    final String inputPathName = inputPathUri.getPath();
    final String fileSystem = inputPathUri.getScheme() + ":" + inputPathUri.getAuthority();
    final FileSystem fs;

    try {
        fs = inputPath.getFileSystem(conf);
    } catch (IOException e) {
        throw new IllegalStateException("Error getting FileSystem for " + inputPath + ": " + e, e);
    }

    String stagingPathName;
    if (inputPathName.indexOf(stagingDir) == -1) {
        stagingPathName = new Path(inputPathName, stagingDir).toString();
    } else {
        stagingPathName = inputPathName.substring(0, inputPathName.indexOf(stagingDir) + stagingDir.length());
    }

    final String key = fileSystem + "-" + stagingPathName + "-" + TaskRunner.getTaskRunnerID();

    Path dir = fsScratchDirs.get(key);
    if (dir == null) {
        // Append task specific info to stagingPathName, instead of creating a sub-directory.
        // This way we don't have to worry about deleting the stagingPathName separately at
        // end of query execution.
        dir = fs.makeQualified(
                new Path(stagingPathName + "_" + this.executionId + "-" + TaskRunner.getTaskRunnerID()));

        LOG.debug("Created staging dir = " + dir + " for path = " + inputPath);

        if (mkdir) {
            try {
                if (!FileUtils.mkdir(fs, dir, conf)) {
                    throw new IllegalStateException(
                            "Cannot create staging directory  '" + dir.toString() + "'");
                }

                if (isHDFSCleanup) {
                    fs.deleteOnExit(dir);
                }
            } catch (IOException e) {
                throw new RuntimeException(
                        "Cannot create staging directory '" + dir.toString() + "': " + e.getMessage(), e);
            }
        }

        fsScratchDirs.put(key, dir);
    }

    return dir;
}

From source file:org.gridgain.grid.ggfs.hadoop.v2.GridGgfsHadoopFileSystem.java

/** {@inheritDoc} */
@Override
public void checkPath(Path path) {
    URI uri = path.toUri();

    if (uri.isAbsolute()) {
        if (!F.eq(uri.getScheme(), GGFS_SCHEME))
            throw new InvalidPathException(
                    "Wrong path scheme [expected=" + GGFS_SCHEME + ", actual=" + uri.getAuthority() + ']');

        if (!F.eq(uri.getAuthority(), uriAuthority))
            throw new InvalidPathException(
                    "Wrong path authority [expected=" + uriAuthority + ", actual=" + uri.getAuthority() + ']');
    }
}

From source file:com.facebook.presto.hive.PrestoS3FileSystem.java

@Override
public void initialize(URI uri, Configuration conf) throws IOException {
    requireNonNull(uri, "uri is null");
    requireNonNull(conf, "conf is null");
    super.initialize(uri, conf);
    setConf(conf);

    this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority());
    this.workingDirectory = new Path(PATH_SEPARATOR).makeQualified(this.uri, new Path(PATH_SEPARATOR));

    HiveS3Config defaults = new HiveS3Config();
    this.stagingDirectory = new File(
            conf.get(S3_STAGING_DIRECTORY, defaults.getS3StagingDirectory().toString()));
    this.maxAttempts = conf.getInt(S3_MAX_CLIENT_RETRIES, defaults.getS3MaxClientRetries()) + 1;
    this.maxBackoffTime = Duration
            .valueOf(conf.get(S3_MAX_BACKOFF_TIME, defaults.getS3MaxBackoffTime().toString()));
    this.maxRetryTime = Duration.valueOf(conf.get(S3_MAX_RETRY_TIME, defaults.getS3MaxRetryTime().toString()));
    int maxErrorRetries = conf.getInt(S3_MAX_ERROR_RETRIES, defaults.getS3MaxErrorRetries());
    boolean sslEnabled = conf.getBoolean(S3_SSL_ENABLED, defaults.isS3SslEnabled());
    Duration connectTimeout = Duration
            .valueOf(conf.get(S3_CONNECT_TIMEOUT, defaults.getS3ConnectTimeout().toString()));
    Duration socketTimeout = Duration
            .valueOf(conf.get(S3_SOCKET_TIMEOUT, defaults.getS3SocketTimeout().toString()));
    int maxConnections = conf.getInt(S3_MAX_CONNECTIONS, defaults.getS3MaxConnections());
    long minFileSize = conf.getLong(S3_MULTIPART_MIN_FILE_SIZE, defaults.getS3MultipartMinFileSize().toBytes());
    long minPartSize = conf.getLong(S3_MULTIPART_MIN_PART_SIZE, defaults.getS3MultipartMinPartSize().toBytes());
    this.useInstanceCredentials = conf.getBoolean(S3_USE_INSTANCE_CREDENTIALS,
            defaults.isS3UseInstanceCredentials());
    this.pinS3ClientToCurrentRegion = conf.getBoolean(S3_PIN_CLIENT_TO_CURRENT_REGION,
            defaults.isPinS3ClientToCurrentRegion());
    this.sseEnabled = conf.getBoolean(S3_SSE_ENABLED, defaults.isS3SseEnabled());
    this.sseType = PrestoS3SseType.valueOf(conf.get(S3_SSE_TYPE, defaults.getS3SseType().name()));
    this.sseKmsKeyId = conf.get(S3_SSE_KMS_KEY_ID, defaults.getS3SseKmsKeyId());
    String userAgentPrefix = conf.get(S3_USER_AGENT_PREFIX, defaults.getS3UserAgentPrefix());

    ClientConfiguration configuration = new ClientConfiguration().withMaxErrorRetry(maxErrorRetries)
            .withProtocol(sslEnabled ? Protocol.HTTPS : Protocol.HTTP)
            .withConnectionTimeout(toIntExact(connectTimeout.toMillis()))
            .withSocketTimeout(toIntExact(socketTimeout.toMillis())).withMaxConnections(maxConnections)
            .withUserAgentPrefix(userAgentPrefix).withUserAgentSuffix(S3_USER_AGENT_SUFFIX);

    this.s3 = createAmazonS3Client(uri, conf, configuration);

    transferConfig.setMultipartUploadThreshold(minFileSize);
    transferConfig.setMinimumUploadPartSize(minPartSize);
}

From source file:net.billylieurance.azuresearch.AbstractAzureSearchQuery.java

/**
 * Run the query that has been set up in this instance.
 * Next step would be to get the results with {@link #getQueryResult()}.
 */
public void doQuery() {
    DefaultHttpClient client = new DefaultHttpClient();

    client.getCredentialsProvider().setCredentials(
            new AuthScope(_targetHost.getHostName(), _targetHost.getPort()),
            new UsernamePasswordCredentials(this.getAppid(), this.getAppid()));

    URI uri;
    try {
        String full_path = getQueryPath();
        String full_query = getUrlQuery();
        uri = new URI(AZURESEARCH_SCHEME, AZURESEARCH_AUTHORITY, full_path, full_query, null);
        // Bing and java URI disagree about how to represent + in query
        // parameters. This is what we have to do instead...
        uri = new URI(uri.getScheme() + "://" + uri.getAuthority() + uri.getPath() + "?"
                + uri.getRawQuery().replace("+", "%2b").replace("'", "%27"));
    } catch (URISyntaxException e1) {
        e1.printStackTrace();
        return;
    }

    HttpGet get = new HttpGet(uri);

    get.addHeader("Accept", "application/xml");
    get.addHeader("Content-Type", "application/xml");

    try {
        _responsePost = client.execute(get);
        _resEntity = _responsePost.getEntity();

        if (this.getProcessHTTPResults()) {
            _rawResult = loadXMLFromStream(_resEntity.getContent());
            this.loadResultsFromRawResults();
        }
        // Adding an automatic HTTP Result to String really requires
        // Apache Commons IO. That would break
        // Android compatibility. I'm not going to do that unless I
        // re-implement IOUtils.
    } catch (ClientProtocolException e) {
        e.printStackTrace();
    } catch (IOException e) {
        e.printStackTrace();
    } catch (IllegalStateException e) {
        e.printStackTrace();
    }

}
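
The '+' workaround in doQuery() comes from a quirk of java.net.URI that is easy to reproduce on its own: the multi-argument constructor percent-encodes only illegal characters, and '+' is legal in a query, so it passes through untouched. A minimal sketch under that assumption, with a made-up host:

import java.net.URI;
import java.net.URISyntaxException;

public class PlusInQueryDemo {
    public static void main(String[] args) throws URISyntaxException {
        // The multi-argument constructor escapes illegal characters only;
        // '+' is legal in a query, so it is not converted to %2b.
        URI built = new URI("https", "api.example.com", "/search", "q=a+b", null);
        System.out.println(built.getRawQuery()); // q=a+b

        // Rebuild from the parsed pieces, percent-encoding '+' by hand,
        // reusing getAuthority() to keep host and port intact.
        URI fixed = new URI(built.getScheme() + "://" + built.getAuthority()
                + built.getPath() + "?" + built.getRawQuery().replace("+", "%2b"));
        System.out.println(fixed); // https://api.example.com/search?q=a%2bb
    }
}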