com.archivas.clienttools.arcutils.impl.adapter.HCAPAdapter.java Source code

Introduction

Here is the source code for com.archivas.clienttools.arcutils.impl.adapter.HCAPAdapter.java
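
HCAPAdapter is the abstract base class for the HTTP storage adapters used by the HCP Data Migrator (HCP-DM) client tools. It implements the common StorageAdapter operations -- existence checks, deletes, input streams, capacity and directory queries, connection testing -- on top of Apache HttpClient 4.x, and leaves version-specific behavior (writing objects and metadata, delete query parameters, the error header name) to concrete subclasses such as Hcap2Adapter. A brief, hypothetical usage sketch follows the listing.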

Source

// Copyright 2007 Hitachi Data Systems
// All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.

package com.archivas.clienttools.arcutils.impl.adapter;

import com.archivas.clienttools.arcutils.api.jobs.DeleteJob;
import com.archivas.clienttools.arcutils.config.HCPMoverConstants;
import com.archivas.clienttools.arcutils.config.HCPMoverProperties;
import com.archivas.clienttools.arcutils.profile.HCAPProfile;
import com.archivas.clienttools.arcutils.model.*;
import com.archivas.clienttools.arcutils.utils.net.GetCertsX509TrustManager;
import com.archivas.clienttools.arcutils.utils.net.SSLCertChain;
import com.archivas.clienttools.arcutils.utils.net.SSLCertificateCallback;
import org.apache.http.*;
import org.apache.http.client.methods.HttpDelete;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpHead;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.http.client.params.ClientPNames;
import org.apache.http.client.utils.URIUtils;
import org.apache.http.client.utils.URLEncodedUtils;
import org.apache.http.conn.ClientConnectionRequest;
import org.apache.http.conn.ConnectTimeoutException;
import org.apache.http.conn.params.ConnManagerParams;
import org.apache.http.conn.params.ConnPerRouteBean;
import org.apache.http.conn.routing.HttpRoute;
import org.apache.http.conn.scheme.PlainSocketFactory;
import org.apache.http.conn.scheme.Scheme;
import org.apache.http.conn.scheme.SchemeRegistry;
import org.apache.http.conn.ssl.SSLSocketFactory;
import org.apache.http.impl.client.AbstractHttpClient;
import org.apache.http.impl.client.DefaultHttpClient;
import org.apache.http.impl.conn.tsccm.ThreadSafeClientConnManager;
import org.apache.http.message.BasicNameValuePair;
import org.apache.http.params.BasicHttpParams;
import org.apache.http.params.CoreConnectionPNames;
import org.apache.http.params.HttpConnectionParams;
import org.apache.http.params.HttpParams;
import org.apache.http.protocol.HttpContext;

import javax.net.ssl.SSLContext;
import javax.net.ssl.TrustManager;
import javax.xml.stream.XMLStreamException;
import java.io.*;
import java.net.*;
import java.security.KeyManagementException;
import java.security.KeyStoreException;
import java.security.NoSuchAlgorithmException;
import java.security.NoSuchProviderException;
import java.util.*;
import java.util.logging.Level;
import java.util.logging.Logger;

/**
 * This abstract class defines the interface that is provided by the Archive.
 */
public abstract class HCAPAdapter implements StorageAdapter {

    public static final String DIRECTORY = "directory";
    public static final String FILE = "file";
    public static final String DOT = ".";
    public static final String OBJECT = "object";
    public static final String SYMLINK = "symlink";

    public static final Logger LOG = Logger.getLogger(HCAPAdapter.class.getName());
    public static final int MAX_NUM_CONNECTIONS_PER_NODE = 250;
    private static final int CM_BUFFER_SIZE = 1024 * 4; // 4K buffer

    // Useful for debugging
    protected String debugName;
    protected static int adapterCnt = 0;

    /**
     * Guards the request/response cookie behind the InputStream returned by
     * {@link #getInputStream}; we save a handle to it for cleanup and cancel
     * purposes.
     *
     * Note: we have to create the cookie with the request first and add the
     * response later, because we need the request around in case we need to
     * pause or cancel the job -- in that case we cancel the cookie.
     */
    protected final Object savingCookieLock = new Object();

    public class HcapAdapterCookie {
        private final HttpUriRequest savedRequest;
        private final HttpHost httpHost;

        // Set when the request is executed
        private HttpContext savedContext;
        private HttpResponse savedResponse;

        HcapAdapterCookie(HttpUriRequest savedRequest, HttpHost httpHost) {
            this.savedRequest = savedRequest;
            this.httpHost = httpHost;
        }

        public void setResponseAndContext(HttpResponse savedResponse, HttpContext savedContext) {
            this.savedResponse = savedResponse;
            this.savedContext = savedContext;
        }

        public HttpResponse getResponse() {
            return savedResponse;
        }

        public HttpHost getHost() {
            return httpHost;
        }

        public HttpUriRequest getRequest() {
            return savedRequest;
        }

        /**
         * Aborts the connection in the middle -- this forces a new connection to be
         * created in the pool
         */
        public void cancel() {
            // Close the request first
            if (savedRequest != null) {
                savedRequest.abort();
            }

            // Now close the response -- it is kept around for cleaning up the input
            // stream. We need to stop the request first so that it is no longer using
            // the stream; otherwise you can end up with hung requests.
            close();
        }

        /**
         * Returns the connection to the pool
         */
        public void close() {
            if (savedResponse != null && savedResponse.getEntity() != null) {
                try {
                    savedResponse.getEntity().consumeContent();
                } catch (IOException ioe) {
                    LOG.log(Level.FINE, "IOException while consuming the response entity", ioe);
                }
            }
        }
    }
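
    /*
     * Cookie lifecycle, as used throughout this class: a cookie is created with
     * the request and the target host, registered as savedCookie under
     * savingCookieLock (so the adapter holds at most one active connection at a
     * time), filled in with the response and context by executeMethod(), and
     * finally either close()d to return the connection to the pool or
     * cancel()ed to abort the request mid-flight.
     */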

    protected HcapAdapterCookie savedCookie;

    protected AbstractHttpClient httpClient = null;
    protected SSLCertificateCallback sslExceptionCallback;
    private boolean abortASAP = false; // This is set by abortASAP() but not otherwise used

    private static Map<Integer, String> statusMessages = new HashMap<Integer, String>();
    static {
        statusMessages.put(HttpStatus.SC_OK, "Request was successful");
        statusMessages.put(HttpStatus.SC_CREATED, "Object was successfully added");
        statusMessages.put(HttpStatus.SC_NO_CONTENT, "Could not retrieve custom metadata or version");
        statusMessages.put(HttpStatus.SC_PARTIAL_CONTENT, "Successfully retrieved the requested byte range");
        statusMessages.put(HttpStatus.SC_MOVED_TEMPORARILY, "The login credentials you supplied are invalid");
        statusMessages.put(HttpStatus.SC_BAD_REQUEST, "The request was not valid");
        statusMessages.put(HttpStatus.SC_UNAUTHORIZED,
                "This user does not have permissions to perform this operation");
        statusMessages.put(HttpStatus.SC_FORBIDDEN, "The requested operation is not allowed");
        statusMessages.put(HttpStatus.SC_NOT_FOUND, "Object not found");
        statusMessages.put(HttpStatus.SC_CONFLICT, "Conflict");
        statusMessages.put(HttpStatus.SC_REQUEST_TOO_LONG, "Not enough space to store object");
        statusMessages.put(HttpStatus.SC_REQUEST_URI_TOO_LONG, "URL is longer than 4095 bytes");
        statusMessages.put(HttpStatus.SC_REQUESTED_RANGE_NOT_SATISFIABLE, "Requested range not satisfiable");
        statusMessages.put(HttpStatus.SC_INTERNAL_SERVER_ERROR, "Internal server error on HCP");
        statusMessages.put(HttpStatus.SC_SERVICE_UNAVAILABLE, "Service is temporarily unavailable");
    }

    public static String getMessageFromStatusCode(int statusCode) throws StorageAdapterException {
        String m = statusMessages.get(statusCode);
        return (m == null) ? Integer.toString(statusCode) : m;
    }

    public String getDebugName() {
        return debugName;
    }

    public abstract SSLCertChain getSSLCerts() throws IOException, StorageAdapterException;

    /**
     * Executes the request. Takes a cookie with the request and the host and fills it in with the
     * response.
     * 
     * @param cookie
     * @throws IOException
     */
    protected abstract void executeMethod(HcapAdapterCookie cookie) throws IOException;

    /**
     * This is the code that actually writes the data to the HCAP/HCP. It differs
     * between the different versions of the adapter.
     *
     * @param targetNode
     *            the node to write to
     * @param targetPath
     *            the path to write the object to
     * @param is
     *            the stream to read the object data from
     * @param ingestionMetadata
     *            the metadata to ingest along with the object
     * @throws StorageAdapterException
     */
    public abstract void writeObjectFromStream(final String targetNode, final String targetPath,
            final InputStream is, final FileMetadata ingestionMetadata) throws StorageAdapterException;

    /**
     * Writes metadata to HCP, without writing the object.
     *
     * @param targetNode
     *            the node to write to
     * @param path
     *            the path of the object the metadata belongs to
     * @param metadata
     *            the metadata to write
     * @throws StorageAdapterException
     */
    public abstract void setMetadata(final String targetNode, final String path, final FileMetadata metadata)
            throws StorageAdapterException;

    /**
     * Handles future options for delete
     * 
     * @param operation
     *            the actual delete operation
     * @param reason
     *            only used with privileged operations
     * @return List<NameValuePair> of the query tags for the delete operation
     * @throws StorageAdapterLiteralException
     */
    abstract protected List<NameValuePair> handleDeleteOperation(DeleteJob.Operation operation, String reason)
            throws StorageAdapterLiteralException;

    /**
     * Returns the error header name for this version of HCP.
     * 
     * @return the name of the HTTP header that carries the HCP error message
     */
    abstract protected String getErrorHeader();

    public void delete(final String path, boolean isDirectory, DeleteJob.Operation operation, String reason)
            throws StorageAdapterException {
        delete(path, isDirectory, operation, reason, false);
    }

    public void delete(final String path, boolean isDirectory, DeleteJob.Operation operation, String reason,
            boolean isCustomMetadata) throws StorageAdapterException {
        HttpHost httpHost = new HttpHost(getHost(), ((HCAPProfile) getProfile()).getPort(),
                getProfile().getProtocol());

        String resolvedPath = path;
        if (!resolvedPath.startsWith(HttpGatewayConstants.METADATA_MOUNT_URL_DIR)) {
            resolvedPath = getProfile().resolvePath(resolvedPath);
        }

        // Handle Delete Parameters
        List<NameValuePair> deleteOperations;
        if (isDirectory) {
            // Directories don't need any special flags
            deleteOperations = null;
        } else if (operation == DeleteJob.Operation.PRIVILEGED_DELETE) {
            if (reason == null || reason.equals("")) {
                throw new StorageAdapterLiteralException("When doing a privileged delete a reason is required.");
            }

            deleteOperations = new ArrayList<NameValuePair>();
            deleteOperations.add(new BasicNameValuePair(HttpGatewayConstants.PARAM_PRIVILEGED, "true"));
            deleteOperations.add(new BasicNameValuePair(HttpGatewayConstants.PARAM_REASON, reason));
        } else {
            deleteOperations = handleDeleteOperation(operation, reason);
        }

        if (isCustomMetadata) {
            if (deleteOperations == null) {
                deleteOperations = new ArrayList<NameValuePair>();
            }
            deleteOperations.add(new BasicNameValuePair(HttpGatewayConstants.MD_TYPE, "custom-metadata"));
        }

        String queryString = null;
        if (deleteOperations != null && deleteOperations.size() > 0) {
            queryString = URLEncodedUtils.format(deleteOperations, "UTF-8");
        }

        // Constructing a uri handles the filename encoding for us here
        URI uri;
        try {
            uri = URIUtils.createURI(getProfile().getProtocol(), getHost(), -1, resolvedPath, queryString, null);
        } catch (URISyntaxException e) {
            LOG.log(Level.INFO, "Unexpected error generating delete URI for: " + resolvedPath);
            throw new StorageAdapterLiteralException("Error during delete", e);
        }

        HttpDelete request = new HttpDelete(uri);

        // Eventually we will just return this cookie which will be passed back to the caller.
        HcapAdapterCookie cookie = new HcapAdapterCookie(request, httpHost);
        synchronized (savingCookieLock) {
            if (savedCookie != null) {
                throw new RuntimeException(
                        "This adapter already has a current connection to host -- cannot create two at once.");
            }
            savedCookie = cookie;
        }
        try {
            executeMethod(cookie);
            this.handleHttpResponse(cookie.getResponse(), "deleting", path);
        } catch (IOException e) {
            handleIOExceptionFromRequest(e, "deleting", path);
        } finally {
            close();
        }
    }

    protected String getHost() throws StorageAdapterException {
        boolean shouldRetry = true;
        String profileString = null;
        try {
            profileString = getProfile().getHost();
        } catch (UnknownHostException e) {
            // This host is unknown, retrying will get us nowhere, give up
            shouldRetry = false;
        }

        if (profileString == null) {
            String msg = String.format("host %s in profile %s", ((HCAPProfile) getProfile()).getHostname(),
                    getProfile().getName());
            if (shouldRetry) {
                throw new StorageAdapterRetryAndFailJobException("DNS Failure for " + msg);
            } else {
                throw new StorageAdapterLiteralException("Could not reach " + msg);
            }
        }

        return profileString;
    }

    protected void handleIOExceptionFromRequest(IOException e, String cause, String fileName)
            throws StorageAdapterException {
        Throwable rootCause = e;
        while (rootCause.getCause() != null) {
            rootCause = rootCause.getCause();
        }
        if (rootCause instanceof ConnectTimeoutException || rootCause instanceof NoHttpResponseException
                || rootCause instanceof UnknownHostException || rootCause instanceof BindException
                || rootCause instanceof SocketTimeoutException) {
            String msg = "Retryable Http Exception while " + cause + " " + fileName + ": " + rootCause.getMessage();
            LOG.log(Level.WARNING, msg, e);
            throw new StorageAdapterHttpRetryException(msg, e);
        } else {
            String msg = "IOException while " + cause + " " + fileName + ": " + rootCause.getMessage();
            LOG.log(Level.WARNING, msg, e);
            throw new StorageAdapterLiteralException(msg, e);
        }
    }

    protected void handleHttpResponse(HttpResponse response, String action, String fileName)
            throws StorageAdapterException {
        if (response == null) {
            LOG.log(Level.WARNING, "Null response executing " + action + " for file: " + fileName);
            throw new StorageAdapterServerRetryException("Http request failed for file: " + fileName);
        }

        int statusCode = response.getStatusLine().getStatusCode();

        if (statusCode != HttpStatus.SC_OK && statusCode != HttpStatus.SC_CREATED
                && statusCode != HttpStatus.SC_NO_CONTENT) {
            // 204 NO CONTENT is returned when deleting custom metadata but there is no
            // custom metadata to delete.

            // Log the error message from HCP
            Header arcErrorHeader = response.getFirstHeader(getErrorHeader());
            if (arcErrorHeader != null) {
                LOG.log(Level.WARNING, "Error during HCP " + action + ".  Status = " + statusCode + " Message = "
                        + arcErrorHeader.getValue() + " File = " + fileName);
            }

            // Now do logic
            if (statusCode == HttpStatus.SC_INTERNAL_SERVER_ERROR
                    || statusCode == HttpStatus.SC_SERVICE_UNAVAILABLE) {
                String errorMsg = String.format("%d - Problem with server, please retry", statusCode);
                throw new StorageAdapterServerRetryException(errorMsg);
            } else {
                String errorMsgFromHCP;
                if (arcErrorHeader == null) {
                    errorMsgFromHCP = getMessageFromStatusCode(statusCode);
                } else {
                    errorMsgFromHCP = arcErrorHeader.getValue();
                }

                String errorMsg = String.format("%d - %s while %s %s", statusCode, errorMsgFromHCP, action,
                        getProfile().decode(fileName));
                throw new StorageAdapterLiteralException(errorMsg, statusCode);
            }
        }
    }

    public InputStream getInputStream(final String path, final String query) throws StorageAdapterException {
        LOG.log(Level.FINEST, "getHost()=" + getHost() + ", profile=" + getProfile().toDetailString());
        return getInputStream(getHost(), path, query, true, null);
    }

    /**
     * {@inheritDoc}
     */
    public void writeObjectFromStream(final String targetPath, final InputStream is,
            final FileMetadata ingestionMetadata) throws StorageAdapterException {
        writeObjectFromStream(getHost(), targetPath, is, ingestionMetadata);
    }

    /**
     * {@inheritDoc}
     */
    public void setMetadata(final String path, final FileMetadata metadata) throws StorageAdapterException {
        setMetadata(getHost(), path, metadata);
    }

    /**
     * {@inheritDoc}
     */
    public void rename(final String parentDirectoryURL, final String oldName, final String newName)
            throws StorageAdapterException {
        String msg = getClass().getName() + " doesn't support rename() yet.";
        throw new UnsupportedOperationException(msg);

        // To support rename on the cluster, we would have to download, delete, rename, and
        // then upload it again
    }

    /**
     * {@inheritDoc}
     * @param path
     */
    public boolean exists(final String path) throws StorageAdapterException {
        return exists(path, null);
    }

    public boolean exists(final String path, final String query) throws StorageAdapterException {
        boolean result = false;

        HttpUriRequest request;
        HttpHost httpHost = new HttpHost(getHost(), ((HCAPProfile) getProfile()).getPort(),
                getProfile().getProtocol());

        String resolvedPath = path;

        if (!path.startsWith(HttpGatewayConstants.METADATA_MOUNT_URL_DIR)) {
            resolvedPath = getProfile().resolvePath(resolvedPath);
        }
        if (query != null && !query.equals("")) {
            resolvedPath = String.format("%s?%s", resolvedPath, query);
        }
        request = new HttpHead(resolvedPath);

        int statusCode = -1;
        try {
            // Eventually we will just return this cookie which will be passed back to the caller.
            HcapAdapterCookie cookie = new HcapAdapterCookie(request, httpHost);
            synchronized (savingCookieLock) {
                if (savedCookie != null) {
                    throw new RuntimeException(
                            "This adapter already has a current connection to host -- cannot create two at once.");
                }
                savedCookie = cookie;
            }
            executeMethod(cookie);
            if (cookie.getResponse() != null)
                statusCode = cookie.getResponse().getStatusLine().getStatusCode();

            // exists() needs to return false when there is no server error but the
            // object doesn't exist. Otherwise the response can go through standard
            // error handling.
            result = true;
            if (statusCode == HttpStatus.SC_NOT_FOUND || statusCode == HttpStatus.SC_NO_CONTENT) {
                result = false;
            } else {
                this.handleHttpResponse(cookie.getResponse(), "checking existence of", path);
            }
        } catch (IOException e) {
            this.handleIOExceptionFromRequest(e, "checking existence of", path);
        } finally {
            close();
        }

        return result;
    }

    /**
     * {@inheritDoc}
     */
    public long getFileSize(ArcMoverFile file) throws StorageAdapterException {
        HttpHost httpHost = new HttpHost(getHost(), ((HCAPProfile) getProfile()).getPort(),
                getProfile().getProtocol());

        String path = file.getPath();

        if (file.getParent().isVersionList() || file.isVersion()) {
            path = path + file.getVersionString();
        }

        HttpUriRequest request;
        try {
            request = new HttpHead(getProfile().resolvePath(path));
        } catch (IllegalArgumentException iae) {
            // If the path is not a valid URI, then fail only this object, not the job
            Throwable rootCause = iae;
            while (rootCause.getCause() != null) {
                rootCause = rootCause.getCause();
            }
            if (rootCause instanceof URISyntaxException) {
                throw new StorageAdapterLiteralException(rootCause);
            } else {
                throw iae;
            }
        }

        // Eventually we will just return this cookie which will be passed back to the caller.
        HcapAdapterCookie cookie = new HcapAdapterCookie(request, httpHost);
        synchronized (savingCookieLock) {
            if (savedCookie != null) {
                throw new RuntimeException(
                        "This adapter already has a current connection to host -- cannot create two at once.");
            }
            savedCookie = cookie;
        }
        try {
            executeMethod(cookie);
            this.handleHttpResponse(cookie.getResponse(), "checking file size of", path);
        } catch (IOException e) {
            this.handleIOExceptionFromRequest(e, "checking file size of", path);
        } finally {
            close();
        }

        return Long.parseLong(cookie.getResponse().getFirstHeader("Content-Length").getValue());
    }

    /**
     * Get the archive capacity. This includes both total capacity and remaining capacity.
     * 
     * @param url
     *            A url pointing to any valid (i.e. existing) object in the archive. Typically,
     *            clients would use the root, but it is not required.
     * @return the archive capacity
     * @throws StorageAdapterException
     */
    public ArcCapacity getArchiveCapacity(URL url) throws StorageAdapterException {
        ArcCapacity result = null;

        HttpHost httpHost = new HttpHost(url.getHost(), url.getPort(), url.getProtocol());

        HttpUriRequest request = new HttpHead();

        try {
            // Eventually we will just return this cookie which will be passed back to the caller.
            HcapAdapterCookie cookie = new HcapAdapterCookie(request, httpHost);
            synchronized (savingCookieLock) {
                if (savedCookie != null) {
                    throw new RuntimeException(
                            "This adapter already has a current connection to host -- cannot create two at once.");
                }
                savedCookie = cookie;
            }
            executeMethod(cookie);
            this.handleHttpResponse(cookie.getResponse(), "checking capacity of", url.getHost());

            if (cookie.getResponse() != null) {
                result = new ArcCapacity();
                result.setTotal(Long.parseLong(cookie.getResponse()
                        .getFirstHeader(HttpGatewayConstants.HEADER_AVAILABLE_CAPACITY).getValue()));
                result.setAvailable(
                        Long.parseLong(cookie.getResponse().getFirstHeader("X-ArcAvailableCapacity").getValue()));
            }
        } catch (IOException e) {
            this.handleIOExceptionFromRequest(e, "checking capacity of", url.getHost());
        } finally {
            close();
        }

        return result;
    }

    /**
     * {@inheritDoc}
     * @param forceGetAllMetadata:
     *            We only get the metadata if this value is set OR if we have fewer than the
     *            prescribed threshold of files (default = 1000). That is because always getting
     *            the metadata is expensive, since it requires a second call. See note in code below.
     */
    public ArcMoverDirectory getDirectory(final String path, final boolean forceGetAllMetadata)
            throws StorageAdapterException {
        try {
            return ArcMoverDirectory.getDirInstance(this.getProfile(), path, this);
        } catch (Throwable e) {
            String msg = "Error parsing directory for: " + path;
            LOG.log(Level.INFO, msg, e);
            throw new StorageAdapterLiteralException(msg, e);
        }
    }

    public Iterator<ArcMoverFile> getFileListIterator(ArcMoverDirectory caller, boolean includeDeleted,
            boolean supportsVersioning) throws StorageAdapterException {
        try {
            String query = getQueryStringForFileListIterator(caller, includeDeleted, supportsVersioning);
            InputStream dirList = getInputStream(caller.getPath(), query);
            return new HCAPDirectoryList(this, caller, dirList);
        } catch (XMLStreamException e) {
            String path = caller.getPath();
            LOG.log(Level.INFO, "XMLStreamException in getFileListIterator on " + path, e);
            String msg = "The response from HCP contains invalid XML.";
            if (path.contains("&")) {
                msg = msg + "  One common cause is an ampersand (&) in a directory path.  HCP-DM does not support "
                        + "ampersands (&) in directory paths in HCP releases earlier than 4.0.";
            }

            throw new StorageAdapterException(msg, e);
        }
    }

    protected String getQueryStringForFileListIterator(ArcMoverDirectory caller, boolean includeDeleted,
            boolean supportsVersioning) throws StorageAdapterException {
        return "";
    }

    public ArcMoverDirectory getVersions(final String path) throws StorageAdapterException {
        return null;
    }

    /**
     * {@inheritDoc}
     */
    public void cancel() {
        // ReleaseConnection
        synchronized (savingCookieLock) {
            if (savedCookie != null) {
                savedCookie.cancel();
                savedCookie = null;
            }
        }
    }

    /**
     * {@inheritDoc}
     */
    public void close() {
        // Close
        synchronized (savingCookieLock) {
            if (savedCookie != null) {
                savedCookie.close();
                savedCookie = null;
            }
        }
    }

    public AbstractHttpClient getHttpClient() {
        return httpClient;
    }

    public InputStream getACLStream(final String path) throws StorageAdapterException {
        return null; // Nothing for this until version 5.0
    }

    public void setACLFromStream(final String path, InputStream aclStream) throws StorageAdapterException {
        // We need to just quietly do nothing here, it is expected it just gets dropped
    }

    /**
     * Sets up default http client parameters, including timeouts, the number of
     * simultaneous connections, and a retry handler.
     * 
     * @param sslExceptionCallback
     */
    protected synchronized final void init(SSLCertificateCallback sslExceptionCallback)
            throws StorageAdapterException {

        if (httpClient != null) {
            LOG.log(Level.FINE, "Reinitializing...why?");
            return;
        }

        HttpParams params = new BasicHttpParams();

        // Right now we just set this up for normal load. This means that when we go to
        // low load it is possible we will be using more connections on HCP than the
        // load schedule specifies. When we upgrade to the next apache library we can
        // implement that functionality; see @updateHttpClient.
        int maxConnectionsPerRoute = HCPMoverProperties.MAX_LOAD_MAXTHREADS_PER_NODE.getAsInt();
        int maxConnections = HCPMoverProperties.MAX_LOAD_MAXTHREADS.getAsInt();

        try {
            ConnPerRouteBean connPerRoute = new ConnPerRouteBean(maxConnectionsPerRoute);
            ConnManagerParams.setMaxConnectionsPerRoute(params, connPerRoute);
        } catch (NumberFormatException e) {
            // Already logged by helper
        }

        try {
            ConnManagerParams.setMaxTotalConnections(params, maxConnections);
        } catch (NumberFormatException e) {
            // Already logged by helper
        }

        // We need a timeout so we don't just hang there forever
        int connectionTimeoutMillis = HCPMoverProperties.CONNECTION_TIMEOUT_MILLIS.getAsInt();
        HttpConnectionParams.setConnectionTimeout(params, connectionTimeoutMillis);

        // And a socket timeout from properties
        int socketTimeoutMillis = HCPMoverProperties.SOCKET_TIMEOUT_MILLIS.getAsInt();
        HttpConnectionParams.setSoTimeout(params, socketTimeoutMillis);

        // This is the magic that makes Windows reuse sockets.
        HttpConnectionParams.setLinger(params, 1);

        // HCP redirects with 302s -- prevent the client from following them
        params.setBooleanParameter(ClientPNames.HANDLE_REDIRECTS, false);

        try {
            SchemeRegistry schemeRegistry = getHcapProtocolSchemeRegistryForHttpClient(sslExceptionCallback);

            ThreadSafeClientConnManager connMgr = new ThreadSafeClientConnManager(params, schemeRegistry);

            // debug start
            // TestHttpThreadSafeClientConnManager connMgr = new
            // TestHttpThreadSafeClientConnManager(params, schemeRegistry);
            // debug end

            httpClient = new DefaultHttpClient(connMgr, params);
            LOG.log(Level.FINE, "Created new httpClient for profile: " + getProfile());

        } catch (Exception e) {
            LOG.log(Level.INFO, "Unable to initialize HCAPHttpAdapter!", e);
            throw new StorageAdapterLiteralException("Unable to initialize HCAPHttpAdapter!", e);
        }

    }

    public SchemeRegistry getHcapProtocolSchemeRegistryForHttpClient(SSLCertificateCallback sslExceptionCallback)
            throws StorageAdapterException {
        SchemeRegistry schemeRegistry = new SchemeRegistry();
        schemeRegistry.register(new Scheme("http", PlainSocketFactory.getSocketFactory(), 80));

        try {
            SSLContext sslcontext = SSLContext.getInstance("TLS");
            // Note: SSLContext.init takes an array of TrustManager instances, so we
            // could in theory provide more than one implementation here.
            TrustManager x509TrustManager = new GetCertsX509TrustManager((HCAPProfile) getProfile(),
                    sslExceptionCallback);
            sslcontext.init(null, new TrustManager[] { x509TrustManager }, null);
            SSLSocketFactory sslSocketFactory = new SSLSocketFactory(sslcontext);

            // We allow all hostnames here because the verification that would be done
            // under STRICT_HOSTNAME_VERIFIER was already performed in the init we
            // called above.
            sslSocketFactory.setHostnameVerifier(SSLSocketFactory.ALLOW_ALL_HOSTNAME_VERIFIER);

            Scheme https = new Scheme("https", sslSocketFactory, 443);
            schemeRegistry.register(https);

            getAdditionalHcapProtocolSchemeRegistryForHttpClient(schemeRegistry, sslExceptionCallback);

        } catch (NoSuchAlgorithmException e) {
            LOG.log(Level.INFO, "Unable to initialize SSL for https protocol!", e);
            throw new StorageAdapterException("Unable to initialize SSL for https protocol", e);
        } catch (NoSuchProviderException e) {
            LOG.log(Level.INFO, "Unable to initialize SSL for https protocol!", e);
            throw new StorageAdapterException("Unable to initialize SSL for https protocol", e);
        } catch (KeyStoreException e) {
            LOG.log(Level.INFO, "Unable to initialize SSL for https protocol!", e);
            throw new StorageAdapterException("Unable to initialize SSL for https protocol", e);
        } catch (KeyManagementException e) {
            LOG.log(Level.INFO, "Unable to initialize SSL for https protocol!", e);
            throw new StorageAdapterException("Unable to initialize SSL for https protocol", e);
        }

        return schemeRegistry;
    }

    /**
     * Child classes can use this to add to the scheme registry. Currently the HCP
     * authenticated-namespace adapters do this.
     * 
     * @param schemeRegistry
     * @param sslExceptionCallback
     * @throws StorageAdapterException
     */
    public void getAdditionalHcapProtocolSchemeRegistryForHttpClient(SchemeRegistry schemeRegistry,
            SSLCertificateCallback sslExceptionCallback) throws StorageAdapterException {

    }

    public void updateHttpClient(LoadSchedule load) {
        // Currently this does nothing. What it should do, in theory, is call
        // setDefaultMaxPerRoute(int max) and setMaxForRoute(HttpRoute route, int max)
        // with the values passed from load and maxCopyThreads. At the time of this
        // writing those functions were not available in the version of the library we
        // have, so we are trusting that the threads manage this properly themselves.
    }

    public void abortASAP() {
        this.abortASAP = true;
    }

    public int getMaxNumSimultaneousHttpConnections(final int maxConnectionsPerRoute)
            throws StorageAdapterLiteralException {
        int maxAllowedConnectionsPerNode = Math.min(MAX_NUM_CONNECTIONS_PER_NODE, maxConnectionsPerRoute);

        int numNodes = 0;
        try {
            numNodes = ((HCAPProfile) getProfile()).getHostAddresses().size();
        } catch (UnknownHostException e) {
            // This exception was swallowed earlier in getHost(); here we log it and
            // fail with a literal exception, since an unresolvable host would
            // otherwise mean 0 connections and 0 threads running.
            LOG.log(Level.INFO,
                    "UnknownHostException in getMaxNumSimultaneousHttpConnections, result will be 0 connections",
                    e);
            throw new StorageAdapterLiteralException(
                    "Cannot get to host: " + ((HCAPProfile) getProfile()).getHostname());
        }
        int result = maxAllowedConnectionsPerNode * numNodes;

        return result;
    }

    /**
     * Sets an int parameter on the http client; use null to clear it.
     * 
     * @param timeout
     *            the new value to set
     * @param parameter
     *            the name of the parameter to set
     */
    protected void setIntClientParameter(final Integer timeout, final String parameter) {
        HttpParams params = httpClient.getParams();
        if (timeout != null) {
            params.setIntParameter(parameter, timeout);
        } else {
            params.removeParameter(parameter);
        }
        httpClient.setParams(params);
    }

    /**
     * Retrieves the current value of an int client parameter, or null if it is not
     * set (or is set to Integer.MIN_VALUE).
     * 
     * @return the current value
     */
    protected Integer getIntClientParameter(final String parameter) {
        HttpParams params = httpClient.getParams();
        int value = params.getIntParameter(parameter, Integer.MIN_VALUE);
        if (value != Integer.MIN_VALUE) {
            return value;
        } else {
            return null;
        }
    }

    public boolean testConnection() throws ConnectionTestException {
        // store the old timeouts
        Integer oldTimeout = getIntClientParameter(CoreConnectionPNames.CONNECTION_TIMEOUT);
        Integer oldSocketTimeout = getIntClientParameter(CoreConnectionPNames.SO_TIMEOUT);
        boolean isValid = false;
        try {
            int tempTime = HCPMoverProperties.CONNECTION_TEST_TIMEOUT_OVERRIDE_MS.getAsInt();
            setIntClientParameter(tempTime, CoreConnectionPNames.CONNECTION_TIMEOUT);
            setIntClientParameter(tempTime, CoreConnectionPNames.SO_TIMEOUT);
            isValid = doTestConnection();
        } finally {
            // restore the old timeouts
            setIntClientParameter(oldTimeout, CoreConnectionPNames.CONNECTION_TIMEOUT);
            setIntClientParameter(oldSocketTimeout, CoreConnectionPNames.SO_TIMEOUT);
        }
        return isValid;
    }

    protected boolean doTestConnection() throws ConnectionTestException {
        boolean isValid;

        HttpUriRequest request;
        HCAPProfile hcapProfile = (HCAPProfile) getProfile();
        HttpHost httpHost = null;
        try {
            httpHost = new HttpHost(getHost(), hcapProfile.getPort(), hcapProfile.getProtocol());
        } catch (StorageAdapterException e) {
            throw new ConnectionTestException(e, null, null, getProfile().getName());
        }

        String root = "/";
        String resolvedPath = hcapProfile.resolvePath(root);
        request = new HttpHead(resolvedPath);
        HcapAdapterCookie cookie = new HcapAdapterCookie(request, httpHost);
        // Eventually we will just return this cookie which will be passed back to the caller.
        synchronized (savingCookieLock) {
            if (savedCookie != null) {
                throw new RuntimeException(
                        "This adapter already has a current connection to host -- cannot create two at once.");
            }
            savedCookie = cookie;
        }

        Throwable cause = null;
        Integer statusCode = null;
        try {
            executeMethod(cookie);

        } catch (IOException e) {
            LOG.log(Level.INFO, "IOException during testConnection", e);
            cause = e;
        } finally {
            close();
        }

        if (cookie.getResponse() != null) {
            statusCode = cookie.getResponse().getStatusLine().getStatusCode();
        }

        if (statusCode != null && statusCode == HttpStatus.SC_OK) {
            isValid = true;
        } else if (cause != null) {
            throw new ConnectionTestException(cause, statusCode, null, getProfile().getName());
        } else {
            throw new ConnectionTestException(statusCode, null, getProfile().getName());
        }

        return isValid;
    }

    // debug start
    private class TestHttpThreadSafeClientConnManager extends ThreadSafeClientConnManager {
        private int count = 0;
        private final int MAX_COUNT = 0;

        public TestHttpThreadSafeClientConnManager(HttpParams params, SchemeRegistry schreg) {
            super(params, schreg);
        }

        public ClientConnectionRequest requestConnection(HttpRoute route, Object state) {
            if (count > MAX_COUNT) {
                LOG.log(Level.INFO,
                        "****Getting connection in pool.  Current total max = " + getConnectionsInPool()
                                + "   Current for route = " + getConnectionsInPool(route) + "  Route = "
                                + route.getTargetHost());
                count = 0;
            }
            ++count;
            return super.requestConnection(route, state);
        }
    }
    // debug end

    protected static String getCustomMetadata(Reader reader) throws IOException, CustomMetadataTooLargeException {
        long maxCmdLength = HCPMoverProperties.CM_MAX_IN_MEMORY_SIZE.getAsLong();
        StringBuilder out = new StringBuilder();

        char[] chars = new char[CM_BUFFER_SIZE];
        int readCnt;
        do {
            readCnt = reader.read(chars);
            if (readCnt > 0) {
                if (out.length() + readCnt > maxCmdLength) {
                    throw new CustomMetadataTooLargeException();
                }
                out.append(chars, 0, readCnt);
            }
        } while (readCnt >= 0);

        return out.toString();
    }

    protected InputStream getInputStream(final String path, boolean resolvePath) throws StorageAdapterException {
        LOG.log(Level.FINEST, "getHost()=" + getHost() + ", profile=" + getProfile().toDetailString());
        return getInputStream(getHost(), path, resolvePath);
    }

    protected InputStream getInputStream(final String sourceNode, final String path, boolean resolvePath)
            throws StorageAdapterException {
        return getInputStream(sourceNode, path, null, resolvePath, null);
    }

    protected InputStream getInputStream(final String path, final String query, Header contentTypeHeader)
            throws StorageAdapterException {
        LOG.log(Level.FINEST, "getHost()=" + getHost() + ", profile=" + getProfile().toDetailString());
        return getInputStream(getHost(), path, query, true, contentTypeHeader);
    }

    protected InputStream getInputStream(String sourceNode, String path, String query, boolean resolvePath,
            Header contentTypeHeader) throws StorageAdapterException {
        InputStream inputStream = null; // Caller closes this

        try {
            HttpHost httpHost = new HttpHost(sourceNode, ((HCAPProfile) getProfile()).getPort(),
                    getProfile().getProtocol());
            LOG.log(Level.FINE, "sourceNode=" + sourceNode + ", path=" + path + ", httpHost=" + httpHost);

            String resolvedPath = path;
            if (resolvePath) {
                resolvedPath = getProfile().resolvePath(resolvedPath);
            }
            if (query != null && !query.equals("")) {
                if (!query.startsWith("?")) {
                    query = "?" + query;
                }
                resolvedPath = resolvedPath + query;
            }

            HttpGet request = new HttpGet(resolvedPath);

            if (contentTypeHeader != null) {
                request.setHeader(contentTypeHeader);
            }

            // Eventually we will just return this cookie which will be passed back to the caller.
            HcapAdapterCookie cookie = new HcapAdapterCookie(request, httpHost);
            synchronized (savingCookieLock) {
                if (savedCookie != null) {
                    throw new RuntimeException(
                            "This adapter already has a current connection to host -- cannot create two at once.");
                }
                savedCookie = cookie;
            }

            executeMethod(cookie);
            this.handleHttpResponse(cookie.getResponse(), "getting", path);

            // This inputStream is being returned so we cannot call close at this point. We trust
            // the caller to do that.
            if (cookie.getResponse().getEntity() != null) {
                inputStream = cookie.getResponse().getEntity().getContent();
            }

        } catch (IOException e) {
            this.handleIOExceptionFromRequest(e, "getting", path);

            // Clean up here
            close();
        }

        // Note that the input stream and request are still open at this point; the
        // caller is responsible for closing them via close().
        return inputStream;
    }

    @Override
    public boolean isVersioningEnabled() throws StorageAdapterException {
        return false;
    }

    @Override
    public void mkSymlink(String symlinkName, String symlinkTarget) throws StorageAdapterException {
        throw new StorageAdapterLiteralException("Creation of symbolic links via HTTP is not supported on HCAP");
    }
}
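
For orientation, here is a minimal usage sketch, not part of the original file, showing how calling code might drive a concrete HCAPAdapter subclass. The obtainAdapter() factory and the object path are assumptions made for illustration; exists(), getInputStream(), and close() are the methods defined in the listing above.

// Hypothetical usage sketch. obtainAdapter() is assumed -- the listing above
// does not show how adapters are constructed. Assumes the same package so that
// HCAPAdapter and StorageAdapterException resolve without extra imports.
package com.archivas.clienttools.arcutils.impl.adapter;

import java.io.BufferedReader;
import java.io.InputStream;
import java.io.InputStreamReader;

public class HCAPAdapterUsageSketch {

    public static void main(String[] args) throws Exception {
        HCAPAdapter adapter = obtainAdapter(); // construction is version specific
        String path = "/rest/examples/hello.txt"; // hypothetical object path

        try {
            // HEAD first: exists() maps 404 and 204 to false and routes every
            // other non-2xx status through handleHttpResponse().
            if (adapter.exists(path)) {
                // GET the body. Per the comments in getInputStream(), the caller
                // owns the stream and must close it, then return the pooled
                // connection with adapter.close().
                InputStream in = adapter.getInputStream(path, null);
                try {
                    if (in != null) {
                        BufferedReader reader = new BufferedReader(new InputStreamReader(in, "UTF-8"));
                        System.out.println(reader.readLine());
                    }
                } finally {
                    if (in != null) {
                        in.close();
                    }
                    adapter.close();
                }
            }
        } catch (StorageAdapterException e) {
            System.err.println("Adapter operation failed: " + e.getMessage());
        }
    }

    private static HCAPAdapter obtainAdapter() {
        // Placeholder: a real caller would construct a concrete subclass such as
        // Hcap2Adapter for the target HCP version.
        throw new UnsupportedOperationException("adapter construction not shown");
    }
}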