Example usage for com.amazonaws.services.glacier AmazonGlacierClient setEndpoint

Introduction

On this page you can find example usages of com.amazonaws.services.glacier.AmazonGlacierClient.setEndpoint.

Prototype

@Deprecated
public void setEndpoint(String endpoint) throws IllegalArgumentException 

Document

Overrides the default endpoint for this client.
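
Since setEndpoint is deprecated, the snippet below is only a minimal sketch of the call itself, assuming the AWS SDK for Java 1.x; the newClient helper and its parameters are illustrative and do not come from any of the source files listed under Usage.

import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.services.glacier.AmazonGlacierClient;

public static AmazonGlacierClient newClient(String accessKey, String secretKey, String region) {
    AWSCredentials credentials = new BasicAWSCredentials(accessKey, secretKey);

    AmazonGlacierClient client = new AmazonGlacierClient(credentials);
    // Overrides the default endpoint for this client; throws IllegalArgumentException
    // if the endpoint is not a valid URI.
    client.setEndpoint("https://glacier." + region + ".amazonaws.com");

    return client;
}

In newer releases of the SDK, the region-aware AmazonGlacierClientBuilder is the recommended replacement for this deprecated call.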

Usage

From source file:com.connexience.server.model.archive.glacier.SetupUtils.java

License:Open Source License

public static void setupVault(String accessKey, String secretKey, String domainName, String vaultName,
        String topicARN) {
    try {
        AWSCredentials awsCredentials = new BasicAWSCredentials(accessKey, secretKey);

        AmazonGlacierClient amazonGlacierClient = new AmazonGlacierClient(awsCredentials);
        amazonGlacierClient.setEndpoint("https://glacier." + domainName + ".amazonaws.com/");

        CreateVaultRequest createVaultRequest = new CreateVaultRequest();
        createVaultRequest.withVaultName(vaultName);

        CreateVaultResult createVaultResult = amazonGlacierClient.createVault(createVaultRequest);
        if (createVaultResult != null) {
            VaultNotificationConfig vaultNotificationConfig = new VaultNotificationConfig();
            vaultNotificationConfig.withSNSTopic(topicARN);
            vaultNotificationConfig.withEvents("ArchiveRetrievalCompleted", "InventoryRetrievalCompleted");

            SetVaultNotificationsRequest setVaultNotificationsRequest = new SetVaultNotificationsRequest();
            setVaultNotificationsRequest.withVaultName(vaultName);
            setVaultNotificationsRequest.withVaultNotificationConfig(vaultNotificationConfig);

            amazonGlacierClient.setVaultNotifications(setVaultNotificationsRequest);
        } else {
            logger.warn("Unable to create vault: \"" + vaultName + "\"");
        }

        amazonGlacierClient.shutdown();
    } catch (AmazonServiceException amazonServiceException) {
        logger.warn("AmazonServiceException: " + amazonServiceException);
        logger.debug(amazonServiceException);
    } catch (IllegalArgumentException illegalArgumentException) {
        logger.warn("IllegalArgumentException: " + illegalArgumentException);
        logger.debug(illegalArgumentException);
    } catch (AmazonClientException amazonClientException) {
        logger.warn("AmazonClientException: " + amazonClientException);
        logger.debug(amazonClientException);
    } catch (Throwable throwable) {
        logger.warn("Throwable: " + throwable);
        logger.debug(throwable);
    }
}

From source file:com.splunk.shuttl.archiver.filesystem.glacier.GlacierClient.java

License:Apache License

public static GlacierClient create(AWSCredentialsImpl credentials) {
    AmazonGlacierClient amazonGlacierClient = new AmazonGlacierClient(credentials);
    amazonGlacierClient.setEndpoint(credentials.getGlacierEndpoint());
    return new GlacierClient(new ArchiveTransferManager(amazonGlacierClient, credentials),
            credentials.getGlacierVault());
}

From source file:com.vrane.metaGlacier.gui.GlacierFrame.java

/**
 * Returns the AWS client in the specified region.
 *
 * @param region string such as 'us-west-1', 'eu-west-1'
 * @return AWS client object
 */
public static AmazonGlacierClient getClient(final String region) {
    final AmazonGlacierClient client = new AmazonGlacierClient(Main.frame);
    int current_api_call_count = P.getInt(NUMBER_OF_AWS_API_CALLS, 0);
    final String endpointURL = "https://glacier.%s.amazonaws.com";

    client.setEndpoint(String.format(endpointURL, region));
    if (current_api_call_count == 0) {
        P.putLong(LAST_AWS_API_CALL_RESET, System.currentTimeMillis());
    }
    P.putInt(NUMBER_OF_AWS_API_CALLS, 1 + current_api_call_count);
    return client;
}

From source file:englishcoffeedrinker.corpse.Glacier.java

License:Open Source License

public Glacier(AWSEndpoint endpoint, AWSCredentials credentials) {
    this.endpoint = endpoint;
    this.credentials = credentials;

    AmazonGlacierClient client = new AmazonGlacierClient(this.credentials);
    client.setEndpoint(this.endpoint.getServiceURI(AWSEndpoint.Service.GLACIER));

    vaults = new ArrayList<Vault>();
    size = 0L;

    String marker = null;
    do {
        ListVaultsRequest lv = new ListVaultsRequest().withMarker(marker).withLimit("1000");

        ListVaultsResult lvr = client.listVaults(lv);
        List<DescribeVaultOutput> vList = lvr.getVaultList();
        marker = lvr.getMarker();

        for (DescribeVaultOutput vault : vList) {
            vaults.add(new Vault(vault));

            if (vault.getSizeInBytes() != null)
                size += vault.getSizeInBytes();
        }

    } while (marker != null);
}

From source file:englishcoffeedrinker.corpse.Glacier.java

License:Open Source License

private void scheduleRequest(ScheduledRequest request) throws IOException {
    AmazonGlacierClient client = new AmazonGlacierClient(credentials);
    client.setEndpoint(endpoint.getServiceURI(AWSEndpoint.Service.GLACIER));

    while (request != null && request.ready(client)) {
        request = request.process(client);
    }

    if (request != null) {
        scheduled.add(request);
    }
}

From source file:glacierpipe.GlacierPipeMain.java

License:Apache License

public static void main(String[] args) throws IOException, ParseException {
    CommandLineParser parser = new GnuParser();

    CommandLine cmd = parser.parse(OPTIONS, args);

    if (cmd.hasOption("help")) {
        try (PrintWriter writer = new PrintWriter(System.err)) {
            printHelp(writer);
        }

        System.exit(0);
    } else if (cmd.hasOption("upload")) {

        // Turn the CommandLine into Properties
        Properties cliProperties = new Properties();
        for (Iterator<?> i = cmd.iterator(); i.hasNext();) {
            Option o = (Option) i.next();

            String opt = o.getLongOpt();
            opt = opt != null ? opt : o.getOpt();

            String value = o.getValue();
            value = value != null ? value : "";

            cliProperties.setProperty(opt, value);
        }

        // Build up a configuration
        ConfigBuilder configBuilder = new ConfigBuilder();

        // Archive name
        List<?> archiveList = cmd.getArgList();
        if (archiveList.size() > 1) {
            throw new ParseException("Too many arguments");
        } else if (archiveList.isEmpty()) {
            throw new ParseException("No archive name provided");
        }

        configBuilder.setArchive(archiveList.get(0).toString());

        // All other arguments on the command line
        configBuilder.setFromProperties(cliProperties);

        // Load any config from the properties file
        Properties fileProperties = new Properties();
        try (InputStream in = new FileInputStream(configBuilder.propertiesFile)) {
            fileProperties.load(in);
        } catch (IOException e) {
            System.err.printf("Warning: unable to read properties file %s; %s%n", configBuilder.propertiesFile,
                    e);
        }

        configBuilder.setFromProperties(fileProperties);

        // ...
        Config config = new Config(configBuilder);

        IOBuffer buffer = new MemoryIOBuffer(config.partSize);

        AmazonGlacierClient client = new AmazonGlacierClient(
                new BasicAWSCredentials(config.accessKey, config.secretKey));
        client.setEndpoint(config.endpoint);

        // Actual upload
        try (InputStream in = new BufferedInputStream(System.in, 4096);
                PrintWriter writer = new PrintWriter(System.err);
                ObservableProperties configMonitor = config.reloadProperties
                        ? new ObservableProperties(config.propertiesFile)
                        : null;
                ProxyingThrottlingStrategy throttlingStrategy = new ProxyingThrottlingStrategy(config);) {
            TerminalGlacierPipeObserver observer = new TerminalGlacierPipeObserver(writer);

            if (configMonitor != null) {
                configMonitor.registerObserver(throttlingStrategy);
            }

            GlacierPipe pipe = new GlacierPipe(buffer, observer, config.maxRetries, throttlingStrategy);
            pipe.pipe(client, config.vault, config.archive, in);
        } catch (Exception e) {
            e.printStackTrace(System.err);
        }

        System.exit(0);
    } else {
        try (PrintWriter writer = new PrintWriter(System.err)) {
            writer.println("No action specified.");
            printHelp(writer);
        }

        System.exit(-1);
    }
}

From source file:maebackup.MaeBackup.java

License:Open Source License

public static void upload(String lrzname) {
    try {
        System.out.println("Uploading to Glacier...");
        ClientConfiguration config = new ClientConfiguration();
        config.setProtocol(Protocol.HTTPS);
        AmazonGlacierClient client = new AmazonGlacierClient(credentials, config);
        client.setEndpoint(endpoint);

        File file = new File(lrzname);
        String archiveid = "";
        if (file.length() < 5 * 1024 * 1024) {
            System.out.println("File is small, uploading as single chunk");
            String treehash = TreeHashGenerator.calculateTreeHash(file);

            InputStream is = new FileInputStream(file);
            byte[] buffer = new byte[(int) file.length()];
            int bytes = is.read(buffer);
            if (bytes != file.length())
                throw new RuntimeException("Only read " + bytes + " of " + file.length()
                        + " byte file when preparing for upload.");
            InputStream bais = new ByteArrayInputStream(buffer);

            UploadArchiveRequest request = new UploadArchiveRequest(vaultname, lrzname, treehash, bais);
            UploadArchiveResult result = client.uploadArchive(request);
            archiveid = result.getArchiveId();
        } else {
            long chunks = file.length() / chunksize;
            while (chunks > 10000) {
                chunksize <<= 1;
                chunks = file.length() / chunksize;
            }
            String chunksizestr = Integer.toString(chunksize);
            System.out.println(
                    "Starting multipart upload: " + chunks + " full chunks of " + chunksizestr + " bytes");

            InitiateMultipartUploadResult imures = client.initiateMultipartUpload(
                    new InitiateMultipartUploadRequest(vaultname, lrzname, chunksizestr));

            String uploadid = imures.getUploadId();
            RandomAccessFile raf = new RandomAccessFile(file, "r");

            byte[] buffer = new byte[chunksize];

            for (long x = 0; x < chunks; x++) {
                try {
                    System.out.println("Uploading chunk " + x + "/" + chunks);

                    raf.seek(x * chunksize);
                    raf.read(buffer);

                    String parthash = TreeHashGenerator.calculateTreeHash(new ByteArrayInputStream(buffer));
                    String range = "bytes " + (x * chunksize) + "-" + ((x + 1) * chunksize - 1) + "/*";

                    client.uploadMultipartPart(new UploadMultipartPartRequest(vaultname, uploadid, parthash,
                            range, new ByteArrayInputStream(buffer)));
                } catch (Exception e) {
                    e.printStackTrace();
                    System.err.println("Error uploading chunk " + x + ", retrying...");
                    x--;
                }
            }

            if (file.length() > chunks * chunksize) {
                do {
                    try {
                        System.out.println("Uploading final partial chunk");
                        raf.seek(chunks * chunksize);
                        int bytes = raf.read(buffer);

                        String parthash = TreeHashGenerator
                                .calculateTreeHash(new ByteArrayInputStream(buffer, 0, bytes));
                        String range = "bytes " + (chunks * chunksize) + "-" + (file.length() - 1) + "/*";

                        client.uploadMultipartPart(new UploadMultipartPartRequest(vaultname, uploadid, parthash,
                                range, new ByteArrayInputStream(buffer, 0, bytes)));
                    } catch (Exception e) {
                        e.printStackTrace();
                        System.err.println("Error uploading final chunk, retrying...");
                        continue;
                    }
                } while (false);
            }

            System.out.println("Completing upload");
            String treehash = TreeHashGenerator.calculateTreeHash(file);
            CompleteMultipartUploadResult result = client
                    .completeMultipartUpload(new CompleteMultipartUploadRequest(vaultname, uploadid,
                            Long.toString(file.length()), treehash));
            archiveid = result.getArchiveId();
        }

        System.out.println("Uploaded " + lrzname + " to Glacier as ID " + archiveid);

        File listfile = new File(cachedir, "archives.lst");
        FileWriter fw = new FileWriter(listfile, true);
        fw.write(archiveid + " " + lrzname + "\n");
        fw.close();
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}

From source file:maebackup.MaeBackup.java

License:Open Source License

public static void download(String filename, String jobid) {
    try {
        System.out.println("Starting download...");
        ClientConfiguration config = new ClientConfiguration();
        config.setProtocol(Protocol.HTTPS);
        AmazonGlacierClient client = new AmazonGlacierClient(credentials, config);
        client.setEndpoint(endpoint);

        if (jobid == null || jobid.isEmpty()) {
            String archiveid;
            // Yes, this will screw up on actual 138-character file names, but... yeah.
            if (filename.length() == 138) {
                archiveid = filename;
            } else {
                File listfile = new File(cachedir, "archives.lst");
                Map<File, String> filemap = loadHashes(listfile);
                archiveid = filemap.get(filename);
                if (archiveid == null) {
                    System.err.println("Error: Could not find archive ID for file " + filename);
                    System.exit(1);
                    return;
                }
            }

            InitiateJobResult result = client.initiateJob(new InitiateJobRequest(vaultname,
                    new JobParameters().withType("archive-retrieval").withArchiveId(archiveid)));
            jobid = result.getJobId();
            System.out.println("Started download job as ID " + jobid);
        } else {
            DescribeJobResult djres = client.describeJob(new DescribeJobRequest(vaultname, jobid));
            if (!djres.getStatusCode().equals("Succeeded")) {
                System.out.println("Job is not listed as Succeeded. It is: " + djres.getStatusCode());
                System.out.println(djres.getStatusMessage());
                System.exit(2);
            }
            long size = djres.getArchiveSizeInBytes();
            long chunks = size / chunksize;
            while (chunks > 10000) {
                chunksize <<= 1;
                chunks = size / chunksize;
            }
            RandomAccessFile raf = new RandomAccessFile(filename, "rw");
            raf.setLength(size);
            byte[] buffer = new byte[chunksize];

            for (int x = 0; x < chunks; x++) {
                try {
                    System.out.println("Downloading chunk " + x + " of " + chunks);
                    String range = "bytes " + (x * chunksize) + "-" + ((x + 1) * chunksize - 1) + "/*";

                    GetJobOutputResult gjores = client
                            .getJobOutput(new GetJobOutputRequest(vaultname, jobid, range));

                    gjores.getBody().read(buffer);

                    MessageDigest md = MessageDigest.getInstance("SHA-256");
                    md.update(buffer, 0, chunksize);

                    byte[] hash = md.digest();

                    StringBuffer sb = new StringBuffer();
                    for (byte b : hash) {
                        sb.append(String.format("%02x", b));
                    }
                    if (!sb.toString().equalsIgnoreCase(gjores.getChecksum())) {
                        System.err.println("Error: Chunk " + x + " does not match SHA-256. Retrying.");
                        x--;
                        continue;
                    }

                    raf.seek(x * chunksize);
                    raf.write(buffer);
                } catch (Exception e) {
                    System.err.println("Error: Exception while downloading chunk " + x + ". Retrying.");
                    x--;
                }
            }

            if (size > chunks * chunksize) {
                do {
                    try {
                        System.out.println("Downloading final partial chunk");
                        String range = "bytes " + (chunks * chunksize) + "-" + (size - 1) + "/*";

                        GetJobOutputResult gjores = client
                                .getJobOutput(new GetJobOutputRequest(vaultname, jobid, range));

                        int bytes = gjores.getBody().read(buffer);

                        MessageDigest md = MessageDigest.getInstance("SHA-256");
                        md.update(buffer, 0, bytes);

                        byte[] hash = md.digest();

                        StringBuffer sb = new StringBuffer();
                        for (byte b : hash) {
                            sb.append(String.format("%02x", b));
                        }
                        if (!sb.toString().equalsIgnoreCase(gjores.getChecksum())) {
                            System.err.println("Error: Final chunk does not match SHA-256. Retrying.");
                            continue;
                        }

                        raf.seek(chunks * chunksize);
                        raf.write(buffer, 0, bytes);
                    } catch (Exception e) {
                        System.err.println("Error: Exception while downloading final chunk. Retrying.");
                        continue;
                    }
                } while (false);
            }
            raf.close();

            String treehash = TreeHashGenerator.calculateTreeHash(new File(filename));
            if (!treehash.equalsIgnoreCase(djres.getSHA256TreeHash())) {
                System.err.println("Error: File failed final tree hash check.");
                System.exit(3);
            }

            System.out.println("Download complete.");
        }
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}

From source file:maebackup.MaeBackup.java

License:Open Source License

public static void delete(String archive) {
    try {
        System.out.println("Deleting from Glacier...");
        ClientConfiguration config = new ClientConfiguration();
        config.setProtocol(Protocol.HTTPS);
        AmazonGlacierClient client = new AmazonGlacierClient(credentials, config);
        client.setEndpoint(endpoint);
        client.deleteArchive(new DeleteArchiveRequest(vaultname, archive));
        System.out.println("Archive deleted.");
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}

From source file:maebackup.MaeBackup.java

License:Open Source License

public static void list(String arg) {
    try {
        System.out.println("Listing Glacier vault...");
        ClientConfiguration config = new ClientConfiguration();
        config.setProtocol(Protocol.HTTPS);
        AmazonGlacierClient client = new AmazonGlacierClient(credentials, config);
        client.setEndpoint(endpoint);

        if (arg == null || arg.isEmpty()) {
            InitiateJobResult result = client.initiateJob(
                    new InitiateJobRequest(vaultname, new JobParameters().withType("inventory-retrieval")));
            String jobid = result.getJobId();
            System.out.println("Started inventory retrival job as ID " + jobid);
        } else {
            DescribeJobResult djres = client.describeJob(new DescribeJobRequest(vaultname, arg));
            if (!djres.getStatusCode().equals("Succeeded")) {
                System.out.println("Job is not listed as Succeeded. It is: " + djres.getStatusCode());
                System.out.println(djres.getStatusMessage());
                System.exit(2);
            }

            GetJobOutputResult gjores = client
                    .getJobOutput(new GetJobOutputRequest().withVaultName(vaultname).withJobId(arg));
            byte[] buffer = new byte[1024];
            int bytes;
            while ((bytes = gjores.getBody().read(buffer)) > 0) {
                System.out.write(buffer, 0, bytes);
            }
        }
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}