Example usage for com.amazonaws.services.s3.model BucketLifecycleConfiguration ENABLED

List of usage examples for com.amazonaws.services.s3.model BucketLifecycleConfiguration ENABLED

Introduction

In this page you can find the example usage for com.amazonaws.services.s3.model BucketLifecycleConfiguration ENABLED.

Prototype

String ENABLED

To view the source code for com.amazonaws.services.s3.model BucketLifecycleConfiguration ENABLED, click the source link below.

Click Source Link

Document

Constant for an enabled rule.

Usage

From source file:c3.ops.priam.aws.S3FileSystem.java

License:Apache License

/**
 * Ensures the bucket lifecycle rules contain an expiration rule for the
 * given prefix matching the configured backup retention period.
 *
 * @param rules  the bucket's current lifecycle rules; mutated in place
 * @param prefix the key prefix the cleanup rule applies to (also used as the rule id)
 * @return true if {@code rules} was modified and must be saved back to the
 *         bucket, false if no change was required
 */
private boolean updateLifecycleRule(List<Rule> rules, String prefix) {
    Rule rule = null;
    for (BucketLifecycleConfiguration.Rule lcRule : rules) {
        // Compare as prefix.equals(...) so a rule with a null prefix
        // cannot throw a NullPointerException.
        if (prefix.equals(lcRule.getPrefix())) {
            rule = lcRule;
            break;
        }
    }
    // No matching rule and retention disabled: nothing to do.
    if (rule == null && config.getBackupRetentionDays() <= 0)
        return false;
    // Existing rule already matches the configured retention: nothing to do.
    if (rule != null && rule.getExpirationInDays() == config.getBackupRetentionDays()) {
        logger.info("Cleanup rule already set");
        return false;
    }
    if (rule == null) {
        // Create a new expiration rule for this prefix.
        rule = new BucketLifecycleConfiguration.Rule().withExpirationInDays(config.getBackupRetentionDays())
                .withPrefix(prefix);
        rule.setStatus(BucketLifecycleConfiguration.ENABLED);
        rule.setId(prefix);
        rules.add(rule);
        logger.info(String.format("Setting cleanup for %s to %d days", rule.getPrefix(),
                rule.getExpirationInDays()));
    } else if (config.getBackupRetentionDays() > 0) {
        // Update the existing rule to the new retention period.
        logger.info(String.format("Setting cleanup for %s to %d days", rule.getPrefix(),
                config.getBackupRetentionDays()));
        rule.setExpirationInDays(config.getBackupRetentionDays());
    } else {
        // Retention disabled: remove the existing rule.
        logger.info(String.format("Removing cleanup rule for %s", rule.getPrefix()));
        rules.remove(rule);
    }
    return true;
}

From source file:cloudExplorer.BucketTransition.java

License:Open Source License

/**
 * Applies (or disables) an expiration lifecycle rule on the target bucket.
 * Builds a single rule from the configured prefix and day count, submits it,
 * and reports the outcome to the UI text area; failures are shown rather
 * than thrown.
 */
public void run() {
    AWSCredentials credentials = new BasicAWSCredentials(access_key, secret_key);
    AmazonS3 s3Client = new AmazonS3Client(credentials,
            new ClientConfiguration().withSignerOverride("S3SignerType"));
    s3Client.setEndpoint(endpoint);

    // Parse the day count only when the rule is being enabled.
    int expirationDays = disabled ? 0 : Integer.parseInt(days);

    // When disabled, a DISABLED placeholder rule (100 days) is pushed
    // instead of removing the configuration outright.
    BucketLifecycleConfiguration.Rule lifecycleRule;
    if (disabled) {
        lifecycleRule = new BucketLifecycleConfiguration.Rule().withPrefix(prefix)
                .withExpirationInDays(100)
                .withStatus(BucketLifecycleConfiguration.DISABLED.toString());
    } else {
        lifecycleRule = new BucketLifecycleConfiguration.Rule().withPrefix(prefix)
                .withExpirationInDays(expirationDays)
                .withStatus(BucketLifecycleConfiguration.ENABLED.toString());
    }

    List<BucketLifecycleConfiguration.Rule> ruleList = new ArrayList<BucketLifecycleConfiguration.Rule>();
    ruleList.add(lifecycleRule);

    try {
        s3Client.setBucketLifecycleConfiguration(bucket,
                new BucketLifecycleConfiguration().withRules(ruleList));
    } catch (Exception e) {
        mainFrame.jTextArea1.append("\n" + e.getMessage());
    }

    if (disabled) {
        mainFrame.jTextArea1
                .append("\nSent request to disable the bucket life cycle. Please observe for any errors.");
    } else {
        mainFrame.jTextArea1.append("\nSent request to change bucket life cycle to " + expirationDays
                + " day(s). Please observe for any errors.");
    }
    calibrate();
    mainFrame.jPanel9.setVisible(true);
}

From source file:cloudExplorer.BucketTransitionGlacier.java

License:Open Source License

/**
 * Applies (or disables) a lifecycle rule that tiers objects under the
 * configured prefix to Glacier after the configured number of days.
 * Outcome and any errors are reported to the UI text area.
 */
public void run() {
    AWSCredentials credentials = new BasicAWSCredentials(access_key, secret_key);
    AmazonS3 s3Client = new AmazonS3Client(credentials,
            new ClientConfiguration().withSignerOverride("S3SignerType"));
    s3Client.setEndpoint(endpoint);

    // Parse the day count only when the rule is being enabled.
    int tierAfterDays = disabled ? 0 : Integer.parseInt(days);

    Transition glacierTransition = new Transition().withDays(tierAfterDays)
            .withStorageClass(StorageClass.Glacier);

    BucketLifecycleConfiguration.Rule lifecycleRule;
    if (disabled) {
        lifecycleRule = new BucketLifecycleConfiguration.Rule().withPrefix(prefix)
                .withTransition(glacierTransition)
                .withStatus(BucketLifecycleConfiguration.DISABLED.toString());
    } else {
        lifecycleRule = new BucketLifecycleConfiguration.Rule().withPrefix(prefix)
                .withTransition(glacierTransition)
                .withStatus(BucketLifecycleConfiguration.ENABLED.toString());
    }

    List<BucketLifecycleConfiguration.Rule> ruleList = new ArrayList<BucketLifecycleConfiguration.Rule>();
    ruleList.add(lifecycleRule);

    try {
        s3Client.setBucketLifecycleConfiguration(bucket,
                new BucketLifecycleConfiguration().withRules(ruleList));
    } catch (Exception e) {
        mainFrame.jTextArea1.append("\n" + e.getMessage());
    }

    if (disabled) {
        mainFrame.jTextArea1
                .append("\nSent request to disable the bucket life cycle. Please observe for any errors.");
    } else {
        mainFrame.jTextArea1.append("\nSent request to set bucket life cycle to tier to Glacier after: "
                + tierAfterDays + " day(s). Please observe for any errors.");
    }
    calibrate();
}

From source file:edu.iit.s3bucket.S3Bucket.java

/**
 * Installs a lifecycle policy on this bucket that tiers objects to Glacier
 * after one year and deletes them after ten years.
 */
public void setRules() {
    Transition glacierAfterOneYear = new Transition().withDays(365).withStorageClass(StorageClass.Glacier);

    BucketLifecycleConfiguration.Rule archiveAndDelete = new BucketLifecycleConfiguration.Rule()
            .withId("Archive and delete rule")
            .withTransition(glacierAfterOneYear)
            .withExpirationInDays(3650)
            .withStatus(BucketLifecycleConfiguration.ENABLED.toString());

    List<BucketLifecycleConfiguration.Rule> ruleList = new ArrayList<BucketLifecycleConfiguration.Rule>();
    ruleList.add(archiveAndDelete);

    // Save configuration.
    s3client.setBucketLifecycleConfiguration(this.bucketname,
            new BucketLifecycleConfiguration().withRules(ruleList));
}

From source file:org.cloudifysource.esc.driver.provisioning.privateEc2.AmazonS3Uploader.java

License:Open Source License

/**
 * Uploads a file to the given bucket and returns the stored object.
 * Before uploading, installs a lifecycle rule that expires zip archives
 * under the cloud-folder prefix after one day.
 *
 * @param bucketFullPath
 *            The path of the bucket where to download the file.
 * @param file
 *            The file to upload.
 * @return The uploaded object, read back from S3.
 */
public S3Object uploadFile(final String bucketFullPath, final File file) {
    // Expire zip archives under this prefix after one day.
    final BucketLifecycleConfiguration.Rule expireZips = new BucketLifecycleConfiguration.Rule()
            .withId("Delete cloudFolder archives")
            .withPrefix(this.extractPrefix(bucketFullPath) + ZIP_PREFIX)
            .withExpirationInDays(1)
            .withStatus(BucketLifecycleConfiguration.ENABLED.toString());
    final List<BucketLifecycleConfiguration.Rule> lifecycleRules =
            new ArrayList<BucketLifecycleConfiguration.Rule>();
    lifecycleRules.add(expireZips);
    this.s3client.setBucketLifecycleConfiguration(bucketFullPath,
            new BucketLifecycleConfiguration().withRules(lifecycleRules));

    // NOTE(review): the request is constructed with accessKey as the object
    // key, then immediately overridden with the file name below — the
    // initial key value appears unused; confirm this is intentional.
    final PutObjectRequest request = new PutObjectRequest(bucketFullPath, this.accessKey, file);
    request.setKey(file.getName());
    request.setMetadata(new ObjectMetadata());
    this.s3client.putObject(request);

    return this.s3client.getObject(bucketFullPath, file.getName());
}

From source file:org.duracloud.s3storage.S3StorageProvider.java

License:Apache License

/**
 * Creates a "hidden" space.  This space will not be returned by the
 * StorageProvider.getSpaces() method; it can only be accessed via the
 * getSpace* methods by callers who already know its name.
 *
 * @param spaceId The spaceId
 * @param expirationInDays The number of days before content in the space is automatically deleted.
 * @return the spaceId that was passed in
 */
public String createHiddenSpace(String spaceId, int expirationInDays) {
    String bucketName = getHiddenBucketName(spaceId);
    try {
        s3Client.createBucket(bucketName);

        // Apply an auto-expiration lifecycle rule to the new bucket.
        BucketLifecycleConfiguration.Rule expiration = new BucketLifecycleConfiguration.Rule()
                .withId("ExpirationRule")
                .withExpirationInDays(expirationInDays)
                .withStatus(BucketLifecycleConfiguration.ENABLED);

        s3Client.setBucketLifecycleConfiguration(bucketName,
                new BucketLifecycleConfiguration().withRules(expiration));

        return spaceId;
    } catch (AmazonClientException e) {
        String err = "Could not create S3 bucket with name " + bucketName + " due to error: " + e.getMessage();
        throw new StorageException(err, e, RETRY);
    }
}

From source file:org.duracloud.s3storage.StoragePolicy.java

License:Apache License

/**
 * Builds a lifecycle configuration containing a single enabled rule that
 * transitions all objects (empty prefix) to the policy's storage class
 * after the configured number of days.
 *
 * @return the lifecycle configuration representing this storage policy
 */
public BucketLifecycleConfiguration getBucketLifecycleConfig() {
    BucketLifecycleConfiguration.Transition storageTransition = new BucketLifecycleConfiguration.Transition()
            .withDays(daysToTransition).withStorageClass(storageClass);

    // The rule id doubles as a human-readable policy description.
    String ruleId = "Transition to " + storageClass.name() + " in " + daysToTransition + " days";

    BucketLifecycleConfiguration.Rule transitionRule = new BucketLifecycleConfiguration.Rule()
            .withId(ruleId)
            .withPrefix("")
            .withStatus(BucketLifecycleConfiguration.ENABLED.toString());
    transitionRule.addTransition(storageTransition);

    return new BucketLifecycleConfiguration().withRules(transitionRule);
}

From source file:org.duracloud.snapshottask.snapshot.CleanupSnapshotTaskRunner.java

License:Apache License

/**
 * Cleans up a snapshot space by installing a lifecycle rule that expires
 * all bucket content after {@code EXPIRATION_DAYS}, then queues audit
 * tasks for the content deletions.
 *
 * @param taskParameters serialized {@code CleanupSnapshotTaskParameters}
 * @return serialized {@code CleanupSnapshotTaskResult}
 */
@Override
public String performTask(String taskParameters) {
    CleanupSnapshotTaskParameters taskParams = CleanupSnapshotTaskParameters.deserialize(taskParameters);
    final String spaceId = taskParams.getSpaceId();
    final String userId = SecurityContextHolder.getContext().getAuthentication().getName();

    String bucketName = unwrappedSnapshotProvider.getBucketName(spaceId);

    log.info("Performing Cleanup Snapshot Task for spaceID: " + spaceId);

    // Build the bucket deletion policy.
    BucketLifecycleConfiguration.Rule expireRule = new BucketLifecycleConfiguration.Rule()
            .withId("clear-content-rule")
            .withExpirationInDays(EXPIRATION_DAYS)
            .withStatus(BucketLifecycleConfiguration.ENABLED.toString());

    List<BucketLifecycleConfiguration.Rule> ruleList = new ArrayList<>();
    ruleList.add(expireRule);

    // Set the policy on the bucket.
    s3Client.setBucketLifecycleConfiguration(bucketName,
            new BucketLifecycleConfiguration().withRules(ruleList));

    queueContentDeleteAuditTasks(spaceId, userId);

    log.info("Cleanup Snapshot Task for space " + spaceId + " completed successfully");

    return new CleanupSnapshotTaskResult(EXPIRATION_DAYS).serialize();
}

From source file:org.duracloud.snapshottask.snapshot.CompleteRestoreTaskRunner.java

License:Apache License

/**
 * Completes a snapshot restore by installing a lifecycle rule that expires
 * the restored content after the caller-specified number of days.
 *
 * @param taskParameters serialized {@code CompleteRestoreTaskParameters}
 * @return serialized {@code CompleteSnapshotTaskResult}
 */
@Override
public String performTask(String taskParameters) {
    CompleteRestoreTaskParameters taskParams = CompleteRestoreTaskParameters.deserialize(taskParameters);
    String spaceId = taskParams.getSpaceId();
    int daysToExpire = taskParams.getDaysToExpire();
    String bucketName = unwrappedSnapshotProvider.getBucketName(spaceId);

    log.info("Performing Complete Restore Task for spaceID: " + spaceId + ". Restored content will expire in "
            + daysToExpire + " days");

    // Build the bucket deletion policy.
    BucketLifecycleConfiguration.Rule expireRule = new BucketLifecycleConfiguration.Rule()
            .withId("clear-content-rule")
            .withExpirationInDays(daysToExpire)
            .withStatus(BucketLifecycleConfiguration.ENABLED.toString());

    List<BucketLifecycleConfiguration.Rule> ruleList = new ArrayList<>();
    ruleList.add(expireRule);

    // Set the policy on the bucket.
    s3Client.setBucketLifecycleConfiguration(bucketName,
            new BucketLifecycleConfiguration().withRules(ruleList));

    log.info("Complete Restore Task for space " + spaceId + " completed successfully");

    String result = "Complete restore was successful";
    return new CompleteSnapshotTaskResult(result).serialize();
}

From source file:org.opendedup.sdfs.filestore.cloud.BatchAwsS3ChunkStore.java

License:Open Source License

/**
 * Initializes the S3-backed chunk store from the given XML configuration
 * element: reads tuning attributes, builds the AWS client, ensures the
 * bucket and its "bucketinfo" metadata object exist, and applies (or
 * removes) lifecycle transition rules for Glacier / infrequent access.
 *
 * NOTE(review): statement order matters throughout — attributes are read
 * before client construction, and the bucketinfo bootstrap depends on the
 * client being fully configured. Do not reorder.
 *
 * @param config XML element carrying the store's configuration attributes
 * @throws IOException if any part of initialization fails (all exceptions
 *                     are wrapped)
 */
@Override
public void init(Element config) throws IOException {
    this.name = Main.cloudBucket.toLowerCase();
    this.staged_sync_location.mkdirs();
    try {
        // --- Optional tuning attributes from the config element ---
        if (config.hasAttribute("default-bucket-location")) {
            bucketLocation = RegionUtils.getRegion(config.getAttribute("default-bucket-location"));

        }
        if (config.hasAttribute("connection-check-interval")) {
            this.checkInterval = Integer.parseInt(config.getAttribute("connection-check-interval"));
        }
        if (config.hasAttribute("block-size")) {
            int sz = (int) StringUtils.parseSize(config.getAttribute("block-size"));
            HashBlobArchive.MAX_LEN = sz;
        }
        if (config.hasAttribute("allow-sync")) {
            HashBlobArchive.allowSync = Boolean.parseBoolean(config.getAttribute("allow-sync"));
            if (config.hasAttribute("sync-check-schedule")) {
                try {
                    new SyncFSScheduler(config.getAttribute("sync-check-schedule"));
                } catch (Exception e) {
                    // Scheduler failure is non-fatal; init continues without it.
                    SDFSLogger.getLog().error("unable to start sync scheduler", e);
                }
            }

        }
        if (config.hasAttribute("upload-thread-sleep-time")) {
            int tm = Integer.parseInt(config.getAttribute("upload-thread-sleep-time"));
            HashBlobArchive.THREAD_SLEEP_TIME = tm;
        }
        if (config.hasAttribute("cache-writes")) {
            HashBlobArchive.cacheWrites = Boolean.parseBoolean(config.getAttribute("cache-writes"));
        }
        if (config.hasAttribute("cache-reads")) {
            HashBlobArchive.cacheReads = Boolean.parseBoolean(config.getAttribute("cache-reads"));
        }
        if (config.hasAttribute("sync-files")) {
            boolean syncf = Boolean.parseBoolean(config.getAttribute("sync-files"));
            if (syncf) {
                // Side effect: constructor registers the replication service.
                new FileReplicationService(this);
            }
        }
        int rsp = 0;
        int wsp = 0;
        if (config.hasAttribute("read-speed")) {
            rsp = Integer.parseInt(config.getAttribute("read-speed"));
        }
        if (config.hasAttribute("write-speed")) {
            wsp = Integer.parseInt(config.getAttribute("write-speed"));
        }
        if (config.hasAttribute("local-cache-size")) {
            long sz = StringUtils.parseSize(config.getAttribute("local-cache-size"));
            HashBlobArchive.setLocalCacheSize(sz);
        }
        if (config.hasAttribute("metadata-version")) {
            this.mdVersion = Integer.parseInt(config.getAttribute("metadata-version"));
        }
        if (config.hasAttribute("map-cache-size")) {
            int sz = Integer.parseInt(config.getAttribute("map-cache-size"));
            HashBlobArchive.MAP_CACHE_SIZE = sz;
        }
        if (config.hasAttribute("io-threads")) {
            int sz = Integer.parseInt(config.getAttribute("io-threads"));
            Main.dseIOThreads = sz;
        }
        if (config.hasAttribute("clustered")) {
            this.clustered = Boolean.parseBoolean(config.getAttribute("clustered"));
        }
        if (config.hasAttribute("delete-unclaimed")) {
            this.deleteUnclaimed = Boolean.parseBoolean(config.getAttribute("delete-unclaimed"));
        }
        if (config.hasAttribute("glacier-archive-days")) {
            this.glacierDays = Integer.parseInt(config.getAttribute("glacier-archive-days"));
            // Archived blocks may need restoring before reads can succeed.
            if (this.glacierDays > 0)
                Main.checkArchiveOnRead = true;
        }
        if (config.hasAttribute("infrequent-access-days")) {
            this.infrequentAccess = Integer.parseInt(config.getAttribute("infrequent-access-days"));
        }
        if (config.hasAttribute("simple-s3")) {
            EncyptUtils.baseEncode = Boolean.parseBoolean(config.getAttribute("simple-s3"));
            this.simpleS3 = true;
        }
        if (config.hasAttribute("md5-sum")) {
            this.md5sum = Boolean.parseBoolean(config.getAttribute("md5-sum"));
            if (!this.md5sum) {
                // Disable SDK-level MD5 validation when checksums are off.
                System.setProperty("com.amazonaws.services.s3.disableGetObjectMD5Validation", "true");
                System.setProperty("com.amazonaws.services.s3.disablePutObjectMD5Validation", "true");
            }

        }
        // --- AWS client configuration (signer, pool size, timeouts) ---
        ClientConfiguration clientConfig = new ClientConfiguration();
        if (config.hasAttribute("use-v4-signer")) {
            boolean v4s = Boolean.parseBoolean(config.getAttribute("use-v4-signer"));

            if (v4s) {
                clientConfig.setSignerOverride("AWSS3V4SignerType");
            }
        }
        if (config.hasAttribute("use-basic-signer")) {
            boolean v4s = Boolean.parseBoolean(config.getAttribute("use-basic-signer"));
            if (v4s) {
                clientConfig.setSignerOverride("S3SignerType");
            }
        }

        clientConfig.setMaxConnections(Main.dseIOThreads * 2);
        clientConfig.setConnectionTimeout(10000);
        clientConfig.setSocketTimeout(10000);

        // --- Optional <connection-props> element overrides -------------
        String s3Target = null;
        if (config.getElementsByTagName("connection-props").getLength() > 0) {
            Element el = (Element) config.getElementsByTagName("connection-props").item(0);
            if (el.hasAttribute("connection-timeout"))
                clientConfig.setConnectionTimeout(Integer.parseInt(el.getAttribute("connection-timeout")));
            if (el.hasAttribute("socket-timeout"))
                clientConfig.setSocketTimeout(Integer.parseInt(el.getAttribute("socket-timeout")));
            if (el.hasAttribute("local-address"))
                clientConfig.setLocalAddress(InetAddress.getByName(el.getAttribute("local-address")));
            if (el.hasAttribute("max-retry"))
                clientConfig.setMaxErrorRetry(Integer.parseInt(el.getAttribute("max-retry")));
            if (el.hasAttribute("protocol")) {
                String pr = el.getAttribute("protocol");
                if (pr.equalsIgnoreCase("http"))
                    clientConfig.setProtocol(Protocol.HTTP);
                else
                    clientConfig.setProtocol(Protocol.HTTPS);

            }
            if (el.hasAttribute("s3-target")) {
                // Custom (non-AWS) S3-compatible endpoint.
                s3Target = el.getAttribute("s3-target");
            }
            if (el.hasAttribute("proxy-host")) {
                clientConfig.setProxyHost(el.getAttribute("proxy-host"));
            }
            if (el.hasAttribute("proxy-domain")) {
                clientConfig.setProxyDomain(el.getAttribute("proxy-domain"));
            }
            if (el.hasAttribute("proxy-password")) {
                clientConfig.setProxyPassword(el.getAttribute("proxy-password"));
            }
            if (el.hasAttribute("proxy-port")) {
                clientConfig.setProxyPort(Integer.parseInt(el.getAttribute("proxy-port")));
            }
            if (el.hasAttribute("proxy-username")) {
                clientConfig.setProxyUsername(el.getAttribute("proxy-username"));
            }
        }

        // For custom HTTPS targets, accept ANY certificate and hostname.
        // NOTE(review): this disables TLS validation entirely for custom
        // endpoints — a deliberate trade-off for self-signed appliances,
        // but worth confirming it is acceptable for this deployment.
        if (s3Target != null && s3Target.toLowerCase().startsWith("https")) {
            TrustStrategy acceptingTrustStrategy = new TrustStrategy() {
                @Override
                public boolean isTrusted(X509Certificate[] certificate, String authType) {
                    return true;
                }
            };
            SSLSocketFactory sf = new SSLSocketFactory(acceptingTrustStrategy,
                    SSLSocketFactory.ALLOW_ALL_HOSTNAME_VERIFIER);
            clientConfig.getApacheHttpClientConfig().withSslSocketFactory(sf);
        }
        // Fall back to EC2 instance-profile credentials when none configured.
        if (awsCredentials != null)
            s3Service = new AmazonS3Client(awsCredentials, clientConfig);
        else
            s3Service = new AmazonS3Client(new InstanceProfileCredentialsProvider(), clientConfig);
        if (bucketLocation != null) {
            s3Service.setRegion(bucketLocation);
            System.out.println("bucketLocation=" + bucketLocation.toString());
        }
        if (s3Target != null) {
            s3Service.setEndpoint(s3Target);
            System.out.println("target=" + s3Target);
        }
        if (config.hasAttribute("disableDNSBucket")) {
            // Path-style access for endpoints without bucket DNS support.
            s3Service.setS3ClientOptions(new S3ClientOptions()
                    .withPathStyleAccess(Boolean.parseBoolean(config.getAttribute("disableDNSBucket")))
                    .disableChunkedEncoding());
            System.out.println(
                    "disableDNSBucket=" + Boolean.parseBoolean(config.getAttribute("disableDNSBucket")));
        }
        // --- Bucket bootstrap: create bucket + "bucketinfo" object -----
        if (!s3Service.doesBucketExist(this.name)) {
            s3Service.createBucket(this.name);
            SDFSLogger.getLog().info("created new store " + name);
            ObjectMetadata md = new ObjectMetadata();
            md.addUserMetadata("currentsize", "0");
            md.addUserMetadata("currentcompressedsize", "0");
            md.addUserMetadata("clustered", "true");
            md.addUserMetadata("lastupdate", Long.toString(System.currentTimeMillis()));
            md.addUserMetadata("hostname", InetAddress.getLocalHost().getHostName());
            md.addUserMetadata("port", Integer.toString(Main.sdfsCliPort));

            this.clustered = true;
            byte[] sz = Long.toString(System.currentTimeMillis()).getBytes();
            if (md5sum) {
                String mds = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(sz));
                md.setContentMD5(mds);
            }
            md.setContentLength(sz.length);
            this.binm = "bucketinfo/"
                    + EncyptUtils.encHashArchiveName(Main.DSEID, Main.chunkStoreEncryptionEnabled);
            s3Service.putObject(this.name, binm, new ByteArrayInputStream(sz), md);
        } else {
            // Existing bucket: try to read back the bucketinfo metadata.
            Map<String, String> obj = null;
            ObjectMetadata omd = null;
            try {
                omd = s3Service.getObjectMetadata(this.name, binm);
                obj = omd.getUserMetadata();
                obj.get("currentsize");
            } catch (Exception e) {
                omd = null;
                SDFSLogger.getLog().debug("unable to find bucketinfo object", e);
            }
            if (omd == null) {
                // Retry under the DSEID-derived key in case binm was stale.
                try {
                    this.binm = "bucketinfo/"
                            + EncyptUtils.encHashArchiveName(Main.DSEID, Main.chunkStoreEncryptionEnabled);
                    omd = s3Service.getObjectMetadata(this.name, binm);
                    obj = omd.getUserMetadata();
                    obj.get("currentsize");
                } catch (Exception e) {
                    omd = null;
                    SDFSLogger.getLog().debug("unable to find bucketinfo object", e);
                }
            }
            if (omd == null) {
                // No bucketinfo at all: bootstrap a fresh one.
                ObjectMetadata md = new ObjectMetadata();
                md.addUserMetadata("currentsize", "0");
                md.addUserMetadata("currentcompressedsize", "0");
                md.addUserMetadata("clustered", "true");
                md.addUserMetadata("lastupdate", Long.toString(System.currentTimeMillis()));
                md.addUserMetadata("hostname", InetAddress.getLocalHost().getHostName());
                md.addUserMetadata("port", Integer.toString(Main.sdfsCliPort));

                this.clustered = true;
                this.binm = "bucketinfo/"
                        + EncyptUtils.encHashArchiveName(Main.DSEID, Main.chunkStoreEncryptionEnabled);
                byte[] sz = Long.toString(System.currentTimeMillis()).getBytes();
                if (md5sum) {
                    String mds = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(sz));
                    md.setContentMD5(mds);
                }
                md.setContentLength(sz.length);
                s3Service.putObject(this.name, binm, new ByteArrayInputStream(sz), md);
            } else {
                // Restore counters recorded at last clean shutdown.
                if (obj.containsKey("currentsize")) {
                    long cl = Long.parseLong((String) obj.get("currentsize"));
                    if (cl >= 0) {
                        HashBlobArchive.currentLength.set(cl);

                    } else
                        SDFSLogger.getLog().warn("The S3 objectstore DSE did not close correctly len=" + cl);
                } else {
                    SDFSLogger.getLog().warn(
                            "The S3 objectstore DSE did not close correctly. Metadata tag currentsize was not added");
                }

                if (obj.containsKey("currentcompressedsize")) {
                    long cl = Long.parseLong((String) obj.get("currentcompressedsize"));
                    if (cl >= 0) {
                        HashBlobArchive.compressedLength.set(cl);

                    } else
                        SDFSLogger.getLog().warn("The S3 objectstore DSE did not close correctly clen=" + cl);
                } else {
                    SDFSLogger.getLog().warn(
                            "The S3 objectstore DSE did not close correctly. Metadata tag currentsize was not added");
                }
                if (obj.containsKey("clustered")) {
                    this.clustered = Boolean.parseBoolean(obj.get("clustered"));
                } else
                    this.clustered = false;

                // Persist the (possibly changed) clustered flag back.
                obj.put("clustered", Boolean.toString(this.clustered));
                omd.setUserMetadata(obj);
                try {

                    updateObject(binm, omd);
                } catch (Exception e) {
                    // Update failed: recreate the bucketinfo object from scratch.
                    SDFSLogger.getLog().warn("unable to update bucket info in init", e);
                    SDFSLogger.getLog().info("created new store " + name);
                    ObjectMetadata md = new ObjectMetadata();
                    md.addUserMetadata("currentsize", "0");
                    md.addUserMetadata("lastupdate", Long.toString(System.currentTimeMillis()));
                    md.addUserMetadata("currentcompressedsize", "0");
                    md.addUserMetadata("clustered", Boolean.toString(this.clustered));
                    md.addUserMetadata("hostname", InetAddress.getLocalHost().getHostName());
                    md.addUserMetadata("port", Integer.toString(Main.sdfsCliPort));
                    byte[] sz = Long.toString(System.currentTimeMillis()).getBytes();
                    if (md5sum) {
                        String mds = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(sz));
                        md.setContentMD5(mds);
                    }
                    md.setContentLength(sz.length);
                    s3Service.putObject(this.name, binm, new ByteArrayInputStream(sz), md);

                }
            }
        }
        // --- Lifecycle rules for block data (AWS endpoints only) -------
        ArrayList<Transition> trs = new ArrayList<Transition>();
        if (this.glacierDays > 0 && s3Target == null) {
            Transition transToArchive = new Transition().withDays(this.glacierDays)
                    .withStorageClass(StorageClass.Glacier);
            trs.add(transToArchive);
        }

        if (this.infrequentAccess > 0 && s3Target == null) {
            Transition transToArchive = new Transition().withDays(this.infrequentAccess)
                    .withStorageClass(StorageClass.StandardInfrequentAccess);
            trs.add(transToArchive);

        }
        if (trs.size() > 0) {
            BucketLifecycleConfiguration.Rule ruleArchiveAndExpire = new BucketLifecycleConfiguration.Rule()
                    .withId("SDFS Automated Archive Rule for Block Data").withPrefix("blocks/")
                    .withTransitions(trs).withStatus(BucketLifecycleConfiguration.ENABLED.toString());
            List<BucketLifecycleConfiguration.Rule> rules = new ArrayList<BucketLifecycleConfiguration.Rule>();
            rules.add(ruleArchiveAndExpire);

            BucketLifecycleConfiguration configuration = new BucketLifecycleConfiguration().withRules(rules);

            // Save configuration.
            s3Service.setBucketLifecycleConfiguration(this.name, configuration);
        } else if (s3Target == null) {
            // No tiering configured: remove any stale lifecycle config.
            s3Service.deleteBucketLifecycleConfiguration(this.name);
        }
        // --- Start archive subsystem and the background worker thread --
        HashBlobArchive.init(this);
        HashBlobArchive.setReadSpeed(rsp);
        HashBlobArchive.setWriteSpeed(wsp);
        Thread th = new Thread(this);
        th.start();
    } catch (Exception e) {
        SDFSLogger.getLog().error("unable to start service", e);
        throw new IOException(e);
    }

}