Example usage for com.amazonaws.util BinaryUtils fromHex

List of usage examples for com.amazonaws.util BinaryUtils fromHex

Introduction

On this page you can find example usages of com.amazonaws.util BinaryUtils fromHex.

Prototype

public static byte[] fromHex(String hexData) 

Document

Converts a Hex-encoded data string to the original byte data.
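
A minimal sketch of the typical use, mirroring the examples below: decode the hex ETag reported by Amazon S3 into raw bytes and compare it with a locally computed MD5. The variables s3Object and downloadedBytes are illustrative only, not taken from any single example.

ObjectMetadata metadata = s3Object.getObjectMetadata();
String eTag = metadata.getETag();
// Multipart ETags contain a '-' and are not a plain MD5 hex string, so skip those
if (!ServiceUtils.isMultipartUploadETag(eTag)) {
    byte[] serverSideHash = BinaryUtils.fromHex(eTag);
    byte[] clientSideHash = Md5Utils.computeMD5Hash(downloadedBytes);
    if (!Arrays.equals(clientSideHash, serverSideHash)) {
        throw new AmazonClientException("Downloaded data does not match the ETag reported by Amazon S3");
    }
}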

Usage

From source file:com.davidsoergel.s3napback.StreamingServiceUtils.java

License:Apache License

/**
 * Downloads an S3Object, as returned from
 * {@link com.amazonaws.services.s3.AmazonS3Client#getObject(com.amazonaws.services.s3.model.GetObjectRequest)},
 * and writes its contents to the given output stream.
 *
 * @param s3Object             The S3Object containing a reference to an InputStream containing the object's data.
 * @param eventualOutputStream The output stream to write the object's data to.
 */
public static void downloadObjectToStream(S3Object s3Object, BufferedOutputStream eventualOutputStream) {
    /*
          // attempt to create the parent if it doesn't exist
          File parentDirectory = destinationFile.getParentFile();
          if (parentDirectory != null && !parentDirectory.exists())
             {
             parentDirectory.mkdirs();
             }
    */

    ByteArrayOutputStream byteOS = new ByteArrayOutputStream(
            (int) s3Object.getObjectMetadata().getContentLength());
    OutputStream outputStream = null;
    try {
        // extra in-memory copy, left over from the FileOutputStream version of this method
        outputStream = new BufferedOutputStream(byteOS);
        byte[] buffer = new byte[1024 * 10];
        int bytesRead;
        while ((bytesRead = s3Object.getObjectContent().read(buffer)) > -1) {
            outputStream.write(buffer, 0, bytesRead);
        }
    } catch (IOException e) {
        try {
            s3Object.getObjectContent().abort();
        } catch (IOException abortException) {
            log.warn("Couldn't abort stream", e);
        }
        throw new AmazonClientException("Unable to store object contents to disk: " + e.getMessage(), e);
    } finally {
        try {
            outputStream.close();
        } catch (Exception e) {
            // ignore failures while closing
        }
        try {
            s3Object.getObjectContent().close();
        } catch (Exception e) {
            // ignore failures while closing
        }
    }

    try {
        // Multipart Uploads don't have an MD5 calculated on the service side
        if (!ServiceUtils.isMultipartUploadETag(s3Object.getObjectMetadata().getETag())) {
            byte[] clientSideHash = Md5Utils.computeMD5Hash(byteOS.toByteArray());
            byte[] serverSideHash = BinaryUtils.fromHex(s3Object.getObjectMetadata().getETag());

            if (!Arrays.equals(clientSideHash, serverSideHash)) {
                throw new AmazonClientException("Unable to verify integrity of data download.  "
                        + "Client calculated content hash didn't match hash calculated by Amazon S3.  "
                        + "The data may be corrupt; please try again.");
            }
        }
    } catch (Exception e) {
        log.warn("Unable to calculate MD5 hash to validate download: " + e.getMessage(), e);
    }

    try {
        eventualOutputStream.write(byteOS.toByteArray());
    } catch (Exception e) {
        log.warn("Unable to write to output stream: " + e.getMessage(), e);
    }
}

From source file:com.emc.vipr.services.s3.ViPRS3Client.java

License:Open Source License

/**
 * Executes a (Subclass of) PutObjectRequest.  In particular, we check for subclasses
 * of the UpdateObjectRequest and inject the value of the Range header.  This version
 * also returns the raw ObjectMetadata for the response so callers can construct
 * their own result objects.
 * @param putObjectRequest the request to execute
 * @return an ObjectMetadata containing the response headers.
 */
protected ObjectMetadata doPut(PutObjectRequest putObjectRequest) {
    assertParameterNotNull(putObjectRequest,
            "The PutObjectRequest parameter must be specified when uploading an object");

    String bucketName = putObjectRequest.getBucketName();
    String key = putObjectRequest.getKey();
    ObjectMetadata metadata = putObjectRequest.getMetadata();
    InputStream input = putObjectRequest.getInputStream();
    if (metadata == null)
        metadata = new ObjectMetadata();

    assertParameterNotNull(bucketName, "The bucket name parameter must be specified when uploading an object");
    assertParameterNotNull(key, "The key parameter must be specified when uploading an object");

    /*
     * This is compatible with progress listener set by either the legacy
     * method GetObjectRequest#setProgressListener or the new method
     * GetObjectRequest#setGeneralProgressListener.
     */
    com.amazonaws.event.ProgressListener progressListener = putObjectRequest.getGeneralProgressListener();
    ProgressListenerCallbackExecutor progressListenerCallbackExecutor = ProgressListenerCallbackExecutor
            .wrapListener(progressListener);

    // If a file is specified for upload, we need to pull some additional
    // information from it to auto-configure a few options
    if (putObjectRequest.getFile() != null) {
        File file = putObjectRequest.getFile();

        // Always set the content length, even if it's already set
        metadata.setContentLength(file.length());

        // Only set the content type if it hasn't already been set
        if (metadata.getContentType() == null) {
            metadata.setContentType(Mimetypes.getInstance().getMimetype(file));
        }

        FileInputStream fileInputStream = null;
        try {
            fileInputStream = new FileInputStream(file);
            byte[] md5Hash = Md5Utils.computeMD5Hash(fileInputStream);
            metadata.setContentMD5(BinaryUtils.toBase64(md5Hash));
        } catch (Exception e) {
            throw new AmazonClientException("Unable to calculate MD5 hash: " + e.getMessage(), e);
        } finally {
            try {
                fileInputStream.close();
            } catch (Exception e) {
                // ignore failures while closing
            }
        }

        try {
            input = new RepeatableFileInputStream(file);
        } catch (FileNotFoundException fnfe) {
            throw new AmazonClientException("Unable to find file to upload", fnfe);
        }
    }

    Request<PutObjectRequest> request = createRequest(bucketName, key, putObjectRequest, HttpMethodName.PUT);

    if (putObjectRequest.getAccessControlList() != null) {
        addAclHeaders(request, putObjectRequest.getAccessControlList());
    } else if (putObjectRequest.getCannedAcl() != null) {
        request.addHeader(Headers.S3_CANNED_ACL, putObjectRequest.getCannedAcl().toString());
    }

    if (putObjectRequest.getStorageClass() != null) {
        request.addHeader(Headers.STORAGE_CLASS, putObjectRequest.getStorageClass());
    }

    if (putObjectRequest.getRedirectLocation() != null) {
        request.addHeader(Headers.REDIRECT_LOCATION, putObjectRequest.getRedirectLocation());
        if (input == null) {
            input = new ByteArrayInputStream(new byte[0]);
        }
    }

    // Use internal interface to differentiate 0 from unset.
    if (metadata.getRawMetadata().get(Headers.CONTENT_LENGTH) == null) {
        /*
         * There's nothing we can do except for let the HTTP client buffer
         * the input stream contents if the caller doesn't tell us how much
         * data to expect in a stream since we have to explicitly tell
         * Amazon S3 how much we're sending before we start sending any of
         * it.
         */
        log.warn("No content length specified for stream data.  "
                + "Stream contents will be buffered in memory and could result in " + "out of memory errors.");
    }

    if (progressListenerCallbackExecutor != null) {
        // Wrap the input stream so progress events are reported as it is read
        input = new com.amazonaws.event.ProgressReportingInputStream(input, progressListenerCallbackExecutor);
        fireProgressEvent(progressListenerCallbackExecutor,
                com.amazonaws.event.ProgressEvent.STARTED_EVENT_CODE);
    }

    if (!input.markSupported()) {
        int streamBufferSize = Constants.DEFAULT_STREAM_BUFFER_SIZE;
        String bufferSizeOverride = System.getProperty("com.amazonaws.sdk.s3.defaultStreamBufferSize");
        if (bufferSizeOverride != null) {
            try {
                streamBufferSize = Integer.parseInt(bufferSizeOverride);
            } catch (Exception e) {
                log.warn("Unable to parse buffer size override from value: " + bufferSizeOverride);
            }
        }

        input = new RepeatableInputStream(input, streamBufferSize);
    }

    MD5DigestCalculatingInputStream md5DigestStream = null;
    if (metadata.getContentMD5() == null) {
        /*
         * If the user hasn't set the content MD5, then we don't want to
         * buffer the whole stream in memory just to calculate it. Instead,
         * we can calculate it on the fly and validate it with the returned
         * ETag from the object upload.
         */
        try {
            md5DigestStream = new MD5DigestCalculatingInputStream(input);
            input = md5DigestStream;
        } catch (NoSuchAlgorithmException e) {
            log.warn("No MD5 digest algorithm available.  Unable to calculate "
                    + "checksum and verify data integrity.", e);
        }
    }

    if (metadata.getContentType() == null) {
        /*
         * Default to the "application/octet-stream" if the user hasn't
         * specified a content type.
         */
        metadata.setContentType(Mimetypes.MIMETYPE_OCTET_STREAM);
    }

    populateRequestMetadata(request, metadata);
    request.setContent(input);

    if (putObjectRequest instanceof UpdateObjectRequest) {
        request.addHeader(Headers.RANGE, "bytes=" + ((UpdateObjectRequest) putObjectRequest).getUpdateRange());
    }

    ObjectMetadata returnedMetadata = null;
    try {
        returnedMetadata = invoke(request, new S3MetadataResponseHandler(), bucketName, key);
    } catch (AmazonClientException ace) {
        fireProgressEvent(progressListenerCallbackExecutor,
                com.amazonaws.event.ProgressEvent.FAILED_EVENT_CODE);
        throw ace;
    } finally {
        try {
            input.close();
        } catch (Exception e) {
            log.warn("Unable to cleanly close input stream: " + e.getMessage(), e);
        }
    }

    String contentMd5 = metadata.getContentMD5();
    if (md5DigestStream != null) {
        contentMd5 = BinaryUtils.toBase64(md5DigestStream.getMd5Digest());
    }

    // Can't verify MD5 on appends/update (yet).
    if (!(putObjectRequest instanceof UpdateObjectRequest)) {
        if (returnedMetadata != null && contentMd5 != null) {
            byte[] clientSideHash = BinaryUtils.fromBase64(contentMd5);
            byte[] serverSideHash = BinaryUtils.fromHex(returnedMetadata.getETag());

            if (!Arrays.equals(clientSideHash, serverSideHash)) {
                fireProgressEvent(progressListenerCallbackExecutor,
                        com.amazonaws.event.ProgressEvent.FAILED_EVENT_CODE);
                throw new AmazonClientException("Unable to verify integrity of data upload.  "
                        + "Client calculated content hash didn't match hash calculated by Amazon S3.  "
                        + "You may need to delete the data stored in Amazon S3.");
            }
        }
    }

    fireProgressEvent(progressListenerCallbackExecutor, com.amazonaws.event.ProgressEvent.COMPLETED_EVENT_CODE);

    return returnedMetadata;
}

From source file:com.github.rholder.esthree.command.Get.java

License:Apache License

/**
 * Downloads the object via retryingGet() and verifies the resulting MD5 digest against
 * BinaryUtils.fromHex(fullETag), unless the ETag is a multipart ETag (multipart ETags
 * contain a '-' and are not a plain MD5 hex string).
 */
@Override
public Integer call() throws Exception {

    // currentDigest holds the most recent valid digest; it is initialized here and
    // then replaced with the digest accumulated by retryingGet()
    currentDigest = MessageDigest.getInstance("MD5");
    currentDigest = retryingGet();

    if (progressListener != null) {
        progressListener.progressChanged(new ProgressEvent(ProgressEventType.TRANSFER_STARTED_EVENT));
    }

    if (!fullETag.contains("-")) {
        byte[] expected = BinaryUtils.fromHex(fullETag);
        byte[] current = currentDigest.digest();
        if (!Arrays.equals(expected, current)) {
            throw new AmazonClientException("Unable to verify integrity of data download.  "
                    + "Client calculated content hash didn't match hash calculated by Amazon S3.  "
                    + "The data may be corrupt.");
        }
    } else {
        // TODO log warning that we can't validate the MD5
        if (verbose) {
            System.err.println("\nMD5 does not exist on AWS for file, calculated value: "
                    + BinaryUtils.toHex(currentDigest.digest()));
        }
    }
    // TODO add ability to resume from previously downloaded chunks
    // TODO add rate limiter

    return 0;
}

From source file:com.github.rholder.esthree.command.GetMultipart.java

License:Apache License

/**
 * Downloads the object in chunks via retryingGetWithRange() and verifies the accumulated
 * MD5 digest against BinaryUtils.fromHex(fullETag), unless the ETag is a multipart ETag.
 */
@Override
public Integer call() throws Exception {
    ObjectMetadata om = amazonS3Client.getObjectMetadata(bucket, key);
    contentLength = om.getContentLength();

    // currentDigest holds the most recent valid digest; it is initialized here and
    // updated as each chunk is fetched
    currentDigest = MessageDigest.getInstance("MD5");
    chunkSize = chunkSize == null ? DEFAULT_CHUNK_SIZE : chunkSize;
    fileParts = Parts.among(contentLength, chunkSize);
    for (Part fp : fileParts) {

        /*
         * We'll need to compute the digest on the full incoming stream for
         * each valid chunk that comes in. Invalid chunks will need to be
         * recomputed and fed through a copy of the MD5 that was valid up
         * until the latest chunk.
         */
        currentDigest = retryingGetWithRange(fp.start, fp.end);
    }

    // TODO fix total content length progress bar
    if (progressListener != null) {
        progressListener.progressChanged(new ProgressEvent(ProgressEventType.TRANSFER_STARTED_EVENT));
    }

    String fullETag = om.getETag();
    if (!fullETag.contains("-")) {
        byte[] expected = BinaryUtils.fromHex(fullETag);
        byte[] current = currentDigest.digest();
        if (!Arrays.equals(expected, current)) {
            throw new AmazonClientException("Unable to verify integrity of data download.  "
                    + "Client calculated content hash didn't match hash calculated by Amazon S3.  "
                    + "The data may be corrupt.");
        }
    } else {
        // TODO log warning that we can't validate the MD5
        if (verbose) {
            System.err.println("\nMD5 does not exist on AWS for file, calculated value: "
                    + BinaryUtils.toHex(currentDigest.digest()));
        }
    }
    // TODO add ability to resume from previously downloaded chunks
    // TODO add rate limiter

    return 0;
}

From source file:com.vrane.metaGlacier.Archive.java

/**
 * Uploads the local file to Glacier in parts of 'granularity' bytes, collecting the binary
 * tree hash of each part, and returns the tree hash of the whole archive (or null if the
 * upload was interrupted or abandoned after retries).
 */
private String uploadParts(String uploadId)
        throws AmazonServiceException, NoSuchAlgorithmException, AmazonClientException, IOException {
    int filePosition = 0;
    long currentPosition = 0;
    byte[] buffer = new byte[(int) granularity];
    List<byte[]> binaryChecksums = new LinkedList<>();
    final File file = new File(localPath);
    String contentRange;
    int read = 0;

    try (FileInputStream fileToUpload = new FileInputStream(file)) {
        while (currentPosition < file.length()) {
            read = fileToUpload.read(buffer, filePosition, buffer.length);
            if (read == -1) {
                break;
            }
            if (Thread.currentThread().isInterrupted()) {
                LGR.warning("upload job is interrupted.");
                return null;
            }
            LGR.log(Level.FINE, "reading position {0} for file {1}",
                    new Object[] { currentPosition, localPath });
            byte[] bytesRead = Arrays.copyOf(buffer, read);
            contentRange = String.format("bytes %s-%s/*", currentPosition, currentPosition + read - 1);
            String checksum = TreeHashGenerator.calculateTreeHash(new ByteArrayInputStream(bytesRead));
            byte[] binaryChecksum = BinaryUtils.fromHex(checksum);
            binaryChecksums.add(binaryChecksum);

            //Upload part.
            UploadMultipartPartRequest partRequest = new UploadMultipartPartRequest().withVaultName(vaultName)
                    .withBody(new ByteArrayInputStream(bytesRead)).withChecksum(checksum)
                    .withRange(contentRange).withUploadId(uploadId);
            try {
                GlacierFrame.getClient(region).uploadMultipartPart(partRequest);
            } catch (RequestTimeoutException e) {
                LGR.log(Level.SEVERE, "Request time out at {0}. Retrying in {1} s",
                        new Object[] { HumanBytes.convert(currentPosition), RETRY_SEC });
                LGR.log(Level.SEVERE, null, e);
                try {
                    Thread.sleep(RETRY_SEC * 1000);
                } catch (InterruptedException ex) {
                    LGR.log(Level.SEVERE, null, ex);
                    return null;
                }
                try {
                    GlacierFrame.getClient(region).uploadMultipartPart(partRequest);
                } catch (RequestTimeoutException ex) {
                    LGR.log(Level.SEVERE, null, ex);
                    LGR.severe("2nd time out.  Giving up");
                    JOptionPane.showMessageDialog(null, "Error uploading");
                    return null;
                }
            } catch (Exception e) {
                LGR.log(Level.SEVERE, null, e);
                LGR.severe("Unanticipated error.  Giving up.");
                return null;
            }
            if (Thread.currentThread().isInterrupted()) {
                LGR.warning("upload job is interrupted.");
                return null;
            }
            currentPosition = currentPosition + read;
            progress_reporter.setFilePosition(currentPosition);
        }
    }
    String checksum = TreeHashGenerator.calculateTreeHash(binaryChecksums);
    return checksum;
}
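
The Glacier multipart examples above and below use fromHex the same way: each part's hex tree hash is converted to bytes and collected, and those byte arrays are combined into the checksum of the whole archive. A reduced sketch of just that bookkeeping (partStreams is a hypothetical list of per-part input streams; the part upload and error handling are omitted):

List<byte[]> binaryChecksums = new LinkedList<byte[]>();
for (InputStream part : partStreams) {
    // hex tree hash of this part, as passed to UploadMultipartPartRequest.withChecksum(...)
    String partChecksum = TreeHashGenerator.calculateTreeHash(part);
    binaryChecksums.add(BinaryUtils.fromHex(partChecksum));
}
// checksum for the complete archive, used when completing the multipart upload
String archiveChecksum = TreeHashGenerator.calculateTreeHash(binaryChecksums);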

From source file:de.kopis.glacier.commands.UploadMultipartArchiveCommand.java

License:Open Source License

/**
 * Uploads the file to Glacier in parts of 'partSize' bytes, retrying each failed part up to
 * five times, and returns the tree hash of the complete archive.
 */
private String uploadParts(String uploadId, File file, final String vaultName, final Long partSize)
        throws AmazonServiceException, NoSuchAlgorithmException, AmazonClientException, IOException {
    FileInputStream fileToUpload = null;
    String checksum = "";
    try {
        long currentPosition = 0;
        List<byte[]> binaryChecksums = new LinkedList<byte[]>();
        fileToUpload = new FileInputStream(file);
        int counter = 1;
        int total = (int) Math.ceil(file.length() / (double) partSize);
        while (currentPosition < file.length()) {
            long length = partSize;
            if (currentPosition + partSize > file.length()) {
                length = file.length() - currentPosition;
            }

            Exception failedException = null;
            boolean completed = false;
            int tries = 0;

            while (!completed && tries < 5) {
                tries++;
                InputStream inputSubStream = newInputSubstream(file, currentPosition, length);
                inputSubStream.mark(-1);
                checksum = TreeHashGenerator.calculateTreeHash(inputSubStream);
                byte[] binaryChecksum = BinaryUtils.fromHex(checksum);
                inputSubStream.reset();
                String range = "bytes " + currentPosition + "-" + (currentPosition + length - 1) + "/*";
                UploadMultipartPartRequest req = new UploadMultipartPartRequest().withChecksum(checksum)
                        .withBody(inputSubStream).withRange(range).withUploadId(uploadId)
                        .withVaultName(vaultName);
                try {
                    UploadMultipartPartResult partResult = client.uploadMultipartPart(req);
                    log.info(String.format("Part %d/%d (%s) uploaded, checksum: %s", counter, total, range,
                            partResult.getChecksum()));
                    completed = true;
                    binaryChecksums.add(binaryChecksum);
                } catch (Exception e) {
                    failedException = e;
                } finally {
                    if (inputSubStream != null) {
                        try {
                            inputSubStream.close();
                        } catch (IOException ex) {
                            log.debug("Ignore failure in closing the Closeable", ex);
                        }
                    }
                }
            }
            if (!completed && failedException != null)
                throw new AmazonClientException("Failed operation", failedException);
            currentPosition += partSize;
            ++counter;
        }

        checksum = TreeHashGenerator.calculateTreeHash(binaryChecksums);
    } finally {
        if (fileToUpload != null) {
            fileToUpload.close();
        }
    }

    return checksum;
}

From source file:glacierpipe.GlacierPipe.java

License:Apache License

/**
 * Streams 'in' to a Glacier vault as a multipart upload: each part is buffered, uploaded
 * (with optional throttling and retries), and its returned checksum is verified against the
 * locally computed tree hash; the upload is then completed with the tree hash of the whole
 * stream and the archive location is returned.
 */
public String pipe(AmazonGlacierClient client, String vaultName, String archiveDesc, InputStream in)
        throws IOException {

    long currentPosition = 0;
    int partId = 0;

    try {
        byte[] buffer = new byte[4096];

        TreeHashMessageDigest completeHash = new TreeHashMessageDigest(MessageDigest.getInstance("SHA-256"));
        in = new DigestInputStream(in, completeHash);

        /**** Create an upload ID for the current upload ****/
        InitiateMultipartUploadRequest request = new InitiateMultipartUploadRequest().withVaultName(vaultName)
                .withArchiveDescription(archiveDesc).withPartSize(Long.toString(partSize));

        InitiateMultipartUploadResult result = client.initiateMultipartUpload(request);
        String uploadId = result.getUploadId();

        this.observer.gotUploadId(uploadId);

        /**** While there are still chunks to process ****/
        do {
            TreeHashMessageDigest partHash = new TreeHashMessageDigest(MessageDigest.getInstance("SHA-256"));

            // Fill up the buffer
            try (OutputStream bufferOut = this.buffer.getOutputStream();
                    OutputStream observedOut = new ObservedOutputStream(bufferOut,
                            new BufferingObserver(this.observer, partId));
                    DigestOutputStream out = new DigestOutputStream(observedOut, partHash);) {
                int read = 0;
                while (this.buffer.getRemaining() > 0 && (read = in.read(buffer, 0,
                        (int) Math.min(this.buffer.getRemaining(), buffer.length))) >= 0) {
                    out.write(buffer, 0, read);
                }
            }

            currentPosition += this.buffer.getLength();

            // If we read zero bytes, we reached the end of the stream.  Break.
            if (this.buffer.getLength() == 0) {
                break;
            }

            // Report the Tree Hash of this chunk
            byte[] byteChecksum = partHash.digest();
            String checksum = BinaryUtils.toHex(byteChecksum);
            this.observer.computedTreeHash(partId, byteChecksum);

            // Try to upload this chunk
            int attempts = 0;
            do {
                try (InputStream bufferIn = this.buffer.getInputStream();

                        // KLUDGE: Throttling really belongs closer to EntitySerializer.serialize(), but there
                        // wasn't an easy hook for it.  Throttling on input would work well enough, but
                        // client.uploadMultipartPart() calculates a SHA-256 checksum on the request before it
                        // sends it, then calls reset() on the stream.  Because we know this, don't throttle until
                        // reset() has been called at least once.
                        InputStream throttledIn = this.throttlingStrategy == null ? bufferIn
                                : new ThrottledInputStream(bufferIn, this.throttlingStrategy) {
                                    private long resets = 0;

                                    @Override
                                    public void setBytesPerSecond() {
                                        if (this.resets > 0) {
                                            super.setBytesPerSecond();
                                        }
                                    }

                                    @Override
                                    protected long getMaxRead(long currentTime) {
                                        return this.resets > 0 ? super.getMaxRead(currentTime) : Long.MAX_VALUE;
                                    }

                                    @Override
                                    public synchronized void reset() throws IOException {
                                        super.reset();
                                        this.resets++;
                                    }
                                };

                        InputStream observedIn = new ObservedInputStream(throttledIn,
                                new UploadObserver(this.observer, partId));) {

                    UploadMultipartPartRequest partRequest = new UploadMultipartPartRequest()
                            .withVaultName(vaultName).withBody(observedIn).withChecksum(checksum)
                            .withRange(String.format("bytes %d-%d/*", currentPosition - this.buffer.getLength(),
                                    currentPosition - 1))
                            .withUploadId(uploadId).withAccountId("-");

                    UploadMultipartPartResult partResult = client.uploadMultipartPart(partRequest);

                    if (!Arrays.equals(BinaryUtils.fromHex(partResult.getChecksum()), byteChecksum)) {
                        throw new AmazonClientException("Checksum mismatch");
                    }

                    break;
                } catch (AmazonClientException e) {
                    attempts++;
                    observer.exceptionUploadingPart(partId, e, attempts, attempts < this.maxRetries);

                    if (attempts >= this.maxRetries) {
                        throw new IOException("Failed to upload after " + attempts + " attempts", e);
                    }
                } catch (IOException e) {
                    attempts++;
                    observer.exceptionUploadingPart(partId, e, attempts, attempts < this.maxRetries);

                    if (attempts >= this.maxRetries) {
                        throw new IOException("Failed to upload after " + attempts + " attempts", e);
                    }
                }

                try {
                    long sleepingFor = 1000 * (attempts < 15 ? (long) Math.pow(1.5, attempts) : 300);
                    this.observer.sleepingBeforeRetry(sleepingFor);
                    Thread.sleep(sleepingFor);
                } catch (InterruptedException e) {
                    throw new IOException("Upload interrupted", e);
                }
            } while (true);

            partId++;
        } while (this.buffer.getRemaining() == 0);

        byte[] completeHashBytes = completeHash.digest();

        CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest()
                .withVaultName(vaultName).withUploadId(uploadId)
                .withChecksum(BinaryUtils.toHex(completeHashBytes))
                .withArchiveSize(Long.toString(currentPosition));

        CompleteMultipartUploadResult compResult = client.completeMultipartUpload(compRequest);
        String location = compResult.getLocation();

        this.observer.done(completeHashBytes, location);
        return location;

    } catch (IOException e) {
        this.observer.fatalException(e);
        throw e;
    } catch (AmazonClientException e) {
        this.observer.fatalException(e);
        throw e;
    } catch (NoSuchAlgorithmException e) {
        throw new RuntimeException("SHA-256 not available", e);
    }
}