Example usage for java.io ByteArrayInputStream available()

List of usage examples for java.io ByteArrayInputStream available()

Introduction

On this page you can find example usage for java.io.ByteArrayInputStream available().

Prototype

public synchronized int available() 

Source Link

Document

Returns the number of remaining bytes that can be read (or skipped over) from this input stream.
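
The sketch below is a minimal, self-contained illustration (the class name AvailableDemo is just a placeholder) of how available() reports the number of unread bytes in a ByteArrayInputStream, and how read() and skip() reduce that count. Because the data is held entirely in memory, available() never blocks, which is why the examples under Usage can safely drain a stream with a while (stream.available() > 0) loop.

import java.io.ByteArrayInputStream;

public class AvailableDemo {
    public static void main(String[] args) {
        byte[] data = "hello".getBytes();
        ByteArrayInputStream in = new ByteArrayInputStream(data);

        // For a ByteArrayInputStream, available() is simply the number of
        // bytes that remain unread in the backing array.
        System.out.println(in.available()); // 5

        in.read();                          // consume one byte
        System.out.println(in.available()); // 4

        in.skip(2);                         // skipped bytes also count as consumed
        System.out.println(in.available()); // 2
    }
}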

Usage

From source file:com.cloud.bridge.service.EC2RestServlet.java

/**
 * The SOAP API for EC2 uses WS-Security to sign all client requests.  This requires that
 * the client have a public/private key pair and the public key defined by an X509 certificate.
 * Thus, in order for a Cloud.com account holder to use the EC2 SOAP API, he must register
 * his X509 certificate with the EC2 service.  This function allows the Cloud.com account
 * holder to "load" his X509 certificate into the service.  Note that the SetUserKeys REST
 * function must be called before this call.
 *
 * This is an authenticated REST call and as such must contain all the required REST parameters,
 * including: Signature, Timestamp, Expires, etc.  The signature is calculated using the
 * Cloud.com account holder's API access and secret keys and the Amazon-defined EC2 signature
 * algorithm.
 *
 * A user can call this REST function any number of times; on each call the X509 certificate
 * simply overwrites any previously stored value.
 */
private void setCertificate(HttpServletRequest request, HttpServletResponse response) throws Exception {
    Transaction txn = null;
    try {
        // [A] Pull the cert and cloud AccessKey from the request
        String[] certificate = request.getParameterValues("cert");
        if (null == certificate || 0 == certificate.length) {
            response.sendError(530, "Missing cert parameter");
            return;
        }
        //           logger.debug( "SetCertificate cert: [" + certificate[0] + "]" );

        String[] accessKey = request.getParameterValues("AWSAccessKeyId");
        if (null == accessKey || 0 == accessKey.length) {
            response.sendError(530, "Missing AWSAccessKeyId parameter");
            return;
        }

        // [B] Open our keystore
        FileInputStream fsIn = new FileInputStream(pathToKeystore);
        KeyStore certStore = KeyStore.getInstance("JKS");
        certStore.load(fsIn, keystorePassword.toCharArray());

        // -> use the Cloud API key to save the cert in the keystore
        // -> write the cert into the keystore on disk
        Certificate userCert = null;
        CertificateFactory cf = CertificateFactory.getInstance("X.509");

        ByteArrayInputStream bs = new ByteArrayInputStream(certificate[0].getBytes());
        while (bs.available() > 0)
            userCert = cf.generateCertificate(bs);
        certStore.setCertificateEntry(accessKey[0], userCert);

        FileOutputStream fsOut = new FileOutputStream(pathToKeystore);
        certStore.store(fsOut, keystorePassword.toCharArray());

        // [C] Associate the cert's uniqueId with the Cloud API keys
        String uniqueId = AuthenticationUtils.X509CertUniqueId(userCert);
        logger.debug("SetCertificate, uniqueId: " + uniqueId);
        /*           UserCredentialsDao credentialDao = new UserCredentialsDao();
                   credentialDao.setCertificateId( accessKey[0], uniqueId );
        */
        txn = Transaction.open(Transaction.AWSAPI_DB);
        UserCredentialsVO user = ucDao.getByAccessKey(accessKey[0]);
        user.setCertUniqueId(uniqueId);
        ucDao.update(user.getId(), user);
        response.setStatus(200);
        endResponse(response, "User certificate set successfully");
        txn.commit();

    } catch (NoSuchObjectException e) {
        logger.error("SetCertificate exception " + e.getMessage(), e);
        response.sendError(404, "SetCertificate exception " + e.getMessage());

    } catch (Exception e) {
        logger.error("SetCertificate exception " + e.getMessage(), e);
        response.sendError(500, "SetCertificate exception " + e.getMessage());
    } finally {
        // txn may still be null if an earlier step failed before the transaction was opened
        if (txn != null) {
            txn.close();
        }
    }

}

From source file:org.yestech.publish.publisher.AmazonS3PublisherUnitTest.java

@Test
public void testPublishNoNonBlankLocation() throws S3ServiceException, IOException {
    File tempDir = new File(System.getProperty("java.io.tmpdir") + File.separator + "publishUnitTesting");
    //        final S3Service service = context.mock(S3Service.class, "s3service");
    final IFileArtifact fileArtifact = context.mock(IFileArtifact.class, "fileArtifact");
    final IFileArtifactMetaData fileArtifactMetaData = context.mock(IFileArtifactMetaData.class,
            "fileArtifactMetaData");
    final IArtifactOwner artifactOwner = context.mock(IArtifactOwner.class, "owner");
    final String ownerId = "100";
    final String fileName = "testFile.txt";

    String data = "this is a test";
    final ByteArrayInputStream stream = new ByteArrayInputStream(data.getBytes());

    context.checking(new Expectations() {
        {
            oneOf(fileArtifact).getArtifactMetaData();
            will(returnValue(fileArtifactMetaData));
            oneOf(fileArtifact).getStream();
            will(returnValue(stream));
            oneOf(fileArtifactMetaData).getArtifactOwner();
            will(returnValue(artifactOwner));
            oneOf(artifactOwner).getOwnerIdentifier();
            will(returnValue(ownerId));
            oneOf(fileArtifactMetaData).getFileName();
            will(returnValue(fileName));
            oneOf(fileArtifactMetaData).getUniqueNames();
            will(returnValue(Pair.create("", "")));
            oneOf(fileArtifactMetaData).getSize();
            will(returnValue(100l));
            oneOf(fileArtifactMetaData).getMimeType();
            will(returnValue("application/txt"));
            //                oneOf(service).putObject(with(aNonNull(S3Bucket.class)), with(aNonNull(S3Object.class)));
            oneOf(fileArtifactMetaData).getLocation();
            will(returnValue("http://localhost/pix.jpg"));
            oneOf(fileArtifact).setFile(null);
            oneOf(fileArtifact).setStream(null);
        }
    });
    final MockS3Service service = new MockS3Service(null);
    publisher.setS3Service(service);
    publisher.setArtifactType(ArtifactType.IMAGE);
    publisher.getProperties().addProperty(Pair.create(ArtifactType.IMAGE, "tempDirectory"),
            new File("/tmp//localsave"));
    //        publisher.setTempDirectory(tempDir);
    publisher.publish(fileArtifact);
    assertEquals(AccessControlList.REST_CANNED_PUBLIC_READ, service.getS3Object().getAcl());
    assertEquals(0, stream.available());
    FileUtils.deleteDirectory(tempDir);
}

From source file:org.yestech.publish.publisher.AmazonS3PublisherUnitTest.java

@Test
public void testPublish() throws S3ServiceException, IOException {
    File tempDir = new File(System.getProperty("java.io.tmpdir") + File.separator + "publishUnitTesting");
    //        final S3Service service = context.mock(S3Service.class, "s3service");
    final IFileArtifact fileArtifact = context.mock(IFileArtifact.class, "fileArtifact");
    final IFileArtifactMetaData fileArtifactMetaData = context.mock(IFileArtifactMetaData.class,
            "fileArtifactMetaData");
    final IArtifactOwner artifactOwner = context.mock(IArtifactOwner.class, "owner");
    final String ownerId = "100";
    final String fileName = "testFile.txt";

    String data = "this is a test";
    final ByteArrayInputStream stream = new ByteArrayInputStream(data.getBytes());

    context.checking(new Expectations() {
        {
            oneOf(fileArtifact).getArtifactMetaData();
            will(returnValue(fileArtifactMetaData));
            oneOf(fileArtifact).getStream();
            will(returnValue(stream));
            oneOf(fileArtifactMetaData).getArtifactOwner();
            will(returnValue(artifactOwner));
            oneOf(artifactOwner).getOwnerIdentifier();
            will(returnValue(ownerId));
            oneOf(fileArtifactMetaData).getFileName();
            will(returnValue(fileName));
            oneOf(fileArtifactMetaData).getUniqueNames();
            will(returnValue(Pair.create("", "")));
            oneOf(fileArtifactMetaData).getSize();
            will(returnValue(100l));
            oneOf(fileArtifactMetaData).getMimeType();
            will(returnValue("application/txt"));
            //                oneOf(fileArtifactMetaData).getArtifactType();
            //                will(returnValue(ArtifactType.IMAGE));
            //                oneOf(service).putObject(with(aNonNull(S3Bucket.class)), with(aNonNull(S3Object.class)));
            oneOf(fileArtifactMetaData).getLocation();
            will(returnValue(""));
            oneOf(fileArtifactMetaData).setLocation(with(aNonNull(String.class)));
            oneOf(fileArtifact).setFile(null);
            oneOf(fileArtifact).setStream(null);
        }
    });
    final MockS3Service service = new MockS3Service(null);
    publisher.setArtifactType(ArtifactType.IMAGE);
    publisher.setS3Service(service);
    publisher.getProperties().addProperty(Pair.create(ArtifactType.IMAGE, "tempDirectory"),
            new File("/tmp//localsave"));
    //        publisher.setTempDirectory(tempDir);
    publisher.publish(fileArtifact);
    assertEquals(AccessControlList.REST_CANNED_PUBLIC_READ, service.getS3Object().getAcl());
    assertEquals(0, stream.available());
    FileUtils.deleteDirectory(tempDir);
}

From source file:org.globus.gsi.gssapi.GlobusGSSContextImpl.java

/**
 * Accept a delegated credential.
 *
 * This function drives the accepting side of the credential
 * delegation process. It is expected to be called in tandem with the
 * {@link #initDelegation(GSSCredential, Oid, int, byte[], int, int)
 * initDelegation} function.
 * <BR>
 * The behavior of this function can be modified by
 * {@link GSSConstants#GSS_MODE GSSConstants.GSS_MODE} context
 * option. The
 * {@link GSSConstants#GSS_MODE GSSConstants.GSS_MODE}
 * option if set to
 * {@link GSIConstants#MODE_SSL GSIConstants.MODE_SSL}
 * results in tokens that are not wrapped.
 *
 * @param lifetime
 *        The requested period of validity (seconds) of the delegated
 *        credential.
 * @return A token that should be passed to <code>initDelegation</code> if
 *        <code>isDelegationFinished</code> returns false. May be null.
 * @exception GSSException containing the following major error codes:
 *            <code>GSSException.FAILURE</code>
 */
public byte[] acceptDelegation(int lifetime, byte[] buf, int off, int len) throws GSSException {

    logger.debug("Enter acceptDelegation: " + delegationState);

    if (this.gssMode != GSIConstants.MODE_SSL && buf != null && len > 0) {
        buf = unwrap(buf, off, len);
        off = 0;
        len = buf.length;
    }

    byte[] token = null;

    switch (delegationState) {

    case DELEGATION_START:

        this.delegationFinished = false;

        if (len != 1 && buf[off] != GSIConstants.DELEGATION_CHAR) {
            throw new GlobusGSSException(GSSException.FAILURE, GlobusGSSException.DELEGATION_ERROR,
                    "delegError00", new Object[] { new Character((char) buf[off]) });
        }

        try {
            /*DEL
                            Vector certChain = this.conn.getCertificateChain();
            */
            Certificate[] certChain;
            try {
                certChain = this.sslEngine.getSession().getPeerCertificates();
            } catch (SSLPeerUnverifiedException e) {
                certChain = null;
            }
            if (certChain == null || certChain.length == 0) {
                throw new GlobusGSSException(GSSException.FAILURE, GlobusGSSException.DELEGATION_ERROR,
                        "noClientCert");
            }

            X509Certificate tmpCert =
                    /*DEL
                    PureTLSUtil.convertCert((X509Cert)certChain.lastElement());
                    */
                    (X509Certificate) certChain[0];

            token = generateCertRequest(tmpCert);
        } catch (GeneralSecurityException e) {
            throw new GlobusGSSException(GSSException.FAILURE, e);
        }

        this.delegationState = DELEGATION_COMPLETE_CRED;
        break;

    case DELEGATION_COMPLETE_CRED:

        ByteArrayInputStream in = null;
        X509Certificate[] chain = null;
        LinkedList certList = new LinkedList();
        X509Certificate cert = null;
        try {
            in = new ByteArrayInputStream(buf, off, len);
            while (in.available() > 0) {
                cert = CertificateLoadUtil.loadCertificate(in);
                certList.add(cert);
            }

            chain = new X509Certificate[certList.size()];
            chain = (X509Certificate[]) certList.toArray(chain);

            verifyDelegatedCert(chain[0]);

        } catch (GeneralSecurityException e) {
            throw new GlobusGSSException(GSSException.FAILURE, e);
        } finally {
            if (in != null) {
                try {
                    in.close();
                } catch (Exception e) {
                    logger.warn("Unable to close streamreader.");
                }
            }
        }

        X509Credential proxy = new X509Credential(this.keyPair.getPrivate(), chain);

        this.delegatedCred = new GlobusGSSCredentialImpl(proxy, GSSCredential.INITIATE_AND_ACCEPT);

        this.delegationState = DELEGATION_START;
        this.delegationFinished = true;
        break;

    default:
        throw new GSSException(GSSException.FAILURE);
    }

    logger.debug("Exit acceptDelegation");

    if (this.gssMode != GSIConstants.MODE_SSL && token != null) {
        // XXX: Why wrap() only when not in MODE_SSL?
        return wrap(token, 0, token.length);
    } else {
        return token;
    }
}

From source file:com.mirth.connect.donkey.server.data.jdbc.JdbcDao.java

@Override
public void insertMessageAttachment(String channelId, long messageId, Attachment attachment) {
    logger.debug(channelId + "/" + messageId + ": inserting message attachment");

    try {
        PreparedStatement statement = prepareStatement("insertMessageAttachment", channelId);
        statement.setString(1, attachment.getId());
        statement.setLong(2, messageId);
        statement.setString(3, attachment.getType());

        // The size of each segment of the attachment.
        int chunkSize = 10000000;

        if (attachment.getContent().length <= chunkSize) {
            // If there is only one segment, just store it
            statement.setInt(4, 1);
            statement.setInt(5, attachment.getContent().length);
            statement.setBytes(6, attachment.getContent());
            statement.executeUpdate();
        } else {
            // Use an input stream on the attachment content to segment the data.
            ByteArrayInputStream inputStream = new ByteArrayInputStream(attachment.getContent());
            // The order of the segment
            int segmentIndex = 1;

            // As long as there are bytes left
            while (inputStream.available() > 0) {
                // Set the segment number
                statement.setInt(4, segmentIndex++);
                // Determine the segment size. If there are more bytes left than the chunk size, the size is the chunk size. Otherwise it is the number of remaining bytes
                int segmentSize = Math.min(chunkSize, inputStream.available());
                // Create a byte array to store the chunk
                byte[] segment = new byte[segmentSize];
                // Read the chunk from the input stream to the byte array
                inputStream.read(segment, 0, segmentSize);
                // Set the segment size
                statement.setInt(5, segmentSize);
                // Set the byte data
                statement.setBytes(6, segment);
                // Perform the insert
                statement.executeUpdate();
            }
        }

        // Clear the parameters because the data held in memory could be quite large.
        statement.clearParameters();
    } catch (SQLException e) {
        throw new DonkeyDaoException(e);
    }
}

From source file:com.flexoodb.common.FlexUtils.java

static public void saveBytesToFile(String filepath, byte[] data) throws Exception {
    ByteArrayInputStream is = new ByteArrayInputStream(data);
    java.io.FileOutputStream fos = new java.io.FileOutputStream(new File(filepath));
    while (is.available() > 0) {
        fos.write(is.read());
    }
    fos.close();
    is.close();
}

From source file:com.flexoodb.common.FlexUtils.java

static public void saveStringToFile(String filepath, String content) throws Exception {
    ByteArrayInputStream is = new ByteArrayInputStream(content.getBytes());
    java.io.FileOutputStream fos = new java.io.FileOutputStream(new File(filepath));
    while (is.available() > 0) {
        fos.write(is.read());
    }
    fos.close();
    is.close();
}

From source file:s3.com.qiniu.services.s3.AmazonS3Client.java

@Override
public PutObjectResult putObject(PutObjectRequest putObjectRequest)
        throws AmazonClientException, AmazonServiceException {
    assertParameterNotNull(putObjectRequest,
            "The PutObjectRequest parameter must be specified when uploading an object");

    String bucketName = putObjectRequest.getBucketName();
    String key = putObjectRequest.getKey();
    ObjectMetadata metadata = putObjectRequest.getMetadata();
    InputStream input = putObjectRequest.getInputStream();

    /*
     * This is compatible with progress listener set by either the legacy
     * method PutObjectRequest#setProgressListener or the new method
     * PutObjectRequest#setGeneralProgressListener.
     */
    ProgressListener progressListener = putObjectRequest.getGeneralProgressListener();
    ProgressListenerCallbackExecutor progressListenerCallbackExecutor = ProgressListenerCallbackExecutor
            .wrapListener(progressListener);

    if (metadata == null)
        metadata = new ObjectMetadata();

    assertParameterNotNull(bucketName, "The bucket name parameter must be specified when uploading an object");
    assertParameterNotNull(key, "The key parameter must be specified when uploading an object");

    final boolean skipContentMd5Check = ServiceUtils.skipMd5CheckPerRequest(putObjectRequest);

    // If a file is specified for upload, we need to pull some additional
    // information from it to auto-configure a few options
    if (putObjectRequest.getFile() != null) {
        File file = putObjectRequest.getFile();
        // Always set the content length, even if it's already set
        metadata.setContentLength(file.length());

        final boolean calculateMD5 = metadata.getContentMD5() == null;

        // Only set the content type if it hasn't already been set
        if (metadata.getContentType() == null) {
            metadata.setContentType(Mimetypes.getInstance().getMimetype(file));
        }

        if (calculateMD5 && !skipContentMd5Check) {
            try {
                String contentMd5_b64 = Md5Utils.md5AsBase64(file);
                metadata.setContentMD5(contentMd5_b64);
            } catch (Exception e) {
                throw new AmazonClientException("Unable to calculate MD5 hash: " + e.getMessage(), e);
            }
        }

        try {
            input = new RepeatableFileInputStream(file);
        } catch (FileNotFoundException fnfe) {
            throw new AmazonClientException("Unable to find file to upload", fnfe);
        }
    }

    Request<PutObjectRequest> request = createRequest(bucketName, key, putObjectRequest, HttpMethodName.PUT);

    if (putObjectRequest.getAccessControlList() != null) {
        addAclHeaders(request, putObjectRequest.getAccessControlList());
    } else if (putObjectRequest.getCannedAcl() != null) {
        request.addHeader(Headers.S3_CANNED_ACL, putObjectRequest.getCannedAcl().toString());
    }

    if (putObjectRequest.getStorageClass() != null) {
        request.addHeader(Headers.STORAGE_CLASS, putObjectRequest.getStorageClass());
    }

    if (putObjectRequest.getRedirectLocation() != null) {
        request.addHeader(Headers.REDIRECT_LOCATION, putObjectRequest.getRedirectLocation());
        if (input == null) {
            setZeroContentLength(request);
            input = new ByteArrayInputStream(new byte[0]);
        }
    }

    // Populate the SSE-CPK parameters to the request header
    populateSseCpkRequestParameters(request, putObjectRequest.getSSECustomerKey());

    // Use internal interface to differentiate 0 from unset.
    final Long contentLength = (Long) metadata.getRawMetadataValue(Headers.CONTENT_LENGTH);
    if (contentLength == null) {
        /*
         * There's nothing we can do except for let the HTTP client buffer
         * the input stream contents if the caller doesn't tell us how much
         * data to expect in a stream since we have to explicitly tell
         * Amazon S3 how much we're sending before we start sending any of
         * it.
         */
        if (!input.markSupported()) {
            log.warn("No content length specified for stream data.  "
                    + "Stream contents will be buffered in memory and could result in "
                    + "out of memory errors.");
            ByteArrayInputStream bais = toByteArray(input);
            request.addHeader(Headers.CONTENT_LENGTH, String.valueOf(bais.available()));
            input = bais;
        } else {
            long len = calculateContentLength(input);
            request.addHeader(Headers.CONTENT_LENGTH, String.valueOf(len));
        }
    } else {
        final long expectedLength = contentLength.longValue();
        if (expectedLength >= 0) {
            // Performs length check on the underlying data stream.
            // For S3 encryption client, the underlying data stream here
            // refers to the cipher-text data stream (ie not the underlying
            // plain-text data stream which in turn may have been wrapped
            // with it's own length check input stream.)
            @SuppressWarnings("resource")
            LengthCheckInputStream lcis = new LengthCheckInputStream(input, expectedLength, // expected data length to be uploaded
                    EXCLUDE_SKIPPED_BYTES);
            input = lcis;
            request.addHeader(Headers.CONTENT_LENGTH, contentLength.toString());
        }
    }

    if (progressListenerCallbackExecutor != null) {
        input = new ProgressReportingInputStream(input, progressListenerCallbackExecutor);
        fireProgressEvent(progressListenerCallbackExecutor, ProgressEvent.STARTED_EVENT_CODE);
    }

    MD5DigestCalculatingInputStream md5DigestStream = null;
    if (metadata.getContentMD5() == null && !skipContentMd5Check) {
        /*
         * If the user hasn't set the content MD5, then we don't want to
         * buffer the whole stream in memory just to calculate it. Instead,
         * we can calculate it on the fly and validate it with the returned
         * ETag from the object upload.
         */
        input = md5DigestStream = new MD5DigestCalculatingInputStream(input);
    }

    if (metadata.getContentType() == null) {
        /*
         * Default to the "application/octet-stream" if the user hasn't
         * specified a content type.
         */
        metadata.setContentType(Mimetypes.MIMETYPE_OCTET_STREAM);
    }

    populateRequestMetadata(request, metadata);
    request.setContent(input);
    /*
     * Enable 100-continue support for PUT operations, since this is where
     * we're potentially uploading large amounts of data and want to find
     * out as early as possible if an operation will fail. We don't want to
     * do this for all operations since it will cause extra latency in the
     * network interaction.
     */
    request.addHeader("Expect", "100-continue");

    ObjectMetadata returnedMetadata = null;
    try {
        returnedMetadata = invoke(request, new S3MetadataResponseHandler(), bucketName, key);
    } catch (AmazonClientException ace) {
        fireProgressEvent(progressListenerCallbackExecutor, ProgressEvent.FAILED_EVENT_CODE);
        throw ace;
    } finally {
        try {
            input.close();
        } catch (AbortedException ignore) {
        } catch (Exception e) {
            log.debug("Unable to cleanly close input stream: " + e.getMessage(), e);
        }
    }

    String contentMd5 = metadata.getContentMD5();
    if (md5DigestStream != null) {
        contentMd5 = BinaryUtils.toBase64(md5DigestStream.getMd5Digest());
    }

    if (returnedMetadata != null && contentMd5 != null && !skipContentMd5Check) {
        byte[] clientSideHash = BinaryUtils.fromBase64(contentMd5);
        byte[] serverSideHash = BinaryUtils.fromHex(returnedMetadata.getETag());

        if (!Arrays.equals(clientSideHash, serverSideHash)) {
            fireProgressEvent(progressListenerCallbackExecutor, ProgressEvent.FAILED_EVENT_CODE);

            throw new AmazonClientException("Unable to verify integrity of data upload.  "
                    + "Client calculated content hash didn't match hash calculated by Amazon S3.  "
                    + "You may need to delete the data stored in Amazon S3.");
        }
    }

    fireProgressEvent(progressListenerCallbackExecutor, ProgressEvent.COMPLETED_EVENT_CODE);

    PutObjectResult result = new PutObjectResult();
    result.setETag(returnedMetadata.getETag());
    result.setVersionId(returnedMetadata.getVersionId());
    result.setSSEAlgorithm(returnedMetadata.getSSEAlgorithm());
    result.setSSEKMSKeyId(returnedMetadata.getSSEKMSKeyId());
    result.setSSECustomerAlgorithm(returnedMetadata.getSSECustomerAlgorithm());
    result.setSSECustomerKeyMd5(returnedMetadata.getSSECustomerKeyMd5());
    result.setExpirationTime(returnedMetadata.getExpirationTime());
    result.setExpirationTimeRuleId(returnedMetadata.getExpirationTimeRuleId());
    result.setContentMd5(contentMd5);

    return result;
}

From source file:org.exoplatform.forum.service.impl.JCRDataStorage.java

public void importXML(String nodePath, ByteArrayInputStream bis, int typeImport) throws Exception {
    String nodeName = "";
    byte[] bdata = new byte[bis.available()];
    bis.read(bdata);
    DocumentBuilderFactory docBuilderFactory = DocumentBuilderFactory.newInstance();
    DocumentBuilder docBuilder = docBuilderFactory.newDocumentBuilder();
    ByteArrayInputStream is = new ByteArrayInputStream(bdata);
    Document doc = docBuilder.parse(is);
    doc.getDocumentElement().normalize();
    String typeNodeExport = ((org.w3c.dom.Node) doc.getFirstChild().getChildNodes().item(0).getChildNodes()
            .item(0)).getTextContent();
    SessionProvider sProvider = CommonUtils.createSystemProvider();
    List<String> patchNodeImport = new ArrayList<String>();
    try {
        Node forumHome = getForumHomeNode(sProvider);
        is = new ByteArrayInputStream(bdata);
        if (!typeNodeExport.equals(EXO_FORUM_CATEGORY) && !typeNodeExport.equals(EXO_FORUM)) {
            // Node types whose child nodes must be reset on import
            if (typeNodeExport.equals(EXO_CATEGORY_HOME)) {
                nodePath = getCategoryHome(sProvider).getPath();
                Node categoryHome = getCategoryHome(sProvider);
                nodeName = "CategoryHome";
                addDataFromXML(categoryHome, nodePath, sProvider, is, nodeName);
            } else if (typeNodeExport.equals(EXO_USER_PROFILE_HOME)) {
                Node userProfile = getUserProfileHome(sProvider);
                nodeName = "UserProfileHome";
                nodePath = getUserProfileHome(sProvider).getPath();
                addDataFromXML(userProfile, nodePath, sProvider, is, nodeName);
            } else if (typeNodeExport.equals(EXO_TAG_HOME)) {
                Node tagHome = getTagHome(sProvider);
                nodePath = getTagHome(sProvider).getPath();
                nodeName = "TagHome";
                addDataFromXML(tagHome, nodePath, sProvider, is, nodeName);
            } else if (typeNodeExport.equals(EXO_FORUM_BB_CODE_HOME)) {
                nodePath = dataLocator.getBBCodesLocation();
                Node bbcodeNode = getBBCodesHome(sProvider);
                nodeName = "forumBBCode";
                addDataFromXML(bbcodeNode, nodePath, sProvider, is, nodeName);
            }
            // Node types that are imported without resetting their child nodes
            else if (typeNodeExport.equals(EXO_ADMINISTRATION_HOME)) {
                nodePath = getForumSystemHome(sProvider).getPath();
                Node node = getAdminHome(sProvider);
                node.remove();
                getForumSystemHome(sProvider).save();
                typeImport = ImportUUIDBehavior.IMPORT_UUID_COLLISION_REPLACE_EXISTING;
                Session session = forumHome.getSession();
                session.importXML(nodePath, is, typeImport);
                session.save();
            } else if (typeNodeExport.equals(EXO_BAN_IP_HOME)) {
                nodePath = getForumSystemHome(sProvider).getPath();
                Node node = getBanIPHome(sProvider);
                node.remove();
                getForumSystemHome(sProvider).save();
                typeImport = ImportUUIDBehavior.IMPORT_UUID_COLLISION_REPLACE_EXISTING;
                Session session = forumHome.getSession();
                session.importXML(nodePath, is, typeImport);
                session.save();
            } else {
                throw new RuntimeException("unknown type of node to export :" + typeNodeExport);
            }
        } else {
            if (typeNodeExport.equals(EXO_FORUM_CATEGORY)) {
                // Importing into a forum, but the imported data has the structure of a category --> error
                if (nodePath.split("/").length == 6) {
                    throw new ConstraintViolationException();
                }

                nodePath = getCategoryHome(sProvider).getPath();
            }

            Session session = forumHome.getSession();
            NodeIterator iter = ((Node) session.getItem(nodePath)).getNodes();
            while (iter.hasNext()) {
                patchNodeImport.add(iter.nextNode().getName());
            }
            session.importXML(nodePath, is, typeImport);
            session.save();
            NodeIterator newIter = ((Node) session.getItem(nodePath)).getNodes();
            while (newIter.hasNext()) {
                Node node = newIter.nextNode();
                if (patchNodeImport.contains(node.getName()))
                    patchNodeImport.remove(node.getName());
                else
                    patchNodeImport.add(node.getName());
            }
        }
        // Update forum statistics and the profiles of post owners.
        if (typeNodeExport.equals(EXO_FORUM_CATEGORY) || typeNodeExport.equals(EXO_FORUM)) {
            for (String string : patchNodeImport) {
                updateForum(nodePath + "/" + string, false);
            }
        } else if (typeNodeExport.equals(EXO_CATEGORY_HOME)) {
            updateForum(null);
        }
    } finally {
        is.close();
    }
}