Example usage for java.nio.channels Channels newChannel

Introduction

On this page you can find example usage for java.nio.channels Channels newChannel.

Prototype

public static WritableByteChannel newChannel(OutputStream out) 

Document

Constructs a channel that writes bytes to the given stream.
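
The examples below exercise both this overload and its companion, Channels.newChannel(InputStream in), which returns a ReadableByteChannel that reads bytes from the given stream. The following is a minimal, self-contained sketch of the two adapters (the class name and sample data are illustrative, not taken from any of the projects below): it writes a string through a WritableByteChannel backed by a ByteArrayOutputStream, then reads the bytes back through a ReadableByteChannel.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.Channels;
import java.nio.channels.ReadableByteChannel;
import java.nio.channels.WritableByteChannel;
import java.nio.charset.StandardCharsets;

public class NewChannelDemo {
    public static void main(String[] args) throws IOException {
        // Adapt an OutputStream to a WritableByteChannel and write a buffer through it.
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        try (WritableByteChannel writeChannel = Channels.newChannel(out)) {
            writeChannel.write(ByteBuffer.wrap("hello channel".getBytes(StandardCharsets.UTF_8)));
        }

        // Adapt an InputStream to a ReadableByteChannel and read the bytes back.
        ByteArrayInputStream in = new ByteArrayInputStream(out.toByteArray());
        try (ReadableByteChannel readChannel = Channels.newChannel(in)) {
            ByteBuffer buffer = ByteBuffer.allocate(64);
            while (readChannel.read(buffer) != -1) {
                // keep reading until end of stream
            }
            buffer.flip();
            System.out.println(StandardCharsets.UTF_8.decode(buffer)); // prints "hello channel"
        }
    }
}

Closing either returned channel closes the underlying stream, so the try-with-resources blocks above release the stream resources as well.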

Usage

From source file:org.commoncrawl.util.StreamingArcFileReader.java

public void testReader(File arcFileItem) throws Exception {

    resetState();

    Thread thread = new Thread(new Runnable() {

        public void run() {
            try {

                TriStateResult result;

                while ((result = hasMoreItems()) != TriStateResult.NoMoreItems) {

                    if (result == TriStateResult.MoreItems) {

                        ArcFileItem item = null;

                        while ((item = getNextItem()) == null) {
                            LOG.info("Waiting to Read Next Item...");
                            try {
                                Thread.sleep(1000);
                            } catch (InterruptedException e) {
                                // interrupted while waiting; ignore and keep polling
                            }
                        }

                        LOG.info("GOT Item URL:" + item.getUri() + " OFFSET:" + item.getArcFilePos()
                                + " ContentSize:" + item.getContent().getCount());
                        for (ArcFileHeaderItem headerItem : item.getHeaderItems()) {
                            if (headerItem.isFieldDirty(ArcFileHeaderItem.Field_ITEMKEY)) {
                                //LOG.info("Header Item:" + headerItem.getItemKey() + " :" + headerItem.getItemValue());
                            } else {
                                //LOG.info("Header Item:" + headerItem.getItemValue());
                            }
                        }
                        //LOG.info("Content Length:" + item.getContent().getCount());
                    } else {
                        // LOG.info("Has More Items Returned Need More Data. Sleeping");
                        try {
                            Thread.sleep(1000);
                        } catch (InterruptedException e) {
                            // interrupted while sleeping; ignore and keep polling
                        }
                    }
                }
                LOG.info("NO MORE ITEMS... BYE");
            } catch (IOException e) {
                LOG.error(StringUtils.stringifyException(e));
            }
        }

    });

    // run the thread ... 
    thread.start();

    ReadableByteChannel channel = Channels.newChannel(new FileInputStream(arcFileItem));

    try {

        for (;;) {

            ByteBuffer buffer = ByteBuffer.allocate(BLOCK_SIZE);

            int bytesRead = channel.read(buffer);
            LOG.info("Read " + bytesRead + " From File");

            if (bytesRead == -1) {
                finished();
                break;
            } else {
                buffer.flip();
                available(buffer);
            }
        }
    } finally {
        channel.close();
    }

    // now wait for thread to die ...
    LOG.info("Done Reading File.... Waiting for ArcFileThread to DIE");
    thread.join();
    LOG.info("Done Reading File.... ArcFileThread to DIED");
}

From source file:org.jnetstream.capture.Captures.java

/**
 * @param <T>
 * @param t
 * @param in
 * @return
 * @throws IOException
 * @see org.jnetstream.capture.Captures.LocalFactory#newInput(java.lang.Class,
 *      java.io.InputStream)
 */
public static <T extends InputCapture<? extends FilePacket>> T newInput(final Class<T> t, final InputStream in)
        throws IOException {
    return Captures.getLocal().newInput(t, Channels.newChannel(in));
}

From source file:edu.harvard.iq.dvn.core.web.servlet.FileDownloadServlet.java

public void deliverContent(StudyFile file, FileDownloadObject fileDownload, HttpServletResponse res) {
    OutputStream out = null;

    try {
        out = res.getOutputStream();
    } catch (IOException ex) {
        // TODO: try to generate error response.
        return;
    }
    InputStream in = fileDownload.getInputStream();

    if (in == null) {
        // TODO: generate error response.
        fileDownload.releaseConnection();
        return;
    }

    // If we are streaming a TAB-delimited file, we will need to add the
    // variable header line:

    String varHeaderLine = null;

    if (!fileDownload.noVarHeader()) {
        varHeaderLine = fileDownload.getVarHeader();
    }

    for (int i = 0; i < fileDownload.getResponseHeaders().length; i++) {
        String headerName = fileDownload.getResponseHeaders()[i].getName();
        // The goal is to (re)use all the Content-* headers.
        // (if this is a remote file, we may be recycling the headers
        // we have received from the remote repository):

        if (headerName.startsWith("Content")) {

            // Special treatment case for remote
            // HTML pages:
            // if it looks like HTML, we redirect to
            // that page, instead of trying to display it:
            // (this is for cases like the harvested HGL
            // documents which contain URLs pointing to
            // dynamic content pages, not to static files.

            if (headerName.equals("Content-Type") && file.isRemote()
                    && fileDownload.getResponseHeaders()[i].getValue() != null
                    && fileDownload.getResponseHeaders()[i].getValue().startsWith("text/html")) {

                createRedirectResponse(res, fileDownload.getRemoteUrl());

                fileDownload.releaseConnection();
                return;
            }

            String headerValue = fileDownload.getResponseHeaders()[i].getValue();

            if (fileDownload.isZippedStream()) {
                headerValue = headerValue.replace(".zip", "");
            }

            res.setHeader(headerName, headerValue);
        }
    }

    // TODO: should probably do explicit res.setContent (), if mimetype
    // is available. 

    // and now send the incoming HTTP stream as the response body       

    if (fileDownload.isFile()) {
        // for files that we are reading off disk (as opposed to remote
        // streams we are reading through network sockets) it is more
        // efficient to use NIO channels.
        FileInputStream fis = (FileInputStream) in;
        FileChannel inChannel = fis.getChannel();

        WritableByteChannel outChannel = Channels.newChannel(out);

        streamData(inChannel, outChannel, varHeaderLine);

    } else {

        streamData(in, out, varHeaderLine);
    }

    fileDownload.releaseConnection();

}

From source file:org.jnetstream.capture.Captures.java

/**
 * @param <T>
 * @param t
 * @param in
 * @param filter
 * @return
 * @throws IOException
 * @see org.jnetstream.capture.Captures.LocalFactory#newInput(java.lang.Class,
 *      java.io.InputStream, Filter)
 */
public static <T extends InputCapture<? extends FilePacket>> T newInput(final Class<T> t, final InputStream in,
        final Filter<ProtocolFilterTarget> filter) throws IOException {
    return Captures.getLocal().newInput(t, Channels.newChannel(in), filter);
}

From source file:org.jnetstream.capture.Captures.java

/**
 * @param in
 * @return
 * @throws IOException
 * @see org.jnetstream.capture.Captures.LocalFactory#newInput(java.io.InputStream)
 */
public static InputCapture<? extends CapturePacket> newInput(final InputStream in) throws IOException {
    return Captures.getLocal().newInput(Channels.newChannel(in));
}

From source file:org.jnetstream.capture.Captures.java

/**
 * @param in
 * @param filter
 * @return
 * @throws IOException
 * @see org.jnetstream.capture.Captures.LocalFactory#newInput(java.io.InputStream,
 *      Filter)
 */
public static InputCapture<? extends CapturePacket> newInput(final InputStream in,
        final Filter<ProtocolFilterTarget> filter) throws IOException {
    return Captures.getLocal().newInput(Channels.newChannel(in), filter);
}

From source file:org.jnetstream.capture.Captures.java

/**
 * @param <T>
 * @param t
 * @param out
 * @return
 * @throws IOException
 * @see org.jnetstream.capture.Captures.LocalFactory#newOutput(java.lang.Class,
 *      java.io.OutputStream)
 */
public static <T extends OutputCapture> T newOutput(final Class<T> t, final OutputStream out)
        throws IOException {
    return Captures.getLocal().newOutput(t, Channels.newChannel(out));
}

From source file:com.linkedin.databus.core.TestDbusEventBufferMult.java

@Test
public void testSubscriptionStream() throws Exception {
    final Logger log = Logger.getLogger("TestDbusEventBufferMult.testSubscriptionStream");
    log.info("start");

    TestSetup t = new TestSetup();

    PhysicalPartition pp100 = new PhysicalPartition(100, "multBufferTest1");
    PhysicalPartitionKey pk1 = new PhysicalPartitionKey(pp100);
    PhysicalPartition pp101 = new PhysicalPartition(101, "multBufferTest2");
    PhysicalPartitionKey pk2 = new PhysicalPartitionKey(pp101);

    //generate events in pp100
    byte[] schema = "abcdefghijklmnop".getBytes(Charset.defaultCharset());
    DbusEventBufferAppendable buf100 = t._eventBuffer.getDbusEventBufferAppendable(pp100);
    buf100.startEvents();
    assertTrue(buf100.appendEvent(new DbusEventKey(1), (short) 100, (short) 0,
            System.currentTimeMillis() * 1000000, (short) 1, schema, new byte[100], false, null));
    assertTrue(buf100.appendEvent(new DbusEventKey(10), (short) 100, (short) 0,
            System.currentTimeMillis() * 1000000, (short) 1, schema, new byte[100], false, null));
    assertTrue(buf100.appendEvent(new DbusEventKey(11), (short) 100, (short) 0,
            System.currentTimeMillis() * 1000000, (short) 1, schema, new byte[100], false, null));
    assertTrue(buf100.appendEvent(new DbusEventKey(2), (short) 100, (short) 0,
            System.currentTimeMillis() * 1000000, (short) 2, schema, new byte[100], false, null));
    buf100.endEvents(100, null);
    buf100.startEvents();
    assertTrue(buf100.appendEvent(new DbusEventKey(3), (short) 100, (short) 0,
            System.currentTimeMillis() * 1000000, (short) 2, schema, new byte[100], false, null));
    assertTrue(buf100.appendEvent(new DbusEventKey(4), (short) 100, (short) 1,
            System.currentTimeMillis() * 1000000, (short) 2, schema, new byte[100], false, null));
    buf100.endEvents(200, null);

    //generate events in pp101
    DbusEventBufferAppendable buf101 = t._eventBuffer.getDbusEventBufferAppendable(pp101);
    buf101.startEvents();
    assertTrue(buf101.appendEvent(new DbusEventKey(51), (short) 101, (short) 0,
            System.currentTimeMillis() * 1000000, (short) 11, schema, new byte[100], false, null));
    assertTrue(buf101.appendEvent(new DbusEventKey(52), (short) 101, (short) 0,
            System.currentTimeMillis() * 1000000, (short) 12, schema, new byte[100], false, null));
    assertTrue(buf101.appendEvent(new DbusEventKey(53), (short) 101, (short) 2,
            System.currentTimeMillis() * 1000000, (short) 2, schema, new byte[100], false, null));
    buf101.endEvents(120, null);
    buf101.startEvents();
    assertTrue(buf101.appendEvent(new DbusEventKey(54), (short) 101, (short) 2,
            System.currentTimeMillis() * 1000000, (short) 2, schema, new byte[100], false, null));
    assertTrue(buf101.appendEvent(new DbusEventKey(55), (short) 101, (short) 2,
            System.currentTimeMillis() * 1000000, (short) 2, schema, new byte[100], false, null));
    assertTrue(buf101.appendEvent(new DbusEventKey(56), (short) 101, (short) 2,
            System.currentTimeMillis() * 1000000, (short) 2, schema, new byte[100], false, null));
    buf101.endEvents(200, null);

    //initialization
    DatabusSubscription sub1 = DatabusSubscription
            .createPhysicalPartitionReplicationSubscription(new PhysicalPartition(100, "multBufferTest1"));

    DbusFilter filter1 = t._eventBuffer.constructFilters(Arrays.asList(sub1));
    assertNotNull(filter1);

    CheckpointMult cpMult1 = new CheckpointMult();
    Checkpoint cp100 = new Checkpoint();
    cp100.init();
    cp100.setConsumptionMode(DbusClientMode.ONLINE_CONSUMPTION);
    cp100.setWindowScn(10L);
    cp100.setWindowOffset(-1);
    cpMult1.addCheckpoint(pp100, cp100);

    String[] pnames = { "multBufferTest1:100", "multBufferTest2:101" };
    StatsCollectors<DbusEventsStatisticsCollector> statsColls1 = createStats(pnames);
    DbusEventsStatisticsCollector statsCol1 = statsColls1.getStatsCollector("multBufferTest1:100");
    DbusEventsStatisticsCollector statsCol2 = statsColls1.getStatsCollector("multBufferTest2:101");

    //read an entire buffer
    DbusEventBufferBatchReadable reader1 = t._eventBuffer.getDbusEventBufferBatchReadable(cpMult1,
            Arrays.asList(pk1), statsColls1);

    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    // Try a call with 20 bytes of fetch size, we should see the event size in the first return with 0 events read.
    StreamEventsResult result = reader1.streamEvents(false, 20, Channels.newChannel(baos), Encoding.BINARY,
            filter1);
    assertEquals(0, result.getNumEventsStreamed());
    assertEquals(161, result.getSizeOfPendingEvent());

    result = reader1.streamEvents(false, 1000000, Channels.newChannel(baos), Encoding.BINARY, filter1);
    int eventsRead = result.getNumEventsStreamed();
    assertEquals(eventsRead, 8); //4 events + 1 eop + 2 events + 1 eop
    assertEquals(statsColls1.getStatsCollector("multBufferTest1:100").getTotalStats().getNumSysEvents(), 2);
    assertEquals(statsColls1.getStatsCollector("multBufferTest1:100").getTotalStats().getNumDataEvents(), 6);
    assertEquals(result.getSizeOfPendingEvent(), 0, "Size of pending event not zero");

    // Now that we have read all the events, we should not see a pending event even if we offer a small fetch size.

    result = reader1.streamEvents(false, 20, Channels.newChannel(baos), Encoding.BINARY, filter1);

    assertEquals(0, result.getNumEventsStreamed(), "There should be no more events in the buffer now");
    assertEquals(0, result.getSizeOfPendingEvent(),
            "We should not see pending event size since there are no events in buffer");
    baos.reset();
    statsCol1.reset();
    statsCol2.reset();

    //read from two buffers, filtering out one
    cpMult1 = new CheckpointMult();
    cp100.init();
    cp100.setConsumptionMode(DbusClientMode.ONLINE_CONSUMPTION);
    cp100.setWindowScn(10L);
    cp100.setWindowOffset(-1);
    cpMult1.addCheckpoint(pp100, cp100);
    reader1 = t._eventBuffer.getDbusEventBufferBatchReadable(cpMult1, Arrays.asList(pk1, pk2), statsColls1);

    eventsRead = reader1.streamEvents(false, 1000000, Channels.newChannel(baos), Encoding.BINARY, filter1)
            .getNumEventsStreamed();
    assertEquals(eventsRead, 10); //4 events + 1 eop + 1 eop from the other buffer + 2 events +
                                  //1 eop + 1 eop from the other buffer
    assertEquals(statsColls1.getStatsCollector("multBufferTest1:100").getTotalStats().getNumSysEvents(), 2);
    assertEquals(statsColls1.getStatsCollector("multBufferTest1:100").getTotalStats().getNumDataEvents(), 6);

    baos.reset();
    statsCol1.reset();

    //read from one buffer and one source partition
    DatabusSubscription sub2 = new DatabusSubscription(PhysicalSource.MASTER_PHISYCAL_SOURCE,
            new PhysicalPartition(101, "multBufferTest2"),
            new LogicalSourceId(new LogicalSource(2, "srcName2"), (short) 2));

    DbusFilter filter2 = t._eventBuffer.constructFilters(Arrays.asList(sub2));
    assertNotNull(filter2);

    CheckpointMult cpMult2 = new CheckpointMult();
    Checkpoint cp101 = new Checkpoint();
    cp101.init();
    cp101.setConsumptionMode(DbusClientMode.ONLINE_CONSUMPTION);
    cp101.setWindowScn(10L);
    cp101.setWindowOffset(-1);
    cpMult2.addCheckpoint(pp101, cp101);
    DbusEventBufferBatchReadable reader2 = t._eventBuffer.getDbusEventBufferBatchReadable(cpMult2,
            Arrays.asList(pk2), statsColls1);

    eventsRead = reader2.streamEvents(false, 1000000, Channels.newChannel(baos), Encoding.BINARY, filter2)
            .getNumEventsStreamed();
    assertEquals(eventsRead, 6); //1 event + 1 eop + 3 events + 1 eop

    baos.reset();
    statsCol1.reset();
    statsCol2.reset();

    //read all partitions for a source
    DatabusSubscription sub3 = new DatabusSubscription(PhysicalSource.MASTER_PHISYCAL_SOURCE,
            PhysicalPartition.ANY_PHYSICAL_PARTITION,
            LogicalSourceId.createAllPartitionsWildcard(new LogicalSource(2, "srcName2")));

    DbusFilter filter3 = t._eventBuffer.constructFilters(Arrays.asList(sub3));
    assertNotNull(filter3);

    CheckpointMult cpMult3 = new CheckpointMult();
    cp100.init();
    cp100.setConsumptionMode(DbusClientMode.ONLINE_CONSUMPTION);
    cp100.setWindowScn(10L);
    cp100.setWindowOffset(-1);
    cpMult1.addCheckpoint(pp100, cp100);
    cp101.init();
    cp101.setConsumptionMode(DbusClientMode.ONLINE_CONSUMPTION);
    cp101.setWindowScn(10L);
    cp101.setWindowOffset(-1);
    cpMult2.addCheckpoint(pp101, cp101);
    DbusEventBufferBatchReadable reader3 = t._eventBuffer.getDbusEventBufferBatchReadable(cpMult3,
            Arrays.asList(pk1, pk2), statsColls1);
    eventsRead = reader3.streamEvents(false, 1000000, Channels.newChannel(baos), Encoding.BINARY, filter3)
            .getNumEventsStreamed();
    assertEquals(eventsRead, 11); //1 events + 1 eop + 1 events + 1 eop + 2 events + 1 eop + 3 events + 1 eop
    assertEquals(statsColls1.getStatsCollector("multBufferTest1:100").getTotalStats().getNumSysEvents(), 2);
    assertEquals(statsColls1.getStatsCollector("multBufferTest2:101").getTotalStats().getNumSysEvents(), 2);
    assertEquals(
            statsColls1.getStatsCollector("multBufferTest1:100").getTotalStats().getNumDataEventsFiltered(), 3);
    assertEquals(
            statsColls1.getStatsCollector("multBufferTest2:101").getTotalStats().getNumDataEventsFiltered(), 4);

    baos.reset();
    statsCol1.reset();
    statsCol2.reset();

    log.info("end");
}

From source file:com.pari.ic.ICManager.java

private String retriveICPackageFile(ICStorageServerSettings settings, ICPackage pkg, PollJobDetails details)
        throws Exception {
    String log = null;
    InputStream inStream = null;
    HttpClient httpclient = null;

    Customer customer = CustomerManager.getInstance().getCustomerById(pkg.getCustomerId());
    String customerName = customer.getCustomerName();

    if (settings.getConnectivityType() == ConnectivityTypeEnum.CONNECTIVITY) {
        // DefaultHttpClient httpclient = null;
        // For Standalone NCCM, send the request via Connectivity
        String tegHost = settings.getTegHost();
        if (tegHost == null) {
            log = "TEG URL is not configured.... ";
            logMsg(details, log, JobStageConstants.PollingStages.RUNNING, Priority.INFO_INT, null);
            return null;
        }

        String tegUrl = "http://" + tegHost + "/NccmCollector/ICDownloadServlet?";

        log = "URL to TEG : " + tegUrl;
        logMsg(details, log, JobStageConstants.PollingStages.RUNNING, Priority.INFO_INT, null);

        // Sample URL -
        // http://172.21.136.202:8090/NccmCollector/ICDownloadServlet?GET_IC_PACK=TRUE&PACK_ID=1234

        tegUrl = tegUrl + "GET_IC_PACK=TRUE";
        tegUrl = tegUrl + "&";
        tegUrl = tegUrl + "PACK_ID" + "=" + pkg.getPackageId();

        String request = new URL(getFullUrl(settings.getServerHost()) + "/NetworkManagerWS/getFile/forPackage/"
                + pkg.getPackageId() + "/" + pkg.getPackageVersion() + "/" + customerName + "/"
                + pkg.getInstance_name()).toString();

        request = request + "&&&";
        request = request + settings.getUserId();
        request = request + "&&&";
        request = request + settings.getPassword();

        log = "Request URL package download : " + request;
        logMsg(details, log, JobStageConstants.PollingStages.RUNNING, Priority.INFO_INT, null);

        try {
            httpclient = new DefaultHttpClient();
            httpclient.getParams().setParameter(CoreProtocolPNames.PROTOCOL_VERSION, HttpVersion.HTTP_1_1);
            tegUrl = tegUrl.replaceAll(" ", "%20");
            log = "Posting request to url: " + tegUrl;
            logMsg(details, log, JobStageConstants.PollingStages.RUNNING, Priority.INFO_INT, null);
            HttpPost httppost = new HttpPost(tegUrl);
            httppost.setEntity(new StringEntity(request, null, null));
            httppost.setHeader("Content-type", "text/xml");

            HttpResponse response = httpclient.execute(httppost);
            log = "Response from HTTP Client in retriveICPackageFile : " + response.toString();
            logMsg(details, log, JobStageConstants.PollingStages.RUNNING, Priority.INFO_INT, null);

            inStream = response.getEntity().getContent();
        } catch (Exception e) {
            log = "Error while posting request to TEG... ";
            logMsg(details, log, JobStageConstants.PollingStages.RUNNING, Priority.ERROR_INT, e);
        }
    } else {
        try {
            httpclient = new DefaultHttpClient();
            this.pasHttpRequestHandler.getSecuredHttpClient(httpclient);
            HttpGet request = new HttpGet(getFullUrl(settings.getServerHost())
                    + "/NetworkManagerWS/getFile/forPackage/" + pkg.getPackageId() + "/"
                    + pkg.getPackageVersion() + "/" + customerName + "/" + pkg.getInstance_name());
            ((AbstractHttpClient) httpclient).getCredentialsProvider().setCredentials(AuthScope.ANY,
                    new UsernamePasswordCredentials(settings.getUserId(), settings.getPassword()));
            HttpResponse response = httpclient.execute(request);
            inStream = response.getEntity().getContent();
        } catch (Exception e) {
            log = "Error while posting request to retrieve package... ";
            logMsg(details, log, JobStageConstants.PollingStages.RUNNING, Priority.ERROR_INT, e);
        }
    }

    try {
        if (inStream != null) {
            ReadableByteChannel rbc = Channels.newChannel(inStream);
            String filePath = ICF_UPLOAD_FOLDER + File.separatorChar + pkg.getPackageId();

            File file = new File(filePath);
            if (!file.getParentFile().exists()) {
                // ensure parent folder exists
                file.getParentFile().mkdir();
            }

            if (file.exists()) {
                file.delete();
            }
            FileOutputStream fos = null;
            try {
                fos = new FileOutputStream(file);
                fos.getChannel().transferFrom(rbc, 0, 1 << 24);
            } finally {
                try {
                    if (fos != null) {
                        fos.close();
                    }
                } catch (Exception ignore) {
                    log = "Error while closing FileOutputStream : ";
                    logMsg(details, log, JobStageConstants.PollingStages.RUNNING, Priority.ERROR_INT, ignore);
                }

                try {
                    inStream.close();
                } catch (Exception ignore) {
                    log = "Error while closing inStream : ";
                    logMsg(details, log, JobStageConstants.PollingStages.RUNNING, Priority.ERROR_INT, ignore);
                }
            }
            return filePath;
        }
    } finally {
        if (httpclient != null) {
            httpclient.getConnectionManager().shutdown();
        }
    }
    return null;
}

From source file:com.healthmarketscience.jackcess.Database.java

/**
 * Copies the given InputStream to the given channel using the most
 * efficient means possible.
 */
private static void transferFrom(FileChannel channel, InputStream in) throws IOException {
    ReadableByteChannel readChannel = Channels.newChannel(in);
    if (!BROKEN_NIO) {
        // sane implementation
        channel.transferFrom(readChannel, 0, MAX_EMPTYDB_SIZE);
    } else {
        // do things the hard way for broken vms
        ByteBuffer bb = ByteBuffer.allocate(8096);
        while (readChannel.read(bb) >= 0) {
            bb.flip();
            channel.write(bb);
            bb.clear();
        }
    }
}