Example usage for org.apache.hadoop.io IOUtils cleanup

Introduction

On this page you can find example usages of org.apache.hadoop.io IOUtils cleanup, drawn from open-source projects.

Prototype

@Deprecated
public static void cleanup(Log log, java.io.Closeable... closeables) 

Document

Close the Closeable objects and ignore any Throwable or null pointers.
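
The method takes a commons-logging Log (or null, which suppresses logging of close failures) followed by any number of Closeables. Recent Hadoop releases deprecate it in favor of IOUtils.cleanupWithLogger, which takes an slf4j Logger instead. Below is a minimal, self-contained sketch of the pattern most of the usage examples on this page follow; the class name and the file-reading task are illustrative, not taken from any of the projects below.

import java.io.IOException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

public class CleanupExample {
    private static final Log LOG = LogFactory.getLog(CleanupExample.class);

    /** Reads the first byte of a local file, closing both resources on every path. */
    public static int readFirstByte(String file) throws IOException {
        FileSystem fs = null;
        FSDataInputStream in = null;
        try {
            fs = FileSystem.getLocal(new Configuration());
            in = fs.open(new Path(file));
            return in.read();
        } finally {
            // Closes each non-null argument in turn; a Throwable thrown by
            // close() is swallowed (logged at debug level because LOG is non-null).
            IOUtils.cleanup(LOG, in, fs);
        }
    }
}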

Usage

From source file:org.apache.falcon.replication.FilteredCopyListingTest.java

License:Apache License

private static void rmdirs(String path) throws Exception {
    FileSystem fileSystem = null;
    try {
        fileSystem = FileSystem.getLocal(new Configuration());
        fileSystem.delete(new Path(path), true);
    } finally {
        IOUtils.cleanup(null, fileSystem);
    }
}
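
Passing null as the first argument, as this example does, suppresses even the debug-level logging of close failures: cleanup swallows any Throwable raised by close() regardless, and only logs it when a non-null Log is supplied.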

From source file:org.apache.giraffa.TestLeaseManagement.java

License:Apache License

/**
 * This test shows that when a Region migrates, either by split or by
 * RegionServer shutdown, an incomplete file with a lease migrates with the
 * Region, and that the lease is reloaded upon open and stays valid.
 */
@Test
public void testLeaseMigration() throws Exception {
    String src = "/testLeaseFailure";
    Path path = new Path(src);
    FSDataOutputStream outputStream = grfs.create(path);
    MiniHBaseCluster cluster = UTIL.getHBaseCluster();
    try {
        // keep stream open intentionally
        HRegionServer newServer = cluster.startRegionServer().getRegionServer();
        newServer.waitForServerOnline();
        HRegionServer dyingServer = cluster.getRegionServer(0);
        cluster.stopRegionServer(dyingServer.getServerName());
        cluster.waitForRegionServerToStop(dyingServer.getServerName(), 10000L);

        INodeFile iNode = null;
        do {
            try {
                IOUtils.cleanup(LOG, connection);
                connection = ConnectionFactory.createConnection(conf);
                IOUtils.cleanup(LOG, nodeManager);
                nodeManager = GiraffaTestUtils.getNodeManager(conf, connection);
                iNode = INodeFile.valueOf(nodeManager.getINode(src));
            } catch (ConnectException ignored) {
            }
        } while (iNode == null);

        FileLease rowLease = iNode.getLease();
        LeaseManager leaseManager = LeaseManager
                .originateSharedLeaseManager(newServer.getRpcServer().getListenerAddress().toString());
        Collection<FileLease> leases = leaseManager.getLeases(rowLease.getHolder());
        assertThat(leases.size(), is(1));
        FileLease leaseManagerLease = leases.iterator().next();
        // The following asserts are here to highlight that as a result of
        // migrating the FileLease across RegionServers we lose expiration date
        // consistency between the row field and the LeaseManager.
        assertThat(rowLease, is(not(equalTo(leaseManagerLease))));
        assertThat(rowLease.getHolder(), is(equalTo(leaseManagerLease.getHolder())));
        assertThat(rowLease.getPath(), is(equalTo(leaseManagerLease.getPath())));
        assertThat(rowLease.getLastUpdate(), is(not(equalTo(leaseManagerLease.getLastUpdate()))));
        // Renewing the lease restores the consistency.
        grfs.grfaClient.getNamespaceService().renewLease(grfs.grfaClient.getClientName());
        iNode = INodeFile.valueOf(nodeManager.getINode(src));
        rowLease = iNode.getLease();
        leases = leaseManager.getLeases(rowLease.getHolder());
        assertThat(leases.size(), is(1));
        leaseManagerLease = leases.iterator().next();
        assertThat(rowLease, is(equalTo(leaseManagerLease)));
    } finally {
        IOUtils.cleanup(LOG, outputStream);
    }
    INodeFile iNode = INodeFile.valueOf(nodeManager.getINode(src));
    assertThat(iNode.getFileState(), is(FileState.CLOSED));
    FileLease lease = iNode.getLease();
    assertThat(lease, is(nullValue()));
}
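
Here cleanup serves two purposes: inside the retry loop it discards a possibly half-open connection and node manager before reconnecting, and in the finally block it tears down the output stream. Both uses rely on cleanup tolerating nulls and swallowing close failures, which keeps the retry loop simple.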

From source file:org.apache.hive.service.cli.operation.HiveCommandOperation.java

License:Apache License

private void tearDownSessionIO() {
    IOUtils.cleanup(LOG, parentSession.getSessionState().out);
    IOUtils.cleanup(LOG, parentSession.getSessionState().err);
}
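
Since cleanup is a varargs method, the two calls could equally be collapsed into one: IOUtils.cleanup(LOG, parentSession.getSessionState().out, parentSession.getSessionState().err);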

From source file:org.apache.hive.service.cli.operation.HiveCommandOperation.java

License:Apache License

private void resetResultReader() {
    if (resultReader != null) {
        IOUtils.cleanup(LOG, resultReader);
        resultReader = null;
    }
}
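
The null check here is purely defensive: cleanup already ignores null arguments, so calling it unconditionally before clearing the field would behave the same.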

From source file:org.apache.james.mailbox.hbase.HBaseMailboxSessionMapperFactory.java

License:Apache License

/**
 * Creates the necessary tables in HBase if they do not exist.
 *
 * @param conf Configuration for the cluster
 * @param uidProvider UID provider for mailbox uids
 * @param modSeqProvider ModSeq provider for mailbox modification sequences
 * @param messageIdFactory factory used to create message ids
 * @throws MasterNotRunningException
 * @throws ZooKeeperConnectionException
 * @throws IOException
 */
public HBaseMailboxSessionMapperFactory(Configuration conf, UidProvider uidProvider,
        ModSeqProvider modSeqProvider, MessageId.Factory messageIdFactory) {
    this.conf = conf;
    this.uidProvider = uidProvider;
    this.modSeqProvider = modSeqProvider;
    this.messageIdFactory = messageIdFactory;

    //TODO: add better exception handling for this
    HBaseAdmin hbaseAdmin = null;
    try {
        hbaseAdmin = new HBaseAdmin(conf);
        HTableDescriptor desc = null;
        HColumnDescriptor hColumnDescriptor = null;

        /* create the tables if they do not exist */

        if (!hbaseAdmin.tableExists(MAILBOXES_TABLE)) {
            desc = new HTableDescriptor(MAILBOXES_TABLE);
            hColumnDescriptor = new HColumnDescriptor(MAILBOX_CF);
            hColumnDescriptor.setMaxVersions(1);
            desc.addFamily(hColumnDescriptor);
            hbaseAdmin.createTable(desc);
        }

        if (!hbaseAdmin.tableExists(MESSAGES_TABLE)) {
            /**TODO: try to reduce the number of column families as suggested by:
             * http://hbase.apache.org/book.html#number.of.cfs
             * We are down to three column families, aiming for just two.
             */
            desc = new HTableDescriptor(MESSAGES_TABLE);
            hColumnDescriptor = new HColumnDescriptor(MESSAGES_META_CF);
            hColumnDescriptor.setMaxVersions(1);
            desc.addFamily(hColumnDescriptor);
            hColumnDescriptor = new HColumnDescriptor(MESSAGE_DATA_HEADERS_CF);
            hColumnDescriptor.setMaxVersions(1);
            desc.addFamily(hColumnDescriptor);
            hColumnDescriptor = new HColumnDescriptor(MESSAGE_DATA_BODY_CF);
            hColumnDescriptor.setMaxVersions(1);
            desc.addFamily(hColumnDescriptor);
            hbaseAdmin.createTable(desc);
        }

        if (!hbaseAdmin.tableExists(SUBSCRIPTIONS_TABLE)) {
            desc = new HTableDescriptor(SUBSCRIPTIONS_TABLE);
            hColumnDescriptor = new HColumnDescriptor(SUBSCRIPTION_CF);
            hColumnDescriptor.setMaxVersions(1);
            desc.addFamily(hColumnDescriptor);
            hbaseAdmin.createTable(desc);
        }

    } catch (Exception e) {
        throw new RuntimeException(e);
    } finally {
        IOUtils.cleanup(null, hbaseAdmin);
    }
}
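
Note that even though every checked exception is rethrown as a RuntimeException, the finally block still closes the HBaseAdmin via cleanup(null, hbaseAdmin), so the connection is released on both the success and the failure path.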

From source file:org.apache.james.mailbox.hbase.mail.HBaseMailboxMapper.java

License:Apache License

public void deleteAllMemberships() {
    HTable messages = null;
    HTable mailboxes = null;
    ResultScanner scanner = null;
    try {
        messages = new HTable(conf, MESSAGES_TABLE);
        mailboxes = new HTable(conf, MAILBOXES_TABLE);
        Scan scan = new Scan();
        scan.setMaxVersions(1);
        scan.addColumn(MESSAGES_META_CF, MESSAGE_INTERNALDATE);
        scanner = messages.getScanner(scan);
        Result result;
        List<Delete> deletes = new ArrayList<Delete>();
        while ((result = scanner.next()) != null) {
            deletes.add(new Delete(result.getRow()));
        }
        long totalDeletes = deletes.size();
        messages.delete(deletes);
        if (deletes.size() > 0) {
            //TODO: what should we do if not all messages are deleted?
            // HTable.delete(List) removes the successfully applied Deletes from
            // the list, so any entries remaining in it are the ones that failed.
            System.out.println("Just " + (totalDeletes - deletes.size()) + " out of " + totalDeletes
                    + " messages have been deleted");
            //throw new RuntimeException("Just " + (totalDeletes - deletes.size()) + " out of " + totalDeletes + " messages have been deleted");
        }
        List<Put> puts = new ArrayList<Put>();
        scan = new Scan();
        scan.setMaxVersions(1);
        scan.addColumn(MAILBOX_CF, MAILBOX_MESSAGE_COUNT);
        IOUtils.cleanup(null, scanner);
        scanner = mailboxes.getScanner(scan);
        Put put = null;
        while ((result = scanner.next()) != null) {
            put = new Put(result.getRow());
            put.add(MAILBOX_CF, MAILBOX_MESSAGE_COUNT, Bytes.toBytes(0L));
            puts.add(put);
        }
        // apply the puts so the per-mailbox message counts are actually reset
        mailboxes.put(puts);
    } catch (IOException e) {
        throw new RuntimeException("Error deleting MESSAGES table ", e);
    } finally {
        IOUtils.cleanup(null, scanner, messages, mailboxes);
    }
}
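
Worth noticing is the mid-method cleanup(null, scanner) call before the variable is reassigned to the mailboxes scanner: it closes the first scanner early, so the finally block only has to release the most recent one along with both tables.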

From source file:org.apache.tajo.engine.planner.physical.RangeShuffleFileWriteExec.java

License:Apache License

public void close() throws IOException {
    super.close();

    appender.flush();
    IOUtils.cleanup(LOG, appender);
    indexWriter.flush();
    IOUtils.cleanup(LOG, indexWriter);

    // Collect statistics data
    context.setResultStats(appender.getStats());
    context.addShuffleFileOutput(0, context.getTaskId().toString());
    appender = null;
    indexWriter = null;
}
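
Flushing the appender and the index writer explicitly before handing them to cleanup is deliberate: a failed flush propagates as an IOException to the caller, whereas if the data were only flushed as part of close() inside cleanup, any error would be swallowed silently.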

From source file:org.apache.tajo.engine.planner.physical.SeqScanExec.java

License:Apache License

@Override
public void close() throws IOException {
    IOUtils.cleanup(null, scanner);
    if (scanner != null) {
        try {
            TableStats stat = scanner.getInputStats();
            if (stat != null) {
                inputStats = (TableStats) (stat.clone());
            }
        } catch (CloneNotSupportedException e) {
            e.printStackTrace();
        }
    }
    scanner = null;
    plan = null;
    qual = null;
    projector = null;
}

From source file:org.apache.tajo.engine.planner.physical.StoreIndexExec.java

License:Apache License

@Override
public void close() throws IOException {
    super.close();

    indexWriter.flush();
    IOUtils.cleanup(LOG, indexWriter);

    indexWriter = null;
}

From source file:org.apache.tajo.master.TajoMaster.java

License:Apache License

@Override
public void stop() {
    if (haService != null) {
        try {
            haService.delete();
        } catch (Exception e) {
            LOG.error(e, e);
        }
    }

    if (webServer != null) {
        try {
            webServer.stop();
        } catch (Exception e) {
            LOG.error(e, e);
        }
    }

    IOUtils.cleanup(LOG, catalogServer);

    if (systemMetrics != null) {
        systemMetrics.stop();
    }

    if (pauseMonitor != null)
        pauseMonitor.stop();
    super.stop();

    LOG.info("Tajo Master main thread exiting");
}