Example usage for com.google.common.net HostAndPort toString

Introduction

On this page you can find example usages of com.google.common.net HostAndPort.toString().

Prototype

@Override
public String toString() 

Document

Rebuild the host:port string, including brackets if necessary.
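
As a quick illustration, here is a minimal, self-contained sketch (not taken from the projects below): toString() re-adds brackets around IPv6 literals and omits the port when none was given.

import com.google.common.net.HostAndPort;

// Illustrative demo class, not part of any of the source files listed below.
public class HostAndPortToStringDemo {
    public static void main(String[] args) {
        // A plain host and port round-trips as "example.com:8080"
        System.out.println(HostAndPort.fromParts("example.com", 8080).toString());

        // An IPv6 literal is wrapped in brackets again: "[::1]:443"
        System.out.println(HostAndPort.fromParts("::1", 443).toString());

        // With no port present, only the host is printed: "example.com"
        System.out.println(HostAndPort.fromString("example.com").toString());
    }
}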

Usage

From source file:com.streamsets.pipeline.kafka.impl.KafkaLowLevelConsumer09.java
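
In this example, broker.toString() identifies the broker address when a metadata fetch fails and the error is logged.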

private PartitionMetadata getPartitionMetadata(List<HostAndPort> brokers, String topic, int partition) {
    PartitionMetadata returnMetaData = null;
    for (HostAndPort broker : brokers) {
        SimpleConsumer simpleConsumer = null;
        try {
            LOG.info(
                    "Creating SimpleConsumer using the following configuration: host {}, port {}, max wait time {}, max "
                            + "fetch size {}, client columnName {}",
                    broker.getHostText(), broker.getPort(), METADATA_READER_TIME_OUT, BUFFER_SIZE,
                    METADATA_READER_CLIENT);
            simpleConsumer = new SimpleConsumer(broker.getHostText(), broker.getPort(),
                    METADATA_READER_TIME_OUT, BUFFER_SIZE, METADATA_READER_CLIENT);

            List<String> topics = Collections.singletonList(topic);
            TopicMetadataRequest req = new TopicMetadataRequest(topics);
            kafka.javaapi.TopicMetadataResponse resp = simpleConsumer.send(req);

            List<TopicMetadata> metaData = resp.topicsMetadata();
            for (TopicMetadata item : metaData) {
                for (PartitionMetadata part : item.partitionsMetadata()) {
                    if (part.partitionId() == partition) {
                        returnMetaData = part;
                        break;
                    }
                }
            }
        } catch (Exception e) {
            LOG.error(KafkaErrors.KAFKA_25.getMessage(), broker.toString(), topic, partition, e.toString(), e);
        } finally {
            if (simpleConsumer != null) {
                simpleConsumer.close();
            }
        }
    }
    if (returnMetaData != null) {
        replicaBrokers.clear();
        for (kafka.cluster.BrokerEndPoint replica : returnMetaData.replicas()) {
            replicaBrokers.add(HostAndPort.fromParts(replica.host(), replica.port()));
        }
    }
    return returnMetaData;
}

From source file:com.streamsets.pipeline.kafka.impl.KafkaLowLevelConsumer08.java
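
The Kafka 0.8 variant of the same consumer; again, broker.toString() names the failing broker in the error log.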

private PartitionMetadata getPartitionMetadata(List<HostAndPort> brokers, String topic, int partition) {
    PartitionMetadata returnMetaData = null;
    for (HostAndPort broker : brokers) {
        SimpleConsumer simpleConsumer = null;
        try {
            LOG.info(
                    "Creating SimpleConsumer using the following configuration: host {}, port {}, max wait time {}, max "
                            + "fetch size {}, client columnName {}",
                    broker.getHostText(), broker.getPort(), METADATA_READER_TIME_OUT, BUFFER_SIZE,
                    METADATA_READER_CLIENT);
            simpleConsumer = new SimpleConsumer(broker.getHostText(), broker.getPort(),
                    METADATA_READER_TIME_OUT, BUFFER_SIZE, METADATA_READER_CLIENT);

            List<String> topics = Collections.singletonList(topic);
            TopicMetadataRequest req = new TopicMetadataRequest(topics);
            kafka.javaapi.TopicMetadataResponse resp = simpleConsumer.send(req);

            List<TopicMetadata> metaData = resp.topicsMetadata();
            for (TopicMetadata item : metaData) {
                for (PartitionMetadata part : item.partitionsMetadata()) {
                    if (part.partitionId() == partition) {
                        returnMetaData = part;
                        break;
                    }
                }
            }
        } catch (Exception e) {
            LOG.error(KafkaErrors.KAFKA_25.getMessage(), broker.toString(), topic, partition, e.toString(), e);
        } finally {
            if (simpleConsumer != null) {
                simpleConsumer.close();
            }
        }
    }
    if (returnMetaData != null) {
        replicaBrokers.clear();
        for (kafka.cluster.Broker replica : returnMetaData.replicas()) {
            replicaBrokers.add(HostAndPort.fromParts(replica.host(), replica.port()));
        }
    }
    return returnMetaData;
}

From source file:org.apache.accumulo.gc.SimpleGarbageCollector.java
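
Here addr.toString() supplies the garbage collector's advertised address when building the ServerServices lock data written to ZooKeeper.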

private void getZooLock(HostAndPort addr) throws KeeperException, InterruptedException {
    String path = ZooUtil.getRoot(getInstance()) + Constants.ZGC_LOCK;

    LockWatcher lockWatcher = new LockWatcher() {
        @Override
        public void lostLock(LockLossReason reason) {
            Halt.halt("GC lock in zookeeper lost (reason = " + reason + "), exiting!");
        }

        @Override
        public void unableToMonitorLockNode(final Throwable e) {
            // ACCUMULO-3651 Level changed to error and FATAL added to message for slf4j compatibility
            Halt.halt(-1, new Runnable() {

                @Override
                public void run() {
                    log.error("FATAL: No longer able to monitor lock node ", e);
                }
            });

        }
    };

    while (true) {
        lock = new ZooLock(path);
        if (lock.tryLock(lockWatcher,
                new ServerServices(addr.toString(), Service.GC_CLIENT).toString().getBytes())) {
            log.debug("Got GC ZooKeeper lock");
            return;
        }
        log.debug("Failed to get GC ZooKeeper lock, will retry");
        sleepUninterruptibly(1, TimeUnit.SECONDS);
    }
}

From source file:org.apache.accumulo.core.client.impl.ConditionalWriterImpl.java
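
In this example, location.toString() identifies the tablet server when invalidating the location cache, wrapping server-side errors, and building the timeout exception.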

/**
 * The purpose of this code is to ensure that a conditional mutation will not execute when its status is unknown. This allows a user to read the row when the
 * status is unknown and not have to worry about the tserver applying the mutation after the scan.
 *
 * <p>
 * If a conditional mutation is taking a long time to process, then this method will wait for it to finish... unless this exceeds timeout.
 */
private void invalidateSession(SessionID sessionId, HostAndPort location)
        throws AccumuloException, AccumuloSecurityException, TableNotFoundException {

    long sleepTime = 50;

    long startTime = System.currentTimeMillis();

    Instance instance = context.getInstance();
    LockID lid = new LockID(ZooUtil.getRoot(instance) + Constants.ZTSERVERS, sessionId.lockId);

    ZooCacheFactory zcf = new ZooCacheFactory();
    while (true) {
        if (!ZooLock.isLockHeld(
                zcf.getZooCache(instance.getZooKeepers(), instance.getZooKeepersSessionTimeOut()), lid)) {
            // ACCUMULO-1152 added a tserver lock check to the tablet location cache, so this invalidation prevents future attempts to contact the
            // tserver even if it's gone zombie and is still running w/o a lock
            locator.invalidateCache(context.getInstance(), location.toString());
            return;
        }

        try {
            // if the mutation is currently processing, this method will block until it's done or times out
            invalidateSession(sessionId.sessionID, location);

            return;
        } catch (TApplicationException tae) {
            throw new AccumuloServerException(location.toString(), tae);
        } catch (TException e) {
            locator.invalidateCache(context.getInstance(), location.toString());
        }

        if ((System.currentTimeMillis() - startTime) + sleepTime > timeout)
            throw new TimedOutException(Collections.singleton(location.toString()));

        sleepUninterruptibly(sleepTime, TimeUnit.MILLISECONDS);
        sleepTime = Math.min(2 * sleepTime, MAX_SLEEP);

    }

}

From source file:org.apache.accumulo.core.client.impl.ConditionalWriterImpl.java
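
location.toString() also appears throughout sendToServer: as the server name attached to each Result, in AccumuloServerException, and as the key for cache invalidation.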

private void sendToServer(HostAndPort location, TabletServerMutations<QCMutation> mutations) {
    TabletClientService.Iface client = null;

    TInfo tinfo = Tracer.traceInfo();

    Map<Long, CMK> cmidToCm = new HashMap<Long, CMK>();
    MutableLong cmid = new MutableLong(0);

    SessionID sessionId = null;

    try {
        Map<TKeyExtent, List<TConditionalMutation>> tmutations = new HashMap<TKeyExtent, List<TConditionalMutation>>();

        CompressedIterators compressedIters = new CompressedIterators();
        convertMutations(mutations, cmidToCm, cmid, tmutations, compressedIters);

        // getClient() call must come after convertMutations in case it throws a TException
        client = getClient(location);

        List<TCMResult> tresults = null;
        while (tresults == null) {
            try {
                sessionId = reserveSessionID(location, client, tinfo);
                tresults = client.conditionalUpdate(tinfo, sessionId.sessionID, tmutations,
                        compressedIters.getSymbolTable());
            } catch (NoSuchScanIDException nssie) {
                sessionId = null;
                invalidateSessionID(location);
            }
        }

        HashSet<KeyExtent> extentsToInvalidate = new HashSet<KeyExtent>();

        ArrayList<QCMutation> ignored = new ArrayList<QCMutation>();

        for (TCMResult tcmResult : tresults) {
            if (tcmResult.status == TCMStatus.IGNORED) {
                CMK cmk = cmidToCm.get(tcmResult.cmid);
                ignored.add(cmk.cm);
                extentsToInvalidate.add(cmk.ke);
            } else {
                QCMutation qcm = cmidToCm.get(tcmResult.cmid).cm;
                qcm.queueResult(new Result(fromThrift(tcmResult.status), qcm, location.toString()));
            }
        }

        for (KeyExtent ke : extentsToInvalidate) {
            locator.invalidateCache(ke);
        }

        queueRetry(ignored, location);

    } catch (ThriftSecurityException tse) {
        AccumuloSecurityException ase = new AccumuloSecurityException(context.getCredentials().getPrincipal(),
                tse.getCode(), Tables.getPrintableTableInfoFromId(context.getInstance(), tableId), tse);
        queueException(location, cmidToCm, ase);
    } catch (TTransportException e) {
        locator.invalidateCache(context.getInstance(), location.toString());
        invalidateSession(location, mutations, cmidToCm, sessionId);
    } catch (TApplicationException tae) {
        queueException(location, cmidToCm, new AccumuloServerException(location.toString(), tae));
    } catch (TException e) {
        locator.invalidateCache(context.getInstance(), location.toString());
        invalidateSession(location, mutations, cmidToCm, sessionId);
    } catch (Exception e) {
        queueException(location, cmidToCm, e);
    } finally {
        if (sessionId != null)
            unreserveSessionID(location);
        ThriftUtil.returnClient((TServiceClient) client);
    }
}

From source file:org.apache.accumulo.core.client.impl.TableOperationsImpl.java
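
When splitting tablets, the parsed tablet server address is converted back to a string with address.toString() if the RPC fails with a TApplicationException.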

private void addSplits(String tableName, SortedSet<Text> partitionKeys, String tableId)
        throws AccumuloException, AccumuloSecurityException, TableNotFoundException, AccumuloServerException {
    TabletLocator tabLocator = TabletLocator.getLocator(context, tableId);

    for (Text split : partitionKeys) {
        boolean successful = false;
        int attempt = 0;
        long locationFailures = 0;

        while (!successful) {

            if (attempt > 0)
                sleepUninterruptibly(100, TimeUnit.MILLISECONDS);

            attempt++;

            TabletLocation tl = tabLocator.locateTablet(context, split, false, false);

            if (tl == null) {
                if (!Tables.exists(context.getInstance(), tableId))
                    throw new TableNotFoundException(tableId, tableName, null);
                else if (Tables.getTableState(context.getInstance(), tableId) == TableState.OFFLINE)
                    throw new TableOfflineException(context.getInstance(), tableId);
                continue;
            }

            HostAndPort address = HostAndPort.fromString(tl.tablet_location);

            try {
                TabletClientService.Client client = ThriftUtil.getTServerClient(address, context);
                try {

                    OpTimer timer = null;

                    if (log.isTraceEnabled()) {
                        log.trace("tid={} Splitting tablet {} on {} at {}", Thread.currentThread().getId(),
                                tl.tablet_extent, address, split);
                        timer = new OpTimer().start();
                    }

                    client.splitTablet(Tracer.traceInfo(), context.rpcCreds(), tl.tablet_extent.toThrift(),
                            TextUtil.getByteBuffer(split));

                    // just split it, might as well invalidate it in the cache
                    tabLocator.invalidateCache(tl.tablet_extent);

                    if (timer != null) {
                        timer.stop();
                        log.trace("Split tablet in {}",
                                String.format("%.3f secs", timer.scale(TimeUnit.SECONDS)));
                    }

                } finally {
                    ThriftUtil.returnClient(client);
                }

            } catch (TApplicationException tae) {
                throw new AccumuloServerException(address.toString(), tae);
            } catch (TTransportException e) {
                tabLocator.invalidateCache(context.getInstance(), tl.tablet_location);
                continue;
            } catch (ThriftSecurityException e) {
                Tables.clearCache(context.getInstance());
                if (!Tables.exists(context.getInstance(), tableId))
                    throw new TableNotFoundException(tableId, tableName, null);
                throw new AccumuloSecurityException(e.user, e.code, e);
            } catch (NotServingTabletException e) {
                // Do not silently spin when we repeatedly fail to get the location for a tablet
                locationFailures++;
                if (5 == locationFailures || 0 == locationFailures % 50) {
                    log.warn(
                            "Having difficulty locating hosting tabletserver for split {} on table {}. Seen {} failures.",
                            split, tableName, locationFailures);
                }

                tabLocator.invalidateCache(tl.tablet_extent);
                continue;
            } catch (TException e) {
                tabLocator.invalidateCache(context.getInstance(), tl.tablet_location);
                continue;
            }

            successful = true;
        }
    }
}

From source file:org.apache.accumulo.tserver.replication.AccumuloReplicaSystem.java
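
During WAL replication, peerTserver.toString() labels each trace span with the peer tablet server that the batches are sent to.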

protected Status replicateLogs(ClientContext peerContext, final HostAndPort peerTserver,
        final ReplicationTarget target, final Path p, final Status status, final long sizeLimit,
        final String remoteTableId, final TCredentials tcreds, final ReplicaSystemHelper helper,
        final UserGroupInformation accumuloUgi)
        throws TTransportException, AccumuloException, AccumuloSecurityException {

    log.debug("Replication WAL to peer tserver");
    final Set<Integer> tids;
    final DataInputStream input;
    Span span = Trace.start("Read WAL header");
    span.data("file", p.toString());
    try {
        input = getWalStream(p);
    } catch (LogHeaderIncompleteException e) {
        log.warn(
                "Could not read header from {}, assuming that there is no data present in the WAL, therefore replication is complete",
                p);
        Status newStatus;
        // Bump up the begin to the (infinite) end, trying to be accurate
        if (status.getInfiniteEnd()) {
            newStatus = Status.newBuilder(status).setBegin(Long.MAX_VALUE).build();
        } else {
            newStatus = Status.newBuilder(status).setBegin(status.getEnd()).build();
        }
        span = Trace.start("Update replication table");
        try {
            helper.recordNewStatus(p, newStatus, target);
        } catch (TableNotFoundException tnfe) {
            log.error("Tried to update status in replication table for {} as {}, but the table did not exist",
                    p, ProtobufUtil.toString(newStatus), e);
            throw new RuntimeException("Replication table did not exist, will retry", e);
        } finally {
            span.stop();
        }
        return newStatus;
    } catch (IOException e) {
        log.error("Could not create stream for WAL", e);
        // No data sent (bytes nor records) and no progress made
        return status;
    } finally {
        span.stop();
    }

    log.debug("Skipping unwanted data in WAL");
    span = Trace.start("Consume WAL prefix");
    span.data("file", p.toString());
    try {
        // We want to read all records in the WAL up to the "begin" offset contained in the Status message,
        // building a Set of tids from DEFINE_TABLET events which correspond to table ids for future mutations
        tids = consumeWalPrefix(target, input, p, status, sizeLimit);
    } catch (IOException e) {
        log.warn("Unexpected error consuming file.");
        return status;
    } finally {
        span.stop();
    }

    log.debug("Sending batches of data to peer tserver");

    Status lastStatus = status, currentStatus = status;
    final AtomicReference<Exception> exceptionRef = new AtomicReference<>();
    while (true) {
        // Set some trace info
        span = Trace.start("Replicate WAL batch");
        span.data("Batch size (bytes)", Long.toString(sizeLimit));
        span.data("File", p.toString());
        span.data("Peer instance name", peerContext.getInstance().getInstanceName());
        span.data("Peer tserver", peerTserver.toString());
        span.data("Remote table ID", remoteTableId);

        ReplicationStats replResult;
        try {
            // Read and send a batch of mutations
            replResult = ReplicationClient.executeServicerWithReturn(peerContext, peerTserver,
                    new WalClientExecReturn(target, input, p, currentStatus, sizeLimit, remoteTableId, tcreds,
                            tids));
        } catch (Exception e) {
            log.error("Caught exception replicating data to {} at {}",
                    peerContext.getInstance().getInstanceName(), peerTserver, e);
            throw e;
        } finally {
            span.stop();
        }

        // Catch the overflow
        long newBegin = currentStatus.getBegin() + replResult.entriesConsumed;
        if (newBegin < 0) {
            newBegin = Long.MAX_VALUE;
        }

        currentStatus = Status.newBuilder(currentStatus).setBegin(newBegin).build();

        log.debug("Sent batch for replication of {} to {}, with new Status {}", p, target,
                ProtobufUtil.toString(currentStatus));

        // If we got a different status
        if (!currentStatus.equals(lastStatus)) {
            span = Trace.start("Update replication table");
            try {
                if (null != accumuloUgi) {
                    final Status copy = currentStatus;
                    accumuloUgi.doAs(new PrivilegedAction<Void>() {
                        @Override
                        public Void run() {
                            try {
                                helper.recordNewStatus(p, copy, target);
                            } catch (Exception e) {
                                exceptionRef.set(e);
                            }
                            return null;
                        }
                    });
                    Exception e = exceptionRef.get();
                    if (null != e) {
                        if (e instanceof TableNotFoundException) {
                            throw (TableNotFoundException) e;
                        } else if (e instanceof AccumuloSecurityException) {
                            throw (AccumuloSecurityException) e;
                        } else if (e instanceof AccumuloException) {
                            throw (AccumuloException) e;
                        } else {
                            throw new RuntimeException("Received unexpected exception", e);
                        }
                    }
                } else {
                    helper.recordNewStatus(p, currentStatus, target);
                }
            } catch (TableNotFoundException e) {
                log.error(
                        "Tried to update status in replication table for {} as {}, but the table did not exist",
                        p, ProtobufUtil.toString(currentStatus), e);
                throw new RuntimeException("Replication table did not exist, will retry", e);
            } finally {
                span.stop();
            }

            log.debug("Recorded updated status for {}: {}", p, ProtobufUtil.toString(currentStatus));

            // If we don't have any more work, just quit
            if (!StatusUtil.isWorkRequired(currentStatus)) {
                return currentStatus;
            } else {
                // Otherwise, let it loop and replicate some more data
                lastStatus = currentStatus;
            }
        } else {
            log.debug("Did not replicate any new data for {} to {}, (state was {})", p, target,
                    ProtobufUtil.toString(lastStatus));

            // otherwise, we didn't actually replicate (likely because there was an error sending the data)
            // we can just not record any updates, and it will be picked up again by the work assigner
            return status;
        }
    }
}