Example usage for com.google.common.util.concurrent UncheckedExecutionException getMessage

Introduction

On this page you can find example usage for com.google.common.util.concurrent.UncheckedExecutionException#getMessage.

Prototype

public String getMessage() 

Document

Returns the detail message string of this throwable.
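
Before the harvested examples below, here is a minimal, self-contained sketch of the typical situation in which getMessage is called on an UncheckedExecutionException: a Guava LoadingCache loader throws an unchecked exception, getUnchecked wraps it, and the caller logs the detail message. The class name GetMessageSketch and the always-failing loader are illustrative assumptions, not taken from any of the projects listed below.

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.google.common.util.concurrent.UncheckedExecutionException;

public class GetMessageSketch {
    public static void main(String[] args) {
        // A loader that always fails with an unchecked exception; getUnchecked()
        // wraps that failure in an UncheckedExecutionException.
        LoadingCache<String, String> cache = CacheBuilder.newBuilder()
                .build(new CacheLoader<String, String>() {
                    @Override
                    public String load(String key) {
                        throw new IllegalStateException("no value cached for " + key);
                    }
                });
        try {
            cache.getUnchecked("some-key");
        } catch (UncheckedExecutionException e) {
            // getMessage() returns the detail message, which by default reflects the
            // wrapped cause's toString(); the original exception is available via getCause().
            System.err.println("cache lookup failed: " + e.getMessage());
        }
    }
}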

Usage

From source file:org.restexpress.plugin.content.adapter.CachedContextAdapter.java

@Override
public File retrieve(final String name) throws IOException {
    File result = null;
    if (name != null)
        try {
            final Optional<File> value = fileCache.get(name);
            if (value.isPresent()) {
                result = value.get();
                // file from cache has been removed by hand
                if (result != null && !result.exists()) {
                    fileCache.invalidate(name);
                    result = null;
                }
            }
        } catch (final UncheckedExecutionException u) {
            log.error(u.getMessage());
            u.printStackTrace();
        } catch (final ExecutionException e) {
            log.error(e.getMessage());
        }
    return result;
}

From source file:org.opendaylight.infrautils.caches.guava.internal.CacheGuavaAdapter.java

@Override
public V get(K key) {
    try {
        return guavaCache().getUnchecked(key);
    } catch (UncheckedExecutionException e) {
        throw throwCause(e);
    } catch (InvalidCacheLoadException e) {
        throw new BadCacheFunctionRuntimeException(
                "InvalidCacheLoadException from Guava getUnchecked(): " + e.getMessage(), e);
    }
}

From source file:org.opendaylight.infrautils.caches.guava.internal.CacheGuavaAdapter.java

@Override
public ImmutableMap<K, V> get(Iterable<? extends K> keys) {
    try {
        return guavaCache().getAll(keys);
    } catch (UncheckedExecutionException e) {
        throw throwCause(e);
    } catch (InvalidCacheLoadException e) {
        throw new BadCacheFunctionRuntimeException(
                "InvalidCacheLoadException from Guava getAll(): " + e.getMessage(), e);
    } catch (ExecutionException e) {
        // This normally should never happen here, because according to Guava Cache's doc,
        // an ExecutionException is thrown by getAll when its CacheLoader (thus our
        // CacheFunction) throws a checked exception - but our CacheFunction never can,
        // according to its signature - that's what the CheckedCacheFunction and
        // CheckedCacheGuavaAdapter are for... if this happens, something is wrong.
        // NB This is very different in CheckedCacheGuavaAdapter, where we do expect it!
        throw new BadCacheFunctionRuntimeException("CacheFunction checked exception", e);
    }
}

From source file:org.eclipse.mylyn.internal.bugzilla.rest.core.BugzillaRestConnector.java

public BugzillaRestConfiguration getRepositoryConfiguration(TaskRepository repository) throws CoreException {
    if (clientCache.getIfPresent(new RepositoryKey(repository)) == null) {
        getClient(repository);
    }
    try {
        Optional<BugzillaRestConfiguration> configurationOptional = configurationCache
                .get(new RepositoryKey(repository));
        return configurationOptional.isPresent() ? configurationOptional.get() : null;
    } catch (UncheckedExecutionException e) {
        throw new CoreException(new Status(IStatus.ERROR, BugzillaRestCore.ID_PLUGIN, e.getMessage(), e));
    } catch (ExecutionException e) {
        throw new CoreException(new Status(IStatus.ERROR, BugzillaRestCore.ID_PLUGIN, e.getMessage(), e));
    }
}

From source file:org.apache.bookkeeper.metadata.etcd.EtcdRegistrationManager.java

private boolean waitUntilRegNodeExpired(String regPath, long leaseId) throws MetadataStoreException {
    ByteSequence regPathBs = ByteSequence.fromString(regPath);
    // check regPath again
    GetResponse getResp = msResult(kvClient.get(regPathBs));
    if (getResp.getCount() <= 0) {
        // key disappears after watching it
        return false;
    } else {
        KeyValue kv = getResp.getKvs().get(0);
        if (kv.getLease() != leaseId) {
            Watch watchClient = client.getWatchClient();
            Watcher watcher = watchClient.watch(regPathBs,
                    WatchOption.newBuilder().withRevision(getResp.getHeader().getRevision() + 1).build());
            log.info(
                    "Previous bookie registration (lease = {}) still exists at {}, "
                            + "so new lease '{}' will be waiting previous lease for {} seconds to be expired",
                    kv.getLease(), regPath, leaseId, bkRegister.getTtlSeconds());
            CompletableFuture<Void> watchFuture = CompletableFuture.runAsync(() -> {
                try {
                    while (true) {
                        log.info("Listening on '{}' until it is expired", regPath);
                        WatchResponse response = watcher.listen();
                        for (WatchEvent event : response.getEvents()) {
                            log.info("Received watch event on '{}' : EventType = {}", regPath,
                                    event.getEventType());
                            if (EventType.DELETE == event.getEventType()) {
                                return;
                            }
                        }
                    }
                } catch (InterruptedException e) {
                    throw new UncheckedExecutionException("Interrupted at waiting previous registration under "
                            + regPath + " (lease = " + kv.getLease() + ") to be expired", e);
                }
            });

            try {
                msResult(watchFuture, 2 * bkRegister.getTtlSeconds(), TimeUnit.SECONDS);
                return false;
            } catch (TimeoutException e) {
                watchFuture.cancel(true);
                throw new MetadataStoreException(
                        "Previous bookie registration still exists at " + regPath + " (lease = " + kv.getLease()
                                + ") after " + (2 * bkRegister.getTtlSeconds()) + " seconds elapsed");
            } catch (UncheckedExecutionException uee) {
                throw new MetadataStoreException(uee.getMessage(), uee.getCause());
            } finally {
                watcher.close();
            }
        } else {
            // key exists with same lease
            return true;
        }
    }
}

From source file:gobblin.data.management.conversion.hive.validation.ValidationJob.java

/***
 * Validate a {@link Table} if it was updated recently by checking if its update time
 * lies in the window between maxLookBackTime and skipRecentThanTime.
 * @param hiveDataset {@link ConvertibleHiveDataset} containing {@link Table} info.
 * @throws IOException Issue in validating {@link HiveDataset}
 */
private void processNonPartitionedTable(final ConvertibleHiveDataset hiveDataset) throws IOException {
    try {
        // Validate table
        final long updateTime = this.updateProvider.getUpdateTime(hiveDataset.getTable());

        log.info(String.format("Validating table: %s", hiveDataset.getTable()));

        for (final String format : hiveDataset.getDestFormats()) {
            Optional<ConvertibleHiveDataset.ConversionConfig> conversionConfigOptional = hiveDataset
                    .getConversionConfigForFormat(format);
            if (conversionConfigOptional.isPresent()) {
                ConvertibleHiveDataset.ConversionConfig conversionConfig = conversionConfigOptional.get();
                String orcTableName = conversionConfig.getDestinationTableName();
                String orcTableDatabase = conversionConfig.getDestinationDbName();
                Pair<Optional<org.apache.hadoop.hive.metastore.api.Table>, Optional<List<Partition>>> destinationMeta = getDestinationTableMeta(
                        orcTableDatabase, orcTableName, this.props);

                // Generate validation queries
                final List<String> validationQueries = HiveValidationQueryGenerator
                        .generateCountValidationQueries(hiveDataset, Optional.<Partition>absent(),
                                conversionConfig);
                final List<String> dataValidationQueries = Lists.newArrayList(HiveValidationQueryGenerator
                        .generateDataValidationQuery(hiveDataset.getTable().getTableName(),
                                hiveDataset.getTable().getDbName(), destinationMeta.getKey().get(),
                                Optional.<Partition>absent(), this.isNestedORC));

                this.futures.add(this.exec.submit(new Callable<Void>() {
                    @Override
                    public Void call() throws Exception {

                        // Execute validation queries
                        log.debug(String.format("Going to execute queries: %s for format: %s",
                                validationQueries, format));
                        List<Long> rowCounts = ValidationJob.this
                                .getValidationOutputFromHive(validationQueries);
                        log.debug(String.format("Going to execute queries: %s for format: %s",
                                dataValidationQueries, format));
                        List<Long> rowDataValidatedCount = ValidationJob.this
                                .getValidationOutputFromHive(dataValidationQueries);
                        // Validate and populate report
                        validateAndPopulateReport(hiveDataset.getTable().getCompleteName(), updateTime,
                                rowCounts, rowDataValidatedCount.get(0));

                        return null;
                    }
                }));
            } else {
                log.warn(String.format("No config found for format: %s So skipping table: %s for this format",
                        format, hiveDataset.getTable().getCompleteName()));
            }
        }
    } catch (UncheckedExecutionException e) {
        log.warn(String.format("Not validating table: %s %s", hiveDataset.getTable().getCompleteName(),
                e.getMessage()));
    } catch (UpdateNotFoundException e) {
        log.warn(String.format("Not validating table: %s as update time was not found. %s",
                hiveDataset.getTable().getCompleteName(), e.getMessage()));
    }
}

From source file:org.apache.gobblin.data.management.conversion.hive.validation.ValidationJob.java

/***
 * Validate a {@link Table} if it was updated recently by checking if its update time
 * lies in the window between maxLookBackTime and skipRecentThanTime.
 * @param hiveDataset {@link ConvertibleHiveDataset} containing {@link Table} info.
 * @throws IOException Issue in validating {@link HiveDataset}
 */
private void processNonPartitionedTable(final ConvertibleHiveDataset hiveDataset) throws IOException {
    try {
        // Validate table
        final long updateTime = this.updateProvider.getUpdateTime(hiveDataset.getTable());

        log.info(String.format("Validating table: %s", hiveDataset.getTable()));

        for (final String format : hiveDataset.getDestFormats()) {
            Optional<ConvertibleHiveDataset.ConversionConfig> conversionConfigOptional = hiveDataset
                    .getConversionConfigForFormat(format);
            if (conversionConfigOptional.isPresent()) {
                ConvertibleHiveDataset.ConversionConfig conversionConfig = conversionConfigOptional.get();
                String orcTableName = conversionConfig.getDestinationTableName();
                String orcTableDatabase = conversionConfig.getDestinationDbName();
                Pair<Optional<org.apache.hadoop.hive.metastore.api.Table>, Optional<List<Partition>>> destinationMeta = HiveConverterUtils
                        .getDestinationTableMeta(orcTableDatabase, orcTableName, this.props);

                // Generate validation queries
                final List<String> validationQueries = HiveValidationQueryGenerator
                        .generateCountValidationQueries(hiveDataset, Optional.<Partition>absent(),
                                conversionConfig);
                final List<String> dataValidationQueries = Lists.newArrayList(HiveValidationQueryGenerator
                        .generateDataValidationQuery(hiveDataset.getTable().getTableName(),
                                hiveDataset.getTable().getDbName(), destinationMeta.getKey().get(),
                                Optional.<Partition>absent(), this.isNestedORC));

                this.futures.add(this.exec.submit(new Callable<Void>() {
                    @Override
                    public Void call() throws Exception {

                        // Execute validation queries
                        log.debug(String.format("Going to execute queries: %s for format: %s",
                                validationQueries, format));
                        List<Long> rowCounts = ValidationJob.this
                                .getValidationOutputFromHive(validationQueries);
                        log.debug(String.format("Going to execute queries: %s for format: %s",
                                dataValidationQueries, format));
                        List<Long> rowDataValidatedCount = ValidationJob.this
                                .getValidationOutputFromHive(dataValidationQueries);
                        // Validate and populate report
                        validateAndPopulateReport(hiveDataset.getTable().getCompleteName(), updateTime,
                                rowCounts, rowDataValidatedCount);

                        return null;
                    }
                }));
            } else {
                log.warn(String.format("No config found for format: %s So skipping table: %s for this format",
                        format, hiveDataset.getTable().getCompleteName()));
            }
        }
    } catch (UncheckedExecutionException e) {
        log.warn(String.format("Not validating table: %s %s", hiveDataset.getTable().getCompleteName(),
                e.getMessage()));
    } catch (UpdateNotFoundException e) {
        log.warn(String.format("Not validating table: %s as update time was not found. %s",
                hiveDataset.getTable().getCompleteName(), e.getMessage()));
    }
}

From source file:gobblin.data.management.conversion.hive.validation.ValidationJob.java

/***
 * Validate all {@link Partition}s for a {@link Table} if it was updated recently by checking if its update time
 * lies in the window between maxLookBackTime and skipRecentThanTime.
 * @param hiveDataset {@link HiveDataset} containing {@link Table} and {@link Partition} info.
 * @param client {@link IMetaStoreClient} to query Hive.
 * @throws IOException Issue in validating {@link HiveDataset}
 */
private void processPartitionedTable(ConvertibleHiveDataset hiveDataset,
        AutoReturnableObject<IMetaStoreClient> client) throws IOException {

    // Get partitions for the table
    List<Partition> sourcePartitions = HiveUtils.getPartitions(client.get(), hiveDataset.getTable(),
            Optional.<String>absent());

    for (final String format : hiveDataset.getDestFormats()) {
        Optional<ConvertibleHiveDataset.ConversionConfig> conversionConfigOptional = hiveDataset
                .getConversionConfigForFormat(format);

        if (conversionConfigOptional.isPresent()) {

            // Get conversion config
            ConvertibleHiveDataset.ConversionConfig conversionConfig = conversionConfigOptional.get();
            String orcTableName = conversionConfig.getDestinationTableName();
            String orcTableDatabase = conversionConfig.getDestinationDbName();
            Pair<Optional<org.apache.hadoop.hive.metastore.api.Table>, Optional<List<Partition>>> destinationMeta = getDestinationTableMeta(
                    orcTableDatabase, orcTableName, this.props);

            // Validate each partition
            for (final Partition sourcePartition : sourcePartitions) {
                try {
                    final long updateTime = this.updateProvider.getUpdateTime(sourcePartition);
                    if (shouldValidate(sourcePartition)) {
                        log.info(String.format("Validating partition: %s", sourcePartition.getCompleteName()));

                        // Generate validation queries
                        final List<String> countValidationQueries = HiveValidationQueryGenerator
                                .generateCountValidationQueries(hiveDataset, Optional.of(sourcePartition),
                                        conversionConfig);
                        final List<String> dataValidationQueries = Lists
                                .newArrayList(HiveValidationQueryGenerator.generateDataValidationQuery(
                                        hiveDataset.getTable().getTableName(),
                                        hiveDataset.getTable().getDbName(), destinationMeta.getKey().get(),
                                        Optional.of(sourcePartition), this.isNestedORC));

                        this.futures.add(this.exec.submit(new Callable<Void>() {
                            @Override
                            public Void call() throws Exception {

                                // Execute validation queries
                                log.debug(String.format(
                                        "Going to execute count validation queries: %s for format: %s "
                                                + "and partition %s",
                                        countValidationQueries, format, sourcePartition.getCompleteName()));
                                List<Long> rowCounts = ValidationJob.this
                                        .getValidationOutputFromHive(countValidationQueries);
                                log.debug(String.format(
                                        "Going to execute data validation queries: %s for format: %s and partition %s",
                                        dataValidationQueries, format, sourcePartition.getCompleteName()));
                                List<Long> rowDataValidatedCount = ValidationJob.this
                                        .getValidationOutputFromHive(dataValidationQueries);

                                // Validate and populate report
                                validateAndPopulateReport(sourcePartition.getCompleteName(), updateTime,
                                        rowCounts, rowDataValidatedCount.get(0));

                                return null;
                            }
                        }));

                    } else {
                        log.debug(String.format(
                                "Not validating partition: %s as updateTime: %s is not in range of max look back: %s "
                                        + "and skip recent than: %s",
                                sourcePartition.getCompleteName(), updateTime, this.maxLookBackTime,
                                this.skipRecentThanTime));
                    }
                } catch (UncheckedExecutionException e) {
                    log.warn(String.format("Not validating partition: %s %s", sourcePartition.getCompleteName(),
                            e.getMessage()));
                } catch (UpdateNotFoundException e) {
                    log.warn(String.format("Not validating partition: %s as update time was not found. %s",
                            sourcePartition.getCompleteName(), e.getMessage()));
                }
            }
        } else {
            log.info(String.format("No conversion config found for format %s. Ignoring data validation",
                    format));
        }
    }
}

From source file:org.apache.gobblin.data.management.conversion.hive.validation.ValidationJob.java

/***
 * Validate all {@link Partition}s for a {@link Table} if it was updated recently by checking if its update time
 * lies in the window between maxLookBackTime and skipRecentThanTime.
 * @param hiveDataset {@link HiveDataset} containing {@link Table} and {@link Partition} info.
 * @param client {@link IMetaStoreClient} to query Hive.
 * @throws IOException Issue in validating {@link HiveDataset}
 */
private void processPartitionedTable(ConvertibleHiveDataset hiveDataset,
        AutoReturnableObject<IMetaStoreClient> client) throws IOException {

    // Get partitions for the table
    List<Partition> sourcePartitions = HiveUtils.getPartitions(client.get(), hiveDataset.getTable(),
            Optional.<String>absent());

    for (final String format : hiveDataset.getDestFormats()) {
        Optional<ConvertibleHiveDataset.ConversionConfig> conversionConfigOptional = hiveDataset
                .getConversionConfigForFormat(format);

        if (conversionConfigOptional.isPresent()) {

            // Get conversion config
            ConvertibleHiveDataset.ConversionConfig conversionConfig = conversionConfigOptional.get();
            String orcTableName = conversionConfig.getDestinationTableName();
            String orcTableDatabase = conversionConfig.getDestinationDbName();
            Pair<Optional<org.apache.hadoop.hive.metastore.api.Table>, Optional<List<Partition>>> destinationMeta = HiveConverterUtils
                    .getDestinationTableMeta(orcTableDatabase, orcTableName, this.props);

            // Validate each partition
            for (final Partition sourcePartition : sourcePartitions) {
                try {
                    final long updateTime = this.updateProvider.getUpdateTime(sourcePartition);
                    if (shouldValidate(sourcePartition)) {
                        log.info(String.format("Validating partition: %s", sourcePartition.getCompleteName()));

                        // Generate validation queries
                        final List<String> countValidationQueries = HiveValidationQueryGenerator
                                .generateCountValidationQueries(hiveDataset, Optional.of(sourcePartition),
                                        conversionConfig);
                        final List<String> dataValidationQueries = Lists
                                .newArrayList(HiveValidationQueryGenerator.generateDataValidationQuery(
                                        hiveDataset.getTable().getTableName(),
                                        hiveDataset.getTable().getDbName(), destinationMeta.getKey().get(),
                                        Optional.of(sourcePartition), this.isNestedORC));

                        this.futures.add(this.exec.submit(new Callable<Void>() {
                            @Override
                            public Void call() throws Exception {

                                // Execute validation queries
                                log.debug(String.format(
                                        "Going to execute count validation queries: %s for format: %s "
                                                + "and partition %s",
                                        countValidationQueries, format, sourcePartition.getCompleteName()));
                                List<Long> rowCounts = ValidationJob.this
                                        .getValidationOutputFromHive(countValidationQueries);
                                log.debug(String.format(
                                        "Going to execute data validation queries: %s for format: %s and partition %s",
                                        dataValidationQueries, format, sourcePartition.getCompleteName()));
                                List<Long> rowDataValidatedCount = ValidationJob.this
                                        .getValidationOutputFromHive(dataValidationQueries);

                                // Validate and populate report
                                validateAndPopulateReport(sourcePartition.getCompleteName(), updateTime,
                                        rowCounts, rowDataValidatedCount);

                                return null;
                            }
                        }));

                    } else {
                        log.debug(String.format(
                                "Not validating partition: %s as updateTime: %s is not in range of max look back: %s "
                                        + "and skip recent than: %s",
                                sourcePartition.getCompleteName(), updateTime, this.maxLookBackTime,
                                this.skipRecentThanTime));
                    }
                } catch (UncheckedExecutionException e) {
                    log.warn(String.format("Not validating partition: %s %s", sourcePartition.getCompleteName(),
                            e.getMessage()));
                } catch (UpdateNotFoundException e) {
                    log.warn(String.format("Not validating partition: %s as update time was not found. %s",
                            sourcePartition.getCompleteName(), e.getMessage()));
                }
            }
        } else {
            log.info(String.format("No conversion config found for format %s. Ignoring data validation",
                    format));
        }
    }
}

From source file:org.opendaylight.vpnservice.natservice.internal.NaptManager.java

/**
 * Method to get an external ip/port mapping when provided with an internal ip/port pair.
 * If a mapping already exists for the given input, the existing mapping is returned
 * instead of overwriting it with a new ip/port pair.
 * @param segmentId
 * @param sourceAddress - internal ip address/port pair
 * @return external ip address/port
 */
public SessionAddress getExternalAddressMapping(long segmentId, SessionAddress sourceAddress,
        NAPTEntryEvent.Protocol protocol) {
    LOG.debug("NAPT Service : getExternalAddressMapping called with segmentId {}, internalIp {} and port {}",
            segmentId, sourceAddress.getIpAddress(), sourceAddress.getPortNumber());
    /*
     1. Get Internal IP, Port in IP:Port format
     2. Inside DB with routerId get the list of entries and check if it matches with existing IP:Port
     3. If True return SessionAddress of ExternalIp and Port
     4. Else check ip Map and Form the ExternalIp and Port and update DB and then return ExternalIp and Port
     */

    //SessionAddress externalIpPort = new SessionAddress();
    String internalIpPort = new StringBuilder(64).append(sourceAddress.getIpAddress()).append(":")
            .append(sourceAddress.getPortNumber()).toString();

    // First check existing Port Map.
    SessionAddress existingIpPort = checkIpPortMap(segmentId, internalIpPort, protocol);
    if (existingIpPort != null) {
        // populate externalIpPort from IpPortMap and return
        LOG.debug("NAPT Service : getExternalAddressMapping successfully returning existingIpPort as {} and {}",
                existingIpPort.getIpAddress(), existingIpPort.getPortNumber());
        return existingIpPort;
    } else {
        // Now check in ip-map
        String externalIp = checkIpMap(segmentId, sourceAddress.getIpAddress());
        if (externalIp == null) {
            LOG.error(
                    "NAPT Service : getExternalAddressMapping, Unexpected error, internal to external ip map does not exist");
            return null;
        } else {
            /* Logic assuming internalIp is always ip and not subnet
             * case 1: externalIp is ip
             *        a) goto externalIp pool and getPort and return
             *        b) else return error
             * case 2: externalIp is subnet
             *        a) Take first externalIp and goto that Pool and getPort
             *             if port -> return
             *             else Take second externalIp and create that Pool and getPort
             *             if port ->return
             *             else
             *             Continue same with third externalIp till we exhaust subnet
             *        b) Nothing worked return error
             */
            SubnetUtils externalIpSubnet;
            List<String> allIps = new ArrayList<String>();
            String subnetPrefix = "/" + String.valueOf(NatConstants.DEFAULT_PREFIX);
            if (!externalIp.contains(subnetPrefix)) {
                EXTSUBNET_FLAG = true;
                externalIpSubnet = new SubnetUtils(externalIp);
                allIps = Arrays.asList(externalIpSubnet.getInfo().getAllAddresses());
                LOG.debug("NAPT Service : total count of externalIps available {}",
                        externalIpSubnet.getInfo().getAddressCount());
            } else {
                LOG.debug("NAPT Service : getExternalAddress single ip case");
                if (externalIp.contains(subnetPrefix)) {
                    String[] externalIpSplit = externalIp.split("/");
                    String extIp = externalIpSplit[0];
                    externalIp = extIp; //remove /32 what we got from checkIpMap
                }
                allIps.add(externalIp);
            }

            for (String extIp : allIps) {
                LOG.info("NAPT Service : Looping externalIPs with externalIP now as {}", extIp);
                if (NEXT_EXTIP_FLAG) {
                    createNaptPortPool(extIp);
                    LOG.debug("NAPT Service : Created Pool for next Ext IP {}", extIp);
                }
                AllocateIdInput getIdInput = new AllocateIdInputBuilder().setPoolName(extIp)
                        .setIdKey(internalIpPort).build();
                try {
                    Future<RpcResult<AllocateIdOutput>> result = idManager.allocateId(getIdInput);
                    RpcResult<AllocateIdOutput> rpcResult;
                    if ((result != null) && (result.get().isSuccessful())) {
                        LOG.debug("NAPT Service : Got id from idManager");
                        rpcResult = result.get();
                    } else {
                        LOG.error(
                                "NAPT Service : getExternalAddressMapping, idManager could not allocate id retry if subnet");
                        if (!EXTSUBNET_FLAG) {
                            LOG.error(
                                    "NAPT Service : getExternalAddressMapping returning null for single IP case, may be ports exhausted");
                            return null;
                        }
                        LOG.debug(
                                "NAPT Service : Could be ports exhausted case, try with another externalIP if possible");
                        NEXT_EXTIP_FLAG = true;
                        continue;
                    }
                    int extPort = rpcResult.getResult().getIdValue().intValue();
                    SessionAddress externalIpPort = new SessionAddress(extIp, extPort);
                    // Write to ip-port-map before returning
                    IpPortExternalBuilder ipExt = new IpPortExternalBuilder();
                    IpPortExternal ipPortExt = ipExt.setIpAddress(extIp).setPortNum(extPort).build();
                    IpPortMap ipm = new IpPortMapBuilder().setKey(new IpPortMapKey(internalIpPort))
                            .setIpPortInternal(internalIpPort).setIpPortExternal(ipPortExt).build();
                    LOG.debug(
                            "NAPT Service : getExternalAddressMapping writing into ip-port-map with externalIP {} and port {}",
                            ipPortExt.getIpAddress(), ipPortExt.getPortNum());
                    try {
                        MDSALUtil.syncWrite(broker, LogicalDatastoreType.CONFIGURATION,
                                getIpPortMapIdentifier(segmentId, internalIpPort, protocol), ipm);
                    } catch (UncheckedExecutionException uee) {
                        LOG.error("NAPT Service : Failed to write into ip-port-map with exception {}",
                                uee.getMessage());
                    }

                    // Write to snat-internal-ip-port-info
                    String internalIpAddress = sourceAddress.getIpAddress();
                    int ipPort = sourceAddress.getPortNumber();
                    ProtocolTypes protocolType = NatUtil.getProtocolType(protocol);
                    List<Integer> portList = NatUtil.getInternalIpPortListInfo(broker, segmentId,
                            internalIpAddress, protocolType);
                    if (portList == null) {
                        portList = Lists.newArrayList();
                    }
                    portList.add(ipPort);

                    IntIpProtoTypeBuilder builder = new IntIpProtoTypeBuilder();
                    IntIpProtoType intIpProtocolType = builder.setKey(new IntIpProtoTypeKey(protocolType))
                            .setPorts(portList).build();
                    try {
                        MDSALUtil.syncWrite(broker, LogicalDatastoreType.CONFIGURATION, NatUtil
                                .buildSnatIntIpPortIdentifier(segmentId, internalIpAddress, protocolType),
                                intIpProtocolType);
                    } catch (Exception ex) {
                        LOG.error(
                                "NAPT Service : Failed to write into snat-internal-ip-port-info with exception {}",
                                ex.getMessage());
                    }

                    LOG.debug(
                            "NAPT Service : getExternalAddressMapping successfully returning externalIP {} and port {}",
                            externalIpPort.getIpAddress(), externalIpPort.getPortNumber());
                    return externalIpPort;
                } catch (InterruptedException | ExecutionException e) {
                    LOG.error("NAPT Service : getExternalAddressMapping, Exception caught  {}", e);
                    return null;
                }
            } // end of for loop
        } // end of else ipmap present
    } // end of else check ipmap
    LOG.error(
            "NAPT Service: getExternalAddressMapping returning null, nothing worked or externalIPs exhausted");
    return null;
}