Example usage for org.apache.hadoop.yarn.api.records ContainerId toString

List of usage examples for org.apache.hadoop.yarn.api.records ContainerId toString

Introduction

On this page you can find example usages of org.apache.hadoop.yarn.api.records.ContainerId.toString().

Prototype

@Override
public String toString() 
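
The snippet below is a minimal, illustrative sketch (not taken from the projects listed under Usage) showing how a ContainerId can be assembled with the YARN records factory methods and what its toString() form looks like; ApplicationId.newInstance, ApplicationAttemptId.newInstance and ContainerId.newContainerId are assumed to be available (Hadoop 2.6 or later), and the timestamp and ids are arbitrary example values.

import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;

public class ContainerIdToStringExample {
    public static void main(String[] args) {
        // Assemble a ContainerId from its parts; the values are arbitrary examples.
        ApplicationId appId = ApplicationId.newInstance(1410901177871L, 1);
        ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
        ContainerId containerId = ContainerId.newContainerId(attemptId, 5);

        // toString() yields the canonical form, e.g. "container_1410901177871_0001_01_000005",
        // which is what the usages below embed in metric names, log paths and database keys.
        System.out.println(containerId.toString());
    }
}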

Usage

From source file: gobblin.yarn.ContainerMetrics.java

License: Open Source License

private static String name(ContainerId containerId) {
    return "gobblin.metrics." + containerId.toString();
}
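
Assuming a container id such as container_1410901177871_0001_01_000005, the metric name produced above would resolve to gobblin.metrics.container_1410901177871_0001_01_000005.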

From source file: gobblin.yarn.ContainerMetrics.java

License: Open Source License

private static List<Tag<?>> tagsForContainer(State containerState, String applicationName,
        ContainerId containerId) {
    ImmutableList.Builder<Tag<?>> tags = new ImmutableList.Builder<>();
    tags.add(new Tag<>(GobblinYarnMetricTagNames.YARN_APPLICATION_NAME, applicationName));
    tags.add(new Tag<>(GobblinYarnMetricTagNames.YARN_APPLICATION_ID,
            containerId.getApplicationAttemptId().getApplicationId().toString()));
    tags.add(new Tag<>(GobblinYarnMetricTagNames.YARN_APPLICATION_ATTEMPT_ID,
            containerId.getApplicationAttemptId().toString()));
    tags.add(new Tag<>(GobblinYarnMetricTagNames.CONTAINER_ID, containerId.toString()));
    tags.addAll(getCustomTagsFromState(containerState));
    return tags.build();
}

From source file: gobblin.yarn.GobblinYarnLogSource.java

License: Apache License

/**
 * Build a {@link LogCopier} instance used to copy the logs out from this {@link GobblinYarnLogSource}.
 *
 * @param config the {@link Config} use to create the {@link LogCopier}
 * @param containerId the {@link ContainerId} of the container the {@link LogCopier} runs in
 * @param destFs the destination {@link FileSystem}
 * @param appWorkDir the Gobblin Yarn application working directory on HDFS
 * @return a {@link LogCopier} instance
 * @throws IOException if it fails on any IO operation
 */
protected LogCopier buildLogCopier(Config config, ContainerId containerId, FileSystem destFs, Path appWorkDir)
        throws IOException {
    LogCopier.Builder builder = LogCopier.newBuilder()
            .useSrcFileSystem(FileSystem.getLocal(new Configuration())).useDestFileSystem(destFs)
            .readFrom(getLocalLogDir()).writeTo(getHdfsLogDir(containerId, destFs, appWorkDir))
            .acceptsLogFileExtensions(ImmutableSet.of(ApplicationConstants.STDOUT, ApplicationConstants.STDERR))
            .useLogFileNamePrefix(containerId.toString());
    if (config.hasPath(GobblinYarnConfigurationKeys.LOG_COPIER_MAX_FILE_SIZE)) {
        builder.useMaxBytesPerLogFile(config.getBytes(GobblinYarnConfigurationKeys.LOG_COPIER_MAX_FILE_SIZE));
    }
    if (config.hasPath(GobblinYarnConfigurationKeys.LOG_COPIER_SCHEDULER)) {
        builder.useScheduler(config.getString(GobblinYarnConfigurationKeys.LOG_COPIER_SCHEDULER));
    }
    return builder.build();
}

From source file: gobblin.yarn.GobblinYarnLogSource.java

License: Apache License

private Path getHdfsLogDir(ContainerId containerId, FileSystem destFs, Path appWorkDir) throws IOException {
    Path logRootDir = new Path(appWorkDir, GobblinYarnConfigurationKeys.APP_LOGS_DIR_NAME);
    if (!destFs.exists(logRootDir)) {
        destFs.mkdirs(logRootDir);
    }

    return new Path(logRootDir, containerId.toString());
}
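
With the canonical container id string, the per-container log directory returned here would look like <appWorkDir>/<APP_LOGS_DIR_NAME>/container_1410901177871_0001_01_000005; the actual directory-name constant comes from GobblinYarnConfigurationKeys and is not shown in this snippet.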

From source file: gobblin.yarn.GobblinYarnTaskRunner.java

License: Apache License

private static String getTaskRunnerId(ContainerId containerId) {
    return containerId.toString();
}

From source file: io.hops.ha.common.FiCaSchedulerAppInfo.java

License: Apache License

private void persistLiveContainersToAdd() throws StorageException {
    if (liveContainersToAdd != null) {
        //Persist LiveContainers
        RMContainerDataAccess rmcDA = (RMContainerDataAccess) RMStorageFactory
                .getDataAccess(RMContainerDataAccess.class);
        List<RMContainer> toAddRMContainers = new ArrayList<RMContainer>();
        ContainerDataAccess cDA = (ContainerDataAccess) RMStorageFactory
                .getDataAccess(ContainerDataAccess.class);
        List<Container> toAddContainers = new ArrayList<Container>();
        FiCaSchedulerAppLiveContainersDataAccess fsalcDA = (FiCaSchedulerAppLiveContainersDataAccess) RMStorageFactory
                .getDataAccess(FiCaSchedulerAppLiveContainersDataAccess.class);
        List<FiCaSchedulerAppLiveContainers> toAddLiveContainers = new ArrayList<FiCaSchedulerAppLiveContainers>();
        for (ContainerId key : liveContainersToAdd.keySet()) {
            if (liveContainersToRemove == null || liveContainersToRemove.remove(key) == null) {
                LOG.debug("adding LiveContainers " + key + " for " + applicationAttemptId.toString());
                toAddLiveContainers.add(
                        new FiCaSchedulerAppLiveContainers(applicationAttemptId.toString(), key.toString()));
                org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer rmContainer = liveContainersToAdd
                        .get(key);
                RMContainer hopRMContainer = new RMContainer(rmContainer.getContainerId().toString(),
                        rmContainer.getApplicationAttemptId().toString(), rmContainer.getNodeId().toString(),
                        rmContainer.getUser(),
                        //                rmContainer.getReservedNode(),
                        //                asdfjldsakj,
                        rmContainer.getStartTime(), rmContainer.getFinishTime(),
                        rmContainer.getState().toString(),
                        //                rmContainer.getReservedNode().getHost(),
                        //                rmContainer.getReservedNode().getPort(),
                        ((RMContainerImpl) rmContainer).getContainerState().toString(),
                        ((RMContainerImpl) rmContainer).getContainerExitStatus());

                toAddRMContainers.add(hopRMContainer);

                Container hopContainer = new Container(rmContainer.getContainerId().toString(),
                        ((ContainerPBImpl) rmContainer.getContainer()).getProto().toByteArray());
                LOG.debug("adding ha_container " + hopContainer.getContainerId());
                toAddContainers.add(hopContainer);
            }
        }
        rmcDA.addAll(toAddRMContainers);
        cDA.addAll(toAddContainers);
        fsalcDA.addAll(toAddLiveContainers);
    }
}

From source file: io.hops.ha.common.FiCaSchedulerAppInfo.java

License: Apache License

private void persistLiveContainersToRemove() throws StorageException {
    if (liveContainersToRemove != null && !liveContainersToRemove.isEmpty()) {
        FiCaSchedulerAppLiveContainersDataAccess fsalcDA = (FiCaSchedulerAppLiveContainersDataAccess) RMStorageFactory
                .getDataAccess(FiCaSchedulerAppLiveContainersDataAccess.class);
        List<FiCaSchedulerAppLiveContainers> toRemoveLiveContainers = new ArrayList<FiCaSchedulerAppLiveContainers>();
        for (ContainerId key : liveContainersToRemove.keySet()) {
            LOG.debug("remove LiveContainers " + key + " for " + applicationAttemptId.toString());
            toRemoveLiveContainers
                    .add(new FiCaSchedulerAppLiveContainers(applicationAttemptId.toString(), key.toString()));
        }
        fsalcDA.removeAll(toRemoveLiveContainers);
    }
}

From source file: io.hops.util.DBUtility.java

License: Apache License

public static void removeContainersToClean(final Set<ContainerId> containers,
        final org.apache.hadoop.yarn.api.records.NodeId nodeId) throws IOException {
    long start = System.currentTimeMillis();
    AsyncLightWeightRequestHandler removeContainerToClean = new AsyncLightWeightRequestHandler(
            YARNOperationType.TEST) {
        @Override
        public Object performTask() throws StorageException {
            connector.beginTransaction();
            connector.writeLock();
            ContainerIdToCleanDataAccess ctcDA = (ContainerIdToCleanDataAccess) RMStorageFactory
                    .getDataAccess(ContainerIdToCleanDataAccess.class);
            List<io.hops.metadata.yarn.entity.ContainerId> containersToClean = new ArrayList<io.hops.metadata.yarn.entity.ContainerId>();
            for (ContainerId cid : containers) {
                containersToClean
                        .add(new io.hops.metadata.yarn.entity.ContainerId(nodeId.toString(), cid.toString()));
            }
            ctcDA.removeAll(containersToClean);
            connector.commit();
            return null;
        }
    };

    removeContainerToClean.handle();
    long duration = System.currentTimeMillis() - start;
    if (duration > 10) {
        LOG.error("too long " + duration);
    }
}

From source file: io.hops.util.DBUtility.java

License: Apache License

public static void addContainerToClean(final ContainerId containerId,
        final org.apache.hadoop.yarn.api.records.NodeId nodeId) throws IOException {
    long start = System.currentTimeMillis();
    AsyncLightWeightRequestHandler addContainerToClean = new AsyncLightWeightRequestHandler(
            YARNOperationType.TEST) {
        @Override
        public Object performTask() throws StorageException {
            connector.beginTransaction();
            connector.writeLock();
            ContainerIdToCleanDataAccess ctcDA = (ContainerIdToCleanDataAccess) RMStorageFactory
                    .getDataAccess(ContainerIdToCleanDataAccess.class);
            ctcDA.add(new io.hops.metadata.yarn.entity.ContainerId(nodeId.toString(), containerId.toString()));
            connector.commit();
            return null;
        }
    };
    addContainerToClean.handle();
    long duration = System.currentTimeMillis() - start;
    if (duration > 10) {
        LOG.error("too long " + duration);
    }
}
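
In both DBUtility methods, the ContainerId string (together with the NodeId string) forms the key of the io.hops.metadata.yarn.entity.ContainerId rows that are added to or removed from the backing store.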

From source file: org.apache.gobblin.yarn.GobblinApplicationMaster.java

License: Apache License

public GobblinApplicationMaster(String applicationName, ContainerId containerId, Config config,
        YarnConfiguration yarnConfiguration) throws Exception {
    super(applicationName, containerId.getApplicationAttemptId().getApplicationId().toString(),
            GobblinClusterUtils.addDynamicConfig(config), Optional.<Path>absent());

    String containerLogDir = config.getString(GobblinYarnConfigurationKeys.LOGS_SINK_ROOT_DIR_KEY);
    GobblinYarnLogSource gobblinYarnLogSource = new GobblinYarnLogSource();
    if (gobblinYarnLogSource.isLogSourcePresent()) {
        Path appWorkDir = PathUtils.combinePaths(containerLogDir,
                GobblinClusterUtils.getAppWorkDirPath(this.clusterName, this.applicationId), "AppMaster");
        this.applicationLauncher.addService(
                gobblinYarnLogSource.buildLogCopier(this.config, containerId.toString(), this.fs, appWorkDir));
    }

    this.yarnService = buildYarnService(this.config, applicationName, this.applicationId, yarnConfiguration,
            this.fs);
    this.applicationLauncher.addService(this.yarnService);

    if (UserGroupInformation.isSecurityEnabled()) {
        LOGGER.info("Adding YarnContainerSecurityManager since security is enabled");
        this.applicationLauncher.addService(buildYarnContainerSecurityManager(this.config, this.fs));
    }

    // Add additional services
    List<String> serviceClassNames = ConfigUtils.getStringList(this.config,
            GobblinYarnConfigurationKeys.APP_MASTER_SERVICE_CLASSES);

    for (String serviceClassName : serviceClassNames) {
        Class<?> serviceClass = Class.forName(serviceClassName);
        this.applicationLauncher
                .addService((Service) GobblinConstructorUtils.invokeLongestConstructor(serviceClass, this));
    }
}