Example usage for org.apache.hadoop.yarn.api.records ContainerReport getAllocatedResource

Introduction

On this page you can find example usage for org.apache.hadoop.yarn.api.records ContainerReport getAllocatedResource.

Prototype

@Public
@Unstable
public abstract Resource getAllocatedResource();

Document

Get the allocated Resource of the container.
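
Before the full test below, here is a minimal sketch of how getAllocatedResource() is typically read from the ContainerReport instances returned by a YarnClient. The class name, the helper method, and the way the ApplicationId is supplied are illustrative assumptions and not part of the example on this page.

import java.util.List;

import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerReport;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

// Hypothetical helper class for illustration only.
public class ContainerResourceLister {

    /**
     * Prints the allocated memory and vcores of every container belonging to
     * the current attempt of the given application.
     */
    public static void printAllocatedResources(ApplicationId applicationId) throws Exception {
        final YarnClient yarnClient = YarnClient.createYarnClient();
        yarnClient.init(new YarnConfiguration());
        yarnClient.start();

        try {
            final ApplicationAttemptId attemptId = yarnClient.getApplicationReport(applicationId)
                    .getCurrentApplicationAttemptId();

            final List<ContainerReport> containers = yarnClient.getContainers(attemptId);

            for (ContainerReport container : containers) {
                // getAllocatedResource() returns the Resource granted to this container
                final Resource allocated = container.getAllocatedResource();
                System.out.println(container.getContainerId() + ": " + allocated.getMemory() + " MB, "
                        + allocated.getVirtualCores() + " vcores");
            }
        } finally {
            yarnClient.stop();
        }
    }
}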

Usage

From source file: org.apache.flink.yarn.YarnConfigurationITCase.java

License: Apache License

/**
 * Tests that the Flink components are started with the correct
 * memory settings.
 */
@Test(timeout = 60000)
public void testFlinkContainerMemory() throws Exception {
    final YarnClient yarnClient = getYarnClient();
    final Configuration configuration = new Configuration(flinkConfiguration);

    final int masterMemory = 64;
    final int taskManagerMemory = 128;
    final int slotsPerTaskManager = 3;

    // disable heap cutoff min
    configuration.setInteger(ResourceManagerOptions.CONTAINERIZED_HEAP_CUTOFF_MIN, 0);
    configuration.setString(TaskManagerOptions.NETWORK_BUFFERS_MEMORY_MIN, String.valueOf(1L << 20));
    configuration.setString(TaskManagerOptions.NETWORK_BUFFERS_MEMORY_MAX, String.valueOf(4L << 20));

    final YarnConfiguration yarnConfiguration = getYarnConfiguration();
    final YarnClusterDescriptor clusterDescriptor = new YarnClusterDescriptor(configuration, yarnConfiguration,
            CliFrontend.getConfigurationDirectoryFromEnv(), yarnClient, true);

    clusterDescriptor.setLocalJarPath(new Path(flinkUberjar.getAbsolutePath()));
    clusterDescriptor.addShipFiles(Arrays.asList(flinkLibFolder.listFiles()));

    final File streamingWordCountFile = getTestJarPath("WindowJoin.jar");

    final PackagedProgram packagedProgram = new PackagedProgram(streamingWordCountFile);
    final JobGraph jobGraph = PackagedProgramUtils.createJobGraph(packagedProgram, configuration, 1);

    try {
        final ClusterSpecification clusterSpecification = new ClusterSpecification.ClusterSpecificationBuilder()
                .setMasterMemoryMB(masterMemory).setTaskManagerMemoryMB(taskManagerMemory)
                .setSlotsPerTaskManager(slotsPerTaskManager).createClusterSpecification();

        final ClusterClient<ApplicationId> clusterClient = clusterDescriptor
                .deployJobCluster(clusterSpecification, jobGraph, true);

        final ApplicationId clusterId = clusterClient.getClusterId();

        final RestClient restClient = new RestClient(RestClientConfiguration.fromConfiguration(configuration),
                TestingUtils.defaultExecutor());

        try {
            final ApplicationReport applicationReport = yarnClient.getApplicationReport(clusterId);

            final ApplicationAttemptId currentApplicationAttemptId = applicationReport
                    .getCurrentApplicationAttemptId();

            // wait until we have the second container allocated
            List<ContainerReport> containers = yarnClient.getContainers(currentApplicationAttemptId);

            while (containers.size() < 2) {
                // this is nasty but Yarn does not offer a better way to wait
                Thread.sleep(50L);
                containers = yarnClient.getContainers(currentApplicationAttemptId);
            }

            for (ContainerReport container : containers) {
                if (container.getContainerId().getId() == 1) {
                    // this should be the application master
                    assertThat(container.getAllocatedResource().getMemory(), is(masterMemory));
                } else {
                    assertThat(container.getAllocatedResource().getMemory(), is(taskManagerMemory));
                }
            }

            final URI webURI = new URI(clusterClient.getWebInterfaceURL());

            CompletableFuture<TaskManagersInfo> taskManagersInfoCompletableFuture;
            Collection<TaskManagerInfo> taskManagerInfos;

            while (true) {
                taskManagersInfoCompletableFuture = restClient.sendRequest(webURI.getHost(), webURI.getPort(),
                        TaskManagersHeaders.getInstance(), EmptyMessageParameters.getInstance(),
                        EmptyRequestBody.getInstance());

                final TaskManagersInfo taskManagersInfo = taskManagersInfoCompletableFuture.get();

                taskManagerInfos = taskManagersInfo.getTaskManagerInfos();

                // wait until the task manager has registered and reported its slots
                if (hasTaskManagerConnectedAndReportedSlots(taskManagerInfos)) {
                    break;
                } else {
                    Thread.sleep(100L);
                }
            }

            // there should be at least one TaskManagerInfo
            final TaskManagerInfo taskManagerInfo = taskManagerInfos.iterator().next();

            assertThat(taskManagerInfo.getNumberSlots(), is(slotsPerTaskManager));

            final ContaineredTaskManagerParameters containeredTaskManagerParameters = ContaineredTaskManagerParameters
                    .create(configuration, taskManagerMemory, slotsPerTaskManager);

            final long expectedHeapSize = containeredTaskManagerParameters.taskManagerHeapSizeMB() << 20L;

            // We compare here the physical memory assigned to a container with the heap memory that we should
            // pass to the JVM as the Xmx parameter. Those values might differ significantly due to system page
            // size or JVM implementation, therefore we use a 15% threshold here.
            assertThat((double) taskManagerInfo.getHardwareDescription().getSizeOfJvmHeap()
                    / (double) expectedHeapSize, is(closeTo(1.0, 0.15)));
        } finally {
            restClient.shutdown(TIMEOUT);
            clusterClient.shutdown();
        }

        clusterDescriptor.killCluster(clusterId);

    } finally {
        clusterDescriptor.close();
    }
}
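
In this test, the container whose ID is 1 is taken to be the application master, so its getAllocatedResource().getMemory() is compared against the configured master memory, while every other container is compared against the task manager memory. The loop around yarnClient.getContainers(...) simply polls until both containers have been allocated, since YARN does not offer a callback for this.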