Example usage for java.util.concurrent TimeUnit NANOSECONDS

List of usage examples for java.util.concurrent TimeUnit NANOSECONDS

Introduction

This page collects example usages of java.util.concurrent TimeUnit NANOSECONDS, drawn from a variety of open source projects.

Prototype

TimeUnit NANOSECONDS

To view the full source code for java.util.concurrent TimeUnit NANOSECONDS, click the Source link.

Document

Time unit representing one thousandth of a microsecond.
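
As a quick, self-contained illustration of the unit, NANOSECONDS converts to and from the other units via the toXxx and convert methods:

import java.util.concurrent.TimeUnit;

public class NanosDemo {
    public static void main(String[] args) {
        long nanos = TimeUnit.MILLISECONDS.toNanos(1500); // 1,500,000,000 ns
        System.out.println(TimeUnit.NANOSECONDS.toMicros(nanos));  // 1500000
        System.out.println(TimeUnit.NANOSECONDS.toMillis(nanos));  // 1500
        System.out.println(TimeUnit.NANOSECONDS.toSeconds(nanos)); // 1 (truncates)
        // convert(duration, sourceUnit) is the general form of the same conversion
        System.out.println(TimeUnit.SECONDS.convert(nanos, TimeUnit.NANOSECONDS)); // 1
    }
}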

Usage

From source file:com.netflix.genie.security.oauth2.pingfederate.PingFederateRemoteTokenServices.java

/**
 * Constructor.
 *
 * @param serverProperties The properties of the resource server (Genie)
 * @param converter        The access token converter to use
 * @param registry         The metrics registry to use
 */
public PingFederateRemoteTokenServices(@NotNull final ResourceServerProperties serverProperties,
        @NotNull final AccessTokenConverter converter, @NotNull final MeterRegistry registry) {
    super();
    this.authenticationTimer = registry.timer(AUTHENTICATION_TIMER_NAME);
    this.pingFederateAPITimer = registry.timer(API_TIMER_NAME);
    final HttpComponentsClientHttpRequestFactory factory = new HttpComponentsClientHttpRequestFactory();
    factory.setConnectTimeout(2000);
    factory.setReadTimeout(10000);
    final RestTemplate restTemplate = new RestTemplate(factory);
    final List<ClientHttpRequestInterceptor> interceptors = new ArrayList<>();
    interceptors
            .add((final HttpRequest request, final byte[] body, final ClientHttpRequestExecution execution) -> {
                final long start = System.nanoTime();
                try {
                    return execution.execute(request, body);
                } finally {
                    pingFederateAPITimer.record(System.nanoTime() - start, TimeUnit.NANOSECONDS);
                }
            });
    restTemplate.setInterceptors(interceptors);
    restTemplate.setErrorHandler(new DefaultResponseErrorHandler() {
        // Ignore 400
        @Override
        public void handleError(final ClientHttpResponse response) throws IOException {
            final int errorCode = response.getRawStatusCode();
            registry.counter(TOKEN_VALIDATION_ERROR_COUNTER_NAME,
                    Sets.newHashSet(Tag.of(MetricsConstants.TagKeys.STATUS, Integer.toString(errorCode))))
                    .increment();
            if (response.getRawStatusCode() != HttpStatus.BAD_REQUEST.value()) {
                super.handleError(response);
            }
        }
    });

    this.setRestTemplate(restTemplate);

    this.checkTokenEndpointUrl = serverProperties.getTokenInfoUri();
    this.clientId = serverProperties.getClientId();
    this.clientSecret = serverProperties.getClientSecret();

    Assert.state(StringUtils.isNotBlank(this.checkTokenEndpointUrl), "Check Endpoint URL is required");
    Assert.state(StringUtils.isNotBlank(this.clientId), "Client ID is required");
    Assert.state(StringUtils.isNotBlank(this.clientSecret), "Client secret is required");

    log.debug("checkTokenEndpointUrl = {}", this.checkTokenEndpointUrl);
    log.debug("clientId = {}", this.clientId);
    log.debug("clientSecret = {}", this.clientSecret);

    this.converter = converter;
}
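
The interceptor above measures each Ping Federate call as a System.nanoTime() delta and records it in NANOSECONDS. Stripped of the Micrometer wiring, the measure-then-record pattern reduces to a minimal sketch like this (LongConsumer stands in for the timer):

import java.util.concurrent.TimeUnit;
import java.util.function.LongConsumer;
import java.util.function.Supplier;

public class NanoTimed {
    // Take a System.nanoTime() delta in a finally block so the duration is
    // recorded whether the work succeeds or throws.
    public static <T> T time(Supplier<T> work, LongConsumer recordNanos) {
        final long start = System.nanoTime();
        try {
            return work.get();
        } finally {
            recordNanos.accept(System.nanoTime() - start);
        }
    }

    public static void main(String[] args) {
        String result = time(() -> "done",
                nanos -> System.out.printf("took %d ms%n", TimeUnit.NANOSECONDS.toMillis(nanos)));
        System.out.println(result);
    }
}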

From source file:com.blacklocus.qs.worker.util.log.SamplingQSLogServiceTest.java

@Test
public void testSampledLifeCycle() throws InterruptedException {

    // With these parameters, by far most logger interactions should be filtered out, very few sampled in.
    final int numThreads = 64, iterations = 200, processingJitterMaxMs = 16, noSoonerThanMs = 100;

    final Set<String> sampledTaskIds = Collections.newSetFromMap(new ConcurrentHashMap<String, Boolean>());

    final QSLogService logService = Mockito.mock(QSLogService.class);
    // track which logging interactions were allowed through (sampled in)
    Mockito.doAnswer(new Answer() {
        @Override
        public Object answer(InvocationOnMock invocation) throws Throwable {
            sampledTaskIds.add(((QSTaskModel) invocation.getArguments()[0]).taskId);
            return null;
        }
    }).when(logService).startedTask(Matchers.any(QSTaskModel.class));
    Mockito.doAnswer(new Answer() {
        @Override
        public Object answer(InvocationOnMock invocation) throws Throwable {
            sampledTaskIds.add(((QSLogModel) invocation.getArguments()[0]).taskId);
            return null; //TODO jason
        }
    }).when(logService).log(Matchers.any(QSLogModel.class));
    Mockito.doAnswer(new Answer() {
        @Override
        public Object answer(InvocationOnMock invocation) throws Throwable {
            sampledTaskIds.add(((QSTaskModel) invocation.getArguments()[0]).taskId);
            return null; //TODO jason
        }
    }).when(logService).completedTask(Matchers.any(QSTaskModel.class));

    Predicate<QSTaskModel> taskPredicate = SamplingPredicates.noSoonerThan(noSoonerThanMs,
            TimeUnit.MILLISECONDS);
    final QSLogService sampledLogService = new SamplingQSLogService(logService, taskPredicate);

    long startNs = System.nanoTime();
    ExecutorService threads = Executors.newFixedThreadPool(numThreads);
    for (int i = 0; i < numThreads; i++) {
        threads.submit(new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                LOG.debug("Thread start {}", Thread.currentThread().getName());
                for (int i = 0; i < iterations; i++) {

                    String taskId = UUID.randomUUID().toString();

                    // simulate task processing, some have logs, some don't, processing time varies between each step
                    QSTaskModel task = new QSTaskModel();
                    task.taskId = taskId;
                    Thread.sleep(RandomUtils.nextInt(processingJitterMaxMs));

                    sampledLogService.startedTask(task);
                    Thread.sleep(RandomUtils.nextInt(processingJitterMaxMs));

                    // random number of associated logs [0, 2]
                    for (int j = RandomUtils.nextInt(2); j > 0; j--) {
                        QSLogModel log = new QSLogModel();
                        log.taskId = taskId;
                        sampledLogService.log(log);
                        Thread.sleep(RandomUtils.nextInt(processingJitterMaxMs));
                    }

                    sampledLogService.completedTask(task);
                }
                LOG.debug("Thread end {}", Thread.currentThread().getName());
                return null;

            }
        });
    }
    threads.shutdown();
    threads.awaitTermination(1, TimeUnit.MINUTES);
    long endNs = System.nanoTime();

    // Theoretical maximum number of sampled in task logging
    long durationMs = TimeUnit.NANOSECONDS.toMillis(endNs - startNs);
    long expectedMax = durationMs / noSoonerThanMs + 1; // +1 for time@0: sampled in
    LOG.debug("Run duration: {}ms  no sooner than: {}ms", durationMs, noSoonerThanMs);
    LOG.debug("Expected max sampled in: {}  Actually sampled: {}", expectedMax, sampledTaskIds.size());
    Assert.assertTrue(expectedMax >= sampledTaskIds.size());
}
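
SamplingPredicates.noSoonerThan comes from the project under test; a hypothetical predicate with the same semantics can be sketched with an atomic System.nanoTime() deadline (this is an illustrative implementation, not the library's):

import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Predicate;

public class NoSoonerThan<T> implements Predicate<T> {
    private final long periodNanos;
    // first test() is allowed immediately; later ones no sooner than one period apart
    private final AtomicLong nextAllowedNanos = new AtomicLong(System.nanoTime());

    public NoSoonerThan(long period, TimeUnit unit) {
        this.periodNanos = unit.toNanos(period);
    }

    @Override
    public boolean test(T ignored) {
        final long now = System.nanoTime();
        final long next = nextAllowedNanos.get();
        // overflow-safe comparison plus CAS so only one racing thread samples in
        return now - next >= 0 && nextAllowedNanos.compareAndSet(next, now + periodNanos);
    }
}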

From source file:org.agatom.springatom.cmp.action.DefaultActionsModelReader.java

/** {@inheritDoc} */
@Override
public ActionModel getActionModel(final String name) throws NotFoundException {
    LOGGER.debug(String.format("getActionModel(name=%s)", name));
    final ActionModelReferenceMap map = flattenActionModel.get(name);
    if (map == null) {
        throw new NotFoundException(String.format("No action model found for key=%s", name));
    }
    try {
        final long startTime = System.nanoTime();
        final ActionModel model = new ActionModel()
                .setContent(this.resolveContent(map, Sets.<Action>newTreeSet()));

        JsonNode tmpNode;
        if ((tmpNode = map.node.findValue("description")) != null) {
            model.setDescription(tmpNode.asText());
        }
        if ((tmpNode = map.node.findValue("name")) != null) {
            model.setName(tmpNode.asText());
        }

        final long endTime = System.nanoTime() - startTime;

        LOGGER.trace(String.format("getActionModel(name=%s) took %d ms", name,
                TimeUnit.NANOSECONDS.toMillis(endTime)));

        return model;
    } catch (Exception ignore) {
        // resolution failures are deliberately swallowed; fall through and return null
    }
    return null;
}

From source file:bes.injector.InjectorBurnTest.java

private void testPromptnessOfExecution(long intervalNanos, float loadIncrement)
        throws InterruptedException, ExecutionException, TimeoutException {
    final int executorCount = 4;
    int threadCount = 8;
    int maxQueued = 1024;
    final WeibullDistribution workTime = new WeibullDistribution(3, 200000);
    final long minWorkTime = TimeUnit.MICROSECONDS.toNanos(1);
    final long maxWorkTime = TimeUnit.MILLISECONDS.toNanos(1);

    final int[] threadCounts = new int[executorCount];
    final WeibullDistribution[] workCount = new WeibullDistribution[executorCount];
    final ExecutorService[] executors = new ExecutorService[executorCount];
    final Injector injector = new Injector("");
    for (int i = 0; i < executors.length; i++) {
        executors[i] = injector.newExecutor(threadCount, maxQueued);
        threadCounts[i] = threadCount;
        workCount[i] = new WeibullDistribution(2, maxQueued);
        threadCount *= 2;
        maxQueued *= 2;
    }

    long runs = 0;
    long events = 0;
    final TreeSet<Batch> pending = new TreeSet<Batch>();
    final BitSet executorsWithWork = new BitSet(executorCount);
    long until = 0;
    // basic idea is to go through different levels of load on the executor service; initially is all small batches
    // (mostly within max queue size) of very short operations, moving to progressively larger batches
    // (beyond max queued size), and longer operations
    for (float multiplier = 0f; multiplier < 2.01f;) {
        if (System.nanoTime() > until) {
            System.out.println(String.format("Completed %.0fK batches with %.1fM events", runs * 0.001f,
                    events * 0.000001f));
            events = 0;
            until = System.nanoTime() + intervalNanos;
            multiplier += loadIncrement;
            System.out.println(String.format("Running for %ds with load multiplier %.1f",
                    TimeUnit.NANOSECONDS.toSeconds(intervalNanos), multiplier));
        }

        // wait a random amount of time so we submit new tasks in various stages of completion
        long timeout;
        if (pending.isEmpty())
            timeout = 0;
        else if (Math.random() > 0.98)
            timeout = Long.MAX_VALUE;
        else if (pending.size() == executorCount)
            timeout = pending.first().timeout;
        else
            timeout = (long) (Math.random() * pending.last().timeout);

        while (!pending.isEmpty() && timeout > System.nanoTime()) {
            Batch first = pending.first();
            boolean complete = false;
            try {
                for (Result result : first.results.descendingSet())
                    result.future.get(timeout - System.nanoTime(), TimeUnit.NANOSECONDS);
                complete = true;
            } catch (TimeoutException e) {
                // timed out waiting; fall through and check the absolute deadline below
            }
            if (!complete && System.nanoTime() > first.timeout) {
                for (Result result : first.results)
                    if (!result.future.isDone())
                        throw new AssertionError();
                complete = true;
            }
            if (complete) {
                pending.pollFirst();
                executorsWithWork.clear(first.executorIndex);
            }
        }

        // if we've emptied the executors, give all our threads an opportunity to spin down
        if (timeout == Long.MAX_VALUE) {
            try {
                Thread.sleep(10);
            } catch (InterruptedException e) {
                // interruption here is harmless; just continue
            }
        }

        // submit a random batch to the first free executor service
        int executorIndex = executorsWithWork.nextClearBit(0);
        if (executorIndex >= executorCount)
            continue;
        executorsWithWork.set(executorIndex);
        ExecutorService executor = executors[executorIndex];
        TreeSet<Result> results = new TreeSet<Result>();
        int count = (int) (workCount[executorIndex].sample() * multiplier);
        long targetTotalElapsed = 0;
        long start = System.nanoTime();
        long baseTime;
        if (Math.random() > 0.5)
            baseTime = 2 * (long) (workTime.sample() * multiplier);
        else
            baseTime = 0;
        for (int j = 0; j < count; j++) {
            long time;
            if (baseTime == 0)
                time = (long) (workTime.sample() * multiplier);
            else
                time = (long) (baseTime * Math.random());
            if (time < minWorkTime)
                time = minWorkTime;
            if (time > maxWorkTime)
                time = maxWorkTime;
            targetTotalElapsed += time;
            Future<?> future = executor.submit(new WaitTask(time));
            results.add(new Result(future, System.nanoTime() + time));
        }
        long end = start + (long) Math.ceil(targetTotalElapsed / (double) threadCounts[executorIndex])
                + TimeUnit.MILLISECONDS.toNanos(100L);
        long now = System.nanoTime();
        if (runs++ > executorCount && now > end)
            throw new AssertionError();
        events += results.size();
        pending.add(new Batch(results, end, executorIndex));
        //            System.out.println(String.format("Submitted batch to executor %d with %d items and %d permitted millis", executorIndex, count, TimeUnit.NANOSECONDS.toMillis(end - start)));
    }
}
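
The inner wait loop above turns an absolute System.nanoTime() deadline into a relative NANOSECONDS timeout for Future.get. In isolation, a minimal sketch of that pattern:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class DeadlineGet {
    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newSingleThreadExecutor();
        Future<String> future = pool.submit(() -> {
            Thread.sleep(10);
            return "ok";
        });
        // absolute deadline 50 ms from now, expressed in nanoTime terms
        long deadline = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(50);
        try {
            // the remaining time may already be <= 0, in which case get times out at once
            System.out.println(future.get(deadline - System.nanoTime(), TimeUnit.NANOSECONDS));
        } catch (TimeoutException e) {
            System.out.println("deadline passed before completion");
        } finally {
            pool.shutdown();
        }
    }
}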

From source file:org.apache.cassandra.concurrent.LongSharedExecutorPoolTest.java

private void testPromptnessOfExecution(long intervalNanos, float loadIncrement)
        throws InterruptedException, ExecutionException {
    final int executorCount = 4;
    int threadCount = 8;
    int maxQueued = 1024;
    final WeibullDistribution workTime = new WeibullDistribution(3, 200000);
    final long minWorkTime = TimeUnit.MICROSECONDS.toNanos(1);
    final long maxWorkTime = TimeUnit.MILLISECONDS.toNanos(1);

    final int[] threadCounts = new int[executorCount];
    final WeibullDistribution[] workCount = new WeibullDistribution[executorCount];
    final ExecutorService[] executors = new ExecutorService[executorCount];
    for (int i = 0; i < executors.length; i++) {
        executors[i] = SharedExecutorPool.SHARED.newExecutor(threadCount, maxQueued, "test" + i, "test" + i);
        threadCounts[i] = threadCount;
        workCount[i] = new WeibullDistribution(2, maxQueued);
        threadCount *= 2;
        maxQueued *= 2;
    }

    long runs = 0;
    long events = 0;
    final TreeSet<Batch> pending = new TreeSet<>();
    final BitSet executorsWithWork = new BitSet(executorCount);
    long until = 0;
    // basic idea is to go through different levels of load on the executor service; initially is all small batches
    // (mostly within max queue size) of very short operations, moving to progressively larger batches
    // (beyond max queued size), and longer operations
    for (float multiplier = 0f; multiplier < 2.01f;) {
        if (System.nanoTime() > until) {
            System.out.println(String.format("Completed %.0fK batches with %.1fM events", runs * 0.001f,
                    events * 0.000001f));
            events = 0;
            until = System.nanoTime() + intervalNanos;
            multiplier += loadIncrement;
            System.out.println(String.format("Running for %ds with load multiplier %.1f",
                    TimeUnit.NANOSECONDS.toSeconds(intervalNanos), multiplier));
        }

        // wait a random amount of time so we submit new tasks in various stages of completion
        long timeout;
        if (pending.isEmpty())
            timeout = 0;
        else if (Math.random() > 0.98)
            timeout = Long.MAX_VALUE;
        else if (pending.size() == executorCount)
            timeout = pending.first().timeout;
        else
            timeout = (long) (Math.random() * pending.last().timeout);

        while (!pending.isEmpty() && timeout > System.nanoTime()) {
            Batch first = pending.first();
            boolean complete = false;
            try {
                for (Result result : first.results.descendingSet())
                    result.future.get(timeout - System.nanoTime(), TimeUnit.NANOSECONDS);
                complete = true;
            } catch (TimeoutException e) {
                // timed out waiting; fall through and check the absolute deadline below
            }
            if (!complete && System.nanoTime() > first.timeout) {
                for (Result result : first.results)
                    if (!result.future.isDone())
                        throw new AssertionError();
                complete = true;
            }
            if (complete) {
                pending.pollFirst();
                executorsWithWork.clear(first.executorIndex);
            }
        }

        // if we've emptied the executors, give all our threads an opportunity to spin down
        if (timeout == Long.MAX_VALUE)
            Uninterruptibles.sleepUninterruptibly(10, TimeUnit.MILLISECONDS);

        // submit a random batch to the first free executor service
        int executorIndex = executorsWithWork.nextClearBit(0);
        if (executorIndex >= executorCount)
            continue;
        executorsWithWork.set(executorIndex);
        ExecutorService executor = executors[executorIndex];
        TreeSet<Result> results = new TreeSet<>();
        int count = (int) (workCount[executorIndex].sample() * multiplier);
        long targetTotalElapsed = 0;
        long start = System.nanoTime();
        long baseTime;
        if (Math.random() > 0.5)
            baseTime = 2 * (long) (workTime.sample() * multiplier);
        else
            baseTime = 0;
        for (int j = 0; j < count; j++) {
            long time;
            if (baseTime == 0)
                time = (long) (workTime.sample() * multiplier);
            else
                time = (long) (baseTime * Math.random());
            if (time < minWorkTime)
                time = minWorkTime;
            if (time > maxWorkTime)
                time = maxWorkTime;
            targetTotalElapsed += time;
            Future<?> future = executor.submit(new WaitTask(time));
            results.add(new Result(future, System.nanoTime() + time));
        }
        long end = start + (long) Math.ceil(targetTotalElapsed / (double) threadCounts[executorIndex])
                + TimeUnit.MILLISECONDS.toNanos(100L);
        long now = System.nanoTime();
        if (runs++ > executorCount && now > end)
            throw new AssertionError();
        events += results.size();
        pending.add(new Batch(results, end, executorIndex));
        //            System.out.println(String.format("Submitted batch to executor %d with %d items and %d permitted millis", executorIndex, count, TimeUnit.NANOSECONDS.toMillis(end - start)));
    }
}

From source file:com.netflix.genie.core.services.impl.S3FileTransferImpl.java

/**
 * {@inheritDoc}
 */
@Override
public void getFile(@NotBlank(message = "Source file path cannot be empty.") final String srcRemotePath,
        @NotBlank(message = "Destination local path cannot be empty") final String dstLocalPath)
        throws GenieException {
    final long start = System.nanoTime();
    final Map<String, String> tags = MetricsUtils.newSuccessTagsMap();
    try {
        log.debug("Called with src path {} and destination path {}", srcRemotePath, dstLocalPath);

        final AmazonS3URI s3Uri = getS3Uri(srcRemotePath);
        try {
            this.s3Client.getObject(new GetObjectRequest(s3Uri.getBucket(), s3Uri.getKey()),
                    new File(dstLocalPath));
        } catch (AmazonS3Exception ase) {
            log.error("Error fetching file {} from s3 due to exception {}", srcRemotePath, ase);
            throw new GenieServerException("Error downloading file from s3. Filename: " + srcRemotePath);
        }
    } catch (Throwable t) {
        MetricsUtils.addFailureTagsWithException(tags, t);
        throw t;
    } finally {
        this.registry.timer(downloadTimerId.withTags(tags)).record(System.nanoTime() - start,
                TimeUnit.NANOSECONDS);
    }
}

From source file:org.apache.hadoop.mapreduce.SimpleEntityWriterV2.java

protected void writeEntities(Configuration tlConf, TimelineCollectorManager manager, Context context)
        throws IOException {
    Configuration conf = context.getConfiguration();
    // simulate the app id with the task id
    int taskId = context.getTaskAttemptID().getTaskID().getId();
    long timestamp = conf.getLong(TIMELINE_SERVICE_PERFORMANCE_RUN_ID, 0);
    ApplicationId appId = ApplicationId.newInstance(timestamp, taskId);

    // create the app level timeline collector
    AppLevelTimelineCollector collector = new AppLevelTimelineCollector(appId);
    manager.putIfAbsent(appId, collector);

    try {
        // set the context
        // flow id: job name, flow run id: timestamp, user id
        TimelineCollectorContext tlContext = collector.getTimelineEntityContext();
        tlContext.setFlowName(context.getJobName());
        tlContext.setFlowRunId(timestamp);
        tlContext.setUserId(context.getUser());

        final int kbs = conf.getInt(KBS_SENT, KBS_SENT_DEFAULT);

        long totalTime = 0;
        final int testtimes = conf.getInt(TEST_TIMES, TEST_TIMES_DEFAULT);
        final Random rand = new Random();
        final TaskAttemptID taskAttemptId = context.getTaskAttemptID();
        final char[] payLoad = new char[kbs * 1024];

        for (int i = 0; i < testtimes; i++) {
            // Generate a fixed length random payload
            for (int xx = 0; xx < kbs * 1024; xx++) {
                int alphaNumIdx = rand.nextInt(ALPHA_NUMS.length);
                payLoad[xx] = ALPHA_NUMS[alphaNumIdx];
            }
            String entId = taskAttemptId + "_" + Integer.toString(i);
            final TimelineEntity entity = new TimelineEntity();
            entity.setId(entId);
            entity.setType("FOO_ATTEMPT");
            entity.addInfo("PERF_TEST", payLoad);
            // add an event
            TimelineEvent event = new TimelineEvent();
            event.setId("foo_event_id");
            event.setTimestamp(System.currentTimeMillis());
            event.addInfo("foo_event", "test");
            entity.addEvent(event);
            // add a metric
            TimelineMetric metric = new TimelineMetric();
            metric.setId("foo_metric");
            metric.addValue(System.currentTimeMillis(), 123456789L);
            entity.addMetric(metric);
            // add a config
            entity.addConfig("foo", "bar");

            TimelineEntities entities = new TimelineEntities();
            entities.addEntity(entity);
            // use the current user for this purpose
            UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
            long startWrite = System.nanoTime();
            try {
                collector.putEntities(entities, ugi);
            } catch (Exception e) {
                context.getCounter(PerfCounters.TIMELINE_SERVICE_WRITE_FAILURES).increment(1);
                LOG.error("writing to the timeline service failed", e);
            }
            long endWrite = System.nanoTime();
            totalTime += TimeUnit.NANOSECONDS.toMillis(endWrite - startWrite);
        }
        LOG.info("wrote " + testtimes + " entities (" + kbs * testtimes + " kB) in " + totalTime + " ms");
        context.getCounter(PerfCounters.TIMELINE_SERVICE_WRITE_TIME).increment(totalTime);
        context.getCounter(PerfCounters.TIMELINE_SERVICE_WRITE_COUNTER).increment(testtimes);
        context.getCounter(PerfCounters.TIMELINE_SERVICE_WRITE_KBS).increment(kbs * testtimes);
    } finally {
        // clean up
        manager.remove(appId);
    }
}
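
One subtlety in the loop above: each write duration is truncated to milliseconds before being added to totalTime, so writes shorter than a millisecond are undercounted. Summing raw nanoseconds and converting the total once avoids the accumulated rounding loss, as this illustrative sketch shows:

import java.util.concurrent.TimeUnit;

public class AccumulateNanos {
    public static void main(String[] args) {
        long totalNanos = 0;
        for (int i = 0; i < 1000; i++) {
            totalNanos += 900_000; // 0.9 ms of work per iteration
        }
        // converting each 0.9 ms sample with toMillis would report 0 ms total;
        // converting the summed nanoseconds once reports 900 ms
        System.out.println(TimeUnit.NANOSECONDS.toMillis(totalNanos) + " ms");
    }
}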

From source file:fr.xebia.monitoring.demo.payment.CreditCardServiceMonitoringImpl.java

/**
 * Human readable version of {@link #getSlowRequestThresholdInNanos()}
 */
@ManagedAttribute
public long getSlowRequestThresholdInMillis() {
    return TimeUnit.MILLISECONDS.convert(slowRequestThresholdInNanos.get(), TimeUnit.NANOSECONDS);
}
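
TimeUnit.MILLISECONDS.convert(d, TimeUnit.NANOSECONDS) is equivalent to the more direct TimeUnit.NANOSECONDS.toMillis(d); which form to use is purely a readability choice:

import java.util.concurrent.TimeUnit;

public class ConvertEquivalence {
    public static void main(String[] args) {
        long nanos = 1_234_567_890L;
        long viaConvert = TimeUnit.MILLISECONDS.convert(nanos, TimeUnit.NANOSECONDS);
        long viaToMillis = TimeUnit.NANOSECONDS.toMillis(nanos);
        System.out.println(viaConvert + " == " + viaToMillis); // both are 1234
    }
}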

From source file:com.netflix.genie.web.jobs.workflow.impl.InitialSetupTask.java

/**
 * {@inheritDoc}
 */
@Override
public void executeTask(@NotNull final Map<String, Object> context) throws GenieException, IOException {
    final long start = System.nanoTime();
    final Set<Tag> tags = Sets.newHashSet();
    try {
        final JobExecutionEnvironment jobExecEnv = (JobExecutionEnvironment) context
                .get(JobConstants.JOB_EXECUTION_ENV_KEY);
        final String jobWorkingDirectory = jobExecEnv.getJobWorkingDir().getCanonicalPath();
        final Writer writer = (Writer) context.get(JobConstants.WRITER_KEY);
        final String jobId = jobExecEnv.getJobRequest().getId()
                .orElseThrow(() -> new GeniePreconditionException("No job id found. Unable to continue"));
        log.info("Starting Initial Setup Task for job {}", jobId);

        this.createJobDirStructure(jobWorkingDirectory);

        // set the env variables in the launcher script
        this.createJobDirEnvironmentVariables(writer, jobWorkingDirectory);
        this.createApplicationEnvironmentVariables(writer);

        // create environment variables for the command
        final Command command = jobExecEnv.getCommand();
        this.createCommandEnvironmentVariables(writer, command);

        // create environment variables for the cluster
        final Cluster cluster = jobExecEnv.getCluster();
        this.createClusterEnvironmentVariables(writer, cluster);

        // create environment variable for the job itself
        this.createJobEnvironmentVariables(writer, jobId, jobExecEnv.getJobRequest().getName(),
                jobExecEnv.getMemory(), jobExecEnv.getJobRequest().getTags(),
                jobExecEnv.getJobRequest().getGrouping().orElse(""),
                jobExecEnv.getJobRequest().getGroupingInstance().orElse(""));

        // create environment variables for the job request
        this.createJobRequestEnvironmentVariables(writer, jobExecEnv.getJobRequest());

        //Export the Genie Version
        writer.write(GENIE_VERSION_EXPORT);
        writer.write(LINE_SEPARATOR);
        writer.write(LINE_SEPARATOR);

        log.info("Finished Initial Setup Task for job {}", jobId);
        MetricsUtils.addSuccessTags(tags);
    } catch (final Throwable t) {
        MetricsUtils.addFailureTagsWithException(tags, t);
        throw t;
    } finally {
        this.getRegistry().timer(SETUP_TASK_TIMER_NAME, tags).record(System.nanoTime() - start,
                TimeUnit.NANOSECONDS);
    }
}