Example usage for org.apache.commons.io FilenameUtils concat

List of usage examples for org.apache.commons.io FilenameUtils concat

Introduction

On this page you can find example usages of org.apache.commons.io FilenameUtils.concat.

Prototype

public static String concat(String basePath, String fullFilenameToAdd) 

Source Link

Document

Concatenates a filename to a base path using normal command line style rules.

Usage

From source file:org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineMetricAggregatorFactory.java

/**
 * Hourly aggregation for hosts./*from   w ww.j  a  v a  2s.co m*/
 * Interval : 1 hour
 */
/**
 * Creates the hourly aggregation job for host-level metrics.
 * Interval : 1 hour
 *
 * @param hBaseAccessor accessor used to read/write the Phoenix-backed metric tables
 * @param metricsConf   configuration supplying checkpoint dir, sleep interval and cutoff multiplier
 * @return aggregator rolling minute host aggregates into the hourly table
 */
public static TimelineMetricAggregator createTimelineMetricAggregatorHourly(PhoenixHBaseAccessor hBaseAccessor,
        Configuration metricsConf) {

    String checkpointDir = metricsConf.get(TIMELINE_METRICS_AGGREGATOR_CHECKPOINT_DIR,
            DEFAULT_CHECKPOINT_LOCATION);
    String checkpointLocation = FilenameUtils.concat(checkpointDir, HOST_AGGREGATE_HOURLY_CHECKPOINT_FILE);
    // Sleep interval is configured in seconds; default 3600s = 1 hour.
    long sleepIntervalMillis = SECONDS
            .toMillis(metricsConf.getLong(HOST_AGGREGATOR_HOUR_SLEEP_INTERVAL, 3600L));

    int checkpointCutOffMultiplier = metricsConf.getInt(HOST_AGGREGATOR_HOUR_CHECKPOINT_CUTOFF_MULTIPLIER, 2);
    String hostAggregatorDisabledParam = HOST_AGGREGATOR_HOUR_DISABLED;

    String inputTableName = METRICS_AGGREGATE_MINUTE_TABLE_NAME;
    String outputTableName = METRICS_AGGREGATE_HOURLY_TABLE_NAME;

    // The v2 GROUP BY based aggregator is selected via configuration.
    if (useGroupByAggregator(metricsConf)) {
        return new org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.v2.TimelineMetricHostAggregator(
                "TimelineMetricHostAggregatorHourly", hBaseAccessor, metricsConf, checkpointLocation,
                sleepIntervalMillis, checkpointCutOffMultiplier, hostAggregatorDisabledParam, inputTableName,
                outputTableName, 3600000L);
    }

    return new TimelineMetricHostAggregator("TimelineMetricHostAggregatorHourly", hBaseAccessor, metricsConf,
            checkpointLocation, sleepIntervalMillis, checkpointCutOffMultiplier, hostAggregatorDisabledParam,
            inputTableName, outputTableName, 3600000L);
}

From source file:org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineMetricAggregatorFactory.java

/**
 * Daily aggregation for hosts.//from   w  ww  .j a  va 2  s.  c o  m
 * Interval : 1 day
 */
/**
 * Creates the daily aggregation job for host-level metrics.
 * Interval : 1 day
 *
 * @param hBaseAccessor accessor used to read/write the Phoenix-backed metric tables
 * @param metricsConf   configuration supplying checkpoint dir, sleep interval and cutoff multiplier
 * @return aggregator rolling hourly host aggregates into the daily table
 */
public static TimelineMetricAggregator createTimelineMetricAggregatorDaily(PhoenixHBaseAccessor hBaseAccessor,
        Configuration metricsConf) {

    String checkpointDir = metricsConf.get(TIMELINE_METRICS_AGGREGATOR_CHECKPOINT_DIR,
            DEFAULT_CHECKPOINT_LOCATION);
    String checkpointLocation = FilenameUtils.concat(checkpointDir, HOST_AGGREGATE_DAILY_CHECKPOINT_FILE);
    // Sleep interval is configured in seconds; default 86400s = 1 day.
    long sleepIntervalMillis = SECONDS
            .toMillis(metricsConf.getLong(HOST_AGGREGATOR_DAILY_SLEEP_INTERVAL, 86400L));

    int checkpointCutOffMultiplier = metricsConf.getInt(HOST_AGGREGATOR_DAILY_CHECKPOINT_CUTOFF_MULTIPLIER, 1);
    String hostAggregatorDisabledParam = HOST_AGGREGATOR_DAILY_DISABLED;

    String inputTableName = METRICS_AGGREGATE_HOURLY_TABLE_NAME;
    String outputTableName = METRICS_AGGREGATE_DAILY_TABLE_NAME;

    // The v2 GROUP BY based aggregator is selected via configuration.
    if (useGroupByAggregator(metricsConf)) {
        return new org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.v2.TimelineMetricHostAggregator(
                "TimelineMetricHostAggregatorDaily", hBaseAccessor, metricsConf, checkpointLocation,
                sleepIntervalMillis, checkpointCutOffMultiplier, hostAggregatorDisabledParam, inputTableName,
                outputTableName, 3600000L);
    }

    return new TimelineMetricHostAggregator("TimelineMetricHostAggregatorDaily", hBaseAccessor, metricsConf,
            checkpointLocation, sleepIntervalMillis, checkpointCutOffMultiplier, hostAggregatorDisabledParam,
            inputTableName, outputTableName, 3600000L);
}

From source file:org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineMetricAggregatorFactory.java

/**
 * Second aggregation for cluster.//ww  w . ja  va2  s .  c o  m
 * Interval : 2 mins
 * Timeslice : 30 sec
 */
/**
 * Creates the second-level aggregation job for cluster metrics.
 * Interval : 2 mins
 * Timeslice : 30 sec
 *
 * @param hBaseAccessor   accessor used to read/write the Phoenix-backed metric tables
 * @param metricsConf     configuration supplying checkpoint dir, intervals and cutoff multiplier
 * @param metadataManager metric metadata manager passed through to the aggregator
 * @return aggregator that time-slices raw records into the cluster aggregate table
 */
public static TimelineMetricAggregator createTimelineClusterAggregatorSecond(PhoenixHBaseAccessor hBaseAccessor,
        Configuration metricsConf, TimelineMetricMetadataManager metadataManager) {

    String checkpointDir = metricsConf.get(TIMELINE_METRICS_AGGREGATOR_CHECKPOINT_DIR,
            DEFAULT_CHECKPOINT_LOCATION);

    String checkpointLocation = FilenameUtils.concat(checkpointDir, CLUSTER_AGGREGATOR_CHECKPOINT_FILE);

    // Sleep interval is configured in seconds; default 120s = 2 minutes.
    long sleepIntervalMillis = SECONDS
            .toMillis(metricsConf.getLong(CLUSTER_AGGREGATOR_SECOND_SLEEP_INTERVAL, 120L));

    // Width of each time slice within an aggregation run; default 30s.
    long timeSliceIntervalMillis = SECONDS
            .toMillis(metricsConf.getInt(CLUSTER_AGGREGATOR_TIMESLICE_INTERVAL, 30));

    int checkpointCutOffMultiplier = metricsConf.getInt(CLUSTER_AGGREGATOR_SECOND_CHECKPOINT_CUTOFF_MULTIPLIER,
            2);

    String inputTableName = METRICS_RECORD_TABLE_NAME;
    String outputTableName = METRICS_CLUSTER_AGGREGATE_TABLE_NAME;
    String aggregatorDisabledParam = CLUSTER_AGGREGATOR_SECOND_DISABLED;

    // Second based aggregation have added responsibility of time slicing
    return new TimelineMetricClusterAggregatorSecond("TimelineClusterAggregatorSecond", metadataManager,
            hBaseAccessor, metricsConf, checkpointLocation, sleepIntervalMillis, checkpointCutOffMultiplier,
            aggregatorDisabledParam, inputTableName, outputTableName, 120000L, timeSliceIntervalMillis);
}

From source file:org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineMetricAggregatorFactory.java

/**
 * Minute aggregation for cluster./*www. java 2 s. co  m*/
 * Interval : 5 mins
 */
/**
 * Creates the minute-level aggregation job for cluster metrics.
 * Interval : 5 mins
 *
 * @param hBaseAccessor accessor used to read/write the Phoenix-backed metric tables
 * @param metricsConf   configuration supplying checkpoint dir, sleep interval and cutoff multiplier
 * @return aggregator rolling second-level cluster aggregates into the minute table
 */
public static TimelineMetricAggregator createTimelineClusterAggregatorMinute(PhoenixHBaseAccessor hBaseAccessor,
        Configuration metricsConf) {

    String checkpointDir = metricsConf.get(TIMELINE_METRICS_AGGREGATOR_CHECKPOINT_DIR,
            DEFAULT_CHECKPOINT_LOCATION);

    String checkpointLocation = FilenameUtils.concat(checkpointDir, CLUSTER_AGGREGATOR_MINUTE_CHECKPOINT_FILE);

    // Sleep interval is configured in seconds; default 300s = 5 minutes.
    long sleepIntervalMillis = SECONDS
            .toMillis(metricsConf.getLong(CLUSTER_AGGREGATOR_MINUTE_SLEEP_INTERVAL, 300L));

    int checkpointCutOffMultiplier = metricsConf.getInt(CLUSTER_AGGREGATOR_MINUTE_CHECKPOINT_CUTOFF_MULTIPLIER,
            2);

    String inputTableName = METRICS_CLUSTER_AGGREGATE_TABLE_NAME;
    String outputTableName = METRICS_CLUSTER_AGGREGATE_MINUTE_TABLE_NAME;
    String aggregatorDisabledParam = CLUSTER_AGGREGATOR_MINUTE_DISABLED;

    // The v2 GROUP BY based aggregator is selected via configuration.
    if (useGroupByAggregator(metricsConf)) {
        return new org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.v2.TimelineMetricClusterAggregator(
                "TimelineClusterAggregatorMinute", hBaseAccessor, metricsConf, checkpointLocation,
                sleepIntervalMillis, checkpointCutOffMultiplier, aggregatorDisabledParam, inputTableName,
                outputTableName, 120000L);
    }

    return new TimelineMetricClusterAggregator("TimelineClusterAggregatorMinute", hBaseAccessor, metricsConf,
            checkpointLocation, sleepIntervalMillis, checkpointCutOffMultiplier, aggregatorDisabledParam,
            inputTableName, outputTableName, 120000L);
}

From source file:org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineMetricAggregatorFactory.java

/**
 * Hourly aggregation for cluster.//  www . j a va 2  s.c o m
 * Interval : 1 hour
 */
/**
 * Creates the hourly aggregation job for cluster metrics.
 * Interval : 1 hour
 *
 * @param hBaseAccessor accessor used to read/write the Phoenix-backed metric tables
 * @param metricsConf   configuration supplying checkpoint dir, sleep interval and cutoff multiplier
 * @return aggregator rolling minute cluster aggregates into the hourly table
 */
public static TimelineMetricAggregator createTimelineClusterAggregatorHourly(PhoenixHBaseAccessor hBaseAccessor,
        Configuration metricsConf) {

    String checkpointDir = metricsConf.get(TIMELINE_METRICS_AGGREGATOR_CHECKPOINT_DIR,
            DEFAULT_CHECKPOINT_LOCATION);

    String checkpointLocation = FilenameUtils.concat(checkpointDir, CLUSTER_AGGREGATOR_HOURLY_CHECKPOINT_FILE);

    // Sleep interval is configured in seconds; default 3600s = 1 hour.
    long sleepIntervalMillis = SECONDS
            .toMillis(metricsConf.getLong(CLUSTER_AGGREGATOR_HOUR_SLEEP_INTERVAL, 3600L));

    int checkpointCutOffMultiplier = metricsConf.getInt(CLUSTER_AGGREGATOR_HOUR_CHECKPOINT_CUTOFF_MULTIPLIER,
            2);

    String inputTableName = METRICS_CLUSTER_AGGREGATE_TABLE_NAME;
    String outputTableName = METRICS_CLUSTER_AGGREGATE_HOURLY_TABLE_NAME;
    String aggregatorDisabledParam = CLUSTER_AGGREGATOR_HOUR_DISABLED;

    // The v2 GROUP BY based aggregator is selected via configuration.
    if (useGroupByAggregator(metricsConf)) {
        return new org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.v2.TimelineMetricClusterAggregator(
                "TimelineClusterAggregatorHourly", hBaseAccessor, metricsConf, checkpointLocation,
                sleepIntervalMillis, checkpointCutOffMultiplier, aggregatorDisabledParam, inputTableName,
                outputTableName, 120000L);
    }

    return new TimelineMetricClusterAggregator("TimelineClusterAggregatorHourly", hBaseAccessor, metricsConf,
            checkpointLocation, sleepIntervalMillis, checkpointCutOffMultiplier, aggregatorDisabledParam,
            inputTableName, outputTableName, 120000L);
}

From source file:org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.TimelineMetricAggregatorFactory.java

/**
 * Daily aggregation for cluster./*from   ww w .j  a v  a2 s .  c  om*/
 * Interval : 1 day
 */
/**
 * Creates the daily aggregation job for cluster metrics.
 * Interval : 1 day
 *
 * @param hBaseAccessor accessor used to read/write the Phoenix-backed metric tables
 * @param metricsConf   configuration supplying checkpoint dir, sleep interval and cutoff multiplier
 * @return aggregator rolling hourly cluster aggregates into the daily table
 */
public static TimelineMetricAggregator createTimelineClusterAggregatorDaily(PhoenixHBaseAccessor hBaseAccessor,
        Configuration metricsConf) {

    String checkpointDir = metricsConf.get(TIMELINE_METRICS_AGGREGATOR_CHECKPOINT_DIR,
            DEFAULT_CHECKPOINT_LOCATION);

    String checkpointLocation = FilenameUtils.concat(checkpointDir, CLUSTER_AGGREGATOR_DAILY_CHECKPOINT_FILE);

    // Sleep interval is configured in seconds; default 86400s = 1 day.
    long sleepIntervalMillis = SECONDS
            .toMillis(metricsConf.getLong(CLUSTER_AGGREGATOR_DAILY_SLEEP_INTERVAL, 86400L));

    int checkpointCutOffMultiplier = metricsConf.getInt(CLUSTER_AGGREGATOR_DAILY_CHECKPOINT_CUTOFF_MULTIPLIER,
            1);

    String inputTableName = METRICS_CLUSTER_AGGREGATE_HOURLY_TABLE_NAME;
    String outputTableName = METRICS_CLUSTER_AGGREGATE_DAILY_TABLE_NAME;
    String aggregatorDisabledParam = CLUSTER_AGGREGATOR_DAILY_DISABLED;

    // The v2 GROUP BY based aggregator is selected via configuration.
    if (useGroupByAggregator(metricsConf)) {
        return new org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.aggregators.v2.TimelineMetricClusterAggregator(
                "TimelineClusterAggregatorDaily", hBaseAccessor, metricsConf, checkpointLocation,
                sleepIntervalMillis, checkpointCutOffMultiplier, aggregatorDisabledParam, inputTableName,
                outputTableName, 120000L);
    }

    return new TimelineMetricClusterAggregator("TimelineClusterAggregatorDaily", hBaseAccessor, metricsConf,
            checkpointLocation, sleepIntervalMillis, checkpointCutOffMultiplier, aggregatorDisabledParam,
            inputTableName, outputTableName, 120000L);
}

From source file:org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricAggregatorFactory.java

/**
 * Creates the minute-level aggregation job for host metrics.
 * Interval : 5 mins
 *
 * @param hBaseAccessor accessor used to read/write the Phoenix-backed metric tables
 * @param metricsConf   configuration supplying checkpoint dir, sleep interval and cutoff multiplier
 * @return aggregator rolling raw metric records into the minute aggregate table
 */
public static TimelineMetricAggregator createTimelineMetricAggregatorMinute(PhoenixHBaseAccessor hBaseAccessor,
        Configuration metricsConf) {

    String checkpointDir = metricsConf.get(TIMELINE_METRICS_AGGREGATOR_CHECKPOINT_DIR,
            DEFAULT_CHECKPOINT_LOCATION);
    String checkpointLocation = FilenameUtils.concat(checkpointDir, MINUTE_AGGREGATE_CHECKPOINT_FILE);
    // Sleep interval is configured in seconds; default 300s = 5 minutes.
    long sleepIntervalMillis = SECONDS
            .toMillis(metricsConf.getLong(HOST_AGGREGATOR_MINUTE_SLEEP_INTERVAL, 300L));

    int checkpointCutOffMultiplier = metricsConf.getInt(HOST_AGGREGATOR_MINUTE_CHECKPOINT_CUTOFF_MULTIPLIER, 3);
    String hostAggregatorDisabledParam = HOST_AGGREGATOR_MINUTE_DISABLED;

    String inputTableName = METRICS_RECORD_TABLE_NAME;
    String outputTableName = METRICS_AGGREGATE_MINUTE_TABLE_NAME;

    return new TimelineMetricAggregator(hBaseAccessor, metricsConf, checkpointLocation, sleepIntervalMillis,
            checkpointCutOffMultiplier, hostAggregatorDisabledParam, inputTableName, outputTableName, 120000L);
}

From source file:org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricAggregatorFactory.java

/**
 * Creates the hourly aggregation job for host metrics.
 * Interval : 1 hour
 *
 * @param hBaseAccessor accessor used to read/write the Phoenix-backed metric tables
 * @param metricsConf   configuration supplying checkpoint dir, sleep interval and cutoff multiplier
 * @return aggregator rolling minute aggregates into the hourly aggregate table
 */
public static TimelineMetricAggregator createTimelineMetricAggregatorHourly(PhoenixHBaseAccessor hBaseAccessor,
        Configuration metricsConf) {

    String checkpointDir = metricsConf.get(TIMELINE_METRICS_AGGREGATOR_CHECKPOINT_DIR,
            DEFAULT_CHECKPOINT_LOCATION);
    String checkpointLocation = FilenameUtils.concat(checkpointDir, MINUTE_AGGREGATE_HOURLY_CHECKPOINT_FILE);
    // Sleep interval is configured in seconds; default 3600s = 1 hour.
    long sleepIntervalMillis = SECONDS
            .toMillis(metricsConf.getLong(HOST_AGGREGATOR_HOUR_SLEEP_INTERVAL, 3600L));

    int checkpointCutOffMultiplier = metricsConf.getInt(HOST_AGGREGATOR_HOUR_CHECKPOINT_CUTOFF_MULTIPLIER, 2);
    String hostAggregatorDisabledParam = HOST_AGGREGATOR_HOUR_DISABLED;

    String inputTableName = METRICS_AGGREGATE_MINUTE_TABLE_NAME;
    String outputTableName = METRICS_AGGREGATE_HOURLY_TABLE_NAME;

    return new TimelineMetricAggregator(hBaseAccessor, metricsConf, checkpointLocation, sleepIntervalMillis,
            checkpointCutOffMultiplier, hostAggregatorDisabledParam, inputTableName, outputTableName, 3600000L);
}

From source file:org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricClusterAggregator.java

/**
 * Builds the cluster aggregator, resolving its checkpoint file location and
 * timing parameters from configuration.
 *
 * @param hBaseAccessor accessor used by the superclass to read/write metric tables
 * @param metricsConf   configuration supplying checkpoint dir, intervals and cutoff multiplier
 */
public TimelineMetricClusterAggregator(PhoenixHBaseAccessor hBaseAccessor, Configuration metricsConf) {
    super(hBaseAccessor, metricsConf);

    String checkpointDir = metricsConf.get(TIMELINE_METRICS_AGGREGATOR_CHECKPOINT_DIR,
            DEFAULT_CHECKPOINT_LOCATION);

    checkpointLocation = FilenameUtils.concat(checkpointDir, CLUSTER_AGGREGATOR_CHECKPOINT_FILE);

    // Intervals are configured in seconds; defaults: 120s sleep, 15s time slice.
    sleepIntervalMillis = SECONDS.toMillis(metricsConf.getLong(CLUSTER_AGGREGATOR_MINUTE_SLEEP_INTERVAL, 120L));
    timeSliceIntervalMillis = (int) SECONDS
            .toMillis(metricsConf.getInt(CLUSTER_AGGREGATOR_TIMESLICE_INTERVAL, 15));
    checkpointCutOffMultiplier = metricsConf.getInt(CLUSTER_AGGREGATOR_MINUTE_CHECKPOINT_CUTOFF_MULTIPLIER, 2);
}

From source file:org.apache.hadoop.yarn.server.applicationhistoryservice.metrics.timeline.TimelineMetricClusterAggregatorHourly.java

/**
 * Builds the hourly cluster aggregator, resolving its checkpoint file location
 * and timing parameters from configuration.
 *
 * @param hBaseAccessor accessor used by the superclass to read/write metric tables
 * @param metricsConf   configuration supplying checkpoint dir, intervals and cutoff settings
 */
public TimelineMetricClusterAggregatorHourly(PhoenixHBaseAccessor hBaseAccessor, Configuration metricsConf) {
    super(hBaseAccessor, metricsConf);

    String checkpointDir = metricsConf.get(TIMELINE_METRICS_AGGREGATOR_CHECKPOINT_DIR,
            DEFAULT_CHECKPOINT_LOCATION);

    checkpointLocation = FilenameUtils.concat(checkpointDir, CLUSTER_AGGREGATOR_HOURLY_CHECKPOINT_FILE);

    // Intervals are configured in seconds; defaults: 3600s sleep, 7200s cutoff.
    sleepIntervalMillis = SECONDS.toMillis(metricsConf.getLong(CLUSTER_AGGREGATOR_HOUR_SLEEP_INTERVAL, 3600L));
    checkpointCutOffIntervalMillis = SECONDS
            .toMillis(metricsConf.getLong(CLUSTER_AGGREGATOR_HOUR_CHECKPOINT_CUTOFF_INTERVAL, 7200L));
    checkpointCutOffMultiplier = metricsConf.getInt(CLUSTER_AGGREGATOR_HOUR_CHECKPOINT_CUTOFF_MULTIPLIER, 2);
}