Example usage for org.joda.time DateTimeZone forID

Introduction

On this page you can find usage examples for org.joda.time DateTimeZone.forID.

Prototype

@FromString
public static DateTimeZone forID(String id) 

Document

Gets a time zone instance for the specified time zone id.
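
Before the full examples, a minimal self-contained sketch of the call (the class name ForIdDemo is just for illustration; the ids are standard tz database names). An unrecognized id makes forID throw IllegalArgumentException:

import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;

public class ForIdDemo {
    public static void main(String[] args) {
        // Look up a zone by its tz database id
        DateTimeZone la = DateTimeZone.forID("America/Los_Angeles");

        // Render the same instant in two zones
        DateTime nowUtc = new DateTime(DateTimeZone.UTC);
        System.out.println(nowUtc);              // the instant in UTC
        System.out.println(nowUtc.withZone(la)); // same instant, Pacific time

        // An unrecognized id fails fast
        try {
            DateTimeZone.forID("Not/AZone");
        } catch (IllegalArgumentException e) {
            System.out.println("Unrecognized id: " + e.getMessage());
        }
    }
}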

Usage

From source file: gobblin.source.DatePartitionedAvroFileSource.java

License: Apache License

/**
 * Gobblin calls the {@link Source#getWorkunits(SourceState)} method after creating a {@link Source} object with a
 * blank constructor, so any custom initialization of the object needs to be done here.
 */
protected void init(SourceState state) {
    DateTimeZone.setDefault(DateTimeZone.forID(
            state.getProp(ConfigurationKeys.SOURCE_TIMEZONE, ConfigurationKeys.DEFAULT_SOURCE_TIMEZONE)));

    initDatePartition(state);

    try {
        initFileSystemHelper(state);
    } catch (FileBasedHelperException e) {
        Throwables.propagate(e);
    }

    AvroFsHelper fsHelper = (AvroFsHelper) this.fsHelper;
    this.fs = fsHelper.getFileSystem();

    this.sourceState = state;

    this.lowWaterMark = getLowWaterMark(state.getPreviousWorkUnitStates(),
            state.getProp(DATE_PARTITIONED_SOURCE_MIN_WATERMARK_VALUE,
                    this.partitionPatternFormatter.print(DEFAULT_DATE_PARTITIONED_SOURCE_MIN_WATERMARK_VALUE)));

    this.maxFilesPerJob = state.getPropAsInt(DATE_PARTITIONED_SOURCE_MAX_FILES_PER_JOB,
            DEFAULT_DATE_PARTITIONED_SOURCE_MAX_FILES_PER_JOB);

    this.maxWorkUnitsPerJob = state.getPropAsInt(DATE_PARTITIONED_SOURCE_MAX_WORKUNITS_PER_JOB,
            DEFAULT_DATE_PARTITIONED_SOURCE_MAX_WORKUNITS_PER_JOB);

    this.tableType = TableType.valueOf(state.getProp(ConfigurationKeys.EXTRACT_TABLE_TYPE_KEY).toUpperCase());

    this.fileCount = 0;

    this.sourceDir = new Path(state.getProp(ConfigurationKeys.SOURCE_FILEBASED_DATA_DIRECTORY));

    this.sourcePartitionPrefix = state.getProp(DATE_PARTITIONED_SOURCE_PARTITION_PREFIX, StringUtils.EMPTY);

    this.sourcePartitionSuffix = state.getProp(DATE_PARTITIONED_SOURCE_PARTITION_SUFFIX, StringUtils.EMPTY);

}

From source file: gobblin.source.DatePartitionedDailyAvroSource.java

License: Open Source License

/**
 * Gobblin calls the {@link Source#getWorkunits(SourceState)} method after creating a {@link Source} object with a
 * blank constructor, so any custom initialization of the object needs to be done here.
 */
private void init(SourceState state) {
    DateTimeZone.setDefault(DateTimeZone.forID(
            state.getProp(ConfigurationKeys.SOURCE_TIMEZONE, ConfigurationKeys.DEFAULT_SOURCE_TIMEZONE)));

    try {
        initFileSystemHelper(state);
    } catch (FileBasedHelperException e) {
        Throwables.propagate(e);
    }

    AvroFsHelper fsHelper = (AvroFsHelper) this.fsHelper;
    this.fs = fsHelper.getFileSystem();

    this.sourceState = state;

    this.lowWaterMark = getLowWaterMark(state.getPreviousWorkUnitStates(),
            state.getProp(DATE_PARTITIONED_SOURCE_MIN_WATERMARK_VALUE,
                    DAILY_FOLDER_FORMATTER.print(DEFAULT_DATE_PARTITIONED_SOURCE_MIN_WATERMARK_VALUE)));

    this.maxFilesPerJob = state.getPropAsInt(DATE_PARTITIONED_SOURCE_MAX_FILES_PER_JOB,
            DEFAULT_DATE_PARTITIONED_SOURCE_MAX_FILES_PER_JOB);

    this.maxWorkUnitsPerJob = state.getPropAsInt(DATE_PARTITIONED_SOURCE_MAX_WORKUNITS_PER_JOB,
            DEFAULT_DATE_PARTITIONED_SOURCE_MAX_WORKUNITS_PER_JOB);

    this.tableType = TableType.valueOf(state.getProp(ConfigurationKeys.EXTRACT_TABLE_TYPE_KEY).toUpperCase());

    this.fileCount = 0;

    this.sourceDir = new Path(state.getProp(ConfigurationKeys.SOURCE_FILEBASED_DATA_DIRECTORY));
}

From source file: gobblin.source.DatePartitionedNestedRetriever.java

License: Apache License

@Override
public void init(SourceState state) {
    DateTimeZone.setDefault(DateTimeZone.forID(
            state.getProp(ConfigurationKeys.SOURCE_TIMEZONE, ConfigurationKeys.DEFAULT_SOURCE_TIMEZONE)));

    initDatePartition(state);
    this.sourcePartitionPrefix = state
            .getProp(PartitionedFileSourceBase.DATE_PARTITIONED_SOURCE_PARTITION_PREFIX, StringUtils.EMPTY);

    this.sourcePartitionSuffix = state
            .getProp(PartitionedFileSourceBase.DATE_PARTITIONED_SOURCE_PARTITION_SUFFIX, StringUtils.EMPTY);
    this.sourceDir = new Path(state.getProp(ConfigurationKeys.SOURCE_FILEBASED_DATA_DIRECTORY));
    this.helper = new HadoopFsHelper(state);
}
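
Note that the three sources above install the zone process-wide via DateTimeZone.setDefault, so every zone-less Joda-Time object created afterwards in the same JVM picks it up; the extractor and writer examples that follow instead scope the zone to a single formatter with withZone. A minimal sketch of the two styles (the pattern and ids are illustrative):

import org.joda.time.DateTimeZone;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;

public class ZoneScopeDemo {
    public static void main(String[] args) {
        // Global: affects every DateTime/formatter in this JVM that relies on the default zone
        DateTimeZone.setDefault(DateTimeZone.forID("America/Los_Angeles"));

        // Scoped: only this formatter parses and prints in the given zone
        DateTimeFormatter formatter = DateTimeFormat.forPattern("yyyy/MM/dd")
                .withZone(DateTimeZone.forID("UTC"));
        System.out.println(formatter.print(System.currentTimeMillis()));
    }
}

setDefault is convenient for single-purpose batch jobs like these, but it is JVM-wide state; withZone keeps the choice local to one formatter and avoids surprising other code in the same process.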

From source file: gobblin.source.extractor.extract.google.GoogleAnalyticsUnsampledExtractor.java

License: Apache License

/**
 * For an unsampled report, this calls the GA service to produce an unsampled CSV report in Google Drive,
 * so that the extractor returned by getExtractor reads records from the CSV file via Google Drive.
 *
 * @param wuState the work unit state carrying the source configuration
 * @throws IOException
 */
public GoogleAnalyticsUnsampledExtractor(WorkUnitState wuState) throws IOException {
    this.wuState = wuState;
    this.googleAnalyticsFormatter = DateTimeFormat.forPattern(DATE_FORMAT)
            .withZone(DateTimeZone.forID(wuState.getProp(SOURCE_TIMEZONE, DEFAULT_SOURCE_TIMEZONE)));
    this.watermarkFormatter = DateTimeFormat.forPattern(WATERMARK_INPUTFORMAT)
            .withZone(DateTimeZone.forID(wuState.getProp(SOURCE_TIMEZONE, DEFAULT_SOURCE_TIMEZONE)));

    Credential credential = new GoogleCommon.CredentialBuilder(wuState.getProp(SOURCE_CONN_PRIVATE_KEY),
            wuState.getPropAsList(API_SCOPES)).fileSystemUri(wuState.getProp(PRIVATE_KEY_FILESYSTEM_URI))
                    .proxyUrl(wuState.getProp(SOURCE_CONN_USE_PROXY_URL))
                    .port(wuState.getProp(SOURCE_CONN_USE_PROXY_PORT))
                    .serviceAccountId(wuState.getProp(SOURCE_CONN_USERNAME)).build();

    this.gaService = new Analytics.Builder(credential.getTransport(), GoogleCommon.getJsonFactory(), credential)
            .setApplicationName(Preconditions.checkNotNull(wuState.getProp(APPLICATION_NAME))).build();

    Drive driveClient = new Drive.Builder(credential.getTransport(), GoogleCommon.getJsonFactory(),
            Preconditions.checkNotNull(credential, "Credential is required"))
                    .setApplicationName(Preconditions.checkNotNull(wuState.getProp(APPLICATION_NAME),
                            "ApplicationName is required"))
                    .build();

    GoogleDriveFsHelper fsHelper = closer.register(new GoogleDriveFsHelper(wuState, driveClient));

    UnsampledReport request = new UnsampledReport()
            .setAccountId(Preconditions.checkNotNull(wuState.getProp(ACCOUNT_ID), ACCOUNT_ID + " is required"))
            .setWebPropertyId(Preconditions.checkNotNull(wuState.getProp(WEB_PROPERTY_ID),
                    WEB_PROPERTY_ID + " is required"))
            .setProfileId(Preconditions.checkNotNull(wuState.getProp(VIEW_ID), VIEW_ID + " is required"))
            .setTitle(
                    Preconditions.checkNotNull(wuState.getProp(SOURCE_ENTITY), SOURCE_ENTITY + " is required."))
            .setStartDate(convertFormat(wuState.getWorkunit().getLowWatermark(LongWatermark.class).getValue()))
            .setEndDate(convertFormat(
                    wuState.getWorkunit().getExpectedHighWatermark(LongWatermark.class).getValue()))
            .setMetrics(Preconditions.checkNotNull(wuState.getProp(METRICS), METRICS + " is required."))
            .setDimensions(wuState.getProp(DIMENSIONS)) //Optional
            .setSegment(wuState.getProp(SEGMENTS)) //Optional
            .setFilters(wuState.getProp(FILTERS)); //Optional

    UnsampledReport createdReport = prepareUnsampledReport(request, fsHelper,
            wuState.getPropAsBoolean(DELETE_TEMP_UNSAMPLED_REPORT, true));

    DateTime nextWatermarkDateTime = googleAnalyticsFormatter.parseDateTime(createdReport.getEndDate())
            .plusDays(1);
    nextWatermark = Long.parseLong(watermarkFormatter.print(nextWatermarkDateTime));

    this.actualExtractor = closer.register(new GoogleDriveExtractor<S, D>(copyOf(wuState), fsHelper));
}

From source file: gobblin.source.extractor.extract.google.GoogleAnalyticsUnsampledExtractor.java

License: Apache License

@VisibleForTesting
GoogleAnalyticsUnsampledExtractor(WorkUnitState state, Extractor<S, D> actualExtractor, Analytics gaService)
        throws IOException {
    this.wuState = state;
    this.googleAnalyticsFormatter = DateTimeFormat.forPattern(DATE_FORMAT)
            .withZone(DateTimeZone.forID(state.getProp(SOURCE_TIMEZONE, DEFAULT_SOURCE_TIMEZONE)));
    this.watermarkFormatter = DateTimeFormat.forPattern(WATERMARK_INPUTFORMAT)
            .withZone(DateTimeZone.forID(state.getProp(SOURCE_TIMEZONE, DEFAULT_SOURCE_TIMEZONE)));
    this.actualExtractor = actualExtractor;
    this.gaService = gaService;
    this.nextWatermark = -1;
}

From source file: gobblin.source.extractor.utils.Utils.java

License: Apache License

/**
 * Gets the time zone for the given time zone id.
 * @param id timezone id
 * @return timezone
 */
private static DateTimeZone getTimeZone(String id) {
    DateTimeZone zone;
    try {
        zone = DateTimeZone.forID(id);
    } catch (IllegalArgumentException e) {
        // Chain the cause so the underlying Joda-Time error is not lost
        throw new IllegalArgumentException("TimeZone " + id + " not recognized", e);
    }
    return zone;
}

From source file: gobblin.writer.AvroHdfsTimePartitionedWriter.java

License: Open Source License

public AvroHdfsTimePartitionedWriter(Destination destination, String writerId, Schema schema,
        WriterOutputFormat writerOutputFormat, int numBranches, int branch) {

    // Confirm that all input parameters are not null
    Preconditions.checkNotNull(destination);
    Preconditions.checkNotNull(writerId);
    Preconditions.checkNotNull(schema);
    Preconditions.checkNotNull(writerOutputFormat);
    Preconditions.checkNotNull(numBranches);
    Preconditions.checkNotNull(branch);
    Preconditions.checkNotNull(destination.getProperties());

    this.destination = destination;
    this.writerId = writerId;
    this.schema = schema;
    this.writerOutputFormat = writerOutputFormat;
    this.numBranches = numBranches;
    this.branch = branch;
    this.properties = destination.getProperties();
    this.datasetName = WriterUtils.getWriterFilePath(this.properties, numBranches, branch);

    // Initialize the partitionLevel
    this.partitionLevel = this.properties.getProp(getWriterPartitionLevel(),
            ConfigurationKeys.DEFAULT_WRITER_PARTITION_LEVEL);

    // Initialize the timestampToPathFormatter
    this.timestampToPathFormatter = DateTimeFormat
            .forPattern(this.properties.getProp(getWriterPartitionPattern(),
                    ConfigurationKeys.DEFAULT_WRITER_PARTITION_PATTERN))
            .withZone(DateTimeZone.forID(this.properties.getProp(ConfigurationKeys.WRITER_PARTITION_TIMEZONE,
                    ConfigurationKeys.DEFAULT_WRITER_PARTITION_TIMEZONE)));

    this.partitionColumnName = Optional.fromNullable(this.properties.getProp(getWriterPartitionColumnName()));
}

From source file: gobblin.writer.AvroToParquetHdfsTimePartitionedWriter.java

License: Open Source License

public AvroToParquetHdfsTimePartitionedWriter(Destination destination, String writerId, Schema schema,
        WriterOutputFormat writerOutputFormat, int numBranches, int branch) {

    // Confirm that all input parameters are not null
    Preconditions.checkNotNull(destination);
    Preconditions.checkNotNull(writerId);
    Preconditions.checkNotNull(schema);
    Preconditions.checkNotNull(writerOutputFormat);
    Preconditions.checkNotNull(numBranches);
    Preconditions.checkNotNull(branch);
    Preconditions.checkNotNull(destination.getProperties());

    this.destination = destination;
    this.writerId = writerId;
    this.schema = schema;
    this.writerOutputFormat = writerOutputFormat;
    this.numBranches = numBranches;
    this.branch = branch;
    this.properties = destination.getProperties();
    this.datasetName = WriterUtils.getWriterFilePath(this.properties, numBranches, branch);

    // Initialize the partitionLevel
    this.partitionLevel = this.properties.getProp(getWriterPartitionLevel(),
            ConfigurationKeys.DEFAULT_WRITER_PARTITION_LEVEL);

    // Initialize the timestampToPathFormatter
    this.timestampToPathFormatter = DateTimeFormat
            .forPattern(this.properties.getProp(getWriterPartitionPattern(),
                    ConfigurationKeys.DEFAULT_WRITER_PARTITION_PATTERN))
            .withZone(DateTimeZone.forID(this.properties.getProp(ConfigurationKeys.WRITER_PARTITION_TIMEZONE,
                    ConfigurationKeys.DEFAULT_WRITER_PARTITION_TIMEZONE)));

    this.partitionColumns = getWriterPartitionColumns();
}

From source file: gobblin.writer.partitioner.TimeBasedWriterPartitioner.java

License: Apache License

private static DateTimeZone getTimeZone(State state, int numBranches, int branchId) {
    String propName = ForkOperatorUtils.getPropertyNameForBranch(WRITER_PARTITION_TIMEZONE, numBranches,
            branchId);
    return DateTimeZone.forID(state.getProp(propName, DEFAULT_WRITER_PARTITION_TIMEZONE));
}

From source file: gov.noaa.pfel.coastwatch.Projects.java

License: Open Source License

/** 
 * This was used by Bob to convert the source NewportCTD .csv data into .nc files
 * suitable for ERDDAP EDDTableFromNcFiles.
 * <br>Lynn made the .csv files 2009-12-31.
 *    ftp://192.168.31.10/outgoing/ldewitt/Simons/northwest/
 * <br>Source URL http://192.168.31.13/cgi-bin/ERDserver/northwest.sql.das .
 */
public static void convertNewportCTD() throws Exception {
    String2.log("\n*** EDDTableFromNcFiles.convertNewportCTD");
    String sourceDir = "c:/data/rawSource/newportCTD2009-12/";
    String sourceCsv = "CTD_NH.csv";
    String sourceLatLon = "NH_Target_LatLong.csv";
    String destDir = "c:/u00/data/points/erdNewportCtd/";
    float mv = -9999;
    int factor = 10000;

    String dataColNames[] = String2
            .split("station_code, date,         station,      local_time,   depth_or_pressure, "
                    + "temperature,  salinity,     density,      fluorescence, project, " + "transect", ',');
    Class dataColTypes[] = { String.class, String.class, String.class, String.class, float.class, float.class,
            float.class, float.class, float.class, String.class, String.class };
    String dataUnits[] = { //date will be ...
            null, "seconds since 1970-01-01T00:00:00Z", null, null, "meters", "degree_C", "1e-3", "sigma",
            "volts", null, //1e-3 replaces PSU in CF std names 25
            null };

    Test.ensureEqual(dataColNames.length, dataColTypes.length, "dataColNames.length != dataColTypes.length");

    String latLonColNames[] = String2.split("line,        station,      latitude,    longitude,   transect",
            ',');
    //schema has double.class for lat, lon, but I think not necessary
    Class latLonColTypes[] = { String.class, String.class, float.class, float.class, String.class };

    //find timezone   America/Los_Angeles
    //String2.log(String2.toCSSVString(DateTimeZone.getAvailableIDs().toArray()));
    //Test.ensureTrue(false, "");

    //recursively delete any files in destDir 
    File2.deleteAllFiles(destDir, true, true);

    //read the data source file
    String2.log("\nreading the data source file");
    Table dataTable = new Table();
    dataTable.readASCII(sourceDir + sourceCsv, String2.readLinesFromFile(sourceDir + sourceCsv, null, 3), -1, 0,
            null, null, null, null, false); //don't simplify
    Test.ensureEqual(dataTable.nColumns(), dataColNames.length, "dataTable.nColumns() != dataColNames.length");
    String2.log("");

    //find bad rows?   there is 1, 48358, remove it
    PrimitiveArray depthPa = dataTable.getColumn(4);
    for (int row = 0; row < depthPa.size(); row++) {
        if (depthPa.getString(row).length() == 0) {
            String2.log("!!! row=" + row + " has no depth_or_pressure.  Removing it...");
            dataTable.removeRow(row);
            row--; //recheck this index: later rows shifted down after the removal
        }
    }
    String2.log("");

    for (int col = 0; col < dataColNames.length; col++) {
        //set the column name
        dataTable.setColumnName(col, dataColNames[col]);

        //set the units 
        if (dataUnits[col] != null)
            dataTable.columnAttributes(col).set("units", dataUnits[col]);

        //change the columnType
        if (dataColTypes[col] != String.class) {
            PrimitiveArray pa = PrimitiveArray.factory(dataColTypes[col], 1, false);
            pa.append(dataTable.getColumn(col));
            dataTable.setColumn(col, pa);
        }

        //set data mv to mv (-9999)
        if (col >= 4 && col <= 8) {
            PrimitiveArray pa = dataTable.getColumn(col);
            String2.log(pa.switchFromTo("", "" + mv) + //the mv is packed, too
                    " " + dataColNames[col] + " values converted from '' to " + mv);
            if (col == 8) {
                //fluorescence has mv of -999999 and ""  
                //  and bruised double values (obviously originally floats)
                String2.log(pa.switchFromTo("-999999", "" + mv)
                        + " fluorescence values converted from -999999 to " + mv);
            }
            pa.scaleAddOffset(factor, 0);
            pa = new IntArray(pa);
            dataTable.setColumn(col, pa);
            dataTable.columnAttributes(col).set("missing_value", Math2.roundToInt(mv * factor)); //missing_value is packed, too
            dataTable.columnAttributes(col).set("scale_factor", 1 / (float) factor); //float
        }

        //convert "Ship of Opportu" 
        if (col == 9) {
            PrimitiveArray pa = dataTable.getColumn(col);
            String2.log(pa.switchFromTo("Ship of Opportu", "Ship of Opportunity")
                    + " project values converted from \"Ship of Opportu\".");
        }

        //convert transect "" to "Newport Hydrographic" ???
        if (col == 10) {
            PrimitiveArray pa = dataTable.getColumn(col);
            String2.log(pa.switchFromTo("", "Newport Hydrographic")
                    + " transect values converted from \"\" to \"Newport Hydrographic\".");
        }

    }

    //sort   (so all data for a given stationCode will be stored together)
    String2.log("\nsorting\n");
    dataTable.sort(new int[] { 0, 4 }, new boolean[] { true, true }); //stationCode, depth

    //make time (Z) from date and local_time "04/20/2007 12:00:00 AM,NH125,12/30/1899 12:04:00 AM"
    StringArray datePa = (StringArray) dataTable.findColumn("date");
    StringArray localTimePa = (StringArray) dataTable.findColumn("local_time");
    DoubleArray timePa = new DoubleArray();
    DateTimeFormatter dateTimeFormatter = DateTimeFormat.forPattern("MM/dd/yyyy hh:mm:ss aa")
            .withZone(DateTimeZone.forID("America/Los_Angeles"));
    for (int row = 0; row < datePa.size(); row++) {
        String tDate = datePa.get(row);
        if (tDate.length() == 0) {
            timePa.add(Double.NaN);
            continue;
        }
        Test.ensureEqual(tDate.substring(10), " 12:00:00 AM", "Unexpected date on row=" + row);
        String tLocal = localTimePa.get(row);
        if (tLocal.length() > 0) {
            Test.ensureEqual(tLocal.substring(0, 11), "12/30/1899 ",
                    "Unexpected local_time date on row=" + row);
            tDate = tDate.substring(0, 10) + tLocal.substring(10);
        }
        //Newport, OR is same time zone as Pacific Grove. so just use default local time zone.
        double sec = Math2.roundToDouble(dateTimeFormatter.parseMillis(tDate) / 1000.0); //timeInMillis is zulu time
        if (row == 0 || row == 6053)
            String2.log("time example: row=" + row + " \"" + tDate + "\" was converted to "
                    + Calendar2.safeEpochSecondsToIsoStringTZ(sec, ""));
        timePa.add(sec);
    }
    dataTable.setColumn(1, timePa);
    dataTable.setColumnName(1, "time");
    //remove local_time
    dataTable.removeColumn("local_time");

    //read the latLon file
    String2.log("\nreading the latLon source file");
    Table latLonTable = new Table();
    latLonTable.readASCII(sourceDir + sourceLatLon, -1, 0, null, null, null, null);
    Test.ensureEqual(latLonTable.nColumns(), latLonColNames.length,
            "latLonTable.nColumns() != latLonColNames.length");
    for (int col = 0; col < latLonColNames.length; col++) {
        //set the column name
        latLonTable.setColumnName(col, latLonColNames[col]);

        //change the columnType
        if (latLonColTypes[col] != String.class) {
            PrimitiveArray pa = PrimitiveArray.factory(latLonColTypes[col], 1, false);
            pa.append(latLonTable.getColumn(col));
            latLonTable.setColumn(col, pa);
        }
    }

    //make/insert lon lat columns
    String2.log("\nmake/insert lon lat columns");
    StringArray llLinePa = (StringArray) latLonTable.findColumn("line");
    StringArray llStationPa = (StringArray) latLonTable.findColumn("station");
    PrimitiveArray lla = latLonTable.findColumn("latitude");
    lla.scaleAddOffset(factor, 0);
    IntArray llLatPa = new IntArray(lla);
    lla = latLonTable.findColumn("longitude");
    lla.scaleAddOffset(factor, 0);
    IntArray llLonPa = new IntArray(lla);

    //add some missing stations   
    //(location calculated by interpolation - Roy said number is distance in km)
    for (int i = 0; i < 4; i++) {
        llLinePa.add("NH");
        llLatPa.add(446517);
    }
    llStationPa.add("NH02");
    llLonPa.add(-1241150);
    llStationPa.add("NH12");
    llLonPa.add(-1243416);
    llStationPa.add("NH30");
    llLonPa.add(-1247667);
    llStationPa.add("NH75");
    llLonPa.add(-1258250);

    StringArray newPlainStationPa = new StringArray();
    StringArray newLinePa = new StringArray();
    StringArray oldStationPa = (StringArray) dataTable.findColumn("station");
    IntArray newLatPa = new IntArray();
    IntArray newLonPa = new IntArray();

    String oPlainStation = "";
    for (int row = 0; row < oldStationPa.size(); row++) {
        String plainStation = oldStationPa.getString(row);
        //remove suffix letter
        while (String2.isLetter(plainStation.charAt(plainStation.length() - 1)))
            plainStation = plainStation.substring(0, plainStation.length() - 1);
        newPlainStationPa.add(plainStation);
        int po = llStationPa.indexOf(plainStation);
        Test.ensureTrue(po >= 0, "plainStation=" + plainStation + " not found starting on row=" + row);
        newLinePa.add(po < 0 ? "" : llLinePa.get(po));
        newLatPa.add(po < 0 ? Math2.roundToInt(mv * factor) : llLatPa.get(po));
        newLonPa.add(po < 0 ? Math2.roundToInt(mv * factor) : llLonPa.get(po));
        oPlainStation = plainStation;
    }
    dataTable.addColumn(3, "plain_station", newPlainStationPa,
            new Attributes().add("description", "The station without the suffix."));
    dataTable.addColumn(0, "line", newLinePa, new Attributes());
    dataTable.addColumn(1, "longitude", newLonPa,
            (new Attributes()).add("units", "degrees_east").add("scale_factor", 1 / (float) factor));
    dataTable.addColumn(2, "latitude", newLatPa,
            (new Attributes()).add("units", "degrees_north").add("scale_factor", 1 / (float) factor));

    String2.log("\ncolumnNames=" + String2.toCSSVString(dataTable.getColumnNames()) + "\n");

    //save in files
    StringArray oldStationCodePa = (StringArray) dataTable.findColumn("station_code");
    String lastStationCode = oldStationCodePa.get(0);
    int startRow = 0;
    int nRows = oldStationCodePa.size();
    for (int row = 0; row < nRows; row++) {
        if (row == nRows - 1 || !oldStationCodePa.get(row).equals(lastStationCode)) {
            int lastRow = row == nRows - 1 ? row : row - 1;
            Test.ensureTrue(oldStationPa.get(row).length() > 0, "row=" + row + " station=''");
            Test.ensureTrue(oldStationCodePa.get(row).length() > 0, "row=" + row + " oldStation=''");
            String eStation = String2.encodeFileNameSafe(oldStationPa.get(row));
            String eStationCode = String2.encodeFileNameSafe(oldStationCodePa.get(row));
            String fullName = destDir + eStation + "/" + eStationCode + ".nc";
            File2.makeDirectory(destDir + eStation + "/");

            Table table = new Table();
            for (int col = 0; col < dataTable.nColumns(); col++) {
                PrimitiveArray oldPa = dataTable.getColumn(col);
                PrimitiveArray newPa = PrimitiveArray.factory(oldPa.elementClass(), lastRow - startRow + 1,
                        false);
                for (int tRow = startRow; tRow <= lastRow; tRow++)
                    newPa.addString(oldPa.getString(tRow));
                table.addColumn(col, dataTable.getColumnName(col), newPa,
                        (Attributes) (dataTable.columnAttributes(col).clone()));
            }
            table.saveAsFlatNc(fullName, "row", false);

            if (startRow < 100 || row == nRows - 1)
                String2.log(table.toCSVString());
            //if (startRow > 100) Test.ensureTrue(false, "Evaluate the tables.");

            lastStationCode = oldStationCodePa.get(row);
            startRow = lastRow + 1;
        }
    }
    String2.log("Finished!");

}