Example usage for java.lang Integer compare

List of usage examples for java.lang Integer compare

Introduction

On this page you can find example usage for java.lang.Integer.compare.

Prototype

public static int compare(int x, int y) 

Document

Compares two int values numerically.
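
The return value follows the usual comparator contract: a negative value when x < y, zero when x == y, and a positive value when x > y. Below is a minimal, self-contained sketch of this contract (the class name and values are only illustrative):

public class IntegerCompareDemo {
    public static void main(String[] args) {
        System.out.println(Integer.compare(3, 7));  // negative: 3 < 7
        System.out.println(Integer.compare(5, 5));  // zero: equal values
        System.out.println(Integer.compare(9, 2));  // positive: 9 > 2
    }
}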

Usage

From source file: onl.netfishers.netshot.RestService.java

/**
 * Gets the device's last 20 tasks.
 *
 * @param request the request
 * @param id the id
 * @return the device tasks
 * @throws WebApplicationException the web application exception
 */
@SuppressWarnings("unchecked")
@GET
@Path("devices/{id}/tasks")
@RolesAllowed("readonly")
@Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
public List<Task> getDeviceTasks(@PathParam("id") Long id) throws WebApplicationException {
    logger.debug("REST request, get device {} tasks.", id);
    Session session = Database.getSession();
    try {
        final int max = 20;
        final Class<?>[] taskTypes = new Class<?>[] { CheckComplianceTask.class, DiscoverDeviceTypeTask.class,
                TakeSnapshotTask.class, RunDeviceScriptTask.class };
        final Criterion[] restrictions = new Criterion[] { Restrictions.eq("t.device.id", id),
                Restrictions.eq("t.deviceId", id), Restrictions.eq("t.device.id", id),
                Restrictions.eq("t.device.id", id) };
        List<Task> tasks = new ArrayList<Task>();
        for (int i = 0; i < taskTypes.length; i++) {
            List<Task> typeTasks = session.createCriteria(taskTypes[i], "t").add(restrictions[i]).list();
            tasks.addAll(typeTasks);
        }
        Collections.sort(tasks, new Comparator<Task>() {
            private int getPriority(Task.Status status) {
                switch (status) {
                case RUNNING:
                    return 1;
                case WAITING:
                    return 2;
                case SCHEDULED:
                    return 3;
                case NEW:
                    return 4;
                default:
                    return 10;
                }
            }

            private Date getSignificantDate(Task t) {
                if (t.getExecutionDate() == null) {
                    return t.getChangeDate();
                } else {
                    return t.getExecutionDate();
                }
            }

            @Override
            public int compare(Task o1, Task o2) {
                int statusDiff = Integer.compare(this.getPriority(o1.getStatus()),
                        this.getPriority(o2.getStatus()));
                if (statusDiff == 0) {
                    Date d1 = this.getSignificantDate(o1);
                    Date d2 = this.getSignificantDate(o2);
                    if (d1 == null) {
                        if (d2 == null) {
                            return 0;
                        } else {
                            return -1;
                        }
                    } else {
                        if (d2 == null) {
                            return 1;
                        } else {
                            return d2.compareTo(d1);
                        }
                    }
                }
                return statusDiff;
            }
        });
        return tasks.subList(0, (max > tasks.size() ? tasks.size() : max));
    } catch (Exception e) {
        logger.error("Unable to fetch the tasks.", e);
        throw new NetshotBadRequestException("Unable to fetch the tasks",
                NetshotBadRequestException.NETSHOT_DATABASE_ACCESS_ERROR);
    } finally {
        session.close();
    }
}
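
On Java 8 and later, the same two-level ordering (status priority first, then the most recent significant date, with null dates sorted first) can be assembled from Comparator combinators instead of a hand-written compare method. This is only a hedged sketch: it assumes Java 8+, java.util.Comparator and java.util.Date imports, and helpers equivalent to the getPriority and getSignificantDate methods above being in scope.

// Hypothetical fragment; getPriority and getSignificantDate stand in for the
// private helper methods defined in the anonymous comparator above.
Comparator<Task> byPriorityThenNewestDate = Comparator
        .comparingInt((Task t) -> getPriority(t.getStatus()))
        .thenComparing(t -> getSignificantDate(t),
                Comparator.nullsFirst(Comparator.<Date>reverseOrder()));
tasks.sort(byPriorityThenNewestDate);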

From source file: org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer.java

private static int[] getPartitionsByTopic(String topic, Producer<byte[], byte[]> producer) {
    // the fetched list is immutable, so we're creating a mutable copy in order to sort it
    List<PartitionInfo> partitionsList = new ArrayList<>(producer.partitionsFor(topic));

    // sort the partitions by partition id to make sure the fetched partition list is the same across subtasks
    Collections.sort(partitionsList, new Comparator<PartitionInfo>() {
        @Override
        public int compare(PartitionInfo o1, PartitionInfo o2) {
            return Integer.compare(o1.partition(), o2.partition());
        }
    });

    int[] partitions = new int[partitionsList.size()];
    for (int i = 0; i < partitions.length; i++) {
        partitions[i] = partitionsList.get(i).partition();
    }

    return partitions;
}

From source file: org.ejbca.ui.web.admin.configuration.SystemConfigMBean.java

private ArrayList<CustomCertExtensionInfo> getNewAvailableCustomCertExtensions() {
    availableCustomCertExtensionsConfig = getEjbcaWebBean().getAvailableCustomCertExtensionsConfiguration();
    ArrayList<CustomCertExtensionInfo> extensionsInfo = new ArrayList<CustomCertExtensionInfo>();
    Collection<CertificateExtension> allExtensions = availableCustomCertExtensionsConfig
            .getAllAvailableCustomCertificateExtensions();
    for (CertificateExtension extension : allExtensions) {
        extensionsInfo.add(new CustomCertExtensionInfo(extension));
    }

    Collections.sort(extensionsInfo, new Comparator<CustomCertExtensionInfo>() {
        @Override
        public int compare(CustomCertExtensionInfo first, CustomCertExtensionInfo second) {
            return Integer.compare(first.getId(), second.getId());
        }
    });

    return extensionsInfo;
}

From source file: org.apache.nifi.provenance.MiNiFiPersistentProvenanceRepository.java

/**
 * <p>
 * Merges all of the given Journal Files into a single, merged Provenance Event Log File. As these records are merged, they will be compressed if the repository is configured to compress records.
 * </p>
 * <p>
 * If the repository is configured to compress the data, the file written to may not be the same as the <code>suggestedMergeFile</code>, as a filename extension of '.gz' may be appended. If the
 * journals are successfully merged, the file that they were merged into will be returned. If unable to merge the records (for instance, because the repository has been closed or because the list
 * of journal files was empty), this method will return <code>null</code>.
 * </p>
 *
 * @param journalFiles       the journal files to merge
 * @param suggestedMergeFile the file to write the merged records to
 * @param eventReporter      the event reporter to report any warnings or errors to; may be null.
 * @return the file that the given journals were merged into, or <code>null</code> if no records were merged.
 * @throws IOException if a problem occurs writing to the mergedFile, reading from a journal
 */
File mergeJournals(final List<File> journalFiles, final File suggestedMergeFile,
        final EventReporter eventReporter) throws IOException {
    if (this.closed.get()) {
        logger.info("Provenance Repository has been closed; will not merge journal files to {}",
                suggestedMergeFile);
        return null;
    }

    if (journalFiles.isEmpty()) {
        logger.debug("Couldn't merge journals: Journal Files is empty; won't merge journals");
        return null;
    }

    Collections.sort(journalFiles, new Comparator<File>() {
        @Override
        public int compare(final File o1, final File o2) {
            final String suffix1 = StringUtils.substringAfterLast(o1.getName(), ".");
            final String suffix2 = StringUtils.substringAfterLast(o2.getName(), ".");

            try {
                final int journalIndex1 = Integer.parseInt(suffix1);
                final int journalIndex2 = Integer.parseInt(suffix2);
                return Integer.compare(journalIndex1, journalIndex2);
            } catch (final NumberFormatException nfe) {
                return o1.getName().compareTo(o2.getName());
            }
        }
    });

    final String firstJournalFile = journalFiles.get(0).getName();
    final String firstFileSuffix = StringUtils.substringAfterLast(firstJournalFile, ".");
    final boolean allPartialFiles = firstFileSuffix.equals("0");

    // check if we have all of the "partial" files for the journal.
    if (allPartialFiles) {
        if (suggestedMergeFile.exists()) {
            // we have all "partial" files and there is already a merged file. Delete the data from the index
            // because the merge file may not be fully merged. We will re-merge.
            logger.warn("Merged Journal File {} already exists; however, all partial journal files also exist "
                    + "so assuming that the merge did not finish. Repeating procedure in order to ensure consistency.");

            // Since we only store the file's basename, block offset, and event ID, and because the newly created file could end up on
            // a different Storage Directory than the original, we need to ensure that we delete both the partially merged
            // file and the TOC file. Otherwise, we could get the wrong copy and have issues retrieving events.
            if (!suggestedMergeFile.delete()) {
                logger.error(
                        "Failed to delete partially written Provenance Journal File {}. This may result in events from this journal "
                                + "file not being able to be displayed. This file should be deleted manually.",
                        suggestedMergeFile);
            }

            final File tocFile = TocUtil.getTocFile(suggestedMergeFile);
            if (tocFile.exists() && !tocFile.delete()) {
                logger.error(
                        "Failed to delete .toc file {}; this may result in not being able to read the Provenance Events from the {} Journal File. "
                                + "This can be corrected by manually deleting the {} file",
                        tocFile, suggestedMergeFile, tocFile);
            }
        }
    } else {
        logger.warn("Cannot merge journal files {} because expected first file to end with extension '.0' "
                + "but it did not; assuming that the files were already merged but only some finished deletion "
                + "before restart. Deleting remaining partial journal files.", journalFiles);

        for (final File file : journalFiles) {
            if (!file.delete() && file.exists()) {
                logger.warn(
                        "Failed to delete unneeded journal file {}; this file should be cleaned up manually",
                        file);
            }
        }

        return null;
    }

    final long startNanos = System.nanoTime();

    // Map each journal to a RecordReader
    final List<RecordReader> readers = new ArrayList<>();
    int records = 0;

    final boolean isCompress = configuration.isCompressOnRollover();
    final File writerFile = isCompress
            ? new File(suggestedMergeFile.getParentFile(), suggestedMergeFile.getName() + ".gz")
            : suggestedMergeFile;

    try {
        for (final File journalFile : journalFiles) {
            try {
                // Use MAX_VALUE for number of chars because we don't want to truncate the value as we write it
                // out. This allows us to later decide that we want more characters and still be able to retrieve
                // the entire event.
                readers.add(RecordReaders.newRecordReader(journalFile, null, Integer.MAX_VALUE));
            } catch (final EOFException eof) {
                // there's nothing here. Skip over it.
            } catch (final IOException ioe) {
                logger.warn("Unable to merge {} with other Journal Files due to {}", journalFile,
                        ioe.toString());
                if (logger.isDebugEnabled()) {
                    logger.warn("", ioe);
                }

                if (eventReporter != null) {
                    eventReporter.reportEvent(Severity.ERROR, EVENT_CATEGORY, "re " + ioe.toString());
                }
            }
        }

        // Create a Map so that the key is the next record available from a reader and the value is the Reader from which
        // the record came. This sorted map is then used so that we are able to always get the first entry, which is the next
        // lowest record id
        final SortedMap<StandardProvenanceEventRecord, RecordReader> recordToReaderMap = new TreeMap<>(
                new Comparator<StandardProvenanceEventRecord>() {
                    @Override
                    public int compare(final StandardProvenanceEventRecord o1,
                            final StandardProvenanceEventRecord o2) {
                        return Long.compare(o1.getEventId(), o2.getEventId());
                    }
                });

        long minEventId = 0L;
        long earliestTimestamp = System.currentTimeMillis();
        for (final RecordReader reader : readers) {
            StandardProvenanceEventRecord record = null;

            try {
                record = reader.nextRecord();
            } catch (final EOFException eof) {
            } catch (final Exception e) {
                logger.warn("Failed to generate Provenance Event Record from Journal due to " + e
                        + "; it's possible that the record wasn't "
                        + "completely written to the file. This record will be skipped.");
                if (logger.isDebugEnabled()) {
                    logger.warn("", e);
                }

                if (eventReporter != null) {
                    eventReporter.reportEvent(Severity.WARNING, EVENT_CATEGORY,
                            "Failed to read Provenance Event Record from Journal due to " + e
                                    + "; it's possible that the record wasn't completely written to the file. This record will be skipped.");
                }
            }

            if (record == null) {
                continue;
            }

            if (record.getEventTime() < earliestTimestamp) {
                earliestTimestamp = record.getEventTime();
            }

            if (record.getEventId() < minEventId) {
                minEventId = record.getEventId();
            }

            recordToReaderMap.put(record, reader);
        }

        // loop over each entry in the map, persisting the records to the merged file in order, and populating the map
        // with the next entry from the journal file from which the previous record was written.
        try (final RecordWriter writer = RecordWriters.newSchemaRecordWriter(writerFile,
                configuration.isCompressOnRollover(), true)) {
            writer.writeHeader(minEventId);

            while (!recordToReaderMap.isEmpty()) {
                final Map.Entry<StandardProvenanceEventRecord, RecordReader> entry = recordToReaderMap
                        .entrySet().iterator().next();
                final StandardProvenanceEventRecord record = entry.getKey();
                final RecordReader reader = entry.getValue();

                writer.writeRecord(record, record.getEventId());
                final int blockIndex = writer.getTocWriter().getCurrentBlockIndex();

                records++;

                // Remove this entry from the map
                recordToReaderMap.remove(record);

                // Get the next entry from this reader and add it to the map
                StandardProvenanceEventRecord nextRecord = null;

                try {
                    nextRecord = reader.nextRecord();
                } catch (final EOFException eof) {
                }

                if (nextRecord != null) {
                    recordToReaderMap.put(nextRecord, reader);
                }
            }
        }
    } finally {
        for (final RecordReader reader : readers) {
            try {
                reader.close();
            } catch (final IOException ioe) {
            }
        }
    }

    // Success. Remove all of the journal files, as they're no longer needed, now that they've been merged.
    for (final File journalFile : journalFiles) {
        if (!journalFile.delete() && journalFile.exists()) {
            logger.warn("Failed to remove temporary journal file {}; this file should be cleaned up manually",
                    journalFile.getAbsolutePath());

            if (eventReporter != null) {
                eventReporter.reportEvent(Severity.WARNING, EVENT_CATEGORY,
                        "Failed to remove temporary journal file " + journalFile.getAbsolutePath()
                                + "; this file should be cleaned up manually");
            }
        }

        final File tocFile = getTocFile(journalFile);
        if (!tocFile.delete() && tocFile.exists()) {
            logger.warn(
                    "Failed to remove temporary journal TOC file {}; this file should be cleaned up manually",
                    tocFile.getAbsolutePath());

            if (eventReporter != null) {
                eventReporter.reportEvent(Severity.WARNING, EVENT_CATEGORY,
                        "Failed to remove temporary journal TOC file " + tocFile.getAbsolutePath()
                                + "; this file should be cleaned up manually");
            }
        }
    }

    if (records == 0) {
        writerFile.delete();
        logger.debug("Couldn't merge journals: No Records to merge");
        return null;
    } else {
        final long nanos = System.nanoTime() - startNanos;
        final long millis = TimeUnit.MILLISECONDS.convert(nanos, TimeUnit.NANOSECONDS);
        logger.info(
                "Successfully merged {} journal files ({} records) into single Provenance Log File {} in {} milliseconds",
                journalFiles.size(), records, suggestedMergeFile, millis);
    }

    return writerFile;
}

From source file: org.apache.giraph.master.BspServiceMaster.java

@Override
public SuperstepState coordinateSuperstep() throws KeeperException, InterruptedException {
    // 1. Get chosen workers and set up watches on them.
    // 2. Assign partitions to the workers
    //    (possibly reloading from a superstep)
    // 3. Wait for all workers to complete
    // 4. Collect and process aggregators
    // 5. Create superstep finished node
    // 6. If the checkpoint frequency is met, finalize the checkpoint

    for (MasterObserver observer : observers) {
        observer.preSuperstep(getSuperstep());
        getContext().progress();
    }

    chosenWorkerInfoList = checkWorkers();
    if (chosenWorkerInfoList == null) {
        setJobStateFailed(
                "coordinateSuperstep: Not enough healthy workers for " + "superstep " + getSuperstep());
    } else {
        // Sort this list, so order stays the same over supersteps
        Collections.sort(chosenWorkerInfoList, new Comparator<WorkerInfo>() {
            @Override
            public int compare(WorkerInfo wi1, WorkerInfo wi2) {
                return Integer.compare(wi1.getTaskId(), wi2.getTaskId());
            }
        });
        for (WorkerInfo workerInfo : chosenWorkerInfoList) {
            String workerInfoHealthyPath = getWorkerInfoHealthyPath(getApplicationAttempt(), getSuperstep())
                    + "/" + workerInfo.getHostnameId();
            if (getZkExt().exists(workerInfoHealthyPath, true) == null) {
                LOG.warn("coordinateSuperstep: Chosen worker " + workerInfoHealthyPath
                        + " is no longer valid, failing superstep");
            }
        }
    }

    // We need to finalize aggregators from previous superstep
    if (getSuperstep() >= 0) {
        aggregatorTranslation.postMasterCompute();
        globalCommHandler.getAggregatorHandler().finishSuperstep();
    }

    masterClient.openConnections();

    GiraphStats.getInstance().getCurrentWorkers().setValue(chosenWorkerInfoList.size());
    assignPartitionOwners();

    // Finalize the valid checkpoint file prefixes and possibly
    // the aggregators.
    if (checkpointStatus != CheckpointStatus.NONE) {
        String workerWroteCheckpointPath = getWorkerWroteCheckpointPath(getApplicationAttempt(),
                getSuperstep());
        // first wait for all the workers to write their checkpoint data
        if (!barrierOnWorkerList(workerWroteCheckpointPath, chosenWorkerInfoList,
                getWorkerWroteCheckpointEvent(), checkpointStatus == CheckpointStatus.CHECKPOINT_AND_HALT)) {
            return SuperstepState.WORKER_FAILURE;
        }
        try {
            finalizeCheckpoint(getSuperstep(), chosenWorkerInfoList);
        } catch (IOException e) {
            throw new IllegalStateException("coordinateSuperstep: IOException on finalizing checkpoint", e);
        }
        if (checkpointStatus == CheckpointStatus.CHECKPOINT_AND_HALT) {
            return SuperstepState.CHECKPOINT_AND_HALT;
        }
    }

    // We need to send aggregators to worker owners after new worker assignments
    if (getSuperstep() >= 0) {
        globalCommHandler.getAggregatorHandler().sendDataToOwners(masterClient);
    }

    if (getSuperstep() == INPUT_SUPERSTEP) {
        // Initialize aggregators before coordinating
        initializeAggregatorInputSuperstep();
        coordinateInputSplits();
    }

    String finishedWorkerPath = getWorkerFinishedPath(getApplicationAttempt(), getSuperstep());
    if (!barrierOnWorkerList(finishedWorkerPath, chosenWorkerInfoList, getSuperstepStateChangedEvent(),
            false)) {
        return SuperstepState.WORKER_FAILURE;
    }

    // Collect aggregator values, then run the master.compute() and
    // finally save the aggregator values
    globalCommHandler.getAggregatorHandler().prepareSuperstep();
    aggregatorTranslation.prepareSuperstep();

    SuperstepClasses superstepClasses = prepareMasterCompute(getSuperstep() + 1);
    doMasterCompute();

    // If the master is halted or all the vertices voted to halt and there
    // are no more messages in the system, stop the computation
    GlobalStats globalStats = aggregateWorkerStats(getSuperstep());
    if (masterCompute.isHalted() || (globalStats.getFinishedVertexCount() == globalStats.getVertexCount()
            && globalStats.getMessageCount() == 0)) {
        globalStats.setHaltComputation(true);
    } else if (getZkExt().exists(haltComputationPath, false) != null) {
        if (LOG.isInfoEnabled()) {
            LOG.info("Halting computation because halt zookeeper node was created");
        }
        globalStats.setHaltComputation(true);
    }

    // If we have completed the maximum number of supersteps, stop
    // the computation
    if (maxNumberOfSupersteps != GiraphConstants.MAX_NUMBER_OF_SUPERSTEPS.getDefaultValue()
            && (getSuperstep() == maxNumberOfSupersteps - 1)) {
        if (LOG.isInfoEnabled()) {
            LOG.info("coordinateSuperstep: Finished " + maxNumberOfSupersteps
                    + " supersteps (max specified by the user), halting");
        }
        globalStats.setHaltComputation(true);
    }

    // Superstep 0 doesn't need to have matching types (Message types may not
    // match) and if the computation is halted, no need to check any of
    // the types.
    if (!globalStats.getHaltComputation()) {
        superstepClasses.verifyTypesMatch(getSuperstep() > 0);
    }
    getConfiguration().updateSuperstepClasses(superstepClasses);

    //Signal workers that we want to checkpoint
    checkpointStatus = getCheckpointStatus(getSuperstep() + 1);
    globalStats.setCheckpointStatus(checkpointStatus);
    // Let everyone know the aggregated application state through the
    // superstep finishing znode.
    String superstepFinishedNode = getSuperstepFinishedPath(getApplicationAttempt(), getSuperstep());

    WritableUtils.writeToZnode(getZkExt(), superstepFinishedNode, -1, globalStats, superstepClasses);
    updateCounters(globalStats);

    cleanUpOldSuperstep(getSuperstep() - 1);
    incrCachedSuperstep();
    // Counter starts at zero, so no need to increment
    if (getSuperstep() > 0) {
        GiraphStats.getInstance().getSuperstepCounter().increment();
    }
    SuperstepState superstepState;
    if (globalStats.getHaltComputation()) {
        superstepState = SuperstepState.ALL_SUPERSTEPS_DONE;
    } else {
        superstepState = SuperstepState.THIS_SUPERSTEP_DONE;
    }
    globalCommHandler.getAggregatorHandler().writeAggregators(getSuperstep(), superstepState);

    return superstepState;
}

From source file: org.openecomp.sdc.be.components.impl.GroupBusinessLogic.java

public int getNextVfModuleNameCounter(Collection<GroupDefinition> groups) {
    int counter = 0;
    if (groups != null && !groups.isEmpty()) {
        List<Integer> counters = groups.stream().filter(group -> Pattern
                .compile(Constants.MODULE_NEW_NAME_PATTERN).matcher(group.getName()).matches()
                || Pattern.compile(Constants.MODULE_OLD_NAME_PATTERN).matcher(group.getName()).matches())
                .map(group -> Integer.parseInt(group.getName().split(Constants.MODULE_NAME_DELIMITER)[1]))
                .collect(Collectors.toList());
        counter = (counters == null || counters.isEmpty()) ? 0
                : counters.stream().max((a, b) -> Integer.compare(a, b)).get() + 1;
    }
    return counter;
}
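
The lambda (a, b) -> Integer.compare(a, b) passed to max above is simply a natural-order comparator for Integer values; the method reference Integer::compare or Comparator.naturalOrder() expresses the same thing. A minimal, self-contained sketch with made-up counter values:

import java.util.Arrays;
import java.util.Comparator;
import java.util.List;

public class MaxCounterDemo {
    public static void main(String[] args) {
        List<Integer> counters = Arrays.asList(3, 11, 7);
        // All three forms pick the same maximum (11):
        int a = counters.stream().max((x, y) -> Integer.compare(x, y)).get();
        int b = counters.stream().max(Integer::compare).get();
        int c = counters.stream().max(Comparator.naturalOrder()).get();
        System.out.println(a + " " + b + " " + c);
    }
}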

From source file: org.apache.carbondata.core.scan.filter.FilterUtil.java

public static int compareFilterKeyBasedOnDataType(String dictionaryVal, String memberVal, DataType dataType) {
    try {
        if (dataType == DataTypes.BOOLEAN) {
            return Boolean.compare((Boolean.parseBoolean(dictionaryVal)), (Boolean.parseBoolean(memberVal)));
        } else if (dataType == DataTypes.SHORT) {
            return Short.compare((Short.parseShort(dictionaryVal)), (Short.parseShort(memberVal)));
        } else if (dataType == DataTypes.INT) {
            return Integer.compare((Integer.parseInt(dictionaryVal)), (Integer.parseInt(memberVal)));
        } else if (dataType == DataTypes.DOUBLE) {
            return Double.compare((Double.parseDouble(dictionaryVal)), (Double.parseDouble(memberVal)));
        } else if (dataType == DataTypes.LONG) {
            return Long.compare((Long.parseLong(dictionaryVal)), (Long.parseLong(memberVal)));
        } else if (dataType == DataTypes.DATE || dataType == DataTypes.TIMESTAMP) {
            String format = CarbonUtil.getFormatFromProperty(dataType);
            SimpleDateFormat parser = new SimpleDateFormat(format);
            Date dateToStr;
            Date dictionaryDate;
            dateToStr = parser.parse(memberVal);
            dictionaryDate = parser.parse(dictionaryVal);
            return dictionaryDate.compareTo(dateToStr);
        } else if (DataTypes.isDecimal(dataType)) {
            java.math.BigDecimal javaDecValForDictVal = new java.math.BigDecimal(dictionaryVal);
            java.math.BigDecimal javaDecValForMemberVal = new java.math.BigDecimal(memberVal);
            return javaDecValForDictVal.compareTo(javaDecValForMemberVal);
        } else {
            return -1;
        }
    } catch (ParseException | NumberFormatException e) {
        return -1;
    }
}

From source file: org.ballerinalang.composer.service.workspace.rest.datamodel.BLangJSONModelBuilder.java

private int compareNodeLocations(NodeLocation l1, NodeLocation l2) {
    if (l1.getLineNumber() == l2.getLineNumber()) {
        return Integer.compare(l1.startColumn, l2.startColumn);
    } else {
        return Integer.compare(l1.getLineNumber(), l2.getLineNumber());
    }
}

From source file: forge.game.card.Card.java

/** {@inheritDoc} */
@Override
public final int compareTo(final Card that) {
    if (that == null) {
        /*
         * "Here we can arbitrarily decide that all non-null Cards are
         * `greater than' null Cards. It doesn't really matter what we
         * return in this case, as long as it is consistent. I rather think
         * of null as being lowly." --Braids
         */
        return 1;
    }
    return Integer.compare(id, that.id);
}
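
Using Integer.compare(id, that.id) here, rather than the subtraction idiom return id - that.id;, also sidesteps a classic overflow bug: when the two values are far apart, the subtraction can wrap around and report the wrong sign. A minimal sketch of the pitfall (values chosen only for illustration):

public class SubtractionPitfallDemo {
    public static void main(String[] args) {
        int small = Integer.MIN_VALUE;
        int large = 1;
        // The subtraction idiom overflows and claims small > large:
        System.out.println(small - large);                   // 2147483647 (wrong sign)
        // Integer.compare reports the correct ordering:
        System.out.println(Integer.compare(small, large));   // -1
    }
}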