Example usage for com.google.common.math IntMath mod

Introduction

On this page you can find example usages of IntMath.mod from com.google.common.math.

Prototype

public static int mod(int x, int m) 

Document

Returns x mod m, a non-negative value less than m.
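
Unlike Java's % operator, which keeps the sign of the dividend, IntMath.mod always returns a value in [0, m) and throws ArithmeticException if m is not positive. A minimal sketch of the difference (assuming only Guava on the classpath; the class name is illustrative):

import com.google.common.math.IntMath;

public class IntMathModDemo {
    public static void main(String[] args) {
        System.out.println(-7 % 4);             // -3: % keeps the dividend's sign
        System.out.println(IntMath.mod(-7, 4)); //  1: mod is always non-negative
        System.out.println(IntMath.mod(7, 4));  //  3
        // IntMath.mod(7, 0) or IntMath.mod(7, -4) would throw ArithmeticException
    }
}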

Usage

From source file:org.terasology.commonworld.heightmap.HeightMaps.java

/**
 * @param array the underlying array
 * @return a height map that is mirrored along the diagonal (1, -1)
 */
public static HeightMap fromArray2D(final IntArray2D array) {
    return new HeightMapAdapter() {

        @Override
        public int apply(int x, int z) {
            // Wrap (possibly negative) coordinates into the array bounds; IntMath.mod never returns a negative value
            int lx = IntMath.mod(x, array.getWidth());
            int lz = IntMath.mod(z, array.getHeight());
            return array.get(lx, lz);
        }
    };
}

From source file:com.jeffreybosboom.tenmillionbot.Main.java

private static Map<Move, Long> horizMatch(Tile[][] board) {
    Map<Move, Long> moves = new HashMap<>();
    for (int i = 0; i < board.length; ++i) {
        for (int j = 0; j < board[i].length - 1; ++j) {
            if (board[i][j] == null)
                continue;
            // "X_X": a matching (or RAINBOW) tile rotated into the gap column completes three in a row
            if ((j + 2) < board[i].length && board[i][j] == board[i][j + 2]) {
                for (int k = 0; k < board.length; ++k)
                    if (board[k][j + 1] == board[i][j] || board[k][j + 1] == Tile.RAINBOW)
                        // IntMath.mod(i - k, board.length) is the non-negative wrap-around distance from row k to row i
                        moves.merge(Move.col(j + 1, IntMath.mod(i - k, board.length)),
                                1L << board[i][j].ordinal() * 8, Long::sum);
            }
            // "XX": a matching (or RAINBOW) tile rotated into the column on either side completes three in a row
            if (board[i][j] == board[i][j + 1]) {
                if ((j - 1) >= 0)
                    for (int k = 0; k < board.length; ++k)
                        if (board[k][j - 1] == board[i][j] || board[k][j - 1] == Tile.RAINBOW)
                            moves.merge(Move.col(j - 1, IntMath.mod(i - k, board.length)),
                                    1L << board[i][j].ordinal() * 8, Long::sum);
                if ((j + 2) < board[i].length)
                    for (int k = 0; k < board.length; ++k)
                        if (board[k][j + 2] == board[i][j] || board[k][j + 2] == Tile.RAINBOW)
                            moves.merge(Move.col(j + 2, IntMath.mod(i - k, board.length)),
                                    1L << board[i][j].ordinal() * 8, Long::sum);
            }
        }
    }
    return moves;
}
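
The third argument to Move.col above is IntMath.mod(i - k, board.length): the non-negative wrap-around distance from row k to row i. A standalone sketch of that calculation (ColumnRotation and shiftDistance are illustrative names, not part of the original bot; whether the shift is applied upward or downward depends on Move's semantics):

import com.google.common.math.IntMath;

final class ColumnRotation {
    // Wrap-around distance from row 'from' to row 'to' on a column of 'height' cells.
    // IntMath.mod guarantees a result in [0, height), even when to < from.
    static int shiftDistance(int to, int from, int height) {
        return IntMath.mod(to - from, height);
    }

    public static void main(String[] args) {
        System.out.println(shiftDistance(2, 5, 8)); // 5 (wraps past the end)
        System.out.println(shiftDistance(5, 2, 8)); // 3
    }
}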

From source file:org.terasology.utilities.procedural.SubSampledNoise.java

private Rect2i determineRequiredRegion(Rect2i region) {
    // Snap the min corner down to a multiple of sampleRate; extend the max corner to the end of
    // its sampling cell (the literal 4 matches a sampleRate of 4)
    int newMinX = region.minX() - IntMath.mod(region.minX(), sampleRate);
    int newMinY = region.minY() - IntMath.mod(region.minY(), sampleRate);
    int newMaxX = region.maxX() + 4 - IntMath.mod(region.maxX(), sampleRate) - 1;
    int newMaxY = region.maxY() + 4 - IntMath.mod(region.maxY(), sampleRate) - 1;
    return Rect2i.createFromMinAndMax(newMinX, newMinY, newMaxX, newMaxY);
}
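
The min calculation above is the standard idiom for flooring a coordinate to a multiple of a step: subtracting IntMath.mod(v, step) works even for negative coordinates, where a plain v - (v % step) would round toward zero instead of down. A standalone sketch of the idiom (GridAlign and its method names are illustrative; the max calculation above hardcodes 4 where this sketch uses the step, and the 3D variant later on this page uses the same pattern):

import com.google.common.math.IntMath;

final class GridAlign {
    // Largest multiple of step that is <= v (step > 0); correct for negative v as well.
    static int floorToStep(int v, int step) {
        return v - IntMath.mod(v, step);
    }

    // Last coordinate of the step-sized cell containing v, i.e. floorToStep(v, step) + step - 1.
    static int cellEnd(int v, int step) {
        return v + step - IntMath.mod(v, step) - 1;
    }

    public static void main(String[] args) {
        System.out.println(floorToStep(-3, 4)); // -4
        System.out.println(cellEnd(-3, 4));     // -1
        System.out.println(floorToStep(9, 4));  //  8
        System.out.println(cellEnd(9, 4));      // 11
    }
}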

From source file:com.cloudera.oryx.rdf.common.tree.DecisionForest.java

public DecisionForest(final int numTrees, double fractionOfFeaturesToTry, final int minNodeSize,
        final double minInfoGainNats, final int suggestedMaxSplitCandidates, final int maxDepth,
        final double sampleRate, final ExampleSet examples) {
    Preconditions.checkArgument(numTrees > 1);
    final int numFeatures = examples.getNumFeatures();
    Preconditions.checkArgument(fractionOfFeaturesToTry > 0.0 && fractionOfFeaturesToTry <= 1.0);
    final int featuresToTry = FastMath.max(1, (int) (fractionOfFeaturesToTry * numFeatures));
    Preconditions.checkArgument(numFeatures >= 1);
    Preconditions.checkArgument(minNodeSize >= 1);
    Preconditions.checkArgument(minInfoGainNats >= 0.0);
    Preconditions.checkArgument(suggestedMaxSplitCandidates >= 1);
    Preconditions.checkArgument(maxDepth >= 1);
    Preconditions.checkArgument(sampleRate > 0.0 && sampleRate <= 1.0);

    weights = new double[numTrees];
    Arrays.fill(weights, 1.0);
    evaluations = new double[numTrees];
    Arrays.fill(evaluations, Double.NaN);
    final double[][] perTreeFeatureImportances = new double[numTrees][];

    // Going to set an arbitrary upper bound on the training size of about 90%
    int maxFolds = FastMath.min(numTrees - 1, (int) (0.9 * numTrees));
    // Going to set an arbitrary lower bound on the CV size of about 10%
    int minFolds = FastMath.max(1, (int) (0.1 * numTrees));
    final int folds = FastMath.min(maxFolds, FastMath.max(minFolds, (int) (sampleRate * numTrees)));

    trees = new DecisionTree[numTrees];

    ExecutorService executor = Executors.newFixedThreadPool(determineParallelism(trees.length));
    try {
        Collection<Future<Object>> futures = Lists.newArrayListWithCapacity(trees.length);
        for (int i = 0; i < numTrees; i++) {
            final int treeID = i;
            futures.add(executor.submit(new Callable<Object>() {
                @Override
                public Void call() throws Exception {
                    Collection<Example> allExamples = examples.getExamples();
                    int totalExamples = allExamples.size();
                    int expectedTrainingSize = (int) (totalExamples * sampleRate);
                    int expectedCVSize = totalExamples - expectedTrainingSize;
                    List<Example> trainingExamples = Lists.newArrayListWithExpectedSize(expectedTrainingSize);
                    List<Example> cvExamples = Lists.newArrayListWithExpectedSize(expectedCVSize);
                    for (Example example : allExamples) {
                        // Deterministically assign each example to 'folds' of the trees for training;
                        // the nested IntMath.mod calls keep the bucket non-negative even for negative hash codes
                        if (IntMath.mod(IntMath.mod(example.hashCode(), numTrees) - treeID, numTrees) < folds) {
                            trainingExamples.add(example);
                        } else {
                            cvExamples.add(example);
                        }
                    }

                    Preconditions.checkState(!trainingExamples.isEmpty(), "No training examples sampled?");
                    Preconditions.checkState(!cvExamples.isEmpty(), "No CV examples sampled?");

                    trees[treeID] = new DecisionTree(numFeatures, featuresToTry, minNodeSize, minInfoGainNats,
                            suggestedMaxSplitCandidates, maxDepth, examples.subset(trainingExamples));
                    log.info("Finished tree {}", treeID);
                    ExampleSet cvExampleSet = examples.subset(cvExamples);
                    double[] weightEval = Evaluation.evaluateToWeight(trees[treeID], cvExampleSet);
                    weights[treeID] = weightEval[0];
                    evaluations[treeID] = weightEval[1];
                    perTreeFeatureImportances[treeID] = trees[treeID].featureImportance(cvExampleSet);
                    log.info("Tree {} eval: {}", treeID, weightEval[1]);
                    return null;
                }
            }));
        }
        ExecutorUtils.checkExceptions(futures);
    } finally {
        ExecutorUtils.shutdownNowAndAwait(executor);
    }

    featureImportances = new double[numFeatures];
    for (double[] perTreeFeatureImportance : perTreeFeatureImportances) {
        for (int i = 0; i < numFeatures; i++) {
            featureImportances[i] += perTreeFeatureImportance[i];
        }
    }
    for (int i = 0; i < numFeatures; i++) {
        featureImportances[i] /= numTrees;
    }
}
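
The double IntMath.mod expression in the loop above is what shares each example among 'folds' of the trees as training data and holds it out from the rest for cross-validation. A standalone sketch of that assignment (FoldAssignment and usedForTraining are illustrative names, not part of Oryx):

import com.google.common.math.IntMath;

final class FoldAssignment {
    // True if an example with this hash should train tree 'treeId' when each example
    // trains 'folds' of the 'numTrees' trees. The inner IntMath.mod handles negative
    // hash codes; the outer one keeps the bucket in [0, numTrees) even when the
    // inner result is smaller than treeId.
    static boolean usedForTraining(int hash, int treeId, int numTrees, int folds) {
        return IntMath.mod(IntMath.mod(hash, numTrees) - treeId, numTrees) < folds;
    }

    public static void main(String[] args) {
        int hash = "some example".hashCode();
        for (int treeId = 0; treeId < 10; treeId++) {
            // With folds = 9 of 10 trees, exactly one tree holds this example out for CV
            System.out.println(treeId + ": " + usedForTraining(hash, treeId, 10, 9));
        }
    }
}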

From source file:org.terasology.utilities.procedural.SubSampledNoise3D.java

private Region3i determineRequiredRegion(Region3i region) {
    int newMinX = region.minX() - IntMath.mod(region.minX(), sampleRate);
    int newMinY = region.minY() - IntMath.mod(region.minY(), sampleRate);
    int newMinZ = region.minZ() - IntMath.mod(region.minZ(), sampleRate);
    int newMaxX = region.maxX() + 4 - IntMath.mod(region.maxX(), sampleRate) - 1;
    int newMaxY = region.maxY() + 4 - IntMath.mod(region.maxY(), sampleRate) - 1;
    int newMaxZ = region.maxZ() + 4 - IntMath.mod(region.maxZ(), sampleRate) - 1;
    return Region3i.createFromMinMax(new Vector3i(newMinX, newMinY, newMinZ),
            new Vector3i(newMaxX, newMaxY, newMaxZ));
}

From source file:org.apache.hadoop.hive.ql.parse.SemanticAnalyzer.java

protected static ASTNode rewriteGroupingFunctionAST(final List<ASTNode> grpByAstExprs, ASTNode targetNode,
        final boolean noneSet) throws SemanticException {

    TreeVisitorAction action = new TreeVisitorAction() {

        @Override
        public Object pre(Object t) {
            return t;
        }

        @Override
        public Object post(Object t) {
            ASTNode root = (ASTNode) t;
            if (root.getType() == HiveParser.TOK_FUNCTION) {
                ASTNode func = (ASTNode) ParseDriver.adaptor.getChild(root, 0);
                if (func.getText().equals("grouping") && func.getChildCount() == 0) {
                    int numberOperands = ParseDriver.adaptor.getChildCount(root);
                    // We implement this logic using replaceChildren instead of replacing
                    // the root node itself because windowing logic stores multiple
                    // pointers to the AST, and replacing root might lead to some pointers
                    // leading to non-rewritten version
                    ASTNode newRoot = new ASTNode();
                    // Rewritten grouping function
                    ASTNode groupingFunc = (ASTNode) ParseDriver.adaptor.create(HiveParser.Identifier,
                            "grouping");
                    ParseDriver.adaptor.addChild(groupingFunc,
                            ParseDriver.adaptor.create(HiveParser.Identifier, "rewritten"));
                    newRoot.addChild(groupingFunc);
                    // Grouping ID reference
                    ASTNode childGroupingID;
                    if (noneSet) {
                        // Query does not contain CUBE, ROLLUP, or GROUPING SETS, and thus,
                        // grouping should return 0
                        childGroupingID = (ASTNode) ParseDriver.adaptor.create(HiveParser.IntegralLiteral,
                                String.valueOf(0));
                    } else {
                        // We refer to grouping_id column
                        childGroupingID = (ASTNode) ParseDriver.adaptor.create(HiveParser.TOK_TABLE_OR_COL,
                                "TOK_TABLE_OR_COL");
                        ParseDriver.adaptor.addChild(childGroupingID, ParseDriver.adaptor
                                .create(HiveParser.Identifier, VirtualColumn.GROUPINGID.getName()));
                    }
                    newRoot.addChild(childGroupingID);
                    // Indices
                    for (int i = 1; i < numberOperands; i++) {
                        ASTNode c = (ASTNode) ParseDriver.adaptor.getChild(root, i);
                        for (int j = 0; j < grpByAstExprs.size(); j++) {
                            ASTNode grpByExpr = grpByAstExprs.get(j);
                            if (grpByExpr.toStringTree().equals(c.toStringTree())) {
                                // Create and add AST node with position of grouping function input
                                // in group by clause
                                // IntMath.mod(-j - 1, size) == size - 1 - j, i.e. the position of
                                // the expression counted from the end of the GROUP BY list
                                ASTNode childN = (ASTNode) ParseDriver.adaptor.create(
                                        HiveParser.IntegralLiteral,
                                        String.valueOf(IntMath.mod(-j - 1, grpByAstExprs.size())));
                                newRoot.addChild(childN);
                                break;
                            }
                        }
                    }
                    if (numberOperands + 1 != ParseDriver.adaptor.getChildCount(newRoot)) {
                        throw new RuntimeException(
                                ErrorMsg.HIVE_GROUPING_FUNCTION_EXPR_NOT_IN_GROUPBY.getMsg());
                    }
                    // Replace expression
                    root.replaceChildren(0, numberOperands - 1, newRoot);
                }
            }
            return t;
        }
    };
    return (ASTNode) new TreeVisitor(ParseDriver.adaptor).visit(targetNode, action);
}