List of usage examples for the org.opencv.core.Point constructor
public Point(double x, double y)
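Point is a plain Java value class in the OpenCV bindings, with public double fields x and y, so no native-library call is needed just to construct one. A minimal sketch before the full application examples below (the class name PointDemo is ours, not from any of the projects listed):

import org.opencv.core.Point;

public class PointDemo {
    public static void main(String[] args) {
        Point origin = new Point();          // no-arg constructor defaults to (0.0, 0.0)
        Point p = new Point(3.5, -2.0);      // x = 3.5, y = -2.0

        // Fields are public doubles; the OpenCV image convention is x = column, y = row.
        double dx = p.x - origin.x;
        double dy = p.y - origin.y;
        System.out.println("p = " + p + ", distance from origin = " + Math.hypot(dx, dy));
    }
}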
From source file:org.ar.rubik.Annotation.java
License:Open Source License
/**
 * Draw User Instructions
 *
 * @param image
 */
public void drawUserInstructions(Mat image) {

    // Create black area for text
    if (MenuAndParams.userTextDisplay == true)
        Core.rectangle(image, new Point(0, 0), new Point(1270, 60), ColorTileEnum.BLACK.cvColor, -1);

    switch (stateModel.appState) {

    case START:
        if (MenuAndParams.userTextDisplay == true)
            Core.putText(image, "Show Me The Rubik Cube", new Point(0, 60), Constants.FontFace, 5,
                    ColorTileEnum.WHITE.cvColor, 5);
        break;

    case GOT_IT:
        if (MenuAndParams.userTextDisplay == true)
            Core.putText(image, "OK, Got It", new Point(0, 60), Constants.FontFace, 5,
                    ColorTileEnum.WHITE.cvColor, 5);
        break;

    case ROTATE_CUBE:
        if (MenuAndParams.userTextDisplay == true)
            Core.putText(image, "Please Rotate: " + stateModel.getNumObservedFaces(), new Point(0, 60),
                    Constants.FontFace, 5, ColorTileEnum.WHITE.cvColor, 5);
        break;

    case SEARCHING:
        if (MenuAndParams.userTextDisplay == true)
            Core.putText(image, "Searching for Another Face", new Point(0, 60), Constants.FontFace, 5,
                    ColorTileEnum.WHITE.cvColor, 5);
        break;

    case COMPLETE:
        if (MenuAndParams.userTextDisplay == true)
            Core.putText(image, "Cube is Complete and has Good Colors", new Point(0, 60), Constants.FontFace,
                    4, ColorTileEnum.WHITE.cvColor, 4);
        break;

    case WAIT_TABLES:
        if (MenuAndParams.userTextDisplay == true)
            Core.putText(image, "Waiting - Preload Next: " + appStateMachine.pruneTableLoaderCount,
                    new Point(0, 60), Constants.FontFace, 5, ColorTileEnum.WHITE.cvColor, 5);
        break;

    case BAD_COLORS:
        Core.putText(image, "Cube is Complete but has Bad Colors", new Point(0, 60), Constants.FontFace, 4,
                ColorTileEnum.WHITE.cvColor, 4);
        break;

    case VERIFIED:
        if (MenuAndParams.userTextDisplay == true)
            Core.putText(image, "Cube is Complete and Verified", new Point(0, 60), Constants.FontFace, 4,
                    ColorTileEnum.WHITE.cvColor, 4);
        break;

    case INCORRECT:
        Core.putText(image, "Cube is Complete but Incorrect: " + stateModel.verificationResults,
                new Point(0, 60), Constants.FontFace, 4, ColorTileEnum.WHITE.cvColor, 4);
        break;

    case ERROR:
        Core.putText(image, "Cube Solution Error: " + stateModel.verificationResults, new Point(0, 60),
                Constants.FontFace, 4, ColorTileEnum.WHITE.cvColor, 4);
        break;

    case SOLVED:
        if (MenuAndParams.userTextDisplay == true) {
            Core.putText(image, "SOLUTION: ", new Point(0, 60), Constants.FontFace, 4,
                    ColorTileEnum.WHITE.cvColor, 4);
            Core.rectangle(image, new Point(0, 60), new Point(1270, 120), ColorTileEnum.BLACK.cvColor, -1);
            Core.putText(image, "" + stateModel.solutionResults, new Point(0, 120), Constants.FontFace, 2,
                    ColorTileEnum.WHITE.cvColor, 2);
        }
        break;

    case ROTATE_FACE:
        String moveNumonic = stateModel.solutionResultsArray[stateModel.solutionResultIndex];
        Log.d(Constants.TAG, "Move:" + moveNumonic + ":");

        StringBuffer moveDescription = new StringBuffer("Rotate ");
        switch (moveNumonic.charAt(0)) {
        case 'U':
            moveDescription.append("Top Face");
            break;
        case 'D':
            moveDescription.append("Down Face");
            break;
        case 'L':
            moveDescription.append("Left Face");
            break;
        case 'R':
            moveDescription.append("Right Face");
            break;
        case 'F':
            moveDescription.append("Front Face");
            break;
        case 'B':
            moveDescription.append("Back Face");
            break;
        }

        if (moveNumonic.length() == 1)
            moveDescription.append(" Clockwise");
        else if (moveNumonic.charAt(1) == '2')
            moveDescription.append(" 180 Degrees");
        else if (moveNumonic.charAt(1) == '\'')
            moveDescription.append(" Counter Clockwise");
        else
            moveDescription.append("?");

        if (MenuAndParams.userTextDisplay == true)
            Core.putText(image, moveDescription.toString(), new Point(0, 60), Constants.FontFace, 4,
                    ColorTileEnum.WHITE.cvColor, 4);
        break;

    case WAITING_MOVE:
        if (MenuAndParams.userTextDisplay == true)
            Core.putText(image, "Waiting for move to be completed", new Point(0, 60), Constants.FontFace, 4,
                    ColorTileEnum.WHITE.cvColor, 4);
        break;

    case DONE:
        if (MenuAndParams.userTextDisplay == true)
            Core.putText(image, "Cube should now be solved.", new Point(0, 60), Constants.FontFace, 4,
                    ColorTileEnum.WHITE.cvColor, 4);
        break;

    default:
        if (MenuAndParams.userTextDisplay == true)
            Core.putText(image, "Oops", new Point(0, 60), Constants.FontFace, 5,
                    ColorTileEnum.WHITE.cvColor, 5);
        break;
    }

    // User indicator that tables have been computed.
    Core.line(image, new Point(0, 0), new Point(1270, 0),
            appStateMachine.pruneTableLoaderCount < 12 ? ColorTileEnum.RED.cvColor : ColorTileEnum.GREEN.cvColor,
            4);
}
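The example above is built almost entirely from Core.rectangle, Core.putText, and Core.line calls anchored by Point instances in pixel coordinates. A stripped-down sketch of the same pattern, with the project-specific constants (Constants.FontFace, ColorTileEnum) replaced by concrete values; note these drawing methods live on Core in the OpenCV 2.x Java API used here, and moved to Imgproc from 3.x onward:

import org.opencv.core.*;

// Sketch: a filled banner plus a text overlay, the pattern used throughout
// drawUserInstructions above.
static void drawBanner(Mat image) {
    // Filled black banner across the top; thickness -1 means "fill the rectangle".
    Core.rectangle(image, new Point(0, 0), new Point(1270, 60), new Scalar(0, 0, 0, 255), -1);

    // The Point is the text baseline origin: x = 0, y = 60 (the banner's bottom edge).
    Core.putText(image, "Show Me The Rubik Cube", new Point(0, 60),
            Core.FONT_HERSHEY_SIMPLEX, 2, new Scalar(255, 255, 255, 255), 3);
}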
From source file:org.ar.rubik.CubePoseEstimator.java
License:Open Source License
/**
 * Pose Estimation
 *
 * Deduce real world cube coordinates and rotation
 *
 * @param rubikFace
 * @param image
 * @param stateModel
 * @return
 */
public static CubePose poseEstimation(RubikFace rubikFace, Mat image, StateModel stateModel) {

    if (rubikFace == null)
        return null;

    if (rubikFace.faceRecognitionStatus != FaceRecognitionStatusEnum.SOLVED)
        return null;

    LeastMeansSquare lmsResult = rubikFace.lmsResult;
    if (lmsResult == null)
        return null;

    // OpenCV Pose Estimate requires at least four points.
    if (rubikFace.rhombusList.size() <= 4)
        return null;

    if (cameraMatrix == null) {
        cameraMatrix = stateModel.cameraCalibration.getOpenCVCameraMatrix((int) (image.size().width),
                (int) (image.size().height));
        distCoeffs = new MatOfDouble(stateModel.cameraCalibration.getDistortionCoefficients());
    }

    /*
     * For the purposes of external camera calibration, i.e., where the cube is
     * located in camera coordinates, we define the geometry of the face of a
     * cube composed of nine 3D locations, each representing the center of a tile.
     * Correspondence between these points and nine 2D points from the actual
     * camera image, along with camera calibration data, is used to calculate
     * the Pose of the Cube (i.e. "Cube Pose").
     *
     * The geometry of the cube here is defined as having its center at {0,0,0}
     * and an edge size of 2 units (i.e., +/- 1.0).
     */

    // Lists of real world points and screen points that correspond.
    List<Point3> objectPointsList = new ArrayList<Point3>(9);
    List<Point> imagePointsList = new ArrayList<Point>(9);

    // Create list of image (in 2D) and object (in 3D) points.
    // Loop over Rubik Face Tiles
    for (int n = 0; n < 3; n++) {
        for (int m = 0; m < 3; m++) {

            Rhombus rhombus = rubikFace.faceRhombusArray[n][m];

            // Only use if Rhombus was non null.
            if (rhombus != null) {

                // Obtain center of Rhombus in screen image coordinates
                // Convention:
                //   o X is zero on the left, and increases to the right.
                //   o Y is zero on the top and increases downward.
                Point imagePoint = new Point(rhombus.center.x, rhombus.center.y);
                imagePointsList.add(imagePoint);

                // N and M are actual, not conceptual (as in design doc).
                int mm = 2 - n;
                int nn = 2 - m;
                // above now matches design doc, that is:
                //   o the nn vector is to the right and upwards.
                //   o the mm vector is to the left and upwards.

                // Calculate center of Tile in OpenCV World Space Coordinates
                // Convention:
                //   o X is zero in the center, and increases to the left.
                //   o Y is zero in the center and increases downward.
                //   o Z is zero (at the world coordinate origin) and increases away from the camera.
                float x = (1 - mm) * 0.66666f;
                float y = -1.0f;
                float z = -1.0f * (1 - nn) * 0.666666f;
                Point3 objectPoint = new Point3(x, y, z);
                objectPointsList.add(objectPoint);
            }
        }
    }

    // Cast image point list into OpenCV Matrix.
    MatOfPoint2f imagePoints = new MatOfPoint2f();
    imagePoints.fromList(imagePointsList);

    // Cast object point list into OpenCV Matrix.
    MatOfPoint3f objectPoints = new MatOfPoint3f();
    objectPoints.fromList(objectPointsList);

    Mat rvec = new Mat();
    Mat tvec = new Mat();

    // Log.e(Constants.TAG, "Image Points: " + imagePoints.dump());
    // Log.e(Constants.TAG, "Object Points: " + objectPoints.dump());

    // =+= sometimes a "count >= 4" exception
    Calib3d.solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec);

    Log.v(Constants.TAG, String.format("Open CV Rotation Vector x=%4.2f y=%4.2f z=%4.2f", rvec.get(0, 0)[0],
            rvec.get(1, 0)[0], rvec.get(2, 0)[0]));

    // Convert from OpenCV to OpenGL World Coordinates
    float x = +1.0f * (float) tvec.get(0, 0)[0];
    float y = -1.0f * (float) tvec.get(1, 0)[0];
    float z = -1.0f * (float) tvec.get(2, 0)[0];

    // // =+= Add manual offset correction to translation
    // x += MenuAndParams.xTranslationOffsetParam.value;
    // y += MenuAndParams.yTranslationOffsetParam.value;
    // z += MenuAndParams.zTranslationOffsetParam.value;

    // Convert Rotation Vector from OpenCV polarity axes definition to OpenGL definition.
    // Note, polarity of x-axis is OK, no need to invert.
    rvec.put(1, 0, -1.0f * rvec.get(1, 0)[0]); // y-axis
    rvec.put(2, 0, -1.0f * rvec.get(2, 0)[0]); // z-axis

    // // =+= Add manual offset correction to Rotation
    // rvec.put(0, 0, rvec.get(0, 0)[0] + MenuAndParams.xRotationOffsetParam.value * Math.PI / 180.0);  // X rotation
    // rvec.put(1, 0, rvec.get(1, 0)[0] + MenuAndParams.yRotationOffsetParam.value * Math.PI / 180.0);  // Y rotation
    // rvec.put(2, 0, rvec.get(2, 0)[0] + MenuAndParams.zRotationOffsetParam.value * Math.PI / 180.0);  // Z rotation

    // Package up as CubePose object
    CubePose cubePose = new CubePose();
    cubePose.x = x;
    cubePose.y = y;
    cubePose.z = z;
    cubePose.xRotation = rvec.get(0, 0)[0];
    cubePose.yRotation = rvec.get(1, 0)[0];
    cubePose.zRotation = rvec.get(2, 0)[0];

    // Log.e(Constants.TAG, "Camera: " + cameraMatrix.dump());
    // Log.e(Constants.TAG, "Rotation: " + rvec.dump());
    // Log.e(Constants.TAG, "Translation: " + tvec.dump());

    Log.v(Constants.TAG, "Cube Pose: " + cubePose);

    return cubePose;
}
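The sign flips above are the whole OpenCV-to-OpenGL hand-off: OpenCV's camera frame has +Y pointing down and +Z into the scene, while OpenGL's has +Y up and +Z toward the viewer, so the y and z components of both the translation and the rotation vector are negated while x passes through. As a minimal helper (our naming, extracted from the logic above):

// OpenCV camera coordinates -> OpenGL world coordinates: negate y and z.
static float[] openCvToOpenGlTranslation(Mat tvec) {
    return new float[] {
            +1.0f * (float) tvec.get(0, 0)[0], // x unchanged
            -1.0f * (float) tvec.get(1, 0)[0], // y flipped
            -1.0f * (float) tvec.get(2, 0)[0], // z flipped
    };
}
// e.g. an OpenCV tvec of (0.5, 0.2, 3.0) becomes (0.5, -0.2, -3.0) in OpenGL.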
From source file:org.ar.rubik.CubeReconstructor.java
License:Open Source License
/**
 * Pose Estimation
 *
 * Deduce real world cube coordinates and rotation
 *
 * @param rubikFace
 * @param image
 * @param stateModel
 */
public void poseEstimation(RubikFace rubikFace, Mat image, StateModel stateModel) {

    if (rubikFace == null)
        return;

    if (rubikFace.faceRecognitionStatus != FaceRecognitionStatusEnum.SOLVED)
        return;

    LeastMeansSquare lmsResult = rubikFace.lmsResult;
    if (lmsResult == null)
        return;

    // OpenCV Pose Estimate requires at least four points.
    if (rubikFace.rhombusList.size() <= 4)
        return;

    // Lists of real world points and screen points that correspond.
    List<Point3> objectPointsList = new ArrayList<Point3>(9);
    List<Point> imagePointsList = new ArrayList<Point>(9);

    // Create list of image (in 2D) and object (in 3D) points.
    // Loop over Rubik Face Tiles/Rhombi
    for (int n = 0; n < 3; n++) {
        for (int m = 0; m < 3; m++) {

            Rhombus rhombus = rubikFace.faceRhombusArray[n][m];

            // Only use if Rhombus was non null.
            if (rhombus != null) {

                // Obtain center of Rhombus in screen image coordinates
                // Convention:
                //   o X is zero on the left, and increases to the right.
                //   o Y is zero on the top and increases downward.
                Point imagePoint = new Point(rhombus.center.x, rhombus.center.y);
                imagePointsList.add(imagePoint);

                // N and M are actual, not conceptual (as in design doc).
                int mm = 2 - n;
                int nn = 2 - m;
                // above now matches design doc, that is:
                //   o the nn vector is to the right and upwards.
                //   o the mm vector is to the left and upwards.

                // Calculate center of Tile in OpenCV World Space Coordinates
                // Convention:
                //   o X is zero in the center, and increases to the left.
                //   o Y is zero in the center and increases downward.
                //   o Z is zero (at the world coordinate origin) and increases away from the camera.
                float x = (1 - mm) * 0.66666f;
                float y = -1.0f;
                float z = -1.0f * (1 - nn) * 0.666666f;
                Point3 objectPoint = new Point3(x, y, z);
                objectPointsList.add(objectPoint);
            }
        }
    }

    // Cast image point list into OpenCV Matrix.
    MatOfPoint2f imagePoints = new MatOfPoint2f();
    imagePoints.fromList(imagePointsList);

    // Cast object point list into OpenCV Matrix.
    MatOfPoint3f objectPoints = new MatOfPoint3f();
    objectPoints.fromList(objectPointsList);

    Mat cameraMatrix = stateModel.cameraParameters.getOpenCVCameraMatrix();
    MatOfDouble distCoeffs = new MatOfDouble();
    Mat rvec = new Mat();
    Mat tvec = new Mat();

    // Log.e(Constants.TAG, "Image Points: " + imagePoints.dump());
    // Log.e(Constants.TAG, "Object Points: " + objectPoints.dump());

    // boolean result =
    Calib3d.solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec);

    Log.v(Constants.TAG, String.format("Open CV Rotation Vector x=%4.2f y=%4.2f z=%4.2f", rvec.get(0, 0)[0],
            rvec.get(1, 0)[0], rvec.get(2, 0)[0]));

    // Convert from OpenCV to OpenGL World Coordinates.
    // Note: x, y, and z are fields of this class; they are assigned, not declared, here.
    x = +1.0f * (float) tvec.get(0, 0)[0];
    y = -1.0f * (float) tvec.get(1, 0)[0];
    z = -1.0f * (float) tvec.get(2, 0)[0];

    // =+= Add manual offset correction to translation
    x += MenuAndParams.xTranslationOffsetParam.value;
    y += MenuAndParams.yTranslationOffsetParam.value;
    z += MenuAndParams.zTranslationOffsetParam.value;

    // Convert Rotation Vector from OpenCV polarity axes definition to OpenGL definition
    rvec.put(1, 0, -1.0f * rvec.get(1, 0)[0]);
    rvec.put(2, 0, -1.0f * rvec.get(2, 0)[0]);

    // =+= Add manual offset correction to Rotation
    rvec.put(0, 0, rvec.get(0, 0)[0] + MenuAndParams.xRotationOffsetParam.value * Math.PI / 180.0); // X rotation
    rvec.put(1, 0, rvec.get(1, 0)[0] + MenuAndParams.yRotationOffsetParam.value * Math.PI / 180.0); // Y rotation
    rvec.put(2, 0, rvec.get(2, 0)[0] + MenuAndParams.zRotationOffsetParam.value * Math.PI / 180.0); // Z rotation

    // Create an OpenCV Rotation Matrix from a Rotation Vector.
    // (Rodrigues reallocates the destination, so the initial size here is replaced by the 3x3 result.)
    Mat rMatrix = new Mat(4, 4, CvType.CV_32FC2);
    Calib3d.Rodrigues(rvec, rMatrix);
    Log.v(Constants.TAG, "Rodrigues Matrix: " + rMatrix.dump());

    /*
     * Create an OpenGL Rotation Matrix
     * Notes:
     *   o OpenGL is in column-major order.
     *   o OpenCV Rodrigues Rotation Matrix is 3x3 where OpenGL Rotation Matrix is 4x4.
     */

    // Initialize all Rotation Matrix elements to zero.
    for (int i = 0; i < 16; i++)
        rotationMatrix[i] = 0.0f;

    // Initialize element [3,3] to 1.0: i.e., the "w" component in homogeneous coordinates.
    rotationMatrix[3 * 4 + 3] = 1.0f;

    // Copy OpenCV matrix to OpenGL matrix element by element.
    for (int r = 0; r < 3; r++)
        for (int c = 0; c < 3; c++)
            rotationMatrix[r + c * 4] = (float) (rMatrix.get(r, c)[0]);

    // Diagnostics
    for (int r = 0; r < 4; r++)
        Log.v(Constants.TAG, String.format("Rotation Matrix r=%d [%5.2f %5.2f %5.2f %5.2f]", r,
                rotationMatrix[r + 0], rotationMatrix[r + 4], rotationMatrix[r + 8], rotationMatrix[r + 12]));

    // Log.e(Constants.TAG, "Result: " + result);
    // Log.e(Constants.TAG, "Camera: " + cameraMatrix.dump());
    // Log.e(Constants.TAG, "Rotation: " + rvec.dump());
    // Log.e(Constants.TAG, "Translation: " + tvec.dump());

    // // Reporting in OpenGL World Coordinates
    // Core.rectangle(image, new Point(0, 50), new Point(1270, 150), Constants.ColorBlack, -1);
    // Core.putText(image, String.format("Translation x=%4.2f y=%4.2f z=%4.2f", x, y, z), new Point(50, 100), Constants.FontFace, 3, Constants.ColorWhite, 3);
    // Core.putText(image, String.format("Rotation x=%4.0f y=%4.0f z=%4.0f", cubeXrotation, cubeYrotation, cubeZrotation), new Point(50, 150), Constants.FontFace, 3, Constants.ColorWhite, 3);
}
From source file:org.ar.rubik.ImageRecognizer.java
License:Open Source License
/**
 * On Camera Frame
 *
 * Process frame image through Rubik Face recognition, possibly resulting in a state change.
 *
 * (non-Javadoc)
 * @see org.opencv.android.CameraBridgeViewBase.CvCameraViewListener2#onCameraFrame(org.opencv.android.CameraBridgeViewBase.CvCameraViewFrame)
 */
@Override
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {

    // Log.e(Constants.TAG, "CV Thread ID = " + Thread.currentThread().getId());

    // Just display error message if it is non-null.
    if (errorImage != null)
        return errorImage;

    Mat image = inputFrame.rgba();
    Size imageSize = image.size();
    Log.v(Constants.TAG_CAL, "Input Frame width=" + imageSize.width + " height=" + imageSize.height);
    if (imageSize.width != stateModel.openCVSize.width || imageSize.height != stateModel.openCVSize.height)
        Log.e(Constants.TAG_CAL, "State Model openCVSize does not agree with input frame!");

    // Save or Recall image as requested
    switch (MenuAndParams.imageSourceMode) {
    case NORMAL:
        break;
    case SAVE_NEXT:
        Util.saveImage(image);
        MenuAndParams.imageSourceMode = ImageSourceModeEnum.NORMAL;
        break;
    case PLAYBACK:
        image = Util.recallImage();
        // falls through
    default:
        break;
    }

    // Calculate and display Frames Per Second
    long newTimeStamp = System.currentTimeMillis();
    if (framesPerSecondTimeStamp > 0) {
        long frameTime = newTimeStamp - framesPerSecondTimeStamp;
        double framesPerSecond = 1000.0 / frameTime;
        String string = String.format("%4.1f FPS", framesPerSecond);
        Core.putText(image, string, new Point(50, 700), Constants.FontFace, 2, ColorTileEnum.WHITE.cvColor, 2);
    }
    framesPerSecondTimeStamp = newTimeStamp;

    try {

        // Initialize
        RubikFace rubikFace = new RubikFace();
        rubikFace.profiler.markTime(Profiler.Event.START);
        Log.i(Constants.TAG, "============================================================================");

        /* **********************************************************************
         * Return Original Image
         */
        if (MenuAndParams.imageProcessMode == ImageProcessModeEnum.DIRECT) {
            stateModel.activeRubikFace = rubikFace;
            rubikFace.profiler.markTime(Profiler.Event.TOTAL);
            return annotation.drawAnnotation(image);
        }

        /* **********************************************************************
         * Process to Grey Scale
         *
         * This algorithm highlights areas that are all of nearly the same hue.
         * In particular, cube faces should be highlighted.
         */
        Mat greyscale_image = new Mat();
        Imgproc.cvtColor(image, greyscale_image, Imgproc.COLOR_BGR2GRAY);
        rubikFace.profiler.markTime(Profiler.Event.GREYSCALE);
        if (MenuAndParams.imageProcessMode == ImageProcessModeEnum.GREYSCALE) {
            stateModel.activeRubikFace = rubikFace;
            rubikFace.profiler.markTime(Profiler.Event.TOTAL);
            image.release();
            return annotation.drawAnnotation(greyscale_image);
        }

        /* **********************************************************************
         * Gaussian Filter Blur prevents getting a lot of false hits
         */
        Mat blur_image = new Mat();
        int kernelSize = (int) MenuAndParams.gaussianBlurKernelSizeParam.value;
        kernelSize = kernelSize % 2 == 0 ? kernelSize + 1 : kernelSize; // make odd
        Imgproc.GaussianBlur(greyscale_image, blur_image, new Size(kernelSize, kernelSize), -1, -1);
        rubikFace.profiler.markTime(Profiler.Event.GAUSSIAN);
        greyscale_image.release();
        if (MenuAndParams.imageProcessMode == ImageProcessModeEnum.GAUSSIAN) {
            stateModel.activeRubikFace = rubikFace;
            rubikFace.profiler.markTime(Profiler.Event.TOTAL);
            image.release();
            return annotation.drawAnnotation(blur_image);
        }

        /* **********************************************************************
         * Canny Edge Detection
         */
        Mat canny_image = new Mat();
        Imgproc.Canny(blur_image, canny_image, MenuAndParams.cannyLowerThresholdParam.value,
                MenuAndParams.cannyUpperThresholdParam.value,
                3,      // Sobel Aperture size. This seems to be the value typically used in the literature: i.e., a 3x3 Sobel Matrix.
                false); // use cheap gradient calculation: norm = |dI/dx| + |dI/dy|
        rubikFace.profiler.markTime(Profiler.Event.EDGE);
        blur_image.release();
        if (MenuAndParams.imageProcessMode == ImageProcessModeEnum.CANNY) {
            stateModel.activeRubikFace = rubikFace;
            rubikFace.profiler.markTime(Profiler.Event.TOTAL);
            image.release();
            return annotation.drawAnnotation(canny_image);
        }

        /* **********************************************************************
         * Dilation Image Process
         */
        Mat dilate_image = new Mat();
        Imgproc.dilate(canny_image, dilate_image, Imgproc.getStructuringElement(Imgproc.MORPH_RECT,
                new Size(MenuAndParams.dilationKernelSizeParam.value, MenuAndParams.dilationKernelSizeParam.value)));
        rubikFace.profiler.markTime(Profiler.Event.DILATION);
        canny_image.release();
        if (MenuAndParams.imageProcessMode == ImageProcessModeEnum.DILATION) {
            stateModel.activeRubikFace = rubikFace;
            rubikFace.profiler.markTime(Profiler.Event.TOTAL);
            image.release();
            return annotation.drawAnnotation(dilate_image);
        }

        /* **********************************************************************
         * Contour Generation
         */
        List<MatOfPoint> contours = new LinkedList<MatOfPoint>();
        Mat heirarchy = new Mat();
        Imgproc.findContours(dilate_image, contours, heirarchy, Imgproc.RETR_LIST,
                Imgproc.CHAIN_APPROX_SIMPLE); // Note: tried other TC89 options, but no significant change or improvement in cpu time.
        rubikFace.profiler.markTime(Profiler.Event.CONTOUR);
        dilate_image.release();

        // Create grey scale image, but in RGBA format, and then add yellow colored contours on top.
        if (MenuAndParams.imageProcessMode == ImageProcessModeEnum.CONTOUR) {
            stateModel.activeRubikFace = rubikFace;
            rubikFace.profiler.markTime(Profiler.Event.TOTAL);
            Mat gray_image = new Mat(imageSize, CvType.CV_8UC4);
            Mat rgba_gray_image = new Mat(imageSize, CvType.CV_8UC4);
            Imgproc.cvtColor(image, gray_image, Imgproc.COLOR_RGB2GRAY);
            Imgproc.cvtColor(gray_image, rgba_gray_image, Imgproc.COLOR_GRAY2BGRA, 4); // BGRA output has 4 channels
            Imgproc.drawContours(rgba_gray_image, contours, -1, ColorTileEnum.YELLOW.cvColor, 3);
            Core.putText(rgba_gray_image, "Num Contours: " + contours.size(), new Point(500, 50),
                    Constants.FontFace, 4, ColorTileEnum.RED.cvColor, 4);
            gray_image.release();
            image.release();
            return annotation.drawAnnotation(rgba_gray_image);
        }

        /* **********************************************************************
         * Polygon Detection
         */
        List<Rhombus> polygonList = new LinkedList<Rhombus>();
        for (MatOfPoint contour : contours) {

            // Keep only counter clockwise contours. A clockwise contour is reported as a negative number.
            double contourArea = Imgproc.contourArea(contour, true);
            if (contourArea < 0.0)
                continue;

            // Keep only reasonable area contours
            if (contourArea < MenuAndParams.minimumContourAreaParam.value)
                continue;

            // Float, instead of Double, is for some reason required by the approximate polygon detection algorithm.
            MatOfPoint2f contour2f = new MatOfPoint2f();
            MatOfPoint2f polygone2f = new MatOfPoint2f();
            MatOfPoint polygon = new MatOfPoint();

            // Make a Polygon out of a contour with the provided Epsilon accuracy parameter.
            // It uses the Douglas-Peucker algorithm: http://en.wikipedia.org/wiki/Ramer-Douglas-Peucker_algorithm
            contour.convertTo(contour2f, CvType.CV_32FC2);
            Imgproc.approxPolyDP(contour2f, polygone2f,
                    MenuAndParams.polygonEpsilonParam.value, // The maximum distance between the original curve and its approximation.
                    true); // The resulting polygon representation is "closed": its first and last vertices are connected.
            polygone2f.convertTo(polygon, CvType.CV_32S);

            polygonList.add(new Rhombus(polygon));
        }
        rubikFace.profiler.markTime(Profiler.Event.POLYGON);

        // Create grey scale image, but in RGBA format, and then add yellow colored polygons on top.
        if (MenuAndParams.imageProcessMode == ImageProcessModeEnum.POLYGON) {
            stateModel.activeRubikFace = rubikFace;
            rubikFace.profiler.markTime(Profiler.Event.TOTAL);
            Mat gray_image = new Mat(imageSize, CvType.CV_8UC4);
            Mat rgba_gray_image = new Mat(imageSize, CvType.CV_8UC4);
            Imgproc.cvtColor(image, gray_image, Imgproc.COLOR_RGB2GRAY);
            Imgproc.cvtColor(gray_image, rgba_gray_image, Imgproc.COLOR_GRAY2BGRA, 4);
            for (Rhombus polygon : polygonList)
                polygon.draw(rgba_gray_image, ColorTileEnum.YELLOW.cvColor);
            Core.putText(rgba_gray_image, "Num Polygons: " + polygonList.size(), new Point(500, 50),
                    Constants.FontFace, 3, ColorTileEnum.RED.cvColor, 4);
            return annotation.drawAnnotation(rgba_gray_image);
        }

        /* **********************************************************************
         * Rhombus Tile Recognition
         *
         * From polygon list, produce a list of suitable Parallelograms (Rhombi).
         */
        Log.i(Constants.TAG, String.format("Rhombus: X Y Area a-a b-a a-l b-l gamma"));
        List<Rhombus> rhombusList = new LinkedList<Rhombus>();

        // Get only valid Rhombus(es): actually parallelograms.
        for (Rhombus rhombus : polygonList) {
            rhombus.qualify();
            if (rhombus.status == Rhombus.StatusEnum.VALID)
                rhombusList.add(rhombus);
        }

        // Filtering w.r.t. Rhombus set characteristics
        Rhombus.removedOutlierRhombi(rhombusList);
        rubikFace.profiler.markTime(Profiler.Event.RHOMBUS);

        // Create grey scale image, but in RGBA format, and then add yellow colored Rhombi (parallelograms) on top.
        if (MenuAndParams.imageProcessMode == ImageProcessModeEnum.RHOMBUS) {
            stateModel.activeRubikFace = rubikFace;
            rubikFace.profiler.markTime(Profiler.Event.TOTAL);
            Mat gray_image = new Mat(imageSize, CvType.CV_8UC4);
            Mat rgba_gray_image = new Mat(imageSize, CvType.CV_8UC4);
            Imgproc.cvtColor(image, gray_image, Imgproc.COLOR_RGB2GRAY);
            Imgproc.cvtColor(gray_image, rgba_gray_image, Imgproc.COLOR_GRAY2BGRA, 4);
            for (Rhombus rhombus : rhombusList)
                rhombus.draw(rgba_gray_image, ColorTileEnum.YELLOW.cvColor);
            Core.putText(rgba_gray_image, "Num Rhombus: " + rhombusList.size(), new Point(500, 50),
                    Constants.FontFace, 4, ColorTileEnum.RED.cvColor, 4);
            gray_image.release();
            image.release();
            return annotation.drawAnnotation(rgba_gray_image);
        }

        /* **********************************************************************
         * Face Recognition
         *
         * Takes a collection of Rhombus objects and determines if a valid
         * Rubik Face can be determined from them, and then also determines
         * initial color for all nine tiles.
         */
        rubikFace.processRhombuses(rhombusList, image);
        Log.i(Constants.TAG, "Face Solution = " + rubikFace.faceRecognitionStatus);
        rubikFace.profiler.markTime(Profiler.Event.FACE);
        if (MenuAndParams.imageProcessMode == ImageProcessModeEnum.FACE_DETECT) {
            stateModel.activeRubikFace = rubikFace;
            rubikFace.profiler.markTime(Profiler.Event.TOTAL);
            return annotation.drawAnnotation(image);
        }

        /* **********************************************************************
         * Cube Pose Estimation
         *
         * Reconstruct the Rubik Cube 3D location and orientation in GL space coordinates.
         */
        if (rubikFace.faceRecognitionStatus == FaceRecognitionStatusEnum.SOLVED) {

            // Obtain Cube Pose from Face Grid information.
            stateModel.cubePose = CubePoseEstimator.poseEstimation(rubikFace, image, stateModel);

            // Process measurement update on Kalman Filter (if it exists).
            KalmanFilter kalmanFilter = stateModel.kalmanFilter;
            if (kalmanFilter != null)
                kalmanFilter.measurementUpdate(stateModel.cubePose, System.currentTimeMillis());

            // Process measurement update on Kalman Filter ALSM (if it exists).
            KalmanFilterALSM kalmanFilterALSM = stateModel.kalmanFilterALSM;
            if (kalmanFilterALSM != null)
                kalmanFilterALSM.measurementUpdate(stateModel.cubePose, System.currentTimeMillis());
        } else {
            stateModel.cubePose = null;
        }
        rubikFace.profiler.markTime(Profiler.Event.POSE);

        /* **********************************************************************
         * Application State Machine
         *
         * Will provide user instructions.
         * Will determine when we are on-face and off-face.
         * Will determine when we are on-new-face.
         * Will change state.
         */
        appStateMachine.onFaceEvent(rubikFace);
        rubikFace.profiler.markTime(Profiler.Event.CONTROLLER);
        rubikFace.profiler.markTime(Profiler.Event.TOTAL);

        // Normal return point.
        stateModel.activeRubikFace = rubikFace;
        return annotation.drawAnnotation(image);

        // =+= Issue: how to get stdio to print as error and not warning in logcat?
    } catch (CvException e) {
        Log.e(Constants.TAG, "CvException: " + e.getMessage());
        e.printStackTrace();
        errorImage = new Mat(imageSize, CvType.CV_8UC4);
        Core.putText(errorImage, "CvException: " + e.getMessage(), new Point(50, 50), Constants.FontFace, 2,
                ColorTileEnum.WHITE.cvColor, 2);
        int i = 1;
        for (StackTraceElement element : e.getStackTrace())
            Core.putText(errorImage, element.toString(), new Point(50, 50 + 50 * i++), Constants.FontFace, 2,
                    ColorTileEnum.WHITE.cvColor, 2);
    } catch (Exception e) {
        Log.e(Constants.TAG, "Exception: " + e.getMessage());
        e.printStackTrace();
        errorImage = new Mat(imageSize, CvType.CV_8UC4);
        Core.putText(errorImage, "Exception: " + e.getMessage(), new Point(50, 50), Constants.FontFace, 2,
                ColorTileEnum.WHITE.cvColor, 2);
        int i = 1;
        for (StackTraceElement element : e.getStackTrace())
            Core.putText(errorImage, element.toString(), new Point(50, 50 + 50 * i++), Constants.FontFace, 2,
                    ColorTileEnum.WHITE.cvColor, 2);
    } catch (Error e) {
        Log.e(Constants.TAG, "Error: " + e.getMessage());
        e.printStackTrace();
        errorImage = new Mat(imageSize, CvType.CV_8UC4);
        Core.putText(errorImage, "Error: " + e.getMessage(), new Point(50, 50), Constants.FontFace, 2,
                ColorTileEnum.WHITE.cvColor, 2);
        int i = 1;
        for (StackTraceElement element : e.getStackTrace())
            Core.putText(errorImage, element.toString(), new Point(50, 50 + 50 * i++), Constants.FontFace, 2,
                    ColorTileEnum.WHITE.cvColor, 2);
    }

    return annotation.drawAnnotation(image);
}
From source file:org.ar.rubik.LeastMeansSquare.java
License:Open Source License
public LeastMeansSquare(double x, double y, double alphaLatice, Point[][] errorVectorArray, double sigma,
        boolean valid) {
    this.origin = new Point(x, y);
    this.alphaLattice = alphaLatice;
    this.errorVectorArray = errorVectorArray;
    this.sigma = sigma;
    this.valid = valid;
}
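A hypothetical construction of this record-style class, showing Point used both for the fitted lattice origin and for the per-tile error vectors (all values invented for illustration):

// Hypothetical values: fit origin at pixel (312.4, 188.9), 110-pixel lattice,
// a small residual error on one tile, overall fit accepted as valid.
Point[][] errors = new Point[3][3];
errors[0][0] = new Point(1.2, -0.7);
LeastMeansSquare lms = new LeastMeansSquare(312.4, 188.9, 110.0, errors, 4.3, true);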
From source file:org.ar.rubik.MonoChromatic.java
License:Open Source License
/**
 * Use mask operation and then min max.
 * This solution consumes about 20 minutes per frame!
 *
 * @param original_image
 * @return
 */
@SuppressWarnings("unused")
private static Mat monochromaticMedianImageFilterUtilizingOpenCv2(Mat original_image) {

    final Size imageSize = original_image.size();
    final int numColumns = (int) original_image.size().width;
    final int numRows = (int) original_image.size().height;
    final int bufferSize = numColumns * numRows;
    final int span = 7;
    final int accuracy = 5;

    Mat hsv_image = new Mat(imageSize, CvType.CV_8UC3);
    Imgproc.cvtColor(original_image, hsv_image, Imgproc.COLOR_RGB2HLS);
    List<Mat> channels = new LinkedList<Mat>();
    Core.split(hsv_image, channels);
    Mat hueMat = channels.get(0);
    Mat lumMat = channels.get(1);
    Mat satMat = channels.get(2);

    // Output byte array for speed efficiency
    Mat monochromatic_image = new Mat(imageSize, CvType.CV_8UC1);
    byte[] monochromaticByteArray = new byte[bufferSize];

    Mat mask = Mat.zeros(numRows, numColumns, CvType.CV_8UC1);

    Log.i(Constants.TAG, "Begin MonoChromatic CV");
    for (int row = 0; row < numRows; row++) {
        byte result_pixel = 0;
        for (int col = 0; col < numColumns; col++) {

            if (col < span || (col >= numColumns - span))
                result_pixel = 0; // Just put in black
            else if (row < span || (row >= numRows - span))
                result_pixel = 0; // Just put in black
            else {
                // Log.i(Constants.TAG, "Creating Mask at " + row + "," + col);
                // Point takes (x, y), i.e. (col, row).
                Core.rectangle(mask, new Point(col, row), new Point(col + span, row + span),
                        new Scalar(1, 1, 1));

                // Core.MinMaxLocResult minMaxResult = Core.minMaxLoc(hueMat, mask);
                Mat subset = new Mat();
                hueMat.copyTo(subset, mask);
                Core.MinMaxLocResult minMaxResult = Core.minMaxLoc(subset);

                // Accept if hue is nearly uniform across the window.
                if ((minMaxResult.maxVal - minMaxResult.minVal) < accuracy) // && (lum_max - lum_min < accuracy) && (sat_max - sat_min < accuracy)
                    result_pixel = (byte) 128;
                else
                    result_pixel = (byte) 0;
                // Log.i(Constants.TAG, "Completed Mask at " + row + "," + col);

                // Erase the mask window for the next iteration.
                Core.rectangle(mask, new Point(col, row), new Point(col + span, row + span),
                        new Scalar(0, 0, 0));
            }

            if ((col >= span / 2) && (row >= span / 2))
                monochromaticByteArray[(row - span / 2) * numColumns + (col - span / 2)] = result_pixel;
        }
        Log.i(Constants.TAG, "Completed Row: " + row);
    }

    monochromatic_image.put(0, 0, monochromaticByteArray);
    Log.i(Constants.TAG, "Completed MonoChromatic CV");
    // System.exit(0);
    return monochromatic_image;
}
From source file:org.ar.rubik.MonoChromatic.java
License:Open Source License
/**
 * Use OpenCV minMax.
 *
 * However, this is enormously slow, taking 10 minutes per frame! Why?
 * I think because it is effectively O(N^4) in computation.
 *
 * @param original_image
 * @return
 */
@SuppressWarnings("unused")
private static Mat monochromaticMedianImageFilterUtilizingOpenCv(Mat original_image) {

    final Size imageSize = original_image.size();
    final int numColumns = (int) original_image.size().width;
    final int numRows = (int) original_image.size().height;
    final int bufferSize = numColumns * numRows;
    final int span = 7;
    final int accuracy = 5;

    Mat hsv_image = new Mat(imageSize, CvType.CV_8UC3);
    Imgproc.cvtColor(original_image, hsv_image, Imgproc.COLOR_RGB2HLS);
    List<Mat> channels = new LinkedList<Mat>();
    Core.split(hsv_image, channels);
    Mat hueMat = channels.get(0);
    Mat lumMat = channels.get(1);
    Mat satMat = channels.get(2);

    // Output byte array for speed efficiency
    Mat monochromatic_image = new Mat(imageSize, CvType.CV_8UC1);
    byte[] monochromaticByteArray = new byte[bufferSize];

    Mat mask = Mat.zeros(numRows, numColumns, CvType.CV_8UC1);

    Log.i(Constants.TAG, "Begin MonoChromatic CV");
    for (int row = 0; row < numRows; row++) {
        byte result_pixel = 0;
        for (int col = 0; col < numColumns; col++) {

            if (col < span || (col >= numColumns - span))
                result_pixel = 0; // Just put in black
            else if (row < span || (row >= numRows - span))
                result_pixel = 0; // Just put in black
            else {
                // Log.i(Constants.TAG, "Creating Mask at " + row + "," + col);
                // Point takes (x, y), i.e. (col, row).
                Core.rectangle(mask, new Point(col, row), new Point(col + span, row + span),
                        new Scalar(1, 1, 1));

                Core.MinMaxLocResult minMaxResult = Core.minMaxLoc(hueMat, mask);

                // Accept if hue is nearly uniform across the window.
                if ((minMaxResult.maxVal - minMaxResult.minVal) < accuracy) // && (lum_max - lum_min < accuracy) && (sat_max - sat_min < accuracy)
                    result_pixel = (byte) 128;
                else
                    result_pixel = (byte) 0;
                // Log.i(Constants.TAG, "Completed Mask at " + row + "," + col);

                // Erase the mask window for the next iteration.
                Core.rectangle(mask, new Point(col, row), new Point(col + span, row + span),
                        new Scalar(0, 0, 0));
            }

            if ((col >= span / 2) && (row >= span / 2))
                monochromaticByteArray[(row - span / 2) * numColumns + (col - span / 2)] = result_pixel;
        }
        Log.i(Constants.TAG, "Completed Row: " + row);
    }

    monochromatic_image.put(0, 0, monochromaticByteArray);
    Log.i(Constants.TAG, "Completed MonoChromatic CV");
    // System.exit(0);
    return monochromatic_image;
}
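Both filters above reduce to one primitive: the min/max of the hue channel restricted to a small mask window, compared against an accuracy threshold. A self-contained sketch of that primitive (toy values, not from the project):

// A 100x100 single-channel image that is uniformly 90, except one outlier pixel.
Mat values = new Mat(100, 100, CvType.CV_8UC1, new Scalar(90));
values.put(10, 10, 200); // Mat.put takes (row, col)

// Non-zero mask pixels select the region of interest; Point takes (x, y) = (col, row).
Mat mask = Mat.zeros(100, 100, CvType.CV_8UC1);
Core.rectangle(mask, new Point(5, 5), new Point(20, 20), new Scalar(255), -1); // -1 = filled

Core.MinMaxLocResult mm = Core.minMaxLoc(values, mask);
// mm.minVal == 90, mm.maxVal == 200: a spread of 110, so this window is not monochromatic.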
From source file:org.ar.rubik.Profiler.java
License:Open Source License
/**
 * Render Time Consumption Annotation
 *
 * @param image
 * @param stateModel
 * @return
 */
public Mat drawTimeConsumptionMetrics(Mat image, StateModel stateModel) {

    Core.rectangle(image, new Point(0, 0), new Point(500, 720), ColorTileEnum.BLACK.cvColor, -1);
    int index = 0;

    long newTimeStamp = System.currentTimeMillis();
    if (framesPerSecondTimeStamp > 0) {
        long frameTime = newTimeStamp - framesPerSecondTimeStamp;
        double framesPerSecond = 1000.0 / frameTime;
        String string = String.format("Frames Per Second: %4.1f", framesPerSecond);
        Core.putText(image, string, new Point(50, 100), Constants.FontFace, 2, ColorTileEnum.WHITE.cvColor, 2);
    }
    framesPerSecondTimeStamp = newTimeStamp;

    Core.putText(image, "Event Time Min", new Point(50, 150), Constants.FontFace, 2,
            ColorTileEnum.WHITE.cvColor, 2);

    renderAndIndex(Event.GREYSCALE, Event.START, image, index++);
    renderAndIndex(Event.GAUSSIAN, Event.GREYSCALE, image, index++);
    renderAndIndex(Event.EDGE, Event.GAUSSIAN, image, index++);
    renderAndIndex(Event.DILATION, Event.EDGE, image, index++);
    renderAndIndex(Event.CONTOUR, Event.DILATION, image, index++);
    renderAndIndex(Event.POLYGON, Event.CONTOUR, image, index++);
    renderAndIndex(Event.RHOMBUS, Event.POLYGON, image, index++);
    renderAndIndex(Event.FACE, Event.RHOMBUS, image, index++);
    renderAndIndex(Event.POSE, Event.FACE, image, index++);
    renderAndIndex(Event.CONTROLLER, Event.POSE, image, index++);
    renderAndIndex(Event.TOTAL, Event.START, image, index++);

    if (scheduleReset == true) {
        minEventSet = new HashMap<Event, Long>(32);
        scheduleReset = false;
    }

    return image;
}
From source file:org.ar.rubik.Profiler.java
License:Open Source License
/**
 * Render one line of time consumption
 *
 * @param endEvent
 * @param startEvent
 * @param image
 * @param index
 */
private void renderAndIndex(Event endEvent, Event startEvent, Mat image, int index) {

    // No measurement yet for this event type.
    if (eventSet.containsKey(endEvent) == false) {
        Core.putText(image, endEvent.toString() + ": NA", new Point(50, 200 + 50 * index), Constants.FontFace,
                2, ColorTileEnum.WHITE.cvColor, 2);
    }

    // If total, perform special processing. Specifically, add up and report all minimums found in the
    // hash set instead of measuring and recording a minimum total. Thus, this number should converge
    // more quickly to the desired value.
    else if (endEvent == Event.TOTAL) {

        long endTimeStamp = eventSet.get(endEvent);
        long startTimeStamp = eventSet.get(startEvent);
        long elapsedTime = endTimeStamp - startTimeStamp;

        // Sum up all minimum times: this converges faster than recording the minimum total time and should be the same.
        long minValue = 0;
        for (long minEventTime : minEventSet.values())
            minValue += minEventTime;

        String string = String.format("%10s: %3dmS %3dmS", endEvent.toString(), elapsedTime, minValue);
        Core.putText(image, string, new Point(50, 200 + 50 * index), Constants.FontFace, 2,
                ColorTileEnum.WHITE.cvColor, 2);
    }

    // Render time and minimum time for this event type.
    else {

        long endTimeStamp = eventSet.get(endEvent);
        long startTimeStamp = eventSet.get(startEvent);
        long elapsedTime = endTimeStamp - startTimeStamp;

        long minValue = minEventSet.get(endEvent);
        if (elapsedTime < minValue) {
            minValue = elapsedTime;
            minEventSet.put(endEvent, minValue);
        }

        String string = String.format("%10s: %3dmS %3dmS", endEvent.toString(), elapsedTime, minValue);
        Core.putText(image, string, new Point(50, 200 + 50 * index), Constants.FontFace, 2,
                ColorTileEnum.WHITE.cvColor, 2);
    }
}
From source file:org.ar.rubik.RubikFace.java
License:Open Source License
/**
 * Calculate the optimum fit for the given layout of Rhombus in the Face.
 *
 * Set up BIG linear equation: Y = AX
 * Where:
 *   Y is a 2k x 1 matrix of actual x and y locations from rhombus centers (known values)
 *   X is a 3 x 1 matrix of { x_origin, y_origin, alpha_lattice } (values we wish to find)
 *   A is a 2k x 3 matrix of coefficients derived from m, n, alpha, beta, and gamma.
 *
 * Notes:
 *   k     := the number of available rhombi (the matrices have 2k rows: one x row and one y row per rhombus).
 *   n     := integer axis of the face.
 *   m     := integer axis of the face.
 *   gamma := ratio of beta to alpha lattice size.
 *
 * Also, calculate sum of errors squared: E = Y - AX
 * @return
 */
private LeastMeansSquare findOptimumFaceFit() {

    // Count how many non-empty cells actually have a rhombus in them.
    int k = 0;
    for (int n = 0; n < 3; n++)
        for (int m = 0; m < 3; m++)
            if (faceRhombusArray[n][m] != null)
                k++;
    Log.i(Constants.TAG, "Big K: " + k);

    Mat bigAmatrix = new Mat(2 * k, 3, CvType.CV_64FC1);
    Mat bigYmatrix = new Mat(2 * k, 1, CvType.CV_64FC1);
    Mat bigXmatrix = new Mat(3, 1, CvType.CV_64FC1); // { origin_x, origin_y, latticeAlpha }

    // Load up matrices Y and A:
    //   X_k = X + n * L_alpha * cos(alpha) + m * L_beta * cos(beta)
    //   Y_k = Y + n * L_alpha * sin(alpha) + m * L_beta * sin(beta)
    int index = 0;
    for (int n = 0; n < 3; n++) {
        for (int m = 0; m < 3; m++) {
            Rhombus rhombus = faceRhombusArray[n][m];
            if (rhombus != null) {

                {
                    // Actual X axis value of Rhombus in this location
                    double bigY = rhombus.center.x;

                    // Expected X axis value: i.e. x = func(x_origin, n, m, alpha, beta, alphaLattice, gamma)
                    double bigA = n * Math.cos(alphaAngle) + gammaRatio * m * Math.cos(betaAngle);

                    bigYmatrix.put(index, 0, new double[] { bigY });
                    bigAmatrix.put(index, 0, new double[] { 1.0 });
                    bigAmatrix.put(index, 1, new double[] { 0.0 });
                    bigAmatrix.put(index, 2, new double[] { bigA });
                    index++;
                }

                {
                    // Actual Y axis value of Rhombus in this location
                    double bigY = rhombus.center.y;

                    // Expected Y axis value: i.e. y = func(y_origin, n, m, alpha, beta, alphaLattice, gamma)
                    double bigA = n * Math.sin(alphaAngle) + gammaRatio * m * Math.sin(betaAngle);

                    bigYmatrix.put(index, 0, new double[] { bigY });
                    bigAmatrix.put(index, 0, new double[] { 0.0 });
                    bigAmatrix.put(index, 1, new double[] { 1.0 });
                    bigAmatrix.put(index, 2, new double[] { bigA });
                    index++;
                }
            }
        }
    }

    // Log.v(Constants.TAG, "Big A Matrix: " + bigAmatrix.dump());
    // Log.v(Constants.TAG, "Big Y Matrix: " + bigYmatrix.dump());

    // Least Means Square Regression to find best values of origin_x, origin_y, and alpha_lattice.
    // Problem: Y = AX. Known Y and A, find X.
    // Tactic:  find minimum |AX - Y| (actually, the sum of squares of the elements).
    // OpenCV:  Core.solve(Mat src1, Mat src2, Mat dst, int)
    // OpenCV:  dst = arg min_X |src1 * X - src2|
    // Thus:    src1 = A { 2k rows and 3 columns }
    //          src2 = Y { 2k rows and 1 column }
    //          dst  = X { 3 rows and 1 column }
    boolean solveFlag = Core.solve(bigAmatrix, bigYmatrix, bigXmatrix, Core.DECOMP_NORMAL);

    // Log.v(Constants.TAG, "Big X Matrix Result: " + bigXmatrix.dump());

    // Sum of errors squared.
    // Given X from above, Y_estimate = AX, so E = Y - AX.
    Mat bigEmatrix = new Mat(2 * k, 1, CvType.CV_64FC1);
    for (int r = 0; r < (2 * k); r++) {
        double y = bigYmatrix.get(r, 0)[0];
        double error = y;
        for (int c = 0; c < 3; c++) {
            double a = bigAmatrix.get(r, c)[0];
            double x = bigXmatrix.get(c, 0)[0];
            error -= a * x;
        }
        bigEmatrix.put(r, 0, error);
    }

    // sigma^2 = diagonal_sum(Et * E)
    double sigma = 0;
    for (int r = 0; r < (2 * k); r++) {
        double error = bigEmatrix.get(r, 0)[0];
        sigma += error * error;
    }
    sigma = Math.sqrt(sigma);

    // Log.v(Constants.TAG, "Big E Matrix Result: " + bigEmatrix.dump());

    // =+= not currently in use, could be deleted.
    // Retrieve error terms and compose an array of error vectors: one for each occupied
    // cell, whose vector points from the tile center to the actual location of the rhombus.
    Point[][] errorVectorArray = new Point[3][3];
    index = 0;
    for (int n = 0; n < 3; n++) {
        for (int m = 0; m < 3; m++) {
            Rhombus rhombus = faceRhombusArray[n][m]; // We expect this array to not have changed from above.
            if (rhombus != null) {
                errorVectorArray[n][m] = new Point(
                        bigEmatrix.get(index++, 0)[0],  // x component of the error
                        bigEmatrix.get(index++, 0)[0]); // y component of the error
            }
        }
    }

    double x = bigXmatrix.get(0, 0)[0];
    double y = bigXmatrix.get(1, 0)[0];
    double alphaLatice = bigXmatrix.get(2, 0)[0];
    boolean valid = !Double.isNaN(x) && !Double.isNaN(y) && !Double.isNaN(alphaLatice) && !Double.isNaN(sigma);

    Log.i(Constants.TAG, String.format("Rubik Solution: x=%4.0f y=%4.0f alphaLattice=%4.0f sigma=%4.0f flag=%b",
            x, y, alphaLatice, sigma, solveFlag));

    return new LeastMeansSquare(x, y, alphaLatice, errorVectorArray, sigma, valid);
}
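The regression above is ordinary least squares: Core.solve with Core.DECOMP_NORMAL solves the over-determined system Y = AX via the normal equations. A self-contained toy version of the same call (values invented), fitting the line y = a + b*t to three samples:

// Toy least-squares fit mirroring findOptimumFaceFit: model y = a + b*t,
// three observations, so A is 3x2, Y is 3x1, and the unknown X is 2x1.
Mat A = new Mat(3, 2, CvType.CV_64FC1);
Mat Y = new Mat(3, 1, CvType.CV_64FC1);
Mat X = new Mat(2, 1, CvType.CV_64FC1);

double[][] samples = { { 0, 1.0 }, { 1, 2.9 }, { 2, 5.1 } }; // (t, y) pairs
for (int r = 0; r < 3; r++) {
    A.put(r, 0, 1.0);           // coefficient of a
    A.put(r, 1, samples[r][0]); // coefficient of b
    Y.put(r, 0, samples[r][1]);
}

// DECOMP_NORMAL solves min_X |A*X - Y| in the least-squares sense.
boolean ok = Core.solve(A, Y, X, Core.DECOMP_NORMAL);
// X now holds approximately { 0.95, 2.05 }, i.e. y ≈ 0.95 + 2.05 * t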