//
// This file is auto-generated. Please don't modify it!
//
package org.opencv.calib3d;

import java.util.ArrayList;
import java.util.List;
import org.opencv.calib3d.UsacParams;
import org.opencv.core.Mat;
import org.opencv.core.MatOfDouble;
import org.opencv.core.MatOfPoint2f;
import org.opencv.core.MatOfPoint3f;
import org.opencv.core.Point;
import org.opencv.core.Rect;
import org.opencv.core.Scalar;
import org.opencv.core.Size;
import org.opencv.core.TermCriteria;
import org.opencv.utils.Converters;

// C++: class Calib3d

public class Calib3d {

    // C++: enum <unnamed>
    public static final int
            CV_ITERATIVE = 0,
            CV_EPNP = 1,
            CV_P3P = 2,
            CV_DLS = 3,
            CvLevMarq_DONE = 0,
            CvLevMarq_STARTED = 1,
            CvLevMarq_CALC_J = 2,
            CvLevMarq_CHECK_ERR = 3,
            LMEDS = 4,
            RANSAC = 8,
            RHO = 16,
            USAC_DEFAULT = 32,
            USAC_PARALLEL = 33,
            USAC_FM_8PTS = 34,
            USAC_FAST = 35,
            USAC_ACCURATE = 36,
            USAC_PROSAC = 37,
            USAC_MAGSAC = 38,
            CALIB_CB_ADAPTIVE_THRESH = 1,
            CALIB_CB_NORMALIZE_IMAGE = 2,
            CALIB_CB_FILTER_QUADS = 4,
            CALIB_CB_FAST_CHECK = 8,
            CALIB_CB_EXHAUSTIVE = 16,
            CALIB_CB_ACCURACY = 32,
            CALIB_CB_LARGER = 64,
            CALIB_CB_MARKER = 128,
            CALIB_CB_SYMMETRIC_GRID = 1,
            CALIB_CB_ASYMMETRIC_GRID = 2,
            CALIB_CB_CLUSTERING = 4,
            CALIB_NINTRINSIC = 18,
            CALIB_USE_INTRINSIC_GUESS = 0x00001,
            CALIB_FIX_ASPECT_RATIO = 0x00002,
            CALIB_FIX_PRINCIPAL_POINT = 0x00004,
            CALIB_ZERO_TANGENT_DIST = 0x00008,
            CALIB_FIX_FOCAL_LENGTH = 0x00010,
            CALIB_FIX_K1 = 0x00020,
            CALIB_FIX_K2 = 0x00040,
            CALIB_FIX_K3 = 0x00080,
            CALIB_FIX_K4 = 0x00800,
            CALIB_FIX_K5 = 0x01000,
            CALIB_FIX_K6 = 0x02000,
            CALIB_RATIONAL_MODEL = 0x04000,
            CALIB_THIN_PRISM_MODEL = 0x08000,
            CALIB_FIX_S1_S2_S3_S4 = 0x10000,
            CALIB_TILTED_MODEL = 0x40000,
            CALIB_FIX_TAUX_TAUY = 0x80000,
            CALIB_USE_QR = 0x100000,
            CALIB_FIX_TANGENT_DIST = 0x200000,
            CALIB_FIX_INTRINSIC = 0x00100,
            CALIB_SAME_FOCAL_LENGTH = 0x00200,
            CALIB_ZERO_DISPARITY = 0x00400,
            CALIB_USE_LU = (1 << 17),
            CALIB_USE_EXTRINSIC_GUESS = (1 << 22),
            FM_7POINT = 1,
            FM_8POINT = 2,
            FM_LMEDS = 4,
            FM_RANSAC = 8,
            fisheye_CALIB_USE_INTRINSIC_GUESS = 1 << 0,
            fisheye_CALIB_RECOMPUTE_EXTRINSIC = 1 << 1,
            fisheye_CALIB_CHECK_COND = 1 << 2,
            fisheye_CALIB_FIX_SKEW = 1 << 3,
            fisheye_CALIB_FIX_K1 = 1 << 4,
            fisheye_CALIB_FIX_K2 = 1 << 5,
            fisheye_CALIB_FIX_K3 = 1 << 6,
            fisheye_CALIB_FIX_K4 = 1 << 7,
            fisheye_CALIB_FIX_INTRINSIC = 1 << 8,
            fisheye_CALIB_FIX_PRINCIPAL_POINT = 1 << 9,
            fisheye_CALIB_ZERO_DISPARITY = 1 << 10,
            fisheye_CALIB_FIX_FOCAL_LENGTH = 1 << 11;


    // C++: enum GridType (cv.CirclesGridFinderParameters.GridType)
    public static final int
            CirclesGridFinderParameters_SYMMETRIC_GRID = 0,
            CirclesGridFinderParameters_ASYMMETRIC_GRID = 1;


    // C++: enum HandEyeCalibrationMethod (cv.HandEyeCalibrationMethod)
    public static final int
            CALIB_HAND_EYE_TSAI = 0,
            CALIB_HAND_EYE_PARK = 1,
            CALIB_HAND_EYE_HORAUD = 2,
            CALIB_HAND_EYE_ANDREFF = 3,
            CALIB_HAND_EYE_DANIILIDIS = 4;


    // C++: enum LocalOptimMethod (cv.LocalOptimMethod)
    public static final int
            LOCAL_OPTIM_NULL = 0,
            LOCAL_OPTIM_INNER_LO = 1,
            LOCAL_OPTIM_INNER_AND_ITER_LO = 2,
            LOCAL_OPTIM_GC = 3,
            LOCAL_OPTIM_SIGMA = 4;

    // C++: enum NeighborSearchMethod (cv.NeighborSearchMethod)
    public static final int
            NEIGH_FLANN_KNN = 0,
            NEIGH_GRID = 1,
            NEIGH_FLANN_RADIUS = 2;


    // C++: enum RobotWorldHandEyeCalibrationMethod (cv.RobotWorldHandEyeCalibrationMethod)
    public static final int
            CALIB_ROBOT_WORLD_HAND_EYE_SHAH = 0,
            CALIB_ROBOT_WORLD_HAND_EYE_LI = 1;


    // C++: enum SamplingMethod (cv.SamplingMethod)
    public static final int
            SAMPLING_UNIFORM = 0,
            SAMPLING_PROGRESSIVE_NAPSAC = 1,
            SAMPLING_NAPSAC = 2,
            SAMPLING_PROSAC = 3;


    // C++: enum ScoreMethod (cv.ScoreMethod)
    public static final int
            SCORE_METHOD_RANSAC = 0,
            SCORE_METHOD_MSAC = 1,
            SCORE_METHOD_MAGSAC = 2,
            SCORE_METHOD_LMEDS = 3;


    // C++: enum SolvePnPMethod (cv.SolvePnPMethod)
    public static final int
            SOLVEPNP_ITERATIVE = 0,
            SOLVEPNP_EPNP = 1,
            SOLVEPNP_P3P = 2,
            SOLVEPNP_DLS = 3,
            SOLVEPNP_UPNP = 4,
            SOLVEPNP_AP3P = 5,
            SOLVEPNP_IPPE = 6,
            SOLVEPNP_IPPE_SQUARE = 7,
            SOLVEPNP_SQPNP = 8,
            SOLVEPNP_MAX_COUNT = 8 + 1;


    // C++: enum UndistortTypes (cv.UndistortTypes)
    public static final int
            PROJ_SPHERICAL_ORTHO = 0,
            PROJ_SPHERICAL_EQRECT = 1;

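    // The CALIB_* constants above are bit flags meant to be OR-ed together and
    // passed as the "flags" argument of the calibration functions. A minimal
    // sketch (the variable name is illustrative, not part of the generated API):
    //
    //     int calibFlags = Calib3d.CALIB_FIX_ASPECT_RATIO
    //                    | Calib3d.CALIB_ZERO_TANGENT_DIST
    //                    | Calib3d.CALIB_FIX_K3;
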
    //
    // C++: void cv::Rodrigues(Mat src, Mat& dst, Mat& jacobian = Mat())
    //

    /**
     * Converts a rotation matrix to a rotation vector or vice versa.
     *
     * @param src Input rotation vector (3x1 or 1x3) or rotation matrix (3x3).
     * @param dst Output rotation matrix (3x3) or rotation vector (3x1 or 1x3), respectively.
     * @param jacobian Optional output Jacobian matrix, 3x9 or 9x3, which is a matrix of partial
     * derivatives of the output array components with respect to the input array components.
     *
     * \(\begin{array}{l} \theta \leftarrow norm(r) \\ r \leftarrow r/ \theta \\ R = \cos(\theta) I + (1- \cos{\theta} ) r r^T + \sin(\theta) \vecthreethree{0}{-r_z}{r_y}{r_z}{0}{-r_x}{-r_y}{r_x}{0} \end{array}\)
     *
     * The inverse transformation can also be done easily, since
     *
     * \(\sin ( \theta ) \vecthreethree{0}{-r_z}{r_y}{r_z}{0}{-r_x}{-r_y}{r_x}{0} = \frac{R - R^T}{2}\)
     *
     * A rotation vector is a convenient and most compact representation of a rotation matrix (since any
     * rotation matrix has just 3 degrees of freedom). The representation is used in the global 3D geometry
     * optimization procedures like REF: calibrateCamera, REF: stereoCalibrate, or REF: solvePnP .
     *
     * <b>Note:</b> More information about the computation of the derivative of a 3D rotation matrix with respect to its exponential coordinate
     * can be found in:
     * <ul>
     * <li>
     * A Compact Formula for the Derivative of a 3-D Rotation in Exponential Coordinates, Guillermo Gallego, Anthony J. Yezzi CITE: Gallego2014ACF
     * </li>
     * </ul>
     *
     * <b>Note:</b> Useful information on SE(3) and Lie Groups can be found in:
     * <ul>
     * <li>
     * A tutorial on SE(3) transformation parameterizations and on-manifold optimization, Jose-Luis Blanco CITE: blanco2010tutorial
     * </li>
     * <li>
     * Lie Groups for 2D and 3D Transformation, Ethan Eade CITE: Eade17
     * </li>
     * <li>
     * A micro Lie theory for state estimation in robotics, Joan Solà, Jérémie Deray, Dinesh Atchuthan CITE: Sol2018AML
     * </li>
     * </ul>
     */
    public static void Rodrigues(Mat src, Mat dst, Mat jacobian) {
        Rodrigues_0(src.nativeObj, dst.nativeObj, jacobian.nativeObj);
    }

    /**
     * Converts a rotation matrix to a rotation vector or vice versa.
     *
     * @param src Input rotation vector (3x1 or 1x3) or rotation matrix (3x3).
     * @param dst Output rotation matrix (3x3) or rotation vector (3x1 or 1x3), respectively.
     * derivatives of the output array components with respect to the input array components.
     *
     * \(\begin{array}{l} \theta \leftarrow norm(r) \\ r \leftarrow r/ \theta \\ R = \cos(\theta) I + (1- \cos{\theta} ) r r^T + \sin(\theta) \vecthreethree{0}{-r_z}{r_y}{r_z}{0}{-r_x}{-r_y}{r_x}{0} \end{array}\)
     *
     * The inverse transformation can also be done easily, since
     *
     * \(\sin ( \theta ) \vecthreethree{0}{-r_z}{r_y}{r_z}{0}{-r_x}{-r_y}{r_x}{0} = \frac{R - R^T}{2}\)
     *
     * A rotation vector is a convenient and most compact representation of a rotation matrix (since any
     * rotation matrix has just 3 degrees of freedom). The representation is used in the global 3D geometry
     * optimization procedures like REF: calibrateCamera, REF: stereoCalibrate, or REF: solvePnP .
     *
     * <b>Note:</b> More information about the computation of the derivative of a 3D rotation matrix with respect to its exponential coordinate
     * can be found in:
     * <ul>
     * <li>
     * A Compact Formula for the Derivative of a 3-D Rotation in Exponential Coordinates, Guillermo Gallego, Anthony J. Yezzi CITE: Gallego2014ACF
     * </li>
     * </ul>
     *
     * <b>Note:</b> Useful information on SE(3) and Lie Groups can be found in:
     * <ul>
     * <li>
     * A tutorial on SE(3) transformation parameterizations and on-manifold optimization, Jose-Luis Blanco CITE: blanco2010tutorial
     * </li>
     * <li>
     * Lie Groups for 2D and 3D Transformation, Ethan Eade CITE: Eade17
     * </li>
     * <li>
     * A micro Lie theory for state estimation in robotics, Joan Solà, Jérémie Deray, Dinesh Atchuthan CITE: Sol2018AML
     * </li>
     * </ul>
     */
    public static void Rodrigues(Mat src, Mat dst) {
        Rodrigues_1(src.nativeObj, dst.nativeObj);
    }

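    // A minimal usage sketch (illustrative helper, not part of the generated API):
    // round-trips a rotation through Rodrigues. Assumes the OpenCV native library
    // has already been loaded (e.g. System.loadLibrary(Core.NATIVE_LIBRARY_NAME)).
    private static void rodriguesExample() {
        Mat rvec = Mat.zeros(3, 1, org.opencv.core.CvType.CV_64F);
        rvec.put(2, 0, Math.PI / 2);   // 90-degree rotation about the z-axis
        Mat R = new Mat();
        Rodrigues(rvec, R);            // rotation vector -> 3x3 rotation matrix
        Mat rvecBack = new Mat();
        Rodrigues(R, rvecBack);        // 3x3 rotation matrix -> rotation vector
    }
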
    //
    // C++: Mat cv::findHomography(vector_Point2f srcPoints, vector_Point2f dstPoints, int method = 0, double ransacReprojThreshold = 3, Mat& mask = Mat(), int maxIters = 2000, double confidence = 0.995)
    //

    /**
     * Finds a perspective transformation between two planes.
     *
     * @param srcPoints Coordinates of the points in the original plane, a matrix of the type CV_32FC2
     * or vector<Point2f> .
     * @param dstPoints Coordinates of the points in the target plane, a matrix of the type CV_32FC2 or
     * a vector<Point2f> .
     * @param method Method used to compute a homography matrix. The following methods are possible:
     * <ul>
     * <li>
     * <b>0</b> - a regular method using all the points, i.e., the least squares method
     * </li>
     * <li>
     * REF: RANSAC - RANSAC-based robust method
     * </li>
     * <li>
     * REF: LMEDS - Least-Median robust method
     * </li>
     * <li>
     * REF: RHO - PROSAC-based robust method
     * </li>
     * </ul>
     * @param ransacReprojThreshold Maximum allowed reprojection error to treat a point pair as an inlier
     * (used in the RANSAC and RHO methods only). That is, if
     * \(\| \texttt{dstPoints} _i - \texttt{convertPointsHomogeneous} ( \texttt{H} * \texttt{srcPoints} _i) \|_2 > \texttt{ransacReprojThreshold}\)
     * then the point \(i\) is considered as an outlier. If srcPoints and dstPoints are measured in pixels,
     * it usually makes sense to set this parameter somewhere in the range of 1 to 10.
     * @param mask Optional output mask set by a robust method ( RANSAC or LMeDS ). Note that the input
     * mask values are ignored.
     * @param maxIters The maximum number of RANSAC iterations.
     * @param confidence Confidence level, between 0 and 1.
     *
     * The function finds and returns the perspective transformation \(H\) between the source and the
     * destination planes:
     *
     * \(s_i \vecthree{x'_i}{y'_i}{1} \sim H \vecthree{x_i}{y_i}{1}\)
     *
     * so that the back-projection error
     *
     * \(\sum _i \left ( x'_i- \frac{h_{11} x_i + h_{12} y_i + h_{13}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2+ \left ( y'_i- \frac{h_{21} x_i + h_{22} y_i + h_{23}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2\)
     *
     * is minimized. If the parameter method is set to the default value 0, the function uses all the point
     * pairs to compute an initial homography estimate with a simple least-squares scheme.
     *
     * However, if not all of the point pairs ( \(srcPoints_i\), \(dstPoints_i\) ) fit the rigid perspective
     * transformation (that is, there are some outliers), this initial estimate will be poor. In this case,
     * you can use one of the three robust methods. The methods RANSAC, LMeDS and RHO try many different
     * random subsets of the corresponding point pairs (of four pairs each, collinear pairs are discarded), estimate the homography matrix
     * using this subset and a simple least-squares algorithm, and then compute the quality/goodness of the
     * computed homography (which is the number of inliers for RANSAC or the least median re-projection error for
     * LMeDS). The best subset is then used to produce the initial estimate of the homography matrix and
     * the mask of inliers/outliers.
     *
     * Regardless of the method, robust or not, the computed homography matrix is refined further (using
     * inliers only in case of a robust method) with the Levenberg-Marquardt method to reduce the
     * re-projection error even more.
     *
     * The methods RANSAC and RHO can handle practically any ratio of outliers but need a threshold to
     * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
     * correctly only when there are more than 50% of inliers. Finally, if there are no outliers and the
     * noise is rather small, use the default method (method=0).
     *
     * The function is used to find initial intrinsic and extrinsic matrices. Homography matrix is
     * determined up to a scale. Thus, it is normalized so that \(h_{33}=1\). Note that whenever an \(H\) matrix
     * cannot be estimated, an empty one will be returned.
     *
     * SEE:
     * getAffineTransform, estimateAffine2D, estimateAffinePartial2D, getPerspectiveTransform, warpPerspective,
     * perspectiveTransform
     * @return automatically generated
     */
    public static Mat findHomography(MatOfPoint2f srcPoints, MatOfPoint2f dstPoints, int method, double ransacReprojThreshold, Mat mask, int maxIters, double confidence) {
        Mat srcPoints_mat = srcPoints;
        Mat dstPoints_mat = dstPoints;
        return new Mat(findHomography_0(srcPoints_mat.nativeObj, dstPoints_mat.nativeObj, method, ransacReprojThreshold, mask.nativeObj, maxIters, confidence));
    }

    /**
     * Finds a perspective transformation between two planes.
     *
     * @param srcPoints Coordinates of the points in the original plane, a matrix of the type CV_32FC2
     * or vector<Point2f> .
     * @param dstPoints Coordinates of the points in the target plane, a matrix of the type CV_32FC2 or
     * a vector<Point2f> .
     * @param method Method used to compute a homography matrix. The following methods are possible:
     * <ul>
     * <li>
     * <b>0</b> - a regular method using all the points, i.e., the least squares method
     * </li>
     * <li>
     * REF: RANSAC - RANSAC-based robust method
     * </li>
     * <li>
     * REF: LMEDS - Least-Median robust method
     * </li>
     * <li>
     * REF: RHO - PROSAC-based robust method
     * </li>
     * </ul>
     * @param ransacReprojThreshold Maximum allowed reprojection error to treat a point pair as an inlier
     * (used in the RANSAC and RHO methods only). That is, if
     * \(\| \texttt{dstPoints} _i - \texttt{convertPointsHomogeneous} ( \texttt{H} * \texttt{srcPoints} _i) \|_2 > \texttt{ransacReprojThreshold}\)
     * then the point \(i\) is considered as an outlier. If srcPoints and dstPoints are measured in pixels,
     * it usually makes sense to set this parameter somewhere in the range of 1 to 10.
     * @param mask Optional output mask set by a robust method ( RANSAC or LMeDS ). Note that the input
     * mask values are ignored.
     * @param maxIters The maximum number of RANSAC iterations.
     *
     * The function finds and returns the perspective transformation \(H\) between the source and the
     * destination planes:
     *
     * \(s_i \vecthree{x'_i}{y'_i}{1} \sim H \vecthree{x_i}{y_i}{1}\)
     *
     * so that the back-projection error
     *
     * \(\sum _i \left ( x'_i- \frac{h_{11} x_i + h_{12} y_i + h_{13}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2+ \left ( y'_i- \frac{h_{21} x_i + h_{22} y_i + h_{23}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2\)
     *
     * is minimized. If the parameter method is set to the default value 0, the function uses all the point
     * pairs to compute an initial homography estimate with a simple least-squares scheme.
     *
     * However, if not all of the point pairs ( \(srcPoints_i\), \(dstPoints_i\) ) fit the rigid perspective
     * transformation (that is, there are some outliers), this initial estimate will be poor. In this case,
     * you can use one of the three robust methods. The methods RANSAC, LMeDS and RHO try many different
     * random subsets of the corresponding point pairs (of four pairs each, collinear pairs are discarded), estimate the homography matrix
     * using this subset and a simple least-squares algorithm, and then compute the quality/goodness of the
     * computed homography (which is the number of inliers for RANSAC or the least median re-projection error for
     * LMeDS). The best subset is then used to produce the initial estimate of the homography matrix and
     * the mask of inliers/outliers.
     *
     * Regardless of the method, robust or not, the computed homography matrix is refined further (using
     * inliers only in case of a robust method) with the Levenberg-Marquardt method to reduce the
     * re-projection error even more.
     *
     * The methods RANSAC and RHO can handle practically any ratio of outliers but need a threshold to
     * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
     * correctly only when there are more than 50% of inliers. Finally, if there are no outliers and the
     * noise is rather small, use the default method (method=0).
     *
     * The function is used to find initial intrinsic and extrinsic matrices. Homography matrix is
     * determined up to a scale. Thus, it is normalized so that \(h_{33}=1\). Note that whenever an \(H\) matrix
     * cannot be estimated, an empty one will be returned.
     *
     * SEE:
     * getAffineTransform, estimateAffine2D, estimateAffinePartial2D, getPerspectiveTransform, warpPerspective,
     * perspectiveTransform
     * @return automatically generated
     */
    public static Mat findHomography(MatOfPoint2f srcPoints, MatOfPoint2f dstPoints, int method, double ransacReprojThreshold, Mat mask, int maxIters) {
        Mat srcPoints_mat = srcPoints;
        Mat dstPoints_mat = dstPoints;
        return new Mat(findHomography_1(srcPoints_mat.nativeObj, dstPoints_mat.nativeObj, method, ransacReprojThreshold, mask.nativeObj, maxIters));
    }

    /**
     * Finds a perspective transformation between two planes.
     *
     * @param srcPoints Coordinates of the points in the original plane, a matrix of the type CV_32FC2
     * or vector<Point2f> .
     * @param dstPoints Coordinates of the points in the target plane, a matrix of the type CV_32FC2 or
     * a vector<Point2f> .
     * @param method Method used to compute a homography matrix. The following methods are possible:
     * <ul>
     * <li>
     * <b>0</b> - a regular method using all the points, i.e., the least squares method
     * </li>
     * <li>
     * REF: RANSAC - RANSAC-based robust method
     * </li>
     * <li>
     * REF: LMEDS - Least-Median robust method
     * </li>
     * <li>
     * REF: RHO - PROSAC-based robust method
     * </li>
     * </ul>
     * @param ransacReprojThreshold Maximum allowed reprojection error to treat a point pair as an inlier
     * (used in the RANSAC and RHO methods only). That is, if
     * \(\| \texttt{dstPoints} _i - \texttt{convertPointsHomogeneous} ( \texttt{H} * \texttt{srcPoints} _i) \|_2 > \texttt{ransacReprojThreshold}\)
     * then the point \(i\) is considered as an outlier. If srcPoints and dstPoints are measured in pixels,
     * it usually makes sense to set this parameter somewhere in the range of 1 to 10.
     * @param mask Optional output mask set by a robust method ( RANSAC or LMeDS ). Note that the input
     * mask values are ignored.
     *
     * The function finds and returns the perspective transformation \(H\) between the source and the
     * destination planes:
     *
     * \(s_i \vecthree{x'_i}{y'_i}{1} \sim H \vecthree{x_i}{y_i}{1}\)
     *
     * so that the back-projection error
     *
     * \(\sum _i \left ( x'_i- \frac{h_{11} x_i + h_{12} y_i + h_{13}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2+ \left ( y'_i- \frac{h_{21} x_i + h_{22} y_i + h_{23}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2\)
     *
     * is minimized. If the parameter method is set to the default value 0, the function uses all the point
     * pairs to compute an initial homography estimate with a simple least-squares scheme.
     *
     * However, if not all of the point pairs ( \(srcPoints_i\), \(dstPoints_i\) ) fit the rigid perspective
     * transformation (that is, there are some outliers), this initial estimate will be poor. In this case,
     * you can use one of the three robust methods. The methods RANSAC, LMeDS and RHO try many different
     * random subsets of the corresponding point pairs (of four pairs each, collinear pairs are discarded), estimate the homography matrix
     * using this subset and a simple least-squares algorithm, and then compute the quality/goodness of the
     * computed homography (which is the number of inliers for RANSAC or the least median re-projection error for
     * LMeDS). The best subset is then used to produce the initial estimate of the homography matrix and
     * the mask of inliers/outliers.
     *
     * Regardless of the method, robust or not, the computed homography matrix is refined further (using
     * inliers only in case of a robust method) with the Levenberg-Marquardt method to reduce the
     * re-projection error even more.
     *
     * The methods RANSAC and RHO can handle practically any ratio of outliers but need a threshold to
     * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
     * correctly only when there are more than 50% of inliers. Finally, if there are no outliers and the
     * noise is rather small, use the default method (method=0).
     *
     * The function is used to find initial intrinsic and extrinsic matrices. Homography matrix is
     * determined up to a scale. Thus, it is normalized so that \(h_{33}=1\). Note that whenever an \(H\) matrix
     * cannot be estimated, an empty one will be returned.
     *
     * SEE:
     * getAffineTransform, estimateAffine2D, estimateAffinePartial2D, getPerspectiveTransform, warpPerspective,
     * perspectiveTransform
     * @return automatically generated
     */
    public static Mat findHomography(MatOfPoint2f srcPoints, MatOfPoint2f dstPoints, int method, double ransacReprojThreshold, Mat mask) {
        Mat srcPoints_mat = srcPoints;
        Mat dstPoints_mat = dstPoints;
        return new Mat(findHomography_2(srcPoints_mat.nativeObj, dstPoints_mat.nativeObj, method, ransacReprojThreshold, mask.nativeObj));
    }

    /**
     * Finds a perspective transformation between two planes.
     *
     * @param srcPoints Coordinates of the points in the original plane, a matrix of the type CV_32FC2
     * or vector<Point2f> .
     * @param dstPoints Coordinates of the points in the target plane, a matrix of the type CV_32FC2 or
     * a vector<Point2f> .
     * @param method Method used to compute a homography matrix. The following methods are possible:
     * <ul>
     * <li>
     * <b>0</b> - a regular method using all the points, i.e., the least squares method
     * </li>
     * <li>
     * REF: RANSAC - RANSAC-based robust method
     * </li>
     * <li>
     * REF: LMEDS - Least-Median robust method
     * </li>
     * <li>
     * REF: RHO - PROSAC-based robust method
     * </li>
     * </ul>
     * @param ransacReprojThreshold Maximum allowed reprojection error to treat a point pair as an inlier
     * (used in the RANSAC and RHO methods only). That is, if
     * \(\| \texttt{dstPoints} _i - \texttt{convertPointsHomogeneous} ( \texttt{H} * \texttt{srcPoints} _i) \|_2 > \texttt{ransacReprojThreshold}\)
     * then the point \(i\) is considered as an outlier. If srcPoints and dstPoints are measured in pixels,
     * it usually makes sense to set this parameter somewhere in the range of 1 to 10.
     * mask values are ignored.
     *
     * The function finds and returns the perspective transformation \(H\) between the source and the
     * destination planes:
     *
     * \(s_i \vecthree{x'_i}{y'_i}{1} \sim H \vecthree{x_i}{y_i}{1}\)
     *
     * so that the back-projection error
     *
     * \(\sum _i \left ( x'_i- \frac{h_{11} x_i + h_{12} y_i + h_{13}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2+ \left ( y'_i- \frac{h_{21} x_i + h_{22} y_i + h_{23}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2\)
     *
     * is minimized. If the parameter method is set to the default value 0, the function uses all the point
     * pairs to compute an initial homography estimate with a simple least-squares scheme.
     *
     * However, if not all of the point pairs ( \(srcPoints_i\), \(dstPoints_i\) ) fit the rigid perspective
     * transformation (that is, there are some outliers), this initial estimate will be poor. In this case,
     * you can use one of the three robust methods. The methods RANSAC, LMeDS and RHO try many different
     * random subsets of the corresponding point pairs (of four pairs each, collinear pairs are discarded), estimate the homography matrix
     * using this subset and a simple least-squares algorithm, and then compute the quality/goodness of the
     * computed homography (which is the number of inliers for RANSAC or the least median re-projection error for
     * LMeDS). The best subset is then used to produce the initial estimate of the homography matrix and
     * the mask of inliers/outliers.
     *
     * Regardless of the method, robust or not, the computed homography matrix is refined further (using
     * inliers only in case of a robust method) with the Levenberg-Marquardt method to reduce the
     * re-projection error even more.
     *
     * The methods RANSAC and RHO can handle practically any ratio of outliers but need a threshold to
     * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
     * correctly only when there are more than 50% of inliers. Finally, if there are no outliers and the
     * noise is rather small, use the default method (method=0).
     *
     * The function is used to find initial intrinsic and extrinsic matrices. Homography matrix is
     * determined up to a scale. Thus, it is normalized so that \(h_{33}=1\). Note that whenever an \(H\) matrix
     * cannot be estimated, an empty one will be returned.
     *
     * SEE:
     * getAffineTransform, estimateAffine2D, estimateAffinePartial2D, getPerspectiveTransform, warpPerspective,
     * perspectiveTransform
     * @return automatically generated
     */
    public static Mat findHomography(MatOfPoint2f srcPoints, MatOfPoint2f dstPoints, int method, double ransacReprojThreshold) {
        Mat srcPoints_mat = srcPoints;
        Mat dstPoints_mat = dstPoints;
        return new Mat(findHomography_3(srcPoints_mat.nativeObj, dstPoints_mat.nativeObj, method, ransacReprojThreshold));
    }

    /**
     * Finds a perspective transformation between two planes.
     *
     * @param srcPoints Coordinates of the points in the original plane, a matrix of the type CV_32FC2
     * or vector<Point2f> .
     * @param dstPoints Coordinates of the points in the target plane, a matrix of the type CV_32FC2 or
     * a vector<Point2f> .
     * @param method Method used to compute a homography matrix. The following methods are possible:
     * <ul>
     * <li>
     * <b>0</b> - a regular method using all the points, i.e., the least squares method
     * </li>
     * <li>
     * REF: RANSAC - RANSAC-based robust method
     * </li>
     * <li>
     * REF: LMEDS - Least-Median robust method
     * </li>
     * <li>
     * REF: RHO - PROSAC-based robust method
     * </li>
     * </ul>
     * (used in the RANSAC and RHO methods only). That is, if
     * \(\| \texttt{dstPoints} _i - \texttt{convertPointsHomogeneous} ( \texttt{H} * \texttt{srcPoints} _i) \|_2 > \texttt{ransacReprojThreshold}\)
     * then the point \(i\) is considered as an outlier. If srcPoints and dstPoints are measured in pixels,
     * it usually makes sense to set this parameter somewhere in the range of 1 to 10.
     * mask values are ignored.
     *
     * The function finds and returns the perspective transformation \(H\) between the source and the
     * destination planes:
     *
     * \(s_i \vecthree{x'_i}{y'_i}{1} \sim H \vecthree{x_i}{y_i}{1}\)
     *
     * so that the back-projection error
     *
     * \(\sum _i \left ( x'_i- \frac{h_{11} x_i + h_{12} y_i + h_{13}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2+ \left ( y'_i- \frac{h_{21} x_i + h_{22} y_i + h_{23}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2\)
     *
     * is minimized. If the parameter method is set to the default value 0, the function uses all the point
     * pairs to compute an initial homography estimate with a simple least-squares scheme.
     *
     * However, if not all of the point pairs ( \(srcPoints_i\), \(dstPoints_i\) ) fit the rigid perspective
     * transformation (that is, there are some outliers), this initial estimate will be poor. In this case,
     * you can use one of the three robust methods. The methods RANSAC, LMeDS and RHO try many different
     * random subsets of the corresponding point pairs (of four pairs each, collinear pairs are discarded), estimate the homography matrix
     * using this subset and a simple least-squares algorithm, and then compute the quality/goodness of the
     * computed homography (which is the number of inliers for RANSAC or the least median re-projection error for
     * LMeDS). The best subset is then used to produce the initial estimate of the homography matrix and
     * the mask of inliers/outliers.
     *
     * Regardless of the method, robust or not, the computed homography matrix is refined further (using
     * inliers only in case of a robust method) with the Levenberg-Marquardt method to reduce the
     * re-projection error even more.
     *
     * The methods RANSAC and RHO can handle practically any ratio of outliers but need a threshold to
     * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
     * correctly only when there are more than 50% of inliers. Finally, if there are no outliers and the
     * noise is rather small, use the default method (method=0).
     *
     * The function is used to find initial intrinsic and extrinsic matrices. Homography matrix is
     * determined up to a scale. Thus, it is normalized so that \(h_{33}=1\). Note that whenever an \(H\) matrix
     * cannot be estimated, an empty one will be returned.
     *
     * SEE:
     * getAffineTransform, estimateAffine2D, estimateAffinePartial2D, getPerspectiveTransform, warpPerspective,
     * perspectiveTransform
     * @return automatically generated
     */
    public static Mat findHomography(MatOfPoint2f srcPoints, MatOfPoint2f dstPoints, int method) {
        Mat srcPoints_mat = srcPoints;
        Mat dstPoints_mat = dstPoints;
        return new Mat(findHomography_4(srcPoints_mat.nativeObj, dstPoints_mat.nativeObj, method));
    }

    /**
     * Finds a perspective transformation between two planes.
     *
     * @param srcPoints Coordinates of the points in the original plane, a matrix of the type CV_32FC2
     * or vector<Point2f> .
     * @param dstPoints Coordinates of the points in the target plane, a matrix of the type CV_32FC2 or
     * a vector<Point2f> .
     * <ul>
     * <li>
     * <b>0</b> - a regular method using all the points, i.e., the least squares method
     * </li>
     * <li>
     * REF: RANSAC - RANSAC-based robust method
     * </li>
     * <li>
     * REF: LMEDS - Least-Median robust method
     * </li>
     * <li>
     * REF: RHO - PROSAC-based robust method
     * </li>
     * </ul>
     * (used in the RANSAC and RHO methods only). That is, if
     * \(\| \texttt{dstPoints} _i - \texttt{convertPointsHomogeneous} ( \texttt{H} * \texttt{srcPoints} _i) \|_2 > \texttt{ransacReprojThreshold}\)
     * then the point \(i\) is considered as an outlier. If srcPoints and dstPoints are measured in pixels,
     * it usually makes sense to set this parameter somewhere in the range of 1 to 10.
     * mask values are ignored.
     *
     * The function finds and returns the perspective transformation \(H\) between the source and the
     * destination planes:
     *
     * \(s_i \vecthree{x'_i}{y'_i}{1} \sim H \vecthree{x_i}{y_i}{1}\)
     *
     * so that the back-projection error
     *
     * \(\sum _i \left ( x'_i- \frac{h_{11} x_i + h_{12} y_i + h_{13}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2+ \left ( y'_i- \frac{h_{21} x_i + h_{22} y_i + h_{23}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2\)
     *
     * is minimized. If the parameter method is set to the default value 0, the function uses all the point
     * pairs to compute an initial homography estimate with a simple least-squares scheme.
     *
     * However, if not all of the point pairs ( \(srcPoints_i\), \(dstPoints_i\) ) fit the rigid perspective
     * transformation (that is, there are some outliers), this initial estimate will be poor. In this case,
     * you can use one of the three robust methods. The methods RANSAC, LMeDS and RHO try many different
     * random subsets of the corresponding point pairs (of four pairs each, collinear pairs are discarded), estimate the homography matrix
     * using this subset and a simple least-squares algorithm, and then compute the quality/goodness of the
     * computed homography (which is the number of inliers for RANSAC or the least median re-projection error for
     * LMeDS). The best subset is then used to produce the initial estimate of the homography matrix and
     * the mask of inliers/outliers.
     *
     * Regardless of the method, robust or not, the computed homography matrix is refined further (using
     * inliers only in case of a robust method) with the Levenberg-Marquardt method to reduce the
     * re-projection error even more.
     *
     * The methods RANSAC and RHO can handle practically any ratio of outliers but need a threshold to
     * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
     * correctly only when there are more than 50% of inliers. Finally, if there are no outliers and the
     * noise is rather small, use the default method (method=0).
     *
     * The function is used to find initial intrinsic and extrinsic matrices. Homography matrix is
     * determined up to a scale. Thus, it is normalized so that \(h_{33}=1\). Note that whenever an \(H\) matrix
     * cannot be estimated, an empty one will be returned.
     *
     * SEE:
     * getAffineTransform, estimateAffine2D, estimateAffinePartial2D, getPerspectiveTransform, warpPerspective,
     * perspectiveTransform
     * @return automatically generated
     */
    public static Mat findHomography(MatOfPoint2f srcPoints, MatOfPoint2f dstPoints) {
        Mat srcPoints_mat = srcPoints;
        Mat dstPoints_mat = dstPoints;
        return new Mat(findHomography_5(srcPoints_mat.nativeObj, dstPoints_mat.nativeObj));
    }


    //
    // C++: Mat cv::findHomography(vector_Point2f srcPoints, vector_Point2f dstPoints, Mat& mask, UsacParams params)
    //

    public static Mat findHomography(MatOfPoint2f srcPoints, MatOfPoint2f dstPoints, Mat mask, UsacParams params) {
        Mat srcPoints_mat = srcPoints;
        Mat dstPoints_mat = dstPoints;
        return new Mat(findHomography_6(srcPoints_mat.nativeObj, dstPoints_mat.nativeObj, mask.nativeObj, params.nativeObj));
    }

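    // A minimal usage sketch (illustrative helper, not part of the generated API):
    // estimates a homography from point correspondences with RANSAC; real inputs
    // would normally contain many (possibly noisy) matches, not just four.
    private static void findHomographyExample() {
        MatOfPoint2f src = new MatOfPoint2f(
                new Point(0, 0), new Point(100, 0), new Point(100, 100), new Point(0, 100));
        MatOfPoint2f dst = new MatOfPoint2f(
                new Point(10, 5), new Point(115, 8), new Point(120, 110), new Point(8, 105));
        Mat mask = new Mat();                        // receives the inlier mask
        Mat H = findHomography(src, dst, RANSAC, 3.0, mask);
        // H is 3x3 (empty if estimation failed); mask entries are 1 for inliers.
    }
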
    //
    // C++: Vec3d cv::RQDecomp3x3(Mat src, Mat& mtxR, Mat& mtxQ, Mat& Qx = Mat(), Mat& Qy = Mat(), Mat& Qz = Mat())
    //

    /**
     * Computes an RQ decomposition of 3x3 matrices.
     *
     * @param src 3x3 input matrix.
     * @param mtxR Output 3x3 upper-triangular matrix.
     * @param mtxQ Output 3x3 orthogonal matrix.
     * @param Qx Optional output 3x3 rotation matrix around x-axis.
     * @param Qy Optional output 3x3 rotation matrix around y-axis.
     * @param Qz Optional output 3x3 rotation matrix around z-axis.
     *
     * The function computes an RQ decomposition using the given rotations. This function is used in
     * #decomposeProjectionMatrix to decompose the left 3x3 submatrix of a projection matrix into a camera
     * and a rotation matrix.
     *
     * It optionally returns three rotation matrices, one for each axis, and the three Euler angles in
     * degrees (as the return value) that could be used in OpenGL. Note, there is always more than one
     * sequence of rotations about the three principal axes that results in the same orientation of an
     * object, e.g. see CITE: Slabaugh . Returned three rotation matrices and corresponding three Euler angles
     * are only one of the possible solutions.
     * @return automatically generated
     */
    public static double[] RQDecomp3x3(Mat src, Mat mtxR, Mat mtxQ, Mat Qx, Mat Qy, Mat Qz) {
        return RQDecomp3x3_0(src.nativeObj, mtxR.nativeObj, mtxQ.nativeObj, Qx.nativeObj, Qy.nativeObj, Qz.nativeObj);
    }

    /**
     * Computes an RQ decomposition of 3x3 matrices.
     *
     * @param src 3x3 input matrix.
     * @param mtxR Output 3x3 upper-triangular matrix.
     * @param mtxQ Output 3x3 orthogonal matrix.
     * @param Qx Optional output 3x3 rotation matrix around x-axis.
     * @param Qy Optional output 3x3 rotation matrix around y-axis.
     *
     * The function computes an RQ decomposition using the given rotations. This function is used in
     * #decomposeProjectionMatrix to decompose the left 3x3 submatrix of a projection matrix into a camera
     * and a rotation matrix.
     *
     * It optionally returns three rotation matrices, one for each axis, and the three Euler angles in
     * degrees (as the return value) that could be used in OpenGL. Note, there is always more than one
     * sequence of rotations about the three principal axes that results in the same orientation of an
     * object, e.g. see CITE: Slabaugh . Returned three rotation matrices and corresponding three Euler angles
     * are only one of the possible solutions.
     * @return automatically generated
     */
    public static double[] RQDecomp3x3(Mat src, Mat mtxR, Mat mtxQ, Mat Qx, Mat Qy) {
        return RQDecomp3x3_1(src.nativeObj, mtxR.nativeObj, mtxQ.nativeObj, Qx.nativeObj, Qy.nativeObj);
    }

    /**
     * Computes an RQ decomposition of 3x3 matrices.
     *
     * @param src 3x3 input matrix.
     * @param mtxR Output 3x3 upper-triangular matrix.
     * @param mtxQ Output 3x3 orthogonal matrix.
     * @param Qx Optional output 3x3 rotation matrix around x-axis.
     *
     * The function computes an RQ decomposition using the given rotations. This function is used in
     * #decomposeProjectionMatrix to decompose the left 3x3 submatrix of a projection matrix into a camera
     * and a rotation matrix.
     *
     * It optionally returns three rotation matrices, one for each axis, and the three Euler angles in
     * degrees (as the return value) that could be used in OpenGL. Note, there is always more than one
     * sequence of rotations about the three principal axes that results in the same orientation of an
     * object, e.g. see CITE: Slabaugh . Returned three rotation matrices and corresponding three Euler angles
     * are only one of the possible solutions.
     * @return automatically generated
     */
    public static double[] RQDecomp3x3(Mat src, Mat mtxR, Mat mtxQ, Mat Qx) {
        return RQDecomp3x3_2(src.nativeObj, mtxR.nativeObj, mtxQ.nativeObj, Qx.nativeObj);
    }

    /**
     * Computes an RQ decomposition of 3x3 matrices.
     *
     * @param src 3x3 input matrix.
     * @param mtxR Output 3x3 upper-triangular matrix.
     * @param mtxQ Output 3x3 orthogonal matrix.
     *
     * The function computes an RQ decomposition using the given rotations. This function is used in
     * #decomposeProjectionMatrix to decompose the left 3x3 submatrix of a projection matrix into a camera
     * and a rotation matrix.
     *
     * It optionally returns three rotation matrices, one for each axis, and the three Euler angles in
     * degrees (as the return value) that could be used in OpenGL. Note, there is always more than one
     * sequence of rotations about the three principal axes that results in the same orientation of an
     * object, e.g. see CITE: Slabaugh . Returned three rotation matrices and corresponding three Euler angles
     * are only one of the possible solutions.
     * @return automatically generated
     */
    public static double[] RQDecomp3x3(Mat src, Mat mtxR, Mat mtxQ) {
        return RQDecomp3x3_3(src.nativeObj, mtxR.nativeObj, mtxQ.nativeObj);
    }

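    // A minimal usage sketch (illustrative helper, not part of the generated API):
    // factors a 3x3 matrix into an upper-triangular factor and a rotation, with
    // the three Euler angles (in degrees) returned as a double[3].
    private static void rqDecomp3x3Example() {
        Mat M = Mat.eye(3, 3, org.opencv.core.CvType.CV_64F); // stand-in for the left 3x3 of a projection matrix
        Mat mtxR = new Mat();                                 // upper-triangular factor
        Mat mtxQ = new Mat();                                 // orthogonal factor
        double[] eulerDeg = RQDecomp3x3(M, mtxR, mtxQ);
        // eulerDeg is only one of several valid Euler-angle solutions (see note above).
    }
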
    //
    // C++: void cv::decomposeProjectionMatrix(Mat projMatrix, Mat& cameraMatrix, Mat& rotMatrix, Mat& transVect, Mat& rotMatrixX = Mat(), Mat& rotMatrixY = Mat(), Mat& rotMatrixZ = Mat(), Mat& eulerAngles = Mat())
    //

    /**
     * Decomposes a projection matrix into a rotation matrix and a camera intrinsic matrix.
     *
     * @param projMatrix 3x4 input projection matrix P.
     * @param cameraMatrix Output 3x3 camera intrinsic matrix \(\cameramatrix{A}\).
     * @param rotMatrix Output 3x3 external rotation matrix R.
     * @param transVect Output 4x1 translation vector T.
     * @param rotMatrixX Optional 3x3 rotation matrix around x-axis.
     * @param rotMatrixY Optional 3x3 rotation matrix around y-axis.
     * @param rotMatrixZ Optional 3x3 rotation matrix around z-axis.
     * @param eulerAngles Optional three-element vector containing three Euler angles of rotation in
     * degrees.
     *
     * The function computes a decomposition of a projection matrix into a calibration and a rotation
     * matrix and the position of a camera.
     *
     * It optionally returns three rotation matrices, one for each axis, and three Euler angles that could
     * be used in OpenGL. Note, there is always more than one sequence of rotations about the three
     * principal axes that results in the same orientation of an object, e.g. see CITE: Slabaugh . Returned
     * three rotation matrices and corresponding three Euler angles are only one of the possible solutions.
     *
     * The function is based on RQDecomp3x3 .
     */
    public static void decomposeProjectionMatrix(Mat projMatrix, Mat cameraMatrix, Mat rotMatrix, Mat transVect, Mat rotMatrixX, Mat rotMatrixY, Mat rotMatrixZ, Mat eulerAngles) {
        decomposeProjectionMatrix_0(projMatrix.nativeObj, cameraMatrix.nativeObj, rotMatrix.nativeObj, transVect.nativeObj, rotMatrixX.nativeObj, rotMatrixY.nativeObj, rotMatrixZ.nativeObj, eulerAngles.nativeObj);
    }

    /**
     * Decomposes a projection matrix into a rotation matrix and a camera intrinsic matrix.
     *
     * @param projMatrix 3x4 input projection matrix P.
     * @param cameraMatrix Output 3x3 camera intrinsic matrix \(\cameramatrix{A}\).
     * @param rotMatrix Output 3x3 external rotation matrix R.
     * @param transVect Output 4x1 translation vector T.
     * @param rotMatrixX Optional 3x3 rotation matrix around x-axis.
     * @param rotMatrixY Optional 3x3 rotation matrix around y-axis.
     * @param rotMatrixZ Optional 3x3 rotation matrix around z-axis.
     * degrees.
     *
     * The function computes a decomposition of a projection matrix into a calibration and a rotation
     * matrix and the position of a camera.
     *
     * It optionally returns three rotation matrices, one for each axis, and three Euler angles that could
     * be used in OpenGL. Note, there is always more than one sequence of rotations about the three
     * principal axes that results in the same orientation of an object, e.g. see CITE: Slabaugh . Returned
     * three rotation matrices and corresponding three Euler angles are only one of the possible solutions.
     *
     * The function is based on RQDecomp3x3 .
     */
    public static void decomposeProjectionMatrix(Mat projMatrix, Mat cameraMatrix, Mat rotMatrix, Mat transVect, Mat rotMatrixX, Mat rotMatrixY, Mat rotMatrixZ) {
        decomposeProjectionMatrix_1(projMatrix.nativeObj, cameraMatrix.nativeObj, rotMatrix.nativeObj, transVect.nativeObj, rotMatrixX.nativeObj, rotMatrixY.nativeObj, rotMatrixZ.nativeObj);
    }

    /**
     * Decomposes a projection matrix into a rotation matrix and a camera intrinsic matrix.
     *
     * @param projMatrix 3x4 input projection matrix P.
     * @param cameraMatrix Output 3x3 camera intrinsic matrix \(\cameramatrix{A}\).
     * @param rotMatrix Output 3x3 external rotation matrix R.
     * @param transVect Output 4x1 translation vector T.
     * @param rotMatrixX Optional 3x3 rotation matrix around x-axis.
     * @param rotMatrixY Optional 3x3 rotation matrix around y-axis.
     * degrees.
     *
     * The function computes a decomposition of a projection matrix into a calibration and a rotation
     * matrix and the position of a camera.
     *
     * It optionally returns three rotation matrices, one for each axis, and three Euler angles that could
     * be used in OpenGL. Note, there is always more than one sequence of rotations about the three
     * principal axes that results in the same orientation of an object, e.g. see CITE: Slabaugh . Returned
     * three rotation matrices and corresponding three Euler angles are only one of the possible solutions.
     *
     * The function is based on RQDecomp3x3 .
     */
    public static void decomposeProjectionMatrix(Mat projMatrix, Mat cameraMatrix, Mat rotMatrix, Mat transVect, Mat rotMatrixX, Mat rotMatrixY) {
        decomposeProjectionMatrix_2(projMatrix.nativeObj, cameraMatrix.nativeObj, rotMatrix.nativeObj, transVect.nativeObj, rotMatrixX.nativeObj, rotMatrixY.nativeObj);
    }

    /**
     * Decomposes a projection matrix into a rotation matrix and a camera intrinsic matrix.
     *
     * @param projMatrix 3x4 input projection matrix P.
     * @param cameraMatrix Output 3x3 camera intrinsic matrix \(\cameramatrix{A}\).
     * @param rotMatrix Output 3x3 external rotation matrix R.
     * @param transVect Output 4x1 translation vector T.
     * @param rotMatrixX Optional 3x3 rotation matrix around x-axis.
     * degrees.
     *
     * The function computes a decomposition of a projection matrix into a calibration and a rotation
     * matrix and the position of a camera.
     *
     * It optionally returns three rotation matrices, one for each axis, and three Euler angles that could
     * be used in OpenGL. Note, there is always more than one sequence of rotations about the three
     * principal axes that results in the same orientation of an object, e.g. see CITE: Slabaugh . Returned
     * three rotation matrices and corresponding three Euler angles are only one of the possible solutions.
     *
     * The function is based on RQDecomp3x3 .
     */
    public static void decomposeProjectionMatrix(Mat projMatrix, Mat cameraMatrix, Mat rotMatrix, Mat transVect, Mat rotMatrixX) {
        decomposeProjectionMatrix_3(projMatrix.nativeObj, cameraMatrix.nativeObj, rotMatrix.nativeObj, transVect.nativeObj, rotMatrixX.nativeObj);
    }

    /**
     * Decomposes a projection matrix into a rotation matrix and a camera intrinsic matrix.
     *
     * @param projMatrix 3x4 input projection matrix P.
     * @param cameraMatrix Output 3x3 camera intrinsic matrix \(\cameramatrix{A}\).
     * @param rotMatrix Output 3x3 external rotation matrix R.
     * @param transVect Output 4x1 translation vector T.
     * degrees.
     *
     * The function computes a decomposition of a projection matrix into a calibration and a rotation
     * matrix and the position of a camera.
     *
     * It optionally returns three rotation matrices, one for each axis, and three Euler angles that could
     * be used in OpenGL. Note, there is always more than one sequence of rotations about the three
     * principal axes that results in the same orientation of an object, e.g. see CITE: Slabaugh . Returned
     * three rotation matrices and corresponding three Euler angles are only one of the possible solutions.
     *
     * The function is based on RQDecomp3x3 .
     */
    public static void decomposeProjectionMatrix(Mat projMatrix, Mat cameraMatrix, Mat rotMatrix, Mat transVect) {
        decomposeProjectionMatrix_4(projMatrix.nativeObj, cameraMatrix.nativeObj, rotMatrix.nativeObj, transVect.nativeObj);
    }

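    // A minimal usage sketch (illustrative helper, not part of the generated API):
    // splits a 3x4 projection matrix into intrinsics, rotation, and a homogeneous
    // translation vector. The numbers below are made-up intrinsics with no rotation.
    private static void decomposeProjectionMatrixExample() {
        Mat P = new Mat(3, 4, org.opencv.core.CvType.CV_64F);
        P.put(0, 0,
                800, 0, 320, 0,
                0, 800, 240, 0,
                0, 0, 1, 0);
        Mat K = new Mat(), R = new Mat(), t = new Mat();
        decomposeProjectionMatrix(P, K, R, t);
        // K is 3x3, R is 3x3, t is 4x1 homogeneous (divide by its 4th entry).
    }
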
    //
    // C++: void cv::matMulDeriv(Mat A, Mat B, Mat& dABdA, Mat& dABdB)
    //

    /**
     * Computes partial derivatives of the matrix product for each multiplied matrix.
     *
     * @param A First multiplied matrix.
     * @param B Second multiplied matrix.
     * @param dABdA First output derivative matrix d(A\*B)/dA of size
     * \(\texttt{A.rows*B.cols} \times {A.rows*A.cols}\) .
     * @param dABdB Second output derivative matrix d(A\*B)/dB of size
     * \(\texttt{A.rows*B.cols} \times {B.rows*B.cols}\) .
     *
     * The function computes partial derivatives of the elements of the matrix product \(A*B\) with regard to
     * the elements of each of the two input matrices. The function is used to compute the Jacobian
     * matrices in #stereoCalibrate but can also be used in any other similar optimization function.
     */
    public static void matMulDeriv(Mat A, Mat B, Mat dABdA, Mat dABdB) {
        matMulDeriv_0(A.nativeObj, B.nativeObj, dABdA.nativeObj, dABdB.nativeObj);
    }

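    // A minimal usage sketch (illustrative helper, not part of the generated API):
    // Jacobians of the product A*B with respect to the entries of A and of B. With
    // 3x3 inputs both outputs are 9x9, matching the sizes documented above.
    private static void matMulDerivExample() {
        Mat A = Mat.eye(3, 3, org.opencv.core.CvType.CV_64F);
        Mat B = Mat.eye(3, 3, org.opencv.core.CvType.CV_64F);
        Mat dABdA = new Mat();
        Mat dABdB = new Mat();
        matMulDeriv(A, B, dABdA, dABdB);
    }
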
    //
    // C++: void cv::composeRT(Mat rvec1, Mat tvec1, Mat rvec2, Mat tvec2, Mat& rvec3, Mat& tvec3, Mat& dr3dr1 = Mat(), Mat& dr3dt1 = Mat(), Mat& dr3dr2 = Mat(), Mat& dr3dt2 = Mat(), Mat& dt3dr1 = Mat(), Mat& dt3dt1 = Mat(), Mat& dt3dr2 = Mat(), Mat& dt3dt2 = Mat())
    //

    /**
     * Combines two rotation-and-shift transformations.
     *
     * @param rvec1 First rotation vector.
     * @param tvec1 First translation vector.
     * @param rvec2 Second rotation vector.
     * @param tvec2 Second translation vector.
     * @param rvec3 Output rotation vector of the superposition.
     * @param tvec3 Output translation vector of the superposition.
     * @param dr3dr1 Optional output derivative of rvec3 with regard to rvec1
     * @param dr3dt1 Optional output derivative of rvec3 with regard to tvec1
     * @param dr3dr2 Optional output derivative of rvec3 with regard to rvec2
     * @param dr3dt2 Optional output derivative of rvec3 with regard to tvec2
     * @param dt3dr1 Optional output derivative of tvec3 with regard to rvec1
     * @param dt3dt1 Optional output derivative of tvec3 with regard to tvec1
     * @param dt3dr2 Optional output derivative of tvec3 with regard to rvec2
     * @param dt3dt2 Optional output derivative of tvec3 with regard to tvec2
     *
     * The functions compute:
     *
     * \(\begin{array}{l} \texttt{rvec3} = \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right ) \\ \texttt{tvec3} = \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \texttt{tvec1} + \texttt{tvec2} \end{array} ,\)
     *
     * where \(\mathrm{rodrigues}\) denotes a rotation vector to a rotation matrix transformation, and
     * \(\mathrm{rodrigues}^{-1}\) denotes the inverse transformation. See Rodrigues for details.
     *
     * Also, the functions can compute the derivatives of the output vectors with regards to the input
     * vectors (see matMulDeriv ). The functions are used inside #stereoCalibrate but can also be used in
     * your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
     * function that contains a matrix multiplication.
     */
    public static void composeRT(Mat rvec1, Mat tvec1, Mat rvec2, Mat tvec2, Mat rvec3, Mat tvec3, Mat dr3dr1, Mat dr3dt1, Mat dr3dr2, Mat dr3dt2, Mat dt3dr1, Mat dt3dt1, Mat dt3dr2, Mat dt3dt2) {
        composeRT_0(rvec1.nativeObj, tvec1.nativeObj, rvec2.nativeObj, tvec2.nativeObj, rvec3.nativeObj, tvec3.nativeObj, dr3dr1.nativeObj, dr3dt1.nativeObj, dr3dr2.nativeObj, dr3dt2.nativeObj, dt3dr1.nativeObj, dt3dt1.nativeObj, dt3dr2.nativeObj, dt3dt2.nativeObj);
    }

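    // A minimal usage sketch (illustrative helper, not part of the generated API):
    // composes a 45-degree z-rotation plus shift with the identity transform. The
    // eight trailing arguments are optional Jacobian outputs; empty Mats receive them.
    private static void composeRTExample() {
        Mat rvec1 = Mat.zeros(3, 1, org.opencv.core.CvType.CV_64F);
        Mat tvec1 = Mat.zeros(3, 1, org.opencv.core.CvType.CV_64F);
        Mat rvec2 = Mat.zeros(3, 1, org.opencv.core.CvType.CV_64F);
        Mat tvec2 = Mat.zeros(3, 1, org.opencv.core.CvType.CV_64F);
        rvec2.put(2, 0, Math.PI / 4);   // rotate 45 degrees about z ...
        tvec2.put(0, 0, 1.0, 0.0, 0.0); // ... then translate along x
        Mat rvec3 = new Mat(), tvec3 = new Mat();
        Mat dr3dr1 = new Mat(), dr3dt1 = new Mat(), dr3dr2 = new Mat(), dr3dt2 = new Mat();
        Mat dt3dr1 = new Mat(), dt3dt1 = new Mat(), dt3dr2 = new Mat(), dt3dt2 = new Mat();
        composeRT(rvec1, tvec1, rvec2, tvec2, rvec3, tvec3,
                dr3dr1, dr3dt1, dr3dr2, dr3dt2, dt3dr1, dt3dt1, dt3dr2, dt3dt2);
    }
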
     */
    public static void composeRT(Mat rvec1, Mat tvec1, Mat rvec2, Mat tvec2, Mat rvec3, Mat tvec3, Mat dr3dr1, Mat dr3dt1, Mat dr3dr2, Mat dr3dt2, Mat dt3dr1, Mat dt3dt1, Mat dt3dr2) {
        composeRT_1(rvec1.nativeObj, tvec1.nativeObj, rvec2.nativeObj, tvec2.nativeObj, rvec3.nativeObj, tvec3.nativeObj, dr3dr1.nativeObj, dr3dt1.nativeObj, dr3dr2.nativeObj, dr3dt2.nativeObj, dt3dr1.nativeObj, dt3dt1.nativeObj, dt3dr2.nativeObj);
    }

    /**
     * Combines two rotation-and-shift transformations.
     *
     * @param rvec1 First rotation vector.
     * @param tvec1 First translation vector.
     * @param rvec2 Second rotation vector.
     * @param tvec2 Second translation vector.
     * @param rvec3 Output rotation vector of the superposition.
     * @param tvec3 Output translation vector of the superposition.
     * @param dr3dr1 Optional output derivative of rvec3 with regard to rvec1.
     * @param dr3dt1 Optional output derivative of rvec3 with regard to tvec1.
     * @param dr3dr2 Optional output derivative of rvec3 with regard to rvec2.
     * @param dr3dt2 Optional output derivative of rvec3 with regard to tvec2.
     * @param dt3dr1 Optional output derivative of tvec3 with regard to rvec1.
     * @param dt3dt1 Optional output derivative of tvec3 with regard to tvec1.
     *
     * The function computes:
     *
     * \(\begin{array}{l} \texttt{rvec3} = \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right ) \\ \texttt{tvec3} = \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \texttt{tvec1} + \texttt{tvec2} \end{array} ,\)
     *
     * where \(\mathrm{rodrigues}\) denotes the transformation of a rotation vector into a rotation
     * matrix and \(\mathrm{rodrigues}^{-1}\) denotes the inverse transformation. See Rodrigues for details.
     *
     * Also, the function can compute the derivatives of the output vectors with respect to the input
     * vectors (see matMulDeriv). The function is used inside #stereoCalibrate but can also be used in
     * your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
     * function that contains a matrix multiplication.
     */
    public static void composeRT(Mat rvec1, Mat tvec1, Mat rvec2, Mat tvec2, Mat rvec3, Mat tvec3, Mat dr3dr1, Mat dr3dt1, Mat dr3dr2, Mat dr3dt2, Mat dt3dr1, Mat dt3dt1) {
        composeRT_2(rvec1.nativeObj, tvec1.nativeObj, rvec2.nativeObj, tvec2.nativeObj, rvec3.nativeObj, tvec3.nativeObj, dr3dr1.nativeObj, dr3dt1.nativeObj, dr3dr2.nativeObj, dr3dt2.nativeObj, dt3dr1.nativeObj, dt3dt1.nativeObj);
    }

    /**
     * Combines two rotation-and-shift transformations.
     *
     * @param rvec1 First rotation vector.
     * @param tvec1 First translation vector.
     * @param rvec2 Second rotation vector.
     * @param tvec2 Second translation vector.
     * @param rvec3 Output rotation vector of the superposition.
     * @param tvec3 Output translation vector of the superposition.
     * @param dr3dr1 Optional output derivative of rvec3 with regard to rvec1.
     * @param dr3dt1 Optional output derivative of rvec3 with regard to tvec1.
     * @param dr3dr2 Optional output derivative of rvec3 with regard to rvec2.
     * @param dr3dt2 Optional output derivative of rvec3 with regard to tvec2.
     * @param dt3dr1 Optional output derivative of tvec3 with regard to rvec1.
     *
     * The function computes:
     *
     * \(\begin{array}{l} \texttt{rvec3} = \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right ) \\ \texttt{tvec3} = \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \texttt{tvec1} + \texttt{tvec2} \end{array} ,\)
     *
     * where \(\mathrm{rodrigues}\) denotes the transformation of a rotation vector into a rotation
     * matrix and \(\mathrm{rodrigues}^{-1}\) denotes the inverse transformation. See Rodrigues for details.
     *
     * Also, the function can compute the derivatives of the output vectors with respect to the input
     * vectors (see matMulDeriv). The function is used inside #stereoCalibrate but can also be used in
     * your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
     * function that contains a matrix multiplication.
     */
    public static void composeRT(Mat rvec1, Mat tvec1, Mat rvec2, Mat tvec2, Mat rvec3, Mat tvec3, Mat dr3dr1, Mat dr3dt1, Mat dr3dr2, Mat dr3dt2, Mat dt3dr1) {
        composeRT_3(rvec1.nativeObj, tvec1.nativeObj, rvec2.nativeObj, tvec2.nativeObj, rvec3.nativeObj, tvec3.nativeObj, dr3dr1.nativeObj, dr3dt1.nativeObj, dr3dr2.nativeObj, dr3dt2.nativeObj, dt3dr1.nativeObj);
    }

    /**
     * Combines two rotation-and-shift transformations.
     *
     * @param rvec1 First rotation vector.
     * @param tvec1 First translation vector.
     * @param rvec2 Second rotation vector.
     * @param tvec2 Second translation vector.
     * @param rvec3 Output rotation vector of the superposition.
     * @param tvec3 Output translation vector of the superposition.
     * @param dr3dr1 Optional output derivative of rvec3 with regard to rvec1.
     * @param dr3dt1 Optional output derivative of rvec3 with regard to tvec1.
     * @param dr3dr2 Optional output derivative of rvec3 with regard to rvec2.
     * @param dr3dt2 Optional output derivative of rvec3 with regard to tvec2.
     *
     * The function computes:
     *
     * \(\begin{array}{l} \texttt{rvec3} = \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right ) \\ \texttt{tvec3} = \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \texttt{tvec1} + \texttt{tvec2} \end{array} ,\)
     *
     * where \(\mathrm{rodrigues}\) denotes the transformation of a rotation vector into a rotation
     * matrix and \(\mathrm{rodrigues}^{-1}\) denotes the inverse transformation. See Rodrigues for details.
     *
     * Also, the function can compute the derivatives of the output vectors with respect to the input
     * vectors (see matMulDeriv). The function is used inside #stereoCalibrate but can also be used in
     * your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
     * function that contains a matrix multiplication.
     */
    public static void composeRT(Mat rvec1, Mat tvec1, Mat rvec2, Mat tvec2, Mat rvec3, Mat tvec3, Mat dr3dr1, Mat dr3dt1, Mat dr3dr2, Mat dr3dt2) {
        composeRT_4(rvec1.nativeObj, tvec1.nativeObj, rvec2.nativeObj, tvec2.nativeObj, rvec3.nativeObj, tvec3.nativeObj, dr3dr1.nativeObj, dr3dt1.nativeObj, dr3dr2.nativeObj, dr3dt2.nativeObj);
    }

    /**
     * Combines two rotation-and-shift transformations.
     *
     * @param rvec1 First rotation vector.
     * @param tvec1 First translation vector.
     * @param rvec2 Second rotation vector.
     * @param tvec2 Second translation vector.
     * @param rvec3 Output rotation vector of the superposition.
     * @param tvec3 Output translation vector of the superposition.
     * @param dr3dr1 Optional output derivative of rvec3 with regard to rvec1.
     * @param dr3dt1 Optional output derivative of rvec3 with regard to tvec1.
     * @param dr3dr2 Optional output derivative of rvec3 with regard to rvec2.
     *
     * The function computes:
     *
     * \(\begin{array}{l} \texttt{rvec3} = \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right ) \\ \texttt{tvec3} = \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \texttt{tvec1} + \texttt{tvec2} \end{array} ,\)
     *
     * where \(\mathrm{rodrigues}\) denotes the transformation of a rotation vector into a rotation
     * matrix and \(\mathrm{rodrigues}^{-1}\) denotes the inverse transformation. See Rodrigues for details.
     *
     * Also, the function can compute the derivatives of the output vectors with respect to the input
     * vectors (see matMulDeriv). The function is used inside #stereoCalibrate but can also be used in
     * your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
     * function that contains a matrix multiplication.
     */
    public static void composeRT(Mat rvec1, Mat tvec1, Mat rvec2, Mat tvec2, Mat rvec3, Mat tvec3, Mat dr3dr1, Mat dr3dt1, Mat dr3dr2) {
        composeRT_5(rvec1.nativeObj, tvec1.nativeObj, rvec2.nativeObj, tvec2.nativeObj, rvec3.nativeObj, tvec3.nativeObj, dr3dr1.nativeObj, dr3dt1.nativeObj, dr3dr2.nativeObj);
    }

    /**
     * Combines two rotation-and-shift transformations.
     *
     * @param rvec1 First rotation vector.
     * @param tvec1 First translation vector.
     * @param rvec2 Second rotation vector.
     * @param tvec2 Second translation vector.
     * @param rvec3 Output rotation vector of the superposition.
     * @param tvec3 Output translation vector of the superposition.
     * @param dr3dr1 Optional output derivative of rvec3 with regard to rvec1.
     * @param dr3dt1 Optional output derivative of rvec3 with regard to tvec1.
     *
     * The function computes:
     *
     * \(\begin{array}{l} \texttt{rvec3} = \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right ) \\ \texttt{tvec3} = \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \texttt{tvec1} + \texttt{tvec2} \end{array} ,\)
     *
     * where \(\mathrm{rodrigues}\) denotes the transformation of a rotation vector into a rotation
     * matrix and \(\mathrm{rodrigues}^{-1}\) denotes the inverse transformation. See Rodrigues for details.
     *
     * Also, the function can compute the derivatives of the output vectors with respect to the input
     * vectors (see matMulDeriv). The function is used inside #stereoCalibrate but can also be used in
     * your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
     * function that contains a matrix multiplication.
     */
    public static void composeRT(Mat rvec1, Mat tvec1, Mat rvec2, Mat tvec2, Mat rvec3, Mat tvec3, Mat dr3dr1, Mat dr3dt1) {
        composeRT_6(rvec1.nativeObj, tvec1.nativeObj, rvec2.nativeObj, tvec2.nativeObj, rvec3.nativeObj, tvec3.nativeObj, dr3dr1.nativeObj, dr3dt1.nativeObj);
    }

    /**
     * Combines two rotation-and-shift transformations.
     *
     * @param rvec1 First rotation vector.
     * @param tvec1 First translation vector.
     * @param rvec2 Second rotation vector.
     * @param tvec2 Second translation vector.
     * @param rvec3 Output rotation vector of the superposition.
     * @param tvec3 Output translation vector of the superposition.
     * @param dr3dr1 Optional output derivative of rvec3 with regard to rvec1.
     *
     * The function computes:
     *
     * \(\begin{array}{l} \texttt{rvec3} = \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right ) \\ \texttt{tvec3} = \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \texttt{tvec1} + \texttt{tvec2} \end{array} ,\)
     *
     * where \(\mathrm{rodrigues}\) denotes the transformation of a rotation vector into a rotation
     * matrix and \(\mathrm{rodrigues}^{-1}\) denotes the inverse transformation. See Rodrigues for details.
     *
     * Also, the function can compute the derivatives of the output vectors with respect to the input
     * vectors (see matMulDeriv). The function is used inside #stereoCalibrate but can also be used in
     * your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
     * function that contains a matrix multiplication.
     */
    public static void composeRT(Mat rvec1, Mat tvec1, Mat rvec2, Mat tvec2, Mat rvec3, Mat tvec3, Mat dr3dr1) {
        composeRT_7(rvec1.nativeObj, tvec1.nativeObj, rvec2.nativeObj, tvec2.nativeObj, rvec3.nativeObj, tvec3.nativeObj, dr3dr1.nativeObj);
    }

    /**
     * Combines two rotation-and-shift transformations.
     *
     * @param rvec1 First rotation vector.
     * @param tvec1 First translation vector.
     * @param rvec2 Second rotation vector.
     * @param tvec2 Second translation vector.
     * @param rvec3 Output rotation vector of the superposition.
     * @param tvec3 Output translation vector of the superposition.
     *
     * The function computes:
     *
     * \(\begin{array}{l} \texttt{rvec3} = \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right ) \\ \texttt{tvec3} = \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \texttt{tvec1} + \texttt{tvec2} \end{array} ,\)
     *
     * where \(\mathrm{rodrigues}\) denotes the transformation of a rotation vector into a rotation
     * matrix and \(\mathrm{rodrigues}^{-1}\) denotes the inverse transformation. See Rodrigues for details.
     *
     * Also, the function can compute the derivatives of the output vectors with respect to the input
     * vectors (see matMulDeriv). The function is used inside #stereoCalibrate but can also be used in
     * your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
     * function that contains a matrix multiplication.
     */
    public static void composeRT(Mat rvec1, Mat tvec1, Mat rvec2, Mat tvec2, Mat rvec3, Mat tvec3) {
        composeRT_8(rvec1.nativeObj, tvec1.nativeObj, rvec2.nativeObj, tvec2.nativeObj, rvec3.nativeObj, tvec3.nativeObj);
    }


    //
    // C++: void cv::projectPoints(vector_Point3f objectPoints, Mat rvec, Mat tvec, Mat cameraMatrix, vector_double distCoeffs, vector_Point2f& imagePoints, Mat& jacobian = Mat(), double aspectRatio = 0)
    //

    /**
     * Projects 3D points to an image plane.
     *
     * @param objectPoints Array of object points expressed with respect to the world coordinate
     * frame. A 3xN/Nx3 1-channel or 1xN/Nx1 3-channel array (or vector<Point3f>), where N is the
     * number of points in the view.
     * @param rvec The rotation vector (REF: Rodrigues) that, together with tvec, performs a change of
     * basis from world to camera coordinate system, see REF: calibrateCamera for details.
     * @param tvec The translation vector, see parameter description above.
     * @param cameraMatrix Camera intrinsic matrix \(\cameramatrix{A}\).
     * @param distCoeffs Input vector of distortion coefficients
     * \(\distcoeffs\). If the vector is empty, zero distortion coefficients are assumed.
     * @param imagePoints Output array of image points, 1xN/Nx1 2-channel, or
     * vector<Point2f>.
     * @param jacobian Optional output 2Nx(10+<numDistCoeffs>) Jacobian matrix of derivatives of image
     * points with respect to components of the rotation vector, translation vector, focal lengths,
     * coordinates of the principal point and the distortion coefficients. In the old interface different
     * components of the Jacobian are returned via different output parameters.
     * @param aspectRatio Optional "fixed aspect ratio" parameter. If the parameter is not 0, the
     * function assumes that the aspect ratio (\(f_x / f_y\)) is fixed and correspondingly adjusts the
     * Jacobian matrix.
     *
     * The function computes the 2D projections of 3D points to the image plane, given intrinsic and
     * extrinsic camera parameters. Optionally, the function computes Jacobians: matrices of partial
     * derivatives of image point coordinates (as functions of all the input parameters) with respect to
     * the particular parameters, intrinsic and/or extrinsic. The Jacobians are used during the global
     * optimization in REF: calibrateCamera, REF: solvePnP, and REF: stereoCalibrate. The function itself
     * can also be used to compute a re-projection error, given the current intrinsic and extrinsic
     * parameters.
     *
     * <b>Note:</b> By setting rvec = tvec = \([0, 0, 0]\), or by setting cameraMatrix to a 3x3 identity matrix,
     * or by passing zero distortion coefficients, one can obtain various useful special cases of the
     * function. This means one can compute the distorted coordinates for a sparse set of points or apply
     * a perspective transformation (and also compute the derivatives) in the ideal zero-distortion setup.
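     *
     * A minimal sketch projecting two points with zero distortion, an identity pose, and an assumed
     * pinhole intrinsic matrix, using the shortest overload (all numeric values are illustrative;
     * {@code Point3} and {@code CvType} are written fully qualified because this class does not
     * import them):
     * <pre>{@code
     * MatOfPoint3f objectPoints = new MatOfPoint3f(
     *         new org.opencv.core.Point3(0, 0, 1),
     *         new org.opencv.core.Point3(0.1, 0, 1));
     * Mat cameraMatrix = Mat.eye(3, 3, org.opencv.core.CvType.CV_64F);
     * cameraMatrix.put(0, 0, 800); cameraMatrix.put(1, 1, 800);    // fx, fy
     * cameraMatrix.put(0, 2, 320); cameraMatrix.put(1, 2, 240);    // cx, cy
     * Mat rvec = Mat.zeros(3, 1, org.opencv.core.CvType.CV_64F);   // no rotation
     * Mat tvec = Mat.zeros(3, 1, org.opencv.core.CvType.CV_64F);   // no translation
     * MatOfPoint2f imagePoints = new MatOfPoint2f();
     * Calib3d.projectPoints(objectPoints, rvec, tvec, cameraMatrix, new MatOfDouble(), imagePoints);
     * // (0, 0, 1) lands on the principal point (320, 240); (0.1, 0, 1) lands at (400, 240)
     * }</pre>
     *
     * Comparing such projections against measured image points is the usual way to evaluate the
     * re-projection error mentioned above.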
     */
    public static void projectPoints(MatOfPoint3f objectPoints, Mat rvec, Mat tvec, Mat cameraMatrix, MatOfDouble distCoeffs, MatOfPoint2f imagePoints, Mat jacobian, double aspectRatio) {
        Mat objectPoints_mat = objectPoints;
        Mat distCoeffs_mat = distCoeffs;
        Mat imagePoints_mat = imagePoints;
        projectPoints_0(objectPoints_mat.nativeObj, rvec.nativeObj, tvec.nativeObj, cameraMatrix.nativeObj, distCoeffs_mat.nativeObj, imagePoints_mat.nativeObj, jacobian.nativeObj, aspectRatio);
    }

    /**
     * Projects 3D points to an image plane.
     *
     * @param objectPoints Array of object points expressed with respect to the world coordinate
     * frame. A 3xN/Nx3 1-channel or 1xN/Nx1 3-channel array (or vector<Point3f>), where N is the
     * number of points in the view.
     * @param rvec The rotation vector (REF: Rodrigues) that, together with tvec, performs a change of
     * basis from world to camera coordinate system, see REF: calibrateCamera for details.
     * @param tvec The translation vector, see parameter description above.
     * @param cameraMatrix Camera intrinsic matrix \(\cameramatrix{A}\).
     * @param distCoeffs Input vector of distortion coefficients
     * \(\distcoeffs\). If the vector is empty, zero distortion coefficients are assumed.
     * @param imagePoints Output array of image points, 1xN/Nx1 2-channel, or
     * vector<Point2f>.
     * @param jacobian Optional output 2Nx(10+<numDistCoeffs>) Jacobian matrix of derivatives of image
     * points with respect to components of the rotation vector, translation vector, focal lengths,
     * coordinates of the principal point and the distortion coefficients. In the old interface different
     * components of the Jacobian are returned via different output parameters.
     *
     * The function computes the 2D projections of 3D points to the image plane, given intrinsic and
     * extrinsic camera parameters. Optionally, the function computes Jacobians: matrices of partial
     * derivatives of image point coordinates (as functions of all the input parameters) with respect to
     * the particular parameters, intrinsic and/or extrinsic. The Jacobians are used during the global
     * optimization in REF: calibrateCamera, REF: solvePnP, and REF: stereoCalibrate. The function itself
     * can also be used to compute a re-projection error, given the current intrinsic and extrinsic
     * parameters.
     *
     * <b>Note:</b> By setting rvec = tvec = \([0, 0, 0]\), or by setting cameraMatrix to a 3x3 identity matrix,
     * or by passing zero distortion coefficients, one can obtain various useful special cases of the
     * function. This means one can compute the distorted coordinates for a sparse set of points or apply
     * a perspective transformation (and also compute the derivatives) in the ideal zero-distortion setup.
     */
    public static void projectPoints(MatOfPoint3f objectPoints, Mat rvec, Mat tvec, Mat cameraMatrix, MatOfDouble distCoeffs, MatOfPoint2f imagePoints, Mat jacobian) {
        Mat objectPoints_mat = objectPoints;
        Mat distCoeffs_mat = distCoeffs;
        Mat imagePoints_mat = imagePoints;
        projectPoints_1(objectPoints_mat.nativeObj, rvec.nativeObj, tvec.nativeObj, cameraMatrix.nativeObj, distCoeffs_mat.nativeObj, imagePoints_mat.nativeObj, jacobian.nativeObj);
    }

    /**
     * Projects 3D points to an image plane.
     *
     * @param objectPoints Array of object points expressed with respect to the world coordinate
     * frame. A 3xN/Nx3 1-channel or 1xN/Nx1 3-channel array (or vector<Point3f>), where N is the
     * number of points in the view.
     * @param rvec The rotation vector (REF: Rodrigues) that, together with tvec, performs a change of
     * basis from world to camera coordinate system, see REF: calibrateCamera for details.
     * @param tvec The translation vector, see parameter description above.
     * @param cameraMatrix Camera intrinsic matrix \(\cameramatrix{A}\).
     * @param distCoeffs Input vector of distortion coefficients
     * \(\distcoeffs\). If the vector is empty, zero distortion coefficients are assumed.
     * @param imagePoints Output array of image points, 1xN/Nx1 2-channel, or
     * vector<Point2f>.
     *
     * The function computes the 2D projections of 3D points to the image plane, given intrinsic and
     * extrinsic camera parameters. Optionally, the function computes Jacobians: matrices of partial
     * derivatives of image point coordinates (as functions of all the input parameters) with respect to
     * the particular parameters, intrinsic and/or extrinsic. The Jacobians are used during the global
     * optimization in REF: calibrateCamera, REF: solvePnP, and REF: stereoCalibrate. The function itself
     * can also be used to compute a re-projection error, given the current intrinsic and extrinsic
     * parameters.
     *
     * <b>Note:</b> By setting rvec = tvec = \([0, 0, 0]\), or by setting cameraMatrix to a 3x3 identity matrix,
     * or by passing zero distortion coefficients, one can obtain various useful special cases of the
     * function. This means one can compute the distorted coordinates for a sparse set of points or apply
     * a perspective transformation (and also compute the derivatives) in the ideal zero-distortion setup.
     */
    public static void projectPoints(MatOfPoint3f objectPoints, Mat rvec, Mat tvec, Mat cameraMatrix, MatOfDouble distCoeffs, MatOfPoint2f imagePoints) {
        Mat objectPoints_mat = objectPoints;
        Mat distCoeffs_mat = distCoeffs;
        Mat imagePoints_mat = imagePoints;
        projectPoints_2(objectPoints_mat.nativeObj, rvec.nativeObj, tvec.nativeObj, cameraMatrix.nativeObj, distCoeffs_mat.nativeObj, imagePoints_mat.nativeObj);
    }


    //
    // C++: bool cv::solvePnP(vector_Point3f objectPoints, vector_Point2f imagePoints, Mat cameraMatrix, vector_double distCoeffs, Mat& rvec, Mat& tvec, bool useExtrinsicGuess = false, int flags = SOLVEPNP_ITERATIVE)
    //

    /**
     * Finds an object pose from 3D-2D point correspondences.
     *
     * SEE: REF: calib3d_solvePnP
     *
     * This function returns the rotation and the translation vectors that transform a 3D point expressed in the object
     * coordinate frame to the camera coordinate frame, using different methods:
     * <ul>
     * <li>
     * P3P methods (REF: SOLVEPNP_P3P, REF: SOLVEPNP_AP3P): need 4 input points to return a unique solution.
     * </li>
     * <li>
     * REF: SOLVEPNP_IPPE: input points must be >= 4 and object points must be coplanar.
     * </li>
     * <li>
     * REF: SOLVEPNP_IPPE_SQUARE: special case suitable for marker pose estimation.
     * The number of input points must be 4. Object points must be defined in the following order:
     * <ul>
     * <li>
     * point 0: [-squareLength / 2, squareLength / 2, 0]
     * </li>
     * <li>
     * point 1: [ squareLength / 2, squareLength / 2, 0]
     * </li>
     * <li>
     * point 2: [ squareLength / 2, -squareLength / 2, 0]
     * </li>
     * <li>
     * point 3: [-squareLength / 2, -squareLength / 2, 0]
     * </li>
     * </ul>
     * </li>
     * <li>
     * for all the other flags, the number of input points must be >= 4 and object points can be in any configuration.
     * </li>
     * </ul>
     *
     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
     * 1xN/Nx1 3-channel, where N is the number of points. vector<Point3d> can also be passed here.
     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
     * where N is the number of points. vector<Point2d> can also be passed here.
     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\).
     * @param distCoeffs Input vector of distortion coefficients
     * \(\distcoeffs\). If the vector is null/empty, zero distortion coefficients are
     * assumed.
     * @param rvec Output rotation vector (see REF: Rodrigues) that, together with tvec, brings points from
     * the model coordinate system to the camera coordinate system.
     * @param tvec Output translation vector.
     * @param useExtrinsicGuess Parameter used for #SOLVEPNP_ITERATIVE. If true, the function uses
     * the provided rvec and tvec values as initial approximations of the rotation and translation
     * vectors, respectively, and further optimizes them.
     * @param flags Method for solving a PnP problem: see REF: calib3d_solvePnP_flags
     *
     * More information about the Perspective-n-Point problem can be found in REF: calib3d_solvePnP
     *
     * <b>Note:</b>
     * <ul>
     * <li>
     * An example of how to use solvePnP for planar augmented reality can be found at
     * opencv_source_code/samples/python/plane_ar.py
     * </li>
     * <li>
     * If you are using Python:
     * <ul>
     * <li>
     * Numpy array slices won't work as input because solvePnP requires contiguous
     * arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
     * modules/calib3d/src/solvepnp.cpp version 2.4.9)
     * </li>
     * <li>
     * The P3P algorithm requires image points to be in an array of shape (N,1,2) due
     * to its calling of #undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
     * which requires 2-channel information.
     * </li>
     * <li>
     * Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
     * it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
     * np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
     * </li>
     * </ul>
     * </li>
     * <li>
     * The methods REF: SOLVEPNP_DLS and REF: SOLVEPNP_UPNP cannot be used as the current implementations are
     * unstable and sometimes give completely wrong results. If you pass one of these two
     * flags, the REF: SOLVEPNP_EPNP method will be used instead.
     * </li>
     * <li>
     * The minimum number of points is 4 in the general case. With the REF: SOLVEPNP_P3P and REF: SOLVEPNP_AP3P
     * methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
     * of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
     * </li>
     * <li>
     * With the REF: SOLVEPNP_ITERATIVE method and {@code useExtrinsicGuess=true}, the minimum number of points is 3 (3 points
     * are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
     * global solution to converge.
     * </li>
     * <li>
     * With REF: SOLVEPNP_IPPE, input points must be >= 4 and object points must be coplanar.
     * </li>
     * <li>
     * REF: SOLVEPNP_IPPE_SQUARE is a special case suitable for marker pose estimation.
     * The number of input points must be 4. Object points must be defined in the following order:
     * <ul>
     * <li>
     * point 0: [-squareLength / 2, squareLength / 2, 0]
     * </li>
     * <li>
     * point 1: [ squareLength / 2, squareLength / 2, 0]
     * </li>
     * <li>
     * point 2: [ squareLength / 2, -squareLength / 2, 0]
     * </li>
     * <li>
     * point 3: [-squareLength / 2, -squareLength / 2, 0]
     * </li>
     * </ul>
     * </li>
     * <li>
     * With REF: SOLVEPNP_SQPNP, the number of input points must be >= 3.
     * </li>
     * </ul>
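     *
     * A minimal sketch using illustrative values (a real application would use detected image
     * coordinates of known 3D reference points; {@code Point3} and {@code CvType} are written fully
     * qualified because this class does not import them):
     * <pre>{@code
     * MatOfPoint3f objectPoints = new MatOfPoint3f(   // a 10 cm square in the object frame
     *         new org.opencv.core.Point3(-0.05,  0.05, 0),
     *         new org.opencv.core.Point3( 0.05,  0.05, 0),
     *         new org.opencv.core.Point3( 0.05, -0.05, 0),
     *         new org.opencv.core.Point3(-0.05, -0.05, 0));
     * MatOfPoint2f imagePoints = new MatOfPoint2f(    // where those corners were observed
     *         new Point(300, 260), new Point(340, 260),
     *         new Point(340, 220), new Point(300, 220));
     * Mat cameraMatrix = Mat.eye(3, 3, org.opencv.core.CvType.CV_64F);
     * cameraMatrix.put(0, 0, 800); cameraMatrix.put(1, 1, 800);
     * cameraMatrix.put(0, 2, 320); cameraMatrix.put(1, 2, 240);
     * Mat rvec = new Mat();
     * Mat tvec = new Mat();
     * boolean found = Calib3d.solvePnP(objectPoints, imagePoints, cameraMatrix,
     *         new MatOfDouble(), rvec, tvec, false, Calib3d.SOLVEPNP_ITERATIVE);
     * // these correspondences are consistent with rvec ~ 0 and tvec ~ (0, 0, 2)
     * }</pre>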
     * @return true if the function successfully estimated a pose.
     */
    public static boolean solvePnP(MatOfPoint3f objectPoints, MatOfPoint2f imagePoints, Mat cameraMatrix, MatOfDouble distCoeffs, Mat rvec, Mat tvec, boolean useExtrinsicGuess, int flags) {
        Mat objectPoints_mat = objectPoints;
        Mat imagePoints_mat = imagePoints;
        Mat distCoeffs_mat = distCoeffs;
        return solvePnP_0(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, cameraMatrix.nativeObj, distCoeffs_mat.nativeObj, rvec.nativeObj, tvec.nativeObj, useExtrinsicGuess, flags);
    }

    /**
     * Finds an object pose from 3D-2D point correspondences.
     *
     * SEE: REF: calib3d_solvePnP
     *
     * This function returns the rotation and the translation vectors that transform a 3D point expressed in the object
     * coordinate frame to the camera coordinate frame, using different methods:
     * <ul>
     * <li>
     * P3P methods (REF: SOLVEPNP_P3P, REF: SOLVEPNP_AP3P): need 4 input points to return a unique solution.
     * </li>
     * <li>
     * REF: SOLVEPNP_IPPE: input points must be >= 4 and object points must be coplanar.
     * </li>
     * <li>
     * REF: SOLVEPNP_IPPE_SQUARE: special case suitable for marker pose estimation.
     * The number of input points must be 4. Object points must be defined in the following order:
     * <ul>
     * <li>
     * point 0: [-squareLength / 2, squareLength / 2, 0]
     * </li>
     * <li>
     * point 1: [ squareLength / 2, squareLength / 2, 0]
     * </li>
     * <li>
     * point 2: [ squareLength / 2, -squareLength / 2, 0]
     * </li>
     * <li>
     * point 3: [-squareLength / 2, -squareLength / 2, 0]
     * </li>
     * </ul>
     * </li>
     * <li>
     * for all the other flags, the number of input points must be >= 4 and object points can be in any configuration.
     * </li>
     * </ul>
     *
     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
     * 1xN/Nx1 3-channel, where N is the number of points. vector<Point3d> can also be passed here.
     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
     * where N is the number of points. vector<Point2d> can also be passed here.
     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\).
     * @param distCoeffs Input vector of distortion coefficients
     * \(\distcoeffs\). If the vector is null/empty, zero distortion coefficients are
     * assumed.
     * @param rvec Output rotation vector (see REF: Rodrigues) that, together with tvec, brings points from
     * the model coordinate system to the camera coordinate system.
     * @param tvec Output translation vector.
     * @param useExtrinsicGuess Parameter used for #SOLVEPNP_ITERATIVE. If true, the function uses
     * the provided rvec and tvec values as initial approximations of the rotation and translation
     * vectors, respectively, and further optimizes them.
     *
     * More information about the Perspective-n-Point problem can be found in REF: calib3d_solvePnP
     *
     * <b>Note:</b>
     * <ul>
     * <li>
     * An example of how to use solvePnP for planar augmented reality can be found at
     * opencv_source_code/samples/python/plane_ar.py
     * </li>
     * <li>
     * If you are using Python:
     * <ul>
     * <li>
     * Numpy array slices won't work as input because solvePnP requires contiguous
     * arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
     * modules/calib3d/src/solvepnp.cpp version 2.4.9)
     * </li>
     * <li>
     * The P3P algorithm requires image points to be in an array of shape (N,1,2) due
     * to its calling of #undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
     * which requires 2-channel information.
     * </li>
     * <li>
     * Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
     * it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
     * np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
     * </li>
     * </ul>
     * </li>
     * <li>
     * The methods REF: SOLVEPNP_DLS and REF: SOLVEPNP_UPNP cannot be used as the current implementations are
     * unstable and sometimes give completely wrong results. If you pass one of these two
     * flags, the REF: SOLVEPNP_EPNP method will be used instead.
     * </li>
     * <li>
     * The minimum number of points is 4 in the general case. With the REF: SOLVEPNP_P3P and REF: SOLVEPNP_AP3P
     * methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
     * of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
     * </li>
     * <li>
     * With the REF: SOLVEPNP_ITERATIVE method and {@code useExtrinsicGuess=true}, the minimum number of points is 3 (3 points
     * are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
     * global solution to converge.
     * </li>
     * <li>
     * With REF: SOLVEPNP_IPPE, input points must be >= 4 and object points must be coplanar.
     * </li>
     * <li>
     * REF: SOLVEPNP_IPPE_SQUARE is a special case suitable for marker pose estimation.
     * The number of input points must be 4. Object points must be defined in the following order:
     * <ul>
     * <li>
     * point 0: [-squareLength / 2, squareLength / 2, 0]
     * </li>
     * <li>
     * point 1: [ squareLength / 2, squareLength / 2, 0]
     * </li>
     * <li>
     * point 2: [ squareLength / 2, -squareLength / 2, 0]
     * </li>
     * <li>
     * point 3: [-squareLength / 2, -squareLength / 2, 0]
     * </li>
     * </ul>
     * </li>
     * <li>
     * With REF: SOLVEPNP_SQPNP, the number of input points must be >= 3.
     * </li>
     * </ul>
     * @return true if the function successfully estimated a pose.
     */
    public static boolean solvePnP(MatOfPoint3f objectPoints, MatOfPoint2f imagePoints, Mat cameraMatrix, MatOfDouble distCoeffs, Mat rvec, Mat tvec, boolean useExtrinsicGuess) {
        Mat objectPoints_mat = objectPoints;
        Mat imagePoints_mat = imagePoints;
        Mat distCoeffs_mat = distCoeffs;
        return solvePnP_1(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, cameraMatrix.nativeObj, distCoeffs_mat.nativeObj, rvec.nativeObj, tvec.nativeObj, useExtrinsicGuess);
    }

    /**
     * Finds an object pose from 3D-2D point correspondences.
     *
     * SEE: REF: calib3d_solvePnP
     *
     * This function returns the rotation and the translation vectors that transform a 3D point expressed in the object
     * coordinate frame to the camera coordinate frame, using different methods:
     * <ul>
     * <li>
     * P3P methods (REF: SOLVEPNP_P3P, REF: SOLVEPNP_AP3P): need 4 input points to return a unique solution.
     * </li>
     * <li>
     * REF: SOLVEPNP_IPPE: input points must be >= 4 and object points must be coplanar.
     * </li>
     * <li>
     * REF: SOLVEPNP_IPPE_SQUARE: special case suitable for marker pose estimation.
     * The number of input points must be 4. Object points must be defined in the following order:
     * <ul>
     * <li>
     * point 0: [-squareLength / 2, squareLength / 2, 0]
     * </li>
     * <li>
     * point 1: [ squareLength / 2, squareLength / 2, 0]
     * </li>
     * <li>
     * point 2: [ squareLength / 2, -squareLength / 2, 0]
     * </li>
     * <li>
     * point 3: [-squareLength / 2, -squareLength / 2, 0]
     * </li>
     * </ul>
     * </li>
     * <li>
     * for all the other flags, the number of input points must be >= 4 and object points can be in any configuration.
     * </li>
     * </ul>
     *
     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
     * 1xN/Nx1 3-channel, where N is the number of points. vector<Point3d> can also be passed here.
     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
     * where N is the number of points. vector<Point2d> can also be passed here.
     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\).
     * @param distCoeffs Input vector of distortion coefficients
     * \(\distcoeffs\). If the vector is null/empty, zero distortion coefficients are
     * assumed.
     * @param rvec Output rotation vector (see REF: Rodrigues) that, together with tvec, brings points from
     * the model coordinate system to the camera coordinate system.
     * @param tvec Output translation vector.
     *
     * More information about the Perspective-n-Point problem can be found in REF: calib3d_solvePnP
     *
     * <b>Note:</b>
     * <ul>
     * <li>
     * An example of how to use solvePnP for planar augmented reality can be found at
     * opencv_source_code/samples/python/plane_ar.py
     * </li>
     * <li>
     * If you are using Python:
     * <ul>
     * <li>
     * Numpy array slices won't work as input because solvePnP requires contiguous
     * arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
     * modules/calib3d/src/solvepnp.cpp version 2.4.9)
     * </li>
     * <li>
     * The P3P algorithm requires image points to be in an array of shape (N,1,2) due
     * to its calling of #undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
     * which requires 2-channel information.
     * </li>
     * <li>
     * Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
     * it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
     * np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
     * </li>
     * </ul>
     * </li>
     * <li>
     * The methods REF: SOLVEPNP_DLS and REF: SOLVEPNP_UPNP cannot be used as the current implementations are
     * unstable and sometimes give completely wrong results. If you pass one of these two
     * flags, the REF: SOLVEPNP_EPNP method will be used instead.
     * </li>
     * <li>
     * The minimum number of points is 4 in the general case. With the REF: SOLVEPNP_P3P and REF: SOLVEPNP_AP3P
     * methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
     * of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
     * </li>
     * <li>
     * With the REF: SOLVEPNP_ITERATIVE method and {@code useExtrinsicGuess=true}, the minimum number of points is 3 (3 points
     * are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
     * global solution to converge.
     * </li>
     * <li>
     * With REF: SOLVEPNP_IPPE, input points must be >= 4 and object points must be coplanar.
     * </li>
     * <li>
     * REF: SOLVEPNP_IPPE_SQUARE is a special case suitable for marker pose estimation.
     * The number of input points must be 4. Object points must be defined in the following order:
     * <ul>
     * <li>
     * point 0: [-squareLength / 2, squareLength / 2, 0]
     * </li>
     * <li>
     * point 1: [ squareLength / 2, squareLength / 2, 0]
     * </li>
     * <li>
     * point 2: [ squareLength / 2, -squareLength / 2, 0]
     * </li>
     * <li>
     * point 3: [-squareLength / 2, -squareLength / 2, 0]
     * </li>
     * </ul>
     * </li>
     * <li>
     * With REF: SOLVEPNP_SQPNP, the number of input points must be >= 3.
     * </li>
     * </ul>
     * @return true if the function successfully estimated a pose.
     */
    public static boolean solvePnP(MatOfPoint3f objectPoints, MatOfPoint2f imagePoints, Mat cameraMatrix, MatOfDouble distCoeffs, Mat rvec, Mat tvec) {
        Mat objectPoints_mat = objectPoints;
        Mat imagePoints_mat = imagePoints;
        Mat distCoeffs_mat = distCoeffs;
        return solvePnP_2(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, cameraMatrix.nativeObj, distCoeffs_mat.nativeObj, rvec.nativeObj, tvec.nativeObj);
    }


    //
    // C++: bool cv::solvePnPRansac(vector_Point3f objectPoints, vector_Point2f imagePoints, Mat cameraMatrix, vector_double distCoeffs, Mat& rvec, Mat& tvec, bool useExtrinsicGuess = false, int iterationsCount = 100, float reprojectionError = 8.0, double confidence = 0.99, Mat& inliers = Mat(), int flags = SOLVEPNP_ITERATIVE)
    //

    /**
     * Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
     *
     * SEE: REF: calib3d_solvePnP
     *
     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
     * 1xN/Nx1 3-channel, where N is the number of points. vector<Point3d> can also be passed here.
     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
     * where N is the number of points. vector<Point2d> can also be passed here.
     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\).
     * @param distCoeffs Input vector of distortion coefficients
     * \(\distcoeffs\). If the vector is null/empty, zero distortion coefficients are
     * assumed.
     * @param rvec Output rotation vector (see REF: Rodrigues) that, together with tvec, brings points from
     * the model coordinate system to the camera coordinate system.
     * @param tvec Output translation vector.
     * @param useExtrinsicGuess Parameter used for REF: SOLVEPNP_ITERATIVE. If true, the function uses
     * the provided rvec and tvec values as initial approximations of the rotation and translation
     * vectors, respectively, and further optimizes them.
     * @param iterationsCount Number of RANSAC iterations.
     * @param reprojectionError Inlier threshold value used by the RANSAC procedure. The parameter value
     * is the maximum allowed distance between the observed and computed point projections to consider it
     * an inlier.
     * @param confidence The probability that the algorithm produces a useful result.
     * @param inliers Output vector that contains indices of inliers in objectPoints and imagePoints.
     * @param flags Method for solving a PnP problem (see REF: solvePnP).
     *
     * The function estimates an object pose given a set of object points, their corresponding image
     * projections, as well as the camera intrinsic matrix and the distortion coefficients. This function finds
     * a pose that minimizes the reprojection error, that is, the sum of squared distances between the observed
     * projections imagePoints and the projected (using REF: projectPoints) objectPoints. The use of RANSAC
     * makes the function resistant to outliers.
     *
     * <b>Note:</b>
     * <ul>
     * <li>
     * An example of how to use solvePnPRansac for object detection can be found at
     * opencv_source_code/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/
     * </li>
     * <li>
     * The default method used to estimate the camera pose for the Minimal Sample Sets step
     * is #SOLVEPNP_EPNP. Exceptions are:
     * <ul>
     * <li>
     * if you choose #SOLVEPNP_P3P or #SOLVEPNP_AP3P, these methods will be used.
     * </li>
     * <li>
     * if the number of input points is equal to 4, #SOLVEPNP_P3P is used.
     * </li>
     * </ul>
     * </li>
     * <li>
     * The method used to estimate the camera pose using all the inliers is defined by the
     * flags parameter unless it is equal to #SOLVEPNP_P3P or #SOLVEPNP_AP3P. In this case,
     * the method #SOLVEPNP_EPNP will be used instead.
     * </li>
     * </ul>
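     *
     * A minimal sketch of robust pose estimation with outlier rejection ({@code objectPoints},
     * {@code imagePoints}, {@code cameraMatrix} and {@code distCoeffs} are assumed to be set up as
     * for solvePnP; the iteration count and threshold values below are illustrative):
     * <pre>{@code
     * Mat rvec = new Mat();
     * Mat tvec = new Mat();
     * Mat inliers = new Mat();
     * boolean found = Calib3d.solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs,
     *         rvec, tvec, false, 200, 4.0f, 0.99, inliers);
     * if (found) {
     *     // inliers holds the row indices of the correspondences judged consistent with the
     *     // estimated pose; the remaining correspondences were rejected as outliers
     *     double inlierRatio = (double) inliers.rows() / objectPoints.rows();
     * }
     * }</pre>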
     * @return true if a pose was found.
     */
    public static boolean solvePnPRansac(MatOfPoint3f objectPoints, MatOfPoint2f imagePoints, Mat cameraMatrix, MatOfDouble distCoeffs, Mat rvec, Mat tvec, boolean useExtrinsicGuess, int iterationsCount, float reprojectionError, double confidence, Mat inliers, int flags) {
        Mat objectPoints_mat = objectPoints;
        Mat imagePoints_mat = imagePoints;
        Mat distCoeffs_mat = distCoeffs;
        return solvePnPRansac_0(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, cameraMatrix.nativeObj, distCoeffs_mat.nativeObj, rvec.nativeObj, tvec.nativeObj, useExtrinsicGuess, iterationsCount, reprojectionError, confidence, inliers.nativeObj, flags);
    }

    /**
     * Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
     *
     * SEE: REF: calib3d_solvePnP
     *
     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
     * 1xN/Nx1 3-channel, where N is the number of points. vector<Point3d> can also be passed here.
     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
     * where N is the number of points. vector<Point2d> can also be passed here.
     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\).
     * @param distCoeffs Input vector of distortion coefficients
     * \(\distcoeffs\). If the vector is null/empty, zero distortion coefficients are
     * assumed.
     * @param rvec Output rotation vector (see REF: Rodrigues) that, together with tvec, brings points from
     * the model coordinate system to the camera coordinate system.
     * @param tvec Output translation vector.
     * @param useExtrinsicGuess Parameter used for REF: SOLVEPNP_ITERATIVE. If true, the function uses
     * the provided rvec and tvec values as initial approximations of the rotation and translation
     * vectors, respectively, and further optimizes them.
     * @param iterationsCount Number of RANSAC iterations.
     * @param reprojectionError Inlier threshold value used by the RANSAC procedure. The parameter value
     * is the maximum allowed distance between the observed and computed point projections to consider it
     * an inlier.
     * @param confidence The probability that the algorithm produces a useful result.
     * @param inliers Output vector that contains indices of inliers in objectPoints and imagePoints.
     *
     * The function estimates an object pose given a set of object points, their corresponding image
     * projections, as well as the camera intrinsic matrix and the distortion coefficients. This function finds
     * a pose that minimizes the reprojection error, that is, the sum of squared distances between the observed
     * projections imagePoints and the projected (using REF: projectPoints) objectPoints. The use of RANSAC
     * makes the function resistant to outliers.
     *
     * <b>Note:</b>
     * <ul>
     * <li>
     * An example of how to use solvePnPRansac for object detection can be found at
     * opencv_source_code/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/
     * </li>
     * <li>
     * The default method used to estimate the camera pose for the Minimal Sample Sets step
     * is #SOLVEPNP_EPNP. Exceptions are:
     * <ul>
     * <li>
     * if you choose #SOLVEPNP_P3P or #SOLVEPNP_AP3P, these methods will be used.
     * </li>
     * <li>
     * if the number of input points is equal to 4, #SOLVEPNP_P3P is used.
     * </li>
     * </ul>
     * </li>
     * <li>
     * The method used to estimate the camera pose using all the inliers is defined by the
     * flags parameter unless it is equal to #SOLVEPNP_P3P or #SOLVEPNP_AP3P. In this case,
     * the method #SOLVEPNP_EPNP will be used instead.
     * </li>
     * </ul>
     * @return true if a pose was found.
     */
    public static boolean solvePnPRansac(MatOfPoint3f objectPoints, MatOfPoint2f imagePoints, Mat cameraMatrix, MatOfDouble distCoeffs, Mat rvec, Mat tvec, boolean useExtrinsicGuess, int iterationsCount, float reprojectionError, double confidence, Mat inliers) {
        Mat objectPoints_mat = objectPoints;
        Mat imagePoints_mat = imagePoints;
        Mat distCoeffs_mat = distCoeffs;
        return solvePnPRansac_1(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, cameraMatrix.nativeObj, distCoeffs_mat.nativeObj, rvec.nativeObj, tvec.nativeObj, useExtrinsicGuess, iterationsCount, reprojectionError, confidence, inliers.nativeObj);
    }

    /**
     * Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
     *
     * SEE: REF: calib3d_solvePnP
     *
     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
     * 1xN/Nx1 3-channel, where N is the number of points. vector<Point3d> can also be passed here.
     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
     * where N is the number of points. vector<Point2d> can also be passed here.
     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\).
     * @param distCoeffs Input vector of distortion coefficients
     * \(\distcoeffs\). If the vector is null/empty, zero distortion coefficients are
     * assumed.
     * @param rvec Output rotation vector (see REF: Rodrigues) that, together with tvec, brings points from
     * the model coordinate system to the camera coordinate system.
     * @param tvec Output translation vector.
     * @param useExtrinsicGuess Parameter used for REF: SOLVEPNP_ITERATIVE. If true, the function uses
     * the provided rvec and tvec values as initial approximations of the rotation and translation
     * vectors, respectively, and further optimizes them.
     * @param iterationsCount Number of RANSAC iterations.
     * @param reprojectionError Inlier threshold value used by the RANSAC procedure. The parameter value
     * is the maximum allowed distance between the observed and computed point projections to consider it
     * an inlier.
     * @param confidence The probability that the algorithm produces a useful result.
     *
     * The function estimates an object pose given a set of object points, their corresponding image
     * projections, as well as the camera intrinsic matrix and the distortion coefficients. This function finds
     * a pose that minimizes the reprojection error, that is, the sum of squared distances between the observed
     * projections imagePoints and the projected (using REF: projectPoints) objectPoints. The use of RANSAC
     * makes the function resistant to outliers.
     *
     * <b>Note:</b>
     * <ul>
     * <li>
     * An example of how to use solvePnPRansac for object detection can be found at
     * opencv_source_code/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/
     * </li>
     * <li>
     * The default method used to estimate the camera pose for the Minimal Sample Sets step
     * is #SOLVEPNP_EPNP. Exceptions are:
     * <ul>
     * <li>
     * if you choose #SOLVEPNP_P3P or #SOLVEPNP_AP3P, these methods will be used.
     * </li>
     * <li>
     * if the number of input points is equal to 4, #SOLVEPNP_P3P is used.
     * </li>
     * </ul>
     * </li>
     * <li>
     * The method used to estimate the camera pose using all the inliers is defined by the
     * flags parameter unless it is equal to #SOLVEPNP_P3P or #SOLVEPNP_AP3P. In this case,
     * the method #SOLVEPNP_EPNP will be used instead.
     * </li>
     * </ul>
     * @return true if a pose was found.
     */
    public static boolean solvePnPRansac(MatOfPoint3f objectPoints, MatOfPoint2f imagePoints, Mat cameraMatrix, MatOfDouble distCoeffs, Mat rvec, Mat tvec, boolean useExtrinsicGuess, int iterationsCount, float reprojectionError, double confidence) {
        Mat objectPoints_mat = objectPoints;
        Mat imagePoints_mat = imagePoints;
        Mat distCoeffs_mat = distCoeffs;
        return solvePnPRansac_2(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, cameraMatrix.nativeObj, distCoeffs_mat.nativeObj, rvec.nativeObj, tvec.nativeObj, useExtrinsicGuess, iterationsCount, reprojectionError, confidence);
    }

    /**
     * Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
     *
     * SEE: REF: calib3d_solvePnP
     *
     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
     * 1xN/Nx1 3-channel, where N is the number of points. vector<Point3d> can also be passed here.
     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
     * where N is the number of points. vector<Point2d> can also be passed here.
     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\).
     * @param distCoeffs Input vector of distortion coefficients
     * \(\distcoeffs\). If the vector is null/empty, zero distortion coefficients are
     * assumed.
     * @param rvec Output rotation vector (see REF: Rodrigues) that, together with tvec, brings points from
     * the model coordinate system to the camera coordinate system.
     * @param tvec Output translation vector.
     * @param useExtrinsicGuess Parameter used for REF: SOLVEPNP_ITERATIVE. If true, the function uses
     * the provided rvec and tvec values as initial approximations of the rotation and translation
     * vectors, respectively, and further optimizes them.
     * @param iterationsCount Number of RANSAC iterations.
     * @param reprojectionError Inlier threshold value used by the RANSAC procedure. The parameter value
     * is the maximum allowed distance between the observed and computed point projections to consider it
     * an inlier.
     *
     * The function estimates an object pose given a set of object points, their corresponding image
     * projections, as well as the camera intrinsic matrix and the distortion coefficients. This function finds
     * a pose that minimizes the reprojection error, that is, the sum of squared distances between the observed
     * projections imagePoints and the projected (using REF: projectPoints) objectPoints. The use of RANSAC
     * makes the function resistant to outliers.
     *
     * <b>Note:</b>
     * <ul>
     * <li>
     * An example of how to use solvePnPRansac for object detection can be found at
     * opencv_source_code/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/
     * </li>
     * <li>
     * The default method used to estimate the camera pose for the Minimal Sample Sets step
     * is #SOLVEPNP_EPNP. Exceptions are:
     * <ul>
     * <li>
     * if you choose #SOLVEPNP_P3P or #SOLVEPNP_AP3P, these methods will be used.
     * </li>
     * <li>
     * if the number of input points is equal to 4, #SOLVEPNP_P3P is used.
     * </li>
     * </ul>
     * </li>
     * <li>
     * The method used to estimate the camera pose using all the inliers is defined by the
     * flags parameter unless it is equal to #SOLVEPNP_P3P or #SOLVEPNP_AP3P. In this case,
     * the method #SOLVEPNP_EPNP will be used instead.
     * </li>
     * </ul>
     * @return true if a pose was found.
     */
    public static boolean solvePnPRansac(MatOfPoint3f objectPoints, MatOfPoint2f imagePoints, Mat cameraMatrix, MatOfDouble distCoeffs, Mat rvec, Mat tvec, boolean useExtrinsicGuess, int iterationsCount, float reprojectionError) {
        Mat objectPoints_mat = objectPoints;
        Mat imagePoints_mat = imagePoints;
        Mat distCoeffs_mat = distCoeffs;
        return solvePnPRansac_3(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, cameraMatrix.nativeObj, distCoeffs_mat.nativeObj, rvec.nativeObj, tvec.nativeObj, useExtrinsicGuess, iterationsCount, reprojectionError);
    }

    /**
     * Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
     *
     * SEE: REF: calib3d_solvePnP
     *
     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
     * 1xN/Nx1 3-channel, where N is the number of points. vector<Point3d> can also be passed here.
     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
     * where N is the number of points. vector<Point2d> can also be passed here.
     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\).
     * @param distCoeffs Input vector of distortion coefficients
     * \(\distcoeffs\). If the vector is null/empty, zero distortion coefficients are
     * assumed.
     * @param rvec Output rotation vector (see REF: Rodrigues) that, together with tvec, brings points from
     * the model coordinate system to the camera coordinate system.
     * @param tvec Output translation vector.
     * @param useExtrinsicGuess Parameter used for REF: SOLVEPNP_ITERATIVE. If true, the function uses
     * the provided rvec and tvec values as initial approximations of the rotation and translation
     * vectors, respectively, and further optimizes them.
     * @param iterationsCount Number of RANSAC iterations.
     *
     * The function estimates an object pose given a set of object points, their corresponding image
     * projections, as well as the camera intrinsic matrix and the distortion coefficients. This function finds
     * a pose that minimizes the reprojection error, that is, the sum of squared distances between the observed
     * projections imagePoints and the projected (using REF: projectPoints) objectPoints. The use of RANSAC
     * makes the function resistant to outliers.
     *
     * <b>Note:</b>
     * <ul>
     * <li>
     * An example of how to use solvePnPRansac for object detection can be found at
     * opencv_source_code/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/
     * </li>
     * <li>
     * The default method used to estimate the camera pose for the Minimal Sample Sets step
     * is #SOLVEPNP_EPNP. Exceptions are:
     * <ul>
     * <li>
     * if you choose #SOLVEPNP_P3P or #SOLVEPNP_AP3P, these methods will be used.
     * </li>
     * <li>
     * if the number of input points is equal to 4, #SOLVEPNP_P3P is used.
     * </li>
     * </ul>
     * </li>
     * <li>
     * The method used to estimate the camera pose using all the inliers is defined by the
     * flags parameter unless it is equal to #SOLVEPNP_P3P or #SOLVEPNP_AP3P. In this case,
     * the method #SOLVEPNP_EPNP will be used instead.
     * </li>
     * </ul>
     * @return true if a pose was found.
     */
    public static boolean solvePnPRansac(MatOfPoint3f objectPoints, MatOfPoint2f imagePoints, Mat cameraMatrix, MatOfDouble distCoeffs, Mat rvec, Mat tvec, boolean useExtrinsicGuess, int iterationsCount) {
        Mat objectPoints_mat = objectPoints;
        Mat imagePoints_mat = imagePoints;
        Mat distCoeffs_mat = distCoeffs;
        return solvePnPRansac_4(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, cameraMatrix.nativeObj, distCoeffs_mat.nativeObj, rvec.nativeObj, tvec.nativeObj, useExtrinsicGuess, iterationsCount);
    }

    /**
     * Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
     *
     * SEE: REF: calib3d_solvePnP
     *
     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
     * 1xN/Nx1 3-channel, where N is the number of points. vector<Point3d> can also be passed here.
     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
     * where N is the number of points. vector<Point2d> can also be passed here.
     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\).
     * @param distCoeffs Input vector of distortion coefficients
     * \(\distcoeffs\). If the vector is null/empty, zero distortion coefficients are
     * assumed.
     * @param rvec Output rotation vector (see REF: Rodrigues) that, together with tvec, brings points from
     * the model coordinate system to the camera coordinate system.
     * @param tvec Output translation vector.
     * @param useExtrinsicGuess Parameter used for REF: SOLVEPNP_ITERATIVE. If true, the function uses
     * the provided rvec and tvec values as initial approximations of the rotation and translation
     * vectors, respectively, and further optimizes them.
     *
     * The function estimates an object pose given a set of object points, their corresponding image
     * projections, as well as the camera intrinsic matrix and the distortion coefficients. This function finds
     * a pose that minimizes the reprojection error, that is, the sum of squared distances between the observed
     * projections imagePoints and the projected (using REF: projectPoints) objectPoints. The use of RANSAC
     * makes the function resistant to outliers.
     *
     * <b>Note:</b>
     * <ul>
     * <li>
     * An example of how to use solvePnPRansac for object detection can be found at
     * opencv_source_code/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/
     * </li>
     * <li>
     * The default method used to estimate the camera pose for the Minimal Sample Sets step
     * is #SOLVEPNP_EPNP. Exceptions are:
     * <ul>
     * <li>
     * if you choose #SOLVEPNP_P3P or #SOLVEPNP_AP3P, these methods will be used.
     * </li>
     * <li>
     * if the number of input points is equal to 4, #SOLVEPNP_P3P is used.
     * </li>
     * </ul>
     * </li>
     * <li>
     * The method used to estimate the camera pose using all the inliers is defined by the
     * flags parameter unless it is equal to #SOLVEPNP_P3P or #SOLVEPNP_AP3P. In this case,
     * the method #SOLVEPNP_EPNP will be used instead.
     * </li>
     * </ul>
     * @return true if a pose was found.
     */
    public static boolean solvePnPRansac(MatOfPoint3f objectPoints, MatOfPoint2f imagePoints, Mat cameraMatrix, MatOfDouble distCoeffs, Mat rvec, Mat tvec, boolean useExtrinsicGuess) {
        Mat objectPoints_mat = objectPoints;
        Mat imagePoints_mat = imagePoints;
        Mat distCoeffs_mat = distCoeffs;
        return solvePnPRansac_5(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, cameraMatrix.nativeObj, distCoeffs_mat.nativeObj, rvec.nativeObj, tvec.nativeObj, useExtrinsicGuess);
    }

    /**
     * Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
     *
     * SEE: REF: calib3d_solvePnP
     *
     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
     * 1xN/Nx1 3-channel, where N is the number of points. vector<Point3d> can also be passed here.
     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
     * where N is the number of points. vector<Point2d> can also be passed here.
     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\).
     * @param distCoeffs Input vector of distortion coefficients
     * \(\distcoeffs\). If the vector is null/empty, zero distortion coefficients are
     * assumed.
     * @param rvec Output rotation vector (see REF: Rodrigues) that, together with tvec, brings points from
     * the model coordinate system to the camera coordinate system.
     * @param tvec Output translation vector.
     *
     * The function estimates an object pose given a set of object points, their corresponding image
     * projections, as well as the camera intrinsic matrix and the distortion coefficients. This function finds
     * a pose that minimizes the reprojection error, that is, the sum of squared distances between the observed
     * projections imagePoints and the projected (using REF: projectPoints) objectPoints. The use of RANSAC
     * makes the function resistant to outliers.
     *
     * <b>Note:</b>
     * <ul>
     * <li>
     * An example of how to use solvePnPRansac for object detection can be found at
     * opencv_source_code/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/
     * </li>
     * <li>
     * The default method used to estimate the camera pose for the Minimal Sample Sets step
     * is #SOLVEPNP_EPNP. Exceptions are:
     * <ul>
     * <li>
     * if you choose #SOLVEPNP_P3P or #SOLVEPNP_AP3P, these methods will be used.
     * </li>
     * <li>
     * if the number of input points is equal to 4, #SOLVEPNP_P3P is used.
     * </li>
     * </ul>
     * </li>
     * <li>
     * The method used to estimate the camera pose using all the inliers is defined by the
     * flags parameter unless it is equal to #SOLVEPNP_P3P or #SOLVEPNP_AP3P. In this case,
     * the method #SOLVEPNP_EPNP will be used instead.
     * </li>
     * </ul>
     * @return true if a pose was found.
     */
    public static boolean solvePnPRansac(MatOfPoint3f objectPoints, MatOfPoint2f imagePoints, Mat cameraMatrix, MatOfDouble distCoeffs, Mat rvec, Mat tvec) {
        Mat objectPoints_mat = objectPoints;
        Mat imagePoints_mat = imagePoints;
        Mat distCoeffs_mat = distCoeffs;
        return solvePnPRansac_6(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, cameraMatrix.nativeObj, distCoeffs_mat.nativeObj, rvec.nativeObj, tvec.nativeObj);
    }


    //
    // C++: bool cv::solvePnPRansac(vector_Point3f objectPoints, vector_Point2f imagePoints, Mat& cameraMatrix, vector_double distCoeffs, Mat& rvec, Mat& tvec, Mat& inliers, UsacParams params = UsacParams())
    //

2194 * </li> 2195 * </ul> 2196 * @return automatically generated 2197 */ 2198 public static boolean solvePnPRansac(MatOfPoint3f objectPoints, MatOfPoint2f imagePoints, Mat cameraMatrix, MatOfDouble distCoeffs, Mat rvec, Mat tvec) { 2199 Mat objectPoints_mat = objectPoints; 2200 Mat imagePoints_mat = imagePoints; 2201 Mat distCoeffs_mat = distCoeffs; 2202 return solvePnPRansac_6(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, cameraMatrix.nativeObj, distCoeffs_mat.nativeObj, rvec.nativeObj, tvec.nativeObj); 2203 } 2204 2205 2206 // 2207 // C++: bool cv::solvePnPRansac(vector_Point3f objectPoints, vector_Point2f imagePoints, Mat& cameraMatrix, vector_double distCoeffs, Mat& rvec, Mat& tvec, Mat& inliers, UsacParams params = UsacParams()) 2208 // 2209 2210 public static boolean solvePnPRansac(MatOfPoint3f objectPoints, MatOfPoint2f imagePoints, Mat cameraMatrix, MatOfDouble distCoeffs, Mat rvec, Mat tvec, Mat inliers, UsacParams params) { 2211 Mat objectPoints_mat = objectPoints; 2212 Mat imagePoints_mat = imagePoints; 2213 Mat distCoeffs_mat = distCoeffs; 2214 return solvePnPRansac_7(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, cameraMatrix.nativeObj, distCoeffs_mat.nativeObj, rvec.nativeObj, tvec.nativeObj, inliers.nativeObj, params.nativeObj); 2215 } 2216 2217 public static boolean solvePnPRansac(MatOfPoint3f objectPoints, MatOfPoint2f imagePoints, Mat cameraMatrix, MatOfDouble distCoeffs, Mat rvec, Mat tvec, Mat inliers) { 2218 Mat objectPoints_mat = objectPoints; 2219 Mat imagePoints_mat = imagePoints; 2220 Mat distCoeffs_mat = distCoeffs; 2221 return solvePnPRansac_8(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, cameraMatrix.nativeObj, distCoeffs_mat.nativeObj, rvec.nativeObj, tvec.nativeObj, inliers.nativeObj); 2222 } 2223 2224 2225 // 2226 // C++: int cv::solveP3P(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, vector_Mat& rvecs, vector_Mat& tvecs, int flags) 2227 // 2228 2229 /** 2230 * Finds an object pose from 3 3D-2D point correspondences. 2231 * 2232 * SEE: REF: calib3d_solvePnP 2233 * 2234 * @param objectPoints Array of object points in the object coordinate space, 3x3 1-channel or 2235 * 1x3/3x1 3-channel. vector<Point3f> can be also passed here. 2236 * @param imagePoints Array of corresponding image points, 3x2 1-channel or 1x3/3x1 2-channel. 2237 * vector<Point2f> can be also passed here. 2238 * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\) . 2239 * @param distCoeffs Input vector of distortion coefficients 2240 * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are 2241 * assumed. 2242 * @param rvecs Output rotation vectors (see REF: Rodrigues ) that, together with tvecs, brings points from 2243 * the model coordinate system to the camera coordinate system. A P3P problem has up to 4 solutions. 2244 * @param tvecs Output translation vectors. 2245 * @param flags Method for solving a P3P problem: 2246 * <ul> 2247 * <li> 2248 * REF: SOLVEPNP_P3P Method is based on the paper of X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang 2249 * "Complete Solution Classification for the Perspective-Three-Point Problem" (CITE: gao2003complete). 2250 * </li> 2251 * <li> 2252 * REF: SOLVEPNP_AP3P Method is based on the paper of T. Ke and S. Roumeliotis. 2253 * "An Efficient Algebraic Solution to the Perspective-Three-Point Problem" (CITE: Ke17). 
2254 * </li> 2255 * </ul> 2256 * 2257 * The function estimates the object pose given 3 object points, their corresponding image 2258 * projections, as well as the camera intrinsic matrix and the distortion coefficients. 2259 * 2260 * <b>Note:</b> 2261 * The solutions are sorted by reprojection errors (lowest to highest). 2262 * @return automatically generated 2263 */ 2264 public static int solveP3P(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, int flags) { 2265 Mat rvecs_mat = new Mat(); 2266 Mat tvecs_mat = new Mat(); 2267 int retVal = solveP3P_0(objectPoints.nativeObj, imagePoints.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, flags); 2268 Converters.Mat_to_vector_Mat(rvecs_mat, rvecs); 2269 rvecs_mat.release(); 2270 Converters.Mat_to_vector_Mat(tvecs_mat, tvecs); 2271 tvecs_mat.release(); 2272 return retVal; 2273 } 2274 2275 2276 // 2277 // C++: void cv::solvePnPRefineLM(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, Mat& rvec, Mat& tvec, TermCriteria criteria = TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 20, FLT_EPSILON)) 2278 // 2279 2280 /** 2281 * Refine a pose (the translation and the rotation that transform a 3D point expressed in the object coordinate frame 2282 * to the camera coordinate frame) from a 3D-2D point correspondences and starting from an initial solution. 2283 * 2284 * SEE: REF: calib3d_solvePnP 2285 * 2286 * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or 1xN/Nx1 3-channel, 2287 * where N is the number of points. vector<Point3d> can also be passed here. 2288 * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel, 2289 * where N is the number of points. vector<Point2d> can also be passed here. 2290 * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\) . 2291 * @param distCoeffs Input vector of distortion coefficients 2292 * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are 2293 * assumed. 2294 * @param rvec Input/Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from 2295 * the model coordinate system to the camera coordinate system. Input values are used as an initial solution. 2296 * @param tvec Input/Output translation vector. Input values are used as an initial solution. 2297 * @param criteria Criteria when to stop the Levenberg-Marquard iterative algorithm. 2298 * 2299 * The function refines the object pose given at least 3 object points, their corresponding image 2300 * projections, an initial solution for the rotation and translation vector, 2301 * as well as the camera intrinsic matrix and the distortion coefficients. 2302 * The function minimizes the projection error with respect to the rotation and the translation vectors, according 2303 * to a Levenberg-Marquardt iterative minimization CITE: Madsen04 CITE: Eade13 process. 
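     *
     * <p>A minimal usage sketch, not part of the upstream documentation: the correspondences,
     * intrinsics, and distortion values below are illustrative placeholders, and the coarse pose is
     * assumed to come from REF: solvePnP. Point3 and CvType are the org.opencv.core classes.</p>
     * <pre>{@code
     * // Four coplanar model points and their (hypothetical) image projections:
     * MatOfPoint3f objPts = new MatOfPoint3f(
     *         new Point3(0, 0, 0), new Point3(1, 0, 0), new Point3(0, 1, 0), new Point3(1, 1, 0));
     * MatOfPoint2f imgPts = new MatOfPoint2f(
     *         new Point(320, 240), new Point(420, 240), new Point(320, 340), new Point(420, 340));
     * Mat K = new Mat(3, 3, CvType.CV_64F);           // stand-in for a calibrated camera matrix
     * K.put(0, 0, 800, 0, 320, 0, 800, 240, 0, 0, 1);
     * MatOfDouble dist = new MatOfDouble(0, 0, 0, 0); // stand-in for calibrated distortion coefficients
     * Mat rvec = new Mat(), tvec = new Mat();
     * Calib3d.solvePnP(objPts, imgPts, K, dist, rvec, tvec);               // coarse pose
     * TermCriteria crit = new TermCriteria(TermCriteria.EPS + TermCriteria.COUNT, 20, 1e-7);
     * Calib3d.solvePnPRefineLM(objPts, imgPts, K, dist, rvec, tvec, crit); // refined in place
     * }</pre>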
     */
    public static void solvePnPRefineLM(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, Mat rvec, Mat tvec, TermCriteria criteria) {
        solvePnPRefineLM_0(objectPoints.nativeObj, imagePoints.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvec.nativeObj, tvec.nativeObj, criteria.type, criteria.maxCount, criteria.epsilon);
    }

    /**
     * Refine a pose (the translation and the rotation that transform a 3D point expressed in the object coordinate frame
     * to the camera coordinate frame) from 3D-2D point correspondences, starting from an initial solution.
     *
     * SEE: REF: calib3d_solvePnP
     *
     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or 1xN/Nx1 3-channel,
     * where N is the number of points. vector&lt;Point3d&gt; can also be passed here.
     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
     * where N is the number of points. vector&lt;Point2d&gt; can also be passed here.
     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\) .
     * @param distCoeffs Input vector of distortion coefficients
     * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
     * assumed.
     * @param rvec Input/Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
     * the model coordinate system to the camera coordinate system. Input values are used as an initial solution.
     * @param tvec Input/Output translation vector. Input values are used as an initial solution.
     *
     * The function refines the object pose given at least 3 object points, their corresponding image
     * projections, an initial solution for the rotation and translation vector,
     * as well as the camera intrinsic matrix and the distortion coefficients.
     * The function minimizes the projection error with respect to the rotation and the translation vectors, according
     * to a Levenberg-Marquardt iterative minimization CITE: Madsen04 CITE: Eade13 process.
     */
    public static void solvePnPRefineLM(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, Mat rvec, Mat tvec) {
        solvePnPRefineLM_1(objectPoints.nativeObj, imagePoints.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvec.nativeObj, tvec.nativeObj);
    }


    //
    // C++: void cv::solvePnPRefineVVS(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, Mat& rvec, Mat& tvec, TermCriteria criteria = TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 20, FLT_EPSILON), double VVSlambda = 1)
    //

    /**
     * Refine a pose (the translation and the rotation that transform a 3D point expressed in the object coordinate frame
     * to the camera coordinate frame) from 3D-2D point correspondences, starting from an initial solution.
     *
     * SEE: REF: calib3d_solvePnP
     *
     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or 1xN/Nx1 3-channel,
     * where N is the number of points. vector&lt;Point3d&gt; can also be passed here.
     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
     * where N is the number of points. vector&lt;Point2d&gt; can also be passed here.
     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\) .
     * @param distCoeffs Input vector of distortion coefficients
     * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
     * assumed.
     * @param rvec Input/Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
     * the model coordinate system to the camera coordinate system. Input values are used as an initial solution.
     * @param tvec Input/Output translation vector. Input values are used as an initial solution.
     * @param criteria Criteria when to stop the Levenberg-Marquardt iterative algorithm.
     * @param VVSlambda Gain for the virtual visual servoing control law, equivalent to the \(\alpha\)
     * gain in the Damped Gauss-Newton formulation.
     *
     * The function refines the object pose given at least 3 object points, their corresponding image
     * projections, an initial solution for the rotation and translation vector,
     * as well as the camera intrinsic matrix and the distortion coefficients.
     * The function minimizes the projection error with respect to the rotation and the translation vectors, using a
     * virtual visual servoing (VVS) CITE: Chaumette06 CITE: Marchand16 scheme.
     */
    public static void solvePnPRefineVVS(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, Mat rvec, Mat tvec, TermCriteria criteria, double VVSlambda) {
        solvePnPRefineVVS_0(objectPoints.nativeObj, imagePoints.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvec.nativeObj, tvec.nativeObj, criteria.type, criteria.maxCount, criteria.epsilon, VVSlambda);
    }

    /**
     * Refine a pose (the translation and the rotation that transform a 3D point expressed in the object coordinate frame
     * to the camera coordinate frame) from 3D-2D point correspondences, starting from an initial solution.
     *
     * SEE: REF: calib3d_solvePnP
     *
     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or 1xN/Nx1 3-channel,
     * where N is the number of points. vector&lt;Point3d&gt; can also be passed here.
     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
     * where N is the number of points. vector&lt;Point2d&gt; can also be passed here.
     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\) .
     * @param distCoeffs Input vector of distortion coefficients
     * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
     * assumed.
     * @param rvec Input/Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
     * the model coordinate system to the camera coordinate system. Input values are used as an initial solution.
     * @param tvec Input/Output translation vector. Input values are used as an initial solution.
     * @param criteria Criteria when to stop the Levenberg-Marquardt iterative algorithm.
     * The gain for the virtual visual servoing control law, equivalent to the \(\alpha\)
     * gain in the Damped Gauss-Newton formulation, defaults to 1.
     *
     * The function refines the object pose given at least 3 object points, their corresponding image
     * projections, an initial solution for the rotation and translation vector,
     * as well as the camera intrinsic matrix and the distortion coefficients.
     * The function minimizes the projection error with respect to the rotation and the translation vectors, using a
     * virtual visual servoing (VVS) CITE: Chaumette06 CITE: Marchand16 scheme.
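     *
     * <p>A minimal usage sketch, not part of the upstream documentation; it reuses the placeholder
     * objPts, imgPts, K, dist and the coarse rvec/tvec from the REF: solvePnPRefineLM example above,
     * and relies on the default VVS gain of 1:</p>
     * <pre>{@code
     * Calib3d.solvePnPRefineVVS(objPts, imgPts, K, dist, rvec, tvec,
     *         new TermCriteria(TermCriteria.EPS + TermCriteria.COUNT, 20, 1e-7));
     * // rvec and tvec now hold the pose refined by the virtual visual servoing scheme.
     * }</pre>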
     */
    public static void solvePnPRefineVVS(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, Mat rvec, Mat tvec, TermCriteria criteria) {
        solvePnPRefineVVS_1(objectPoints.nativeObj, imagePoints.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvec.nativeObj, tvec.nativeObj, criteria.type, criteria.maxCount, criteria.epsilon);
    }

    /**
     * Refine a pose (the translation and the rotation that transform a 3D point expressed in the object coordinate frame
     * to the camera coordinate frame) from 3D-2D point correspondences, starting from an initial solution.
     *
     * SEE: REF: calib3d_solvePnP
     *
     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or 1xN/Nx1 3-channel,
     * where N is the number of points. vector&lt;Point3d&gt; can also be passed here.
     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
     * where N is the number of points. vector&lt;Point2d&gt; can also be passed here.
     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\) .
     * @param distCoeffs Input vector of distortion coefficients
     * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
     * assumed.
     * @param rvec Input/Output rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
     * the model coordinate system to the camera coordinate system. Input values are used as an initial solution.
     * @param tvec Input/Output translation vector. Input values are used as an initial solution.
     * The default stop criteria and the default gain for the virtual visual servoing control law,
     * equivalent to the \(\alpha\) gain in the Damped Gauss-Newton formulation, are used.
     *
     * The function refines the object pose given at least 3 object points, their corresponding image
     * projections, an initial solution for the rotation and translation vector,
     * as well as the camera intrinsic matrix and the distortion coefficients.
     * The function minimizes the projection error with respect to the rotation and the translation vectors, using a
     * virtual visual servoing (VVS) CITE: Chaumette06 CITE: Marchand16 scheme.
     */
    public static void solvePnPRefineVVS(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, Mat rvec, Mat tvec) {
        solvePnPRefineVVS_2(objectPoints.nativeObj, imagePoints.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvec.nativeObj, tvec.nativeObj);
    }


    //
    // C++: int cv::solvePnPGeneric(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, vector_Mat& rvecs, vector_Mat& tvecs, bool useExtrinsicGuess = false, SolvePnPMethod flags = SOLVEPNP_ITERATIVE, Mat rvec = Mat(), Mat tvec = Mat(), Mat& reprojectionError = Mat())
    //

    /**
     * Finds an object pose from 3D-2D point correspondences.
     *
     * SEE: REF: calib3d_solvePnP
     *
     * This function returns a list of all the possible solutions (a solution is a &lt;rotation vector, translation vector&gt;
     * couple), depending on the number of input points and the chosen method:
     * <ul>
     * <li>
     * P3P methods (REF: SOLVEPNP_P3P, REF: SOLVEPNP_AP3P): 3 or 4 input points. Number of returned solutions can be between 0 and 4 with 3 input points.
     * </li>
     * <li>
     * REF: SOLVEPNP_IPPE Input points must be >= 4 and object points must be coplanar. Returns 2 solutions.
     * </li>
     * <li>
     * REF: SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
     * Number of input points must be 4 and 2 solutions are returned. Object points must be defined in the following order:
     * <ul>
     * <li>
     * point 0: [-squareLength / 2, squareLength / 2, 0]
     * </li>
     * <li>
     * point 1: [ squareLength / 2, squareLength / 2, 0]
     * </li>
     * <li>
     * point 2: [ squareLength / 2, -squareLength / 2, 0]
     * </li>
     * <li>
     * point 3: [-squareLength / 2, -squareLength / 2, 0]
     * </li>
     * </ul>
     * </li>
     * <li>
     * for all the other flags, number of input points must be >= 4 and object points can be in any configuration.
     * Only 1 solution is returned.
     * </li>
     * </ul>
     *
     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
     * 1xN/Nx1 3-channel, where N is the number of points. vector&lt;Point3d&gt; can also be passed here.
     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
     * where N is the number of points. vector&lt;Point2d&gt; can also be passed here.
     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\) .
     * @param distCoeffs Input vector of distortion coefficients
     * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
     * assumed.
     * @param rvecs Vector of output rotation vectors (see REF: Rodrigues ) that, together with tvecs, bring points from
     * the model coordinate system to the camera coordinate system.
     * @param tvecs Vector of output translation vectors.
     * @param useExtrinsicGuess Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses
     * the provided rvec and tvec values as initial approximations of the rotation and translation
     * vectors, respectively, and further optimizes them.
     * @param flags Method for solving a PnP problem: see REF: calib3d_solvePnP_flags
     * @param rvec Rotation vector used to initialize an iterative PnP refinement algorithm, when flag is REF: SOLVEPNP_ITERATIVE
     * and useExtrinsicGuess is set to true.
     * @param tvec Translation vector used to initialize an iterative PnP refinement algorithm, when flag is REF: SOLVEPNP_ITERATIVE
     * and useExtrinsicGuess is set to true.
     * @param reprojectionError Optional vector of reprojection error, that is the RMS error
     * (\( \text{RMSE} = \sqrt{\frac{\sum_{i}^{N} \left ( \hat{y_i} - y_i \right )^2}{N}} \)) between the input image points
     * and the 3D object points projected with the estimated pose.
     *
     * More information is described in REF: calib3d_solvePnP
     *
     * <b>Note:</b>
     * <ul>
     * <li>
     * An example of how to use solvePnP for planar augmented reality can be found at
     * opencv_source_code/samples/python/plane_ar.py
     * </li>
     * <li>
     * If you are using Python:
     * <ul>
     * <li>
     * Numpy array slices won't work as input because solvePnP requires contiguous
     * arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
     * modules/calib3d/src/solvepnp.cpp version 2.4.9)
     * </li>
     * <li>
     * The P3P algorithm requires image points to be in an array of shape (N,1,2) due
     * to its calling of #undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
     * which requires 2-channel information.
     * </li>
     * <li>
     * Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
     * it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
     * np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
     * </li>
     * </ul>
     * </li>
     * <li>
     * The methods REF: SOLVEPNP_DLS and REF: SOLVEPNP_UPNP cannot be used as the current implementations are
     * unstable and sometimes give completely wrong results. If you pass one of these two
     * flags, REF: SOLVEPNP_EPNP method will be used instead.
     * </li>
     * <li>
     * The minimum number of points is 4 in the general case. In the case of REF: SOLVEPNP_P3P and REF: SOLVEPNP_AP3P
     * methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
     * of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
     * </li>
     * <li>
     * With REF: SOLVEPNP_ITERATIVE method and {@code useExtrinsicGuess=true}, the minimum number of points is 3 (3 points
     * are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
     * global solution to converge.
     * </li>
     * <li>
     * With REF: SOLVEPNP_IPPE input points must be >= 4 and object points must be coplanar.
     * </li>
     * <li>
     * With REF: SOLVEPNP_IPPE_SQUARE this is a special case suitable for marker pose estimation.
     * Number of input points must be 4. Object points must be defined in the following order:
     * <ul>
     * <li>
     * point 0: [-squareLength / 2, squareLength / 2, 0]
     * </li>
     * <li>
     * point 1: [ squareLength / 2, squareLength / 2, 0]
     * </li>
     * <li>
     * point 2: [ squareLength / 2, -squareLength / 2, 0]
     * </li>
     * <li>
     * point 3: [-squareLength / 2, -squareLength / 2, 0]
     * </li>
     * </ul>
     * </li>
     * </ul>
     * @return automatically generated
     */
    public static int solvePnPGeneric(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, boolean useExtrinsicGuess, int flags, Mat rvec, Mat tvec, Mat reprojectionError) {
        Mat rvecs_mat = new Mat();
        Mat tvecs_mat = new Mat();
        int retVal = solvePnPGeneric_0(objectPoints.nativeObj, imagePoints.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, useExtrinsicGuess, flags, rvec.nativeObj, tvec.nativeObj, reprojectionError.nativeObj);
        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
        rvecs_mat.release();
        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
        tvecs_mat.release();
        return retVal;
    }

    /**
     * Finds an object pose from 3D-2D point correspondences.
     *
     * SEE: REF: calib3d_solvePnP
     *
     * This function returns a list of all the possible solutions (a solution is a &lt;rotation vector, translation vector&gt;
     * couple), depending on the number of input points and the chosen method:
     * <ul>
     * <li>
     * P3P methods (REF: SOLVEPNP_P3P, REF: SOLVEPNP_AP3P): 3 or 4 input points. Number of returned solutions can be between 0 and 4 with 3 input points.
     * </li>
     * <li>
     * REF: SOLVEPNP_IPPE Input points must be >= 4 and object points must be coplanar. Returns 2 solutions.
     * </li>
     * <li>
     * REF: SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
     * Number of input points must be 4 and 2 solutions are returned. Object points must be defined in the following order:
     * <ul>
     * <li>
     * point 0: [-squareLength / 2, squareLength / 2, 0]
     * </li>
     * <li>
     * point 1: [ squareLength / 2, squareLength / 2, 0]
     * </li>
     * <li>
     * point 2: [ squareLength / 2, -squareLength / 2, 0]
     * </li>
     * <li>
     * point 3: [-squareLength / 2, -squareLength / 2, 0]
     * </li>
     * </ul>
     * </li>
     * <li>
     * for all the other flags, number of input points must be >= 4 and object points can be in any configuration.
     * Only 1 solution is returned.
     * </li>
     * </ul>
     *
     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
     * 1xN/Nx1 3-channel, where N is the number of points. vector&lt;Point3d&gt; can also be passed here.
     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
     * where N is the number of points. vector&lt;Point2d&gt; can also be passed here.
     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\) .
     * @param distCoeffs Input vector of distortion coefficients
     * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
     * assumed.
     * @param rvecs Vector of output rotation vectors (see REF: Rodrigues ) that, together with tvecs, bring points from
     * the model coordinate system to the camera coordinate system.
     * @param tvecs Vector of output translation vectors.
     * @param useExtrinsicGuess Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses
     * the provided rvec and tvec values as initial approximations of the rotation and translation
     * vectors, respectively, and further optimizes them.
     * @param flags Method for solving a PnP problem: see REF: calib3d_solvePnP_flags
     * @param rvec Rotation vector used to initialize an iterative PnP refinement algorithm, when flag is REF: SOLVEPNP_ITERATIVE
     * and useExtrinsicGuess is set to true.
     * @param tvec Translation vector used to initialize an iterative PnP refinement algorithm, when flag is REF: SOLVEPNP_ITERATIVE
     * and useExtrinsicGuess is set to true.
     * The optional RMS reprojection error
     * (\( \text{RMSE} = \sqrt{\frac{\sum_{i}^{N} \left ( \hat{y_i} - y_i \right )^2}{N}} \)) between the input image points
     * and the 3D object points projected with the estimated pose is not returned by this overload.
     *
     * More information is described in REF: calib3d_solvePnP
     *
     * <b>Note:</b>
     * <ul>
     * <li>
     * An example of how to use solvePnP for planar augmented reality can be found at
     * opencv_source_code/samples/python/plane_ar.py
     * </li>
     * <li>
     * If you are using Python:
     * <ul>
     * <li>
     * Numpy array slices won't work as input because solvePnP requires contiguous
     * arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
     * modules/calib3d/src/solvepnp.cpp version 2.4.9)
     * </li>
     * <li>
     * The P3P algorithm requires image points to be in an array of shape (N,1,2) due
     * to its calling of #undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
     * which requires 2-channel information.
     * </li>
     * <li>
     * Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
     * it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
     * np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
     * </li>
     * </ul>
     * </li>
     * <li>
     * The methods REF: SOLVEPNP_DLS and REF: SOLVEPNP_UPNP cannot be used as the current implementations are
     * unstable and sometimes give completely wrong results. If you pass one of these two
     * flags, REF: SOLVEPNP_EPNP method will be used instead.
     * </li>
     * <li>
     * The minimum number of points is 4 in the general case. In the case of REF: SOLVEPNP_P3P and REF: SOLVEPNP_AP3P
     * methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
     * of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
     * </li>
     * <li>
     * With REF: SOLVEPNP_ITERATIVE method and {@code useExtrinsicGuess=true}, the minimum number of points is 3 (3 points
     * are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
     * global solution to converge.
     * </li>
     * <li>
     * With REF: SOLVEPNP_IPPE input points must be >= 4 and object points must be coplanar.
     * </li>
     * <li>
     * With REF: SOLVEPNP_IPPE_SQUARE this is a special case suitable for marker pose estimation.
     * Number of input points must be 4. Object points must be defined in the following order:
     * <ul>
     * <li>
     * point 0: [-squareLength / 2, squareLength / 2, 0]
     * </li>
     * <li>
     * point 1: [ squareLength / 2, squareLength / 2, 0]
     * </li>
     * <li>
     * point 2: [ squareLength / 2, -squareLength / 2, 0]
     * </li>
     * <li>
     * point 3: [-squareLength / 2, -squareLength / 2, 0]
     * </li>
     * </ul>
     * </li>
     * </ul>
     * @return automatically generated
     */
    public static int solvePnPGeneric(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, boolean useExtrinsicGuess, int flags, Mat rvec, Mat tvec) {
        Mat rvecs_mat = new Mat();
        Mat tvecs_mat = new Mat();
        int retVal = solvePnPGeneric_1(objectPoints.nativeObj, imagePoints.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, useExtrinsicGuess, flags, rvec.nativeObj, tvec.nativeObj);
        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
        rvecs_mat.release();
        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
        tvecs_mat.release();
        return retVal;
    }

    /**
     * Finds an object pose from 3D-2D point correspondences.
     *
     * SEE: REF: calib3d_solvePnP
     *
     * This function returns a list of all the possible solutions (a solution is a &lt;rotation vector, translation vector&gt;
     * couple), depending on the number of input points and the chosen method:
     * <ul>
     * <li>
     * P3P methods (REF: SOLVEPNP_P3P, REF: SOLVEPNP_AP3P): 3 or 4 input points. Number of returned solutions can be between 0 and 4 with 3 input points.
     * </li>
     * <li>
     * REF: SOLVEPNP_IPPE Input points must be >= 4 and object points must be coplanar. Returns 2 solutions.
     * </li>
     * <li>
     * REF: SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
     * Number of input points must be 4 and 2 solutions are returned. Object points must be defined in the following order:
     * <ul>
     * <li>
     * point 0: [-squareLength / 2, squareLength / 2, 0]
     * </li>
     * <li>
     * point 1: [ squareLength / 2, squareLength / 2, 0]
     * </li>
     * <li>
     * point 2: [ squareLength / 2, -squareLength / 2, 0]
     * </li>
     * <li>
     * point 3: [-squareLength / 2, -squareLength / 2, 0]
     * </li>
     * </ul>
     * </li>
     * <li>
     * for all the other flags, number of input points must be >= 4 and object points can be in any configuration.
     * Only 1 solution is returned.
     * </li>
     * </ul>
     *
     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
     * 1xN/Nx1 3-channel, where N is the number of points. vector&lt;Point3d&gt; can also be passed here.
     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
     * where N is the number of points. vector&lt;Point2d&gt; can also be passed here.
     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\) .
     * @param distCoeffs Input vector of distortion coefficients
     * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
     * assumed.
     * @param rvecs Vector of output rotation vectors (see REF: Rodrigues ) that, together with tvecs, bring points from
     * the model coordinate system to the camera coordinate system.
     * @param tvecs Vector of output translation vectors.
     * @param useExtrinsicGuess Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses
     * the provided rvec and tvec values as initial approximations of the rotation and translation
     * vectors, respectively, and further optimizes them.
     * @param flags Method for solving a PnP problem: see REF: calib3d_solvePnP_flags
     * @param rvec Rotation vector used to initialize an iterative PnP refinement algorithm, when flag is REF: SOLVEPNP_ITERATIVE
     * and useExtrinsicGuess is set to true.
     * The translation vector used to initialize the refinement and the optional RMS reprojection error
     * (\( \text{RMSE} = \sqrt{\frac{\sum_{i}^{N} \left ( \hat{y_i} - y_i \right )^2}{N}} \)) between the input image points
     * and the 3D object points projected with the estimated pose keep their default empty values.
     *
     * More information is described in REF: calib3d_solvePnP
     *
     * <b>Note:</b>
     * <ul>
     * <li>
     * An example of how to use solvePnP for planar augmented reality can be found at
     * opencv_source_code/samples/python/plane_ar.py
     * </li>
     * <li>
     * If you are using Python:
     * <ul>
     * <li>
     * Numpy array slices won't work as input because solvePnP requires contiguous
     * arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
     * modules/calib3d/src/solvepnp.cpp version 2.4.9)
     * </li>
     * <li>
     * The P3P algorithm requires image points to be in an array of shape (N,1,2) due
     * to its calling of #undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
     * which requires 2-channel information.
     * </li>
     * <li>
     * Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
     * it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
     * np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
     * </li>
     * </ul>
     * </li>
     * <li>
     * The methods REF: SOLVEPNP_DLS and REF: SOLVEPNP_UPNP cannot be used as the current implementations are
     * unstable and sometimes give completely wrong results. If you pass one of these two
     * flags, REF: SOLVEPNP_EPNP method will be used instead.
     * </li>
     * <li>
     * The minimum number of points is 4 in the general case. In the case of REF: SOLVEPNP_P3P and REF: SOLVEPNP_AP3P
     * methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
     * of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
     * </li>
     * <li>
     * With REF: SOLVEPNP_ITERATIVE method and {@code useExtrinsicGuess=true}, the minimum number of points is 3 (3 points
     * are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
     * global solution to converge.
     * </li>
     * <li>
     * With REF: SOLVEPNP_IPPE input points must be >= 4 and object points must be coplanar.
     * </li>
     * <li>
     * With REF: SOLVEPNP_IPPE_SQUARE this is a special case suitable for marker pose estimation.
     * Number of input points must be 4. Object points must be defined in the following order:
     * <ul>
     * <li>
     * point 0: [-squareLength / 2, squareLength / 2, 0]
     * </li>
     * <li>
     * point 1: [ squareLength / 2, squareLength / 2, 0]
     * </li>
     * <li>
     * point 2: [ squareLength / 2, -squareLength / 2, 0]
     * </li>
     * <li>
     * point 3: [-squareLength / 2, -squareLength / 2, 0]
     * </li>
     * </ul>
     * </li>
     * </ul>
     * @return automatically generated
     */
    public static int solvePnPGeneric(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, boolean useExtrinsicGuess, int flags, Mat rvec) {
        Mat rvecs_mat = new Mat();
        Mat tvecs_mat = new Mat();
        int retVal = solvePnPGeneric_2(objectPoints.nativeObj, imagePoints.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, useExtrinsicGuess, flags, rvec.nativeObj);
        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
        rvecs_mat.release();
        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
        tvecs_mat.release();
        return retVal;
    }

    /**
     * Finds an object pose from 3D-2D point correspondences.
     *
     * SEE: REF: calib3d_solvePnP
     *
     * This function returns a list of all the possible solutions (a solution is a &lt;rotation vector, translation vector&gt;
     * couple), depending on the number of input points and the chosen method:
     * <ul>
     * <li>
     * P3P methods (REF: SOLVEPNP_P3P, REF: SOLVEPNP_AP3P): 3 or 4 input points. Number of returned solutions can be between 0 and 4 with 3 input points.
     * </li>
     * <li>
     * REF: SOLVEPNP_IPPE Input points must be >= 4 and object points must be coplanar. Returns 2 solutions.
     * </li>
     * <li>
     * REF: SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
     * Number of input points must be 4 and 2 solutions are returned. Object points must be defined in the following order:
     * <ul>
     * <li>
     * point 0: [-squareLength / 2, squareLength / 2, 0]
     * </li>
     * <li>
     * point 1: [ squareLength / 2, squareLength / 2, 0]
     * </li>
     * <li>
     * point 2: [ squareLength / 2, -squareLength / 2, 0]
     * </li>
     * <li>
     * point 3: [-squareLength / 2, -squareLength / 2, 0]
     * </li>
     * </ul>
     * </li>
     * <li>
     * for all the other flags, number of input points must be >= 4 and object points can be in any configuration.
     * Only 1 solution is returned.
     * </li>
     * </ul>
     *
     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
     * 1xN/Nx1 3-channel, where N is the number of points. vector&lt;Point3d&gt; can also be passed here.
     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
     * where N is the number of points. vector&lt;Point2d&gt; can also be passed here.
     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\) .
     * @param distCoeffs Input vector of distortion coefficients
     * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
     * assumed.
     * @param rvecs Vector of output rotation vectors (see REF: Rodrigues ) that, together with tvecs, bring points from
     * the model coordinate system to the camera coordinate system.
     * @param tvecs Vector of output translation vectors.
     * @param useExtrinsicGuess Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses
     * the provided rvec and tvec values as initial approximations of the rotation and translation
     * vectors, respectively, and further optimizes them.
     * @param flags Method for solving a PnP problem: see REF: calib3d_solvePnP_flags
     * The rotation and translation vectors used to initialize the refinement (when flags is
     * REF: SOLVEPNP_ITERATIVE and useExtrinsicGuess is set to true) and the optional RMS reprojection error
     * (\( \text{RMSE} = \sqrt{\frac{\sum_{i}^{N} \left ( \hat{y_i} - y_i \right )^2}{N}} \)) between the input image points
     * and the 3D object points projected with the estimated pose keep their default empty values.
     *
     * More information is described in REF: calib3d_solvePnP
     *
     * <b>Note:</b>
     * <ul>
     * <li>
     * An example of how to use solvePnP for planar augmented reality can be found at
     * opencv_source_code/samples/python/plane_ar.py
     * </li>
     * <li>
     * If you are using Python:
     * <ul>
     * <li>
     * Numpy array slices won't work as input because solvePnP requires contiguous
     * arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
     * modules/calib3d/src/solvepnp.cpp version 2.4.9)
     * </li>
     * <li>
     * The P3P algorithm requires image points to be in an array of shape (N,1,2) due
     * to its calling of #undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
     * which requires 2-channel information.
     * </li>
     * <li>
     * Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
     * it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
     * np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
     * </li>
     * </ul>
     * </li>
     * <li>
     * The methods REF: SOLVEPNP_DLS and REF: SOLVEPNP_UPNP cannot be used as the current implementations are
     * unstable and sometimes give completely wrong results. If you pass one of these two
     * flags, REF: SOLVEPNP_EPNP method will be used instead.
     * </li>
     * <li>
     * The minimum number of points is 4 in the general case. In the case of REF: SOLVEPNP_P3P and REF: SOLVEPNP_AP3P
     * methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
     * of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
     * </li>
     * <li>
     * With REF: SOLVEPNP_ITERATIVE method and {@code useExtrinsicGuess=true}, the minimum number of points is 3 (3 points
     * are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
     * global solution to converge.
     * </li>
     * <li>
     * With REF: SOLVEPNP_IPPE input points must be >= 4 and object points must be coplanar.
     * </li>
     * <li>
     * With REF: SOLVEPNP_IPPE_SQUARE this is a special case suitable for marker pose estimation.
     * Number of input points must be 4. Object points must be defined in the following order:
     * <ul>
     * <li>
     * point 0: [-squareLength / 2, squareLength / 2, 0]
     * </li>
     * <li>
     * point 1: [ squareLength / 2, squareLength / 2, 0]
     * </li>
     * <li>
     * point 2: [ squareLength / 2, -squareLength / 2, 0]
     * </li>
     * <li>
     * point 3: [-squareLength / 2, -squareLength / 2, 0]
     * </li>
     * </ul>
     * </li>
     * </ul>
     * @return automatically generated
     */
    public static int solvePnPGeneric(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, boolean useExtrinsicGuess, int flags) {
        Mat rvecs_mat = new Mat();
        Mat tvecs_mat = new Mat();
        int retVal = solvePnPGeneric_3(objectPoints.nativeObj, imagePoints.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, useExtrinsicGuess, flags);
        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
        rvecs_mat.release();
        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
        tvecs_mat.release();
        return retVal;
    }

    /**
     * Finds an object pose from 3D-2D point correspondences.
     *
     * SEE: REF: calib3d_solvePnP
     *
     * This function returns a list of all the possible solutions (a solution is a &lt;rotation vector, translation vector&gt;
     * couple), depending on the number of input points and the chosen method:
     * <ul>
     * <li>
     * P3P methods (REF: SOLVEPNP_P3P, REF: SOLVEPNP_AP3P): 3 or 4 input points. Number of returned solutions can be between 0 and 4 with 3 input points.
     * </li>
     * <li>
     * REF: SOLVEPNP_IPPE Input points must be >= 4 and object points must be coplanar. Returns 2 solutions.
     * </li>
     * <li>
     * REF: SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
     * Number of input points must be 4 and 2 solutions are returned. Object points must be defined in the following order:
     * <ul>
     * <li>
     * point 0: [-squareLength / 2, squareLength / 2, 0]
     * </li>
     * <li>
     * point 1: [ squareLength / 2, squareLength / 2, 0]
     * </li>
     * <li>
     * point 2: [ squareLength / 2, -squareLength / 2, 0]
     * </li>
     * <li>
     * point 3: [-squareLength / 2, -squareLength / 2, 0]
     * </li>
     * </ul>
     * </li>
     * <li>
     * for all the other flags, number of input points must be >= 4 and object points can be in any configuration.
     * Only 1 solution is returned.
     * </li>
     * </ul>
     *
     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
     * 1xN/Nx1 3-channel, where N is the number of points. vector&lt;Point3d&gt; can also be passed here.
     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
     * where N is the number of points. vector&lt;Point2d&gt; can also be passed here.
     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\) .
     * @param distCoeffs Input vector of distortion coefficients
     * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
     * assumed.
     * @param rvecs Vector of output rotation vectors (see REF: Rodrigues ) that, together with tvecs, bring points from
     * the model coordinate system to the camera coordinate system.
     * @param tvecs Vector of output translation vectors.
     * @param useExtrinsicGuess Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses
     * the provided rvec and tvec values as initial approximations of the rotation and translation
     * vectors, respectively, and further optimizes them.
     * The default #SOLVEPNP_ITERATIVE method is used. The rotation and translation vectors used to
     * initialize the refinement (when useExtrinsicGuess is set to true) and the optional RMS reprojection error
     * (\( \text{RMSE} = \sqrt{\frac{\sum_{i}^{N} \left ( \hat{y_i} - y_i \right )^2}{N}} \)) between the input image points
     * and the 3D object points projected with the estimated pose keep their default empty values.
     *
     * More information is described in REF: calib3d_solvePnP
     *
     * <b>Note:</b>
     * <ul>
     * <li>
     * An example of how to use solvePnP for planar augmented reality can be found at
     * opencv_source_code/samples/python/plane_ar.py
     * </li>
     * <li>
     * If you are using Python:
     * <ul>
     * <li>
     * Numpy array slices won't work as input because solvePnP requires contiguous
     * arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
     * modules/calib3d/src/solvepnp.cpp version 2.4.9)
     * </li>
     * <li>
     * The P3P algorithm requires image points to be in an array of shape (N,1,2) due
     * to its calling of #undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
     * which requires 2-channel information.
     * </li>
     * <li>
     * Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
     * it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
     * np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
     * </li>
     * </ul>
     * </li>
     * <li>
     * The methods REF: SOLVEPNP_DLS and REF: SOLVEPNP_UPNP cannot be used as the current implementations are
     * unstable and sometimes give completely wrong results. If you pass one of these two
     * flags, REF: SOLVEPNP_EPNP method will be used instead.
     * </li>
     * <li>
     * The minimum number of points is 4 in the general case. In the case of REF: SOLVEPNP_P3P and REF: SOLVEPNP_AP3P
     * methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
     * of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
     * </li>
     * <li>
     * With REF: SOLVEPNP_ITERATIVE method and {@code useExtrinsicGuess=true}, the minimum number of points is 3 (3 points
     * are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
     * global solution to converge.
     * </li>
     * <li>
     * With REF: SOLVEPNP_IPPE input points must be >= 4 and object points must be coplanar.
     * </li>
     * <li>
     * With REF: SOLVEPNP_IPPE_SQUARE this is a special case suitable for marker pose estimation.
     * Number of input points must be 4. Object points must be defined in the following order:
     * <ul>
     * <li>
     * point 0: [-squareLength / 2, squareLength / 2, 0]
     * </li>
     * <li>
     * point 1: [ squareLength / 2, squareLength / 2, 0]
     * </li>
     * <li>
     * point 2: [ squareLength / 2, -squareLength / 2, 0]
     * </li>
     * <li>
     * point 3: [-squareLength / 2, -squareLength / 2, 0]
     * </li>
     * </ul>
     * </li>
     * </ul>
     * @return automatically generated
     */
    public static int solvePnPGeneric(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, boolean useExtrinsicGuess) {
        Mat rvecs_mat = new Mat();
        Mat tvecs_mat = new Mat();
        int retVal = solvePnPGeneric_4(objectPoints.nativeObj, imagePoints.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, useExtrinsicGuess);
        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
        rvecs_mat.release();
        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
        tvecs_mat.release();
        return retVal;
    }

    /**
     * Finds an object pose from 3D-2D point correspondences.
     *
     * SEE: REF: calib3d_solvePnP
     *
     * This function returns a list of all the possible solutions (a solution is a &lt;rotation vector, translation vector&gt;
     * couple), depending on the number of input points and the chosen method:
     * <ul>
     * <li>
     * P3P methods (REF: SOLVEPNP_P3P, REF: SOLVEPNP_AP3P): 3 or 4 input points. Number of returned solutions can be between 0 and 4 with 3 input points.
     * </li>
     * <li>
     * REF: SOLVEPNP_IPPE Input points must be >= 4 and object points must be coplanar. Returns 2 solutions.
     * </li>
     * <li>
     * REF: SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
     * Number of input points must be 4 and 2 solutions are returned. Object points must be defined in the following order:
     * <ul>
     * <li>
     * point 0: [-squareLength / 2, squareLength / 2, 0]
     * </li>
     * <li>
     * point 1: [ squareLength / 2, squareLength / 2, 0]
     * </li>
     * <li>
     * point 2: [ squareLength / 2, -squareLength / 2, 0]
     * </li>
     * <li>
     * point 3: [-squareLength / 2, -squareLength / 2, 0]
     * </li>
     * </ul>
     * </li>
     * <li>
     * for all the other flags, number of input points must be >= 4 and object points can be in any configuration.
     * Only 1 solution is returned.
     * </li>
     * </ul>
     *
     * @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
     * 1xN/Nx1 3-channel, where N is the number of points. vector&lt;Point3d&gt; can also be passed here.
     * @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
     * where N is the number of points. vector&lt;Point2d&gt; can also be passed here.
     * @param cameraMatrix Input camera intrinsic matrix \(\cameramatrix{A}\) .
     * @param distCoeffs Input vector of distortion coefficients
     * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
     * assumed.
     * @param rvecs Vector of output rotation vectors (see REF: Rodrigues ) that, together with tvecs, bring points from
     * the model coordinate system to the camera coordinate system.
     * @param tvecs Vector of output translation vectors.
     * All optional parameters keep their default values: the #SOLVEPNP_ITERATIVE method is used
     * without an extrinsic guess (so no provided rvec and tvec values are used as initial
     * approximations of the rotation and translation vectors), no initialization vectors are
     * supplied, and the optional RMS reprojection error
     * (\( \text{RMSE} = \sqrt{\frac{\sum_{i}^{N} \left ( \hat{y_i} - y_i \right )^2}{N}} \)) between the input image points
     * and the 3D object points projected with the estimated pose is not returned.
     *
     * More information is described in REF: calib3d_solvePnP
     *
     * <b>Note:</b>
     * <ul>
     * <li>
     * An example of how to use solvePnP for planar augmented reality can be found at
     * opencv_source_code/samples/python/plane_ar.py
     * </li>
     * <li>
     * If you are using Python:
     * <ul>
     * <li>
     * Numpy array slices won't work as input because solvePnP requires contiguous
     * arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
     * modules/calib3d/src/solvepnp.cpp version 2.4.9)
     * </li>
     * <li>
     * The P3P algorithm requires image points to be in an array of shape (N,1,2) due
     * to its calling of #undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
     * which requires 2-channel information.
     * </li>
     * <li>
     * Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
     * it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
     * np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
     * </li>
     * </ul>
     * </li>
     * <li>
     * The methods REF: SOLVEPNP_DLS and REF: SOLVEPNP_UPNP cannot be used as the current implementations are
     * unstable and sometimes give completely wrong results. If you pass one of these two
     * flags, REF: SOLVEPNP_EPNP method will be used instead.
     * </li>
     * <li>
     * The minimum number of points is 4 in the general case. In the case of REF: SOLVEPNP_P3P and REF: SOLVEPNP_AP3P
     * methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
     * of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
     * </li>
     * <li>
     * With REF: SOLVEPNP_ITERATIVE method and {@code useExtrinsicGuess=true}, the minimum number of points is 3 (3 points
     * are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
     * global solution to converge.
     * </li>
     * <li>
     * With REF: SOLVEPNP_IPPE input points must be >= 4 and object points must be coplanar.
     * </li>
     * <li>
     * With REF: SOLVEPNP_IPPE_SQUARE this is a special case suitable for marker pose estimation.
     * Number of input points must be 4.
     * Object points must be defined in the following order:
     * <ul>
     * <li>
     * point 0: [-squareLength / 2, squareLength / 2, 0]
     * </li>
     * <li>
     * point 1: [ squareLength / 2, squareLength / 2, 0]
     * </li>
     * <li>
     * point 2: [ squareLength / 2, -squareLength / 2, 0]
     * </li>
     * <li>
     * point 3: [-squareLength / 2, -squareLength / 2, 0]
     * </li>
     * </ul>
     * </li>
     * </ul>
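     *
     * For the Java bindings, a minimal usage sketch might look like the following. This is
     * illustrative only: {@code cameraMatrix} is assumed to be a known 3x3 intrinsic matrix,
     * {@code imagePoints} a MatOfPoint2f of detected 2D corners, and {@code squareLength} a
     * hypothetical side length of a square planar target.
     * <code>
     * double squareLength = 0.05; // hypothetical side length of the square target, in meters
     * MatOfPoint3f objectPoints = new MatOfPoint3f(
     *         new Point3(-squareLength / 2,  squareLength / 2, 0),
     *         new Point3( squareLength / 2,  squareLength / 2, 0),
     *         new Point3( squareLength / 2, -squareLength / 2, 0),
     *         new Point3(-squareLength / 2, -squareLength / 2, 0));
     * List<Mat> rvecs = new ArrayList<>();
     * List<Mat> tvecs = new ArrayList<>();
     * // an empty MatOfDouble means zero distortion coefficients
     * int nSolutions = Calib3d.solvePnPGeneric(objectPoints, imagePoints, cameraMatrix,
     *         new MatOfDouble(), rvecs, tvecs);
     * // rvecs.get(i) / tvecs.get(i) form the i-th candidate pose
     * </code>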
     * @return automatically generated
     */
    public static int solvePnPGeneric(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs) {
        Mat rvecs_mat = new Mat();
        Mat tvecs_mat = new Mat();
        int retVal = solvePnPGeneric_5(objectPoints.nativeObj, imagePoints.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj);
        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
        rvecs_mat.release();
        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
        tvecs_mat.release();
        return retVal;
    }


    //
    // C++: Mat cv::initCameraMatrix2D(vector_vector_Point3f objectPoints, vector_vector_Point2f imagePoints, Size imageSize, double aspectRatio = 1.0)
    //

    /**
     * Finds an initial camera intrinsic matrix from 3D-2D point correspondences.
     *
     * @param objectPoints Vector of vectors of the calibration pattern points in the calibration pattern
     * coordinate space. In the old interface all the per-view vectors are concatenated. See
     * #calibrateCamera for details.
     * @param imagePoints Vector of vectors of the projections of the calibration pattern points. In the
     * old interface all the per-view vectors are concatenated.
     * @param imageSize Image size in pixels used to initialize the principal point.
     * @param aspectRatio If it is zero or negative, both \(f_x\) and \(f_y\) are estimated independently.
     * Otherwise, \(f_x = f_y * \texttt{aspectRatio}\) .
     *
     * The function estimates and returns an initial camera intrinsic matrix for the camera calibration process.
     * Currently, the function only supports planar calibration patterns, which are patterns where each
     * object point has z-coordinate = 0.
     * @return automatically generated
     */
    public static Mat initCameraMatrix2D(List<MatOfPoint3f> objectPoints, List<MatOfPoint2f> imagePoints, Size imageSize, double aspectRatio) {
        List<Mat> objectPoints_tmplm = new ArrayList<Mat>((objectPoints != null) ? objectPoints.size() : 0);
        Mat objectPoints_mat = Converters.vector_vector_Point3f_to_Mat(objectPoints, objectPoints_tmplm);
        List<Mat> imagePoints_tmplm = new ArrayList<Mat>((imagePoints != null) ? imagePoints.size() : 0);
        Mat imagePoints_mat = Converters.vector_vector_Point2f_to_Mat(imagePoints, imagePoints_tmplm);
        return new Mat(initCameraMatrix2D_0(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height, aspectRatio));
    }

    /**
     * Finds an initial camera intrinsic matrix from 3D-2D point correspondences.
     *
     * @param objectPoints Vector of vectors of the calibration pattern points in the calibration pattern
     * coordinate space. In the old interface all the per-view vectors are concatenated. See
     * #calibrateCamera for details.
     * @param imagePoints Vector of vectors of the projections of the calibration pattern points. In the
     * old interface all the per-view vectors are concatenated.
     * @param imageSize Image size in pixels used to initialize the principal point.
     *
     * This overload uses the default {@code aspectRatio} of 1.0, so \(f_x = f_y\).
     *
     * The function estimates and returns an initial camera intrinsic matrix for the camera calibration process.
     * Currently, the function only supports planar calibration patterns, which are patterns where each
     * object point has z-coordinate = 0.
     * @return automatically generated
     */
    public static Mat initCameraMatrix2D(List<MatOfPoint3f> objectPoints, List<MatOfPoint2f> imagePoints, Size imageSize) {
        List<Mat> objectPoints_tmplm = new ArrayList<Mat>((objectPoints != null) ? objectPoints.size() : 0);
        Mat objectPoints_mat = Converters.vector_vector_Point3f_to_Mat(objectPoints, objectPoints_tmplm);
        List<Mat> imagePoints_tmplm = new ArrayList<Mat>((imagePoints != null) ? imagePoints.size() : 0);
        Mat imagePoints_mat = Converters.vector_vector_Point2f_to_Mat(imagePoints, imagePoints_tmplm);
        return new Mat(initCameraMatrix2D_1(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height));
    }


    //
    // C++: bool cv::findChessboardCorners(Mat image, Size patternSize, vector_Point2f& corners, int flags = CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE)
    //

    /**
     * Finds the positions of internal corners of the chessboard.
     *
     * @param image Source chessboard view. It must be an 8-bit grayscale or color image.
     * @param patternSize Number of inner corners per chessboard row and column
     * ( patternSize = cv::Size(points_per_row,points_per_column) = cv::Size(columns,rows) ).
     * @param corners Output array of detected corners.
     * @param flags Various operation flags that can be zero or a combination of the following values:
     * <ul>
     * <li>
     * REF: CALIB_CB_ADAPTIVE_THRESH Use adaptive thresholding to convert the image to black
     * and white, rather than a fixed threshold level (computed from the average image brightness).
     * </li>
     * <li>
     * REF: CALIB_CB_NORMALIZE_IMAGE Normalize the image gamma with equalizeHist before
     * applying fixed or adaptive thresholding.
     * </li>
     * <li>
     * REF: CALIB_CB_FILTER_QUADS Use additional criteria (like contour area, perimeter,
     * square-like shape) to filter out false quads extracted at the contour retrieval stage.
     * </li>
     * <li>
     * REF: CALIB_CB_FAST_CHECK Run a fast check on the image that looks for chessboard corners,
     * and shortcut the call if none is found. This can drastically speed up the call in the
     * degenerate condition when no chessboard is observed.
     * </li>
     * </ul>
     *
     * The function attempts to determine whether the input image is a view of the chessboard pattern and
     * locate the internal chessboard corners. The function returns a non-zero value if all of the corners
     * are found and they are placed in a certain order (row by row, left to right in every row).
     * Otherwise, if the function fails to find all the corners or reorder them, it returns 0. For example,
     * a regular chessboard has 8 x 8 squares and 7 x 7 internal corners, that is, points where the black
     * squares touch each other. The detected coordinates are approximate, and to determine their positions
     * more accurately, the function calls cornerSubPix. You also may use the function cornerSubPix with
     * different parameters if returned coordinates are not accurate enough.
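     *
     * For the Java bindings, the equivalent flow is a direct transliteration of the C++ sample
     * below (a sketch; it assumes {@code gray} is an 8-bit grayscale Mat, {@code img} a color image
     * to draw on, and uses cornerSubPix from org.opencv.imgproc.Imgproc):
     * <code>
     * Size patternSize = new Size(8, 6); // interior number of corners
     * MatOfPoint2f corners = new MatOfPoint2f(); // will be filled by the detected corners
     * boolean patternFound = Calib3d.findChessboardCorners(gray, patternSize, corners,
     *         Calib3d.CALIB_CB_ADAPTIVE_THRESH + Calib3d.CALIB_CB_NORMALIZE_IMAGE
     *         + Calib3d.CALIB_CB_FAST_CHECK);
     * if (patternFound) {
     *     Imgproc.cornerSubPix(gray, corners, new Size(11, 11), new Size(-1, -1),
     *             new TermCriteria(TermCriteria.EPS + TermCriteria.COUNT, 30, 0.1));
     * }
     * Calib3d.drawChessboardCorners(img, patternSize, corners, patternFound);
     * </code>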
     *
     * Sample usage of detecting and drawing chessboard corners (C++):
     * <code>
     * Size patternsize(8,6); //interior number of corners
     * Mat gray = ....; //source image
     * vector<Point2f> corners; //this will be filled by the detected corners
     *
     * //CALIB_CB_FAST_CHECK saves a lot of time on images
     * //that do not contain any chessboard corners
     * bool patternfound = findChessboardCorners(gray, patternsize, corners,
     *         CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE
     *         + CALIB_CB_FAST_CHECK);
     *
     * if(patternfound)
     *   cornerSubPix(gray, corners, Size(11, 11), Size(-1, -1),
     *     TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));
     *
     * drawChessboardCorners(img, patternsize, Mat(corners), patternfound);
     * </code>
     * <b>Note:</b> The function requires white space (like a square-thick border, the wider the better) around
     * the board to make the detection more robust in various environments. Otherwise, if there is no
     * border and the background is dark, the outer black squares cannot be segmented properly and so the
     * square grouping and ordering algorithm fails.
     *
     * Use gen_pattern.py (REF: tutorial_camera_calibration_pattern) to create checkerboard.
     * @return automatically generated
     */
    public static boolean findChessboardCorners(Mat image, Size patternSize, MatOfPoint2f corners, int flags) {
        Mat corners_mat = corners;
        return findChessboardCorners_0(image.nativeObj, patternSize.width, patternSize.height, corners_mat.nativeObj, flags);
    }

    /**
     * Finds the positions of internal corners of the chessboard.
     *
     * @param image Source chessboard view. It must be an 8-bit grayscale or color image.
     * @param patternSize Number of inner corners per chessboard row and column
     * ( patternSize = cv::Size(points_per_row,points_per_column) = cv::Size(columns,rows) ).
     * @param corners Output array of detected corners.
     *
     * This overload uses the default {@code flags} value of CALIB_CB_ADAPTIVE_THRESH +
     * CALIB_CB_NORMALIZE_IMAGE. The flags can be zero or a combination of the following values:
     * <ul>
     * <li>
     * REF: CALIB_CB_ADAPTIVE_THRESH Use adaptive thresholding to convert the image to black
     * and white, rather than a fixed threshold level (computed from the average image brightness).
     * </li>
     * <li>
     * REF: CALIB_CB_NORMALIZE_IMAGE Normalize the image gamma with equalizeHist before
     * applying fixed or adaptive thresholding.
     * </li>
     * <li>
     * REF: CALIB_CB_FILTER_QUADS Use additional criteria (like contour area, perimeter,
     * square-like shape) to filter out false quads extracted at the contour retrieval stage.
     * </li>
     * <li>
     * REF: CALIB_CB_FAST_CHECK Run a fast check on the image that looks for chessboard corners,
     * and shortcut the call if none is found. This can drastically speed up the call in the
     * degenerate condition when no chessboard is observed.
     * </li>
     * </ul>
     *
     * The function attempts to determine whether the input image is a view of the chessboard pattern and
     * locate the internal chessboard corners. The function returns a non-zero value if all of the corners
     * are found and they are placed in a certain order (row by row, left to right in every row).
     * Otherwise, if the function fails to find all the corners or reorder them, it returns 0. For example,
     * a regular chessboard has 8 x 8 squares and 7 x 7 internal corners, that is, points where the black
     * squares touch each other.
     * The detected coordinates are approximate, and to determine their positions
     * more accurately, the function calls cornerSubPix. You also may use the function cornerSubPix with
     * different parameters if returned coordinates are not accurate enough.
     *
     * Sample usage of detecting and drawing chessboard corners (C++):
     * <code>
     * Size patternsize(8,6); //interior number of corners
     * Mat gray = ....; //source image
     * vector<Point2f> corners; //this will be filled by the detected corners
     *
     * //CALIB_CB_FAST_CHECK saves a lot of time on images
     * //that do not contain any chessboard corners
     * bool patternfound = findChessboardCorners(gray, patternsize, corners,
     *         CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE
     *         + CALIB_CB_FAST_CHECK);
     *
     * if(patternfound)
     *   cornerSubPix(gray, corners, Size(11, 11), Size(-1, -1),
     *     TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));
     *
     * drawChessboardCorners(img, patternsize, Mat(corners), patternfound);
     * </code>
     * <b>Note:</b> The function requires white space (like a square-thick border, the wider the better) around
     * the board to make the detection more robust in various environments. Otherwise, if there is no
     * border and the background is dark, the outer black squares cannot be segmented properly and so the
     * square grouping and ordering algorithm fails.
     *
     * Use gen_pattern.py (REF: tutorial_camera_calibration_pattern) to create checkerboard.
     * @return automatically generated
     */
    public static boolean findChessboardCorners(Mat image, Size patternSize, MatOfPoint2f corners) {
        Mat corners_mat = corners;
        return findChessboardCorners_1(image.nativeObj, patternSize.width, patternSize.height, corners_mat.nativeObj);
    }


    //
    // C++: bool cv::checkChessboard(Mat img, Size size)
    //

    public static boolean checkChessboard(Mat img, Size size) {
        return checkChessboard_0(img.nativeObj, size.width, size.height);
    }


    //
    // C++: bool cv::findChessboardCornersSB(Mat image, Size patternSize, Mat& corners, int flags, Mat& meta)
    //

    /**
     * Finds the positions of internal corners of the chessboard using a sector based approach.
     *
     * @param image Source chessboard view. It must be an 8-bit grayscale or color image.
     * @param patternSize Number of inner corners per chessboard row and column
     * ( patternSize = cv::Size(points_per_row,points_per_column) = cv::Size(columns,rows) ).
     * @param corners Output array of detected corners.
     * @param flags Various operation flags that can be zero or a combination of the following values:
     * <ul>
     * <li>
     * REF: CALIB_CB_NORMALIZE_IMAGE Normalize the image gamma with equalizeHist before detection.
     * </li>
     * <li>
     * REF: CALIB_CB_EXHAUSTIVE Run an exhaustive search to improve detection rate.
     * </li>
     * <li>
     * REF: CALIB_CB_ACCURACY Up sample input image to improve sub-pixel accuracy due to aliasing effects.
     * </li>
     * <li>
     * REF: CALIB_CB_LARGER The detected pattern is allowed to be larger than patternSize (see description).
     * </li>
     * <li>
     * REF: CALIB_CB_MARKER The detected pattern must have a marker (see description).
     * This should be used if an accurate camera calibration is required.
     * </li>
     * </ul>
     * @param meta Optional output array of detected corners (CV_8UC1 and size = cv::Size(columns,rows)).
     * Each entry stands for one corner of the pattern and can have one of the following values:
     * <ul>
     * <li>
     * 0 = no meta data attached
     * </li>
     * <li>
     * 1 = left-top corner of a black cell
     * </li>
     * <li>
     * 2 = left-top corner of a white cell
     * </li>
     * <li>
     * 3 = left-top corner of a black cell with a white marker dot
     * </li>
     * <li>
     * 4 = left-top corner of a white cell with a black marker dot (pattern origin in case of markers otherwise first corner)
     * </li>
     * </ul>
     *
     * The function is analogous to #findChessboardCorners but uses a localized Radon
     * transformation approximated by box filters, which makes it more robust to all sorts of
     * noise and faster on larger images, and able to directly return the sub-pixel
     * position of the internal chessboard corners. The method is based on the paper
     * CITE: duda2018 "Accurate Detection and Localization of Checkerboard Corners for
     * Calibration", demonstrating that the returned sub-pixel positions are more
     * accurate than the ones returned by cornerSubPix, allowing a precise camera
     * calibration for demanding applications.
     *
     * In case the flags REF: CALIB_CB_LARGER or REF: CALIB_CB_MARKER are given,
     * the result can be recovered from the optional meta array. Both flags are
     * helpful to use calibration patterns exceeding the field of view of the camera.
     * These oversized patterns allow more accurate calibrations, as corners that are
     * as close as possible to the image borders can be utilized. For a
     * consistent coordinate system across all images, the optional marker (see image
     * below) can be used to move the origin of the board to the location where the
     * black circle is located.
     *
     * <b>Note:</b> The function requires a white border with roughly the same width as one
     * of the checkerboard fields around the whole board to improve the detection in
     * various environments. In addition, because of the localized Radon
     * transformation it is beneficial to use round corners for the field corners
     * which are located on the outside of the board. The following figure illustrates
     * a sample checkerboard optimized for the detection. However, any other checkerboard
     * can be used as well.
     *
     * Use gen_pattern.py (REF: tutorial_camera_calibration_pattern) to create checkerboard.
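     *
     * A minimal Java usage sketch (illustrative only; {@code gray} is an assumed 8-bit input image):
     * <code>
     * Mat corners = new Mat();
     * Mat meta = new Mat();
     * boolean found = Calib3d.findChessboardCornersSBWithMeta(gray, new Size(9, 6), corners,
     *         Calib3d.CALIB_CB_EXHAUSTIVE + Calib3d.CALIB_CB_ACCURACY, meta);
     * // corners holds sub-pixel corner positions; meta labels each corner as listed above
     * </code>
     *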
     * ![Checkerboard](pics/checkerboard_radon.png)
     * @return automatically generated
     */
    public static boolean findChessboardCornersSBWithMeta(Mat image, Size patternSize, Mat corners, int flags, Mat meta) {
        return findChessboardCornersSBWithMeta_0(image.nativeObj, patternSize.width, patternSize.height, corners.nativeObj, flags, meta.nativeObj);
    }


    //
    // C++: bool cv::findChessboardCornersSB(Mat image, Size patternSize, Mat& corners, int flags = 0)
    //

    public static boolean findChessboardCornersSB(Mat image, Size patternSize, Mat corners, int flags) {
        return findChessboardCornersSB_0(image.nativeObj, patternSize.width, patternSize.height, corners.nativeObj, flags);
    }

    public static boolean findChessboardCornersSB(Mat image, Size patternSize, Mat corners) {
        return findChessboardCornersSB_1(image.nativeObj, patternSize.width, patternSize.height, corners.nativeObj);
    }


    //
    // C++: Scalar cv::estimateChessboardSharpness(Mat image, Size patternSize, Mat corners, float rise_distance = 0.8F, bool vertical = false, Mat& sharpness = Mat())
    //

    /**
     * Estimates the sharpness of a detected chessboard.
     *
     * Image sharpness, as well as brightness, are critical parameters for accurate
     * camera calibration. To access these parameters for filtering out
     * problematic calibration images, this method calculates edge profiles by traveling from
     * black to white chessboard cell centers. Based on this, the number of pixels
     * required to transit from black to white is calculated. This width of the
     * transition area is a good indication of how sharply the chessboard is imaged
     * and should be below ~3.0 pixels.
     *
     * @param image Gray image used to find chessboard corners
     * @param patternSize Size of a found chessboard pattern
     * @param corners Corners found by #findChessboardCornersSB
     * @param rise_distance Rise distance 0.8 means 10% ... 90% of the final signal strength
     * @param vertical By default edge responses for horizontal lines are calculated
     * @param sharpness Optional output array with a sharpness value for calculated edge responses (see description)
     *
     * The optional sharpness array is of type CV_32FC1 and has for each calculated
     * profile one row with the following five entries:
     * 0 = x coordinate of the underlying edge in the image
     * 1 = y coordinate of the underlying edge in the image
     * 2 = width of the transition area (sharpness)
     * 3 = signal strength in the black cell (min brightness)
     * 4 = signal strength in the white cell (max brightness)
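     *
     * For example, in Java a calibration view might be filtered like this (a sketch; the 3.0 px
     * threshold follows the guideline above, and {@code gray}, {@code patternSize} and
     * {@code corners} are assumed to come from a previous detection):
     * <code>
     * Mat sharpness = new Mat();
     * Scalar stats = Calib3d.estimateChessboardSharpness(gray, patternSize, corners,
     *         0.8f, false, sharpness);
     * double avgSharpness = stats.val[0]; // average transition width, in pixels
     * boolean viewIsSharpEnough = avgSharpness < 3.0;
     * </code>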
     *
     * @return Scalar(average sharpness, average min brightness, average max brightness, 0)
     */
    public static Scalar estimateChessboardSharpness(Mat image, Size patternSize, Mat corners, float rise_distance, boolean vertical, Mat sharpness) {
        return new Scalar(estimateChessboardSharpness_0(image.nativeObj, patternSize.width, patternSize.height, corners.nativeObj, rise_distance, vertical, sharpness.nativeObj));
    }

    /**
     * Estimates the sharpness of a detected chessboard.
     *
     * Image sharpness, as well as brightness, are critical parameters for accurate
     * camera calibration. To access these parameters for filtering out
     * problematic calibration images, this method calculates edge profiles by traveling from
     * black to white chessboard cell centers. Based on this, the number of pixels
     * required to transit from black to white is calculated. This width of the
     * transition area is a good indication of how sharply the chessboard is imaged
     * and should be below ~3.0 pixels.
     *
     * @param image Gray image used to find chessboard corners
     * @param patternSize Size of a found chessboard pattern
     * @param corners Corners found by #findChessboardCornersSB
     * @param rise_distance Rise distance 0.8 means 10% ... 90% of the final signal strength
     * @param vertical By default edge responses for horizontal lines are calculated
     *
     * The optional sharpness array is of type CV_32FC1 and has for each calculated
     * profile one row with the following five entries:
     * 0 = x coordinate of the underlying edge in the image
     * 1 = y coordinate of the underlying edge in the image
     * 2 = width of the transition area (sharpness)
     * 3 = signal strength in the black cell (min brightness)
     * 4 = signal strength in the white cell (max brightness)
     *
     * @return Scalar(average sharpness, average min brightness, average max brightness, 0)
     */
    public static Scalar estimateChessboardSharpness(Mat image, Size patternSize, Mat corners, float rise_distance, boolean vertical) {
        return new Scalar(estimateChessboardSharpness_1(image.nativeObj, patternSize.width, patternSize.height, corners.nativeObj, rise_distance, vertical));
    }

    /**
     * Estimates the sharpness of a detected chessboard.
     *
     * Image sharpness, as well as brightness, are critical parameters for accurate
     * camera calibration. To access these parameters for filtering out
     * problematic calibration images, this method calculates edge profiles by traveling from
     * black to white chessboard cell centers. Based on this, the number of pixels
     * required to transit from black to white is calculated. This width of the
     * transition area is a good indication of how sharply the chessboard is imaged
     * and should be below ~3.0 pixels.
     *
     * @param image Gray image used to find chessboard corners
     * @param patternSize Size of a found chessboard pattern
     * @param corners Corners found by #findChessboardCornersSB
     * @param rise_distance Rise distance 0.8 means 10% ... 90% of the final signal strength
     *
     * The optional sharpness array is of type CV_32FC1 and has for each calculated
     * profile one row with the following five entries:
     * 0 = x coordinate of the underlying edge in the image
     * 1 = y coordinate of the underlying edge in the image
     * 2 = width of the transition area (sharpness)
     * 3 = signal strength in the black cell (min brightness)
     * 4 = signal strength in the white cell (max brightness)
     *
     * @return Scalar(average sharpness, average min brightness, average max brightness, 0)
     */
    public static Scalar estimateChessboardSharpness(Mat image, Size patternSize, Mat corners, float rise_distance) {
        return new Scalar(estimateChessboardSharpness_2(image.nativeObj, patternSize.width, patternSize.height, corners.nativeObj, rise_distance));
    }

    /**
     * Estimates the sharpness of a detected chessboard.
     *
     * Image sharpness, as well as brightness, are critical parameters for accurate
     * camera calibration.
     * To access these parameters for filtering out
     * problematic calibration images, this method calculates edge profiles by traveling from
     * black to white chessboard cell centers. Based on this, the number of pixels
     * required to transit from black to white is calculated. This width of the
     * transition area is a good indication of how sharply the chessboard is imaged
     * and should be below ~3.0 pixels.
     *
     * @param image Gray image used to find chessboard corners
     * @param patternSize Size of a found chessboard pattern
     * @param corners Corners found by #findChessboardCornersSB
     *
     * The optional sharpness array is of type CV_32FC1 and has for each calculated
     * profile one row with the following five entries:
     * 0 = x coordinate of the underlying edge in the image
     * 1 = y coordinate of the underlying edge in the image
     * 2 = width of the transition area (sharpness)
     * 3 = signal strength in the black cell (min brightness)
     * 4 = signal strength in the white cell (max brightness)
     *
     * @return Scalar(average sharpness, average min brightness, average max brightness, 0)
     */
    public static Scalar estimateChessboardSharpness(Mat image, Size patternSize, Mat corners) {
        return new Scalar(estimateChessboardSharpness_3(image.nativeObj, patternSize.width, patternSize.height, corners.nativeObj));
    }


    //
    // C++: bool cv::find4QuadCornerSubpix(Mat img, Mat& corners, Size region_size)
    //

    public static boolean find4QuadCornerSubpix(Mat img, Mat corners, Size region_size) {
        return find4QuadCornerSubpix_0(img.nativeObj, corners.nativeObj, region_size.width, region_size.height);
    }


    //
    // C++: void cv::drawChessboardCorners(Mat& image, Size patternSize, vector_Point2f corners, bool patternWasFound)
    //

    /**
     * Renders the detected chessboard corners.
     *
     * @param image Destination image. It must be an 8-bit color image.
     * @param patternSize Number of inner corners per chessboard row and column
     * (patternSize = cv::Size(points_per_row,points_per_column)).
     * @param corners Array of detected corners, the output of #findChessboardCorners.
     * @param patternWasFound Parameter indicating whether the complete board was found or not. The
     * return value of #findChessboardCorners should be passed here.
     *
     * The function draws individual chessboard corners detected either as red circles if the board was not
     * found, or as colored corners connected with lines if the board was found.
     */
    public static void drawChessboardCorners(Mat image, Size patternSize, MatOfPoint2f corners, boolean patternWasFound) {
        Mat corners_mat = corners;
        drawChessboardCorners_0(image.nativeObj, patternSize.width, patternSize.height, corners_mat.nativeObj, patternWasFound);
    }


    //
    // C++: void cv::drawFrameAxes(Mat& image, Mat cameraMatrix, Mat distCoeffs, Mat rvec, Mat tvec, float length, int thickness = 3)
    //

    /**
     * Draw axes of the world/object coordinate system from pose estimation. SEE: solvePnP
     *
     * @param image Input/output image. It must have 1 or 3 channels. The number of channels is not altered.
     * @param cameraMatrix Input 3x3 floating-point matrix of camera intrinsic parameters.
     * \(\cameramatrix{A}\)
     * @param distCoeffs Input vector of distortion coefficients
     * \(\distcoeffs\).
     * If the vector is empty, the zero distortion coefficients are assumed.
     * @param rvec Rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
     * the model coordinate system to the camera coordinate system.
     * @param tvec Translation vector.
     * @param length Length of the painted axes in the same unit as tvec (usually in meters).
     * @param thickness Line thickness of the painted axes.
     *
     * This function draws the axes of the world/object coordinate system w.r.t. the camera frame.
     * OX is drawn in red, OY in green and OZ in blue.
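     *
     * A Java sketch combining pose estimation and axis drawing (illustrative only;
     * {@code objectPoints} (MatOfPoint3f), {@code imagePoints} (MatOfPoint2f),
     * {@code cameraMatrix}, {@code distCoeffs} (MatOfDouble) and the target image {@code frame}
     * are assumed to be available):
     * <code>
     * Mat rvec = new Mat();
     * Mat tvec = new Mat();
     * Calib3d.solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec);
     * Calib3d.drawFrameAxes(frame, cameraMatrix, distCoeffs, rvec, tvec, 0.1f); // 0.1 m axes
     * </code>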
     */
    public static void drawFrameAxes(Mat image, Mat cameraMatrix, Mat distCoeffs, Mat rvec, Mat tvec, float length, int thickness) {
        drawFrameAxes_0(image.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvec.nativeObj, tvec.nativeObj, length, thickness);
    }

    /**
     * Draw axes of the world/object coordinate system from pose estimation. SEE: solvePnP
     *
     * @param image Input/output image. It must have 1 or 3 channels. The number of channels is not altered.
     * @param cameraMatrix Input 3x3 floating-point matrix of camera intrinsic parameters.
     * \(\cameramatrix{A}\)
     * @param distCoeffs Input vector of distortion coefficients
     * \(\distcoeffs\). If the vector is empty, the zero distortion coefficients are assumed.
     * @param rvec Rotation vector (see REF: Rodrigues ) that, together with tvec, brings points from
     * the model coordinate system to the camera coordinate system.
     * @param tvec Translation vector.
     * @param length Length of the painted axes in the same unit as tvec (usually in meters).
     *
     * This function draws the axes of the world/object coordinate system w.r.t. the camera frame.
     * OX is drawn in red, OY in green and OZ in blue. This overload uses the default line
     * thickness of 3.
     */
    public static void drawFrameAxes(Mat image, Mat cameraMatrix, Mat distCoeffs, Mat rvec, Mat tvec, float length) {
        drawFrameAxes_1(image.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvec.nativeObj, tvec.nativeObj, length);
    }


    //
    // C++: bool cv::findCirclesGrid(Mat image, Size patternSize, Mat& centers, int flags, Ptr_FeatureDetector blobDetector, CirclesGridFinderParameters parameters)
    //

    // Unknown type 'Ptr_FeatureDetector' (I), skipping the function


    //
    // C++: bool cv::findCirclesGrid(Mat image, Size patternSize, Mat& centers, int flags = CALIB_CB_SYMMETRIC_GRID, Ptr_FeatureDetector blobDetector = SimpleBlobDetector::create())
    //

    public static boolean findCirclesGrid(Mat image, Size patternSize, Mat centers, int flags) {
        return findCirclesGrid_0(image.nativeObj, patternSize.width, patternSize.height, centers.nativeObj, flags);
    }

    public static boolean findCirclesGrid(Mat image, Size patternSize, Mat centers) {
        return findCirclesGrid_2(image.nativeObj, patternSize.width, patternSize.height, centers.nativeObj);
    }


    //
    // C++: double cv::calibrateCamera(vector_Mat objectPoints, vector_Mat imagePoints, Size imageSize, Mat& cameraMatrix, Mat& distCoeffs, vector_Mat& rvecs, vector_Mat& tvecs, Mat& stdDeviationsIntrinsics, Mat& stdDeviationsExtrinsics, Mat& perViewErrors, int flags = 0, TermCriteria criteria = TermCriteria( TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON))
    //

    /**
     * Finds the camera intrinsic and extrinsic parameters from several views of a calibration
     * pattern.
     *
     * @param objectPoints In the new interface it is a vector of vectors of calibration pattern points in
     * the calibration pattern coordinate space (e.g. std::vector<std::vector<cv::Vec3f>>). The outer
     * vector contains as many elements as the number of pattern views. If the same calibration pattern
     * is shown in each view and it is fully visible, all the vectors will be the same. However, it is
     * possible to use partially occluded patterns or even different patterns in different views. Then,
     * the vectors will be different. Although the points are 3D, they all lie in the calibration pattern's
     * XY coordinate plane (thus 0 in the Z-coordinate), if the used calibration pattern is a planar rig.
     * In the old interface all the vectors of object points from different views are concatenated
     * together.
     * @param imagePoints In the new interface it is a vector of vectors of the projections of calibration
     * pattern points (e.g. std::vector<std::vector<cv::Vec2f>>). imagePoints.size() and
     * objectPoints.size(), and imagePoints[i].size() and objectPoints[i].size() for each i, must be equal,
     * respectively. In the old interface all the vectors of object points from different views are
     * concatenated together.
     * @param imageSize Size of the image used only to initialize the camera intrinsic matrix.
     * @param cameraMatrix Input/output 3x3 floating-point camera intrinsic matrix
     * \(\cameramatrix{A}\) . If REF: CALIB_USE_INTRINSIC_GUESS
     * and/or REF: CALIB_FIX_ASPECT_RATIO, REF: CALIB_FIX_PRINCIPAL_POINT or REF: CALIB_FIX_FOCAL_LENGTH
     * are specified, some or all of fx, fy, cx, cy must be initialized before calling the function.
     * @param distCoeffs Input/output vector of distortion coefficients
     * \(\distcoeffs\).
     * @param rvecs Output vector of rotation vectors (REF: Rodrigues ) estimated for each pattern view
     * (e.g. std::vector<cv::Mat>>). That is, each i-th rotation vector together with the corresponding
     * i-th translation vector (see the next output parameter description) brings the calibration pattern
     * from the object coordinate space (in which object points are specified) to the camera coordinate
     * space. In more technical terms, the tuple of the i-th rotation and translation vector performs
     * a change of basis from object coordinate space to camera coordinate space. Due to its duality, this
     * tuple is equivalent to the position of the calibration pattern with respect to the camera coordinate
     * space.
     * @param tvecs Output vector of translation vectors estimated for each pattern view, see parameter
     * description above.
     * @param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic
     * parameters. Order of deviations values:
     * \((f_x, f_y, c_x, c_y, k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6 , s_1, s_2, s_3,
     * s_4, \tau_x, \tau_y)\) If one of the parameters is not estimated, its deviation is equal to zero.
     * @param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic
     * parameters. Order of deviations values: \((R_0, T_0, \dotsc , R_{M - 1}, T_{M - 1})\) where M is
     * the number of pattern views. \(R_i, T_i\) are concatenated 1x3 vectors.
     * @param perViewErrors Output vector of the RMS re-projection error estimated for each pattern view.
     * @param flags Different flags that may be zero or a combination of the following values:
     * <ul>
     * <li>
     * REF: CALIB_USE_INTRINSIC_GUESS cameraMatrix contains valid initial values of
     * fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
     * center ( imageSize is used), and focal distances are computed in a least-squares fashion.
     * Note that if intrinsic parameters are known, there is no need to use this function just to
     * estimate extrinsic parameters. Use REF: solvePnP instead.
     * </li>
     * <li>
     * REF: CALIB_FIX_PRINCIPAL_POINT The principal point is not changed during the global
     * optimization. It stays at the center or at a different location specified when
     * REF: CALIB_USE_INTRINSIC_GUESS is set too.
     * </li>
     * <li>
     * REF: CALIB_FIX_ASPECT_RATIO The function considers only fy as a free parameter. The
     * ratio fx/fy stays the same as in the input cameraMatrix . When
     * REF: CALIB_USE_INTRINSIC_GUESS is not set, the actual input values of fx and fy are
     * ignored, only their ratio is computed and used further.
     * </li>
     * <li>
     * REF: CALIB_ZERO_TANGENT_DIST Tangential distortion coefficients \((p_1, p_2)\) are set
     * to zeros and stay zero.
     * </li>
     * <li>
     * REF: CALIB_FIX_FOCAL_LENGTH The focal length is not changed during the global optimization if
     * REF: CALIB_USE_INTRINSIC_GUESS is set.
     * </li>
     * <li>
     * REF: CALIB_FIX_K1,..., REF: CALIB_FIX_K6 The corresponding radial distortion
     * coefficient is not changed during the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is
     * set, the coefficient from the supplied distCoeffs matrix is used. Otherwise, it is set to 0.
     * </li>
     * <li>
     * REF: CALIB_RATIONAL_MODEL Coefficients k4, k5, and k6 are enabled. For backward
     * compatibility, this extra flag should be explicitly specified to make the
     * calibration function use the rational model and return 8 coefficients or more.
     * </li>
     * <li>
     * REF: CALIB_THIN_PRISM_MODEL Coefficients s1, s2, s3 and s4 are enabled. For backward
     * compatibility, this extra flag should be explicitly specified to make the
     * calibration function use the thin prism model and return 12 coefficients or more.
     * </li>
     * <li>
     * REF: CALIB_FIX_S1_S2_S3_S4 The thin prism distortion coefficients are not changed during
     * the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
     * supplied distCoeffs matrix is used. Otherwise, it is set to 0.
     * </li>
     * <li>
     * REF: CALIB_TILTED_MODEL Coefficients tauX and tauY are enabled. For backward
     * compatibility, this extra flag should be explicitly specified to make the
     * calibration function use the tilted sensor model and return 14 coefficients.
     * </li>
     * <li>
     * REF: CALIB_FIX_TAUX_TAUY The coefficients of the tilted sensor model are not changed during
     * the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
     * supplied distCoeffs matrix is used. Otherwise, it is set to 0.
     * </li>
     * </ul>
     * @param criteria Termination criteria for the iterative optimization algorithm.
     *
     * @return the overall RMS re-projection error.
     *
     * The function estimates the intrinsic camera parameters and extrinsic parameters for each of the
     * views.
     * The algorithm is based on CITE: Zhang2000 and CITE: BouguetMCT . The coordinates of 3D object
     * points and their corresponding 2D projections in each view must be specified. That may be achieved
     * by using an object with known geometry and easily detectable feature points. Such an object is
     * called a calibration rig or calibration pattern, and OpenCV has built-in support for a chessboard as
     * a calibration rig (see REF: findChessboardCorners). Currently, initialization of intrinsic
     * parameters (when REF: CALIB_USE_INTRINSIC_GUESS is not set) is only implemented for planar calibration
     * patterns (where Z-coordinates of the object points must be all zeros). 3D calibration rigs can also
     * be used as long as an initial cameraMatrix is provided.
     *
     * The algorithm performs the following steps:
     *
     * <ul>
     * <li>
     * Compute the initial intrinsic parameters (this option is only available for planar calibration
     * patterns) or read them from the input parameters. The distortion coefficients are all set to
     * zeros initially unless some of CALIB_FIX_K? are specified.
     * </li>
     * </ul>
     *
     * <ul>
     * <li>
     * Estimate the initial camera pose as if the intrinsic parameters were already known. This is
     * done using REF: solvePnP .
     * </li>
     * </ul>
     *
     * <ul>
     * <li>
     * Run the global Levenberg-Marquardt optimization algorithm to minimize the reprojection error,
     * that is, the total sum of squared distances between the observed feature points imagePoints and
     * the projected (using the current estimates for camera parameters and the poses) object points
     * objectPoints. See REF: projectPoints for details.
     * </li>
     * </ul>
     *
     * <b>Note:</b>
     * If you use a non-square (i.e. non-N-by-N) grid and REF: findChessboardCorners for calibration,
     * and REF: calibrateCamera returns bad values (zero distortion coefficients, \(c_x\) and
     * \(c_y\) very far from the image center, and/or large differences between \(f_x\) and
     * \(f_y\) (ratios of 10:1 or more)), then you are probably using patternSize=cvSize(rows,cols)
     * instead of using patternSize=cvSize(cols,rows) in REF: findChessboardCorners.
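     *
     * A minimal Java calibration sketch (illustrative only; {@code objectPoints} and
     * {@code imagePoints} are assumed to be lists with one Mat per view, filled as described above,
     * and {@code imageSize} the size of the calibration images):
     * <code>
     * Mat cameraMatrix = new Mat();
     * Mat distCoeffs = new Mat();
     * List<Mat> rvecs = new ArrayList<>();
     * List<Mat> tvecs = new ArrayList<>();
     * Mat stdInt = new Mat(), stdExt = new Mat(), perViewErrors = new Mat();
     * double rms = Calib3d.calibrateCameraExtended(objectPoints, imagePoints, imageSize,
     *         cameraMatrix, distCoeffs, rvecs, tvecs, stdInt, stdExt, perViewErrors);
     * // rms is the overall RMS re-projection error; perViewErrors holds one value per view
     * </code>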
     *
     * SEE:
     * calibrateCameraRO, findChessboardCorners, solvePnP, initCameraMatrix2D, stereoCalibrate,
     * undistort
     */
    public static double calibrateCameraExtended(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, Mat stdDeviationsIntrinsics, Mat stdDeviationsExtrinsics, Mat perViewErrors, int flags, TermCriteria criteria) {
        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
        Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints);
        Mat rvecs_mat = new Mat();
        Mat tvecs_mat = new Mat();
        double retVal = calibrateCameraExtended_0(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, stdDeviationsIntrinsics.nativeObj, stdDeviationsExtrinsics.nativeObj, perViewErrors.nativeObj, flags, criteria.type, criteria.maxCount, criteria.epsilon);
        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
        rvecs_mat.release();
        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
        tvecs_mat.release();
        return retVal;
    }

    /**
     * Finds the camera intrinsic and extrinsic parameters from several views of a calibration
     * pattern.
     *
     * @param objectPoints In the new interface it is a vector of vectors of calibration pattern points in
     * the calibration pattern coordinate space (e.g. std::vector<std::vector<cv::Vec3f>>). The outer
     * vector contains as many elements as the number of pattern views. If the same calibration pattern
     * is shown in each view and it is fully visible, all the vectors will be the same. However, it is
     * possible to use partially occluded patterns or even different patterns in different views. Then,
     * the vectors will be different. Although the points are 3D, they all lie in the calibration pattern's
     * XY coordinate plane (thus 0 in the Z-coordinate), if the used calibration pattern is a planar rig.
     * In the old interface all the vectors of object points from different views are concatenated
     * together.
     * @param imagePoints In the new interface it is a vector of vectors of the projections of calibration
     * pattern points (e.g. std::vector<std::vector<cv::Vec2f>>). imagePoints.size() and
     * objectPoints.size(), and imagePoints[i].size() and objectPoints[i].size() for each i, must be equal,
     * respectively. In the old interface all the vectors of object points from different views are
     * concatenated together.
     * @param imageSize Size of the image used only to initialize the camera intrinsic matrix.
     * @param cameraMatrix Input/output 3x3 floating-point camera intrinsic matrix
     * \(\cameramatrix{A}\) . If REF: CALIB_USE_INTRINSIC_GUESS
     * and/or REF: CALIB_FIX_ASPECT_RATIO, REF: CALIB_FIX_PRINCIPAL_POINT or REF: CALIB_FIX_FOCAL_LENGTH
     * are specified, some or all of fx, fy, cx, cy must be initialized before calling the function.
     * @param distCoeffs Input/output vector of distortion coefficients
     * \(\distcoeffs\).
     * @param rvecs Output vector of rotation vectors (REF: Rodrigues ) estimated for each pattern view
     * (e.g. std::vector<cv::Mat>>).
     * That is, each i-th rotation vector together with the corresponding
     * i-th translation vector (see the next output parameter description) brings the calibration pattern
     * from the object coordinate space (in which object points are specified) to the camera coordinate
     * space. In more technical terms, the tuple of the i-th rotation and translation vector performs
     * a change of basis from object coordinate space to camera coordinate space. Due to its duality, this
     * tuple is equivalent to the position of the calibration pattern with respect to the camera coordinate
     * space.
     * @param tvecs Output vector of translation vectors estimated for each pattern view, see parameter
     * description above.
     * @param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic
     * parameters. Order of deviations values:
     * \((f_x, f_y, c_x, c_y, k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6 , s_1, s_2, s_3,
     * s_4, \tau_x, \tau_y)\) If one of the parameters is not estimated, its deviation is equal to zero.
     * @param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic
     * parameters. Order of deviations values: \((R_0, T_0, \dotsc , R_{M - 1}, T_{M - 1})\) where M is
     * the number of pattern views. \(R_i, T_i\) are concatenated 1x3 vectors.
     * @param perViewErrors Output vector of the RMS re-projection error estimated for each pattern view.
     * @param flags Different flags that may be zero or a combination of the following values:
     * <ul>
     * <li>
     * REF: CALIB_USE_INTRINSIC_GUESS cameraMatrix contains valid initial values of
     * fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
     * center ( imageSize is used), and focal distances are computed in a least-squares fashion.
     * Note that if intrinsic parameters are known, there is no need to use this function just to
     * estimate extrinsic parameters. Use REF: solvePnP instead.
     * </li>
     * <li>
     * REF: CALIB_FIX_PRINCIPAL_POINT The principal point is not changed during the global
     * optimization. It stays at the center or at a different location specified when
     * REF: CALIB_USE_INTRINSIC_GUESS is set too.
     * </li>
     * <li>
     * REF: CALIB_FIX_ASPECT_RATIO The function considers only fy as a free parameter. The
     * ratio fx/fy stays the same as in the input cameraMatrix . When
     * REF: CALIB_USE_INTRINSIC_GUESS is not set, the actual input values of fx and fy are
     * ignored, only their ratio is computed and used further.
     * </li>
     * <li>
     * REF: CALIB_ZERO_TANGENT_DIST Tangential distortion coefficients \((p_1, p_2)\) are set
     * to zeros and stay zero.
     * </li>
     * <li>
     * REF: CALIB_FIX_FOCAL_LENGTH The focal length is not changed during the global optimization if
     * REF: CALIB_USE_INTRINSIC_GUESS is set.
     * </li>
     * <li>
     * REF: CALIB_FIX_K1,..., REF: CALIB_FIX_K6 The corresponding radial distortion
     * coefficient is not changed during the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is
     * set, the coefficient from the supplied distCoeffs matrix is used. Otherwise, it is set to 0.
     * </li>
     * <li>
     * REF: CALIB_RATIONAL_MODEL Coefficients k4, k5, and k6 are enabled. For backward
     * compatibility, this extra flag should be explicitly specified to make the
     * calibration function use the rational model and return 8 coefficients or more.
     * </li>
     * <li>
     * REF: CALIB_THIN_PRISM_MODEL Coefficients s1, s2, s3 and s4 are enabled. For backward
     * compatibility, this extra flag should be explicitly specified to make the
     * calibration function use the thin prism model and return 12 coefficients or more.
     * </li>
     * <li>
     * REF: CALIB_FIX_S1_S2_S3_S4 The thin prism distortion coefficients are not changed during
     * the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
     * supplied distCoeffs matrix is used. Otherwise, it is set to 0.
     * </li>
     * <li>
     * REF: CALIB_TILTED_MODEL Coefficients tauX and tauY are enabled. For backward
     * compatibility, this extra flag should be explicitly specified to make the
     * calibration function use the tilted sensor model and return 14 coefficients.
     * </li>
     * <li>
     * REF: CALIB_FIX_TAUX_TAUY The coefficients of the tilted sensor model are not changed during
     * the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
     * supplied distCoeffs matrix is used. Otherwise, it is set to 0.
     * </li>
     * </ul>
     *
     * This overload uses the default termination criteria
     * ( TermCriteria::COUNT + TermCriteria::EPS, 30 iterations, DBL_EPSILON ).
     *
     * @return the overall RMS re-projection error.
     *
     * The function estimates the intrinsic camera parameters and extrinsic parameters for each of the
     * views. The algorithm is based on CITE: Zhang2000 and CITE: BouguetMCT . The coordinates of 3D object
     * points and their corresponding 2D projections in each view must be specified. That may be achieved
     * by using an object with known geometry and easily detectable feature points. Such an object is
     * called a calibration rig or calibration pattern, and OpenCV has built-in support for a chessboard as
     * a calibration rig (see REF: findChessboardCorners). Currently, initialization of intrinsic
     * parameters (when REF: CALIB_USE_INTRINSIC_GUESS is not set) is only implemented for planar calibration
     * patterns (where Z-coordinates of the object points must be all zeros). 3D calibration rigs can also
     * be used as long as an initial cameraMatrix is provided.
     *
     * The algorithm performs the following steps:
     *
     * <ul>
     * <li>
     * Compute the initial intrinsic parameters (this option is only available for planar calibration
     * patterns) or read them from the input parameters. The distortion coefficients are all set to
     * zeros initially unless some of CALIB_FIX_K? are specified.
     * </li>
     * </ul>
     *
     * <ul>
     * <li>
     * Estimate the initial camera pose as if the intrinsic parameters were already known. This is
     * done using REF: solvePnP .
     * </li>
     * </ul>
     *
     * <ul>
     * <li>
     * Run the global Levenberg-Marquardt optimization algorithm to minimize the reprojection error,
     * that is, the total sum of squared distances between the observed feature points imagePoints and
     * the projected (using the current estimates for camera parameters and the poses) object points
     * objectPoints. See REF: projectPoints for details.
     * </li>
     * </ul>
     *
     * <b>Note:</b>
     * If you use a non-square (i.e. non-N-by-N) grid and REF: findChessboardCorners for calibration,
     * and REF: calibrateCamera returns bad values (zero distortion coefficients, \(c_x\) and
     * \(c_y\) very far from the image center, and/or large differences between \(f_x\) and
     * \(f_y\) (ratios of 10:1 or more)), then you are probably using patternSize=cvSize(rows,cols)
     * instead of using patternSize=cvSize(cols,rows) in REF: findChessboardCorners.
     *
     * SEE:
     * calibrateCameraRO, findChessboardCorners, solvePnP, initCameraMatrix2D, stereoCalibrate,
     * undistort
     */
    public static double calibrateCameraExtended(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, Mat stdDeviationsIntrinsics, Mat stdDeviationsExtrinsics, Mat perViewErrors, int flags) {
        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
        Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints);
        Mat rvecs_mat = new Mat();
        Mat tvecs_mat = new Mat();
        double retVal = calibrateCameraExtended_1(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, stdDeviationsIntrinsics.nativeObj, stdDeviationsExtrinsics.nativeObj, perViewErrors.nativeObj, flags);
        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
        rvecs_mat.release();
        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
        tvecs_mat.release();
        return retVal;
    }

    /**
     * Finds the camera intrinsic and extrinsic parameters from several views of a calibration
     * pattern.
     *
     * @param objectPoints In the new interface it is a vector of vectors of calibration pattern points in
     * the calibration pattern coordinate space (e.g. std::vector<std::vector<cv::Vec3f>>). The outer
     * vector contains as many elements as the number of pattern views. If the same calibration pattern
     * is shown in each view and it is fully visible, all the vectors will be the same. However, it is
     * possible to use partially occluded patterns or even different patterns in different views. Then,
     * the vectors will be different. Although the points are 3D, they all lie in the calibration pattern's
     * XY coordinate plane (thus 0 in the Z-coordinate), if the used calibration pattern is a planar rig.
     * In the old interface all the vectors of object points from different views are concatenated
     * together.
     * @param imagePoints In the new interface it is a vector of vectors of the projections of calibration
     * pattern points (e.g. std::vector<std::vector<cv::Vec2f>>). imagePoints.size() and
     * objectPoints.size(), and imagePoints[i].size() and objectPoints[i].size() for each i, must be equal,
     * respectively. In the old interface all the vectors of object points from different views are
     * concatenated together.
     * @param imageSize Size of the image used only to initialize the camera intrinsic matrix.
     * @param cameraMatrix Input/output 3x3 floating-point camera intrinsic matrix
     * \(\cameramatrix{A}\) . If REF: CALIB_USE_INTRINSIC_GUESS
     * and/or REF: CALIB_FIX_ASPECT_RATIO, REF: CALIB_FIX_PRINCIPAL_POINT or REF: CALIB_FIX_FOCAL_LENGTH
     * are specified, some or all of fx, fy, cx, cy must be initialized before calling the function.
     * @param distCoeffs Input/output vector of distortion coefficients
     * \(\distcoeffs\).
     * @param rvecs Output vector of rotation vectors (REF: Rodrigues ) estimated for each pattern view
     * (e.g. std::vector<cv::Mat>>). That is, each i-th rotation vector together with the corresponding
     * i-th translation vector (see the next output parameter description) brings the calibration pattern
     * from the object coordinate space (in which object points are specified) to the camera coordinate
     * space. In more technical terms, the tuple of the i-th rotation and translation vector performs
     * a change of basis from object coordinate space to camera coordinate space. Due to its duality, this
     * tuple is equivalent to the position of the calibration pattern with respect to the camera coordinate
     * space.
     * @param tvecs Output vector of translation vectors estimated for each pattern view, see parameter
     * description above.
     * @param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic
     * parameters. Order of deviations values:
     * \((f_x, f_y, c_x, c_y, k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6 , s_1, s_2, s_3,
     * s_4, \tau_x, \tau_y)\) If one of the parameters is not estimated, its deviation is equal to zero.
     * @param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic
     * parameters. Order of deviations values: \((R_0, T_0, \dotsc , R_{M - 1}, T_{M - 1})\) where M is
     * the number of pattern views. \(R_i, T_i\) are concatenated 1x3 vectors.
     * @param perViewErrors Output vector of the RMS re-projection error estimated for each pattern view.
     *
     * This overload uses the default {@code flags} value of 0. The flags may be zero or a
     * combination of the following values:
     * <ul>
     * <li>
     * REF: CALIB_USE_INTRINSIC_GUESS cameraMatrix contains valid initial values of
     * fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
     * center ( imageSize is used), and focal distances are computed in a least-squares fashion.
     * Note that if intrinsic parameters are known, there is no need to use this function just to
     * estimate extrinsic parameters. Use REF: solvePnP instead.
     * </li>
     * <li>
     * REF: CALIB_FIX_PRINCIPAL_POINT The principal point is not changed during the global
     * optimization. It stays at the center or at a different location specified when
     * REF: CALIB_USE_INTRINSIC_GUESS is set too.
     * </li>
     * <li>
     * REF: CALIB_FIX_ASPECT_RATIO The function considers only fy as a free parameter. The
     * ratio fx/fy stays the same as in the input cameraMatrix . When
     * REF: CALIB_USE_INTRINSIC_GUESS is not set, the actual input values of fx and fy are
     * ignored, only their ratio is computed and used further.
     * </li>
     * <li>
     * REF: CALIB_ZERO_TANGENT_DIST Tangential distortion coefficients \((p_1, p_2)\) are set
     * to zeros and stay zero.
     * </li>
     * <li>
     * REF: CALIB_FIX_FOCAL_LENGTH The focal length is not changed during the global optimization if
     * REF: CALIB_USE_INTRINSIC_GUESS is set.
     * </li>
     * <li>
     * REF: CALIB_FIX_K1,..., REF: CALIB_FIX_K6 The corresponding radial distortion
     * coefficient is not changed during the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is
     * set, the coefficient from the supplied distCoeffs matrix is used. Otherwise, it is set to 0.
     * </li>
     * <li>
     * REF: CALIB_RATIONAL_MODEL Coefficients k4, k5, and k6 are enabled.
     * For backward compatibility, this extra flag should be explicitly specified to make the
     * calibration function use the rational model and return 8 coefficients or more.
     * </li>
     * <li>
     * REF: CALIB_THIN_PRISM_MODEL Coefficients s1, s2, s3 and s4 are enabled. For backward
     * compatibility, this extra flag should be explicitly specified to make the
     * calibration function use the thin prism model and return 12 coefficients or more.
     * </li>
     * <li>
     * REF: CALIB_FIX_S1_S2_S3_S4 The thin prism distortion coefficients are not changed during
     * the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
     * supplied distCoeffs matrix is used. Otherwise, it is set to 0.
     * </li>
     * <li>
     * REF: CALIB_TILTED_MODEL Coefficients tauX and tauY are enabled. For backward
     * compatibility, this extra flag should be explicitly specified to make the
     * calibration function use the tilted sensor model and return 14 coefficients.
     * </li>
     * <li>
     * REF: CALIB_FIX_TAUX_TAUY The coefficients of the tilted sensor model are not changed during
     * the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
     * supplied distCoeffs matrix is used. Otherwise, it is set to 0.
     * </li>
     * </ul>
     *
     * @return the overall RMS re-projection error.
     *
     * The function estimates the intrinsic camera parameters and extrinsic parameters for each of the
     * views. The algorithm is based on CITE: Zhang2000 and CITE: BouguetMCT . The coordinates of 3D object
     * points and their corresponding 2D projections in each view must be specified. That may be achieved
     * by using an object with known geometry and easily detectable feature points. Such an object is
     * called a calibration rig or calibration pattern, and OpenCV has built-in support for a chessboard as
     * a calibration rig (see REF: findChessboardCorners). Currently, initialization of intrinsic
     * parameters (when REF: CALIB_USE_INTRINSIC_GUESS is not set) is only implemented for planar calibration
     * patterns (where Z-coordinates of the object points must be all zeros). 3D calibration rigs can also
     * be used as long as an initial cameraMatrix is provided.
     *
     * The algorithm performs the following steps:
     *
     * <ul>
     * <li>
     * Compute the initial intrinsic parameters (this option is only available for planar calibration
     * patterns) or read them from the input parameters. The distortion coefficients are all set to
     * zeros initially unless some of CALIB_FIX_K? are specified.
     * </li>
     * </ul>
     *
     * <ul>
     * <li>
     * Estimate the initial camera pose as if the intrinsic parameters were already known. This is
     * done using REF: solvePnP .
     * </li>
     * </ul>
     *
     * <ul>
     * <li>
     * Run the global Levenberg-Marquardt optimization algorithm to minimize the reprojection error,
     * that is, the total sum of squared distances between the observed feature points imagePoints and
     * the projected (using the current estimates for camera parameters and the poses) object points
     * objectPoints. See REF: projectPoints for details.
     * </li>
     * </ul>
     *
     * <b>Note:</b>
     * If you use a non-square (i.e.
non-N-by-N) grid and REF: findChessboardCorners for calibration, 4248 * and REF: calibrateCamera returns bad values (zero distortion coefficients, \(c_x\) and 4249 * \(c_y\) very far from the image center, and/or large differences between \(f_x\) and 4250 * \(f_y\) (ratios of 10:1 or more)), then you are probably using patternSize=cvSize(rows,cols) 4251 * instead of using patternSize=cvSize(cols,rows) in REF: findChessboardCorners. 4252 * 4253 * SEE: 4254 * calibrateCameraRO, findChessboardCorners, solvePnP, initCameraMatrix2D, stereoCalibrate, 4255 * undistort 4256 */ 4257 public static double calibrateCameraExtended(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, Mat stdDeviationsIntrinsics, Mat stdDeviationsExtrinsics, Mat perViewErrors) { 4258 Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints); 4259 Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints); 4260 Mat rvecs_mat = new Mat(); 4261 Mat tvecs_mat = new Mat(); 4262 double retVal = calibrateCameraExtended_2(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, stdDeviationsIntrinsics.nativeObj, stdDeviationsExtrinsics.nativeObj, perViewErrors.nativeObj); 4263 Converters.Mat_to_vector_Mat(rvecs_mat, rvecs); 4264 rvecs_mat.release(); 4265 Converters.Mat_to_vector_Mat(tvecs_mat, tvecs); 4266 tvecs_mat.release(); 4267 return retVal; 4268 } 4269 4270 4271 // 4272 // C++: double cv::calibrateCamera(vector_Mat objectPoints, vector_Mat imagePoints, Size imageSize, Mat& cameraMatrix, Mat& distCoeffs, vector_Mat& rvecs, vector_Mat& tvecs, int flags = 0, TermCriteria criteria = TermCriteria( TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON)) 4273 // 4274 4275 public static double calibrateCamera(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, int flags, TermCriteria criteria) { 4276 Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints); 4277 Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints); 4278 Mat rvecs_mat = new Mat(); 4279 Mat tvecs_mat = new Mat(); 4280 double retVal = calibrateCamera_0(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, flags, criteria.type, criteria.maxCount, criteria.epsilon); 4281 Converters.Mat_to_vector_Mat(rvecs_mat, rvecs); 4282 rvecs_mat.release(); 4283 Converters.Mat_to_vector_Mat(tvecs_mat, tvecs); 4284 tvecs_mat.release(); 4285 return retVal; 4286 } 4287 4288 public static double calibrateCamera(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, int flags) { 4289 Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints); 4290 Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints); 4291 Mat rvecs_mat = new Mat(); 4292 Mat tvecs_mat = new Mat(); 4293 double retVal = calibrateCamera_1(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, flags); 4294 Converters.Mat_to_vector_Mat(rvecs_mat, rvecs); 4295 rvecs_mat.release(); 4296 Converters.Mat_to_vector_Mat(tvecs_mat, tvecs); 4297 tvecs_mat.release(); 4298 return retVal; 4299 } 
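    // Editor's sketch (not part of the generated bindings): a typical calibrateCamera call for a
    // planar chessboard. The board geometry (9x6 inner corners, 25 mm squares) and all names here
    // are assumptions for illustration; detectedCorners is expected to hold one MatOfPoint2f of
    // corners per view, e.g. from findChessboardCorners refined with cornerSubPix.
    private static double exampleCalibrateCamera(List<MatOfPoint2f> detectedCorners, Size imageSize) {
        final int cols = 9, rows = 6;        // inner corners per board row/column (assumption)
        final float squareSize = 25.0f;      // square edge length; tvecs come out in this unit
        // One planar board model (Z = 0) in pattern coordinates, reused for every view.
        List<org.opencv.core.Point3> model = new ArrayList<org.opencv.core.Point3>();
        for (int r = 0; r < rows; r++)
            for (int c = 0; c < cols; c++)
                model.add(new org.opencv.core.Point3(c * squareSize, r * squareSize, 0));
        MatOfPoint3f boardModel = new MatOfPoint3f();
        boardModel.fromList(model);
        List<Mat> objectPoints = new ArrayList<Mat>();
        List<Mat> imagePoints = new ArrayList<Mat>();
        for (MatOfPoint2f corners : detectedCorners) {
            objectPoints.add(boardModel);    // the same 3D model for each view
            imagePoints.add(corners);
        }
        Mat cameraMatrix = new Mat();        // filled by the call (no intrinsic guess, flags = 0)
        Mat distCoeffs = new Mat();
        List<Mat> rvecs = new ArrayList<Mat>();
        List<Mat> tvecs = new ArrayList<Mat>();
        return calibrateCamera(objectPoints, imagePoints, imageSize, cameraMatrix, distCoeffs, rvecs, tvecs);
    }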
4300
4301 public static double calibrateCamera(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs) {
4302 Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
4303 Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints);
4304 Mat rvecs_mat = new Mat();
4305 Mat tvecs_mat = new Mat();
4306 double retVal = calibrateCamera_2(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj);
4307 Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
4308 rvecs_mat.release();
4309 Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
4310 tvecs_mat.release();
4311 return retVal;
4312 }
4313
4314
4315 //
4316 // C++: double cv::calibrateCameraRO(vector_Mat objectPoints, vector_Mat imagePoints, Size imageSize, int iFixedPoint, Mat& cameraMatrix, Mat& distCoeffs, vector_Mat& rvecs, vector_Mat& tvecs, Mat& newObjPoints, Mat& stdDeviationsIntrinsics, Mat& stdDeviationsExtrinsics, Mat& stdDeviationsObjPoints, Mat& perViewErrors, int flags = 0, TermCriteria criteria = TermCriteria( TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON))
4317 //
4318
4319 /**
4320 * Finds the camera intrinsic and extrinsic parameters from several views of a calibration pattern.
4321 *
4322 * This function is an extension of #calibrateCamera with the object-releasing method proposed in
4323 * CITE: strobl2011iccv. In many common cases with inaccurate, unmeasured, roughly planar
4324 * targets (calibration plates), this method can dramatically improve the precision of the estimated
4325 * camera parameters. Both the object-releasing method and the standard method are supported by this
4326 * function. Use the parameter <b>iFixedPoint</b> for method selection. In the internal implementation,
4327 * #calibrateCamera is a wrapper for this function.
4328 *
4329 * @param objectPoints Vector of vectors of calibration pattern points in the calibration pattern
4330 * coordinate space. See #calibrateCamera for details. If the object-releasing method is to be used,
4331 * the identical calibration board must be used in each view, it must be fully visible, and all
4332 * objectPoints[i] must be the same and all points should be roughly close to a plane. <b>The calibration
4333 * target has to be rigid, or at least static if the camera (rather than the calibration target) is
4334 * shifted for grabbing images.</b>
4335 * @param imagePoints Vector of vectors of the projections of calibration pattern points. See
4336 * #calibrateCamera for details.
4337 * @param imageSize Size of the image used only to initialize the intrinsic camera matrix.
4338 * @param iFixedPoint The index of the 3D object point in objectPoints[0] to be fixed. It also acts as
4339 * a switch for calibration method selection. If the object-releasing method is to be used, pass a
4340 * value in the range [1, objectPoints[0].size()-2]; a value outside this range selects the
4341 * standard calibration method. Usually the top-right corner point of the calibration
4342 * board grid is recommended to be fixed when the object-releasing method is utilized. According to
4343 * CITE: strobl2011iccv, two other points are also fixed. In this implementation, objectPoints[0].front
4344 * and objectPoints[0].back.z are used.
With the object-releasing method, accurate rvecs, tvecs and
4345 * newObjPoints are only possible if the coordinates of these three fixed points are accurate enough.
4346 * @param cameraMatrix Output 3x3 floating-point camera matrix. See #calibrateCamera for details.
4347 * @param distCoeffs Output vector of distortion coefficients. See #calibrateCamera for details.
4348 * @param rvecs Output vector of rotation vectors estimated for each pattern view. See #calibrateCamera
4349 * for details.
4350 * @param tvecs Output vector of translation vectors estimated for each pattern view.
4351 * @param newObjPoints The updated output vector of calibration pattern points. The coordinates might
4352 * be scaled based on three fixed points. The returned coordinates are accurate only if the
4353 * above-mentioned three fixed points are accurate. If not needed, an empty Mat can be passed in. This parameter
4354 * is ignored with the standard calibration method.
4355 * @param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic parameters.
4356 * See #calibrateCamera for details.
4357 * @param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic parameters.
4358 * See #calibrateCamera for details.
4359 * @param stdDeviationsObjPoints Output vector of standard deviations estimated for refined coordinates
4360 * of calibration pattern points. It has the same size and order as the objectPoints[0] vector. This
4361 * parameter is ignored with the standard calibration method.
4362 * @param perViewErrors Output vector of the RMS re-projection error estimated for each pattern view.
4363 * @param flags Different flags that may be zero or a combination of some predefined values. See
4364 * #calibrateCamera for details. If the object-releasing method is used, the calibration time may
4365 * be much longer. CALIB_USE_QR or CALIB_USE_LU can be used for faster calibration, at the cost of
4366 * potentially lower precision and stability in some rare cases.
4367 * @param criteria Termination criteria for the iterative optimization algorithm.
4368 *
4369 * @return the overall RMS re-projection error.
4370 *
4371 * The function estimates the intrinsic camera parameters and extrinsic parameters for each of the
4372 * views. The algorithm is based on CITE: Zhang2000, CITE: BouguetMCT and CITE: strobl2011iccv. See
4373 * #calibrateCamera for other detailed explanations.
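 *
 * A minimal calling sketch (editor's addition, not from the original documentation; the variable
 * names and the row-major corner ordering are assumptions):
 * <pre>{@code
 * // objectPoints/imagePoints prepared as for calibrateCamera; every view shares the same
 * // planar board model with `cols` corners per row, ordered row by row from the top-left.
 * int iFixedPoint = cols - 1;   // top-right corner, as recommended above
 * Mat cameraMatrix = new Mat(), distCoeffs = new Mat(), newObjPoints = new Mat();
 * Mat stdInt = new Mat(), stdExt = new Mat(), stdObj = new Mat(), perViewErrors = new Mat();
 * List<Mat> rvecs = new ArrayList<Mat>(), tvecs = new ArrayList<Mat>();
 * double rms = Calib3d.calibrateCameraROExtended(objectPoints, imagePoints, imageSize,
 *         iFixedPoint, cameraMatrix, distCoeffs, rvecs, tvecs, newObjPoints,
 *         stdInt, stdExt, stdObj, perViewErrors, 0,
 *         new TermCriteria(TermCriteria.COUNT + TermCriteria.EPS, 30, 1e-9));
 * }</pre>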
4374 * SEE:
4375 * calibrateCamera, findChessboardCorners, solvePnP, initCameraMatrix2D, stereoCalibrate, undistort
4376 */
4377 public static double calibrateCameraROExtended(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize, int iFixedPoint, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, Mat newObjPoints, Mat stdDeviationsIntrinsics, Mat stdDeviationsExtrinsics, Mat stdDeviationsObjPoints, Mat perViewErrors, int flags, TermCriteria criteria) {
4378 Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
4379 Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints);
4380 Mat rvecs_mat = new Mat();
4381 Mat tvecs_mat = new Mat();
4382 double retVal = calibrateCameraROExtended_0(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height, iFixedPoint, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, newObjPoints.nativeObj, stdDeviationsIntrinsics.nativeObj, stdDeviationsExtrinsics.nativeObj, stdDeviationsObjPoints.nativeObj, perViewErrors.nativeObj, flags, criteria.type, criteria.maxCount, criteria.epsilon);
4383 Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
4384 rvecs_mat.release();
4385 Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
4386 tvecs_mat.release();
4387 return retVal;
4388 }
4389
4390 /**
4391 * Finds the camera intrinsic and extrinsic parameters from several views of a calibration pattern.
4392 *
4393 * This function is an extension of #calibrateCamera with the object-releasing method proposed in
4394 * CITE: strobl2011iccv. In many common cases with inaccurate, unmeasured, roughly planar
4395 * targets (calibration plates), this method can dramatically improve the precision of the estimated
4396 * camera parameters. Both the object-releasing method and the standard method are supported by this
4397 * function. Use the parameter <b>iFixedPoint</b> for method selection. In the internal implementation,
4398 * #calibrateCamera is a wrapper for this function.
4399 *
4400 * @param objectPoints Vector of vectors of calibration pattern points in the calibration pattern
4401 * coordinate space. See #calibrateCamera for details. If the object-releasing method is to be used,
4402 * the identical calibration board must be used in each view, it must be fully visible, and all
4403 * objectPoints[i] must be the same and all points should be roughly close to a plane. <b>The calibration
4404 * target has to be rigid, or at least static if the camera (rather than the calibration target) is
4405 * shifted for grabbing images.</b>
4406 * @param imagePoints Vector of vectors of the projections of calibration pattern points. See
4407 * #calibrateCamera for details.
4408 * @param imageSize Size of the image used only to initialize the intrinsic camera matrix.
4409 * @param iFixedPoint The index of the 3D object point in objectPoints[0] to be fixed. It also acts as
4410 * a switch for calibration method selection. If the object-releasing method is to be used, pass a
4411 * value in the range [1, objectPoints[0].size()-2]; a value outside this range selects the
4412 * standard calibration method. Usually the top-right corner point of the calibration
4413 * board grid is recommended to be fixed when the object-releasing method is utilized. According to
4414 * CITE: strobl2011iccv, two other points are also fixed. In this implementation, objectPoints[0].front
4415 * and objectPoints[0].back.z are used.
With the object-releasing method, accurate rvecs, tvecs and
4416 * newObjPoints are only possible if the coordinates of these three fixed points are accurate enough.
4417 * @param cameraMatrix Output 3x3 floating-point camera matrix. See #calibrateCamera for details.
4418 * @param distCoeffs Output vector of distortion coefficients. See #calibrateCamera for details.
4419 * @param rvecs Output vector of rotation vectors estimated for each pattern view. See #calibrateCamera
4420 * for details.
4421 * @param tvecs Output vector of translation vectors estimated for each pattern view.
4422 * @param newObjPoints The updated output vector of calibration pattern points. The coordinates might
4423 * be scaled based on three fixed points. The returned coordinates are accurate only if the
4424 * above-mentioned three fixed points are accurate. If not needed, an empty Mat can be passed in. This parameter
4425 * is ignored with the standard calibration method.
4426 * @param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic parameters.
4427 * See #calibrateCamera for details.
4428 * @param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic parameters.
4429 * See #calibrateCamera for details.
4430 * @param stdDeviationsObjPoints Output vector of standard deviations estimated for refined coordinates
4431 * of calibration pattern points. It has the same size and order as the objectPoints[0] vector. This
4432 * parameter is ignored with the standard calibration method.
4433 * @param perViewErrors Output vector of the RMS re-projection error estimated for each pattern view.
4434 * @param flags Different flags that may be zero or a combination of some predefined values. See
4435 * #calibrateCamera for details. If the object-releasing method is used, the calibration time may
4436 * be much longer. CALIB_USE_QR or CALIB_USE_LU can be used for faster calibration, at the cost of
4437 * potentially lower precision and stability in some rare cases.
4438 *
4439 * @return the overall RMS re-projection error.
4440 *
4441 * The function estimates the intrinsic camera parameters and extrinsic parameters for each of the
4442 * views. The algorithm is based on CITE: Zhang2000, CITE: BouguetMCT and CITE: strobl2011iccv. See
4443 * #calibrateCamera for other detailed explanations.
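 *
 * Treating perViewErrors as an Mx1 CV_64F matrix, a quick per-view quality check can look like
 * this (editor's sketch; the 1.0-pixel threshold is an arbitrary example value):
 * <pre>{@code
 * for (int i = 0; i < perViewErrors.rows(); i++) {
 *     double err = perViewErrors.get(i, 0)[0];   // RMS reprojection error of view i
 *     if (err > 1.0)
 *         System.out.println("view " + i + " looks like an outlier: " + err + " px");
 * }
 * }</pre>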
4444 * SEE:
4445 * calibrateCamera, findChessboardCorners, solvePnP, initCameraMatrix2D, stereoCalibrate, undistort
4446 */
4447 public static double calibrateCameraROExtended(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize, int iFixedPoint, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, Mat newObjPoints, Mat stdDeviationsIntrinsics, Mat stdDeviationsExtrinsics, Mat stdDeviationsObjPoints, Mat perViewErrors, int flags) {
4448 Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
4449 Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints);
4450 Mat rvecs_mat = new Mat();
4451 Mat tvecs_mat = new Mat();
4452 double retVal = calibrateCameraROExtended_1(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height, iFixedPoint, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, newObjPoints.nativeObj, stdDeviationsIntrinsics.nativeObj, stdDeviationsExtrinsics.nativeObj, stdDeviationsObjPoints.nativeObj, perViewErrors.nativeObj, flags);
4453 Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
4454 rvecs_mat.release();
4455 Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
4456 tvecs_mat.release();
4457 return retVal;
4458 }
4459
4460 /**
4461 * Finds the camera intrinsic and extrinsic parameters from several views of a calibration pattern.
4462 *
4463 * This function is an extension of #calibrateCamera with the object-releasing method proposed in
4464 * CITE: strobl2011iccv. In many common cases with inaccurate, unmeasured, roughly planar
4465 * targets (calibration plates), this method can dramatically improve the precision of the estimated
4466 * camera parameters. Both the object-releasing method and the standard method are supported by this
4467 * function. Use the parameter <b>iFixedPoint</b> for method selection. In the internal implementation,
4468 * #calibrateCamera is a wrapper for this function.
4469 *
4470 * @param objectPoints Vector of vectors of calibration pattern points in the calibration pattern
4471 * coordinate space. See #calibrateCamera for details. If the object-releasing method is to be used,
4472 * the identical calibration board must be used in each view, it must be fully visible, and all
4473 * objectPoints[i] must be the same and all points should be roughly close to a plane. <b>The calibration
4474 * target has to be rigid, or at least static if the camera (rather than the calibration target) is
4475 * shifted for grabbing images.</b>
4476 * @param imagePoints Vector of vectors of the projections of calibration pattern points. See
4477 * #calibrateCamera for details.
4478 * @param imageSize Size of the image used only to initialize the intrinsic camera matrix.
4479 * @param iFixedPoint The index of the 3D object point in objectPoints[0] to be fixed. It also acts as
4480 * a switch for calibration method selection. If the object-releasing method is to be used, pass a
4481 * value in the range [1, objectPoints[0].size()-2]; a value outside this range selects the
4482 * standard calibration method. Usually the top-right corner point of the calibration
4483 * board grid is recommended to be fixed when the object-releasing method is utilized. According to
4484 * CITE: strobl2011iccv, two other points are also fixed. In this implementation, objectPoints[0].front
4485 * and objectPoints[0].back.z are used.
With the object-releasing method, accurate rvecs, tvecs and
4486 * newObjPoints are only possible if the coordinates of these three fixed points are accurate enough.
4487 * @param cameraMatrix Output 3x3 floating-point camera matrix. See #calibrateCamera for details.
4488 * @param distCoeffs Output vector of distortion coefficients. See #calibrateCamera for details.
4489 * @param rvecs Output vector of rotation vectors estimated for each pattern view. See #calibrateCamera
4490 * for details.
4491 * @param tvecs Output vector of translation vectors estimated for each pattern view.
4492 * @param newObjPoints The updated output vector of calibration pattern points. The coordinates might
4493 * be scaled based on three fixed points. The returned coordinates are accurate only if the
4494 * above-mentioned three fixed points are accurate. If not needed, an empty Mat can be passed in. This parameter
4495 * is ignored with the standard calibration method.
4496 * @param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic parameters.
4497 * See #calibrateCamera for details.
4498 * @param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic parameters.
4499 * See #calibrateCamera for details.
4500 * @param stdDeviationsObjPoints Output vector of standard deviations estimated for refined coordinates
4501 * of calibration pattern points. It has the same size and order as the objectPoints[0] vector. This
4502 * parameter is ignored with the standard calibration method.
4503 * @param perViewErrors Output vector of the RMS re-projection error estimated for each pattern view.
4504 * For the available calibration flags, see #calibrateCamera; this overload uses the default flags. If
4505 * the object-releasing method is used, the calibration time may be much longer. CALIB_USE_QR or CALIB_USE_LU
4506 * can be used for faster calibration, at the cost of potentially lower precision and stability in some rare cases.
4507 *
4508 * @return the overall RMS re-projection error.
4509 *
4510 * The function estimates the intrinsic camera parameters and extrinsic parameters for each of the
4511 * views. The algorithm is based on CITE: Zhang2000, CITE: BouguetMCT and CITE: strobl2011iccv. See
4512 * #calibrateCamera for other detailed explanations.
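 *
 * Each returned rvecs.get(i)/tvecs.get(i) pair encodes the board pose in view i; a sketch of
 * expanding it into a 3x3 rotation matrix with REF: Rodrigues (editor's addition):
 * <pre>{@code
 * Mat R = new Mat();
 * Calib3d.Rodrigues(rvecs.get(i), R);   // 3x1 rotation vector -> 3x3 rotation matrix
 * // A board point X_obj then maps into the camera frame as R * X_obj + tvecs.get(i).
 * }</pre>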
4513 * SEE: 4514 * calibrateCamera, findChessboardCorners, solvePnP, initCameraMatrix2D, stereoCalibrate, undistort 4515 */ 4516 public static double calibrateCameraROExtended(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize, int iFixedPoint, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, Mat newObjPoints, Mat stdDeviationsIntrinsics, Mat stdDeviationsExtrinsics, Mat stdDeviationsObjPoints, Mat perViewErrors) { 4517 Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints); 4518 Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints); 4519 Mat rvecs_mat = new Mat(); 4520 Mat tvecs_mat = new Mat(); 4521 double retVal = calibrateCameraROExtended_2(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height, iFixedPoint, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, newObjPoints.nativeObj, stdDeviationsIntrinsics.nativeObj, stdDeviationsExtrinsics.nativeObj, stdDeviationsObjPoints.nativeObj, perViewErrors.nativeObj); 4522 Converters.Mat_to_vector_Mat(rvecs_mat, rvecs); 4523 rvecs_mat.release(); 4524 Converters.Mat_to_vector_Mat(tvecs_mat, tvecs); 4525 tvecs_mat.release(); 4526 return retVal; 4527 } 4528 4529 4530 // 4531 // C++: double cv::calibrateCameraRO(vector_Mat objectPoints, vector_Mat imagePoints, Size imageSize, int iFixedPoint, Mat& cameraMatrix, Mat& distCoeffs, vector_Mat& rvecs, vector_Mat& tvecs, Mat& newObjPoints, int flags = 0, TermCriteria criteria = TermCriteria( TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON)) 4532 // 4533 4534 public static double calibrateCameraRO(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize, int iFixedPoint, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, Mat newObjPoints, int flags, TermCriteria criteria) { 4535 Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints); 4536 Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints); 4537 Mat rvecs_mat = new Mat(); 4538 Mat tvecs_mat = new Mat(); 4539 double retVal = calibrateCameraRO_0(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height, iFixedPoint, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, newObjPoints.nativeObj, flags, criteria.type, criteria.maxCount, criteria.epsilon); 4540 Converters.Mat_to_vector_Mat(rvecs_mat, rvecs); 4541 rvecs_mat.release(); 4542 Converters.Mat_to_vector_Mat(tvecs_mat, tvecs); 4543 tvecs_mat.release(); 4544 return retVal; 4545 } 4546 4547 public static double calibrateCameraRO(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize, int iFixedPoint, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, Mat newObjPoints, int flags) { 4548 Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints); 4549 Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints); 4550 Mat rvecs_mat = new Mat(); 4551 Mat tvecs_mat = new Mat(); 4552 double retVal = calibrateCameraRO_1(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height, iFixedPoint, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, newObjPoints.nativeObj, flags); 4553 Converters.Mat_to_vector_Mat(rvecs_mat, rvecs); 4554 rvecs_mat.release(); 4555 Converters.Mat_to_vector_Mat(tvecs_mat, tvecs); 4556 tvecs_mat.release(); 4557 return retVal; 4558 } 4559 4560 public static double calibrateCameraRO(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize, int 
iFixedPoint, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, Mat newObjPoints) {
4561 Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
4562 Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints);
4563 Mat rvecs_mat = new Mat();
4564 Mat tvecs_mat = new Mat();
4565 double retVal = calibrateCameraRO_2(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height, iFixedPoint, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, newObjPoints.nativeObj);
4566 Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
4567 rvecs_mat.release();
4568 Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
4569 tvecs_mat.release();
4570 return retVal;
4571 }
4572
4573
4574 //
4575 // C++: void cv::calibrationMatrixValues(Mat cameraMatrix, Size imageSize, double apertureWidth, double apertureHeight, double& fovx, double& fovy, double& focalLength, Point2d& principalPoint, double& aspectRatio)
4576 //
4577
4578 /**
4579 * Computes useful camera characteristics from the camera intrinsic matrix.
4580 *
4581 * @param cameraMatrix Input camera intrinsic matrix that can be estimated by #calibrateCamera or
4582 * #stereoCalibrate .
4583 * @param imageSize Input image size in pixels.
4584 * @param apertureWidth Physical width in mm of the sensor.
4585 * @param apertureHeight Physical height in mm of the sensor.
4586 * @param fovx Output field of view in degrees along the horizontal sensor axis.
4587 * @param fovy Output field of view in degrees along the vertical sensor axis.
4588 * @param focalLength Focal length of the lens in mm.
4589 * @param principalPoint Principal point in mm.
4590 * @param aspectRatio \(f_y/f_x\)
4591 *
4592 * The function computes various useful camera characteristics from the previously estimated camera
4593 * matrix.
4594 *
4595 * <b>Note:</b>
4596 * Keep in mind that the unit 'mm' here stands for whatever unit of measure one chooses for
4597 * the chessboard pitch (it can thus be any value).
4598 */
4599 public static void calibrationMatrixValues(Mat cameraMatrix, Size imageSize, double apertureWidth, double apertureHeight, double[] fovx, double[] fovy, double[] focalLength, Point principalPoint, double[] aspectRatio) {
4600 double[] fovx_out = new double[1];
4601 double[] fovy_out = new double[1];
4602 double[] focalLength_out = new double[1];
4603 double[] principalPoint_out = new double[2];
4604 double[] aspectRatio_out = new double[1];
4605 calibrationMatrixValues_0(cameraMatrix.nativeObj, imageSize.width, imageSize.height, apertureWidth, apertureHeight, fovx_out, fovy_out, focalLength_out, principalPoint_out, aspectRatio_out);
4606 if(fovx!=null) fovx[0] = (double)fovx_out[0];
4607 if(fovy!=null) fovy[0] = (double)fovy_out[0];
4608 if(focalLength!=null) focalLength[0] = (double)focalLength_out[0];
4609 if(principalPoint!=null){ principalPoint.x = principalPoint_out[0]; principalPoint.y = principalPoint_out[1]; }
4610 if(aspectRatio!=null) aspectRatio[0] = (double)aspectRatio_out[0];
4611 }
4612
4613
4614 //
4615 // C++: double cv::stereoCalibrate(vector_Mat objectPoints, vector_Mat imagePoints1, vector_Mat imagePoints2, Mat& cameraMatrix1, Mat& distCoeffs1, Mat& cameraMatrix2, Mat& distCoeffs2, Size imageSize, Mat& R, Mat& T, Mat& E, Mat& F, Mat& perViewErrors, int flags = CALIB_FIX_INTRINSIC, TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 1e-6))
4616 //
4617
4618 /**
4619 * Calibrates a stereo camera setup.
This function finds the intrinsic parameters
4620 * for each of the two cameras and the extrinsic parameters between the two cameras.
4621 *
4622 * @param objectPoints Vector of vectors of the calibration pattern points. The same structure as
4623 * in REF: calibrateCamera. For each pattern view, both cameras need to see the same object
4624 * points. Therefore, objectPoints.size(), imagePoints1.size(), and imagePoints2.size() need to be
4625 * equal, and objectPoints[i].size(), imagePoints1[i].size(), and imagePoints2[i].size() need to
4626 * be equal for each i.
4627 * @param imagePoints1 Vector of vectors of the projections of the calibration pattern points,
4628 * observed by the first camera. The same structure as in REF: calibrateCamera.
4629 * @param imagePoints2 Vector of vectors of the projections of the calibration pattern points,
4630 * observed by the second camera. The same structure as in REF: calibrateCamera.
4631 * @param cameraMatrix1 Input/output camera intrinsic matrix for the first camera, the same as in
4632 * REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.
4633 * @param distCoeffs1 Input/output vector of distortion coefficients, the same as in
4634 * REF: calibrateCamera.
4635 * @param cameraMatrix2 Input/output camera intrinsic matrix for the second camera. See description for
4636 * cameraMatrix1.
4637 * @param distCoeffs2 Input/output lens distortion coefficients for the second camera. See
4638 * description for distCoeffs1.
4639 * @param imageSize Size of the image used only to initialize the camera intrinsic matrices.
4640 * @param R Output rotation matrix. Together with the translation vector T, this matrix brings
4641 * points given in the first camera's coordinate system to points in the second camera's
4642 * coordinate system. In more technical terms, the tuple of R and T performs a change of basis
4643 * from the first camera's coordinate system to the second camera's coordinate system. Due to its
4644 * duality, this tuple is equivalent to the position of the first camera with respect to the
4645 * second camera coordinate system.
4646 * @param T Output translation vector, see description above.
4647 * @param E Output essential matrix.
4648 * @param F Output fundamental matrix.
4649 * @param perViewErrors Output vector of the RMS re-projection error estimated for each pattern view.
4650 * @param flags Different flags that may be zero or a combination of the following values:
4651 * <ul>
4652 * <li>
4653 * REF: CALIB_FIX_INTRINSIC Fix cameraMatrix? and distCoeffs? so that only R, T, E, and F
4654 * matrices are estimated.
4655 * </li>
4656 * <li>
4657 * REF: CALIB_USE_INTRINSIC_GUESS Optimize some or all of the intrinsic parameters
4658 * according to the specified flags. Initial values are provided by the user.
4659 * </li>
4660 * <li>
4661 * REF: CALIB_USE_EXTRINSIC_GUESS R and T contain valid initial values that are optimized further.
4662 * Otherwise R and T are initialized to the median value of the pattern views (each dimension separately).
4663 * </li>
4664 * <li>
4665 * REF: CALIB_FIX_PRINCIPAL_POINT Fix the principal points during the optimization.
4666 * </li>
4667 * <li>
4668 * REF: CALIB_FIX_FOCAL_LENGTH Fix \(f^{(j)}_x\) and \(f^{(j)}_y\) .
4669 * </li>
4670 * <li>
4671 * REF: CALIB_FIX_ASPECT_RATIO Optimize \(f^{(j)}_y\) . Fix the ratio \(f^{(j)}_x/f^{(j)}_y\)
4672 * .
4673 * </li>
4674 * <li>
4675 * REF: CALIB_SAME_FOCAL_LENGTH Enforce \(f^{(0)}_x=f^{(1)}_x\) and \(f^{(0)}_y=f^{(1)}_y\) .
4676 * </li>
4677 * <li>
4678 * REF: CALIB_ZERO_TANGENT_DIST Set tangential distortion coefficients for each camera to
4679 * zeros and keep them fixed.
4680 * </li>
4681 * <li>
4682 * REF: CALIB_FIX_K1,..., REF: CALIB_FIX_K6 Do not change the corresponding radial
4683 * distortion coefficient during the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set,
4684 * the coefficient from the supplied distCoeffs matrix is used. Otherwise, it is set to 0.
4685 * </li>
4686 * <li>
4687 * REF: CALIB_RATIONAL_MODEL Enable coefficients k4, k5, and k6. To provide the backward
4688 * compatibility, this extra flag should be explicitly specified to make the calibration
4689 * function use the rational model and return 8 coefficients. If the flag is not set, the
4690 * function computes and returns only 5 distortion coefficients.
4691 * </li>
4692 * <li>
4693 * REF: CALIB_THIN_PRISM_MODEL Coefficients s1, s2, s3 and s4 are enabled. To provide the
4694 * backward compatibility, this extra flag should be explicitly specified to make the
4695 * calibration function use the thin prism model and return 12 coefficients. If the flag is not
4696 * set, the function computes and returns only 5 distortion coefficients.
4697 * </li>
4698 * <li>
4699 * REF: CALIB_FIX_S1_S2_S3_S4 The thin prism distortion coefficients are not changed during
4700 * the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
4701 * supplied distCoeffs matrix is used. Otherwise, it is set to 0.
4702 * </li>
4703 * <li>
4704 * REF: CALIB_TILTED_MODEL Coefficients tauX and tauY are enabled. To provide the
4705 * backward compatibility, this extra flag should be explicitly specified to make the
4706 * calibration function use the tilted sensor model and return 14 coefficients. If the flag is not
4707 * set, the function computes and returns only 5 distortion coefficients.
4708 * </li>
4709 * <li>
4710 * REF: CALIB_FIX_TAUX_TAUY The coefficients of the tilted sensor model are not changed during
4711 * the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
4712 * supplied distCoeffs matrix is used. Otherwise, it is set to 0.
4713 * </li>
4714 * </ul>
4715 * @param criteria Termination criteria for the iterative optimization algorithm.
4716 *
4717 * The function estimates the transformation between two cameras making a stereo pair. If one computes
4718 * the poses of an object relative to the first camera and to the second camera,
4719 * ( \(R_1\),\(T_1\) ) and (\(R_2\),\(T_2\)), respectively, for a stereo camera where the
4720 * relative position and orientation between the two cameras are fixed, then those poses definitely
4721 * relate to each other. This means, if the relative position and orientation (\(R\),\(T\)) of the
4722 * two cameras is known, it is possible to compute (\(R_2\),\(T_2\)) when (\(R_1\),\(T_1\)) is
4723 * given. This is what the described function does.
It computes (\(R\),\(T\)) such that:
4724 *
4725 * \(R_2=R R_1\)
4726 * \(T_2=R T_1 + T.\)
4727 *
4728 * Therefore, one can compute the coordinate representation of a 3D point for the second camera's
4729 * coordinate system when given the point's coordinate representation in the first camera's coordinate
4730 * system:
4731 *
4732 * \(\begin{bmatrix}
4733 * X_2 \\
4734 * Y_2 \\
4735 * Z_2 \\
4736 * 1
4737 * \end{bmatrix} = \begin{bmatrix}
4738 * R & T \\
4739 * 0 & 1
4740 * \end{bmatrix} \begin{bmatrix}
4741 * X_1 \\
4742 * Y_1 \\
4743 * Z_1 \\
4744 * 1
4745 * \end{bmatrix}.\)
4746 *
4747 *
4748 * Optionally, it computes the essential matrix E:
4749 *
4750 * \(E= \vecthreethree{0}{-T_2}{T_1}{T_2}{0}{-T_0}{-T_1}{T_0}{0} R\)
4751 *
4752 * where \(T_i\) are components of the translation vector \(T\) : \(T=[T_0, T_1, T_2]^T\) .
4753 * And the function can also compute the fundamental matrix F:
4754 *
4755 * \(F = cameraMatrix2^{-T}\cdot E \cdot cameraMatrix1^{-1}\)
4756 *
4757 * Besides the stereo-related information, the function can also perform a full calibration of each of
4758 * the two cameras. However, due to the high dimensionality of the parameter space and noise in the
4759 * input data, the function can diverge from the correct solution. If the intrinsic parameters can be
4760 * estimated with high accuracy for each of the cameras individually (for example, using
4761 * #calibrateCamera ), you are recommended to do so and then pass REF: CALIB_FIX_INTRINSIC flag to the
4762 * function along with the computed intrinsic parameters. Otherwise, if all the parameters are
4763 * estimated at once, it makes sense to restrict some parameters, for example, pass
4764 * REF: CALIB_SAME_FOCAL_LENGTH and REF: CALIB_ZERO_TANGENT_DIST flags, which is usually a
4765 * reasonable assumption.
4766 *
4767 * Similarly to #calibrateCamera, the function minimizes the total re-projection error for all the
4768 * points in all the available views from both cameras. The function returns the final value of the
4769 * re-projection error.
4770 * @return the final value of the re-projection error.
4771 */
4772 public static double stereoCalibrateExtended(List<Mat> objectPoints, List<Mat> imagePoints1, List<Mat> imagePoints2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat E, Mat F, Mat perViewErrors, int flags, TermCriteria criteria) {
4773 Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
4774 Mat imagePoints1_mat = Converters.vector_Mat_to_Mat(imagePoints1);
4775 Mat imagePoints2_mat = Converters.vector_Mat_to_Mat(imagePoints2);
4776 return stereoCalibrateExtended_0(objectPoints_mat.nativeObj, imagePoints1_mat.nativeObj, imagePoints2_mat.nativeObj, cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, E.nativeObj, F.nativeObj, perViewErrors.nativeObj, flags, criteria.type, criteria.maxCount, criteria.epsilon);
4777 }
4778
4779 /**
4780 * Calibrates a stereo camera setup. This function finds the intrinsic parameters
4781 * for each of the two cameras and the extrinsic parameters between the two cameras.
4782 *
4783 * @param objectPoints Vector of vectors of the calibration pattern points. The same structure as
4784 * in REF: calibrateCamera. For each pattern view, both cameras need to see the same object
4785 * points.
Therefore, objectPoints.size(), imagePoints1.size(), and imagePoints2.size() need to be
4786 * equal, and objectPoints[i].size(), imagePoints1[i].size(), and imagePoints2[i].size() need to
4787 * be equal for each i.
4788 * @param imagePoints1 Vector of vectors of the projections of the calibration pattern points,
4789 * observed by the first camera. The same structure as in REF: calibrateCamera.
4790 * @param imagePoints2 Vector of vectors of the projections of the calibration pattern points,
4791 * observed by the second camera. The same structure as in REF: calibrateCamera.
4792 * @param cameraMatrix1 Input/output camera intrinsic matrix for the first camera, the same as in
4793 * REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.
4794 * @param distCoeffs1 Input/output vector of distortion coefficients, the same as in
4795 * REF: calibrateCamera.
4796 * @param cameraMatrix2 Input/output camera intrinsic matrix for the second camera. See description for
4797 * cameraMatrix1.
4798 * @param distCoeffs2 Input/output lens distortion coefficients for the second camera. See
4799 * description for distCoeffs1.
4800 * @param imageSize Size of the image used only to initialize the camera intrinsic matrices.
4801 * @param R Output rotation matrix. Together with the translation vector T, this matrix brings
4802 * points given in the first camera's coordinate system to points in the second camera's
4803 * coordinate system. In more technical terms, the tuple of R and T performs a change of basis
4804 * from the first camera's coordinate system to the second camera's coordinate system. Due to its
4805 * duality, this tuple is equivalent to the position of the first camera with respect to the
4806 * second camera coordinate system.
4807 * @param T Output translation vector, see description above.
4808 * @param E Output essential matrix.
4809 * @param F Output fundamental matrix.
4810 * @param perViewErrors Output vector of the RMS re-projection error estimated for each pattern view.
4811 * @param flags Different flags that may be zero or a combination of the following values:
4812 * <ul>
4813 * <li>
4814 * REF: CALIB_FIX_INTRINSIC Fix cameraMatrix? and distCoeffs? so that only R, T, E, and F
4815 * matrices are estimated.
4816 * </li>
4817 * <li>
4818 * REF: CALIB_USE_INTRINSIC_GUESS Optimize some or all of the intrinsic parameters
4819 * according to the specified flags. Initial values are provided by the user.
4820 * </li>
4821 * <li>
4822 * REF: CALIB_USE_EXTRINSIC_GUESS R and T contain valid initial values that are optimized further.
4823 * Otherwise R and T are initialized to the median value of the pattern views (each dimension separately).
4824 * </li>
4825 * <li>
4826 * REF: CALIB_FIX_PRINCIPAL_POINT Fix the principal points during the optimization.
4827 * </li>
4828 * <li>
4829 * REF: CALIB_FIX_FOCAL_LENGTH Fix \(f^{(j)}_x\) and \(f^{(j)}_y\) .
4830 * </li>
4831 * <li>
4832 * REF: CALIB_FIX_ASPECT_RATIO Optimize \(f^{(j)}_y\) . Fix the ratio \(f^{(j)}_x/f^{(j)}_y\)
4833 * .
4834 * </li>
4835 * <li>
4836 * REF: CALIB_SAME_FOCAL_LENGTH Enforce \(f^{(0)}_x=f^{(1)}_x\) and \(f^{(0)}_y=f^{(1)}_y\) .
4837 * </li>
4838 * <li>
4839 * REF: CALIB_ZERO_TANGENT_DIST Set tangential distortion coefficients for each camera to
4840 * zeros and keep them fixed.
4841 * </li>
4842 * <li>
4843 * REF: CALIB_FIX_K1,..., REF: CALIB_FIX_K6 Do not change the corresponding radial
4844 * distortion coefficient during the optimization.
If REF: CALIB_USE_INTRINSIC_GUESS is set, 4845 * the coefficient from the supplied distCoeffs matrix is used. Otherwise, it is set to 0. 4846 * </li> 4847 * <li> 4848 * REF: CALIB_RATIONAL_MODEL Enable coefficients k4, k5, and k6. To provide the backward 4849 * compatibility, this extra flag should be explicitly specified to make the calibration 4850 * function use the rational model and return 8 coefficients. If the flag is not set, the 4851 * function computes and returns only 5 distortion coefficients. 4852 * </li> 4853 * <li> 4854 * REF: CALIB_THIN_PRISM_MODEL Coefficients s1, s2, s3 and s4 are enabled. To provide the 4855 * backward compatibility, this extra flag should be explicitly specified to make the 4856 * calibration function use the thin prism model and return 12 coefficients. If the flag is not 4857 * set, the function computes and returns only 5 distortion coefficients. 4858 * </li> 4859 * <li> 4860 * REF: CALIB_FIX_S1_S2_S3_S4 The thin prism distortion coefficients are not changed during 4861 * the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the 4862 * supplied distCoeffs matrix is used. Otherwise, it is set to 0. 4863 * </li> 4864 * <li> 4865 * REF: CALIB_TILTED_MODEL Coefficients tauX and tauY are enabled. To provide the 4866 * backward compatibility, this extra flag should be explicitly specified to make the 4867 * calibration function use the tilted sensor model and return 14 coefficients. If the flag is not 4868 * set, the function computes and returns only 5 distortion coefficients. 4869 * </li> 4870 * <li> 4871 * REF: CALIB_FIX_TAUX_TAUY The coefficients of the tilted sensor model are not changed during 4872 * the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the 4873 * supplied distCoeffs matrix is used. Otherwise, it is set to 0. 4874 * </li> 4875 * </ul> 4876 * 4877 * The function estimates the transformation between two cameras making a stereo pair. If one computes 4878 * the poses of an object relative to the first camera and to the second camera, 4879 * ( \(R_1\),\(T_1\) ) and (\(R_2\),\(T_2\)), respectively, for a stereo camera where the 4880 * relative position and orientation between the two cameras are fixed, then those poses definitely 4881 * relate to each other. This means, if the relative position and orientation (\(R\),\(T\)) of the 4882 * two cameras is known, it is possible to compute (\(R_2\),\(T_2\)) when (\(R_1\),\(T_1\)) is 4883 * given. This is what the described function does. It computes (\(R\),\(T\)) such that: 4884 * 4885 * \(R_2=R R_1\) 4886 * \(T_2=R T_1 + T.\) 4887 * 4888 * Therefore, one can compute the coordinate representation of a 3D point for the second camera's 4889 * coordinate system when given the point's coordinate representation in the first camera's coordinate 4890 * system: 4891 * 4892 * \(\begin{bmatrix} 4893 * X_2 \\ 4894 * Y_2 \\ 4895 * Z_2 \\ 4896 * 1 4897 * \end{bmatrix} = \begin{bmatrix} 4898 * R & T \\ 4899 * 0 & 1 4900 * \end{bmatrix} \begin{bmatrix} 4901 * X_1 \\ 4902 * Y_1 \\ 4903 * Z_1 \\ 4904 * 1 4905 * \end{bmatrix}.\) 4906 * 4907 * 4908 * Optionally, it computes the essential matrix E: 4909 * 4910 * \(E= \vecthreethree{0}{-T_2}{T_1}{T_2}{0}{-T_0}{-T_1}{T_0}{0} R\) 4911 * 4912 * where \(T_i\) are components of the translation vector \(T\) : \(T=[T_0, T_1, T_2]^T\) . 
4913 * And the function can also compute the fundamental matrix F:
4914 *
4915 * \(F = cameraMatrix2^{-T}\cdot E \cdot cameraMatrix1^{-1}\)
4916 *
4917 * Besides the stereo-related information, the function can also perform a full calibration of each of
4918 * the two cameras. However, due to the high dimensionality of the parameter space and noise in the
4919 * input data, the function can diverge from the correct solution. If the intrinsic parameters can be
4920 * estimated with high accuracy for each of the cameras individually (for example, using
4921 * #calibrateCamera ), you are recommended to do so and then pass REF: CALIB_FIX_INTRINSIC flag to the
4922 * function along with the computed intrinsic parameters. Otherwise, if all the parameters are
4923 * estimated at once, it makes sense to restrict some parameters, for example, pass
4924 * REF: CALIB_SAME_FOCAL_LENGTH and REF: CALIB_ZERO_TANGENT_DIST flags, which is usually a
4925 * reasonable assumption.
4926 *
4927 * Similarly to #calibrateCamera, the function minimizes the total re-projection error for all the
4928 * points in all the available views from both cameras. The function returns the final value of the
4929 * re-projection error.
4930 * @return the final value of the re-projection error.
4931 */
4932 public static double stereoCalibrateExtended(List<Mat> objectPoints, List<Mat> imagePoints1, List<Mat> imagePoints2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat E, Mat F, Mat perViewErrors, int flags) {
4933 Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
4934 Mat imagePoints1_mat = Converters.vector_Mat_to_Mat(imagePoints1);
4935 Mat imagePoints2_mat = Converters.vector_Mat_to_Mat(imagePoints2);
4936 return stereoCalibrateExtended_1(objectPoints_mat.nativeObj, imagePoints1_mat.nativeObj, imagePoints2_mat.nativeObj, cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, E.nativeObj, F.nativeObj, perViewErrors.nativeObj, flags);
4937 }
4938
4939 /**
4940 * Calibrates a stereo camera setup. This function finds the intrinsic parameters
4941 * for each of the two cameras and the extrinsic parameters between the two cameras.
4942 *
4943 * @param objectPoints Vector of vectors of the calibration pattern points. The same structure as
4944 * in REF: calibrateCamera. For each pattern view, both cameras need to see the same object
4945 * points. Therefore, objectPoints.size(), imagePoints1.size(), and imagePoints2.size() need to be
4946 * equal, and objectPoints[i].size(), imagePoints1[i].size(), and imagePoints2[i].size() need to
4947 * be equal for each i.
4948 * @param imagePoints1 Vector of vectors of the projections of the calibration pattern points,
4949 * observed by the first camera. The same structure as in REF: calibrateCamera.
4950 * @param imagePoints2 Vector of vectors of the projections of the calibration pattern points,
4951 * observed by the second camera. The same structure as in REF: calibrateCamera.
4952 * @param cameraMatrix1 Input/output camera intrinsic matrix for the first camera, the same as in
4953 * REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.
4954 * @param distCoeffs1 Input/output vector of distortion coefficients, the same as in
4955 * REF: calibrateCamera.
4956 * @param cameraMatrix2 Input/output camera intrinsic matrix for the second camera.
See description for
4957 * cameraMatrix1.
4958 * @param distCoeffs2 Input/output lens distortion coefficients for the second camera. See
4959 * description for distCoeffs1.
4960 * @param imageSize Size of the image used only to initialize the camera intrinsic matrices.
4961 * @param R Output rotation matrix. Together with the translation vector T, this matrix brings
4962 * points given in the first camera's coordinate system to points in the second camera's
4963 * coordinate system. In more technical terms, the tuple of R and T performs a change of basis
4964 * from the first camera's coordinate system to the second camera's coordinate system. Due to its
4965 * duality, this tuple is equivalent to the position of the first camera with respect to the
4966 * second camera coordinate system.
4967 * @param T Output translation vector, see description above.
4968 * @param E Output essential matrix.
4969 * @param F Output fundamental matrix.
4970 * @param perViewErrors Output vector of the RMS re-projection error estimated for each pattern view. This overload uses the default flags (CALIB_FIX_INTRINSIC); the flags below are available via the overloads that take a flags argument:
4971 * <ul>
4972 * <li>
4973 * REF: CALIB_FIX_INTRINSIC Fix cameraMatrix? and distCoeffs? so that only R, T, E, and F
4974 * matrices are estimated.
4975 * </li>
4976 * <li>
4977 * REF: CALIB_USE_INTRINSIC_GUESS Optimize some or all of the intrinsic parameters
4978 * according to the specified flags. Initial values are provided by the user.
4979 * </li>
4980 * <li>
4981 * REF: CALIB_USE_EXTRINSIC_GUESS R and T contain valid initial values that are optimized further.
4982 * Otherwise R and T are initialized to the median value of the pattern views (each dimension separately).
4983 * </li>
4984 * <li>
4985 * REF: CALIB_FIX_PRINCIPAL_POINT Fix the principal points during the optimization.
4986 * </li>
4987 * <li>
4988 * REF: CALIB_FIX_FOCAL_LENGTH Fix \(f^{(j)}_x\) and \(f^{(j)}_y\) .
4989 * </li>
4990 * <li>
4991 * REF: CALIB_FIX_ASPECT_RATIO Optimize \(f^{(j)}_y\) . Fix the ratio \(f^{(j)}_x/f^{(j)}_y\)
4992 * .
4993 * </li>
4994 * <li>
4995 * REF: CALIB_SAME_FOCAL_LENGTH Enforce \(f^{(0)}_x=f^{(1)}_x\) and \(f^{(0)}_y=f^{(1)}_y\) .
4996 * </li>
4997 * <li>
4998 * REF: CALIB_ZERO_TANGENT_DIST Set tangential distortion coefficients for each camera to
4999 * zeros and keep them fixed.
5000 * </li>
5001 * <li>
5002 * REF: CALIB_FIX_K1,..., REF: CALIB_FIX_K6 Do not change the corresponding radial
5003 * distortion coefficient during the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set,
5004 * the coefficient from the supplied distCoeffs matrix is used. Otherwise, it is set to 0.
5005 * </li>
5006 * <li>
5007 * REF: CALIB_RATIONAL_MODEL Enable coefficients k4, k5, and k6. To provide the backward
5008 * compatibility, this extra flag should be explicitly specified to make the calibration
5009 * function use the rational model and return 8 coefficients. If the flag is not set, the
5010 * function computes and returns only 5 distortion coefficients.
5011 * </li>
5012 * <li>
5013 * REF: CALIB_THIN_PRISM_MODEL Coefficients s1, s2, s3 and s4 are enabled. To provide the
5014 * backward compatibility, this extra flag should be explicitly specified to make the
5015 * calibration function use the thin prism model and return 12 coefficients. If the flag is not
5016 * set, the function computes and returns only 5 distortion coefficients.
5017 * </li>
5018 * <li>
5019 * REF: CALIB_FIX_S1_S2_S3_S4 The thin prism distortion coefficients are not changed during
5020 * the optimization.
If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the 5021 * supplied distCoeffs matrix is used. Otherwise, it is set to 0. 5022 * </li> 5023 * <li> 5024 * REF: CALIB_TILTED_MODEL Coefficients tauX and tauY are enabled. To provide the 5025 * backward compatibility, this extra flag should be explicitly specified to make the 5026 * calibration function use the tilted sensor model and return 14 coefficients. If the flag is not 5027 * set, the function computes and returns only 5 distortion coefficients. 5028 * </li> 5029 * <li> 5030 * REF: CALIB_FIX_TAUX_TAUY The coefficients of the tilted sensor model are not changed during 5031 * the optimization. If REF: CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the 5032 * supplied distCoeffs matrix is used. Otherwise, it is set to 0. 5033 * </li> 5034 * </ul> 5035 * 5036 * The function estimates the transformation between two cameras making a stereo pair. If one computes 5037 * the poses of an object relative to the first camera and to the second camera, 5038 * ( \(R_1\),\(T_1\) ) and (\(R_2\),\(T_2\)), respectively, for a stereo camera where the 5039 * relative position and orientation between the two cameras are fixed, then those poses definitely 5040 * relate to each other. This means, if the relative position and orientation (\(R\),\(T\)) of the 5041 * two cameras is known, it is possible to compute (\(R_2\),\(T_2\)) when (\(R_1\),\(T_1\)) is 5042 * given. This is what the described function does. It computes (\(R\),\(T\)) such that: 5043 * 5044 * \(R_2=R R_1\) 5045 * \(T_2=R T_1 + T.\) 5046 * 5047 * Therefore, one can compute the coordinate representation of a 3D point for the second camera's 5048 * coordinate system when given the point's coordinate representation in the first camera's coordinate 5049 * system: 5050 * 5051 * \(\begin{bmatrix} 5052 * X_2 \\ 5053 * Y_2 \\ 5054 * Z_2 \\ 5055 * 1 5056 * \end{bmatrix} = \begin{bmatrix} 5057 * R & T \\ 5058 * 0 & 1 5059 * \end{bmatrix} \begin{bmatrix} 5060 * X_1 \\ 5061 * Y_1 \\ 5062 * Z_1 \\ 5063 * 1 5064 * \end{bmatrix}.\) 5065 * 5066 * 5067 * Optionally, it computes the essential matrix E: 5068 * 5069 * \(E= \vecthreethree{0}{-T_2}{T_1}{T_2}{0}{-T_0}{-T_1}{T_0}{0} R\) 5070 * 5071 * where \(T_i\) are components of the translation vector \(T\) : \(T=[T_0, T_1, T_2]^T\) . 5072 * And the function can also compute the fundamental matrix F: 5073 * 5074 * \(F = cameraMatrix2^{-T}\cdot E \cdot cameraMatrix1^{-1}\) 5075 * 5076 * Besides the stereo-related information, the function can also perform a full calibration of each of 5077 * the two cameras. However, due to the high dimensionality of the parameter space and noise in the 5078 * input data, the function can diverge from the correct solution. If the intrinsic parameters can be 5079 * estimated with high accuracy for each of the cameras individually (for example, using 5080 * #calibrateCamera ), you are recommended to do so and then pass REF: CALIB_FIX_INTRINSIC flag to the 5081 * function along with the computed intrinsic parameters. Otherwise, if all the parameters are 5082 * estimated at once, it makes sense to restrict some parameters, for example, pass 5083 * REF: CALIB_SAME_FOCAL_LENGTH and REF: CALIB_ZERO_TANGENT_DIST flags, which is usually a 5084 * reasonable assumption. 5085 * 5086 * Similarly to #calibrateCamera, the function minimizes the total re-projection error for all the 5087 * points in all the available views from both cameras. The function returns the final value of the 5088 * re-projection error. 
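 *
 * A minimal calling sketch (editor's addition; it assumes both cameras were calibrated
 * individually beforehand, so their intrinsics are passed in and kept fixed by this overload's
 * default CALIB_FIX_INTRINSIC behaviour, per the C++ signature above):
 * <pre>{@code
 * Mat R = new Mat(), T = new Mat(), E = new Mat(), F = new Mat(), perViewErrors = new Mat();
 * double rms = Calib3d.stereoCalibrateExtended(objectPoints, imagePoints1, imagePoints2,
 *         cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, imageSize,
 *         R, T, E, F, perViewErrors);
 * }</pre>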
5089 * @return the final value of the re-projection error.
5090 */
5091 public static double stereoCalibrateExtended(List<Mat> objectPoints, List<Mat> imagePoints1, List<Mat> imagePoints2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat E, Mat F, Mat perViewErrors) {
5092 Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
5093 Mat imagePoints1_mat = Converters.vector_Mat_to_Mat(imagePoints1);
5094 Mat imagePoints2_mat = Converters.vector_Mat_to_Mat(imagePoints2);
5095 return stereoCalibrateExtended_2(objectPoints_mat.nativeObj, imagePoints1_mat.nativeObj, imagePoints2_mat.nativeObj, cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, E.nativeObj, F.nativeObj, perViewErrors.nativeObj);
5096 }
5097
5098
5099 //
5100 // C++: double cv::stereoCalibrate(vector_Mat objectPoints, vector_Mat imagePoints1, vector_Mat imagePoints2, Mat& cameraMatrix1, Mat& distCoeffs1, Mat& cameraMatrix2, Mat& distCoeffs2, Size imageSize, Mat& R, Mat& T, Mat& E, Mat& F, int flags = CALIB_FIX_INTRINSIC, TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 1e-6))
5101 //
5102
5103 public static double stereoCalibrate(List<Mat> objectPoints, List<Mat> imagePoints1, List<Mat> imagePoints2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat E, Mat F, int flags, TermCriteria criteria) {
5104 Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
5105 Mat imagePoints1_mat = Converters.vector_Mat_to_Mat(imagePoints1);
5106 Mat imagePoints2_mat = Converters.vector_Mat_to_Mat(imagePoints2);
5107 return stereoCalibrate_0(objectPoints_mat.nativeObj, imagePoints1_mat.nativeObj, imagePoints2_mat.nativeObj, cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, E.nativeObj, F.nativeObj, flags, criteria.type, criteria.maxCount, criteria.epsilon);
5108 }
5109
5110 public static double stereoCalibrate(List<Mat> objectPoints, List<Mat> imagePoints1, List<Mat> imagePoints2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat E, Mat F, int flags) {
5111 Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
5112 Mat imagePoints1_mat = Converters.vector_Mat_to_Mat(imagePoints1);
5113 Mat imagePoints2_mat = Converters.vector_Mat_to_Mat(imagePoints2);
5114 return stereoCalibrate_1(objectPoints_mat.nativeObj, imagePoints1_mat.nativeObj, imagePoints2_mat.nativeObj, cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, E.nativeObj, F.nativeObj, flags);
5115 }
5116
5117 public static double stereoCalibrate(List<Mat> objectPoints, List<Mat> imagePoints1, List<Mat> imagePoints2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat E, Mat F) {
5118 Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
5119 Mat imagePoints1_mat = Converters.vector_Mat_to_Mat(imagePoints1);
5120 Mat imagePoints2_mat = Converters.vector_Mat_to_Mat(imagePoints2);
5121 return stereoCalibrate_2(objectPoints_mat.nativeObj, imagePoints1_mat.nativeObj, imagePoints2_mat.nativeObj, cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, imageSize.width,
imageSize.height, R.nativeObj, T.nativeObj, E.nativeObj, F.nativeObj); 5122 } 5123 5124 5125 // 5126 // C++: void cv::stereoRectify(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat& R1, Mat& R2, Mat& P1, Mat& P2, Mat& Q, int flags = CALIB_ZERO_DISPARITY, double alpha = -1, Size newImageSize = Size(), Rect* validPixROI1 = 0, Rect* validPixROI2 = 0) 5127 // 5128 5129 /** 5130 * Computes rectification transforms for each head of a calibrated stereo camera. 5131 * 5132 * @param cameraMatrix1 First camera intrinsic matrix. 5133 * @param distCoeffs1 First camera distortion parameters. 5134 * @param cameraMatrix2 Second camera intrinsic matrix. 5135 * @param distCoeffs2 Second camera distortion parameters. 5136 * @param imageSize Size of the image used for stereo calibration. 5137 * @param R Rotation matrix from the coordinate system of the first camera to the second camera, 5138 * see REF: stereoCalibrate. 5139 * @param T Translation vector from the coordinate system of the first camera to the second camera, 5140 * see REF: stereoCalibrate. 5141 * @param R1 Output 3x3 rectification transform (rotation matrix) for the first camera. This matrix 5142 * brings points given in the unrectified first camera's coordinate system to points in the rectified 5143 * first camera's coordinate system. In more technical terms, it performs a change of basis from the 5144 * unrectified first camera's coordinate system to the rectified first camera's coordinate system. 5145 * @param R2 Output 3x3 rectification transform (rotation matrix) for the second camera. This matrix 5146 * brings points given in the unrectified second camera's coordinate system to points in the rectified 5147 * second camera's coordinate system. In more technical terms, it performs a change of basis from the 5148 * unrectified second camera's coordinate system to the rectified second camera's coordinate system. 5149 * @param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first 5150 * camera, i.e. it projects points given in the rectified first camera coordinate system into the 5151 * rectified first camera's image. 5152 * @param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second 5153 * camera, i.e. it projects points given in the rectified first camera coordinate system into the 5154 * rectified second camera's image. 5155 * @param Q Output \(4 \times 4\) disparity-to-depth mapping matrix (see REF: reprojectImageTo3D). 5156 * @param flags Operation flags that may be zero or REF: CALIB_ZERO_DISPARITY . If the flag is set, 5157 * the function makes the principal points of each camera have the same pixel coordinates in the 5158 * rectified views. And if the flag is not set, the function may still shift the images in the 5159 * horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the 5160 * useful image area. 5161 * @param alpha Free scaling parameter. If it is -1 or absent, the function performs the default 5162 * scaling. Otherwise, the parameter should be between 0 and 1. alpha=0 means that the rectified 5163 * images are zoomed and shifted so that only valid pixels are visible (no black areas after 5164 * rectification). alpha=1 means that the rectified image is decimated and shifted so that all the 5165 * pixels from the original images from the cameras are retained in the rectified images (no source 5166 * image pixels are lost). 
Any intermediate value yields an intermediate result between 5167 * those two extreme cases. 5168 * @param newImageSize New image resolution after rectification. The same size should be passed to 5169 * #initUndistortRectifyMap (see the stereo_calib.cpp sample in OpenCV samples directory). When (0,0) 5170 * is passed (default), it is set to the original imageSize . Setting it to a larger value can help you 5171 * preserve details in the original image, especially when there is a big radial distortion. 5172 * @param validPixROI1 Optional output rectangles inside the rectified images where all the pixels 5173 * are valid. If alpha=0 , the ROIs cover the whole images. Otherwise, they are likely to be smaller 5174 * (see the picture below). 5175 * @param validPixROI2 Optional output rectangles inside the rectified images where all the pixels 5176 * are valid. If alpha=0 , the ROIs cover the whole images. Otherwise, they are likely to be smaller 5177 * (see the picture below). 5178 * 5179 * The function computes the rotation matrices for each camera that (virtually) make both camera image 5180 * planes the same plane. Consequently, this makes all the epipolar lines parallel and thus simplifies 5181 * the dense stereo correspondence problem. The function takes the matrices computed by #stereoCalibrate 5182 * as input. As output, it provides two rotation matrices and also two projection matrices in the new 5183 * coordinates. The function distinguishes the following two cases: 5184 * 5185 * <ul> 5186 * <li> 5187 * <b>Horizontal stereo</b>: the first and the second camera views are shifted relative to each other 5188 * mainly along the x-axis (with possible small vertical shift). In the rectified images, the 5189 * corresponding epipolar lines in the left and right cameras are horizontal and have the same 5190 * y-coordinate. P1 and P2 look like: 5191 * </li> 5192 * </ul> 5193 * 5194 * \(\texttt{P1} = \begin{bmatrix} 5195 * f & 0 & cx_1 & 0 \\ 5196 * 0 & f & cy & 0 \\ 5197 * 0 & 0 & 1 & 0 5198 * \end{bmatrix}\) 5199 * 5200 * \(\texttt{P2} = \begin{bmatrix} 5201 * f & 0 & cx_2 & T_x*f \\ 5202 * 0 & f & cy & 0 \\ 5203 * 0 & 0 & 1 & 0 5204 * \end{bmatrix} ,\) 5205 * 5206 * where \(T_x\) is a horizontal shift between the cameras and \(cx_1=cx_2\) if 5207 * REF: CALIB_ZERO_DISPARITY is set. 5208 * 5209 * <ul> 5210 * <li> 5211 * <b>Vertical stereo</b>: the first and the second camera views are shifted relative to each other 5212 * mainly in the vertical direction (and probably a bit in the horizontal direction too). The epipolar 5213 * lines in the rectified images are vertical and have the same x-coordinate. P1 and P2 look like: 5214 * </li> 5215 * </ul> 5216 * 5217 * \(\texttt{P1} = \begin{bmatrix} 5218 * f & 0 & cx & 0 \\ 5219 * 0 & f & cy_1 & 0 \\ 5220 * 0 & 0 & 1 & 0 5221 * \end{bmatrix}\) 5222 * 5223 * \(\texttt{P2} = \begin{bmatrix} 5224 * f & 0 & cx & 0 \\ 5225 * 0 & f & cy_2 & T_y*f \\ 5226 * 0 & 0 & 1 & 0 5227 * \end{bmatrix},\) 5228 * 5229 * where \(T_y\) is a vertical shift between the cameras and \(cy_1=cy_2\) if 5230 * REF: CALIB_ZERO_DISPARITY is set. 5231 * 5232 * As you can see, the first three columns of P1 and P2 will effectively be the new "rectified" camera 5233 * matrices. The matrices, together with R1 and R2 , can then be passed to #initUndistortRectifyMap to 5234 * initialize the rectification map for each camera. 5235 * 5236 * See below the screenshot from the stereo_calib.cpp sample. Some red horizontal lines pass through 5237 * the corresponding image regions. 
This means that the images are well rectified, which is what most 5238 * stereo correspondence algorithms rely on. The green rectangles are roi1 and roi2 . You see that 5239 * their interiors are all valid pixels. 5240 * 5241 * ![image](pics/stereo_undistort.jpg) 5242 */ 5243 public static void stereoRectify(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat R1, Mat R2, Mat P1, Mat P2, Mat Q, int flags, double alpha, Size newImageSize, Rect validPixROI1, Rect validPixROI2) { 5244 double[] validPixROI1_out = new double[4]; 5245 double[] validPixROI2_out = new double[4]; 5246 stereoRectify_0(cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, R1.nativeObj, R2.nativeObj, P1.nativeObj, P2.nativeObj, Q.nativeObj, flags, alpha, newImageSize.width, newImageSize.height, validPixROI1_out, validPixROI2_out); 5247 if(validPixROI1!=null){ validPixROI1.x = (int)validPixROI1_out[0]; validPixROI1.y = (int)validPixROI1_out[1]; validPixROI1.width = (int)validPixROI1_out[2]; validPixROI1.height = (int)validPixROI1_out[3]; } 5248 if(validPixROI2!=null){ validPixROI2.x = (int)validPixROI2_out[0]; validPixROI2.y = (int)validPixROI2_out[1]; validPixROI2.width = (int)validPixROI2_out[2]; validPixROI2.height = (int)validPixROI2_out[3]; } 5249 } 5250 5251 /** 5252 * Computes rectification transforms for each head of a calibrated stereo camera. 5253 * 5254 * @param cameraMatrix1 First camera intrinsic matrix. 5255 * @param distCoeffs1 First camera distortion parameters. 5256 * @param cameraMatrix2 Second camera intrinsic matrix. 5257 * @param distCoeffs2 Second camera distortion parameters. 5258 * @param imageSize Size of the image used for stereo calibration. 5259 * @param R Rotation matrix from the coordinate system of the first camera to the second camera, 5260 * see REF: stereoCalibrate. 5261 * @param T Translation vector from the coordinate system of the first camera to the second camera, 5262 * see REF: stereoCalibrate. 5263 * @param R1 Output 3x3 rectification transform (rotation matrix) for the first camera. This matrix 5264 * brings points given in the unrectified first camera's coordinate system to points in the rectified 5265 * first camera's coordinate system. In more technical terms, it performs a change of basis from the 5266 * unrectified first camera's coordinate system to the rectified first camera's coordinate system. 5267 * @param R2 Output 3x3 rectification transform (rotation matrix) for the second camera. This matrix 5268 * brings points given in the unrectified second camera's coordinate system to points in the rectified 5269 * second camera's coordinate system. In more technical terms, it performs a change of basis from the 5270 * unrectified second camera's coordinate system to the rectified second camera's coordinate system. 5271 * @param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first 5272 * camera, i.e. it projects points given in the rectified first camera coordinate system into the 5273 * rectified first camera's image. 5274 * @param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second 5275 * camera, i.e. it projects points given in the rectified first camera coordinate system into the 5276 * rectified second camera's image. 5277 * @param Q Output \(4 \times 4\) disparity-to-depth mapping matrix (see REF: reprojectImageTo3D). 
5278 * @param flags Operation flags that may be zero or REF: CALIB_ZERO_DISPARITY . If the flag is set,
5279 * the function makes the principal points of each camera have the same pixel coordinates in the
5280 * rectified views. And if the flag is not set, the function may still shift the images in the
5281 * horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
5282 * useful image area.
5283 * @param alpha Free scaling parameter. If it is -1 or absent, the function performs the default
5284 * scaling. Otherwise, the parameter should be between 0 and 1. alpha=0 means that the rectified
5285 * images are zoomed and shifted so that only valid pixels are visible (no black areas after
5286 * rectification). alpha=1 means that the rectified image is decimated and shifted so that all the
5287 * pixels from the original images from the cameras are retained in the rectified images (no source
5288 * image pixels are lost). Any intermediate value yields an intermediate result between
5289 * those two extreme cases.
5290 * @param newImageSize New image resolution after rectification. The same size should be passed to
5291 * #initUndistortRectifyMap (see the stereo_calib.cpp sample in OpenCV samples directory). When (0,0)
5292 * is passed (default), it is set to the original imageSize . Setting it to a larger value can help you
5293 * preserve details in the original image, especially when there is a big radial distortion.
5294 * @param validPixROI1 Optional output rectangles inside the rectified images where all the pixels
5295 * are valid. If alpha=0 , the ROIs cover the whole images. Otherwise, they are likely to be smaller
5296 * (see the picture below).
5299 *
5300 * The function computes the rotation matrices for each camera that (virtually) make both camera image
5301 * planes the same plane. Consequently, this makes all the epipolar lines parallel and thus simplifies
5302 * the dense stereo correspondence problem. The function takes the matrices computed by #stereoCalibrate
5303 * as input. As output, it provides two rotation matrices and also two projection matrices in the new
5304 * coordinates. The function distinguishes the following two cases:
5305 *
5306 * <ul>
5307 * <li>
5308 * <b>Horizontal stereo</b>: the first and the second camera views are shifted relative to each other
5309 * mainly along the x-axis (with possible small vertical shift). In the rectified images, the
5310 * corresponding epipolar lines in the left and right cameras are horizontal and have the same
5311 * y-coordinate. P1 and P2 look like:
5312 * </li>
5313 * </ul>
5314 *
5315 * \(\texttt{P1} = \begin{bmatrix}
5316 * f & 0 & cx_1 & 0 \\
5317 * 0 & f & cy & 0 \\
5318 * 0 & 0 & 1 & 0
5319 * \end{bmatrix}\)
5320 *
5321 * \(\texttt{P2} = \begin{bmatrix}
5322 * f & 0 & cx_2 & T_x*f \\
5323 * 0 & f & cy & 0 \\
5324 * 0 & 0 & 1 & 0
5325 * \end{bmatrix} ,\)
5326 *
5327 * where \(T_x\) is a horizontal shift between the cameras and \(cx_1=cx_2\) if
5328 * REF: CALIB_ZERO_DISPARITY is set.
5329 *
5330 * <ul>
5331 * <li>
5332 * <b>Vertical stereo</b>: the first and the second camera views are shifted relative to each other
5333 * mainly in the vertical direction (and probably a bit in the horizontal direction too). The epipolar
5334 * lines in the rectified images are vertical and have the same x-coordinate.
P1 and P2 look like: 5335 * </li> 5336 * </ul> 5337 * 5338 * \(\texttt{P1} = \begin{bmatrix} 5339 * f & 0 & cx & 0 \\ 5340 * 0 & f & cy_1 & 0 \\ 5341 * 0 & 0 & 1 & 0 5342 * \end{bmatrix}\) 5343 * 5344 * \(\texttt{P2} = \begin{bmatrix} 5345 * f & 0 & cx & 0 \\ 5346 * 0 & f & cy_2 & T_y*f \\ 5347 * 0 & 0 & 1 & 0 5348 * \end{bmatrix},\) 5349 * 5350 * where \(T_y\) is a vertical shift between the cameras and \(cy_1=cy_2\) if 5351 * REF: CALIB_ZERO_DISPARITY is set. 5352 * 5353 * As you can see, the first three columns of P1 and P2 will effectively be the new "rectified" camera 5354 * matrices. The matrices, together with R1 and R2 , can then be passed to #initUndistortRectifyMap to 5355 * initialize the rectification map for each camera. 5356 * 5357 * See below the screenshot from the stereo_calib.cpp sample. Some red horizontal lines pass through 5358 * the corresponding image regions. This means that the images are well rectified, which is what most 5359 * stereo correspondence algorithms rely on. The green rectangles are roi1 and roi2 . You see that 5360 * their interiors are all valid pixels. 5361 * 5362 * ![image](pics/stereo_undistort.jpg) 5363 */ 5364 public static void stereoRectify(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat R1, Mat R2, Mat P1, Mat P2, Mat Q, int flags, double alpha, Size newImageSize, Rect validPixROI1) { 5365 double[] validPixROI1_out = new double[4]; 5366 stereoRectify_1(cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, R1.nativeObj, R2.nativeObj, P1.nativeObj, P2.nativeObj, Q.nativeObj, flags, alpha, newImageSize.width, newImageSize.height, validPixROI1_out); 5367 if(validPixROI1!=null){ validPixROI1.x = (int)validPixROI1_out[0]; validPixROI1.y = (int)validPixROI1_out[1]; validPixROI1.width = (int)validPixROI1_out[2]; validPixROI1.height = (int)validPixROI1_out[3]; } 5368 } 5369 5370 /** 5371 * Computes rectification transforms for each head of a calibrated stereo camera. 5372 * 5373 * @param cameraMatrix1 First camera intrinsic matrix. 5374 * @param distCoeffs1 First camera distortion parameters. 5375 * @param cameraMatrix2 Second camera intrinsic matrix. 5376 * @param distCoeffs2 Second camera distortion parameters. 5377 * @param imageSize Size of the image used for stereo calibration. 5378 * @param R Rotation matrix from the coordinate system of the first camera to the second camera, 5379 * see REF: stereoCalibrate. 5380 * @param T Translation vector from the coordinate system of the first camera to the second camera, 5381 * see REF: stereoCalibrate. 5382 * @param R1 Output 3x3 rectification transform (rotation matrix) for the first camera. This matrix 5383 * brings points given in the unrectified first camera's coordinate system to points in the rectified 5384 * first camera's coordinate system. In more technical terms, it performs a change of basis from the 5385 * unrectified first camera's coordinate system to the rectified first camera's coordinate system. 5386 * @param R2 Output 3x3 rectification transform (rotation matrix) for the second camera. This matrix 5387 * brings points given in the unrectified second camera's coordinate system to points in the rectified 5388 * second camera's coordinate system. In more technical terms, it performs a change of basis from the 5389 * unrectified second camera's coordinate system to the rectified second camera's coordinate system. 
5390 * @param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
5391 * camera, i.e. it projects points given in the rectified first camera coordinate system into the
5392 * rectified first camera's image.
5393 * @param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
5394 * camera, i.e. it projects points given in the rectified first camera coordinate system into the
5395 * rectified second camera's image.
5396 * @param Q Output \(4 \times 4\) disparity-to-depth mapping matrix (see REF: reprojectImageTo3D).
5397 * @param flags Operation flags that may be zero or REF: CALIB_ZERO_DISPARITY . If the flag is set,
5398 * the function makes the principal points of each camera have the same pixel coordinates in the
5399 * rectified views. And if the flag is not set, the function may still shift the images in the
5400 * horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
5401 * useful image area.
5402 * @param alpha Free scaling parameter. If it is -1 or absent, the function performs the default
5403 * scaling. Otherwise, the parameter should be between 0 and 1. alpha=0 means that the rectified
5404 * images are zoomed and shifted so that only valid pixels are visible (no black areas after
5405 * rectification). alpha=1 means that the rectified image is decimated and shifted so that all the
5406 * pixels from the original images from the cameras are retained in the rectified images (no source
5407 * image pixels are lost). Any intermediate value yields an intermediate result between
5408 * those two extreme cases.
5409 * @param newImageSize New image resolution after rectification. The same size should be passed to
5410 * #initUndistortRectifyMap (see the stereo_calib.cpp sample in OpenCV samples directory). When (0,0)
5411 * is passed (default), it is set to the original imageSize . Setting it to a larger value can help you
5412 * preserve details in the original image, especially when there is a big radial distortion.
5417 *
5418 * The function computes the rotation matrices for each camera that (virtually) make both camera image
5419 * planes the same plane. Consequently, this makes all the epipolar lines parallel and thus simplifies
5420 * the dense stereo correspondence problem. The function takes the matrices computed by #stereoCalibrate
5421 * as input. As output, it provides two rotation matrices and also two projection matrices in the new
5422 * coordinates. The function distinguishes the following two cases:
5423 *
5424 * <ul>
5425 * <li>
5426 * <b>Horizontal stereo</b>: the first and the second camera views are shifted relative to each other
5427 * mainly along the x-axis (with possible small vertical shift). In the rectified images, the
5428 * corresponding epipolar lines in the left and right cameras are horizontal and have the same
5429 * y-coordinate.
P1 and P2 look like: 5430 * </li> 5431 * </ul> 5432 * 5433 * \(\texttt{P1} = \begin{bmatrix} 5434 * f & 0 & cx_1 & 0 \\ 5435 * 0 & f & cy & 0 \\ 5436 * 0 & 0 & 1 & 0 5437 * \end{bmatrix}\) 5438 * 5439 * \(\texttt{P2} = \begin{bmatrix} 5440 * f & 0 & cx_2 & T_x*f \\ 5441 * 0 & f & cy & 0 \\ 5442 * 0 & 0 & 1 & 0 5443 * \end{bmatrix} ,\) 5444 * 5445 * where \(T_x\) is a horizontal shift between the cameras and \(cx_1=cx_2\) if 5446 * REF: CALIB_ZERO_DISPARITY is set. 5447 * 5448 * <ul> 5449 * <li> 5450 * <b>Vertical stereo</b>: the first and the second camera views are shifted relative to each other 5451 * mainly in the vertical direction (and probably a bit in the horizontal direction too). The epipolar 5452 * lines in the rectified images are vertical and have the same x-coordinate. P1 and P2 look like: 5453 * </li> 5454 * </ul> 5455 * 5456 * \(\texttt{P1} = \begin{bmatrix} 5457 * f & 0 & cx & 0 \\ 5458 * 0 & f & cy_1 & 0 \\ 5459 * 0 & 0 & 1 & 0 5460 * \end{bmatrix}\) 5461 * 5462 * \(\texttt{P2} = \begin{bmatrix} 5463 * f & 0 & cx & 0 \\ 5464 * 0 & f & cy_2 & T_y*f \\ 5465 * 0 & 0 & 1 & 0 5466 * \end{bmatrix},\) 5467 * 5468 * where \(T_y\) is a vertical shift between the cameras and \(cy_1=cy_2\) if 5469 * REF: CALIB_ZERO_DISPARITY is set. 5470 * 5471 * As you can see, the first three columns of P1 and P2 will effectively be the new "rectified" camera 5472 * matrices. The matrices, together with R1 and R2 , can then be passed to #initUndistortRectifyMap to 5473 * initialize the rectification map for each camera. 5474 * 5475 * See below the screenshot from the stereo_calib.cpp sample. Some red horizontal lines pass through 5476 * the corresponding image regions. This means that the images are well rectified, which is what most 5477 * stereo correspondence algorithms rely on. The green rectangles are roi1 and roi2 . You see that 5478 * their interiors are all valid pixels. 5479 * 5480 * ![image](pics/stereo_undistort.jpg) 5481 */ 5482 public static void stereoRectify(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat R1, Mat R2, Mat P1, Mat P2, Mat Q, int flags, double alpha, Size newImageSize) { 5483 stereoRectify_2(cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, R1.nativeObj, R2.nativeObj, P1.nativeObj, P2.nativeObj, Q.nativeObj, flags, alpha, newImageSize.width, newImageSize.height); 5484 } 5485 5486 /** 5487 * Computes rectification transforms for each head of a calibrated stereo camera. 5488 * 5489 * @param cameraMatrix1 First camera intrinsic matrix. 5490 * @param distCoeffs1 First camera distortion parameters. 5491 * @param cameraMatrix2 Second camera intrinsic matrix. 5492 * @param distCoeffs2 Second camera distortion parameters. 5493 * @param imageSize Size of the image used for stereo calibration. 5494 * @param R Rotation matrix from the coordinate system of the first camera to the second camera, 5495 * see REF: stereoCalibrate. 5496 * @param T Translation vector from the coordinate system of the first camera to the second camera, 5497 * see REF: stereoCalibrate. 5498 * @param R1 Output 3x3 rectification transform (rotation matrix) for the first camera. This matrix 5499 * brings points given in the unrectified first camera's coordinate system to points in the rectified 5500 * first camera's coordinate system. 
In more technical terms, it performs a change of basis from the
5501 * unrectified first camera's coordinate system to the rectified first camera's coordinate system.
5502 * @param R2 Output 3x3 rectification transform (rotation matrix) for the second camera. This matrix
5503 * brings points given in the unrectified second camera's coordinate system to points in the rectified
5504 * second camera's coordinate system. In more technical terms, it performs a change of basis from the
5505 * unrectified second camera's coordinate system to the rectified second camera's coordinate system.
5506 * @param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
5507 * camera, i.e. it projects points given in the rectified first camera coordinate system into the
5508 * rectified first camera's image.
5509 * @param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
5510 * camera, i.e. it projects points given in the rectified first camera coordinate system into the
5511 * rectified second camera's image.
5512 * @param Q Output \(4 \times 4\) disparity-to-depth mapping matrix (see REF: reprojectImageTo3D).
5513 * @param flags Operation flags that may be zero or REF: CALIB_ZERO_DISPARITY . If the flag is set,
5514 * the function makes the principal points of each camera have the same pixel coordinates in the
5515 * rectified views. And if the flag is not set, the function may still shift the images in the
5516 * horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
5517 * useful image area.
5518 * @param alpha Free scaling parameter. If it is -1 or absent, the function performs the default
5519 * scaling. Otherwise, the parameter should be between 0 and 1. alpha=0 means that the rectified
5520 * images are zoomed and shifted so that only valid pixels are visible (no black areas after
5521 * rectification). alpha=1 means that the rectified image is decimated and shifted so that all the
5522 * pixels from the original images from the cameras are retained in the rectified images (no source
5523 * image pixels are lost). Any intermediate value yields an intermediate result between
5524 * those two extreme cases.
5532 *
5533 * The function computes the rotation matrices for each camera that (virtually) make both camera image
5534 * planes the same plane. Consequently, this makes all the epipolar lines parallel and thus simplifies
5535 * the dense stereo correspondence problem. The function takes the matrices computed by #stereoCalibrate
5536 * as input. As output, it provides two rotation matrices and also two projection matrices in the new
5537 * coordinates. The function distinguishes the following two cases:
5538 *
5539 * <ul>
5540 * <li>
5541 * <b>Horizontal stereo</b>: the first and the second camera views are shifted relative to each other
5542 * mainly along the x-axis (with possible small vertical shift).
In the rectified images, the 5543 * corresponding epipolar lines in the left and right cameras are horizontal and have the same 5544 * y-coordinate. P1 and P2 look like: 5545 * </li> 5546 * </ul> 5547 * 5548 * \(\texttt{P1} = \begin{bmatrix} 5549 * f & 0 & cx_1 & 0 \\ 5550 * 0 & f & cy & 0 \\ 5551 * 0 & 0 & 1 & 0 5552 * \end{bmatrix}\) 5553 * 5554 * \(\texttt{P2} = \begin{bmatrix} 5555 * f & 0 & cx_2 & T_x*f \\ 5556 * 0 & f & cy & 0 \\ 5557 * 0 & 0 & 1 & 0 5558 * \end{bmatrix} ,\) 5559 * 5560 * where \(T_x\) is a horizontal shift between the cameras and \(cx_1=cx_2\) if 5561 * REF: CALIB_ZERO_DISPARITY is set. 5562 * 5563 * <ul> 5564 * <li> 5565 * <b>Vertical stereo</b>: the first and the second camera views are shifted relative to each other 5566 * mainly in the vertical direction (and probably a bit in the horizontal direction too). The epipolar 5567 * lines in the rectified images are vertical and have the same x-coordinate. P1 and P2 look like: 5568 * </li> 5569 * </ul> 5570 * 5571 * \(\texttt{P1} = \begin{bmatrix} 5572 * f & 0 & cx & 0 \\ 5573 * 0 & f & cy_1 & 0 \\ 5574 * 0 & 0 & 1 & 0 5575 * \end{bmatrix}\) 5576 * 5577 * \(\texttt{P2} = \begin{bmatrix} 5578 * f & 0 & cx & 0 \\ 5579 * 0 & f & cy_2 & T_y*f \\ 5580 * 0 & 0 & 1 & 0 5581 * \end{bmatrix},\) 5582 * 5583 * where \(T_y\) is a vertical shift between the cameras and \(cy_1=cy_2\) if 5584 * REF: CALIB_ZERO_DISPARITY is set. 5585 * 5586 * As you can see, the first three columns of P1 and P2 will effectively be the new "rectified" camera 5587 * matrices. The matrices, together with R1 and R2 , can then be passed to #initUndistortRectifyMap to 5588 * initialize the rectification map for each camera. 5589 * 5590 * See below the screenshot from the stereo_calib.cpp sample. Some red horizontal lines pass through 5591 * the corresponding image regions. This means that the images are well rectified, which is what most 5592 * stereo correspondence algorithms rely on. The green rectangles are roi1 and roi2 . You see that 5593 * their interiors are all valid pixels. 5594 * 5595 * ![image](pics/stereo_undistort.jpg) 5596 */ 5597 public static void stereoRectify(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat R1, Mat R2, Mat P1, Mat P2, Mat Q, int flags, double alpha) { 5598 stereoRectify_3(cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, R1.nativeObj, R2.nativeObj, P1.nativeObj, P2.nativeObj, Q.nativeObj, flags, alpha); 5599 } 5600 5601 /** 5602 * Computes rectification transforms for each head of a calibrated stereo camera. 5603 * 5604 * @param cameraMatrix1 First camera intrinsic matrix. 5605 * @param distCoeffs1 First camera distortion parameters. 5606 * @param cameraMatrix2 Second camera intrinsic matrix. 5607 * @param distCoeffs2 Second camera distortion parameters. 5608 * @param imageSize Size of the image used for stereo calibration. 5609 * @param R Rotation matrix from the coordinate system of the first camera to the second camera, 5610 * see REF: stereoCalibrate. 5611 * @param T Translation vector from the coordinate system of the first camera to the second camera, 5612 * see REF: stereoCalibrate. 5613 * @param R1 Output 3x3 rectification transform (rotation matrix) for the first camera. This matrix 5614 * brings points given in the unrectified first camera's coordinate system to points in the rectified 5615 * first camera's coordinate system. 
In more technical terms, it performs a change of basis from the
5616 * unrectified first camera's coordinate system to the rectified first camera's coordinate system.
5617 * @param R2 Output 3x3 rectification transform (rotation matrix) for the second camera. This matrix
5618 * brings points given in the unrectified second camera's coordinate system to points in the rectified
5619 * second camera's coordinate system. In more technical terms, it performs a change of basis from the
5620 * unrectified second camera's coordinate system to the rectified second camera's coordinate system.
5621 * @param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
5622 * camera, i.e. it projects points given in the rectified first camera coordinate system into the
5623 * rectified first camera's image.
5624 * @param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
5625 * camera, i.e. it projects points given in the rectified first camera coordinate system into the
5626 * rectified second camera's image.
5627 * @param Q Output \(4 \times 4\) disparity-to-depth mapping matrix (see REF: reprojectImageTo3D).
5628 * @param flags Operation flags that may be zero or REF: CALIB_ZERO_DISPARITY . If the flag is set,
5629 * the function makes the principal points of each camera have the same pixel coordinates in the
5630 * rectified views. And if the flag is not set, the function may still shift the images in the
5631 * horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
5632 * useful image area.
5646 *
5647 * The function computes the rotation matrices for each camera that (virtually) make both camera image
5648 * planes the same plane. Consequently, this makes all the epipolar lines parallel and thus simplifies
5649 * the dense stereo correspondence problem. The function takes the matrices computed by #stereoCalibrate
5650 * as input. As output, it provides two rotation matrices and also two projection matrices in the new
5651 * coordinates. The function distinguishes the following two cases:
5652 *
5653 * <ul>
5654 * <li>
5655 * <b>Horizontal stereo</b>: the first and the second camera views are shifted relative to each other
5656 * mainly along the x-axis (with possible small vertical shift).
In the rectified images, the 5657 * corresponding epipolar lines in the left and right cameras are horizontal and have the same 5658 * y-coordinate. P1 and P2 look like: 5659 * </li> 5660 * </ul> 5661 * 5662 * \(\texttt{P1} = \begin{bmatrix} 5663 * f & 0 & cx_1 & 0 \\ 5664 * 0 & f & cy & 0 \\ 5665 * 0 & 0 & 1 & 0 5666 * \end{bmatrix}\) 5667 * 5668 * \(\texttt{P2} = \begin{bmatrix} 5669 * f & 0 & cx_2 & T_x*f \\ 5670 * 0 & f & cy & 0 \\ 5671 * 0 & 0 & 1 & 0 5672 * \end{bmatrix} ,\) 5673 * 5674 * where \(T_x\) is a horizontal shift between the cameras and \(cx_1=cx_2\) if 5675 * REF: CALIB_ZERO_DISPARITY is set. 5676 * 5677 * <ul> 5678 * <li> 5679 * <b>Vertical stereo</b>: the first and the second camera views are shifted relative to each other 5680 * mainly in the vertical direction (and probably a bit in the horizontal direction too). The epipolar 5681 * lines in the rectified images are vertical and have the same x-coordinate. P1 and P2 look like: 5682 * </li> 5683 * </ul> 5684 * 5685 * \(\texttt{P1} = \begin{bmatrix} 5686 * f & 0 & cx & 0 \\ 5687 * 0 & f & cy_1 & 0 \\ 5688 * 0 & 0 & 1 & 0 5689 * \end{bmatrix}\) 5690 * 5691 * \(\texttt{P2} = \begin{bmatrix} 5692 * f & 0 & cx & 0 \\ 5693 * 0 & f & cy_2 & T_y*f \\ 5694 * 0 & 0 & 1 & 0 5695 * \end{bmatrix},\) 5696 * 5697 * where \(T_y\) is a vertical shift between the cameras and \(cy_1=cy_2\) if 5698 * REF: CALIB_ZERO_DISPARITY is set. 5699 * 5700 * As you can see, the first three columns of P1 and P2 will effectively be the new "rectified" camera 5701 * matrices. The matrices, together with R1 and R2 , can then be passed to #initUndistortRectifyMap to 5702 * initialize the rectification map for each camera. 5703 * 5704 * See below the screenshot from the stereo_calib.cpp sample. Some red horizontal lines pass through 5705 * the corresponding image regions. This means that the images are well rectified, which is what most 5706 * stereo correspondence algorithms rely on. The green rectangles are roi1 and roi2 . You see that 5707 * their interiors are all valid pixels. 5708 * 5709 * ![image](pics/stereo_undistort.jpg) 5710 */ 5711 public static void stereoRectify(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat R1, Mat R2, Mat P1, Mat P2, Mat Q, int flags) { 5712 stereoRectify_4(cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, R1.nativeObj, R2.nativeObj, P1.nativeObj, P2.nativeObj, Q.nativeObj, flags); 5713 } 5714 5715 /** 5716 * Computes rectification transforms for each head of a calibrated stereo camera. 5717 * 5718 * @param cameraMatrix1 First camera intrinsic matrix. 5719 * @param distCoeffs1 First camera distortion parameters. 5720 * @param cameraMatrix2 Second camera intrinsic matrix. 5721 * @param distCoeffs2 Second camera distortion parameters. 5722 * @param imageSize Size of the image used for stereo calibration. 5723 * @param R Rotation matrix from the coordinate system of the first camera to the second camera, 5724 * see REF: stereoCalibrate. 5725 * @param T Translation vector from the coordinate system of the first camera to the second camera, 5726 * see REF: stereoCalibrate. 5727 * @param R1 Output 3x3 rectification transform (rotation matrix) for the first camera. This matrix 5728 * brings points given in the unrectified first camera's coordinate system to points in the rectified 5729 * first camera's coordinate system. 
In more technical terms, it performs a change of basis from the
5730 * unrectified first camera's coordinate system to the rectified first camera's coordinate system.
5731 * @param R2 Output 3x3 rectification transform (rotation matrix) for the second camera. This matrix
5732 * brings points given in the unrectified second camera's coordinate system to points in the rectified
5733 * second camera's coordinate system. In more technical terms, it performs a change of basis from the
5734 * unrectified second camera's coordinate system to the rectified second camera's coordinate system.
5735 * @param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
5736 * camera, i.e. it projects points given in the rectified first camera coordinate system into the
5737 * rectified first camera's image.
5738 * @param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
5739 * camera, i.e. it projects points given in the rectified first camera coordinate system into the
5740 * rectified second camera's image.
5741 * @param Q Output \(4 \times 4\) disparity-to-depth mapping matrix (see REF: reprojectImageTo3D).
5759 *
5760 * The function computes the rotation matrices for each camera that (virtually) make both camera image
5761 * planes the same plane. Consequently, this makes all the epipolar lines parallel and thus simplifies
5762 * the dense stereo correspondence problem. The function takes the matrices computed by #stereoCalibrate
5763 * as input. As output, it provides two rotation matrices and also two projection matrices in the new
5764 * coordinates. The function distinguishes the following two cases:
5765 *
5766 * <ul>
5767 * <li>
5768 * <b>Horizontal stereo</b>: the first and the second camera views are shifted relative to each other
5769 * mainly along the x-axis (with possible small vertical shift). In the rectified images, the
5770 * corresponding epipolar lines in the left and right cameras are horizontal and have the same
5771 * y-coordinate.
P1 and P2 look like: 5772 * </li> 5773 * </ul> 5774 * 5775 * \(\texttt{P1} = \begin{bmatrix} 5776 * f & 0 & cx_1 & 0 \\ 5777 * 0 & f & cy & 0 \\ 5778 * 0 & 0 & 1 & 0 5779 * \end{bmatrix}\) 5780 * 5781 * \(\texttt{P2} = \begin{bmatrix} 5782 * f & 0 & cx_2 & T_x*f \\ 5783 * 0 & f & cy & 0 \\ 5784 * 0 & 0 & 1 & 0 5785 * \end{bmatrix} ,\) 5786 * 5787 * where \(T_x\) is a horizontal shift between the cameras and \(cx_1=cx_2\) if 5788 * REF: CALIB_ZERO_DISPARITY is set. 5789 * 5790 * <ul> 5791 * <li> 5792 * <b>Vertical stereo</b>: the first and the second camera views are shifted relative to each other 5793 * mainly in the vertical direction (and probably a bit in the horizontal direction too). The epipolar 5794 * lines in the rectified images are vertical and have the same x-coordinate. P1 and P2 look like: 5795 * </li> 5796 * </ul> 5797 * 5798 * \(\texttt{P1} = \begin{bmatrix} 5799 * f & 0 & cx & 0 \\ 5800 * 0 & f & cy_1 & 0 \\ 5801 * 0 & 0 & 1 & 0 5802 * \end{bmatrix}\) 5803 * 5804 * \(\texttt{P2} = \begin{bmatrix} 5805 * f & 0 & cx & 0 \\ 5806 * 0 & f & cy_2 & T_y*f \\ 5807 * 0 & 0 & 1 & 0 5808 * \end{bmatrix},\) 5809 * 5810 * where \(T_y\) is a vertical shift between the cameras and \(cy_1=cy_2\) if 5811 * REF: CALIB_ZERO_DISPARITY is set. 5812 * 5813 * As you can see, the first three columns of P1 and P2 will effectively be the new "rectified" camera 5814 * matrices. The matrices, together with R1 and R2 , can then be passed to #initUndistortRectifyMap to 5815 * initialize the rectification map for each camera. 5816 * 5817 * See below the screenshot from the stereo_calib.cpp sample. Some red horizontal lines pass through 5818 * the corresponding image regions. This means that the images are well rectified, which is what most 5819 * stereo correspondence algorithms rely on. The green rectangles are roi1 and roi2 . You see that 5820 * their interiors are all valid pixels. 5821 * 5822 * ![image](pics/stereo_undistort.jpg) 5823 */ 5824 public static void stereoRectify(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat R1, Mat R2, Mat P1, Mat P2, Mat Q) { 5825 stereoRectify_5(cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, R1.nativeObj, R2.nativeObj, P1.nativeObj, P2.nativeObj, Q.nativeObj); 5826 } 5827 5828 5829 // 5830 // C++: bool cv::stereoRectifyUncalibrated(Mat points1, Mat points2, Mat F, Size imgSize, Mat& H1, Mat& H2, double threshold = 5) 5831 // 5832 5833 /** 5834 * Computes a rectification transform for an uncalibrated stereo camera. 5835 * 5836 * @param points1 Array of feature points in the first image. 5837 * @param points2 The corresponding points in the second image. The same formats as in 5838 * #findFundamentalMat are supported. 5839 * @param F Input fundamental matrix. It can be computed from the same set of point pairs using 5840 * #findFundamentalMat . 5841 * @param imgSize Size of the image. 5842 * @param H1 Output rectification homography matrix for the first image. 5843 * @param H2 Output rectification homography matrix for the second image. 5844 * @param threshold Optional threshold used to filter out the outliers. If the parameter is greater 5845 * than zero, all the point pairs that do not comply with the epipolar geometry (that is, the points 5846 * for which \(|\texttt{points2[i]}^T*\texttt{F}*\texttt{points1[i]}|>\texttt{threshold}\) ) are 5847 * rejected prior to computing the homographies. 
Otherwise, all the points are considered inliers.
5848 *
5849 * The function computes the rectification transformations without knowing intrinsic parameters of the
5850 * cameras and their relative position in the space, which explains the suffix "uncalibrated". Another
5851 * related difference from #stereoRectify is that the function outputs not the rectification
5852 * transformations in the object (3D) space, but the planar perspective transformations encoded by the
5853 * homography matrices H1 and H2 . The function implements the algorithm CITE: Hartley99 .
5854 *
5855 * <b>Note:</b>
5856 * While the algorithm does not need to know the intrinsic parameters of the cameras, it heavily
5857 * depends on the epipolar geometry. Therefore, if the camera lenses have a significant distortion,
5858 * it would be better to correct it before computing the fundamental matrix and calling this
5859 * function. For example, distortion coefficients can be estimated for each head of the stereo camera
5860 * separately by using #calibrateCamera . Then, the images can be corrected using #undistort , or
5861 * just the point coordinates can be corrected with #undistortPoints .
5862 * @return automatically generated
5863 */
5864 public static boolean stereoRectifyUncalibrated(Mat points1, Mat points2, Mat F, Size imgSize, Mat H1, Mat H2, double threshold) {
5865 return stereoRectifyUncalibrated_0(points1.nativeObj, points2.nativeObj, F.nativeObj, imgSize.width, imgSize.height, H1.nativeObj, H2.nativeObj, threshold);
5866 }
5867
5868 /**
5869 * Computes a rectification transform for an uncalibrated stereo camera.
5870 *
5871 * @param points1 Array of feature points in the first image.
5872 * @param points2 The corresponding points in the second image. The same formats as in
5873 * #findFundamentalMat are supported.
5874 * @param F Input fundamental matrix. It can be computed from the same set of point pairs using
5875 * #findFundamentalMat .
5876 * @param imgSize Size of the image.
5877 * @param H1 Output rectification homography matrix for the first image.
5878 * @param H2 Output rectification homography matrix for the second image.
5882 *
5883 * The function computes the rectification transformations without knowing intrinsic parameters of the
5884 * cameras and their relative position in the space, which explains the suffix "uncalibrated". Another
5885 * related difference from #stereoRectify is that the function outputs not the rectification
5886 * transformations in the object (3D) space, but the planar perspective transformations encoded by the
5887 * homography matrices H1 and H2 . The function implements the algorithm CITE: Hartley99 .
5888 *
5889 * <b>Note:</b>
5890 * While the algorithm does not need to know the intrinsic parameters of the cameras, it heavily
5891 * depends on the epipolar geometry. Therefore, if the camera lenses have a significant distortion,
5892 * it would be better to correct it before computing the fundamental matrix and calling this
5893 * function. For example, distortion coefficients can be estimated for each head of the stereo camera
5894 * separately by using #calibrateCamera .
Then, the images can be corrected using #undistort , or 5895 * just the point coordinates can be corrected with #undistortPoints . 5896 * @return automatically generated 5897 */ 5898 public static boolean stereoRectifyUncalibrated(Mat points1, Mat points2, Mat F, Size imgSize, Mat H1, Mat H2) { 5899 return stereoRectifyUncalibrated_1(points1.nativeObj, points2.nativeObj, F.nativeObj, imgSize.width, imgSize.height, H1.nativeObj, H2.nativeObj); 5900 } 5901 5902 5903 // 5904 // C++: float cv::rectify3Collinear(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Mat cameraMatrix3, Mat distCoeffs3, vector_Mat imgpt1, vector_Mat imgpt3, Size imageSize, Mat R12, Mat T12, Mat R13, Mat T13, Mat& R1, Mat& R2, Mat& R3, Mat& P1, Mat& P2, Mat& P3, Mat& Q, double alpha, Size newImgSize, Rect* roi1, Rect* roi2, int flags) 5905 // 5906 5907 public static float rectify3Collinear(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Mat cameraMatrix3, Mat distCoeffs3, List<Mat> imgpt1, List<Mat> imgpt3, Size imageSize, Mat R12, Mat T12, Mat R13, Mat T13, Mat R1, Mat R2, Mat R3, Mat P1, Mat P2, Mat P3, Mat Q, double alpha, Size newImgSize, Rect roi1, Rect roi2, int flags) { 5908 Mat imgpt1_mat = Converters.vector_Mat_to_Mat(imgpt1); 5909 Mat imgpt3_mat = Converters.vector_Mat_to_Mat(imgpt3); 5910 double[] roi1_out = new double[4]; 5911 double[] roi2_out = new double[4]; 5912 float retVal = rectify3Collinear_0(cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, cameraMatrix3.nativeObj, distCoeffs3.nativeObj, imgpt1_mat.nativeObj, imgpt3_mat.nativeObj, imageSize.width, imageSize.height, R12.nativeObj, T12.nativeObj, R13.nativeObj, T13.nativeObj, R1.nativeObj, R2.nativeObj, R3.nativeObj, P1.nativeObj, P2.nativeObj, P3.nativeObj, Q.nativeObj, alpha, newImgSize.width, newImgSize.height, roi1_out, roi2_out, flags); 5913 if(roi1!=null){ roi1.x = (int)roi1_out[0]; roi1.y = (int)roi1_out[1]; roi1.width = (int)roi1_out[2]; roi1.height = (int)roi1_out[3]; } 5914 if(roi2!=null){ roi2.x = (int)roi2_out[0]; roi2.y = (int)roi2_out[1]; roi2.width = (int)roi2_out[2]; roi2.height = (int)roi2_out[3]; } 5915 return retVal; 5916 } 5917 5918 5919 // 5920 // C++: Mat cv::getOptimalNewCameraMatrix(Mat cameraMatrix, Mat distCoeffs, Size imageSize, double alpha, Size newImgSize = Size(), Rect* validPixROI = 0, bool centerPrincipalPoint = false) 5921 // 5922 5923 /** 5924 * Returns the new camera intrinsic matrix based on the free scaling parameter. 5925 * 5926 * @param cameraMatrix Input camera intrinsic matrix. 5927 * @param distCoeffs Input vector of distortion coefficients 5928 * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are 5929 * assumed. 5930 * @param imageSize Original image size. 5931 * @param alpha Free scaling parameter between 0 (when all the pixels in the undistorted image are 5932 * valid) and 1 (when all the source image pixels are retained in the undistorted image). See 5933 * #stereoRectify for details. 5934 * @param newImgSize Image size after rectification. By default, it is set to imageSize . 5935 * @param validPixROI Optional output rectangle that outlines all-good-pixels region in the 5936 * undistorted image. See roi1, roi2 description in #stereoRectify . 5937 * @param centerPrincipalPoint Optional flag that indicates whether in the new camera intrinsic matrix the 5938 * principal point should be at the image center or not. 
By default, the principal point is chosen to
5939 * best fit a subset of the source image (determined by alpha) to the corrected image.
5940 * @return new_camera_matrix Output new camera intrinsic matrix.
5941 *
5942 * The function computes and returns the optimal new camera intrinsic matrix based on the free scaling parameter.
5943 * By varying this parameter, you may retrieve only sensible pixels (alpha=0), keep all the original
5944 * image pixels if there is valuable information in the corners (alpha=1), or get something in between.
5945 * When alpha>0 , the undistorted result is likely to have some black pixels corresponding to
5946 * "virtual" pixels outside of the captured distorted image. The original camera intrinsic matrix, distortion
5947 * coefficients, the computed new camera intrinsic matrix, and newImageSize should be passed to
5948 * #initUndistortRectifyMap to produce the maps for #remap .
5949 */
5950 public static Mat getOptimalNewCameraMatrix(Mat cameraMatrix, Mat distCoeffs, Size imageSize, double alpha, Size newImgSize, Rect validPixROI, boolean centerPrincipalPoint) {
5951 double[] validPixROI_out = new double[4];
5952 Mat retVal = new Mat(getOptimalNewCameraMatrix_0(cameraMatrix.nativeObj, distCoeffs.nativeObj, imageSize.width, imageSize.height, alpha, newImgSize.width, newImgSize.height, validPixROI_out, centerPrincipalPoint));
5953 if(validPixROI!=null){ validPixROI.x = (int)validPixROI_out[0]; validPixROI.y = (int)validPixROI_out[1]; validPixROI.width = (int)validPixROI_out[2]; validPixROI.height = (int)validPixROI_out[3]; }
5954 return retVal;
5955 }
5956
5957 /**
5958 * Returns the new camera intrinsic matrix based on the free scaling parameter.
5959 *
5960 * @param cameraMatrix Input camera intrinsic matrix.
5961 * @param distCoeffs Input vector of distortion coefficients
5962 * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
5963 * assumed.
5964 * @param imageSize Original image size.
5965 * @param alpha Free scaling parameter between 0 (when all the pixels in the undistorted image are
5966 * valid) and 1 (when all the source image pixels are retained in the undistorted image). See
5967 * #stereoRectify for details.
5968 * @param newImgSize Image size after rectification. By default, it is set to imageSize .
5969 * @param validPixROI Optional output rectangle that outlines all-good-pixels region in the
5970 * undistorted image. See roi1, roi2 description in #stereoRectify .
5973 * @return new_camera_matrix Output new camera intrinsic matrix.
5974 *
5975 * The function computes and returns the optimal new camera intrinsic matrix based on the free scaling parameter.
5976 * By varying this parameter, you may retrieve only sensible pixels (alpha=0), keep all the original
5977 * image pixels if there is valuable information in the corners (alpha=1), or get something in between.
5978 * When alpha>0 , the undistorted result is likely to have some black pixels corresponding to
5979 * "virtual" pixels outside of the captured distorted image. The original camera intrinsic matrix, distortion
5980 * coefficients, the computed new camera intrinsic matrix, and newImageSize should be passed to
5981 * #initUndistortRectifyMap to produce the maps for #remap .
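 *
 * <p>A minimal sketch of the undistortion pipeline described above (illustrative only; the input
 * {@code image}, the calibration results {@code K} and {@code dist}, and the alpha value 0.5 are
 * assumptions, not part of this documentation):</p>
 * <pre>{@code
 * Size size = image.size();
 * Rect roi = new Rect();
 * // Compute the new intrinsic matrix and the all-good-pixels ROI for alpha = 0.5.
 * Mat newK = Calib3d.getOptimalNewCameraMatrix(K, dist, size, 0.5, size, roi);
 * // Build the undistortion maps (identity rectification) and remap the image.
 * Mat map1 = new Mat(), map2 = new Mat();
 * Calib3d.initUndistortRectifyMap(K, dist, new Mat(), newK, size, CvType.CV_16SC2, map1, map2);
 * Mat undistorted = new Mat();
 * Imgproc.remap(image, undistorted, map1, map2, Imgproc.INTER_LINEAR);
 * Mat valid = undistorted.submat(roi); // keep only the all-good-pixels region
 * }</pre>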
5982 */
5983 public static Mat getOptimalNewCameraMatrix(Mat cameraMatrix, Mat distCoeffs, Size imageSize, double alpha, Size newImgSize, Rect validPixROI) {
5984 double[] validPixROI_out = new double[4];
5985 Mat retVal = new Mat(getOptimalNewCameraMatrix_1(cameraMatrix.nativeObj, distCoeffs.nativeObj, imageSize.width, imageSize.height, alpha, newImgSize.width, newImgSize.height, validPixROI_out));
5986 if(validPixROI!=null){ validPixROI.x = (int)validPixROI_out[0]; validPixROI.y = (int)validPixROI_out[1]; validPixROI.width = (int)validPixROI_out[2]; validPixROI.height = (int)validPixROI_out[3]; }
5987 return retVal;
5988 }
5989
5990 /**
5991 * Returns the new camera intrinsic matrix based on the free scaling parameter.
5992 *
5993 * @param cameraMatrix Input camera intrinsic matrix.
5994 * @param distCoeffs Input vector of distortion coefficients
5995 * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
5996 * assumed.
5997 * @param imageSize Original image size.
5998 * @param alpha Free scaling parameter between 0 (when all the pixels in the undistorted image are
5999 * valid) and 1 (when all the source image pixels are retained in the undistorted image). See
6000 * #stereoRectify for details.
6001 * @param newImgSize Image size after rectification. By default, it is set to imageSize .
6005 * @return new_camera_matrix Output new camera intrinsic matrix.
6006 *
6007 * The function computes and returns the optimal new camera intrinsic matrix based on the free scaling parameter.
6008 * By varying this parameter, you may retrieve only sensible pixels (alpha=0), keep all the original
6009 * image pixels if there is valuable information in the corners (alpha=1), or get something in between.
6010 * When alpha>0 , the undistorted result is likely to have some black pixels corresponding to
6011 * "virtual" pixels outside of the captured distorted image. The original camera intrinsic matrix, distortion
6012 * coefficients, the computed new camera intrinsic matrix, and newImageSize should be passed to
6013 * #initUndistortRectifyMap to produce the maps for #remap .
6014 */
6015 public static Mat getOptimalNewCameraMatrix(Mat cameraMatrix, Mat distCoeffs, Size imageSize, double alpha, Size newImgSize) {
6016 return new Mat(getOptimalNewCameraMatrix_2(cameraMatrix.nativeObj, distCoeffs.nativeObj, imageSize.width, imageSize.height, alpha, newImgSize.width, newImgSize.height));
6017 }
6018
6019 /**
6020 * Returns the new camera intrinsic matrix based on the free scaling parameter.
6021 *
6022 * @param cameraMatrix Input camera intrinsic matrix.
6023 * @param distCoeffs Input vector of distortion coefficients
6024 * \(\distcoeffs\). If the vector is NULL/empty, the zero distortion coefficients are
6025 * assumed.
6026 * @param imageSize Original image size.
6027 * @param alpha Free scaling parameter between 0 (when all the pixels in the undistorted image are
6028 * valid) and 1 (when all the source image pixels are retained in the undistorted image). See
6029 * #stereoRectify for details.
* @return new_camera_matrix Output new camera intrinsic matrix.
     *
     * The function computes and returns the optimal new camera intrinsic matrix based on the free scaling parameter.
     * By varying this parameter, you may retrieve only sensible pixels (alpha=0), keep all the original
     * image pixels if there is valuable information in the corners (alpha=1), or get something in between.
     * When alpha>0, the undistorted result is likely to have some black pixels corresponding to
     * "virtual" pixels outside of the captured distorted image. The original camera intrinsic matrix, distortion
     * coefficients, the computed new camera intrinsic matrix, and newImageSize should be passed to
     * #initUndistortRectifyMap to produce the maps for #remap .
     */
    public static Mat getOptimalNewCameraMatrix(Mat cameraMatrix, Mat distCoeffs, Size imageSize, double alpha) {
        return new Mat(getOptimalNewCameraMatrix_3(cameraMatrix.nativeObj, distCoeffs.nativeObj, imageSize.width, imageSize.height, alpha));
    }


    //
    // C++: void cv::calibrateHandEye(vector_Mat R_gripper2base, vector_Mat t_gripper2base, vector_Mat R_target2cam, vector_Mat t_target2cam, Mat& R_cam2gripper, Mat& t_cam2gripper, HandEyeCalibrationMethod method = CALIB_HAND_EYE_TSAI)
    //

    /**
     * Computes Hand-Eye calibration: \(_{}^{g}\textrm{T}_c\)
     *
     * @param R_gripper2base Rotation part extracted from the homogeneous matrix that transforms a point
     * expressed in the gripper frame to the robot base frame (\(_{}^{b}\textrm{T}_g\)).
     * This is a vector ({@code vector<Mat>}) that contains the rotation as {@code (3x3)} rotation matrices or {@code (3x1)} rotation vectors,
     * for all the transformations from gripper frame to robot base frame.
     * @param t_gripper2base Translation part extracted from the homogeneous matrix that transforms a point
     * expressed in the gripper frame to the robot base frame (\(_{}^{b}\textrm{T}_g\)).
     * This is a vector ({@code vector<Mat>}) that contains the {@code (3x1)} translation vectors for all the transformations
     * from gripper frame to robot base frame.
     * @param R_target2cam Rotation part extracted from the homogeneous matrix that transforms a point
     * expressed in the target frame to the camera frame (\(_{}^{c}\textrm{T}_t\)).
     * This is a vector ({@code vector<Mat>}) that contains the rotation as {@code (3x3)} rotation matrices or {@code (3x1)} rotation vectors,
     * for all the transformations from calibration target frame to camera frame.
     * @param t_target2cam Translation part extracted from the homogeneous matrix that transforms a point
     * expressed in the target frame to the camera frame (\(_{}^{c}\textrm{T}_t\)).
     * This is a vector ({@code vector<Mat>}) that contains the {@code (3x1)} translation vectors for all the transformations
     * from calibration target frame to camera frame.
     * @param R_cam2gripper Estimated {@code (3x3)} rotation part extracted from the homogeneous matrix that transforms a point
     * expressed in the camera frame to the gripper frame (\(_{}^{g}\textrm{T}_c\)).
     * @param t_cam2gripper Estimated {@code (3x1)} translation part extracted from the homogeneous matrix that transforms a point
     * expressed in the camera frame to the gripper frame (\(_{}^{g}\textrm{T}_c\)).
* @param method One of the implemented Hand-Eye calibration methods, see cv::HandEyeCalibrationMethod
     *
     * The function performs the Hand-Eye calibration using various methods. One approach consists in estimating the
     * rotation then the translation (separable solutions) and the following methods are implemented:
     * <ul>
     * <li>
     * R. Tsai, R. Lenz A New Technique for Fully Autonomous and Efficient 3D Robotics Hand/Eye Calibration \cite Tsai89
     * </li>
     * <li>
     * F. Park, B. Martin Robot Sensor Calibration: Solving AX = XB on the Euclidean Group \cite Park94
     * </li>
     * <li>
     * R. Horaud, F. Dornaika Hand-Eye Calibration \cite Horaud95
     * </li>
     * </ul>
     *
     * Another approach consists in estimating simultaneously the rotation and the translation (simultaneous solutions),
     * with the following implemented methods:
     * <ul>
     * <li>
     * N. Andreff, R. Horaud, B. Espiau On-line Hand-Eye Calibration \cite Andreff99
     * </li>
     * <li>
     * K. Daniilidis Hand-Eye Calibration Using Dual Quaternions \cite Daniilidis98
     * </li>
     * </ul>
     *
     * The following picture describes the Hand-Eye calibration problem, where the transformation between a camera ("eye")
     * mounted on a robot gripper ("hand") and the gripper itself has to be estimated. This configuration is called eye-in-hand.
     *
     * The eye-to-hand configuration consists in a static camera observing a calibration pattern mounted on the robot
     * end-effector. The transformation from the camera to the robot base frame can then be estimated by inputting
     * the suitable transformations to the function, see below.
     *
     * ![](pics/hand-eye_figure.png)
     *
     * The calibration procedure is the following:
     * <ul>
     * <li>
     * a static calibration pattern is used to estimate the transformation between the target frame
     * and the camera frame
     * </li>
     * <li>
     * the robot gripper is moved in order to acquire several poses
     * </li>
     * <li>
     * for each pose, the homogeneous transformation between the gripper frame and the robot base frame is recorded using for
     * instance the robot kinematics
     * \(
     * \begin{bmatrix}
     * X_b\\
     * Y_b\\
     * Z_b\\
     * 1
     * \end{bmatrix}
     * =
     * \begin{bmatrix}
     * _{}^{b}\textrm{R}_g & _{}^{b}\textrm{t}_g \\
     * 0_{1 \times 3} & 1
     * \end{bmatrix}
     * \begin{bmatrix}
     * X_g\\
     * Y_g\\
     * Z_g\\
     * 1
     * \end{bmatrix}
     * \)
     * </li>
     * <li>
     * for each pose, the homogeneous transformation between the calibration target frame and the camera frame is recorded using
     * for instance a pose estimation method (PnP) from 2D-3D point correspondences
     * \(
     * \begin{bmatrix}
     * X_c\\
     * Y_c\\
     * Z_c\\
     * 1
     * \end{bmatrix}
     * =
     * \begin{bmatrix}
     * _{}^{c}\textrm{R}_t & _{}^{c}\textrm{t}_t \\
     * 0_{1 \times 3} & 1
     * \end{bmatrix}
     * \begin{bmatrix}
     * X_t\\
     * Y_t\\
     * Z_t\\
     * 1
     * \end{bmatrix}
     * \)
     * </li>
     * </ul>
     *
     * The Hand-Eye calibration procedure returns the following homogeneous transformation
     * \(
     * \begin{bmatrix}
     * X_g\\
     * Y_g\\
     * Z_g\\
     * 1
     * \end{bmatrix}
     * =
     * \begin{bmatrix}
     * _{}^{g}\textrm{R}_c & _{}^{g}\textrm{t}_c \\
     * 0_{1 \times 3} & 1
     * \end{bmatrix}
     * \begin{bmatrix}
     * X_c\\
     * Y_c\\
     * Z_c\\
     * 1
* \end{bmatrix}
     * \)
     *
     * This problem is also known as solving the \(\mathbf{A}\mathbf{X}=\mathbf{X}\mathbf{B}\) equation:
     * <ul>
     * <li>
     * for an eye-in-hand configuration
     * \(
     * \begin{align*}
     * ^{b}{\textrm{T}_g}^{(1)} \hspace{0.2em} ^{g}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(1)} &=
     * \hspace{0.1em} ^{b}{\textrm{T}_g}^{(2)} \hspace{0.2em} ^{g}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} \\
     *
     * (^{b}{\textrm{T}_g}^{(2)})^{-1} \hspace{0.2em} ^{b}{\textrm{T}_g}^{(1)} \hspace{0.2em} ^{g}\textrm{T}_c &=
     * \hspace{0.1em} ^{g}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} (^{c}{\textrm{T}_t}^{(1)})^{-1} \\
     *
     * \textrm{A}_i \textrm{X} &= \textrm{X} \textrm{B}_i \\
     * \end{align*}
     * \)
     * </li>
     * <li>
     * for an eye-to-hand configuration
     * \(
     * \begin{align*}
     * ^{g}{\textrm{T}_b}^{(1)} \hspace{0.2em} ^{b}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(1)} &=
     * \hspace{0.1em} ^{g}{\textrm{T}_b}^{(2)} \hspace{0.2em} ^{b}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} \\
     *
     * (^{g}{\textrm{T}_b}^{(2)})^{-1} \hspace{0.2em} ^{g}{\textrm{T}_b}^{(1)} \hspace{0.2em} ^{b}\textrm{T}_c &=
     * \hspace{0.1em} ^{b}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} (^{c}{\textrm{T}_t}^{(1)})^{-1} \\
     *
     * \textrm{A}_i \textrm{X} &= \textrm{X} \textrm{B}_i \\
     * \end{align*}
     * \)
     * </li>
     * </ul>
     *
     * \note
     * Additional information can be found on this [website](http://campar.in.tum.de/Chair/HandEyeCalibration).
     * \note
     * A minimum of 2 motions with non-parallel rotation axes is necessary to determine the hand-eye transformation.
     * So at least 3 different poses are required, but it is strongly recommended to use many more poses.
     */
    public static void calibrateHandEye(List<Mat> R_gripper2base, List<Mat> t_gripper2base, List<Mat> R_target2cam, List<Mat> t_target2cam, Mat R_cam2gripper, Mat t_cam2gripper, int method) {
        Mat R_gripper2base_mat = Converters.vector_Mat_to_Mat(R_gripper2base);
        Mat t_gripper2base_mat = Converters.vector_Mat_to_Mat(t_gripper2base);
        Mat R_target2cam_mat = Converters.vector_Mat_to_Mat(R_target2cam);
        Mat t_target2cam_mat = Converters.vector_Mat_to_Mat(t_target2cam);
        calibrateHandEye_0(R_gripper2base_mat.nativeObj, t_gripper2base_mat.nativeObj, R_target2cam_mat.nativeObj, t_target2cam_mat.nativeObj, R_cam2gripper.nativeObj, t_cam2gripper.nativeObj, method);
    }

    /**
     * Computes Hand-Eye calibration: \(_{}^{g}\textrm{T}_c\)
     *
     * @param R_gripper2base Rotation part extracted from the homogeneous matrix that transforms a point
     * expressed in the gripper frame to the robot base frame (\(_{}^{b}\textrm{T}_g\)).
     * This is a vector ({@code vector<Mat>}) that contains the rotation as {@code (3x3)} rotation matrices or {@code (3x1)} rotation vectors,
     * for all the transformations from gripper frame to robot base frame.
     * @param t_gripper2base Translation part extracted from the homogeneous matrix that transforms a point
     * expressed in the gripper frame to the robot base frame (\(_{}^{b}\textrm{T}_g\)).
     * This is a vector ({@code vector<Mat>}) that contains the {@code (3x1)} translation vectors for all the transformations
     * from gripper frame to robot base frame.
* @param R_target2cam Rotation part extracted from the homogeneous matrix that transforms a point
     * expressed in the target frame to the camera frame (\(_{}^{c}\textrm{T}_t\)).
     * This is a vector ({@code vector<Mat>}) that contains the rotation as {@code (3x3)} rotation matrices or {@code (3x1)} rotation vectors,
     * for all the transformations from calibration target frame to camera frame.
     * @param t_target2cam Translation part extracted from the homogeneous matrix that transforms a point
     * expressed in the target frame to the camera frame (\(_{}^{c}\textrm{T}_t\)).
     * This is a vector ({@code vector<Mat>}) that contains the {@code (3x1)} translation vectors for all the transformations
     * from calibration target frame to camera frame.
     * @param R_cam2gripper Estimated {@code (3x3)} rotation part extracted from the homogeneous matrix that transforms a point
     * expressed in the camera frame to the gripper frame (\(_{}^{g}\textrm{T}_c\)).
     * @param t_cam2gripper Estimated {@code (3x1)} translation part extracted from the homogeneous matrix that transforms a point
     * expressed in the camera frame to the gripper frame (\(_{}^{g}\textrm{T}_c\)).
     *
     * The function performs the Hand-Eye calibration using various methods. One approach consists in estimating the
     * rotation then the translation (separable solutions) and the following methods are implemented:
     * <ul>
     * <li>
     * R. Tsai, R. Lenz A New Technique for Fully Autonomous and Efficient 3D Robotics Hand/Eye Calibration \cite Tsai89
     * </li>
     * <li>
     * F. Park, B. Martin Robot Sensor Calibration: Solving AX = XB on the Euclidean Group \cite Park94
     * </li>
     * <li>
     * R. Horaud, F. Dornaika Hand-Eye Calibration \cite Horaud95
     * </li>
     * </ul>
     *
     * Another approach consists in estimating simultaneously the rotation and the translation (simultaneous solutions),
     * with the following implemented methods:
     * <ul>
     * <li>
     * N. Andreff, R. Horaud, B. Espiau On-line Hand-Eye Calibration \cite Andreff99
     * </li>
     * <li>
     * K. Daniilidis Hand-Eye Calibration Using Dual Quaternions \cite Daniilidis98
     * </li>
     * </ul>
     *
     * The following picture describes the Hand-Eye calibration problem, where the transformation between a camera ("eye")
     * mounted on a robot gripper ("hand") and the gripper itself has to be estimated. This configuration is called eye-in-hand.
     *
     * The eye-to-hand configuration consists in a static camera observing a calibration pattern mounted on the robot
     * end-effector. The transformation from the camera to the robot base frame can then be estimated by inputting
     * the suitable transformations to the function, see below.
*
     * ![](pics/hand-eye_figure.png)
     *
     * The calibration procedure is the following:
     * <ul>
     * <li>
     * a static calibration pattern is used to estimate the transformation between the target frame
     * and the camera frame
     * </li>
     * <li>
     * the robot gripper is moved in order to acquire several poses
     * </li>
     * <li>
     * for each pose, the homogeneous transformation between the gripper frame and the robot base frame is recorded using for
     * instance the robot kinematics
     * \(
     * \begin{bmatrix}
     * X_b\\
     * Y_b\\
     * Z_b\\
     * 1
     * \end{bmatrix}
     * =
     * \begin{bmatrix}
     * _{}^{b}\textrm{R}_g & _{}^{b}\textrm{t}_g \\
     * 0_{1 \times 3} & 1
     * \end{bmatrix}
     * \begin{bmatrix}
     * X_g\\
     * Y_g\\
     * Z_g\\
     * 1
     * \end{bmatrix}
     * \)
     * </li>
     * <li>
     * for each pose, the homogeneous transformation between the calibration target frame and the camera frame is recorded using
     * for instance a pose estimation method (PnP) from 2D-3D point correspondences
     * \(
     * \begin{bmatrix}
     * X_c\\
     * Y_c\\
     * Z_c\\
     * 1
     * \end{bmatrix}
     * =
     * \begin{bmatrix}
     * _{}^{c}\textrm{R}_t & _{}^{c}\textrm{t}_t \\
     * 0_{1 \times 3} & 1
     * \end{bmatrix}
     * \begin{bmatrix}
     * X_t\\
     * Y_t\\
     * Z_t\\
     * 1
     * \end{bmatrix}
     * \)
     * </li>
     * </ul>
     *
     * The Hand-Eye calibration procedure returns the following homogeneous transformation
     * \(
     * \begin{bmatrix}
     * X_g\\
     * Y_g\\
     * Z_g\\
     * 1
     * \end{bmatrix}
     * =
     * \begin{bmatrix}
     * _{}^{g}\textrm{R}_c & _{}^{g}\textrm{t}_c \\
     * 0_{1 \times 3} & 1
     * \end{bmatrix}
     * \begin{bmatrix}
     * X_c\\
     * Y_c\\
     * Z_c\\
     * 1
     * \end{bmatrix}
     * \)
     *
     * This problem is also known as solving the \(\mathbf{A}\mathbf{X}=\mathbf{X}\mathbf{B}\) equation:
     * <ul>
     * <li>
     * for an eye-in-hand configuration
     * \(
     * \begin{align*}
     * ^{b}{\textrm{T}_g}^{(1)} \hspace{0.2em} ^{g}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(1)} &=
     * \hspace{0.1em} ^{b}{\textrm{T}_g}^{(2)} \hspace{0.2em} ^{g}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} \\
     *
     * (^{b}{\textrm{T}_g}^{(2)})^{-1} \hspace{0.2em} ^{b}{\textrm{T}_g}^{(1)} \hspace{0.2em} ^{g}\textrm{T}_c &=
     * \hspace{0.1em} ^{g}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} (^{c}{\textrm{T}_t}^{(1)})^{-1} \\
     *
     * \textrm{A}_i \textrm{X} &= \textrm{X} \textrm{B}_i \\
     * \end{align*}
     * \)
     * </li>
     * <li>
     * for an eye-to-hand configuration
     * \(
     * \begin{align*}
     * ^{g}{\textrm{T}_b}^{(1)} \hspace{0.2em} ^{b}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(1)} &=
     * \hspace{0.1em} ^{g}{\textrm{T}_b}^{(2)} \hspace{0.2em} ^{b}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} \\
     *
     * (^{g}{\textrm{T}_b}^{(2)})^{-1} \hspace{0.2em} ^{g}{\textrm{T}_b}^{(1)} \hspace{0.2em} ^{b}\textrm{T}_c &=
     * \hspace{0.1em} ^{b}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} (^{c}{\textrm{T}_t}^{(1)})^{-1} \\
     *
     * \textrm{A}_i \textrm{X} &= \textrm{X} \textrm{B}_i \\
     * \end{align*}
     * \)
     * </li>
     * </ul>
     *
     * \note
     * Additional information can be found on this [website](http://campar.in.tum.de/Chair/HandEyeCalibration).
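     *
     * For reference, a minimal eye-in-hand Java sketch (a hedged illustration, not generated code;
     * the four lists are assumed to hold one rotation/translation pair per robot pose):
     * <code>
     * Mat R_cam2gripper = new Mat(), t_cam2gripper = new Mat();
     * Calib3d.calibrateHandEye(R_gripper2base, t_gripper2base, R_target2cam, t_target2cam,
     *         R_cam2gripper, t_cam2gripper);
     * </code>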
* \note
     * A minimum of 2 motions with non-parallel rotation axes is necessary to determine the hand-eye transformation.
     * So at least 3 different poses are required, but it is strongly recommended to use many more poses.
     */
    public static void calibrateHandEye(List<Mat> R_gripper2base, List<Mat> t_gripper2base, List<Mat> R_target2cam, List<Mat> t_target2cam, Mat R_cam2gripper, Mat t_cam2gripper) {
        Mat R_gripper2base_mat = Converters.vector_Mat_to_Mat(R_gripper2base);
        Mat t_gripper2base_mat = Converters.vector_Mat_to_Mat(t_gripper2base);
        Mat R_target2cam_mat = Converters.vector_Mat_to_Mat(R_target2cam);
        Mat t_target2cam_mat = Converters.vector_Mat_to_Mat(t_target2cam);
        calibrateHandEye_1(R_gripper2base_mat.nativeObj, t_gripper2base_mat.nativeObj, R_target2cam_mat.nativeObj, t_target2cam_mat.nativeObj, R_cam2gripper.nativeObj, t_cam2gripper.nativeObj);
    }


    //
    // C++: void cv::calibrateRobotWorldHandEye(vector_Mat R_world2cam, vector_Mat t_world2cam, vector_Mat R_base2gripper, vector_Mat t_base2gripper, Mat& R_base2world, Mat& t_base2world, Mat& R_gripper2cam, Mat& t_gripper2cam, RobotWorldHandEyeCalibrationMethod method = CALIB_ROBOT_WORLD_HAND_EYE_SHAH)
    //

    /**
     * Computes Robot-World/Hand-Eye calibration: \(_{}^{w}\textrm{T}_b\) and \(_{}^{c}\textrm{T}_g\)
     *
     * @param R_world2cam Rotation part extracted from the homogeneous matrix that transforms a point
     * expressed in the world frame to the camera frame (\(_{}^{c}\textrm{T}_w\)).
     * This is a vector ({@code vector<Mat>}) that contains the rotation as {@code (3x3)} rotation matrices or {@code (3x1)} rotation vectors,
     * for all the transformations from world frame to the camera frame.
     * @param t_world2cam Translation part extracted from the homogeneous matrix that transforms a point
     * expressed in the world frame to the camera frame (\(_{}^{c}\textrm{T}_w\)).
     * This is a vector ({@code vector<Mat>}) that contains the {@code (3x1)} translation vectors for all the transformations
     * from world frame to the camera frame.
     * @param R_base2gripper Rotation part extracted from the homogeneous matrix that transforms a point
     * expressed in the robot base frame to the gripper frame (\(_{}^{g}\textrm{T}_b\)).
     * This is a vector ({@code vector<Mat>}) that contains the rotation as {@code (3x3)} rotation matrices or {@code (3x1)} rotation vectors,
     * for all the transformations from robot base frame to the gripper frame.
     * @param t_base2gripper Translation part extracted from the homogeneous matrix that transforms a point
     * expressed in the robot base frame to the gripper frame (\(_{}^{g}\textrm{T}_b\)).
     * This is a vector ({@code vector<Mat>}) that contains the {@code (3x1)} translation vectors for all the transformations
     * from robot base frame to the gripper frame.
     * @param R_base2world Estimated {@code (3x3)} rotation part extracted from the homogeneous matrix that transforms a point
     * expressed in the robot base frame to the world frame (\(_{}^{w}\textrm{T}_b\)).
     * @param t_base2world Estimated {@code (3x1)} translation part extracted from the homogeneous matrix that transforms a point
     * expressed in the robot base frame to the world frame (\(_{}^{w}\textrm{T}_b\)).
* @param R_gripper2cam Estimated {@code (3x3)} rotation part extracted from the homogeneous matrix that transforms a point
     * expressed in the gripper frame to the camera frame (\(_{}^{c}\textrm{T}_g\)).
     * @param t_gripper2cam Estimated {@code (3x1)} translation part extracted from the homogeneous matrix that transforms a point
     * expressed in the gripper frame to the camera frame (\(_{}^{c}\textrm{T}_g\)).
     * @param method One of the implemented Robot-World/Hand-Eye calibration methods, see cv::RobotWorldHandEyeCalibrationMethod
     *
     * The function performs the Robot-World/Hand-Eye calibration using various methods. One approach consists in estimating the
     * rotation then the translation (separable solutions):
     * <ul>
     * <li>
     * M. Shah, Solving the robot-world/hand-eye calibration problem using the kronecker product \cite Shah2013SolvingTR
     * </li>
     * </ul>
     *
     * Another approach consists in estimating simultaneously the rotation and the translation (simultaneous solutions),
     * with the following implemented method:
     * <ul>
     * <li>
     * A. Li, L. Wang, and D. Wu, Simultaneous robot-world and hand-eye calibration using dual-quaternions and kronecker product \cite Li2010SimultaneousRA
     * </li>
     * </ul>
     *
     * The following picture describes the Robot-World/Hand-Eye calibration problem where the transformations between a robot and a world frame
     * and between a robot gripper ("hand") and a camera ("eye") mounted at the robot end-effector have to be estimated.
     *
     * ![](pics/robot-world_hand-eye_figure.png)
     *
     * The calibration procedure is the following:
     * <ul>
     * <li>
     * a static calibration pattern is used to estimate the transformation between the target frame
     * and the camera frame
     * </li>
     * <li>
     * the robot gripper is moved in order to acquire several poses
     * </li>
     * <li>
     * for each pose, the homogeneous transformation between the gripper frame and the robot base frame is recorded using for
     * instance the robot kinematics
     * \(
     * \begin{bmatrix}
     * X_g\\
     * Y_g\\
     * Z_g\\
     * 1
     * \end{bmatrix}
     * =
     * \begin{bmatrix}
     * _{}^{g}\textrm{R}_b & _{}^{g}\textrm{t}_b \\
     * 0_{1 \times 3} & 1
     * \end{bmatrix}
     * \begin{bmatrix}
     * X_b\\
     * Y_b\\
     * Z_b\\
     * 1
     * \end{bmatrix}
     * \)
     * </li>
     * <li>
     * for each pose, the homogeneous transformation between the calibration target frame (the world frame) and the camera frame is recorded using
     * for instance a pose estimation method (PnP) from 2D-3D point correspondences
     * \(
     * \begin{bmatrix}
     * X_c\\
     * Y_c\\
     * Z_c\\
     * 1
     * \end{bmatrix}
     * =
     * \begin{bmatrix}
     * _{}^{c}\textrm{R}_w & _{}^{c}\textrm{t}_w \\
     * 0_{1 \times 3} & 1
     * \end{bmatrix}
     * \begin{bmatrix}
     * X_w\\
     * Y_w\\
     * Z_w\\
     * 1
     * \end{bmatrix}
     * \)
     * </li>
     * </ul>
     *
     * The Robot-World/Hand-Eye calibration procedure returns the following homogeneous transformations
     * \(
     * \begin{bmatrix}
     * X_w\\
     * Y_w\\
     * Z_w\\
     * 1
     * \end{bmatrix}
     * =
     * \begin{bmatrix}
     * _{}^{w}\textrm{R}_b & _{}^{w}\textrm{t}_b \\
     * 0_{1 \times 3} & 1
     * \end{bmatrix}
     * \begin{bmatrix}
     * X_b\\
     * Y_b\\
     * Z_b\\
     * 1
     * \end{bmatrix}
     * \)
     * \(
     * \begin{bmatrix}
* X_c\\
     * Y_c\\
     * Z_c\\
     * 1
     * \end{bmatrix}
     * =
     * \begin{bmatrix}
     * _{}^{c}\textrm{R}_g & _{}^{c}\textrm{t}_g \\
     * 0_{1 \times 3} & 1
     * \end{bmatrix}
     * \begin{bmatrix}
     * X_g\\
     * Y_g\\
     * Z_g\\
     * 1
     * \end{bmatrix}
     * \)
     *
     * This problem is also known as solving the \(\mathbf{A}\mathbf{X}=\mathbf{Z}\mathbf{B}\) equation, with:
     * <ul>
     * <li>
     * \(\mathbf{A} \Leftrightarrow \hspace{0.1em} _{}^{c}\textrm{T}_w\)
     * </li>
     * <li>
     * \(\mathbf{X} \Leftrightarrow \hspace{0.1em} _{}^{w}\textrm{T}_b\)
     * </li>
     * <li>
     * \(\mathbf{Z} \Leftrightarrow \hspace{0.1em} _{}^{c}\textrm{T}_g\)
     * </li>
     * <li>
     * \(\mathbf{B} \Leftrightarrow \hspace{0.1em} _{}^{g}\textrm{T}_b\)
     * </li>
     * </ul>
     *
     * \note
     * At least 3 measurements are required (input vectors size must be greater or equal to 3).
     */
    public static void calibrateRobotWorldHandEye(List<Mat> R_world2cam, List<Mat> t_world2cam, List<Mat> R_base2gripper, List<Mat> t_base2gripper, Mat R_base2world, Mat t_base2world, Mat R_gripper2cam, Mat t_gripper2cam, int method) {
        Mat R_world2cam_mat = Converters.vector_Mat_to_Mat(R_world2cam);
        Mat t_world2cam_mat = Converters.vector_Mat_to_Mat(t_world2cam);
        Mat R_base2gripper_mat = Converters.vector_Mat_to_Mat(R_base2gripper);
        Mat t_base2gripper_mat = Converters.vector_Mat_to_Mat(t_base2gripper);
        calibrateRobotWorldHandEye_0(R_world2cam_mat.nativeObj, t_world2cam_mat.nativeObj, R_base2gripper_mat.nativeObj, t_base2gripper_mat.nativeObj, R_base2world.nativeObj, t_base2world.nativeObj, R_gripper2cam.nativeObj, t_gripper2cam.nativeObj, method);
    }

    /**
     * Computes Robot-World/Hand-Eye calibration: \(_{}^{w}\textrm{T}_b\) and \(_{}^{c}\textrm{T}_g\)
     *
     * @param R_world2cam Rotation part extracted from the homogeneous matrix that transforms a point
     * expressed in the world frame to the camera frame (\(_{}^{c}\textrm{T}_w\)).
     * This is a vector ({@code vector<Mat>}) that contains the rotation as {@code (3x3)} rotation matrices or {@code (3x1)} rotation vectors,
     * for all the transformations from world frame to the camera frame.
     * @param t_world2cam Translation part extracted from the homogeneous matrix that transforms a point
     * expressed in the world frame to the camera frame (\(_{}^{c}\textrm{T}_w\)).
     * This is a vector ({@code vector<Mat>}) that contains the {@code (3x1)} translation vectors for all the transformations
     * from world frame to the camera frame.
     * @param R_base2gripper Rotation part extracted from the homogeneous matrix that transforms a point
     * expressed in the robot base frame to the gripper frame (\(_{}^{g}\textrm{T}_b\)).
     * This is a vector ({@code vector<Mat>}) that contains the rotation as {@code (3x3)} rotation matrices or {@code (3x1)} rotation vectors,
     * for all the transformations from robot base frame to the gripper frame.
     * @param t_base2gripper Translation part extracted from the homogeneous matrix that transforms a point
     * expressed in the robot base frame to the gripper frame (\(_{}^{g}\textrm{T}_b\)).
     * This is a vector ({@code vector<Mat>}) that contains the {@code (3x1)} translation vectors for all the transformations
     * from robot base frame to the gripper frame.
6621 * @param R_base2world Estimated {@code (3x3)} rotation part extracted from the homogeneous matrix that transforms a point 6622 * expressed in the robot base frame to the world frame (\(_{}^{w}\textrm{T}_b\)). 6623 * @param t_base2world Estimated {@code (3x1)} translation part extracted from the homogeneous matrix that transforms a point 6624 * expressed in the robot base frame to the world frame (\(_{}^{w}\textrm{T}_b\)). 6625 * @param R_gripper2cam Estimated {@code (3x3)} rotation part extracted from the homogeneous matrix that transforms a point 6626 * expressed in the gripper frame to the camera frame (\(_{}^{c}\textrm{T}_g\)). 6627 * @param t_gripper2cam Estimated {@code (3x1)} translation part extracted from the homogeneous matrix that transforms a point 6628 * expressed in the gripper frame to the camera frame (\(_{}^{c}\textrm{T}_g\)). 6629 * 6630 * The function performs the Robot-World/Hand-Eye calibration using various methods. One approach consists in estimating the 6631 * rotation then the translation (separable solutions): 6632 * <ul> 6633 * <li> 6634 * M. Shah, Solving the robot-world/hand-eye calibration problem using the kronecker product \cite Shah2013SolvingTR 6635 * </li> 6636 * </ul> 6637 * 6638 * Another approach consists in estimating simultaneously the rotation and the translation (simultaneous solutions), 6639 * with the following implemented method: 6640 * <ul> 6641 * <li> 6642 * A. Li, L. Wang, and D. Wu, Simultaneous robot-world and hand-eye calibration using dual-quaternions and kronecker product \cite Li2010SimultaneousRA 6643 * </li> 6644 * </ul> 6645 * 6646 * The following picture describes the Robot-World/Hand-Eye calibration problem where the transformations between a robot and a world frame 6647 * and between a robot gripper ("hand") and a camera ("eye") mounted at the robot end-effector have to be estimated. 
6648 * 6649 * ![](pics/robot-world_hand-eye_figure.png) 6650 * 6651 * The calibration procedure is the following: 6652 * <ul> 6653 * <li> 6654 * a static calibration pattern is used to estimate the transformation between the target frame 6655 * and the camera frame 6656 * </li> 6657 * <li> 6658 * the robot gripper is moved in order to acquire several poses 6659 * </li> 6660 * <li> 6661 * for each pose, the homogeneous transformation between the gripper frame and the robot base frame is recorded using for 6662 * instance the robot kinematics 6663 * \( 6664 * \begin{bmatrix} 6665 * X_g\\ 6666 * Y_g\\ 6667 * Z_g\\ 6668 * 1 6669 * \end{bmatrix} 6670 * = 6671 * \begin{bmatrix} 6672 * _{}^{g}\textrm{R}_b & _{}^{g}\textrm{t}_b \\ 6673 * 0_{1 \times 3} & 1 6674 * \end{bmatrix} 6675 * \begin{bmatrix} 6676 * X_b\\ 6677 * Y_b\\ 6678 * Z_b\\ 6679 * 1 6680 * \end{bmatrix} 6681 * \) 6682 * </li> 6683 * <li> 6684 * for each pose, the homogeneous transformation between the calibration target frame (the world frame) and the camera frame is recorded using 6685 * for instance a pose estimation method (PnP) from 2D-3D point correspondences 6686 * \( 6687 * \begin{bmatrix} 6688 * X_c\\ 6689 * Y_c\\ 6690 * Z_c\\ 6691 * 1 6692 * \end{bmatrix} 6693 * = 6694 * \begin{bmatrix} 6695 * _{}^{c}\textrm{R}_w & _{}^{c}\textrm{t}_w \\ 6696 * 0_{1 \times 3} & 1 6697 * \end{bmatrix} 6698 * \begin{bmatrix} 6699 * X_w\\ 6700 * Y_w\\ 6701 * Z_w\\ 6702 * 1 6703 * \end{bmatrix} 6704 * \) 6705 * </li> 6706 * </ul> 6707 * 6708 * The Robot-World/Hand-Eye calibration procedure returns the following homogeneous transformations 6709 * \( 6710 * \begin{bmatrix} 6711 * X_w\\ 6712 * Y_w\\ 6713 * Z_w\\ 6714 * 1 6715 * \end{bmatrix} 6716 * = 6717 * \begin{bmatrix} 6718 * _{}^{w}\textrm{R}_b & _{}^{w}\textrm{t}_b \\ 6719 * 0_{1 \times 3} & 1 6720 * \end{bmatrix} 6721 * \begin{bmatrix} 6722 * X_b\\ 6723 * Y_b\\ 6724 * Z_b\\ 6725 * 1 6726 * \end{bmatrix} 6727 * \) 6728 * \( 6729 * \begin{bmatrix} 6730 * X_c\\ 6731 * Y_c\\ 6732 * Z_c\\ 6733 * 1 6734 * \end{bmatrix} 6735 * = 6736 * \begin{bmatrix} 6737 * _{}^{c}\textrm{R}_g & _{}^{c}\textrm{t}_g \\ 6738 * 0_{1 \times 3} & 1 6739 * \end{bmatrix} 6740 * \begin{bmatrix} 6741 * X_g\\ 6742 * Y_g\\ 6743 * Z_g\\ 6744 * 1 6745 * \end{bmatrix} 6746 * \) 6747 * 6748 * This problem is also known as solving the \(\mathbf{A}\mathbf{X}=\mathbf{Z}\mathbf{B}\) equation, with: 6749 * <ul> 6750 * <li> 6751 * \(\mathbf{A} \Leftrightarrow \hspace{0.1em} _{}^{c}\textrm{T}_w\) 6752 * </li> 6753 * <li> 6754 * \(\mathbf{X} \Leftrightarrow \hspace{0.1em} _{}^{w}\textrm{T}_b\) 6755 * </li> 6756 * <li> 6757 * \(\mathbf{Z} \Leftrightarrow \hspace{0.1em} _{}^{c}\textrm{T}_g\) 6758 * </li> 6759 * <li> 6760 * \(\mathbf{B} \Leftrightarrow \hspace{0.1em} _{}^{g}\textrm{T}_b\) 6761 * </li> 6762 * </ul> 6763 * 6764 * \note 6765 * At least 3 measurements are required (input vectors size must be greater or equal to 3). 
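     *
     * A minimal Java sketch (a hedged illustration, not generated code; the four input lists are
     * assumed to hold one rotation/translation pair per acquired pose):
     * <code>
     * Mat R_base2world = new Mat(), t_base2world = new Mat();
     * Mat R_gripper2cam = new Mat(), t_gripper2cam = new Mat();
     * Calib3d.calibrateRobotWorldHandEye(R_world2cam, t_world2cam, R_base2gripper, t_base2gripper,
     *         R_base2world, t_base2world, R_gripper2cam, t_gripper2cam);
     * </code>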
*/
    public static void calibrateRobotWorldHandEye(List<Mat> R_world2cam, List<Mat> t_world2cam, List<Mat> R_base2gripper, List<Mat> t_base2gripper, Mat R_base2world, Mat t_base2world, Mat R_gripper2cam, Mat t_gripper2cam) {
        Mat R_world2cam_mat = Converters.vector_Mat_to_Mat(R_world2cam);
        Mat t_world2cam_mat = Converters.vector_Mat_to_Mat(t_world2cam);
        Mat R_base2gripper_mat = Converters.vector_Mat_to_Mat(R_base2gripper);
        Mat t_base2gripper_mat = Converters.vector_Mat_to_Mat(t_base2gripper);
        calibrateRobotWorldHandEye_1(R_world2cam_mat.nativeObj, t_world2cam_mat.nativeObj, R_base2gripper_mat.nativeObj, t_base2gripper_mat.nativeObj, R_base2world.nativeObj, t_base2world.nativeObj, R_gripper2cam.nativeObj, t_gripper2cam.nativeObj);
    }


    //
    // C++: void cv::convertPointsToHomogeneous(Mat src, Mat& dst)
    //

    /**
     * Converts points from Euclidean to homogeneous space.
     *
     * @param src Input vector of N-dimensional points.
     * @param dst Output vector of N+1-dimensional points.
     *
     * The function converts points from Euclidean to homogeneous space by appending 1's to the tuple of
     * point coordinates. That is, each point (x1, x2, ..., xn) is converted to (x1, x2, ..., xn, 1).
     */
    public static void convertPointsToHomogeneous(Mat src, Mat dst) {
        convertPointsToHomogeneous_0(src.nativeObj, dst.nativeObj);
    }


    //
    // C++: void cv::convertPointsFromHomogeneous(Mat src, Mat& dst)
    //

    /**
     * Converts points from homogeneous to Euclidean space.
     *
     * @param src Input vector of N-dimensional points.
     * @param dst Output vector of N-1-dimensional points.
     *
     * The function converts points from homogeneous to Euclidean space using perspective projection. That is,
     * each point (x1, x2, ..., x(n-1), xn) is converted to (x1/xn, x2/xn, ..., x(n-1)/xn). When xn=0, the
     * output point coordinates will be (0,0,0,...).
     */
    public static void convertPointsFromHomogeneous(Mat src, Mat dst) {
        convertPointsFromHomogeneous_0(src.nativeObj, dst.nativeObj);
    }
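    // A hedged usage sketch (not generated code; assumes the OpenCV native library is loaded):
    // round-trip a pair of 2D points through homogeneous space.
    //
    //   MatOfPoint2f pts = new MatOfPoint2f(new Point(1, 2), new Point(3, 4));
    //   Mat ptsHom = new Mat();                               // (x, y) -> (x, y, 1)
    //   Calib3d.convertPointsToHomogeneous(pts, ptsHom);
    //   Mat ptsEuc = new Mat();                               // (x, y, w) -> (x/w, y/w)
    //   Calib3d.convertPointsFromHomogeneous(ptsHom, ptsEuc);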

    //
    // C++: Mat cv::findFundamentalMat(vector_Point2f points1, vector_Point2f points2, int method, double ransacReprojThreshold, double confidence, int maxIters, Mat& mask = Mat())
    //

    /**
     * Calculates a fundamental matrix from the corresponding points in two images.
     *
     * @param points1 Array of N points from the first image. The point coordinates should be
     * floating-point (single or double precision).
     * @param points2 Array of the second image points of the same size and format as points1 .
     * @param method Method for computing a fundamental matrix.
     * <ul>
     * <li>
     * REF: FM_7POINT for a 7-point algorithm. \(N = 7\)
     * </li>
     * <li>
     * REF: FM_8POINT for an 8-point algorithm. \(N \ge 8\)
     * </li>
     * <li>
     * REF: FM_RANSAC for the RANSAC algorithm. \(N \ge 8\)
     * </li>
     * <li>
     * REF: FM_LMEDS for the LMedS algorithm. \(N \ge 8\)
     * </li>
     * </ul>
     * @param ransacReprojThreshold Parameter used only for RANSAC. It is the maximum distance from a point to an epipolar
     * line in pixels, beyond which the point is considered an outlier and is not used for computing the
     * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
     * point localization, image resolution, and the image noise.
     * @param confidence Parameter used for the RANSAC and LMedS methods only. It specifies a desirable level
     * of confidence (probability) that the estimated matrix is correct.
     * @param mask optional output mask
     * @param maxIters The maximum number of robust method iterations.
     *
     * The epipolar geometry is described by the following equation:
     *
     * \([p_2; 1]^T F [p_1; 1] = 0\)
     *
     * where \(F\) is a fundamental matrix, \(p_1\) and \(p_2\) are corresponding points in the first and the
     * second images, respectively.
     *
     * The function calculates the fundamental matrix using one of four methods listed above and returns
     * the found fundamental matrix. Normally just one matrix is found. But in case of the 7-point
     * algorithm, the function may return up to 3 solutions ( \(9 \times 3\) matrix that stores all 3
     * matrices sequentially).
     *
     * The calculated fundamental matrix may be passed further to computeCorrespondEpilines that finds the
     * epipolar lines corresponding to the specified points. It can also be passed to
     * #stereoRectifyUncalibrated to compute the rectification transformation.
     * <code>
     * // Example. Estimation of fundamental matrix using the RANSAC algorithm
     * int point_count = 100;
     * vector<Point2f> points1(point_count);
     * vector<Point2f> points2(point_count);
     *
     * // initialize the points here ...
     * for( int i = 0; i < point_count; i++ )
     * {
     *     points1[i] = ...;
     *     points2[i] = ...;
     * }
     *
     * Mat fundamental_matrix =
     *     findFundamentalMat(points1, points2, FM_RANSAC, 3, 0.99);
     * </code>
     * @return automatically generated
     */
    public static Mat findFundamentalMat(MatOfPoint2f points1, MatOfPoint2f points2, int method, double ransacReprojThreshold, double confidence, int maxIters, Mat mask) {
        Mat points1_mat = points1;
        Mat points2_mat = points2;
        return new Mat(findFundamentalMat_0(points1_mat.nativeObj, points2_mat.nativeObj, method, ransacReprojThreshold, confidence, maxIters, mask.nativeObj));
    }
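    // A hedged Java counterpart of the C++ snippet above (not generated code; points1 and points2
    // are MatOfPoint2f instances assumed to be filled with matched pixel coordinates):
    //
    //   Mat mask = new Mat();
    //   Mat F = Calib3d.findFundamentalMat(points1, points2, Calib3d.FM_RANSAC, 3, 0.99, 1000, mask);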
    /**
     * Calculates a fundamental matrix from the corresponding points in two images.
     *
     * @param points1 Array of N points from the first image. The point coordinates should be
     * floating-point (single or double precision).
     * @param points2 Array of the second image points of the same size and format as points1 .
     * @param method Method for computing a fundamental matrix.
     * <ul>
     * <li>
     * REF: FM_7POINT for a 7-point algorithm. \(N = 7\)
     * </li>
     * <li>
     * REF: FM_8POINT for an 8-point algorithm. \(N \ge 8\)
     * </li>
     * <li>
     * REF: FM_RANSAC for the RANSAC algorithm. \(N \ge 8\)
     * </li>
     * <li>
     * REF: FM_LMEDS for the LMedS algorithm. \(N \ge 8\)
     * </li>
     * </ul>
     * @param ransacReprojThreshold Parameter used only for RANSAC. It is the maximum distance from a point to an epipolar
     * line in pixels, beyond which the point is considered an outlier and is not used for computing the
     * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
     * point localization, image resolution, and the image noise.
     * @param confidence Parameter used for the RANSAC and LMedS methods only. It specifies a desirable level
     * of confidence (probability) that the estimated matrix is correct.
     * @param maxIters The maximum number of robust method iterations.
     *
     * The epipolar geometry is described by the following equation:
     *
     * \([p_2; 1]^T F [p_1; 1] = 0\)
     *
     * where \(F\) is a fundamental matrix, \(p_1\) and \(p_2\) are corresponding points in the first and the
     * second images, respectively.
     *
     * The function calculates the fundamental matrix using one of four methods listed above and returns
     * the found fundamental matrix. Normally just one matrix is found. But in case of the 7-point
     * algorithm, the function may return up to 3 solutions ( \(9 \times 3\) matrix that stores all 3
     * matrices sequentially).
     *
     * The calculated fundamental matrix may be passed further to computeCorrespondEpilines that finds the
     * epipolar lines corresponding to the specified points. It can also be passed to
     * #stereoRectifyUncalibrated to compute the rectification transformation.
     * <code>
     * // Example. Estimation of fundamental matrix using the RANSAC algorithm
     * int point_count = 100;
     * vector<Point2f> points1(point_count);
     * vector<Point2f> points2(point_count);
     *
     * // initialize the points here ...
     * for( int i = 0; i < point_count; i++ )
     * {
     *     points1[i] = ...;
     *     points2[i] = ...;
     * }
     *
     * Mat fundamental_matrix =
     *     findFundamentalMat(points1, points2, FM_RANSAC, 3, 0.99);
     * </code>
     * @return automatically generated
     */
    public static Mat findFundamentalMat(MatOfPoint2f points1, MatOfPoint2f points2, int method, double ransacReprojThreshold, double confidence, int maxIters) {
        Mat points1_mat = points1;
        Mat points2_mat = points2;
        return new Mat(findFundamentalMat_1(points1_mat.nativeObj, points2_mat.nativeObj, method, ransacReprojThreshold, confidence, maxIters));
    }


    //
    // C++: Mat cv::findFundamentalMat(vector_Point2f points1, vector_Point2f points2, int method = FM_RANSAC, double ransacReprojThreshold = 3., double confidence = 0.99, Mat& mask = Mat())
    //

    public static Mat findFundamentalMat(MatOfPoint2f points1, MatOfPoint2f points2, int method, double ransacReprojThreshold, double confidence, Mat mask) {
        Mat points1_mat = points1;
        Mat points2_mat = points2;
        return new Mat(findFundamentalMat_2(points1_mat.nativeObj, points2_mat.nativeObj, method, ransacReprojThreshold, confidence, mask.nativeObj));
    }

    public static Mat findFundamentalMat(MatOfPoint2f points1, MatOfPoint2f points2, int method, double ransacReprojThreshold, double confidence) {
        Mat points1_mat = points1;
        Mat points2_mat = points2;
        return new Mat(findFundamentalMat_3(points1_mat.nativeObj, points2_mat.nativeObj, method, ransacReprojThreshold, confidence));
    }

    public static Mat findFundamentalMat(MatOfPoint2f points1, MatOfPoint2f points2, int method, double ransacReprojThreshold) {
        Mat points1_mat = points1;
        Mat points2_mat = points2;
        return new Mat(findFundamentalMat_4(points1_mat.nativeObj, points2_mat.nativeObj, method, ransacReprojThreshold));
    }

    public static Mat findFundamentalMat(MatOfPoint2f points1, MatOfPoint2f points2, int method) {
        Mat points1_mat = points1;
        Mat points2_mat = points2;
        return new Mat(findFundamentalMat_5(points1_mat.nativeObj, points2_mat.nativeObj, method));
    }

    public static Mat findFundamentalMat(MatOfPoint2f points1, MatOfPoint2f points2) {
        Mat points1_mat = points1;
        Mat points2_mat = points2;
        return new Mat(findFundamentalMat_6(points1_mat.nativeObj, points2_mat.nativeObj));
    }


    //
    // C++: Mat cv::findFundamentalMat(vector_Point2f points1, vector_Point2f points2, Mat& mask, UsacParams params)
    //

    public static Mat findFundamentalMat(MatOfPoint2f points1, MatOfPoint2f points2, Mat mask, UsacParams params) {
        Mat points1_mat = points1;
        Mat points2_mat = points2;
        return new Mat(findFundamentalMat_7(points1_mat.nativeObj, points2_mat.nativeObj, mask.nativeObj, params.nativeObj));
    }


    //
    // C++: Mat cv::findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix, int method = RANSAC, double prob = 0.999, double threshold = 1.0, int maxIters = 1000, Mat& mask = Mat())
    //

    /**
     * Calculates an essential matrix from the corresponding points in two images.
     *
     * @param points1 Array of N (N >= 5) 2D points from the first image. The point coordinates should
     * be floating-point (single or double precision).
     * @param points2 Array of the second image points of the same size and format as points1 .
     * @param cameraMatrix Camera intrinsic matrix \(\cameramatrix{A}\) .
     * Note that this function assumes that points1 and points2 are feature points from cameras with the
     * same camera intrinsic matrix. If this assumption does not hold for your use case, use
     * #undistortPoints with {@code P = cv::NoArray()} for both cameras to transform image points
     * to normalized image coordinates, which are valid for the identity camera intrinsic matrix. When
     * passing these coordinates, pass the identity matrix for this parameter.
     * @param method Method for computing an essential matrix.
     * <ul>
     * <li>
     * REF: RANSAC for the RANSAC algorithm.
     * </li>
     * <li>
     * REF: LMEDS for the LMedS algorithm.
     * </li>
     * </ul>
     * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
     * confidence (probability) that the estimated matrix is correct.
     * @param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
     * line in pixels, beyond which the point is considered an outlier and is not used for computing the
     * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
     * point localization, image resolution, and the image noise.
     * @param mask Output array of N elements, every element of which is set to 0 for outliers and to 1
     * for the other points. The array is computed only in the RANSAC and LMedS methods.
     * @param maxIters The maximum number of robust method iterations.
     *
     * This function estimates the essential matrix based on the five-point algorithm solver in CITE: Nister03 .
     * CITE: SteweniusCFS is also a related solution. The epipolar geometry is described by the following equation:
     *
     * \([p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0\)
     *
     * where \(E\) is an essential matrix, \(p_1\) and \(p_2\) are corresponding points in the first and the
     * second images, respectively. The result of this function may be passed further to
     * #decomposeEssentialMat or #recoverPose to recover the relative pose between cameras.
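     *
     * A minimal Java sketch (a hedged illustration, not generated code; points1 and points2 hold
     * matched pixel coordinates):
     * <code>
     * Mat mask = new Mat();
     * Mat E = Calib3d.findEssentialMat(points1, points2, cameraMatrix, Calib3d.RANSAC, 0.999, 1.0, 1000, mask);
     * Mat R = new Mat(), t = new Mat();
     * Calib3d.recoverPose(E, points1, points2, cameraMatrix, R, t, mask);
     * </code>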
* @return automatically generated
     */
    public static Mat findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix, int method, double prob, double threshold, int maxIters, Mat mask) {
        return new Mat(findEssentialMat_0(points1.nativeObj, points2.nativeObj, cameraMatrix.nativeObj, method, prob, threshold, maxIters, mask.nativeObj));
    }

    /**
     * Calculates an essential matrix from the corresponding points in two images.
     *
     * @param points1 Array of N (N >= 5) 2D points from the first image. The point coordinates should
     * be floating-point (single or double precision).
     * @param points2 Array of the second image points of the same size and format as points1 .
     * @param cameraMatrix Camera intrinsic matrix \(\cameramatrix{A}\) .
     * Note that this function assumes that points1 and points2 are feature points from cameras with the
     * same camera intrinsic matrix. If this assumption does not hold for your use case, use
     * #undistortPoints with {@code P = cv::NoArray()} for both cameras to transform image points
     * to normalized image coordinates, which are valid for the identity camera intrinsic matrix. When
     * passing these coordinates, pass the identity matrix for this parameter.
     * @param method Method for computing an essential matrix.
     * <ul>
     * <li>
     * REF: RANSAC for the RANSAC algorithm.
     * </li>
     * <li>
     * REF: LMEDS for the LMedS algorithm.
     * </li>
     * </ul>
     * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
     * confidence (probability) that the estimated matrix is correct.
     * @param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
     * line in pixels, beyond which the point is considered an outlier and is not used for computing the
     * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
     * point localization, image resolution, and the image noise.
     * @param maxIters The maximum number of robust method iterations.
     *
     * This function estimates the essential matrix based on the five-point algorithm solver in CITE: Nister03 .
     * CITE: SteweniusCFS is also a related solution. The epipolar geometry is described by the following equation:
     *
     * \([p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0\)
     *
     * where \(E\) is an essential matrix, \(p_1\) and \(p_2\) are corresponding points in the first and the
     * second images, respectively. The result of this function may be passed further to
     * #decomposeEssentialMat or #recoverPose to recover the relative pose between cameras.
     * @return automatically generated
     */
    public static Mat findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix, int method, double prob, double threshold, int maxIters) {
        return new Mat(findEssentialMat_1(points1.nativeObj, points2.nativeObj, cameraMatrix.nativeObj, method, prob, threshold, maxIters));
    }

    /**
     * Calculates an essential matrix from the corresponding points in two images.
     *
     * @param points1 Array of N (N >= 5) 2D points from the first image. The point coordinates should
     * be floating-point (single or double precision).
     * @param points2 Array of the second image points of the same size and format as points1 .
* @param cameraMatrix Camera intrinsic matrix \(\cameramatrix{A}\) .
     * Note that this function assumes that points1 and points2 are feature points from cameras with the
     * same camera intrinsic matrix. If this assumption does not hold for your use case, use
     * #undistortPoints with {@code P = cv::NoArray()} for both cameras to transform image points
     * to normalized image coordinates, which are valid for the identity camera intrinsic matrix. When
     * passing these coordinates, pass the identity matrix for this parameter.
     * @param method Method for computing an essential matrix.
     * <ul>
     * <li>
     * REF: RANSAC for the RANSAC algorithm.
     * </li>
     * <li>
     * REF: LMEDS for the LMedS algorithm.
     * </li>
     * </ul>
     * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
     * confidence (probability) that the estimated matrix is correct.
     * @param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
     * line in pixels, beyond which the point is considered an outlier and is not used for computing the
     * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
     * point localization, image resolution, and the image noise.
     *
     * This function estimates the essential matrix based on the five-point algorithm solver in CITE: Nister03 .
     * CITE: SteweniusCFS is also a related solution. The epipolar geometry is described by the following equation:
     *
     * \([p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0\)
     *
     * where \(E\) is an essential matrix, \(p_1\) and \(p_2\) are corresponding points in the first and the
     * second images, respectively. The result of this function may be passed further to
     * #decomposeEssentialMat or #recoverPose to recover the relative pose between cameras.
     * @return automatically generated
     */
    public static Mat findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix, int method, double prob, double threshold) {
        return new Mat(findEssentialMat_2(points1.nativeObj, points2.nativeObj, cameraMatrix.nativeObj, method, prob, threshold));
    }

    /**
     * Calculates an essential matrix from the corresponding points in two images.
     *
     * @param points1 Array of N (N >= 5) 2D points from the first image. The point coordinates should
     * be floating-point (single or double precision).
     * @param points2 Array of the second image points of the same size and format as points1 .
     * @param cameraMatrix Camera intrinsic matrix \(\cameramatrix{A}\) .
     * Note that this function assumes that points1 and points2 are feature points from cameras with the
     * same camera intrinsic matrix. If this assumption does not hold for your use case, use
     * #undistortPoints with {@code P = cv::NoArray()} for both cameras to transform image points
     * to normalized image coordinates, which are valid for the identity camera intrinsic matrix. When
     * passing these coordinates, pass the identity matrix for this parameter.
     * @param method Method for computing an essential matrix.
     * <ul>
     * <li>
     * REF: RANSAC for the RANSAC algorithm.
     * </li>
     * <li>
     * REF: LMEDS for the LMedS algorithm.
     * </li>
     * </ul>
     * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
* confidence (probability) that the estimated matrix is correct.
     *
     * This function estimates the essential matrix based on the five-point algorithm solver in CITE: Nister03 .
     * CITE: SteweniusCFS is also a related solution. The epipolar geometry is described by the following equation:
     *
     * \([p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0\)
     *
     * where \(E\) is an essential matrix, \(p_1\) and \(p_2\) are corresponding points in the first and the
     * second images, respectively. The result of this function may be passed further to
     * #decomposeEssentialMat or #recoverPose to recover the relative pose between cameras.
     * @return automatically generated
     */
    public static Mat findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix, int method, double prob) {
        return new Mat(findEssentialMat_3(points1.nativeObj, points2.nativeObj, cameraMatrix.nativeObj, method, prob));
    }

    /**
     * Calculates an essential matrix from the corresponding points in two images.
     *
     * @param points1 Array of N (N >= 5) 2D points from the first image. The point coordinates should
     * be floating-point (single or double precision).
     * @param points2 Array of the second image points of the same size and format as points1 .
     * @param cameraMatrix Camera intrinsic matrix \(\cameramatrix{A}\) .
     * Note that this function assumes that points1 and points2 are feature points from cameras with the
     * same camera intrinsic matrix. If this assumption does not hold for your use case, use
     * #undistortPoints with {@code P = cv::NoArray()} for both cameras to transform image points
     * to normalized image coordinates, which are valid for the identity camera intrinsic matrix. When
     * passing these coordinates, pass the identity matrix for this parameter.
     * @param method Method for computing an essential matrix.
     * <ul>
     * <li>
     * REF: RANSAC for the RANSAC algorithm.
     * </li>
     * <li>
     * REF: LMEDS for the LMedS algorithm.
     * </li>
     * </ul>
     *
     * This function estimates the essential matrix based on the five-point algorithm solver in CITE: Nister03 .
     * CITE: SteweniusCFS is also a related solution. The epipolar geometry is described by the following equation:
     *
     * \([p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0\)
     *
     * where \(E\) is an essential matrix, \(p_1\) and \(p_2\) are corresponding points in the first and the
     * second images, respectively. The result of this function may be passed further to
     * #decomposeEssentialMat or #recoverPose to recover the relative pose between cameras.
* @return automatically generated
     */
    public static Mat findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix, int method) {
        return new Mat(findEssentialMat_4(points1.nativeObj, points2.nativeObj, cameraMatrix.nativeObj, method));
    }

    /**
     * Calculates an essential matrix from the corresponding points in two images.
     *
     * @param points1 Array of N (N >= 5) 2D points from the first image. The point coordinates should
     * be floating-point (single or double precision).
     * @param points2 Array of the second image points of the same size and format as points1 .
     * @param cameraMatrix Camera intrinsic matrix \(\cameramatrix{A}\) .
     * Note that this function assumes that points1 and points2 are feature points from cameras with the
     * same camera intrinsic matrix. If this assumption does not hold for your use case, use
     * #undistortPoints with {@code P = cv::NoArray()} for both cameras to transform image points
     * to normalized image coordinates, which are valid for the identity camera intrinsic matrix. When
     * passing these coordinates, pass the identity matrix for this parameter.
     *
     * This function estimates the essential matrix based on the five-point algorithm solver in CITE: Nister03 .
     * CITE: SteweniusCFS is also a related solution. The epipolar geometry is described by the following equation:
     *
     * \([p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0\)
     *
     * where \(E\) is an essential matrix, \(p_1\) and \(p_2\) are corresponding points in the first and the
     * second images, respectively. The result of this function may be passed further to
     * #decomposeEssentialMat or #recoverPose to recover the relative pose between cameras.
     * @return automatically generated
     */
    public static Mat findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix) {
        return new Mat(findEssentialMat_5(points1.nativeObj, points2.nativeObj, cameraMatrix.nativeObj));
    }


    //
    // C++: Mat cv::findEssentialMat(Mat points1, Mat points2, double focal = 1.0, Point2d pp = Point2d(0, 0), int method = RANSAC, double prob = 0.999, double threshold = 1.0, int maxIters = 1000, Mat& mask = Mat())
    //

    /**
     *
     * @param points1 Array of N (N >= 5) 2D points from the first image. The point coordinates should
     * be floating-point (single or double precision).
     * @param points2 Array of the second image points of the same size and format as points1 .
     * @param focal focal length of the camera. Note that this function assumes that points1 and points2
     * are feature points from cameras with the same focal length and principal point.
     * @param pp principal point of the camera.
     * @param method Method for computing an essential matrix.
     * <ul>
     * <li>
     * REF: RANSAC for the RANSAC algorithm.
     * </li>
     * <li>
     * REF: LMEDS for the LMedS algorithm.
7280 * </li> 7281 * </ul> 7282 * @param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar 7283 * line in pixels, beyond which the point is considered an outlier and is not used for computing the 7284 * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the 7285 * point localization, image resolution, and the image noise. 7286 * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of 7287 * confidence (probability) that the estimated matrix is correct. 7288 * @param mask Output array of N elements, every element of which is set to 0 for outliers and to 1 7289 * for the other points. The array is computed only in the RANSAC and LMedS methods. 7290 * @param maxIters The maximum number of robust method iterations. 7291 * 7292 * This function differs from the one above in that it computes camera intrinsic matrix from focal length and 7293 * principal point: 7294 * 7295 * \(A = 7296 * \begin{bmatrix} 7297 * f & 0 & x_{pp} \\ 7298 * 0 & f & y_{pp} \\ 7299 * 0 & 0 & 1 7300 * \end{bmatrix}\) 7301 * @return automatically generated 7302 */ 7303 public static Mat findEssentialMat(Mat points1, Mat points2, double focal, Point pp, int method, double prob, double threshold, int maxIters, Mat mask) { 7304 return new Mat(findEssentialMat_6(points1.nativeObj, points2.nativeObj, focal, pp.x, pp.y, method, prob, threshold, maxIters, mask.nativeObj)); 7305 } 7306 7307 /** 7308 * 7309 * @param points1 Array of N (N >= 5) 2D points from the first image. The point coordinates should 7310 * be floating-point (single or double precision). 7311 * @param points2 Array of the second image points of the same size and format as points1 . 7312 * @param focal focal length of the camera. Note that this function assumes that points1 and points2 7313 * are feature points from cameras with same focal length and principal point. 7314 * @param pp principal point of the camera. 7315 * @param method Method for computing an essential matrix. 7316 * <ul> 7317 * <li> 7318 * REF: RANSAC for the RANSAC algorithm. 7319 * </li> 7320 * <li> 7321 * REF: LMEDS for the LMedS algorithm. 7322 * </li> 7323 * </ul> 7324 * @param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar 7325 * line in pixels, beyond which the point is considered an outlier and is not used for computing the 7326 * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the 7327 * point localization, image resolution, and the image noise. 7328 * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of 7329 * confidence (probability) that the estimated matrix is correct. 7330 * for the other points. The array is computed only in the RANSAC and LMedS methods. 7331 * @param maxIters The maximum number of robust method iterations.
7332 * 7333 * This function differs from the one above in that it computes camera intrinsic matrix from focal length and 7334 * principal point: 7335 * 7336 * \(A = 7337 * \begin{bmatrix} 7338 * f & 0 & x_{pp} \\ 7339 * 0 & f & y_{pp} \\ 7340 * 0 & 0 & 1 7341 * \end{bmatrix}\) 7342 * @return automatically generated 7343 */ 7344 public static Mat findEssentialMat(Mat points1, Mat points2, double focal, Point pp, int method, double prob, double threshold, int maxIters) { 7345 return new Mat(findEssentialMat_7(points1.nativeObj, points2.nativeObj, focal, pp.x, pp.y, method, prob, threshold, maxIters)); 7346 } 7347 7348 /** 7349 * 7350 * @param points1 Array of N (N >= 5) 2D points from the first image. The point coordinates should 7351 * be floating-point (single or double precision). 7352 * @param points2 Array of the second image points of the same size and format as points1 . 7353 * @param focal focal length of the camera. Note that this function assumes that points1 and points2 7354 * are feature points from cameras with same focal length and principal point. 7355 * @param pp principal point of the camera. 7356 * @param method Method for computing an essential matrix. 7357 * <ul> 7358 * <li> 7359 * REF: RANSAC for the RANSAC algorithm. 7360 * </li> 7361 * <li> 7362 * REF: LMEDS for the LMedS algorithm. 7363 * </li> 7364 * </ul> 7365 * @param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar 7366 * line in pixels, beyond which the point is considered an outlier and is not used for computing the 7367 * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the 7368 * point localization, image resolution, and the image noise. 7369 * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of 7370 * confidence (probability) that the estimated matrix is correct. 7371 * for the other points. The array is computed only in the RANSAC and LMedS methods. 7372 * 7373 * This function differs from the one above in that it computes camera intrinsic matrix from focal length and 7374 * principal point: 7375 * 7376 * \(A = 7377 * \begin{bmatrix} 7378 * f & 0 & x_{pp} \\ 7379 * 0 & f & y_{pp} \\ 7380 * 0 & 0 & 1 7381 * \end{bmatrix}\) 7382 * @return automatically generated 7383 */ 7384 public static Mat findEssentialMat(Mat points1, Mat points2, double focal, Point pp, int method, double prob, double threshold) { 7385 return new Mat(findEssentialMat_8(points1.nativeObj, points2.nativeObj, focal, pp.x, pp.y, method, prob, threshold)); 7386 } 7387 7388 /** 7389 * 7390 * @param points1 Array of N (N >= 5) 2D points from the first image. The point coordinates should 7391 * be floating-point (single or double precision). 7392 * @param points2 Array of the second image points of the same size and format as points1 . 7393 * @param focal focal length of the camera. Note that this function assumes that points1 and points2 7394 * are feature points from cameras with same focal length and principal point. 7395 * @param pp principal point of the camera. 7396 * @param method Method for computing an essential matrix. 7397 * <ul> 7398 * <li> 7399 * REF: RANSAC for the RANSAC algorithm. 7400 * </li> 7401 * <li> 7402 * REF: LMEDS for the LMedS algorithm. 7403 * </li> 7404 * </ul> 7405 * line in pixels, beyond which the point is considered an outlier and is not used for computing the 7406 * final fundamental matrix.
It can be set to something like 1-3, depending on the accuracy of the 7407 * point localization, image resolution, and the image noise. 7408 * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of 7409 * confidence (probability) that the estimated matrix is correct. 7410 * for the other points. The array is computed only in the RANSAC and LMedS methods. 7411 * 7412 * This function differs from the one above in that it computes camera intrinsic matrix from focal length and 7413 * principal point: 7414 * 7415 * \(A = 7416 * \begin{bmatrix} 7417 * f & 0 & x_{pp} \\ 7418 * 0 & f & y_{pp} \\ 7419 * 0 & 0 & 1 7420 * \end{bmatrix}\) 7421 * @return automatically generated 7422 */ 7423 public static Mat findEssentialMat(Mat points1, Mat points2, double focal, Point pp, int method, double prob) { 7424 return new Mat(findEssentialMat_9(points1.nativeObj, points2.nativeObj, focal, pp.x, pp.y, method, prob)); 7425 } 7426 7427 /** 7428 * 7429 * @param points1 Array of N (N >= 5) 2D points from the first image. The point coordinates should 7430 * be floating-point (single or double precision). 7431 * @param points2 Array of the second image points of the same size and format as points1 . 7432 * @param focal focal length of the camera. Note that this function assumes that points1 and points2 7433 * are feature points from cameras with same focal length and principal point. 7434 * @param pp principal point of the camera. 7435 * @param method Method for computing an essential matrix. 7436 * <ul> 7437 * <li> 7438 * REF: RANSAC for the RANSAC algorithm. 7439 * </li> 7440 * <li> 7441 * REF: LMEDS for the LMedS algorithm. 7442 * </li> 7443 * </ul> 7444 * line in pixels, beyond which the point is considered an outlier and is not used for computing the 7445 * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the 7446 * point localization, image resolution, and the image noise. 7447 * confidence (probability) that the estimated matrix is correct. 7448 * for the other points. The array is computed only in the RANSAC and LMedS methods. 7449 * 7450 * This function differs from the one above in that it computes camera intrinsic matrix from focal length and 7451 * principal point: 7452 * 7453 * \(A = 7454 * \begin{bmatrix} 7455 * f & 0 & x_{pp} \\ 7456 * 0 & f & y_{pp} \\ 7457 * 0 & 0 & 1 7458 * \end{bmatrix}\) 7459 * @return automatically generated 7460 */ 7461 public static Mat findEssentialMat(Mat points1, Mat points2, double focal, Point pp, int method) { 7462 return new Mat(findEssentialMat_10(points1.nativeObj, points2.nativeObj, focal, pp.x, pp.y, method)); 7463 } 7464 7465 /** 7466 * 7467 * @param points1 Array of N (N >= 5) 2D points from the first image. The point coordinates should 7468 * be floating-point (single or double precision). 7469 * @param points2 Array of the second image points of the same size and format as points1 . 7470 * @param focal focal length of the camera. Note that this function assumes that points1 and points2 7471 * are feature points from cameras with same focal length and principal point. 7472 * @param pp principal point of the camera. 7473 * <ul> 7474 * <li> 7475 * REF: RANSAC for the RANSAC algorithm. 7476 * </li> 7477 * <li> 7478 * REF: LMEDS for the LMedS algorithm. 7479 * </li> 7480 * </ul> 7481 * line in pixels, beyond which the point is considered an outlier and is not used for computing the 7482 * final fundamental matrix.
It can be set to something like 1-3, depending on the accuracy of the 7483 * point localization, image resolution, and the image noise. 7484 * confidence (probability) that the estimated matrix is correct. 7485 * for the other points. The array is computed only in the RANSAC and LMedS methods. 7486 * 7487 * This function differs from the one above in that it computes camera intrinsic matrix from focal length and 7488 * principal point: 7489 * 7490 * \(A = 7491 * \begin{bmatrix} 7492 * f & 0 & x_{pp} \\ 7493 * 0 & f & y_{pp} \\ 7494 * 0 & 0 & 1 7495 * \end{bmatrix}\) 7496 * @return automatically generated 7497 */ 7498 public static Mat findEssentialMat(Mat points1, Mat points2, double focal, Point pp) { 7499 return new Mat(findEssentialMat_11(points1.nativeObj, points2.nativeObj, focal, pp.x, pp.y)); 7500 } 7501 7502 /** 7503 * 7504 * @param points1 Array of N (N >= 5) 2D points from the first image. The point coordinates should 7505 * be floating-point (single or double precision). 7506 * @param points2 Array of the second image points of the same size and format as points1 . 7507 * @param focal focal length of the camera. Note that this function assumes that points1 and points2 7508 * are feature points from cameras with same focal length and principal point. 7509 * <ul> 7510 * <li> 7511 * REF: RANSAC for the RANSAC algorithm. 7512 * </li> 7513 * <li> 7514 * REF: LMEDS for the LMedS algorithm. 7515 * </li> 7516 * </ul> 7517 * line in pixels, beyond which the point is considered an outlier and is not used for computing the 7518 * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the 7519 * point localization, image resolution, and the image noise. 7520 * confidence (probability) that the estimated matrix is correct. 7521 * for the other points. The array is computed only in the RANSAC and LMedS methods. 7522 * 7523 * This function differs from the one above in that it computes camera intrinsic matrix from focal length and 7524 * principal point: 7525 * 7526 * \(A = 7527 * \begin{bmatrix} 7528 * f & 0 & x_{pp} \\ 7529 * 0 & f & y_{pp} \\ 7530 * 0 & 0 & 1 7531 * \end{bmatrix}\) 7532 * @return automatically generated 7533 */ 7534 public static Mat findEssentialMat(Mat points1, Mat points2, double focal) { 7535 return new Mat(findEssentialMat_12(points1.nativeObj, points2.nativeObj, focal)); 7536 } 7537 7538 /** 7539 * 7540 * @param points1 Array of N (N >= 5) 2D points from the first image. The point coordinates should 7541 * be floating-point (single or double precision). 7542 * @param points2 Array of the second image points of the same size and format as points1 . 7543 * are feature points from cameras with same focal length and principal point. 7544 * <ul> 7545 * <li> 7546 * REF: RANSAC for the RANSAC algorithm. 7547 * </li> 7548 * <li> 7549 * REF: LMEDS for the LMedS algorithm. 7550 * </li> 7551 * </ul> 7552 * line in pixels, beyond which the point is considered an outlier and is not used for computing the 7553 * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the 7554 * point localization, image resolution, and the image noise. 7555 * confidence (probability) that the estimated matrix is correct. 7556 * for the other points. The array is computed only in the RANSAC and LMedS methods.
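 *
 * A minimal sketch of the fully defaulted overload below. Per the C++ signature above, the defaults
 * are {@code focal = 1.0}, {@code pp = (0, 0)} and {@code method = RANSAC}, so the call behaves as if
 * the camera intrinsic matrix were the identity, i.e. the points are expected in normalized image
 * coordinates:
 * <code>
 * // points1 and points2 are hypothetical Mats of N >= 5 matched 2D points.
 * Mat E = Calib3d.findEssentialMat(points1, points2); // focal = 1.0, pp = (0, 0), RANSAC
 * </code>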
7557 * 7558 * This function differs from the one above in that it computes camera intrinsic matrix from focal length and 7559 * principal point: 7560 * 7561 * \(A = 7562 * \begin{bmatrix} 7563 * f & 0 & x_{pp} \\ 7564 * 0 & f & y_{pp} \\ 7565 * 0 & 0 & 1 7566 * \end{bmatrix}\) 7567 * @return automatically generated 7568 */ 7569 public static Mat findEssentialMat(Mat points1, Mat points2) { 7570 return new Mat(findEssentialMat_13(points1.nativeObj, points2.nativeObj)); 7571 } 7572 7573 7574 // 7575 // C++: Mat cv::findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, int method = RANSAC, double prob = 0.999, double threshold = 1.0, Mat& mask = Mat()) 7576 // 7577 7578 /** 7579 * Calculates an essential matrix from the corresponding points in two images from potentially two different cameras. 7580 * 7581 * @param points1 Array of N (N >= 5) 2D points from the first image. The point coordinates should 7582 * be floating-point (single or double precision). 7583 * @param points2 Array of the second image points of the same size and format as points1 . 7584 * @param cameraMatrix1 Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) . 7585 * Note that this function assumes that points1 and points2 are feature points from cameras with the 7586 * same camera matrix. If this assumption does not hold for your use case, use 7587 * #undistortPoints with {@code P = cv::NoArray()} for both cameras to transform image points 7588 * to normalized image coordinates, which are valid for the identity camera matrix. When 7589 * passing these coordinates, pass the identity matrix for this parameter. 7590 * @param cameraMatrix2 Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) . 7591 * Note that this function assumes that points1 and points2 are feature points from cameras with the 7592 * same camera matrix. If this assumption does not hold for your use case, use 7593 * #undistortPoints with {@code P = cv::NoArray()} for both cameras to transform image points 7594 * to normalized image coordinates, which are valid for the identity camera matrix. When 7595 * passing these coordinates, pass the identity matrix for this parameter. 7596 * @param distCoeffs1 Input vector of distortion coefficients 7597 * \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\) 7598 * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed. 7599 * @param distCoeffs2 Input vector of distortion coefficients 7600 * \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\) 7601 * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed. 7602 * @param method Method for computing an essential matrix. 7603 * <ul> 7604 * <li> 7605 * REF: RANSAC for the RANSAC algorithm. 7606 * </li> 7607 * <li> 7608 * REF: LMEDS for the LMedS algorithm. 7609 * </li> 7610 * </ul> 7611 * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of 7612 * confidence (probability) that the estimated matrix is correct. 7613 * @param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar 7614 * line in pixels, beyond which the point is considered an outlier and is not used for computing the 7615 * final fundamental matrix.
It can be set to something like 1-3, depending on the accuracy of the 7616 * point localization, image resolution, and the image noise. 7617 * @param mask Output array of N elements, every element of which is set to 0 for outliers and to 1 7618 * for the other points. The array is computed only in the RANSAC and LMedS methods. 7619 * 7620 * This function estimates the essential matrix based on the five-point algorithm solver in CITE: Nister03 . 7621 * CITE: SteweniusCFS is also a related solution. The epipolar geometry is described by the following equation: 7622 * 7623 * \([p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0\) 7624 * 7625 * where \(E\) is an essential matrix, \(p_1\) and \(p_2\) are corresponding points in the first and the 7626 * second images, respectively. The result of this function may be passed further to 7627 * #decomposeEssentialMat or #recoverPose to recover the relative pose between cameras. 7628 * @return automatically generated 7629 */ 7630 public static Mat findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, int method, double prob, double threshold, Mat mask) { 7631 return new Mat(findEssentialMat_14(points1.nativeObj, points2.nativeObj, cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, method, prob, threshold, mask.nativeObj)); 7632 } 7633 7634 /** 7635 * Calculates an essential matrix from the corresponding points in two images from potentially two different cameras. 7636 * 7637 * @param points1 Array of N (N >= 5) 2D points from the first image. The point coordinates should 7638 * be floating-point (single or double precision). 7639 * @param points2 Array of the second image points of the same size and format as points1 . 7640 * @param cameraMatrix1 Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) . 7641 * Note that this function assumes that points1 and points2 are feature points from cameras with the 7642 * same camera matrix. If this assumption does not hold for your use case, use 7643 * #undistortPoints with {@code P = cv::NoArray()} for both cameras to transform image points 7644 * to normalized image coordinates, which are valid for the identity camera matrix. When 7645 * passing these coordinates, pass the identity matrix for this parameter. 7646 * @param cameraMatrix2 Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) . 7647 * Note that this function assumes that points1 and points2 are feature points from cameras with the 7648 * same camera matrix. If this assumption does not hold for your use case, use 7649 * #undistortPoints with {@code P = cv::NoArray()} for both cameras to transform image points 7650 * to normalized image coordinates, which are valid for the identity camera matrix. When 7651 * passing these coordinates, pass the identity matrix for this parameter. 7652 * @param distCoeffs1 Input vector of distortion coefficients 7653 * \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\) 7654 * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed. 7655 * @param distCoeffs2 Input vector of distortion coefficients 7656 * \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\) 7657 * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed. 7658 * @param method Method for computing an essential matrix.
7659 * <ul> 7660 * <li> 7661 * REF: RANSAC for the RANSAC algorithm. 7662 * </li> 7663 * <li> 7664 * REF: LMEDS for the LMedS algorithm. 7665 * </li> 7666 * </ul> 7667 * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of 7668 * confidence (probability) that the estimated matrix is correct. 7669 * @param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar 7670 * line in pixels, beyond which the point is considered an outlier and is not used for computing the 7671 * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the 7672 * point localization, image resolution, and the image noise. 7673 * for the other points. The array is computed only in the RANSAC and LMedS methods. 7674 * 7675 * This function estimates the essential matrix based on the five-point algorithm solver in CITE: Nister03 . 7676 * CITE: SteweniusCFS is also a related solution. The epipolar geometry is described by the following equation: 7677 * 7678 * \([p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0\) 7679 * 7680 * where \(E\) is an essential matrix, \(p_1\) and \(p_2\) are corresponding points in the first and the 7681 * second images, respectively. The result of this function may be passed further to 7682 * #decomposeEssentialMat or #recoverPose to recover the relative pose between cameras. 7683 * @return automatically generated 7684 */ 7685 public static Mat findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, int method, double prob, double threshold) { 7686 return new Mat(findEssentialMat_15(points1.nativeObj, points2.nativeObj, cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, method, prob, threshold)); 7687 } 7688 7689 /** 7690 * Calculates an essential matrix from the corresponding points in two images from potentially two different cameras. 7691 * 7692 * @param points1 Array of N (N >= 5) 2D points from the first image. The point coordinates should 7693 * be floating-point (single or double precision). 7694 * @param points2 Array of the second image points of the same size and format as points1 . 7695 * @param cameraMatrix1 Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) . 7696 * Note that this function assumes that points1 and points2 are feature points from cameras with the 7697 * same camera matrix. If this assumption does not hold for your use case, use 7698 * #undistortPoints with {@code P = cv::NoArray()} for both cameras to transform image points 7699 * to normalized image coordinates, which are valid for the identity camera matrix. When 7700 * passing these coordinates, pass the identity matrix for this parameter. 7701 * @param cameraMatrix2 Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) . 7702 * Note that this function assumes that points1 and points2 are feature points from cameras with the 7703 * same camera matrix. If this assumption does not hold for your use case, use 7704 * #undistortPoints with {@code P = cv::NoArray()} for both cameras to transform image points 7705 * to normalized image coordinates, which are valid for the identity camera matrix. When 7706 * passing these coordinates, pass the identity matrix for this parameter. 7707 * @param distCoeffs1 Input vector of distortion coefficients 7708 * \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\) 7709 * of 4, 5, 8, 12 or 14 elements.
If the vector is NULL/empty, the zero distortion coefficients are assumed. 7710 * @param distCoeffs2 Input vector of distortion coefficients 7711 * \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\) 7712 * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed. 7713 * @param method Method for computing an essential matrix. 7714 * <ul> 7715 * <li> 7716 * REF: RANSAC for the RANSAC algorithm. 7717 * </li> 7718 * <li> 7719 * REF: LMEDS for the LMedS algorithm. 7720 * </li> 7721 * </ul> 7722 * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of 7723 * confidence (probability) that the estimated matrix is correct. 7724 * line in pixels, beyond which the point is considered an outlier and is not used for computing the 7725 * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the 7726 * point localization, image resolution, and the image noise. 7727 * for the other points. The array is computed only in the RANSAC and LMedS methods. 7728 * 7729 * This function estimates the essential matrix based on the five-point algorithm solver in CITE: Nister03 . 7730 * CITE: SteweniusCFS is also a related solution. The epipolar geometry is described by the following equation: 7731 * 7732 * \([p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0\) 7733 * 7734 * where \(E\) is an essential matrix, \(p_1\) and \(p_2\) are corresponding points in the first and the 7735 * second images, respectively. The result of this function may be passed further to 7736 * #decomposeEssentialMat or #recoverPose to recover the relative pose between cameras. 7737 * @return automatically generated 7738 */ 7739 public static Mat findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, int method, double prob) { 7740 return new Mat(findEssentialMat_16(points1.nativeObj, points2.nativeObj, cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, method, prob)); 7741 } 7742 7743 /** 7744 * Calculates an essential matrix from the corresponding points in two images from potentially two different cameras. 7745 * 7746 * @param points1 Array of N (N >= 5) 2D points from the first image. The point coordinates should 7747 * be floating-point (single or double precision). 7748 * @param points2 Array of the second image points of the same size and format as points1 . 7749 * @param cameraMatrix1 Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) . 7750 * Note that this function assumes that points1 and points2 are feature points from cameras with the 7751 * same camera matrix. If this assumption does not hold for your use case, use 7752 * #undistortPoints with {@code P = cv::NoArray()} for both cameras to transform image points 7753 * to normalized image coordinates, which are valid for the identity camera matrix. When 7754 * passing these coordinates, pass the identity matrix for this parameter. 7755 * @param cameraMatrix2 Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) . 7756 * Note that this function assumes that points1 and points2 are feature points from cameras with the 7757 * same camera matrix. If this assumption does not hold for your use case, use 7758 * #undistortPoints with {@code P = cv::NoArray()} for both cameras to transform image points 7759 * to normalized image coordinates, which are valid for the identity camera matrix.
When 7760 * passing these coordinates, pass the identity matrix for this parameter. 7761 * @param distCoeffs1 Input vector of distortion coefficients 7762 * \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\) 7763 * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed. 7764 * @param distCoeffs2 Input vector of distortion coefficients 7765 * \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\) 7766 * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed. 7767 * @param method Method for computing an essential matrix. 7768 * <ul> 7769 * <li> 7770 * REF: RANSAC for the RANSAC algorithm. 7771 * </li> 7772 * <li> 7773 * REF: LMEDS for the LMedS algorithm. 7774 * </li> 7775 * </ul> 7776 * confidence (probability) that the estimated matrix is correct. 7777 * line in pixels, beyond which the point is considered an outlier and is not used for computing the 7778 * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the 7779 * point localization, image resolution, and the image noise. 7780 * for the other points. The array is computed only in the RANSAC and LMedS methods. 7781 * 7782 * This function estimates the essential matrix based on the five-point algorithm solver in CITE: Nister03 . 7783 * CITE: SteweniusCFS is also a related solution. The epipolar geometry is described by the following equation: 7784 * 7785 * \([p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0\) 7786 * 7787 * where \(E\) is an essential matrix, \(p_1\) and \(p_2\) are corresponding points in the first and the 7788 * second images, respectively. The result of this function may be passed further to 7789 * #decomposeEssentialMat or #recoverPose to recover the relative pose between cameras. 7790 * @return automatically generated 7791 */ 7792 public static Mat findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, int method) { 7793 return new Mat(findEssentialMat_17(points1.nativeObj, points2.nativeObj, cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, method)); 7794 } 7795 7796 /** 7797 * Calculates an essential matrix from the corresponding points in two images from potentially two different cameras. 7798 * 7799 * @param points1 Array of N (N >= 5) 2D points from the first image. The point coordinates should 7800 * be floating-point (single or double precision). 7801 * @param points2 Array of the second image points of the same size and format as points1 . 7802 * @param cameraMatrix1 Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) . 7803 * Note that this function assumes that points1 and points2 are feature points from cameras with the 7804 * same camera matrix. If this assumption does not hold for your use case, use 7805 * #undistortPoints with {@code P = cv::NoArray()} for both cameras to transform image points 7806 * to normalized image coordinates, which are valid for the identity camera matrix. When 7807 * passing these coordinates, pass the identity matrix for this parameter. 7808 * @param cameraMatrix2 Camera matrix \(K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) . 7809 * Note that this function assumes that points1 and points2 are feature points from cameras with the 7810 * same camera matrix.
If this assumption does not hold for your use case, use 7811 * #undistortPoints with {@code P = cv::NoArray()} for both cameras to transform image points 7812 * to normalized image coordinates, which are valid for the identity camera matrix. When 7813 * passing these coordinates, pass the identity matrix for this parameter. 7814 * @param distCoeffs1 Input vector of distortion coefficients 7815 * \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\) 7816 * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed. 7817 * @param distCoeffs2 Input vector of distortion coefficients 7818 * \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\) 7819 * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed. 7820 * <ul> 7821 * <li> 7822 * REF: RANSAC for the RANSAC algorithm. 7823 * </li> 7824 * <li> 7825 * REF: LMEDS for the LMedS algorithm. 7826 * </li> 7827 * </ul> 7828 * confidence (probability) that the estimated matrix is correct. 7829 * line in pixels, beyond which the point is considered an outlier and is not used for computing the 7830 * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the 7831 * point localization, image resolution, and the image noise. 7832 * for the other points. The array is computed only in the RANSAC and LMedS methods. 7833 * 7834 * This function estimates the essential matrix based on the five-point algorithm solver in CITE: Nister03 . 7835 * CITE: SteweniusCFS is also a related solution. The epipolar geometry is described by the following equation: 7836 * 7837 * \([p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0\) 7838 * 7839 * where \(E\) is an essential matrix, \(p_1\) and \(p_2\) are corresponding points in the first and the 7840 * second images, respectively. The result of this function may be passed further to 7841 * #decomposeEssentialMat or #recoverPose to recover the relative pose between cameras. 7842 * @return automatically generated 7843 */ 7844 public static Mat findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2) { 7845 return new Mat(findEssentialMat_18(points1.nativeObj, points2.nativeObj, cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj)); 7846 } 7847 7848 7849 // 7850 // C++: Mat cv::findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix1, Mat cameraMatrix2, Mat dist_coeff1, Mat dist_coeff2, Mat& mask, UsacParams params) 7851 // 7852 7853 public static Mat findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix1, Mat cameraMatrix2, Mat dist_coeff1, Mat dist_coeff2, Mat mask, UsacParams params) { 7854 return new Mat(findEssentialMat_19(points1.nativeObj, points2.nativeObj, cameraMatrix1.nativeObj, cameraMatrix2.nativeObj, dist_coeff1.nativeObj, dist_coeff2.nativeObj, mask.nativeObj, params.nativeObj)); 7855 } 7856 7857 7858 // 7859 // C++: void cv::decomposeEssentialMat(Mat E, Mat& R1, Mat& R2, Mat& t) 7860 // 7861 7862 /** 7863 * Decomposes an essential matrix into possible rotations and translation. 7864 * 7865 * @param E The input essential matrix. 7866 * @param R1 One possible rotation matrix. 7867 * @param R2 Another possible rotation matrix. 7868 * @param t One possible translation. 7869 * 7870 * This function decomposes the essential matrix E using the SVD decomposition CITE: HartleyZ00.
In 7871 * general, four possible poses exist for the decomposition of E. They are \([R_1, t]\), 7872 * \([R_1, -t]\), \([R_2, t]\), \([R_2, -t]\). 7873 * 7874 * If E gives the epipolar constraint \([p_2; 1]^T A^{-T} E A^{-1} [p_1; 1] = 0\) between the image 7875 * points \(p_1\) in the first image and \(p_2\) in the second image, then any of the tuples 7876 * \([R_1, t]\), \([R_1, -t]\), \([R_2, t]\), \([R_2, -t]\) is a change of basis from the first 7877 * camera's coordinate system to the second camera's coordinate system. However, by decomposing E, one 7878 * can only get the direction of the translation. For this reason, the translation t is returned with 7879 * unit length. 7880 */ 7881 public static void decomposeEssentialMat(Mat E, Mat R1, Mat R2, Mat t) { 7882 decomposeEssentialMat_0(E.nativeObj, R1.nativeObj, R2.nativeObj, t.nativeObj); 7883 } 7884 7885 7886 // 7887 // C++: int cv::recoverPose(Mat points1, Mat points2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Mat& E, Mat& R, Mat& t, int method = cv::RANSAC, double prob = 0.999, double threshold = 1.0, Mat& mask = Mat()) 7888 // 7889 7890 /** 7891 * Recovers the relative camera rotation and the translation from corresponding points in two images from two different cameras, using the cheirality check. Returns the number of 7892 * inliers that pass the check. 7893 * 7894 * @param points1 Array of N 2D points from the first image. The point coordinates should be 7895 * floating-point (single or double precision). 7896 * @param points2 Array of the second image points of the same size and format as points1 . 7897 * @param cameraMatrix1 Input/output camera matrix for the first camera, the same as in 7898 * REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below. 7899 * @param distCoeffs1 Input/output vector of distortion coefficients, the same as in 7900 * REF: calibrateCamera. 7901 * @param cameraMatrix2 Input/output camera matrix for the second camera, the same as in 7902 * REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below. 7903 * @param distCoeffs2 Input/output vector of distortion coefficients, the same as in 7904 * REF: calibrateCamera. 7905 * @param E The output essential matrix. 7906 * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple 7907 * that performs a change of basis from the first camera's coordinate system to the second camera's 7908 * coordinate system. Note that, in general, t cannot be used for this tuple, see the parameter 7909 * described below. 7910 * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and 7911 * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit 7912 * length. 7913 * @param method Method for computing an essential matrix. 7914 * <ul> 7915 * <li> 7916 * REF: RANSAC for the RANSAC algorithm. 7917 * </li> 7918 * <li> 7919 * REF: LMEDS for the LMedS algorithm. 7920 * </li> 7921 * </ul> 7922 * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of 7923 * confidence (probability) that the estimated matrix is correct. 7924 * @param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar 7925 * line in pixels, beyond which the point is considered an outlier and is not used for computing the 7926 * final fundamental matrix.
It can be set to something like 1-3, depending on the accuracy of the 7927 * point localization, image resolution, and the image noise. 7928 * @param mask Input/output mask for inliers in points1 and points2. If it is not empty, then it marks 7929 * inliers in points1 and points2 for the given essential matrix E. Only these inliers will be used to 7930 * recover pose. In the output mask, only the inliers which pass the cheirality check are marked. 7931 * 7932 * This function decomposes an essential matrix using REF: decomposeEssentialMat and then verifies 7933 * possible pose hypotheses by doing the cheirality check. The cheirality check means that the 7934 * triangulated 3D points should have positive depth. Some details can be found in CITE: Nister03. 7935 * 7936 * This function can be used to process the output E and mask from REF: findEssentialMat. In this 7937 * scenario, points1 and points2 are the same input for findEssentialMat: 7938 * <code> 7939 * // Example. Estimation of essential matrix using the RANSAC algorithm 7940 * int point_count = 100; 7941 * vector<Point2f> points1(point_count); 7942 * vector<Point2f> points2(point_count); 7943 * 7944 * // initialize the points here ... 7945 * for( int i = 0; i < point_count; i++ ) 7946 * { 7947 * points1[i] = ...; 7948 * points2[i] = ...; 7949 * } 7950 * 7951 * // Input: camera calibration of both cameras, for example using intrinsic chessboard calibration. 7952 * Mat cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2; 7953 * 7954 * // Output: Essential matrix, relative rotation and relative translation. 7955 * Mat E, R, t, mask; 7956 * 7957 * recoverPose(points1, points2, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, E, R, t, mask); 7958 * </code> 7959 * @return automatically generated 7960 */ 7961 public static int recoverPose(Mat points1, Mat points2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Mat E, Mat R, Mat t, int method, double prob, double threshold, Mat mask) { 7962 return recoverPose_0(points1.nativeObj, points2.nativeObj, cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, E.nativeObj, R.nativeObj, t.nativeObj, method, prob, threshold, mask.nativeObj); 7963 } 7964 7965 /** 7966 * Recovers the relative camera rotation and the translation from corresponding points in two images from two different cameras, using the cheirality check. Returns the number of 7967 * inliers that pass the check. 7968 * 7969 * @param points1 Array of N 2D points from the first image. The point coordinates should be 7970 * floating-point (single or double precision). 7971 * @param points2 Array of the second image points of the same size and format as points1 . 7972 * @param cameraMatrix1 Input/output camera matrix for the first camera, the same as in 7973 * REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below. 7974 * @param distCoeffs1 Input/output vector of distortion coefficients, the same as in 7975 * REF: calibrateCamera. 7976 * @param cameraMatrix2 Input/output camera matrix for the second camera, the same as in 7977 * REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below. 7978 * @param distCoeffs2 Input/output vector of distortion coefficients, the same as in 7979 * REF: calibrateCamera. 7980 * @param E The output essential matrix. 7981 * @param R Output rotation matrix.
Together with the translation vector, this matrix makes up a tuple 7982 * that performs a change of basis from the first camera's coordinate system to the second camera's 7983 * coordinate system. Note that, in general, t cannot be used for this tuple, see the parameter 7984 * described below. 7985 * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and 7986 * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit 7987 * length. 7988 * @param method Method for computing an essential matrix. 7989 * <ul> 7990 * <li> 7991 * REF: RANSAC for the RANSAC algorithm. 7992 * </li> 7993 * <li> 7994 * REF: LMEDS for the LMedS algorithm. 7995 * </li> 7996 * </ul> 7997 * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of 7998 * confidence (probability) that the estimated matrix is correct. 7999 * @param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar 8000 * line in pixels, beyond which the point is considered an outlier and is not used for computing the 8001 * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the 8002 * point localization, image resolution, and the image noise. 8003 * inliers in points1 and points2 for the given essential matrix E. Only these inliers will be used to 8004 * recover pose. In the output mask, only the inliers which pass the cheirality check are marked. 8005 * 8006 * This function decomposes an essential matrix using REF: decomposeEssentialMat and then verifies 8007 * possible pose hypotheses by doing the cheirality check. The cheirality check means that the 8008 * triangulated 3D points should have positive depth. Some details can be found in CITE: Nister03. 8009 * 8010 * This function can be used to process the output E and mask from REF: findEssentialMat. In this 8011 * scenario, points1 and points2 are the same input for findEssentialMat: 8012 * <code> 8013 * // Example. Estimation of essential matrix using the RANSAC algorithm 8014 * int point_count = 100; 8015 * vector<Point2f> points1(point_count); 8016 * vector<Point2f> points2(point_count); 8017 * 8018 * // initialize the points here ... 8019 * for( int i = 0; i < point_count; i++ ) 8020 * { 8021 * points1[i] = ...; 8022 * points2[i] = ...; 8023 * } 8024 * 8025 * // Input: camera calibration of both cameras, for example using intrinsic chessboard calibration. 8026 * Mat cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2; 8027 * 8028 * // Output: Essential matrix, relative rotation and relative translation. 8029 * Mat E, R, t, mask; 8030 * 8031 * recoverPose(points1, points2, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, E, R, t, mask); 8032 * </code> 8033 * @return automatically generated 8034 */ 8035 public static int recoverPose(Mat points1, Mat points2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Mat E, Mat R, Mat t, int method, double prob, double threshold) { 8036 return recoverPose_1(points1.nativeObj, points2.nativeObj, cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, E.nativeObj, R.nativeObj, t.nativeObj, method, prob, threshold); 8037 } 8038 8039 /** 8040 * Recovers the relative camera rotation and the translation from corresponding points in two images from two different cameras, using the cheirality check. Returns the number of 8041 * inliers that pass the check.
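 *
 * A minimal Java sketch of this overload (the point sets, camera matrices and distortion vectors are
 * hypothetical placeholders produced by earlier matching and calibration steps):
 * <code>
 * Mat E = new Mat(), R = new Mat(), t = new Mat();
 * int inliers = Calib3d.recoverPose(points1, points2, cameraMatrix1, distCoeffs1,
 *         cameraMatrix2, distCoeffs2, E, R, t, Calib3d.RANSAC, 0.999, 1.0);
 * </code>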
8042 * 8043 * @param points1 Array of N 2D points from the first image. The point coordinates should be 8044 * floating-point (single or double precision). 8045 * @param points2 Array of the second image points of the same size and format as points1 . 8046 * @param cameraMatrix1 Input/output camera matrix for the first camera, the same as in 8047 * REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below. 8048 * @param distCoeffs1 Input/output vector of distortion coefficients, the same as in 8049 * REF: calibrateCamera. 8050 * @param cameraMatrix2 Input/output camera matrix for the second camera, the same as in 8051 * REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below. 8052 * @param distCoeffs2 Input/output vector of distortion coefficients, the same as in 8053 * REF: calibrateCamera. 8054 * @param E The output essential matrix. 8055 * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple 8056 * that performs a change of basis from the first camera's coordinate system to the second camera's 8057 * coordinate system. Note that, in general, t cannot be used for this tuple, see the parameter 8058 * described below. 8059 * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and 8060 * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit 8061 * length. 8062 * @param method Method for computing an essential matrix. 8063 * <ul> 8064 * <li> 8065 * REF: RANSAC for the RANSAC algorithm. 8066 * </li> 8067 * <li> 8068 * REF: LMEDS for the LMedS algorithm. 8069 * </li> 8070 * </ul> 8071 * @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of 8072 * confidence (probability) that the estimated matrix is correct. 8073 * line in pixels, beyond which the point is considered an outlier and is not used for computing the 8074 * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the 8075 * point localization, image resolution, and the image noise. 8076 * inliers in points1 and points2 for the given essential matrix E. Only these inliers will be used to 8077 * recover pose. In the output mask, only the inliers which pass the cheirality check are marked. 8078 * 8079 * This function decomposes an essential matrix using REF: decomposeEssentialMat and then verifies 8080 * possible pose hypotheses by doing the cheirality check. The cheirality check means that the 8081 * triangulated 3D points should have positive depth. Some details can be found in CITE: Nister03. 8082 * 8083 * This function can be used to process the output E and mask from REF: findEssentialMat. In this 8084 * scenario, points1 and points2 are the same input for findEssentialMat: 8085 * <code> 8086 * // Example. Estimation of essential matrix using the RANSAC algorithm 8087 * int point_count = 100; 8088 * vector<Point2f> points1(point_count); 8089 * vector<Point2f> points2(point_count); 8090 * 8091 * // initialize the points here ... 8092 * for( int i = 0; i < point_count; i++ ) 8093 * { 8094 * points1[i] = ...; 8095 * points2[i] = ...; 8096 * } 8097 * 8098 * // Input: camera calibration of both cameras, for example using intrinsic chessboard calibration. 8099 * Mat cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2; 8100 * 8101 * // Output: Essential matrix, relative rotation and relative translation.
8102 * Mat E, R, t, mask; 8103 * 8104 * recoverPose(points1, points2, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, E, R, t, mask); 8105 * </code> 8106 * @return automatically generated 8107 */ 8108 public static int recoverPose(Mat points1, Mat points2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Mat E, Mat R, Mat t, int method, double prob) { 8109 return recoverPose_2(points1.nativeObj, points2.nativeObj, cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, E.nativeObj, R.nativeObj, t.nativeObj, method, prob); 8110 } 8111 8112 /** 8113 * Recovers the relative camera rotation and the translation from corresponding points in two images from two different cameras, using the cheirality check. Returns the number of 8114 * inliers that pass the check. 8115 * 8116 * @param points1 Array of N 2D points from the first image. The point coordinates should be 8117 * floating-point (single or double precision). 8118 * @param points2 Array of the second image points of the same size and format as points1 . 8119 * @param cameraMatrix1 Input/output camera matrix for the first camera, the same as in 8120 * REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below. 8121 * @param distCoeffs1 Input/output vector of distortion coefficients, the same as in 8122 * REF: calibrateCamera. 8123 * @param cameraMatrix2 Input/output camera matrix for the second camera, the same as in 8124 * REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below. 8125 * @param distCoeffs2 Input/output vector of distortion coefficients, the same as in 8126 * REF: calibrateCamera. 8127 * @param E The output essential matrix. 8128 * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple 8129 * that performs a change of basis from the first camera's coordinate system to the second camera's 8130 * coordinate system. Note that, in general, t cannot be used for this tuple, see the parameter 8131 * described below. 8132 * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and 8133 * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit 8134 * length. 8135 * @param method Method for computing an essential matrix. 8136 * <ul> 8137 * <li> 8138 * REF: RANSAC for the RANSAC algorithm. 8139 * </li> 8140 * <li> 8141 * REF: LMEDS for the LMedS algorithm. 8142 * </li> 8143 * </ul> 8144 * confidence (probability) that the estimated matrix is correct. 8145 * line in pixels, beyond which the point is considered an outlier and is not used for computing the 8146 * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the 8147 * point localization, image resolution, and the image noise. 8148 * inliers in points1 and points2 for the given essential matrix E. Only these inliers will be used to 8149 * recover pose. In the output mask, only the inliers which pass the cheirality check are marked. 8150 * 8151 * This function decomposes an essential matrix using REF: decomposeEssentialMat and then verifies 8152 * possible pose hypotheses by doing the cheirality check. The cheirality check means that the 8153 * triangulated 3D points should have positive depth. Some details can be found in CITE: Nister03. 8154 * 8155 * This function can be used to process the output E and mask from REF: findEssentialMat.
In this 8156 * scenario, points1 and points2 are the same input for findEssentialMat: 8157 * <code> 8158 * // Example. Estimation of essential matrix using the RANSAC algorithm 8159 * int point_count = 100; 8160 * vector<Point2f> points1(point_count); 8161 * vector<Point2f> points2(point_count); 8162 * 8163 * // initialize the points here ... 8164 * for( int i = 0; i < point_count; i++ ) 8165 * { 8166 * points1[i] = ...; 8167 * points2[i] = ...; 8168 * } 8169 * 8170 * // Input: camera calibration of both cameras, for example using intrinsic chessboard calibration. 8171 * Mat cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2; 8172 * 8173 * // Output: Essential matrix, relative rotation and relative translation. 8174 * Mat E, R, t, mask; 8175 * 8176 * recoverPose(points1, points2, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, E, R, t, mask); 8177 * </code> 8178 * @return automatically generated 8179 */ 8180 public static int recoverPose(Mat points1, Mat points2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Mat E, Mat R, Mat t, int method) { 8181 return recoverPose_3(points1.nativeObj, points2.nativeObj, cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, E.nativeObj, R.nativeObj, t.nativeObj, method); 8182 } 8183 8184 /** 8185 * Recovers the relative camera rotation and the translation from corresponding points in two images from two different cameras, using the cheirality check. Returns the number of 8186 * inliers that pass the check. 8187 * 8188 * @param points1 Array of N 2D points from the first image. The point coordinates should be 8189 * floating-point (single or double precision). 8190 * @param points2 Array of the second image points of the same size and format as points1 . 8191 * @param cameraMatrix1 Input/output camera matrix for the first camera, the same as in 8192 * REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below. 8193 * @param distCoeffs1 Input/output vector of distortion coefficients, the same as in 8194 * REF: calibrateCamera. 8195 * @param cameraMatrix2 Input/output camera matrix for the second camera, the same as in 8196 * REF: calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below. 8197 * @param distCoeffs2 Input/output vector of distortion coefficients, the same as in 8198 * REF: calibrateCamera. 8199 * @param E The output essential matrix. 8200 * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple 8201 * that performs a change of basis from the first camera's coordinate system to the second camera's 8202 * coordinate system. Note that, in general, t cannot be used for this tuple, see the parameter 8203 * described below. 8204 * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and 8205 * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit 8206 * length. 8207 * <ul> 8208 * <li> 8209 * REF: RANSAC for the RANSAC algorithm. 8210 * </li> 8211 * <li> 8212 * REF: LMEDS for the LMedS algorithm. 8213 * </li> 8214 * </ul> 8215 * confidence (probability) that the estimated matrix is correct. 8216 * line in pixels, beyond which the point is considered an outlier and is not used for computing the 8217 * final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the 8218 * point localization, image resolution, and the image noise.
8219 * inliers in points1 and points2 for the given essential matrix E. Only these inliers will be used to 8220 * recover pose. In the output mask, only the inliers which pass the cheirality check are marked. 8221 * 8222 * This function decomposes an essential matrix using REF: decomposeEssentialMat and then verifies 8223 * possible pose hypotheses by doing the cheirality check. The cheirality check means that the 8224 * triangulated 3D points should have positive depth. Some details can be found in CITE: Nister03. 8225 * 8226 * This function can be used to process the output E and mask from REF: findEssentialMat. In this 8227 * scenario, points1 and points2 are the same input for findEssentialMat: 8228 * <code> 8229 * // Example. Estimation of essential matrix using the RANSAC algorithm 8230 * int point_count = 100; 8231 * vector<Point2f> points1(point_count); 8232 * vector<Point2f> points2(point_count); 8233 * 8234 * // initialize the points here ... 8235 * for( int i = 0; i < point_count; i++ ) 8236 * { 8237 * points1[i] = ...; 8238 * points2[i] = ...; 8239 * } 8240 * 8241 * // Input: camera calibration of both cameras, for example using intrinsic chessboard calibration. 8242 * Mat cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2; 8243 * 8244 * // Output: Essential matrix, relative rotation and relative translation. 8245 * Mat E, R, t, mask; 8246 * 8247 * recoverPose(points1, points2, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, E, R, t, mask); 8248 * </code> 8249 * @return automatically generated 8250 */ 8251 public static int recoverPose(Mat points1, Mat points2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Mat E, Mat R, Mat t) { 8252 return recoverPose_4(points1.nativeObj, points2.nativeObj, cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, E.nativeObj, R.nativeObj, t.nativeObj); 8253 } 8254 8255 8256 // 8257 // C++: int cv::recoverPose(Mat E, Mat points1, Mat points2, Mat cameraMatrix, Mat& R, Mat& t, Mat& mask = Mat()) 8258 // 8259 8260 /** 8261 * Recovers the relative camera rotation and the translation from an estimated essential 8262 * matrix and the corresponding points in two images, using the cheirality check. Returns the number of 8263 * inliers that pass the check. 8264 * 8265 * @param E The input essential matrix. 8266 * @param points1 Array of N 2D points from the first image. The point coordinates should be 8267 * floating-point (single or double precision). 8268 * @param points2 Array of the second image points of the same size and format as points1 . 8269 * @param cameraMatrix Camera intrinsic matrix \(\cameramatrix{A}\) . 8270 * Note that this function assumes that points1 and points2 are feature points from cameras with the 8271 * same camera intrinsic matrix. 8272 * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple 8273 * that performs a change of basis from the first camera's coordinate system to the second camera's 8274 * coordinate system. Note that, in general, t cannot be used for this tuple, see the parameter 8275 * described below. 8276 * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and 8277 * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit 8278 * length. 8279 * @param mask Input/output mask for inliers in points1 and points2. If it is not empty, then it marks 8280 * inliers in points1 and points2 for the given essential matrix E.

    //
    // C++: int cv::recoverPose(Mat E, Mat points1, Mat points2, Mat cameraMatrix, Mat& R, Mat& t, Mat& mask = Mat())
    //

    /**
     * Recovers the relative camera rotation and the translation from an estimated essential
     * matrix and the corresponding points in two images, using the cheirality check. Returns the
     * number of inliers that pass the check.
     *
     * @param E The input essential matrix.
     * @param points1 Array of N 2D points from the first image. The point coordinates should be
     * floating-point (single or double precision).
     * @param points2 Array of the second image points of the same size and format as points1 .
     * @param cameraMatrix Camera intrinsic matrix \(\cameramatrix{A}\) .
     * Note that this function assumes that points1 and points2 are feature points from cameras with the
     * same camera intrinsic matrix.
     * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
     * that performs a change of basis from the first camera's coordinate system to the second camera's
     * coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
     * described below.
     * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
     * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
     * length.
     * @param mask Input/output mask for inliers in points1 and points2. If it is not empty, then it marks
     * inliers in points1 and points2 for the given essential matrix E. Only these inliers will be used to
     * recover pose. The output mask keeps only the inliers which pass the cheirality check.
     *
     * This function decomposes an essential matrix using REF: decomposeEssentialMat and then verifies
     * possible pose hypotheses by doing cheirality check. The cheirality check means that the
     * triangulated 3D points should have positive depth. Some details can be found in CITE: Nister03.
     *
     * This function can be used to process the output E and mask from REF: findEssentialMat. In this
     * scenario, points1 and points2 are the same input for #findEssentialMat :
     * <code>
     * // Example. Estimation of the essential matrix using the RANSAC algorithm
     * int point_count = 100;
     * vector<Point2f> points1(point_count);
     * vector<Point2f> points2(point_count);
     *
     * // initialize the points here ...
     * for( int i = 0; i < point_count; i++ )
     * {
     *     points1[i] = ...;
     *     points2[i] = ...;
     * }
     *
     * // camera matrix with both focal lengths = 1, and principal point = (0, 0)
     * Mat cameraMatrix = Mat::eye(3, 3, CV_64F);
     *
     * Mat E, R, t, mask;
     *
     * E = findEssentialMat(points1, points2, cameraMatrix, RANSAC, 0.999, 1.0, mask);
     * recoverPose(E, points1, points2, cameraMatrix, R, t, mask);
     * </code>
     * @return automatically generated
     */
    public static int recoverPose(Mat E, Mat points1, Mat points2, Mat cameraMatrix, Mat R, Mat t, Mat mask) {
        return recoverPose_5(E.nativeObj, points1.nativeObj, points2.nativeObj, cameraMatrix.nativeObj, R.nativeObj, t.nativeObj, mask.nativeObj);
    }

    /**
     * Recovers the relative camera rotation and the translation from an estimated essential
     * matrix and the corresponding points in two images, using the cheirality check. Returns the
     * number of inliers that pass the check.
     *
     * @param E The input essential matrix.
     * @param points1 Array of N 2D points from the first image. The point coordinates should be
     * floating-point (single or double precision).
     * @param points2 Array of the second image points of the same size and format as points1 .
     * @param cameraMatrix Camera intrinsic matrix \(\cameramatrix{A}\) .
     * Note that this function assumes that points1 and points2 are feature points from cameras with the
     * same camera intrinsic matrix.
     * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
     * that performs a change of basis from the first camera's coordinate system to the second camera's
     * coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
     * described below.
     * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
     * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
     * length.
     * inliers in points1 and points2 for the given essential matrix E. Only these inliers will be used to
     * recover pose. The output mask keeps only the inliers which pass the cheirality check.
     *
     * This function decomposes an essential matrix using REF: decomposeEssentialMat and then verifies
     * possible pose hypotheses by doing cheirality check. The cheirality check means that the
     * triangulated 3D points should have positive depth. Some details can be found in CITE: Nister03.
     *
     * This function can be used to process the output E and mask from REF: findEssentialMat. In this
     * scenario, points1 and points2 are the same input for #findEssentialMat :
     * <code>
     * // Example. Estimation of the essential matrix using the RANSAC algorithm
     * int point_count = 100;
     * vector<Point2f> points1(point_count);
     * vector<Point2f> points2(point_count);
     *
     * // initialize the points here ...
     * for( int i = 0; i < point_count; i++ )
     * {
     *     points1[i] = ...;
     *     points2[i] = ...;
     * }
     *
     * // camera matrix with both focal lengths = 1, and principal point = (0, 0)
     * Mat cameraMatrix = Mat::eye(3, 3, CV_64F);
     *
     * Mat E, R, t, mask;
     *
     * E = findEssentialMat(points1, points2, cameraMatrix, RANSAC, 0.999, 1.0, mask);
     * recoverPose(E, points1, points2, cameraMatrix, R, t, mask);
     * </code>
     * @return automatically generated
     */
    public static int recoverPose(Mat E, Mat points1, Mat points2, Mat cameraMatrix, Mat R, Mat t) {
        return recoverPose_6(E.nativeObj, points1.nativeObj, points2.nativeObj, cameraMatrix.nativeObj, R.nativeObj, t.nativeObj);
    }


    //
    // C++: int cv::recoverPose(Mat E, Mat points1, Mat points2, Mat& R, Mat& t, double focal = 1.0, Point2d pp = Point2d(0, 0), Mat& mask = Mat())
    //

    /**
     *
     * @param E The input essential matrix.
     * @param points1 Array of N 2D points from the first image. The point coordinates should be
     * floating-point (single or double precision).
     * @param points2 Array of the second image points of the same size and format as points1 .
     * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
     * that performs a change of basis from the first camera's coordinate system to the second camera's
     * coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
     * description below.
     * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
     * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
     * length.
     * @param focal Focal length of the camera. Note that this function assumes that points1 and points2
     * are feature points from cameras with same focal length and principal point.
     * @param pp principal point of the camera.
     * @param mask Input/output mask for inliers in points1 and points2. If it is not empty, then it marks
     * inliers in points1 and points2 for the given essential matrix E. Only these inliers will be used to
     * recover pose. The output mask keeps only the inliers which pass the cheirality check.
     *
     * This function differs from the one above in that it computes the camera intrinsic matrix from the
     * focal length and principal point:
     *
     * \(A =
     * \begin{bmatrix}
     * f & 0 & x_{pp} \\
     * 0 & f & y_{pp} \\
     * 0 & 0 & 1
     * \end{bmatrix}\)
     * @return automatically generated
     */
    public static int recoverPose(Mat E, Mat points1, Mat points2, Mat R, Mat t, double focal, Point pp, Mat mask) {
        return recoverPose_7(E.nativeObj, points1.nativeObj, points2.nativeObj, R.nativeObj, t.nativeObj, focal, pp.x, pp.y, mask.nativeObj);
    }

    /**
     *
     * @param E The input essential matrix.
     * @param points1 Array of N 2D points from the first image. The point coordinates should be
     * floating-point (single or double precision).
     * @param points2 Array of the second image points of the same size and format as points1 .
     * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
     * that performs a change of basis from the first camera's coordinate system to the second camera's
     * coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
     * description below.
     * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
     * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
     * length.
     * @param focal Focal length of the camera. Note that this function assumes that points1 and points2
     * are feature points from cameras with same focal length and principal point.
     * @param pp principal point of the camera.
     * inliers in points1 and points2 for the given essential matrix E. Only these inliers will be used to
     * recover pose. The output mask keeps only the inliers which pass the cheirality check.
     *
     * This function differs from the one above in that it computes the camera intrinsic matrix from the
     * focal length and principal point:
     *
     * \(A =
     * \begin{bmatrix}
     * f & 0 & x_{pp} \\
     * 0 & f & y_{pp} \\
     * 0 & 0 & 1
     * \end{bmatrix}\)
     * @return automatically generated
     */
    public static int recoverPose(Mat E, Mat points1, Mat points2, Mat R, Mat t, double focal, Point pp) {
        return recoverPose_8(E.nativeObj, points1.nativeObj, points2.nativeObj, R.nativeObj, t.nativeObj, focal, pp.x, pp.y);
    }

    /**
     *
     * @param E The input essential matrix.
     * @param points1 Array of N 2D points from the first image. The point coordinates should be
     * floating-point (single or double precision).
     * @param points2 Array of the second image points of the same size and format as points1 .
     * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
     * that performs a change of basis from the first camera's coordinate system to the second camera's
     * coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
     * description below.
     * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
     * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
     * length.
     * @param focal Focal length of the camera. Note that this function assumes that points1 and points2
     * are feature points from cameras with same focal length and principal point.
     * inliers in points1 and points2 for the given essential matrix E. Only these inliers will be used to
     * recover pose. The output mask keeps only the inliers which pass the cheirality check.
     *
     * This function differs from the one above in that it computes the camera intrinsic matrix from the
     * focal length and principal point:
     *
     * \(A =
     * \begin{bmatrix}
     * f & 0 & x_{pp} \\
     * 0 & f & y_{pp} \\
     * 0 & 0 & 1
     * \end{bmatrix}\)
     * @return automatically generated
     */
    public static int recoverPose(Mat E, Mat points1, Mat points2, Mat R, Mat t, double focal) {
        return recoverPose_9(E.nativeObj, points1.nativeObj, points2.nativeObj, R.nativeObj, t.nativeObj, focal);
    }

    /**
     *
     * @param E The input essential matrix.
     * @param points1 Array of N 2D points from the first image. The point coordinates should be
     * floating-point (single or double precision).
     * @param points2 Array of the second image points of the same size and format as points1 .
     * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
     * that performs a change of basis from the first camera's coordinate system to the second camera's
     * coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
     * description below.
     * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
     * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
     * length.
     * are feature points from cameras with same focal length and principal point.
     * inliers in points1 and points2 for the given essential matrix E. Only these inliers will be used to
     * recover pose. The output mask keeps only the inliers which pass the cheirality check.
     *
     * This function differs from the one above in that it computes the camera intrinsic matrix from the
     * focal length and principal point:
     *
     * \(A =
     * \begin{bmatrix}
     * f & 0 & x_{pp} \\
     * 0 & f & y_{pp} \\
     * 0 & 0 & 1
     * \end{bmatrix}\)
     * @return automatically generated
     */
    public static int recoverPose(Mat E, Mat points1, Mat points2, Mat R, Mat t) {
        return recoverPose_10(E.nativeObj, points1.nativeObj, points2.nativeObj, R.nativeObj, t.nativeObj);
    }


    //
    // C++: int cv::recoverPose(Mat E, Mat points1, Mat points2, Mat cameraMatrix, Mat& R, Mat& t, double distanceThresh, Mat& mask = Mat(), Mat& triangulatedPoints = Mat())
    //

    /**
     *
     * @param E The input essential matrix.
     * @param points1 Array of N 2D points from the first image. The point coordinates should be
     * floating-point (single or double precision).
     * @param points2 Array of the second image points of the same size and format as points1.
     * @param cameraMatrix Camera intrinsic matrix \(\cameramatrix{A}\) .
     * Note that this function assumes that points1 and points2 are feature points from cameras with the
     * same camera intrinsic matrix.
     * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
     * that performs a change of basis from the first camera's coordinate system to the second camera's
     * coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
     * description below.
     * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
     * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
     * length.
     * @param distanceThresh threshold distance which is used to filter out far away points (i.e. infinite
     * points).
     * @param mask Input/output mask for inliers in points1 and points2. If it is not empty, then it marks
     * inliers in points1 and points2 for the given essential matrix E. Only these inliers will be used to
     * recover pose. The output mask keeps only the inliers which pass the cheirality check.
     * @param triangulatedPoints 3D points which were reconstructed by triangulation.
     *
     * This function differs from the one above in that it outputs the triangulated 3D points that are
     * used for the cheirality check.
     * @return automatically generated
     */
    public static int recoverPose(Mat E, Mat points1, Mat points2, Mat cameraMatrix, Mat R, Mat t, double distanceThresh, Mat mask, Mat triangulatedPoints) {
        return recoverPose_11(E.nativeObj, points1.nativeObj, points2.nativeObj, cameraMatrix.nativeObj, R.nativeObj, t.nativeObj, distanceThresh, mask.nativeObj, triangulatedPoints.nativeObj);
    }

    /**
     *
     * @param E The input essential matrix.
     * @param points1 Array of N 2D points from the first image. The point coordinates should be
     * floating-point (single or double precision).
     * @param points2 Array of the second image points of the same size and format as points1.
     * @param cameraMatrix Camera intrinsic matrix \(\cameramatrix{A}\) .
     * Note that this function assumes that points1 and points2 are feature points from cameras with the
     * same camera intrinsic matrix.
     * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
     * that performs a change of basis from the first camera's coordinate system to the second camera's
     * coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
     * description below.
     * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
     * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
     * length.
     * @param distanceThresh threshold distance which is used to filter out far away points (i.e. infinite
     * points).
     * @param mask Input/output mask for inliers in points1 and points2. If it is not empty, then it marks
     * inliers in points1 and points2 for the given essential matrix E. Only these inliers will be used to
     * recover pose. The output mask keeps only the inliers which pass the cheirality check.
     *
     * This function differs from the one above in that it outputs the triangulated 3D points that are
     * used for the cheirality check.
     * @return automatically generated
     */
    public static int recoverPose(Mat E, Mat points1, Mat points2, Mat cameraMatrix, Mat R, Mat t, double distanceThresh, Mat mask) {
        return recoverPose_12(E.nativeObj, points1.nativeObj, points2.nativeObj, cameraMatrix.nativeObj, R.nativeObj, t.nativeObj, distanceThresh, mask.nativeObj);
    }

    /**
     *
     * @param E The input essential matrix.
     * @param points1 Array of N 2D points from the first image. The point coordinates should be
     * floating-point (single or double precision).
     * @param points2 Array of the second image points of the same size and format as points1.
     * @param cameraMatrix Camera intrinsic matrix \(\cameramatrix{A}\) .
     * Note that this function assumes that points1 and points2 are feature points from cameras with the
     * same camera intrinsic matrix.
     * @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
     * that performs a change of basis from the first camera's coordinate system to the second camera's
     * coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
     * description below.
     * @param t Output translation vector. This vector is obtained by REF: decomposeEssentialMat and
     * therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
     * length.
     * @param distanceThresh threshold distance which is used to filter out far away points (i.e. infinite
     * points).
     * inliers in points1 and points2 for the given essential matrix E. Only these inliers will be used to
     * recover pose. The output mask keeps only the inliers which pass the cheirality check.
     *
     * This function differs from the one above in that it outputs the triangulated 3D points that are
     * used for the cheirality check.
     * @return automatically generated
     */
    public static int recoverPose(Mat E, Mat points1, Mat points2, Mat cameraMatrix, Mat R, Mat t, double distanceThresh) {
        return recoverPose_13(E.nativeObj, points1.nativeObj, points2.nativeObj, cameraMatrix.nativeObj, R.nativeObj, t.nativeObj, distanceThresh);
    }
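
    // A Java rendering of the C++ snippet in the docs above: estimate E with findEssentialMat,
    // then recover R and t from it, reusing the RANSAC inlier mask. The matched point sets are
    // assumed to be filled elsewhere; names are illustrative:
    //
    //     MatOfPoint2f points1 = ...;                        // matched points, first image
    //     MatOfPoint2f points2 = ...;                        // matched points, second image
    //     Mat cameraMatrix = Mat.eye(3, 3, CvType.CV_64F);   // f = 1, principal point (0, 0)
    //     Mat mask = new Mat();
    //     Mat E = Calib3d.findEssentialMat(points1, points2, cameraMatrix, Calib3d.RANSAC, 0.999, 1.0, mask);
    //     Mat R = new Mat(), t = new Mat();
    //     int inliers = Calib3d.recoverPose(E, points1, points2, cameraMatrix, R, t, mask);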


    //
    // C++: void cv::computeCorrespondEpilines(Mat points, int whichImage, Mat F, Mat& lines)
    //

    /**
     * For points in an image of a stereo pair, computes the corresponding epilines in the other image.
     *
     * @param points Input points. \(N \times 1\) or \(1 \times N\) matrix of type CV_32FC2 or
     * vector<Point2f> .
     * @param whichImage Index of the image (1 or 2) that contains the points .
     * @param F Fundamental matrix that can be estimated using #findFundamentalMat or #stereoRectify .
     * @param lines Output vector of the epipolar lines corresponding to the points in the other image.
     * Each line \(ax + by + c=0\) is encoded by 3 numbers \((a, b, c)\) .
     *
     * For every point in one of the two images of a stereo pair, the function finds the equation of the
     * corresponding epipolar line in the other image.
     *
     * From the fundamental matrix definition (see #findFundamentalMat ), line \(l^{(2)}_i\) in the second
     * image for the point \(p^{(1)}_i\) in the first image (when whichImage=1 ) is computed as:
     *
     * \(l^{(2)}_i = F p^{(1)}_i\)
     *
     * And vice versa, when whichImage=2, \(l^{(1)}_i\) is computed from \(p^{(2)}_i\) as:
     *
     * \(l^{(1)}_i = F^T p^{(2)}_i\)
     *
     * Line coefficients are defined up to a scale. They are normalized so that \(a_i^2+b_i^2=1\) .
     */
    public static void computeCorrespondEpilines(Mat points, int whichImage, Mat F, Mat lines) {
        computeCorrespondEpilines_0(points.nativeObj, whichImage, F.nativeObj, lines.nativeObj);
    }


    //
    // C++: void cv::triangulatePoints(Mat projMatr1, Mat projMatr2, Mat projPoints1, Mat projPoints2, Mat& points4D)
    //

    /**
     * This function reconstructs 3-dimensional points (in homogeneous coordinates) by using
     * their observations with a stereo camera.
     *
     * @param projMatr1 3x4 projection matrix of the first camera, i.e. this matrix projects 3D points
     * given in the world's coordinate system into the first image.
     * @param projMatr2 3x4 projection matrix of the second camera, i.e. this matrix projects 3D points
     * given in the world's coordinate system into the second image.
     * @param projPoints1 2xN array of feature points in the first image. In the case of the C++ version,
     * it can be also a vector of feature points or two-channel matrix of size 1xN or Nx1.
     * @param projPoints2 2xN array of corresponding points in the second image. In the case of the C++
     * version, it can be also a vector of feature points or two-channel matrix of size 1xN or Nx1.
     * @param points4D 4xN array of reconstructed points in homogeneous coordinates. These points are
     * returned in the world's coordinate system.
     *
     * <b>Note:</b>
     *    Keep in mind that all input data should be of float type in order for this function to work.
     *
     * <b>Note:</b>
     *    If the projection matrices from REF: stereoRectify are used, then the returned points are
     *    represented in the first camera's rectified coordinate system.
     *
     * SEE:
     *    reprojectImageTo3D
     */
    public static void triangulatePoints(Mat projMatr1, Mat projMatr2, Mat projPoints1, Mat projPoints2, Mat points4D) {
        triangulatePoints_0(projMatr1.nativeObj, projMatr2.nativeObj, projPoints1.nativeObj, projPoints2.nativeObj, points4D.nativeObj);
    }
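
    // A short usage sketch for triangulatePoints. Per the note above, all inputs must be of
    // float type; P1 and P2 would typically come from stereoRectify or be composed from the
    // calibration as K [R | t] (illustrative names, filled elsewhere):
    //
    //     Mat P1 = ...;                       // 3x4 CV_32F projection matrix, first camera
    //     Mat P2 = ...;                       // 3x4 CV_32F projection matrix, second camera
    //     Mat pts1 = ...;                     // 2xN CV_32F observations in the first image
    //     Mat pts2 = ...;                     // 2xN CV_32F observations in the second image
    //     Mat points4D = new Mat();
    //     Calib3d.triangulatePoints(P1, P2, pts1, pts2, points4D);
    //     // points4D is 4xN homogeneous; divide rows 0..2 by row 3 for Euclidean coordinates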


    //
    // C++: void cv::correctMatches(Mat F, Mat points1, Mat points2, Mat& newPoints1, Mat& newPoints2)
    //

    /**
     * Refines coordinates of corresponding points.
     *
     * @param F 3x3 fundamental matrix.
     * @param points1 1xN array containing the first set of points.
     * @param points2 1xN array containing the second set of points.
     * @param newPoints1 The optimized points1.
     * @param newPoints2 The optimized points2.
     *
     * The function implements the Optimal Triangulation Method (see Multiple View Geometry for details).
     * For each given point correspondence points1[i] <-> points2[i], and a fundamental matrix F, it
     * computes the corrected correspondences newPoints1[i] <-> newPoints2[i] that minimize the geometric
     * error \(d(points1[i], newPoints1[i])^2 + d(points2[i],newPoints2[i])^2\) (where \(d(a,b)\) is the
     * geometric distance between points \(a\) and \(b\) ) subject to the epipolar constraint
     * \(newPoints2^T * F * newPoints1 = 0\) .
     */
    public static void correctMatches(Mat F, Mat points1, Mat points2, Mat newPoints1, Mat newPoints2) {
        correctMatches_0(F.nativeObj, points1.nativeObj, points2.nativeObj, newPoints1.nativeObj, newPoints2.nativeObj);
    }


    //
    // C++: void cv::filterSpeckles(Mat& img, double newVal, int maxSpeckleSize, double maxDiff, Mat& buf = Mat())
    //

    /**
     * Filters off small noise blobs (speckles) in the disparity map
     *
     * @param img The input 16-bit signed disparity image
     * @param newVal The disparity value used to paint-off the speckles
     * @param maxSpeckleSize The maximum speckle size to consider it a speckle. Larger blobs are not
     * affected by the algorithm
     * @param maxDiff Maximum difference between neighbor disparity pixels to put them into the same
     * blob. Note that since StereoBM, StereoSGBM and possibly other algorithms return a fixed-point
     * disparity map, where disparity values are multiplied by 16, this scale factor should be taken into
     * account when specifying this parameter value.
     * @param buf The optional temporary buffer to avoid memory allocation within the function.
     */
    public static void filterSpeckles(Mat img, double newVal, int maxSpeckleSize, double maxDiff, Mat buf) {
        filterSpeckles_0(img.nativeObj, newVal, maxSpeckleSize, maxDiff, buf.nativeObj);
    }

    /**
     * Filters off small noise blobs (speckles) in the disparity map
     *
     * @param img The input 16-bit signed disparity image
     * @param newVal The disparity value used to paint-off the speckles
     * @param maxSpeckleSize The maximum speckle size to consider it a speckle. Larger blobs are not
     * affected by the algorithm
     * @param maxDiff Maximum difference between neighbor disparity pixels to put them into the same
     * blob. Note that since StereoBM, StereoSGBM and possibly other algorithms return a fixed-point
     * disparity map, where disparity values are multiplied by 16, this scale factor should be taken into
     * account when specifying this parameter value.
     */
    public static void filterSpeckles(Mat img, double newVal, int maxSpeckleSize, double maxDiff) {
        filterSpeckles_1(img.nativeObj, newVal, maxSpeckleSize, maxDiff);
    }
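
    // A minimal sketch for filterSpeckles on a fixed-point StereoBM/StereoSGBM disparity map.
    // Because such maps are scaled by 16, maxDiff is given in the same fixed-point units; the
    // values here are illustrative, not recommended defaults:
    //
    //     Mat disp = ...;                      // 16-bit signed disparity, e.g. from StereoSGBM
    //     double newVal = 0;                   // value used to paint the speckles off
    //     int maxSpeckleSize = 50;             // blobs up to 50 pixels are treated as speckles
    //     double maxDiff = 16;                 // 1 disparity level times the fixed-point scale of 16
    //     Calib3d.filterSpeckles(disp, newVal, maxSpeckleSize, maxDiff);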


    //
    // C++: Rect cv::getValidDisparityROI(Rect roi1, Rect roi2, int minDisparity, int numberOfDisparities, int blockSize)
    //

    public static Rect getValidDisparityROI(Rect roi1, Rect roi2, int minDisparity, int numberOfDisparities, int blockSize) {
        return new Rect(getValidDisparityROI_0(roi1.x, roi1.y, roi1.width, roi1.height, roi2.x, roi2.y, roi2.width, roi2.height, minDisparity, numberOfDisparities, blockSize));
    }


    //
    // C++: void cv::validateDisparity(Mat& disparity, Mat cost, int minDisparity, int numberOfDisparities, int disp12MaxDisp = 1)
    //

    public static void validateDisparity(Mat disparity, Mat cost, int minDisparity, int numberOfDisparities, int disp12MaxDisp) {
        validateDisparity_0(disparity.nativeObj, cost.nativeObj, minDisparity, numberOfDisparities, disp12MaxDisp);
    }

    public static void validateDisparity(Mat disparity, Mat cost, int minDisparity, int numberOfDisparities) {
        validateDisparity_1(disparity.nativeObj, cost.nativeObj, minDisparity, numberOfDisparities);
    }


    //
    // C++: void cv::reprojectImageTo3D(Mat disparity, Mat& _3dImage, Mat Q, bool handleMissingValues = false, int ddepth = -1)
    //

    /**
     * Reprojects a disparity image to 3D space.
     *
     * @param disparity Input single-channel 8-bit unsigned, 16-bit signed, 32-bit signed or 32-bit
     * floating-point disparity image. The values of 8-bit / 16-bit signed formats are assumed to have no
     * fractional bits. If the disparity is 16-bit signed format, as computed by REF: StereoBM or
     * REF: StereoSGBM and maybe other algorithms, it should be divided by 16 (and scaled to float) before
     * being used here.
     * @param _3dImage Output 3-channel floating-point image of the same size as disparity. Each element of
     * _3dImage(x,y) contains 3D coordinates of the point (x,y) computed from the disparity map. If one
     * uses Q obtained by REF: stereoRectify, then the returned points are represented in the first
     * camera's rectified coordinate system.
     * @param Q \(4 \times 4\) perspective transformation matrix that can be obtained with
     * REF: stereoRectify.
     * @param handleMissingValues Indicates whether the function should handle missing values (i.e.
     * points where the disparity was not computed). If handleMissingValues=true, then pixels with the
     * minimal disparity that corresponds to the outliers (see StereoMatcher::compute ) are transformed
     * to 3D points with a very large Z value (currently set to 10000).
     * @param ddepth The optional output array depth. If it is -1, the output image will have CV_32F
     * depth. ddepth can also be set to CV_16S, CV_32S or CV_32F.
     *
     * The function transforms a single-channel disparity map to a 3-channel image representing a 3D
     * surface. That is, for each pixel (x,y) and the corresponding disparity d=disparity(x,y) , it
     * computes:
     *
     * \(\begin{bmatrix}
     * X \\
     * Y \\
     * Z \\
     * W
     * \end{bmatrix} = Q \begin{bmatrix}
     * x \\
     * y \\
     * \texttt{disparity} (x,y) \\
     * 1
     * \end{bmatrix}.\)
     *
     * SEE:
     *    To reproject a sparse set of points {(x,y,d),...} to 3D space, use perspectiveTransform.
     */
    public static void reprojectImageTo3D(Mat disparity, Mat _3dImage, Mat Q, boolean handleMissingValues, int ddepth) {
        reprojectImageTo3D_0(disparity.nativeObj, _3dImage.nativeObj, Q.nativeObj, handleMissingValues, ddepth);
    }

    /**
     * Reprojects a disparity image to 3D space.
     *
     * @param disparity Input single-channel 8-bit unsigned, 16-bit signed, 32-bit signed or 32-bit
     * floating-point disparity image. The values of 8-bit / 16-bit signed formats are assumed to have no
     * fractional bits. If the disparity is 16-bit signed format, as computed by REF: StereoBM or
     * REF: StereoSGBM and maybe other algorithms, it should be divided by 16 (and scaled to float) before
     * being used here.
     * @param _3dImage Output 3-channel floating-point image of the same size as disparity. Each element of
     * _3dImage(x,y) contains 3D coordinates of the point (x,y) computed from the disparity map. If one
     * uses Q obtained by REF: stereoRectify, then the returned points are represented in the first
     * camera's rectified coordinate system.
     * @param Q \(4 \times 4\) perspective transformation matrix that can be obtained with
     * REF: stereoRectify.
     * @param handleMissingValues Indicates whether the function should handle missing values (i.e.
     * points where the disparity was not computed). If handleMissingValues=true, then pixels with the
     * minimal disparity that corresponds to the outliers (see StereoMatcher::compute ) are transformed
     * to 3D points with a very large Z value (currently set to 10000).
     * depth. ddepth can also be set to CV_16S, CV_32S or CV_32F.
     *
     * The function transforms a single-channel disparity map to a 3-channel image representing a 3D
     * surface. That is, for each pixel (x,y) and the corresponding disparity d=disparity(x,y) , it
     * computes:
     *
     * \(\begin{bmatrix}
     * X \\
     * Y \\
     * Z \\
     * W
     * \end{bmatrix} = Q \begin{bmatrix}
     * x \\
     * y \\
     * \texttt{disparity} (x,y) \\
     * 1
     * \end{bmatrix}.\)
     *
     * SEE:
     *    To reproject a sparse set of points {(x,y,d),...} to 3D space, use perspectiveTransform.
     */
    public static void reprojectImageTo3D(Mat disparity, Mat _3dImage, Mat Q, boolean handleMissingValues) {
        reprojectImageTo3D_1(disparity.nativeObj, _3dImage.nativeObj, Q.nativeObj, handleMissingValues);
    }

    /**
     * Reprojects a disparity image to 3D space.
     *
     * @param disparity Input single-channel 8-bit unsigned, 16-bit signed, 32-bit signed or 32-bit
     * floating-point disparity image. The values of 8-bit / 16-bit signed formats are assumed to have no
     * fractional bits. If the disparity is 16-bit signed format, as computed by REF: StereoBM or
     * REF: StereoSGBM and maybe other algorithms, it should be divided by 16 (and scaled to float) before
     * being used here.
     * @param _3dImage Output 3-channel floating-point image of the same size as disparity. Each element of
     * _3dImage(x,y) contains 3D coordinates of the point (x,y) computed from the disparity map. If one
     * uses Q obtained by REF: stereoRectify, then the returned points are represented in the first
     * camera's rectified coordinate system.
     * @param Q \(4 \times 4\) perspective transformation matrix that can be obtained with
     * REF: stereoRectify.
     * points where the disparity was not computed). If handleMissingValues=true, then pixels with the
     * minimal disparity that corresponds to the outliers (see StereoMatcher::compute ) are transformed
     * to 3D points with a very large Z value (currently set to 10000).
     * depth. ddepth can also be set to CV_16S, CV_32S or CV_32F.
     *
     * The function transforms a single-channel disparity map to a 3-channel image representing a 3D
     * surface. That is, for each pixel (x,y) and the corresponding disparity d=disparity(x,y) , it
     * computes:
     *
     * \(\begin{bmatrix}
     * X \\
     * Y \\
     * Z \\
     * W
     * \end{bmatrix} = Q \begin{bmatrix}
     * x \\
     * y \\
     * \texttt{disparity} (x,y) \\
     * 1
     * \end{bmatrix}.\)
     *
     * SEE:
     *    To reproject a sparse set of points {(x,y,d),...} to 3D space, use perspectiveTransform.
     */
    public static void reprojectImageTo3D(Mat disparity, Mat _3dImage, Mat Q) {
        reprojectImageTo3D_2(disparity.nativeObj, _3dImage.nativeObj, Q.nativeObj);
    }
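
    // A minimal sketch for reprojectImageTo3D with a StereoSGBM disparity map and the Q matrix
    // from stereoRectify. As required by the docs above, the fixed-point disparity is converted
    // to float and divided by 16 first (illustrative names, filled elsewhere):
    //
    //     Mat disp16 = ...;                                    // CV_16S disparity from StereoSGBM
    //     Mat disp = new Mat();
    //     disp16.convertTo(disp, CvType.CV_32F, 1.0 / 16.0);   // remove the 4 fractional bits
    //     Mat Q = ...;                                         // 4x4 matrix from stereoRectify
    //     Mat xyz = new Mat();
    //     Calib3d.reprojectImageTo3D(disp, xyz, Q, true);      // handle missing disparities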


    //
    // C++: double cv::sampsonDistance(Mat pt1, Mat pt2, Mat F)
    //

    /**
     * Calculates the Sampson Distance between two points.
     *
     * The function cv::sampsonDistance calculates and returns the first order approximation of the geometric error as:
     * \(
     * sd( \texttt{pt1} , \texttt{pt2} )=
     * \frac{(\texttt{pt2}^t \cdot \texttt{F} \cdot \texttt{pt1})^2}
     * {((\texttt{F} \cdot \texttt{pt1})(0))^2 +
     * ((\texttt{F} \cdot \texttt{pt1})(1))^2 +
     * ((\texttt{F}^t \cdot \texttt{pt2})(0))^2 +
     * ((\texttt{F}^t \cdot \texttt{pt2})(1))^2}
     * \)
     * The fundamental matrix may be calculated using the #findFundamentalMat function. See CITE: HartleyZ00 11.4.3 for details.
     * @param pt1 first homogeneous 2d point
     * @param pt2 second homogeneous 2d point
     * @param F fundamental matrix
     * @return The computed Sampson distance.
     */
    public static double sampsonDistance(Mat pt1, Mat pt2, Mat F) {
        return sampsonDistance_0(pt1.nativeObj, pt2.nativeObj, F.nativeObj);
    }
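
    // A small sketch for sampsonDistance: both points are homogeneous, so a pixel coordinate
    // (x, y) becomes (x, y, 1). The coordinates below are illustrative values only:
    //
    //     Mat pt1 = new Mat(3, 1, CvType.CV_64F);
    //     pt1.put(0, 0, 170.0, 95.0, 1.0);        // (x, y, 1) in the first image
    //     Mat pt2 = new Mat(3, 1, CvType.CV_64F);
    //     pt2.put(0, 0, 172.5, 96.0, 1.0);        // its match in the second image
    //     Mat F = ...;                            // 3x3 fundamental matrix, e.g. from findFundamentalMat
    //     double sd = Calib3d.sampsonDistance(pt1, pt2, F);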


    //
    // C++: int cv::estimateAffine3D(Mat src, Mat dst, Mat& out, Mat& inliers, double ransacThreshold = 3, double confidence = 0.99)
    //

    /**
     * Computes an optimal affine transformation between two 3D point sets.
     *
     * It computes
     * \(
     * \begin{bmatrix}
     * x\\
     * y\\
     * z\\
     * \end{bmatrix}
     * =
     * \begin{bmatrix}
     * a_{11} & a_{12} & a_{13}\\
     * a_{21} & a_{22} & a_{23}\\
     * a_{31} & a_{32} & a_{33}\\
     * \end{bmatrix}
     * \begin{bmatrix}
     * X\\
     * Y\\
     * Z\\
     * \end{bmatrix}
     * +
     * \begin{bmatrix}
     * b_1\\
     * b_2\\
     * b_3\\
     * \end{bmatrix}
     * \)
     *
     * @param src First input 3D point set containing \((X,Y,Z)\).
     * @param dst Second input 3D point set containing \((x,y,z)\).
     * @param out Output 3D affine transformation matrix \(3 \times 4\) of the form
     * \(
     * \begin{bmatrix}
     * a_{11} & a_{12} & a_{13} & b_1\\
     * a_{21} & a_{22} & a_{23} & b_2\\
     * a_{31} & a_{32} & a_{33} & b_3\\
     * \end{bmatrix}
     * \)
     * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
     * @param ransacThreshold Maximum reprojection error in the RANSAC algorithm to consider a point as
     * an inlier.
     * @param confidence Confidence level, between 0 and 1, for the estimated transformation. Anything
     * between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
     * significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
     *
     * The function estimates an optimal 3D affine transformation between two 3D point sets using the
     * RANSAC algorithm.
     * @return automatically generated
     */
    public static int estimateAffine3D(Mat src, Mat dst, Mat out, Mat inliers, double ransacThreshold, double confidence) {
        return estimateAffine3D_0(src.nativeObj, dst.nativeObj, out.nativeObj, inliers.nativeObj, ransacThreshold, confidence);
    }

    /**
     * Computes an optimal affine transformation between two 3D point sets.
     *
     * It computes
     * \(
     * \begin{bmatrix}
     * x\\
     * y\\
     * z\\
     * \end{bmatrix}
     * =
     * \begin{bmatrix}
     * a_{11} & a_{12} & a_{13}\\
     * a_{21} & a_{22} & a_{23}\\
     * a_{31} & a_{32} & a_{33}\\
     * \end{bmatrix}
     * \begin{bmatrix}
     * X\\
     * Y\\
     * Z\\
     * \end{bmatrix}
     * +
     * \begin{bmatrix}
     * b_1\\
     * b_2\\
     * b_3\\
     * \end{bmatrix}
     * \)
     *
     * @param src First input 3D point set containing \((X,Y,Z)\).
     * @param dst Second input 3D point set containing \((x,y,z)\).
     * @param out Output 3D affine transformation matrix \(3 \times 4\) of the form
     * \(
     * \begin{bmatrix}
     * a_{11} & a_{12} & a_{13} & b_1\\
     * a_{21} & a_{22} & a_{23} & b_2\\
     * a_{31} & a_{32} & a_{33} & b_3\\
     * \end{bmatrix}
     * \)
     * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
     * @param ransacThreshold Maximum reprojection error in the RANSAC algorithm to consider a point as
     * an inlier.
     * between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
     * significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
     *
     * The function estimates an optimal 3D affine transformation between two 3D point sets using the
     * RANSAC algorithm.
     * @return automatically generated
     */
    public static int estimateAffine3D(Mat src, Mat dst, Mat out, Mat inliers, double ransacThreshold) {
        return estimateAffine3D_1(src.nativeObj, dst.nativeObj, out.nativeObj, inliers.nativeObj, ransacThreshold);
    }

    /**
     * Computes an optimal affine transformation between two 3D point sets.
     *
     * It computes
     * \(
     * \begin{bmatrix}
     * x\\
     * y\\
     * z\\
     * \end{bmatrix}
     * =
     * \begin{bmatrix}
     * a_{11} & a_{12} & a_{13}\\
     * a_{21} & a_{22} & a_{23}\\
     * a_{31} & a_{32} & a_{33}\\
     * \end{bmatrix}
     * \begin{bmatrix}
     * X\\
     * Y\\
     * Z\\
     * \end{bmatrix}
     * +
     * \begin{bmatrix}
     * b_1\\
     * b_2\\
     * b_3\\
     * \end{bmatrix}
     * \)
     *
     * @param src First input 3D point set containing \((X,Y,Z)\).
     * @param dst Second input 3D point set containing \((x,y,z)\).
     * @param out Output 3D affine transformation matrix \(3 \times 4\) of the form
     * \(
     * \begin{bmatrix}
     * a_{11} & a_{12} & a_{13} & b_1\\
     * a_{21} & a_{22} & a_{23} & b_2\\
     * a_{31} & a_{32} & a_{33} & b_3\\
     * \end{bmatrix}
     * \)
     * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
     * an inlier.
     * between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
     * significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
     *
     * The function estimates an optimal 3D affine transformation between two 3D point sets using the
     * RANSAC algorithm.
     * @return automatically generated
     */
    public static int estimateAffine3D(Mat src, Mat dst, Mat out, Mat inliers) {
        return estimateAffine3D_2(src.nativeObj, dst.nativeObj, out.nativeObj, inliers.nativeObj);
    }
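
    // A minimal sketch of the RANSAC overload above, aligning two 3D point sets; the point
    // sets are assumed to be filled elsewhere and the threshold/confidence are the defaults:
    //
    //     MatOfPoint3f src = ...;        // (X, Y, Z) points
    //     MatOfPoint3f dst = ...;        // corresponding (x, y, z) points
    //     Mat affine = new Mat();        // receives the 3x4 [A | b] matrix
    //     Mat inliers = new Mat();       // per-point inlier flags (1-inlier, 0-outlier)
    //     int ok = Calib3d.estimateAffine3D(src, dst, affine, inliers, 3, 0.99);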


    //
    // C++: Mat cv::estimateAffine3D(Mat src, Mat dst, double* scale = nullptr, bool force_rotation = true)
    //

    /**
     * Computes an optimal affine transformation between two 3D point sets.
     *
     * It computes \(R, s, t\) minimizing \(\sum_{i} \| dst_i - (s \cdot R \cdot src_i + t) \|^2\),
     * where \(R\) is a 3x3 rotation matrix, \(t\) is a 3x1 translation vector and \(s\) is a
     * scalar size value. This is an implementation of the algorithm by Umeyama \cite umeyama1991least .
     * The estimated affine transform has a uniform scale, making it a subclass of affine
     * transformations with 7 degrees of freedom. The paired point sets need to comprise at least 3
     * points each.
     *
     * @param src First input 3D point set.
     * @param dst Second input 3D point set.
     * @param scale If null is passed, the scale parameter s will be assumed to be 1.0.
     * Else the pointed-to variable will be set to the optimal scale.
     * @param force_rotation If true, the returned rotation will never be a reflection.
     * This might be unwanted, e.g. when optimizing a transform between a right- and a
     * left-handed coordinate system.
     * @return 3D affine transformation matrix \(3 \times 4\) of the form
     * \(T =
     * \begin{bmatrix}
     * R & t\\
     * \end{bmatrix}
     * \)
     */
    public static Mat estimateAffine3D(Mat src, Mat dst, double[] scale, boolean force_rotation) {
        double[] scale_out = new double[1];
        Mat retVal = new Mat(estimateAffine3D_3(src.nativeObj, dst.nativeObj, scale_out, force_rotation));
        if(scale!=null) scale[0] = (double)scale_out[0];
        return retVal;
    }

    /**
     * Computes an optimal affine transformation between two 3D point sets.
     *
     * It computes \(R, s, t\) minimizing \(\sum_{i} \| dst_i - (s \cdot R \cdot src_i + t) \|^2\),
     * where \(R\) is a 3x3 rotation matrix, \(t\) is a 3x1 translation vector and \(s\) is a
     * scalar size value. This is an implementation of the algorithm by Umeyama \cite umeyama1991least .
     * The estimated affine transform has a uniform scale, making it a subclass of affine
     * transformations with 7 degrees of freedom. The paired point sets need to comprise at least 3
     * points each.
     *
     * @param src First input 3D point set.
     * @param dst Second input 3D point set.
     * @param scale If null is passed, the scale parameter s will be assumed to be 1.0.
     * Else the pointed-to variable will be set to the optimal scale.
     * This might be unwanted, e.g. when optimizing a transform between a right- and a
     * left-handed coordinate system.
     * @return 3D affine transformation matrix \(3 \times 4\) of the form
     * \(T =
     * \begin{bmatrix}
     * R & t\\
     * \end{bmatrix}
     * \)
     */
    public static Mat estimateAffine3D(Mat src, Mat dst, double[] scale) {
        double[] scale_out = new double[1];
        Mat retVal = new Mat(estimateAffine3D_4(src.nativeObj, dst.nativeObj, scale_out));
        if(scale!=null) scale[0] = (double)scale_out[0];
        return retVal;
    }

    /**
     * Computes an optimal affine transformation between two 3D point sets.
     *
     * It computes \(R, s, t\) minimizing \(\sum_{i} \| dst_i - (s \cdot R \cdot src_i + t) \|^2\),
     * where \(R\) is a 3x3 rotation matrix, \(t\) is a 3x1 translation vector and \(s\) is a
     * scalar size value. This is an implementation of the algorithm by Umeyama \cite umeyama1991least .
     * The estimated affine transform has a uniform scale, making it a subclass of affine
     * transformations with 7 degrees of freedom. The paired point sets need to comprise at least 3
     * points each.
     *
     * @param src First input 3D point set.
     * @param dst Second input 3D point set.
     * Else the pointed-to variable will be set to the optimal scale.
     * This might be unwanted, e.g. when optimizing a transform between a right- and a
     * left-handed coordinate system.
     * @return 3D affine transformation matrix \(3 \times 4\) of the form
     * \(T =
     * \begin{bmatrix}
     * R & t\\
     * \end{bmatrix}
     * \)
     */
    public static Mat estimateAffine3D(Mat src, Mat dst) {
        return new Mat(estimateAffine3D_5(src.nativeObj, dst.nativeObj));
    }
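
    // A minimal sketch of the Umeyama overload above, which also reports the uniform scale s
    // (point sets assumed to be filled elsewhere; at least 3 paired points each):
    //
    //     Mat src = ...;                 // >= 3 3D points
    //     Mat dst = ...;                 // the same points in the target frame
    //     double[] scale = new double[1];
    //     Mat T = Calib3d.estimateAffine3D(src, dst, scale, true);  // force a proper rotation
    //     // T is the 3x4 [R | t] matrix; scale[0] holds the estimated s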


    //
    // C++: int cv::estimateTranslation3D(Mat src, Mat dst, Mat& out, Mat& inliers, double ransacThreshold = 3, double confidence = 0.99)
    //

    /**
     * Computes an optimal translation between two 3D point sets.
     *
     * It computes
     * \(
     * \begin{bmatrix}
     * x\\
     * y\\
     * z\\
     * \end{bmatrix}
     * =
     * \begin{bmatrix}
     * X\\
     * Y\\
     * Z\\
     * \end{bmatrix}
     * +
     * \begin{bmatrix}
     * b_1\\
     * b_2\\
     * b_3\\
     * \end{bmatrix}
     * \)
     *
     * @param src First input 3D point set containing \((X,Y,Z)\).
     * @param dst Second input 3D point set containing \((x,y,z)\).
     * @param out Output 3D translation vector \(3 \times 1\) of the form
     * \(
     * \begin{bmatrix}
     * b_1 \\
     * b_2 \\
     * b_3 \\
     * \end{bmatrix}
     * \)
     * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
     * @param ransacThreshold Maximum reprojection error in the RANSAC algorithm to consider a point as
     * an inlier.
     * @param confidence Confidence level, between 0 and 1, for the estimated transformation. Anything
     * between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
     * significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
     *
     * The function estimates an optimal 3D translation between two 3D point sets using the
     * RANSAC algorithm.
     *
     * @return automatically generated
     */
    public static int estimateTranslation3D(Mat src, Mat dst, Mat out, Mat inliers, double ransacThreshold, double confidence) {
        return estimateTranslation3D_0(src.nativeObj, dst.nativeObj, out.nativeObj, inliers.nativeObj, ransacThreshold, confidence);
    }

    /**
     * Computes an optimal translation between two 3D point sets.
     *
     * It computes
     * \(
     * \begin{bmatrix}
     * x\\
     * y\\
     * z\\
     * \end{bmatrix}
     * =
     * \begin{bmatrix}
     * X\\
     * Y\\
     * Z\\
     * \end{bmatrix}
     * +
     * \begin{bmatrix}
     * b_1\\
     * b_2\\
     * b_3\\
     * \end{bmatrix}
     * \)
     *
     * @param src First input 3D point set containing \((X,Y,Z)\).
     * @param dst Second input 3D point set containing \((x,y,z)\).
     * @param out Output 3D translation vector \(3 \times 1\) of the form
     * \(
     * \begin{bmatrix}
     * b_1 \\
     * b_2 \\
     * b_3 \\
     * \end{bmatrix}
     * \)
     * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
     * @param ransacThreshold Maximum reprojection error in the RANSAC algorithm to consider a point as
     * an inlier.
     * between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
     * significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
     *
     * The function estimates an optimal 3D translation between two 3D point sets using the
     * RANSAC algorithm.
     *
     * @return automatically generated
     */
    public static int estimateTranslation3D(Mat src, Mat dst, Mat out, Mat inliers, double ransacThreshold) {
        return estimateTranslation3D_1(src.nativeObj, dst.nativeObj, out.nativeObj, inliers.nativeObj, ransacThreshold);
    }

    /**
     * Computes an optimal translation between two 3D point sets.
     *
     * It computes
     * \(
     * \begin{bmatrix}
     * x\\
     * y\\
     * z\\
     * \end{bmatrix}
     * =
     * \begin{bmatrix}
     * X\\
     * Y\\
     * Z\\
     * \end{bmatrix}
     * +
     * \begin{bmatrix}
     * b_1\\
     * b_2\\
     * b_3\\
     * \end{bmatrix}
     * \)
     *
     * @param src First input 3D point set containing \((X,Y,Z)\).
     * @param dst Second input 3D point set containing \((x,y,z)\).
     * @param out Output 3D translation vector \(3 \times 1\) of the form
     * \(
     * \begin{bmatrix}
     * b_1 \\
     * b_2 \\
     * b_3 \\
     * \end{bmatrix}
     * \)
     * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
     * an inlier.
     * between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
     * significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
     *
     * The function estimates an optimal 3D translation between two 3D point sets using the
     * RANSAC algorithm.
     *
     * @return automatically generated
     */
    public static int estimateTranslation3D(Mat src, Mat dst, Mat out, Mat inliers) {
        return estimateTranslation3D_2(src.nativeObj, dst.nativeObj, out.nativeObj, inliers.nativeObj);
    }
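
    // A minimal sketch of estimateTranslation3D, recovering only the 3x1 translation between
    // two 3D point sets (point sets assumed to be filled elsewhere):
    //
    //     Mat src = ...;                 // (X, Y, Z) points
    //     Mat dst = ...;                 // the same points, shifted
    //     Mat tvec = new Mat();          // receives the 3x1 translation
    //     Mat inliers = new Mat();
    //     int ok = Calib3d.estimateTranslation3D(src, dst, tvec, inliers);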


    //
    // C++: Mat cv::estimateAffine2D(Mat from, Mat to, Mat& inliers = Mat(), int method = RANSAC, double ransacReprojThreshold = 3, size_t maxIters = 2000, double confidence = 0.99, size_t refineIters = 10)
    //

    /**
     * Computes an optimal affine transformation between two 2D point sets.
     *
     * It computes
     * \(
     * \begin{bmatrix}
     * x\\
     * y\\
     * \end{bmatrix}
     * =
     * \begin{bmatrix}
     * a_{11} & a_{12}\\
     * a_{21} & a_{22}\\
     * \end{bmatrix}
     * \begin{bmatrix}
     * X\\
     * Y\\
     * \end{bmatrix}
     * +
     * \begin{bmatrix}
     * b_1\\
     * b_2\\
     * \end{bmatrix}
     * \)
     *
     * @param from First input 2D point set containing \((X,Y)\).
     * @param to Second input 2D point set containing \((x,y)\).
     * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
     * @param method Robust method used to compute transformation. The following methods are possible:
     * <ul>
     *   <li>
     *    REF: RANSAC - RANSAC-based robust method
     *   </li>
     *   <li>
     *    REF: LMEDS - Least-Median robust method
     * RANSAC is the default method.
     *   </li>
     * </ul>
     * @param ransacReprojThreshold Maximum reprojection error in the RANSAC algorithm to consider
     * a point as an inlier. Applies only to RANSAC.
     * @param maxIters The maximum number of robust method iterations.
     * @param confidence Confidence level, between 0 and 1, for the estimated transformation. Anything
     * between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
     * significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
     * @param refineIters Maximum number of iterations of refining algorithm (Levenberg-Marquardt).
     * Passing 0 will disable refining, so the output matrix will be the output of the robust method.
     *
     * @return Output 2D affine transformation matrix \(2 \times 3\) or empty matrix if transformation
     * could not be estimated. The returned matrix has the following form:
     * \(
     * \begin{bmatrix}
     * a_{11} & a_{12} & b_1\\
     * a_{21} & a_{22} & b_2\\
     * \end{bmatrix}
     * \)
     *
     * The function estimates an optimal 2D affine transformation between two 2D point sets using the
     * selected robust algorithm.
     *
     * The computed transformation is then refined further (using only inliers) with the
     * Levenberg-Marquardt method to reduce the re-projection error even more.
     *
     * <b>Note:</b>
     * The RANSAC method can handle practically any ratio of outliers but needs a threshold to
     * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
     * correctly only when there are more than 50% of inliers.
     *
     * SEE: estimateAffinePartial2D, getAffineTransform
     */
    public static Mat estimateAffine2D(Mat from, Mat to, Mat inliers, int method, double ransacReprojThreshold, long maxIters, double confidence, long refineIters) {
        return new Mat(estimateAffine2D_0(from.nativeObj, to.nativeObj, inliers.nativeObj, method, ransacReprojThreshold, maxIters, confidence, refineIters));
    }
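
    // A minimal sketch of estimateAffine2D with explicit RANSAC settings (the values shown are
    // the documented defaults); an empty return matrix signals that no transform could be
    // estimated. The point sets are assumed to be filled elsewhere:
    //
    //     MatOfPoint2f from = ...;       // (X, Y) points
    //     MatOfPoint2f to = ...;         // corresponding (x, y) points
    //     Mat inliers = new Mat();
    //     Mat M = Calib3d.estimateAffine2D(from, to, inliers, Calib3d.RANSAC, 3, 2000, 0.99, 10);
    //     if (!M.empty()) { /* M is the 2x3 [A | b] affine matrix */ }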

    /**
     * Computes an optimal affine transformation between two 2D point sets.
     *
     * It computes
     * \(
     * \begin{bmatrix}
     * x\\
     * y\\
     * \end{bmatrix}
     * =
     * \begin{bmatrix}
     * a_{11} & a_{12}\\
     * a_{21} & a_{22}\\
     * \end{bmatrix}
     * \begin{bmatrix}
     * X\\
     * Y\\
     * \end{bmatrix}
     * +
     * \begin{bmatrix}
     * b_1\\
     * b_2\\
     * \end{bmatrix}
     * \)
     *
     * @param from First input 2D point set containing \((X,Y)\).
     * @param to Second input 2D point set containing \((x,y)\).
     * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
     * @param method Robust method used to compute transformation. The following methods are possible:
     * <ul>
     *   <li>
     *    REF: RANSAC - RANSAC-based robust method
     *   </li>
     *   <li>
     *    REF: LMEDS - Least-Median robust method
     * RANSAC is the default method.
     *   </li>
     * </ul>
     * @param ransacReprojThreshold Maximum reprojection error in the RANSAC algorithm to consider
     * a point as an inlier. Applies only to RANSAC.
     * @param maxIters The maximum number of robust method iterations.
     * @param confidence Confidence level, between 0 and 1, for the estimated transformation. Anything
     * between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
     * significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
     * Passing 0 will disable refining, so the output matrix will be the output of the robust method.
     *
     * @return Output 2D affine transformation matrix \(2 \times 3\) or empty matrix if transformation
     * could not be estimated. The returned matrix has the following form:
     * \(
     * \begin{bmatrix}
     * a_{11} & a_{12} & b_1\\
     * a_{21} & a_{22} & b_2\\
     * \end{bmatrix}
     * \)
     *
     * The function estimates an optimal 2D affine transformation between two 2D point sets using the
     * selected robust algorithm.
     *
     * The computed transformation is then refined further (using only inliers) with the
     * Levenberg-Marquardt method to reduce the re-projection error even more.
     *
     * <b>Note:</b>
     * The RANSAC method can handle practically any ratio of outliers but needs a threshold to
     * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
     * correctly only when there are more than 50% of inliers.
     *
     * SEE: estimateAffinePartial2D, getAffineTransform
     */
    public static Mat estimateAffine2D(Mat from, Mat to, Mat inliers, int method, double ransacReprojThreshold, long maxIters, double confidence) {
        return new Mat(estimateAffine2D_1(from.nativeObj, to.nativeObj, inliers.nativeObj, method, ransacReprojThreshold, maxIters, confidence));
    }

    /**
     * Computes an optimal affine transformation between two 2D point sets.
     *
     * It computes
     * \(
     * \begin{bmatrix}
     * x\\
     * y\\
     * \end{bmatrix}
     * =
     * \begin{bmatrix}
     * a_{11} & a_{12}\\
     * a_{21} & a_{22}\\
     * \end{bmatrix}
     * \begin{bmatrix}
     * X\\
     * Y\\
     * \end{bmatrix}
     * +
     * \begin{bmatrix}
     * b_1\\
     * b_2\\
     * \end{bmatrix}
     * \)
     *
     * @param from First input 2D point set containing \((X,Y)\).
     * @param to Second input 2D point set containing \((x,y)\).
     * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
     * @param method Robust method used to compute transformation. The following methods are possible:
     * <ul>
     *   <li>
     *    REF: RANSAC - RANSAC-based robust method
     *   </li>
     *   <li>
     *    REF: LMEDS - Least-Median robust method
     * RANSAC is the default method.
     *   </li>
     * </ul>
     * @param ransacReprojThreshold Maximum reprojection error in the RANSAC algorithm to consider
     * a point as an inlier. Applies only to RANSAC.
     * @param maxIters The maximum number of robust method iterations.
     * between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
     * significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
     * Passing 0 will disable refining, so the output matrix will be the output of the robust method.
     *
     * @return Output 2D affine transformation matrix \(2 \times 3\) or empty matrix if transformation
     * could not be estimated. The returned matrix has the following form:
     * \(
     * \begin{bmatrix}
     * a_{11} & a_{12} & b_1\\
     * a_{21} & a_{22} & b_2\\
     * \end{bmatrix}
     * \)
     *
     * The function estimates an optimal 2D affine transformation between two 2D point sets using the
     * selected robust algorithm.
     *
     * The computed transformation is then refined further (using only inliers) with the
     * Levenberg-Marquardt method to reduce the re-projection error even more.
     *
     * <b>Note:</b>
     * The RANSAC method can handle practically any ratio of outliers but needs a threshold to
     * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
     * correctly only when there are more than 50% of inliers.
     *
     * SEE: estimateAffinePartial2D, getAffineTransform
     */
    public static Mat estimateAffine2D(Mat from, Mat to, Mat inliers, int method, double ransacReprojThreshold, long maxIters) {
        return new Mat(estimateAffine2D_2(from.nativeObj, to.nativeObj, inliers.nativeObj, method, ransacReprojThreshold, maxIters));
    }

    /**
     * Computes an optimal affine transformation between two 2D point sets.
     *
     * It computes
     * \(
     * \begin{bmatrix}
     * x\\
     * y\\
     * \end{bmatrix}
     * =
     * \begin{bmatrix}
     * a_{11} & a_{12}\\
     * a_{21} & a_{22}\\
     * \end{bmatrix}
     * \begin{bmatrix}
     * X\\
     * Y\\
     * \end{bmatrix}
     * +
     * \begin{bmatrix}
     * b_1\\
     * b_2\\
     * \end{bmatrix}
     * \)
     *
     * @param from First input 2D point set containing \((X,Y)\).
     * @param to Second input 2D point set containing \((x,y)\).
     * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
     * @param method Robust method used to compute transformation. The following methods are possible:
     * <ul>
     *   <li>
     *    REF: RANSAC - RANSAC-based robust method
     *   </li>
     *   <li>
     *    REF: LMEDS - Least-Median robust method
     * RANSAC is the default method.
     *   </li>
     * </ul>
     * @param ransacReprojThreshold Maximum reprojection error in the RANSAC algorithm to consider
     * a point as an inlier. Applies only to RANSAC.
     * between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
     * significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
     * Passing 0 will disable refining, so the output matrix will be the output of the robust method.
     *
     * @return Output 2D affine transformation matrix \(2 \times 3\) or empty matrix if transformation
     * could not be estimated. The returned matrix has the following form:
     * \(
     * \begin{bmatrix}
     * a_{11} & a_{12} & b_1\\
     * a_{21} & a_{22} & b_2\\
     * \end{bmatrix}
     * \)
     *
     * The function estimates an optimal 2D affine transformation between two 2D point sets using the
     * selected robust algorithm.
     *
     * The computed transformation is then refined further (using only inliers) with the
     * Levenberg-Marquardt method to reduce the re-projection error even more.
     *
     * <b>Note:</b>
     * The RANSAC method can handle practically any ratio of outliers but needs a threshold to
     * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
     * correctly only when there are more than 50% of inliers.
     *
     * SEE: estimateAffinePartial2D, getAffineTransform
     */
    public static Mat estimateAffine2D(Mat from, Mat to, Mat inliers, int method, double ransacReprojThreshold) {
        return new Mat(estimateAffine2D_3(from.nativeObj, to.nativeObj, inliers.nativeObj, method, ransacReprojThreshold));
    }

    /**
     * Computes an optimal affine transformation between two 2D point sets.
     *
     * It computes
     * \(
     * \begin{bmatrix}
     * x\\
     * y\\
     * \end{bmatrix}
     * =
     * \begin{bmatrix}
     * a_{11} & a_{12}\\
     * a_{21} & a_{22}\\
     * \end{bmatrix}
     * \begin{bmatrix}
     * X\\
     * Y\\
     * \end{bmatrix}
     * +
     * \begin{bmatrix}
     * b_1\\
     * b_2\\
     * \end{bmatrix}
     * \)
     *
     * @param from First input 2D point set containing \((X,Y)\).
     * @param to Second input 2D point set containing \((x,y)\).
     * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
     * @param method Robust method used to compute transformation. The following methods are possible:
     * <ul>
     *   <li>
     *    REF: RANSAC - RANSAC-based robust method
     *   </li>
     *   <li>
     *    REF: LMEDS - Least-Median robust method
     * RANSAC is the default method.
     *   </li>
     * </ul>
     * a point as an inlier. Applies only to RANSAC.
     * between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
     * significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
     * Passing 0 will disable refining, so the output matrix will be the output of the robust method.
     *
     * @return Output 2D affine transformation matrix \(2 \times 3\) or empty matrix if transformation
     * could not be estimated. The returned matrix has the following form:
     * \(
     * \begin{bmatrix}
     * a_{11} & a_{12} & b_1\\
     * a_{21} & a_{22} & b_2\\
     * \end{bmatrix}
     * \)
     *
     * The function estimates an optimal 2D affine transformation between two 2D point sets using the
     * selected robust algorithm.
     *
     * The computed transformation is then refined further (using only inliers) with the
     * Levenberg-Marquardt method to reduce the re-projection error even more.
     *
     * <b>Note:</b>
     * The RANSAC method can handle practically any ratio of outliers but needs a threshold to
     * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
     * correctly only when there are more than 50% of inliers.
     *
     * SEE: estimateAffinePartial2D, getAffineTransform
     */
    public static Mat estimateAffine2D(Mat from, Mat to, Mat inliers, int method) {
        return new Mat(estimateAffine2D_4(from.nativeObj, to.nativeObj, inliers.nativeObj, method));
    }

    /**
     * Computes an optimal affine transformation between two 2D point sets.
     *
     * It computes
     * \(
     * \begin{bmatrix}
     * x\\
     * y\\
     * \end{bmatrix}
     * =
     * \begin{bmatrix}
     * a_{11} & a_{12}\\
     * a_{21} & a_{22}\\
     * \end{bmatrix}
     * \begin{bmatrix}
     * X\\
     * Y\\
     * \end{bmatrix}
     * +
     * \begin{bmatrix}
     * b_1\\
     * b_2\\
     * \end{bmatrix}
     * \)
     *
     * @param from First input 2D point set containing \((X,Y)\).
     * @param to Second input 2D point set containing \((x,y)\).
     * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
     * <ul>
     *   <li>
     *    REF: RANSAC - RANSAC-based robust method
     *   </li>
     *   <li>
     *    REF: LMEDS - Least-Median robust method
     * RANSAC is the default method.
     *   </li>
     * </ul>
     * a point as an inlier. Applies only to RANSAC.
     * between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
     * significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
     * Passing 0 will disable refining, so the output matrix will be the output of the robust method.
     *
     * @return Output 2D affine transformation matrix \(2 \times 3\) or empty matrix if transformation
     * could not be estimated. The returned matrix has the following form:
     * \(
     * \begin{bmatrix}
     * a_{11} & a_{12} & b_1\\
     * a_{21} & a_{22} & b_2\\
     * \end{bmatrix}
     * \)
     *
     * The function estimates an optimal 2D affine transformation between two 2D point sets using the
     * selected robust algorithm.
     *
     * The computed transformation is then refined further (using only inliers) with the
     * Levenberg-Marquardt method to reduce the re-projection error even more.
9742 * 9743 * <b>Note:</b> 9744 * The RANSAC method can handle practically any ratio of outliers but needs a threshold to 9745 * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works 9746 * correctly only when there are more than 50% of inliers. 9747 * 9748 * SEE: estimateAffinePartial2D, getAffineTransform 9749 */ 9750 public static Mat estimateAffine2D(Mat from, Mat to, Mat inliers) { 9751 return new Mat(estimateAffine2D_5(from.nativeObj, to.nativeObj, inliers.nativeObj)); 9752 } 9753 9754 /** 9755 * Computes an optimal affine transformation between two 2D point sets. 9756 * 9757 * It computes 9758 * \( 9759 * \begin{bmatrix} 9760 * x\\ 9761 * y\\ 9762 * \end{bmatrix} 9763 * = 9764 * \begin{bmatrix} 9765 * a_{11} & a_{12}\\ 9766 * a_{21} & a_{22}\\ 9767 * \end{bmatrix} 9768 * \begin{bmatrix} 9769 * X\\ 9770 * Y\\ 9771 * \end{bmatrix} 9772 * + 9773 * \begin{bmatrix} 9774 * b_1\\ 9775 * b_2\\ 9776 * \end{bmatrix} 9777 * \) 9778 * 9779 * @param from First input 2D point set containing \((X,Y)\). 9780 * @param to Second input 2D point set containing \((x,y)\). 9781 * <ul> 9782 * <li> 9783 * REF: RANSAC - RANSAC-based robust method 9784 * </li> 9785 * <li> 9786 * REF: LMEDS - Least-Median robust method 9787 * RANSAC is the default method. 9788 * </li> 9789 * </ul> 9790 * a point as an inlier. Applies only to RANSAC. 9791 * between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation 9792 * significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation. 9793 * Passing 0 will disable refining, so the output matrix will be output of robust method. 9794 * 9795 * @return Output 2D affine transformation matrix \(2 \times 3\) or empty matrix if transformation 9796 * could not be estimated. The returned matrix has the following form: 9797 * \( 9798 * \begin{bmatrix} 9799 * a_{11} & a_{12} & b_1\\ 9800 * a_{21} & a_{22} & b_2\\ 9801 * \end{bmatrix} 9802 * \) 9803 * 9804 * The function estimates an optimal 2D affine transformation between two 2D point sets using the 9805 * selected robust algorithm. 9806 * 9807 * The computed transformation is then refined further (using only inliers) with the 9808 * Levenberg-Marquardt method to reduce the re-projection error even more. 9809 * 9810 * <b>Note:</b> 9811 * The RANSAC method can handle practically any ratio of outliers but needs a threshold to 9812 * distinguish inliers from outliers. The method LMeDS does not need any threshold but it works 9813 * correctly only when there are more than 50% of inliers. 9814 * 9815 * SEE: estimateAffinePartial2D, getAffineTransform 9816 */ 9817 public static Mat estimateAffine2D(Mat from, Mat to) { 9818 return new Mat(estimateAffine2D_6(from.nativeObj, to.nativeObj)); 9819 } 9820 9821 9822 // 9823 // C++: Mat cv::estimateAffine2D(Mat pts1, Mat pts2, Mat& inliers, UsacParams params) 9824 // 9825 9826 public static Mat estimateAffine2D(Mat pts1, Mat pts2, Mat inliers, UsacParams params) { 9827 return new Mat(estimateAffine2D_7(pts1.nativeObj, pts2.nativeObj, inliers.nativeObj, params.nativeObj)); 9828 } 9829 9830 9831 // 9832 // C++: Mat cv::estimateAffinePartial2D(Mat from, Mat to, Mat& inliers = Mat(), int method = RANSAC, double ransacReprojThreshold = 3, size_t maxIters = 2000, double confidence = 0.99, size_t refineIters = 10) 9833 // 9834 9835 /** 9836 * Computes an optimal limited affine transformation with 4 degrees of freedom between 9837 * two 2D point sets. 9838 * 9839 * @param from First input 2D point set. 
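
    // Editorial usage sketch (not part of the generated API): estimating a 2D affine
    // transform with RANSAC and a 3-pixel reprojection threshold. The point coordinates
    // below are hypothetical; any equal-length CV_32FC2/CV_64FC2 point containers work.
    private static void exampleEstimateAffine2D() {
        MatOfPoint2f from = new MatOfPoint2f(
                new Point(0, 0), new Point(100, 0), new Point(0, 100), new Point(100, 100));
        // The same square translated by (10, 20): the recovered matrix should be
        // close to [1 0 10; 0 1 20].
        MatOfPoint2f to = new MatOfPoint2f(
                new Point(10, 20), new Point(110, 20), new Point(10, 120), new Point(110, 120));
        Mat inliers = new Mat();
        Mat A = estimateAffine2D(from, to, inliers, RANSAC, 3.0);
        if (!A.empty()) {
            System.out.println(A.dump()); // 2x3: [a11 a12 b1; a21 a22 b2]
        }
    }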

    //
    // C++: Mat cv::estimateAffinePartial2D(Mat from, Mat to, Mat& inliers = Mat(), int method = RANSAC, double ransacReprojThreshold = 3, size_t maxIters = 2000, double confidence = 0.99, size_t refineIters = 10)
    //

    /**
     * Computes an optimal limited affine transformation with 4 degrees of freedom between
     * two 2D point sets.
     *
     * @param from First input 2D point set.
     * @param to Second input 2D point set.
     * @param inliers Output vector indicating which points are inliers.
     * @param method Robust method used to compute the transformation. The following methods are possible:
     * <ul>
     * <li>
     * REF: RANSAC - RANSAC-based robust method (the default)
     * </li>
     * <li>
     * REF: LMEDS - Least-Median robust method
     * </li>
     * </ul>
     * @param ransacReprojThreshold Maximum reprojection error in the RANSAC algorithm to consider
     * a point as an inlier. Applies only to RANSAC.
     * @param maxIters The maximum number of robust method iterations.
     * @param confidence Confidence level, between 0 and 1, for the estimated transformation. Anything
     * between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
     * significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
     * @param refineIters Maximum number of iterations of the refining algorithm (Levenberg-Marquardt).
     * Passing 0 disables refining, so the output matrix is the raw output of the robust method.
     *
     * @return Output 2D affine transformation (4 degrees of freedom) matrix \(2 \times 3\) or
     * empty matrix if the transformation could not be estimated.
     *
     * The function estimates an optimal 2D affine transformation with 4 degrees of freedom, limited to
     * combinations of translation, rotation, and uniform scaling. It uses the selected algorithm for
     * robust estimation.
     *
     * The computed transformation is then refined further (using only inliers) with the
     * Levenberg-Marquardt method to reduce the re-projection error even more.
     *
     * The estimated transformation matrix is:
     * \( \begin{bmatrix} \cos(\theta) \cdot s & -\sin(\theta) \cdot s & t_x \\
     * \sin(\theta) \cdot s & \cos(\theta) \cdot s & t_y
     * \end{bmatrix} \)
     * where \( \theta \) is the rotation angle, \( s \) is the scaling factor, and \( t_x, t_y \) are
     * translations along the \( x, y \) axes, respectively.
     *
     * <b>Note:</b>
     * The RANSAC method can handle practically any ratio of outliers but needs a threshold to
     * distinguish inliers from outliers. The LMeDS method does not need any threshold, but it works
     * correctly only when there are more than 50% inliers.
     *
     * SEE: estimateAffine2D, getAffineTransform
     */
    public static Mat estimateAffinePartial2D(Mat from, Mat to, Mat inliers, int method, double ransacReprojThreshold, long maxIters, double confidence, long refineIters) {
        return new Mat(estimateAffinePartial2D_0(from.nativeObj, to.nativeObj, inliers.nativeObj, method, ransacReprojThreshold, maxIters, confidence, refineIters));
    }

    /**
     * Computes an optimal limited affine transformation with 4 degrees of freedom between
     * two 2D point sets.
     *
     * Equivalent to the fully documented overload above with refineIters = 10.
     */
    public static Mat estimateAffinePartial2D(Mat from, Mat to, Mat inliers, int method, double ransacReprojThreshold, long maxIters, double confidence) {
        return new Mat(estimateAffinePartial2D_1(from.nativeObj, to.nativeObj, inliers.nativeObj, method, ransacReprojThreshold, maxIters, confidence));
    }

    /**
     * Computes an optimal limited affine transformation with 4 degrees of freedom between
     * two 2D point sets.
     *
     * Equivalent to the fully documented overload above with confidence = 0.99 and refineIters = 10.
     */
    public static Mat estimateAffinePartial2D(Mat from, Mat to, Mat inliers, int method, double ransacReprojThreshold, long maxIters) {
        return new Mat(estimateAffinePartial2D_2(from.nativeObj, to.nativeObj, inliers.nativeObj, method, ransacReprojThreshold, maxIters));
    }

    /**
     * Computes an optimal limited affine transformation with 4 degrees of freedom between
     * two 2D point sets.
     *
     * Equivalent to the fully documented overload above with maxIters = 2000, confidence = 0.99 and
     * refineIters = 10.
     */
    public static Mat estimateAffinePartial2D(Mat from, Mat to, Mat inliers, int method, double ransacReprojThreshold) {
        return new Mat(estimateAffinePartial2D_3(from.nativeObj, to.nativeObj, inliers.nativeObj, method, ransacReprojThreshold));
    }

    /**
     * Computes an optimal limited affine transformation with 4 degrees of freedom between
     * two 2D point sets.
     *
     * Equivalent to the fully documented overload above with ransacReprojThreshold = 3,
     * maxIters = 2000, confidence = 0.99 and refineIters = 10.
     */
    public static Mat estimateAffinePartial2D(Mat from, Mat to, Mat inliers, int method) {
        return new Mat(estimateAffinePartial2D_4(from.nativeObj, to.nativeObj, inliers.nativeObj, method));
    }

    /**
     * Computes an optimal limited affine transformation with 4 degrees of freedom between
     * two 2D point sets.
     *
     * Equivalent to the fully documented overload above with method = RANSAC and all remaining
     * parameters at their defaults.
     */
    public static Mat estimateAffinePartial2D(Mat from, Mat to, Mat inliers) {
        return new Mat(estimateAffinePartial2D_5(from.nativeObj, to.nativeObj, inliers.nativeObj));
    }

    /**
     * Computes an optimal limited affine transformation with 4 degrees of freedom between
     * two 2D point sets.
     *
     * Equivalent to the fully documented overload above with all optional parameters at their
     * defaults; the per-point inlier mask is not reported.
     */
    public static Mat estimateAffinePartial2D(Mat from, Mat to) {
        return new Mat(estimateAffinePartial2D_6(from.nativeObj, to.nativeObj));
    }
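
    // Editorial usage sketch (not part of the generated API): recovering a 4-DoF
    // similarity transform (rotation + uniform scale + translation). The data below is
    // hypothetical: a square rotated 90 degrees about the origin, then shifted by (50, 0).
    private static void exampleEstimateAffinePartial2D() {
        MatOfPoint2f from = new MatOfPoint2f(
                new Point(0, 0), new Point(100, 0), new Point(0, 100), new Point(100, 100));
        MatOfPoint2f to = new MatOfPoint2f(
                new Point(50, 0), new Point(50, 100), new Point(-50, 0), new Point(-50, 100));
        Mat inliers = new Mat();
        // The threshold argument is ignored by LMEDS; it only applies to RANSAC.
        Mat M = estimateAffinePartial2D(from, to, inliers, LMEDS, 3.0);
        // M is 2x3 of the form [cos(t)*s, -sin(t)*s, tx; sin(t)*s, cos(t)*s, ty].
        System.out.println(M.empty() ? "estimation failed" : M.dump());
    }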

    //
    // C++: int cv::decomposeHomographyMat(Mat H, Mat K, vector_Mat& rotations, vector_Mat& translations, vector_Mat& normals)
    //

    /**
     * Decompose a homography matrix to rotation(s), translation(s) and plane normal(s).
     *
     * @param H The input homography matrix between two images.
     * @param K The input camera intrinsic matrix.
     * @param rotations Array of rotation matrices.
     * @param translations Array of translation matrices.
     * @param normals Array of plane normal matrices.
     *
     * This function extracts relative camera motion between two views of a planar object and returns up to
     * four mathematical solution tuples of rotation, translation, and plane normal. The decomposition of
     * the homography matrix H is described in detail in CITE: Malis.
     *
     * If the homography H, induced by the plane, gives the constraint
     * \(s_i \vecthree{x'_i}{y'_i}{1} \sim H \vecthree{x_i}{y_i}{1}\) on the source image points
     * \(p_i\) and the destination image points \(p'_i\), then the tuple of rotations[k] and
     * translations[k] is a change of basis from the source camera's coordinate system to the destination
     * camera's coordinate system. However, by decomposing H, one can only get the translation normalized
     * by the (typically unknown) depth of the scene, i.e. its direction, with normalized length.
     *
     * If point correspondences are available, at least two solutions may further be invalidated by
     * applying the positive depth constraint, i.e. all points must be in front of the camera.
     * @return the number of candidate solutions found
     */
    public static int decomposeHomographyMat(Mat H, Mat K, List<Mat> rotations, List<Mat> translations, List<Mat> normals) {
        Mat rotations_mat = new Mat();
        Mat translations_mat = new Mat();
        Mat normals_mat = new Mat();
        int retVal = decomposeHomographyMat_0(H.nativeObj, K.nativeObj, rotations_mat.nativeObj, translations_mat.nativeObj, normals_mat.nativeObj);
        Converters.Mat_to_vector_Mat(rotations_mat, rotations);
        rotations_mat.release();
        Converters.Mat_to_vector_Mat(translations_mat, translations);
        translations_mat.release();
        Converters.Mat_to_vector_Mat(normals_mat, normals);
        normals_mat.release();
        return retVal;
    }
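
    // Editorial usage sketch (not part of the generated API): decomposing a homography.
    // The K and H values below are hypothetical; in practice H would come from
    // #findHomography and K from #calibrateCamera.
    private static void exampleDecomposeHomographyMat() {
        Mat K = Mat.eye(3, 3, org.opencv.core.CvType.CV_64F);
        K.put(0, 0, 800); K.put(1, 1, 800); // fx, fy
        K.put(0, 2, 320); K.put(1, 2, 240); // cx, cy
        Mat H = Mat.eye(3, 3, org.opencv.core.CvType.CV_64F); // placeholder homography
        List<Mat> rotations = new ArrayList<>();
        List<Mat> translations = new ArrayList<>();
        List<Mat> normals = new ArrayList<>();
        int n = decomposeHomographyMat(H, K, rotations, translations, normals);
        // Up to four (R, t, n) candidate solutions are returned; t is known only up to
        // the unknown scene depth (direction with normalized length).
        System.out.println("candidate solutions: " + n);
    }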

    //
    // C++: void cv::filterHomographyDecompByVisibleRefpoints(vector_Mat rotations, vector_Mat normals, Mat beforePoints, Mat afterPoints, Mat& possibleSolutions, Mat pointsMask = Mat())
    //

    /**
     * Filters homography decompositions based on additional information.
     *
     * @param rotations Vector of rotation matrices.
     * @param normals Vector of plane normal matrices.
     * @param beforePoints Vector of (rectified) visible reference points before the homography is applied.
     * @param afterPoints Vector of (rectified) visible reference points after the homography is applied.
     * @param possibleSolutions Vector of int indices representing the viable solution set after filtering.
     * @param pointsMask Optional Mat/Vector of 8u type representing the mask for the inliers as given by the #findHomography function.
     *
     * This function is intended to filter the output of #decomposeHomographyMat based on additional
     * information as described in CITE: Malis. In summary, the #decomposeHomographyMat function
     * returns 2 unique solutions and their "opposites" for a total of 4 solutions. If we have access to the
     * sets of points visible in the camera frame before and after the homography transformation is applied,
     * we can determine which are the true potential solutions and which are the opposites by verifying which
     * homographies are consistent with all visible reference points being in front of the camera. The inputs
     * are left unchanged; the filtered solution set is returned as indices into the existing one.
     */
    public static void filterHomographyDecompByVisibleRefpoints(List<Mat> rotations, List<Mat> normals, Mat beforePoints, Mat afterPoints, Mat possibleSolutions, Mat pointsMask) {
        Mat rotations_mat = Converters.vector_Mat_to_Mat(rotations);
        Mat normals_mat = Converters.vector_Mat_to_Mat(normals);
        filterHomographyDecompByVisibleRefpoints_0(rotations_mat.nativeObj, normals_mat.nativeObj, beforePoints.nativeObj, afterPoints.nativeObj, possibleSolutions.nativeObj, pointsMask.nativeObj);
    }

    /**
     * Filters homography decompositions based on additional information.
     *
     * Equivalent to the fully documented overload above with an empty pointsMask (no inlier mask).
     */
    public static void filterHomographyDecompByVisibleRefpoints(List<Mat> rotations, List<Mat> normals, Mat beforePoints, Mat afterPoints, Mat possibleSolutions) {
        Mat rotations_mat = Converters.vector_Mat_to_Mat(rotations);
        Mat normals_mat = Converters.vector_Mat_to_Mat(normals);
        filterHomographyDecompByVisibleRefpoints_1(rotations_mat.nativeObj, normals_mat.nativeObj, beforePoints.nativeObj, afterPoints.nativeObj, possibleSolutions.nativeObj);
    }

    //
    // C++: void cv::undistort(Mat src, Mat& dst, Mat cameraMatrix, Mat distCoeffs, Mat newCameraMatrix = Mat())
    //

    /**
     * Transforms an image to compensate for lens distortion.
     *
     * The function transforms an image to compensate for radial and tangential lens distortion.
     *
     * The function is simply a combination of #initUndistortRectifyMap (with unity R) and #remap
     * (with bilinear interpolation). See the former function for details of the transformation being
     * performed.
     *
     * Those pixels in the destination image for which there are no corresponding pixels in the source
     * image are filled with zeros (black color).
     *
     * The particular subset of the source image that will be visible in the corrected image can be
     * regulated by newCameraMatrix. You can use #getOptimalNewCameraMatrix to compute the appropriate
     * newCameraMatrix, depending on your requirements.
     *
     * The camera matrix and the distortion parameters can be determined using #calibrateCamera. If
     * the resolution of images is different from the resolution used at the calibration stage, \(f_x,
     * f_y, c_x\) and \(c_y\) need to be scaled accordingly, while the distortion coefficients remain
     * the same.
     *
     * @param src Input (distorted) image.
     * @param dst Output (corrected) image that has the same size and type as src.
     * @param cameraMatrix Input camera matrix \(A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
     * @param distCoeffs Input vector of distortion coefficients
     * \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
     * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, zero distortion coefficients are assumed.
     * @param newCameraMatrix Camera matrix of the distorted image. By default, it is the same as
     * cameraMatrix, but you may additionally scale and shift the result by using a different matrix.
     */
    public static void undistort(Mat src, Mat dst, Mat cameraMatrix, Mat distCoeffs, Mat newCameraMatrix) {
        undistort_0(src.nativeObj, dst.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, newCameraMatrix.nativeObj);
    }

    /**
     * Transforms an image to compensate for lens distortion.
     *
     * Equivalent to the fully documented overload above with newCameraMatrix equal to cameraMatrix.
     */
    public static void undistort(Mat src, Mat dst, Mat cameraMatrix, Mat distCoeffs) {
        undistort_1(src.nativeObj, dst.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj);
    }
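
    // Editorial usage sketch (not part of the generated API): undistorting a single image
    // with intrinsics from a prior calibration. The matrix and coefficient values below
    // are hypothetical.
    private static void exampleUndistort(Mat distorted) {
        Mat cameraMatrix = Mat.eye(3, 3, org.opencv.core.CvType.CV_64F);
        cameraMatrix.put(0, 0, 800); cameraMatrix.put(1, 1, 800); // fx, fy
        cameraMatrix.put(0, 2, 320); cameraMatrix.put(1, 2, 240); // cx, cy
        // (k1, k2, p1, p2, k3): a 5-element distortion vector.
        MatOfDouble distCoeffs = new MatOfDouble(-0.28, 0.07, 0.0, 0.0, 0.0);
        Mat corrected = new Mat();
        undistort(distorted, corrected, cameraMatrix, distCoeffs);
        // Destination pixels with no source correspondence are left black (zeros).
    }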

    //
    // C++: void cv::initUndistortRectifyMap(Mat cameraMatrix, Mat distCoeffs, Mat R, Mat newCameraMatrix, Size size, int m1type, Mat& map1, Mat& map2)
    //

    /**
     * Computes the undistortion and rectification transformation map.
     *
     * The function computes the joint undistortion and rectification transformation and represents the
     * result in the form of maps for #remap. The undistorted image looks like the original, as if it were
     * captured with a camera using the camera matrix = newCameraMatrix and zero distortion. In case of a
     * monocular camera, newCameraMatrix is usually equal to cameraMatrix, or it can be computed by
     * #getOptimalNewCameraMatrix for better control over scaling. In case of a stereo camera,
     * newCameraMatrix is normally set to P1 or P2 computed by #stereoRectify .
     *
     * Also, this new camera is oriented differently in the coordinate space, according to R. That, for
     * example, helps to align two heads of a stereo camera so that the epipolar lines on both images
     * become horizontal and have the same y-coordinate (in case of a horizontally aligned stereo camera).
     *
     * The function actually builds the maps for the inverse mapping algorithm that is used by #remap. That
     * is, for each pixel \((u, v)\) in the destination (corrected and rectified) image, the function
     * computes the corresponding coordinates in the source image (that is, in the original image from the
     * camera). The following process is applied:
     * \(
     * \begin{array}{l}
     * x \leftarrow (u - {c'}_x)/{f'}_x \\
     * y \leftarrow (v - {c'}_y)/{f'}_y \\
     * {[X\,Y\,W]} ^T \leftarrow R^{-1}*[x \, y \, 1]^T \\
     * x' \leftarrow X/W \\
     * y' \leftarrow Y/W \\
     * r^2 \leftarrow x'^2 + y'^2 \\
     * x'' \leftarrow x' \frac{1 + k_1 r^2 + k_2 r^4 + k_3 r^6}{1 + k_4 r^2 + k_5 r^4 + k_6 r^6}
     * + 2p_1 x' y' + p_2(r^2 + 2 x'^2) + s_1 r^2 + s_2 r^4\\
     * y'' \leftarrow y' \frac{1 + k_1 r^2 + k_2 r^4 + k_3 r^6}{1 + k_4 r^2 + k_5 r^4 + k_6 r^6}
     * + p_1 (r^2 + 2 y'^2) + 2 p_2 x' y' + s_3 r^2 + s_4 r^4 \\
     * s\vecthree{x'''}{y'''}{1} =
     * \vecthreethree{R_{33}(\tau_x, \tau_y)}{0}{-R_{13}(\tau_x, \tau_y)}
     * {0}{R_{33}(\tau_x, \tau_y)}{-R_{23}(\tau_x, \tau_y)}
     * {0}{0}{1} R(\tau_x, \tau_y) \vecthree{x''}{y''}{1}\\
     * map_x(u,v) \leftarrow x''' f_x + c_x \\
     * map_y(u,v) \leftarrow y''' f_y + c_y
     * \end{array}
     * \)
     * where \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
     * are the distortion coefficients.
     *
     * In case of a stereo camera, this function is called twice: once for each camera head, after
     * #stereoRectify, which in turn is called after #stereoCalibrate. But if the stereo camera
     * was not calibrated, it is still possible to compute the rectification transformations directly from
     * the fundamental matrix using #stereoRectifyUncalibrated. For each camera, the function computes
     * homography H as the rectification transformation in a pixel domain, not a rotation matrix R in 3D
     * space. R can be computed from H as
     * \(\texttt{R} = \texttt{cameraMatrix} ^{-1} \cdot \texttt{H} \cdot \texttt{cameraMatrix}\)
     * where cameraMatrix can be chosen arbitrarily.
     *
     * @param cameraMatrix Input camera matrix \(A=\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
     * @param distCoeffs Input vector of distortion coefficients
     * \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
     * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, zero distortion coefficients are assumed.
     * @param R Optional rectification transformation in the object space (3x3 matrix). R1 or R2,
     * computed by #stereoRectify, can be passed here. If the matrix is empty, the identity transformation
     * is assumed. In the legacy cvInitUndistortMap, R is assumed to be an identity matrix.
     * @param newCameraMatrix New camera matrix \(A'=\vecthreethree{f_x'}{0}{c_x'}{0}{f_y'}{c_y'}{0}{0}{1}\).
     * @param size Undistorted image size.
     * @param m1type Type of the first output map. Can be CV_32FC1, CV_32FC2 or CV_16SC2, see #convertMaps
     * @param map1 The first output map.
     * @param map2 The second output map.
     */
    public static void initUndistortRectifyMap(Mat cameraMatrix, Mat distCoeffs, Mat R, Mat newCameraMatrix, Size size, int m1type, Mat map1, Mat map2) {
        initUndistortRectifyMap_0(cameraMatrix.nativeObj, distCoeffs.nativeObj, R.nativeObj, newCameraMatrix.nativeObj, size.width, size.height, m1type, map1.nativeObj, map2.nativeObj);
    }
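
    // Editorial usage sketch (not part of the generated API): building the undistortion
    // maps once and reusing them for many frames via remap. The intrinsics below are
    // hypothetical, and org.opencv.imgproc.Imgproc is assumed to be on the classpath.
    private static void exampleInitUndistortRectifyMap(Mat frame) {
        Mat cameraMatrix = Mat.eye(3, 3, org.opencv.core.CvType.CV_64F);
        cameraMatrix.put(0, 0, 800); cameraMatrix.put(1, 1, 800); // fx, fy
        cameraMatrix.put(0, 2, 320); cameraMatrix.put(1, 2, 240); // cx, cy
        MatOfDouble distCoeffs = new MatOfDouble(-0.28, 0.07, 0.0, 0.0, 0.0);
        Mat map1 = new Mat(), map2 = new Mat();
        // Monocular case: empty R (identity) and newCameraMatrix == cameraMatrix.
        initUndistortRectifyMap(cameraMatrix, distCoeffs, new Mat(), cameraMatrix,
                new Size(640, 480), org.opencv.core.CvType.CV_16SC2, map1, map2);
        Mat rectified = new Mat();
        org.opencv.imgproc.Imgproc.remap(frame, rectified, map1, map2,
                org.opencv.imgproc.Imgproc.INTER_LINEAR);
    }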

    //
    // C++: void cv::initInverseRectificationMap(Mat cameraMatrix, Mat distCoeffs, Mat R, Mat newCameraMatrix, Size size, int m1type, Mat& map1, Mat& map2)
    //

    /**
     * Computes the projection and inverse-rectification transformation map. In essence, this is the
     * inverse of #initUndistortRectifyMap to accommodate stereo-rectification of projectors
     * ('inverse-cameras') in projector-camera pairs.
     *
     * The function computes the joint projection and inverse rectification transformation and represents the
     * result in the form of maps for #remap. The projected image looks like a distorted version of the original which,
     * once projected by a projector, should visually match the original. In case of a monocular camera, newCameraMatrix
     * is usually equal to cameraMatrix, or it can be computed by
     * #getOptimalNewCameraMatrix for better control over scaling. In case of a projector-camera pair,
     * newCameraMatrix is normally set to P1 or P2 computed by #stereoRectify .
     *
     * The projector is oriented differently in the coordinate space, according to R. In case of projector-camera pairs,
     * this helps align the projector (in the same manner as #initUndistortRectifyMap for the camera) to create a stereo-rectified pair. This
     * allows epipolar lines on both images to become horizontal and have the same y-coordinate (in case of a horizontally aligned projector-camera pair).
     *
     * The function builds the maps for the inverse mapping algorithm that is used by #remap. That
     * is, for each pixel \((u, v)\) in the destination (projected and inverse-rectified) image, the function
     * computes the corresponding coordinates in the source image (that is, in the original digital image). The following process is applied:
     *
     * \(
     * \begin{array}{l}
     * \text{newCameraMatrix}\\
     * x \leftarrow (u - {c'}_x)/{f'}_x \\
     * y \leftarrow (v - {c'}_y)/{f'}_y \\
     *
     * \\\text{Undistortion}
     * \\\scriptsize{\textit{though equation shown is for radial undistortion, function implements cv::undistortPoints()}}\\
     * r^2 \leftarrow x^2 + y^2 \\
     * \theta \leftarrow \frac{1 + k_1 r^2 + k_2 r^4 + k_3 r^6}{1 + k_4 r^2 + k_5 r^4 + k_6 r^6}\\
     * x' \leftarrow \frac{x}{\theta} \\
     * y' \leftarrow \frac{y}{\theta} \\
     *
     * \\\text{Rectification}\\
     * {[X\,Y\,W]} ^T \leftarrow R*[x' \, y' \, 1]^T \\
     * x'' \leftarrow X/W \\
     * y'' \leftarrow Y/W \\
     *
     * \\\text{cameraMatrix}\\
     * map_x(u,v) \leftarrow x'' f_x + c_x \\
     * map_y(u,v) \leftarrow y'' f_y + c_y
     * \end{array}
     * \)
     * where \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
     * are the components of the distortion coefficient vector distCoeffs.
     *
     * In case of a stereo-rectified projector-camera pair, this function is called for the projector while #initUndistortRectifyMap is called for the camera head.
     * This is done after #stereoRectify, which in turn is called after #stereoCalibrate. If the projector-camera pair
     * is not calibrated, it is still possible to compute the rectification transformations directly from
     * the fundamental matrix using #stereoRectifyUncalibrated. For the projector and camera, the function computes
     * homography H as the rectification transformation in a pixel domain, not a rotation matrix R in 3D
     * space. R can be computed from H as
     * \(\texttt{R} = \texttt{cameraMatrix} ^{-1} \cdot \texttt{H} \cdot \texttt{cameraMatrix}\)
     * where cameraMatrix can be chosen arbitrarily.
     *
     * @param cameraMatrix Input camera matrix \(A=\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
     * @param distCoeffs Input vector of distortion coefficients
     * \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
     * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, zero distortion coefficients are assumed.
     * @param R Optional rectification transformation in the object space (3x3 matrix). R1 or R2,
     * computed by #stereoRectify, can be passed here. If the matrix is empty, the identity transformation
     * is assumed.
     * @param newCameraMatrix New camera matrix \(A'=\vecthreethree{f_x'}{0}{c_x'}{0}{f_y'}{c_y'}{0}{0}{1}\).
     * @param size Distorted image size.
     * @param m1type Type of the first output map. Can be CV_32FC1, CV_32FC2 or CV_16SC2, see #convertMaps
     * @param map1 The first output map for #remap.
     * @param map2 The second output map for #remap.
     */
    public static void initInverseRectificationMap(Mat cameraMatrix, Mat distCoeffs, Mat R, Mat newCameraMatrix, Size size, int m1type, Mat map1, Mat map2) {
        initInverseRectificationMap_0(cameraMatrix.nativeObj, distCoeffs.nativeObj, R.nativeObj, newCameraMatrix.nativeObj, size.width, size.height, m1type, map1.nativeObj, map2.nativeObj);
    }
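
    // Editorial usage sketch (not part of the generated API): the projector-side
    // counterpart of the camera-side map-building example above. All parameters are
    // assumed to come from a prior #stereoCalibrate / #stereoRectify of the
    // projector-camera pair.
    private static void exampleInitInverseRectificationMap(Mat projectorMatrix, Mat projDistCoeffs,
                                                           Mat R1, Mat P1, Size projectorSize) {
        Mat map1 = new Mat(), map2 = new Mat();
        initInverseRectificationMap(projectorMatrix, projDistCoeffs, R1, P1, projectorSize,
                org.opencv.core.CvType.CV_32FC1, map1, map2);
        // map1/map2 pre-warp the image to be projected so that, once physically
        // projected, it appears rectified to the camera.
    }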

    //
    // C++: Mat cv::getDefaultNewCameraMatrix(Mat cameraMatrix, Size imgsize = Size(), bool centerPrincipalPoint = false)
    //

    /**
     * Returns the default new camera matrix.
     *
     * The function returns the camera matrix that is either an exact copy of the input cameraMatrix (when
     * centerPrincipalPoint=false), or the modified one (when centerPrincipalPoint=true).
     *
     * In the latter case, the new camera matrix will be:
     *
     * \(\begin{bmatrix} f_x && 0 && ( \texttt{imgSize.width} -1)*0.5 \\ 0 && f_y && ( \texttt{imgSize.height} -1)*0.5 \\ 0 && 0 && 1 \end{bmatrix} ,\)
     *
     * where \(f_x\) and \(f_y\) are the \((0,0)\) and \((1,1)\) elements of cameraMatrix, respectively.
     *
     * By default, the undistortion functions in OpenCV (see #initUndistortRectifyMap, #undistort) do not
     * move the principal point. However, when you work with stereo, it is important to move the principal
     * points in both views to the same y-coordinate (which is required by most stereo correspondence
     * algorithms), and possibly to the same x-coordinate too. So, you can form the new camera matrix for
     * each view where the principal points are located at the center.
     *
     * @param cameraMatrix Input camera matrix.
     * @param imgsize Camera view image size in pixels.
     * @param centerPrincipalPoint Location of the principal point in the new camera matrix. The
     * parameter indicates whether this location should be at the image center or not.
     * @return the default new camera matrix
     */
    public static Mat getDefaultNewCameraMatrix(Mat cameraMatrix, Size imgsize, boolean centerPrincipalPoint) {
        return new Mat(getDefaultNewCameraMatrix_0(cameraMatrix.nativeObj, imgsize.width, imgsize.height, centerPrincipalPoint));
    }

    /**
     * Returns the default new camera matrix.
     *
     * Equivalent to the fully documented overload above with centerPrincipalPoint = false.
     */
    public static Mat getDefaultNewCameraMatrix(Mat cameraMatrix, Size imgsize) {
        return new Mat(getDefaultNewCameraMatrix_1(cameraMatrix.nativeObj, imgsize.width, imgsize.height));
    }

    /**
     * Returns the default new camera matrix.
     *
     * Equivalent to the fully documented overload above with an empty imgsize and
     * centerPrincipalPoint = false.
     */
    public static Mat getDefaultNewCameraMatrix(Mat cameraMatrix) {
        return new Mat(getDefaultNewCameraMatrix_2(cameraMatrix.nativeObj));
    }
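
    // Editorial usage sketch (not part of the generated API): centering the principal
    // point for a hypothetical 640x480 view.
    private static void exampleGetDefaultNewCameraMatrix(Mat cameraMatrix) {
        Mat centered = getDefaultNewCameraMatrix(cameraMatrix, new Size(640, 480), true);
        // centered keeps fx and fy but moves (cx, cy) to ((640-1)*0.5, (480-1)*0.5).
        System.out.println(centered.dump());
    }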

    //
    // C++: void cv::undistortPoints(vector_Point2f src, vector_Point2f& dst, Mat cameraMatrix, Mat distCoeffs, Mat R = Mat(), Mat P = Mat())
    //

    /**
     * Computes the ideal point coordinates from the observed point coordinates.
     *
     * The function is similar to #undistort and #initUndistortRectifyMap, but it operates on a
     * sparse set of points instead of a raster image. It also performs the transformation reverse to
     * #projectPoints. In case of a 3D object, it does not reconstruct its 3D coordinates, but for a
     * planar object, it does, up to a translation vector, if the proper R is specified.
     *
     * For each observed point coordinate \((u, v)\) the function computes:
     * \(
     * \begin{array}{l}
     * x^{"} \leftarrow (u - c_x)/f_x \\
     * y^{"} \leftarrow (v - c_y)/f_y \\
     * (x',y') = undistort(x^{"},y^{"}, \texttt{distCoeffs}) \\
     * {[X\,Y\,W]} ^T \leftarrow R*[x' \, y' \, 1]^T \\
     * x \leftarrow X/W \\
     * y \leftarrow Y/W \\
     * \text{only performed if P is specified:} \\
     * u' \leftarrow x {f'}_x + {c'}_x \\
     * v' \leftarrow y {f'}_y + {c'}_y
     * \end{array}
     * \)
     *
     * where *undistort* is an approximate iterative algorithm that estimates the normalized original
     * point coordinates from the normalized distorted point coordinates ("normalized" means that the
     * coordinates do not depend on the camera matrix).
     *
     * The function can be used for both a stereo camera head and a monocular camera (when R is empty).
     * @param src Observed point coordinates, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel (CV_32FC2 or CV_64FC2) (or
     * vector<Point2f> ).
     * @param dst Output ideal point coordinates (1xN/Nx1 2-channel or vector<Point2f> ) after undistortion and reverse perspective
     * transformation. If matrix P is identity or omitted, dst will contain normalized point coordinates.
     * @param cameraMatrix Camera matrix \(\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
     * @param distCoeffs Input vector of distortion coefficients
     * \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\)
     * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, zero distortion coefficients are assumed.
     * @param R Rectification transformation in the object space (3x3 matrix). R1 or R2 computed by
     * #stereoRectify can be passed here. If the matrix is empty, the identity transformation is used.
     * @param P New camera matrix (3x3) or new projection matrix (3x4) \(\begin{bmatrix} {f'}_x & 0 & {c'}_x & t_x \\ 0 & {f'}_y & {c'}_y & t_y \\ 0 & 0 & 1 & t_z \end{bmatrix}\). P1 or P2 computed by
     * #stereoRectify can be passed here. If the matrix is empty, the identity new camera matrix is used.
     */
    public static void undistortPoints(MatOfPoint2f src, MatOfPoint2f dst, Mat cameraMatrix, Mat distCoeffs, Mat R, Mat P) {
        Mat src_mat = src;
        Mat dst_mat = dst;
        undistortPoints_0(src_mat.nativeObj, dst_mat.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, R.nativeObj, P.nativeObj);
    }

    /**
     * Computes the ideal point coordinates from the observed point coordinates.
     *
     * Equivalent to the fully documented overload above with an empty P: no reprojection is applied,
     * so dst contains normalized point coordinates.
     */
    public static void undistortPoints(MatOfPoint2f src, MatOfPoint2f dst, Mat cameraMatrix, Mat distCoeffs, Mat R) {
        Mat src_mat = src;
        Mat dst_mat = dst;
        undistortPoints_1(src_mat.nativeObj, dst_mat.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, R.nativeObj);
    }
10698 * 10699 * For each observed point coordinate \((u, v)\) the function computes: 10700 * \( 10701 * \begin{array}{l} 10702 * x^{"} \leftarrow (u - c_x)/f_x \\ 10703 * y^{"} \leftarrow (v - c_y)/f_y \\ 10704 * (x',y') = undistort(x^{"},y^{"}, \texttt{distCoeffs}) \\ 10705 * {[X\,Y\,W]} ^T \leftarrow R*[x' \, y' \, 1]^T \\ 10706 * x \leftarrow X/W \\ 10707 * y \leftarrow Y/W \\ 10708 * \text{only performed if P is specified:} \\ 10709 * u' \leftarrow x {f'}_x + {c'}_x \\ 10710 * v' \leftarrow y {f'}_y + {c'}_y 10711 * \end{array} 10712 * \) 10713 * 10714 * where *undistort* is an approximate iterative algorithm that estimates the normalized original 10715 * point coordinates out of the normalized distorted point coordinates ("normalized" means that the 10716 * coordinates do not depend on the camera matrix). 10717 * 10718 * The function can be used for both a stereo camera head or a monocular camera (when R is empty). 10719 * @param src Observed point coordinates, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel (CV_32FC2 or CV_64FC2) (or 10720 * vector<Point2f> ). 10721 * @param dst Output ideal point coordinates (1xN/Nx1 2-channel or vector<Point2f> ) after undistortion and reverse perspective 10722 * transformation. If matrix P is identity or omitted, dst will contain normalized point coordinates. 10723 * @param cameraMatrix Camera matrix \(\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) . 10724 * @param distCoeffs Input vector of distortion coefficients 10725 * \((k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\) 10726 * of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed. 10727 * #stereoRectify can be passed here. If the matrix is empty, the identity transformation is used. 10728 * #stereoRectify can be passed here. If the matrix is empty, the identity new camera matrix is used. 10729 */ 10730 public static void undistortPoints(MatOfPoint2f src, MatOfPoint2f dst, Mat cameraMatrix, Mat distCoeffs) { 10731 Mat src_mat = src; 10732 Mat dst_mat = dst; 10733 undistortPoints_2(src_mat.nativeObj, dst_mat.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj); 10734 } 10735 10736 10737 // 10738 // C++: void cv::undistortPoints(Mat src, Mat& dst, Mat cameraMatrix, Mat distCoeffs, Mat R, Mat P, TermCriteria criteria) 10739 // 10740 10741 /** 10742 * 10743 * <b>Note:</b> Default version of #undistortPoints does 5 iterations to compute undistorted points. 
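
    // Editor's note: illustrative usage sketch, not part of the generated bindings. The
    // pixel coordinates below are assumptions; camMat would be a calibrated 3x3 intrinsic matrix.
    private static MatOfPoint2f exampleUndistortPoints(Mat camMat) {
        MatOfPoint2f observed = new MatOfPoint2f(new Point(100, 120), new Point(300, 240));
        MatOfPoint2f ideal = new MatOfPoint2f();
        // R and P omitted: the result is expressed in normalized coordinates. An empty
        // MatOfDouble means zero distortion coefficients are assumed.
        undistortPoints(observed, ideal, camMat, new MatOfDouble());
        return ideal;
    }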
    //
    // C++: void cv::undistortPoints(Mat src, Mat& dst, Mat cameraMatrix, Mat distCoeffs, Mat R, Mat P, TermCriteria criteria)
    //

    /**
     * Variant of #undistortPoints that lets the caller specify the termination criteria of the
     * iterative undistortion algorithm.
     *
     * <b>Note:</b> Default version of #undistortPoints does 5 iterations to compute undistorted points.
     * @param src Observed point coordinates (see #undistortPoints).
     * @param dst Output ideal point coordinates after undistortion and reverse perspective transformation.
     * @param cameraMatrix Camera matrix \(\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
     * @param distCoeffs Input vector of distortion coefficients of 4, 5, 8, 12 or 14 elements.
     * @param R Rectification transformation in the object space (3x3 matrix); empty means identity.
     * @param P New camera matrix (3x3) or new projection matrix (3x4); empty means identity.
     * @param criteria Termination criteria for the iterative undistortion algorithm.
     */
    public static void undistortPointsIter(Mat src, Mat dst, Mat cameraMatrix, Mat distCoeffs, Mat R, Mat P, TermCriteria criteria) {
        undistortPointsIter_0(src.nativeObj, dst.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, R.nativeObj, P.nativeObj, criteria.type, criteria.maxCount, criteria.epsilon);
    }


    //
    // C++: void cv::undistortImagePoints(Mat src, Mat& dst, Mat cameraMatrix, Mat distCoeffs, TermCriteria arg1 = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 5, 0.01))
    //

    /**
     * Computes the positions of the undistorted image points.
     *
     * @param src Observed points position, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel (CV_32FC2 or
     * CV_64FC2) (or vector<Point2f> ).
     * @param dst Output undistorted points position (1xN/Nx1 2-channel or vector<Point2f> ).
     * @param cameraMatrix Camera matrix \(\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\) .
     * @param distCoeffs Distortion coefficients
     * @param arg1 Termination criteria for the iterative undistortion algorithm (the C++ default is
     * 5 iterations with epsilon 0.01).
     */
    public static void undistortImagePoints(Mat src, Mat dst, Mat cameraMatrix, Mat distCoeffs, TermCriteria arg1) {
        undistortImagePoints_0(src.nativeObj, dst.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj, arg1.type, arg1.maxCount, arg1.epsilon);
    }
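
    // Editor's note: illustrative usage sketch, not part of the generated bindings.
    // Runs the undistortion with an explicit termination criterion instead of the
    // default 5 iterations / 0.01 epsilon.
    private static void exampleUndistortImagePoints(Mat src, Mat dst, Mat camMat, Mat dist) {
        TermCriteria crit = new TermCriteria(TermCriteria.MAX_ITER + TermCriteria.EPS, 10, 1e-4);
        undistortImagePoints(src, dst, camMat, dist, crit);
    }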
    /**
     * Computes the positions of the undistorted image points.
     *
     * Variant of {@link #undistortImagePoints(Mat, Mat, Mat, Mat, TermCriteria)} that uses the default
     * termination criteria (5 iterations with epsilon 0.01).
     */
    public static void undistortImagePoints(Mat src, Mat dst, Mat cameraMatrix, Mat distCoeffs) {
        undistortImagePoints_1(src.nativeObj, dst.nativeObj, cameraMatrix.nativeObj, distCoeffs.nativeObj);
    }


    //
    // C++: void cv::fisheye::projectPoints(Mat objectPoints, Mat& imagePoints, Mat rvec, Mat tvec, Mat K, Mat D, double alpha = 0, Mat& jacobian = Mat())
    //

    // Projects 3D object points to the image plane using the fisheye model; the overloads
    // without alpha and jacobian use the C++ defaults (alpha = 0, no Jacobian output).
    public static void fisheye_projectPoints(Mat objectPoints, Mat imagePoints, Mat rvec, Mat tvec, Mat K, Mat D, double alpha, Mat jacobian) {
        fisheye_projectPoints_0(objectPoints.nativeObj, imagePoints.nativeObj, rvec.nativeObj, tvec.nativeObj, K.nativeObj, D.nativeObj, alpha, jacobian.nativeObj);
    }

    public static void fisheye_projectPoints(Mat objectPoints, Mat imagePoints, Mat rvec, Mat tvec, Mat K, Mat D, double alpha) {
        fisheye_projectPoints_1(objectPoints.nativeObj, imagePoints.nativeObj, rvec.nativeObj, tvec.nativeObj, K.nativeObj, D.nativeObj, alpha);
    }

    public static void fisheye_projectPoints(Mat objectPoints, Mat imagePoints, Mat rvec, Mat tvec, Mat K, Mat D) {
        fisheye_projectPoints_2(objectPoints.nativeObj, imagePoints.nativeObj, rvec.nativeObj, tvec.nativeObj, K.nativeObj, D.nativeObj);
    }


    //
    // C++: void cv::fisheye::distortPoints(Mat undistorted, Mat& distorted, Mat K, Mat D, double alpha = 0)
    //

    /**
     * Distorts 2D points using fisheye model.
     *
     * @param undistorted Array of object points, 1xN/Nx1 2-channel (or vector<Point2f> ), where N is
     * the number of points in the view.
     * @param K Camera intrinsic matrix \(cameramatrix{K}\).
     * @param D Input vector of distortion coefficients \(\distcoeffsfisheye\).
     * @param alpha The skew coefficient.
     * @param distorted Output array of image points, 1xN/Nx1 2-channel, or vector<Point2f> .
     *
     * Note that the function assumes the camera intrinsic matrix of the undistorted points to be identity.
     * This means if you want to distort image points you have to multiply them with \(K^{-1}\) first.
     */
    public static void fisheye_distortPoints(Mat undistorted, Mat distorted, Mat K, Mat D, double alpha) {
        fisheye_distortPoints_0(undistorted.nativeObj, distorted.nativeObj, K.nativeObj, D.nativeObj, alpha);
    }
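
    // Editor's note: illustrative usage sketch, not part of the generated bindings.
    // distortPoints expects points in normalized coordinates (identity intrinsics), so,
    // as the docs above note, pixel coordinates would first be mapped through K^-1.
    private static Mat exampleFisheyeDistortPoints(Mat normalizedPts, Mat K, Mat D) {
        Mat distorted = new Mat();
        // Skew coefficient alpha omitted: the C++ default of 0 is used.
        fisheye_distortPoints(normalizedPts, distorted, K, D);
        return distorted;
    }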
    /**
     * Distorts 2D points using fisheye model.
     *
     * Variant of {@link #fisheye_distortPoints(Mat, Mat, Mat, Mat, double)} with the skew coefficient
     * alpha assumed to be 0.
     */
    public static void fisheye_distortPoints(Mat undistorted, Mat distorted, Mat K, Mat D) {
        fisheye_distortPoints_1(undistorted.nativeObj, distorted.nativeObj, K.nativeObj, D.nativeObj);
    }


    //
    // C++: void cv::fisheye::undistortPoints(Mat distorted, Mat& undistorted, Mat K, Mat D, Mat R = Mat(), Mat P = Mat(), TermCriteria criteria = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 10, 1e-8))
    //

    /**
     * Undistorts 2D points using fisheye model.
     *
     * @param distorted Array of object points, 1xN/Nx1 2-channel (or vector<Point2f> ), where N is the
     * number of points in the view.
     * @param K Camera intrinsic matrix \(cameramatrix{K}\).
     * @param D Input vector of distortion coefficients \(\distcoeffsfisheye\).
     * @param R Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
     * 1-channel or 1x1 3-channel
     * @param P New camera intrinsic matrix (3x3) or new projection matrix (3x4)
     * @param criteria Termination criteria
     * @param undistorted Output array of image points, 1xN/Nx1 2-channel, or vector<Point2f> .
     */
    public static void fisheye_undistortPoints(Mat distorted, Mat undistorted, Mat K, Mat D, Mat R, Mat P, TermCriteria criteria) {
        fisheye_undistortPoints_0(distorted.nativeObj, undistorted.nativeObj, K.nativeObj, D.nativeObj, R.nativeObj, P.nativeObj, criteria.type, criteria.maxCount, criteria.epsilon);
    }

    /**
     * Undistorts 2D points using fisheye model.
     *
     * Variant of {@link #fisheye_undistortPoints(Mat, Mat, Mat, Mat, Mat, Mat, TermCriteria)} with the
     * termination criteria assumed to be the C++ default (10 iterations with epsilon 1e-8).
     */
    public static void fisheye_undistortPoints(Mat distorted, Mat undistorted, Mat K, Mat D, Mat R, Mat P) {
        fisheye_undistortPoints_1(distorted.nativeObj, undistorted.nativeObj, K.nativeObj, D.nativeObj, R.nativeObj, P.nativeObj);
    }

    /**
     * Undistorts 2D points using fisheye model.
     *
     * Variant of {@link #fisheye_undistortPoints(Mat, Mat, Mat, Mat, Mat, Mat, TermCriteria)} with P
     * assumed to be empty and the default termination criteria, so the output stays in normalized
     * coordinates.
     */
    public static void fisheye_undistortPoints(Mat distorted, Mat undistorted, Mat K, Mat D, Mat R) {
        fisheye_undistortPoints_2(distorted.nativeObj, undistorted.nativeObj, K.nativeObj, D.nativeObj, R.nativeObj);
    }
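
    // Editor's note: illustrative usage sketch, not part of the generated bindings.
    // Undistorts fisheye points and reprojects them with a new camera matrix P so that
    // the output stays in pixel coordinates.
    private static Mat exampleFisheyeUndistortPoints(Mat distorted, Mat K, Mat D, Mat P) {
        Mat undistorted = new Mat();
        // Empty R: no rectification transformation is applied.
        fisheye_undistortPoints(distorted, undistorted, K, D, new Mat(), P);
        return undistorted;
    }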
    /**
     * Undistorts 2D points using fisheye model.
     *
     * Variant of {@link #fisheye_undistortPoints(Mat, Mat, Mat, Mat, Mat, Mat, TermCriteria)} with R and
     * P assumed to be empty and the default termination criteria, so no rectification is applied and the
     * output is in normalized coordinates.
     */
    public static void fisheye_undistortPoints(Mat distorted, Mat undistorted, Mat K, Mat D) {
        fisheye_undistortPoints_3(distorted.nativeObj, undistorted.nativeObj, K.nativeObj, D.nativeObj);
    }


    //
    // C++: void cv::fisheye::initUndistortRectifyMap(Mat K, Mat D, Mat R, Mat P, Size size, int m1type, Mat& map1, Mat& map2)
    //

    /**
     * Computes undistortion and rectification maps for image transform by #remap. If D is empty, zero
     * distortion is used; if R or P is empty, identity matrices are used.
     *
     * @param K Camera intrinsic matrix \(cameramatrix{K}\).
     * @param D Input vector of distortion coefficients \(\distcoeffsfisheye\).
     * @param R Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
     * 1-channel or 1x1 3-channel
     * @param P New camera intrinsic matrix (3x3) or new projection matrix (3x4)
     * @param size Undistorted image size.
     * @param m1type Type of the first output map that can be CV_32FC1 or CV_16SC2 . See #convertMaps
     * for details.
     * @param map1 The first output map.
     * @param map2 The second output map.
     */
    public static void fisheye_initUndistortRectifyMap(Mat K, Mat D, Mat R, Mat P, Size size, int m1type, Mat map1, Mat map2) {
        fisheye_initUndistortRectifyMap_0(K.nativeObj, D.nativeObj, R.nativeObj, P.nativeObj, size.width, size.height, m1type, map1.nativeObj, map2.nativeObj);
    }
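
    // Editor's note: illustrative usage sketch, not part of the generated bindings.
    // Precomputes fisheye rectification maps once; the maps would then be fed to
    // Imgproc.remap (org.opencv.imgproc) for each incoming frame.
    private static void exampleFisheyeInitMaps(Mat K, Mat D, Mat P, Size size, Mat map1, Mat map2) {
        // Empty R (identity) and CV_16SC2 maps, the compact fixed-point map format.
        fisheye_initUndistortRectifyMap(K, D, new Mat(), P, size, org.opencv.core.CvType.CV_16SC2, map1, map2);
    }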
    //
    // C++: void cv::fisheye::undistortImage(Mat distorted, Mat& undistorted, Mat K, Mat D, Mat Knew = cv::Mat(), Size new_size = Size())
    //

    /**
     * Transforms an image to compensate for fisheye lens distortion.
     *
     * @param distorted image with fisheye lens distortion.
     * @param undistorted Output image with compensated fisheye lens distortion.
     * @param K Camera intrinsic matrix \(cameramatrix{K}\).
     * @param D Input vector of distortion coefficients \(\distcoeffsfisheye\).
     * @param Knew Camera intrinsic matrix of the distorted image. By default, it is the identity matrix but you
     * may additionally scale and shift the result by using a different matrix.
     * @param new_size the new size
     *
     * The function transforms an image to compensate for radial and tangential lens distortion.
     *
     * The function is simply a combination of #fisheye::initUndistortRectifyMap (with unity R ) and #remap
     * (with bilinear interpolation). See the former function for details of the transformation being
     * performed.
     *
     * See below the results of undistortImage.
     * <ul>
     * <li>
     * a\) result of undistort of perspective camera model (all possible coefficients (k_1, k_2, k_3,
     * k_4, k_5, k_6) of distortion were optimized under calibration)
     * </li>
     * <li>
     * b\) result of #fisheye::undistortImage of fisheye camera model (all possible coefficients (k_1, k_2,
     * k_3, k_4) of fisheye distortion were optimized under calibration)
     * </li>
     * <li>
     * c\) original image was captured with fisheye lens
     * </li>
     * </ul>
     *
     * Pictures a) and b) are almost the same. But if we consider points of the image located far from
     * the center of the image, we can notice that on image a) these points are distorted.
     *
     * ![image](pics/fisheye_undistorted.jpg)
     */
    public static void fisheye_undistortImage(Mat distorted, Mat undistorted, Mat K, Mat D, Mat Knew, Size new_size) {
        fisheye_undistortImage_0(distorted.nativeObj, undistorted.nativeObj, K.nativeObj, D.nativeObj, Knew.nativeObj, new_size.width, new_size.height);
    }

    /**
     * Transforms an image to compensate for fisheye lens distortion.
     *
     * Variant of {@link #fisheye_undistortImage(Mat, Mat, Mat, Mat, Mat, Size)} with new_size assumed
     * to be empty, so the output keeps the size of the input image.
     */
    public static void fisheye_undistortImage(Mat distorted, Mat undistorted, Mat K, Mat D, Mat Knew) {
        fisheye_undistortImage_1(distorted.nativeObj, undistorted.nativeObj, K.nativeObj, D.nativeObj, Knew.nativeObj);
    }

    /**
     * Transforms an image to compensate for fisheye lens distortion.
     *
     * Variant of {@link #fisheye_undistortImage(Mat, Mat, Mat, Mat, Mat, Size)} with Knew assumed to be
     * empty (identity) and new_size empty, so the output keeps the size of the input image.
     */
    public static void fisheye_undistortImage(Mat distorted, Mat undistorted, Mat K, Mat D) {
        fisheye_undistortImage_2(distorted.nativeObj, undistorted.nativeObj, K.nativeObj, D.nativeObj);
    }
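
    // Editor's note: illustrative usage sketch, not part of the generated bindings.
    // Undistorts a full fisheye frame. Reusing K as Knew is an assumed (but common)
    // choice that keeps the original focal length and principal point.
    private static Mat exampleFisheyeUndistortImage(Mat frame, Mat K, Mat D) {
        Mat out = new Mat();
        fisheye_undistortImage(frame, out, K, D, K, frame.size());
        return out;
    }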
    //
    // C++: void cv::fisheye::estimateNewCameraMatrixForUndistortRectify(Mat K, Mat D, Size image_size, Mat R, Mat& P, double balance = 0.0, Size new_size = Size(), double fov_scale = 1.0)
    //

    /**
     * Estimates new camera intrinsic matrix for undistortion or rectification.
     *
     * @param K Camera intrinsic matrix \(cameramatrix{K}\).
     * @param D Input vector of distortion coefficients \(\distcoeffsfisheye\).
     * @param image_size Size of the image
     * @param R Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
     * 1-channel or 1x1 3-channel
     * @param P New camera intrinsic matrix (3x3) or new projection matrix (3x4)
     * @param balance Sets the new focal length in range between the min focal length and the max focal
     * length. Balance is in range of [0, 1].
     * @param new_size the new size
     * @param fov_scale Divisor for new focal length.
     */
    public static void fisheye_estimateNewCameraMatrixForUndistortRectify(Mat K, Mat D, Size image_size, Mat R, Mat P, double balance, Size new_size, double fov_scale) {
        fisheye_estimateNewCameraMatrixForUndistortRectify_0(K.nativeObj, D.nativeObj, image_size.width, image_size.height, R.nativeObj, P.nativeObj, balance, new_size.width, new_size.height, fov_scale);
    }

    /**
     * Estimates new camera intrinsic matrix for undistortion or rectification.
     *
     * Variant of {@link #fisheye_estimateNewCameraMatrixForUndistortRectify(Mat, Mat, Size, Mat, Mat, double, Size, double)}
     * with fov_scale assumed to be 1.0.
     */
    public static void fisheye_estimateNewCameraMatrixForUndistortRectify(Mat K, Mat D, Size image_size, Mat R, Mat P, double balance, Size new_size) {
        fisheye_estimateNewCameraMatrixForUndistortRectify_1(K.nativeObj, D.nativeObj, image_size.width, image_size.height, R.nativeObj, P.nativeObj, balance, new_size.width, new_size.height);
    }

    /**
     * Estimates new camera intrinsic matrix for undistortion or rectification.
     *
     * Variant of {@link #fisheye_estimateNewCameraMatrixForUndistortRectify(Mat, Mat, Size, Mat, Mat, double, Size, double)}
     * with new_size assumed to be empty and fov_scale 1.0.
     */
    public static void fisheye_estimateNewCameraMatrixForUndistortRectify(Mat K, Mat D, Size image_size, Mat R, Mat P, double balance) {
        fisheye_estimateNewCameraMatrixForUndistortRectify_2(K.nativeObj, D.nativeObj, image_size.width, image_size.height, R.nativeObj, P.nativeObj, balance);
    }

    /**
     * Estimates new camera intrinsic matrix for undistortion or rectification.
     *
     * Variant of {@link #fisheye_estimateNewCameraMatrixForUndistortRectify(Mat, Mat, Size, Mat, Mat, double, Size, double)}
     * with balance assumed to be 0.0, new_size empty and fov_scale 1.0.
     */
    public static void fisheye_estimateNewCameraMatrixForUndistortRectify(Mat K, Mat D, Size image_size, Mat R, Mat P) {
        fisheye_estimateNewCameraMatrixForUndistortRectify_3(K.nativeObj, D.nativeObj, image_size.width, image_size.height, R.nativeObj, P.nativeObj);
    }
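
    // Editor's note: illustrative usage sketch, not part of the generated bindings.
    // Picks a new camera matrix for undistortion; balance = 0.5 is an assumed value that
    // interpolates between the minimum and maximum feasible focal lengths (see docs above).
    private static Mat exampleFisheyeEstimateNewK(Mat K, Mat D, Size imageSize) {
        Mat P = new Mat();
        // Empty R: no rectification transformation.
        fisheye_estimateNewCameraMatrixForUndistortRectify(K, D, imageSize, new Mat(), P, 0.5);
        return P;
    }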
    //
    // C++: double cv::fisheye::calibrate(vector_Mat objectPoints, vector_Mat imagePoints, Size image_size, Mat& K, Mat& D, vector_Mat& rvecs, vector_Mat& tvecs, int flags = 0, TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, DBL_EPSILON))
    //

    /**
     * Performs camera calibration.
     *
     * @param objectPoints vector of vectors of calibration pattern points in the calibration pattern
     * coordinate space.
     * @param imagePoints vector of vectors of the projections of calibration pattern points.
     * imagePoints.size() must be equal to objectPoints.size(), and imagePoints[i].size() must be equal
     * to objectPoints[i].size() for each i.
     * @param image_size Size of the image used only to initialize the camera intrinsic matrix.
     * @param K Output 3x3 floating-point camera intrinsic matrix
     * \(\cameramatrix{A}\) . If
     * REF: fisheye::CALIB_USE_INTRINSIC_GUESS is specified, some or all of fx, fy, cx, cy must be
     * initialized before calling the function.
     * @param D Output vector of distortion coefficients \(\distcoeffsfisheye\).
     * @param rvecs Output vector of rotation vectors (see Rodrigues ) estimated for each pattern view.
     * That is, each k-th rotation vector together with the corresponding k-th translation vector (see
     * the next output parameter description) brings the calibration pattern from the model coordinate
     * space (in which object points are specified) to the world coordinate space, that is, a real
     * position of the calibration pattern in the k-th pattern view (k=0.. *M* -1).
     * @param tvecs Output vector of translation vectors estimated for each pattern view.
     * @param flags Different flags that may be zero or a combination of the following values:
     * <ul>
     * <li>
     * REF: fisheye::CALIB_USE_INTRINSIC_GUESS cameraMatrix contains valid initial values of
     * fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
     * center ( imageSize is used), and focal distances are computed in a least-squares fashion.
     * </li>
     * <li>
     * REF: fisheye::CALIB_RECOMPUTE_EXTRINSIC Extrinsics will be recomputed after each iteration
     * of intrinsic optimization.
     * </li>
     * <li>
     * REF: fisheye::CALIB_CHECK_COND The function will check the validity of the condition number.
     * </li>
     * <li>
     * REF: fisheye::CALIB_FIX_SKEW The skew coefficient (alpha) is set to zero and stays zero.
     * </li>
     * <li>
     * REF: fisheye::CALIB_FIX_K1,..., REF: fisheye::CALIB_FIX_K4 Selected distortion coefficients
     * are set to zero and stay zero.
     * </li>
     * <li>
     * REF: fisheye::CALIB_FIX_PRINCIPAL_POINT The principal point is not changed during the global
     * optimization. It stays at the center or at a different location specified when REF: fisheye::CALIB_USE_INTRINSIC_GUESS is set too.
     * </li>
     * <li>
     * REF: fisheye::CALIB_FIX_FOCAL_LENGTH The focal length is not changed during the global
     * optimization. It is the \(max(width,height)/\pi\) or the provided \(f_x\), \(f_y\) when REF: fisheye::CALIB_USE_INTRINSIC_GUESS is set too.
     * </li>
     * </ul>
     * @param criteria Termination criteria for the iterative optimization algorithm.
     * @return the overall RMS re-projection error.
     */
    public static double fisheye_calibrate(List<Mat> objectPoints, List<Mat> imagePoints, Size image_size, Mat K, Mat D, List<Mat> rvecs, List<Mat> tvecs, int flags, TermCriteria criteria) {
        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
        Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints);
        Mat rvecs_mat = new Mat();
        Mat tvecs_mat = new Mat();
        double retVal = fisheye_calibrate_0(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, image_size.width, image_size.height, K.nativeObj, D.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, flags, criteria.type, criteria.maxCount, criteria.epsilon);
        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
        rvecs_mat.release();
        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
        tvecs_mat.release();
        return retVal;
    }

    /**
     * Performs camera calibration.
     *
     * Variant of {@link #fisheye_calibrate(List, List, Size, Mat, Mat, List, List, int, TermCriteria)}
     * with the termination criteria assumed to be the C++ default (100 iterations, epsilon DBL_EPSILON).
     * @return the overall RMS re-projection error.
     */
    public static double fisheye_calibrate(List<Mat> objectPoints, List<Mat> imagePoints, Size image_size, Mat K, Mat D, List<Mat> rvecs, List<Mat> tvecs, int flags) {
        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
        Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints);
        Mat rvecs_mat = new Mat();
        Mat tvecs_mat = new Mat();
        double retVal = fisheye_calibrate_1(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, image_size.width, image_size.height, K.nativeObj, D.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, flags);
        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
        rvecs_mat.release();
        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
        tvecs_mat.release();
        return retVal;
    }

    /**
     * Performs camera calibration.
     *
     * Variant of {@link #fisheye_calibrate(List, List, Size, Mat, Mat, List, List, int, TermCriteria)}
     * with flags assumed to be 0 and the default termination criteria.
     * @return the overall RMS re-projection error.
     */
    public static double fisheye_calibrate(List<Mat> objectPoints, List<Mat> imagePoints, Size image_size, Mat K, Mat D, List<Mat> rvecs, List<Mat> tvecs) {
        Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
        Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints);
        Mat rvecs_mat = new Mat();
        Mat tvecs_mat = new Mat();
        double retVal = fisheye_calibrate_2(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, image_size.width, image_size.height, K.nativeObj, D.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj);
        Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
        rvecs_mat.release();
        Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
        tvecs_mat.release();
        return retVal;
    }
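
    // Editor's note: illustrative usage sketch, not part of the generated bindings.
    // Calibrates a fisheye camera from per-view point sets; objectPoints/imagePoints
    // would come from a detected calibration pattern. The flag combination is an
    // assumed, though common, choice.
    private static double exampleFisheyeCalibrate(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize) {
        Mat K = new Mat();
        Mat D = new Mat();
        List<Mat> rvecs = new ArrayList<>();
        List<Mat> tvecs = new ArrayList<>();
        int flags = fisheye_CALIB_RECOMPUTE_EXTRINSIC + fisheye_CALIB_FIX_SKEW;
        // Returns the overall RMS re-projection error; K and D receive the intrinsics.
        return fisheye_calibrate(objectPoints, imagePoints, imageSize, K, D, rvecs, tvecs, flags);
    }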
    //
    // C++: void cv::fisheye::stereoRectify(Mat K1, Mat D1, Mat K2, Mat D2, Size imageSize, Mat R, Mat tvec, Mat& R1, Mat& R2, Mat& P1, Mat& P2, Mat& Q, int flags, Size newImageSize = Size(), double balance = 0.0, double fov_scale = 1.0)
    //

    /**
     * Stereo rectification for fisheye camera model
     *
     * @param K1 First camera intrinsic matrix.
     * @param D1 First camera distortion parameters.
     * @param K2 Second camera intrinsic matrix.
     * @param D2 Second camera distortion parameters.
     * @param imageSize Size of the image used for stereo calibration.
     * @param R Rotation matrix between the coordinate systems of the first and the second
     * cameras.
     * @param tvec Translation vector between coordinate systems of the cameras.
     * @param R1 Output 3x3 rectification transform (rotation matrix) for the first camera.
     * @param R2 Output 3x3 rectification transform (rotation matrix) for the second camera.
     * @param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
     * camera.
     * @param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
     * camera.
     * @param Q Output \(4 \times 4\) disparity-to-depth mapping matrix (see reprojectImageTo3D ).
     * @param flags Operation flags that may be zero or REF: fisheye::CALIB_ZERO_DISPARITY . If the flag is set,
     * the function makes the principal points of each camera have the same pixel coordinates in the
     * rectified views. And if the flag is not set, the function may still shift the images in the
     * horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
     * useful image area.
     * @param newImageSize New image resolution after rectification. The same size should be passed to
     * #initUndistortRectifyMap (see the stereo_calib.cpp sample in OpenCV samples directory). When (0,0)
     * is passed (default), it is set to the original imageSize . Setting it to a larger value can help you
     * preserve details in the original image, especially when there is a big radial distortion.
     * @param balance Sets the new focal length in range between the min focal length and the max focal
     * length. Balance is in range of [0, 1].
     * @param fov_scale Divisor for new focal length.
     */
    public static void fisheye_stereoRectify(Mat K1, Mat D1, Mat K2, Mat D2, Size imageSize, Mat R, Mat tvec, Mat R1, Mat R2, Mat P1, Mat P2, Mat Q, int flags, Size newImageSize, double balance, double fov_scale) {
        fisheye_stereoRectify_0(K1.nativeObj, D1.nativeObj, K2.nativeObj, D2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, tvec.nativeObj, R1.nativeObj, R2.nativeObj, P1.nativeObj, P2.nativeObj, Q.nativeObj, flags, newImageSize.width, newImageSize.height, balance, fov_scale);
    }

    /**
     * Stereo rectification for fisheye camera model
     *
     * Variant of {@link #fisheye_stereoRectify(Mat, Mat, Mat, Mat, Size, Mat, Mat, Mat, Mat, Mat, Mat, Mat, int, Size, double, double)}
     * with fov_scale assumed to be 1.0.
     */
    public static void fisheye_stereoRectify(Mat K1, Mat D1, Mat K2, Mat D2, Size imageSize, Mat R, Mat tvec, Mat R1, Mat R2, Mat P1, Mat P2, Mat Q, int flags, Size newImageSize, double balance) {
        fisheye_stereoRectify_1(K1.nativeObj, D1.nativeObj, K2.nativeObj, D2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, tvec.nativeObj, R1.nativeObj, R2.nativeObj, P1.nativeObj, P2.nativeObj, Q.nativeObj, flags, newImageSize.width, newImageSize.height, balance);
    }

    /**
     * Stereo rectification for fisheye camera model
     *
     * Variant of {@link #fisheye_stereoRectify(Mat, Mat, Mat, Mat, Size, Mat, Mat, Mat, Mat, Mat, Mat, Mat, int, Size, double, double)}
     * with balance assumed to be 0.0 and fov_scale 1.0.
     */
    public static void fisheye_stereoRectify(Mat K1, Mat D1, Mat K2, Mat D2, Size imageSize, Mat R, Mat tvec, Mat R1, Mat R2, Mat P1, Mat P2, Mat Q, int flags, Size newImageSize) {
        fisheye_stereoRectify_2(K1.nativeObj, D1.nativeObj, K2.nativeObj, D2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, tvec.nativeObj, R1.nativeObj, R2.nativeObj, P1.nativeObj, P2.nativeObj, Q.nativeObj, flags, newImageSize.width, newImageSize.height);
    }

    /**
     * Stereo rectification for fisheye camera model
     *
     * Variant of {@link #fisheye_stereoRectify(Mat, Mat, Mat, Mat, Size, Mat, Mat, Mat, Mat, Mat, Mat, Mat, int, Size, double, double)}
     * with newImageSize assumed to be empty (the original imageSize is used), balance 0.0 and
     * fov_scale 1.0.
     */
    public static void fisheye_stereoRectify(Mat K1, Mat D1, Mat K2, Mat D2, Size imageSize, Mat R, Mat tvec, Mat R1, Mat R2, Mat P1, Mat P2, Mat Q, int flags) {
        fisheye_stereoRectify_3(K1.nativeObj, D1.nativeObj, K2.nativeObj, D2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, tvec.nativeObj, R1.nativeObj, R2.nativeObj, P1.nativeObj, P2.nativeObj, Q.nativeObj, flags);
    }
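
    // Editor's note: illustrative usage sketch, not part of the generated bindings.
    // Computes rectification transforms for a calibrated fisheye stereo pair; R and tvec
    // would come from fisheye_stereoCalibrate.
    private static void exampleFisheyeStereoRectify(Mat K1, Mat D1, Mat K2, Mat D2, Size size, Mat R, Mat tvec) {
        Mat R1 = new Mat(), R2 = new Mat(), P1 = new Mat(), P2 = new Mat(), Q = new Mat();
        fisheye_stereoRectify(K1, D1, K2, D2, size, R, tvec, R1, R2, P1, P2, Q, fisheye_CALIB_ZERO_DISPARITY);
        // R1/R2 and P1/P2 would then seed fisheye_initUndistortRectifyMap for each camera.
    }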
11486 * @param imagePoints2 Vector of vectors of the projections of the calibration pattern points, 11487 * observed by the second camera. 11488 * @param K1 Input/output first camera intrinsic matrix: 11489 * \(\vecthreethree{f_x^{(j)}}{0}{c_x^{(j)}}{0}{f_y^{(j)}}{c_y^{(j)}}{0}{0}{1}\) , \(j = 0,\, 1\) . If 11490 * any of REF: fisheye::CALIB_USE_INTRINSIC_GUESS , REF: fisheye::CALIB_FIX_INTRINSIC are specified, 11491 * some or all of the matrix components must be initialized. 11492 * @param D1 Input/output vector of distortion coefficients \(\distcoeffsfisheye\) of 4 elements. 11493 * @param K2 Input/output second camera intrinsic matrix. The parameter is similar to K1 . 11494 * @param D2 Input/output lens distortion coefficients for the second camera. The parameter is 11495 * similar to D1 . 11496 * @param imageSize Size of the image used only to initialize camera intrinsic matrix. 11497 * @param R Output rotation matrix between the 1st and the 2nd camera coordinate systems. 11498 * @param T Output translation vector between the coordinate systems of the cameras. 11499 * @param flags Different flags that may be zero or a combination of the following values: 11500 * <ul> 11501 * <li> 11502 * REF: fisheye::CALIB_FIX_INTRINSIC Fix K1, K2? and D1, D2? so that only R, T matrices 11503 * are estimated. 11504 * </li> 11505 * <li> 11506 * REF: fisheye::CALIB_USE_INTRINSIC_GUESS K1, K2 contains valid initial values of 11507 * fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image 11508 * center (imageSize is used), and focal distances are computed in a least-squares fashion. 11509 * </li> 11510 * <li> 11511 * REF: fisheye::CALIB_RECOMPUTE_EXTRINSIC Extrinsic will be recomputed after each iteration 11512 * of intrinsic optimization. 11513 * </li> 11514 * <li> 11515 * REF: fisheye::CALIB_CHECK_COND The functions will check validity of condition number. 11516 * </li> 11517 * <li> 11518 * REF: fisheye::CALIB_FIX_SKEW Skew coefficient (alpha) is set to zero and stay zero. 11519 * </li> 11520 * <li> 11521 * REF: fisheye::CALIB_FIX_K1,..., REF: fisheye::CALIB_FIX_K4 Selected distortion coefficients are set to zeros and stay 11522 * zero. 11523 * </li> 11524 * </ul> 11525 * @param criteria Termination criteria for the iterative optimization algorithm. 11526 * @return automatically generated 11527 */ 11528 public static double fisheye_stereoCalibrate(List<Mat> objectPoints, List<Mat> imagePoints1, List<Mat> imagePoints2, Mat K1, Mat D1, Mat K2, Mat D2, Size imageSize, Mat R, Mat T, int flags, TermCriteria criteria) { 11529 Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints); 11530 Mat imagePoints1_mat = Converters.vector_Mat_to_Mat(imagePoints1); 11531 Mat imagePoints2_mat = Converters.vector_Mat_to_Mat(imagePoints2); 11532 return fisheye_stereoCalibrate_0(objectPoints_mat.nativeObj, imagePoints1_mat.nativeObj, imagePoints2_mat.nativeObj, K1.nativeObj, D1.nativeObj, K2.nativeObj, D2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, flags, criteria.type, criteria.maxCount, criteria.epsilon); 11533 } 11534 11535 /** 11536 * Performs stereo calibration 11537 * 11538 * @param objectPoints Vector of vectors of the calibration pattern points. 11539 * @param imagePoints1 Vector of vectors of the projections of the calibration pattern points, 11540 * observed by the first camera. 11541 * @param imagePoints2 Vector of vectors of the projections of the calibration pattern points, 11542 * observed by the second camera. 
11543 * @param K1 Input/output first camera intrinsic matrix: 11544 * \(\vecthreethree{f_x^{(j)}}{0}{c_x^{(j)}}{0}{f_y^{(j)}}{c_y^{(j)}}{0}{0}{1}\) , \(j = 0,\, 1\) . If 11545 * any of REF: fisheye::CALIB_USE_INTRINSIC_GUESS , REF: fisheye::CALIB_FIX_INTRINSIC are specified, 11546 * some or all of the matrix components must be initialized. 11547 * @param D1 Input/output vector of distortion coefficients \(\distcoeffsfisheye\) of 4 elements. 11548 * @param K2 Input/output second camera intrinsic matrix. The parameter is similar to K1 . 11549 * @param D2 Input/output lens distortion coefficients for the second camera. The parameter is 11550 * similar to D1 . 11551 * @param imageSize Size of the image used only to initialize camera intrinsic matrix. 11552 * @param R Output rotation matrix between the 1st and the 2nd camera coordinate systems. 11553 * @param T Output translation vector between the coordinate systems of the cameras. 11554 * @param flags Different flags that may be zero or a combination of the following values: 11555 * <ul> 11556 * <li> 11557 * REF: fisheye::CALIB_FIX_INTRINSIC Fix K1, K2? and D1, D2? so that only R, T matrices 11558 * are estimated. 11559 * </li> 11560 * <li> 11561 * REF: fisheye::CALIB_USE_INTRINSIC_GUESS K1, K2 contains valid initial values of 11562 * fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image 11563 * center (imageSize is used), and focal distances are computed in a least-squares fashion. 11564 * </li> 11565 * <li> 11566 * REF: fisheye::CALIB_RECOMPUTE_EXTRINSIC Extrinsic will be recomputed after each iteration 11567 * of intrinsic optimization. 11568 * </li> 11569 * <li> 11570 * REF: fisheye::CALIB_CHECK_COND The functions will check validity of condition number. 11571 * </li> 11572 * <li> 11573 * REF: fisheye::CALIB_FIX_SKEW Skew coefficient (alpha) is set to zero and stay zero. 11574 * </li> 11575 * <li> 11576 * REF: fisheye::CALIB_FIX_K1,..., REF: fisheye::CALIB_FIX_K4 Selected distortion coefficients are set to zeros and stay 11577 * zero. 11578 * </li> 11579 * </ul> 11580 * @return automatically generated 11581 */ 11582 public static double fisheye_stereoCalibrate(List<Mat> objectPoints, List<Mat> imagePoints1, List<Mat> imagePoints2, Mat K1, Mat D1, Mat K2, Mat D2, Size imageSize, Mat R, Mat T, int flags) { 11583 Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints); 11584 Mat imagePoints1_mat = Converters.vector_Mat_to_Mat(imagePoints1); 11585 Mat imagePoints2_mat = Converters.vector_Mat_to_Mat(imagePoints2); 11586 return fisheye_stereoCalibrate_1(objectPoints_mat.nativeObj, imagePoints1_mat.nativeObj, imagePoints2_mat.nativeObj, K1.nativeObj, D1.nativeObj, K2.nativeObj, D2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, flags); 11587 } 11588 11589 /** 11590 * Performs stereo calibration 11591 * 11592 * @param objectPoints Vector of vectors of the calibration pattern points. 11593 * @param imagePoints1 Vector of vectors of the projections of the calibration pattern points, 11594 * observed by the first camera. 11595 * @param imagePoints2 Vector of vectors of the projections of the calibration pattern points, 11596 * observed by the second camera. 11597 * @param K1 Input/output first camera intrinsic matrix: 11598 * \(\vecthreethree{f_x^{(j)}}{0}{c_x^{(j)}}{0}{f_y^{(j)}}{c_y^{(j)}}{0}{0}{1}\) , \(j = 0,\, 1\) . 
If 11599 * any of REF: fisheye::CALIB_USE_INTRINSIC_GUESS , REF: fisheye::CALIB_FIX_INTRINSIC are specified, 11600 * some or all of the matrix components must be initialized. 11601 * @param D1 Input/output vector of distortion coefficients \(\distcoeffsfisheye\) of 4 elements. 11602 * @param K2 Input/output second camera intrinsic matrix. The parameter is similar to K1 . 11603 * @param D2 Input/output lens distortion coefficients for the second camera. The parameter is 11604 * similar to D1 . 11605 * @param imageSize Size of the image used only to initialize camera intrinsic matrix. 11606 * @param R Output rotation matrix between the 1st and the 2nd camera coordinate systems. 11607 * @param T Output translation vector between the coordinate systems of the cameras. 11608 * <ul> 11609 * <li> 11610 * REF: fisheye::CALIB_FIX_INTRINSIC Fix K1, K2? and D1, D2? so that only R, T matrices 11611 * are estimated. 11612 * </li> 11613 * <li> 11614 * REF: fisheye::CALIB_USE_INTRINSIC_GUESS K1, K2 contains valid initial values of 11615 * fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image 11616 * center (imageSize is used), and focal distances are computed in a least-squares fashion. 11617 * </li> 11618 * <li> 11619 * REF: fisheye::CALIB_RECOMPUTE_EXTRINSIC Extrinsic will be recomputed after each iteration 11620 * of intrinsic optimization. 11621 * </li> 11622 * <li> 11623 * REF: fisheye::CALIB_CHECK_COND The functions will check validity of condition number. 11624 * </li> 11625 * <li> 11626 * REF: fisheye::CALIB_FIX_SKEW Skew coefficient (alpha) is set to zero and stay zero. 11627 * </li> 11628 * <li> 11629 * REF: fisheye::CALIB_FIX_K1,..., REF: fisheye::CALIB_FIX_K4 Selected distortion coefficients are set to zeros and stay 11630 * zero. 
11631 * </li>
11632 * </ul>
11633 * @return automatically generated
11634 */
11635 public static double fisheye_stereoCalibrate(List<Mat> objectPoints, List<Mat> imagePoints1, List<Mat> imagePoints2, Mat K1, Mat D1, Mat K2, Mat D2, Size imageSize, Mat R, Mat T) {
11636     Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
11637     Mat imagePoints1_mat = Converters.vector_Mat_to_Mat(imagePoints1);
11638     Mat imagePoints2_mat = Converters.vector_Mat_to_Mat(imagePoints2);
11639     return fisheye_stereoCalibrate_2(objectPoints_mat.nativeObj, imagePoints1_mat.nativeObj, imagePoints2_mat.nativeObj, K1.nativeObj, D1.nativeObj, K2.nativeObj, D2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj);
11640 }
11641
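/*
 * Example (editor's sketch, not part of the generated bindings): a typical call of the
 * fisheye_stereoCalibrate wrappers above. It assumes the per-view calibration points were
 * already detected (e.g. with findChessboardCorners) and packed into one Mat per view; the
 * helper name calibrateFisheyePair is hypothetical.
 *
 *     static void calibrateFisheyePair(List<Mat> objectPoints, List<Mat> imagePoints1,
 *                                      List<Mat> imagePoints2, Size imageSize) {
 *         Mat K1 = new Mat(), D1 = new Mat();  // intrinsics are estimated because
 *         Mat K2 = new Mat(), D2 = new Mat();  // fisheye_CALIB_FIX_INTRINSIC is not set
 *         Mat R = new Mat(), T = new Mat();    // outputs: inter-camera rotation/translation
 *         int flags = Calib3d.fisheye_CALIB_RECOMPUTE_EXTRINSIC
 *                   | Calib3d.fisheye_CALIB_CHECK_COND
 *                   | Calib3d.fisheye_CALIB_FIX_SKEW;
 *         double rms = Calib3d.fisheye_stereoCalibrate(objectPoints, imagePoints1,
 *                 imagePoints2, K1, D1, K2, D2, imageSize, R, T, flags);
 *         System.out.println("fisheye stereo calibration RMS = " + rms);
 *     }
 */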
11642
11643
11644
11645 // C++: void cv::Rodrigues(Mat src, Mat& dst, Mat& jacobian = Mat())
11646 private static native void Rodrigues_0(long src_nativeObj, long dst_nativeObj, long jacobian_nativeObj);
11647 private static native void Rodrigues_1(long src_nativeObj, long dst_nativeObj);
11648
11649 // C++: Mat cv::findHomography(vector_Point2f srcPoints, vector_Point2f dstPoints, int method = 0, double ransacReprojThreshold = 3, Mat& mask = Mat(), int maxIters = 2000, double confidence = 0.995)
11650 private static native long findHomography_0(long srcPoints_mat_nativeObj, long dstPoints_mat_nativeObj, int method, double ransacReprojThreshold, long mask_nativeObj, int maxIters, double confidence);
11651 private static native long findHomography_1(long srcPoints_mat_nativeObj, long dstPoints_mat_nativeObj, int method, double ransacReprojThreshold, long mask_nativeObj, int maxIters);
11652 private static native long findHomography_2(long srcPoints_mat_nativeObj, long dstPoints_mat_nativeObj, int method, double ransacReprojThreshold, long mask_nativeObj);
11653 private static native long findHomography_3(long srcPoints_mat_nativeObj, long dstPoints_mat_nativeObj, int method, double ransacReprojThreshold);
11654 private static native long findHomography_4(long srcPoints_mat_nativeObj, long dstPoints_mat_nativeObj, int method);
11655 private static native long findHomography_5(long srcPoints_mat_nativeObj, long dstPoints_mat_nativeObj);
11656
11657 // C++: Mat cv::findHomography(vector_Point2f srcPoints, vector_Point2f dstPoints, Mat& mask, UsacParams params)
11658 private static native long findHomography_6(long srcPoints_mat_nativeObj, long dstPoints_mat_nativeObj, long mask_nativeObj, long params_nativeObj);
11659
11660 // C++: Vec3d cv::RQDecomp3x3(Mat src, Mat& mtxR, Mat& mtxQ, Mat& Qx = Mat(), Mat& Qy = Mat(), Mat& Qz = Mat())
11661 private static native double[] RQDecomp3x3_0(long src_nativeObj, long mtxR_nativeObj, long mtxQ_nativeObj, long Qx_nativeObj, long Qy_nativeObj, long Qz_nativeObj);
11662 private static native double[] RQDecomp3x3_1(long src_nativeObj, long mtxR_nativeObj, long mtxQ_nativeObj, long Qx_nativeObj, long Qy_nativeObj);
11663 private static native double[] RQDecomp3x3_2(long src_nativeObj, long mtxR_nativeObj, long mtxQ_nativeObj, long Qx_nativeObj);
11664 private static native double[] RQDecomp3x3_3(long src_nativeObj, long mtxR_nativeObj, long mtxQ_nativeObj);
11665
11666 // C++: void cv::decomposeProjectionMatrix(Mat projMatrix, Mat& cameraMatrix, Mat& rotMatrix, Mat& transVect, Mat& rotMatrixX = Mat(), Mat& rotMatrixY = Mat(), Mat& rotMatrixZ = Mat(), Mat& eulerAngles = Mat())
11667 private static native void decomposeProjectionMatrix_0(long projMatrix_nativeObj, long cameraMatrix_nativeObj, long rotMatrix_nativeObj, long transVect_nativeObj, long rotMatrixX_nativeObj, long rotMatrixY_nativeObj, long rotMatrixZ_nativeObj, long eulerAngles_nativeObj);
11668 private static native void decomposeProjectionMatrix_1(long projMatrix_nativeObj, long cameraMatrix_nativeObj, long rotMatrix_nativeObj, long transVect_nativeObj, long rotMatrixX_nativeObj, long rotMatrixY_nativeObj, long rotMatrixZ_nativeObj);
11669 private static native void decomposeProjectionMatrix_2(long projMatrix_nativeObj, long cameraMatrix_nativeObj, long rotMatrix_nativeObj, long transVect_nativeObj, long rotMatrixX_nativeObj, long rotMatrixY_nativeObj);
11670 private static native void decomposeProjectionMatrix_3(long projMatrix_nativeObj, long cameraMatrix_nativeObj, long rotMatrix_nativeObj, long transVect_nativeObj, long rotMatrixX_nativeObj);
11671 private static native void decomposeProjectionMatrix_4(long projMatrix_nativeObj, long cameraMatrix_nativeObj, long rotMatrix_nativeObj, long transVect_nativeObj);
11672
11673 // C++: void cv::matMulDeriv(Mat A, Mat B, Mat& dABdA, Mat& dABdB)
11674 private static native void matMulDeriv_0(long A_nativeObj, long B_nativeObj, long dABdA_nativeObj, long dABdB_nativeObj);
11675
11676 // C++: void cv::composeRT(Mat rvec1, Mat tvec1, Mat rvec2, Mat tvec2, Mat& rvec3, Mat& tvec3, Mat& dr3dr1 = Mat(), Mat& dr3dt1 = Mat(), Mat& dr3dr2 = Mat(), Mat& dr3dt2 = Mat(), Mat& dt3dr1 = Mat(), Mat& dt3dt1 = Mat(), Mat& dt3dr2 = Mat(), Mat& dt3dt2 = Mat())
11677 private static native void composeRT_0(long rvec1_nativeObj, long tvec1_nativeObj, long rvec2_nativeObj, long tvec2_nativeObj, long rvec3_nativeObj, long tvec3_nativeObj, long dr3dr1_nativeObj, long dr3dt1_nativeObj, long dr3dr2_nativeObj, long dr3dt2_nativeObj, long dt3dr1_nativeObj, long dt3dt1_nativeObj, long dt3dr2_nativeObj, long dt3dt2_nativeObj);
11678 private static native void composeRT_1(long rvec1_nativeObj, long tvec1_nativeObj, long rvec2_nativeObj, long tvec2_nativeObj, long rvec3_nativeObj, long tvec3_nativeObj, long dr3dr1_nativeObj, long dr3dt1_nativeObj, long dr3dr2_nativeObj, long dr3dt2_nativeObj, long dt3dr1_nativeObj, long dt3dt1_nativeObj, long dt3dr2_nativeObj);
11679 private static native void composeRT_2(long rvec1_nativeObj, long tvec1_nativeObj, long rvec2_nativeObj, long tvec2_nativeObj, long rvec3_nativeObj, long tvec3_nativeObj, long dr3dr1_nativeObj, long dr3dt1_nativeObj, long dr3dr2_nativeObj, long dr3dt2_nativeObj, long dt3dr1_nativeObj, long dt3dt1_nativeObj);
11680 private static native void composeRT_3(long rvec1_nativeObj, long tvec1_nativeObj, long rvec2_nativeObj, long tvec2_nativeObj, long rvec3_nativeObj, long tvec3_nativeObj, long dr3dr1_nativeObj, long dr3dt1_nativeObj, long dr3dr2_nativeObj, long dr3dt2_nativeObj, long dt3dr1_nativeObj);
11681 private static native void composeRT_4(long rvec1_nativeObj, long tvec1_nativeObj, long rvec2_nativeObj, long tvec2_nativeObj, long rvec3_nativeObj, long tvec3_nativeObj, long dr3dr1_nativeObj, long dr3dt1_nativeObj, long dr3dr2_nativeObj, long dr3dt2_nativeObj);
11682 private static native void composeRT_5(long rvec1_nativeObj, long tvec1_nativeObj, long rvec2_nativeObj, long tvec2_nativeObj, long rvec3_nativeObj, long tvec3_nativeObj, long dr3dr1_nativeObj, long dr3dt1_nativeObj, long dr3dr2_nativeObj);
11683 private static native void composeRT_6(long rvec1_nativeObj, long tvec1_nativeObj, long rvec2_nativeObj, long tvec2_nativeObj, long rvec3_nativeObj, long tvec3_nativeObj, long dr3dr1_nativeObj, long dr3dt1_nativeObj);
11684 private static native void
composeRT_7(long rvec1_nativeObj, long tvec1_nativeObj, long rvec2_nativeObj, long tvec2_nativeObj, long rvec3_nativeObj, long tvec3_nativeObj, long dr3dr1_nativeObj); 11685 private static native void composeRT_8(long rvec1_nativeObj, long tvec1_nativeObj, long rvec2_nativeObj, long tvec2_nativeObj, long rvec3_nativeObj, long tvec3_nativeObj); 11686 11687 // C++: void cv::projectPoints(vector_Point3f objectPoints, Mat rvec, Mat tvec, Mat cameraMatrix, vector_double distCoeffs, vector_Point2f& imagePoints, Mat& jacobian = Mat(), double aspectRatio = 0) 11688 private static native void projectPoints_0(long objectPoints_mat_nativeObj, long rvec_nativeObj, long tvec_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_mat_nativeObj, long imagePoints_mat_nativeObj, long jacobian_nativeObj, double aspectRatio); 11689 private static native void projectPoints_1(long objectPoints_mat_nativeObj, long rvec_nativeObj, long tvec_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_mat_nativeObj, long imagePoints_mat_nativeObj, long jacobian_nativeObj); 11690 private static native void projectPoints_2(long objectPoints_mat_nativeObj, long rvec_nativeObj, long tvec_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_mat_nativeObj, long imagePoints_mat_nativeObj); 11691 11692 // C++: bool cv::solvePnP(vector_Point3f objectPoints, vector_Point2f imagePoints, Mat cameraMatrix, vector_double distCoeffs, Mat& rvec, Mat& tvec, bool useExtrinsicGuess = false, int flags = SOLVEPNP_ITERATIVE) 11693 private static native boolean solvePnP_0(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_mat_nativeObj, long rvec_nativeObj, long tvec_nativeObj, boolean useExtrinsicGuess, int flags); 11694 private static native boolean solvePnP_1(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_mat_nativeObj, long rvec_nativeObj, long tvec_nativeObj, boolean useExtrinsicGuess); 11695 private static native boolean solvePnP_2(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_mat_nativeObj, long rvec_nativeObj, long tvec_nativeObj); 11696 11697 // C++: bool cv::solvePnPRansac(vector_Point3f objectPoints, vector_Point2f imagePoints, Mat cameraMatrix, vector_double distCoeffs, Mat& rvec, Mat& tvec, bool useExtrinsicGuess = false, int iterationsCount = 100, float reprojectionError = 8.0, double confidence = 0.99, Mat& inliers = Mat(), int flags = SOLVEPNP_ITERATIVE) 11698 private static native boolean solvePnPRansac_0(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_mat_nativeObj, long rvec_nativeObj, long tvec_nativeObj, boolean useExtrinsicGuess, int iterationsCount, float reprojectionError, double confidence, long inliers_nativeObj, int flags); 11699 private static native boolean solvePnPRansac_1(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_mat_nativeObj, long rvec_nativeObj, long tvec_nativeObj, boolean useExtrinsicGuess, int iterationsCount, float reprojectionError, double confidence, long inliers_nativeObj); 11700 private static native boolean solvePnPRansac_2(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_mat_nativeObj, long rvec_nativeObj, long tvec_nativeObj, boolean useExtrinsicGuess, int iterationsCount, float reprojectionError, double confidence); 11701 
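/*
 * Example (editor's sketch): pose estimation through the public solvePnP wrapper that
 * fronts the solvePnP_* stubs above. The intrinsic matrix and the 3D-2D correspondences
 * below are hypothetical placeholders; a real caller would use calibrated intrinsics and
 * detected feature points.
 *
 *     MatOfPoint3f objectPts = new MatOfPoint3f(
 *             new org.opencv.core.Point3(0, 0, 0), new org.opencv.core.Point3(1, 0, 0),
 *             new org.opencv.core.Point3(1, 1, 0), new org.opencv.core.Point3(0, 1, 0));
 *     MatOfPoint2f imagePts = new MatOfPoint2f(
 *             new Point(320, 240), new Point(400, 238),
 *             new Point(405, 320), new Point(318, 325));
 *     Mat K = Mat.eye(3, 3, org.opencv.core.CvType.CV_64F); // stand-in camera matrix
 *     MatOfDouble dist = new MatOfDouble(0, 0, 0, 0, 0);    // assume no lens distortion
 *     Mat rvec = new Mat(), tvec = new Mat();
 *     boolean ok = Calib3d.solvePnP(objectPts, imagePts, K, dist, rvec, tvec);
 *     // rvec converts to a 3x3 rotation matrix via Calib3d.Rodrigues(rvec, R)
 */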
private static native boolean solvePnPRansac_3(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_mat_nativeObj, long rvec_nativeObj, long tvec_nativeObj, boolean useExtrinsicGuess, int iterationsCount, float reprojectionError); 11702 private static native boolean solvePnPRansac_4(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_mat_nativeObj, long rvec_nativeObj, long tvec_nativeObj, boolean useExtrinsicGuess, int iterationsCount); 11703 private static native boolean solvePnPRansac_5(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_mat_nativeObj, long rvec_nativeObj, long tvec_nativeObj, boolean useExtrinsicGuess); 11704 private static native boolean solvePnPRansac_6(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_mat_nativeObj, long rvec_nativeObj, long tvec_nativeObj); 11705 11706 // C++: bool cv::solvePnPRansac(vector_Point3f objectPoints, vector_Point2f imagePoints, Mat& cameraMatrix, vector_double distCoeffs, Mat& rvec, Mat& tvec, Mat& inliers, UsacParams params = UsacParams()) 11707 private static native boolean solvePnPRansac_7(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_mat_nativeObj, long rvec_nativeObj, long tvec_nativeObj, long inliers_nativeObj, long params_nativeObj); 11708 private static native boolean solvePnPRansac_8(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_mat_nativeObj, long rvec_nativeObj, long tvec_nativeObj, long inliers_nativeObj); 11709 11710 // C++: int cv::solveP3P(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, vector_Mat& rvecs, vector_Mat& tvecs, int flags) 11711 private static native int solveP3P_0(long objectPoints_nativeObj, long imagePoints_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, int flags); 11712 11713 // C++: void cv::solvePnPRefineLM(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, Mat& rvec, Mat& tvec, TermCriteria criteria = TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 20, FLT_EPSILON)) 11714 private static native void solvePnPRefineLM_0(long objectPoints_nativeObj, long imagePoints_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvec_nativeObj, long tvec_nativeObj, int criteria_type, int criteria_maxCount, double criteria_epsilon); 11715 private static native void solvePnPRefineLM_1(long objectPoints_nativeObj, long imagePoints_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvec_nativeObj, long tvec_nativeObj); 11716 11717 // C++: void cv::solvePnPRefineVVS(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, Mat& rvec, Mat& tvec, TermCriteria criteria = TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 20, FLT_EPSILON), double VVSlambda = 1) 11718 private static native void solvePnPRefineVVS_0(long objectPoints_nativeObj, long imagePoints_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvec_nativeObj, long tvec_nativeObj, int criteria_type, int criteria_maxCount, double criteria_epsilon, double VVSlambda); 11719 private static native void solvePnPRefineVVS_1(long objectPoints_nativeObj, long imagePoints_nativeObj, long cameraMatrix_nativeObj, long 
distCoeffs_nativeObj, long rvec_nativeObj, long tvec_nativeObj, int criteria_type, int criteria_maxCount, double criteria_epsilon); 11720 private static native void solvePnPRefineVVS_2(long objectPoints_nativeObj, long imagePoints_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvec_nativeObj, long tvec_nativeObj); 11721 11722 // C++: int cv::solvePnPGeneric(Mat objectPoints, Mat imagePoints, Mat cameraMatrix, Mat distCoeffs, vector_Mat& rvecs, vector_Mat& tvecs, bool useExtrinsicGuess = false, SolvePnPMethod flags = SOLVEPNP_ITERATIVE, Mat rvec = Mat(), Mat tvec = Mat(), Mat& reprojectionError = Mat()) 11723 private static native int solvePnPGeneric_0(long objectPoints_nativeObj, long imagePoints_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, boolean useExtrinsicGuess, int flags, long rvec_nativeObj, long tvec_nativeObj, long reprojectionError_nativeObj); 11724 private static native int solvePnPGeneric_1(long objectPoints_nativeObj, long imagePoints_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, boolean useExtrinsicGuess, int flags, long rvec_nativeObj, long tvec_nativeObj); 11725 private static native int solvePnPGeneric_2(long objectPoints_nativeObj, long imagePoints_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, boolean useExtrinsicGuess, int flags, long rvec_nativeObj); 11726 private static native int solvePnPGeneric_3(long objectPoints_nativeObj, long imagePoints_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, boolean useExtrinsicGuess, int flags); 11727 private static native int solvePnPGeneric_4(long objectPoints_nativeObj, long imagePoints_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, boolean useExtrinsicGuess); 11728 private static native int solvePnPGeneric_5(long objectPoints_nativeObj, long imagePoints_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj); 11729 11730 // C++: Mat cv::initCameraMatrix2D(vector_vector_Point3f objectPoints, vector_vector_Point2f imagePoints, Size imageSize, double aspectRatio = 1.0) 11731 private static native long initCameraMatrix2D_0(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double imageSize_width, double imageSize_height, double aspectRatio); 11732 private static native long initCameraMatrix2D_1(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double imageSize_width, double imageSize_height); 11733 11734 // C++: bool cv::findChessboardCorners(Mat image, Size patternSize, vector_Point2f& corners, int flags = CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE) 11735 private static native boolean findChessboardCorners_0(long image_nativeObj, double patternSize_width, double patternSize_height, long corners_mat_nativeObj, int flags); 11736 private static native boolean findChessboardCorners_1(long image_nativeObj, double patternSize_width, double patternSize_height, long corners_mat_nativeObj); 11737 11738 // C++: bool cv::checkChessboard(Mat img, Size size) 11739 private static native boolean checkChessboard_0(long img_nativeObj, double size_width, double size_height); 11740 11741 // C++: bool cv::findChessboardCornersSB(Mat image, Size patternSize, Mat& corners, int 
flags, Mat& meta) 11742 private static native boolean findChessboardCornersSBWithMeta_0(long image_nativeObj, double patternSize_width, double patternSize_height, long corners_nativeObj, int flags, long meta_nativeObj); 11743 11744 // C++: bool cv::findChessboardCornersSB(Mat image, Size patternSize, Mat& corners, int flags = 0) 11745 private static native boolean findChessboardCornersSB_0(long image_nativeObj, double patternSize_width, double patternSize_height, long corners_nativeObj, int flags); 11746 private static native boolean findChessboardCornersSB_1(long image_nativeObj, double patternSize_width, double patternSize_height, long corners_nativeObj); 11747 11748 // C++: Scalar cv::estimateChessboardSharpness(Mat image, Size patternSize, Mat corners, float rise_distance = 0.8F, bool vertical = false, Mat& sharpness = Mat()) 11749 private static native double[] estimateChessboardSharpness_0(long image_nativeObj, double patternSize_width, double patternSize_height, long corners_nativeObj, float rise_distance, boolean vertical, long sharpness_nativeObj); 11750 private static native double[] estimateChessboardSharpness_1(long image_nativeObj, double patternSize_width, double patternSize_height, long corners_nativeObj, float rise_distance, boolean vertical); 11751 private static native double[] estimateChessboardSharpness_2(long image_nativeObj, double patternSize_width, double patternSize_height, long corners_nativeObj, float rise_distance); 11752 private static native double[] estimateChessboardSharpness_3(long image_nativeObj, double patternSize_width, double patternSize_height, long corners_nativeObj); 11753 11754 // C++: bool cv::find4QuadCornerSubpix(Mat img, Mat& corners, Size region_size) 11755 private static native boolean find4QuadCornerSubpix_0(long img_nativeObj, long corners_nativeObj, double region_size_width, double region_size_height); 11756 11757 // C++: void cv::drawChessboardCorners(Mat& image, Size patternSize, vector_Point2f corners, bool patternWasFound) 11758 private static native void drawChessboardCorners_0(long image_nativeObj, double patternSize_width, double patternSize_height, long corners_mat_nativeObj, boolean patternWasFound); 11759 11760 // C++: void cv::drawFrameAxes(Mat& image, Mat cameraMatrix, Mat distCoeffs, Mat rvec, Mat tvec, float length, int thickness = 3) 11761 private static native void drawFrameAxes_0(long image_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvec_nativeObj, long tvec_nativeObj, float length, int thickness); 11762 private static native void drawFrameAxes_1(long image_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvec_nativeObj, long tvec_nativeObj, float length); 11763 11764 // C++: bool cv::findCirclesGrid(Mat image, Size patternSize, Mat& centers, int flags = CALIB_CB_SYMMETRIC_GRID, Ptr_FeatureDetector blobDetector = SimpleBlobDetector::create()) 11765 private static native boolean findCirclesGrid_0(long image_nativeObj, double patternSize_width, double patternSize_height, long centers_nativeObj, int flags); 11766 private static native boolean findCirclesGrid_2(long image_nativeObj, double patternSize_width, double patternSize_height, long centers_nativeObj); 11767 11768 // C++: double cv::calibrateCamera(vector_Mat objectPoints, vector_Mat imagePoints, Size imageSize, Mat& cameraMatrix, Mat& distCoeffs, vector_Mat& rvecs, vector_Mat& tvecs, Mat& stdDeviationsIntrinsics, Mat& stdDeviationsExtrinsics, Mat& perViewErrors, int flags = 0, TermCriteria criteria = TermCriteria( 
TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON)) 11769 private static native double calibrateCameraExtended_0(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double imageSize_width, double imageSize_height, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, long stdDeviationsIntrinsics_nativeObj, long stdDeviationsExtrinsics_nativeObj, long perViewErrors_nativeObj, int flags, int criteria_type, int criteria_maxCount, double criteria_epsilon); 11770 private static native double calibrateCameraExtended_1(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double imageSize_width, double imageSize_height, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, long stdDeviationsIntrinsics_nativeObj, long stdDeviationsExtrinsics_nativeObj, long perViewErrors_nativeObj, int flags); 11771 private static native double calibrateCameraExtended_2(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double imageSize_width, double imageSize_height, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, long stdDeviationsIntrinsics_nativeObj, long stdDeviationsExtrinsics_nativeObj, long perViewErrors_nativeObj); 11772 11773 // C++: double cv::calibrateCamera(vector_Mat objectPoints, vector_Mat imagePoints, Size imageSize, Mat& cameraMatrix, Mat& distCoeffs, vector_Mat& rvecs, vector_Mat& tvecs, int flags = 0, TermCriteria criteria = TermCriteria( TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON)) 11774 private static native double calibrateCamera_0(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double imageSize_width, double imageSize_height, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, int flags, int criteria_type, int criteria_maxCount, double criteria_epsilon); 11775 private static native double calibrateCamera_1(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double imageSize_width, double imageSize_height, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, int flags); 11776 private static native double calibrateCamera_2(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double imageSize_width, double imageSize_height, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj); 11777 11778 // C++: double cv::calibrateCameraRO(vector_Mat objectPoints, vector_Mat imagePoints, Size imageSize, int iFixedPoint, Mat& cameraMatrix, Mat& distCoeffs, vector_Mat& rvecs, vector_Mat& tvecs, Mat& newObjPoints, Mat& stdDeviationsIntrinsics, Mat& stdDeviationsExtrinsics, Mat& stdDeviationsObjPoints, Mat& perViewErrors, int flags = 0, TermCriteria criteria = TermCriteria( TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON)) 11779 private static native double calibrateCameraROExtended_0(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double imageSize_width, double imageSize_height, int iFixedPoint, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, long newObjPoints_nativeObj, long stdDeviationsIntrinsics_nativeObj, long stdDeviationsExtrinsics_nativeObj, long stdDeviationsObjPoints_nativeObj, long perViewErrors_nativeObj, int flags, int criteria_type, int criteria_maxCount, double criteria_epsilon); 11780 
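/*
 * Example (editor's sketch): single-camera calibration through the public calibrateCamera
 * wrapper that fronts the calibrateCamera_* stubs above. It assumes the board geometry is
 * known and that corners were detected per view, e.g. with findChessboardCorners.
 *
 *     List<Mat> objectPoints = new ArrayList<>(); // one MatOfPoint3f of board corners per view
 *     List<Mat> imagePoints = new ArrayList<>();  // one MatOfPoint2f of detected corners per view
 *     // ... fill both lists, one entry per captured view ...
 *     Mat cameraMatrix = new Mat(), distCoeffs = new Mat();
 *     List<Mat> rvecs = new ArrayList<>(), tvecs = new ArrayList<>();
 *     double rms = Calib3d.calibrateCamera(objectPoints, imagePoints, new Size(640, 480),
 *             cameraMatrix, distCoeffs, rvecs, tvecs);
 *     // rms is the overall RMS reprojection error; lower is better
 */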
private static native double calibrateCameraROExtended_1(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double imageSize_width, double imageSize_height, int iFixedPoint, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, long newObjPoints_nativeObj, long stdDeviationsIntrinsics_nativeObj, long stdDeviationsExtrinsics_nativeObj, long stdDeviationsObjPoints_nativeObj, long perViewErrors_nativeObj, int flags); 11781 private static native double calibrateCameraROExtended_2(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double imageSize_width, double imageSize_height, int iFixedPoint, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, long newObjPoints_nativeObj, long stdDeviationsIntrinsics_nativeObj, long stdDeviationsExtrinsics_nativeObj, long stdDeviationsObjPoints_nativeObj, long perViewErrors_nativeObj); 11782 11783 // C++: double cv::calibrateCameraRO(vector_Mat objectPoints, vector_Mat imagePoints, Size imageSize, int iFixedPoint, Mat& cameraMatrix, Mat& distCoeffs, vector_Mat& rvecs, vector_Mat& tvecs, Mat& newObjPoints, int flags = 0, TermCriteria criteria = TermCriteria( TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON)) 11784 private static native double calibrateCameraRO_0(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double imageSize_width, double imageSize_height, int iFixedPoint, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, long newObjPoints_nativeObj, int flags, int criteria_type, int criteria_maxCount, double criteria_epsilon); 11785 private static native double calibrateCameraRO_1(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double imageSize_width, double imageSize_height, int iFixedPoint, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, long newObjPoints_nativeObj, int flags); 11786 private static native double calibrateCameraRO_2(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double imageSize_width, double imageSize_height, int iFixedPoint, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, long newObjPoints_nativeObj); 11787 11788 // C++: void cv::calibrationMatrixValues(Mat cameraMatrix, Size imageSize, double apertureWidth, double apertureHeight, double& fovx, double& fovy, double& focalLength, Point2d& principalPoint, double& aspectRatio) 11789 private static native void calibrationMatrixValues_0(long cameraMatrix_nativeObj, double imageSize_width, double imageSize_height, double apertureWidth, double apertureHeight, double[] fovx_out, double[] fovy_out, double[] focalLength_out, double[] principalPoint_out, double[] aspectRatio_out); 11790 11791 // C++: double cv::stereoCalibrate(vector_Mat objectPoints, vector_Mat imagePoints1, vector_Mat imagePoints2, Mat& cameraMatrix1, Mat& distCoeffs1, Mat& cameraMatrix2, Mat& distCoeffs2, Size imageSize, Mat& R, Mat& T, Mat& E, Mat& F, Mat& perViewErrors, int flags = CALIB_FIX_INTRINSIC, TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 1e-6)) 11792 private static native double stereoCalibrateExtended_0(long objectPoints_mat_nativeObj, long imagePoints1_mat_nativeObj, long imagePoints2_mat_nativeObj, long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, 
double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj, long E_nativeObj, long F_nativeObj, long perViewErrors_nativeObj, int flags, int criteria_type, int criteria_maxCount, double criteria_epsilon); 11793 private static native double stereoCalibrateExtended_1(long objectPoints_mat_nativeObj, long imagePoints1_mat_nativeObj, long imagePoints2_mat_nativeObj, long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj, long E_nativeObj, long F_nativeObj, long perViewErrors_nativeObj, int flags); 11794 private static native double stereoCalibrateExtended_2(long objectPoints_mat_nativeObj, long imagePoints1_mat_nativeObj, long imagePoints2_mat_nativeObj, long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj, long E_nativeObj, long F_nativeObj, long perViewErrors_nativeObj); 11795 11796 // C++: double cv::stereoCalibrate(vector_Mat objectPoints, vector_Mat imagePoints1, vector_Mat imagePoints2, Mat& cameraMatrix1, Mat& distCoeffs1, Mat& cameraMatrix2, Mat& distCoeffs2, Size imageSize, Mat& R, Mat& T, Mat& E, Mat& F, int flags = CALIB_FIX_INTRINSIC, TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 1e-6)) 11797 private static native double stereoCalibrate_0(long objectPoints_mat_nativeObj, long imagePoints1_mat_nativeObj, long imagePoints2_mat_nativeObj, long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj, long E_nativeObj, long F_nativeObj, int flags, int criteria_type, int criteria_maxCount, double criteria_epsilon); 11798 private static native double stereoCalibrate_1(long objectPoints_mat_nativeObj, long imagePoints1_mat_nativeObj, long imagePoints2_mat_nativeObj, long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj, long E_nativeObj, long F_nativeObj, int flags); 11799 private static native double stereoCalibrate_2(long objectPoints_mat_nativeObj, long imagePoints1_mat_nativeObj, long imagePoints2_mat_nativeObj, long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj, long E_nativeObj, long F_nativeObj); 11800 11801 // C++: void cv::stereoRectify(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat& R1, Mat& R2, Mat& P1, Mat& P2, Mat& Q, int flags = CALIB_ZERO_DISPARITY, double alpha = -1, Size newImageSize = Size(), Rect* validPixROI1 = 0, Rect* validPixROI2 = 0) 11802 private static native void stereoRectify_0(long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj, long R1_nativeObj, long R2_nativeObj, long P1_nativeObj, long P2_nativeObj, long Q_nativeObj, int flags, double alpha, double newImageSize_width, double newImageSize_height, double[] validPixROI1_out, double[] validPixROI2_out); 11803 private static native void 
stereoRectify_1(long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj, long R1_nativeObj, long R2_nativeObj, long P1_nativeObj, long P2_nativeObj, long Q_nativeObj, int flags, double alpha, double newImageSize_width, double newImageSize_height, double[] validPixROI1_out); 11804 private static native void stereoRectify_2(long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj, long R1_nativeObj, long R2_nativeObj, long P1_nativeObj, long P2_nativeObj, long Q_nativeObj, int flags, double alpha, double newImageSize_width, double newImageSize_height); 11805 private static native void stereoRectify_3(long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj, long R1_nativeObj, long R2_nativeObj, long P1_nativeObj, long P2_nativeObj, long Q_nativeObj, int flags, double alpha); 11806 private static native void stereoRectify_4(long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj, long R1_nativeObj, long R2_nativeObj, long P1_nativeObj, long P2_nativeObj, long Q_nativeObj, int flags); 11807 private static native void stereoRectify_5(long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj, long R1_nativeObj, long R2_nativeObj, long P1_nativeObj, long P2_nativeObj, long Q_nativeObj); 11808 11809 // C++: bool cv::stereoRectifyUncalibrated(Mat points1, Mat points2, Mat F, Size imgSize, Mat& H1, Mat& H2, double threshold = 5) 11810 private static native boolean stereoRectifyUncalibrated_0(long points1_nativeObj, long points2_nativeObj, long F_nativeObj, double imgSize_width, double imgSize_height, long H1_nativeObj, long H2_nativeObj, double threshold); 11811 private static native boolean stereoRectifyUncalibrated_1(long points1_nativeObj, long points2_nativeObj, long F_nativeObj, double imgSize_width, double imgSize_height, long H1_nativeObj, long H2_nativeObj); 11812 11813 // C++: float cv::rectify3Collinear(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Mat cameraMatrix3, Mat distCoeffs3, vector_Mat imgpt1, vector_Mat imgpt3, Size imageSize, Mat R12, Mat T12, Mat R13, Mat T13, Mat& R1, Mat& R2, Mat& R3, Mat& P1, Mat& P2, Mat& P3, Mat& Q, double alpha, Size newImgSize, Rect* roi1, Rect* roi2, int flags) 11814 private static native float rectify3Collinear_0(long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, long cameraMatrix3_nativeObj, long distCoeffs3_nativeObj, long imgpt1_mat_nativeObj, long imgpt3_mat_nativeObj, double imageSize_width, double imageSize_height, long R12_nativeObj, long T12_nativeObj, long R13_nativeObj, long T13_nativeObj, long R1_nativeObj, long R2_nativeObj, long R3_nativeObj, long P1_nativeObj, long P2_nativeObj, long P3_nativeObj, long Q_nativeObj, double alpha, double newImgSize_width, double newImgSize_height, double[] roi1_out, double[] roi2_out, int flags); 11815 11816 // C++: 
Mat cv::getOptimalNewCameraMatrix(Mat cameraMatrix, Mat distCoeffs, Size imageSize, double alpha, Size newImgSize = Size(), Rect* validPixROI = 0, bool centerPrincipalPoint = false) 11817 private static native long getOptimalNewCameraMatrix_0(long cameraMatrix_nativeObj, long distCoeffs_nativeObj, double imageSize_width, double imageSize_height, double alpha, double newImgSize_width, double newImgSize_height, double[] validPixROI_out, boolean centerPrincipalPoint); 11818 private static native long getOptimalNewCameraMatrix_1(long cameraMatrix_nativeObj, long distCoeffs_nativeObj, double imageSize_width, double imageSize_height, double alpha, double newImgSize_width, double newImgSize_height, double[] validPixROI_out); 11819 private static native long getOptimalNewCameraMatrix_2(long cameraMatrix_nativeObj, long distCoeffs_nativeObj, double imageSize_width, double imageSize_height, double alpha, double newImgSize_width, double newImgSize_height); 11820 private static native long getOptimalNewCameraMatrix_3(long cameraMatrix_nativeObj, long distCoeffs_nativeObj, double imageSize_width, double imageSize_height, double alpha); 11821 11822 // C++: void cv::calibrateHandEye(vector_Mat R_gripper2base, vector_Mat t_gripper2base, vector_Mat R_target2cam, vector_Mat t_target2cam, Mat& R_cam2gripper, Mat& t_cam2gripper, HandEyeCalibrationMethod method = CALIB_HAND_EYE_TSAI) 11823 private static native void calibrateHandEye_0(long R_gripper2base_mat_nativeObj, long t_gripper2base_mat_nativeObj, long R_target2cam_mat_nativeObj, long t_target2cam_mat_nativeObj, long R_cam2gripper_nativeObj, long t_cam2gripper_nativeObj, int method); 11824 private static native void calibrateHandEye_1(long R_gripper2base_mat_nativeObj, long t_gripper2base_mat_nativeObj, long R_target2cam_mat_nativeObj, long t_target2cam_mat_nativeObj, long R_cam2gripper_nativeObj, long t_cam2gripper_nativeObj); 11825 11826 // C++: void cv::calibrateRobotWorldHandEye(vector_Mat R_world2cam, vector_Mat t_world2cam, vector_Mat R_base2gripper, vector_Mat t_base2gripper, Mat& R_base2world, Mat& t_base2world, Mat& R_gripper2cam, Mat& t_gripper2cam, RobotWorldHandEyeCalibrationMethod method = CALIB_ROBOT_WORLD_HAND_EYE_SHAH) 11827 private static native void calibrateRobotWorldHandEye_0(long R_world2cam_mat_nativeObj, long t_world2cam_mat_nativeObj, long R_base2gripper_mat_nativeObj, long t_base2gripper_mat_nativeObj, long R_base2world_nativeObj, long t_base2world_nativeObj, long R_gripper2cam_nativeObj, long t_gripper2cam_nativeObj, int method); 11828 private static native void calibrateRobotWorldHandEye_1(long R_world2cam_mat_nativeObj, long t_world2cam_mat_nativeObj, long R_base2gripper_mat_nativeObj, long t_base2gripper_mat_nativeObj, long R_base2world_nativeObj, long t_base2world_nativeObj, long R_gripper2cam_nativeObj, long t_gripper2cam_nativeObj); 11829 11830 // C++: void cv::convertPointsToHomogeneous(Mat src, Mat& dst) 11831 private static native void convertPointsToHomogeneous_0(long src_nativeObj, long dst_nativeObj); 11832 11833 // C++: void cv::convertPointsFromHomogeneous(Mat src, Mat& dst) 11834 private static native void convertPointsFromHomogeneous_0(long src_nativeObj, long dst_nativeObj); 11835 11836 // C++: Mat cv::findFundamentalMat(vector_Point2f points1, vector_Point2f points2, int method, double ransacReprojThreshold, double confidence, int maxIters, Mat& mask = Mat()) 11837 private static native long findFundamentalMat_0(long points1_mat_nativeObj, long points2_mat_nativeObj, int method, double ransacReprojThreshold, 
double confidence, int maxIters, long mask_nativeObj); 11838 private static native long findFundamentalMat_1(long points1_mat_nativeObj, long points2_mat_nativeObj, int method, double ransacReprojThreshold, double confidence, int maxIters); 11839 11840 // C++: Mat cv::findFundamentalMat(vector_Point2f points1, vector_Point2f points2, int method = FM_RANSAC, double ransacReprojThreshold = 3., double confidence = 0.99, Mat& mask = Mat()) 11841 private static native long findFundamentalMat_2(long points1_mat_nativeObj, long points2_mat_nativeObj, int method, double ransacReprojThreshold, double confidence, long mask_nativeObj); 11842 private static native long findFundamentalMat_3(long points1_mat_nativeObj, long points2_mat_nativeObj, int method, double ransacReprojThreshold, double confidence); 11843 private static native long findFundamentalMat_4(long points1_mat_nativeObj, long points2_mat_nativeObj, int method, double ransacReprojThreshold); 11844 private static native long findFundamentalMat_5(long points1_mat_nativeObj, long points2_mat_nativeObj, int method); 11845 private static native long findFundamentalMat_6(long points1_mat_nativeObj, long points2_mat_nativeObj); 11846 11847 // C++: Mat cv::findFundamentalMat(vector_Point2f points1, vector_Point2f points2, Mat& mask, UsacParams params) 11848 private static native long findFundamentalMat_7(long points1_mat_nativeObj, long points2_mat_nativeObj, long mask_nativeObj, long params_nativeObj); 11849 11850 // C++: Mat cv::findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix, int method = RANSAC, double prob = 0.999, double threshold = 1.0, int maxIters = 1000, Mat& mask = Mat()) 11851 private static native long findEssentialMat_0(long points1_nativeObj, long points2_nativeObj, long cameraMatrix_nativeObj, int method, double prob, double threshold, int maxIters, long mask_nativeObj); 11852 private static native long findEssentialMat_1(long points1_nativeObj, long points2_nativeObj, long cameraMatrix_nativeObj, int method, double prob, double threshold, int maxIters); 11853 private static native long findEssentialMat_2(long points1_nativeObj, long points2_nativeObj, long cameraMatrix_nativeObj, int method, double prob, double threshold); 11854 private static native long findEssentialMat_3(long points1_nativeObj, long points2_nativeObj, long cameraMatrix_nativeObj, int method, double prob); 11855 private static native long findEssentialMat_4(long points1_nativeObj, long points2_nativeObj, long cameraMatrix_nativeObj, int method); 11856 private static native long findEssentialMat_5(long points1_nativeObj, long points2_nativeObj, long cameraMatrix_nativeObj); 11857 11858 // C++: Mat cv::findEssentialMat(Mat points1, Mat points2, double focal = 1.0, Point2d pp = Point2d(0, 0), int method = RANSAC, double prob = 0.999, double threshold = 1.0, int maxIters = 1000, Mat& mask = Mat()) 11859 private static native long findEssentialMat_6(long points1_nativeObj, long points2_nativeObj, double focal, double pp_x, double pp_y, int method, double prob, double threshold, int maxIters, long mask_nativeObj); 11860 private static native long findEssentialMat_7(long points1_nativeObj, long points2_nativeObj, double focal, double pp_x, double pp_y, int method, double prob, double threshold, int maxIters); 11861 private static native long findEssentialMat_8(long points1_nativeObj, long points2_nativeObj, double focal, double pp_x, double pp_y, int method, double prob, double threshold); 11862 private static native long findEssentialMat_9(long 
points1_nativeObj, long points2_nativeObj, double focal, double pp_x, double pp_y, int method, double prob); 11863 private static native long findEssentialMat_10(long points1_nativeObj, long points2_nativeObj, double focal, double pp_x, double pp_y, int method); 11864 private static native long findEssentialMat_11(long points1_nativeObj, long points2_nativeObj, double focal, double pp_x, double pp_y); 11865 private static native long findEssentialMat_12(long points1_nativeObj, long points2_nativeObj, double focal); 11866 private static native long findEssentialMat_13(long points1_nativeObj, long points2_nativeObj); 11867 11868 // C++: Mat cv::findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, int method = RANSAC, double prob = 0.999, double threshold = 1.0, Mat& mask = Mat()) 11869 private static native long findEssentialMat_14(long points1_nativeObj, long points2_nativeObj, long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, int method, double prob, double threshold, long mask_nativeObj); 11870 private static native long findEssentialMat_15(long points1_nativeObj, long points2_nativeObj, long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, int method, double prob, double threshold); 11871 private static native long findEssentialMat_16(long points1_nativeObj, long points2_nativeObj, long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, int method, double prob); 11872 private static native long findEssentialMat_17(long points1_nativeObj, long points2_nativeObj, long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, int method); 11873 private static native long findEssentialMat_18(long points1_nativeObj, long points2_nativeObj, long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj); 11874 11875 // C++: Mat cv::findEssentialMat(Mat points1, Mat points2, Mat cameraMatrix1, Mat cameraMatrix2, Mat dist_coeff1, Mat dist_coeff2, Mat& mask, UsacParams params) 11876 private static native long findEssentialMat_19(long points1_nativeObj, long points2_nativeObj, long cameraMatrix1_nativeObj, long cameraMatrix2_nativeObj, long dist_coeff1_nativeObj, long dist_coeff2_nativeObj, long mask_nativeObj, long params_nativeObj); 11877 11878 // C++: void cv::decomposeEssentialMat(Mat E, Mat& R1, Mat& R2, Mat& t) 11879 private static native void decomposeEssentialMat_0(long E_nativeObj, long R1_nativeObj, long R2_nativeObj, long t_nativeObj); 11880 11881 // C++: int cv::recoverPose(Mat points1, Mat points2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Mat& E, Mat& R, Mat& t, int method = cv::RANSAC, double prob = 0.999, double threshold = 1.0, Mat& mask = Mat()) 11882 private static native int recoverPose_0(long points1_nativeObj, long points2_nativeObj, long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, long E_nativeObj, long R_nativeObj, long t_nativeObj, int method, double prob, double threshold, long mask_nativeObj); 11883 private static native int recoverPose_1(long points1_nativeObj, long points2_nativeObj, long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, long E_nativeObj, long 
R_nativeObj, long t_nativeObj, int method, double prob, double threshold); 11884 private static native int recoverPose_2(long points1_nativeObj, long points2_nativeObj, long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, long E_nativeObj, long R_nativeObj, long t_nativeObj, int method, double prob); 11885 private static native int recoverPose_3(long points1_nativeObj, long points2_nativeObj, long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, long E_nativeObj, long R_nativeObj, long t_nativeObj, int method); 11886 private static native int recoverPose_4(long points1_nativeObj, long points2_nativeObj, long cameraMatrix1_nativeObj, long distCoeffs1_nativeObj, long cameraMatrix2_nativeObj, long distCoeffs2_nativeObj, long E_nativeObj, long R_nativeObj, long t_nativeObj); 11887 11888 // C++: int cv::recoverPose(Mat E, Mat points1, Mat points2, Mat cameraMatrix, Mat& R, Mat& t, Mat& mask = Mat()) 11889 private static native int recoverPose_5(long E_nativeObj, long points1_nativeObj, long points2_nativeObj, long cameraMatrix_nativeObj, long R_nativeObj, long t_nativeObj, long mask_nativeObj); 11890 private static native int recoverPose_6(long E_nativeObj, long points1_nativeObj, long points2_nativeObj, long cameraMatrix_nativeObj, long R_nativeObj, long t_nativeObj); 11891 11892 // C++: int cv::recoverPose(Mat E, Mat points1, Mat points2, Mat& R, Mat& t, double focal = 1.0, Point2d pp = Point2d(0, 0), Mat& mask = Mat()) 11893 private static native int recoverPose_7(long E_nativeObj, long points1_nativeObj, long points2_nativeObj, long R_nativeObj, long t_nativeObj, double focal, double pp_x, double pp_y, long mask_nativeObj); 11894 private static native int recoverPose_8(long E_nativeObj, long points1_nativeObj, long points2_nativeObj, long R_nativeObj, long t_nativeObj, double focal, double pp_x, double pp_y); 11895 private static native int recoverPose_9(long E_nativeObj, long points1_nativeObj, long points2_nativeObj, long R_nativeObj, long t_nativeObj, double focal); 11896 private static native int recoverPose_10(long E_nativeObj, long points1_nativeObj, long points2_nativeObj, long R_nativeObj, long t_nativeObj); 11897 11898 // C++: int cv::recoverPose(Mat E, Mat points1, Mat points2, Mat cameraMatrix, Mat& R, Mat& t, double distanceThresh, Mat& mask = Mat(), Mat& triangulatedPoints = Mat()) 11899 private static native int recoverPose_11(long E_nativeObj, long points1_nativeObj, long points2_nativeObj, long cameraMatrix_nativeObj, long R_nativeObj, long t_nativeObj, double distanceThresh, long mask_nativeObj, long triangulatedPoints_nativeObj); 11900 private static native int recoverPose_12(long E_nativeObj, long points1_nativeObj, long points2_nativeObj, long cameraMatrix_nativeObj, long R_nativeObj, long t_nativeObj, double distanceThresh, long mask_nativeObj); 11901 private static native int recoverPose_13(long E_nativeObj, long points1_nativeObj, long points2_nativeObj, long cameraMatrix_nativeObj, long R_nativeObj, long t_nativeObj, double distanceThresh); 11902 11903 // C++: void cv::computeCorrespondEpilines(Mat points, int whichImage, Mat F, Mat& lines) 11904 private static native void computeCorrespondEpilines_0(long points_nativeObj, int whichImage, long F_nativeObj, long lines_nativeObj); 11905 11906 // C++: void cv::triangulatePoints(Mat projMatr1, Mat projMatr2, Mat projPoints1, Mat projPoints2, Mat& points4D) 11907 private static native void 
triangulatePoints_0(long projMatr1_nativeObj, long projMatr2_nativeObj, long projPoints1_nativeObj, long projPoints2_nativeObj, long points4D_nativeObj); 11908 11909 // C++: void cv::correctMatches(Mat F, Mat points1, Mat points2, Mat& newPoints1, Mat& newPoints2) 11910 private static native void correctMatches_0(long F_nativeObj, long points1_nativeObj, long points2_nativeObj, long newPoints1_nativeObj, long newPoints2_nativeObj); 11911 11912 // C++: void cv::filterSpeckles(Mat& img, double newVal, int maxSpeckleSize, double maxDiff, Mat& buf = Mat()) 11913 private static native void filterSpeckles_0(long img_nativeObj, double newVal, int maxSpeckleSize, double maxDiff, long buf_nativeObj); 11914 private static native void filterSpeckles_1(long img_nativeObj, double newVal, int maxSpeckleSize, double maxDiff); 11915 11916 // C++: Rect cv::getValidDisparityROI(Rect roi1, Rect roi2, int minDisparity, int numberOfDisparities, int blockSize) 11917 private static native double[] getValidDisparityROI_0(int roi1_x, int roi1_y, int roi1_width, int roi1_height, int roi2_x, int roi2_y, int roi2_width, int roi2_height, int minDisparity, int numberOfDisparities, int blockSize); 11918 11919 // C++: void cv::validateDisparity(Mat& disparity, Mat cost, int minDisparity, int numberOfDisparities, int disp12MaxDisp = 1) 11920 private static native void validateDisparity_0(long disparity_nativeObj, long cost_nativeObj, int minDisparity, int numberOfDisparities, int disp12MaxDisp); 11921 private static native void validateDisparity_1(long disparity_nativeObj, long cost_nativeObj, int minDisparity, int numberOfDisparities); 11922 11923 // C++: void cv::reprojectImageTo3D(Mat disparity, Mat& _3dImage, Mat Q, bool handleMissingValues = false, int ddepth = -1) 11924 private static native void reprojectImageTo3D_0(long disparity_nativeObj, long _3dImage_nativeObj, long Q_nativeObj, boolean handleMissingValues, int ddepth); 11925 private static native void reprojectImageTo3D_1(long disparity_nativeObj, long _3dImage_nativeObj, long Q_nativeObj, boolean handleMissingValues); 11926 private static native void reprojectImageTo3D_2(long disparity_nativeObj, long _3dImage_nativeObj, long Q_nativeObj); 11927 11928 // C++: double cv::sampsonDistance(Mat pt1, Mat pt2, Mat F) 11929 private static native double sampsonDistance_0(long pt1_nativeObj, long pt2_nativeObj, long F_nativeObj); 11930 11931 // C++: int cv::estimateAffine3D(Mat src, Mat dst, Mat& out, Mat& inliers, double ransacThreshold = 3, double confidence = 0.99) 11932 private static native int estimateAffine3D_0(long src_nativeObj, long dst_nativeObj, long out_nativeObj, long inliers_nativeObj, double ransacThreshold, double confidence); 11933 private static native int estimateAffine3D_1(long src_nativeObj, long dst_nativeObj, long out_nativeObj, long inliers_nativeObj, double ransacThreshold); 11934 private static native int estimateAffine3D_2(long src_nativeObj, long dst_nativeObj, long out_nativeObj, long inliers_nativeObj); 11935 11936 // C++: Mat cv::estimateAffine3D(Mat src, Mat dst, double* scale = nullptr, bool force_rotation = true) 11937 private static native long estimateAffine3D_3(long src_nativeObj, long dst_nativeObj, double[] scale_out, boolean force_rotation); 11938 private static native long estimateAffine3D_4(long src_nativeObj, long dst_nativeObj, double[] scale_out); 11939 private static native long estimateAffine3D_5(long src_nativeObj, long dst_nativeObj); 11940 11941 // C++: int cv::estimateTranslation3D(Mat src, Mat dst, Mat& out, Mat& 
inliers, double ransacThreshold = 3, double confidence = 0.99) 11942 private static native int estimateTranslation3D_0(long src_nativeObj, long dst_nativeObj, long out_nativeObj, long inliers_nativeObj, double ransacThreshold, double confidence); 11943 private static native int estimateTranslation3D_1(long src_nativeObj, long dst_nativeObj, long out_nativeObj, long inliers_nativeObj, double ransacThreshold); 11944 private static native int estimateTranslation3D_2(long src_nativeObj, long dst_nativeObj, long out_nativeObj, long inliers_nativeObj); 11945 11946 // C++: Mat cv::estimateAffine2D(Mat from, Mat to, Mat& inliers = Mat(), int method = RANSAC, double ransacReprojThreshold = 3, size_t maxIters = 2000, double confidence = 0.99, size_t refineIters = 10) 11947 private static native long estimateAffine2D_0(long from_nativeObj, long to_nativeObj, long inliers_nativeObj, int method, double ransacReprojThreshold, long maxIters, double confidence, long refineIters); 11948 private static native long estimateAffine2D_1(long from_nativeObj, long to_nativeObj, long inliers_nativeObj, int method, double ransacReprojThreshold, long maxIters, double confidence); 11949 private static native long estimateAffine2D_2(long from_nativeObj, long to_nativeObj, long inliers_nativeObj, int method, double ransacReprojThreshold, long maxIters); 11950 private static native long estimateAffine2D_3(long from_nativeObj, long to_nativeObj, long inliers_nativeObj, int method, double ransacReprojThreshold); 11951 private static native long estimateAffine2D_4(long from_nativeObj, long to_nativeObj, long inliers_nativeObj, int method); 11952 private static native long estimateAffine2D_5(long from_nativeObj, long to_nativeObj, long inliers_nativeObj); 11953 private static native long estimateAffine2D_6(long from_nativeObj, long to_nativeObj); 11954 11955 // C++: Mat cv::estimateAffine2D(Mat pts1, Mat pts2, Mat& inliers, UsacParams params) 11956 private static native long estimateAffine2D_7(long pts1_nativeObj, long pts2_nativeObj, long inliers_nativeObj, long params_nativeObj); 11957 11958 // C++: Mat cv::estimateAffinePartial2D(Mat from, Mat to, Mat& inliers = Mat(), int method = RANSAC, double ransacReprojThreshold = 3, size_t maxIters = 2000, double confidence = 0.99, size_t refineIters = 10) 11959 private static native long estimateAffinePartial2D_0(long from_nativeObj, long to_nativeObj, long inliers_nativeObj, int method, double ransacReprojThreshold, long maxIters, double confidence, long refineIters); 11960 private static native long estimateAffinePartial2D_1(long from_nativeObj, long to_nativeObj, long inliers_nativeObj, int method, double ransacReprojThreshold, long maxIters, double confidence); 11961 private static native long estimateAffinePartial2D_2(long from_nativeObj, long to_nativeObj, long inliers_nativeObj, int method, double ransacReprojThreshold, long maxIters); 11962 private static native long estimateAffinePartial2D_3(long from_nativeObj, long to_nativeObj, long inliers_nativeObj, int method, double ransacReprojThreshold); 11963 private static native long estimateAffinePartial2D_4(long from_nativeObj, long to_nativeObj, long inliers_nativeObj, int method); 11964 private static native long estimateAffinePartial2D_5(long from_nativeObj, long to_nativeObj, long inliers_nativeObj); 11965 private static native long estimateAffinePartial2D_6(long from_nativeObj, long to_nativeObj); 11966 11967 // C++: int cv::decomposeHomographyMat(Mat H, Mat K, vector_Mat& rotations, vector_Mat& translations, 
vector_Mat& normals) 11968 private static native int decomposeHomographyMat_0(long H_nativeObj, long K_nativeObj, long rotations_mat_nativeObj, long translations_mat_nativeObj, long normals_mat_nativeObj); 11969 11970 // C++: void cv::filterHomographyDecompByVisibleRefpoints(vector_Mat rotations, vector_Mat normals, Mat beforePoints, Mat afterPoints, Mat& possibleSolutions, Mat pointsMask = Mat()) 11971 private static native void filterHomographyDecompByVisibleRefpoints_0(long rotations_mat_nativeObj, long normals_mat_nativeObj, long beforePoints_nativeObj, long afterPoints_nativeObj, long possibleSolutions_nativeObj, long pointsMask_nativeObj); 11972 private static native void filterHomographyDecompByVisibleRefpoints_1(long rotations_mat_nativeObj, long normals_mat_nativeObj, long beforePoints_nativeObj, long afterPoints_nativeObj, long possibleSolutions_nativeObj); 11973 11974 // C++: void cv::undistort(Mat src, Mat& dst, Mat cameraMatrix, Mat distCoeffs, Mat newCameraMatrix = Mat()) 11975 private static native void undistort_0(long src_nativeObj, long dst_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long newCameraMatrix_nativeObj); 11976 private static native void undistort_1(long src_nativeObj, long dst_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj); 11977 11978 // C++: void cv::initUndistortRectifyMap(Mat cameraMatrix, Mat distCoeffs, Mat R, Mat newCameraMatrix, Size size, int m1type, Mat& map1, Mat& map2) 11979 private static native void initUndistortRectifyMap_0(long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long R_nativeObj, long newCameraMatrix_nativeObj, double size_width, double size_height, int m1type, long map1_nativeObj, long map2_nativeObj); 11980 11981 // C++: void cv::initInverseRectificationMap(Mat cameraMatrix, Mat distCoeffs, Mat R, Mat newCameraMatrix, Size size, int m1type, Mat& map1, Mat& map2) 11982 private static native void initInverseRectificationMap_0(long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long R_nativeObj, long newCameraMatrix_nativeObj, double size_width, double size_height, int m1type, long map1_nativeObj, long map2_nativeObj); 11983 11984 // C++: Mat cv::getDefaultNewCameraMatrix(Mat cameraMatrix, Size imgsize = Size(), bool centerPrincipalPoint = false) 11985 private static native long getDefaultNewCameraMatrix_0(long cameraMatrix_nativeObj, double imgsize_width, double imgsize_height, boolean centerPrincipalPoint); 11986 private static native long getDefaultNewCameraMatrix_1(long cameraMatrix_nativeObj, double imgsize_width, double imgsize_height); 11987 private static native long getDefaultNewCameraMatrix_2(long cameraMatrix_nativeObj); 11988 11989 // C++: void cv::undistortPoints(vector_Point2f src, vector_Point2f& dst, Mat cameraMatrix, Mat distCoeffs, Mat R = Mat(), Mat P = Mat()) 11990 private static native void undistortPoints_0(long src_mat_nativeObj, long dst_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long R_nativeObj, long P_nativeObj); 11991 private static native void undistortPoints_1(long src_mat_nativeObj, long dst_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long R_nativeObj); 11992 private static native void undistortPoints_2(long src_mat_nativeObj, long dst_mat_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj); 11993 11994 // C++: void cv::undistortPoints(Mat src, Mat& dst, Mat cameraMatrix, Mat distCoeffs, Mat R, Mat P, TermCriteria criteria) 11995 private static native void undistortPointsIter_0(long 

    // C++: void cv::undistortPoints(Mat src, Mat& dst, Mat cameraMatrix, Mat distCoeffs, Mat R, Mat P, TermCriteria criteria)
    private static native void undistortPointsIter_0(long src_nativeObj, long dst_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, long R_nativeObj, long P_nativeObj, int criteria_type, int criteria_maxCount, double criteria_epsilon);

    // C++: void cv::undistortImagePoints(Mat src, Mat& dst, Mat cameraMatrix, Mat distCoeffs, TermCriteria arg1 = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 5, 0.01))
    private static native void undistortImagePoints_0(long src_nativeObj, long dst_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj, int arg1_type, int arg1_maxCount, double arg1_epsilon);
    private static native void undistortImagePoints_1(long src_nativeObj, long dst_nativeObj, long cameraMatrix_nativeObj, long distCoeffs_nativeObj);

    // C++: void cv::fisheye::projectPoints(Mat objectPoints, Mat& imagePoints, Mat rvec, Mat tvec, Mat K, Mat D, double alpha = 0, Mat& jacobian = Mat())
    private static native void fisheye_projectPoints_0(long objectPoints_nativeObj, long imagePoints_nativeObj, long rvec_nativeObj, long tvec_nativeObj, long K_nativeObj, long D_nativeObj, double alpha, long jacobian_nativeObj);
    private static native void fisheye_projectPoints_1(long objectPoints_nativeObj, long imagePoints_nativeObj, long rvec_nativeObj, long tvec_nativeObj, long K_nativeObj, long D_nativeObj, double alpha);
    private static native void fisheye_projectPoints_2(long objectPoints_nativeObj, long imagePoints_nativeObj, long rvec_nativeObj, long tvec_nativeObj, long K_nativeObj, long D_nativeObj);

    // C++: void cv::fisheye::distortPoints(Mat undistorted, Mat& distorted, Mat K, Mat D, double alpha = 0)
    private static native void fisheye_distortPoints_0(long undistorted_nativeObj, long distorted_nativeObj, long K_nativeObj, long D_nativeObj, double alpha);
    private static native void fisheye_distortPoints_1(long undistorted_nativeObj, long distorted_nativeObj, long K_nativeObj, long D_nativeObj);

    // C++: void cv::fisheye::undistortPoints(Mat distorted, Mat& undistorted, Mat K, Mat D, Mat R = Mat(), Mat P = Mat(), TermCriteria criteria = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 10, 1e-8))
    private static native void fisheye_undistortPoints_0(long distorted_nativeObj, long undistorted_nativeObj, long K_nativeObj, long D_nativeObj, long R_nativeObj, long P_nativeObj, int criteria_type, int criteria_maxCount, double criteria_epsilon);
    private static native void fisheye_undistortPoints_1(long distorted_nativeObj, long undistorted_nativeObj, long K_nativeObj, long D_nativeObj, long R_nativeObj, long P_nativeObj);
    private static native void fisheye_undistortPoints_2(long distorted_nativeObj, long undistorted_nativeObj, long K_nativeObj, long D_nativeObj, long R_nativeObj);
    private static native void fisheye_undistortPoints_3(long distorted_nativeObj, long undistorted_nativeObj, long K_nativeObj, long D_nativeObj);

    // C++: void cv::fisheye::initUndistortRectifyMap(Mat K, Mat D, Mat R, Mat P, Size size, int m1type, Mat& map1, Mat& map2)
    private static native void fisheye_initUndistortRectifyMap_0(long K_nativeObj, long D_nativeObj, long R_nativeObj, long P_nativeObj, double size_width, double size_height, int m1type, long map1_nativeObj, long map2_nativeObj);
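
    // Usage sketch (illustrative, not part of the generated bindings):
    // building fisheye rectification maps once and reusing them per frame
    // with Imgproc.remap. K and D stand in for calibrated fisheye intrinsics.
    //
    //   Mat map1 = new Mat(), map2 = new Mat();
    //   Calib3d.fisheye_initUndistortRectifyMap(K, D, Mat.eye(3, 3, CvType.CV_64F), K,
    //           new Size(640, 480), CvType.CV_16SC2, map1, map2);
    //   Imgproc.remap(frame, rectified, map1, map2, Imgproc.INTER_LINEAR);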

    // C++: void cv::fisheye::undistortImage(Mat distorted, Mat& undistorted, Mat K, Mat D, Mat Knew = cv::Mat(), Size new_size = Size())
    private static native void fisheye_undistortImage_0(long distorted_nativeObj, long undistorted_nativeObj, long K_nativeObj, long D_nativeObj, long Knew_nativeObj, double new_size_width, double new_size_height);
    private static native void fisheye_undistortImage_1(long distorted_nativeObj, long undistorted_nativeObj, long K_nativeObj, long D_nativeObj, long Knew_nativeObj);
    private static native void fisheye_undistortImage_2(long distorted_nativeObj, long undistorted_nativeObj, long K_nativeObj, long D_nativeObj);

    // C++: void cv::fisheye::estimateNewCameraMatrixForUndistortRectify(Mat K, Mat D, Size image_size, Mat R, Mat& P, double balance = 0.0, Size new_size = Size(), double fov_scale = 1.0)
    private static native void fisheye_estimateNewCameraMatrixForUndistortRectify_0(long K_nativeObj, long D_nativeObj, double image_size_width, double image_size_height, long R_nativeObj, long P_nativeObj, double balance, double new_size_width, double new_size_height, double fov_scale);
    private static native void fisheye_estimateNewCameraMatrixForUndistortRectify_1(long K_nativeObj, long D_nativeObj, double image_size_width, double image_size_height, long R_nativeObj, long P_nativeObj, double balance, double new_size_width, double new_size_height);
    private static native void fisheye_estimateNewCameraMatrixForUndistortRectify_2(long K_nativeObj, long D_nativeObj, double image_size_width, double image_size_height, long R_nativeObj, long P_nativeObj, double balance);
    private static native void fisheye_estimateNewCameraMatrixForUndistortRectify_3(long K_nativeObj, long D_nativeObj, double image_size_width, double image_size_height, long R_nativeObj, long P_nativeObj);

    // C++: double cv::fisheye::calibrate(vector_Mat objectPoints, vector_Mat imagePoints, Size image_size, Mat& K, Mat& D, vector_Mat& rvecs, vector_Mat& tvecs, int flags = 0, TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, DBL_EPSILON))
    private static native double fisheye_calibrate_0(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double image_size_width, double image_size_height, long K_nativeObj, long D_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, int flags, int criteria_type, int criteria_maxCount, double criteria_epsilon);
    private static native double fisheye_calibrate_1(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double image_size_width, double image_size_height, long K_nativeObj, long D_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj, int flags);
    private static native double fisheye_calibrate_2(long objectPoints_mat_nativeObj, long imagePoints_mat_nativeObj, double image_size_width, double image_size_height, long K_nativeObj, long D_nativeObj, long rvecs_mat_nativeObj, long tvecs_mat_nativeObj);
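
    // Usage sketch (illustrative, not part of the generated bindings):
    // calibrating a fisheye camera from detected pattern corners.
    // objectPoints/imagePoints are hypothetical lists filled by a
    // corner-detection loop; the return value is the RMS reprojection error.
    //
    //   Mat K = new Mat(), D = new Mat();
    //   List<Mat> rvecs = new ArrayList<>(), tvecs = new ArrayList<>();
    //   int flags = Calib3d.fisheye_CALIB_RECOMPUTE_EXTRINSIC | Calib3d.fisheye_CALIB_FIX_SKEW;
    //   double rms = Calib3d.fisheye_calibrate(objectPoints, imagePoints,
    //           new Size(640, 480), K, D, rvecs, tvecs, flags);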

    // C++: void cv::fisheye::stereoRectify(Mat K1, Mat D1, Mat K2, Mat D2, Size imageSize, Mat R, Mat tvec, Mat& R1, Mat& R2, Mat& P1, Mat& P2, Mat& Q, int flags, Size newImageSize = Size(), double balance = 0.0, double fov_scale = 1.0)
    private static native void fisheye_stereoRectify_0(long K1_nativeObj, long D1_nativeObj, long K2_nativeObj, long D2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long tvec_nativeObj, long R1_nativeObj, long R2_nativeObj, long P1_nativeObj, long P2_nativeObj, long Q_nativeObj, int flags, double newImageSize_width, double newImageSize_height, double balance, double fov_scale);
    private static native void fisheye_stereoRectify_1(long K1_nativeObj, long D1_nativeObj, long K2_nativeObj, long D2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long tvec_nativeObj, long R1_nativeObj, long R2_nativeObj, long P1_nativeObj, long P2_nativeObj, long Q_nativeObj, int flags, double newImageSize_width, double newImageSize_height, double balance);
    private static native void fisheye_stereoRectify_2(long K1_nativeObj, long D1_nativeObj, long K2_nativeObj, long D2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long tvec_nativeObj, long R1_nativeObj, long R2_nativeObj, long P1_nativeObj, long P2_nativeObj, long Q_nativeObj, int flags, double newImageSize_width, double newImageSize_height);
    private static native void fisheye_stereoRectify_3(long K1_nativeObj, long D1_nativeObj, long K2_nativeObj, long D2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long tvec_nativeObj, long R1_nativeObj, long R2_nativeObj, long P1_nativeObj, long P2_nativeObj, long Q_nativeObj, int flags);

    // C++: double cv::fisheye::stereoCalibrate(vector_Mat objectPoints, vector_Mat imagePoints1, vector_Mat imagePoints2, Mat& K1, Mat& D1, Mat& K2, Mat& D2, Size imageSize, Mat& R, Mat& T, int flags = fisheye::CALIB_FIX_INTRINSIC, TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, DBL_EPSILON))
    private static native double fisheye_stereoCalibrate_0(long objectPoints_mat_nativeObj, long imagePoints1_mat_nativeObj, long imagePoints2_mat_nativeObj, long K1_nativeObj, long D1_nativeObj, long K2_nativeObj, long D2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj, int flags, int criteria_type, int criteria_maxCount, double criteria_epsilon);
    private static native double fisheye_stereoCalibrate_1(long objectPoints_mat_nativeObj, long imagePoints1_mat_nativeObj, long imagePoints2_mat_nativeObj, long K1_nativeObj, long D1_nativeObj, long K2_nativeObj, long D2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj, int flags);
    private static native double fisheye_stereoCalibrate_2(long objectPoints_mat_nativeObj, long imagePoints1_mat_nativeObj, long imagePoints2_mat_nativeObj, long K1_nativeObj, long D1_nativeObj, long K2_nativeObj, long D2_nativeObj, double imageSize_width, double imageSize_height, long R_nativeObj, long T_nativeObj);

}
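
// Usage sketch (illustrative, not part of the generated bindings): a fisheye
// stereo pipeline, assuming per-view corner lists and intrinsics K1/D1/K2/D2
// obtained from single-camera calibration.
//
//   Mat R = new Mat(), T = new Mat();
//   double rms = Calib3d.fisheye_stereoCalibrate(objectPoints, imagePoints1, imagePoints2,
//           K1, D1, K2, D2, imageSize, R, T, Calib3d.fisheye_CALIB_FIX_INTRINSIC);
//   Mat R1 = new Mat(), R2 = new Mat(), P1 = new Mat(), P2 = new Mat(), Q = new Mat();
//   Calib3d.fisheye_stereoRectify(K1, D1, K2, D2, imageSize, R, T,
//           R1, R2, P1, P2, Q, Calib3d.fisheye_CALIB_ZERO_DISPARITY);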