001// 002// This file is auto-generated. Please don't modify it! 003// 004package org.opencv.imgproc; 005 006import java.util.ArrayList; 007import java.util.List; 008import org.opencv.core.Mat; 009import org.opencv.core.MatOfFloat; 010import org.opencv.core.MatOfInt; 011import org.opencv.core.MatOfInt4; 012import org.opencv.core.MatOfPoint; 013import org.opencv.core.MatOfPoint2f; 014import org.opencv.core.Point; 015import org.opencv.core.Rect; 016import org.opencv.core.RotatedRect; 017import org.opencv.core.Scalar; 018import org.opencv.core.Size; 019import org.opencv.core.TermCriteria; 020import org.opencv.imgproc.CLAHE; 021import org.opencv.imgproc.GeneralizedHoughBallard; 022import org.opencv.imgproc.GeneralizedHoughGuil; 023import org.opencv.imgproc.LineSegmentDetector; 024import org.opencv.utils.Converters; 025 026// C++: class Imgproc 027 028public class Imgproc { 029 030 private static final int 031 IPL_BORDER_CONSTANT = 0, 032 IPL_BORDER_REPLICATE = 1, 033 IPL_BORDER_REFLECT = 2, 034 IPL_BORDER_WRAP = 3, 035 IPL_BORDER_REFLECT_101 = 4, 036 IPL_BORDER_TRANSPARENT = 5, 037 CV_INTER_NN = 0, 038 CV_INTER_LINEAR = 1, 039 CV_INTER_CUBIC = 2, 040 CV_INTER_AREA = 3, 041 CV_INTER_LANCZOS4 = 4, 042 CV_MOP_ERODE = 0, 043 CV_MOP_DILATE = 1, 044 CV_MOP_OPEN = 2, 045 CV_MOP_CLOSE = 3, 046 CV_MOP_GRADIENT = 4, 047 CV_MOP_TOPHAT = 5, 048 CV_MOP_BLACKHAT = 6, 049 CV_RETR_EXTERNAL = 0, 050 CV_RETR_LIST = 1, 051 CV_RETR_CCOMP = 2, 052 CV_RETR_TREE = 3, 053 CV_RETR_FLOODFILL = 4, 054 CV_CHAIN_APPROX_NONE = 1, 055 CV_CHAIN_APPROX_SIMPLE = 2, 056 CV_CHAIN_APPROX_TC89_L1 = 3, 057 CV_CHAIN_APPROX_TC89_KCOS = 4, 058 CV_THRESH_BINARY = 0, 059 CV_THRESH_BINARY_INV = 1, 060 CV_THRESH_TRUNC = 2, 061 CV_THRESH_TOZERO = 3, 062 CV_THRESH_TOZERO_INV = 4, 063 CV_THRESH_MASK = 7, 064 CV_THRESH_OTSU = 8, 065 CV_THRESH_TRIANGLE = 16; 066 067 068 // C++: enum <unnamed> 069 public static final int 070 CV_GAUSSIAN_5x5 = 7, 071 CV_SCHARR = -1, 072 CV_MAX_SOBEL_KSIZE = 7, 073 CV_RGBA2mRGBA = 125, 074 CV_mRGBA2RGBA = 126, 075 CV_WARP_FILL_OUTLIERS = 8, 076 CV_WARP_INVERSE_MAP = 16, 077 CV_CHAIN_CODE = 0, 078 CV_LINK_RUNS = 5, 079 CV_POLY_APPROX_DP = 0, 080 CV_CONTOURS_MATCH_I1 = 1, 081 CV_CONTOURS_MATCH_I2 = 2, 082 CV_CONTOURS_MATCH_I3 = 3, 083 CV_CLOCKWISE = 1, 084 CV_COUNTER_CLOCKWISE = 2, 085 CV_COMP_CORREL = 0, 086 CV_COMP_CHISQR = 1, 087 CV_COMP_INTERSECT = 2, 088 CV_COMP_BHATTACHARYYA = 3, 089 CV_COMP_HELLINGER = CV_COMP_BHATTACHARYYA, 090 CV_COMP_CHISQR_ALT = 4, 091 CV_COMP_KL_DIV = 5, 092 CV_DIST_MASK_3 = 3, 093 CV_DIST_MASK_5 = 5, 094 CV_DIST_MASK_PRECISE = 0, 095 CV_DIST_LABEL_CCOMP = 0, 096 CV_DIST_LABEL_PIXEL = 1, 097 CV_DIST_USER = -1, 098 CV_DIST_L1 = 1, 099 CV_DIST_L2 = 2, 100 CV_DIST_C = 3, 101 CV_DIST_L12 = 4, 102 CV_DIST_FAIR = 5, 103 CV_DIST_WELSCH = 6, 104 CV_DIST_HUBER = 7, 105 CV_CANNY_L2_GRADIENT = (1 << 31), 106 CV_HOUGH_STANDARD = 0, 107 CV_HOUGH_PROBABILISTIC = 1, 108 CV_HOUGH_MULTI_SCALE = 2, 109 CV_HOUGH_GRADIENT = 3; 110 111 112 // C++: enum MorphShapes_c (MorphShapes_c) 113 public static final int 114 CV_SHAPE_RECT = 0, 115 CV_SHAPE_CROSS = 1, 116 CV_SHAPE_ELLIPSE = 2, 117 CV_SHAPE_CUSTOM = 100; 118 119 120 // C++: enum SmoothMethod_c (SmoothMethod_c) 121 public static final int 122 CV_BLUR_NO_SCALE = 0, 123 CV_BLUR = 1, 124 CV_GAUSSIAN = 2, 125 CV_MEDIAN = 3, 126 CV_BILATERAL = 4; 127 128 129 // C++: enum AdaptiveThresholdTypes (cv.AdaptiveThresholdTypes) 130 public static final int 131 ADAPTIVE_THRESH_MEAN_C = 0, 132 ADAPTIVE_THRESH_GAUSSIAN_C = 1; 133 134 135 // C++: enum ColorConversionCodes (cv.ColorConversionCodes) 136 public static final int 137 COLOR_BGR2BGRA = 0, 138 COLOR_RGB2RGBA = COLOR_BGR2BGRA, 139 COLOR_BGRA2BGR = 1, 140 COLOR_RGBA2RGB = COLOR_BGRA2BGR, 141 COLOR_BGR2RGBA = 2, 142 COLOR_RGB2BGRA = COLOR_BGR2RGBA, 143 COLOR_RGBA2BGR = 3, 144 COLOR_BGRA2RGB = COLOR_RGBA2BGR, 145 COLOR_BGR2RGB = 4, 146 COLOR_RGB2BGR = COLOR_BGR2RGB, 147 COLOR_BGRA2RGBA = 5, 148 COLOR_RGBA2BGRA = COLOR_BGRA2RGBA, 149 COLOR_BGR2GRAY = 6, 150 COLOR_RGB2GRAY = 7, 151 COLOR_GRAY2BGR = 8, 152 COLOR_GRAY2RGB = COLOR_GRAY2BGR, 153 COLOR_GRAY2BGRA = 9, 154 COLOR_GRAY2RGBA = COLOR_GRAY2BGRA, 155 COLOR_BGRA2GRAY = 10, 156 COLOR_RGBA2GRAY = 11, 157 COLOR_BGR2BGR565 = 12, 158 COLOR_RGB2BGR565 = 13, 159 COLOR_BGR5652BGR = 14, 160 COLOR_BGR5652RGB = 15, 161 COLOR_BGRA2BGR565 = 16, 162 COLOR_RGBA2BGR565 = 17, 163 COLOR_BGR5652BGRA = 18, 164 COLOR_BGR5652RGBA = 19, 165 COLOR_GRAY2BGR565 = 20, 166 COLOR_BGR5652GRAY = 21, 167 COLOR_BGR2BGR555 = 22, 168 COLOR_RGB2BGR555 = 23, 169 COLOR_BGR5552BGR = 24, 170 COLOR_BGR5552RGB = 25, 171 COLOR_BGRA2BGR555 = 26, 172 COLOR_RGBA2BGR555 = 27, 173 COLOR_BGR5552BGRA = 28, 174 COLOR_BGR5552RGBA = 29, 175 COLOR_GRAY2BGR555 = 30, 176 COLOR_BGR5552GRAY = 31, 177 COLOR_BGR2XYZ = 32, 178 COLOR_RGB2XYZ = 33, 179 COLOR_XYZ2BGR = 34, 180 COLOR_XYZ2RGB = 35, 181 COLOR_BGR2YCrCb = 36, 182 COLOR_RGB2YCrCb = 37, 183 COLOR_YCrCb2BGR = 38, 184 COLOR_YCrCb2RGB = 39, 185 COLOR_BGR2HSV = 40, 186 COLOR_RGB2HSV = 41, 187 COLOR_BGR2Lab = 44, 188 COLOR_RGB2Lab = 45, 189 COLOR_BGR2Luv = 50, 190 COLOR_RGB2Luv = 51, 191 COLOR_BGR2HLS = 52, 192 COLOR_RGB2HLS = 53, 193 COLOR_HSV2BGR = 54, 194 COLOR_HSV2RGB = 55, 195 COLOR_Lab2BGR = 56, 196 COLOR_Lab2RGB = 57, 197 COLOR_Luv2BGR = 58, 198 COLOR_Luv2RGB = 59, 199 COLOR_HLS2BGR = 60, 200 COLOR_HLS2RGB = 61, 201 COLOR_BGR2HSV_FULL = 66, 202 COLOR_RGB2HSV_FULL = 67, 203 COLOR_BGR2HLS_FULL = 68, 204 COLOR_RGB2HLS_FULL = 69, 205 COLOR_HSV2BGR_FULL = 70, 206 COLOR_HSV2RGB_FULL = 71, 207 COLOR_HLS2BGR_FULL = 72, 208 COLOR_HLS2RGB_FULL = 73, 209 COLOR_LBGR2Lab = 74, 210 COLOR_LRGB2Lab = 75, 211 COLOR_LBGR2Luv = 76, 212 COLOR_LRGB2Luv = 77, 213 COLOR_Lab2LBGR = 78, 214 COLOR_Lab2LRGB = 79, 215 COLOR_Luv2LBGR = 80, 216 COLOR_Luv2LRGB = 81, 217 COLOR_BGR2YUV = 82, 218 COLOR_RGB2YUV = 83, 219 COLOR_YUV2BGR = 84, 220 COLOR_YUV2RGB = 85, 221 COLOR_YUV2RGB_NV12 = 90, 222 COLOR_YUV2BGR_NV12 = 91, 223 COLOR_YUV2RGB_NV21 = 92, 224 COLOR_YUV2BGR_NV21 = 93, 225 COLOR_YUV420sp2RGB = COLOR_YUV2RGB_NV21, 226 COLOR_YUV420sp2BGR = COLOR_YUV2BGR_NV21, 227 COLOR_YUV2RGBA_NV12 = 94, 228 COLOR_YUV2BGRA_NV12 = 95, 229 COLOR_YUV2RGBA_NV21 = 96, 230 COLOR_YUV2BGRA_NV21 = 97, 231 COLOR_YUV420sp2RGBA = COLOR_YUV2RGBA_NV21, 232 COLOR_YUV420sp2BGRA = COLOR_YUV2BGRA_NV21, 233 COLOR_YUV2RGB_YV12 = 98, 234 COLOR_YUV2BGR_YV12 = 99, 235 COLOR_YUV2RGB_IYUV = 100, 236 COLOR_YUV2BGR_IYUV = 101, 237 COLOR_YUV2RGB_I420 = COLOR_YUV2RGB_IYUV, 238 COLOR_YUV2BGR_I420 = COLOR_YUV2BGR_IYUV, 239 COLOR_YUV420p2RGB = COLOR_YUV2RGB_YV12, 240 COLOR_YUV420p2BGR = COLOR_YUV2BGR_YV12, 241 COLOR_YUV2RGBA_YV12 = 102, 242 COLOR_YUV2BGRA_YV12 = 103, 243 COLOR_YUV2RGBA_IYUV = 104, 244 COLOR_YUV2BGRA_IYUV = 105, 245 COLOR_YUV2RGBA_I420 = COLOR_YUV2RGBA_IYUV, 246 COLOR_YUV2BGRA_I420 = COLOR_YUV2BGRA_IYUV, 247 COLOR_YUV420p2RGBA = COLOR_YUV2RGBA_YV12, 248 COLOR_YUV420p2BGRA = COLOR_YUV2BGRA_YV12, 249 COLOR_YUV2GRAY_420 = 106, 250 COLOR_YUV2GRAY_NV21 = COLOR_YUV2GRAY_420, 251 COLOR_YUV2GRAY_NV12 = COLOR_YUV2GRAY_420, 252 COLOR_YUV2GRAY_YV12 = COLOR_YUV2GRAY_420, 253 COLOR_YUV2GRAY_IYUV = COLOR_YUV2GRAY_420, 254 COLOR_YUV2GRAY_I420 = COLOR_YUV2GRAY_420, 255 COLOR_YUV420sp2GRAY = COLOR_YUV2GRAY_420, 256 COLOR_YUV420p2GRAY = COLOR_YUV2GRAY_420, 257 COLOR_YUV2RGB_UYVY = 107, 258 COLOR_YUV2BGR_UYVY = 108, 259 COLOR_YUV2RGB_Y422 = COLOR_YUV2RGB_UYVY, 260 COLOR_YUV2BGR_Y422 = COLOR_YUV2BGR_UYVY, 261 COLOR_YUV2RGB_UYNV = COLOR_YUV2RGB_UYVY, 262 COLOR_YUV2BGR_UYNV = COLOR_YUV2BGR_UYVY, 263 COLOR_YUV2RGBA_UYVY = 111, 264 COLOR_YUV2BGRA_UYVY = 112, 265 COLOR_YUV2RGBA_Y422 = COLOR_YUV2RGBA_UYVY, 266 COLOR_YUV2BGRA_Y422 = COLOR_YUV2BGRA_UYVY, 267 COLOR_YUV2RGBA_UYNV = COLOR_YUV2RGBA_UYVY, 268 COLOR_YUV2BGRA_UYNV = COLOR_YUV2BGRA_UYVY, 269 COLOR_YUV2RGB_YUY2 = 115, 270 COLOR_YUV2BGR_YUY2 = 116, 271 COLOR_YUV2RGB_YVYU = 117, 272 COLOR_YUV2BGR_YVYU = 118, 273 COLOR_YUV2RGB_YUYV = COLOR_YUV2RGB_YUY2, 274 COLOR_YUV2BGR_YUYV = COLOR_YUV2BGR_YUY2, 275 COLOR_YUV2RGB_YUNV = COLOR_YUV2RGB_YUY2, 276 COLOR_YUV2BGR_YUNV = COLOR_YUV2BGR_YUY2, 277 COLOR_YUV2RGBA_YUY2 = 119, 278 COLOR_YUV2BGRA_YUY2 = 120, 279 COLOR_YUV2RGBA_YVYU = 121, 280 COLOR_YUV2BGRA_YVYU = 122, 281 COLOR_YUV2RGBA_YUYV = COLOR_YUV2RGBA_YUY2, 282 COLOR_YUV2BGRA_YUYV = COLOR_YUV2BGRA_YUY2, 283 COLOR_YUV2RGBA_YUNV = COLOR_YUV2RGBA_YUY2, 284 COLOR_YUV2BGRA_YUNV = COLOR_YUV2BGRA_YUY2, 285 COLOR_YUV2GRAY_UYVY = 123, 286 COLOR_YUV2GRAY_YUY2 = 124, 287 COLOR_YUV2GRAY_Y422 = COLOR_YUV2GRAY_UYVY, 288 COLOR_YUV2GRAY_UYNV = COLOR_YUV2GRAY_UYVY, 289 COLOR_YUV2GRAY_YVYU = COLOR_YUV2GRAY_YUY2, 290 COLOR_YUV2GRAY_YUYV = COLOR_YUV2GRAY_YUY2, 291 COLOR_YUV2GRAY_YUNV = COLOR_YUV2GRAY_YUY2, 292 COLOR_RGBA2mRGBA = 125, 293 COLOR_mRGBA2RGBA = 126, 294 COLOR_RGB2YUV_I420 = 127, 295 COLOR_BGR2YUV_I420 = 128, 296 COLOR_RGB2YUV_IYUV = COLOR_RGB2YUV_I420, 297 COLOR_BGR2YUV_IYUV = COLOR_BGR2YUV_I420, 298 COLOR_RGBA2YUV_I420 = 129, 299 COLOR_BGRA2YUV_I420 = 130, 300 COLOR_RGBA2YUV_IYUV = COLOR_RGBA2YUV_I420, 301 COLOR_BGRA2YUV_IYUV = COLOR_BGRA2YUV_I420, 302 COLOR_RGB2YUV_YV12 = 131, 303 COLOR_BGR2YUV_YV12 = 132, 304 COLOR_RGBA2YUV_YV12 = 133, 305 COLOR_BGRA2YUV_YV12 = 134, 306 COLOR_BayerBG2BGR = 46, 307 COLOR_BayerGB2BGR = 47, 308 COLOR_BayerRG2BGR = 48, 309 COLOR_BayerGR2BGR = 49, 310 COLOR_BayerRGGB2BGR = COLOR_BayerBG2BGR, 311 COLOR_BayerGRBG2BGR = COLOR_BayerGB2BGR, 312 COLOR_BayerBGGR2BGR = COLOR_BayerRG2BGR, 313 COLOR_BayerGBRG2BGR = COLOR_BayerGR2BGR, 314 COLOR_BayerRGGB2RGB = COLOR_BayerBGGR2BGR, 315 COLOR_BayerGRBG2RGB = COLOR_BayerGBRG2BGR, 316 COLOR_BayerBGGR2RGB = COLOR_BayerRGGB2BGR, 317 COLOR_BayerGBRG2RGB = COLOR_BayerGRBG2BGR, 318 COLOR_BayerBG2RGB = COLOR_BayerRG2BGR, 319 COLOR_BayerGB2RGB = COLOR_BayerGR2BGR, 320 COLOR_BayerRG2RGB = COLOR_BayerBG2BGR, 321 COLOR_BayerGR2RGB = COLOR_BayerGB2BGR, 322 COLOR_BayerBG2GRAY = 86, 323 COLOR_BayerGB2GRAY = 87, 324 COLOR_BayerRG2GRAY = 88, 325 COLOR_BayerGR2GRAY = 89, 326 COLOR_BayerRGGB2GRAY = COLOR_BayerBG2GRAY, 327 COLOR_BayerGRBG2GRAY = COLOR_BayerGB2GRAY, 328 COLOR_BayerBGGR2GRAY = COLOR_BayerRG2GRAY, 329 COLOR_BayerGBRG2GRAY = COLOR_BayerGR2GRAY, 330 COLOR_BayerBG2BGR_VNG = 62, 331 COLOR_BayerGB2BGR_VNG = 63, 332 COLOR_BayerRG2BGR_VNG = 64, 333 COLOR_BayerGR2BGR_VNG = 65, 334 COLOR_BayerRGGB2BGR_VNG = COLOR_BayerBG2BGR_VNG, 335 COLOR_BayerGRBG2BGR_VNG = COLOR_BayerGB2BGR_VNG, 336 COLOR_BayerBGGR2BGR_VNG = COLOR_BayerRG2BGR_VNG, 337 COLOR_BayerGBRG2BGR_VNG = COLOR_BayerGR2BGR_VNG, 338 COLOR_BayerRGGB2RGB_VNG = COLOR_BayerBGGR2BGR_VNG, 339 COLOR_BayerGRBG2RGB_VNG = COLOR_BayerGBRG2BGR_VNG, 340 COLOR_BayerBGGR2RGB_VNG = COLOR_BayerRGGB2BGR_VNG, 341 COLOR_BayerGBRG2RGB_VNG = COLOR_BayerGRBG2BGR_VNG, 342 COLOR_BayerBG2RGB_VNG = COLOR_BayerRG2BGR_VNG, 343 COLOR_BayerGB2RGB_VNG = COLOR_BayerGR2BGR_VNG, 344 COLOR_BayerRG2RGB_VNG = COLOR_BayerBG2BGR_VNG, 345 COLOR_BayerGR2RGB_VNG = COLOR_BayerGB2BGR_VNG, 346 COLOR_BayerBG2BGR_EA = 135, 347 COLOR_BayerGB2BGR_EA = 136, 348 COLOR_BayerRG2BGR_EA = 137, 349 COLOR_BayerGR2BGR_EA = 138, 350 COLOR_BayerRGGB2BGR_EA = COLOR_BayerBG2BGR_EA, 351 COLOR_BayerGRBG2BGR_EA = COLOR_BayerGB2BGR_EA, 352 COLOR_BayerBGGR2BGR_EA = COLOR_BayerRG2BGR_EA, 353 COLOR_BayerGBRG2BGR_EA = COLOR_BayerGR2BGR_EA, 354 COLOR_BayerRGGB2RGB_EA = COLOR_BayerBGGR2BGR_EA, 355 COLOR_BayerGRBG2RGB_EA = COLOR_BayerGBRG2BGR_EA, 356 COLOR_BayerBGGR2RGB_EA = COLOR_BayerRGGB2BGR_EA, 357 COLOR_BayerGBRG2RGB_EA = COLOR_BayerGRBG2BGR_EA, 358 COLOR_BayerBG2RGB_EA = COLOR_BayerRG2BGR_EA, 359 COLOR_BayerGB2RGB_EA = COLOR_BayerGR2BGR_EA, 360 COLOR_BayerRG2RGB_EA = COLOR_BayerBG2BGR_EA, 361 COLOR_BayerGR2RGB_EA = COLOR_BayerGB2BGR_EA, 362 COLOR_BayerBG2BGRA = 139, 363 COLOR_BayerGB2BGRA = 140, 364 COLOR_BayerRG2BGRA = 141, 365 COLOR_BayerGR2BGRA = 142, 366 COLOR_BayerRGGB2BGRA = COLOR_BayerBG2BGRA, 367 COLOR_BayerGRBG2BGRA = COLOR_BayerGB2BGRA, 368 COLOR_BayerBGGR2BGRA = COLOR_BayerRG2BGRA, 369 COLOR_BayerGBRG2BGRA = COLOR_BayerGR2BGRA, 370 COLOR_BayerRGGB2RGBA = COLOR_BayerBGGR2BGRA, 371 COLOR_BayerGRBG2RGBA = COLOR_BayerGBRG2BGRA, 372 COLOR_BayerBGGR2RGBA = COLOR_BayerRGGB2BGRA, 373 COLOR_BayerGBRG2RGBA = COLOR_BayerGRBG2BGRA, 374 COLOR_BayerBG2RGBA = COLOR_BayerRG2BGRA, 375 COLOR_BayerGB2RGBA = COLOR_BayerGR2BGRA, 376 COLOR_BayerRG2RGBA = COLOR_BayerBG2BGRA, 377 COLOR_BayerGR2RGBA = COLOR_BayerGB2BGRA, 378 COLOR_COLORCVT_MAX = 143; 379 380 381 // C++: enum ColormapTypes (cv.ColormapTypes) 382 public static final int 383 COLORMAP_AUTUMN = 0, 384 COLORMAP_BONE = 1, 385 COLORMAP_JET = 2, 386 COLORMAP_WINTER = 3, 387 COLORMAP_RAINBOW = 4, 388 COLORMAP_OCEAN = 5, 389 COLORMAP_SUMMER = 6, 390 COLORMAP_SPRING = 7, 391 COLORMAP_COOL = 8, 392 COLORMAP_HSV = 9, 393 COLORMAP_PINK = 10, 394 COLORMAP_HOT = 11, 395 COLORMAP_PARULA = 12, 396 COLORMAP_MAGMA = 13, 397 COLORMAP_INFERNO = 14, 398 COLORMAP_PLASMA = 15, 399 COLORMAP_VIRIDIS = 16, 400 COLORMAP_CIVIDIS = 17, 401 COLORMAP_TWILIGHT = 18, 402 COLORMAP_TWILIGHT_SHIFTED = 19, 403 COLORMAP_TURBO = 20, 404 COLORMAP_DEEPGREEN = 21; 405 406 407 // C++: enum ConnectedComponentsAlgorithmsTypes (cv.ConnectedComponentsAlgorithmsTypes) 408 public static final int 409 CCL_DEFAULT = -1, 410 CCL_WU = 0, 411 CCL_GRANA = 1, 412 CCL_BOLELLI = 2, 413 CCL_SAUF = 3, 414 CCL_BBDT = 4, 415 CCL_SPAGHETTI = 5; 416 417 418 // C++: enum ConnectedComponentsTypes (cv.ConnectedComponentsTypes) 419 public static final int 420 CC_STAT_LEFT = 0, 421 CC_STAT_TOP = 1, 422 CC_STAT_WIDTH = 2, 423 CC_STAT_HEIGHT = 3, 424 CC_STAT_AREA = 4, 425 CC_STAT_MAX = 5; 426 427 428 // C++: enum ContourApproximationModes (cv.ContourApproximationModes) 429 public static final int 430 CHAIN_APPROX_NONE = 1, 431 CHAIN_APPROX_SIMPLE = 2, 432 CHAIN_APPROX_TC89_L1 = 3, 433 CHAIN_APPROX_TC89_KCOS = 4; 434 435 436 // C++: enum DistanceTransformLabelTypes (cv.DistanceTransformLabelTypes) 437 public static final int 438 DIST_LABEL_CCOMP = 0, 439 DIST_LABEL_PIXEL = 1; 440 441 442 // C++: enum DistanceTransformMasks (cv.DistanceTransformMasks) 443 public static final int 444 DIST_MASK_3 = 3, 445 DIST_MASK_5 = 5, 446 DIST_MASK_PRECISE = 0; 447 448 449 // C++: enum DistanceTypes (cv.DistanceTypes) 450 public static final int 451 DIST_USER = -1, 452 DIST_L1 = 1, 453 DIST_L2 = 2, 454 DIST_C = 3, 455 DIST_L12 = 4, 456 DIST_FAIR = 5, 457 DIST_WELSCH = 6, 458 DIST_HUBER = 7; 459 460 461 // C++: enum FloodFillFlags (cv.FloodFillFlags) 462 public static final int 463 FLOODFILL_FIXED_RANGE = 1 << 16, 464 FLOODFILL_MASK_ONLY = 1 << 17; 465 466 467 // C++: enum GrabCutClasses (cv.GrabCutClasses) 468 public static final int 469 GC_BGD = 0, 470 GC_FGD = 1, 471 GC_PR_BGD = 2, 472 GC_PR_FGD = 3; 473 474 475 // C++: enum GrabCutModes (cv.GrabCutModes) 476 public static final int 477 GC_INIT_WITH_RECT = 0, 478 GC_INIT_WITH_MASK = 1, 479 GC_EVAL = 2, 480 GC_EVAL_FREEZE_MODEL = 3; 481 482 483 // C++: enum HersheyFonts (cv.HersheyFonts) 484 public static final int 485 FONT_HERSHEY_SIMPLEX = 0, 486 FONT_HERSHEY_PLAIN = 1, 487 FONT_HERSHEY_DUPLEX = 2, 488 FONT_HERSHEY_COMPLEX = 3, 489 FONT_HERSHEY_TRIPLEX = 4, 490 FONT_HERSHEY_COMPLEX_SMALL = 5, 491 FONT_HERSHEY_SCRIPT_SIMPLEX = 6, 492 FONT_HERSHEY_SCRIPT_COMPLEX = 7, 493 FONT_ITALIC = 16; 494 495 496 // C++: enum HistCompMethods (cv.HistCompMethods) 497 public static final int 498 HISTCMP_CORREL = 0, 499 HISTCMP_CHISQR = 1, 500 HISTCMP_INTERSECT = 2, 501 HISTCMP_BHATTACHARYYA = 3, 502 HISTCMP_HELLINGER = HISTCMP_BHATTACHARYYA, 503 HISTCMP_CHISQR_ALT = 4, 504 HISTCMP_KL_DIV = 5; 505 506 507 // C++: enum HoughModes (cv.HoughModes) 508 public static final int 509 HOUGH_STANDARD = 0, 510 HOUGH_PROBABILISTIC = 1, 511 HOUGH_MULTI_SCALE = 2, 512 HOUGH_GRADIENT = 3, 513 HOUGH_GRADIENT_ALT = 4; 514 515 516 // C++: enum InterpolationFlags (cv.InterpolationFlags) 517 public static final int 518 INTER_NEAREST = 0, 519 INTER_LINEAR = 1, 520 INTER_CUBIC = 2, 521 INTER_AREA = 3, 522 INTER_LANCZOS4 = 4, 523 INTER_LINEAR_EXACT = 5, 524 INTER_NEAREST_EXACT = 6, 525 INTER_MAX = 7, 526 WARP_FILL_OUTLIERS = 8, 527 WARP_INVERSE_MAP = 16; 528 529 530 // C++: enum InterpolationMasks (cv.InterpolationMasks) 531 public static final int 532 INTER_BITS = 5, 533 INTER_BITS2 = INTER_BITS * 2, 534 INTER_TAB_SIZE = 1 << INTER_BITS, 535 INTER_TAB_SIZE2 = INTER_TAB_SIZE * INTER_TAB_SIZE; 536 537 538 // C++: enum LineSegmentDetectorModes (cv.LineSegmentDetectorModes) 539 public static final int 540 LSD_REFINE_NONE = 0, 541 LSD_REFINE_STD = 1, 542 LSD_REFINE_ADV = 2; 543 544 545 // C++: enum LineTypes (cv.LineTypes) 546 public static final int 547 FILLED = -1, 548 LINE_4 = 4, 549 LINE_8 = 8, 550 LINE_AA = 16; 551 552 553 // C++: enum MarkerTypes (cv.MarkerTypes) 554 public static final int 555 MARKER_CROSS = 0, 556 MARKER_TILTED_CROSS = 1, 557 MARKER_STAR = 2, 558 MARKER_DIAMOND = 3, 559 MARKER_SQUARE = 4, 560 MARKER_TRIANGLE_UP = 5, 561 MARKER_TRIANGLE_DOWN = 6; 562 563 564 // C++: enum MorphShapes (cv.MorphShapes) 565 public static final int 566 MORPH_RECT = 0, 567 MORPH_CROSS = 1, 568 MORPH_ELLIPSE = 2; 569 570 571 // C++: enum MorphTypes (cv.MorphTypes) 572 public static final int 573 MORPH_ERODE = 0, 574 MORPH_DILATE = 1, 575 MORPH_OPEN = 2, 576 MORPH_CLOSE = 3, 577 MORPH_GRADIENT = 4, 578 MORPH_TOPHAT = 5, 579 MORPH_BLACKHAT = 6, 580 MORPH_HITMISS = 7; 581 582 583 // C++: enum RectanglesIntersectTypes (cv.RectanglesIntersectTypes) 584 public static final int 585 INTERSECT_NONE = 0, 586 INTERSECT_PARTIAL = 1, 587 INTERSECT_FULL = 2; 588 589 590 // C++: enum RetrievalModes (cv.RetrievalModes) 591 public static final int 592 RETR_EXTERNAL = 0, 593 RETR_LIST = 1, 594 RETR_CCOMP = 2, 595 RETR_TREE = 3, 596 RETR_FLOODFILL = 4; 597 598 599 // C++: enum ShapeMatchModes (cv.ShapeMatchModes) 600 public static final int 601 CONTOURS_MATCH_I1 = 1, 602 CONTOURS_MATCH_I2 = 2, 603 CONTOURS_MATCH_I3 = 3; 604 605 606 // C++: enum SpecialFilter (cv.SpecialFilter) 607 public static final int 608 FILTER_SCHARR = -1; 609 610 611 // C++: enum TemplateMatchModes (cv.TemplateMatchModes) 612 public static final int 613 TM_SQDIFF = 0, 614 TM_SQDIFF_NORMED = 1, 615 TM_CCORR = 2, 616 TM_CCORR_NORMED = 3, 617 TM_CCOEFF = 4, 618 TM_CCOEFF_NORMED = 5; 619 620 621 // C++: enum ThresholdTypes (cv.ThresholdTypes) 622 public static final int 623 THRESH_BINARY = 0, 624 THRESH_BINARY_INV = 1, 625 THRESH_TRUNC = 2, 626 THRESH_TOZERO = 3, 627 THRESH_TOZERO_INV = 4, 628 THRESH_MASK = 7, 629 THRESH_OTSU = 8, 630 THRESH_TRIANGLE = 16; 631 632 633 // C++: enum WarpPolarMode (cv.WarpPolarMode) 634 public static final int 635 WARP_POLAR_LINEAR = 0, 636 WARP_POLAR_LOG = 256; 637 638 639 // 640 // C++: Ptr_LineSegmentDetector cv::createLineSegmentDetector(int refine = LSD_REFINE_STD, double scale = 0.8, double sigma_scale = 0.6, double quant = 2.0, double ang_th = 22.5, double log_eps = 0, double density_th = 0.7, int n_bins = 1024) 641 // 642 643 /** 644 * Creates a smart pointer to a LineSegmentDetector object and initializes it. 645 * 646 * The LineSegmentDetector algorithm is defined using the standard values. Only advanced users may want 647 * to edit those, as to tailor it for their own application. 648 * 649 * @param refine The way found lines will be refined, see #LineSegmentDetectorModes 650 * @param scale The scale of the image that will be used to find the lines. Range (0..1]. 651 * @param sigma_scale Sigma for Gaussian filter. It is computed as sigma = sigma_scale/scale. 652 * @param quant Bound to the quantization error on the gradient norm. 653 * @param ang_th Gradient angle tolerance in degrees. 654 * @param log_eps Detection threshold: -log10(NFA) > log_eps. Used only when advance refinement is chosen. 655 * @param density_th Minimal density of aligned region points in the enclosing rectangle. 656 * @param n_bins Number of bins in pseudo-ordering of gradient modulus. 657 * @return automatically generated 658 */ 659 public static LineSegmentDetector createLineSegmentDetector(int refine, double scale, double sigma_scale, double quant, double ang_th, double log_eps, double density_th, int n_bins) { 660 return LineSegmentDetector.__fromPtr__(createLineSegmentDetector_0(refine, scale, sigma_scale, quant, ang_th, log_eps, density_th, n_bins)); 661 } 662 663 /** 664 * Creates a smart pointer to a LineSegmentDetector object and initializes it. 665 * 666 * The LineSegmentDetector algorithm is defined using the standard values. Only advanced users may want 667 * to edit those, as to tailor it for their own application. 668 * 669 * @param refine The way found lines will be refined, see #LineSegmentDetectorModes 670 * @param scale The scale of the image that will be used to find the lines. Range (0..1]. 671 * @param sigma_scale Sigma for Gaussian filter. It is computed as sigma = sigma_scale/scale. 672 * @param quant Bound to the quantization error on the gradient norm. 673 * @param ang_th Gradient angle tolerance in degrees. 674 * @param log_eps Detection threshold: -log10(NFA) > log_eps. Used only when advance refinement is chosen. 675 * @param density_th Minimal density of aligned region points in the enclosing rectangle. 676 * @return automatically generated 677 */ 678 public static LineSegmentDetector createLineSegmentDetector(int refine, double scale, double sigma_scale, double quant, double ang_th, double log_eps, double density_th) { 679 return LineSegmentDetector.__fromPtr__(createLineSegmentDetector_1(refine, scale, sigma_scale, quant, ang_th, log_eps, density_th)); 680 } 681 682 /** 683 * Creates a smart pointer to a LineSegmentDetector object and initializes it. 684 * 685 * The LineSegmentDetector algorithm is defined using the standard values. Only advanced users may want 686 * to edit those, as to tailor it for their own application. 687 * 688 * @param refine The way found lines will be refined, see #LineSegmentDetectorModes 689 * @param scale The scale of the image that will be used to find the lines. Range (0..1]. 690 * @param sigma_scale Sigma for Gaussian filter. It is computed as sigma = sigma_scale/scale. 691 * @param quant Bound to the quantization error on the gradient norm. 692 * @param ang_th Gradient angle tolerance in degrees. 693 * @param log_eps Detection threshold: -log10(NFA) > log_eps. Used only when advance refinement is chosen. 694 * @return automatically generated 695 */ 696 public static LineSegmentDetector createLineSegmentDetector(int refine, double scale, double sigma_scale, double quant, double ang_th, double log_eps) { 697 return LineSegmentDetector.__fromPtr__(createLineSegmentDetector_2(refine, scale, sigma_scale, quant, ang_th, log_eps)); 698 } 699 700 /** 701 * Creates a smart pointer to a LineSegmentDetector object and initializes it. 702 * 703 * The LineSegmentDetector algorithm is defined using the standard values. Only advanced users may want 704 * to edit those, as to tailor it for their own application. 705 * 706 * @param refine The way found lines will be refined, see #LineSegmentDetectorModes 707 * @param scale The scale of the image that will be used to find the lines. Range (0..1]. 708 * @param sigma_scale Sigma for Gaussian filter. It is computed as sigma = sigma_scale/scale. 709 * @param quant Bound to the quantization error on the gradient norm. 710 * @param ang_th Gradient angle tolerance in degrees. 711 * @return automatically generated 712 */ 713 public static LineSegmentDetector createLineSegmentDetector(int refine, double scale, double sigma_scale, double quant, double ang_th) { 714 return LineSegmentDetector.__fromPtr__(createLineSegmentDetector_3(refine, scale, sigma_scale, quant, ang_th)); 715 } 716 717 /** 718 * Creates a smart pointer to a LineSegmentDetector object and initializes it. 719 * 720 * The LineSegmentDetector algorithm is defined using the standard values. Only advanced users may want 721 * to edit those, as to tailor it for their own application. 722 * 723 * @param refine The way found lines will be refined, see #LineSegmentDetectorModes 724 * @param scale The scale of the image that will be used to find the lines. Range (0..1]. 725 * @param sigma_scale Sigma for Gaussian filter. It is computed as sigma = sigma_scale/scale. 726 * @param quant Bound to the quantization error on the gradient norm. 727 * @return automatically generated 728 */ 729 public static LineSegmentDetector createLineSegmentDetector(int refine, double scale, double sigma_scale, double quant) { 730 return LineSegmentDetector.__fromPtr__(createLineSegmentDetector_4(refine, scale, sigma_scale, quant)); 731 } 732 733 /** 734 * Creates a smart pointer to a LineSegmentDetector object and initializes it. 735 * 736 * The LineSegmentDetector algorithm is defined using the standard values. Only advanced users may want 737 * to edit those, as to tailor it for their own application. 738 * 739 * @param refine The way found lines will be refined, see #LineSegmentDetectorModes 740 * @param scale The scale of the image that will be used to find the lines. Range (0..1]. 741 * @param sigma_scale Sigma for Gaussian filter. It is computed as sigma = sigma_scale/scale. 742 * @return automatically generated 743 */ 744 public static LineSegmentDetector createLineSegmentDetector(int refine, double scale, double sigma_scale) { 745 return LineSegmentDetector.__fromPtr__(createLineSegmentDetector_5(refine, scale, sigma_scale)); 746 } 747 748 /** 749 * Creates a smart pointer to a LineSegmentDetector object and initializes it. 750 * 751 * The LineSegmentDetector algorithm is defined using the standard values. Only advanced users may want 752 * to edit those, as to tailor it for their own application. 753 * 754 * @param refine The way found lines will be refined, see #LineSegmentDetectorModes 755 * @param scale The scale of the image that will be used to find the lines. Range (0..1]. 756 * @return automatically generated 757 */ 758 public static LineSegmentDetector createLineSegmentDetector(int refine, double scale) { 759 return LineSegmentDetector.__fromPtr__(createLineSegmentDetector_6(refine, scale)); 760 } 761 762 /** 763 * Creates a smart pointer to a LineSegmentDetector object and initializes it. 764 * 765 * The LineSegmentDetector algorithm is defined using the standard values. Only advanced users may want 766 * to edit those, as to tailor it for their own application. 767 * 768 * @param refine The way found lines will be refined, see #LineSegmentDetectorModes 769 * @return automatically generated 770 */ 771 public static LineSegmentDetector createLineSegmentDetector(int refine) { 772 return LineSegmentDetector.__fromPtr__(createLineSegmentDetector_7(refine)); 773 } 774 775 /** 776 * Creates a smart pointer to a LineSegmentDetector object and initializes it. 777 * 778 * The LineSegmentDetector algorithm is defined using the standard values. Only advanced users may want 779 * to edit those, as to tailor it for their own application. 780 * 781 * @return automatically generated 782 */ 783 public static LineSegmentDetector createLineSegmentDetector() { 784 return LineSegmentDetector.__fromPtr__(createLineSegmentDetector_8()); 785 } 786 787 788 // 789 // C++: Mat cv::getGaussianKernel(int ksize, double sigma, int ktype = CV_64F) 790 // 791 792 /** 793 * Returns Gaussian filter coefficients. 794 * 795 * The function computes and returns the \(\texttt{ksize} \times 1\) matrix of Gaussian filter 796 * coefficients: 797 * 798 * \(G_i= \alpha *e^{-(i-( \texttt{ksize} -1)/2)^2/(2* \texttt{sigma}^2)},\) 799 * 800 * where \(i=0..\texttt{ksize}-1\) and \(\alpha\) is the scale factor chosen so that \(\sum_i G_i=1\). 801 * 802 * Two of such generated kernels can be passed to sepFilter2D. Those functions automatically recognize 803 * smoothing kernels (a symmetrical kernel with sum of weights equal to 1) and handle them accordingly. 804 * You may also use the higher-level GaussianBlur. 805 * @param ksize Aperture size. It should be odd ( \(\texttt{ksize} \mod 2 = 1\) ) and positive. 806 * @param sigma Gaussian standard deviation. If it is non-positive, it is computed from ksize as 807 * {@code sigma = 0.3*((ksize-1)*0.5 - 1) + 0.8}. 808 * @param ktype Type of filter coefficients. It can be CV_32F or CV_64F . 809 * SEE: sepFilter2D, getDerivKernels, getStructuringElement, GaussianBlur 810 * @return automatically generated 811 */ 812 public static Mat getGaussianKernel(int ksize, double sigma, int ktype) { 813 return new Mat(getGaussianKernel_0(ksize, sigma, ktype)); 814 } 815 816 /** 817 * Returns Gaussian filter coefficients. 818 * 819 * The function computes and returns the \(\texttt{ksize} \times 1\) matrix of Gaussian filter 820 * coefficients: 821 * 822 * \(G_i= \alpha *e^{-(i-( \texttt{ksize} -1)/2)^2/(2* \texttt{sigma}^2)},\) 823 * 824 * where \(i=0..\texttt{ksize}-1\) and \(\alpha\) is the scale factor chosen so that \(\sum_i G_i=1\). 825 * 826 * Two of such generated kernels can be passed to sepFilter2D. Those functions automatically recognize 827 * smoothing kernels (a symmetrical kernel with sum of weights equal to 1) and handle them accordingly. 828 * You may also use the higher-level GaussianBlur. 829 * @param ksize Aperture size. It should be odd ( \(\texttt{ksize} \mod 2 = 1\) ) and positive. 830 * @param sigma Gaussian standard deviation. If it is non-positive, it is computed from ksize as 831 * {@code sigma = 0.3*((ksize-1)*0.5 - 1) + 0.8}. 832 * SEE: sepFilter2D, getDerivKernels, getStructuringElement, GaussianBlur 833 * @return automatically generated 834 */ 835 public static Mat getGaussianKernel(int ksize, double sigma) { 836 return new Mat(getGaussianKernel_1(ksize, sigma)); 837 } 838 839 840 // 841 // C++: void cv::getDerivKernels(Mat& kx, Mat& ky, int dx, int dy, int ksize, bool normalize = false, int ktype = CV_32F) 842 // 843 844 /** 845 * Returns filter coefficients for computing spatial image derivatives. 846 * 847 * The function computes and returns the filter coefficients for spatial image derivatives. When 848 * {@code ksize=FILTER_SCHARR}, the Scharr \(3 \times 3\) kernels are generated (see #Scharr). Otherwise, Sobel 849 * kernels are generated (see #Sobel). The filters are normally passed to #sepFilter2D or to 850 * 851 * @param kx Output matrix of row filter coefficients. It has the type ktype . 852 * @param ky Output matrix of column filter coefficients. It has the type ktype . 853 * @param dx Derivative order in respect of x. 854 * @param dy Derivative order in respect of y. 855 * @param ksize Aperture size. It can be FILTER_SCHARR, 1, 3, 5, or 7. 856 * @param normalize Flag indicating whether to normalize (scale down) the filter coefficients or not. 857 * Theoretically, the coefficients should have the denominator \(=2^{ksize*2-dx-dy-2}\). If you are 858 * going to filter floating-point images, you are likely to use the normalized kernels. But if you 859 * compute derivatives of an 8-bit image, store the results in a 16-bit image, and wish to preserve 860 * all the fractional bits, you may want to set normalize=false . 861 * @param ktype Type of filter coefficients. It can be CV_32f or CV_64F . 862 */ 863 public static void getDerivKernels(Mat kx, Mat ky, int dx, int dy, int ksize, boolean normalize, int ktype) { 864 getDerivKernels_0(kx.nativeObj, ky.nativeObj, dx, dy, ksize, normalize, ktype); 865 } 866 867 /** 868 * Returns filter coefficients for computing spatial image derivatives. 869 * 870 * The function computes and returns the filter coefficients for spatial image derivatives. When 871 * {@code ksize=FILTER_SCHARR}, the Scharr \(3 \times 3\) kernels are generated (see #Scharr). Otherwise, Sobel 872 * kernels are generated (see #Sobel). The filters are normally passed to #sepFilter2D or to 873 * 874 * @param kx Output matrix of row filter coefficients. It has the type ktype . 875 * @param ky Output matrix of column filter coefficients. It has the type ktype . 876 * @param dx Derivative order in respect of x. 877 * @param dy Derivative order in respect of y. 878 * @param ksize Aperture size. It can be FILTER_SCHARR, 1, 3, 5, or 7. 879 * @param normalize Flag indicating whether to normalize (scale down) the filter coefficients or not. 880 * Theoretically, the coefficients should have the denominator \(=2^{ksize*2-dx-dy-2}\). If you are 881 * going to filter floating-point images, you are likely to use the normalized kernels. But if you 882 * compute derivatives of an 8-bit image, store the results in a 16-bit image, and wish to preserve 883 * all the fractional bits, you may want to set normalize=false . 884 */ 885 public static void getDerivKernels(Mat kx, Mat ky, int dx, int dy, int ksize, boolean normalize) { 886 getDerivKernels_1(kx.nativeObj, ky.nativeObj, dx, dy, ksize, normalize); 887 } 888 889 /** 890 * Returns filter coefficients for computing spatial image derivatives. 891 * 892 * The function computes and returns the filter coefficients for spatial image derivatives. When 893 * {@code ksize=FILTER_SCHARR}, the Scharr \(3 \times 3\) kernels are generated (see #Scharr). Otherwise, Sobel 894 * kernels are generated (see #Sobel). The filters are normally passed to #sepFilter2D or to 895 * 896 * @param kx Output matrix of row filter coefficients. It has the type ktype . 897 * @param ky Output matrix of column filter coefficients. It has the type ktype . 898 * @param dx Derivative order in respect of x. 899 * @param dy Derivative order in respect of y. 900 * @param ksize Aperture size. It can be FILTER_SCHARR, 1, 3, 5, or 7. 901 * Theoretically, the coefficients should have the denominator \(=2^{ksize*2-dx-dy-2}\). If you are 902 * going to filter floating-point images, you are likely to use the normalized kernels. But if you 903 * compute derivatives of an 8-bit image, store the results in a 16-bit image, and wish to preserve 904 * all the fractional bits, you may want to set normalize=false . 905 */ 906 public static void getDerivKernels(Mat kx, Mat ky, int dx, int dy, int ksize) { 907 getDerivKernels_2(kx.nativeObj, ky.nativeObj, dx, dy, ksize); 908 } 909 910 911 // 912 // C++: Mat cv::getGaborKernel(Size ksize, double sigma, double theta, double lambd, double gamma, double psi = CV_PI*0.5, int ktype = CV_64F) 913 // 914 915 /** 916 * Returns Gabor filter coefficients. 917 * 918 * For more details about gabor filter equations and parameters, see: [Gabor 919 * Filter](http://en.wikipedia.org/wiki/Gabor_filter). 920 * 921 * @param ksize Size of the filter returned. 922 * @param sigma Standard deviation of the gaussian envelope. 923 * @param theta Orientation of the normal to the parallel stripes of a Gabor function. 924 * @param lambd Wavelength of the sinusoidal factor. 925 * @param gamma Spatial aspect ratio. 926 * @param psi Phase offset. 927 * @param ktype Type of filter coefficients. It can be CV_32F or CV_64F . 928 * @return automatically generated 929 */ 930 public static Mat getGaborKernel(Size ksize, double sigma, double theta, double lambd, double gamma, double psi, int ktype) { 931 return new Mat(getGaborKernel_0(ksize.width, ksize.height, sigma, theta, lambd, gamma, psi, ktype)); 932 } 933 934 /** 935 * Returns Gabor filter coefficients. 936 * 937 * For more details about gabor filter equations and parameters, see: [Gabor 938 * Filter](http://en.wikipedia.org/wiki/Gabor_filter). 939 * 940 * @param ksize Size of the filter returned. 941 * @param sigma Standard deviation of the gaussian envelope. 942 * @param theta Orientation of the normal to the parallel stripes of a Gabor function. 943 * @param lambd Wavelength of the sinusoidal factor. 944 * @param gamma Spatial aspect ratio. 945 * @param psi Phase offset. 946 * @return automatically generated 947 */ 948 public static Mat getGaborKernel(Size ksize, double sigma, double theta, double lambd, double gamma, double psi) { 949 return new Mat(getGaborKernel_1(ksize.width, ksize.height, sigma, theta, lambd, gamma, psi)); 950 } 951 952 /** 953 * Returns Gabor filter coefficients. 954 * 955 * For more details about gabor filter equations and parameters, see: [Gabor 956 * Filter](http://en.wikipedia.org/wiki/Gabor_filter). 957 * 958 * @param ksize Size of the filter returned. 959 * @param sigma Standard deviation of the gaussian envelope. 960 * @param theta Orientation of the normal to the parallel stripes of a Gabor function. 961 * @param lambd Wavelength of the sinusoidal factor. 962 * @param gamma Spatial aspect ratio. 963 * @return automatically generated 964 */ 965 public static Mat getGaborKernel(Size ksize, double sigma, double theta, double lambd, double gamma) { 966 return new Mat(getGaborKernel_2(ksize.width, ksize.height, sigma, theta, lambd, gamma)); 967 } 968 969 970 // 971 // C++: Mat cv::getStructuringElement(int shape, Size ksize, Point anchor = Point(-1,-1)) 972 // 973 974 /** 975 * Returns a structuring element of the specified size and shape for morphological operations. 976 * 977 * The function constructs and returns the structuring element that can be further passed to #erode, 978 * #dilate or #morphologyEx. But you can also construct an arbitrary binary mask yourself and use it as 979 * the structuring element. 980 * 981 * @param shape Element shape that could be one of #MorphShapes 982 * @param ksize Size of the structuring element. 983 * @param anchor Anchor position within the element. The default value \((-1, -1)\) means that the 984 * anchor is at the center. Note that only the shape of a cross-shaped element depends on the anchor 985 * position. In other cases the anchor just regulates how much the result of the morphological 986 * operation is shifted. 987 * @return automatically generated 988 */ 989 public static Mat getStructuringElement(int shape, Size ksize, Point anchor) { 990 return new Mat(getStructuringElement_0(shape, ksize.width, ksize.height, anchor.x, anchor.y)); 991 } 992 993 /** 994 * Returns a structuring element of the specified size and shape for morphological operations. 995 * 996 * The function constructs and returns the structuring element that can be further passed to #erode, 997 * #dilate or #morphologyEx. But you can also construct an arbitrary binary mask yourself and use it as 998 * the structuring element. 999 * 1000 * @param shape Element shape that could be one of #MorphShapes 1001 * @param ksize Size of the structuring element. 1002 * anchor is at the center. Note that only the shape of a cross-shaped element depends on the anchor 1003 * position. In other cases the anchor just regulates how much the result of the morphological 1004 * operation is shifted. 1005 * @return automatically generated 1006 */ 1007 public static Mat getStructuringElement(int shape, Size ksize) { 1008 return new Mat(getStructuringElement_1(shape, ksize.width, ksize.height)); 1009 } 1010 1011 1012 // 1013 // C++: void cv::medianBlur(Mat src, Mat& dst, int ksize) 1014 // 1015 1016 /** 1017 * Blurs an image using the median filter. 1018 * 1019 * The function smoothes an image using the median filter with the \(\texttt{ksize} \times 1020 * \texttt{ksize}\) aperture. Each channel of a multi-channel image is processed independently. 1021 * In-place operation is supported. 1022 * 1023 * <b>Note:</b> The median filter uses #BORDER_REPLICATE internally to cope with border pixels, see #BorderTypes 1024 * 1025 * @param src input 1-, 3-, or 4-channel image; when ksize is 3 or 5, the image depth should be 1026 * CV_8U, CV_16U, or CV_32F, for larger aperture sizes, it can only be CV_8U. 1027 * @param dst destination array of the same size and type as src. 1028 * @param ksize aperture linear size; it must be odd and greater than 1, for example: 3, 5, 7 ... 1029 * SEE: bilateralFilter, blur, boxFilter, GaussianBlur 1030 */ 1031 public static void medianBlur(Mat src, Mat dst, int ksize) { 1032 medianBlur_0(src.nativeObj, dst.nativeObj, ksize); 1033 } 1034 1035 1036 // 1037 // C++: void cv::GaussianBlur(Mat src, Mat& dst, Size ksize, double sigmaX, double sigmaY = 0, int borderType = BORDER_DEFAULT) 1038 // 1039 1040 /** 1041 * Blurs an image using a Gaussian filter. 1042 * 1043 * The function convolves the source image with the specified Gaussian kernel. In-place filtering is 1044 * supported. 1045 * 1046 * @param src input image; the image can have any number of channels, which are processed 1047 * independently, but the depth should be CV_8U, CV_16U, CV_16S, CV_32F or CV_64F. 1048 * @param dst output image of the same size and type as src. 1049 * @param ksize Gaussian kernel size. ksize.width and ksize.height can differ but they both must be 1050 * positive and odd. Or, they can be zero's and then they are computed from sigma. 1051 * @param sigmaX Gaussian kernel standard deviation in X direction. 1052 * @param sigmaY Gaussian kernel standard deviation in Y direction; if sigmaY is zero, it is set to be 1053 * equal to sigmaX, if both sigmas are zeros, they are computed from ksize.width and ksize.height, 1054 * respectively (see #getGaussianKernel for details); to fully control the result regardless of 1055 * possible future modifications of all this semantics, it is recommended to specify all of ksize, 1056 * sigmaX, and sigmaY. 1057 * @param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported. 1058 * 1059 * SEE: sepFilter2D, filter2D, blur, boxFilter, bilateralFilter, medianBlur 1060 */ 1061 public static void GaussianBlur(Mat src, Mat dst, Size ksize, double sigmaX, double sigmaY, int borderType) { 1062 GaussianBlur_0(src.nativeObj, dst.nativeObj, ksize.width, ksize.height, sigmaX, sigmaY, borderType); 1063 } 1064 1065 /** 1066 * Blurs an image using a Gaussian filter. 1067 * 1068 * The function convolves the source image with the specified Gaussian kernel. In-place filtering is 1069 * supported. 1070 * 1071 * @param src input image; the image can have any number of channels, which are processed 1072 * independently, but the depth should be CV_8U, CV_16U, CV_16S, CV_32F or CV_64F. 1073 * @param dst output image of the same size and type as src. 1074 * @param ksize Gaussian kernel size. ksize.width and ksize.height can differ but they both must be 1075 * positive and odd. Or, they can be zero's and then they are computed from sigma. 1076 * @param sigmaX Gaussian kernel standard deviation in X direction. 1077 * @param sigmaY Gaussian kernel standard deviation in Y direction; if sigmaY is zero, it is set to be 1078 * equal to sigmaX, if both sigmas are zeros, they are computed from ksize.width and ksize.height, 1079 * respectively (see #getGaussianKernel for details); to fully control the result regardless of 1080 * possible future modifications of all this semantics, it is recommended to specify all of ksize, 1081 * sigmaX, and sigmaY. 1082 * 1083 * SEE: sepFilter2D, filter2D, blur, boxFilter, bilateralFilter, medianBlur 1084 */ 1085 public static void GaussianBlur(Mat src, Mat dst, Size ksize, double sigmaX, double sigmaY) { 1086 GaussianBlur_1(src.nativeObj, dst.nativeObj, ksize.width, ksize.height, sigmaX, sigmaY); 1087 } 1088 1089 /** 1090 * Blurs an image using a Gaussian filter. 1091 * 1092 * The function convolves the source image with the specified Gaussian kernel. In-place filtering is 1093 * supported. 1094 * 1095 * @param src input image; the image can have any number of channels, which are processed 1096 * independently, but the depth should be CV_8U, CV_16U, CV_16S, CV_32F or CV_64F. 1097 * @param dst output image of the same size and type as src. 1098 * @param ksize Gaussian kernel size. ksize.width and ksize.height can differ but they both must be 1099 * positive and odd. Or, they can be zero's and then they are computed from sigma. 1100 * @param sigmaX Gaussian kernel standard deviation in X direction. 1101 * equal to sigmaX, if both sigmas are zeros, they are computed from ksize.width and ksize.height, 1102 * respectively (see #getGaussianKernel for details); to fully control the result regardless of 1103 * possible future modifications of all this semantics, it is recommended to specify all of ksize, 1104 * sigmaX, and sigmaY. 1105 * 1106 * SEE: sepFilter2D, filter2D, blur, boxFilter, bilateralFilter, medianBlur 1107 */ 1108 public static void GaussianBlur(Mat src, Mat dst, Size ksize, double sigmaX) { 1109 GaussianBlur_2(src.nativeObj, dst.nativeObj, ksize.width, ksize.height, sigmaX); 1110 } 1111 1112 1113 // 1114 // C++: void cv::bilateralFilter(Mat src, Mat& dst, int d, double sigmaColor, double sigmaSpace, int borderType = BORDER_DEFAULT) 1115 // 1116 1117 /** 1118 * Applies the bilateral filter to an image. 1119 * 1120 * The function applies bilateral filtering to the input image, as described in 1121 * http://www.dai.ed.ac.uk/CVonline/LOCAL_COPIES/MANDUCHI1/Bilateral_Filtering.html 1122 * bilateralFilter can reduce unwanted noise very well while keeping edges fairly sharp. However, it is 1123 * very slow compared to most filters. 1124 * 1125 * _Sigma values_: For simplicity, you can set the 2 sigma values to be the same. If they are small (< 1126 * 10), the filter will not have much effect, whereas if they are large (> 150), they will have a very 1127 * strong effect, making the image look "cartoonish". 1128 * 1129 * _Filter size_: Large filters (d > 5) are very slow, so it is recommended to use d=5 for real-time 1130 * applications, and perhaps d=9 for offline applications that need heavy noise filtering. 1131 * 1132 * This filter does not work inplace. 1133 * @param src Source 8-bit or floating-point, 1-channel or 3-channel image. 1134 * @param dst Destination image of the same size and type as src . 1135 * @param d Diameter of each pixel neighborhood that is used during filtering. If it is non-positive, 1136 * it is computed from sigmaSpace. 1137 * @param sigmaColor Filter sigma in the color space. A larger value of the parameter means that 1138 * farther colors within the pixel neighborhood (see sigmaSpace) will be mixed together, resulting 1139 * in larger areas of semi-equal color. 1140 * @param sigmaSpace Filter sigma in the coordinate space. A larger value of the parameter means that 1141 * farther pixels will influence each other as long as their colors are close enough (see sigmaColor 1142 * ). When d>0, it specifies the neighborhood size regardless of sigmaSpace. Otherwise, d is 1143 * proportional to sigmaSpace. 1144 * @param borderType border mode used to extrapolate pixels outside of the image, see #BorderTypes 1145 */ 1146 public static void bilateralFilter(Mat src, Mat dst, int d, double sigmaColor, double sigmaSpace, int borderType) { 1147 bilateralFilter_0(src.nativeObj, dst.nativeObj, d, sigmaColor, sigmaSpace, borderType); 1148 } 1149 1150 /** 1151 * Applies the bilateral filter to an image. 1152 * 1153 * The function applies bilateral filtering to the input image, as described in 1154 * http://www.dai.ed.ac.uk/CVonline/LOCAL_COPIES/MANDUCHI1/Bilateral_Filtering.html 1155 * bilateralFilter can reduce unwanted noise very well while keeping edges fairly sharp. However, it is 1156 * very slow compared to most filters. 1157 * 1158 * _Sigma values_: For simplicity, you can set the 2 sigma values to be the same. If they are small (< 1159 * 10), the filter will not have much effect, whereas if they are large (> 150), they will have a very 1160 * strong effect, making the image look "cartoonish". 1161 * 1162 * _Filter size_: Large filters (d > 5) are very slow, so it is recommended to use d=5 for real-time 1163 * applications, and perhaps d=9 for offline applications that need heavy noise filtering. 1164 * 1165 * This filter does not work inplace. 1166 * @param src Source 8-bit or floating-point, 1-channel or 3-channel image. 1167 * @param dst Destination image of the same size and type as src . 1168 * @param d Diameter of each pixel neighborhood that is used during filtering. If it is non-positive, 1169 * it is computed from sigmaSpace. 1170 * @param sigmaColor Filter sigma in the color space. A larger value of the parameter means that 1171 * farther colors within the pixel neighborhood (see sigmaSpace) will be mixed together, resulting 1172 * in larger areas of semi-equal color. 1173 * @param sigmaSpace Filter sigma in the coordinate space. A larger value of the parameter means that 1174 * farther pixels will influence each other as long as their colors are close enough (see sigmaColor 1175 * ). When d>0, it specifies the neighborhood size regardless of sigmaSpace. Otherwise, d is 1176 * proportional to sigmaSpace. 1177 */ 1178 public static void bilateralFilter(Mat src, Mat dst, int d, double sigmaColor, double sigmaSpace) { 1179 bilateralFilter_1(src.nativeObj, dst.nativeObj, d, sigmaColor, sigmaSpace); 1180 } 1181 1182 1183 // 1184 // C++: void cv::boxFilter(Mat src, Mat& dst, int ddepth, Size ksize, Point anchor = Point(-1,-1), bool normalize = true, int borderType = BORDER_DEFAULT) 1185 // 1186 1187 /** 1188 * Blurs an image using the box filter. 1189 * 1190 * The function smooths an image using the kernel: 1191 * 1192 * \(\texttt{K} = \alpha \begin{bmatrix} 1 & 1 & 1 & \cdots & 1 & 1 \\ 1 & 1 & 1 & \cdots & 1 & 1 \\ \hdotsfor{6} \\ 1 & 1 & 1 & \cdots & 1 & 1 \end{bmatrix}\) 1193 * 1194 * where 1195 * 1196 * \(\alpha = \begin{cases} \frac{1}{\texttt{ksize.width*ksize.height}} & \texttt{when } \texttt{normalize=true} \\1 & \texttt{otherwise}\end{cases}\) 1197 * 1198 * Unnormalized box filter is useful for computing various integral characteristics over each pixel 1199 * neighborhood, such as covariance matrices of image derivatives (used in dense optical flow 1200 * algorithms, and so on). If you need to compute pixel sums over variable-size windows, use #integral. 1201 * 1202 * @param src input image. 1203 * @param dst output image of the same size and type as src. 1204 * @param ddepth the output image depth (-1 to use src.depth()). 1205 * @param ksize blurring kernel size. 1206 * @param anchor anchor point; default value Point(-1,-1) means that the anchor is at the kernel 1207 * center. 1208 * @param normalize flag, specifying whether the kernel is normalized by its area or not. 1209 * @param borderType border mode used to extrapolate pixels outside of the image, see #BorderTypes. #BORDER_WRAP is not supported. 1210 * SEE: blur, bilateralFilter, GaussianBlur, medianBlur, integral 1211 */ 1212 public static void boxFilter(Mat src, Mat dst, int ddepth, Size ksize, Point anchor, boolean normalize, int borderType) { 1213 boxFilter_0(src.nativeObj, dst.nativeObj, ddepth, ksize.width, ksize.height, anchor.x, anchor.y, normalize, borderType); 1214 } 1215 1216 /** 1217 * Blurs an image using the box filter. 1218 * 1219 * The function smooths an image using the kernel: 1220 * 1221 * \(\texttt{K} = \alpha \begin{bmatrix} 1 & 1 & 1 & \cdots & 1 & 1 \\ 1 & 1 & 1 & \cdots & 1 & 1 \\ \hdotsfor{6} \\ 1 & 1 & 1 & \cdots & 1 & 1 \end{bmatrix}\) 1222 * 1223 * where 1224 * 1225 * \(\alpha = \begin{cases} \frac{1}{\texttt{ksize.width*ksize.height}} & \texttt{when } \texttt{normalize=true} \\1 & \texttt{otherwise}\end{cases}\) 1226 * 1227 * Unnormalized box filter is useful for computing various integral characteristics over each pixel 1228 * neighborhood, such as covariance matrices of image derivatives (used in dense optical flow 1229 * algorithms, and so on). If you need to compute pixel sums over variable-size windows, use #integral. 1230 * 1231 * @param src input image. 1232 * @param dst output image of the same size and type as src. 1233 * @param ddepth the output image depth (-1 to use src.depth()). 1234 * @param ksize blurring kernel size. 1235 * @param anchor anchor point; default value Point(-1,-1) means that the anchor is at the kernel 1236 * center. 1237 * @param normalize flag, specifying whether the kernel is normalized by its area or not. 1238 * SEE: blur, bilateralFilter, GaussianBlur, medianBlur, integral 1239 */ 1240 public static void boxFilter(Mat src, Mat dst, int ddepth, Size ksize, Point anchor, boolean normalize) { 1241 boxFilter_1(src.nativeObj, dst.nativeObj, ddepth, ksize.width, ksize.height, anchor.x, anchor.y, normalize); 1242 } 1243 1244 /** 1245 * Blurs an image using the box filter. 1246 * 1247 * The function smooths an image using the kernel: 1248 * 1249 * \(\texttt{K} = \alpha \begin{bmatrix} 1 & 1 & 1 & \cdots & 1 & 1 \\ 1 & 1 & 1 & \cdots & 1 & 1 \\ \hdotsfor{6} \\ 1 & 1 & 1 & \cdots & 1 & 1 \end{bmatrix}\) 1250 * 1251 * where 1252 * 1253 * \(\alpha = \begin{cases} \frac{1}{\texttt{ksize.width*ksize.height}} & \texttt{when } \texttt{normalize=true} \\1 & \texttt{otherwise}\end{cases}\) 1254 * 1255 * Unnormalized box filter is useful for computing various integral characteristics over each pixel 1256 * neighborhood, such as covariance matrices of image derivatives (used in dense optical flow 1257 * algorithms, and so on). If you need to compute pixel sums over variable-size windows, use #integral. 1258 * 1259 * @param src input image. 1260 * @param dst output image of the same size and type as src. 1261 * @param ddepth the output image depth (-1 to use src.depth()). 1262 * @param ksize blurring kernel size. 1263 * @param anchor anchor point; default value Point(-1,-1) means that the anchor is at the kernel 1264 * center. 1265 * SEE: blur, bilateralFilter, GaussianBlur, medianBlur, integral 1266 */ 1267 public static void boxFilter(Mat src, Mat dst, int ddepth, Size ksize, Point anchor) { 1268 boxFilter_2(src.nativeObj, dst.nativeObj, ddepth, ksize.width, ksize.height, anchor.x, anchor.y); 1269 } 1270 1271 /** 1272 * Blurs an image using the box filter. 1273 * 1274 * The function smooths an image using the kernel: 1275 * 1276 * \(\texttt{K} = \alpha \begin{bmatrix} 1 & 1 & 1 & \cdots & 1 & 1 \\ 1 & 1 & 1 & \cdots & 1 & 1 \\ \hdotsfor{6} \\ 1 & 1 & 1 & \cdots & 1 & 1 \end{bmatrix}\) 1277 * 1278 * where 1279 * 1280 * \(\alpha = \begin{cases} \frac{1}{\texttt{ksize.width*ksize.height}} & \texttt{when } \texttt{normalize=true} \\1 & \texttt{otherwise}\end{cases}\) 1281 * 1282 * Unnormalized box filter is useful for computing various integral characteristics over each pixel 1283 * neighborhood, such as covariance matrices of image derivatives (used in dense optical flow 1284 * algorithms, and so on). If you need to compute pixel sums over variable-size windows, use #integral. 1285 * 1286 * @param src input image. 1287 * @param dst output image of the same size and type as src. 1288 * @param ddepth the output image depth (-1 to use src.depth()). 1289 * @param ksize blurring kernel size. 1290 * center. 1291 * SEE: blur, bilateralFilter, GaussianBlur, medianBlur, integral 1292 */ 1293 public static void boxFilter(Mat src, Mat dst, int ddepth, Size ksize) { 1294 boxFilter_3(src.nativeObj, dst.nativeObj, ddepth, ksize.width, ksize.height); 1295 } 1296 1297 1298 // 1299 // C++: void cv::sqrBoxFilter(Mat src, Mat& dst, int ddepth, Size ksize, Point anchor = Point(-1, -1), bool normalize = true, int borderType = BORDER_DEFAULT) 1300 // 1301 1302 /** 1303 * Calculates the normalized sum of squares of the pixel values overlapping the filter. 1304 * 1305 * For every pixel \( (x, y) \) in the source image, the function calculates the sum of squares of those neighboring 1306 * pixel values which overlap the filter placed over the pixel \( (x, y) \). 1307 * 1308 * The unnormalized square box filter can be useful in computing local image statistics such as the local 1309 * variance and standard deviation around the neighborhood of a pixel. 1310 * 1311 * @param src input image 1312 * @param dst output image of the same size and type as src 1313 * @param ddepth the output image depth (-1 to use src.depth()) 1314 * @param ksize kernel size 1315 * @param anchor kernel anchor point. The default value of Point(-1, -1) denotes that the anchor is at the kernel 1316 * center. 1317 * @param normalize flag, specifying whether the kernel is to be normalized by it's area or not. 1318 * @param borderType border mode used to extrapolate pixels outside of the image, see #BorderTypes. #BORDER_WRAP is not supported. 1319 * SEE: boxFilter 1320 */ 1321 public static void sqrBoxFilter(Mat src, Mat dst, int ddepth, Size ksize, Point anchor, boolean normalize, int borderType) { 1322 sqrBoxFilter_0(src.nativeObj, dst.nativeObj, ddepth, ksize.width, ksize.height, anchor.x, anchor.y, normalize, borderType); 1323 } 1324 1325 /** 1326 * Calculates the normalized sum of squares of the pixel values overlapping the filter. 1327 * 1328 * For every pixel \( (x, y) \) in the source image, the function calculates the sum of squares of those neighboring 1329 * pixel values which overlap the filter placed over the pixel \( (x, y) \). 1330 * 1331 * The unnormalized square box filter can be useful in computing local image statistics such as the local 1332 * variance and standard deviation around the neighborhood of a pixel. 1333 * 1334 * @param src input image 1335 * @param dst output image of the same size and type as src 1336 * @param ddepth the output image depth (-1 to use src.depth()) 1337 * @param ksize kernel size 1338 * @param anchor kernel anchor point. The default value of Point(-1, -1) denotes that the anchor is at the kernel 1339 * center. 1340 * @param normalize flag, specifying whether the kernel is to be normalized by it's area or not. 1341 * SEE: boxFilter 1342 */ 1343 public static void sqrBoxFilter(Mat src, Mat dst, int ddepth, Size ksize, Point anchor, boolean normalize) { 1344 sqrBoxFilter_1(src.nativeObj, dst.nativeObj, ddepth, ksize.width, ksize.height, anchor.x, anchor.y, normalize); 1345 } 1346 1347 /** 1348 * Calculates the normalized sum of squares of the pixel values overlapping the filter. 1349 * 1350 * For every pixel \( (x, y) \) in the source image, the function calculates the sum of squares of those neighboring 1351 * pixel values which overlap the filter placed over the pixel \( (x, y) \). 1352 * 1353 * The unnormalized square box filter can be useful in computing local image statistics such as the local 1354 * variance and standard deviation around the neighborhood of a pixel. 1355 * 1356 * @param src input image 1357 * @param dst output image of the same size and type as src 1358 * @param ddepth the output image depth (-1 to use src.depth()) 1359 * @param ksize kernel size 1360 * @param anchor kernel anchor point. The default value of Point(-1, -1) denotes that the anchor is at the kernel 1361 * center. 1362 * SEE: boxFilter 1363 */ 1364 public static void sqrBoxFilter(Mat src, Mat dst, int ddepth, Size ksize, Point anchor) { 1365 sqrBoxFilter_2(src.nativeObj, dst.nativeObj, ddepth, ksize.width, ksize.height, anchor.x, anchor.y); 1366 } 1367 1368 /** 1369 * Calculates the normalized sum of squares of the pixel values overlapping the filter. 1370 * 1371 * For every pixel \( (x, y) \) in the source image, the function calculates the sum of squares of those neighboring 1372 * pixel values which overlap the filter placed over the pixel \( (x, y) \). 1373 * 1374 * The unnormalized square box filter can be useful in computing local image statistics such as the local 1375 * variance and standard deviation around the neighborhood of a pixel. 1376 * 1377 * @param src input image 1378 * @param dst output image of the same size and type as src 1379 * @param ddepth the output image depth (-1 to use src.depth()) 1380 * @param ksize kernel size 1381 * center. 1382 * SEE: boxFilter 1383 */ 1384 public static void sqrBoxFilter(Mat src, Mat dst, int ddepth, Size ksize) { 1385 sqrBoxFilter_3(src.nativeObj, dst.nativeObj, ddepth, ksize.width, ksize.height); 1386 } 1387 1388 1389 // 1390 // C++: void cv::blur(Mat src, Mat& dst, Size ksize, Point anchor = Point(-1,-1), int borderType = BORDER_DEFAULT) 1391 // 1392 1393 /** 1394 * Blurs an image using the normalized box filter. 1395 * 1396 * The function smooths an image using the kernel: 1397 * 1398 * \(\texttt{K} = \frac{1}{\texttt{ksize.width*ksize.height}} \begin{bmatrix} 1 & 1 & 1 & \cdots & 1 & 1 \\ 1 & 1 & 1 & \cdots & 1 & 1 \\ \hdotsfor{6} \\ 1 & 1 & 1 & \cdots & 1 & 1 \\ \end{bmatrix}\) 1399 * 1400 * The call {@code blur(src, dst, ksize, anchor, borderType)} is equivalent to `boxFilter(src, dst, src.type(), ksize, 1401 * anchor, true, borderType)`. 1402 * 1403 * @param src input image; it can have any number of channels, which are processed independently, but 1404 * the depth should be CV_8U, CV_16U, CV_16S, CV_32F or CV_64F. 1405 * @param dst output image of the same size and type as src. 1406 * @param ksize blurring kernel size. 1407 * @param anchor anchor point; default value Point(-1,-1) means that the anchor is at the kernel 1408 * center. 1409 * @param borderType border mode used to extrapolate pixels outside of the image, see #BorderTypes. #BORDER_WRAP is not supported. 1410 * SEE: boxFilter, bilateralFilter, GaussianBlur, medianBlur 1411 */ 1412 public static void blur(Mat src, Mat dst, Size ksize, Point anchor, int borderType) { 1413 blur_0(src.nativeObj, dst.nativeObj, ksize.width, ksize.height, anchor.x, anchor.y, borderType); 1414 } 1415 1416 /** 1417 * Blurs an image using the normalized box filter. 1418 * 1419 * The function smooths an image using the kernel: 1420 * 1421 * \(\texttt{K} = \frac{1}{\texttt{ksize.width*ksize.height}} \begin{bmatrix} 1 & 1 & 1 & \cdots & 1 & 1 \\ 1 & 1 & 1 & \cdots & 1 & 1 \\ \hdotsfor{6} \\ 1 & 1 & 1 & \cdots & 1 & 1 \\ \end{bmatrix}\) 1422 * 1423 * The call {@code blur(src, dst, ksize, anchor, borderType)} is equivalent to `boxFilter(src, dst, src.type(), ksize, 1424 * anchor, true, borderType)`. 1425 * 1426 * @param src input image; it can have any number of channels, which are processed independently, but 1427 * the depth should be CV_8U, CV_16U, CV_16S, CV_32F or CV_64F. 1428 * @param dst output image of the same size and type as src. 1429 * @param ksize blurring kernel size. 1430 * @param anchor anchor point; default value Point(-1,-1) means that the anchor is at the kernel 1431 * center. 1432 * SEE: boxFilter, bilateralFilter, GaussianBlur, medianBlur 1433 */ 1434 public static void blur(Mat src, Mat dst, Size ksize, Point anchor) { 1435 blur_1(src.nativeObj, dst.nativeObj, ksize.width, ksize.height, anchor.x, anchor.y); 1436 } 1437 1438 /** 1439 * Blurs an image using the normalized box filter. 1440 * 1441 * The function smooths an image using the kernel: 1442 * 1443 * \(\texttt{K} = \frac{1}{\texttt{ksize.width*ksize.height}} \begin{bmatrix} 1 & 1 & 1 & \cdots & 1 & 1 \\ 1 & 1 & 1 & \cdots & 1 & 1 \\ \hdotsfor{6} \\ 1 & 1 & 1 & \cdots & 1 & 1 \\ \end{bmatrix}\) 1444 * 1445 * The call {@code blur(src, dst, ksize, anchor, borderType)} is equivalent to `boxFilter(src, dst, src.type(), ksize, 1446 * anchor, true, borderType)`. 1447 * 1448 * @param src input image; it can have any number of channels, which are processed independently, but 1449 * the depth should be CV_8U, CV_16U, CV_16S, CV_32F or CV_64F. 1450 * @param dst output image of the same size and type as src. 1451 * @param ksize blurring kernel size. 1452 * center. 1453 * SEE: boxFilter, bilateralFilter, GaussianBlur, medianBlur 1454 */ 1455 public static void blur(Mat src, Mat dst, Size ksize) { 1456 blur_2(src.nativeObj, dst.nativeObj, ksize.width, ksize.height); 1457 } 1458 1459 1460 // 1461 // C++: void cv::filter2D(Mat src, Mat& dst, int ddepth, Mat kernel, Point anchor = Point(-1,-1), double delta = 0, int borderType = BORDER_DEFAULT) 1462 // 1463 1464 /** 1465 * Convolves an image with the kernel. 1466 * 1467 * The function applies an arbitrary linear filter to an image. In-place operation is supported. When 1468 * the aperture is partially outside the image, the function interpolates outlier pixel values 1469 * according to the specified border mode. 1470 * 1471 * The function does actually compute correlation, not the convolution: 1472 * 1473 * \(\texttt{dst} (x,y) = \sum _{ \substack{0\leq x' < \texttt{kernel.cols}\\{0\leq y' < \texttt{kernel.rows}}}} \texttt{kernel} (x',y')* \texttt{src} (x+x'- \texttt{anchor.x} ,y+y'- \texttt{anchor.y} )\) 1474 * 1475 * That is, the kernel is not mirrored around the anchor point. If you need a real convolution, flip 1476 * the kernel using #flip and set the new anchor to `(kernel.cols - anchor.x - 1, kernel.rows - 1477 * anchor.y - 1)`. 1478 * 1479 * The function uses the DFT-based algorithm in case of sufficiently large kernels (~{@code 11 x 11} or 1480 * larger) and the direct algorithm for small kernels. 1481 * 1482 * @param src input image. 1483 * @param dst output image of the same size and the same number of channels as src. 1484 * @param ddepth desired depth of the destination image, see REF: filter_depths "combinations" 1485 * @param kernel convolution kernel (or rather a correlation kernel), a single-channel floating point 1486 * matrix; if you want to apply different kernels to different channels, split the image into 1487 * separate color planes using split and process them individually. 1488 * @param anchor anchor of the kernel that indicates the relative position of a filtered point within 1489 * the kernel; the anchor should lie within the kernel; default value (-1,-1) means that the anchor 1490 * is at the kernel center. 1491 * @param delta optional value added to the filtered pixels before storing them in dst. 1492 * @param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported. 1493 * SEE: sepFilter2D, dft, matchTemplate 1494 */ 1495 public static void filter2D(Mat src, Mat dst, int ddepth, Mat kernel, Point anchor, double delta, int borderType) { 1496 filter2D_0(src.nativeObj, dst.nativeObj, ddepth, kernel.nativeObj, anchor.x, anchor.y, delta, borderType); 1497 } 1498 1499 /** 1500 * Convolves an image with the kernel. 1501 * 1502 * The function applies an arbitrary linear filter to an image. In-place operation is supported. When 1503 * the aperture is partially outside the image, the function interpolates outlier pixel values 1504 * according to the specified border mode. 1505 * 1506 * The function does actually compute correlation, not the convolution: 1507 * 1508 * \(\texttt{dst} (x,y) = \sum _{ \substack{0\leq x' < \texttt{kernel.cols}\\{0\leq y' < \texttt{kernel.rows}}}} \texttt{kernel} (x',y')* \texttt{src} (x+x'- \texttt{anchor.x} ,y+y'- \texttt{anchor.y} )\) 1509 * 1510 * That is, the kernel is not mirrored around the anchor point. If you need a real convolution, flip 1511 * the kernel using #flip and set the new anchor to `(kernel.cols - anchor.x - 1, kernel.rows - 1512 * anchor.y - 1)`. 1513 * 1514 * The function uses the DFT-based algorithm in case of sufficiently large kernels (~{@code 11 x 11} or 1515 * larger) and the direct algorithm for small kernels. 1516 * 1517 * @param src input image. 1518 * @param dst output image of the same size and the same number of channels as src. 1519 * @param ddepth desired depth of the destination image, see REF: filter_depths "combinations" 1520 * @param kernel convolution kernel (or rather a correlation kernel), a single-channel floating point 1521 * matrix; if you want to apply different kernels to different channels, split the image into 1522 * separate color planes using split and process them individually. 1523 * @param anchor anchor of the kernel that indicates the relative position of a filtered point within 1524 * the kernel; the anchor should lie within the kernel; default value (-1,-1) means that the anchor 1525 * is at the kernel center. 1526 * @param delta optional value added to the filtered pixels before storing them in dst. 1527 * SEE: sepFilter2D, dft, matchTemplate 1528 */ 1529 public static void filter2D(Mat src, Mat dst, int ddepth, Mat kernel, Point anchor, double delta) { 1530 filter2D_1(src.nativeObj, dst.nativeObj, ddepth, kernel.nativeObj, anchor.x, anchor.y, delta); 1531 } 1532 1533 /** 1534 * Convolves an image with the kernel. 1535 * 1536 * The function applies an arbitrary linear filter to an image. In-place operation is supported. When 1537 * the aperture is partially outside the image, the function interpolates outlier pixel values 1538 * according to the specified border mode. 1539 * 1540 * The function does actually compute correlation, not the convolution: 1541 * 1542 * \(\texttt{dst} (x,y) = \sum _{ \substack{0\leq x' < \texttt{kernel.cols}\\{0\leq y' < \texttt{kernel.rows}}}} \texttt{kernel} (x',y')* \texttt{src} (x+x'- \texttt{anchor.x} ,y+y'- \texttt{anchor.y} )\) 1543 * 1544 * That is, the kernel is not mirrored around the anchor point. If you need a real convolution, flip 1545 * the kernel using #flip and set the new anchor to `(kernel.cols - anchor.x - 1, kernel.rows - 1546 * anchor.y - 1)`. 1547 * 1548 * The function uses the DFT-based algorithm in case of sufficiently large kernels (~{@code 11 x 11} or 1549 * larger) and the direct algorithm for small kernels. 1550 * 1551 * @param src input image. 1552 * @param dst output image of the same size and the same number of channels as src. 1553 * @param ddepth desired depth of the destination image, see REF: filter_depths "combinations" 1554 * @param kernel convolution kernel (or rather a correlation kernel), a single-channel floating point 1555 * matrix; if you want to apply different kernels to different channels, split the image into 1556 * separate color planes using split and process them individually. 1557 * @param anchor anchor of the kernel that indicates the relative position of a filtered point within 1558 * the kernel; the anchor should lie within the kernel; default value (-1,-1) means that the anchor 1559 * is at the kernel center. 1560 * SEE: sepFilter2D, dft, matchTemplate 1561 */ 1562 public static void filter2D(Mat src, Mat dst, int ddepth, Mat kernel, Point anchor) { 1563 filter2D_2(src.nativeObj, dst.nativeObj, ddepth, kernel.nativeObj, anchor.x, anchor.y); 1564 } 1565 1566 /** 1567 * Convolves an image with the kernel. 1568 * 1569 * The function applies an arbitrary linear filter to an image. In-place operation is supported. When 1570 * the aperture is partially outside the image, the function interpolates outlier pixel values 1571 * according to the specified border mode. 1572 * 1573 * The function does actually compute correlation, not the convolution: 1574 * 1575 * \(\texttt{dst} (x,y) = \sum _{ \substack{0\leq x' < \texttt{kernel.cols}\\{0\leq y' < \texttt{kernel.rows}}}} \texttt{kernel} (x',y')* \texttt{src} (x+x'- \texttt{anchor.x} ,y+y'- \texttt{anchor.y} )\) 1576 * 1577 * That is, the kernel is not mirrored around the anchor point. If you need a real convolution, flip 1578 * the kernel using #flip and set the new anchor to `(kernel.cols - anchor.x - 1, kernel.rows - 1579 * anchor.y - 1)`. 1580 * 1581 * The function uses the DFT-based algorithm in case of sufficiently large kernels (~{@code 11 x 11} or 1582 * larger) and the direct algorithm for small kernels. 1583 * 1584 * @param src input image. 1585 * @param dst output image of the same size and the same number of channels as src. 1586 * @param ddepth desired depth of the destination image, see REF: filter_depths "combinations" 1587 * @param kernel convolution kernel (or rather a correlation kernel), a single-channel floating point 1588 * matrix; if you want to apply different kernels to different channels, split the image into 1589 * separate color planes using split and process them individually. 1590 * the kernel; the anchor should lie within the kernel; default value (-1,-1) means that the anchor 1591 * is at the kernel center. 1592 * SEE: sepFilter2D, dft, matchTemplate 1593 */ 1594 public static void filter2D(Mat src, Mat dst, int ddepth, Mat kernel) { 1595 filter2D_3(src.nativeObj, dst.nativeObj, ddepth, kernel.nativeObj); 1596 } 1597 1598 1599 // 1600 // C++: void cv::sepFilter2D(Mat src, Mat& dst, int ddepth, Mat kernelX, Mat kernelY, Point anchor = Point(-1,-1), double delta = 0, int borderType = BORDER_DEFAULT) 1601 // 1602 1603 /** 1604 * Applies a separable linear filter to an image. 1605 * 1606 * The function applies a separable linear filter to the image. That is, first, every row of src is 1607 * filtered with the 1D kernel kernelX. Then, every column of the result is filtered with the 1D 1608 * kernel kernelY. The final result shifted by delta is stored in dst . 1609 * 1610 * @param src Source image. 1611 * @param dst Destination image of the same size and the same number of channels as src . 1612 * @param ddepth Destination image depth, see REF: filter_depths "combinations" 1613 * @param kernelX Coefficients for filtering each row. 1614 * @param kernelY Coefficients for filtering each column. 1615 * @param anchor Anchor position within the kernel. The default value \((-1,-1)\) means that the anchor 1616 * is at the kernel center. 1617 * @param delta Value added to the filtered results before storing them. 1618 * @param borderType Pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported. 1619 * SEE: filter2D, Sobel, GaussianBlur, boxFilter, blur 1620 */ 1621 public static void sepFilter2D(Mat src, Mat dst, int ddepth, Mat kernelX, Mat kernelY, Point anchor, double delta, int borderType) { 1622 sepFilter2D_0(src.nativeObj, dst.nativeObj, ddepth, kernelX.nativeObj, kernelY.nativeObj, anchor.x, anchor.y, delta, borderType); 1623 } 1624 1625 /** 1626 * Applies a separable linear filter to an image. 1627 * 1628 * The function applies a separable linear filter to the image. That is, first, every row of src is 1629 * filtered with the 1D kernel kernelX. Then, every column of the result is filtered with the 1D 1630 * kernel kernelY. The final result shifted by delta is stored in dst . 1631 * 1632 * @param src Source image. 1633 * @param dst Destination image of the same size and the same number of channels as src . 1634 * @param ddepth Destination image depth, see REF: filter_depths "combinations" 1635 * @param kernelX Coefficients for filtering each row. 1636 * @param kernelY Coefficients for filtering each column. 1637 * @param anchor Anchor position within the kernel. The default value \((-1,-1)\) means that the anchor 1638 * is at the kernel center. 1639 * @param delta Value added to the filtered results before storing them. 1640 * SEE: filter2D, Sobel, GaussianBlur, boxFilter, blur 1641 */ 1642 public static void sepFilter2D(Mat src, Mat dst, int ddepth, Mat kernelX, Mat kernelY, Point anchor, double delta) { 1643 sepFilter2D_1(src.nativeObj, dst.nativeObj, ddepth, kernelX.nativeObj, kernelY.nativeObj, anchor.x, anchor.y, delta); 1644 } 1645 1646 /** 1647 * Applies a separable linear filter to an image. 1648 * 1649 * The function applies a separable linear filter to the image. That is, first, every row of src is 1650 * filtered with the 1D kernel kernelX. Then, every column of the result is filtered with the 1D 1651 * kernel kernelY. The final result shifted by delta is stored in dst . 1652 * 1653 * @param src Source image. 1654 * @param dst Destination image of the same size and the same number of channels as src . 1655 * @param ddepth Destination image depth, see REF: filter_depths "combinations" 1656 * @param kernelX Coefficients for filtering each row. 1657 * @param kernelY Coefficients for filtering each column. 1658 * @param anchor Anchor position within the kernel. The default value \((-1,-1)\) means that the anchor 1659 * is at the kernel center. 1660 * SEE: filter2D, Sobel, GaussianBlur, boxFilter, blur 1661 */ 1662 public static void sepFilter2D(Mat src, Mat dst, int ddepth, Mat kernelX, Mat kernelY, Point anchor) { 1663 sepFilter2D_2(src.nativeObj, dst.nativeObj, ddepth, kernelX.nativeObj, kernelY.nativeObj, anchor.x, anchor.y); 1664 } 1665 1666 /** 1667 * Applies a separable linear filter to an image. 1668 * 1669 * The function applies a separable linear filter to the image. That is, first, every row of src is 1670 * filtered with the 1D kernel kernelX. Then, every column of the result is filtered with the 1D 1671 * kernel kernelY. The final result shifted by delta is stored in dst . 1672 * 1673 * @param src Source image. 1674 * @param dst Destination image of the same size and the same number of channels as src . 1675 * @param ddepth Destination image depth, see REF: filter_depths "combinations" 1676 * @param kernelX Coefficients for filtering each row. 1677 * @param kernelY Coefficients for filtering each column. 1678 * is at the kernel center. 1679 * SEE: filter2D, Sobel, GaussianBlur, boxFilter, blur 1680 */ 1681 public static void sepFilter2D(Mat src, Mat dst, int ddepth, Mat kernelX, Mat kernelY) { 1682 sepFilter2D_3(src.nativeObj, dst.nativeObj, ddepth, kernelX.nativeObj, kernelY.nativeObj); 1683 } 1684 1685 1686 // 1687 // C++: void cv::Sobel(Mat src, Mat& dst, int ddepth, int dx, int dy, int ksize = 3, double scale = 1, double delta = 0, int borderType = BORDER_DEFAULT) 1688 // 1689 1690 /** 1691 * Calculates the first, second, third, or mixed image derivatives using an extended Sobel operator. 1692 * 1693 * In all cases except one, the \(\texttt{ksize} \times \texttt{ksize}\) separable kernel is used to 1694 * calculate the derivative. When \(\texttt{ksize = 1}\), the \(3 \times 1\) or \(1 \times 3\) 1695 * kernel is used (that is, no Gaussian smoothing is done). {@code ksize = 1} can only be used for the first 1696 * or the second x- or y- derivatives. 1697 * 1698 * There is also the special value {@code ksize = #FILTER_SCHARR (-1)} that corresponds to the \(3\times3\) Scharr 1699 * filter that may give more accurate results than the \(3\times3\) Sobel. The Scharr aperture is 1700 * 1701 * \(\vecthreethree{-3}{0}{3}{-10}{0}{10}{-3}{0}{3}\) 1702 * 1703 * for the x-derivative, or transposed for the y-derivative. 1704 * 1705 * The function calculates an image derivative by convolving the image with the appropriate kernel: 1706 * 1707 * \(\texttt{dst} = \frac{\partial^{xorder+yorder} \texttt{src}}{\partial x^{xorder} \partial y^{yorder}}\) 1708 * 1709 * The Sobel operators combine Gaussian smoothing and differentiation, so the result is more or less 1710 * resistant to the noise. Most often, the function is called with ( xorder = 1, yorder = 0, ksize = 3) 1711 * or ( xorder = 0, yorder = 1, ksize = 3) to calculate the first x- or y- image derivative. The first 1712 * case corresponds to a kernel of: 1713 * 1714 * \(\vecthreethree{-1}{0}{1}{-2}{0}{2}{-1}{0}{1}\) 1715 * 1716 * The second case corresponds to a kernel of: 1717 * 1718 * \(\vecthreethree{-1}{-2}{-1}{0}{0}{0}{1}{2}{1}\) 1719 * 1720 * @param src input image. 1721 * @param dst output image of the same size and the same number of channels as src . 1722 * @param ddepth output image depth, see REF: filter_depths "combinations"; in the case of 1723 * 8-bit input images it will result in truncated derivatives. 1724 * @param dx order of the derivative x. 1725 * @param dy order of the derivative y. 1726 * @param ksize size of the extended Sobel kernel; it must be 1, 3, 5, or 7. 1727 * @param scale optional scale factor for the computed derivative values; by default, no scaling is 1728 * applied (see #getDerivKernels for details). 1729 * @param delta optional delta value that is added to the results prior to storing them in dst. 1730 * @param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported. 1731 * SEE: Scharr, Laplacian, sepFilter2D, filter2D, GaussianBlur, cartToPolar 1732 */ 1733 public static void Sobel(Mat src, Mat dst, int ddepth, int dx, int dy, int ksize, double scale, double delta, int borderType) { 1734 Sobel_0(src.nativeObj, dst.nativeObj, ddepth, dx, dy, ksize, scale, delta, borderType); 1735 } 1736 1737 /** 1738 * Calculates the first, second, third, or mixed image derivatives using an extended Sobel operator. 1739 * 1740 * In all cases except one, the \(\texttt{ksize} \times \texttt{ksize}\) separable kernel is used to 1741 * calculate the derivative. When \(\texttt{ksize = 1}\), the \(3 \times 1\) or \(1 \times 3\) 1742 * kernel is used (that is, no Gaussian smoothing is done). {@code ksize = 1} can only be used for the first 1743 * or the second x- or y- derivatives. 1744 * 1745 * There is also the special value {@code ksize = #FILTER_SCHARR (-1)} that corresponds to the \(3\times3\) Scharr 1746 * filter that may give more accurate results than the \(3\times3\) Sobel. The Scharr aperture is 1747 * 1748 * \(\vecthreethree{-3}{0}{3}{-10}{0}{10}{-3}{0}{3}\) 1749 * 1750 * for the x-derivative, or transposed for the y-derivative. 1751 * 1752 * The function calculates an image derivative by convolving the image with the appropriate kernel: 1753 * 1754 * \(\texttt{dst} = \frac{\partial^{xorder+yorder} \texttt{src}}{\partial x^{xorder} \partial y^{yorder}}\) 1755 * 1756 * The Sobel operators combine Gaussian smoothing and differentiation, so the result is more or less 1757 * resistant to the noise. Most often, the function is called with ( xorder = 1, yorder = 0, ksize = 3) 1758 * or ( xorder = 0, yorder = 1, ksize = 3) to calculate the first x- or y- image derivative. The first 1759 * case corresponds to a kernel of: 1760 * 1761 * \(\vecthreethree{-1}{0}{1}{-2}{0}{2}{-1}{0}{1}\) 1762 * 1763 * The second case corresponds to a kernel of: 1764 * 1765 * \(\vecthreethree{-1}{-2}{-1}{0}{0}{0}{1}{2}{1}\) 1766 * 1767 * @param src input image. 1768 * @param dst output image of the same size and the same number of channels as src . 1769 * @param ddepth output image depth, see REF: filter_depths "combinations"; in the case of 1770 * 8-bit input images it will result in truncated derivatives. 1771 * @param dx order of the derivative x. 1772 * @param dy order of the derivative y. 1773 * @param ksize size of the extended Sobel kernel; it must be 1, 3, 5, or 7. 1774 * @param scale optional scale factor for the computed derivative values; by default, no scaling is 1775 * applied (see #getDerivKernels for details). 1776 * @param delta optional delta value that is added to the results prior to storing them in dst. 1777 * SEE: Scharr, Laplacian, sepFilter2D, filter2D, GaussianBlur, cartToPolar 1778 */ 1779 public static void Sobel(Mat src, Mat dst, int ddepth, int dx, int dy, int ksize, double scale, double delta) { 1780 Sobel_1(src.nativeObj, dst.nativeObj, ddepth, dx, dy, ksize, scale, delta); 1781 } 1782 1783 /** 1784 * Calculates the first, second, third, or mixed image derivatives using an extended Sobel operator. 1785 * 1786 * In all cases except one, the \(\texttt{ksize} \times \texttt{ksize}\) separable kernel is used to 1787 * calculate the derivative. When \(\texttt{ksize = 1}\), the \(3 \times 1\) or \(1 \times 3\) 1788 * kernel is used (that is, no Gaussian smoothing is done). {@code ksize = 1} can only be used for the first 1789 * or the second x- or y- derivatives. 1790 * 1791 * There is also the special value {@code ksize = #FILTER_SCHARR (-1)} that corresponds to the \(3\times3\) Scharr 1792 * filter that may give more accurate results than the \(3\times3\) Sobel. The Scharr aperture is 1793 * 1794 * \(\vecthreethree{-3}{0}{3}{-10}{0}{10}{-3}{0}{3}\) 1795 * 1796 * for the x-derivative, or transposed for the y-derivative. 1797 * 1798 * The function calculates an image derivative by convolving the image with the appropriate kernel: 1799 * 1800 * \(\texttt{dst} = \frac{\partial^{xorder+yorder} \texttt{src}}{\partial x^{xorder} \partial y^{yorder}}\) 1801 * 1802 * The Sobel operators combine Gaussian smoothing and differentiation, so the result is more or less 1803 * resistant to the noise. Most often, the function is called with ( xorder = 1, yorder = 0, ksize = 3) 1804 * or ( xorder = 0, yorder = 1, ksize = 3) to calculate the first x- or y- image derivative. The first 1805 * case corresponds to a kernel of: 1806 * 1807 * \(\vecthreethree{-1}{0}{1}{-2}{0}{2}{-1}{0}{1}\) 1808 * 1809 * The second case corresponds to a kernel of: 1810 * 1811 * \(\vecthreethree{-1}{-2}{-1}{0}{0}{0}{1}{2}{1}\) 1812 * 1813 * @param src input image. 1814 * @param dst output image of the same size and the same number of channels as src . 1815 * @param ddepth output image depth, see REF: filter_depths "combinations"; in the case of 1816 * 8-bit input images it will result in truncated derivatives. 1817 * @param dx order of the derivative x. 1818 * @param dy order of the derivative y. 1819 * @param ksize size of the extended Sobel kernel; it must be 1, 3, 5, or 7. 1820 * @param scale optional scale factor for the computed derivative values; by default, no scaling is 1821 * applied (see #getDerivKernels for details). 1822 * SEE: Scharr, Laplacian, sepFilter2D, filter2D, GaussianBlur, cartToPolar 1823 */ 1824 public static void Sobel(Mat src, Mat dst, int ddepth, int dx, int dy, int ksize, double scale) { 1825 Sobel_2(src.nativeObj, dst.nativeObj, ddepth, dx, dy, ksize, scale); 1826 } 1827 1828 /** 1829 * Calculates the first, second, third, or mixed image derivatives using an extended Sobel operator. 1830 * 1831 * In all cases except one, the \(\texttt{ksize} \times \texttt{ksize}\) separable kernel is used to 1832 * calculate the derivative. When \(\texttt{ksize = 1}\), the \(3 \times 1\) or \(1 \times 3\) 1833 * kernel is used (that is, no Gaussian smoothing is done). {@code ksize = 1} can only be used for the first 1834 * or the second x- or y- derivatives. 1835 * 1836 * There is also the special value {@code ksize = #FILTER_SCHARR (-1)} that corresponds to the \(3\times3\) Scharr 1837 * filter that may give more accurate results than the \(3\times3\) Sobel. The Scharr aperture is 1838 * 1839 * \(\vecthreethree{-3}{0}{3}{-10}{0}{10}{-3}{0}{3}\) 1840 * 1841 * for the x-derivative, or transposed for the y-derivative. 1842 * 1843 * The function calculates an image derivative by convolving the image with the appropriate kernel: 1844 * 1845 * \(\texttt{dst} = \frac{\partial^{xorder+yorder} \texttt{src}}{\partial x^{xorder} \partial y^{yorder}}\) 1846 * 1847 * The Sobel operators combine Gaussian smoothing and differentiation, so the result is more or less 1848 * resistant to the noise. Most often, the function is called with ( xorder = 1, yorder = 0, ksize = 3) 1849 * or ( xorder = 0, yorder = 1, ksize = 3) to calculate the first x- or y- image derivative. The first 1850 * case corresponds to a kernel of: 1851 * 1852 * \(\vecthreethree{-1}{0}{1}{-2}{0}{2}{-1}{0}{1}\) 1853 * 1854 * The second case corresponds to a kernel of: 1855 * 1856 * \(\vecthreethree{-1}{-2}{-1}{0}{0}{0}{1}{2}{1}\) 1857 * 1858 * @param src input image. 1859 * @param dst output image of the same size and the same number of channels as src . 1860 * @param ddepth output image depth, see REF: filter_depths "combinations"; in the case of 1861 * 8-bit input images it will result in truncated derivatives. 1862 * @param dx order of the derivative x. 1863 * @param dy order of the derivative y. 1864 * @param ksize size of the extended Sobel kernel; it must be 1, 3, 5, or 7. 1865 * applied (see #getDerivKernels for details). 1866 * SEE: Scharr, Laplacian, sepFilter2D, filter2D, GaussianBlur, cartToPolar 1867 */ 1868 public static void Sobel(Mat src, Mat dst, int ddepth, int dx, int dy, int ksize) { 1869 Sobel_3(src.nativeObj, dst.nativeObj, ddepth, dx, dy, ksize); 1870 } 1871 1872 /** 1873 * Calculates the first, second, third, or mixed image derivatives using an extended Sobel operator. 1874 * 1875 * In all cases except one, the \(\texttt{ksize} \times \texttt{ksize}\) separable kernel is used to 1876 * calculate the derivative. When \(\texttt{ksize = 1}\), the \(3 \times 1\) or \(1 \times 3\) 1877 * kernel is used (that is, no Gaussian smoothing is done). {@code ksize = 1} can only be used for the first 1878 * or the second x- or y- derivatives. 1879 * 1880 * There is also the special value {@code ksize = #FILTER_SCHARR (-1)} that corresponds to the \(3\times3\) Scharr 1881 * filter that may give more accurate results than the \(3\times3\) Sobel. The Scharr aperture is 1882 * 1883 * \(\vecthreethree{-3}{0}{3}{-10}{0}{10}{-3}{0}{3}\) 1884 * 1885 * for the x-derivative, or transposed for the y-derivative. 1886 * 1887 * The function calculates an image derivative by convolving the image with the appropriate kernel: 1888 * 1889 * \(\texttt{dst} = \frac{\partial^{xorder+yorder} \texttt{src}}{\partial x^{xorder} \partial y^{yorder}}\) 1890 * 1891 * The Sobel operators combine Gaussian smoothing and differentiation, so the result is more or less 1892 * resistant to the noise. Most often, the function is called with ( xorder = 1, yorder = 0, ksize = 3) 1893 * or ( xorder = 0, yorder = 1, ksize = 3) to calculate the first x- or y- image derivative. The first 1894 * case corresponds to a kernel of: 1895 * 1896 * \(\vecthreethree{-1}{0}{1}{-2}{0}{2}{-1}{0}{1}\) 1897 * 1898 * The second case corresponds to a kernel of: 1899 * 1900 * \(\vecthreethree{-1}{-2}{-1}{0}{0}{0}{1}{2}{1}\) 1901 * 1902 * @param src input image. 1903 * @param dst output image of the same size and the same number of channels as src . 1904 * @param ddepth output image depth, see REF: filter_depths "combinations"; in the case of 1905 * 8-bit input images it will result in truncated derivatives. 1906 * @param dx order of the derivative x. 1907 * @param dy order of the derivative y. 1908 * applied (see #getDerivKernels for details). 1909 * SEE: Scharr, Laplacian, sepFilter2D, filter2D, GaussianBlur, cartToPolar 1910 */ 1911 public static void Sobel(Mat src, Mat dst, int ddepth, int dx, int dy) { 1912 Sobel_4(src.nativeObj, dst.nativeObj, ddepth, dx, dy); 1913 } 1914 1915 1916 // 1917 // C++: void cv::spatialGradient(Mat src, Mat& dx, Mat& dy, int ksize = 3, int borderType = BORDER_DEFAULT) 1918 // 1919 1920 /** 1921 * Calculates the first order image derivative in both x and y using a Sobel operator 1922 * 1923 * Equivalent to calling: 1924 * 1925 * <code> 1926 * Sobel( src, dx, CV_16SC1, 1, 0, 3 ); 1927 * Sobel( src, dy, CV_16SC1, 0, 1, 3 ); 1928 * </code> 1929 * 1930 * @param src input image. 1931 * @param dx output image with first-order derivative in x. 1932 * @param dy output image with first-order derivative in y. 1933 * @param ksize size of Sobel kernel. It must be 3. 1934 * @param borderType pixel extrapolation method, see #BorderTypes. 1935 * Only #BORDER_DEFAULT=#BORDER_REFLECT_101 and #BORDER_REPLICATE are supported. 1936 * 1937 * SEE: Sobel 1938 */ 1939 public static void spatialGradient(Mat src, Mat dx, Mat dy, int ksize, int borderType) { 1940 spatialGradient_0(src.nativeObj, dx.nativeObj, dy.nativeObj, ksize, borderType); 1941 } 1942 1943 /** 1944 * Calculates the first order image derivative in both x and y using a Sobel operator 1945 * 1946 * Equivalent to calling: 1947 * 1948 * <code> 1949 * Sobel( src, dx, CV_16SC1, 1, 0, 3 ); 1950 * Sobel( src, dy, CV_16SC1, 0, 1, 3 ); 1951 * </code> 1952 * 1953 * @param src input image. 1954 * @param dx output image with first-order derivative in x. 1955 * @param dy output image with first-order derivative in y. 1956 * @param ksize size of Sobel kernel. It must be 3. 1957 * Only #BORDER_DEFAULT=#BORDER_REFLECT_101 and #BORDER_REPLICATE are supported. 1958 * 1959 * SEE: Sobel 1960 */ 1961 public static void spatialGradient(Mat src, Mat dx, Mat dy, int ksize) { 1962 spatialGradient_1(src.nativeObj, dx.nativeObj, dy.nativeObj, ksize); 1963 } 1964 1965 /** 1966 * Calculates the first order image derivative in both x and y using a Sobel operator 1967 * 1968 * Equivalent to calling: 1969 * 1970 * <code> 1971 * Sobel( src, dx, CV_16SC1, 1, 0, 3 ); 1972 * Sobel( src, dy, CV_16SC1, 0, 1, 3 ); 1973 * </code> 1974 * 1975 * @param src input image. 1976 * @param dx output image with first-order derivative in x. 1977 * @param dy output image with first-order derivative in y. 1978 * Only #BORDER_DEFAULT=#BORDER_REFLECT_101 and #BORDER_REPLICATE are supported. 1979 * 1980 * SEE: Sobel 1981 */ 1982 public static void spatialGradient(Mat src, Mat dx, Mat dy) { 1983 spatialGradient_2(src.nativeObj, dx.nativeObj, dy.nativeObj); 1984 } 1985 1986 1987 // 1988 // C++: void cv::Scharr(Mat src, Mat& dst, int ddepth, int dx, int dy, double scale = 1, double delta = 0, int borderType = BORDER_DEFAULT) 1989 // 1990 1991 /** 1992 * Calculates the first x- or y- image derivative using Scharr operator. 1993 * 1994 * The function computes the first x- or y- spatial image derivative using the Scharr operator. The 1995 * call 1996 * 1997 * \(\texttt{Scharr(src, dst, ddepth, dx, dy, scale, delta, borderType)}\) 1998 * 1999 * is equivalent to 2000 * 2001 * \(\texttt{Sobel(src, dst, ddepth, dx, dy, FILTER_SCHARR, scale, delta, borderType)} .\) 2002 * 2003 * @param src input image. 2004 * @param dst output image of the same size and the same number of channels as src. 2005 * @param ddepth output image depth, see REF: filter_depths "combinations" 2006 * @param dx order of the derivative x. 2007 * @param dy order of the derivative y. 2008 * @param scale optional scale factor for the computed derivative values; by default, no scaling is 2009 * applied (see #getDerivKernels for details). 2010 * @param delta optional delta value that is added to the results prior to storing them in dst. 2011 * @param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported. 2012 * SEE: cartToPolar 2013 */ 2014 public static void Scharr(Mat src, Mat dst, int ddepth, int dx, int dy, double scale, double delta, int borderType) { 2015 Scharr_0(src.nativeObj, dst.nativeObj, ddepth, dx, dy, scale, delta, borderType); 2016 } 2017 2018 /** 2019 * Calculates the first x- or y- image derivative using Scharr operator. 2020 * 2021 * The function computes the first x- or y- spatial image derivative using the Scharr operator. The 2022 * call 2023 * 2024 * \(\texttt{Scharr(src, dst, ddepth, dx, dy, scale, delta, borderType)}\) 2025 * 2026 * is equivalent to 2027 * 2028 * \(\texttt{Sobel(src, dst, ddepth, dx, dy, FILTER_SCHARR, scale, delta, borderType)} .\) 2029 * 2030 * @param src input image. 2031 * @param dst output image of the same size and the same number of channels as src. 2032 * @param ddepth output image depth, see REF: filter_depths "combinations" 2033 * @param dx order of the derivative x. 2034 * @param dy order of the derivative y. 2035 * @param scale optional scale factor for the computed derivative values; by default, no scaling is 2036 * applied (see #getDerivKernels for details). 2037 * @param delta optional delta value that is added to the results prior to storing them in dst. 2038 * SEE: cartToPolar 2039 */ 2040 public static void Scharr(Mat src, Mat dst, int ddepth, int dx, int dy, double scale, double delta) { 2041 Scharr_1(src.nativeObj, dst.nativeObj, ddepth, dx, dy, scale, delta); 2042 } 2043 2044 /** 2045 * Calculates the first x- or y- image derivative using Scharr operator. 2046 * 2047 * The function computes the first x- or y- spatial image derivative using the Scharr operator. The 2048 * call 2049 * 2050 * \(\texttt{Scharr(src, dst, ddepth, dx, dy, scale, delta, borderType)}\) 2051 * 2052 * is equivalent to 2053 * 2054 * \(\texttt{Sobel(src, dst, ddepth, dx, dy, FILTER_SCHARR, scale, delta, borderType)} .\) 2055 * 2056 * @param src input image. 2057 * @param dst output image of the same size and the same number of channels as src. 2058 * @param ddepth output image depth, see REF: filter_depths "combinations" 2059 * @param dx order of the derivative x. 2060 * @param dy order of the derivative y. 2061 * @param scale optional scale factor for the computed derivative values; by default, no scaling is 2062 * applied (see #getDerivKernels for details). 2063 * SEE: cartToPolar 2064 */ 2065 public static void Scharr(Mat src, Mat dst, int ddepth, int dx, int dy, double scale) { 2066 Scharr_2(src.nativeObj, dst.nativeObj, ddepth, dx, dy, scale); 2067 } 2068 2069 /** 2070 * Calculates the first x- or y- image derivative using Scharr operator. 2071 * 2072 * The function computes the first x- or y- spatial image derivative using the Scharr operator. The 2073 * call 2074 * 2075 * \(\texttt{Scharr(src, dst, ddepth, dx, dy, scale, delta, borderType)}\) 2076 * 2077 * is equivalent to 2078 * 2079 * \(\texttt{Sobel(src, dst, ddepth, dx, dy, FILTER_SCHARR, scale, delta, borderType)} .\) 2080 * 2081 * @param src input image. 2082 * @param dst output image of the same size and the same number of channels as src. 2083 * @param ddepth output image depth, see REF: filter_depths "combinations" 2084 * @param dx order of the derivative x. 2085 * @param dy order of the derivative y. 2086 * applied (see #getDerivKernels for details). 2087 * SEE: cartToPolar 2088 */ 2089 public static void Scharr(Mat src, Mat dst, int ddepth, int dx, int dy) { 2090 Scharr_3(src.nativeObj, dst.nativeObj, ddepth, dx, dy); 2091 } 2092 2093 2094 // 2095 // C++: void cv::Laplacian(Mat src, Mat& dst, int ddepth, int ksize = 1, double scale = 1, double delta = 0, int borderType = BORDER_DEFAULT) 2096 // 2097 2098 /** 2099 * Calculates the Laplacian of an image. 2100 * 2101 * The function calculates the Laplacian of the source image by adding up the second x and y 2102 * derivatives calculated using the Sobel operator: 2103 * 2104 * \(\texttt{dst} = \Delta \texttt{src} = \frac{\partial^2 \texttt{src}}{\partial x^2} + \frac{\partial^2 \texttt{src}}{\partial y^2}\) 2105 * 2106 * This is done when {@code ksize > 1}. When {@code ksize == 1}, the Laplacian is computed by filtering the image 2107 * with the following \(3 \times 3\) aperture: 2108 * 2109 * \(\vecthreethree {0}{1}{0}{1}{-4}{1}{0}{1}{0}\) 2110 * 2111 * @param src Source image. 2112 * @param dst Destination image of the same size and the same number of channels as src . 2113 * @param ddepth Desired depth of the destination image. 2114 * @param ksize Aperture size used to compute the second-derivative filters. See #getDerivKernels for 2115 * details. The size must be positive and odd. 2116 * @param scale Optional scale factor for the computed Laplacian values. By default, no scaling is 2117 * applied. See #getDerivKernels for details. 2118 * @param delta Optional delta value that is added to the results prior to storing them in dst . 2119 * @param borderType Pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported. 2120 * SEE: Sobel, Scharr 2121 */ 2122 public static void Laplacian(Mat src, Mat dst, int ddepth, int ksize, double scale, double delta, int borderType) { 2123 Laplacian_0(src.nativeObj, dst.nativeObj, ddepth, ksize, scale, delta, borderType); 2124 } 2125 2126 /** 2127 * Calculates the Laplacian of an image. 2128 * 2129 * The function calculates the Laplacian of the source image by adding up the second x and y 2130 * derivatives calculated using the Sobel operator: 2131 * 2132 * \(\texttt{dst} = \Delta \texttt{src} = \frac{\partial^2 \texttt{src}}{\partial x^2} + \frac{\partial^2 \texttt{src}}{\partial y^2}\) 2133 * 2134 * This is done when {@code ksize > 1}. When {@code ksize == 1}, the Laplacian is computed by filtering the image 2135 * with the following \(3 \times 3\) aperture: 2136 * 2137 * \(\vecthreethree {0}{1}{0}{1}{-4}{1}{0}{1}{0}\) 2138 * 2139 * @param src Source image. 2140 * @param dst Destination image of the same size and the same number of channels as src . 2141 * @param ddepth Desired depth of the destination image. 2142 * @param ksize Aperture size used to compute the second-derivative filters. See #getDerivKernels for 2143 * details. The size must be positive and odd. 2144 * @param scale Optional scale factor for the computed Laplacian values. By default, no scaling is 2145 * applied. See #getDerivKernels for details. 2146 * @param delta Optional delta value that is added to the results prior to storing them in dst . 2147 * SEE: Sobel, Scharr 2148 */ 2149 public static void Laplacian(Mat src, Mat dst, int ddepth, int ksize, double scale, double delta) { 2150 Laplacian_1(src.nativeObj, dst.nativeObj, ddepth, ksize, scale, delta); 2151 } 2152 2153 /** 2154 * Calculates the Laplacian of an image. 2155 * 2156 * The function calculates the Laplacian of the source image by adding up the second x and y 2157 * derivatives calculated using the Sobel operator: 2158 * 2159 * \(\texttt{dst} = \Delta \texttt{src} = \frac{\partial^2 \texttt{src}}{\partial x^2} + \frac{\partial^2 \texttt{src}}{\partial y^2}\) 2160 * 2161 * This is done when {@code ksize > 1}. When {@code ksize == 1}, the Laplacian is computed by filtering the image 2162 * with the following \(3 \times 3\) aperture: 2163 * 2164 * \(\vecthreethree {0}{1}{0}{1}{-4}{1}{0}{1}{0}\) 2165 * 2166 * @param src Source image. 2167 * @param dst Destination image of the same size and the same number of channels as src . 2168 * @param ddepth Desired depth of the destination image. 2169 * @param ksize Aperture size used to compute the second-derivative filters. See #getDerivKernels for 2170 * details. The size must be positive and odd. 2171 * @param scale Optional scale factor for the computed Laplacian values. By default, no scaling is 2172 * applied. See #getDerivKernels for details. 2173 * SEE: Sobel, Scharr 2174 */ 2175 public static void Laplacian(Mat src, Mat dst, int ddepth, int ksize, double scale) { 2176 Laplacian_2(src.nativeObj, dst.nativeObj, ddepth, ksize, scale); 2177 } 2178 2179 /** 2180 * Calculates the Laplacian of an image. 2181 * 2182 * The function calculates the Laplacian of the source image by adding up the second x and y 2183 * derivatives calculated using the Sobel operator: 2184 * 2185 * \(\texttt{dst} = \Delta \texttt{src} = \frac{\partial^2 \texttt{src}}{\partial x^2} + \frac{\partial^2 \texttt{src}}{\partial y^2}\) 2186 * 2187 * This is done when {@code ksize > 1}. When {@code ksize == 1}, the Laplacian is computed by filtering the image 2188 * with the following \(3 \times 3\) aperture: 2189 * 2190 * \(\vecthreethree {0}{1}{0}{1}{-4}{1}{0}{1}{0}\) 2191 * 2192 * @param src Source image. 2193 * @param dst Destination image of the same size and the same number of channels as src . 2194 * @param ddepth Desired depth of the destination image. 2195 * @param ksize Aperture size used to compute the second-derivative filters. See #getDerivKernels for 2196 * details. The size must be positive and odd. 2197 * applied. See #getDerivKernels for details. 2198 * SEE: Sobel, Scharr 2199 */ 2200 public static void Laplacian(Mat src, Mat dst, int ddepth, int ksize) { 2201 Laplacian_3(src.nativeObj, dst.nativeObj, ddepth, ksize); 2202 } 2203 2204 /** 2205 * Calculates the Laplacian of an image. 2206 * 2207 * The function calculates the Laplacian of the source image by adding up the second x and y 2208 * derivatives calculated using the Sobel operator: 2209 * 2210 * \(\texttt{dst} = \Delta \texttt{src} = \frac{\partial^2 \texttt{src}}{\partial x^2} + \frac{\partial^2 \texttt{src}}{\partial y^2}\) 2211 * 2212 * This is done when {@code ksize > 1}. When {@code ksize == 1}, the Laplacian is computed by filtering the image 2213 * with the following \(3 \times 3\) aperture: 2214 * 2215 * \(\vecthreethree {0}{1}{0}{1}{-4}{1}{0}{1}{0}\) 2216 * 2217 * @param src Source image. 2218 * @param dst Destination image of the same size and the same number of channels as src . 2219 * @param ddepth Desired depth of the destination image. 2220 * details. The size must be positive and odd. 2221 * applied. See #getDerivKernels for details. 2222 * SEE: Sobel, Scharr 2223 */ 2224 public static void Laplacian(Mat src, Mat dst, int ddepth) { 2225 Laplacian_4(src.nativeObj, dst.nativeObj, ddepth); 2226 } 2227 2228 2229 // 2230 // C++: void cv::Canny(Mat image, Mat& edges, double threshold1, double threshold2, int apertureSize = 3, bool L2gradient = false) 2231 // 2232 2233 /** 2234 * Finds edges in an image using the Canny algorithm CITE: Canny86 . 2235 * 2236 * The function finds edges in the input image and marks them in the output map edges using the 2237 * Canny algorithm. The smallest value between threshold1 and threshold2 is used for edge linking. The 2238 * largest value is used to find initial segments of strong edges. See 2239 * <http://en.wikipedia.org/wiki/Canny_edge_detector> 2240 * 2241 * @param image 8-bit input image. 2242 * @param edges output edge map; single channels 8-bit image, which has the same size as image . 2243 * @param threshold1 first threshold for the hysteresis procedure. 2244 * @param threshold2 second threshold for the hysteresis procedure. 2245 * @param apertureSize aperture size for the Sobel operator. 2246 * @param L2gradient a flag, indicating whether a more accurate \(L_2\) norm 2247 * \(=\sqrt{(dI/dx)^2 + (dI/dy)^2}\) should be used to calculate the image gradient magnitude ( 2248 * L2gradient=true ), or whether the default \(L_1\) norm \(=|dI/dx|+|dI/dy|\) is enough ( 2249 * L2gradient=false ). 2250 */ 2251 public static void Canny(Mat image, Mat edges, double threshold1, double threshold2, int apertureSize, boolean L2gradient) { 2252 Canny_0(image.nativeObj, edges.nativeObj, threshold1, threshold2, apertureSize, L2gradient); 2253 } 2254 2255 /** 2256 * Finds edges in an image using the Canny algorithm CITE: Canny86 . 2257 * 2258 * The function finds edges in the input image and marks them in the output map edges using the 2259 * Canny algorithm. The smallest value between threshold1 and threshold2 is used for edge linking. The 2260 * largest value is used to find initial segments of strong edges. See 2261 * <http://en.wikipedia.org/wiki/Canny_edge_detector> 2262 * 2263 * @param image 8-bit input image. 2264 * @param edges output edge map; single channels 8-bit image, which has the same size as image . 2265 * @param threshold1 first threshold for the hysteresis procedure. 2266 * @param threshold2 second threshold for the hysteresis procedure. 2267 * @param apertureSize aperture size for the Sobel operator. 2268 * \(=\sqrt{(dI/dx)^2 + (dI/dy)^2}\) should be used to calculate the image gradient magnitude ( 2269 * L2gradient=true ), or whether the default \(L_1\) norm \(=|dI/dx|+|dI/dy|\) is enough ( 2270 * L2gradient=false ). 2271 */ 2272 public static void Canny(Mat image, Mat edges, double threshold1, double threshold2, int apertureSize) { 2273 Canny_1(image.nativeObj, edges.nativeObj, threshold1, threshold2, apertureSize); 2274 } 2275 2276 /** 2277 * Finds edges in an image using the Canny algorithm CITE: Canny86 . 2278 * 2279 * The function finds edges in the input image and marks them in the output map edges using the 2280 * Canny algorithm. The smallest value between threshold1 and threshold2 is used for edge linking. The 2281 * largest value is used to find initial segments of strong edges. See 2282 * <http://en.wikipedia.org/wiki/Canny_edge_detector> 2283 * 2284 * @param image 8-bit input image. 2285 * @param edges output edge map; single channels 8-bit image, which has the same size as image . 2286 * @param threshold1 first threshold for the hysteresis procedure. 2287 * @param threshold2 second threshold for the hysteresis procedure. 2288 * \(=\sqrt{(dI/dx)^2 + (dI/dy)^2}\) should be used to calculate the image gradient magnitude ( 2289 * L2gradient=true ), or whether the default \(L_1\) norm \(=|dI/dx|+|dI/dy|\) is enough ( 2290 * L2gradient=false ). 2291 */ 2292 public static void Canny(Mat image, Mat edges, double threshold1, double threshold2) { 2293 Canny_2(image.nativeObj, edges.nativeObj, threshold1, threshold2); 2294 } 2295 2296 2297 // 2298 // C++: void cv::Canny(Mat dx, Mat dy, Mat& edges, double threshold1, double threshold2, bool L2gradient = false) 2299 // 2300 2301 /** 2302 * \overload 2303 * 2304 * Finds edges in an image using the Canny algorithm with custom image gradient. 2305 * 2306 * @param dx 16-bit x derivative of input image (CV_16SC1 or CV_16SC3). 2307 * @param dy 16-bit y derivative of input image (same type as dx). 2308 * @param edges output edge map; single channels 8-bit image, which has the same size as image . 2309 * @param threshold1 first threshold for the hysteresis procedure. 2310 * @param threshold2 second threshold for the hysteresis procedure. 2311 * @param L2gradient a flag, indicating whether a more accurate \(L_2\) norm 2312 * \(=\sqrt{(dI/dx)^2 + (dI/dy)^2}\) should be used to calculate the image gradient magnitude ( 2313 * L2gradient=true ), or whether the default \(L_1\) norm \(=|dI/dx|+|dI/dy|\) is enough ( 2314 * L2gradient=false ). 2315 */ 2316 public static void Canny(Mat dx, Mat dy, Mat edges, double threshold1, double threshold2, boolean L2gradient) { 2317 Canny_3(dx.nativeObj, dy.nativeObj, edges.nativeObj, threshold1, threshold2, L2gradient); 2318 } 2319 2320 /** 2321 * \overload 2322 * 2323 * Finds edges in an image using the Canny algorithm with custom image gradient. 2324 * 2325 * @param dx 16-bit x derivative of input image (CV_16SC1 or CV_16SC3). 2326 * @param dy 16-bit y derivative of input image (same type as dx). 2327 * @param edges output edge map; single channels 8-bit image, which has the same size as image . 2328 * @param threshold1 first threshold for the hysteresis procedure. 2329 * @param threshold2 second threshold for the hysteresis procedure. 2330 * \(=\sqrt{(dI/dx)^2 + (dI/dy)^2}\) should be used to calculate the image gradient magnitude ( 2331 * L2gradient=true ), or whether the default \(L_1\) norm \(=|dI/dx|+|dI/dy|\) is enough ( 2332 * L2gradient=false ). 2333 */ 2334 public static void Canny(Mat dx, Mat dy, Mat edges, double threshold1, double threshold2) { 2335 Canny_4(dx.nativeObj, dy.nativeObj, edges.nativeObj, threshold1, threshold2); 2336 } 2337 2338 2339 // 2340 // C++: void cv::cornerMinEigenVal(Mat src, Mat& dst, int blockSize, int ksize = 3, int borderType = BORDER_DEFAULT) 2341 // 2342 2343 /** 2344 * Calculates the minimal eigenvalue of gradient matrices for corner detection. 2345 * 2346 * The function is similar to cornerEigenValsAndVecs but it calculates and stores only the minimal 2347 * eigenvalue of the covariance matrix of derivatives, that is, \(\min(\lambda_1, \lambda_2)\) in terms 2348 * of the formulae in the cornerEigenValsAndVecs description. 2349 * 2350 * @param src Input single-channel 8-bit or floating-point image. 2351 * @param dst Image to store the minimal eigenvalues. It has the type CV_32FC1 and the same size as 2352 * src . 2353 * @param blockSize Neighborhood size (see the details on #cornerEigenValsAndVecs ). 2354 * @param ksize Aperture parameter for the Sobel operator. 2355 * @param borderType Pixel extrapolation method. See #BorderTypes. #BORDER_WRAP is not supported. 2356 */ 2357 public static void cornerMinEigenVal(Mat src, Mat dst, int blockSize, int ksize, int borderType) { 2358 cornerMinEigenVal_0(src.nativeObj, dst.nativeObj, blockSize, ksize, borderType); 2359 } 2360 2361 /** 2362 * Calculates the minimal eigenvalue of gradient matrices for corner detection. 2363 * 2364 * The function is similar to cornerEigenValsAndVecs but it calculates and stores only the minimal 2365 * eigenvalue of the covariance matrix of derivatives, that is, \(\min(\lambda_1, \lambda_2)\) in terms 2366 * of the formulae in the cornerEigenValsAndVecs description. 2367 * 2368 * @param src Input single-channel 8-bit or floating-point image. 2369 * @param dst Image to store the minimal eigenvalues. It has the type CV_32FC1 and the same size as 2370 * src . 2371 * @param blockSize Neighborhood size (see the details on #cornerEigenValsAndVecs ). 2372 * @param ksize Aperture parameter for the Sobel operator. 2373 */ 2374 public static void cornerMinEigenVal(Mat src, Mat dst, int blockSize, int ksize) { 2375 cornerMinEigenVal_1(src.nativeObj, dst.nativeObj, blockSize, ksize); 2376 } 2377 2378 /** 2379 * Calculates the minimal eigenvalue of gradient matrices for corner detection. 2380 * 2381 * The function is similar to cornerEigenValsAndVecs but it calculates and stores only the minimal 2382 * eigenvalue of the covariance matrix of derivatives, that is, \(\min(\lambda_1, \lambda_2)\) in terms 2383 * of the formulae in the cornerEigenValsAndVecs description. 2384 * 2385 * @param src Input single-channel 8-bit or floating-point image. 2386 * @param dst Image to store the minimal eigenvalues. It has the type CV_32FC1 and the same size as 2387 * src . 2388 * @param blockSize Neighborhood size (see the details on #cornerEigenValsAndVecs ). 2389 */ 2390 public static void cornerMinEigenVal(Mat src, Mat dst, int blockSize) { 2391 cornerMinEigenVal_2(src.nativeObj, dst.nativeObj, blockSize); 2392 } 2393 2394 2395 // 2396 // C++: void cv::cornerHarris(Mat src, Mat& dst, int blockSize, int ksize, double k, int borderType = BORDER_DEFAULT) 2397 // 2398 2399 /** 2400 * Harris corner detector. 2401 * 2402 * The function runs the Harris corner detector on the image. Similarly to cornerMinEigenVal and 2403 * cornerEigenValsAndVecs , for each pixel \((x, y)\) it calculates a \(2\times2\) gradient covariance 2404 * matrix \(M^{(x,y)}\) over a \(\texttt{blockSize} \times \texttt{blockSize}\) neighborhood. Then, it 2405 * computes the following characteristic: 2406 * 2407 * \(\texttt{dst} (x,y) = \mathrm{det} M^{(x,y)} - k \cdot \left ( \mathrm{tr} M^{(x,y)} \right )^2\) 2408 * 2409 * Corners in the image can be found as the local maxima of this response map. 2410 * 2411 * @param src Input single-channel 8-bit or floating-point image. 2412 * @param dst Image to store the Harris detector responses. It has the type CV_32FC1 and the same 2413 * size as src . 2414 * @param blockSize Neighborhood size (see the details on #cornerEigenValsAndVecs ). 2415 * @param ksize Aperture parameter for the Sobel operator. 2416 * @param k Harris detector free parameter. See the formula above. 2417 * @param borderType Pixel extrapolation method. See #BorderTypes. #BORDER_WRAP is not supported. 2418 */ 2419 public static void cornerHarris(Mat src, Mat dst, int blockSize, int ksize, double k, int borderType) { 2420 cornerHarris_0(src.nativeObj, dst.nativeObj, blockSize, ksize, k, borderType); 2421 } 2422 2423 /** 2424 * Harris corner detector. 2425 * 2426 * The function runs the Harris corner detector on the image. Similarly to cornerMinEigenVal and 2427 * cornerEigenValsAndVecs , for each pixel \((x, y)\) it calculates a \(2\times2\) gradient covariance 2428 * matrix \(M^{(x,y)}\) over a \(\texttt{blockSize} \times \texttt{blockSize}\) neighborhood. Then, it 2429 * computes the following characteristic: 2430 * 2431 * \(\texttt{dst} (x,y) = \mathrm{det} M^{(x,y)} - k \cdot \left ( \mathrm{tr} M^{(x,y)} \right )^2\) 2432 * 2433 * Corners in the image can be found as the local maxima of this response map. 2434 * 2435 * @param src Input single-channel 8-bit or floating-point image. 2436 * @param dst Image to store the Harris detector responses. It has the type CV_32FC1 and the same 2437 * size as src . 2438 * @param blockSize Neighborhood size (see the details on #cornerEigenValsAndVecs ). 2439 * @param ksize Aperture parameter for the Sobel operator. 2440 * @param k Harris detector free parameter. See the formula above. 2441 */ 2442 public static void cornerHarris(Mat src, Mat dst, int blockSize, int ksize, double k) { 2443 cornerHarris_1(src.nativeObj, dst.nativeObj, blockSize, ksize, k); 2444 } 2445 2446 2447 // 2448 // C++: void cv::cornerEigenValsAndVecs(Mat src, Mat& dst, int blockSize, int ksize, int borderType = BORDER_DEFAULT) 2449 // 2450 2451 /** 2452 * Calculates eigenvalues and eigenvectors of image blocks for corner detection. 2453 * 2454 * For every pixel \(p\) , the function cornerEigenValsAndVecs considers a blockSize \(\times\) blockSize 2455 * neighborhood \(S(p)\) . It calculates the covariation matrix of derivatives over the neighborhood as: 2456 * 2457 * \(M = \begin{bmatrix} \sum _{S(p)}(dI/dx)^2 & \sum _{S(p)}dI/dx dI/dy \\ \sum _{S(p)}dI/dx dI/dy & \sum _{S(p)}(dI/dy)^2 \end{bmatrix}\) 2458 * 2459 * where the derivatives are computed using the Sobel operator. 2460 * 2461 * After that, it finds eigenvectors and eigenvalues of \(M\) and stores them in the destination image as 2462 * \((\lambda_1, \lambda_2, x_1, y_1, x_2, y_2)\) where 2463 * 2464 * <ul> 2465 * <li> 2466 * \(\lambda_1, \lambda_2\) are the non-sorted eigenvalues of \(M\) 2467 * </li> 2468 * <li> 2469 * \(x_1, y_1\) are the eigenvectors corresponding to \(\lambda_1\) 2470 * </li> 2471 * <li> 2472 * \(x_2, y_2\) are the eigenvectors corresponding to \(\lambda_2\) 2473 * </li> 2474 * </ul> 2475 * 2476 * The output of the function can be used for robust edge or corner detection. 2477 * 2478 * @param src Input single-channel 8-bit or floating-point image. 2479 * @param dst Image to store the results. It has the same size as src and the type CV_32FC(6) . 2480 * @param blockSize Neighborhood size (see details below). 2481 * @param ksize Aperture parameter for the Sobel operator. 2482 * @param borderType Pixel extrapolation method. See #BorderTypes. #BORDER_WRAP is not supported. 2483 * 2484 * SEE: cornerMinEigenVal, cornerHarris, preCornerDetect 2485 */ 2486 public static void cornerEigenValsAndVecs(Mat src, Mat dst, int blockSize, int ksize, int borderType) { 2487 cornerEigenValsAndVecs_0(src.nativeObj, dst.nativeObj, blockSize, ksize, borderType); 2488 } 2489 2490 /** 2491 * Calculates eigenvalues and eigenvectors of image blocks for corner detection. 2492 * 2493 * For every pixel \(p\) , the function cornerEigenValsAndVecs considers a blockSize \(\times\) blockSize 2494 * neighborhood \(S(p)\) . It calculates the covariation matrix of derivatives over the neighborhood as: 2495 * 2496 * \(M = \begin{bmatrix} \sum _{S(p)}(dI/dx)^2 & \sum _{S(p)}dI/dx dI/dy \\ \sum _{S(p)}dI/dx dI/dy & \sum _{S(p)}(dI/dy)^2 \end{bmatrix}\) 2497 * 2498 * where the derivatives are computed using the Sobel operator. 2499 * 2500 * After that, it finds eigenvectors and eigenvalues of \(M\) and stores them in the destination image as 2501 * \((\lambda_1, \lambda_2, x_1, y_1, x_2, y_2)\) where 2502 * 2503 * <ul> 2504 * <li> 2505 * \(\lambda_1, \lambda_2\) are the non-sorted eigenvalues of \(M\) 2506 * </li> 2507 * <li> 2508 * \(x_1, y_1\) are the eigenvectors corresponding to \(\lambda_1\) 2509 * </li> 2510 * <li> 2511 * \(x_2, y_2\) are the eigenvectors corresponding to \(\lambda_2\) 2512 * </li> 2513 * </ul> 2514 * 2515 * The output of the function can be used for robust edge or corner detection. 2516 * 2517 * @param src Input single-channel 8-bit or floating-point image. 2518 * @param dst Image to store the results. It has the same size as src and the type CV_32FC(6) . 2519 * @param blockSize Neighborhood size (see details below). 2520 * @param ksize Aperture parameter for the Sobel operator. 2521 * 2522 * SEE: cornerMinEigenVal, cornerHarris, preCornerDetect 2523 */ 2524 public static void cornerEigenValsAndVecs(Mat src, Mat dst, int blockSize, int ksize) { 2525 cornerEigenValsAndVecs_1(src.nativeObj, dst.nativeObj, blockSize, ksize); 2526 } 2527 2528 2529 // 2530 // C++: void cv::preCornerDetect(Mat src, Mat& dst, int ksize, int borderType = BORDER_DEFAULT) 2531 // 2532 2533 /** 2534 * Calculates a feature map for corner detection. 2535 * 2536 * The function calculates the complex spatial derivative-based function of the source image 2537 * 2538 * \(\texttt{dst} = (D_x \texttt{src} )^2 \cdot D_{yy} \texttt{src} + (D_y \texttt{src} )^2 \cdot D_{xx} \texttt{src} - 2 D_x \texttt{src} \cdot D_y \texttt{src} \cdot D_{xy} \texttt{src}\) 2539 * 2540 * where \(D_x\),\(D_y\) are the first image derivatives, \(D_{xx}\),\(D_{yy}\) are the second image 2541 * derivatives, and \(D_{xy}\) is the mixed derivative. 2542 * 2543 * The corners can be found as local maximums of the functions, as shown below: 2544 * <code> 2545 * Mat corners, dilated_corners; 2546 * preCornerDetect(image, corners, 3); 2547 * // dilation with 3x3 rectangular structuring element 2548 * dilate(corners, dilated_corners, Mat(), 1); 2549 * Mat corner_mask = corners == dilated_corners; 2550 * </code> 2551 * 2552 * @param src Source single-channel 8-bit of floating-point image. 2553 * @param dst Output image that has the type CV_32F and the same size as src . 2554 * @param ksize %Aperture size of the Sobel . 2555 * @param borderType Pixel extrapolation method. See #BorderTypes. #BORDER_WRAP is not supported. 2556 */ 2557 public static void preCornerDetect(Mat src, Mat dst, int ksize, int borderType) { 2558 preCornerDetect_0(src.nativeObj, dst.nativeObj, ksize, borderType); 2559 } 2560 2561 /** 2562 * Calculates a feature map for corner detection. 2563 * 2564 * The function calculates the complex spatial derivative-based function of the source image 2565 * 2566 * \(\texttt{dst} = (D_x \texttt{src} )^2 \cdot D_{yy} \texttt{src} + (D_y \texttt{src} )^2 \cdot D_{xx} \texttt{src} - 2 D_x \texttt{src} \cdot D_y \texttt{src} \cdot D_{xy} \texttt{src}\) 2567 * 2568 * where \(D_x\),\(D_y\) are the first image derivatives, \(D_{xx}\),\(D_{yy}\) are the second image 2569 * derivatives, and \(D_{xy}\) is the mixed derivative. 2570 * 2571 * The corners can be found as local maximums of the functions, as shown below: 2572 * <code> 2573 * Mat corners, dilated_corners; 2574 * preCornerDetect(image, corners, 3); 2575 * // dilation with 3x3 rectangular structuring element 2576 * dilate(corners, dilated_corners, Mat(), 1); 2577 * Mat corner_mask = corners == dilated_corners; 2578 * </code> 2579 * 2580 * @param src Source single-channel 8-bit of floating-point image. 2581 * @param dst Output image that has the type CV_32F and the same size as src . 2582 * @param ksize %Aperture size of the Sobel . 2583 */ 2584 public static void preCornerDetect(Mat src, Mat dst, int ksize) { 2585 preCornerDetect_1(src.nativeObj, dst.nativeObj, ksize); 2586 } 2587 2588 2589 // 2590 // C++: void cv::cornerSubPix(Mat image, Mat& corners, Size winSize, Size zeroZone, TermCriteria criteria) 2591 // 2592 2593 /** 2594 * Refines the corner locations. 2595 * 2596 * The function iterates to find the sub-pixel accurate location of corners or radial saddle 2597 * points as described in CITE: forstner1987fast, and as shown on the figure below. 2598 * 2599 * ![image](pics/cornersubpix.png) 2600 * 2601 * Sub-pixel accurate corner locator is based on the observation that every vector from the center \(q\) 2602 * to a point \(p\) located within a neighborhood of \(q\) is orthogonal to the image gradient at \(p\) 2603 * subject to image and measurement noise. Consider the expression: 2604 * 2605 * \(\epsilon _i = {DI_{p_i}}^T \cdot (q - p_i)\) 2606 * 2607 * where \({DI_{p_i}}\) is an image gradient at one of the points \(p_i\) in a neighborhood of \(q\) . The 2608 * value of \(q\) is to be found so that \(\epsilon_i\) is minimized. A system of equations may be set up 2609 * with \(\epsilon_i\) set to zero: 2610 * 2611 * \(\sum _i(DI_{p_i} \cdot {DI_{p_i}}^T) \cdot q - \sum _i(DI_{p_i} \cdot {DI_{p_i}}^T \cdot p_i)\) 2612 * 2613 * where the gradients are summed within a neighborhood ("search window") of \(q\) . Calling the first 2614 * gradient term \(G\) and the second gradient term \(b\) gives: 2615 * 2616 * \(q = G^{-1} \cdot b\) 2617 * 2618 * The algorithm sets the center of the neighborhood window at this new center \(q\) and then iterates 2619 * until the center stays within a set threshold. 2620 * 2621 * @param image Input single-channel, 8-bit or float image. 2622 * @param corners Initial coordinates of the input corners and refined coordinates provided for 2623 * output. 2624 * @param winSize Half of the side length of the search window. For example, if winSize=Size(5,5) , 2625 * then a \((5*2+1) \times (5*2+1) = 11 \times 11\) search window is used. 2626 * @param zeroZone Half of the size of the dead region in the middle of the search zone over which 2627 * the summation in the formula below is not done. It is used sometimes to avoid possible 2628 * singularities of the autocorrelation matrix. The value of (-1,-1) indicates that there is no such 2629 * a size. 2630 * @param criteria Criteria for termination of the iterative process of corner refinement. That is, 2631 * the process of corner position refinement stops either after criteria.maxCount iterations or when 2632 * the corner position moves by less than criteria.epsilon on some iteration. 2633 */ 2634 public static void cornerSubPix(Mat image, Mat corners, Size winSize, Size zeroZone, TermCriteria criteria) { 2635 cornerSubPix_0(image.nativeObj, corners.nativeObj, winSize.width, winSize.height, zeroZone.width, zeroZone.height, criteria.type, criteria.maxCount, criteria.epsilon); 2636 } 2637 2638 2639 // 2640 // C++: void cv::goodFeaturesToTrack(Mat image, vector_Point& corners, int maxCorners, double qualityLevel, double minDistance, Mat mask = Mat(), int blockSize = 3, bool useHarrisDetector = false, double k = 0.04) 2641 // 2642 2643 /** 2644 * Determines strong corners on an image. 2645 * 2646 * The function finds the most prominent corners in the image or in the specified image region, as 2647 * described in CITE: Shi94 2648 * 2649 * <ul> 2650 * <li> 2651 * Function calculates the corner quality measure at every source image pixel using the 2652 * #cornerMinEigenVal or #cornerHarris . 2653 * </li> 2654 * <li> 2655 * Function performs a non-maximum suppression (the local maximums in *3 x 3* neighborhood are 2656 * retained). 2657 * </li> 2658 * <li> 2659 * The corners with the minimal eigenvalue less than 2660 * \(\texttt{qualityLevel} \cdot \max_{x,y} qualityMeasureMap(x,y)\) are rejected. 2661 * </li> 2662 * <li> 2663 * The remaining corners are sorted by the quality measure in the descending order. 2664 * </li> 2665 * <li> 2666 * Function throws away each corner for which there is a stronger corner at a distance less than 2667 * maxDistance. 2668 * </li> 2669 * </ul> 2670 * 2671 * The function can be used to initialize a point-based tracker of an object. 2672 * 2673 * <b>Note:</b> If the function is called with different values A and B of the parameter qualityLevel , and 2674 * A > B, the vector of returned corners with qualityLevel=A will be the prefix of the output vector 2675 * with qualityLevel=B . 2676 * 2677 * @param image Input 8-bit or floating-point 32-bit, single-channel image. 2678 * @param corners Output vector of detected corners. 2679 * @param maxCorners Maximum number of corners to return. If there are more corners than are found, 2680 * the strongest of them is returned. {@code maxCorners <= 0} implies that no limit on the maximum is set 2681 * and all detected corners are returned. 2682 * @param qualityLevel Parameter characterizing the minimal accepted quality of image corners. The 2683 * parameter value is multiplied by the best corner quality measure, which is the minimal eigenvalue 2684 * (see #cornerMinEigenVal ) or the Harris function response (see #cornerHarris ). The corners with the 2685 * quality measure less than the product are rejected. For example, if the best corner has the 2686 * quality measure = 1500, and the qualityLevel=0.01 , then all the corners with the quality measure 2687 * less than 15 are rejected. 2688 * @param minDistance Minimum possible Euclidean distance between the returned corners. 2689 * @param mask Optional region of interest. If the image is not empty (it needs to have the type 2690 * CV_8UC1 and the same size as image ), it specifies the region in which the corners are detected. 2691 * @param blockSize Size of an average block for computing a derivative covariation matrix over each 2692 * pixel neighborhood. See cornerEigenValsAndVecs . 2693 * @param useHarrisDetector Parameter indicating whether to use a Harris detector (see #cornerHarris) 2694 * or #cornerMinEigenVal. 2695 * @param k Free parameter of the Harris detector. 2696 * 2697 * SEE: cornerMinEigenVal, cornerHarris, calcOpticalFlowPyrLK, estimateRigidTransform, 2698 */ 2699 public static void goodFeaturesToTrack(Mat image, MatOfPoint corners, int maxCorners, double qualityLevel, double minDistance, Mat mask, int blockSize, boolean useHarrisDetector, double k) { 2700 Mat corners_mat = corners; 2701 goodFeaturesToTrack_0(image.nativeObj, corners_mat.nativeObj, maxCorners, qualityLevel, minDistance, mask.nativeObj, blockSize, useHarrisDetector, k); 2702 } 2703 2704 /** 2705 * Determines strong corners on an image. 2706 * 2707 * The function finds the most prominent corners in the image or in the specified image region, as 2708 * described in CITE: Shi94 2709 * 2710 * <ul> 2711 * <li> 2712 * Function calculates the corner quality measure at every source image pixel using the 2713 * #cornerMinEigenVal or #cornerHarris . 2714 * </li> 2715 * <li> 2716 * Function performs a non-maximum suppression (the local maximums in *3 x 3* neighborhood are 2717 * retained). 2718 * </li> 2719 * <li> 2720 * The corners with the minimal eigenvalue less than 2721 * \(\texttt{qualityLevel} \cdot \max_{x,y} qualityMeasureMap(x,y)\) are rejected. 2722 * </li> 2723 * <li> 2724 * The remaining corners are sorted by the quality measure in the descending order. 2725 * </li> 2726 * <li> 2727 * Function throws away each corner for which there is a stronger corner at a distance less than 2728 * maxDistance. 2729 * </li> 2730 * </ul> 2731 * 2732 * The function can be used to initialize a point-based tracker of an object. 2733 * 2734 * <b>Note:</b> If the function is called with different values A and B of the parameter qualityLevel , and 2735 * A > B, the vector of returned corners with qualityLevel=A will be the prefix of the output vector 2736 * with qualityLevel=B . 2737 * 2738 * @param image Input 8-bit or floating-point 32-bit, single-channel image. 2739 * @param corners Output vector of detected corners. 2740 * @param maxCorners Maximum number of corners to return. If there are more corners than are found, 2741 * the strongest of them is returned. {@code maxCorners <= 0} implies that no limit on the maximum is set 2742 * and all detected corners are returned. 2743 * @param qualityLevel Parameter characterizing the minimal accepted quality of image corners. The 2744 * parameter value is multiplied by the best corner quality measure, which is the minimal eigenvalue 2745 * (see #cornerMinEigenVal ) or the Harris function response (see #cornerHarris ). The corners with the 2746 * quality measure less than the product are rejected. For example, if the best corner has the 2747 * quality measure = 1500, and the qualityLevel=0.01 , then all the corners with the quality measure 2748 * less than 15 are rejected. 2749 * @param minDistance Minimum possible Euclidean distance between the returned corners. 2750 * @param mask Optional region of interest. If the image is not empty (it needs to have the type 2751 * CV_8UC1 and the same size as image ), it specifies the region in which the corners are detected. 2752 * @param blockSize Size of an average block for computing a derivative covariation matrix over each 2753 * pixel neighborhood. See cornerEigenValsAndVecs . 2754 * @param useHarrisDetector Parameter indicating whether to use a Harris detector (see #cornerHarris) 2755 * or #cornerMinEigenVal. 2756 * 2757 * SEE: cornerMinEigenVal, cornerHarris, calcOpticalFlowPyrLK, estimateRigidTransform, 2758 */ 2759 public static void goodFeaturesToTrack(Mat image, MatOfPoint corners, int maxCorners, double qualityLevel, double minDistance, Mat mask, int blockSize, boolean useHarrisDetector) { 2760 Mat corners_mat = corners; 2761 goodFeaturesToTrack_1(image.nativeObj, corners_mat.nativeObj, maxCorners, qualityLevel, minDistance, mask.nativeObj, blockSize, useHarrisDetector); 2762 } 2763 2764 /** 2765 * Determines strong corners on an image. 2766 * 2767 * The function finds the most prominent corners in the image or in the specified image region, as 2768 * described in CITE: Shi94 2769 * 2770 * <ul> 2771 * <li> 2772 * Function calculates the corner quality measure at every source image pixel using the 2773 * #cornerMinEigenVal or #cornerHarris . 2774 * </li> 2775 * <li> 2776 * Function performs a non-maximum suppression (the local maximums in *3 x 3* neighborhood are 2777 * retained). 2778 * </li> 2779 * <li> 2780 * The corners with the minimal eigenvalue less than 2781 * \(\texttt{qualityLevel} \cdot \max_{x,y} qualityMeasureMap(x,y)\) are rejected. 2782 * </li> 2783 * <li> 2784 * The remaining corners are sorted by the quality measure in the descending order. 2785 * </li> 2786 * <li> 2787 * Function throws away each corner for which there is a stronger corner at a distance less than 2788 * maxDistance. 2789 * </li> 2790 * </ul> 2791 * 2792 * The function can be used to initialize a point-based tracker of an object. 2793 * 2794 * <b>Note:</b> If the function is called with different values A and B of the parameter qualityLevel , and 2795 * A > B, the vector of returned corners with qualityLevel=A will be the prefix of the output vector 2796 * with qualityLevel=B . 2797 * 2798 * @param image Input 8-bit or floating-point 32-bit, single-channel image. 2799 * @param corners Output vector of detected corners. 2800 * @param maxCorners Maximum number of corners to return. If there are more corners than are found, 2801 * the strongest of them is returned. {@code maxCorners <= 0} implies that no limit on the maximum is set 2802 * and all detected corners are returned. 2803 * @param qualityLevel Parameter characterizing the minimal accepted quality of image corners. The 2804 * parameter value is multiplied by the best corner quality measure, which is the minimal eigenvalue 2805 * (see #cornerMinEigenVal ) or the Harris function response (see #cornerHarris ). The corners with the 2806 * quality measure less than the product are rejected. For example, if the best corner has the 2807 * quality measure = 1500, and the qualityLevel=0.01 , then all the corners with the quality measure 2808 * less than 15 are rejected. 2809 * @param minDistance Minimum possible Euclidean distance between the returned corners. 2810 * @param mask Optional region of interest. If the image is not empty (it needs to have the type 2811 * CV_8UC1 and the same size as image ), it specifies the region in which the corners are detected. 2812 * @param blockSize Size of an average block for computing a derivative covariation matrix over each 2813 * pixel neighborhood. See cornerEigenValsAndVecs . 2814 * or #cornerMinEigenVal. 2815 * 2816 * SEE: cornerMinEigenVal, cornerHarris, calcOpticalFlowPyrLK, estimateRigidTransform, 2817 */ 2818 public static void goodFeaturesToTrack(Mat image, MatOfPoint corners, int maxCorners, double qualityLevel, double minDistance, Mat mask, int blockSize) { 2819 Mat corners_mat = corners; 2820 goodFeaturesToTrack_2(image.nativeObj, corners_mat.nativeObj, maxCorners, qualityLevel, minDistance, mask.nativeObj, blockSize); 2821 } 2822 2823 /** 2824 * Determines strong corners on an image. 2825 * 2826 * The function finds the most prominent corners in the image or in the specified image region, as 2827 * described in CITE: Shi94 2828 * 2829 * <ul> 2830 * <li> 2831 * Function calculates the corner quality measure at every source image pixel using the 2832 * #cornerMinEigenVal or #cornerHarris . 2833 * </li> 2834 * <li> 2835 * Function performs a non-maximum suppression (the local maximums in *3 x 3* neighborhood are 2836 * retained). 2837 * </li> 2838 * <li> 2839 * The corners with the minimal eigenvalue less than 2840 * \(\texttt{qualityLevel} \cdot \max_{x,y} qualityMeasureMap(x,y)\) are rejected. 2841 * </li> 2842 * <li> 2843 * The remaining corners are sorted by the quality measure in the descending order. 2844 * </li> 2845 * <li> 2846 * Function throws away each corner for which there is a stronger corner at a distance less than 2847 * maxDistance. 2848 * </li> 2849 * </ul> 2850 * 2851 * The function can be used to initialize a point-based tracker of an object. 2852 * 2853 * <b>Note:</b> If the function is called with different values A and B of the parameter qualityLevel , and 2854 * A > B, the vector of returned corners with qualityLevel=A will be the prefix of the output vector 2855 * with qualityLevel=B . 2856 * 2857 * @param image Input 8-bit or floating-point 32-bit, single-channel image. 2858 * @param corners Output vector of detected corners. 2859 * @param maxCorners Maximum number of corners to return. If there are more corners than are found, 2860 * the strongest of them is returned. {@code maxCorners <= 0} implies that no limit on the maximum is set 2861 * and all detected corners are returned. 2862 * @param qualityLevel Parameter characterizing the minimal accepted quality of image corners. The 2863 * parameter value is multiplied by the best corner quality measure, which is the minimal eigenvalue 2864 * (see #cornerMinEigenVal ) or the Harris function response (see #cornerHarris ). The corners with the 2865 * quality measure less than the product are rejected. For example, if the best corner has the 2866 * quality measure = 1500, and the qualityLevel=0.01 , then all the corners with the quality measure 2867 * less than 15 are rejected. 2868 * @param minDistance Minimum possible Euclidean distance between the returned corners. 2869 * @param mask Optional region of interest. If the image is not empty (it needs to have the type 2870 * CV_8UC1 and the same size as image ), it specifies the region in which the corners are detected. 2871 * pixel neighborhood. See cornerEigenValsAndVecs . 2872 * or #cornerMinEigenVal. 2873 * 2874 * SEE: cornerMinEigenVal, cornerHarris, calcOpticalFlowPyrLK, estimateRigidTransform, 2875 */ 2876 public static void goodFeaturesToTrack(Mat image, MatOfPoint corners, int maxCorners, double qualityLevel, double minDistance, Mat mask) { 2877 Mat corners_mat = corners; 2878 goodFeaturesToTrack_3(image.nativeObj, corners_mat.nativeObj, maxCorners, qualityLevel, minDistance, mask.nativeObj); 2879 } 2880 2881 /** 2882 * Determines strong corners on an image. 2883 * 2884 * The function finds the most prominent corners in the image or in the specified image region, as 2885 * described in CITE: Shi94 2886 * 2887 * <ul> 2888 * <li> 2889 * Function calculates the corner quality measure at every source image pixel using the 2890 * #cornerMinEigenVal or #cornerHarris . 2891 * </li> 2892 * <li> 2893 * Function performs a non-maximum suppression (the local maximums in *3 x 3* neighborhood are 2894 * retained). 2895 * </li> 2896 * <li> 2897 * The corners with the minimal eigenvalue less than 2898 * \(\texttt{qualityLevel} \cdot \max_{x,y} qualityMeasureMap(x,y)\) are rejected. 2899 * </li> 2900 * <li> 2901 * The remaining corners are sorted by the quality measure in the descending order. 2902 * </li> 2903 * <li> 2904 * Function throws away each corner for which there is a stronger corner at a distance less than 2905 * maxDistance. 2906 * </li> 2907 * </ul> 2908 * 2909 * The function can be used to initialize a point-based tracker of an object. 2910 * 2911 * <b>Note:</b> If the function is called with different values A and B of the parameter qualityLevel , and 2912 * A > B, the vector of returned corners with qualityLevel=A will be the prefix of the output vector 2913 * with qualityLevel=B . 2914 * 2915 * @param image Input 8-bit or floating-point 32-bit, single-channel image. 2916 * @param corners Output vector of detected corners. 2917 * @param maxCorners Maximum number of corners to return. If there are more corners than are found, 2918 * the strongest of them is returned. {@code maxCorners <= 0} implies that no limit on the maximum is set 2919 * and all detected corners are returned. 2920 * @param qualityLevel Parameter characterizing the minimal accepted quality of image corners. The 2921 * parameter value is multiplied by the best corner quality measure, which is the minimal eigenvalue 2922 * (see #cornerMinEigenVal ) or the Harris function response (see #cornerHarris ). The corners with the 2923 * quality measure less than the product are rejected. For example, if the best corner has the 2924 * quality measure = 1500, and the qualityLevel=0.01 , then all the corners with the quality measure 2925 * less than 15 are rejected. 2926 * @param minDistance Minimum possible Euclidean distance between the returned corners. 2927 * CV_8UC1 and the same size as image ), it specifies the region in which the corners are detected. 2928 * pixel neighborhood. See cornerEigenValsAndVecs . 2929 * or #cornerMinEigenVal. 2930 * 2931 * SEE: cornerMinEigenVal, cornerHarris, calcOpticalFlowPyrLK, estimateRigidTransform, 2932 */ 2933 public static void goodFeaturesToTrack(Mat image, MatOfPoint corners, int maxCorners, double qualityLevel, double minDistance) { 2934 Mat corners_mat = corners; 2935 goodFeaturesToTrack_4(image.nativeObj, corners_mat.nativeObj, maxCorners, qualityLevel, minDistance); 2936 } 2937 2938 2939 // 2940 // C++: void cv::goodFeaturesToTrack(Mat image, vector_Point& corners, int maxCorners, double qualityLevel, double minDistance, Mat mask, int blockSize, int gradientSize, bool useHarrisDetector = false, double k = 0.04) 2941 // 2942 2943 public static void goodFeaturesToTrack(Mat image, MatOfPoint corners, int maxCorners, double qualityLevel, double minDistance, Mat mask, int blockSize, int gradientSize, boolean useHarrisDetector, double k) { 2944 Mat corners_mat = corners; 2945 goodFeaturesToTrack_5(image.nativeObj, corners_mat.nativeObj, maxCorners, qualityLevel, minDistance, mask.nativeObj, blockSize, gradientSize, useHarrisDetector, k); 2946 } 2947 2948 public static void goodFeaturesToTrack(Mat image, MatOfPoint corners, int maxCorners, double qualityLevel, double minDistance, Mat mask, int blockSize, int gradientSize, boolean useHarrisDetector) { 2949 Mat corners_mat = corners; 2950 goodFeaturesToTrack_6(image.nativeObj, corners_mat.nativeObj, maxCorners, qualityLevel, minDistance, mask.nativeObj, blockSize, gradientSize, useHarrisDetector); 2951 } 2952 2953 public static void goodFeaturesToTrack(Mat image, MatOfPoint corners, int maxCorners, double qualityLevel, double minDistance, Mat mask, int blockSize, int gradientSize) { 2954 Mat corners_mat = corners; 2955 goodFeaturesToTrack_7(image.nativeObj, corners_mat.nativeObj, maxCorners, qualityLevel, minDistance, mask.nativeObj, blockSize, gradientSize); 2956 } 2957 2958 2959 // 2960 // C++: void cv::goodFeaturesToTrack(Mat image, Mat& corners, int maxCorners, double qualityLevel, double minDistance, Mat mask, Mat& cornersQuality, int blockSize = 3, int gradientSize = 3, bool useHarrisDetector = false, double k = 0.04) 2961 // 2962 2963 /** 2964 * Same as above, but returns also quality measure of the detected corners. 2965 * 2966 * @param image Input 8-bit or floating-point 32-bit, single-channel image. 2967 * @param corners Output vector of detected corners. 2968 * @param maxCorners Maximum number of corners to return. If there are more corners than are found, 2969 * the strongest of them is returned. {@code maxCorners <= 0} implies that no limit on the maximum is set 2970 * and all detected corners are returned. 2971 * @param qualityLevel Parameter characterizing the minimal accepted quality of image corners. The 2972 * parameter value is multiplied by the best corner quality measure, which is the minimal eigenvalue 2973 * (see #cornerMinEigenVal ) or the Harris function response (see #cornerHarris ). The corners with the 2974 * quality measure less than the product are rejected. For example, if the best corner has the 2975 * quality measure = 1500, and the qualityLevel=0.01 , then all the corners with the quality measure 2976 * less than 15 are rejected. 2977 * @param minDistance Minimum possible Euclidean distance between the returned corners. 2978 * @param mask Region of interest. If the image is not empty (it needs to have the type 2979 * CV_8UC1 and the same size as image ), it specifies the region in which the corners are detected. 2980 * @param cornersQuality Output vector of quality measure of the detected corners. 2981 * @param blockSize Size of an average block for computing a derivative covariation matrix over each 2982 * pixel neighborhood. See cornerEigenValsAndVecs . 2983 * @param gradientSize Aperture parameter for the Sobel operator used for derivatives computation. 2984 * See cornerEigenValsAndVecs . 2985 * @param useHarrisDetector Parameter indicating whether to use a Harris detector (see #cornerHarris) 2986 * or #cornerMinEigenVal. 2987 * @param k Free parameter of the Harris detector. 2988 */ 2989 public static void goodFeaturesToTrackWithQuality(Mat image, Mat corners, int maxCorners, double qualityLevel, double minDistance, Mat mask, Mat cornersQuality, int blockSize, int gradientSize, boolean useHarrisDetector, double k) { 2990 goodFeaturesToTrackWithQuality_0(image.nativeObj, corners.nativeObj, maxCorners, qualityLevel, minDistance, mask.nativeObj, cornersQuality.nativeObj, blockSize, gradientSize, useHarrisDetector, k); 2991 } 2992 2993 /** 2994 * Same as above, but returns also quality measure of the detected corners. 2995 * 2996 * @param image Input 8-bit or floating-point 32-bit, single-channel image. 2997 * @param corners Output vector of detected corners. 2998 * @param maxCorners Maximum number of corners to return. If there are more corners than are found, 2999 * the strongest of them is returned. {@code maxCorners <= 0} implies that no limit on the maximum is set 3000 * and all detected corners are returned. 3001 * @param qualityLevel Parameter characterizing the minimal accepted quality of image corners. The 3002 * parameter value is multiplied by the best corner quality measure, which is the minimal eigenvalue 3003 * (see #cornerMinEigenVal ) or the Harris function response (see #cornerHarris ). The corners with the 3004 * quality measure less than the product are rejected. For example, if the best corner has the 3005 * quality measure = 1500, and the qualityLevel=0.01 , then all the corners with the quality measure 3006 * less than 15 are rejected. 3007 * @param minDistance Minimum possible Euclidean distance between the returned corners. 3008 * @param mask Region of interest. If the image is not empty (it needs to have the type 3009 * CV_8UC1 and the same size as image ), it specifies the region in which the corners are detected. 3010 * @param cornersQuality Output vector of quality measure of the detected corners. 3011 * @param blockSize Size of an average block for computing a derivative covariation matrix over each 3012 * pixel neighborhood. See cornerEigenValsAndVecs . 3013 * @param gradientSize Aperture parameter for the Sobel operator used for derivatives computation. 3014 * See cornerEigenValsAndVecs . 3015 * @param useHarrisDetector Parameter indicating whether to use a Harris detector (see #cornerHarris) 3016 * or #cornerMinEigenVal. 3017 */ 3018 public static void goodFeaturesToTrackWithQuality(Mat image, Mat corners, int maxCorners, double qualityLevel, double minDistance, Mat mask, Mat cornersQuality, int blockSize, int gradientSize, boolean useHarrisDetector) { 3019 goodFeaturesToTrackWithQuality_1(image.nativeObj, corners.nativeObj, maxCorners, qualityLevel, minDistance, mask.nativeObj, cornersQuality.nativeObj, blockSize, gradientSize, useHarrisDetector); 3020 } 3021 3022 /** 3023 * Same as above, but returns also quality measure of the detected corners. 3024 * 3025 * @param image Input 8-bit or floating-point 32-bit, single-channel image. 3026 * @param corners Output vector of detected corners. 3027 * @param maxCorners Maximum number of corners to return. If there are more corners than are found, 3028 * the strongest of them is returned. {@code maxCorners <= 0} implies that no limit on the maximum is set 3029 * and all detected corners are returned. 3030 * @param qualityLevel Parameter characterizing the minimal accepted quality of image corners. The 3031 * parameter value is multiplied by the best corner quality measure, which is the minimal eigenvalue 3032 * (see #cornerMinEigenVal ) or the Harris function response (see #cornerHarris ). The corners with the 3033 * quality measure less than the product are rejected. For example, if the best corner has the 3034 * quality measure = 1500, and the qualityLevel=0.01 , then all the corners with the quality measure 3035 * less than 15 are rejected. 3036 * @param minDistance Minimum possible Euclidean distance between the returned corners. 3037 * @param mask Region of interest. If the image is not empty (it needs to have the type 3038 * CV_8UC1 and the same size as image ), it specifies the region in which the corners are detected. 3039 * @param cornersQuality Output vector of quality measure of the detected corners. 3040 * @param blockSize Size of an average block for computing a derivative covariation matrix over each 3041 * pixel neighborhood. See cornerEigenValsAndVecs . 3042 * @param gradientSize Aperture parameter for the Sobel operator used for derivatives computation. 3043 * See cornerEigenValsAndVecs . 3044 * or #cornerMinEigenVal. 3045 */ 3046 public static void goodFeaturesToTrackWithQuality(Mat image, Mat corners, int maxCorners, double qualityLevel, double minDistance, Mat mask, Mat cornersQuality, int blockSize, int gradientSize) { 3047 goodFeaturesToTrackWithQuality_2(image.nativeObj, corners.nativeObj, maxCorners, qualityLevel, minDistance, mask.nativeObj, cornersQuality.nativeObj, blockSize, gradientSize); 3048 } 3049 3050 /** 3051 * Same as above, but returns also quality measure of the detected corners. 3052 * 3053 * @param image Input 8-bit or floating-point 32-bit, single-channel image. 3054 * @param corners Output vector of detected corners. 3055 * @param maxCorners Maximum number of corners to return. If there are more corners than are found, 3056 * the strongest of them is returned. {@code maxCorners <= 0} implies that no limit on the maximum is set 3057 * and all detected corners are returned. 3058 * @param qualityLevel Parameter characterizing the minimal accepted quality of image corners. The 3059 * parameter value is multiplied by the best corner quality measure, which is the minimal eigenvalue 3060 * (see #cornerMinEigenVal ) or the Harris function response (see #cornerHarris ). The corners with the 3061 * quality measure less than the product are rejected. For example, if the best corner has the 3062 * quality measure = 1500, and the qualityLevel=0.01 , then all the corners with the quality measure 3063 * less than 15 are rejected. 3064 * @param minDistance Minimum possible Euclidean distance between the returned corners. 3065 * @param mask Region of interest. If the image is not empty (it needs to have the type 3066 * CV_8UC1 and the same size as image ), it specifies the region in which the corners are detected. 3067 * @param cornersQuality Output vector of quality measure of the detected corners. 3068 * @param blockSize Size of an average block for computing a derivative covariation matrix over each 3069 * pixel neighborhood. See cornerEigenValsAndVecs . 3070 * See cornerEigenValsAndVecs . 3071 * or #cornerMinEigenVal. 3072 */ 3073 public static void goodFeaturesToTrackWithQuality(Mat image, Mat corners, int maxCorners, double qualityLevel, double minDistance, Mat mask, Mat cornersQuality, int blockSize) { 3074 goodFeaturesToTrackWithQuality_3(image.nativeObj, corners.nativeObj, maxCorners, qualityLevel, minDistance, mask.nativeObj, cornersQuality.nativeObj, blockSize); 3075 } 3076 3077 /** 3078 * Same as above, but returns also quality measure of the detected corners. 3079 * 3080 * @param image Input 8-bit or floating-point 32-bit, single-channel image. 3081 * @param corners Output vector of detected corners. 3082 * @param maxCorners Maximum number of corners to return. If there are more corners than are found, 3083 * the strongest of them is returned. {@code maxCorners <= 0} implies that no limit on the maximum is set 3084 * and all detected corners are returned. 3085 * @param qualityLevel Parameter characterizing the minimal accepted quality of image corners. The 3086 * parameter value is multiplied by the best corner quality measure, which is the minimal eigenvalue 3087 * (see #cornerMinEigenVal ) or the Harris function response (see #cornerHarris ). The corners with the 3088 * quality measure less than the product are rejected. For example, if the best corner has the 3089 * quality measure = 1500, and the qualityLevel=0.01 , then all the corners with the quality measure 3090 * less than 15 are rejected. 3091 * @param minDistance Minimum possible Euclidean distance between the returned corners. 3092 * @param mask Region of interest. If the image is not empty (it needs to have the type 3093 * CV_8UC1 and the same size as image ), it specifies the region in which the corners are detected. 3094 * @param cornersQuality Output vector of quality measure of the detected corners. 3095 * pixel neighborhood. See cornerEigenValsAndVecs . 3096 * See cornerEigenValsAndVecs . 3097 * or #cornerMinEigenVal. 3098 */ 3099 public static void goodFeaturesToTrackWithQuality(Mat image, Mat corners, int maxCorners, double qualityLevel, double minDistance, Mat mask, Mat cornersQuality) { 3100 goodFeaturesToTrackWithQuality_4(image.nativeObj, corners.nativeObj, maxCorners, qualityLevel, minDistance, mask.nativeObj, cornersQuality.nativeObj); 3101 } 3102 3103 3104 // 3105 // C++: void cv::HoughLines(Mat image, Mat& lines, double rho, double theta, int threshold, double srn = 0, double stn = 0, double min_theta = 0, double max_theta = CV_PI) 3106 // 3107 3108 /** 3109 * Finds lines in a binary image using the standard Hough transform. 3110 * 3111 * The function implements the standard or standard multi-scale Hough transform algorithm for line 3112 * detection. See <http://homepages.inf.ed.ac.uk/rbf/HIPR2/hough.htm> for a good explanation of Hough 3113 * transform. 3114 * 3115 * @param image 8-bit, single-channel binary source image. The image may be modified by the function. 3116 * @param lines Output vector of lines. Each line is represented by a 2 or 3 element vector 3117 * \((\rho, \theta)\) or \((\rho, \theta, \textrm{votes})\) . \(\rho\) is the distance from the coordinate origin \((0,0)\) (top-left corner of 3118 * the image). \(\theta\) is the line rotation angle in radians ( 3119 * \(0 \sim \textrm{vertical line}, \pi/2 \sim \textrm{horizontal line}\) ). 3120 * \(\textrm{votes}\) is the value of accumulator. 3121 * @param rho Distance resolution of the accumulator in pixels. 3122 * @param theta Angle resolution of the accumulator in radians. 3123 * @param threshold Accumulator threshold parameter. Only those lines are returned that get enough 3124 * votes ( \(>\texttt{threshold}\) ). 3125 * @param srn For the multi-scale Hough transform, it is a divisor for the distance resolution rho . 3126 * The coarse accumulator distance resolution is rho and the accurate accumulator resolution is 3127 * rho/srn . If both srn=0 and stn=0 , the classical Hough transform is used. Otherwise, both these 3128 * parameters should be positive. 3129 * @param stn For the multi-scale Hough transform, it is a divisor for the distance resolution theta. 3130 * @param min_theta For standard and multi-scale Hough transform, minimum angle to check for lines. 3131 * Must fall between 0 and max_theta. 3132 * @param max_theta For standard and multi-scale Hough transform, maximum angle to check for lines. 3133 * Must fall between min_theta and CV_PI. 3134 */ 3135 public static void HoughLines(Mat image, Mat lines, double rho, double theta, int threshold, double srn, double stn, double min_theta, double max_theta) { 3136 HoughLines_0(image.nativeObj, lines.nativeObj, rho, theta, threshold, srn, stn, min_theta, max_theta); 3137 } 3138 3139 /** 3140 * Finds lines in a binary image using the standard Hough transform. 3141 * 3142 * The function implements the standard or standard multi-scale Hough transform algorithm for line 3143 * detection. See <http://homepages.inf.ed.ac.uk/rbf/HIPR2/hough.htm> for a good explanation of Hough 3144 * transform. 3145 * 3146 * @param image 8-bit, single-channel binary source image. The image may be modified by the function. 3147 * @param lines Output vector of lines. Each line is represented by a 2 or 3 element vector 3148 * \((\rho, \theta)\) or \((\rho, \theta, \textrm{votes})\) . \(\rho\) is the distance from the coordinate origin \((0,0)\) (top-left corner of 3149 * the image). \(\theta\) is the line rotation angle in radians ( 3150 * \(0 \sim \textrm{vertical line}, \pi/2 \sim \textrm{horizontal line}\) ). 3151 * \(\textrm{votes}\) is the value of accumulator. 3152 * @param rho Distance resolution of the accumulator in pixels. 3153 * @param theta Angle resolution of the accumulator in radians. 3154 * @param threshold Accumulator threshold parameter. Only those lines are returned that get enough 3155 * votes ( \(>\texttt{threshold}\) ). 3156 * @param srn For the multi-scale Hough transform, it is a divisor for the distance resolution rho . 3157 * The coarse accumulator distance resolution is rho and the accurate accumulator resolution is 3158 * rho/srn . If both srn=0 and stn=0 , the classical Hough transform is used. Otherwise, both these 3159 * parameters should be positive. 3160 * @param stn For the multi-scale Hough transform, it is a divisor for the distance resolution theta. 3161 * @param min_theta For standard and multi-scale Hough transform, minimum angle to check for lines. 3162 * Must fall between 0 and max_theta. 3163 * Must fall between min_theta and CV_PI. 3164 */ 3165 public static void HoughLines(Mat image, Mat lines, double rho, double theta, int threshold, double srn, double stn, double min_theta) { 3166 HoughLines_1(image.nativeObj, lines.nativeObj, rho, theta, threshold, srn, stn, min_theta); 3167 } 3168 3169 /** 3170 * Finds lines in a binary image using the standard Hough transform. 3171 * 3172 * The function implements the standard or standard multi-scale Hough transform algorithm for line 3173 * detection. See <http://homepages.inf.ed.ac.uk/rbf/HIPR2/hough.htm> for a good explanation of Hough 3174 * transform. 3175 * 3176 * @param image 8-bit, single-channel binary source image. The image may be modified by the function. 3177 * @param lines Output vector of lines. Each line is represented by a 2 or 3 element vector 3178 * \((\rho, \theta)\) or \((\rho, \theta, \textrm{votes})\) . \(\rho\) is the distance from the coordinate origin \((0,0)\) (top-left corner of 3179 * the image). \(\theta\) is the line rotation angle in radians ( 3180 * \(0 \sim \textrm{vertical line}, \pi/2 \sim \textrm{horizontal line}\) ). 3181 * \(\textrm{votes}\) is the value of accumulator. 3182 * @param rho Distance resolution of the accumulator in pixels. 3183 * @param theta Angle resolution of the accumulator in radians. 3184 * @param threshold Accumulator threshold parameter. Only those lines are returned that get enough 3185 * votes ( \(>\texttt{threshold}\) ). 3186 * @param srn For the multi-scale Hough transform, it is a divisor for the distance resolution rho . 3187 * The coarse accumulator distance resolution is rho and the accurate accumulator resolution is 3188 * rho/srn . If both srn=0 and stn=0 , the classical Hough transform is used. Otherwise, both these 3189 * parameters should be positive. 3190 * @param stn For the multi-scale Hough transform, it is a divisor for the distance resolution theta. 3191 * Must fall between 0 and max_theta. 3192 * Must fall between min_theta and CV_PI. 3193 */ 3194 public static void HoughLines(Mat image, Mat lines, double rho, double theta, int threshold, double srn, double stn) { 3195 HoughLines_2(image.nativeObj, lines.nativeObj, rho, theta, threshold, srn, stn); 3196 } 3197 3198 /** 3199 * Finds lines in a binary image using the standard Hough transform. 3200 * 3201 * The function implements the standard or standard multi-scale Hough transform algorithm for line 3202 * detection. See <http://homepages.inf.ed.ac.uk/rbf/HIPR2/hough.htm> for a good explanation of Hough 3203 * transform. 3204 * 3205 * @param image 8-bit, single-channel binary source image. The image may be modified by the function. 3206 * @param lines Output vector of lines. Each line is represented by a 2 or 3 element vector 3207 * \((\rho, \theta)\) or \((\rho, \theta, \textrm{votes})\) . \(\rho\) is the distance from the coordinate origin \((0,0)\) (top-left corner of 3208 * the image). \(\theta\) is the line rotation angle in radians ( 3209 * \(0 \sim \textrm{vertical line}, \pi/2 \sim \textrm{horizontal line}\) ). 3210 * \(\textrm{votes}\) is the value of accumulator. 3211 * @param rho Distance resolution of the accumulator in pixels. 3212 * @param theta Angle resolution of the accumulator in radians. 3213 * @param threshold Accumulator threshold parameter. Only those lines are returned that get enough 3214 * votes ( \(>\texttt{threshold}\) ). 3215 * @param srn For the multi-scale Hough transform, it is a divisor for the distance resolution rho . 3216 * The coarse accumulator distance resolution is rho and the accurate accumulator resolution is 3217 * rho/srn . If both srn=0 and stn=0 , the classical Hough transform is used. Otherwise, both these 3218 * parameters should be positive. 3219 * Must fall between 0 and max_theta. 3220 * Must fall between min_theta and CV_PI. 3221 */ 3222 public static void HoughLines(Mat image, Mat lines, double rho, double theta, int threshold, double srn) { 3223 HoughLines_3(image.nativeObj, lines.nativeObj, rho, theta, threshold, srn); 3224 } 3225 3226 /** 3227 * Finds lines in a binary image using the standard Hough transform. 3228 * 3229 * The function implements the standard or standard multi-scale Hough transform algorithm for line 3230 * detection. See <http://homepages.inf.ed.ac.uk/rbf/HIPR2/hough.htm> for a good explanation of Hough 3231 * transform. 3232 * 3233 * @param image 8-bit, single-channel binary source image. The image may be modified by the function. 3234 * @param lines Output vector of lines. Each line is represented by a 2 or 3 element vector 3235 * \((\rho, \theta)\) or \((\rho, \theta, \textrm{votes})\) . \(\rho\) is the distance from the coordinate origin \((0,0)\) (top-left corner of 3236 * the image). \(\theta\) is the line rotation angle in radians ( 3237 * \(0 \sim \textrm{vertical line}, \pi/2 \sim \textrm{horizontal line}\) ). 3238 * \(\textrm{votes}\) is the value of accumulator. 3239 * @param rho Distance resolution of the accumulator in pixels. 3240 * @param theta Angle resolution of the accumulator in radians. 3241 * @param threshold Accumulator threshold parameter. Only those lines are returned that get enough 3242 * votes ( \(>\texttt{threshold}\) ). 3243 * The coarse accumulator distance resolution is rho and the accurate accumulator resolution is 3244 * rho/srn . If both srn=0 and stn=0 , the classical Hough transform is used. Otherwise, both these 3245 * parameters should be positive. 3246 * Must fall between 0 and max_theta. 3247 * Must fall between min_theta and CV_PI. 3248 */ 3249 public static void HoughLines(Mat image, Mat lines, double rho, double theta, int threshold) { 3250 HoughLines_4(image.nativeObj, lines.nativeObj, rho, theta, threshold); 3251 } 3252 3253 3254 // 3255 // C++: void cv::HoughLinesP(Mat image, Mat& lines, double rho, double theta, int threshold, double minLineLength = 0, double maxLineGap = 0) 3256 // 3257 3258 /** 3259 * Finds line segments in a binary image using the probabilistic Hough transform. 3260 * 3261 * The function implements the probabilistic Hough transform algorithm for line detection, described 3262 * in CITE: Matas00 3263 * 3264 * See the line detection example below: 3265 * INCLUDE: snippets/imgproc_HoughLinesP.cpp 3266 * This is a sample picture the function parameters have been tuned for: 3267 * 3268 * ![image](pics/building.jpg) 3269 * 3270 * And this is the output of the above program in case of the probabilistic Hough transform: 3271 * 3272 * ![image](pics/houghp.png) 3273 * 3274 * @param image 8-bit, single-channel binary source image. The image may be modified by the function. 3275 * @param lines Output vector of lines. Each line is represented by a 4-element vector 3276 * \((x_1, y_1, x_2, y_2)\) , where \((x_1,y_1)\) and \((x_2, y_2)\) are the ending points of each detected 3277 * line segment. 3278 * @param rho Distance resolution of the accumulator in pixels. 3279 * @param theta Angle resolution of the accumulator in radians. 3280 * @param threshold Accumulator threshold parameter. Only those lines are returned that get enough 3281 * votes ( \(>\texttt{threshold}\) ). 3282 * @param minLineLength Minimum line length. Line segments shorter than that are rejected. 3283 * @param maxLineGap Maximum allowed gap between points on the same line to link them. 3284 * 3285 * SEE: LineSegmentDetector 3286 */ 3287 public static void HoughLinesP(Mat image, Mat lines, double rho, double theta, int threshold, double minLineLength, double maxLineGap) { 3288 HoughLinesP_0(image.nativeObj, lines.nativeObj, rho, theta, threshold, minLineLength, maxLineGap); 3289 } 3290 3291 /** 3292 * Finds line segments in a binary image using the probabilistic Hough transform. 3293 * 3294 * The function implements the probabilistic Hough transform algorithm for line detection, described 3295 * in CITE: Matas00 3296 * 3297 * See the line detection example below: 3298 * INCLUDE: snippets/imgproc_HoughLinesP.cpp 3299 * This is a sample picture the function parameters have been tuned for: 3300 * 3301 * ![image](pics/building.jpg) 3302 * 3303 * And this is the output of the above program in case of the probabilistic Hough transform: 3304 * 3305 * ![image](pics/houghp.png) 3306 * 3307 * @param image 8-bit, single-channel binary source image. The image may be modified by the function. 3308 * @param lines Output vector of lines. Each line is represented by a 4-element vector 3309 * \((x_1, y_1, x_2, y_2)\) , where \((x_1,y_1)\) and \((x_2, y_2)\) are the ending points of each detected 3310 * line segment. 3311 * @param rho Distance resolution of the accumulator in pixels. 3312 * @param theta Angle resolution of the accumulator in radians. 3313 * @param threshold Accumulator threshold parameter. Only those lines are returned that get enough 3314 * votes ( \(>\texttt{threshold}\) ). 3315 * @param minLineLength Minimum line length. Line segments shorter than that are rejected. 3316 * 3317 * SEE: LineSegmentDetector 3318 */ 3319 public static void HoughLinesP(Mat image, Mat lines, double rho, double theta, int threshold, double minLineLength) { 3320 HoughLinesP_1(image.nativeObj, lines.nativeObj, rho, theta, threshold, minLineLength); 3321 } 3322 3323 /** 3324 * Finds line segments in a binary image using the probabilistic Hough transform. 3325 * 3326 * The function implements the probabilistic Hough transform algorithm for line detection, described 3327 * in CITE: Matas00 3328 * 3329 * See the line detection example below: 3330 * INCLUDE: snippets/imgproc_HoughLinesP.cpp 3331 * This is a sample picture the function parameters have been tuned for: 3332 * 3333 * ![image](pics/building.jpg) 3334 * 3335 * And this is the output of the above program in case of the probabilistic Hough transform: 3336 * 3337 * ![image](pics/houghp.png) 3338 * 3339 * @param image 8-bit, single-channel binary source image. The image may be modified by the function. 3340 * @param lines Output vector of lines. Each line is represented by a 4-element vector 3341 * \((x_1, y_1, x_2, y_2)\) , where \((x_1,y_1)\) and \((x_2, y_2)\) are the ending points of each detected 3342 * line segment. 3343 * @param rho Distance resolution of the accumulator in pixels. 3344 * @param theta Angle resolution of the accumulator in radians. 3345 * @param threshold Accumulator threshold parameter. Only those lines are returned that get enough 3346 * votes ( \(>\texttt{threshold}\) ). 3347 * 3348 * SEE: LineSegmentDetector 3349 */ 3350 public static void HoughLinesP(Mat image, Mat lines, double rho, double theta, int threshold) { 3351 HoughLinesP_2(image.nativeObj, lines.nativeObj, rho, theta, threshold); 3352 } 3353 3354 3355 // 3356 // C++: void cv::HoughLinesPointSet(Mat point, Mat& lines, int lines_max, int threshold, double min_rho, double max_rho, double rho_step, double min_theta, double max_theta, double theta_step) 3357 // 3358 3359 /** 3360 * Finds lines in a set of points using the standard Hough transform. 3361 * 3362 * The function finds lines in a set of points using a modification of the Hough transform. 3363 * INCLUDE: snippets/imgproc_HoughLinesPointSet.cpp 3364 * @param point Input vector of points. Each vector must be encoded as a Point vector \((x,y)\). Type must be CV_32FC2 or CV_32SC2. 3365 * @param lines Output vector of found lines. Each vector is encoded as a vector<Vec3d> \((votes, rho, theta)\). 3366 * The larger the value of 'votes', the higher the reliability of the Hough line. 3367 * @param lines_max Max count of Hough lines. 3368 * @param threshold Accumulator threshold parameter. Only those lines are returned that get enough 3369 * votes ( \(>\texttt{threshold}\) ). 3370 * @param min_rho Minimum value for \(\rho\) for the accumulator (Note: \(\rho\) can be negative. The absolute value \(|\rho|\) is the distance of a line to the origin.). 3371 * @param max_rho Maximum value for \(\rho\) for the accumulator. 3372 * @param rho_step Distance resolution of the accumulator. 3373 * @param min_theta Minimum angle value of the accumulator in radians. 3374 * @param max_theta Maximum angle value of the accumulator in radians. 3375 * @param theta_step Angle resolution of the accumulator in radians. 3376 */ 3377 public static void HoughLinesPointSet(Mat point, Mat lines, int lines_max, int threshold, double min_rho, double max_rho, double rho_step, double min_theta, double max_theta, double theta_step) { 3378 HoughLinesPointSet_0(point.nativeObj, lines.nativeObj, lines_max, threshold, min_rho, max_rho, rho_step, min_theta, max_theta, theta_step); 3379 } 3380 3381 3382 // 3383 // C++: void cv::HoughCircles(Mat image, Mat& circles, int method, double dp, double minDist, double param1 = 100, double param2 = 100, int minRadius = 0, int maxRadius = 0) 3384 // 3385 3386 /** 3387 * Finds circles in a grayscale image using the Hough transform. 3388 * 3389 * The function finds circles in a grayscale image using a modification of the Hough transform. 3390 * 3391 * Example: : 3392 * INCLUDE: snippets/imgproc_HoughLinesCircles.cpp 3393 * 3394 * <b>Note:</b> Usually the function detects the centers of circles well. However, it may fail to find correct 3395 * radii. You can assist to the function by specifying the radius range ( minRadius and maxRadius ) if 3396 * you know it. Or, in the case of #HOUGH_GRADIENT method you may set maxRadius to a negative number 3397 * to return centers only without radius search, and find the correct radius using an additional procedure. 3398 * 3399 * It also helps to smooth image a bit unless it's already soft. For example, 3400 * GaussianBlur() with 7x7 kernel and 1.5x1.5 sigma or similar blurring may help. 3401 * 3402 * @param image 8-bit, single-channel, grayscale input image. 3403 * @param circles Output vector of found circles. Each vector is encoded as 3 or 4 element 3404 * floating-point vector \((x, y, radius)\) or \((x, y, radius, votes)\) . 3405 * @param method Detection method, see #HoughModes. The available methods are #HOUGH_GRADIENT and #HOUGH_GRADIENT_ALT. 3406 * @param dp Inverse ratio of the accumulator resolution to the image resolution. For example, if 3407 * dp=1 , the accumulator has the same resolution as the input image. If dp=2 , the accumulator has 3408 * half as big width and height. For #HOUGH_GRADIENT_ALT the recommended value is dp=1.5, 3409 * unless some small very circles need to be detected. 3410 * @param minDist Minimum distance between the centers of the detected circles. If the parameter is 3411 * too small, multiple neighbor circles may be falsely detected in addition to a true one. If it is 3412 * too large, some circles may be missed. 3413 * @param param1 First method-specific parameter. In case of #HOUGH_GRADIENT and #HOUGH_GRADIENT_ALT, 3414 * it is the higher threshold of the two passed to the Canny edge detector (the lower one is twice smaller). 3415 * Note that #HOUGH_GRADIENT_ALT uses #Scharr algorithm to compute image derivatives, so the threshold value 3416 * shough normally be higher, such as 300 or normally exposed and contrasty images. 3417 * @param param2 Second method-specific parameter. In case of #HOUGH_GRADIENT, it is the 3418 * accumulator threshold for the circle centers at the detection stage. The smaller it is, the more 3419 * false circles may be detected. Circles, corresponding to the larger accumulator values, will be 3420 * returned first. In the case of #HOUGH_GRADIENT_ALT algorithm, this is the circle "perfectness" measure. 3421 * The closer it to 1, the better shaped circles algorithm selects. In most cases 0.9 should be fine. 3422 * If you want get better detection of small circles, you may decrease it to 0.85, 0.8 or even less. 3423 * But then also try to limit the search range [minRadius, maxRadius] to avoid many false circles. 3424 * @param minRadius Minimum circle radius. 3425 * @param maxRadius Maximum circle radius. If <= 0, uses the maximum image dimension. If < 0, #HOUGH_GRADIENT returns 3426 * centers without finding the radius. #HOUGH_GRADIENT_ALT always computes circle radiuses. 3427 * 3428 * SEE: fitEllipse, minEnclosingCircle 3429 */ 3430 public static void HoughCircles(Mat image, Mat circles, int method, double dp, double minDist, double param1, double param2, int minRadius, int maxRadius) { 3431 HoughCircles_0(image.nativeObj, circles.nativeObj, method, dp, minDist, param1, param2, minRadius, maxRadius); 3432 } 3433 3434 /** 3435 * Finds circles in a grayscale image using the Hough transform. 3436 * 3437 * The function finds circles in a grayscale image using a modification of the Hough transform. 3438 * 3439 * Example: : 3440 * INCLUDE: snippets/imgproc_HoughLinesCircles.cpp 3441 * 3442 * <b>Note:</b> Usually the function detects the centers of circles well. However, it may fail to find correct 3443 * radii. You can assist to the function by specifying the radius range ( minRadius and maxRadius ) if 3444 * you know it. Or, in the case of #HOUGH_GRADIENT method you may set maxRadius to a negative number 3445 * to return centers only without radius search, and find the correct radius using an additional procedure. 3446 * 3447 * It also helps to smooth image a bit unless it's already soft. For example, 3448 * GaussianBlur() with 7x7 kernel and 1.5x1.5 sigma or similar blurring may help. 3449 * 3450 * @param image 8-bit, single-channel, grayscale input image. 3451 * @param circles Output vector of found circles. Each vector is encoded as 3 or 4 element 3452 * floating-point vector \((x, y, radius)\) or \((x, y, radius, votes)\) . 3453 * @param method Detection method, see #HoughModes. The available methods are #HOUGH_GRADIENT and #HOUGH_GRADIENT_ALT. 3454 * @param dp Inverse ratio of the accumulator resolution to the image resolution. For example, if 3455 * dp=1 , the accumulator has the same resolution as the input image. If dp=2 , the accumulator has 3456 * half as big width and height. For #HOUGH_GRADIENT_ALT the recommended value is dp=1.5, 3457 * unless some small very circles need to be detected. 3458 * @param minDist Minimum distance between the centers of the detected circles. If the parameter is 3459 * too small, multiple neighbor circles may be falsely detected in addition to a true one. If it is 3460 * too large, some circles may be missed. 3461 * @param param1 First method-specific parameter. In case of #HOUGH_GRADIENT and #HOUGH_GRADIENT_ALT, 3462 * it is the higher threshold of the two passed to the Canny edge detector (the lower one is twice smaller). 3463 * Note that #HOUGH_GRADIENT_ALT uses #Scharr algorithm to compute image derivatives, so the threshold value 3464 * shough normally be higher, such as 300 or normally exposed and contrasty images. 3465 * @param param2 Second method-specific parameter. In case of #HOUGH_GRADIENT, it is the 3466 * accumulator threshold for the circle centers at the detection stage. The smaller it is, the more 3467 * false circles may be detected. Circles, corresponding to the larger accumulator values, will be 3468 * returned first. In the case of #HOUGH_GRADIENT_ALT algorithm, this is the circle "perfectness" measure. 3469 * The closer it to 1, the better shaped circles algorithm selects. In most cases 0.9 should be fine. 3470 * If you want get better detection of small circles, you may decrease it to 0.85, 0.8 or even less. 3471 * But then also try to limit the search range [minRadius, maxRadius] to avoid many false circles. 3472 * @param minRadius Minimum circle radius. 3473 * centers without finding the radius. #HOUGH_GRADIENT_ALT always computes circle radiuses. 3474 * 3475 * SEE: fitEllipse, minEnclosingCircle 3476 */ 3477 public static void HoughCircles(Mat image, Mat circles, int method, double dp, double minDist, double param1, double param2, int minRadius) { 3478 HoughCircles_1(image.nativeObj, circles.nativeObj, method, dp, minDist, param1, param2, minRadius); 3479 } 3480 3481 /** 3482 * Finds circles in a grayscale image using the Hough transform. 3483 * 3484 * The function finds circles in a grayscale image using a modification of the Hough transform. 3485 * 3486 * Example: : 3487 * INCLUDE: snippets/imgproc_HoughLinesCircles.cpp 3488 * 3489 * <b>Note:</b> Usually the function detects the centers of circles well. However, it may fail to find correct 3490 * radii. You can assist to the function by specifying the radius range ( minRadius and maxRadius ) if 3491 * you know it. Or, in the case of #HOUGH_GRADIENT method you may set maxRadius to a negative number 3492 * to return centers only without radius search, and find the correct radius using an additional procedure. 3493 * 3494 * It also helps to smooth image a bit unless it's already soft. For example, 3495 * GaussianBlur() with 7x7 kernel and 1.5x1.5 sigma or similar blurring may help. 3496 * 3497 * @param image 8-bit, single-channel, grayscale input image. 3498 * @param circles Output vector of found circles. Each vector is encoded as 3 or 4 element 3499 * floating-point vector \((x, y, radius)\) or \((x, y, radius, votes)\) . 3500 * @param method Detection method, see #HoughModes. The available methods are #HOUGH_GRADIENT and #HOUGH_GRADIENT_ALT. 3501 * @param dp Inverse ratio of the accumulator resolution to the image resolution. For example, if 3502 * dp=1 , the accumulator has the same resolution as the input image. If dp=2 , the accumulator has 3503 * half as big width and height. For #HOUGH_GRADIENT_ALT the recommended value is dp=1.5, 3504 * unless some small very circles need to be detected. 3505 * @param minDist Minimum distance between the centers of the detected circles. If the parameter is 3506 * too small, multiple neighbor circles may be falsely detected in addition to a true one. If it is 3507 * too large, some circles may be missed. 3508 * @param param1 First method-specific parameter. In case of #HOUGH_GRADIENT and #HOUGH_GRADIENT_ALT, 3509 * it is the higher threshold of the two passed to the Canny edge detector (the lower one is twice smaller). 3510 * Note that #HOUGH_GRADIENT_ALT uses #Scharr algorithm to compute image derivatives, so the threshold value 3511 * shough normally be higher, such as 300 or normally exposed and contrasty images. 3512 * @param param2 Second method-specific parameter. In case of #HOUGH_GRADIENT, it is the 3513 * accumulator threshold for the circle centers at the detection stage. The smaller it is, the more 3514 * false circles may be detected. Circles, corresponding to the larger accumulator values, will be 3515 * returned first. In the case of #HOUGH_GRADIENT_ALT algorithm, this is the circle "perfectness" measure. 3516 * The closer it to 1, the better shaped circles algorithm selects. In most cases 0.9 should be fine. 3517 * If you want get better detection of small circles, you may decrease it to 0.85, 0.8 or even less. 3518 * But then also try to limit the search range [minRadius, maxRadius] to avoid many false circles. 3519 * centers without finding the radius. #HOUGH_GRADIENT_ALT always computes circle radiuses. 3520 * 3521 * SEE: fitEllipse, minEnclosingCircle 3522 */ 3523 public static void HoughCircles(Mat image, Mat circles, int method, double dp, double minDist, double param1, double param2) { 3524 HoughCircles_2(image.nativeObj, circles.nativeObj, method, dp, minDist, param1, param2); 3525 } 3526 3527 /** 3528 * Finds circles in a grayscale image using the Hough transform. 3529 * 3530 * The function finds circles in a grayscale image using a modification of the Hough transform. 3531 * 3532 * Example: : 3533 * INCLUDE: snippets/imgproc_HoughLinesCircles.cpp 3534 * 3535 * <b>Note:</b> Usually the function detects the centers of circles well. However, it may fail to find correct 3536 * radii. You can assist to the function by specifying the radius range ( minRadius and maxRadius ) if 3537 * you know it. Or, in the case of #HOUGH_GRADIENT method you may set maxRadius to a negative number 3538 * to return centers only without radius search, and find the correct radius using an additional procedure. 3539 * 3540 * It also helps to smooth image a bit unless it's already soft. For example, 3541 * GaussianBlur() with 7x7 kernel and 1.5x1.5 sigma or similar blurring may help. 3542 * 3543 * @param image 8-bit, single-channel, grayscale input image. 3544 * @param circles Output vector of found circles. Each vector is encoded as 3 or 4 element 3545 * floating-point vector \((x, y, radius)\) or \((x, y, radius, votes)\) . 3546 * @param method Detection method, see #HoughModes. The available methods are #HOUGH_GRADIENT and #HOUGH_GRADIENT_ALT. 3547 * @param dp Inverse ratio of the accumulator resolution to the image resolution. For example, if 3548 * dp=1 , the accumulator has the same resolution as the input image. If dp=2 , the accumulator has 3549 * half as big width and height. For #HOUGH_GRADIENT_ALT the recommended value is dp=1.5, 3550 * unless some small very circles need to be detected. 3551 * @param minDist Minimum distance between the centers of the detected circles. If the parameter is 3552 * too small, multiple neighbor circles may be falsely detected in addition to a true one. If it is 3553 * too large, some circles may be missed. 3554 * @param param1 First method-specific parameter. In case of #HOUGH_GRADIENT and #HOUGH_GRADIENT_ALT, 3555 * it is the higher threshold of the two passed to the Canny edge detector (the lower one is twice smaller). 3556 * Note that #HOUGH_GRADIENT_ALT uses #Scharr algorithm to compute image derivatives, so the threshold value 3557 * shough normally be higher, such as 300 or normally exposed and contrasty images. 3558 * accumulator threshold for the circle centers at the detection stage. The smaller it is, the more 3559 * false circles may be detected. Circles, corresponding to the larger accumulator values, will be 3560 * returned first. In the case of #HOUGH_GRADIENT_ALT algorithm, this is the circle "perfectness" measure. 3561 * The closer it to 1, the better shaped circles algorithm selects. In most cases 0.9 should be fine. 3562 * If you want get better detection of small circles, you may decrease it to 0.85, 0.8 or even less. 3563 * But then also try to limit the search range [minRadius, maxRadius] to avoid many false circles. 3564 * centers without finding the radius. #HOUGH_GRADIENT_ALT always computes circle radiuses. 3565 * 3566 * SEE: fitEllipse, minEnclosingCircle 3567 */ 3568 public static void HoughCircles(Mat image, Mat circles, int method, double dp, double minDist, double param1) { 3569 HoughCircles_3(image.nativeObj, circles.nativeObj, method, dp, minDist, param1); 3570 } 3571 3572 /** 3573 * Finds circles in a grayscale image using the Hough transform. 3574 * 3575 * The function finds circles in a grayscale image using a modification of the Hough transform. 3576 * 3577 * Example: : 3578 * INCLUDE: snippets/imgproc_HoughLinesCircles.cpp 3579 * 3580 * <b>Note:</b> Usually the function detects the centers of circles well. However, it may fail to find correct 3581 * radii. You can assist to the function by specifying the radius range ( minRadius and maxRadius ) if 3582 * you know it. Or, in the case of #HOUGH_GRADIENT method you may set maxRadius to a negative number 3583 * to return centers only without radius search, and find the correct radius using an additional procedure. 3584 * 3585 * It also helps to smooth image a bit unless it's already soft. For example, 3586 * GaussianBlur() with 7x7 kernel and 1.5x1.5 sigma or similar blurring may help. 3587 * 3588 * @param image 8-bit, single-channel, grayscale input image. 3589 * @param circles Output vector of found circles. Each vector is encoded as 3 or 4 element 3590 * floating-point vector \((x, y, radius)\) or \((x, y, radius, votes)\) . 3591 * @param method Detection method, see #HoughModes. The available methods are #HOUGH_GRADIENT and #HOUGH_GRADIENT_ALT. 3592 * @param dp Inverse ratio of the accumulator resolution to the image resolution. For example, if 3593 * dp=1 , the accumulator has the same resolution as the input image. If dp=2 , the accumulator has 3594 * half as big width and height. For #HOUGH_GRADIENT_ALT the recommended value is dp=1.5, 3595 * unless some small very circles need to be detected. 3596 * @param minDist Minimum distance between the centers of the detected circles. If the parameter is 3597 * too small, multiple neighbor circles may be falsely detected in addition to a true one. If it is 3598 * too large, some circles may be missed. 3599 * it is the higher threshold of the two passed to the Canny edge detector (the lower one is twice smaller). 3600 * Note that #HOUGH_GRADIENT_ALT uses #Scharr algorithm to compute image derivatives, so the threshold value 3601 * shough normally be higher, such as 300 or normally exposed and contrasty images. 3602 * accumulator threshold for the circle centers at the detection stage. The smaller it is, the more 3603 * false circles may be detected. Circles, corresponding to the larger accumulator values, will be 3604 * returned first. In the case of #HOUGH_GRADIENT_ALT algorithm, this is the circle "perfectness" measure. 3605 * The closer it to 1, the better shaped circles algorithm selects. In most cases 0.9 should be fine. 3606 * If you want get better detection of small circles, you may decrease it to 0.85, 0.8 or even less. 3607 * But then also try to limit the search range [minRadius, maxRadius] to avoid many false circles. 3608 * centers without finding the radius. #HOUGH_GRADIENT_ALT always computes circle radiuses. 3609 * 3610 * SEE: fitEllipse, minEnclosingCircle 3611 */ 3612 public static void HoughCircles(Mat image, Mat circles, int method, double dp, double minDist) { 3613 HoughCircles_4(image.nativeObj, circles.nativeObj, method, dp, minDist); 3614 } 3615 3616 3617 // 3618 // C++: void cv::erode(Mat src, Mat& dst, Mat kernel, Point anchor = Point(-1,-1), int iterations = 1, int borderType = BORDER_CONSTANT, Scalar borderValue = morphologyDefaultBorderValue()) 3619 // 3620 3621 /** 3622 * Erodes an image by using a specific structuring element. 3623 * 3624 * The function erodes the source image using the specified structuring element that determines the 3625 * shape of a pixel neighborhood over which the minimum is taken: 3626 * 3627 * \(\texttt{dst} (x,y) = \min _{(x',y'): \, \texttt{element} (x',y') \ne0 } \texttt{src} (x+x',y+y')\) 3628 * 3629 * The function supports the in-place mode. Erosion can be applied several ( iterations ) times. In 3630 * case of multi-channel images, each channel is processed independently. 3631 * 3632 * @param src input image; the number of channels can be arbitrary, but the depth should be one of 3633 * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F. 3634 * @param dst output image of the same size and type as src. 3635 * @param kernel structuring element used for erosion; if {@code element=Mat()}, a {@code 3 x 3} rectangular 3636 * structuring element is used. Kernel can be created using #getStructuringElement. 3637 * @param anchor position of the anchor within the element; default value (-1, -1) means that the 3638 * anchor is at the element center. 3639 * @param iterations number of times erosion is applied. 3640 * @param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported. 3641 * @param borderValue border value in case of a constant border 3642 * SEE: dilate, morphologyEx, getStructuringElement 3643 */ 3644 public static void erode(Mat src, Mat dst, Mat kernel, Point anchor, int iterations, int borderType, Scalar borderValue) { 3645 erode_0(src.nativeObj, dst.nativeObj, kernel.nativeObj, anchor.x, anchor.y, iterations, borderType, borderValue.val[0], borderValue.val[1], borderValue.val[2], borderValue.val[3]); 3646 } 3647 3648 /** 3649 * Erodes an image by using a specific structuring element. 3650 * 3651 * The function erodes the source image using the specified structuring element that determines the 3652 * shape of a pixel neighborhood over which the minimum is taken: 3653 * 3654 * \(\texttt{dst} (x,y) = \min _{(x',y'): \, \texttt{element} (x',y') \ne0 } \texttt{src} (x+x',y+y')\) 3655 * 3656 * The function supports the in-place mode. Erosion can be applied several ( iterations ) times. In 3657 * case of multi-channel images, each channel is processed independently. 3658 * 3659 * @param src input image; the number of channels can be arbitrary, but the depth should be one of 3660 * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F. 3661 * @param dst output image of the same size and type as src. 3662 * @param kernel structuring element used for erosion; if {@code element=Mat()}, a {@code 3 x 3} rectangular 3663 * structuring element is used. Kernel can be created using #getStructuringElement. 3664 * @param anchor position of the anchor within the element; default value (-1, -1) means that the 3665 * anchor is at the element center. 3666 * @param iterations number of times erosion is applied. 3667 * @param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported. 3668 * SEE: dilate, morphologyEx, getStructuringElement 3669 */ 3670 public static void erode(Mat src, Mat dst, Mat kernel, Point anchor, int iterations, int borderType) { 3671 erode_1(src.nativeObj, dst.nativeObj, kernel.nativeObj, anchor.x, anchor.y, iterations, borderType); 3672 } 3673 3674 /** 3675 * Erodes an image by using a specific structuring element. 3676 * 3677 * The function erodes the source image using the specified structuring element that determines the 3678 * shape of a pixel neighborhood over which the minimum is taken: 3679 * 3680 * \(\texttt{dst} (x,y) = \min _{(x',y'): \, \texttt{element} (x',y') \ne0 } \texttt{src} (x+x',y+y')\) 3681 * 3682 * The function supports the in-place mode. Erosion can be applied several ( iterations ) times. In 3683 * case of multi-channel images, each channel is processed independently. 3684 * 3685 * @param src input image; the number of channels can be arbitrary, but the depth should be one of 3686 * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F. 3687 * @param dst output image of the same size and type as src. 3688 * @param kernel structuring element used for erosion; if {@code element=Mat()}, a {@code 3 x 3} rectangular 3689 * structuring element is used. Kernel can be created using #getStructuringElement. 3690 * @param anchor position of the anchor within the element; default value (-1, -1) means that the 3691 * anchor is at the element center. 3692 * @param iterations number of times erosion is applied. 3693 * SEE: dilate, morphologyEx, getStructuringElement 3694 */ 3695 public static void erode(Mat src, Mat dst, Mat kernel, Point anchor, int iterations) { 3696 erode_2(src.nativeObj, dst.nativeObj, kernel.nativeObj, anchor.x, anchor.y, iterations); 3697 } 3698 3699 /** 3700 * Erodes an image by using a specific structuring element. 3701 * 3702 * The function erodes the source image using the specified structuring element that determines the 3703 * shape of a pixel neighborhood over which the minimum is taken: 3704 * 3705 * \(\texttt{dst} (x,y) = \min _{(x',y'): \, \texttt{element} (x',y') \ne0 } \texttt{src} (x+x',y+y')\) 3706 * 3707 * The function supports the in-place mode. Erosion can be applied several ( iterations ) times. In 3708 * case of multi-channel images, each channel is processed independently. 3709 * 3710 * @param src input image; the number of channels can be arbitrary, but the depth should be one of 3711 * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F. 3712 * @param dst output image of the same size and type as src. 3713 * @param kernel structuring element used for erosion; if {@code element=Mat()}, a {@code 3 x 3} rectangular 3714 * structuring element is used. Kernel can be created using #getStructuringElement. 3715 * @param anchor position of the anchor within the element; default value (-1, -1) means that the 3716 * anchor is at the element center. 3717 * SEE: dilate, morphologyEx, getStructuringElement 3718 */ 3719 public static void erode(Mat src, Mat dst, Mat kernel, Point anchor) { 3720 erode_3(src.nativeObj, dst.nativeObj, kernel.nativeObj, anchor.x, anchor.y); 3721 } 3722 3723 /** 3724 * Erodes an image by using a specific structuring element. 3725 * 3726 * The function erodes the source image using the specified structuring element that determines the 3727 * shape of a pixel neighborhood over which the minimum is taken: 3728 * 3729 * \(\texttt{dst} (x,y) = \min _{(x',y'): \, \texttt{element} (x',y') \ne0 } \texttt{src} (x+x',y+y')\) 3730 * 3731 * The function supports the in-place mode. Erosion can be applied several ( iterations ) times. In 3732 * case of multi-channel images, each channel is processed independently. 3733 * 3734 * @param src input image; the number of channels can be arbitrary, but the depth should be one of 3735 * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F. 3736 * @param dst output image of the same size and type as src. 3737 * @param kernel structuring element used for erosion; if {@code element=Mat()}, a {@code 3 x 3} rectangular 3738 * structuring element is used. Kernel can be created using #getStructuringElement. 3739 * anchor is at the element center. 3740 * SEE: dilate, morphologyEx, getStructuringElement 3741 */ 3742 public static void erode(Mat src, Mat dst, Mat kernel) { 3743 erode_4(src.nativeObj, dst.nativeObj, kernel.nativeObj); 3744 } 3745 3746 3747 // 3748 // C++: void cv::dilate(Mat src, Mat& dst, Mat kernel, Point anchor = Point(-1,-1), int iterations = 1, int borderType = BORDER_CONSTANT, Scalar borderValue = morphologyDefaultBorderValue()) 3749 // 3750 3751 /** 3752 * Dilates an image by using a specific structuring element. 3753 * 3754 * The function dilates the source image using the specified structuring element that determines the 3755 * shape of a pixel neighborhood over which the maximum is taken: 3756 * \(\texttt{dst} (x,y) = \max _{(x',y'): \, \texttt{element} (x',y') \ne0 } \texttt{src} (x+x',y+y')\) 3757 * 3758 * The function supports the in-place mode. Dilation can be applied several ( iterations ) times. In 3759 * case of multi-channel images, each channel is processed independently. 3760 * 3761 * @param src input image; the number of channels can be arbitrary, but the depth should be one of 3762 * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F. 3763 * @param dst output image of the same size and type as src. 3764 * @param kernel structuring element used for dilation; if elemenat=Mat(), a 3 x 3 rectangular 3765 * structuring element is used. Kernel can be created using #getStructuringElement 3766 * @param anchor position of the anchor within the element; default value (-1, -1) means that the 3767 * anchor is at the element center. 3768 * @param iterations number of times dilation is applied. 3769 * @param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not suported. 3770 * @param borderValue border value in case of a constant border 3771 * SEE: erode, morphologyEx, getStructuringElement 3772 */ 3773 public static void dilate(Mat src, Mat dst, Mat kernel, Point anchor, int iterations, int borderType, Scalar borderValue) { 3774 dilate_0(src.nativeObj, dst.nativeObj, kernel.nativeObj, anchor.x, anchor.y, iterations, borderType, borderValue.val[0], borderValue.val[1], borderValue.val[2], borderValue.val[3]); 3775 } 3776 3777 /** 3778 * Dilates an image by using a specific structuring element. 3779 * 3780 * The function dilates the source image using the specified structuring element that determines the 3781 * shape of a pixel neighborhood over which the maximum is taken: 3782 * \(\texttt{dst} (x,y) = \max _{(x',y'): \, \texttt{element} (x',y') \ne0 } \texttt{src} (x+x',y+y')\) 3783 * 3784 * The function supports the in-place mode. Dilation can be applied several ( iterations ) times. In 3785 * case of multi-channel images, each channel is processed independently. 3786 * 3787 * @param src input image; the number of channels can be arbitrary, but the depth should be one of 3788 * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F. 3789 * @param dst output image of the same size and type as src. 3790 * @param kernel structuring element used for dilation; if elemenat=Mat(), a 3 x 3 rectangular 3791 * structuring element is used. Kernel can be created using #getStructuringElement 3792 * @param anchor position of the anchor within the element; default value (-1, -1) means that the 3793 * anchor is at the element center. 3794 * @param iterations number of times dilation is applied. 3795 * @param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not suported. 3796 * SEE: erode, morphologyEx, getStructuringElement 3797 */ 3798 public static void dilate(Mat src, Mat dst, Mat kernel, Point anchor, int iterations, int borderType) { 3799 dilate_1(src.nativeObj, dst.nativeObj, kernel.nativeObj, anchor.x, anchor.y, iterations, borderType); 3800 } 3801 3802 /** 3803 * Dilates an image by using a specific structuring element. 3804 * 3805 * The function dilates the source image using the specified structuring element that determines the 3806 * shape of a pixel neighborhood over which the maximum is taken: 3807 * \(\texttt{dst} (x,y) = \max _{(x',y'): \, \texttt{element} (x',y') \ne0 } \texttt{src} (x+x',y+y')\) 3808 * 3809 * The function supports the in-place mode. Dilation can be applied several ( iterations ) times. In 3810 * case of multi-channel images, each channel is processed independently. 3811 * 3812 * @param src input image; the number of channels can be arbitrary, but the depth should be one of 3813 * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F. 3814 * @param dst output image of the same size and type as src. 3815 * @param kernel structuring element used for dilation; if elemenat=Mat(), a 3 x 3 rectangular 3816 * structuring element is used. Kernel can be created using #getStructuringElement 3817 * @param anchor position of the anchor within the element; default value (-1, -1) means that the 3818 * anchor is at the element center. 3819 * @param iterations number of times dilation is applied. 3820 * SEE: erode, morphologyEx, getStructuringElement 3821 */ 3822 public static void dilate(Mat src, Mat dst, Mat kernel, Point anchor, int iterations) { 3823 dilate_2(src.nativeObj, dst.nativeObj, kernel.nativeObj, anchor.x, anchor.y, iterations); 3824 } 3825 3826 /** 3827 * Dilates an image by using a specific structuring element. 3828 * 3829 * The function dilates the source image using the specified structuring element that determines the 3830 * shape of a pixel neighborhood over which the maximum is taken: 3831 * \(\texttt{dst} (x,y) = \max _{(x',y'): \, \texttt{element} (x',y') \ne0 } \texttt{src} (x+x',y+y')\) 3832 * 3833 * The function supports the in-place mode. Dilation can be applied several ( iterations ) times. In 3834 * case of multi-channel images, each channel is processed independently. 3835 * 3836 * @param src input image; the number of channels can be arbitrary, but the depth should be one of 3837 * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F. 3838 * @param dst output image of the same size and type as src. 3839 * @param kernel structuring element used for dilation; if elemenat=Mat(), a 3 x 3 rectangular 3840 * structuring element is used. Kernel can be created using #getStructuringElement 3841 * @param anchor position of the anchor within the element; default value (-1, -1) means that the 3842 * anchor is at the element center. 3843 * SEE: erode, morphologyEx, getStructuringElement 3844 */ 3845 public static void dilate(Mat src, Mat dst, Mat kernel, Point anchor) { 3846 dilate_3(src.nativeObj, dst.nativeObj, kernel.nativeObj, anchor.x, anchor.y); 3847 } 3848 3849 /** 3850 * Dilates an image by using a specific structuring element. 3851 * 3852 * The function dilates the source image using the specified structuring element that determines the 3853 * shape of a pixel neighborhood over which the maximum is taken: 3854 * \(\texttt{dst} (x,y) = \max _{(x',y'): \, \texttt{element} (x',y') \ne0 } \texttt{src} (x+x',y+y')\) 3855 * 3856 * The function supports the in-place mode. Dilation can be applied several ( iterations ) times. In 3857 * case of multi-channel images, each channel is processed independently. 3858 * 3859 * @param src input image; the number of channels can be arbitrary, but the depth should be one of 3860 * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F. 3861 * @param dst output image of the same size and type as src. 3862 * @param kernel structuring element used for dilation; if elemenat=Mat(), a 3 x 3 rectangular 3863 * structuring element is used. Kernel can be created using #getStructuringElement 3864 * anchor is at the element center. 3865 * SEE: erode, morphologyEx, getStructuringElement 3866 */ 3867 public static void dilate(Mat src, Mat dst, Mat kernel) { 3868 dilate_4(src.nativeObj, dst.nativeObj, kernel.nativeObj); 3869 } 3870 3871 3872 // 3873 // C++: void cv::morphologyEx(Mat src, Mat& dst, int op, Mat kernel, Point anchor = Point(-1,-1), int iterations = 1, int borderType = BORDER_CONSTANT, Scalar borderValue = morphologyDefaultBorderValue()) 3874 // 3875 3876 /** 3877 * Performs advanced morphological transformations. 3878 * 3879 * The function cv::morphologyEx can perform advanced morphological transformations using an erosion and dilation as 3880 * basic operations. 3881 * 3882 * Any of the operations can be done in-place. In case of multi-channel images, each channel is 3883 * processed independently. 3884 * 3885 * @param src Source image. The number of channels can be arbitrary. The depth should be one of 3886 * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F. 3887 * @param dst Destination image of the same size and type as source image. 3888 * @param op Type of a morphological operation, see #MorphTypes 3889 * @param kernel Structuring element. It can be created using #getStructuringElement. 3890 * @param anchor Anchor position with the kernel. Negative values mean that the anchor is at the 3891 * kernel center. 3892 * @param iterations Number of times erosion and dilation are applied. 3893 * @param borderType Pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported. 3894 * @param borderValue Border value in case of a constant border. The default value has a special 3895 * meaning. 3896 * SEE: dilate, erode, getStructuringElement 3897 * <b>Note:</b> The number of iterations is the number of times erosion or dilatation operation will be applied. 3898 * For instance, an opening operation (#MORPH_OPEN) with two iterations is equivalent to apply 3899 * successively: erode -> erode -> dilate -> dilate (and not erode -> dilate -> erode -> dilate). 3900 */ 3901 public static void morphologyEx(Mat src, Mat dst, int op, Mat kernel, Point anchor, int iterations, int borderType, Scalar borderValue) { 3902 morphologyEx_0(src.nativeObj, dst.nativeObj, op, kernel.nativeObj, anchor.x, anchor.y, iterations, borderType, borderValue.val[0], borderValue.val[1], borderValue.val[2], borderValue.val[3]); 3903 } 3904 3905 /** 3906 * Performs advanced morphological transformations. 3907 * 3908 * The function cv::morphologyEx can perform advanced morphological transformations using an erosion and dilation as 3909 * basic operations. 3910 * 3911 * Any of the operations can be done in-place. In case of multi-channel images, each channel is 3912 * processed independently. 3913 * 3914 * @param src Source image. The number of channels can be arbitrary. The depth should be one of 3915 * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F. 3916 * @param dst Destination image of the same size and type as source image. 3917 * @param op Type of a morphological operation, see #MorphTypes 3918 * @param kernel Structuring element. It can be created using #getStructuringElement. 3919 * @param anchor Anchor position with the kernel. Negative values mean that the anchor is at the 3920 * kernel center. 3921 * @param iterations Number of times erosion and dilation are applied. 3922 * @param borderType Pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported. 3923 * meaning. 3924 * SEE: dilate, erode, getStructuringElement 3925 * <b>Note:</b> The number of iterations is the number of times erosion or dilatation operation will be applied. 3926 * For instance, an opening operation (#MORPH_OPEN) with two iterations is equivalent to apply 3927 * successively: erode -> erode -> dilate -> dilate (and not erode -> dilate -> erode -> dilate). 3928 */ 3929 public static void morphologyEx(Mat src, Mat dst, int op, Mat kernel, Point anchor, int iterations, int borderType) { 3930 morphologyEx_1(src.nativeObj, dst.nativeObj, op, kernel.nativeObj, anchor.x, anchor.y, iterations, borderType); 3931 } 3932 3933 /** 3934 * Performs advanced morphological transformations. 3935 * 3936 * The function cv::morphologyEx can perform advanced morphological transformations using an erosion and dilation as 3937 * basic operations. 3938 * 3939 * Any of the operations can be done in-place. In case of multi-channel images, each channel is 3940 * processed independently. 3941 * 3942 * @param src Source image. The number of channels can be arbitrary. The depth should be one of 3943 * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F. 3944 * @param dst Destination image of the same size and type as source image. 3945 * @param op Type of a morphological operation, see #MorphTypes 3946 * @param kernel Structuring element. It can be created using #getStructuringElement. 3947 * @param anchor Anchor position with the kernel. Negative values mean that the anchor is at the 3948 * kernel center. 3949 * @param iterations Number of times erosion and dilation are applied. 3950 * meaning. 3951 * SEE: dilate, erode, getStructuringElement 3952 * <b>Note:</b> The number of iterations is the number of times erosion or dilatation operation will be applied. 3953 * For instance, an opening operation (#MORPH_OPEN) with two iterations is equivalent to apply 3954 * successively: erode -> erode -> dilate -> dilate (and not erode -> dilate -> erode -> dilate). 3955 */ 3956 public static void morphologyEx(Mat src, Mat dst, int op, Mat kernel, Point anchor, int iterations) { 3957 morphologyEx_2(src.nativeObj, dst.nativeObj, op, kernel.nativeObj, anchor.x, anchor.y, iterations); 3958 } 3959 3960 /** 3961 * Performs advanced morphological transformations. 3962 * 3963 * The function cv::morphologyEx can perform advanced morphological transformations using an erosion and dilation as 3964 * basic operations. 3965 * 3966 * Any of the operations can be done in-place. In case of multi-channel images, each channel is 3967 * processed independently. 3968 * 3969 * @param src Source image. The number of channels can be arbitrary. The depth should be one of 3970 * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F. 3971 * @param dst Destination image of the same size and type as source image. 3972 * @param op Type of a morphological operation, see #MorphTypes 3973 * @param kernel Structuring element. It can be created using #getStructuringElement. 3974 * @param anchor Anchor position with the kernel. Negative values mean that the anchor is at the 3975 * kernel center. 3976 * meaning. 3977 * SEE: dilate, erode, getStructuringElement 3978 * <b>Note:</b> The number of iterations is the number of times erosion or dilatation operation will be applied. 3979 * For instance, an opening operation (#MORPH_OPEN) with two iterations is equivalent to apply 3980 * successively: erode -> erode -> dilate -> dilate (and not erode -> dilate -> erode -> dilate). 3981 */ 3982 public static void morphologyEx(Mat src, Mat dst, int op, Mat kernel, Point anchor) { 3983 morphologyEx_3(src.nativeObj, dst.nativeObj, op, kernel.nativeObj, anchor.x, anchor.y); 3984 } 3985 3986 /** 3987 * Performs advanced morphological transformations. 3988 * 3989 * The function cv::morphologyEx can perform advanced morphological transformations using an erosion and dilation as 3990 * basic operations. 3991 * 3992 * Any of the operations can be done in-place. In case of multi-channel images, each channel is 3993 * processed independently. 3994 * 3995 * @param src Source image. The number of channels can be arbitrary. The depth should be one of 3996 * CV_8U, CV_16U, CV_16S, CV_32F or CV_64F. 3997 * @param dst Destination image of the same size and type as source image. 3998 * @param op Type of a morphological operation, see #MorphTypes 3999 * @param kernel Structuring element. It can be created using #getStructuringElement. 4000 * kernel center. 4001 * meaning. 4002 * SEE: dilate, erode, getStructuringElement 4003 * <b>Note:</b> The number of iterations is the number of times erosion or dilatation operation will be applied. 4004 * For instance, an opening operation (#MORPH_OPEN) with two iterations is equivalent to apply 4005 * successively: erode -> erode -> dilate -> dilate (and not erode -> dilate -> erode -> dilate). 4006 */ 4007 public static void morphologyEx(Mat src, Mat dst, int op, Mat kernel) { 4008 morphologyEx_4(src.nativeObj, dst.nativeObj, op, kernel.nativeObj); 4009 } 4010 4011 4012 // 4013 // C++: void cv::resize(Mat src, Mat& dst, Size dsize, double fx = 0, double fy = 0, int interpolation = INTER_LINEAR) 4014 // 4015 4016 /** 4017 * Resizes an image. 4018 * 4019 * The function resize resizes the image src down to or up to the specified size. Note that the 4020 * initial dst type or size are not taken into account. Instead, the size and type are derived from 4021 * the {@code src},{@code dsize},{@code fx}, and {@code fy}. If you want to resize src so that it fits the pre-created dst, 4022 * you may call the function as follows: 4023 * <code> 4024 * // explicitly specify dsize=dst.size(); fx and fy will be computed from that. 4025 * resize(src, dst, dst.size(), 0, 0, interpolation); 4026 * </code> 4027 * If you want to decimate the image by factor of 2 in each direction, you can call the function this 4028 * way: 4029 * <code> 4030 * // specify fx and fy and let the function compute the destination image size. 4031 * resize(src, dst, Size(), 0.5, 0.5, interpolation); 4032 * </code> 4033 * To shrink an image, it will generally look best with #INTER_AREA interpolation, whereas to 4034 * enlarge an image, it will generally look best with #INTER_CUBIC (slow) or #INTER_LINEAR 4035 * (faster but still looks OK). 4036 * 4037 * @param src input image. 4038 * @param dst output image; it has the size dsize (when it is non-zero) or the size computed from 4039 * src.size(), fx, and fy; the type of dst is the same as of src. 4040 * @param dsize output image size; if it equals zero ({@code None} in Python), it is computed as: 4041 * \(\texttt{dsize = Size(round(fx*src.cols), round(fy*src.rows))}\) 4042 * Either dsize or both fx and fy must be non-zero. 4043 * @param fx scale factor along the horizontal axis; when it equals 0, it is computed as 4044 * \(\texttt{(double)dsize.width/src.cols}\) 4045 * @param fy scale factor along the vertical axis; when it equals 0, it is computed as 4046 * \(\texttt{(double)dsize.height/src.rows}\) 4047 * @param interpolation interpolation method, see #InterpolationFlags 4048 * 4049 * SEE: warpAffine, warpPerspective, remap 4050 */ 4051 public static void resize(Mat src, Mat dst, Size dsize, double fx, double fy, int interpolation) { 4052 resize_0(src.nativeObj, dst.nativeObj, dsize.width, dsize.height, fx, fy, interpolation); 4053 } 4054 4055 /** 4056 * Resizes an image. 4057 * 4058 * The function resize resizes the image src down to or up to the specified size. Note that the 4059 * initial dst type or size are not taken into account. Instead, the size and type are derived from 4060 * the {@code src},{@code dsize},{@code fx}, and {@code fy}. If you want to resize src so that it fits the pre-created dst, 4061 * you may call the function as follows: 4062 * <code> 4063 * // explicitly specify dsize=dst.size(); fx and fy will be computed from that. 4064 * resize(src, dst, dst.size(), 0, 0, interpolation); 4065 * </code> 4066 * If you want to decimate the image by factor of 2 in each direction, you can call the function this 4067 * way: 4068 * <code> 4069 * // specify fx and fy and let the function compute the destination image size. 4070 * resize(src, dst, Size(), 0.5, 0.5, interpolation); 4071 * </code> 4072 * To shrink an image, it will generally look best with #INTER_AREA interpolation, whereas to 4073 * enlarge an image, it will generally look best with #INTER_CUBIC (slow) or #INTER_LINEAR 4074 * (faster but still looks OK). 4075 * 4076 * @param src input image. 4077 * @param dst output image; it has the size dsize (when it is non-zero) or the size computed from 4078 * src.size(), fx, and fy; the type of dst is the same as of src. 4079 * @param dsize output image size; if it equals zero ({@code None} in Python), it is computed as: 4080 * \(\texttt{dsize = Size(round(fx*src.cols), round(fy*src.rows))}\) 4081 * Either dsize or both fx and fy must be non-zero. 4082 * @param fx scale factor along the horizontal axis; when it equals 0, it is computed as 4083 * \(\texttt{(double)dsize.width/src.cols}\) 4084 * @param fy scale factor along the vertical axis; when it equals 0, it is computed as 4085 * \(\texttt{(double)dsize.height/src.rows}\) 4086 * 4087 * SEE: warpAffine, warpPerspective, remap 4088 */ 4089 public static void resize(Mat src, Mat dst, Size dsize, double fx, double fy) { 4090 resize_1(src.nativeObj, dst.nativeObj, dsize.width, dsize.height, fx, fy); 4091 } 4092 4093 /** 4094 * Resizes an image. 4095 * 4096 * The function resize resizes the image src down to or up to the specified size. Note that the 4097 * initial dst type or size are not taken into account. Instead, the size and type are derived from 4098 * the {@code src},{@code dsize},{@code fx}, and {@code fy}. If you want to resize src so that it fits the pre-created dst, 4099 * you may call the function as follows: 4100 * <code> 4101 * // explicitly specify dsize=dst.size(); fx and fy will be computed from that. 4102 * resize(src, dst, dst.size(), 0, 0, interpolation); 4103 * </code> 4104 * If you want to decimate the image by factor of 2 in each direction, you can call the function this 4105 * way: 4106 * <code> 4107 * // specify fx and fy and let the function compute the destination image size. 4108 * resize(src, dst, Size(), 0.5, 0.5, interpolation); 4109 * </code> 4110 * To shrink an image, it will generally look best with #INTER_AREA interpolation, whereas to 4111 * enlarge an image, it will generally look best with #INTER_CUBIC (slow) or #INTER_LINEAR 4112 * (faster but still looks OK). 4113 * 4114 * @param src input image. 4115 * @param dst output image; it has the size dsize (when it is non-zero) or the size computed from 4116 * src.size(), fx, and fy; the type of dst is the same as of src. 4117 * @param dsize output image size; if it equals zero ({@code None} in Python), it is computed as: 4118 * \(\texttt{dsize = Size(round(fx*src.cols), round(fy*src.rows))}\) 4119 * Either dsize or both fx and fy must be non-zero. 4120 * @param fx scale factor along the horizontal axis; when it equals 0, it is computed as 4121 * \(\texttt{(double)dsize.width/src.cols}\) 4122 * \(\texttt{(double)dsize.height/src.rows}\) 4123 * 4124 * SEE: warpAffine, warpPerspective, remap 4125 */ 4126 public static void resize(Mat src, Mat dst, Size dsize, double fx) { 4127 resize_2(src.nativeObj, dst.nativeObj, dsize.width, dsize.height, fx); 4128 } 4129 4130 /** 4131 * Resizes an image. 4132 * 4133 * The function resize resizes the image src down to or up to the specified size. Note that the 4134 * initial dst type or size are not taken into account. Instead, the size and type are derived from 4135 * the {@code src},{@code dsize},{@code fx}, and {@code fy}. If you want to resize src so that it fits the pre-created dst, 4136 * you may call the function as follows: 4137 * <code> 4138 * // explicitly specify dsize=dst.size(); fx and fy will be computed from that. 4139 * resize(src, dst, dst.size(), 0, 0, interpolation); 4140 * </code> 4141 * If you want to decimate the image by factor of 2 in each direction, you can call the function this 4142 * way: 4143 * <code> 4144 * // specify fx and fy and let the function compute the destination image size. 4145 * resize(src, dst, Size(), 0.5, 0.5, interpolation); 4146 * </code> 4147 * To shrink an image, it will generally look best with #INTER_AREA interpolation, whereas to 4148 * enlarge an image, it will generally look best with #INTER_CUBIC (slow) or #INTER_LINEAR 4149 * (faster but still looks OK). 4150 * 4151 * @param src input image. 4152 * @param dst output image; it has the size dsize (when it is non-zero) or the size computed from 4153 * src.size(), fx, and fy; the type of dst is the same as of src. 4154 * @param dsize output image size; if it equals zero ({@code None} in Python), it is computed as: 4155 * \(\texttt{dsize = Size(round(fx*src.cols), round(fy*src.rows))}\) 4156 * Either dsize or both fx and fy must be non-zero. 4157 * \(\texttt{(double)dsize.width/src.cols}\) 4158 * \(\texttt{(double)dsize.height/src.rows}\) 4159 * 4160 * SEE: warpAffine, warpPerspective, remap 4161 */ 4162 public static void resize(Mat src, Mat dst, Size dsize) { 4163 resize_3(src.nativeObj, dst.nativeObj, dsize.width, dsize.height); 4164 } 4165 4166 4167 // 4168 // C++: void cv::warpAffine(Mat src, Mat& dst, Mat M, Size dsize, int flags = INTER_LINEAR, int borderMode = BORDER_CONSTANT, Scalar borderValue = Scalar()) 4169 // 4170 4171 /** 4172 * Applies an affine transformation to an image. 4173 * 4174 * The function warpAffine transforms the source image using the specified matrix: 4175 * 4176 * \(\texttt{dst} (x,y) = \texttt{src} ( \texttt{M} _{11} x + \texttt{M} _{12} y + \texttt{M} _{13}, \texttt{M} _{21} x + \texttt{M} _{22} y + \texttt{M} _{23})\) 4177 * 4178 * when the flag #WARP_INVERSE_MAP is set. Otherwise, the transformation is first inverted 4179 * with #invertAffineTransform and then put in the formula above instead of M. The function cannot 4180 * operate in-place. 4181 * 4182 * @param src input image. 4183 * @param dst output image that has the size dsize and the same type as src . 4184 * @param M \(2\times 3\) transformation matrix. 4185 * @param dsize size of the output image. 4186 * @param flags combination of interpolation methods (see #InterpolationFlags) and the optional 4187 * flag #WARP_INVERSE_MAP that means that M is the inverse transformation ( 4188 * \(\texttt{dst}\rightarrow\texttt{src}\) ). 4189 * @param borderMode pixel extrapolation method (see #BorderTypes); when 4190 * borderMode=#BORDER_TRANSPARENT, it means that the pixels in the destination image corresponding to 4191 * the "outliers" in the source image are not modified by the function. 4192 * @param borderValue value used in case of a constant border; by default, it is 0. 4193 * 4194 * SEE: warpPerspective, resize, remap, getRectSubPix, transform 4195 */ 4196 public static void warpAffine(Mat src, Mat dst, Mat M, Size dsize, int flags, int borderMode, Scalar borderValue) { 4197 warpAffine_0(src.nativeObj, dst.nativeObj, M.nativeObj, dsize.width, dsize.height, flags, borderMode, borderValue.val[0], borderValue.val[1], borderValue.val[2], borderValue.val[3]); 4198 } 4199 4200 /** 4201 * Applies an affine transformation to an image. 4202 * 4203 * The function warpAffine transforms the source image using the specified matrix: 4204 * 4205 * \(\texttt{dst} (x,y) = \texttt{src} ( \texttt{M} _{11} x + \texttt{M} _{12} y + \texttt{M} _{13}, \texttt{M} _{21} x + \texttt{M} _{22} y + \texttt{M} _{23})\) 4206 * 4207 * when the flag #WARP_INVERSE_MAP is set. Otherwise, the transformation is first inverted 4208 * with #invertAffineTransform and then put in the formula above instead of M. The function cannot 4209 * operate in-place. 4210 * 4211 * @param src input image. 4212 * @param dst output image that has the size dsize and the same type as src . 4213 * @param M \(2\times 3\) transformation matrix. 4214 * @param dsize size of the output image. 4215 * @param flags combination of interpolation methods (see #InterpolationFlags) and the optional 4216 * flag #WARP_INVERSE_MAP that means that M is the inverse transformation ( 4217 * \(\texttt{dst}\rightarrow\texttt{src}\) ). 4218 * @param borderMode pixel extrapolation method (see #BorderTypes); when 4219 * borderMode=#BORDER_TRANSPARENT, it means that the pixels in the destination image corresponding to 4220 * the "outliers" in the source image are not modified by the function. 4221 * 4222 * SEE: warpPerspective, resize, remap, getRectSubPix, transform 4223 */ 4224 public static void warpAffine(Mat src, Mat dst, Mat M, Size dsize, int flags, int borderMode) { 4225 warpAffine_1(src.nativeObj, dst.nativeObj, M.nativeObj, dsize.width, dsize.height, flags, borderMode); 4226 } 4227 4228 /** 4229 * Applies an affine transformation to an image. 4230 * 4231 * The function warpAffine transforms the source image using the specified matrix: 4232 * 4233 * \(\texttt{dst} (x,y) = \texttt{src} ( \texttt{M} _{11} x + \texttt{M} _{12} y + \texttt{M} _{13}, \texttt{M} _{21} x + \texttt{M} _{22} y + \texttt{M} _{23})\) 4234 * 4235 * when the flag #WARP_INVERSE_MAP is set. Otherwise, the transformation is first inverted 4236 * with #invertAffineTransform and then put in the formula above instead of M. The function cannot 4237 * operate in-place. 4238 * 4239 * @param src input image. 4240 * @param dst output image that has the size dsize and the same type as src . 4241 * @param M \(2\times 3\) transformation matrix. 4242 * @param dsize size of the output image. 4243 * @param flags combination of interpolation methods (see #InterpolationFlags) and the optional 4244 * flag #WARP_INVERSE_MAP that means that M is the inverse transformation ( 4245 * \(\texttt{dst}\rightarrow\texttt{src}\) ). 4246 * borderMode=#BORDER_TRANSPARENT, it means that the pixels in the destination image corresponding to 4247 * the "outliers" in the source image are not modified by the function. 4248 * 4249 * SEE: warpPerspective, resize, remap, getRectSubPix, transform 4250 */ 4251 public static void warpAffine(Mat src, Mat dst, Mat M, Size dsize, int flags) { 4252 warpAffine_2(src.nativeObj, dst.nativeObj, M.nativeObj, dsize.width, dsize.height, flags); 4253 } 4254 4255 /** 4256 * Applies an affine transformation to an image. 4257 * 4258 * The function warpAffine transforms the source image using the specified matrix: 4259 * 4260 * \(\texttt{dst} (x,y) = \texttt{src} ( \texttt{M} _{11} x + \texttt{M} _{12} y + \texttt{M} _{13}, \texttt{M} _{21} x + \texttt{M} _{22} y + \texttt{M} _{23})\) 4261 * 4262 * when the flag #WARP_INVERSE_MAP is set. Otherwise, the transformation is first inverted 4263 * with #invertAffineTransform and then put in the formula above instead of M. The function cannot 4264 * operate in-place. 4265 * 4266 * @param src input image. 4267 * @param dst output image that has the size dsize and the same type as src . 4268 * @param M \(2\times 3\) transformation matrix. 4269 * @param dsize size of the output image. 4270 * flag #WARP_INVERSE_MAP that means that M is the inverse transformation ( 4271 * \(\texttt{dst}\rightarrow\texttt{src}\) ). 4272 * borderMode=#BORDER_TRANSPARENT, it means that the pixels in the destination image corresponding to 4273 * the "outliers" in the source image are not modified by the function. 4274 * 4275 * SEE: warpPerspective, resize, remap, getRectSubPix, transform 4276 */ 4277 public static void warpAffine(Mat src, Mat dst, Mat M, Size dsize) { 4278 warpAffine_3(src.nativeObj, dst.nativeObj, M.nativeObj, dsize.width, dsize.height); 4279 } 4280 4281 4282 // 4283 // C++: void cv::warpPerspective(Mat src, Mat& dst, Mat M, Size dsize, int flags = INTER_LINEAR, int borderMode = BORDER_CONSTANT, Scalar borderValue = Scalar()) 4284 // 4285 4286 /** 4287 * Applies a perspective transformation to an image. 4288 * 4289 * The function warpPerspective transforms the source image using the specified matrix: 4290 * 4291 * \(\texttt{dst} (x,y) = \texttt{src} \left ( \frac{M_{11} x + M_{12} y + M_{13}}{M_{31} x + M_{32} y + M_{33}} , 4292 * \frac{M_{21} x + M_{22} y + M_{23}}{M_{31} x + M_{32} y + M_{33}} \right )\) 4293 * 4294 * when the flag #WARP_INVERSE_MAP is set. Otherwise, the transformation is first inverted with invert 4295 * and then put in the formula above instead of M. The function cannot operate in-place. 4296 * 4297 * @param src input image. 4298 * @param dst output image that has the size dsize and the same type as src . 4299 * @param M \(3\times 3\) transformation matrix. 4300 * @param dsize size of the output image. 4301 * @param flags combination of interpolation methods (#INTER_LINEAR or #INTER_NEAREST) and the 4302 * optional flag #WARP_INVERSE_MAP, that sets M as the inverse transformation ( 4303 * \(\texttt{dst}\rightarrow\texttt{src}\) ). 4304 * @param borderMode pixel extrapolation method (#BORDER_CONSTANT or #BORDER_REPLICATE). 4305 * @param borderValue value used in case of a constant border; by default, it equals 0. 4306 * 4307 * SEE: warpAffine, resize, remap, getRectSubPix, perspectiveTransform 4308 */ 4309 public static void warpPerspective(Mat src, Mat dst, Mat M, Size dsize, int flags, int borderMode, Scalar borderValue) { 4310 warpPerspective_0(src.nativeObj, dst.nativeObj, M.nativeObj, dsize.width, dsize.height, flags, borderMode, borderValue.val[0], borderValue.val[1], borderValue.val[2], borderValue.val[3]); 4311 } 4312 4313 /** 4314 * Applies a perspective transformation to an image. 4315 * 4316 * The function warpPerspective transforms the source image using the specified matrix: 4317 * 4318 * \(\texttt{dst} (x,y) = \texttt{src} \left ( \frac{M_{11} x + M_{12} y + M_{13}}{M_{31} x + M_{32} y + M_{33}} , 4319 * \frac{M_{21} x + M_{22} y + M_{23}}{M_{31} x + M_{32} y + M_{33}} \right )\) 4320 * 4321 * when the flag #WARP_INVERSE_MAP is set. Otherwise, the transformation is first inverted with invert 4322 * and then put in the formula above instead of M. The function cannot operate in-place. 4323 * 4324 * @param src input image. 4325 * @param dst output image that has the size dsize and the same type as src . 4326 * @param M \(3\times 3\) transformation matrix. 4327 * @param dsize size of the output image. 4328 * @param flags combination of interpolation methods (#INTER_LINEAR or #INTER_NEAREST) and the 4329 * optional flag #WARP_INVERSE_MAP, that sets M as the inverse transformation ( 4330 * \(\texttt{dst}\rightarrow\texttt{src}\) ). 4331 * @param borderMode pixel extrapolation method (#BORDER_CONSTANT or #BORDER_REPLICATE). 4332 * 4333 * SEE: warpAffine, resize, remap, getRectSubPix, perspectiveTransform 4334 */ 4335 public static void warpPerspective(Mat src, Mat dst, Mat M, Size dsize, int flags, int borderMode) { 4336 warpPerspective_1(src.nativeObj, dst.nativeObj, M.nativeObj, dsize.width, dsize.height, flags, borderMode); 4337 } 4338 4339 /** 4340 * Applies a perspective transformation to an image. 4341 * 4342 * The function warpPerspective transforms the source image using the specified matrix: 4343 * 4344 * \(\texttt{dst} (x,y) = \texttt{src} \left ( \frac{M_{11} x + M_{12} y + M_{13}}{M_{31} x + M_{32} y + M_{33}} , 4345 * \frac{M_{21} x + M_{22} y + M_{23}}{M_{31} x + M_{32} y + M_{33}} \right )\) 4346 * 4347 * when the flag #WARP_INVERSE_MAP is set. Otherwise, the transformation is first inverted with invert 4348 * and then put in the formula above instead of M. The function cannot operate in-place. 4349 * 4350 * @param src input image. 4351 * @param dst output image that has the size dsize and the same type as src . 4352 * @param M \(3\times 3\) transformation matrix. 4353 * @param dsize size of the output image. 4354 * @param flags combination of interpolation methods (#INTER_LINEAR or #INTER_NEAREST) and the 4355 * optional flag #WARP_INVERSE_MAP, that sets M as the inverse transformation ( 4356 * \(\texttt{dst}\rightarrow\texttt{src}\) ). 4357 * 4358 * SEE: warpAffine, resize, remap, getRectSubPix, perspectiveTransform 4359 */ 4360 public static void warpPerspective(Mat src, Mat dst, Mat M, Size dsize, int flags) { 4361 warpPerspective_2(src.nativeObj, dst.nativeObj, M.nativeObj, dsize.width, dsize.height, flags); 4362 } 4363 4364 /** 4365 * Applies a perspective transformation to an image. 4366 * 4367 * The function warpPerspective transforms the source image using the specified matrix: 4368 * 4369 * \(\texttt{dst} (x,y) = \texttt{src} \left ( \frac{M_{11} x + M_{12} y + M_{13}}{M_{31} x + M_{32} y + M_{33}} , 4370 * \frac{M_{21} x + M_{22} y + M_{23}}{M_{31} x + M_{32} y + M_{33}} \right )\) 4371 * 4372 * when the flag #WARP_INVERSE_MAP is set. Otherwise, the transformation is first inverted with invert 4373 * and then put in the formula above instead of M. The function cannot operate in-place. 4374 * 4375 * @param src input image. 4376 * @param dst output image that has the size dsize and the same type as src . 4377 * @param M \(3\times 3\) transformation matrix. 4378 * @param dsize size of the output image. 4379 * optional flag #WARP_INVERSE_MAP, that sets M as the inverse transformation ( 4380 * \(\texttt{dst}\rightarrow\texttt{src}\) ). 4381 * 4382 * SEE: warpAffine, resize, remap, getRectSubPix, perspectiveTransform 4383 */ 4384 public static void warpPerspective(Mat src, Mat dst, Mat M, Size dsize) { 4385 warpPerspective_3(src.nativeObj, dst.nativeObj, M.nativeObj, dsize.width, dsize.height); 4386 } 4387 4388 4389 // 4390 // C++: void cv::remap(Mat src, Mat& dst, Mat map1, Mat map2, int interpolation, int borderMode = BORDER_CONSTANT, Scalar borderValue = Scalar()) 4391 // 4392 4393 /** 4394 * Applies a generic geometrical transformation to an image. 4395 * 4396 * The function remap transforms the source image using the specified map: 4397 * 4398 * \(\texttt{dst} (x,y) = \texttt{src} (map_x(x,y),map_y(x,y))\) 4399 * 4400 * where values of pixels with non-integer coordinates are computed using one of available 4401 * interpolation methods. \(map_x\) and \(map_y\) can be encoded as separate floating-point maps 4402 * in \(map_1\) and \(map_2\) respectively, or interleaved floating-point maps of \((x,y)\) in 4403 * \(map_1\), or fixed-point maps created by using #convertMaps. The reason you might want to 4404 * convert from floating to fixed-point representations of a map is that they can yield much faster 4405 * (\~2x) remapping operations. In the converted case, \(map_1\) contains pairs (cvFloor(x), 4406 * cvFloor(y)) and \(map_2\) contains indices in a table of interpolation coefficients. 4407 * 4408 * This function cannot operate in-place. 4409 * 4410 * @param src Source image. 4411 * @param dst Destination image. It has the same size as map1 and the same type as src . 4412 * @param map1 The first map of either (x,y) points or just x values having the type CV_16SC2 , 4413 * CV_32FC1, or CV_32FC2. See #convertMaps for details on converting a floating point 4414 * representation to fixed-point for speed. 4415 * @param map2 The second map of y values having the type CV_16UC1, CV_32FC1, or none (empty map 4416 * if map1 is (x,y) points), respectively. 4417 * @param interpolation Interpolation method (see #InterpolationFlags). The methods #INTER_AREA 4418 * and #INTER_LINEAR_EXACT are not supported by this function. 4419 * @param borderMode Pixel extrapolation method (see #BorderTypes). When 4420 * borderMode=#BORDER_TRANSPARENT, it means that the pixels in the destination image that 4421 * corresponds to the "outliers" in the source image are not modified by the function. 4422 * @param borderValue Value used in case of a constant border. By default, it is 0. 4423 * <b>Note:</b> 4424 * Due to current implementation limitations the size of an input and output images should be less than 32767x32767. 4425 */ 4426 public static void remap(Mat src, Mat dst, Mat map1, Mat map2, int interpolation, int borderMode, Scalar borderValue) { 4427 remap_0(src.nativeObj, dst.nativeObj, map1.nativeObj, map2.nativeObj, interpolation, borderMode, borderValue.val[0], borderValue.val[1], borderValue.val[2], borderValue.val[3]); 4428 } 4429 4430 /** 4431 * Applies a generic geometrical transformation to an image. 4432 * 4433 * The function remap transforms the source image using the specified map: 4434 * 4435 * \(\texttt{dst} (x,y) = \texttt{src} (map_x(x,y),map_y(x,y))\) 4436 * 4437 * where values of pixels with non-integer coordinates are computed using one of available 4438 * interpolation methods. \(map_x\) and \(map_y\) can be encoded as separate floating-point maps 4439 * in \(map_1\) and \(map_2\) respectively, or interleaved floating-point maps of \((x,y)\) in 4440 * \(map_1\), or fixed-point maps created by using #convertMaps. The reason you might want to 4441 * convert from floating to fixed-point representations of a map is that they can yield much faster 4442 * (\~2x) remapping operations. In the converted case, \(map_1\) contains pairs (cvFloor(x), 4443 * cvFloor(y)) and \(map_2\) contains indices in a table of interpolation coefficients. 4444 * 4445 * This function cannot operate in-place. 4446 * 4447 * @param src Source image. 4448 * @param dst Destination image. It has the same size as map1 and the same type as src . 4449 * @param map1 The first map of either (x,y) points or just x values having the type CV_16SC2 , 4450 * CV_32FC1, or CV_32FC2. See #convertMaps for details on converting a floating point 4451 * representation to fixed-point for speed. 4452 * @param map2 The second map of y values having the type CV_16UC1, CV_32FC1, or none (empty map 4453 * if map1 is (x,y) points), respectively. 4454 * @param interpolation Interpolation method (see #InterpolationFlags). The methods #INTER_AREA 4455 * and #INTER_LINEAR_EXACT are not supported by this function. 4456 * @param borderMode Pixel extrapolation method (see #BorderTypes). When 4457 * borderMode=#BORDER_TRANSPARENT, it means that the pixels in the destination image that 4458 * corresponds to the "outliers" in the source image are not modified by the function. 4459 * <b>Note:</b> 4460 * Due to current implementation limitations the size of an input and output images should be less than 32767x32767. 4461 */ 4462 public static void remap(Mat src, Mat dst, Mat map1, Mat map2, int interpolation, int borderMode) { 4463 remap_1(src.nativeObj, dst.nativeObj, map1.nativeObj, map2.nativeObj, interpolation, borderMode); 4464 } 4465 4466 /** 4467 * Applies a generic geometrical transformation to an image. 4468 * 4469 * The function remap transforms the source image using the specified map: 4470 * 4471 * \(\texttt{dst} (x,y) = \texttt{src} (map_x(x,y),map_y(x,y))\) 4472 * 4473 * where values of pixels with non-integer coordinates are computed using one of available 4474 * interpolation methods. \(map_x\) and \(map_y\) can be encoded as separate floating-point maps 4475 * in \(map_1\) and \(map_2\) respectively, or interleaved floating-point maps of \((x,y)\) in 4476 * \(map_1\), or fixed-point maps created by using #convertMaps. The reason you might want to 4477 * convert from floating to fixed-point representations of a map is that they can yield much faster 4478 * (\~2x) remapping operations. In the converted case, \(map_1\) contains pairs (cvFloor(x), 4479 * cvFloor(y)) and \(map_2\) contains indices in a table of interpolation coefficients. 4480 * 4481 * This function cannot operate in-place. 4482 * 4483 * @param src Source image. 4484 * @param dst Destination image. It has the same size as map1 and the same type as src . 4485 * @param map1 The first map of either (x,y) points or just x values having the type CV_16SC2 , 4486 * CV_32FC1, or CV_32FC2. See #convertMaps for details on converting a floating point 4487 * representation to fixed-point for speed. 4488 * @param map2 The second map of y values having the type CV_16UC1, CV_32FC1, or none (empty map 4489 * if map1 is (x,y) points), respectively. 4490 * @param interpolation Interpolation method (see #InterpolationFlags). The methods #INTER_AREA 4491 * and #INTER_LINEAR_EXACT are not supported by this function. 4492 * borderMode=#BORDER_TRANSPARENT, it means that the pixels in the destination image that 4493 * corresponds to the "outliers" in the source image are not modified by the function. 4494 * <b>Note:</b> 4495 * Due to current implementation limitations the size of an input and output images should be less than 32767x32767. 4496 */ 4497 public static void remap(Mat src, Mat dst, Mat map1, Mat map2, int interpolation) { 4498 remap_2(src.nativeObj, dst.nativeObj, map1.nativeObj, map2.nativeObj, interpolation); 4499 } 4500 4501 4502 // 4503 // C++: void cv::convertMaps(Mat map1, Mat map2, Mat& dstmap1, Mat& dstmap2, int dstmap1type, bool nninterpolation = false) 4504 // 4505 4506 /** 4507 * Converts image transformation maps from one representation to another. 4508 * 4509 * The function converts a pair of maps for remap from one representation to another. The following 4510 * options ( (map1.type(), map2.type()) \(\rightarrow\) (dstmap1.type(), dstmap2.type()) ) are 4511 * supported: 4512 * 4513 * <ul> 4514 * <li> 4515 * \(\texttt{(CV_32FC1, CV_32FC1)} \rightarrow \texttt{(CV_16SC2, CV_16UC1)}\). This is the 4516 * most frequently used conversion operation, in which the original floating-point maps (see #remap) 4517 * are converted to a more compact and much faster fixed-point representation. The first output array 4518 * contains the rounded coordinates and the second array (created only when nninterpolation=false ) 4519 * contains indices in the interpolation tables. 4520 * </li> 4521 * </ul> 4522 * 4523 * <ul> 4524 * <li> 4525 * \(\texttt{(CV_32FC2)} \rightarrow \texttt{(CV_16SC2, CV_16UC1)}\). The same as above but 4526 * the original maps are stored in one 2-channel matrix. 4527 * </li> 4528 * </ul> 4529 * 4530 * <ul> 4531 * <li> 4532 * Reverse conversion. Obviously, the reconstructed floating-point maps will not be exactly the same 4533 * as the originals. 4534 * </li> 4535 * </ul> 4536 * 4537 * @param map1 The first input map of type CV_16SC2, CV_32FC1, or CV_32FC2 . 4538 * @param map2 The second input map of type CV_16UC1, CV_32FC1, or none (empty matrix), 4539 * respectively. 4540 * @param dstmap1 The first output map that has the type dstmap1type and the same size as src . 4541 * @param dstmap2 The second output map. 4542 * @param dstmap1type Type of the first output map that should be CV_16SC2, CV_32FC1, or 4543 * CV_32FC2 . 4544 * @param nninterpolation Flag indicating whether the fixed-point maps are used for the 4545 * nearest-neighbor or for a more complex interpolation. 4546 * 4547 * SEE: remap, undistort, initUndistortRectifyMap 4548 */ 4549 public static void convertMaps(Mat map1, Mat map2, Mat dstmap1, Mat dstmap2, int dstmap1type, boolean nninterpolation) { 4550 convertMaps_0(map1.nativeObj, map2.nativeObj, dstmap1.nativeObj, dstmap2.nativeObj, dstmap1type, nninterpolation); 4551 } 4552 4553 /** 4554 * Converts image transformation maps from one representation to another. 4555 * 4556 * The function converts a pair of maps for remap from one representation to another. The following 4557 * options ( (map1.type(), map2.type()) \(\rightarrow\) (dstmap1.type(), dstmap2.type()) ) are 4558 * supported: 4559 * 4560 * <ul> 4561 * <li> 4562 * \(\texttt{(CV_32FC1, CV_32FC1)} \rightarrow \texttt{(CV_16SC2, CV_16UC1)}\). This is the 4563 * most frequently used conversion operation, in which the original floating-point maps (see #remap) 4564 * are converted to a more compact and much faster fixed-point representation. The first output array 4565 * contains the rounded coordinates and the second array (created only when nninterpolation=false ) 4566 * contains indices in the interpolation tables. 4567 * </li> 4568 * </ul> 4569 * 4570 * <ul> 4571 * <li> 4572 * \(\texttt{(CV_32FC2)} \rightarrow \texttt{(CV_16SC2, CV_16UC1)}\). The same as above but 4573 * the original maps are stored in one 2-channel matrix. 4574 * </li> 4575 * </ul> 4576 * 4577 * <ul> 4578 * <li> 4579 * Reverse conversion. Obviously, the reconstructed floating-point maps will not be exactly the same 4580 * as the originals. 4581 * </li> 4582 * </ul> 4583 * 4584 * @param map1 The first input map of type CV_16SC2, CV_32FC1, or CV_32FC2 . 4585 * @param map2 The second input map of type CV_16UC1, CV_32FC1, or none (empty matrix), 4586 * respectively. 4587 * @param dstmap1 The first output map that has the type dstmap1type and the same size as src . 4588 * @param dstmap2 The second output map. 4589 * @param dstmap1type Type of the first output map that should be CV_16SC2, CV_32FC1, or 4590 * CV_32FC2 . 4591 * nearest-neighbor or for a more complex interpolation. 4592 * 4593 * SEE: remap, undistort, initUndistortRectifyMap 4594 */ 4595 public static void convertMaps(Mat map1, Mat map2, Mat dstmap1, Mat dstmap2, int dstmap1type) { 4596 convertMaps_1(map1.nativeObj, map2.nativeObj, dstmap1.nativeObj, dstmap2.nativeObj, dstmap1type); 4597 } 4598 4599 4600 // 4601 // C++: Mat cv::getRotationMatrix2D(Point2f center, double angle, double scale) 4602 // 4603 4604 /** 4605 * Calculates an affine matrix of 2D rotation. 4606 * 4607 * The function calculates the following matrix: 4608 * 4609 * \(\begin{bmatrix} \alpha & \beta & (1- \alpha ) \cdot \texttt{center.x} - \beta \cdot \texttt{center.y} \\ - \beta & \alpha & \beta \cdot \texttt{center.x} + (1- \alpha ) \cdot \texttt{center.y} \end{bmatrix}\) 4610 * 4611 * where 4612 * 4613 * \(\begin{array}{l} \alpha = \texttt{scale} \cdot \cos \texttt{angle} , \\ \beta = \texttt{scale} \cdot \sin \texttt{angle} \end{array}\) 4614 * 4615 * The transformation maps the rotation center to itself. If this is not the target, adjust the shift. 4616 * 4617 * @param center Center of the rotation in the source image. 4618 * @param angle Rotation angle in degrees. Positive values mean counter-clockwise rotation (the 4619 * coordinate origin is assumed to be the top-left corner). 4620 * @param scale Isotropic scale factor. 4621 * 4622 * SEE: getAffineTransform, warpAffine, transform 4623 * @return automatically generated 4624 */ 4625 public static Mat getRotationMatrix2D(Point center, double angle, double scale) { 4626 return new Mat(getRotationMatrix2D_0(center.x, center.y, angle, scale)); 4627 } 4628 4629 4630 // 4631 // C++: void cv::invertAffineTransform(Mat M, Mat& iM) 4632 // 4633 4634 /** 4635 * Inverts an affine transformation. 4636 * 4637 * The function computes an inverse affine transformation represented by \(2 \times 3\) matrix M: 4638 * 4639 * \(\begin{bmatrix} a_{11} & a_{12} & b_1 \\ a_{21} & a_{22} & b_2 \end{bmatrix}\) 4640 * 4641 * The result is also a \(2 \times 3\) matrix of the same type as M. 4642 * 4643 * @param M Original affine transformation. 4644 * @param iM Output reverse affine transformation. 4645 */ 4646 public static void invertAffineTransform(Mat M, Mat iM) { 4647 invertAffineTransform_0(M.nativeObj, iM.nativeObj); 4648 } 4649 4650 4651 // 4652 // C++: Mat cv::getPerspectiveTransform(Mat src, Mat dst, int solveMethod = DECOMP_LU) 4653 // 4654 4655 /** 4656 * Calculates a perspective transform from four pairs of the corresponding points. 4657 * 4658 * The function calculates the \(3 \times 3\) matrix of a perspective transform so that: 4659 * 4660 * \(\begin{bmatrix} t_i x'_i \\ t_i y'_i \\ t_i \end{bmatrix} = \texttt{map_matrix} \cdot \begin{bmatrix} x_i \\ y_i \\ 1 \end{bmatrix}\) 4661 * 4662 * where 4663 * 4664 * \(dst(i)=(x'_i,y'_i), src(i)=(x_i, y_i), i=0,1,2,3\) 4665 * 4666 * @param src Coordinates of quadrangle vertices in the source image. 4667 * @param dst Coordinates of the corresponding quadrangle vertices in the destination image. 4668 * @param solveMethod method passed to cv::solve (#DecompTypes) 4669 * 4670 * SEE: findHomography, warpPerspective, perspectiveTransform 4671 * @return automatically generated 4672 */ 4673 public static Mat getPerspectiveTransform(Mat src, Mat dst, int solveMethod) { 4674 return new Mat(getPerspectiveTransform_0(src.nativeObj, dst.nativeObj, solveMethod)); 4675 } 4676 4677 /** 4678 * Calculates a perspective transform from four pairs of the corresponding points. 4679 * 4680 * The function calculates the \(3 \times 3\) matrix of a perspective transform so that: 4681 * 4682 * \(\begin{bmatrix} t_i x'_i \\ t_i y'_i \\ t_i \end{bmatrix} = \texttt{map_matrix} \cdot \begin{bmatrix} x_i \\ y_i \\ 1 \end{bmatrix}\) 4683 * 4684 * where 4685 * 4686 * \(dst(i)=(x'_i,y'_i), src(i)=(x_i, y_i), i=0,1,2,3\) 4687 * 4688 * @param src Coordinates of quadrangle vertices in the source image. 4689 * @param dst Coordinates of the corresponding quadrangle vertices in the destination image. 4690 * 4691 * SEE: findHomography, warpPerspective, perspectiveTransform 4692 * @return automatically generated 4693 */ 4694 public static Mat getPerspectiveTransform(Mat src, Mat dst) { 4695 return new Mat(getPerspectiveTransform_1(src.nativeObj, dst.nativeObj)); 4696 } 4697 4698 4699 // 4700 // C++: Mat cv::getAffineTransform(vector_Point2f src, vector_Point2f dst) 4701 // 4702 4703 public static Mat getAffineTransform(MatOfPoint2f src, MatOfPoint2f dst) { 4704 Mat src_mat = src; 4705 Mat dst_mat = dst; 4706 return new Mat(getAffineTransform_0(src_mat.nativeObj, dst_mat.nativeObj)); 4707 } 4708 4709 4710 // 4711 // C++: void cv::getRectSubPix(Mat image, Size patchSize, Point2f center, Mat& patch, int patchType = -1) 4712 // 4713 4714 /** 4715 * Retrieves a pixel rectangle from an image with sub-pixel accuracy. 4716 * 4717 * The function getRectSubPix extracts pixels from src: 4718 * 4719 * \(patch(x, y) = src(x + \texttt{center.x} - ( \texttt{dst.cols} -1)*0.5, y + \texttt{center.y} - ( \texttt{dst.rows} -1)*0.5)\) 4720 * 4721 * where the values of the pixels at non-integer coordinates are retrieved using bilinear 4722 * interpolation. Every channel of multi-channel images is processed independently. Also 4723 * the image should be a single channel or three channel image. While the center of the 4724 * rectangle must be inside the image, parts of the rectangle may be outside. 4725 * 4726 * @param image Source image. 4727 * @param patchSize Size of the extracted patch. 4728 * @param center Floating point coordinates of the center of the extracted rectangle within the 4729 * source image. The center must be inside the image. 4730 * @param patch Extracted patch that has the size patchSize and the same number of channels as src . 4731 * @param patchType Depth of the extracted pixels. By default, they have the same depth as src . 4732 * 4733 * SEE: warpAffine, warpPerspective 4734 */ 4735 public static void getRectSubPix(Mat image, Size patchSize, Point center, Mat patch, int patchType) { 4736 getRectSubPix_0(image.nativeObj, patchSize.width, patchSize.height, center.x, center.y, patch.nativeObj, patchType); 4737 } 4738 4739 /** 4740 * Retrieves a pixel rectangle from an image with sub-pixel accuracy. 4741 * 4742 * The function getRectSubPix extracts pixels from src: 4743 * 4744 * \(patch(x, y) = src(x + \texttt{center.x} - ( \texttt{dst.cols} -1)*0.5, y + \texttt{center.y} - ( \texttt{dst.rows} -1)*0.5)\) 4745 * 4746 * where the values of the pixels at non-integer coordinates are retrieved using bilinear 4747 * interpolation. Every channel of multi-channel images is processed independently. Also 4748 * the image should be a single channel or three channel image. While the center of the 4749 * rectangle must be inside the image, parts of the rectangle may be outside. 4750 * 4751 * @param image Source image. 4752 * @param patchSize Size of the extracted patch. 4753 * @param center Floating point coordinates of the center of the extracted rectangle within the 4754 * source image. The center must be inside the image. 4755 * @param patch Extracted patch that has the size patchSize and the same number of channels as src . 4756 * 4757 * SEE: warpAffine, warpPerspective 4758 */ 4759 public static void getRectSubPix(Mat image, Size patchSize, Point center, Mat patch) { 4760 getRectSubPix_1(image.nativeObj, patchSize.width, patchSize.height, center.x, center.y, patch.nativeObj); 4761 } 4762 4763 4764 // 4765 // C++: void cv::logPolar(Mat src, Mat& dst, Point2f center, double M, int flags) 4766 // 4767 4768 /** 4769 * Remaps an image to semilog-polar coordinates space. 4770 * 4771 * @deprecated This function produces same result as cv::warpPolar(src, dst, src.size(), center, maxRadius, flags+WARP_POLAR_LOG); 4772 * 4773 * 4774 * Transform the source image using the following transformation (See REF: polar_remaps_reference_image "Polar remaps reference image d)"): 4775 * \(\begin{array}{l} 4776 * dst( \rho , \phi ) = src(x,y) \\ 4777 * dst.size() \leftarrow src.size() 4778 * \end{array}\) 4779 * 4780 * where 4781 * \(\begin{array}{l} 4782 * I = (dx,dy) = (x - center.x,y - center.y) \\ 4783 * \rho = M \cdot log_e(\texttt{magnitude} (I)) ,\\ 4784 * \phi = Kangle \cdot \texttt{angle} (I) \\ 4785 * \end{array}\) 4786 * 4787 * and 4788 * \(\begin{array}{l} 4789 * M = src.cols / log_e(maxRadius) \\ 4790 * Kangle = src.rows / 2\Pi \\ 4791 * \end{array}\) 4792 * 4793 * The function emulates the human "foveal" vision and can be used for fast scale and 4794 * rotation-invariant template matching, for object tracking and so forth. 4795 * @param src Source image 4796 * @param dst Destination image. It will have same size and type as src. 4797 * @param center The transformation center; where the output precision is maximal 4798 * @param M Magnitude scale parameter. It determines the radius of the bounding circle to transform too. 4799 * @param flags A combination of interpolation methods, see #InterpolationFlags 4800 * 4801 * <b>Note:</b> 4802 * <ul> 4803 * <li> 4804 * The function can not operate in-place. 4805 * </li> 4806 * <li> 4807 * To calculate magnitude and angle in degrees #cartToPolar is used internally thus angles are measured from 0 to 360 with accuracy about 0.3 degrees. 4808 * </li> 4809 * </ul> 4810 * 4811 * SEE: cv::linearPolar 4812 */ 4813 @Deprecated 4814 public static void logPolar(Mat src, Mat dst, Point center, double M, int flags) { 4815 logPolar_0(src.nativeObj, dst.nativeObj, center.x, center.y, M, flags); 4816 } 4817 4818 4819 // 4820 // C++: void cv::linearPolar(Mat src, Mat& dst, Point2f center, double maxRadius, int flags) 4821 // 4822 4823 /** 4824 * Remaps an image to polar coordinates space. 4825 * 4826 * @deprecated This function produces same result as cv::warpPolar(src, dst, src.size(), center, maxRadius, flags) 4827 * 4828 * 4829 * Transform the source image using the following transformation (See REF: polar_remaps_reference_image "Polar remaps reference image c)"): 4830 * \(\begin{array}{l} 4831 * dst( \rho , \phi ) = src(x,y) \\ 4832 * dst.size() \leftarrow src.size() 4833 * \end{array}\) 4834 * 4835 * where 4836 * \(\begin{array}{l} 4837 * I = (dx,dy) = (x - center.x,y - center.y) \\ 4838 * \rho = Kmag \cdot \texttt{magnitude} (I) ,\\ 4839 * \phi = angle \cdot \texttt{angle} (I) 4840 * \end{array}\) 4841 * 4842 * and 4843 * \(\begin{array}{l} 4844 * Kx = src.cols / maxRadius \\ 4845 * Ky = src.rows / 2\Pi 4846 * \end{array}\) 4847 * 4848 * 4849 * @param src Source image 4850 * @param dst Destination image. It will have same size and type as src. 4851 * @param center The transformation center; 4852 * @param maxRadius The radius of the bounding circle to transform. It determines the inverse magnitude scale parameter too. 4853 * @param flags A combination of interpolation methods, see #InterpolationFlags 4854 * 4855 * <b>Note:</b> 4856 * <ul> 4857 * <li> 4858 * The function can not operate in-place. 4859 * </li> 4860 * <li> 4861 * To calculate magnitude and angle in degrees #cartToPolar is used internally thus angles are measured from 0 to 360 with accuracy about 0.3 degrees. 4862 * </li> 4863 * </ul> 4864 * 4865 * SEE: cv::logPolar 4866 */ 4867 @Deprecated 4868 public static void linearPolar(Mat src, Mat dst, Point center, double maxRadius, int flags) { 4869 linearPolar_0(src.nativeObj, dst.nativeObj, center.x, center.y, maxRadius, flags); 4870 } 4871 4872 4873 // 4874 // C++: void cv::warpPolar(Mat src, Mat& dst, Size dsize, Point2f center, double maxRadius, int flags) 4875 // 4876 4877 /** 4878 * Remaps an image to polar or semilog-polar coordinates space 4879 * 4880 * polar_remaps_reference_image 4881 * ![Polar remaps reference](pics/polar_remap_doc.png) 4882 * 4883 * Transform the source image using the following transformation: 4884 * \( 4885 * dst(\rho , \phi ) = src(x,y) 4886 * \) 4887 * 4888 * where 4889 * \( 4890 * \begin{array}{l} 4891 * \vec{I} = (x - center.x, \;y - center.y) \\ 4892 * \phi = Kangle \cdot \texttt{angle} (\vec{I}) \\ 4893 * \rho = \left\{\begin{matrix} 4894 * Klin \cdot \texttt{magnitude} (\vec{I}) & default \\ 4895 * Klog \cdot log_e(\texttt{magnitude} (\vec{I})) & if \; semilog \\ 4896 * \end{matrix}\right. 4897 * \end{array} 4898 * \) 4899 * 4900 * and 4901 * \( 4902 * \begin{array}{l} 4903 * Kangle = dsize.height / 2\Pi \\ 4904 * Klin = dsize.width / maxRadius \\ 4905 * Klog = dsize.width / log_e(maxRadius) \\ 4906 * \end{array} 4907 * \) 4908 * 4909 * 4910 * \par Linear vs semilog mapping 4911 * 4912 * Polar mapping can be linear or semi-log. Add one of #WarpPolarMode to {@code flags} to specify the polar mapping mode. 4913 * 4914 * Linear is the default mode. 4915 * 4916 * The semilog mapping emulates the human "foveal" vision that permit very high acuity on the line of sight (central vision) 4917 * in contrast to peripheral vision where acuity is minor. 4918 * 4919 * \par Option on {@code dsize}: 4920 * 4921 * <ul> 4922 * <li> 4923 * if both values in {@code dsize <=0 } (default), 4924 * the destination image will have (almost) same area of source bounding circle: 4925 * \(\begin{array}{l} 4926 * dsize.area \leftarrow (maxRadius^2 \cdot \Pi) \\ 4927 * dsize.width = \texttt{cvRound}(maxRadius) \\ 4928 * dsize.height = \texttt{cvRound}(maxRadius \cdot \Pi) \\ 4929 * \end{array}\) 4930 * </li> 4931 * </ul> 4932 * 4933 * 4934 * <ul> 4935 * <li> 4936 * if only {@code dsize.height <= 0}, 4937 * the destination image area will be proportional to the bounding circle area but scaled by {@code Kx * Kx}: 4938 * \(\begin{array}{l} 4939 * dsize.height = \texttt{cvRound}(dsize.width \cdot \Pi) \\ 4940 * \end{array} 4941 * \) 4942 * </li> 4943 * </ul> 4944 * 4945 * <ul> 4946 * <li> 4947 * if both values in {@code dsize > 0 }, 4948 * the destination image will have the given size therefore the area of the bounding circle will be scaled to {@code dsize}. 4949 * </li> 4950 * </ul> 4951 * 4952 * 4953 * \par Reverse mapping 4954 * 4955 * You can get reverse mapping adding #WARP_INVERSE_MAP to {@code flags} 4956 * \snippet polar_transforms.cpp InverseMap 4957 * 4958 * In addiction, to calculate the original coordinate from a polar mapped coordinate \((rho, phi)->(x, y)\): 4959 * \snippet polar_transforms.cpp InverseCoordinate 4960 * 4961 * @param src Source image. 4962 * @param dst Destination image. It will have same type as src. 4963 * @param dsize The destination image size (see description for valid options). 4964 * @param center The transformation center. 4965 * @param maxRadius The radius of the bounding circle to transform. It determines the inverse magnitude scale parameter too. 4966 * @param flags A combination of interpolation methods, #InterpolationFlags + #WarpPolarMode. 4967 * <ul> 4968 * <li> 4969 * Add #WARP_POLAR_LINEAR to select linear polar mapping (default) 4970 * </li> 4971 * <li> 4972 * Add #WARP_POLAR_LOG to select semilog polar mapping 4973 * </li> 4974 * <li> 4975 * Add #WARP_INVERSE_MAP for reverse mapping. 4976 * </li> 4977 * </ul> 4978 * <b>Note:</b> 4979 * <ul> 4980 * <li> 4981 * The function can not operate in-place. 4982 * </li> 4983 * <li> 4984 * To calculate magnitude and angle in degrees #cartToPolar is used internally thus angles are measured from 0 to 360 with accuracy about 0.3 degrees. 4985 * </li> 4986 * <li> 4987 * This function uses #remap. Due to current implementation limitations the size of an input and output images should be less than 32767x32767. 4988 * </li> 4989 * </ul> 4990 * 4991 * SEE: cv::remap 4992 */ 4993 public static void warpPolar(Mat src, Mat dst, Size dsize, Point center, double maxRadius, int flags) { 4994 warpPolar_0(src.nativeObj, dst.nativeObj, dsize.width, dsize.height, center.x, center.y, maxRadius, flags); 4995 } 4996 4997 4998 // 4999 // C++: void cv::integral(Mat src, Mat& sum, Mat& sqsum, Mat& tilted, int sdepth = -1, int sqdepth = -1) 5000 // 5001 5002 /** 5003 * Calculates the integral of an image. 5004 * 5005 * The function calculates one or more integral images for the source image as follows: 5006 * 5007 * \(\texttt{sum} (X,Y) = \sum _{x<X,y<Y} \texttt{image} (x,y)\) 5008 * 5009 * \(\texttt{sqsum} (X,Y) = \sum _{x<X,y<Y} \texttt{image} (x,y)^2\) 5010 * 5011 * \(\texttt{tilted} (X,Y) = \sum _{y<Y,abs(x-X+1) \leq Y-y-1} \texttt{image} (x,y)\) 5012 * 5013 * Using these integral images, you can calculate sum, mean, and standard deviation over a specific 5014 * up-right or rotated rectangular region of the image in a constant time, for example: 5015 * 5016 * \(\sum _{x_1 \leq x < x_2, \, y_1 \leq y < y_2} \texttt{image} (x,y) = \texttt{sum} (x_2,y_2)- \texttt{sum} (x_1,y_2)- \texttt{sum} (x_2,y_1)+ \texttt{sum} (x_1,y_1)\) 5017 * 5018 * It makes possible to do a fast blurring or fast block correlation with a variable window size, for 5019 * example. In case of multi-channel images, sums for each channel are accumulated independently. 5020 * 5021 * As a practical example, the next figure shows the calculation of the integral of a straight 5022 * rectangle Rect(3,3,3,2) and of a tilted rectangle Rect(5,1,2,3) . The selected pixels in the 5023 * original image are shown, as well as the relative pixels in the integral images sum and tilted . 5024 * 5025 * ![integral calculation example](pics/integral.png) 5026 * 5027 * @param src input image as \(W \times H\), 8-bit or floating-point (32f or 64f). 5028 * @param sum integral image as \((W+1)\times (H+1)\) , 32-bit integer or floating-point (32f or 64f). 5029 * @param sqsum integral image for squared pixel values; it is \((W+1)\times (H+1)\), double-precision 5030 * floating-point (64f) array. 5031 * @param tilted integral for the image rotated by 45 degrees; it is \((W+1)\times (H+1)\) array with 5032 * the same data type as sum. 5033 * @param sdepth desired depth of the integral and the tilted integral images, CV_32S, CV_32F, or 5034 * CV_64F. 5035 * @param sqdepth desired depth of the integral image of squared pixel values, CV_32F or CV_64F. 5036 */ 5037 public static void integral3(Mat src, Mat sum, Mat sqsum, Mat tilted, int sdepth, int sqdepth) { 5038 integral3_0(src.nativeObj, sum.nativeObj, sqsum.nativeObj, tilted.nativeObj, sdepth, sqdepth); 5039 } 5040 5041 /** 5042 * Calculates the integral of an image. 5043 * 5044 * The function calculates one or more integral images for the source image as follows: 5045 * 5046 * \(\texttt{sum} (X,Y) = \sum _{x<X,y<Y} \texttt{image} (x,y)\) 5047 * 5048 * \(\texttt{sqsum} (X,Y) = \sum _{x<X,y<Y} \texttt{image} (x,y)^2\) 5049 * 5050 * \(\texttt{tilted} (X,Y) = \sum _{y<Y,abs(x-X+1) \leq Y-y-1} \texttt{image} (x,y)\) 5051 * 5052 * Using these integral images, you can calculate sum, mean, and standard deviation over a specific 5053 * up-right or rotated rectangular region of the image in a constant time, for example: 5054 * 5055 * \(\sum _{x_1 \leq x < x_2, \, y_1 \leq y < y_2} \texttt{image} (x,y) = \texttt{sum} (x_2,y_2)- \texttt{sum} (x_1,y_2)- \texttt{sum} (x_2,y_1)+ \texttt{sum} (x_1,y_1)\) 5056 * 5057 * It makes possible to do a fast blurring or fast block correlation with a variable window size, for 5058 * example. In case of multi-channel images, sums for each channel are accumulated independently. 5059 * 5060 * As a practical example, the next figure shows the calculation of the integral of a straight 5061 * rectangle Rect(3,3,3,2) and of a tilted rectangle Rect(5,1,2,3) . The selected pixels in the 5062 * original image are shown, as well as the relative pixels in the integral images sum and tilted . 5063 * 5064 * ![integral calculation example](pics/integral.png) 5065 * 5066 * @param src input image as \(W \times H\), 8-bit or floating-point (32f or 64f). 5067 * @param sum integral image as \((W+1)\times (H+1)\) , 32-bit integer or floating-point (32f or 64f). 5068 * @param sqsum integral image for squared pixel values; it is \((W+1)\times (H+1)\), double-precision 5069 * floating-point (64f) array. 5070 * @param tilted integral for the image rotated by 45 degrees; it is \((W+1)\times (H+1)\) array with 5071 * the same data type as sum. 5072 * @param sdepth desired depth of the integral and the tilted integral images, CV_32S, CV_32F, or 5073 * CV_64F. 5074 */ 5075 public static void integral3(Mat src, Mat sum, Mat sqsum, Mat tilted, int sdepth) { 5076 integral3_1(src.nativeObj, sum.nativeObj, sqsum.nativeObj, tilted.nativeObj, sdepth); 5077 } 5078 5079 /** 5080 * Calculates the integral of an image. 5081 * 5082 * The function calculates one or more integral images for the source image as follows: 5083 * 5084 * \(\texttt{sum} (X,Y) = \sum _{x<X,y<Y} \texttt{image} (x,y)\) 5085 * 5086 * \(\texttt{sqsum} (X,Y) = \sum _{x<X,y<Y} \texttt{image} (x,y)^2\) 5087 * 5088 * \(\texttt{tilted} (X,Y) = \sum _{y<Y,abs(x-X+1) \leq Y-y-1} \texttt{image} (x,y)\) 5089 * 5090 * Using these integral images, you can calculate sum, mean, and standard deviation over a specific 5091 * up-right or rotated rectangular region of the image in a constant time, for example: 5092 * 5093 * \(\sum _{x_1 \leq x < x_2, \, y_1 \leq y < y_2} \texttt{image} (x,y) = \texttt{sum} (x_2,y_2)- \texttt{sum} (x_1,y_2)- \texttt{sum} (x_2,y_1)+ \texttt{sum} (x_1,y_1)\) 5094 * 5095 * It makes possible to do a fast blurring or fast block correlation with a variable window size, for 5096 * example. In case of multi-channel images, sums for each channel are accumulated independently. 5097 * 5098 * As a practical example, the next figure shows the calculation of the integral of a straight 5099 * rectangle Rect(3,3,3,2) and of a tilted rectangle Rect(5,1,2,3) . The selected pixels in the 5100 * original image are shown, as well as the relative pixels in the integral images sum and tilted . 5101 * 5102 * ![integral calculation example](pics/integral.png) 5103 * 5104 * @param src input image as \(W \times H\), 8-bit or floating-point (32f or 64f). 5105 * @param sum integral image as \((W+1)\times (H+1)\) , 32-bit integer or floating-point (32f or 64f). 5106 * @param sqsum integral image for squared pixel values; it is \((W+1)\times (H+1)\), double-precision 5107 * floating-point (64f) array. 5108 * @param tilted integral for the image rotated by 45 degrees; it is \((W+1)\times (H+1)\) array with 5109 * the same data type as sum. 5110 * CV_64F. 5111 */ 5112 public static void integral3(Mat src, Mat sum, Mat sqsum, Mat tilted) { 5113 integral3_2(src.nativeObj, sum.nativeObj, sqsum.nativeObj, tilted.nativeObj); 5114 } 5115 5116 5117 // 5118 // C++: void cv::integral(Mat src, Mat& sum, int sdepth = -1) 5119 // 5120 5121 public static void integral(Mat src, Mat sum, int sdepth) { 5122 integral_0(src.nativeObj, sum.nativeObj, sdepth); 5123 } 5124 5125 public static void integral(Mat src, Mat sum) { 5126 integral_1(src.nativeObj, sum.nativeObj); 5127 } 5128 5129 5130 // 5131 // C++: void cv::integral(Mat src, Mat& sum, Mat& sqsum, int sdepth = -1, int sqdepth = -1) 5132 // 5133 5134 public static void integral2(Mat src, Mat sum, Mat sqsum, int sdepth, int sqdepth) { 5135 integral2_0(src.nativeObj, sum.nativeObj, sqsum.nativeObj, sdepth, sqdepth); 5136 } 5137 5138 public static void integral2(Mat src, Mat sum, Mat sqsum, int sdepth) { 5139 integral2_1(src.nativeObj, sum.nativeObj, sqsum.nativeObj, sdepth); 5140 } 5141 5142 public static void integral2(Mat src, Mat sum, Mat sqsum) { 5143 integral2_2(src.nativeObj, sum.nativeObj, sqsum.nativeObj); 5144 } 5145 5146 5147 // 5148 // C++: void cv::accumulate(Mat src, Mat& dst, Mat mask = Mat()) 5149 // 5150 5151 /** 5152 * Adds an image to the accumulator image. 5153 * 5154 * The function adds src or some of its elements to dst : 5155 * 5156 * \(\texttt{dst} (x,y) \leftarrow \texttt{dst} (x,y) + \texttt{src} (x,y) \quad \text{if} \quad \texttt{mask} (x,y) \ne 0\) 5157 * 5158 * The function supports multi-channel images. Each channel is processed independently. 5159 * 5160 * The function cv::accumulate can be used, for example, to collect statistics of a scene background 5161 * viewed by a still camera and for the further foreground-background segmentation. 5162 * 5163 * @param src Input image of type CV_8UC(n), CV_16UC(n), CV_32FC(n) or CV_64FC(n), where n is a positive integer. 5164 * @param dst %Accumulator image with the same number of channels as input image, and a depth of CV_32F or CV_64F. 5165 * @param mask Optional operation mask. 5166 * 5167 * SEE: accumulateSquare, accumulateProduct, accumulateWeighted 5168 */ 5169 public static void accumulate(Mat src, Mat dst, Mat mask) { 5170 accumulate_0(src.nativeObj, dst.nativeObj, mask.nativeObj); 5171 } 5172 5173 /** 5174 * Adds an image to the accumulator image. 5175 * 5176 * The function adds src or some of its elements to dst : 5177 * 5178 * \(\texttt{dst} (x,y) \leftarrow \texttt{dst} (x,y) + \texttt{src} (x,y) \quad \text{if} \quad \texttt{mask} (x,y) \ne 0\) 5179 * 5180 * The function supports multi-channel images. Each channel is processed independently. 5181 * 5182 * The function cv::accumulate can be used, for example, to collect statistics of a scene background 5183 * viewed by a still camera and for the further foreground-background segmentation. 5184 * 5185 * @param src Input image of type CV_8UC(n), CV_16UC(n), CV_32FC(n) or CV_64FC(n), where n is a positive integer. 5186 * @param dst %Accumulator image with the same number of channels as input image, and a depth of CV_32F or CV_64F. 5187 * 5188 * SEE: accumulateSquare, accumulateProduct, accumulateWeighted 5189 */ 5190 public static void accumulate(Mat src, Mat dst) { 5191 accumulate_1(src.nativeObj, dst.nativeObj); 5192 } 5193 5194 5195 // 5196 // C++: void cv::accumulateSquare(Mat src, Mat& dst, Mat mask = Mat()) 5197 // 5198 5199 /** 5200 * Adds the square of a source image to the accumulator image. 5201 * 5202 * The function adds the input image src or its selected region, raised to a power of 2, to the 5203 * accumulator dst : 5204 * 5205 * \(\texttt{dst} (x,y) \leftarrow \texttt{dst} (x,y) + \texttt{src} (x,y)^2 \quad \text{if} \quad \texttt{mask} (x,y) \ne 0\) 5206 * 5207 * The function supports multi-channel images. Each channel is processed independently. 5208 * 5209 * @param src Input image as 1- or 3-channel, 8-bit or 32-bit floating point. 5210 * @param dst %Accumulator image with the same number of channels as input image, 32-bit or 64-bit 5211 * floating-point. 5212 * @param mask Optional operation mask. 5213 * 5214 * SEE: accumulateSquare, accumulateProduct, accumulateWeighted 5215 */ 5216 public static void accumulateSquare(Mat src, Mat dst, Mat mask) { 5217 accumulateSquare_0(src.nativeObj, dst.nativeObj, mask.nativeObj); 5218 } 5219 5220 /** 5221 * Adds the square of a source image to the accumulator image. 5222 * 5223 * The function adds the input image src or its selected region, raised to a power of 2, to the 5224 * accumulator dst : 5225 * 5226 * \(\texttt{dst} (x,y) \leftarrow \texttt{dst} (x,y) + \texttt{src} (x,y)^2 \quad \text{if} \quad \texttt{mask} (x,y) \ne 0\) 5227 * 5228 * The function supports multi-channel images. Each channel is processed independently. 5229 * 5230 * @param src Input image as 1- or 3-channel, 8-bit or 32-bit floating point. 5231 * @param dst %Accumulator image with the same number of channels as input image, 32-bit or 64-bit 5232 * floating-point. 5233 * 5234 * SEE: accumulateSquare, accumulateProduct, accumulateWeighted 5235 */ 5236 public static void accumulateSquare(Mat src, Mat dst) { 5237 accumulateSquare_1(src.nativeObj, dst.nativeObj); 5238 } 5239 5240 5241 // 5242 // C++: void cv::accumulateProduct(Mat src1, Mat src2, Mat& dst, Mat mask = Mat()) 5243 // 5244 5245 /** 5246 * Adds the per-element product of two input images to the accumulator image. 5247 * 5248 * The function adds the product of two images or their selected regions to the accumulator dst : 5249 * 5250 * \(\texttt{dst} (x,y) \leftarrow \texttt{dst} (x,y) + \texttt{src1} (x,y) \cdot \texttt{src2} (x,y) \quad \text{if} \quad \texttt{mask} (x,y) \ne 0\) 5251 * 5252 * The function supports multi-channel images. Each channel is processed independently. 5253 * 5254 * @param src1 First input image, 1- or 3-channel, 8-bit or 32-bit floating point. 5255 * @param src2 Second input image of the same type and the same size as src1 . 5256 * @param dst %Accumulator image with the same number of channels as input images, 32-bit or 64-bit 5257 * floating-point. 5258 * @param mask Optional operation mask. 5259 * 5260 * SEE: accumulate, accumulateSquare, accumulateWeighted 5261 */ 5262 public static void accumulateProduct(Mat src1, Mat src2, Mat dst, Mat mask) { 5263 accumulateProduct_0(src1.nativeObj, src2.nativeObj, dst.nativeObj, mask.nativeObj); 5264 } 5265 5266 /** 5267 * Adds the per-element product of two input images to the accumulator image. 5268 * 5269 * The function adds the product of two images or their selected regions to the accumulator dst : 5270 * 5271 * \(\texttt{dst} (x,y) \leftarrow \texttt{dst} (x,y) + \texttt{src1} (x,y) \cdot \texttt{src2} (x,y) \quad \text{if} \quad \texttt{mask} (x,y) \ne 0\) 5272 * 5273 * The function supports multi-channel images. Each channel is processed independently. 5274 * 5275 * @param src1 First input image, 1- or 3-channel, 8-bit or 32-bit floating point. 5276 * @param src2 Second input image of the same type and the same size as src1 . 5277 * @param dst %Accumulator image with the same number of channels as input images, 32-bit or 64-bit 5278 * floating-point. 5279 * 5280 * SEE: accumulate, accumulateSquare, accumulateWeighted 5281 */ 5282 public static void accumulateProduct(Mat src1, Mat src2, Mat dst) { 5283 accumulateProduct_1(src1.nativeObj, src2.nativeObj, dst.nativeObj); 5284 } 5285 5286 5287 // 5288 // C++: void cv::accumulateWeighted(Mat src, Mat& dst, double alpha, Mat mask = Mat()) 5289 // 5290 5291 /** 5292 * Updates a running average. 5293 * 5294 * The function calculates the weighted sum of the input image src and the accumulator dst so that dst 5295 * becomes a running average of a frame sequence: 5296 * 5297 * \(\texttt{dst} (x,y) \leftarrow (1- \texttt{alpha} ) \cdot \texttt{dst} (x,y) + \texttt{alpha} \cdot \texttt{src} (x,y) \quad \text{if} \quad \texttt{mask} (x,y) \ne 0\) 5298 * 5299 * That is, alpha regulates the update speed (how fast the accumulator "forgets" about earlier images). 5300 * The function supports multi-channel images. Each channel is processed independently. 5301 * 5302 * @param src Input image as 1- or 3-channel, 8-bit or 32-bit floating point. 5303 * @param dst %Accumulator image with the same number of channels as input image, 32-bit or 64-bit 5304 * floating-point. 5305 * @param alpha Weight of the input image. 5306 * @param mask Optional operation mask. 5307 * 5308 * SEE: accumulate, accumulateSquare, accumulateProduct 5309 */ 5310 public static void accumulateWeighted(Mat src, Mat dst, double alpha, Mat mask) { 5311 accumulateWeighted_0(src.nativeObj, dst.nativeObj, alpha, mask.nativeObj); 5312 } 5313 5314 /** 5315 * Updates a running average. 5316 * 5317 * The function calculates the weighted sum of the input image src and the accumulator dst so that dst 5318 * becomes a running average of a frame sequence: 5319 * 5320 * \(\texttt{dst} (x,y) \leftarrow (1- \texttt{alpha} ) \cdot \texttt{dst} (x,y) + \texttt{alpha} \cdot \texttt{src} (x,y) \quad \text{if} \quad \texttt{mask} (x,y) \ne 0\) 5321 * 5322 * That is, alpha regulates the update speed (how fast the accumulator "forgets" about earlier images). 5323 * The function supports multi-channel images. Each channel is processed independently. 5324 * 5325 * @param src Input image as 1- or 3-channel, 8-bit or 32-bit floating point. 5326 * @param dst %Accumulator image with the same number of channels as input image, 32-bit or 64-bit 5327 * floating-point. 5328 * @param alpha Weight of the input image. 5329 * 5330 * SEE: accumulate, accumulateSquare, accumulateProduct 5331 */ 5332 public static void accumulateWeighted(Mat src, Mat dst, double alpha) { 5333 accumulateWeighted_1(src.nativeObj, dst.nativeObj, alpha); 5334 } 5335 5336 5337 // 5338 // C++: Point2d cv::phaseCorrelate(Mat src1, Mat src2, Mat window = Mat(), double* response = 0) 5339 // 5340 5341 /** 5342 * The function is used to detect translational shifts that occur between two images. 5343 * 5344 * The operation takes advantage of the Fourier shift theorem for detecting the translational shift in 5345 * the frequency domain. It can be used for fast image registration as well as motion estimation. For 5346 * more information please see <http://en.wikipedia.org/wiki/Phase_correlation> 5347 * 5348 * Calculates the cross-power spectrum of two supplied source arrays. The arrays are padded if needed 5349 * with getOptimalDFTSize. 5350 * 5351 * The function performs the following equations: 5352 * <ul> 5353 * <li> 5354 * First it applies a Hanning window (see <http://en.wikipedia.org/wiki/Hann_function>) to each 5355 * image to remove possible edge effects. This window is cached until the array size changes to speed 5356 * up processing time. 5357 * </li> 5358 * <li> 5359 * Next it computes the forward DFTs of each source array: 5360 * \(\mathbf{G}_a = \mathcal{F}\{src_1\}, \; \mathbf{G}_b = \mathcal{F}\{src_2\}\) 5361 * where \(\mathcal{F}\) is the forward DFT. 5362 * </li> 5363 * <li> 5364 * It then computes the cross-power spectrum of each frequency domain array: 5365 * \(R = \frac{ \mathbf{G}_a \mathbf{G}_b^*}{|\mathbf{G}_a \mathbf{G}_b^*|}\) 5366 * </li> 5367 * <li> 5368 * Next the cross-correlation is converted back into the time domain via the inverse DFT: 5369 * \(r = \mathcal{F}^{-1}\{R\}\) 5370 * </li> 5371 * <li> 5372 * Finally, it computes the peak location and computes a 5x5 weighted centroid around the peak to 5373 * achieve sub-pixel accuracy. 5374 * \((\Delta x, \Delta y) = \texttt{weightedCentroid} \{\arg \max_{(x, y)}\{r\}\}\) 5375 * </li> 5376 * <li> 5377 * If non-zero, the response parameter is computed as the sum of the elements of r within the 5x5 5378 * centroid around the peak location. It is normalized to a maximum of 1 (meaning there is a single 5379 * peak) and will be smaller when there are multiple peaks. 5380 * </li> 5381 * </ul> 5382 * 5383 * @param src1 Source floating point array (CV_32FC1 or CV_64FC1) 5384 * @param src2 Source floating point array (CV_32FC1 or CV_64FC1) 5385 * @param window Floating point array with windowing coefficients to reduce edge effects (optional). 5386 * @param response Signal power within the 5x5 centroid around the peak, between 0 and 1 (optional). 5387 * @return detected phase shift (sub-pixel) between the two arrays. 5388 * 5389 * SEE: dft, getOptimalDFTSize, idft, mulSpectrums createHanningWindow 5390 */ 5391 public static Point phaseCorrelate(Mat src1, Mat src2, Mat window, double[] response) { 5392 double[] response_out = new double[1]; 5393 Point retVal = new Point(phaseCorrelate_0(src1.nativeObj, src2.nativeObj, window.nativeObj, response_out)); 5394 if(response!=null) response[0] = (double)response_out[0]; 5395 return retVal; 5396 } 5397 5398 /** 5399 * The function is used to detect translational shifts that occur between two images. 5400 * 5401 * The operation takes advantage of the Fourier shift theorem for detecting the translational shift in 5402 * the frequency domain. It can be used for fast image registration as well as motion estimation. For 5403 * more information please see <http://en.wikipedia.org/wiki/Phase_correlation> 5404 * 5405 * Calculates the cross-power spectrum of two supplied source arrays. The arrays are padded if needed 5406 * with getOptimalDFTSize. 5407 * 5408 * The function performs the following equations: 5409 * <ul> 5410 * <li> 5411 * First it applies a Hanning window (see <http://en.wikipedia.org/wiki/Hann_function>) to each 5412 * image to remove possible edge effects. This window is cached until the array size changes to speed 5413 * up processing time. 5414 * </li> 5415 * <li> 5416 * Next it computes the forward DFTs of each source array: 5417 * \(\mathbf{G}_a = \mathcal{F}\{src_1\}, \; \mathbf{G}_b = \mathcal{F}\{src_2\}\) 5418 * where \(\mathcal{F}\) is the forward DFT. 5419 * </li> 5420 * <li> 5421 * It then computes the cross-power spectrum of each frequency domain array: 5422 * \(R = \frac{ \mathbf{G}_a \mathbf{G}_b^*}{|\mathbf{G}_a \mathbf{G}_b^*|}\) 5423 * </li> 5424 * <li> 5425 * Next the cross-correlation is converted back into the time domain via the inverse DFT: 5426 * \(r = \mathcal{F}^{-1}\{R\}\) 5427 * </li> 5428 * <li> 5429 * Finally, it computes the peak location and computes a 5x5 weighted centroid around the peak to 5430 * achieve sub-pixel accuracy. 5431 * \((\Delta x, \Delta y) = \texttt{weightedCentroid} \{\arg \max_{(x, y)}\{r\}\}\) 5432 * </li> 5433 * <li> 5434 * If non-zero, the response parameter is computed as the sum of the elements of r within the 5x5 5435 * centroid around the peak location. It is normalized to a maximum of 1 (meaning there is a single 5436 * peak) and will be smaller when there are multiple peaks. 5437 * </li> 5438 * </ul> 5439 * 5440 * @param src1 Source floating point array (CV_32FC1 or CV_64FC1) 5441 * @param src2 Source floating point array (CV_32FC1 or CV_64FC1) 5442 * @param window Floating point array with windowing coefficients to reduce edge effects (optional). 5443 * @return detected phase shift (sub-pixel) between the two arrays. 5444 * 5445 * SEE: dft, getOptimalDFTSize, idft, mulSpectrums createHanningWindow 5446 */ 5447 public static Point phaseCorrelate(Mat src1, Mat src2, Mat window) { 5448 return new Point(phaseCorrelate_1(src1.nativeObj, src2.nativeObj, window.nativeObj)); 5449 } 5450 5451 /** 5452 * The function is used to detect translational shifts that occur between two images. 5453 * 5454 * The operation takes advantage of the Fourier shift theorem for detecting the translational shift in 5455 * the frequency domain. It can be used for fast image registration as well as motion estimation. For 5456 * more information please see <http://en.wikipedia.org/wiki/Phase_correlation> 5457 * 5458 * Calculates the cross-power spectrum of two supplied source arrays. The arrays are padded if needed 5459 * with getOptimalDFTSize. 5460 * 5461 * The function performs the following equations: 5462 * <ul> 5463 * <li> 5464 * First it applies a Hanning window (see <http://en.wikipedia.org/wiki/Hann_function>) to each 5465 * image to remove possible edge effects. This window is cached until the array size changes to speed 5466 * up processing time. 5467 * </li> 5468 * <li> 5469 * Next it computes the forward DFTs of each source array: 5470 * \(\mathbf{G}_a = \mathcal{F}\{src_1\}, \; \mathbf{G}_b = \mathcal{F}\{src_2\}\) 5471 * where \(\mathcal{F}\) is the forward DFT. 5472 * </li> 5473 * <li> 5474 * It then computes the cross-power spectrum of each frequency domain array: 5475 * \(R = \frac{ \mathbf{G}_a \mathbf{G}_b^*}{|\mathbf{G}_a \mathbf{G}_b^*|}\) 5476 * </li> 5477 * <li> 5478 * Next the cross-correlation is converted back into the time domain via the inverse DFT: 5479 * \(r = \mathcal{F}^{-1}\{R\}\) 5480 * </li> 5481 * <li> 5482 * Finally, it computes the peak location and computes a 5x5 weighted centroid around the peak to 5483 * achieve sub-pixel accuracy. 5484 * \((\Delta x, \Delta y) = \texttt{weightedCentroid} \{\arg \max_{(x, y)}\{r\}\}\) 5485 * </li> 5486 * <li> 5487 * If non-zero, the response parameter is computed as the sum of the elements of r within the 5x5 5488 * centroid around the peak location. It is normalized to a maximum of 1 (meaning there is a single 5489 * peak) and will be smaller when there are multiple peaks. 5490 * </li> 5491 * </ul> 5492 * 5493 * @param src1 Source floating point array (CV_32FC1 or CV_64FC1) 5494 * @param src2 Source floating point array (CV_32FC1 or CV_64FC1) 5495 * @return detected phase shift (sub-pixel) between the two arrays. 5496 * 5497 * SEE: dft, getOptimalDFTSize, idft, mulSpectrums createHanningWindow 5498 */ 5499 public static Point phaseCorrelate(Mat src1, Mat src2) { 5500 return new Point(phaseCorrelate_2(src1.nativeObj, src2.nativeObj)); 5501 } 5502 5503 5504 // 5505 // C++: void cv::createHanningWindow(Mat& dst, Size winSize, int type) 5506 // 5507 5508 /** 5509 * This function computes a Hanning window coefficients in two dimensions. 5510 * 5511 * See (http://en.wikipedia.org/wiki/Hann_function) and (http://en.wikipedia.org/wiki/Window_function) 5512 * for more information. 5513 * 5514 * An example is shown below: 5515 * <code> 5516 * // create hanning window of size 100x100 and type CV_32F 5517 * Mat hann; 5518 * createHanningWindow(hann, Size(100, 100), CV_32F); 5519 * </code> 5520 * @param dst Destination array to place Hann coefficients in 5521 * @param winSize The window size specifications (both width and height must be > 1) 5522 * @param type Created array type 5523 */ 5524 public static void createHanningWindow(Mat dst, Size winSize, int type) { 5525 createHanningWindow_0(dst.nativeObj, winSize.width, winSize.height, type); 5526 } 5527 5528 5529 // 5530 // C++: void cv::divSpectrums(Mat a, Mat b, Mat& c, int flags, bool conjB = false) 5531 // 5532 5533 /** 5534 * Performs the per-element division of the first Fourier spectrum by the second Fourier spectrum. 5535 * 5536 * The function cv::divSpectrums performs the per-element division of the first array by the second array. 5537 * The arrays are CCS-packed or complex matrices that are results of a real or complex Fourier transform. 5538 * 5539 * @param a first input array. 5540 * @param b second input array of the same size and type as src1 . 5541 * @param c output array of the same size and type as src1 . 5542 * @param flags operation flags; currently, the only supported flag is cv::DFT_ROWS, which indicates that 5543 * each row of src1 and src2 is an independent 1D Fourier spectrum. If you do not want to use this flag, then simply add a {@code 0} as value. 5544 * @param conjB optional flag that conjugates the second input array before the multiplication (true) 5545 * or not (false). 5546 */ 5547 public static void divSpectrums(Mat a, Mat b, Mat c, int flags, boolean conjB) { 5548 divSpectrums_0(a.nativeObj, b.nativeObj, c.nativeObj, flags, conjB); 5549 } 5550 5551 /** 5552 * Performs the per-element division of the first Fourier spectrum by the second Fourier spectrum. 5553 * 5554 * The function cv::divSpectrums performs the per-element division of the first array by the second array. 5555 * The arrays are CCS-packed or complex matrices that are results of a real or complex Fourier transform. 5556 * 5557 * @param a first input array. 5558 * @param b second input array of the same size and type as src1 . 5559 * @param c output array of the same size and type as src1 . 5560 * @param flags operation flags; currently, the only supported flag is cv::DFT_ROWS, which indicates that 5561 * each row of src1 and src2 is an independent 1D Fourier spectrum. If you do not want to use this flag, then simply add a {@code 0} as value. 5562 * or not (false). 5563 */ 5564 public static void divSpectrums(Mat a, Mat b, Mat c, int flags) { 5565 divSpectrums_1(a.nativeObj, b.nativeObj, c.nativeObj, flags); 5566 } 5567 5568 5569 // 5570 // C++: double cv::threshold(Mat src, Mat& dst, double thresh, double maxval, int type) 5571 // 5572 5573 /** 5574 * Applies a fixed-level threshold to each array element. 5575 * 5576 * The function applies fixed-level thresholding to a multiple-channel array. The function is typically 5577 * used to get a bi-level (binary) image out of a grayscale image ( #compare could be also used for 5578 * this purpose) or for removing a noise, that is, filtering out pixels with too small or too large 5579 * values. There are several types of thresholding supported by the function. They are determined by 5580 * type parameter. 5581 * 5582 * Also, the special values #THRESH_OTSU or #THRESH_TRIANGLE may be combined with one of the 5583 * above values. In these cases, the function determines the optimal threshold value using the Otsu's 5584 * or Triangle algorithm and uses it instead of the specified thresh. 5585 * 5586 * <b>Note:</b> Currently, the Otsu's and Triangle methods are implemented only for 8-bit single-channel images. 5587 * 5588 * @param src input array (multiple-channel, 8-bit or 32-bit floating point). 5589 * @param dst output array of the same size and type and the same number of channels as src. 5590 * @param thresh threshold value. 5591 * @param maxval maximum value to use with the #THRESH_BINARY and #THRESH_BINARY_INV thresholding 5592 * types. 5593 * @param type thresholding type (see #ThresholdTypes). 5594 * @return the computed threshold value if Otsu's or Triangle methods used. 5595 * 5596 * SEE: adaptiveThreshold, findContours, compare, min, max 5597 */ 5598 public static double threshold(Mat src, Mat dst, double thresh, double maxval, int type) { 5599 return threshold_0(src.nativeObj, dst.nativeObj, thresh, maxval, type); 5600 } 5601 5602 5603 // 5604 // C++: void cv::adaptiveThreshold(Mat src, Mat& dst, double maxValue, int adaptiveMethod, int thresholdType, int blockSize, double C) 5605 // 5606 5607 /** 5608 * Applies an adaptive threshold to an array. 5609 * 5610 * The function transforms a grayscale image to a binary image according to the formulae: 5611 * <ul> 5612 * <li> 5613 * <b>THRESH_BINARY</b> 5614 * \(dst(x,y) = \fork{\texttt{maxValue}}{if \(src(x,y) > T(x,y)\)}{0}{otherwise}\) 5615 * </li> 5616 * <li> 5617 * <b>THRESH_BINARY_INV</b> 5618 * \(dst(x,y) = \fork{0}{if \(src(x,y) > T(x,y)\)}{\texttt{maxValue}}{otherwise}\) 5619 * where \(T(x,y)\) is a threshold calculated individually for each pixel (see adaptiveMethod parameter). 5620 * </li> 5621 * </ul> 5622 * 5623 * The function can process the image in-place. 5624 * 5625 * @param src Source 8-bit single-channel image. 5626 * @param dst Destination image of the same size and the same type as src. 5627 * @param maxValue Non-zero value assigned to the pixels for which the condition is satisfied 5628 * @param adaptiveMethod Adaptive thresholding algorithm to use, see #AdaptiveThresholdTypes. 5629 * The #BORDER_REPLICATE | #BORDER_ISOLATED is used to process boundaries. 5630 * @param thresholdType Thresholding type that must be either #THRESH_BINARY or #THRESH_BINARY_INV, 5631 * see #ThresholdTypes. 5632 * @param blockSize Size of a pixel neighborhood that is used to calculate a threshold value for the 5633 * pixel: 3, 5, 7, and so on. 5634 * @param C Constant subtracted from the mean or weighted mean (see the details below). Normally, it 5635 * is positive but may be zero or negative as well. 5636 * 5637 * SEE: threshold, blur, GaussianBlur 5638 */ 5639 public static void adaptiveThreshold(Mat src, Mat dst, double maxValue, int adaptiveMethod, int thresholdType, int blockSize, double C) { 5640 adaptiveThreshold_0(src.nativeObj, dst.nativeObj, maxValue, adaptiveMethod, thresholdType, blockSize, C); 5641 } 5642 5643 5644 // 5645 // C++: void cv::pyrDown(Mat src, Mat& dst, Size dstsize = Size(), int borderType = BORDER_DEFAULT) 5646 // 5647 5648 /** 5649 * Blurs an image and downsamples it. 5650 * 5651 * By default, size of the output image is computed as {@code Size((src.cols+1)/2, (src.rows+1)/2)}, but in 5652 * any case, the following conditions should be satisfied: 5653 * 5654 * \(\begin{array}{l} | \texttt{dstsize.width} *2-src.cols| \leq 2 \\ | \texttt{dstsize.height} *2-src.rows| \leq 2 \end{array}\) 5655 * 5656 * The function performs the downsampling step of the Gaussian pyramid construction. First, it 5657 * convolves the source image with the kernel: 5658 * 5659 * \(\frac{1}{256} \begin{bmatrix} 1 & 4 & 6 & 4 & 1 \\ 4 & 16 & 24 & 16 & 4 \\ 6 & 24 & 36 & 24 & 6 \\ 4 & 16 & 24 & 16 & 4 \\ 1 & 4 & 6 & 4 & 1 \end{bmatrix}\) 5660 * 5661 * Then, it downsamples the image by rejecting even rows and columns. 5662 * 5663 * @param src input image. 5664 * @param dst output image; it has the specified size and the same type as src. 5665 * @param dstsize size of the output image. 5666 * @param borderType Pixel extrapolation method, see #BorderTypes (#BORDER_CONSTANT isn't supported) 5667 */ 5668 public static void pyrDown(Mat src, Mat dst, Size dstsize, int borderType) { 5669 pyrDown_0(src.nativeObj, dst.nativeObj, dstsize.width, dstsize.height, borderType); 5670 } 5671 5672 /** 5673 * Blurs an image and downsamples it. 5674 * 5675 * By default, size of the output image is computed as {@code Size((src.cols+1)/2, (src.rows+1)/2)}, but in 5676 * any case, the following conditions should be satisfied: 5677 * 5678 * \(\begin{array}{l} | \texttt{dstsize.width} *2-src.cols| \leq 2 \\ | \texttt{dstsize.height} *2-src.rows| \leq 2 \end{array}\) 5679 * 5680 * The function performs the downsampling step of the Gaussian pyramid construction. First, it 5681 * convolves the source image with the kernel: 5682 * 5683 * \(\frac{1}{256} \begin{bmatrix} 1 & 4 & 6 & 4 & 1 \\ 4 & 16 & 24 & 16 & 4 \\ 6 & 24 & 36 & 24 & 6 \\ 4 & 16 & 24 & 16 & 4 \\ 1 & 4 & 6 & 4 & 1 \end{bmatrix}\) 5684 * 5685 * Then, it downsamples the image by rejecting even rows and columns. 5686 * 5687 * @param src input image. 5688 * @param dst output image; it has the specified size and the same type as src. 5689 * @param dstsize size of the output image. 5690 */ 5691 public static void pyrDown(Mat src, Mat dst, Size dstsize) { 5692 pyrDown_1(src.nativeObj, dst.nativeObj, dstsize.width, dstsize.height); 5693 } 5694 5695 /** 5696 * Blurs an image and downsamples it. 5697 * 5698 * By default, size of the output image is computed as {@code Size((src.cols+1)/2, (src.rows+1)/2)}, but in 5699 * any case, the following conditions should be satisfied: 5700 * 5701 * \(\begin{array}{l} | \texttt{dstsize.width} *2-src.cols| \leq 2 \\ | \texttt{dstsize.height} *2-src.rows| \leq 2 \end{array}\) 5702 * 5703 * The function performs the downsampling step of the Gaussian pyramid construction. First, it 5704 * convolves the source image with the kernel: 5705 * 5706 * \(\frac{1}{256} \begin{bmatrix} 1 & 4 & 6 & 4 & 1 \\ 4 & 16 & 24 & 16 & 4 \\ 6 & 24 & 36 & 24 & 6 \\ 4 & 16 & 24 & 16 & 4 \\ 1 & 4 & 6 & 4 & 1 \end{bmatrix}\) 5707 * 5708 * Then, it downsamples the image by rejecting even rows and columns. 5709 * 5710 * @param src input image. 5711 * @param dst output image; it has the specified size and the same type as src. 5712 */ 5713 public static void pyrDown(Mat src, Mat dst) { 5714 pyrDown_2(src.nativeObj, dst.nativeObj); 5715 } 5716 5717 5718 // 5719 // C++: void cv::pyrUp(Mat src, Mat& dst, Size dstsize = Size(), int borderType = BORDER_DEFAULT) 5720 // 5721 5722 /** 5723 * Upsamples an image and then blurs it. 5724 * 5725 * By default, size of the output image is computed as {@code Size(src.cols\*2, (src.rows\*2)}, but in any 5726 * case, the following conditions should be satisfied: 5727 * 5728 * \(\begin{array}{l} | \texttt{dstsize.width} -src.cols*2| \leq ( \texttt{dstsize.width} \mod 2) \\ | \texttt{dstsize.height} -src.rows*2| \leq ( \texttt{dstsize.height} \mod 2) \end{array}\) 5729 * 5730 * The function performs the upsampling step of the Gaussian pyramid construction, though it can 5731 * actually be used to construct the Laplacian pyramid. First, it upsamples the source image by 5732 * injecting even zero rows and columns and then convolves the result with the same kernel as in 5733 * pyrDown multiplied by 4. 5734 * 5735 * @param src input image. 5736 * @param dst output image. It has the specified size and the same type as src . 5737 * @param dstsize size of the output image. 5738 * @param borderType Pixel extrapolation method, see #BorderTypes (only #BORDER_DEFAULT is supported) 5739 */ 5740 public static void pyrUp(Mat src, Mat dst, Size dstsize, int borderType) { 5741 pyrUp_0(src.nativeObj, dst.nativeObj, dstsize.width, dstsize.height, borderType); 5742 } 5743 5744 /** 5745 * Upsamples an image and then blurs it. 5746 * 5747 * By default, size of the output image is computed as {@code Size(src.cols\*2, (src.rows\*2)}, but in any 5748 * case, the following conditions should be satisfied: 5749 * 5750 * \(\begin{array}{l} | \texttt{dstsize.width} -src.cols*2| \leq ( \texttt{dstsize.width} \mod 2) \\ | \texttt{dstsize.height} -src.rows*2| \leq ( \texttt{dstsize.height} \mod 2) \end{array}\) 5751 * 5752 * The function performs the upsampling step of the Gaussian pyramid construction, though it can 5753 * actually be used to construct the Laplacian pyramid. First, it upsamples the source image by 5754 * injecting even zero rows and columns and then convolves the result with the same kernel as in 5755 * pyrDown multiplied by 4. 5756 * 5757 * @param src input image. 5758 * @param dst output image. It has the specified size and the same type as src . 5759 * @param dstsize size of the output image. 5760 */ 5761 public static void pyrUp(Mat src, Mat dst, Size dstsize) { 5762 pyrUp_1(src.nativeObj, dst.nativeObj, dstsize.width, dstsize.height); 5763 } 5764 5765 /** 5766 * Upsamples an image and then blurs it. 5767 * 5768 * By default, size of the output image is computed as {@code Size(src.cols\*2, (src.rows\*2)}, but in any 5769 * case, the following conditions should be satisfied: 5770 * 5771 * \(\begin{array}{l} | \texttt{dstsize.width} -src.cols*2| \leq ( \texttt{dstsize.width} \mod 2) \\ | \texttt{dstsize.height} -src.rows*2| \leq ( \texttt{dstsize.height} \mod 2) \end{array}\) 5772 * 5773 * The function performs the upsampling step of the Gaussian pyramid construction, though it can 5774 * actually be used to construct the Laplacian pyramid. First, it upsamples the source image by 5775 * injecting even zero rows and columns and then convolves the result with the same kernel as in 5776 * pyrDown multiplied by 4. 5777 * 5778 * @param src input image. 5779 * @param dst output image. It has the specified size and the same type as src . 5780 */ 5781 public static void pyrUp(Mat src, Mat dst) { 5782 pyrUp_2(src.nativeObj, dst.nativeObj); 5783 } 5784 5785 5786 // 5787 // C++: void cv::calcHist(vector_Mat images, vector_int channels, Mat mask, Mat& hist, vector_int histSize, vector_float ranges, bool accumulate = false) 5788 // 5789 5790 public static void calcHist(List<Mat> images, MatOfInt channels, Mat mask, Mat hist, MatOfInt histSize, MatOfFloat ranges, boolean accumulate) { 5791 Mat images_mat = Converters.vector_Mat_to_Mat(images); 5792 Mat channels_mat = channels; 5793 Mat histSize_mat = histSize; 5794 Mat ranges_mat = ranges; 5795 calcHist_0(images_mat.nativeObj, channels_mat.nativeObj, mask.nativeObj, hist.nativeObj, histSize_mat.nativeObj, ranges_mat.nativeObj, accumulate); 5796 } 5797 5798 public static void calcHist(List<Mat> images, MatOfInt channels, Mat mask, Mat hist, MatOfInt histSize, MatOfFloat ranges) { 5799 Mat images_mat = Converters.vector_Mat_to_Mat(images); 5800 Mat channels_mat = channels; 5801 Mat histSize_mat = histSize; 5802 Mat ranges_mat = ranges; 5803 calcHist_1(images_mat.nativeObj, channels_mat.nativeObj, mask.nativeObj, hist.nativeObj, histSize_mat.nativeObj, ranges_mat.nativeObj); 5804 } 5805 5806 5807 // 5808 // C++: void cv::calcBackProject(vector_Mat images, vector_int channels, Mat hist, Mat& dst, vector_float ranges, double scale) 5809 // 5810 5811 public static void calcBackProject(List<Mat> images, MatOfInt channels, Mat hist, Mat dst, MatOfFloat ranges, double scale) { 5812 Mat images_mat = Converters.vector_Mat_to_Mat(images); 5813 Mat channels_mat = channels; 5814 Mat ranges_mat = ranges; 5815 calcBackProject_0(images_mat.nativeObj, channels_mat.nativeObj, hist.nativeObj, dst.nativeObj, ranges_mat.nativeObj, scale); 5816 } 5817 5818 5819 // 5820 // C++: double cv::compareHist(Mat H1, Mat H2, int method) 5821 // 5822 5823 /** 5824 * Compares two histograms. 5825 * 5826 * The function cv::compareHist compares two dense or two sparse histograms using the specified method. 5827 * 5828 * The function returns \(d(H_1, H_2)\) . 5829 * 5830 * While the function works well with 1-, 2-, 3-dimensional dense histograms, it may not be suitable 5831 * for high-dimensional sparse histograms. In such histograms, because of aliasing and sampling 5832 * problems, the coordinates of non-zero histogram bins can slightly shift. To compare such histograms 5833 * or more general sparse configurations of weighted points, consider using the #EMD function. 5834 * 5835 * @param H1 First compared histogram. 5836 * @param H2 Second compared histogram of the same size as H1 . 5837 * @param method Comparison method, see #HistCompMethods 5838 * @return automatically generated 5839 */ 5840 public static double compareHist(Mat H1, Mat H2, int method) { 5841 return compareHist_0(H1.nativeObj, H2.nativeObj, method); 5842 } 5843 5844 5845 // 5846 // C++: void cv::equalizeHist(Mat src, Mat& dst) 5847 // 5848 5849 /** 5850 * Equalizes the histogram of a grayscale image. 5851 * 5852 * The function equalizes the histogram of the input image using the following algorithm: 5853 * 5854 * <ul> 5855 * <li> 5856 * Calculate the histogram \(H\) for src . 5857 * </li> 5858 * <li> 5859 * Normalize the histogram so that the sum of histogram bins is 255. 5860 * </li> 5861 * <li> 5862 * Compute the integral of the histogram: 5863 * \(H'_i = \sum _{0 \le j < i} H(j)\) 5864 * </li> 5865 * <li> 5866 * Transform the image using \(H'\) as a look-up table: \(\texttt{dst}(x,y) = H'(\texttt{src}(x,y))\) 5867 * </li> 5868 * </ul> 5869 * 5870 * The algorithm normalizes the brightness and increases the contrast of the image. 5871 * 5872 * @param src Source 8-bit single channel image. 5873 * @param dst Destination image of the same size and type as src . 5874 */ 5875 public static void equalizeHist(Mat src, Mat dst) { 5876 equalizeHist_0(src.nativeObj, dst.nativeObj); 5877 } 5878 5879 5880 // 5881 // C++: Ptr_CLAHE cv::createCLAHE(double clipLimit = 40.0, Size tileGridSize = Size(8, 8)) 5882 // 5883 5884 /** 5885 * Creates a smart pointer to a cv::CLAHE class and initializes it. 5886 * 5887 * @param clipLimit Threshold for contrast limiting. 5888 * @param tileGridSize Size of grid for histogram equalization. Input image will be divided into 5889 * equally sized rectangular tiles. tileGridSize defines the number of tiles in row and column. 5890 * @return automatically generated 5891 */ 5892 public static CLAHE createCLAHE(double clipLimit, Size tileGridSize) { 5893 return CLAHE.__fromPtr__(createCLAHE_0(clipLimit, tileGridSize.width, tileGridSize.height)); 5894 } 5895 5896 /** 5897 * Creates a smart pointer to a cv::CLAHE class and initializes it. 5898 * 5899 * @param clipLimit Threshold for contrast limiting. 5900 * equally sized rectangular tiles. tileGridSize defines the number of tiles in row and column. 5901 * @return automatically generated 5902 */ 5903 public static CLAHE createCLAHE(double clipLimit) { 5904 return CLAHE.__fromPtr__(createCLAHE_1(clipLimit)); 5905 } 5906 5907 /** 5908 * Creates a smart pointer to a cv::CLAHE class and initializes it. 5909 * 5910 * equally sized rectangular tiles. tileGridSize defines the number of tiles in row and column. 5911 * @return automatically generated 5912 */ 5913 public static CLAHE createCLAHE() { 5914 return CLAHE.__fromPtr__(createCLAHE_2()); 5915 } 5916 5917 5918 // 5919 // C++: float cv::wrapperEMD(Mat signature1, Mat signature2, int distType, Mat cost = Mat(), Ptr_float& lowerBound = Ptr<float>(), Mat& flow = Mat()) 5920 // 5921 5922 /** 5923 * Computes the "minimal work" distance between two weighted point configurations. 5924 * 5925 * The function computes the earth mover distance and/or a lower boundary of the distance between the 5926 * two weighted point configurations. One of the applications described in CITE: RubnerSept98, 5927 * CITE: Rubner2000 is multi-dimensional histogram comparison for image retrieval. EMD is a transportation 5928 * problem that is solved using some modification of a simplex algorithm, thus the complexity is 5929 * exponential in the worst case, though, on average it is much faster. In the case of a real metric 5930 * the lower boundary can be calculated even faster (using linear-time algorithm) and it can be used 5931 * to determine roughly whether the two signatures are far enough so that they cannot relate to the 5932 * same object. 5933 * 5934 * @param signature1 First signature, a \(\texttt{size1}\times \texttt{dims}+1\) floating-point matrix. 5935 * Each row stores the point weight followed by the point coordinates. The matrix is allowed to have 5936 * a single column (weights only) if the user-defined cost matrix is used. The weights must be 5937 * non-negative and have at least one non-zero value. 5938 * @param signature2 Second signature of the same format as signature1 , though the number of rows 5939 * may be different. The total weights may be different. In this case an extra "dummy" point is added 5940 * to either signature1 or signature2. The weights must be non-negative and have at least one non-zero 5941 * value. 5942 * @param distType Used metric. See #DistanceTypes. 5943 * @param cost User-defined \(\texttt{size1}\times \texttt{size2}\) cost matrix. Also, if a cost matrix 5944 * is used, lower boundary lowerBound cannot be calculated because it needs a metric function. 5945 * signatures that is a distance between mass centers. The lower boundary may not be calculated if 5946 * the user-defined cost matrix is used, the total weights of point configurations are not equal, or 5947 * if the signatures consist of weights only (the signature matrices have a single column). You 5948 * <b>must</b> initialize \*lowerBound . If the calculated distance between mass centers is greater or 5949 * equal to \*lowerBound (it means that the signatures are far enough), the function does not 5950 * calculate EMD. In any case \*lowerBound is set to the calculated distance between mass centers on 5951 * return. Thus, if you want to calculate both distance between mass centers and EMD, \*lowerBound 5952 * should be set to 0. 5953 * @param flow Resultant \(\texttt{size1} \times \texttt{size2}\) flow matrix: \(\texttt{flow}_{i,j}\) is 5954 * a flow from \(i\) -th point of signature1 to \(j\) -th point of signature2 . 5955 * @return automatically generated 5956 */ 5957 public static float EMD(Mat signature1, Mat signature2, int distType, Mat cost, Mat flow) { 5958 return EMD_0(signature1.nativeObj, signature2.nativeObj, distType, cost.nativeObj, flow.nativeObj); 5959 } 5960 5961 /** 5962 * Computes the "minimal work" distance between two weighted point configurations. 5963 * 5964 * The function computes the earth mover distance and/or a lower boundary of the distance between the 5965 * two weighted point configurations. One of the applications described in CITE: RubnerSept98, 5966 * CITE: Rubner2000 is multi-dimensional histogram comparison for image retrieval. EMD is a transportation 5967 * problem that is solved using some modification of a simplex algorithm, thus the complexity is 5968 * exponential in the worst case, though, on average it is much faster. In the case of a real metric 5969 * the lower boundary can be calculated even faster (using linear-time algorithm) and it can be used 5970 * to determine roughly whether the two signatures are far enough so that they cannot relate to the 5971 * same object. 5972 * 5973 * @param signature1 First signature, a \(\texttt{size1}\times \texttt{dims}+1\) floating-point matrix. 5974 * Each row stores the point weight followed by the point coordinates. The matrix is allowed to have 5975 * a single column (weights only) if the user-defined cost matrix is used. The weights must be 5976 * non-negative and have at least one non-zero value. 5977 * @param signature2 Second signature of the same format as signature1 , though the number of rows 5978 * may be different. The total weights may be different. In this case an extra "dummy" point is added 5979 * to either signature1 or signature2. The weights must be non-negative and have at least one non-zero 5980 * value. 5981 * @param distType Used metric. See #DistanceTypes. 5982 * @param cost User-defined \(\texttt{size1}\times \texttt{size2}\) cost matrix. Also, if a cost matrix 5983 * is used, lower boundary lowerBound cannot be calculated because it needs a metric function. 5984 * signatures that is a distance between mass centers. The lower boundary may not be calculated if 5985 * the user-defined cost matrix is used, the total weights of point configurations are not equal, or 5986 * if the signatures consist of weights only (the signature matrices have a single column). You 5987 * <b>must</b> initialize \*lowerBound . If the calculated distance between mass centers is greater or 5988 * equal to \*lowerBound (it means that the signatures are far enough), the function does not 5989 * calculate EMD. In any case \*lowerBound is set to the calculated distance between mass centers on 5990 * return. Thus, if you want to calculate both distance between mass centers and EMD, \*lowerBound 5991 * should be set to 0. 5992 * a flow from \(i\) -th point of signature1 to \(j\) -th point of signature2 . 5993 * @return automatically generated 5994 */ 5995 public static float EMD(Mat signature1, Mat signature2, int distType, Mat cost) { 5996 return EMD_1(signature1.nativeObj, signature2.nativeObj, distType, cost.nativeObj); 5997 } 5998 5999 /** 6000 * Computes the "minimal work" distance between two weighted point configurations. 6001 * 6002 * The function computes the earth mover distance and/or a lower boundary of the distance between the 6003 * two weighted point configurations. One of the applications described in CITE: RubnerSept98, 6004 * CITE: Rubner2000 is multi-dimensional histogram comparison for image retrieval. EMD is a transportation 6005 * problem that is solved using some modification of a simplex algorithm, thus the complexity is 6006 * exponential in the worst case, though, on average it is much faster. In the case of a real metric 6007 * the lower boundary can be calculated even faster (using linear-time algorithm) and it can be used 6008 * to determine roughly whether the two signatures are far enough so that they cannot relate to the 6009 * same object. 6010 * 6011 * @param signature1 First signature, a \(\texttt{size1}\times \texttt{dims}+1\) floating-point matrix. 6012 * Each row stores the point weight followed by the point coordinates. The matrix is allowed to have 6013 * a single column (weights only) if the user-defined cost matrix is used. The weights must be 6014 * non-negative and have at least one non-zero value. 6015 * @param signature2 Second signature of the same format as signature1 , though the number of rows 6016 * may be different. The total weights may be different. In this case an extra "dummy" point is added 6017 * to either signature1 or signature2. The weights must be non-negative and have at least one non-zero 6018 * value. 6019 * @param distType Used metric. See #DistanceTypes. 6020 * is used, lower boundary lowerBound cannot be calculated because it needs a metric function. 6021 * signatures that is a distance between mass centers. The lower boundary may not be calculated if 6022 * the user-defined cost matrix is used, the total weights of point configurations are not equal, or 6023 * if the signatures consist of weights only (the signature matrices have a single column). You 6024 * <b>must</b> initialize \*lowerBound . If the calculated distance between mass centers is greater or 6025 * equal to \*lowerBound (it means that the signatures are far enough), the function does not 6026 * calculate EMD. In any case \*lowerBound is set to the calculated distance between mass centers on 6027 * return. Thus, if you want to calculate both distance between mass centers and EMD, \*lowerBound 6028 * should be set to 0. 6029 * a flow from \(i\) -th point of signature1 to \(j\) -th point of signature2 . 6030 * @return automatically generated 6031 */ 6032 public static float EMD(Mat signature1, Mat signature2, int distType) { 6033 return EMD_3(signature1.nativeObj, signature2.nativeObj, distType); 6034 } 6035 6036 6037 // 6038 // C++: void cv::watershed(Mat image, Mat& markers) 6039 // 6040 6041 /** 6042 * Performs a marker-based image segmentation using the watershed algorithm. 6043 * 6044 * The function implements one of the variants of watershed, non-parametric marker-based segmentation 6045 * algorithm, described in CITE: Meyer92 . 6046 * 6047 * Before passing the image to the function, you have to roughly outline the desired regions in the 6048 * image markers with positive (>0) indices. So, every region is represented as one or more connected 6049 * components with the pixel values 1, 2, 3, and so on. Such markers can be retrieved from a binary 6050 * mask using #findContours and #drawContours (see the watershed.cpp demo). The markers are "seeds" of 6051 * the future image regions. All the other pixels in markers , whose relation to the outlined regions 6052 * is not known and should be defined by the algorithm, should be set to 0's. In the function output, 6053 * each pixel in markers is set to a value of the "seed" components or to -1 at boundaries between the 6054 * regions. 6055 * 6056 * <b>Note:</b> Any two neighbor connected components are not necessarily separated by a watershed boundary 6057 * (-1's pixels); for example, they can touch each other in the initial marker image passed to the 6058 * function. 6059 * 6060 * @param image Input 8-bit 3-channel image. 6061 * @param markers Input/output 32-bit single-channel image (map) of markers. It should have the same 6062 * size as image . 6063 * 6064 * SEE: findContours 6065 */ 6066 public static void watershed(Mat image, Mat markers) { 6067 watershed_0(image.nativeObj, markers.nativeObj); 6068 } 6069 6070 6071 // 6072 // C++: void cv::pyrMeanShiftFiltering(Mat src, Mat& dst, double sp, double sr, int maxLevel = 1, TermCriteria termcrit = TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS,5,1)) 6073 // 6074 6075 /** 6076 * Performs initial step of meanshift segmentation of an image. 6077 * 6078 * The function implements the filtering stage of meanshift segmentation, that is, the output of the 6079 * function is the filtered "posterized" image with color gradients and fine-grain texture flattened. 6080 * At every pixel (X,Y) of the input image (or down-sized input image, see below) the function executes 6081 * meanshift iterations, that is, the pixel (X,Y) neighborhood in the joint space-color hyperspace is 6082 * considered: 6083 * 6084 * \((x,y): X- \texttt{sp} \le x \le X+ \texttt{sp} , Y- \texttt{sp} \le y \le Y+ \texttt{sp} , ||(R,G,B)-(r,g,b)|| \le \texttt{sr}\) 6085 * 6086 * where (R,G,B) and (r,g,b) are the vectors of color components at (X,Y) and (x,y), respectively 6087 * (though, the algorithm does not depend on the color space used, so any 3-component color space can 6088 * be used instead). Over the neighborhood the average spatial value (X',Y') and average color vector 6089 * (R',G',B') are found and they act as the neighborhood center on the next iteration: 6090 * 6091 * \((X,Y)~(X',Y'), (R,G,B)~(R',G',B').\) 6092 * 6093 * After the iterations over, the color components of the initial pixel (that is, the pixel from where 6094 * the iterations started) are set to the final value (average color at the last iteration): 6095 * 6096 * \(I(X,Y) <- (R*,G*,B*)\) 6097 * 6098 * When maxLevel > 0, the gaussian pyramid of maxLevel+1 levels is built, and the above procedure is 6099 * run on the smallest layer first. After that, the results are propagated to the larger layer and the 6100 * iterations are run again only on those pixels where the layer colors differ by more than sr from the 6101 * lower-resolution layer of the pyramid. That makes boundaries of color regions sharper. Note that the 6102 * results will be actually different from the ones obtained by running the meanshift procedure on the 6103 * whole original image (i.e. when maxLevel==0). 6104 * 6105 * @param src The source 8-bit, 3-channel image. 6106 * @param dst The destination image of the same format and the same size as the source. 6107 * @param sp The spatial window radius. 6108 * @param sr The color window radius. 6109 * @param maxLevel Maximum level of the pyramid for the segmentation. 6110 * @param termcrit Termination criteria: when to stop meanshift iterations. 6111 */ 6112 public static void pyrMeanShiftFiltering(Mat src, Mat dst, double sp, double sr, int maxLevel, TermCriteria termcrit) { 6113 pyrMeanShiftFiltering_0(src.nativeObj, dst.nativeObj, sp, sr, maxLevel, termcrit.type, termcrit.maxCount, termcrit.epsilon); 6114 } 6115 6116 /** 6117 * Performs initial step of meanshift segmentation of an image. 6118 * 6119 * The function implements the filtering stage of meanshift segmentation, that is, the output of the 6120 * function is the filtered "posterized" image with color gradients and fine-grain texture flattened. 6121 * At every pixel (X,Y) of the input image (or down-sized input image, see below) the function executes 6122 * meanshift iterations, that is, the pixel (X,Y) neighborhood in the joint space-color hyperspace is 6123 * considered: 6124 * 6125 * \((x,y): X- \texttt{sp} \le x \le X+ \texttt{sp} , Y- \texttt{sp} \le y \le Y+ \texttt{sp} , ||(R,G,B)-(r,g,b)|| \le \texttt{sr}\) 6126 * 6127 * where (R,G,B) and (r,g,b) are the vectors of color components at (X,Y) and (x,y), respectively 6128 * (though, the algorithm does not depend on the color space used, so any 3-component color space can 6129 * be used instead). Over the neighborhood the average spatial value (X',Y') and average color vector 6130 * (R',G',B') are found and they act as the neighborhood center on the next iteration: 6131 * 6132 * \((X,Y)~(X',Y'), (R,G,B)~(R',G',B').\) 6133 * 6134 * After the iterations over, the color components of the initial pixel (that is, the pixel from where 6135 * the iterations started) are set to the final value (average color at the last iteration): 6136 * 6137 * \(I(X,Y) <- (R*,G*,B*)\) 6138 * 6139 * When maxLevel > 0, the gaussian pyramid of maxLevel+1 levels is built, and the above procedure is 6140 * run on the smallest layer first. After that, the results are propagated to the larger layer and the 6141 * iterations are run again only on those pixels where the layer colors differ by more than sr from the 6142 * lower-resolution layer of the pyramid. That makes boundaries of color regions sharper. Note that the 6143 * results will be actually different from the ones obtained by running the meanshift procedure on the 6144 * whole original image (i.e. when maxLevel==0). 6145 * 6146 * @param src The source 8-bit, 3-channel image. 6147 * @param dst The destination image of the same format and the same size as the source. 6148 * @param sp The spatial window radius. 6149 * @param sr The color window radius. 6150 * @param maxLevel Maximum level of the pyramid for the segmentation. 6151 */ 6152 public static void pyrMeanShiftFiltering(Mat src, Mat dst, double sp, double sr, int maxLevel) { 6153 pyrMeanShiftFiltering_1(src.nativeObj, dst.nativeObj, sp, sr, maxLevel); 6154 } 6155 6156 /** 6157 * Performs initial step of meanshift segmentation of an image. 6158 * 6159 * The function implements the filtering stage of meanshift segmentation, that is, the output of the 6160 * function is the filtered "posterized" image with color gradients and fine-grain texture flattened. 6161 * At every pixel (X,Y) of the input image (or down-sized input image, see below) the function executes 6162 * meanshift iterations, that is, the pixel (X,Y) neighborhood in the joint space-color hyperspace is 6163 * considered: 6164 * 6165 * \((x,y): X- \texttt{sp} \le x \le X+ \texttt{sp} , Y- \texttt{sp} \le y \le Y+ \texttt{sp} , ||(R,G,B)-(r,g,b)|| \le \texttt{sr}\) 6166 * 6167 * where (R,G,B) and (r,g,b) are the vectors of color components at (X,Y) and (x,y), respectively 6168 * (though, the algorithm does not depend on the color space used, so any 3-component color space can 6169 * be used instead). Over the neighborhood the average spatial value (X',Y') and average color vector 6170 * (R',G',B') are found and they act as the neighborhood center on the next iteration: 6171 * 6172 * \((X,Y)~(X',Y'), (R,G,B)~(R',G',B').\) 6173 * 6174 * After the iterations over, the color components of the initial pixel (that is, the pixel from where 6175 * the iterations started) are set to the final value (average color at the last iteration): 6176 * 6177 * \(I(X,Y) <- (R*,G*,B*)\) 6178 * 6179 * When maxLevel > 0, the gaussian pyramid of maxLevel+1 levels is built, and the above procedure is 6180 * run on the smallest layer first. After that, the results are propagated to the larger layer and the 6181 * iterations are run again only on those pixels where the layer colors differ by more than sr from the 6182 * lower-resolution layer of the pyramid. That makes boundaries of color regions sharper. Note that the 6183 * results will be actually different from the ones obtained by running the meanshift procedure on the 6184 * whole original image (i.e. when maxLevel==0). 6185 * 6186 * @param src The source 8-bit, 3-channel image. 6187 * @param dst The destination image of the same format and the same size as the source. 6188 * @param sp The spatial window radius. 6189 * @param sr The color window radius. 6190 */ 6191 public static void pyrMeanShiftFiltering(Mat src, Mat dst, double sp, double sr) { 6192 pyrMeanShiftFiltering_2(src.nativeObj, dst.nativeObj, sp, sr); 6193 } 6194 6195 6196 // 6197 // C++: void cv::grabCut(Mat img, Mat& mask, Rect rect, Mat& bgdModel, Mat& fgdModel, int iterCount, int mode = GC_EVAL) 6198 // 6199 6200 /** 6201 * Runs the GrabCut algorithm. 6202 * 6203 * The function implements the [GrabCut image segmentation algorithm](http://en.wikipedia.org/wiki/GrabCut). 6204 * 6205 * @param img Input 8-bit 3-channel image. 6206 * @param mask Input/output 8-bit single-channel mask. The mask is initialized by the function when 6207 * mode is set to #GC_INIT_WITH_RECT. Its elements may have one of the #GrabCutClasses. 6208 * @param rect ROI containing a segmented object. The pixels outside of the ROI are marked as 6209 * "obvious background". The parameter is only used when mode==#GC_INIT_WITH_RECT . 6210 * @param bgdModel Temporary array for the background model. Do not modify it while you are 6211 * processing the same image. 6212 * @param fgdModel Temporary arrays for the foreground model. Do not modify it while you are 6213 * processing the same image. 6214 * @param iterCount Number of iterations the algorithm should make before returning the result. Note 6215 * that the result can be refined with further calls with mode==#GC_INIT_WITH_MASK or 6216 * mode==GC_EVAL . 6217 * @param mode Operation mode that could be one of the #GrabCutModes 6218 */ 6219 public static void grabCut(Mat img, Mat mask, Rect rect, Mat bgdModel, Mat fgdModel, int iterCount, int mode) { 6220 grabCut_0(img.nativeObj, mask.nativeObj, rect.x, rect.y, rect.width, rect.height, bgdModel.nativeObj, fgdModel.nativeObj, iterCount, mode); 6221 } 6222 6223 /** 6224 * Runs the GrabCut algorithm. 6225 * 6226 * The function implements the [GrabCut image segmentation algorithm](http://en.wikipedia.org/wiki/GrabCut). 6227 * 6228 * @param img Input 8-bit 3-channel image. 6229 * @param mask Input/output 8-bit single-channel mask. The mask is initialized by the function when 6230 * mode is set to #GC_INIT_WITH_RECT. Its elements may have one of the #GrabCutClasses. 6231 * @param rect ROI containing a segmented object. The pixels outside of the ROI are marked as 6232 * "obvious background". The parameter is only used when mode==#GC_INIT_WITH_RECT . 6233 * @param bgdModel Temporary array for the background model. Do not modify it while you are 6234 * processing the same image. 6235 * @param fgdModel Temporary arrays for the foreground model. Do not modify it while you are 6236 * processing the same image. 6237 * @param iterCount Number of iterations the algorithm should make before returning the result. Note 6238 * that the result can be refined with further calls with mode==#GC_INIT_WITH_MASK or 6239 * mode==GC_EVAL . 6240 */ 6241 public static void grabCut(Mat img, Mat mask, Rect rect, Mat bgdModel, Mat fgdModel, int iterCount) { 6242 grabCut_1(img.nativeObj, mask.nativeObj, rect.x, rect.y, rect.width, rect.height, bgdModel.nativeObj, fgdModel.nativeObj, iterCount); 6243 } 6244 6245 6246 // 6247 // C++: void cv::distanceTransform(Mat src, Mat& dst, Mat& labels, int distanceType, int maskSize, int labelType = DIST_LABEL_CCOMP) 6248 // 6249 6250 /** 6251 * Calculates the distance to the closest zero pixel for each pixel of the source image. 6252 * 6253 * The function cv::distanceTransform calculates the approximate or precise distance from every binary 6254 * image pixel to the nearest zero pixel. For zero image pixels, the distance will obviously be zero. 6255 * 6256 * When maskSize == #DIST_MASK_PRECISE and distanceType == #DIST_L2 , the function runs the 6257 * algorithm described in CITE: Felzenszwalb04 . This algorithm is parallelized with the TBB library. 6258 * 6259 * In other cases, the algorithm CITE: Borgefors86 is used. This means that for a pixel the function 6260 * finds the shortest path to the nearest zero pixel consisting of basic shifts: horizontal, vertical, 6261 * diagonal, or knight's move (the latest is available for a \(5\times 5\) mask). The overall 6262 * distance is calculated as a sum of these basic distances. Since the distance function should be 6263 * symmetric, all of the horizontal and vertical shifts must have the same cost (denoted as a ), all 6264 * the diagonal shifts must have the same cost (denoted as {@code b}), and all knight's moves must have the 6265 * same cost (denoted as {@code c}). For the #DIST_C and #DIST_L1 types, the distance is calculated 6266 * precisely, whereas for #DIST_L2 (Euclidean distance) the distance can be calculated only with a 6267 * relative error (a \(5\times 5\) mask gives more accurate results). For {@code a},{@code b}, and {@code c}, OpenCV 6268 * uses the values suggested in the original paper: 6269 * <ul> 6270 * <li> 6271 * DIST_L1: {@code a = 1, b = 2} 6272 * </li> 6273 * <li> 6274 * DIST_L2: 6275 * <ul> 6276 * <li> 6277 * {@code 3 x 3}: {@code a=0.955, b=1.3693} 6278 * </li> 6279 * <li> 6280 * {@code 5 x 5}: {@code a=1, b=1.4, c=2.1969} 6281 * </li> 6282 * </ul> 6283 * <li> 6284 * DIST_C: {@code a = 1, b = 1} 6285 * </li> 6286 * </ul> 6287 * 6288 * Typically, for a fast, coarse distance estimation #DIST_L2, a \(3\times 3\) mask is used. For a 6289 * more accurate distance estimation #DIST_L2, a \(5\times 5\) mask or the precise algorithm is used. 6290 * Note that both the precise and the approximate algorithms are linear on the number of pixels. 6291 * 6292 * This variant of the function does not only compute the minimum distance for each pixel \((x, y)\) 6293 * but also identifies the nearest connected component consisting of zero pixels 6294 * (labelType==#DIST_LABEL_CCOMP) or the nearest zero pixel (labelType==#DIST_LABEL_PIXEL). Index of the 6295 * component/pixel is stored in {@code labels(x, y)}. When labelType==#DIST_LABEL_CCOMP, the function 6296 * automatically finds connected components of zero pixels in the input image and marks them with 6297 * distinct labels. When labelType==#DIST_LABEL_PIXEL, the function scans through the input image and 6298 * marks all the zero pixels with distinct labels. 6299 * 6300 * In this mode, the complexity is still linear. That is, the function provides a very fast way to 6301 * compute the Voronoi diagram for a binary image. Currently, the second variant can use only the 6302 * approximate distance transform algorithm, i.e. maskSize=#DIST_MASK_PRECISE is not supported 6303 * yet. 6304 * 6305 * @param src 8-bit, single-channel (binary) source image. 6306 * @param dst Output image with calculated distances. It is a 8-bit or 32-bit floating-point, 6307 * single-channel image of the same size as src. 6308 * @param labels Output 2D array of labels (the discrete Voronoi diagram). It has the type 6309 * CV_32SC1 and the same size as src. 6310 * @param distanceType Type of distance, see #DistanceTypes 6311 * @param maskSize Size of the distance transform mask, see #DistanceTransformMasks. 6312 * #DIST_MASK_PRECISE is not supported by this variant. In case of the #DIST_L1 or #DIST_C distance type, 6313 * the parameter is forced to 3 because a \(3\times 3\) mask gives the same result as \(5\times 6314 * 5\) or any larger aperture. 6315 * @param labelType Type of the label array to build, see #DistanceTransformLabelTypes. 6316 */ 6317 public static void distanceTransformWithLabels(Mat src, Mat dst, Mat labels, int distanceType, int maskSize, int labelType) { 6318 distanceTransformWithLabels_0(src.nativeObj, dst.nativeObj, labels.nativeObj, distanceType, maskSize, labelType); 6319 } 6320 6321 /** 6322 * Calculates the distance to the closest zero pixel for each pixel of the source image. 6323 * 6324 * The function cv::distanceTransform calculates the approximate or precise distance from every binary 6325 * image pixel to the nearest zero pixel. For zero image pixels, the distance will obviously be zero. 6326 * 6327 * When maskSize == #DIST_MASK_PRECISE and distanceType == #DIST_L2 , the function runs the 6328 * algorithm described in CITE: Felzenszwalb04 . This algorithm is parallelized with the TBB library. 6329 * 6330 * In other cases, the algorithm CITE: Borgefors86 is used. This means that for a pixel the function 6331 * finds the shortest path to the nearest zero pixel consisting of basic shifts: horizontal, vertical, 6332 * diagonal, or knight's move (the latest is available for a \(5\times 5\) mask). The overall 6333 * distance is calculated as a sum of these basic distances. Since the distance function should be 6334 * symmetric, all of the horizontal and vertical shifts must have the same cost (denoted as a ), all 6335 * the diagonal shifts must have the same cost (denoted as {@code b}), and all knight's moves must have the 6336 * same cost (denoted as {@code c}). For the #DIST_C and #DIST_L1 types, the distance is calculated 6337 * precisely, whereas for #DIST_L2 (Euclidean distance) the distance can be calculated only with a 6338 * relative error (a \(5\times 5\) mask gives more accurate results). For {@code a},{@code b}, and {@code c}, OpenCV 6339 * uses the values suggested in the original paper: 6340 * <ul> 6341 * <li> 6342 * DIST_L1: {@code a = 1, b = 2} 6343 * </li> 6344 * <li> 6345 * DIST_L2: 6346 * <ul> 6347 * <li> 6348 * {@code 3 x 3}: {@code a=0.955, b=1.3693} 6349 * </li> 6350 * <li> 6351 * {@code 5 x 5}: {@code a=1, b=1.4, c=2.1969} 6352 * </li> 6353 * </ul> 6354 * <li> 6355 * DIST_C: {@code a = 1, b = 1} 6356 * </li> 6357 * </ul> 6358 * 6359 * Typically, for a fast, coarse distance estimation #DIST_L2, a \(3\times 3\) mask is used. For a 6360 * more accurate distance estimation #DIST_L2, a \(5\times 5\) mask or the precise algorithm is used. 6361 * Note that both the precise and the approximate algorithms are linear on the number of pixels. 6362 * 6363 * This variant of the function does not only compute the minimum distance for each pixel \((x, y)\) 6364 * but also identifies the nearest connected component consisting of zero pixels 6365 * (labelType==#DIST_LABEL_CCOMP) or the nearest zero pixel (labelType==#DIST_LABEL_PIXEL). Index of the 6366 * component/pixel is stored in {@code labels(x, y)}. When labelType==#DIST_LABEL_CCOMP, the function 6367 * automatically finds connected components of zero pixels in the input image and marks them with 6368 * distinct labels. When labelType==#DIST_LABEL_PIXEL, the function scans through the input image and 6369 * marks all the zero pixels with distinct labels. 6370 * 6371 * In this mode, the complexity is still linear. That is, the function provides a very fast way to 6372 * compute the Voronoi diagram for a binary image. Currently, the second variant can use only the 6373 * approximate distance transform algorithm, i.e. maskSize=#DIST_MASK_PRECISE is not supported 6374 * yet. 6375 * 6376 * @param src 8-bit, single-channel (binary) source image. 6377 * @param dst Output image with calculated distances. It is a 8-bit or 32-bit floating-point, 6378 * single-channel image of the same size as src. 6379 * @param labels Output 2D array of labels (the discrete Voronoi diagram). It has the type 6380 * CV_32SC1 and the same size as src. 6381 * @param distanceType Type of distance, see #DistanceTypes 6382 * @param maskSize Size of the distance transform mask, see #DistanceTransformMasks. 6383 * #DIST_MASK_PRECISE is not supported by this variant. In case of the #DIST_L1 or #DIST_C distance type, 6384 * the parameter is forced to 3 because a \(3\times 3\) mask gives the same result as \(5\times 6385 * 5\) or any larger aperture. 6386 */ 6387 public static void distanceTransformWithLabels(Mat src, Mat dst, Mat labels, int distanceType, int maskSize) { 6388 distanceTransformWithLabels_1(src.nativeObj, dst.nativeObj, labels.nativeObj, distanceType, maskSize); 6389 } 6390 6391 6392 // 6393 // C++: void cv::distanceTransform(Mat src, Mat& dst, int distanceType, int maskSize, int dstType = CV_32F) 6394 // 6395 6396 /** 6397 * 6398 * @param src 8-bit, single-channel (binary) source image. 6399 * @param dst Output image with calculated distances. It is a 8-bit or 32-bit floating-point, 6400 * single-channel image of the same size as src . 6401 * @param distanceType Type of distance, see #DistanceTypes 6402 * @param maskSize Size of the distance transform mask, see #DistanceTransformMasks. In case of the 6403 * #DIST_L1 or #DIST_C distance type, the parameter is forced to 3 because a \(3\times 3\) mask gives 6404 * the same result as \(5\times 5\) or any larger aperture. 6405 * @param dstType Type of output image. It can be CV_8U or CV_32F. Type CV_8U can be used only for 6406 * the first variant of the function and distanceType == #DIST_L1. 6407 */ 6408 public static void distanceTransform(Mat src, Mat dst, int distanceType, int maskSize, int dstType) { 6409 distanceTransform_0(src.nativeObj, dst.nativeObj, distanceType, maskSize, dstType); 6410 } 6411 6412 /** 6413 * 6414 * @param src 8-bit, single-channel (binary) source image. 6415 * @param dst Output image with calculated distances. It is a 8-bit or 32-bit floating-point, 6416 * single-channel image of the same size as src . 6417 * @param distanceType Type of distance, see #DistanceTypes 6418 * @param maskSize Size of the distance transform mask, see #DistanceTransformMasks. In case of the 6419 * #DIST_L1 or #DIST_C distance type, the parameter is forced to 3 because a \(3\times 3\) mask gives 6420 * the same result as \(5\times 5\) or any larger aperture. 6421 * the first variant of the function and distanceType == #DIST_L1. 6422 */ 6423 public static void distanceTransform(Mat src, Mat dst, int distanceType, int maskSize) { 6424 distanceTransform_1(src.nativeObj, dst.nativeObj, distanceType, maskSize); 6425 } 6426 6427 6428 // 6429 // C++: int cv::floodFill(Mat& image, Mat& mask, Point seedPoint, Scalar newVal, Rect* rect = 0, Scalar loDiff = Scalar(), Scalar upDiff = Scalar(), int flags = 4) 6430 // 6431 6432 /** 6433 * Fills a connected component with the given color. 6434 * 6435 * The function cv::floodFill fills a connected component starting from the seed point with the specified 6436 * color. The connectivity is determined by the color/brightness closeness of the neighbor pixels. The 6437 * pixel at \((x,y)\) is considered to belong to the repainted domain if: 6438 * 6439 * <ul> 6440 * <li> 6441 * in case of a grayscale image and floating range 6442 * \(\texttt{src} (x',y')- \texttt{loDiff} \leq \texttt{src} (x,y) \leq \texttt{src} (x',y')+ \texttt{upDiff}\) 6443 * </li> 6444 * </ul> 6445 * 6446 * 6447 * <ul> 6448 * <li> 6449 * in case of a grayscale image and fixed range 6450 * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)- \texttt{loDiff} \leq \texttt{src} (x,y) \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)+ \texttt{upDiff}\) 6451 * </li> 6452 * </ul> 6453 * 6454 * 6455 * <ul> 6456 * <li> 6457 * in case of a color image and floating range 6458 * \(\texttt{src} (x',y')_r- \texttt{loDiff} _r \leq \texttt{src} (x,y)_r \leq \texttt{src} (x',y')_r+ \texttt{upDiff} _r,\) 6459 * \(\texttt{src} (x',y')_g- \texttt{loDiff} _g \leq \texttt{src} (x,y)_g \leq \texttt{src} (x',y')_g+ \texttt{upDiff} _g\) 6460 * and 6461 * \(\texttt{src} (x',y')_b- \texttt{loDiff} _b \leq \texttt{src} (x,y)_b \leq \texttt{src} (x',y')_b+ \texttt{upDiff} _b\) 6462 * </li> 6463 * </ul> 6464 * 6465 * 6466 * <ul> 6467 * <li> 6468 * in case of a color image and fixed range 6469 * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_r- \texttt{loDiff} _r \leq \texttt{src} (x,y)_r \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_r+ \texttt{upDiff} _r,\) 6470 * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_g- \texttt{loDiff} _g \leq \texttt{src} (x,y)_g \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_g+ \texttt{upDiff} _g\) 6471 * and 6472 * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_b- \texttt{loDiff} _b \leq \texttt{src} (x,y)_b \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_b+ \texttt{upDiff} _b\) 6473 * </li> 6474 * </ul> 6475 * 6476 * 6477 * where \(src(x',y')\) is the value of one of pixel neighbors that is already known to belong to the 6478 * component. That is, to be added to the connected component, a color/brightness of the pixel should 6479 * be close enough to: 6480 * <ul> 6481 * <li> 6482 * Color/brightness of one of its neighbors that already belong to the connected component in case 6483 * of a floating range. 6484 * </li> 6485 * <li> 6486 * Color/brightness of the seed point in case of a fixed range. 6487 * </li> 6488 * </ul> 6489 * 6490 * Use these functions to either mark a connected component with the specified color in-place, or build 6491 * a mask and then extract the contour, or copy the region to another image, and so on. 6492 * 6493 * @param image Input/output 1- or 3-channel, 8-bit, or floating-point image. It is modified by the 6494 * function unless the #FLOODFILL_MASK_ONLY flag is set in the second variant of the function. See 6495 * the details below. 6496 * @param mask Operation mask that should be a single-channel 8-bit image, 2 pixels wider and 2 pixels 6497 * taller than image. If an empty Mat is passed it will be created automatically. Since this is both an 6498 * input and output parameter, you must take responsibility of initializing it. 6499 * Flood-filling cannot go across non-zero pixels in the input mask. For example, 6500 * an edge detector output can be used as a mask to stop filling at edges. On output, pixels in the 6501 * mask corresponding to filled pixels in the image are set to 1 or to the specified value in flags 6502 * as described below. Additionally, the function fills the border of the mask with ones to simplify 6503 * internal processing. It is therefore possible to use the same mask in multiple calls to the function 6504 * to make sure the filled areas do not overlap. 6505 * @param seedPoint Starting point. 6506 * @param newVal New value of the repainted domain pixels. 6507 * @param loDiff Maximal lower brightness/color difference between the currently observed pixel and 6508 * one of its neighbors belonging to the component, or a seed pixel being added to the component. 6509 * @param upDiff Maximal upper brightness/color difference between the currently observed pixel and 6510 * one of its neighbors belonging to the component, or a seed pixel being added to the component. 6511 * @param rect Optional output parameter set by the function to the minimum bounding rectangle of the 6512 * repainted domain. 6513 * @param flags Operation flags. The first 8 bits contain a connectivity value. The default value of 6514 * 4 means that only the four nearest neighbor pixels (those that share an edge) are considered. A 6515 * connectivity value of 8 means that the eight nearest neighbor pixels (those that share a corner) 6516 * will be considered. The next 8 bits (8-16) contain a value between 1 and 255 with which to fill 6517 * the mask (the default value is 1). For example, 4 | ( 255 << 8 ) will consider 4 nearest 6518 * neighbours and fill the mask with a value of 255. The following additional options occupy higher 6519 * bits and therefore may be further combined with the connectivity and mask fill values using 6520 * bit-wise or (|), see #FloodFillFlags. 6521 * 6522 * <b>Note:</b> Since the mask is larger than the filled image, a pixel \((x, y)\) in image corresponds to the 6523 * pixel \((x+1, y+1)\) in the mask . 6524 * 6525 * SEE: findContours 6526 * @return automatically generated 6527 */ 6528 public static int floodFill(Mat image, Mat mask, Point seedPoint, Scalar newVal, Rect rect, Scalar loDiff, Scalar upDiff, int flags) { 6529 double[] rect_out = new double[4]; 6530 int retVal = floodFill_0(image.nativeObj, mask.nativeObj, seedPoint.x, seedPoint.y, newVal.val[0], newVal.val[1], newVal.val[2], newVal.val[3], rect_out, loDiff.val[0], loDiff.val[1], loDiff.val[2], loDiff.val[3], upDiff.val[0], upDiff.val[1], upDiff.val[2], upDiff.val[3], flags); 6531 if(rect!=null){ rect.x = (int)rect_out[0]; rect.y = (int)rect_out[1]; rect.width = (int)rect_out[2]; rect.height = (int)rect_out[3]; } 6532 return retVal; 6533 } 6534 6535 /** 6536 * Fills a connected component with the given color. 6537 * 6538 * The function cv::floodFill fills a connected component starting from the seed point with the specified 6539 * color. The connectivity is determined by the color/brightness closeness of the neighbor pixels. The 6540 * pixel at \((x,y)\) is considered to belong to the repainted domain if: 6541 * 6542 * <ul> 6543 * <li> 6544 * in case of a grayscale image and floating range 6545 * \(\texttt{src} (x',y')- \texttt{loDiff} \leq \texttt{src} (x,y) \leq \texttt{src} (x',y')+ \texttt{upDiff}\) 6546 * </li> 6547 * </ul> 6548 * 6549 * 6550 * <ul> 6551 * <li> 6552 * in case of a grayscale image and fixed range 6553 * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)- \texttt{loDiff} \leq \texttt{src} (x,y) \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)+ \texttt{upDiff}\) 6554 * </li> 6555 * </ul> 6556 * 6557 * 6558 * <ul> 6559 * <li> 6560 * in case of a color image and floating range 6561 * \(\texttt{src} (x',y')_r- \texttt{loDiff} _r \leq \texttt{src} (x,y)_r \leq \texttt{src} (x',y')_r+ \texttt{upDiff} _r,\) 6562 * \(\texttt{src} (x',y')_g- \texttt{loDiff} _g \leq \texttt{src} (x,y)_g \leq \texttt{src} (x',y')_g+ \texttt{upDiff} _g\) 6563 * and 6564 * \(\texttt{src} (x',y')_b- \texttt{loDiff} _b \leq \texttt{src} (x,y)_b \leq \texttt{src} (x',y')_b+ \texttt{upDiff} _b\) 6565 * </li> 6566 * </ul> 6567 * 6568 * 6569 * <ul> 6570 * <li> 6571 * in case of a color image and fixed range 6572 * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_r- \texttt{loDiff} _r \leq \texttt{src} (x,y)_r \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_r+ \texttt{upDiff} _r,\) 6573 * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_g- \texttt{loDiff} _g \leq \texttt{src} (x,y)_g \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_g+ \texttt{upDiff} _g\) 6574 * and 6575 * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_b- \texttt{loDiff} _b \leq \texttt{src} (x,y)_b \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_b+ \texttt{upDiff} _b\) 6576 * </li> 6577 * </ul> 6578 * 6579 * 6580 * where \(src(x',y')\) is the value of one of pixel neighbors that is already known to belong to the 6581 * component. That is, to be added to the connected component, a color/brightness of the pixel should 6582 * be close enough to: 6583 * <ul> 6584 * <li> 6585 * Color/brightness of one of its neighbors that already belong to the connected component in case 6586 * of a floating range. 6587 * </li> 6588 * <li> 6589 * Color/brightness of the seed point in case of a fixed range. 6590 * </li> 6591 * </ul> 6592 * 6593 * Use these functions to either mark a connected component with the specified color in-place, or build 6594 * a mask and then extract the contour, or copy the region to another image, and so on. 6595 * 6596 * @param image Input/output 1- or 3-channel, 8-bit, or floating-point image. It is modified by the 6597 * function unless the #FLOODFILL_MASK_ONLY flag is set in the second variant of the function. See 6598 * the details below. 6599 * @param mask Operation mask that should be a single-channel 8-bit image, 2 pixels wider and 2 pixels 6600 * taller than image. If an empty Mat is passed it will be created automatically. Since this is both an 6601 * input and output parameter, you must take responsibility of initializing it. 6602 * Flood-filling cannot go across non-zero pixels in the input mask. For example, 6603 * an edge detector output can be used as a mask to stop filling at edges. On output, pixels in the 6604 * mask corresponding to filled pixels in the image are set to 1 or to the specified value in flags 6605 * as described below. Additionally, the function fills the border of the mask with ones to simplify 6606 * internal processing. It is therefore possible to use the same mask in multiple calls to the function 6607 * to make sure the filled areas do not overlap. 6608 * @param seedPoint Starting point. 6609 * @param newVal New value of the repainted domain pixels. 6610 * @param loDiff Maximal lower brightness/color difference between the currently observed pixel and 6611 * one of its neighbors belonging to the component, or a seed pixel being added to the component. 6612 * @param upDiff Maximal upper brightness/color difference between the currently observed pixel and 6613 * one of its neighbors belonging to the component, or a seed pixel being added to the component. 6614 * @param rect Optional output parameter set by the function to the minimum bounding rectangle of the 6615 * repainted domain. 6616 * 4 means that only the four nearest neighbor pixels (those that share an edge) are considered. A 6617 * connectivity value of 8 means that the eight nearest neighbor pixels (those that share a corner) 6618 * will be considered. The next 8 bits (8-16) contain a value between 1 and 255 with which to fill 6619 * the mask (the default value is 1). For example, 4 | ( 255 << 8 ) will consider 4 nearest 6620 * neighbours and fill the mask with a value of 255. The following additional options occupy higher 6621 * bits and therefore may be further combined with the connectivity and mask fill values using 6622 * bit-wise or (|), see #FloodFillFlags. 6623 * 6624 * <b>Note:</b> Since the mask is larger than the filled image, a pixel \((x, y)\) in image corresponds to the 6625 * pixel \((x+1, y+1)\) in the mask . 6626 * 6627 * SEE: findContours 6628 * @return automatically generated 6629 */ 6630 public static int floodFill(Mat image, Mat mask, Point seedPoint, Scalar newVal, Rect rect, Scalar loDiff, Scalar upDiff) { 6631 double[] rect_out = new double[4]; 6632 int retVal = floodFill_1(image.nativeObj, mask.nativeObj, seedPoint.x, seedPoint.y, newVal.val[0], newVal.val[1], newVal.val[2], newVal.val[3], rect_out, loDiff.val[0], loDiff.val[1], loDiff.val[2], loDiff.val[3], upDiff.val[0], upDiff.val[1], upDiff.val[2], upDiff.val[3]); 6633 if(rect!=null){ rect.x = (int)rect_out[0]; rect.y = (int)rect_out[1]; rect.width = (int)rect_out[2]; rect.height = (int)rect_out[3]; } 6634 return retVal; 6635 } 6636 6637 /** 6638 * Fills a connected component with the given color. 6639 * 6640 * The function cv::floodFill fills a connected component starting from the seed point with the specified 6641 * color. The connectivity is determined by the color/brightness closeness of the neighbor pixels. The 6642 * pixel at \((x,y)\) is considered to belong to the repainted domain if: 6643 * 6644 * <ul> 6645 * <li> 6646 * in case of a grayscale image and floating range 6647 * \(\texttt{src} (x',y')- \texttt{loDiff} \leq \texttt{src} (x,y) \leq \texttt{src} (x',y')+ \texttt{upDiff}\) 6648 * </li> 6649 * </ul> 6650 * 6651 * 6652 * <ul> 6653 * <li> 6654 * in case of a grayscale image and fixed range 6655 * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)- \texttt{loDiff} \leq \texttt{src} (x,y) \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)+ \texttt{upDiff}\) 6656 * </li> 6657 * </ul> 6658 * 6659 * 6660 * <ul> 6661 * <li> 6662 * in case of a color image and floating range 6663 * \(\texttt{src} (x',y')_r- \texttt{loDiff} _r \leq \texttt{src} (x,y)_r \leq \texttt{src} (x',y')_r+ \texttt{upDiff} _r,\) 6664 * \(\texttt{src} (x',y')_g- \texttt{loDiff} _g \leq \texttt{src} (x,y)_g \leq \texttt{src} (x',y')_g+ \texttt{upDiff} _g\) 6665 * and 6666 * \(\texttt{src} (x',y')_b- \texttt{loDiff} _b \leq \texttt{src} (x,y)_b \leq \texttt{src} (x',y')_b+ \texttt{upDiff} _b\) 6667 * </li> 6668 * </ul> 6669 * 6670 * 6671 * <ul> 6672 * <li> 6673 * in case of a color image and fixed range 6674 * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_r- \texttt{loDiff} _r \leq \texttt{src} (x,y)_r \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_r+ \texttt{upDiff} _r,\) 6675 * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_g- \texttt{loDiff} _g \leq \texttt{src} (x,y)_g \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_g+ \texttt{upDiff} _g\) 6676 * and 6677 * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_b- \texttt{loDiff} _b \leq \texttt{src} (x,y)_b \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_b+ \texttt{upDiff} _b\) 6678 * </li> 6679 * </ul> 6680 * 6681 * 6682 * where \(src(x',y')\) is the value of one of pixel neighbors that is already known to belong to the 6683 * component. That is, to be added to the connected component, a color/brightness of the pixel should 6684 * be close enough to: 6685 * <ul> 6686 * <li> 6687 * Color/brightness of one of its neighbors that already belong to the connected component in case 6688 * of a floating range. 6689 * </li> 6690 * <li> 6691 * Color/brightness of the seed point in case of a fixed range. 6692 * </li> 6693 * </ul> 6694 * 6695 * Use these functions to either mark a connected component with the specified color in-place, or build 6696 * a mask and then extract the contour, or copy the region to another image, and so on. 6697 * 6698 * @param image Input/output 1- or 3-channel, 8-bit, or floating-point image. It is modified by the 6699 * function unless the #FLOODFILL_MASK_ONLY flag is set in the second variant of the function. See 6700 * the details below. 6701 * @param mask Operation mask that should be a single-channel 8-bit image, 2 pixels wider and 2 pixels 6702 * taller than image. If an empty Mat is passed it will be created automatically. Since this is both an 6703 * input and output parameter, you must take responsibility of initializing it. 6704 * Flood-filling cannot go across non-zero pixels in the input mask. For example, 6705 * an edge detector output can be used as a mask to stop filling at edges. On output, pixels in the 6706 * mask corresponding to filled pixels in the image are set to 1 or to the specified value in flags 6707 * as described below. Additionally, the function fills the border of the mask with ones to simplify 6708 * internal processing. It is therefore possible to use the same mask in multiple calls to the function 6709 * to make sure the filled areas do not overlap. 6710 * @param seedPoint Starting point. 6711 * @param newVal New value of the repainted domain pixels. 6712 * @param loDiff Maximal lower brightness/color difference between the currently observed pixel and 6713 * one of its neighbors belonging to the component, or a seed pixel being added to the component. 6714 * one of its neighbors belonging to the component, or a seed pixel being added to the component. 6715 * @param rect Optional output parameter set by the function to the minimum bounding rectangle of the 6716 * repainted domain. 6717 * 4 means that only the four nearest neighbor pixels (those that share an edge) are considered. A 6718 * connectivity value of 8 means that the eight nearest neighbor pixels (those that share a corner) 6719 * will be considered. The next 8 bits (8-16) contain a value between 1 and 255 with which to fill 6720 * the mask (the default value is 1). For example, 4 | ( 255 << 8 ) will consider 4 nearest 6721 * neighbours and fill the mask with a value of 255. The following additional options occupy higher 6722 * bits and therefore may be further combined with the connectivity and mask fill values using 6723 * bit-wise or (|), see #FloodFillFlags. 6724 * 6725 * <b>Note:</b> Since the mask is larger than the filled image, a pixel \((x, y)\) in image corresponds to the 6726 * pixel \((x+1, y+1)\) in the mask . 6727 * 6728 * SEE: findContours 6729 * @return automatically generated 6730 */ 6731 public static int floodFill(Mat image, Mat mask, Point seedPoint, Scalar newVal, Rect rect, Scalar loDiff) { 6732 double[] rect_out = new double[4]; 6733 int retVal = floodFill_2(image.nativeObj, mask.nativeObj, seedPoint.x, seedPoint.y, newVal.val[0], newVal.val[1], newVal.val[2], newVal.val[3], rect_out, loDiff.val[0], loDiff.val[1], loDiff.val[2], loDiff.val[3]); 6734 if(rect!=null){ rect.x = (int)rect_out[0]; rect.y = (int)rect_out[1]; rect.width = (int)rect_out[2]; rect.height = (int)rect_out[3]; } 6735 return retVal; 6736 } 6737 6738 /** 6739 * Fills a connected component with the given color. 6740 * 6741 * The function cv::floodFill fills a connected component starting from the seed point with the specified 6742 * color. The connectivity is determined by the color/brightness closeness of the neighbor pixels. The 6743 * pixel at \((x,y)\) is considered to belong to the repainted domain if: 6744 * 6745 * <ul> 6746 * <li> 6747 * in case of a grayscale image and floating range 6748 * \(\texttt{src} (x',y')- \texttt{loDiff} \leq \texttt{src} (x,y) \leq \texttt{src} (x',y')+ \texttt{upDiff}\) 6749 * </li> 6750 * </ul> 6751 * 6752 * 6753 * <ul> 6754 * <li> 6755 * in case of a grayscale image and fixed range 6756 * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)- \texttt{loDiff} \leq \texttt{src} (x,y) \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)+ \texttt{upDiff}\) 6757 * </li> 6758 * </ul> 6759 * 6760 * 6761 * <ul> 6762 * <li> 6763 * in case of a color image and floating range 6764 * \(\texttt{src} (x',y')_r- \texttt{loDiff} _r \leq \texttt{src} (x,y)_r \leq \texttt{src} (x',y')_r+ \texttt{upDiff} _r,\) 6765 * \(\texttt{src} (x',y')_g- \texttt{loDiff} _g \leq \texttt{src} (x,y)_g \leq \texttt{src} (x',y')_g+ \texttt{upDiff} _g\) 6766 * and 6767 * \(\texttt{src} (x',y')_b- \texttt{loDiff} _b \leq \texttt{src} (x,y)_b \leq \texttt{src} (x',y')_b+ \texttt{upDiff} _b\) 6768 * </li> 6769 * </ul> 6770 * 6771 * 6772 * <ul> 6773 * <li> 6774 * in case of a color image and fixed range 6775 * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_r- \texttt{loDiff} _r \leq \texttt{src} (x,y)_r \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_r+ \texttt{upDiff} _r,\) 6776 * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_g- \texttt{loDiff} _g \leq \texttt{src} (x,y)_g \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_g+ \texttt{upDiff} _g\) 6777 * and 6778 * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_b- \texttt{loDiff} _b \leq \texttt{src} (x,y)_b \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_b+ \texttt{upDiff} _b\) 6779 * </li> 6780 * </ul> 6781 * 6782 * 6783 * where \(src(x',y')\) is the value of one of pixel neighbors that is already known to belong to the 6784 * component. That is, to be added to the connected component, a color/brightness of the pixel should 6785 * be close enough to: 6786 * <ul> 6787 * <li> 6788 * Color/brightness of one of its neighbors that already belong to the connected component in case 6789 * of a floating range. 6790 * </li> 6791 * <li> 6792 * Color/brightness of the seed point in case of a fixed range. 6793 * </li> 6794 * </ul> 6795 * 6796 * Use these functions to either mark a connected component with the specified color in-place, or build 6797 * a mask and then extract the contour, or copy the region to another image, and so on. 6798 * 6799 * @param image Input/output 1- or 3-channel, 8-bit, or floating-point image. It is modified by the 6800 * function unless the #FLOODFILL_MASK_ONLY flag is set in the second variant of the function. See 6801 * the details below. 6802 * @param mask Operation mask that should be a single-channel 8-bit image, 2 pixels wider and 2 pixels 6803 * taller than image. If an empty Mat is passed it will be created automatically. Since this is both an 6804 * input and output parameter, you must take responsibility of initializing it. 6805 * Flood-filling cannot go across non-zero pixels in the input mask. For example, 6806 * an edge detector output can be used as a mask to stop filling at edges. On output, pixels in the 6807 * mask corresponding to filled pixels in the image are set to 1 or to the specified value in flags 6808 * as described below. Additionally, the function fills the border of the mask with ones to simplify 6809 * internal processing. It is therefore possible to use the same mask in multiple calls to the function 6810 * to make sure the filled areas do not overlap. 6811 * @param seedPoint Starting point. 6812 * @param newVal New value of the repainted domain pixels. 6813 * one of its neighbors belonging to the component, or a seed pixel being added to the component. 6814 * one of its neighbors belonging to the component, or a seed pixel being added to the component. 6815 * @param rect Optional output parameter set by the function to the minimum bounding rectangle of the 6816 * repainted domain. 6817 * 4 means that only the four nearest neighbor pixels (those that share an edge) are considered. A 6818 * connectivity value of 8 means that the eight nearest neighbor pixels (those that share a corner) 6819 * will be considered. The next 8 bits (8-16) contain a value between 1 and 255 with which to fill 6820 * the mask (the default value is 1). For example, 4 | ( 255 << 8 ) will consider 4 nearest 6821 * neighbours and fill the mask with a value of 255. The following additional options occupy higher 6822 * bits and therefore may be further combined with the connectivity and mask fill values using 6823 * bit-wise or (|), see #FloodFillFlags. 6824 * 6825 * <b>Note:</b> Since the mask is larger than the filled image, a pixel \((x, y)\) in image corresponds to the 6826 * pixel \((x+1, y+1)\) in the mask . 6827 * 6828 * SEE: findContours 6829 * @return automatically generated 6830 */ 6831 public static int floodFill(Mat image, Mat mask, Point seedPoint, Scalar newVal, Rect rect) { 6832 double[] rect_out = new double[4]; 6833 int retVal = floodFill_3(image.nativeObj, mask.nativeObj, seedPoint.x, seedPoint.y, newVal.val[0], newVal.val[1], newVal.val[2], newVal.val[3], rect_out); 6834 if(rect!=null){ rect.x = (int)rect_out[0]; rect.y = (int)rect_out[1]; rect.width = (int)rect_out[2]; rect.height = (int)rect_out[3]; } 6835 return retVal; 6836 } 6837 6838 /** 6839 * Fills a connected component with the given color. 6840 * 6841 * The function cv::floodFill fills a connected component starting from the seed point with the specified 6842 * color. The connectivity is determined by the color/brightness closeness of the neighbor pixels. The 6843 * pixel at \((x,y)\) is considered to belong to the repainted domain if: 6844 * 6845 * <ul> 6846 * <li> 6847 * in case of a grayscale image and floating range 6848 * \(\texttt{src} (x',y')- \texttt{loDiff} \leq \texttt{src} (x,y) \leq \texttt{src} (x',y')+ \texttt{upDiff}\) 6849 * </li> 6850 * </ul> 6851 * 6852 * 6853 * <ul> 6854 * <li> 6855 * in case of a grayscale image and fixed range 6856 * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)- \texttt{loDiff} \leq \texttt{src} (x,y) \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)+ \texttt{upDiff}\) 6857 * </li> 6858 * </ul> 6859 * 6860 * 6861 * <ul> 6862 * <li> 6863 * in case of a color image and floating range 6864 * \(\texttt{src} (x',y')_r- \texttt{loDiff} _r \leq \texttt{src} (x,y)_r \leq \texttt{src} (x',y')_r+ \texttt{upDiff} _r,\) 6865 * \(\texttt{src} (x',y')_g- \texttt{loDiff} _g \leq \texttt{src} (x,y)_g \leq \texttt{src} (x',y')_g+ \texttt{upDiff} _g\) 6866 * and 6867 * \(\texttt{src} (x',y')_b- \texttt{loDiff} _b \leq \texttt{src} (x,y)_b \leq \texttt{src} (x',y')_b+ \texttt{upDiff} _b\) 6868 * </li> 6869 * </ul> 6870 * 6871 * 6872 * <ul> 6873 * <li> 6874 * in case of a color image and fixed range 6875 * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_r- \texttt{loDiff} _r \leq \texttt{src} (x,y)_r \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_r+ \texttt{upDiff} _r,\) 6876 * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_g- \texttt{loDiff} _g \leq \texttt{src} (x,y)_g \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_g+ \texttt{upDiff} _g\) 6877 * and 6878 * \(\texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_b- \texttt{loDiff} _b \leq \texttt{src} (x,y)_b \leq \texttt{src} ( \texttt{seedPoint} .x, \texttt{seedPoint} .y)_b+ \texttt{upDiff} _b\) 6879 * </li> 6880 * </ul> 6881 * 6882 * 6883 * where \(src(x',y')\) is the value of one of pixel neighbors that is already known to belong to the 6884 * component. That is, to be added to the connected component, a color/brightness of the pixel should 6885 * be close enough to: 6886 * <ul> 6887 * <li> 6888 * Color/brightness of one of its neighbors that already belong to the connected component in case 6889 * of a floating range. 6890 * </li> 6891 * <li> 6892 * Color/brightness of the seed point in case of a fixed range. 6893 * </li> 6894 * </ul> 6895 * 6896 * Use these functions to either mark a connected component with the specified color in-place, or build 6897 * a mask and then extract the contour, or copy the region to another image, and so on. 6898 * 6899 * @param image Input/output 1- or 3-channel, 8-bit, or floating-point image. It is modified by the 6900 * function unless the #FLOODFILL_MASK_ONLY flag is set in the second variant of the function. See 6901 * the details below. 6902 * @param mask Operation mask that should be a single-channel 8-bit image, 2 pixels wider and 2 pixels 6903 * taller than image. If an empty Mat is passed it will be created automatically. Since this is both an 6904 * input and output parameter, you must take responsibility of initializing it. 6905 * Flood-filling cannot go across non-zero pixels in the input mask. For example, 6906 * an edge detector output can be used as a mask to stop filling at edges. On output, pixels in the 6907 * mask corresponding to filled pixels in the image are set to 1 or to the specified value in flags 6908 * as described below. Additionally, the function fills the border of the mask with ones to simplify 6909 * internal processing. It is therefore possible to use the same mask in multiple calls to the function 6910 * to make sure the filled areas do not overlap. 6911 * @param seedPoint Starting point. 6912 * @param newVal New value of the repainted domain pixels. 6913 * one of its neighbors belonging to the component, or a seed pixel being added to the component. 6914 * one of its neighbors belonging to the component, or a seed pixel being added to the component. 6915 * repainted domain. 6916 * 4 means that only the four nearest neighbor pixels (those that share an edge) are considered. A 6917 * connectivity value of 8 means that the eight nearest neighbor pixels (those that share a corner) 6918 * will be considered. The next 8 bits (8-16) contain a value between 1 and 255 with which to fill 6919 * the mask (the default value is 1). For example, 4 | ( 255 << 8 ) will consider 4 nearest 6920 * neighbours and fill the mask with a value of 255. The following additional options occupy higher 6921 * bits and therefore may be further combined with the connectivity and mask fill values using 6922 * bit-wise or (|), see #FloodFillFlags. 6923 * 6924 * <b>Note:</b> Since the mask is larger than the filled image, a pixel \((x, y)\) in image corresponds to the 6925 * pixel \((x+1, y+1)\) in the mask . 6926 * 6927 * SEE: findContours 6928 * @return automatically generated 6929 */ 6930 public static int floodFill(Mat image, Mat mask, Point seedPoint, Scalar newVal) { 6931 return floodFill_4(image.nativeObj, mask.nativeObj, seedPoint.x, seedPoint.y, newVal.val[0], newVal.val[1], newVal.val[2], newVal.val[3]); 6932 } 6933 6934 6935 // 6936 // C++: void cv::blendLinear(Mat src1, Mat src2, Mat weights1, Mat weights2, Mat& dst) 6937 // 6938 6939 /** 6940 * 6941 * 6942 * variant without {@code mask} parameter 6943 * @param src1 automatically generated 6944 * @param src2 automatically generated 6945 * @param weights1 automatically generated 6946 * @param weights2 automatically generated 6947 * @param dst automatically generated 6948 */ 6949 public static void blendLinear(Mat src1, Mat src2, Mat weights1, Mat weights2, Mat dst) { 6950 blendLinear_0(src1.nativeObj, src2.nativeObj, weights1.nativeObj, weights2.nativeObj, dst.nativeObj); 6951 } 6952 6953 6954 // 6955 // C++: void cv::cvtColor(Mat src, Mat& dst, int code, int dstCn = 0) 6956 // 6957 6958 /** 6959 * Converts an image from one color space to another. 6960 * 6961 * The function converts an input image from one color space to another. In case of a transformation 6962 * to-from RGB color space, the order of the channels should be specified explicitly (RGB or BGR). Note 6963 * that the default color format in OpenCV is often referred to as RGB but it is actually BGR (the 6964 * bytes are reversed). So the first byte in a standard (24-bit) color image will be an 8-bit Blue 6965 * component, the second byte will be Green, and the third byte will be Red. The fourth, fifth, and 6966 * sixth bytes would then be the second pixel (Blue, then Green, then Red), and so on. 6967 * 6968 * The conventional ranges for R, G, and B channel values are: 6969 * <ul> 6970 * <li> 6971 * 0 to 255 for CV_8U images 6972 * </li> 6973 * <li> 6974 * 0 to 65535 for CV_16U images 6975 * </li> 6976 * <li> 6977 * 0 to 1 for CV_32F images 6978 * </li> 6979 * </ul> 6980 * 6981 * In case of linear transformations, the range does not matter. But in case of a non-linear 6982 * transformation, an input RGB image should be normalized to the proper value range to get the correct 6983 * results, for example, for RGB \(\rightarrow\) L\*u\*v\* transformation. For example, if you have a 6984 * 32-bit floating-point image directly converted from an 8-bit image without any scaling, then it will 6985 * have the 0..255 value range instead of 0..1 assumed by the function. So, before calling #cvtColor , 6986 * you need first to scale the image down: 6987 * <code> 6988 * img *= 1./255; 6989 * cvtColor(img, img, COLOR_BGR2Luv); 6990 * </code> 6991 * If you use #cvtColor with 8-bit images, the conversion will have some information lost. For many 6992 * applications, this will not be noticeable but it is recommended to use 32-bit images in applications 6993 * that need the full range of colors or that convert an image before an operation and then convert 6994 * back. 6995 * 6996 * If conversion adds the alpha channel, its value will set to the maximum of corresponding channel 6997 * range: 255 for CV_8U, 65535 for CV_16U, 1 for CV_32F. 6998 * 6999 * @param src input image: 8-bit unsigned, 16-bit unsigned ( CV_16UC... ), or single-precision 7000 * floating-point. 7001 * @param dst output image of the same size and depth as src. 7002 * @param code color space conversion code (see #ColorConversionCodes). 7003 * @param dstCn number of channels in the destination image; if the parameter is 0, the number of the 7004 * channels is derived automatically from src and code. 7005 * 7006 * SEE: REF: imgproc_color_conversions 7007 */ 7008 public static void cvtColor(Mat src, Mat dst, int code, int dstCn) { 7009 cvtColor_0(src.nativeObj, dst.nativeObj, code, dstCn); 7010 } 7011 7012 /** 7013 * Converts an image from one color space to another. 7014 * 7015 * The function converts an input image from one color space to another. In case of a transformation 7016 * to-from RGB color space, the order of the channels should be specified explicitly (RGB or BGR). Note 7017 * that the default color format in OpenCV is often referred to as RGB but it is actually BGR (the 7018 * bytes are reversed). So the first byte in a standard (24-bit) color image will be an 8-bit Blue 7019 * component, the second byte will be Green, and the third byte will be Red. The fourth, fifth, and 7020 * sixth bytes would then be the second pixel (Blue, then Green, then Red), and so on. 7021 * 7022 * The conventional ranges for R, G, and B channel values are: 7023 * <ul> 7024 * <li> 7025 * 0 to 255 for CV_8U images 7026 * </li> 7027 * <li> 7028 * 0 to 65535 for CV_16U images 7029 * </li> 7030 * <li> 7031 * 0 to 1 for CV_32F images 7032 * </li> 7033 * </ul> 7034 * 7035 * In case of linear transformations, the range does not matter. But in case of a non-linear 7036 * transformation, an input RGB image should be normalized to the proper value range to get the correct 7037 * results, for example, for RGB \(\rightarrow\) L\*u\*v\* transformation. For example, if you have a 7038 * 32-bit floating-point image directly converted from an 8-bit image without any scaling, then it will 7039 * have the 0..255 value range instead of 0..1 assumed by the function. So, before calling #cvtColor , 7040 * you need first to scale the image down: 7041 * <code> 7042 * img *= 1./255; 7043 * cvtColor(img, img, COLOR_BGR2Luv); 7044 * </code> 7045 * If you use #cvtColor with 8-bit images, the conversion will have some information lost. For many 7046 * applications, this will not be noticeable but it is recommended to use 32-bit images in applications 7047 * that need the full range of colors or that convert an image before an operation and then convert 7048 * back. 7049 * 7050 * If conversion adds the alpha channel, its value will set to the maximum of corresponding channel 7051 * range: 255 for CV_8U, 65535 for CV_16U, 1 for CV_32F. 7052 * 7053 * @param src input image: 8-bit unsigned, 16-bit unsigned ( CV_16UC... ), or single-precision 7054 * floating-point. 7055 * @param dst output image of the same size and depth as src. 7056 * @param code color space conversion code (see #ColorConversionCodes). 7057 * channels is derived automatically from src and code. 7058 * 7059 * SEE: REF: imgproc_color_conversions 7060 */ 7061 public static void cvtColor(Mat src, Mat dst, int code) { 7062 cvtColor_1(src.nativeObj, dst.nativeObj, code); 7063 } 7064 7065 7066 // 7067 // C++: void cv::cvtColorTwoPlane(Mat src1, Mat src2, Mat& dst, int code) 7068 // 7069 7070 /** 7071 * Converts an image from one color space to another where the source image is 7072 * stored in two planes. 7073 * 7074 * This function only supports YUV420 to RGB conversion as of now. 7075 * 7076 * <ul> 7077 * <li> 7078 * #COLOR_YUV2BGR_NV12 7079 * </li> 7080 * <li> 7081 * #COLOR_YUV2RGB_NV12 7082 * </li> 7083 * <li> 7084 * #COLOR_YUV2BGRA_NV12 7085 * </li> 7086 * <li> 7087 * #COLOR_YUV2RGBA_NV12 7088 * </li> 7089 * <li> 7090 * #COLOR_YUV2BGR_NV21 7091 * </li> 7092 * <li> 7093 * #COLOR_YUV2RGB_NV21 7094 * </li> 7095 * <li> 7096 * #COLOR_YUV2BGRA_NV21 7097 * </li> 7098 * <li> 7099 * #COLOR_YUV2RGBA_NV21 7100 * </li> 7101 * </ul> 7102 * @param src1 automatically generated 7103 * @param src2 automatically generated 7104 * @param dst automatically generated 7105 * @param code automatically generated 7106 */ 7107 public static void cvtColorTwoPlane(Mat src1, Mat src2, Mat dst, int code) { 7108 cvtColorTwoPlane_0(src1.nativeObj, src2.nativeObj, dst.nativeObj, code); 7109 } 7110 7111 7112 // 7113 // C++: void cv::demosaicing(Mat src, Mat& dst, int code, int dstCn = 0) 7114 // 7115 7116 /** 7117 * main function for all demosaicing processes 7118 * 7119 * @param src input image: 8-bit unsigned or 16-bit unsigned. 7120 * @param dst output image of the same size and depth as src. 7121 * @param code Color space conversion code (see the description below). 7122 * @param dstCn number of channels in the destination image; if the parameter is 0, the number of the 7123 * channels is derived automatically from src and code. 7124 * 7125 * The function can do the following transformations: 7126 * 7127 * <ul> 7128 * <li> 7129 * Demosaicing using bilinear interpolation 7130 * </li> 7131 * </ul> 7132 * 7133 * #COLOR_BayerBG2BGR , #COLOR_BayerGB2BGR , #COLOR_BayerRG2BGR , #COLOR_BayerGR2BGR 7134 * 7135 * #COLOR_BayerBG2GRAY , #COLOR_BayerGB2GRAY , #COLOR_BayerRG2GRAY , #COLOR_BayerGR2GRAY 7136 * 7137 * <ul> 7138 * <li> 7139 * Demosaicing using Variable Number of Gradients. 7140 * </li> 7141 * </ul> 7142 * 7143 * #COLOR_BayerBG2BGR_VNG , #COLOR_BayerGB2BGR_VNG , #COLOR_BayerRG2BGR_VNG , #COLOR_BayerGR2BGR_VNG 7144 * 7145 * <ul> 7146 * <li> 7147 * Edge-Aware Demosaicing. 7148 * </li> 7149 * </ul> 7150 * 7151 * #COLOR_BayerBG2BGR_EA , #COLOR_BayerGB2BGR_EA , #COLOR_BayerRG2BGR_EA , #COLOR_BayerGR2BGR_EA 7152 * 7153 * <ul> 7154 * <li> 7155 * Demosaicing with alpha channel 7156 * </li> 7157 * </ul> 7158 * 7159 * #COLOR_BayerBG2BGRA , #COLOR_BayerGB2BGRA , #COLOR_BayerRG2BGRA , #COLOR_BayerGR2BGRA 7160 * 7161 * SEE: cvtColor 7162 */ 7163 public static void demosaicing(Mat src, Mat dst, int code, int dstCn) { 7164 demosaicing_0(src.nativeObj, dst.nativeObj, code, dstCn); 7165 } 7166 7167 /** 7168 * main function for all demosaicing processes 7169 * 7170 * @param src input image: 8-bit unsigned or 16-bit unsigned. 7171 * @param dst output image of the same size and depth as src. 7172 * @param code Color space conversion code (see the description below). 7173 * channels is derived automatically from src and code. 7174 * 7175 * The function can do the following transformations: 7176 * 7177 * <ul> 7178 * <li> 7179 * Demosaicing using bilinear interpolation 7180 * </li> 7181 * </ul> 7182 * 7183 * #COLOR_BayerBG2BGR , #COLOR_BayerGB2BGR , #COLOR_BayerRG2BGR , #COLOR_BayerGR2BGR 7184 * 7185 * #COLOR_BayerBG2GRAY , #COLOR_BayerGB2GRAY , #COLOR_BayerRG2GRAY , #COLOR_BayerGR2GRAY 7186 * 7187 * <ul> 7188 * <li> 7189 * Demosaicing using Variable Number of Gradients. 7190 * </li> 7191 * </ul> 7192 * 7193 * #COLOR_BayerBG2BGR_VNG , #COLOR_BayerGB2BGR_VNG , #COLOR_BayerRG2BGR_VNG , #COLOR_BayerGR2BGR_VNG 7194 * 7195 * <ul> 7196 * <li> 7197 * Edge-Aware Demosaicing. 7198 * </li> 7199 * </ul> 7200 * 7201 * #COLOR_BayerBG2BGR_EA , #COLOR_BayerGB2BGR_EA , #COLOR_BayerRG2BGR_EA , #COLOR_BayerGR2BGR_EA 7202 * 7203 * <ul> 7204 * <li> 7205 * Demosaicing with alpha channel 7206 * </li> 7207 * </ul> 7208 * 7209 * #COLOR_BayerBG2BGRA , #COLOR_BayerGB2BGRA , #COLOR_BayerRG2BGRA , #COLOR_BayerGR2BGRA 7210 * 7211 * SEE: cvtColor 7212 */ 7213 public static void demosaicing(Mat src, Mat dst, int code) { 7214 demosaicing_1(src.nativeObj, dst.nativeObj, code); 7215 } 7216 7217 7218 // 7219 // C++: Moments cv::moments(Mat array, bool binaryImage = false) 7220 // 7221 7222 /** 7223 * Calculates all of the moments up to the third order of a polygon or rasterized shape. 7224 * 7225 * The function computes moments, up to the 3rd order, of a vector shape or a rasterized shape. The 7226 * results are returned in the structure cv::Moments. 7227 * 7228 * @param array Raster image (single-channel, 8-bit or floating-point 2D array) or an array ( 7229 * \(1 \times N\) or \(N \times 1\) ) of 2D points (Point or Point2f ). 7230 * @param binaryImage If it is true, all non-zero image pixels are treated as 1's. The parameter is 7231 * used for images only. 7232 * @return moments. 7233 * 7234 * <b>Note:</b> Only applicable to contour moments calculations from Python bindings: Note that the numpy 7235 * type for the input array should be either np.int32 or np.float32. 7236 * 7237 * SEE: contourArea, arcLength 7238 */ 7239 public static Moments moments(Mat array, boolean binaryImage) { 7240 return new Moments(moments_0(array.nativeObj, binaryImage)); 7241 } 7242 7243 /** 7244 * Calculates all of the moments up to the third order of a polygon or rasterized shape. 7245 * 7246 * The function computes moments, up to the 3rd order, of a vector shape or a rasterized shape. The 7247 * results are returned in the structure cv::Moments. 7248 * 7249 * @param array Raster image (single-channel, 8-bit or floating-point 2D array) or an array ( 7250 * \(1 \times N\) or \(N \times 1\) ) of 2D points (Point or Point2f ). 7251 * used for images only. 7252 * @return moments. 7253 * 7254 * <b>Note:</b> Only applicable to contour moments calculations from Python bindings: Note that the numpy 7255 * type for the input array should be either np.int32 or np.float32. 7256 * 7257 * SEE: contourArea, arcLength 7258 */ 7259 public static Moments moments(Mat array) { 7260 return new Moments(moments_1(array.nativeObj)); 7261 } 7262 7263 7264 // 7265 // C++: void cv::HuMoments(Moments m, Mat& hu) 7266 // 7267 7268 public static void HuMoments(Moments m, Mat hu) { 7269 HuMoments_0(m.m00, m.m10, m.m01, m.m20, m.m11, m.m02, m.m30, m.m21, m.m12, m.m03, hu.nativeObj); 7270 } 7271 7272 7273 // 7274 // C++: void cv::matchTemplate(Mat image, Mat templ, Mat& result, int method, Mat mask = Mat()) 7275 // 7276 7277 /** 7278 * Compares a template against overlapped image regions. 7279 * 7280 * The function slides through image , compares the overlapped patches of size \(w \times h\) against 7281 * templ using the specified method and stores the comparison results in result . #TemplateMatchModes 7282 * describes the formulae for the available comparison methods ( \(I\) denotes image, \(T\) 7283 * template, \(R\) result, \(M\) the optional mask ). The summation is done over template and/or 7284 * the image patch: \(x' = 0...w-1, y' = 0...h-1\) 7285 * 7286 * After the function finishes the comparison, the best matches can be found as global minimums (when 7287 * #TM_SQDIFF was used) or maximums (when #TM_CCORR or #TM_CCOEFF was used) using the 7288 * #minMaxLoc function. In case of a color image, template summation in the numerator and each sum in 7289 * the denominator is done over all of the channels and separate mean values are used for each channel. 7290 * That is, the function can take a color template and a color image. The result will still be a 7291 * single-channel image, which is easier to analyze. 7292 * 7293 * @param image Image where the search is running. It must be 8-bit or 32-bit floating-point. 7294 * @param templ Searched template. It must be not greater than the source image and have the same 7295 * data type. 7296 * @param result Map of comparison results. It must be single-channel 32-bit floating-point. If image 7297 * is \(W \times H\) and templ is \(w \times h\) , then result is \((W-w+1) \times (H-h+1)\) . 7298 * @param method Parameter specifying the comparison method, see #TemplateMatchModes 7299 * @param mask Optional mask. It must have the same size as templ. It must either have the same number 7300 * of channels as template or only one channel, which is then used for all template and 7301 * image channels. If the data type is #CV_8U, the mask is interpreted as a binary mask, 7302 * meaning only elements where mask is nonzero are used and are kept unchanged independent 7303 * of the actual mask value (weight equals 1). For data tpye #CV_32F, the mask values are 7304 * used as weights. The exact formulas are documented in #TemplateMatchModes. 7305 */ 7306 public static void matchTemplate(Mat image, Mat templ, Mat result, int method, Mat mask) { 7307 matchTemplate_0(image.nativeObj, templ.nativeObj, result.nativeObj, method, mask.nativeObj); 7308 } 7309 7310 /** 7311 * Compares a template against overlapped image regions. 7312 * 7313 * The function slides through image , compares the overlapped patches of size \(w \times h\) against 7314 * templ using the specified method and stores the comparison results in result . #TemplateMatchModes 7315 * describes the formulae for the available comparison methods ( \(I\) denotes image, \(T\) 7316 * template, \(R\) result, \(M\) the optional mask ). The summation is done over template and/or 7317 * the image patch: \(x' = 0...w-1, y' = 0...h-1\) 7318 * 7319 * After the function finishes the comparison, the best matches can be found as global minimums (when 7320 * #TM_SQDIFF was used) or maximums (when #TM_CCORR or #TM_CCOEFF was used) using the 7321 * #minMaxLoc function. In case of a color image, template summation in the numerator and each sum in 7322 * the denominator is done over all of the channels and separate mean values are used for each channel. 7323 * That is, the function can take a color template and a color image. The result will still be a 7324 * single-channel image, which is easier to analyze. 7325 * 7326 * @param image Image where the search is running. It must be 8-bit or 32-bit floating-point. 7327 * @param templ Searched template. It must be not greater than the source image and have the same 7328 * data type. 7329 * @param result Map of comparison results. It must be single-channel 32-bit floating-point. If image 7330 * is \(W \times H\) and templ is \(w \times h\) , then result is \((W-w+1) \times (H-h+1)\) . 7331 * @param method Parameter specifying the comparison method, see #TemplateMatchModes 7332 * of channels as template or only one channel, which is then used for all template and 7333 * image channels. If the data type is #CV_8U, the mask is interpreted as a binary mask, 7334 * meaning only elements where mask is nonzero are used and are kept unchanged independent 7335 * of the actual mask value (weight equals 1). For data tpye #CV_32F, the mask values are 7336 * used as weights. The exact formulas are documented in #TemplateMatchModes. 7337 */ 7338 public static void matchTemplate(Mat image, Mat templ, Mat result, int method) { 7339 matchTemplate_1(image.nativeObj, templ.nativeObj, result.nativeObj, method); 7340 } 7341 7342 7343 // 7344 // C++: int cv::connectedComponents(Mat image, Mat& labels, int connectivity, int ltype, int ccltype) 7345 // 7346 7347 /** 7348 * computes the connected components labeled image of boolean image 7349 * 7350 * image with 4 or 8 way connectivity - returns N, the total number of labels [0, N-1] where 0 7351 * represents the background label. ltype specifies the output label image type, an important 7352 * consideration based on the total number of labels or alternatively the total number of pixels in 7353 * the source image. ccltype specifies the connected components labeling algorithm to use, currently 7354 * Bolelli (Spaghetti) CITE: Bolelli2019, Grana (BBDT) CITE: Grana2010 and Wu's (SAUF) CITE: Wu2009 algorithms 7355 * are supported, see the #ConnectedComponentsAlgorithmsTypes for details. Note that SAUF algorithm forces 7356 * a row major ordering of labels while Spaghetti and BBDT do not. 7357 * This function uses parallel version of the algorithms if at least one allowed 7358 * parallel framework is enabled and if the rows of the image are at least twice the number returned by #getNumberOfCPUs. 7359 * 7360 * @param image the 8-bit single-channel image to be labeled 7361 * @param labels destination labeled image 7362 * @param connectivity 8 or 4 for 8-way or 4-way connectivity respectively 7363 * @param ltype output image label type. Currently CV_32S and CV_16U are supported. 7364 * @param ccltype connected components algorithm type (see the #ConnectedComponentsAlgorithmsTypes). 7365 * @return automatically generated 7366 */ 7367 public static int connectedComponentsWithAlgorithm(Mat image, Mat labels, int connectivity, int ltype, int ccltype) { 7368 return connectedComponentsWithAlgorithm_0(image.nativeObj, labels.nativeObj, connectivity, ltype, ccltype); 7369 } 7370 7371 7372 // 7373 // C++: int cv::connectedComponents(Mat image, Mat& labels, int connectivity = 8, int ltype = CV_32S) 7374 // 7375 7376 /** 7377 * 7378 * 7379 * @param image the 8-bit single-channel image to be labeled 7380 * @param labels destination labeled image 7381 * @param connectivity 8 or 4 for 8-way or 4-way connectivity respectively 7382 * @param ltype output image label type. Currently CV_32S and CV_16U are supported. 7383 * @return automatically generated 7384 */ 7385 public static int connectedComponents(Mat image, Mat labels, int connectivity, int ltype) { 7386 return connectedComponents_0(image.nativeObj, labels.nativeObj, connectivity, ltype); 7387 } 7388 7389 /** 7390 * 7391 * 7392 * @param image the 8-bit single-channel image to be labeled 7393 * @param labels destination labeled image 7394 * @param connectivity 8 or 4 for 8-way or 4-way connectivity respectively 7395 * @return automatically generated 7396 */ 7397 public static int connectedComponents(Mat image, Mat labels, int connectivity) { 7398 return connectedComponents_1(image.nativeObj, labels.nativeObj, connectivity); 7399 } 7400 7401 /** 7402 * 7403 * 7404 * @param image the 8-bit single-channel image to be labeled 7405 * @param labels destination labeled image 7406 * @return automatically generated 7407 */ 7408 public static int connectedComponents(Mat image, Mat labels) { 7409 return connectedComponents_2(image.nativeObj, labels.nativeObj); 7410 } 7411 7412 7413 // 7414 // C++: int cv::connectedComponentsWithStats(Mat image, Mat& labels, Mat& stats, Mat& centroids, int connectivity, int ltype, int ccltype) 7415 // 7416 7417 /** 7418 * computes the connected components labeled image of boolean image and also produces a statistics output for each label 7419 * 7420 * image with 4 or 8 way connectivity - returns N, the total number of labels [0, N-1] where 0 7421 * represents the background label. ltype specifies the output label image type, an important 7422 * consideration based on the total number of labels or alternatively the total number of pixels in 7423 * the source image. ccltype specifies the connected components labeling algorithm to use, currently 7424 * Bolelli (Spaghetti) CITE: Bolelli2019, Grana (BBDT) CITE: Grana2010 and Wu's (SAUF) CITE: Wu2009 algorithms 7425 * are supported, see the #ConnectedComponentsAlgorithmsTypes for details. Note that SAUF algorithm forces 7426 * a row major ordering of labels while Spaghetti and BBDT do not. 7427 * This function uses parallel version of the algorithms (statistics included) if at least one allowed 7428 * parallel framework is enabled and if the rows of the image are at least twice the number returned by #getNumberOfCPUs. 7429 * 7430 * @param image the 8-bit single-channel image to be labeled 7431 * @param labels destination labeled image 7432 * @param stats statistics output for each label, including the background label. 7433 * Statistics are accessed via stats(label, COLUMN) where COLUMN is one of 7434 * #ConnectedComponentsTypes, selecting the statistic. The data type is CV_32S. 7435 * @param centroids centroid output for each label, including the background label. Centroids are 7436 * accessed via centroids(label, 0) for x and centroids(label, 1) for y. The data type CV_64F. 7437 * @param connectivity 8 or 4 for 8-way or 4-way connectivity respectively 7438 * @param ltype output image label type. Currently CV_32S and CV_16U are supported. 7439 * @param ccltype connected components algorithm type (see #ConnectedComponentsAlgorithmsTypes). 7440 * @return automatically generated 7441 */ 7442 public static int connectedComponentsWithStatsWithAlgorithm(Mat image, Mat labels, Mat stats, Mat centroids, int connectivity, int ltype, int ccltype) { 7443 return connectedComponentsWithStatsWithAlgorithm_0(image.nativeObj, labels.nativeObj, stats.nativeObj, centroids.nativeObj, connectivity, ltype, ccltype); 7444 } 7445 7446 7447 // 7448 // C++: int cv::connectedComponentsWithStats(Mat image, Mat& labels, Mat& stats, Mat& centroids, int connectivity = 8, int ltype = CV_32S) 7449 // 7450 7451 /** 7452 * 7453 * @param image the 8-bit single-channel image to be labeled 7454 * @param labels destination labeled image 7455 * @param stats statistics output for each label, including the background label. 7456 * Statistics are accessed via stats(label, COLUMN) where COLUMN is one of 7457 * #ConnectedComponentsTypes, selecting the statistic. The data type is CV_32S. 7458 * @param centroids centroid output for each label, including the background label. Centroids are 7459 * accessed via centroids(label, 0) for x and centroids(label, 1) for y. The data type CV_64F. 7460 * @param connectivity 8 or 4 for 8-way or 4-way connectivity respectively 7461 * @param ltype output image label type. Currently CV_32S and CV_16U are supported. 7462 * @return automatically generated 7463 */ 7464 public static int connectedComponentsWithStats(Mat image, Mat labels, Mat stats, Mat centroids, int connectivity, int ltype) { 7465 return connectedComponentsWithStats_0(image.nativeObj, labels.nativeObj, stats.nativeObj, centroids.nativeObj, connectivity, ltype); 7466 } 7467 7468 /** 7469 * 7470 * @param image the 8-bit single-channel image to be labeled 7471 * @param labels destination labeled image 7472 * @param stats statistics output for each label, including the background label. 7473 * Statistics are accessed via stats(label, COLUMN) where COLUMN is one of 7474 * #ConnectedComponentsTypes, selecting the statistic. The data type is CV_32S. 7475 * @param centroids centroid output for each label, including the background label. Centroids are 7476 * accessed via centroids(label, 0) for x and centroids(label, 1) for y. The data type CV_64F. 7477 * @param connectivity 8 or 4 for 8-way or 4-way connectivity respectively 7478 * @return automatically generated 7479 */ 7480 public static int connectedComponentsWithStats(Mat image, Mat labels, Mat stats, Mat centroids, int connectivity) { 7481 return connectedComponentsWithStats_1(image.nativeObj, labels.nativeObj, stats.nativeObj, centroids.nativeObj, connectivity); 7482 } 7483 7484 /** 7485 * 7486 * @param image the 8-bit single-channel image to be labeled 7487 * @param labels destination labeled image 7488 * @param stats statistics output for each label, including the background label. 7489 * Statistics are accessed via stats(label, COLUMN) where COLUMN is one of 7490 * #ConnectedComponentsTypes, selecting the statistic. The data type is CV_32S. 7491 * @param centroids centroid output for each label, including the background label. Centroids are 7492 * accessed via centroids(label, 0) for x and centroids(label, 1) for y. The data type CV_64F. 7493 * @return automatically generated 7494 */ 7495 public static int connectedComponentsWithStats(Mat image, Mat labels, Mat stats, Mat centroids) { 7496 return connectedComponentsWithStats_2(image.nativeObj, labels.nativeObj, stats.nativeObj, centroids.nativeObj); 7497 } 7498 7499 7500 // 7501 // C++: void cv::findContours(Mat image, vector_vector_Point& contours, Mat& hierarchy, int mode, int method, Point offset = Point()) 7502 // 7503 7504 /** 7505 * Finds contours in a binary image. 7506 * 7507 * The function retrieves contours from the binary image using the algorithm CITE: Suzuki85 . The contours 7508 * are a useful tool for shape analysis and object detection and recognition. See squares.cpp in the 7509 * OpenCV sample directory. 7510 * <b>Note:</b> Since opencv 3.2 source image is not modified by this function. 7511 * 7512 * @param image Source, an 8-bit single-channel image. Non-zero pixels are treated as 1's. Zero 7513 * pixels remain 0's, so the image is treated as binary . You can use #compare, #inRange, #threshold , 7514 * #adaptiveThreshold, #Canny, and others to create a binary image out of a grayscale or color one. 7515 * If mode equals to #RETR_CCOMP or #RETR_FLOODFILL, the input can also be a 32-bit integer image of labels (CV_32SC1). 7516 * @param contours Detected contours. Each contour is stored as a vector of points (e.g. 7517 * std::vector<std::vector<cv::Point> >). 7518 * @param hierarchy Optional output vector (e.g. std::vector<cv::Vec4i>), containing information about the image topology. It has 7519 * as many elements as the number of contours. For each i-th contour contours[i], the elements 7520 * hierarchy[i][0] , hierarchy[i][1] , hierarchy[i][2] , and hierarchy[i][3] are set to 0-based indices 7521 * in contours of the next and previous contours at the same hierarchical level, the first child 7522 * contour and the parent contour, respectively. If for the contour i there are no next, previous, 7523 * parent, or nested contours, the corresponding elements of hierarchy[i] will be negative. 7524 * <b>Note:</b> In Python, hierarchy is nested inside a top level array. Use hierarchy[0][i] to access hierarchical elements of i-th contour. 7525 * @param mode Contour retrieval mode, see #RetrievalModes 7526 * @param method Contour approximation method, see #ContourApproximationModes 7527 * @param offset Optional offset by which every contour point is shifted. This is useful if the 7528 * contours are extracted from the image ROI and then they should be analyzed in the whole image 7529 * context. 7530 */ 7531 public static void findContours(Mat image, List<MatOfPoint> contours, Mat hierarchy, int mode, int method, Point offset) { 7532 Mat contours_mat = new Mat(); 7533 findContours_0(image.nativeObj, contours_mat.nativeObj, hierarchy.nativeObj, mode, method, offset.x, offset.y); 7534 Converters.Mat_to_vector_vector_Point(contours_mat, contours); 7535 contours_mat.release(); 7536 } 7537 7538 /** 7539 * Finds contours in a binary image. 7540 * 7541 * The function retrieves contours from the binary image using the algorithm CITE: Suzuki85 . The contours 7542 * are a useful tool for shape analysis and object detection and recognition. See squares.cpp in the 7543 * OpenCV sample directory. 7544 * <b>Note:</b> Since opencv 3.2 source image is not modified by this function. 7545 * 7546 * @param image Source, an 8-bit single-channel image. Non-zero pixels are treated as 1's. Zero 7547 * pixels remain 0's, so the image is treated as binary . You can use #compare, #inRange, #threshold , 7548 * #adaptiveThreshold, #Canny, and others to create a binary image out of a grayscale or color one. 7549 * If mode equals to #RETR_CCOMP or #RETR_FLOODFILL, the input can also be a 32-bit integer image of labels (CV_32SC1). 7550 * @param contours Detected contours. Each contour is stored as a vector of points (e.g. 7551 * std::vector<std::vector<cv::Point> >). 7552 * @param hierarchy Optional output vector (e.g. std::vector<cv::Vec4i>), containing information about the image topology. It has 7553 * as many elements as the number of contours. For each i-th contour contours[i], the elements 7554 * hierarchy[i][0] , hierarchy[i][1] , hierarchy[i][2] , and hierarchy[i][3] are set to 0-based indices 7555 * in contours of the next and previous contours at the same hierarchical level, the first child 7556 * contour and the parent contour, respectively. If for the contour i there are no next, previous, 7557 * parent, or nested contours, the corresponding elements of hierarchy[i] will be negative. 7558 * <b>Note:</b> In Python, hierarchy is nested inside a top level array. Use hierarchy[0][i] to access hierarchical elements of i-th contour. 7559 * @param mode Contour retrieval mode, see #RetrievalModes 7560 * @param method Contour approximation method, see #ContourApproximationModes 7561 * contours are extracted from the image ROI and then they should be analyzed in the whole image 7562 * context. 7563 */ 7564 public static void findContours(Mat image, List<MatOfPoint> contours, Mat hierarchy, int mode, int method) { 7565 Mat contours_mat = new Mat(); 7566 findContours_1(image.nativeObj, contours_mat.nativeObj, hierarchy.nativeObj, mode, method); 7567 Converters.Mat_to_vector_vector_Point(contours_mat, contours); 7568 contours_mat.release(); 7569 } 7570 7571 7572 // 7573 // C++: void cv::approxPolyDP(vector_Point2f curve, vector_Point2f& approxCurve, double epsilon, bool closed) 7574 // 7575 7576 /** 7577 * Approximates a polygonal curve(s) with the specified precision. 7578 * 7579 * The function cv::approxPolyDP approximates a curve or a polygon with another curve/polygon with less 7580 * vertices so that the distance between them is less or equal to the specified precision. It uses the 7581 * Douglas-Peucker algorithm <http://en.wikipedia.org/wiki/Ramer-Douglas-Peucker_algorithm> 7582 * 7583 * @param curve Input vector of a 2D point stored in std::vector or Mat 7584 * @param approxCurve Result of the approximation. The type should match the type of the input curve. 7585 * @param epsilon Parameter specifying the approximation accuracy. This is the maximum distance 7586 * between the original curve and its approximation. 7587 * @param closed If true, the approximated curve is closed (its first and last vertices are 7588 * connected). Otherwise, it is not closed. 7589 */ 7590 public static void approxPolyDP(MatOfPoint2f curve, MatOfPoint2f approxCurve, double epsilon, boolean closed) { 7591 Mat curve_mat = curve; 7592 Mat approxCurve_mat = approxCurve; 7593 approxPolyDP_0(curve_mat.nativeObj, approxCurve_mat.nativeObj, epsilon, closed); 7594 } 7595 7596 7597 // 7598 // C++: double cv::arcLength(vector_Point2f curve, bool closed) 7599 // 7600 7601 /** 7602 * Calculates a contour perimeter or a curve length. 7603 * 7604 * The function computes a curve length or a closed contour perimeter. 7605 * 7606 * @param curve Input vector of 2D points, stored in std::vector or Mat. 7607 * @param closed Flag indicating whether the curve is closed or not. 7608 * @return automatically generated 7609 */ 7610 public static double arcLength(MatOfPoint2f curve, boolean closed) { 7611 Mat curve_mat = curve; 7612 return arcLength_0(curve_mat.nativeObj, closed); 7613 } 7614 7615 7616 // 7617 // C++: Rect cv::boundingRect(Mat array) 7618 // 7619 7620 /** 7621 * Calculates the up-right bounding rectangle of a point set or non-zero pixels of gray-scale image. 7622 * 7623 * The function calculates and returns the minimal up-right bounding rectangle for the specified point set or 7624 * non-zero pixels of gray-scale image. 7625 * 7626 * @param array Input gray-scale image or 2D point set, stored in std::vector or Mat. 7627 * @return automatically generated 7628 */ 7629 public static Rect boundingRect(Mat array) { 7630 return new Rect(boundingRect_0(array.nativeObj)); 7631 } 7632 7633 7634 // 7635 // C++: double cv::contourArea(Mat contour, bool oriented = false) 7636 // 7637 7638 /** 7639 * Calculates a contour area. 7640 * 7641 * The function computes a contour area. Similarly to moments , the area is computed using the Green 7642 * formula. Thus, the returned area and the number of non-zero pixels, if you draw the contour using 7643 * #drawContours or #fillPoly , can be different. Also, the function will most certainly give a wrong 7644 * results for contours with self-intersections. 7645 * 7646 * Example: 7647 * <code> 7648 * vector<Point> contour; 7649 * contour.push_back(Point2f(0, 0)); 7650 * contour.push_back(Point2f(10, 0)); 7651 * contour.push_back(Point2f(10, 10)); 7652 * contour.push_back(Point2f(5, 4)); 7653 * 7654 * double area0 = contourArea(contour); 7655 * vector<Point> approx; 7656 * approxPolyDP(contour, approx, 5, true); 7657 * double area1 = contourArea(approx); 7658 * 7659 * cout << "area0 =" << area0 << endl << 7660 * "area1 =" << area1 << endl << 7661 * "approx poly vertices" << approx.size() << endl; 7662 * </code> 7663 * @param contour Input vector of 2D points (contour vertices), stored in std::vector or Mat. 7664 * @param oriented Oriented area flag. If it is true, the function returns a signed area value, 7665 * depending on the contour orientation (clockwise or counter-clockwise). Using this feature you can 7666 * determine orientation of a contour by taking the sign of an area. By default, the parameter is 7667 * false, which means that the absolute value is returned. 7668 * @return automatically generated 7669 */ 7670 public static double contourArea(Mat contour, boolean oriented) { 7671 return contourArea_0(contour.nativeObj, oriented); 7672 } 7673 7674 /** 7675 * Calculates a contour area. 7676 * 7677 * The function computes a contour area. Similarly to moments , the area is computed using the Green 7678 * formula. Thus, the returned area and the number of non-zero pixels, if you draw the contour using 7679 * #drawContours or #fillPoly , can be different. Also, the function will most certainly give a wrong 7680 * results for contours with self-intersections. 7681 * 7682 * Example: 7683 * <code> 7684 * vector<Point> contour; 7685 * contour.push_back(Point2f(0, 0)); 7686 * contour.push_back(Point2f(10, 0)); 7687 * contour.push_back(Point2f(10, 10)); 7688 * contour.push_back(Point2f(5, 4)); 7689 * 7690 * double area0 = contourArea(contour); 7691 * vector<Point> approx; 7692 * approxPolyDP(contour, approx, 5, true); 7693 * double area1 = contourArea(approx); 7694 * 7695 * cout << "area0 =" << area0 << endl << 7696 * "area1 =" << area1 << endl << 7697 * "approx poly vertices" << approx.size() << endl; 7698 * </code> 7699 * @param contour Input vector of 2D points (contour vertices), stored in std::vector or Mat. 7700 * depending on the contour orientation (clockwise or counter-clockwise). Using this feature you can 7701 * determine orientation of a contour by taking the sign of an area. By default, the parameter is 7702 * false, which means that the absolute value is returned. 7703 * @return automatically generated 7704 */ 7705 public static double contourArea(Mat contour) { 7706 return contourArea_1(contour.nativeObj); 7707 } 7708 7709 7710 // 7711 // C++: RotatedRect cv::minAreaRect(vector_Point2f points) 7712 // 7713 7714 /** 7715 * Finds a rotated rectangle of the minimum area enclosing the input 2D point set. 7716 * 7717 * The function calculates and returns the minimum-area bounding rectangle (possibly rotated) for a 7718 * specified point set. Developer should keep in mind that the returned RotatedRect can contain negative 7719 * indices when data is close to the containing Mat element boundary. 7720 * 7721 * @param points Input vector of 2D points, stored in std::vector<> or Mat 7722 * @return automatically generated 7723 */ 7724 public static RotatedRect minAreaRect(MatOfPoint2f points) { 7725 Mat points_mat = points; 7726 return new RotatedRect(minAreaRect_0(points_mat.nativeObj)); 7727 } 7728 7729 7730 // 7731 // C++: void cv::boxPoints(RotatedRect box, Mat& points) 7732 // 7733 7734 /** 7735 * Finds the four vertices of a rotated rect. Useful to draw the rotated rectangle. 7736 * 7737 * The function finds the four vertices of a rotated rectangle. This function is useful to draw the 7738 * rectangle. In C++, instead of using this function, you can directly use RotatedRect::points method. Please 7739 * visit the REF: tutorial_bounding_rotated_ellipses "tutorial on Creating Bounding rotated boxes and ellipses for contours" for more information. 7740 * 7741 * @param box The input rotated rectangle. It may be the output of 7742 * @param points The output array of four vertices of rectangles. 7743 */ 7744 public static void boxPoints(RotatedRect box, Mat points) { 7745 boxPoints_0(box.center.x, box.center.y, box.size.width, box.size.height, box.angle, points.nativeObj); 7746 } 7747 7748 7749 // 7750 // C++: void cv::minEnclosingCircle(vector_Point2f points, Point2f& center, float& radius) 7751 // 7752 7753 /** 7754 * Finds a circle of the minimum area enclosing a 2D point set. 7755 * 7756 * The function finds the minimal enclosing circle of a 2D point set using an iterative algorithm. 7757 * 7758 * @param points Input vector of 2D points, stored in std::vector<> or Mat 7759 * @param center Output center of the circle. 7760 * @param radius Output radius of the circle. 7761 */ 7762 public static void minEnclosingCircle(MatOfPoint2f points, Point center, float[] radius) { 7763 Mat points_mat = points; 7764 double[] center_out = new double[2]; 7765 double[] radius_out = new double[1]; 7766 minEnclosingCircle_0(points_mat.nativeObj, center_out, radius_out); 7767 if(center!=null){ center.x = center_out[0]; center.y = center_out[1]; } 7768 if(radius!=null) radius[0] = (float)radius_out[0]; 7769 } 7770 7771 7772 // 7773 // C++: double cv::minEnclosingTriangle(Mat points, Mat& triangle) 7774 // 7775 7776 /** 7777 * Finds a triangle of minimum area enclosing a 2D point set and returns its area. 7778 * 7779 * The function finds a triangle of minimum area enclosing the given set of 2D points and returns its 7780 * area. The output for a given 2D point set is shown in the image below. 2D points are depicted in 7781 * red* and the enclosing triangle in *yellow*. 7782 * 7783 * ![Sample output of the minimum enclosing triangle function](pics/minenclosingtriangle.png) 7784 * 7785 * The implementation of the algorithm is based on O'Rourke's CITE: ORourke86 and Klee and Laskowski's 7786 * CITE: KleeLaskowski85 papers. O'Rourke provides a \(\theta(n)\) algorithm for finding the minimal 7787 * enclosing triangle of a 2D convex polygon with n vertices. Since the #minEnclosingTriangle function 7788 * takes a 2D point set as input an additional preprocessing step of computing the convex hull of the 7789 * 2D point set is required. The complexity of the #convexHull function is \(O(n log(n))\) which is higher 7790 * than \(\theta(n)\). Thus the overall complexity of the function is \(O(n log(n))\). 7791 * 7792 * @param points Input vector of 2D points with depth CV_32S or CV_32F, stored in std::vector<> or Mat 7793 * @param triangle Output vector of three 2D points defining the vertices of the triangle. The depth 7794 * of the OutputArray must be CV_32F. 7795 * @return automatically generated 7796 */ 7797 public static double minEnclosingTriangle(Mat points, Mat triangle) { 7798 return minEnclosingTriangle_0(points.nativeObj, triangle.nativeObj); 7799 } 7800 7801 7802 // 7803 // C++: double cv::matchShapes(Mat contour1, Mat contour2, int method, double parameter) 7804 // 7805 7806 /** 7807 * Compares two shapes. 7808 * 7809 * The function compares two shapes. All three implemented methods use the Hu invariants (see #HuMoments) 7810 * 7811 * @param contour1 First contour or grayscale image. 7812 * @param contour2 Second contour or grayscale image. 7813 * @param method Comparison method, see #ShapeMatchModes 7814 * @param parameter Method-specific parameter (not supported now). 7815 * @return automatically generated 7816 */ 7817 public static double matchShapes(Mat contour1, Mat contour2, int method, double parameter) { 7818 return matchShapes_0(contour1.nativeObj, contour2.nativeObj, method, parameter); 7819 } 7820 7821 7822 // 7823 // C++: void cv::convexHull(vector_Point points, vector_int& hull, bool clockwise = false, _hidden_ returnPoints = true) 7824 // 7825 7826 /** 7827 * Finds the convex hull of a point set. 7828 * 7829 * The function cv::convexHull finds the convex hull of a 2D point set using the Sklansky's algorithm CITE: Sklansky82 7830 * that has *O(N logN)* complexity in the current implementation. 7831 * 7832 * @param points Input 2D point set, stored in std::vector or Mat. 7833 * @param hull Output convex hull. It is either an integer vector of indices or vector of points. In 7834 * the first case, the hull elements are 0-based indices of the convex hull points in the original 7835 * array (since the set of convex hull points is a subset of the original point set). In the second 7836 * case, hull elements are the convex hull points themselves. 7837 * @param clockwise Orientation flag. If it is true, the output convex hull is oriented clockwise. 7838 * Otherwise, it is oriented counter-clockwise. The assumed coordinate system has its X axis pointing 7839 * to the right, and its Y axis pointing upwards. 7840 * returns convex hull points. Otherwise, it returns indices of the convex hull points. When the 7841 * output array is std::vector, the flag is ignored, and the output depends on the type of the 7842 * vector: std::vector<int> implies returnPoints=false, std::vector<Point> implies 7843 * returnPoints=true. 7844 * 7845 * <b>Note:</b> {@code points} and {@code hull} should be different arrays, inplace processing isn't supported. 7846 * 7847 * Check REF: tutorial_hull "the corresponding tutorial" for more details. 7848 * 7849 * useful links: 7850 * 7851 * https://www.learnopencv.com/convex-hull-using-opencv-in-python-and-c/ 7852 */ 7853 public static void convexHull(MatOfPoint points, MatOfInt hull, boolean clockwise) { 7854 Mat points_mat = points; 7855 Mat hull_mat = hull; 7856 convexHull_0(points_mat.nativeObj, hull_mat.nativeObj, clockwise); 7857 } 7858 7859 /** 7860 * Finds the convex hull of a point set. 7861 * 7862 * The function cv::convexHull finds the convex hull of a 2D point set using the Sklansky's algorithm CITE: Sklansky82 7863 * that has *O(N logN)* complexity in the current implementation. 7864 * 7865 * @param points Input 2D point set, stored in std::vector or Mat. 7866 * @param hull Output convex hull. It is either an integer vector of indices or vector of points. In 7867 * the first case, the hull elements are 0-based indices of the convex hull points in the original 7868 * array (since the set of convex hull points is a subset of the original point set). In the second 7869 * case, hull elements are the convex hull points themselves. 7870 * Otherwise, it is oriented counter-clockwise. The assumed coordinate system has its X axis pointing 7871 * to the right, and its Y axis pointing upwards. 7872 * returns convex hull points. Otherwise, it returns indices of the convex hull points. When the 7873 * output array is std::vector, the flag is ignored, and the output depends on the type of the 7874 * vector: std::vector<int> implies returnPoints=false, std::vector<Point> implies 7875 * returnPoints=true. 7876 * 7877 * <b>Note:</b> {@code points} and {@code hull} should be different arrays, inplace processing isn't supported. 7878 * 7879 * Check REF: tutorial_hull "the corresponding tutorial" for more details. 7880 * 7881 * useful links: 7882 * 7883 * https://www.learnopencv.com/convex-hull-using-opencv-in-python-and-c/ 7884 */ 7885 public static void convexHull(MatOfPoint points, MatOfInt hull) { 7886 Mat points_mat = points; 7887 Mat hull_mat = hull; 7888 convexHull_2(points_mat.nativeObj, hull_mat.nativeObj); 7889 } 7890 7891 7892 // 7893 // C++: void cv::convexityDefects(vector_Point contour, vector_int convexhull, vector_Vec4i& convexityDefects) 7894 // 7895 7896 /** 7897 * Finds the convexity defects of a contour. 7898 * 7899 * The figure below displays convexity defects of a hand contour: 7900 * 7901 * ![image](pics/defects.png) 7902 * 7903 * @param contour Input contour. 7904 * @param convexhull Convex hull obtained using convexHull that should contain indices of the contour 7905 * points that make the hull. 7906 * @param convexityDefects The output vector of convexity defects. In C++ and the new Python/Java 7907 * interface each convexity defect is represented as 4-element integer vector (a.k.a. #Vec4i): 7908 * (start_index, end_index, farthest_pt_index, fixpt_depth), where indices are 0-based indices 7909 * in the original contour of the convexity defect beginning, end and the farthest point, and 7910 * fixpt_depth is fixed-point approximation (with 8 fractional bits) of the distance between the 7911 * farthest contour point and the hull. That is, to get the floating-point value of the depth will be 7912 * fixpt_depth/256.0. 7913 */ 7914 public static void convexityDefects(MatOfPoint contour, MatOfInt convexhull, MatOfInt4 convexityDefects) { 7915 Mat contour_mat = contour; 7916 Mat convexhull_mat = convexhull; 7917 Mat convexityDefects_mat = convexityDefects; 7918 convexityDefects_0(contour_mat.nativeObj, convexhull_mat.nativeObj, convexityDefects_mat.nativeObj); 7919 } 7920 7921 7922 // 7923 // C++: bool cv::isContourConvex(vector_Point contour) 7924 // 7925 7926 /** 7927 * Tests a contour convexity. 7928 * 7929 * The function tests whether the input contour is convex or not. The contour must be simple, that is, 7930 * without self-intersections. Otherwise, the function output is undefined. 7931 * 7932 * @param contour Input vector of 2D points, stored in std::vector<> or Mat 7933 * @return automatically generated 7934 */ 7935 public static boolean isContourConvex(MatOfPoint contour) { 7936 Mat contour_mat = contour; 7937 return isContourConvex_0(contour_mat.nativeObj); 7938 } 7939 7940 7941 // 7942 // C++: float cv::intersectConvexConvex(Mat p1, Mat p2, Mat& p12, bool handleNested = true) 7943 // 7944 7945 /** 7946 * Finds intersection of two convex polygons 7947 * 7948 * @param p1 First polygon 7949 * @param p2 Second polygon 7950 * @param p12 Output polygon describing the intersecting area 7951 * @param handleNested When true, an intersection is found if one of the polygons is fully enclosed in the other. 7952 * When false, no intersection is found. If the polygons share a side or the vertex of one polygon lies on an edge 7953 * of the other, they are not considered nested and an intersection will be found regardless of the value of handleNested. 7954 * 7955 * @return Absolute value of area of intersecting polygon 7956 * 7957 * <b>Note:</b> intersectConvexConvex doesn't confirm that both polygons are convex and will return invalid results if they aren't. 7958 */ 7959 public static float intersectConvexConvex(Mat p1, Mat p2, Mat p12, boolean handleNested) { 7960 return intersectConvexConvex_0(p1.nativeObj, p2.nativeObj, p12.nativeObj, handleNested); 7961 } 7962 7963 /** 7964 * Finds intersection of two convex polygons 7965 * 7966 * @param p1 First polygon 7967 * @param p2 Second polygon 7968 * @param p12 Output polygon describing the intersecting area 7969 * When false, no intersection is found. If the polygons share a side or the vertex of one polygon lies on an edge 7970 * of the other, they are not considered nested and an intersection will be found regardless of the value of handleNested. 7971 * 7972 * @return Absolute value of area of intersecting polygon 7973 * 7974 * <b>Note:</b> intersectConvexConvex doesn't confirm that both polygons are convex and will return invalid results if they aren't. 7975 */ 7976 public static float intersectConvexConvex(Mat p1, Mat p2, Mat p12) { 7977 return intersectConvexConvex_1(p1.nativeObj, p2.nativeObj, p12.nativeObj); 7978 } 7979 7980 7981 // 7982 // C++: RotatedRect cv::fitEllipse(vector_Point2f points) 7983 // 7984 7985 /** 7986 * Fits an ellipse around a set of 2D points. 7987 * 7988 * The function calculates the ellipse that fits (in a least-squares sense) a set of 2D points best of 7989 * all. It returns the rotated rectangle in which the ellipse is inscribed. The first algorithm described by CITE: Fitzgibbon95 7990 * is used. Developer should keep in mind that it is possible that the returned 7991 * ellipse/rotatedRect data contains negative indices, due to the data points being close to the 7992 * border of the containing Mat element. 7993 * 7994 * @param points Input 2D point set, stored in std::vector<> or Mat 7995 * @return automatically generated 7996 */ 7997 public static RotatedRect fitEllipse(MatOfPoint2f points) { 7998 Mat points_mat = points; 7999 return new RotatedRect(fitEllipse_0(points_mat.nativeObj)); 8000 } 8001 8002 8003 // 8004 // C++: RotatedRect cv::fitEllipseAMS(Mat points) 8005 // 8006 8007 /** 8008 * Fits an ellipse around a set of 2D points. 8009 * 8010 * The function calculates the ellipse that fits a set of 2D points. 8011 * It returns the rotated rectangle in which the ellipse is inscribed. 8012 * The Approximate Mean Square (AMS) proposed by CITE: Taubin1991 is used. 8013 * 8014 * For an ellipse, this basis set is \( \chi= \left(x^2, x y, y^2, x, y, 1\right) \), 8015 * which is a set of six free coefficients \( A^T=\left\{A_{\text{xx}},A_{\text{xy}},A_{\text{yy}},A_x,A_y,A_0\right\} \). 8016 * However, to specify an ellipse, all that is needed is five numbers; the major and minor axes lengths \( (a,b) \), 8017 * the position \( (x_0,y_0) \), and the orientation \( \theta \). This is because the basis set includes lines, 8018 * quadratics, parabolic and hyperbolic functions as well as elliptical functions as possible fits. 8019 * If the fit is found to be a parabolic or hyperbolic function then the standard #fitEllipse method is used. 8020 * The AMS method restricts the fit to parabolic, hyperbolic and elliptical curves 8021 * by imposing the condition that \( A^T ( D_x^T D_x + D_y^T D_y) A = 1 \) where 8022 * the matrices \( Dx \) and \( Dy \) are the partial derivatives of the design matrix \( D \) with 8023 * respect to x and y. The matrices are formed row by row applying the following to 8024 * each of the points in the set: 8025 * \(align*}{ 8026 * D(i,:)&=\left\{x_i^2, x_i y_i, y_i^2, x_i, y_i, 1\right\} & 8027 * D_x(i,:)&=\left\{2 x_i,y_i,0,1,0,0\right\} & 8028 * D_y(i,:)&=\left\{0,x_i,2 y_i,0,1,0\right\} 8029 * \) 8030 * The AMS method minimizes the cost function 8031 * \(equation*}{ 8032 * \epsilon ^2=\frac{ A^T D^T D A }{ A^T (D_x^T D_x + D_y^T D_y) A^T } 8033 * \) 8034 * 8035 * The minimum cost is found by solving the generalized eigenvalue problem. 8036 * 8037 * \(equation*}{ 8038 * D^T D A = \lambda \left( D_x^T D_x + D_y^T D_y\right) A 8039 * \) 8040 * 8041 * @param points Input 2D point set, stored in std::vector<> or Mat 8042 * @return automatically generated 8043 */ 8044 public static RotatedRect fitEllipseAMS(Mat points) { 8045 return new RotatedRect(fitEllipseAMS_0(points.nativeObj)); 8046 } 8047 8048 8049 // 8050 // C++: RotatedRect cv::fitEllipseDirect(Mat points) 8051 // 8052 8053 /** 8054 * Fits an ellipse around a set of 2D points. 8055 * 8056 * The function calculates the ellipse that fits a set of 2D points. 8057 * It returns the rotated rectangle in which the ellipse is inscribed. 8058 * The Direct least square (Direct) method by CITE: Fitzgibbon1999 is used. 8059 * 8060 * For an ellipse, this basis set is \( \chi= \left(x^2, x y, y^2, x, y, 1\right) \), 8061 * which is a set of six free coefficients \( A^T=\left\{A_{\text{xx}},A_{\text{xy}},A_{\text{yy}},A_x,A_y,A_0\right\} \). 8062 * However, to specify an ellipse, all that is needed is five numbers; the major and minor axes lengths \( (a,b) \), 8063 * the position \( (x_0,y_0) \), and the orientation \( \theta \). This is because the basis set includes lines, 8064 * quadratics, parabolic and hyperbolic functions as well as elliptical functions as possible fits. 8065 * The Direct method confines the fit to ellipses by ensuring that \( 4 A_{xx} A_{yy}- A_{xy}^2 > 0 \). 8066 * The condition imposed is that \( 4 A_{xx} A_{yy}- A_{xy}^2=1 \) which satisfies the inequality 8067 * and as the coefficients can be arbitrarily scaled is not overly restrictive. 8068 * 8069 * \(equation*}{ 8070 * \epsilon ^2= A^T D^T D A \quad \text{with} \quad A^T C A =1 \quad \text{and} \quad C=\left(\begin{matrix} 8071 * 0 & 0 & 2 & 0 & 0 & 0 \\ 8072 * 0 & -1 & 0 & 0 & 0 & 0 \\ 8073 * 2 & 0 & 0 & 0 & 0 & 0 \\ 8074 * 0 & 0 & 0 & 0 & 0 & 0 \\ 8075 * 0 & 0 & 0 & 0 & 0 & 0 \\ 8076 * 0 & 0 & 0 & 0 & 0 & 0 8077 * \end{matrix} \right) 8078 * \) 8079 * 8080 * The minimum cost is found by solving the generalized eigenvalue problem. 8081 * 8082 * \(equation*}{ 8083 * D^T D A = \lambda \left( C\right) A 8084 * \) 8085 * 8086 * The system produces only one positive eigenvalue \( \lambda\) which is chosen as the solution 8087 * with its eigenvector \(\mathbf{u}\). These are used to find the coefficients 8088 * 8089 * \(equation*}{ 8090 * A = \sqrt{\frac{1}{\mathbf{u}^T C \mathbf{u}}} \mathbf{u} 8091 * \) 8092 * The scaling factor guarantees that \(A^T C A =1\). 8093 * 8094 * @param points Input 2D point set, stored in std::vector<> or Mat 8095 * @return automatically generated 8096 */ 8097 public static RotatedRect fitEllipseDirect(Mat points) { 8098 return new RotatedRect(fitEllipseDirect_0(points.nativeObj)); 8099 } 8100 8101 8102 // 8103 // C++: void cv::fitLine(Mat points, Mat& line, int distType, double param, double reps, double aeps) 8104 // 8105 8106 /** 8107 * Fits a line to a 2D or 3D point set. 8108 * 8109 * The function fitLine fits a line to a 2D or 3D point set by minimizing \(\sum_i \rho(r_i)\) where 8110 * \(r_i\) is a distance between the \(i^{th}\) point, the line and \(\rho(r)\) is a distance function, one 8111 * of the following: 8112 * <ul> 8113 * <li> 8114 * DIST_L2 8115 * \(\rho (r) = r^2/2 \quad \text{(the simplest and the fastest least-squares method)}\) 8116 * </li> 8117 * <li> 8118 * DIST_L1 8119 * \(\rho (r) = r\) 8120 * </li> 8121 * <li> 8122 * DIST_L12 8123 * \(\rho (r) = 2 \cdot ( \sqrt{1 + \frac{r^2}{2}} - 1)\) 8124 * </li> 8125 * <li> 8126 * DIST_FAIR 8127 * \(\rho \left (r \right ) = C^2 \cdot \left ( \frac{r}{C} - \log{\left(1 + \frac{r}{C}\right)} \right ) \quad \text{where} \quad C=1.3998\) 8128 * </li> 8129 * <li> 8130 * DIST_WELSCH 8131 * \(\rho \left (r \right ) = \frac{C^2}{2} \cdot \left ( 1 - \exp{\left(-\left(\frac{r}{C}\right)^2\right)} \right ) \quad \text{where} \quad C=2.9846\) 8132 * </li> 8133 * <li> 8134 * DIST_HUBER 8135 * \(\rho (r) = \fork{r^2/2}{if \(r < C\)}{C \cdot (r-C/2)}{otherwise} \quad \text{where} \quad C=1.345\) 8136 * </li> 8137 * </ul> 8138 * 8139 * The algorithm is based on the M-estimator ( <http://en.wikipedia.org/wiki/M-estimator> ) technique 8140 * that iteratively fits the line using the weighted least-squares algorithm. After each iteration the 8141 * weights \(w_i\) are adjusted to be inversely proportional to \(\rho(r_i)\) . 8142 * 8143 * @param points Input vector of 2D or 3D points, stored in std::vector<> or Mat. 8144 * @param line Output line parameters. In case of 2D fitting, it should be a vector of 4 elements 8145 * (like Vec4f) - (vx, vy, x0, y0), where (vx, vy) is a normalized vector collinear to the line and 8146 * (x0, y0) is a point on the line. In case of 3D fitting, it should be a vector of 6 elements (like 8147 * Vec6f) - (vx, vy, vz, x0, y0, z0), where (vx, vy, vz) is a normalized vector collinear to the line 8148 * and (x0, y0, z0) is a point on the line. 8149 * @param distType Distance used by the M-estimator, see #DistanceTypes 8150 * @param param Numerical parameter ( C ) for some types of distances. If it is 0, an optimal value 8151 * is chosen. 8152 * @param reps Sufficient accuracy for the radius (distance between the coordinate origin and the line). 8153 * @param aeps Sufficient accuracy for the angle. 0.01 would be a good default value for reps and aeps. 8154 */ 8155 public static void fitLine(Mat points, Mat line, int distType, double param, double reps, double aeps) { 8156 fitLine_0(points.nativeObj, line.nativeObj, distType, param, reps, aeps); 8157 } 8158 8159 8160 // 8161 // C++: double cv::pointPolygonTest(vector_Point2f contour, Point2f pt, bool measureDist) 8162 // 8163 8164 /** 8165 * Performs a point-in-contour test. 8166 * 8167 * The function determines whether the point is inside a contour, outside, or lies on an edge (or 8168 * coincides with a vertex). It returns positive (inside), negative (outside), or zero (on an edge) 8169 * value, correspondingly. When measureDist=false , the return value is +1, -1, and 0, respectively. 8170 * Otherwise, the return value is a signed distance between the point and the nearest contour edge. 8171 * 8172 * See below a sample output of the function where each image pixel is tested against the contour: 8173 * 8174 * ![sample output](pics/pointpolygon.png) 8175 * 8176 * @param contour Input contour. 8177 * @param pt Point tested against the contour. 8178 * @param measureDist If true, the function estimates the signed distance from the point to the 8179 * nearest contour edge. Otherwise, the function only checks if the point is inside a contour or not. 8180 * @return automatically generated 8181 */ 8182 public static double pointPolygonTest(MatOfPoint2f contour, Point pt, boolean measureDist) { 8183 Mat contour_mat = contour; 8184 return pointPolygonTest_0(contour_mat.nativeObj, pt.x, pt.y, measureDist); 8185 } 8186 8187 8188 // 8189 // C++: int cv::rotatedRectangleIntersection(RotatedRect rect1, RotatedRect rect2, Mat& intersectingRegion) 8190 // 8191 8192 /** 8193 * Finds out if there is any intersection between two rotated rectangles. 8194 * 8195 * If there is then the vertices of the intersecting region are returned as well. 8196 * 8197 * Below are some examples of intersection configurations. The hatched pattern indicates the 8198 * intersecting region and the red vertices are returned by the function. 8199 * 8200 * ![intersection examples](pics/intersection.png) 8201 * 8202 * @param rect1 First rectangle 8203 * @param rect2 Second rectangle 8204 * @param intersectingRegion The output array of the vertices of the intersecting region. It returns 8205 * at most 8 vertices. Stored as std::vector<cv::Point2f> or cv::Mat as Mx1 of type CV_32FC2. 8206 * @return One of #RectanglesIntersectTypes 8207 */ 8208 public static int rotatedRectangleIntersection(RotatedRect rect1, RotatedRect rect2, Mat intersectingRegion) { 8209 return rotatedRectangleIntersection_0(rect1.center.x, rect1.center.y, rect1.size.width, rect1.size.height, rect1.angle, rect2.center.x, rect2.center.y, rect2.size.width, rect2.size.height, rect2.angle, intersectingRegion.nativeObj); 8210 } 8211 8212 8213 // 8214 // C++: Ptr_GeneralizedHoughBallard cv::createGeneralizedHoughBallard() 8215 // 8216 8217 /** 8218 * Creates a smart pointer to a cv::GeneralizedHoughBallard class and initializes it. 8219 * @return automatically generated 8220 */ 8221 public static GeneralizedHoughBallard createGeneralizedHoughBallard() { 8222 return GeneralizedHoughBallard.__fromPtr__(createGeneralizedHoughBallard_0()); 8223 } 8224 8225 8226 // 8227 // C++: Ptr_GeneralizedHoughGuil cv::createGeneralizedHoughGuil() 8228 // 8229 8230 /** 8231 * Creates a smart pointer to a cv::GeneralizedHoughGuil class and initializes it. 8232 * @return automatically generated 8233 */ 8234 public static GeneralizedHoughGuil createGeneralizedHoughGuil() { 8235 return GeneralizedHoughGuil.__fromPtr__(createGeneralizedHoughGuil_0()); 8236 } 8237 8238 8239 // 8240 // C++: void cv::applyColorMap(Mat src, Mat& dst, int colormap) 8241 // 8242 8243 /** 8244 * Applies a GNU Octave/MATLAB equivalent colormap on a given image. 8245 * 8246 * @param src The source image, grayscale or colored of type CV_8UC1 or CV_8UC3. 8247 * @param dst The result is the colormapped source image. Note: Mat::create is called on dst. 8248 * @param colormap The colormap to apply, see #ColormapTypes 8249 */ 8250 public static void applyColorMap(Mat src, Mat dst, int colormap) { 8251 applyColorMap_0(src.nativeObj, dst.nativeObj, colormap); 8252 } 8253 8254 8255 // 8256 // C++: void cv::applyColorMap(Mat src, Mat& dst, Mat userColor) 8257 // 8258 8259 /** 8260 * Applies a user colormap on a given image. 8261 * 8262 * @param src The source image, grayscale or colored of type CV_8UC1 or CV_8UC3. 8263 * @param dst The result is the colormapped source image. Note: Mat::create is called on dst. 8264 * @param userColor The colormap to apply of type CV_8UC1 or CV_8UC3 and size 256 8265 */ 8266 public static void applyColorMap(Mat src, Mat dst, Mat userColor) { 8267 applyColorMap_1(src.nativeObj, dst.nativeObj, userColor.nativeObj); 8268 } 8269 8270 8271 // 8272 // C++: void cv::line(Mat& img, Point pt1, Point pt2, Scalar color, int thickness = 1, int lineType = LINE_8, int shift = 0) 8273 // 8274 8275 /** 8276 * Draws a line segment connecting two points. 8277 * 8278 * The function line draws the line segment between pt1 and pt2 points in the image. The line is 8279 * clipped by the image boundaries. For non-antialiased lines with integer coordinates, the 8-connected 8280 * or 4-connected Bresenham algorithm is used. Thick lines are drawn with rounding endings. Antialiased 8281 * lines are drawn using Gaussian filtering. 8282 * 8283 * @param img Image. 8284 * @param pt1 First point of the line segment. 8285 * @param pt2 Second point of the line segment. 8286 * @param color Line color. 8287 * @param thickness Line thickness. 8288 * @param lineType Type of the line. See #LineTypes. 8289 * @param shift Number of fractional bits in the point coordinates. 8290 */ 8291 public static void line(Mat img, Point pt1, Point pt2, Scalar color, int thickness, int lineType, int shift) { 8292 line_0(img.nativeObj, pt1.x, pt1.y, pt2.x, pt2.y, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType, shift); 8293 } 8294 8295 /** 8296 * Draws a line segment connecting two points. 8297 * 8298 * The function line draws the line segment between pt1 and pt2 points in the image. The line is 8299 * clipped by the image boundaries. For non-antialiased lines with integer coordinates, the 8-connected 8300 * or 4-connected Bresenham algorithm is used. Thick lines are drawn with rounding endings. Antialiased 8301 * lines are drawn using Gaussian filtering. 8302 * 8303 * @param img Image. 8304 * @param pt1 First point of the line segment. 8305 * @param pt2 Second point of the line segment. 8306 * @param color Line color. 8307 * @param thickness Line thickness. 8308 * @param lineType Type of the line. See #LineTypes. 8309 */ 8310 public static void line(Mat img, Point pt1, Point pt2, Scalar color, int thickness, int lineType) { 8311 line_1(img.nativeObj, pt1.x, pt1.y, pt2.x, pt2.y, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType); 8312 } 8313 8314 /** 8315 * Draws a line segment connecting two points. 8316 * 8317 * The function line draws the line segment between pt1 and pt2 points in the image. The line is 8318 * clipped by the image boundaries. For non-antialiased lines with integer coordinates, the 8-connected 8319 * or 4-connected Bresenham algorithm is used. Thick lines are drawn with rounding endings. Antialiased 8320 * lines are drawn using Gaussian filtering. 8321 * 8322 * @param img Image. 8323 * @param pt1 First point of the line segment. 8324 * @param pt2 Second point of the line segment. 8325 * @param color Line color. 8326 * @param thickness Line thickness. 8327 */ 8328 public static void line(Mat img, Point pt1, Point pt2, Scalar color, int thickness) { 8329 line_2(img.nativeObj, pt1.x, pt1.y, pt2.x, pt2.y, color.val[0], color.val[1], color.val[2], color.val[3], thickness); 8330 } 8331 8332 /** 8333 * Draws a line segment connecting two points. 8334 * 8335 * The function line draws the line segment between pt1 and pt2 points in the image. The line is 8336 * clipped by the image boundaries. For non-antialiased lines with integer coordinates, the 8-connected 8337 * or 4-connected Bresenham algorithm is used. Thick lines are drawn with rounding endings. Antialiased 8338 * lines are drawn using Gaussian filtering. 8339 * 8340 * @param img Image. 8341 * @param pt1 First point of the line segment. 8342 * @param pt2 Second point of the line segment. 8343 * @param color Line color. 8344 */ 8345 public static void line(Mat img, Point pt1, Point pt2, Scalar color) { 8346 line_3(img.nativeObj, pt1.x, pt1.y, pt2.x, pt2.y, color.val[0], color.val[1], color.val[2], color.val[3]); 8347 } 8348 8349 8350 // 8351 // C++: void cv::arrowedLine(Mat& img, Point pt1, Point pt2, Scalar color, int thickness = 1, int line_type = 8, int shift = 0, double tipLength = 0.1) 8352 // 8353 8354 /** 8355 * Draws an arrow segment pointing from the first point to the second one. 8356 * 8357 * The function cv::arrowedLine draws an arrow between pt1 and pt2 points in the image. See also #line. 8358 * 8359 * @param img Image. 8360 * @param pt1 The point the arrow starts from. 8361 * @param pt2 The point the arrow points to. 8362 * @param color Line color. 8363 * @param thickness Line thickness. 8364 * @param line_type Type of the line. See #LineTypes 8365 * @param shift Number of fractional bits in the point coordinates. 8366 * @param tipLength The length of the arrow tip in relation to the arrow length 8367 */ 8368 public static void arrowedLine(Mat img, Point pt1, Point pt2, Scalar color, int thickness, int line_type, int shift, double tipLength) { 8369 arrowedLine_0(img.nativeObj, pt1.x, pt1.y, pt2.x, pt2.y, color.val[0], color.val[1], color.val[2], color.val[3], thickness, line_type, shift, tipLength); 8370 } 8371 8372 /** 8373 * Draws an arrow segment pointing from the first point to the second one. 8374 * 8375 * The function cv::arrowedLine draws an arrow between pt1 and pt2 points in the image. See also #line. 8376 * 8377 * @param img Image. 8378 * @param pt1 The point the arrow starts from. 8379 * @param pt2 The point the arrow points to. 8380 * @param color Line color. 8381 * @param thickness Line thickness. 8382 * @param line_type Type of the line. See #LineTypes 8383 * @param shift Number of fractional bits in the point coordinates. 8384 */ 8385 public static void arrowedLine(Mat img, Point pt1, Point pt2, Scalar color, int thickness, int line_type, int shift) { 8386 arrowedLine_1(img.nativeObj, pt1.x, pt1.y, pt2.x, pt2.y, color.val[0], color.val[1], color.val[2], color.val[3], thickness, line_type, shift); 8387 } 8388 8389 /** 8390 * Draws an arrow segment pointing from the first point to the second one. 8391 * 8392 * The function cv::arrowedLine draws an arrow between pt1 and pt2 points in the image. See also #line. 8393 * 8394 * @param img Image. 8395 * @param pt1 The point the arrow starts from. 8396 * @param pt2 The point the arrow points to. 8397 * @param color Line color. 8398 * @param thickness Line thickness. 8399 * @param line_type Type of the line. See #LineTypes 8400 */ 8401 public static void arrowedLine(Mat img, Point pt1, Point pt2, Scalar color, int thickness, int line_type) { 8402 arrowedLine_2(img.nativeObj, pt1.x, pt1.y, pt2.x, pt2.y, color.val[0], color.val[1], color.val[2], color.val[3], thickness, line_type); 8403 } 8404 8405 /** 8406 * Draws an arrow segment pointing from the first point to the second one. 8407 * 8408 * The function cv::arrowedLine draws an arrow between pt1 and pt2 points in the image. See also #line. 8409 * 8410 * @param img Image. 8411 * @param pt1 The point the arrow starts from. 8412 * @param pt2 The point the arrow points to. 8413 * @param color Line color. 8414 * @param thickness Line thickness. 8415 */ 8416 public static void arrowedLine(Mat img, Point pt1, Point pt2, Scalar color, int thickness) { 8417 arrowedLine_3(img.nativeObj, pt1.x, pt1.y, pt2.x, pt2.y, color.val[0], color.val[1], color.val[2], color.val[3], thickness); 8418 } 8419 8420 /** 8421 * Draws an arrow segment pointing from the first point to the second one. 8422 * 8423 * The function cv::arrowedLine draws an arrow between pt1 and pt2 points in the image. See also #line. 8424 * 8425 * @param img Image. 8426 * @param pt1 The point the arrow starts from. 8427 * @param pt2 The point the arrow points to. 8428 * @param color Line color. 8429 */ 8430 public static void arrowedLine(Mat img, Point pt1, Point pt2, Scalar color) { 8431 arrowedLine_4(img.nativeObj, pt1.x, pt1.y, pt2.x, pt2.y, color.val[0], color.val[1], color.val[2], color.val[3]); 8432 } 8433 8434 8435 // 8436 // C++: void cv::rectangle(Mat& img, Point pt1, Point pt2, Scalar color, int thickness = 1, int lineType = LINE_8, int shift = 0) 8437 // 8438 8439 /** 8440 * Draws a simple, thick, or filled up-right rectangle. 8441 * 8442 * The function cv::rectangle draws a rectangle outline or a filled rectangle whose two opposite corners 8443 * are pt1 and pt2. 8444 * 8445 * @param img Image. 8446 * @param pt1 Vertex of the rectangle. 8447 * @param pt2 Vertex of the rectangle opposite to pt1 . 8448 * @param color Rectangle color or brightness (grayscale image). 8449 * @param thickness Thickness of lines that make up the rectangle. Negative values, like #FILLED, 8450 * mean that the function has to draw a filled rectangle. 8451 * @param lineType Type of the line. See #LineTypes 8452 * @param shift Number of fractional bits in the point coordinates. 8453 */ 8454 public static void rectangle(Mat img, Point pt1, Point pt2, Scalar color, int thickness, int lineType, int shift) { 8455 rectangle_0(img.nativeObj, pt1.x, pt1.y, pt2.x, pt2.y, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType, shift); 8456 } 8457 8458 /** 8459 * Draws a simple, thick, or filled up-right rectangle. 8460 * 8461 * The function cv::rectangle draws a rectangle outline or a filled rectangle whose two opposite corners 8462 * are pt1 and pt2. 8463 * 8464 * @param img Image. 8465 * @param pt1 Vertex of the rectangle. 8466 * @param pt2 Vertex of the rectangle opposite to pt1 . 8467 * @param color Rectangle color or brightness (grayscale image). 8468 * @param thickness Thickness of lines that make up the rectangle. Negative values, like #FILLED, 8469 * mean that the function has to draw a filled rectangle. 8470 * @param lineType Type of the line. See #LineTypes 8471 */ 8472 public static void rectangle(Mat img, Point pt1, Point pt2, Scalar color, int thickness, int lineType) { 8473 rectangle_1(img.nativeObj, pt1.x, pt1.y, pt2.x, pt2.y, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType); 8474 } 8475 8476 /** 8477 * Draws a simple, thick, or filled up-right rectangle. 8478 * 8479 * The function cv::rectangle draws a rectangle outline or a filled rectangle whose two opposite corners 8480 * are pt1 and pt2. 8481 * 8482 * @param img Image. 8483 * @param pt1 Vertex of the rectangle. 8484 * @param pt2 Vertex of the rectangle opposite to pt1 . 8485 * @param color Rectangle color or brightness (grayscale image). 8486 * @param thickness Thickness of lines that make up the rectangle. Negative values, like #FILLED, 8487 * mean that the function has to draw a filled rectangle. 8488 */ 8489 public static void rectangle(Mat img, Point pt1, Point pt2, Scalar color, int thickness) { 8490 rectangle_2(img.nativeObj, pt1.x, pt1.y, pt2.x, pt2.y, color.val[0], color.val[1], color.val[2], color.val[3], thickness); 8491 } 8492 8493 /** 8494 * Draws a simple, thick, or filled up-right rectangle. 8495 * 8496 * The function cv::rectangle draws a rectangle outline or a filled rectangle whose two opposite corners 8497 * are pt1 and pt2. 8498 * 8499 * @param img Image. 8500 * @param pt1 Vertex of the rectangle. 8501 * @param pt2 Vertex of the rectangle opposite to pt1 . 8502 * @param color Rectangle color or brightness (grayscale image). 8503 * mean that the function has to draw a filled rectangle. 8504 */ 8505 public static void rectangle(Mat img, Point pt1, Point pt2, Scalar color) { 8506 rectangle_3(img.nativeObj, pt1.x, pt1.y, pt2.x, pt2.y, color.val[0], color.val[1], color.val[2], color.val[3]); 8507 } 8508 8509 8510 // 8511 // C++: void cv::rectangle(Mat& img, Rect rec, Scalar color, int thickness = 1, int lineType = LINE_8, int shift = 0) 8512 // 8513 8514 /** 8515 * 8516 * 8517 * use {@code rec} parameter as alternative specification of the drawn rectangle: `r.tl() and 8518 * r.br()-Point(1,1)` are opposite corners 8519 * @param img automatically generated 8520 * @param rec automatically generated 8521 * @param color automatically generated 8522 * @param thickness automatically generated 8523 * @param lineType automatically generated 8524 * @param shift automatically generated 8525 */ 8526 public static void rectangle(Mat img, Rect rec, Scalar color, int thickness, int lineType, int shift) { 8527 rectangle_4(img.nativeObj, rec.x, rec.y, rec.width, rec.height, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType, shift); 8528 } 8529 8530 /** 8531 * 8532 * 8533 * use {@code rec} parameter as alternative specification of the drawn rectangle: `r.tl() and 8534 * r.br()-Point(1,1)` are opposite corners 8535 * @param img automatically generated 8536 * @param rec automatically generated 8537 * @param color automatically generated 8538 * @param thickness automatically generated 8539 * @param lineType automatically generated 8540 */ 8541 public static void rectangle(Mat img, Rect rec, Scalar color, int thickness, int lineType) { 8542 rectangle_5(img.nativeObj, rec.x, rec.y, rec.width, rec.height, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType); 8543 } 8544 8545 /** 8546 * 8547 * 8548 * use {@code rec} parameter as alternative specification of the drawn rectangle: `r.tl() and 8549 * r.br()-Point(1,1)` are opposite corners 8550 * @param img automatically generated 8551 * @param rec automatically generated 8552 * @param color automatically generated 8553 * @param thickness automatically generated 8554 */ 8555 public static void rectangle(Mat img, Rect rec, Scalar color, int thickness) { 8556 rectangle_6(img.nativeObj, rec.x, rec.y, rec.width, rec.height, color.val[0], color.val[1], color.val[2], color.val[3], thickness); 8557 } 8558 8559 /** 8560 * 8561 * 8562 * use {@code rec} parameter as alternative specification of the drawn rectangle: `r.tl() and 8563 * r.br()-Point(1,1)` are opposite corners 8564 * @param img automatically generated 8565 * @param rec automatically generated 8566 * @param color automatically generated 8567 */ 8568 public static void rectangle(Mat img, Rect rec, Scalar color) { 8569 rectangle_7(img.nativeObj, rec.x, rec.y, rec.width, rec.height, color.val[0], color.val[1], color.val[2], color.val[3]); 8570 } 8571 8572 8573 // 8574 // C++: void cv::circle(Mat& img, Point center, int radius, Scalar color, int thickness = 1, int lineType = LINE_8, int shift = 0) 8575 // 8576 8577 /** 8578 * Draws a circle. 8579 * 8580 * The function cv::circle draws a simple or filled circle with a given center and radius. 8581 * @param img Image where the circle is drawn. 8582 * @param center Center of the circle. 8583 * @param radius Radius of the circle. 8584 * @param color Circle color. 8585 * @param thickness Thickness of the circle outline, if positive. Negative values, like #FILLED, 8586 * mean that a filled circle is to be drawn. 8587 * @param lineType Type of the circle boundary. See #LineTypes 8588 * @param shift Number of fractional bits in the coordinates of the center and in the radius value. 8589 */ 8590 public static void circle(Mat img, Point center, int radius, Scalar color, int thickness, int lineType, int shift) { 8591 circle_0(img.nativeObj, center.x, center.y, radius, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType, shift); 8592 } 8593 8594 /** 8595 * Draws a circle. 8596 * 8597 * The function cv::circle draws a simple or filled circle with a given center and radius. 8598 * @param img Image where the circle is drawn. 8599 * @param center Center of the circle. 8600 * @param radius Radius of the circle. 8601 * @param color Circle color. 8602 * @param thickness Thickness of the circle outline, if positive. Negative values, like #FILLED, 8603 * mean that a filled circle is to be drawn. 8604 * @param lineType Type of the circle boundary. See #LineTypes 8605 */ 8606 public static void circle(Mat img, Point center, int radius, Scalar color, int thickness, int lineType) { 8607 circle_1(img.nativeObj, center.x, center.y, radius, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType); 8608 } 8609 8610 /** 8611 * Draws a circle. 8612 * 8613 * The function cv::circle draws a simple or filled circle with a given center and radius. 8614 * @param img Image where the circle is drawn. 8615 * @param center Center of the circle. 8616 * @param radius Radius of the circle. 8617 * @param color Circle color. 8618 * @param thickness Thickness of the circle outline, if positive. Negative values, like #FILLED, 8619 * mean that a filled circle is to be drawn. 8620 */ 8621 public static void circle(Mat img, Point center, int radius, Scalar color, int thickness) { 8622 circle_2(img.nativeObj, center.x, center.y, radius, color.val[0], color.val[1], color.val[2], color.val[3], thickness); 8623 } 8624 8625 /** 8626 * Draws a circle. 8627 * 8628 * The function cv::circle draws a simple or filled circle with a given center and radius. 8629 * @param img Image where the circle is drawn. 8630 * @param center Center of the circle. 8631 * @param radius Radius of the circle. 8632 * @param color Circle color. 8633 * mean that a filled circle is to be drawn. 8634 */ 8635 public static void circle(Mat img, Point center, int radius, Scalar color) { 8636 circle_3(img.nativeObj, center.x, center.y, radius, color.val[0], color.val[1], color.val[2], color.val[3]); 8637 } 8638 8639 8640 // 8641 // C++: void cv::ellipse(Mat& img, Point center, Size axes, double angle, double startAngle, double endAngle, Scalar color, int thickness = 1, int lineType = LINE_8, int shift = 0) 8642 // 8643 8644 /** 8645 * Draws a simple or thick elliptic arc or fills an ellipse sector. 8646 * 8647 * The function cv::ellipse with more parameters draws an ellipse outline, a filled ellipse, an elliptic 8648 * arc, or a filled ellipse sector. The drawing code uses general parametric form. 8649 * A piecewise-linear curve is used to approximate the elliptic arc 8650 * boundary. If you need more control of the ellipse rendering, you can retrieve the curve using 8651 * #ellipse2Poly and then render it with #polylines or fill it with #fillPoly. If you use the first 8652 * variant of the function and want to draw the whole ellipse, not an arc, pass {@code startAngle=0} and 8653 * {@code endAngle=360}. If {@code startAngle} is greater than {@code endAngle}, they are swapped. The figure below explains 8654 * the meaning of the parameters to draw the blue arc. 8655 * 8656 * ![Parameters of Elliptic Arc](pics/ellipse.svg) 8657 * 8658 * @param img Image. 8659 * @param center Center of the ellipse. 8660 * @param axes Half of the size of the ellipse main axes. 8661 * @param angle Ellipse rotation angle in degrees. 8662 * @param startAngle Starting angle of the elliptic arc in degrees. 8663 * @param endAngle Ending angle of the elliptic arc in degrees. 8664 * @param color Ellipse color. 8665 * @param thickness Thickness of the ellipse arc outline, if positive. Otherwise, this indicates that 8666 * a filled ellipse sector is to be drawn. 8667 * @param lineType Type of the ellipse boundary. See #LineTypes 8668 * @param shift Number of fractional bits in the coordinates of the center and values of axes. 8669 */ 8670 public static void ellipse(Mat img, Point center, Size axes, double angle, double startAngle, double endAngle, Scalar color, int thickness, int lineType, int shift) { 8671 ellipse_0(img.nativeObj, center.x, center.y, axes.width, axes.height, angle, startAngle, endAngle, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType, shift); 8672 } 8673 8674 /** 8675 * Draws a simple or thick elliptic arc or fills an ellipse sector. 8676 * 8677 * The function cv::ellipse with more parameters draws an ellipse outline, a filled ellipse, an elliptic 8678 * arc, or a filled ellipse sector. The drawing code uses general parametric form. 8679 * A piecewise-linear curve is used to approximate the elliptic arc 8680 * boundary. If you need more control of the ellipse rendering, you can retrieve the curve using 8681 * #ellipse2Poly and then render it with #polylines or fill it with #fillPoly. If you use the first 8682 * variant of the function and want to draw the whole ellipse, not an arc, pass {@code startAngle=0} and 8683 * {@code endAngle=360}. If {@code startAngle} is greater than {@code endAngle}, they are swapped. The figure below explains 8684 * the meaning of the parameters to draw the blue arc. 8685 * 8686 * ![Parameters of Elliptic Arc](pics/ellipse.svg) 8687 * 8688 * @param img Image. 8689 * @param center Center of the ellipse. 8690 * @param axes Half of the size of the ellipse main axes. 8691 * @param angle Ellipse rotation angle in degrees. 8692 * @param startAngle Starting angle of the elliptic arc in degrees. 8693 * @param endAngle Ending angle of the elliptic arc in degrees. 8694 * @param color Ellipse color. 8695 * @param thickness Thickness of the ellipse arc outline, if positive. Otherwise, this indicates that 8696 * a filled ellipse sector is to be drawn. 8697 * @param lineType Type of the ellipse boundary. See #LineTypes 8698 */ 8699 public static void ellipse(Mat img, Point center, Size axes, double angle, double startAngle, double endAngle, Scalar color, int thickness, int lineType) { 8700 ellipse_1(img.nativeObj, center.x, center.y, axes.width, axes.height, angle, startAngle, endAngle, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType); 8701 } 8702 8703 /** 8704 * Draws a simple or thick elliptic arc or fills an ellipse sector. 8705 * 8706 * The function cv::ellipse with more parameters draws an ellipse outline, a filled ellipse, an elliptic 8707 * arc, or a filled ellipse sector. The drawing code uses general parametric form. 8708 * A piecewise-linear curve is used to approximate the elliptic arc 8709 * boundary. If you need more control of the ellipse rendering, you can retrieve the curve using 8710 * #ellipse2Poly and then render it with #polylines or fill it with #fillPoly. If you use the first 8711 * variant of the function and want to draw the whole ellipse, not an arc, pass {@code startAngle=0} and 8712 * {@code endAngle=360}. If {@code startAngle} is greater than {@code endAngle}, they are swapped. The figure below explains 8713 * the meaning of the parameters to draw the blue arc. 8714 * 8715 * ![Parameters of Elliptic Arc](pics/ellipse.svg) 8716 * 8717 * @param img Image. 8718 * @param center Center of the ellipse. 8719 * @param axes Half of the size of the ellipse main axes. 8720 * @param angle Ellipse rotation angle in degrees. 8721 * @param startAngle Starting angle of the elliptic arc in degrees. 8722 * @param endAngle Ending angle of the elliptic arc in degrees. 8723 * @param color Ellipse color. 8724 * @param thickness Thickness of the ellipse arc outline, if positive. Otherwise, this indicates that 8725 * a filled ellipse sector is to be drawn. 8726 */ 8727 public static void ellipse(Mat img, Point center, Size axes, double angle, double startAngle, double endAngle, Scalar color, int thickness) { 8728 ellipse_2(img.nativeObj, center.x, center.y, axes.width, axes.height, angle, startAngle, endAngle, color.val[0], color.val[1], color.val[2], color.val[3], thickness); 8729 } 8730 8731 /** 8732 * Draws a simple or thick elliptic arc or fills an ellipse sector. 8733 * 8734 * The function cv::ellipse with more parameters draws an ellipse outline, a filled ellipse, an elliptic 8735 * arc, or a filled ellipse sector. The drawing code uses general parametric form. 8736 * A piecewise-linear curve is used to approximate the elliptic arc 8737 * boundary. If you need more control of the ellipse rendering, you can retrieve the curve using 8738 * #ellipse2Poly and then render it with #polylines or fill it with #fillPoly. If you use the first 8739 * variant of the function and want to draw the whole ellipse, not an arc, pass {@code startAngle=0} and 8740 * {@code endAngle=360}. If {@code startAngle} is greater than {@code endAngle}, they are swapped. The figure below explains 8741 * the meaning of the parameters to draw the blue arc. 8742 * 8743 * ![Parameters of Elliptic Arc](pics/ellipse.svg) 8744 * 8745 * @param img Image. 8746 * @param center Center of the ellipse. 8747 * @param axes Half of the size of the ellipse main axes. 8748 * @param angle Ellipse rotation angle in degrees. 8749 * @param startAngle Starting angle of the elliptic arc in degrees. 8750 * @param endAngle Ending angle of the elliptic arc in degrees. 8751 * @param color Ellipse color. 8752 * a filled ellipse sector is to be drawn. 8753 */ 8754 public static void ellipse(Mat img, Point center, Size axes, double angle, double startAngle, double endAngle, Scalar color) { 8755 ellipse_3(img.nativeObj, center.x, center.y, axes.width, axes.height, angle, startAngle, endAngle, color.val[0], color.val[1], color.val[2], color.val[3]); 8756 } 8757 8758 8759 // 8760 // C++: void cv::ellipse(Mat& img, RotatedRect box, Scalar color, int thickness = 1, int lineType = LINE_8) 8761 // 8762 8763 /** 8764 * 8765 * @param img Image. 8766 * @param box Alternative ellipse representation via RotatedRect. This means that the function draws 8767 * an ellipse inscribed in the rotated rectangle. 8768 * @param color Ellipse color. 8769 * @param thickness Thickness of the ellipse arc outline, if positive. Otherwise, this indicates that 8770 * a filled ellipse sector is to be drawn. 8771 * @param lineType Type of the ellipse boundary. See #LineTypes 8772 */ 8773 public static void ellipse(Mat img, RotatedRect box, Scalar color, int thickness, int lineType) { 8774 ellipse_4(img.nativeObj, box.center.x, box.center.y, box.size.width, box.size.height, box.angle, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType); 8775 } 8776 8777 /** 8778 * 8779 * @param img Image. 8780 * @param box Alternative ellipse representation via RotatedRect. This means that the function draws 8781 * an ellipse inscribed in the rotated rectangle. 8782 * @param color Ellipse color. 8783 * @param thickness Thickness of the ellipse arc outline, if positive. Otherwise, this indicates that 8784 * a filled ellipse sector is to be drawn. 8785 */ 8786 public static void ellipse(Mat img, RotatedRect box, Scalar color, int thickness) { 8787 ellipse_5(img.nativeObj, box.center.x, box.center.y, box.size.width, box.size.height, box.angle, color.val[0], color.val[1], color.val[2], color.val[3], thickness); 8788 } 8789 8790 /** 8791 * 8792 * @param img Image. 8793 * @param box Alternative ellipse representation via RotatedRect. This means that the function draws 8794 * an ellipse inscribed in the rotated rectangle. 8795 * @param color Ellipse color. 8796 * a filled ellipse sector is to be drawn. 8797 */ 8798 public static void ellipse(Mat img, RotatedRect box, Scalar color) { 8799 ellipse_6(img.nativeObj, box.center.x, box.center.y, box.size.width, box.size.height, box.angle, color.val[0], color.val[1], color.val[2], color.val[3]); 8800 } 8801 8802 8803 // 8804 // C++: void cv::drawMarker(Mat& img, Point position, Scalar color, int markerType = MARKER_CROSS, int markerSize = 20, int thickness = 1, int line_type = 8) 8805 // 8806 8807 /** 8808 * Draws a marker on a predefined position in an image. 8809 * 8810 * The function cv::drawMarker draws a marker on a given position in the image. For the moment several 8811 * marker types are supported, see #MarkerTypes for more information. 8812 * 8813 * @param img Image. 8814 * @param position The point where the crosshair is positioned. 8815 * @param color Line color. 8816 * @param markerType The specific type of marker you want to use, see #MarkerTypes 8817 * @param thickness Line thickness. 8818 * @param line_type Type of the line, See #LineTypes 8819 * @param markerSize The length of the marker axis [default = 20 pixels] 8820 */ 8821 public static void drawMarker(Mat img, Point position, Scalar color, int markerType, int markerSize, int thickness, int line_type) { 8822 drawMarker_0(img.nativeObj, position.x, position.y, color.val[0], color.val[1], color.val[2], color.val[3], markerType, markerSize, thickness, line_type); 8823 } 8824 8825 /** 8826 * Draws a marker on a predefined position in an image. 8827 * 8828 * The function cv::drawMarker draws a marker on a given position in the image. For the moment several 8829 * marker types are supported, see #MarkerTypes for more information. 8830 * 8831 * @param img Image. 8832 * @param position The point where the crosshair is positioned. 8833 * @param color Line color. 8834 * @param markerType The specific type of marker you want to use, see #MarkerTypes 8835 * @param thickness Line thickness. 8836 * @param markerSize The length of the marker axis [default = 20 pixels] 8837 */ 8838 public static void drawMarker(Mat img, Point position, Scalar color, int markerType, int markerSize, int thickness) { 8839 drawMarker_1(img.nativeObj, position.x, position.y, color.val[0], color.val[1], color.val[2], color.val[3], markerType, markerSize, thickness); 8840 } 8841 8842 /** 8843 * Draws a marker on a predefined position in an image. 8844 * 8845 * The function cv::drawMarker draws a marker on a given position in the image. For the moment several 8846 * marker types are supported, see #MarkerTypes for more information. 8847 * 8848 * @param img Image. 8849 * @param position The point where the crosshair is positioned. 8850 * @param color Line color. 8851 * @param markerType The specific type of marker you want to use, see #MarkerTypes 8852 * @param markerSize The length of the marker axis [default = 20 pixels] 8853 */ 8854 public static void drawMarker(Mat img, Point position, Scalar color, int markerType, int markerSize) { 8855 drawMarker_2(img.nativeObj, position.x, position.y, color.val[0], color.val[1], color.val[2], color.val[3], markerType, markerSize); 8856 } 8857 8858 /** 8859 * Draws a marker on a predefined position in an image. 8860 * 8861 * The function cv::drawMarker draws a marker on a given position in the image. For the moment several 8862 * marker types are supported, see #MarkerTypes for more information. 8863 * 8864 * @param img Image. 8865 * @param position The point where the crosshair is positioned. 8866 * @param color Line color. 8867 * @param markerType The specific type of marker you want to use, see #MarkerTypes 8868 */ 8869 public static void drawMarker(Mat img, Point position, Scalar color, int markerType) { 8870 drawMarker_3(img.nativeObj, position.x, position.y, color.val[0], color.val[1], color.val[2], color.val[3], markerType); 8871 } 8872 8873 /** 8874 * Draws a marker on a predefined position in an image. 8875 * 8876 * The function cv::drawMarker draws a marker on a given position in the image. For the moment several 8877 * marker types are supported, see #MarkerTypes for more information. 8878 * 8879 * @param img Image. 8880 * @param position The point where the crosshair is positioned. 8881 * @param color Line color. 8882 */ 8883 public static void drawMarker(Mat img, Point position, Scalar color) { 8884 drawMarker_4(img.nativeObj, position.x, position.y, color.val[0], color.val[1], color.val[2], color.val[3]); 8885 } 8886 8887 8888 // 8889 // C++: void cv::fillConvexPoly(Mat& img, vector_Point points, Scalar color, int lineType = LINE_8, int shift = 0) 8890 // 8891 8892 /** 8893 * Fills a convex polygon. 8894 * 8895 * The function cv::fillConvexPoly draws a filled convex polygon. This function is much faster than the 8896 * function #fillPoly . It can fill not only convex polygons but any monotonic polygon without 8897 * self-intersections, that is, a polygon whose contour intersects every horizontal line (scan line) 8898 * twice at the most (though, its top-most and/or the bottom edge could be horizontal). 8899 * 8900 * @param img Image. 8901 * @param points Polygon vertices. 8902 * @param color Polygon color. 8903 * @param lineType Type of the polygon boundaries. See #LineTypes 8904 * @param shift Number of fractional bits in the vertex coordinates. 8905 */ 8906 public static void fillConvexPoly(Mat img, MatOfPoint points, Scalar color, int lineType, int shift) { 8907 Mat points_mat = points; 8908 fillConvexPoly_0(img.nativeObj, points_mat.nativeObj, color.val[0], color.val[1], color.val[2], color.val[3], lineType, shift); 8909 } 8910 8911 /** 8912 * Fills a convex polygon. 8913 * 8914 * The function cv::fillConvexPoly draws a filled convex polygon. This function is much faster than the 8915 * function #fillPoly . It can fill not only convex polygons but any monotonic polygon without 8916 * self-intersections, that is, a polygon whose contour intersects every horizontal line (scan line) 8917 * twice at the most (though, its top-most and/or the bottom edge could be horizontal). 8918 * 8919 * @param img Image. 8920 * @param points Polygon vertices. 8921 * @param color Polygon color. 8922 * @param lineType Type of the polygon boundaries. See #LineTypes 8923 */ 8924 public static void fillConvexPoly(Mat img, MatOfPoint points, Scalar color, int lineType) { 8925 Mat points_mat = points; 8926 fillConvexPoly_1(img.nativeObj, points_mat.nativeObj, color.val[0], color.val[1], color.val[2], color.val[3], lineType); 8927 } 8928 8929 /** 8930 * Fills a convex polygon. 8931 * 8932 * The function cv::fillConvexPoly draws a filled convex polygon. This function is much faster than the 8933 * function #fillPoly . It can fill not only convex polygons but any monotonic polygon without 8934 * self-intersections, that is, a polygon whose contour intersects every horizontal line (scan line) 8935 * twice at the most (though, its top-most and/or the bottom edge could be horizontal). 8936 * 8937 * @param img Image. 8938 * @param points Polygon vertices. 8939 * @param color Polygon color. 8940 */ 8941 public static void fillConvexPoly(Mat img, MatOfPoint points, Scalar color) { 8942 Mat points_mat = points; 8943 fillConvexPoly_2(img.nativeObj, points_mat.nativeObj, color.val[0], color.val[1], color.val[2], color.val[3]); 8944 } 8945 8946 8947 // 8948 // C++: void cv::fillPoly(Mat& img, vector_vector_Point pts, Scalar color, int lineType = LINE_8, int shift = 0, Point offset = Point()) 8949 // 8950 8951 /** 8952 * Fills the area bounded by one or more polygons. 8953 * 8954 * The function cv::fillPoly fills an area bounded by several polygonal contours. The function can fill 8955 * complex areas, for example, areas with holes, contours with self-intersections (some of their 8956 * parts), and so forth. 8957 * 8958 * @param img Image. 8959 * @param pts Array of polygons where each polygon is represented as an array of points. 8960 * @param color Polygon color. 8961 * @param lineType Type of the polygon boundaries. See #LineTypes 8962 * @param shift Number of fractional bits in the vertex coordinates. 8963 * @param offset Optional offset of all points of the contours. 8964 */ 8965 public static void fillPoly(Mat img, List<MatOfPoint> pts, Scalar color, int lineType, int shift, Point offset) { 8966 List<Mat> pts_tmplm = new ArrayList<Mat>((pts != null) ? pts.size() : 0); 8967 Mat pts_mat = Converters.vector_vector_Point_to_Mat(pts, pts_tmplm); 8968 fillPoly_0(img.nativeObj, pts_mat.nativeObj, color.val[0], color.val[1], color.val[2], color.val[3], lineType, shift, offset.x, offset.y); 8969 } 8970 8971 /** 8972 * Fills the area bounded by one or more polygons. 8973 * 8974 * The function cv::fillPoly fills an area bounded by several polygonal contours. The function can fill 8975 * complex areas, for example, areas with holes, contours with self-intersections (some of their 8976 * parts), and so forth. 8977 * 8978 * @param img Image. 8979 * @param pts Array of polygons where each polygon is represented as an array of points. 8980 * @param color Polygon color. 8981 * @param lineType Type of the polygon boundaries. See #LineTypes 8982 * @param shift Number of fractional bits in the vertex coordinates. 8983 */ 8984 public static void fillPoly(Mat img, List<MatOfPoint> pts, Scalar color, int lineType, int shift) { 8985 List<Mat> pts_tmplm = new ArrayList<Mat>((pts != null) ? pts.size() : 0); 8986 Mat pts_mat = Converters.vector_vector_Point_to_Mat(pts, pts_tmplm); 8987 fillPoly_1(img.nativeObj, pts_mat.nativeObj, color.val[0], color.val[1], color.val[2], color.val[3], lineType, shift); 8988 } 8989 8990 /** 8991 * Fills the area bounded by one or more polygons. 8992 * 8993 * The function cv::fillPoly fills an area bounded by several polygonal contours. The function can fill 8994 * complex areas, for example, areas with holes, contours with self-intersections (some of their 8995 * parts), and so forth. 8996 * 8997 * @param img Image. 8998 * @param pts Array of polygons where each polygon is represented as an array of points. 8999 * @param color Polygon color. 9000 * @param lineType Type of the polygon boundaries. See #LineTypes 9001 */ 9002 public static void fillPoly(Mat img, List<MatOfPoint> pts, Scalar color, int lineType) { 9003 List<Mat> pts_tmplm = new ArrayList<Mat>((pts != null) ? pts.size() : 0); 9004 Mat pts_mat = Converters.vector_vector_Point_to_Mat(pts, pts_tmplm); 9005 fillPoly_2(img.nativeObj, pts_mat.nativeObj, color.val[0], color.val[1], color.val[2], color.val[3], lineType); 9006 } 9007 9008 /** 9009 * Fills the area bounded by one or more polygons. 9010 * 9011 * The function cv::fillPoly fills an area bounded by several polygonal contours. The function can fill 9012 * complex areas, for example, areas with holes, contours with self-intersections (some of their 9013 * parts), and so forth. 9014 * 9015 * @param img Image. 9016 * @param pts Array of polygons where each polygon is represented as an array of points. 9017 * @param color Polygon color. 9018 */ 9019 public static void fillPoly(Mat img, List<MatOfPoint> pts, Scalar color) { 9020 List<Mat> pts_tmplm = new ArrayList<Mat>((pts != null) ? pts.size() : 0); 9021 Mat pts_mat = Converters.vector_vector_Point_to_Mat(pts, pts_tmplm); 9022 fillPoly_3(img.nativeObj, pts_mat.nativeObj, color.val[0], color.val[1], color.val[2], color.val[3]); 9023 } 9024 9025 9026 // 9027 // C++: void cv::polylines(Mat& img, vector_vector_Point pts, bool isClosed, Scalar color, int thickness = 1, int lineType = LINE_8, int shift = 0) 9028 // 9029 9030 /** 9031 * Draws several polygonal curves. 9032 * 9033 * @param img Image. 9034 * @param pts Array of polygonal curves. 9035 * @param isClosed Flag indicating whether the drawn polylines are closed or not. If they are closed, 9036 * the function draws a line from the last vertex of each curve to its first vertex. 9037 * @param color Polyline color. 9038 * @param thickness Thickness of the polyline edges. 9039 * @param lineType Type of the line segments. See #LineTypes 9040 * @param shift Number of fractional bits in the vertex coordinates. 9041 * 9042 * The function cv::polylines draws one or more polygonal curves. 9043 */ 9044 public static void polylines(Mat img, List<MatOfPoint> pts, boolean isClosed, Scalar color, int thickness, int lineType, int shift) { 9045 List<Mat> pts_tmplm = new ArrayList<Mat>((pts != null) ? pts.size() : 0); 9046 Mat pts_mat = Converters.vector_vector_Point_to_Mat(pts, pts_tmplm); 9047 polylines_0(img.nativeObj, pts_mat.nativeObj, isClosed, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType, shift); 9048 } 9049 9050 /** 9051 * Draws several polygonal curves. 9052 * 9053 * @param img Image. 9054 * @param pts Array of polygonal curves. 9055 * @param isClosed Flag indicating whether the drawn polylines are closed or not. If they are closed, 9056 * the function draws a line from the last vertex of each curve to its first vertex. 9057 * @param color Polyline color. 9058 * @param thickness Thickness of the polyline edges. 9059 * @param lineType Type of the line segments. See #LineTypes 9060 * 9061 * The function cv::polylines draws one or more polygonal curves. 9062 */ 9063 public static void polylines(Mat img, List<MatOfPoint> pts, boolean isClosed, Scalar color, int thickness, int lineType) { 9064 List<Mat> pts_tmplm = new ArrayList<Mat>((pts != null) ? pts.size() : 0); 9065 Mat pts_mat = Converters.vector_vector_Point_to_Mat(pts, pts_tmplm); 9066 polylines_1(img.nativeObj, pts_mat.nativeObj, isClosed, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType); 9067 } 9068 9069 /** 9070 * Draws several polygonal curves. 9071 * 9072 * @param img Image. 9073 * @param pts Array of polygonal curves. 9074 * @param isClosed Flag indicating whether the drawn polylines are closed or not. If they are closed, 9075 * the function draws a line from the last vertex of each curve to its first vertex. 9076 * @param color Polyline color. 9077 * @param thickness Thickness of the polyline edges. 9078 * 9079 * The function cv::polylines draws one or more polygonal curves. 9080 */ 9081 public static void polylines(Mat img, List<MatOfPoint> pts, boolean isClosed, Scalar color, int thickness) { 9082 List<Mat> pts_tmplm = new ArrayList<Mat>((pts != null) ? pts.size() : 0); 9083 Mat pts_mat = Converters.vector_vector_Point_to_Mat(pts, pts_tmplm); 9084 polylines_2(img.nativeObj, pts_mat.nativeObj, isClosed, color.val[0], color.val[1], color.val[2], color.val[3], thickness); 9085 } 9086 9087 /** 9088 * Draws several polygonal curves. 9089 * 9090 * @param img Image. 9091 * @param pts Array of polygonal curves. 9092 * @param isClosed Flag indicating whether the drawn polylines are closed or not. If they are closed, 9093 * the function draws a line from the last vertex of each curve to its first vertex. 9094 * @param color Polyline color. 9095 * 9096 * The function cv::polylines draws one or more polygonal curves. 9097 */ 9098 public static void polylines(Mat img, List<MatOfPoint> pts, boolean isClosed, Scalar color) { 9099 List<Mat> pts_tmplm = new ArrayList<Mat>((pts != null) ? pts.size() : 0); 9100 Mat pts_mat = Converters.vector_vector_Point_to_Mat(pts, pts_tmplm); 9101 polylines_3(img.nativeObj, pts_mat.nativeObj, isClosed, color.val[0], color.val[1], color.val[2], color.val[3]); 9102 } 9103 9104 9105 // 9106 // C++: void cv::drawContours(Mat& image, vector_vector_Point contours, int contourIdx, Scalar color, int thickness = 1, int lineType = LINE_8, Mat hierarchy = Mat(), int maxLevel = INT_MAX, Point offset = Point()) 9107 // 9108 9109 /** 9110 * Draws contours outlines or filled contours. 9111 * 9112 * The function draws contour outlines in the image if \(\texttt{thickness} \ge 0\) or fills the area 9113 * bounded by the contours if \(\texttt{thickness}<0\) . The example below shows how to retrieve 9114 * connected components from the binary image and label them: : 9115 * INCLUDE: snippets/imgproc_drawContours.cpp 9116 * 9117 * @param image Destination image. 9118 * @param contours All the input contours. Each contour is stored as a point vector. 9119 * @param contourIdx Parameter indicating a contour to draw. If it is negative, all the contours are drawn. 9120 * @param color Color of the contours. 9121 * @param thickness Thickness of lines the contours are drawn with. If it is negative (for example, 9122 * thickness=#FILLED ), the contour interiors are drawn. 9123 * @param lineType Line connectivity. See #LineTypes 9124 * @param hierarchy Optional information about hierarchy. It is only needed if you want to draw only 9125 * some of the contours (see maxLevel ). 9126 * @param maxLevel Maximal level for drawn contours. If it is 0, only the specified contour is drawn. 9127 * If it is 1, the function draws the contour(s) and all the nested contours. If it is 2, the function 9128 * draws the contours, all the nested contours, all the nested-to-nested contours, and so on. This 9129 * parameter is only taken into account when there is hierarchy available. 9130 * @param offset Optional contour shift parameter. Shift all the drawn contours by the specified 9131 * \(\texttt{offset}=(dx,dy)\) . 9132 * <b>Note:</b> When thickness=#FILLED, the function is designed to handle connected components with holes correctly 9133 * even when no hierarchy data is provided. This is done by analyzing all the outlines together 9134 * using even-odd rule. This may give incorrect results if you have a joint collection of separately retrieved 9135 * contours. In order to solve this problem, you need to call #drawContours separately for each sub-group 9136 * of contours, or iterate over the collection using contourIdx parameter. 9137 */ 9138 public static void drawContours(Mat image, List<MatOfPoint> contours, int contourIdx, Scalar color, int thickness, int lineType, Mat hierarchy, int maxLevel, Point offset) { 9139 List<Mat> contours_tmplm = new ArrayList<Mat>((contours != null) ? contours.size() : 0); 9140 Mat contours_mat = Converters.vector_vector_Point_to_Mat(contours, contours_tmplm); 9141 drawContours_0(image.nativeObj, contours_mat.nativeObj, contourIdx, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType, hierarchy.nativeObj, maxLevel, offset.x, offset.y); 9142 } 9143 9144 /** 9145 * Draws contours outlines or filled contours. 9146 * 9147 * The function draws contour outlines in the image if \(\texttt{thickness} \ge 0\) or fills the area 9148 * bounded by the contours if \(\texttt{thickness}<0\) . The example below shows how to retrieve 9149 * connected components from the binary image and label them: : 9150 * INCLUDE: snippets/imgproc_drawContours.cpp 9151 * 9152 * @param image Destination image. 9153 * @param contours All the input contours. Each contour is stored as a point vector. 9154 * @param contourIdx Parameter indicating a contour to draw. If it is negative, all the contours are drawn. 9155 * @param color Color of the contours. 9156 * @param thickness Thickness of lines the contours are drawn with. If it is negative (for example, 9157 * thickness=#FILLED ), the contour interiors are drawn. 9158 * @param lineType Line connectivity. See #LineTypes 9159 * @param hierarchy Optional information about hierarchy. It is only needed if you want to draw only 9160 * some of the contours (see maxLevel ). 9161 * @param maxLevel Maximal level for drawn contours. If it is 0, only the specified contour is drawn. 9162 * If it is 1, the function draws the contour(s) and all the nested contours. If it is 2, the function 9163 * draws the contours, all the nested contours, all the nested-to-nested contours, and so on. This 9164 * parameter is only taken into account when there is hierarchy available. 9165 * \(\texttt{offset}=(dx,dy)\) . 9166 * <b>Note:</b> When thickness=#FILLED, the function is designed to handle connected components with holes correctly 9167 * even when no hierarchy data is provided. This is done by analyzing all the outlines together 9168 * using even-odd rule. This may give incorrect results if you have a joint collection of separately retrieved 9169 * contours. In order to solve this problem, you need to call #drawContours separately for each sub-group 9170 * of contours, or iterate over the collection using contourIdx parameter. 9171 */ 9172 public static void drawContours(Mat image, List<MatOfPoint> contours, int contourIdx, Scalar color, int thickness, int lineType, Mat hierarchy, int maxLevel) { 9173 List<Mat> contours_tmplm = new ArrayList<Mat>((contours != null) ? contours.size() : 0); 9174 Mat contours_mat = Converters.vector_vector_Point_to_Mat(contours, contours_tmplm); 9175 drawContours_1(image.nativeObj, contours_mat.nativeObj, contourIdx, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType, hierarchy.nativeObj, maxLevel); 9176 } 9177 9178 /** 9179 * Draws contours outlines or filled contours. 9180 * 9181 * The function draws contour outlines in the image if \(\texttt{thickness} \ge 0\) or fills the area 9182 * bounded by the contours if \(\texttt{thickness}<0\) . The example below shows how to retrieve 9183 * connected components from the binary image and label them: : 9184 * INCLUDE: snippets/imgproc_drawContours.cpp 9185 * 9186 * @param image Destination image. 9187 * @param contours All the input contours. Each contour is stored as a point vector. 9188 * @param contourIdx Parameter indicating a contour to draw. If it is negative, all the contours are drawn. 9189 * @param color Color of the contours. 9190 * @param thickness Thickness of lines the contours are drawn with. If it is negative (for example, 9191 * thickness=#FILLED ), the contour interiors are drawn. 9192 * @param lineType Line connectivity. See #LineTypes 9193 * @param hierarchy Optional information about hierarchy. It is only needed if you want to draw only 9194 * some of the contours (see maxLevel ). 9195 * If it is 1, the function draws the contour(s) and all the nested contours. If it is 2, the function 9196 * draws the contours, all the nested contours, all the nested-to-nested contours, and so on. This 9197 * parameter is only taken into account when there is hierarchy available. 9198 * \(\texttt{offset}=(dx,dy)\) . 9199 * <b>Note:</b> When thickness=#FILLED, the function is designed to handle connected components with holes correctly 9200 * even when no hierarchy data is provided. This is done by analyzing all the outlines together 9201 * using even-odd rule. This may give incorrect results if you have a joint collection of separately retrieved 9202 * contours. In order to solve this problem, you need to call #drawContours separately for each sub-group 9203 * of contours, or iterate over the collection using contourIdx parameter. 9204 */ 9205 public static void drawContours(Mat image, List<MatOfPoint> contours, int contourIdx, Scalar color, int thickness, int lineType, Mat hierarchy) { 9206 List<Mat> contours_tmplm = new ArrayList<Mat>((contours != null) ? contours.size() : 0); 9207 Mat contours_mat = Converters.vector_vector_Point_to_Mat(contours, contours_tmplm); 9208 drawContours_2(image.nativeObj, contours_mat.nativeObj, contourIdx, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType, hierarchy.nativeObj); 9209 } 9210 9211 /** 9212 * Draws contours outlines or filled contours. 9213 * 9214 * The function draws contour outlines in the image if \(\texttt{thickness} \ge 0\) or fills the area 9215 * bounded by the contours if \(\texttt{thickness}<0\) . The example below shows how to retrieve 9216 * connected components from the binary image and label them: : 9217 * INCLUDE: snippets/imgproc_drawContours.cpp 9218 * 9219 * @param image Destination image. 9220 * @param contours All the input contours. Each contour is stored as a point vector. 9221 * @param contourIdx Parameter indicating a contour to draw. If it is negative, all the contours are drawn. 9222 * @param color Color of the contours. 9223 * @param thickness Thickness of lines the contours are drawn with. If it is negative (for example, 9224 * thickness=#FILLED ), the contour interiors are drawn. 9225 * @param lineType Line connectivity. See #LineTypes 9226 * some of the contours (see maxLevel ). 9227 * If it is 1, the function draws the contour(s) and all the nested contours. If it is 2, the function 9228 * draws the contours, all the nested contours, all the nested-to-nested contours, and so on. This 9229 * parameter is only taken into account when there is hierarchy available. 9230 * \(\texttt{offset}=(dx,dy)\) . 9231 * <b>Note:</b> When thickness=#FILLED, the function is designed to handle connected components with holes correctly 9232 * even when no hierarchy data is provided. This is done by analyzing all the outlines together 9233 * using even-odd rule. This may give incorrect results if you have a joint collection of separately retrieved 9234 * contours. In order to solve this problem, you need to call #drawContours separately for each sub-group 9235 * of contours, or iterate over the collection using contourIdx parameter. 9236 */ 9237 public static void drawContours(Mat image, List<MatOfPoint> contours, int contourIdx, Scalar color, int thickness, int lineType) { 9238 List<Mat> contours_tmplm = new ArrayList<Mat>((contours != null) ? contours.size() : 0); 9239 Mat contours_mat = Converters.vector_vector_Point_to_Mat(contours, contours_tmplm); 9240 drawContours_3(image.nativeObj, contours_mat.nativeObj, contourIdx, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType); 9241 } 9242 9243 /** 9244 * Draws contours outlines or filled contours. 9245 * 9246 * The function draws contour outlines in the image if \(\texttt{thickness} \ge 0\) or fills the area 9247 * bounded by the contours if \(\texttt{thickness}<0\) . The example below shows how to retrieve 9248 * connected components from the binary image and label them: : 9249 * INCLUDE: snippets/imgproc_drawContours.cpp 9250 * 9251 * @param image Destination image. 9252 * @param contours All the input contours. Each contour is stored as a point vector. 9253 * @param contourIdx Parameter indicating a contour to draw. If it is negative, all the contours are drawn. 9254 * @param color Color of the contours. 9255 * @param thickness Thickness of lines the contours are drawn with. If it is negative (for example, 9256 * thickness=#FILLED ), the contour interiors are drawn. 9257 * some of the contours (see maxLevel ). 9258 * If it is 1, the function draws the contour(s) and all the nested contours. If it is 2, the function 9259 * draws the contours, all the nested contours, all the nested-to-nested contours, and so on. This 9260 * parameter is only taken into account when there is hierarchy available. 9261 * \(\texttt{offset}=(dx,dy)\) . 9262 * <b>Note:</b> When thickness=#FILLED, the function is designed to handle connected components with holes correctly 9263 * even when no hierarchy data is provided. This is done by analyzing all the outlines together 9264 * using even-odd rule. This may give incorrect results if you have a joint collection of separately retrieved 9265 * contours. In order to solve this problem, you need to call #drawContours separately for each sub-group 9266 * of contours, or iterate over the collection using contourIdx parameter. 9267 */ 9268 public static void drawContours(Mat image, List<MatOfPoint> contours, int contourIdx, Scalar color, int thickness) { 9269 List<Mat> contours_tmplm = new ArrayList<Mat>((contours != null) ? contours.size() : 0); 9270 Mat contours_mat = Converters.vector_vector_Point_to_Mat(contours, contours_tmplm); 9271 drawContours_4(image.nativeObj, contours_mat.nativeObj, contourIdx, color.val[0], color.val[1], color.val[2], color.val[3], thickness); 9272 } 9273 9274 /** 9275 * Draws contours outlines or filled contours. 9276 * 9277 * The function draws contour outlines in the image if \(\texttt{thickness} \ge 0\) or fills the area 9278 * bounded by the contours if \(\texttt{thickness}<0\) . The example below shows how to retrieve 9279 * connected components from the binary image and label them: : 9280 * INCLUDE: snippets/imgproc_drawContours.cpp 9281 * 9282 * @param image Destination image. 9283 * @param contours All the input contours. Each contour is stored as a point vector. 9284 * @param contourIdx Parameter indicating a contour to draw. If it is negative, all the contours are drawn. 9285 * @param color Color of the contours. 9286 * thickness=#FILLED ), the contour interiors are drawn. 9287 * some of the contours (see maxLevel ). 9288 * If it is 1, the function draws the contour(s) and all the nested contours. If it is 2, the function 9289 * draws the contours, all the nested contours, all the nested-to-nested contours, and so on. This 9290 * parameter is only taken into account when there is hierarchy available. 9291 * \(\texttt{offset}=(dx,dy)\) . 9292 * <b>Note:</b> When thickness=#FILLED, the function is designed to handle connected components with holes correctly 9293 * even when no hierarchy data is provided. This is done by analyzing all the outlines together 9294 * using even-odd rule. This may give incorrect results if you have a joint collection of separately retrieved 9295 * contours. In order to solve this problem, you need to call #drawContours separately for each sub-group 9296 * of contours, or iterate over the collection using contourIdx parameter. 9297 */ 9298 public static void drawContours(Mat image, List<MatOfPoint> contours, int contourIdx, Scalar color) { 9299 List<Mat> contours_tmplm = new ArrayList<Mat>((contours != null) ? contours.size() : 0); 9300 Mat contours_mat = Converters.vector_vector_Point_to_Mat(contours, contours_tmplm); 9301 drawContours_5(image.nativeObj, contours_mat.nativeObj, contourIdx, color.val[0], color.val[1], color.val[2], color.val[3]); 9302 } 9303 9304 9305 // 9306 // C++: bool cv::clipLine(Rect imgRect, Point& pt1, Point& pt2) 9307 // 9308 9309 /** 9310 * 9311 * @param imgRect Image rectangle. 9312 * @param pt1 First line point. 9313 * @param pt2 Second line point. 9314 * @return automatically generated 9315 */ 9316 public static boolean clipLine(Rect imgRect, Point pt1, Point pt2) { 9317 double[] pt1_out = new double[2]; 9318 double[] pt2_out = new double[2]; 9319 boolean retVal = clipLine_0(imgRect.x, imgRect.y, imgRect.width, imgRect.height, pt1.x, pt1.y, pt1_out, pt2.x, pt2.y, pt2_out); 9320 if(pt1!=null){ pt1.x = pt1_out[0]; pt1.y = pt1_out[1]; } 9321 if(pt2!=null){ pt2.x = pt2_out[0]; pt2.y = pt2_out[1]; } 9322 return retVal; 9323 } 9324 9325 9326 // 9327 // C++: void cv::ellipse2Poly(Point center, Size axes, int angle, int arcStart, int arcEnd, int delta, vector_Point& pts) 9328 // 9329 9330 /** 9331 * Approximates an elliptic arc with a polyline. 9332 * 9333 * The function ellipse2Poly computes the vertices of a polyline that approximates the specified 9334 * elliptic arc. It is used by #ellipse. If {@code arcStart} is greater than {@code arcEnd}, they are swapped. 9335 * 9336 * @param center Center of the arc. 9337 * @param axes Half of the size of the ellipse main axes. See #ellipse for details. 9338 * @param angle Rotation angle of the ellipse in degrees. See #ellipse for details. 9339 * @param arcStart Starting angle of the elliptic arc in degrees. 9340 * @param arcEnd Ending angle of the elliptic arc in degrees. 9341 * @param delta Angle between the subsequent polyline vertices. It defines the approximation 9342 * accuracy. 9343 * @param pts Output vector of polyline vertices. 9344 */ 9345 public static void ellipse2Poly(Point center, Size axes, int angle, int arcStart, int arcEnd, int delta, MatOfPoint pts) { 9346 Mat pts_mat = pts; 9347 ellipse2Poly_0(center.x, center.y, axes.width, axes.height, angle, arcStart, arcEnd, delta, pts_mat.nativeObj); 9348 } 9349 9350 9351 // 9352 // C++: void cv::putText(Mat& img, String text, Point org, int fontFace, double fontScale, Scalar color, int thickness = 1, int lineType = LINE_8, bool bottomLeftOrigin = false) 9353 // 9354 9355 /** 9356 * Draws a text string. 9357 * 9358 * The function cv::putText renders the specified text string in the image. Symbols that cannot be rendered 9359 * using the specified font are replaced by question marks. See #getTextSize for a text rendering code 9360 * example. 9361 * 9362 * @param img Image. 9363 * @param text Text string to be drawn. 9364 * @param org Bottom-left corner of the text string in the image. 9365 * @param fontFace Font type, see #HersheyFonts. 9366 * @param fontScale Font scale factor that is multiplied by the font-specific base size. 9367 * @param color Text color. 9368 * @param thickness Thickness of the lines used to draw a text. 9369 * @param lineType Line type. See #LineTypes 9370 * @param bottomLeftOrigin When true, the image data origin is at the bottom-left corner. Otherwise, 9371 * it is at the top-left corner. 9372 */ 9373 public static void putText(Mat img, String text, Point org, int fontFace, double fontScale, Scalar color, int thickness, int lineType, boolean bottomLeftOrigin) { 9374 putText_0(img.nativeObj, text, org.x, org.y, fontFace, fontScale, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType, bottomLeftOrigin); 9375 } 9376 9377 /** 9378 * Draws a text string. 9379 * 9380 * The function cv::putText renders the specified text string in the image. Symbols that cannot be rendered 9381 * using the specified font are replaced by question marks. See #getTextSize for a text rendering code 9382 * example. 9383 * 9384 * @param img Image. 9385 * @param text Text string to be drawn. 9386 * @param org Bottom-left corner of the text string in the image. 9387 * @param fontFace Font type, see #HersheyFonts. 9388 * @param fontScale Font scale factor that is multiplied by the font-specific base size. 9389 * @param color Text color. 9390 * @param thickness Thickness of the lines used to draw a text. 9391 * @param lineType Line type. See #LineTypes 9392 * it is at the top-left corner. 9393 */ 9394 public static void putText(Mat img, String text, Point org, int fontFace, double fontScale, Scalar color, int thickness, int lineType) { 9395 putText_1(img.nativeObj, text, org.x, org.y, fontFace, fontScale, color.val[0], color.val[1], color.val[2], color.val[3], thickness, lineType); 9396 } 9397 9398 /** 9399 * Draws a text string. 9400 * 9401 * The function cv::putText renders the specified text string in the image. Symbols that cannot be rendered 9402 * using the specified font are replaced by question marks. See #getTextSize for a text rendering code 9403 * example. 9404 * 9405 * @param img Image. 9406 * @param text Text string to be drawn. 9407 * @param org Bottom-left corner of the text string in the image. 9408 * @param fontFace Font type, see #HersheyFonts. 9409 * @param fontScale Font scale factor that is multiplied by the font-specific base size. 9410 * @param color Text color. 9411 * @param thickness Thickness of the lines used to draw a text. 9412 * it is at the top-left corner. 9413 */ 9414 public static void putText(Mat img, String text, Point org, int fontFace, double fontScale, Scalar color, int thickness) { 9415 putText_2(img.nativeObj, text, org.x, org.y, fontFace, fontScale, color.val[0], color.val[1], color.val[2], color.val[3], thickness); 9416 } 9417 9418 /** 9419 * Draws a text string. 9420 * 9421 * The function cv::putText renders the specified text string in the image. Symbols that cannot be rendered 9422 * using the specified font are replaced by question marks. See #getTextSize for a text rendering code 9423 * example. 9424 * 9425 * @param img Image. 9426 * @param text Text string to be drawn. 9427 * @param org Bottom-left corner of the text string in the image. 9428 * @param fontFace Font type, see #HersheyFonts. 9429 * @param fontScale Font scale factor that is multiplied by the font-specific base size. 9430 * @param color Text color. 9431 * it is at the top-left corner. 9432 */ 9433 public static void putText(Mat img, String text, Point org, int fontFace, double fontScale, Scalar color) { 9434 putText_3(img.nativeObj, text, org.x, org.y, fontFace, fontScale, color.val[0], color.val[1], color.val[2], color.val[3]); 9435 } 9436 9437 9438 // 9439 // C++: double cv::getFontScaleFromHeight(int fontFace, int pixelHeight, int thickness = 1) 9440 // 9441 9442 /** 9443 * Calculates the font-specific size to use to achieve a given height in pixels. 9444 * 9445 * @param fontFace Font to use, see cv::HersheyFonts. 9446 * @param pixelHeight Pixel height to compute the fontScale for 9447 * @param thickness Thickness of lines used to render the text.See putText for details. 9448 * @return The fontSize to use for cv::putText 9449 * 9450 * SEE: cv::putText 9451 */ 9452 public static double getFontScaleFromHeight(int fontFace, int pixelHeight, int thickness) { 9453 return getFontScaleFromHeight_0(fontFace, pixelHeight, thickness); 9454 } 9455 9456 /** 9457 * Calculates the font-specific size to use to achieve a given height in pixels. 9458 * 9459 * @param fontFace Font to use, see cv::HersheyFonts. 9460 * @param pixelHeight Pixel height to compute the fontScale for 9461 * @return The fontSize to use for cv::putText 9462 * 9463 * SEE: cv::putText 9464 */ 9465 public static double getFontScaleFromHeight(int fontFace, int pixelHeight) { 9466 return getFontScaleFromHeight_1(fontFace, pixelHeight); 9467 } 9468 9469 9470 // 9471 // C++: void cv::HoughLinesWithAccumulator(Mat image, Mat& lines, double rho, double theta, int threshold, double srn = 0, double stn = 0, double min_theta = 0, double max_theta = CV_PI) 9472 // 9473 9474 /** 9475 * Finds lines in a binary image using the standard Hough transform and get accumulator. 9476 * 9477 * <b>Note:</b> This function is for bindings use only. Use original function in C++ code 9478 * 9479 * SEE: HoughLines 9480 * @param image automatically generated 9481 * @param lines automatically generated 9482 * @param rho automatically generated 9483 * @param theta automatically generated 9484 * @param threshold automatically generated 9485 * @param srn automatically generated 9486 * @param stn automatically generated 9487 * @param min_theta automatically generated 9488 * @param max_theta automatically generated 9489 */ 9490 public static void HoughLinesWithAccumulator(Mat image, Mat lines, double rho, double theta, int threshold, double srn, double stn, double min_theta, double max_theta) { 9491 HoughLinesWithAccumulator_0(image.nativeObj, lines.nativeObj, rho, theta, threshold, srn, stn, min_theta, max_theta); 9492 } 9493 9494 /** 9495 * Finds lines in a binary image using the standard Hough transform and get accumulator. 9496 * 9497 * <b>Note:</b> This function is for bindings use only. Use original function in C++ code 9498 * 9499 * SEE: HoughLines 9500 * @param image automatically generated 9501 * @param lines automatically generated 9502 * @param rho automatically generated 9503 * @param theta automatically generated 9504 * @param threshold automatically generated 9505 * @param srn automatically generated 9506 * @param stn automatically generated 9507 * @param min_theta automatically generated 9508 */ 9509 public static void HoughLinesWithAccumulator(Mat image, Mat lines, double rho, double theta, int threshold, double srn, double stn, double min_theta) { 9510 HoughLinesWithAccumulator_1(image.nativeObj, lines.nativeObj, rho, theta, threshold, srn, stn, min_theta); 9511 } 9512 9513 /** 9514 * Finds lines in a binary image using the standard Hough transform and get accumulator. 9515 * 9516 * <b>Note:</b> This function is for bindings use only. Use original function in C++ code 9517 * 9518 * SEE: HoughLines 9519 * @param image automatically generated 9520 * @param lines automatically generated 9521 * @param rho automatically generated 9522 * @param theta automatically generated 9523 * @param threshold automatically generated 9524 * @param srn automatically generated 9525 * @param stn automatically generated 9526 */ 9527 public static void HoughLinesWithAccumulator(Mat image, Mat lines, double rho, double theta, int threshold, double srn, double stn) { 9528 HoughLinesWithAccumulator_2(image.nativeObj, lines.nativeObj, rho, theta, threshold, srn, stn); 9529 } 9530 9531 /** 9532 * Finds lines in a binary image using the standard Hough transform and get accumulator. 9533 * 9534 * <b>Note:</b> This function is for bindings use only. Use original function in C++ code 9535 * 9536 * SEE: HoughLines 9537 * @param image automatically generated 9538 * @param lines automatically generated 9539 * @param rho automatically generated 9540 * @param theta automatically generated 9541 * @param threshold automatically generated 9542 * @param srn automatically generated 9543 */ 9544 public static void HoughLinesWithAccumulator(Mat image, Mat lines, double rho, double theta, int threshold, double srn) { 9545 HoughLinesWithAccumulator_3(image.nativeObj, lines.nativeObj, rho, theta, threshold, srn); 9546 } 9547 9548 /** 9549 * Finds lines in a binary image using the standard Hough transform and get accumulator. 9550 * 9551 * <b>Note:</b> This function is for bindings use only. Use original function in C++ code 9552 * 9553 * SEE: HoughLines 9554 * @param image automatically generated 9555 * @param lines automatically generated 9556 * @param rho automatically generated 9557 * @param theta automatically generated 9558 * @param threshold automatically generated 9559 */ 9560 public static void HoughLinesWithAccumulator(Mat image, Mat lines, double rho, double theta, int threshold) { 9561 HoughLinesWithAccumulator_4(image.nativeObj, lines.nativeObj, rho, theta, threshold); 9562 } 9563 9564 9565 9566// C++: Size getTextSize(const String& text, int fontFace, double fontScale, int thickness, int* baseLine); 9567//javadoc:getTextSize(text, fontFace, fontScale, thickness, baseLine) 9568public static Size getTextSize(String text, int fontFace, double fontScale, int thickness, int[] baseLine) { 9569 if(baseLine != null && baseLine.length != 1) 9570 throw new java.lang.IllegalArgumentException("'baseLine' must be 'int[1]' or 'null'."); 9571 Size retVal = new Size(n_getTextSize(text, fontFace, fontScale, thickness, baseLine)); 9572 return retVal; 9573} 9574 9575 9576 9577 9578 // C++: Ptr_LineSegmentDetector cv::createLineSegmentDetector(int refine = LSD_REFINE_STD, double scale = 0.8, double sigma_scale = 0.6, double quant = 2.0, double ang_th = 22.5, double log_eps = 0, double density_th = 0.7, int n_bins = 1024) 9579 private static native long createLineSegmentDetector_0(int refine, double scale, double sigma_scale, double quant, double ang_th, double log_eps, double density_th, int n_bins); 9580 private static native long createLineSegmentDetector_1(int refine, double scale, double sigma_scale, double quant, double ang_th, double log_eps, double density_th); 9581 private static native long createLineSegmentDetector_2(int refine, double scale, double sigma_scale, double quant, double ang_th, double log_eps); 9582 private static native long createLineSegmentDetector_3(int refine, double scale, double sigma_scale, double quant, double ang_th); 9583 private static native long createLineSegmentDetector_4(int refine, double scale, double sigma_scale, double quant); 9584 private static native long createLineSegmentDetector_5(int refine, double scale, double sigma_scale); 9585 private static native long createLineSegmentDetector_6(int refine, double scale); 9586 private static native long createLineSegmentDetector_7(int refine); 9587 private static native long createLineSegmentDetector_8(); 9588 9589 // C++: Mat cv::getGaussianKernel(int ksize, double sigma, int ktype = CV_64F) 9590 private static native long getGaussianKernel_0(int ksize, double sigma, int ktype); 9591 private static native long getGaussianKernel_1(int ksize, double sigma); 9592 9593 // C++: void cv::getDerivKernels(Mat& kx, Mat& ky, int dx, int dy, int ksize, bool normalize = false, int ktype = CV_32F) 9594 private static native void getDerivKernels_0(long kx_nativeObj, long ky_nativeObj, int dx, int dy, int ksize, boolean normalize, int ktype); 9595 private static native void getDerivKernels_1(long kx_nativeObj, long ky_nativeObj, int dx, int dy, int ksize, boolean normalize); 9596 private static native void getDerivKernels_2(long kx_nativeObj, long ky_nativeObj, int dx, int dy, int ksize); 9597 9598 // C++: Mat cv::getGaborKernel(Size ksize, double sigma, double theta, double lambd, double gamma, double psi = CV_PI*0.5, int ktype = CV_64F) 9599 private static native long getGaborKernel_0(double ksize_width, double ksize_height, double sigma, double theta, double lambd, double gamma, double psi, int ktype); 9600 private static native long getGaborKernel_1(double ksize_width, double ksize_height, double sigma, double theta, double lambd, double gamma, double psi); 9601 private static native long getGaborKernel_2(double ksize_width, double ksize_height, double sigma, double theta, double lambd, double gamma); 9602 9603 // C++: Mat cv::getStructuringElement(int shape, Size ksize, Point anchor = Point(-1,-1)) 9604 private static native long getStructuringElement_0(int shape, double ksize_width, double ksize_height, double anchor_x, double anchor_y); 9605 private static native long getStructuringElement_1(int shape, double ksize_width, double ksize_height); 9606 9607 // C++: void cv::medianBlur(Mat src, Mat& dst, int ksize) 9608 private static native void medianBlur_0(long src_nativeObj, long dst_nativeObj, int ksize); 9609 9610 // C++: void cv::GaussianBlur(Mat src, Mat& dst, Size ksize, double sigmaX, double sigmaY = 0, int borderType = BORDER_DEFAULT) 9611 private static native void GaussianBlur_0(long src_nativeObj, long dst_nativeObj, double ksize_width, double ksize_height, double sigmaX, double sigmaY, int borderType); 9612 private static native void GaussianBlur_1(long src_nativeObj, long dst_nativeObj, double ksize_width, double ksize_height, double sigmaX, double sigmaY); 9613 private static native void GaussianBlur_2(long src_nativeObj, long dst_nativeObj, double ksize_width, double ksize_height, double sigmaX); 9614 9615 // C++: void cv::bilateralFilter(Mat src, Mat& dst, int d, double sigmaColor, double sigmaSpace, int borderType = BORDER_DEFAULT) 9616 private static native void bilateralFilter_0(long src_nativeObj, long dst_nativeObj, int d, double sigmaColor, double sigmaSpace, int borderType); 9617 private static native void bilateralFilter_1(long src_nativeObj, long dst_nativeObj, int d, double sigmaColor, double sigmaSpace); 9618 9619 // C++: void cv::boxFilter(Mat src, Mat& dst, int ddepth, Size ksize, Point anchor = Point(-1,-1), bool normalize = true, int borderType = BORDER_DEFAULT) 9620 private static native void boxFilter_0(long src_nativeObj, long dst_nativeObj, int ddepth, double ksize_width, double ksize_height, double anchor_x, double anchor_y, boolean normalize, int borderType); 9621 private static native void boxFilter_1(long src_nativeObj, long dst_nativeObj, int ddepth, double ksize_width, double ksize_height, double anchor_x, double anchor_y, boolean normalize); 9622 private static native void boxFilter_2(long src_nativeObj, long dst_nativeObj, int ddepth, double ksize_width, double ksize_height, double anchor_x, double anchor_y); 9623 private static native void boxFilter_3(long src_nativeObj, long dst_nativeObj, int ddepth, double ksize_width, double ksize_height); 9624 9625 // C++: void cv::sqrBoxFilter(Mat src, Mat& dst, int ddepth, Size ksize, Point anchor = Point(-1, -1), bool normalize = true, int borderType = BORDER_DEFAULT) 9626 private static native void sqrBoxFilter_0(long src_nativeObj, long dst_nativeObj, int ddepth, double ksize_width, double ksize_height, double anchor_x, double anchor_y, boolean normalize, int borderType); 9627 private static native void sqrBoxFilter_1(long src_nativeObj, long dst_nativeObj, int ddepth, double ksize_width, double ksize_height, double anchor_x, double anchor_y, boolean normalize); 9628 private static native void sqrBoxFilter_2(long src_nativeObj, long dst_nativeObj, int ddepth, double ksize_width, double ksize_height, double anchor_x, double anchor_y); 9629 private static native void sqrBoxFilter_3(long src_nativeObj, long dst_nativeObj, int ddepth, double ksize_width, double ksize_height); 9630 9631 // C++: void cv::blur(Mat src, Mat& dst, Size ksize, Point anchor = Point(-1,-1), int borderType = BORDER_DEFAULT) 9632 private static native void blur_0(long src_nativeObj, long dst_nativeObj, double ksize_width, double ksize_height, double anchor_x, double anchor_y, int borderType); 9633 private static native void blur_1(long src_nativeObj, long dst_nativeObj, double ksize_width, double ksize_height, double anchor_x, double anchor_y); 9634 private static native void blur_2(long src_nativeObj, long dst_nativeObj, double ksize_width, double ksize_height); 9635 9636 // C++: void cv::filter2D(Mat src, Mat& dst, int ddepth, Mat kernel, Point anchor = Point(-1,-1), double delta = 0, int borderType = BORDER_DEFAULT) 9637 private static native void filter2D_0(long src_nativeObj, long dst_nativeObj, int ddepth, long kernel_nativeObj, double anchor_x, double anchor_y, double delta, int borderType); 9638 private static native void filter2D_1(long src_nativeObj, long dst_nativeObj, int ddepth, long kernel_nativeObj, double anchor_x, double anchor_y, double delta); 9639 private static native void filter2D_2(long src_nativeObj, long dst_nativeObj, int ddepth, long kernel_nativeObj, double anchor_x, double anchor_y); 9640 private static native void filter2D_3(long src_nativeObj, long dst_nativeObj, int ddepth, long kernel_nativeObj); 9641 9642 // C++: void cv::sepFilter2D(Mat src, Mat& dst, int ddepth, Mat kernelX, Mat kernelY, Point anchor = Point(-1,-1), double delta = 0, int borderType = BORDER_DEFAULT) 9643 private static native void sepFilter2D_0(long src_nativeObj, long dst_nativeObj, int ddepth, long kernelX_nativeObj, long kernelY_nativeObj, double anchor_x, double anchor_y, double delta, int borderType); 9644 private static native void sepFilter2D_1(long src_nativeObj, long dst_nativeObj, int ddepth, long kernelX_nativeObj, long kernelY_nativeObj, double anchor_x, double anchor_y, double delta); 9645 private static native void sepFilter2D_2(long src_nativeObj, long dst_nativeObj, int ddepth, long kernelX_nativeObj, long kernelY_nativeObj, double anchor_x, double anchor_y); 9646 private static native void sepFilter2D_3(long src_nativeObj, long dst_nativeObj, int ddepth, long kernelX_nativeObj, long kernelY_nativeObj); 9647 9648 // C++: void cv::Sobel(Mat src, Mat& dst, int ddepth, int dx, int dy, int ksize = 3, double scale = 1, double delta = 0, int borderType = BORDER_DEFAULT) 9649 private static native void Sobel_0(long src_nativeObj, long dst_nativeObj, int ddepth, int dx, int dy, int ksize, double scale, double delta, int borderType); 9650 private static native void Sobel_1(long src_nativeObj, long dst_nativeObj, int ddepth, int dx, int dy, int ksize, double scale, double delta); 9651 private static native void Sobel_2(long src_nativeObj, long dst_nativeObj, int ddepth, int dx, int dy, int ksize, double scale); 9652 private static native void Sobel_3(long src_nativeObj, long dst_nativeObj, int ddepth, int dx, int dy, int ksize); 9653 private static native void Sobel_4(long src_nativeObj, long dst_nativeObj, int ddepth, int dx, int dy); 9654 9655 // C++: void cv::spatialGradient(Mat src, Mat& dx, Mat& dy, int ksize = 3, int borderType = BORDER_DEFAULT) 9656 private static native void spatialGradient_0(long src_nativeObj, long dx_nativeObj, long dy_nativeObj, int ksize, int borderType); 9657 private static native void spatialGradient_1(long src_nativeObj, long dx_nativeObj, long dy_nativeObj, int ksize); 9658 private static native void spatialGradient_2(long src_nativeObj, long dx_nativeObj, long dy_nativeObj); 9659 9660 // C++: void cv::Scharr(Mat src, Mat& dst, int ddepth, int dx, int dy, double scale = 1, double delta = 0, int borderType = BORDER_DEFAULT) 9661 private static native void Scharr_0(long src_nativeObj, long dst_nativeObj, int ddepth, int dx, int dy, double scale, double delta, int borderType); 9662 private static native void Scharr_1(long src_nativeObj, long dst_nativeObj, int ddepth, int dx, int dy, double scale, double delta); 9663 private static native void Scharr_2(long src_nativeObj, long dst_nativeObj, int ddepth, int dx, int dy, double scale); 9664 private static native void Scharr_3(long src_nativeObj, long dst_nativeObj, int ddepth, int dx, int dy); 9665 9666 // C++: void cv::Laplacian(Mat src, Mat& dst, int ddepth, int ksize = 1, double scale = 1, double delta = 0, int borderType = BORDER_DEFAULT) 9667 private static native void Laplacian_0(long src_nativeObj, long dst_nativeObj, int ddepth, int ksize, double scale, double delta, int borderType); 9668 private static native void Laplacian_1(long src_nativeObj, long dst_nativeObj, int ddepth, int ksize, double scale, double delta); 9669 private static native void Laplacian_2(long src_nativeObj, long dst_nativeObj, int ddepth, int ksize, double scale); 9670 private static native void Laplacian_3(long src_nativeObj, long dst_nativeObj, int ddepth, int ksize); 9671 private static native void Laplacian_4(long src_nativeObj, long dst_nativeObj, int ddepth); 9672 9673 // C++: void cv::Canny(Mat image, Mat& edges, double threshold1, double threshold2, int apertureSize = 3, bool L2gradient = false) 9674 private static native void Canny_0(long image_nativeObj, long edges_nativeObj, double threshold1, double threshold2, int apertureSize, boolean L2gradient); 9675 private static native void Canny_1(long image_nativeObj, long edges_nativeObj, double threshold1, double threshold2, int apertureSize); 9676 private static native void Canny_2(long image_nativeObj, long edges_nativeObj, double threshold1, double threshold2); 9677 9678 // C++: void cv::Canny(Mat dx, Mat dy, Mat& edges, double threshold1, double threshold2, bool L2gradient = false) 9679 private static native void Canny_3(long dx_nativeObj, long dy_nativeObj, long edges_nativeObj, double threshold1, double threshold2, boolean L2gradient); 9680 private static native void Canny_4(long dx_nativeObj, long dy_nativeObj, long edges_nativeObj, double threshold1, double threshold2); 9681 9682 // C++: void cv::cornerMinEigenVal(Mat src, Mat& dst, int blockSize, int ksize = 3, int borderType = BORDER_DEFAULT) 9683 private static native void cornerMinEigenVal_0(long src_nativeObj, long dst_nativeObj, int blockSize, int ksize, int borderType); 9684 private static native void cornerMinEigenVal_1(long src_nativeObj, long dst_nativeObj, int blockSize, int ksize); 9685 private static native void cornerMinEigenVal_2(long src_nativeObj, long dst_nativeObj, int blockSize); 9686 9687 // C++: void cv::cornerHarris(Mat src, Mat& dst, int blockSize, int ksize, double k, int borderType = BORDER_DEFAULT) 9688 private static native void cornerHarris_0(long src_nativeObj, long dst_nativeObj, int blockSize, int ksize, double k, int borderType); 9689 private static native void cornerHarris_1(long src_nativeObj, long dst_nativeObj, int blockSize, int ksize, double k); 9690 9691 // C++: void cv::cornerEigenValsAndVecs(Mat src, Mat& dst, int blockSize, int ksize, int borderType = BORDER_DEFAULT) 9692 private static native void cornerEigenValsAndVecs_0(long src_nativeObj, long dst_nativeObj, int blockSize, int ksize, int borderType); 9693 private static native void cornerEigenValsAndVecs_1(long src_nativeObj, long dst_nativeObj, int blockSize, int ksize); 9694 9695 // C++: void cv::preCornerDetect(Mat src, Mat& dst, int ksize, int borderType = BORDER_DEFAULT) 9696 private static native void preCornerDetect_0(long src_nativeObj, long dst_nativeObj, int ksize, int borderType); 9697 private static native void preCornerDetect_1(long src_nativeObj, long dst_nativeObj, int ksize); 9698 9699 // C++: void cv::cornerSubPix(Mat image, Mat& corners, Size winSize, Size zeroZone, TermCriteria criteria) 9700 private static native void cornerSubPix_0(long image_nativeObj, long corners_nativeObj, double winSize_width, double winSize_height, double zeroZone_width, double zeroZone_height, int criteria_type, int criteria_maxCount, double criteria_epsilon); 9701 9702 // C++: void cv::goodFeaturesToTrack(Mat image, vector_Point& corners, int maxCorners, double qualityLevel, double minDistance, Mat mask = Mat(), int blockSize = 3, bool useHarrisDetector = false, double k = 0.04) 9703 private static native void goodFeaturesToTrack_0(long image_nativeObj, long corners_mat_nativeObj, int maxCorners, double qualityLevel, double minDistance, long mask_nativeObj, int blockSize, boolean useHarrisDetector, double k); 9704 private static native void goodFeaturesToTrack_1(long image_nativeObj, long corners_mat_nativeObj, int maxCorners, double qualityLevel, double minDistance, long mask_nativeObj, int blockSize, boolean useHarrisDetector); 9705 private static native void goodFeaturesToTrack_2(long image_nativeObj, long corners_mat_nativeObj, int maxCorners, double qualityLevel, double minDistance, long mask_nativeObj, int blockSize); 9706 private static native void goodFeaturesToTrack_3(long image_nativeObj, long corners_mat_nativeObj, int maxCorners, double qualityLevel, double minDistance, long mask_nativeObj); 9707 private static native void goodFeaturesToTrack_4(long image_nativeObj, long corners_mat_nativeObj, int maxCorners, double qualityLevel, double minDistance); 9708 9709 // C++: void cv::goodFeaturesToTrack(Mat image, vector_Point& corners, int maxCorners, double qualityLevel, double minDistance, Mat mask, int blockSize, int gradientSize, bool useHarrisDetector = false, double k = 0.04) 9710 private static native void goodFeaturesToTrack_5(long image_nativeObj, long corners_mat_nativeObj, int maxCorners, double qualityLevel, double minDistance, long mask_nativeObj, int blockSize, int gradientSize, boolean useHarrisDetector, double k); 9711 private static native void goodFeaturesToTrack_6(long image_nativeObj, long corners_mat_nativeObj, int maxCorners, double qualityLevel, double minDistance, long mask_nativeObj, int blockSize, int gradientSize, boolean useHarrisDetector); 9712 private static native void goodFeaturesToTrack_7(long image_nativeObj, long corners_mat_nativeObj, int maxCorners, double qualityLevel, double minDistance, long mask_nativeObj, int blockSize, int gradientSize); 9713 9714 // C++: void cv::goodFeaturesToTrack(Mat image, Mat& corners, int maxCorners, double qualityLevel, double minDistance, Mat mask, Mat& cornersQuality, int blockSize = 3, int gradientSize = 3, bool useHarrisDetector = false, double k = 0.04) 9715 private static native void goodFeaturesToTrackWithQuality_0(long image_nativeObj, long corners_nativeObj, int maxCorners, double qualityLevel, double minDistance, long mask_nativeObj, long cornersQuality_nativeObj, int blockSize, int gradientSize, boolean useHarrisDetector, double k); 9716 private static native void goodFeaturesToTrackWithQuality_1(long image_nativeObj, long corners_nativeObj, int maxCorners, double qualityLevel, double minDistance, long mask_nativeObj, long cornersQuality_nativeObj, int blockSize, int gradientSize, boolean useHarrisDetector); 9717 private static native void goodFeaturesToTrackWithQuality_2(long image_nativeObj, long corners_nativeObj, int maxCorners, double qualityLevel, double minDistance, long mask_nativeObj, long cornersQuality_nativeObj, int blockSize, int gradientSize); 9718 private static native void goodFeaturesToTrackWithQuality_3(long image_nativeObj, long corners_nativeObj, int maxCorners, double qualityLevel, double minDistance, long mask_nativeObj, long cornersQuality_nativeObj, int blockSize); 9719 private static native void goodFeaturesToTrackWithQuality_4(long image_nativeObj, long corners_nativeObj, int maxCorners, double qualityLevel, double minDistance, long mask_nativeObj, long cornersQuality_nativeObj); 9720 9721 // C++: void cv::HoughLines(Mat image, Mat& lines, double rho, double theta, int threshold, double srn = 0, double stn = 0, double min_theta = 0, double max_theta = CV_PI) 9722 private static native void HoughLines_0(long image_nativeObj, long lines_nativeObj, double rho, double theta, int threshold, double srn, double stn, double min_theta, double max_theta); 9723 private static native void HoughLines_1(long image_nativeObj, long lines_nativeObj, double rho, double theta, int threshold, double srn, double stn, double min_theta); 9724 private static native void HoughLines_2(long image_nativeObj, long lines_nativeObj, double rho, double theta, int threshold, double srn, double stn); 9725 private static native void HoughLines_3(long image_nativeObj, long lines_nativeObj, double rho, double theta, int threshold, double srn); 9726 private static native void HoughLines_4(long image_nativeObj, long lines_nativeObj, double rho, double theta, int threshold); 9727 9728 // C++: void cv::HoughLinesP(Mat image, Mat& lines, double rho, double theta, int threshold, double minLineLength = 0, double maxLineGap = 0) 9729 private static native void HoughLinesP_0(long image_nativeObj, long lines_nativeObj, double rho, double theta, int threshold, double minLineLength, double maxLineGap); 9730 private static native void HoughLinesP_1(long image_nativeObj, long lines_nativeObj, double rho, double theta, int threshold, double minLineLength); 9731 private static native void HoughLinesP_2(long image_nativeObj, long lines_nativeObj, double rho, double theta, int threshold); 9732 9733 // C++: void cv::HoughLinesPointSet(Mat point, Mat& lines, int lines_max, int threshold, double min_rho, double max_rho, double rho_step, double min_theta, double max_theta, double theta_step) 9734 private static native void HoughLinesPointSet_0(long point_nativeObj, long lines_nativeObj, int lines_max, int threshold, double min_rho, double max_rho, double rho_step, double min_theta, double max_theta, double theta_step); 9735 9736 // C++: void cv::HoughCircles(Mat image, Mat& circles, int method, double dp, double minDist, double param1 = 100, double param2 = 100, int minRadius = 0, int maxRadius = 0) 9737 private static native void HoughCircles_0(long image_nativeObj, long circles_nativeObj, int method, double dp, double minDist, double param1, double param2, int minRadius, int maxRadius); 9738 private static native void HoughCircles_1(long image_nativeObj, long circles_nativeObj, int method, double dp, double minDist, double param1, double param2, int minRadius); 9739 private static native void HoughCircles_2(long image_nativeObj, long circles_nativeObj, int method, double dp, double minDist, double param1, double param2); 9740 private static native void HoughCircles_3(long image_nativeObj, long circles_nativeObj, int method, double dp, double minDist, double param1); 9741 private static native void HoughCircles_4(long image_nativeObj, long circles_nativeObj, int method, double dp, double minDist); 9742 9743 // C++: void cv::erode(Mat src, Mat& dst, Mat kernel, Point anchor = Point(-1,-1), int iterations = 1, int borderType = BORDER_CONSTANT, Scalar borderValue = morphologyDefaultBorderValue()) 9744 private static native void erode_0(long src_nativeObj, long dst_nativeObj, long kernel_nativeObj, double anchor_x, double anchor_y, int iterations, int borderType, double borderValue_val0, double borderValue_val1, double borderValue_val2, double borderValue_val3); 9745 private static native void erode_1(long src_nativeObj, long dst_nativeObj, long kernel_nativeObj, double anchor_x, double anchor_y, int iterations, int borderType); 9746 private static native void erode_2(long src_nativeObj, long dst_nativeObj, long kernel_nativeObj, double anchor_x, double anchor_y, int iterations); 9747 private static native void erode_3(long src_nativeObj, long dst_nativeObj, long kernel_nativeObj, double anchor_x, double anchor_y); 9748 private static native void erode_4(long src_nativeObj, long dst_nativeObj, long kernel_nativeObj); 9749 9750 // C++: void cv::dilate(Mat src, Mat& dst, Mat kernel, Point anchor = Point(-1,-1), int iterations = 1, int borderType = BORDER_CONSTANT, Scalar borderValue = morphologyDefaultBorderValue()) 9751 private static native void dilate_0(long src_nativeObj, long dst_nativeObj, long kernel_nativeObj, double anchor_x, double anchor_y, int iterations, int borderType, double borderValue_val0, double borderValue_val1, double borderValue_val2, double borderValue_val3); 9752 private static native void dilate_1(long src_nativeObj, long dst_nativeObj, long kernel_nativeObj, double anchor_x, double anchor_y, int iterations, int borderType); 9753 private static native void dilate_2(long src_nativeObj, long dst_nativeObj, long kernel_nativeObj, double anchor_x, double anchor_y, int iterations); 9754 private static native void dilate_3(long src_nativeObj, long dst_nativeObj, long kernel_nativeObj, double anchor_x, double anchor_y); 9755 private static native void dilate_4(long src_nativeObj, long dst_nativeObj, long kernel_nativeObj); 9756 9757 // C++: void cv::morphologyEx(Mat src, Mat& dst, int op, Mat kernel, Point anchor = Point(-1,-1), int iterations = 1, int borderType = BORDER_CONSTANT, Scalar borderValue = morphologyDefaultBorderValue()) 9758 private static native void morphologyEx_0(long src_nativeObj, long dst_nativeObj, int op, long kernel_nativeObj, double anchor_x, double anchor_y, int iterations, int borderType, double borderValue_val0, double borderValue_val1, double borderValue_val2, double borderValue_val3); 9759 private static native void morphologyEx_1(long src_nativeObj, long dst_nativeObj, int op, long kernel_nativeObj, double anchor_x, double anchor_y, int iterations, int borderType); 9760 private static native void morphologyEx_2(long src_nativeObj, long dst_nativeObj, int op, long kernel_nativeObj, double anchor_x, double anchor_y, int iterations); 9761 private static native void morphologyEx_3(long src_nativeObj, long dst_nativeObj, int op, long kernel_nativeObj, double anchor_x, double anchor_y); 9762 private static native void morphologyEx_4(long src_nativeObj, long dst_nativeObj, int op, long kernel_nativeObj); 9763 9764 // C++: void cv::resize(Mat src, Mat& dst, Size dsize, double fx = 0, double fy = 0, int interpolation = INTER_LINEAR) 9765 private static native void resize_0(long src_nativeObj, long dst_nativeObj, double dsize_width, double dsize_height, double fx, double fy, int interpolation); 9766 private static native void resize_1(long src_nativeObj, long dst_nativeObj, double dsize_width, double dsize_height, double fx, double fy); 9767 private static native void resize_2(long src_nativeObj, long dst_nativeObj, double dsize_width, double dsize_height, double fx); 9768 private static native void resize_3(long src_nativeObj, long dst_nativeObj, double dsize_width, double dsize_height); 9769 9770 // C++: void cv::warpAffine(Mat src, Mat& dst, Mat M, Size dsize, int flags = INTER_LINEAR, int borderMode = BORDER_CONSTANT, Scalar borderValue = Scalar()) 9771 private static native void warpAffine_0(long src_nativeObj, long dst_nativeObj, long M_nativeObj, double dsize_width, double dsize_height, int flags, int borderMode, double borderValue_val0, double borderValue_val1, double borderValue_val2, double borderValue_val3); 9772 private static native void warpAffine_1(long src_nativeObj, long dst_nativeObj, long M_nativeObj, double dsize_width, double dsize_height, int flags, int borderMode); 9773 private static native void warpAffine_2(long src_nativeObj, long dst_nativeObj, long M_nativeObj, double dsize_width, double dsize_height, int flags); 9774 private static native void warpAffine_3(long src_nativeObj, long dst_nativeObj, long M_nativeObj, double dsize_width, double dsize_height); 9775 9776 // C++: void cv::warpPerspective(Mat src, Mat& dst, Mat M, Size dsize, int flags = INTER_LINEAR, int borderMode = BORDER_CONSTANT, Scalar borderValue = Scalar()) 9777 private static native void warpPerspective_0(long src_nativeObj, long dst_nativeObj, long M_nativeObj, double dsize_width, double dsize_height, int flags, int borderMode, double borderValue_val0, double borderValue_val1, double borderValue_val2, double borderValue_val3); 9778 private static native void warpPerspective_1(long src_nativeObj, long dst_nativeObj, long M_nativeObj, double dsize_width, double dsize_height, int flags, int borderMode); 9779 private static native void warpPerspective_2(long src_nativeObj, long dst_nativeObj, long M_nativeObj, double dsize_width, double dsize_height, int flags); 9780 private static native void warpPerspective_3(long src_nativeObj, long dst_nativeObj, long M_nativeObj, double dsize_width, double dsize_height); 9781 9782 // C++: void cv::remap(Mat src, Mat& dst, Mat map1, Mat map2, int interpolation, int borderMode = BORDER_CONSTANT, Scalar borderValue = Scalar()) 9783 private static native void remap_0(long src_nativeObj, long dst_nativeObj, long map1_nativeObj, long map2_nativeObj, int interpolation, int borderMode, double borderValue_val0, double borderValue_val1, double borderValue_val2, double borderValue_val3); 9784 private static native void remap_1(long src_nativeObj, long dst_nativeObj, long map1_nativeObj, long map2_nativeObj, int interpolation, int borderMode); 9785 private static native void remap_2(long src_nativeObj, long dst_nativeObj, long map1_nativeObj, long map2_nativeObj, int interpolation); 9786 9787 // C++: void cv::convertMaps(Mat map1, Mat map2, Mat& dstmap1, Mat& dstmap2, int dstmap1type, bool nninterpolation = false) 9788 private static native void convertMaps_0(long map1_nativeObj, long map2_nativeObj, long dstmap1_nativeObj, long dstmap2_nativeObj, int dstmap1type, boolean nninterpolation); 9789 private static native void convertMaps_1(long map1_nativeObj, long map2_nativeObj, long dstmap1_nativeObj, long dstmap2_nativeObj, int dstmap1type); 9790 9791 // C++: Mat cv::getRotationMatrix2D(Point2f center, double angle, double scale) 9792 private static native long getRotationMatrix2D_0(double center_x, double center_y, double angle, double scale); 9793 9794 // C++: void cv::invertAffineTransform(Mat M, Mat& iM) 9795 private static native void invertAffineTransform_0(long M_nativeObj, long iM_nativeObj); 9796 9797 // C++: Mat cv::getPerspectiveTransform(Mat src, Mat dst, int solveMethod = DECOMP_LU) 9798 private static native long getPerspectiveTransform_0(long src_nativeObj, long dst_nativeObj, int solveMethod); 9799 private static native long getPerspectiveTransform_1(long src_nativeObj, long dst_nativeObj); 9800 9801 // C++: Mat cv::getAffineTransform(vector_Point2f src, vector_Point2f dst) 9802 private static native long getAffineTransform_0(long src_mat_nativeObj, long dst_mat_nativeObj); 9803 9804 // C++: void cv::getRectSubPix(Mat image, Size patchSize, Point2f center, Mat& patch, int patchType = -1) 9805 private static native void getRectSubPix_0(long image_nativeObj, double patchSize_width, double patchSize_height, double center_x, double center_y, long patch_nativeObj, int patchType); 9806 private static native void getRectSubPix_1(long image_nativeObj, double patchSize_width, double patchSize_height, double center_x, double center_y, long patch_nativeObj); 9807 9808 // C++: void cv::logPolar(Mat src, Mat& dst, Point2f center, double M, int flags) 9809 private static native void logPolar_0(long src_nativeObj, long dst_nativeObj, double center_x, double center_y, double M, int flags); 9810 9811 // C++: void cv::linearPolar(Mat src, Mat& dst, Point2f center, double maxRadius, int flags) 9812 private static native void linearPolar_0(long src_nativeObj, long dst_nativeObj, double center_x, double center_y, double maxRadius, int flags); 9813 9814 // C++: void cv::warpPolar(Mat src, Mat& dst, Size dsize, Point2f center, double maxRadius, int flags) 9815 private static native void warpPolar_0(long src_nativeObj, long dst_nativeObj, double dsize_width, double dsize_height, double center_x, double center_y, double maxRadius, int flags); 9816 9817 // C++: void cv::integral(Mat src, Mat& sum, Mat& sqsum, Mat& tilted, int sdepth = -1, int sqdepth = -1) 9818 private static native void integral3_0(long src_nativeObj, long sum_nativeObj, long sqsum_nativeObj, long tilted_nativeObj, int sdepth, int sqdepth); 9819 private static native void integral3_1(long src_nativeObj, long sum_nativeObj, long sqsum_nativeObj, long tilted_nativeObj, int sdepth); 9820 private static native void integral3_2(long src_nativeObj, long sum_nativeObj, long sqsum_nativeObj, long tilted_nativeObj); 9821 9822 // C++: void cv::integral(Mat src, Mat& sum, int sdepth = -1) 9823 private static native void integral_0(long src_nativeObj, long sum_nativeObj, int sdepth); 9824 private static native void integral_1(long src_nativeObj, long sum_nativeObj); 9825 9826 // C++: void cv::integral(Mat src, Mat& sum, Mat& sqsum, int sdepth = -1, int sqdepth = -1) 9827 private static native void integral2_0(long src_nativeObj, long sum_nativeObj, long sqsum_nativeObj, int sdepth, int sqdepth); 9828 private static native void integral2_1(long src_nativeObj, long sum_nativeObj, long sqsum_nativeObj, int sdepth); 9829 private static native void integral2_2(long src_nativeObj, long sum_nativeObj, long sqsum_nativeObj); 9830 9831 // C++: void cv::accumulate(Mat src, Mat& dst, Mat mask = Mat()) 9832 private static native void accumulate_0(long src_nativeObj, long dst_nativeObj, long mask_nativeObj); 9833 private static native void accumulate_1(long src_nativeObj, long dst_nativeObj); 9834 9835 // C++: void cv::accumulateSquare(Mat src, Mat& dst, Mat mask = Mat()) 9836 private static native void accumulateSquare_0(long src_nativeObj, long dst_nativeObj, long mask_nativeObj); 9837 private static native void accumulateSquare_1(long src_nativeObj, long dst_nativeObj); 9838 9839 // C++: void cv::accumulateProduct(Mat src1, Mat src2, Mat& dst, Mat mask = Mat()) 9840 private static native void accumulateProduct_0(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj, long mask_nativeObj); 9841 private static native void accumulateProduct_1(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj); 9842 9843 // C++: void cv::accumulateWeighted(Mat src, Mat& dst, double alpha, Mat mask = Mat()) 9844 private static native void accumulateWeighted_0(long src_nativeObj, long dst_nativeObj, double alpha, long mask_nativeObj); 9845 private static native void accumulateWeighted_1(long src_nativeObj, long dst_nativeObj, double alpha); 9846 9847 // C++: Point2d cv::phaseCorrelate(Mat src1, Mat src2, Mat window = Mat(), double* response = 0) 9848 private static native double[] phaseCorrelate_0(long src1_nativeObj, long src2_nativeObj, long window_nativeObj, double[] response_out); 9849 private static native double[] phaseCorrelate_1(long src1_nativeObj, long src2_nativeObj, long window_nativeObj); 9850 private static native double[] phaseCorrelate_2(long src1_nativeObj, long src2_nativeObj); 9851 9852 // C++: void cv::createHanningWindow(Mat& dst, Size winSize, int type) 9853 private static native void createHanningWindow_0(long dst_nativeObj, double winSize_width, double winSize_height, int type); 9854 9855 // C++: void cv::divSpectrums(Mat a, Mat b, Mat& c, int flags, bool conjB = false) 9856 private static native void divSpectrums_0(long a_nativeObj, long b_nativeObj, long c_nativeObj, int flags, boolean conjB); 9857 private static native void divSpectrums_1(long a_nativeObj, long b_nativeObj, long c_nativeObj, int flags); 9858 9859 // C++: double cv::threshold(Mat src, Mat& dst, double thresh, double maxval, int type) 9860 private static native double threshold_0(long src_nativeObj, long dst_nativeObj, double thresh, double maxval, int type); 9861 9862 // C++: void cv::adaptiveThreshold(Mat src, Mat& dst, double maxValue, int adaptiveMethod, int thresholdType, int blockSize, double C) 9863 private static native void adaptiveThreshold_0(long src_nativeObj, long dst_nativeObj, double maxValue, int adaptiveMethod, int thresholdType, int blockSize, double C); 9864 9865 // C++: void cv::pyrDown(Mat src, Mat& dst, Size dstsize = Size(), int borderType = BORDER_DEFAULT) 9866 private static native void pyrDown_0(long src_nativeObj, long dst_nativeObj, double dstsize_width, double dstsize_height, int borderType); 9867 private static native void pyrDown_1(long src_nativeObj, long dst_nativeObj, double dstsize_width, double dstsize_height); 9868 private static native void pyrDown_2(long src_nativeObj, long dst_nativeObj); 9869 9870 // C++: void cv::pyrUp(Mat src, Mat& dst, Size dstsize = Size(), int borderType = BORDER_DEFAULT) 9871 private static native void pyrUp_0(long src_nativeObj, long dst_nativeObj, double dstsize_width, double dstsize_height, int borderType); 9872 private static native void pyrUp_1(long src_nativeObj, long dst_nativeObj, double dstsize_width, double dstsize_height); 9873 private static native void pyrUp_2(long src_nativeObj, long dst_nativeObj); 9874 9875 // C++: void cv::calcHist(vector_Mat images, vector_int channels, Mat mask, Mat& hist, vector_int histSize, vector_float ranges, bool accumulate = false) 9876 private static native void calcHist_0(long images_mat_nativeObj, long channels_mat_nativeObj, long mask_nativeObj, long hist_nativeObj, long histSize_mat_nativeObj, long ranges_mat_nativeObj, boolean accumulate); 9877 private static native void calcHist_1(long images_mat_nativeObj, long channels_mat_nativeObj, long mask_nativeObj, long hist_nativeObj, long histSize_mat_nativeObj, long ranges_mat_nativeObj); 9878 9879 // C++: void cv::calcBackProject(vector_Mat images, vector_int channels, Mat hist, Mat& dst, vector_float ranges, double scale) 9880 private static native void calcBackProject_0(long images_mat_nativeObj, long channels_mat_nativeObj, long hist_nativeObj, long dst_nativeObj, long ranges_mat_nativeObj, double scale); 9881 9882 // C++: double cv::compareHist(Mat H1, Mat H2, int method) 9883 private static native double compareHist_0(long H1_nativeObj, long H2_nativeObj, int method); 9884 9885 // C++: void cv::equalizeHist(Mat src, Mat& dst) 9886 private static native void equalizeHist_0(long src_nativeObj, long dst_nativeObj); 9887 9888 // C++: Ptr_CLAHE cv::createCLAHE(double clipLimit = 40.0, Size tileGridSize = Size(8, 8)) 9889 private static native long createCLAHE_0(double clipLimit, double tileGridSize_width, double tileGridSize_height); 9890 private static native long createCLAHE_1(double clipLimit); 9891 private static native long createCLAHE_2(); 9892 9893 // C++: float cv::wrapperEMD(Mat signature1, Mat signature2, int distType, Mat cost = Mat(), Ptr_float& lowerBound = Ptr<float>(), Mat& flow = Mat()) 9894 private static native float EMD_0(long signature1_nativeObj, long signature2_nativeObj, int distType, long cost_nativeObj, long flow_nativeObj); 9895 private static native float EMD_1(long signature1_nativeObj, long signature2_nativeObj, int distType, long cost_nativeObj); 9896 private static native float EMD_3(long signature1_nativeObj, long signature2_nativeObj, int distType); 9897 9898 // C++: void cv::watershed(Mat image, Mat& markers) 9899 private static native void watershed_0(long image_nativeObj, long markers_nativeObj); 9900 9901 // C++: void cv::pyrMeanShiftFiltering(Mat src, Mat& dst, double sp, double sr, int maxLevel = 1, TermCriteria termcrit = TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS,5,1)) 9902 private static native void pyrMeanShiftFiltering_0(long src_nativeObj, long dst_nativeObj, double sp, double sr, int maxLevel, int termcrit_type, int termcrit_maxCount, double termcrit_epsilon); 9903 private static native void pyrMeanShiftFiltering_1(long src_nativeObj, long dst_nativeObj, double sp, double sr, int maxLevel); 9904 private static native void pyrMeanShiftFiltering_2(long src_nativeObj, long dst_nativeObj, double sp, double sr); 9905 9906 // C++: void cv::grabCut(Mat img, Mat& mask, Rect rect, Mat& bgdModel, Mat& fgdModel, int iterCount, int mode = GC_EVAL) 9907 private static native void grabCut_0(long img_nativeObj, long mask_nativeObj, int rect_x, int rect_y, int rect_width, int rect_height, long bgdModel_nativeObj, long fgdModel_nativeObj, int iterCount, int mode); 9908 private static native void grabCut_1(long img_nativeObj, long mask_nativeObj, int rect_x, int rect_y, int rect_width, int rect_height, long bgdModel_nativeObj, long fgdModel_nativeObj, int iterCount); 9909 9910 // C++: void cv::distanceTransform(Mat src, Mat& dst, Mat& labels, int distanceType, int maskSize, int labelType = DIST_LABEL_CCOMP) 9911 private static native void distanceTransformWithLabels_0(long src_nativeObj, long dst_nativeObj, long labels_nativeObj, int distanceType, int maskSize, int labelType); 9912 private static native void distanceTransformWithLabels_1(long src_nativeObj, long dst_nativeObj, long labels_nativeObj, int distanceType, int maskSize); 9913 9914 // C++: void cv::distanceTransform(Mat src, Mat& dst, int distanceType, int maskSize, int dstType = CV_32F) 9915 private static native void distanceTransform_0(long src_nativeObj, long dst_nativeObj, int distanceType, int maskSize, int dstType); 9916 private static native void distanceTransform_1(long src_nativeObj, long dst_nativeObj, int distanceType, int maskSize); 9917 9918 // C++: int cv::floodFill(Mat& image, Mat& mask, Point seedPoint, Scalar newVal, Rect* rect = 0, Scalar loDiff = Scalar(), Scalar upDiff = Scalar(), int flags = 4) 9919 private static native int floodFill_0(long image_nativeObj, long mask_nativeObj, double seedPoint_x, double seedPoint_y, double newVal_val0, double newVal_val1, double newVal_val2, double newVal_val3, double[] rect_out, double loDiff_val0, double loDiff_val1, double loDiff_val2, double loDiff_val3, double upDiff_val0, double upDiff_val1, double upDiff_val2, double upDiff_val3, int flags); 9920 private static native int floodFill_1(long image_nativeObj, long mask_nativeObj, double seedPoint_x, double seedPoint_y, double newVal_val0, double newVal_val1, double newVal_val2, double newVal_val3, double[] rect_out, double loDiff_val0, double loDiff_val1, double loDiff_val2, double loDiff_val3, double upDiff_val0, double upDiff_val1, double upDiff_val2, double upDiff_val3); 9921 private static native int floodFill_2(long image_nativeObj, long mask_nativeObj, double seedPoint_x, double seedPoint_y, double newVal_val0, double newVal_val1, double newVal_val2, double newVal_val3, double[] rect_out, double loDiff_val0, double loDiff_val1, double loDiff_val2, double loDiff_val3); 9922 private static native int floodFill_3(long image_nativeObj, long mask_nativeObj, double seedPoint_x, double seedPoint_y, double newVal_val0, double newVal_val1, double newVal_val2, double newVal_val3, double[] rect_out); 9923 private static native int floodFill_4(long image_nativeObj, long mask_nativeObj, double seedPoint_x, double seedPoint_y, double newVal_val0, double newVal_val1, double newVal_val2, double newVal_val3); 9924 9925 // C++: void cv::blendLinear(Mat src1, Mat src2, Mat weights1, Mat weights2, Mat& dst) 9926 private static native void blendLinear_0(long src1_nativeObj, long src2_nativeObj, long weights1_nativeObj, long weights2_nativeObj, long dst_nativeObj); 9927 9928 // C++: void cv::cvtColor(Mat src, Mat& dst, int code, int dstCn = 0) 9929 private static native void cvtColor_0(long src_nativeObj, long dst_nativeObj, int code, int dstCn); 9930 private static native void cvtColor_1(long src_nativeObj, long dst_nativeObj, int code); 9931 9932 // C++: void cv::cvtColorTwoPlane(Mat src1, Mat src2, Mat& dst, int code) 9933 private static native void cvtColorTwoPlane_0(long src1_nativeObj, long src2_nativeObj, long dst_nativeObj, int code); 9934 9935 // C++: void cv::demosaicing(Mat src, Mat& dst, int code, int dstCn = 0) 9936 private static native void demosaicing_0(long src_nativeObj, long dst_nativeObj, int code, int dstCn); 9937 private static native void demosaicing_1(long src_nativeObj, long dst_nativeObj, int code); 9938 9939 // C++: Moments cv::moments(Mat array, bool binaryImage = false) 9940 private static native double[] moments_0(long array_nativeObj, boolean binaryImage); 9941 private static native double[] moments_1(long array_nativeObj); 9942 9943 // C++: void cv::HuMoments(Moments m, Mat& hu) 9944 private static native void HuMoments_0(double m_m00, double m_m10, double m_m01, double m_m20, double m_m11, double m_m02, double m_m30, double m_m21, double m_m12, double m_m03, long hu_nativeObj); 9945 9946 // C++: void cv::matchTemplate(Mat image, Mat templ, Mat& result, int method, Mat mask = Mat()) 9947 private static native void matchTemplate_0(long image_nativeObj, long templ_nativeObj, long result_nativeObj, int method, long mask_nativeObj); 9948 private static native void matchTemplate_1(long image_nativeObj, long templ_nativeObj, long result_nativeObj, int method); 9949 9950 // C++: int cv::connectedComponents(Mat image, Mat& labels, int connectivity, int ltype, int ccltype) 9951 private static native int connectedComponentsWithAlgorithm_0(long image_nativeObj, long labels_nativeObj, int connectivity, int ltype, int ccltype); 9952 9953 // C++: int cv::connectedComponents(Mat image, Mat& labels, int connectivity = 8, int ltype = CV_32S) 9954 private static native int connectedComponents_0(long image_nativeObj, long labels_nativeObj, int connectivity, int ltype); 9955 private static native int connectedComponents_1(long image_nativeObj, long labels_nativeObj, int connectivity); 9956 private static native int connectedComponents_2(long image_nativeObj, long labels_nativeObj); 9957 9958 // C++: int cv::connectedComponentsWithStats(Mat image, Mat& labels, Mat& stats, Mat& centroids, int connectivity, int ltype, int ccltype) 9959 private static native int connectedComponentsWithStatsWithAlgorithm_0(long image_nativeObj, long labels_nativeObj, long stats_nativeObj, long centroids_nativeObj, int connectivity, int ltype, int ccltype); 9960 9961 // C++: int cv::connectedComponentsWithStats(Mat image, Mat& labels, Mat& stats, Mat& centroids, int connectivity = 8, int ltype = CV_32S) 9962 private static native int connectedComponentsWithStats_0(long image_nativeObj, long labels_nativeObj, long stats_nativeObj, long centroids_nativeObj, int connectivity, int ltype); 9963 private static native int connectedComponentsWithStats_1(long image_nativeObj, long labels_nativeObj, long stats_nativeObj, long centroids_nativeObj, int connectivity); 9964 private static native int connectedComponentsWithStats_2(long image_nativeObj, long labels_nativeObj, long stats_nativeObj, long centroids_nativeObj); 9965 9966 // C++: void cv::findContours(Mat image, vector_vector_Point& contours, Mat& hierarchy, int mode, int method, Point offset = Point()) 9967 private static native void findContours_0(long image_nativeObj, long contours_mat_nativeObj, long hierarchy_nativeObj, int mode, int method, double offset_x, double offset_y); 9968 private static native void findContours_1(long image_nativeObj, long contours_mat_nativeObj, long hierarchy_nativeObj, int mode, int method); 9969 9970 // C++: void cv::approxPolyDP(vector_Point2f curve, vector_Point2f& approxCurve, double epsilon, bool closed) 9971 private static native void approxPolyDP_0(long curve_mat_nativeObj, long approxCurve_mat_nativeObj, double epsilon, boolean closed); 9972 9973 // C++: double cv::arcLength(vector_Point2f curve, bool closed) 9974 private static native double arcLength_0(long curve_mat_nativeObj, boolean closed); 9975 9976 // C++: Rect cv::boundingRect(Mat array) 9977 private static native double[] boundingRect_0(long array_nativeObj); 9978 9979 // C++: double cv::contourArea(Mat contour, bool oriented = false) 9980 private static native double contourArea_0(long contour_nativeObj, boolean oriented); 9981 private static native double contourArea_1(long contour_nativeObj); 9982 9983 // C++: RotatedRect cv::minAreaRect(vector_Point2f points) 9984 private static native double[] minAreaRect_0(long points_mat_nativeObj); 9985 9986 // C++: void cv::boxPoints(RotatedRect box, Mat& points) 9987 private static native void boxPoints_0(double box_center_x, double box_center_y, double box_size_width, double box_size_height, double box_angle, long points_nativeObj); 9988 9989 // C++: void cv::minEnclosingCircle(vector_Point2f points, Point2f& center, float& radius) 9990 private static native void minEnclosingCircle_0(long points_mat_nativeObj, double[] center_out, double[] radius_out); 9991 9992 // C++: double cv::minEnclosingTriangle(Mat points, Mat& triangle) 9993 private static native double minEnclosingTriangle_0(long points_nativeObj, long triangle_nativeObj); 9994 9995 // C++: double cv::matchShapes(Mat contour1, Mat contour2, int method, double parameter) 9996 private static native double matchShapes_0(long contour1_nativeObj, long contour2_nativeObj, int method, double parameter); 9997 9998 // C++: void cv::convexHull(vector_Point points, vector_int& hull, bool clockwise = false, _hidden_ returnPoints = true) 9999 private static native void convexHull_0(long points_mat_nativeObj, long hull_mat_nativeObj, boolean clockwise); 10000 private static native void convexHull_2(long points_mat_nativeObj, long hull_mat_nativeObj); 10001 10002 // C++: void cv::convexityDefects(vector_Point contour, vector_int convexhull, vector_Vec4i& convexityDefects) 10003 private static native void convexityDefects_0(long contour_mat_nativeObj, long convexhull_mat_nativeObj, long convexityDefects_mat_nativeObj); 10004 10005 // C++: bool cv::isContourConvex(vector_Point contour) 10006 private static native boolean isContourConvex_0(long contour_mat_nativeObj); 10007 10008 // C++: float cv::intersectConvexConvex(Mat p1, Mat p2, Mat& p12, bool handleNested = true) 10009 private static native float intersectConvexConvex_0(long p1_nativeObj, long p2_nativeObj, long p12_nativeObj, boolean handleNested); 10010 private static native float intersectConvexConvex_1(long p1_nativeObj, long p2_nativeObj, long p12_nativeObj); 10011 10012 // C++: RotatedRect cv::fitEllipse(vector_Point2f points) 10013 private static native double[] fitEllipse_0(long points_mat_nativeObj); 10014 10015 // C++: RotatedRect cv::fitEllipseAMS(Mat points) 10016 private static native double[] fitEllipseAMS_0(long points_nativeObj); 10017 10018 // C++: RotatedRect cv::fitEllipseDirect(Mat points) 10019 private static native double[] fitEllipseDirect_0(long points_nativeObj); 10020 10021 // C++: void cv::fitLine(Mat points, Mat& line, int distType, double param, double reps, double aeps) 10022 private static native void fitLine_0(long points_nativeObj, long line_nativeObj, int distType, double param, double reps, double aeps); 10023 10024 // C++: double cv::pointPolygonTest(vector_Point2f contour, Point2f pt, bool measureDist) 10025 private static native double pointPolygonTest_0(long contour_mat_nativeObj, double pt_x, double pt_y, boolean measureDist); 10026 10027 // C++: int cv::rotatedRectangleIntersection(RotatedRect rect1, RotatedRect rect2, Mat& intersectingRegion) 10028 private static native int rotatedRectangleIntersection_0(double rect1_center_x, double rect1_center_y, double rect1_size_width, double rect1_size_height, double rect1_angle, double rect2_center_x, double rect2_center_y, double rect2_size_width, double rect2_size_height, double rect2_angle, long intersectingRegion_nativeObj); 10029 10030 // C++: Ptr_GeneralizedHoughBallard cv::createGeneralizedHoughBallard() 10031 private static native long createGeneralizedHoughBallard_0(); 10032 10033 // C++: Ptr_GeneralizedHoughGuil cv::createGeneralizedHoughGuil() 10034 private static native long createGeneralizedHoughGuil_0(); 10035 10036 // C++: void cv::applyColorMap(Mat src, Mat& dst, int colormap) 10037 private static native void applyColorMap_0(long src_nativeObj, long dst_nativeObj, int colormap); 10038 10039 // C++: void cv::applyColorMap(Mat src, Mat& dst, Mat userColor) 10040 private static native void applyColorMap_1(long src_nativeObj, long dst_nativeObj, long userColor_nativeObj); 10041 10042 // C++: void cv::line(Mat& img, Point pt1, Point pt2, Scalar color, int thickness = 1, int lineType = LINE_8, int shift = 0) 10043 private static native void line_0(long img_nativeObj, double pt1_x, double pt1_y, double pt2_x, double pt2_y, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType, int shift); 10044 private static native void line_1(long img_nativeObj, double pt1_x, double pt1_y, double pt2_x, double pt2_y, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType); 10045 private static native void line_2(long img_nativeObj, double pt1_x, double pt1_y, double pt2_x, double pt2_y, double color_val0, double color_val1, double color_val2, double color_val3, int thickness); 10046 private static native void line_3(long img_nativeObj, double pt1_x, double pt1_y, double pt2_x, double pt2_y, double color_val0, double color_val1, double color_val2, double color_val3); 10047 10048 // C++: void cv::arrowedLine(Mat& img, Point pt1, Point pt2, Scalar color, int thickness = 1, int line_type = 8, int shift = 0, double tipLength = 0.1) 10049 private static native void arrowedLine_0(long img_nativeObj, double pt1_x, double pt1_y, double pt2_x, double pt2_y, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int line_type, int shift, double tipLength); 10050 private static native void arrowedLine_1(long img_nativeObj, double pt1_x, double pt1_y, double pt2_x, double pt2_y, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int line_type, int shift); 10051 private static native void arrowedLine_2(long img_nativeObj, double pt1_x, double pt1_y, double pt2_x, double pt2_y, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int line_type); 10052 private static native void arrowedLine_3(long img_nativeObj, double pt1_x, double pt1_y, double pt2_x, double pt2_y, double color_val0, double color_val1, double color_val2, double color_val3, int thickness); 10053 private static native void arrowedLine_4(long img_nativeObj, double pt1_x, double pt1_y, double pt2_x, double pt2_y, double color_val0, double color_val1, double color_val2, double color_val3); 10054 10055 // C++: void cv::rectangle(Mat& img, Point pt1, Point pt2, Scalar color, int thickness = 1, int lineType = LINE_8, int shift = 0) 10056 private static native void rectangle_0(long img_nativeObj, double pt1_x, double pt1_y, double pt2_x, double pt2_y, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType, int shift); 10057 private static native void rectangle_1(long img_nativeObj, double pt1_x, double pt1_y, double pt2_x, double pt2_y, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType); 10058 private static native void rectangle_2(long img_nativeObj, double pt1_x, double pt1_y, double pt2_x, double pt2_y, double color_val0, double color_val1, double color_val2, double color_val3, int thickness); 10059 private static native void rectangle_3(long img_nativeObj, double pt1_x, double pt1_y, double pt2_x, double pt2_y, double color_val0, double color_val1, double color_val2, double color_val3); 10060 10061 // C++: void cv::rectangle(Mat& img, Rect rec, Scalar color, int thickness = 1, int lineType = LINE_8, int shift = 0) 10062 private static native void rectangle_4(long img_nativeObj, int rec_x, int rec_y, int rec_width, int rec_height, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType, int shift); 10063 private static native void rectangle_5(long img_nativeObj, int rec_x, int rec_y, int rec_width, int rec_height, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType); 10064 private static native void rectangle_6(long img_nativeObj, int rec_x, int rec_y, int rec_width, int rec_height, double color_val0, double color_val1, double color_val2, double color_val3, int thickness); 10065 private static native void rectangle_7(long img_nativeObj, int rec_x, int rec_y, int rec_width, int rec_height, double color_val0, double color_val1, double color_val2, double color_val3); 10066 10067 // C++: void cv::circle(Mat& img, Point center, int radius, Scalar color, int thickness = 1, int lineType = LINE_8, int shift = 0) 10068 private static native void circle_0(long img_nativeObj, double center_x, double center_y, int radius, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType, int shift); 10069 private static native void circle_1(long img_nativeObj, double center_x, double center_y, int radius, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType); 10070 private static native void circle_2(long img_nativeObj, double center_x, double center_y, int radius, double color_val0, double color_val1, double color_val2, double color_val3, int thickness); 10071 private static native void circle_3(long img_nativeObj, double center_x, double center_y, int radius, double color_val0, double color_val1, double color_val2, double color_val3); 10072 10073 // C++: void cv::ellipse(Mat& img, Point center, Size axes, double angle, double startAngle, double endAngle, Scalar color, int thickness = 1, int lineType = LINE_8, int shift = 0) 10074 private static native void ellipse_0(long img_nativeObj, double center_x, double center_y, double axes_width, double axes_height, double angle, double startAngle, double endAngle, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType, int shift); 10075 private static native void ellipse_1(long img_nativeObj, double center_x, double center_y, double axes_width, double axes_height, double angle, double startAngle, double endAngle, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType); 10076 private static native void ellipse_2(long img_nativeObj, double center_x, double center_y, double axes_width, double axes_height, double angle, double startAngle, double endAngle, double color_val0, double color_val1, double color_val2, double color_val3, int thickness); 10077 private static native void ellipse_3(long img_nativeObj, double center_x, double center_y, double axes_width, double axes_height, double angle, double startAngle, double endAngle, double color_val0, double color_val1, double color_val2, double color_val3); 10078 10079 // C++: void cv::ellipse(Mat& img, RotatedRect box, Scalar color, int thickness = 1, int lineType = LINE_8) 10080 private static native void ellipse_4(long img_nativeObj, double box_center_x, double box_center_y, double box_size_width, double box_size_height, double box_angle, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType); 10081 private static native void ellipse_5(long img_nativeObj, double box_center_x, double box_center_y, double box_size_width, double box_size_height, double box_angle, double color_val0, double color_val1, double color_val2, double color_val3, int thickness); 10082 private static native void ellipse_6(long img_nativeObj, double box_center_x, double box_center_y, double box_size_width, double box_size_height, double box_angle, double color_val0, double color_val1, double color_val2, double color_val3); 10083 10084 // C++: void cv::drawMarker(Mat& img, Point position, Scalar color, int markerType = MARKER_CROSS, int markerSize = 20, int thickness = 1, int line_type = 8) 10085 private static native void drawMarker_0(long img_nativeObj, double position_x, double position_y, double color_val0, double color_val1, double color_val2, double color_val3, int markerType, int markerSize, int thickness, int line_type); 10086 private static native void drawMarker_1(long img_nativeObj, double position_x, double position_y, double color_val0, double color_val1, double color_val2, double color_val3, int markerType, int markerSize, int thickness); 10087 private static native void drawMarker_2(long img_nativeObj, double position_x, double position_y, double color_val0, double color_val1, double color_val2, double color_val3, int markerType, int markerSize); 10088 private static native void drawMarker_3(long img_nativeObj, double position_x, double position_y, double color_val0, double color_val1, double color_val2, double color_val3, int markerType); 10089 private static native void drawMarker_4(long img_nativeObj, double position_x, double position_y, double color_val0, double color_val1, double color_val2, double color_val3); 10090 10091 // C++: void cv::fillConvexPoly(Mat& img, vector_Point points, Scalar color, int lineType = LINE_8, int shift = 0) 10092 private static native void fillConvexPoly_0(long img_nativeObj, long points_mat_nativeObj, double color_val0, double color_val1, double color_val2, double color_val3, int lineType, int shift); 10093 private static native void fillConvexPoly_1(long img_nativeObj, long points_mat_nativeObj, double color_val0, double color_val1, double color_val2, double color_val3, int lineType); 10094 private static native void fillConvexPoly_2(long img_nativeObj, long points_mat_nativeObj, double color_val0, double color_val1, double color_val2, double color_val3); 10095 10096 // C++: void cv::fillPoly(Mat& img, vector_vector_Point pts, Scalar color, int lineType = LINE_8, int shift = 0, Point offset = Point()) 10097 private static native void fillPoly_0(long img_nativeObj, long pts_mat_nativeObj, double color_val0, double color_val1, double color_val2, double color_val3, int lineType, int shift, double offset_x, double offset_y); 10098 private static native void fillPoly_1(long img_nativeObj, long pts_mat_nativeObj, double color_val0, double color_val1, double color_val2, double color_val3, int lineType, int shift); 10099 private static native void fillPoly_2(long img_nativeObj, long pts_mat_nativeObj, double color_val0, double color_val1, double color_val2, double color_val3, int lineType); 10100 private static native void fillPoly_3(long img_nativeObj, long pts_mat_nativeObj, double color_val0, double color_val1, double color_val2, double color_val3); 10101 10102 // C++: void cv::polylines(Mat& img, vector_vector_Point pts, bool isClosed, Scalar color, int thickness = 1, int lineType = LINE_8, int shift = 0) 10103 private static native void polylines_0(long img_nativeObj, long pts_mat_nativeObj, boolean isClosed, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType, int shift); 10104 private static native void polylines_1(long img_nativeObj, long pts_mat_nativeObj, boolean isClosed, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType); 10105 private static native void polylines_2(long img_nativeObj, long pts_mat_nativeObj, boolean isClosed, double color_val0, double color_val1, double color_val2, double color_val3, int thickness); 10106 private static native void polylines_3(long img_nativeObj, long pts_mat_nativeObj, boolean isClosed, double color_val0, double color_val1, double color_val2, double color_val3); 10107 10108 // C++: void cv::drawContours(Mat& image, vector_vector_Point contours, int contourIdx, Scalar color, int thickness = 1, int lineType = LINE_8, Mat hierarchy = Mat(), int maxLevel = INT_MAX, Point offset = Point()) 10109 private static native void drawContours_0(long image_nativeObj, long contours_mat_nativeObj, int contourIdx, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType, long hierarchy_nativeObj, int maxLevel, double offset_x, double offset_y); 10110 private static native void drawContours_1(long image_nativeObj, long contours_mat_nativeObj, int contourIdx, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType, long hierarchy_nativeObj, int maxLevel); 10111 private static native void drawContours_2(long image_nativeObj, long contours_mat_nativeObj, int contourIdx, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType, long hierarchy_nativeObj); 10112 private static native void drawContours_3(long image_nativeObj, long contours_mat_nativeObj, int contourIdx, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType); 10113 private static native void drawContours_4(long image_nativeObj, long contours_mat_nativeObj, int contourIdx, double color_val0, double color_val1, double color_val2, double color_val3, int thickness); 10114 private static native void drawContours_5(long image_nativeObj, long contours_mat_nativeObj, int contourIdx, double color_val0, double color_val1, double color_val2, double color_val3); 10115 10116 // C++: bool cv::clipLine(Rect imgRect, Point& pt1, Point& pt2) 10117 private static native boolean clipLine_0(int imgRect_x, int imgRect_y, int imgRect_width, int imgRect_height, double pt1_x, double pt1_y, double[] pt1_out, double pt2_x, double pt2_y, double[] pt2_out); 10118 10119 // C++: void cv::ellipse2Poly(Point center, Size axes, int angle, int arcStart, int arcEnd, int delta, vector_Point& pts) 10120 private static native void ellipse2Poly_0(double center_x, double center_y, double axes_width, double axes_height, int angle, int arcStart, int arcEnd, int delta, long pts_mat_nativeObj); 10121 10122 // C++: void cv::putText(Mat& img, String text, Point org, int fontFace, double fontScale, Scalar color, int thickness = 1, int lineType = LINE_8, bool bottomLeftOrigin = false) 10123 private static native void putText_0(long img_nativeObj, String text, double org_x, double org_y, int fontFace, double fontScale, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType, boolean bottomLeftOrigin); 10124 private static native void putText_1(long img_nativeObj, String text, double org_x, double org_y, int fontFace, double fontScale, double color_val0, double color_val1, double color_val2, double color_val3, int thickness, int lineType); 10125 private static native void putText_2(long img_nativeObj, String text, double org_x, double org_y, int fontFace, double fontScale, double color_val0, double color_val1, double color_val2, double color_val3, int thickness); 10126 private static native void putText_3(long img_nativeObj, String text, double org_x, double org_y, int fontFace, double fontScale, double color_val0, double color_val1, double color_val2, double color_val3); 10127 10128 // C++: double cv::getFontScaleFromHeight(int fontFace, int pixelHeight, int thickness = 1) 10129 private static native double getFontScaleFromHeight_0(int fontFace, int pixelHeight, int thickness); 10130 private static native double getFontScaleFromHeight_1(int fontFace, int pixelHeight); 10131 10132 // C++: void cv::HoughLinesWithAccumulator(Mat image, Mat& lines, double rho, double theta, int threshold, double srn = 0, double stn = 0, double min_theta = 0, double max_theta = CV_PI) 10133 private static native void HoughLinesWithAccumulator_0(long image_nativeObj, long lines_nativeObj, double rho, double theta, int threshold, double srn, double stn, double min_theta, double max_theta); 10134 private static native void HoughLinesWithAccumulator_1(long image_nativeObj, long lines_nativeObj, double rho, double theta, int threshold, double srn, double stn, double min_theta); 10135 private static native void HoughLinesWithAccumulator_2(long image_nativeObj, long lines_nativeObj, double rho, double theta, int threshold, double srn, double stn); 10136 private static native void HoughLinesWithAccumulator_3(long image_nativeObj, long lines_nativeObj, double rho, double theta, int threshold, double srn); 10137 private static native void HoughLinesWithAccumulator_4(long image_nativeObj, long lines_nativeObj, double rho, double theta, int threshold); 10138private static native double[] n_getTextSize(String text, int fontFace, double fontScale, int thickness, int[] baseLine); 10139 10140}