/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                          License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#ifndef OPENCV_FEATURES_2D_HPP
#define OPENCV_FEATURES_2D_HPP

#include "opencv2/opencv_modules.hpp"
#include "opencv2/core.hpp"

#ifdef HAVE_OPENCV_FLANN
#include "opencv2/flann/miniflann.hpp"
#endif

/**
  @defgroup features2d 2D Features Framework
  @{
    @defgroup features2d_main Feature Detection and Description
    @defgroup features2d_match Descriptor Matchers

Matchers of keypoint descriptors in OpenCV have wrappers with a common interface that enables you to
easily switch between different algorithms solving the same problem. This section is devoted to
matching descriptors that are represented as vectors in a multidimensional space. All objects that
implement vector descriptor matchers inherit the DescriptorMatcher interface.
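For example, switching from one matching algorithm to another is a one-line change. A minimal
sketch (assumes descriptors1 and descriptors2 hold binary descriptors, e.g. from ORB):
@code{.cpp}
    Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming");
    // Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("FlannBased"); // drop-in replacement
    std::vector<DMatch> matches;
    matcher->match(descriptors1, descriptors2, matches);
@endcode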

@note
   -   An example explaining keypoint matching can be found at
       opencv_source_code/samples/cpp/descriptor_extractor_matcher.cpp
   -   An example on descriptor matching evaluation can be found at
       opencv_source_code/samples/cpp/detector_descriptor_matcher_evaluation.cpp
   -   An example on one to many image matching can be found at
       opencv_source_code/samples/cpp/matching_to_many_images.cpp

    @defgroup features2d_draw Drawing Functions of Keypoints and Matches
    @defgroup features2d_category Object Categorization

This section describes approaches based on local 2D features and used to categorize objects.

@note
   -   A complete Bag-Of-Words sample can be found at
       opencv_source_code/samples/cpp/bagofwords_classification.cpp
   -   (Python) An example using the features2D framework to perform object categorization can be
       found at opencv_source_code/samples/python/find_obj.py

  @}
 */

namespace cv
{

//! @addtogroup features2d
//! @{

// //! writes vector of keypoints to the file storage
// CV_EXPORTS void write(FileStorage& fs, const String& name, const std::vector<KeyPoint>& keypoints);
// //! reads vector of keypoints from the specified file storage node
// CV_EXPORTS void read(const FileNode& node, CV_OUT std::vector<KeyPoint>& keypoints);

/** @brief A class to filter a vector of keypoints.

Since it is difficult to provide a convenient interface for every usage scenario of a keypoint
filter, the class currently contains only the static methods that are needed now.
 */
class CV_EXPORTS KeyPointsFilter
{
public:
    KeyPointsFilter(){}

    /*
     * Remove keypoints within borderPixels of an image edge.
     */
    static void runByImageBorder( std::vector<KeyPoint>& keypoints, Size imageSize, int borderSize );
    /*
     * Remove keypoints of sizes out of range.
     */
    static void runByKeypointSize( std::vector<KeyPoint>& keypoints, float minSize,
                                   float maxSize=FLT_MAX );
    /*
     * Remove keypoints from some image by mask for pixels of this image.
     */
    static void runByPixelsMask( std::vector<KeyPoint>& keypoints, const Mat& mask );
    /*
     * Remove duplicated keypoints.
     */
    static void removeDuplicated( std::vector<KeyPoint>& keypoints );
    /*
     * Remove duplicated keypoints and sort the remaining keypoints.
     */
    static void removeDuplicatedSorted( std::vector<KeyPoint>& keypoints );

    /*
     * Retain the specified number of the best keypoints (according to the response).
     */
    static void retainBest( std::vector<KeyPoint>& keypoints, int npoints );
};


/************************************ Base Classes ************************************/

/** @brief Abstract base class for 2D image feature detectors and descriptor extractors
*/
#ifdef __EMSCRIPTEN__
class CV_EXPORTS_W Feature2D : public Algorithm
#else
class CV_EXPORTS_W Feature2D : public virtual Algorithm
#endif
{
public:
    virtual ~Feature2D();

    /** @brief Detects keypoints in an image (first variant) or image set (second variant).

    @param image Image.
    @param keypoints The detected keypoints. In the second variant of the method keypoints[i] is a set
    of keypoints detected in images[i] .
    @param mask Mask specifying where to look for keypoints (optional). It must be an 8-bit integer
    matrix with non-zero values in the region of interest.
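    A minimal usage sketch (assumes image is a valid 8-bit Mat; ORB::create() is just one
    possible way to obtain a concrete detector):
    @code{.cpp}
        Ptr<Feature2D> detector = ORB::create();
        std::vector<KeyPoint> keypoints;
        detector->detect(image, keypoints); // no mask: search the whole image
    @endcode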
     */
    CV_WRAP virtual void detect( InputArray image,
                                 CV_OUT std::vector<KeyPoint>& keypoints,
                                 InputArray mask=noArray() );

    /** @overload
    @param images Image set.
    @param keypoints The detected keypoints. In the second variant of the method keypoints[i] is a set
    of keypoints detected in images[i] .
    @param masks Masks for each input image specifying where to look for keypoints (optional).
    masks[i] is a mask for images[i].
    */
    CV_WRAP virtual void detect( InputArrayOfArrays images,
                                 CV_OUT std::vector<std::vector<KeyPoint> >& keypoints,
                                 InputArrayOfArrays masks=noArray() );

    /** @brief Computes the descriptors for a set of keypoints detected in an image (first variant) or image set
    (second variant).

    @param image Image.
    @param keypoints Input collection of keypoints. Keypoints for which a descriptor cannot be
    computed are removed. Sometimes new keypoints can be added, for example: SIFT duplicates a keypoint
    with several dominant orientations (one for each orientation).
    @param descriptors Computed descriptors. In the second variant of the method descriptors[i] are
    descriptors computed for a keypoints[i]. Row j in descriptors (or in descriptors[i]) is the
    descriptor for the j-th keypoint.
    */
    CV_WRAP virtual void compute( InputArray image,
                                  CV_OUT CV_IN_OUT std::vector<KeyPoint>& keypoints,
                                  OutputArray descriptors );

    /** @overload

    @param images Image set.
    @param keypoints Input collection of keypoints. Keypoints for which a descriptor cannot be
    computed are removed. Sometimes new keypoints can be added, for example: SIFT duplicates a keypoint
    with several dominant orientations (one for each orientation).
    @param descriptors Computed descriptors. In the second variant of the method descriptors[i] are
    descriptors computed for a keypoints[i]. Row j in descriptors (or in descriptors[i]) is the
    descriptor for the j-th keypoint.
    */
    CV_WRAP virtual void compute( InputArrayOfArrays images,
                                  CV_OUT CV_IN_OUT std::vector<std::vector<KeyPoint> >& keypoints,
                                  OutputArrayOfArrays descriptors );

    /** Detects keypoints and computes the descriptors */
    CV_WRAP virtual void detectAndCompute( InputArray image, InputArray mask,
                                           CV_OUT std::vector<KeyPoint>& keypoints,
                                           OutputArray descriptors,
                                           bool useProvidedKeypoints=false );

    CV_WRAP virtual int descriptorSize() const;
    CV_WRAP virtual int descriptorType() const;
    CV_WRAP virtual int defaultNorm() const;

    CV_WRAP void write( const String& fileName ) const;

    CV_WRAP void read( const String& fileName );

    virtual void write( FileStorage&) const CV_OVERRIDE;

    // see corresponding cv::Algorithm method
    CV_WRAP virtual void read( const FileNode&) CV_OVERRIDE;

    //! Return true if detector object is empty
    CV_WRAP virtual bool empty() const CV_OVERRIDE;
    CV_WRAP virtual String getDefaultName() const CV_OVERRIDE;

    // see corresponding cv::Algorithm method
    CV_WRAP inline void write(const Ptr<FileStorage>& fs, const String& name = String()) const { Algorithm::write(fs, name); }
};

/** Feature detectors in OpenCV have wrappers with a common interface that enables you to easily switch
between different algorithms solving the same problem. All objects that implement keypoint detectors
inherit the FeatureDetector interface.
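For example, switching between detectors is just a different create() call. A minimal sketch
(assumes image is a valid input Mat):
@code{.cpp}
    Ptr<FeatureDetector> detector = FastFeatureDetector::create();
    // Ptr<FeatureDetector> detector = BRISK::create(); // same interface, different algorithm
    std::vector<KeyPoint> keypoints;
    detector->detect(image, keypoints);
@endcode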
*/
typedef Feature2D FeatureDetector;

/** Extractors of keypoint descriptors in OpenCV have wrappers with a common interface that enables you
to easily switch between different algorithms solving the same problem. This section is devoted to
computing descriptors represented as vectors in a multidimensional space. All objects that implement
the vector descriptor extractors inherit the DescriptorExtractor interface.
 */
typedef Feature2D DescriptorExtractor;

//! @addtogroup features2d_main
//! @{

/** @brief Class implementing the BRISK keypoint detector and descriptor extractor, described in @cite LCS11 .
 */
class CV_EXPORTS_W BRISK : public Feature2D
{
public:
    /** @brief The BRISK constructor

    @param thresh AGAST detection threshold score.
    @param octaves detection octaves. Use 0 to do single scale.
    @param patternScale apply this scale to the pattern used for sampling the neighbourhood of a
    keypoint.
    */
    CV_WRAP static Ptr<BRISK> create(int thresh=30, int octaves=3, float patternScale=1.0f);

    /** @brief The BRISK constructor for a custom pattern

    @param radiusList defines the radii (in pixels) where the samples around a keypoint are taken (for
    keypoint scale 1).
    @param numberList defines the number of sampling points on the sampling circle. Must be the same
    size as radiusList.
    @param dMax threshold for the short pairings used for descriptor formation (in pixels for keypoint
    scale 1).
    @param dMin threshold for the long pairings used for orientation determination (in pixels for
    keypoint scale 1).
    @param indexChange index remapping of the bits. */
    CV_WRAP static Ptr<BRISK> create(const std::vector<float> &radiusList, const std::vector<int> &numberList,
        float dMax=5.85f, float dMin=8.2f, const std::vector<int>& indexChange=std::vector<int>());

    /** @brief The BRISK constructor for a custom pattern, detection threshold and octaves

    @param thresh AGAST detection threshold score.
    @param octaves detection octaves. Use 0 to do single scale.
    @param radiusList defines the radii (in pixels) where the samples around a keypoint are taken (for
    keypoint scale 1).
    @param numberList defines the number of sampling points on the sampling circle. Must be the same
    size as radiusList.
    @param dMax threshold for the short pairings used for descriptor formation (in pixels for keypoint
    scale 1).
    @param dMin threshold for the long pairings used for orientation determination (in pixels for
    keypoint scale 1).
    @param indexChange index remapping of the bits. */
    CV_WRAP static Ptr<BRISK> create(int thresh, int octaves, const std::vector<float> &radiusList,
        const std::vector<int> &numberList, float dMax=5.85f, float dMin=8.2f,
        const std::vector<int>& indexChange=std::vector<int>());
    CV_WRAP virtual String getDefaultName() const CV_OVERRIDE;
};

/** @brief Class implementing the ORB (*oriented BRIEF*) keypoint detector and descriptor extractor

described in @cite RRKB11 . The algorithm uses FAST in pyramids to detect stable keypoints, selects
the strongest features using FAST or Harris response, finds their orientation using first-order
moments and computes the descriptors using BRIEF (where the coordinates of random point pairs (or
k-tuples) are rotated according to the measured orientation).
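A minimal usage sketch, detecting keypoints and computing descriptors in one pass (assumes image
is a valid 8-bit Mat):
@code{.cpp}
    Ptr<ORB> orb = ORB::create(1000); // retain up to 1000 features
    std::vector<KeyPoint> keypoints;
    Mat descriptors; // CV_8U, 32 bytes per row by default; match with NORM_HAMMING
    orb->detectAndCompute(image, noArray(), keypoints, descriptors);
@endcode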
 */
class CV_EXPORTS_W ORB : public Feature2D
{
public:
    enum { kBytes = 32, HARRIS_SCORE=0, FAST_SCORE=1 };

    /** @brief The ORB constructor

    @param nfeatures The maximum number of features to retain.
    @param scaleFactor Pyramid decimation ratio, greater than 1. scaleFactor==2 means the classical
    pyramid, where each next level has 4x fewer pixels than the previous, but such a big scale factor
    will degrade feature matching scores dramatically. On the other hand, a scale factor too close to 1
    means that covering a certain scale range will need more pyramid levels, so the speed
    will suffer.
    @param nlevels The number of pyramid levels. The smallest level will have linear size equal to
    input_image_linear_size/pow(scaleFactor, nlevels - firstLevel).
    @param edgeThreshold This is the size of the border where the features are not detected. It should
    roughly match the patchSize parameter.
    @param firstLevel The level of the pyramid to put the source image into. Previous layers are filled
    with the upscaled source image.
    @param WTA_K The number of points that produce each element of the oriented BRIEF descriptor. The
    default value 2 means BRIEF, where we take a random point pair and compare their brightnesses,
    so we get a 0/1 response. Other possible values are 3 and 4. For example, 3 means that we take 3
    random points (of course, those point coordinates are random, but they are generated from the
    pre-defined seed, so each element of the BRIEF descriptor is computed deterministically from the pixel
    rectangle), find the point of maximum brightness and output the index of the winner (0, 1 or 2). Such
    output will occupy 2 bits, and therefore it will need a special variant of Hamming distance,
    denoted as NORM_HAMMING2 (2 bits per bin). When WTA_K=4, we take 4 random points to compute each
    bin (that will also occupy 2 bits with possible values 0, 1, 2 or 3).
    @param scoreType The default HARRIS_SCORE means that the Harris algorithm is used to rank features
    (the score is written to KeyPoint::response and is used to retain the best nfeatures features);
    FAST_SCORE is an alternative value of the parameter that produces slightly less stable keypoints,
    but is a little faster to compute.
    @param patchSize size of the patch used by the oriented BRIEF descriptor. Of course, on smaller
    pyramid layers the perceived image area covered by a feature will be larger.
    @param fastThreshold Threshold passed to the FAST detector used for keypoint detection.
     */
    CV_WRAP static Ptr<ORB> create(int nfeatures=500, float scaleFactor=1.2f, int nlevels=8, int edgeThreshold=31,
        int firstLevel=0, int WTA_K=2, int scoreType=ORB::HARRIS_SCORE, int patchSize=31, int fastThreshold=20);

    CV_WRAP virtual void setMaxFeatures(int maxFeatures) = 0;
    CV_WRAP virtual int getMaxFeatures() const = 0;

    CV_WRAP virtual void setScaleFactor(double scaleFactor) = 0;
    CV_WRAP virtual double getScaleFactor() const = 0;

    CV_WRAP virtual void setNLevels(int nlevels) = 0;
    CV_WRAP virtual int getNLevels() const = 0;

    CV_WRAP virtual void setEdgeThreshold(int edgeThreshold) = 0;
    CV_WRAP virtual int getEdgeThreshold() const = 0;

    CV_WRAP virtual void setFirstLevel(int firstLevel) = 0;
    CV_WRAP virtual int getFirstLevel() const = 0;

    CV_WRAP virtual void setWTA_K(int wta_k) = 0;
    CV_WRAP virtual int getWTA_K() const = 0;

    CV_WRAP virtual void setScoreType(int scoreType) = 0;
    CV_WRAP virtual int getScoreType() const = 0;

    CV_WRAP virtual void setPatchSize(int patchSize) = 0;
    CV_WRAP virtual int getPatchSize() const = 0;

    CV_WRAP virtual void setFastThreshold(int fastThreshold) = 0;
    CV_WRAP virtual int getFastThreshold() const = 0;
    CV_WRAP virtual String getDefaultName() const CV_OVERRIDE;
};

/** @brief Maximally stable extremal region extractor

The class encapsulates all the parameters of the %MSER extraction algorithm (see [wiki
article](http://en.wikipedia.org/wiki/Maximally_stable_extremal_regions)).

- there are two different implementations of %MSER: one for grey images, one for color images

- the grey image algorithm is taken from @cite nister2008linear ; the paper claims it is faster
than the union-find method; it actually gets 1.5~2m/s on a Centrino L7200 1.2GHz laptop.

- the color image algorithm is taken from @cite forssen2007maximally ; it should be much slower
than the grey image method (3~4 times); the chi_table.h file is taken directly from the paper's source
code, which is distributed under GPL.

- (Python) A complete example showing the use of the %MSER detector can be found at samples/python/mser.py
*/
class CV_EXPORTS_W MSER : public Feature2D
{
public:
    /** @brief Full constructor for %MSER detector

    @param _delta it compares \f$(size_{i}-size_{i-delta})/size_{i-delta}\f$
    @param _min_area prune regions smaller than minArea
    @param _max_area prune regions bigger than maxArea
    @param _max_variation prune regions having a size similar to their children
    @param _min_diversity for color images, trace back to cut off mser with diversity less than min_diversity
    @param _max_evolution for color images, the evolution steps
    @param _area_threshold for color images, the area threshold that causes re-initialization
    @param _min_margin for color images, ignore too small margin
    @param _edge_blur_size for color images, the aperture size for edge blur
     */
    CV_WRAP static Ptr<MSER> create( int _delta=5, int _min_area=60, int _max_area=14400,
          double _max_variation=0.25, double _min_diversity=.2,
          int _max_evolution=200, double _area_threshold=1.01,
          double _min_margin=0.003, int _edge_blur_size=5 );

    /** @brief Detect %MSER regions

    @param image input image (8UC1, 8UC3 or 8UC4, must be at least 3x3)
    @param msers resulting list of point sets
    @param bboxes resulting bounding boxes
    */
    CV_WRAP virtual void detectRegions( InputArray image,
                                        CV_OUT std::vector<std::vector<Point> >& msers,
                                        CV_OUT std::vector<Rect>& bboxes ) = 0;

    CV_WRAP virtual void setDelta(int delta) = 0;
    CV_WRAP virtual int getDelta() const = 0;

    CV_WRAP virtual void setMinArea(int minArea) = 0;
    CV_WRAP virtual int getMinArea() const = 0;

    CV_WRAP virtual void setMaxArea(int maxArea) = 0;
    CV_WRAP virtual int getMaxArea() const = 0;

    CV_WRAP virtual void setPass2Only(bool f) = 0;
    CV_WRAP virtual bool getPass2Only() const = 0;
    CV_WRAP virtual String getDefaultName() const CV_OVERRIDE;
};

/** @overload */
CV_EXPORTS void FAST( InputArray image, CV_OUT std::vector<KeyPoint>& keypoints,
                      int threshold, bool nonmaxSuppression=true );

/** @brief Detects corners using the FAST algorithm

@param image grayscale image where keypoints (corners) are detected.
@param keypoints keypoints detected on the image.
@param threshold threshold on difference between intensity of the central pixel and pixels of a
circle around this pixel.
@param nonmaxSuppression if true, non-maximum suppression is applied to detected corners
(keypoints).
@param type one of the three neighborhoods as defined in the paper:
FastFeatureDetector::TYPE_9_16, FastFeatureDetector::TYPE_7_12,
FastFeatureDetector::TYPE_5_8

Detects corners using the FAST algorithm by @cite Rosten06 .

@note In the Python API, types are given as cv2.FAST_FEATURE_DETECTOR_TYPE_5_8,
cv2.FAST_FEATURE_DETECTOR_TYPE_7_12 and cv2.FAST_FEATURE_DETECTOR_TYPE_9_16. For corner
detection, use the cv2.FAST.detect() method.
*/
CV_EXPORTS void FAST( InputArray image, CV_OUT std::vector<KeyPoint>& keypoints,
                      int threshold, bool nonmaxSuppression, int type );

//! @} features2d_main

//! @addtogroup features2d_main
//! @{

/** @brief Wrapping class for feature detection using the FAST method.
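
A minimal usage sketch, equivalent to calling the FAST() function above (assumes gray is an
8-bit single-channel Mat):
@code{.cpp}
    Ptr<FastFeatureDetector> fast = FastFeatureDetector::create(40); // threshold 40
    std::vector<KeyPoint> corners;
    fast->detect(gray, corners);
@endcode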
*/
class CV_EXPORTS_W FastFeatureDetector : public Feature2D
{
public:
    enum
    {
        TYPE_5_8 = 0, TYPE_7_12 = 1, TYPE_9_16 = 2,
        THRESHOLD = 10000, NONMAX_SUPPRESSION=10001, FAST_N=10002,
    };

    CV_WRAP static Ptr<FastFeatureDetector> create( int threshold=10,
                                                    bool nonmaxSuppression=true,
                                                    int type=FastFeatureDetector::TYPE_9_16 );

    CV_WRAP virtual void setThreshold(int threshold) = 0;
    CV_WRAP virtual int getThreshold() const = 0;

    CV_WRAP virtual void setNonmaxSuppression(bool f) = 0;
    CV_WRAP virtual bool getNonmaxSuppression() const = 0;

    CV_WRAP virtual void setType(int type) = 0;
    CV_WRAP virtual int getType() const = 0;
    CV_WRAP virtual String getDefaultName() const CV_OVERRIDE;
};

/** @overload */
CV_EXPORTS void AGAST( InputArray image, CV_OUT std::vector<KeyPoint>& keypoints,
                       int threshold, bool nonmaxSuppression=true );

/** @brief Detects corners using the AGAST algorithm

@param image grayscale image where keypoints (corners) are detected.
@param keypoints keypoints detected on the image.
@param threshold threshold on difference between intensity of the central pixel and pixels of a
circle around this pixel.
@param nonmaxSuppression if true, non-maximum suppression is applied to detected corners
(keypoints).
@param type one of the four neighborhoods as defined in the paper:
AgastFeatureDetector::AGAST_5_8, AgastFeatureDetector::AGAST_7_12d,
AgastFeatureDetector::AGAST_7_12s, AgastFeatureDetector::OAST_9_16

For non-Intel platforms, there is a tree-optimised variant of AGAST with the same numerical results.
The 32-bit binary tree tables were generated automatically from the original code using a Perl script.
The Perl script and examples of tree generation are placed in the features2d/doc folder.
Detects corners using the AGAST algorithm by @cite mair2010_agast .

*/
CV_EXPORTS void AGAST( InputArray image, CV_OUT std::vector<KeyPoint>& keypoints,
                       int threshold, bool nonmaxSuppression, int type );
//! @} features2d_main

//! @addtogroup features2d_main
//! @{

/** @brief Wrapping class for feature detection using the AGAST method.
*/
class CV_EXPORTS_W AgastFeatureDetector : public Feature2D
{
public:
    enum
    {
        AGAST_5_8 = 0, AGAST_7_12d = 1, AGAST_7_12s = 2, OAST_9_16 = 3,
        THRESHOLD = 10000, NONMAX_SUPPRESSION = 10001,
    };

    CV_WRAP static Ptr<AgastFeatureDetector> create( int threshold=10,
                                                     bool nonmaxSuppression=true,
                                                     int type=AgastFeatureDetector::OAST_9_16 );

    CV_WRAP virtual void setThreshold(int threshold) = 0;
    CV_WRAP virtual int getThreshold() const = 0;

    CV_WRAP virtual void setNonmaxSuppression(bool f) = 0;
    CV_WRAP virtual bool getNonmaxSuppression() const = 0;

    CV_WRAP virtual void setType(int type) = 0;
    CV_WRAP virtual int getType() const = 0;
    CV_WRAP virtual String getDefaultName() const CV_OVERRIDE;
};

/** @brief Wrapping class for feature detection using the goodFeaturesToTrack function.
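
A minimal usage sketch (assumes gray is an 8-bit single-channel Mat; parameter values are
illustrative only):
@code{.cpp}
    Ptr<GFTTDetector> gftt = GFTTDetector::create(500, 0.01, 10); // up to 500 corners, min distance 10 px
    std::vector<KeyPoint> corners;
    gftt->detect(gray, corners);
@endcode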
*/
class CV_EXPORTS_W GFTTDetector : public Feature2D
{
public:
    CV_WRAP static Ptr<GFTTDetector> create( int maxCorners=1000, double qualityLevel=0.01, double minDistance=1,
                                             int blockSize=3, bool useHarrisDetector=false, double k=0.04 );
    CV_WRAP static Ptr<GFTTDetector> create( int maxCorners, double qualityLevel, double minDistance,
                                             int blockSize, int gradiantSize, bool useHarrisDetector=false, double k=0.04 );
    CV_WRAP virtual void setMaxFeatures(int maxFeatures) = 0;
    CV_WRAP virtual int getMaxFeatures() const = 0;

    CV_WRAP virtual void setQualityLevel(double qlevel) = 0;
    CV_WRAP virtual double getQualityLevel() const = 0;

    CV_WRAP virtual void setMinDistance(double minDistance) = 0;
    CV_WRAP virtual double getMinDistance() const = 0;

    CV_WRAP virtual void setBlockSize(int blockSize) = 0;
    CV_WRAP virtual int getBlockSize() const = 0;

    CV_WRAP virtual void setHarrisDetector(bool val) = 0;
    CV_WRAP virtual bool getHarrisDetector() const = 0;

    CV_WRAP virtual void setK(double k) = 0;
    CV_WRAP virtual double getK() const = 0;
    CV_WRAP virtual String getDefaultName() const CV_OVERRIDE;
};

/** @brief Class for extracting blobs from an image.

The class implements a simple algorithm for extracting blobs from an image:

1.  Convert the source image to binary images by applying thresholding with several thresholds from
    minThreshold (inclusive) to maxThreshold (exclusive) with distance thresholdStep between
    neighboring thresholds.
2.  Extract connected components from every binary image by findContours and calculate their
    centers.
3.  Group centers from several binary images by their coordinates. Close centers form one group that
    corresponds to one blob, which is controlled by the minDistBetweenBlobs parameter.
4.  From the groups, estimate final centers of blobs and their radii and return them as locations and
    sizes of keypoints.

This class performs several filtrations of returned blobs. You should set filterBy\* to true/false
to turn on/off the corresponding filtration. Available filtrations:

-   **By color**. This filter compares the intensity of a binary image at the center of a blob to
    blobColor. If they differ, the blob is filtered out. Use blobColor = 0 to extract dark blobs
    and blobColor = 255 to extract light blobs.
-   **By area**. Extracted blobs have an area between minArea (inclusive) and maxArea (exclusive).
-   **By circularity**. Extracted blobs have circularity
    (\f$\frac{4*\pi*Area}{perimeter * perimeter}\f$) between minCircularity (inclusive) and
    maxCircularity (exclusive).
-   **By ratio of the minimum inertia to maximum inertia**. Extracted blobs have this ratio
    between minInertiaRatio (inclusive) and maxInertiaRatio (exclusive).
-   **By convexity**. Extracted blobs have convexity (area / area of blob convex hull) between
    minConvexity (inclusive) and maxConvexity (exclusive).

Default values of parameters are tuned to extract dark circular blobs.
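
A minimal usage sketch, adjusting a couple of the filter parameters described above (assumes
gray is an 8-bit single-channel Mat; the area bounds are illustrative only):
@code{.cpp}
    SimpleBlobDetector::Params params;
    params.filterByArea = true;
    params.minArea = 100.0f;  // inclusive
    params.maxArea = 5000.0f; // exclusive
    Ptr<SimpleBlobDetector> blobs = SimpleBlobDetector::create(params);
    std::vector<KeyPoint> blobCenters;
    blobs->detect(gray, blobCenters); // keypoint size approximates the blob diameter
@endcode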
*/
class CV_EXPORTS_W SimpleBlobDetector : public Feature2D
{
public:
    struct CV_EXPORTS_W_SIMPLE Params
    {
        CV_WRAP Params();
        CV_PROP_RW float thresholdStep;
        CV_PROP_RW float minThreshold;
        CV_PROP_RW float maxThreshold;
        CV_PROP_RW size_t minRepeatability;
        CV_PROP_RW float minDistBetweenBlobs;

        CV_PROP_RW bool filterByColor;
        CV_PROP_RW uchar blobColor;

        CV_PROP_RW bool filterByArea;
        CV_PROP_RW float minArea, maxArea;

        CV_PROP_RW bool filterByCircularity;
        CV_PROP_RW float minCircularity, maxCircularity;

        CV_PROP_RW bool filterByInertia;
        CV_PROP_RW float minInertiaRatio, maxInertiaRatio;

        CV_PROP_RW bool filterByConvexity;
        CV_PROP_RW float minConvexity, maxConvexity;

        void read( const FileNode& fn );
        void write( FileStorage& fs ) const;
    };

    CV_WRAP static Ptr<SimpleBlobDetector>
      create(const SimpleBlobDetector::Params &parameters = SimpleBlobDetector::Params());
    CV_WRAP virtual String getDefaultName() const CV_OVERRIDE;
};

//! @} features2d_main

//! @addtogroup features2d_main
//! @{

/** @brief Class implementing the KAZE keypoint detector and descriptor extractor, described in @cite ABD12 .

@note AKAZE descriptors can only be used with KAZE or AKAZE keypoints. [ABD12] KAZE Features. Pablo
F. Alcantarilla, Adrien Bartoli and Andrew J. Davison. In European Conference on Computer Vision
(ECCV), Florence, Italy, October 2012.
*/
class CV_EXPORTS_W KAZE : public Feature2D
{
public:
    enum
    {
        DIFF_PM_G1 = 0,
        DIFF_PM_G2 = 1,
        DIFF_WEICKERT = 2,
        DIFF_CHARBONNIER = 3
    };

    /** @brief The KAZE constructor

    @param extended Set to enable extraction of extended (128-byte) descriptor.
    @param upright Set to enable use of upright descriptors (non rotation-invariant).
    @param threshold Detector response threshold to accept point
    @param nOctaves Maximum octave evolution of the image
    @param nOctaveLayers Default number of sublevels per scale level
    @param diffusivity Diffusivity type. DIFF_PM_G1, DIFF_PM_G2, DIFF_WEICKERT or
    DIFF_CHARBONNIER
    */
    CV_WRAP static Ptr<KAZE> create(bool extended=false, bool upright=false,
                                    float threshold = 0.001f,
                                    int nOctaves = 4, int nOctaveLayers = 4,
                                    int diffusivity = KAZE::DIFF_PM_G2);

    CV_WRAP virtual void setExtended(bool extended) = 0;
    CV_WRAP virtual bool getExtended() const = 0;

    CV_WRAP virtual void setUpright(bool upright) = 0;
    CV_WRAP virtual bool getUpright() const = 0;

    CV_WRAP virtual void setThreshold(double threshold) = 0;
    CV_WRAP virtual double getThreshold() const = 0;

    CV_WRAP virtual void setNOctaves(int octaves) = 0;
    CV_WRAP virtual int getNOctaves() const = 0;

    CV_WRAP virtual void setNOctaveLayers(int octaveLayers) = 0;
    CV_WRAP virtual int getNOctaveLayers() const = 0;

    CV_WRAP virtual void setDiffusivity(int diff) = 0;
    CV_WRAP virtual int getDiffusivity() const = 0;
    CV_WRAP virtual String getDefaultName() const CV_OVERRIDE;
};

/** @brief Class implementing the AKAZE keypoint detector and descriptor extractor, described in @cite ANB13.

@details AKAZE descriptors can only be used with KAZE or AKAZE keypoints. This class is thread-safe.

@note When you need descriptors, use Feature2D::detectAndCompute, which
provides better performance.
When using Feature2D::detect followed by
Feature2D::compute, the scale space pyramid is computed twice.

@note AKAZE implements T-API. When an image is passed as UMat, some parts of the algorithm
will use OpenCL.

@note [ANB13] Fast Explicit Diffusion for Accelerated Features in Nonlinear
Scale Spaces. Pablo F. Alcantarilla, Jesús Nuevo and Adrien Bartoli. In
British Machine Vision Conference (BMVC), Bristol, UK, September 2013.

*/
class CV_EXPORTS_W AKAZE : public Feature2D
{
public:
    // AKAZE descriptor type
    enum
    {
        DESCRIPTOR_KAZE_UPRIGHT = 2, ///< Upright descriptors, not invariant to rotation
        DESCRIPTOR_KAZE = 3,
        DESCRIPTOR_MLDB_UPRIGHT = 4, ///< Upright descriptors, not invariant to rotation
        DESCRIPTOR_MLDB = 5
    };

    /** @brief The AKAZE constructor

    @param descriptor_type Type of the extracted descriptor: DESCRIPTOR_KAZE,
    DESCRIPTOR_KAZE_UPRIGHT, DESCRIPTOR_MLDB or DESCRIPTOR_MLDB_UPRIGHT.
    @param descriptor_size Size of the descriptor in bits. 0 -\> Full size
    @param descriptor_channels Number of channels in the descriptor (1, 2, 3)
    @param threshold Detector response threshold to accept point
    @param nOctaves Maximum octave evolution of the image
    @param nOctaveLayers Default number of sublevels per scale level
    @param diffusivity Diffusivity type. DIFF_PM_G1, DIFF_PM_G2, DIFF_WEICKERT or
    DIFF_CHARBONNIER
    */
    CV_WRAP static Ptr<AKAZE> create(int descriptor_type=AKAZE::DESCRIPTOR_MLDB,
                                     int descriptor_size = 0, int descriptor_channels = 3,
                                     float threshold = 0.001f, int nOctaves = 4,
                                     int nOctaveLayers = 4, int diffusivity = KAZE::DIFF_PM_G2);

    CV_WRAP virtual void setDescriptorType(int dtype) = 0;
    CV_WRAP virtual int getDescriptorType() const = 0;

    CV_WRAP virtual void setDescriptorSize(int dsize) = 0;
    CV_WRAP virtual int getDescriptorSize() const = 0;

    CV_WRAP virtual void setDescriptorChannels(int dch) = 0;
    CV_WRAP virtual int getDescriptorChannels() const = 0;

    CV_WRAP virtual void setThreshold(double threshold) = 0;
    CV_WRAP virtual double getThreshold() const = 0;

    CV_WRAP virtual void setNOctaves(int octaves) = 0;
    CV_WRAP virtual int getNOctaves() const = 0;

    CV_WRAP virtual void setNOctaveLayers(int octaveLayers) = 0;
    CV_WRAP virtual int getNOctaveLayers() const = 0;

    CV_WRAP virtual void setDiffusivity(int diff) = 0;
    CV_WRAP virtual int getDiffusivity() const = 0;
    CV_WRAP virtual String getDefaultName() const CV_OVERRIDE;
};
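
/* Example: AKAZE detection and description in a single pass, so the scale space
 * pyramid is computed only once (a minimal sketch; img is assumed to be an
 * 8-bit grayscale Mat):
 *
 *     Ptr<AKAZE> akaze = AKAZE::create();
 *     std::vector<KeyPoint> keypoints;
 *     Mat descriptors; // binary MLDB descriptors by default -> match with NORM_HAMMING
 *     akaze->detectAndCompute(img, noArray(), keypoints, descriptors);
 */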
//! @} features2d_main

/****************************************************************************************\
*                                      Distance                                          *
\****************************************************************************************/

template<typename T>
struct CV_EXPORTS Accumulator
{
    typedef T Type;
};

template<> struct Accumulator<unsigned char>  { typedef float Type; };
template<> struct Accumulator<unsigned short> { typedef float Type; };
template<> struct Accumulator<char>   { typedef float Type; };
template<> struct Accumulator<short>  { typedef float Type; };

/*
 * Squared Euclidean distance functor
 */
template<class T>
struct CV_EXPORTS SL2
{
    enum { normType = NORM_L2SQR };
    typedef T ValueType;
    typedef typename Accumulator<T>::Type ResultType;

    ResultType operator()( const T* a, const T* b, int size ) const
    {
        return normL2Sqr<ValueType, ResultType>(a, b, size);
    }
};

/*
 * Euclidean distance functor
 */
template<class T>
struct L2
{
    enum { normType = NORM_L2 };
    typedef T ValueType;
    typedef typename Accumulator<T>::Type ResultType;

    ResultType operator()( const T* a, const T* b, int size ) const
    {
        return (ResultType)std::sqrt((double)normL2Sqr<ValueType, ResultType>(a, b, size));
    }
};

/*
 * Manhattan distance (city block distance) functor
 */
template<class T>
struct L1
{
    enum { normType = NORM_L1 };
    typedef T ValueType;
    typedef typename Accumulator<T>::Type ResultType;

    ResultType operator()( const T* a, const T* b, int size ) const
    {
        return normL1<ValueType, ResultType>(a, b, size);
    }
};

/****************************************************************************************\
*                                  DescriptorMatcher                                     *
\****************************************************************************************/

//! @addtogroup features2d_match
//! @{

/** @brief Abstract base class for matching keypoint descriptors.

It has two groups of match methods: for matching descriptors of an image with another image or with
an image set.
 */
class CV_EXPORTS_W DescriptorMatcher : public Algorithm
{
public:
    enum
    {
        FLANNBASED            = 1,
        BRUTEFORCE            = 2,
        BRUTEFORCE_L1         = 3,
        BRUTEFORCE_HAMMING    = 4,
        BRUTEFORCE_HAMMINGLUT = 5,
        BRUTEFORCE_SL2        = 6
    };
    virtual ~DescriptorMatcher();

    /** @brief Adds descriptors to train a CPU (trainDescCollection) or GPU (utrainDescCollection) descriptor
    collection.

    If the collection is not empty, the new descriptors are added to existing train descriptors.

    @param descriptors Descriptors to add. Each descriptors[i] is a set of descriptors from the same
    train image.
     */
    CV_WRAP virtual void add( InputArrayOfArrays descriptors );

    /** @brief Returns a constant link to the train descriptor collection trainDescCollection .
     */
    CV_WRAP const std::vector<Mat>& getTrainDescriptors() const;

    /** @brief Clears the train descriptor collections.
     */
    CV_WRAP virtual void clear() CV_OVERRIDE;

    /** @brief Returns true if there are no train descriptors in both collections.
     */
    CV_WRAP virtual bool empty() const CV_OVERRIDE;

    /** @brief Returns true if the descriptor matcher supports masking permissible matches.
     */
    CV_WRAP virtual bool isMaskSupported() const = 0;

    /** @brief Trains a descriptor matcher

    Trains a descriptor matcher (for example, the flann index). In all methods to match, the method
    train() is run every time before matching. Some descriptor matchers (for example, BruteForceMatcher)
    have an empty implementation of this method. Other matchers really train their inner structures (for
    example, FlannBasedMatcher trains flann::Index ).
     */
    CV_WRAP virtual void train();

    /** @brief Finds the best match for each descriptor from a query set.

    @param queryDescriptors Query set of descriptors.
    @param trainDescriptors Train set of descriptors. This set is not added to the train descriptors
    collection stored in the class object.
    @param matches Matches. If a query descriptor is masked out in mask , no match is added for this
    descriptor. So, matches size may be smaller than the query descriptors count.
    @param mask Mask specifying permissible matches between an input query and train matrices of
    descriptors.

    In the first variant of this method, the train descriptors are passed as an input argument. In the
    second variant of the method, the train descriptors collection that was set by DescriptorMatcher::add is
    used. An optional mask (or masks) can be passed to specify which query and training descriptors can be
    matched. Namely, queryDescriptors[i] can be matched with trainDescriptors[j] only if
    mask.at\<uchar\>(i,j) is non-zero.
     */
    CV_WRAP void match( InputArray queryDescriptors, InputArray trainDescriptors,
                CV_OUT std::vector<DMatch>& matches, InputArray mask=noArray() ) const;

    /** @brief Finds the k best matches for each descriptor from a query set.

    @param queryDescriptors Query set of descriptors.
    @param trainDescriptors Train set of descriptors. This set is not added to the train descriptors
    collection stored in the class object.
    @param mask Mask specifying permissible matches between an input query and train matrices of
    descriptors.
    @param matches Matches. Each matches[i] is k or less matches for the same query descriptor.
    @param k Count of best matches found per each query descriptor or less if a query descriptor has
    less than k possible matches in total.
    @param compactResult Parameter used when the mask (or masks) is not empty. If compactResult is
    false, the matches vector has the same size as queryDescriptors rows. If compactResult is true,
    the matches vector does not contain matches for fully masked-out query descriptors.

    These extended variants of DescriptorMatcher::match methods find several best matches for each query
    descriptor. The matches are returned in order of increasing distance. See DescriptorMatcher::match
    for the details about query and train descriptors.
     */
    CV_WRAP void knnMatch( InputArray queryDescriptors, InputArray trainDescriptors,
                   CV_OUT std::vector<std::vector<DMatch> >& matches, int k,
                   InputArray mask=noArray(), bool compactResult=false ) const;

    /** @brief For each query descriptor, finds the training descriptors not farther than the specified distance.

    @param queryDescriptors Query set of descriptors.
    @param trainDescriptors Train set of descriptors. This set is not added to the train descriptors
    collection stored in the class object.
    @param matches Found matches.
    @param compactResult Parameter used when the mask (or masks) is not empty.
    If compactResult is
    false, the matches vector has the same size as queryDescriptors rows. If compactResult is true,
    the matches vector does not contain matches for fully masked-out query descriptors.
    @param maxDistance Threshold for the distance between matched descriptors. Distance here means
    metric distance (e.g. Hamming distance), not the distance between coordinates (which is measured
    in pixels)!
    @param mask Mask specifying permissible matches between an input query and train matrices of
    descriptors.

    For each query descriptor, the methods find such training descriptors that the distance between the
    query descriptor and the training descriptor is equal to or smaller than maxDistance. Found matches are
    returned in order of increasing distance.
     */
    CV_WRAP void radiusMatch( InputArray queryDescriptors, InputArray trainDescriptors,
                      CV_OUT std::vector<std::vector<DMatch> >& matches, float maxDistance,
                      InputArray mask=noArray(), bool compactResult=false ) const;

    /** @overload
    @param queryDescriptors Query set of descriptors.
    @param matches Matches. If a query descriptor is masked out in mask , no match is added for this
    descriptor. So, matches size may be smaller than the query descriptors count.
    @param masks Set of masks. Each masks[i] specifies permissible matches between the input query
    descriptors and stored train descriptors from the i-th image trainDescCollection[i].
    */
    CV_WRAP void match( InputArray queryDescriptors, CV_OUT std::vector<DMatch>& matches,
                        InputArrayOfArrays masks=noArray() );
    /** @overload
    @param queryDescriptors Query set of descriptors.
    @param matches Matches. Each matches[i] is k or less matches for the same query descriptor.
    @param k Count of best matches found per each query descriptor or less if a query descriptor has
    less than k possible matches in total.
    @param masks Set of masks. Each masks[i] specifies permissible matches between the input query
    descriptors and stored train descriptors from the i-th image trainDescCollection[i].
    @param compactResult Parameter used when the mask (or masks) is not empty. If compactResult is
    false, the matches vector has the same size as queryDescriptors rows. If compactResult is true,
    the matches vector does not contain matches for fully masked-out query descriptors.
    */
    CV_WRAP void knnMatch( InputArray queryDescriptors, CV_OUT std::vector<std::vector<DMatch> >& matches, int k,
                           InputArrayOfArrays masks=noArray(), bool compactResult=false );
    /** @overload
    @param queryDescriptors Query set of descriptors.
    @param matches Found matches.
    @param maxDistance Threshold for the distance between matched descriptors. Distance here means
    metric distance (e.g. Hamming distance), not the distance between coordinates (which is measured
    in pixels)!
    @param masks Set of masks. Each masks[i] specifies permissible matches between the input query
    descriptors and stored train descriptors from the i-th image trainDescCollection[i].
    @param compactResult Parameter used when the mask (or masks) is not empty. If compactResult is
    false, the matches vector has the same size as queryDescriptors rows. If compactResult is true,
    the matches vector does not contain matches for fully masked-out query descriptors.
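
    A minimal usage sketch, matching against a trained collection (assumes queryDescriptors and
    trainDescriptorsPerImage hold binary descriptors, so maxDistance is a Hamming distance; the
    value 64 is illustrative only):
    @code{.cpp}
        Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming");
        matcher->add(trainDescriptorsPerImage); // std::vector<Mat>, one Mat per train image
        matcher->train();
        std::vector<std::vector<DMatch> > matches;
        matcher->radiusMatch(queryDescriptors, matches, 64.0f); // all neighbours within distance 64
    @endcode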
    */
    CV_WRAP void radiusMatch( InputArray queryDescriptors, CV_OUT std::vector<std::vector<DMatch> >& matches, float maxDistance,
                              InputArrayOfArrays masks=noArray(), bool compactResult=false );


    CV_WRAP void write( const String& fileName ) const
    {
        FileStorage fs(fileName, FileStorage::WRITE);
        write(fs);
    }

    CV_WRAP void read( const String& fileName )
    {
        FileStorage fs(fileName, FileStorage::READ);
        read(fs.root());
    }
    // Reads matcher object from a file node
    // see corresponding cv::Algorithm method
    CV_WRAP virtual void read( const FileNode& ) CV_OVERRIDE;
    // Writes matcher object to a file storage
    virtual void write( FileStorage& ) const CV_OVERRIDE;

    /** @brief Clones the matcher.

    @param emptyTrainData If emptyTrainData is false, the method creates a deep copy of the object,
    that is, copies both parameters and train data. If emptyTrainData is true, the method creates an
    object copy with the current parameters but with empty train data.
     */
    CV_WRAP virtual Ptr<DescriptorMatcher> clone( bool emptyTrainData=false ) const = 0;

    /** @brief Creates a descriptor matcher of a given type with the default parameters (using the default
    constructor).

    @param descriptorMatcherType Descriptor matcher type. Now the following matcher types are
    supported:
    -   `BruteForce` (it uses L2 )
    -   `BruteForce-L1`
    -   `BruteForce-Hamming`
    -   `BruteForce-Hamming(2)`
    -   `FlannBased`
     */
    CV_WRAP static Ptr<DescriptorMatcher> create( const String& descriptorMatcherType );

    CV_WRAP static Ptr<DescriptorMatcher> create( int matcherType );


    // see corresponding cv::Algorithm method
    CV_WRAP inline void write(const Ptr<FileStorage>& fs, const String& name = String()) const { Algorithm::write(fs, name); }

protected:
    /**
     * Class to work with descriptors from several images as with one merged matrix.
     * It is used e.g. in FlannBasedMatcher.
     */
    class CV_EXPORTS DescriptorCollection
    {
    public:
        DescriptorCollection();
        DescriptorCollection( const DescriptorCollection& collection );
        virtual ~DescriptorCollection();

        // Vector of matrices "descriptors" will be merged to one matrix "mergedDescriptors" here.
        void set( const std::vector<Mat>& descriptors );
        virtual void clear();

        const Mat& getDescriptors() const;
        const Mat getDescriptor( int imgIdx, int localDescIdx ) const;
        const Mat getDescriptor( int globalDescIdx ) const;
        void getLocalIdx( int globalDescIdx, int& imgIdx, int& localDescIdx ) const;

        int size() const;

    protected:
        Mat mergedDescriptors;
        std::vector<int> startIdxs;
    };

    //! In fact the matching is implemented only by the following two methods. These methods suppose
    //! that the class object has been trained already. Public match methods call these methods
    //! after calling train().
    virtual void knnMatchImpl( InputArray queryDescriptors, std::vector<std::vector<DMatch> >& matches, int k,
        InputArrayOfArrays masks=noArray(), bool compactResult=false ) = 0;
    virtual void radiusMatchImpl( InputArray queryDescriptors, std::vector<std::vector<DMatch> >& matches, float maxDistance,
        InputArrayOfArrays masks=noArray(), bool compactResult=false ) = 0;

    static bool isPossibleMatch( InputArray mask, int queryIdx, int trainIdx );
    static bool isMaskedOut( InputArrayOfArrays masks, int queryIdx );

    static Mat clone_op( Mat m ) { return m.clone(); }
    void checkMasks( InputArrayOfArrays masks, int queryDescriptorsCount ) const;

    //! Collection of descriptors from train images.
    std::vector<Mat> trainDescCollection;
    std::vector<UMat> utrainDescCollection;
};

/** @brief Brute-force descriptor matcher.

For each descriptor in the first set, this matcher finds the closest descriptor in the second set
by trying each one. This descriptor matcher supports masking permissible matches of descriptor
sets.
 */
class CV_EXPORTS_W BFMatcher : public DescriptorMatcher
{
public:
    /** @brief Brute-force matcher constructor (obsolete). Please use BFMatcher.create()
     */
    CV_WRAP BFMatcher( int normType=NORM_L2, bool crossCheck=false );

    virtual ~BFMatcher() {}

    virtual bool isMaskSupported() const CV_OVERRIDE { return true; }

    /** @brief Brute-force matcher create method.
    @param normType One of NORM_L1, NORM_L2, NORM_HAMMING, NORM_HAMMING2. L1 and L2 norms are
    preferable choices for SIFT and SURF descriptors, NORM_HAMMING should be used with ORB, BRISK and
    BRIEF, NORM_HAMMING2 should be used with ORB when WTA_K==3 or 4 (see ORB::ORB constructor
    description).
    @param crossCheck If it is false, this will be the default BFMatcher behaviour: it finds the k
    nearest neighbors for each query descriptor. If crossCheck==true, then the knnMatch() method with
    k=1 will only return pairs (i,j) such that for the i-th query descriptor the j-th descriptor in the
    matcher's collection is the nearest and vice versa, i.e. the BFMatcher will only return consistent
    pairs. Such a technique usually produces the best results with a minimal number of outliers when
    there are enough matches. This is an alternative to the ratio test used by D. Lowe in the SIFT paper.
    */
    CV_WRAP static Ptr<BFMatcher> create( int normType=NORM_L2, bool crossCheck=false ) ;

    virtual Ptr<DescriptorMatcher> clone( bool emptyTrainData=false ) const CV_OVERRIDE;
protected:
    virtual void knnMatchImpl( InputArray queryDescriptors, std::vector<std::vector<DMatch> >& matches, int k,
        InputArrayOfArrays masks=noArray(), bool compactResult=false ) CV_OVERRIDE;
    virtual void radiusMatchImpl( InputArray queryDescriptors, std::vector<std::vector<DMatch> >& matches, float maxDistance,
        InputArrayOfArrays masks=noArray(), bool compactResult=false ) CV_OVERRIDE;

    int normType;
    bool crossCheck;
};

#if defined(HAVE_OPENCV_FLANN) || defined(CV_DOXYGEN)

/** @brief Flann-based descriptor matcher.

This matcher trains cv::flann::Index on a train descriptor collection and calls its nearest search
methods to find the best matches. So, this matcher may be faster when matching a large train
collection than the brute force matcher.
FlannBasedMatcher does not support masking permissible
matches of descriptor sets because flann::Index does not support this.
 */
class CV_EXPORTS_W FlannBasedMatcher : public DescriptorMatcher
{
public:
    CV_WRAP FlannBasedMatcher( const Ptr<flann::IndexParams>& indexParams=makePtr<flann::KDTreeIndexParams>(),
                       const Ptr<flann::SearchParams>& searchParams=makePtr<flann::SearchParams>() );

    virtual void add( InputArrayOfArrays descriptors ) CV_OVERRIDE;
    virtual void clear() CV_OVERRIDE;

    // Reads matcher object from a file node
    virtual void read( const FileNode& ) CV_OVERRIDE;
    // Writes matcher object to a file storage
    virtual void write( FileStorage& ) const CV_OVERRIDE;

    virtual void train() CV_OVERRIDE;
    virtual bool isMaskSupported() const CV_OVERRIDE;

    CV_WRAP static Ptr<FlannBasedMatcher> create();

    virtual Ptr<DescriptorMatcher> clone( bool emptyTrainData=false ) const CV_OVERRIDE;
protected:
    static void convertToDMatches( const DescriptorCollection& descriptors,
                                   const Mat& indices, const Mat& distances,
                                   std::vector<std::vector<DMatch> >& matches );

    virtual void knnMatchImpl( InputArray queryDescriptors, std::vector<std::vector<DMatch> >& matches, int k,
        InputArrayOfArrays masks=noArray(), bool compactResult=false ) CV_OVERRIDE;
    virtual void radiusMatchImpl( InputArray queryDescriptors, std::vector<std::vector<DMatch> >& matches, float maxDistance,
        InputArrayOfArrays masks=noArray(), bool compactResult=false ) CV_OVERRIDE;

    Ptr<flann::IndexParams> indexParams;
    Ptr<flann::SearchParams> searchParams;
    Ptr<flann::Index> flannIndex;

    DescriptorCollection mergedDescriptors;
    int addedDescCount;
};

#endif

//! @} features2d_match

/****************************************************************************************\
*                                   Drawing functions                                    *
\****************************************************************************************/

//! @addtogroup features2d_draw
//! @{

struct CV_EXPORTS DrawMatchesFlags
{
    enum{ DEFAULT = 0, //!< Output image matrix will be created (Mat::create),
                       //!< i.e. existing memory of output image may be reused.
                       //!< Two source images, matches and single keypoints will be drawn.
                       //!< For each keypoint only the center point will be drawn (without
                       //!< the circle around keypoint with keypoint size and orientation).
          DRAW_OVER_OUTIMG = 1, //!< Output image matrix will not be created (Mat::create).
                                //!< Matches will be drawn on existing content of output image.
          NOT_DRAW_SINGLE_POINTS = 2, //!< Single keypoints will not be drawn.
          DRAW_RICH_KEYPOINTS = 4 //!< For each keypoint the circle around keypoint with keypoint size and
                                  //!< orientation will be drawn.
        };
};

/** @brief Draws keypoints.

@param image Source image.
@param keypoints Keypoints from the source image.
@param outImage Output image. Its content depends on the flags value defining what is drawn in the
output image. See possible flags bit values below.
@param color Color of keypoints.
@param flags Flags setting drawing features. Possible flags bit values are defined by
DrawMatchesFlags. See details above in drawMatches .
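
A minimal usage sketch, drawing rich keypoints, i.e. circles with keypoint size and orientation
(assumes image and keypoints were produced by one of the detectors above):
@code{.cpp}
    Mat outImg;
    drawKeypoints(image, keypoints, outImg, Scalar::all(-1),
                  DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
@endcode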

@note
For Python API, flags are modified as cv2.DRAW_MATCHES_FLAGS_DEFAULT,
cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS, cv2.DRAW_MATCHES_FLAGS_DRAW_OVER_OUTIMG,
cv2.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS
 */
CV_EXPORTS_W void drawKeypoints( InputArray image, const std::vector<KeyPoint>& keypoints, InputOutputArray outImage,
                               const Scalar& color=Scalar::all(-1), int flags=DrawMatchesFlags::DEFAULT );

/** @brief Draws the found matches of keypoints from two images.

@param img1 First source image.
@param keypoints1 Keypoints from the first source image.
@param img2 Second source image.
@param keypoints2 Keypoints from the second source image.
@param matches1to2 Matches from the first image to the second one, which means that keypoints1[i]
has a corresponding point in keypoints2[matches[i]] .
@param outImg Output image. Its content depends on the flags value defining what is drawn in the
output image. See possible flags bit values below.
@param matchColor Color of matches (lines and connected keypoints). If matchColor==Scalar::all(-1)
, the color is generated randomly.
@param singlePointColor Color of single keypoints (circles), which means that keypoints do not
have the matches. If singlePointColor==Scalar::all(-1) , the color is generated randomly.
@param matchesMask Mask determining which matches are drawn. If the mask is empty, all matches are
drawn.
@param flags Flags setting drawing features. Possible flags bit values are defined by
DrawMatchesFlags.

This function draws matches of keypoints from two images in the output image. A match is a line
connecting two keypoints (circles). See cv::DrawMatchesFlags.
 */
CV_EXPORTS_W void drawMatches( InputArray img1, const std::vector<KeyPoint>& keypoints1,
                             InputArray img2, const std::vector<KeyPoint>& keypoints2,
                             const std::vector<DMatch>& matches1to2, InputOutputArray outImg,
                             const Scalar& matchColor=Scalar::all(-1), const Scalar& singlePointColor=Scalar::all(-1),
                             const std::vector<char>& matchesMask=std::vector<char>(), int flags=DrawMatchesFlags::DEFAULT );

/** @overload */
CV_EXPORTS_AS(drawMatchesKnn) void drawMatches( InputArray img1, const std::vector<KeyPoint>& keypoints1,
                             InputArray img2, const std::vector<KeyPoint>& keypoints2,
                             const std::vector<std::vector<DMatch> >& matches1to2, InputOutputArray outImg,
                             const Scalar& matchColor=Scalar::all(-1), const Scalar& singlePointColor=Scalar::all(-1),
                             const std::vector<std::vector<char> >& matchesMask=std::vector<std::vector<char> >(), int flags=DrawMatchesFlags::DEFAULT );
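
/* Example: detect, match and visualize in a few lines (a minimal sketch;
 * img1 and img2 are assumed to be valid 8-bit input images):
 *
 *     Ptr<ORB> orb = ORB::create();
 *     std::vector<KeyPoint> kpts1, kpts2;
 *     Mat desc1, desc2, vis;
 *     orb->detectAndCompute(img1, noArray(), kpts1, desc1);
 *     orb->detectAndCompute(img2, noArray(), kpts2, desc2);
 *     std::vector<DMatch> matches;
 *     BFMatcher(NORM_HAMMING, true).match(desc1, desc2, matches); // cross-checked matching
 *     drawMatches(img1, kpts1, img2, kpts2, matches, vis);
 */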
//! @} features2d_draw

/****************************************************************************************\
*    Functions to evaluate the feature detectors and [generic] descriptor extractors    *
\****************************************************************************************/

CV_EXPORTS void evaluateFeatureDetector( const Mat& img1, const Mat& img2, const Mat& H1to2,
                                         std::vector<KeyPoint>* keypoints1, std::vector<KeyPoint>* keypoints2,
                                         float& repeatability, int& correspCount,
                                         const Ptr<FeatureDetector>& fdetector=Ptr<FeatureDetector>() );

CV_EXPORTS void computeRecallPrecisionCurve( const std::vector<std::vector<DMatch> >& matches1to2,
                                             const std::vector<std::vector<uchar> >& correctMatches1to2Mask,
                                             std::vector<Point2f>& recallPrecisionCurve );

CV_EXPORTS float getRecall( const std::vector<Point2f>& recallPrecisionCurve, float l_precision );
CV_EXPORTS int getNearestPoint( const std::vector<Point2f>& recallPrecisionCurve, float l_precision );

/****************************************************************************************\
*                                  Bag of visual words                                   *
\****************************************************************************************/

//! @addtogroup features2d_category
//! @{

/** @brief Abstract base class for training the *bag of visual words* vocabulary from a set of descriptors.

For details, see, for example, *Visual Categorization with Bags of Keypoints* by Gabriella Csurka,
Christopher R. Dance, Lixin Fan, Jutta Willamowski, Cedric Bray, 2004.
*/
class CV_EXPORTS_W BOWTrainer
{
public:
    BOWTrainer();
    virtual ~BOWTrainer();

    /** @brief Adds descriptors to a training set.

    @param descriptors Descriptors to add to a training set. Each row of the descriptors matrix is a
    descriptor.

    The training set is clustered using the cluster method to construct the vocabulary.
    */
    CV_WRAP void add( const Mat& descriptors );

    /** @brief Returns a training set of descriptors.
    */
    CV_WRAP const std::vector<Mat>& getDescriptors() const;

    /** @brief Returns the count of all descriptors stored in the training set.
    */
    CV_WRAP int descriptorsCount() const;

    CV_WRAP virtual void clear();

    /** @overload */
    CV_WRAP virtual Mat cluster() const = 0;

    /** @brief Clusters train descriptors.

    @param descriptors Descriptors to cluster. Each row of the descriptors matrix is a descriptor.
    Descriptors are not added to the inner train descriptor set.

    The vocabulary consists of cluster centers. So, this method returns the vocabulary. In the first
    variant of the method, train descriptors stored in the object are clustered. In the second variant,
    input descriptors are clustered.
    */
    CV_WRAP virtual Mat cluster( const Mat& descriptors ) const = 0;

protected:
    std::vector<Mat> descriptors;
    int size;
};

/** @brief kmeans-based class to train visual vocabulary using the *bag of visual words* approach.
*/
class CV_EXPORTS_W BOWKMeansTrainer : public BOWTrainer
{
public:
    /** @brief The constructor.

    @see cv::kmeans
    */
    CV_WRAP BOWKMeansTrainer( int clusterCount, const TermCriteria& termcrit=TermCriteria(),
                      int attempts=3, int flags=KMEANS_PP_CENTERS );
    virtual ~BOWKMeansTrainer();

    // Returns trained vocabulary (i.e. cluster centers).
    CV_WRAP virtual Mat cluster() const CV_OVERRIDE;
    CV_WRAP virtual Mat cluster( const Mat& descriptors ) const CV_OVERRIDE;

protected:

    int clusterCount;
    TermCriteria termcrit;
    int attempts;
    int flags;
};
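/*
Example (an illustrative sketch): building a visual vocabulary with BOWKMeansTrainer.
trainImages and the float-descriptor extractor (any Feature2D producing CV_32F
descriptors) are assumptions; k-means clusters float data, so binary descriptors
such as ORB's would first need conversion to CV_32F.

    int vocabularySize = 100;  // assumed vocabulary size
    cv::BOWKMeansTrainer bowTrainer(vocabularySize);

    for (size_t i = 0; i < trainImages.size(); i++)
    {
        std::vector<cv::KeyPoint> kp;
        cv::Mat desc;
        extractor->detectAndCompute(trainImages[i], cv::noArray(), kp, desc);
        if (!desc.empty())
            bowTrainer.add(desc);  // rows accumulate in the training set
    }

    // One cluster center per row: a vocabularySize x descriptorSize CV_32F matrix.
    cv::Mat vocabulary = bowTrainer.cluster();
*/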
/** @brief Class to compute an image descriptor using the *bag of visual words*.

Such a computation consists of the following steps:

1.  Compute descriptors for a given image and its keypoints set.
2.  Find the nearest visual words from the vocabulary for each keypoint descriptor.
3.  Compute the bag-of-words image descriptor as a normalized histogram of vocabulary words
encountered in the image. The i-th bin of the histogram is the frequency of the i-th word of the
vocabulary in the given image.
*/
class CV_EXPORTS_W BOWImgDescriptorExtractor
{
public:
    /** @brief The constructor.

    @param dextractor Descriptor extractor that is used to compute descriptors for an input image and
    its keypoints.
    @param dmatcher Descriptor matcher that is used to find the nearest word of the trained vocabulary
    for each keypoint descriptor of the image.
    */
    CV_WRAP BOWImgDescriptorExtractor( const Ptr<DescriptorExtractor>& dextractor,
                               const Ptr<DescriptorMatcher>& dmatcher );
    /** @overload */
    BOWImgDescriptorExtractor( const Ptr<DescriptorMatcher>& dmatcher );
    virtual ~BOWImgDescriptorExtractor();

    /** @brief Sets a visual vocabulary.

    @param vocabulary Vocabulary (can be trained using an inheritor of BOWTrainer ). Each row of the
    vocabulary is a visual word (cluster center).
    */
    CV_WRAP void setVocabulary( const Mat& vocabulary );

    /** @brief Returns the set vocabulary.
    */
    CV_WRAP const Mat& getVocabulary() const;

    /** @brief Computes an image descriptor using the set visual vocabulary.

    @param image Image, for which the descriptor is computed.
    @param keypoints Keypoints detected in the input image.
    @param imgDescriptor Computed output image descriptor.
    @param pointIdxsOfClusters Indices of keypoints that belong to each cluster. This means that
    pointIdxsOfClusters[i] are the indices of keypoints that belong to the i-th cluster (word of the
    vocabulary); returned if it is non-zero.
    @param descriptors Descriptors of the image keypoints; returned if it is non-zero.
    */
    void compute( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray imgDescriptor,
                  std::vector<std::vector<int> >* pointIdxsOfClusters=0, Mat* descriptors=0 );
    /** @overload
    @param keypointDescriptors Computed descriptors to match with vocabulary.
    @param imgDescriptor Computed output image descriptor.
    @param pointIdxsOfClusters Indices of keypoints that belong to each cluster. This means that
    pointIdxsOfClusters[i] are the indices of keypoints that belong to the i-th cluster (word of the
    vocabulary); returned if it is non-zero.
    */
    void compute( InputArray keypointDescriptors, OutputArray imgDescriptor,
                  std::vector<std::vector<int> >* pointIdxsOfClusters=0 );
    // compute() is not constant because DescriptorMatcher::match is not constant

    CV_WRAP_AS(compute) void compute2( const Mat& image, std::vector<KeyPoint>& keypoints, CV_OUT Mat& imgDescriptor )
    { compute(image,keypoints,imgDescriptor); }

    /** @brief Returns an image descriptor size if the vocabulary is set. Otherwise, it returns 0.
    */
    CV_WRAP int descriptorSize() const;

    /** @brief Returns an image descriptor type.
    */
    CV_WRAP int descriptorType() const;

protected:
    Mat vocabulary;
    Ptr<DescriptorExtractor> dextractor;
    Ptr<DescriptorMatcher> dmatcher;
};
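/*
Example (an illustrative sketch): computing a bag-of-words histogram for one image,
reusing the vocabulary built in the sketch above. The "BruteForce" (L2) matcher and
the shared float-descriptor extractor are assumptions; the matcher's norm must suit
the descriptor type.

    cv::Ptr<cv::DescriptorMatcher> matcher = cv::DescriptorMatcher::create("BruteForce");
    cv::BOWImgDescriptorExtractor bowExtractor(extractor, matcher);
    bowExtractor.setVocabulary(vocabulary);

    std::vector<cv::KeyPoint> kp;
    extractor->detect(image, kp);

    // 1 x vocabularySize CV_32F row: normalized frequency of each visual word,
    // usable as a fixed-length feature vector for a classifier.
    cv::Mat bowDescriptor;
    bowExtractor.compute(image, kp, bowDescriptor);
*/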
//! @} features2d_category

//! @} features2d

} /* namespace cv */

#endif