#ifndef _vpKeyPoint_h_
#define _vpKeyPoint_h_
#include <visp3/core/vpConfig.h>
#include <visp3/core/vpDisplay.h>
#include <visp3/core/vpImageConvert.h>
#include <visp3/core/vpPixelMeterConversion.h>
#include <visp3/core/vpPlane.h>
#include <visp3/core/vpPoint.h>
#include <visp3/vision/vpBasicKeyPoint.h>
#include <visp3/vision/vpPose.h>
#ifdef VISP_HAVE_MODULE_IO
#include <visp3/io/vpImageIo.h>
#endif
#include <visp3/core/vpConvert.h>
#include <visp3/core/vpCylinder.h>
#include <visp3/core/vpMeterPixelConversion.h>
#include <visp3/core/vpPolygon.h>
#include <visp3/vision/vpXmlConfigParserKeyPoint.h>
#if (VISP_HAVE_OPENCV_VERSION >= 0x020101)

#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/imgproc/imgproc.hpp>

#if (VISP_HAVE_OPENCV_VERSION >= 0x040000) // Require opencv >= 4.0.0
#include <opencv2/imgproc/imgproc_c.h>
#include <opencv2/imgproc.hpp>
#endif

#if defined(VISP_HAVE_OPENCV_XFEATURES2D) // OpenCV >= 3.0.0
#include <opencv2/xfeatures2d.hpp>
#elif defined(VISP_HAVE_OPENCV_NONFREE) && (VISP_HAVE_OPENCV_VERSION >= 0x020400) && \
    (VISP_HAVE_OPENCV_VERSION < 0x030000)
#include <opencv2/nonfree/nonfree.hpp>
#endif
  constantFactorDistanceThreshold, /*!< Keep matches whose descriptor distance is below the minimum distance multiplied by a constant factor. */
  stdDistanceThreshold,            /*!< Keep matches whose descriptor distance is below the minimum distance plus the standard deviation. */
  ratioDistanceThreshold,          /*!< Keep matches that are discriminated enough: the ratio between the two nearest-neighbor distances is below a threshold. */
  stdAndRatioDistanceThreshold,    /*!< Keep matches that satisfy both the standard-deviation and the ratio criteria. */
#if (VISP_HAVE_OPENCV_VERSION >= 0x020403)
#if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))
#if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)
#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
#if (VISP_HAVE_OPENCV_VERSION >= 0x030100) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
#if (VISP_HAVE_OPENCV_VERSION >= 0x020403)
#if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))
#if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)
#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
#if defined(VISP_HAVE_OPENCV_XFEATURES2D)
#if (VISP_HAVE_OPENCV_VERSION >= 0x030200) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
  DESCRIPTOR_BoostDesc,
  vpKeyPoint(const vpFeatureDetectorType &detectorType, const vpFeatureDescriptorType &descriptorType,
             const std::string &matcherName, const vpFilterMatchingType &filterType = ratioDistanceThreshold);
  vpKeyPoint(const std::string &detectorName = "ORB", const std::string &extractorName = "ORB",
             const std::string &matcherName = "BruteForce-Hamming",
             const vpFilterMatchingType &filterType = ratioDistanceThreshold);
  vpKeyPoint(const std::vector<std::string> &detectorNames, const std::vector<std::string> &extractorNames,
             const std::string &matcherName = "BruteForce",
             const vpFilterMatchingType &filterType = ratioDistanceThreshold);
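  /*
   * Minimal usage sketch (illustrative only, not part of the class interface).
   * It assumes a ViSP build with OpenCV and two grayscale images Iref and Icur
   * already loaded, e.g. with vpImageIo::read():
   *
   *   vpKeyPoint keypoint("ORB", "ORB", "BruteForce-Hamming");
   *   keypoint.buildReference(Iref);                      // learn keypoints on the reference image
   *   unsigned int nbMatches = keypoint.matchPoint(Icur); // detect, extract and match on the current image
   */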
  unsigned int buildReference(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &trainKeyPoints,
                              std::vector<cv::Point3f> &points3f, bool append = false, int class_id = -1);
  unsigned int buildReference(const vpImage<unsigned char> &I, const std::vector<cv::KeyPoint> &trainKeyPoints,
                              const cv::Mat &trainDescriptors, const std::vector<cv::Point3f> &points3f,
                              bool append = false, int class_id = -1);

  unsigned int buildReference(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &trainKeyPoints,
                              std::vector<cv::Point3f> &points3f, bool append = false, int class_id = -1);
  unsigned int buildReference(const vpImage<vpRGBa> &I_color, const std::vector<cv::KeyPoint> &trainKeyPoints,
                              const cv::Mat &trainDescriptors, const std::vector<cv::Point3f> &points3f,
                              bool append = false, int class_id = -1);
  static void compute3D(const cv::KeyPoint &candidate, const std::vector<vpPoint> &roi, const vpCameraParameters &cam,
                        const vpHomogeneousMatrix &cMo, cv::Point3f &point);
  static void compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
                                           std::vector<cv::KeyPoint> &candidates,
                                           const std::vector<vpPolygon> &polygons,
                                           const std::vector<std::vector<vpPoint> > &roisPt,
                                           std::vector<cv::Point3f> &points, cv::Mat *descriptors = NULL);
  static void compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
                                           std::vector<vpImagePoint> &candidates,
                                           const std::vector<vpPolygon> &polygons,
                                           const std::vector<std::vector<vpPoint> > &roisPt,
                                           std::vector<vpPoint> &points, cv::Mat *descriptors = NULL);
  static void compute3DForPointsOnCylinders(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
                                            std::vector<cv::KeyPoint> &candidates,
                                            const std::vector<vpCylinder> &cylinders,
                                            const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois,
                                            std::vector<cv::Point3f> &points, cv::Mat *descriptors = NULL);
  static void compute3DForPointsOnCylinders(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
                                            std::vector<vpImagePoint> &candidates,
                                            const std::vector<vpCylinder> &cylinders,
                                            const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois,
                                            std::vector<vpPoint> &points, cv::Mat *descriptors = NULL);
  bool computePose(const std::vector<cv::Point2f> &imagePoints, const std::vector<cv::Point3f> &objectPoints,
                   const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, std::vector<int> &inlierIndex,
                   double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &) = NULL);
  bool computePose(const std::vector<vpPoint> &objectVpPoints, vpHomogeneousMatrix &cMo, std::vector<vpPoint> &inliers,
                   double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &) = NULL);
  bool computePose(const std::vector<vpPoint> &objectVpPoints, vpHomogeneousMatrix &cMo, std::vector<vpPoint> &inliers,
                   std::vector<unsigned int> &inlierIndex, double &elapsedTime,
                   bool (*func)(const vpHomogeneousMatrix &) = NULL);
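  /*
   * Illustrative sketch of a pose computation from already matched 2D/3D points
   * (imagePoints, objectPoints, cam and cMo are assumed to be provided by the caller):
   *
   *   std::vector<int> inlierIndex;
   *   double elapsedTime = 0.;
   *   vpKeyPoint keypoint;
   *   bool success = keypoint.computePose(imagePoints, objectPoints, cam, cMo, inlierIndex, elapsedTime);
   */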
  void detect(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints,
              const vpRect &rectangle = vpRect());
  void detect(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, const cv::Mat &mask = cv::Mat());
  void detect(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
              const vpRect &rectangle = vpRect());
  void detect(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
              const cv::Mat &mask = cv::Mat());
  void detectExtractAffine(const vpImage<unsigned char> &I, std::vector<std::vector<cv::KeyPoint> > &listOfKeypoints,
                           std::vector<cv::Mat> &listOfDescriptors,
                           std::vector<vpImage<unsigned char> > *listOfAffineI = NULL);
  void displayMatching(const vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching,
                       const std::vector<vpImagePoint> &ransacInliers = std::vector<vpImagePoint>(),
                       unsigned int crossSize = 3, unsigned int lineThickness = 1);
  void displayMatching(const vpImage<vpRGBa> &ICurrent, vpImage<vpRGBa> &IMatching,
                       const std::vector<vpImagePoint> &ransacInliers = std::vector<vpImagePoint>(),
                       unsigned int crossSize = 3, unsigned int lineThickness = 1);
  void extract(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
               std::vector<cv::Point3f> *trainPoints = NULL);
  void extract(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
               std::vector<cv::Point3f> *trainPoints = NULL);
  void extract(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
               std::vector<cv::Point3f> *trainPoints = NULL);
  void extract(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
               double &elapsedTime, std::vector<cv::Point3f> *trainPoints = NULL);
  void extract(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
               double &elapsedTime, std::vector<cv::Point3f> *trainPoints = NULL);
  void extract(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors, double &elapsedTime,
               std::vector<cv::Point3f> *trainPoints = NULL);
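  /*
   * Illustrative detection/extraction sketch on a grayscale image I
   * (the image is assumed to be already acquired or loaded):
   *
   *   vpKeyPoint keypoint("ORB", "ORB", "BruteForce-Hamming");
   *   std::vector<cv::KeyPoint> keyPoints;
   *   cv::Mat descriptors;
   *   keypoint.detect(I, keyPoints);               // detect keypoints in I
   *   keypoint.extract(I, keyPoints, descriptors); // compute a descriptor for each keypoint
   */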
  inline vpMatrix getCovarianceMatrix() const
  {
    if (!m_computeCovariance) {
      std::cout << "Warning : The covariance matrix has not been computed. "
                   "See setCovarianceComputation() to do it."
                << std::endl;
      return vpMatrix();
    }

    if (m_computeCovariance && !m_useRansacVVS) {
      std::cout << "Warning : The covariance matrix can only be computed "
                   "with a Virtual Visual Servoing approach."
                << std::endl
                << "Use setUseRansacVVS(true) to choose to use a pose "
                   "estimation method based on a Virtual Visual Servoing "
                   "approach."
                << std::endl;
      return vpMatrix();
    }

    return m_covarianceMatrix;
  }
  inline cv::Ptr<cv::FeatureDetector> getDetector(const vpFeatureDetectorType &type) const
  {
    std::map<vpFeatureDetectorType, std::string>::const_iterator it_name = m_mapOfDetectorNames.find(type);
    if (it_name == m_mapOfDetectorNames.end()) {
      std::cerr << "Internal problem with the feature type and the "
                   "corresponding name!"
                << std::endl;
      return cv::Ptr<cv::FeatureDetector>();
    }

    std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator findDetector =
        m_detectors.find(it_name->second);
    if (findDetector != m_detectors.end()) {
      return findDetector->second;
    }

    std::cerr << "Cannot find: " << it_name->second << std::endl;
    return cv::Ptr<cv::FeatureDetector>();
  }
  inline cv::Ptr<cv::FeatureDetector> getDetector(const std::string &name) const
  {
    std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator findDetector = m_detectors.find(name);
    if (findDetector != m_detectors.end()) {
      return findDetector->second;
    }

    std::cerr << "Cannot find: " << name << std::endl;
    return cv::Ptr<cv::FeatureDetector>();
  }
  inline std::map<vpFeatureDetectorType, std::string> getDetectorNames() const { return m_mapOfDetectorNames; }
  inline cv::Ptr<cv::DescriptorExtractor> getExtractor(const vpFeatureDescriptorType &type) const
  {
    std::map<vpFeatureDescriptorType, std::string>::const_iterator it_name = m_mapOfDescriptorNames.find(type);
    if (it_name == m_mapOfDescriptorNames.end()) {
      std::cerr << "Internal problem with the feature type and the "
                   "corresponding name!"
                << std::endl;
      return cv::Ptr<cv::DescriptorExtractor>();
    }

    std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator findExtractor =
        m_extractors.find(it_name->second);
    if (findExtractor != m_extractors.end()) {
      return findExtractor->second;
    }

    std::cerr << "Cannot find: " << it_name->second << std::endl;
    return cv::Ptr<cv::DescriptorExtractor>();
  }
  inline cv::Ptr<cv::DescriptorExtractor> getExtractor(const std::string &name) const
  {
    std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator findExtractor = m_extractors.find(name);
    if (findExtractor != m_extractors.end()) {
      return findExtractor->second;
    }

    std::cerr << "Cannot find: " << name << std::endl;
    return cv::Ptr<cv::DescriptorExtractor>();
  }
  inline std::map<vpFeatureDescriptorType, std::string> getExtractorNames() const { return m_mapOfDescriptorNames; }
  inline cv::Ptr<cv::DescriptorMatcher> getMatcher() const { return m_matcher; }
  inline std::vector<cv::DMatch> getMatches() const { return m_filteredMatches; }
  inline std::vector<std::pair<cv::KeyPoint, cv::KeyPoint> > getMatchQueryToTrainKeyPoints() const
  {
    std::vector<std::pair<cv::KeyPoint, cv::KeyPoint> > matchQueryToTrainKeyPoints;
    matchQueryToTrainKeyPoints.reserve(m_filteredMatches.size());
    for (size_t i = 0; i < m_filteredMatches.size(); i++) {
      matchQueryToTrainKeyPoints.push_back(
          std::pair<cv::KeyPoint, cv::KeyPoint>(m_queryFilteredKeyPoints[(size_t)m_filteredMatches[i].queryIdx],
                                                m_trainKeyPoints[(size_t)m_filteredMatches[i].trainIdx]));
    }
    return matchQueryToTrainKeyPoints;
  }
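  /*
   * Illustrative iteration over the matched (query, train) keypoint pairs after a call to matchPoint()
   * (keypoint is assumed to be a vpKeyPoint that already performed a matching):
   *
   *   std::vector<std::pair<cv::KeyPoint, cv::KeyPoint> > pairs = keypoint.getMatchQueryToTrainKeyPoints();
   *   for (size_t i = 0; i < pairs.size(); i++) {
   *     std::cout << pairs[i].first.pt << " <--> " << pairs[i].second.pt << std::endl;
   *   }
   */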
  inline unsigned int getNbImages() const { return static_cast<unsigned int>(m_mapOfImages.size()); }
  void getObjectPoints(std::vector<cv::Point3f> &objectPoints) const;
  void getObjectPoints(std::vector<vpPoint> &objectPoints) const;

  void getQueryKeyPoints(std::vector<cv::KeyPoint> &keyPoints) const;
  void getQueryKeyPoints(std::vector<vpImagePoint> &keyPoints) const;

  void getTrainKeyPoints(std::vector<cv::KeyPoint> &keyPoints) const;
  void getTrainKeyPoints(std::vector<vpImagePoint> &keyPoints) const;

  void getTrainPoints(std::vector<cv::Point3f> &points) const;
  void getTrainPoints(std::vector<vpPoint> &points) const;
  void initMatcher(const std::string &matcherName);
#ifdef VISP_HAVE_PUGIXML
  void loadConfigFile(const std::string &configFile);
#endif
  void loadLearningData(const std::string &filename, bool binaryMode = false, bool append = false);
  void match(const cv::Mat &trainDescriptors, const cv::Mat &queryDescriptors, std::vector<cv::DMatch> &matches,
             double &elapsedTime);
  bool matchPointAndDetect(const vpImage<unsigned char> &I, vpRect &boundingBox, vpImagePoint &centerOfGravity,
                           const bool isPlanarObject = true, std::vector<vpImagePoint> *imPts1 = NULL,
                           std::vector<vpImagePoint> *imPts2 = NULL, double *meanDescriptorDistance = NULL,
                           double *detectionScore = NULL, const vpRect &rectangle = vpRect());
  bool matchPointAndDetect(const vpImage<unsigned char> &I, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
                           double &error, double &elapsedTime, vpRect &boundingBox, vpImagePoint &centerOfGravity,
                           bool (*func)(const vpHomogeneousMatrix &) = NULL, const vpRect &rectangle = vpRect());
  void saveLearningData(const std::string &filename, bool binaryMode = false, bool saveTrainingImages = true);
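  /*
   * Illustrative sketch of saving and reloading the learned reference data;
   * Iref and a configured vpKeyPoint named keypoint are assumed, and "learning.bin"
   * is an arbitrary file name chosen for the example:
   *
   *   keypoint.buildReference(Iref);
   *   keypoint.saveLearningData("learning.bin", true);  // binary mode, training images saved too
   *
   *   vpKeyPoint keypoint2;
   *   keypoint2.loadLearningData("learning.bin", true); // reload the data in another session
   */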
  inline void setCovarianceComputation(const bool &flag)
  {
    m_computeCovariance = flag;
    if (!m_useRansacVVS) {
      std::cout << "Warning : The covariance matrix can only be computed "
                   "with a Virtual Visual Servoing approach."
                << std::endl
                << "Use setUseRansacVVS(true) to choose to use a pose "
                   "estimation method based on a Virtual "
                   "Visual Servoing approach."
                << std::endl;
    }
  }
  inline void setDetector(const vpFeatureDetectorType &detectorType)
  {
    m_detectorNames.clear();
    m_detectorNames.push_back(m_mapOfDetectorNames[detectorType]);
    m_detectors.clear();
    initDetector(m_mapOfDetectorNames[detectorType]);
  }
  inline void setDetector(const std::string &detectorName)
  {
    m_detectorNames.clear();
    m_detectorNames.push_back(detectorName);
    m_detectors.clear();
    initDetector(detectorName);
  }
#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
  template <typename T1, typename T2, typename T3>
  inline void setDetectorParameter(const T1 detectorName, const T2 parameterName, const T3 value)
  {
    if (m_detectors.find(detectorName) != m_detectors.end()) {
      m_detectors[detectorName]->set(parameterName, value);
    }
  }
#endif
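  /*
   * Illustrative sketch, only meaningful with an OpenCV 2.4.x build where cv::Algorithm::set() exists;
   * "nFeatures" is assumed to be the parameter name exposed by the OpenCV 2.4 ORB detector:
   *
   *   keypoint.setDetectorParameter("ORB", "nFeatures", 1500); // ask the ORB detector for more keypoints
   */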
  inline void setDetectors(const std::vector<std::string> &detectorNames)
  {
    m_detectorNames.clear();
    m_detectors.clear();
    m_detectorNames = detectorNames;
    initDetectors(m_detectorNames);
  }
  inline void setExtractor(const vpFeatureDescriptorType &extractorType)
  {
    m_extractorNames.clear();
    m_extractorNames.push_back(m_mapOfDescriptorNames[extractorType]);
    m_extractors.clear();
    initExtractor(m_mapOfDescriptorNames[extractorType]);
  }
  inline void setExtractor(const std::string &extractorName)
  {
    m_extractorNames.clear();
    m_extractorNames.push_back(extractorName);
    m_extractors.clear();
    initExtractor(extractorName);
  }
#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
  template <typename T1, typename T2, typename T3>
  inline void setExtractorParameter(const T1 extractorName, const T2 parameterName, const T3 value)
  {
    if (m_extractors.find(extractorName) != m_extractors.end()) {
      m_extractors[extractorName]->set(parameterName, value);
    }
  }
#endif
  inline void setExtractors(const std::vector<std::string> &extractorNames)
  {
    m_extractorNames.clear();
    m_extractorNames = extractorNames;
    m_extractors.clear();
    initExtractors(m_extractorNames);
  }
  inline void setMatcher(const std::string &matcherName)
  {
    m_matcherName = matcherName;
    initMatcher(m_matcherName);
  }
  inline void setFilterMatchingType(const vpFilterMatchingType &filterType)
  {
    m_filterType = filterType;

    // The k-nearest-neighbor search is only needed for the ratio tests
    if (filterType == ratioDistanceThreshold || filterType == stdAndRatioDistanceThreshold) {
      m_useKnn = true;
#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
      if (m_matcher != NULL && m_matcherName == "BruteForce") {
        // Disable the crossCheck option: it cannot be combined with knnMatch
        m_matcher->set("crossCheck", false);
      }
#endif
    } else {
      m_useKnn = false;
#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
      if (m_matcher != NULL && m_matcherName == "BruteForce") {
        // Restore the crossCheck option requested by the user
        m_matcher->set("crossCheck", m_useBruteForceCrossCheck);
      }
#endif
    }
  }
  inline void setMatchingFactorThreshold(const double factor) { m_matchingFactorThreshold = factor; }

  inline void setMatchingRatioThreshold(double ratio)
  {
    if (ratio > 0.0 && (ratio < 1.0 || std::fabs(ratio - 1.0) < std::numeric_limits<double>::epsilon()))
      m_matchingRatioThreshold = ratio;
  }

  inline void setRansacConsensusPercentage(double percentage)
  {
    if (percentage > 0.0 &&
        (percentage < 100.0 || std::fabs(percentage - 100.0) < std::numeric_limits<double>::epsilon()))
      m_ransacConsensusPercentage = percentage;
  }

  inline void setRansacFilterFlag(const vpPose::RANSAC_FILTER_FLAGS &flag) { m_ransacFilterFlag = flag; }
  inline void setRansacIteration(int nbIter) { m_nbRansacIterations = nbIter; }
  inline void setRansacParallel(bool parallel) { m_ransacParallel = parallel; }
  inline void setRansacParallelNbThreads(unsigned int nthreads) { m_ransacParallelNbThreads = nthreads; }

  inline void setRansacReprojectionError(double reprojectionError)
  {
    if (reprojectionError > 0.0) {
      m_ransacReprojectionError = reprojectionError;
    } else {
      throw vpException(vpException::badValue, "The Ransac reprojection "
                                               "threshold must be positive "
                                               "as we deal with distance.");
    }
  }

  inline void setRansacMinInlierCount(int minCount) { m_nbRansacMinInlierCount = minCount; }

  inline void setRansacThreshold(double threshold)
  {
    if (threshold > 0.0)
      m_ransacThreshold = threshold;
  }
#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
  inline void setUseBruteForceCrossCheck(bool useCrossCheck)
  {
    m_useBruteForceCrossCheck = useCrossCheck;
    // The crossCheck option is only relevant for a "BruteForce" matcher and cannot be combined with knn matching
    if (m_matcher != NULL && !m_useKnn && m_matcherName == "BruteForce") {
      m_matcher->set("crossCheck", useCrossCheck);
    } else if (m_matcher != NULL && m_useKnn && m_matcherName == "BruteForce") {
      std::cout << "Warning, you try to set the crossCheck parameter with a "
                   "BruteForce matcher but knn is enabled";
      std::cout << " (the filtering method uses a ratio constraint)" << std::endl;
    }
  }
#endif
  inline void setUseMatchTrainToQuery(bool useMatchTrainToQuery) { m_useMatchTrainToQuery = useMatchTrainToQuery; }
  //! If true, the covariance matrix of the estimated pose is computed (only with the VVS pose estimation)
  bool m_computeCovariance;
  //! Covariance matrix of the estimated pose
  vpMatrix m_covarianceMatrix;
  //! Identifier of the current training image
  int m_currentImageId;
  //! Method used to decide if the object is detected or not
  vpDetectionMethodType m_detectionMethod;
  //! Detection score used to decide if the object is present or not
  double m_detectionScore;
  //! Detection threshold on the average descriptor distance used to decide if the object is present or not
  double m_detectionThreshold;
  //! Elapsed time to detect keypoints
  double m_detectionTime;
  //! List of detector names
  std::vector<std::string> m_detectorNames;
  //! Map of detector names to the corresponding detector instances
  std::map<std::string, cv::Ptr<cv::FeatureDetector> > m_detectors;
  //! Elapsed time to extract descriptors for the detected keypoints
  double m_extractionTime;
  //! List of extractor names
  std::vector<std::string> m_extractorNames;
  //! Map of extractor names to the corresponding extractor instances
  std::map<std::string, cv::Ptr<cv::DescriptorExtractor> > m_extractors;
  //! List of matches kept after filtering
  std::vector<cv::DMatch> m_filteredMatches;
  //! Chosen method to filter out false matches
  vpFilterMatchingType m_filterType;
  //! Image format used to save the training images
  vpImageFormatType m_imageFormat;
  //! List of k-nearest-neighbor matches between the detected and the trained keypoints
  std::vector<std::vector<cv::DMatch> > m_knnMatches;
  //! Map of descriptor enum types to their names
  std::map<vpFeatureDescriptorType, std::string> m_mapOfDescriptorNames;
  //! Map of detector enum types to their names
  std::map<vpFeatureDetectorType, std::string> m_mapOfDetectorNames;
  //! Map used to know to which training image a training keypoint belongs
  std::map<int, int> m_mapOfImageId;
  //! Map giving access to the image buffer associated to an image id
  std::map<int, vpImage<unsigned char> > m_mapOfImages;
  //! Descriptor matcher (e.g. BruteForce or FlannBased)
  cv::Ptr<cv::DescriptorMatcher> m_matcher;
  //! Name of the matcher
  std::string m_matcherName;
  //! List of matches between the detected and the trained keypoints
  std::vector<cv::DMatch> m_matches;
  //! Factor value for the constantFactorDistanceThreshold filtering method
  double m_matchingFactorThreshold;
  //! Ratio value for the ratioDistanceThreshold filtering method
  double m_matchingRatioThreshold;
  //! Elapsed time to do the matching
  double m_matchingTime;
  //! List of pairs between a keypoint and its 3D point after the RANSAC
  std::vector<std::pair<cv::KeyPoint, cv::Point3f> > m_matchRansacKeyPointsToPoints;
  //! Maximum number of RANSAC iterations
  int m_nbRansacIterations;
  //! Minimum number of inliers for the RANSAC method
  int m_nbRansacMinInlierCount;
  //! List of 3D points (object frame) kept after the matching and used to compute the pose
  std::vector<cv::Point3f> m_objectFilteredPoints;
  //! Descriptors computed on the query image
  cv::Mat m_queryDescriptors;
  //! List of filtered keypoints detected in the query image
  std::vector<cv::KeyPoint> m_queryFilteredKeyPoints;
  //! List of keypoints detected in the query image
  std::vector<cv::KeyPoint> m_queryKeyPoints;
  //! Percentage used to determine the minimum number of inliers for the RANSAC method
  double m_ransacConsensusPercentage;
  //! Flag controlling how degenerate configurations are filtered in the RANSAC pose estimation
  vpPose::RANSAC_FILTER_FLAGS m_ransacFilterFlag;
  //! List of RANSAC inliers
  std::vector<vpImagePoint> m_ransacInliers;
  //! List of RANSAC outliers
  std::vector<vpImagePoint> m_ransacOutliers;
  //! If true, the RANSAC pose estimation is run in parallel
  bool m_ransacParallel;
  //! Number of threads used by the parallel RANSAC (0 means automatic)
  unsigned int m_ransacParallelNbThreads;
  //! Maximum reprojection error (in pixels, OpenCV pose estimation) to decide if a point is an inlier
  double m_ransacReprojectionError;
  //! Maximum error (in meters, ViSP pose estimation) to decide if a point is an inlier
  double m_ransacThreshold;
  //! Descriptors computed on the training image(s)
  cv::Mat m_trainDescriptors;
  //! List of keypoints detected in the training image(s)
  std::vector<cv::KeyPoint> m_trainKeyPoints;
  //! List of 3D points (object frame) corresponding to the training keypoints
  std::vector<cv::Point3f> m_trainPoints;
  //! List of 3D points in vpPoint format corresponding to the training keypoints
  std::vector<vpPoint> m_trainVpPoints;
  //! If true, the affine view simulation (ASIFT-like) is used before the detection
  bool m_useAffineDetection;
#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
  //! If true, the cross-check option is used with the BruteForce matcher
  bool m_useBruteForceCrossCheck;
#endif
  //! If true, a percentage value is used to compute the minimum number of RANSAC inliers
  bool m_useConsensusPercentage;
  //! If true, a k-nearest-neighbor matching is used (needed by the ratio test)
  bool m_useKnn;
  //! If true, the matching is done from the training descriptors to the query descriptors
  bool m_useMatchTrainToQuery;
  //! If true, the pose is estimated with the ViSP Virtual Visual Servoing RANSAC, otherwise with OpenCV
  bool m_useRansacVVS;
  //! If true, each training keypoint is kept in at most one match
  bool m_useSingleMatchFilter;
  void affineSkew(double tilt, double phi, cv::Mat &img, cv::Mat &mask, cv::Mat &Ai);
  double computePoseEstimationError(const std::vector<std::pair<cv::KeyPoint, cv::Point3f> > &matchKeyPoints,
                                    const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo_est);
  void filterMatches();

  void initDetector(const std::string &detectorNames);
  void initDetectors(const std::vector<std::string> &detectorNames);

  void initExtractor(const std::string &extractorName);
  void initExtractors(const std::vector<std::string> &extractorNames);

  void initFeatureNames();
  inline size_t myKeypointHash(const cv::KeyPoint &kp)
  {
    size_t _Val = 2166136261U, scale = 16777619U;
    Cv32suf u;
    u.f = kp.pt.x;
    _Val = (scale * _Val) ^ u.u;
    u.f = kp.pt.y;
    _Val = (scale * _Val) ^ u.u;
    u.f = kp.size;
    _Val = (scale * _Val) ^ u.u;
    // The keypoint angle is not hashed since it is not computed by every detector
    u.f = kp.response;
    _Val = (scale * _Val) ^ u.u;
    _Val = (scale * _Val) ^ ((size_t)kp.octave);
    _Val = (scale * _Val) ^ ((size_t)kp.class_id);
    return _Val;
  }
#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
  // PyramidAdaptedFeatureDetector was dropped from the public OpenCV API in 3.0.0, so a local copy is kept here
  class PyramidAdaptedFeatureDetector : public cv::FeatureDetector
  {
  public:
    PyramidAdaptedFeatureDetector(const cv::Ptr<cv::FeatureDetector> &detector, int maxLevel = 2);

    virtual bool empty() const;

    virtual void detect(cv::InputArray image, CV_OUT std::vector<cv::KeyPoint> &keypoints,
                        cv::InputArray mask = cv::noArray());

  protected:
    virtual void detectImpl(const cv::Mat &image, std::vector<cv::KeyPoint> &keypoints,
                            const cv::Mat &mask = cv::Mat()) const;

    cv::Ptr<cv::FeatureDetector> detector;
    int maxLevel;
  };
  // Local copy of the cv::KeyPointsFilter interface, whose declaration is not part of the public OpenCV 3 API
  class KeyPointsFilter
  {
  public:
    KeyPointsFilter() {}

    /* Remove keypoints within borderPixels of an image edge. */
    static void runByImageBorder(std::vector<cv::KeyPoint> &keypoints, cv::Size imageSize, int borderSize);
    /* Remove keypoints of sizes out of range. */
    static void runByKeypointSize(std::vector<cv::KeyPoint> &keypoints, float minSize, float maxSize = FLT_MAX);
    /* Remove keypoints from some image by mask for pixels of this image. */
    static void runByPixelsMask(std::vector<cv::KeyPoint> &keypoints, const cv::Mat &mask);
    /* Remove duplicated keypoints. */
    static void removeDuplicated(std::vector<cv::KeyPoint> &keypoints);
    /* Retain the specified number of the best keypoints (according to the response). */
    static void retainBest(std::vector<cv::KeyPoint> &keypoints, int npoints);
  };