OrbFeatureDetector problem when using OpenCV

I’m trying to follow the Jetson OpenCV tutorials, Episode 5, and I keep getting this error when I try to make:

/home/ubuntu/5-cv-features/features.cpp: In function ‘int main()’:
/home/ubuntu/5-cv-features/features.cpp:19:5: error: ‘OrbFeatureDetector’ is not a member of ‘cv’
cv::OrbFeatureDetector detector(5000);
^
/home/ubuntu/5-cv-features/features.cpp:26:51: error: ‘detector’ was not declared in this scope
detector(frisbee, cv::Mat(), frisbee_keypoints);

Here is my code:

// Nov 2018
// Detect an object

#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/videoio.hpp>
#include <opencv2/opencv.hpp>
#include <vector>
#include <iostream>

int main()
{
    cv::VideoCapture input(0);

    cv::Mat img, img_gray;

    cv::OrbFeatureDetector detector(5000);
    std::vector<cv::KeyPoint> img_keypoints, frisbee_keypoints;

    input.read(img);
    cv::Mat frisbee;
    img(cv::Rect(720, 320, 150, 100)).copyTo(frisbee);

    detector(frisbee, cv::Mat(), frisbee_keypoints);
    drawKeypoints(frisbee, frisbee_keypoints, frisbee);

    for (;;)
    {
       if(!input.read(img))
          break;

       
       detector(img, cv::Mat(), img_keypoints);

       drawKeypoints(img, img_keypoints, img);

       cv::imshow("frisbee", frisbee);
       cv::imshow("img", img);
       char c = cv::waitKey();
 
       if (c == 27) // 27 is ESC code
          break;
    }
}

Hi GoBears,

Which version of OpenCV are you using?

You may check https://devtalk.nvidia.com/default/topic/1024551/jetson-tx2/episode-4-feature-detection-and-optical-flow-problem-/post/5211987/#5211987
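
In case it turns out you built OpenCV 3.x: the 2.4-style wrapper classes such as cv::OrbFeatureDetector were removed there, and ORB is created through the cv::ORB::create() factory and used via cv::Ptr. A minimal sketch of the 3.x calls (the random test image is only a stand-in for your camera frame):

#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <vector>
#include <iostream>

int main()
{
    // stand-in image so the example runs without a camera
    cv::Mat image(480, 640, CV_8UC1);
    cv::randu(image, cv::Scalar(0), cv::Scalar(255));

    // OpenCV 3.x replacement for "cv::OrbFeatureDetector detector(5000);"
    cv::Ptr<cv::ORB> detector = cv::ORB::create(5000);

    std::vector<cv::KeyPoint> keypoints;
    cv::Mat descriptors;
    detector->detect(image, keypoints);               // keypoints only
    detector->compute(image, keypoints, descriptors); // descriptors for matching

    std::cout << keypoints.size() << " keypoints, "
              << descriptors.rows << " descriptors" << std::endl;
    return 0;
}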

For posterity, to add on to Honey’s help: in OpenCV 3 the detect method only finds keypoints and doesn’t compute descriptors the way the detector functor did in the NVIDIA tutorial (why can’t you guys at NVIDIA update this??). I found I needed to pair it with a compute call on an extractor. Here’s the code that should get you working:

#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/videoio.hpp>
#include <vector>

int main()
{
    cv::VideoCapture input("/home/nvidia/5-cv-features/bird.mp4");
    cv::Mat img, img_gray; // to track img
    cv::Mat img_desc;

    std::vector<cv::KeyPoint> img_keypoints, h20fall_keypoints; //vec to store features

    cv::Ptr<cv::ORB> detector = cv::ORB::create(5000);
    cv::Ptr<cv::ORB> extractor = cv::ORB::create();

    // detect on 1st frame to track in loop
    input.read(img);
    
    cv::Mat h20fall;
    cv::Mat h20fall_desc;

    //grab section of image
    img(cv::Rect(715, 500, 150, 150)).copyTo(h20fall);

    // call detector object method (input, keypoints vec)
    detector->detect(h20fall, h20fall_keypoints);
    extractor->compute(h20fall, h20fall_keypoints, h20fall_desc);
    drawKeypoints(h20fall, h20fall_keypoints, h20fall);

    for (;;)
    {
        if (!input.read(img))
            break;

        detector->detect(img, img_keypoints);
        extractor->compute(img, img_keypoints, img_desc);
        drawKeypoints(img, img_keypoints, img);

        // each row of the descriptor Mat holds one keypoint's descriptor
        // brute-force matcher - compare all descriptors and keep the best match for each
        // results stored in a vector of DMatch structs
        cv::BFMatcher matcher(cv::NORM_HAMMING); // ORB descriptors are binary, so match with Hamming distance
        std::vector<cv::DMatch> matches;
        matcher.match(h20fall_desc, img_desc, matches);

        // split matches into 2 point vectors
        std::vector<cv::Point2f> h20fall_points, img_points;
        for (size_t i = 0; i < matches.size(); i++)
        {
            h20fall_points.push_back(h20fall_keypoints[matches[i].queryIdx].pt);
            img_points.push_back(img_keypoints[matches[i].trainIdx].pt);
        }

        // find homographic transform between images (3x3) and filter out outliers w/ RANSAC
        cv::Matx33f H = cv::findHomography(h20fall_points, img_points, cv::RANSAC);

        // create 2 vectors to hold corner pts of bounding boxes and fill 1 w/ known image
        std::vector<cv::Point> h20fall_border, img_border;
        h20fall_border.push_back(cv::Point(0, 0));
        h20fall_border.push_back(cv::Point(0, h20fall.rows));
        h20fall_border.push_back(cv::Point(h20fall.cols, h20fall.rows));
        h20fall_border.push_back(cv::Point(h20fall.cols, 0));

        // project each corner through the homography
        for (size_t i = 0; i < h20fall_border.size(); i++)
        {
            cv::Vec3f p = H * cv::Vec3f(h20fall_border[i].x, h20fall_border[i].y, 1);

            // dehomogenize and push onto the img border
            img_border.push_back(cv::Point(p[0] / p[2], p[1] / p[2]));
        }

        // draw enclosed shape
        cv::polylines(img, img_border, true, cv::Scalar(0, 255, 0)); // green in BGR

        // draw matches - returns a combined image of the two inputs
        cv::Mat img_matches;
        cv::drawMatches(h20fall, h20fall_keypoints, img, img_keypoints, matches, img_matches);

        cv::imshow("img_matches", img_matches);
        //cv::imshow("h20fall", h20fall);
        //cv::imshow("img", img);
        char c = cv::waitKey(); // advance a frame on any key

        if (c == 27) // 27 is ESC code
            break;
    }
}
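
One more thing: you don’t strictly need separate detector and extractor objects. A single ORB instance also has a detectAndCompute method that finds the keypoints and fills the descriptor Mat in one call, so something like this should be able to replace each detect/compute pair above (same OpenCV 3.x API, I haven’t timed it on the Jetson):

// cv::noArray() = no mask; one call replaces detect() followed by compute()
detector->detectAndCompute(img, cv::noArray(), img_keypoints, img_desc);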

Note that if your region doesn’t produce enough matched points to begin with, you may get odd-looking error complaints out of findHomography. Should probably put a condition in there, but anyway…
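
If anyone wants that guard, something along these lines right after matcher.match() should cover it (4 correspondences is the bare minimum findHomography needs; in practice you’ll probably want a higher threshold):

if (matches.size() < 4)
{
    // not enough correspondences to estimate a homography; skip this frame
    continue;
}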