Preamble
When a single camera's field of view is not large enough, we can merge the fields of view of two cameras, enlarging the forward view so that a wider area than the standard field of view can be seen. The stitching can be done in two ways: the first is the cv::Stitcher class, and the second is feature point matching.
This post uses feature point matching on two images to merge and stitch their fields of view.
Demo
All feature points matched (100% match rate):
After changing the input pictures:
So the traditional OpenCV approach does place some requirements on these feature points. (Note: these two images cannot be stitched successfully using the Stitcher class to implement panoramic image stitching!!!)
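For comparison, a minimal sketch of the Stitcher-class approach mentioned above (OpenCV 4.x API; the file names here are placeholders) would look like the following; with the demo images above it returns an error status instead of a panorama.
std::vector<cv::Mat> images;
images.push_back(cv::imread("left.jpg"));   // placeholder path
images.push_back(cv::imread("right.jpg"));  // placeholder path
cv::Mat panoMat;
cv::Ptr<cv::Stitcher> pStitcher = cv::Stitcher::create(cv::Stitcher::PANORAMA);
cv::Stitcher::Status status = pStitcher->stitch(images, panoMat);
if(status != cv::Stitcher::OK)
{
    // Stitching failed, as it does for the image pair in this demo
}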
The process of stitching the two images together
Step 1: Open the image
cv::Mat leftImageMat = cv::imread("D:/qtProject/openCVDemo/openCVDemo/modules/openCVManager/images/");
cv::Mat rightImageMat = cv::imread("D:/qtProject/openCVDemo/openCVDemo/modules/openCVManager/images/");
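cv::imread() returns an empty cv::Mat when a file cannot be read (the path strings above are truncated in the original and left as-is), so it is worth guarding before continuing; a minimal check:
// Guard: stop if either input image failed to load (needs <iostream>)
if(leftImageMat.empty() || rightImageMat.empty())
{
    std::cerr << "Failed to load one of the input images" << std::endl;
    return; // or handle the error as appropriate for the enclosing function
}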
Step 2: Extract feature points
// Convert to grayscale first; the detector below runs on the gray images
cv::Mat leftImageGrayMat;
cv::Mat rightImageGrayMat;
cv::cvtColor(leftImageMat, leftImageGrayMat, cv::COLOR_BGR2GRAY);
cv::cvtColor(rightImageMat, rightImageGrayMat, cv::COLOR_BGR2GRAY);
// Extract feature points and compute their SURF descriptors
cv::Ptr<cv::xfeatures2d::SurfFeatureDetector> pSurfFeatureDetector = cv::xfeatures2d::SurfFeatureDetector::create();
std::vector<cv::KeyPoint> leftKeyPoints;
std::vector<cv::KeyPoint> rightKeyPoints;
// Descriptor matrices, one row per keypoint
cv::Mat leftMatch;
cv::Mat rightMatch;
std::vector<cv::DMatch> matches;
pSurfFeatureDetector->detectAndCompute(leftImageGrayMat, cv::Mat(), leftKeyPoints, leftMatch);
pSurfFeatureDetector->detectAndCompute(rightImageGrayMat, cv::Mat(), rightKeyPoints, rightMatch);
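Note that SurfFeatureDetector lives in the opencv_contrib module xfeatures2d (SURF is patented and not built into stock OpenCV). If contrib is unavailable, a sketch of the same step using cv::SIFT, which has been in the main features2d module since OpenCV 4.4, would be:
// Assumed alternative: SIFT instead of SURF (OpenCV >= 4.4); SIFT descriptors are
// also floating point, so the FLANN matching below works unchanged
cv::Ptr<cv::SIFT> pSiftDetector = cv::SIFT::create();
pSiftDetector->detectAndCompute(leftImageGrayMat, cv::Mat(), leftKeyPoints, leftMatch);
pSiftDetector->detectAndCompute(rightImageGrayMat, cv::Mat(), rightKeyPoints, rightMatch);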
Step 3: Match the descriptors (brute-force matching)
// Match the two descriptor sets; despite the "brute-force" label, the code uses the
// FLANN-based matcher, an approximate nearest-neighbour search for float descriptors.
// leftMatch is the query set and rightMatch the train set, which matters for the indices in Step 5.
cv::Ptr<cv::FlannBasedMatcher> pFlannBasedMatcher = cv::FlannBasedMatcher::create();
pFlannBasedMatcher->match(leftMatch, rightMatch, matches);
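FLANN works here because SURF (and SIFT) descriptors are floating point. For a binary descriptor such as ORB, a sketch of the matching step would instead use the true brute-force matcher with the Hamming norm:
// Assumed alternative for binary descriptors (e.g. ORB): brute-force matching with Hamming distance
cv::Ptr<cv::BFMatcher> pBFMatcher = cv::BFMatcher::create(cv::NORM_HAMMING, true);
pBFMatcher->match(leftMatch, rightMatch, matches);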
Step 4: Keep the better matches from the matching step
// Sort the matches by distance, smallest to largest (cv::DMatch::operator< compares by distance)
std::sort(matches.begin(), matches.end());
// Filter the matches, keeping the best ones according to this ordering
std::vector<cv::DMatch> goodMatchs;
// Minimum number of matches to keep
int count = 40;
// Use 10% of the total matches if that is more than the minimum
int validPoints = (int)(matches.size() * 0.1f);
if(validPoints > count)
{
    count = validPoints;
}
// If there are fewer matches than the threshold, take them all
if((int)matches.size() < count)
{
    count = (int)matches.size();
}
// Treat the filtered matches as the better matches
for(int index = 0; index < count; index++)
{
    goodMatchs.push_back(matches.at(index));
}
// Matching results
cv::Mat matchedMat;
// Draw the result; note the argument order (left image and keypoints first, then right)
cv::drawMatches(leftImageMat, leftKeyPoints, rightImageMat, rightKeyPoints, goodMatchs, matchedMat);
#if 1
cv::namedWindow("matchedMat", cv::WINDOW_NORMAL);
cv::resizeWindow("matchedMat", cv::Size(800, 300));
cv::imshow("matchedMat", matchedMat);
#endif
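Keeping a fixed count or fraction of the sorted matches is simple but somewhat arbitrary. A common alternative is Lowe's ratio test on the two nearest neighbours; a sketch (the 0.7 threshold is a conventional value, not from the original):
// Assumed alternative filter: Lowe's ratio test over the two nearest neighbours
std::vector<std::vector<cv::DMatch>> knnMatches;
pFlannBasedMatcher->knnMatch(leftMatch, rightMatch, knnMatches, 2);
std::vector<cv::DMatch> ratioGoodMatchs;
for(size_t index = 0; index < knnMatches.size(); index++)
{
    // Keep a match only if it is clearly better than the second-best candidate
    if(knnMatches.at(index).size() == 2
        && knnMatches.at(index).at(0).distance < 0.7f * knnMatches.at(index).at(1).distance)
    {
        ratioGoodMatchs.push_back(knnMatches.at(index).at(0));
    }
}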
Step 5: Calculate the transformation matrix
// Collect the matched point coordinates. Because match(leftMatch, rightMatch) used the left
// descriptors as the query set, queryIdx indexes leftKeyPoints and trainIdx indexes rightKeyPoints.
std::vector<cv::Point2f> leftImagePoints;
std::vector<cv::Point2f> rightImagePoints;
for(int index = 0; index < (int)goodMatchs.size(); index++)
{
    leftImagePoints.push_back(leftKeyPoints.at(goodMatchs.at(index).queryIdx).pt);
    rightImagePoints.push_back(rightKeyPoints.at(goodMatchs.at(index).trainIdx).pt);
}
// Calculate the perspective transformation matrix that maps the right image's points onto the
// left image, using RANSAC to reject outliers among the matched points
cv::Mat m = cv::findHomography(rightImagePoints, leftImagePoints, cv::RANSAC);
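findHomography can also report which matches RANSAC accepted as inliers through its optional mask output, which is a useful sanity check before warping; a sketch (needs <algorithm>):
// Assumed extra check: count the RANSAC inliers via the optional output mask
std::vector<uchar> inlierMask;
cv::Mat checkedH = cv::findHomography(rightImagePoints, leftImagePoints, cv::RANSAC, 3.0, inlierMask);
int inlierCount = (int)std::count(inlierMask.begin(), inlierMask.end(), (uchar)1);
// If very few points survive (say, under 10), the homography is unreliable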
Step 6: Calculate the size of the transformed second image
// Calculate where the four corners of the second (right) image land after the transform
cv::Point2f leftTopPoint2f;
cv::Point2f leftBottomPoint2f;
cv::Point2f rightTopPoint2f;
cv::Point2f rightBottomPoint2f;
cv::Mat H = m.clone();
// H maps right-image coordinates into the left image's frame, so transform the right image's corners
cv::Mat src = rightImageMat.clone();
{
cv::Mat V1;
cv::Mat V2;
// Upper left corner (0, 0, 1)
double v2[3] = {0, 0, 1};
// Transformed coordinate values
double v1[3];
// Column vectors wrapping the raw arrays; since V1 already has the right size and type,
// the product below is written into its existing buffer, i.e. back into v1
V2 = cv::Mat(3, 1, CV_64FC1, v2);
V1 = cv::Mat(3, 1, CV_64FC1, v1);
V1 = H * V2;
// Divide by the homogeneous coordinate to get pixel coordinates
leftTopPoint2f.x = v1[0] / v1[2];
leftTopPoint2f.y = v1[1] / v1[2];
// Lower left corner (0, rows, 1)
v2[0] = 0;
v2[1] = src.rows;
v2[2] = 1;
V2 = cv::Mat(3, 1, CV_64FC1, v2);
V1 = cv::Mat(3, 1, CV_64FC1, v1);
V1 = H * V2;
leftBottomPoint2f.x = v1[0] / v1[2];
leftBottomPoint2f.y = v1[1] / v1[2];
// Upper right corner (cols, 0, 1)
v2[0] = src.cols;
v2[1] = 0;
v2[2] = 1;
V2 = cv::Mat(3, 1, CV_64FC1, v2);
V1 = cv::Mat(3, 1, CV_64FC1, v1);
V1 = H * V2;
rightTopPoint2f.x = v1[0] / v1[2];
rightTopPoint2f.y = v1[1] / v1[2];
// Bottom right corner (cols, rows, 1)
v2[0] = src.cols;
v2[1] = src.rows;
v2[2] = 1;
V2 = cv::Mat(3, 1, CV_64FC1, v2);
V1 = cv::Mat(3, 1, CV_64FC1, v1);
V1 = H * V2;
rightBottomPoint2f.x = v1[0] / v1[2];
rightBottomPoint2f.y = v1[1] / v1[2];
}
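The per-corner matrix multiplications above can be collapsed into a single call to cv::perspectiveTransform, which applies the homography to a list of points and performs the homogeneous division internally; a compact sketch of the same computation:
// Assumed compact alternative: transform all four corners of the right image in one call
std::vector<cv::Point2f> srcCorners;
srcCorners.push_back(cv::Point2f(0.0f, 0.0f));
srcCorners.push_back(cv::Point2f(0.0f, (float)src.rows));
srcCorners.push_back(cv::Point2f((float)src.cols, 0.0f));
srcCorners.push_back(cv::Point2f((float)src.cols, (float)src.rows));
std::vector<cv::Point2f> dstCorners;
cv::perspectiveTransform(srcCorners, dstCorners, H);
// dstCorners[0..3] correspond to leftTopPoint2f, leftBottomPoint2f, rightTopPoint2f, rightBottomPoint2f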