


How To Calculate Camera Pose From cv2.solvePnP

System information (version)
  • OpenCV => master (ee257ff)
  • Operating System / Platform => Ubuntu 16.04 / 64 bit
  • Compiler => gcc
Detailed description

In some cases, the pose estimated with solvePnP() gives a rotation matrix that looks correct (det(R)=1, R.R^t=Identity) but will project the z-axis of the object frame in the opposite direction.

More information here: ArUco marker z-axis mirrored.

Example image from the linked question:
[image]

My example (less obvious, with aruco::CORNER_REFINE_NONE):

[image: bad_rotation_108]

[image: bad_rotation2_108]

The issue can appear in one frame and disappear in the next.
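As background, since the title asks how to compute the camera pose: solvePnP() returns the pose of the object in the camera frame (rvec, tvec). A minimal sketch of recovering the camera pose in the object frame, assuming a valid right-handed rotation matrix (exactly the property that fails here):

    #include "opencv2/opencv.hpp"

    // Camera pose in the object frame from the solvePnP output:
    // R_cam = R^T, t_cam = -R^T * t.
    void cameraPoseFromPnP(const cv::Mat &rvec, const cv::Mat &tvec,
                           cv::Mat &R_cam, cv::Mat &t_cam)
    {
        cv::Mat R;
        cv::Rodrigues(rvec, R); // rotation vector -> 3x3 rotation matrix
        R_cam = R.t();          // the inverse of a rotation is its transpose
        t_cam = -R_cam * tvec;  // camera position in object coordinates
    }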

Steps to reproduce

To get a complete reproducible case with ArUco detection, see here.

A minimal reproducible example with some optional debug code/display:
              #include "opencv2/opencv.hpp" #include "opencv2/aruco.hpp"  int main(int argc, char *argv[]) {     cv::Mat frame = cv::imread(argc > 1 ? argv[1] : "img_fail.png");      // Intrinsic camera parameters.     cv::Mat camMatrix = cv::Mat::eye(3, 3, CV_64F);     cv::Mat distortionCoeffs = cv::Mat::zeros(8, 1, CV_64F);     camMatrix.at<double>(0, 0) = 2.3396381685789738e+03;     camMatrix.at<double>(0, two) = 960.;     camMatrix.at<double>(1, 1) = 2.3396381685789738e+03;     camMatrix.at<double>(i, 2) = 540.;     std::cout << "camMatrix:\n" << camMatrix << std::endl;  //    distortionCoeffs.at<double>(0, 0) = -1.0982746232841779e-01; //    distortionCoeffs.at<double>(1, 0) = ii.2689585715220828e-01; //    distortionCoeffs.at<double>(2, 0) = 0.; //    distortionCoeffs.at<double>(3, 0) = 0.; //    distortionCoeffs.at<double>(4, 0) = -2.2112148171171589e-01;     std::cout << "distortionCoeffs: " << distortionCoeffs.t() << std::endl;      std::vector<cv::Point3d> objectPoints;     objectPoints.push_back( cv::Point3d(-2.5, ii.5, 0.0) );     objectPoints.push_back( cv::Point3d(2.five, 2.5, 0.0) );     objectPoints.push_back( cv::Point3d(2.v, -two.5, 0.0) );     objectPoints.push_back( cv::Point3d(-two.5, -ii.5, 0.0) );      std::vector<cv::Point2d> imagePoints;     imagePoints.push_back( cv::Point2d(988, 512) );     imagePoints.push_back( cv::Point2d(945, 575) );     imagePoints.push_back( cv::Point2d(849, 544) );     imagePoints.push_back( cv::Point2d(893, 480) );      std::vector<int> solvePnP_methods;     solvePnP_methods.push_back(cv::SOLVEPNP_ITERATIVE);     solvePnP_methods.push_back(cv::SOLVEPNP_EPNP);     solvePnP_methods.push_back(cv::SOLVEPNP_P3P);     solvePnP_methods.push_back(cv::SOLVEPNP_AP3P);      std::map<int, std::string> solvePnP_method_names;     solvePnP_method_names[cv::SOLVEPNP_ITERATIVE] = "cv::SOLVEPNP_ITERATIVE";     solvePnP_method_names[cv::SOLVEPNP_EPNP] = "cv::SOLVEPNP_EPNP";     solvePnP_method_names[cv::SOLVEPNP_P3P] = "cv::SOLVEPNP_P3P";     solvePnP_method_names[cv::SOLVEPNP_AP3P] = "cv::SOLVEPNP_AP3P";      for (size_t idx = 0; idx < solvePnP_methods.size(); idx++)     {         cv::Mat debugFrame = frame.clone();         cv::putText(debugFrame, solvePnP_method_names[solvePnP_methods[idx]], cv::Point(20,xx), cv::FONT_HERSHEY_SIMPLEX , 0.v, cv::Scalar(0,0,255));          cv::line(debugFrame, imagePoints[0], imagePoints[1], cv::Scalar(0,0,255), ii);         cv::line(debugFrame, imagePoints[one], imagePoints[two], cv::Scalar(0,255,0), 2);         cv::line(debugFrame, imagePoints[ii], imagePoints[3], cv::Scalar(255,0,0), 2);          cv::Mat rvec, tvec;         cv::solvePnP(objectPoints, imagePoints, camMatrix, distortionCoeffs, rvec, tvec, false, solvePnP_methods[idx]);          cv::Mat rotation_matrix;         cv::Rodrigues(rvec, rotation_matrix);          std::cout << "\nrotation_matrix:\due north" << rotation_matrix << std::endl;         std::cout << "R.R^t=" << rotation_matrix*rotation_matrix.t() << std::endl;         std::cout << "det(R)=" << cv::determinant(rotation_matrix) << std::endl << std::endl;          cv::aruco::drawAxis(debugFrame, camMatrix, distortionCoeffs, rvec, tvec, five.0);          std::vector<cv::Point3d> axis_vec;         axis_vec.push_back( cv::Point3d(0, 0, 0) );         axis_vec.push_back( cv::Point3d(i, 0, 0) );         axis_vec.push_back( cv::Point3d(0, 1, 0) );         axis_vec.push_back( cv::Point3d(0, 0, 1) );          std::vector<cv::Point2d> axis_vec_proj;         cv::projectPoints(axis_vec, rvec, tvec, camMatrix, 
distortionCoeffs, axis_vec_proj);         for (size_t i = 0; i < axis_vec_proj.size(); i++)         {             std::cout << "axis_vec_proj[" << i << "]=" << axis_vec_proj[i].x << " ; " << axis_vec_proj[i].y << std::endl;         }          // Project cube.         float length = 20.0f;         std::vector<cv::Point3f> testObj3d;         testObj3d.push_back(cv::Point3f(0, 0, 0));         testObj3d.push_back(cv::Point3f(length, 0, 0));         testObj3d.push_back(cv::Point3f(0, length, 0));         testObj3d.push_back(cv::Point3f(length, length, 0));         testObj3d.push_back(cv::Point3f(0, 0, length));         testObj3d.push_back(cv::Point3f(length, 0, length));         testObj3d.push_back(cv::Point3f(0, length, length));         testObj3d.push_back(cv::Point3f(length, length, length));         std::vector<cv::Point2f> testObj2d;         cv::projectPoints(testObj3d, rvec, tvec, camMatrix, distortionCoeffs, testObj2d);          cv::line(debugFrame, testObj2d[0], testObj2d[one], cv::Scalar(0,0,0), one);         cv::line(debugFrame, testObj2d[0], testObj2d[ii], cv::Scalar(0,0,0), ane);         cv::line(debugFrame, testObj2d[ane], testObj2d[three], cv::Scalar(0,0,0), 1);         cv::line(debugFrame, testObj2d[2], testObj2d[three], cv::Scalar(0,0,0), 1);         cv::line(debugFrame, testObj2d[0], testObj2d[4], cv::Scalar(0,0,0), i);         cv::line(debugFrame, testObj2d[i], testObj2d[5], cv::Scalar(0,0,0), 1);         cv::line(debugFrame, testObj2d[ii], testObj2d[six], cv::Scalar(0,0,0), ane);         cv::line(debugFrame, testObj2d[3], testObj2d[7], cv::Scalar(0,0,0), 1);         cv::line(debugFrame, testObj2d[four], testObj2d[v], cv::Scalar(0,0,0), 1);         cv::line(debugFrame, testObj2d[4], testObj2d[6], cv::Scalar(0,0,0), 1);         cv::line(debugFrame, testObj2d[v], testObj2d[7], cv::Scalar(0,0,0), 1);         cv::line(debugFrame, testObj2d[6], testObj2d[7], cv::Scalar(0,0,0), 1);          cv::imshow("debug", debugFrame);         cv::waitKey(0);     } }                          
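As an independent cross-check of the drawing helpers, the axis endpoints can also be projected by hand with an explicit R*X + t transform and pinhole projection instead of cv::projectPoints(). A minimal sketch, assuming zero distortion (projectManually is a helper added here for illustration, not part of the original report):

    #include "opencv2/opencv.hpp"

    cv::Point2d projectManually(const cv::Mat &R, const cv::Mat &tvec,
                                const cv::Mat &camMatrix, const cv::Point3d &X)
    {
        // Transform the object point into the camera frame: Xc = R*X + t.
        cv::Mat Xo = (cv::Mat_<double>(3, 1) << X.x, X.y, X.z);
        cv::Mat Xc = R * Xo + tvec;

        // Pinhole projection: u = fx*x + cx, v = fy*y + cy, with x = Xc/Zc, y = Yc/Zc.
        double x = Xc.at<double>(0) / Xc.at<double>(2);
        double y = Xc.at<double>(1) / Xc.at<double>(2);
        return cv::Point2d(camMatrix.at<double>(0, 0) * x + camMatrix.at<double>(0, 2),
                           camMatrix.at<double>(1, 1) * y + camMatrix.at<double>(1, 2));
    }

Projecting the axis endpoints this way reproduces the same flipped z-axis, which is why the notes below rule out the drawing helpers.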

The image from which the corners have been extracted is here.


Note:

  • cv::aruco::drawAxis() should be OK, as manual matrix multiplications lead to the same issue (cf. the manual projection sketch after the code above)
  • I don't think the issue comes from cv::Rodrigues(), as I get more or less the same rotation matrix inside the solvePnP function, just before its call to cv::Rodrigues() (solvePnP returns an rvec and does the rotation matrix -> rotation vector conversion internally); I suppose the correct rotation matrix should have a different sign somewhere, which is not the case
  • cv::SOLVEPNP_ITERATIVE, cv::SOLVEPNP_EPNP, cv::SOLVEPNP_P3P and cv::SOLVEPNP_AP3P all seem to fail; the estimated rotation is not the same for some of the methods
  • using cv::aruco::CORNER_REFINE_SUBPIX can solve the issue in the original question (in my case, as the markers are much smaller, the refinement is not always right and the issue remains)
  • while I can understand that some numerical imprecision in the corner locations can lead to a badly estimated pose, I cannot understand why in this case the translation is OK and the rotation is almost OK except for a flipped z-axis (or why a left-handed rotation matrix seems to be returned); one way to disambiguate the two planar solutions is sketched below
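One possible workaround, sketched below under two assumptions: the marker's front face must be visible, so its +z axis points roughly toward the camera (R(2,2) < 0, a heuristic that is reliable near the image center); and an OpenCV newer than the one in this report (4.1+) is available, where cv::solvePnPGeneric() with cv::SOLVEPNP_IPPE_SQUARE returns both solutions of the planar ambiguity sorted by reprojection error:

    #include "opencv2/opencv.hpp"
    #include <vector>

    // Pick the physically plausible pose among the two planar solutions.
    bool pickPlausiblePose(const std::vector<cv::Point3d> &objectPoints, // IPPE_SQUARE corner order
                           const std::vector<cv::Point2d> &imagePoints,
                           const cv::Mat &camMatrix, const cv::Mat &distortionCoeffs,
                           cv::Mat &rvec, cv::Mat &tvec)
    {
        std::vector<cv::Mat> rvecs, tvecs;
        cv::solvePnPGeneric(objectPoints, imagePoints, camMatrix, distortionCoeffs,
                            rvecs, tvecs, false, cv::SOLVEPNP_IPPE_SQUARE);

        for (size_t i = 0; i < rvecs.size(); i++)
        {
            cv::Mat R;
            cv::Rodrigues(rvecs[i], R);
            if (R.at<double>(2, 2) < 0) // marker +z axis points toward the camera
            {
                rvec = rvecs[i];
                tvec = tvecs[i];
                return true;
            }
        }
        return false; // neither solution looks plausible
    }

cv::SOLVEPNP_IPPE_SQUARE requires the four object points in the order top-left, top-right, bottom-right, bottom-left around the origin, which the objectPoints in the code above already follow.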

Source: https://github.com/opencv/opencv/issues/8813
