| 1 | /*M/////////////////////////////////////////////////////////////////////////////////////// |
| 2 | // |
| 3 | // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. |
| 4 | // |
| 5 | // By downloading, copying, installing or using the software you agree to this license. |
| 6 | // If you do not agree to this license, do not download, install, |
| 7 | // copy or use the software. |
| 8 | // |
| 9 | // |
| 10 | // License Agreement |
| 11 | // For Open Source Computer Vision Library |
| 12 | // |
| 13 | // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. |
| 14 | // Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved. |
| 15 | // Third party copyrights are property of their respective owners. |
| 16 | // |
| 17 | // Redistribution and use in source and binary forms, with or without modification, |
| 18 | // are permitted provided that the following conditions are met: |
| 19 | // |
| 20 | // * Redistribution's of source code must retain the above copyright notice, |
| 21 | // this list of conditions and the following disclaimer. |
| 22 | // |
| 23 | // * Redistribution's in binary form must reproduce the above copyright notice, |
| 24 | // this list of conditions and the following disclaimer in the documentation |
| 25 | // and/or other materials provided with the distribution. |
| 26 | // |
| 27 | // * The name of the copyright holders may not be used to endorse or promote products |
| 28 | // derived from this software without specific prior written permission. |
| 29 | // |
| 30 | // This software is provided by the copyright holders and contributors "as is" and |
| 31 | // any express or implied warranties, including, but not limited to, the implied |
| 32 | // warranties of merchantability and fitness for a particular purpose are disclaimed. |
| 33 | // In no event shall the Intel Corporation or contributors be liable for any direct, |
| 34 | // indirect, incidental, special, exemplary, or consequential damages |
| 35 | // (including, but not limited to, procurement of substitute goods or services; |
| 36 | // loss of use, data, or profits; or business interruption) however caused |
| 37 | // and on any theory of liability, whether in contract, strict liability, |
| 38 | // or tort (including negligence or otherwise) arising in any way out of |
| 39 | // the use of this software, even if advised of the possibility of such damage. |
| 40 | // |
| 41 | //M*/ |
| 42 | |
| 43 | #include "precomp.hpp" |
| 44 | #include "fisheye.hpp" |
| 45 | #include <limits> |
| 46 | |
| 47 | namespace cv { namespace |
| 48 | { |
| 49 | struct JacobianRow |
| 50 | { |
| 51 | Vec2d df, dc; |
| 52 | Vec4d dk; |
| 53 | Vec3d dom, dT; |
| 54 | double dalpha; |
| 55 | }; |
| 56 | |
| 57 | void subMatrix(const Mat& src, Mat& dst, const std::vector<uchar>& cols, const std::vector<uchar>& rows); |
| 58 | }} |
| 59 | |
| 60 | ////////////////////////////////////////////////////////////////////////////////////////////////////////////// |
| 61 | /// cv::fisheye::projectPoints |
| 62 | |
| 63 | void cv::fisheye::projectPoints(InputArray objectPoints, OutputArray imagePoints, const Affine3d& affine, |
| 64 | InputArray K, InputArray D, double alpha, OutputArray jacobian) |
| 65 | { |
| 66 | CV_INSTRUMENT_REGION(); |
| 67 | |
    projectPoints(objectPoints, imagePoints, affine.rvec(), affine.translation(), K, D, alpha, jacobian);
| 69 | } |
| 70 | |
| 71 | void cv::fisheye::projectPoints(InputArray objectPoints, OutputArray imagePoints, InputArray _rvec, |
| 72 | InputArray _tvec, InputArray _K, InputArray _D, double alpha, OutputArray jacobian) |
| 73 | { |
| 74 | CV_INSTRUMENT_REGION(); |
| 75 | |
    // only 3-channel point data is supported for now
    CV_Assert(objectPoints.type() == CV_32FC3 || objectPoints.type() == CV_64FC3);
    imagePoints.create(objectPoints.size(), CV_MAKETYPE(objectPoints.depth(), 2));
| 79 | size_t n = objectPoints.total(); |
| 80 | |
| 81 | CV_Assert(_rvec.total() * _rvec.channels() == 3 && (_rvec.depth() == CV_32F || _rvec.depth() == CV_64F)); |
| 82 | CV_Assert(_tvec.total() * _tvec.channels() == 3 && (_tvec.depth() == CV_32F || _tvec.depth() == CV_64F)); |
| 83 | CV_Assert(_tvec.getMat().isContinuous() && _rvec.getMat().isContinuous()); |
| 84 | |
| 85 | Vec3d om = _rvec.depth() == CV_32F ? (Vec3d)*_rvec.getMat().ptr<Vec3f>() : *_rvec.getMat().ptr<Vec3d>(); |
| 86 | Vec3d T = _tvec.depth() == CV_32F ? (Vec3d)*_tvec.getMat().ptr<Vec3f>() : *_tvec.getMat().ptr<Vec3d>(); |
| 87 | |
| 88 | CV_Assert(_K.size() == Size(3,3) && (_K.type() == CV_32F || _K.type() == CV_64F) && _D.type() == _K.type() && _D.total() == 4); |
| 89 | |
| 90 | cv::Vec2d f, c; |
| 91 | if (_K.depth() == CV_32F) |
| 92 | { |
| 93 | |
| 94 | Matx33f K = _K.getMat(); |
| 95 | f = Vec2f(K(0, 0), K(1, 1)); |
| 96 | c = Vec2f(K(0, 2), K(1, 2)); |
| 97 | } |
| 98 | else |
| 99 | { |
| 100 | Matx33d K = _K.getMat(); |
| 101 | f = Vec2d(K(0, 0), K(1, 1)); |
| 102 | c = Vec2d(K(0, 2), K(1, 2)); |
| 103 | } |
| 104 | |
| 105 | Vec4d k = _D.depth() == CV_32F ? (Vec4d)*_D.getMat().ptr<Vec4f>(): *_D.getMat().ptr<Vec4d>(); |
| 106 | |
| 107 | const bool isJacobianNeeded = jacobian.needed(); |
| 108 | JacobianRow *Jn = 0; |
| 109 | if (isJacobianNeeded) |
| 110 | { |
        int nvars = 2 + 2 + 1 + 4 + 3 + 3; // f, c, alpha, k, om, T
        jacobian.create(2*(int)n, nvars, CV_64F);
        Jn = jacobian.getMat().ptr<JacobianRow>(0);
| 114 | } |
| 115 | |
| 116 | Matx33d R; |
| 117 | Matx<double, 3, 9> dRdom; |
    Rodrigues(om, R, dRdom);
| 119 | Affine3d aff(om, T); |
| 120 | |
| 121 | const Vec3f* Xf = objectPoints.getMat().ptr<Vec3f>(); |
| 122 | const Vec3d* Xd = objectPoints.getMat().ptr<Vec3d>(); |
| 123 | Vec2f *xpf = imagePoints.getMat().ptr<Vec2f>(); |
| 124 | Vec2d *xpd = imagePoints.getMat().ptr<Vec2d>(); |
| 125 | |
| 126 | for(size_t i = 0; i < n; ++i) |
| 127 | { |
| 128 | Vec3d Xi = objectPoints.depth() == CV_32F ? (Vec3d)Xf[i] : Xd[i]; |
| 129 | Vec3d Y = aff*Xi; |
        if (fabs(Y[2]) < DBL_MIN)
| 131 | Y[2] = 1; |
| 132 | Vec2d x(Y[0]/Y[2], Y[1]/Y[2]); |
| 133 | |
        double r2 = x.dot(x);
        double r = std::sqrt(r2);

        // Angle of the incoming ray:
        double theta = atan(r);
| 139 | |
| 140 | double theta2 = theta*theta, theta3 = theta2*theta, theta4 = theta2*theta2, theta5 = theta4*theta, |
| 141 | theta6 = theta3*theta3, theta7 = theta6*theta, theta8 = theta4*theta4, theta9 = theta8*theta; |
| 142 | |
| 143 | double theta_d = theta + k[0]*theta3 + k[1]*theta5 + k[2]*theta7 + k[3]*theta9; |
| 144 | |
| 145 | double inv_r = r > 1e-8 ? 1.0/r : 1; |
| 146 | double cdist = r > 1e-8 ? theta_d * inv_r : 1; |
| 147 | |
| 148 | Vec2d xd1 = x * cdist; |
| 149 | Vec2d xd3(xd1[0] + alpha*xd1[1], xd1[1]); |
| 150 | Vec2d final_point(xd3[0] * f[0] + c[0], xd3[1] * f[1] + c[1]); |
| 151 | |
| 152 | if (objectPoints.depth() == CV_32F) |
| 153 | xpf[i] = final_point; |
| 154 | else |
| 155 | xpd[i] = final_point; |
| 156 | |
| 157 | if (isJacobianNeeded) |
| 158 | { |
| 159 | //Vec3d Xi = pdepth == CV_32F ? (Vec3d)Xf[i] : Xd[i]; |
| 160 | //Vec3d Y = aff*Xi; |
| 161 | double dYdR[] = { Xi[0], Xi[1], Xi[2], 0, 0, 0, 0, 0, 0, |
| 162 | 0, 0, 0, Xi[0], Xi[1], Xi[2], 0, 0, 0, |
| 163 | 0, 0, 0, 0, 0, 0, Xi[0], Xi[1], Xi[2] }; |
| 164 | |
| 165 | Matx33d dYdom_data = Matx<double, 3, 9>(dYdR) * dRdom.t(); |
| 166 | const Vec3d *dYdom = (Vec3d*)dYdom_data.val; |
| 167 | |
| 168 | Matx33d dYdT_data = Matx33d::eye(); |
| 169 | const Vec3d *dYdT = (Vec3d*)dYdT_data.val; |
| 170 | |
| 171 | //Vec2d x(Y[0]/Y[2], Y[1]/Y[2]); |
| 172 | Vec3d dxdom[2]; |
| 173 | dxdom[0] = (1.0/Y[2]) * dYdom[0] - x[0]/Y[2] * dYdom[2]; |
| 174 | dxdom[1] = (1.0/Y[2]) * dYdom[1] - x[1]/Y[2] * dYdom[2]; |
| 175 | |
| 176 | Vec3d dxdT[2]; |
| 177 | dxdT[0] = (1.0/Y[2]) * dYdT[0] - x[0]/Y[2] * dYdT[2]; |
| 178 | dxdT[1] = (1.0/Y[2]) * dYdT[1] - x[1]/Y[2] * dYdT[2]; |
| 179 | |
| 180 | //double r2 = x.dot(x); |
| 181 | Vec3d dr2dom = 2 * x[0] * dxdom[0] + 2 * x[1] * dxdom[1]; |
| 182 | Vec3d dr2dT = 2 * x[0] * dxdT[0] + 2 * x[1] * dxdT[1]; |
| 183 | |
| 184 | //double r = std::sqrt(r2); |
| 185 | double drdr2 = r > 1e-8 ? 1.0/(2*r) : 1; |
| 186 | Vec3d drdom = drdr2 * dr2dom; |
| 187 | Vec3d drdT = drdr2 * dr2dT; |
| 188 | |
| 189 | // Angle of the incoming ray: |
| 190 | //double theta = atan(r); |
| 191 | double dthetadr = 1.0/(1+r2); |
| 192 | Vec3d dthetadom = dthetadr * drdom; |
| 193 | Vec3d dthetadT = dthetadr * drdT; |
| 194 | |
| 195 | //double theta_d = theta + k[0]*theta3 + k[1]*theta5 + k[2]*theta7 + k[3]*theta9; |
| 196 | double dtheta_ddtheta = 1 + 3*k[0]*theta2 + 5*k[1]*theta4 + 7*k[2]*theta6 + 9*k[3]*theta8; |
| 197 | Vec3d dtheta_ddom = dtheta_ddtheta * dthetadom; |
| 198 | Vec3d dtheta_ddT = dtheta_ddtheta * dthetadT; |
| 199 | Vec4d dtheta_ddk = Vec4d(theta3, theta5, theta7, theta9); |
| 200 | |
| 201 | //double inv_r = r > 1e-8 ? 1.0/r : 1; |
| 202 | //double cdist = r > 1e-8 ? theta_d / r : 1; |
| 203 | Vec3d dcdistdom = inv_r * (dtheta_ddom - cdist*drdom); |
| 204 | Vec3d dcdistdT = inv_r * (dtheta_ddT - cdist*drdT); |
| 205 | Vec4d dcdistdk = inv_r * dtheta_ddk; |
| 206 | |
| 207 | //Vec2d xd1 = x * cdist; |
| 208 | Vec4d dxd1dk[2]; |
| 209 | Vec3d dxd1dom[2], dxd1dT[2]; |
| 210 | dxd1dom[0] = x[0] * dcdistdom + cdist * dxdom[0]; |
| 211 | dxd1dom[1] = x[1] * dcdistdom + cdist * dxdom[1]; |
| 212 | dxd1dT[0] = x[0] * dcdistdT + cdist * dxdT[0]; |
| 213 | dxd1dT[1] = x[1] * dcdistdT + cdist * dxdT[1]; |
| 214 | dxd1dk[0] = x[0] * dcdistdk; |
| 215 | dxd1dk[1] = x[1] * dcdistdk; |
| 216 | |
| 217 | //Vec2d xd3(xd1[0] + alpha*xd1[1], xd1[1]); |
| 218 | Vec4d dxd3dk[2]; |
| 219 | Vec3d dxd3dom[2], dxd3dT[2]; |
| 220 | dxd3dom[0] = dxd1dom[0] + alpha * dxd1dom[1]; |
| 221 | dxd3dom[1] = dxd1dom[1]; |
| 222 | dxd3dT[0] = dxd1dT[0] + alpha * dxd1dT[1]; |
| 223 | dxd3dT[1] = dxd1dT[1]; |
| 224 | dxd3dk[0] = dxd1dk[0] + alpha * dxd1dk[1]; |
| 225 | dxd3dk[1] = dxd1dk[1]; |
| 226 | |
| 227 | Vec2d dxd3dalpha(xd1[1], 0); |
| 228 | |
| 229 | //final jacobian |
| 230 | Jn[0].dom = f[0] * dxd3dom[0]; |
| 231 | Jn[1].dom = f[1] * dxd3dom[1]; |
| 232 | |
| 233 | Jn[0].dT = f[0] * dxd3dT[0]; |
| 234 | Jn[1].dT = f[1] * dxd3dT[1]; |
| 235 | |
| 236 | Jn[0].dk = f[0] * dxd3dk[0]; |
| 237 | Jn[1].dk = f[1] * dxd3dk[1]; |
| 238 | |
| 239 | Jn[0].dalpha = f[0] * dxd3dalpha[0]; |
| 240 | Jn[1].dalpha = 0; //f[1] * dxd3dalpha[1]; |
| 241 | |
| 242 | Jn[0].df = Vec2d(xd3[0], 0); |
| 243 | Jn[1].df = Vec2d(0, xd3[1]); |
| 244 | |
| 245 | Jn[0].dc = Vec2d(1, 0); |
| 246 | Jn[1].dc = Vec2d(0, 1); |
| 247 | |
| 248 | //step to jacobian rows for next point |
| 249 | Jn += 2; |
| 250 | } |
| 251 | } |
| 252 | } |
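/*
    Usage sketch for the projection above (illustrative only; K, D, the pose and the
    3D points are made-up example values, not anything defined in this file):

        std::vector<cv::Point3d> objectPts = { {0.0, 0.0, 1.0}, {0.1, 0.0, 1.0}, {0.0, 0.1, 1.0} };
        std::vector<cv::Point2d> imagePts;
        cv::Matx33d K(300, 0, 320,
                      0, 300, 240,
                      0,   0,   1);
        cv::Vec4d D(0.05, -0.01, 0.002, 0.0);      // k1..k4 of the equidistant model
        cv::Vec3d rvec(0, 0, 0), tvec(0, 0, 0);    // camera coincides with the object frame
        cv::fisheye::projectPoints(objectPts, imagePts, rvec, tvec, K, D);
*/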
| 253 | |
| 254 | ////////////////////////////////////////////////////////////////////////////////////////////////////////////// |
| 255 | /// cv::fisheye::distortPoints |
| 256 | |
| 257 | void cv::fisheye::distortPoints(InputArray undistorted, OutputArray distorted, InputArray K, InputArray D, double alpha) |
| 258 | { |
| 259 | CV_INSTRUMENT_REGION(); |
| 260 | |
    // only 2-channel point data is supported for now
    CV_Assert(undistorted.type() == CV_32FC2 || undistorted.type() == CV_64FC2);
    distorted.create(undistorted.size(), undistorted.type());
    size_t n = undistorted.total();
| 265 | |
| 266 | CV_Assert(K.size() == Size(3,3) && (K.type() == CV_32F || K.type() == CV_64F) && D.total() == 4); |
| 267 | |
| 268 | cv::Vec2d f, c; |
| 269 | if (K.depth() == CV_32F) |
| 270 | { |
| 271 | Matx33f camMat = K.getMat(); |
| 272 | f = Vec2f(camMat(0, 0), camMat(1, 1)); |
| 273 | c = Vec2f(camMat(0, 2), camMat(1, 2)); |
| 274 | } |
| 275 | else |
| 276 | { |
| 277 | Matx33d camMat = K.getMat(); |
| 278 | f = Vec2d(camMat(0, 0), camMat(1, 1)); |
| 279 | c = Vec2d(camMat(0 ,2), camMat(1, 2)); |
| 280 | } |
| 281 | |
| 282 | Vec4d k = D.depth() == CV_32F ? (Vec4d)*D.getMat().ptr<Vec4f>(): *D.getMat().ptr<Vec4d>(); |
| 283 | |
| 284 | const Vec2f* Xf = undistorted.getMat().ptr<Vec2f>(); |
| 285 | const Vec2d* Xd = undistorted.getMat().ptr<Vec2d>(); |
| 286 | Vec2f *xpf = distorted.getMat().ptr<Vec2f>(); |
| 287 | Vec2d *xpd = distorted.getMat().ptr<Vec2d>(); |
| 288 | |
| 289 | for(size_t i = 0; i < n; ++i) |
| 290 | { |
| 291 | Vec2d x = undistorted.depth() == CV_32F ? (Vec2d)Xf[i] : Xd[i]; |
| 292 | |
        double r2 = x.dot(x);
        double r = std::sqrt(r2);

        // Angle of the incoming ray:
        double theta = atan(r);
| 298 | |
| 299 | double theta2 = theta*theta, theta3 = theta2*theta, theta4 = theta2*theta2, theta5 = theta4*theta, |
| 300 | theta6 = theta3*theta3, theta7 = theta6*theta, theta8 = theta4*theta4, theta9 = theta8*theta; |
| 301 | |
| 302 | double theta_d = theta + k[0]*theta3 + k[1]*theta5 + k[2]*theta7 + k[3]*theta9; |
| 303 | |
| 304 | double inv_r = r > 1e-8 ? 1.0/r : 1; |
| 305 | double cdist = r > 1e-8 ? theta_d * inv_r : 1; |
| 306 | |
| 307 | Vec2d xd1 = x * cdist; |
| 308 | Vec2d xd3(xd1[0] + alpha*xd1[1], xd1[1]); |
| 309 | Vec2d final_point(xd3[0] * f[0] + c[0], xd3[1] * f[1] + c[1]); |
| 310 | |
| 311 | if (undistorted.depth() == CV_32F) |
| 312 | xpf[i] = final_point; |
| 313 | else |
| 314 | xpd[i] = final_point; |
| 315 | } |
| 316 | } |
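/*
    Usage sketch (illustrative only; the values are assumed). Note that this overload
    expects normalized coordinates on the z = 1 plane, i.e. points that have already
    been undistorted and had the camera matrix removed:

        std::vector<cv::Point2d> normalized = { {0.10, -0.05}, {-0.20, 0.15} };
        std::vector<cv::Point2d> distortedPts;
        cv::Matx33d K(300, 0, 320, 0, 300, 240, 0, 0, 1);
        cv::Vec4d D(0.05, -0.01, 0.002, 0.0);
        cv::fisheye::distortPoints(normalized, distortedPts, K, D);
*/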
| 317 | |
| 318 | void cv::fisheye::distortPoints(InputArray _undistorted, OutputArray distorted, InputArray Kundistorted, InputArray K, InputArray D, double alpha) |
| 319 | { |
| 320 | CV_INSTRUMENT_REGION(); |
| 321 | |
| 322 | CV_Assert(_undistorted.type() == CV_32FC2 || _undistorted.type() == CV_64FC2); |
| 323 | CV_Assert(Kundistorted.size() == Size(3,3) && (Kundistorted.type() == CV_32F || Kundistorted.type() == CV_64F)); |
| 324 | |
| 325 | cv::Mat undistorted = _undistorted.getMat(); |
| 326 | cv::Mat normalized(undistorted.size(), CV_64FC2); |
| 327 | |
| 328 | Mat Knew = Kundistorted.getMat(); |
| 329 | |
| 330 | double cx, cy, fx, fy; |
| 331 | if (Knew.depth() == CV_32F) |
| 332 | { |
        fx = (double)Knew.at<float>(0, 0);
        fy = (double)Knew.at<float>(1, 1);
        cx = (double)Knew.at<float>(0, 2);
        cy = (double)Knew.at<float>(1, 2);
    }
    else
    {
        fx = Knew.at<double>(0, 0);
        fy = Knew.at<double>(1, 1);
        cx = Knew.at<double>(0, 2);
        cy = Knew.at<double>(1, 2);
| 344 | } |
| 345 | |
| 346 | size_t n = undistorted.total(); |
| 347 | const Vec2f* Xf = undistorted.ptr<Vec2f>(); |
| 348 | const Vec2d* Xd = undistorted.ptr<Vec2d>(); |
| 349 | Vec2d* normXd = normalized.ptr<Vec2d>(); |
| 350 | for (size_t i = 0; i < n; i++) |
| 351 | { |
| 352 | Vec2d p = undistorted.depth() == CV_32F ? (Vec2d)Xf[i] : Xd[i]; |
| 353 | normXd[i][0] = (p[0] - cx) / fx; |
| 354 | normXd[i][1] = (p[1] - cy) / fy; |
| 355 | } |
| 356 | |
    cv::fisheye::distortPoints(normalized, distorted, K, D, alpha);
| 358 | } |
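/*
    Usage sketch for the overload above (illustrative only; the values are assumed).
    Here the input points are pixel coordinates expressed in a camera matrix Kundistorted
    (e.g. the one used for a previous undistortion); this overload normalizes them first
    and then re-applies the fisheye distortion of (K, D):

        std::vector<cv::Point2d> undistortedPix = { {310.0, 235.0}, {400.0, 260.0} };
        std::vector<cv::Point2d> redistorted;
        cv::Matx33d K(300, 0, 320, 0, 300, 240, 0, 0, 1);
        cv::Matx33d Knew(250, 0, 320, 0, 250, 240, 0, 0, 1);
        cv::Vec4d D(0.05, -0.01, 0.002, 0.0);
        cv::fisheye::distortPoints(undistortedPix, redistorted, Knew, K, D);
*/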
| 359 | |
| 360 | ////////////////////////////////////////////////////////////////////////////////////////////////////////////// |
| 361 | /// cv::fisheye::undistortPoints |
| 362 | |
| 363 | void cv::fisheye::undistortPoints( InputArray distorted, OutputArray undistorted, InputArray K, InputArray D, |
| 364 | InputArray R, InputArray P, TermCriteria criteria) |
| 365 | { |
| 366 | CV_INSTRUMENT_REGION(); |
| 367 | |
    // only 2-channel point data is supported for now
    CV_Assert(distorted.type() == CV_32FC2 || distorted.type() == CV_64FC2);
    undistorted.create(distorted.size(), distorted.type());
| 371 | |
| 372 | CV_Assert(P.empty() || P.size() == Size(3, 3) || P.size() == Size(4, 3)); |
| 373 | CV_Assert(R.empty() || R.size() == Size(3, 3) || R.total() * R.channels() == 3); |
| 374 | CV_Assert(D.total() == 4 && K.size() == Size(3, 3) && (K.depth() == CV_32F || K.depth() == CV_64F)); |
| 375 | |
| 376 | CV_Assert(criteria.isValid()); |
| 377 | |
| 378 | cv::Vec2d f, c; |
| 379 | if (K.depth() == CV_32F) |
| 380 | { |
| 381 | Matx33f camMat = K.getMat(); |
| 382 | f = Vec2f(camMat(0, 0), camMat(1, 1)); |
| 383 | c = Vec2f(camMat(0, 2), camMat(1, 2)); |
| 384 | } |
| 385 | else |
| 386 | { |
| 387 | Matx33d camMat = K.getMat(); |
| 388 | f = Vec2d(camMat(0, 0), camMat(1, 1)); |
| 389 | c = Vec2d(camMat(0, 2), camMat(1, 2)); |
| 390 | } |
| 391 | |
| 392 | Vec4d k = D.depth() == CV_32F ? (Vec4d)*D.getMat().ptr<Vec4f>(): *D.getMat().ptr<Vec4d>(); |
| 393 | |
| 394 | cv::Matx33d RR = cv::Matx33d::eye(); |
| 395 | if (!R.empty() && R.total() * R.channels() == 3) |
| 396 | { |
| 397 | cv::Vec3d rvec; |
        R.getMat().convertTo(rvec, CV_64F);
        RR = cv::Affine3d(rvec).rotation();
    }
    else if (!R.empty() && R.size() == Size(3, 3))
        R.getMat().convertTo(RR, CV_64F);
| 403 | |
| 404 | if(!P.empty()) |
| 405 | { |
| 406 | cv::Matx33d PP; |
        P.getMat().colRange(0, 3).convertTo(PP, CV_64F);
| 408 | RR = PP * RR; |
| 409 | } |
| 410 | |
| 411 | // start undistorting |
| 412 | const cv::Vec2f* srcf = distorted.getMat().ptr<cv::Vec2f>(); |
| 413 | const cv::Vec2d* srcd = distorted.getMat().ptr<cv::Vec2d>(); |
| 414 | cv::Vec2f* dstf = undistorted.getMat().ptr<cv::Vec2f>(); |
| 415 | cv::Vec2d* dstd = undistorted.getMat().ptr<cv::Vec2d>(); |
| 416 | |
| 417 | size_t n = distorted.total(); |
| 418 | int sdepth = distorted.depth(); |
| 419 | |
| 420 | const bool isEps = (criteria.type & TermCriteria::EPS) != 0; |
| 421 | |
| 422 | /* Define max count for solver iterations */ |
| 423 | int maxCount = std::numeric_limits<int>::max(); |
| 424 | if (criteria.type & TermCriteria::MAX_ITER) { |
| 425 | maxCount = criteria.maxCount; |
| 426 | } |
| 427 | |
| 428 | |
| 429 | for(size_t i = 0; i < n; i++ ) |
| 430 | { |
| 431 | Vec2d pi = sdepth == CV_32F ? (Vec2d)srcf[i] : srcd[i]; // image point |
| 432 | Vec2d pw((pi[0] - c[0])/f[0], (pi[1] - c[1])/f[1]); // world point |
| 433 | |
        double theta_d = sqrt(pw[0]*pw[0] + pw[1]*pw[1]);

        // the current camera model is only valid up to a 180 degree field of view;
        // for a larger FOV the loop below does not converge.
        // Clip the value so we still get plausible results for super-fisheye images (> 180 degrees).
        theta_d = min(max(-CV_PI/2., theta_d), CV_PI/2.);
| 440 | |
| 441 | bool converged = false; |
| 442 | double theta = theta_d; |
| 443 | |
| 444 | double scale = 0.0; |
| 445 | |
        if (!isEps || fabs(theta_d) > criteria.epsilon)
| 447 | { |
| 448 | // compensate distortion iteratively using Newton method |
| 449 | |
| 450 | for (int j = 0; j < maxCount; j++) |
| 451 | { |
| 452 | double theta2 = theta*theta, theta4 = theta2*theta2, theta6 = theta4*theta2, theta8 = theta6*theta2; |
| 453 | double k0_theta2 = k[0] * theta2, k1_theta4 = k[1] * theta4, k2_theta6 = k[2] * theta6, k3_theta8 = k[3] * theta8; |
| 454 | /* new_theta = theta - theta_fix, theta_fix = f0(theta) / f0'(theta) */ |
| 455 | double theta_fix = (theta * (1 + k0_theta2 + k1_theta4 + k2_theta6 + k3_theta8) - theta_d) / |
| 456 | (1 + 3*k0_theta2 + 5*k1_theta4 + 7*k2_theta6 + 9*k3_theta8); |
| 457 | theta = theta - theta_fix; |
| 458 | |
                if (isEps && (fabs(theta_fix) < criteria.epsilon))
| 460 | { |
| 461 | converged = true; |
| 462 | break; |
| 463 | } |
| 464 | } |
| 465 | |
            scale = std::tan(theta) / theta_d;
| 467 | } |
| 468 | else |
| 469 | { |
| 470 | converged = true; |
| 471 | } |
| 472 | |
        // theta is monotonically increasing or decreasing depending on the sign of theta_d;
        // if theta has flipped sign, the iteration may have converged due to symmetry,
        // but to a point on the opposite side of the camera center,
        // so check whether theta changed sign during the optimization
        bool theta_flipped = ((theta_d < 0 && theta > 0) || (theta_d > 0 && theta < 0));
| 477 | |
| 478 | if ((converged || !isEps) && !theta_flipped) |
| 479 | { |
| 480 | Vec2d pu = pw * scale; //undistorted point |
| 481 | Vec2d fi; |
| 482 | |
| 483 | if (!R.empty() || !P.empty()) |
| 484 | { |
| 485 | // reproject |
| 486 | Vec3d pr = RR * Vec3d(pu[0], pu[1], 1.0); // rotated point optionally multiplied by new camera matrix |
| 487 | fi = Vec2d(pr[0]/pr[2], pr[1]/pr[2]); // final |
| 488 | } |
| 489 | else |
| 490 | { |
| 491 | fi = pu; |
| 492 | } |
| 493 | |
| 494 | if( sdepth == CV_32F ) |
| 495 | dstf[i] = fi; |
| 496 | else |
| 497 | dstd[i] = fi; |
| 498 | } |
| 499 | else |
| 500 | { |
| 501 | // Vec2d fi(std::numeric_limits<double>::quiet_NaN(), std::numeric_limits<double>::quiet_NaN()); |
| 502 | Vec2d fi(-1000000.0, -1000000.0); |
| 503 | |
| 504 | if( sdepth == CV_32F ) |
| 505 | dstf[i] = fi; |
| 506 | else |
| 507 | dstd[i] = fi; |
| 508 | } |
| 509 | } |
| 510 | } |
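/*
    Usage sketch (illustrative only; the values are assumed). With R and P left empty
    the result is in normalized coordinates; passing P = K returns pixel coordinates
    instead. The TermCriteria bounds the Newton iteration above:

        std::vector<cv::Point2d> distortedPix = { {330.0, 250.0}, {120.0, 400.0} };
        std::vector<cv::Point2d> undistortedPix;
        cv::Matx33d K(300, 0, 320, 0, 300, 240, 0, 0, 1);
        cv::Vec4d D(0.05, -0.01, 0.002, 0.0);
        cv::fisheye::undistortPoints(distortedPix, undistortedPix, K, D, cv::noArray(), K,
                                     cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 10, 1e-8));
*/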
| 511 | |
| 512 | ////////////////////////////////////////////////////////////////////////////////////////////////////////////// |
| 513 | /// cv::fisheye::initUndistortRectifyMap |
| 514 | |
| 515 | void cv::fisheye::initUndistortRectifyMap( InputArray K, InputArray D, InputArray R, InputArray P, |
| 516 | const cv::Size& size, int m1type, OutputArray map1, OutputArray map2 ) |
| 517 | { |
| 518 | CV_INSTRUMENT_REGION(); |
| 519 | |
| 520 | CV_Assert( m1type == CV_16SC2 || m1type == CV_32F || m1type <=0 ); |
    map1.create( size, m1type <= 0 ? CV_16SC2 : m1type );
    map2.create( size, map1.type() == CV_16SC2 ? CV_16UC1 : CV_32F );
| 523 | |
| 524 | CV_Assert((K.depth() == CV_32F || K.depth() == CV_64F) && (D.depth() == CV_32F || D.depth() == CV_64F)); |
| 525 | CV_Assert((P.empty() || P.depth() == CV_32F || P.depth() == CV_64F) && (R.empty() || R.depth() == CV_32F || R.depth() == CV_64F)); |
| 526 | CV_Assert(K.size() == Size(3, 3) && (D.empty() || D.total() == 4)); |
| 527 | CV_Assert(R.empty() || R.size() == Size(3, 3) || R.total() * R.channels() == 3); |
| 528 | CV_Assert(P.empty() || P.size() == Size(3, 3) || P.size() == Size(4, 3)); |
| 529 | |
| 530 | cv::Vec2d f, c; |
| 531 | if (K.depth() == CV_32F) |
| 532 | { |
| 533 | Matx33f camMat = K.getMat(); |
| 534 | f = Vec2f(camMat(0, 0), camMat(1, 1)); |
| 535 | c = Vec2f(camMat(0, 2), camMat(1, 2)); |
| 536 | } |
| 537 | else |
| 538 | { |
| 539 | Matx33d camMat = K.getMat(); |
| 540 | f = Vec2d(camMat(0, 0), camMat(1, 1)); |
| 541 | c = Vec2d(camMat(0, 2), camMat(1, 2)); |
| 542 | } |
| 543 | |
    Vec4d k = Vec4d::all(0);
| 545 | if (!D.empty()) |
| 546 | k = D.depth() == CV_32F ? (Vec4d)*D.getMat().ptr<Vec4f>(): *D.getMat().ptr<Vec4d>(); |
| 547 | |
| 548 | cv::Matx33d RR = cv::Matx33d::eye(); |
| 549 | if (!R.empty() && R.total() * R.channels() == 3) |
| 550 | { |
| 551 | cv::Vec3d rvec; |
        R.getMat().convertTo(rvec, CV_64F);
        RR = Affine3d(rvec).rotation();
    }
    else if (!R.empty() && R.size() == Size(3, 3))
        R.getMat().convertTo(RR, CV_64F);
| 557 | |
| 558 | cv::Matx33d PP = cv::Matx33d::eye(); |
| 559 | if (!P.empty()) |
        P.getMat().colRange(0, 3).convertTo(PP, CV_64F);

    cv::Matx33d iR = (PP * RR).inv(cv::DECOMP_SVD);
| 563 | |
| 564 | for( int i = 0; i < size.height; ++i) |
| 565 | { |
        float* m1f = map1.getMat().ptr<float>(i);
        float* m2f = map2.getMat().ptr<float>(i);
| 568 | short* m1 = (short*)m1f; |
| 569 | ushort* m2 = (ushort*)m2f; |
| 570 | |
| 571 | double _x = i*iR(0, 1) + iR(0, 2), |
| 572 | _y = i*iR(1, 1) + iR(1, 2), |
| 573 | _w = i*iR(2, 1) + iR(2, 2); |
| 574 | |
| 575 | for( int j = 0; j < size.width; ++j) |
| 576 | { |
| 577 | double u, v; |
| 578 | if( _w <= 0) |
| 579 | { |
| 580 | u = (_x > 0) ? -std::numeric_limits<double>::infinity() : std::numeric_limits<double>::infinity(); |
| 581 | v = (_y > 0) ? -std::numeric_limits<double>::infinity() : std::numeric_limits<double>::infinity(); |
| 582 | } |
| 583 | else |
| 584 | { |
| 585 | double x = _x/_w, y = _y/_w; |
| 586 | |
                double r = sqrt(x*x + y*y);
                double theta = atan(r);
| 589 | |
| 590 | double theta2 = theta*theta, theta4 = theta2*theta2, theta6 = theta4*theta2, theta8 = theta4*theta4; |
| 591 | double theta_d = theta * (1 + k[0]*theta2 + k[1]*theta4 + k[2]*theta6 + k[3]*theta8); |
| 592 | |
| 593 | double scale = (r == 0) ? 1.0 : theta_d / r; |
| 594 | u = f[0]*x*scale + c[0]; |
| 595 | v = f[1]*y*scale + c[1]; |
| 596 | } |
| 597 | |
| 598 | if( m1type == CV_16SC2 ) |
| 599 | { |
                int iu = cv::saturate_cast<int>(u*static_cast<double>(cv::INTER_TAB_SIZE));
                int iv = cv::saturate_cast<int>(v*static_cast<double>(cv::INTER_TAB_SIZE));
| 602 | m1[j*2+0] = (short)(iu >> cv::INTER_BITS); |
| 603 | m1[j*2+1] = (short)(iv >> cv::INTER_BITS); |
| 604 | m2[j] = (ushort)((iv & (cv::INTER_TAB_SIZE-1))*cv::INTER_TAB_SIZE + (iu & (cv::INTER_TAB_SIZE-1))); |
| 605 | } |
| 606 | else if( m1type == CV_32FC1 ) |
| 607 | { |
| 608 | m1f[j] = (float)u; |
| 609 | m2f[j] = (float)v; |
| 610 | } |
| 611 | |
| 612 | _x += iR(0, 0); |
| 613 | _y += iR(1, 0); |
| 614 | _w += iR(2, 0); |
| 615 | } |
| 616 | } |
| 617 | } |
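/*
    Usage sketch (illustrative only; K, D and the image file are assumed, hypothetical
    inputs). The maps produced here are typically fed to cv::remap:

        cv::Matx33d K(300, 0, 320, 0, 300, 240, 0, 0, 1);
        cv::Vec4d D(0.05, -0.01, 0.002, 0.0);
        cv::Size imgSize(640, 480);
        cv::Mat map1, map2;
        cv::fisheye::initUndistortRectifyMap(K, D, cv::Matx33d::eye(), K, imgSize, CV_16SC2, map1, map2);

        cv::Mat fisheyeImg = cv::imread("fisheye.png");   // hypothetical input image
        cv::Mat rectified;
        cv::remap(fisheyeImg, rectified, map1, map2, cv::INTER_LINEAR);
*/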
| 618 | |
| 619 | ////////////////////////////////////////////////////////////////////////////////////////////////////////////// |
| 620 | /// cv::fisheye::undistortImage |
| 621 | |
| 622 | void cv::fisheye::undistortImage(InputArray distorted, OutputArray undistorted, |
| 623 | InputArray K, InputArray D, InputArray Knew, const Size& new_size) |
| 624 | { |
| 625 | CV_INSTRUMENT_REGION(); |
| 626 | |
| 627 | Size size = !new_size.empty() ? new_size : distorted.size(); |
| 628 | |
| 629 | cv::Mat map1, map2; |
    fisheye::initUndistortRectifyMap(K, D, cv::Matx33d::eye(), Knew, size, CV_16SC2, map1, map2 );
    cv::remap(distorted, undistorted, map1, map2, INTER_LINEAR, BORDER_CONSTANT);
| 632 | } |
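/*
    Usage sketch (illustrative only; values are assumed). Passing K itself as Knew keeps
    the original focal length; estimateNewCameraMatrixForUndistortRectify below can be
    used to compute a better Knew:

        cv::Matx33d K(300, 0, 320, 0, 300, 240, 0, 0, 1);
        cv::Vec4d D(0.05, -0.01, 0.002, 0.0);
        cv::Mat input = cv::imread("fisheye.png");   // hypothetical input image
        cv::Mat output;
        cv::fisheye::undistortImage(input, output, K, D, K);
*/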
| 633 | |
| 634 | |
| 635 | ////////////////////////////////////////////////////////////////////////////////////////////////////////////// |
| 636 | /// cv::fisheye::estimateNewCameraMatrixForUndistortRectify |
| 637 | |
| 638 | void cv::fisheye::estimateNewCameraMatrixForUndistortRectify(InputArray K, InputArray D, const Size &image_size, InputArray R, |
| 639 | OutputArray P, double balance, const Size& new_size, double fov_scale) |
| 640 | { |
| 641 | CV_INSTRUMENT_REGION(); |
| 642 | |
| 643 | CV_Assert( K.size() == Size(3, 3) && (K.depth() == CV_32F || K.depth() == CV_64F)); |
| 644 | CV_Assert(D.empty() || ((D.total() == 4) && (D.depth() == CV_32F || D.depth() == CV_64F))); |
| 645 | |
| 646 | int w = image_size.width, h = image_size.height; |
    balance = std::min(std::max(balance, 0.0), 1.0);
| 648 | |
| 649 | cv::Mat points(1, 4, CV_64FC2); |
| 650 | Vec2d* pptr = points.ptr<Vec2d>(); |
| 651 | pptr[0] = Vec2d(w/2, 0); |
| 652 | pptr[1] = Vec2d(w, h/2); |
| 653 | pptr[2] = Vec2d(w/2, h); |
| 654 | pptr[3] = Vec2d(0, h/2); |
| 655 | |
    fisheye::undistortPoints(points, points, K, D, R);
    cv::Scalar center_mass = mean(points);
| 658 | cv::Vec2d cn(center_mass.val); |
| 659 | |
    double aspect_ratio = (K.depth() == CV_32F) ? K.getMat().at<float>(0,0)/K.getMat().at<float>(1,1)
                                                : K.getMat().at<double>(0,0)/K.getMat().at<double>(1,1);
| 662 | |
| 663 | // convert to identity ratio |
| 664 | cn[1] *= aspect_ratio; |
| 665 | for(size_t i = 0; i < points.total(); ++i) |
| 666 | pptr[i][1] *= aspect_ratio; |
| 667 | |
| 668 | double minx = DBL_MAX, miny = DBL_MAX, maxx = -DBL_MAX, maxy = -DBL_MAX; |
| 669 | for(size_t i = 0; i < points.total(); ++i) |
| 670 | { |
        miny = std::min(miny, pptr[i][1]);
        maxy = std::max(maxy, pptr[i][1]);
        minx = std::min(minx, pptr[i][0]);
        maxx = std::max(maxx, pptr[i][0]);
| 675 | } |
| 676 | |
| 677 | double f1 = w * 0.5/(cn[0] - minx); |
| 678 | double f2 = w * 0.5/(maxx - cn[0]); |
| 679 | double f3 = h * 0.5 * aspect_ratio/(cn[1] - miny); |
| 680 | double f4 = h * 0.5 * aspect_ratio/(maxy - cn[1]); |
| 681 | |
    double fmin = std::min(f1, std::min(f2, std::min(f3, f4)));
    double fmax = std::max(f1, std::max(f2, std::max(f3, f4)));
| 684 | |
| 685 | double f = balance * fmin + (1.0 - balance) * fmax; |
| 686 | f *= fov_scale > 0 ? 1.0/fov_scale : 1.0; |
| 687 | |
| 688 | cv::Vec2d new_f(f, f), new_c = -cn * f + Vec2d(w, h * aspect_ratio) * 0.5; |
| 689 | |
| 690 | // restore aspect ratio |
| 691 | new_f[1] /= aspect_ratio; |
| 692 | new_c[1] /= aspect_ratio; |
| 693 | |
| 694 | if (!new_size.empty()) |
| 695 | { |
| 696 | double rx = new_size.width /(double)image_size.width; |
| 697 | double ry = new_size.height/(double)image_size.height; |
| 698 | |
| 699 | new_f[0] *= rx; new_f[1] *= ry; |
| 700 | new_c[0] *= rx; new_c[1] *= ry; |
| 701 | } |
| 702 | |
    Mat(Matx33d(new_f[0], 0, new_c[0],
                0, new_f[1], new_c[1],
                0, 0, 1)).convertTo(P, P.empty() ? K.type() : P.type());
| 706 | } |
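/*
    Usage sketch (illustrative only; values are assumed). balance = 0 keeps only the
    region where all pixels are valid, balance = 1 preserves the whole original field
    of view:

        cv::Matx33d K(300, 0, 320, 0, 300, 240, 0, 0, 1);
        cv::Vec4d D(0.05, -0.01, 0.002, 0.0);
        cv::Size imgSize(640, 480);
        cv::Mat Knew;
        cv::fisheye::estimateNewCameraMatrixForUndistortRectify(K, D, imgSize, cv::Matx33d::eye(),
                                                                Knew, 0.5, imgSize);   // balance = 0.5
*/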
| 707 | |
| 708 | |
| 709 | ////////////////////////////////////////////////////////////////////////////////////////////////////////////// |
| 710 | /// cv::fisheye::calibrate |
| 711 | |
| 712 | double cv::fisheye::calibrate(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, const Size& image_size, |
| 713 | InputOutputArray K, InputOutputArray D, OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, |
| 714 | int flags , cv::TermCriteria criteria) |
| 715 | { |
| 716 | CV_INSTRUMENT_REGION(); |
| 717 | |
| 718 | CV_Assert(!objectPoints.empty() && !imagePoints.empty() && objectPoints.total() == imagePoints.total()); |
| 719 | CV_Assert(objectPoints.type() == CV_32FC3 || objectPoints.type() == CV_64FC3); |
| 720 | CV_Assert(imagePoints.type() == CV_32FC2 || imagePoints.type() == CV_64FC2); |
| 721 | CV_Assert(K.empty() || (K.size() == Size(3,3))); |
| 722 | CV_Assert(D.empty() || (D.total() == 4)); |
| 723 | CV_Assert(rvecs.empty() || (rvecs.channels() == 3)); |
| 724 | CV_Assert(tvecs.empty() || (tvecs.channels() == 3)); |
| 725 | |
| 726 | CV_Assert((!K.empty() && !D.empty()) || !(flags & CALIB_USE_INTRINSIC_GUESS)); |
| 727 | |
| 728 | using namespace cv::internal; |
| 729 | //-------------------------------Initialization |
| 730 | IntrinsicParams finalParam; |
| 731 | IntrinsicParams currentParam; |
| 732 | IntrinsicParams errors; |
| 733 | |
| 734 | finalParam.isEstimate[0] = flags & CALIB_FIX_FOCAL_LENGTH ? 0 : 1; |
| 735 | finalParam.isEstimate[1] = flags & CALIB_FIX_FOCAL_LENGTH ? 0 : 1; |
| 736 | finalParam.isEstimate[2] = flags & CALIB_FIX_PRINCIPAL_POINT ? 0 : 1; |
| 737 | finalParam.isEstimate[3] = flags & CALIB_FIX_PRINCIPAL_POINT ? 0 : 1; |
| 738 | finalParam.isEstimate[4] = flags & CALIB_FIX_SKEW ? 0 : 1; |
| 739 | finalParam.isEstimate[5] = flags & CALIB_FIX_K1 ? 0 : 1; |
| 740 | finalParam.isEstimate[6] = flags & CALIB_FIX_K2 ? 0 : 1; |
| 741 | finalParam.isEstimate[7] = flags & CALIB_FIX_K3 ? 0 : 1; |
| 742 | finalParam.isEstimate[8] = flags & CALIB_FIX_K4 ? 0 : 1; |
| 743 | |
| 744 | const int recompute_extrinsic = flags & CALIB_RECOMPUTE_EXTRINSIC ? 1: 0; |
| 745 | const int check_cond = flags & CALIB_CHECK_COND ? 1 : 0; |
| 746 | |
| 747 | const double alpha_smooth = 0.4; |
| 748 | const double thresh_cond = 1e6; |
| 749 | double change = 1; |
| 750 | Vec2d err_std; |
| 751 | |
| 752 | Matx33d _K; |
| 753 | Vec4d _D; |
| 754 | if (flags & CALIB_USE_INTRINSIC_GUESS) |
| 755 | { |
        K.getMat().convertTo(_K, CV_64FC1);
        D.getMat().convertTo(_D, CV_64FC1);
        finalParam.Init(Vec2d(_K(0,0), _K(1, 1)),
                        Vec2d(_K(0,2), _K(1, 2)),
                        Vec4d(flags & CALIB_FIX_K1 ? 0 : _D[0],
                              flags & CALIB_FIX_K2 ? 0 : _D[1],
                              flags & CALIB_FIX_K3 ? 0 : _D[2],
                              flags & CALIB_FIX_K4 ? 0 : _D[3]),
                        _K(0, 1) / _K(0, 0));
| 765 | } |
| 766 | else |
| 767 | { |
        finalParam.Init(Vec2d(max(image_size.width, image_size.height) / 2., max(image_size.width, image_size.height) / 2.),
                        Vec2d(image_size.width  / 2.0 - 0.5, image_size.height / 2.0 - 0.5));
| 770 | } |
| 771 | |
| 772 | errors.isEstimate = finalParam.isEstimate; |
| 773 | |
| 774 | std::vector<Vec3d> omc(objectPoints.total()), Tc(objectPoints.total()); |
| 775 | |
    CalibrateExtrinsics(objectPoints, imagePoints, finalParam, check_cond, thresh_cond, omc, Tc);
| 777 | |
| 778 | |
| 779 | //-------------------------------Optimization |
| 780 | for(int iter = 0; iter < std::numeric_limits<int>::max(); ++iter) |
| 781 | { |
| 782 | if ((criteria.type == 1 && iter >= criteria.maxCount) || |
| 783 | (criteria.type == 2 && change <= criteria.epsilon) || |
| 784 | (criteria.type == 3 && (change <= criteria.epsilon || iter >= criteria.maxCount))) |
| 785 | break; |
| 786 | |
        double alpha_smooth2 = 1 - std::pow(1 - alpha_smooth, iter + 1.0);

        Mat JJ2, ex3;
        ComputeJacobians(objectPoints, imagePoints, finalParam, omc, Tc, check_cond, thresh_cond, JJ2, ex3);

        Mat G;
        solve(JJ2, ex3, G);
        currentParam = finalParam + alpha_smooth2*G;

        change = norm(Vec4d(currentParam.f[0], currentParam.f[1], currentParam.c[0], currentParam.c[1]) -
                      Vec4d(finalParam.f[0], finalParam.f[1], finalParam.c[0], finalParam.c[1]))
                 / norm(Vec4d(currentParam.f[0], currentParam.f[1], currentParam.c[0], currentParam.c[1]));
| 799 | |
| 800 | finalParam = currentParam; |
| 801 | |
| 802 | if (recompute_extrinsic) |
| 803 | { |
            CalibrateExtrinsics(objectPoints, imagePoints, finalParam, check_cond,
                                thresh_cond, omc, Tc);
| 806 | } |
| 807 | } |
| 808 | |
| 809 | //-------------------------------Validation |
| 810 | double rms; |
    EstimateUncertainties(objectPoints, imagePoints, finalParam, omc, Tc, errors, err_std, thresh_cond,
                          check_cond, rms);
| 813 | |
| 814 | //------------------------------- |
| 815 | _K = Matx33d(finalParam.f[0], finalParam.f[0] * finalParam.alpha, finalParam.c[0], |
| 816 | 0, finalParam.f[1], finalParam.c[1], |
| 817 | 0, 0, 1); |
| 818 | |
    if (K.needed()) cv::Mat(_K).convertTo(K, K.empty() ? CV_64FC1 : K.type());
    if (D.needed()) cv::Mat(finalParam.k).convertTo(D, D.empty() ? CV_64FC1 : D.type());
| 821 | if (rvecs.isMatVector()) |
| 822 | { |
| 823 | int N = (int)objectPoints.total(); |
| 824 | |
        if(rvecs.empty())
            rvecs.create(N, 1, CV_64FC3);

        if(tvecs.empty())
            tvecs.create(N, 1, CV_64FC3);

        for(int i = 0; i < N; i++ )
        {
            rvecs.create(3, 1, CV_64F, i, true);
            tvecs.create(3, 1, CV_64F, i, true);
            memcpy(rvecs.getMat(i).ptr(), omc[i].val, sizeof(Vec3d));
            memcpy(tvecs.getMat(i).ptr(), Tc[i].val, sizeof(Vec3d));
| 837 | } |
| 838 | } |
| 839 | else |
| 840 | { |
        if (rvecs.needed()) cv::Mat(omc).convertTo(rvecs, rvecs.empty() ? CV_64FC3 : rvecs.type());
        if (tvecs.needed()) cv::Mat(Tc).convertTo(tvecs, tvecs.empty() ? CV_64FC3 : tvecs.type());
| 843 | } |
| 844 | |
| 845 | return rms; |
| 846 | } |
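/*
    Usage sketch (illustrative only). objectPts / imagePts stand for per-view board
    corners gathered elsewhere; the flags shown are among those handled above, and the
    TermCriteria mirrors the default declared in the public header:

        std::vector<std::vector<cv::Point3d> > objectPts;   // one vector of 3D corners per view
        std::vector<std::vector<cv::Point2d> > imagePts;    // matching detected 2D corners
        cv::Matx33d K;
        cv::Vec4d D;
        std::vector<cv::Vec3d> rvecs, tvecs;
        int flags = cv::fisheye::CALIB_RECOMPUTE_EXTRINSIC | cv::fisheye::CALIB_FIX_SKEW;
        double rms = cv::fisheye::calibrate(objectPts, imagePts, cv::Size(640, 480),
                                            K, D, rvecs, tvecs, flags,
                                            cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 100, DBL_EPSILON));
*/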
| 847 | |
| 848 | ////////////////////////////////////////////////////////////////////////////////////////////////////////////// |
| 849 | /// cv::fisheye::stereoCalibrate |
| 850 | |
| 851 | double cv::fisheye::stereoCalibrate(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2, |
| 852 | InputOutputArray K1, InputOutputArray D1, InputOutputArray K2, InputOutputArray D2, Size imageSize, |
| 853 | OutputArray R, OutputArray T, int flags, TermCriteria criteria) |
| 854 | { |
    return cv::fisheye::stereoCalibrate(objectPoints, imagePoints1, imagePoints2, K1, D1, K2, D2, imageSize, R, T, noArray(), noArray(), flags, criteria);
| 856 | } |
| 857 | |
| 858 | double cv::fisheye::stereoCalibrate(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2, |
| 859 | InputOutputArray K1, InputOutputArray D1, InputOutputArray K2, InputOutputArray D2, Size imageSize, |
| 860 | OutputArray R, OutputArray T, OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, int flags, TermCriteria criteria) |
| 861 | { |
| 862 | CV_INSTRUMENT_REGION(); |
| 863 | |
| 864 | CV_Assert(!objectPoints.empty() && !imagePoints1.empty() && !imagePoints2.empty()); |
| 865 | CV_Assert(objectPoints.total() == imagePoints1.total() || imagePoints1.total() == imagePoints2.total()); |
| 866 | CV_Assert(objectPoints.type() == CV_32FC3 || objectPoints.type() == CV_64FC3); |
| 867 | CV_Assert(imagePoints1.type() == CV_32FC2 || imagePoints1.type() == CV_64FC2); |
| 868 | CV_Assert(imagePoints2.type() == CV_32FC2 || imagePoints2.type() == CV_64FC2); |
| 869 | |
| 870 | CV_Assert(K1.empty() || (K1.size() == Size(3,3))); |
| 871 | CV_Assert(D1.empty() || (D1.total() == 4)); |
| 872 | CV_Assert(K2.empty() || (K2.size() == Size(3,3))); |
| 873 | CV_Assert(D2.empty() || (D2.total() == 4)); |
| 874 | |
| 875 | CV_Assert((!K1.empty() && !K2.empty() && !D1.empty() && !D2.empty()) || !(flags & CALIB_FIX_INTRINSIC)); |
| 876 | |
| 877 | //-------------------------------Initialization |
| 878 | |
| 879 | const int threshold = 50; |
| 880 | const double thresh_cond = 1e6; |
| 881 | const int check_cond = 1; |
| 882 | |
| 883 | int n_points = (int)objectPoints.getMat(i: 0).total(); |
| 884 | int n_images = (int)objectPoints.total(); |
| 885 | |
| 886 | double change = 1; |
| 887 | |
| 888 | cv::internal::IntrinsicParams intrinsicLeft; |
| 889 | cv::internal::IntrinsicParams intrinsicRight; |
| 890 | |
| 891 | cv::internal::IntrinsicParams intrinsicLeft_errors; |
| 892 | cv::internal::IntrinsicParams intrinsicRight_errors; |
| 893 | |
| 894 | Matx33d _K1, _K2; |
| 895 | Vec4d _D1, _D2; |
    if (!K1.empty()) K1.getMat().convertTo(_K1, CV_64FC1);
    if (!D1.empty()) D1.getMat().convertTo(_D1, CV_64FC1);
    if (!K2.empty()) K2.getMat().convertTo(_K2, CV_64FC1);
    if (!D2.empty()) D2.getMat().convertTo(_D2, CV_64FC1);
| 900 | |
| 901 | std::vector<Vec3d> rvecs1(n_images), tvecs1(n_images), rvecs2(n_images), tvecs2(n_images); |
| 902 | |
| 903 | if (!(flags & CALIB_FIX_INTRINSIC)) |
| 904 | { |
        calibrate(objectPoints, imagePoints1, imageSize, _K1, _D1, rvecs1, tvecs1, flags, TermCriteria(3, 20, 1e-6));
        calibrate(objectPoints, imagePoints2, imageSize, _K2, _D2, rvecs2, tvecs2, flags, TermCriteria(3, 20, 1e-6));
    }

    intrinsicLeft.Init(Vec2d(_K1(0,0), _K1(1, 1)), Vec2d(_K1(0,2), _K1(1, 2)),
                       Vec4d(_D1[0], _D1[1], _D1[2], _D1[3]), _K1(0, 1) / _K1(0, 0));

    intrinsicRight.Init(Vec2d(_K2(0,0), _K2(1, 1)), Vec2d(_K2(0,2), _K2(1, 2)),
                        Vec4d(_D2[0], _D2[1], _D2[2], _D2[3]), _K2(0, 1) / _K2(0, 0));
| 914 | |
| 915 | if ((flags & CALIB_FIX_INTRINSIC)) |
| 916 | { |
        cv::internal::CalibrateExtrinsics(objectPoints, imagePoints1, intrinsicLeft, check_cond, thresh_cond, rvecs1, tvecs1);
        cv::internal::CalibrateExtrinsics(objectPoints, imagePoints2, intrinsicRight, check_cond, thresh_cond, rvecs2, tvecs2);
| 919 | } |
| 920 | |
| 921 | intrinsicLeft.isEstimate[0] = flags & CALIB_FIX_INTRINSIC ? 0 : 1; |
| 922 | intrinsicLeft.isEstimate[1] = flags & CALIB_FIX_INTRINSIC ? 0 : 1; |
| 923 | intrinsicLeft.isEstimate[2] = flags & CALIB_FIX_INTRINSIC ? 0 : 1; |
| 924 | intrinsicLeft.isEstimate[3] = flags & CALIB_FIX_INTRINSIC ? 0 : 1; |
| 925 | intrinsicLeft.isEstimate[4] = flags & (CALIB_FIX_SKEW | CALIB_FIX_INTRINSIC) ? 0 : 1; |
| 926 | intrinsicLeft.isEstimate[5] = flags & (CALIB_FIX_K1 | CALIB_FIX_INTRINSIC) ? 0 : 1; |
| 927 | intrinsicLeft.isEstimate[6] = flags & (CALIB_FIX_K2 | CALIB_FIX_INTRINSIC) ? 0 : 1; |
| 928 | intrinsicLeft.isEstimate[7] = flags & (CALIB_FIX_K3 | CALIB_FIX_INTRINSIC) ? 0 : 1; |
| 929 | intrinsicLeft.isEstimate[8] = flags & (CALIB_FIX_K4 | CALIB_FIX_INTRINSIC) ? 0 : 1; |
| 930 | |
| 931 | intrinsicRight.isEstimate[0] = flags & CALIB_FIX_INTRINSIC ? 0 : 1; |
| 932 | intrinsicRight.isEstimate[1] = flags & CALIB_FIX_INTRINSIC ? 0 : 1; |
| 933 | intrinsicRight.isEstimate[2] = flags & CALIB_FIX_INTRINSIC ? 0 : 1; |
| 934 | intrinsicRight.isEstimate[3] = flags & CALIB_FIX_INTRINSIC ? 0 : 1; |
| 935 | intrinsicRight.isEstimate[4] = flags & (CALIB_FIX_SKEW | CALIB_FIX_INTRINSIC) ? 0 : 1; |
| 936 | intrinsicRight.isEstimate[5] = flags & (CALIB_FIX_K1 | CALIB_FIX_INTRINSIC) ? 0 : 1; |
| 937 | intrinsicRight.isEstimate[6] = flags & (CALIB_FIX_K2 | CALIB_FIX_INTRINSIC) ? 0 : 1; |
| 938 | intrinsicRight.isEstimate[7] = flags & (CALIB_FIX_K3 | CALIB_FIX_INTRINSIC) ? 0 : 1; |
| 939 | intrinsicRight.isEstimate[8] = flags & (CALIB_FIX_K4 | CALIB_FIX_INTRINSIC) ? 0 : 1; |
| 940 | |
| 941 | intrinsicLeft_errors.isEstimate = intrinsicLeft.isEstimate; |
| 942 | intrinsicRight_errors.isEstimate = intrinsicRight.isEstimate; |
| 943 | |
| 944 | std::vector<uchar> selectedParams; |
| 945 | std::vector<uchar> tmp(6 * (n_images + 1), 1); |
    selectedParams.insert(selectedParams.end(), intrinsicLeft.isEstimate.begin(), intrinsicLeft.isEstimate.end());
    selectedParams.insert(selectedParams.end(), intrinsicRight.isEstimate.begin(), intrinsicRight.isEstimate.end());
    selectedParams.insert(selectedParams.end(), tmp.begin(), tmp.end());
| 949 | |
| 950 | //Init values for rotation and translation between two views |
| 951 | cv::Mat om_list(1, n_images, CV_64FC3), T_list(1, n_images, CV_64FC3); |
| 952 | cv::Mat om_ref, R_ref, T_ref, R1, R2; |
| 953 | for (int image_idx = 0; image_idx < n_images; ++image_idx) |
| 954 | { |
        cv::Rodrigues(rvecs1[image_idx], R1);
        cv::Rodrigues(rvecs2[image_idx], R2);
        R_ref = R2 * R1.t();
        T_ref = cv::Mat(tvecs2[image_idx]) - R_ref * cv::Mat(tvecs1[image_idx]);
        cv::Rodrigues(R_ref, om_ref);
        om_ref.reshape(3, 1).copyTo(om_list.col(image_idx));
        T_ref.reshape(3, 1).copyTo(T_list.col(image_idx));
    }
    cv::Vec3d omcur = cv::internal::median3d(om_list);
    cv::Vec3d Tcur = cv::internal::median3d(T_list);

    cv::Mat J = cv::Mat::zeros(4 * n_points * n_images, 18 + 6 * (n_images + 1), CV_64FC1),
            e = cv::Mat::zeros(4 * n_points * n_images, 1, CV_64FC1), Jkk, ekk;
| 968 | |
| 969 | for(int iter = 0; ; ++iter) |
| 970 | { |
| 971 | if ((criteria.type == 1 && iter >= criteria.maxCount) || |
| 972 | (criteria.type == 2 && change <= criteria.epsilon) || |
| 973 | (criteria.type == 3 && (change <= criteria.epsilon || iter >= criteria.maxCount))) |
| 974 | break; |
| 975 | |
        J.create(4 * n_points * n_images, 18 + 6 * (n_images + 1), CV_64FC1);
        e.create(4 * n_points * n_images, 1, CV_64FC1);
        Jkk.create(4 * n_points, 18 + 6 * (n_images + 1), CV_64FC1);
        ekk.create(4 * n_points, 1, CV_64FC1);
| 980 | |
| 981 | cv::Mat omr, Tr, domrdomckk, domrdTckk, domrdom, domrdT, dTrdomckk, dTrdTckk, dTrdom, dTrdT; |
| 982 | |
| 983 | for (int image_idx = 0; image_idx < n_images; ++image_idx) |
| 984 | { |
| 985 | Jkk = cv::Mat::zeros(rows: 4 * n_points, cols: 18 + 6 * (n_images + 1), CV_64FC1); |
| 986 | |
| 987 | cv::Mat object = objectPoints.getMat(i: image_idx).clone(); |
| 988 | cv::Mat imageLeft = imagePoints1.getMat(i: image_idx).clone(); |
| 989 | cv::Mat imageRight = imagePoints2.getMat(i: image_idx).clone(); |
| 990 | cv::Mat jacobians, projected; |
| 991 | |
| 992 | //left camera jacobian |
| 993 | cv::Mat rvec = cv::Mat(rvecs1[image_idx]); |
| 994 | cv::Mat tvec = cv::Mat(tvecs1[image_idx]); |
| 995 | cv::internal::projectPoints(objectPoints: object, imagePoints: projected, rvec: rvec, tvec: tvec, param: intrinsicLeft, jacobian: jacobians); |
| 996 | cv::Mat(cv::Mat((imageLeft - projected).t()).reshape(cn: 1, rows: 1).t()).copyTo(m: ekk.rowRange(startrow: 0, endrow: 2 * n_points)); |
| 997 | jacobians.colRange(startcol: 8, endcol: 11).copyTo(m: Jkk.colRange(startcol: 24 + image_idx * 6, endcol: 27 + image_idx * 6).rowRange(startrow: 0, endrow: 2 * n_points)); |
| 998 | jacobians.colRange(startcol: 11, endcol: 14).copyTo(m: Jkk.colRange(startcol: 27 + image_idx * 6, endcol: 30 + image_idx * 6).rowRange(startrow: 0, endrow: 2 * n_points)); |
| 999 | jacobians.colRange(startcol: 0, endcol: 2).copyTo(m: Jkk.colRange(startcol: 0, endcol: 2).rowRange(startrow: 0, endrow: 2 * n_points)); |
| 1000 | jacobians.colRange(startcol: 2, endcol: 4).copyTo(m: Jkk.colRange(startcol: 2, endcol: 4).rowRange(startrow: 0, endrow: 2 * n_points)); |
| 1001 | jacobians.colRange(startcol: 4, endcol: 8).copyTo(m: Jkk.colRange(startcol: 5, endcol: 9).rowRange(startrow: 0, endrow: 2 * n_points)); |
| 1002 | jacobians.col(x: 14).copyTo(m: Jkk.col(x: 4).rowRange(startrow: 0, endrow: 2 * n_points)); |
| 1003 | |
| 1004 | //right camera jacobian |
| 1005 | cv::internal::compose_motion(om1: rvec, T1: tvec, om2: omcur, T2: Tcur, om3&: omr, T3&: Tr, dom3dom1&: domrdomckk, dom3dT1&: domrdTckk, dom3dom2&: domrdom, dom3dT2&: domrdT, dT3dom1&: dTrdomckk, dT3dT1&: dTrdTckk, dT3dom2&: dTrdom, dT3dT2&: dTrdT); |
| 1006 | rvec = cv::Mat(rvecs2[image_idx]); |
| 1007 | tvec = cv::Mat(tvecs2[image_idx]); |
| 1008 | |
| 1009 | cv::internal::projectPoints(objectPoints: object, imagePoints: projected, rvec: omr, tvec: Tr, param: intrinsicRight, jacobian: jacobians); |
| 1010 | cv::Mat(cv::Mat((imageRight - projected).t()).reshape(cn: 1, rows: 1).t()).copyTo(m: ekk.rowRange(startrow: 2 * n_points, endrow: 4 * n_points)); |
| 1011 | cv::Mat dxrdom = jacobians.colRange(startcol: 8, endcol: 11) * domrdom + jacobians.colRange(startcol: 11, endcol: 14) * dTrdom; |
| 1012 | cv::Mat dxrdT = jacobians.colRange(startcol: 8, endcol: 11) * domrdT + jacobians.colRange(startcol: 11, endcol: 14)* dTrdT; |
| 1013 | cv::Mat dxrdomckk = jacobians.colRange(startcol: 8, endcol: 11) * domrdomckk + jacobians.colRange(startcol: 11, endcol: 14) * dTrdomckk; |
| 1014 | cv::Mat dxrdTckk = jacobians.colRange(startcol: 8, endcol: 11) * domrdTckk + jacobians.colRange(startcol: 11, endcol: 14) * dTrdTckk; |
| 1015 | |
| 1016 | dxrdom.copyTo(m: Jkk.colRange(startcol: 18, endcol: 21).rowRange(startrow: 2 * n_points, endrow: 4 * n_points)); |
| 1017 | dxrdT.copyTo(m: Jkk.colRange(startcol: 21, endcol: 24).rowRange(startrow: 2 * n_points, endrow: 4 * n_points)); |
| 1018 | dxrdomckk.copyTo(m: Jkk.colRange(startcol: 24 + image_idx * 6, endcol: 27 + image_idx * 6).rowRange(startrow: 2 * n_points, endrow: 4 * n_points)); |
| 1019 | dxrdTckk.copyTo(m: Jkk.colRange(startcol: 27 + image_idx * 6, endcol: 30 + image_idx * 6).rowRange(startrow: 2 * n_points, endrow: 4 * n_points)); |
| 1020 | jacobians.colRange(startcol: 0, endcol: 2).copyTo(m: Jkk.colRange(startcol: 9 + 0, endcol: 9 + 2).rowRange(startrow: 2 * n_points, endrow: 4 * n_points)); |
| 1021 | jacobians.colRange(startcol: 2, endcol: 4).copyTo(m: Jkk.colRange(startcol: 9 + 2, endcol: 9 + 4).rowRange(startrow: 2 * n_points, endrow: 4 * n_points)); |
| 1022 | jacobians.colRange(startcol: 4, endcol: 8).copyTo(m: Jkk.colRange(startcol: 9 + 5, endcol: 9 + 9).rowRange(startrow: 2 * n_points, endrow: 4 * n_points)); |
| 1023 | jacobians.col(x: 14).copyTo(m: Jkk.col(x: 9 + 4).rowRange(startrow: 2 * n_points, endrow: 4 * n_points)); |
| 1024 | |
            //check goodness of the stereo pair
| 1026 | double abs_max = 0; |
| 1027 | for (int i = 0; i < 4 * n_points; i++) |
| 1028 | { |
| 1029 | if (fabs(x: ekk.at<double>(i0: i)) > abs_max) |
| 1030 | { |
| 1031 | abs_max = fabs(x: ekk.at<double>(i0: i)); |
| 1032 | } |
| 1033 | } |
| 1034 | |
| 1035 | CV_Assert(abs_max < threshold); // bad stereo pair |
| 1036 | |
| 1037 | Jkk.copyTo(m: J.rowRange(startrow: image_idx * 4 * n_points, endrow: (image_idx + 1) * 4 * n_points)); |
| 1038 | ekk.copyTo(m: e.rowRange(startrow: image_idx * 4 * n_points, endrow: (image_idx + 1) * 4 * n_points)); |
| 1039 | } |
| 1040 | |
| 1041 | cv::Vec6d oldTom(Tcur[0], Tcur[1], Tcur[2], omcur[0], omcur[1], omcur[2]); |
| 1042 | |
| 1043 | //update all parameters |
        cv::subMatrix(J, J, selectedParams, std::vector<uchar>(J.rows, 1));
        int a = cv::countNonZero(intrinsicLeft.isEstimate);
        int b = cv::countNonZero(intrinsicRight.isEstimate);
        cv::Mat deltas;
        solve(J.t() * J, J.t()*e, deltas);
        if (a > 0)
            intrinsicLeft = intrinsicLeft + deltas.rowRange(0, a);
        if (b > 0)
            intrinsicRight = intrinsicRight + deltas.rowRange(a, a + b);
        omcur = omcur + cv::Vec3d(deltas.rowRange(a + b, a + b + 3));
        Tcur = Tcur + cv::Vec3d(deltas.rowRange(a + b + 3, a + b + 6));
        for (int image_idx = 0; image_idx < n_images; ++image_idx)
        {
            rvecs1[image_idx] = cv::Mat(cv::Mat(rvecs1[image_idx]) + deltas.rowRange(a + b + 6 + image_idx * 6, a + b + 9 + image_idx * 6));
            tvecs1[image_idx] = cv::Mat(cv::Mat(tvecs1[image_idx]) + deltas.rowRange(a + b + 9 + image_idx * 6, a + b + 12 + image_idx * 6));
| 1059 | } |
| 1060 | |
| 1061 | cv::Vec6d newTom(Tcur[0], Tcur[1], Tcur[2], omcur[0], omcur[1], omcur[2]); |
        change = cv::norm(newTom - oldTom) / cv::norm(newTom);
| 1063 | } |
| 1064 | |
| 1065 | double rms = 0; |
| 1066 | const Vec2d* ptr_e = e.ptr<Vec2d>(); |
| 1067 | for (size_t i = 0; i < e.total() / 2; i++) |
| 1068 | { |
| 1069 | rms += ptr_e[i][0] * ptr_e[i][0] + ptr_e[i][1] * ptr_e[i][1]; |
| 1070 | } |
| 1071 | |
| 1072 | rms /= ((double)e.total() / 2.0); |
    rms = sqrt(rms);
| 1074 | |
| 1075 | _K1 = Matx33d(intrinsicLeft.f[0], intrinsicLeft.f[0] * intrinsicLeft.alpha, intrinsicLeft.c[0], |
| 1076 | 0, intrinsicLeft.f[1], intrinsicLeft.c[1], |
| 1077 | 0, 0, 1); |
| 1078 | |
| 1079 | _K2 = Matx33d(intrinsicRight.f[0], intrinsicRight.f[0] * intrinsicRight.alpha, intrinsicRight.c[0], |
| 1080 | 0, intrinsicRight.f[1], intrinsicRight.c[1], |
| 1081 | 0, 0, 1); |
| 1082 | |
| 1083 | Mat _R; |
    Rodrigues(omcur, _R);
| 1085 | |
    if (K1.needed()) cv::Mat(_K1).convertTo(K1, K1.empty() ? CV_64FC1 : K1.type());
    if (K2.needed()) cv::Mat(_K2).convertTo(K2, K2.empty() ? CV_64FC1 : K2.type());
    if (D1.needed()) cv::Mat(intrinsicLeft.k).convertTo(D1, D1.empty() ? CV_64FC1 : D1.type());
    if (D2.needed()) cv::Mat(intrinsicRight.k).convertTo(D2, D2.empty() ? CV_64FC1 : D2.type());
    if (R.needed()) _R.convertTo(R, R.empty() ? CV_64FC1 : R.type());
    if (T.needed()) cv::Mat(Tcur).convertTo(T, T.empty() ? CV_64FC1 : T.type());
| 1092 | if (rvecs.isMatVector()) |
| 1093 | { |
        if(rvecs.empty())
            rvecs.create(n_images, 1, CV_64FC3);

        if(tvecs.empty())
            tvecs.create(n_images, 1, CV_64FC3);

        for(int i = 0; i < n_images; i++ )
        {
            rvecs.create(3, 1, CV_64F, i, true);
            tvecs.create(3, 1, CV_64F, i, true);
            memcpy(rvecs.getMat(i).ptr(), rvecs1[i].val, sizeof(Vec3d));
            memcpy(tvecs.getMat(i).ptr(), tvecs1[i].val, sizeof(Vec3d));
| 1106 | } |
| 1107 | } |
| 1108 | else |
| 1109 | { |
        if (rvecs.needed()) cv::Mat(rvecs1).convertTo(rvecs, rvecs.empty() ? CV_64FC3 : rvecs.type());
        if (tvecs.needed()) cv::Mat(tvecs1).convertTo(tvecs, tvecs.empty() ? CV_64FC3 : tvecs.type());
| 1112 | } |
| 1113 | |
| 1114 | return rms; |
| 1115 | } |
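/*
    Usage sketch (illustrative only). The per-view corner lists are assumed to be
    collected beforehand; K1/D1/K2/D2 are estimated above unless CALIB_FIX_INTRINSIC
    is passed:

        std::vector<std::vector<cv::Point3d> > objectPts;
        std::vector<std::vector<cv::Point2d> > leftPts, rightPts;
        cv::Matx33d K1, K2;
        cv::Vec4d D1, D2;
        cv::Matx33d R;
        cv::Vec3d T;
        double rms = cv::fisheye::stereoCalibrate(objectPts, leftPts, rightPts,
                                                  K1, D1, K2, D2, cv::Size(640, 480), R, T,
                                                  cv::fisheye::CALIB_FIX_SKEW);
*/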
| 1116 | |
| 1117 | ////////////////////////////////////////////////////////////////////////////////////////////////////////////// |
| 1118 | /// cv::fisheye::solvePnP |
| 1119 | |
| 1120 | bool cv::fisheye::solvePnP( InputArray opoints, InputArray ipoints, |
| 1121 | InputArray cameraMatrix, InputArray distCoeffs, |
| 1122 | OutputArray rvec, OutputArray tvec, bool useExtrinsicGuess, |
| 1123 | int flags, TermCriteria criteria) |
| 1124 | { |
| 1125 | |
| 1126 | Mat imagePointsNormalized; |
    cv::fisheye::undistortPoints(ipoints, imagePointsNormalized, cameraMatrix, distCoeffs, noArray(), cameraMatrix, criteria);
    return cv::solvePnP(opoints, imagePointsNormalized, cameraMatrix, noArray(), rvec, tvec, useExtrinsicGuess, flags);
| 1129 | } |
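/*
    Usage sketch (illustrative only; the correspondences are assumed). Distorted image
    points are first undistorted above, then passed to the pinhole solver:

        std::vector<cv::Point3d> objectPts;   // known 3D points in the model frame
        std::vector<cv::Point2d> imagePts;    // their detected (distorted) pixel positions
        cv::Matx33d K(300, 0, 320, 0, 300, 240, 0, 0, 1);
        cv::Vec4d D(0.05, -0.01, 0.002, 0.0);
        cv::Vec3d rvec, tvec;
        bool ok = cv::fisheye::solvePnP(objectPts, imagePts, K, D, rvec, tvec, false, cv::SOLVEPNP_ITERATIVE);
*/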
| 1130 | |
| 1131 | ////////////////////////////////////////////////////////////////////////////////////////////////////////////// |
| 1132 | /// cv::fisheye::solvePnPRansac |
| 1133 | |
| 1134 | bool cv::fisheye::solvePnPRansac( InputArray opoints, InputArray ipoints, |
| 1135 | InputArray cameraMatrix, InputArray distCoeffs, |
| 1136 | OutputArray rvec, OutputArray tvec, bool useExtrinsicGuess, |
| 1137 | int iterationsCount, float reprojectionError, |
| 1138 | double confidence, OutputArray inliers, |
| 1139 | int flags, TermCriteria criteria) |
| 1140 | { |
| 1141 | Mat imagePointsNormalized; |
    cv::fisheye::undistortPoints(ipoints, imagePointsNormalized, cameraMatrix, distCoeffs, noArray(), cameraMatrix, criteria);
    return cv::solvePnPRansac(opoints, imagePointsNormalized, cameraMatrix, noArray(), rvec, tvec,
                              useExtrinsicGuess, iterationsCount, reprojectionError, confidence, inliers, flags);
| 1145 | } |
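/*
    Usage sketch (illustrative only; inputs are assumed, mirroring the solvePnP example
    above but with outlier rejection):

        std::vector<cv::Point3d> objectPts;
        std::vector<cv::Point2d> imagePts;
        cv::Matx33d K(300, 0, 320, 0, 300, 240, 0, 0, 1);
        cv::Vec4d D(0.05, -0.01, 0.002, 0.0);
        cv::Vec3d rvec, tvec;
        std::vector<int> inliers;
        bool ok = cv::fisheye::solvePnPRansac(objectPts, imagePts, K, D, rvec, tvec,
                                              false, 100, 8.f, 0.99, inliers);
*/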
| 1146 | |
| 1147 | namespace cv{ namespace { |
| 1148 | void subMatrix(const Mat& src, Mat& dst, const std::vector<uchar>& cols, const std::vector<uchar>& rows) |
| 1149 | { |
| 1150 | CV_Assert(src.channels() == 1); |
| 1151 | |
    int nonzeros_cols = cv::countNonZero(cols);
    Mat tmp(src.rows, nonzeros_cols, CV_64F);

    for (int i = 0, j = 0; i < (int)cols.size(); i++)
    {
        if (cols[i])
        {
            src.col(i).copyTo(tmp.col(j++));
        }
    }

    int nonzeros_rows = cv::countNonZero(rows);
    dst.create(nonzeros_rows, nonzeros_cols, CV_64F);
    for (int i = 0, j = 0; i < (int)rows.size(); i++)
    {
        if (rows[i])
        {
            tmp.row(i).copyTo(dst.row(j++));
        }
    }
}
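/*
    Illustration of subMatrix above (values assumed): with cols = {1,0,1} and
    rows = {1,1,0}, a 3x3 source

        [a b c]
        [d e f]        becomes        [a c]
        [g h i]                       [d f]

    i.e. only the columns and rows whose selector entry is non-zero are kept, in order.
*/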
| 1173 | |
| 1174 | }} |
| 1175 | |
cv::internal::IntrinsicParams::IntrinsicParams():
    f(Vec2d::all(0)), c(Vec2d::all(0)), k(Vec4d::all(0)), alpha(0), isEstimate(9,0)
| 1178 | { |
| 1179 | } |
| 1180 | |
| 1181 | cv::internal::IntrinsicParams::IntrinsicParams(Vec2d _f, Vec2d _c, Vec4d _k, double _alpha): |
| 1182 | f(_f), c(_c), k(_k), alpha(_alpha), isEstimate(9,0) |
| 1183 | { |
| 1184 | } |
| 1185 | |
| 1186 | cv::internal::IntrinsicParams cv::internal::IntrinsicParams::operator+(const Mat& a) |
| 1187 | { |
| 1188 | CV_Assert(a.type() == CV_64FC1); |
| 1189 | IntrinsicParams tmp; |
| 1190 | const double* ptr = a.ptr<double>(); |
| 1191 | |
| 1192 | int j = 0; |
| 1193 | tmp.f[0] = this->f[0] + (isEstimate[0] ? ptr[j++] : 0); |
| 1194 | tmp.f[1] = this->f[1] + (isEstimate[1] ? ptr[j++] : 0); |
| 1195 | tmp.c[0] = this->c[0] + (isEstimate[2] ? ptr[j++] : 0); |
| 1196 | tmp.c[1] = this->c[1] + (isEstimate[3] ? ptr[j++] : 0); |
| 1197 | tmp.alpha = this->alpha + (isEstimate[4] ? ptr[j++] : 0); |
| 1198 | tmp.k[0] = this->k[0] + (isEstimate[5] ? ptr[j++] : 0); |
| 1199 | tmp.k[1] = this->k[1] + (isEstimate[6] ? ptr[j++] : 0); |
| 1200 | tmp.k[2] = this->k[2] + (isEstimate[7] ? ptr[j++] : 0); |
| 1201 | tmp.k[3] = this->k[3] + (isEstimate[8] ? ptr[j++] : 0); |
| 1202 | |
| 1203 | tmp.isEstimate = isEstimate; |
| 1204 | return tmp; |
| 1205 | } |
| 1206 | |
| 1207 | cv::internal::IntrinsicParams& cv::internal::IntrinsicParams::operator =(const Mat& a) |
| 1208 | { |
| 1209 | CV_Assert(a.type() == CV_64FC1); |
| 1210 | const double* ptr = a.ptr<double>(); |
| 1211 | |
| 1212 | int j = 0; |
| 1213 | |
| 1214 | this->f[0] = isEstimate[0] ? ptr[j++] : 0; |
| 1215 | this->f[1] = isEstimate[1] ? ptr[j++] : 0; |
| 1216 | this->c[0] = isEstimate[2] ? ptr[j++] : 0; |
| 1217 | this->c[1] = isEstimate[3] ? ptr[j++] : 0; |
| 1218 | this->alpha = isEstimate[4] ? ptr[j++] : 0; |
| 1219 | this->k[0] = isEstimate[5] ? ptr[j++] : 0; |
| 1220 | this->k[1] = isEstimate[6] ? ptr[j++] : 0; |
| 1221 | this->k[2] = isEstimate[7] ? ptr[j++] : 0; |
| 1222 | this->k[3] = isEstimate[8] ? ptr[j++] : 0; |
| 1223 | |
| 1224 | return *this; |
| 1225 | } |
| 1226 | |
| 1227 | void cv::internal::IntrinsicParams::Init(const cv::Vec2d& _f, const cv::Vec2d& _c, const cv::Vec4d& _k, const double& _alpha) |
| 1228 | { |
| 1229 | this->c = _c; |
| 1230 | this->f = _f; |
| 1231 | this->k = _k; |
| 1232 | this->alpha = _alpha; |
| 1233 | } |
| 1234 | |
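// Thin wrapper that rebuilds the camera matrix from IntrinsicParams
// (skew term = fx * alpha) and forwards to cv::fisheye::projectPoints.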
| 1235 | void cv::internal::projectPoints(cv::InputArray objectPoints, cv::OutputArray imagePoints, |
| 1236 | cv::InputArray _rvec,cv::InputArray _tvec, |
| 1237 | const IntrinsicParams& param, cv::OutputArray jacobian) |
| 1238 | { |
| 1239 | CV_INSTRUMENT_REGION(); |
| 1240 | |
| 1241 | CV_Assert(!objectPoints.empty() && (objectPoints.type() == CV_32FC3 || objectPoints.type() == CV_64FC3)); |
| 1242 | Matx33d K(param.f[0], param.f[0] * param.alpha, param.c[0], |
| 1243 | 0, param.f[1], param.c[1], |
| 1244 | 0, 0, 1); |
| 1245 | fisheye::projectPoints(objectPoints, imagePoints, _rvec, _tvec, K, param.k, param.alpha, jacobian);
| 1246 | } |
| 1247 | |
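// Gauss-Newton refinement of a single view's pose (rvec, tvec). Each iteration
// reprojects the object points, solves for the least-squares update using the
// extrinsic part of the Jacobian (columns 8..13), and stops when the relative
// update drops below 1e-10, MaxIter is reached, or the Jacobian becomes
// ill-conditioned (condition number above thresh_cond).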
| 1248 | void cv::internal::ComputeExtrinsicRefine(const Mat& imagePoints, const Mat& objectPoints, Mat& rvec, |
| 1249 | Mat& tvec, Mat& J, const int MaxIter, |
| 1250 | const IntrinsicParams& param, const double thresh_cond) |
| 1251 | { |
| 1252 | CV_Assert(!objectPoints.empty() && objectPoints.type() == CV_64FC3); |
| 1253 | CV_Assert(!imagePoints.empty() && imagePoints.type() == CV_64FC2); |
| 1254 | CV_Assert(rvec.total() > 2 && tvec.total() > 2); |
| 1255 | Vec6d extrinsics(rvec.at<double>(0), rvec.at<double>(1), rvec.at<double>(2),
| 1256 | tvec.at<double>(0), tvec.at<double>(1), tvec.at<double>(2));
| 1257 | double change = 1; |
| 1258 | int iter = 0; |
| 1259 | |
| 1260 | while (change > 1e-10 && iter < MaxIter) |
| 1261 | { |
| 1262 | std::vector<Point2d> x; |
| 1263 | Mat jacobians; |
| 1264 | projectPoints(objectPoints, x, rvec, tvec, param, jacobians);
| 1265 |
| 1266 | Mat ex = imagePoints - Mat(x).t();
| 1267 | ex = ex.reshape(1, 2);
| 1268 |
| 1269 | J = jacobians.colRange(8, 14).clone();
| 1270 |
| 1271 | SVD svd(J, SVD::NO_UV);
| 1272 | double condJJ = svd.w.at<double>(0)/svd.w.at<double>(5);
| 1273 | |
| 1274 | if (condJJ > thresh_cond) |
| 1275 | change = 0; |
| 1276 | else |
| 1277 | { |
| 1278 | Vec6d param_innov; |
| 1279 | solve(J, ex.reshape(1, (int)ex.total()), param_innov, DECOMP_SVD + DECOMP_NORMAL);
| 1280 |
| 1281 | Vec6d param_up = extrinsics + param_innov;
| 1282 | change = norm(param_innov)/norm(param_up);
| 1283 | extrinsics = param_up; |
| 1284 | iter = iter + 1; |
| 1285 | |
| 1286 | rvec = Mat(Vec3d(extrinsics.val)); |
| 1287 | tvec = Mat(Vec3d(extrinsics.val+3)); |
| 1288 | } |
| 1289 | } |
| 1290 | } |
| 1291 | |
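// Estimates the homography mapping the planar points M to the image points m
// (both given in homogeneous/normalized form) with a DLT solve on normalized
// coordinates (Hnorm), followed, when more than four points are available, by
// ten Gauss-Newton iterations that refine the eight free homography parameters
// against the reprojection error.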
| 1292 | cv::Mat cv::internal::ComputeHomography(Mat m, Mat M) |
| 1293 | { |
| 1294 | CV_INSTRUMENT_REGION(); |
| 1295 | |
| 1296 | int Np = m.cols; |
| 1297 | |
| 1298 | if (m.rows < 3) |
| 1299 | { |
| 1300 | vconcat(m, Mat::ones(1, Np, CV_64FC1), m);
| 1301 | }
| 1302 | if (M.rows < 3)
| 1303 | {
| 1304 | vconcat(M, Mat::ones(1, Np, CV_64FC1), M);
| 1305 | }
| 1306 |
| 1307 | divide(m, Mat::ones(3, 1, CV_64FC1) * m.row(2), m);
| 1308 | divide(M, Mat::ones(3, 1, CV_64FC1) * M.row(2), M);
| 1309 | |
| 1310 | Mat ax = m.row(0).clone();
| 1311 | Mat ay = m.row(1).clone();
| 1312 |
| 1313 | double mxx = mean(ax)[0];
| 1314 | double myy = mean(ay)[0];
| 1315 |
| 1316 | ax = ax - mxx;
| 1317 | ay = ay - myy;
| 1318 |
| 1319 | double scxx = mean(abs(ax))[0];
| 1320 | double scyy = mean(abs(ay))[0];
| 1321 | |
| 1322 | Mat Hnorm (Matx33d( 1/scxx, 0.0, -mxx/scxx, |
| 1323 | 0.0, 1/scyy, -myy/scyy, |
| 1324 | 0.0, 0.0, 1.0 )); |
| 1325 | |
| 1326 | Mat inv_Hnorm (Matx33d( scxx, 0, mxx, |
| 1327 | 0, scyy, myy, |
| 1328 | 0, 0, 1 )); |
| 1329 | Mat mn = Hnorm * m; |
| 1330 | |
| 1331 | Mat L = Mat::zeros(2*Np, 9, CV_64FC1);
| 1332 | |
| 1333 | for (int i = 0; i < Np; ++i) |
| 1334 | { |
| 1335 | for (int j = 0; j < 3; j++) |
| 1336 | { |
| 1337 | L.at<double>(2 * i, j) = M.at<double>(j, i);
| 1338 | L.at<double>(2 * i + 1, j + 3) = M.at<double>(j, i);
| 1339 | L.at<double>(2 * i, j + 6) = -mn.at<double>(0, i) * M.at<double>(j, i);
| 1340 | L.at<double>(2 * i + 1, j + 6) = -mn.at<double>(1, i) * M.at<double>(j, i);
| 1341 | } |
| 1342 | } |
| 1343 | |
| 1344 | if (Np > 4) L = L.t() * L; |
| 1345 | SVD svd(L); |
| 1346 | Mat hh = svd.vt.row(8) / svd.vt.row(8).at<double>(8);
| 1347 | Mat Hrem = hh.reshape(1, 3);
| 1348 | Mat H = inv_Hnorm * Hrem; |
| 1349 | |
| 1350 | if (Np > 4) |
| 1351 | { |
| 1352 | Mat hhv = H.reshape(1, 9)(Rect(0, 0, 1, 8)).clone();
| 1353 | for (int iter = 0; iter < 10; iter++) |
| 1354 | { |
| 1355 | Mat mrep = H * M; |
| 1356 | Mat J = Mat::zeros(2 * Np, 8, CV_64FC1);
| 1357 | Mat MMM;
| 1358 | divide(M, Mat::ones(3, 1, CV_64FC1) * mrep(Rect(0, 2, mrep.cols, 1)), MMM);
| 1359 | divide(mrep, Mat::ones(3, 1, CV_64FC1) * mrep(Rect(0, 2, mrep.cols, 1)), mrep);
| 1360 | Mat m_err = m(Rect(0,0, m.cols, 2)) - mrep(Rect(0,0, mrep.cols, 2));
| 1361 | m_err = Mat(m_err.t()).reshape(1, m_err.cols * m_err.rows);
| 1362 | Mat MMM2, MMM3;
| 1363 | multiply(Mat::ones(3, 1, CV_64FC1) * mrep(Rect(0, 0, mrep.cols, 1)), MMM, MMM2);
| 1364 | multiply(Mat::ones(3, 1, CV_64FC1) * mrep(Rect(0, 1, mrep.cols, 1)), MMM, MMM3);
| 1365 | |
| 1366 | for (int i = 0; i < Np; ++i) |
| 1367 | { |
| 1368 | for (int j = 0; j < 3; ++j) |
| 1369 | { |
| 1370 | J.at<double>(2 * i, j) = -MMM.at<double>(j, i);
| 1371 | J.at<double>(2 * i + 1, j + 3) = -MMM.at<double>(j, i);
| 1372 | } |
| 1373 | |
| 1374 | for (int j = 0; j < 2; ++j) |
| 1375 | { |
| 1376 | J.at<double>(2 * i, j + 6) = MMM2.at<double>(j, i);
| 1377 | J.at<double>(2 * i + 1, j + 6) = MMM3.at<double>(j, i);
| 1378 | } |
| 1379 | } |
| 1380 | divide(M, Mat::ones(3, 1, CV_64FC1) * mrep(Rect(0,2,mrep.cols,1)), MMM);
| 1381 | Mat hh_innov = (J.t() * J).inv() * (J.t()) * m_err; |
| 1382 | Mat hhv_up = hhv - hh_innov; |
| 1383 | Mat tmp; |
| 1384 | vconcat(hhv_up, Mat::ones(1, 1, CV_64FC1), tmp);
| 1385 | Mat H_up = tmp.reshape(1, 3);
| 1386 | hhv = hhv_up; |
| 1387 | H = H_up; |
| 1388 | } |
| 1389 | } |
| 1390 | return H; |
| 1391 | } |
| 1392 | |
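// Converts distorted pixel coordinates to ideal normalized camera coordinates:
// removes the principal point, focal lengths and skew (alpha), then undoes the
// fisheye distortion using an identity camera matrix.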
| 1393 | cv::Mat cv::internal::NormalizePixels(const Mat& imagePoints, const IntrinsicParams& param) |
| 1394 | { |
| 1395 | CV_INSTRUMENT_REGION(); |
| 1396 | |
| 1397 | CV_Assert(!imagePoints.empty() && imagePoints.type() == CV_64FC2); |
| 1398 | |
| 1399 | Mat distorted((int)imagePoints.total(), 1, CV_64FC2), undistorted; |
| 1400 | const Vec2d* ptr = imagePoints.ptr<Vec2d>(); |
| 1401 | Vec2d* ptr_d = distorted.ptr<Vec2d>(); |
| 1402 | for (size_t i = 0; i < imagePoints.total(); ++i) |
| 1403 | { |
| 1404 | ptr_d[i] = (ptr[i] - param.c).mul(Vec2d(1.0 / param.f[0], 1.0 / param.f[1]));
| 1405 | ptr_d[i][0] -= param.alpha * ptr_d[i][1];
| 1406 | }
| 1407 | cv::fisheye::undistortPoints(distorted, undistorted, Matx33d::eye(), param.k);
| 1408 | return undistorted; |
| 1409 | } |
| 1410 | |
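// Initial pose estimate for one view: fits a plane to the object points via the
// SVD of their covariance matrix, maps the points into that plane's frame,
// estimates the homography to the normalized image points, and recovers an
// orthonormal rotation and translation from its columns (Gram-Schmidt on the
// first two columns, third column from their cross product).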
| 1411 | void cv::internal::InitExtrinsics(const Mat& _imagePoints, const Mat& _objectPoints, const IntrinsicParams& param, Mat& omckk, Mat& Tckk) |
| 1412 | { |
| 1413 | CV_Assert(!_objectPoints.empty() && _objectPoints.type() == CV_64FC3); |
| 1414 | CV_Assert(!_imagePoints.empty() && _imagePoints.type() == CV_64FC2); |
| 1415 | |
| 1416 | Mat imagePointsNormalized = NormalizePixels(_imagePoints, param).reshape(1).t();
| 1417 | Mat objectPoints = _objectPoints.reshape(1).t();
| 1418 | Mat objectPointsMean, covObjectPoints; |
| 1419 | Mat Rckk; |
| 1420 | int Np = imagePointsNormalized.cols; |
| 1421 | calcCovarMatrix(objectPoints, covObjectPoints, objectPointsMean, COVAR_NORMAL | COVAR_COLS);
| 1422 | SVD svd(covObjectPoints); |
| 1423 | Mat R(svd.vt); |
| 1424 | if (norm(R(Rect(2, 0, 1, 2))) < 1e-6)
| 1425 | R = Mat::eye(3, 3, CV_64FC1);
| 1426 | if (determinant(R) < 0)
| 1427 | R = -R;
| 1428 | Mat T = -R * objectPointsMean;
| 1429 | Mat X_new = R * objectPoints + T * Mat::ones(1, Np, CV_64FC1);
| 1430 | Mat H = ComputeHomography(imagePointsNormalized, X_new(Rect(0,0,X_new.cols,2)));
| 1431 | double sc = .5 * (norm(H.col(0)) + norm(H.col(1)));
| 1432 | H = H / sc;
| 1433 | Mat u1 = H.col(0).clone();
| 1434 | double norm_u1 = norm(u1);
| 1435 | CV_Assert(fabs(norm_u1) > 0); |
| 1436 | u1 = u1 / norm_u1; |
| 1437 | Mat u2 = H.col(1).clone() - u1.dot(H.col(1).clone()) * u1;
| 1438 | double norm_u2 = norm(u2);
| 1439 | CV_Assert(fabs(norm_u2) > 0);
| 1440 | u2 = u2 / norm_u2;
| 1441 | Mat u3 = u1.cross(u2);
| 1442 | Mat RRR;
| 1443 | hconcat(u1, u2, RRR);
| 1444 | hconcat(RRR, u3, RRR);
| 1445 | Rodrigues(RRR, omckk);
| 1446 | Rodrigues(omckk, Rckk);
| 1447 | Tckk = H.col(2).clone();
| 1448 | Tckk = Tckk + Rckk * T;
| 1449 | Rckk = Rckk * R;
| 1450 | Rodrigues(Rckk, omckk);
| 1451 | } |
| 1452 | |
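// Estimates the pose of every calibration view with the current intrinsics:
// InitExtrinsics() provides the starting point and ComputeExtrinsicRefine()
// polishes it; when check_cond is set, a view with an ill-conditioned Jacobian
// triggers the CALIB_CHECK_COND error.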
| 1453 | void cv::internal::CalibrateExtrinsics(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, |
| 1454 | const IntrinsicParams& param, const int check_cond, |
| 1455 | const double thresh_cond, InputOutputArray omc, InputOutputArray Tc) |
| 1456 | { |
| 1457 | CV_Assert(!objectPoints.empty() && (objectPoints.type() == CV_32FC3 || objectPoints.type() == CV_64FC3)); |
| 1458 | CV_Assert(!imagePoints.empty() && (imagePoints.type() == CV_32FC2 || imagePoints.type() == CV_64FC2)); |
| 1459 | CV_Assert(omc.type() == CV_64FC3 || Tc.type() == CV_64FC3); |
| 1460 | |
| 1461 | if (omc.empty()) omc.create(1, (int)objectPoints.total(), CV_64FC3);
| 1462 | if (Tc.empty()) Tc.create(1, (int)objectPoints.total(), CV_64FC3);
| 1463 | |
| 1464 | const int maxIter = 20; |
| 1465 | |
| 1466 | for(int image_idx = 0; image_idx < (int)imagePoints.total(); ++image_idx) |
| 1467 | { |
| 1468 | Mat omckk, Tckk, JJ_kk; |
| 1469 | Mat image, object; |
| 1470 | |
| 1471 | objectPoints.getMat(image_idx).convertTo(object, CV_64FC3);
| 1472 | imagePoints.getMat (image_idx).convertTo(image, CV_64FC2);
| 1473 | |
| 1474 | bool imT = image.rows < image.cols; |
| 1475 | bool obT = object.rows < object.cols; |
| 1476 | |
| 1477 | InitExtrinsics(imT ? image.t() : image, obT ? object.t() : object, param, omckk, Tckk);
| 1478 |
| 1479 | ComputeExtrinsicRefine(!imT ? image.t() : image, !obT ? object.t() : object, omckk, Tckk, JJ_kk, maxIter, param, thresh_cond);
| 1480 | if (check_cond) |
| 1481 | { |
| 1482 | SVD svd(JJ_kk, SVD::NO_UV); |
| 1483 | if (svd.w.at<double>(0) / svd.w.at<double>((int)svd.w.total() - 1) > thresh_cond)
| 1484 | CV_Error(cv::Error::StsInternal, format("CALIB_CHECK_COND - Ill-conditioned matrix for input array %d", image_idx));
| 1485 | } |
| 1486 | omckk.reshape(3, 1).copyTo(omc.getMat().col(image_idx));
| 1487 | Tckk.reshape(3, 1).copyTo(Tc.getMat().col(image_idx));
| 1488 | } |
| 1489 | } |
| 1490 | |
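// Builds the Gauss-Newton normal equations for the full calibration problem:
// JJ2 = J^T * J and ex3 = J^T * e accumulated over all views, with the 9
// intrinsic parameters first (fx, fy, cx, cy, alpha, k1..k4) and 6 extrinsic
// parameters per view after them. Rows/columns of parameters that are not
// estimated (isEstimate == 0) are removed with subMatrix().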
| 1491 | void cv::internal::ComputeJacobians(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, |
| 1492 | const IntrinsicParams& param, InputArray omc, InputArray Tc, |
| 1493 | const int& check_cond, const double& thresh_cond, Mat& JJ2, Mat& ex3) |
| 1494 | { |
| 1495 | CV_Assert(!objectPoints.empty() && (objectPoints.type() == CV_32FC3 || objectPoints.type() == CV_64FC3)); |
| 1496 | CV_Assert(!imagePoints.empty() && (imagePoints.type() == CV_32FC2 || imagePoints.type() == CV_64FC2)); |
| 1497 | |
| 1498 | CV_Assert(!omc.empty() && omc.type() == CV_64FC3); |
| 1499 | CV_Assert(!Tc.empty() && Tc.type() == CV_64FC3); |
| 1500 | |
| 1501 | int n = (int)objectPoints.total(); |
| 1502 | |
| 1503 | JJ2 = Mat::zeros(9 + 6 * n, 9 + 6 * n, CV_64FC1);
| 1504 | ex3 = Mat::zeros(9 + 6 * n, 1, CV_64FC1);
| 1505 | |
| 1506 | for (int image_idx = 0; image_idx < n; ++image_idx) |
| 1507 | { |
| 1508 | Mat image, object; |
| 1509 | objectPoints.getMat(image_idx).convertTo(object, CV_64FC3);
| 1510 | imagePoints.getMat (image_idx).convertTo(image, CV_64FC2);
| 1511 | |
| 1512 | bool imT = image.rows < image.cols; |
| 1513 | Mat om(omc.getMat().col(image_idx)), T(Tc.getMat().col(image_idx));
| 1514 | |
| 1515 | std::vector<Point2d> x; |
| 1516 | Mat jacobians; |
| 1517 | projectPoints(object, x, om, T, param, jacobians);
| 1518 | Mat exkk = (imT ? image.t() : image) - Mat(x); |
| 1519 | |
| 1520 | Mat A(jacobians.rows, 9, CV_64FC1); |
| 1521 | jacobians.colRange(0, 4).copyTo(A.colRange(0, 4));
| 1522 | jacobians.col(14).copyTo(A.col(4));
| 1523 | jacobians.colRange(4, 8).copyTo(A.colRange(5, 9));
| 1524 | |
| 1525 | A = A.t(); |
| 1526 | |
| 1527 | Mat B = jacobians.colRange(8, 14).clone();
| 1528 | B = B.t(); |
| 1529 | |
| 1530 | JJ2(Rect(0, 0, 9, 9)) += A * A.t(); |
| 1531 | JJ2(Rect(9 + 6 * image_idx, 9 + 6 * image_idx, 6, 6)) = B * B.t(); |
| 1532 | |
| 1533 | JJ2(Rect(9 + 6 * image_idx, 0, 6, 9)) = A * B.t(); |
| 1534 | JJ2(Rect(0, 9 + 6 * image_idx, 9, 6)) = JJ2(Rect(9 + 6 * image_idx, 0, 6, 9)).t(); |
| 1535 | |
| 1536 | ex3.rowRange(0, 9) += A * exkk.reshape(1, 2 * exkk.rows);
| 1537 | ex3.rowRange(9 + 6 * image_idx, 9 + 6 * (image_idx + 1)) = B * exkk.reshape(1, 2 * exkk.rows);
| 1538 | |
| 1539 | if (check_cond) |
| 1540 | { |
| 1541 | Mat JJ_kk = B.t(); |
| 1542 | SVD svd(JJ_kk, SVD::NO_UV); |
| 1543 | CV_Assert(svd.w.at<double>(0) / svd.w.at<double>(svd.w.rows - 1) < thresh_cond); |
| 1544 | } |
| 1545 | } |
| 1546 | |
| 1547 | std::vector<uchar> idxs(param.isEstimate); |
| 1548 | idxs.insert(idxs.end(), 6 * n, 1);
| 1549 | |
| 1550 | subMatrix(JJ2, JJ2, idxs, idxs);
| 1551 | subMatrix(ex3, ex3, std::vector<uchar>(1, 1), idxs);
| 1552 | } |
| 1553 | |
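// Computes calibration quality measures: the per-coordinate standard deviation
// of the reprojection error (std_err, with the N/(N-1) sample correction),
// 3-sigma uncertainties of the estimated intrinsics (errors) taken from the
// square root of the diagonal of the inverse normal matrix, and the overall
// RMS reprojection error (rms).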
| 1554 | void cv::internal::EstimateUncertainties(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, |
| 1555 | const IntrinsicParams& params, InputArray omc, InputArray Tc, |
| 1556 | IntrinsicParams& errors, Vec2d& std_err, double thresh_cond, int check_cond, double& rms) |
| 1557 | { |
| 1558 | CV_Assert(!objectPoints.empty() && (objectPoints.type() == CV_32FC3 || objectPoints.type() == CV_64FC3)); |
| 1559 | CV_Assert(!imagePoints.empty() && (imagePoints.type() == CV_32FC2 || imagePoints.type() == CV_64FC2)); |
| 1560 | |
| 1561 | CV_Assert(!omc.empty() && omc.type() == CV_64FC3); |
| 1562 | CV_Assert(!Tc.empty() && Tc.type() == CV_64FC3); |
| 1563 | |
| 1564 | int total_ex = 0; |
| 1565 | for (int image_idx = 0; image_idx < (int)objectPoints.total(); ++image_idx) |
| 1566 | { |
| 1567 | total_ex += (int)objectPoints.getMat(image_idx).total();
| 1568 | } |
| 1569 | Mat ex(total_ex, 1, CV_64FC2); |
| 1570 | int insert_idx = 0; |
| 1571 | for (int image_idx = 0; image_idx < (int)objectPoints.total(); ++image_idx) |
| 1572 | { |
| 1573 | Mat image, object; |
| 1574 | objectPoints.getMat(image_idx).convertTo(object, CV_64FC3);
| 1575 | imagePoints.getMat (image_idx).convertTo(image, CV_64FC2);
| 1576 | |
| 1577 | bool imT = image.rows < image.cols; |
| 1578 | |
| 1579 | Mat om(omc.getMat().col(image_idx)), T(Tc.getMat().col(image_idx));
| 1580 |
| 1581 | std::vector<Point2d> x;
| 1582 | projectPoints(object, x, om, T, params, noArray());
| 1583 | Mat ex_ = (imT ? image.t() : image) - Mat(x);
| 1584 | ex_.copyTo(ex.rowRange(insert_idx, insert_idx + ex_.rows));
| 1585 | insert_idx += ex_.rows; |
| 1586 | } |
| 1587 | |
| 1588 | meanStdDev(ex, noArray(), std_err);
| 1589 | std_err *= sqrt((double)ex.total()/((double)ex.total() - 1.0));
| 1590 | |
| 1591 | Vec<double, 1> sigma_x; |
| 1592 | meanStdDev(ex.reshape(1, 1), noArray(), sigma_x);
| 1593 | |
| 1594 | Mat JJ2, ex3; |
| 1595 | ComputeJacobians(objectPoints, imagePoints, params, omc, Tc, check_cond, thresh_cond, JJ2, ex3);
| 1596 |
| 1597 | sqrt(JJ2.inv(), JJ2);
| 1598 | |
| 1599 | int nParams = JJ2.rows; |
| 1600 | // an explanation of this denominator correction can be found here:
| 1601 | // R. Hartley, A. Zisserman, Multiple View Geometry in Computer Vision, 2004, section 5.1.3, page 134
| 1602 | // see the discussion for more details: https://github.com/opencv/opencv/pull/22992
| 1603 | sigma_x *= sqrt(2.0 * (double)ex.total()/(2.0 * (double)ex.total() - nParams));
| 1604 | |
| 1605 | errors = 3 * sigma_x(0) * JJ2.diag(); |
| 1606 | rms = sqrt(norm(ex, NORM_L2SQR)/ex.total());
| 1607 | } |
| 1608 | |
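// Jacobians of the matrix product C = A * B with respect to the column-major
// vectorizations of A and B: dABdA stores dC(j,i)/dA(j,k) = B(k,i), and dABdB
// is block diagonal with one copy of A per column of B.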
| 1609 | void cv::internal::dAB(InputArray A, InputArray B, OutputArray dABdA, OutputArray dABdB) |
| 1610 | { |
| 1611 | CV_Assert(A.getMat().cols == B.getMat().rows); |
| 1612 | CV_Assert(A.type() == CV_64FC1 && B.type() == CV_64FC1); |
| 1613 | |
| 1614 | int p = A.getMat().rows; |
| 1615 | int n = A.getMat().cols; |
| 1616 | int q = B.getMat().cols; |
| 1617 | |
| 1618 | dABdA.create(p * q, p * n, CV_64FC1);
| 1619 | dABdB.create(p * q, q * n, CV_64FC1);
| 1620 |
| 1621 | dABdA.getMat() = Mat::zeros(p * q, p * n, CV_64FC1);
| 1622 | dABdB.getMat() = Mat::zeros(p * q, q * n, CV_64FC1);
| 1623 | |
| 1624 | for (int i = 0; i < q; ++i) |
| 1625 | { |
| 1626 | for (int j = 0; j < p; ++j) |
| 1627 | { |
| 1628 | int ij = j + i * p; |
| 1629 | for (int k = 0; k < n; ++k) |
| 1630 | { |
| 1631 | int kj = j + k * p; |
| 1632 | dABdA.getMat().at<double>(ij, kj) = B.getMat().at<double>(k, i);
| 1633 | } |
| 1634 | } |
| 1635 | } |
| 1636 | |
| 1637 | for (int i = 0; i < q; ++i) |
| 1638 | { |
| 1639 | A.getMat().copyTo(dABdB.getMat().rowRange(i * p, i * p + p).colRange(i * n, i * n + n));
| 1640 | } |
| 1641 | } |
| 1642 | |
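// Converts a Rodrigues Jacobian between OpenCV's layout and the transposed,
// column-major layout assumed by dAB() and the composition formulas in
// compose_motion().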
| 1643 | void cv::internal::JRodriguesMatlab(const Mat& src, Mat& dst) |
| 1644 | { |
| 1645 | Mat tmp(src.cols, src.rows, src.type()); |
| 1646 | if (src.rows == 9) |
| 1647 | { |
| 1648 | Mat(src.row(0).t()).copyTo(tmp.col(0));
| 1649 | Mat(src.row(1).t()).copyTo(tmp.col(3));
| 1650 | Mat(src.row(2).t()).copyTo(tmp.col(6));
| 1651 | Mat(src.row(3).t()).copyTo(tmp.col(1));
| 1652 | Mat(src.row(4).t()).copyTo(tmp.col(4));
| 1653 | Mat(src.row(5).t()).copyTo(tmp.col(7));
| 1654 | Mat(src.row(6).t()).copyTo(tmp.col(2));
| 1655 | Mat(src.row(7).t()).copyTo(tmp.col(5));
| 1656 | Mat(src.row(8).t()).copyTo(tmp.col(8));
| 1657 | } |
| 1658 | else |
| 1659 | { |
| 1660 | Mat(src.col(0).t()).copyTo(tmp.row(0));
| 1661 | Mat(src.col(1).t()).copyTo(tmp.row(3));
| 1662 | Mat(src.col(2).t()).copyTo(tmp.row(6));
| 1663 | Mat(src.col(3).t()).copyTo(tmp.row(1));
| 1664 | Mat(src.col(4).t()).copyTo(tmp.row(4));
| 1665 | Mat(src.col(5).t()).copyTo(tmp.row(7));
| 1666 | Mat(src.col(6).t()).copyTo(tmp.row(2));
| 1667 | Mat(src.col(7).t()).copyTo(tmp.row(5));
| 1668 | Mat(src.col(8).t()).copyTo(tmp.row(8));
| 1669 | } |
| 1670 | dst = tmp.clone(); |
| 1671 | } |
| 1672 | |
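// Composes two rigid motions given as rotation vectors and translations:
// R3 = R2 * R1 and T3 = R2 * T1 + T2, and returns the Jacobians of om3 and T3
// with respect to each of the four inputs.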
| 1673 | void cv::internal::compose_motion(InputArray _om1, InputArray _T1, InputArray _om2, InputArray _T2, |
| 1674 | Mat& om3, Mat& T3, Mat& dom3dom1, Mat& dom3dT1, Mat& dom3dom2, |
| 1675 | Mat& dom3dT2, Mat& dT3dom1, Mat& dT3dT1, Mat& dT3dom2, Mat& dT3dT2) |
| 1676 | { |
| 1677 | Mat om1 = _om1.getMat(); |
| 1678 | Mat om2 = _om2.getMat(); |
| 1679 | Mat T1 = _T1.getMat().reshape(1, 3);
| 1680 | Mat T2 = _T2.getMat().reshape(1, 3);
| 1681 | |
| 1682 | //% Rotations: |
| 1683 | Mat R1, R2, R3, dR1dom1(9, 3, CV_64FC1), dR2dom2; |
| 1684 | Rodrigues(om1, R1, dR1dom1);
| 1685 | Rodrigues(om2, R2, dR2dom2);
| 1686 | JRodriguesMatlab(dR1dom1, dR1dom1);
| 1687 | JRodriguesMatlab(dR2dom2, dR2dom2);
| 1688 | R3 = R2 * R1; |
| 1689 | Mat dR3dR2, dR3dR1; |
| 1690 | dAB(R2, R1, dR3dR2, dR3dR1);
| 1691 | Mat dom3dR3;
| 1692 | Rodrigues(R3, om3, dom3dR3);
| 1693 | JRodriguesMatlab(dom3dR3, dom3dR3);
| 1694 | dom3dom1 = dom3dR3 * dR3dR1 * dR1dom1; |
| 1695 | dom3dom2 = dom3dR3 * dR3dR2 * dR2dom2; |
| 1696 | dom3dT1 = Mat::zeros(3, 3, CV_64FC1);
| 1697 | dom3dT2 = Mat::zeros(3, 3, CV_64FC1);
| 1698 | |
| 1699 | //% Translations: |
| 1700 | Mat T3t = R2 * T1; |
| 1701 | Mat dT3tdR2, dT3tdT1; |
| 1702 | dAB(R2, T1, dT3tdR2, dT3tdT1);
| 1703 | Mat dT3tdom2 = dT3tdR2 * dR2dom2;
| 1704 | T3 = T3t + T2; |
| 1705 | dT3dT1 = dT3tdT1; |
| 1706 | dT3dT2 = Mat::eye(3, 3, CV_64FC1);
| 1707 | dT3dom2 = dT3tdom2;
| 1708 | dT3dom1 = Mat::zeros(3, 3, CV_64FC1);
| 1709 | } |
| 1710 | |
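// Median of a 1 x N CV_64FC1 row (average of the two middle values for even N);
// median3d applies it per coordinate to a row of 3D vectors.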
| 1711 | double cv::internal::median(const Mat& row) |
| 1712 | { |
| 1713 | CV_Assert(row.type() == CV_64FC1); |
| 1714 | CV_Assert(!row.empty() && row.rows == 1); |
| 1715 | Mat tmp = row.clone(); |
| 1716 | sort(tmp, tmp, 0);
| 1717 | if ((int)tmp.total() % 2) return tmp.at<double>((int)tmp.total() / 2);
| 1718 | else return 0.5 * (tmp.at<double>((int)tmp.total() / 2) + tmp.at<double>((int)tmp.total() / 2 - 1));
| 1719 | } |
| 1720 | |
| 1721 | cv::Vec3d cv::internal::median3d(InputArray m) |
| 1722 | { |
| 1723 | CV_Assert(m.depth() == CV_64F && m.getMat().rows == 1); |
| 1724 | Mat M = Mat(m.getMat().t()).reshape(1).t();
| 1725 | return Vec3d(median(M.row(0)), median(M.row(1)), median(M.row(2)));
| 1726 | } |
| 1727 | |