| 1 | /*M/////////////////////////////////////////////////////////////////////////////////////// |
| 2 | // |
| 3 | // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. |
| 4 | // |
| 5 | // By downloading, copying, installing or using the software you agree to this license. |
| 6 | // If you do not agree to this license, do not download, install, |
| 7 | // copy or use the software. |
| 8 | // |
| 9 | // |
| 10 | // License Agreement |
| 11 | // For Open Source Computer Vision Library |
| 12 | // |
| 13 | // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. |
| 14 | // Copyright (C) 2009, Willow Garage Inc., all rights reserved. |
| 15 | // Third party copyrights are property of their respective owners. |
| 16 | // |
| 17 | // Redistribution and use in source and binary forms, with or without modification, |
| 18 | // are permitted provided that the following conditions are met: |
| 19 | // |
| 20 | // * Redistribution's of source code must retain the above copyright notice, |
| 21 | // this list of conditions and the following disclaimer. |
| 22 | // |
| 23 | // * Redistribution's in binary form must reproduce the above copyright notice, |
| 24 | // this list of conditions and the following disclaimer in the documentation |
| 25 | // and/or other materials provided with the distribution. |
| 26 | // |
| 27 | // * The name of the copyright holders may not be used to endorse or promote products |
| 28 | // derived from this software without specific prior written permission. |
| 29 | // |
| 30 | // This software is provided by the copyright holders and contributors "as is" and |
| 31 | // any express or implied warranties, including, but not limited to, the implied |
| 32 | // warranties of merchantability and fitness for a particular purpose are disclaimed. |
| 33 | // In no event shall the Intel Corporation or contributors be liable for any direct, |
| 34 | // indirect, incidental, special, exemplary, or consequential damages |
| 35 | // (including, but not limited to, procurement of substitute goods or services; |
| 36 | // loss of use, data, or profits; or business interruption) however caused |
| 37 | // and on any theory of liability, whether in contract, strict liability, |
| 38 | // or tort (including negligence or otherwise) arising in any way out of |
| 39 | // the use of this software, even if advised of the possibility of such damage. |
| 40 | // |
| 41 | //M*/ |
| 42 | |
| 43 | #include "opencv2/core/core_c.h" |
| 44 | #include "opencv2/core/types.hpp" |
| 45 | #include "precomp.hpp" |
| 46 | #include "distortion_model.hpp" |
| 47 | |
| 48 | #include "undistort.simd.hpp" |
| 49 | #include "undistort.simd_declarations.hpp" // defines CV_CPU_DISPATCH_MODES_ALL=AVX2,...,BASELINE based on CMakeLists.txt content |
| 50 | |
| 51 | namespace cv |
| 52 | { |
| 53 | |
| 54 | Mat getDefaultNewCameraMatrix( InputArray _cameraMatrix, Size imgsize, |
| 55 | bool centerPrincipalPoint ) |
| 56 | { |
| 57 | Mat cameraMatrix = _cameraMatrix.getMat(); |
| 58 | if( !centerPrincipalPoint && cameraMatrix.type() == CV_64F ) |
| 59 | return cameraMatrix; |
| 60 | |
| 61 | Mat newCameraMatrix; |
| 62 | cameraMatrix.convertTo(m: newCameraMatrix, CV_64F); |
| 63 | if( centerPrincipalPoint ) |
| 64 | { |
| 65 | newCameraMatrix.ptr<double>()[2] = (imgsize.width-1)*0.5; |
| 66 | newCameraMatrix.ptr<double>()[5] = (imgsize.height-1)*0.5; |
| 67 | } |
| 68 | return newCameraMatrix; |
| 69 | } |
| 70 | |
namespace {
// Builds the functor that computes the undistort/rectify maps row-by-row.
// CV_CPU_DISPATCH selects, at runtime, the best compiled SIMD variant
// (AVX2, ..., baseline — see undistort.simd_declarations.hpp) of
// getInitUndistortRectifyMapComputer from undistort.simd.hpp.
// The returned ParallelLoopBody writes into _map1/_map2 for whatever row
// Range it is invoked with.
//   _ir      : pointer to the 9 doubles of inv(newCameraMatrix(:,0:3) * R)
//   _matTilt : projection matrix for the tilted-sensor (trapezoidal) model
//   _u0,_v0  : principal point of the source camera; _fx,_fy focal lengths
//   _k1.._k6 : radial coefficients; _p1,_p2 tangential; _s1.._s4 thin prism
Ptr<ParallelLoopBody> getInitUndistortRectifyMapComputer(Size _size, Mat &_map1, Mat &_map2, int _m1type,
                                                         const double* _ir, Matx33d &_matTilt,
                                                         double _u0, double _v0, double _fx, double _fy,
                                                         double _k1, double _k2, double _p1, double _p2,
                                                         double _k3, double _k4, double _k5, double _k6,
                                                         double _s1, double _s2, double _s3, double _s4)
{
    CV_INSTRUMENT_REGION();

    CV_CPU_DISPATCH(getInitUndistortRectifyMapComputer, (_size, _map1, _map2, _m1type, _ir, _matTilt, _u0, _v0, _fx, _fy, _k1, _k2, _p1, _p2, _k3, _k4, _k5, _k6, _s1, _s2, _s3, _s4),
        CV_CPU_DISPATCH_MODES_ALL);
}
}
| 85 | |
// Computes the undistortion + rectification transformation as a map pair for
// remap(): for every pixel of the destination (corrected) image the maps give
// the corresponding position in the source (distorted) image.
//   _cameraMatrix    : 3x3 intrinsics A of the source camera
//   _distCoeffs      : 4/5/8/12/14-element distortion vector, or empty (= none)
//   _matR            : optional 3x3 rectification rotation (identity if empty)
//   _newCameraMatrix : 3x3 (or 3x4; only the first 3 columns are used)
//                      intrinsics of the corrected image; when empty, A with
//                      the principal point moved to the image center is used
//   size             : destination image size
//   m1type           : CV_16SC2 (default if <= 0), CV_32FC1 or CV_32FC2
//   _map1/_map2      : output maps; _map2 is released for CV_32FC2
void initUndistortRectifyMap( InputArray _cameraMatrix, InputArray _distCoeffs,
                              InputArray _matR, InputArray _newCameraMatrix,
                              Size size, int m1type, OutputArray _map1, OutputArray _map2 )
{
    Mat cameraMatrix = _cameraMatrix.getMat(), distCoeffs = _distCoeffs.getMat();
    Mat matR = _matR.getMat(), newCameraMatrix = _newCameraMatrix.getMat();

    if( m1type <= 0 )
        m1type = CV_16SC2;
    CV_Assert( m1type == CV_16SC2 || m1type == CV_32FC1 || m1type == CV_32FC2 );
    _map1.create( sz: size, type: m1type );
    Mat map1 = _map1.getMat(), map2;
    if( m1type != CV_32FC2 )
    {
        // CV_16SC2 pairs map1 with an auxiliary CV_16UC1 map2; CV_32FC1 uses
        // two single-channel planes. CV_32FC2 packs both coordinates in map1.
        _map2.create( sz: size, type: m1type == CV_16SC2 ? CV_16UC1 : CV_32FC1 );
        map2 = _map2.getMat();
    }
    else
        _map2.release();

    Mat_<double> R = Mat_<double>::eye(rows: 3, cols: 3);
    Mat_<double> A = Mat_<double>(cameraMatrix), Ar;

    if( !newCameraMatrix.empty() )
        Ar = Mat_<double>(newCameraMatrix);
    else
        Ar = getDefaultNewCameraMatrix( cameraMatrix: A, imgsize: size, centerPrincipalPoint: true );

    if( !matR.empty() )
        R = Mat_<double>(matR);

    if( !distCoeffs.empty() )
        distCoeffs = Mat_<double>(distCoeffs);
    else
    {
        // No distortion supplied: substitute a full-length all-zero vector.
        distCoeffs.create(rows: 14, cols: 1, CV_64F);
        distCoeffs = 0.;
    }

    CV_Assert( A.size() == Size(3,3) && A.size() == R.size() );
    CV_Assert( Ar.size() == Size(3,3) || Ar.size() == Size(4, 3));
    // iR maps destination pixel coordinates back into the source camera frame:
    // the inverse of (new intrinsics, first 3 columns) * (rectification R).
    Mat_<double> iR = (Ar.colRange(startcol: 0,endcol: 3)*R).inv(method: DECOMP_LU);
    const double* ir = &iR(0,0);

    double u0 = A(0, 2), v0 = A(1, 2);
    double fx = A(0, 0), fy = A(1, 1);

    CV_Assert( distCoeffs.size() == Size(1, 4) || distCoeffs.size() == Size(4, 1) ||
               distCoeffs.size() == Size(1, 5) || distCoeffs.size() == Size(5, 1) ||
               distCoeffs.size() == Size(1, 8) || distCoeffs.size() == Size(8, 1) ||
               distCoeffs.size() == Size(1, 12) || distCoeffs.size() == Size(12, 1) ||
               distCoeffs.size() == Size(1, 14) || distCoeffs.size() == Size(14, 1));

    // Make the coefficients readable through a flat pointer below.
    if( distCoeffs.rows != 1 && !distCoeffs.isContinuous() )
        distCoeffs = distCoeffs.t();

    // Unpack the coefficient vector; entries beyond its length default to 0.
    // Layout: k1,k2 (radial), p1,p2 (tangential), k3..k6 (radial, rational),
    // s1..s4 (thin prism), tauX,tauY (sensor tilt).
    const double* const distPtr = distCoeffs.ptr<double>();
    double k1 = distPtr[0];
    double k2 = distPtr[1];
    double p1 = distPtr[2];
    double p2 = distPtr[3];
    double k3 = distCoeffs.cols + distCoeffs.rows - 1 >= 5 ? distPtr[4] : 0.;
    double k4 = distCoeffs.cols + distCoeffs.rows - 1 >= 8 ? distPtr[5] : 0.;
    double k5 = distCoeffs.cols + distCoeffs.rows - 1 >= 8 ? distPtr[6] : 0.;
    double k6 = distCoeffs.cols + distCoeffs.rows - 1 >= 8 ? distPtr[7] : 0.;
    double s1 = distCoeffs.cols + distCoeffs.rows - 1 >= 12 ? distPtr[8] : 0.;
    double s2 = distCoeffs.cols + distCoeffs.rows - 1 >= 12 ? distPtr[9] : 0.;
    double s3 = distCoeffs.cols + distCoeffs.rows - 1 >= 12 ? distPtr[10] : 0.;
    double s4 = distCoeffs.cols + distCoeffs.rows - 1 >= 12 ? distPtr[11] : 0.;
    double tauX = distCoeffs.cols + distCoeffs.rows - 1 >= 14 ? distPtr[12] : 0.;
    double tauY = distCoeffs.cols + distCoeffs.rows - 1 >= 14 ? distPtr[13] : 0.;

    // Matrix for trapezoidal distortion of tilted image sensor
    Matx33d matTilt = Matx33d::eye();
    detail::computeTiltProjectionMatrix(tauX, tauY, matTilt: &matTilt);

    // Fill the maps row-by-row in parallel using the SIMD-dispatched kernel.
    parallel_for_(range: Range(0, size.height), body: *getInitUndistortRectifyMapComputer(
        size: size, map1&: map1, map2&: map2, m1type: m1type, ir: ir, matTilt&: matTilt, u0: u0, v0: v0,
        fx: fx, fy: fy, k1: k1, k2: k2, p1: p1, p2: p2, k3: k3, k4: k4, k5: k5, k6: k6, s1: s1, s2: s2, s3: s3, s4: s4));
}
| 166 | |
// Computes the inverse counterpart of initUndistortRectifyMap(): the maps are
// indexed by *source*-image pixel and store where each source pixel lands
// after undistortion (with A/_distCoeffs), rectification by R, and projection
// with the new camera matrix Ar. Pipeline, per grid point:
//   1) undistortPoints with K=A, identity R/P  -> normalized, distortion-free
//   2) perspectiveTransform by R               -> rectified (note: R is NOT
//      inverted here, unlike in initUndistortRectifyMap)
//   3) undistortPoints with identity K, zero distortion, P=Ar -> pixel coords
// Output map layout per m1type matches initUndistortRectifyMap().
void initInverseRectificationMap( InputArray _cameraMatrix, InputArray _distCoeffs,
                                  InputArray _matR, InputArray _newCameraMatrix,
                                  const Size& size, int m1type, OutputArray _map1, OutputArray _map2 )
{
    // Parameters
    Mat cameraMatrix = _cameraMatrix.getMat(), distCoeffs = _distCoeffs.getMat();
    Mat matR = _matR.getMat(), newCameraMatrix = _newCameraMatrix.getMat();

    // Check m1type validity
    if( m1type <= 0 )
        m1type = CV_16SC2;
    CV_Assert( m1type == CV_16SC2 || m1type == CV_32FC1 || m1type == CV_32FC2 );

    // Init Maps
    _map1.create( sz: size, type: m1type );
    Mat map1 = _map1.getMat(), map2;
    if( m1type != CV_32FC2 )
    {
        _map2.create( sz: size, type: m1type == CV_16SC2 ? CV_16UC1 : CV_32FC1 );
        map2 = _map2.getMat();
    }
    else {
        _map2.release();
    }

    // Init camera intrinsics. Ar defaults to A with a centered principal point.
    Mat_<double> A = Mat_<double>(cameraMatrix), Ar;
    if( !newCameraMatrix.empty() )
        Ar = Mat_<double>(newCameraMatrix);
    else
        Ar = getDefaultNewCameraMatrix( cameraMatrix: A, imgsize: size, centerPrincipalPoint: true );
    CV_Assert( A.size() == Size(3,3) );
    CV_Assert( Ar.size() == Size(3,3) || Ar.size() == Size(4, 3));

    // Init rotation matrix
    Mat_<double> R = Mat_<double>::eye(rows: 3, cols: 3);
    if( !matR.empty() )
    {
        R = Mat_<double>(matR);
        //Note, do not inverse
    }
    CV_Assert( Size(3,3) == R.size() );

    // Init distortion vector
    if( !distCoeffs.empty() ){
        distCoeffs = Mat_<double>(distCoeffs);

        // Fix distortion vector orientation
        if( distCoeffs.rows != 1 && !distCoeffs.isContinuous() ) {
            distCoeffs = distCoeffs.t();
        }
    }

    // Validate distortion vector size
    CV_Assert(  distCoeffs.empty() || // Empty allows cv::undistortPoints to skip distortion
                distCoeffs.size() == Size(1, 4) || distCoeffs.size() == Size(4, 1) ||
                distCoeffs.size() == Size(1, 5) || distCoeffs.size() == Size(5, 1) ||
                distCoeffs.size() == Size(1, 8) || distCoeffs.size() == Size(8, 1) ||
                distCoeffs.size() == Size(1, 12) || distCoeffs.size() == Size(12, 1) ||
                distCoeffs.size() == Size(1, 14) || distCoeffs.size() == Size(14, 1));

    // Create objectPoints: one entry per source pixel, in row-major order
    // (p2i for addressing the output maps, p2f for the float math below).
    std::vector<cv::Point2i> p2i_objPoints;
    std::vector<cv::Point2f> p2f_objPoints;
    for (int r = 0; r < size.height; r++)
    {
        for (int c = 0; c < size.width; c++)
        {
            p2i_objPoints.push_back(x: cv::Point2i(c, r));
            p2f_objPoints.push_back(x: cv::Point2f(static_cast<float>(c), static_cast<float>(r)));
        }
    }

    // Undistort: normalized coordinates, distortion removed (P = identity).
    std::vector<cv::Point2f> p2f_objPoints_undistorted;
    undistortPoints(
        src: p2f_objPoints,
        dst: p2f_objPoints_undistorted,
        cameraMatrix: A,
        distCoeffs,
        R: cv::Mat::eye(size: cv::Size(3, 3), CV_64FC1), // R
        P: cv::Mat::eye(size: cv::Size(3, 3), CV_64FC1) // P = New K
    );

    // Rectify
    std::vector<cv::Point2f> p2f_sourcePoints_pinHole;
    perspectiveTransform(
        src: p2f_objPoints_undistorted,
        dst: p2f_sourcePoints_pinHole,
        m: R
    );

    // Project points back to camera coordinates: identity K and zero
    // distortion, so this applies only the pinhole projection P = Ar.
    std::vector<cv::Point2f> p2f_sourcePoints;
    undistortPoints(
        src: p2f_sourcePoints_pinHole,
        dst: p2f_sourcePoints,
        cameraMatrix: cv::Mat::eye(size: cv::Size(3, 3), CV_32FC1), // K
        distCoeffs: cv::Mat::zeros(size: cv::Size(1, 4), CV_32FC1), // Distortion
        R: cv::Mat::eye(size: cv::Size(3, 3), CV_32FC1), // R
        P: Ar // New K
    );

    // Copy to map in the representation requested by m1type.
    if (m1type == CV_16SC2) {
        for (size_t i=0; i < p2i_objPoints.size(); i++) {
            map1.at<Vec2s>(i0: p2i_objPoints[i].y, i1: p2i_objPoints[i].x) = Vec2s(saturate_cast<short>(v: p2f_sourcePoints[i].x), saturate_cast<short>(v: p2f_sourcePoints[i].y));
        }
    } else if (m1type == CV_32FC2) {
        for (size_t i=0; i < p2i_objPoints.size(); i++) {
            map1.at<Vec2f>(i0: p2i_objPoints[i].y, i1: p2i_objPoints[i].x) = Vec2f(p2f_sourcePoints[i]);
        }
    } else { // m1type == CV_32FC1
        for (size_t i=0; i < p2i_objPoints.size(); i++) {
            map1.at<float>(i0: p2i_objPoints[i].y, i1: p2i_objPoints[i].x) = p2f_sourcePoints[i].x;
            map2.at<float>(i0: p2i_objPoints[i].y, i1: p2i_objPoints[i].x) = p2f_sourcePoints[i].y;
        }
    }
}
| 286 | |
// Transforms an image to compensate for lens distortion: equivalent to
// initUndistortRectifyMap() with identity rectification followed by remap()
// with bilinear interpolation. To bound the temporary map size, the image is
// processed in horizontal stripes of roughly (1<<12)/width rows each; the
// principal point of the new camera matrix is shifted by -y per stripe so the
// maps can be computed with a stripe-local row origin.
void undistort( InputArray _src, OutputArray _dst, InputArray _cameraMatrix,
                InputArray _distCoeffs, InputArray _newCameraMatrix )
{
    CV_INSTRUMENT_REGION();

    Mat src = _src.getMat(), cameraMatrix = _cameraMatrix.getMat();
    Mat distCoeffs = _distCoeffs.getMat(), newCameraMatrix = _newCameraMatrix.getMat();

    _dst.create( sz: src.size(), type: src.type() );
    Mat dst = _dst.getMat();

    // In-place operation is not supported: remap() may read source rows that
    // an earlier stripe already overwrote.
    CV_Assert( dst.data != src.data );

    // Stripe height: about 2^12 pixels per stripe, at least 1 row.
    int stripe_size0 = std::min(a: std::max(a: 1, b: (1 << 12) / std::max(a: src.cols, b: 1)), b: src.rows);
    Mat map1(stripe_size0, src.cols, CV_16SC2), map2(stripe_size0, src.cols, CV_16UC1);

    Mat_<double> A, Ar, I = Mat_<double>::eye(rows: 3,cols: 3);

    cameraMatrix.convertTo(m: A, CV_64F);
    if( !distCoeffs.empty() )
        distCoeffs = Mat_<double>(distCoeffs);
    else
    {
        // No coefficients supplied: use a zero (no-distortion) vector.
        distCoeffs.create(rows: 5, cols: 1, CV_64F);
        distCoeffs = 0.;
    }

    if( !newCameraMatrix.empty() )
        newCameraMatrix.convertTo(m: Ar, CV_64F);
    else
        A.copyTo(m: Ar);

    double v0 = Ar(1, 2);
    for( int y = 0; y < src.rows; y += stripe_size0 )
    {
        int stripe_size = std::min( a: stripe_size0, b: src.rows - y );
        // Shift the principal point so the map is computed relative to the
        // top row of the current stripe.
        Ar(1, 2) = v0 - y;
        Mat map1_part = map1.rowRange(startrow: 0, endrow: stripe_size),
            map2_part = map2.rowRange(startrow: 0, endrow: stripe_size),
            dst_part = dst.rowRange(startrow: y, endrow: y + stripe_size);

        initUndistortRectifyMap( cameraMatrix: A, distCoeffs: distCoeffs, matR: I, newCameraMatrix: Ar, size: Size(src.cols, stripe_size),
                                 m1type: map1_part.type(), map1: map1_part, map2: map2_part );
        remap( src, dst: dst_part, map1: map1_part, map2: map2_part, interpolation: INTER_LINEAR, borderMode: BORDER_CONSTANT );
    }
}
| 333 | |
| 334 | } |
| 335 | |
// Legacy C-API core of cv::undistortPoints(): converts observed (distorted)
// image points to ideal coordinates by iteratively inverting the distortion
// model, then optionally applies rectification matR and projection matP.
// Coefficient layout in k[0..13]: k1,k2 (radial), p1,p2 (tangential),
// k3..k6 (radial, rational part), s1..s4 (thin prism), tauX,tauY (tilt).
// The fixed-point iteration stops per `criteria` (max count and/or the
// reprojection error dropping below epsilon).
static void cvUndistortPointsInternal( const CvMat* _src, CvMat* _dst, const CvMat* _cameraMatrix,
                   const CvMat* _distCoeffs,
                   const CvMat* matR, const CvMat* matP, cv::TermCriteria criteria)
{
    CV_Assert(criteria.isValid());
    double A[3][3], RR[3][3], k[14]={0,0,0,0,0,0,0,0,0,0,0,0,0,0};
    CvMat matA=cvMat(rows: 3, cols: 3, CV_64F, data: A), _Dk;
    CvMat _RR=cvMat(rows: 3, cols: 3, CV_64F, data: RR);
    cv::Matx33d invMatTilt = cv::Matx33d::eye();
    cv::Matx33d matTilt = cv::Matx33d::eye();

    // src/dst must be 1xN or Nx1 2-channel float/double vectors of equal length.
    CV_Assert( CV_IS_MAT(_src) && CV_IS_MAT(_dst) &&
        (_src->rows == 1 || _src->cols == 1) &&
        (_dst->rows == 1 || _dst->cols == 1) &&
        _src->cols + _src->rows - 1 == _dst->rows + _dst->cols - 1 &&
        (CV_MAT_TYPE(_src->type) == CV_32FC2 || CV_MAT_TYPE(_src->type) == CV_64FC2) &&
        (CV_MAT_TYPE(_dst->type) == CV_32FC2 || CV_MAT_TYPE(_dst->type) == CV_64FC2));

    CV_Assert( CV_IS_MAT(_cameraMatrix) &&
        _cameraMatrix->rows == 3 && _cameraMatrix->cols == 3 );

    cvConvert( _cameraMatrix, &matA );


    if( _distCoeffs )
    {
        CV_Assert( CV_IS_MAT(_distCoeffs) &&
            (_distCoeffs->rows == 1 || _distCoeffs->cols == 1) &&
            (_distCoeffs->rows*_distCoeffs->cols == 4 ||
             _distCoeffs->rows*_distCoeffs->cols == 5 ||
             _distCoeffs->rows*_distCoeffs->cols == 8 ||
             _distCoeffs->rows*_distCoeffs->cols == 12 ||
             _distCoeffs->rows*_distCoeffs->cols == 14));

        _Dk = cvMat( rows: _distCoeffs->rows, cols: _distCoeffs->cols,
            CV_MAKETYPE(CV_64F,CV_MAT_CN(_distCoeffs->type)), data: k);

        cvConvert( _distCoeffs, &_Dk );
        // Tilt matrices are only needed when the tauX/tauY terms are non-zero.
        if (k[12] != 0 || k[13] != 0)
        {
            cv::detail::computeTiltProjectionMatrix<double>(tauX: k[12], tauY: k[13], NULL, NULL, NULL, invMatTilt: &invMatTilt);
            cv::detail::computeTiltProjectionMatrix<double>(tauX: k[12], tauY: k[13], matTilt: &matTilt, NULL, NULL);
        }
    }

    if( matR )
    {
        CV_Assert( CV_IS_MAT(matR) && matR->rows == 3 && matR->cols == 3 );
        cvConvert( matR, &_RR );
    }
    else
        cvSetIdentity(mat: &_RR);

    if( matP )
    {
        // Fold P (first 3 columns) into RR so one matrix applies both
        // rectification and projection at the end of the loop.
        double PP[3][3];
        CvMat _P3x3, _PP=cvMat(rows: 3, cols: 3, CV_64F, data: PP);
        CV_Assert( CV_IS_MAT(matP) && matP->rows == 3 && (matP->cols == 3 || matP->cols == 4));
        cvConvert( cvGetCols(matP, &_P3x3, 0, 3), &_PP );
        cvMatMul( &_PP, &_RR, &_RR );
    }

    const CvPoint2D32f* srcf = (const CvPoint2D32f*)_src->data.ptr;
    const CvPoint2D64f* srcd = (const CvPoint2D64f*)_src->data.ptr;
    CvPoint2D32f* dstf = (CvPoint2D32f*)_dst->data.ptr;
    CvPoint2D64f* dstd = (CvPoint2D64f*)_dst->data.ptr;
    int stype = CV_MAT_TYPE(_src->type);
    int dtype = CV_MAT_TYPE(_dst->type);
    // Element stride in points; row vectors are packed contiguously.
    int sstep = _src->rows == 1 ? 1 : _src->step/CV_ELEM_SIZE(stype);
    int dstep = _dst->rows == 1 ? 1 : _dst->step/CV_ELEM_SIZE(dtype);

    double fx = A[0][0];
    double fy = A[1][1];
    double ifx = 1./fx;
    double ify = 1./fy;
    double cx = A[0][2];
    double cy = A[1][2];

    int n = _src->rows + _src->cols - 1;
    for( int i = 0; i < n; i++ )
    {
        double x, y, x0 = 0, y0 = 0, u, v;
        if( stype == CV_32FC2 )
        {
            x = srcf[i*sstep].x;
            y = srcf[i*sstep].y;
        }
        else
        {
            x = srcd[i*sstep].x;
            y = srcd[i*sstep].y;
        }
        // Keep the observed pixel (u,v) for the error check and the fallback;
        // (x,y) becomes the normalized coordinate estimate.
        u = x; v = y;
        x = (x - cx)*ifx;
        y = (y - cy)*ify;

        if( _distCoeffs ) {
            // compensate tilt distortion
            cv::Vec3d vecUntilt = invMatTilt * cv::Vec3d(x, y, 1);
            double invProj = vecUntilt(2) ? 1./vecUntilt(2) : 1;
            x0 = x = invProj * vecUntilt(0);
            y0 = y = invProj * vecUntilt(1);

            double error = std::numeric_limits<double>::max();
            // compensate distortion iteratively

            for( int j = 0; ; j++ )
            {
                if ((criteria.type & cv::TermCriteria::COUNT) && j >= criteria.maxCount)
                    break;
                if ((criteria.type & cv::TermCriteria::EPS) && error < criteria.epsilon)
                    break;
                double r2 = x*x + y*y;
                // Inverse of the rational radial factor evaluated at the
                // current estimate.
                double icdist = (1 + ((k[7]*r2 + k[6])*r2 + k[5])*r2)/(1 + ((k[4]*r2 + k[1])*r2 + k[0])*r2);
                if (icdist < 0)  // test: undistortPoints.regression_14583
                {
                    // Diverged: fall back to the plain normalized observation.
                    x = (u - cx)*ifx;
                    y = (v - cy)*ify;
                    break;
                }
                double deltaX = 2*k[2]*x*y + k[3]*(r2 + 2*x*x)+ k[8]*r2+k[9]*r2*r2;
                double deltaY = k[2]*(r2 + 2*y*y) + 2*k[3]*x*y+ k[10]*r2+k[11]*r2*r2;
                x = (x0 - deltaX)*icdist;
                y = (y0 - deltaY)*icdist;

                if(criteria.type & cv::TermCriteria::EPS)
                {
                    // Forward-project the current estimate through the full
                    // distortion model and measure the pixel-space error
                    // against the original observation (u,v).
                    double r4, r6, a1, a2, a3, cdist, icdist2;
                    double xd, yd, xd0, yd0;
                    cv::Vec3d vecTilt;

                    r2 = x*x + y*y;
                    r4 = r2*r2;
                    r6 = r4*r2;
                    a1 = 2*x*y;
                    a2 = r2 + 2*x*x;
                    a3 = r2 + 2*y*y;
                    cdist = 1 + k[0]*r2 + k[1]*r4 + k[4]*r6;
                    icdist2 = 1./(1 + k[5]*r2 + k[6]*r4 + k[7]*r6);
                    xd0 = x*cdist*icdist2 + k[2]*a1 + k[3]*a2 + k[8]*r2+k[9]*r4;
                    yd0 = y*cdist*icdist2 + k[2]*a3 + k[3]*a1 + k[10]*r2+k[11]*r4;

                    vecTilt = matTilt*cv::Vec3d(xd0, yd0, 1);
                    invProj = vecTilt(2) ? 1./vecTilt(2) : 1;
                    xd = invProj * vecTilt(0);
                    yd = invProj * vecTilt(1);

                    double x_proj = xd*fx + cx;
                    double y_proj = yd*fy + cy;

                    error = sqrt( x: pow(x: x_proj - u, y: 2) + pow(x: y_proj - v, y: 2) );
                }
            }
        }

        if( matR || matP )
        {
            // Apply the combined rectification/projection homography.
            double xx = RR[0][0]*x + RR[0][1]*y + RR[0][2];
            double yy = RR[1][0]*x + RR[1][1]*y + RR[1][2];
            double ww = 1./(RR[2][0]*x + RR[2][1]*y + RR[2][2]);
            x = xx*ww;
            y = yy*ww;
        }

        if( dtype == CV_32FC2 )
        {
            dstf[i*dstep].x = (float)x;
            dstf[i*dstep].y = (float)y;
        }
        else
        {
            dstd[i*dstep].x = x;
            dstd[i*dstep].y = y;
        }
    }
}
| 512 | |
| 513 | namespace cv { |
| 514 | |
| 515 | void undistortPoints(InputArray _src, OutputArray _dst, |
| 516 | InputArray _cameraMatrix, |
| 517 | InputArray _distCoeffs, |
| 518 | InputArray _Rmat, |
| 519 | InputArray _Pmat) |
| 520 | { |
| 521 | undistortPoints(src: _src, dst: _dst, cameraMatrix: _cameraMatrix, distCoeffs: _distCoeffs, R: _Rmat, P: _Pmat, criteria: TermCriteria(TermCriteria::MAX_ITER, 5, 0.01)); |
| 522 | } |
| 523 | |
// Computes ideal point coordinates from observed (distorted) point
// coordinates. Accepts points as an Nx1/1xN 2-channel matrix or an Nx2/2xN
// single-channel matrix (float or double); normalizes the layout, wraps
// everything in legacy CvMat headers and delegates the actual work to
// cvUndistortPointsInternal(). _Rmat/_Pmat/_distCoeffs may be empty.
void undistortPoints(InputArray _src, OutputArray _dst,
                     InputArray _cameraMatrix,
                     InputArray _distCoeffs,
                     InputArray _Rmat,
                     InputArray _Pmat,
                     TermCriteria criteria)
{
    Mat src = _src.getMat(), cameraMatrix = _cameraMatrix.getMat();
    Mat distCoeffs = _distCoeffs.getMat(), R = _Rmat.getMat(), P = _Pmat.getMat();

    // checkVector(2) < 0 means the layout is not N points of 2 channels;
    // try the transposed orientation before giving up.
    int npoints = src.checkVector(elemChannels: 2), depth = src.depth();
    if (npoints < 0)
        src = src.t();
    npoints = src.checkVector(elemChannels: 2);
    CV_Assert(npoints >= 0 && src.isContinuous() && (depth == CV_32F || depth == CV_64F));

    // Fold an Nx2 single-channel matrix into Nx1 2-channel.
    if (src.cols == 2)
        src = src.reshape(cn: 2);

    _dst.create(rows: npoints, cols: 1, CV_MAKETYPE(depth, 2), i: -1, allowTransposed: true);
    Mat dst = _dst.getMat();

    // Optional arguments are passed down as NULL pointers when empty.
    CvMat _csrc = cvMat(m: src), _cdst = cvMat(m: dst), _ccameraMatrix = cvMat(m: cameraMatrix);
    CvMat matR, matP, _cdistCoeffs, *pR=0, *pP=0, *pD=0;
    if( !R.empty() )
        pR = &(matR = cvMat(m: R));
    if( !P.empty() )
        pP = &(matP = cvMat(m: P));
    if( !distCoeffs.empty() )
        pD = &(_cdistCoeffs = cvMat(m: distCoeffs));
    cvUndistortPointsInternal(src: &_csrc, dst: &_cdst, cameraMatrix: &_ccameraMatrix, distCoeffs: pD, matR: pR, matP: pP, criteria);
}
| 556 | |
| 557 | void undistortImagePoints(InputArray src, OutputArray dst, InputArray cameraMatrix, InputArray distCoeffs, TermCriteria termCriteria) |
| 558 | { |
| 559 | undistortPoints(src: src, dst: dst, cameraMatrix: cameraMatrix, distCoeffs: distCoeffs, Rmat: noArray(), Pmat: cameraMatrix, criteria: termCriteria); |
| 560 | } |
| 561 | |
// Forward mapping used by initWideAngleProjMap(): projects a normalized,
// undistorted image point p onto the wide-angle output plane, with alpha
// controlling the projection shape (beta = 1 + 2*alpha).
// If J is non-null it receives the 2x2 Jacobian of the mapping, packed
// row-major as Vec4d(dX/dx, dX/dy, dY/dx, dY/dy) — consumed by the
// Gauss-Newton inversion in invMapPointSpherical().
// Supported projTypes: PROJ_SPHERICAL_ORTHO (plain scaling by k) and
// PROJ_SPHERICAL_EQRECT (equirectangular: asin of the clamped scaled
// coordinates). Any other value raises StsBadArg.
static Point2f mapPointSpherical(const Point2f& p, float alpha, Vec4d* J, enum UndistortTypes projType)
{
    double x = p.x, y = p.y;
    double beta = 1 + 2*alpha;
    double v = x*x + y*y + 1, iv = 1/v;
    double u = std::sqrt(x: beta*v + alpha*alpha);

    // k is the radial scale; kv (and kx, ky) are its partial derivatives,
    // used only to assemble the Jacobian.
    double k = (u - alpha)*iv;
    double kv = (v*beta/u - (u - alpha)*2)*iv*iv;
    double kx = kv*x, ky = kv*y;

    if( projType == PROJ_SPHERICAL_ORTHO )
    {
        if(J)
            *J = Vec4d(kx*x + k, kx*y, ky*x, ky*y + k);
        return Point2f((float)(x*k), (float)(y*k));
    }
    if( projType == PROJ_SPHERICAL_EQRECT )
    {
        // equirectangular
        double iR = 1/(alpha + 1);
        // Clamp to [-1, 1] so asin() below stays defined.
        double x1 = std::max(a: std::min(a: x*k*iR, b: 1.), b: -1.);
        double y1 = std::max(a: std::min(a: y*k*iR, b: 1.), b: -1.);

        if(J)
        {
            // Chain rule: derivative of asin composed with the scaled map.
            double fx1 = iR/std::sqrt(x: 1 - x1*x1);
            double fy1 = iR/std::sqrt(x: 1 - y1*y1);
            *J = Vec4d(fx1*(kx*x + k), fx1*ky*x, fy1*kx*y, fy1*(ky*y + k));
        }
        return Point2f((float)asin(x: x1), (float)asin(x: y1));
    }
    CV_Error(Error::StsBadArg, "Unknown projection type" );
}
| 596 | |
| 597 | |
// Numerically inverts mapPointSpherical(): finds q such that
// mapPointSpherical(q) == _p, using up to 5 Gauss-Newton iterations with the
// 2x2 Jacobian supplied by the forward mapping. Returns
// (-FLT_MAX, -FLT_MAX) when the iteration does not converge within the
// squared-error tolerance of 1e-12; callers treat that as "no valid pixel".
static Point2f invMapPointSpherical(Point2f _p, float alpha, enum UndistortTypes projType)
{
    double eps = 1e-12;
    Vec2d p(_p.x, _p.y), q(_p.x, _p.y), err;
    Vec4d J;
    int i, maxiter = 5;

    for( i = 0; i < maxiter; i++ )
    {
        Point2f p1 = mapPointSpherical(p: Point2f((float)q[0], (float)q[1]), alpha, J: &J, projType);
        err = Vec2d(p1.x, p1.y) - p;
        if( err[0]*err[0] + err[1]*err[1] < eps )
            break;

        // Gauss-Newton step: q -= (J^T J)^-1 J^T err, with the 2x2 inverse
        // written out explicitly (d == 0 degenerates to a zero step).
        Vec4d JtJ(J[0]*J[0] + J[2]*J[2], J[0]*J[1] + J[2]*J[3],
                  J[0]*J[1] + J[2]*J[3], J[1]*J[1] + J[3]*J[3]);
        double d = JtJ[0]*JtJ[3] - JtJ[1]*JtJ[2];
        d = d ? 1./d : 0;
        Vec4d iJtJ(JtJ[3]*d, -JtJ[1]*d, -JtJ[2]*d, JtJ[0]*d);
        Vec2d JtErr(J[0]*err[0] + J[2]*err[1], J[1]*err[0] + J[3]*err[1]);

        q -= Vec2d(iJtJ[0]*JtErr[0] + iJtJ[1]*JtErr[1], iJtJ[2]*JtErr[0] + iJtJ[3]*JtErr[1]);
        //Matx22d J(kx*x + k, kx*y, ky*x, ky*y + k);
        //q -= Vec2d((J.t()*J).inv()*(J.t()*err));
    }

    return i < maxiter ? Point2f((float)q[0], (float)q[1]) : Point2f(-FLT_MAX, -FLT_MAX);
}
| 626 | |
| 627 | float initWideAngleProjMap(InputArray _cameraMatrix0, InputArray _distCoeffs0, |
| 628 | Size imageSize, int destImageWidth, int m1type, |
| 629 | OutputArray _map1, OutputArray _map2, |
| 630 | enum UndistortTypes projType, double _alpha) |
| 631 | { |
| 632 | Mat cameraMatrix0 = _cameraMatrix0.getMat(), distCoeffs0 = _distCoeffs0.getMat(); |
| 633 | double k[14] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0}, M[9]={0,0,0,0,0,0,0,0,0}; |
| 634 | Mat distCoeffs(distCoeffs0.rows, distCoeffs0.cols, CV_MAKETYPE(CV_64F,distCoeffs0.channels()), k); |
| 635 | Mat cameraMatrix(3,3,CV_64F,M); |
| 636 | Point2f scenter((float)cameraMatrix.at<double>(i0: 0,i1: 2), (float)cameraMatrix.at<double>(i0: 1,i1: 2)); |
| 637 | Point2f dcenter((destImageWidth-1)*0.5f, 0.f); |
| 638 | float xmin = FLT_MAX, xmax = -FLT_MAX, ymin = FLT_MAX, ymax = -FLT_MAX; |
| 639 | int N = 9; |
| 640 | std::vector<Point2f> uvec(1), vvec(1); |
| 641 | Mat I = Mat::eye(rows: 3,cols: 3,CV_64F); |
| 642 | float alpha = (float)_alpha; |
| 643 | |
| 644 | int ndcoeffs = distCoeffs0.cols*distCoeffs0.rows*distCoeffs0.channels(); |
| 645 | CV_Assert((distCoeffs0.cols == 1 || distCoeffs0.rows == 1) && |
| 646 | (ndcoeffs == 4 || ndcoeffs == 5 || ndcoeffs == 8 || ndcoeffs == 12 || ndcoeffs == 14)); |
| 647 | CV_Assert(cameraMatrix0.size() == Size(3,3)); |
| 648 | distCoeffs0.convertTo(m: distCoeffs,CV_64F); |
| 649 | cameraMatrix0.convertTo(m: cameraMatrix,CV_64F); |
| 650 | |
| 651 | alpha = std::min(a: alpha, b: 0.999f); |
| 652 | |
| 653 | for( int i = 0; i < N; i++ ) |
| 654 | for( int j = 0; j < N; j++ ) |
| 655 | { |
| 656 | Point2f p((float)j*imageSize.width/(N-1), (float)i*imageSize.height/(N-1)); |
| 657 | uvec[0] = p; |
| 658 | undistortPoints(src: uvec, dst: vvec, cameraMatrix: cameraMatrix, distCoeffs: distCoeffs, Rmat: I, Pmat: I); |
| 659 | Point2f q = mapPointSpherical(p: vvec[0], alpha, J: 0, projType); |
| 660 | if( xmin > q.x ) xmin = q.x; |
| 661 | if( xmax < q.x ) xmax = q.x; |
| 662 | if( ymin > q.y ) ymin = q.y; |
| 663 | if( ymax < q.y ) ymax = q.y; |
| 664 | } |
| 665 | |
| 666 | float scale = (float)std::min(a: dcenter.x/fabs(x: xmax), b: dcenter.x/fabs(x: xmin)); |
| 667 | Size dsize(destImageWidth, cvCeil(value: std::max(a: scale*fabs(x: ymin)*2, b: scale*fabs(x: ymax)*2))); |
| 668 | dcenter.y = (dsize.height - 1)*0.5f; |
| 669 | |
| 670 | Mat mapxy(dsize, CV_32FC2); |
| 671 | double k1 = k[0], k2 = k[1], k3 = k[2], p1 = k[3], p2 = k[4], k4 = k[5], k5 = k[6], k6 = k[7], s1 = k[8], s2 = k[9], s3 = k[10], s4 = k[11]; |
| 672 | double fx = cameraMatrix.at<double>(i0: 0,i1: 0), fy = cameraMatrix.at<double>(i0: 1,i1: 1), cx = scenter.x, cy = scenter.y; |
| 673 | cv::Matx33d matTilt; |
| 674 | cv::detail::computeTiltProjectionMatrix(tauX: k[12], tauY: k[13], matTilt: &matTilt); |
| 675 | |
| 676 | for( int y = 0; y < dsize.height; y++ ) |
| 677 | { |
| 678 | Point2f* mxy = mapxy.ptr<Point2f>(y); |
| 679 | for( int x = 0; x < dsize.width; x++ ) |
| 680 | { |
| 681 | Point2f p = (Point2f((float)x, (float)y) - dcenter)*(1.f/scale); |
| 682 | Point2f q = invMapPointSpherical(p: p, alpha, projType); |
| 683 | if( q.x <= -FLT_MAX && q.y <= -FLT_MAX ) |
| 684 | { |
| 685 | mxy[x] = Point2f(-1.f, -1.f); |
| 686 | continue; |
| 687 | } |
| 688 | double x2 = q.x*q.x, y2 = q.y*q.y; |
| 689 | double r2 = x2 + y2, _2xy = 2*q.x*q.y; |
| 690 | double kr = 1 + ((k3*r2 + k2)*r2 + k1)*r2/(1 + ((k6*r2 + k5)*r2 + k4)*r2); |
| 691 | double xd = (q.x*kr + p1*_2xy + p2*(r2 + 2*x2) + s1*r2+ s2*r2*r2); |
| 692 | double yd = (q.y*kr + p1*(r2 + 2*y2) + p2*_2xy + s3*r2+ s4*r2*r2); |
| 693 | cv::Vec3d vecTilt = matTilt*cv::Vec3d(xd, yd, 1); |
| 694 | double invProj = vecTilt(2) ? 1./vecTilt(2) : 1; |
| 695 | double u = fx*invProj*vecTilt(0) + cx; |
| 696 | double v = fy*invProj*vecTilt(1) + cy; |
| 697 | |
| 698 | mxy[x] = Point2f((float)u, (float)v); |
| 699 | } |
| 700 | } |
| 701 | |
| 702 | if(m1type == CV_32FC2) |
| 703 | { |
| 704 | _map1.create(sz: mapxy.size(), type: mapxy.type()); |
| 705 | Mat map1 = _map1.getMat(); |
| 706 | mapxy.copyTo(m: map1); |
| 707 | _map2.release(); |
| 708 | } |
| 709 | else |
| 710 | convertMaps(map1: mapxy, map2: Mat(), dstmap1: _map1, dstmap2: _map2, dstmap1type: m1type, nninterpolation: false); |
| 711 | |
| 712 | return scale; |
| 713 | } |
| 714 | |
| 715 | } // namespace |
| 716 | /* End of file */ |
| 717 | |