// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html

#include "../precomp.hpp"
#include <opencv2/calib3d.hpp>

#include "opencv2/objdetect/aruco_detector.hpp"
#include "opencv2/objdetect/aruco_board.hpp"
#include "apriltag/apriltag_quad_thresh.hpp"
#include "aruco_utils.hpp"
#include <cmath>
#include <map>

namespace cv {
namespace aruco {

using namespace std;
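
/**
 * @brief Read DetectorParameters from a FileNode or write them to a FileStorage.
 *
 * If readNode is given, the parameters are read from it; if writeStorage is given, they are
 * written to it. Returns true if at least one parameter was read or written.
 */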
static inline bool readWrite(DetectorParameters &params, const FileNode* readNode,
                             FileStorage* writeStorage = nullptr)
{
    CV_Assert(readNode || writeStorage);
    bool check = false;

    check |= readWriteParameter("adaptiveThreshWinSizeMin", params.adaptiveThreshWinSizeMin, readNode, writeStorage);
    check |= readWriteParameter("adaptiveThreshWinSizeMax", params.adaptiveThreshWinSizeMax, readNode, writeStorage);
    check |= readWriteParameter("adaptiveThreshWinSizeStep", params.adaptiveThreshWinSizeStep, readNode, writeStorage);
    check |= readWriteParameter("adaptiveThreshConstant", params.adaptiveThreshConstant, readNode, writeStorage);
    check |= readWriteParameter("minMarkerPerimeterRate", params.minMarkerPerimeterRate, readNode, writeStorage);
    check |= readWriteParameter("maxMarkerPerimeterRate", params.maxMarkerPerimeterRate, readNode, writeStorage);
    check |= readWriteParameter("polygonalApproxAccuracyRate", params.polygonalApproxAccuracyRate,
                                readNode, writeStorage);
    check |= readWriteParameter("minCornerDistanceRate", params.minCornerDistanceRate, readNode, writeStorage);
    check |= readWriteParameter("minDistanceToBorder", params.minDistanceToBorder, readNode, writeStorage);
    check |= readWriteParameter("minMarkerDistanceRate", params.minMarkerDistanceRate, readNode, writeStorage);
    check |= readWriteParameter("cornerRefinementMethod", params.cornerRefinementMethod, readNode, writeStorage);
    check |= readWriteParameter("cornerRefinementWinSize", params.cornerRefinementWinSize, readNode, writeStorage);
    check |= readWriteParameter("relativeCornerRefinmentWinSize", params.relativeCornerRefinmentWinSize, readNode,
                                writeStorage);
    check |= readWriteParameter("cornerRefinementMaxIterations", params.cornerRefinementMaxIterations,
                                readNode, writeStorage);
    check |= readWriteParameter("cornerRefinementMinAccuracy", params.cornerRefinementMinAccuracy,
                                readNode, writeStorage);
    check |= readWriteParameter("markerBorderBits", params.markerBorderBits, readNode, writeStorage);
    check |= readWriteParameter("perspectiveRemovePixelPerCell", params.perspectiveRemovePixelPerCell,
                                readNode, writeStorage);
    check |= readWriteParameter("perspectiveRemoveIgnoredMarginPerCell", params.perspectiveRemoveIgnoredMarginPerCell,
                                readNode, writeStorage);
    check |= readWriteParameter("maxErroneousBitsInBorderRate", params.maxErroneousBitsInBorderRate,
                                readNode, writeStorage);
    check |= readWriteParameter("minOtsuStdDev", params.minOtsuStdDev, readNode, writeStorage);
    check |= readWriteParameter("errorCorrectionRate", params.errorCorrectionRate, readNode, writeStorage);
    check |= readWriteParameter("minGroupDistance", params.minGroupDistance, readNode, writeStorage);
    // new aruco 3 functionality
    check |= readWriteParameter("useAruco3Detection", params.useAruco3Detection, readNode, writeStorage);
    check |= readWriteParameter("minSideLengthCanonicalImg", params.minSideLengthCanonicalImg, readNode, writeStorage);
    check |= readWriteParameter("minMarkerLengthRatioOriginalImg", params.minMarkerLengthRatioOriginalImg,
                                readNode, writeStorage);
    return check;
}

bool DetectorParameters::readDetectorParameters(const FileNode& fn)
{
    if (fn.empty())
        return false;
    return readWrite(*this, &fn);
}

bool DetectorParameters::writeDetectorParameters(FileStorage& fs, const String& name)
{
    CV_Assert(fs.isOpened());
    if (!name.empty())
        fs << name << "{";
    bool res = readWrite(*this, nullptr, &fs);
    if (!name.empty())
        fs << "}";
    return res;
}
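
// Illustrative usage of the serialization methods above (the file and node names are
// arbitrary examples, not part of this module):
//
//     cv::aruco::DetectorParameters params;
//     cv::FileStorage fsWrite("detector_params.yml", cv::FileStorage::WRITE);
//     params.writeDetectorParameters(fsWrite, "aruco_params");  // writes a named map
//     fsWrite.release();
//
//     cv::FileStorage fsRead("detector_params.yml", cv::FileStorage::READ);
//     params.readDetectorParameters(fsRead["aruco_params"]);    // reads it back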

static inline bool readWrite(RefineParameters& refineParameters, const FileNode* readNode,
                             FileStorage* writeStorage = nullptr)
{
    CV_Assert(readNode || writeStorage);
    bool check = false;

    check |= readWriteParameter("minRepDistance", refineParameters.minRepDistance, readNode, writeStorage);
    check |= readWriteParameter("errorCorrectionRate", refineParameters.errorCorrectionRate, readNode, writeStorage);
    check |= readWriteParameter("checkAllOrders", refineParameters.checkAllOrders, readNode, writeStorage);
    return check;
}

RefineParameters::RefineParameters(float _minRepDistance, float _errorCorrectionRate, bool _checkAllOrders):
    minRepDistance(_minRepDistance), errorCorrectionRate(_errorCorrectionRate),
    checkAllOrders(_checkAllOrders){}

bool RefineParameters::readRefineParameters(const FileNode &fn)
{
    if (fn.empty())
        return false;
    return readWrite(*this, &fn);
}

bool RefineParameters::writeRefineParameters(FileStorage& fs, const String& name)
{
    CV_Assert(fs.isOpened());
    if (!name.empty())
        fs << name << "{";
    bool res = readWrite(*this, nullptr, &fs);
    if (!name.empty())
        fs << "}";
    return res;
}

/**
 * @brief Threshold input image using adaptive thresholding
 */
static void _threshold(InputArray _in, OutputArray _out, int winSize, double constant) {

    CV_Assert(winSize >= 3);
    if(winSize % 2 == 0) winSize++; // win size must be odd
    adaptiveThreshold(_in, _out, 255, ADAPTIVE_THRESH_MEAN_C, THRESH_BINARY_INV, winSize, constant);
}


/**
 * @brief Given a thresholded image, find the contours, calculate their polygonal approximation
 * and keep those that satisfy some conditions
 */
static void _findMarkerContours(const Mat &in, vector<vector<Point2f> > &candidates,
                                vector<vector<Point> > &contoursOut, double minPerimeterRate,
                                double maxPerimeterRate, double accuracyRate,
                                double minCornerDistanceRate, int minSize) {

    CV_Assert(minPerimeterRate > 0 && maxPerimeterRate > 0 && accuracyRate > 0 &&
              minCornerDistanceRate >= 0);

    // calculate maximum and minimum sizes in pixels
    unsigned int minPerimeterPixels =
        (unsigned int)(minPerimeterRate * max(in.cols, in.rows));
    unsigned int maxPerimeterPixels =
        (unsigned int)(maxPerimeterRate * max(in.cols, in.rows));

    // for aruco3 functionality
    if (minSize != 0) {
        minPerimeterPixels = 4*minSize;
    }

    vector<vector<Point> > contours;
    findContours(in, contours, RETR_LIST, CHAIN_APPROX_NONE);
    // now filter list of contours
    for(unsigned int i = 0; i < contours.size(); i++) {
        // check perimeter
        if(contours[i].size() < minPerimeterPixels || contours[i].size() > maxPerimeterPixels)
            continue;

        // check that the contour is a convex quadrilateral
        vector<Point> approxCurve;
        approxPolyDP(contours[i], approxCurve, double(contours[i].size()) * accuracyRate, true);
        if(approxCurve.size() != 4 || !isContourConvex(approxCurve)) continue;

        // check min distance between corners
        double minDistSq = max(in.cols, in.rows) * max(in.cols, in.rows);
        for(int j = 0; j < 4; j++) {
            double d = (double)(approxCurve[j].x - approxCurve[(j + 1) % 4].x) *
                           (double)(approxCurve[j].x - approxCurve[(j + 1) % 4].x) +
                       (double)(approxCurve[j].y - approxCurve[(j + 1) % 4].y) *
                           (double)(approxCurve[j].y - approxCurve[(j + 1) % 4].y);
            minDistSq = min(minDistSq, d);
        }
        double minCornerDistancePixels = double(contours[i].size()) * minCornerDistanceRate;
        if(minDistSq < minCornerDistancePixels * minCornerDistancePixels) continue;

        // if it passes all the tests, add to candidates vector
        vector<Point2f> currentCandidate;
        currentCandidate.resize(4);
        for(int j = 0; j < 4; j++) {
            currentCandidate[j] = Point2f((float)approxCurve[j].x, (float)approxCurve[j].y);
        }
        candidates.push_back(currentCandidate);
        contoursOut.push_back(contours[i]);
    }
}


/**
 * @brief Ensure the candidate corners are ordered in clockwise direction
 */
static void _reorderCandidatesCorners(vector<vector<Point2f> > &candidates) {

    for(unsigned int i = 0; i < candidates.size(); i++) {
        double dx1 = candidates[i][1].x - candidates[i][0].x;
        double dy1 = candidates[i][1].y - candidates[i][0].y;
        double dx2 = candidates[i][2].x - candidates[i][0].x;
        double dy2 = candidates[i][2].y - candidates[i][0].y;
        double crossProduct = (dx1 * dy2) - (dy1 * dx2);

        if(crossProduct < 0.0) { // not clockwise direction
            swap(candidates[i][1], candidates[i][3]);
        }
    }
}

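/**
 * @brief Estimate the average module (bit cell) size in pixels of a marker candidate,
 * given the marker size in bits and the border width in bits.
 */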
static float getAverageModuleSize(const vector<Point2f>& markerCorners, int markerSize, int markerBorderBits) {
    float averageArucoModuleSize = 0.f;
    for (size_t i = 0ull; i < 4ull; i++) {
        averageArucoModuleSize += sqrt(normL2Sqr<float>(Point2f(markerCorners[i] - markerCorners[(i+1ull) % 4ull])));
    }
    int numModules = markerSize + markerBorderBits * 2;
    averageArucoModuleSize /= ((float)markerCorners.size()*numModules);
    return averageArucoModuleSize;
}

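/**
 * @brief Check whether all four corners of marker1 lie inside (or on) the contour of marker2.
 */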
static bool checkMarker1InMarker2(const vector<Point2f>& marker1, const vector<Point2f>& marker2) {
    return pointPolygonTest(marker2, marker1[0], false) >= 0 && pointPolygonTest(marker2, marker1[1], false) >= 0 &&
           pointPolygonTest(marker2, marker1[2], false) >= 0 && pointPolygonTest(marker2, marker1[3], false) >= 0;
}

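/**
 * @brief Square marker candidate: its four corners, the original contour and the perimeter
 * of the corner polygon.
 */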
struct MarkerCandidate {
    vector<Point2f> corners;
    vector<Point> contour;
    float perimeter = 0.f;
};

struct MarkerCandidateTree : MarkerCandidate{
    int parent = -1;
    int depth = 0;
    vector<MarkerCandidate> closeContours;

    MarkerCandidateTree() {}

    MarkerCandidateTree(vector<Point2f>&& corners_, vector<Point>&& contour_) {
        corners = std::move(corners_);
        contour = std::move(contour_);
        perimeter = 0.f;
        for (size_t i = 0ull; i < 4ull; i++) {
            perimeter += sqrt(normL2Sqr<float>(Point2f(corners[i] - corners[(i+1ull) % 4ull])));
        }
    }

    bool operator<(const MarkerCandidateTree& m) const {
        // sort the contours in descending order of perimeter
        return perimeter > m.perimeter;
    }
};


// returns the root-mean-square distance between corresponding corners of the two markers,
// minimized over the four possible cyclic corner orderings
float static inline getAverageDistance(const std::vector<Point2f>& marker1, const std::vector<Point2f>& marker2) {
    float minDistSq = std::numeric_limits<float>::max();
    // fc is the first corner considered on one of the markers, 4 combinations are possible
    for(int fc = 0; fc < 4; fc++) {
        float distSq = 0;
        for(int c = 0; c < 4; c++) {
            // modC is the corner considering first corner is fc
            int modC = (c + fc) % 4;
            distSq += normL2Sqr<float>(marker1[modC] - marker2[c]);
        }
        distSq /= 4.f;
        minDistSq = min(minDistSq, distSq);
    }
    return sqrt(minDistSq);
}

/**
 * @brief Initial steps on finding square candidates
 */
static void _detectInitialCandidates(const Mat &grey, vector<vector<Point2f> > &candidates,
                                     vector<vector<Point> > &contours,
                                     const DetectorParameters &params) {

    CV_Assert(params.adaptiveThreshWinSizeMin >= 3 && params.adaptiveThreshWinSizeMax >= 3);
    CV_Assert(params.adaptiveThreshWinSizeMax >= params.adaptiveThreshWinSizeMin);
    CV_Assert(params.adaptiveThreshWinSizeStep > 0);

    // number of window sizes (scales) to apply adaptive thresholding
    int nScales = (params.adaptiveThreshWinSizeMax - params.adaptiveThreshWinSizeMin) /
                      params.adaptiveThreshWinSizeStep + 1;

    vector<vector<vector<Point2f> > > candidatesArrays((size_t) nScales);
    vector<vector<vector<Point> > > contoursArrays((size_t) nScales);

    // for each value in the interval of thresholding window sizes
    parallel_for_(Range(0, nScales), [&](const Range& range) {
        const int begin = range.start;
        const int end = range.end;

        for (int i = begin; i < end; i++) {
            int currScale = params.adaptiveThreshWinSizeMin + i * params.adaptiveThreshWinSizeStep;
            // threshold
            Mat thresh;
            _threshold(grey, thresh, currScale, params.adaptiveThreshConstant);

            // detect rectangles
            _findMarkerContours(thresh, candidatesArrays[i], contoursArrays[i],
                                params.minMarkerPerimeterRate, params.maxMarkerPerimeterRate,
                                params.polygonalApproxAccuracyRate, params.minCornerDistanceRate,
                                params.minSideLengthCanonicalImg);
        }
    });
    // join candidates
    for(int i = 0; i < nScales; i++) {
        for(unsigned int j = 0; j < candidatesArrays[i].size(); j++) {
            candidates.push_back(candidatesArrays[i][j]);
            contours.push_back(contoursArrays[i][j]);
        }
    }
}


/**
 * @brief Given an input image and candidate corners, extract the bits of the candidate, including
 * the border bits
 */
static Mat _extractBits(InputArray _image, const vector<Point2f>& corners, int markerSize,
                        int markerBorderBits, int cellSize, double cellMarginRate, double minStdDevOtsu) {
    CV_Assert(_image.getMat().channels() == 1);
    CV_Assert(corners.size() == 4ull);
    CV_Assert(markerBorderBits > 0 && cellSize > 0 && cellMarginRate >= 0 && cellMarginRate <= 1);
    CV_Assert(minStdDevOtsu >= 0);

    // number of bits in the marker
    int markerSizeWithBorders = markerSize + 2 * markerBorderBits;
    int cellMarginPixels = int(cellMarginRate * cellSize);

    Mat resultImg; // marker image after removing perspective
    int resultImgSize = markerSizeWithBorders * cellSize;
    Mat resultImgCorners(4, 1, CV_32FC2);
    resultImgCorners.ptr<Point2f>(0)[0] = Point2f(0, 0);
    resultImgCorners.ptr<Point2f>(0)[1] = Point2f((float)resultImgSize - 1, 0);
    resultImgCorners.ptr<Point2f>(0)[2] =
        Point2f((float)resultImgSize - 1, (float)resultImgSize - 1);
    resultImgCorners.ptr<Point2f>(0)[3] = Point2f(0, (float)resultImgSize - 1);

    // remove perspective
    Mat transformation = getPerspectiveTransform(corners, resultImgCorners);
    warpPerspective(_image, resultImg, transformation, Size(resultImgSize, resultImgSize),
                    INTER_NEAREST);

    // output image containing the bits
    Mat bits(markerSizeWithBorders, markerSizeWithBorders, CV_8UC1, Scalar::all(0));

    // check if standard deviation is enough to apply Otsu
    // if not enough, it probably means all bits are the same color (black or white)
    Mat mean, stddev;
    // Remove some border just to avoid border noise from perspective transformation
    Mat innerRegion = resultImg.colRange(cellSize / 2, resultImg.cols - cellSize / 2)
                          .rowRange(cellSize / 2, resultImg.rows - cellSize / 2);
    meanStdDev(innerRegion, mean, stddev);
    if(stddev.ptr<double>(0)[0] < minStdDevOtsu) {
        // all black or all white, depending on mean value
        if(mean.ptr<double>(0)[0] > 127)
            bits.setTo(1);
        else
            bits.setTo(0);
        return bits;
    }

    // now extract code, first threshold using Otsu
    threshold(resultImg, resultImg, 125, 255, THRESH_BINARY | THRESH_OTSU);

    // for each cell
    for(int y = 0; y < markerSizeWithBorders; y++) {
        for(int x = 0; x < markerSizeWithBorders; x++) {
            int Xstart = x * (cellSize) + cellMarginPixels;
            int Ystart = y * (cellSize) + cellMarginPixels;
            Mat square = resultImg(Rect(Xstart, Ystart, cellSize - 2 * cellMarginPixels,
                                        cellSize - 2 * cellMarginPixels));
            // count white pixels on each cell to assign its value
            size_t nZ = (size_t) countNonZero(square);
            if(nZ > square.total() / 2) bits.at<unsigned char>(y, x) = 1;
        }
    }

    return bits;
}


/**
 * @brief Return number of erroneous bits in border, i.e. number of white bits in border.
 */
static int _getBorderErrors(const Mat &bits, int markerSize, int borderSize) {

    int sizeWithBorders = markerSize + 2 * borderSize;

    CV_Assert(markerSize > 0 && bits.cols == sizeWithBorders && bits.rows == sizeWithBorders);

    int totalErrors = 0;
    for(int y = 0; y < sizeWithBorders; y++) {
        for(int k = 0; k < borderSize; k++) {
            if(bits.ptr<unsigned char>(y)[k] != 0) totalErrors++;
            if(bits.ptr<unsigned char>(y)[sizeWithBorders - 1 - k] != 0) totalErrors++;
        }
    }
    for(int x = borderSize; x < sizeWithBorders - borderSize; x++) {
        for(int k = 0; k < borderSize; k++) {
            if(bits.ptr<unsigned char>(k)[x] != 0) totalErrors++;
            if(bits.ptr<unsigned char>(sizeWithBorders - 1 - k)[x] != 0) totalErrors++;
        }
    }
    return totalErrors;
}


/**
 * @brief Tries to identify one candidate given the dictionary
 * @return candidate type: zero if the candidate is not valid,
 *         1 if the candidate is a black (regular) candidate,
 *         2 if the candidate is a white (inverted) candidate
 */
static uint8_t _identifyOneCandidate(const Dictionary& dictionary, const Mat& _image,
                                     const vector<Point2f>& _corners, int& idx,
                                     const DetectorParameters& params, int& rotation,
                                     const float scale = 1.f) {
    CV_DbgAssert(params.markerBorderBits > 0);
    uint8_t typ=1;
    // get bits
    // scale corners to the correct size to search on the corresponding image pyramid
    vector<Point2f> scaled_corners(4);
    for (int i = 0; i < 4; ++i) {
        scaled_corners[i].x = _corners[i].x * scale;
        scaled_corners[i].y = _corners[i].y * scale;
    }

    Mat candidateBits =
        _extractBits(_image, scaled_corners, dictionary.markerSize, params.markerBorderBits,
                     params.perspectiveRemovePixelPerCell,
                     params.perspectiveRemoveIgnoredMarginPerCell, params.minOtsuStdDev);

    // analyze border bits
    int maximumErrorsInBorder =
        int(dictionary.markerSize * dictionary.markerSize * params.maxErroneousBitsInBorderRate);
    int borderErrors =
        _getBorderErrors(candidateBits, dictionary.markerSize, params.markerBorderBits);

    // check if it is a white (inverted) marker
    if(params.detectInvertedMarker){
        // to get from 255 to 1
        Mat invertedImg = ~candidateBits-254;
        int invBError = _getBorderErrors(invertedImg, dictionary.markerSize, params.markerBorderBits);
        // white marker
        if(invBError<borderErrors){
            borderErrors = invBError;
            invertedImg.copyTo(candidateBits);
            typ=2;
        }
    }
    if(borderErrors > maximumErrorsInBorder) return 0; // border is wrong

    // take only inner bits
    Mat onlyBits =
        candidateBits.rowRange(params.markerBorderBits,
                               candidateBits.rows - params.markerBorderBits)
            .colRange(params.markerBorderBits, candidateBits.cols - params.markerBorderBits);

    // try to identify the marker
    if(!dictionary.identify(onlyBits, idx, rotation, params.errorCorrectionRate))
        return 0;

    return typ;
}

/**
 * @brief rotate the initial corner to get to the right position
 */
static void correctCornerPosition(vector<Point2f>& _candidate, int rotate){
    std::rotate(_candidate.begin(), _candidate.begin() + 4 - rotate, _candidate.end());
}

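/**
 * @brief Pick the pyramid level whose scaled contour perimeter is the smallest one still
 * above the given minimum perimeter, i.e. the most downscaled image on which the candidate
 * is still large enough for decoding.
 */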
static size_t _findOptPyrImageForCanonicalImg(
        const vector<Mat>& img_pyr,
        const int scaled_width,
        const int cur_perimeter,
        const int min_perimeter) {
    CV_Assert(scaled_width > 0);
    size_t optLevel = 0;
    float dist = std::numeric_limits<float>::max();
    for (size_t i = 0; i < img_pyr.size(); ++i) {
        const float scale = img_pyr[i].cols / static_cast<float>(scaled_width);
        const float perimeter_scaled = cur_perimeter * scale;
        // instead of std::abs(), favor the larger pyramid level by checking that the distance is positive;
        // this slows down the algorithm but finds more corners in the end
        const float new_dist = perimeter_scaled - min_perimeter;
        if (new_dist < dist && new_dist > 0.f) {
            dist = new_dist;
            optLevel = i;
        }
    }
    return optLevel;
}


/**
 * Least-squares line fitting, solving A * C = B for the line coefficients C :: Called from function _refineCandidateLines
 * @param nContours contour-container
 */
static Point3f _interpolate2Dline(const vector<Point2f>& nContours){
    CV_Assert(nContours.size() >= 2);
    float minX, minY, maxX, maxY;
    minX = maxX = nContours[0].x;
    minY = maxY = nContours[0].y;

    for(unsigned int i = 0; i< nContours.size(); i++){
        minX = nContours[i].x < minX ? nContours[i].x : minX;
        minY = nContours[i].y < minY ? nContours[i].y : minY;
        maxX = nContours[i].x > maxX ? nContours[i].x : maxX;
        maxY = nContours[i].y > maxY ? nContours[i].y : maxY;
    }

    Mat A = Mat::ones((int)nContours.size(), 2, CV_32F); // Coefficient Matrix (N x 2)
    Mat B((int)nContours.size(), 1, CV_32F);             // Variables Matrix (N x 1)
    Mat C;                                               // Constant

    if(maxX - minX > maxY - minY){
        for(unsigned int i =0; i < nContours.size(); i++){
            A.at<float>(i, 0) = nContours[i].x;
            B.at<float>(i, 0) = nContours[i].y;
        }

        solve(A, B, C, DECOMP_NORMAL);

        return Point3f(C.at<float>(0, 0), -1., C.at<float>(1, 0));
    }
    else{
        for(unsigned int i =0; i < nContours.size(); i++){
            A.at<float>(i, 0) = nContours[i].y;
            B.at<float>(i, 0) = nContours[i].x;
        }

        solve(A, B, C, DECOMP_NORMAL);

        return Point3f(-1., C.at<float>(0, 0), C.at<float>(1, 0));
    }

}

/**
 * Find the point where two lines cross :: Called from function _refineCandidateLines
 * @param nLine1
 * @param nLine2
 * @return crossing point
 */
static Point2f _getCrossPoint(Point3f nLine1, Point3f nLine2){
    Matx22f A(nLine1.x, nLine1.y, nLine2.x, nLine2.y);
    Vec2f B(-nLine1.z, -nLine2.z);
    return Vec2f(A.solve(B).val);
}

/**
 * Refine corners using the contour vector :: Called from function detectMarkers
 * @param nContours contour-container
 * @param nCorners candidate corners
 */
static void _refineCandidateLines(vector<Point>& nContours, vector<Point2f>& nCorners){
    vector<Point2f> contour2f(nContours.begin(), nContours.end());
    /* 5 groups :: to group the edges
     * 4 - classified by its corner
     * extra group - (temporary) if contours do not begin with a corner
     */
    vector<Point2f> cntPts[5];
    int cornerIndex[4]={-1};
    int group=4;

    for ( unsigned int i =0; i < nContours.size(); i++ ) {
        for(unsigned int j=0; j<4; j++){
            if ( nCorners[j] == contour2f[i] ){
                cornerIndex[j] = i;
                group=j;
            }
        }
        cntPts[group].push_back(contour2f[i]);
    }
    for (int i = 0; i < 4; i++)
    {
        CV_Assert(cornerIndex[i] != -1);
    }
    // save the extra group into the corresponding one
    if( !cntPts[4].empty() ){
        for( unsigned int i=0; i < cntPts[4].size() ; i++ )
            cntPts[group].push_back(cntPts[4].at(i));
        cntPts[4].clear();
    }

    //Evaluate contour direction :: using the position of the detected corners
    int inc=1;

    inc = ( (cornerIndex[0] > cornerIndex[1]) && (cornerIndex[3] > cornerIndex[0]) ) ? -1:inc;
    inc = ( (cornerIndex[2] > cornerIndex[3]) && (cornerIndex[1] > cornerIndex[2]) ) ? -1:inc;

    // calculate the line that passes through each group of points
    Point3f lines[4];
    for(int i=0; i<4; i++){
        lines[i]=_interpolate2Dline(cntPts[i]);
    }

    /*
     * calculate the corners :: where the lines cross each other
     *
     *   clockwise direction        counterclockwise direction
     *          0                            1
     *          .---.    1                   .---.    2
     *          |   |                        |   |
     *       3  .___.                     0  .___.
     *               2                            3
     */
    for(int i=0; i < 4; i++){
        if(inc<0)
            nCorners[i] = _getCrossPoint(lines[ i ], lines[ (i+1)%4 ]); // 01 12 23 30
        else
            nCorners[i] = _getCrossPoint(lines[ i ], lines[ (i+3)%4 ]); // 30 01 12 23
    }
}

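/**
 * @brief Refine corners with cornerSubPix(), walking the image pyramid from the level closest
 * to the canonical image up to the original resolution and rescaling the corners at each step.
 */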
static inline void findCornerInPyrImage(const float scale_init, const int closest_pyr_image_idx,
                                        const vector<Mat>& grey_pyramid, Mat corners,
                                        const DetectorParameters& params) {
    // scale them to the closest pyramid level
    if (scale_init != 1.f)
        corners *= scale_init; // scale_init * scale_pyr
    for (int idx = closest_pyr_image_idx - 1; idx >= 0; --idx) {
        // scale them to new pyramid level
        corners *= 2.f; // *= scale_pyr;
        // use larger win size for larger images
        const int subpix_win_size = std::max(grey_pyramid[idx].cols, grey_pyramid[idx].rows) > 1080 ? 5 : 3;
        cornerSubPix(grey_pyramid[idx], corners,
                     Size(subpix_win_size, subpix_win_size),
                     Size(-1, -1),
                     TermCriteria(TermCriteria::MAX_ITER | TermCriteria::EPS,
                                  params.cornerRefinementMaxIterations,
                                  params.cornerRefinementMinAccuracy));
    }
}

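/// Dictionary handling mode used by ArucoDetectorImpl::detectMarkers: search with a single
/// dictionary (the first one) or with all configured dictionaries.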
enum class DictionaryMode {
    Single,
    Multi
};

struct ArucoDetector::ArucoDetectorImpl {
    /// dictionaries indicate the types of markers that will be searched
    vector<Dictionary> dictionaries;

    /// marker detection parameters, check DetectorParameters docs to see available settings
    DetectorParameters detectorParams;

    /// marker refine parameters
    RefineParameters refineParams;
    ArucoDetectorImpl() {}

    ArucoDetectorImpl(const vector<Dictionary>&_dictionaries, const DetectorParameters &_detectorParams,
                      const RefineParameters& _refineParams): dictionaries(_dictionaries),
                      detectorParams(_detectorParams), refineParams(_refineParams) {
        CV_Assert(!dictionaries.empty());
    }

    /*
     * @brief Detect markers using either all configured dictionaries or just the first one
     */
    void detectMarkers(InputArray _image, OutputArrayOfArrays _corners, OutputArray _ids,
                       OutputArrayOfArrays _rejectedImgPoints, OutputArray _dictIndices, DictionaryMode dictMode) {
        CV_Assert(!_image.empty());

        CV_Assert(detectorParams.markerBorderBits > 0);
        // check that the parameters are set correctly if Aruco3 is used
        CV_Assert(!(detectorParams.useAruco3Detection == true &&
                    detectorParams.minSideLengthCanonicalImg == 0 &&
                    detectorParams.minMarkerLengthRatioOriginalImg == 0.0));

        Mat grey;
        _convertToGrey(_image, grey);

        // Aruco3 functionality is an extension of Aruco.
        // The description can be found in:
        // [1] Speeded up detection of squared fiducial markers, 2018, FJ Romero-Ramirez et al.
        // if Aruco3 functionality is not wanted,
        // change some parameters to be sure it is turned off
        if (!detectorParams.useAruco3Detection) {
            detectorParams.minMarkerLengthRatioOriginalImg = 0.0;
            detectorParams.minSideLengthCanonicalImg = 0;
        }
        else {
            // always turn on corner refinement in case of Aruco3, due to upsampling
            detectorParams.cornerRefinementMethod = (int)CORNER_REFINE_SUBPIX;
            // only CORNER_REFINE_SUBPIX is implemented correctly for useAruco3Detection
            // Todo: update other CORNER_REFINE methods
        }

        /// Step 0: equation (2) from paper [1]
        const float fxfy = (!detectorParams.useAruco3Detection ? 1.f : detectorParams.minSideLengthCanonicalImg /
                            (detectorParams.minSideLengthCanonicalImg + std::max(grey.cols, grey.rows)*
                            detectorParams.minMarkerLengthRatioOriginalImg));

        /// Step 1: create image pyramid. Section 3.4. in [1]
        vector<Mat> grey_pyramid;
        int closest_pyr_image_idx = 0, num_levels = 0;
        //// Step 1.1: resize image with equation (1) from paper [1]
        if (detectorParams.useAruco3Detection) {
            const float scale_pyr = 2.f;
            const float img_area = static_cast<float>(grey.rows*grey.cols);
            const float min_area_marker = static_cast<float>(detectorParams.minSideLengthCanonicalImg*
                                                             detectorParams.minSideLengthCanonicalImg);
            // find max level
            num_levels = static_cast<int>(log2(img_area / min_area_marker)/scale_pyr);
            // the closest pyramid image to the downsampled segmentation image
            // will later be used as start index for corner upsampling
            const float scale_img_area = img_area * fxfy * fxfy;
            closest_pyr_image_idx = cvRound(log2(img_area / scale_img_area)/scale_pyr);
        }
        buildPyramid(grey, grey_pyramid, num_levels);

        // resize to the segmentation image
        // the contours will be detected in this reduced-size image
        if (fxfy != 1.f)
            resize(grey, grey, Size(cvRound(fxfy * grey.cols), cvRound(fxfy * grey.rows)));

        /// STEP 2: Detect marker candidates
        vector<vector<Point2f> > candidates;
        vector<vector<Point> > contours;
        vector<int> ids;

        /// STEP 2.a Detect marker candidates :: using AprilTag
        if(detectorParams.cornerRefinementMethod == (int)CORNER_REFINE_APRILTAG){
            _apriltag(grey, detectorParams, candidates, contours);
        }
        /// STEP 2.b Detect marker candidates :: traditional way
        else {
            detectCandidates(grey, candidates, contours);
        }

        /// STEP 2.c FILTER OUT NEAR CANDIDATE PAIRS
        vector<int> dictIndices;
        vector<vector<Point2f>> rejectedImgPoints;
        if (DictionaryMode::Single == dictMode) {
            Dictionary& dictionary = dictionaries.at(0);
            auto selectedCandidates = filterTooCloseCandidates(grey.size(), candidates, contours, dictionary.markerSize);
            candidates.clear();
            contours.clear();

            /// STEP 2: Check candidate codification (identify markers)
            identifyCandidates(grey, grey_pyramid, selectedCandidates, candidates, contours,
                               ids, dictionary, rejectedImgPoints);

            /// STEP 3: Corner refinement :: use corner subpix
            if (detectorParams.cornerRefinementMethod == (int)CORNER_REFINE_SUBPIX) {
                performCornerSubpixRefinement(grey, grey_pyramid, closest_pyr_image_idx, candidates, dictionary);
            }
        } else if (DictionaryMode::Multi == dictMode) {
            map<int, vector<MarkerCandidateTree>> candidatesPerDictionarySize;
            for (const Dictionary& dictionary : dictionaries) {
                candidatesPerDictionarySize.emplace(dictionary.markerSize, vector<MarkerCandidateTree>());
            }

            // create candidate trees for each dictionary size
            for (auto& candidatesTreeEntry : candidatesPerDictionarySize) {
                // copy candidates
                vector<vector<Point2f>> candidatesCopy = candidates;
                vector<vector<Point> > contoursCopy = contours;
                candidatesTreeEntry.second = filterTooCloseCandidates(grey.size(), candidatesCopy, contoursCopy, candidatesTreeEntry.first);
            }
            candidates.clear();
            contours.clear();

            /// STEP 2: Check candidate codification (identify markers)
            int dictIndex = 0;
            for (const Dictionary& currentDictionary : dictionaries) {
                // temporary variable to store the current candidates
                vector<vector<Point2f>> currentCandidates;
                identifyCandidates(grey, grey_pyramid, candidatesPerDictionarySize.at(currentDictionary.markerSize), currentCandidates, contours,
                                   ids, currentDictionary, rejectedImgPoints);
                if (_dictIndices.needed()) {
                    dictIndices.insert(dictIndices.end(), currentCandidates.size(), dictIndex);
                }

                /// STEP 3: Corner refinement :: use corner subpix
                if (detectorParams.cornerRefinementMethod == (int)CORNER_REFINE_SUBPIX) {
                    performCornerSubpixRefinement(grey, grey_pyramid, closest_pyr_image_idx, currentCandidates, currentDictionary);
                }
                candidates.insert(candidates.end(), currentCandidates.begin(), currentCandidates.end());
                dictIndex++;
            }

            // Clean up rejectedImgPoints by comparing to itself and all candidates
            const float epsilon = 0.000001f;
            auto compareCandidates = [epsilon](vector<Point2f> a, vector<Point2f> b) {
                for (int i = 0; i < 4; i++) {
                    if (std::abs(a[i].x - b[i].x) > epsilon || std::abs(a[i].y - b[i].y) > epsilon) {
                        return false;
                    }
                }
                return true;
            };
            std::sort(rejectedImgPoints.begin(), rejectedImgPoints.end(), [](const vector<Point2f>& a, const vector<Point2f>&b){
                float avgX = (a[0].x + a[1].x + a[2].x + a[3].x)*.25f;
                float avgY = (a[0].y + a[1].y + a[2].y + a[3].y)*.25f;
                float aDist = avgX*avgX + avgY*avgY;
                avgX = (b[0].x + b[1].x + b[2].x + b[3].x)*.25f;
                avgY = (b[0].y + b[1].y + b[2].y + b[3].y)*.25f;
                float bDist = avgX*avgX + avgY*avgY;
                return aDist < bDist;
            });
            auto last = std::unique(rejectedImgPoints.begin(), rejectedImgPoints.end(), compareCandidates);
            rejectedImgPoints.erase(last, rejectedImgPoints.end());

            for (auto it = rejectedImgPoints.begin(); it != rejectedImgPoints.end();) {
                bool erased = false;
                for (const auto& candidate : candidates) {
                    if (compareCandidates(candidate, *it)) {
                        it = rejectedImgPoints.erase(it);
                        erased = true;
                        break;
                    }
                }
                if (!erased) {
                    it++;
                }
            }
        }

        /// STEP 3, Optional : Corner refinement :: use contour container
        if (detectorParams.cornerRefinementMethod == (int)CORNER_REFINE_CONTOUR){

            if (!ids.empty()) {

                // do corner refinement using the contours for each detected marker
                parallel_for_(Range(0, (int)candidates.size()), [&](const Range& range) {
                    for (int i = range.start; i < range.end; i++) {
                        _refineCandidateLines(contours[i], candidates[i]);
                    }
                });
            }
        }

        if (detectorParams.cornerRefinementMethod != (int)CORNER_REFINE_SUBPIX && fxfy != 1.f) {
            // only CORNER_REFINE_SUBPIX is implemented correctly for useAruco3Detection
            // Todo: update other CORNER_REFINE methods

            // scale to original size; this, however, will lead to inaccurate detections!
            for (auto &vecPoints : candidates)
                for (auto &point : vecPoints)
                    point *= 1.f/fxfy;
        }

        // copy to output arrays
        _copyVector2Output(candidates, _corners);
        Mat(ids).copyTo(_ids);
        if(_rejectedImgPoints.needed()) {
            _copyVector2Output(rejectedImgPoints, _rejectedImgPoints);
        }
        if (_dictIndices.needed()) {
            Mat(dictIndices).copyTo(_dictIndices);
        }
    }

    /**
     * @brief Detect square candidates in the input image
     */
    void detectCandidates(const Mat& grey, vector<vector<Point2f> >& candidates, vector<vector<Point> >& contours) {
        /// 1. DETECT FIRST SET OF CANDIDATES
        _detectInitialCandidates(grey, candidates, contours, detectorParams);
        /// 2. SORT CORNERS
        _reorderCandidatesCorners(candidates);
    }

    /**
     * @brief Filter out candidate pairs that are too close to each other and candidates that are
     * too close to the image border
     *
     * Saves the outer/inner borders (i.e. potential candidates) to vector<MarkerCandidateTree>
     * and clears candidates and contours
     */
    vector<MarkerCandidateTree>
    filterTooCloseCandidates(const Size &imageSize, vector<vector<Point2f> > &candidates, vector<vector<Point> > &contours, int markerSize) {
        CV_Assert(detectorParams.minMarkerDistanceRate >= 0. && detectorParams.minDistanceToBorder >= 0);
        vector<MarkerCandidateTree> candidateTree(candidates.size());
        for(size_t i = 0ull; i < candidates.size(); i++) {
            candidateTree[i] = MarkerCandidateTree(std::move(candidates[i]), std::move(contours[i]));
        }

        // sort candidates from big to small
        std::stable_sort(candidateTree.begin(), candidateTree.end());
        // group index for each candidate
        vector<int> groupId(candidateTree.size(), -1);
        vector<vector<size_t> > groupedCandidates;
        vector<bool> isSelectedContours(candidateTree.size(), true);

        for (size_t i = 0ull; i < candidateTree.size(); i++) {
            for (size_t j = i + 1ull; j < candidateTree.size(); j++) {
                float minDist = getAverageDistance(candidateTree[i].corners, candidateTree[j].corners);
                // if mean distance is too low, group markers
                // the distance between the points of two independent markers should be more than half the side of the marker
                // half the side of the marker = (perimeter / 4) * 0.5 = perimeter * 0.125
                if(minDist < candidateTree[j].perimeter*(float)detectorParams.minMarkerDistanceRate) {
                    isSelectedContours[i] = false;
                    isSelectedContours[j] = false;
                    // i and j are not related to a group
                    if(groupId[i] < 0 && groupId[j] < 0){
                        // mark candidates with their corresponding group number
                        groupId[i] = groupId[j] = (int)groupedCandidates.size();
                        // create group
                        groupedCandidates.push_back({i, j});
                    }
                    // i is related to a group
                    else if(groupId[i] > -1 && groupId[j] == -1) {
                        int group = groupId[i];
                        groupId[j] = group;
                        // add to group
                        groupedCandidates[group].push_back(j);
                    }
                    // j is related to a group
                    else if(groupId[j] > -1 && groupId[i] == -1) {
                        int group = groupId[j];
                        groupId[i] = group;
                        // add to group
                        groupedCandidates[group].push_back(i);
                    }
                }
            }
            // group of one candidate
            if(isSelectedContours[i]) {
                isSelectedContours[i] = false;
                groupId[i] = (int)groupedCandidates.size();
                groupedCandidates.push_back({i});
            }
        }

        for (vector<size_t>& grouped : groupedCandidates) {
            if (detectorParams.detectInvertedMarker) // if detectInvertedMarker choose smallest contours
                std::stable_sort(grouped.begin(), grouped.end(), [](const size_t &a, const size_t &b) {
                    return a > b;
                });
            else // if detectInvertedMarker==false choose largest contours
                std::stable_sort(grouped.begin(), grouped.end());
            size_t currId = grouped[0];
            // check if it is too near to the image border
            bool tooNearBorder = false;
            for (const auto& corner : candidateTree[currId].corners) {
                if (corner.x < detectorParams.minDistanceToBorder ||
                    corner.y < detectorParams.minDistanceToBorder ||
                    corner.x > imageSize.width - 1 - detectorParams.minDistanceToBorder ||
                    corner.y > imageSize.height - 1 - detectorParams.minDistanceToBorder) {
                    tooNearBorder = true;
                    break;
                }
            }
            if (tooNearBorder) {
                continue;
            }
            isSelectedContours[currId] = true;
            for (size_t i = 1ull; i < grouped.size(); i++) {
                size_t id = grouped[i];
                float dist = getAverageDistance(candidateTree[id].corners, candidateTree[currId].corners);
                float moduleSize = getAverageModuleSize(candidateTree[id].corners, markerSize, detectorParams.markerBorderBits);
                if (dist > detectorParams.minGroupDistance*moduleSize) {
                    currId = id;
                    candidateTree[grouped[0]].closeContours.push_back(candidateTree[id]);
                }
            }
        }

        vector<MarkerCandidateTree> selectedCandidates;
        selectedCandidates.reserve(groupedCandidates.size());
        for (size_t i = 0ull; i < candidateTree.size(); i++) {
            if (isSelectedContours[i]) {
                selectedCandidates.push_back(std::move(candidateTree[i]));
            }
        }

        // find hierarchy in the candidate tree
        for (int i = (int)selectedCandidates.size()-1; i >= 0; i--) {
            for (int j = i - 1; j >= 0; j--) {
                if (checkMarker1InMarker2(selectedCandidates[i].corners, selectedCandidates[j].corners)) {
                    selectedCandidates[i].parent = j;
                    selectedCandidates[j].depth = max(selectedCandidates[j].depth, selectedCandidates[i].depth + 1);
                    break;
                }
            }
        }
        return selectedCandidates;
    }

    /**
     * @brief Identify square candidates according to a marker dictionary
     */
    void identifyCandidates(const Mat& grey, const vector<Mat>& image_pyr, vector<MarkerCandidateTree>& selectedContours,
                            vector<vector<Point2f> >& accepted, vector<vector<Point> >& contours,
                            vector<int>& ids, const Dictionary& currentDictionary, vector<vector<Point2f>>& rejected) const {
        size_t ncandidates = selectedContours.size();

        vector<int> idsTmp(ncandidates, -1);
        vector<int> rotated(ncandidates, 0);
        vector<uint8_t> validCandidates(ncandidates, 0);
        vector<uint8_t> was(ncandidates, false);
        bool checkCloseContours = true;

        int maxDepth = 0;
        for (size_t i = 0ull; i < selectedContours.size(); i++)
            maxDepth = max(selectedContours[i].depth, maxDepth);
        vector<vector<size_t>> depths(maxDepth+1);
        for (size_t i = 0ull; i < selectedContours.size(); i++) {
            depths[selectedContours[i].depth].push_back(i);
        }

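        // Candidates are processed by their depth in the containment hierarchy: depth 0
        // (contours that contain no other candidate) first. Once a marker is identified,
        // its enclosing (parent) contours are marked as visited below, which allows the
        // loop to terminate without trying to decode them.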
        //// Analyze each of the candidates
        int depth = 0;
        size_t counter = 0;
        while (counter < ncandidates) {
            parallel_for_(Range(0, (int)depths[depth].size()), [&](const Range& range) {
                const int begin = range.start;
                const int end = range.end;
                for (int i = begin; i < end; i++) {
                    size_t v = depths[depth][i];
                    was[v] = true;
                    Mat img = grey;
                    // implements equation (4)
                    if (detectorParams.useAruco3Detection) {
                        const int minPerimeter = detectorParams.minSideLengthCanonicalImg * 4;
                        const size_t nearestImgId = _findOptPyrImageForCanonicalImg(image_pyr, grey.cols, static_cast<int>(selectedContours[v].contour.size()), minPerimeter);
                        img = image_pyr[nearestImgId];
                    }
                    const float scale = detectorParams.useAruco3Detection ? img.cols / static_cast<float>(grey.cols) : 1.f;

                    validCandidates[v] = _identifyOneCandidate(currentDictionary, img, selectedContours[v].corners, idsTmp[v], detectorParams, rotated[v], scale);

                    if (validCandidates[v] == 0 && checkCloseContours) {
                        for (const MarkerCandidate& closeMarkerCandidate: selectedContours[v].closeContours) {
                            validCandidates[v] = _identifyOneCandidate(currentDictionary, img, closeMarkerCandidate.corners, idsTmp[v], detectorParams, rotated[v], scale);
                            if (validCandidates[v] > 0) {
                                selectedContours[v].corners = closeMarkerCandidate.corners;
                                selectedContours[v].contour = closeMarkerCandidate.contour;
                                break;
                            }
                        }
                    }
                }
            });

            // visit the parent vertices of the detected markers to skip identifying the parent contours
            for(size_t v : depths[depth]) {
                if(validCandidates[v] > 0) {
                    int parent = selectedContours[v].parent;
                    while (parent != -1) {
                        if (!was[parent]) {
                            was[parent] = true;
                            counter++;
                        }
                        parent = selectedContours[parent].parent;
                    }
                }
                counter++;
            }
            depth++;
        }

        for (size_t i = 0ull; i < selectedContours.size(); i++) {
            if (validCandidates[i] > 0) {
                // shift corner positions to the correct rotation
                correctCornerPosition(selectedContours[i].corners, rotated[i]);

                accepted.push_back(selectedContours[i].corners);
                contours.push_back(selectedContours[i].contour);
                ids.push_back(idsTmp[i]);
            }
            else {
                rejected.push_back(selectedContours[i].corners);
            }
        }
    }

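    /**
     * @brief Run cornerSubPix() refinement on the identified candidates; with Aruco3 the
     * refinement starts on the pyramid level closest to the canonical image and the corners
     * are upscaled back to the original resolution.
     */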
    void performCornerSubpixRefinement(const Mat& grey, const vector<Mat>& grey_pyramid, int closest_pyr_image_idx, const vector<vector<Point2f>>& candidates, const Dictionary& dictionary) const {
        CV_Assert(detectorParams.cornerRefinementWinSize > 0 && detectorParams.cornerRefinementMaxIterations > 0 &&
                  detectorParams.cornerRefinementMinAccuracy > 0);
        // Do subpixel estimation. In Aruco3 start on the lowest pyramid level and upscale the corners
        parallel_for_(Range(0, (int)candidates.size()), [&](const Range& range) {
            const int begin = range.start;
            const int end = range.end;

            for (int i = begin; i < end; i++) {
                if (detectorParams.useAruco3Detection) {
                    const float scale_init = (float) grey_pyramid[closest_pyr_image_idx].cols / grey.cols;
                    findCornerInPyrImage(scale_init, closest_pyr_image_idx, grey_pyramid, Mat(candidates[i]), detectorParams);
                } else {
                    int cornerRefinementWinSize = std::max(1, cvRound(detectorParams.relativeCornerRefinmentWinSize*
                        getAverageModuleSize(candidates[i], dictionary.markerSize, detectorParams.markerBorderBits)));
                    cornerRefinementWinSize = min(cornerRefinementWinSize, detectorParams.cornerRefinementWinSize);
                    cornerSubPix(grey, Mat(candidates[i]), Size(cornerRefinementWinSize, cornerRefinementWinSize), Size(-1, -1),
                                 TermCriteria(TermCriteria::MAX_ITER | TermCriteria::EPS,
                                              detectorParams.cornerRefinementMaxIterations,
                                              detectorParams.cornerRefinementMinAccuracy));
                }
            }
        });
    }
};

ArucoDetector::ArucoDetector(const Dictionary &_dictionary,
                             const DetectorParameters &_detectorParams,
                             const RefineParameters& _refineParams) {
    arucoDetectorImpl = makePtr<ArucoDetectorImpl>(vector<Dictionary>{_dictionary}, _detectorParams, _refineParams);
}

ArucoDetector::ArucoDetector(const vector<Dictionary> &_dictionaries,
                             const DetectorParameters &_detectorParams,
                             const RefineParameters& _refineParams) {
    arucoDetectorImpl = makePtr<ArucoDetectorImpl>(_dictionaries, _detectorParams, _refineParams);
}

void ArucoDetector::detectMarkers(InputArray _image, OutputArrayOfArrays _corners, OutputArray _ids,
                                  OutputArrayOfArrays _rejectedImgPoints) const {
    arucoDetectorImpl->detectMarkers(_image, _corners, _ids, _rejectedImgPoints, noArray(), DictionaryMode::Single);
}

void ArucoDetector::detectMarkersMultiDict(InputArray _image, OutputArrayOfArrays _corners, OutputArray _ids,
                                           OutputArrayOfArrays _rejectedImgPoints, OutputArray _dictIndices) const {
    arucoDetectorImpl->detectMarkers(_image, _corners, _ids, _rejectedImgPoints, _dictIndices, DictionaryMode::Multi);
}
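
// Illustrative usage of the detector (the dictionary choice and variable names are
// arbitrary examples):
//
//     cv::aruco::ArucoDetector detector(cv::aruco::getPredefinedDictionary(cv::aruco::DICT_6X6_250));
//     std::vector<std::vector<cv::Point2f>> corners, rejected;
//     std::vector<int> ids;
//     detector.detectMarkers(image, corners, ids, rejected);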

/**
 * Project board markers that are not included in the list of detected markers
 */
static inline void _projectUndetectedMarkers(const Board &board, InputOutputArrayOfArrays detectedCorners,
                                             InputOutputArray detectedIds, InputArray cameraMatrix, InputArray distCoeffs,
                                             vector<vector<Point2f> >& undetectedMarkersProjectedCorners,
                                             OutputArray undetectedMarkersIds) {
    Mat rvec, tvec; // first estimate board pose with the currently available markers
    Mat objPoints, imgPoints; // object and image points for the solvePnP function
    // To refine the corners of ArUco markers, refineDetectedMarkers() estimates the board pose from 3D-2D point correspondences.
    // To find the 3D-2D point correspondences it uses matchImagePoints().
    // The method matchImagePoints() works with ArUco corners (in Board/GridBoard cases) or with ChArUco corners (in the CharucoBoard case).
    // To refine the corners of ArUco markers we need to work with ArUco corners only, for all board types.
    // To call matchImagePoints() with ArUco corners for all boards we need to call matchImagePoints() from the base class Board.
    // The method matchImagePoints() is implemented in the Pimpl, so we create a temporary Board object to call the base method.
    Board(board.getObjPoints(), board.getDictionary(), board.getIds()).matchImagePoints(detectedCorners, detectedIds, objPoints, imgPoints);
    if (objPoints.total() < 4ull) // need at least one marker from the board so that rvec and tvec are valid
        return;
    solvePnP(objPoints, imgPoints, cameraMatrix, distCoeffs, rvec, tvec);

    // search undetected markers and project them using the previous pose
    vector<vector<Point2f> > undetectedCorners;
    const std::vector<int>& ids = board.getIds();
    vector<int> undetectedIds;
    for(unsigned int i = 0; i < ids.size(); i++) {
        int foundIdx = -1;
        for(unsigned int j = 0; j < detectedIds.total(); j++) {
            if(ids[i] == detectedIds.getMat().ptr<int>()[j]) {
                foundIdx = j;
                break;
            }
        }

        // not detected
        if(foundIdx == -1) {
            undetectedCorners.push_back(vector<Point2f>());
            undetectedIds.push_back(ids[i]);
            projectPoints(board.getObjPoints()[i], rvec, tvec, cameraMatrix, distCoeffs,
                          undetectedCorners.back());
        }
    }
    // parse output
    Mat(undetectedIds).copyTo(undetectedMarkersIds);
    undetectedMarkersProjectedCorners = undetectedCorners;
}

/**
 * Interpolate board markers that are not included in the list of detected markers using
 * global homography
 */
static void _projectUndetectedMarkers(const Board &_board, InputOutputArrayOfArrays _detectedCorners,
                                      InputOutputArray _detectedIds,
                                      vector<vector<Point2f> >& _undetectedMarkersProjectedCorners,
                                      OutputArray _undetectedMarkersIds) {
    // check board points are in the same plane, if not, global homography cannot be applied
    CV_Assert(_board.getObjPoints().size() > 0);
    CV_Assert(_board.getObjPoints()[0].size() > 0);
    float boardZ = _board.getObjPoints()[0][0].z;
    for(unsigned int i = 0; i < _board.getObjPoints().size(); i++) {
        for(unsigned int j = 0; j < _board.getObjPoints()[i].size(); j++)
            CV_Assert(boardZ == _board.getObjPoints()[i][j].z);
    }

    vector<Point2f> detectedMarkersObj2DAll; // Object coordinates (without Z) of all the detected
                                             // marker corners in a single vector
    vector<Point2f> imageCornersAll;         // Image corners of all detected markers in a single vector
    vector<vector<Point2f> > undetectedMarkersObj2D; // Object coordinates (without Z) of all
                                                     // missing markers in different vectors
    vector<int> undetectedMarkersIds; // ids of missing markers
    // find markers included in board, and missing markers from board. Fill the previous vectors
    for(unsigned int j = 0; j < _board.getIds().size(); j++) {
        bool found = false;
        for(unsigned int i = 0; i < _detectedIds.total(); i++) {
            if(_detectedIds.getMat().ptr<int>()[i] == _board.getIds()[j]) {
                for(int c = 0; c < 4; c++) {
                    imageCornersAll.push_back(_detectedCorners.getMat(i).ptr<Point2f>()[c]);
                    detectedMarkersObj2DAll.push_back(
                        Point2f(_board.getObjPoints()[j][c].x, _board.getObjPoints()[j][c].y));
                }
                found = true;
                break;
            }
        }
        if(!found) {
            undetectedMarkersObj2D.push_back(vector<Point2f>());
            for(int c = 0; c < 4; c++) {
                undetectedMarkersObj2D.back().push_back(
                    Point2f(_board.getObjPoints()[j][c].x, _board.getObjPoints()[j][c].y));
            }
            undetectedMarkersIds.push_back(_board.getIds()[j]);
        }
    }
    if(imageCornersAll.size() == 0) return;

    // get homography from detected markers
    Mat transformation = findHomography(detectedMarkersObj2DAll, imageCornersAll);

    _undetectedMarkersProjectedCorners.resize(undetectedMarkersIds.size());

    // for each undetected marker, apply transformation
    for(unsigned int i = 0; i < undetectedMarkersObj2D.size(); i++) {
        perspectiveTransform(undetectedMarkersObj2D[i], _undetectedMarkersProjectedCorners[i], transformation);
    }
    Mat(undetectedMarkersIds).copyTo(_undetectedMarkersIds);
}

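// Illustrative usage of refineDetectedMarkers() (board, cameraMatrix and distCoeffs are
// assumed to come from the application and a previous calibration):
//
//     detector.refineDetectedMarkers(image, board, corners, ids, rejected,
//                                    cameraMatrix, distCoeffs);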
void ArucoDetector::refineDetectedMarkers(InputArray _image, const Board& _board,
                                          InputOutputArrayOfArrays _detectedCorners, InputOutputArray _detectedIds,
                                          InputOutputArrayOfArrays _rejectedCorners, InputArray _cameraMatrix,
                                          InputArray _distCoeffs, OutputArray _recoveredIdxs) const {
    DetectorParameters& detectorParams = arucoDetectorImpl->detectorParams;
    const Dictionary& dictionary = arucoDetectorImpl->dictionaries.at(0);
    RefineParameters& refineParams = arucoDetectorImpl->refineParams;
    CV_Assert(refineParams.minRepDistance > 0);

    if(_detectedIds.total() == 0 || _rejectedCorners.total() == 0) return;

    // get projections of missing markers in the board
    vector<vector<Point2f> > undetectedMarkersCorners;
    vector<int> undetectedMarkersIds;
    if(_cameraMatrix.total() != 0) {
        // reproject based on camera projection model
        _projectUndetectedMarkers(_board, _detectedCorners, _detectedIds, _cameraMatrix, _distCoeffs,
                                  undetectedMarkersCorners, undetectedMarkersIds);

    } else {
        // reproject based on global homography
        _projectUndetectedMarkers(_board, _detectedCorners, _detectedIds, undetectedMarkersCorners,
                                  undetectedMarkersIds);
    }

    // list of missing markers indicating if they have been assigned to a candidate
    vector<bool > alreadyIdentified(_rejectedCorners.total(), false);

    // maximum bits that can be corrected
    int maxCorrectionRecalculated =
        int(double(dictionary.maxCorrectionBits) * refineParams.errorCorrectionRate);

    Mat grey;
    _convertToGrey(_image, grey);

    // vector of final detected marker corners and ids
    vector<vector<Point2f> > finalAcceptedCorners;
    vector<int> finalAcceptedIds;
    // fill with the current markers
    finalAcceptedCorners.resize(_detectedCorners.total());
    finalAcceptedIds.resize(_detectedIds.total());
    for(unsigned int i = 0; i < _detectedIds.total(); i++) {
        finalAcceptedCorners[i] = _detectedCorners.getMat(i).clone();
        finalAcceptedIds[i] = _detectedIds.getMat().ptr<int>()[i];
    }
    vector<int> recoveredIdxs; // original indexes of accepted markers in _rejectedCorners

    // for each missing marker, try to find a correspondence
    for(unsigned int i = 0; i < undetectedMarkersIds.size(); i++) {

        // best match at the moment
        int closestCandidateIdx = -1;
        double closestCandidateDistance = refineParams.minRepDistance * refineParams.minRepDistance + 1;
        Mat closestRotatedMarker;

        for(unsigned int j = 0; j < _rejectedCorners.total(); j++) {
            if(alreadyIdentified[j]) continue;

            // check distance
            double minDistance = closestCandidateDistance + 1;
            bool valid = false;
            int validRot = 0;
            for(int c = 0; c < 4; c++) { // first corner in rejected candidate
                double currentMaxDistance = 0;
                for(int k = 0; k < 4; k++) {
                    Point2f rejCorner = _rejectedCorners.getMat(j).ptr<Point2f>()[(c + k) % 4];
                    Point2f distVector = undetectedMarkersCorners[i][k] - rejCorner;
                    double cornerDist = distVector.x * distVector.x + distVector.y * distVector.y;
                    currentMaxDistance = max(currentMaxDistance, cornerDist);
                }
                // if distance is better than current best distance
                if(currentMaxDistance < closestCandidateDistance) {
                    valid = true;
                    validRot = c;
                    minDistance = currentMaxDistance;
                }
                if(!refineParams.checkAllOrders) break;
            }

            if(!valid) continue;

            // apply rotation
            Mat rotatedMarker;
            if(refineParams.checkAllOrders) {
                rotatedMarker = Mat(4, 1, CV_32FC2);
                for(int c = 0; c < 4; c++)
                    rotatedMarker.ptr<Point2f>()[c] =
                        _rejectedCorners.getMat(j).ptr<Point2f>()[(c + 4 + validRot) % 4];
            }
            else rotatedMarker = _rejectedCorners.getMat(j);

            // last filter, check if inner code is close enough to the assigned marker code
            int codeDistance = 0;
            // if errorCorrectionRate is negative, don't check the code
            if(refineParams.errorCorrectionRate >= 0) {

                // extract bits
                Mat bits = _extractBits(
                    grey, rotatedMarker, dictionary.markerSize, detectorParams.markerBorderBits,
                    detectorParams.perspectiveRemovePixelPerCell,
                    detectorParams.perspectiveRemoveIgnoredMarginPerCell, detectorParams.minOtsuStdDev);

                Mat onlyBits =
                    bits.rowRange(detectorParams.markerBorderBits, bits.rows - detectorParams.markerBorderBits)
                        .colRange(detectorParams.markerBorderBits, bits.rows - detectorParams.markerBorderBits);

                codeDistance =
                    dictionary.getDistanceToId(onlyBits, undetectedMarkersIds[i], false);
            }

            // if everything is ok, assign values to current best match
            if(refineParams.errorCorrectionRate < 0 || codeDistance < maxCorrectionRecalculated) {
                closestCandidateIdx = j;
                closestCandidateDistance = minDistance;
                closestRotatedMarker = rotatedMarker;
            }
        }

        // if there is at least one good match, we can rescue the missing marker
        if(closestCandidateIdx >= 0) {

            // subpixel refinement
            if(detectorParams.cornerRefinementMethod == (int)CORNER_REFINE_SUBPIX) {
                CV_Assert(detectorParams.cornerRefinementWinSize > 0 &&
                          detectorParams.cornerRefinementMaxIterations > 0 &&
                          detectorParams.cornerRefinementMinAccuracy > 0);

                std::vector<Point2f> marker(closestRotatedMarker.begin<Point2f>(), closestRotatedMarker.end<Point2f>());
                int cornerRefinementWinSize = std::max(1, cvRound(detectorParams.relativeCornerRefinmentWinSize*
                    getAverageModuleSize(marker, dictionary.markerSize, detectorParams.markerBorderBits)));
                cornerRefinementWinSize = min(cornerRefinementWinSize, detectorParams.cornerRefinementWinSize);
                cornerSubPix(grey, closestRotatedMarker,
                             Size(cornerRefinementWinSize, cornerRefinementWinSize),
                             Size(-1, -1), TermCriteria(TermCriteria::MAX_ITER | TermCriteria::EPS,
                                                        detectorParams.cornerRefinementMaxIterations,
                                                        detectorParams.cornerRefinementMinAccuracy));
            }

            // remove from rejected
            alreadyIdentified[closestCandidateIdx] = true;

            // add to detected
            finalAcceptedCorners.push_back(closestRotatedMarker);
            finalAcceptedIds.push_back(undetectedMarkersIds[i]);

            // add the original index of the candidate
            recoveredIdxs.push_back(closestCandidateIdx);
        }
    }

    // parse output
    if(finalAcceptedIds.size() != _detectedIds.total()) {
        // parse output
        Mat(finalAcceptedIds).copyTo(_detectedIds);
        _copyVector2Output(finalAcceptedCorners, _detectedCorners);

        // recalculate _rejectedCorners based on alreadyIdentified
        vector<vector<Point2f> > finalRejected;
        for(unsigned int i = 0; i < alreadyIdentified.size(); i++) {
            if(!alreadyIdentified[i]) {
                finalRejected.push_back(_rejectedCorners.getMat(i).clone());
            }
        }
        _copyVector2Output(finalRejected, _rejectedCorners);
1386
1387 if(_recoveredIdxs.needed()) {
1388 Mat(recoveredIdxs).copyTo(m: _recoveredIdxs);
1389 }
1390 }
1391}
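
// Usage sketch (illustrative only, not compiled as part of this file): how the
// refinement step above is typically driven from user code. A known board layout is
// required so that missing markers can be reprojected; the board geometry, the variable
// names and the availability of image/cameraMatrix/distCoeffs are assumptions for
// illustration.
//
//     cv::aruco::Dictionary dict = cv::aruco::getPredefinedDictionary(cv::aruco::DICT_6X6_250);
//     cv::aruco::GridBoard board(cv::Size(5, 7), 0.04f, 0.01f, dict);
//     cv::aruco::ArucoDetector detector(dict);
//
//     std::vector<std::vector<cv::Point2f>> corners, rejected;
//     std::vector<int> ids, recoveredIdxs;
//     detector.detectMarkers(image, corners, ids, rejected);
//
//     // with intrinsics the missing markers are reprojected through the camera model,
//     // otherwise a global homography is used (see the two branches above)
//     detector.refineDetectedMarkers(image, board, corners, ids, rejected,
//                                    cameraMatrix, distCoeffs, recoveredIdxs);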

void ArucoDetector::write(FileStorage &fs) const {
    // preserve old format for single dictionary case
    if (1 == arucoDetectorImpl->dictionaries.size()) {
        arucoDetectorImpl->dictionaries[0].writeDictionary(fs);
    } else {
        fs << "dictionaries" << "[";
        for (auto& dictionary : arucoDetectorImpl->dictionaries) {
            fs << "{";
            dictionary.writeDictionary(fs);
            fs << "}";
        }
        fs << "]";
    }
    arucoDetectorImpl->detectorParams.writeDetectorParameters(fs);
    arucoDetectorImpl->refineParams.writeRefineParameters(fs);
}

void ArucoDetector::read(const FileNode &fn) {
    arucoDetectorImpl->dictionaries.clear();
    if (!fn.empty() && !fn["dictionaries"].empty() && fn["dictionaries"].isSeq()) {
        for (const auto& dictionaryNode : fn["dictionaries"]) {
            arucoDetectorImpl->dictionaries.emplace_back();
            arucoDetectorImpl->dictionaries.back().readDictionary(dictionaryNode);
        }
    } else {
        // backward compatibility
        arucoDetectorImpl->dictionaries.emplace_back();
        arucoDetectorImpl->dictionaries.back().readDictionary(fn);
    }
    arucoDetectorImpl->detectorParams.readDetectorParameters(fn);
    arucoDetectorImpl->refineParams.readRefineParameters(fn);
}
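
// Usage sketch (illustrative only, not compiled as part of this file): round-tripping a
// detector configuration through write()/read() above. Note that read() falls back to
// the legacy single-dictionary layout when no "dictionaries" sequence is present. The
// file name "aruco_detector.yml" is an arbitrary example.
//
//     cv::aruco::ArucoDetector detector(cv::aruco::getPredefinedDictionary(cv::aruco::DICT_4X4_50));
//     {   // save dictionaries, detector parameters and refine parameters
//         cv::FileStorage fs("aruco_detector.yml", cv::FileStorage::WRITE);
//         detector.write(fs);
//     }
//     {   // restore the same configuration into another detector instance
//         cv::FileStorage fs("aruco_detector.yml", cv::FileStorage::READ);
//         cv::aruco::ArucoDetector restored;
//         restored.read(fs.root());
//     }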

const Dictionary& ArucoDetector::getDictionary() const {
    return arucoDetectorImpl->dictionaries[0];
}

void ArucoDetector::setDictionary(const Dictionary& dictionary) {
    if (arucoDetectorImpl->dictionaries.empty()) {
        arucoDetectorImpl->dictionaries.push_back(dictionary);
    } else {
        arucoDetectorImpl->dictionaries[0] = dictionary;
    }
}

vector<Dictionary> ArucoDetector::getDictionaries() const {
    return arucoDetectorImpl->dictionaries;
}

void ArucoDetector::setDictionaries(const vector<Dictionary>& dictionaries) {
    CV_Assert(!dictionaries.empty());
    arucoDetectorImpl->dictionaries = dictionaries;
}
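
// Usage sketch (illustrative only, not compiled as part of this file): configuring a
// detector with several dictionaries through the setter above. The particular
// dictionary choices are arbitrary examples.
//
//     std::vector<cv::aruco::Dictionary> dicts = {
//         cv::aruco::getPredefinedDictionary(cv::aruco::DICT_4X4_50),
//         cv::aruco::getPredefinedDictionary(cv::aruco::DICT_6X6_250)
//     };
//     cv::aruco::ArucoDetector detector;
//     detector.setDictionaries(dicts);                    // replaces the current list
//     CV_Assert(detector.getDictionaries().size() == 2);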

const DetectorParameters& ArucoDetector::getDetectorParameters() const {
    return arucoDetectorImpl->detectorParams;
}

void ArucoDetector::setDetectorParameters(const DetectorParameters& detectorParameters) {
    arucoDetectorImpl->detectorParams = detectorParameters;
}

const RefineParameters& ArucoDetector::getRefineParameters() const {
    return arucoDetectorImpl->refineParams;
}

void ArucoDetector::setRefineParameters(const RefineParameters& refineParameters) {
    arucoDetectorImpl->refineParams = refineParameters;
}
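
// Usage sketch (illustrative only, not compiled as part of this file): adjusting the
// behaviour of refineDetectedMarkers() through RefineParameters. The numeric values are
// arbitrary examples, not recommended defaults: minRepDistance bounds the allowed
// corner distance in pixels, a negative errorCorrectionRate skips the code check (see
// the codeDistance branch above), and checkAllOrders tries all four corner rotations of
// each rejected candidate.
//
//     cv::aruco::ArucoDetector detector;
//     cv::aruco::RefineParameters refineParams(15.f, -1.f, true);
//     detector.setRefineParameters(refineParams);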

void drawDetectedMarkers(InputOutputArray _image, InputArrayOfArrays _corners,
                         InputArray _ids, Scalar borderColor) {
    CV_Assert(_image.getMat().total() != 0 &&
              (_image.getMat().channels() == 1 || _image.getMat().channels() == 3));
    CV_Assert((_corners.total() == _ids.total()) || _ids.total() == 0);

    // calculate colors
    Scalar textColor, cornerColor;
    textColor = cornerColor = borderColor;
    swap(textColor.val[0], textColor.val[1]);     // text color: swap channels 0 and 1 of the border color
    swap(cornerColor.val[1], cornerColor.val[2]); // corner color: swap channels 1 and 2 of the border color

    int nMarkers = (int)_corners.total();
    for(int i = 0; i < nMarkers; i++) {
        Mat currentMarker = _corners.getMat(i);
        CV_Assert(currentMarker.total() == 4 && currentMarker.channels() == 2);
        if (currentMarker.type() != CV_32SC2)
            currentMarker.convertTo(currentMarker, CV_32SC2);

        // draw marker sides
        for(int j = 0; j < 4; j++) {
            Point p0, p1;
            p0 = currentMarker.ptr<Point>(0)[j];
            p1 = currentMarker.ptr<Point>(0)[(j + 1) % 4];
            line(_image, p0, p1, borderColor, 1);
        }
        // draw first corner mark
        rectangle(_image, currentMarker.ptr<Point>(0)[0] - Point(3, 3),
                  currentMarker.ptr<Point>(0)[0] + Point(3, 3), cornerColor, 1, LINE_AA);

        // draw ID
        if(_ids.total() != 0) {
            Point cent(0, 0);
            for(int p = 0; p < 4; p++)
                cent += currentMarker.ptr<Point>(0)[p];
            cent = cent / 4.;
            stringstream s;
            s << "id=" << _ids.getMat().ptr<int>(0)[i];
            putText(_image, s.str(), cent, FONT_HERSHEY_SIMPLEX, 0.5, textColor, 2);
        }
    }
}
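
// Usage sketch (illustrative only, not compiled as part of this file): visualising
// detection results with drawDetectedMarkers(). The input image "frame" is an
// assumption; ids may be omitted, in which case only the outlines are drawn, and the
// border colour defaults to green.
//
//     std::vector<std::vector<cv::Point2f>> corners;
//     std::vector<int> ids;
//     cv::aruco::ArucoDetector detector(cv::aruco::getPredefinedDictionary(cv::aruco::DICT_6X6_250));
//     detector.detectMarkers(frame, corners, ids);
//     cv::aruco::drawDetectedMarkers(frame, corners, ids);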

void generateImageMarker(const Dictionary &dictionary, int id, int sidePixels, OutputArray _img, int borderBits) {
    dictionary.generateImageMarker(id, sidePixels, _img, borderBits);
}
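
// Usage sketch (illustrative only, not compiled as part of this file): rendering a
// single marker image with generateImageMarker(). Marker id 23, the 200-pixel side
// length and the output file name are arbitrary examples; cv::imwrite additionally
// requires the imgcodecs module.
//
//     cv::Mat markerImg;
//     cv::aruco::Dictionary dict = cv::aruco::getPredefinedDictionary(cv::aruco::DICT_6X6_250);
//     cv::aruco::generateImageMarker(dict, 23, 200, markerImg, 1);
//     cv::imwrite("marker23.png", markerImg);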

}  // namespace aruco
}  // namespace cv