1/*M///////////////////////////////////////////////////////////////////////////////////////
2//
3// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
4//
5// By downloading, copying, installing or using the software you agree to this license.
6// If you do not agree to this license, do not download, install,
7// copy or use the software.
8//
9//
10// Intel License Agreement
11// For Open Source Computer Vision Library
12//
13// Copyright (C) 2000, Intel Corporation, all rights reserved.
14// Third party copyrights are property of their respective owners.
15//
16// Redistribution and use in source and binary forms, with or without modification,
17// are permitted provided that the following conditions are met:
18//
19// * Redistribution's of source code must retain the above copyright notice,
20// this list of conditions and the following disclaimer.
21//
22// * Redistribution's in binary form must reproduce the above copyright notice,
23// this list of conditions and the following disclaimer in the documentation
24// and/or other materials provided with the distribution.
25//
26// * The name of Intel Corporation may not be used to endorse or promote products
27// derived from this software without specific prior written permission.
28//
29// This software is provided by the copyright holders and contributors "as is" and
30// any express or implied warranties, including, but not limited to, the implied
31// warranties of merchantability and fitness for a particular purpose are disclaimed.
32// In no event shall the Intel Corporation or contributors be liable for any direct,
33// indirect, incidental, special, exemplary, or consequential damages
34// (including, but not limited to, procurement of substitute goods or services;
35// loss of use, data, or profits; or business interruption) however caused
36// and on any theory of liability, whether in contract, strict liability,
37// or tort (including negligence or otherwise) arising in any way out of
38// the use of this software, even if advised of the possibility of such damage.
39//
40//M*/
41
42#include "precomp.hpp"
43#include "opencl_kernels_imgproc.hpp"
44#include "opencv2/core/hal/intrin.hpp"
45
46#include "opencv2/core/openvx/ovx_defs.hpp"
47
48#include "opencv2/core/utils/tls.hpp"
49
50namespace cv
51{
52
53////////////////// Helper functions //////////////////////
54
// Clamps integer v into [vmin, vmax]. NOTE: this is a macro, so each argument
// may be evaluated more than once — pass only side-effect-free expressions.
#define CV_CLAMP_INT(v, vmin, vmax) (v < vmin ? vmin : (vmax < v ? vmax : v))

// Sentinel offset written into the 8-bit lookup tables for input values that
// fall outside the histogram ranges; any valid in-range offset compares
// strictly smaller than this value.
static const size_t OUT_OF_RANGE = (size_t)1 << (sizeof(size_t)*8 - 2);
58
59static void
60calcHistLookupTables_8u( const Mat& hist, const SparseMat& shist,
61 int dims, const float** ranges, const double* uniranges,
62 bool uniform, bool issparse, std::vector<size_t>& _tab )
63{
64 const int low = 0, high = 256;
65 int i, j;
66 _tab.resize(new_size: (high-low)*dims);
67 size_t* tab = &_tab[0];
68
69 if( uniform )
70 {
71 for( i = 0; i < dims; i++ )
72 {
73 double a = uniranges[i*2];
74 double b = uniranges[i*2+1];
75 int sz = !issparse ? hist.size[i] : shist.size(i);
76 size_t step = !issparse ? hist.step[i] : 1;
77
78 double v_lo = ranges ? ranges[i][0] : 0;
79 double v_hi = ranges ? ranges[i][1] : 256;
80
81 for( j = low; j < high; j++ )
82 {
83 int idx = cvFloor(value: j*a + b);
84 size_t written_idx = OUT_OF_RANGE;
85 if (j >= v_lo && j < v_hi)
86 {
87 idx = CV_CLAMP_INT(idx, 0, sz - 1);
88 written_idx = idx*step;
89 }
90 tab[i*(high - low) + j - low] = written_idx;
91 }
92 }
93 }
94 else if (ranges)
95 {
96 for( i = 0; i < dims; i++ )
97 {
98 int limit = std::min(a: cvCeil(value: ranges[i][0]), b: high);
99 int idx = -1, sz = !issparse ? hist.size[i] : shist.size(i);
100 size_t written_idx = OUT_OF_RANGE;
101 size_t step = !issparse ? hist.step[i] : 1;
102
103 for(j = low;;)
104 {
105 for( ; j < limit; j++ )
106 tab[i*(high - low) + j - low] = written_idx;
107
108 if( (unsigned)(++idx) < (unsigned)sz )
109 {
110 limit = std::min(a: cvCeil(value: ranges[i][idx+1]), b: high);
111 written_idx = idx*step;
112 }
113 else
114 {
115 for( ; j < high; j++ )
116 tab[i*(high - low) + j - low] = OUT_OF_RANGE;
117 break;
118 }
119 }
120 }
121 }
122 else
123 {
124 CV_Error(Error::StsBadArg, "Either ranges, either uniform ranges should be provided");
125 }
126}
127
128
129static void histPrepareImages( const Mat* images, int nimages, const int* channels,
130 const Mat& mask, int dims, const int* histSize,
131 const float** ranges, bool uniform,
132 std::vector<uchar*>& ptrs, std::vector<int>& deltas,
133 Size& imsize, std::vector<double>& uniranges )
134{
135 int i, j, c;
136 CV_Assert( channels != 0 || nimages == dims );
137
138 imsize = images[0].size();
139 int depth = images[0].depth(), esz1 = (int)images[0].elemSize1();
140 bool isContinuous = true;
141
142 ptrs.resize(new_size: dims + 1);
143 deltas.resize(new_size: (dims + 1)*2);
144
145 for( i = 0; i < dims; i++ )
146 {
147 if(!channels)
148 {
149 j = i;
150 c = 0;
151 CV_Assert( images[j].channels() == 1 );
152 }
153 else
154 {
155 c = channels[i];
156 CV_Assert( c >= 0 );
157 for( j = 0; j < nimages; c -= images[j].channels(), j++ )
158 if( c < images[j].channels() )
159 break;
160 CV_Assert( j < nimages );
161 }
162
163 CV_Assert( images[j].size() == imsize && images[j].depth() == depth );
164 if( !images[j].isContinuous() )
165 isContinuous = false;
166 ptrs[i] = images[j].data + c*esz1;
167 deltas[i*2] = images[j].channels();
168 deltas[i*2+1] = (int)(images[j].step/esz1 - imsize.width*deltas[i*2]);
169 }
170
171 if( !mask.empty() )
172 {
173 CV_Assert( mask.size() == imsize && mask.channels() == 1 );
174 isContinuous = isContinuous && mask.isContinuous();
175 ptrs[dims] = mask.data;
176 deltas[dims*2] = 1;
177 deltas[dims*2 + 1] = (int)(mask.step/mask.elemSize1());
178 }
179
180 if( isContinuous )
181 {
182 imsize.width *= imsize.height;
183 imsize.height = 1;
184 }
185
186 if( !ranges ) // implicit uniform ranges for 8U
187 {
188 CV_Assert( depth == CV_8U );
189
190 uniranges.resize( new_size: dims*2 );
191 for( i = 0; i < dims; i++ )
192 {
193 uniranges[i*2] = histSize[i]/256.;
194 uniranges[i*2+1] = 0;
195 }
196 }
197 else if( uniform )
198 {
199 uniranges.resize( new_size: dims*2 );
200 for( i = 0; i < dims; i++ )
201 {
202 CV_Assert( ranges[i] && ranges[i][0] < ranges[i][1] );
203 double low = ranges[i][0], high = ranges[i][1];
204 double t = histSize[i]/(high - low);
205 uniranges[i*2] = t;
206 uniranges[i*2+1] = -t*low;
207#if 0 // This should be true by math, but it is not accurate numerically
208 CV_Assert(cvFloor(low * uniranges[i*2] + uniranges[i*2+1]) == 0);
209 CV_Assert((high * uniranges[i*2] + uniranges[i*2+1]) < histSize[i]);
210#endif
211 }
212 }
213 else
214 {
215 for( i = 0; i < dims; i++ )
216 {
217 size_t n = histSize[i];
218 for(size_t k = 0; k < n; k++ )
219 CV_Assert( ranges[i][k] < ranges[i][k+1] );
220 }
221 }
222}
223
224
225////////////////////////////////// C A L C U L A T E H I S T O G R A M ////////////////////////////////////
226
227template<typename T> static void
228calcHist_( std::vector<uchar*>& _ptrs, const std::vector<int>& _deltas,
229 Size imsize, Mat& hist, int dims, const float** _ranges,
230 const double* _uniranges, bool uniform )
231{
232 T** ptrs = (T**)&_ptrs[0];
233 const int* deltas = &_deltas[0];
234 uchar* H = hist.ptr();
235 int i, x;
236 const uchar* mask = _ptrs[dims];
237 int mstep = _deltas[dims*2 + 1];
238 int size[CV_MAX_DIM];
239 size_t hstep[CV_MAX_DIM];
240
241 for( i = 0; i < dims; i++ )
242 {
243 size[i] = hist.size[i];
244 hstep[i] = hist.step[i];
245 }
246
247 if( uniform )
248 {
249 const double* uniranges = &_uniranges[0];
250
251 if( dims == 1 )
252 {
253 double a = uniranges[0], b = uniranges[1];
254 int sz = size[0], d0 = deltas[0], step0 = deltas[1];
255 const T* p0 = (const T*)ptrs[0];
256
257 double v0_lo = _ranges[0][0];
258 double v0_hi = _ranges[0][1];
259
260 for( ; imsize.height--; p0 += step0, mask += mstep )
261 {
262 if( !mask )
263 for( x = 0; x < imsize.width; x++, p0 += d0 )
264 {
265 double v0 = (double)*p0;
266 int idx = cvFloor(value: v0*a + b);
267 if (v0 < v0_lo || v0 >= v0_hi)
268 continue;
269 idx = CV_CLAMP_INT(idx, 0, sz - 1);
270 CV_DbgAssert((unsigned)idx < (unsigned)sz);
271 ((int*)H)[idx]++;
272 }
273 else
274 for( x = 0; x < imsize.width; x++, p0 += d0 )
275 if( mask[x] )
276 {
277 double v0 = (double)*p0;
278 int idx = cvFloor(value: v0*a + b);
279 if (v0 < v0_lo || v0 >= v0_hi)
280 continue;
281 idx = CV_CLAMP_INT(idx, 0, sz - 1);
282 CV_DbgAssert((unsigned)idx < (unsigned)sz);
283 ((int*)H)[idx]++;
284 }
285 }
286 return;
287 }
288 else if( dims == 2 )
289 {
290 double a0 = uniranges[0], b0 = uniranges[1], a1 = uniranges[2], b1 = uniranges[3];
291 int sz0 = size[0], sz1 = size[1];
292 int d0 = deltas[0], step0 = deltas[1],
293 d1 = deltas[2], step1 = deltas[3];
294 size_t hstep0 = hstep[0];
295 const T* p0 = (const T*)ptrs[0];
296 const T* p1 = (const T*)ptrs[1];
297
298 double v0_lo = _ranges[0][0];
299 double v0_hi = _ranges[0][1];
300 double v1_lo = _ranges[1][0];
301 double v1_hi = _ranges[1][1];
302
303 for( ; imsize.height--; p0 += step0, p1 += step1, mask += mstep )
304 {
305 if( !mask )
306 for( x = 0; x < imsize.width; x++, p0 += d0, p1 += d1 )
307 {
308 double v0 = (double)*p0;
309 double v1 = (double)*p1;
310 int idx0 = cvFloor(value: v0*a0 + b0);
311 int idx1 = cvFloor(value: v1*a1 + b1);
312 if (v0 < v0_lo || v0 >= v0_hi)
313 continue;
314 if (v1 < v1_lo || v1 >= v1_hi)
315 continue;
316 idx0 = CV_CLAMP_INT(idx0, 0, sz0 - 1);
317 idx1 = CV_CLAMP_INT(idx1, 0, sz1 - 1);
318 CV_DbgAssert((unsigned)idx0 < (unsigned)sz0 && (unsigned)idx1 < (unsigned)sz1);
319 ((int*)(H + hstep0*idx0))[idx1]++;
320 }
321 else
322 for( x = 0; x < imsize.width; x++, p0 += d0, p1 += d1 )
323 if( mask[x] )
324 {
325 double v0 = (double)*p0;
326 double v1 = (double)*p1;
327 int idx0 = cvFloor(value: v0*a0 + b0);
328 int idx1 = cvFloor(value: v1*a1 + b1);
329 if (v0 < v0_lo || v0 >= v0_hi)
330 continue;
331 if (v1 < v1_lo || v1 >= v1_hi)
332 continue;
333 idx0 = CV_CLAMP_INT(idx0, 0, sz0 - 1);
334 idx1 = CV_CLAMP_INT(idx1, 0, sz1 - 1);
335 CV_DbgAssert((unsigned)idx0 < (unsigned)sz0 && (unsigned)idx1 < (unsigned)sz1);
336 ((int*)(H + hstep0*idx0))[idx1]++;
337 }
338 }
339 return;
340 }
341 else if( dims == 3 )
342 {
343 double a0 = uniranges[0], b0 = uniranges[1],
344 a1 = uniranges[2], b1 = uniranges[3],
345 a2 = uniranges[4], b2 = uniranges[5];
346 int sz0 = size[0], sz1 = size[1], sz2 = size[2];
347 int d0 = deltas[0], step0 = deltas[1],
348 d1 = deltas[2], step1 = deltas[3],
349 d2 = deltas[4], step2 = deltas[5];
350 size_t hstep0 = hstep[0], hstep1 = hstep[1];
351 const T* p0 = (const T*)ptrs[0];
352 const T* p1 = (const T*)ptrs[1];
353 const T* p2 = (const T*)ptrs[2];
354
355 double v0_lo = _ranges[0][0];
356 double v0_hi = _ranges[0][1];
357 double v1_lo = _ranges[1][0];
358 double v1_hi = _ranges[1][1];
359 double v2_lo = _ranges[2][0];
360 double v2_hi = _ranges[2][1];
361
362 for( ; imsize.height--; p0 += step0, p1 += step1, p2 += step2, mask += mstep )
363 {
364 if( !mask )
365 for( x = 0; x < imsize.width; x++, p0 += d0, p1 += d1, p2 += d2 )
366 {
367 double v0 = (double)*p0;
368 double v1 = (double)*p1;
369 double v2 = (double)*p2;
370 int idx0 = cvFloor(value: v0*a0 + b0);
371 int idx1 = cvFloor(value: v1*a1 + b1);
372 int idx2 = cvFloor(value: v2*a2 + b2);
373 if (v0 < v0_lo || v0 >= v0_hi)
374 continue;
375 if (v1 < v1_lo || v1 >= v1_hi)
376 continue;
377 if (v2 < v2_lo || v2 >= v2_hi)
378 continue;
379 idx0 = CV_CLAMP_INT(idx0, 0, sz0 - 1);
380 idx1 = CV_CLAMP_INT(idx1, 0, sz1 - 1);
381 idx2 = CV_CLAMP_INT(idx2, 0, sz2 - 1);
382 CV_DbgAssert(
383 (unsigned)idx0 < (unsigned)sz0 &&
384 (unsigned)idx1 < (unsigned)sz1 &&
385 (unsigned)idx2 < (unsigned)sz2);
386 ((int*)(H + hstep0*idx0 + hstep1*idx1))[idx2]++;
387 }
388 else
389 for( x = 0; x < imsize.width; x++, p0 += d0, p1 += d1, p2 += d2 )
390 if( mask[x] )
391 {
392 double v0 = (double)*p0;
393 double v1 = (double)*p1;
394 double v2 = (double)*p2;
395 int idx0 = cvFloor(value: v0*a0 + b0);
396 int idx1 = cvFloor(value: v1*a1 + b1);
397 int idx2 = cvFloor(value: v2*a2 + b2);
398 if (v0 < v0_lo || v0 >= v0_hi)
399 continue;
400 if (v1 < v1_lo || v1 >= v1_hi)
401 continue;
402 if (v2 < v2_lo || v2 >= v2_hi)
403 continue;
404 idx0 = CV_CLAMP_INT(idx0, 0, sz0 - 1);
405 idx1 = CV_CLAMP_INT(idx1, 0, sz1 - 1);
406 idx2 = CV_CLAMP_INT(idx2, 0, sz2 - 1);
407 CV_DbgAssert(
408 (unsigned)idx0 < (unsigned)sz0 &&
409 (unsigned)idx1 < (unsigned)sz1 &&
410 (unsigned)idx2 < (unsigned)sz2);
411 ((int*)(H + hstep0*idx0 + hstep1*idx1))[idx2]++;
412 }
413 }
414 }
415 else
416 {
417 for( ; imsize.height--; mask += mstep )
418 {
419 if( !mask )
420 for( x = 0; x < imsize.width; x++ )
421 {
422 uchar* Hptr = H;
423 for( i = 0; i < dims; i++ )
424 {
425 double v_lo = _ranges[i][0];
426 double v_hi = _ranges[i][1];
427 double v = *ptrs[i];
428 if (v < v_lo || v >= v_hi)
429 break;
430 int idx = cvFloor(value: v*uniranges[i*2] + uniranges[i*2+1]);
431 idx = CV_CLAMP_INT(idx, 0, size[i] - 1);
432 CV_DbgAssert((unsigned)idx < (unsigned)size[i]);
433 ptrs[i] += deltas[i*2];
434 Hptr += idx*hstep[i];
435 }
436
437 if( i == dims )
438 ++*((int*)Hptr);
439 else
440 for( ; i < dims; i++ )
441 ptrs[i] += deltas[i*2];
442 }
443 else
444 for( x = 0; x < imsize.width; x++ )
445 {
446 uchar* Hptr = H;
447 i = 0;
448 if( mask[x] )
449 for( ; i < dims; i++ )
450 {
451 double v_lo = _ranges[i][0];
452 double v_hi = _ranges[i][1];
453 double v = *ptrs[i];
454 if (v < v_lo || v >= v_hi)
455 break;
456 int idx = cvFloor(value: v*uniranges[i*2] + uniranges[i*2+1]);
457 idx = CV_CLAMP_INT(idx, 0, size[i] - 1);
458 CV_DbgAssert((unsigned)idx < (unsigned)size[i]);
459 ptrs[i] += deltas[i*2];
460 Hptr += idx*hstep[i];
461 }
462
463 if( i == dims )
464 ++*((int*)Hptr);
465 else
466 for( ; i < dims; i++ )
467 ptrs[i] += deltas[i*2];
468 }
469 for( i = 0; i < dims; i++ )
470 ptrs[i] += deltas[i*2 + 1];
471 }
472 }
473 }
474 else if (_ranges)
475 {
476 // non-uniform histogram
477 const float* ranges[CV_MAX_DIM];
478 for( i = 0; i < dims; i++ )
479 ranges[i] = &_ranges[i][0];
480
481 for( ; imsize.height--; mask += mstep )
482 {
483 for( x = 0; x < imsize.width; x++ )
484 {
485 uchar* Hptr = H;
486 i = 0;
487
488 if( !mask || mask[x] )
489 for( ; i < dims; i++ )
490 {
491 float v = (float)*ptrs[i];
492 const float* R = ranges[i];
493 int idx = -1, sz = size[i];
494
495 while( v >= R[idx+1] && ++idx < sz )
496 ; // nop
497
498 if( (unsigned)idx >= (unsigned)sz )
499 break;
500
501 ptrs[i] += deltas[i*2];
502 Hptr += idx*hstep[i];
503 }
504
505 if( i == dims )
506 ++*((int*)Hptr);
507 else
508 for( ; i < dims; i++ )
509 ptrs[i] += deltas[i*2];
510 }
511
512 for( i = 0; i < dims; i++ )
513 ptrs[i] += deltas[i*2 + 1];
514 }
515 }
516 else
517 {
518 CV_Error(Error::StsBadArg, "Either ranges, either uniform ranges should be provided");
519 }
520}
521
522
523static void
524calcHist_8u( std::vector<uchar*>& _ptrs, const std::vector<int>& _deltas,
525 Size imsize, Mat& hist, int dims, const float** _ranges,
526 const double* _uniranges, bool uniform )
527{
528 uchar** ptrs = &_ptrs[0];
529 const int* deltas = &_deltas[0];
530 uchar* H = hist.ptr();
531 int x;
532 const uchar* mask = _ptrs[dims];
533 int mstep = _deltas[dims*2 + 1];
534 std::vector<size_t> _tab;
535
536 calcHistLookupTables_8u( hist, shist: SparseMat(), dims, ranges: _ranges, uniranges: _uniranges, uniform, issparse: false, _tab );
537 const size_t* tab = &_tab[0];
538
539 if( dims == 1 )
540 {
541 int d0 = deltas[0], step0 = deltas[1];
542 int matH[256] = { 0, };
543 const uchar* p0 = (const uchar*)ptrs[0];
544
545 for( ; imsize.height--; p0 += step0, mask += mstep )
546 {
547 if( !mask )
548 {
549 if( d0 == 1 )
550 {
551 for( x = 0; x <= imsize.width - 4; x += 4 )
552 {
553 int t0 = p0[x], t1 = p0[x+1];
554 matH[t0]++; matH[t1]++;
555 t0 = p0[x+2]; t1 = p0[x+3];
556 matH[t0]++; matH[t1]++;
557 }
558 p0 += x;
559 }
560 else
561 for( x = 0; x <= imsize.width - 4; x += 4 )
562 {
563 int t0 = p0[0], t1 = p0[d0];
564 matH[t0]++; matH[t1]++;
565 p0 += d0*2;
566 t0 = p0[0]; t1 = p0[d0];
567 matH[t0]++; matH[t1]++;
568 p0 += d0*2;
569 }
570
571 for( ; x < imsize.width; x++, p0 += d0 )
572 matH[*p0]++;
573 }
574 else
575 for( x = 0; x < imsize.width; x++, p0 += d0 )
576 if( mask[x] )
577 matH[*p0]++;
578 }
579
580 for(int i = 0; i < 256; i++ )
581 {
582 size_t hidx = tab[i];
583 if( hidx < OUT_OF_RANGE )
584 *(int*)(H + hidx) += matH[i];
585 }
586 }
587 else if( dims == 2 )
588 {
589 int d0 = deltas[0], step0 = deltas[1],
590 d1 = deltas[2], step1 = deltas[3];
591 const uchar* p0 = (const uchar*)ptrs[0];
592 const uchar* p1 = (const uchar*)ptrs[1];
593
594 for( ; imsize.height--; p0 += step0, p1 += step1, mask += mstep )
595 {
596 if( !mask )
597 for( x = 0; x < imsize.width; x++, p0 += d0, p1 += d1 )
598 {
599 size_t idx = tab[*p0] + tab[*p1 + 256];
600 if( idx < OUT_OF_RANGE )
601 ++*(int*)(H + idx);
602 }
603 else
604 for( x = 0; x < imsize.width; x++, p0 += d0, p1 += d1 )
605 {
606 size_t idx;
607 if( mask[x] && (idx = tab[*p0] + tab[*p1 + 256]) < OUT_OF_RANGE )
608 ++*(int*)(H + idx);
609 }
610 }
611 }
612 else if( dims == 3 )
613 {
614 int d0 = deltas[0], step0 = deltas[1],
615 d1 = deltas[2], step1 = deltas[3],
616 d2 = deltas[4], step2 = deltas[5];
617
618 const uchar* p0 = (const uchar*)ptrs[0];
619 const uchar* p1 = (const uchar*)ptrs[1];
620 const uchar* p2 = (const uchar*)ptrs[2];
621
622 for( ; imsize.height--; p0 += step0, p1 += step1, p2 += step2, mask += mstep )
623 {
624 if( !mask )
625 for( x = 0; x < imsize.width; x++, p0 += d0, p1 += d1, p2 += d2 )
626 {
627 size_t idx = tab[*p0] + tab[*p1 + 256] + tab[*p2 + 512];
628 if( idx < OUT_OF_RANGE )
629 ++*(int*)(H + idx);
630 }
631 else
632 for( x = 0; x < imsize.width; x++, p0 += d0, p1 += d1, p2 += d2 )
633 {
634 size_t idx;
635 if( mask[x] && (idx = tab[*p0] + tab[*p1 + 256] + tab[*p2 + 512]) < OUT_OF_RANGE )
636 ++*(int*)(H + idx);
637 }
638 }
639 }
640 else
641 {
642 for( ; imsize.height--; mask += mstep )
643 {
644 if( !mask )
645 for( x = 0; x < imsize.width; x++ )
646 {
647 uchar* Hptr = H;
648 int i = 0;
649 for( ; i < dims; i++ )
650 {
651 size_t idx = tab[*ptrs[i] + i*256];
652 if( idx >= OUT_OF_RANGE )
653 break;
654 Hptr += idx;
655 ptrs[i] += deltas[i*2];
656 }
657
658 if( i == dims )
659 ++*((int*)Hptr);
660 else
661 for( ; i < dims; i++ )
662 ptrs[i] += deltas[i*2];
663 }
664 else
665 for( x = 0; x < imsize.width; x++ )
666 {
667 uchar* Hptr = H;
668 int i = 0;
669 if( mask[x] )
670 for( ; i < dims; i++ )
671 {
672 size_t idx = tab[*ptrs[i] + i*256];
673 if( idx >= OUT_OF_RANGE )
674 break;
675 Hptr += idx;
676 ptrs[i] += deltas[i*2];
677 }
678
679 if( i == dims )
680 ++*((int*)Hptr);
681 else
682 for( ; i < dims; i++ )
683 ptrs[i] += deltas[i*2];
684 }
685 for(int i = 0; i < dims; i++ )
686 ptrs[i] += deltas[i*2 + 1];
687 }
688 }
689}
690
691#ifdef HAVE_IPP
692
693typedef IppStatus(CV_STDCALL * IppiHistogram_C1)(const void* pSrc, int srcStep,
694 IppiSize roiSize, Ipp32u* pHist, const IppiHistogramSpec* pSpec, Ipp8u* pBuffer);
695
696static IppiHistogram_C1 getIppiHistogramFunction_C1(int type)
697{
698 IppiHistogram_C1 ippFunction =
699 (type == CV_8UC1) ? (IppiHistogram_C1)ippiHistogram_8u_C1R :
700 (type == CV_16UC1) ? (IppiHistogram_C1)ippiHistogram_16u_C1R :
701 (type == CV_32FC1) ? (IppiHistogram_C1)ippiHistogram_32f_C1R :
702 NULL;
703
704 return ippFunction;
705}
706
// Per-thread scratch state for ipp_calcHistParallel: the IPP histogram spec,
// its work buffer, and a thread-local histogram that is merged into the
// shared result after each stripe.
class ipp_calcHistParallelTLS
{
public:
    ipp_calcHistParallelTLS() {}

    IppAutoBuffer<IppiHistogramSpec> spec;   // initialized lazily on first use per thread
    IppAutoBuffer<Ipp8u> buffer;             // IPP work buffer (may be empty)
    IppAutoBuffer<Ipp32u> thist;             // thread-local histogram bins
};
716
717class ipp_calcHistParallel: public ParallelLoopBody
718{
719public:
720 ipp_calcHistParallel(const Mat &src, Mat &hist, Ipp32s histSize, const float *ranges, bool uniform, bool &ok):
721 ParallelLoopBody(), m_src(src), m_hist(hist), m_ok(ok)
722 {
723 ok = true;
724
725 m_uniform = uniform;
726 m_ranges = ranges;
727 m_histSize = histSize;
728 m_type = ippiGetDataType(depth: src.type());
729 m_levelsNum = histSize+1;
730 ippiHistogram_C1 = getIppiHistogramFunction_C1(type: src.type());
731 m_fullRoi = ippiSize(size: src.size());
732 m_bufferSize = 0;
733 m_specSize = 0;
734 if(!ippiHistogram_C1)
735 {
736 ok = false;
737 return;
738 }
739
740 if(ippiHistogramGetBufferSize(dataType: m_type, roiSize: m_fullRoi, nLevels: &m_levelsNum, numChannels: 1, uniform: 1, pSpecSize: &m_specSize, pBufferSize: &m_bufferSize) < 0)
741 {
742 ok = false;
743 return;
744 }
745
746 hist.setTo(value: 0);
747 }
748
749 virtual void operator() (const Range & range) const CV_OVERRIDE
750 {
751 CV_INSTRUMENT_REGION_IPP();
752
753 if(!m_ok)
754 return;
755
756 ipp_calcHistParallelTLS *pTls = m_tls.get();
757
758 IppiSize roi = {.width: m_src.cols, .height: range.end - range.start };
759 bool mtLoop = false;
760 if(m_fullRoi.height != roi.height)
761 mtLoop = true;
762
763 if(!pTls->spec)
764 {
765 pTls->spec.allocate(size: m_specSize);
766 if(!pTls->spec.get())
767 {
768 m_ok = false;
769 return;
770 }
771
772 pTls->buffer.allocate(size: m_bufferSize);
773 if(!pTls->buffer.get() && m_bufferSize)
774 {
775 m_ok = false;
776 return;
777 }
778
779 if(m_uniform)
780 {
781 if(ippiHistogramUniformInit(dataType: m_type, lowerLevel: (Ipp32f*)&m_ranges[0], upperLevel: (Ipp32f*)&m_ranges[1], nLevels: (Ipp32s*)&m_levelsNum, numChannels: 1, pSpec: pTls->spec) < 0)
782 {
783 m_ok = false;
784 return;
785 }
786 }
787 else
788 {
789 if(ippiHistogramInit(dataType: m_type, pLevels: (const Ipp32f**)&m_ranges, nLevels: (Ipp32s*)&m_levelsNum, numChannels: 1, pSpec: pTls->spec) < 0)
790 {
791 m_ok = false;
792 return;
793 }
794 }
795
796 pTls->thist.allocate(size: m_histSize*sizeof(Ipp32u));
797 }
798
799 if(CV_INSTRUMENT_FUN_IPP(ippiHistogram_C1, m_src.ptr(range.start), (int)m_src.step, roi, pTls->thist, pTls->spec, pTls->buffer) < 0)
800 {
801 m_ok = false;
802 return;
803 }
804
805 if(mtLoop)
806 {
807 for(int i = 0; i < m_histSize; i++)
808 CV_XADD((int*)(m_hist.ptr(i)), *(int*)((Ipp32u*)pTls->thist + i));
809 }
810 else
811 ippiCopy_32s_C1R(pSrc: (Ipp32s*)pTls->thist.get(), srcStep: sizeof(Ipp32u), pDst: (Ipp32s*)m_hist.ptr(), dstStep: (int)m_hist.step, roiSize: ippiSize(width: 1, height: m_histSize));
812 }
813
814private:
815 const Mat &m_src;
816 Mat &m_hist;
817 Ipp32s m_histSize;
818 const float *m_ranges;
819 bool m_uniform;
820
821 IppiHistogram_C1 ippiHistogram_C1;
822 IppiSize m_fullRoi;
823 IppDataType m_type;
824 Ipp32s m_levelsNum;
825 int m_bufferSize;
826 int m_specSize;
827
828 mutable Mutex m_syncMutex;
829 TLSData<ipp_calcHistParallelTLS> m_tls;
830
831 volatile bool &m_ok;
832 const ipp_calcHistParallel & operator = (const ipp_calcHistParallel & );
833};
834
835#endif
836
837}
838
839#ifdef HAVE_OPENVX
840namespace cv
841{
    namespace ovx {
        // OpenVX dispatch overhead outweighs the gain on small images;
        // skip anything below roughly a 3-megapixel frame.
        template <> inline bool skipSmallImages<VX_KERNEL_HISTOGRAM>(int w, int h) { return w*h < 2048 * 1536; }
    }
    // Computes a 1-D uniform histogram of a CV_8UC1 image through OpenVX.
    // Returns false — so the caller falls back to the generic path — when the
    // requested range is not exactly representable as integer offset + span,
    // or when an n-dimensional matrix cannot be mapped onto a single 2-D
    // OpenVX image.
    static bool openvx_calchist(const Mat& image, OutputArray _hist, const int histSize,
                                const float* _range)
    {
        vx_int32 offset = (vx_int32)(_range[0]);
        vx_uint32 range = (vx_uint32)(_range[1] - _range[0]);
        // OpenVX distributions only support integer offset/range
        if (float(offset) != _range[0] || float(range) != (_range[1] - _range[0]))
            return false;

        size_t total_size = image.total();
        int rows = image.dims > 1 ? image.size[0] : 1, cols = rows ? (int)(total_size / rows) : 0;
        // >2-D data must be continuous so it can be viewed as rows x cols
        if (image.dims > 2 && !(image.isContinuous() && cols > 0 && (size_t)rows*cols == total_size))
            return false;

        try
        {
            ivx::Context ctx = ovx::getOpenVXContext();
#if VX_VERSION <= VX_VERSION_1_0
            // OpenVX 1.0 Khronos implementation: range must divide evenly
            // across the bins
            if (ctx.vendorID() == VX_ID_KHRONOS && (range % histSize))
                return false;
#endif

            ivx::Image
                img = ivx::Image::createFromHandle(ctx, VX_DF_IMAGE_U8,
                    ivx::Image::createAddressing(cols, rows, 1, (vx_int32)(image.step[0])), image.data);

            ivx::Distribution vxHist = ivx::Distribution::create(ctx, histSize, offset, range);
            ivx::IVX_CHECK_STATUS(vxuHistogram(ctx, img, vxHist));

            _hist.create(1, &histSize, CV_32F);
            Mat hist = _hist.getMat(), ihist = hist;
            // read the integer distribution through a CV_32S alias, then
            // convert to the CV_32F output in place
            ihist.flags = (ihist.flags & ~CV_MAT_TYPE_MASK) | CV_32S;
            vxHist.copyTo(ihist);
            ihist.convertTo(hist, CV_32F);

#ifdef VX_VERSION_1_1
            img.swapHandle();
#endif
        }
        catch (const ivx::RuntimeError & e)
        {
            VX_DbgThrow(e.what());
        }
        catch (const ivx::WrapperError & e)
        {
            VX_DbgThrow(e.what());
        }

        return true;
    }
894}
895#endif
896
897#ifdef HAVE_IPP
898#define IPP_HISTOGRAM_PARALLEL 1
899namespace cv
900{
901static bool ipp_calchist(const Mat &image, Mat &hist, int histSize, const float** ranges, bool uniform, bool accumulate)
902{
903 CV_INSTRUMENT_REGION_IPP();
904
905#if IPP_VERSION_X100 < 201801
906 // No SSE42 optimization for uniform 32f
907 if(uniform && image.depth() == CV_32F && cv::ipp::getIppTopFeatures() == ippCPUID_SSE42)
908 return false;
909#endif
910
911 // IPP_DISABLE_HISTOGRAM - https://github.com/opencv/opencv/issues/11544
912 // and https://github.com/opencv/opencv/issues/21595
913 if ((uniform && (ranges[0][1] - ranges[0][0]) != histSize) || abs(x: ranges[0][0]) != cvFloor(value: ranges[0][0]))
914 return false;
915
916 Mat ihist = hist;
917 if(accumulate)
918 ihist.create(ndims: 1, sizes: &histSize, CV_32S);
919
920 bool ok = true;
921 int threads = ippiSuggestThreadsNum(image, multiplier: (1+((double)ihist.total()/image.total()))*2);
922 Range range(0, image.rows);
923 ipp_calcHistParallel invoker(image, ihist, histSize, ranges[0], uniform, ok);
924 if(!ok)
925 return false;
926
927 if(IPP_HISTOGRAM_PARALLEL && threads > 1)
928 parallel_for_(range, body: invoker, nstripes: threads*2);
929 else
930 invoker(range);
931
932 if(ok)
933 {
934 if(accumulate)
935 {
936 IppiSize histRoi = ippiSize(width: 1, height: histSize);
937 IppAutoBuffer<Ipp32f> fhist(histSize*sizeof(Ipp32f));
938 CV_INSTRUMENT_FUN_IPP(ippiConvert_32s32f_C1R, (Ipp32s*)ihist.ptr(), (int)ihist.step, (Ipp32f*)fhist, sizeof(Ipp32f), histRoi);
939 CV_INSTRUMENT_FUN_IPP(ippiAdd_32f_C1IR, (Ipp32f*)fhist, sizeof(Ipp32f), (Ipp32f*)hist.ptr(), (int)hist.step, histRoi);
940 }
941 else
942 CV_INSTRUMENT_FUN_IPP(ippiConvert_32s32f_C1R, (Ipp32s*)ihist.ptr(), (int)ihist.step, (Ipp32f*)hist.ptr(), (int)hist.step, ippiSize(1, histSize));
943 }
944 return ok;
945}
946}
947#endif
948
949void cv::calcHist( const Mat* images, int nimages, const int* channels,
950 InputArray _mask, OutputArray _hist, int dims, const int* histSize,
951 const float** ranges, bool uniform, bool accumulate )
952{
953 CV_INSTRUMENT_REGION();
954
955 CV_Assert(images && nimages > 0);
956
957 CV_OVX_RUN(
958 images && histSize &&
959 nimages == 1 && images[0].type() == CV_8UC1 && dims == 1 && _mask.getMat().empty() &&
960 (!channels || channels[0] == 0) && !accumulate && uniform &&
961 ranges && ranges[0] &&
962 !ovx::skipSmallImages<VX_KERNEL_HISTOGRAM>(images[0].cols, images[0].rows),
963 openvx_calchist(images[0], _hist, histSize[0], ranges[0]))
964
965 Mat mask = _mask.getMat();
966
967 CV_Assert(dims > 0 && histSize);
968
969 const uchar* const histdata = _hist.getMat().ptr();
970 _hist.create(dims, size: histSize, CV_32F);
971 Mat hist = _hist.getMat();
972
973 if(histdata != hist.data)
974 accumulate = false;
975
976 CV_IPP_RUN(
977 nimages == 1 && dims == 1 && channels && channels[0] == 0
978 && _mask.empty() && images[0].dims <= 2 && ranges && ranges[0],
979 ipp_calchist(images[0], hist, histSize[0], ranges, uniform, accumulate));
980
981 if (nimages == 1 && dims == 1 && channels && channels[0] == 0 && _mask.empty() && images[0].dims <= 2 && ranges && ranges[0]) {
982 CALL_HAL(calcHist, cv_hal_calcHist, images[0].data, images[0].step, images[0].type(), images[0].cols, images[0].rows,
983 hist.ptr<float>(), histSize[0], ranges, uniform, accumulate);
984 }
985
986 Mat ihist = hist;
987 ihist.flags = (ihist.flags & ~CV_MAT_TYPE_MASK)|CV_32S;
988
989 if(!accumulate)
990 hist = Scalar(0.);
991 else
992 hist.convertTo(m: ihist, CV_32S);
993
994 std::vector<uchar*> ptrs;
995 std::vector<int> deltas;
996 std::vector<double> uniranges;
997 Size imsize;
998
999 CV_Assert( mask.empty() || mask.type() == CV_8UC1 );
1000 histPrepareImages( images, nimages, channels, mask, dims, histSize: hist.size, ranges,
1001 uniform, ptrs, deltas, imsize, uniranges );
1002 const double* _uniranges = uniform ? &uniranges[0] : 0;
1003
1004 int depth = images[0].depth();
1005
1006 if( depth == CV_8U )
1007 calcHist_8u(ptrs&: ptrs, deltas: deltas, imsize, hist&: ihist, dims, ranges: ranges, _uniranges, uniform );
1008 else if( depth == CV_16U )
1009 calcHist_<ushort>(ptrs&: ptrs, deltas: deltas, imsize, hist&: ihist, dims, ranges: ranges, _uniranges, uniform );
1010 else if( depth == CV_32F )
1011 calcHist_<float>(ptrs&: ptrs, deltas: deltas, imsize, hist&: ihist, dims, ranges: ranges, _uniranges, uniform );
1012 else
1013 CV_Error(cv::Error::StsUnsupportedFormat, "");
1014
1015 ihist.convertTo(m: hist, CV_32F);
1016}
1017
1018
1019namespace cv
1020{
1021
1022template<typename T> static void
1023calcSparseHist_( std::vector<uchar*>& _ptrs, const std::vector<int>& _deltas,
1024 Size imsize, SparseMat& hist, int dims, const float** _ranges,
1025 const double* _uniranges, bool uniform )
1026{
1027 T** ptrs = (T**)&_ptrs[0];
1028 const int* deltas = &_deltas[0];
1029 int i, x;
1030 const uchar* mask = _ptrs[dims];
1031 int mstep = _deltas[dims*2 + 1];
1032 const int* size = hist.hdr->size;
1033 int idx[CV_MAX_DIM];
1034
1035 if( uniform )
1036 {
1037 const double* uniranges = &_uniranges[0];
1038
1039 for( ; imsize.height--; mask += mstep )
1040 {
1041 for( x = 0; x < imsize.width; x++ )
1042 {
1043 i = 0;
1044 if( !mask || mask[x] )
1045 for( ; i < dims; i++ )
1046 {
1047 idx[i] = cvFloor(*ptrs[i]*uniranges[i*2] + uniranges[i*2+1]);
1048 if( (unsigned)idx[i] >= (unsigned)size[i] )
1049 break;
1050 ptrs[i] += deltas[i*2];
1051 }
1052
1053 if( i == dims )
1054 ++*(int*)hist.ptr(idx, createMissing: true);
1055 else
1056 for( ; i < dims; i++ )
1057 ptrs[i] += deltas[i*2];
1058 }
1059 for( i = 0; i < dims; i++ )
1060 ptrs[i] += deltas[i*2 + 1];
1061 }
1062 }
1063 else if (_ranges)
1064 {
1065 // non-uniform histogram
1066 const float* ranges[CV_MAX_DIM];
1067 for( i = 0; i < dims; i++ )
1068 ranges[i] = &_ranges[i][0];
1069
1070 for( ; imsize.height--; mask += mstep )
1071 {
1072 for( x = 0; x < imsize.width; x++ )
1073 {
1074 i = 0;
1075
1076 if( !mask || mask[x] )
1077 for( ; i < dims; i++ )
1078 {
1079 float v = (float)*ptrs[i];
1080 const float* R = ranges[i];
1081 int j = -1, sz = size[i];
1082
1083 while( v >= R[j+1] && ++j < sz )
1084 ; // nop
1085
1086 if( (unsigned)j >= (unsigned)sz )
1087 break;
1088 ptrs[i] += deltas[i*2];
1089 idx[i] = j;
1090 }
1091
1092 if( i == dims )
1093 ++*(int*)hist.ptr(idx, createMissing: true);
1094 else
1095 for( ; i < dims; i++ )
1096 ptrs[i] += deltas[i*2];
1097 }
1098
1099 for( i = 0; i < dims; i++ )
1100 ptrs[i] += deltas[i*2 + 1];
1101 }
1102 }
1103 else
1104 {
1105 CV_Error(Error::StsBadArg, "Either ranges, either uniform ranges should be provided");
1106 }
1107}
1108
1109
1110static void
1111calcSparseHist_8u( std::vector<uchar*>& _ptrs, const std::vector<int>& _deltas,
1112 Size imsize, SparseMat& hist, int dims, const float** _ranges,
1113 const double* _uniranges, bool uniform )
1114{
1115 uchar** ptrs = (uchar**)&_ptrs[0];
1116 const int* deltas = &_deltas[0];
1117 int x;
1118 const uchar* mask = _ptrs[dims];
1119 int mstep = _deltas[dims*2 + 1];
1120 int idx[CV_MAX_DIM];
1121 std::vector<size_t> _tab;
1122
1123 calcHistLookupTables_8u( hist: Mat(), shist: hist, dims, ranges: _ranges, uniranges: _uniranges, uniform, issparse: true, _tab );
1124 const size_t* tab = &_tab[0];
1125
1126 for( ; imsize.height--; mask += mstep )
1127 {
1128 for( x = 0; x < imsize.width; x++ )
1129 {
1130 int i = 0;
1131 if( !mask || mask[x] )
1132 for( ; i < dims; i++ )
1133 {
1134 size_t hidx = tab[*ptrs[i] + i*256];
1135 if( hidx >= OUT_OF_RANGE )
1136 break;
1137 ptrs[i] += deltas[i*2];
1138 idx[i] = (int)hidx;
1139 }
1140
1141 if( i == dims )
1142 ++*(int*)hist.ptr(idx,createMissing: true);
1143 else
1144 for( ; i < dims; i++ )
1145 ptrs[i] += deltas[i*2];
1146 }
1147 for(int i = 0; i < dims; i++ )
1148 ptrs[i] += deltas[i*2 + 1];
1149 }
1150}
1151
1152
1153static void calcHist( const Mat* images, int nimages, const int* channels,
1154 const Mat& mask, SparseMat& hist, int dims, const int* histSize,
1155 const float** ranges, bool uniform, bool accumulate, bool keepInt )
1156{
1157 size_t i, N;
1158
1159 if( !accumulate )
1160 hist.create(dims, sizes: histSize, CV_32F);
1161 else
1162 {
1163 SparseMatIterator it = hist.begin();
1164 for( i = 0, N = hist.nzcount(); i < N; i++, ++it )
1165 {
1166 CV_Assert(it.ptr != NULL);
1167 Cv32suf* val = (Cv32suf*)it.ptr;
1168 val->i = cvRound(value: val->f);
1169 }
1170 }
1171
1172 std::vector<uchar*> ptrs;
1173 std::vector<int> deltas;
1174 std::vector<double> uniranges;
1175 Size imsize;
1176
1177 CV_Assert( mask.empty() || mask.type() == CV_8UC1 );
1178 histPrepareImages( images, nimages, channels, mask, dims, histSize: hist.hdr->size, ranges,
1179 uniform, ptrs, deltas, imsize, uniranges );
1180 const double* _uniranges = uniform ? &uniranges[0] : 0;
1181
1182 int depth = images[0].depth();
1183 if( depth == CV_8U )
1184 calcSparseHist_8u(ptrs&: ptrs, deltas: deltas, imsize, hist, dims, ranges: ranges, _uniranges, uniform );
1185 else if( depth == CV_16U )
1186 calcSparseHist_<ushort>(ptrs&: ptrs, deltas: deltas, imsize, hist, dims, ranges: ranges, _uniranges, uniform );
1187 else if( depth == CV_32F )
1188 calcSparseHist_<float>(ptrs&: ptrs, deltas: deltas, imsize, hist, dims, ranges: ranges, _uniranges, uniform );
1189 else
1190 CV_Error(cv::Error::StsUnsupportedFormat, "");
1191
1192 if( !keepInt )
1193 {
1194 SparseMatIterator it = hist.begin();
1195 for( i = 0, N = hist.nzcount(); i < N; i++, ++it )
1196 {
1197 CV_Assert(it.ptr != NULL);
1198 Cv32suf* val = (Cv32suf*)it.ptr;
1199 val->f = (float)val->i;
1200 }
1201 }
1202}
1203
1204#ifdef HAVE_OPENCL
1205
// Bin count used by the OpenCL histogram kernels (8-bit input => 256 bins).
enum
{
    BINS = 256
};
1210
1211static bool ocl_calcHist1(InputArray _src, OutputArray _hist, int ddepth = CV_32S)
1212{
1213 const ocl::Device & dev = ocl::Device::getDefault();
1214 int compunits = dev.maxComputeUnits();
1215 size_t wgs = dev.maxWorkGroupSize();
1216 Size size = _src.size();
1217 bool use16 = size.width % 16 == 0 && _src.offset() % 16 == 0 && _src.step() % 16 == 0;
1218 int kercn = dev.isAMD() && use16 ? 16 : std::min(a: 4, b: ocl::predictOptimalVectorWidth(src1: _src));
1219
1220 ocl::Kernel k1("calculate_histogram", ocl::imgproc::histogram_oclsrc,
1221 format("-D BINS=%d -D HISTS_COUNT=%d -D WGS=%zu -D kercn=%d -D T=%s%s",
1222 BINS, compunits, wgs, kercn,
1223 kercn == 4 ? "int" : ocl::typeToStr(CV_8UC(kercn)),
1224 _src.isContinuous() ? " -D HAVE_SRC_CONT" : ""));
1225 if (k1.empty())
1226 return false;
1227
1228 _hist.create(rows: BINS, cols: 1, type: ddepth);
1229 UMat src = _src.getUMat(), ghist(1, BINS * compunits, CV_32SC1),
1230 hist = _hist.getUMat();
1231
1232 k1.args(kernel_args: ocl::KernelArg::ReadOnly(m: src),
1233 kernel_args: ocl::KernelArg::PtrWriteOnly(m: ghist), kernel_args: (int)src.total());
1234
1235 size_t globalsize = compunits * wgs;
1236 if (!k1.run(dims: 1, globalsize: &globalsize, localsize: &wgs, sync: false))
1237 return false;
1238
1239 wgs = std::min<size_t>(a: ocl::Device::getDefault().maxWorkGroupSize(), b: BINS);
1240 char cvt[50];
1241 ocl::Kernel k2("merge_histogram", ocl::imgproc::histogram_oclsrc,
1242 format("-D BINS=%d -D HISTS_COUNT=%d -D WGS=%d -D convertToHT=%s -D HT=%s",
1243 BINS, compunits, (int)wgs, ocl::convertTypeStr(CV_32S, ddepth, 1, cvt, sizeof(cvt)),
1244 ocl::typeToStr(ddepth)));
1245 if (k2.empty())
1246 return false;
1247
1248 k2.args(kernel_args: ocl::KernelArg::PtrReadOnly(m: ghist),
1249 kernel_args: ocl::KernelArg::WriteOnlyNoSize(m: hist));
1250
1251 return k2.run(dims: 1, globalsize: &wgs, localsize: &wgs, sync: false);
1252}
1253
1254static bool ocl_calcHist(InputArrayOfArrays images, OutputArray hist)
1255{
1256 std::vector<UMat> v;
1257 images.getUMatVector(umv&: v);
1258
1259 return ocl_calcHist1(src: v[0], hist: hist, CV_32F);
1260}
1261
1262#endif
1263
1264}
1265
1266void cv::calcHist( const Mat* images, int nimages, const int* channels,
1267 InputArray _mask, SparseMat& hist, int dims, const int* histSize,
1268 const float** ranges, bool uniform, bool accumulate )
1269{
1270 CV_INSTRUMENT_REGION();
1271
1272 CV_Assert(images && nimages > 0);
1273
1274 Mat mask = _mask.getMat();
1275 calcHist( images, nimages, channels, mask, hist, dims, histSize,
1276 ranges, uniform, accumulate, keepInt: false );
1277}
1278
1279
1280void cv::calcHist( InputArrayOfArrays images, const std::vector<int>& channels,
1281 InputArray mask, OutputArray hist,
1282 const std::vector<int>& histSize,
1283 const std::vector<float>& ranges,
1284 bool accumulate )
1285{
1286 CV_INSTRUMENT_REGION();
1287
1288 CV_OCL_RUN(images.total() == 1 && channels.size() == 1 && images.channels(i: 0) == 1 &&
1289 channels[0] == 0 && images.isUMatVector() && mask.empty() && !accumulate &&
1290 histSize.size() == 1 && histSize[0] == BINS && ranges.size() == 2 &&
1291 ranges[0] == 0 && ranges[1] == static_cast<float>(BINS),
1292 ocl_calcHist(images, hist))
1293
1294 int i, dims = (int)histSize.size(), rsz = (int)ranges.size(), csz = (int)channels.size();
1295 int nimages = (int)images.total();
1296
1297 CV_Assert(nimages > 0 && dims > 0);
1298 CV_Assert(rsz == dims*2 || (rsz == 0 && images.depth(0) == CV_8U));
1299 CV_Assert(csz == 0 || csz == dims);
1300 float* _ranges[CV_MAX_DIM];
1301 if( rsz > 0 )
1302 {
1303 for( i = 0; i < rsz/2; i++ )
1304 _ranges[i] = (float*)&ranges[i*2];
1305 }
1306
1307 AutoBuffer<Mat> buf(nimages);
1308 for( i = 0; i < nimages; i++ )
1309 buf[i] = images.getMat(i);
1310
1311 calcHist(images: &buf[0], nimages, channels: csz ? &channels[0] : 0,
1312 mask: mask, hist: hist, dims, histSize: &histSize[0], ranges: rsz ? (const float**)_ranges : 0,
1313 uniform: true, accumulate);
1314}
1315
1316
1317/////////////////////////////////////// B A C K P R O J E C T ////////////////////////////////////
1318
1319namespace cv
1320{
1321
// Dense back-projection worker: for every pixel, looks up the histogram bin
// that the pixel's channel values fall into and writes the scaled bin value
// (0 if any channel is out of range). T is the input pixel type, BT the
// back-projection output type. Specialized unrolled paths exist for 1-3
// dimensions; an index-walking generic loop handles higher dimensions.
template<typename T, typename BT> static void
calcBackProj_( std::vector<uchar*>& _ptrs, const std::vector<int>& _deltas,
               Size imsize, const Mat& hist, int dims, const float** _ranges,
               const double* _uniranges, float scale, bool uniform )
{
    // _ptrs[0..dims-1]: channel pointers; _ptrs[dims]: output plane.
    // deltas[i*2]: per-pixel step; deltas[i*2+1]: end-of-row step.
    T** ptrs = (T**)&_ptrs[0];
    const int* deltas = &_deltas[0];
    const uchar* H = hist.ptr();
    int i, x;
    BT* bproj = (BT*)_ptrs[dims];
    int bpstep = _deltas[dims*2 + 1];
    int size[CV_MAX_DIM];
    size_t hstep[CV_MAX_DIM];

    // Cache histogram sizes and byte steps per dimension.
    for( i = 0; i < dims; i++ )
    {
        size[i] = hist.size[i];
        hstep[i] = hist.step[i];
    }

    if( uniform )
    {
        // uniranges holds (a, b) pairs per dimension: bin = floor(v*a + b).
        const double* uniranges = &_uniranges[0];

        if( dims == 1 )
        {
            double a = uniranges[0], b = uniranges[1];
            int sz = size[0], d0 = deltas[0], step0 = deltas[1];
            const T* p0 = (const T*)ptrs[0];

            for( ; imsize.height--; p0 += step0, bproj += bpstep )
            {
                for( x = 0; x < imsize.width; x++, p0 += d0 )
                {
                    int idx = cvFloor(*p0*a + b);
                    // unsigned compare rejects both negative and too-large bins
                    bproj[x] = (unsigned)idx < (unsigned)sz ? saturate_cast<BT>(((const float*)H)[idx]*scale) : 0;
                }
            }
        }
        else if( dims == 2 )
        {
            double a0 = uniranges[0], b0 = uniranges[1],
                   a1 = uniranges[2], b1 = uniranges[3];
            int sz0 = size[0], sz1 = size[1];
            int d0 = deltas[0], step0 = deltas[1],
                d1 = deltas[2], step1 = deltas[3];
            size_t hstep0 = hstep[0];
            const T* p0 = (const T*)ptrs[0];
            const T* p1 = (const T*)ptrs[1];

            for( ; imsize.height--; p0 += step0, p1 += step1, bproj += bpstep )
            {
                for( x = 0; x < imsize.width; x++, p0 += d0, p1 += d1 )
                {
                    int idx0 = cvFloor(*p0*a0 + b0);
                    int idx1 = cvFloor(*p1*a1 + b1);
                    bproj[x] = (unsigned)idx0 < (unsigned)sz0 &&
                               (unsigned)idx1 < (unsigned)sz1 ?
                        saturate_cast<BT>(((const float*)(H + hstep0*idx0))[idx1]*scale) : 0;
                }
            }
        }
        else if( dims == 3 )
        {
            double a0 = uniranges[0], b0 = uniranges[1],
                   a1 = uniranges[2], b1 = uniranges[3],
                   a2 = uniranges[4], b2 = uniranges[5];
            int sz0 = size[0], sz1 = size[1], sz2 = size[2];
            int d0 = deltas[0], step0 = deltas[1],
                d1 = deltas[2], step1 = deltas[3],
                d2 = deltas[4], step2 = deltas[5];
            size_t hstep0 = hstep[0], hstep1 = hstep[1];
            const T* p0 = (const T*)ptrs[0];
            const T* p1 = (const T*)ptrs[1];
            const T* p2 = (const T*)ptrs[2];

            for( ; imsize.height--; p0 += step0, p1 += step1, p2 += step2, bproj += bpstep )
            {
                for( x = 0; x < imsize.width; x++, p0 += d0, p1 += d1, p2 += d2 )
                {
                    int idx0 = cvFloor(*p0*a0 + b0);
                    int idx1 = cvFloor(*p1*a1 + b1);
                    int idx2 = cvFloor(*p2*a2 + b2);
                    bproj[x] = (unsigned)idx0 < (unsigned)sz0 &&
                               (unsigned)idx1 < (unsigned)sz1 &&
                               (unsigned)idx2 < (unsigned)sz2 ?
                        saturate_cast<BT>(((const float*)(H + hstep0*idx0 + hstep1*idx1))[idx2]*scale) : 0;
                }
            }
        }
        else
        {
            // Generic N-dimensional path: walk each channel pointer explicitly.
            for( ; imsize.height--; bproj += bpstep )
            {
                for( x = 0; x < imsize.width; x++ )
                {
                    const uchar* Hptr = H;
                    for( i = 0; i < dims; i++ )
                    {
                        int idx = cvFloor(*ptrs[i]*uniranges[i*2] + uniranges[i*2+1]);
                        // also reject values at/above the explicit upper bound when ranges are given
                        if( (unsigned)idx >= (unsigned)size[i] || (_ranges && *ptrs[i] >= _ranges[i][1]))
                            break;
                        ptrs[i] += deltas[i*2];
                        Hptr += idx*hstep[i];
                    }

                    if( i == dims )
                        bproj[x] = saturate_cast<BT>(*(const float*)Hptr*scale);
                    else
                    {
                        // out-of-range pixel: output 0 and keep pointers in sync
                        bproj[x] = 0;
                        for( ; i < dims; i++ )
                            ptrs[i] += deltas[i*2];
                    }
                }
                for( i = 0; i < dims; i++ )
                    ptrs[i] += deltas[i*2 + 1];
            }
        }
    }
    else if (_ranges)
    {
        // non-uniform histogram
        const float* ranges[CV_MAX_DIM];
        for( i = 0; i < dims; i++ )
            ranges[i] = &_ranges[i][0];

        for( ; imsize.height--; bproj += bpstep )
        {
            for( x = 0; x < imsize.width; x++ )
            {
                const uchar* Hptr = H;
                for( i = 0; i < dims; i++ )
                {
                    float v = (float)*ptrs[i];
                    const float* R = ranges[i];
                    int idx = -1, sz = size[i];

                    // linear scan of the bin boundaries to find v's bin
                    while( v >= R[idx+1] && ++idx < sz )
                        ; // nop

                    if( (unsigned)idx >= (unsigned)sz )
                        break;

                    ptrs[i] += deltas[i*2];
                    Hptr += idx*hstep[i];
                }

                if( i == dims )
                    bproj[x] = saturate_cast<BT>(*(const float*)Hptr*scale);
                else
                {
                    bproj[x] = 0;
                    for( ; i < dims; i++ )
                        ptrs[i] += deltas[i*2];
                }
            }

            for( i = 0; i < dims; i++ )
                ptrs[i] += deltas[i*2 + 1];
        }
    }
    else
    {
        CV_Error(Error::StsBadArg, "Either ranges, either uniform ranges should be provided");
    }
}
1489
1490
1491static void
1492calcBackProj_8u( std::vector<uchar*>& _ptrs, const std::vector<int>& _deltas,
1493 Size imsize, const Mat& hist, int dims, const float** _ranges,
1494 const double* _uniranges, float scale, bool uniform )
1495{
1496 uchar** ptrs = &_ptrs[0];
1497 const int* deltas = &_deltas[0];
1498 const uchar* H = hist.ptr();
1499 int i, x;
1500 uchar* bproj = _ptrs[dims];
1501 int bpstep = _deltas[dims*2 + 1];
1502 std::vector<size_t> _tab;
1503
1504 calcHistLookupTables_8u( hist, shist: SparseMat(), dims, ranges: _ranges, uniranges: _uniranges, uniform, issparse: false, _tab );
1505 const size_t* tab = &_tab[0];
1506
1507 if( dims == 1 )
1508 {
1509 int d0 = deltas[0], step0 = deltas[1];
1510 uchar matH[256] = {0};
1511 const uchar* p0 = (const uchar*)ptrs[0];
1512
1513 for( i = 0; i < 256; i++ )
1514 {
1515 size_t hidx = tab[i];
1516 if( hidx < OUT_OF_RANGE )
1517 matH[i] = saturate_cast<uchar>(v: *(float*)(H + hidx)*scale);
1518 }
1519
1520 for( ; imsize.height--; p0 += step0, bproj += bpstep )
1521 {
1522 if( d0 == 1 )
1523 {
1524 for( x = 0; x <= imsize.width - 4; x += 4 )
1525 {
1526 uchar t0 = matH[p0[x]], t1 = matH[p0[x+1]];
1527 bproj[x] = t0; bproj[x+1] = t1;
1528 t0 = matH[p0[x+2]]; t1 = matH[p0[x+3]];
1529 bproj[x+2] = t0; bproj[x+3] = t1;
1530 }
1531 p0 += x;
1532 }
1533 else
1534 for( x = 0; x <= imsize.width - 4; x += 4 )
1535 {
1536 uchar t0 = matH[p0[0]], t1 = matH[p0[d0]];
1537 bproj[x] = t0; bproj[x+1] = t1;
1538 p0 += d0*2;
1539 t0 = matH[p0[0]]; t1 = matH[p0[d0]];
1540 bproj[x+2] = t0; bproj[x+3] = t1;
1541 p0 += d0*2;
1542 }
1543
1544 for( ; x < imsize.width; x++, p0 += d0 )
1545 bproj[x] = matH[*p0];
1546 }
1547 }
1548 else if( dims == 2 )
1549 {
1550 int d0 = deltas[0], step0 = deltas[1],
1551 d1 = deltas[2], step1 = deltas[3];
1552 const uchar* p0 = (const uchar*)ptrs[0];
1553 const uchar* p1 = (const uchar*)ptrs[1];
1554
1555 for( ; imsize.height--; p0 += step0, p1 += step1, bproj += bpstep )
1556 {
1557 for( x = 0; x < imsize.width; x++, p0 += d0, p1 += d1 )
1558 {
1559 size_t idx = tab[*p0] + tab[*p1 + 256];
1560 bproj[x] = idx < OUT_OF_RANGE ? saturate_cast<uchar>(v: *(const float*)(H + idx)*scale) : 0;
1561 }
1562 }
1563 }
1564 else if( dims == 3 )
1565 {
1566 int d0 = deltas[0], step0 = deltas[1],
1567 d1 = deltas[2], step1 = deltas[3],
1568 d2 = deltas[4], step2 = deltas[5];
1569 const uchar* p0 = (const uchar*)ptrs[0];
1570 const uchar* p1 = (const uchar*)ptrs[1];
1571 const uchar* p2 = (const uchar*)ptrs[2];
1572
1573 for( ; imsize.height--; p0 += step0, p1 += step1, p2 += step2, bproj += bpstep )
1574 {
1575 for( x = 0; x < imsize.width; x++, p0 += d0, p1 += d1, p2 += d2 )
1576 {
1577 size_t idx = tab[*p0] + tab[*p1 + 256] + tab[*p2 + 512];
1578 bproj[x] = idx < OUT_OF_RANGE ? saturate_cast<uchar>(v: *(const float*)(H + idx)*scale) : 0;
1579 }
1580 }
1581 }
1582 else
1583 {
1584 for( ; imsize.height--; bproj += bpstep )
1585 {
1586 for( x = 0; x < imsize.width; x++ )
1587 {
1588 const uchar* Hptr = H;
1589 for( i = 0; i < dims; i++ )
1590 {
1591 size_t idx = tab[*ptrs[i] + i*256];
1592 if( idx >= OUT_OF_RANGE )
1593 break;
1594 ptrs[i] += deltas[i*2];
1595 Hptr += idx;
1596 }
1597
1598 if( i == dims )
1599 bproj[x] = saturate_cast<uchar>(v: *(const float*)Hptr*scale);
1600 else
1601 {
1602 bproj[x] = 0;
1603 for( ; i < dims; i++ )
1604 ptrs[i] += deltas[i*2];
1605 }
1606 }
1607 for( i = 0; i < dims; i++ )
1608 ptrs[i] += deltas[i*2 + 1];
1609 }
1610 }
1611}
1612
1613}
1614
1615void cv::calcBackProject( const Mat* images, int nimages, const int* channels,
1616 InputArray _hist, OutputArray _backProject,
1617 const float** ranges, double scale, bool uniform )
1618{
1619 CV_INSTRUMENT_REGION();
1620
1621 CV_Assert(images && nimages > 0);
1622
1623 Mat hist = _hist.getMat();
1624 std::vector<uchar*> ptrs;
1625 std::vector<int> deltas;
1626 std::vector<double> uniranges;
1627 Size imsize;
1628 int dims = hist.dims == 2 && hist.size[1] == 1 ? 1 : hist.dims;
1629
1630 CV_Assert( dims > 0 && !hist.empty() );
1631 _backProject.create( sz: images[0].size(), type: images[0].depth() );
1632 Mat backProject = _backProject.getMat();
1633 histPrepareImages( images, nimages, channels, mask: backProject, dims, histSize: hist.size, ranges,
1634 uniform, ptrs, deltas, imsize, uniranges );
1635 const double* _uniranges = uniform ? &uniranges[0] : 0;
1636
1637 int depth = images[0].depth();
1638 if( depth == CV_8U )
1639 calcBackProj_8u(ptrs&: ptrs, deltas: deltas, imsize, hist, dims, ranges: ranges, _uniranges, scale: (float)scale, uniform);
1640 else if( depth == CV_16U )
1641 calcBackProj_<ushort, ushort>(ptrs&: ptrs, deltas: deltas, imsize, hist, dims, ranges: ranges, _uniranges, scale: (float)scale, uniform );
1642 else if( depth == CV_32F )
1643 calcBackProj_<float, float>(ptrs&: ptrs, deltas: deltas, imsize, hist, dims, ranges: ranges, _uniranges, scale: (float)scale, uniform );
1644 else
1645 CV_Error(cv::Error::StsUnsupportedFormat, "");
1646}
1647
1648
1649namespace cv
1650{
1651
// Sparse-histogram back-projection worker: for each pixel, maps its channel
// values to a multi-dimensional bin index and writes the scaled sparse bin
// value (0 for absent bins or out-of-range pixels). T is the input pixel
// type, BT the back-projection output type.
template<typename T, typename BT> static void
calcSparseBackProj_( std::vector<uchar*>& _ptrs, const std::vector<int>& _deltas,
                     Size imsize, const SparseMat& hist, int dims, const float** _ranges,
                     const double* _uniranges, float scale, bool uniform )
{
    // _ptrs[0..dims-1]: channel pointers; _ptrs[dims]: output plane.
    // deltas[i*2]: per-pixel step; deltas[i*2+1]: end-of-row step.
    T** ptrs = (T**)&_ptrs[0];
    const int* deltas = &_deltas[0];
    int i, x;
    BT* bproj = (BT*)_ptrs[dims];
    int bpstep = _deltas[dims*2 + 1];
    const int* size = hist.hdr->size;
    int idx[CV_MAX_DIM];
    const SparseMat_<float>& hist_ = (const SparseMat_<float>&)hist;

    if( uniform )
    {
        // uniranges holds (a, b) pairs per dimension: bin = floor(v*a + b).
        const double* uniranges = &_uniranges[0];
        for( ; imsize.height--; bproj += bpstep )
        {
            for( x = 0; x < imsize.width; x++ )
            {
                for( i = 0; i < dims; i++ )
                {
                    idx[i] = cvFloor(*ptrs[i]*uniranges[i*2] + uniranges[i*2+1]);
                    // unsigned compare rejects both negative and too-large bins
                    if( (unsigned)idx[i] >= (unsigned)size[i] )
                        break;
                    ptrs[i] += deltas[i*2];
                }

                if( i == dims )
                    // hist_(idx) returns 0 for bins not present in the sparse matrix
                    bproj[x] = saturate_cast<BT>(hist_(idx)*scale);
                else
                {
                    // out-of-range pixel: zero output, keep pointers in sync
                    bproj[x] = 0;
                    for( ; i < dims; i++ )
                        ptrs[i] += deltas[i*2];
                }
            }
            for( i = 0; i < dims; i++ )
                ptrs[i] += deltas[i*2 + 1];
        }
    }
    else if (_ranges)
    {
        // non-uniform histogram
        const float* ranges[CV_MAX_DIM];
        for( i = 0; i < dims; i++ )
            ranges[i] = &_ranges[i][0];

        for( ; imsize.height--; bproj += bpstep )
        {
            for( x = 0; x < imsize.width; x++ )
            {
                for( i = 0; i < dims; i++ )
                {
                    float v = (float)*ptrs[i];
                    const float* R = ranges[i];
                    int j = -1, sz = size[i];

                    // linear scan of the bin boundaries to find v's bin
                    while( v >= R[j+1] && ++j < sz )
                        ; // nop

                    if( (unsigned)j >= (unsigned)sz )
                        break;
                    idx[i] = j;
                    ptrs[i] += deltas[i*2];
                }

                if( i == dims )
                    bproj[x] = saturate_cast<BT>(hist_(idx)*scale);
                else
                {
                    bproj[x] = 0;
                    for( ; i < dims; i++ )
                        ptrs[i] += deltas[i*2];
                }
            }

            for( i = 0; i < dims; i++ )
                ptrs[i] += deltas[i*2 + 1];
        }
    }
    else
    {
        CV_Error(Error::StsBadArg, "Either ranges, either uniform ranges should be provided");
    }
}
1739
1740
1741static void
1742calcSparseBackProj_8u( std::vector<uchar*>& _ptrs, const std::vector<int>& _deltas,
1743 Size imsize, const SparseMat& hist, int dims, const float** _ranges,
1744 const double* _uniranges, float scale, bool uniform )
1745{
1746 uchar** ptrs = &_ptrs[0];
1747 const int* deltas = &_deltas[0];
1748 int i, x;
1749 uchar* bproj = _ptrs[dims];
1750 int bpstep = _deltas[dims*2 + 1];
1751 std::vector<size_t> _tab;
1752 int idx[CV_MAX_DIM];
1753
1754 calcHistLookupTables_8u( hist: Mat(), shist: hist, dims, ranges: _ranges, uniranges: _uniranges, uniform, issparse: true, _tab );
1755 const size_t* tab = &_tab[0];
1756
1757 for( ; imsize.height--; bproj += bpstep )
1758 {
1759 for( x = 0; x < imsize.width; x++ )
1760 {
1761 for( i = 0; i < dims; i++ )
1762 {
1763 size_t hidx = tab[*ptrs[i] + i*256];
1764 if( hidx >= OUT_OF_RANGE )
1765 break;
1766 idx[i] = (int)hidx;
1767 ptrs[i] += deltas[i*2];
1768 }
1769
1770 if( i == dims )
1771 bproj[x] = saturate_cast<uchar>(v: hist.value<float>(idx)*scale);
1772 else
1773 {
1774 bproj[x] = 0;
1775 for( ; i < dims; i++ )
1776 ptrs[i] += deltas[i*2];
1777 }
1778 }
1779 for( i = 0; i < dims; i++ )
1780 ptrs[i] += deltas[i*2 + 1];
1781 }
1782}
1783
1784}
1785
1786void cv::calcBackProject( const Mat* images, int nimages, const int* channels,
1787 const SparseMat& hist, OutputArray _backProject,
1788 const float** ranges, double scale, bool uniform )
1789{
1790 CV_INSTRUMENT_REGION();
1791
1792 CV_Assert(images && nimages > 0);
1793
1794 std::vector<uchar*> ptrs;
1795 std::vector<int> deltas;
1796 std::vector<double> uniranges;
1797 Size imsize;
1798 int dims = hist.dims();
1799
1800 CV_Assert( dims > 0 );
1801 _backProject.create( sz: images[0].size(), type: images[0].depth() );
1802 Mat backProject = _backProject.getMat();
1803 histPrepareImages( images, nimages, channels, mask: backProject,
1804 dims, histSize: hist.hdr->size, ranges,
1805 uniform, ptrs, deltas, imsize, uniranges );
1806 const double* _uniranges = uniform ? &uniranges[0] : 0;
1807 int depth = images[0].depth();
1808 if( depth == CV_8U )
1809 calcSparseBackProj_8u(ptrs&: ptrs, deltas: deltas, imsize, hist, dims, ranges: ranges,
1810 _uniranges, scale: (float)scale, uniform);
1811 else if( depth == CV_16U )
1812 calcSparseBackProj_<ushort, ushort>(ptrs&: ptrs, deltas: deltas, imsize, hist, dims, ranges: ranges,
1813 _uniranges, scale: (float)scale, uniform );
1814 else if( depth == CV_32F )
1815 calcSparseBackProj_<float, float>(ptrs&: ptrs, deltas: deltas, imsize, hist, dims, ranges: ranges,
1816 _uniranges, scale: (float)scale, uniform );
1817 else
1818 CV_Error(cv::Error::StsUnsupportedFormat, "");
1819}
1820
1821#ifdef HAVE_OPENCL
1822
1823namespace cv {
1824
1825static void getUMatIndex(const std::vector<UMat> & um, int cn, int & idx, int & cnidx)
1826{
1827 int totalChannels = 0;
1828 for (size_t i = 0, size = um.size(); i < size; ++i)
1829 {
1830 int ccn = um[i].channels();
1831 totalChannels += ccn;
1832
1833 if (totalChannels == cn)
1834 {
1835 idx = (int)(i + 1);
1836 cnidx = 0;
1837 return;
1838 }
1839 else if (totalChannels > cn)
1840 {
1841 idx = (int)i;
1842 cnidx = i == 0 ? cn : (cn - totalChannels + ccn);
1843 return;
1844 }
1845 }
1846
1847 idx = cnidx = -1;
1848}
1849
1850static bool ocl_calcBackProject( InputArrayOfArrays _images, std::vector<int> channels,
1851 InputArray _hist, OutputArray _dst,
1852 const std::vector<float>& ranges,
1853 float scale, size_t histdims )
1854{
1855 std::vector<UMat> images;
1856 _images.getUMatVector(umv&: images);
1857
1858 size_t nimages = images.size(), totalcn = images[0].channels();
1859
1860 CV_Assert(nimages > 0);
1861 Size size = images[0].size();
1862 int depth = images[0].depth();
1863
1864 //kernels are valid for this type only
1865 if (depth != CV_8U)
1866 return false;
1867
1868 for (size_t i = 1; i < nimages; ++i)
1869 {
1870 const UMat & m = images[i];
1871 totalcn += m.channels();
1872 CV_Assert(size == m.size() && depth == m.depth());
1873 }
1874
1875 std::sort(first: channels.begin(), last: channels.end());
1876 for (size_t i = 0; i < histdims; ++i)
1877 CV_Assert(channels[i] < (int)totalcn);
1878
1879 if (histdims == 1)
1880 {
1881 int idx, cnidx;
1882 getUMatIndex(um: images, cn: channels[0], idx, cnidx);
1883 CV_Assert(idx >= 0);
1884 UMat im = images[idx];
1885
1886 String opts = format(fmt: "-D histdims=1 -D scn=%d", im.channels());
1887 ocl::Kernel lutk("calcLUT", ocl::imgproc::calc_back_project_oclsrc, opts);
1888 if (lutk.empty())
1889 return false;
1890
1891 size_t lsize = 256;
1892 UMat lut(1, (int)lsize, CV_32SC1), hist = _hist.getUMat(), uranges(ranges, true);
1893
1894 lutk.args(kernel_args: ocl::KernelArg::ReadOnlyNoSize(m: hist), kernel_args: hist.rows,
1895 kernel_args: ocl::KernelArg::PtrWriteOnly(m: lut), kernel_args: scale, kernel_args: ocl::KernelArg::PtrReadOnly(m: uranges));
1896 if (!lutk.run(dims: 1, globalsize: &lsize, NULL, sync: false))
1897 return false;
1898
1899 ocl::Kernel mapk("LUT", ocl::imgproc::calc_back_project_oclsrc, opts);
1900 if (mapk.empty())
1901 return false;
1902
1903 _dst.create(sz: size, type: depth);
1904 UMat dst = _dst.getUMat();
1905
1906 im.offset += cnidx;
1907 mapk.args(kernel_args: ocl::KernelArg::ReadOnlyNoSize(m: im), kernel_args: ocl::KernelArg::PtrReadOnly(m: lut),
1908 kernel_args: ocl::KernelArg::WriteOnly(m: dst));
1909
1910 size_t globalsize[2] = { (size_t)size.width, (size_t)size.height };
1911 return mapk.run(dims: 2, globalsize, NULL, sync: false);
1912 }
1913 else if (histdims == 2)
1914 {
1915 int idx0, idx1, cnidx0, cnidx1;
1916 getUMatIndex(um: images, cn: channels[0], idx&: idx0, cnidx&: cnidx0);
1917 getUMatIndex(um: images, cn: channels[1], idx&: idx1, cnidx&: cnidx1);
1918 CV_Assert(idx0 >= 0 && idx1 >= 0);
1919 UMat im0 = images[idx0], im1 = images[idx1];
1920
1921 // Lut for the first dimension
1922 String opts = format(fmt: "-D histdims=2 -D scn1=%d -D scn2=%d", im0.channels(), im1.channels());
1923 ocl::Kernel lutk1("calcLUT", ocl::imgproc::calc_back_project_oclsrc, opts);
1924 if (lutk1.empty())
1925 return false;
1926
1927 size_t lsize = 256;
1928 UMat lut(1, (int)lsize<<1, CV_32SC1), uranges(ranges, true), hist = _hist.getUMat();
1929
1930 lutk1.args(kernel_args: hist.rows, kernel_args: ocl::KernelArg::PtrWriteOnly(m: lut), kernel_args: (int)0, kernel_args: ocl::KernelArg::PtrReadOnly(m: uranges), kernel_args: (int)0);
1931 if (!lutk1.run(dims: 1, globalsize: &lsize, NULL, sync: false))
1932 return false;
1933
1934 // lut for the second dimension
1935 ocl::Kernel lutk2("calcLUT", ocl::imgproc::calc_back_project_oclsrc, opts);
1936 if (lutk2.empty())
1937 return false;
1938
1939 lut.offset += lsize * sizeof(int);
1940 lutk2.args(kernel_args: hist.cols, kernel_args: ocl::KernelArg::PtrWriteOnly(m: lut), kernel_args: (int)256, kernel_args: ocl::KernelArg::PtrReadOnly(m: uranges), kernel_args: (int)2);
1941 if (!lutk2.run(dims: 1, globalsize: &lsize, NULL, sync: false))
1942 return false;
1943
1944 // perform lut
1945 ocl::Kernel mapk("LUT", ocl::imgproc::calc_back_project_oclsrc, opts);
1946 if (mapk.empty())
1947 return false;
1948
1949 _dst.create(sz: size, type: depth);
1950 UMat dst = _dst.getUMat();
1951
1952 im0.offset += cnidx0;
1953 im1.offset += cnidx1;
1954 mapk.args(kernel_args: ocl::KernelArg::ReadOnlyNoSize(m: im0), kernel_args: ocl::KernelArg::ReadOnlyNoSize(m: im1),
1955 kernel_args: ocl::KernelArg::ReadOnlyNoSize(m: hist), kernel_args: ocl::KernelArg::PtrReadOnly(m: lut), kernel_args: scale, kernel_args: ocl::KernelArg::WriteOnly(m: dst));
1956
1957 size_t globalsize[2] = { (size_t)size.width, (size_t)size.height };
1958 return mapk.run(dims: 2, globalsize, NULL, sync: false);
1959 }
1960 return false;
1961}
1962
1963}
1964
1965#endif
1966
1967void cv::calcBackProject( InputArrayOfArrays images, const std::vector<int>& channels,
1968 InputArray hist, OutputArray dst,
1969 const std::vector<float>& ranges,
1970 double scale )
1971{
1972 CV_INSTRUMENT_REGION();
1973 if (hist.dims() <= 2)
1974 {
1975#ifdef HAVE_OPENCL
1976 Size histSize = hist.size();
1977 bool _1D = histSize.height == 1 || histSize.width == 1;
1978 size_t histdims = _1D ? 1 : hist.dims();
1979#endif
1980
1981 CV_OCL_RUN(dst.isUMat() && hist.type() == CV_32FC1 &&
1982 histdims <= 2 && ranges.size() == histdims * 2 && histdims == channels.size(),
1983 ocl_calcBackProject(images: images, channels, hist: hist, dst: dst, ranges, scale: (float)scale, histdims))
1984 }
1985 Mat H0 = hist.getMat(), H;
1986 int hcn = H0.channels();
1987
1988 if( hcn > 1 )
1989 {
1990 CV_Assert( H0.isContinuous() );
1991 int hsz[CV_CN_MAX+1];
1992 memcpy(dest: hsz, src: &H0.size[0], n: H0.dims*sizeof(hsz[0]));
1993 hsz[H0.dims] = hcn;
1994 H = Mat(H0.dims+1, hsz, H0.depth(), H0.ptr());
1995 }
1996 else
1997 H = H0;
1998
1999 bool _1d = H.rows == 1 || H.cols == 1;
2000 int i, dims = H.dims, rsz = (int)ranges.size(), csz = (int)channels.size();
2001 int nimages = (int)images.total();
2002
2003 CV_Assert(nimages > 0);
2004 CV_Assert(rsz == dims*2 || (rsz == 2 && _1d) || (rsz == 0 && images.depth(0) == CV_8U));
2005 CV_Assert(csz == 0 || csz == dims || (csz == 1 && _1d));
2006
2007 float* _ranges[CV_MAX_DIM];
2008 if( rsz > 0 )
2009 {
2010 for( i = 0; i < rsz/2; i++ )
2011 _ranges[i] = (float*)&ranges[i*2];
2012 }
2013
2014 AutoBuffer<Mat> buf(nimages);
2015 for( i = 0; i < nimages; i++ )
2016 buf[i] = images.getMat(i);
2017
2018 calcBackProject(images: &buf[0], nimages, channels: csz ? &channels[0] : 0,
2019 hist: hist, backProject: dst, ranges: rsz ? (const float**)_ranges : 0, scale, uniform: true);
2020}
2021
2022
2023////////////////// C O M P A R E H I S T O G R A M S ////////////////////////
2024
2025double cv::compareHist( InputArray _H1, InputArray _H2, int method )
2026{
2027 CV_INSTRUMENT_REGION();
2028
2029 Mat H1 = _H1.getMat(), H2 = _H2.getMat();
2030 const Mat* arrays[] = {&H1, &H2, 0};
2031 Mat planes[2];
2032 NAryMatIterator it(arrays, planes);
2033 double result = 0;
2034 int j;
2035
2036 CV_Assert( H1.type() == H2.type() && H1.depth() == CV_32F );
2037
2038 double s1 = 0, s2 = 0, s11 = 0, s12 = 0, s22 = 0;
2039
2040 CV_Assert( it.planes[0].isContinuous() && it.planes[1].isContinuous() );
2041
2042 for( size_t i = 0; i < it.nplanes; i++, ++it )
2043 {
2044 const float* h1 = it.planes[0].ptr<float>();
2045 const float* h2 = it.planes[1].ptr<float>();
2046 const int len = it.planes[0].rows*it.planes[0].cols*H1.channels();
2047 j = 0;
2048
2049 if( (method == CV_COMP_CHISQR) || (method == CV_COMP_CHISQR_ALT))
2050 {
2051#if CV_SIMD_64F || CV_SIMD_SCALABLE_64F
2052 v_float64 v_eps = vx_setall_f64(DBL_EPSILON);
2053 v_float64 v_one = vx_setall_f64(v: 1.f);
2054 v_float64 v_zero = vx_setzero_f64();
2055 v_float64 v_res = vx_setzero_f64();
2056 for ( ; j <= len - VTraits<v_float32>::vlanes(); j += VTraits<v_float32>::vlanes())
2057 {
2058 v_float32 v_h1 = vx_load(ptr: h1 + j), v_h2 = vx_load(ptr: h2 + j);
2059 v_float64 v_h1_l = v_cvt_f64(a: v_h1), v_h1_h = v_cvt_f64_high(a: v_h1);
2060 v_float64 v_h2_l = v_cvt_f64(a: v_h2), v_h2_h = v_cvt_f64_high(a: v_h2);
2061
2062 v_float64 v_a_l, v_a_h;
2063 v_a_l = v_sub(a: v_h1_l, b: v_h2_l);
2064 v_a_h = v_sub(a: v_h1_h, b: v_h2_h);
2065
2066 v_float64 v_b_l, v_b_h;
2067 if (method == CV_COMP_CHISQR)
2068 {
2069 v_b_l = v_h1_l;
2070 v_b_h = v_h1_h;
2071 }
2072 else
2073 {
2074 v_b_l = v_add(a: v_h1_l, b: v_h2_l);
2075 v_b_h = v_add(a: v_h1_h, b: v_h2_h);
2076 }
2077
2078 // low part
2079 auto v_res_l = v_mul(a: v_mul(a: v_a_l, b: v_a_l), b: v_div(a: v_one, b: v_b_l));
2080 auto mask = v_gt(a: v_abs(x: v_b_l), b: v_eps);
2081 v_res_l = v_select(mask, a: v_res_l, b: v_zero);
2082 v_res = v_add(a: v_res, b: v_res_l);
2083 // high part
2084 auto v_res_h = v_mul(a: v_mul(a: v_a_h, b: v_a_h), b: v_div(a: v_one, b: v_b_h));
2085 mask = v_gt(a: v_abs(x: v_b_h), b: v_eps);
2086 v_res_h = v_select(mask, a: v_res_h, b: v_zero);
2087 v_res = v_add(a: v_res, b: v_res_h);
2088 }
2089 result += v_reduce_sum(a: v_res);
2090#endif
2091 for( ; j < len; j++ )
2092 {
2093 double a = h1[j] - h2[j];
2094 double b = (method == CV_COMP_CHISQR) ? h1[j] : h1[j] + h2[j];
2095 if( fabs(x: b) > DBL_EPSILON )
2096 result += a*a/b;
2097 }
2098 }
2099 else if( method == CV_COMP_CORREL )
2100 {
2101#if (CV_SIMD_64F || CV_SIMD_SCALABLE_64F)
2102 v_float64 v_s1 = vx_setzero_f64();
2103 v_float64 v_s2 = vx_setzero_f64();
2104 v_float64 v_s11 = vx_setzero_f64();
2105 v_float64 v_s12 = vx_setzero_f64();
2106 v_float64 v_s22 = vx_setzero_f64();
2107 for ( ; j <= len - VTraits<v_float32>::vlanes(); j += VTraits<v_float32>::vlanes())
2108 {
2109 v_float32 v_a = vx_load(ptr: h1 + j);
2110 v_float32 v_b = vx_load(ptr: h2 + j);
2111
2112 // 0-1
2113 v_float64 v_ad = v_cvt_f64(a: v_a);
2114 v_float64 v_bd = v_cvt_f64(a: v_b);
2115 v_s12 = v_muladd(a: v_ad, b: v_bd, c: v_s12);
2116 v_s11 = v_muladd(a: v_ad, b: v_ad, c: v_s11);
2117 v_s22 = v_muladd(a: v_bd, b: v_bd, c: v_s22);
2118 v_s1 = v_add(a: v_s1, b: v_ad);
2119 v_s2 = v_add(a: v_s2, b: v_bd);
2120
2121 // 2-3
2122 v_ad = v_cvt_f64_high(a: v_a);
2123 v_bd = v_cvt_f64_high(a: v_b);
2124 v_s12 = v_muladd(a: v_ad, b: v_bd, c: v_s12);
2125 v_s11 = v_muladd(a: v_ad, b: v_ad, c: v_s11);
2126 v_s22 = v_muladd(a: v_bd, b: v_bd, c: v_s22);
2127 v_s1 = v_add(a: v_s1, b: v_ad);
2128 v_s2 = v_add(a: v_s2, b: v_bd);
2129 }
2130 s12 += v_reduce_sum(a: v_s12);
2131 s11 += v_reduce_sum(a: v_s11);
2132 s22 += v_reduce_sum(a: v_s22);
2133 s1 += v_reduce_sum(a: v_s1);
2134 s2 += v_reduce_sum(a: v_s2);
2135#elif CV_SIMD && 0 //Disable vectorization for CV_COMP_CORREL if f64 is unsupported due to low precision
2136 v_float32 v_s1 = vx_setzero_f32();
2137 v_float32 v_s2 = vx_setzero_f32();
2138 v_float32 v_s11 = vx_setzero_f32();
2139 v_float32 v_s12 = vx_setzero_f32();
2140 v_float32 v_s22 = vx_setzero_f32();
2141 for (; j <= len - VTraits<v_float32>::vlanes(); j += VTraits<v_float32>::vlanes())
2142 {
2143 v_float32 v_a = vx_load(h1 + j);
2144 v_float32 v_b = vx_load(h2 + j);
2145
2146 v_s12 = v_muladd(v_a, v_b, v_s12);
2147 v_s11 = v_muladd(v_a, v_a, v_s11);
2148 v_s22 = v_muladd(v_b, v_b, v_s22);
2149 v_s1 += v_a;
2150 v_s2 += v_b;
2151 }
2152 s12 += v_reduce_sum(v_s12);
2153 s11 += v_reduce_sum(v_s11);
2154 s22 += v_reduce_sum(v_s22);
2155 s1 += v_reduce_sum(v_s1);
2156 s2 += v_reduce_sum(v_s2);
2157#endif
2158 for( ; j < len; j++ )
2159 {
2160 double a = h1[j];
2161 double b = h2[j];
2162
2163 s12 += a*b;
2164 s1 += a;
2165 s11 += a*a;
2166 s2 += b;
2167 s22 += b*b;
2168 }
2169 }
2170 else if( method == CV_COMP_INTERSECT )
2171 {
2172#if (CV_SIMD_64F || CV_SIMD_SCALABLE_64F)
2173 v_float64 v_result = vx_setzero_f64();
2174 for ( ; j <= len - VTraits<v_float32>::vlanes(); j += VTraits<v_float32>::vlanes())
2175 {
2176 v_float32 v_src = v_min(a: vx_load(ptr: h1 + j), b: vx_load(ptr: h2 + j));
2177 v_result = v_add(a: v_result, b: v_add(a: v_cvt_f64(a: v_src), b: v_cvt_f64_high(a: v_src)));
2178 }
2179 result += v_reduce_sum(a: v_result);
2180#elif CV_SIMD && 0 // Disable vectorization for CV_COMP_INTERSECT if f64 is unsupported due to low precision
2181 // See https://github.com/opencv/opencv/issues/24757
2182 v_float32 v_result = vx_setzero_f32();
2183 for (; j <= len - VTraits<v_float32>::vlanes(); j += VTraits<v_float32>::vlanes())
2184 {
2185 v_float32 v_src = v_min(vx_load(h1 + j), vx_load(h2 + j));
2186 v_result = v_add(v_result, v_src);
2187 }
2188 result += v_reduce_sum(v_result);
2189#endif
2190 for( ; j < len; j++ )
2191 result += std::min(a: h1[j], b: h2[j]);
2192 }
2193 else if( method == CV_COMP_BHATTACHARYYA )
2194 {
2195#if (CV_SIMD_64F || CV_SIMD_SCALABLE_64F)
2196 v_float64 v_s1 = vx_setzero_f64();
2197 v_float64 v_s2 = vx_setzero_f64();
2198 v_float64 v_result = vx_setzero_f64();
2199 for ( ; j <= len - VTraits<v_float32>::vlanes(); j += VTraits<v_float32>::vlanes())
2200 {
2201 v_float32 v_a = vx_load(ptr: h1 + j);
2202 v_float32 v_b = vx_load(ptr: h2 + j);
2203
2204 v_float64 v_ad = v_cvt_f64(a: v_a);
2205 v_float64 v_bd = v_cvt_f64(a: v_b);
2206 v_s1 = v_add(a: v_s1, b: v_ad);
2207 v_s2 = v_add(a: v_s2, b: v_bd);
2208 v_result = v_add(a: v_result, b: v_sqrt(x: v_mul(a: v_ad, b: v_bd)));
2209
2210 v_ad = v_cvt_f64_high(a: v_a);
2211 v_bd = v_cvt_f64_high(a: v_b);
2212 v_s1 = v_add(a: v_s1, b: v_ad);
2213 v_s2 = v_add(a: v_s2, b: v_bd);
2214 v_result = v_add(a: v_result, b: v_sqrt(x: v_mul(a: v_ad, b: v_bd)));
2215 }
2216 s1 += v_reduce_sum(a: v_s1);
2217 s2 += v_reduce_sum(a: v_s2);
2218 result += v_reduce_sum(a: v_result);
2219#elif CV_SIMD && 0 //Disable vectorization for CV_COMP_BHATTACHARYYA if f64 is unsupported due to low precision
2220 v_float32 v_s1 = vx_setzero_f32();
2221 v_float32 v_s2 = vx_setzero_f32();
2222 v_float32 v_result = vx_setzero_f32();
2223 for (; j <= len - VTraits<v_float32>::vlanes(); j += VTraits<v_float32>::vlanes())
2224 {
2225 v_float32 v_a = vx_load(h1 + j);
2226 v_float32 v_b = vx_load(h2 + j);
2227 v_s1 += v_a;
2228 v_s2 += v_b;
2229 v_result += v_sqrt(v_a * v_b);
2230 }
2231 s1 += v_reduce_sum(v_s1);
2232 s2 += v_reduce_sum(v_s2);
2233 result += v_reduce_sum(v_result);
2234#endif
2235 for( ; j < len; j++ )
2236 {
2237 double a = h1[j];
2238 double b = h2[j];
2239 result += std::sqrt(x: a*b);
2240 s1 += a;
2241 s2 += b;
2242 }
2243 }
2244 else if( method == CV_COMP_KL_DIV )
2245 {
2246 for( ; j < len; j++ )
2247 {
2248 double p = h1[j];
2249 double q = h2[j];
2250 if( fabs(x: p) <= DBL_EPSILON ) {
2251 continue;
2252 }
2253 if( fabs(x: q) <= DBL_EPSILON ) {
2254 q = 1e-10;
2255 }
2256 result += p * std::log( x: p / q );
2257 }
2258 }
2259 else
2260 CV_Error( cv::Error::StsBadArg, "Unknown comparison method" );
2261 }
2262
2263 if( method == CV_COMP_CHISQR_ALT )
2264 result *= 2;
2265 else if( method == CV_COMP_CORREL )
2266 {
2267 size_t total = H1.total();
2268 double scale = 1./total;
2269 double num = s12 - s1*s2*scale;
2270 double denom2 = (s11 - s1*s1*scale)*(s22 - s2*s2*scale);
2271 result = std::abs(x: denom2) > DBL_EPSILON ? num/std::sqrt(x: denom2) : 1.;
2272 }
2273 else if( method == CV_COMP_BHATTACHARYYA )
2274 {
2275 s1 *= s2;
2276 s1 = fabs(x: s1) > FLT_EPSILON ? 1./std::sqrt(x: s1) : 1.;
2277 result = std::sqrt(x: std::max(a: 1. - result*s1, b: 0.));
2278 }
2279
2280 return result;
2281}
2282
2283
// Sparse-histogram overload of compareHist(). Both histograms must be
// CV_32F with identical dimensionality and per-dimension sizes. Only the
// non-zero entries are visited; a bin absent from one histogram is
// treated as zero.
double cv::compareHist( const SparseMat& H1, const SparseMat& H2, int method )
{
    CV_INSTRUMENT_REGION();

    double result = 0;
    int i, dims = H1.dims();

    CV_Assert( dims > 0 && dims == H2.dims() && H1.type() == H2.type() && H1.type() == CV_32F );
    for( i = 0; i < dims; i++ )
        CV_Assert( H1.size(i) == H2.size(i) );

    // For the symmetric methods it is cheaper to iterate over the histogram
    // with fewer non-zero entries and probe the other one by hash. The
    // asymmetric methods (CHISQR, CHISQR_ALT, KL_DIV) depend on argument
    // order, so they are never swapped.
    const SparseMat *PH1 = &H1, *PH2 = &H2;
    if( PH1->nzcount() > PH2->nzcount() && method != CV_COMP_CHISQR && method != CV_COMP_CHISQR_ALT && method != CV_COMP_KL_DIV )
        std::swap(a&: PH1, b&: PH2);

    SparseMatConstIterator it = PH1->begin();

    int N1 = (int)PH1->nzcount(), N2 = (int)PH2->nzcount();

    if( (method == CV_COMP_CHISQR) || (method == CV_COMP_CHISQR_ALT) )
    {
        for( i = 0; i < N1; i++, ++it )
        {
            CV_Assert(it.ptr != NULL);
            float v1 = it.value<float>();
            const SparseMat::Node* node = it.node();
            // Reuse the node's stored hash to avoid recomputing it for the lookup.
            float v2 = PH2->value<float>(idx: node->idx, hashval: (size_t*)&node->hashval);
            double a = v1 - v2;
            double b = (method == CV_COMP_CHISQR) ? v1 : v1 + v2;
            if( fabs(x: b) > DBL_EPSILON )
                result += a*a/b;
        }
    }
    else if( method == CV_COMP_CORREL )
    {
        double s1 = 0, s2 = 0, s11 = 0, s12 = 0, s22 = 0;

        // First pass: sums over H1's non-zero bins (cross term uses hash probe).
        for( i = 0; i < N1; i++, ++it )
        {
            CV_Assert(it.ptr != NULL);
            double v1 = it.value<float>();
            const SparseMat::Node* node = it.node();
            s12 += v1*PH2->value<float>(idx: node->idx, hashval: (size_t*)&node->hashval);
            s1 += v1;
            s11 += v1*v1;
        }

        // Second pass: sums over H2's non-zero bins.
        it = PH2->begin();
        for( i = 0; i < N2; i++, ++it )
        {
            CV_Assert(it.ptr != NULL);
            double v2 = it.value<float>();
            s2 += v2;
            s22 += v2*v2;
        }

        // Normalize by the total (dense) bin count: absent sparse entries
        // are implicit zeros and still count toward the mean.
        size_t total = 1;
        for( i = 0; i < H1.dims(); i++ )
            total *= H1.size(i);
        double scale = 1./total;
        double num = s12 - s1*s2*scale;
        double denom2 = (s11 - s1*s1*scale)*(s22 - s2*s2*scale);
        result = std::abs(x: denom2) > DBL_EPSILON ? num/std::sqrt(x: denom2) : 1.;
    }
    else if( method == CV_COMP_INTERSECT )
    {
        for( i = 0; i < N1; i++, ++it )
        {
            CV_Assert(it.ptr != NULL);
            float v1 = it.value<float>();
            const SparseMat::Node* node = it.node();
            float v2 = PH2->value<float>(idx: node->idx, hashval: (size_t*)&node->hashval);
            // min(v1, 0) == 0, so bins missing from H2 contribute nothing.
            if( v2 )
                result += std::min(a: v1, b: v2);
        }
    }
    else if( method == CV_COMP_BHATTACHARYYA )
    {
        double s1 = 0, s2 = 0;

        for( i = 0; i < N1; i++, ++it )
        {
            CV_Assert(it.ptr != NULL);
            double v1 = it.value<float>();
            const SparseMat::Node* node = it.node();
            double v2 = PH2->value<float>(idx: node->idx, hashval: (size_t*)&node->hashval);
            result += std::sqrt(x: v1*v2);
            s1 += v1;
        }

        it = PH2->begin();
        for( i = 0; i < N2; i++, ++it )
        {
            CV_Assert(it.ptr != NULL);
            s2 += it.value<float>();
        }

        // Normalize by 1/sqrt(s1*s2) and clamp to [0, 1] before the sqrt.
        s1 *= s2;
        s1 = fabs(x: s1) > FLT_EPSILON ? 1./std::sqrt(x: s1) : 1.;
        result = std::sqrt(x: std::max(a: 1. - result*s1, b: 0.));
    }
    else if( method == CV_COMP_KL_DIV )
    {
        for( i = 0; i < N1; i++, ++it )
        {
            CV_Assert(it.ptr != NULL);
            double v1 = it.value<float>();
            const SparseMat::Node* node = it.node();
            double v2 = PH2->value<float>(idx: node->idx, hashval: (size_t*)&node->hashval);
            // Avoid log of infinity: substitute a tiny value for empty H2 bins.
            if( !v2 )
                v2 = 1e-10;
            result += v1 * std::log( x: v1 / v2 );
        }
    }
    else
        CV_Error( cv::Error::StsBadArg, "Unknown comparison method" );

    // The alternative chi-square definition carries a factor of 2.
    if( method == CV_COMP_CHISQR_ALT )
        result *= 2;

    return result;
}
2406
2407
// Element type used for the bins of histograms created via the legacy C API.
const int CV_HIST_DEFAULT_TYPE = CV_32F;
2409
2410/* Creates new histogram */
2411CvHistogram *
2412cvCreateHist( int dims, int *sizes, CvHistType type, float** ranges, int uniform )
2413{
2414 CvHistogram *hist = 0;
2415
2416 if( (unsigned)dims > CV_MAX_DIM )
2417 CV_Error( CV_BadOrder, "Number of dimensions is out of range" );
2418
2419 if( !sizes )
2420 CV_Error( CV_HeaderIsNull, "Null <sizes> pointer" );
2421
2422 hist = (CvHistogram *)cvAlloc( size: sizeof( CvHistogram ));
2423 hist->type = CV_HIST_MAGIC_VAL + ((int)type & 1);
2424 if (uniform) hist->type|= CV_HIST_UNIFORM_FLAG;
2425 hist->thresh2 = 0;
2426 hist->bins = 0;
2427 if( type == CV_HIST_ARRAY )
2428 {
2429 hist->bins = cvInitMatNDHeader( mat: &hist->mat, dims, sizes,
2430 type: CV_HIST_DEFAULT_TYPE );
2431 cvCreateData( arr: hist->bins );
2432 }
2433 else if( type == CV_HIST_SPARSE )
2434 hist->bins = cvCreateSparseMat( dims, sizes, type: CV_HIST_DEFAULT_TYPE );
2435 else
2436 CV_Error( cv::Error::StsBadArg, "Invalid histogram type" );
2437
2438 if( ranges )
2439 cvSetHistBinRanges( hist, ranges, uniform );
2440
2441 return hist;
2442}
2443
2444
2445/* Creates histogram wrapping header for given array */
2446CV_IMPL CvHistogram*
2447cvMakeHistHeaderForArray( int dims, int *sizes, CvHistogram *hist,
2448 float *data, float **ranges, int uniform )
2449{
2450 if( !hist )
2451 CV_Error( cv::Error::StsNullPtr, "Null histogram header pointer" );
2452
2453 if( !data )
2454 CV_Error( cv::Error::StsNullPtr, "Null data pointer" );
2455
2456 hist->thresh2 = 0;
2457 hist->type = CV_HIST_MAGIC_VAL;
2458 hist->bins = cvInitMatNDHeader( mat: &hist->mat, dims, sizes, type: CV_HIST_DEFAULT_TYPE, data );
2459
2460 if( ranges )
2461 {
2462 if( !uniform )
2463 CV_Error( cv::Error::StsBadArg, "Only uniform bin ranges can be used here "
2464 "(to avoid memory allocation)" );
2465 cvSetHistBinRanges( hist, ranges, uniform );
2466 }
2467
2468 return hist;
2469}
2470
2471
2472CV_IMPL void
2473cvReleaseHist( CvHistogram **hist )
2474{
2475 if( !hist )
2476 CV_Error( cv::Error::StsNullPtr, "" );
2477
2478 if( *hist )
2479 {
2480 CvHistogram* temp = *hist;
2481
2482 if( !CV_IS_HIST(temp))
2483 CV_Error( cv::Error::StsBadArg, "Invalid histogram header" );
2484 *hist = 0;
2485
2486 if( CV_IS_SPARSE_HIST( temp ))
2487 cvReleaseSparseMat( mat: (CvSparseMat**)&temp->bins );
2488 else
2489 {
2490 cvReleaseData( arr: temp->bins );
2491 temp->bins = 0;
2492 }
2493
2494 if( temp->thresh2 )
2495 cvFree( &temp->thresh2 );
2496 cvFree( &temp );
2497 }
2498}
2499
// Sets all histogram bins to zero (works for both dense and sparse storage).
CV_IMPL void
cvClearHist( CvHistogram *hist )
{
    if( !CV_IS_HIST(hist) )
        CV_Error( cv::Error::StsBadArg, "Invalid histogram header" );
    cvZero( arr: hist->bins );
}
2507
2508
2509// Clears histogram bins that are below than threshold
2510CV_IMPL void
2511cvThreshHist( CvHistogram* hist, double thresh )
2512{
2513 if( !CV_IS_HIST(hist) )
2514 CV_Error( cv::Error::StsBadArg, "Invalid histogram header" );
2515
2516 if( !CV_IS_SPARSE_MAT(hist->bins) )
2517 {
2518 CvMat mat;
2519 cvGetMat( arr: hist->bins, header: &mat, coi: 0, allowND: 1 );
2520 cvThreshold( src: &mat, dst: &mat, threshold: thresh, max_value: 0, threshold_type: cv::THRESH_TOZERO );
2521 }
2522 else
2523 {
2524 CvSparseMat* mat = (CvSparseMat*)hist->bins;
2525 CvSparseMatIterator iterator;
2526 CvSparseNode *node;
2527
2528 for( node = cvInitSparseMatIterator( mat, mat_iterator: &iterator );
2529 node != 0; node = cvGetNextSparseNode( mat_iterator: &iterator ))
2530 {
2531 float* val = (float*)CV_NODE_VAL( mat, node );
2532 if( *val <= thresh )
2533 *val = 0;
2534 }
2535 }
2536}
2537
2538
2539// Normalizes histogram (make sum of the histogram bins == factor)
2540CV_IMPL void
2541cvNormalizeHist( CvHistogram* hist, double factor )
2542{
2543 double sum = 0;
2544
2545 if( !CV_IS_HIST(hist) )
2546 CV_Error( cv::Error::StsBadArg, "Invalid histogram header" );
2547
2548 if( !CV_IS_SPARSE_HIST(hist) )
2549 {
2550 CvMat mat;
2551 cvGetMat( arr: hist->bins, header: &mat, coi: 0, allowND: 1 );
2552 sum = cvSum( arr: &mat ).val[0];
2553 if( fabs(x: sum) < DBL_EPSILON )
2554 sum = 1;
2555 cvScale( src: &mat, dst: &mat, scale: factor/sum, shift: 0 );
2556 }
2557 else
2558 {
2559 CvSparseMat* mat = (CvSparseMat*)hist->bins;
2560 CvSparseMatIterator iterator;
2561 CvSparseNode *node;
2562 float scale;
2563
2564 for( node = cvInitSparseMatIterator( mat, mat_iterator: &iterator );
2565 node != 0; node = cvGetNextSparseNode( mat_iterator: &iterator ))
2566 {
2567 sum += *(float*)CV_NODE_VAL(mat,node);
2568 }
2569
2570 if( fabs(x: sum) < DBL_EPSILON )
2571 sum = 1;
2572 scale = (float)(factor/sum);
2573
2574 for( node = cvInitSparseMatIterator( mat, mat_iterator: &iterator );
2575 node != 0; node = cvGetNextSparseNode( mat_iterator: &iterator ))
2576 {
2577 *(float*)CV_NODE_VAL(mat,node) *= scale;
2578 }
2579 }
2580}
2581
2582
2583// Retrieves histogram global min, max and their positions
CV_IMPL void
cvGetMinMaxHistValue( const CvHistogram* hist,
                      float *value_min, float* value_max,
                      int* idx_min, int* idx_max )
{
    // Finds the global minimum and maximum bin values and (optionally) the
    // multi-dimensional indices where they occur. Any of the output pointers
    // may be NULL if that output is not needed.
    double minVal, maxVal;
    int dims, size[CV_MAX_DIM];

    if( !CV_IS_HIST(hist) )
        CV_Error( cv::Error::StsBadArg, "Invalid histogram header" );

    dims = cvGetDims( arr: hist->bins, sizes: size );

    if( !CV_IS_SPARSE_HIST(hist) )
    {
        // Dense path: treat the N-D bins as a 2-D matrix and let cvMinMaxLoc
        // find flat (row, col) positions, then unflatten them below.
        CvMat mat;
        CvPoint minPt = {.x: 0, .y: 0}, maxPt = {.x: 0, .y: 0};

        cvGetMat( arr: hist->bins, header: &mat, coi: 0, allowND: 1 );
        cvMinMaxLoc( arr: &mat, min_val: &minVal, max_val: &maxVal, min_loc: &minPt, max_loc: &maxPt );

        if( dims == 1 )
        {
            // 1-D histogram may be a row or a column; exactly one of x/y is non-zero.
            if( idx_min )
                *idx_min = minPt.y + minPt.x;
            if( idx_max )
                *idx_max = maxPt.y + maxPt.x;
        }
        else if( dims == 2 )
        {
            if( idx_min )
                idx_min[0] = minPt.y, idx_min[1] = minPt.x;
            if( idx_max )
                idx_max[0] = maxPt.y, idx_max[1] = maxPt.x;
        }
        else if( idx_min || idx_max )
        {
            // N-D: convert the flat element offset back into per-dimension
            // indices, last dimension first.
            int imin = minPt.y*mat.cols + minPt.x;
            int imax = maxPt.y*mat.cols + maxPt.x;

            for(int i = dims - 1; i >= 0; i-- )
            {
                if( idx_min )
                {
                    int t = imin / size[i];
                    idx_min[i] = imin - t*size[i];
                    imin = t;
                }

                if( idx_max )
                {
                    int t = imax / size[i];
                    idx_max[i] = imax - t*size[i];
                    imax = t;
                }
            }
        }
    }
    else
    {
        // Sparse path: scan all stored nodes. Float values are compared via
        // their bit patterns: CV_TOGGLE_FLT maps float bits to integers whose
        // ordering matches the float ordering, so plain int compares suffice.
        CvSparseMat* mat = (CvSparseMat*)hist->bins;
        CvSparseMatIterator iterator;
        CvSparseNode *node;
        int minv = INT_MAX;
        int maxv = INT_MIN;
        CvSparseNode* minNode = 0;
        CvSparseNode* maxNode = 0;
        const int *_idx_min = 0, *_idx_max = 0;
        Cv32suf m;

        for( node = cvInitSparseMatIterator( mat, mat_iterator: &iterator );
             node != 0; node = cvGetNextSparseNode( mat_iterator: &iterator ))
        {
            int value = *(int*)CV_NODE_VAL(mat,node);
            value = CV_TOGGLE_FLT(value);
            if( value < minv )
            {
                minv = value;
                minNode = node;
            }

            if( value > maxv )
            {
                maxv = value;
                maxNode = node;
            }
        }

        if( minNode )
        {
            // minNode != 0 implies maxNode != 0: both are set on the first node.
            _idx_min = CV_NODE_IDX(mat,minNode);
            _idx_max = CV_NODE_IDX(mat,maxNode);
            // Undo the toggle to recover the original float values.
            m.i = CV_TOGGLE_FLT(minv); minVal = m.f;
            m.i = CV_TOGGLE_FLT(maxv); maxVal = m.f;
        }
        else
        {
            // Empty sparse histogram: min and max are both 0 by convention.
            minVal = maxVal = 0;
        }

        for(int i = 0; i < dims; i++ )
        {
            // -1 signals "no non-zero bins" to the caller.
            if( idx_min )
                idx_min[i] = _idx_min ? _idx_min[i] : -1;
            if( idx_max )
                idx_max[i] = _idx_max ? _idx_max[i] : -1;
        }
    }

    if( value_min )
        *value_min = (float)minVal;

    if( value_max )
        *value_max = (float)maxVal;
}
2699
2700
2701// Compares two histograms using one of a few methods
CV_IMPL double
cvCompareHist( const CvHistogram* hist1,
               const CvHistogram* hist2,
               int method )
{
    // Legacy C API entry point. Dense histograms are delegated to the
    // cv::Mat overload of cv::compareHist(); sparse histograms are compared
    // node-by-node below.
    int i;
    int size1[CV_MAX_DIM], size2[CV_MAX_DIM], total = 1;

    if( !CV_IS_HIST(hist1) || !CV_IS_HIST(hist2) )
        CV_Error( cv::Error::StsBadArg, "Invalid histogram header[s]" );

    if( CV_IS_SPARSE_MAT(hist1->bins) != CV_IS_SPARSE_MAT(hist2->bins))
        CV_Error(cv::Error::StsUnmatchedFormats, "One of histograms is sparse and other is not");

    if( !CV_IS_SPARSE_MAT(hist1->bins) )
    {
        cv::Mat H1 = cv::cvarrToMat(arr: hist1->bins);
        cv::Mat H2 = cv::cvarrToMat(arr: hist2->bins);
        return cv::compareHist(H1: H1, H2: H2, method);
    }

    int dims1 = cvGetDims( arr: hist1->bins, sizes: size1 );
    int dims2 = cvGetDims( arr: hist2->bins, sizes: size2 );

    if( dims1 != dims2 )
        CV_Error( cv::Error::StsUnmatchedSizes,
                 "The histograms have different numbers of dimensions" );

    for( i = 0; i < dims1; i++ )
    {
        if( size1[i] != size2[i] )
            CV_Error( cv::Error::StsUnmatchedSizes, "The histograms have different sizes" );
        total *= size1[i];
    }

    double result = 0;
    CvSparseMat* mat1 = (CvSparseMat*)(hist1->bins);
    CvSparseMat* mat2 = (CvSparseMat*)(hist2->bins);
    CvSparseMatIterator iterator;
    CvSparseNode *node1, *node2;

    // For symmetric methods, iterate over the sparser histogram and probe
    // the other one by hash; asymmetric methods must keep argument order.
    if( mat1->heap->active_count > mat2->heap->active_count && method != CV_COMP_CHISQR && method != CV_COMP_CHISQR_ALT && method != CV_COMP_KL_DIV )
    {
        CvSparseMat* t;
        CV_SWAP( mat1, mat2, t );
    }

    if( (method == CV_COMP_CHISQR) || (method == CV_COMP_CHISQR_ALT) )
    {
        for( node1 = cvInitSparseMatIterator( mat: mat1, mat_iterator: &iterator );
             node1 != 0; node1 = cvGetNextSparseNode( mat_iterator: &iterator ))
        {
            double v1 = *(float*)CV_NODE_VAL(mat1,node1);
            // Reuse node1's precomputed hash for the lookup in mat2;
            // a missing bin counts as 0.
            uchar* node2_data = cvPtrND( arr: mat2, CV_NODE_IDX(mat1,node1), type: 0, create_node: 0, precalc_hashval: &node1->hashval );
            double v2 = node2_data ? *(float*)node2_data : 0.f;
            double a = v1 - v2;
            double b = (method == CV_COMP_CHISQR) ? v1 : v1 + v2;
            if( fabs(x: b) > DBL_EPSILON )
                result += a*a/b;
        }
    }
    else if( method == CV_COMP_CORREL )
    {
        double s1 = 0, s11 = 0;
        double s2 = 0, s22 = 0;
        double s12 = 0;
        // Mean normalization uses the dense bin count (total): absent sparse
        // entries are implicit zeros.
        double num, denom2, scale = 1./total;

        for( node1 = cvInitSparseMatIterator( mat: mat1, mat_iterator: &iterator );
             node1 != 0; node1 = cvGetNextSparseNode( mat_iterator: &iterator ))
        {
            double v1 = *(float*)CV_NODE_VAL(mat1,node1);
            uchar* node2_data = cvPtrND( arr: mat2, CV_NODE_IDX(mat1,node1),
                                        type: 0, create_node: 0, precalc_hashval: &node1->hashval );
            if( node2_data )
            {
                double v2 = *(float*)node2_data;
                s12 += v1*v2;
            }
            s1 += v1;
            s11 += v1*v1;
        }

        for( node2 = cvInitSparseMatIterator( mat: mat2, mat_iterator: &iterator );
             node2 != 0; node2 = cvGetNextSparseNode( mat_iterator: &iterator ))
        {
            double v2 = *(float*)CV_NODE_VAL(mat2,node2);
            s2 += v2;
            s22 += v2*v2;
        }

        num = s12 - s1*s2*scale;
        denom2 = (s11 - s1*s1*scale)*(s22 - s2*s2*scale);
        result = fabs(x: denom2) > DBL_EPSILON ? num/sqrt(x: denom2) : 1;
    }
    else if( method == CV_COMP_INTERSECT )
    {
        for( node1 = cvInitSparseMatIterator( mat: mat1, mat_iterator: &iterator );
             node1 != 0; node1 = cvGetNextSparseNode( mat_iterator: &iterator ))
        {
            float v1 = *(float*)CV_NODE_VAL(mat1,node1);
            uchar* node2_data = cvPtrND( arr: mat2, CV_NODE_IDX(mat1,node1),
                                        type: 0, create_node: 0, precalc_hashval: &node1->hashval );
            if( node2_data )
            {
                // Accumulate min(v1, v2); bins present in only one histogram
                // contribute min(v, 0) == 0 and are skipped.
                float v2 = *(float*)node2_data;
                if( v1 <= v2 )
                    result += v1;
                else
                    result += v2;
            }
        }
    }
    else if( method == CV_COMP_BHATTACHARYYA )
    {
        double s1 = 0, s2 = 0;

        for( node1 = cvInitSparseMatIterator( mat: mat1, mat_iterator: &iterator );
             node1 != 0; node1 = cvGetNextSparseNode( mat_iterator: &iterator ))
        {
            double v1 = *(float*)CV_NODE_VAL(mat1,node1);
            uchar* node2_data = cvPtrND( arr: mat2, CV_NODE_IDX(mat1,node1),
                                        type: 0, create_node: 0, precalc_hashval: &node1->hashval );
            s1 += v1;
            if( node2_data )
            {
                double v2 = *(float*)node2_data;
                result += sqrt(x: v1 * v2);
            }
        }

        for( node1 = cvInitSparseMatIterator( mat: mat2, mat_iterator: &iterator );
             node1 != 0; node1 = cvGetNextSparseNode( mat_iterator: &iterator ))
        {
            double v2 = *(float*)CV_NODE_VAL(mat2,node1);
            s2 += v2;
        }

        // Normalize by 1/sqrt(s1*s2) and clamp to [0, 1] before the sqrt.
        s1 *= s2;
        s1 = fabs(x: s1) > FLT_EPSILON ? 1./sqrt(x: s1) : 1.;
        result = 1. - result*s1;
        result = sqrt(MAX(result,0.));
    }
    else if( method == CV_COMP_KL_DIV )
    {
        // KL divergence: convert to cv::SparseMat and reuse the C++ overload.
        cv::SparseMat sH1, sH2;
        ((const CvSparseMat*)hist1->bins)->copyToSparseMat(m&: sH1);
        ((const CvSparseMat*)hist2->bins)->copyToSparseMat(m&: sH2);
        result = cv::compareHist( H1: sH1, H2: sH2, method: CV_COMP_KL_DIV );
    }
    else
        CV_Error( cv::Error::StsBadArg, "Unknown comparison method" );

    // The alternative chi-square definition carries a factor of 2.
    if( method == CV_COMP_CHISQR_ALT )
        result *= 2;

    return result;
}
2860
2861// copies one histogram to another
CV_IMPL void
cvCopyHist( const CvHistogram* src, CvHistogram** _dst )
{
    // Copies src into *_dst, reusing the existing destination if it has the
    // same storage kind, dimensionality and sizes; otherwise the destination
    // is released and recreated. Bin ranges are propagated too.
    if( !_dst )
        CV_Error( cv::Error::StsNullPtr, "Destination double pointer is NULL" );

    CvHistogram* dst = *_dst;

    if( !CV_IS_HIST(src) || (dst && !CV_IS_HIST(dst)) )
        CV_Error( cv::Error::StsBadArg, "Invalid histogram header[s]" );

    bool eq = false;
    int size1[CV_MAX_DIM];
    bool is_sparse = CV_IS_SPARSE_MAT(src->bins);
    int dims1 = cvGetDims( arr: src->bins, sizes: size1 );

    // Check whether the existing destination can be reused as-is.
    if( dst && (is_sparse == CV_IS_SPARSE_MAT(dst->bins)))
    {
        int size2[CV_MAX_DIM];
        int dims2 = cvGetDims( arr: dst->bins, sizes: size2 );

        if( dims1 == dims2 )
        {
            int i;

            for( i = 0; i < dims1; i++ )
            {
                if( size1[i] != size2[i] )
                    break;
            }

            eq = (i == dims1);
        }
    }

    if( !eq )
    {
        // Shape mismatch: rebuild the destination from scratch.
        cvReleaseHist( hist: _dst );
        dst = cvCreateHist( dims: dims1, sizes: size1, type: !is_sparse ? CV_HIST_ARRAY : CV_HIST_SPARSE, ranges: 0, uniform: 0 );
        *_dst = dst;
    }

    if( CV_HIST_HAS_RANGES( src ))
    {
        float* ranges[CV_MAX_DIM];
        float** thresh = 0;

        if( CV_IS_UNIFORM_HIST( src ))
        {
            // Uniform ranges live inline in src->thresh; build a pointer array.
            for( int i = 0; i < dims1; i++ )
                ranges[i] = (float*)src->thresh[i];

            thresh = ranges;
        }
        else
        {
            // Non-uniform ranges are already an array of pointers.
            thresh = src->thresh2;
        }

        cvSetHistBinRanges( hist: dst, ranges: thresh, CV_IS_UNIFORM_HIST(src));
    }

    cvCopy( src: src->bins, dst: dst->bins );
}
2926
2927
2928// Sets a value range for every histogram bin
2929CV_IMPL void
2930cvSetHistBinRanges( CvHistogram* hist, float** ranges, int uniform )
2931{
2932 int dims, size[CV_MAX_DIM], total = 0;
2933 int i, j;
2934
2935 if( !ranges )
2936 CV_Error( cv::Error::StsNullPtr, "NULL ranges pointer" );
2937
2938 if( !CV_IS_HIST(hist) )
2939 CV_Error( cv::Error::StsBadArg, "Invalid histogram header" );
2940
2941 dims = cvGetDims( arr: hist->bins, sizes: size );
2942 for( i = 0; i < dims; i++ )
2943 total += size[i]+1;
2944
2945 if( uniform )
2946 {
2947 for( i = 0; i < dims; i++ )
2948 {
2949 if( !ranges[i] )
2950 CV_Error( cv::Error::StsNullPtr, "One of <ranges> elements is NULL" );
2951 hist->thresh[i][0] = ranges[i][0];
2952 hist->thresh[i][1] = ranges[i][1];
2953 }
2954
2955 hist->type |= CV_HIST_UNIFORM_FLAG + CV_HIST_RANGES_FLAG;
2956 }
2957 else
2958 {
2959 float* dim_ranges;
2960
2961 if( !hist->thresh2 )
2962 {
2963 hist->thresh2 = (float**)cvAlloc(
2964 size: dims*sizeof(hist->thresh2[0])+
2965 total*sizeof(hist->thresh2[0][0]));
2966 }
2967 dim_ranges = (float*)(hist->thresh2 + dims);
2968
2969 for( i = 0; i < dims; i++ )
2970 {
2971 float val0 = -FLT_MAX;
2972
2973 if( !ranges[i] )
2974 CV_Error( cv::Error::StsNullPtr, "One of <ranges> elements is NULL" );
2975
2976 for( j = 0; j <= size[i]; j++ )
2977 {
2978 float val = ranges[i][j];
2979 if( val <= val0 )
2980 CV_Error(cv::Error::StsOutOfRange, "Bin ranges should go in ascenting order");
2981 val0 = dim_ranges[j] = val;
2982 }
2983
2984 hist->thresh2[i] = dim_ranges;
2985 dim_ranges += size[i] + 1;
2986 }
2987
2988 hist->type |= CV_HIST_RANGES_FLAG;
2989 hist->type &= ~CV_HIST_UNIFORM_FLAG;
2990 }
2991}
2992
2993
CV_IMPL void
cvCalcArrHist( CvArr** img, CvHistogram* hist, int accumulate, const CvArr* mask )
{
    // Legacy C API wrapper: computes the histogram of one or more single-
    // channel arrays (one array per histogram dimension), delegating to
    // cv::calcHist for both dense and sparse storage.
    if( !CV_IS_HIST(hist))
        CV_Error( cv::Error::StsBadArg, "Bad histogram pointer" );

    if( !img )
        CV_Error( cv::Error::StsNullPtr, "Null double array pointer" );

    int size[CV_MAX_DIM];
    int i, dims = cvGetDims( arr: hist->bins, sizes: size);
    bool uniform = CV_IS_UNIFORM_HIST(hist);

    std::vector<cv::Mat> images(dims);
    for( i = 0; i < dims; i++ )
        images[i] = cv::cvarrToMat(arr: img[i]);

    cv::Mat _mask;
    if( mask )
        _mask = cv::cvarrToMat(arr: mask);

    // Gather per-dimension bin-range pointers in the layout cv::calcHist expects.
    const float* uranges[CV_MAX_DIM] = {0};
    const float** ranges = 0;

    if( hist->type & CV_HIST_RANGES_FLAG )
    {
        ranges = (const float**)hist->thresh2;
        if( uniform )
        {
            // Uniform ranges are stored inline in hist->thresh.
            for( i = 0; i < dims; i++ )
                uranges[i] = &hist->thresh[i][0];
            ranges = uranges;
        }
    }

    if( !CV_IS_SPARSE_HIST(hist) )
    {
        cv::Mat H = cv::cvarrToMat(arr: hist->bins);
        cv::calcHist( images: &images[0], nimages: (int)images.size(), channels: 0, _mask,
                      hist: H, dims: cvGetDims(arr: hist->bins), histSize: H.size, ranges, uniform, accumulate: accumulate != 0 );
    }
    else
    {
        CvSparseMat* sparsemat = (CvSparseMat*)hist->bins;

        if( !accumulate )
            cvZero( arr: hist->bins );
        // Work on a cv::SparseMat copy; keepInt=true makes calcHist store
        // integer counts, which are converted back to float below.
        cv::SparseMat sH;
        sparsemat->copyToSparseMat(m&: sH);
        cv::calcHist( images: &images[0], nimages: (int)images.size(), channels: 0, mask: _mask, hist&: sH, dims: sH.dims(),
                      histSize: sH.dims() > 0 ? sH.hdr->size : 0, ranges, uniform, accumulate: accumulate != 0, keepInt: true );

        // When accumulating, sH already holds old+new counts, so the original
        // must be cleared before the copy-back to avoid double-counting.
        if( accumulate )
            cvZero( arr: sparsemat );

        cv::SparseMatConstIterator it = sH.begin();
        int nz = (int)sH.nzcount();
        for( i = 0; i < nz; i++, ++it )
        {
            CV_Assert(it.ptr != NULL);
            *(float*)cvPtrND(arr: sparsemat, idx: it.node()->idx, type: 0, create_node: -2) = (float)*(const int*)it.ptr;
        }
    }
}
3058
3059
3060CV_IMPL void
3061cvCalcArrBackProject( CvArr** img, CvArr* dst, const CvHistogram* hist )
3062{
3063 if( !CV_IS_HIST(hist))
3064 CV_Error( cv::Error::StsBadArg, "Bad histogram pointer" );
3065
3066 if( !img )
3067 CV_Error( cv::Error::StsNullPtr, "Null double array pointer" );
3068
3069 int size[CV_MAX_DIM];
3070 int i, dims = cvGetDims( arr: hist->bins, sizes: size );
3071
3072 bool uniform = CV_IS_UNIFORM_HIST(hist);
3073 const float* uranges[CV_MAX_DIM] = {0};
3074 const float** ranges = 0;
3075
3076 if( hist->type & CV_HIST_RANGES_FLAG )
3077 {
3078 ranges = (const float**)hist->thresh2;
3079 if( uniform )
3080 {
3081 for( i = 0; i < dims; i++ )
3082 uranges[i] = &hist->thresh[i][0];
3083 ranges = uranges;
3084 }
3085 }
3086
3087 std::vector<cv::Mat> images(dims);
3088 for( i = 0; i < dims; i++ )
3089 images[i] = cv::cvarrToMat(arr: img[i]);
3090
3091 cv::Mat _dst = cv::cvarrToMat(arr: dst);
3092
3093 CV_Assert( _dst.size() == images[0].size() && _dst.depth() == images[0].depth() );
3094
3095 if( !CV_IS_SPARSE_HIST(hist) )
3096 {
3097 cv::Mat H = cv::cvarrToMat(arr: hist->bins);
3098 cv::calcBackProject( images: &images[0], nimages: (int)images.size(),
3099 channels: 0, hist: H, backProject: _dst, ranges, scale: 1, uniform );
3100 }
3101 else
3102 {
3103 cv::SparseMat sH;
3104 ((const CvSparseMat*)hist->bins)->copyToSparseMat(m&: sH);
3105 cv::calcBackProject( images: &images[0], nimages: (int)images.size(),
3106 channels: 0, hist: sH, backProject: _dst, ranges, scale: 1, uniform );
3107 }
3108}
3109
3110
3111////////////////////// B A C K P R O J E C T P A T C H /////////////////////////
3112
CV_IMPL void
cvCalcArrBackProjectPatch( CvArr** arr, CvArr* dst, CvSize patch_size, CvHistogram* hist,
                           int method, double norm_factor )
{
    // Slides a patch_size window over the input arrays; for each position it
    // computes the patch histogram and writes its similarity to `hist`
    // (compared with `method`) into the corresponding dst pixel.
    CvHistogram* model = 0;

    IplImage imgstub[CV_MAX_DIM], *img[CV_MAX_DIM];
    IplROI roi;
    CvMat dststub, *dstmat;
    int i, dims;
    int x, y;
    cv::Size size;

    if( !CV_IS_HIST(hist))
        CV_Error( cv::Error::StsBadArg, "Bad histogram pointer" );

    if( !arr )
        CV_Error( cv::Error::StsNullPtr, "Null double array pointer" );

    if( norm_factor <= 0 )
        CV_Error( cv::Error::StsOutOfRange,
                 "Bad normalization factor (set it to 1.0 if unsure)" );

    if( patch_size.width <= 0 || patch_size.height <= 0 )
        CV_Error( cv::Error::StsBadSize, "The patch width and height must be positive" );

    dims = cvGetDims( arr: hist->bins );
    if (dims < 1)
        CV_Error( cv::Error::StsOutOfRange, "Invalid number of dimensions");
    cvNormalizeHist( hist, factor: norm_factor );

    // All input images share one IplROI instance; moving `roi` below slides
    // the window over every image at once.
    for( i = 0; i < dims; i++ )
    {
        CvMat stub, *mat;
        mat = cvGetMat( arr: arr[i], header: &stub, coi: 0, allowND: 0 );
        img[i] = cvGetImage( arr: mat, image_header: &imgstub[i] );
        img[i]->roi = &roi;
    }

    dstmat = cvGetMat( arr: dst, header: &dststub, coi: 0, allowND: 0 );
    if( CV_MAT_TYPE( dstmat->type ) != CV_32FC1 )
        CV_Error( cv::Error::StsUnsupportedFormat, "Resultant image must have 32fC1 type" );

    if( dstmat->cols != img[0]->width - patch_size.width + 1 ||
        dstmat->rows != img[0]->height - patch_size.height + 1 )
        CV_Error( cv::Error::StsUnmatchedSizes,
                 "The output map must be (W-w+1 x H-h+1), "
                 "where the input images are (W x H) each and the patch is (w x h)" );

    // `model` is a scratch histogram with the same shape/ranges as `hist`.
    cvCopyHist( src: hist, dst: &model );

    size = cvGetMatSize(mat: dstmat);
    roi.coi = 0;
    roi.width = patch_size.width;
    roi.height = patch_size.height;

    for( y = 0; y < size.height; y++ )
    {
        for( x = 0; x < size.width; x++ )
        {
            double result;
            // Reposition the shared ROI: cvCalcHist then sees only the patch.
            roi.xOffset = x;
            roi.yOffset = y;

            cvCalcHist( image: img, hist: model );
            cvNormalizeHist( hist: model, factor: norm_factor );
            result = cvCompareHist( hist1: model, hist2: hist, method );
            CV_MAT_ELEM( *dstmat, float, y, x ) = (float)result;
        }
    }

    cvReleaseHist( hist: &model );
}
3186
3187
3188// Calculates Bayes probabilistic histograms
CV_IMPL void
cvCalcBayesianProb( CvHistogram** src, int count, CvHistogram** dst )
{
    // Computes dst[i] = src[i] / (src[0] + ... + src[count-1]) per bin,
    // i.e. the posterior probability of class i given the bin.
    // Dense histograms only; dst[0] is used as scratch for the sum.
    int i;

    if( !src || !dst )
        CV_Error( cv::Error::StsNullPtr, "NULL histogram array pointer" );

    if( count < 2 )
        CV_Error( cv::Error::StsOutOfRange, "Too small number of histograms" );

    for( i = 0; i < count; i++ )
    {
        if( !CV_IS_HIST(src[i]) || !CV_IS_HIST(dst[i]) )
            CV_Error( cv::Error::StsBadArg, "Invalid histogram header" );

        if( !CV_IS_MATND(src[i]->bins) || !CV_IS_MATND(dst[i]->bins) )
            CV_Error( cv::Error::StsBadArg, "The function supports dense histograms only" );
    }

    cvZero( arr: dst[0]->bins );
    // dst[0] = src[0] + ... + src[count-1]
    for( i = 0; i < count; i++ )
        cvAdd( src1: src[i]->bins, src2: dst[0]->bins, dst: dst[0]->bins );

    // cvDiv with NULL src1 computes the per-element reciprocal: dst[0] = 1/dst[0].
    cvDiv( src1: 0, src2: dst[0]->bins, dst: dst[0]->bins );

    // dst[i] = src[i]*(1/dst[0]); iterate backwards so dst[0] (the scratch
    // reciprocal) is overwritten last.
    for( i = count - 1; i >= 0; i-- )
        cvMul( src1: src[i]->bins, src2: dst[0]->bins, dst: dst[i]->bins );
}
3220
3221
CV_IMPL void
cvCalcProbDensity( const CvHistogram* hist, const CvHistogram* hist_mask,
                   CvHistogram* hist_dens, double scale )
{
    // Per bin: hist_dens = scale * hist_mask / hist, clamped to [0, scale],
    // with 0 where hist is (near-)empty. All three histograms must be dense
    // CV_32FC1 and have matching shapes (checked by the N-array iterator).
    if( scale <= 0 )
        CV_Error( cv::Error::StsOutOfRange, "scale must be positive" );

    if( !CV_IS_HIST(hist) || !CV_IS_HIST(hist_mask) || !CV_IS_HIST(hist_dens) )
        CV_Error( cv::Error::StsBadArg, "Invalid histogram pointer[s]" );

    {
        CvArr* arrs[] = { hist->bins, hist_mask->bins, hist_dens->bins };
        CvMatND stubs[3];
        CvNArrayIterator iterator;

        cvInitNArrayIterator( count: 3, arrs, mask: 0, stubs, array_iterator: &iterator );

        if( CV_MAT_TYPE(iterator.hdr[0]->type) != CV_32FC1 )
            CV_Error( cv::Error::StsUnsupportedFormat, "All histograms must have 32fC1 type" );

        // Process the histograms slice by slice.
        do
        {
            const float* srcdata = (const float*)(iterator.ptr[0]);
            const float* maskdata = (const float*)(iterator.ptr[1]);
            float* dstdata = (float*)(iterator.ptr[2]);
            int i;

            for( i = 0; i < iterator.size.width; i++ )
            {
                float s = srcdata[i];
                float m = maskdata[i];
                if( s > FLT_EPSILON )
                    // Clamp the ratio at 1 (i.e. the output at `scale`) in
                    // case the mask count exceeds the source count.
                    if( m <= s )
                        dstdata[i] = (float)(m*scale/s);
                    else
                        dstdata[i] = (float)scale;
                else
                    dstdata[i] = (float)0;
            }
        }
        while( cvNextNArraySlice( array_iterator: &iterator ));
    }
}
3265
3266class EqualizeHistCalcHist_Invoker : public cv::ParallelLoopBody
3267{
3268public:
3269 enum {HIST_SZ = 256};
3270
3271 EqualizeHistCalcHist_Invoker(cv::Mat& src, int* histogram, cv::Mutex* histogramLock)
3272 : src_(src), globalHistogram_(histogram), histogramLock_(histogramLock)
3273 { }
3274
3275 void operator()( const cv::Range& rowRange ) const CV_OVERRIDE
3276 {
3277 int localHistogram[HIST_SZ] = {0, };
3278
3279 const size_t sstep = src_.step;
3280
3281 int width = src_.cols;
3282 int height = rowRange.end - rowRange.start;
3283
3284 if (src_.isContinuous())
3285 {
3286 width *= height;
3287 height = 1;
3288 }
3289
3290 for (const uchar* ptr = src_.ptr<uchar>(y: rowRange.start); height--; ptr += sstep)
3291 {
3292 int x = 0;
3293 for (; x <= width - 4; x += 4)
3294 {
3295 int t0 = ptr[x], t1 = ptr[x+1];
3296 localHistogram[t0]++; localHistogram[t1]++;
3297 t0 = ptr[x+2]; t1 = ptr[x+3];
3298 localHistogram[t0]++; localHistogram[t1]++;
3299 }
3300
3301 for (; x < width; ++x)
3302 localHistogram[ptr[x]]++;
3303 }
3304
3305 cv::AutoLock lock(*histogramLock_);
3306
3307 for( int i = 0; i < HIST_SZ; i++ )
3308 globalHistogram_[i] += localHistogram[i];
3309 }
3310
3311 static bool isWorthParallel( const cv::Mat& src )
3312 {
3313 return ( src.total() >= 640*480 );
3314 }
3315
3316private:
3317 EqualizeHistCalcHist_Invoker& operator=(const EqualizeHistCalcHist_Invoker&);
3318
3319 cv::Mat& src_;
3320 int* globalHistogram_;
3321 cv::Mutex* histogramLock_;
3322};
3323
3324class EqualizeHistLut_Invoker : public cv::ParallelLoopBody
3325{
3326public:
3327 EqualizeHistLut_Invoker( cv::Mat& src, cv::Mat& dst, int* lut )
3328 : src_(src),
3329 dst_(dst),
3330 lut_(lut)
3331 { }
3332
3333 void operator()( const cv::Range& rowRange ) const CV_OVERRIDE
3334 {
3335 const size_t sstep = src_.step;
3336 const size_t dstep = dst_.step;
3337
3338 int width = src_.cols;
3339 int height = rowRange.end - rowRange.start;
3340 int* lut = lut_;
3341
3342 if (src_.isContinuous() && dst_.isContinuous())
3343 {
3344 width *= height;
3345 height = 1;
3346 }
3347
3348 const uchar* sptr = src_.ptr<uchar>(y: rowRange.start);
3349 uchar* dptr = dst_.ptr<uchar>(y: rowRange.start);
3350
3351 for (; height--; sptr += sstep, dptr += dstep)
3352 {
3353 int x = 0;
3354 for (; x <= width - 4; x += 4)
3355 {
3356 int v0 = sptr[x];
3357 int v1 = sptr[x+1];
3358 int x0 = lut[v0];
3359 int x1 = lut[v1];
3360 dptr[x] = (uchar)x0;
3361 dptr[x+1] = (uchar)x1;
3362
3363 v0 = sptr[x+2];
3364 v1 = sptr[x+3];
3365 x0 = lut[v0];
3366 x1 = lut[v1];
3367 dptr[x+2] = (uchar)x0;
3368 dptr[x+3] = (uchar)x1;
3369 }
3370
3371 for (; x < width; ++x)
3372 dptr[x] = (uchar)lut[sptr[x]];
3373 }
3374 }
3375
3376 static bool isWorthParallel( const cv::Mat& src )
3377 {
3378 return ( src.total() >= 640*480 );
3379 }
3380
3381private:
3382 EqualizeHistLut_Invoker& operator=(const EqualizeHistLut_Invoker&);
3383
3384 cv::Mat& src_;
3385 cv::Mat& dst_;
3386 int* lut_;
3387};
3388
3389CV_IMPL void cvEqualizeHist( const CvArr* srcarr, CvArr* dstarr )
3390{
3391 cv::equalizeHist(src: cv::cvarrToMat(arr: srcarr), dst: cv::cvarrToMat(arr: dstarr));
3392}
3393
3394#ifdef HAVE_OPENCL
3395
3396namespace cv {
3397
3398static bool ocl_equalizeHist(InputArray _src, OutputArray _dst)
3399{
3400 const ocl::Device & dev = ocl::Device::getDefault();
3401 int compunits = dev.maxComputeUnits();
3402 size_t wgs = dev.maxWorkGroupSize();
3403 Size size = _src.size();
3404 bool use16 = size.width % 16 == 0 && _src.offset() % 16 == 0 && _src.step() % 16 == 0;
3405 int kercn = dev.isAMD() && use16 ? 16 : std::min(a: 4, b: ocl::predictOptimalVectorWidth(src1: _src));
3406
3407 ocl::Kernel k1("calculate_histogram", ocl::imgproc::histogram_oclsrc,
3408 format("-D BINS=%d -D HISTS_COUNT=%d -D WGS=%zu -D kercn=%d -D T=%s%s",
3409 BINS, compunits, wgs, kercn,
3410 kercn == 4 ? "int" : ocl::typeToStr(CV_8UC(kercn)),
3411 _src.isContinuous() ? " -D HAVE_SRC_CONT" : ""));
3412 if (k1.empty())
3413 return false;
3414
3415 UMat src = _src.getUMat(), ghist(1, BINS * compunits, CV_32SC1);
3416
3417 k1.args(kernel_args: ocl::KernelArg::ReadOnly(m: src),
3418 kernel_args: ocl::KernelArg::PtrWriteOnly(m: ghist), kernel_args: (int)src.total());
3419
3420 size_t globalsize = compunits * wgs;
3421 if (!k1.run(dims: 1, globalsize: &globalsize, localsize: &wgs, sync: false))
3422 return false;
3423
3424 wgs = std::min<size_t>(a: ocl::Device::getDefault().maxWorkGroupSize(), b: BINS);
3425 UMat lut(1, 256, CV_8UC1);
3426 ocl::Kernel k2("calcLUT", ocl::imgproc::histogram_oclsrc,
3427 format("-D BINS=%d -D HISTS_COUNT=%d -D WGS=%d",
3428 BINS, compunits, (int)wgs));
3429 k2.args(kernel_args: ocl::KernelArg::PtrWriteOnly(m: lut),
3430 kernel_args: ocl::KernelArg::PtrReadOnly(m: ghist), kernel_args: (int)_src.total());
3431
3432 // calculation of LUT
3433 if (!k2.run(dims: 1, globalsize: &wgs, localsize: &wgs, sync: false))
3434 return false;
3435
3436 // execute LUT transparently
3437 LUT(src: _src, lut, dst: _dst);
3438 return true;
3439}
3440
3441}
3442
3443#endif
3444
3445void cv::equalizeHist( InputArray _src, OutputArray _dst )
3446{
3447 CV_INSTRUMENT_REGION();
3448
3449 CV_Assert( _src.type() == CV_8UC1 );
3450
3451 if (_src.empty())
3452 return;
3453
3454 CV_OCL_RUN(_src.dims() <= 2 && _dst.isUMat(),
3455 ocl_equalizeHist(_src, _dst))
3456
3457 Mat src = _src.getMat();
3458 _dst.create( sz: src.size(), type: src.type() );
3459 Mat dst = _dst.getMat();
3460
3461 CALL_HAL(equalizeHist, cv_hal_equalize_hist, src.data, src.step, dst.data, dst.step, src.cols, src.rows);
3462
3463 Mutex histogramLockInstance;
3464
3465 const int hist_sz = EqualizeHistCalcHist_Invoker::HIST_SZ;
3466 int hist[hist_sz] = {0,};
3467 int lut[hist_sz];
3468
3469 EqualizeHistCalcHist_Invoker calcBody(src, hist, &histogramLockInstance);
3470 EqualizeHistLut_Invoker lutBody(src, dst, lut);
3471 cv::Range heightRange(0, src.rows);
3472
3473 if(EqualizeHistCalcHist_Invoker::isWorthParallel(src))
3474 parallel_for_(range: heightRange, body: calcBody);
3475 else
3476 calcBody(heightRange);
3477
3478 int i = 0;
3479 while (!hist[i]) ++i;
3480
3481 int total = (int)src.total();
3482 if (hist[i] == total)
3483 {
3484 dst.setTo(value: i);
3485 return;
3486 }
3487
3488 float scale = (hist_sz - 1.f)/(total - hist[i]);
3489 int sum = 0;
3490
3491 for (lut[i++] = 0; i < hist_sz; ++i)
3492 {
3493 sum += hist[i];
3494 lut[i] = saturate_cast<uchar>(v: sum * scale);
3495 }
3496
3497 if(EqualizeHistLut_Invoker::isWorthParallel(src))
3498 parallel_for_(range: heightRange, body: lutBody);
3499 else
3500 lutBody(heightRange);
3501}
3502
3503#if 0
3504// ----------------------------------------------------------------------
3505
3506/* Implementation of RTTI and Generic Functions for CvHistogram */
3507#define CV_TYPE_NAME_HIST "opencv-hist"
3508
3509static int icvIsHist( const void * ptr )
3510{
3511 return CV_IS_HIST( ((CvHistogram*)ptr) );
3512}
3513
3514static CvHistogram * icvCloneHist( const CvHistogram * src )
3515{
3516 CvHistogram * dst=NULL;
3517 cvCopyHist(src, &dst);
3518 return dst;
3519}
3520
// Deserializes a CvHistogram from file storage: rebuilds the type flags,
// the bin array (dense CvMatND or sparse matrix), and, when present, the
// per-dimension bin-range thresholds.
static void *icvReadHist( CvFileStorage * fs, CvFileNode * node )
{
    CvHistogram * h = 0;
    int type = 0;
    int is_uniform = 0;
    int have_ranges = 0;

    h = (CvHistogram *)cvAlloc( sizeof(CvHistogram) );

    // Reassemble the histogram type word from the stored scalar fields.
    type = cvReadIntByName( fs, node, "type", 0 );
    is_uniform = cvReadIntByName( fs, node, "is_uniform", 0 );
    have_ranges = cvReadIntByName( fs, node, "have_ranges", 0 );
    h->type = CV_HIST_MAGIC_VAL | type |
        (is_uniform ? CV_HIST_UNIFORM_FLAG : 0) |
        (have_ranges ? CV_HIST_RANGES_FLAG : 0);

    if(type == CV_HIST_ARRAY)
    {
        // read histogram bins
        CvMatND* mat = (CvMatND*)cvReadByName( fs, node, "mat" );
        int i, sizes[CV_MAX_DIM];

        if(!CV_IS_MATND(mat))
            CV_Error( cv::Error::StsError, "Expected CvMatND");

        for(i=0; i<mat->dims; i++)
            sizes[i] = mat->dim[i].size;

        // Point the embedded header at the data read into the temporary matrix.
        cvInitMatNDHeader( &(h->mat), mat->dims, sizes, mat->type, mat->data.ptr );
        h->bins = &(h->mat);

        // take ownership of refcount pointer as well
        h->mat.refcount = mat->refcount;

        // increase refcount so freeing temp header doesn't free data
        cvIncRefData( mat );

        // free temporary header
        cvReleaseMatND( &mat );
    }
    else
    {
        // Non-array histograms must be sparse matrices.
        h->bins = cvReadByName( fs, node, "bins" );
        if(!CV_IS_SPARSE_MAT(h->bins)){
            CV_Error( cv::Error::StsError, "Unknown Histogram type");
        }
    }

    // read thresholds
    if(have_ranges)
    {
        int i, dims, size[CV_MAX_DIM], total = 0;
        CvSeqReader reader;
        CvFileNode * thresh_node;

        // Non-uniform storage needs size[i]+1 boundary values per dimension.
        dims = cvGetDims( h->bins, size );
        for( i = 0; i < dims; i++ )
            total += size[i]+1;

        thresh_node = cvGetFileNodeByName( fs, node, "thresh" );
        if(!thresh_node)
            CV_Error( cv::Error::StsError, "'thresh' node is missing");
        cvStartReadRawData( fs, thresh_node, &reader );

        if(is_uniform)
        {
            // Uniform histograms store just (lower, upper) per dimension.
            for(i=0; i<dims; i++)
                cvReadRawDataSlice( fs, &reader, 2, h->thresh[i], "f" );
            h->thresh2 = NULL;
        }
        else
        {
            // Single allocation holds the pointer table followed by all
            // boundary values; thresh2[i] points into the shared buffer.
            float* dim_ranges;
            h->thresh2 = (float**)cvAlloc(
                dims*sizeof(h->thresh2[0])+
                total*sizeof(h->thresh2[0][0]));
            dim_ranges = (float*)(h->thresh2 + dims);
            for(i=0; i < dims; i++)
            {
                h->thresh2[i] = dim_ranges;
                cvReadRawDataSlice( fs, &reader, size[i]+1, dim_ranges, "f" );
                dim_ranges += size[i] + 1;
            }
        }
    }

    return h;
}
3609
// Serializes a CvHistogram to file storage: writes the type flags, the bin
// array (dense matrix or sparse bins), and optionally the per-dimension
// bin-range thresholds. Mirrors the format read back by icvReadHist.
static void icvWriteHist( CvFileStorage* fs, const char* name,
                          const void* struct_ptr, CvAttrList /*attributes*/ )
{
    const CvHistogram * hist = (const CvHistogram *) struct_ptr;
    int sizes[CV_MAX_DIM];
    int dims;
    int i;
    int is_uniform, have_ranges;

    cvStartWriteStruct( fs, name, CV_NODE_MAP, CV_TYPE_NAME_HIST );

    is_uniform = (CV_IS_UNIFORM_HIST(hist) ? 1 : 0);
    have_ranges = (hist->type & CV_HIST_RANGES_FLAG ? 1 : 0);

    // Only the low bit (array vs sparse) of the type word is persisted.
    cvWriteInt( fs, "type", (hist->type & 1) );
    cvWriteInt( fs, "is_uniform", is_uniform );
    cvWriteInt( fs, "have_ranges", have_ranges );
    if(!CV_IS_SPARSE_HIST(hist))
        cvWrite( fs, "mat", &(hist->mat) );
    else
        cvWrite( fs, "bins", hist->bins );

    // write thresholds
    if(have_ranges){
        dims = cvGetDims( hist->bins, sizes );
        cvStartWriteStruct( fs, "thresh", CV_NODE_SEQ + CV_NODE_FLOW );
        if(is_uniform){
            // Uniform: only (lower, upper) per dimension.
            for(i=0; i<dims; i++){
                cvWriteRawData( fs, hist->thresh[i], 2, "f" );
            }
        }
        else{
            // Non-uniform: sizes[i]+1 bin boundaries per dimension.
            for(i=0; i<dims; i++){
                cvWriteRawData( fs, hist->thresh2[i], sizes[i]+1, "f" );
            }
        }
        cvEndWriteStruct( fs );
    }

    cvEndWriteStruct( fs );
}
3651
3652
// Registers CvHistogram with the legacy RTTI/type system so generic
// load/save/clone/release routines can handle "opencv-hist" nodes.
CvType hist_type( CV_TYPE_NAME_HIST, icvIsHist, (CvReleaseFunc)cvReleaseHist,
                  icvReadHist, icvWriteHist, (CvCloneFunc)icvCloneHist );
3655#endif
3656
3657/* End of file. */
3658

source code of opencv/modules/imgproc/src/histogram.cpp