// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2020-2021 Intel Corporation

#include "opencv2/videoio.hpp"
#ifdef HAVE_OPENCL
#include "opencv2/core/ocl.hpp"
#endif
#if defined(__OPENCV_BUILD) && !defined(BUILD_PLUGIN) // TODO Properly detect and add D3D11 / LIBVA dependencies for standalone plugins
#include "cvconfig.h"
#endif
#include <sstream>

#if defined(HAVE_MFX) && defined(HAVE_ONEVPL)
#undef HAVE_MFX // libav's hwcontext_qsv.h doesn't expect oneVPL headers
#endif

#ifdef HAVE_D3D11
#define D3D11_NO_HELPERS
#include <d3d11.h>
#include <codecvt>
#include <locale>
#include "opencv2/core/directx.hpp"
#ifdef HAVE_OPENCL
#include <CL/cl_d3d11.h>
#endif
#endif // HAVE_D3D11

#ifdef HAVE_VA
#include <va/va_backend.h>
#ifdef HAVE_VA_INTEL
#include "opencv2/core/va_intel.hpp"
#ifndef CL_TARGET_OPENCL_VERSION
#define CL_TARGET_OPENCL_VERSION 120
#endif
#ifdef HAVE_VA_INTEL_OLD_HEADER
#include <CL/va_ext.h>
#else
#include <CL/cl_va_api_media_sharing_intel.h>
#endif
#endif
#endif // HAVE_VA

// FFMPEG "C" headers
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavutil/avutil.h>
#include <libavutil/hwcontext.h>
#ifdef HAVE_D3D11
#include <libavutil/hwcontext_d3d11va.h>
#endif
#ifdef HAVE_VA
#include <libavutil/hwcontext_vaapi.h>
#endif
#ifdef HAVE_MFX // dependency only on MFX header files, no linkage dependency
#include <libavutil/hwcontext_qsv.h>
#endif
}

#define HW_DEFAULT_POOL_SIZE 32
#define HW_DEFAULT_SW_FORMAT AV_PIX_FMT_NV12
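
// Defaults for the HW frame pools created below: when the codec does not dictate
// them, a pool gets HW_DEFAULT_POOL_SIZE surfaces and NV12 as the software
// (mappable) pixel format, the native layout of most hardware decode surfaces.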

using namespace cv;

static AVCodec *hw_find_codec(AVCodecID id, AVHWDeviceType hw_type, int (*check_category)(const AVCodec *),
                              const char *disabled_codecs, AVPixelFormat *hw_pix_fmt);
static AVBufferRef* hw_create_device(AVHWDeviceType hw_type, int hw_device, const std::string& device_subname, bool use_opencl);
static AVBufferRef* hw_create_frames(struct AVCodecContext* ctx, AVBufferRef *hw_device_ctx, int width, int height, AVPixelFormat hw_format);
static AVPixelFormat hw_get_format_callback(struct AVCodecContext *ctx, const enum AVPixelFormat * fmt);
static VideoAccelerationType hw_type_to_va_type(AVHWDeviceType hw_type);

static
const char* getVideoAccelerationName(VideoAccelerationType va_type)
{
    switch (va_type)
    {
    case VIDEO_ACCELERATION_NONE: return "none";
    case VIDEO_ACCELERATION_ANY: return "any";
    case VIDEO_ACCELERATION_D3D11: return "d3d11";
    case VIDEO_ACCELERATION_VAAPI: return "vaapi";
    case VIDEO_ACCELERATION_MFX: return "mfx";
    }
    return "unknown";
}

static
std::string getDecoderConfiguration(VideoAccelerationType va_type, AVDictionary *dict)
{
    std::string va_name = getVideoAccelerationName(va_type);
    std::string key_name = std::string("hw_decoders_") + va_name;
    const char *hw_acceleration = NULL;
    if (dict)
    {
        AVDictionaryEntry* entry = av_dict_get(dict, key_name.c_str(), NULL, 0);
        if (entry)
            hw_acceleration = entry->value;
    }
    if (hw_acceleration)
        return hw_acceleration;

    // some default values (FFMPEG_DECODE_ACCELERATION_TYPES)
#ifdef _WIN32
    switch (va_type)
    {
    case VIDEO_ACCELERATION_NONE: return "";
    case VIDEO_ACCELERATION_ANY: return "d3d11va";
    case VIDEO_ACCELERATION_D3D11: return "d3d11va";
    case VIDEO_ACCELERATION_VAAPI: return "";
    case VIDEO_ACCELERATION_MFX: return ""; // "qsv" fails if non-Intel D3D11 device
    }
    return "";
#else
    switch (va_type)
    {
    case VIDEO_ACCELERATION_NONE: return "";
    case VIDEO_ACCELERATION_ANY: return "vaapi.iHD";
    case VIDEO_ACCELERATION_D3D11: return "";
    case VIDEO_ACCELERATION_VAAPI: return "vaapi.iHD";
    case VIDEO_ACCELERATION_MFX: return "qsv.iHD";
    }
    return "";
#endif
}
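
// The configuration string is a comma-separated list of entries in the form
// "<ffmpeg_hw_type>[.<device_substring>]", parsed by HWAccelIterator below.
// For example, "vaapi.iHD" means: try VAAPI, but only on devices whose vendor
// string contains "iHD" (Intel's iHD media driver); see hw_check_device().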

static
std::string getEncoderConfiguration(VideoAccelerationType va_type, AVDictionary *dict)
{
    std::string va_name = getVideoAccelerationName(va_type);
    std::string key_name = std::string("hw_encoders_") + va_name;
    const char *hw_acceleration = NULL;
    if (dict)
    {
        AVDictionaryEntry* entry = av_dict_get(dict, key_name.c_str(), NULL, 0);
        if (entry)
            hw_acceleration = entry->value;
    }
    if (hw_acceleration)
        return hw_acceleration;

    // some default values (FFMPEG_ENCODE_ACCELERATION_TYPES)
#ifdef _WIN32
    switch (va_type)
    {
    case VIDEO_ACCELERATION_NONE: return "";
    case VIDEO_ACCELERATION_ANY: return "qsv";
    case VIDEO_ACCELERATION_D3D11: return "";
    case VIDEO_ACCELERATION_VAAPI: return "";
    case VIDEO_ACCELERATION_MFX: return "qsv";
    }
    return "";
#else
    switch (va_type)
    {
    case VIDEO_ACCELERATION_NONE: return "";
    case VIDEO_ACCELERATION_ANY: return "qsv.iHD,vaapi.iHD";
    case VIDEO_ACCELERATION_D3D11: return "";
    case VIDEO_ACCELERATION_VAAPI: return "vaapi.iHD";
    case VIDEO_ACCELERATION_MFX: return "qsv.iHD";
    }
    return ""; // keep consistent with the other configuration getters
#endif
}

static
std::string getDecoderDisabledCodecs(AVDictionary *dict)
{
    std::string key_name = std::string("hw_disable_decoders");
    const char *disabled_codecs = NULL;
    if (dict)
    {
        AVDictionaryEntry* entry = av_dict_get(dict, key_name.c_str(), NULL, 0);
        if (entry)
            disabled_codecs = entry->value;
    }
    if (disabled_codecs)
        return disabled_codecs;

    // some default values (FFMPEG_DECODE_DISABLE_CODECS)
#ifdef _WIN32
    return "none";
#else
    return "av1.vaapi,av1_qsv,vp8.vaapi,vp8_qsv"; // "vp9_qsv"
#endif
}

static
std::string getEncoderDisabledCodecs(AVDictionary *dict)
{
    std::string key_name = std::string("hw_disabled_encoders");
    const char *disabled_codecs = NULL;
    if (dict)
    {
        AVDictionaryEntry* entry = av_dict_get(dict, key_name.c_str(), NULL, 0);
        if (entry)
            disabled_codecs = entry->value;
    }
    if (disabled_codecs)
        return disabled_codecs;

    // some default values (FFMPEG_ENCODE_DISABLE_CODECS)
#ifdef _WIN32
    return "mjpeg_qsv";
#else
    return "mjpeg_vaapi,mjpeg_qsv,vp8_vaapi";
#endif
}
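
// A disabled-codecs entry can be a full codec name ("vp8_qsv"), a codec name plus
// HW type suffix ("av1.vaapi"), a bare HW type suffix (".vaapi"), or the special
// value "hw" which disables all HW codecs; see hw_check_codec() for the matching.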

static
bool hw_check_device(AVBufferRef* ctx, AVHWDeviceType hw_type, const std::string& device_subname) {
    if (!ctx)
        return false;
    AVHWDeviceContext* hw_device_ctx = (AVHWDeviceContext*)ctx->data;
    if (!hw_device_ctx->hwctx)
        return false;
    const char *hw_name = av_hwdevice_get_type_name(hw_type);
    if (hw_type == AV_HWDEVICE_TYPE_QSV)
        hw_name = "MFX";
    bool ret = true;
    std::string device_name;
#if defined(HAVE_D3D11)
    if (hw_device_ctx->type == AV_HWDEVICE_TYPE_D3D11VA) {
        ID3D11Device* device = ((AVD3D11VADeviceContext*)hw_device_ctx->hwctx)->device;
        IDXGIDevice* dxgiDevice = nullptr;
        if (device && SUCCEEDED(device->QueryInterface(__uuidof(IDXGIDevice), reinterpret_cast<void**>(&dxgiDevice)))) {
            IDXGIAdapter* adapter = nullptr;
            if (SUCCEEDED(dxgiDevice->GetAdapter(&adapter))) {
                DXGI_ADAPTER_DESC desc;
                if (SUCCEEDED(adapter->GetDesc(&desc))) {
                    std::wstring_convert<std::codecvt_utf8_utf16<wchar_t>> conv;
                    device_name = conv.to_bytes(desc.Description);
                }
                adapter->Release();
            }
            dxgiDevice->Release();
        }
    }
#endif
    if (hw_device_ctx->type == AV_HWDEVICE_TYPE_VAAPI) {
#if defined(HAVE_VA) && (VA_MAJOR_VERSION >= 1)
        VADisplay display = ((AVVAAPIDeviceContext *) hw_device_ctx->hwctx)->display;
        if (display) {
            VADriverContext *va_ctx = ((VADisplayContext *) display)->pDriverContext;
            device_name = va_ctx->str_vendor;
            if (hw_type == AV_HWDEVICE_TYPE_QSV) {
                // Workaround for issue fixed in MediaSDK 21.x https://github.com/Intel-Media-SDK/MediaSDK/issues/2595
                // Checks VAAPI driver for support of VideoProc operation required by MediaSDK
                ret = false;
                int n_entrypoints = va_ctx->max_entrypoints;
                std::vector<VAEntrypoint> entrypoints(n_entrypoints);
                if (va_ctx->vtable->vaQueryConfigEntrypoints(va_ctx, VAProfileNone, entrypoints.data(), &n_entrypoints) == VA_STATUS_SUCCESS) {
                    for (int i = 0; i < n_entrypoints; i++) {
                        if (entrypoints[i] == VAEntrypointVideoProc) {
                            ret = true;
                            break;
                        }
                    }
                }
                if (!ret)
                    CV_LOG_INFO(NULL, "FFMPEG: Skipping MFX video acceleration as entrypoint VideoProc not found in: " << device_name);
            }
        }
#else
        ret = (hw_type != AV_HWDEVICE_TYPE_QSV); // disable MFX if we can't check VAAPI for VideoProc entrypoint
#endif
    }
    if (ret && !device_subname.empty() && device_name.find(device_subname) == std::string::npos)
    {
        CV_LOG_INFO(NULL, "FFMPEG: Skipping '" << hw_name <<
                "' video acceleration on the following device name as not matching substring '" << device_subname << "': " << device_name);
        ret = false; // reject configuration
    }
    if (ret)
    {
        if (!device_name.empty()) {
            CV_LOG_INFO(NULL, "FFMPEG: Using " << hw_name << " video acceleration on device: " << device_name);
        } else {
            CV_LOG_INFO(NULL, "FFMPEG: Using " << hw_name << " video acceleration");
        }
    }
    return ret;
}

static
AVBufferRef* hw_create_derived_context(AVHWDeviceType hw_type, AVBufferRef* hw_device_ctx) {
    AVBufferRef* derived_ctx = NULL;
    const char* hw_name = av_hwdevice_get_type_name(hw_type);
    int err = av_hwdevice_ctx_create_derived(&derived_ctx, hw_type, hw_device_ctx, 0);
    if (!derived_ctx || err < 0)
    {
        if (derived_ctx)
            av_buffer_unref(&derived_ctx);
        CV_LOG_INFO(NULL, "FFMPEG: Failed to create derived video acceleration (av_hwdevice_ctx_create_derived) for " << hw_name << ". Error=" << err);
        return NULL;
    }
    else
    {
        // Store child context in 'user_opaque' field of parent context.
        struct FreeChildContext {
            static void free(struct AVHWDeviceContext* ctx) {
                AVBufferRef* child_ctx = (AVBufferRef*)ctx->user_opaque;
                if (child_ctx)
                    av_buffer_unref(&child_ctx);
            }
        };
        AVHWDeviceContext* ctx = (AVHWDeviceContext*)derived_ctx->data;
        ctx->user_opaque = av_buffer_ref(hw_device_ctx);
        ctx->free = FreeChildContext::free;
        CV_LOG_INFO(NULL, "FFMPEG: Created derived video acceleration context (av_hwdevice_ctx_create_derived) for " << hw_name);
        return derived_ctx;
    }
}

#ifdef HAVE_OPENCL // GPU buffer interop with cv::UMat

// FFmpeg context attached to OpenCL context
class OpenCL_FFMPEG_Context : public ocl::Context::UserContext {
public:
    OpenCL_FFMPEG_Context(AVBufferRef* ctx) {
        ctx_ = av_buffer_ref(ctx);
    }
    virtual ~OpenCL_FFMPEG_Context() {
        av_buffer_unref(&ctx_);
    }
    AVBufferRef* GetAVHWDevice() {
        return ctx_;
    }
private:
    AVBufferRef* ctx_;
};
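
// This wrapper keeps a reference to the AVHWDeviceContext alive for the lifetime of
// the OpenCL context it is attached to: hw_init_opencl() attaches it via
// setUserContext(), and hw_create_context_from_opencl() retrieves it via
// getUserContext(), so decoders and encoders can share one HW device through the
// current OpenCL context.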

#ifdef HAVE_MFX
static
int hw_find_qsv_surface_index(AVFrame* hw_frame)
{
    if (AV_PIX_FMT_QSV != hw_frame->format)
        return -1;
    mfxFrameSurface1* surface = (mfxFrameSurface1*)hw_frame->data[3]; // As defined by AV_PIX_FMT_QSV
    AVHWFramesContext* frames_ctx = (AVHWFramesContext*)hw_frame->hw_frames_ctx->data;
    AVQSVFramesContext* qsv_ctx = (AVQSVFramesContext*)frames_ctx->hwctx;
    for (int i = 0; i < qsv_ctx->nb_surfaces; i++) {
        if (surface == qsv_ctx->surfaces + i) {
            return i;
        }
    }
    return -1;
}
#endif

#ifdef HAVE_VA
static
VADisplay hw_get_va_display(AVHWDeviceContext* hw_device_ctx)
{
    if (hw_device_ctx->type == AV_HWDEVICE_TYPE_QSV) { // we stored pointer to child context in 'user_opaque' field
        AVBufferRef* ctx = (AVBufferRef*)hw_device_ctx->user_opaque;
        hw_device_ctx = (AVHWDeviceContext*)ctx->data;
    }
    if (hw_device_ctx && hw_device_ctx->type == AV_HWDEVICE_TYPE_VAAPI) {
        return ((AVVAAPIDeviceContext*)hw_device_ctx->hwctx)->display;
    }
    return NULL;
}
#endif // HAVE_VA

#ifdef HAVE_VA_INTEL
static
VASurfaceID hw_get_va_surface(AVFrame* hw_frame) {
    if (AV_PIX_FMT_VAAPI == hw_frame->format) {
        return (VASurfaceID)(size_t)hw_frame->data[3]; // As defined by AV_PIX_FMT_VAAPI
    }
#ifdef HAVE_MFX
    else if (AV_PIX_FMT_QSV == hw_frame->format) {
        int frame_idx = hw_find_qsv_surface_index(hw_frame);
        if (frame_idx >= 0) { // frame index is same in parent (QSV) and child (VAAPI) frame context
            AVHWFramesContext *frames_ctx = (AVHWFramesContext *) hw_frame->hw_frames_ctx->data;
            AVHWFramesContext *child_ctx = (AVHWFramesContext *) frames_ctx->user_opaque;
            if (child_ctx && AV_HWDEVICE_TYPE_VAAPI == child_ctx->device_ctx->type) {
                AVVAAPIFramesContext *vaapi_ctx = (AVVAAPIFramesContext *) child_ctx->hwctx;
                CV_Assert(frame_idx < vaapi_ctx->nb_surfaces);
                return vaapi_ctx->surface_ids[frame_idx];
            }
        }
    }
#endif // HAVE_MFX
    return VA_INVALID_SURFACE;
}
#endif // HAVE_VA_INTEL

#ifdef HAVE_D3D11
static
AVD3D11VADeviceContext* hw_get_d3d11_device_ctx(AVHWDeviceContext* hw_device_ctx) {
    if (AV_HWDEVICE_TYPE_QSV == hw_device_ctx->type) { // we stored pointer to child context in 'user_opaque' field
        AVBufferRef* ctx = (AVBufferRef*)hw_device_ctx->user_opaque;
        hw_device_ctx = (AVHWDeviceContext*)ctx->data;
    }
    if (AV_HWDEVICE_TYPE_D3D11VA == hw_device_ctx->type) {
        return (AVD3D11VADeviceContext*)hw_device_ctx->hwctx;
    }
    return NULL;
}

ID3D11Texture2D* hw_get_d3d11_texture(AVFrame* hw_frame, int* subresource) {
    ID3D11Texture2D* texture = NULL;
    if (AV_PIX_FMT_D3D11 == hw_frame->format) {
        texture = (ID3D11Texture2D*)hw_frame->data[0]; // As defined by AV_PIX_FMT_D3D11
        *subresource = (intptr_t)hw_frame->data[1]; // As defined by AV_PIX_FMT_D3D11
    }
#ifdef HAVE_MFX
    else if (AV_PIX_FMT_QSV == hw_frame->format) {
        AVHWFramesContext *frames_ctx = (AVHWFramesContext *) hw_frame->hw_frames_ctx->data;
        AVHWFramesContext *child_ctx = (AVHWFramesContext *) frames_ctx->user_opaque;
        if (child_ctx && AV_HWDEVICE_TYPE_D3D11VA == child_ctx->device_ctx->type) {
            texture = ((AVD3D11VAFramesContext*)child_ctx->hwctx)->texture;
        }
        *subresource = hw_find_qsv_surface_index(hw_frame);
        CV_Assert(*subresource >= 0);
    }
#endif
    return texture;
}

// In D3D11 case we allocate additional texture as single texture (not texture array) because
// OpenCL interop with D3D11 doesn't support/work with NV12 sub-texture of texture array.
ID3D11Texture2D* hw_get_d3d11_single_texture(AVFrame* hw_frame, AVD3D11VADeviceContext* d3d11_device_ctx, ID3D11Texture2D* texture) {
    AVHWFramesContext* frames_ctx = (AVHWFramesContext*)hw_frame->hw_frames_ctx->data;
    if (AV_HWDEVICE_TYPE_QSV == frames_ctx->device_ctx->type) {
        frames_ctx = (AVHWFramesContext*)frames_ctx->user_opaque; // we stored pointer to child context in 'user_opaque' field
    }
    if (!frames_ctx || AV_HWDEVICE_TYPE_D3D11VA != frames_ctx->device_ctx->type) {
        return NULL;
    }
    ID3D11Texture2D* singleTexture = (ID3D11Texture2D*)frames_ctx->user_opaque;
    if (!singleTexture && d3d11_device_ctx && texture) {
        D3D11_TEXTURE2D_DESC desc = {};
        texture->GetDesc(&desc);
        desc.ArraySize = 1;
        desc.BindFlags |= D3D11_BIND_SHADER_RESOURCE;
        desc.MiscFlags |= D3D11_RESOURCE_MISC_SHARED;
        if (SUCCEEDED(d3d11_device_ctx->device->CreateTexture2D(&desc, NULL, &singleTexture))) {
            frames_ctx->user_opaque = singleTexture;
        }
    }
    return singleTexture;
}
#endif // HAVE_D3D11
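
// The single texture created above is cached in AVHWFramesContext::user_opaque and
// released by the D3D11SingleTexture::free callback installed in hw_create_frames(),
// so at most one staging texture exists per frame pool.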

static
AVHWDeviceType hw_check_opencl_context(AVHWDeviceContext* ctx) {
    ocl::OpenCLExecutionContext& ocl_context = ocl::OpenCLExecutionContext::getCurrentRef();
    if (!ctx || ocl_context.empty())
        return AV_HWDEVICE_TYPE_NONE;
#ifdef HAVE_VA_INTEL
    VADisplay vadisplay_ocl = ocl_context.getContext().getOpenCLContextProperty(CL_CONTEXT_VA_API_DISPLAY_INTEL);
    VADisplay vadisplay_ctx = hw_get_va_display(ctx);
    if (vadisplay_ocl && vadisplay_ocl == vadisplay_ctx)
        return AV_HWDEVICE_TYPE_VAAPI;
#endif
#ifdef HAVE_D3D11
    ID3D11Device* d3d11device_ocl = (ID3D11Device*)ocl_context.getContext().getOpenCLContextProperty(CL_CONTEXT_D3D11_DEVICE_KHR);
    AVD3D11VADeviceContext* d3d11_device_ctx = hw_get_d3d11_device_ctx(ctx);
    if (d3d11_device_ctx && d3d11device_ocl && d3d11_device_ctx->device == d3d11device_ocl)
        return AV_HWDEVICE_TYPE_D3D11VA;
#endif
    return AV_HWDEVICE_TYPE_NONE;
}

static
void hw_init_opencl(AVBufferRef* ctx) {
    if (!ctx)
        return;
    AVHWDeviceContext* hw_device_ctx = (AVHWDeviceContext*)ctx->data;
    if (!hw_device_ctx)
        return;
#ifdef HAVE_VA_INTEL
    VADisplay va_display = hw_get_va_display(hw_device_ctx);
    if (va_display) {
        va_intel::ocl::initializeContextFromVA(va_display);
    }
#endif
#ifdef HAVE_D3D11
    AVD3D11VADeviceContext* d3d11_device_ctx = hw_get_d3d11_device_ctx(hw_device_ctx);
    if (d3d11_device_ctx) {
        directx::ocl::initializeContextFromD3D11Device(d3d11_device_ctx->device);
    }
#endif
    if (hw_check_opencl_context(hw_device_ctx) != AV_HWDEVICE_TYPE_NONE) {
        // Attach AVHWDeviceContext to OpenCL context
        ocl::Context &ocl_context = ocl::OpenCLExecutionContext::getCurrent().getContext();
        ocl_context.setUserContext(std::make_shared<OpenCL_FFMPEG_Context>(ctx));
    }
}

static
AVBufferRef* hw_create_context_from_opencl(ocl::OpenCLExecutionContext& ocl_context, AVHWDeviceType hw_type) {
    if (ocl_context.empty())
        return NULL;
    auto ocl_ffmpeg_context = ocl_context.getContext().getUserContext<OpenCL_FFMPEG_Context>();
    if (!ocl_ffmpeg_context)
        return NULL;
    AVBufferRef* ctx = ocl_ffmpeg_context->GetAVHWDevice();
    if (hw_type != ((AVHWDeviceContext*)ctx->data)->type) {
        ctx = hw_create_derived_context(hw_type, ctx);
    }
    else {
        ctx = av_buffer_ref(ctx);
    }
    if (ctx)
        CV_LOG_INFO(NULL, "FFMPEG: Using " << av_hwdevice_get_type_name(hw_type) << " video acceleration context attached to OpenCL context");
    return ctx;
}

#endif // HAVE_OPENCL

static
AVBufferRef* hw_create_device(AVHWDeviceType hw_type, int hw_device, const std::string& device_subname, bool use_opencl) {
    AVBufferRef* hw_device_ctx = NULL;
    if (AV_HWDEVICE_TYPE_NONE == hw_type)
        return NULL;

#ifdef HAVE_OPENCL
    // Check if OpenCL context has AVHWDeviceContext attached to it
    ocl::OpenCLExecutionContext& ocl_context = ocl::OpenCLExecutionContext::getCurrentRef();
    try {
        hw_device_ctx = hw_create_context_from_opencl(ocl_context, hw_type);
        if (hw_device_ctx) {
            if (hw_device >= 0)
                CV_LOG_ERROR(NULL, "VIDEOIO/FFMPEG: ignoring property HW_DEVICE as device context already created and attached to OpenCL context");
            return hw_device_ctx;
        }
    }
    catch (...) {
        CV_LOG_INFO(NULL, "FFMPEG: Exception creating Video Acceleration context using current OpenCL context");
    }
#endif

    // Create new media context. In QSV case, first create 'child' context.
    std::vector<AVHWDeviceType> child_types = { hw_type };
    if (hw_type == AV_HWDEVICE_TYPE_QSV) {
#ifdef _WIN32
        child_types = { AV_HWDEVICE_TYPE_D3D11VA, AV_HWDEVICE_TYPE_DXVA2 };
#else
        child_types = { AV_HWDEVICE_TYPE_VAAPI };
#endif
    }
    for (AVHWDeviceType child_type : child_types) {
        char device[128] = "";
        char* pdevice = NULL;
        if (hw_device >= 0 && hw_device < 100000) {
            if (child_type == AV_HWDEVICE_TYPE_VAAPI) {
                snprintf(device, sizeof(device), "/dev/dri/renderD%d", 128 + hw_device);
            }
            else {
                snprintf(device, sizeof(device), "%d", hw_device);
            }
            pdevice = device;
        }
        const char* hw_child_name = av_hwdevice_get_type_name(child_type);
        const char* device_name = pdevice ? pdevice : "'default'";
        int err = av_hwdevice_ctx_create(&hw_device_ctx, child_type, pdevice, NULL, 0);
        if (hw_device_ctx && err >= 0)
        {
            if (!hw_check_device(hw_device_ctx, hw_type, device_subname)) {
                av_buffer_unref(&hw_device_ctx);
                continue;
            }
            CV_LOG_INFO(NULL, "FFMPEG: Created video acceleration context (av_hwdevice_ctx_create) for " << hw_child_name << " on device " << device_name);
#ifdef HAVE_OPENCL
            // if OpenCL context not created yet or property HW_ACCELERATION_USE_OPENCL set, create OpenCL context with binding to video acceleration context
            if (ocl::haveOpenCL()) {
                if (ocl_context.empty() || use_opencl) {
                    try {
                        hw_init_opencl(hw_device_ctx);
                        ocl_context = ocl::OpenCLExecutionContext::getCurrentRef();
                        if (!ocl_context.empty()) {
                            CV_LOG_INFO(NULL, "FFMPEG: Created OpenCL context with " << hw_child_name <<
                                    " video acceleration on OpenCL device: " << ocl_context.getDevice().name());
                        }
                    } catch (...) {
                        CV_LOG_INFO(NULL, "FFMPEG: Exception creating OpenCL context with " << hw_child_name << " video acceleration");
                    }
                }
                else {
                    CV_LOG_INFO(NULL, "FFMPEG: Can't bind " << hw_child_name << " video acceleration context to already created OpenCL context");
                }
            }
#else
            CV_UNUSED(use_opencl);
#endif
            if (hw_type != child_type) {
                AVBufferRef* derived_ctx = hw_create_derived_context(hw_type, hw_device_ctx);
                av_buffer_unref(&hw_device_ctx);
                return derived_ctx;
            } else {
                return hw_device_ctx;
            }
        }
        else
        {
            const char* hw_name = hw_child_name;
            CV_LOG_INFO(NULL, "FFMPEG: Failed to create " << hw_name << " video acceleration (av_hwdevice_ctx_create) on device " << device_name);
        }
    }
    return NULL;
}
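
// Note: for VAAPI a numeric HW_DEVICE index N is translated to the DRM render node
// "/dev/dri/renderD<128+N>"; for other device types the index is passed to FFmpeg
// as-is. For QSV the device is opened through a child D3D11/DXVA2 (Windows) or
// VAAPI (Linux) context first and then derived to QSV via hw_create_derived_context().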

static
AVBufferRef* hw_create_frames(struct AVCodecContext* codec_ctx, AVBufferRef *hw_device_ctx, int width, int height, AVPixelFormat hw_format)
{
    AVHWDeviceContext *device_ctx = (AVHWDeviceContext*)hw_device_ctx->data;
    AVBufferRef* child_ctx = hw_device_ctx;
    // In QSV case we first allocate child D3D11/VAAPI frames (except DXVA2 as no OpenCL interop), then derive to parent QSV frames
    if (AV_HWDEVICE_TYPE_QSV == device_ctx->type) {
        AVBufferRef *ctx = (AVBufferRef *) device_ctx->user_opaque; // child context stored during creation of derived context
        if (ctx && AV_HWDEVICE_TYPE_DXVA2 != ((AVHWDeviceContext *) ctx->data)->type) {
            child_ctx = ctx;
        }
    }
    AVBufferRef *hw_frames_ref = nullptr;
    if (codec_ctx)
    {
        int res = avcodec_get_hw_frames_parameters(codec_ctx, child_ctx, hw_format, &hw_frames_ref);
        if (res < 0)
        {
            CV_LOG_DEBUG(NULL, "FFMPEG: avcodec_get_hw_frames_parameters() call failed: " << res);
        }
    }
    if (!hw_frames_ref)
    {
        hw_frames_ref = av_hwframe_ctx_alloc(child_ctx);
    }
    if (!hw_frames_ref)
    {
        CV_LOG_INFO(NULL, "FFMPEG: Failed to create HW frame context (av_hwframe_ctx_alloc)");
        return NULL;
    }
    AVHWFramesContext *frames_ctx = (AVHWFramesContext *)(hw_frames_ref->data);
    frames_ctx->width = width;
    frames_ctx->height = height;
    if (frames_ctx->format == AV_PIX_FMT_NONE) {
        if (child_ctx == hw_device_ctx) {
            frames_ctx->format = hw_format;
        }
        else {
            AVHWFramesConstraints* constraints = av_hwdevice_get_hwframe_constraints(child_ctx, NULL);
            if (constraints) {
                frames_ctx->format = constraints->valid_hw_formats[0];
                av_hwframe_constraints_free(&constraints);
            }
        }
    }
    if (frames_ctx->sw_format == AV_PIX_FMT_NONE)
        frames_ctx->sw_format = HW_DEFAULT_SW_FORMAT;
    if (frames_ctx->initial_pool_size == 0)
        frames_ctx->initial_pool_size = HW_DEFAULT_POOL_SIZE;

#ifdef HAVE_D3D11
    if (frames_ctx->device_ctx && AV_HWDEVICE_TYPE_D3D11VA == frames_ctx->device_ctx->type) {
        // BindFlags
        AVD3D11VAFramesContext* frames_hwctx = (AVD3D11VAFramesContext*)frames_ctx->hwctx;
        frames_hwctx->BindFlags |= D3D11_BIND_DECODER | D3D11_BIND_VIDEO_ENCODER;
        // See function hw_get_d3d11_single_texture(), it allocates additional ID3D11Texture2D texture and
        // attaches it as 'user_opaque' field. We have to set free() callback before av_hwframe_ctx_init() call.
        struct D3D11SingleTexture {
            static void free(struct AVHWFramesContext* ctx) {
                ID3D11Texture2D* singleTexture = (ID3D11Texture2D*)ctx->user_opaque;
                if (ctx->user_opaque)
                    singleTexture->Release();
            }
        };
        frames_ctx->free = D3D11SingleTexture::free;
    }
#endif

    int res = av_hwframe_ctx_init(hw_frames_ref);
    if (res < 0)
    {
        CV_LOG_INFO(NULL, "FFMPEG: Failed to initialize HW frame context (av_hwframe_ctx_init): " << res);
        av_buffer_unref(&hw_frames_ref);
        return NULL;
    }

    if (child_ctx != hw_device_ctx) {
        AVBufferRef* derived_frame_ctx = NULL;
        int flags = AV_HWFRAME_MAP_READ | AV_HWFRAME_MAP_WRITE;
        res = av_hwframe_ctx_create_derived(&derived_frame_ctx, hw_format, hw_device_ctx, hw_frames_ref, flags);
        av_buffer_unref(&hw_frames_ref);
        if (res < 0)
        {
            CV_LOG_INFO(NULL, "FFMPEG: Failed to create derived HW frame context (av_hwframe_ctx_create_derived): " << res);
            return NULL;
        }
        else {
            ((AVHWFramesContext*)derived_frame_ctx->data)->user_opaque = frames_ctx;
            return derived_frame_ctx;
        }
    }
    else {
        return hw_frames_ref;
    }
}
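
// In the derived (QSV) case the child frames context stays reachable through
// 'user_opaque', so hw_get_va_surface()/hw_get_d3d11_texture() can map a QSV frame
// back to the underlying VAAPI surface or D3D11 texture for OpenCL interop.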

static
bool hw_check_codec(AVCodec* codec, AVHWDeviceType hw_type, const char *disabled_codecs)
{
    CV_Assert(disabled_codecs);
    std::string hw_name = std::string(".") + av_hwdevice_get_type_name(hw_type);
    std::stringstream s_stream(disabled_codecs);
    while (s_stream.good()) {
        std::string name;
        getline(s_stream, name, ',');
        if (name == codec->name || name == hw_name || name == codec->name + hw_name || name == "hw") {
            CV_LOG_INFO(NULL, "FFMPEG: skipping codec " << codec->name << hw_name);
            return false;
        }
    }
    return true;
}

static
AVCodec *hw_find_codec(AVCodecID id, AVHWDeviceType hw_type, int (*check_category)(const AVCodec *), const char *disabled_codecs, AVPixelFormat *hw_pix_fmt) {
    AVCodec *c = 0;
    void *opaque = 0;

    while (NULL != (c = (AVCodec*)av_codec_iterate(&opaque)))
    {
        if (!check_category(c))
            continue;
        if (c->id != id)
            continue;
        if (c->capabilities & AV_CODEC_CAP_EXPERIMENTAL)
            continue;
        if (hw_type != AV_HWDEVICE_TYPE_NONE) {
            AVPixelFormat hw_native_fmt = AV_PIX_FMT_NONE;
#if LIBAVUTIL_BUILD < AV_VERSION_INT(56, 51, 100) // VAAPI encoders support avcodec_get_hw_config() starting ffmpeg 4.3
            if (hw_type == AV_HWDEVICE_TYPE_VAAPI)
                hw_native_fmt = AV_PIX_FMT_VAAPI_VLD;
#endif
            if (hw_type == AV_HWDEVICE_TYPE_CUDA) // CUDA encoders don't support avcodec_get_hw_config()
                hw_native_fmt = AV_PIX_FMT_CUDA;
            if (av_codec_is_encoder(c) && hw_native_fmt != AV_PIX_FMT_NONE && c->pix_fmts) {
                for (int i = 0; c->pix_fmts[i] != AV_PIX_FMT_NONE; i++) {
                    if (c->pix_fmts[i] == hw_native_fmt) {
                        *hw_pix_fmt = hw_native_fmt;
                        if (hw_check_codec(c, hw_type, disabled_codecs))
                            return c;
                    }
                }
            }
            for (int i = 0;; i++) {
                const AVCodecHWConfig *hw_config = avcodec_get_hw_config(c, i);
                if (!hw_config)
                    break;
                if (hw_config->device_type == hw_type) {
                    *hw_pix_fmt = hw_config->pix_fmt;
                    if (hw_check_codec(c, hw_type, disabled_codecs))
                        return c;
                }
            }
        } else {
            return c;
        }
    }

    return NULL;
}

// Callback to select hardware pixel format (not software format) and allocate frame pool (hw_frames_ctx)
static
AVPixelFormat hw_get_format_callback(struct AVCodecContext *ctx, const enum AVPixelFormat * fmt) {
    if (!ctx->hw_device_ctx)
        return fmt[0];
    AVHWDeviceType hw_type = ((AVHWDeviceContext*)ctx->hw_device_ctx->data)->type;
    for (int j = 0;; j++) {
        const AVCodecHWConfig *hw_config = avcodec_get_hw_config(ctx->codec, j);
        if (!hw_config)
            break;
        if (hw_config->device_type == hw_type) {
            for (int i = 0; fmt[i] != AV_PIX_FMT_NONE; i++) {
                if (fmt[i] == hw_config->pix_fmt) {
                    if (hw_config->methods & AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX) {
                        ctx->sw_pix_fmt = HW_DEFAULT_SW_FORMAT;
                        ctx->hw_frames_ctx = hw_create_frames(ctx, ctx->hw_device_ctx, ctx->width, ctx->height, fmt[i]);
                        if (ctx->hw_frames_ctx) {
                            //ctx->sw_pix_fmt = ((AVHWFramesContext *)(ctx->hw_frames_ctx->data))->sw_format;
                            return fmt[i];
                        }
                    }
                }
            }
        }
    }
    CV_LOG_DEBUG(NULL, "FFMPEG: Can't select HW format in 'get_format()' callback, use default");
    return fmt[0];
}
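
// Sketch of the expected decoder wiring (done by the FFmpeg capture backend; shown
// here for illustration only):
//   AVCodecContext* avctx = ...;
//   avctx->hw_device_ctx = av_buffer_ref(hw_device_ctx); // from hw_create_device()
//   avctx->get_format = hw_get_format_callback;          // selects HW format + pool
//   avcodec_open2(avctx, codec, NULL);                   // codec from hw_find_codec()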

// GPU color conversion NV12->BGRA via OpenCL extensions
static bool
hw_copy_frame_to_umat(AVBufferRef* ctx, AVFrame* hw_frame, cv::OutputArray output) {
    CV_UNUSED(hw_frame);
    CV_UNUSED(output);
    if (!ctx)
        return false;

#ifdef HAVE_OPENCL
    try {
        // check that the current OpenCL context was initialized with binding to the same VAAPI/D3D11 context
        AVHWDeviceContext *hw_device_ctx = (AVHWDeviceContext *) ctx->data;
        AVHWDeviceType child_type = hw_check_opencl_context(hw_device_ctx);
        if (child_type == AV_HWDEVICE_TYPE_NONE)
            return false;

#ifdef HAVE_VA_INTEL
        if (child_type == AV_HWDEVICE_TYPE_VAAPI) {
            VADisplay va_display = hw_get_va_display(hw_device_ctx);
            VASurfaceID va_surface = hw_get_va_surface(hw_frame);
            if (va_display && va_surface != VA_INVALID_SURFACE) {
                va_intel::convertFromVASurface(va_display, va_surface, {hw_frame->width, hw_frame->height}, output);
                return true;
            }
        }
#endif

#ifdef HAVE_D3D11
        if (child_type == AV_HWDEVICE_TYPE_D3D11VA) {
            AVD3D11VADeviceContext* d3d11_device_ctx = hw_get_d3d11_device_ctx(hw_device_ctx);
            int subresource = 0;
            ID3D11Texture2D* texture = hw_get_d3d11_texture(hw_frame, &subresource);
            ID3D11Texture2D* singleTexture = hw_get_d3d11_single_texture(hw_frame, d3d11_device_ctx, texture);
            if (texture && singleTexture) {
                // Copy D3D11 sub-texture to D3D11 single texture
                d3d11_device_ctx->device_context->CopySubresourceRegion(singleTexture, 0, 0, 0, 0, texture, subresource, NULL);
                // Copy D3D11 single texture to cv::UMat
                directx::convertFromD3D11Texture2D(singleTexture, output);
                return true;
            }
        }
#endif
    }
    catch (...)
    {
        return false;
    }
#endif // HAVE_OPENCL

    return false;
}
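
// Returning false means zero-copy transfer is not possible (no shared OpenCL
// context, or an unsupported frame type); the caller is then expected to download
// the frame on the CPU path (e.g. via av_hwframe_transfer_data()).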

// GPU color conversion BGRA->NV12 via OpenCL extensions
static bool
hw_copy_umat_to_frame(AVBufferRef* ctx, cv::InputArray input, AVFrame* hw_frame) {
    CV_UNUSED(input);
    CV_UNUSED(hw_frame);
    if (!ctx)
        return false;

#ifdef HAVE_OPENCL
    try {
        // check that the current OpenCL context was initialized with binding to the same VAAPI/D3D11 context
        AVHWDeviceContext *hw_device_ctx = (AVHWDeviceContext *) ctx->data;
        AVHWDeviceType child_type = hw_check_opencl_context(hw_device_ctx);
        if (child_type == AV_HWDEVICE_TYPE_NONE)
            return false;

#ifdef HAVE_VA_INTEL
        if (child_type == AV_HWDEVICE_TYPE_VAAPI) {
            VADisplay va_display = hw_get_va_display(hw_device_ctx);
            VASurfaceID va_surface = hw_get_va_surface(hw_frame);
            if (va_display != NULL && va_surface != VA_INVALID_SURFACE) {
                va_intel::convertToVASurface(va_display, input, va_surface, {hw_frame->width, hw_frame->height});
                return true;
            }
        }
#endif

#ifdef HAVE_D3D11
        if (child_type == AV_HWDEVICE_TYPE_D3D11VA) {
            AVD3D11VADeviceContext* d3d11_device_ctx = hw_get_d3d11_device_ctx(hw_device_ctx);
            int subresource = 0;
            ID3D11Texture2D* texture = hw_get_d3d11_texture(hw_frame, &subresource);
            ID3D11Texture2D* singleTexture = hw_get_d3d11_single_texture(hw_frame, d3d11_device_ctx, texture);
            if (texture && singleTexture) {
                // Copy cv::UMat to D3D11 single texture
                directx::convertToD3D11Texture2D(input, singleTexture);
                // Copy D3D11 single texture to D3D11 sub-texture
                d3d11_device_ctx->device_context->CopySubresourceRegion(texture, subresource, 0, 0, 0, singleTexture, 0, NULL);
                return true;
            }
        }
#endif
    }
    catch (...)
    {
        return false;
    }
#endif // HAVE_OPENCL

    return false;
}

static
VideoAccelerationType hw_type_to_va_type(AVHWDeviceType hw_type) {
    struct HWTypeFFMPEG {
        AVHWDeviceType hw_type;
        VideoAccelerationType va_type;
    } known_hw_types[] = {
        { AV_HWDEVICE_TYPE_D3D11VA, VIDEO_ACCELERATION_D3D11 },
        { AV_HWDEVICE_TYPE_VAAPI, VIDEO_ACCELERATION_VAAPI },
        { AV_HWDEVICE_TYPE_QSV, VIDEO_ACCELERATION_MFX },
        { AV_HWDEVICE_TYPE_CUDA, (VideoAccelerationType)(1 << 11) },
    };
    for (const HWTypeFFMPEG& hw : known_hw_types) {
        if (hw_type == hw.hw_type)
            return hw.va_type;
    }
    return VIDEO_ACCELERATION_NONE;
}
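
// Note: CUDA has no public VideoAccelerationType constant, so it is mapped to a
// reserved bit (1 << 11) outside the documented D3D11/VAAPI/MFX values.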

class HWAccelIterator {
public:
    HWAccelIterator(VideoAccelerationType va_type, bool isEncoder, AVDictionary *dict)
        : hw_type_(AV_HWDEVICE_TYPE_NONE)
    {
        std::string accel_list;
        if (va_type != VIDEO_ACCELERATION_NONE)
        {
            updateAccelList_(accel_list, va_type, isEncoder, dict);
        }
        if (va_type == VIDEO_ACCELERATION_ANY)
        {
            if (!accel_list.empty())
                accel_list += ","; // add no-acceleration case to the end of the list
        }
        CV_LOG_DEBUG(NULL, "FFMPEG: allowed acceleration types (" << getVideoAccelerationName(va_type) << "): '" << accel_list << "'");

        if (accel_list.empty() && va_type != VIDEO_ACCELERATION_NONE && va_type != VIDEO_ACCELERATION_ANY)
        {
            // put the stream into a failed state so good() returns false immediately
            std::string tmp;
            s_stream_ >> tmp;
        }
        else
        {
            s_stream_ = std::istringstream(accel_list);
        }

        if (va_type != VIDEO_ACCELERATION_NONE)
        {
            disabled_codecs_ = isEncoder
                    ? getEncoderDisabledCodecs(dict)
                    : getDecoderDisabledCodecs(dict);
            CV_LOG_DEBUG(NULL, "FFMPEG: disabled codecs: '" << disabled_codecs_ << "'");
        }
    }
    bool good() const
    {
        return s_stream_.good();
    }
    void parse_next()
    {
        getline(s_stream_, hw_type_device_string_, ',');
        size_t index = hw_type_device_string_.find('.');
        if (index != std::string::npos) {
            device_subname_ = hw_type_device_string_.substr(index + 1);
            hw_type_string_ = hw_type_device_string_.substr(0, index);
        } else {
            device_subname_.clear();
            hw_type_string_ = hw_type_device_string_;
        }
        hw_type_ = av_hwdevice_find_type_by_name(hw_type_string_.c_str());
    }
    const std::string& hw_type_device_string() const { return hw_type_device_string_; }
    const std::string& hw_type_string() const { return hw_type_string_; }
    AVHWDeviceType hw_type() const { return hw_type_; }
    const std::string& device_subname() const { return device_subname_; }
    const std::string& disabled_codecs() const { return disabled_codecs_; }
private:
    bool updateAccelList_(std::string& accel_list, VideoAccelerationType va_type, bool isEncoder, AVDictionary *dict)
    {
        std::string new_accels = isEncoder
                ? getEncoderConfiguration(va_type, dict)
                : getDecoderConfiguration(va_type, dict);
        if (new_accels.empty())
            return false;
        if (accel_list.empty())
            accel_list = new_accels;
        else
            accel_list = accel_list + "," + new_accels;
        return true;
    }
    std::istringstream s_stream_;
    std::string hw_type_device_string_;
    std::string hw_type_string_;
    AVHWDeviceType hw_type_;
    std::string device_subname_;

    std::string disabled_codecs_;
};
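
// Typical iteration pattern (a sketch; the actual use lives in the FFmpeg
// capture/writer backends):
//   HWAccelIterator accel_iter(va_type, /*isEncoder=*/false, dict);
//   while (accel_iter.good()) {
//       accel_iter.parse_next();
//       AVHWDeviceType hw_type = accel_iter.hw_type();
//       // an empty hw_type_string() entry means "try without acceleration"
//       ...
//   }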