1 | // Copyright (C) 2019 The Qt Company Ltd. |
2 | // Copyright (C) 2016 Jolla Ltd, author: <gunnar.sletta@jollamobile.com> |
3 | // Copyright (C) 2016 Robin Burchell <robin.burchell@viroteck.net> |
4 | // SPDX-License-Identifier: LicenseRef-Qt-Commercial OR LGPL-3.0-only OR GPL-2.0-only OR GPL-3.0-only |
5 | |
6 | #include "qsgbatchrenderer_p.h" |
7 | |
8 | #include <qmath.h> |
9 | |
10 | #include <QtCore/QElapsedTimer> |
11 | #include <QtCore/QtNumeric> |
12 | |
13 | #include <QtGui/QGuiApplication> |
14 | |
15 | #include <private/qnumeric_p.h> |
16 | #include "qsgmaterialshader_p.h" |
17 | |
18 | #include "qsgrhivisualizer_p.h" |
19 | |
20 | #include <algorithm> |
21 | |
22 | QT_BEGIN_NAMESPACE |
23 | |
24 | #ifndef QT_NO_DEBUG |
25 | Q_QUICK_PRIVATE_EXPORT bool qsg_test_and_clear_material_failure(); |
26 | #endif |
27 | |
28 | int qt_sg_envInt(const char *name, int defaultValue); |
29 | |
30 | namespace QSGBatchRenderer |
31 | { |
32 | |
33 | #define DECLARE_DEBUG_VAR(variable) \ |
34 | static bool debug_ ## variable() \ |
35 | { static bool value = qgetenv("QSG_RENDERER_DEBUG").contains(QT_STRINGIFY(variable)); return value; } |
36 | DECLARE_DEBUG_VAR(render) |
37 | DECLARE_DEBUG_VAR(build) |
38 | DECLARE_DEBUG_VAR(change) |
39 | DECLARE_DEBUG_VAR(upload) |
40 | DECLARE_DEBUG_VAR(roots) |
41 | DECLARE_DEBUG_VAR(dump) |
42 | DECLARE_DEBUG_VAR(noalpha) |
43 | DECLARE_DEBUG_VAR(noopaque) |
44 | DECLARE_DEBUG_VAR(noclip) |
45 | #undef DECLARE_DEBUG_VAR |
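// These debug helpers are driven by the QSG_RENDERER_DEBUG environment variable.
// The check is a plain substring match, so for example
// QSG_RENDERER_DEBUG="render build" enables both debug_render() and debug_build().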
46 | |
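// QSGNODE_TRAVERSE walks the children of a scene graph QSGNode, while
// SHADOWNODE_TRAVERSE walks the children of the renderer's shadow (Node) tree.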
47 | #define QSGNODE_TRAVERSE(NODE) for (QSGNode *child = NODE->firstChild(); child; child = child->nextSibling()) |
48 | #define SHADOWNODE_TRAVERSE(NODE) for (Node *child = NODE->firstChild(); child; child = child->sibling()) |
49 | |
50 | static inline int size_of_type(int type) |
51 | { |
52 | static int sizes[] = { |
53 | sizeof(char), |
54 | sizeof(unsigned char), |
55 | sizeof(short), |
56 | sizeof(unsigned short), |
57 | sizeof(int), |
58 | sizeof(unsigned int), |
59 | sizeof(float), |
60 | 2, |
61 | 3, |
62 | 4, |
63 | sizeof(double) |
64 | }; |
65 | Q_ASSERT(type >= QSGGeometry::ByteType && type <= QSGGeometry::DoubleType); |
66 | return sizes[type - QSGGeometry::ByteType]; |
67 | } |
68 | |
69 | bool qsg_sort_element_increasing_order(Element *a, Element *b) { return a->order < b->order; } |
70 | bool qsg_sort_element_decreasing_order(Element *a, Element *b) { return a->order > b->order; } |
71 | bool qsg_sort_batch_is_valid(Batch *a, Batch *b) { return a->first && !b->first; } |
72 | bool qsg_sort_batch_increasing_order(Batch *a, Batch *b) { return a->first->order < b->first->order; } |
73 | bool qsg_sort_batch_decreasing_order(Batch *a, Batch *b) { return a->first->order > b->first->order; } |
74 | |
75 | QSGMaterial::Flag QSGMaterial_FullMatrix = (QSGMaterial::Flag) (QSGMaterial::RequiresFullMatrix & ~QSGMaterial::RequiresFullMatrixExceptTranslate); |
76 | |
77 | static bool isTranslate(const QMatrix4x4 &m) { return m.flags() <= QMatrix4x4::Translation; } |
78 | static bool isScale(const QMatrix4x4 &m) { return m.flags() <= QMatrix4x4::Scale; } |
79 | static bool is2DSafe(const QMatrix4x4 &m) { return m.flags() < QMatrix4x4::Rotation; } |
80 | |
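// Inherited opacity above this limit is treated as fully opaque; see
// Updater::visitOpacityNode() and Renderer::buildRenderLists().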
81 | const float OPAQUE_LIMIT = 0.999f; |
82 | |
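// A vertex/index buffer that has been re-uploaded more than this many times is
// switched from an immutable to a dynamic QRhiBuffer; see Renderer::unmap().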
83 | const uint DYNAMIC_VERTEX_INDEX_BUFFER_THRESHOLD = 4; |
84 | const int VERTEX_BUFFER_BINDING = 0; |
85 | const int ZORDER_BUFFER_BINDING = VERTEX_BUFFER_BINDING + 1; |
86 | |
87 | const float VIEWPORT_MIN_DEPTH = 0.0f; |
88 | const float VIEWPORT_MAX_DEPTH = 1.0f; |
89 | |
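// Rounds v up to the next multiple of byteAlign. The mask trick below assumes
// byteAlign is a power of two, e.g. aligned(13, 4) == 16.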
90 | template <class Int> |
91 | inline Int aligned(Int v, Int byteAlign) |
92 | { |
93 | return (v + byteAlign - 1) & ~(byteAlign - 1); |
94 | } |
95 | |
96 | QRhiVertexInputAttribute::Format qsg_vertexInputFormat(const QSGGeometry::Attribute &a) |
97 | { |
98 | switch (a.type) { |
99 | case QSGGeometry::FloatType: |
100 | if (a.tupleSize == 4) |
101 | return QRhiVertexInputAttribute::Float4; |
102 | if (a.tupleSize == 3) |
103 | return QRhiVertexInputAttribute::Float3; |
104 | if (a.tupleSize == 2) |
105 | return QRhiVertexInputAttribute::Float2; |
106 | if (a.tupleSize == 1) |
107 | return QRhiVertexInputAttribute::Float; |
108 | break; |
109 | case QSGGeometry::UnsignedByteType: |
110 | if (a.tupleSize == 4) |
111 | return QRhiVertexInputAttribute::UNormByte4; |
112 | if (a.tupleSize == 2) |
113 | return QRhiVertexInputAttribute::UNormByte2; |
114 | if (a.tupleSize == 1) |
115 | return QRhiVertexInputAttribute::UNormByte; |
116 | break; |
117 | default: |
118 | break; |
119 | } |
120 | qWarning(msg: "Unsupported attribute type 0x%x with %d components" , a.type, a.tupleSize); |
121 | Q_UNREACHABLE_RETURN(QRhiVertexInputAttribute::Float); |
122 | } |
123 | |
124 | static QRhiVertexInputLayout calculateVertexInputLayout(const QSGMaterialShader *s, const QSGGeometry *geometry, bool batchable) |
125 | { |
126 | Q_ASSERT(geometry); |
127 | const QSGMaterialShaderPrivate *sd = QSGMaterialShaderPrivate::get(s); |
128 | if (!sd->vertexShader) { |
129 | qWarning(msg: "No vertex shader in QSGMaterialShader %p" , s); |
130 | return QRhiVertexInputLayout(); |
131 | } |
132 | |
133 | const int attrCount = geometry->attributeCount(); |
134 | QVarLengthArray<QRhiVertexInputAttribute, 8> inputAttributes; |
    inputAttributes.reserve(attrCount + 1);
136 | quint32 offset = 0; |
137 | for (int i = 0; i < attrCount; ++i) { |
138 | const QSGGeometry::Attribute &a = geometry->attributes()[i]; |
        if (!sd->vertexShader->vertexInputLocations.contains(a.position)) {
            qWarning("Vertex input %d is present in material but not in shader. This is wrong.",
                     a.position);
        }
        inputAttributes.append(QRhiVertexInputAttribute(VERTEX_BUFFER_BINDING, a.position, qsg_vertexInputFormat(a), offset));
        offset += a.tupleSize * size_of_type(a.type);
145 | } |
146 | if (batchable) { |
        inputAttributes.append(QRhiVertexInputAttribute(ZORDER_BUFFER_BINDING, sd->vertexShader->qt_order_attrib_location,
148 | QRhiVertexInputAttribute::Float, 0)); |
149 | } |
150 | |
151 | Q_ASSERT(VERTEX_BUFFER_BINDING == 0 && ZORDER_BUFFER_BINDING == 1); // not very flexible |
152 | QVarLengthArray<QRhiVertexInputBinding, 2> inputBindings; |
    inputBindings.append(QRhiVertexInputBinding(geometry->sizeOfVertex()));
    if (batchable)
        inputBindings.append(QRhiVertexInputBinding(sizeof(float)));
156 | |
157 | QRhiVertexInputLayout inputLayout; |
    inputLayout.setBindings(inputBindings.cbegin(), inputBindings.cend());
    inputLayout.setAttributes(inputAttributes.cbegin(), inputAttributes.cend());
160 | |
161 | return inputLayout; |
162 | } |
163 | |
164 | QRhiCommandBuffer::IndexFormat qsg_indexFormat(const QSGGeometry *geometry) |
165 | { |
166 | switch (geometry->indexType()) { |
167 | case QSGGeometry::UnsignedShortType: |
168 | return QRhiCommandBuffer::IndexUInt16; |
169 | break; |
170 | case QSGGeometry::UnsignedIntType: |
171 | return QRhiCommandBuffer::IndexUInt32; |
172 | break; |
173 | default: |
174 | Q_UNREACHABLE_RETURN(QRhiCommandBuffer::IndexUInt16); |
175 | } |
176 | } |
177 | |
178 | QRhiGraphicsPipeline::Topology qsg_topology(int geomDrawMode) |
179 | { |
180 | QRhiGraphicsPipeline::Topology topology = QRhiGraphicsPipeline::Triangles; |
181 | switch (geomDrawMode) { |
182 | case QSGGeometry::DrawPoints: |
183 | topology = QRhiGraphicsPipeline::Points; |
184 | break; |
185 | case QSGGeometry::DrawLines: |
186 | topology = QRhiGraphicsPipeline::Lines; |
187 | break; |
188 | case QSGGeometry::DrawLineStrip: |
189 | topology = QRhiGraphicsPipeline::LineStrip; |
190 | break; |
191 | case QSGGeometry::DrawTriangles: |
192 | topology = QRhiGraphicsPipeline::Triangles; |
193 | break; |
194 | case QSGGeometry::DrawTriangleStrip: |
195 | topology = QRhiGraphicsPipeline::TriangleStrip; |
196 | break; |
197 | default: |
198 | qWarning(msg: "Primitive topology 0x%x not supported" , geomDrawMode); |
199 | break; |
200 | } |
201 | return topology; |
202 | } |
203 | |
204 | ShaderManager::Shader *ShaderManager::prepareMaterial(QSGMaterial *material, |
205 | const QSGGeometry *geometry, |
206 | QSGRendererInterface::RenderMode renderMode) |
207 | { |
208 | QSGMaterialType *type = material->type(); |
209 | |
    ShaderKey key = qMakePair(type, renderMode);
    Shader *shader = rewrittenShaders.value(key, nullptr);
212 | if (shader) |
213 | return shader; |
214 | |
215 | shader = new Shader; |
216 | QSGMaterialShader *s = static_cast<QSGMaterialShader *>(material->createShader(renderMode)); |
    context->initializeRhiShader(s, QShader::BatchableVertexShader);
    shader->materialShader = s;
    shader->inputLayout = calculateVertexInputLayout(s, geometry, true);
    QSGMaterialShaderPrivate *sD = QSGMaterialShaderPrivate::get(s);
    shader->stages = {
        { QRhiShaderStage::Vertex, sD->shader(QShader::VertexStage), QShader::BatchableVertexShader },
        { QRhiShaderStage::Fragment, sD->shader(QShader::FragmentStage) }
224 | }; |
225 | |
226 | shader->lastOpacity = 0; |
227 | |
228 | rewrittenShaders[key] = shader; |
229 | return shader; |
230 | } |
231 | |
232 | ShaderManager::Shader *ShaderManager::prepareMaterialNoRewrite(QSGMaterial *material, |
233 | const QSGGeometry *geometry, |
234 | QSGRendererInterface::RenderMode renderMode) |
235 | { |
236 | QSGMaterialType *type = material->type(); |
237 | |
    ShaderKey key = qMakePair(type, renderMode);
    Shader *shader = stockShaders.value(key, nullptr);
240 | if (shader) |
241 | return shader; |
242 | |
243 | shader = new Shader; |
244 | QSGMaterialShader *s = static_cast<QSGMaterialShader *>(material->createShader(renderMode)); |
    context->initializeRhiShader(s, QShader::StandardShader);
    shader->materialShader = s;
    shader->inputLayout = calculateVertexInputLayout(s, geometry, false);
    QSGMaterialShaderPrivate *sD = QSGMaterialShaderPrivate::get(s);
    shader->stages = {
        { QRhiShaderStage::Vertex, sD->shader(QShader::VertexStage) },
        { QRhiShaderStage::Fragment, sD->shader(QShader::FragmentStage) }
252 | }; |
253 | |
254 | shader->lastOpacity = 0; |
255 | |
256 | stockShaders[key] = shader; |
257 | |
258 | return shader; |
259 | } |
260 | |
261 | void ShaderManager::invalidated() |
262 | { |
    qDeleteAll(stockShaders);
    stockShaders.clear();
    qDeleteAll(rewrittenShaders);
    rewrittenShaders.clear();

    qDeleteAll(pipelineCache);
    pipelineCache.clear();

    qDeleteAll(srbPool);
    srbPool.clear();
273 | } |
274 | |
275 | void ShaderManager::clearCachedRendererData() |
276 | { |
    for (ShaderManager::Shader *sms : std::as_const(stockShaders)) {
278 | QSGMaterialShader *s = sms->materialShader; |
279 | if (s) { |
280 | QSGMaterialShaderPrivate *sd = QSGMaterialShaderPrivate::get(s); |
281 | sd->clearCachedRendererData(); |
282 | } |
283 | } |
    for (ShaderManager::Shader *sms : std::as_const(rewrittenShaders)) {
285 | QSGMaterialShader *s = sms->materialShader; |
286 | if (s) { |
287 | QSGMaterialShaderPrivate *sd = QSGMaterialShaderPrivate::get(s); |
288 | sd->clearCachedRendererData(); |
289 | } |
290 | } |
291 | } |
292 | |
293 | void qsg_dumpShadowRoots(BatchRootInfo *i, int indent) |
294 | { |
    static int extraIndent = 0;
    ++extraIndent;

    QByteArray ind(indent + extraIndent + 10, ' ');
299 | |
300 | if (!i) { |
301 | qDebug(msg: "%s - no info" , ind.constData()); |
302 | } else { |
303 | qDebug() << ind.constData() << "- parent:" << i->parentRoot << "orders" << i->firstOrder << "->" << i->lastOrder << ", avail:" << i->availableOrders; |
304 | for (QSet<Node *>::const_iterator it = i->subRoots.constBegin(); |
305 | it != i->subRoots.constEnd(); ++it) { |
306 | qDebug() << ind.constData() << "-" << *it; |
            qsg_dumpShadowRoots((*it)->rootInfo(), indent);
308 | } |
309 | } |
310 | |
311 | --extraIndent; |
312 | } |
313 | |
314 | void qsg_dumpShadowRoots(Node *n) |
315 | { |
316 | #ifndef QT_NO_DEBUG_OUTPUT |
317 | static int indent = 0; |
318 | ++indent; |
319 | |
320 | QByteArray ind(indent, ' '); |
321 | |
322 | if (n->type() == QSGNode::ClipNodeType || n->isBatchRoot) { |
323 | qDebug() << ind.constData() << "[X]" << n->sgNode << Qt::hex << uint(n->sgNode->flags()); |
        qsg_dumpShadowRoots(n->rootInfo(), indent);
325 | } else { |
326 | QDebug d = qDebug(); |
327 | d << ind.constData() << "[ ]" << n->sgNode << Qt::hex << uint(n->sgNode->flags()); |
328 | if (n->type() == QSGNode::GeometryNodeType) |
329 | d << "order" << Qt::dec << n->element()->order; |
330 | } |
331 | |
332 | SHADOWNODE_TRAVERSE(n) |
        qsg_dumpShadowRoots(child);
334 | |
335 | --indent; |
336 | #else |
337 | Q_UNUSED(n); |
338 | #endif |
339 | } |
340 | |
341 | Updater::Updater(Renderer *r) |
342 | : renderer(r) |
343 | , m_roots(32) |
344 | , m_rootMatrices(8) |
345 | { |
    m_roots.add(0);
    m_combined_matrix_stack.add(&m_identityMatrix);
    m_rootMatrices.add(m_identityMatrix);
349 | } |
350 | |
351 | void Updater::updateStates(QSGNode *n) |
352 | { |
353 | m_current_clip = nullptr; |
354 | |
355 | m_added = 0; |
356 | m_transformChange = 0; |
357 | m_opacityChange = 0; |
358 | |
    Node *sn = renderer->m_nodes.value(n, 0);
360 | Q_ASSERT(sn); |
361 | |
362 | if (Q_UNLIKELY(debug_roots())) |
        qsg_dumpShadowRoots(sn);
364 | |
365 | if (Q_UNLIKELY(debug_build())) { |
366 | qDebug(msg: "Updater::updateStates()" ); |
367 | if (sn->dirtyState & (QSGNode::DirtyNodeAdded << 16)) |
368 | qDebug(msg: " - nodes have been added" ); |
369 | if (sn->dirtyState & (QSGNode::DirtyMatrix << 16)) |
370 | qDebug(msg: " - transforms have changed" ); |
371 | if (sn->dirtyState & (QSGNode::DirtyOpacity << 16)) |
372 | qDebug(msg: " - opacity has changed" ); |
373 | if (uint(sn->dirtyState) & uint(QSGNode::DirtyForceUpdate << 16)) |
374 | qDebug(msg: " - forceupdate" ); |
375 | } |
376 | |
377 | if (Q_UNLIKELY(renderer->m_visualizer->mode() == Visualizer::VisualizeChanges)) |
        renderer->m_visualizer->visualizeChangesPrepare(sn);
379 | |
    visitNode(sn);
381 | } |
382 | |
383 | void Updater::visitNode(Node *n) |
384 | { |
385 | if (m_added == 0 && n->dirtyState == 0 && m_force_update == 0 && m_transformChange == 0 && m_opacityChange == 0) |
386 | return; |
387 | |
388 | int count = m_added; |
389 | if (n->dirtyState & QSGNode::DirtyNodeAdded) |
390 | ++m_added; |
391 | |
392 | int force = m_force_update; |
393 | if (n->dirtyState & QSGNode::DirtyForceUpdate) |
394 | ++m_force_update; |
395 | |
396 | switch (n->type()) { |
397 | case QSGNode::OpacityNodeType: |
398 | visitOpacityNode(n); |
399 | break; |
400 | case QSGNode::TransformNodeType: |
401 | visitTransformNode(n); |
402 | break; |
403 | case QSGNode::GeometryNodeType: |
404 | visitGeometryNode(n); |
405 | break; |
406 | case QSGNode::ClipNodeType: |
407 | visitClipNode(n); |
408 | break; |
409 | case QSGNode::RenderNodeType: |
410 | if (m_added) |
411 | n->renderNodeElement()->root = m_roots.last(); |
412 | Q_FALLTHROUGH(); // to visit children |
413 | default: |
414 | SHADOWNODE_TRAVERSE(n) visitNode(n: child); |
415 | break; |
416 | } |
417 | |
418 | m_added = count; |
419 | m_force_update = force; |
420 | n->dirtyState = {}; |
421 | } |
422 | |
423 | void Updater::visitClipNode(Node *n) |
424 | { |
    ClipBatchRootInfo *extra = n->clipInfo();

    QSGClipNode *cn = static_cast<QSGClipNode *>(n->sgNode);

    if (m_roots.last() && m_added > 0)
        renderer->registerBatchRoot(n, m_roots.last());

    cn->setRendererClipList(m_current_clip);
    m_current_clip = cn;
    m_roots << n;
    m_rootMatrices.add(m_rootMatrices.last() * *m_combined_matrix_stack.last());
    extra->matrix = m_rootMatrices.last();
    cn->setRendererMatrix(&extra->matrix);
    m_combined_matrix_stack << &m_identityMatrix;

    SHADOWNODE_TRAVERSE(n) visitNode(child);
441 | |
442 | m_current_clip = cn->clipList(); |
443 | m_rootMatrices.pop_back(); |
444 | m_combined_matrix_stack.pop_back(); |
445 | m_roots.pop_back(); |
446 | } |
447 | |
448 | void Updater::visitOpacityNode(Node *n) |
449 | { |
450 | QSGOpacityNode *on = static_cast<QSGOpacityNode *>(n->sgNode); |
451 | |
452 | qreal combined = m_opacity_stack.last() * on->opacity(); |
453 | on->setCombinedOpacity(combined); |
454 | m_opacity_stack.add(t: combined); |
455 | |
456 | if (m_added == 0 && n->dirtyState & QSGNode::DirtyOpacity) { |
457 | bool was = n->isOpaque; |
458 | bool is = on->opacity() > OPAQUE_LIMIT; |
459 | if (was != is) { |
460 | renderer->m_rebuild = Renderer::FullRebuild; |
461 | n->isOpaque = is; |
462 | } |
463 | ++m_opacityChange; |
464 | SHADOWNODE_TRAVERSE(n) visitNode(n: child); |
465 | --m_opacityChange; |
466 | } else { |
467 | if (m_added > 0) |
468 | n->isOpaque = on->opacity() > OPAQUE_LIMIT; |
469 | SHADOWNODE_TRAVERSE(n) visitNode(n: child); |
470 | } |
471 | |
472 | m_opacity_stack.pop_back(); |
473 | } |
474 | |
475 | void Updater::visitTransformNode(Node *n) |
476 | { |
477 | bool popMatrixStack = false; |
478 | bool popRootStack = false; |
479 | bool dirty = n->dirtyState & QSGNode::DirtyMatrix; |
480 | |
481 | QSGTransformNode *tn = static_cast<QSGTransformNode *>(n->sgNode); |
482 | |
483 | if (n->isBatchRoot) { |
484 | if (m_added > 0 && m_roots.last()) |
485 | renderer->registerBatchRoot(childRoot: n, parentRoot: m_roots.last()); |
486 | tn->setCombinedMatrix(m_rootMatrices.last() * *m_combined_matrix_stack.last() * tn->matrix()); |
487 | |
488 | // The only change in this subtree is ourselves and we are a batch root, so |
489 | // only update subroots and return, saving tons of child-processing (flickable-panning) |
490 | |
491 | if (!n->becameBatchRoot && m_added == 0 && m_force_update == 0 && m_opacityChange == 0 && dirty && (n->dirtyState & ~QSGNode::DirtyMatrix) == 0) { |
492 | BatchRootInfo *info = renderer->batchRootInfo(node: n); |
493 | for (QSet<Node *>::const_iterator it = info->subRoots.constBegin(); |
494 | it != info->subRoots.constEnd(); ++it) { |
495 | updateRootTransforms(n: *it, root: n, combined: tn->combinedMatrix()); |
496 | } |
497 | return; |
498 | } |
499 | |
500 | n->becameBatchRoot = false; |
501 | |
502 | m_combined_matrix_stack.add(t: &m_identityMatrix); |
503 | m_roots.add(t: n); |
504 | m_rootMatrices.add(t: tn->combinedMatrix()); |
505 | |
506 | popMatrixStack = true; |
507 | popRootStack = true; |
508 | } else if (!tn->matrix().isIdentity()) { |
509 | tn->setCombinedMatrix(*m_combined_matrix_stack.last() * tn->matrix()); |
510 | m_combined_matrix_stack.add(t: &tn->combinedMatrix()); |
511 | popMatrixStack = true; |
512 | } else { |
513 | tn->setCombinedMatrix(*m_combined_matrix_stack.last()); |
514 | } |
515 | |
516 | if (dirty) |
517 | ++m_transformChange; |
518 | |
519 | SHADOWNODE_TRAVERSE(n) visitNode(n: child); |
520 | |
521 | if (dirty) |
522 | --m_transformChange; |
523 | if (popMatrixStack) |
524 | m_combined_matrix_stack.pop_back(); |
525 | if (popRootStack) { |
526 | m_roots.pop_back(); |
527 | m_rootMatrices.pop_back(); |
528 | } |
529 | } |
530 | |
531 | void Updater::visitGeometryNode(Node *n) |
532 | { |
533 | QSGGeometryNode *gn = static_cast<QSGGeometryNode *>(n->sgNode); |
534 | |
535 | gn->setRendererMatrix(m_combined_matrix_stack.last()); |
536 | gn->setRendererClipList(m_current_clip); |
537 | gn->setInheritedOpacity(m_opacity_stack.last()); |
538 | |
539 | if (m_added) { |
540 | Element *e = n->element(); |
541 | e->root = m_roots.last(); |
542 | e->translateOnlyToRoot = isTranslate(m: *gn->matrix()); |
543 | |
544 | if (e->root) { |
545 | BatchRootInfo *info = renderer->batchRootInfo(node: e->root); |
546 | while (info != nullptr) { |
547 | info->availableOrders--; |
548 | if (info->availableOrders < 0) { |
549 | renderer->m_rebuild |= Renderer::BuildRenderLists; |
550 | } else { |
551 | renderer->m_rebuild |= Renderer::BuildRenderListsForTaggedRoots; |
552 | renderer->m_taggedRoots << e->root; |
553 | } |
554 | if (info->parentRoot != nullptr) |
555 | info = renderer->batchRootInfo(node: info->parentRoot); |
556 | else |
557 | info = nullptr; |
558 | } |
559 | } else { |
560 | renderer->m_rebuild |= Renderer::FullRebuild; |
561 | } |
562 | } else { |
563 | if (m_transformChange) { |
564 | Element *e = n->element(); |
565 | e->translateOnlyToRoot = isTranslate(m: *gn->matrix()); |
566 | } |
567 | if (m_opacityChange) { |
568 | Element *e = n->element(); |
569 | if (e->batch) |
570 | renderer->invalidateBatchAndOverlappingRenderOrders(batch: e->batch); |
571 | } |
572 | } |
573 | |
574 | SHADOWNODE_TRAVERSE(n) visitNode(n: child); |
575 | } |
576 | |
577 | void Updater::updateRootTransforms(Node *node, Node *root, const QMatrix4x4 &combined) |
578 | { |
579 | BatchRootInfo *info = renderer->batchRootInfo(node); |
580 | QMatrix4x4 m; |
581 | Node *n = node; |
582 | |
583 | while (n != root) { |
584 | if (n->type() == QSGNode::TransformNodeType) |
585 | m = static_cast<QSGTransformNode *>(n->sgNode)->matrix() * m; |
586 | n = n->parent(); |
587 | } |
588 | |
589 | m = combined * m; |
590 | |
591 | if (node->type() == QSGNode::ClipNodeType) { |
592 | static_cast<ClipBatchRootInfo *>(info)->matrix = m; |
593 | } else { |
594 | Q_ASSERT(node->type() == QSGNode::TransformNodeType); |
595 | static_cast<QSGTransformNode *>(node->sgNode)->setCombinedMatrix(m); |
596 | } |
597 | |
598 | for (QSet<Node *>::const_iterator it = info->subRoots.constBegin(); |
599 | it != info->subRoots.constEnd(); ++it) { |
600 | updateRootTransforms(node: *it, root: node, combined: m); |
601 | } |
602 | } |
603 | |
604 | int qsg_positionAttribute(QSGGeometry *g) |
605 | { |
606 | int vaOffset = 0; |
607 | for (int a=0; a<g->attributeCount(); ++a) { |
608 | const QSGGeometry::Attribute &attr = g->attributes()[a]; |
609 | if (attr.isVertexCoordinate && attr.tupleSize == 2 && attr.type == QSGGeometry::FloatType) { |
610 | return vaOffset; |
611 | } |
612 | vaOffset += attr.tupleSize * size_of_type(type: attr.type); |
613 | } |
614 | return -1; |
615 | } |
616 | |
617 | |
618 | void Rect::map(const QMatrix4x4 &matrix) |
619 | { |
620 | const float *m = matrix.constData(); |
621 | if (isScale(m: matrix)) { |
622 | tl.x = tl.x * m[0] + m[12]; |
623 | tl.y = tl.y * m[5] + m[13]; |
624 | br.x = br.x * m[0] + m[12]; |
625 | br.y = br.y * m[5] + m[13]; |
626 | if (tl.x > br.x) |
627 | qSwap(value1&: tl.x, value2&: br.x); |
628 | if (tl.y > br.y) |
629 | qSwap(value1&: tl.y, value2&: br.y); |
630 | } else { |
631 | Pt mtl = tl; |
        Pt mtr = { br.x, tl.y };
        Pt mbl = { tl.x, br.y };
634 | Pt mbr = br; |
635 | |
636 | mtl.map(mat: matrix); |
637 | mtr.map(mat: matrix); |
638 | mbl.map(mat: matrix); |
639 | mbr.map(mat: matrix); |
640 | |
        set(FLT_MAX, FLT_MAX, -FLT_MAX, -FLT_MAX);
642 | (*this) |= mtl; |
643 | (*this) |= mtr; |
644 | (*this) |= mbl; |
645 | (*this) |= mbr; |
646 | } |
647 | } |
648 | |
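// Computes the bounding rect of the node's geometry, mapped by the node's combined
// matrix (relative to its batch root). Non-finite or overflowing coordinates are
// clamped to the full float range below, which makes the element overlap everything.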
649 | void Element::computeBounds() |
650 | { |
651 | Q_ASSERT(!boundsComputed); |
652 | boundsComputed = true; |
653 | |
654 | QSGGeometry *g = node->geometry(); |
655 | int offset = qsg_positionAttribute(g); |
656 | if (offset == -1) { |
657 | // No position attribute means overlaps with everything.. |
        bounds.set(-FLT_MAX, -FLT_MAX, FLT_MAX, FLT_MAX);
659 | return; |
660 | } |
661 | |
    bounds.set(FLT_MAX, FLT_MAX, -FLT_MAX, -FLT_MAX);
663 | char *vd = (char *) g->vertexData() + offset; |
664 | for (int i=0; i<g->vertexCount(); ++i) { |
665 | bounds |= *(Pt *) vd; |
666 | vd += g->sizeOfVertex(); |
667 | } |
    bounds.map(*node->matrix());
669 | |
670 | if (!qt_is_finite(f: bounds.tl.x) || bounds.tl.x == FLT_MAX) |
671 | bounds.tl.x = -FLT_MAX; |
672 | if (!qt_is_finite(f: bounds.tl.y) || bounds.tl.y == FLT_MAX) |
673 | bounds.tl.y = -FLT_MAX; |
674 | if (!qt_is_finite(f: bounds.br.x) || bounds.br.x == -FLT_MAX) |
675 | bounds.br.x = FLT_MAX; |
676 | if (!qt_is_finite(f: bounds.br.y) || bounds.br.y == -FLT_MAX) |
677 | bounds.br.y = FLT_MAX; |
678 | |
679 | Q_ASSERT(bounds.tl.x <= bounds.br.x); |
680 | Q_ASSERT(bounds.tl.y <= bounds.br.y); |
681 | |
682 | boundsOutsideFloatRange = bounds.isOutsideFloatRange(); |
683 | } |
684 | |
685 | BatchCompatibility Batch::isMaterialCompatible(Element *e) const |
686 | { |
687 | Element *n = first; |
688 | // Skip to the first node other than e which has not been removed |
689 | while (n && (n == e || n->removed)) |
690 | n = n->nextInBatch; |
691 | |
692 | // Only 'e' in this batch, so a material change doesn't change anything as long as |
693 | // its blending is still in sync with this batch... |
694 | if (!n) |
695 | return BatchIsCompatible; |
696 | |
697 | QSGMaterial *m = e->node->activeMaterial(); |
698 | QSGMaterial *nm = n->node->activeMaterial(); |
    return (nm->type() == m->type() && nm->compare(m) == 0)
700 | ? BatchIsCompatible |
701 | : BatchBreaksOnCompare; |
702 | } |
703 | |
/*
 * Marks this batch as dirty or, in the case where the geometry node has
 * changed to be incompatible with this batch, returns false so that
 * the caller can mark the entire scene graph for a full rebuild...
 */
709 | bool Batch::geometryWasChanged(QSGGeometryNode *gn) |
710 | { |
711 | Element *e = first; |
712 | Q_ASSERT_X(e, "Batch::geometryWasChanged" , "Batch is expected to 'valid' at this time" ); |
713 | // 'gn' is the first node in the batch, compare against the next one. |
714 | while (e && (e->node == gn || e->removed)) |
715 | e = e->nextInBatch; |
716 | if (!e || e->node->geometry()->attributes() == gn->geometry()->attributes()) { |
717 | needsUpload = true; |
718 | return true; |
719 | } else { |
720 | return false; |
721 | } |
722 | } |
723 | |
724 | void Batch::cleanupRemovedElements() |
725 | { |
726 | if (!needsPurge) |
727 | return; |
728 | |
729 | // remove from front of batch.. |
730 | while (first && first->removed) { |
731 | first = first->nextInBatch; |
732 | } |
733 | |
734 | // Then continue and remove other nodes further out in the batch.. |
735 | if (first) { |
736 | Element *e = first; |
737 | while (e->nextInBatch) { |
738 | if (e->nextInBatch->removed) |
739 | e->nextInBatch = e->nextInBatch->nextInBatch; |
740 | else |
741 | e = e->nextInBatch; |
742 | |
743 | } |
744 | } |
745 | |
746 | needsPurge = false; |
747 | } |
748 | |
749 | /* |
750 | * Iterates through all geometry nodes in this batch and unsets their batch, |
751 | * thus forcing them to be rebuilt |
752 | */ |
753 | void Batch::invalidate() |
754 | { |
755 | cleanupRemovedElements(); |
756 | Element *e = first; |
757 | first = nullptr; |
758 | root = nullptr; |
759 | while (e) { |
760 | e->batch = nullptr; |
761 | Element *n = e->nextInBatch; |
762 | e->nextInBatch = nullptr; |
763 | e = n; |
764 | } |
765 | } |
766 | |
767 | bool Batch::isTranslateOnlyToRoot() const { |
768 | bool only = true; |
769 | Element *e = first; |
770 | while (e && only) { |
771 | only &= e->translateOnlyToRoot; |
772 | e = e->nextInBatch; |
773 | } |
774 | return only; |
775 | } |
776 | |
777 | /* |
778 | * Iterates through all the nodes in the batch and returns true if the |
779 | * nodes are all safe to batch. There are two separate criteria: |
780 | * |
781 | * - The matrix is such that the z component of the result is of no |
782 | * consequence. |
783 | * |
 * - The bounds are inside the stable floating point range. This applies
 *   to desktop only, where we can in that case fall back to unmerged
 *   batching, passing the geometry straight through and just applying
 *   the matrix.
788 | * |
789 | * NOTE: This also means a slight performance impact for geometries which |
790 | * are defined to be outside the stable floating point range and still |
791 | * use single precision float, but given that this implicitly fixes |
792 | * huge lists and tables, it is worth it. |
793 | */ |
794 | bool Batch::isSafeToBatch() const { |
795 | Element *e = first; |
796 | while (e) { |
797 | if (e->boundsOutsideFloatRange) |
798 | return false; |
799 | if (!is2DSafe(m: *e->node->matrix())) |
800 | return false; |
801 | e = e->nextInBatch; |
802 | } |
803 | return true; |
804 | } |
805 | |
806 | static int qsg_countNodesInBatch(const Batch *batch) |
807 | { |
808 | int sum = 0; |
809 | Element *e = batch->first; |
810 | while (e) { |
811 | ++sum; |
812 | e = e->nextInBatch; |
813 | } |
814 | return sum; |
815 | } |
816 | |
817 | static int qsg_countNodesInBatches(const QDataBuffer<Batch *> &batches) |
818 | { |
819 | int sum = 0; |
820 | for (int i=0; i<batches.size(); ++i) { |
821 | sum += qsg_countNodesInBatch(batch: batches.at(i)); |
822 | } |
823 | return sum; |
824 | } |
825 | |
826 | Renderer::Renderer(QSGDefaultRenderContext *ctx, QSGRendererInterface::RenderMode renderMode) |
827 | : QSGRenderer(ctx) |
828 | , m_context(ctx) |
829 | , m_renderMode(renderMode) |
830 | , m_opaqueRenderList(64) |
831 | , m_alphaRenderList(64) |
832 | , m_nextRenderOrder(0) |
833 | , m_partialRebuild(false) |
834 | , m_partialRebuildRoot(nullptr) |
835 | , m_forceNoDepthBuffer(false) |
836 | , m_opaqueBatches(16) |
837 | , m_alphaBatches(16) |
838 | , m_batchPool(16) |
839 | , m_elementsToDelete(64) |
840 | , m_tmpAlphaElements(16) |
841 | , m_tmpOpaqueElements(16) |
842 | , m_rebuild(FullRebuild) |
843 | , m_zRange(0) |
844 | #if defined(QSGBATCHRENDERER_INVALIDATE_WEDGED_NODES) |
845 | , m_renderOrderRebuildLower(-1) |
846 | , m_renderOrderRebuildUpper(-1) |
847 | #endif |
848 | , m_currentMaterial(nullptr) |
849 | , m_currentShader(nullptr) |
850 | , m_vertexUploadPool(256) |
851 | , m_indexUploadPool(64) |
852 | { |
853 | m_rhi = m_context->rhi(); |
854 | Q_ASSERT(m_rhi); // no more direct OpenGL code path in Qt 6 |
855 | |
856 | m_ubufAlignment = m_rhi->ubufAlignment(); |
857 | |
    m_uint32IndexForRhi = !m_rhi->isFeatureSupported(QRhi::NonFourAlignedEffectiveIndexBufferOffset);
    if (qEnvironmentVariableIntValue("QSG_RHI_UINT32_INDEX"))
860 | m_uint32IndexForRhi = true; |
861 | |
862 | m_visualizer = new RhiVisualizer(this); |
863 | |
864 | setNodeUpdater(new Updater(this)); |
865 | |
866 | // The shader manager is shared between renderers (think for example Item |
867 | // layers that create a new Renderer each) with the same rendercontext (and |
868 | // so same QRhi). |
    m_shaderManager = ctx->findChild<ShaderManager *>(QString(), Qt::FindDirectChildrenOnly);
    if (!m_shaderManager) {
        m_shaderManager = new ShaderManager(ctx);
        m_shaderManager->setObjectName(QStringLiteral("__qt_ShaderManager"));
        m_shaderManager->setParent(ctx);
        QObject::connect(ctx, SIGNAL(invalidated()), m_shaderManager, SLOT(invalidated()), Qt::DirectConnection);
875 | } |
876 | |
    m_batchNodeThreshold = qt_sg_envInt("QSG_RENDERER_BATCH_NODE_THRESHOLD", 64);
    m_batchVertexThreshold = qt_sg_envInt("QSG_RENDERER_BATCH_VERTEX_THRESHOLD", 1024);
    m_srbPoolThreshold = qt_sg_envInt("QSG_RENDERER_SRB_POOL_THRESHOLD", 1024);
880 | |
881 | if (Q_UNLIKELY(debug_build() || debug_render())) { |
882 | qDebug(msg: "Batch thresholds: nodes: %d vertices: %d Srb pool threshold: %d" , |
883 | m_batchNodeThreshold, m_batchVertexThreshold, m_srbPoolThreshold); |
884 | } |
885 | } |
886 | |
887 | static void qsg_wipeBuffer(Buffer *buffer) |
888 | { |
889 | delete buffer->buf; |
890 | |
    // The free here is ok because we're in one of two situations.
    // 1. We're using the upload pool in which case unmap will have set the
    //    data pointer to 0 and calling free on 0 is ok.
    // 2. We're using dedicated buffers because of visualization or the IBO
    //    workaround, and the data is something we malloc'ed and must be freed.
    free(buffer->data);
897 | } |
898 | |
899 | static void qsg_wipeBatch(Batch *batch) |
900 | { |
901 | qsg_wipeBuffer(buffer: &batch->vbo); |
902 | qsg_wipeBuffer(buffer: &batch->ibo); |
903 | delete batch->ubuf; |
904 | batch->stencilClipState.reset(); |
905 | delete batch; |
906 | } |
907 | |
908 | Renderer::~Renderer() |
909 | { |
910 | if (m_rhi) { |
911 | // Clean up batches and buffers |
912 | for (int i = 0; i < m_opaqueBatches.size(); ++i) |
913 | qsg_wipeBatch(batch: m_opaqueBatches.at(i)); |
914 | for (int i = 0; i < m_alphaBatches.size(); ++i) |
915 | qsg_wipeBatch(batch: m_alphaBatches.at(i)); |
916 | for (int i = 0; i < m_batchPool.size(); ++i) |
917 | qsg_wipeBatch(batch: m_batchPool.at(i)); |
918 | } |
919 | |
920 | for (Node *n : std::as_const(t&: m_nodes)) { |
921 | if (n->type() == QSGNode::GeometryNodeType) { |
922 | Element *e = n->element(); |
923 | if (!e->removed) |
924 | m_elementsToDelete.add(t: e); |
925 | } |
926 | m_nodeAllocator.release(t: n); |
927 | } |
928 | |
929 | // Remaining elements... |
930 | for (int i=0; i<m_elementsToDelete.size(); ++i) |
931 | releaseElement(e: m_elementsToDelete.at(i), inDestructor: true); |
932 | |
933 | destroyGraphicsResources(); |
934 | |
935 | delete m_visualizer; |
936 | } |
937 | |
938 | void Renderer::destroyGraphicsResources() |
939 | { |
940 | // If this is from the dtor, then the shader manager and its already |
941 | // prepared shaders will stay around for other renderers -> the cached data |
    // in the rhi shaders has to be purged as it may refer to samplers we
943 | // are going to destroy. |
944 | m_shaderManager->clearCachedRendererData(); |
945 | |
946 | qDeleteAll(c: m_samplers); |
947 | m_stencilClipCommon.reset(); |
948 | delete m_dummyTexture; |
949 | m_visualizer->releaseResources(); |
950 | } |
951 | |
952 | void Renderer::releaseCachedResources() |
953 | { |
954 | m_shaderManager->invalidated(); |
955 | |
956 | destroyGraphicsResources(); |
957 | |
958 | m_samplers.clear(); |
959 | m_dummyTexture = nullptr; |
960 | |
961 | m_rhi->releaseCachedResources(); |
962 | |
963 | m_vertexUploadPool.shrink(size: 0); |
964 | m_vertexUploadPool.reset(); |
965 | m_indexUploadPool.shrink(size: 0); |
966 | m_indexUploadPool.reset(); |
967 | } |
968 | |
969 | void Renderer::invalidateAndRecycleBatch(Batch *b) |
970 | { |
971 | b->invalidate(); |
972 | for (int i=0; i<m_batchPool.size(); ++i) |
973 | if (b == m_batchPool.at(i)) |
974 | return; |
975 | m_batchPool.add(t: b); |
976 | } |
977 | |
978 | void Renderer::map(Buffer *buffer, quint32 byteSize, bool isIndexBuf) |
979 | { |
980 | if (m_visualizer->mode() == Visualizer::VisualizeNothing) { |
        // Common case, use a shared memory pool for uploading vertex data to avoid
        // excessive reallocation
983 | QDataBuffer<char> &pool = isIndexBuf ? m_indexUploadPool : m_vertexUploadPool; |
984 | if (byteSize > quint32(pool.size())) |
985 | pool.resize(size: byteSize); |
986 | buffer->data = pool.data(); |
987 | } else if (buffer->size != byteSize) { |
988 | free(ptr: buffer->data); |
989 | buffer->data = (char *) malloc(size: byteSize); |
990 | Q_CHECK_PTR(buffer->data); |
991 | } |
992 | buffer->size = byteSize; |
993 | } |
994 | |
995 | void Renderer::unmap(Buffer *buffer, bool isIndexBuf) |
996 | { |
    // Batches are pooled and reused, which means the QRhiBuffer will still be
    // valid in a recycled Batch. We only hit the newBuffer() path for brand
    // new Batches.
1000 | if (!buffer->buf) { |
1001 | buffer->buf = m_rhi->newBuffer(type: QRhiBuffer::Immutable, |
1002 | usage: isIndexBuf ? QRhiBuffer::IndexBuffer : QRhiBuffer::VertexBuffer, |
1003 | size: buffer->size); |
1004 | if (!buffer->buf->create()) { |
1005 | qWarning(msg: "Failed to build vertex/index buffer of size %u" , buffer->size); |
1006 | delete buffer->buf; |
1007 | buffer->buf = nullptr; |
1008 | } |
1009 | } else { |
1010 | bool needsRebuild = false; |
1011 | if (buffer->buf->size() < buffer->size) { |
1012 | buffer->buf->setSize(buffer->size); |
1013 | needsRebuild = true; |
1014 | } |
1015 | if (buffer->buf->type() != QRhiBuffer::Dynamic |
1016 | && buffer->nonDynamicChangeCount > DYNAMIC_VERTEX_INDEX_BUFFER_THRESHOLD) |
1017 | { |
1018 | buffer->buf->setType(QRhiBuffer::Dynamic); |
1019 | buffer->nonDynamicChangeCount = 0; |
1020 | needsRebuild = true; |
1021 | } |
1022 | if (needsRebuild) { |
1023 | if (!buffer->buf->create()) { |
1024 | qWarning(msg: "Failed to (re)build vertex/index buffer of size %u" , buffer->size); |
1025 | delete buffer->buf; |
1026 | buffer->buf = nullptr; |
1027 | } |
1028 | } |
1029 | } |
1030 | if (buffer->buf) { |
1031 | if (buffer->buf->type() != QRhiBuffer::Dynamic) { |
1032 | m_resourceUpdates->uploadStaticBuffer(buf: buffer->buf, |
1033 | offset: 0, size: buffer->size, data: buffer->data); |
1034 | buffer->nonDynamicChangeCount += 1; |
1035 | } else { |
1036 | m_resourceUpdates->updateDynamicBuffer(buf: buffer->buf, offset: 0, size: buffer->size, |
1037 | data: buffer->data); |
1038 | } |
1039 | } |
1040 | if (m_visualizer->mode() == Visualizer::VisualizeNothing) |
1041 | buffer->data = nullptr; |
1042 | } |
1043 | |
1044 | BatchRootInfo *Renderer::batchRootInfo(Node *node) |
1045 | { |
1046 | BatchRootInfo *info = node->rootInfo(); |
1047 | if (!info) { |
1048 | if (node->type() == QSGNode::ClipNodeType) |
1049 | info = new ClipBatchRootInfo; |
1050 | else { |
1051 | Q_ASSERT(node->type() == QSGNode::TransformNodeType); |
1052 | info = new BatchRootInfo; |
1053 | } |
1054 | node->data = info; |
1055 | } |
1056 | return info; |
1057 | } |
1058 | |
1059 | void Renderer::removeBatchRootFromParent(Node *childRoot) |
1060 | { |
1061 | BatchRootInfo *childInfo = batchRootInfo(node: childRoot); |
1062 | if (!childInfo->parentRoot) |
1063 | return; |
1064 | BatchRootInfo *parentInfo = batchRootInfo(node: childInfo->parentRoot); |
1065 | |
1066 | Q_ASSERT(parentInfo->subRoots.contains(childRoot)); |
1067 | parentInfo->subRoots.remove(value: childRoot); |
1068 | childInfo->parentRoot = nullptr; |
1069 | } |
1070 | |
1071 | void Renderer::registerBatchRoot(Node *subRoot, Node *parentRoot) |
1072 | { |
1073 | BatchRootInfo *subInfo = batchRootInfo(node: subRoot); |
1074 | BatchRootInfo *parentInfo = batchRootInfo(node: parentRoot); |
1075 | subInfo->parentRoot = parentRoot; |
1076 | parentInfo->subRoots << subRoot; |
1077 | } |
1078 | |
1079 | bool Renderer::changeBatchRoot(Node *node, Node *root) |
1080 | { |
1081 | BatchRootInfo *subInfo = batchRootInfo(node); |
1082 | if (subInfo->parentRoot == root) |
1083 | return false; |
1084 | if (subInfo->parentRoot) { |
1085 | BatchRootInfo *oldRootInfo = batchRootInfo(node: subInfo->parentRoot); |
1086 | oldRootInfo->subRoots.remove(value: node); |
1087 | } |
1088 | BatchRootInfo *newRootInfo = batchRootInfo(node: root); |
1089 | newRootInfo->subRoots << node; |
1090 | subInfo->parentRoot = root; |
1091 | return true; |
1092 | } |
1093 | |
1094 | void Renderer::nodeChangedBatchRoot(Node *node, Node *root) |
1095 | { |
1096 | if (node->type() == QSGNode::ClipNodeType || node->isBatchRoot) { |
1097 | // When we reach a batchroot, we only need to update it. Its subtree |
1098 | // is relative to that root, so no need to recurse further. |
1099 | changeBatchRoot(node, root); |
1100 | return; |
1101 | } else if (node->type() == QSGNode::GeometryNodeType) { |
1102 | // Only need to change the root as nodeChanged anyway flags a full update. |
1103 | Element *e = node->element(); |
1104 | if (e) { |
1105 | e->root = root; |
1106 | e->boundsComputed = false; |
1107 | } |
1108 | } else if (node->type() == QSGNode::RenderNodeType) { |
1109 | RenderNodeElement *e = node->renderNodeElement(); |
1110 | if (e) |
1111 | e->root = root; |
1112 | } |
1113 | |
1114 | SHADOWNODE_TRAVERSE(node) |
1115 | nodeChangedBatchRoot(node: child, root); |
1116 | } |
1117 | |
1118 | void Renderer::nodeWasTransformed(Node *node, int *vertexCount) |
1119 | { |
1120 | if (node->type() == QSGNode::GeometryNodeType) { |
1121 | QSGGeometryNode *gn = static_cast<QSGGeometryNode *>(node->sgNode); |
1122 | *vertexCount += gn->geometry()->vertexCount(); |
1123 | Element *e = node->element(); |
1124 | if (e) { |
1125 | e->boundsComputed = false; |
1126 | if (e->batch) { |
1127 | if (!e->batch->isOpaque) { |
1128 | invalidateBatchAndOverlappingRenderOrders(batch: e->batch); |
1129 | } else if (e->batch->merged) { |
1130 | e->batch->needsUpload = true; |
1131 | } |
1132 | } |
1133 | } |
1134 | } |
1135 | |
1136 | SHADOWNODE_TRAVERSE(node) |
1137 | nodeWasTransformed(node: child, vertexCount); |
1138 | } |
1139 | |
1140 | void Renderer::nodeWasAdded(QSGNode *node, Node *shadowParent) |
1141 | { |
1142 | Q_ASSERT(!m_nodes.contains(node)); |
1143 | if (node->isSubtreeBlocked()) |
1144 | return; |
1145 | |
1146 | Node *snode = m_nodeAllocator.allocate(); |
1147 | snode->sgNode = node; |
1148 | m_nodes.insert(key: node, value: snode); |
1149 | if (shadowParent) |
1150 | shadowParent->append(child: snode); |
1151 | |
1152 | if (node->type() == QSGNode::GeometryNodeType) { |
1153 | snode->data = m_elementAllocator.allocate(); |
1154 | snode->element()->setNode(static_cast<QSGGeometryNode *>(node)); |
1155 | |
1156 | } else if (node->type() == QSGNode::ClipNodeType) { |
1157 | snode->data = new ClipBatchRootInfo; |
1158 | m_rebuild |= FullRebuild; |
1159 | |
1160 | } else if (node->type() == QSGNode::RenderNodeType) { |
1161 | QSGRenderNode *rn = static_cast<QSGRenderNode *>(node); |
1162 | RenderNodeElement *e = new RenderNodeElement(rn); |
1163 | snode->data = e; |
1164 | Q_ASSERT(!m_renderNodeElements.contains(rn)); |
1165 | m_renderNodeElements.insert(key: e->renderNode, value: e); |
1166 | if (!rn->flags().testFlag(flag: QSGRenderNode::DepthAwareRendering)) |
1167 | m_forceNoDepthBuffer = true; |
1168 | m_rebuild |= FullRebuild; |
1169 | } |
1170 | |
1171 | QSGNODE_TRAVERSE(node) |
1172 | nodeWasAdded(node: child, shadowParent: snode); |
1173 | } |
1174 | |
1175 | void Renderer::nodeWasRemoved(Node *node) |
1176 | { |
1177 | // Prefix traversal as removeBatchRootFromParent below removes nodes |
1178 | // in a bottom-up manner. Note that we *cannot* use SHADOWNODE_TRAVERSE |
1179 | // here, because we delete 'child' (when recursed, down below), so we'd |
1180 | // have a use-after-free. |
1181 | { |
1182 | Node *child = node->firstChild(); |
1183 | while (child) { |
1184 | // Remove (and delete) child |
1185 | node->remove(child); |
1186 | nodeWasRemoved(node: child); |
1187 | child = node->firstChild(); |
1188 | } |
1189 | } |
1190 | |
1191 | if (node->type() == QSGNode::GeometryNodeType) { |
1192 | Element *e = node->element(); |
1193 | if (e) { |
1194 | e->removed = true; |
1195 | m_elementsToDelete.add(t: e); |
1196 | e->node = nullptr; |
1197 | if (e->root) { |
1198 | BatchRootInfo *info = batchRootInfo(node: e->root); |
1199 | info->availableOrders++; |
1200 | } |
1201 | if (e->batch) { |
1202 | e->batch->needsUpload = true; |
1203 | e->batch->needsPurge = true; |
1204 | } |
1205 | |
1206 | } |
1207 | |
1208 | } else if (node->type() == QSGNode::ClipNodeType) { |
1209 | removeBatchRootFromParent(childRoot: node); |
1210 | delete node->clipInfo(); |
1211 | m_rebuild |= FullRebuild; |
1212 | m_taggedRoots.remove(value: node); |
1213 | |
1214 | } else if (node->isBatchRoot) { |
1215 | removeBatchRootFromParent(childRoot: node); |
1216 | delete node->rootInfo(); |
1217 | m_rebuild |= FullRebuild; |
1218 | m_taggedRoots.remove(value: node); |
1219 | |
1220 | } else if (node->type() == QSGNode::RenderNodeType) { |
1221 | RenderNodeElement *e = m_renderNodeElements.take(key: static_cast<QSGRenderNode *>(node->sgNode)); |
1222 | if (e) { |
1223 | e->removed = true; |
1224 | m_elementsToDelete.add(t: e); |
1225 | if (m_renderNodeElements.isEmpty()) |
1226 | m_forceNoDepthBuffer = false; |
1227 | |
1228 | if (e->batch != nullptr) |
1229 | e->batch->needsPurge = true; |
1230 | } |
1231 | } |
1232 | |
1233 | Q_ASSERT(m_nodes.contains(node->sgNode)); |
1234 | |
1235 | m_nodeAllocator.release(t: m_nodes.take(key: node->sgNode)); |
1236 | } |
1237 | |
1238 | void Renderer::turnNodeIntoBatchRoot(Node *node) |
1239 | { |
    if (Q_UNLIKELY(debug_change())) qDebug(" - new batch root");
1241 | m_rebuild |= FullRebuild; |
1242 | node->isBatchRoot = true; |
1243 | node->becameBatchRoot = true; |
1244 | |
1245 | Node *p = node->parent(); |
1246 | while (p) { |
1247 | if (p->type() == QSGNode::ClipNodeType || p->isBatchRoot) { |
1248 | registerBatchRoot(subRoot: node, parentRoot: p); |
1249 | break; |
1250 | } |
1251 | p = p->parent(); |
1252 | } |
1253 | |
1254 | SHADOWNODE_TRAVERSE(node) |
1255 | nodeChangedBatchRoot(node: child, root: node); |
1256 | } |
1257 | |
1258 | |
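// Central entry point for change notifications from the scene graph. Updates the
// shadow tree's dirty state and decides how much of the batching must be redone,
// from a simple re-upload up to a FullRebuild.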
1259 | void Renderer::nodeChanged(QSGNode *node, QSGNode::DirtyState state) |
1260 | { |
1261 | #ifndef QT_NO_DEBUG_OUTPUT |
1262 | if (Q_UNLIKELY(debug_change())) { |
1263 | QDebug debug = qDebug(); |
1264 | debug << "dirty:" ; |
1265 | if (state & QSGNode::DirtyGeometry) |
1266 | debug << "Geometry" ; |
1267 | if (state & QSGNode::DirtyMaterial) |
1268 | debug << "Material" ; |
1269 | if (state & QSGNode::DirtyMatrix) |
1270 | debug << "Matrix" ; |
1271 | if (state & QSGNode::DirtyNodeAdded) |
1272 | debug << "Added" ; |
1273 | if (state & QSGNode::DirtyNodeRemoved) |
1274 | debug << "Removed" ; |
1275 | if (state & QSGNode::DirtyOpacity) |
1276 | debug << "Opacity" ; |
1277 | if (state & QSGNode::DirtySubtreeBlocked) |
1278 | debug << "SubtreeBlocked" ; |
1279 | if (state & QSGNode::DirtyForceUpdate) |
1280 | debug << "ForceUpdate" ; |
1281 | |
1282 | // when removed, some parts of the node could already have been destroyed |
1283 | // so don't debug it out. |
1284 | if (state & QSGNode::DirtyNodeRemoved) |
1285 | debug << (void *) node << node->type(); |
1286 | else |
1287 | debug << node; |
1288 | } |
1289 | #endif |
1290 | // As this function calls nodeChanged recursively, we do it at the top |
1291 | // to avoid that any of the others are processed twice. |
1292 | if (state & QSGNode::DirtySubtreeBlocked) { |
1293 | Node *sn = m_nodes.value(key: node); |
1294 | |
1295 | // Force a batch rebuild if this includes an opacity change |
1296 | if (state & QSGNode::DirtyOpacity) |
1297 | m_rebuild |= FullRebuild; |
1298 | |
1299 | bool blocked = node->isSubtreeBlocked(); |
1300 | if (blocked && sn) { |
1301 | nodeChanged(node, state: QSGNode::DirtyNodeRemoved); |
1302 | Q_ASSERT(m_nodes.value(node) == 0); |
1303 | } else if (!blocked && !sn) { |
1304 | nodeChanged(node, state: QSGNode::DirtyNodeAdded); |
1305 | } |
1306 | return; |
1307 | } |
1308 | |
1309 | if (state & QSGNode::DirtyNodeAdded) { |
1310 | if (nodeUpdater()->isNodeBlocked(n: node, root: rootNode())) { |
1311 | QSGRenderer::nodeChanged(node, state); |
1312 | return; |
1313 | } |
1314 | if (node == rootNode()) |
1315 | nodeWasAdded(node, shadowParent: nullptr); |
1316 | else |
1317 | nodeWasAdded(node, shadowParent: m_nodes.value(key: node->parent())); |
1318 | } |
1319 | |
1320 | // Mark this node dirty in the shadow tree. |
1321 | Node *shadowNode = m_nodes.value(key: node); |
1322 | |
1323 | // Blocked subtrees won't have shadow nodes, so we can safely abort |
1324 | // here.. |
1325 | if (!shadowNode) { |
1326 | QSGRenderer::nodeChanged(node, state); |
1327 | return; |
1328 | } |
1329 | |
1330 | shadowNode->dirtyState |= state; |
1331 | |
1332 | if (state & QSGNode::DirtyMatrix && !shadowNode->isBatchRoot) { |
1333 | Q_ASSERT(node->type() == QSGNode::TransformNodeType); |
1334 | if (node->m_subtreeRenderableCount > m_batchNodeThreshold) { |
1335 | turnNodeIntoBatchRoot(node: shadowNode); |
1336 | } else { |
1337 | int vertices = 0; |
1338 | nodeWasTransformed(node: shadowNode, vertexCount: &vertices); |
1339 | if (vertices > m_batchVertexThreshold) { |
1340 | turnNodeIntoBatchRoot(node: shadowNode); |
1341 | } |
1342 | } |
1343 | } |
1344 | |
1345 | if (state & QSGNode::DirtyGeometry && node->type() == QSGNode::GeometryNodeType) { |
1346 | QSGGeometryNode *gn = static_cast<QSGGeometryNode *>(node); |
1347 | Element *e = shadowNode->element(); |
1348 | if (e) { |
1349 | e->boundsComputed = false; |
1350 | Batch *b = e->batch; |
1351 | if (b) { |
1352 | if (!e->batch->geometryWasChanged(gn) || !e->batch->isOpaque) { |
1353 | invalidateBatchAndOverlappingRenderOrders(batch: e->batch); |
1354 | } else { |
1355 | b->needsUpload = true; |
1356 | } |
1357 | } |
1358 | } |
1359 | } |
1360 | |
1361 | if (state & QSGNode::DirtyMaterial && node->type() == QSGNode::GeometryNodeType) { |
1362 | Element *e = shadowNode->element(); |
1363 | if (e) { |
1364 | bool blended = hasMaterialWithBlending(n: static_cast<QSGGeometryNode *>(node)); |
1365 | if (e->isMaterialBlended != blended) { |
1366 | m_rebuild |= Renderer::FullRebuild; |
1367 | e->isMaterialBlended = blended; |
1368 | } else if (e->batch) { |
1369 | if (e->batch->isMaterialCompatible(e) == BatchBreaksOnCompare) |
1370 | invalidateBatchAndOverlappingRenderOrders(batch: e->batch); |
1371 | } else { |
1372 | m_rebuild |= Renderer::BuildBatches; |
1373 | } |
1374 | } |
1375 | } |
1376 | |
1377 | // Mark the shadow tree dirty all the way back to the root... |
1378 | QSGNode::DirtyState dirtyChain = state & (QSGNode::DirtyNodeAdded |
1379 | | QSGNode::DirtyOpacity |
1380 | | QSGNode::DirtyMatrix |
1381 | | QSGNode::DirtySubtreeBlocked |
1382 | | QSGNode::DirtyForceUpdate); |
1383 | if (dirtyChain != 0) { |
1384 | dirtyChain = QSGNode::DirtyState(dirtyChain << 16); |
1385 | Node *sn = shadowNode->parent(); |
1386 | while (sn) { |
1387 | sn->dirtyState |= dirtyChain; |
1388 | sn = sn->parent(); |
1389 | } |
1390 | } |
1391 | |
1392 | // Delete happens at the very end because it deletes the shadownode. |
1393 | if (state & QSGNode::DirtyNodeRemoved) { |
1394 | Node *parent = shadowNode->parent(); |
1395 | if (parent) |
1396 | parent->remove(child: shadowNode); |
1397 | nodeWasRemoved(node: shadowNode); |
1398 | Q_ASSERT(m_nodes.value(node) == 0); |
1399 | } |
1400 | |
1401 | QSGRenderer::nodeChanged(node, state); |
1402 | } |
1403 | |
/*
 * Traverses the tree and builds two lists of geometry nodes. One for
 * the opaque and one for the translucent. These are populated
 * in the order they should visually appear in, meaning first
 * to the back and last to the front.
 *
 * We split opaque and translucent as we can perform different
 * types of reordering / batching strategies on them, depending on
 * their characteristics.
 *
 * Note: It would be tempting to use the shadow nodes instead of the QSGNodes
 * for traversal to avoid hash lookups, but the order of the children
 * is important and they are not preserved in the shadow tree, so we must
 * use the actual QSGNode tree.
 */
1418 | void Renderer::buildRenderLists(QSGNode *node) |
1419 | { |
1420 | if (node->isSubtreeBlocked()) |
1421 | return; |
1422 | |
1423 | Node *shadowNode = m_nodes.value(key: node); |
1424 | Q_ASSERT(shadowNode); |
1425 | |
1426 | if (node->type() == QSGNode::GeometryNodeType) { |
1427 | QSGGeometryNode *gn = static_cast<QSGGeometryNode *>(node); |
1428 | |
1429 | Element *e = shadowNode->element(); |
1430 | Q_ASSERT(e); |
1431 | |
1432 | bool opaque = gn->inheritedOpacity() > OPAQUE_LIMIT && !(gn->activeMaterial()->flags() & QSGMaterial::Blending); |
1433 | if (opaque && useDepthBuffer()) |
1434 | m_opaqueRenderList << e; |
1435 | else |
1436 | m_alphaRenderList << e; |
1437 | |
1438 | e->order = ++m_nextRenderOrder; |
1439 | // Used while rebuilding partial roots. |
1440 | if (m_partialRebuild) |
1441 | e->orphaned = false; |
1442 | |
1443 | } else if (node->type() == QSGNode::ClipNodeType || shadowNode->isBatchRoot) { |
1444 | Q_ASSERT(m_nodes.contains(node)); |
1445 | BatchRootInfo *info = batchRootInfo(node: shadowNode); |
1446 | if (node == m_partialRebuildRoot) { |
1447 | m_nextRenderOrder = info->firstOrder; |
1448 | QSGNODE_TRAVERSE(node) |
1449 | buildRenderLists(node: child); |
1450 | m_nextRenderOrder = info->lastOrder + 1; |
1451 | } else { |
1452 | int currentOrder = m_nextRenderOrder; |
1453 | QSGNODE_TRAVERSE(node) |
1454 | buildRenderLists(node: child); |
1455 | int padding = (m_nextRenderOrder - currentOrder) >> 2; |
1456 | info->firstOrder = currentOrder; |
1457 | info->availableOrders = padding; |
1458 | info->lastOrder = m_nextRenderOrder + padding; |
1459 | m_nextRenderOrder = info->lastOrder; |
1460 | } |
1461 | return; |
1462 | } else if (node->type() == QSGNode::RenderNodeType) { |
1463 | RenderNodeElement *e = shadowNode->renderNodeElement(); |
1464 | m_alphaRenderList << e; |
1465 | e->order = ++m_nextRenderOrder; |
1466 | Q_ASSERT(e); |
1467 | } |
1468 | |
1469 | QSGNODE_TRAVERSE(node) |
1470 | buildRenderLists(node: child); |
1471 | } |
1472 | |
1473 | void Renderer::tagSubRoots(Node *node) |
1474 | { |
1475 | BatchRootInfo *i = batchRootInfo(node); |
1476 | m_taggedRoots << node; |
1477 | for (QSet<Node *>::const_iterator it = i->subRoots.constBegin(); |
1478 | it != i->subRoots.constEnd(); ++it) { |
1479 | tagSubRoots(node: *it); |
1480 | } |
1481 | } |
1482 | |
1483 | static void qsg_addOrphanedElements(QDataBuffer<Element *> &orphans, const QDataBuffer<Element *> &renderList) |
1484 | { |
1485 | orphans.reset(); |
1486 | for (int i=0; i<renderList.size(); ++i) { |
1487 | Element *e = renderList.at(i); |
1488 | if (e && !e->removed) { |
1489 | e->orphaned = true; |
1490 | orphans.add(t: e); |
1491 | } |
1492 | } |
1493 | } |
1494 | |
1495 | static void qsg_addBackOrphanedElements(QDataBuffer<Element *> &orphans, QDataBuffer<Element *> &renderList) |
1496 | { |
1497 | for (int i=0; i<orphans.size(); ++i) { |
1498 | Element *e = orphans.at(i); |
1499 | if (e->orphaned) |
1500 | renderList.add(t: e); |
1501 | } |
1502 | orphans.reset(); |
1503 | } |
1504 | |
1505 | /* |
1506 | * To rebuild the tagged roots, we start by putting all subroots of tagged |
1507 | * roots into the list of tagged roots. This is to make the rest of the |
1508 | * algorithm simpler. |
1509 | * |
1510 | * Second, we invalidate all batches which belong to tagged roots, which now |
1511 | * includes the entire subtree under a given root |
1512 | * |
1513 | * Then we call buildRenderLists for all tagged subroots which do not have |
 * parents which are tagged, i.e., we traverse only the topmost roots.
1515 | * |
1516 | * Then we sort the render lists based on their render order, to restore the |
1517 | * right order for rendering. |
1518 | */ |
1519 | void Renderer::buildRenderListsForTaggedRoots() |
1520 | { |
1521 | // Flag any element that is currently in the render lists, but which |
1522 | // is not in a batch. This happens when we have a partial rebuild |
1523 | // in one sub tree while we have a BuildBatches change in another |
1524 | // isolated subtree. So that batch-building takes into account |
1525 | // these "orphaned" nodes, we flag them now. The ones under tagged |
1526 | // roots will be cleared again. The remaining ones are added into the |
1527 | // render lists so that they contain all visual nodes after the |
1528 | // function completes. |
1529 | qsg_addOrphanedElements(orphans&: m_tmpOpaqueElements, renderList: m_opaqueRenderList); |
1530 | qsg_addOrphanedElements(orphans&: m_tmpAlphaElements, renderList: m_alphaRenderList); |
1531 | |
1532 | // Take a copy now, as we will be adding to this while traversing.. |
1533 | QSet<Node *> roots = m_taggedRoots; |
1534 | for (QSet<Node *>::const_iterator it = roots.constBegin(); |
1535 | it != roots.constEnd(); ++it) { |
1536 | tagSubRoots(node: *it); |
1537 | } |
1538 | |
1539 | for (int i=0; i<m_opaqueBatches.size(); ++i) { |
1540 | Batch *b = m_opaqueBatches.at(i); |
1541 | if (m_taggedRoots.contains(value: b->root)) |
1542 | invalidateAndRecycleBatch(b); |
1543 | |
1544 | } |
1545 | for (int i=0; i<m_alphaBatches.size(); ++i) { |
1546 | Batch *b = m_alphaBatches.at(i); |
1547 | if (m_taggedRoots.contains(value: b->root)) |
1548 | invalidateAndRecycleBatch(b); |
1549 | } |
1550 | |
1551 | m_opaqueRenderList.reset(); |
1552 | m_alphaRenderList.reset(); |
1553 | int maxRenderOrder = m_nextRenderOrder; |
1554 | m_partialRebuild = true; |
    // Traverse each topmost tagged root and rebuild its render lists.
1556 | for (QSet<Node *>::const_iterator it = m_taggedRoots.constBegin(); |
1557 | it != m_taggedRoots.constEnd(); ++it) { |
1558 | Node *root = *it; |
        BatchRootInfo *i = batchRootInfo(root);
        if ((!i->parentRoot || !m_taggedRoots.contains(i->parentRoot))
            && !nodeUpdater()->isNodeBlocked(root->sgNode, rootNode())) {
            m_nextRenderOrder = i->firstOrder;
            m_partialRebuildRoot = root->sgNode;
            buildRenderLists(root->sgNode);
1565 | } |
1566 | } |
1567 | m_partialRebuild = false; |
1568 | m_partialRebuildRoot = nullptr; |
1569 | m_taggedRoots.clear(); |
    m_nextRenderOrder = qMax(m_nextRenderOrder, maxRenderOrder);
1571 | |
1572 | // Add orphaned elements back into the list and then sort it.. |
    qsg_addBackOrphanedElements(m_tmpOpaqueElements, m_opaqueRenderList);
    qsg_addBackOrphanedElements(m_tmpAlphaElements, m_alphaRenderList);

    if (m_opaqueRenderList.size())
        std::sort(&m_opaqueRenderList.first(), &m_opaqueRenderList.last() + 1, qsg_sort_element_decreasing_order);
    if (m_alphaRenderList.size())
        std::sort(&m_alphaRenderList.first(), &m_alphaRenderList.last() + 1, qsg_sort_element_increasing_order);
1580 | |
1581 | } |
1582 | |
1583 | void Renderer::buildRenderListsFromScratch() |
1584 | { |
1585 | m_opaqueRenderList.reset(); |
1586 | m_alphaRenderList.reset(); |
1587 | |
1588 | for (int i=0; i<m_opaqueBatches.size(); ++i) |
        invalidateAndRecycleBatch(m_opaqueBatches.at(i));
    for (int i=0; i<m_alphaBatches.size(); ++i)
        invalidateAndRecycleBatch(m_alphaBatches.at(i));
1592 | m_opaqueBatches.reset(); |
1593 | m_alphaBatches.reset(); |
1594 | |
1595 | m_nextRenderOrder = 0; |
1596 | |
    buildRenderLists(rootNode());
1598 | } |
1599 | |
1600 | void Renderer::invalidateBatchAndOverlappingRenderOrders(Batch *batch) |
1601 | { |
1602 | Q_ASSERT(batch); |
1603 | Q_ASSERT(batch->first); |
1604 | |
1605 | #if defined(QSGBATCHRENDERER_INVALIDATE_WEDGED_NODES) |
1606 | if (m_renderOrderRebuildLower < 0 || batch->first->order < m_renderOrderRebuildLower) |
1607 | m_renderOrderRebuildLower = batch->first->order; |
1608 | if (m_renderOrderRebuildUpper < 0 || batch->lastOrderInBatch > m_renderOrderRebuildUpper) |
1609 | m_renderOrderRebuildUpper = batch->lastOrderInBatch; |
1610 | |
1611 | int first = m_renderOrderRebuildLower; |
1612 | int last = m_renderOrderRebuildUpper; |
1613 | #else |
1614 | int first = batch->first->order; |
1615 | int last = batch->lastOrderInBatch; |
1616 | #endif |
1617 | |
1618 | batch->invalidate(); |
1619 | |
1620 | for (int i=0; i<m_alphaBatches.size(); ++i) { |
1621 | Batch *b = m_alphaBatches.at(i); |
1622 | if (b->first) { |
1623 | int bf = b->first->order; |
1624 | int bl = b->lastOrderInBatch; |
1625 | if (bl > first && bf < last) |
1626 | b->invalidate(); |
1627 | } |
1628 | } |
1629 | |
1630 | m_rebuild |= BuildBatches; |
1631 | } |
1632 | |
/* Clean up the batch list by making it a consecutive list of "valid"
 * batches and moving all invalidated batches to the batch pool.
1635 | */ |
1636 | void Renderer::cleanupBatches(QDataBuffer<Batch *> *batches) { |
1637 | if (batches->size()) { |
        std::stable_sort(&batches->first(), &batches->last() + 1, qsg_sort_batch_is_valid);
        int count = 0;
        while (count < batches->size() && batches->at(count)->first)
            ++count;
        for (int i=count; i<batches->size(); ++i)
            invalidateAndRecycleBatch(batches->at(i));
        batches->resize(count);
1645 | } |
1646 | } |
1647 | |
1648 | void Renderer::prepareOpaqueBatches() |
1649 | { |
1650 | for (int i=m_opaqueRenderList.size() - 1; i >= 0; --i) { |
1651 | Element *ei = m_opaqueRenderList.at(i); |
1652 | if (!ei || ei->batch || ei->node->geometry()->vertexCount() == 0) |
1653 | continue; |
1654 | Batch *batch = newBatch(); |
1655 | batch->first = ei; |
1656 | batch->root = ei->root; |
1657 | batch->isOpaque = true; |
1658 | batch->needsUpload = true; |
        batch->positionAttribute = qsg_positionAttribute(ei->node->geometry());

        m_opaqueBatches.add(batch);
1662 | |
1663 | ei->batch = batch; |
1664 | Element *next = ei; |
1665 | |
1666 | QSGGeometryNode *gni = ei->node; |
1667 | |
1668 | for (int j = i - 1; j >= 0; --j) { |
            Element *ej = m_opaqueRenderList.at(j);
1670 | if (!ej) |
1671 | continue; |
1672 | if (ej->root != ei->root) |
1673 | break; |
1674 | if (ej->batch || ej->node->geometry()->vertexCount() == 0) |
1675 | continue; |
1676 | |
1677 | QSGGeometryNode *gnj = ej->node; |
1678 | |
1679 | if (gni->clipList() == gnj->clipList() |
1680 | && gni->geometry()->drawingMode() == gnj->geometry()->drawingMode() |
1681 | && (gni->geometry()->drawingMode() != QSGGeometry::DrawLines || gni->geometry()->lineWidth() == gnj->geometry()->lineWidth()) |
1682 | && gni->geometry()->attributes() == gnj->geometry()->attributes() |
1683 | && gni->inheritedOpacity() == gnj->inheritedOpacity() |
1684 | && gni->activeMaterial()->type() == gnj->activeMaterial()->type() |
                && gni->activeMaterial()->compare(gnj->activeMaterial()) == 0) {
1686 | ej->batch = batch; |
1687 | next->nextInBatch = ej; |
1688 | next = ej; |
1689 | } |
1690 | } |
1691 | |
1692 | batch->lastOrderInBatch = next->order; |
1693 | } |
1694 | } |
1695 | |
1696 | bool Renderer::checkOverlap(int first, int last, const Rect &bounds) |
1697 | { |
1698 | for (int i=first; i<=last; ++i) { |
1699 | Element *e = m_alphaRenderList.at(i); |
1700 | #if defined(QSGBATCHRENDERER_INVALIDATE_WEDGED_NODES) |
1701 | if (!e || e->batch) |
1702 | #else |
1703 | if (!e) |
1704 | #endif |
1705 | continue; |
1706 | Q_ASSERT(e->boundsComputed); |
        if (e->bounds.intersects(bounds))
1708 | return true; |
1709 | } |
1710 | return false; |
1711 | } |
1712 | |
1713 | /* |
1714 | * |
1715 | * To avoid the O(n^2) checkOverlap check in most cases, we have the |
1716 | * overlapBounds which is the union of all bounding rects to check overlap |
1717 | * for. We know that if it does not overlap, then none of the individual |
1718 | * ones will either. For the typical list case, this results in no calls |
 * to checkOverlap whatsoever. This also ensures that when all consecutive
1720 | * items are matching (such as a table of text), we don't build up an |
1721 | * overlap bounds and thus do not require full overlap checks. |
1722 | */ |
1723 | |
1724 | void Renderer::prepareAlphaBatches() |
1725 | { |
1726 | for (int i=0; i<m_alphaRenderList.size(); ++i) { |
1727 | Element *e = m_alphaRenderList.at(i); |
1728 | if (!e || e->isRenderNode) |
1729 | continue; |
1730 | Q_ASSERT(!e->removed); |
1731 | e->ensureBoundsValid(); |
1732 | } |
1733 | |
1734 | for (int i=0; i<m_alphaRenderList.size(); ++i) { |
1735 | Element *ei = m_alphaRenderList.at(i); |
1736 | if (!ei || ei->batch) |
1737 | continue; |
1738 | |
1739 | if (ei->isRenderNode) { |
1740 | Batch *rnb = newBatch(); |
1741 | rnb->first = ei; |
1742 | rnb->root = ei->root; |
1743 | rnb->isOpaque = false; |
1744 | rnb->isRenderNode = true; |
1745 | ei->batch = rnb; |
            m_alphaBatches.add(rnb);
1747 | continue; |
1748 | } |
1749 | |
1750 | if (ei->node->geometry()->vertexCount() == 0) |
1751 | continue; |
1752 | |
1753 | Batch *batch = newBatch(); |
1754 | batch->first = ei; |
1755 | batch->root = ei->root; |
1756 | batch->isOpaque = false; |
1757 | batch->needsUpload = true; |
        m_alphaBatches.add(batch);
        ei->batch = batch;

        QSGGeometryNode *gni = ei->node;
        batch->positionAttribute = qsg_positionAttribute(gni->geometry());

        Rect overlapBounds;
        overlapBounds.set(FLT_MAX, FLT_MAX, -FLT_MAX, -FLT_MAX);
1766 | |
1767 | Element *next = ei; |
1768 | |
1769 | for (int j = i + 1; j < m_alphaRenderList.size(); ++j) { |
            Element *ej = m_alphaRenderList.at(j);
1771 | if (!ej) |
1772 | continue; |
1773 | if (ej->root != ei->root || ej->isRenderNode) |
1774 | break; |
1775 | if (ej->batch) { |
1776 | #if !defined(QSGBATCHRENDERER_INVALIDATE_WEDGED_NODES) |
1777 | overlapBounds |= ej->bounds; |
1778 | #endif |
1779 | continue; |
1780 | } |
1781 | |
1782 | QSGGeometryNode *gnj = ej->node; |
1783 | if (gnj->geometry()->vertexCount() == 0) |
1784 | continue; |
1785 | |
1786 | if (gni->clipList() == gnj->clipList() |
1787 | && gni->geometry()->drawingMode() == gnj->geometry()->drawingMode() |
1788 | && (gni->geometry()->drawingMode() != QSGGeometry::DrawLines |
1789 | || (gni->geometry()->lineWidth() == gnj->geometry()->lineWidth() |
1790 | // Must not do overlap checks when the line width is not 1, |
1791 | // we have no knowledge how such lines are rasterized. |
1792 | && gni->geometry()->lineWidth() == 1.0f)) |
1793 | && gni->geometry()->attributes() == gnj->geometry()->attributes() |
1794 | && gni->inheritedOpacity() == gnj->inheritedOpacity() |
1795 | && gni->activeMaterial()->type() == gnj->activeMaterial()->type() |
                && gni->activeMaterial()->compare(gnj->activeMaterial()) == 0) {
                if (!overlapBounds.intersects(ej->bounds) || !checkOverlap(i+1, j - 1, ej->bounds)) {
1798 | ej->batch = batch; |
1799 | next->nextInBatch = ej; |
1800 | next = ej; |
1801 | } else { |
1802 | /* When we come across a compatible element which hits an overlap, we |
1803 | * need to stop the batch right away. We cannot add more elements |
1804 | * to the current batch as they will be rendered before the batch that the |
1805 | * current 'ej' will be added to. |
1806 | */ |
1807 | break; |
1808 | } |
1809 | } else { |
1810 | overlapBounds |= ej->bounds; |
1811 | } |
1812 | } |
1813 | |
1814 | batch->lastOrderInBatch = next->order; |
1815 | } |
1816 | |
1817 | |
1818 | } |
1819 | |
1820 | static inline int qsg_fixIndexCount(int iCount, int drawMode) |
1821 | { |
1822 | switch (drawMode) { |
1823 | case QSGGeometry::DrawTriangleStrip: |
1824 | // Merged triangle strips need to contain degenerate triangles at the beginning and end. |
1825 | // One could save 2 uploaded ushorts here by ditching the padding for the front of the |
1826 | // first and the end of the last, but for simplicity, we simply don't care. |
1827 | // Those extra triangles will be skipped while drawing to preserve the strip's parity |
1828 | // anyhow. |
1829 | return iCount + 2; |
1830 | case QSGGeometry::DrawLines: |
1831 | // For lines we drop the last vertex if the number of vertices is uneven. |
1832 | return iCount - (iCount % 2); |
1833 | case QSGGeometry::DrawTriangles: |
1834 | // For triangles we drop trailing vertices until the result is divisible by 3. |
1835 | return iCount - (iCount % 3); |
1836 | default: |
1837 | return iCount; |
1838 | } |
1839 | } |
1840 | |
1841 | static inline float calculateElementZOrder(const Element *e, qreal zRange) |
1842 | { |
1843 | // Clamp the zOrder to within the min and max depth of the viewport. |
    return std::clamp(1.0f - float(e->order * zRange), VIEWPORT_MIN_DEPTH, VIEWPORT_MAX_DEPTH);
1845 | } |
1846 | |
1847 | /* These parameters warrant some explanation... |
1848 | * |
1849 | * vaOffset: The byte offset into the vertex data to the location of the |
1850 | * 2D float point vertex attributes. |
1851 | * |
1852 | * vertexData: destination where the geometry's vertex data should go |
1853 | * |
 * zData: destination of the geometry's injected Z positioning
1855 | * |
1856 | * indexData: destination of the indices for this element |
1857 | * |
1858 | * iBase: The starting index for this element in the batch |
1859 | */ |
1860 | |
1861 | void Renderer::uploadMergedElement(Element *e, int vaOffset, char **vertexData, char **zData, char **indexData, void *iBasePtr, int *indexCount) |
1862 | { |
1863 | if (Q_UNLIKELY(debug_upload())) qDebug() << " - uploading element:" << e << e->node << (void *) *vertexData << (qintptr) (*zData - *vertexData) << (qintptr) (*indexData - *vertexData); |
1864 | QSGGeometry *g = e->node->geometry(); |
1865 | |
1866 | const QMatrix4x4 &localx = *e->node->matrix(); |
1867 | const float *localxdata = localx.constData(); |
1868 | |
1869 | const int vCount = g->vertexCount(); |
1870 | const int vSize = g->sizeOfVertex(); |
    memcpy(*vertexData, g->vertexData(), vSize * vCount);
1872 | |
1873 | // apply vertex transform.. |
1874 | char *vdata = *vertexData + vaOffset; |
1875 | if (localx.flags() == QMatrix4x4::Translation) { |
1876 | for (int i=0; i<vCount; ++i) { |
1877 | Pt *p = (Pt *) vdata; |
1878 | p->x += localxdata[12]; |
1879 | p->y += localxdata[13]; |
1880 | vdata += vSize; |
1881 | } |
1882 | } else if (localx.flags() > QMatrix4x4::Translation) { |
1883 | for (int i=0; i<vCount; ++i) { |
            ((Pt *) vdata)->map(localx);
1885 | vdata += vSize; |
1886 | } |
1887 | } |
1888 | |
1889 | if (useDepthBuffer()) { |
1890 | float *vzorder = (float *) *zData; |
        float zorder = calculateElementZOrder(e, m_zRange);
1892 | for (int i=0; i<vCount; ++i) |
1893 | vzorder[i] = zorder; |
1894 | *zData += vCount * sizeof(float); |
1895 | } |
1896 | |
1897 | int iCount = g->indexCount(); |
1898 | if (m_uint32IndexForRhi) { |
1899 | // can only happen when using the rhi |
1900 | quint32 *iBase = (quint32 *) iBasePtr; |
1901 | quint32 *indices = (quint32 *) *indexData; |
1902 | if (iCount == 0) { |
1903 | iCount = vCount; |
1904 | if (g->drawingMode() == QSGGeometry::DrawTriangleStrip) |
1905 | *indices++ = *iBase; |
1906 | else |
                iCount = qsg_fixIndexCount(iCount, g->drawingMode());

            for (int i=0; i<iCount; ++i)
                indices[i] = *iBase + i;
        } else {
            // source index data in QSGGeometry is always ushort (we would not merge otherwise)
            const quint16 *srcIndices = g->indexDataAsUShort();
            if (g->drawingMode() == QSGGeometry::DrawTriangleStrip)
                *indices++ = *iBase + srcIndices[0];
            else
                iCount = qsg_fixIndexCount(iCount, g->drawingMode());
1918 | |
1919 | for (int i=0; i<iCount; ++i) |
1920 | indices[i] = *iBase + srcIndices[i]; |
1921 | } |
1922 | if (g->drawingMode() == QSGGeometry::DrawTriangleStrip) { |
1923 | indices[iCount] = indices[iCount - 1]; |
1924 | iCount += 2; |
1925 | } |
1926 | *iBase += vCount; |
1927 | } else { |
1928 | // normally batching is only done for ushort index data |
1929 | quint16 *iBase = (quint16 *) iBasePtr; |
1930 | quint16 *indices = (quint16 *) *indexData; |
1931 | if (iCount == 0) { |
1932 | iCount = vCount; |
1933 | if (g->drawingMode() == QSGGeometry::DrawTriangleStrip) |
1934 | *indices++ = *iBase; |
1935 | else |
                iCount = qsg_fixIndexCount(iCount, g->drawingMode());

            for (int i=0; i<iCount; ++i)
                indices[i] = *iBase + i;
        } else {
            const quint16 *srcIndices = g->indexDataAsUShort();
            if (g->drawingMode() == QSGGeometry::DrawTriangleStrip)
                *indices++ = *iBase + srcIndices[0];
            else
                iCount = qsg_fixIndexCount(iCount, g->drawingMode());
1946 | |
1947 | for (int i=0; i<iCount; ++i) |
1948 | indices[i] = *iBase + srcIndices[i]; |
1949 | } |
1950 | if (g->drawingMode() == QSGGeometry::DrawTriangleStrip) { |
1951 | indices[iCount] = indices[iCount - 1]; |
1952 | iCount += 2; |
1953 | } |
1954 | *iBase += vCount; |
1955 | } |
1956 | |
1957 | *vertexData += vCount * vSize; |
1958 | *indexData += iCount * mergedIndexElemSize(); |
1959 | *indexCount += iCount; |
1960 | } |
1961 | |
1962 | QMatrix4x4 qsg_matrixForRoot(Node *node) |
1963 | { |
1964 | if (node->type() == QSGNode::TransformNodeType) |
1965 | return static_cast<QSGTransformNode *>(node->sgNode)->combinedMatrix(); |
1966 | Q_ASSERT(node->type() == QSGNode::ClipNodeType); |
1967 | QSGClipNode *c = static_cast<QSGClipNode *>(node->sgNode); |
1968 | return *c->matrix(); |
1969 | } |
1970 | |
1971 | void Renderer::uploadBatch(Batch *b) |
1972 | { |
1973 | // Early out if nothing has changed in this batch.. |
1974 | if (!b->needsUpload) { |
        if (Q_UNLIKELY(debug_upload())) qDebug() << " Batch:" << b << "already uploaded...";
1976 | return; |
1977 | } |
1978 | |
1979 | if (!b->first) { |
        if (Q_UNLIKELY(debug_upload())) qDebug() << " Batch:" << b << "is invalid...";
1981 | return; |
1982 | } |
1983 | |
1984 | if (b->isRenderNode) { |
        if (Q_UNLIKELY(debug_upload())) qDebug() << " Batch: " << b << "is a render node...";
1986 | return; |
1987 | } |
1988 | |
1989 | // Figure out if we can merge or not, if not, then just render the batch as is.. |
1990 | Q_ASSERT(b->first); |
1991 | Q_ASSERT(b->first->node); |
1992 | |
1993 | QSGGeometryNode *gn = b->first->node; |
1994 | QSGGeometry *g = gn->geometry(); |
1995 | QSGMaterial::Flags flags = gn->activeMaterial()->flags(); |
1996 | bool canMerge = (g->drawingMode() == QSGGeometry::DrawTriangles || g->drawingMode() == QSGGeometry::DrawTriangleStrip || |
1997 | g->drawingMode() == QSGGeometry::DrawLines || g->drawingMode() == QSGGeometry::DrawPoints) |
1998 | && b->positionAttribute >= 0 |
1999 | && (g->indexType() == QSGGeometry::UnsignedShortType && g->indexCount() > 0) |
2000 | && (flags & (QSGMaterial::NoBatching | QSGMaterial_FullMatrix)) == 0 |
2001 | && ((flags & QSGMaterial::RequiresFullMatrixExceptTranslate) == 0 || b->isTranslateOnlyToRoot()) |
2002 | && b->isSafeToBatch(); |
2003 | |
2004 | b->merged = canMerge; |
2005 | |
2006 | // Figure out how much memory we need... |
2007 | b->vertexCount = 0; |
2008 | b->indexCount = 0; |
2009 | int unmergedIndexSize = 0; |
2010 | Element *e = b->first; |
2011 | |
2012 | while (e) { |
2013 | QSGGeometry *eg = e->node->geometry(); |
2014 | b->vertexCount += eg->vertexCount(); |
2015 | int iCount = eg->indexCount(); |
2016 | if (b->merged) { |
2017 | if (iCount == 0) |
2018 | iCount = eg->vertexCount(); |
            iCount = qsg_fixIndexCount(iCount, g->drawingMode());
2020 | } else { |
2021 | const int effectiveIndexSize = m_uint32IndexForRhi ? sizeof(quint32) : eg->sizeOfIndex(); |
2022 | unmergedIndexSize += iCount * effectiveIndexSize; |
2023 | } |
2024 | b->indexCount += iCount; |
2025 | e = e->nextInBatch; |
2026 | } |
2027 | |
2028 | // Abort if there are no vertices in this batch.. We abort this late as |
2029 | // this is a broken usecase which we do not care to optimize for... |
2030 | if (b->vertexCount == 0 || (b->merged && b->indexCount == 0)) |
2031 | return; |
2032 | |
    /* Allocate memory for this batch. Merged batches are divided into three separate blocks
       1. Vertex data for all elements, as they were in the QSGGeometry object, but
          with the transform relative to this batch's root applied. The vertex data
          is otherwise unmodified.
       2. Z data for all elements, derived from each element's "render order".
          This is present for merged data only.
       3. Indices for all elements, as they were in the QSGGeometry object, but
          adjusted so that each index matches its element's position in the
          batch's merged vertex data.
          And for TRIANGLE_STRIPs, we need to insert degenerate triangles between
          each primitive. These are unsigned shorts for merged and arbitrary for
          non-merged.
     */
2045 | int bufferSize = b->vertexCount * g->sizeOfVertex(); |
2046 | int ibufferSize = 0; |
2047 | if (b->merged) { |
2048 | ibufferSize = b->indexCount * mergedIndexElemSize(); |
2049 | if (useDepthBuffer()) |
2050 | bufferSize += b->vertexCount * sizeof(float); |
2051 | } else { |
2052 | ibufferSize = unmergedIndexSize; |
2053 | } |
2054 | |
    map(&b->ibo, ibufferSize, true);
    map(&b->vbo, bufferSize);
2057 | |
2058 | if (Q_UNLIKELY(debug_upload())) qDebug() << " - batch" << b << " first:" << b->first << " root:" |
2059 | << b->root << " merged:" << b->merged << " positionAttribute" << b->positionAttribute |
2060 | << " vbo:" << b->vbo.buf << ":" << b->vbo.size; |
2061 | |
2062 | if (b->merged) { |
2063 | char *vertexData = b->vbo.data; |
2064 | char *zData = vertexData + b->vertexCount * g->sizeOfVertex(); |
2065 | char *indexData = b->ibo.data; |
2066 | |
2067 | quint16 iOffset16 = 0; |
2068 | quint32 iOffset32 = 0; |
2069 | e = b->first; |
2070 | uint verticesInSet = 0; |
2071 | // Start a new set already after 65534 vertices because 0xFFFF may be |
2072 | // used for an always-on primitive restart with some apis (adapt for |
2073 | // uint32 indices as appropriate). |
2074 | const uint verticesInSetLimit = m_uint32IndexForRhi ? 0xfffffffe : 0xfffe; |
2075 | int indicesInSet = 0; |
2076 | b->drawSets.reset(); |
2077 | int drawSetIndices = 0; |
2078 | const char *indexBase = b->ibo.data; |
2079 | b->drawSets << DrawSet(0, zData - vertexData, drawSetIndices); |
2080 | while (e) { |
2081 | verticesInSet += e->node->geometry()->vertexCount(); |
2082 | if (verticesInSet > verticesInSetLimit) { |
2083 | b->drawSets.last().indexCount = indicesInSet; |
2084 | if (g->drawingMode() == QSGGeometry::DrawTriangleStrip) { |
2085 | b->drawSets.last().indices += 1 * mergedIndexElemSize(); |
2086 | b->drawSets.last().indexCount -= 2; |
2087 | } |
2088 | drawSetIndices = indexData - indexBase; |
2089 | b->drawSets << DrawSet(vertexData - b->vbo.data, |
2090 | zData - b->vbo.data, |
2091 | drawSetIndices); |
2092 | iOffset16 = 0; |
2093 | iOffset32 = 0; |
2094 | verticesInSet = e->node->geometry()->vertexCount(); |
2095 | indicesInSet = 0; |
2096 | } |
2097 | void *iBasePtr = &iOffset16; |
2098 | if (m_uint32IndexForRhi) |
2099 | iBasePtr = &iOffset32; |
            uploadMergedElement(e, b->positionAttribute, &vertexData, &zData, &indexData, iBasePtr, &indicesInSet);
2101 | e = e->nextInBatch; |
2102 | } |
2103 | b->drawSets.last().indexCount = indicesInSet; |
2104 | // We skip the very first and very last degenerate triangles since they aren't needed |
2105 | // and the first one would reverse the vertex ordering of the merged strips. |
2106 | if (g->drawingMode() == QSGGeometry::DrawTriangleStrip) { |
2107 | b->drawSets.last().indices += 1 * mergedIndexElemSize(); |
2108 | b->drawSets.last().indexCount -= 2; |
2109 | } |
2110 | } else { |
2111 | char *vboData = b->vbo.data; |
2112 | char *iboData = b->ibo.data; |
2113 | Element *e = b->first; |
2114 | while (e) { |
2115 | QSGGeometry *g = e->node->geometry(); |
2116 | int vbs = g->vertexCount() * g->sizeOfVertex(); |
            memcpy(vboData, g->vertexData(), vbs);
2118 | vboData = vboData + vbs; |
2119 | const int indexCount = g->indexCount(); |
2120 | if (indexCount) { |
2121 | const int effectiveIndexSize = m_uint32IndexForRhi ? sizeof(quint32) : g->sizeOfIndex(); |
2122 | const int ibs = indexCount * effectiveIndexSize; |
2123 | if (g->sizeOfIndex() == effectiveIndexSize) { |
                    memcpy(iboData, g->indexData(), ibs);
2125 | } else { |
2126 | if (g->sizeOfIndex() == sizeof(quint16) && effectiveIndexSize == sizeof(quint32)) { |
2127 | quint16 *src = g->indexDataAsUShort(); |
2128 | quint32 *dst = (quint32 *) iboData; |
2129 | for (int i = 0; i < indexCount; ++i) |
2130 | dst[i] = src[i]; |
2131 | } else { |
                        Q_ASSERT_X(false, "uploadBatch (unmerged)", "uint index with ushort effective index - cannot happen");
2133 | } |
2134 | } |
2135 | iboData += ibs; |
2136 | } |
2137 | e = e->nextInBatch; |
2138 | } |
2139 | } |
2140 | #ifndef QT_NO_DEBUG_OUTPUT |
2141 | if (Q_UNLIKELY(debug_upload())) { |
2142 | const char *vd = b->vbo.data; |
        qDebug() << " -- Vertex Data, count:" << b->vertexCount << " - " << g->sizeOfVertex() << "bytes/vertex";
        for (int i=0; i<b->vertexCount; ++i) {
            QDebug dump = qDebug().nospace();
            dump << " --- " << i << ": ";
            int offset = 0;
            for (int a=0; a<g->attributeCount(); ++a) {
                const QSGGeometry::Attribute &attr = g->attributes()[a];
                dump << attr.position << ":(" << attr.tupleSize << ",";
                if (attr.type == QSGGeometry::FloatType) {
                    dump << "float ";
                    if (attr.isVertexCoordinate)
                        dump << "* ";
                    for (int t=0; t<attr.tupleSize; ++t)
                        dump << *(const float *)(vd + offset + t * sizeof(float)) << " ";
                } else if (attr.type == QSGGeometry::UnsignedByteType) {
                    dump << "ubyte ";
                    for (int t=0; t<attr.tupleSize; ++t)
                        dump << *(const unsigned char *)(vd + offset + t * sizeof(unsigned char)) << " ";
                }
                dump << ") ";
                offset += attr.tupleSize * size_of_type(attr.type);
2164 | } |
2165 | if (b->merged && useDepthBuffer()) { |
2166 | float zorder = ((float*)(b->vbo.data + b->vertexCount * g->sizeOfVertex()))[i]; |
                dump << " Z:(" << zorder << ")";
2168 | } |
2169 | vd += g->sizeOfVertex(); |
2170 | } |
2171 | |
2172 | if (!b->drawSets.isEmpty()) { |
2173 | if (m_uint32IndexForRhi) { |
2174 | const quint32 *id = (const quint32 *) b->ibo.data; |
2175 | { |
2176 | QDebug iDump = qDebug(); |
2177 | iDump << " -- Index Data, count:" << b->indexCount; |
2178 | for (int i=0; i<b->indexCount; ++i) { |
2179 | if ((i % 24) == 0) |
                        iDump << Qt::endl << " --- ";
2181 | iDump << id[i]; |
2182 | } |
2183 | } |
2184 | } else { |
2185 | const quint16 *id = (const quint16 *) b->ibo.data; |
2186 | { |
2187 | QDebug iDump = qDebug(); |
2188 | iDump << " -- Index Data, count:" << b->indexCount; |
2189 | for (int i=0; i<b->indexCount; ++i) { |
2190 | if ((i % 24) == 0) |
                        iDump << Qt::endl << " --- ";
2192 | iDump << id[i]; |
2193 | } |
2194 | } |
2195 | } |
2196 | |
2197 | for (int i=0; i<b->drawSets.size(); ++i) { |
2198 | const DrawSet &s = b->drawSets.at(i); |
2199 | qDebug() << " -- DrawSet: indexCount:" << s.indexCount << " vertices:" << s.vertices << " z:" << s.zorders << " indices:" << s.indices; |
2200 | } |
2201 | } |
2202 | } |
2203 | #endif // QT_NO_DEBUG_OUTPUT |
2204 | |
    unmap(&b->vbo);
    unmap(&b->ibo, true);
2207 | |
    if (Q_UNLIKELY(debug_upload())) qDebug() << " --- vertex/index buffers unmapped, batch upload completed...";
2209 | |
2210 | b->needsUpload = false; |
2211 | |
2212 | if (Q_UNLIKELY(debug_render())) |
2213 | b->uploadedThisFrame = true; |
2214 | } |
2215 | |
2216 | void Renderer::applyClipStateToGraphicsState() |
2217 | { |
2218 | m_gstate.usesScissor = (m_currentClipState.type & ClipState::ScissorClip); |
2219 | m_gstate.stencilTest = (m_currentClipState.type & ClipState::StencilClip); |
2220 | } |
2221 | |
2222 | QRhiGraphicsPipeline *Renderer::buildStencilPipeline(const Batch *batch, bool firstStencilClipInBatch) |
2223 | { |
2224 | QRhiGraphicsPipeline *ps = m_rhi->newGraphicsPipeline(); |
2225 | ps->setFlags(QRhiGraphicsPipeline::UsesStencilRef); |
2226 | QRhiGraphicsPipeline::TargetBlend blend; |
2227 | blend.colorWrite = {}; |
2228 | ps->setTargetBlends({ blend }); |
2229 | ps->setSampleCount(renderTarget().rt->sampleCount()); |
2230 | ps->setStencilTest(true); |
2231 | QRhiGraphicsPipeline::StencilOpState stencilOp; |
2232 | if (firstStencilClipInBatch) { |
2233 | stencilOp.compareOp = QRhiGraphicsPipeline::Always; |
2234 | stencilOp.failOp = QRhiGraphicsPipeline::Keep; |
2235 | stencilOp.depthFailOp = QRhiGraphicsPipeline::Keep; |
2236 | stencilOp.passOp = QRhiGraphicsPipeline::Replace; |
2237 | } else { |
2238 | stencilOp.compareOp = QRhiGraphicsPipeline::Equal; |
2239 | stencilOp.failOp = QRhiGraphicsPipeline::Keep; |
2240 | stencilOp.depthFailOp = QRhiGraphicsPipeline::Keep; |
2241 | stencilOp.passOp = QRhiGraphicsPipeline::IncrementAndClamp; |
2242 | } |
2243 | ps->setStencilFront(stencilOp); |
2244 | ps->setStencilBack(stencilOp); |
2245 | |
2246 | ps->setTopology(m_stencilClipCommon.topology); |
2247 | |
2248 | ps->setShaderStages({ QRhiShaderStage(QRhiShaderStage::Vertex, m_stencilClipCommon.vs), |
2249 | QRhiShaderStage(QRhiShaderStage::Fragment, m_stencilClipCommon.fs) }); |
2250 | ps->setVertexInputLayout(m_stencilClipCommon.inputLayout); |
2251 | ps->setShaderResourceBindings(batch->stencilClipState.srb); // use something, it just needs to be layout-compatible |
2252 | ps->setRenderPassDescriptor(renderTarget().rpDesc); |
2253 | |
2254 | if (!ps->create()) { |
        qWarning("Failed to build stencil clip pipeline");
2256 | delete ps; |
2257 | return nullptr; |
2258 | } |
2259 | |
2260 | return ps; |
2261 | } |
2262 | |
2263 | void Renderer::updateClipState(const QSGClipNode *clipList, Batch *batch) |
2264 | { |
    // Note: No use of the clip-related separate m_current* vars is allowed
2266 | // here. All stored in batch->clipState instead. To collect state during |
2267 | // the prepare steps, m_currentClipState is used. It should not be used in |
2268 | // the render steps afterwards. |
2269 | |
2270 | // The stenciling logic is slightly different from Qt 5's direct OpenGL version |
2271 | // as we cannot just randomly clear the stencil buffer. We now put all clip |
2272 | // shapes into the stencil buffer for all batches in the frame. This means |
2273 | // that the number of total clips in a scene is reduced (since the stencil |
    // value cannot exceed 255) but we do not need any clears in between.
2275 | |
2276 | Q_ASSERT(m_rhi); |
2277 | batch->stencilClipState.updateStencilBuffer = false; |
2278 | if (clipList == m_currentClipState.clipList || Q_UNLIKELY(debug_noclip())) { |
2279 | applyClipStateToGraphicsState(); |
2280 | batch->clipState = m_currentClipState; |
2281 | return; |
2282 | } |
2283 | |
2284 | ClipState::ClipType clipType = ClipState::NoClip; |
2285 | QRect scissorRect; |
2286 | QVarLengthArray<const QSGClipNode *, 4> stencilClipNodes; |
2287 | const QSGClipNode *clip = clipList; |
2288 | |
2289 | batch->stencilClipState.drawCalls.reset(); |
2290 | quint32 totalVSize = 0; |
2291 | quint32 totalISize = 0; |
2292 | quint32 totalUSize = 0; |
2293 | const quint32 StencilClipUbufSize = 64; |
2294 | |
2295 | while (clip) { |
2296 | QMatrix4x4 m = m_current_projection_matrix_native_ndc; |
2297 | if (clip->matrix()) |
2298 | m *= *clip->matrix(); |
2299 | |
        bool isRectangleWithNoPerspective = clip->isRectangular()
                && qFuzzyIsNull(m(3, 0)) && qFuzzyIsNull(m(3, 1));
        bool noRotate = qFuzzyIsNull(m(0, 1)) && qFuzzyIsNull(m(1, 0));
        bool isRotate90 = qFuzzyIsNull(m(0, 0)) && qFuzzyIsNull(m(1, 1));
2304 | |
2305 | if (isRectangleWithNoPerspective && (noRotate || isRotate90)) { |
2306 | QRectF bbox = clip->clipRect(); |
2307 | qreal invW = 1 / m(3, 3); |
2308 | qreal fx1, fy1, fx2, fy2; |
2309 | if (noRotate) { |
2310 | fx1 = (bbox.left() * m(0, 0) + m(0, 3)) * invW; |
2311 | fy1 = (bbox.bottom() * m(1, 1) + m(1, 3)) * invW; |
2312 | fx2 = (bbox.right() * m(0, 0) + m(0, 3)) * invW; |
2313 | fy2 = (bbox.top() * m(1, 1) + m(1, 3)) * invW; |
2314 | } else { |
2315 | Q_ASSERT(isRotate90); |
2316 | fx1 = (bbox.bottom() * m(0, 1) + m(0, 3)) * invW; |
2317 | fy1 = (bbox.left() * m(1, 0) + m(1, 3)) * invW; |
2318 | fx2 = (bbox.top() * m(0, 1) + m(0, 3)) * invW; |
2319 | fy2 = (bbox.right() * m(1, 0) + m(1, 3)) * invW; |
2320 | } |
2321 | |
            if (fx1 > fx2)
                qSwap(fx1, fx2);
            if (fy1 > fy2)
                qSwap(fy1, fy2);

            QRect deviceRect = this->deviceRect();

            qint32 ix1 = qRound((fx1 + 1) * deviceRect.width() * qreal(0.5));
            qint32 iy1 = qRound((fy1 + 1) * deviceRect.height() * qreal(0.5));
            qint32 ix2 = qRound((fx2 + 1) * deviceRect.width() * qreal(0.5));
            qint32 iy2 = qRound((fy2 + 1) * deviceRect.height() * qreal(0.5));
2333 | |
2334 | if (!(clipType & ClipState::ScissorClip)) { |
2335 | clipType |= ClipState::ScissorClip; |
2336 | scissorRect = QRect(ix1, iy1, ix2 - ix1, iy2 - iy1); |
2337 | } else { |
2338 | scissorRect &= QRect(ix1, iy1, ix2 - ix1, iy2 - iy1); |
2339 | } |
2340 | } else { |
2341 | clipType |= ClipState::StencilClip; |
2342 | |
2343 | const QSGGeometry *g = clip->geometry(); |
2344 | Q_ASSERT(g->attributeCount() > 0); |
2345 | |
2346 | const int vertexByteSize = g->sizeOfVertex() * g->vertexCount(); |
2347 | // the 4 byte alignment may not actually be needed here |
            totalVSize = aligned(totalVSize, 4u) + vertexByteSize;
            if (g->indexCount()) {
                const int indexByteSize = g->sizeOfIndex() * g->indexCount();
                // so no need to worry about NonFourAlignedEffectiveIndexBufferOffset
                totalISize = aligned(totalISize, 4u) + indexByteSize;
            }
            // ubuf start offsets must be aligned (typically to 256 bytes)
            totalUSize = aligned(totalUSize, m_ubufAlignment) + StencilClipUbufSize;

            stencilClipNodes.append(clip);
2358 | } |
2359 | |
2360 | clip = clip->clipList(); |
2361 | } |
2362 | |
2363 | if (clipType & ClipState::StencilClip) { |
2364 | bool rebuildVBuf = false; |
2365 | if (!batch->stencilClipState.vbuf) { |
            batch->stencilClipState.vbuf = m_rhi->newBuffer(QRhiBuffer::Dynamic, QRhiBuffer::VertexBuffer, totalVSize);
            rebuildVBuf = true;
        } else if (batch->stencilClipState.vbuf->size() < totalVSize) {
            batch->stencilClipState.vbuf->setSize(totalVSize);
            rebuildVBuf = true;
        }
        if (rebuildVBuf) {
            if (!batch->stencilClipState.vbuf->create()) {
                qWarning("Failed to build stencil clip vertex buffer");
2375 | delete batch->stencilClipState.vbuf; |
2376 | batch->stencilClipState.vbuf = nullptr; |
2377 | return; |
2378 | } |
2379 | } |
2380 | |
2381 | if (totalISize) { |
2382 | bool rebuildIBuf = false; |
2383 | if (!batch->stencilClipState.ibuf) { |
                batch->stencilClipState.ibuf = m_rhi->newBuffer(QRhiBuffer::Dynamic, QRhiBuffer::IndexBuffer, totalISize);
                rebuildIBuf = true;
            } else if (batch->stencilClipState.ibuf->size() < totalISize) {
                batch->stencilClipState.ibuf->setSize(totalISize);
                rebuildIBuf = true;
            }
            if (rebuildIBuf) {
                if (!batch->stencilClipState.ibuf->create()) {
                    qWarning("Failed to build stencil clip index buffer");
2393 | delete batch->stencilClipState.ibuf; |
2394 | batch->stencilClipState.ibuf = nullptr; |
2395 | return; |
2396 | } |
2397 | } |
2398 | } |
2399 | |
2400 | bool rebuildUBuf = false; |
2401 | if (!batch->stencilClipState.ubuf) { |
            batch->stencilClipState.ubuf = m_rhi->newBuffer(QRhiBuffer::Dynamic, QRhiBuffer::UniformBuffer, totalUSize);
            rebuildUBuf = true;
        } else if (batch->stencilClipState.ubuf->size() < totalUSize) {
            batch->stencilClipState.ubuf->setSize(totalUSize);
            rebuildUBuf = true;
        }
        if (rebuildUBuf) {
            if (!batch->stencilClipState.ubuf->create()) {
                qWarning("Failed to build stencil clip uniform buffer");
2411 | delete batch->stencilClipState.ubuf; |
2412 | batch->stencilClipState.ubuf = nullptr; |
2413 | return; |
2414 | } |
2415 | } |
2416 | |
2417 | if (!batch->stencilClipState.srb) { |
2418 | batch->stencilClipState.srb = m_rhi->newShaderResourceBindings(); |
            const QRhiShaderResourceBinding ubufBinding = QRhiShaderResourceBinding::uniformBufferWithDynamicOffset(
                    0, QRhiShaderResourceBinding::VertexStage, batch->stencilClipState.ubuf, StencilClipUbufSize);
            batch->stencilClipState.srb->setBindings({ ubufBinding });
            if (!batch->stencilClipState.srb->create()) {
                qWarning("Failed to build stencil clip srb");
2424 | delete batch->stencilClipState.srb; |
2425 | batch->stencilClipState.srb = nullptr; |
2426 | return; |
2427 | } |
2428 | } |
2429 | |
2430 | quint32 vOffset = 0; |
2431 | quint32 iOffset = 0; |
2432 | quint32 uOffset = 0; |
2433 | for (const QSGClipNode *clip : stencilClipNodes) { |
2434 | const QSGGeometry *g = clip->geometry(); |
2435 | const QSGGeometry::Attribute *a = g->attributes(); |
2436 | StencilClipState::StencilDrawCall drawCall; |
2437 | const bool firstStencilClipInBatch = batch->stencilClipState.drawCalls.isEmpty(); |
2438 | |
2439 | if (firstStencilClipInBatch) { |
2440 | m_stencilClipCommon.inputLayout.setBindings({ QRhiVertexInputBinding(g->sizeOfVertex()) }); |
                m_stencilClipCommon.inputLayout.setAttributes({ QRhiVertexInputAttribute(0, 0, qsg_vertexInputFormat(*a), 0) });
                m_stencilClipCommon.topology = qsg_topology(g->drawingMode());
            }
#ifndef QT_NO_DEBUG
            else {
                if (qsg_topology(g->drawingMode()) != m_stencilClipCommon.topology)
                    qWarning("updateClipState: Clip list entries have different primitive topologies, this is not currently supported.");
                if (qsg_vertexInputFormat(*a) != m_stencilClipCommon.inputLayout.cbeginAttributes()->format())
                    qWarning("updateClipState: Clip list entries have different vertex input layouts, this must not happen.");
2450 | } |
2451 | #endif |
2452 | |
            drawCall.vbufOffset = aligned(vOffset, 4u);
            const int vertexByteSize = g->sizeOfVertex() * g->vertexCount();
            vOffset = drawCall.vbufOffset + vertexByteSize;

            int indexByteSize = 0;
            if (g->indexCount()) {
                drawCall.ibufOffset = aligned(iOffset, 4u);
                indexByteSize = g->sizeOfIndex() * g->indexCount();
                iOffset = drawCall.ibufOffset + indexByteSize;
            }

            drawCall.ubufOffset = aligned(uOffset, m_ubufAlignment);
2465 | uOffset = drawCall.ubufOffset + StencilClipUbufSize; |
2466 | |
2467 | QMatrix4x4 matrixYUpNDC = m_current_projection_matrix; |
2468 | if (clip->matrix()) |
2469 | matrixYUpNDC *= *clip->matrix(); |
2470 | |
            m_resourceUpdates->updateDynamicBuffer(batch->stencilClipState.ubuf, drawCall.ubufOffset, 64, matrixYUpNDC.constData());
            m_resourceUpdates->updateDynamicBuffer(batch->stencilClipState.vbuf, drawCall.vbufOffset, vertexByteSize, g->vertexData());
            if (indexByteSize)
                m_resourceUpdates->updateDynamicBuffer(batch->stencilClipState.ibuf, drawCall.ibufOffset, indexByteSize, g->indexData());
2475 | |
2476 | // stencil ref goes 1, 1, 2, 3, 4, ..., N for the clips in the first batch, |
2477 | // then N+1, N+1, N+2, N+3, ... for the next batch, |
2478 | // and so on. |
2479 | // Note the different stencilOp for the first and the subsequent clips. |
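            // For illustration: with m_currentClipState.stencilRef starting
            // at 0 and three clip nodes in this batch, the three draw calls
            // get refs 1, 1 and 2, and the counter is left at 3 for the
            // next batch.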
2480 | drawCall.stencilRef = firstStencilClipInBatch ? m_currentClipState.stencilRef + 1 : m_currentClipState.stencilRef; |
2481 | m_currentClipState.stencilRef += 1; |
2482 | |
2483 | drawCall.vertexCount = g->vertexCount(); |
2484 | drawCall.indexCount = g->indexCount(); |
            drawCall.indexFormat = qsg_indexFormat(g);
            batch->stencilClipState.drawCalls.add(drawCall);
2487 | } |
2488 | |
        if (!m_stencilClipCommon.vs.isValid())
            m_stencilClipCommon.vs = QSGMaterialShaderPrivate::loadShader(QLatin1String(":/qt-project.org/scenegraph/shaders_ng/stencilclip.vert.qsb"));

        if (!m_stencilClipCommon.fs.isValid())
            m_stencilClipCommon.fs = QSGMaterialShaderPrivate::loadShader(QLatin1String(":/qt-project.org/scenegraph/shaders_ng/stencilclip.frag.qsb"));

        if (!m_stencilClipCommon.replacePs)
            m_stencilClipCommon.replacePs = buildStencilPipeline(batch, true);

        if (!m_stencilClipCommon.incrPs)
            m_stencilClipCommon.incrPs = buildStencilPipeline(batch, false);
2500 | |
2501 | batch->stencilClipState.updateStencilBuffer = true; |
2502 | } |
2503 | |
2504 | m_currentClipState.clipList = clipList; |
2505 | m_currentClipState.type = clipType; |
2506 | m_currentClipState.scissor = QRhiScissor(scissorRect.x(), scissorRect.y(), |
2507 | scissorRect.width(), scissorRect.height()); |
2508 | |
2509 | applyClipStateToGraphicsState(); |
2510 | batch->clipState = m_currentClipState; |
2511 | } |
2512 | |
2513 | void Renderer::enqueueStencilDraw(const Batch *batch) |
2514 | { |
2515 | // cliptype stencil + updateStencilBuffer==false means the batch uses |
2516 | // stenciling but relies on the stencil data generated by a previous batch |
    // (due to having the same clip node). Do not enqueue draw calls for
2518 | // stencil in this case as the stencil buffer is already up-to-date. |
2519 | if (!batch->stencilClipState.updateStencilBuffer) |
2520 | return; |
2521 | |
2522 | QRhiCommandBuffer *cb = renderTarget().cb; |
2523 | const int count = batch->stencilClipState.drawCalls.size(); |
2524 | for (int i = 0; i < count; ++i) { |
2525 | const StencilClipState::StencilDrawCall &drawCall(batch->stencilClipState.drawCalls.at(i)); |
2526 | QRhiShaderResourceBindings *srb = batch->stencilClipState.srb; |
2527 | QRhiCommandBuffer::DynamicOffset ubufOffset(0, drawCall.ubufOffset); |
2528 | if (i == 0) { |
2529 | cb->setGraphicsPipeline(m_stencilClipCommon.replacePs); |
2530 | cb->setViewport(m_pstate.viewport); |
2531 | } else if (i == 1) { |
2532 | cb->setGraphicsPipeline(m_stencilClipCommon.incrPs); |
2533 | cb->setViewport(m_pstate.viewport); |
2534 | } |
2535 | // else incrPs is already bound |
        cb->setShaderResources(srb, 1, &ubufOffset);
        cb->setStencilRef(drawCall.stencilRef);
        const QRhiCommandBuffer::VertexInput vbufBinding(batch->stencilClipState.vbuf, drawCall.vbufOffset);
        if (drawCall.indexCount) {
            cb->setVertexInput(0, 1, &vbufBinding,
                               batch->stencilClipState.ibuf, drawCall.ibufOffset, drawCall.indexFormat);
            cb->drawIndexed(drawCall.indexCount);
        } else {
            cb->setVertexInput(0, 1, &vbufBinding);
            cb->draw(drawCall.vertexCount);
2546 | } |
2547 | } |
2548 | } |
2549 | |
2550 | void Renderer::setActiveRhiShader(QSGMaterialShader *program, ShaderManager::Shader *shader) |
2551 | { |
2552 | Q_ASSERT(m_rhi); |
2553 | m_currentProgram = program; |
2554 | m_currentShader = shader; |
2555 | m_currentMaterial = nullptr; |
2556 | } |
2557 | |
2558 | static inline bool needsBlendConstant(QRhiGraphicsPipeline::BlendFactor f) |
2559 | { |
2560 | return f == QRhiGraphicsPipeline::ConstantColor |
2561 | || f == QRhiGraphicsPipeline::OneMinusConstantColor |
2562 | || f == QRhiGraphicsPipeline::ConstantAlpha |
2563 | || f == QRhiGraphicsPipeline::OneMinusConstantAlpha; |
2564 | } |
2565 | |
2566 | // With QRhi renderBatches() is split to two steps: prepare and render. |
2567 | // |
// Prepare goes through the batches and elements, sets up a graphics
// pipeline, srb and uniform buffer, and calculates clipping, based on m_gstate, the
2570 | // material (shaders), and the batches. This step does not touch the command |
2571 | // buffer or renderpass-related state (m_pstate). |
2572 | // |
2573 | // The render step then starts a renderpass, and goes through all |
2574 | // batches/elements again and records setGraphicsPipeline, drawIndexed, etc. on |
2575 | // the command buffer. The prepare step's accumulated global state like |
2576 | // m_gstate must not be used here. Rather, all data needed for rendering is |
2577 | // available from Batch/Element at this stage. Bookkeeping of state in the |
2578 | // renderpass is done via m_pstate. |
2579 | |
2580 | bool Renderer::ensurePipelineState(Element *e, const ShaderManager::Shader *sms, bool depthPostPass) |
2581 | { |
2582 | // Note the key's == and qHash implementations: the renderpass descriptor |
2583 | // and srb are tested for compatibility, not pointer equality. |
2584 | // |
2585 | // We do not store the srb pointer itself because the ownership stays with |
    // the Element and that can go away more often than we would like it
2587 | // to. (think scrolling a list view, constantly dropping and creating new |
2588 | // nodes) Rather, use an opaque blob of a few uints and store and compare |
2589 | // that. This works because once the pipeline is built, we will always call |
2590 | // setShaderResources with an explicitly specified srb which is fine even if |
2591 | // e->srb we used here to bake the pipeline is already gone by that point. |
2592 | // |
2593 | // A typical QSGMaterial's serialized srb layout is 8 uints. (uniform buffer |
2594 | // + texture, 4 fields each) Regardless, using an implicitly shared |
2595 | // container is essential here. (won't detach so no more allocs and copies |
2596 | // are done, unless the Element decides to rebake the srb with a different |
2597 | // layout - but then the detach is exactly what we need) |
2598 | // |
2599 | // Same story for the renderpass descriptor: the object can go away but |
2600 | // that's fine because that has no effect on an already built pipeline, and |
    // for comparison we only rely on the serialized blob in order to decide if the
2602 | // render target is compatible with the pipeline. |
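    //
    // Putting it together (illustrative): two delegates using the same
    // material type, and therefore the same uniform-buffer + texture srb
    // layout, serialize to identical blobs even when their Element srbs are
    // distinct objects; once the first bakes a pipeline, the second is
    // served from m_shaderManager->pipelineCache without another create().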
2603 | |
    const GraphicsPipelineStateKey k = GraphicsPipelineStateKey::create(m_gstate, sms, renderTarget().rpDesc, e->srb);

    // Note: dynamic state (viewport rect, scissor rect, stencil ref, blend
    // constant) is never a part of GraphicsState/QRhiGraphicsPipeline.

    // See if there is an existing, matching pipeline state object.
    auto it = m_shaderManager->pipelineCache.constFind(k);
2611 | if (it != m_shaderManager->pipelineCache.constEnd()) { |
2612 | if (depthPostPass) |
2613 | e->depthPostPassPs = *it; |
2614 | else |
2615 | e->ps = *it; |
2616 | return true; |
2617 | } |
2618 | |
2619 | // Build a new one. This is potentially expensive. |
2620 | QRhiGraphicsPipeline *ps = m_rhi->newGraphicsPipeline(); |
    ps->setShaderStages(sms->stages.cbegin(), sms->stages.cend());
    ps->setVertexInputLayout(sms->inputLayout);
    ps->setShaderResourceBindings(e->srb);
    ps->setRenderPassDescriptor(renderTarget().rpDesc);

    QRhiGraphicsPipeline::Flags flags;
    if (needsBlendConstant(m_gstate.srcColor) || needsBlendConstant(m_gstate.dstColor)
        || needsBlendConstant(m_gstate.srcAlpha) || needsBlendConstant(m_gstate.dstAlpha))
2629 | { |
2630 | flags |= QRhiGraphicsPipeline::UsesBlendConstants; |
2631 | } |
2632 | if (m_gstate.usesScissor) |
2633 | flags |= QRhiGraphicsPipeline::UsesScissor; |
2634 | if (m_gstate.stencilTest) |
2635 | flags |= QRhiGraphicsPipeline::UsesStencilRef; |
2636 | |
2637 | ps->setFlags(flags); |
    ps->setTopology(qsg_topology(m_gstate.drawMode));
2639 | ps->setCullMode(m_gstate.cullMode); |
2640 | ps->setPolygonMode(m_gstate.polygonMode); |
2641 | |
2642 | QRhiGraphicsPipeline::TargetBlend blend; |
2643 | blend.colorWrite = m_gstate.colorWrite; |
2644 | blend.enable = m_gstate.blending; |
2645 | blend.srcColor = m_gstate.srcColor; |
2646 | blend.dstColor = m_gstate.dstColor; |
2647 | blend.srcAlpha = m_gstate.srcAlpha; |
2648 | blend.dstAlpha = m_gstate.dstAlpha; |
2649 | ps->setTargetBlends({ blend }); |
2650 | |
2651 | ps->setDepthTest(m_gstate.depthTest); |
2652 | ps->setDepthWrite(m_gstate.depthWrite); |
2653 | ps->setDepthOp(m_gstate.depthFunc); |
2654 | |
2655 | if (m_gstate.stencilTest) { |
2656 | ps->setStencilTest(true); |
2657 | QRhiGraphicsPipeline::StencilOpState stencilOp; |
2658 | stencilOp.compareOp = QRhiGraphicsPipeline::Equal; |
2659 | stencilOp.failOp = QRhiGraphicsPipeline::Keep; |
2660 | stencilOp.depthFailOp = QRhiGraphicsPipeline::Keep; |
2661 | stencilOp.passOp = QRhiGraphicsPipeline::Keep; |
2662 | ps->setStencilFront(stencilOp); |
2663 | ps->setStencilBack(stencilOp); |
2664 | } |
2665 | |
2666 | ps->setSampleCount(m_gstate.sampleCount); |
2667 | |
2668 | ps->setLineWidth(m_gstate.lineWidth); |
2669 | |
2670 | if (!ps->create()) { |
        qWarning("Failed to build graphics pipeline state");
        delete ps;
        return false;
    }

    m_shaderManager->pipelineCache.insert(k, ps);
2677 | if (depthPostPass) |
2678 | e->depthPostPassPs = ps; |
2679 | else |
2680 | e->ps = ps; |
2681 | return true; |
2682 | } |
2683 | |
2684 | static QRhiSampler *newSampler(QRhi *rhi, const QSGSamplerDescription &desc) |
2685 | { |
2686 | QRhiSampler::Filter magFilter; |
2687 | QRhiSampler::Filter minFilter; |
2688 | QRhiSampler::Filter mipmapMode; |
2689 | QRhiSampler::AddressMode u; |
2690 | QRhiSampler::AddressMode v; |
2691 | |
2692 | switch (desc.filtering) { |
2693 | case QSGTexture::None: |
2694 | Q_FALLTHROUGH(); |
2695 | case QSGTexture::Nearest: |
2696 | magFilter = minFilter = QRhiSampler::Nearest; |
2697 | break; |
2698 | case QSGTexture::Linear: |
2699 | magFilter = minFilter = QRhiSampler::Linear; |
2700 | break; |
2701 | default: |
2702 | Q_UNREACHABLE(); |
2703 | magFilter = minFilter = QRhiSampler::Nearest; |
2704 | break; |
2705 | } |
2706 | |
2707 | switch (desc.mipmapFiltering) { |
2708 | case QSGTexture::None: |
2709 | mipmapMode = QRhiSampler::None; |
2710 | break; |
2711 | case QSGTexture::Nearest: |
2712 | mipmapMode = QRhiSampler::Nearest; |
2713 | break; |
2714 | case QSGTexture::Linear: |
2715 | mipmapMode = QRhiSampler::Linear; |
2716 | break; |
2717 | default: |
2718 | Q_UNREACHABLE(); |
2719 | mipmapMode = QRhiSampler::None; |
2720 | break; |
2721 | } |
2722 | |
2723 | switch (desc.horizontalWrap) { |
2724 | case QSGTexture::Repeat: |
2725 | u = QRhiSampler::Repeat; |
2726 | break; |
2727 | case QSGTexture::ClampToEdge: |
2728 | u = QRhiSampler::ClampToEdge; |
2729 | break; |
2730 | case QSGTexture::MirroredRepeat: |
2731 | u = QRhiSampler::Mirror; |
2732 | break; |
2733 | default: |
2734 | Q_UNREACHABLE(); |
2735 | u = QRhiSampler::ClampToEdge; |
2736 | break; |
2737 | } |
2738 | |
2739 | switch (desc.verticalWrap) { |
2740 | case QSGTexture::Repeat: |
2741 | v = QRhiSampler::Repeat; |
2742 | break; |
2743 | case QSGTexture::ClampToEdge: |
2744 | v = QRhiSampler::ClampToEdge; |
2745 | break; |
2746 | case QSGTexture::MirroredRepeat: |
2747 | v = QRhiSampler::Mirror; |
2748 | break; |
2749 | default: |
2750 | Q_UNREACHABLE(); |
2751 | v = QRhiSampler::ClampToEdge; |
2752 | break; |
2753 | } |
2754 | |
    return rhi->newSampler(magFilter, minFilter, mipmapMode, u, v);
2756 | } |
2757 | |
2758 | QRhiTexture *Renderer::dummyTexture() |
2759 | { |
2760 | if (!m_dummyTexture) { |
        m_dummyTexture = m_rhi->newTexture(QRhiTexture::RGBA8, QSize(64, 64));
        if (m_dummyTexture->create()) {
            if (m_resourceUpdates) {
                QImage img(m_dummyTexture->pixelSize(), QImage::Format_RGBA8888_Premultiplied);
                img.fill(0);
                m_resourceUpdates->uploadTexture(m_dummyTexture, img);
2767 | } |
2768 | } |
2769 | } |
2770 | return m_dummyTexture; |
2771 | } |
2772 | |
2773 | static void rendererToMaterialGraphicsState(QSGMaterialShader::GraphicsPipelineState *dst, |
2774 | GraphicsState *src) |
2775 | { |
2776 | dst->blendEnable = src->blending; |
2777 | |
2778 | // the enum values should match, sanity check it |
2779 | Q_ASSERT(int(QSGMaterialShader::GraphicsPipelineState::OneMinusSrc1Alpha) == int(QRhiGraphicsPipeline::OneMinusSrc1Alpha)); |
2780 | Q_ASSERT(int(QSGMaterialShader::GraphicsPipelineState::A) == int(QRhiGraphicsPipeline::A)); |
2781 | Q_ASSERT(int(QSGMaterialShader::GraphicsPipelineState::CullBack) == int(QRhiGraphicsPipeline::Back)); |
2782 | Q_ASSERT(int(QSGMaterialShader::GraphicsPipelineState::Line) == int(QRhiGraphicsPipeline::Line)); |
2783 | dst->srcColor = QSGMaterialShader::GraphicsPipelineState::BlendFactor(src->srcColor); |
2784 | dst->dstColor = QSGMaterialShader::GraphicsPipelineState::BlendFactor(src->dstColor); |
2785 | |
2786 | // For compatibility with any existing code, separateBlendFactors defaults |
2787 | // to _false_ which means that materials that do not touch srcAlpha and |
2788 | // dstAlpha will continue to use srcColor and dstColor as the alpha |
2789 | // blending factors. New code that needs different values for color/alpha, |
2790 | // can explicitly set separateBlendFactors to true and then set srcAlpha |
2791 | // and dstAlpha as well. |
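    // For example (illustrative): a material that wants premultiplied color
    // blending but plain One/Zero alpha factors would set
    // separateBlendFactors to true and assign srcAlpha/dstAlpha explicitly
    // in its pipeline-state update; leaving it false keeps the old behavior
    // where srcColor/dstColor drive both.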
2792 | dst->separateBlendFactors = false; |
2793 | |
2794 | dst->srcAlpha = QSGMaterialShader::GraphicsPipelineState::BlendFactor(src->srcAlpha); |
2795 | dst->dstAlpha = QSGMaterialShader::GraphicsPipelineState::BlendFactor(src->dstAlpha); |
2796 | |
2797 | dst->colorWrite = QSGMaterialShader::GraphicsPipelineState::ColorMask(int(src->colorWrite)); |
2798 | |
2799 | dst->cullMode = QSGMaterialShader::GraphicsPipelineState::CullMode(src->cullMode); |
2800 | dst->polygonMode = QSGMaterialShader::GraphicsPipelineState::PolygonMode(src->polygonMode); |
2801 | } |
2802 | |
2803 | static void materialToRendererGraphicsState(GraphicsState *dst, |
2804 | QSGMaterialShader::GraphicsPipelineState *src) |
2805 | { |
2806 | dst->blending = src->blendEnable; |
2807 | dst->srcColor = QRhiGraphicsPipeline::BlendFactor(src->srcColor); |
2808 | dst->dstColor = QRhiGraphicsPipeline::BlendFactor(src->dstColor); |
2809 | if (src->separateBlendFactors) { |
2810 | dst->srcAlpha = QRhiGraphicsPipeline::BlendFactor(src->srcAlpha); |
2811 | dst->dstAlpha = QRhiGraphicsPipeline::BlendFactor(src->dstAlpha); |
2812 | } else { |
2813 | dst->srcAlpha = dst->srcColor; |
2814 | dst->dstAlpha = dst->dstColor; |
2815 | } |
2816 | dst->colorWrite = QRhiGraphicsPipeline::ColorMask(int(src->colorWrite)); |
2817 | dst->cullMode = QRhiGraphicsPipeline::CullMode(src->cullMode); |
2818 | dst->polygonMode = QRhiGraphicsPipeline::PolygonMode(src->polygonMode); |
2819 | } |
2820 | |
2821 | void Renderer::updateMaterialDynamicData(ShaderManager::Shader *sms, |
2822 | QSGMaterialShader::RenderState &renderState, |
2823 | QSGMaterial *material, |
2824 | const Batch *batch, |
2825 | Element *e, |
2826 | int ubufOffset, |
2827 | int ubufRegionSize) |
2828 | { |
2829 | m_current_resource_update_batch = m_resourceUpdates; |
2830 | |
2831 | QSGMaterialShader *shader = sms->materialShader; |
    QSGMaterialShaderPrivate *pd = QSGMaterialShaderPrivate::get(shader);
2833 | QVarLengthArray<QRhiShaderResourceBinding, 8> bindings; |
2834 | |
2835 | if (pd->ubufBinding >= 0) { |
2836 | m_current_uniform_data = &pd->masterUniformData; |
        const bool changed = shader->updateUniformData(renderState, material, m_currentMaterial);
        m_current_uniform_data = nullptr;

        if (changed || !batch->ubufDataValid)
            m_resourceUpdates->updateDynamicBuffer(batch->ubuf, ubufOffset, ubufRegionSize, pd->masterUniformData.constData());

        bindings.append(QRhiShaderResourceBinding::uniformBuffer(pd->ubufBinding,
                                                                 pd->ubufStages,
                                                                 batch->ubuf,
                                                                 ubufOffset,
                                                                 ubufRegionSize));
2848 | } |
2849 | |
2850 | for (int binding = 0; binding < QSGMaterialShaderPrivate::MAX_SHADER_RESOURCE_BINDINGS; ++binding) { |
2851 | const QRhiShaderResourceBinding::StageFlags stages = pd->combinedImageSamplerBindings[binding]; |
2852 | if (!stages) |
2853 | continue; |
2854 | |
2855 | QVarLengthArray<QSGTexture *, 4> prevTex = pd->textureBindingTable[binding]; |
2856 | QVarLengthArray<QSGTexture *, 4> nextTex = prevTex; |
2857 | |
2858 | const int count = pd->combinedImageSamplerCount[binding]; |
        nextTex.resize(count);

        shader->updateSampledImage(renderState, binding, nextTex.data(), material,
                                   m_currentMaterial);

        if (nextTex.contains(nullptr)) {
            qWarning("No QSGTexture provided from updateSampledImage(). This is wrong.");
2866 | continue; |
2867 | } |
2868 | |
2869 | bool hasDirtySamplerOptions = false; |
2870 | bool isAnisotropic = false; |
2871 | for (QSGTexture *t : nextTex) { |
2872 | QSGTexturePrivate *td = QSGTexturePrivate::get(t); |
2873 | hasDirtySamplerOptions |= td->hasDirtySamplerOptions(); |
2874 | isAnisotropic |= t->anisotropyLevel() != QSGTexture::AnisotropyNone; |
2875 | td->resetDirtySamplerOptions(); |
2876 | } |
2877 | |
2878 | // prevTex may be invalid at this point, avoid dereferencing it |
2879 | if (nextTex != prevTex || hasDirtySamplerOptions) { |
2880 | |
2881 | // The QSGTexture, and so the sampler parameters, may have changed. |
2882 | // The rhiTexture is not relevant here. |
2883 | pd->textureBindingTable[binding] = nextTex; // does not own |
2884 | pd->samplerBindingTable[binding].clear(); |
2885 | |
2886 | if (isAnisotropic) // ### |
                qWarning("QSGTexture anisotropy levels are not currently supported");
2888 | |
2889 | QVarLengthArray<QRhiSampler *, 4> samplers; |
2890 | |
2891 | for (QSGTexture *t : nextTex) { |
2892 | const QSGSamplerDescription samplerDesc = QSGSamplerDescription::fromTexture(t); |
2893 | |
2894 | QRhiSampler *sampler = m_samplers[samplerDesc]; |
2895 | |
2896 | if (!sampler) { |
                    sampler = newSampler(m_rhi, samplerDesc);
                    if (!sampler->create()) {
                        qWarning("Failed to build sampler");
                        delete sampler;
                        continue;
                    }
                    m_samplers[samplerDesc] = sampler;
                }
                samplers.append(sampler);
2906 | } |
2907 | |
2908 | pd->samplerBindingTable[binding] = samplers; // does not own |
2909 | } |
2910 | |
2911 | if (pd->textureBindingTable[binding].size() == pd->samplerBindingTable[binding].size()) { |
2912 | |
2913 | QVarLengthArray<QRhiShaderResourceBinding::TextureAndSampler, 4> textureSamplers; |
2914 | |
2915 | for (int i = 0; i < pd->textureBindingTable[binding].size(); ++i) { |
2916 | |
                QRhiTexture *texture = pd->textureBindingTable[binding].at(i)->rhiTexture();
2918 | |
2919 | // texture may be null if the update above failed for any reason, |
2920 | // or if the QSGTexture chose to return null intentionally. This is |
2921 | // valid and we still need to provide something to the shader. |
2922 | if (!texture) |
2923 | texture = dummyTexture(); |
2924 | |
                QRhiSampler *sampler = pd->samplerBindingTable[binding].at(i);

                textureSamplers.append(
                        QRhiShaderResourceBinding::TextureAndSampler { texture, sampler });
            }

            if (!textureSamplers.isEmpty())
                bindings.append(QRhiShaderResourceBinding::sampledTextures(
                        binding, stages, count, textureSamplers.constData()));
2934 | } |
2935 | } |
2936 | |
2937 | #ifndef QT_NO_DEBUG |
2938 | if (bindings.isEmpty()) |
        qWarning("No shader resources for material %p, this is odd.", material);
2940 | #endif |
2941 | |
2942 | enum class SrbAction { |
2943 | Unknown, |
2944 | DoNothing, |
2945 | UpdateResources, |
2946 | Rebake |
2947 | } srbAction = SrbAction::Unknown; |
2948 | |
2949 | // First, if the Element has no srb created at all, then try to find an existing, |
2950 | // currently unused srb that is layout-compatible with our binding list. |
2951 | if (!e->srb) { |
2952 | // reuse a QVector as our work area, thus possibly reusing the underlying allocation too |
2953 | QVector<quint32> &layoutDesc(m_shaderManager->srbLayoutDescSerializeWorkspace); |
2954 | layoutDesc.clear(); |
        QRhiShaderResourceBinding::serializeLayoutDescription(bindings.cbegin(), bindings.cend(), std::back_inserter(layoutDesc));
        e->srb = m_shaderManager->srbPool.take(layoutDesc);
2957 | if (e->srb) { |
2958 | // Here we know layout compatibility is satisfied, but do not spend time on full |
2959 | // comparison. The chance of getting an srb that refers to the same resources |
2960 | // (buffer, textures) is low in practice. So reuse, but write new resources. |
2961 | srbAction = SrbAction::UpdateResources; |
2962 | } |
2963 | } |
2964 | |
2965 | // If the Element had an existing srb, investigate: |
2966 | // - It may be used as-is (when nothing changed in the scene regarding this node compared to the previous frame). |
2967 | // - Otherwise it may be able to go with a lightweight update (replace resources, binding list layout is the same). |
2968 | // - If all else fails rebake the full thing, meaning we reuse the memory allocation but will recreate everything underneath. |
2969 | if (srbAction == SrbAction::Unknown && e->srb) { |
        if (std::equal(e->srb->cbeginBindings(), e->srb->cendBindings(), bindings.cbegin(), bindings.cend())) {
            srbAction = SrbAction::DoNothing;
        } else if (std::equal(e->srb->cbeginBindings(), e->srb->cendBindings(), bindings.cbegin(), bindings.cend(),
                              [](const auto &a, const auto &b) { return a.isLayoutCompatible(b); }))
2974 | { |
2975 | srbAction = SrbAction::UpdateResources; |
2976 | } else { |
2977 | srbAction = SrbAction::Rebake; |
2978 | } |
2979 | } |
2980 | |
2981 | // If the Element had no srb associated at all and could not find a layout-compatible |
2982 | // one from the pool, then create a whole new object. |
2983 | if (!e->srb) { |
2984 | e->srb = m_rhi->newShaderResourceBindings(); |
2985 | srbAction = SrbAction::Rebake; |
2986 | } |
2987 | |
2988 | Q_ASSERT(srbAction != SrbAction::Unknown && e->srb); |
2989 | |
2990 | switch (srbAction) { |
2991 | case SrbAction::DoNothing: |
2992 | break; |
2993 | case SrbAction::UpdateResources: |
2994 | { |
        e->srb->setBindings(bindings.cbegin(), bindings.cend());
2996 | QRhiShaderResourceBindings::UpdateFlags flags; |
2997 | // Due to the way the binding list is built up above, if we have a uniform buffer |
2998 | // at binding point 0 (or none at all) then the sampledTexture bindings are added |
2999 | // with increasing binding points afterwards, so the list is already sorted based |
3000 | // on the binding points, thus we can save some time by telling the QRhi backend |
3001 | // not to sort again. |
3002 | if (pd->ubufBinding <= 0 || bindings.size() <= 1) |
3003 | flags |= QRhiShaderResourceBindings::BindingsAreSorted; |
3004 | |
3005 | e->srb->updateResources(flags); |
3006 | } |
3007 | break; |
3008 | case SrbAction::Rebake: |
        e->srb->setBindings(bindings.cbegin(), bindings.cend());
        if (!e->srb->create())
            qWarning("Failed to build srb");
        break;
    default:
        Q_ASSERT_X(false, "updateMaterialDynamicData", "No srb action set, this cannot happen");
3015 | } |
3016 | } |
3017 | |
3018 | void Renderer::updateMaterialStaticData(ShaderManager::Shader *sms, |
3019 | QSGMaterialShader::RenderState &renderState, |
3020 | QSGMaterial *material, |
3021 | Batch *batch, |
3022 | bool *gstateChanged) |
3023 | { |
3024 | QSGMaterialShader *shader = sms->materialShader; |
3025 | *gstateChanged = false; |
    if (shader->flags().testFlag(QSGMaterialShader::UpdatesGraphicsPipelineState)) {
3027 | // generate the public mini-state from m_gstate, invoke the material, |
3028 | // write the changes, if any, back to m_gstate, together with a way to |
3029 | // roll those back. |
3030 | QSGMaterialShader::GraphicsPipelineState shaderPs; |
        rendererToMaterialGraphicsState(&shaderPs, &m_gstate);
        const bool changed = shader->updateGraphicsPipelineState(renderState, &shaderPs, material, m_currentMaterial);
        if (changed) {
            m_gstateStack.push(m_gstate);
            materialToRendererGraphicsState(&m_gstate, &shaderPs);
            if (needsBlendConstant(m_gstate.srcColor) || needsBlendConstant(m_gstate.dstColor)
                || needsBlendConstant(m_gstate.srcAlpha) || needsBlendConstant(m_gstate.dstAlpha))
3038 | { |
3039 | batch->blendConstant = shaderPs.blendConstant; |
3040 | } |
3041 | *gstateChanged = true; |
3042 | } |
3043 | } |
3044 | } |
3045 | |
3046 | bool Renderer::prepareRenderMergedBatch(Batch *batch, PreparedRenderBatch *renderBatch) |
3047 | { |
3048 | if (batch->vertexCount == 0 || batch->indexCount == 0) |
3049 | return false; |
3050 | |
3051 | Element *e = batch->first; |
3052 | Q_ASSERT(e); |
3053 | |
3054 | #ifndef QT_NO_DEBUG_OUTPUT |
3055 | if (Q_UNLIKELY(debug_render())) { |
3056 | QDebug debug = qDebug(); |
3057 | debug << " -" |
3058 | << batch |
              << (batch->uploadedThisFrame ? "[ upload]" : "[retained]")
              << (e->node->clipList() ? "[ clip]" : "[noclip]")
              << (batch->isOpaque ? "[opaque]" : "[ alpha]")
              << "[ merged]"
              << " Nodes:" << QString::fromLatin1("%1").arg(qsg_countNodesInBatch(batch), 4).toLatin1().constData()
              << " Vertices:" << QString::fromLatin1("%1").arg(batch->vertexCount, 5).toLatin1().constData()
              << " Indices:" << QString::fromLatin1("%1").arg(batch->indexCount, 5).toLatin1().constData()
3066 | << " root:" << batch->root; |
3067 | if (batch->drawSets.size() > 1) |
3068 | debug << "sets:" << batch->drawSets.size(); |
3069 | if (!batch->isOpaque) |
3070 | debug << "opacity:" << e->node->inheritedOpacity(); |
3071 | batch->uploadedThisFrame = false; |
3072 | } |
3073 | #endif |
3074 | |
3075 | QSGGeometryNode *gn = e->node; |
3076 | |
3077 | // We always have dirty matrix as all batches are at a unique z range. |
3078 | QSGMaterialShader::RenderState::DirtyStates dirty = QSGMaterialShader::RenderState::DirtyMatrix; |
3079 | if (batch->root) |
        m_current_model_view_matrix = qsg_matrixForRoot(batch->root);
3081 | else |
3082 | m_current_model_view_matrix.setToIdentity(); |
3083 | m_current_determinant = m_current_model_view_matrix.determinant(); |
3084 | m_current_projection_matrix = projectionMatrix(); |
3085 | m_current_projection_matrix_native_ndc = projectionMatrixWithNativeNDC(); |
3086 | |
3087 | QSGMaterial *material = gn->activeMaterial(); |
3088 | if (m_renderMode != QSGRendererInterface::RenderMode3D) |
        updateClipState(gn->clipList(), batch);
3090 | |
3091 | const QSGGeometry *g = gn->geometry(); |
    ShaderManager::Shader *sms = useDepthBuffer() ? m_shaderManager->prepareMaterial(material, g, m_renderMode)
                                                  : m_shaderManager->prepareMaterialNoRewrite(material, g, m_renderMode);
3094 | if (!sms) |
3095 | return false; |
3096 | |
3097 | Q_ASSERT(sms->materialShader); |
3098 | if (m_currentShader != sms) |
        setActiveRhiShader(sms->materialShader, sms);
3100 | |
3101 | m_current_opacity = gn->inheritedOpacity(); |
    if (!qFuzzyCompare(sms->lastOpacity, float(m_current_opacity))) {
3103 | dirty |= QSGMaterialShader::RenderState::DirtyOpacity; |
3104 | sms->lastOpacity = m_current_opacity; |
3105 | } |
3106 | |
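    // A merged batch uses a single material, so one uniform buffer region at
    // offset 0 is enough; (re)create the buffer only when missing or too small.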
    QSGMaterialShaderPrivate *pd = QSGMaterialShaderPrivate::get(sms->materialShader);
3108 | const quint32 ubufSize = quint32(pd->masterUniformData.size()); |
3109 | if (pd->ubufBinding >= 0) { |
3110 | bool ubufRebuild = false; |
3111 | if (!batch->ubuf) { |
            batch->ubuf = m_rhi->newBuffer(QRhiBuffer::Dynamic, QRhiBuffer::UniformBuffer, ubufSize);
3113 | ubufRebuild = true; |
3114 | } else { |
3115 | if (batch->ubuf->size() < ubufSize) { |
3116 | batch->ubuf->setSize(ubufSize); |
3117 | ubufRebuild = true; |
3118 | } |
3119 | } |
3120 | if (ubufRebuild) { |
3121 | batch->ubufDataValid = false; |
3122 | if (!batch->ubuf->create()) { |
                qWarning("Failed to build uniform buffer of size %u bytes", ubufSize);
3124 | delete batch->ubuf; |
3125 | batch->ubuf = nullptr; |
3126 | return false; |
3127 | } |
3128 | } |
3129 | } |
3130 | |
    QSGMaterialShader::RenderState renderState = state(QSGMaterialShader::RenderState::DirtyStates(int(dirty)));

    bool pendingGStatePop = false;
    updateMaterialStaticData(sms, renderState, material, batch, &pendingGStatePop);

    updateMaterialDynamicData(sms, renderState, material, batch, e, 0, ubufSize);
3137 | |
3138 | #ifndef QT_NO_DEBUG |
3139 | if (qsg_test_and_clear_material_failure()) { |
        qDebug("QSGMaterial::updateState triggered an error (merged), batch will be skipped:");
        Element *ee = e;
        while (ee) {
            qDebug() << " -" << ee->node;
            ee = ee->nextInBatch;
        }
        QSGNodeDumper::dump(rootNode());
        qFatal("Aborting: scene graph is invalid...");
3148 | } |
3149 | #endif |
3150 | |
3151 | m_gstate.drawMode = QSGGeometry::DrawingMode(g->drawingMode()); |
3152 | m_gstate.lineWidth = g->lineWidth(); |
3153 | |
3154 | const bool hasPipeline = ensurePipelineState(e, sms); |
3155 | |
3156 | if (pendingGStatePop) |
3157 | m_gstate = m_gstateStack.pop(); |
3158 | |
3159 | if (!hasPipeline) |
3160 | return false; |
3161 | |
3162 | if (m_renderMode == QSGRendererInterface::RenderMode3D) { |
        m_gstateStack.push(m_gstate);
        setStateForDepthPostPass();
        ensurePipelineState(e, sms, true);
3166 | m_gstate = m_gstateStack.pop(); |
3167 | } |
3168 | |
3169 | batch->ubufDataValid = true; |
3170 | |
3171 | m_currentMaterial = material; |
3172 | |
3173 | renderBatch->batch = batch; |
3174 | renderBatch->sms = sms; |
3175 | |
3176 | return true; |
3177 | } |
3178 | |
3179 | void Renderer::checkLineWidth(QSGGeometry *g) |
3180 | { |
3181 | if (g->drawingMode() == QSGGeometry::DrawLines || g->drawingMode() == QSGGeometry::DrawLineLoop |
3182 | || g->drawingMode() == QSGGeometry::DrawLineStrip) |
3183 | { |
3184 | if (g->lineWidth() != 1.0f) { |
3185 | static bool checkedWideLineSupport = false; |
3186 | if (!checkedWideLineSupport) { |
3187 | checkedWideLineSupport = true; |
                if (!m_rhi->isFeatureSupported(QRhi::WideLines))
                    qWarning("Line widths other than 1 are not supported by the graphics API");
3190 | } |
3191 | } |
3192 | } else if (g->drawingMode() == QSGGeometry::DrawPoints) { |
3193 | if (g->lineWidth() != 1.0f) { |
3194 | static bool warnedPointSize = false; |
3195 | if (!warnedPointSize) { |
3196 | warnedPointSize = true; |
                qWarning("Point size is not controllable by QSGGeometry. "
                         "Set gl_PointSize from the vertex shader instead.");
3199 | } |
3200 | } |
3201 | } |
3202 | } |
3203 | |
3204 | void Renderer::renderMergedBatch(PreparedRenderBatch *renderBatch, bool depthPostPass) |
3205 | { |
3206 | const Batch *batch = renderBatch->batch; |
3207 | if (!batch->vbo.buf || !batch->ibo.buf) |
3208 | return; |
3209 | |
3210 | Element *e = batch->first; |
3211 | QSGGeometryNode *gn = e->node; |
3212 | QSGGeometry *g = gn->geometry(); |
3213 | checkLineWidth(g); |
3214 | |
3215 | if (batch->clipState.type & ClipState::StencilClip) |
3216 | enqueueStencilDraw(batch); |
3217 | |
3218 | QRhiCommandBuffer *cb = renderTarget().cb; |
3219 | setGraphicsPipeline(cb, batch, e, depthPostPass); |
3220 | |
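    // Each DrawSet is one indexed draw with its own offsets into the batch's
    // vertex, z-order and index data; the separate z-order stream is only
    // bound when the depth buffer is in use.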
3221 | for (int i = 0, ie = batch->drawSets.size(); i != ie; ++i) { |
3222 | const DrawSet &draw = batch->drawSets.at(i); |
3223 | const QRhiCommandBuffer::VertexInput vbufBindings[] = { |
3224 | { batch->vbo.buf, quint32(draw.vertices) }, |
3225 | { batch->vbo.buf, quint32(draw.zorders) } |
3226 | }; |
        cb->setVertexInput(VERTEX_BUFFER_BINDING, useDepthBuffer() ? 2 : 1, vbufBindings,
                           batch->ibo.buf, draw.indices,
                           m_uint32IndexForRhi ? QRhiCommandBuffer::IndexUInt32 : QRhiCommandBuffer::IndexUInt16);
        cb->drawIndexed(draw.indexCount);
3231 | } |
3232 | } |
3233 | |
3234 | bool Renderer::prepareRenderUnmergedBatch(Batch *batch, PreparedRenderBatch *renderBatch) |
3235 | { |
3236 | if (batch->vertexCount == 0) |
3237 | return false; |
3238 | |
3239 | Element *e = batch->first; |
3240 | Q_ASSERT(e); |
3241 | |
3242 | if (Q_UNLIKELY(debug_render())) { |
3243 | qDebug() << " -" |
3244 | << batch |
                 << (batch->uploadedThisFrame ? "[ upload]" : "[retained]")
                 << (e->node->clipList() ? "[ clip]" : "[noclip]")
                 << (batch->isOpaque ? "[opaque]" : "[ alpha]")
                 << "[unmerged]"
                 << " Nodes:" << QString::fromLatin1("%1").arg(qsg_countNodesInBatch(batch), 4).toLatin1().constData()
                 << " Vertices:" << QString::fromLatin1("%1").arg(batch->vertexCount, 5).toLatin1().constData()
                 << " Indices:" << QString::fromLatin1("%1").arg(batch->indexCount, 5).toLatin1().constData()
3252 | << " root:" << batch->root; |
3253 | |
3254 | batch->uploadedThisFrame = false; |
3255 | } |
3256 | |
3257 | m_current_projection_matrix = projectionMatrix(); |
3258 | m_current_projection_matrix_native_ndc = projectionMatrixWithNativeNDC(); |
3259 | |
3260 | QSGGeometryNode *gn = e->node; |
3261 | if (m_renderMode != QSGRendererInterface::RenderMode3D) |
        updateClipState(gn->clipList(), batch);
3263 | |
3264 | // We always have dirty matrix as all batches are at a unique z range. |
3265 | QSGMaterialShader::RenderState::DirtyStates dirty = QSGMaterialShader::RenderState::DirtyMatrix; |
3266 | |
3267 | // The vertex attributes are assumed to be the same for all elements in the |
3268 | // unmerged batch since the material (and so the shaders) is the same. |
3269 | QSGGeometry *g = gn->geometry(); |
3270 | QSGMaterial *material = gn->activeMaterial(); |
    ShaderManager::Shader *sms = m_shaderManager->prepareMaterialNoRewrite(material, g, m_renderMode);
3272 | if (!sms) |
3273 | return false; |
3274 | |
3275 | Q_ASSERT(sms->materialShader); |
3276 | if (m_currentShader != sms) |
        setActiveRhiShader(sms->materialShader, sms);
3278 | |
3279 | m_current_opacity = gn->inheritedOpacity(); |
3280 | if (sms->lastOpacity != m_current_opacity) { |
3281 | dirty |= QSGMaterialShader::RenderState::DirtyOpacity; |
3282 | sms->lastOpacity = m_current_opacity; |
3283 | } |
3284 | |
    QMatrix4x4 rootMatrix = batch->root ? qsg_matrixForRoot(batch->root) : QMatrix4x4();

    QSGMaterialShaderPrivate *pd = QSGMaterialShaderPrivate::get(sms->materialShader);
3288 | const quint32 ubufSize = quint32(pd->masterUniformData.size()); |
3289 | if (pd->ubufBinding >= 0) { |
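        // In an unmerged batch every element gets its own region in the
        // uniform buffer, padded to the required alignment, hence the total
        // size is the aligned per-element size times the element count.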
3290 | quint32 totalUBufSize = 0; |
3291 | while (e) { |
            totalUBufSize += aligned(ubufSize, m_ubufAlignment);
3293 | e = e->nextInBatch; |
3294 | } |
3295 | bool ubufRebuild = false; |
3296 | if (!batch->ubuf) { |
            batch->ubuf = m_rhi->newBuffer(QRhiBuffer::Dynamic, QRhiBuffer::UniformBuffer, totalUBufSize);
3298 | ubufRebuild = true; |
3299 | } else { |
3300 | if (batch->ubuf->size() < totalUBufSize) { |
3301 | batch->ubuf->setSize(totalUBufSize); |
3302 | ubufRebuild = true; |
3303 | } |
3304 | } |
3305 | if (ubufRebuild) { |
3306 | batch->ubufDataValid = false; |
3307 | if (!batch->ubuf->create()) { |
                qWarning("Failed to build uniform buffer of size %u bytes", totalUBufSize);
3309 | delete batch->ubuf; |
3310 | batch->ubuf = nullptr; |
3311 | return false; |
3312 | } |
3313 | } |
3314 | } |
3315 | |
    QSGMaterialShader::RenderState renderState = state(QSGMaterialShader::RenderState::DirtyStates(int(dirty)));
    bool pendingGStatePop = false;
    updateMaterialStaticData(sms, renderState,
                             material, batch, &pendingGStatePop);
3320 | |
3321 | int ubufOffset = 0; |
3322 | QRhiGraphicsPipeline *ps = nullptr; |
3323 | QRhiGraphicsPipeline *depthPostPassPs = nullptr; |
3324 | e = batch->first; |
3325 | while (e) { |
3326 | gn = e->node; |
3327 | |
3328 | m_current_model_view_matrix = rootMatrix * *gn->matrix(); |
3329 | m_current_determinant = m_current_model_view_matrix.determinant(); |
3330 | |
3331 | m_current_projection_matrix = projectionMatrix(); |
3332 | m_current_projection_matrix_native_ndc = projectionMatrixWithNativeNDC(); |
3333 | if (useDepthBuffer()) { |
3334 | m_current_projection_matrix(2, 2) = m_zRange; |
            m_current_projection_matrix(2, 3) = calculateElementZOrder(e, m_zRange);
3336 | } |
3337 | |
        QSGMaterialShader::RenderState renderState = state(QSGMaterialShader::RenderState::DirtyStates(int(dirty)));
        updateMaterialDynamicData(sms, renderState, material, batch, e, ubufOffset, ubufSize);
3340 | |
3341 | #ifndef QT_NO_DEBUG |
3342 | if (qsg_test_and_clear_material_failure()) { |
            qDebug("QSGMaterial::updateState() triggered an error (unmerged), batch will be skipped:");
            qDebug() << " - offending node is" << e->node;
            QSGNodeDumper::dump(rootNode());
            qFatal("Aborting: scene graph is invalid...");
3347 | return false; |
3348 | } |
3349 | #endif |
3350 | |
        ubufOffset += aligned(ubufSize, m_ubufAlignment);
3352 | |
3353 | const QSGGeometry::DrawingMode prevDrawMode = m_gstate.drawMode; |
3354 | const float prevLineWidth = m_gstate.lineWidth; |
3355 | m_gstate.drawMode = QSGGeometry::DrawingMode(g->drawingMode()); |
3356 | m_gstate.lineWidth = g->lineWidth(); |
3357 | |
3358 | // Do not bother even looking up the ps if the topology has not changed |
3359 | // since everything else is the same for all elements in the batch. |
3360 | // (except if the material modified blend state) |
3361 | if (!ps || m_gstate.drawMode != prevDrawMode || m_gstate.lineWidth != prevLineWidth || pendingGStatePop) { |
3362 | if (!ensurePipelineState(e, sms)) { |
3363 | if (pendingGStatePop) |
3364 | m_gstate = m_gstateStack.pop(); |
3365 | return false; |
3366 | } |
3367 | ps = e->ps; |
3368 | if (m_renderMode == QSGRendererInterface::RenderMode3D) { |
                m_gstateStack.push(m_gstate);
                setStateForDepthPostPass();
                ensurePipelineState(e, sms, true);
3372 | m_gstate = m_gstateStack.pop(); |
3373 | depthPostPassPs = e->depthPostPassPs; |
3374 | } |
3375 | } else { |
3376 | e->ps = ps; |
3377 | if (m_renderMode == QSGRendererInterface::RenderMode3D) |
3378 | e->depthPostPassPs = depthPostPassPs; |
3379 | } |
3380 | |
3381 | // We don't need to bother with asking each node for its material as they |
3382 | // are all identical (compare==0) since they are in the same batch. |
3383 | m_currentMaterial = material; |
3384 | |
3385 | // We only need to push this on the very first iteration... |
3386 | dirty &= ~QSGMaterialShader::RenderState::DirtyOpacity; |
3387 | |
3388 | e = e->nextInBatch; |
3389 | } |
3390 | |
3391 | if (pendingGStatePop) |
3392 | m_gstate = m_gstateStack.pop(); |
3393 | |
3394 | batch->ubufDataValid = true; |
3395 | |
3396 | renderBatch->batch = batch; |
3397 | renderBatch->sms = sms; |
3398 | |
3399 | return true; |
3400 | } |
3401 | |
3402 | void Renderer::renderUnmergedBatch(PreparedRenderBatch *renderBatch, bool depthPostPass) |
3403 | { |
3404 | const Batch *batch = renderBatch->batch; |
3405 | if (!batch->vbo.buf) |
3406 | return; |
3407 | |
3408 | Element *e = batch->first; |
3409 | |
3410 | if (batch->clipState.type & ClipState::StencilClip) |
3411 | enqueueStencilDraw(batch); |
3412 | |
3413 | quint32 vOffset = 0; |
3414 | quint32 iOffset = 0; |
3415 | QRhiCommandBuffer *cb = renderTarget().cb; |
3416 | |
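    // Draw each element separately, advancing manually tracked offsets into
    // the batch's shared vertex and index buffers.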
3417 | while (e) { |
3418 | QSGGeometry *g = e->node->geometry(); |
3419 | checkLineWidth(g); |
3420 | const int effectiveIndexSize = m_uint32IndexForRhi ? sizeof(quint32) : g->sizeOfIndex(); |
3421 | |
3422 | setGraphicsPipeline(cb, batch, e, depthPostPass); |
3423 | |
3424 | const QRhiCommandBuffer::VertexInput vbufBinding(batch->vbo.buf, vOffset); |
3425 | if (g->indexCount()) { |
3426 | if (batch->ibo.buf) { |
                cb->setVertexInput(VERTEX_BUFFER_BINDING, 1, &vbufBinding,
                                   batch->ibo.buf, iOffset,
                                   effectiveIndexSize == sizeof(quint32) ? QRhiCommandBuffer::IndexUInt32
                                                                         : QRhiCommandBuffer::IndexUInt16);
                cb->drawIndexed(g->indexCount());
            }
        } else {
            cb->setVertexInput(VERTEX_BUFFER_BINDING, 1, &vbufBinding);
            cb->draw(g->vertexCount());
3436 | } |
3437 | |
3438 | vOffset += g->sizeOfVertex() * g->vertexCount(); |
3439 | iOffset += g->indexCount() * effectiveIndexSize; |
3440 | |
3441 | e = e->nextInBatch; |
3442 | } |
3443 | } |
3444 | |
3445 | void Renderer::setGraphicsPipeline(QRhiCommandBuffer *cb, const Batch *batch, Element *e, bool depthPostPass) |
3446 | { |
3447 | cb->setGraphicsPipeline(depthPostPass ? e->depthPostPassPs : e->ps); |
3448 | |
3449 | if (!m_pstate.viewportSet) { |
3450 | m_pstate.viewportSet = true; |
3451 | cb->setViewport(m_pstate.viewport); |
3452 | } |
3453 | if (batch->clipState.type & ClipState::ScissorClip) { |
3454 | Q_ASSERT(e->ps->flags().testFlag(QRhiGraphicsPipeline::UsesScissor)); |
3455 | m_pstate.scissorSet = true; |
3456 | cb->setScissor(batch->clipState.scissor); |
3457 | } else { |
3458 | Q_ASSERT(!e->ps->flags().testFlag(QRhiGraphicsPipeline::UsesScissor)); |
3459 | // Regardless of the ps not using scissor, the scissor may need to be |
3460 | // reset, depending on the backend. So set the viewport again, which in |
3461 | // turn also sets the scissor on backends where a scissor rect is |
3462 | // always-on (Vulkan). |
3463 | if (m_pstate.scissorSet) { |
3464 | m_pstate.scissorSet = false; |
3465 | cb->setViewport(m_pstate.viewport); |
3466 | } |
3467 | } |
3468 | if (batch->clipState.type & ClipState::StencilClip) { |
3469 | Q_ASSERT(e->ps->flags().testFlag(QRhiGraphicsPipeline::UsesStencilRef)); |
3470 | cb->setStencilRef(batch->clipState.stencilRef); |
3471 | } |
    if (!depthPostPass && e->ps->flags().testFlag(QRhiGraphicsPipeline::UsesBlendConstants))
        cb->setBlendConstants(batch->blendConstant);

    cb->setShaderResources(e->srb);
3476 | } |
3477 | |
3478 | void Renderer::releaseElement(Element *e, bool inDestructor) |
3479 | { |
3480 | if (e->isRenderNode) { |
3481 | delete static_cast<RenderNodeElement *>(e); |
3482 | } else { |
3483 | if (e->srb) { |
3484 | if (!inDestructor) { |
3485 | if (m_shaderManager->srbPool.size() < m_srbPoolThreshold) |
                    m_shaderManager->srbPool.insert(e->srb->serializedLayoutDescription(), e->srb);
3487 | else |
3488 | delete e->srb; |
3489 | } else { |
3490 | delete e->srb; |
3491 | } |
3492 | e->srb = nullptr; |
3493 | } |
        m_elementAllocator.release(e);
3495 | } |
3496 | } |
3497 | |
3498 | void Renderer::deleteRemovedElements() |
3499 | { |
3500 | if (!m_elementsToDelete.size()) |
3501 | return; |
3502 | |
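    // Null out entries for elements that are about to be released so the
    // render lists are left without dangling pointers.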
3503 | for (int i=0; i<m_opaqueRenderList.size(); ++i) { |
3504 | Element **e = m_opaqueRenderList.data() + i; |
3505 | if (*e && (*e)->removed) |
3506 | *e = nullptr; |
3507 | } |
3508 | for (int i=0; i<m_alphaRenderList.size(); ++i) { |
3509 | Element **e = m_alphaRenderList.data() + i; |
3510 | if (*e && (*e)->removed) |
3511 | *e = nullptr; |
3512 | } |
3513 | |
3514 | for (int i=0; i<m_elementsToDelete.size(); ++i) |
        releaseElement(m_elementsToDelete.at(i));
3516 | |
3517 | m_elementsToDelete.reset(); |
3518 | } |
3519 | |
3520 | void Renderer::render() |
3521 | { |
3522 | // Gracefully handle the lack of a render target - some autotests may rely |
3523 | // on this in odd cases. |
3524 | if (!renderTarget().rt) |
3525 | return; |
3526 | |
    prepareRenderPass(&m_mainRenderPassContext);
    beginRenderPass(&m_mainRenderPassContext);
    recordRenderPass(&m_mainRenderPassContext);
    endRenderPass(&m_mainRenderPassContext);
3531 | } |
3532 | |
3533 | // An alternative to render() is to call prepareInline() and renderInline() at |
3534 | // the appropriate times (i.e. outside of a QRhi::beginPass() and then inside, |
// respectively). These allow rendering within a render pass that is started by
3536 | // another component. In contrast, render() records a full render pass on its |
3537 | // own. |
3538 | |
3539 | void Renderer::prepareInline() |
3540 | { |
    prepareRenderPass(&m_mainRenderPassContext);
3542 | } |
3543 | |
3544 | void Renderer::renderInline() |
3545 | { |
    recordRenderPass(&m_mainRenderPassContext);
3547 | } |
3548 | |
3549 | void Renderer::prepareRenderPass(RenderPassContext *ctx) |
3550 | { |
3551 | if (ctx->valid) |
        qWarning("prepareRenderPass() called with an already prepared render pass context");
3553 | |
3554 | ctx->valid = true; |
3555 | |
3556 | if (Q_UNLIKELY(debug_dump())) { |
        qDebug("\n");
        QSGNodeDumper::dump(rootNode());
3559 | } |
3560 | |
3561 | ctx->timeRenderLists = 0; |
3562 | ctx->timePrepareOpaque = 0; |
3563 | ctx->timePrepareAlpha = 0; |
3564 | ctx->timeSorting = 0; |
3565 | ctx->timeUploadOpaque = 0; |
3566 | ctx->timeUploadAlpha = 0; |
3567 | |
3568 | if (Q_UNLIKELY(debug_render() || debug_build())) { |
        QByteArray type("rebuild:");
        if (m_rebuild == 0)
            type += " none";
        if (m_rebuild == FullRebuild)
            type += " full";
        else {
            if (m_rebuild & BuildRenderLists)
                type += " renderlists";
            else if (m_rebuild & BuildRenderListsForTaggedRoots)
                type += " partial";
            else if (m_rebuild & BuildBatches)
                type += " batches";
3581 | } |
3582 | |
3583 | qDebug() << "Renderer::render()" << this << type; |
3584 | ctx->timer.start(); |
3585 | } |
3586 | |
3587 | m_resourceUpdates = m_rhi->nextResourceUpdateBatch(); |
3588 | |
3589 | if (m_rebuild & (BuildRenderLists | BuildRenderListsForTaggedRoots)) { |
3590 | bool complete = (m_rebuild & BuildRenderLists) != 0; |
3591 | if (complete) |
3592 | buildRenderListsFromScratch(); |
3593 | else |
3594 | buildRenderListsForTaggedRoots(); |
3595 | m_rebuild |= BuildBatches; |
3596 | |
3597 | if (Q_UNLIKELY(debug_build())) { |
            qDebug("Opaque render lists %s:", (complete ? "(complete)" : "(partial)"));
3599 | for (int i=0; i<m_opaqueRenderList.size(); ++i) { |
3600 | Element *e = m_opaqueRenderList.at(i); |
3601 | qDebug() << " - element:" << e << " batch:" << e->batch << " node:" << e->node << " order:" << e->order; |
3602 | } |
            qDebug("Alpha render list %s:", complete ? "(complete)" : "(partial)");
3604 | for (int i=0; i<m_alphaRenderList.size(); ++i) { |
3605 | Element *e = m_alphaRenderList.at(i); |
3606 | qDebug() << " - element:" << e << " batch:" << e->batch << " node:" << e->node << " order:" << e->order; |
3607 | } |
3608 | } |
3609 | } |
3610 | if (Q_UNLIKELY(debug_render())) ctx->timeRenderLists = ctx->timer.restart(); |
3611 | |
3612 | for (int i=0; i<m_opaqueBatches.size(); ++i) |
3613 | m_opaqueBatches.at(i)->cleanupRemovedElements(); |
3614 | for (int i=0; i<m_alphaBatches.size(); ++i) |
3615 | m_alphaBatches.at(i)->cleanupRemovedElements(); |
3616 | deleteRemovedElements(); |
3617 | |
    cleanupBatches(&m_opaqueBatches);
    cleanupBatches(&m_alphaBatches);
3620 | |
3621 | if (m_rebuild & BuildBatches) { |
3622 | prepareOpaqueBatches(); |
3623 | if (Q_UNLIKELY(debug_render())) ctx->timePrepareOpaque = ctx->timer.restart(); |
3624 | prepareAlphaBatches(); |
3625 | if (Q_UNLIKELY(debug_render())) ctx->timePrepareAlpha = ctx->timer.restart(); |
3626 | |
3627 | if (Q_UNLIKELY(debug_build())) { |
            qDebug("Opaque Batches:");
3629 | for (int i=0; i<m_opaqueBatches.size(); ++i) { |
3630 | Batch *b = m_opaqueBatches.at(i); |
                qDebug() << " - Batch " << i << b << (b->needsUpload ? "upload" : "") << " root:" << b->root;
3632 | for (Element *e = b->first; e; e = e->nextInBatch) { |
3633 | qDebug() << " - element:" << e << " node:" << e->node << e->order; |
3634 | } |
3635 | } |
            qDebug("Alpha Batches:");
            for (int i=0; i<m_alphaBatches.size(); ++i) {
                Batch *b = m_alphaBatches.at(i);
                qDebug() << " - Batch " << i << b << (b->needsUpload ? "upload" : "") << " root:" << b->root;
3640 | for (Element *e = b->first; e; e = e->nextInBatch) { |
3641 | qDebug() << " - element:" << e << e->bounds << " node:" << e->node << " order:" << e->order; |
3642 | } |
3643 | } |
3644 | } |
3645 | } else { |
3646 | if (Q_UNLIKELY(debug_render())) ctx->timePrepareOpaque = ctx->timePrepareAlpha = ctx->timer.restart(); |
3647 | } |
3648 | |
3649 | |
3650 | deleteRemovedElements(); |
3651 | |
3652 | if (m_rebuild != 0) { |
3653 | // Then sort opaque batches so that we're drawing the batches with the highest |
3654 | // order first, maximizing the benefit of front-to-back z-ordering. |
3655 | if (m_opaqueBatches.size()) |
            std::sort(&m_opaqueBatches.first(), &m_opaqueBatches.last() + 1, qsg_sort_batch_decreasing_order);
3657 | |
3658 | // Sort alpha batches back to front so that they render correctly. |
3659 | if (m_alphaBatches.size()) |
            std::sort(&m_alphaBatches.first(), &m_alphaBatches.last() + 1, qsg_sort_batch_increasing_order);
3661 | |
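        // m_zRange is the depth-range slice per render order; element orders
        // are scaled by it so all batches map into the [0, 1] depth range.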
3662 | m_zRange = m_nextRenderOrder != 0 |
3663 | ? 1.0 / (m_nextRenderOrder) |
3664 | : 0; |
3665 | } |
3666 | |
3667 | if (Q_UNLIKELY(debug_render())) ctx->timeSorting = ctx->timer.restart(); |
3668 | |
3669 | // Set size to 0, nothing is deallocated, they will "grow" again |
3670 | // as part of uploadBatch. |
3671 | m_vertexUploadPool.reset(); |
3672 | m_indexUploadPool.reset(); |
3673 | |
    if (Q_UNLIKELY(debug_upload())) qDebug("Uploading Opaque Batches:");
3675 | for (int i=0; i<m_opaqueBatches.size(); ++i) { |
3676 | Batch *b = m_opaqueBatches.at(i); |
3677 | uploadBatch(b); |
3678 | } |
3679 | if (Q_UNLIKELY(debug_render())) ctx->timeUploadOpaque = ctx->timer.restart(); |
3680 | |
    if (Q_UNLIKELY(debug_upload())) qDebug("Uploading Alpha Batches:");
3682 | for (int i=0; i<m_alphaBatches.size(); ++i) { |
3683 | Batch *b = m_alphaBatches.at(i); |
3684 | uploadBatch(b); |
3685 | } |
3686 | if (Q_UNLIKELY(debug_render())) ctx->timeUploadAlpha = ctx->timer.restart(); |
3687 | |
3688 | if (Q_UNLIKELY(debug_render())) { |
3689 | qDebug().nospace() << "Rendering:" << Qt::endl |
3690 | << " -> Opaque: " << qsg_countNodesInBatches(batches: m_opaqueBatches) << " nodes in " << m_opaqueBatches.size() << " batches..." << Qt::endl |
3691 | << " -> Alpha: " << qsg_countNodesInBatches(batches: m_alphaBatches) << " nodes in " << m_alphaBatches.size() << " batches..." ; |
3692 | } |
3693 | |
3694 | m_current_opacity = 1; |
3695 | m_currentMaterial = nullptr; |
3696 | m_currentShader = nullptr; |
3697 | m_currentProgram = nullptr; |
3698 | m_currentClipState.reset(); |
3699 | |
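    // QRhiViewport uses OpenGL-style bottom-left origin coordinates, hence
    // the flipped y when converting from the Qt-style viewport rect.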
3700 | const QRect viewport = viewportRect(); |
3701 | |
3702 | bool renderOpaque = !debug_noopaque(); |
3703 | bool renderAlpha = !debug_noalpha(); |
3704 | |
3705 | m_pstate.viewport = |
3706 | QRhiViewport(viewport.x(), deviceRect().bottom() - viewport.bottom(), viewport.width(), |
3707 | viewport.height(), VIEWPORT_MIN_DEPTH, VIEWPORT_MAX_DEPTH); |
3708 | m_pstate.clearColor = clearColor(); |
3709 | m_pstate.dsClear = QRhiDepthStencilClearValue(1.0f, 0); |
3710 | m_pstate.viewportSet = false; |
3711 | m_pstate.scissorSet = false; |
3712 | |
3713 | m_gstate.depthTest = useDepthBuffer(); |
3714 | m_gstate.depthWrite = useDepthBuffer(); |
3715 | m_gstate.depthFunc = QRhiGraphicsPipeline::Less; |
3716 | m_gstate.blending = false; |
3717 | |
3718 | m_gstate.cullMode = QRhiGraphicsPipeline::None; |
3719 | m_gstate.polygonMode = QRhiGraphicsPipeline::Fill; |
3720 | m_gstate.colorWrite = QRhiGraphicsPipeline::R |
3721 | | QRhiGraphicsPipeline::G |
3722 | | QRhiGraphicsPipeline::B |
3723 | | QRhiGraphicsPipeline::A; |
3724 | m_gstate.usesScissor = false; |
3725 | m_gstate.stencilTest = false; |
3726 | |
3727 | m_gstate.sampleCount = renderTarget().rt->sampleCount(); |
3728 | |
3729 | ctx->opaqueRenderBatches.clear(); |
3730 | if (Q_LIKELY(renderOpaque)) { |
3731 | for (int i = 0, ie = m_opaqueBatches.size(); i != ie; ++i) { |
3732 | Batch *b = m_opaqueBatches.at(i); |
3733 | PreparedRenderBatch renderBatch; |
3734 | bool ok; |
3735 | if (b->merged) |
                ok = prepareRenderMergedBatch(b, &renderBatch);
            else
                ok = prepareRenderUnmergedBatch(b, &renderBatch);
            if (ok)
                ctx->opaqueRenderBatches.append(renderBatch);
3741 | } |
3742 | } |
3743 | |
3744 | m_gstate.blending = true; |
3745 | // factors never change, always set for premultiplied alpha based blending |
3746 | |
3747 | // depth test stays enabled (if useDepthBuffer(), that is) but no need |
3748 | // to write out depth from the transparent (back-to-front) pass |
3749 | m_gstate.depthWrite = false; |
3750 | |
3751 | // special case: the 3D plane mode tests against the depth buffer, but does |
3752 | // not write (and all batches are alpha because this render mode evaluates |
3753 | // to useDepthBuffer()==false) |
3754 | if (m_renderMode == QSGRendererInterface::RenderMode3D) { |
3755 | Q_ASSERT(m_opaqueBatches.isEmpty()); |
3756 | m_gstate.depthTest = true; |
3757 | } |
3758 | |
3759 | ctx->alphaRenderBatches.clear(); |
3760 | if (Q_LIKELY(renderAlpha)) { |
3761 | for (int i = 0, ie = m_alphaBatches.size(); i != ie; ++i) { |
3762 | Batch *b = m_alphaBatches.at(i); |
3763 | PreparedRenderBatch renderBatch; |
3764 | bool ok; |
3765 | if (b->merged) |
                ok = prepareRenderMergedBatch(b, &renderBatch);
            else if (b->isRenderNode)
                ok = prepareRhiRenderNode(b, &renderBatch);
            else
                ok = prepareRenderUnmergedBatch(b, &renderBatch);
            if (ok)
                ctx->alphaRenderBatches.append(renderBatch);
3773 | } |
3774 | } |
3775 | |
3776 | m_rebuild = 0; |
3777 | |
3778 | #if defined(QSGBATCHRENDERER_INVALIDATE_WEDGED_NODES) |
3779 | m_renderOrderRebuildLower = -1; |
3780 | m_renderOrderRebuildUpper = -1; |
3781 | #endif |
3782 | |
3783 | if (m_visualizer->mode() != Visualizer::VisualizeNothing) |
3784 | m_visualizer->prepareVisualize(); |
3785 | |
    renderTarget().cb->resourceUpdate(m_resourceUpdates);
3787 | m_resourceUpdates = nullptr; |
3788 | } |
3789 | |
3790 | void Renderer::beginRenderPass(RenderPassContext *) |
3791 | { |
3792 | const QSGRenderTarget &rt(renderTarget()); |
    rt.cb->beginPass(rt.rt, m_pstate.clearColor, m_pstate.dsClear, nullptr,
                     // we cannot tell if the application will have
                     // native rendering thrown in to this pass
                     // (QQuickWindow::beginExternalCommands()), so
                     // we have no choice but to set the flag always
                     // (thus triggering using secondary command
                     // buffers with Vulkan)
                     QRhiCommandBuffer::ExternalContent);
3801 | |
3802 | if (m_renderPassRecordingCallbacks.start) |
3803 | m_renderPassRecordingCallbacks.start(m_renderPassRecordingCallbacks.userData); |
3804 | } |
3805 | |
3806 | void Renderer::recordRenderPass(RenderPassContext *ctx) |
3807 | { |
3808 | // prepareRenderPass and recordRenderPass must always be called together. |
3809 | // They are separate because beginRenderPass and endRenderPass are optional. |
3810 | // |
// The valid call sequences are therefore:
3812 | // prepare, begin, record, end |
3813 | // or |
3814 | // prepare, record |
3815 | |
3816 | if (!ctx->valid) |
        qWarning("recordRenderPass() called without a prepared render pass context");
3818 | |
3819 | ctx->valid = false; |
3820 | |
3821 | QRhiCommandBuffer *cb = renderTarget().cb; |
    cb->debugMarkBegin(QByteArrayLiteral("Qt Quick scene render"));
3823 | |
3824 | for (int i = 0, ie = ctx->opaqueRenderBatches.size(); i != ie; ++i) { |
3825 | PreparedRenderBatch *renderBatch = &ctx->opaqueRenderBatches[i]; |
3826 | if (renderBatch->batch->merged) |
3827 | renderMergedBatch(renderBatch); |
3828 | else |
3829 | renderUnmergedBatch(renderBatch); |
3830 | } |
3831 | |
3832 | for (int i = 0, ie = ctx->alphaRenderBatches.size(); i != ie; ++i) { |
3833 | PreparedRenderBatch *renderBatch = &ctx->alphaRenderBatches[i]; |
3834 | if (renderBatch->batch->merged) |
3835 | renderMergedBatch(renderBatch); |
3836 | else if (renderBatch->batch->isRenderNode) |
            renderRhiRenderNode(renderBatch->batch);
3838 | else |
3839 | renderUnmergedBatch(renderBatch); |
3840 | } |
3841 | |
3842 | if (m_renderMode == QSGRendererInterface::RenderMode3D) { |
3843 | // depth post-pass |
3844 | for (int i = 0, ie = ctx->alphaRenderBatches.size(); i != ie; ++i) { |
3845 | PreparedRenderBatch *renderBatch = &ctx->alphaRenderBatches[i]; |
3846 | if (renderBatch->batch->merged) |
                renderMergedBatch(renderBatch, true);
            else if (!renderBatch->batch->isRenderNode) // rendernodes are skipped here for now
                renderUnmergedBatch(renderBatch, true);
3850 | } |
3851 | } |
3852 | |
3853 | if (m_currentShader) |
        setActiveRhiShader(nullptr, nullptr);
3855 | |
3856 | cb->debugMarkEnd(); |
3857 | |
3858 | if (Q_UNLIKELY(debug_render())) { |
        qDebug(" -> times: build: %d, prepare(opaque/alpha): %d/%d, sorting: %d, upload(opaque/alpha): %d/%d, record rendering: %d",
3860 | (int) ctx->timeRenderLists, |
3861 | (int) ctx->timePrepareOpaque, (int) ctx->timePrepareAlpha, |
3862 | (int) ctx->timeSorting, |
3863 | (int) ctx->timeUploadOpaque, (int) ctx->timeUploadAlpha, |
3864 | (int) ctx->timer.elapsed()); |
3865 | } |
3866 | } |
3867 | |
3868 | void Renderer::endRenderPass(RenderPassContext *) |
3869 | { |
3870 | if (m_renderPassRecordingCallbacks.end) |
3871 | m_renderPassRecordingCallbacks.end(m_renderPassRecordingCallbacks.userData); |
3872 | |
3873 | if (m_visualizer->mode() != Visualizer::VisualizeNothing) |
3874 | m_visualizer->visualize(); |
3875 | |
3876 | renderTarget().cb->endPass(); |
3877 | } |
3878 | |
3879 | struct RenderNodeState : public QSGRenderNode::RenderState |
3880 | { |
3881 | const QMatrix4x4 *projectionMatrix() const override { return m_projectionMatrix; } |
3882 | QRect scissorRect() const override { return m_scissorRect; } |
3883 | bool scissorEnabled() const override { return m_scissorEnabled; } |
3884 | int stencilValue() const override { return m_stencilValue; } |
3885 | bool stencilEnabled() const override { return m_stencilEnabled; } |
3886 | const QRegion *clipRegion() const override { return nullptr; } |
3887 | |
3888 | const QMatrix4x4 *m_projectionMatrix; |
3889 | QRect m_scissorRect; |
3890 | int m_stencilValue; |
3891 | bool m_scissorEnabled; |
3892 | bool m_stencilEnabled; |
3893 | }; |
3894 | |
3895 | bool Renderer::prepareRhiRenderNode(Batch *batch, PreparedRenderBatch *renderBatch) |
3896 | { |
3897 | if (Q_UNLIKELY(debug_render())) |
        qDebug() << " -" << batch << "rendernode";
3899 | |
3900 | Q_ASSERT(batch->first->isRenderNode); |
3901 | RenderNodeElement *e = static_cast<RenderNodeElement *>(batch->first); |
3902 | |
    setActiveRhiShader(nullptr, nullptr);

    QSGRenderNodePrivate *rd = QSGRenderNodePrivate::get(e->renderNode);
3906 | rd->m_clip_list = nullptr; |
3907 | if (m_renderMode != QSGRendererInterface::RenderMode3D) { |
3908 | QSGNode *clip = e->renderNode->parent(); |
3909 | while (clip != rootNode()) { |
3910 | if (clip->type() == QSGNode::ClipNodeType) { |
3911 | rd->m_clip_list = static_cast<QSGClipNode *>(clip); |
3912 | break; |
3913 | } |
3914 | clip = clip->parent(); |
3915 | } |
        updateClipState(rd->m_clip_list, batch);
3917 | } |
3918 | |
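    // Resolve the model matrix for the render node: start from the batch
    // root's combined matrix (if any) and multiply in the nearest enclosing
    // transform node's combined matrix.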
3919 | QSGNode *xform = e->renderNode->parent(); |
3920 | QMatrix4x4 matrix; |
3921 | QSGNode *root = rootNode(); |
3922 | if (e->root) { |
        matrix = qsg_matrixForRoot(e->root);
3924 | root = e->root->sgNode; |
3925 | } |
3926 | while (xform != root) { |
3927 | if (xform->type() == QSGNode::TransformNodeType) { |
3928 | matrix = matrix * static_cast<QSGTransformNode *>(xform)->combinedMatrix(); |
3929 | break; |
3930 | } |
3931 | xform = xform->parent(); |
3932 | } |
3933 | rd->m_localMatrix = matrix; |
3934 | rd->m_matrix = &rd->m_localMatrix; |
3935 | |
3936 | QSGNode *opacity = e->renderNode->parent(); |
3937 | rd->m_opacity = 1.0; |
3938 | while (opacity != rootNode()) { |
3939 | if (opacity->type() == QSGNode::OpacityNodeType) { |
3940 | rd->m_opacity = static_cast<QSGOpacityNode *>(opacity)->combinedOpacity(); |
3941 | break; |
3942 | } |
3943 | opacity = opacity->parent(); |
3944 | } |
3945 | |
3946 | rd->m_rt = renderTarget(); |
3947 | |
3948 | rd->m_projectionMatrix = projectionMatrix(); |
3949 | if (useDepthBuffer()) { |
3950 | rd->m_projectionMatrix(2, 2) = m_zRange; |
        rd->m_projectionMatrix(2, 3) = calculateElementZOrder(e, m_zRange);
3952 | } |
3953 | |
3954 | e->renderNode->prepare(); |
3955 | |
3956 | renderBatch->batch = batch; |
3957 | renderBatch->sms = nullptr; |
3958 | |
3959 | return true; |
3960 | } |
3961 | |
3962 | void Renderer::renderRhiRenderNode(const Batch *batch) |
3963 | { |
3964 | if (batch->clipState.type & ClipState::StencilClip) |
3965 | enqueueStencilDraw(batch); |
3966 | |
3967 | RenderNodeElement *e = static_cast<RenderNodeElement *>(batch->first); |
    QSGRenderNodePrivate *rd = QSGRenderNodePrivate::get(e->renderNode);
3969 | |
3970 | RenderNodeState state; |
3971 | state.m_projectionMatrix = &rd->m_projectionMatrix; |
3972 | const std::array<int, 4> scissor = batch->clipState.scissor.scissor(); |
3973 | state.m_scissorRect = QRect(scissor[0], scissor[1], scissor[2], scissor[3]); |
3974 | state.m_stencilValue = batch->clipState.stencilRef; |
3975 | state.m_scissorEnabled = batch->clipState.type & ClipState::ScissorClip; |
3976 | state.m_stencilEnabled = batch->clipState.type & ClipState::StencilClip; |
3977 | |
3978 | const QSGRenderNode::StateFlags changes = e->renderNode->changedStates(); |
3979 | |
3980 | QRhiCommandBuffer *cb = renderTarget().cb; |
    const bool needsExternal = !e->renderNode->flags().testFlag(QSGRenderNode::NoExternalRendering);
3982 | if (needsExternal) |
3983 | cb->beginExternal(); |
    e->renderNode->render(&state);
3985 | if (needsExternal) |
3986 | cb->endExternal(); |
3987 | |
3988 | rd->m_matrix = nullptr; |
3989 | rd->m_clip_list = nullptr; |
3990 | |
3991 | if ((changes & QSGRenderNode::ViewportState) |
3992 | || (changes & QSGRenderNode::ScissorState)) |
3993 | { |
3994 | // Reset both flags if either is reported as changed, since with the rhi |
3995 | // it could be setViewport() that will record the resetting of the scissor. |
3996 | m_pstate.viewportSet = false; |
3997 | m_pstate.scissorSet = false; |
3998 | } |
3999 | |
4000 | // Do not bother with RenderTargetState. Where applicable, endExternal() |
4001 | // ensures the correct target is rebound. For others (like Vulkan) it makes |
4002 | // no sense since render() could not possibly do that on our command buffer |
4003 | // which is in renderpass recording state. |
4004 | } |
4005 | |
4006 | void Renderer::setVisualizationMode(const QByteArray &mode) |
4007 | { |
4008 | if (mode.isEmpty()) |
4009 | m_visualizer->setMode(Visualizer::VisualizeNothing); |
    else if (mode == "clip")
        m_visualizer->setMode(Visualizer::VisualizeClipping);
    else if (mode == "overdraw")
        m_visualizer->setMode(Visualizer::VisualizeOverdraw);
    else if (mode == "batches")
        m_visualizer->setMode(Visualizer::VisualizeBatches);
    else if (mode == "changes")
        m_visualizer->setMode(Visualizer::VisualizeChanges);
4018 | } |
4019 | |
4020 | bool Renderer::hasVisualizationModeWithContinuousUpdate() const |
4021 | { |
4022 | return m_visualizer->mode() == Visualizer::VisualizeOverdraw; |
4023 | } |
4024 | |
4025 | bool operator==(const GraphicsState &a, const GraphicsState &b) noexcept |
4026 | { |
4027 | return a.depthTest == b.depthTest |
4028 | && a.depthWrite == b.depthWrite |
4029 | && a.depthFunc == b.depthFunc |
4030 | && a.blending == b.blending |
4031 | && a.srcColor == b.srcColor |
4032 | && a.dstColor == b.dstColor |
4033 | && a.srcAlpha == b.srcAlpha |
4034 | && a.dstAlpha == b.dstAlpha |
4035 | && a.colorWrite == b.colorWrite |
4036 | && a.cullMode == b.cullMode |
4037 | && a.usesScissor == b.usesScissor |
4038 | && a.stencilTest == b.stencilTest |
4039 | && a.sampleCount == b.sampleCount |
4040 | && a.drawMode == b.drawMode |
4041 | && a.lineWidth == b.lineWidth |
4042 | && a.polygonMode == b.polygonMode; |
4043 | } |
4044 | |
4045 | bool operator!=(const GraphicsState &a, const GraphicsState &b) noexcept |
4046 | { |
4047 | return !(a == b); |
4048 | } |
4049 | |
4050 | size_t qHash(const GraphicsState &s, size_t seed) noexcept |
4051 | { |
4052 | // do not bother with all fields |
4053 | return seed |
4054 | + s.depthTest * 1000 |
4055 | + s.depthWrite * 100 |
4056 | + s.depthFunc |
4057 | + s.blending * 10 |
4058 | + s.srcColor |
4059 | + s.cullMode |
4060 | + s.usesScissor |
4061 | + s.stencilTest |
4062 | + s.sampleCount; |
4063 | } |
4064 | |
4065 | bool operator==(const GraphicsPipelineStateKey &a, const GraphicsPipelineStateKey &b) noexcept |
4066 | { |
4067 | return a.state == b.state |
4068 | && a.sms->materialShader == b.sms->materialShader |
4069 | && a.renderTargetDescription == b.renderTargetDescription |
4070 | && a.srbLayoutDescription == b.srbLayoutDescription; |
4071 | } |
4072 | |
4073 | bool operator!=(const GraphicsPipelineStateKey &a, const GraphicsPipelineStateKey &b) noexcept |
4074 | { |
4075 | return !(a == b); |
4076 | } |
4077 | |
4078 | size_t qHash(const GraphicsPipelineStateKey &k, size_t seed) noexcept |
4079 | { |
    return qHash(k.state, seed)
        ^ qHash(k.sms->materialShader)
4082 | ^ k.extra.renderTargetDescriptionHash |
4083 | ^ k.extra.srbLayoutDescriptionHash; |
4084 | } |
4085 | |
4086 | Visualizer::Visualizer(Renderer *renderer) |
4087 | : m_renderer(renderer), |
4088 | m_visualizeMode(VisualizeNothing) |
4089 | { |
4090 | } |
4091 | |
4092 | Visualizer::~Visualizer() |
4093 | { |
4094 | } |
4095 | |
4096 | #define QSGNODE_DIRTY_PARENT (QSGNode::DirtyNodeAdded \ |
4097 | | QSGNode::DirtyOpacity \ |
4098 | | QSGNode::DirtyMatrix \ |
4099 | | QSGNode::DirtyNodeRemoved) |
4100 | |
4101 | void Visualizer::visualizeChangesPrepare(Node *n, uint parentChanges) |
4102 | { |
4103 | uint childDirty = (parentChanges | n->dirtyState) & QSGNODE_DIRTY_PARENT; |
4104 | uint selfDirty = n->dirtyState | parentChanges; |
4105 | if (n->type() == QSGNode::GeometryNodeType && selfDirty != 0) |
        m_visualizeChangeSet.insert(n, selfDirty);
4107 | SHADOWNODE_TRAVERSE(n) { |
        visualizeChangesPrepare(child, childDirty);
4109 | } |
4110 | } |
4111 | |
4112 | } // namespace QSGBatchRenderer |
4113 | |
4114 | QT_END_NAMESPACE |
4115 | |
4116 | #include "moc_qsgbatchrenderer_p.cpp" |
4117 | |