1 | // Copyright (C) 2017 Klaralvdalens Datakonsult AB (KDAB). |
2 | // SPDX-License-Identifier: LicenseRef-Qt-Commercial OR LGPL-3.0-only OR GPL-2.0-only OR GPL-3.0-only |
3 | |
4 | #include "animationutils_p.h" |
5 | #include <Qt3DAnimation/private/handler_p.h> |
6 | #include <Qt3DAnimation/private/managers_p.h> |
7 | #include <Qt3DAnimation/private/clipblendnode_p.h> |
8 | #include <Qt3DAnimation/private/clipblendnodevisitor_p.h> |
9 | #include <Qt3DAnimation/private/clipblendvalue_p.h> |
10 | #include <QtGui/qvector2d.h> |
11 | #include <QtGui/qvector3d.h> |
12 | #include <QtGui/qvector4d.h> |
13 | #include <QtGui/qquaternion.h> |
14 | #include <QtGui/qcolor.h> |
15 | #include <QtCore/qvariant.h> |
16 | #include <QtCore/qvarlengtharray.h> |
17 | #include <Qt3DAnimation/private/animationlogging_p.h> |
18 | |
19 | #include <numeric> |
20 | |
21 | QT_BEGIN_NAMESPACE |
22 | |
23 | namespace { |
24 | const auto slerpThreshold = 0.01f; |
25 | } |
26 | |
27 | namespace Qt3DAnimation { |
28 | namespace Animation { |
29 | |
30 | inline QVector<float> valueToVector(const QVector3D &value) |
31 | { |
32 | return { value.x(), value.y(), value.z() }; |
33 | } |
34 | |
35 | inline QVector<float> valueToVector(const QQuaternion &value) |
36 | { |
37 | return { value.scalar(), value.x(), value.y(), value.z() }; |
38 | } |
39 | |
40 | ClipEvaluationData evaluationDataForClip(AnimationClip *clip, |
41 | const AnimatorEvaluationData &animatorData) |
42 | { |
43 | // global time values expected in seconds |
44 | ClipEvaluationData result; |
45 | result.currentLoop = animatorData.currentLoop; |
    result.localTime = localTimeFromElapsedTime(animatorData.currentTime, animatorData.elapsedTime,
                                                animatorData.playbackRate, clip->duration(),
                                                animatorData.loopCount, result.currentLoop);
    result.isFinalFrame = isFinalFrame(result.localTime, clip->duration(),
                                       result.currentLoop, animatorData.loopCount,
                                       animatorData.playbackRate);
    const bool hasNormalizedTime = isValidNormalizedTime(animatorData.normalizedLocalTime);
53 | result.normalizedLocalTime = hasNormalizedTime ? animatorData.normalizedLocalTime |
54 | : result.localTime / clip->duration(); |
55 | return result; |
56 | } |
57 | |
58 | double localTimeFromElapsedTime(double t_current_local, |
59 | double t_elapsed_global, |
60 | double playbackRate, |
61 | double duration, |
62 | int loopCount, |
                                int &currentLoop)
64 | { |
65 | // Calculate the new local time. |
66 | // playhead + rate * dt |
67 | // where playhead is completed loops * duration + current loop local time |
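    // Worked example (illustrative numbers only): with duration = 2.0 s, currentLoop = 1,
    // t_current_local = 0.5 s, playbackRate = 1.0 and t_elapsed_global = 0.25 s, the playhead
    // becomes 1 * 2.0 + 0.5 + 1.0 * 0.25 = 2.75 s; for a looping animator the code below
    // then maps this to loop number 1 with a local time of fmod(2.75, 2.0) = 0.75 s.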
68 | double t_local = currentLoop * duration + t_current_local + playbackRate * t_elapsed_global; |
69 | double loopNumber = 0; |
70 | if (loopCount == 1) { |
        t_local = qBound(0.0, t_local, duration);
    } else if (loopCount < 0) {
        // Loops forever
        (void) std::modf(t_local / duration, &loopNumber);
        t_local = std::fmod(t_local, duration);
    } else {
        // N loops
        t_local = qBound(0.0, t_local, double(loopCount) * duration);
        (void) std::modf(t_local / duration, &loopNumber);
        t_local = std::fmod(t_local, duration);
81 | |
        // Ensure we clamp to end of final loop
        if (int(loopNumber) == loopCount || int(loopNumber) < 0) {
85 | loopNumber = loopCount - 1; |
86 | t_local = playbackRate >= 0.0 ? duration : 0.0; |
87 | } |
88 | } |
89 | |
90 | qCDebug(Jobs) << "current loop =" << loopNumber |
91 | << "t =" << t_local |
92 | << "duration =" << duration; |
93 | |
94 | currentLoop = int(loopNumber); |
95 | |
96 | return t_local; |
97 | } |
98 | |
99 | double phaseFromElapsedTime(double t_current_local, |
100 | double t_elapsed_global, |
101 | double playbackRate, |
102 | double duration, |
103 | int loopCount, |
                            int &currentLoop)
105 | { |
106 | const double t_local = localTimeFromElapsedTime(t_current_local, t_elapsed_global, playbackRate, |
107 | duration, loopCount, currentLoop); |
108 | return t_local / duration; |
109 | } |
110 | |
111 | /*! |
112 | \internal |
113 | |
114 | Calculates the indices required to map from the component ordering within the |
115 | provided \a channel, into the standard channel orderings expected by Qt types. |
116 | |
117 | For example, given a channel representing a rotation with the components ordered |
    as X, Y, Z, W, this function will return the indices [3, 0, 1, 2] which can then
119 | later be used as part of the format vector in the formatClipResults() function to |
120 | remap the channels into the standard W, X, Y, Z order required by QQuaternion. |
121 | */ |
122 | ComponentIndices channelComponentsToIndices(const Channel &channel, |
123 | int dataType, |
124 | qsizetype expectedComponentCount, |
125 | qsizetype offset) |
126 | { |
127 | static const QList<char> standardSuffixes = { 'X', 'Y', 'Z', 'W' }; |
128 | static const QList<char> quaternionSuffixes = { 'W', 'X', 'Y', 'Z' }; |
129 | static const QList<char> colorSuffixesRGB = { 'R', 'G', 'B' }; |
130 | static const QList<char> colorSuffixesRGBA = { 'R', 'G', 'B', 'A' }; |
131 | |
132 | switch (dataType) { |
133 | case QMetaType::QQuaternion: |
        return channelComponentsToIndicesHelper(channel, expectedComponentCount,
                                                offset, quaternionSuffixes);
    case QMetaType::QColor:
        if (expectedComponentCount == 3)
            return channelComponentsToIndicesHelper(channel, expectedComponentCount,
                                                    offset, colorSuffixesRGB);
        Q_ASSERT(expectedComponentCount == 4);
        return channelComponentsToIndicesHelper(channel, expectedComponentCount,
                                                offset, colorSuffixesRGBA);
    default:
        return channelComponentsToIndicesHelper(channel, expectedComponentCount,
                                                offset, standardSuffixes);
146 | } |
147 | } |
148 | |
149 | ComponentIndices channelComponentsToIndicesHelper(const Channel &channel, |
150 | qsizetype expectedComponentCount, |
151 | qsizetype offset, |
152 | const QList<char> &suffixes) |
153 | { |
154 | const qsizetype actualComponentCount = channel.channelComponents.size(); |
155 | if (actualComponentCount != expectedComponentCount) { |
156 | qWarning() << "Data type expects" << expectedComponentCount |
157 | << "but found" << actualComponentCount << "components in the animation clip" ; |
158 | } |
159 | |
160 | ComponentIndices indices(expectedComponentCount); |
161 | |
162 | // Generate the set of channel suffixes |
163 | QList<char> channelSuffixes; |
    channelSuffixes.reserve(expectedComponentCount);
165 | for (qsizetype i = 0; i < expectedComponentCount; ++i) { |
166 | const QString &componentName = channel.channelComponents[i].name; |
167 | |
        // An unset component name indicates that no mapping is necessary
169 | // and the index can be used as-is. |
170 | if (componentName.isEmpty()) { |
171 | indices[i] = i + offset; |
172 | continue; |
173 | } |
174 | |
        char channelSuffix = componentName.at(componentName.size() - 1).toLatin1();
        channelSuffixes.push_back(channelSuffix);
177 | } |
178 | |
179 | // We can short-circuit if the channels were all unnamed (in order) |
180 | if (channelSuffixes.isEmpty()) |
181 | return indices; |
182 | |
    // Find the position of each standard suffix within the channel's suffixes
    for (qsizetype i = 0; i < expectedComponentCount; ++i) {
        qsizetype index = channelSuffixes.indexOf(suffixes[i]);
186 | if (index != -1) |
187 | indices[i] = index + offset; |
188 | else |
189 | indices[i] = -1; |
190 | } |
191 | |
192 | return indices; |
193 | } |
194 | |
195 | ClipResults evaluateClipAtLocalTime(AnimationClip *clip, float localTime) |
196 | { |
197 | QVector<float> channelResults; |
198 | Q_ASSERT(clip); |
199 | |
200 | // Ensure we have enough storage to hold the evaluations |
    channelResults.resize(clip->channelCount());
202 | |
203 | // Iterate over channels and evaluate the fcurves |
204 | const auto &channels = clip->channels(); |
205 | int i = 0; |
206 | for (const Channel &channel : channels) { |
        if (channel.name.contains(QStringLiteral("Rotation")) &&
208 | channel.channelComponents.size() == 4) { |
209 | |
210 | // Try to SLERP |
211 | const int nbKeyframes = channel.channelComponents[0].fcurve.keyframeCount(); |
            const bool canSlerp = std::find_if(std::begin(channel.channelComponents) + 1,
                                               std::end(channel.channelComponents),
                                               [nbKeyframes](const ChannelComponent &v) {
                                                   return v.fcurve.keyframeCount() != nbKeyframes;
                                               }) == std::end(channel.channelComponents);
217 | |
218 | if (!canSlerp) { |
219 | // Interpolate per component |
                for (const auto &channelComponent : std::as_const(channel.channelComponents)) {
                    const int lowerKeyframeBound = channelComponent.fcurve.lowerKeyframeBound(localTime);
                    channelResults[i++] = channelComponent.fcurve.evaluateAtTime(localTime, lowerKeyframeBound);
223 | } |
224 | } else { |
                // There's only one keyframe. We can't compute omega. Interpolate per component.
                if (channel.channelComponents[0].fcurve.keyframeCount() == 1) {
                    for (const auto &channelComponent : std::as_const(channel.channelComponents))
                        channelResults[i++] = channelComponent.fcurve.keyframe(0).value;
229 | } else { |
230 | auto quaternionFromChannel = [channel](const int keyframe) { |
                        const float w = channel.channelComponents[0].fcurve.keyframe(keyframe).value;
                        const float x = channel.channelComponents[1].fcurve.keyframe(keyframe).value;
                        const float y = channel.channelComponents[2].fcurve.keyframe(keyframe).value;
                        const float z = channel.channelComponents[3].fcurve.keyframe(keyframe).value;
235 | QQuaternion quat{w,x,y,z}; |
236 | quat.normalize(); |
237 | return quat; |
238 | }; |
239 | |
                    const int lowerKeyframeBound = std::max(0, channel.channelComponents[0].fcurve.lowerKeyframeBound(localTime));
241 | const auto lowerQuat = quaternionFromChannel(lowerKeyframeBound); |
242 | const auto higherQuat = quaternionFromChannel(lowerKeyframeBound + 1); |
                    auto cosHalfTheta = QQuaternion::dotProduct(lowerQuat, higherQuat);
244 | // If the two keyframe quaternions are equal, just return the first one as the interpolated value. |
                    if (std::abs(cosHalfTheta) >= 1.0f) {
246 | channelResults[i++] = lowerQuat.scalar(); |
247 | channelResults[i++] = lowerQuat.x(); |
248 | channelResults[i++] = lowerQuat.y(); |
249 | channelResults[i++] = lowerQuat.z(); |
250 | } else { |
                        const auto sinHalfTheta = std::sqrt(1.0f - std::pow(cosHalfTheta, 2.0f));
                        if (std::abs(sinHalfTheta) < ::slerpThreshold) {
                            auto initial_i = i;
                            for (const auto &channelComponent : std::as_const(channel.channelComponents))
                                channelResults[i++] = channelComponent.fcurve.evaluateAtTime(localTime, lowerKeyframeBound);
256 | |
257 | // Normalize the resulting quaternion |
258 | QQuaternion quat{channelResults[initial_i], channelResults[initial_i+1], channelResults[initial_i+2], channelResults[initial_i+3]}; |
259 | quat.normalize(); |
260 | channelResults[initial_i+0] = quat.scalar(); |
261 | channelResults[initial_i+1] = quat.x(); |
262 | channelResults[initial_i+2] = quat.y(); |
263 | channelResults[initial_i+3] = quat.z(); |
264 | } else { |
265 | const auto reverseQ1 = cosHalfTheta < 0 ? -1.0f : 1.0f; |
266 | cosHalfTheta *= reverseQ1; |
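                            // Standard slerp weighting, which evaluateAtTimeAsSlerp() is
                            // assumed to apply per component:
                            //   q(t) = (sin((1 - t) * halfTheta) * q1
                            //           + reverseQ1 * sin(t * halfTheta) * q2) / sinHalfTheta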
                            const auto halfTheta = std::acos(cosHalfTheta);
                            for (const auto &channelComponent : std::as_const(channel.channelComponents))
                                channelResults[i++] = channelComponent.fcurve.evaluateAtTimeAsSlerp(localTime,
                                                                                                    lowerKeyframeBound,
                                                                                                    halfTheta,
                                                                                                    sinHalfTheta,
                                                                                                    reverseQ1);
274 | } |
275 | } |
276 | } |
277 | } |
278 | } else { |
279 | // If the channel is not a Rotation, apply linear interpolation per channel component |
            // TODO: How do we handle other interpolations? For example, color interpolation
            // in a linear perceptual way or other non-linear spaces?
            for (const auto &channelComponent : std::as_const(channel.channelComponents)) {
                const int lowerKeyframeBound = channelComponent.fcurve.lowerKeyframeBound(localTime);
                channelResults[i++] = channelComponent.fcurve.evaluateAtTime(localTime, lowerKeyframeBound);
285 | } |
286 | } |
287 | } |
288 | return channelResults; |
289 | } |
290 | |
291 | ClipResults evaluateClipAtPhase(AnimationClip *clip, float phase) |
292 | { |
293 | // Calculate the clip local time from the phase and clip duration |
294 | const double localTime = phase * clip->duration(); |
295 | return evaluateClipAtLocalTime(clip, localTime); |
296 | } |
297 | |
298 | template<typename Container> |
299 | Container mapChannelResultsToContainer(const MappingData &mappingData, |
300 | const QVector<float> &channelResults) |
301 | { |
302 | Container r; |
303 | r.reserve(channelResults.size()); |
304 | |
305 | const ComponentIndices channelIndices = mappingData.channelIndices; |
306 | for (const int channelIndex : channelIndices) |
        r.push_back(channelResults.at(channelIndex));
308 | |
309 | return r; |
310 | } |
311 | |
312 | QVariant buildPropertyValue(const MappingData &mappingData, const QVector<float> &channelResults) |
313 | { |
314 | const int vectorOfFloatType = qMetaTypeId<QList<float>>(); |
315 | |
316 | if (mappingData.type == vectorOfFloatType) |
        return QVariant::fromValue(channelResults);
318 | |
319 | switch (mappingData.type) { |
320 | case QMetaType::Float: |
321 | case QMetaType::Double: { |
        return QVariant::fromValue(channelResults[mappingData.channelIndices[0]]);
323 | } |
324 | |
325 | case QMetaType::QVector2D: { |
326 | const QVector2D vector(channelResults[mappingData.channelIndices[0]], |
327 | channelResults[mappingData.channelIndices[1]]); |
        return QVariant::fromValue(vector);
329 | } |
330 | |
331 | case QMetaType::QVector3D: { |
332 | const QVector3D vector(channelResults[mappingData.channelIndices[0]], |
333 | channelResults[mappingData.channelIndices[1]], |
334 | channelResults[mappingData.channelIndices[2]]); |
        return QVariant::fromValue(vector);
336 | } |
337 | |
338 | case QMetaType::QVector4D: { |
339 | const QVector4D vector(channelResults[mappingData.channelIndices[0]], |
340 | channelResults[mappingData.channelIndices[1]], |
341 | channelResults[mappingData.channelIndices[2]], |
342 | channelResults[mappingData.channelIndices[3]]); |
        return QVariant::fromValue(vector);
344 | } |
345 | |
346 | case QMetaType::QQuaternion: { |
347 | QQuaternion q(channelResults[mappingData.channelIndices[0]], |
348 | channelResults[mappingData.channelIndices[1]], |
349 | channelResults[mappingData.channelIndices[2]], |
350 | channelResults[mappingData.channelIndices[3]]); |
351 | q.normalize(); |
        return QVariant::fromValue(q);
353 | } |
354 | |
355 | case QMetaType::QColor: { |
356 | // A color can either be a vec3 or a vec4 |
        const QColor color =
                QColor::fromRgbF(channelResults[mappingData.channelIndices[0]],
                                 channelResults[mappingData.channelIndices[1]],
                                 channelResults[mappingData.channelIndices[2]],
                                 mappingData.channelIndices.size() > 3 ? channelResults[mappingData.channelIndices[3]] : 1.0f);
        return QVariant::fromValue(color);
363 | } |
364 | |
365 | case QMetaType::QVariantList: { |
366 | const QVariantList results = mapChannelResultsToContainer<QVariantList>( |
367 | mappingData, channelResults); |
        return QVariant::fromValue(results);
369 | } |
370 | |
371 | default: |
372 | qWarning() << "Unhandled animation type" << mappingData.type; |
373 | break; |
374 | } |
375 | |
376 | return QVariant(); |
377 | } |
378 | |
379 | AnimationRecord prepareAnimationRecord(Qt3DCore::QNodeId animatorId, |
380 | const QVector<MappingData> &mappingDataVec, |
381 | const QVector<float> &channelResults, |
382 | bool finalFrame, |
383 | float normalizedLocalTime) |
384 | { |
385 | AnimationRecord record; |
386 | record.finalFrame = finalFrame; |
387 | record.animatorId = animatorId; |
388 | record.normalizedTime = normalizedLocalTime; |
389 | |
390 | QVarLengthArray<Skeleton *, 4> dirtySkeletons; |
391 | |
392 | // Iterate over the mappings |
393 | for (const MappingData &mappingData : mappingDataVec) { |
394 | if (!mappingData.propertyName) |
395 | continue; |
396 | |
397 | // Build the new value from the channel/fcurve evaluation results |
398 | const QVariant v = buildPropertyValue(mappingData, channelResults); |
399 | if (!v.isValid()) |
400 | continue; |
401 | |
402 | // TODO: Avoid wrapping joint transform components up in a variant, just |
403 | // to immediately unwrap them again. Refactor buildPropertyValue() to call |
404 | // helper functions that we can call directly here for joints. |
405 | if (mappingData.skeleton && mappingData.jointIndex != -1) { |
406 | // Remember that this skeleton is dirty. We will ask each dirty skeleton |
407 | // to send its set of local poses to observers below. |
            if (!dirtySkeletons.contains(mappingData.skeleton))
                dirtySkeletons.push_back(mappingData.skeleton);
410 | |
411 | switch (mappingData.jointTransformComponent) { |
412 | case Scale: |
                mappingData.skeleton->setJointScale(mappingData.jointIndex, v.value<QVector3D>());
414 | break; |
415 | |
416 | case Rotation: |
                mappingData.skeleton->setJointRotation(mappingData.jointIndex, v.value<QQuaternion>());
418 | break; |
419 | |
420 | case Translation: |
                mappingData.skeleton->setJointTranslation(mappingData.jointIndex, v.value<QVector3D>());
422 | break; |
423 | |
424 | default: |
425 | Q_UNREACHABLE(); |
426 | break; |
427 | } |
428 | } else { |
            record.targetChanges.push_back({mappingData.targetId, mappingData.propertyName, v});
430 | } |
431 | } |
432 | |
433 | for (const auto skeleton : dirtySkeletons) |
        record.skeletonChanges.push_back({skeleton->peerId(), skeleton->joints()});
435 | |
436 | return record; |
437 | } |
438 | |
439 | QVector<AnimationCallbackAndValue> prepareCallbacks(const QVector<MappingData> &mappingDataVec, |
440 | const QVector<float> &channelResults) |
441 | { |
442 | QVector<AnimationCallbackAndValue> callbacks; |
443 | for (const MappingData &mappingData : mappingDataVec) { |
444 | if (!mappingData.callback) |
445 | continue; |
446 | const QVariant v = buildPropertyValue(mappingData, channelResults); |
447 | if (v.isValid()) { |
448 | AnimationCallbackAndValue callback; |
449 | callback.callback = mappingData.callback; |
450 | callback.flags = mappingData.callbackFlags; |
451 | callback.value = v; |
            callbacks.append(callback);
453 | } |
454 | } |
455 | return callbacks; |
456 | } |
457 | |
458 | // TODO: Optimize this even more by combining the work done here with the functions: |
459 | // buildRequiredChannelsAndTypes() and assignChannelComponentIndices(). We are |
460 | // currently repeating the iteration over mappings and extracting/generating |
461 | // channel names, types and joint indices. |
462 | QVector<MappingData> buildPropertyMappings(const QVector<ChannelMapping *> &channelMappings, |
463 | const QVector<ChannelNameAndType> &channelNamesAndTypes, |
464 | const QVector<ComponentIndices> &channelComponentIndices, |
465 | const QVector<QBitArray> &sourceClipMask) |
466 | { |
467 | // Accumulate the required number of mappings |
468 | int maxMappingDatas = 0; |
469 | for (const auto mapping : channelMappings) { |
470 | switch (mapping->mappingType()) { |
471 | case ChannelMapping::ChannelMappingType: |
472 | case ChannelMapping::CallbackMappingType: |
473 | ++maxMappingDatas; |
474 | break; |
475 | |
476 | case ChannelMapping::SkeletonMappingType: { |
477 | Skeleton *skeleton = mapping->skeleton(); |
478 | maxMappingDatas += 3 * skeleton->jointCount(); // S, R, T |
479 | break; |
480 | } |
481 | } |
482 | } |
483 | QVector<MappingData> mappingDataVec; |
    mappingDataVec.reserve(maxMappingDatas);
485 | |
486 | // Iterate over the mappings |
487 | for (const auto mapping : channelMappings) { |
488 | switch (mapping->mappingType()) { |
489 | case ChannelMapping::ChannelMappingType: |
490 | case ChannelMapping::CallbackMappingType: { |
491 | // Populate the data we need, easy stuff first |
492 | MappingData mappingData; |
493 | mappingData.targetId = mapping->targetId(); |
494 | mappingData.propertyName = mapping->propertyName(); |
495 | mappingData.type = mapping->type(); |
496 | mappingData.callback = mapping->callback(); |
497 | mappingData.callbackFlags = mapping->callbackFlags(); |
498 | |
499 | if (mappingData.type == static_cast<int>(QMetaType::UnknownType)) { |
500 | qWarning() << "Unknown type for node id =" << mappingData.targetId |
501 | << "and property =" << mapping->propertyName() |
502 | << "and callback =" << mapping->callback(); |
503 | continue; |
504 | } |
505 | |
506 | // Try to find matching channel name and type |
507 | const ChannelNameAndType nameAndType = { mapping->channelName(), |
508 | mapping->type(), |
509 | mapping->componentCount(), |
510 | mapping->peerId() |
511 | }; |
            const qsizetype index = channelNamesAndTypes.indexOf(nameAndType);
513 | if (index != -1) { |
514 | // Do we have any animation data for this channel? If not, don't bother |
515 | // adding a mapping for it. |
                const bool hasChannelIndices = sourceClipMask[index].count(true) != 0;
517 | if (!hasChannelIndices) |
518 | continue; |
519 | |
520 | // We got one! |
521 | mappingData.channelIndices = channelComponentIndices[index]; |
                mappingDataVec.push_back(mappingData);
523 | } |
524 | break; |
525 | } |
526 | |
527 | case ChannelMapping::SkeletonMappingType: { |
528 | const QList<ChannelNameAndType> jointProperties |
                = { { QLatin1String("Location"), static_cast<int>(QMetaType::QVector3D), Translation },
                    { QLatin1String("Rotation"), static_cast<int>(QMetaType::QQuaternion), Rotation },
                    { QLatin1String("Scale"), static_cast<int>(QMetaType::QVector3D), Scale } };
532 | const QHash<QString, const char *> channelNameToPropertyName |
                = { { QLatin1String("Location"), "translation" },
                    { QLatin1String("Rotation"), "rotation" },
                    { QLatin1String("Scale"), "scale" } };
536 | Skeleton *skeleton = mapping->skeleton(); |
537 | const int jointCount = skeleton->jointCount(); |
538 | for (int jointIndex = 0; jointIndex < jointCount; ++jointIndex) { |
539 | // Populate the data we need, easy stuff first |
540 | MappingData mappingData; |
541 | mappingData.targetId = mapping->skeletonId(); |
542 | mappingData.skeleton = mapping->skeleton(); |
543 | |
544 | const qsizetype propertyCount = jointProperties.size(); |
545 | for (qsizetype propertyIndex = 0; propertyIndex < propertyCount; ++propertyIndex) { |
546 | // Get the name, type and index |
547 | ChannelNameAndType nameAndType = jointProperties[propertyIndex]; |
548 | nameAndType.jointIndex = jointIndex; |
549 | nameAndType.mappingId = mapping->peerId(); |
550 | |
551 | // Try to find matching channel name and type |
                    const qsizetype index = channelNamesAndTypes.indexOf(nameAndType);
553 | if (index == -1) |
554 | continue; |
555 | |
556 | // Do we have any animation data for this channel? If not, don't bother |
557 | // adding a mapping for it. |
                    const bool hasChannelIndices = sourceClipMask[index].count(true) != 0;
559 | if (!hasChannelIndices) |
560 | continue; |
561 | |
562 | if (index != -1) { |
563 | // We got one! |
564 | mappingData.propertyName = channelNameToPropertyName[nameAndType.name]; |
565 | mappingData.type = nameAndType.type; |
566 | mappingData.channelIndices = channelComponentIndices[index]; |
567 | mappingData.jointIndex = jointIndex; |
568 | |
569 | // Convert property name for joint transform components to |
570 | // an enumerated type so we can avoid the string comparisons |
571 | // when sending the change events after evaluation. |
572 | // TODO: Replace this logic as we now do it in buildRequiredChannelsAndTypes() |
                        if (qstrcmp(mappingData.propertyName, "scale") == 0)
                            mappingData.jointTransformComponent = Scale;
                        else if (qstrcmp(mappingData.propertyName, "rotation") == 0)
                            mappingData.jointTransformComponent = Rotation;
                        else if (qstrcmp(mappingData.propertyName, "translation") == 0)
                            mappingData.jointTransformComponent = Translation;
579 | |
                        mappingDataVec.push_back(mappingData);
581 | } |
582 | } |
583 | } |
584 | break; |
585 | } |
586 | } |
587 | } |
588 | |
589 | return mappingDataVec; |
590 | } |
591 | |
592 | QVector<ChannelNameAndType> buildRequiredChannelsAndTypes(Handler *handler, |
593 | const ChannelMapper *mapper) |
594 | { |
595 | ChannelMappingManager *mappingManager = handler->channelMappingManager(); |
596 | const auto mappingIds = mapper->mappingIds(); |
597 | |
598 | // Reserve enough storage assuming each mapping is for a different channel. |
599 | // May be overkill but avoids potential for multiple allocations |
600 | QVector<ChannelNameAndType> namesAndTypes; |
    namesAndTypes.reserve(mappingIds.size());
602 | |
603 | // Iterate through the mappings and add ones not already used by an earlier mapping. |
604 | // We could add them all then sort and remove duplicates. However, our approach has the |
605 | // advantage of keeping the blend tree format more consistent with the mapping |
606 | // orderings which will have better cache locality when generating events. |
607 | for (const Qt3DCore::QNodeId &mappingId : mappingIds) { |
608 | // Get the mapping object |
        ChannelMapping *mapping = mappingManager->lookupResource(mappingId);
610 | Q_ASSERT(mapping); |
611 | |
612 | switch (mapping->mappingType()) { |
613 | case ChannelMapping::ChannelMappingType: |
614 | case ChannelMapping::CallbackMappingType: { |
615 | // Get the name and type |
616 | const ChannelNameAndType nameAndType{ mapping->channelName(), |
617 | mapping->type(), |
618 | mapping->componentCount(), |
619 | mappingId }; |
620 | |
621 | // Add if not already contained |
            if (!namesAndTypes.contains(nameAndType))
                namesAndTypes.push_back(nameAndType);
624 | |
625 | break; |
626 | } |
627 | |
628 | case ChannelMapping::SkeletonMappingType: { |
629 | // Add an entry for each scale/rotation/translation property of each joint index |
630 | // of the target skeleton. |
631 | const QList<ChannelNameAndType> jointProperties |
                = { { QLatin1String("Location"), static_cast<int>(QMetaType::QVector3D), Translation },
                    { QLatin1String("Rotation"), static_cast<int>(QMetaType::QQuaternion), Rotation },
                    { QLatin1String("Scale"), static_cast<int>(QMetaType::QVector3D), Scale } };
            Skeleton *skeleton = handler->skeletonManager()->lookupResource(mapping->skeletonId());
636 | const int jointCount = skeleton->jointCount(); |
637 | for (int jointIndex = 0; jointIndex < jointCount; ++jointIndex) { |
638 | const qsizetype propertyCount = jointProperties.size(); |
639 | for (int propertyIndex = 0; propertyIndex < propertyCount; ++propertyIndex) { |
640 | // Get the name, type and index |
641 | ChannelNameAndType nameAndType = jointProperties[propertyIndex]; |
642 | nameAndType.jointName = skeleton->jointName(jointIndex); |
643 | nameAndType.jointIndex = jointIndex; |
644 | nameAndType.mappingId = mappingId; |
645 | |
646 | // Add if not already contained |
                    if (!namesAndTypes.contains(nameAndType))
                        namesAndTypes.push_back(nameAndType);
649 | } |
650 | } |
651 | |
652 | break; |
653 | } |
654 | } |
655 | } |
656 | |
657 | return namesAndTypes; |
658 | } |
659 | |
660 | QVector<ComponentIndices> assignChannelComponentIndices(const QVector<ChannelNameAndType> &namesAndTypes) |
661 | { |
662 | QVector<ComponentIndices> channelComponentIndices; |
    channelComponentIndices.reserve(namesAndTypes.size());
664 | |
665 | int baseIndex = 0; |
666 | for (const auto &entry : namesAndTypes) { |
667 | // Populate indices in order |
668 | const int componentCount = entry.componentCount; |
669 | ComponentIndices indices(componentCount); |
        std::iota(indices.begin(), indices.end(), baseIndex);
671 | |
672 | // Append to the results |
        channelComponentIndices.push_back(indices);
674 | |
675 | // Increment baseIndex |
676 | baseIndex += componentCount; |
677 | } |
678 | |
679 | return channelComponentIndices; |
680 | } |
681 | |
682 | QVector<Qt3DCore::QNodeId> gatherValueNodesToEvaluate(Handler *handler, |
683 | Qt3DCore::QNodeId blendTreeRootId) |
684 | { |
685 | Q_ASSERT(handler); |
686 | Q_ASSERT(blendTreeRootId.isNull() == false); |
687 | |
688 | // We need the ClipBlendNodeManager to be able to lookup nodes from their Ids |
689 | ClipBlendNodeManager *nodeManager = handler->clipBlendNodeManager(); |
690 | |
691 | // Visit the tree in a pre-order manner and collect the dependencies |
692 | QVector<Qt3DCore::QNodeId> clipIds; |
693 | ClipBlendNodeVisitor visitor(nodeManager, |
694 | ClipBlendNodeVisitor::PreOrder, |
695 | ClipBlendNodeVisitor::VisitOnlyDependencies); |
696 | |
697 | auto func = [&clipIds, nodeManager] (ClipBlendNode *blendNode) { |
698 | // Check if this is a value node itself |
699 | if (blendNode->blendType() == ClipBlendNode::ValueType) |
            clipIds.append(blendNode->peerId());
701 | |
702 | const auto dependencyIds = blendNode->currentDependencyIds(); |
703 | for (const auto &dependencyId : dependencyIds) { |
704 | // Look up the blend node and if it's a value type (clip), |
705 | // add it to the set of value node ids that need to be evaluated |
            ClipBlendNode *node = nodeManager->lookupNode(dependencyId);
            if (node && node->blendType() == ClipBlendNode::ValueType)
                clipIds.append(dependencyId);
709 | } |
710 | }; |
    visitor.traverse(blendTreeRootId, func);
712 | |
713 | // Sort and remove duplicates |
    std::sort(clipIds.begin(), clipIds.end());
    auto last = std::unique(clipIds.begin(), clipIds.end());
    clipIds.erase(last, clipIds.end());
717 | return clipIds; |
718 | } |
719 | |
720 | ClipFormat generateClipFormatIndices(const QVector<ChannelNameAndType> &targetChannels, |
721 | const QVector<ComponentIndices> &targetIndices, |
722 | const AnimationClip *clip) |
723 | { |
724 | Q_ASSERT(targetChannels.size() == targetIndices.size()); |
725 | |
726 | // Reserve enough storage for all the format indices |
727 | const qsizetype channelCount = targetChannels.size(); |
728 | ClipFormat f; |
    f.namesAndTypes.resize(channelCount);
    f.formattedComponentIndices.resize(channelCount);
    f.sourceClipMask.resize(channelCount);
732 | qsizetype indexCount = 0; |
    for (const auto &targetIndexVec : std::as_const(targetIndices))
734 | indexCount += targetIndexVec.size(); |
735 | ComponentIndices &sourceIndices = f.sourceClipIndices; |
    sourceIndices.resize(indexCount);
737 | |
738 | // Iterate through the target channels |
739 | auto formatIt = sourceIndices.begin(); |
740 | for (qsizetype i = 0; i < channelCount; ++i) { |
741 | // Find the index of the channel from the clip |
742 | const ChannelNameAndType &targetChannel = targetChannels[i]; |
        const qsizetype clipChannelIndex = clip->channelIndex(targetChannel.name,
                                                              targetChannel.jointIndex);
745 | const qsizetype componentCount = targetIndices[i].size(); |
746 | |
747 | if (clipChannelIndex != -1) { |
748 | // Found a matching channel in the clip. Populate the corresponding |
749 | // entries in the format vector with the *source indices* |
750 | // needed to build the formatted results. |
            const qsizetype baseIndex = clip->channelComponentBaseIndex(clipChannelIndex);
            const auto channelIndices = channelComponentsToIndices(clip->channels()[clipChannelIndex],
                                                                   targetChannel.type,
                                                                   targetChannel.componentCount,
                                                                   baseIndex);
            std::copy(channelIndices.begin(), channelIndices.end(), formatIt);
757 | |
            f.sourceClipMask[i].resize(componentCount);
            for (qsizetype j = 0; j < componentCount; ++j)
                f.sourceClipMask[i].setBit(j, channelIndices[j] != -1);
761 | } else { |
762 | // No such channel in this clip. We'll use default values when |
763 | // mapping from the clip to the formatted clip results. |
            std::fill(formatIt, formatIt + componentCount, -1);
            f.sourceClipMask[i].fill(false, componentCount);
766 | } |
767 | |
768 | f.formattedComponentIndices[i] = targetIndices[i]; |
769 | f.namesAndTypes[i] = targetChannels[i]; |
770 | formatIt += componentCount; |
771 | } |
772 | |
773 | return f; |
774 | } |
775 | |
776 | ClipResults formatClipResults(const ClipResults &rawClipResults, |
777 | const ComponentIndices &format) |
778 | { |
779 | // Resize the output to match the number of indices |
780 | const qsizetype elementCount = format.size(); |
781 | ClipResults formattedClipResults(elementCount); |
782 | |
783 | // Perform a gather operation to format the data |
784 | |
785 | // TODO: For large numbers of components do this in parallel with |
786 | // for e.g. a parallel_for() like construct |
787 | // TODO: We could potentially avoid having holes in these intermediate |
788 | // vectors by adjusting the component indices stored in the MappingData |
789 | // and format vectors. Needs careful investigation! |
790 | for (qsizetype i = 0; i < elementCount; ++i) { |
791 | if (format[i] == -1) |
792 | continue; |
793 | formattedClipResults[i] = rawClipResults[format[i]]; |
794 | } |
795 | |
796 | return formattedClipResults; |
797 | } |
798 | |
799 | ClipResults evaluateBlendTree(Handler *handler, |
800 | BlendedClipAnimator *animator, |
801 | Qt3DCore::QNodeId blendTreeRootId) |
802 | { |
803 | Q_ASSERT(handler); |
804 | Q_ASSERT(blendTreeRootId.isNull() == false); |
805 | const Qt3DCore::QNodeId animatorId = animator->peerId(); |
806 | |
807 | // We need the ClipBlendNodeManager to be able to lookup nodes from their Ids |
808 | ClipBlendNodeManager *nodeManager = handler->clipBlendNodeManager(); |
809 | |
810 | // Visit the tree in a post-order manner and for each interior node call |
811 | // blending function. We only need to visit the nodes that affect the blend |
812 | // tree at this time. |
813 | ClipBlendNodeVisitor visitor(nodeManager, |
814 | ClipBlendNodeVisitor::PostOrder, |
815 | ClipBlendNodeVisitor::VisitOnlyDependencies); |
816 | |
817 | // TODO: When jobs can spawn other jobs we could evaluate subtrees of |
818 | // the blend tree in parallel. Since it's just a dependency tree, it maps |
819 | // simply onto the dependencies between jobs. |
820 | auto func = [animatorId] (ClipBlendNode *blendNode) { |
821 | // Look up the blend node and if it's an interior node, perform |
822 | // the blend operation |
823 | if (blendNode->blendType() != ClipBlendNode::ValueType) |
824 | blendNode->blend(animatorId); |
825 | }; |
    visitor.traverse(blendTreeRootId, func);
827 | |
828 | // The clip results stored in the root node for this animator |
829 | // now represent the result of the blend tree evaluation |
    ClipBlendNode *blendTreeRootNode = nodeManager->lookupNode(blendTreeRootId);
831 | Q_ASSERT(blendTreeRootNode); |
832 | return blendTreeRootNode->clipResults(animatorId); |
833 | } |
834 | |
835 | QVector<float> defaultValueForChannel(Handler *handler, |
836 | const ChannelNameAndType &channelDescription) |
837 | { |
838 | QVector<float> result; |
839 | |
    // Does the channel represent a joint in a skeleton or is it a general channel?
841 | ChannelMappingManager *mappingManager = handler->channelMappingManager(); |
    const ChannelMapping *mapping = mappingManager->lookupResource(channelDescription.mappingId);
843 | switch (mapping->mappingType()) { |
844 | case ChannelMapping::SkeletonMappingType: { |
        // Default channel values for a joint in a skeleton should be taken
        // from the default pose of the joint itself, i.e. if a joint is not
        // explicitly animated, it should retain its initial rest pose.
848 | Skeleton *skeleton = mapping->skeleton(); |
849 | const int jointIndex = channelDescription.jointIndex; |
850 | switch (channelDescription.jointTransformComponent) { |
851 | case Translation: |
            result = valueToVector(skeleton->jointTranslation(jointIndex));
853 | break; |
854 | |
855 | case Rotation: |
            result = valueToVector(skeleton->jointRotation(jointIndex));
857 | break; |
858 | |
859 | case Scale: |
            result = valueToVector(skeleton->jointScale(jointIndex));
861 | break; |
862 | |
863 | case NoTransformComponent: |
864 | Q_UNREACHABLE(); |
865 | break; |
866 | } |
867 | break; |
868 | } |
869 | |
870 | case ChannelMapping::ChannelMappingType: |
871 | case ChannelMapping::CallbackMappingType: { |
872 | // Do our best to provide a sensible default value. |
873 | if (channelDescription.type == QMetaType::QQuaternion) { |
            result = valueToVector(QQuaternion()); // (1, 0, 0, 0)
875 | break; |
876 | } |
877 | |
        if (channelDescription.name.toLower() == QLatin1String("scale")) {
            result = valueToVector(QVector3D(1.0f, 1.0f, 1.0f));
880 | break; |
881 | } |
882 | |
883 | // Everything else gets all zeros |
884 | const int componentCount = mapping->componentCount(); |
885 | result = QVector<float>(componentCount, 0.0f); |
886 | break; |
887 | } |
888 | |
889 | } |
890 | |
891 | return result; |
892 | } |
893 | |
894 | void applyComponentDefaultValues(const QVector<ComponentValue> &componentDefaults, |
895 | ClipResults &formattedClipResults) |
896 | { |
897 | for (const auto &componentDefault : componentDefaults) |
898 | formattedClipResults[componentDefault.componentIndex] = componentDefault.value; |
899 | } |
900 | |
901 | } // Animation |
902 | } // Qt3DAnimation |
903 | |
904 | QT_END_NAMESPACE |
905 | |