//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//  * Redistributions of source code must retain the above copyright
//    notice, this list of conditions and the following disclaimer.
//  * Redistributions in binary form must reproduce the above copyright
//    notice, this list of conditions and the following disclaimer in the
//    documentation and/or other materials provided with the distribution.
//  * Neither the name of NVIDIA CORPORATION nor the names of its
//    contributors may be used to endorse or promote products derived
//    from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.

#ifndef BP_AABBMANAGER_H
#define BP_AABBMANAGER_H

#include "PxvConfig.h"
#include "CmPhysXCommon.h"
#include "BpBroadPhaseUpdate.h"
#include "GuGeometryUnion.h"
#include "CmBitMap.h"
#include "CmTask.h"
#include "PsAllocator.h"
#include "GuBounds.h"
#include "PsHashMap.h"
#include "CmRadixSortBuffered.h"
#include "PsFoundation.h"
#include "BpAABBManagerTasks.h"
#include "PsHashSet.h"
#include "PxFiltering.h"
#include "PsSList.h"

namespace physx
{
class PxcScratchAllocator;
struct PxBroadPhaseType;

namespace Cm
{
    class RenderOutput;
    class EventProfiler;
    class FlushPool;
}

namespace Bp
{
    typedef PxU32 BoundsIndex;
    typedef PxU32 AggregateHandle;  // PT: currently an index in mAggregates array
    typedef PxU32 ActorHandle;

    struct BroadPhasePair;

    struct ElementType
    {
        enum Enum
        {
            eSHAPE = 0,
            eTRIGGER,

            eCOUNT
        };
    };
    PX_COMPILE_TIME_ASSERT(ElementType::eCOUNT <= 4);   // 2 bits reserved for type

    /**
    \brief Changes to the configuration of overlap pairs are reported as void* pairs.
    \note Each void* in the pair corresponds to the void* passed to AABBManager::createVolume.
    @see AABBManager::createVolume, AABBManager::getCreatedOverlaps, AABBManager::getDestroyedOverlaps
    */
    struct AABBOverlap
    {
        PX_FORCE_INLINE AABBOverlap() {}
        PX_FORCE_INLINE AABBOverlap(void* userData0, void* userData1/*, ActorHandle pairHandle*/) : mUserData0(userData0), mUserData1(userData1)/*, mPairHandle(pairHandle)*/ {}

        void* mUserData0;
        void* mUserData1;
/*      union
        {
            ActorHandle mPairHandle;    // For created pairs, this is the index into the pair in the pair manager
            void*       mUserData;      // For deleted pairs, this is the user data written by the application to the pair
        };*/
        void* mPairUserData;            // For deleted pairs, this is the user data written by the application to the pair
    };

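    // Note: BpCacheData below appears to act as a per-worker scratch container for overlap pairs
    // produced while aggregate pairs are updated; the two-element arrays are presumably indexed by
    // ElementType (eSHAPE/eTRIGGER, and ElementType::eCOUNT == 2). Instances seem to be pooled via
    // AABBManager::getBpCacheData()/putBpCacheData() declared further below, so that tasks can
    // accumulate created/deleted pairs without locking.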
    struct BpCacheData : public Ps::SListEntry
    {
        Ps::Array<AABBOverlap> mCreatedPairs[2];
        Ps::Array<AABBOverlap> mDeletedPairs[2];

        void reset()
        {
            mCreatedPairs[0].resizeUninitialized(0);
            mCreatedPairs[1].resizeUninitialized(0);
            mDeletedPairs[0].resizeUninitialized(0);
            mDeletedPairs[1].resizeUninitialized(0);
        }
    };

    class BoundsArray : public Ps::UserAllocated
    {
        PX_NOCOPY(BoundsArray)

    public:
        BoundsArray(Ps::VirtualAllocator& allocator) : mBounds(allocator)
        {
        }

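        // Worked example for initEntry() below: on a fresh array, initEntry(5) bumps the index to 6,
        // sees capacity 0, and grows the array to Ps::nextPowerOfTwo(6) == 8 entries (capacity and
        // size), so the last used entry can be read with SIMD loads without going out of bounds.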
        PX_FORCE_INLINE void initEntry(PxU32 index)
        {
            index++;    // PT: always pretend we need one more entry, to make sure reading the last used entry will be SIMD-safe.
            const PxU32 oldCapacity = mBounds.capacity();
            if(index>=oldCapacity)
            {
                const PxU32 newCapacity = Ps::nextPowerOfTwo(index);
                mBounds.reserve(newCapacity);
                mBounds.forceSize_Unsafe(newCapacity);
            }
        }

        PX_FORCE_INLINE void updateBounds(const PxTransform& transform, const Gu::GeometryUnion& geom, PxU32 index)
        {
            Gu::computeBounds(mBounds[index], geom.getGeometry(), transform, 0.0f, NULL, 1.0f);
            mHasAnythingChanged = true;
        }

        PX_FORCE_INLINE const PxBounds3& getBounds(PxU32 index) const
        {
            return mBounds[index];
        }

        PX_FORCE_INLINE void setBounds(const PxBounds3& bounds, PxU32 index)
        {
            // PX_CHECK_AND_RETURN(bounds.isValid() && !bounds.isEmpty(), "BoundsArray::setBounds - illegal bounds\n");
            mBounds[index] = bounds;
            mHasAnythingChanged = true;
        }

        PX_FORCE_INLINE const PxBounds3* begin() const
        {
            return mBounds.begin();
        }

        PX_FORCE_INLINE PxBounds3* begin()
        {
            return mBounds.begin();
        }

        PX_FORCE_INLINE Ps::Array<PxBounds3, Ps::VirtualAllocator>& getBounds()
        {
            return mBounds;
        }

        PX_FORCE_INLINE PxU32 getCapacity() const
        {
            return mBounds.size();
        }

        void shiftOrigin(const PxVec3& shift)
        {
            // We may shift some NaN bounds here because we don't know which entries are active, but this should be harmless.
            for(PxU32 i=0;i<mBounds.size();i++)
            {
                mBounds[i].minimum -= shift;
                mBounds[i].maximum -= shift;
            }
            mHasAnythingChanged = true;
        }

        PX_FORCE_INLINE bool hasChanged()        const { return mHasAnythingChanged; }
        PX_FORCE_INLINE void resetChangedState()       { mHasAnythingChanged = false; }
        PX_FORCE_INLINE void setChangedState()         { mHasAnythingChanged = true; }

    private:
        Ps::Array<PxBounds3, Ps::VirtualAllocator> mBounds;
        bool mHasAnythingChanged;
    };
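
    // Minimal usage sketch for BoundsArray (illustrative only: "allocator", "boundsIndex" and the
    // PxVec3 values are hypothetical; the member functions are the ones declared above):
    //
    //   BoundsArray boundsArray(allocator);
    //   boundsArray.initEntry(boundsIndex);                            // grow storage, SIMD-safe padding
    //   boundsArray.setBounds(PxBounds3(minPt, maxPt), boundsIndex);
    //   if(boundsArray.hasChanged())
    //   {
    //       // the broadphase update consumes boundsArray.begin() / getBounds()
    //       boundsArray.resetChangedState();
    //   }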

    struct VolumeData
    {
        PX_FORCE_INLINE void reset()
        {
            mAggregate = PX_INVALID_U32;
            mUserData = NULL;
        }

        PX_FORCE_INLINE void setSingleActor()       { mAggregate = PX_INVALID_U32; }
        PX_FORCE_INLINE bool isSingleActor() const  { return mAggregate == PX_INVALID_U32; }

        PX_FORCE_INLINE void setUserData(void* userData)
        {
            // PX_ASSERT(!(reinterpret_cast<size_t>(userData) & 3));
            mUserData = userData;
        }

        PX_FORCE_INLINE void* getUserData() const
        {
            return reinterpret_cast<void*>(reinterpret_cast<size_t>(mUserData) & (~size_t(3)));
        }

        PX_FORCE_INLINE void setVolumeType(ElementType::Enum volumeType)
        {
            PX_ASSERT(volumeType < 2);
            mUserData = reinterpret_cast<void*>(reinterpret_cast<size_t>(getUserData()) | static_cast<size_t>(volumeType));
        }

        PX_FORCE_INLINE ElementType::Enum getVolumeType() const
        {
            return ElementType::Enum(reinterpret_cast<size_t>(mUserData) & 3);
        }
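
        // Note: the element type is packed into the two least significant bits of mUserData, which
        // relies on the user pointer being at least 4-byte aligned (see the commented-out assert in
        // setUserData); getUserData() masks those bits off again before returning the pointer.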

        PX_FORCE_INLINE void setAggregate(AggregateHandle handle)
        {
            PX_ASSERT(handle!=PX_INVALID_U32);
            mAggregate = (handle<<1)|1;
        }
        PX_FORCE_INLINE bool isAggregate() const { return !isSingleActor() && ((mAggregate&1)!=0); }

        PX_FORCE_INLINE void setAggregated(AggregateHandle handle)
        {
            PX_ASSERT(handle!=PX_INVALID_U32);
            mAggregate = (handle<<1)|0;
        }

        PX_FORCE_INLINE bool isAggregated() const
        {
            return !isSingleActor() && ((mAggregate&1)==0);
        }

        PX_FORCE_INLINE AggregateHandle getAggregateOwner() const { return mAggregate>>1; }
        PX_FORCE_INLINE AggregateHandle getAggregate()      const { return mAggregate>>1; }

    private:
        void* mUserData;
        // PT: TODO: consider moving this to a separate array, which wouldn't be allocated at all for people not using aggregates.
        // PT: current encoding:
        // aggregate == PX_INVALID_U32 => single actor
        // aggregate != PX_INVALID_U32 => aggregate index<<1|LSB. LSB==1 for aggregates, LSB==0 for aggregated actors.
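        // Example: an aggregate created with handle 7 stores (7<<1)|1 = 15 in its own entry, while each
        // shape aggregated into it stores (7<<1)|0 = 14; getAggregate()/getAggregateOwner() recover 7
        // from either value by shifting right.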
        AggregateHandle mAggregate;
    };

    // PT: TODO: revisit this.....
    class Aggregate;
    class PersistentPairs;
    class PersistentActorAggregatePair;
    class PersistentAggregateAggregatePair;
    class PersistentSelfCollisionPairs;
    struct AggPair
    {
        PX_FORCE_INLINE AggPair() {}
        PX_FORCE_INLINE AggPair(ShapeHandle index0, ShapeHandle index1) : mIndex0(index0), mIndex1(index1) {}
        ShapeHandle mIndex0;
        ShapeHandle mIndex1;

        PX_FORCE_INLINE bool operator==(const AggPair& p) const
        {
            return (p.mIndex0 == mIndex0) && (p.mIndex1 == mIndex1);
        }
    };
    typedef Ps::CoalescedHashMap<AggPair, PersistentPairs*> AggPairMap;

    // PT: TODO: isn't there a generic pair structure somewhere? refactor with AggPair anyway
    struct Pair
    {
        PX_FORCE_INLINE Pair(PxU32 id0, PxU32 id1) : mID0(id0), mID1(id1) {}
        PX_FORCE_INLINE Pair() {}

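        // Note: the comparison below treats the pair as a single 64-bit value. This assumes mID0 and
        // mID1 are laid out contiguously with no padding; the resulting order depends on endianness,
        // but it is consistent, which is all an ordering predicate needs.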
        PX_FORCE_INLINE bool operator<(const Pair& p) const
        {
            const PxU64 value0 = *reinterpret_cast<const PxU64*>(this);
            const PxU64 value1 = *reinterpret_cast<const PxU64*>(&p);
            return value0 < value1;
        }

        PX_FORCE_INLINE bool operator==(const Pair& p) const
        {
            return (p.mID0 == mID0) && (p.mID1 == mID1);
        }

        PX_FORCE_INLINE bool operator!=(const Pair& p) const
        {
            return (p.mID0 != mID0) || (p.mID1 != mID1);
        }

        PxU32 mID0;
        PxU32 mID1;
    };

    class AABBManager;

    class PostBroadPhaseStage2Task : public Cm::Task
    {
        Cm::FlushPool* mFlushPool;
        AABBManager& mManager;

        PX_NOCOPY(PostBroadPhaseStage2Task)
    public:

        PostBroadPhaseStage2Task(PxU64 contextID, AABBManager& manager) : Cm::Task(contextID), mFlushPool(NULL), mManager(manager)
        {
        }

        virtual const char* getName() const { return "PostBroadPhaseStage2Task"; }

        void setFlushPool(Cm::FlushPool* pool) { mFlushPool = pool; }

        virtual void runInternal();

    };

    class ProcessAggPairsBase;

    /**
    \brief A structure responsible for:
    * storing an aabb representation for each active shape in the related scene
    * managing the creation/removal of aabb representations when their related shapes are created/removed
    * updating all aabbs that require an update due to modification of shape geometry or transform
    * updating the aabb of all aggregates from the union of the aabbs of all shapes that make up each aggregate
    * computing and reporting the incremental changes to the set of overlapping aabb pairs
    */
    class AABBManager : public Ps::UserAllocated
    {
        PX_NOCOPY(AABBManager)
    public:

        AABBManager(BroadPhase& bp, BoundsArray& boundsArray, Ps::Array<PxReal, Ps::VirtualAllocator>& contactDistance,
                    PxU32 maxNbAggregates, PxU32 maxNbShapes, Ps::VirtualAllocator& allocator, PxU64 contextID,
                    PxPairFilteringMode::Enum kineKineFilteringMode, PxPairFilteringMode::Enum staticKineFilteringMode);

        void destroy();

        AggregateHandle createAggregate(BoundsIndex index, Bp::FilterGroup::Enum group, void* userData, const bool selfCollisions);
        bool destroyAggregate(BoundsIndex& index, Bp::FilterGroup::Enum& group, AggregateHandle aggregateHandle);

        bool addBounds(BoundsIndex index, PxReal contactDistance, Bp::FilterGroup::Enum group, void* userdata, AggregateHandle aggregateHandle, ElementType::Enum volumeType);
        void reserveSpaceForBounds(BoundsIndex index);
        void removeBounds(BoundsIndex index);
        PX_FORCE_INLINE Ps::IntBool isMarkedForRemove(BoundsIndex index) const { return mRemovedHandleMap.boundedTest(index); }

        void setContactOffset(BoundsIndex handle, PxReal offset)
        {
            // PT: this works even for aggregated shapes, since the corresponding bit will also be set in the 'updated' map.
            mContactDistance.begin()[handle] = offset;
            mPersistentStateChanged = true;
            mChangedHandleMap.growAndSet(handle);
        }

        void setVolumeType(BoundsIndex handle, ElementType::Enum volumeType)
        {
            mVolumeData[handle].setVolumeType(volumeType);
        }

        void setBPGroup(BoundsIndex index, Bp::FilterGroup::Enum group)
        {
            PX_ASSERT((index + 1) < mVolumeData.size());
            PX_ASSERT(group != Bp::FilterGroup::eINVALID);  // PT: we use group == Bp::FilterGroup::eINVALID to mark removed/invalid entries
            mGroups[index] = group;
        }

        // PT: TODO: revisit name: we don't "update AABBs" here anymore
        void updateAABBsAndBP(PxU32 numCpuTasks,
                              Cm::FlushPool& flushPool,
                              PxcScratchAllocator* scratchAllocator,
                              bool hasContactDistanceUpdated,
                              PxBaseTask* continuation,
                              PxBaseTask* narrowPhaseUnlockTask);

        void finalizeUpdate(PxU32 numCpuTasks,
                            PxcScratchAllocator* scratchAllocator,
                            PxBaseTask* continuation,
                            PxBaseTask* narrowPhaseUnlockTask);

        AABBOverlap* getCreatedOverlaps(ElementType::Enum type, PxU32& count)
        {
            PX_ASSERT(type < ElementType::eCOUNT);
            count = mCreatedOverlaps[type].size();
            return mCreatedOverlaps[type].begin();
        }

        AABBOverlap* getDestroyedOverlaps(ElementType::Enum type, PxU32& count)
        {
            PX_ASSERT(type < ElementType::eCOUNT);
            count = mDestroyedOverlaps[type].size();
            return mDestroyedOverlaps[type].begin();
        }
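
        // Typical consumption sketch (illustrative only; "manager", "nbCreated" and the loop are
        // hypothetical calling code, the getters above are the actual interface):
        //
        //   PxU32 nbCreated;
        //   AABBOverlap* created = manager->getCreatedOverlaps(ElementType::eSHAPE, nbCreated);
        //   for(PxU32 i=0; i<nbCreated; i++)
        //   {
        //       // created[i].mUserData0 / mUserData1 are the void* provided when the volumes were added
        //   }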

        void freeBuffers();

        void** getOutOfBoundsObjects(PxU32& nbOutOfBoundsObjects)
        {
            nbOutOfBoundsObjects = mOutOfBoundsObjects.size();
            return mOutOfBoundsObjects.begin();
        }

        void clearOutOfBoundsObjects()
        {
            mOutOfBoundsObjects.clear();
        }

        void** getOutOfBoundsAggregates(PxU32& nbOutOfBoundsAggregates)
        {
            nbOutOfBoundsAggregates = mOutOfBoundsAggregates.size();
            return mOutOfBoundsAggregates.begin();
        }

        void clearOutOfBoundsAggregates()
        {
            mOutOfBoundsAggregates.clear();
        }

        void shiftOrigin(const PxVec3& shift);

        void visualize(Cm::RenderOutput& out);

        PX_FORCE_INLINE BroadPhase*        getBroadPhase()                  const { return &mBroadPhase; }
        PX_FORCE_INLINE BoundsArray&       getBoundsArray()                       { return mBoundsArray; }
        PX_FORCE_INLINE PxU32              getNbActiveAggregates()          const { return mNbAggregates; }
        PX_FORCE_INLINE const float*       getContactDistances()            const { return mContactDistance.begin(); }
        PX_FORCE_INLINE Cm::BitMapPinned&  getChangedAABBMgActorHandleMap()       { return mChangedHandleMap; }

        PX_FORCE_INLINE void* getUserData(const BoundsIndex index) const { if(index < mVolumeData.size()) return mVolumeData[index].getUserData(); return NULL; }
        PX_FORCE_INLINE PxU64 getContextId() const { return mContextID; }

        void postBroadPhase(PxBaseTask*, PxBaseTask* narrowPhaseUnlockTask, Cm::FlushPool& flushPool);

        BpCacheData* getBpCacheData();
        void putBpCacheData(BpCacheData*);
        void resetBpCacheData();

        Ps::Mutex mMapLock;

    private:
        void reserveShapeSpace(PxU32 nbShapes);

        void postBpStage2(PxBaseTask*, Cm::FlushPool&);

        void postBpStage3(PxBaseTask*);

        PostBroadPhaseStage2Task mPostBroadPhase2;
        Cm::DelegateTask<AABBManager, &AABBManager::postBpStage3> mPostBroadPhase3;

        //Cm::DelegateTask<SimpleAABBManager, &AABBManager::postBroadPhase> mPostBroadPhase;

        FinalizeUpdateTask mFinalizeUpdateTask;

        // PT: we have bitmaps here, probably to quickly handle objects that are added and removed during the same frame.
        // PT: TODO: consider replacing with plain arrays (easier to parse, already existing below, etc)
        Cm::BitMap       mAddedHandleMap;   // PT: indexed by BoundsIndex
        Cm::BitMap       mRemovedHandleMap; // PT: indexed by BoundsIndex
        Cm::BitMapPinned mChangedHandleMap;

        PX_FORCE_INLINE void removeBPEntry(BoundsIndex index)   // PT: only for objects passed to the BP
        {
            if(mAddedHandleMap.test(index))      // PT: if object had been added this frame...
                mAddedHandleMap.reset(index);    // PT: ...then simply revert the previous operation locally (it hasn't been passed to the BP yet).
            else
                mRemovedHandleMap.set(index);    // PT: else we need to remove it from the BP
        }

        PX_FORCE_INLINE void addBPEntry(BoundsIndex index)
        {
            if(mRemovedHandleMap.test(index))
                mRemovedHandleMap.reset(index);
            else
                mAddedHandleMap.set(index);
        }
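
        // Note: the two helpers above cancel each other out within a frame: an entry that is added and
        // then removed (or removed and then re-added) before the next broadphase update generates no
        // BP traffic at all.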

        // PT: TODO: when do we need 'Ps::VirtualAllocator' and when don't we? When memory is passed to GPU BP?
        //ML: we create mGroups and mContactDistance in the AABBManager constructor. Ps::Array takes a Ps::VirtualAllocator as a parameter. Therefore, if the GPU BP is used,
        //we pass a pinned host memory allocator; otherwise we just pass a normal allocator.
        Ps::Array<Bp::FilterGroup::Enum, Ps::VirtualAllocator> mGroups; // NOTE: we stick Bp::FilterGroup::eINVALID in this slot to indicate that the entry is invalid (removed or never inserted.)
        Ps::Array<PxReal, Ps::VirtualAllocator>& mContactDistance;
        Ps::Array<VolumeData> mVolumeData;
#ifdef BP_FILTERING_USES_TYPE_IN_GROUP
        bool mLUT[Bp::FilterType::COUNT][Bp::FilterType::COUNT];
#endif
        PX_FORCE_INLINE void initEntry(BoundsIndex index, PxReal contactDistance, Bp::FilterGroup::Enum group, void* userData)
        {
            if((index + 1) >= mVolumeData.size())
                reserveShapeSpace(index + 1);

            // PT: TODO: why is this needed at all? Why aren't size() and capacity() enough?
            mUsedSize = PxMax(index+1, mUsedSize);

            PX_ASSERT(group != Bp::FilterGroup::eINVALID);  // PT: we use group == Bp::FilterGroup::eINVALID to mark removed/invalid entries
            mGroups[index] = group;
            mContactDistance.begin()[index] = contactDistance;
            mVolumeData[index].setUserData(userData);
        }

        PX_FORCE_INLINE void resetEntry(BoundsIndex index)
        {
            mGroups[index] = Bp::FilterGroup::eINVALID;
            mContactDistance.begin()[index] = 0.0f;
            mVolumeData[index].reset();
        }

        // PT: TODO: remove confusion between BoundsIndex and ShapeHandle here!
        Ps::Array<ShapeHandle, Ps::VirtualAllocator> mAddedHandles;
        Ps::Array<ShapeHandle, Ps::VirtualAllocator> mUpdatedHandles;
        Ps::Array<ShapeHandle, Ps::VirtualAllocator> mRemovedHandles;

        BroadPhase&  mBroadPhase;
        BoundsArray& mBoundsArray;

        Ps::Array<void*> mOutOfBoundsObjects;
        Ps::Array<void*> mOutOfBoundsAggregates;
        Ps::Array<AABBOverlap> mCreatedOverlaps[ElementType::eCOUNT];
        Ps::Array<AABBOverlap> mDestroyedOverlaps[ElementType::eCOUNT];

        PxcScratchAllocator* mScratchAllocator;

        PxBaseTask* mNarrowPhaseUnblockTask;
        PxU32 mUsedSize;        // highest used value + 1
        bool mOriginShifted;
        bool mPersistentStateChanged;

        PxU32 mNbAggregates;
        PxU32 mFirstFreeAggregate;
        Ps::Array<Aggregate*> mAggregates;      // PT: indexed by AggregateHandle
        Ps::Array<Aggregate*> mDirtyAggregates;

        PxU32 mTimestamp;

        AggPairMap mActorAggregatePairs;
        AggPairMap mAggregateAggregatePairs;

        Ps::Array<ProcessAggPairsBase*> mAggPairTasks;

#ifdef BP_USE_AGGREGATE_GROUP_TAIL
        // PT: TODO: even in the 3.4 trunk this stuff is a clumsy mess: groups are "BpHandle" suddenly passed
        // to BroadPhaseUpdateData as "ShapeHandle".
        //Free aggregate group ids.
        PxU32 mAggregateGroupTide;
        Ps::Array<Bp::FilterGroup::Enum> mFreeAggregateGroups;  // PT: TODO: remove this useless array
#endif
        Ps::HashSet<Pair> mCreatedPairs;

        PxU64 mContextID;

        Ps::SList mBpThreadContextPool;

        PX_FORCE_INLINE Aggregate* getAggregateFromHandle(AggregateHandle handle)
        {
            PX_ASSERT(handle<mAggregates.size());
            return mAggregates[handle];
        }

#ifdef BP_USE_AGGREGATE_GROUP_TAIL
        PX_FORCE_INLINE void releaseAggregateGroup(const Bp::FilterGroup::Enum group)
        {
            PX_ASSERT(group != Bp::FilterGroup::eINVALID);
            mFreeAggregateGroups.pushBack(group);
        }

        PX_FORCE_INLINE Bp::FilterGroup::Enum getAggregateGroup()
        {
            PxU32 id;
            if(mFreeAggregateGroups.size())
                id = mFreeAggregateGroups.popBack();
            else
            {
                id = mAggregateGroupTide--;
#ifdef BP_FILTERING_USES_TYPE_IN_GROUP
                id<<=2;
                id|=FilterType::AGGREGATE;
#endif
            }
            const Bp::FilterGroup::Enum group = Bp::FilterGroup::Enum(id);
            PX_ASSERT(group != Bp::FilterGroup::eINVALID);
            return group;
        }
#endif
        void startAggregateBoundsComputationTasks(PxU32 nbToGo, PxU32 numCpuTasks, Cm::FlushPool& flushPool);
        PersistentActorAggregatePair* createPersistentActorAggregatePair(ShapeHandle volA, ShapeHandle volB);
        PersistentAggregateAggregatePair* createPersistentAggregateAggregatePair(ShapeHandle volA, ShapeHandle volB);
        void updatePairs(PersistentPairs& p, BpCacheData* data = NULL);
        void handleOriginShift();
    public:
        void processBPCreatedPair(const BroadPhasePair& pair);
        void processBPDeletedPair(const BroadPhasePair& pair);
        //bool checkID(ShapeHandle id);
        friend class PersistentActorAggregatePair;
        friend class PersistentAggregateAggregatePair;
        friend class ProcessSelfCollisionPairsParallel;
        friend class PostBroadPhaseStage2Task;
    };

} //namespace Bp

} //namespace physx

#endif //BP_AABBMANAGER_H

