1 | // |
2 | // Redistribution and use in source and binary forms, with or without |
3 | // modification, are permitted provided that the following conditions |
4 | // are met: |
5 | // * Redistributions of source code must retain the above copyright |
6 | // notice, this list of conditions and the following disclaimer. |
7 | // * Redistributions in binary form must reproduce the above copyright |
8 | // notice, this list of conditions and the following disclaimer in the |
9 | // documentation and/or other materials provided with the distribution. |
10 | // * Neither the name of NVIDIA CORPORATION nor the names of its |
11 | // contributors may be used to endorse or promote products derived |
12 | // from this software without specific prior written permission. |
13 | // |
14 | // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY |
15 | // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
16 | // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
17 | // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR |
18 | // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, |
19 | // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
20 | // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
21 | // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY |
22 | // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
23 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
24 | // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
25 | // |
26 | // Copyright (c) 2008-2021 NVIDIA Corporation. All rights reserved. |
27 | // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. |
28 | // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. |
29 | |
30 | |
31 | #ifndef PX_PHYSICS_COMMON_UTILS |
32 | #define PX_PHYSICS_COMMON_UTILS |
33 | |
34 | |
35 | #include "foundation/PxVec3.h" |
36 | #include "foundation/PxMat33.h" |
37 | #include "foundation/PxBounds3.h" |
38 | #include "common/PxBase.h" |
39 | #include "CmPhysXCommon.h" |
40 | #include "PsInlineArray.h" |
41 | #include "PsArray.h" |
42 | #include "PsAllocator.h" |
43 | |
44 | namespace physx |
45 | { |
46 | namespace Cm |
47 | { |
48 | |
49 | template<class DstType, class SrcType> |
50 | PX_FORCE_INLINE PxU32 getArrayOfPointers(DstType** PX_RESTRICT userBuffer, PxU32 bufferSize, PxU32 startIndex, SrcType*const* PX_RESTRICT src, PxU32 size) |
51 | { |
52 | const PxU32 remainder = PxU32(PxMax<PxI32>(a: PxI32(size - startIndex), b: 0)); |
53 | const PxU32 writeCount = PxMin(a: remainder, b: bufferSize); |
54 | src += startIndex; |
55 | for(PxU32 i=0;i<writeCount;i++) |
56 | userBuffer[i] = static_cast<DstType*>(src[i]); |
57 | return writeCount; |
58 | } |
59 | |
60 | PX_CUDA_CALLABLE PX_INLINE void transformInertiaTensor(const PxVec3& invD, const PxMat33& M, PxMat33& mIInv) |
61 | { |
62 | const float axx = invD.x*M(0,0), axy = invD.x*M(1,0), axz = invD.x*M(2,0); |
63 | const float byx = invD.y*M(0,1), byy = invD.y*M(1,1), byz = invD.y*M(2,1); |
64 | const float czx = invD.z*M(0,2), czy = invD.z*M(1,2), czz = invD.z*M(2,2); |
65 | |
66 | mIInv(0,0) = axx*M(0,0) + byx*M(0,1) + czx*M(0,2); |
67 | mIInv(1,1) = axy*M(1,0) + byy*M(1,1) + czy*M(1,2); |
68 | mIInv(2,2) = axz*M(2,0) + byz*M(2,1) + czz*M(2,2); |
69 | |
70 | mIInv(0,1) = mIInv(1,0) = axx*M(1,0) + byx*M(1,1) + czx*M(1,2); |
71 | mIInv(0,2) = mIInv(2,0) = axx*M(2,0) + byx*M(2,1) + czx*M(2,2); |
72 | mIInv(1,2) = mIInv(2,1) = axy*M(2,0) + byy*M(2,1) + czy*M(2,2); |
73 | } |
74 | |
75 | // PT: TODO: refactor this with PxBounds3 header |
76 | PX_FORCE_INLINE PxVec3 basisExtent(const PxVec3& basis0, const PxVec3& basis1, const PxVec3& basis2, const PxVec3& extent) |
77 | { |
78 | // extended basis vectors |
79 | const PxVec3 c0 = basis0 * extent.x; |
80 | const PxVec3 c1 = basis1 * extent.y; |
81 | const PxVec3 c2 = basis2 * extent.z; |
82 | |
83 | // find combination of base vectors that produces max. distance for each component = sum of abs() |
84 | return PxVec3 ( PxAbs(a: c0.x) + PxAbs(a: c1.x) + PxAbs(a: c2.x), |
85 | PxAbs(a: c0.y) + PxAbs(a: c1.y) + PxAbs(a: c2.y), |
86 | PxAbs(a: c0.z) + PxAbs(a: c1.z) + PxAbs(a: c2.z)); |
87 | } |
88 | |
89 | PX_FORCE_INLINE PxBounds3 basisExtent(const PxVec3& center, const PxVec3& basis0, const PxVec3& basis1, const PxVec3& basis2, const PxVec3& extent) |
90 | { |
91 | const PxVec3 w = basisExtent(basis0, basis1, basis2, extent); |
92 | return PxBounds3(center - w, center + w); |
93 | } |
94 | |
95 | PX_FORCE_INLINE bool isValid(const PxVec3& c, const PxVec3& e) |
96 | { |
97 | return (c.isFinite() && e.isFinite() && (((e.x >= 0.0f) && (e.y >= 0.0f) && (e.z >= 0.0f)) || |
98 | ((e.x == -PX_MAX_BOUNDS_EXTENTS) && |
99 | (e.y == -PX_MAX_BOUNDS_EXTENTS) && |
100 | (e.z == -PX_MAX_BOUNDS_EXTENTS)))); |
101 | } |
102 | |
103 | PX_FORCE_INLINE bool isEmpty(const PxVec3& c, const PxVec3& e) |
104 | { |
105 | PX_UNUSED(c); |
106 | PX_ASSERT(isValid(c, e)); |
107 | return e.x<0.0f; |
108 | } |
109 | |
110 | // Array with externally managed storage. |
111 | // Allocation and resize policy are managed by the owner, |
112 | // Very minimal functionality right now, just POD types |
113 | |
// OwnedArray: a POD array whose storage is (re)allocated by an external owner
// through the 'realloc' pointer-to-member callback. The owner controls the
// allocation/growth policy; this class only tracks data/size/capacity.
//
// Contract: the owner MUST call releaseMem() before the array is destroyed
// (the destructor asserts capacity==0 rather than freeing anything itself).
template <typename T,
		  typename Owner,
		  typename IndexType,
		  void (Owner::*realloc)(T*& currentMem, IndexType& currentCapacity, IndexType size, IndexType requiredMinCapacity)>
class OwnedArray
{
public:
	// Starts empty with no storage; nothing is allocated until pushBack/reserve.
	OwnedArray()
	: mData(0)
	, mCapacity(0)
	, mSize(0)
	{}

	~OwnedArray()		// owner must call releaseMem before destruction
	{
		PX_ASSERT(mCapacity==0);
	}

	// Appends a copy of 'element', asking the owner to grow storage if full.
	void pushBack(T& element, Owner& owner)
	{
		// there's a failure case if here if we push an existing element which causes a resize -
		// a rare case not worth coding around; if you need it, copy the element then push it.

		// Assert the element does not alias the live range of this array.
		PX_ASSERT(&element<mData || &element>=mData+mSize);
		if(mSize==mCapacity)
			(owner.*realloc)(mData, mCapacity, mSize, IndexType(mSize+1));

		// The owner's realloc is required to have produced capacity > mSize.
		PX_ASSERT(mData && mSize<mCapacity);
		mData[mSize++] = element;
	}

	// Current number of elements.
	IndexType size() const
	{
		return mSize;
	}

	// O(1) unordered removal: overwrites 'index' with the last element.
	void replaceWithLast(IndexType index)
	{
		PX_ASSERT(index<mSize);
		mData[index] = mData[--mSize];
	}

	T* begin() const
	{
		return mData;
	}

	T* end() const
	{
		return mData+mSize;
	}

	T& operator [](IndexType index)
	{
		PX_ASSERT(index<mSize);
		return mData[index];
	}

	const T& operator [](IndexType index) const
	{
		PX_ASSERT(index<mSize);
		return mData[index];
	}

	// Asks the owner to provide at least 'capacity' slots.
	// NOTE(review): the comparison is '>=', so requesting exactly the current
	// capacity still triggers the owner's realloc — presumably intentional
	// (lets an owner compact/refresh storage); confirm before changing.
	void reserve(IndexType capacity, Owner &owner)
	{
		if(capacity>=mCapacity)
			(owner.*realloc)(mData, mCapacity, mSize, capacity);
	}

	// Returns all storage to the owner (requiredMinCapacity 0 requests release)
	// and resets the array to empty. Must be called before destruction.
	void releaseMem(Owner &owner)
	{
		mSize = 0;
		(owner.*realloc)(mData, mCapacity, 0, 0);
	}

private:
	T*			mData;
	IndexType	mCapacity;
	IndexType	mSize;

	// just in case someone tries to use a non-POD in here
	// (a union member must be trivially constructible, so a non-POD T fails
	// to compile here even though FailIfNonPod is never instantiated at runtime)
	union FailIfNonPod
	{
		T t;
		int x;
	};
};
202 | |
203 | /** |
204 | Any object deriving from PxBase needs to call this function instead of 'delete object;'. |
205 | |
206 | We don't want implement 'operator delete' in PxBase because that would impose how |
207 | memory of derived classes is allocated. Even though most or all of the time derived classes will |
208 | be user allocated, we don't want to put UserAllocatable into the API and derive from that. |
209 | */ |
210 | template<typename T> |
211 | PX_INLINE void deletePxBase(T* object) |
212 | { |
213 | if(object->getBaseFlags() & PxBaseFlag::eOWNS_MEMORY) |
214 | PX_DELETE(object); |
215 | else |
216 | object->~T(); |
217 | } |
218 | |
// Canonical 0xcd padding patterns at 8/16/32-bit granularity, used to mark
// memory in checked builds (see markSerializedMem below).
#define PX_PADDING_8 0xcd
#define PX_PADDING_16 0xcdcd
#define PX_PADDING_32 0xcdcdcdcd
222 | |
223 | #if PX_CHECKED |
224 | /** |
225 | Mark a specified amount of memory with 0xcd pattern. This is used to check that the meta data |
226 | definition for serialized classes is complete in checked builds. |
227 | */ |
228 | PX_INLINE void markSerializedMem(void* ptr, PxU32 byteSize) |
229 | { |
230 | for (PxU32 i = 0; i < byteSize; ++i) |
231 | reinterpret_cast<PxU8*>(ptr)[i] = 0xcd; |
232 | } |
233 | |
234 | /** |
235 | Macro to instantiate a type for serialization testing. |
236 | Note: Only use PX_NEW_SERIALIZED once in a scope. |
237 | */ |
238 | #define PX_NEW_SERIALIZED(v,T) \ |
239 | void* _buf = physx::shdfnd::ReflectionAllocator<T>().allocate(sizeof(T),__FILE__,__LINE__); \ |
240 | Cm::markSerializedMem(_buf, sizeof(T)); \ |
241 | v = PX_PLACEMENT_NEW(_buf, T) |
242 | |
243 | #else |
244 | PX_INLINE void markSerializedMem(void*, PxU32){} |
245 | #define PX_NEW_SERIALIZED(v,T) v = PX_NEW(T) |
246 | #endif |
247 | |
248 | template<typename T, class Alloc> |
249 | struct ArrayAccess: public Ps::Array<T, Alloc> |
250 | { |
251 | void store(PxSerializationContext& context) const |
252 | { |
253 | if(this->mData && (this->mSize || this->capacity())) |
254 | context.writeData(data: this->mData, size: this->capacity()*sizeof(T)); |
255 | } |
256 | |
257 | void load(PxDeserializationContext& context) |
258 | { |
259 | if(this->mData && (this->mSize || this->capacity())) |
260 | this->mData = context.readExtraData<T>(this->capacity()); |
261 | } |
262 | }; |
263 | |
264 | template<typename T, typename Alloc> |
265 | void exportArray(const Ps::Array<T, Alloc>& a, PxSerializationContext& context) |
266 | { |
267 | static_cast<const ArrayAccess<T, Alloc>&>(a).store(context); |
268 | } |
269 | |
270 | template<typename T, typename Alloc> |
271 | void importArray(Ps::Array<T, Alloc>& a, PxDeserializationContext& context) |
272 | { |
273 | static_cast<ArrayAccess<T, Alloc>&>(a).load(context); |
274 | } |
275 | |
276 | template<typename T, PxU32 N, typename Alloc> |
277 | void exportInlineArray(const Ps::InlineArray<T, N, Alloc>& a, PxSerializationContext& context) |
278 | { |
279 | if(!a.isInlined()) |
280 | Cm::exportArray(a, context); |
281 | } |
282 | |
283 | template<typename T, PxU32 N, typename Alloc> |
284 | void importInlineArray(Ps::InlineArray<T, N, Alloc>& a, PxDeserializationContext& context) |
285 | { |
286 | if(!a.isInlined()) |
287 | Cm::importArray(a, context); |
288 | } |
289 | |
290 | template<class T> |
291 | static PX_INLINE T* reserveContainerMemory(Ps::Array<T>& container, PxU32 nb) |
292 | { |
293 | const PxU32 maxNbEntries = container.capacity(); |
294 | const PxU32 requiredSize = container.size() + nb; |
295 | |
296 | if(requiredSize>maxNbEntries) |
297 | { |
298 | const PxU32 naturalGrowthSize = maxNbEntries ? maxNbEntries*2 : 2; |
299 | const PxU32 newSize = PxMax(a: requiredSize, b: naturalGrowthSize); |
300 | container.reserve(newSize); |
301 | } |
302 | |
303 | T* buf = container.end(); |
304 | container.forceSize_Unsafe(requiredSize); |
305 | return buf; |
306 | } |
307 | |
308 | } // namespace Cm |
309 | |
310 | |
311 | |
312 | } |
313 | |
314 | #endif |
315 | |