#ifndef BT_DYNAMIC_BOUNDING_VOLUME_TREE_H
#define BT_DYNAMIC_BOUNDING_VOLUME_TREE_H

#include "LinearMath/btAlignedObjectArray.h"
#include "LinearMath/btVector3.h"
#include "LinearMath/btTransform.h"
#include "LinearMath/btAabbUtil2.h"

// Implementation profiles
#define DBVT_IMPL_GENERIC 0  // Generic implementation
#define DBVT_IMPL_SSE 1      // SSE

// Template implementation of ICollide
#ifdef _WIN32
#if (defined(_MSC_VER) && _MSC_VER >= 1400)
#define DBVT_USE_TEMPLATE 1
#else
#define DBVT_USE_TEMPLATE 0
#endif
#else
#define DBVT_USE_TEMPLATE 0
#endif

// Use only intrinsics instead of inline asm
#define DBVT_USE_INTRINSIC_SSE 1

// Using memmove for collideOCL
#define DBVT_USE_MEMMOVE 1

// Enable benchmarking code
#define DBVT_ENABLE_BENCHMARK 0

// Inlining
#define DBVT_INLINE SIMD_FORCE_INLINE

// Specific methods implementation
#if defined(BT_USE_SSE)  //&& defined (_WIN32)
#define DBVT_SELECT_IMPL DBVT_IMPL_SSE
#define DBVT_MERGE_IMPL DBVT_IMPL_SSE
#define DBVT_INT0_IMPL DBVT_IMPL_SSE
#else
#define DBVT_SELECT_IMPL DBVT_IMPL_GENERIC
#define DBVT_MERGE_IMPL DBVT_IMPL_GENERIC
#define DBVT_INT0_IMPL DBVT_IMPL_GENERIC
#endif

#if (DBVT_SELECT_IMPL == DBVT_IMPL_SSE) || \
	(DBVT_MERGE_IMPL == DBVT_IMPL_SSE) ||  \
	(DBVT_INT0_IMPL == DBVT_IMPL_SSE)
#include <emmintrin.h>
#endif

// Auto config and checks
#if DBVT_USE_TEMPLATE
#define DBVT_VIRTUAL
#define DBVT_VIRTUAL_DTOR(a)
#define DBVT_PREFIX template <typename T>
#define DBVT_IPOLICY T& policy
#define DBVT_CHECKTYPE                           \
	static const ICollide& typechecker = *(T*)1; \
	(void)typechecker;
#else
#define DBVT_VIRTUAL_DTOR(a) \
	virtual ~a() {}
#define DBVT_VIRTUAL virtual
#define DBVT_PREFIX
#define DBVT_IPOLICY ICollide& policy
#define DBVT_CHECKTYPE
#endif

#if DBVT_USE_MEMMOVE
#if !defined(__CELLOS_LV2__) && !defined(__MWERKS__)
#include <memory.h>
#endif
#include <string.h>
#endif

#ifndef DBVT_USE_TEMPLATE
#error "DBVT_USE_TEMPLATE undefined"
#endif

#ifndef DBVT_USE_MEMMOVE
#error "DBVT_USE_MEMMOVE undefined"
#endif

#ifndef DBVT_ENABLE_BENCHMARK
#error "DBVT_ENABLE_BENCHMARK undefined"
#endif

#ifndef DBVT_SELECT_IMPL
#error "DBVT_SELECT_IMPL undefined"
#endif

#ifndef DBVT_MERGE_IMPL
#error "DBVT_MERGE_IMPL undefined"
#endif

#ifndef DBVT_INT0_IMPL
#error "DBVT_INT0_IMPL undefined"
#endif

	virtual void WriteNode(const btDbvtNode*,
						   int index,
						   int parent,
						   int child0,
						   int child1) = 0;

	void write(IWriter* iwriter) const;
	void clone(btDbvt& dest, IClone* iclone = 0) const;
#if DBVT_ENABLE_BENCHMARK

	DBVT_PREFIX
	void rayTestInternal(const btDbvtNode* root,
						 const btVector3& rayFrom,
						 const btVector3& rayTo,
						 const btVector3& rayDirectionInverse,
						 unsigned int signs[3],
						 btScalar lambda_max,
						 const btVector3& aabbMin,
						 const btVector3& aabbMax,
						 btAlignedObjectArray<const btDbvtNode*>& stack,
						 DBVT_IPOLICY) const;

	static DBVT_PREFIX void collideOCL(const btDbvtNode* root,
									   const btVector3* normals,
									   const btScalar* offsets,
									   const btVector3& sortaxis,
									   int count,
									   DBVT_IPOLICY,
									   bool fullsort = true);
	if (a[i[m]].value >= v)

	if (ifree.size() > 0)
		i = ifree[ifree.size() - 1];

	box.mi = box.mx = pts[0];
	for (int i = 1; i < n; ++i)

	box.mi = box.mx = *ppts[0];
	for (int i = 1; i < n; ++i)
	return ((mi.x() <= a.mi.x()) &&
			(mi.y() <= a.mi.y()) &&
			(mi.z() <= a.mi.z()) &&
			(mx.x() >= a.mx.x()) &&
			(mx.y() >= a.mx.y()) &&
			(mx.z() >= a.mx.z()));
	if ((btDot(n, px) + o) < 0) return (-1);
	if ((btDot(n, pi) + o) >= 0) return (+1);

	const btVector3 p(b[(signs >> 0) & 1]->x(),
					  b[(signs >> 1) & 1]->y(),
					  b[(signs >> 2) & 1]->z());
	return (btDot(p, v));

	for (int i = 0; i < 3; ++i)
#if DBVT_INT0_IMPL == DBVT_IMPL_SSE
	const __m128 rt(_mm_or_ps(_mm_cmplt_ps(_mm_load_ps(b.mx), _mm_load_ps(a.mi)),
							  _mm_cmplt_ps(_mm_load_ps(a.mx), _mm_load_ps(b.mi))));
#if defined(_WIN32)
	const __int32* pu((const __int32*)&rt);
#else
	const int* pu((const int*)&rt);
#endif
	return ((pu[0] | pu[1] | pu[2]) == 0);
#else
	return ((a.mi.x() <= b.mx.x()) &&
			(a.mx.x() >= b.mi.x()) &&
			(a.mi.y() <= b.mx.y()) &&
			(a.mx.y() >= b.mi.y()) &&
			(a.mi.z() <= b.mx.z()) &&
			(a.mx.z() >= b.mi.z()));
#endif
	return ((b.x() >= a.mi.x()) &&
			(b.y() >= a.mi.y()) &&
			(b.z() >= a.mi.z()) &&
			(b.x() <= a.mx.x()) &&
			(b.y() <= a.mx.y()) &&
			(b.z() <= a.mx.z()));
#if DBVT_SELECT_IMPL == DBVT_IMPL_SSE
#if defined(_WIN32)
	static ATTRIBUTE_ALIGNED16(const unsigned __int32) mask[] = {0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff};
#else
	static ATTRIBUTE_ALIGNED16(const unsigned int) mask[] = {0x7fffffff, 0x7fffffff, 0x7fffffff, 0x00000000};
#endif
#if DBVT_USE_INTRINSIC_SSE
	union btSSEUnion
	{
		__m128 ssereg;
		float floats[4];
		int ints[4];
	};
	__m128 omi(_mm_load_ps(o.mi));
	omi = _mm_add_ps(omi, _mm_load_ps(o.mx));
	__m128 ami(_mm_load_ps(a.mi));
	ami = _mm_add_ps(ami, _mm_load_ps(a.mx));
	ami = _mm_sub_ps(ami, omi);
	ami = _mm_and_ps(ami, _mm_load_ps((const float*)mask));
	__m128 bmi(_mm_load_ps(b.mi));
	bmi = _mm_add_ps(bmi, _mm_load_ps(b.mx));
	bmi = _mm_sub_ps(bmi, omi);
	bmi = _mm_and_ps(bmi, _mm_load_ps((const float*)mask));
	__m128 t0(_mm_movehl_ps(ami, ami));
	ami = _mm_add_ps(ami, t0);
	ami = _mm_add_ss(ami, _mm_shuffle_ps(ami, ami, 1));
	__m128 t1(_mm_movehl_ps(bmi, bmi));
	bmi = _mm_add_ps(bmi, t1);
	bmi = _mm_add_ss(bmi, _mm_shuffle_ps(bmi, bmi, 1));
	btSSEUnion tmp;
	tmp.ssereg = _mm_cmple_ss(bmi, ami);
	return tmp.ints[0] & 1;
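// Illustrative sketch (not from this excerpt): what the SSE Select() path above
// computes, written out with plain btVector3 math. The proximity measure is the
// Manhattan distance between box centers; a center is (mi + mx) / 2, and the
// factor of 1/2 cancels when two proximities are compared, so the raw sums are
// used directly. boxProximity/selectNearer are hypothetical helper names, not
// part of this header.
inline btScalar boxProximity(const btVector3& ami, const btVector3& amx,
							 const btVector3& bmi, const btVector3& bmx)
{
	// |(amin + amax) - (bmin + bmax)| summed over x, y, z
	const btVector3 d = (ami + amx) - (bmi + bmx);
	return btFabs(d.x()) + btFabs(d.y()) + btFabs(d.z());
}

// Returns 0 if box a is strictly nearer to box o than box b is, else 1 --
// the same decision the SSE branch derives via _mm_cmple_ss above.
inline int selectNearer(const btVector3& omi, const btVector3& omx,
						const btVector3& ami, const btVector3& amx,
						const btVector3& bmi, const btVector3& bmx)
{
	return boxProximity(omi, omx, ami, amx) < boxProximity(omi, omx, bmi, bmx) ? 0 : 1;
}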
#if DBVT_MERGE_IMPL == DBVT_IMPL_SSE
	__m128 ami(_mm_load_ps(a.mi));
	__m128 amx(_mm_load_ps(a.mx));
	__m128 bmi(_mm_load_ps(b.mi));
	__m128 bmx(_mm_load_ps(b.mx));
	ami = _mm_min_ps(ami, bmi);
	amx = _mm_max_ps(amx, bmx);
	_mm_store_ps(r.mi, ami);
	_mm_store_ps(r.mx, amx);
	for (int i = 0; i < 3; ++i)
	{
		if (a.mi[i] < b.mi[i])
			r.mi[i] = a.mi[i];
		else
			r.mi[i] = b.mi[i];
		if (a.mx[i] > b.mx[i])
			r.mx[i] = a.mx[i];
		else
			r.mx[i] = b.mx[i];
	}
	return ((a.mi.x() != b.mi.x()) ||
			(a.mi.y() != b.mi.y()) ||
			(a.mi.z() != b.mi.z()) ||
			(a.mx.x() != b.mx.x()) ||
			(a.mx.y() != b.mx.y()) ||
			(a.mx.z() != b.mx.z()));
	policy.Process(root);

	policy.Process(root);
	stkStack[0] = sStkNN(root0, root1);

	sStkNN p = stkStack[--depth];
	if (depth > treshold)
	{
		stkStack.resize(stkStack.size() * 2);
		treshold = stkStack.size() - 4;
	}

	policy.Process(p.a, p.b);
	if (depth > treshold)

	policy.Process(p.a, p.b);
	stkStack[0] = sStkNN(root0, root1);
	sStkNN p = stkStack[--depth];
	if (Intersect(p.a->volume, p.b->volume, xform))
	{
		if (depth > treshold)
		{
			stkStack.resize(stkStack.size() * 2);
			treshold = stkStack.size() - 4;
		}
		if (p.a->isinternal())
		{
			if (p.b->isinternal())
			{
				stkStack[depth++] = sStkNN(p.a->childs[0], p.b->childs[0]);
				stkStack[depth++] = sStkNN(p.a->childs[1], p.b->childs[0]);
				stkStack[depth++] = sStkNN(p.a->childs[0], p.b->childs[1]);
				stkStack[depth++] = sStkNN(p.a->childs[1], p.b->childs[1]);
			}
			else
			{
				stkStack[depth++] = sStkNN(p.a->childs[0], p.b);
				stkStack[depth++] = sStkNN(p.a->childs[1], p.b);
			}
		}
		else
		{
			if (p.b->isinternal())
			{
				stkStack[depth++] = sStkNN(p.a, p.b->childs[0]);
				stkStack[depth++] = sStkNN(p.a, p.b->childs[1]);
			}
			else
			{
				policy.Process(p.a, p.b);
			}
		}
	}
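// Hedged usage sketch (not part of the original header): driving the
// tree-vs-tree traversal above with a pair-collecting policy. CollectPairs,
// overlapExample and the pair storage are illustrative; only btDbvt,
// btDbvtNode and ICollide come from this header.
struct CollectPairs : btDbvt::ICollide
{
	btAlignedObjectArray<const btDbvtNode*> pairs;  // stored as consecutive (a, b) couples
	void Process(const btDbvtNode* na, const btDbvtNode* nb)
	{
		pairs.push_back(na);
		pairs.push_back(nb);
	}
};

void overlapExample(btDbvt& treeA, btDbvt& treeB)
{
	// Reports every leaf pair whose volumes overlap between the two trees.
	CollectPairs policy;
	treeA.collideTT(treeA.m_root, treeB.m_root, policy);
}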
#ifndef BT_DISABLE_STACK_TEMP_MEMORY
#endif  //BT_DISABLE_STACK_TEMP_MEMORY

	} while (stack.size() > 0);

	} while (stack.size() > 0);
	unsigned int signs[3],

	btScalar tmin = 1.f, lambda_min = 0.f;
	unsigned int result1 = false;
	result1 = btRayAabb2(rayFrom, rayDirectionInverse, signs, bounds, tmin, lambda_min, lambda_max);

	if (depth > treshold)
	{
		stack.resize(stack.size() * 2);
		treshold = stack.size() - 2;
	}
	stack[depth++] = node->childs[0];
	stack[depth++] = node->childs[1];

	policy.Process(node);
	unsigned int signs[3] = {rayDirectionInverse[0] < 0.0,
							 rayDirectionInverse[1] < 0.0,
							 rayDirectionInverse[2] < 0.0};

	btScalar lambda_max = rayDir.dot(rayTo - rayFrom);

#ifndef BT_DISABLE_STACK_TEMP_MEMORY
#else  //BT_DISABLE_STACK_TEMP_MEMORY
#endif  //BT_DISABLE_STACK_TEMP_MEMORY

	btScalar tmin = 1.f, lambda_min = 0.f;
	unsigned int result1 = btRayAabb2(rayFrom, rayDirectionInverse, signs, bounds, tmin, lambda_min, lambda_max);

#ifdef COMPARE_BTRAY_AABB2
#endif  //TEST_BTRAY_AABB2

	if (depth > treshold)
	{
		stack.resize(stack.size() * 2);
		treshold = stack.size() - 2;
	}
	stack[depth++] = node->childs[0];
	stack[depth++] = node->childs[1];

	policy.Process(node);
	const int inside = (1 << count) - 1;

	int signs[sizeof(unsigned) * 8];
	btAssert(count < int(sizeof(signs) / sizeof(signs[0])));
	for (int i = 0; i < count; ++i)
		signs[i] = ((normals[i].x() >= 0) ? 1 : 0) +
				   ((normals[i].y() >= 0) ? 2 : 0) +
				   ((normals[i].z() >= 0) ? 4 : 0);
	for (int i = 0, j = 1; (!out) && (i < count); ++i, j <<= 1)
		if (0 == (se.mask & j))

	} while (stack.size());
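// Hedged usage sketch (not part of the original header): culling leaves against
// a convex region given as plane normals and offsets, which is what the
// collideKDOP code above traverses. A volume is kept while every plane test
// passes (btDot(n, p) + offset >= 0). CollectLeaves, cullExample and the two
// slab planes are illustrative values, not from this header.
struct CollectLeaves : btDbvt::ICollide
{
	btAlignedObjectArray<const btDbvtNode*> hits;
	void Process(const btDbvtNode* leaf) { hits.push_back(leaf); }
};

void cullExample(const btDbvt& tree)
{
	const btVector3 normals[2] = {btVector3(1, 0, 0), btVector3(-1, 0, 0)};
	const btScalar offsets[2] = {10, 10};  // keeps the slab -10 <= x <= 10
	CollectLeaves policy;
	btDbvt::collideKDOP(tree.m_root, normals, offsets, 2, policy);
}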
	const unsigned srtsgns = (sortaxis[0] >= 0 ? 1 : 0) +
							 (sortaxis[1] >= 0 ? 2 : 0) +
							 (sortaxis[2] >= 0 ? 4 : 0);
	const int inside = (1 << count) - 1;

	int signs[sizeof(unsigned) * 8];
	btAssert(count < int(sizeof(signs) / sizeof(signs[0])));
	for (int i = 0; i < count; ++i)
		signs[i] = ((normals[i].x() >= 0) ? 1 : 0) +
				   ((normals[i].y() >= 0) ? 2 : 0) +
				   ((normals[i].z() >= 0) ? 4 : 0);
	const int id = stack[stack.size() - 1];

	if (se.mask != inside)

	for (int i = 0, j = 1; (!out) && (i < count); ++i, j <<= 1)
		if (0 == (se.mask & j))

	if (policy.Descent(se.node))

	const int q = nes[0].value < nes[1].value ? 1 : 0;
	int j = stack.size();
	if (fsort && (j > 0))
	{
		j = nearest(&stack[0], &stock[0], nes[q].value, 0, stack.size());
#if DBVT_USE_MEMMOVE
		{
			int num_items_to_move = stack.size() - 1 - j;
			if (num_items_to_move > 0)
				memmove(&stack[j + 1], &stack[j], sizeof(int) * num_items_to_move);
		}
#else
		for (int k = stack.size() - 1; k > j; --k)
			stack[k] = stack[k - 1];
#endif
		stack[j] = allocate(ifree, stock, nes[q]);

		j = nearest(&stack[0], &stock[0], nes[1 - q].value, j, stack.size());
#if DBVT_USE_MEMMOVE
		{
			int num_items_to_move = stack.size() - 1 - j;
			if (num_items_to_move > 0)
				memmove(&stack[j + 1], &stack[j], sizeof(int) * num_items_to_move);
		}
#else
		for (int k = stack.size() - 1; k > j; --k)
			stack[k] = stack[k - 1];
#endif
		stack[j] = allocate(ifree, stock, nes[1 - q]);
	}

	} while (stack.size());
	if (policy.Descent(n))

	} while (stack.size() > 0);
#undef DBVT_USE_MEMMOVE
#undef DBVT_USE_TEMPLATE
#undef DBVT_VIRTUAL_DTOR
#undef DBVT_CHECKTYPE
#undef DBVT_IMPL_GENERIC
#undef DBVT_IMPL_SSE
#undef DBVT_USE_INTRINSIC_SSE
#undef DBVT_SELECT_IMPL
#undef DBVT_MERGE_IMPL
#undef DBVT_INT0_IMPL

#endif  // BT_DYNAMIC_BOUNDING_VOLUME_TREE_H
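// Hedged end-to-end sketch (not part of the original header): a typical leaf
// lifecycle against a single tree -- insert, volume query, refit, remove.
// CountLeaves and lifecycleExample are illustrative names; the btDbvt calls
// are the ones declared in this header.
struct CountLeaves : btDbvt::ICollide
{
	int hits;
	CountLeaves() : hits(0) {}
	void Process(const btDbvtNode* leaf) { ++hits; (void)leaf; }
};

void lifecycleExample()
{
	btDbvt tree;
	btDbvtVolume vol = btDbvtVolume::FromCE(btVector3(0, 0, 0), btVector3(1, 1, 1));
	btDbvtNode* leaf = tree.insert(vol, /*data=*/0);

	// Volume query: Process() runs once per overlapping leaf.
	CountLeaves policy;
	tree.collideTV(tree.m_root, btDbvtVolume::FromCE(btVector3(0, 0, 0), btVector3(2, 2, 2)), policy);

	// Refit the leaf to a moved volume, then remove it.
	btDbvtVolume moved = btDbvtVolume::FromCE(btVector3(3, 0, 0), btVector3(1, 1, 1));
	tree.update(leaf, moved);
	tree.remove(leaf);
}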