36void btQuantizedBvh::buildInternal()
59 subtree.m_rootNodeIndex = 0;
72#ifdef DEBUG_PATCH_COLORS
84 btVector3 clampValue(quantizationMargin, quantizationMargin, quantizationMargin);
93 unsigned short vecIn[3];
112btQuantizedBvh::~btQuantizedBvh()
116#ifdef DEBUG_TREE_BUILDING
118int gMaxStackDepth = 0;
121void btQuantizedBvh::buildTree(
int startIndex,
int endIndex)
123#ifdef DEBUG_TREE_BUILDING
125 if (gStackDepth > gMaxStackDepth)
126 gMaxStackDepth = gStackDepth;
129 int splitAxis, splitIndex,
i;
137#ifdef DEBUG_TREE_BUILDING
159 for (
i = startIndex;
i < endIndex;
i++)
177#ifdef DEBUG_TREE_BUILDING
187 const int treeSizeInBytes = escapeIndex * sizeQuantizedNode;
200void btQuantizedBvh::updateSubtreeHeaders(
int leftChildNodexIndex,
int rightChildNodexIndex)
205 int leftSubTreeSize = leftChildNode.isLeafNode() ? 1 : leftChildNode.getEscapeIndex();
206 int leftSubTreeSizeInBytes = leftSubTreeSize *
static_cast<int>(
sizeof(
btQuantizedBvhNode));
209 int rightSubTreeSize = rightChildNode.isLeafNode() ? 1 : rightChildNode.getEscapeIndex();
210 int rightSubTreeSizeInBytes = rightSubTreeSize *
static_cast<int>(
sizeof(
btQuantizedBvhNode));
215 subtree.setAabbFromQuantizeNode(leftChildNode);
216 subtree.m_rootNodeIndex = leftChildNodexIndex;
217 subtree.m_subtreeSize = leftSubTreeSize;
223 subtree.setAabbFromQuantizeNode(rightChildNode);
224 subtree.m_rootNodeIndex = rightChildNodexIndex;
225 subtree.m_subtreeSize = rightSubTreeSize;
232int btQuantizedBvh::sortAndCalcSplittingIndex(
int startIndex,
int endIndex,
int splitAxis)
235 int splitIndex = startIndex;
240 for (
i = startIndex;
i < endIndex;
i++)
247 splitValue = means[splitAxis];
250 for (
i = startIndex;
i < endIndex;
i++)
253 if (center[splitAxis] > splitValue)
271 bool unbalanced = ((splitIndex <= (startIndex + rangeBalancedIndices)) || (splitIndex >= (endIndex - 1 - rangeBalancedIndices)));
278 bool unbal = (splitIndex == startIndex) || (splitIndex == (endIndex));
285int btQuantizedBvh::calcSplittingAxis(
int startIndex,
int endIndex)
293 for (
i = startIndex;
i < endIndex;
i++)
300 for (
i = startIndex;
i < endIndex;
i++)
304 diff2 = diff2 * diff2;
309 return variance.maxAxis();
319 unsigned short int quantizedQueryAabbMin[3];
320 unsigned short int quantizedQueryAabbMax[3];
326 case TRAVERSAL_STACKLESS:
329 case TRAVERSAL_STACKLESS_CACHE_FRIENDLY:
332 case TRAVERSAL_RECURSIVE:
354 int escapeIndex, curIndex = 0;
355 int walkIterations = 0;
358 unsigned aabbOverlap;
366 aabbOverlap =
TestAabbAgainstAabb2(aabbMin, aabbMax, rootNode->m_aabbMinOrg, rootNode->m_aabbMaxOrg);
372 nodeCallback->
processNode(rootNode->m_subPart, rootNode->m_triangleIndex);
383 escapeIndex = rootNode->m_escapeIndex;
384 rootNode += escapeIndex;
385 curIndex += escapeIndex;
392void btQuantizedBvh::walkTree(btOptimizedBvhNode* rootNode,btNodeOverlapCallback* nodeCallback,const btVector3& aabbMin,const btVector3& aabbMax) const
394 bool isLeafNode, aabbOverlap = TestAabbAgainstAabb2(aabbMin,aabbMax,rootNode->m_aabbMin,rootNode->m_aabbMax);
397 isLeafNode = (!rootNode->m_leftChild && !rootNode->m_rightChild);
400 nodeCallback->processNode(rootNode);
403 walkTree(rootNode->m_leftChild,nodeCallback,aabbMin,aabbMax);
404 walkTree(rootNode->m_rightChild,nodeCallback,aabbMin,aabbMax);
411void btQuantizedBvh::walkRecursiveQuantizedTreeAgainstQueryAabb(
const btQuantizedBvhNode* currentNode,
btNodeOverlapCallback* nodeCallback,
unsigned short int* quantizedQueryAabbMin,
unsigned short int* quantizedQueryAabbMax)
const
417 unsigned aabbOverlap;
424 if (aabbOverlap != 0)
428 nodeCallback->
processNode(currentNode->getPartId(), currentNode->getTriangleIndex());
436 const btQuantizedBvhNode* rightChildNode = leftChildNode->isLeafNode() ? leftChildNode + 1 : leftChildNode + leftChildNode->getEscapeIndex();
447 int escapeIndex, curIndex = 0;
448 int walkIterations = 0;
451 unsigned aabbOverlap = 0;
452 unsigned rayBoxOverlap = 0;
458 rayAabbMin.setMin(rayTarget);
459 rayAabbMax.setMax(rayTarget);
462 rayAabbMin += aabbMin;
463 rayAabbMax += aabbMax;
466 btVector3 rayDir = (rayTarget - raySource);
467 rayDir.safeNormalize();
468 lambda_max = rayDir.dot(rayTarget - raySource);
474 unsigned int sign[3] = {rayDirectionInverse[0] < 0.0, rayDirectionInverse[1] < 0.0, rayDirectionInverse[2] < 0.0};
487 bounds[0] = rootNode->m_aabbMinOrg;
488 bounds[1] = rootNode->m_aabbMaxOrg;
493 aabbOverlap =
TestAabbAgainstAabb2(rayAabbMin, rayAabbMax, rootNode->m_aabbMinOrg, rootNode->m_aabbMaxOrg);
500 rayBoxOverlap = aabbOverlap ?
btRayAabb2(raySource, rayDirectionInverse,
sign,
bounds, param, 0.0f, lambda_max) :
false;
512 nodeCallback->
processNode(rootNode->m_subPart, rootNode->m_triangleIndex);
523 escapeIndex = rootNode->m_escapeIndex;
524 rootNode += escapeIndex;
525 curIndex += escapeIndex;
534 int curIndex = startNodeIndex;
535 int walkIterations = 0;
536 int subTreeSize = endNodeIndex - startNodeIndex;
544 unsigned boxBoxOverlap = 0;
545 unsigned rayBoxOverlap = 0;
550 btVector3 rayDirection = (rayTarget - raySource);
551 rayDirection.safeNormalize();
552 lambda_max = rayDirection.dot(rayTarget - raySource);
557 unsigned int sign[3] = {rayDirection[0] < 0.0, rayDirection[1] < 0.0, rayDirection[2] < 0.0};
563 rayAabbMin.setMin(rayTarget);
564 rayAabbMax.setMax(rayTarget);
567 rayAabbMin += aabbMin;
568 rayAabbMax += aabbMax;
570 unsigned short int quantizedQueryAabbMin[3];
571 unsigned short int quantizedQueryAabbMax[3];
575 while (curIndex < endNodeIndex)
578#ifdef VISUALLY_ANALYZE_BVH
580 static int drawPatch = 0;
583 if (curIndex == drawPatch)
586 aabbMin =
unQuantize(rootNode->m_quantizedAabbMin);
587 aabbMax =
unQuantize(rootNode->m_quantizedAabbMax);
589 debugDrawerPtr->
drawAabb(aabbMin, aabbMax, color);
594 btAssert(walkIterations < subTreeSize);
617 printf(
"functions don't match\n");
629 rayBoxOverlap =
true;
635 nodeCallback->
processNode(rootNode->getPartId(), rootNode->getTriangleIndex());
646 escapeIndex = rootNode->getEscapeIndex();
647 rootNode += escapeIndex;
648 curIndex += escapeIndex;
653void btQuantizedBvh::walkStacklessQuantizedTree(
btNodeOverlapCallback* nodeCallback,
unsigned short int* quantizedQueryAabbMin,
unsigned short int* quantizedQueryAabbMax,
int startNodeIndex,
int endNodeIndex)
const
657 int curIndex = startNodeIndex;
658 int walkIterations = 0;
659 int subTreeSize = endNodeIndex - startNodeIndex;
667 unsigned aabbOverlap;
669 while (curIndex < endNodeIndex)
672#ifdef VISUALLY_ANALYZE_BVH
674 static int drawPatch = 0;
677 if (curIndex == drawPatch)
680 aabbMin =
unQuantize(rootNode->m_quantizedAabbMin);
681 aabbMax =
unQuantize(rootNode->m_quantizedAabbMax);
683 debugDrawerPtr->
drawAabb(aabbMin, aabbMax, color);
688 btAssert(walkIterations < subTreeSize);
697 nodeCallback->
processNode(rootNode->getPartId(), rootNode->getTriangleIndex());
708 escapeIndex = rootNode->getEscapeIndex();
709 rootNode += escapeIndex;
710 curIndex += escapeIndex;
716void btQuantizedBvh::walkStacklessQuantizedTreeCacheFriendly(
btNodeOverlapCallback* nodeCallback,
unsigned short int* quantizedQueryAabbMin,
unsigned short int* quantizedQueryAabbMax)
const
731 subtree.m_rootNodeIndex,
732 subtree.m_rootNodeIndex + subtree.m_subtreeSize);
768void btQuantizedBvh::swapLeafNodes(
int i,
int splitIndex)
784void btQuantizedBvh::assignInternalNodeFromLeafNode(
int internalNode,
int leafNodeIndex)
// NOTE(review): the leading digits fused onto these lines (801/802/804) look like
// extraction artifacts (original source line numbers), not code — confirm against
// the real btQuantizedBvh.cpp before building.
// Alignment, in bytes, required for a serialized BVH data buffer.
801static const unsigned BVH_ALIGNMENT = 16;
// Mask for rounding addresses/sizes up to BVH_ALIGNMENT (valid because
// BVH_ALIGNMENT is a power of two).
802static const unsigned BVH_ALIGNMENT_MASK = BVH_ALIGNMENT-1;
// Number of alignment-sized blocks of padding — presumably consumed by
// getAlignmentSerializationPadding() below; its body is not visible here, so verify.
804static const unsigned BVH_ALIGNMENT_BLOCKS = 2;
807unsigned int btQuantizedBvh::getAlignmentSerializationPadding()
813unsigned btQuantizedBvh::calculateSerializeBufferSize()
const
824bool btQuantizedBvh::serialize(
void* o_alignedDataBuffer,
unsigned ,
bool i_swapEndian)
const
866 unsigned char* nodeData = (
unsigned char*)targetBvh;
869 unsigned sizeToAdd = 0;
870 nodeData += sizeToAdd;
876 targetBvh->m_quantizedContiguousNodes.initializeFromBuffer(nodeData, nodeCount, nodeCount);
880 for (
int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
895 for (
int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
897 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0] =
m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0];
898 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1] =
m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1];
899 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2] =
m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2];
901 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0] =
m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0];
902 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1] =
m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1];
903 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2] =
m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2];
905 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex =
m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex;
913 targetBvh->m_quantizedContiguousNodes.initializeFromBuffer(NULL, 0, 0);
917 targetBvh->m_contiguousNodes.initializeFromBuffer(nodeData, nodeCount, nodeCount);
921 for (
int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
933 for (
int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
935 targetBvh->m_contiguousNodes[nodeIndex].m_aabbMinOrg =
m_contiguousNodes[nodeIndex].m_aabbMinOrg;
936 targetBvh->m_contiguousNodes[nodeIndex].m_aabbMaxOrg =
m_contiguousNodes[nodeIndex].m_aabbMaxOrg;
938 targetBvh->m_contiguousNodes[nodeIndex].m_escapeIndex =
m_contiguousNodes[nodeIndex].m_escapeIndex;
939 targetBvh->m_contiguousNodes[nodeIndex].m_subPart =
m_contiguousNodes[nodeIndex].m_subPart;
940 targetBvh->m_contiguousNodes[nodeIndex].m_triangleIndex =
m_contiguousNodes[nodeIndex].m_triangleIndex;
948 targetBvh->m_contiguousNodes.initializeFromBuffer(NULL, 0, 0);
952 nodeData += sizeToAdd;
976 targetBvh->m_SubtreeHeaders[
i].m_quantizedAabbMin[0] = (
m_SubtreeHeaders[
i].m_quantizedAabbMin[0]);
977 targetBvh->m_SubtreeHeaders[
i].m_quantizedAabbMin[1] = (
m_SubtreeHeaders[
i].m_quantizedAabbMin[1]);
978 targetBvh->m_SubtreeHeaders[
i].m_quantizedAabbMin[2] = (
m_SubtreeHeaders[
i].m_quantizedAabbMin[2]);
980 targetBvh->m_SubtreeHeaders[
i].m_quantizedAabbMax[0] = (
m_SubtreeHeaders[
i].m_quantizedAabbMax[0]);
981 targetBvh->m_SubtreeHeaders[
i].m_quantizedAabbMax[1] = (
m_SubtreeHeaders[
i].m_quantizedAabbMax[1]);
982 targetBvh->m_SubtreeHeaders[
i].m_quantizedAabbMax[2] = (
m_SubtreeHeaders[
i].m_quantizedAabbMax[2]);
984 targetBvh->m_SubtreeHeaders[
i].m_rootNodeIndex = (
m_SubtreeHeaders[
i].m_rootNodeIndex);
988 targetBvh->m_SubtreeHeaders[
i].m_padding[0] = 0;
989 targetBvh->m_SubtreeHeaders[
i].m_padding[1] = 0;
990 targetBvh->m_SubtreeHeaders[
i].m_padding[2] = 0;
998 targetBvh->m_SubtreeHeaders.initializeFromBuffer(NULL, 0, 0);
1001 *((
void**)o_alignedDataBuffer) = NULL;
1006btQuantizedBvh* btQuantizedBvh::deSerializeInPlace(
void* i_alignedDataBuffer,
unsigned int i_dataBufferSize,
bool i_swapEndian)
1008 if (i_alignedDataBuffer == NULL)
1016 bvh->m_curNodeIndex =
static_cast<int>(
btSwapEndian(bvh->m_curNodeIndex));
1022 bvh->m_traversalMode = (btTraversalMode)
btSwapEndian(bvh->m_traversalMode);
1023 bvh->m_subtreeHeaderCount =
static_cast<int>(
btSwapEndian(bvh->m_subtreeHeaderCount));
1026 unsigned int calculatedBufSize = bvh->calculateSerializeBufferSize();
1027 btAssert(calculatedBufSize <= i_dataBufferSize);
1029 if (calculatedBufSize > i_dataBufferSize)
1034 unsigned char* nodeData = (
unsigned char*)bvh;
1037 unsigned sizeToAdd = 0;
1038 nodeData += sizeToAdd;
1040 int nodeCount = bvh->m_curNodeIndex;
1046 if (bvh->m_useQuantization)
1048 bvh->m_quantizedContiguousNodes.initializeFromBuffer(nodeData, nodeCount, nodeCount);
1052 for (
int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
1054 bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0] =
btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0]);
1055 bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1] =
btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1]);
1056 bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2] =
btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2]);
1058 bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0] =
btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0]);
1059 bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1] =
btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1]);
1060 bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2] =
btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2]);
1062 bvh->m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex =
static_cast<int>(
btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex));
1069 bvh->m_contiguousNodes.initializeFromBuffer(nodeData, nodeCount, nodeCount);
1073 for (
int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
1078 bvh->m_contiguousNodes[nodeIndex].m_escapeIndex =
static_cast<int>(
btSwapEndian(bvh->m_contiguousNodes[nodeIndex].m_escapeIndex));
1079 bvh->m_contiguousNodes[nodeIndex].m_subPart =
static_cast<int>(
btSwapEndian(bvh->m_contiguousNodes[nodeIndex].m_subPart));
1080 bvh->m_contiguousNodes[nodeIndex].m_triangleIndex =
static_cast<int>(
btSwapEndian(bvh->m_contiguousNodes[nodeIndex].m_triangleIndex));
1087 nodeData += sizeToAdd;
1090 bvh->m_SubtreeHeaders.initializeFromBuffer(nodeData, bvh->m_subtreeHeaderCount, bvh->m_subtreeHeaderCount);
1093 for (
int i = 0;
i < bvh->m_subtreeHeaderCount;
i++)
1095 bvh->m_SubtreeHeaders[
i].m_quantizedAabbMin[0] =
btSwapEndian(bvh->m_SubtreeHeaders[
i].m_quantizedAabbMin[0]);
1096 bvh->m_SubtreeHeaders[
i].m_quantizedAabbMin[1] =
btSwapEndian(bvh->m_SubtreeHeaders[
i].m_quantizedAabbMin[1]);
1097 bvh->m_SubtreeHeaders[
i].m_quantizedAabbMin[2] =
btSwapEndian(bvh->m_SubtreeHeaders[
i].m_quantizedAabbMin[2]);
1099 bvh->m_SubtreeHeaders[
i].m_quantizedAabbMax[0] =
btSwapEndian(bvh->m_SubtreeHeaders[
i].m_quantizedAabbMax[0]);
1100 bvh->m_SubtreeHeaders[
i].m_quantizedAabbMax[1] =
btSwapEndian(bvh->m_SubtreeHeaders[
i].m_quantizedAabbMax[1]);
1101 bvh->m_SubtreeHeaders[
i].m_quantizedAabbMax[2] =
btSwapEndian(bvh->m_SubtreeHeaders[
i].m_quantizedAabbMax[2]);
1103 bvh->m_SubtreeHeaders[
i].m_rootNodeIndex =
static_cast<int>(
btSwapEndian(bvh->m_SubtreeHeaders[
i].m_rootNodeIndex));
1104 bvh->m_SubtreeHeaders[
i].m_subtreeSize =
static_cast<int>(
btSwapEndian(bvh->m_SubtreeHeaders[
i].m_subtreeSize));
1119void btQuantizedBvh::deSerializeFloat(
struct btQuantizedBvhFloatData& quantizedBvhFloatData)
1121 m_bvhAabbMax.deSerializeFloat(quantizedBvhFloatData.m_bvhAabbMax);
1122 m_bvhAabbMin.deSerializeFloat(quantizedBvhFloatData.m_bvhAabbMin);
1129 int numElem = quantizedBvhFloatData.m_numContiguousLeafNodes;
1134 btOptimizedBvhNodeFloatData* memPtr = quantizedBvhFloatData.m_contiguousNodesPtr;
1136 for (
int i = 0;
i < numElem;
i++, memPtr++)
1148 int numElem = quantizedBvhFloatData.m_numQuantizedContiguousNodes;
1153 btQuantizedBvhNodeData* memPtr = quantizedBvhFloatData.m_quantizedContiguousNodesPtr;
1154 for (
int i = 0;
i < numElem;
i++, memPtr++)
1167 m_traversalMode = btTraversalMode(quantizedBvhFloatData.m_traversalMode);
1170 int numElem = quantizedBvhFloatData.m_numSubtreeHeaders;
1174 btBvhSubtreeInfoData* memPtr = quantizedBvhFloatData.m_subTreeInfoPtr;
1175 for (
int i = 0;
i < numElem;
i++, memPtr++)
1190void btQuantizedBvh::deSerializeDouble(
struct btQuantizedBvhDoubleData& quantizedBvhDoubleData)
1192 m_bvhAabbMax.deSerializeDouble(quantizedBvhDoubleData.m_bvhAabbMax);
1193 m_bvhAabbMin.deSerializeDouble(quantizedBvhDoubleData.m_bvhAabbMin);
1200 int numElem = quantizedBvhDoubleData.m_numContiguousLeafNodes;
1205 btOptimizedBvhNodeDoubleData* memPtr = quantizedBvhDoubleData.m_contiguousNodesPtr;
1207 for (
int i = 0;
i < numElem;
i++, memPtr++)
1219 int numElem = quantizedBvhDoubleData.m_numQuantizedContiguousNodes;
1224 btQuantizedBvhNodeData* memPtr = quantizedBvhDoubleData.m_quantizedContiguousNodesPtr;
1225 for (
int i = 0;
i < numElem;
i++, memPtr++)
1238 m_traversalMode = btTraversalMode(quantizedBvhDoubleData.m_traversalMode);
1241 int numElem = quantizedBvhDoubleData.m_numSubtreeHeaders;
1245 btBvhSubtreeInfoData* memPtr = quantizedBvhDoubleData.m_subTreeInfoPtr;
1246 for (
int i = 0;
i < numElem;
i++, memPtr++)
1262const char* btQuantizedBvh::serialize(
void* dataBuffer,
btSerializer* serializer)
const
1275 if (quantizedData->m_contiguousNodesPtr)
1281 for (
int i = 0;
i < numElem;
i++, memPtr++)
1289 memset(memPtr->m_pad, 0,
sizeof(memPtr->m_pad));
1297 if (quantizedData->m_quantizedContiguousNodesPtr)
1299 int sz =
sizeof(btQuantizedBvhNodeData);
1302 btQuantizedBvhNodeData* memPtr = (btQuantizedBvhNodeData*)chunk->
m_oldPtr;
1303 for (
int i = 0;
i < numElem;
i++, memPtr++)
1320 if (quantizedData->m_subTreeInfoPtr)
1322 int sz =
sizeof(btBvhSubtreeInfoData);
1325 btBvhSubtreeInfoData* memPtr = (btBvhSubtreeInfoData*)chunk->
m_oldPtr;
1326 for (
int i = 0;
i < numElem;
i++, memPtr++)
ATTR_WARN_UNUSED_RESULT const BMVert * v
SIMD_FORCE_INLINE bool TestAabbAgainstAabb2(const btVector3 &aabbMin1, const btVector3 &aabbMax1, const btVector3 &aabbMin2, const btVector3 &aabbMax2)
Conservative test for overlap between two AABBs (axis-aligned bounding boxes).
SIMD_FORCE_INLINE bool btRayAabb2(const btVector3 &rayFrom, const btVector3 &rayInvDirection, const unsigned int raySign[3], const btVector3 bounds[2], btScalar &tmin, btScalar lambda_min, btScalar lambda_max)
SIMD_FORCE_INLINE unsigned testQuantizedAabbAgainstQuantizedAabb(const unsigned short int *aabbMin1, const unsigned short int *aabbMax1, const unsigned short int *aabbMin2, const unsigned short int *aabbMax2)
SIMD_FORCE_INLINE bool btRayAabb(const btVector3 &rayFrom, const btVector3 &rayTo, const btVector3 &aabbMin, const btVector3 &aabbMax, btScalar ¶m, btVector3 &normal)
SIMD_FORCE_INLINE const btVector3 & getAabbMin() const
SIMD_FORCE_INLINE const btVector3 & getAabbMax() const
static btDbvtVolume bounds(btDbvtNode **leaves, int count)
SIMD_FORCE_INLINE void quantize(unsigned short *out, const btVector3 &point, int isMax) const
void swapLeafNodes(int firstIndex, int secondIndex)
int m_escapeIndexOrTriangleIndex
#define btOptimizedBvhNodeData
void walkRecursiveQuantizedTreeAgainstQueryAabb(const btQuantizedBvhNode *currentNode, btNodeOverlapCallback *nodeCallback, unsigned short int *quantizedQueryAabbMin, unsigned short int *quantizedQueryAabbMax) const
use the 16-byte stackless 'skipindex' node tree to do a recursive traversal
void mergeInternalNodeAabb(int nodeIndex, const btVector3 &newAabbMin, const btVector3 &newAabbMax)
void updateSubtreeHeaders(int leftChildNodexIndex, int rightChildNodexIndex)
void setInternalNodeEscapeIndex(int nodeIndex, int escapeIndex)
void buildTree(int startIndex, int endIndex)
unsigned short int m_quantizedAabbMin[3]
btTraversalMode m_traversalMode
SIMD_FORCE_INLINE void quantizeWithClamp(unsigned short *out, const btVector3 &point2, int isMax) const
NodeArray m_contiguousNodes
void walkStacklessQuantizedTreeAgainstRay(btNodeOverlapCallback *nodeCallback, const btVector3 &raySource, const btVector3 &rayTarget, const btVector3 &aabbMin, const btVector3 &aabbMax, int startNodeIndex, int endNodeIndex) const
void setInternalNodeAabbMin(int nodeIndex, const btVector3 &aabbMin)
static unsigned int getAlignmentSerializationPadding()
#define MAX_SUBTREE_SIZE_IN_BYTES
void reportBoxCastOverlappingNodex(btNodeOverlapCallback *nodeCallback, const btVector3 &raySource, const btVector3 &rayTarget, const btVector3 &aabbMin, const btVector3 &aabbMax) const
void walkStacklessQuantizedTreeCacheFriendly(btNodeOverlapCallback *nodeCallback, unsigned short int *quantizedQueryAabbMin, unsigned short int *quantizedQueryAabbMax) const
Tree traversal designed for small-memory processors such as the PS3 SPU.
void walkStacklessTree(btNodeOverlapCallback *nodeCallback, const btVector3 &aabbMin, const btVector3 &aabbMax) const
btBvhSubtreeInfo
btBvhSubtreeInfo provides info to gather a subtree of limited size
void walkStacklessTreeAgainstRay(btNodeOverlapCallback *nodeCallback, const btVector3 &raySource, const btVector3 &rayTarget, const btVector3 &aabbMin, const btVector3 &aabbMax, int startNodeIndex, int endNodeIndex) const
btVector3 m_bvhQuantization
BvhSubtreeInfoArray m_SubtreeHeaders
unsigned short int m_quantizedAabbMax[3]
void walkStacklessQuantizedTree(btNodeOverlapCallback *nodeCallback, unsigned short int *quantizedQueryAabbMin, unsigned short int *quantizedQueryAabbMax, int startNodeIndex, int endNodeIndex) const
void assignInternalNodeFromLeafNode(int internalNode, int leafNodeIndex)
QuantizedNodeArray m_quantizedContiguousNodes
int sortAndCalcSplittingIndex(int startIndex, int endIndex, int splitAxis)
void setInternalNodeAabbMax(int nodeIndex, const btVector3 &aabbMax)
SIMD_FORCE_INLINE btVector3 unQuantize(const unsigned short *vecIn) const
int calcSplittingAxis(int startIndex, int endIndex)
QuantizedNodeArray m_quantizedLeafNodes
#define btQuantizedBvhDataName
#define btQuantizedBvhData
float btScalar
The btScalar type abstracts floating point numbers, to easily switch between double and single floating point precision.
SIMD_FORCE_INLINE unsigned btSwapEndian(unsigned val)
#define BT_BULLET_VERSION
SIMD_FORCE_INLINE void btUnSwapVector3Endian(btVector3 &vector)
btUnSwapVector3Endian swaps vector endianness, useful for network and cross-platform serialization
btVector3
btVector3 can be used to represent 3D points and vectors. It has an un-used w component to suit 16-byte alignment.
SIMD_FORCE_INLINE void btSwapVector3Endian(const btVector3 &sourceVec, btVector3 &destVec)
btSwapVector3Endian swaps vector endianness, useful for network and cross-platform serialization
virtual void drawAabb(const btVector3 &from, const btVector3 &to, const btVector3 &color)
virtual void processNode(int subPart, int triangleIndex)=0
virtual btChunk * allocate(size_t size, int numElements)=0
virtual void * getUniquePointer(void *oldPtr)=0
virtual void finalizeChunk(btChunk *chunk, const char *structType, int chunkCode, void *oldPtr)=0