/*
Bullet Continuous Collision Detection and Physics Library
Copyright (c) 2003-2006 Erwin Coumans  http://continuousphysics.com/Bullet/

This software is provided 'as-is', without any express or implied warranty.
In no event will the authors be held liable for any damages arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it freely,
subject to the following restrictions:

1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/

#include "btQuantizedBvh.h"

#include "LinearMath/btAabbUtil2.h"
#include "LinearMath/btIDebugDraw.h"
#include "LinearMath/btSerializer.h"

#define RAYAABB2
btQuantizedBvh::btQuantizedBvh() :
					m_bulletVersion(BT_BULLET_VERSION),
					m_useQuantization(false),
					//m_traversalMode(TRAVERSAL_STACKLESS_CACHE_FRIENDLY)
					m_traversalMode(TRAVERSAL_STACKLESS)
					//m_traversalMode(TRAVERSAL_RECURSIVE)
					,m_subtreeHeaderCount(0) //PCK: add this line
{
	m_bvhAabbMin.setValue(-SIMD_INFINITY,-SIMD_INFINITY,-SIMD_INFINITY);
	m_bvhAabbMax.setValue(SIMD_INFINITY,SIMD_INFINITY,SIMD_INFINITY);
}


void btQuantizedBvh::buildInternal()
{
	///assumes that the caller filled in m_quantizedLeafNodes
	m_useQuantization = true;
	int numLeafNodes = 0;

	if (m_useQuantization)
	{
		//now we have an array of leafnodes in m_leafNodes
		numLeafNodes = m_quantizedLeafNodes.size();

		m_quantizedContiguousNodes.resize(2*numLeafNodes);
	}

	m_curNodeIndex = 0;

	buildTree(0,numLeafNodes);

	///if the entire tree is smaller than the subtree size, we need to create header info for the tree
	if(m_useQuantization && !m_SubtreeHeaders.size())
	{
		btBvhSubtreeInfo& subtree = m_SubtreeHeaders.expand();
		subtree.setAabbFromQuantizeNode(m_quantizedContiguousNodes[0]);
		subtree.m_rootNodeIndex = 0;
		subtree.m_subtreeSize = m_quantizedContiguousNodes[0].isLeafNode() ? 1 : m_quantizedContiguousNodes[0].getEscapeIndex();
	}

	//PCK: update the copy of the size
	m_subtreeHeaderCount = m_SubtreeHeaders.size();

	//PCK: clear m_quantizedLeafNodes and m_leafNodes, they are temporary
	m_quantizedLeafNodes.clear();
	m_leafNodes.clear();
}
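
/*
A minimal usage sketch for buildInternal() (illustrative only; the middle step is
pseudocode, and in Bullet the quantized leaves are normally filled in by
btOptimizedBvh/btBvhTriangleMeshShape rather than by hand):

	btQuantizedBvh bvh;
	bvh.setQuantizationValues(meshAabbMin, meshAabbMax);   // caller-provided mesh bounds
	// for each triangle: quantize its AABB with quantizeWithClamp() and append a
	// btQuantizedBvhNode (triangle/part id encoded) to m_quantizedLeafNodes
	bvh.buildInternal();   // builds m_quantizedContiguousNodes and the subtree headers
*/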


///just for debugging, to visualize the individual patches/subtrees
#ifdef DEBUG_PATCH_COLORS
btVector3 color[4]=
{
	btVector3(1,0,0),
	btVector3(0,1,0),
	btVector3(0,0,1),
	btVector3(0,1,1)
};
#endif //DEBUG_PATCH_COLORS


void	btQuantizedBvh::setQuantizationValues(const btVector3& bvhAabbMin,const btVector3& bvhAabbMax,btScalar quantizationMargin)
{
	//enlarge the AABB to avoid division by zero when initializing the quantization values
	btVector3 clampValue(quantizationMargin,quantizationMargin,quantizationMargin);
	m_bvhAabbMin = bvhAabbMin - clampValue;
	m_bvhAabbMax = bvhAabbMax + clampValue;
	btVector3 aabbSize = m_bvhAabbMax - m_bvhAabbMin;
	m_bvhQuantization = btVector3(btScalar(65533.0),btScalar(65533.0),btScalar(65533.0)) / aabbSize;

	m_useQuantization = true;

	{
		unsigned short vecIn[3];
		btVector3 v;
		{
			quantize(vecIn,m_bvhAabbMin,false);
			v = unQuantize(vecIn);
			m_bvhAabbMin.setMin(v-clampValue);
		}
		{
			quantize(vecIn,m_bvhAabbMax,true);
			v = unQuantize(vecIn);
			m_bvhAabbMax.setMax(v+clampValue);
		}
		aabbSize = m_bvhAabbMax - m_bvhAabbMin;
		m_bvhQuantization = btVector3(btScalar(65533.0),btScalar(65533.0),btScalar(65533.0)) / aabbSize;
	}
}
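
/*
Quantization note: a world coordinate x maps to an unsigned short roughly as
	q = (unsigned short)((x - m_bvhAabbMin[axis]) * m_bvhQuantization[axis])
so the enlarged AABB extent spans [0..65533], leaving headroom below 65535 for the
conservative rounding done in quantize()/quantizeWithClamp() (min coordinates are
snapped down, max coordinates up, so quantized AABBs never shrink). For a 100-unit
extent this gives a resolution of about 100/65533, i.e. roughly 0.0015 units per
step. The quantize/unQuantize round trip above widens m_bvhAabbMin/Max so that
unQuantize() can never return a point outside the stored bounds.
*/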



btQuantizedBvh::~btQuantizedBvh()
{
}

#ifdef DEBUG_TREE_BUILDING
int gStackDepth = 0;
int gMaxStackDepth = 0;
#endif //DEBUG_TREE_BUILDING

void	btQuantizedBvh::buildTree	(int startIndex,int endIndex)
{
#ifdef DEBUG_TREE_BUILDING
	gStackDepth++;
	if (gStackDepth > gMaxStackDepth)
		gMaxStackDepth = gStackDepth;
#endif //DEBUG_TREE_BUILDING

	int splitAxis, splitIndex, i;
	int numIndices = endIndex-startIndex;
	int curIndex = m_curNodeIndex;

	btAssert(numIndices>0);

	if (numIndices==1)
	{
#ifdef DEBUG_TREE_BUILDING
		gStackDepth--;
#endif //DEBUG_TREE_BUILDING

		assignInternalNodeFromLeafNode(m_curNodeIndex,startIndex);

		m_curNodeIndex++;
		return;
	}
	//calculate the best splitting axis and where to split it. Sort the incoming 'leafNodes' array within range 'startIndex/endIndex'.

	splitAxis = calcSplittingAxis(startIndex,endIndex);

	splitIndex = sortAndCalcSplittingIndex(startIndex,endIndex,splitAxis);

	int internalNodeIndex = m_curNodeIndex;

	//set the min aabb to 'inf' or a max value, and set the max aabb to a -inf/minimum value.
	//the aabb will be expanded during buildTree/mergeInternalNodeAabb with actual node values
	setInternalNodeAabbMin(m_curNodeIndex,m_bvhAabbMax);//can't use btVector3(SIMD_INFINITY,SIMD_INFINITY,SIMD_INFINITY) because of quantization
	setInternalNodeAabbMax(m_curNodeIndex,m_bvhAabbMin);//can't use btVector3(-SIMD_INFINITY,-SIMD_INFINITY,-SIMD_INFINITY) because of quantization

	for (i=startIndex;i<endIndex;i++)
	{
		mergeInternalNodeAabb(m_curNodeIndex,getAabbMin(i),getAabbMax(i));
	}

	m_curNodeIndex++;

	//internalNode->m_escapeIndex;

	int leftChildNodexIndex = m_curNodeIndex;

	//build left child tree
	buildTree(startIndex,splitIndex);

	int rightChildNodexIndex = m_curNodeIndex;
	//build right child tree
	buildTree(splitIndex,endIndex);

#ifdef DEBUG_TREE_BUILDING
	gStackDepth--;
#endif //DEBUG_TREE_BUILDING

	int escapeIndex = m_curNodeIndex - curIndex;

	if (m_useQuantization)
	{
		//escapeIndex is the number of nodes of this subtree
		const int sizeQuantizedNode = sizeof(btQuantizedBvhNode);
		const int treeSizeInBytes = escapeIndex * sizeQuantizedNode;
		if (treeSizeInBytes > MAX_SUBTREE_SIZE_IN_BYTES)
		{
			updateSubtreeHeaders(leftChildNodexIndex,rightChildNodexIndex);
		}
	}

	setInternalNodeEscapeIndex(internalNodeIndex,escapeIndex);

}
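
/*
Layout produced by buildTree: nodes are stored contiguously in depth-first order,
so a node's left child is simply the next array element, and the escape index of
an internal node is the total size of its subtree, i.e. how many elements to skip
when a traversal rejects that node. One possible layout for three leaves
(splitIndex = 1):

	index:       0         1      2         3      4
	node:        internal  leaf   internal  leaf   leaf
	escapeIndex: 5         -      3         -      -
*/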

void	btQuantizedBvh::updateSubtreeHeaders(int leftChildNodexIndex,int rightChildNodexIndex)
{
	btAssert(m_useQuantization);

	btQuantizedBvhNode& leftChildNode = m_quantizedContiguousNodes[leftChildNodexIndex];
	int leftSubTreeSize = leftChildNode.isLeafNode() ? 1 : leftChildNode.getEscapeIndex();
	int leftSubTreeSizeInBytes = leftSubTreeSize * static_cast<int>(sizeof(btQuantizedBvhNode));

	btQuantizedBvhNode& rightChildNode = m_quantizedContiguousNodes[rightChildNodexIndex];
	int rightSubTreeSize = rightChildNode.isLeafNode() ? 1 : rightChildNode.getEscapeIndex();
	int rightSubTreeSizeInBytes = rightSubTreeSize * static_cast<int>(sizeof(btQuantizedBvhNode));

	if(leftSubTreeSizeInBytes <= MAX_SUBTREE_SIZE_IN_BYTES)
	{
		btBvhSubtreeInfo& subtree = m_SubtreeHeaders.expand();
		subtree.setAabbFromQuantizeNode(leftChildNode);
		subtree.m_rootNodeIndex = leftChildNodexIndex;
		subtree.m_subtreeSize = leftSubTreeSize;
	}

	if(rightSubTreeSizeInBytes <= MAX_SUBTREE_SIZE_IN_BYTES)
	{
		btBvhSubtreeInfo& subtree = m_SubtreeHeaders.expand();
		subtree.setAabbFromQuantizeNode(rightChildNode);
		subtree.m_rootNodeIndex = rightChildNodexIndex;
		subtree.m_subtreeSize = rightSubTreeSize;
	}

	//PCK: update the copy of the size
	m_subtreeHeaderCount = m_SubtreeHeaders.size();
}


int	btQuantizedBvh::sortAndCalcSplittingIndex(int startIndex,int endIndex,int splitAxis)
{
	int i;
	int splitIndex = startIndex;
	int numIndices = endIndex - startIndex;
	btScalar splitValue;

	btVector3 means(btScalar(0.),btScalar(0.),btScalar(0.));
	for (i=startIndex;i<endIndex;i++)
	{
		btVector3 center = btScalar(0.5)*(getAabbMax(i)+getAabbMin(i));
		means+=center;
	}
	means *= (btScalar(1.)/(btScalar)numIndices);

	splitValue = means[splitAxis];

	//sort the leafNodes so that all values larger than splitValue come first, and smaller values start from 'splitIndex'
	for (i=startIndex;i<endIndex;i++)
	{
		btVector3 center = btScalar(0.5)*(getAabbMax(i)+getAabbMin(i));
		if (center[splitAxis] > splitValue)
		{
			//swap
			swapLeafNodes(i,splitIndex);
			splitIndex++;
		}
	}

	//if the splitIndex causes unbalanced trees, fix this by using the center in between startIndex and endIndex
	//otherwise the tree-building might fail due to stack-overflows in certain cases.
	//unbalanced1 is unsafe: it can cause stack overflows
	//bool unbalanced1 = ((splitIndex==startIndex) || (splitIndex == (endIndex-1)));

	//unbalanced2 should work too: always use center (perfectly balanced trees)
	//bool unbalanced2 = true;

	//this should be safe too:
	int rangeBalancedIndices = numIndices/3;
	bool unbalanced = ((splitIndex<=(startIndex+rangeBalancedIndices)) || (splitIndex >=(endIndex-1-rangeBalancedIndices)));

	if (unbalanced)
	{
		splitIndex = startIndex+ (numIndices>>1);
	}

	bool unbal = (splitIndex==startIndex) || (splitIndex == (endIndex));
	(void)unbal;
	btAssert(!unbal);

	return splitIndex;
}
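
/*
This is a mean-split partition over the leaf AABB centers (similar in spirit to
std::partition): leaves whose center lies above the mean along the split axis are
swapped to the front of the range. The 1/3-range guard above then falls back to a
median split (numIndices>>1) whenever the partition is too lopsided, which bounds
the recursion depth of buildTree and prevents the stack overflows mentioned in the
comments.
*/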


int	btQuantizedBvh::calcSplittingAxis(int startIndex,int endIndex)
{
	int i;

	btVector3 means(btScalar(0.),btScalar(0.),btScalar(0.));
	btVector3 variance(btScalar(0.),btScalar(0.),btScalar(0.));
	int numIndices = endIndex-startIndex;

	for (i=startIndex;i<endIndex;i++)
	{
		btVector3 center = btScalar(0.5)*(getAabbMax(i)+getAabbMin(i));
		means+=center;
	}
	means *= (btScalar(1.)/(btScalar)numIndices);

	for (i=startIndex;i<endIndex;i++)
	{
		btVector3 center = btScalar(0.5)*(getAabbMax(i)+getAabbMin(i));
		btVector3 diff2 = center-means;
		diff2 = diff2 * diff2;
		variance += diff2;
	}
	variance *= (btScalar(1.)/((btScalar)numIndices-1));

	return variance.maxAxis();
}
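
/*
The split axis is the one with the largest variance of leaf centers, i.e. the axis
along which the leaves are most spread out; splitting there tends to produce child
AABBs with the least overlap. Note the sample variance divides by numIndices-1.
*/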



void	btQuantizedBvh::reportAabbOverlappingNodex(btNodeOverlapCallback* nodeCallback,const btVector3& aabbMin,const btVector3& aabbMax) const
{
	//either choose recursive traversal (walkTree) or stackless (walkStacklessTree)

	if (m_useQuantization)
	{
		///quantize query AABB
		unsigned short int quantizedQueryAabbMin[3];
		unsigned short int quantizedQueryAabbMax[3];
		quantizeWithClamp(quantizedQueryAabbMin,aabbMin,0);
		quantizeWithClamp(quantizedQueryAabbMax,aabbMax,1);

		switch (m_traversalMode)
		{
		case TRAVERSAL_STACKLESS:
			walkStacklessQuantizedTree(nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax,0,m_curNodeIndex);
			break;
		case TRAVERSAL_STACKLESS_CACHE_FRIENDLY:
			walkStacklessQuantizedTreeCacheFriendly(nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax);
			break;
		case TRAVERSAL_RECURSIVE:
			{
				const btQuantizedBvhNode* rootNode = &m_quantizedContiguousNodes[0];
				walkRecursiveQuantizedTreeAgainstQueryAabb(rootNode,nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax);
			}
			break;
		default:
			//unsupported
			btAssert(0);
		}
	} else
	{
		walkStacklessTree(nodeCallback,aabbMin,aabbMax);
	}
}


int maxIterations = 0;


void	btQuantizedBvh::walkStacklessTree(btNodeOverlapCallback* nodeCallback,const btVector3& aabbMin,const btVector3& aabbMax) const
{
	btAssert(!m_useQuantization);

	const btOptimizedBvhNode* rootNode = &m_contiguousNodes[0];
	int escapeIndex, curIndex = 0;
	int walkIterations = 0;
	bool isLeafNode;
	//PCK: unsigned instead of bool
	unsigned aabbOverlap;

	while (curIndex < m_curNodeIndex)
	{
		//catch bugs in tree data
		btAssert (walkIterations < m_curNodeIndex);

		walkIterations++;
		aabbOverlap = TestAabbAgainstAabb2(aabbMin,aabbMax,rootNode->m_aabbMinOrg,rootNode->m_aabbMaxOrg);
		isLeafNode = rootNode->m_escapeIndex == -1;

		//PCK: unsigned instead of bool
		if (isLeafNode && (aabbOverlap != 0))
		{
			nodeCallback->processNode(rootNode->m_subPart,rootNode->m_triangleIndex);
		}

		//PCK: unsigned instead of bool
		if ((aabbOverlap != 0) || isLeafNode)
		{
			rootNode++;
			curIndex++;
		} else
		{
			escapeIndex = rootNode->m_escapeIndex;
			rootNode += escapeIndex;
			curIndex += escapeIndex;
		}
	}
	if (maxIterations < walkIterations)
		maxIterations = walkIterations;
}
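
/*
Stackless walk in a nutshell: because nodes are stored depth-first, "descend into
this node" is just ++rootNode. On a miss at an internal node, m_escapeIndex jumps
over that node's whole subtree; leaves always advance by one. Each node is visited
at most once, which is what the walkIterations < m_curNodeIndex assertion checks.
*/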

/*
///this was the original recursive traversal, before we optimized towards stackless traversal
void	btQuantizedBvh::walkTree(btOptimizedBvhNode* rootNode,btNodeOverlapCallback* nodeCallback,const btVector3& aabbMin,const btVector3& aabbMax) const
{
	bool isLeafNode, aabbOverlap = TestAabbAgainstAabb2(aabbMin,aabbMax,rootNode->m_aabbMin,rootNode->m_aabbMax);
	if (aabbOverlap)
	{
		isLeafNode = (!rootNode->m_leftChild && !rootNode->m_rightChild);
		if (isLeafNode)
		{
			nodeCallback->processNode(rootNode);
		} else
		{
			walkTree(rootNode->m_leftChild,nodeCallback,aabbMin,aabbMax);
			walkTree(rootNode->m_rightChild,nodeCallback,aabbMin,aabbMax);
		}
	}
}
*/

void btQuantizedBvh::walkRecursiveQuantizedTreeAgainstQueryAabb(const btQuantizedBvhNode* currentNode,btNodeOverlapCallback* nodeCallback,unsigned short int* quantizedQueryAabbMin,unsigned short int* quantizedQueryAabbMax) const
{
	btAssert(m_useQuantization);

	bool isLeafNode;
	//PCK: unsigned instead of bool
	unsigned aabbOverlap;

	//PCK: unsigned instead of bool
	aabbOverlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin,quantizedQueryAabbMax,currentNode->m_quantizedAabbMin,currentNode->m_quantizedAabbMax);
	isLeafNode = currentNode->isLeafNode();

	//PCK: unsigned instead of bool
	if (aabbOverlap != 0)
	{
		if (isLeafNode)
		{
			nodeCallback->processNode(currentNode->getPartId(),currentNode->getTriangleIndex());
		} else
		{
			//process left and right children
			const btQuantizedBvhNode* leftChildNode = currentNode+1;
			walkRecursiveQuantizedTreeAgainstQueryAabb(leftChildNode,nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax);

			const btQuantizedBvhNode* rightChildNode = leftChildNode->isLeafNode() ? leftChildNode+1 : leftChildNode+leftChildNode->getEscapeIndex();
			walkRecursiveQuantizedTreeAgainstQueryAabb(rightChildNode,nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax);
		}
	}
}



void	btQuantizedBvh::walkStacklessTreeAgainstRay(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin, const btVector3& aabbMax, int startNodeIndex,int endNodeIndex) const
{
	btAssert(!m_useQuantization);

	const btOptimizedBvhNode* rootNode = &m_contiguousNodes[0];
	int escapeIndex, curIndex = 0;
	int walkIterations = 0;
	bool isLeafNode;
	//PCK: unsigned instead of bool
	unsigned aabbOverlap=0;
	unsigned rayBoxOverlap=0;
	btScalar lambda_max = 1.0;

	/* Quick pruning by the ray's enclosing box */
	btVector3 rayAabbMin = raySource;
	btVector3 rayAabbMax = raySource;
	rayAabbMin.setMin(rayTarget);
	rayAabbMax.setMax(rayTarget);

	/* Add box cast extents to bounding box */
	rayAabbMin += aabbMin;
	rayAabbMax += aabbMax;

#ifdef RAYAABB2
	btVector3 rayDir = (rayTarget-raySource);
	rayDir.normalize ();
	lambda_max = rayDir.dot(rayTarget-raySource);
	///what about division by zero? --> just set rayDirection[i] to 1.0
	btVector3 rayDirectionInverse;
	rayDirectionInverse[0] = rayDir[0] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDir[0];
	rayDirectionInverse[1] = rayDir[1] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDir[1];
	rayDirectionInverse[2] = rayDir[2] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDir[2];
	unsigned int sign[3] = { rayDirectionInverse[0] < 0.0, rayDirectionInverse[1] < 0.0, rayDirectionInverse[2] < 0.0};
#endif

	btVector3 bounds[2];

	while (curIndex < m_curNodeIndex)
	{
		btScalar param = 1.0;
		//catch bugs in tree data
		btAssert (walkIterations < m_curNodeIndex);

		walkIterations++;

		bounds[0] = rootNode->m_aabbMinOrg;
		bounds[1] = rootNode->m_aabbMaxOrg;
		/* Add box cast extents */
		bounds[0] -= aabbMax;
		bounds[1] -= aabbMin;

		aabbOverlap = TestAabbAgainstAabb2(rayAabbMin,rayAabbMax,rootNode->m_aabbMinOrg,rootNode->m_aabbMaxOrg);
		//perhaps profile if it is worth doing the aabbOverlap test first

#ifdef RAYAABB2
		///careful with this check: need to check division by zero (above) and fix the unQuantize method
		///thanks Joerg/hiker for the reproduction case!
		///http://www.bulletphysics.com/Bullet/phpBB3/viewtopic.php?f=9&t=1858
		rayBoxOverlap = aabbOverlap ? btRayAabb2 (raySource, rayDirectionInverse, sign, bounds, param, 0.0f, lambda_max) : false;

#else
		btVector3 normal;
		rayBoxOverlap = btRayAabb(raySource, rayTarget,bounds[0],bounds[1],param, normal);
#endif

		isLeafNode = rootNode->m_escapeIndex == -1;

		//PCK: unsigned instead of bool
		if (isLeafNode && (rayBoxOverlap != 0))
		{
			nodeCallback->processNode(rootNode->m_subPart,rootNode->m_triangleIndex);
		}

		//PCK: unsigned instead of bool
		if ((rayBoxOverlap != 0) || isLeafNode)
		{
			rootNode++;
			curIndex++;
		} else
		{
			escapeIndex = rootNode->m_escapeIndex;
			rootNode += escapeIndex;
			curIndex += escapeIndex;
		}
	}
	if (maxIterations < walkIterations)
		maxIterations = walkIterations;
}
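
/*
The RAYAABB2 path uses the branchless slab test of btRayAabb2: the precomputed
inverse ray direction plus the per-axis sign bits select the near/far bound
directly, avoiding the hit-normal computation of plain btRayAabb. Components with
zero direction are replaced by BT_LARGE_FLOAT so the reciprocals stay finite for
axis-parallel rays.
*/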



void	btQuantizedBvh::walkStacklessQuantizedTreeAgainstRay(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin, const btVector3& aabbMax, int startNodeIndex,int endNodeIndex) const
{
	btAssert(m_useQuantization);

	int curIndex = startNodeIndex;
	int walkIterations = 0;
	int subTreeSize = endNodeIndex - startNodeIndex;
	(void)subTreeSize;

	const btQuantizedBvhNode* rootNode = &m_quantizedContiguousNodes[startNodeIndex];
	int escapeIndex;

	bool isLeafNode;
	//PCK: unsigned instead of bool
	unsigned boxBoxOverlap = 0;
	unsigned rayBoxOverlap = 0;

	btScalar lambda_max = 1.0;

#ifdef RAYAABB2
	btVector3 rayDirection = (rayTarget-raySource);
	rayDirection.normalize ();
	lambda_max = rayDirection.dot(rayTarget-raySource);
	///what about division by zero? --> just set rayDirection[i] to 1.0
	rayDirection[0] = rayDirection[0] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDirection[0];
	rayDirection[1] = rayDirection[1] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDirection[1];
	rayDirection[2] = rayDirection[2] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDirection[2];
	unsigned int sign[3] = { rayDirection[0] < 0.0, rayDirection[1] < 0.0, rayDirection[2] < 0.0};
#endif

	/* Quick pruning by quantized box */
	btVector3 rayAabbMin = raySource;
	btVector3 rayAabbMax = raySource;
	rayAabbMin.setMin(rayTarget);
	rayAabbMax.setMax(rayTarget);

	/* Add box cast extents to bounding box */
	rayAabbMin += aabbMin;
	rayAabbMax += aabbMax;

	unsigned short int quantizedQueryAabbMin[3];
	unsigned short int quantizedQueryAabbMax[3];
	quantizeWithClamp(quantizedQueryAabbMin,rayAabbMin,0);
	quantizeWithClamp(quantizedQueryAabbMax,rayAabbMax,1);

	while (curIndex < endNodeIndex)
	{

//#define VISUALLY_ANALYZE_BVH 1
#ifdef VISUALLY_ANALYZE_BVH
		//some code snippet to debugDraw aabb, to visually analyze bvh structure
		static int drawPatch = 0;
		//need some global access to a debugDrawer
		extern btIDebugDraw* debugDrawerPtr;
		if (curIndex==drawPatch)
		{
			btVector3 aabbMin,aabbMax;
			aabbMin = unQuantize(rootNode->m_quantizedAabbMin);
			aabbMax = unQuantize(rootNode->m_quantizedAabbMax);
			btVector3	color(1,0,0);
			debugDrawerPtr->drawAabb(aabbMin,aabbMax,color);
		}
#endif//VISUALLY_ANALYZE_BVH

		//catch bugs in tree data
		btAssert (walkIterations < subTreeSize);

		walkIterations++;
		//PCK: unsigned instead of bool
		// only interested if this is closer than any previous hit
		btScalar param = 1.0;
		rayBoxOverlap = 0;
		boxBoxOverlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin,quantizedQueryAabbMax,rootNode->m_quantizedAabbMin,rootNode->m_quantizedAabbMax);
		isLeafNode = rootNode->isLeafNode();
		if (boxBoxOverlap)
		{
			btVector3 bounds[2];
			bounds[0] = unQuantize(rootNode->m_quantizedAabbMin);
			bounds[1] = unQuantize(rootNode->m_quantizedAabbMax);
			/* Add box cast extents */
			bounds[0] -= aabbMax;
			bounds[1] -= aabbMin;
			btVector3 normal;
#if 0
			bool ra2 = btRayAabb2 (raySource, rayDirection, sign, bounds, param, 0.0, lambda_max);
			bool ra = btRayAabb (raySource, rayTarget, bounds[0], bounds[1], param, normal);
			if (ra2 != ra)
			{
				printf("functions don't match\n");
			}
#endif
#ifdef RAYAABB2
			///careful with this check: need to check division by zero (above) and fix the unQuantize method
			///thanks Joerg/hiker for the reproduction case!
			///http://www.bulletphysics.com/Bullet/phpBB3/viewtopic.php?f=9&t=1858

			//BT_PROFILE("btRayAabb2");
			rayBoxOverlap = btRayAabb2 (raySource, rayDirection, sign, bounds, param, 0.0f, lambda_max);

#else
			rayBoxOverlap = true;//btRayAabb(raySource, rayTarget, bounds[0], bounds[1], param, normal);
#endif
		}

		if (isLeafNode && rayBoxOverlap)
		{
			nodeCallback->processNode(rootNode->getPartId(),rootNode->getTriangleIndex());
		}

		//PCK: unsigned instead of bool
		if ((rayBoxOverlap != 0) || isLeafNode)
		{
			rootNode++;
			curIndex++;
		} else
		{
			escapeIndex = rootNode->getEscapeIndex();
			rootNode += escapeIndex;
			curIndex += escapeIndex;
		}
	}
	if (maxIterations < walkIterations)
		maxIterations = walkIterations;
}
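
/*
Two-phase rejection: the cheap integer test on quantized AABBs runs first, and only
overlapping nodes pay for unQuantize() plus the floating-point ray/slab test. The
unquantized bounds are widened by the box-cast extents, so the same walk serves
both ray tests (zero extents) and convex box casts.
*/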

void	btQuantizedBvh::walkStacklessQuantizedTree(btNodeOverlapCallback* nodeCallback,unsigned short int* quantizedQueryAabbMin,unsigned short int* quantizedQueryAabbMax,int startNodeIndex,int endNodeIndex) const
{
	btAssert(m_useQuantization);

	int curIndex = startNodeIndex;
	int walkIterations = 0;
	int subTreeSize = endNodeIndex - startNodeIndex;
	(void)subTreeSize;

	const btQuantizedBvhNode* rootNode = &m_quantizedContiguousNodes[startNodeIndex];
	int escapeIndex;

	bool isLeafNode;
	//PCK: unsigned instead of bool
	unsigned aabbOverlap;

	while (curIndex < endNodeIndex)
	{

//#define VISUALLY_ANALYZE_BVH 1
#ifdef VISUALLY_ANALYZE_BVH
		//some code snippet to debugDraw aabb, to visually analyze bvh structure
		static int drawPatch = 0;
		//need some global access to a debugDrawer
		extern btIDebugDraw* debugDrawerPtr;
		if (curIndex==drawPatch)
		{
			btVector3 aabbMin,aabbMax;
			aabbMin = unQuantize(rootNode->m_quantizedAabbMin);
			aabbMax = unQuantize(rootNode->m_quantizedAabbMax);
			btVector3	color(1,0,0);
			debugDrawerPtr->drawAabb(aabbMin,aabbMax,color);
		}
#endif//VISUALLY_ANALYZE_BVH

		//catch bugs in tree data
		btAssert (walkIterations < subTreeSize);

		walkIterations++;
		//PCK: unsigned instead of bool
		aabbOverlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin,quantizedQueryAabbMax,rootNode->m_quantizedAabbMin,rootNode->m_quantizedAabbMax);
		isLeafNode = rootNode->isLeafNode();

		if (isLeafNode && aabbOverlap)
		{
			nodeCallback->processNode(rootNode->getPartId(),rootNode->getTriangleIndex());
		}

		//PCK: unsigned instead of bool
		if ((aabbOverlap != 0) || isLeafNode)
		{
			rootNode++;
			curIndex++;
		} else
		{
			escapeIndex = rootNode->getEscapeIndex();
			rootNode += escapeIndex;
			curIndex += escapeIndex;
		}
	}
	if (maxIterations < walkIterations)
		maxIterations = walkIterations;
}

//This traversal can be called from the Playstation 3 SPU
void	btQuantizedBvh::walkStacklessQuantizedTreeCacheFriendly(btNodeOverlapCallback* nodeCallback,unsigned short int* quantizedQueryAabbMin,unsigned short int* quantizedQueryAabbMax) const
{
	btAssert(m_useQuantization);

	int i;

	for (i=0;i<this->m_SubtreeHeaders.size();i++)
	{
		const btBvhSubtreeInfo& subtree = m_SubtreeHeaders[i];

		//PCK: unsigned instead of bool
		unsigned overlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin,quantizedQueryAabbMax,subtree.m_quantizedAabbMin,subtree.m_quantizedAabbMax);
		if (overlap != 0)
		{
			walkStacklessQuantizedTree(nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax,
				subtree.m_rootNodeIndex,
				subtree.m_rootNodeIndex+subtree.m_subtreeSize);
		}
	}
}
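
/*
The cache-friendly variant is a flat loop over m_SubtreeHeaders: each header's
quantized AABB can cull a whole subtree of at most MAX_SUBTREE_SIZE_IN_BYTES, and a
surviving subtree is one small contiguous block that can be streamed as a unit,
which is what made this traversal suitable for the Playstation 3 SPU mentioned
above.
*/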


void	btQuantizedBvh::reportRayOverlappingNodex (btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget) const
{
	reportBoxCastOverlappingNodex(nodeCallback,raySource,rayTarget,btVector3(0,0,0),btVector3(0,0,0));
}


void	btQuantizedBvh::reportBoxCastOverlappingNodex(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin,const btVector3& aabbMax) const
{
	//always use stackless

	if (m_useQuantization)
	{
		walkStacklessQuantizedTreeAgainstRay(nodeCallback, raySource, rayTarget, aabbMin, aabbMax, 0, m_curNodeIndex);
	}
	else
	{
		walkStacklessTreeAgainstRay(nodeCallback, raySource, rayTarget, aabbMin, aabbMax, 0, m_curNodeIndex);
	}
	/*
	{
		//recursive traversal
		btVector3 qaabbMin = raySource;
		btVector3 qaabbMax = raySource;
		qaabbMin.setMin(rayTarget);
		qaabbMax.setMax(rayTarget);
		qaabbMin += aabbMin;
		qaabbMax += aabbMax;
		reportAabbOverlappingNodex(nodeCallback,qaabbMin,qaabbMax);
	}
	*/
}


void	btQuantizedBvh::swapLeafNodes(int i,int splitIndex)
{
	if (m_useQuantization)
	{
		btQuantizedBvhNode tmp = m_quantizedLeafNodes[i];
		m_quantizedLeafNodes[i] = m_quantizedLeafNodes[splitIndex];
		m_quantizedLeafNodes[splitIndex] = tmp;
	} else
	{
		btOptimizedBvhNode tmp = m_leafNodes[i];
		m_leafNodes[i] = m_leafNodes[splitIndex];
		m_leafNodes[splitIndex] = tmp;
	}
}

void	btQuantizedBvh::assignInternalNodeFromLeafNode(int internalNode,int leafNodeIndex)
{
	if (m_useQuantization)
	{
		m_quantizedContiguousNodes[internalNode] = m_quantizedLeafNodes[leafNodeIndex];
	} else
	{
		m_contiguousNodes[internalNode] = m_leafNodes[leafNodeIndex];
	}
}

//PCK: include
#include <new>

#if 0
//PCK: consts
static const unsigned BVH_ALIGNMENT = 16;
static const unsigned BVH_ALIGNMENT_MASK = BVH_ALIGNMENT-1;

static const unsigned BVH_ALIGNMENT_BLOCKS = 2;
#endif


unsigned int btQuantizedBvh::getAlignmentSerializationPadding()
{
	// I changed this to 0 since the extra padding is not needed or used.
	return 0;//BVH_ALIGNMENT_BLOCKS * BVH_ALIGNMENT;
}

unsigned btQuantizedBvh::calculateSerializeBufferSize() const
{
	unsigned baseSize = sizeof(btQuantizedBvh) + getAlignmentSerializationPadding();
	baseSize += sizeof(btBvhSubtreeInfo) * m_subtreeHeaderCount;
	if (m_useQuantization)
	{
		return baseSize + m_curNodeIndex * sizeof(btQuantizedBvhNode);
	}
	return baseSize + m_curNodeIndex * sizeof(btOptimizedBvhNode);
}
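
/*
Example (sizes are typical, not guaranteed across builds/padding): a quantized tree
with m_curNodeIndex = 1000 nodes and 4 subtree headers needs
	sizeof(btQuantizedBvh) + 4*sizeof(btBvhSubtreeInfo) + 1000*16
bytes, since a btQuantizedBvhNode is 3+3 unsigned shorts plus one int (16 bytes).
*/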

bool btQuantizedBvh::serialize(void *o_alignedDataBuffer, unsigned /*i_dataBufferSize*/, bool i_swapEndian) const
{
	btAssert(m_subtreeHeaderCount == m_SubtreeHeaders.size());
	m_subtreeHeaderCount = m_SubtreeHeaders.size();

/*	if (i_dataBufferSize < calculateSerializeBufferSize() || o_alignedDataBuffer == NULL || (((unsigned)o_alignedDataBuffer & BVH_ALIGNMENT_MASK) != 0))
	{
		///check alignment of the buffer?
		btAssert(0);
		return false;
	}
*/

	btQuantizedBvh *targetBvh = (btQuantizedBvh *)o_alignedDataBuffer;

	// construct the class so the virtual function table, etc will be set up
	// Also, m_leafNodes and m_quantizedLeafNodes will be initialized to default values by the constructor
	new (targetBvh) btQuantizedBvh;

	if (i_swapEndian)
	{
		targetBvh->m_curNodeIndex = static_cast<int>(btSwapEndian(m_curNodeIndex));

		btSwapVector3Endian(m_bvhAabbMin,targetBvh->m_bvhAabbMin);
		btSwapVector3Endian(m_bvhAabbMax,targetBvh->m_bvhAabbMax);
		btSwapVector3Endian(m_bvhQuantization,targetBvh->m_bvhQuantization);

		targetBvh->m_traversalMode = (btTraversalMode)btSwapEndian(m_traversalMode);
		targetBvh->m_subtreeHeaderCount = static_cast<int>(btSwapEndian(m_subtreeHeaderCount));
	}
	else
	{
		targetBvh->m_curNodeIndex = m_curNodeIndex;
		targetBvh->m_bvhAabbMin = m_bvhAabbMin;
		targetBvh->m_bvhAabbMax = m_bvhAabbMax;
		targetBvh->m_bvhQuantization = m_bvhQuantization;
		targetBvh->m_traversalMode = m_traversalMode;
		targetBvh->m_subtreeHeaderCount = m_subtreeHeaderCount;
	}

	targetBvh->m_useQuantization = m_useQuantization;

	unsigned char *nodeData = (unsigned char *)targetBvh;
	nodeData += sizeof(btQuantizedBvh);

	unsigned sizeToAdd = 0;//(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK;
	nodeData += sizeToAdd;

	int nodeCount = m_curNodeIndex;

	if (m_useQuantization)
	{
		targetBvh->m_quantizedContiguousNodes.initializeFromBuffer(nodeData, nodeCount, nodeCount);

		if (i_swapEndian)
		{
			for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
			{
				targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0]);
				targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1]);
				targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2]);

				targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0]);
				targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1]);
				targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2]);

				targetBvh->m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex = static_cast<int>(btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex));
			}
		}
		else
		{
			for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
			{
				targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0];
				targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1];
				targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2];

				targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0];
				targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1];
				targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2];

				targetBvh->m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex = m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex;
			}
		}
		nodeData += sizeof(btQuantizedBvhNode) * nodeCount;

		// this clears the pointer in the member variable, it doesn't really do anything to the data
		// it does call the destructor on the contained objects, but they are all classes with no destructor defined
		// so the memory (which is not freed) is left alone
		targetBvh->m_quantizedContiguousNodes.initializeFromBuffer(NULL, 0, 0);
	}
	else
	{
		targetBvh->m_contiguousNodes.initializeFromBuffer(nodeData, nodeCount, nodeCount);

		if (i_swapEndian)
		{
			for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
			{
				btSwapVector3Endian(m_contiguousNodes[nodeIndex].m_aabbMinOrg, targetBvh->m_contiguousNodes[nodeIndex].m_aabbMinOrg);
				btSwapVector3Endian(m_contiguousNodes[nodeIndex].m_aabbMaxOrg, targetBvh->m_contiguousNodes[nodeIndex].m_aabbMaxOrg);

				targetBvh->m_contiguousNodes[nodeIndex].m_escapeIndex = static_cast<int>(btSwapEndian(m_contiguousNodes[nodeIndex].m_escapeIndex));
				targetBvh->m_contiguousNodes[nodeIndex].m_subPart = static_cast<int>(btSwapEndian(m_contiguousNodes[nodeIndex].m_subPart));
				targetBvh->m_contiguousNodes[nodeIndex].m_triangleIndex = static_cast<int>(btSwapEndian(m_contiguousNodes[nodeIndex].m_triangleIndex));
			}
		}
		else
		{
			for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
			{
				targetBvh->m_contiguousNodes[nodeIndex].m_aabbMinOrg = m_contiguousNodes[nodeIndex].m_aabbMinOrg;
				targetBvh->m_contiguousNodes[nodeIndex].m_aabbMaxOrg = m_contiguousNodes[nodeIndex].m_aabbMaxOrg;

				targetBvh->m_contiguousNodes[nodeIndex].m_escapeIndex = m_contiguousNodes[nodeIndex].m_escapeIndex;
				targetBvh->m_contiguousNodes[nodeIndex].m_subPart = m_contiguousNodes[nodeIndex].m_subPart;
				targetBvh->m_contiguousNodes[nodeIndex].m_triangleIndex = m_contiguousNodes[nodeIndex].m_triangleIndex;
			}
		}
		nodeData += sizeof(btOptimizedBvhNode) * nodeCount;

		// this clears the pointer in the member variable, it doesn't really do anything to the data
		// it does call the destructor on the contained objects, but they are all classes with no destructor defined
		// so the memory (which is not freed) is left alone
		targetBvh->m_contiguousNodes.initializeFromBuffer(NULL, 0, 0);
	}

	sizeToAdd = 0;//(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK;
	nodeData += sizeToAdd;

	// Now serialize the subtree headers
	targetBvh->m_SubtreeHeaders.initializeFromBuffer(nodeData, m_subtreeHeaderCount, m_subtreeHeaderCount);
	if (i_swapEndian)
	{
		for (int i = 0; i < m_subtreeHeaderCount; i++)
		{
			targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[0] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMin[0]);
			targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[1] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMin[1]);
			targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[2] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMin[2]);

			targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[0] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMax[0]);
			targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[1] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMax[1]);
			targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[2] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMax[2]);

			targetBvh->m_SubtreeHeaders[i].m_rootNodeIndex = static_cast<int>(btSwapEndian(m_SubtreeHeaders[i].m_rootNodeIndex));
			targetBvh->m_SubtreeHeaders[i].m_subtreeSize = static_cast<int>(btSwapEndian(m_SubtreeHeaders[i].m_subtreeSize));
		}
	}
	else
	{
		for (int i = 0; i < m_subtreeHeaderCount; i++)
		{
			targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[0] = (m_SubtreeHeaders[i].m_quantizedAabbMin[0]);
			targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[1] = (m_SubtreeHeaders[i].m_quantizedAabbMin[1]);
			targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[2] = (m_SubtreeHeaders[i].m_quantizedAabbMin[2]);

			targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[0] = (m_SubtreeHeaders[i].m_quantizedAabbMax[0]);
			targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[1] = (m_SubtreeHeaders[i].m_quantizedAabbMax[1]);
			targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[2] = (m_SubtreeHeaders[i].m_quantizedAabbMax[2]);

			targetBvh->m_SubtreeHeaders[i].m_rootNodeIndex = (m_SubtreeHeaders[i].m_rootNodeIndex);
			targetBvh->m_SubtreeHeaders[i].m_subtreeSize = (m_SubtreeHeaders[i].m_subtreeSize);

			// need to clear padding in destination buffer
			targetBvh->m_SubtreeHeaders[i].m_padding[0] = 0;
			targetBvh->m_SubtreeHeaders[i].m_padding[1] = 0;
			targetBvh->m_SubtreeHeaders[i].m_padding[2] = 0;
		}
	}
	nodeData += sizeof(btBvhSubtreeInfo) * m_subtreeHeaderCount;

	// this clears the pointer in the member variable, it doesn't really do anything to the data
	// it does call the destructor on the contained objects, but they are all classes with no destructor defined
	// so the memory (which is not freed) is left alone
	targetBvh->m_SubtreeHeaders.initializeFromBuffer(NULL, 0, 0);

	// this wipes the virtual function table pointer at the start of the buffer for the class
	*((void**)o_alignedDataBuffer) = NULL;

	return true;
}
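
/*
A hedged usage sketch for in-place serialization (buffer handling is the caller's
responsibility; variable names are illustrative):

	unsigned bufSize = bvh->calculateSerializeBufferSize();
	void* buf = btAlignedAlloc(bufSize, 16);
	bvh->serialize(buf, bufSize, false);   // false = keep native endianness
	// write 'buf' to disk; later, map or read it back into aligned memory and call
	// btQuantizedBvh::deSerializeInPlace(buf, bufSize, false) to get a usable tree
	// without any per-node allocation or copying.
*/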

btQuantizedBvh *btQuantizedBvh::deSerializeInPlace(void *i_alignedDataBuffer, unsigned int i_dataBufferSize, bool i_swapEndian)
{

	if (i_alignedDataBuffer == NULL)// || (((unsigned)i_alignedDataBuffer & BVH_ALIGNMENT_MASK) != 0))
	{
		return NULL;
	}
	btQuantizedBvh *bvh = (btQuantizedBvh *)i_alignedDataBuffer;

	if (i_swapEndian)
	{
		bvh->m_curNodeIndex = static_cast<int>(btSwapEndian(bvh->m_curNodeIndex));

		btUnSwapVector3Endian(bvh->m_bvhAabbMin);
		btUnSwapVector3Endian(bvh->m_bvhAabbMax);
		btUnSwapVector3Endian(bvh->m_bvhQuantization);

		bvh->m_traversalMode = (btTraversalMode)btSwapEndian(bvh->m_traversalMode);
		bvh->m_subtreeHeaderCount = static_cast<int>(btSwapEndian(bvh->m_subtreeHeaderCount));
	}

	unsigned int calculatedBufSize = bvh->calculateSerializeBufferSize();
	btAssert(calculatedBufSize <= i_dataBufferSize);

	if (calculatedBufSize > i_dataBufferSize)
	{
		return NULL;
	}

	unsigned char *nodeData = (unsigned char *)bvh;
	nodeData += sizeof(btQuantizedBvh);

	unsigned sizeToAdd = 0;//(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK;
	nodeData += sizeToAdd;

	int nodeCount = bvh->m_curNodeIndex;

	// Must call placement new to fill in the virtual function table, etc, but we don't want to overwrite most data, so call a special version of the constructor
	// Also, m_leafNodes and m_quantizedLeafNodes will be initialized to default values by the constructor
	new (bvh) btQuantizedBvh(*bvh, false);

	if (bvh->m_useQuantization)
	{
		bvh->m_quantizedContiguousNodes.initializeFromBuffer(nodeData, nodeCount, nodeCount);

		if (i_swapEndian)
		{
			for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
			{
				bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0]);
				bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1]);
				bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2]);

				bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0]);
				bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1]);
				bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2]);

				bvh->m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex = static_cast<int>(btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex));
			}
		}
		nodeData += sizeof(btQuantizedBvhNode) * nodeCount;
	}
	else
	{
		bvh->m_contiguousNodes.initializeFromBuffer(nodeData, nodeCount, nodeCount);

		if (i_swapEndian)
		{
			for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
			{
				btUnSwapVector3Endian(bvh->m_contiguousNodes[nodeIndex].m_aabbMinOrg);
				btUnSwapVector3Endian(bvh->m_contiguousNodes[nodeIndex].m_aabbMaxOrg);

				bvh->m_contiguousNodes[nodeIndex].m_escapeIndex = static_cast<int>(btSwapEndian(bvh->m_contiguousNodes[nodeIndex].m_escapeIndex));
				bvh->m_contiguousNodes[nodeIndex].m_subPart = static_cast<int>(btSwapEndian(bvh->m_contiguousNodes[nodeIndex].m_subPart));
				bvh->m_contiguousNodes[nodeIndex].m_triangleIndex = static_cast<int>(btSwapEndian(bvh->m_contiguousNodes[nodeIndex].m_triangleIndex));
			}
		}
		nodeData += sizeof(btOptimizedBvhNode) * nodeCount;
	}

	sizeToAdd = 0;//(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK;
	nodeData += sizeToAdd;

	// Now deserialize the subtree headers
	bvh->m_SubtreeHeaders.initializeFromBuffer(nodeData, bvh->m_subtreeHeaderCount, bvh->m_subtreeHeaderCount);
	if (i_swapEndian)
	{
		for (int i = 0; i < bvh->m_subtreeHeaderCount; i++)
		{
			bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[0] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[0]);
			bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[1] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[1]);
			bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[2] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[2]);

			bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[0] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[0]);
			bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[1] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[1]);
			bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[2] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[2]);

			bvh->m_SubtreeHeaders[i].m_rootNodeIndex = static_cast<int>(btSwapEndian(bvh->m_SubtreeHeaders[i].m_rootNodeIndex));
			bvh->m_SubtreeHeaders[i].m_subtreeSize = static_cast<int>(btSwapEndian(bvh->m_SubtreeHeaders[i].m_subtreeSize));
		}
	}

	return bvh;
}

// Constructor that prevents btVector3's default constructor from being called
btQuantizedBvh::btQuantizedBvh(btQuantizedBvh &self, bool /* ownsMemory */) :
m_bvhAabbMin(self.m_bvhAabbMin),
m_bvhAabbMax(self.m_bvhAabbMax),
m_bvhQuantization(self.m_bvhQuantization),
m_bulletVersion(BT_BULLET_VERSION)
{

}

void btQuantizedBvh::deSerializeFloat(struct btQuantizedBvhFloatData& quantizedBvhFloatData)
{
	m_bvhAabbMax.deSerializeFloat(quantizedBvhFloatData.m_bvhAabbMax);
	m_bvhAabbMin.deSerializeFloat(quantizedBvhFloatData.m_bvhAabbMin);
	m_bvhQuantization.deSerializeFloat(quantizedBvhFloatData.m_bvhQuantization);

	m_curNodeIndex = quantizedBvhFloatData.m_curNodeIndex;
	m_useQuantization = quantizedBvhFloatData.m_useQuantization!=0;

	{
		int numElem = quantizedBvhFloatData.m_numContiguousLeafNodes;
		m_contiguousNodes.resize(numElem);

		if (numElem)
		{
			btOptimizedBvhNodeFloatData* memPtr = quantizedBvhFloatData.m_contiguousNodesPtr;

			for (int i=0;i<numElem;i++,memPtr++)
			{
				m_contiguousNodes[i].m_aabbMaxOrg.deSerializeFloat(memPtr->m_aabbMaxOrg);
				m_contiguousNodes[i].m_aabbMinOrg.deSerializeFloat(memPtr->m_aabbMinOrg);
				m_contiguousNodes[i].m_escapeIndex = memPtr->m_escapeIndex;
				m_contiguousNodes[i].m_subPart = memPtr->m_subPart;
				m_contiguousNodes[i].m_triangleIndex = memPtr->m_triangleIndex;
			}
		}
	}

	{
		int numElem = quantizedBvhFloatData.m_numQuantizedContiguousNodes;
		m_quantizedContiguousNodes.resize(numElem);

		if (numElem)
		{
			btQuantizedBvhNodeData* memPtr = quantizedBvhFloatData.m_quantizedContiguousNodesPtr;
			for (int i=0;i<numElem;i++,memPtr++)
			{
				m_quantizedContiguousNodes[i].m_escapeIndexOrTriangleIndex = memPtr->m_escapeIndexOrTriangleIndex;
				m_quantizedContiguousNodes[i].m_quantizedAabbMax[0] = memPtr->m_quantizedAabbMax[0];
				m_quantizedContiguousNodes[i].m_quantizedAabbMax[1] = memPtr->m_quantizedAabbMax[1];
				m_quantizedContiguousNodes[i].m_quantizedAabbMax[2] = memPtr->m_quantizedAabbMax[2];
				m_quantizedContiguousNodes[i].m_quantizedAabbMin[0] = memPtr->m_quantizedAabbMin[0];
				m_quantizedContiguousNodes[i].m_quantizedAabbMin[1] = memPtr->m_quantizedAabbMin[1];
				m_quantizedContiguousNodes[i].m_quantizedAabbMin[2] = memPtr->m_quantizedAabbMin[2];
			}
		}
	}

	m_traversalMode = btTraversalMode(quantizedBvhFloatData.m_traversalMode);

	{
		int numElem = quantizedBvhFloatData.m_numSubtreeHeaders;
		m_SubtreeHeaders.resize(numElem);
		if (numElem)
		{
			btBvhSubtreeInfoData* memPtr = quantizedBvhFloatData.m_subTreeInfoPtr;
			for (int i=0;i<numElem;i++,memPtr++)
			{
				m_SubtreeHeaders[i].m_quantizedAabbMax[0] = memPtr->m_quantizedAabbMax[0];
				m_SubtreeHeaders[i].m_quantizedAabbMax[1] = memPtr->m_quantizedAabbMax[1];
				m_SubtreeHeaders[i].m_quantizedAabbMax[2] = memPtr->m_quantizedAabbMax[2];
				m_SubtreeHeaders[i].m_quantizedAabbMin[0] = memPtr->m_quantizedAabbMin[0];
				m_SubtreeHeaders[i].m_quantizedAabbMin[1] = memPtr->m_quantizedAabbMin[1];
				m_SubtreeHeaders[i].m_quantizedAabbMin[2] = memPtr->m_quantizedAabbMin[2];
				m_SubtreeHeaders[i].m_rootNodeIndex = memPtr->m_rootNodeIndex;
				m_SubtreeHeaders[i].m_subtreeSize = memPtr->m_subtreeSize;
			}
		}
	}
}

void btQuantizedBvh::deSerializeDouble(struct btQuantizedBvhDoubleData& quantizedBvhDoubleData)
{
	m_bvhAabbMax.deSerializeDouble(quantizedBvhDoubleData.m_bvhAabbMax);
	m_bvhAabbMin.deSerializeDouble(quantizedBvhDoubleData.m_bvhAabbMin);
	m_bvhQuantization.deSerializeDouble(quantizedBvhDoubleData.m_bvhQuantization);

	m_curNodeIndex = quantizedBvhDoubleData.m_curNodeIndex;
	m_useQuantization = quantizedBvhDoubleData.m_useQuantization!=0;

	{
		int numElem = quantizedBvhDoubleData.m_numContiguousLeafNodes;
		m_contiguousNodes.resize(numElem);

		if (numElem)
		{
			btOptimizedBvhNodeDoubleData* memPtr = quantizedBvhDoubleData.m_contiguousNodesPtr;

			for (int i=0;i<numElem;i++,memPtr++)
			{
				m_contiguousNodes[i].m_aabbMaxOrg.deSerializeDouble(memPtr->m_aabbMaxOrg);
				m_contiguousNodes[i].m_aabbMinOrg.deSerializeDouble(memPtr->m_aabbMinOrg);
				m_contiguousNodes[i].m_escapeIndex = memPtr->m_escapeIndex;
				m_contiguousNodes[i].m_subPart = memPtr->m_subPart;
				m_contiguousNodes[i].m_triangleIndex = memPtr->m_triangleIndex;
			}
		}
	}

	{
		int numElem = quantizedBvhDoubleData.m_numQuantizedContiguousNodes;
		m_quantizedContiguousNodes.resize(numElem);

		if (numElem)
		{
			btQuantizedBvhNodeData* memPtr = quantizedBvhDoubleData.m_quantizedContiguousNodesPtr;
			for (int i=0;i<numElem;i++,memPtr++)
			{
				m_quantizedContiguousNodes[i].m_escapeIndexOrTriangleIndex = memPtr->m_escapeIndexOrTriangleIndex;
				m_quantizedContiguousNodes[i].m_quantizedAabbMax[0] = memPtr->m_quantizedAabbMax[0];
				m_quantizedContiguousNodes[i].m_quantizedAabbMax[1] = memPtr->m_quantizedAabbMax[1];
				m_quantizedContiguousNodes[i].m_quantizedAabbMax[2] = memPtr->m_quantizedAabbMax[2];
				m_quantizedContiguousNodes[i].m_quantizedAabbMin[0] = memPtr->m_quantizedAabbMin[0];
				m_quantizedContiguousNodes[i].m_quantizedAabbMin[1] = memPtr->m_quantizedAabbMin[1];
				m_quantizedContiguousNodes[i].m_quantizedAabbMin[2] = memPtr->m_quantizedAabbMin[2];
			}
		}
	}

	m_traversalMode = btTraversalMode(quantizedBvhDoubleData.m_traversalMode);

	{
		int numElem = quantizedBvhDoubleData.m_numSubtreeHeaders;
		m_SubtreeHeaders.resize(numElem);
		if (numElem)
		{
			btBvhSubtreeInfoData* memPtr = quantizedBvhDoubleData.m_subTreeInfoPtr;
			for (int i=0;i<numElem;i++,memPtr++)
			{
				m_SubtreeHeaders[i].m_quantizedAabbMax[0] = memPtr->m_quantizedAabbMax[0];
				m_SubtreeHeaders[i].m_quantizedAabbMax[1] = memPtr->m_quantizedAabbMax[1];
				m_SubtreeHeaders[i].m_quantizedAabbMax[2] = memPtr->m_quantizedAabbMax[2];
				m_SubtreeHeaders[i].m_quantizedAabbMin[0] = memPtr->m_quantizedAabbMin[0];
				m_SubtreeHeaders[i].m_quantizedAabbMin[1] = memPtr->m_quantizedAabbMin[1];
				m_SubtreeHeaders[i].m_quantizedAabbMin[2] = memPtr->m_quantizedAabbMin[2];
				m_SubtreeHeaders[i].m_rootNodeIndex = memPtr->m_rootNodeIndex;
				m_SubtreeHeaders[i].m_subtreeSize = memPtr->m_subtreeSize;
			}
		}
	}

}


///fills the dataBuffer and returns the struct name (and 0 on failure)
const char*	btQuantizedBvh::serialize(void* dataBuffer, btSerializer* serializer) const
{
	btQuantizedBvhData* quantizedData = (btQuantizedBvhData*)dataBuffer;

	m_bvhAabbMax.serialize(quantizedData->m_bvhAabbMax);
	m_bvhAabbMin.serialize(quantizedData->m_bvhAabbMin);
	m_bvhQuantization.serialize(quantizedData->m_bvhQuantization);

	quantizedData->m_curNodeIndex = m_curNodeIndex;
	quantizedData->m_useQuantization = m_useQuantization;

	quantizedData->m_numContiguousLeafNodes = m_contiguousNodes.size();
	quantizedData->m_contiguousNodesPtr = (btOptimizedBvhNodeData*) (m_contiguousNodes.size() ? serializer->getUniquePointer((void*)&m_contiguousNodes[0]) : 0);
	if (quantizedData->m_contiguousNodesPtr)
	{
		int sz = sizeof(btOptimizedBvhNodeData);
		int numElem = m_contiguousNodes.size();
		btChunk* chunk = serializer->allocate(sz,numElem);
		btOptimizedBvhNodeData* memPtr = (btOptimizedBvhNodeData*)chunk->m_oldPtr;
		for (int i=0;i<numElem;i++,memPtr++)
		{
			m_contiguousNodes[i].m_aabbMaxOrg.serialize(memPtr->m_aabbMaxOrg);
			m_contiguousNodes[i].m_aabbMinOrg.serialize(memPtr->m_aabbMinOrg);
			memPtr->m_escapeIndex = m_contiguousNodes[i].m_escapeIndex;
			memPtr->m_subPart = m_contiguousNodes[i].m_subPart;
			memPtr->m_triangleIndex = m_contiguousNodes[i].m_triangleIndex;
		}
		serializer->finalizeChunk(chunk,"btOptimizedBvhNodeData",BT_ARRAY_CODE,(void*)&m_contiguousNodes[0]);
	}

	quantizedData->m_numQuantizedContiguousNodes = m_quantizedContiguousNodes.size();
//	printf("quantizedData->m_numQuantizedContiguousNodes=%d\n",quantizedData->m_numQuantizedContiguousNodes);
	quantizedData->m_quantizedContiguousNodesPtr = (btQuantizedBvhNodeData*) (m_quantizedContiguousNodes.size() ? serializer->getUniquePointer((void*)&m_quantizedContiguousNodes[0]) : 0);
	if (quantizedData->m_quantizedContiguousNodesPtr)
	{
		int sz = sizeof(btQuantizedBvhNodeData);
		int numElem = m_quantizedContiguousNodes.size();
		btChunk* chunk = serializer->allocate(sz,numElem);
		btQuantizedBvhNodeData* memPtr = (btQuantizedBvhNodeData*)chunk->m_oldPtr;
		for (int i=0;i<numElem;i++,memPtr++)
		{
			memPtr->m_escapeIndexOrTriangleIndex = m_quantizedContiguousNodes[i].m_escapeIndexOrTriangleIndex;
			memPtr->m_quantizedAabbMax[0] = m_quantizedContiguousNodes[i].m_quantizedAabbMax[0];
			memPtr->m_quantizedAabbMax[1] = m_quantizedContiguousNodes[i].m_quantizedAabbMax[1];
			memPtr->m_quantizedAabbMax[2] = m_quantizedContiguousNodes[i].m_quantizedAabbMax[2];
			memPtr->m_quantizedAabbMin[0] = m_quantizedContiguousNodes[i].m_quantizedAabbMin[0];
			memPtr->m_quantizedAabbMin[1] = m_quantizedContiguousNodes[i].m_quantizedAabbMin[1];
			memPtr->m_quantizedAabbMin[2] = m_quantizedContiguousNodes[i].m_quantizedAabbMin[2];
		}
		serializer->finalizeChunk(chunk,"btQuantizedBvhNodeData",BT_ARRAY_CODE,(void*)&m_quantizedContiguousNodes[0]);
	}

	quantizedData->m_traversalMode = int(m_traversalMode);
	quantizedData->m_numSubtreeHeaders = m_SubtreeHeaders.size();

	quantizedData->m_subTreeInfoPtr = (btBvhSubtreeInfoData*) (m_SubtreeHeaders.size() ? serializer->getUniquePointer((void*)&m_SubtreeHeaders[0]) : 0);
	if (quantizedData->m_subTreeInfoPtr)
	{
		int sz = sizeof(btBvhSubtreeInfoData);
		int numElem = m_SubtreeHeaders.size();
		btChunk* chunk = serializer->allocate(sz,numElem);
		btBvhSubtreeInfoData* memPtr = (btBvhSubtreeInfoData*)chunk->m_oldPtr;
		for (int i=0;i<numElem;i++,memPtr++)
		{
			memPtr->m_quantizedAabbMax[0] = m_SubtreeHeaders[i].m_quantizedAabbMax[0];
			memPtr->m_quantizedAabbMax[1] = m_SubtreeHeaders[i].m_quantizedAabbMax[1];
			memPtr->m_quantizedAabbMax[2] = m_SubtreeHeaders[i].m_quantizedAabbMax[2];
			memPtr->m_quantizedAabbMin[0] = m_SubtreeHeaders[i].m_quantizedAabbMin[0];
			memPtr->m_quantizedAabbMin[1] = m_SubtreeHeaders[i].m_quantizedAabbMin[1];
			memPtr->m_quantizedAabbMin[2] = m_SubtreeHeaders[i].m_quantizedAabbMin[2];

			memPtr->m_rootNodeIndex = m_SubtreeHeaders[i].m_rootNodeIndex;
			memPtr->m_subtreeSize = m_SubtreeHeaders[i].m_subtreeSize;
		}
		serializer->finalizeChunk(chunk,"btBvhSubtreeInfoData",BT_ARRAY_CODE,(void*)&m_SubtreeHeaders[0]);
	}
	return btQuantizedBvhDataName;
}
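
/*
This path follows Bullet's btSerializer chunk protocol: getUniquePointer()
registers each array so cross-references survive in the .bullet file,
allocate(sz,numElem) reserves a chunk, the loops copy the POD fields into it, and
finalizeChunk() tags the chunk with its struct name ("btQuantizedBvhNodeData",
etc.) and BT_ARRAY_CODE so the file loader can rebind the pointers on load.
*/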