50 const int kUnassignedBatch = -1;
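// Presumably btBatchedConstraints::validate(): every dynamic body touched by a constraint is
// tagged with the first batch that references it in the current phase; seeing the same body from
// a different batch of the same phase trips the asserts below.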
53 for (int iPhase = 0; iPhase < m_phases.size(); ++iPhase)
56 bodyBatchId.resize(bodies.size(), kUnassignedBatch);
58 for (int iBatch = phase.begin; iBatch < phase.end; ++iBatch)
61 for (int iiCons = batch.begin; iiCons < batch.end; ++iiCons)
70 if (thisBodyBatchId == kUnassignedBatch)
74 else if (thisBodyBatchId != iBatch)
76 btAssert(!"dynamic body is used in 2 different batches in the same phase");
83 if (thisBodyBatchId == kUnassignedBatch)
87 else if (thisBodyBatchId != iBatch)
89 btAssert(!"dynamic body is used in 2 different batches in the same phase");
106 if (bc && bc->m_debugDrawer && iBatch < bc->m_batches.size())
109 for (int iiCon = b.begin; iiCon < b.end; ++iiCon)
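// Each constraint in the batch is drawn as a line between the world-space origins of its two
// bodies, shifted by 'offset' so that different phases do not overlap visually.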
115 btVector3 pos0 = bodies[iBody0].getWorldTransform().getOrigin() + offset;
116 btVector3 pos1 = bodies[iBody1].getWorldTransform().getOrigin() + offset;
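// Presumably debugDrawAllBatches(): 'tt' blends a per-batch color across each phase, the body
// bounding box sizes a per-phase offset, and every phase is drawn at its own offset and color.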
134 for (int iBatch = phase.begin; iBatch < phase.end; ++iBatch)
136 float tt = float(iBatch - phase.begin) / float(btMax(1, phase.end - phase.begin - 1));
152 for (int iBody = 0; iBody < bodies.size(); ++iBody)
154 const btVector3& pos = bodies[iBody].getWorldTransform().getOrigin();
158 btVector3 bboxExtent = bboxMax - bboxMin;
161 int numPhases = bc->m_phases.size();
162 for (int iPhase = 0; iPhase < numPhases; ++iPhase)
164 float b = float(iPhase) / float(numPhases - 1);
167 btVector3 offset = offsetBase + offsetStep * (float(iPhase) - float(numPhases - 1) * 0.5);
168 debugDrawPhase(bc, constraints, bodies, iPhase, color0, color1, offset);
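// Constraint-info setup: consecutive constraints acting on the same body pair are run-length
// encoded (the while loop below scans each run); the work may run in parallel with a grain size
// of 1200 constraints per task.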
178 for (int i = 0; i < bodies.size(); ++i)
191 while (iSrc < numConstraints)
198 while (iSrc < numConstraints && outConInfos[iSrc].bodyIds[0] == srcConInfo.bodyIds[0] && outConInfos[iSrc].bodyIds[1] == srcConInfo.bodyIds[1])
220 for (int i = iBegin; i < iEnd; ++i)
235 int numConstraints = constraints->size();
236 bool inParallel = true;
240 int grainSize = 1200;
245 for (int i = 0; i < numConstraints; ++i)
255 bool useRunLengthEncoding = true;
256 if (useRunLengthEncoding)
260 return numConstraints;
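// Row expansion: when constraints contribute more than one row, each constraint's batch id is
// replicated into all of its row slots. The in-place variant walks backwards (iDest >= iCon, so
// sources are never overwritten before they are read); the second loop copies into a separate
// destination array instead.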
266 if (numConstraintRows > numConstraints)
269 for (int iCon = numConstraints - 1; iCon >= 0; --iCon)
272 int iBatch = constraintBatchIds[iCon];
277 btAssert(iDest >= 0 && iDest < numConstraintRows);
278 constraintBatchIds[iDest] = iBatch;
287 for (int iCon = 0; iCon < numConstraints; ++iCon)
290 int iBatch = srcConstraintBatchIds[iCon];
295 btAssert(iDest >= 0 && iDest < numConstraintRows);
296 destConstraintBatchIds[iDest] = iBatch;
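// Presumably mergeSmallBatches(): batches in [iBeginBatch, iEndBatch) are walked from the back,
// and undersized ones are folded into an earlier destination batch; a merged batch records the
// surviving batch in mergeIndex rather than being copied immediately.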
333 int numConstraints = constraints->size();
343 for (int iBatch = iEndBatch - 1; iBatch >= iBeginBatch; --iBatch)
348 for (int iDestBatch = iBatch - 1; iDestBatch >= iBeginBatch; --iDestBatch)
365 for (int iBatch = iBeginBatch; iBatch < iEndBatch; ++iBatch)
388 BT_PROFILE("updateConstraintBatchIdsForMerges");
390 for (int i = 0; i < numConstraints; ++i)
392 int iBatch = constraintBatchIds[i];
395 if (batches[iBatch].mergeIndex != kNoMerge)
398 constraintBatchIds[i] = batches[iBatch].mergeIndex;
417 BT_PROFILE("UpdateConstraintBatchIdsForMergesLoop");
424 BT_PROFILE("updateConstraintBatchIdsForMergesMt");
438 const int* constraintBatchIds,
440 int* constraintIdPerBatch,
444 BT_PROFILE("writeOutConstraintIndicesForRangeOfBatches");
445 for (int iCon = 0; iCon < numConstraints; ++iCon)
447 int iBatch = constraintBatchIds[iCon];
448 if (iBatch >= batchBegin && iBatch < batchEnd)
450 int iDestCon = constraintIdPerBatch[iBatch];
451 constraintIdPerBatch[iBatch] = iDestCon + 1;
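// constraintIdPerBatch[] doubles as a running write cursor: each constraint claims the next free
// slot of its batch. Restricting a call to a [batchBegin, batchEnd) range lets threads fill
// disjoint batch ranges of the same phase without contention; the multithreaded variant below
// builds on that.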
488 const int* constraintBatchIds,
490 int* constraintIdPerBatch,
491 int maxNumBatchesPerPhase,
495 bool inParallel = true;
503 for (int iCon = 0; iCon < numConstraints; ++iCon)
505 int iBatch = constraintBatchIds[iCon];
506 int iDestCon = constraintIdPerBatch[iBatch];
507 constraintIdPerBatch[iBatch] = iDestCon + 1;
516 int numPhases = bc->m_phases.size();
519 for (int iPhase = 0; iPhase < numPhases; ++iPhase)
521 const Range& phase = bc->m_phases[iPhase];
522 int numBatches = phase.end - phase.begin;
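// Grain size heuristic: roughly a quarter of this phase's batches-per-thread ratio, floored.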
523 float grainSize = floor((0.25f * numBatches / float(numThreads)) + 0.0f);
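// Presumably writeOutBatches(): per-batch constraint counts become contiguous [begin, end)
// ranges laid out phase by phase; only non-empty batches are pushed, and each phase records the
// range of batches it owns.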
529 const int* constraintBatchIds,
533 int maxNumBatchesPerPhase,
544 int* constraintIdPerBatch = batchWork;
546 for (int iPhase = 0; iPhase < numPhases; ++iPhase)
548 int curPhaseBegin = bc->m_batches.size();
549 int iBegin = iPhase * maxNumBatchesPerPhase;
550 int iEnd = iBegin + maxNumBatchesPerPhase;
551 for (int i = iBegin; i < iEnd; ++i)
554 int curBatchBegin = iConstraint;
555 constraintIdPerBatch[i] = curBatchBegin;
557 iConstraint += numConstraints;
558 if (numConstraints > 0)
560 bc->m_batches.push_back(Range(curBatchBegin, iConstraint));
564 if (bc->m_batches.size() > curPhaseBegin)
571 btAssert(iConstraint == numConstraints);
576 for (int iPhase = 0; iPhase < bc->m_phases.size(); ++iPhase)
579 const Range& curBatches = bc->m_phases[iPhase];
583 for (int i = 0; i < bc->m_phases.size(); ++i)
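// Scratch-memory helper: chunks are registered up front, their sizes are summed into one
// allocation, and each registered pointer is then fixed up to its offset inside that block.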
631 size_t totalSize = 0;
640 size_t totalSize = 0;
644 char* chunkPtr = static_cast<char*>(mem) + totalSize;
645 *chunk.ptr = chunkPtr;
646 totalSize += chunk.size;
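// For each constraint joining two dynamic bodies, the separation of the bodies is inspected,
// presumably to pick a grid cell size that keeps constrained body pairs in neighbouring cells.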
653 bool* bodyDynamicFlags,
660 for (int iCon = 0; iCon < numConstraints; ++iCon)
665 btAssert(iBody0 >= 0 && iBody0 < numBodies);
666 btAssert(iBody1 >= 0 && iBody1 < numBodies);
668 if (bodyDynamicFlags[iBody0] && bodyDynamicFlags[iBody1])
670 btVector3 delta = bodyPositions[iBody1] - bodyPositions[iBody0];
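// Grid assignment: per axis, a constraint presumably takes the even grid coordinate when its two
// bodies straddle a cell boundary (or the shared coordinate otherwise), the coordinate parities
// select one of up to 8 phases, and the 2x2-cell chunk index within that phase becomes the batch id.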
699 memset(this, 0, sizeof(*this));
707 for (int iCon = iConBegin; iCon < iConEnd; ++iCon)
721 for (int i = 0; i < 3; ++i)
725 if (coordMin != coordMax)
728 if ((coordMin & 1) == 0)
738 gridCoord[i] = coordMin;
750 for (int i = 0; i < 3; ++i)
752 gridCoord[i] = body0Coords.m_ints[i];
759 for (int i = 0; i < 3; ++i)
761 int coordOffset = (iPhase >> i) & 1;
762 chunkCoord[i] = (gridCoord[i] - coordOffset) / 2;
763 btClamp(chunkCoord[i], 0, gridChunkDim[i] - 1);
764 btAssert(chunkCoord[i] < gridChunkDim[i]);
766 int iBatch = iPhase * params.maxNumBatchesPerPhase + chunkCoord[0] + chunkCoord[1] * gridChunkDim[0] + chunkCoord[2] * gridChunkDim[0] * gridChunkDim[1];
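// Presumably setupSpatialGridBatchesMt(): scratch arrays are carved out of one reused allocation,
// body positions and dynamic flags are gathered, the grid is sized (optionally collapsed to 2D by
// masking out the thinnest axis), and the cell size is grown until the chunk count fits the fixed
// per-phase batch budget (maxGridChunkCount).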
824 const int numPhases = 8;
825 int numConstraints = constraints->size();
826 int numConstraintRows = constraints->size();
828 const int maxGridChunkCount = 128;
829 int allocNumBatchesPerPhase = maxGridChunkCount;
830 int minNumBatchesPerPhase = 16;
831 int allocNumBatches = allocNumBatchesPerPhase * numPhases;
834 bool* bodyDynamicFlags = NULL;
837 int* batchWork = NULL;
839 int* constraintBatchIds = NULL;
840 int* constraintRowBatchIds = NULL;
844 memHelper.addChunk((void**)&bodyDynamicFlags, sizeof(bool) * bodies.size());
847 memHelper.addChunk((void**)&batchWork, sizeof(int) * allocNumBatches);
849 memHelper.addChunk((void**)&constraintBatchIds, sizeof(int) * numConstraints);
850 memHelper.addChunk((void**)&constraintRowBatchIds, sizeof(int) * numConstraintRows);
853 if (scratchMemory->capacity() < scratchSize)
856 scratchMemory->reserve(scratchSize + scratchSize / 16);
859 char* memPtr = &scratchMemory->at(0);
870 for (int i = 0; i < bodies.size(); ++i)
875 bodyPositions[i] = bodyPos;
876 bodyDynamicFlags[i] = isDynamic;
889 btVector3 gridExtent = bboxMax - bboxMin;
893 gridDim[0] = int(1.0 + gridExtent.x() / gridCellSize.x());
894 gridDim[1] = int(1.0 + gridExtent.y() / gridCellSize.y());
895 gridDim[2] = int(1.0 + gridExtent.z() / gridCellSize.z());
899 bool collapseAxis = use2DGrid;
903 int iAxisToCollapse = 0;
904 int axisDim = gridDim[iAxisToCollapse];
906 for (int i = 0; i < 3; ++i)
908 if (gridDim[i] < axisDim)
911 axisDim = gridDim[i];
915 gridCellSize[iAxisToCollapse] = gridExtent[iAxisToCollapse] * 2.0f;
916 phaseMask &= ~(1 << iAxisToCollapse);
919 int numGridChunks = 0;
923 gridDim[0] = int(1.0 + gridExtent.x() / gridCellSize.x());
924 gridDim[1] = int(1.0 + gridExtent.y() / gridCellSize.y());
925 gridDim[2] = int(1.0 + gridExtent.z() / gridCellSize.z());
926 gridChunkDim[0] = btMax(1, (gridDim[0] + 0) / 2);
927 gridChunkDim[1] = btMax(1, (gridDim[1] + 0) / 2);
928 gridChunkDim[2] = btMax(1, (gridDim[2] + 0) / 2);
929 numGridChunks = gridChunkDim[0] * gridChunkDim[1] * gridChunkDim[2];
930 float nChunks = float(gridChunkDim[0]) * float(gridChunkDim[1]) * float(gridChunkDim[2]);
931 if (numGridChunks <= maxGridChunkCount && nChunks <= maxGridChunkCount)
935 gridCellSize *= 1.25;
937 btAssert(numGridChunks <= maxGridChunkCount);
938 int maxNumBatchesPerPhase = numGridChunks;
943 for (int iBody = 0; iBody < bodies.size(); ++iBody)
945 btIntVec3& coords = bodyGridCoords[iBody];
946 if (bodyDynamicFlags[iBody])
948 btVector3 v = (bodyPositions[iBody] - bboxMin) * invGridCellSize;
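// Per-batch bookkeeping: batch ranges are reset phase by phase, constraints are tallied into
// their assigned batches (optionally in parallel), and each active phase then gets its small
// batches merged before the final batch ranges are written out.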
964 for (int iPhase = 0; iPhase < numPhases; ++iPhase)
966 int batchBegin = iPhase * maxNumBatchesPerPhase;
967 int batchEnd = batchBegin + maxNumBatchesPerPhase;
968 for (int iBatch = batchBegin; iBatch < batchEnd; ++iBatch)
986 bool inParallel = true;
998 for (int iCon = 0; iCon < numConstraints; ++iCon)
1001 int iBatch = constraintBatchIds[iCon];
1006 for (int iPhase = 0; iPhase < numPhases; ++iPhase)
1009 if (iPhase == (iPhase & phaseMask))
1011 int iBeginBatch = iPhase * maxNumBatchesPerPhase;
1012 int iEndBatch = iBeginBatch + maxNumBatchesPerPhase;
1019 if (numConstraintRows > numConstraints)
1021 expandConstraintRowsMt(&constraintRowBatchIds[0], &constraintBatchIds[0], &conInfos[0], numConstraints, numConstraintRows);
1025 constraintRowBatchIds = constraintBatchIds;
1028 writeOutBatches(batchedConstraints, constraintRowBatchIds, numConstraintRows, batches, batchWork, maxNumBatchesPerPhase, numPhases);
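// Presumably setupSingleBatch(): when batching is not worthwhile, all constraints are placed in
// a single batch belonging to a single phase.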
1040 for (int i = 0; i < numConstraints; ++i)
1046 bc->m_phases.resizeNoInitialize(0);
1050 if (numConstraints > 0)
1052 bc->m_batches.push_back(Range(0, numConstraints));
1053 bc->m_phases.push_back(Range(0, 1));
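// Top-level setup: spatial-grid batching is only attempted when there are enough constraints
// (at least four times the minimum batch size); otherwise the single-batch path above is used.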
1067 if (constraints->size() >= minBatchSize * 4)