// dgTypes.h (fragment) — core scalar typedefs and build configuration for the
// Newton dynamics code as embedded in ScummVM.
// NOTE(review): this text is a damaged extraction — the integers at the start
// of each line are line numbers from the original file fused into the text,
// and many original lines are missing between the visible ones.
22 #ifndef AFX_DGTYPES__42YH_HY78GT_YHJ63Y__INCLUDED_ 23 #define AFX_DGTYPES__42YH_HY78GT_YHJ63Y__INCLUDED_ 25 #include "common/scummsys.h" 28 #define __USE_CPU_FOUND__ 30 #define DG_MAXIMUN_THREADS 8 40 #define DG_INLINE FORCEINLINE 43 #define DG_MSC_VECTOR_ALIGMENT __declspec(align(16)) 44 #define DG_GCC_VECTOR_ALIGMENT 46 #define DG_MSC_VECTOR_ALIGMENT 47 #define DG_GCC_VECTOR_ALIGMENT __attribute__((aligned(16))) 51 typedef uint8 dgUnsigned8;
// Fixed-width scalar aliases mapping ScummVM's types (common/scummsys.h)
// onto the dg* names used throughout the Newton code.
53 typedef int16 dgInt16;
54 typedef uint16 dgUnsigned16;
56 typedef int32 dgInt32;
57 typedef uint32 dgUnsigned32;
59 typedef int64 dgInt64;
60 typedef uint64 dgUnsigned64;
62 typedef double dgFloat64;
// dgFloat32 is double when __USE_DOUBLE_PRECISION__ is defined, float
// otherwise (the intervening #else is one of the lines lost in extraction).
64 #ifdef __USE_DOUBLE_PRECISION__ 65 typedef double dgFloat32;
67 typedef float dgFloat32;
// Math constants and helper macros, followed by the start of exp_2.
// NEWTON_ASSERT expands to nothing in this build (asserts compiled out).
// NOTE(review): fragment — original lines 85-94, 96-101, 103-106, 108 and
// 110-111 are missing from this extraction.
77 #define dgPI dgFloat32(3.14159f) 78 #define dgPI2 dgFloat32(dgPI * 2.0f) 79 #define dgEXP dgFloat32(2.71828f) 80 #define dgEPSILON dgFloat32(1.0e-5f) 81 #define dgGRAVITY dgFloat32(9.8f) 82 #define dgDEG2RAD dgFloat32(dgPI / 180.0f) 83 #define dgRAD2DEG dgFloat32(180.0f / dgPI) 84 #define dgKMH2MPSEC dgFloat32(0.278f) 95 #define dgNaked __declspec(naked) 102 #define dgCheckFloat(x) (isfinite(x) && !isnan(x)) 107 #define NEWTON_ASSERT(x) 109 DG_INLINE dgInt32 exp_2(dgInt32 x) {
// Shifts x right one bit per iteration starting from exp = -1, so the loop
// body runs once per significant bit — presumably computes floor(log2(x))
// for x > 0 and -1 for x == 0. TODO confirm against the full source; the
// declaration of 'exp' and the loop body are on missing lines.
112 for (exp = -1; x; x >>= 1) {
// Small template utilities (fragments — each template<class T> header line
// and most bodies are missing from this extraction).
// Clamp val into [min, max]; body lost.
120 DG_INLINE T ClampValue(T val, T min, T max) {
// Two-argument minimum; body lost.
133 DG_INLINE T GetMin(T A, T B) {
// Two-argument maximum; body lost.
142 DG_INLINE T GetMax(T A, T B) {
// Three-argument min/max, built from the two-argument overloads above.
151 DG_INLINE T GetMin(T A, T B, T C) {
152 return GetMin(GetMin(A, B), C);
156 DG_INLINE T GetMax(T A, T B, T C) {
157 return GetMax(GetMax(A, B), C);
// Exchange A and B by reference; body lost.
161 DG_INLINE
void Swap(T &A, T &B) {
// Presumably returns +1/-1 (or similar) based on the sign of A — body lost,
// TODO confirm.
168 DG_INLINE T GetSign(T A) {
// dgBinarySearch<T>: binary search over a sorted array of records carrying an
// m_Key field, locating the slot that brackets 'entry'.
// NOTE(review): fragment — original lines 180-185, 187, 191, 195-196,
// 198-203, 205-207 and the tail of the function are missing.
179 dgInt32 dgBinarySearch(T
const *array, dgInt32 elements, dgInt32 entry) {
// index0 is presumably initialised to 0 on a missing line; index2 starts at
// the last element.
186 index2 = elements - 1;
// Classic bisection: shrink [index0, index2] until it spans one gap.
188 while ((index2 - index0) > 1) {
189 index1 = (index0 + index2) >> 1;
190 entry1 = array[index1].m_Key;
192 if (entry1 == entry) {
// Exact hit: invariant check that entry lies between consecutive keys.
193 NEWTON_ASSERT(array[index1].m_Key <= entry);
194 NEWTON_ASSERT(array[index1 + 1].m_Key >= entry);
197 }
else if (entry < entry1) {
// NOTE(review): this compares a key against index0 (an array *index*) —
// 'entry' would be expected on the right-hand side. Looks like a latent
// bug, but the surrounding missing lines prevent a confident fix; verify
// against the full upstream source before changing.
204 if (array[index0].m_Key > index0) {
208 NEWTON_ASSERT(array[index0].m_Key <= entry);
209 NEWTON_ASSERT(array[index0 + 1].m_Key >= entry);
// dgRadixSort<T>: LSD radix sort on 'array' using a caller-supplied key
// extractor, processing radixPass bytes of the key (1..4), two passes per
// loop iteration so data ping-pongs between array and tmpArray.
// NOTE(review): fragment — the template header, the declarations of
// 'elements', 'tmpArray' and 'radixPass', and several closing braces are on
// missing lines.
215 void dgRadixSort(T *
const array,
219 dgInt32(*getRadixKey)(
const T *
const A,
void *
const context),
220 void *
const context = NULL) {
221 dgInt32 scanCount[256];
// One 256-bucket histogram per possible key byte (up to 4 passes).
222 dgInt32 histogram[256][4];
224 NEWTON_ASSERT(radixPass >= 1);
225 NEWTON_ASSERT(radixPass <= 4);
227 memset(histogram, 0,
sizeof(histogram));
// Single scan builds the histograms for all passes at once; byte j of the
// key feeds histogram[...][j].
229 for (dgInt32 i = 0; i < elements; i++) {
230 dgInt32 key = getRadixKey(&array[i], context);
232 for (dgInt32 j = 0; j < radixPass; j++) {
233 dgInt32 radix = (key >> (j << 3)) & 0xff;
234 histogram[radix][j] = histogram[radix][j] + 1;
// Passes are taken two at a time: even pass scatters array -> tmpArray,
// odd pass scatters tmpArray -> array, avoiding an extra copy.
238 for (dgInt32 radix = 0; radix < radixPass; radix += 2) {
// Prefix-sum the histogram into starting offsets (scanCount[0] is set on a
// missing line, presumably to 0).
241 for (dgInt32 i = 1; i < 256; i++) {
242 scanCount[i] = scanCount[i - 1] + histogram[i - 1][radix];
245 dgInt32 radixShift = radix << 3;
// Stable counting-sort scatter: array -> tmpArray.
247 for (dgInt32 i = 0; i < elements; i++) {
248 dgInt32 key = (getRadixKey(&array[i], context) >> radixShift) & 0xff;
249 dgInt32 index = scanCount[key];
250 tmpArray[index] = array[i];
251 scanCount[key] = index + 1;
// Odd pass (if any): scatter back tmpArray -> array on the next key byte.
254 if ((radix + 1) < radixPass) {
257 for (dgInt32 i = 1; i < 256; i++) {
258 scanCount[i] = scanCount[i - 1] + histogram[i - 1][radix + 1];
261 dgInt32 radixShift2 = (radix + 1) << 3;
263 for (dgInt32 i = 0; i < elements; i++) {
264 dgInt32 key = (getRadixKey(&array[i], context) >> radixShift2) & 0xff;
265 dgInt32 index = scanCount[key];
266 array[index] = tmpArray[i];
267 scanCount[key] = index + 1;
// Odd number of passes: result ended in tmpArray, copy it back.
270 memcpy(array, tmpArray, elements *
sizeof(T));
// Debug-only verification that the output is sorted by key (NEWTON_ASSERT
// compiles to nothing in this build).
275 for (dgInt32 i = 0; i < (elements - 1); i++) {
276 NEWTON_ASSERT(getRadixKey(&array[i], context) <= getRadixKey(&array[i + 1], context));
// dgSort<T>: in-place sort using an explicit-stack quicksort for large
// partitions followed by an insertion-sort cleanup pass; comparator receives
// an opaque user context.
// NOTE(review): fragment — the template header, 'elements'/'stride'
// declarations, the partition loop interior (original lines 299-318), and
// most braces are on missing lines.
282 void dgSort(T *
const array,
284 dgInt32(*compare)(
const T *
const A,
const T *
const B,
void *
const context),
285 void *
const context = NULL) {
// Explicit stack of [lo, hi] ranges replaces recursion.
287 dgInt32 stack[1024][2];
290 stack[0][1] = elements - 1;
291 dgInt32 stackIndex = 1;
// Pop a range (the pop/decrement is on a missing line).
295 dgInt32 lo = stack[stackIndex][0];
296 dgInt32 hi = stack[stackIndex][1];
// Only quicksort ranges larger than 'stride'; smaller ones are left for the
// insertion-sort pass below.
298 if ((hi - lo) > stride) {
// Pivot is a copy of the middle element.
301 T val(array[(lo + hi) >> 1]);
// Hoare-style partition scans (loop bodies on missing lines).
304 while (compare(&array[i], &val, context) < 0)
306 while (compare(&array[j], &val, context) > 0)
// Push the two sub-ranges for later processing.
319 stack[stackIndex][0] = i;
320 stack[stackIndex][1] = hi;
325 stack[stackIndex][0] = lo;
326 stack[stackIndex][1] = j;
// Guard against overflowing the fixed 1024-entry range stack.
329 NEWTON_ASSERT(stackIndex < dgInt32(
sizeof(stack) / (2 *
sizeof(stack[0][0]))));
// Cleanup phase: first move the global minimum to slot 0 (sentinel for the
// insertion sort), scanning min(elements, stride) entries.
335 if (elements < stride) {
339 for (dgInt32 i = 1; i < stride; i++) {
340 if (compare(&array[0], &array[i], context) > 0) {
// Insertion sort finishes the nearly-sorted array; the sentinel at [0]
// guarantees j never walks past the front.
347 for (dgInt32 i = 1; i < elements; i++) {
352 for (; compare(&array[j - 1], &tmp, context) > 0; j--) {
353 NEWTON_ASSERT(j > 0);
354 array[j] = array[j - 1];
// Debug-only verification of sortedness.
361 for (dgInt32 i = 0; i < (elements - 1); i++) {
362 NEWTON_ASSERT(compare(&array[i], &array[i + 1], context) <= 0);
// dgSortIndirect<T>: same explicit-stack quicksort + insertion-sort scheme as
// dgSort, but operating on an array of POINTERS (T**) — only the pointers are
// moved; the comparator is handed the pointed-to elements directly.
// NOTE(review): fragment — template header, 'elements'/'stride'/'tmp'
// declarations, partition-loop interiors, and most braces are on missing
// lines.
368 void dgSortIndirect(T **
const array,
370 dgInt32(*compare)(
const T *
const A,
const T *
const B,
void *
const context),
371 void *
const context = NULL) {
373 dgInt32 stack[1024][2];
376 stack[0][1] = elements - 1;
377 dgInt32 stackIndex = 1;
381 dgInt32 lo = stack[stackIndex][0];
382 dgInt32 hi = stack[stackIndex][1];
384 if ((hi - lo) > stride) {
// Pivot is the middle pointer itself (no element copy, unlike dgSort).
387 T *val(array[(lo + hi) >> 1]);
390 while (compare(array[i], val, context) < 0)
392 while (compare(array[j], val, context) > 0)
405 stack[stackIndex][0] = i;
406 stack[stackIndex][1] = hi;
411 stack[stackIndex][0] = lo;
412 stack[stackIndex][1] = j;
415 NEWTON_ASSERT(stackIndex < dgInt32(
sizeof(stack) / (2 *
sizeof(stack[0][0]))));
421 if (elements < stride) {
425 for (dgInt32 i = 1; i < stride; i++) {
// NOTE(review): '&array[i]' here passes T** where the comparator takes
// const T* — every other call in this function passes array[i]. Possible
// copy-paste from dgSort; verify against the full upstream source.
426 if (compare(&array[0], &array[i], context) > 0) {
433 for (dgInt32 i = 1; i < elements; i++) {
438 for (; compare(array[j - 1], tmp, context) > 0; j--) {
439 NEWTON_ASSERT(j > 0);
440 array[j] = array[j - 1];
// Debug-only verification of sortedness (through the pointers).
447 for (dgInt32 i = 0; i < (elements - 1); i++) {
448 NEWTON_ASSERT(compare(array[i], array[i + 1], context) <= 0);
// Prototypes for vertex-welding helpers (fragment — the function names for
// the first two declarations fell on missing lines; only parameter tails
// remain).
453 #ifdef __USE_DOUBLE_PRECISION__ 481 const dgFloat32 *
const vArray,
483 dgInt32 StrideInBytes);
487 const dgFloat64 *
const vArray,
489 dgInt32 strideInBytes);
// dgVertexListToIndexList: welds near-duplicate vertices (within
// 'tolerance') and emits an index list; float and double overloads.
491 dgInt32 dgVertexListToIndexList(dgFloat32 *
const vertexList,
492 dgInt32 strideInBytes,
493 dgInt32 floatSizeInBytes,
494 dgInt32 unsignedSizeInBytes,
496 dgInt32 *
const indexListOut,
497 dgFloat32 tolerance = dgEPSILON);
499 dgInt32 dgVertexListToIndexList(dgFloat64 *
const vertexList,
500 dgInt32 strideInBytes,
501 dgInt32 compareCount,
503 dgInt32 *
const indexListOut,
504 dgFloat64 tolerance = dgEPSILON);
// Pointer<->integer helpers plus dgAbsf (fragment — the declaration of the
// 'val' float/int union and the #else between the two return paths are on
// missing lines).
512 #define PointerToInt(x) ((size_t)x) 513 #define IntToPointer(x) ((void *)(size_t(x))) 515 DG_INLINE dgFloat32 dgAbsf(dgFloat32 x) {
// Bit-twiddled fabs: clears bit 31 of the high word of the union'd double —
// the IEEE 754 sign bit — then reads the value back as float.
519 val.m_intH &= ~(dgUnsigned64(1) << 31);
520 NEWTON_ASSERT(val.m_float == fabs(x));
522 return dgFloat32(val.m_float);
// Portable fallback path (presumably the #else branch): plain comparison.
525 return (x >= dgFloat32(0.0f)) ? x : -x;
// dgFastInt overloads: fast floor-to-int conversions (fragments — union
// declarations for 'val'/'round', return statements, and #else branches are
// on missing lines).
// double overload: truncate, then step down one if truncation rounded up
// (i.e. x was negative with a fractional part) — floor semantics.
529 #ifndef __USE_DOUBLE_PRECISION__ 530 DG_INLINE dgInt32 dgFastInt(dgFloat64 x) {
531 dgInt32 i = dgInt32(x);
533 if (dgFloat64(i) > x) {
541 DG_INLINE dgInt32 dgFastInt(dgFloat32 x) {
// Magic-number trick: adding 1.5 * 2^52 forces the double's mantissa to
// align so the integer part lands in the low 32-bit word (m_intL);
// presumably little-endian and round-to-nearest FPU mode are assumed —
// TODO confirm, the union declarations are on missing lines.
545 const dgFloat64 conversionMagicConst = ((dgFloat64(dgInt64(1) << 52)) * dgFloat64(1.5f));
546 val.m_float = dgFloat64(x) + conversionMagicConst;
// round captures the residual; its sign bit (m_intH >> 31 yields 0 or -1)
// corrects round-to-nearest back down to floor.
547 round.m_float = x - dgFloat64(val.m_intL);
548 dgInt32 ret = val.m_intL + (round.m_intH >> 31);
549 NEWTON_ASSERT(ret == dgInt32(floor(x)));
// Portable fallback (presumably the #else branch): truncate-and-correct as
// in the double overload above.
553 dgInt32 i = dgInt32(x);
555 if (dgFloat32(i) > x) {
// dgFloor / dgCeil built on dgFastInt (fragments — return statements,
// conditionals, and #else branches are on missing lines).
563 DG_INLINE dgFloat32 dgFloor(dgFloat32 x) {
565 dgFloat32 ret = dgFloat32(dgFastInt(x));
566 NEWTON_ASSERT(ret == floor(x));
// dgCeil: floor, then bump by one when x had a fractional part (the guard
// condition is on a missing line).
574 DG_INLINE dgFloat32 dgCeil(dgFloat32 x) {
576 dgFloat32 ret = dgFloor(x);
579 ret += dgFloat32(1.0f);
582 NEWTON_ASSERT(ret == ceil(x));
// Thin dgFloat32-typed wrappers over the C math library, a CPU-detection
// declaration, and dgAtomicAdd.
590 #define dgSqrt(x) dgFloat32(sqrt(x)) 591 #define dgRsqrt(x) (dgFloat32(1.0f) / dgSqrt(x)) 592 #define dgSin(x) dgFloat32(sin(x)) 593 #define dgCos(x) dgFloat32(cos(x)) 594 #define dgAsin(x) dgFloat32(asin(x)) 595 #define dgAcos(x) dgFloat32(acos(x)) 596 #define dgAtan2(x, y) dgFloat32(atan2(x, y)) 597 #define dgLog(x) dgFloat32(log(x)) 598 #define dgPow(x, y) dgFloat32(pow(x, y)) 599 #define dgFmod(x, y) dgFloat32(fmod(x, y)) 601 typedef dgUnsigned32(dgApi *OnGetPerformanceCountCallback)();
603 dgCpuClass dgApi dgGetCpuType();
// NOTE(review): despite the name, this is a plain non-atomic read-modify-
// write (*addend += amount) — no atomic primitive is visible here. Fine for
// a single-threaded build; not safe under concurrent callers.
605 inline dgInt32 dgAtomicAdd(dgInt32 *
const addend, dgInt32 amount) {
606 return *addend += amount;
// Doxygen cross-reference residue from the extraction (not source code):
// Definition: dgTypes.h:474
// Definition: dgVector.h:86
// Definition: dgTypes.h:464
// Definition: dgTypes.h:465
// Definition: dgVector.h:104