[Scummvm-git-logs] scummvm master -> be8589c3233e1e0b42b424ef7d30d4ae341acce2
grisenti
noreply at scummvm.org
Sun Mar 26 16:28:43 UTC 2023
This automated email contains information about 1 new commit which has been
pushed to the 'scummvm' repo located at https://github.com/scummvm/scummvm .
Summary:
be8589c323 HPL1: replace defines in dgMemory
Commit: be8589c3233e1e0b42b424ef7d30d4ae341acce2
https://github.com/scummvm/scummvm/commit/be8589c3233e1e0b42b424ef7d30d4ae341acce2
Author: grisenti (emanuele at grisenti.net)
Date: 2023-03-26T16:28:27+02:00
Commit Message:
HPL1: replace defines in dgMemory
this removes the unportable __x86_64__ define
Changed paths:
engines/hpl1/engine/libraries/newton/core/dgMemory.cpp
engines/hpl1/engine/libraries/newton/core/dgMemory.h
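For context, the old header tied the allocator granularity to the compiler-specific __x86_64__ macro, so any target that does not define it silently fell back to the 32-bit value. The replacement derives the same constant from the pointer width, which every compiler knows. A minimal stand-alone sketch of the idea, not taken from the commit (the main() and the printout are illustrative only):

// Sketch only: why sizeof(void *) * 8 can replace the __x86_64__-guarded
// define. On a 64-bit target sizeof(void *) is 8, so the value is 64
// (the old 1 << 6); on a 32-bit target it is 32 (the old 1 << 5).
#include <cstdio>

static constexpr int memoryGranularity = sizeof(void *) * 8;

// The "size &= -granularity" rounding in Malloc still relies on a power of two.
static_assert((memoryGranularity & (memoryGranularity - 1)) == 0,
              "granularity must remain a power of two");

int main() {
	std::printf("memoryGranularity = %d\n", memoryGranularity);
	return 0;
}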
diff --git a/engines/hpl1/engine/libraries/newton/core/dgMemory.cpp b/engines/hpl1/engine/libraries/newton/core/dgMemory.cpp
index 7209e7b8253..705519ff31f 100644
--- a/engines/hpl1/engine/libraries/newton/core/dgMemory.cpp
+++ b/engines/hpl1/engine/libraries/newton/core/dgMemory.cpp
@@ -110,7 +110,7 @@ void dgMemoryAllocator::SetAllocatorsCallback(dgMemAlloc memAlloc, dgMemFree mem
}
void *dgMemoryAllocator::MallocLow(dgInt32 workingSize, dgInt32 alignment) {
- NEWTON_ASSERT(alignment >= DG_MEMORY_GRANULARITY);
+ NEWTON_ASSERT(alignment >= memoryGranularity);
NEWTON_ASSERT(((-alignment) & (alignment - 1)) == 0);
dgInt32 size = workingSize + alignment * 2;
void *const ptr = m_malloc(dgUnsigned32(size));
@@ -139,19 +139,19 @@ void dgMemoryAllocator::FreeLow(void *const retPtr) {
m_free(info->m_ptr, dgUnsigned32(info->m_size));
}
-// alloca memory on pool that are quantized to DG_MEMORY_GRANULARITY
-// if memory size is larger than DG_MEMORY_BIN_ENTRIES then the memory is not placed into a pool
+// alloca memory on pool that are quantized to memoryGranularity
+// if memory size is larger than memoryBinEntries then the memory is not placed into a pool
void *dgMemoryAllocator::Malloc(dgInt32 memsize) {
- NEWTON_ASSERT(dgInt32(sizeof(dgMemoryCacheEntry) + sizeof(dgInt32) + sizeof(dgInt32)) <= DG_MEMORY_GRANULARITY);
+ NEWTON_ASSERT(dgInt32(sizeof(dgMemoryCacheEntry) + sizeof(dgInt32) + sizeof(dgInt32)) <= memoryGranularity);
- dgInt32 size = memsize + DG_MEMORY_GRANULARITY - 1;
- size &= (-DG_MEMORY_GRANULARITY);
+ dgInt32 size = memsize + memoryGranularity - 1;
+ size &= -memoryGranularity;
- dgInt32 paddedSize = size + DG_MEMORY_GRANULARITY;
- dgInt32 entry = paddedSize >> DG_MEMORY_GRANULARITY_BITS;
+ dgInt32 paddedSize = size + memoryGranularity;
+ dgInt32 entry = paddedSize / memoryGranularity;
void *ptr;
- if (entry >= DG_MEMORY_BIN_ENTRIES) {
+ if (entry >= memoryBinEntries) {
ptr = MallocLow(size);
} else {
if (!m_memoryDirectory[entry].m_cache) {
@@ -177,7 +177,7 @@ void *dgMemoryAllocator::Malloc(dgInt32 memsize) {
dgMemoryCacheEntry *const cashe = (dgMemoryCacheEntry *) charPtr;
cashe->m_next = (dgMemoryCacheEntry *)(charPtr + paddedSize);
cashe->m_prev = (dgMemoryCacheEntry *)(charPtr - paddedSize);
- dgMemoryInfo *const info = ((dgMemoryInfo *)(charPtr + DG_MEMORY_GRANULARITY)) - 1;
+ dgMemoryInfo *const info = ((dgMemoryInfo *)(charPtr + memoryGranularity)) - 1;
info->SaveInfo(this, bin, entry, m_emumerator, memsize);
charPtr += paddedSize;
}
@@ -194,7 +194,7 @@ void *dgMemoryAllocator::Malloc(dgInt32 memsize) {
cashe->m_next->m_prev = NULL;
}
- ptr = ((char *) cashe) + DG_MEMORY_GRANULARITY;
+ ptr = ((char *) cashe) + memoryGranularity;
dgMemoryInfo *info;
info = ((dgMemoryInfo *)(ptr)) - 1;
@@ -211,22 +211,22 @@ void *dgMemoryAllocator::Malloc(dgInt32 memsize) {
return ptr;
}
-// alloca memory on pool that are quantized to DG_MEMORY_GRANULARITY
-// if memory size is larger than DG_MEMORY_BIN_ENTRIES then the memory is not placed into a pool
+// allocate memory on pool that are quantized to memoryGranularity
+// if memory size is larger than memoryBinEntries then the memory is not placed into a pool
void dgMemoryAllocator::Free(void *const retPtr) {
dgMemoryInfo *const info = ((dgMemoryInfo *)(retPtr)) - 1;
NEWTON_ASSERT(info->m_allocator == this);
dgInt32 entry = info->m_size;
- if (entry >= DG_MEMORY_BIN_ENTRIES) {
+ if (entry >= memoryBinEntries) {
FreeLow(retPtr);
} else {
#ifdef __TRACK_MEMORY_LEAKS__
m_leaklTracker.RemoveBlock(retPtr);
#endif
- dgMemoryCacheEntry *const cashe = (dgMemoryCacheEntry *)(((char *)retPtr) - DG_MEMORY_GRANULARITY) ;
+ dgMemoryCacheEntry *const cashe = (dgMemoryCacheEntry *)(((char *)retPtr) - memoryGranularity) ;
dgMemoryCacheEntry *const tmpCashe = m_memoryDirectory[entry].m_cache;
if (tmpCashe) {
@@ -241,8 +241,8 @@ void dgMemoryAllocator::Free(void *const retPtr) {
dgMemoryBin *const bin = (dgMemoryBin *) info->m_ptr;
#ifdef _DEBUG
- NEWTON_ASSERT((bin->m_info.m_stepInBites - DG_MEMORY_GRANULARITY) > 0);
- memset(retPtr, 0, bin->m_info.m_stepInBites - DG_MEMORY_GRANULARITY);
+ NEWTON_ASSERT((bin->m_info.m_stepInBites - memoryGranularity) > 0);
+ memset(retPtr, 0, bin->m_info.m_stepInBites - memoryGranularity);
#endif
bin->m_info.m_count--;
diff --git a/engines/hpl1/engine/libraries/newton/core/dgMemory.h b/engines/hpl1/engine/libraries/newton/core/dgMemory.h
index 2045a66784a..dd6c65803de 100644
--- a/engines/hpl1/engine/libraries/newton/core/dgMemory.h
+++ b/engines/hpl1/engine/libraries/newton/core/dgMemory.h
@@ -78,16 +78,10 @@ dgInt32 dgGetMemoryUsed();
class dgMemoryAllocator {
-#ifdef __x86_64__
-#define DG_MEMORY_GRANULARITY_BITS 6
-#else
-#define DG_MEMORY_GRANULARITY_BITS 5
-#endif
-#define DG_MEMORY_GRANULARITY (1 << DG_MEMORY_GRANULARITY_BITS)
-#define DG_MEMORY_SIZE (1024 - 64)
-#define DG_MEMORY_BIN_SIZE (1024 * 16)
-#define DG_MEMORY_BIN_ENTRIES (DG_MEMORY_SIZE / DG_MEMORY_GRANULARITY)
-
+ static constexpr dgInt32 memoryGranularity = sizeof(void*) * 8;
+ static constexpr dgInt32 memorySize = 1024 - 64;
+ static constexpr dgInt32 memoryBinSize = 1024 * 16;
+ static constexpr dgInt32 memoryBinEntries = memorySize / memoryGranularity;
public:
class dgMemoryBin {
@@ -101,7 +95,7 @@ public:
dgMemoryBin *m_prev;
};
- char m_pool[DG_MEMORY_BIN_SIZE - sizeof(dgMemoryBinInfo) - DG_MEMORY_GRANULARITY * 2];
+ char m_pool[memoryBinSize - sizeof(dgMemoryBinInfo) - memoryGranularity * 2];
dgMemoryBinInfo m_info;
};
@@ -173,7 +167,7 @@ public:
void operator delete (void *const ptr);
dgInt32 GetMemoryUsed() const;
void SetAllocatorsCallback(dgMemAlloc memAlloc, dgMemFree memFree);
- void *MallocLow(dgInt32 size, dgInt32 alignment = DG_MEMORY_GRANULARITY);
+ void *MallocLow(dgInt32 size, dgInt32 alignment = memoryGranularity);
void FreeLow(void *const retPtr);
void *Malloc(dgInt32 memsize);
void Free(void *const retPtr);
@@ -186,7 +180,7 @@ protected:
dgInt32 m_memoryUsed;
dgMemFree m_free;
dgMemAlloc m_malloc;
- dgMemDirectory m_memoryDirectory[DG_MEMORY_BIN_ENTRIES + 1];
+ dgMemDirectory m_memoryDirectory[memoryBinEntries + 1];
#ifdef __TRACK_MEMORY_LEAKS__
dgMemoryLeaksTracker m_leaklTracker;
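In the Malloc hunk above, the bin index changes from a shift by DG_MEMORY_GRANULARITY_BITS to a plain division by memoryGranularity; for a power-of-two granularity the division compiles to the same shift, and the separate BITS constant can disappear. A small stand-alone sketch of the arithmetic under the new constants (the binFor() helper and the sample values are illustrative, not from the commit):

// Sketch only: quantization and bin selection as done in dgMemoryAllocator::Malloc.
#include <cstdio>

static constexpr int memoryGranularity = sizeof(void *) * 8; // 64 on 64-bit hosts, 32 on 32-bit
static constexpr int memorySize = 1024 - 64;
static constexpr int memoryBinEntries = memorySize / memoryGranularity;

// Mirrors "size = memsize + memoryGranularity - 1; size &= -memoryGranularity;"
// followed by "entry = paddedSize / memoryGranularity;".
static int binFor(int memsize) {
	int size = (memsize + memoryGranularity - 1) & -memoryGranularity;
	int paddedSize = size + memoryGranularity;
	return paddedSize / memoryGranularity;
}

int main() {
	// On a 64-bit host: 100 rounds up to 128, plus one granularity of padding
	// gives 192, so bin 3; any request whose bin reaches memoryBinEntries (15)
	// bypasses the pools and goes through MallocLow instead.
	std::printf("binFor(100) = %d, pool limit = %d\n", binFor(100), memoryBinEntries);
	return 0;
}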