[Scummvm-cvs-logs] SF.net SVN: scummvm:[34273] scummvm/trunk/common

fingolfin at users.sourceforge.net
Tue Sep 2 13:34:13 CEST 2008


Revision: 34273
          http://scummvm.svn.sourceforge.net/scummvm/?rev=34273&view=rev
Author:   fingolfin
Date:     2008-09-02 11:34:12 +0000 (Tue, 02 Sep 2008)

Log Message:
-----------
Revised HashMap implementation: replaced the prime-sized table and linear
probing (based on code by Andrew Y. Ng) with a CPython/PyDict-style scheme
using power-of-two capacities, perturb-based probing and a maximum load
factor of 2/3. Also reworked MemoryPool to allocate pages of growing size
and added a FixedSizeMemoryPool template with built-in static storage.

Modified Paths:
--------------
    scummvm/trunk/common/hashmap.cpp
    scummvm/trunk/common/hashmap.h
    scummvm/trunk/common/memorypool.cpp
    scummvm/trunk/common/memorypool.h

Modified: scummvm/trunk/common/hashmap.cpp
===================================================================
--- scummvm/trunk/common/hashmap.cpp	2008-09-02 11:32:38 UTC (rev 34272)
+++ scummvm/trunk/common/hashmap.cpp	2008-09-02 11:34:12 UTC (rev 34273)
@@ -24,71 +24,37 @@
  */
 
 // The hash map (associative array) implementation in this file is
-// based on code by Andrew Y. Ng, 1996:
+// based on the PyDict implementation of CPython. The erase() method
+// is based on example code in the Wikipedia article on Hash tables.
 
-/* 
- * Copyright (c) 1998-2003 Massachusetts Institute of Technology. 
- * This code was developed as part of the Haystack research project 
- * (http://haystack.lcs.mit.edu/). Permission is hereby granted, 
- * free of charge, to any person obtaining a copy of this software 
- * and associated documentation files (the "Software"), to deal in 
- * the Software without restriction, including without limitation 
- * the rights to use, copy, modify, merge, publish, distribute, 
- * sublicense, and/or sell copies of the Software, and to permit 
- * persons to whom the Software is furnished to do so, subject to 
- * the following conditions: 
- * 
- * The above copyright notice and this permission notice shall be 
- * included in all copies or substantial portions of the Software. 
- * 
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES 
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT 
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, 
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 
- * OTHER DEALINGS IN THE SOFTWARE. 
- */
-
 #include "common/hashmap.h"
 
 namespace Common {
 
-// const char *:
+// Hash function for strings, taken from CPython.
 uint hashit(const char *p) {
-	uint hash = 0;
+	uint hash = *p << 7;
 	byte c;
-	while ((c = *p++))
-		hash = (hash * 31 + c);
-	return hash;
+	int size = 0;
+	while ((c = *p++)) {
+		hash = (1000003 * hash) ^ c;
+		size++;
+	}
+	return hash ^ size;
 }
 
+// Like hashit, but converts every char to lowercase before hashing.
 uint hashit_lower(const char *p) {
-	uint hash = 0;
+	uint hash = tolower(*p) << 7;
 	byte c;
-	while ((c = *p++))
-		hash = (hash * 31 + tolower(c));
-	return hash;
+	int size = 0;
+	while ((c = *p++)) {
+		hash = (1000003 * hash) ^ tolower(c);
+		size++;
+	}
+	return hash ^ size;
 }
 
-// The following table is taken from the GNU ISO C++ Library's hashtable.h file.
-static const uint primes[] = {
-	53ul,         97ul,         193ul,       389ul,       769ul,
-	1543ul,       3079ul,       6151ul,      12289ul,     24593ul,
-	49157ul,      98317ul,      196613ul,    393241ul,    786433ul,
-	1572869ul,    3145739ul,    6291469ul,   12582917ul,  25165843ul,
-	50331653ul,   100663319ul,  201326611ul, 402653189ul, 805306457ul,
-	1610612741ul, 3221225473ul, 4294967291ul
-};
-
-uint nextTableSize(uint x) {
-	int i = 0;
-	while (x >= primes[i])
-		i++;
-	return primes[i];
-}
-
 #ifdef DEBUG_HASH_COLLISIONS
 static double
 	g_collisions = 0,
@@ -98,6 +64,7 @@
 	g_size = 0;
 static int g_max_capacity = 0, g_max_size = 0;
 static int g_totalHashmaps = 0;
+static int g_stats[4] = {0,0,0,0};
 
 void updateHashCollisionStats(int collisions, int lookups, int arrsize, int nele) {
 	g_collisions += collisions;
@@ -108,6 +75,15 @@
 	g_size += nele;
 	g_totalHashmaps++;
 	
+	if (3*nele <= 2*8)
+		g_stats[0]++;
+	if (3*nele <= 2*16)
+		g_stats[1]++;
+	if (3*nele <= 2*32)
+		g_stats[2]++;
+	if (3*nele <= 2*64)
+		g_stats[3]++;
+	
 	g_max_capacity = MAX(g_max_capacity, arrsize);
 	g_max_size = MAX(g_max_size, nele);
 
@@ -118,6 +94,15 @@
 		100 * g_collPerLook / g_totalHashmaps,
 		g_size / g_totalHashmaps, g_max_size,
 		g_capacity / g_totalHashmaps, g_max_capacity);
+	fprintf(stdout, "  %d less than %d; %d less than %d; %d less than %d; %d less than %d\n",
+			g_stats[0], 2*8/3,
+			g_stats[1], 2*16/3,
+			g_stats[2], 2*32/3,
+			g_stats[3], 2*64/3);
+
+	// TODO:
+	// * Should record the maximal size of the map during its lifetime, not just its size at destruction time
+	// * Should do some statistics: how many maps are less than 2/3*8, 2/3*16, 2/3*32, ...
 }
 #endif
 

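For reference, the string hash introduced in hashmap.cpp above can be tried in isolation. The following standalone sketch mirrors hashit(); the hashString name, the typedefs and the main() wrapper are illustrative only and not part of this commit. Note how the string length is folded into the final value, as in CPython's string hash.

#include <cstdio>

typedef unsigned int uint;
typedef unsigned char byte;

// CPython-style string hash: seed with the first character shifted left
// by 7, then multiply by 1000003 and xor in each character, and finally
// xor in the string length.
static uint hashString(const char *p) {
	uint hash = *p << 7;
	byte c;
	int size = 0;
	while ((c = *p++)) {
		hash = (1000003 * hash) ^ c;
		size++;
	}
	return hash ^ size;
}

int main() {
	printf("%u\n", hashString("monkey"));
	printf("%u\n", hashString("monkeyisland"));
	return 0;
}
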
Modified: scummvm/trunk/common/hashmap.h
===================================================================
--- scummvm/trunk/common/hashmap.h	2008-09-02 11:32:38 UTC (rev 34272)
+++ scummvm/trunk/common/hashmap.h	2008-09-02 11:34:12 UTC (rev 34273)
@@ -24,33 +24,9 @@
  */
 
 // The hash map (associative array) implementation in this file is
-// based on code by Andrew Y. Ng, 1996:
+// based on the PyDict implementation of CPython. The erase() method
+// is based on example code in the Wikipedia article on Hash tables.
 
-/*
- * Copyright (c) 1998-2003 Massachusetts Institute of Technology.
- * This code was developed as part of the Haystack research project
- * (http://haystack.lcs.mit.edu/). Permission is hereby granted,
- * free of charge, to any person obtaining a copy of this software
- * and associated documentation files (the "Software"), to deal in
- * the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute,
- * sublicense, and/or sell copies of the Software, and to permit
- * persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
 #ifndef COMMON_HASHMAP_H
 #define COMMON_HASHMAP_H
 
@@ -74,11 +50,6 @@
 
 namespace Common {
 
-// The table sizes ideally are primes. We use a helper function to find
-// suitable table sizes.
-uint nextTableSize(uint x);
-
-
 // Enable the following #define if you want to check how many collisions the
 // code produces (many collisions indicate either a bad hash function, or a
 // hash table that is too small).
@@ -113,9 +84,24 @@
 		Node(const Key &key) : _key(key), _value() {}
 	};
 
+	enum {
+		HASHMAP_PERTURB_SHIFT = 5,
+		HASHMAP_MIN_CAPACITY = 16,
+		
+		// The quotient of the next two constants controls how much the 
+		// internal storage of the hashmap may fill up before being
+		// increased automatically.
+		// Note: the quotient of these two must be strictly between
+		// 0 and 1, i.e. greater than 0 and less than 1.
+		HASHMAP_LOADFACTOR_NUMERATOR = 2,
+		HASHMAP_LOADFACTOR_DENOMINATOR = 3,
+		
+		HASHMAP_MEMORYPOOL_SIZE = HASHMAP_MIN_CAPACITY * HASHMAP_LOADFACTOR_NUMERATOR / HASHMAP_LOADFACTOR_DENOMINATOR
+	};
 
+
 #ifdef USE_HASHMAP_MEMORY_POOL
-	MemoryPool _nodePool;
+	FixedSizeMemoryPool<sizeof(Node), HASHMAP_MEMORYPOOL_SIZE> _nodePool;
 
 	Node *allocNode(const Key &key) {
 		void* mem = _nodePool.malloc();
@@ -137,7 +123,7 @@
 #endif
 
 	Node **_storage;	// hashtable of size arrsize.
-	uint _capacity;
+	uint _mask;		/**< Capacity of the HashMap minus one; must be a power of two minus one */
 	uint _size;
 
 	HashFunc _hash;
@@ -153,7 +139,7 @@
 	void assign(const HM_t &map);
 	int lookup(const Key &key) const;
 	int lookupAndCreateIfMissing(const Key &key);
-	void expand_array(uint newsize);
+	void expandStorage(uint newCapacity);
 
 	template<class T> friend class IteratorImpl;
 
@@ -175,7 +161,7 @@
 
 		NodeType *deref() const {
 			assert(_hashmap != 0);
-			assert(_idx < _hashmap->_capacity);
+			assert(_idx <= _hashmap->_mask);
 			Node *node = _hashmap->_storage[_idx];
 			assert(node != 0);
 			return node;
@@ -196,8 +182,8 @@
 			assert(_hashmap);
 			do {
 				_idx++;
-			} while (_idx < _hashmap->_capacity && _hashmap->_storage[_idx] == 0);
-			if (_idx >= _hashmap->_capacity)
+			} while (_idx <= _hashmap->_mask && _hashmap->_storage[_idx] == 0);
+			if (_idx > _hashmap->_mask)
 				_idx = (uint)-1;
 
 			return *this;
@@ -247,7 +233,7 @@
 
 	iterator	begin() {
 		// Find and return the first non-empty entry
-		for (uint ctr = 0; ctr < _capacity; ++ctr) {
+		for (uint ctr = 0; ctr <= _mask; ++ctr) {
 			if (_storage[ctr])
 				return iterator(ctr, this);
 		}
@@ -259,7 +245,7 @@
 
 	const_iterator	begin() const {
 		// Find and return the first non-empty entry
-		for (uint ctr = 0; ctr < _capacity; ++ctr) {
+		for (uint ctr = 0; ctr <= _mask; ++ctr) {
 			if (_storage[ctr])
 				return const_iterator(ctr, this);
 		}
@@ -298,14 +284,11 @@
  */
 template<class Key, class Val, class HashFunc, class EqualFunc>
 HashMap<Key, Val, HashFunc, EqualFunc>::HashMap() :
-#ifdef USE_HASHMAP_MEMORY_POOL
-	_nodePool(sizeof(Node)),
-#endif
 	_defaultVal() {
-	_capacity = nextTableSize(0);
-	_storage = new Node *[_capacity];
+	_mask = HASHMAP_MIN_CAPACITY - 1;
+	_storage = new Node *[HASHMAP_MIN_CAPACITY];
 	assert(_storage != NULL);
-	memset(_storage, 0, _capacity * sizeof(Node *));
+	memset(_storage, 0, HASHMAP_MIN_CAPACITY * sizeof(Node *));
 
 	_size = 0;
 
@@ -322,9 +305,6 @@
  */
 template<class Key, class Val, class HashFunc, class EqualFunc>
 HashMap<Key, Val, HashFunc, EqualFunc>::HashMap(const HM_t &map) : 
-#ifdef USE_HASHMAP_MEMORY_POOL
-	_nodePool(sizeof(Node)),
-#endif
 	_defaultVal()  {
 	assign(map);
 }
@@ -334,14 +314,14 @@
  */
 template<class Key, class Val, class HashFunc, class EqualFunc>
 HashMap<Key, Val, HashFunc, EqualFunc>::~HashMap() {
-	for (uint ctr = 0; ctr < _capacity; ++ctr)
+	for (uint ctr = 0; ctr <= _mask; ++ctr)
 		if (_storage[ctr] != NULL)
 		  freeNode(_storage[ctr]);
 
 	delete[] _storage;
 #ifdef DEBUG_HASH_COLLISIONS
 	extern void updateHashCollisionStats(int, int, int, int);
-	updateHashCollisionStats(_collisions, _lookups, _capacity, _size);
+	updateHashCollisionStats(_collisions, _lookups, _mask+1, _size);
 #endif
 }
 
@@ -354,14 +334,14 @@
  */
 template<class Key, class Val, class HashFunc, class EqualFunc>
 void HashMap<Key, Val, HashFunc, EqualFunc>::assign(const HM_t &map) {
-	_capacity = map._capacity;
-	_storage = new Node *[_capacity];
+	_mask = map._mask;
+	_storage = new Node *[_mask+1];
 	assert(_storage != NULL);
-	memset(_storage, 0, _capacity * sizeof(Node *));
+	memset(_storage, 0, (_mask+1) * sizeof(Node *));
 
 	// Simply clone the map given to us, one by one.
 	_size = 0;
-	for (uint ctr = 0; ctr < _capacity; ++ctr) {
+	for (uint ctr = 0; ctr <= _mask; ++ctr) {
 		if (map._storage[ctr] != NULL) {
 			_storage[ctr] = allocNode(map._storage[ctr]->_key);
 			_storage[ctr]->_value = map._storage[ctr]->_value;
@@ -375,43 +355,46 @@
 
 template<class Key, class Val, class HashFunc, class EqualFunc>
 void HashMap<Key, Val, HashFunc, EqualFunc>::clear(bool shrinkArray) {
-	for (uint ctr = 0; ctr < _capacity; ++ctr) {
+	for (uint ctr = 0; ctr <= _mask; ++ctr) {
 		if (_storage[ctr] != NULL) {
 			freeNode(_storage[ctr]);
 			_storage[ctr] = NULL;
 		}
 	}
 
-	if (shrinkArray && _capacity > nextTableSize(0)) {
+#ifdef USE_HASHMAP_MEMORY_POOL
+	_nodePool.freeUnusedPages();
+#endif
+
+	if (shrinkArray && _mask >= HASHMAP_MIN_CAPACITY) {
 		delete[] _storage;
 
-		_capacity = nextTableSize(0);
-		_storage = new Node *[_capacity];
+		_mask = HASHMAP_MIN_CAPACITY - 1;
+		_storage = new Node *[HASHMAP_MIN_CAPACITY];
 		assert(_storage != NULL);
-		memset(_storage, 0, _capacity * sizeof(Node *));
+		memset(_storage, 0, HASHMAP_MIN_CAPACITY * sizeof(Node *));
 	}
 
 	_size = 0;
 }
 
 template<class Key, class Val, class HashFunc, class EqualFunc>
-void HashMap<Key, Val, HashFunc, EqualFunc>::expand_array(uint newsize) {
-	assert(newsize > _capacity);
-	uint ctr, dex;
+void HashMap<Key, Val, HashFunc, EqualFunc>::expandStorage(uint newCapacity) {
+	assert(newCapacity > _mask+1);
 
 	const uint old_size = _size;
-	const uint old_capacity = _capacity;
+	const uint old_mask = _mask;
 	Node **old_storage = _storage;
 
 	// allocate a new array
 	_size = 0;
-	_capacity = newsize;
-	_storage = new Node *[_capacity];
+	_mask = newCapacity - 1;
+	_storage = new Node *[newCapacity];
 	assert(_storage != NULL);
-	memset(_storage, 0, _capacity * sizeof(Node *));
+	memset(_storage, 0, newCapacity * sizeof(Node *));
 
 	// rehash all the old elements
-	for (ctr = 0; ctr < old_capacity; ++ctr) {
+	for (uint ctr = 0; ctr <= old_mask; ++ctr) {
 		if (old_storage[ctr] == NULL)
 			continue;
 
@@ -419,12 +402,13 @@
 		// Since we know that no key exists twice in the old table, we
 		// can do this slightly better than by calling lookup, since we
 		// don't have to call _equal().
-		dex = _hash(old_storage[ctr]->_key) % _capacity;
-		while (_storage[dex] != NULL) {
-			dex = (dex + 1) % _capacity;
+		const uint hash = _hash(old_storage[ctr]->_key);
+		uint idx = hash & _mask;
+		for (uint perturb = hash; _storage[idx] != NULL; perturb >>= HASHMAP_PERTURB_SHIFT) {
+			idx = (5 * idx + perturb + 1) & _mask;
 		}
 
-		_storage[dex] = old_storage[ctr];
+		_storage[idx] = old_storage[ctr];
 		_size++;
 	}
 
@@ -439,10 +423,13 @@
 
 template<class Key, class Val, class HashFunc, class EqualFunc>
 int HashMap<Key, Val, HashFunc, EqualFunc>::lookup(const Key &key) const {
-	uint ctr = _hash(key) % _capacity;
+	const uint hash = _hash(key);
+	uint ctr = hash & _mask;
+	for (uint perturb = hash; ; perturb >>= HASHMAP_PERTURB_SHIFT) {
+		if (_storage[ctr] == NULL || _equal(_storage[ctr]->_key, key))
+			break;
 
-	while (_storage[ctr] != NULL && !_equal(_storage[ctr]->_key, key)) {
-		ctr = (ctr + 1) % _capacity;
+		ctr = (5 * ctr + perturb + 1) & _mask;
 
 #ifdef DEBUG_HASH_COLLISIONS
 		_collisions++;
@@ -453,7 +440,7 @@
 	_lookups++;
 	fprintf(stderr, "collisions %d, lookups %d, ratio %f in HashMap %p; size %d num elements %d\n",
 		_collisions, _lookups, ((double) _collisions / (double)_lookups),
-		(const void *)this, _capacity, _size);
+		(const void *)this, _mask+1, _size);
 #endif
 
 	return ctr;
@@ -467,9 +454,11 @@
 		_storage[ctr] = allocNode(key);
 		_size++;
 
-		// Keep the load factor below 75%.
-		if (_size > _capacity * 75 / 100) {
-			expand_array(nextTableSize(_capacity));
+		// Keep the load factor below a certain threshold.
+		uint capacity = _mask + 1;
+		if (_size * HASHMAP_LOADFACTOR_DENOMINATOR > capacity * HASHMAP_LOADFACTOR_NUMERATOR) {
+			capacity = capacity < 500 ? (capacity * 4) : (capacity * 2);
+			expandStorage(capacity);
 			ctr = lookup(key);
 		}
 	}
@@ -520,23 +509,35 @@
 template<class Key, class Val, class HashFunc, class EqualFunc>
 void HashMap<Key, Val, HashFunc, EqualFunc>::erase(const Key &key) {
 	// This is based on code in the Wikipedia article on Hash tables.
-	uint i = lookup(key);
+
+	const uint hash = _hash(key);
+	uint i = hash & _mask;
+	uint perturb;
+
+	for (perturb = hash; ; perturb >>= HASHMAP_PERTURB_SHIFT) {
+		if (_storage[i] == NULL || _equal(_storage[i]->_key, key))
+			break;
+
+		i = (5 * i + perturb + 1) & _mask;
+	}
+
 	if (_storage[i] == NULL)
 		return; // key wasn't present, so no work has to be done
+
 	// If we remove a key, we must check all subsequent keys and possibly
 	// reinsert them.
 	uint j = i;
 	freeNode(_storage[i]);
 	_storage[i] = NULL;
-	while (true) {
+	for (perturb = hash; ; perturb >>= HASHMAP_PERTURB_SHIFT) {
 		// Look at the next table slot
-		j = (j + 1) % _capacity;
+		j = (5 * j + perturb + 1) & _mask;
 		// If the next slot is empty, we are done
 		if (_storage[j] == NULL)
 			break;
 		// Compute the slot where the content of the next slot should normally be,
 		// assuming an empty table, and check whether we have to move it.
-		uint k = _hash(_storage[j]->_key) % _capacity;
+		uint k = _hash(_storage[j]->_key) & _mask;
 		if ((j > i && (k <= i || k > j)) ||
 		    (j < i && (k <= i && k > j)) ) {
 			_storage[i] = _storage[j];

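The probing scheme that replaces the old linear probing can be illustrated outside the template machinery. Below is a minimal self-contained sketch of the same recurrence, idx = (5 * idx + perturb + 1) & mask with perturb shifted right by HASHMAP_PERTURB_SHIFT each step. The ToySet type, the identity hash and the fixed 16-slot table are illustrative assumptions only; the real HashMap additionally grows its table (quadrupling below 500 slots, doubling above) and rehashes once _size exceeds 2/3 of the capacity.

#include <cassert>
#include <cstdio>

typedef unsigned int uint;

enum { PERTURB_SHIFT = 5 };	// same value as HASHMAP_PERTURB_SHIFT

// Toy open-addressed set of uint keys. The capacity is a power of two,
// and 0 marks an empty slot (so 0 itself cannot be stored).
struct ToySet {
	uint slots[16];
	uint mask;

	ToySet() : mask(15) {
		for (uint i = 0; i <= mask; i++)
			slots[i] = 0;
	}

	// Return the slot holding 'key', or the first empty slot on its
	// probe sequence. Never resizes, so at least one slot must stay free.
	uint lookup(uint key) const {
		const uint hash = key;	// identity hash, for illustration only
		uint idx = hash & mask;
		for (uint perturb = hash; ; perturb >>= PERTURB_SHIFT) {
			if (slots[idx] == 0 || slots[idx] == key)
				break;
			// Same recurrence as HashMap::lookup() above.
			idx = (5 * idx + perturb + 1) & mask;
		}
		return idx;
	}

	void insert(uint key)         { assert(key != 0); slots[lookup(key)] = key; }
	bool contains(uint key) const { return slots[lookup(key)] == key; }
};

int main() {
	ToySet s;
	s.insert(17);	// 17 & 15 == 1
	s.insert(33);	// 33 & 15 == 1 as well, so it gets probed to another slot
	printf("%d %d %d\n", s.contains(17), s.contains(33), s.contains(5));	// prints: 1 1 0
	return 0;
}
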
Modified: scummvm/trunk/common/memorypool.cpp
===================================================================
--- scummvm/trunk/common/memorypool.cpp	2008-09-02 11:32:38 UTC (rev 34272)
+++ scummvm/trunk/common/memorypool.cpp	2008-09-02 11:34:12 UTC (rev 34273)
@@ -28,22 +28,6 @@
 
 namespace Common {
 
-static const size_t CHUNK_PAGE_SIZE = 32;
-
-void* MemoryPool::allocPage() {
-	void* result = ::malloc(CHUNK_PAGE_SIZE * _chunkSize);
-	_pages.push_back(result);
-	void* current = result;
-	for (size_t i = 1; i < CHUNK_PAGE_SIZE; ++i) {
-		void* next    = ((char*)current + _chunkSize);
-		*(void**)current = next;
-
-		current = next;
-	}
-	*(void**)current = NULL;
-	return result;
-}
-
 MemoryPool::MemoryPool(size_t chunkSize) {
 	// You must at least fit the pointer in the node (technically unneeded considering the next rounding statement)
 	_chunkSize = MAX(chunkSize, sizeof(void*));
@@ -52,38 +36,68 @@
 	_chunkSize = (_chunkSize + sizeof(void*) - 1) & (~(sizeof(void*) - 1));
 
 	_next = NULL;
+	
+	_chunksPerPage = 8;
 }
 
 MemoryPool::~MemoryPool() {
-	for (size_t i = 0; i<_pages.size(); ++i)
-		::free(_pages[i]);
+	for (size_t i = 0; i < _pages.size(); ++i)
+		::free(_pages[i].start);
 }
 
-void* MemoryPool::malloc() {
-#if 1
-	if (!_next)
-		_next = allocPage();
+void MemoryPool::allocPage() {
+	Page page;	
+	
+	// Allocate a new page
+	page.numChunks = _chunksPerPage;
+	page.start = ::malloc(page.numChunks * _chunkSize);
+	assert(page.start);
+	_pages.push_back(page);
+	
+	// Next time, we'll allocate a page twice as big as this one.
+	_chunksPerPage *= 2;
+	
+	// Add the page to the pool of free chunks
+	addPageToPool(page);
+}
 
-	void* result = _next;
+void MemoryPool::addPageToPool(const Page &page) {
+
+	// Add all chunks of the new page to the linked list (pool) of free chunks
+	void *current = page.start;
+	for (size_t i = 1; i < page.numChunks; ++i) {
+		void *next    = ((char*)current + _chunkSize);
+		*(void **)current = next;
+
+		current = next;
+	}
+	
+	// Last chunk points to the old _next
+	*(void**)current = _next;
+
+	// From now on, the first free chunk is the first chunk of the new page
+	_next = page.start;
+}
+
+void *MemoryPool::malloc() {
+	if (!_next)	// No free chunks left? Allocate a new page
+		allocPage();
+
+	assert(_next);
+	void *result = _next;
 	_next = *(void**)result;
 	return result;
-#else
-	return ::malloc(_chunkSize);
-#endif
 }
 
 void MemoryPool::free(void* ptr) {
-#if 1
+	// Add the chunk back to (the start of) the list of free chunks
 	*(void**)ptr = _next;
 	_next = ptr;
-#else
-	::free(ptr);
-#endif
 }
 
 // Technically not compliant C++ to compare unrelated pointers. In practice...
-bool MemoryPool::isPointerInPage(void* ptr, void* page) {
-	return (ptr >= page) && (ptr < (char*)page + CHUNK_PAGE_SIZE * _chunkSize);
+bool MemoryPool::isPointerInPage(void *ptr, const Page &page) {
+	return (ptr >= page.start) && (ptr < (char*)page.start + page.numChunks * _chunkSize);
 }
 
 void MemoryPool::freeUnusedPages() {
@@ -94,9 +108,10 @@
 		numberOfFreeChunksPerPage[i] = 0;
 	}
 
-	void* iterator = _next;
+	// Count for each page how many of its chunks are currently free (i.e. on the free list).
+	void *iterator = _next;
 	while (iterator) {
-		// This should be a binary search
+		// TODO: This should be a binary search (requiring us to keep _pages sorted)
 		for (size_t i = 0; i < _pages.size(); ++i) {
 			if (isPointerInPage(iterator, _pages[i])) {
 				++numberOfFreeChunksPerPage[i];
@@ -106,15 +121,35 @@
 		iterator = *(void**)iterator;
 	}
 
+	// Free all pages which are not in use.
+	// TODO: Might want to reset _chunksPerPage here (e.g. to the largest
+	//      _pages[i].numChunks value still in use).
 	size_t freedPagesCount = 0;
-	for (size_t i = 0; i < _pages.size(); ++i) {
-		if (numberOfFreeChunksPerPage[i] == CHUNK_PAGE_SIZE) {
-			::free(_pages[i]);
-			_pages[i] = NULL; // TODO : Remove NULL values
+	for (size_t i = 0; i < _pages.size(); ++i)  {
+		if (numberOfFreeChunksPerPage[i] == _pages[i].numChunks) {
+			// Remove all chunks of this page from the list of free chunks
+			void **iter2 = &_next;
+			while (*iter2) {
+				if (isPointerInPage(*iter2, _pages[i]))
+					*iter2 = **(void***)iter2;
+				else
+					iter2 = *(void***)iter2;
+			}
+			::free(_pages[i].start);
 			++freedPagesCount;
+			_pages[i].start = NULL;
 		}
 	}
 
+	for (size_t i = 0; i < _pages.size(); )  {
+		if (_pages[i].start == NULL) {
+			_pages.remove_at(i);
+			// We just removed an entry, so we do not advance "i"
+		} else {
+			++i;
+		}
+	}
+
 	//printf("%d freed pages\n", freedPagesCount); 
 }
 

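The heart of the reworked MemoryPool is the intrusive free list that addPageToPool() builds: each free chunk stores a pointer to the next free chunk in its own first bytes, so malloc() and free() become simple list pops and pushes. A minimal self-contained sketch of that idea follows; the real pool above additionally doubles _chunksPerPage for every new page and can release fully unused pages again via freeUnusedPages().

#include <cassert>
#include <cstdlib>

// Thread a page of 'numChunks' chunks of 'chunkSize' bytes each into a
// singly linked free list and return its head. chunkSize must be at
// least sizeof(void *) and suitably aligned.
static void *buildFreeList(void *pageStart, size_t numChunks, size_t chunkSize) {
	void *current = pageStart;
	for (size_t i = 1; i < numChunks; ++i) {
		void *next = (char *)current + chunkSize;
		*(void **)current = next;	// the chunk's first bytes hold the link
		current = next;
	}
	*(void **)current = NULL;		// the last chunk terminates the list
	return pageStart;
}

int main() {
	const size_t chunkSize = 2 * sizeof(void *);
	const size_t numChunks = 8;
	void *page = malloc(numChunks * chunkSize);
	void *freeList = buildFreeList(page, numChunks, chunkSize);

	// MemoryPool::malloc() pops the head of the free list ...
	void *chunk = freeList;
	freeList = *(void **)chunk;

	// ... and MemoryPool::free() pushes a chunk back onto it.
	*(void **)chunk = freeList;
	freeList = chunk;

	assert(freeList == page);
	free(page);
	return 0;
}
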
Modified: scummvm/trunk/common/memorypool.h
===================================================================
--- scummvm/trunk/common/memorypool.h	2008-09-02 11:32:38 UTC (rev 34272)
+++ scummvm/trunk/common/memorypool.h	2008-09-02 11:34:12 UTC (rev 34273)
@@ -32,26 +32,57 @@
 namespace Common {
 
 class MemoryPool {
-private:
+protected:
 	MemoryPool(const MemoryPool&);
 	MemoryPool& operator=(const MemoryPool&);
+	
+	struct Page {
+		void *start;
+		size_t numChunks;
+	};
 
 	size_t			_chunkSize;
-	Array<void*>	_pages;
-	void*			_next;
+	Array<Page>		_pages;
+	void			*_next;
+	size_t			_chunksPerPage;
 
-	void*	allocPage();
-	bool	isPointerInPage(void* ptr, void* page);
+	void	allocPage();
+	void	addPageToPool(const Page &page);
+	bool	isPointerInPage(void *ptr, const Page &page);
+
 public:
 	MemoryPool(size_t chunkSize);
 	~MemoryPool();
 
-	void*	malloc();
-	void 	free(void* ptr);
+	void 	*malloc();
+	void 	free(void *ptr);
 
 	void	freeUnusedPages();
 };
 
+template<size_t CHUNK_SIZE, size_t NUM_INTERNAL_CHUNKS = 32>
+class FixedSizeMemoryPool : public MemoryPool {
+private:
+	enum {
+		REAL_CHUNK_SIZE = (CHUNK_SIZE + sizeof(void*) - 1) & (~(sizeof(void*) - 1))
+	};
+
+	byte	_storage[NUM_INTERNAL_CHUNKS * REAL_CHUNK_SIZE];
+public:
+	FixedSizeMemoryPool() : MemoryPool(CHUNK_SIZE) {
+		assert(REAL_CHUNK_SIZE == _chunkSize);
+		// Insert some static storage
+		Page internalPage = { _storage, NUM_INTERNAL_CHUNKS };
+		addPageToPool(internalPage);
+	}
+};
+
+template<size_t CHUNK_SIZE>
+class FixedSizeMemoryPool<CHUNK_SIZE,0> : public MemoryPool {
+public:
+	FixedSizeMemoryPool() : MemoryPool(CHUNK_SIZE) {}
+};
+
 }	// End of namespace Common
 
 #endif

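Typical use of the new FixedSizeMemoryPool, mirroring what hashmap.h now does for its nodes: the first NUM_INTERNAL_CHUNKS allocations are served from the pool's embedded _storage buffer and never hit ::malloc. This is a sketch only; it assumes the ScummVM common headers are on the include path, and Node, allocNode and freeNode here are illustrative stand-ins for HashMap's internal node handling.

#include <new>	// for placement new

#include "common/memorypool.h"

struct Node {
	int key;
	int value;
	Node(int k, int v) : key(k), value(v) {}
};

// Ten node-sized chunks live directly inside the pool object, matching
// HASHMAP_MEMORYPOOL_SIZE (16 * 2 / 3) in hashmap.h.
static Common::FixedSizeMemoryPool<sizeof(Node), 10> g_nodePool;

static Node *allocNode(int key, int value) {
	void *mem = g_nodePool.malloc();
	return new (mem) Node(key, value);	// placement new, as in HashMap::allocNode
}

static void freeNode(Node *node) {
	node->~Node();
	g_nodePool.free(node);
}

int main() {
	Node *n = allocNode(1, 42);
	freeNode(n);
	return 0;
}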
