Slab: reduce size of alien cache to necessary nodes

The alien cache is a per-node array of array_cache pointers, allocated for
every slab cache on each node of the system. Currently we size this array
for all nodes that the kernel could support (MAX_NUMNODES). On IA64 that is
1024 nodes, so we allocate an array with 1024 entries even if we boot a
system with only 4 nodes.

This patch uses nr_node_ids, the number of node ids possible on this
hardware configuration (highest possible node id + 1), and allocates an
alien cache sized only for those nodes. It also converts the open-coded
loop over MAX_NUMNODES in hugetlb's try_to_free_low() to for_each_node(),
so that it too only visits possible nodes.

Signed-off-by: Christoph Lameter

Index: linux-2.6.20-rc6-mm2/mm/slab.c
===================================================================
--- linux-2.6.20-rc6-mm2.orig/mm/slab.c	2007-01-29 15:48:09.000000000 -0600
+++ linux-2.6.20-rc6-mm2/mm/slab.c	2007-01-30 13:10:31.508168575 -0600
@@ -1042,7 +1042,7 @@ static void *alternate_node_alloc(struct
 static struct array_cache **alloc_alien_cache(int node, int limit)
 {
 	struct array_cache **ac_ptr;
-	int memsize = sizeof(void *) * MAX_NUMNODES;
+	int memsize = sizeof(void *) * nr_node_ids;
 	int i;
 
 	if (limit > 1)

Index: linux-2.6.20-rc6-mm2/mm/hugetlb.c
===================================================================
--- linux-2.6.20-rc6-mm2.orig/mm/hugetlb.c	2007-01-24 20:19:28.000000000 -0600
+++ linux-2.6.20-rc6-mm2/mm/hugetlb.c	2007-01-30 13:12:44.827497675 -0600
@@ -191,11 +191,11 @@ static void update_and_free_page(struct
 #ifdef CONFIG_HIGHMEM
 static void try_to_free_low(unsigned long count)
 {
-	int i;
+	int node;
 
-	for (i = 0; i < MAX_NUMNODES; ++i) {
+	for_each_node(node) {
 		struct page *page, *next;
-		list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) {
+		list_for_each_entry_safe(page, next, &hugepage_freelists[node], lru) {
 			if (PageHighMem(page))
 				continue;
 			list_del(&page->lru);
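
For reference, here is a minimal userspace sketch of the sizing arithmetic
that the slab.c hunk changes. This is not kernel code; the MAX_NUMNODES and
nr_node_ids values are assumptions chosen to match the IA64 example above
(1024 supported nodes, 4 possible nodes):

#include <stdio.h>

/* Assumed values for illustration only: an IA64 kernel is built with
 * MAX_NUMNODES = 1024, while a small machine may boot with just 4
 * possible nodes, in which case nr_node_ids would be 4. */
#define MAX_NUMNODES	1024
static int nr_node_ids = 4;

int main(void)
{
	/* Before the patch: one pointer slot per compile-time node. */
	size_t before = sizeof(void *) * MAX_NUMNODES;

	/* After the patch: one slot per node id possible on this box. */
	size_t after = sizeof(void *) * nr_node_ids;

	printf("alien pointer array: %zu bytes -> %zu bytes\n",
	       before, after);
	return 0;
}

On a 64-bit machine this shrinks each array from 8192 bytes to 32 bytes,
and one such array is allocated for every slab cache on every node, so the
savings multiply accordingly.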