Index: linux-2.6.21-rc5/mm/slub.c
===================================================================
--- linux-2.6.21-rc5.orig/mm/slub.c	2007-03-26 15:33:55.000000000 -0700
+++ linux-2.6.21-rc5/mm/slub.c	2007-03-26 15:36:13.000000000 -0700
@@ -110,9 +110,9 @@ static int sysfs_slab_add(struct kmem_ca
 static int sysfs_slab_alias(struct kmem_cache *, const char *);
 static void sysfs_slab_remove(struct kmem_cache *);
 #else
-static int sysfs_slab_add(struct kmem_cache *) { return 0; }
-static int sysfs_slab_alias(struct kmem_cache *, const char *) { return 0; }
-static void sysfs_slab_remove(struct kmem_cache *) {}
+static int sysfs_slab_add(struct kmem_cache *s) { return 0; }
+static int sysfs_slab_alias(struct kmem_cache *s, const char *p) { return 0; }
+static void sysfs_slab_remove(struct kmem_cache *s) {}
 #endif
 
 /********************************************************************
@@ -1663,59 +1663,6 @@ void kmem_cache_destroy(struct kmem_cach
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
 
-static unsigned long slab_objects(struct kmem_cache *s,
-	unsigned long *p_total, unsigned long *p_cpu_slabs,
-	unsigned long *p_partial, unsigned long *nodes)
-{
-	int nr_slabs = 0;
-	int nr_partial_slabs = 0;
-	int nr_cpu_slabs = 0;
-	int in_cpu_slabs = 0;
-	int in_partial_slabs = 0;
-	int cpu;
-	int node;
-	unsigned long flags;
-	struct page *page;
-
-	for_each_online_node(node) {
-		struct kmem_cache_node *n = get_node(s, node);
-
-		nr_slabs += atomic_read(&n->nr_slabs);
-		nr_partial_slabs += n->nr_partial;
-
-		if (nodes)
-			nodes[node] = atomic_read(&n->nr_slabs) +
-					n->nr_partial;
-
-		spin_lock_irqsave(&n->list_lock, flags);
-		list_for_each_entry(page, &n->partial, lru)
-			in_partial_slabs += page->inuse;
-		spin_unlock_irqrestore(&n->list_lock, flags);
-	}
-
-	for_each_possible_cpu(cpu) {
-		page = s->cpu_slab[cpu];
-		if (page) {
-			nr_cpu_slabs++;
-			in_cpu_slabs += page->inuse;
-			if (nodes)
-				nodes[page_to_nid(page)]++;
-		}
-	}
-
-	if (p_partial)
-		*p_partial = nr_partial_slabs;
-
-	if (p_cpu_slabs)
-		*p_cpu_slabs = nr_cpu_slabs;
-
-	if (p_total)
-		*p_total = nr_slabs;
-
-	return in_partial_slabs + in_cpu_slabs +
-		(nr_slabs - nr_partial_slabs - nr_cpu_slabs) * s->objects;
-}
-
 /********************************************************************
  *		Kmalloc subsystem
  *******************************************************************/
@@ -2287,6 +2234,60 @@ static int __init cpucache_init(void)
 __initcall(cpucache_init);
 #endif
 
+static unsigned long slab_objects(struct kmem_cache *s,
+	unsigned long *p_total, unsigned long *p_cpu_slabs,
+	unsigned long *p_partial, unsigned long *nodes)
+{
+	int nr_slabs = 0;
+	int nr_partial_slabs = 0;
+	int nr_cpu_slabs = 0;
+	int in_cpu_slabs = 0;
+	int in_partial_slabs = 0;
+	int cpu;
+	int node;
+	unsigned long flags;
+	struct page *page;
+
+	for_each_online_node(node) {
+		struct kmem_cache_node *n = get_node(s, node);
+
+		nr_slabs += atomic_read(&n->nr_slabs);
+		nr_partial_slabs += n->nr_partial;
+
+		if (nodes)
+			nodes[node] = atomic_read(&n->nr_slabs) +
+					n->nr_partial;
+
+		spin_lock_irqsave(&n->list_lock, flags);
+		list_for_each_entry(page, &n->partial, lru)
+			in_partial_slabs += page->inuse;
+		spin_unlock_irqrestore(&n->list_lock, flags);
+	}
+
+	for_each_possible_cpu(cpu) {
+		page = s->cpu_slab[cpu];
+		if (page) {
+			nr_cpu_slabs++;
+			in_cpu_slabs += page->inuse;
+			if (nodes)
+				nodes[page_to_nid(page)]++;
+		}
+	}
+
+	if (p_partial)
+		*p_partial = nr_partial_slabs;
+
+	if (p_cpu_slabs)
+		*p_cpu_slabs = nr_cpu_slabs;
+
+	if (p_total)
+		*p_total = nr_slabs;
+
+	return in_partial_slabs + in_cpu_slabs +
+		(nr_slabs - nr_partial_slabs - nr_cpu_slabs) * s->objects;
+}
+
+
 /*
  * These are not as efficient as kmalloc for the non debug case.
  * We do not have the page struct available so we have to touch one