Index: linux-2.6.21-rc5/mm/slub.c
===================================================================
--- linux-2.6.21-rc5.orig/mm/slub.c	2007-03-26 17:14:32.000000000 -0700
+++ linux-2.6.21-rc5/mm/slub.c	2007-03-26 18:01:17.000000000 -0700
@@ -2264,60 +2264,110 @@ static int __init cpucache_init(void)
 __initcall(cpucache_init);
 #endif
 
+unsigned long count_partial(struct kmem_cache_node *n)
+{
+	unsigned long flags;
+	unsigned long x = 0;
+	struct page *page;
+
+	spin_lock_irqsave(&n->list_lock, flags);
+	list_for_each_entry(page, &n->partial, lru)
+		x += page->inuse;
+	spin_unlock_irqrestore(&n->list_lock, flags);
+	return x;
+}
+
+enum slab_stat_type {
+	SL_FULL,
+	SL_PARTIAL,
+	SL_CPU,
+	SL_OBJECTS
+};
+
+#define SO_FULL		(1 << SL_FULL)
+#define SO_PARTIAL	(1 << SL_PARTIAL)
+#define SO_CPU		(1 << SL_CPU)
+#define SO_OBJECTS	(1 << SL_OBJECTS)
+
 static unsigned long slab_objects(struct kmem_cache *s,
-	unsigned long *p_total, unsigned long *p_cpu_slabs,
-	unsigned long *p_partial, unsigned long *nodes)
+			char *buf, unsigned long flags)
 {
-	int nr_slabs = 0;
-	int nr_partial_slabs = 0;
-	int nr_cpu_slabs = 0;
-	int in_cpu_slabs = 0;
-	int in_partial_slabs = 0;
+	unsigned long total = 0;
 	int cpu;
 	int node;
-	unsigned long flags;
-	struct page *page;
 
+	int x;
+	unsigned long nodes[nr_node_ids];
+
 	for_each_online_node(node) {
 		struct kmem_cache_node *n = get_node(s, node);
 
-		nr_slabs += atomic_read(&n->nr_slabs);
-		nr_partial_slabs += n->nr_partial;
+		nodes[node] = 0;
 
-		if (nodes)
-			nodes[node] = atomic_read(&n->nr_slabs) +
-				n->nr_partial;
-
-		spin_lock_irqsave(&n->list_lock, flags);
-		list_for_each_entry(page, &n->partial, lru)
-			in_partial_slabs += page->inuse;
-		spin_unlock_irqrestore(&n->list_lock, flags);
+		if (flags & SO_FULL) {
+			if (flags & SO_OBJECTS)
+				x = atomic_read(&n->nr_slabs)
+						* s->objects;
+			else
+				x = atomic_read(&n->nr_slabs);
+			total += x;
+			nodes[node] += x;
+		}
+		if (flags & SO_PARTIAL) {
+			if (flags & SO_OBJECTS)
+				x = count_partial(n);
+			else
+				x = n->nr_partial;
+			total += x;
+			nodes[node] += x;
+		}
 	}
 
-	for_each_possible_cpu(cpu) {
-		page = s->cpu_slab[cpu];
-		if (page) {
-			nr_cpu_slabs++;
-			in_cpu_slabs += page->inuse;
-			if (nodes)
-				nodes[page_to_nid(page)]++;
+	if (flags & SO_CPU)
+		for_each_possible_cpu(cpu) {
+			struct page *page = s->cpu_slab[cpu];
+
+			if (page) {
+				int x = 0;
+				int node = page_to_nid(page);
+
+				if (flags & SO_OBJECTS)
+					x = page->inuse;
+				else
+					x = 1;
+				total += x;
+				nodes[node] += x;
+			}
 		}
-	}
 
-	if (p_partial)
-		*p_partial = nr_partial_slabs;
+	x = sprintf(buf, "%lu", total);
+#ifdef CONFIG_NUMA
+	for_each_node(node)
+		if (nodes[node])
+			x += sprintf(buf + x, " N%d=%lu",
+					node, nodes[node]);
+#endif
+	return x + sprintf(buf + x, "\n");
+}
+
+int any_slab_objects(struct kmem_cache *s)
+{
+	int node;
+	int cpu;
 
-	if (p_cpu_slabs)
-		*p_cpu_slabs = nr_cpu_slabs;
+	for_each_possible_cpu(cpu)
+		if (s->cpu_slab[cpu])
+			return 1;
 
-	if (p_total)
-		*p_total = nr_slabs;
+	for_each_node(node) {
+		struct kmem_cache_node *n = get_node(s, node);
 
-	return in_partial_slabs + in_cpu_slabs +
-		(nr_slabs - nr_partial_slabs - nr_cpu_slabs) * s->objects;
+		if (n->nr_partial || atomic_read(&n->nr_slabs))
+			return 1;
+	}
+	return 0;
 }
-
 /*
  * These are not as efficient as kmalloc for the non debug case.
  * We do not have the page struct available so we have to touch one
@@ -2456,16 +2506,22 @@ SLAB_ATTR_RO(order);
 
 static ssize_t ctor_show(struct kmem_cache *s, char *buf)
 {
-	if (s->ctor)
-		return sprint_symbol(buf, (unsigned long)s->ctor);
+	if (s->ctor) {
+		int n = sprint_symbol(buf, (unsigned long)s->ctor);
+
+		return n + sprintf(buf + n, "\n");
+	}
 	return 0;
 }
 SLAB_ATTR_RO(ctor);
 
 static ssize_t dtor_show(struct kmem_cache *s, char *buf)
 {
-	if (s->dtor)
-		return sprint_symbol(buf, (unsigned long)s->dtor);
+	if (s->dtor) {
+		int n = sprint_symbol(buf, (unsigned long)s->dtor);
+
+		return n + sprintf(buf + n, "\n");
+	}
 	return 0;
 }
 SLAB_ATTR_RO(dtor);
@@ -2478,37 +2534,25 @@ SLAB_ATTR_RO(aliases);
 
 static ssize_t slabs_show(struct kmem_cache *s, char *buf)
 {
-	unsigned long x;
-
-	slab_objects(s, &x, NULL, NULL, NULL);
-	return sprintf(buf, "%lu\n", x);
+	return slab_objects(s, buf, SO_FULL|SO_PARTIAL|SO_CPU);
 }
 SLAB_ATTR_RO(slabs);
 
 static ssize_t partial_show(struct kmem_cache *s, char *buf)
 {
-	unsigned long x;
-
-	slab_objects(s, NULL, &x, NULL, NULL);
-	return sprintf(buf, "%lu\n", x);
+	return slab_objects(s, buf, SO_PARTIAL);
 }
 SLAB_ATTR_RO(partial);
 
 static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
 {
-	unsigned long x;
-
-	slab_objects(s, NULL, &x, NULL, NULL);
-	return sprintf(buf, "%lu\n", x);
+	return slab_objects(s, buf, SO_CPU);
}
 SLAB_ATTR_RO(cpu_slabs);
 
 static ssize_t objects_show(struct kmem_cache *s, char *buf)
 {
-	unsigned long x;
-
-	x = slab_objects(s, NULL, NULL, NULL, NULL);
-	return sprintf(buf, "%lu\n", x);
+	return slab_objects(s, buf, SO_FULL|SO_PARTIAL|SO_CPU|SO_OBJECTS);
 }
 SLAB_ATTR_RO(objects);
 
@@ -2583,7 +2627,7 @@ static ssize_t red_zone_show(struct kmem
 static ssize_t red_zone_store(struct kmem_cache *s,
 				const char *buf, size_t length)
 {
-	if (slab_objects(s, NULL, NULL, NULL, NULL))
+	if (any_slab_objects(s))
 		return -EBUSY;
 
 	s->flags &= ~SLAB_RED_ZONE;
@@ -2601,7 +2645,7 @@ static ssize_t poison_show(struct kmem_c
 static ssize_t poison_store(struct kmem_cache *s,
 				const char *buf, size_t length)
 {
-	if (slab_objects(s, NULL, NULL, NULL, NULL))
+	if (any_slab_objects(s))
 		return -EBUSY;
 
 	s->flags &= ~SLAB_POISON;
@@ -2619,7 +2663,7 @@ static ssize_t store_user_show(struct km
 static ssize_t store_user_store(struct kmem_cache *s,
 				const char *buf, size_t length)
 {
-	if (slab_objects(s, NULL, NULL, NULL, NULL))
+	if (any_slab_objects(s))
 		return -EBUSY;
 
 	s->flags &= ~SLAB_STORE_USER;
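
For reference, here is a minimal userspace sketch of the buffer layout the new
slab_objects() produces for the sysfs attributes: a total, followed on NUMA
builds by " N<node>=<count>" pairs for every node with a nonzero count. This is
an illustration only; the two nodes and their counts below are made up, not
taken from a real system.

/*
 * Illustration only: mimics the "total N0=x N1=y" format that
 * slab_objects() sprintf's into the sysfs buffer. The two-node
 * counts are hypothetical.
 */
#include <stdio.h>

int main(void)
{
	char buf[128];
	unsigned long total = 300;
	unsigned long nodes[2] = { 200, 100 };	/* made-up per-node counts */
	int node;
	int x = sprintf(buf, "%lu", total);

	for (node = 0; node < 2; node++)
		if (nodes[node])
			x += sprintf(buf + x, " N%d=%lu", node, nodes[node]);
	x += sprintf(buf + x, "\n");
	fputs(buf, stdout);	/* prints: 300 N0=200 N1=100 */
	return 0;
}

A reader can thus take the first field as the machine-wide figure and treat any
trailing N<node>= fields as the per-node breakdown.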