Index: linux-2.6.21-rc5-mm4/mm/slub.c
===================================================================
--- linux-2.6.21-rc5-mm4.orig/mm/slub.c	2007-04-03 16:34:28.000000000 -0700
+++ linux-2.6.21-rc5-mm4/mm/slub.c	2007-04-03 17:55:30.000000000 -0700
@@ -928,6 +928,9 @@ static struct page *get_any_partial(stru
 	struct zone **z;
 	struct page *page;
 
+
+	printk(KERN_CRIT "get_any_partial(%s)\n", s->name);
+
 	/*
 	 * The defrag ratio allows to configure the tradeoffs between
 	 * inter node defragmentation and node local allocations.
@@ -945,13 +948,16 @@ static struct page *get_any_partial(stru
 	if (!s->defrag_ratio || get_cycles() % 1024 > s->defrag_ratio)
 		return NULL;
 
+	printk(KERN_CRIT "1\n");
 	zonelist = &NODE_DATA(slab_node(current->mempolicy))
 			->node_zonelists[gfp_zone(flags)];
+	printk(KERN_CRIT "2\n");
 	for (z = zonelist->zones; *z; z++) {
 		struct kmem_cache_node *n;
 
 		n = get_node(s, zone_to_nid(*z));
+		printk(KERN_CRIT "slab %s zone %p n=%p\n", s->name, *z, n);
 
 		if (n && cpuset_zone_allowed_hardwall(*z, flags) &&
 				n->nr_partial > 2) {
 			page = get_partial_node(n);
@@ -1112,6 +1118,7 @@ static __always_inline void *slab_alloc(
 	unsigned long flags;
 	int cpu;
 
+	printk(KERN_ERR "slab_alloc(%s, %d)\n", s->name, node);
 	local_irq_save(flags);
 	cpu = smp_processor_id();
 	page = s->cpu_slab[cpu];
@@ -1122,6 +1129,7 @@ static __always_inline void *slab_alloc(
 	if (unlikely(node != -1 && page_to_nid(page) != node))
 		goto another_slab;
 redo:
+	printk(KERN_ERR "redo point page=%p inuse=%d freelist=%p\n", page, page->inuse, page->freelist);
 	object = page->freelist;
 	if (unlikely(!object))
 		goto another_slab;
@@ -1134,9 +1142,11 @@ have_object:
 	SetPageReferenced(page);
 	slab_unlock(page);
 	local_irq_restore(flags);
+	printk(KERN_ERR "object = %p\n", object);
 	return object;
 
 another_slab:
+	printk(KERN_ERR "Deactivate slab (%s)\n",s->name);
 	deactivate_slab(s, page, cpu);
 
 new_slab:
@@ -1156,6 +1166,7 @@ have_slab:
 	}
 
 	page = new_slab(s, gfpflags, node);
+	printk(KERN_ERR "New slab %s node = %d\n", s->name, node);
 	if (page) {
 		if (s->cpu_slab[cpu]) {
 			/*
@@ -1434,6 +1445,8 @@ static int init_kmem_cache_nodes(struct
 			 */
 			struct page *page;
 
+			printk(KERN_CRIT "Bootstrap %s (%p) boot = %p\n",
+				s->name, s, kmalloc_caches);
 			BUG_ON(s->size < sizeof(struct kmem_cache_node));
 
 			page = new_slab(kmalloc_caches, gfpflags, node);
@@ -1441,9 +1454,12 @@ static int init_kmem_cache_nodes(struct
 			n = page->freelist;
 			page->freelist = get_freepointer(kmalloc_caches, n);
 			page->inuse++;
-		} else
+			printk(KERN_CRIT "Bootstrap done\n");
+		} else {
+			printk(KERN_CRIT "%s: allocating from node %d\n", s->name, node);
 			n = kmem_cache_alloc_node(kmalloc_caches,
 							gfpflags, node);
+		}
 
 		if (!n) {
 			free_kmem_cache_nodes(s);
@@ -1525,6 +1541,7 @@ static int kmem_cache_open(struct kmem_c
 		void (*ctor)(void *, struct kmem_cache *, unsigned long),
 		void (*dtor)(void *, struct kmem_cache *, unsigned long))
 {
+	printk(KERN_CRIT "kmem_cache_open(%s)\n", name);
 	memset(s, 0, kmem_size);
 	s->name = name;
 	s->ctor = ctor;