Index: linux-2.6.18-rc4-mm3/mm/slabifier.c
===================================================================
--- linux-2.6.18-rc4-mm3.orig/mm/slabifier.c	2006-08-29 13:10:49.264067640 -0700
+++ linux-2.6.18-rc4-mm3/mm/slabifier.c	2006-08-29 15:06:29.421159902 -0700
@@ -24,6 +24,7 @@
 struct slab {
 	struct slab_cache sc;
 #ifdef CONFIG_SMP
+	struct mutex flushing;	/* Lock for flusher */
 	atomic_t active_cpus;	/* if >0 then flusher is active */
 	struct work_struct flush;
 #endif
@@ -167,11 +168,14 @@ static __always_inline int get_active_co
  */
 static __always_inline void slab_lock(struct page *page)
 {
+	printk(KERN_CRIT "slab_lock(%p)\n", page);
 	bit_spin_lock(PG_locked, &page->flags);
+	printk(KERN_CRIT "slab_lock(%p) done.\n", page);
 }
 
 static __always_inline void slab_unlock(struct page *page)
 {
+	printk(KERN_CRIT "slab_unlock(%p)\n", page);
 	bit_spin_unlock(PG_locked, &page->flags);
 }
 
@@ -186,12 +190,14 @@ static __always_inline void slab_unlock(
  */
 static void __always_inline add_partial(struct slab *s, struct page *page)
 {
+	printk(KERN_CRIT "add_partial(%s, %p)\n", s->sc.name, page);
 	spin_lock(&s->list_lock);
 	ClearPageActive(page);
 	s->nr_partial++;
 	list_add_tail(&page->lru, &s->partial);
 	spin_unlock(&s->list_lock);
 	slab_unlock(page);
+	printk(KERN_CRIT "end add_partial(%s, %p)\n", s->sc.name, page);
 }
 
 /*
@@ -203,11 +209,13 @@ static void __always_inline add_partial(
 static void __always_inline remove_partial(struct slab *s,
 						struct page *page)
 {
+	printk(KERN_CRIT "remove_partial(%s, %p)\n", s->sc.name, page);
 	spin_lock(&s->list_lock);
 	list_del(&page->lru);
 	s->nr_partial--;
 	spin_unlock(&s->list_lock);
 	slab_unlock(page);
+	printk(KERN_CRIT "end remove_partial(%s, %p)\n", s->sc.name, page);
 }
 
 /*
@@ -236,6 +244,7 @@ static struct page *get_partial(struct s
 	struct page *page;
 	int searchnode = (node == -1) ? numa_node_id() : node;
 
+	printk(KERN_CRIT "get_partial(%s, %d)\n", s->sc.name, node);
 	spin_lock(&s->list_lock);
 	/*
 	 * Search for slab on the right node
@@ -259,6 +268,7 @@ static struct page *get_partial(struct s
 	page = NULL;
 out:
 	spin_unlock(&s->list_lock);
+	printk(KERN_CRIT "end get_partial(%s, %p)\n", s->sc.name, page);
 	return page;
 }
 #else
@@ -527,18 +537,23 @@ void flusher(void *d)
 {
 	struct slab *s = d;
 
+	if (!mutex_trylock(&s->flushing))
+		return;
 	atomic_set(&s->active_cpus, num_online_cpus());
 	schedule_on_each_cpu(check_flush_active, s);
 	if (atomic_read(&s->active_cpus))
 		schedule_delayed_work(&s->flush, 10 * HZ);
+	mutex_unlock(&s->flushing);
 }
 
 static void drain_all(struct slab *s)
 {
 	if (atomic_read(&s->active_cpus)) {
+		mutex_lock(&s->flushing);
 		cancel_delayed_work(&s->flush);
 		atomic_set(&s->active_cpus, 0);
 		schedule_on_each_cpu(flush_active, s);
+		mutex_unlock(&s->flushing);
 	}
 }
 #else
@@ -600,7 +615,7 @@ static struct slab_cache *slab_create(st
 	atomic_set(&s->refcount, 1);
 	spin_lock_init(&s->list_lock);
-
+	mutex_init(&s->flushing);
 	for_each_possible_cpu(cpu)
 		s->active[cpu] = NULL;
 	return &s->sc;
 }