Index: linux-2.6.18-rc4-mm2/mm/slabifier.c
===================================================================
--- linux-2.6.18-rc4-mm2.orig/mm/slabifier.c	2006-08-25 19:16:53.733825481 -0700
+++ linux-2.6.18-rc4-mm2/mm/slabifier.c	2006-08-25 19:47:22.715568466 -0700
@@ -33,6 +33,9 @@ struct slab {
 	spinlock_t list_lock;
 	struct list_head partial;
 	unsigned long nr_partial;
+#ifdef CONFIG_NUMA
+	nodemask_t nodes;	/* Nodes on the partial list */
+#endif
 	struct page *active[NR_CPUS];
 };
 
@@ -141,6 +144,9 @@ static void __always_inline add_partial(
 	spin_lock(&s->list_lock);
 	s->nr_partial++;
 	list_add_tail(&page->lru, &s->partial);
+#ifdef CONFIG_NUMA
+	node_set(page_to_nid(page), s->nodes);
+#endif
 	spin_unlock(&s->list_lock);
 }
 
@@ -168,25 +174,31 @@ static __always_inline int lock_and_del_
 	return 0;
 }
 
+/*
+ * Search for a partial list element for the requested
+ * or the local node. This is done with interrupts disabled
+ * so we must try to limit interrupt latency by limiting
+ * the number of slabs we check.
+ */
 struct page *numa_search(struct slab *s, int node)
 {
 #ifdef CONFIG_NUMA
-	int wanted_node;
 	struct list_head *h;
 	struct page *page;
 
-	/*
-	 * Search for slab on the right node
-	 */
-	wanted_node = node < 0 ?
-		numa_node_id() : node;
+	if (!node_isset(node, s->nodes))
+		return NULL;
+
 	list_for_each(h, &s->partial) {
 		page = container_of(h, struct page, lru);
 
-		if (likely(page_to_nid(page) == wanted_node) &&
+		if (likely(page_to_nid(page) == node) &&
 			lock_and_del_slab(s, page))
 			return page;
 	}
+
+	/* Remember our failure */
+	node_clear(node, s->nodes);
 #endif
 	return NULL;
 }
@@ -199,15 +211,17 @@ static struct page *get_partial(struct s
 	struct page *page;
 	struct list_head *h;
 
+#ifdef CONFIG_NUMA
+	if (unlikely(node >= 0)) {
+		spin_lock(&s->list_lock);
+		page = numa_search(s, node);
+		goto out;
+	}
+#endif
 	spin_lock(&s->list_lock);
-
-	page = numa_search(s, node);
+	page = numa_search(s, numa_node_id());
 	if (page)
 		goto out;
-#ifdef CONFIG_NUMA
-	if (node >= 0)
-		goto fail;
-#endif
 
 	list_for_each(h, &s->partial) {
 		page = container_of(h, struct page, lru);
@@ -215,7 +229,6 @@ static struct page *get_partial(struct s
 		if (likely(lock_and_del_slab(s, page)))
 			goto out;
 	}
-fail:
 	page = NULL;
  out:
 	spin_unlock(&s->list_lock);
@@ -522,6 +535,9 @@ static struct slab_cache *slab_create(st
 	s->flusher_active = 0;
 	INIT_WORK(&s->flush, &flusher, s);
 #endif
+#ifdef CONFIG_NUMA
+	nodes_clear(s->nodes);
+#endif
 	if (!s->objects)
 		return NULL;
 