Memoryless nodes: SLUB support

Simply switch all for_each_online_node() iterations to
for_each_node_state(node, N_MEMORY). That way SLUB only operates on nodes
with memory. Any allocation attempt on a memoryless node will fall back,
whereupon SLUB will fetch memory from a nearby node (depending on how
memory policies and cpusets describe the fallback).

Signed-off-by: Christoph Lameter

---
 mm/slub.c | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

Index: linux-2.6.22-rc4-mm2/mm/slub.c
===================================================================
--- linux-2.6.22-rc4-mm2.orig/mm/slub.c	2007-06-23 14:00:04.000000000 -0700
+++ linux-2.6.22-rc4-mm2/mm/slub.c	2007-06-23 14:42:39.000000000 -0700
@@ -2086,7 +2086,7 @@ static void free_kmem_cache_nodes(struct
 {
 	int node;
 
-	for_each_online_node(node) {
+	for_each_node_state(node, N_MEMORY) {
 		struct kmem_cache_node *n = s->node[node];
 		if (n && n != &s->local_node)
 			kmem_cache_free(kmalloc_caches, n);
@@ -2104,7 +2104,7 @@ static int init_kmem_cache_nodes(struct
 	else
 		local_node = 0;
 
-	for_each_online_node(node) {
+	for_each_node_state(node, N_MEMORY) {
 		struct kmem_cache_node *n;
 
 		if (local_node == node)
@@ -2366,7 +2366,7 @@ static inline int kmem_cache_close(struc
 
 	/* Attempt to free all objects */
 	free_kmem_cache_cpus(s);
-	for_each_online_node(node) {
+	for_each_node_state(node, N_MEMORY) {
 		struct kmem_cache_node *n = get_node(s, node);
 
 		n->nr_partial -= free_list(s, n, &n->partial);
@@ -2933,7 +2933,7 @@ int kmem_cache_shrink(struct kmem_cache
 	if (!scratch)
 		return -ENOMEM;
 
-	for_each_online_node(node)
+	for_each_node_state(node, N_MEMORY)
 		__kmem_cache_shrink(s, get_node(s, node), scratch);
 
 	kfree(scratch);
@@ -3004,7 +3004,7 @@ int kmem_cache_defrag(int percent, int n
 		scratch = kmalloc(sizeof(struct list_head) * s->objects,
 								GFP_KERNEL);
 		if (node == -1) {
-			for_each_online_node(node)
+			for_each_node_state(node, N_MEMORY)
 				pages += __kmem_cache_defrag(s, percent,
 								node, scratch);
 		} else
@@ -3400,7 +3400,7 @@ static unsigned long validate_slab_cache
 	unsigned long count = 0;
 
 	flush_all(s);
-	for_each_online_node(node) {
+	for_each_node_state(node, N_MEMORY) {
 		struct kmem_cache_node *n = get_node(s, node);
 
 		count += validate_slab_node(s, n);
@@ -3619,7 +3619,7 @@ static int list_locations(struct kmem_ca
 	/* Push back cpu slabs */
 	flush_all(s);
 
-	for_each_online_node(node) {
+	for_each_node_state(node, N_MEMORY) {
 		struct kmem_cache_node *n = get_node(s, node);
 		unsigned long flags;
 		struct page *page;
@@ -3731,7 +3731,7 @@ static unsigned long slab_objects(struct
 		}
 	}
 
-	for_each_online_node(node) {
+	for_each_node_state(node, N_MEMORY) {
 		struct kmem_cache_node *n = get_node(s, node);
 
 		if (flags & SO_PARTIAL) {
@@ -3759,7 +3759,7 @@ static unsigned long slab_objects(struct
 
 	x = sprintf(buf, "%lu", total);
 #ifdef CONFIG_NUMA
-	for_each_online_node(node)
+	for_each_node_state(node, N_MEMORY)
 		if (nodes[node])
 			x += sprintf(buf + x, " N%d=%lu",
 					node, nodes[node]);
@@ -3780,7 +3780,7 @@ static int any_slab_objects(struct kmem_
 			return 1;
 	}
 
-	for_each_online_node(node) {
+	for_each_node_state(node, N_MEMORY) {
 		struct kmem_cache_node *n = get_node(s, node);
 
 		if (n && (n->nr_partial || atomic_read(&n->nr_slabs)))
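
For illustration only, not part of the patch: a minimal sketch of the iteration
pattern the hunks above apply throughout mm/slub.c. The helper name
count_partial_slabs() is hypothetical and assumes the mm/slub.c context
(get_node(), struct kmem_cache, struct kmem_cache_node are the existing SLUB
structures the patch touches). The point is that a walk restricted to N_MEMORY
nodes only visits per-node slab structures where node-local memory can back
them, whereas for_each_online_node() would also visit memoryless nodes.

/*
 * Hypothetical helper, for illustration only: walk only the nodes
 * that have memory, the way the converted loops above now do.
 * Memoryless nodes are skipped, so we never dereference a
 * kmem_cache_node that was never populated for such a node.
 */
static unsigned long count_partial_slabs(struct kmem_cache *s)
{
	int node;
	unsigned long partial = 0;

	for_each_node_state(node, N_MEMORY) {
		struct kmem_cache_node *n = get_node(s, node);

		if (n)
			partial += n->nr_partial;
	}
	return partial;
}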