From: Christoph Lameter

Make sure that the bootstrap allocation occurs on the correct node and that
the slab we allocated gets put onto the partial list.  Otherwise the rest of
the slab is lost for good.

And while we are at it, reduce the number of #ifdefs by rearranging code.
init_kmem_cache_node already initializes most fields.  Avoid memset and just
set the remaining field manually.

Signed-off-by: Christoph Lameter
Signed-off-by: Andrew Morton
---

 mm/slub.c |   85 ++++++++++++++++++++++++++++++++--------------------
 1 file changed, 53 insertions(+), 32 deletions(-)

diff -puN mm/slub.c~slub-core-fix-another-numa-bootstrap-issue mm/slub.c
--- a/mm/slub.c~slub-core-fix-another-numa-bootstrap-issue
+++ a/mm/slub.c
@@ -1388,15 +1388,46 @@ static unsigned long calculate_alignment
 
 static void init_kmem_cache_node(struct kmem_cache_node *n)
 {
-        memset(n, 0, sizeof(struct kmem_cache_node));
+        n->nr_partial = 0;
         atomic_long_set(&n->nr_slabs, 0);
         spin_lock_init(&n->list_lock);
         INIT_LIST_HEAD(&n->partial);
 }
 
+#ifdef CONFIG_NUMA
+/*
+ * No kmalloc_node yet so do it by hand. We know that this is the first
+ * slab on the node for this slabcache. There are no concurrent accesses
+ * possible.
+ *
+ * Note that this function only works on the kmalloc_node_cache
+ * when allocating for the kmalloc_node_cache.
+ */
+struct kmem_cache_node * __init early_kmem_cache_node_alloc(
+                                        gfp_t gfpflags, int node)
+{
+        struct page *page;
+        struct kmem_cache_node *n;
+
+        BUG_ON(kmalloc_caches->size < sizeof(struct kmem_cache_node));
+        page = new_slab(kmalloc_caches, gfpflags | GFP_THISNODE, node);
+        /* new_slab() disables interrupts */
+        local_irq_enable();
+
+        BUG_ON(!page);
+        n = page->freelist;
+        BUG_ON(!n);
+        page->freelist = get_freepointer(kmalloc_caches, n);
+        page->inuse++;
+        kmalloc_caches->node[node] = n;
+        init_kmem_cache_node(n);
+        atomic_long_inc(&n->nr_slabs);
+        add_partial(kmalloc_caches, page);
+        return n;
+}
+
 static void free_kmem_cache_nodes(struct kmem_cache *s)
 {
-#ifdef CONFIG_NUMA
         int node;
 
         for_each_online_node(node) {
@@ -1405,12 +1436,10 @@ static void free_kmem_cache_nodes(struct
                         kmem_cache_free(kmalloc_caches, n);
                 s->node[node] = NULL;
         }
-#endif
 }
 
 static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
 {
-#ifdef CONFIG_NUMA
         int node;
         int local_node;
 
@@ -1424,45 +1453,37 @@ static int init_kmem_cache_nodes(struct
 
                 if (local_node == node)
                         n = &s->local_node;
-                else
-                if (slab_state == DOWN) {
-                        /*
-                         * No kmalloc_node yet so do it by hand.
-                         * We know that this is the first slab on the
-                         * node for this slabcache. There are no concurrent
-                         * accesses possible.
-                         */
-                        struct page *page;
-
-                        BUG_ON(s->size < sizeof(struct kmem_cache_node));
-                        page = new_slab(kmalloc_caches, gfpflags, node);
-                        /* new_slab() disables interupts */
-                        local_irq_enable();
-
-                        BUG_ON(!page);
-                        n = page->freelist;
-                        page->freelist = get_freepointer(kmalloc_caches, n);
-                        page->inuse++;
-                } else
+                else {
+                        if (slab_state == DOWN) {
+                                n = early_kmem_cache_node_alloc(gfpflags,
+                                                                node);
+                                continue;
+                        }
                         n = kmem_cache_alloc_node(kmalloc_caches,
                                                         gfpflags, node);
 
-                if (!n) {
-                        free_kmem_cache_nodes(s);
-                        return 0;
-                }
+                        if (!n) {
+                                free_kmem_cache_nodes(s);
+                                return 0;
+                        }
+                }
                 s->node[node] = n;
                 init_kmem_cache_node(n);
-
-                if (slab_state == DOWN)
-                        atomic_long_inc(&n->nr_slabs);
         }
+        return 1;
+}
 #else
+static void free_kmem_cache_nodes(struct kmem_cache *s)
+{
+}
+
+static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
+{
         init_kmem_cache_node(&s->local_node);
-#endif
         return 1;
 }
+#endif
 
 int calculate_sizes(struct kmem_cache *s)
 {
_
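For readers following the bootstrap logic in early_kmem_cache_node_alloc() above, here is a
minimal user-space sketch of the same pattern: carve the allocator's own node structure out of
the first object of a freshly allocated slab, then keep the remaining objects reachable so they
are not lost (the patch achieves this in the kernel via add_partial()).  This is plain C with
hypothetical names (fake_node, free_obj, SLAB_SIZE, OBJ_SIZE); it is an illustration of the
idea, not slub.c code.

/* Illustrative sketch only -- hypothetical names, not kernel code. */
#include <stdio.h>
#include <stdlib.h>

#define SLAB_SIZE 4096
#define OBJ_SIZE  128                     /* must be >= sizeof(struct fake_node) */

struct free_obj { struct free_obj *next; };

struct fake_node {                        /* stand-in for struct kmem_cache_node */
        unsigned long nr_partial;
        struct free_obj *freelist;        /* objects still available in the slab */
};

int main(void)
{
        char *slab = malloc(SLAB_SIZE);   /* stand-in for new_slab() */
        struct free_obj *head = NULL;
        struct fake_node *n;
        int i;

        if (!slab)
                return 1;

        /* Thread every object of the slab onto a freelist. */
        for (i = SLAB_SIZE / OBJ_SIZE - 1; i >= 0; i--) {
                struct free_obj *obj = (struct free_obj *)(slab + i * OBJ_SIZE);
                obj->next = head;
                head = obj;
        }

        /* Take the first object for the node structure itself... */
        n = (struct fake_node *)head;
        head = head->next;

        /*
         * ...and keep the rest of the slab reachable instead of leaking it.
         * This is the step the patch adds: without it, the remaining
         * objects of the bootstrap slab would be lost for good.
         */
        n->freelist = head;
        n->nr_partial = 1;

        printf("objects still usable: %d\n", SLAB_SIZE / OBJ_SIZE - 1);
        free(slab);
        return 0;
}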