Index: linux-2.6.20-rc1/mm/slub.c
===================================================================
--- linux-2.6.20-rc1.orig/mm/slub.c	2006-12-15 18:17:46.000000000 -0800
+++ linux-2.6.20-rc1/mm/slub.c	2006-12-15 18:46:13.000000000 -0800
@@ -142,6 +142,12 @@
  * fast frees and allocations.
  */
 
+#ifdef SLAB_DEBUG
+void check_free_chain(struct kmem_cache *, struct page *);
+#else
+void check_free_chain(struct kmem_cache *, struct page *) {}
+#endif
+
 /*
  * Locking for each individual slab using the pagelock
  */
@@ -150,10 +156,12 @@
 #ifdef CONFIG_SMP
 	bit_spin_lock(PG_locked, &page->flags);
 #endif
+	check_free_chain(page->slab, page);
 }
 
 static __always_inline void slab_unlock(struct page *page)
 {
+	check_free_chain(page->slab, page);
 #ifdef CONFIG_SMP
 	bit_spin_unlock(PG_locked, &page->flags);
 #endif
@@ -357,12 +365,12 @@
 	return 0;
 }
 
+#ifdef SLAB_DEBUG
 void check_free_chain(struct kmem_cache *s, struct page *page)
 {
-#ifdef SLAB_DEBUG
 	on_freelist(s, page, NULL);
-#endif
 }
+#endif
 
 static void __free_slab(struct kmem_cache *s, struct page *page)
 {
@@ -423,11 +431,14 @@
 	void *end;
 	struct node_slab *n;
 
+	if (s->order)
+		flags |= __GFP_COMP;
+
 	if (flags & __GFP_NO_GROW)
 		return NULL;
 
-	if (s->order)
-		flags |= __GFP_COMP;
+	if (flags & __GFP_WAIT)
+		local_irq_enable();
 
 	if (s->flags & __GFP_DMA)
 		flags |= GFP_DMA;
@@ -437,6 +448,9 @@
 	else
 		page = alloc_pages_node(node, flags & GFP_LEVEL_MASK, s->order);
 
+	if (flags & __GFP_WAIT)
+		local_irq_disable();
+
 	if (!page)
 		return NULL;
 
@@ -444,7 +458,7 @@
 	if (n)
 		atomic_long_inc(&n->nr_slabs);
 
-	mod_zone_page_state(page_zone(page),
+	__mod_zone_page_state(page_zone(page),
 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
 		pages);
@@ -480,7 +494,6 @@
 		last[s->offset] = NULL;
 		page->freelist = start;
 		page->inuse = 0;
-		check_free_chain(s, page);
 	} else
 		__SetPageSlabsingle(page);
 
@@ -599,8 +612,7 @@
 	on_each_cpu(flush_cpu, s , 1, 1);
 }
 
-static __always_inline void *allocate(struct kmem_cache *s,
-					gfp_t gfpflags, int node)
+void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
 	struct cpu_slab *a;
 	void **object;
@@ -611,38 +623,21 @@
 	if (unlikely(!a->page))
 		goto new_slab;
 
-	/*
-	 * Check NUMA conditions if they exist. This is
-	 * optimized away for kmem_cache_alloc().
-	 */
-	if (unlikely(node != -1 && page_to_nid(a->page) != node)) {
-		slab_lock(a->page);
-		deactivate_slab(a);
-		goto new_slab;
-	}
-
 	if (likely(a->nr_free))
 		goto get_object;
 
 	slab_lock(a->page);
-	check_free_chain(s, a->page);
 	if (likely(a->page->freelist))
 		goto get_freelist;
 
 	deactivate_slab(a);
 
 new_slab:
-	a->page = get_partial(s, gfpflags, node);
+	a->page = get_partial(s, gfpflags, numa_node_id());
 	if (unlikely(!a->page)) {
 		struct page *page;
 
-		if (flags & __GFP_WAIT)
-			local_irq_enable();
-
-		page = new_slab(s, gfpflags, node);
-
-		if (flags & __GFP_WAIT)
-			local_irq_disable();
+		page = new_slab(s, gfpflags, -1);
 
 		if (!page) {
 			object = NULL;
@@ -676,7 +671,6 @@
 
 	}
 	__SetPageActive(a->page);
-	check_free_chain(s, a->page);
 
 get_freelist:
 	a->freelist = a->page->freelist;
@@ -702,14 +696,47 @@
 	local_irq_restore(flags);
 	return object;
 }
+EXPORT_SYMBOL(kmem_cache_alloc);
 
-void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
+#ifdef CONFIG_NUMA
+void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
-	return allocate(s, gfpflags, -1);
+	void **object;
+	struct page *page;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	page = get_partial(s, gfpflags, node);
+	if (unlikely(!page)) {
+		struct page *page;
+
+		page = new_slab(s, gfpflags, node);
+
+		if (!page) {
+			object = NULL;
+			goto out;
+		}
+
+		/*
+		 * There is no point in putting single object slabs
+		 * on an active list.
+		 */
+		if (unlikely(s->objects == 1)) {
+			object = page_address(page);
+			goto out;
+		}
+		slab_lock(page);
+	}
+	object = page->freelist;
+	page->freelist = object[page->offset];
+	page->inuse++;
+	putback_slab(s, page);
+out:
+	local_irq_restore(flags);
+	return object;
 }
-EXPORT_SYMBOL(kmem_cache_alloc);
+EXPORT_SYMBOL(kmem_cache_alloc_node);
 
-#ifdef CONFIG_NUMA
 /*
  * Bootstrap function to allow allocation without having cpu slabs
  * and per node structures.
@@ -743,12 +770,6 @@
 	putback_slab(s, page);
 	return object;
 }
-
-void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
-{
-	return allocate(s, gfpflags, node);
-}
-EXPORT_SYMBOL(kmem_cache_alloc_node);
 #endif
 
 void kmem_cache_free(struct kmem_cache *s, void *x)