SLUB: resequence freelist Resequencing a freelist allows restoring allocations in increasing address order to a slab. Not tested. Not sure if this is an advantage. Signed-off-by: Christoph Lameter --- mm/slub.c | 40 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) Index: slub/mm/slub.c =================================================================== --- slub.orig/mm/slub.c 2007-06-04 21:28:06.000000000 -0700 +++ slub/mm/slub.c 2007-06-04 21:28:36.000000000 -0700 @@ -2408,6 +2408,39 @@ static unsigned long count_partial(struc } /* + * Order the freelist so that addresses increase as objects are allocated. + * This is useful to trigger the cpu cacheline prefetching logic. + */ +void resequence_freelist(struct kmem_cache *s, struct page *page) +{ + void *p; + void *last; + void *addr = page_address(page); + DECLARE_BITMAP(map, s->objects); + + bitmap_zero(map, s->objects); + + /* Figure out which objects are on the freelist */ + for_each_free_object(p, s, page->freelist) + set_bit(slab_index(p, s, addr), map); + + last = NULL; + for_each_object(p, s, addr) + if (test_bit(slab_index(p, s, addr), map)) { + if (last) + set_freepointer(s, last, p); + else + page->freelist = p; + last = p; + } + + if (last) + set_freepointer(s, last, NULL); + else + page->freelist = NULL; +} + +/* * Vacate all objects in the given slab. * * Slab must be locked and frozen. Interrupts are disabled (flags must @@ -2467,6 +2500,13 @@ out: * Check the result and unfreeze the slab */ leftover = page->inuse; + if (leftover > 0) + /* + * Cannot free. Let's at least optimize the freelist. We have + * likely touched all the cachelines with the free pointers + * already so it is cheap to do here. + */ + resequence_freelist(s, page); unfreeze_slab(s, page); local_irq_restore(flags); return leftover;