---
 include/linux/quicklist.h |   95 ++++++++++++++++++++++++++++++++++++++++++++++
 mm/Makefile               |    2 
 mm/quicklist.c            |   81 +++++++++++++++++++++++++++++++++++++++
 3 files changed, 178 insertions(+)

Index: linux-2.6.21-rc3-mm2/include/linux/quicklist.h
===================================================================
--- /dev/null	1970-01-01 00:00:00.000000000 +0000
+++ linux-2.6.21-rc3-mm2/include/linux/quicklist.h	2007-06-12 23:02:01.000000000 -0700
@@ -0,0 +1,95 @@
+#ifndef LINUX_QUICKLIST_H
+#define LINUX_QUICKLIST_H
+/*
+ * Fast allocations and disposal of pages. Pages must be in the condition
+ * as needed after allocation when they are freed. Per cpu lists of pages
+ * are kept that only contain node local pages.
+ *
+ * (C) 2007, SGI. Christoph Lameter <clameter@sgi.com>
+ */
+#include <linux/kernel.h>
+#include <linux/gfp.h>
+#include <linux/percpu.h>
+
+#ifdef CONFIG_QUICKLIST
+
+#ifndef CONFIG_NR_QUICK
+#define CONFIG_NR_QUICK 1
+#endif
+
+struct quicklist {
+	void *page;
+	int nr_pages;
+};
+
+DECLARE_PER_CPU(struct quicklist, quicklist)[CONFIG_NR_QUICK];
+
+/*
+ * The two key functions quicklist_alloc and quicklist_free are inline so
+ * that they may be custom compiled for the platform.
+ * Specifying a NULL ctor can remove constructor support. Specifying
+ * a constant quicklist allows the determination of the exact address
+ * in the per cpu area.
+ *
+ * The fast path in quicklist_alloc touches only a per cpu cacheline and
+ * the first cacheline of the page itself. There is minimal overhead involved.
+ */
+static inline void *quicklist_alloc(int nr, gfp_t flags, void (*ctor)(void *))
+{
+	struct quicklist *q;
+	void **p = NULL;
+
+	q = &get_cpu_var(quicklist)[nr];
+	p = q->page;
+	if (likely(p)) {
+		q->page = p[0];
+		p[0] = NULL;
+		q->nr_pages--;
+	}
+	put_cpu_var(quicklist);
+	if (likely(p))
+		return p;
+
+	p = (void *)__get_free_page(flags | __GFP_ZERO);
+	if (ctor && p)
+		ctor(p);
+	return p;
+}
+
+static inline void quicklist_free(int nr, void (*dtor)(void *), void *pp)
+{
+	struct quicklist *q;
+	void **p = pp;
+	struct page *page = virt_to_page(p);
+	int nid = page_to_nid(page);
+
+	if (unlikely(nid != numa_node_id())) {
+		if (dtor)
+			dtor(p);
+		free_page((unsigned long)p);
+		return;
+	}
+
+	q = &get_cpu_var(quicklist)[nr];
+	p[0] = q->page;
+	q->page = p;
+	q->nr_pages++;
+	put_cpu_var(quicklist);
+}
+
+void quicklist_check(int nr, void (*dtor)(void *));
+unsigned long quicklist_total_size(void);
+
+#else
+static inline void quicklist_check(int nr, void (*dtor)(void *))
+{
+}
+
+static inline unsigned long quicklist_total_size(void)
+{
+	return 0;
+}
+#endif
+
+#endif /* LINUX_QUICKLIST_H */
+
Index: linux-2.6.21-rc3-mm2/mm/Makefile
===================================================================
--- linux-2.6.21-rc3-mm2.orig/mm/Makefile	2007-03-19 12:40:10.000000000 -0700
+++ linux-2.6.21-rc3-mm2/mm/Makefile	2007-03-19 16:18:50.000000000 -0700
@@ -30,3 +30,5 @@ obj-$(CONFIG_MEMORY_HOTPLUG) += memory_h
 obj-$(CONFIG_FS_XIP) += filemap_xip.o
 obj-$(CONFIG_MIGRATION) += migrate.o
 obj-$(CONFIG_SMP) += allocpercpu.o
+obj-$(CONFIG_QUICKLIST) += quicklist.o
+
Index: linux-2.6.21-rc3-mm2/mm/quicklist.c
===================================================================
--- /dev/null	1970-01-01 00:00:00.000000000 +0000
+++ linux-2.6.21-rc3-mm2/mm/quicklist.c	2007-06-12 23:02:01.000000000 -0700
@@ -0,0 +1,81 @@
+/*
+ * Quicklist support.
+ *
+ * Quicklists are light weight lists of pages that have a defined state
+ * on alloc and free. Pages must be in the quicklist specific defined state
+ * (zero by default) when the page is freed. It seems that the initial idea
+ * for such lists first came from Dave Miller and then various other people
+ * improved on it.
+ *
+ * Copyright (C) 2007 SGI,
+ *	Christoph Lameter <clameter@sgi.com>
+ *		Generalized, added support for multiple lists and
+ *		constructors / destructors.
+ */
+#include <linux/kernel.h>
+
+#include <linux/gfp.h>
+#include <linux/mmzone.h>
+#include <linux/module.h>
+#include <linux/quicklist.h>
+
+DEFINE_PER_CPU(struct quicklist, quicklist)[CONFIG_NR_QUICK];
+
+#define MIN_PAGES 25
+#define MAX_FREES_PER_PASS 16
+#define FRACTION_OF_NODE_MEM	16
+
+static unsigned long max_pages(void)
+{
+	unsigned long node_free_pages, max;
+
+	node_free_pages = node_page_state(numa_node_id(),
+			NR_FREE_PAGES);
+	max = node_free_pages / FRACTION_OF_NODE_MEM;
+	return max(max, (unsigned long)MIN_PAGES);
+}
+
+static long min_pages_to_free(struct quicklist *q)
+{
+	long pages_to_free;
+
+	pages_to_free = q->nr_pages - max_pages();
+
+	return min(pages_to_free, (long)MAX_FREES_PER_PASS);
+}
+
+void quicklist_check(int nr, void (*dtor)(void *))
+{
+	long pages_to_free;
+	struct quicklist *q;
+
+	q = &get_cpu_var(quicklist)[nr];
+	if (q->nr_pages > MIN_PAGES) {
+		pages_to_free = min_pages_to_free(q);
+
+		while (pages_to_free > 0) {
+			void *p = quicklist_alloc(nr, 0, NULL);
+
+			if (dtor)
+				dtor(p);
+			free_page((unsigned long)p);
+			pages_to_free--;
+		}
+	}
+	put_cpu_var(quicklist);
+}
+
+unsigned long quicklist_total_size(void)
+{
+	unsigned long count = 0;
+	int cpu;
+	struct quicklist *ql, *q;
+
+	for_each_online_cpu(cpu) {
+		ql = per_cpu(quicklist, cpu);
+		for (q = ql; q < ql + CONFIG_NR_QUICK; q++)
+			count += q->nr_pages;
+	}
+	return count;
+}
+