From: Christoph Lameter
Subject: slub: Fix up comments

Signed-off-by: Christoph Lameter
---
 include/linux/slub_def.h |    8 +++---
 mm/slub.c                |   54 ++++++++++++++++++++++++++---------------------
 2 files changed, 35 insertions(+), 27 deletions(-)

Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2008-02-15 23:27:15.000000000 -0800
+++ linux-2.6/mm/slub.c	2008-02-15 23:28:13.000000000 -0800
@@ -297,6 +297,7 @@ static void *slab_address(struct page *p
 	return page->end - PAGE_MAPPING_ANON;
 }
 
+/* Determine the maximum number of objects that a slab page can hold */
 static inline unsigned long slab_objects(struct kmem_cache *s, struct page *page)
 {
 	if (PageCompound(page))
@@ -304,6 +305,7 @@ static inline unsigned long slab_objects
 	return s->objects0;
 }
 
+/* Verify that a pointer has an address that is valid within a slab page */
 static inline int check_valid_pointer(struct kmem_cache *s,
 				struct page *page, const void *object)
 {
@@ -634,7 +636,7 @@ static int check_bytes_and_report(struct
  * 	A. Free pointer (if we cannot overwrite object on free)
  * 	B. Tracking data for SLAB_STORE_USER
  * 	C. Padding to reach required alignment boundary or at mininum
- * 		one word if debuggin is on to be able to detect writes
+ * 		one word if debugging is on to be able to detect writes
  * 		before the word boundary.
  *
  * 	Padding is done using 0x5a (POISON_INUSE)
@@ -1318,7 +1320,7 @@ static struct page *get_any_partial(stru
 	 * may return off node objects because partial slabs are obtained
 	 * from other nodes and filled up.
 	 *
-	 * If /sys/slab/xx/defrag_ratio is set to 100 (which makes
+	 * If /sys/kernel/slab/xx/defrag_ratio is set to 100 (which makes
 	 * defrag_ratio = 1000) then every (well almost) allocation will
 	 * first attempt to defrag slab caches on other nodes. This means
 	 * scanning over all nodes to look for partial slabs which may be
@@ -1393,9 +1395,11 @@ static void unfreeze_slab(struct kmem_ca
 		 * Adding an empty slab to the partial slabs in order
 		 * to avoid page allocator overhead. This slab needs
 		 * to come after the other slabs with objects in
-		 * order to fill them up. That way the size of the
-		 * partial list stays small. kmem_cache_shrink can
-		 * reclaim empty slabs from the partial list.
+		 * so that the others get filled first. That way the
+		 * size of the partial list stays small.
+		 *
+		 * kmem_cache_shrink can reclaim any empty slabs from the
+		 * partial list.
 		 */
 		add_partial(n, page, 1);
 		slab_unlock(page);
@@ -1418,7 +1422,7 @@ static void deactivate_slab(struct kmem_
 	if (c->freelist)
 		stat(c, DEACTIVATE_REMOTE_FREES);
 	/*
-	 * Merge cpu freelist into freelist. Typically we get here
+	 * Merge cpu freelist into slab freelist. Typically we get here
 	 * because both freelists are empty. So this is unlikely
 	 * to occur.
 	 *
@@ -1453,6 +1457,7 @@ static inline void flush_slab(struct kme
 
 /*
  * Flush cpu slab.
+ *
  * Called from IPI handler with interrupts disabled.
  */
 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
@@ -1511,7 +1516,8 @@ static inline int node_match(struct kmem
  * rest of the freelist to the lockless freelist.
  *
  * And if we were unable to get a new slab from the partial slab lists then
- * we need to allocate a new slab. This is slowest path since we may sleep.
+ * we need to allocate a new slab. This is the slowest path since it involves
+ * a call to the page allocator and the setup of a new slab.
  */
 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags,
 		int node, void *addr, struct kmem_cache_cpu *c)
@@ -1529,7 +1535,9 @@ static void *__slab_alloc(struct kmem_ca
 	slab_lock(c->page);
 	if (unlikely(!node_match(c, node)))
 		goto another_slab;
+
 	stat(c, ALLOC_REFILL);
+
 load_freelist:
 	object = c->page->freelist;
 	if (unlikely(object == c->page->end))
@@ -1699,6 +1707,7 @@ static void __slab_free(struct kmem_cach
 
 	if (unlikely(SlabDebug(page)))
 		goto debug;
+
 checks_ok:
 	prior = object[offset] = page->freelist;
 	page->freelist = object;
@@ -1713,8 +1722,7 @@ checks_ok:
 		goto slab_empty;
 
 	/*
-	 * Objects left in the slab. If it
-	 * was not on the partial list before
+	 * Objects left in the slab. If it was not on the partial list before
 	 * then add it.
 	 */
 	if (unlikely(prior == page->end)) {
@@ -1957,13 +1965,11 @@ static unsigned long calculate_alignment
 		unsigned long align, unsigned long size)
 {
 	/*
-	 * If the user wants hardware cache aligned objects then
-	 * follow that suggestion if the object is sufficiently
-	 * large.
+	 * If the user wants hardware cache aligned objects then follow that
+	 * suggestion if the object is sufficiently large.
 	 *
-	 * The hardware cache alignment cannot override the
-	 * specified alignment though. If that is greater
-	 * then use it.
+	 * The hardware cache alignment cannot override the specified
+	 * alignment though. If that is greater then use it.
 	 */
 	if ((flags & SLAB_HWCACHE_ALIGN) &&
 			size > cache_line_size() / 2)
@@ -2161,6 +2167,7 @@ static struct kmem_cache_node *early_kme
 #endif
 	init_kmem_cache_node(n);
 	atomic_long_inc(&n->nr_slabs);
+
 	/*
 	 * lockdep requires consistent irq usage for each lock
 	 * so even though there cannot be a race this early in
@@ -2345,7 +2352,8 @@ static int calculate_sizes(struct kmem_c
 	/*
 	 * Determine the number of objects per slab
 	 */
-	s->objects = max_t(unsigned long, s->objects, (PAGE_SIZE << s->order) / size);
+	s->objects = max_t(unsigned long, s->objects,
+			(PAGE_SIZE << s->order) / size);
 	s->objects0 = PAGE_SIZE / size;
 
 	return !!s->objects;
@@ -2405,7 +2413,7 @@ int kmem_ptr_validate(struct kmem_cache
 	/*
 	 * We could also check if the object is on the slabs freelist.
 	 * But this would be too expensive and it seems that the main
-	 * purpose of kmem_ptr_valid is to check if the object belongs
+	 * purpose of kmem_ptr_valid() is to check if the object belongs
 	 * to a certain slab.
 	 */
 	return 1;
@@ -3037,7 +3045,7 @@ void __init kmem_cache_init(void)
 	/*
 	 * Patch up the size_index table if we have strange large alignment
 	 * requirements for the kmalloc array. This is only the case for
-	 * mips it seems. The standard arches will not generate any code here.
+	 * MIPS it seems. The standard arches will not generate any code here.
 	 *
 	 * Largest permitted alignment is 256 bytes due to the way we
 	 * handle the index determination for the smaller caches.
@@ -3066,7 +3074,6 @@ void __init kmem_cache_init(void)
 	kmem_size = sizeof(struct kmem_cache);
 #endif
 
-
 	printk(KERN_INFO
 		"SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
 		" CPUs=%d, Nodes=%d\n",
@@ -3160,12 +3167,15 @@ struct kmem_cache *kmem_cache_create(con
 		 */
 		for_each_online_cpu(cpu)
 			get_cpu_slab(s, cpu)->objsize = s->objsize;
+
 		s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
 		up_write(&slub_lock);
+
 		if (sysfs_slab_alias(s, name))
 			goto err;
 		return s;
 	}
+
 	s = kmalloc(kmem_size, GFP_KERNEL);
 	if (s) {
 		if (kmem_cache_open(s, GFP_KERNEL, name,
@@ -4077,7 +4087,6 @@ SLAB_ATTR(remote_node_defrag_ratio);
 #endif
 
 #ifdef CONFIG_SLUB_STATS
-
 static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
 {
 	unsigned long sum = 0;
@@ -4130,7 +4139,6 @@ STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate
 STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
 STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
 STAT_ATTR(ORDER_FALLBACK, order_fallback);
-
 #endif
 
 static struct attribute *slab_attrs[] = {
@@ -4264,8 +4272,8 @@ static struct kset *slab_kset;
 #define ID_STR_LENGTH 64
 
 /* Create a unique string id for a slab cache:
- * format
- * :[flags-]size:[memory address of kmemcache]
+ *
+ * Format	:[flags-]size
 */
 static char *create_unique_id(struct kmem_cache *s)
 {
Index: linux-2.6/include/linux/slub_def.h
===================================================================
--- linux-2.6.orig/include/linux/slub_def.h	2008-02-15 23:26:26.000000000 -0800
+++ linux-2.6/include/linux/slub_def.h	2008-02-15 23:28:13.000000000 -0800
@@ -29,7 +29,7 @@ enum stat_item {
 	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
 	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
 	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
-	ORDER_FALLBACK,		/* Allocation that fell back to order 0 */
+	ORDER_FALLBACK,		/* Number of times fallback was necessary */
 	NR_SLUB_STAT_ITEMS };
 
 struct kmem_cache_cpu {
@@ -63,7 +63,7 @@ struct kmem_cache {
 	int size;		/* The size of an object including meta data */
 	int objsize;		/* The size of an object without meta data */
 	int offset;		/* Free pointer offset. */
-	int order;
+	int order;		/* Current preferred allocation order */
 
 	/*
 	 * Avoid an extra cache line for UP, SMP and for the node local to
@@ -73,7 +73,7 @@ struct kmem_cache {
 
 	/* Allocation and freeing of slabs */
 	int objects;		/* Number of objects in a slab of maximum size */
-	int objects0;		/* Number of object in an order 0 size slab */
+	int objects0;		/* Number of objects in an order 0 sized slab */
 	gfp_t allocflags;	/* gfp flags to use on each alloc */
 	int refcount;		/* Refcount for slab cache destroy */
 	void (*ctor)(struct kmem_cache *, void *);
@@ -141,11 +141,11 @@ static __always_inline int kmalloc_index
 	if (size <=  512) return 9;
 	if (size <= 1024) return 10;
 	if (size <= 2 * 1024) return 11;
+	if (size <= 4 * 1024) return 12;
 /*
 * The following is only needed to support architectures with a larger page
 * size than 4k.
 */
-	if (size <= 4 * 1024) return 12;
 	if (size <= 8 * 1024) return 13;
 	if (size <= 16 * 1024) return 14;
 	if (size <= 32 * 1024) return 15;
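
As a side note for readers following the objects/objects0 comments above: both counts come down to (PAGE_SIZE << order) / size, evaluated once at the cache's preferred order and once at order 0, essentially as in the calculate_sizes() hunk. Below is a minimal userspace sketch of that arithmetic, not kernel code; the 4k PAGE_SIZE, the sample object sizes and the helper name are assumptions for illustration only.

#include <stdio.h>

#define PAGE_SIZE 4096UL	/* assume 4k pages */

/* number of objects that fit into a slab of 2^order pages */
static unsigned long objects_for_order(unsigned long size, unsigned int order)
{
	return (PAGE_SIZE << order) / size;
}

int main(void)
{
	unsigned long sizes[] = { 256, 700, 1024, 4096 };

	for (unsigned int i = 0; i < 4; i++)
		printf("size %4lu: objects=%2lu at order 1, objects0=%2lu at order 0\n",
		       sizes[i], objects_for_order(sizes[i], 1),
		       objects_for_order(sizes[i], 0));
	return 0;
}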
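
Similarly, the reworded calculate_alignment() comment describes a simple rule: honor SLAB_HWCACHE_ALIGN only when the object is larger than half a cache line, and never let it reduce an explicitly requested alignment. A rough userspace sketch of just that rule follows; the flag bit, the 64-byte line size and the max helper are placeholders, and the real kernel function additionally enforces ARCH_SLAB_MINALIGN and word alignment.

#include <stdio.h>

#define SLAB_HWCACHE_ALIGN	0x2000UL	/* placeholder flag bit */
#define CACHE_LINE_SIZE		64UL		/* assumed L1 line size */

static unsigned long max_ul(unsigned long a, unsigned long b)
{
	return a > b ? a : b;
}

/* hw cache alignment only for sufficiently large objects; a larger
 * explicitly requested alignment always wins */
static unsigned long calc_align(unsigned long flags, unsigned long align,
				unsigned long size)
{
	if ((flags & SLAB_HWCACHE_ALIGN) && size > CACHE_LINE_SIZE / 2)
		return max_ul(align, CACHE_LINE_SIZE);
	return max_ul(align, sizeof(void *));
}

int main(void)
{
	printf("%lu\n", calc_align(SLAB_HWCACHE_ALIGN, 8, 24));	/* small object: stays 8 */
	printf("%lu\n", calc_align(SLAB_HWCACHE_ALIGN, 8, 200));	/* large object: 64 */
	printf("%lu\n", calc_align(SLAB_HWCACHE_ALIGN, 128, 200));	/* explicit 128 wins */
	return 0;
}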