From: Christoph Lameter Note that these limits could now be removed since we no longer use page->private for compound pages. But I think this is fine for now. Signed-off-by: Christoph Lameter Signed-off-by: Andrew Morton --- mm/slub.c | 22 +++++++++++++++++++++- 1 files changed, 21 insertions(+), 1 deletion(-) diff -puN mm/slub.c~slub-core-explain-the-64k-limits mm/slub.c --- a/mm/slub.c~slub-core-explain-the-64k-limits +++ a/mm/slub.c @@ -1571,6 +1571,12 @@ static int calculate_sizes(struct kmem_c return 0; s->objects = (PAGE_SIZE << s->order) / size; + + /* + * Verify that the number of objects is within permitted limits. + * The page->inuse field is only 16 bit wide! So we cannot have + * more than 64k objects per slab. + */ if (!s->objects || s->objects > 65535) return 0; return 1; @@ -1593,9 +1599,23 @@ static int kmem_cache_open(struct kmem_c BUG_ON(flags & SLUB_UNIMPLEMENTED); - if (s->size >= 65535 * sizeof(void *)) + /* + * The page->offset field is only 16 bit wide. This is an offset + * in units of words from the beginning of an object. If the slab + * size is bigger, then we cannot move the free pointer behind the + * object anymore. + * + * On 32 bit platforms the limit is 256k. On 64bit platforms + * the limit is 512k. + * + * Debugging or ctor/dtors may create a need to move the free + * pointer. Fail if this happens. + */ + if (s->size >= 65535 * sizeof(void *)) { BUG_ON(flags & (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | SLAB_DESTROY_BY_RCU)); + BUG_ON(ctor || dtor); + } else /* * Enable debugging if selected on the kernel commandline. _