---
 include/linux/slab.h |   41 ++++++++-----------
 mm/slub.c            |  108 ++++++++++++---------------------------------------
 2 files changed, 45 insertions(+), 104 deletions(-)

Index: slub/include/linux/slab.h
===================================================================
--- slub.orig/include/linux/slab.h	2007-05-17 18:45:33.000000000 -0700
+++ slub/include/linux/slab.h	2007-05-17 20:04:59.000000000 -0700
@@ -43,35 +43,32 @@ struct kmem_cache_ops {
 	 * Called with slab lock held and interrupts disabled.
 	 * No slab operation may be performed.
 	 *
-	 * Return 0 if reference was successfully obtained
-	 * Return 1 if a concurrent kmem_cache_free is waiting to free object
-	 * Return -errcode if it is not possible to free the object.
-	 * No reference was obtained.
-	 */
-	int (*get)(struct kmem_cache *, void *);
-
-	/*
-	 * Use to restore the reference count if we abandon the
-	 * attempt to vacate a slab page due to an unmovable
-	 * object. No locks are held, interrupts are enabled.
+	 * Parameters passed are the number of objects to process
+	 * and an array of pointers to objects for which we
+	 * need references.
+	 *
+	 * Returns a pointer that is passed to the kick function.
+	 * If not all objects can be moved then the pointer may
+	 * indicate that this won't work and then kick can simply
+	 * drop the references that were already obtained.
+	 *
+	 * The array passed to get() is also passed to kick(). The
+	 * function may remove objects by setting array elements to NULL.
 	 */
-	void (*put)(struct kmem_cache *, void *);
+	void *(*get)(struct kmem_cache *, int nr, void **);
 
 	/*
 	 * Called with no locks held and interrupts enabled.
 	 * Any operation may be performed in kick_object.
 	 *
-	 * Return 0 for success
-	 * Return -errcode aborts further kicks to objects in the slab
-	 */
-	int (*kick)(struct kmem_cache *, void *);
-
-	/*
-	 * Callback to make sure that all object freeing is complete.
-	 * If the slab destroys objects by RCU then this needs to be
-	 * set to synchronize_rcu().
+	 * Parameters passed are the number of objects in the array,
+	 * the array of pointers to the objects and the pointer
+	 * returned by get().
+	 *
+	 * Success is checked by examining the number of remaining
+	 * objects in the slab.
 	 */
-	void (*sync)(void);
+	void (*kick)(struct kmem_cache *, int nr, void **, void *private);
 };
 
 struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
Index: slub/mm/slub.c
===================================================================
--- slub.orig/mm/slub.c	2007-05-17 18:36:36.000000000 -0700
+++ slub/mm/slub.c	2007-05-17 20:01:09.000000000 -0700
@@ -2375,60 +2375,32 @@ void resequence_freelist(struct kmem_cac
  * Returns the number of remaining objects
  */
 static int __kmem_cache_vacate(struct kmem_cache *s,
-		struct page *page, unsigned long flags)
+		struct page *page, unsigned long flags, void **vector)
 {
 	void *p;
 	void *addr = page_address(page);
 	DECLARE_BITMAP(map, s->objects);
 	int leftover;
-	int abort = 0;
+	int objects;
+	void *private;
 
 	if (!page->inuse)
-		return 0;
+		goto out;
 
-	/* Determine free objects */
+	/* Determine used objects */
 	bitmap_fill(map, s->objects);
 	for_each_free_object(p, s, page->freelist)
 		__clear_bit(slab_index(p, s, addr), map);
 
-	/*
-	 * Get a refcount for all used objects. If that fails then
-	 * no KICK callback can be performed.
-	 */
+	objects = 0;
+	memset(vector, 0, s->objects * sizeof(void **));
 	for_each_object(p, s, addr) {
-		int i = slab_index(p, s, addr);
-
-		if (test_bit(i, map)) {
-			int x;
-
-			if (abort) {
-				/* No need to handle this object */
-				__clear_bit(i, map);
-				continue;
-			}
-
-			x = s->ops->get(s, p);
-
-			if (x > 0) {
-				/*
-				 * Concurrent free in progress, there is no
-				 * need to do the kick call for this
-				 * object.
-				 */
-				__clear_bit(i, map);
-				continue;
-			}
-
-			if (x < 0)
-				/*
-				 * Unfreeable object encountered. We have no chance
-				 * to free all objects, so give up.
-				 */
-				abort = 1;
-
-		}
+		if (test_bit(slab_index(p, s, addr), map))
+			vector[objects++] = p;
 	}
 
+	private = s->ops->get(s, objects, vector);
+
 	/*
 	 * Got references. Now we can drop the slab lock. The slab
 	 * is frozen so it cannot vanish from under us nor will
@@ -2442,38 +2414,14 @@ static int __kmem_cache_vacate(struct km
 	 * Perform the KICK callbacks to remove the objects. This is
 	 * expected to remove objects in the slab.
 	 */
-	for_each_object(p, s, addr)
-		if (test_bit(slab_index(p, s, addr), map)) {
-			int x;
-
-			if (abort) {
-				/*
-				 * Cannot free all objects, so simply drop the
-				 * refcounts of the remaining ones.
-				 */
-				if (s->ops->put)
-					s->ops->put(s, p);
-				continue;
-			}
-
-			x = s->ops->kick(s, p);
-
-			if (x < 0)
-				/* Unfreeable object. Abort kicks */
-				abort = 1;
-		}
-
-	/*
-	 * Insure deletion operations have completed.
-	 */
-	if (s->ops->sync && !abort)
-		s->ops->sync();
+	s->ops->kick(s, objects, vector, private);
 
 	/*
 	 * Check the result and unfreeze the slab
 	 */
 	local_irq_save(flags);
 	slab_lock(page);
+out:
 	leftover = page->inuse;
 	if (leftover > 0)
 		/*
@@ -2507,6 +2455,7 @@ int kmem_cache_vacate(struct page *page)
 	unsigned long flags;
 	struct kmem_cache *s;
 	int vacated = 0;
+	void **vector = NULL;
 
 	/*
 	 * Get a reference to the page. Return if its freed or being freed.
@@ -2519,6 +2468,14 @@
 	if (!PageSlab(page))
 		goto out;
 
+	s = page->slab;
+	if (!s)
+		goto out;
+
+	vector = kmalloc(s->objects * sizeof(void *), GFP_KERNEL);
+	if (!vector)
+		goto out;
+
 	local_irq_save(flags);
 	/*
 	 * The implicit memory barrier in slab_lock guarantees that page->inuse
@@ -2544,13 +2501,13 @@
 	 * We are holding a lock on a slab page and all operations on the
 	 * slab are blocking.
 	 */
-	s = page->slab;
 	if (!s->ops->get || !s->ops->kick)
 		goto out_locked;
 	freeze_from_list(s, page);
-	vacated = __kmem_cache_vacate(s, page, flags) == 0;
+	vacated = __kmem_cache_vacate(s, page, flags, vector) == 0;
 out:
 	put_page(page);
+	kfree(vector);
 	return vacated;
 out_locked:
 	slab_unlock(page);
@@ -2665,7 +2622,8 @@ int kmem_cache_shrink(struct kmem_cache
 
 			local_irq_save(flags);
 			slab_lock(page);
-			__kmem_cache_vacate(s, page, flags);
+			__kmem_cache_vacate(s, page, flags,
+				(void **)slabs_by_inuse);
 		}
 	}
 
@@ -3510,26 +3468,12 @@ static ssize_t ops_show(struct kmem_cach
 		x += sprintf(buf + x, "\n");
 	}
 
-	if (s->ops->put) {
-		x += sprintf(buf + x, "put : ");
-		x += sprint_symbol(buf + x,
-				(unsigned long)s->ops->put);
-		x += sprintf(buf + x, "\n");
-	}
-
 	if (s->ops->kick) {
 		x += sprintf(buf + x, "kick : ");
 		x += sprint_symbol(buf + x,
 				(unsigned long)s->ops->kick);
 		x += sprintf(buf + x, "\n");
 	}
-
-	if (s->ops->sync) {
-		x += sprintf(buf + x, "sync : ");
-		x += sprint_symbol(buf + x,
-				(unsigned long)s->ops->sync);
-		x += sprintf(buf + x, "\n");
-	}
 	return x;
 }
 SLAB_ATTR_RO(ops);
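
For illustration only (not part of the patch): a cache user with refcounted objects could wire up the new batch interface roughly as in the sketch below. struct foo, foo_get, foo_kick and foo_slab_ops are made-up names for this example; only the kmem_cache_ops callback signatures come from the patch above, and a real user would also have to detach the object from its own data structures in kick().

#include <linux/slab.h>

/* Hypothetical object type; stands in for whatever the cache manages. */
struct foo {
	atomic_t refcount;
	/* ... payload ... */
};

/*
 * get() runs with the slab lock held and interrupts disabled, so it may
 * only take references. Slots of objects that are already being freed
 * are cleared so that kick() skips them.
 */
static void *foo_get(struct kmem_cache *s, int nr, void **v)
{
	int i;

	for (i = 0; i < nr; i++) {
		struct foo *f = v[i];

		if (!atomic_inc_not_zero(&f->refcount))
			v[i] = NULL;	/* concurrent free in progress */
	}
	return NULL;	/* no private state needed by kick() */
}

/*
 * kick() runs with no locks held and interrupts enabled. It must make the
 * objects in the vector go away so that the slab page empties; in this
 * sketch that just means dropping the reference taken in get() and freeing
 * the object if it was the last one.
 */
static void foo_kick(struct kmem_cache *s, int nr, void **v, void *private)
{
	int i;

	for (i = 0; i < nr; i++) {
		struct foo *f = v[i];

		if (!f)
			continue;
		if (atomic_dec_and_test(&f->refcount))
			kmem_cache_free(s, f);
	}
}

static struct kmem_cache_ops foo_slab_ops = {
	.get	= foo_get,
	.kick	= foo_kick,
};

__kmem_cache_vacate() then judges success by looking at page->inuse after kick() returns, as described in the slab.h comment above.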