Use constants from gfp.h in mmzone.h

This exploits the deferred nature of macro definitions: GFP_ZONEMASK is
not used within mmzone.h itself, so it does not matter that the
__GFP_xxx values are defined not in this header file but in gfp.h.

The slab.c part embeds the alien cache pointer array directly into
struct kmem_list3 (sized by MAX_NUMNODES) instead of allocating it
separately. This removes the l3->alien NULL checks as well as the
separate allocation and freeing of the pointer array.

Signed-off-by: Christoph Lameter

Index: linux-2.6.18-rc1-mm1/include/linux/mmzone.h
===================================================================
--- linux-2.6.18-rc1-mm1.orig/include/linux/mmzone.h	2006-07-11 11:46:05.575945865 -0700
+++ linux-2.6.18-rc1-mm1/include/linux/mmzone.h	2006-07-11 19:00:37.898858452 -0700
@@ -158,8 +158,6 @@ enum zone_type {
  * of three zone modifier bits, we could require up to eight zonelists.
  * If the left most zone modifier is a "loner" then the highest valid
  * zonelist would be four allowing us to allocate only five zonelists.
- * Use the first form for GFP_ZONETYPES when the left most bit is not
- * a "loner", otherwise use the second.
  *
  * NOTE! Make sure this matches the zones in <linux/gfp.h>
  */
@@ -167,26 +165,23 @@ enum zone_type {
 
 #ifdef CONFIG_ZONE_DMA32
 
 #ifdef CONFIG_HIGHMEM
-#define GFP_ZONETYPES	((GFP_ZONEMASK + 1) / 2 + 1)	/* Loner */
-#define GFP_ZONEMASK	0x07
-#define ZONES_SHIFT	2	/* ceil(log2(MAX_NR_ZONES)) */
+#define GFP_ZONEMASK	(__GFP_DMA|__GFP_DMA32|__GFP_HIGHMEM)
 #else
-#define GFP_ZONETYPES	((0x07 + 1) / 2 + 1)	/* Loner */
-/* Mask __GFP_HIGHMEM */
-#define GFP_ZONEMASK	0x05
-#define ZONES_SHIFT	2
+#define GFP_ZONEMASK	(__GFP_DMA|__GFP_DMA32)
 #endif
+#define ZONES_SHIFT	2	/* ceil(log2(MAX_NR_ZONES)) */
+#define GFP_ZONETYPES	5
 
 #else
 
 #ifdef CONFIG_HIGHMEM
-#define GFP_ZONEMASK	0x03
+#define GFP_ZONEMASK	(__GFP_DMA|__GFP_HIGHMEM)
 #define ZONES_SHIFT	2
 #define GFP_ZONETYPES	3
 #else
-#define GFP_ZONEMASK	0x01
+#define GFP_ZONEMASK	__GFP_DMA
 #define ZONES_SHIFT	1
 #define GFP_ZONETYPES	2

Index: linux-2.6.18-rc1-mm1/mm/slab.c
===================================================================
--- linux-2.6.18-rc1-mm1.orig/mm/slab.c	2006-07-10 10:33:44.362745604 -0700
+++ linux-2.6.18-rc1-mm1/mm/slab.c	2006-07-11 21:59:33.476415935 -0700
@@ -295,9 +295,9 @@ struct kmem_list3 {
 	unsigned int colour_next;	/* Per-node cache coloring */
 	spinlock_t list_lock;
 	struct array_cache *shared;	/* shared per node */
-	struct array_cache **alien;	/* on other nodes */
 	unsigned long next_reap;	/* updated without locking */
 	int free_touched;		/* updated without locking */
+	struct array_cache *alien[MAX_NUMNODES];	/* on other nodes */
 };
 
 /*
@@ -351,7 +351,7 @@ static void kmem_list3_init(struct kmem_
 	INIT_LIST_HEAD(&parent->slabs_partial);
 	INIT_LIST_HEAD(&parent->slabs_free);
 	parent->shared = NULL;
-	parent->alien = NULL;
+	memset(parent->alien, 0, sizeof(parent->alien));
 	parent->colour_next = 0;
 	spin_lock_init(&parent->list_lock);
 	parent->free_objects = 0;
@@ -930,42 +930,35 @@ static int transfer_objects(struct array
 static void *__cache_alloc_node(struct kmem_cache *, gfp_t, int);
 static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
 
-static struct array_cache **alloc_alien_cache(int node, int limit)
+static int alloc_alien_cache(struct kmem_cache *cache, struct kmem_list3 *l3,
+				int node, int limit)
 {
-	struct array_cache **ac_ptr;
-	int memsize = sizeof(void *) * MAX_NUMNODES;
 	int i;
 
 	if (limit > 1)
 		limit = 12;
-	ac_ptr = kmalloc_node(memsize, GFP_KERNEL, node);
-	if (ac_ptr) {
-		for_each_node(i) {
-			if (i == node || !node_online(i)) {
-				ac_ptr[i] = NULL;
-				continue;
-			}
-			ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d);
-			if (!ac_ptr[i]) {
-				for (i--; i <= 0; i--)
-					kfree(ac_ptr[i]);
-				kfree(ac_ptr);
-				return NULL;
-			}
-		}
-	}
-	return ac_ptr;
+
+	for_each_node(i) {
+		if (i == node || !node_online(i)) {
+			l3->alien[i] = NULL;
+			continue;
+		}
+		l3->alien[i] = alloc_arraycache(node, limit, 0xbaadf00d);
+		if (!l3->alien[i]) {
+			/* undo the partial allocation */
+			for (i--; i >= 0; i--) {
+				kfree(l3->alien[i]);
+				l3->alien[i] = NULL;
+			}
+			return 0;
+		}
+	}
+	return 1;
 }
 
-static void free_alien_cache(struct array_cache **ac_ptr)
+static void free_alien_cache(struct kmem_list3 *l3)
 {
 	int i;
 
-	if (!ac_ptr)
-		return;
-	for_each_node(i)
-		kfree(ac_ptr[i]);
-	kfree(ac_ptr);
+	/* clear the slots so a later free_alien_cache() is a no-op */
+	for_each_node(i) {
+		kfree(l3->alien[i]);
+		l3->alien[i] = NULL;
+	}
 }
 
 static void __drain_alien_cache(struct kmem_cache *cachep,
@@ -996,25 +989,23 @@ static void reap_alien(struct kmem_cache
 {
 	int node = __get_cpu_var(reap_node);
 
-	if (l3->alien) {
-		struct array_cache *ac = l3->alien[node];
+	struct array_cache *ac = l3->alien[node];
 
-		if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
-			__drain_alien_cache(cachep, ac, node);
-			spin_unlock_irq(&ac->lock);
-		}
+	if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
+		__drain_alien_cache(cachep, ac, node);
+		spin_unlock_irq(&ac->lock);
 	}
 }
 
 static void drain_alien_cache(struct kmem_cache *cachep,
-				struct array_cache **alien)
+				struct kmem_list3 *l3)
 {
 	int i = 0;
 	struct array_cache *ac;
 	unsigned long flags;
 
 	for_each_online_node(i) {
-		ac = alien[i];
+		ac = l3->alien[i];
 		if (ac) {
 			spin_lock_irqsave(&ac->lock, flags);
 			__drain_alien_cache(cachep, ac, i);
@@ -1040,7 +1031,7 @@ static inline int cache_free_alien(struc
 	l3 = cachep->nodelists[numa_node_id()];
 	STATS_INC_NODEFREES(cachep);
-	if (l3->alien && l3->alien[nodeid]) {
+	if (l3->alien[nodeid]) {
 		alien = l3->alien[nodeid];
 		spin_lock_nested(&alien->lock, nesting);
 		if (unlikely(alien->avail == alien->limit)) {
@@ -1062,12 +1053,13 @@ static inline int cache_free_alien(struc
 #define drain_alien_cache(cachep, alien) do { } while (0)
 #define reap_alien(cachep, l3) do { } while (0)
 
-static inline struct array_cache **alloc_alien_cache(int node, int limit)
+static inline int alloc_alien_cache(struct kmem_cache *cache,
+		struct kmem_list3 *l3, int node, int limit)
 {
-	return (struct array_cache **) 0x01020304ul;
+	return 1;
 }
 
-static inline void free_alien_cache(struct array_cache **ac_ptr)
+static inline void free_alien_cache(struct kmem_list3 *l3)
 {
 }
 
@@ -1134,7 +1126,6 @@ static int __devinit cpuup_callback(stru
 		list_for_each_entry(cachep, &cache_chain, next) {
 			struct array_cache *nc;
 			struct array_cache *shared;
-			struct array_cache **alien;
 
 			nc = alloc_arraycache(node, cachep->limit,
 						cachep->batchcount);
@@ -1146,9 +1137,7 @@ static int __devinit cpuup_callback(stru
 			if (!shared)
 				goto bad;
 
-			alien = alloc_alien_cache(node, cachep->limit);
-			if (!alien)
-				goto bad;
+			if (!alloc_alien_cache(cachep, cachep->nodelists[node],
+						node, cachep->limit))
+				goto bad;
 			cachep->array[cpu] = nc;
 			l3 = cachep->nodelists[node];
 			BUG_ON(!l3);
@@ -1162,15 +1151,8 @@ static int __devinit cpuup_callback(stru
 				l3->shared = shared;
 				shared = NULL;
 			}
-#ifdef CONFIG_NUMA
-			if (!l3->alien) {
-				l3->alien = alien;
-				alien = NULL;
-			}
-#endif
 			spin_unlock_irq(&l3->list_lock);
 			kfree(shared);
-			free_alien_cache(alien);
 		}
 		mutex_unlock(&cache_chain_mutex);
 		break;
@@ -1193,7 +1175,6 @@ static int __devinit cpuup_callback(stru
 		list_for_each_entry(cachep, &cache_chain, next) {
 			struct array_cache *nc;
 			struct array_cache *shared;
-			struct array_cache **alien;
 			cpumask_t mask;
 
 			mask = node_to_cpumask(node);
@@ -1224,16 +1205,11 @@ static int __devinit cpuup_callback(stru
 				l3->shared = NULL;
 			}
 
-			alien = l3->alien;
-			l3->alien = NULL;
-
 			spin_unlock_irq(&l3->list_lock);
 
 			kfree(shared);
-			if (alien) {
-				drain_alien_cache(cachep, alien);
-				free_alien_cache(alien);
-			}
+			drain_alien_cache(cachep, l3);
+			free_alien_cache(l3);
 free_array_cache:
 			kfree(nc);
 		}
@@ -2265,8 +2241,8 @@ static void drain_cpu_caches(struct kmem
 	check_irq_on();
 	for_each_online_node(node) {
 		l3 = cachep->nodelists[node];
-		if (l3 && l3->alien)
-			drain_alien_cache(cachep, l3->alien);
+		if (l3)
+			drain_alien_cache(cachep, l3);
 	}
 
 	for_each_online_node(node) {
@@ -2408,7 +2384,7 @@ int kmem_cache_destroy(struct kmem_cache
 		l3 = cachep->nodelists[i];
 		if (l3) {
 			kfree(l3->shared);
-			free_alien_cache(l3->alien);
+			free_alien_cache(l3);
 			kfree(l3);
 		}
 	}
@@ -3499,19 +3475,16 @@ static int alloc_kmemlist(struct kmem_ca
 	int node;
 	struct kmem_list3 *l3;
 	struct array_cache *new_shared;
-	struct array_cache **new_alien;
 
 	for_each_online_node(node) {
 
-		new_alien = alloc_alien_cache(node, cachep->limit);
-		if (!new_alien)
-			goto fail;
+		l3 = cachep->nodelists[node];
+		if (l3 && !alloc_alien_cache(cachep, l3, node, cachep->limit))
+			goto fail;
 
 		new_shared = alloc_arraycache(node,
 				cachep->shared*cachep->batchcount,
 				0xbaadf00d);
 		if (!new_shared) {
-			free_alien_cache(new_alien);
 			goto fail;
 		}
 
@@ -3526,20 +3499,15 @@ static int alloc_kmemlist(struct kmem_ca
 					shared->avail, node);
 
 			l3->shared = new_shared;
-			if (!l3->alien) {
-				l3->alien = new_alien;
-				new_alien = NULL;
-			}
 			l3->free_limit = (1 + nr_cpus_node(node)) *
 					cachep->batchcount + cachep->num;
 			spin_unlock_irq(&l3->list_lock);
 			kfree(shared);
-			free_alien_cache(new_alien);
 			continue;
 		}
 		l3 = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, node);
 		if (!l3) {
-			free_alien_cache(new_alien);
 			kfree(new_shared);
 			goto fail;
 		}
@@ -3548,7 +3516,6 @@ static int alloc_kmemlist(struct kmem_ca
 		l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
 				((unsigned long)cachep) % REAPTIMEOUT_LIST3;
 		l3->shared = new_shared;
-		l3->alien = new_alien;
 		l3->free_limit = (1 + nr_cpus_node(node)) *
 				cachep->batchcount + cachep->num;
 		cachep->nodelists[node] = l3;
@@ -3564,7 +3531,7 @@ fail:
 		l3 = cachep->nodelists[node];
 
 		kfree(l3->shared);
-		free_alien_cache(l3->alien);
+		free_alien_cache(l3);
 		kfree(l3);
 		cachep->nodelists[node] = NULL;
 	}
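
A footnote on the "deferred macro definitions" point in the changelog: a
C object-like macro body is expanded where the macro is *used*, not where
it is defined, so GFP_ZONEMASK may legally reference __GFP_DMA and friends
before gfp.h defines them, provided every user of GFP_ZONEMASK sees gfp.h
first. A minimal userspace sketch of the same mechanism (the file
boundaries are simulated; the names and bit values mimic the kernel
headers but are local to this demo):

	/* zonemask_demo.c - illustrative only; build with: gcc zonemask_demo.c */
	#include <stdio.h>

	/* stands in for mmzone.h: uses the __GFP_* names before they exist */
	#define GFP_ZONEMASK	(__GFP_DMA|__GFP_DMA32|__GFP_HIGHMEM)

	/* stands in for gfp.h: the zone modifier bits, defined afterwards */
	#define __GFP_DMA	0x01u
	#define __GFP_HIGHMEM	0x02u
	#define __GFP_DMA32	0x04u

	int main(void)
	{
		/* GFP_ZONEMASK expands here, after all three bits are defined */
		printf("GFP_ZONEMASK = 0x%02x\n", GFP_ZONEMASK);
		return 0;
	}

This prints 0x07, the literal that the patch removes from the
CONFIG_HIGHMEM case.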