Index: linux-2.6.18-rc4/arch/ia64/Kconfig =================================================================== --- linux-2.6.18-rc4.orig/arch/ia64/Kconfig 2006-08-06 11:20:11.000000000 -0700 +++ linux-2.6.18-rc4/arch/ia64/Kconfig 2006-08-22 21:57:48.539353807 -0700 @@ -258,7 +258,7 @@ config NR_CPUS int "Maximum number of CPUs (2-1024)" range 2 1024 depends on SMP - default "64" + default "1024" help You should set this to the number of CPUs in your system, but keep in mind that a kernel compiled for, e.g., 2 CPUs will boot but @@ -354,7 +354,7 @@ config NUMA config NODES_SHIFT int "Max num nodes shift(3-10)" range 3 10 - default "8" + default "10" depends on NEED_MULTIPLE_NODES help This option specifies the maximum number of nodes in your SSI system. Index: linux-2.6.18-rc4/mm/vmstat.c =================================================================== --- linux-2.6.18-rc4.orig/mm/vmstat.c 2006-08-19 14:39:16.505041889 -0700 +++ linux-2.6.18-rc4/mm/vmstat.c 2006-08-22 21:57:48.541306811 -0700 @@ -674,6 +674,7 @@ struct seq_operations vmstat_op = { #endif /* CONFIG_PROC_FS */ +#ifdef CONFIG_SMP /* * Use the cpu notifier to insure that the thresholds are recalculated * when necessary. 
@@ -704,4 +705,4 @@ int __init setup_vmstat(void) return 0; } module_init(setup_vmstat) - +#endif Index: linux-2.6.18-rc4/mm/kmalloc.c =================================================================== --- linux-2.6.18-rc4.orig/mm/kmalloc.c 2006-08-22 21:57:46.589279046 -0700 +++ linux-2.6.18-rc4/mm/kmalloc.c 2006-08-23 10:00:42.538284730 -0700 @@ -13,34 +13,38 @@ #include #include -// #define KMALLOC_DEBUG +#define KMALLOC_SHIFT_LOW 5 +#define KMALLOC_SHIFT_HIGH 18 +#define KMALLOC_EXTRA 2 struct slab_control kmalloc_caches[2] - [KMALLOC_SHIFT_HIGH - KMALLOC_SHIFT_LOW + 1] __cacheline_aligned; + [KMALLOC_SHIFT_HIGH - KMALLOC_SHIFT_LOW + 1 + KMALLOC_EXTRA] __cacheline_aligned; EXPORT_SYMBOL(kmalloc_caches); -void *__kmalloc(size_t size, gfp_t flags) + +static struct slab_cache *get_slab(size_t size, gfp_t flags) { - int index = max(KMALLOC_SHIFT_LOW, fls(size - 1)); + int index = kmalloc_index(size); + + BUG_ON(index < 0); - BUG_ON(index > KMALLOC_SHIFT_HIGH); + return &kmalloc_caches[!!(flags & __GFP_DMA)] [index].sc; + +} - return KMALLOC_ALLOCATOR.alloc(&kmalloc_caches[!!(flags & __GFP_DMA)] - [index - KMALLOC_SHIFT_LOW].sc, flags); +void *__kmalloc(size_t size, gfp_t flags) +{ + return KMALLOC_ALLOCATOR.alloc(get_slab(size, flags), flags); } +EXPORT_SYMBOL(__kmalloc); #ifdef CONFIG_NUMA void *__kmalloc_node(size_t size, gfp_t flags, int node) { - int index = max(KMALLOC_SHIFT_LOW, fls(size - 1)); - - BUG_ON(index > KMALLOC_SHIFT_HIGH); - - return KMALLOC_ALLOCATOR.alloc_node(&kmalloc_caches - [!!(flags & __GFP_DMA)] - [index - KMALLOC_SHIFT_LOW].sc, - flags, node); + return KMALLOC_ALLOCATOR.alloc_node(get_slab(size, flags), + flags, node); } +EXPORT_SYMBOL(__kmalloc_node); #endif void *kzalloc(size_t size, gfp_t flags) @@ -83,6 +87,8 @@ void __init create_kmalloc_cache(struct struct slab_cache s; struct slab_cache *rs; +// printk(KERN_CRIT "create_kmalloc_cache(%p,%s,%p,%d)\n", x, name, p, size); + s.page_alloc = p; s.slab_alloc = &KMALLOC_ALLOCATOR; s.size = 
size; @@ -100,7 +106,6 @@ void __init create_kmalloc_cache(struct } struct slab_allocator kmalloc_slab_allocator; - /* Export the fixed kmalloc array in another way */ struct slab_cache *kmalloc_create(struct slab_control *x, const struct slab_cache *s) @@ -113,20 +118,36 @@ struct slab_cache *kmalloc_create(struct return KMALLOC_ALLOCATOR.dup(&kmalloc_caches[0][index].sc); } - -void __init kmalloc_init(void) +void __init kmalloc_init_array(int dma, const char *name, + const struct page_allocator *pa) { - const struct page_allocator *reg = &page_allocator; - const struct page_allocator *dma = dmaify_page_allocator(reg); int i; - for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) { - create_kmalloc_cache(&kmalloc_caches[0][i], "kmalloc", reg, 1 << i); - create_kmalloc_cache(&kmalloc_caches[1][i], "kmalloc-DMA", dma, 1 << i); + for (i = 0; i <= 13; i++) { + create_kmalloc_cache( + &kmalloc_caches[dma][i], + name, pa, 1 << (i + KMALLOC_SHIFT_LOW)); } + /* Non-power of two caches */ + create_kmalloc_cache(&kmalloc_caches[dma][14], name, pa, 96); + create_kmalloc_cache(&kmalloc_caches[dma][15], name, pa, 192); +} + +void __init kmalloc_init(void) +{ + + kmalloc_init_array(0, "kmalloc", &page_allocator); + /* + * The above must be done first. Deriving a page allocator requires + * a working (normal) kmalloc array. 
+ */ + + kmalloc_init_array(1, "kmalloc-DMA", + dmaify_page_allocator(&page_allocator)); /* And deal with the kmalloc_cache_allocator */ - memcpy(&kmalloc_slab_allocator, &KMALLOC_ALLOCATOR, sizeof(struct slab_allocator)); + memcpy(&kmalloc_slab_allocator, &KMALLOC_ALLOCATOR, + sizeof(struct slab_allocator)); kmalloc_slab_allocator.create = kmalloc_create; kmalloc_slab_allocator.destructor = null_slab_allocator_destructor; } Index: linux-2.6.18-rc4/include/linux/kmalloc.h =================================================================== --- linux-2.6.18-rc4.orig/include/linux/kmalloc.h 2006-08-22 21:57:46.588302543 -0700 +++ linux-2.6.18-rc4/include/linux/kmalloc.h 2006-08-22 21:57:48.544236318 -0700 @@ -15,11 +15,6 @@ #define KMALLOC_ALLOCATOR slabifier_allocator #endif -/* Lowest size supported is 2^KMALLOC_INDEX_LOW. 7 = 128 byte */ -#define KMALLOC_SHIFT_LOW 7 -/* Highest size supported is 2^KMALLOC_INDEX_HIGH. 19 = 256kbyte */ -#define KMALLOC_SHIFT_HIGH 18 - /* * We keep the general caches in an array of slab caches that are used for * 2^x bytes of allocations. For each size we generate a DMA and a @@ -27,41 +22,33 @@ * legacy I/O. The regular caches can be used for devices that can * do DMA to all of memory). */ -extern struct slab_control kmalloc_caches[2] - [KMALLOC_SHIFT_HIGH - KMALLOC_SHIFT_LOW + 1]; +extern struct slab_control kmalloc_caches[2][16]; /* * Sorry that the following has to be that ugly but GCC has trouble * with constant propagation and loops. 
*/ -static inline int __kmalloc_log2(int size) +static inline int kmalloc_index(int size) { - if (size <= 1) return 0; - if (size <= 2) return 1; - if (size <= 4) return 2; - if (size <= 8) return 3; - if (size <= 16) return 4; - if (size <= 32) return 5; - if (size <= 64) return 6; - if (size <= 128) return 7; - if (size <= 256) return 8; - if (size <= 512) return 9; - if (size <= 1024) return 10; - return 99; + if (size <= 32) return 0; + if (size <= 64) return 1; + if (size <= 96) return 14; + if (size <= 128) return 2; + if (size <= 192) return 15; + if (size <= 256) return 3; + if (size <= 512) return 4; + if (size <= 1024) return 5; + if (size <= 2048) return 6; + if (size <= 4096) return 7; + if (size <= 8 * 1024) return 8; + if (size <= 16 * 1024) return 9; + if (size <= 32 * 1024) return 10; + if (size <= 64 * 1024) return 11; + if (size <= 128 * 1024) return 12; + if (size <= 256 * 1024) return 13; + return -1; } -static inline int kmalloc_log2(int size) -{ - if (size == 0) - return -1; - if (size <= 1024) - return __kmalloc_log2(size); - if (size <= 1024*1024) - return __kmalloc_log2(size >> 10) + 10; - return 99; -} - - /* * Find the slab cache for a given combination of allocation flags and size. * @@ -70,9 +57,9 @@ static inline int kmalloc_log2(int size) */ static inline struct slab_cache *kmalloc_slab(size_t size, gfp_t flags) { - int index = min(KMALLOC_SHIFT_LOW, kmalloc_log2(size)); + int index = kmalloc_index(size); - if (index > KMALLOC_SHIFT_HIGH) { + if (index < 0) { /* * Generate a link failure. Would be great if we could * do something to stop the compile here. 
@@ -80,7 +67,7 @@ static inline struct slab_cache *kmalloc extern void __kmalloc_size_too_large(void); __kmalloc_size_too_large(); } - return &kmalloc_caches[!!(flags & __GFP_DMA)][index - KMALLOC_SHIFT_LOW].sc; + return &kmalloc_caches[!!(flags & __GFP_DMA)][index].sc; } extern void *__kmalloc(size_t, gfp_t); @@ -91,7 +78,6 @@ static inline void *kmalloc(size_t size, if (__builtin_constant_p(size)) { struct slab_cache *s = kmalloc_slab(size, flags); - BUG_ON(s->size < size); return KMALLOC_ALLOCATOR.alloc(s, flags); } else return __kmalloc(size, flags); @@ -104,7 +90,6 @@ static inline void *kmalloc_node(size_t if (__builtin_constant_p(size)) { struct slab_cache *s = kmalloc_slab(size, flags); - BUG_ON(s->size < size); return KMALLOC_ALLOCATOR.alloc_node(s, flags, node); } else return __kmalloc_node(size, flags, node);