Get rid of ZONE_HIGHMEM and ZONE_DMA32 for arches that do not need them.

This patch removes both zones on arches that do not need them. Most 64-bit
arches do not use ZONE_HIGHMEM, and only x86_64 uses ZONE_DMA32. With this
patch, a zone that an arch does not need is simply not defined. Test
operations on such a zone (is_highmem(), is_dma32() and the corresponding
_idx() helpers) simply return false, so code that first checks whether a
zone is of that type is compiled out.

Signed-off-by: Christoph Lameter
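To illustrate the mechanism, here is a minimal userspace sketch (not part of
the patch; the MODEL_* names are invented for illustration) of how a
conditionally defined zone enum plus a constant-false test helper lets the
compiler drop zone-specific branches on arches that do not have that zone:

	/* Build with or without -DCONFIG_HIGHMEM to model the two cases. */
	#include <stdio.h>

	enum model_zone {
		MODEL_ZONE_DMA,
		MODEL_ZONE_NORMAL,
	#ifdef CONFIG_HIGHMEM
		MODEL_ZONE_HIGHMEM,
	#endif
		MODEL_MAX_NR_ZONES	/* shrinks automatically with the config */
	};

	static inline int model_is_highmem_idx(int idx)
	{
	#ifdef CONFIG_HIGHMEM
		return idx == MODEL_ZONE_HIGHMEM;
	#else
		return 0;	/* constant: the guarded branch below becomes dead code */
	#endif
	}

	int main(void)
	{
		int idx;

		for (idx = 0; idx < MODEL_MAX_NR_ZONES; idx++) {
			if (model_is_highmem_idx(idx))
				printf("zone %d needs highmem-style mappings\n", idx);
			else
				printf("zone %d is directly addressable\n", idx);
		}
		return 0;
	}

Arrays sized from the enum, like zone_names[MAX_NR_ZONES] and
sysctl_lowmem_reserve_ratio[] in the patch below, shrink in the same way,
which is the point of turning the zone constants into an enum.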
Index: linux-2.6.17/include/linux/mmzone.h
===================================================================
--- linux-2.6.17.orig/include/linux/mmzone.h	2006-07-03 12:49:31.000000000 -0700
+++ linux-2.6.17/include/linux/mmzone.h	2006-07-03 12:49:47.000000000 -0700
@@ -87,15 +87,57 @@
 #define zone_pcp(__z, __cpu) (&(__z)->pageset[(__cpu)])
 #endif
 
-#define ZONE_DMA		0
-#define ZONE_DMA32		1
-#define ZONE_NORMAL		2
-#define ZONE_HIGHMEM		3
+typedef enum {
+	/*
+	 * ZONE_DMA is used when there are devices that are not able
+	 * to do DMA to all of addressable memory (ZONE_NORMAL). Then we
+	 * carve out the portion of memory that is needed for these devices.
+	 * The range is arch specific.
+	 *
+	 * Some examples
+	 *
+	 * Architecture		Limit
+	 * ---------------------------
+	 * parisc, ia64, sparc	<4G
+	 * s390			<2G
+	 * arm26		<48M
+	 * arm			Various
+	 * alpha		Unlimited or 0-16MB.
+	 *
+	 * i386, x86_64 and multiple other arches
+	 *			<16M.
+	 */
+	ZONE_DMA,
+#ifdef CONFIG_ZONE_DMA32
+	/*
+	 * x86_64 needs two ZONE_DMAs because it supports devices that are
+	 * only able to do DMA to the lower 16M but also 32 bit devices that
+	 * can only do DMA to the lower 4G.
+	 */
+	ZONE_DMA32,
+#endif
+	/*
+	 * Normal addressable memory is in ZONE_NORMAL. DMA operations can be
+	 * performed on pages in ZONE_NORMAL if the DMA devices support
+	 * transfers to all addressable memory.
+	 */
+	ZONE_NORMAL,
+#ifdef CONFIG_HIGHMEM
+	/*
+	 * A memory area that is only addressable by the kernel through
+	 * mapping portions into its own address space. This is for example
+	 * used by i386 to allow the kernel to address the memory beyond
+	 * the 1-1 mappings. The kernel will setup special mappings (page
+	 * table entries on i386) for each page that the kernel needs to
+	 * access.
+	 */
+	ZONE_HIGHMEM,
+#endif
+	MAX_NR_ZONES
+} zones_t;
 
-#define MAX_NR_ZONES		4	/* Sync this with ZONES_SHIFT */
 #define ZONES_SHIFT		2	/* ceil(log2(MAX_NR_ZONES)) */
-
 
 /*
  * When a memory allocation must conform to specific limitations (such
  * as being suitable for DMA) the caller will pass in hints to the
@@ -125,16 +167,6 @@
 /* #define GFP_ZONETYPES	(GFP_ZONEMASK + 1) */		/* Non-loner */
 #define GFP_ZONETYPES  ((GFP_ZONEMASK + 1) / 2 + 1)		/* Loner */
 
-/*
- * On machines where it is needed (eg PCs) we divide physical memory
- * into multiple physical zones. On a 32bit PC we have 4 zones:
- *
- * ZONE_DMA	  < 16 MB	ISA DMA capable memory
- * ZONE_DMA32	     0 MB	Empty
- * ZONE_NORMAL	16-896 MB	direct mapped by the kernel
- * ZONE_HIGHMEM	 > 896 MB	only page cache and user processes
- */
-
 struct zone {
 	/* Fields commonly accessed by the page allocator */
 	unsigned long		free_pages;
@@ -147,7 +179,7 @@
 	 * on the higher zones). This array is recalculated at runtime if the
 	 * sysctl_lowmem_reserve_ratio sysctl changes.
 	 */
-	unsigned long		lowmem_reserve[MAX_NR_ZONES];
+	unsigned long		lowmem_reserve[ZONE_NORMAL + 1];
 
 #ifdef CONFIG_NUMA
 	/*
@@ -380,7 +412,11 @@
 
 static inline int is_highmem_idx(int idx)
 {
+#ifdef CONFIG_HIGHMEM
 	return (idx == ZONE_HIGHMEM);
+#else
+	return 0;
+#endif
 }
 
 static inline int is_normal_idx(int idx)
@@ -396,7 +432,11 @@
  */
 static inline int is_highmem(struct zone *zone)
 {
+#ifdef CONFIG_HIGHMEM
 	return zone == zone->zone_pgdat->node_zones + ZONE_HIGHMEM;
+#else
+	return 0;
+#endif
 }
 
 static inline int is_normal(struct zone *zone)
@@ -406,7 +446,11 @@
 
 static inline int is_dma32(struct zone *zone)
 {
+#ifdef CONFIG_ZONE_DMA32
 	return zone == zone->zone_pgdat->node_zones + ZONE_DMA32;
+#else
+	return 0;
+#endif
 }
 
 static inline int is_dma(struct zone *zone)
Index: linux-2.6.17/mm/page_alloc.c
===================================================================
--- linux-2.6.17.orig/mm/page_alloc.c	2006-07-03 12:49:33.000000000 -0700
+++ linux-2.6.17/mm/page_alloc.c	2006-07-03 12:49:47.000000000 -0700
@@ -69,7 +69,15 @@
  * TBD: should special case ZONE_DMA32 machines here - in those we normally
  * don't need any ZONE_NORMAL reservation
  */
-int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = { 256, 256, 32 };
+int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
+	256
+#ifdef CONFIG_ZONE_DMA32
+	, 256
+#endif
+#ifdef CONFIG_HIGHMEM
+	, 32
+#endif
+};
 
 EXPORT_SYMBOL(totalram_pages);
 
@@ -80,7 +88,17 @@
 struct zone *zone_table[1 << ZONETABLE_SHIFT] __read_mostly;
 EXPORT_SYMBOL(zone_table);
 
-static char *zone_names[MAX_NR_ZONES] = { "DMA", "DMA32", "Normal", "HighMem" };
+static char *zone_names[MAX_NR_ZONES] = {
+	"DMA",
+#ifdef CONFIG_ZONE_DMA32
+	"DMA32",
+#endif
+	"Normal",
+#ifdef CONFIG_HIGHMEM
+	"HighMem"
+#endif
+};
+
 int min_free_kbytes = 1024;
 
 unsigned long __meminitdata nr_kernel_pages;
@@ -1323,8 +1341,13 @@
 
 	val->totalram = pgdat->node_present_pages;
 	val->freeram = nr_free_pages_pgdat(pgdat);
+#ifdef CONFIG_HIGHMEM
 	val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
 	val->freehigh = pgdat->node_zones[ZONE_HIGHMEM].free_pages;
+#else
+	val->totalhigh = 0;
+	val->freehigh = 0;
+#endif
 	val->mem_unit = PAGE_SIZE;
 }
 #endif
@@ -1453,14 +1476,12 @@
 {
 	struct zone *zone;
 
-	BUG_ON(zone_type > ZONE_HIGHMEM);
+	BUG_ON(zone_type >= MAX_NR_ZONES);
 
 	do {
 		zone = pgdat->node_zones + zone_type;
 		if (populated_zone(zone)) {
-#ifndef CONFIG_HIGHMEM
-			BUG_ON(zone_type > ZONE_NORMAL);
-#endif
+			BUG_ON(is_highmem_idx(zone_type));
 			zonelist->zones[nr_zones++] = zone;
 			check_highest_zone(zone_type);
 		}
@@ -1472,14 +1493,17 @@
 
 static inline int highest_zone(int zone_bits)
 {
-	int res = ZONE_NORMAL;
+#ifdef CONFIG_HIGHMEM
 	if (zone_bits & (__force int)__GFP_HIGHMEM)
-		res = ZONE_HIGHMEM;
+		return ZONE_HIGHMEM;
+#endif
+#ifdef CONFIG_ZONE_DMA32
 	if (zone_bits & (__force int)__GFP_DMA32)
-		res = ZONE_DMA32;
-	if (zone_bits & (__force int)__GFP_DMA)
-		res = ZONE_DMA;
-	return res;
+		return ZONE_DMA32;
+#endif
+	if ((zone_bits & (__force int)__GFP_DMA))
+		return ZONE_DMA;
+	return ZONE_NORMAL;
 }
 
 #ifdef CONFIG_NUMA
@@ -2091,7 +2115,7 @@
 		if (zholes_size)
 			realsize -= zholes_size[j];
 
-		if (j < ZONE_HIGHMEM)
+		if (!is_highmem_idx(j))
 			nr_kernel_pages += realsize;
 		nr_all_pages += realsize;
 
Index: linux-2.6.17/include/linux/gfp.h
===================================================================
--- linux-2.6.17.orig/include/linux/gfp.h	2006-07-03 12:49:31.000000000 -0700
+++ linux-2.6.17/include/linux/gfp.h	2006-07-03 12:49:47.000000000 -0700
@@ -12,15 +12,17 @@
  */
 /* Zone modifiers in GFP_ZONEMASK (see linux/mmzone.h - low three bits) */
 #define __GFP_DMA	((__force gfp_t)0x01u)
-#define __GFP_HIGHMEM	((__force gfp_t)0x02u)
-#ifdef CONFIG_DMA_IS_DMA32
-#define __GFP_DMA32	((__force gfp_t)0x01)	/* ZONE_DMA is ZONE_DMA32 */
+
+#ifdef CONFIG_ZONE_DMA32
+#define __GFP_DMA32	((__force gfp_t)0x04)	/* Has own ZONE_DMA32 */
 #elif BITS_PER_LONG < 64
 #define __GFP_DMA32	((__force gfp_t)0x00)	/* ZONE_NORMAL is ZONE_DMA32 */
 #else
-#define __GFP_DMA32	((__force gfp_t)0x04)	/* Has own ZONE_DMA32 */
+#define __GFP_DMA32	((__force gfp_t)0x01)	/* ZONE_DMA is ZONE_DMA32 */
 #endif
+#define __GFP_HIGHMEM	((__force gfp_t)0x02u)
+
 
 /*
  * Action modifiers - doesn't change the zoning
  *
Index: linux-2.6.17/arch/x86_64/Kconfig
===================================================================
--- linux-2.6.17.orig/arch/x86_64/Kconfig	2006-07-03 12:49:04.000000000 -0700
+++ linux-2.6.17/arch/x86_64/Kconfig	2006-07-03 12:49:47.000000000 -0700
@@ -73,6 +73,10 @@
 	bool
 	default y
 
+config ZONE_DMA32
+	bool
+	default y
+
 config GENERIC_IOMAP
 	bool
 	default y
Index: linux-2.6.17/arch/ia64/Kconfig
===================================================================
--- linux-2.6.17.orig/arch/ia64/Kconfig	2006-07-03 12:48:56.000000000 -0700
+++ linux-2.6.17/arch/ia64/Kconfig	2006-07-03 12:50:13.000000000 -0700
@@ -66,10 +66,6 @@
 	bool
 	select GENERIC_ALLOCATOR
 
-config DMA_IS_DMA32
-	bool
-	default y
-
 config DMA_IS_NORMAL
 	bool
 	depends on IA64_SGI_SN2
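For reference, a tiny userspace model of the __GFP_DMA32 selection in the
gfp.h hunk above (again not kernel code; the MODEL_ prefix marks names
invented for this sketch). Toggling MODEL_CONFIG_ZONE_DMA32 or
MODEL_BITS_PER_LONG shows which zone modifier bit __GFP_DMA32 collapses to,
so callers can always pass __GFP_DMA32 and get the closest zone the arch
provides:

	#include <stdio.h>

	/* #define MODEL_CONFIG_ZONE_DMA32 1 */	/* e.g. x86_64: separate ZONE_DMA32 */
	#define MODEL_BITS_PER_LONG 64

	#ifdef MODEL_CONFIG_ZONE_DMA32
	#define MODEL_GFP_DMA32	0x04u	/* has its own ZONE_DMA32 */
	#elif MODEL_BITS_PER_LONG < 64
	#define MODEL_GFP_DMA32	0x00u	/* ZONE_NORMAL acts as ZONE_DMA32 */
	#else
	#define MODEL_GFP_DMA32	0x01u	/* ZONE_DMA acts as ZONE_DMA32 */
	#endif

	int main(void)
	{
		printf("__GFP_DMA32 maps to zone modifier bit 0x%02x\n",
		       MODEL_GFP_DMA32);
		return 0;
	}

With neither a ZONE_DMA32 nor a 32-bit word size, __GFP_DMA32 falls back to
the ZONE_DMA bit, matching the #else branch added to gfp.h.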