---
 include/linux/mm.h |   91 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 91 insertions(+)

Index: linux-2.6/include/linux/mm.h
===================================================================
--- linux-2.6.orig/include/linux/mm.h	2007-11-03 16:01:13.308227195 -0700
+++ linux-2.6/include/linux/mm.h	2007-11-03 16:01:54.583421504 -0700
@@ -1137,5 +1137,96 @@ int vmemmap_populate_basepages(struct pa
 			unsigned long pages, int node);
 int vmemmap_populate(struct page *start_page, unsigned long pages, int node);
+
+/*
+ * include/linux/cpu_alloc.h - cpu allocator definitions
+ *
+ * The cpu allocator allows allocating an array of objects on all processors.
+ * A single pointer can then be used to access the instance of the object
+ * on a particular processor.
+ *
+ * Cpu objects are typically small. The allocator packs them tightly
+ * to increase the chance on each access that a per cpu object is already
+ * cached. Alignments may be specified, but the intent is to align the data
+ * properly due to cpu alignment constraints and not to avoid cacheline
+ * contention. Any holes left by aligning objects are filled up with smaller
+ * objects that are allocated later.
+ *
+ * Cpu data can be allocated using CPU_ALLOC. The resulting pointer is
+ * pointing to the instance of the variable on cpu 0. It is generally an
+ * error to use the pointer directly unless we are running on cpu 0. So
+ * direct use is valid during early boot, for example.
+ *
+ * The GFP flags have their usual function: __GFP_ZERO zeroes the object
+ * and other flags may be used to control reclaim behavior if the cpu
+ * areas have to be extended. However, zones cannot be selected nor
+ * can locality constraint flags be used.
+ *
+ * CPU_PTR() may be used to calculate the pointer for a specific processor.
+ * CPU_PTR is highly scalable since it simply adds the shifted value of
+ * smp_processor_id() to the base.
+ *
+ * Note: Synchronization is up to the caller.
If preemption is disabled then
+ * it is generally safe to access cpu variables (unless they are also
+ * handled from an interrupt context).
+ *
+ * The cpu allocator falls back to slab operations for the !SMP case.
+ * If the cpu allocator is used during early boot before slab bootstrap
+ * is complete then the UP case must be handled in a special way.
+ */
+
+#define CPU_ALLOC(type, flags)	cpu_alloc(sizeof(type), flags, \
+					__alignof__(type))
+#define CPU_FREE(pointer)	cpu_free(pointer, sizeof(*(pointer)))
+
+/*
+ * Raw calls
+ */
+void *cpu_alloc(unsigned long size, gfp_t gfp, unsigned long align);
+void cpu_free(void *cpu_pointer, unsigned long size);
+
+/*
+ * If CPU_AREA_BASE is not defined then the cpu areas are not virtualized
+ * and are not extendable. Then cpu_area[] is the base address.
+ */
+#ifndef CPU_AREA_BASE
+#define CPU_AREA_STATIC
+extern u8 cpu_area[];
+
+/* Fully parenthesized so the expansion composes safely in expressions */
+#define CPU_AREA_BASE ((unsigned long)cpu_area)
+/*
+ * The default configuration--if the arch does not set the bits--is a
+ * 64 k sized cpu area for each processor.
+ */
+#ifndef CPU_AREA_BITS
+#define CPU_AREA_BITS (ilog2(NR_CPUS) + 16)
+#endif
+#endif
+
+/*
+ * The bits available to address cpu areas are split. The higher bits
+ * are used to indicate which processor the area belongs to and the
+ * lower bits are the index in each area.
+ */
+#define CPU_AREA_SHIFT (CPU_AREA_BITS - ilog2(NR_CPUS))
+
+
+#define CPU_PTR(__p, __cpu)	((__typeof__(__p))((void *)(__p) +	\
+			((unsigned long)(__cpu) << CPU_AREA_SHIFT)))
+
+/* Functions for populating the per cpu areas mappings */
+pgd_t *cpu_area_pgd_populate(unsigned long addr, gfp_t flags, int node);
+pud_t *cpu_area_pud_populate(pgd_t *pgd, unsigned long addr,
+				gfp_t flags, int node);
+pmd_t *cpu_area_pmd_populate(pud_t *pud, unsigned long addr,
+				gfp_t flags, int node);
+pte_t *cpu_area_pte_populate(pmd_t *pmd, unsigned long addr,
+				gfp_t flags, int node);
+void *cpu_area_alloc_block(unsigned long size, gfp_t flags, int node);
+int cpu_area_populate_basepages(void *start, unsigned long size,
+				gfp_t flags, int node);
+int cpu_area_populate(void *start, unsigned long size,
+				gfp_t flags, int node);
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */