From 46768ad59fc44ddcc7c4e6c1871876710b9fc91e Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Wed, 26 Sep 2007 10:46:41 -0700
Subject: [PATCH] vcompound_vmalloc_address

Sometimes we need to figure out which vmalloc address is in use for a
certain page struct. There is no easy way to figure out the vmalloc
address from the page struct. So simply search through the kernel page
tables to find the address. This is a fairly expensive process. Use
sparingly (or provide a better implementation).

Signed-off-by: Christoph Lameter
---
 include/linux/mm.h |    1 +
 mm/vmalloc.c       |   77 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 78 insertions(+), 0 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index d84795d..fe83ccf 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -296,6 +296,7 @@ static inline int get_page_unless_zero(struct page *page)
 
 struct page *vmalloc_to_page(const void *addr);
 unsigned long vmalloc_to_pfn(const void *addr);
+void *vmalloc_address(struct page *);
 
 /* Determine if an address is within the vmalloc range */
 static inline int is_vmalloc_addr(const void *x)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index f9da01a..7a939ee 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -196,6 +196,83 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
 EXPORT_SYMBOL(vmalloc_to_page);
 
 /*
+ * Determine vmalloc address from a page struct.
+ *
+ * Linear search through all ptes of the vmalloc area.
+ */
+static unsigned long vaddr_pte_range(pmd_t *pmd, unsigned long addr,
+		unsigned long end, unsigned long pfn)
+{
+	pte_t *pte;
+
+	pte = pte_offset_kernel(pmd, addr);
+	do {
+		pte_t ptent = *pte;
+		if (pte_present(ptent) && pte_pfn(ptent) == pfn)
+			return addr;
+	} while (pte++, addr += PAGE_SIZE, addr != end);
+	return 0;
+}
+
+static inline unsigned long vaddr_pmd_range(pud_t *pud, unsigned long addr,
+		unsigned long end, unsigned long pfn)
+{
+	pmd_t *pmd;
+	unsigned long next;
+	unsigned long n;
+
+	pmd = pmd_offset(pud, addr);
+	do {
+		next = pmd_addr_end(addr, end);
+		if (pmd_none_or_clear_bad(pmd))
+			continue;
+		n = vaddr_pte_range(pmd, addr, next, pfn);
+		if (n)
+			return n;
+	} while (pmd++, addr = next, addr != end);
+	return 0;
+}
+
+static inline unsigned long vaddr_pud_range(pgd_t *pgd, unsigned long addr,
+		unsigned long end, unsigned long pfn)
+{
+	pud_t *pud;
+	unsigned long next;
+	unsigned long n;
+
+	pud = pud_offset(pgd, addr);
+	do {
+		next = pud_addr_end(addr, end);
+		if (pud_none_or_clear_bad(pud))
+			continue;
+		n = vaddr_pmd_range(pud, addr, next, pfn);
+		if (n)
+			return n;
+	} while (pud++, addr = next, addr != end);
+	return 0;
+}
+
+void *vmalloc_address(struct page *page)
+{
+	pgd_t *pgd;
+	unsigned long next, n;
+	unsigned long addr = VMALLOC_START;
+	unsigned long pfn = page_to_pfn(page);
+
+	pgd = pgd_offset_k(VMALLOC_START);
+	do {
+		next = pgd_addr_end(addr, VMALLOC_END);
+		if (pgd_none_or_clear_bad(pgd))
+			continue;
+		n = vaddr_pud_range(pgd, addr, next, pfn);
+		if (n)
+			return (void *)n;
+	} while (pgd++, addr = next, addr < VMALLOC_END);
+	return NULL;
+}
+EXPORT_SYMBOL(vmalloc_address);
+
+/*
  * Map a vmalloc()-space virtual address to the physical page frame number.
  */
 unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
-- 
1.5.4.1