diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c
index aa825f3..af09167 100644
--- a/linux-core/drm_compat.c
+++ b/linux-core/drm_compat.c
@@ -151,7 +151,7 @@ struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
 
 #endif
 
-#if !defined(DRM_FULL_MM_COMPAT) && \
+#if !defined(DRM_NOPFN) && !defined(DRM_VM_FAULT) && \
   ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) || \
    (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)))
 
@@ -298,7 +298,7 @@ out_unlock:
 #endif
 
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) && \
-	!defined(DRM_FULL_MM_COMPAT)
+	!defined(DRM_NOPFN) && !defined(DRM_VM_FAULT)
 
 /**
  */
diff --git a/linux-core/drm_compat.h b/linux-core/drm_compat.h
index 08dd5f8..db59945 100644
--- a/linux-core/drm_compat.h
+++ b/linux-core/drm_compat.h
@@ -179,8 +179,13 @@ static __inline__ void *kcalloc(size_t nmemb, size_t size, int flags)
 #define DRM_ODD_MM_COMPAT
 #endif
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21))
-#define DRM_FULL_MM_COMPAT
+#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)) && \
+     (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)))
+#define DRM_NOPFN
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
+#define DRM_VM_FAULT
 #endif
 
 
@@ -227,7 +232,7 @@ extern void free_nopage_retry(void);
 
 #endif
 
-#ifndef DRM_FULL_MM_COMPAT
+#ifndef DRM_NOPFN
 
 /*
  * For now, just return a dummy page that we've allocated out of
@@ -249,7 +254,7 @@ extern struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
 				     unsigned long address,
 				     int *type);
 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) && \
-	!defined(DRM_FULL_MM_COMPAT)
+	!defined(DRM_NOPFN) && !defined(DRM_VM_FAULT)
 extern unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
 				     unsigned long address);
 #endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) */
diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c
index 6618c0a..305a815 100644
--- a/linux-core/drm_vm.c
+++ b/linux-core/drm_vm.c
@@ -698,7 +698,7 @@ EXPORT_SYMBOL(drm_mmap);
  * protected by the bo->mutex lock.
  */
 
-#ifdef DRM_FULL_MM_COMPAT
+#ifdef DRM_NOPFN
 static unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
 				     unsigned long address)
 {
@@ -796,6 +796,105 @@ out_unlock:
 }
 #endif
 
+#ifdef DRM_VM_FAULT
+static int drm_bo_vm_fault(struct vm_area_struct *vma,
+			   struct vm_fault *vmf)
+{
+	struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
+	unsigned long page_offset;
+	struct page *page = NULL;
+	struct drm_ttm *ttm;
+	struct drm_device *dev;
+	unsigned long pfn;
+	int err;
+	unsigned long bus_base;
+	unsigned long bus_offset;
+	unsigned long bus_size;
+	int ret = VM_FAULT_NOPAGE;
+	unsigned long address = (unsigned long)vmf->virtual_address;
+
+	if (address > vma->vm_end)
+		return VM_FAULT_SIGBUS;
+
+	dev = bo->dev;
+	err = drm_bo_read_lock(&dev->bm.bm_lock, 1);
+	if (err)
+		return ret;
+
+	err = mutex_lock_interruptible(&bo->mutex);
+	if (err) {
+		drm_bo_read_unlock(&dev->bm.bm_lock);
+		return ret;
+	}
+
+	err = drm_bo_wait(bo, 0, 1, 0, 1);
+	if (err) {
+		ret = (err != -EAGAIN) ? VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
+		bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;
+		goto out_unlock;
+	}
+
+	bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;
+
+	/*
+	 * If buffer happens to be in a non-mappable location,
+	 * move it to a mappable.
+	 */
+
+	if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
+		uint32_t new_flags = bo->mem.proposed_flags |
+			DRM_BO_FLAG_MAPPABLE |
+			DRM_BO_FLAG_FORCE_MAPPABLE;
+		err = drm_bo_move_buffer(bo, new_flags, 0, 0);
+		if (err) {
+			ret = (err != -EAGAIN) ? VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
+			goto out_unlock;
+		}
+	}
+
+	err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
+				&bus_size);
+
+	if (err) {
+		ret = VM_FAULT_SIGBUS;
+		goto out_unlock;
+	}
+
+	page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
+
+	if (bus_size) {
+		struct drm_mem_type_manager *man = &dev->bm.man[bo->mem.mem_type];
+
+		pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset;
+		vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma);
+	} else {
+		ttm = bo->ttm;
+
+		drm_ttm_fixup_caching(ttm);
+		page = drm_ttm_get_page(ttm, page_offset);
+		if (!page) {
+			ret = VM_FAULT_OOM;
+			goto out_unlock;
+		}
+		pfn = page_to_pfn(page);
+		vma->vm_page_prot = (bo->mem.flags & DRM_BO_FLAG_CACHED) ?
+			vm_get_page_prot(vma->vm_flags) :
+			drm_io_prot(_DRM_TTM, vma);
+	}
+
+	err = vm_insert_pfn(vma, address, pfn);
+	if (err) {
+		ret = (err != -EAGAIN) ? VM_FAULT_OOM : VM_FAULT_NOPAGE;
+		goto out_unlock;
+	}
+out_unlock:
+	BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);
+	mutex_unlock(&bo->mutex);
+	drm_bo_read_unlock(&dev->bm.bm_lock);
+	return ret;
+}
+#endif
+
 static void drm_bo_vm_open_locked(struct vm_area_struct *vma)
 {
 	struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
@@ -848,8 +947,10 @@ static void drm_bo_vm_close(struct vm_area_struct *vma)
 }
 
 static struct vm_operations_struct drm_bo_vm_ops = {
-#ifdef DRM_FULL_MM_COMPAT
+#ifdef DRM_NOPFN
 	.nopfn = drm_bo_vm_nopfn,
+#elif defined(DRM_VM_FAULT)
+	.fault = drm_bo_vm_fault,
 #else
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))
 	.nopfn = drm_bo_vm_nopfn,
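
Note for reviewers: the substance of this patch is the move from the `.nopfn` convention used on 2.6.21-2.6.26 to the `.fault` interface that replaces it as of 2.6.27. A minimal sketch of the two calling conventions, using hypothetical `example_*` names rather than anything in this patch:

#include <linux/mm.h>

/* Hypothetical pfn lookup, standing in for the bo/ttm logic in the patch. */
static unsigned long example_lookup_pfn(struct vm_area_struct *vma,
					unsigned long address)
{
	return vma->vm_pgoff + ((address - vma->vm_start) >> PAGE_SHIFT);
}

#ifdef DRM_NOPFN
/* 2.6.21..2.6.26 (.nopfn): takes the raw faulting address, installs the
 * pte itself via vm_insert_pfn(), and returns a NOPFN_* code. */
static unsigned long example_nopfn(struct vm_area_struct *vma,
				   unsigned long address)
{
	if (vm_insert_pfn(vma, address, example_lookup_pfn(vma, address)))
		return NOPFN_OOM;
	return NOPFN_REFAULT;	/* pte is in place; retry the access */
}
#endif

#ifdef DRM_VM_FAULT
/* 2.6.27+ (.fault): the address arrives wrapped in struct vm_fault, and
 * the handler returns a VM_FAULT_* code; VM_FAULT_NOPAGE means "pte
 * installed, no struct page for the core VM to track". */
static int example_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long address = (unsigned long)vmf->virtual_address;

	if (vm_insert_pfn(vma, address, example_lookup_pfn(vma, address)))
		return VM_FAULT_OOM;
	return VM_FAULT_NOPAGE;
}
#endif

The port is mostly mechanical: the same pfn computation feeds vm_insert_pfn() either way; only the entry signature and the return-code vocabulary change, which is why drm_bo_vm_fault() above mirrors the existing drm_bo_vm_nopfn() body.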
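One pattern repeated three times in drm_bo_vm_fault() is mapping a driver error to a fault code, where -EAGAIN (an interrupted wait or a move that needs retrying) must become VM_FAULT_NOPAGE so the kernel re-runs the fault instead of killing the process. A hypothetical helper, not part of the patch, that could factor this out:

/* Hypothetical helper: -EAGAIN asks the core VM to retry the fault
 * (VM_FAULT_NOPAGE); any other error becomes the caller-chosen fatal
 * code, VM_FAULT_SIGBUS or VM_FAULT_OOM in the handler above. */
static int drm_bo_fault_errcode(int err, int fatal)
{
	return (err != -EAGAIN) ? fatal : VM_FAULT_NOPAGE;
}

With it, the drm_bo_wait() and drm_bo_move_buffer() failures would read `ret = drm_bo_fault_errcode(err, VM_FAULT_SIGBUS);` and the vm_insert_pfn() failure `ret = drm_bo_fault_errcode(err, VM_FAULT_OOM);`.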