GIT 1d9e76b53ce8ff0b15b43b679801226add747d4e git+ssh://master.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6.git#drm-mm

commit
Author: Dave Airlie
Date:   Mon Nov 5 16:59:20 2007 +1000

    drm/ttm: fix build with AGP disabled

    Signed-off-by: Dave Airlie

commit f21d68d9ff5e84889bc801f90eb40a5df187504f
Author: Thomas Hellstrom
Date:   Mon Nov 5 16:53:59 2007 +1000

    drm/i915: add support for buffer objects and fencing to the i915 driver

    This adds support for the new memory manager's buffer objects and
    fencing to the i915 driver.

    Signed-off-by: Dave Airlie

commit 2006b38601ca9442c204fa23c7c2bf4364449883
Author: Dave Airlie
Date:   Mon Nov 5 16:39:19 2007 +1000

    drm/ttm: Add AGP backend for TTM objects.

    This adds a DRI memory manager backend to control AGP memory
    allocations.

    Signed-off-by: Thomas Hellstrom
    Signed-off-by: Dave Airlie

commit 8ae9c922c59aad41fa3d1f36a695426c4c530990
Author: Thomas Hellstrom
Date:   Mon Nov 5 14:54:31 2007 +1000

    drm: add buffer object and TTM support to DRI memory manager

    This is the main commit of the DRM memory manager. It adds buffer
    objects, which can live in various memory areas, and TTMs (Translation
    Table Maps), which allow buffer objects to be moved in and out of the
    GART dynamically.

    Signed-off-by: Thomas Hellstrom
    Signed-off-by: Dave Airlie

commit 25f9d35f1b59020f5ade71f4f9c468953521f67e
Author: Dave Airlie
Date:   Mon Nov 5 14:13:19 2007 +1000

    drm: add fencing code for memory manager.

    This adds the generic fence code required to support the memory
    manager. Fences are user objects and are used to improve
    synchronization between the CPU and the GPU. The CPU drops fences into
    the GPU's command stream, and when GPU execution has passed that point
    the fence is signaled. Other objects are attached to fences so that
    they are not touched by the CPU while the GPU is still busy processing
    them.

    Signed-off-by: Thomas Hellstrom
    Signed-off-by: Dave Airlie

commit 31686b1fa123efb5f253453685e05108b178d343
Author: Thomas Hellstrom
Date:   Mon Nov 5 13:51:07 2007 +1000

    drm: add file offset memory manager

    This "memory" manager instance is used to allocate VM space at
    non-overlapping offsets on the DRM file descriptor.

    Signed-off-by: Dave Airlie

commit 41ccd38c2b79f40c0bad835a0ae2db8a7f5a8f67
Author: Thomas Hellstrom
Date:   Mon Nov 5 13:45:22 2007 +1000

    drm: add user object manager for tracking kernel objects

    A user object is a structure that helps the DRM hand out user handles
    to kernel-internal objects and keep track of those objects so they can
    be destroyed, e.g. when the user-space process exits.

    Signed-off-by: Thomas Hellstrom
    Signed-off-by: Dave Airlie

commit b8c1c287bf4c2fbea9a69bd0f3481d56a6f299c6
Author: Thomas Hellstrom
Date:   Mon Nov 5 13:21:19 2007 +1000

    drm: add basic memory accounting for userspace controlled objects.

    This adds basic memory accounting in preparation for the new memory
    manager. Objects allocated on behalf of userspace should be allocated
    via the new drm_ctl functions so that the DRM can limit how much RAM
    users can allocate through the DRM interfaces. The heuristic used to
    arrive at the memory limit is possibly naive, but that shouldn't be a
    problem at this stage: fixing it properly is a larger task, and the DRM
    will cooperate with other projects that need the same issue solved
    (mainly IBM's SPU support).
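    The accounting described above amounts to a pair of charge/refund
    helpers wrapped around the normal allocator: charge the requested size
    against a global budget, allocate, and refund on failure or free. The
    following is a minimal, self-contained user-space sketch of that idea
    only; the 16 MiB budget and the ctl_alloc()/ctl_free() names are
    illustrative assumptions, not part of the patch (the kernel-side
    equivalents added below are drm_alloc_memctl(), drm_free_memctl() and
    the drm_ctl_* wrappers in drmP.h).

    /*
     * User-space model of the drm_ctl_* accounting pattern.
     * The budget value and helper names are illustrative only.
     */
    #include <stdio.h>
    #include <stdlib.h>

    static size_t accounted_bytes;                 /* bytes currently charged */
    static const size_t high_threshold = 16 << 20; /* assumed budget: 16 MiB  */

    static int alloc_memctl(size_t size)
    {
            if (accounted_bytes + size > high_threshold)
                    return -1;              /* over budget: refuse */
            accounted_bytes += size;
            return 0;
    }

    static void free_memctl(size_t size)
    {
            accounted_bytes -= size;
    }

    /* Mirrors drm_ctl_alloc(): charge first, undo the charge if the
     * underlying allocation fails. */
    static void *ctl_alloc(size_t size)
    {
            void *ret;

            if (alloc_memctl(size))
                    return NULL;
            ret = malloc(size);
            if (!ret)
                    free_memctl(size);
            return ret;
    }

    static void ctl_free(void *pt, size_t size)
    {
            free(pt);
            free_memctl(size);
    }

    int main(void)
    {
            void *buf = ctl_alloc(1 << 20);  /* charge 1 MiB */

            printf("in use after alloc: %zu bytes\n", accounted_bytes);
            ctl_free(buf, 1 << 20);
            printf("in use after free:  %zu bytes\n", accounted_bytes);
            return 0;
    }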
Signed-off-by: Thomas Hellstrom Signed-off-by: Dave Airlie commit 3da7b481df219f2260d8cd212bc706288dbafe11 Author: Dave Airlie Date: Mon Nov 5 13:07:28 2007 +1000 drm: move two function extern into the correct block commit 8c24f742bbb2f1868c02617c4d0048c2110d0999 Author: Dave Airlie Date: Mon Nov 5 12:50:58 2007 +1000 drm: run cleanfile across drm tree Signed-off-by: Dave Airlie commit 70fc277c4aed5c7879a1071f97ed1898f048fb3c Author: Dave Airlie Date: Mon Nov 5 12:37:41 2007 +1000 drm: some minor cleanups and changes to make memory manager merging easier. Signed-off-by: Dave Airlie drivers/char/drm/Kconfig | 9 +- drivers/char/drm/Makefile | 7 +- drivers/char/drm/README.drm | 1 - drivers/char/drm/drm.h | 289 +++++ drivers/char/drm/drmP.h | 149 ++- drivers/char/drm/drm_agpsupport.c | 164 +++ drivers/char/drm/drm_bo.c | 2525 +++++++++++++++++++++++++++++++++++++ drivers/char/drm/drm_bo_lock.c | 175 +++ drivers/char/drm/drm_bo_move.c | 576 +++++++++ drivers/char/drm/drm_bufs.c | 10 +- drivers/char/drm/drm_context.c | 2 +- drivers/char/drm/drm_drv.c | 62 +- drivers/char/drm/drm_fence.c | 847 +++++++++++++ drivers/char/drm/drm_fops.c | 53 +- drivers/char/drm/drm_hashtab.c | 5 +- drivers/char/drm/drm_hashtab.h | 1 - drivers/char/drm/drm_ioctl.c | 2 +- drivers/char/drm/drm_memory.c | 70 +- drivers/char/drm/drm_mm.c | 13 +- drivers/char/drm/drm_object.c | 293 +++++ drivers/char/drm/drm_objects.h | 608 +++++++++ drivers/char/drm/drm_os_linux.h | 4 +- drivers/char/drm/drm_pciids.h | 1 - drivers/char/drm/drm_proc.c | 94 ++- drivers/char/drm/drm_sarea.h | 2 +- drivers/char/drm/drm_stub.c | 18 +- drivers/char/drm/drm_ttm.c | 418 ++++++ drivers/char/drm/drm_vm.c | 201 +++- drivers/char/drm/i810_dma.c | 6 +- drivers/char/drm/i810_drv.h | 50 +- drivers/char/drm/i830_dma.c | 2 +- drivers/char/drm/i830_drm.h | 8 +- drivers/char/drm/i830_drv.h | 48 +- drivers/char/drm/i830_irq.c | 2 +- drivers/char/drm/i915_buffer.c | 175 +++ drivers/char/drm/i915_dma.c | 439 +++++++- drivers/char/drm/i915_drm.h | 48 + drivers/char/drm/i915_drv.c | 2 +- drivers/char/drm/i915_drv.h | 112 ++- drivers/char/drm/i915_fence.c | 158 +++ drivers/char/drm/i915_irq.c | 72 +- drivers/char/drm/i915_mem.c | 3 +- drivers/char/drm/mga_dma.c | 6 +- drivers/char/drm/mga_drv.h | 112 +- drivers/char/drm/mga_state.c | 10 +- drivers/char/drm/r128_cce.c | 2 +- drivers/char/drm/r128_drv.h | 2 +- drivers/char/drm/r300_cmdbuf.c | 36 +- drivers/char/drm/r300_reg.h | 32 +- drivers/char/drm/radeon_cp.c | 8 +- drivers/char/drm/radeon_drm.h | 12 +- drivers/char/drm/radeon_drv.h | 12 +- drivers/char/drm/savage_state.c | 6 +- drivers/char/drm/sis_mm.c | 2 +- drivers/char/drm/via_dmablit.c | 184 ++-- drivers/char/drm/via_dmablit.h | 84 +- drivers/char/drm/via_drm.h | 4 +- drivers/char/drm/via_drv.c | 2 +- drivers/char/drm/via_map.c | 1 - drivers/char/drm/via_mm.c | 2 +- 60 files changed, 7788 insertions(+), 453 deletions(-) diff --git a/drivers/char/drm/Kconfig b/drivers/char/drm/Kconfig index ba3058d..610d6fd 100644 --- a/drivers/char/drm/Kconfig +++ b/drivers/char/drm/Kconfig @@ -38,7 +38,7 @@ config DRM_RADEON Choose this option if you have an ATI Radeon graphics card. There are both PCI and AGP versions. You don't need to choose this to run the Radeon in plain VGA mode. - + If M is selected, the module will be called radeon. config DRM_I810 @@ -71,9 +71,9 @@ config DRM_I915 852GM, 855GM 865G or 915G integrated graphics. If M is selected, the module will be called i915. AGP support is required for this driver to work. 
This driver is used by the Intel driver in X.org 6.8 and - XFree86 4.4 and above. If unsure, build this and i830 as modules and + XFree86 4.4 and above. If unsure, build this and i830 as modules and the X server will load the correct one. - + endchoice config DRM_MGA @@ -88,7 +88,7 @@ config DRM_SIS tristate "SiS video cards" depends on DRM && AGP help - Choose this option if you have a SiS 630 or compatible video + Choose this option if you have a SiS 630 or compatible video chipset. If M is selected the module will be called sis. AGP support is required for this driver to work. @@ -105,4 +105,3 @@ config DRM_SAVAGE help Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister chipset. If M is selected the module will be called savage. - diff --git a/drivers/char/drm/Makefile b/drivers/char/drm/Makefile index 6915a05..85c4f9e 100644 --- a/drivers/char/drm/Makefile +++ b/drivers/char/drm/Makefile @@ -6,14 +6,15 @@ drm-objs := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \ drm_drv.o drm_fops.o drm_ioctl.o drm_irq.o \ drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \ drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \ - drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o + drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o drm_object.o \ + drm_fence.o drm_ttm.o drm_bo.o drm_bo_move.o drm_bo_lock.o tdfx-objs := tdfx_drv.o r128-objs := r128_drv.o r128_cce.o r128_state.o r128_irq.o mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o i810-objs := i810_drv.o i810_dma.o i830-objs := i830_drv.o i830_dma.o i830_irq.o -i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o +i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o i915_buffer.o radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o sis-objs := sis_drv.o sis_mm.o savage-objs := savage_drv.o savage_bci.o savage_state.o @@ -38,5 +39,3 @@ obj-$(CONFIG_DRM_I915) += i915.o obj-$(CONFIG_DRM_SIS) += sis.o obj-$(CONFIG_DRM_SAVAGE)+= savage.o obj-$(CONFIG_DRM_VIA) +=via.o - - diff --git a/drivers/char/drm/README.drm b/drivers/char/drm/README.drm index af74cd7..b5b3327 100644 --- a/drivers/char/drm/README.drm +++ b/drivers/char/drm/README.drm @@ -41,4 +41,3 @@ For specific information about kernel-level support, see: A Security Analysis of the Direct Rendering Infrastructure http://dri.sourceforge.net/doc/security_low_level.html - diff --git a/drivers/char/drm/drm.h b/drivers/char/drm/drm.h index 82fb3d0..e63641b 100644 --- a/drivers/char/drm/drm.h +++ b/drivers/char/drm/drm.h @@ -190,6 +190,7 @@ enum drm_map_type { _DRM_AGP = 3, /**< AGP/GART */ _DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */ _DRM_CONSISTENT = 5, /**< Consistent memory for PCI DMA */ + _DRM_TTM = 6 }; /** @@ -572,6 +573,271 @@ struct drm_set_version { int drm_dd_minor; }; +#define DRM_FENCE_FLAG_EMIT 0x00000001 +#define DRM_FENCE_FLAG_SHAREABLE 0x00000002 +#define DRM_FENCE_FLAG_WAIT_LAZY 0x00000004 +#define DRM_FENCE_FLAG_WAIT_IGNORE_SIGNALS 0x00000008 +#define DRM_FENCE_FLAG_NO_USER 0x00000010 + +/* Reserved for driver use */ +#define DRM_FENCE_MASK_DRIVER 0xFF000000 + +#define DRM_FENCE_TYPE_EXE 0x00000001 + +struct drm_fence_arg { + unsigned int handle; + unsigned int fence_class; + unsigned int type; + unsigned int flags; + unsigned int signaled; + unsigned int error; + unsigned int sequence; + unsigned int pad64; + uint64_t expand_pad[2]; /*Future expansion */ +}; + +/* Buffer permissions, referring to how the GPU uses the buffers. 
+ * these translate to fence types used for the buffers. + * Typically a texture buffer is read, A destination buffer is write and + * a command (batch-) buffer is exe. Can be or-ed together. + */ + +#define DRM_BO_FLAG_READ (1ULL << 0) +#define DRM_BO_FLAG_WRITE (1ULL << 1) +#define DRM_BO_FLAG_EXE (1ULL << 2) + +/* + * Status flags. Can be read to determine the actual state of a buffer. + * Can also be set in the buffer mask before validation. + */ + +/* + * Mask: Never evict this buffer. Not even with force. + * This type of buffer is only available to root and must be manually + * removed before buffer manager shutdown or lock. + * Flags: Acknowledge + */ +#define DRM_BO_FLAG_NO_EVICT (1ULL << 4) + +/* + * Mask: Require that the buffer is placed in mappable memory when validated. + * If not set the buffer may or may not be in mappable memory when validated. + * Flags: If set, the buffer is in mappable memory. + */ +#define DRM_BO_FLAG_MAPPABLE (1ULL << 5) + +/* Mask: The buffer should be shareable with other processes. + * Flags: The buffer is shareable with other processes. + */ +#define DRM_BO_FLAG_SHAREABLE (1ULL << 6) + +/* Mask: If set, place the buffer in cache-coherent memory if available. + * If clear, never place the buffer in cache coherent memory if validated. + * Flags: The buffer is currently in cache-coherent memory. + */ +#define DRM_BO_FLAG_CACHED (1ULL << 7) + +/* Mask: Make sure that every time this buffer is validated, + * it ends up on the same location provided that the memory mask + * is the same. + * The buffer will also not be evicted when claiming space for + * other buffers. Basically a pinned buffer but it may be thrown out as + * part of buffer manager shutdown or locking. + * Flags: Acknowledge. + */ +#define DRM_BO_FLAG_NO_MOVE (1ULL << 8) + +/* Mask: Make sure the buffer is in cached memory when mapped + * Flags: Acknowledge. + * Buffers allocated with this flag should not be used for suballocators + * This type may have issues on CPUs with over-aggressive caching + * http://marc.info/?l=linux-kernel&m=102376926732464&w=2 + */ +#define DRM_BO_FLAG_CACHED_MAPPED (1ULL << 19) + + +/* Mask: Force DRM_BO_FLAG_CACHED flag strictly also if it is set. + * Flags: Acknowledge. + */ +#define DRM_BO_FLAG_FORCE_CACHING (1ULL << 13) + +/* + * Mask: Force DRM_BO_FLAG_MAPPABLE flag strictly also if it is clear. + * Flags: Acknowledge. + */ +#define DRM_BO_FLAG_FORCE_MAPPABLE (1ULL << 14) +#define DRM_BO_FLAG_TILE (1ULL << 15) + +/* + * Memory type flags that can be or'ed together in the mask, but only + * one appears in flags. + */ + +/* System memory */ +#define DRM_BO_FLAG_MEM_LOCAL (1ULL << 24) +/* Translation table memory */ +#define DRM_BO_FLAG_MEM_TT (1ULL << 25) +/* Vram memory */ +#define DRM_BO_FLAG_MEM_VRAM (1ULL << 26) +/* Up to the driver to define. 
*/ +#define DRM_BO_FLAG_MEM_PRIV0 (1ULL << 27) +#define DRM_BO_FLAG_MEM_PRIV1 (1ULL << 28) +#define DRM_BO_FLAG_MEM_PRIV2 (1ULL << 29) +#define DRM_BO_FLAG_MEM_PRIV3 (1ULL << 30) +#define DRM_BO_FLAG_MEM_PRIV4 (1ULL << 31) +/* We can add more of these now with a 64-bit flag type */ + +/* Memory flag mask */ +#define DRM_BO_MASK_MEM 0x00000000FF000000ULL +#define DRM_BO_MASK_MEMTYPE 0x00000000FF0800A0ULL + +/* Driver-private flags */ +#define DRM_BO_MASK_DRIVER 0xFFFF000000000000ULL + +/* Don't block on validate and map */ +#define DRM_BO_HINT_DONT_BLOCK 0x00000002 +/* Don't place this buffer on the unfenced list.*/ +#define DRM_BO_HINT_DONT_FENCE 0x00000004 +#define DRM_BO_HINT_WAIT_LAZY 0x00000008 + + +#define DRM_BO_INIT_MAGIC 0xfe769812 +#define DRM_BO_INIT_MAJOR 1 +#define DRM_BO_INIT_MINOR 0 +#define DRM_BO_INIT_PATCH 0 + + +struct drm_bo_info_req { + uint64_t mask; + uint64_t flags; + unsigned int handle; + unsigned int hint; + unsigned int fence_class; + unsigned int desired_tile_stride; + unsigned int tile_info; + unsigned int pad64; +}; + +struct drm_bo_create_req { + uint64_t mask; + uint64_t size; + uint64_t buffer_start; + unsigned int hint; + unsigned int page_alignment; +}; + + +/* + * Reply flags + */ + +#define DRM_BO_REP_BUSY 0x00000001 + +struct drm_bo_info_rep { + uint64_t flags; + uint64_t mask; + uint64_t size; + uint64_t offset; + uint64_t arg_handle; + uint64_t buffer_start; + unsigned int handle; + unsigned int fence_flags; + unsigned int rep_flags; + unsigned int page_alignment; + unsigned int desired_tile_stride; + unsigned int hw_tile_stride; + unsigned int tile_info; + unsigned int pad64; + uint64_t expand_pad[4]; /*Future expansion */ +}; + +struct drm_bo_arg_rep { + struct drm_bo_info_rep bo_info; + int ret; + unsigned int pad64; +}; + +struct drm_bo_create_arg { + union { + struct drm_bo_create_req req; + struct drm_bo_info_rep rep; + } d; +}; + +struct drm_bo_handle_arg { + unsigned int handle; +}; + +struct drm_bo_reference_info_arg { + union { + struct drm_bo_handle_arg req; + struct drm_bo_info_rep rep; + } d; +}; + +struct drm_bo_map_wait_idle_arg { + union { + struct drm_bo_info_req req; + struct drm_bo_info_rep rep; + } d; +}; + +struct drm_bo_op_req { + enum { + drm_bo_validate, + drm_bo_fence, + drm_bo_ref_fence, + } op; + unsigned int arg_handle; + struct drm_bo_info_req bo_req; +}; + + +struct drm_bo_op_arg { + uint64_t next; + union { + struct drm_bo_op_req req; + struct drm_bo_arg_rep rep; + } d; + int handled; + unsigned int pad64; +}; + + +#define DRM_BO_MEM_LOCAL 0 +#define DRM_BO_MEM_TT 1 +#define DRM_BO_MEM_VRAM 2 +#define DRM_BO_MEM_PRIV0 3 +#define DRM_BO_MEM_PRIV1 4 +#define DRM_BO_MEM_PRIV2 5 +#define DRM_BO_MEM_PRIV3 6 +#define DRM_BO_MEM_PRIV4 7 + +#define DRM_BO_MEM_TYPES 8 /* For now. 
*/ + +#define DRM_BO_LOCK_UNLOCK_BM (1 << 0) +#define DRM_BO_LOCK_IGNORE_NO_EVICT (1 << 1) + +struct drm_bo_version_arg { + uint32_t major; + uint32_t minor; + uint32_t patchlevel; +}; + +struct drm_mm_type_arg { + unsigned int mem_type; + unsigned int lock_flags; +}; + +struct drm_mm_init_arg { + unsigned int magic; + unsigned int major; + unsigned int minor; + unsigned int mem_type; + uint64_t p_offset; + uint64_t p_size; +}; + #define DRM_IOCTL_BASE 'd' #define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr) #define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type) @@ -634,6 +900,29 @@ struct drm_set_version { #define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw) +#define DRM_IOCTL_MM_INIT DRM_IOWR(0xc0, struct drm_mm_init_arg) +#define DRM_IOCTL_MM_TAKEDOWN DRM_IOWR(0xc1, struct drm_mm_type_arg) +#define DRM_IOCTL_MM_LOCK DRM_IOWR(0xc2, struct drm_mm_type_arg) +#define DRM_IOCTL_MM_UNLOCK DRM_IOWR(0xc3, struct drm_mm_type_arg) + +#define DRM_IOCTL_FENCE_CREATE DRM_IOWR(0xc4, struct drm_fence_arg) +#define DRM_IOCTL_FENCE_REFERENCE DRM_IOWR(0xc6, struct drm_fence_arg) +#define DRM_IOCTL_FENCE_UNREFERENCE DRM_IOWR(0xc7, struct drm_fence_arg) +#define DRM_IOCTL_FENCE_SIGNALED DRM_IOWR(0xc8, struct drm_fence_arg) +#define DRM_IOCTL_FENCE_FLUSH DRM_IOWR(0xc9, struct drm_fence_arg) +#define DRM_IOCTL_FENCE_WAIT DRM_IOWR(0xca, struct drm_fence_arg) +#define DRM_IOCTL_FENCE_EMIT DRM_IOWR(0xcb, struct drm_fence_arg) +#define DRM_IOCTL_FENCE_BUFFERS DRM_IOWR(0xcc, struct drm_fence_arg) + +#define DRM_IOCTL_BO_CREATE DRM_IOWR(0xcd, struct drm_bo_create_arg) +#define DRM_IOCTL_BO_MAP DRM_IOWR(0xcf, struct drm_bo_map_wait_idle_arg) +#define DRM_IOCTL_BO_UNMAP DRM_IOWR(0xd0, struct drm_bo_handle_arg) +#define DRM_IOCTL_BO_REFERENCE DRM_IOWR(0xd1, struct drm_bo_reference_info_arg) +#define DRM_IOCTL_BO_UNREFERENCE DRM_IOWR(0xd2, struct drm_bo_handle_arg) +#define DRM_IOCTL_BO_SETSTATUS DRM_IOWR(0xd3, struct drm_bo_map_wait_idle_arg) +#define DRM_IOCTL_BO_INFO DRM_IOWR(0xd4, struct drm_bo_reference_info_arg) +#define DRM_IOCTL_BO_WAIT_IDLE DRM_IOWR(0xd5, struct drm_bo_map_wait_idle_arg) +#define DRM_IOCTL_BO_VERSION DRM_IOR(0xd6, struct drm_bo_version_arg) /** * Device specific ioctls should only be in their respective headers * The device specific ioctl range is from 0x40 to 0x99. diff --git a/drivers/char/drm/drmP.h b/drivers/char/drm/drmP.h index dde02a1..6152474 100644 --- a/drivers/char/drm/drmP.h +++ b/drivers/char/drm/drmP.h @@ -55,6 +55,7 @@ #include #include /* For (un)lock_kernel */ #include +#include #include #include #if defined(__alpha__) || defined(__powerpc__) @@ -66,6 +67,7 @@ #ifdef CONFIG_MTRR #include #endif +#include #if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE) #include #include @@ -144,9 +146,22 @@ struct drm_device; #define DRM_MEM_CTXLIST 21 #define DRM_MEM_MM 22 #define DRM_MEM_HASHTAB 23 +#define DRM_MEM_OBJECTS 24 +#define DRM_MEM_FENCE 25 +#define DRM_MEM_TTM 26 +#define DRM_MEM_BUFOBJ 27 #define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8) #define DRM_MAP_HASH_OFFSET 0x10000000 +#define DRM_MAP_HASH_ORDER 12 +#define DRM_OBJECT_HASH_ORDER 12 +#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1) +#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16) +/* + * This should be small enough to allow the use of kmalloc for hash tables + * instead of vmalloc. 
+ */ +#define DRM_FILE_HASH_ORDER 8 /*@}*/ @@ -292,7 +307,6 @@ struct drm_magic_entry { struct list_head head; struct drm_hash_item hash_item; struct drm_file *priv; - struct drm_magic_entry *next; }; struct drm_vma_entry { @@ -375,6 +389,12 @@ struct drm_buf_entry { struct drm_freelist freelist; }; +enum drm_ref_type { + _DRM_REF_USE = 0, + _DRM_REF_TYPE1, + _DRM_NO_REF_TYPES +}; + /** File private data */ struct drm_file { int authenticated; @@ -388,8 +408,16 @@ struct drm_file { struct drm_head *head; int remove_auth_on_close; unsigned long lock_count; - void *driver_priv; + /* + * The user object hash table is global and resides in the + * drm_device structure. We protect the lists and hash tables with the + * device struct_mutex. A bit coarse-grained but probably the best + * option. + */ + struct list_head refd_objects; + struct drm_open_hash refd_object_hash[_DRM_NO_REF_TYPES]; struct file *filp; + void *driver_priv; }; /** Wait queue */ @@ -401,11 +429,9 @@ struct drm_queue { wait_queue_head_t read_queue; /**< Processes waiting on block_read */ atomic_t block_write; /**< Queue blocked for writes */ wait_queue_head_t write_queue; /**< Processes waiting on block_write */ -#if 1 atomic_t total_queued; /**< Total queued statistic */ atomic_t total_flushed; /**< Total flushes statistic */ atomic_t total_locks; /**< Total locks statistics */ -#endif enum drm_ctx_flags flags; /**< Context preserving and 2D-only */ struct drm_waitlist waitlist; /**< Pending buffers */ wait_queue_head_t flush_queue; /**< Processes waiting until flush */ @@ -416,7 +442,8 @@ struct drm_queue { */ struct drm_lock_data { struct drm_hw_lock *hw_lock; /**< Hardware lock */ - struct drm_file *file_priv; /**< File descr of lock holder (0=kernel) */ + /** Private of lock holder's file (NULL=kernel) */ + struct drm_file *file_priv; wait_queue_head_t lock_queue; /**< Queue of blocked processes */ unsigned long lock_time; /**< Time of last lock in jiffies */ spinlock_t spinlock; @@ -491,6 +518,27 @@ struct drm_sigdata { struct drm_hw_lock *lock; }; + +/* + * Generic memory manager structs + */ + +struct drm_mm_node { + struct list_head fl_entry; + struct list_head ml_entry; + int free; + unsigned long start; + unsigned long size; + struct drm_mm *mm; + void *private; +}; + +struct drm_mm { + struct list_head fl_entry; + struct list_head ml_entry; +}; + + /** * Mappings list */ @@ -498,7 +546,8 @@ struct drm_map_list { struct list_head head; /**< list head */ struct drm_hash_item hash; struct drm_map *map; /**< mapping */ - unsigned int user_token; + uint64_t user_token; + struct drm_mm_node *file_offset_node; }; typedef struct drm_map drm_local_map_t; @@ -536,23 +585,7 @@ struct drm_ati_pcigart_info { int table_size; }; -/* - * Generic memory manager structs - */ -struct drm_mm_node { - struct list_head fl_entry; - struct list_head ml_entry; - int free; - unsigned long start; - unsigned long size; - struct drm_mm *mm; - void *private; -}; - -struct drm_mm { - struct list_head fl_entry; - struct list_head ml_entry; -}; +#include "drm_objects.h" /** * DRM driver structure. 
This structure represent the common code for @@ -609,6 +642,9 @@ struct drm_driver { void (*set_version) (struct drm_device *dev, struct drm_set_version *sv); + struct drm_fence_driver *fence_driver; + struct drm_bo_driver *bo_driver; + int major; int minor; int patchlevel; @@ -683,6 +719,10 @@ struct drm_device { struct list_head maplist; /**< Linked list of regions */ int map_count; /**< Number of mappable regions */ struct drm_open_hash map_hash; /**< User token hash table for maps */ + struct drm_mm offset_manager; /**< User token manager */ + struct drm_open_hash object_hash; /**< User token hash table for objects */ + struct address_space *dev_mapping; /**< For unmap_mapping_range() */ + struct page *ttm_dummy_page; /** \name Context handle management */ /*@{ */ @@ -750,7 +790,6 @@ struct drm_device { struct pci_controller *hose; #endif struct drm_sg_mem *sg; /**< Scatter gather memory */ - unsigned long *ctx_bitmap; /**< context bitmap */ void *dev_private; /**< device private data */ struct drm_sigdata sigdata; /**< For block_all_signals */ sigset_t sigmask; @@ -760,6 +799,9 @@ struct drm_device { unsigned int agp_buffer_token; struct drm_head primary; /**< primary screen head */ + struct drm_fence_manager fm; + struct drm_buffer_manager bm; + /** \name Drawable information */ /*@{ */ spinlock_t drw_lock; @@ -767,6 +809,15 @@ struct drm_device { /*@} */ }; +#if __OS_HAS_AGP +struct drm_agp_ttm_backend { + struct drm_ttm_backend backend; + DRM_AGP_MEM *mem; + struct agp_bridge_data *bridge; + int populated; +}; +#endif + static __inline__ int drm_core_check_feature(struct drm_device *dev, int feature) { @@ -847,6 +898,8 @@ extern int drm_release(struct inode *inode, struct file *filp); /* Mapping support (drm_vm.h) */ extern int drm_mmap(struct file *filp, struct vm_area_struct *vma); +extern unsigned long drm_core_get_map_ofs(struct drm_map * map); +extern unsigned long drm_core_get_reg_ofs(struct drm_device *dev); extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait); /* Memory management support (drm_memory.h) */ @@ -861,6 +914,15 @@ extern int drm_free_agp(DRM_AGP_MEM * handle, int pages); extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start); extern int drm_unbind_agp(DRM_AGP_MEM * handle); +extern void drm_free_memctl(size_t size); +extern int drm_alloc_memctl(size_t size); +extern void drm_query_memctl(uint64_t *cur_used, + uint64_t *low_threshold, + uint64_t *high_threshold); +extern void drm_init_memctl(size_t low_threshold, + size_t high_threshold, + size_t unit_size); + /* Misc. 
IOCTL support (drm_ioctl.h) */ extern int drm_irq_by_busid(struct drm_device *dev, void *data, struct drm_file *file_priv); @@ -1018,6 +1080,7 @@ extern DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge, size extern int drm_agp_free_memory(DRM_AGP_MEM * handle); extern int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start); extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle); +extern struct drm_ttm_backend *drm_agp_init_ttm(struct drm_device *dev); /* Stub support (drm_stub.h) */ extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent, @@ -1073,7 +1136,7 @@ extern void drm_sysfs_device_remove(struct class_device *class_dev); extern struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent, unsigned long size, unsigned alignment); -void drm_mm_put_block(struct drm_mm_node * cur); +extern void drm_mm_put_block(struct drm_mm_node * cur); extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, unsigned long size, unsigned alignment, int best_match); extern int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size); @@ -1142,10 +1205,40 @@ extern void drm_free(void *pt, size_t size, int area); extern void *drm_calloc(size_t nmemb, size_t size, int area); #endif -/*@}*/ +/* + * Accounting variants of standard calls. + */ -extern unsigned long drm_core_get_map_ofs(struct drm_map * map); -extern unsigned long drm_core_get_reg_ofs(struct drm_device *dev); +static inline void *drm_ctl_alloc(size_t size, int area) +{ + void *ret; + if (drm_alloc_memctl(size)) + return NULL; + ret = drm_alloc(size, area); + if (!ret) + drm_free_memctl(size); + return ret; +} + +static inline void *drm_ctl_calloc(size_t nmemb, size_t size, int area) +{ + void *ret; + + if (drm_alloc_memctl(nmemb*size)) + return NULL; + ret = drm_calloc(nmemb, size, area); + if (!ret) + drm_free_memctl(nmemb*size); + return ret; +} + +static inline void drm_ctl_free(void *pt, size_t size, int area) +{ + drm_free(pt, size, area); + drm_free_memctl(size); +} + +/*@}*/ #endif /* __KERNEL__ */ #endif diff --git a/drivers/char/drm/drm_agpsupport.c b/drivers/char/drm/drm_agpsupport.c index 214f4fb..cb690ae 100644 --- a/drivers/char/drm/drm_agpsupport.c +++ b/drivers/char/drm/drm_agpsupport.c @@ -453,4 +453,168 @@ int drm_agp_unbind_memory(DRM_AGP_MEM * handle) return agp_unbind_memory(handle); } + +/* + * AGP ttm backend interface. + */ + +#ifndef AGP_USER_TYPES +#define AGP_USER_TYPES (1 << 16) +#define AGP_USER_MEMORY (AGP_USER_TYPES) +#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1) +#endif +#define AGP_REQUIRED_MAJOR 0 +#define AGP_REQUIRED_MINOR 102 + +static int drm_agp_needs_unbind_cache_adjust(struct drm_ttm_backend *backend) { + return ((backend->flags & DRM_BE_FLAG_BOUND_CACHED) ? 
0 : 1); +} + + +static int drm_agp_populate(struct drm_ttm_backend *backend, unsigned long num_pages, + struct page **pages) { + + struct drm_agp_ttm_backend *agp_be = + container_of(backend, struct drm_agp_ttm_backend, backend); + struct page **cur_page, **last_page = pages + num_pages; + DRM_AGP_MEM *mem; + + if (drm_alloc_memctl(num_pages * sizeof(void *))) + return -1; + + DRM_DEBUG("drm_agp_populate_ttm\n"); + mem = drm_agp_allocate_memory(agp_be->bridge, num_pages, AGP_USER_MEMORY); + if (!mem) { + drm_free_memctl(num_pages *sizeof(void *)); + return -1; + } + + DRM_DEBUG("Current page count is %ld\n", (long) mem->page_count); + mem->page_count = 0; + for (cur_page = pages; cur_page < last_page; ++cur_page) { + mem->memory[mem->page_count++] = phys_to_gart(page_to_phys(*cur_page)); + } + agp_be->mem = mem; + return 0; +} + +static int drm_agp_bind_ttm(struct drm_ttm_backend *backend, + struct drm_bo_mem_reg *bo_mem) +{ + struct drm_agp_ttm_backend *agp_be = + container_of(backend, struct drm_agp_ttm_backend, backend); + DRM_AGP_MEM *mem = agp_be->mem; + int ret; + int snooped = (bo_mem->flags & DRM_BO_FLAG_CACHED) && !(bo_mem->flags & DRM_BO_FLAG_CACHED_MAPPED); + + DRM_DEBUG("drm_agp_bind_ttm\n"); + mem->is_flushed = TRUE; + mem->type = AGP_USER_MEMORY; + /* CACHED MAPPED implies not snooped memory */ + if (snooped) + mem->type = AGP_USER_CACHED_MEMORY; + + ret = drm_agp_bind_memory(mem, bo_mem->mm_node->start); + if (ret) { + DRM_ERROR("AGP Bind memory failed\n"); + } + DRM_FLAG_MASKED(backend->flags, (bo_mem->flags & DRM_BO_FLAG_CACHED) ? + DRM_BE_FLAG_BOUND_CACHED : 0, + DRM_BE_FLAG_BOUND_CACHED); + return ret; +} + +static int drm_agp_unbind_ttm(struct drm_ttm_backend *backend) { + + struct drm_agp_ttm_backend *agp_be = + container_of(backend, struct drm_agp_ttm_backend, backend); + + DRM_DEBUG("drm_agp_unbind_ttm\n"); + if (agp_be->mem->is_bound) + return drm_agp_unbind_memory(agp_be->mem); + else + return 0; +} + +static void drm_agp_clear_ttm(struct drm_ttm_backend *backend) { + + struct drm_agp_ttm_backend *agp_be = + container_of(backend, struct drm_agp_ttm_backend, backend); + DRM_AGP_MEM *mem = agp_be->mem; + + DRM_DEBUG("drm_agp_clear_ttm\n"); + if (mem) { + unsigned long num_pages = mem->page_count; + backend->func->unbind(backend); + agp_free_memory(mem); + drm_free_memctl(num_pages *sizeof(void *)); + } + agp_be->mem = NULL; +} + +static void drm_agp_destroy_ttm(struct drm_ttm_backend *backend) { + + struct drm_agp_ttm_backend *agp_be; + + if (backend) { + DRM_DEBUG("drm_agp_destroy_ttm\n"); + agp_be = container_of(backend, struct drm_agp_ttm_backend, backend); + if (agp_be) { + if (agp_be->mem) { + backend->func->clear(backend); + } + drm_ctl_free(agp_be, sizeof(*agp_be), DRM_MEM_TTM); + } + } +} + +static struct drm_ttm_backend_func agp_ttm_backend = +{ + .needs_ub_cache_adjust = drm_agp_needs_unbind_cache_adjust, + .populate = drm_agp_populate, + .clear = drm_agp_clear_ttm, + .bind = drm_agp_bind_ttm, + .unbind = drm_agp_unbind_ttm, + .destroy = drm_agp_destroy_ttm, +}; + +struct drm_ttm_backend *drm_agp_init_ttm(struct drm_device *dev) +{ + + struct drm_agp_ttm_backend *agp_be; + struct agp_kern_info *info; + + if (!dev->agp) { + DRM_ERROR("AGP is not initialized.\n"); + return NULL; + } + info = &dev->agp->agp_info; + + if (info->version.major != AGP_REQUIRED_MAJOR || + info->version.minor < AGP_REQUIRED_MINOR) { + DRM_ERROR("Wrong agpgart version %d.%d\n" + "\tYou need at least version %d.%d.\n", + info->version.major, + info->version.minor, + 
AGP_REQUIRED_MAJOR, + AGP_REQUIRED_MINOR); + return NULL; + } + + + agp_be = drm_ctl_calloc(1, sizeof(*agp_be), DRM_MEM_TTM); + if (!agp_be) + return NULL; + + agp_be->mem = NULL; + + agp_be->bridge = dev->agp->bridge; + agp_be->populated = FALSE; + agp_be->backend.func = &agp_ttm_backend; + agp_be->backend.dev = dev; + + return &agp_be->backend; +} +EXPORT_SYMBOL(drm_agp_init_ttm); + #endif /* __OS_HAS_AGP */ diff --git a/drivers/char/drm/drm_bo.c b/drivers/char/drm/drm_bo.c new file mode 100644 index 0000000..a2cd6c1 --- /dev/null +++ b/drivers/char/drm/drm_bo.c @@ -0,0 +1,2525 @@ +/************************************************************************** + * + * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +/* + * Authors: Thomas Hellstrom + */ + +#include "drmP.h" + +/* + * Locking may look a bit complicated but isn't really: + * + * The buffer usage atomic_t needs to be protected by dev->struct_mutex + * when there is a chance that it can be zero before or after the operation. + * + * dev->struct_mutex also protects all lists and list heads, + * Hash tables and hash heads. + * + * bo->mutex protects the buffer object itself excluding the usage field. + * bo->mutex does also protect the buffer list heads, so to manipulate those, + * we need both the bo->mutex and the dev->struct_mutex. + * + * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal + * is a bit complicated. When dev->struct_mutex is released to grab bo->mutex, + * the list traversal will, in general, need to be restarted. + * + */ + +static void drm_bo_destroy_locked(struct drm_buffer_object *bo); +static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo); +static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo); +static void drm_bo_unmap_virtual(struct drm_buffer_object *bo); + +static inline uint64_t drm_bo_type_flags(unsigned type) +{ + return (1ULL << (24 + type)); +} + +/* + * bo locked. dev->struct_mutex locked. 
+ */ + +void drm_bo_add_to_pinned_lru(struct drm_buffer_object *bo) +{ + struct drm_mem_type_manager *man; + + DRM_ASSERT_LOCKED(&bo->dev->struct_mutex); + DRM_ASSERT_LOCKED(&bo->mutex); + + man = &bo->dev->bm.man[bo->pinned_mem_type]; + list_add_tail(&bo->pinned_lru, &man->pinned); +} + +void drm_bo_add_to_lru(struct drm_buffer_object *bo) +{ + struct drm_mem_type_manager *man; + + DRM_ASSERT_LOCKED(&bo->dev->struct_mutex); + + if (!(bo->mem.mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT)) + || bo->mem.mem_type != bo->pinned_mem_type) { + man = &bo->dev->bm.man[bo->mem.mem_type]; + list_add_tail(&bo->lru, &man->lru); + } else { + INIT_LIST_HEAD(&bo->lru); + } +} + +static int drm_bo_vm_pre_move(struct drm_buffer_object *bo, int old_is_pci) +{ + if (!bo->map_list.map) + return 0; + + drm_bo_unmap_virtual(bo); + return 0; +} + +static void drm_bo_vm_post_move(struct drm_buffer_object *bo) +{ +} + +/* + * Call bo->mutex locked. + */ + +static int drm_bo_add_ttm(struct drm_buffer_object *bo) +{ + struct drm_device *dev = bo->dev; + int ret = 0; + + DRM_ASSERT_LOCKED(&bo->mutex); + bo->ttm = NULL; + + switch (bo->type) { + case drm_bo_type_dc: + case drm_bo_type_kernel: + bo->ttm = drm_ttm_init(dev, bo->num_pages << PAGE_SHIFT); + if (!bo->ttm) + ret = -ENOMEM; + break; + case drm_bo_type_user: + bo->ttm = drm_ttm_init(dev, bo->num_pages << PAGE_SHIFT); + if (!bo->ttm) + ret = -ENOMEM; + + ret = drm_ttm_set_user(bo->ttm, current, + bo->mem.mask & DRM_BO_FLAG_WRITE, + bo->buffer_start, + bo->num_pages, + dev->bm.dummy_read_page); + if (ret) + return ret; + + break; + default: + DRM_ERROR("Illegal buffer object type\n"); + ret = -EINVAL; + break; + } + + return ret; +} + +static int drm_bo_handle_move_mem(struct drm_buffer_object *bo, + struct drm_bo_mem_reg *mem, + int evict, int no_wait) +{ + struct drm_device *dev = bo->dev; + struct drm_buffer_manager *bm = &dev->bm; + int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem); + int new_is_pci = drm_mem_reg_is_pci(dev, mem); + struct drm_mem_type_manager *old_man = &bm->man[bo->mem.mem_type]; + struct drm_mem_type_manager *new_man = &bm->man[mem->mem_type]; + int ret = 0; + + if (old_is_pci || new_is_pci || + ((mem->flags ^ bo->mem.flags) & DRM_BO_FLAG_CACHED)) + ret = drm_bo_vm_pre_move(bo, old_is_pci); + if (ret) + return ret; + + /* + * Create and bind a ttm if required. 
+ */ + + if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm == NULL)) { + ret = drm_bo_add_ttm(bo); + if (ret) + goto out_err; + + if (mem->mem_type != DRM_BO_MEM_LOCAL) { + ret = drm_bind_ttm(bo->ttm, mem); + if (ret) + goto out_err; + } + } + + if ((bo->mem.mem_type == DRM_BO_MEM_LOCAL) && bo->ttm == NULL) { + + struct drm_bo_mem_reg *old_mem = &bo->mem; + uint64_t save_flags = old_mem->flags; + uint64_t save_mask = old_mem->mask; + + *old_mem = *mem; + mem->mm_node = NULL; + old_mem->mask = save_mask; + DRM_FLAG_MASKED(save_flags, mem->flags, DRM_BO_MASK_MEMTYPE); + + } else if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && + !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) { + + ret = drm_bo_move_ttm(bo, evict, no_wait, mem); + + } else if (dev->driver->bo_driver->move) { + ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem); + + } else { + + ret = drm_bo_move_memcpy(bo, evict, no_wait, mem); + + } + + if (ret) + goto out_err; + + if (old_is_pci || new_is_pci) + drm_bo_vm_post_move(bo); + + if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) { + ret = + dev->driver->bo_driver->invalidate_caches(dev, + bo->mem.flags); + if (ret) + DRM_ERROR("Can not flush read caches\n"); + } + + DRM_FLAG_MASKED(bo->priv_flags, + (evict) ? _DRM_BO_FLAG_EVICTED : 0, + _DRM_BO_FLAG_EVICTED); + + if (bo->mem.mm_node) + bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) + + bm->man[bo->mem.mem_type].gpu_offset; + + + return 0; + +out_err: + if (old_is_pci || new_is_pci) + drm_bo_vm_post_move(bo); + + new_man = &bm->man[bo->mem.mem_type]; + if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) { + drm_ttm_unbind(bo->ttm); + drm_destroy_ttm(bo->ttm); + bo->ttm = NULL; + } + + return ret; +} + +/* + * Call bo->mutex locked. + * Wait until the buffer is idle. + */ + +int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int ignore_signals, + int no_wait) +{ + int ret; + + DRM_ASSERT_LOCKED(&bo->mutex); + + if (bo->fence) { + if (drm_fence_object_signaled(bo->fence, bo->fence_type, 0)) { + drm_fence_usage_deref_unlocked(&bo->fence); + return 0; + } + if (no_wait) + return -EBUSY; + + ret = drm_fence_object_wait(bo->fence, lazy, ignore_signals, + bo->fence_type); + if (ret) + return ret; + + drm_fence_usage_deref_unlocked(&bo->fence); + } + return 0; +} +EXPORT_SYMBOL(drm_bo_wait); + +static int drm_bo_expire_fence(struct drm_buffer_object *bo, int allow_errors) +{ + struct drm_device *dev = bo->dev; + struct drm_buffer_manager *bm = &dev->bm; + + if (bo->fence) { + if (bm->nice_mode) { + unsigned long _end = jiffies + 3 * DRM_HZ; + int ret; + do { + ret = drm_bo_wait(bo, 0, 1, 0); + if (ret && allow_errors) + return ret; + + } while (ret && !time_after_eq(jiffies, _end)); + + if (bo->fence) { + bm->nice_mode = 0; + DRM_ERROR("Detected GPU lockup or " + "fence driver was taken down. " + "Evicting buffer.\n"); + } + } + if (bo->fence) + drm_fence_usage_deref_unlocked(&bo->fence); + } + return 0; +} + +/* + * Call dev->struct_mutex locked. + * Attempts to remove all private references to a buffer by expiring its + * fence object and removing from lru lists and memory managers. 
+ */ + +static void drm_bo_cleanup_refs(struct drm_buffer_object *bo, int remove_all) +{ + struct drm_device *dev = bo->dev; + struct drm_buffer_manager *bm = &dev->bm; + + DRM_ASSERT_LOCKED(&dev->struct_mutex); + + atomic_inc(&bo->usage); + mutex_unlock(&dev->struct_mutex); + mutex_lock(&bo->mutex); + + DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); + + if (bo->fence && drm_fence_object_signaled(bo->fence, + bo->fence_type, 0)) + drm_fence_usage_deref_unlocked(&bo->fence); + + if (bo->fence && remove_all) + (void)drm_bo_expire_fence(bo, 0); + + mutex_lock(&dev->struct_mutex); + + if (!atomic_dec_and_test(&bo->usage)) + goto out; + + if (!bo->fence) { + list_del_init(&bo->lru); + if (bo->mem.mm_node) { + drm_mm_put_block(bo->mem.mm_node); + if (bo->pinned_node == bo->mem.mm_node) + bo->pinned_node = NULL; + bo->mem.mm_node = NULL; + } + list_del_init(&bo->pinned_lru); + if (bo->pinned_node) { + drm_mm_put_block(bo->pinned_node); + bo->pinned_node = NULL; + } + list_del_init(&bo->ddestroy); + mutex_unlock(&bo->mutex); + drm_bo_destroy_locked(bo); + return; + } + + if (list_empty(&bo->ddestroy)) { + drm_fence_object_flush(bo->fence, bo->fence_type); + list_add_tail(&bo->ddestroy, &bm->ddestroy); + schedule_delayed_work(&bm->wq, + ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100); + } + +out: + mutex_unlock(&bo->mutex); + return; +} + +/* + * Verify that refcount is 0 and that there are no internal references + * to the buffer object. Then destroy it. + */ + +static void drm_bo_destroy_locked(struct drm_buffer_object *bo) +{ + struct drm_device *dev = bo->dev; + struct drm_buffer_manager *bm = &dev->bm; + + DRM_ASSERT_LOCKED(&dev->struct_mutex); + + if (list_empty(&bo->lru) && bo->mem.mm_node == NULL && + list_empty(&bo->pinned_lru) && bo->pinned_node == NULL && + list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) { + if (bo->fence != NULL) { + DRM_ERROR("Fence was non-zero.\n"); + drm_bo_cleanup_refs(bo, 0); + return; + } + + + if (bo->ttm) { + drm_ttm_unbind(bo->ttm); + drm_destroy_ttm(bo->ttm); + bo->ttm = NULL; + } + + atomic_dec(&bm->count); + + drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ); + + return; + } + + /* + * Some stuff is still trying to reference the buffer object. + * Get rid of those references. + */ + + drm_bo_cleanup_refs(bo, 0); + + return; +} + +/* + * Call dev->struct_mutex locked. + */ + +static void drm_bo_delayed_delete(struct drm_device *dev, int remove_all) +{ + struct drm_buffer_manager *bm = &dev->bm; + + struct drm_buffer_object *entry, *nentry; + struct list_head *list, *next; + + list_for_each_safe(list, next, &bm->ddestroy) { + entry = list_entry(list, struct drm_buffer_object, ddestroy); + + nentry = NULL; + if (next != &bm->ddestroy) { + nentry = list_entry(next, struct drm_buffer_object, + ddestroy); + atomic_inc(&nentry->usage); + } + + drm_bo_cleanup_refs(entry, remove_all); + + if (nentry) + atomic_dec(&nentry->usage); + } +} + +static void drm_bo_delayed_workqueue(struct work_struct *work) +{ + struct drm_buffer_manager *bm = + container_of(work, struct drm_buffer_manager, wq.work); + struct drm_device *dev = container_of(bm, struct drm_device, bm); + + DRM_DEBUG("Delayed delete Worker\n"); + + mutex_lock(&dev->struct_mutex); + if (!bm->initialized) { + mutex_unlock(&dev->struct_mutex); + return; + } + drm_bo_delayed_delete(dev, 0); + if (bm->initialized && !list_empty(&bm->ddestroy)) { + schedule_delayed_work(&bm->wq, + ((DRM_HZ / 100) < 1) ? 
1 : DRM_HZ / 100); + } + mutex_unlock(&dev->struct_mutex); +} + +void drm_bo_usage_deref_locked(struct drm_buffer_object **bo) +{ + struct drm_buffer_object *tmp_bo = *bo; + bo = NULL; + + DRM_ASSERT_LOCKED(&tmp_bo->dev->struct_mutex); + + if (atomic_dec_and_test(&tmp_bo->usage)) + drm_bo_destroy_locked(tmp_bo); +} +EXPORT_SYMBOL(drm_bo_usage_deref_locked); + +static void drm_bo_base_deref_locked(struct drm_file *file_priv, + struct drm_user_object *uo) +{ + struct drm_buffer_object *bo = + drm_user_object_entry(uo, struct drm_buffer_object, base); + + DRM_ASSERT_LOCKED(&bo->dev->struct_mutex); + + drm_bo_takedown_vm_locked(bo); + drm_bo_usage_deref_locked(&bo); +} + +void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo) +{ + struct drm_buffer_object *tmp_bo = *bo; + struct drm_device *dev = tmp_bo->dev; + + *bo = NULL; + if (atomic_dec_and_test(&tmp_bo->usage)) { + mutex_lock(&dev->struct_mutex); + if (atomic_read(&tmp_bo->usage) == 0) + drm_bo_destroy_locked(tmp_bo); + mutex_unlock(&dev->struct_mutex); + } +} +EXPORT_SYMBOL(drm_bo_usage_deref_unlocked); + +void drm_putback_buffer_objects(struct drm_device *dev) +{ + struct drm_buffer_manager *bm = &dev->bm; + struct list_head *list = &bm->unfenced; + struct drm_buffer_object *entry, *next; + + mutex_lock(&dev->struct_mutex); + list_for_each_entry_safe(entry, next, list, lru) { + atomic_inc(&entry->usage); + mutex_unlock(&dev->struct_mutex); + + mutex_lock(&entry->mutex); + BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED)); + mutex_lock(&dev->struct_mutex); + + list_del_init(&entry->lru); + DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); + DRM_WAKEUP(&entry->event_queue); + + /* + * FIXME: Might want to put back on head of list + * instead of tail here. + */ + + drm_bo_add_to_lru(entry); + mutex_unlock(&entry->mutex); + drm_bo_usage_deref_locked(&entry); + } + mutex_unlock(&dev->struct_mutex); +} +EXPORT_SYMBOL(drm_putback_buffer_objects); + + +/* + * Note. The caller has to register (if applicable) + * and deregister fence object usage. 
+ */ + +int drm_fence_buffer_objects(struct drm_device *dev, + struct list_head *list, + uint32_t fence_flags, + struct drm_fence_object *fence, + struct drm_fence_object **used_fence) +{ + struct drm_buffer_manager *bm = &dev->bm; + struct drm_buffer_object *entry; + uint32_t fence_type = 0; + uint32_t fence_class = ~0; + int count = 0; + int ret = 0; + struct list_head *l; + + mutex_lock(&dev->struct_mutex); + + if (!list) + list = &bm->unfenced; + + if (fence) + fence_class = fence->fence_class; + + list_for_each_entry(entry, list, lru) { + BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED)); + fence_type |= entry->new_fence_type; + if (fence_class == ~0) + fence_class = entry->new_fence_class; + else if (entry->new_fence_class != fence_class) { + DRM_ERROR("Unmatching fence classes on unfenced list: " + "%d and %d.\n", + fence_class, + entry->new_fence_class); + ret = -EINVAL; + goto out; + } + count++; + } + + if (!count) { + ret = -EINVAL; + goto out; + } + + if (fence) { + if ((fence_type & fence->type) != fence_type || + (fence->fence_class != fence_class)) { + DRM_ERROR("Given fence doesn't match buffers " + "on unfenced list.\n"); + ret = -EINVAL; + goto out; + } + } else { + mutex_unlock(&dev->struct_mutex); + ret = drm_fence_object_create(dev, fence_class, fence_type, + fence_flags | DRM_FENCE_FLAG_EMIT, + &fence); + mutex_lock(&dev->struct_mutex); + if (ret) + goto out; + } + + count = 0; + l = list->next; + while (l != list) { + prefetch(l->next); + entry = list_entry(l, struct drm_buffer_object, lru); + atomic_inc(&entry->usage); + mutex_unlock(&dev->struct_mutex); + mutex_lock(&entry->mutex); + mutex_lock(&dev->struct_mutex); + list_del_init(l); + if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) { + count++; + if (entry->fence) + drm_fence_usage_deref_locked(&entry->fence); + entry->fence = drm_fence_reference_locked(fence); + entry->fence_class = entry->new_fence_class; + entry->fence_type = entry->new_fence_type; + DRM_FLAG_MASKED(entry->priv_flags, 0, + _DRM_BO_FLAG_UNFENCED); + DRM_WAKEUP(&entry->event_queue); + drm_bo_add_to_lru(entry); + } + mutex_unlock(&entry->mutex); + drm_bo_usage_deref_locked(&entry); + l = list->next; + } + DRM_DEBUG("Fenced %d buffers\n", count); +out: + mutex_unlock(&dev->struct_mutex); + *used_fence = fence; + return ret; +} +EXPORT_SYMBOL(drm_fence_buffer_objects); + +/* + * bo->mutex locked + */ + +static int drm_bo_evict(struct drm_buffer_object *bo, unsigned mem_type, + int no_wait) +{ + int ret = 0; + struct drm_device *dev = bo->dev; + struct drm_bo_mem_reg evict_mem; + + /* + * Someone might have modified the buffer before we took the + * buffer mutex. 
+ */ + + if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) + goto out; + if (bo->mem.mem_type != mem_type) + goto out; + + ret = drm_bo_wait(bo, 0, 0, no_wait); + + if (ret && ret != -EAGAIN) { + DRM_ERROR("Failed to expire fence before " + "buffer eviction.\n"); + goto out; + } + + evict_mem = bo->mem; + evict_mem.mm_node = NULL; + + evict_mem = bo->mem; + evict_mem.mask = dev->driver->bo_driver->evict_mask(bo); + ret = drm_bo_mem_space(bo, &evict_mem, no_wait); + + if (ret) { + if (ret != -EAGAIN) + DRM_ERROR("Failed to find memory space for " + "buffer 0x%p eviction.\n", bo); + goto out; + } + + ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait); + + if (ret) { + if (ret != -EAGAIN) + DRM_ERROR("Buffer eviction failed\n"); + goto out; + } + + mutex_lock(&dev->struct_mutex); + if (evict_mem.mm_node) { + if (evict_mem.mm_node != bo->pinned_node) + drm_mm_put_block(evict_mem.mm_node); + evict_mem.mm_node = NULL; + } + list_del(&bo->lru); + drm_bo_add_to_lru(bo); + mutex_unlock(&dev->struct_mutex); + + DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED, + _DRM_BO_FLAG_EVICTED); + +out: + return ret; +} + +/** + * Repeatedly evict memory from the LRU for @mem_type until we create enough + * space, or we've evicted everything and there isn't enough space. + */ +static int drm_bo_mem_force_space(struct drm_device *dev, + struct drm_bo_mem_reg *mem, + uint32_t mem_type, int no_wait) +{ + struct drm_mm_node *node; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_buffer_object *entry; + struct drm_mem_type_manager *man = &bm->man[mem_type]; + struct list_head *lru; + unsigned long num_pages = mem->num_pages; + int ret; + + mutex_lock(&dev->struct_mutex); + do { + node = drm_mm_search_free(&man->manager, num_pages, + mem->page_alignment, 1); + if (node) + break; + + lru = &man->lru; + if (lru->next == lru) + break; + + entry = list_entry(lru->next, struct drm_buffer_object, lru); + atomic_inc(&entry->usage); + mutex_unlock(&dev->struct_mutex); + mutex_lock(&entry->mutex); + BUG_ON(entry->mem.flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT)); + + ret = drm_bo_evict(entry, mem_type, no_wait); + mutex_unlock(&entry->mutex); + drm_bo_usage_deref_unlocked(&entry); + if (ret) + return ret; + mutex_lock(&dev->struct_mutex); + } while (1); + + if (!node) { + mutex_unlock(&dev->struct_mutex); + return -ENOMEM; + } + + node = drm_mm_get_block(node, num_pages, mem->page_alignment); + mutex_unlock(&dev->struct_mutex); + mem->mm_node = node; + mem->mem_type = mem_type; + return 0; +} + +static int drm_bo_mt_compatible(struct drm_mem_type_manager *man, + int disallow_fixed, + uint32_t mem_type, + uint64_t mask, uint32_t *res_mask) +{ + uint64_t cur_flags = drm_bo_type_flags(mem_type); + uint64_t flag_diff; + + if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && disallow_fixed) + return 0; + if (man->flags & _DRM_FLAG_MEMTYPE_CACHED) + cur_flags |= DRM_BO_FLAG_CACHED; + if (man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE) + cur_flags |= DRM_BO_FLAG_MAPPABLE; + if (man->flags & _DRM_FLAG_MEMTYPE_CSELECT) + DRM_FLAG_MASKED(cur_flags, mask, DRM_BO_FLAG_CACHED); + + if ((cur_flags & mask & DRM_BO_MASK_MEM) == 0) + return 0; + + if (mem_type == DRM_BO_MEM_LOCAL) { + *res_mask = cur_flags; + return 1; + } + + flag_diff = (mask ^ cur_flags); + if (flag_diff & DRM_BO_FLAG_CACHED_MAPPED) + cur_flags |= DRM_BO_FLAG_CACHED_MAPPED; + + if ((flag_diff & DRM_BO_FLAG_CACHED) && + (!(mask & DRM_BO_FLAG_CACHED) || + (mask & DRM_BO_FLAG_FORCE_CACHING))) + return 0; + + if ((flag_diff & DRM_BO_FLAG_MAPPABLE) && + ((mask & 
DRM_BO_FLAG_MAPPABLE) || + (mask & DRM_BO_FLAG_FORCE_MAPPABLE))) + return 0; + + *res_mask = cur_flags; + return 1; +} + +/** + * Creates space for memory region @mem according to its type. + * + * This function first searches for free space in compatible memory types in + * the priority order defined by the driver. If free space isn't found, then + * drm_bo_mem_force_space is attempted in priority order to evict and find + * space. + */ +int drm_bo_mem_space(struct drm_buffer_object *bo, + struct drm_bo_mem_reg *mem, int no_wait) +{ + struct drm_device *dev = bo->dev; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_mem_type_manager *man; + + uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio; + const uint32_t *prios = dev->driver->bo_driver->mem_type_prio; + uint32_t i; + uint32_t mem_type = DRM_BO_MEM_LOCAL; + uint32_t cur_flags; + int type_found = 0; + int type_ok = 0; + int has_eagain = 0; + struct drm_mm_node *node = NULL; + int ret; + + mem->mm_node = NULL; + for (i = 0; i < num_prios; ++i) { + mem_type = prios[i]; + man = &bm->man[mem_type]; + + type_ok = drm_bo_mt_compatible(man, + bo->type == drm_bo_type_user, + mem_type, mem->mask, + &cur_flags); + + if (!type_ok) + continue; + + if (mem_type == DRM_BO_MEM_LOCAL) + break; + + if ((mem_type == bo->pinned_mem_type) && + (bo->pinned_node != NULL)) { + node = bo->pinned_node; + break; + } + + mutex_lock(&dev->struct_mutex); + if (man->has_type && man->use_type) { + type_found = 1; + node = drm_mm_search_free(&man->manager, mem->num_pages, + mem->page_alignment, 1); + if (node) + node = drm_mm_get_block(node, mem->num_pages, + mem->page_alignment); + } + mutex_unlock(&dev->struct_mutex); + if (node) + break; + } + + if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) { + mem->mm_node = node; + mem->mem_type = mem_type; + mem->flags = cur_flags; + return 0; + } + + if (!type_found) + return -EINVAL; + + num_prios = dev->driver->bo_driver->num_mem_busy_prio; + prios = dev->driver->bo_driver->mem_busy_prio; + + for (i = 0; i < num_prios; ++i) { + mem_type = prios[i]; + man = &bm->man[mem_type]; + + if (!man->has_type) + continue; + + if (!drm_bo_mt_compatible(man, + bo->type == drm_bo_type_user, + mem_type, + mem->mask, + &cur_flags)) + continue; + + ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait); + + if (ret == 0) { + mem->flags = cur_flags; + return 0; + } + + if (ret == -EAGAIN) + has_eagain = 1; + } + + ret = (has_eagain) ? -EAGAIN : -ENOMEM; + return ret; +} +EXPORT_SYMBOL(drm_bo_mem_space); + +static int drm_bo_new_mask(struct drm_buffer_object *bo, + uint64_t new_flags, uint64_t used_mask) +{ + uint32_t new_props; + + if (bo->type == drm_bo_type_user && + ((used_mask & (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING)) != + (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING))) { + DRM_ERROR("User buffers require cache-coherent memory.\n"); + return -EINVAL; + } + + if ((used_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) { + DRM_ERROR("DRM_BO_FLAG_NO_EVICT is only available to priviliged processes.\n"); + return -EPERM; + } + + if ((new_flags & DRM_BO_FLAG_NO_MOVE)) { + DRM_ERROR("DRM_BO_FLAG_NO_MOVE is not properly implemented yet.\n"); + return -EPERM; + } + + new_props = new_flags & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE | + DRM_BO_FLAG_READ); + + if (!new_props) { + DRM_ERROR("Invalid buffer object rwx properties\n"); + return -EINVAL; + } + + bo->mem.mask = new_flags; + return 0; +} + +/* + * Call dev->struct_mutex locked. 
+ */ + +struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv, + uint32_t handle, int check_owner) +{ + struct drm_user_object *uo; + struct drm_buffer_object *bo; + + uo = drm_lookup_user_object(file_priv, handle); + + if (!uo || (uo->type != drm_buffer_type)) { + DRM_ERROR("Could not find buffer object 0x%08x\n", handle); + return NULL; + } + + if (check_owner && file_priv != uo->owner) { + if (!drm_lookup_ref_object(file_priv, uo, _DRM_REF_USE)) + return NULL; + } + + bo = drm_user_object_entry(uo, struct drm_buffer_object, base); + atomic_inc(&bo->usage); + return bo; +} +EXPORT_SYMBOL(drm_lookup_buffer_object); + +/* + * Call bo->mutex locked. + * Returns 1 if the buffer is currently rendered to or from. 0 otherwise. + * Doesn't do any fence flushing as opposed to the drm_bo_busy function. + */ + +static int drm_bo_quick_busy(struct drm_buffer_object *bo) +{ + struct drm_fence_object *fence = bo->fence; + + BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED); + if (fence) { + if (drm_fence_object_signaled(fence, bo->fence_type, 0)) { + drm_fence_usage_deref_unlocked(&bo->fence); + return 0; + } + return 1; + } + return 0; +} + +/* + * Call bo->mutex locked. + * Returns 1 if the buffer is currently rendered to or from. 0 otherwise. + */ + +static int drm_bo_busy(struct drm_buffer_object *bo) +{ + struct drm_fence_object *fence = bo->fence; + + BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED); + if (fence) { + if (drm_fence_object_signaled(fence, bo->fence_type, 0)) { + drm_fence_usage_deref_unlocked(&bo->fence); + return 0; + } + drm_fence_object_flush(fence, DRM_FENCE_TYPE_EXE); + if (drm_fence_object_signaled(fence, bo->fence_type, 0)) { + drm_fence_usage_deref_unlocked(&bo->fence); + return 0; + } + return 1; + } + return 0; +} + +static int drm_bo_evict_cached(struct drm_buffer_object *bo) +{ + int ret = 0; + + BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED); + if (bo->mem.mm_node) + ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1); + return ret; +} + +/* + * Wait until a buffer is unmapped. + */ + +static int drm_bo_wait_unmapped(struct drm_buffer_object *bo, int no_wait) +{ + int ret = 0; + + if ((atomic_read(&bo->mapped) >= 0) && no_wait) + return -EBUSY; + + DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ, + atomic_read(&bo->mapped) == -1); + + if (ret == -EINTR) + ret = -EAGAIN; + + return ret; +} + +static int drm_bo_check_unfenced(struct drm_buffer_object *bo) +{ + int ret; + + mutex_lock(&bo->mutex); + ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED); + mutex_unlock(&bo->mutex); + return ret; +} + +/* + * Wait until a buffer, scheduled to be fenced moves off the unfenced list. + * Until then, we cannot really do anything with it except delete it. + */ + +static int drm_bo_wait_unfenced(struct drm_buffer_object *bo, int no_wait, + int eagain_if_wait) +{ + int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED); + + if (ret && no_wait) + return -EBUSY; + else if (!ret) + return 0; + + ret = 0; + mutex_unlock(&bo->mutex); + DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ, + !drm_bo_check_unfenced(bo)); + mutex_lock(&bo->mutex); + if (ret == -EINTR) + return -EAGAIN; + ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED); + if (ret) { + DRM_ERROR("Timeout waiting for buffer to become fenced\n"); + return -EBUSY; + } + if (eagain_if_wait) + return -EAGAIN; + + return 0; +} + +/* + * Fill in the ioctl reply argument with buffer info. + * Bo locked. 
+ */ + +static void drm_bo_fill_rep_arg(struct drm_buffer_object *bo, + struct drm_bo_info_rep *rep) +{ + if (!rep) + return; + + rep->handle = bo->base.hash.key; + rep->flags = bo->mem.flags; + rep->size = bo->num_pages * PAGE_SIZE; + rep->offset = bo->offset; + + if (bo->type == drm_bo_type_dc) + rep->arg_handle = bo->map_list.user_token; + else + rep->arg_handle = 0; + + rep->mask = bo->mem.mask; + rep->buffer_start = bo->buffer_start; + rep->fence_flags = bo->fence_type; + rep->rep_flags = 0; + rep->page_alignment = bo->mem.page_alignment; + + if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo)) { + DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY, + DRM_BO_REP_BUSY); + } +} + +/* + * Wait for buffer idle and register that we've mapped the buffer. + * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1, + * so that if the client dies, the mapping is automatically + * unregistered. + */ + +static int drm_buffer_object_map(struct drm_file *file_priv, uint32_t handle, + uint32_t map_flags, unsigned hint, + struct drm_bo_info_rep *rep) +{ + struct drm_buffer_object *bo; + struct drm_device *dev = file_priv->head->dev; + int ret = 0; + int no_wait = hint & DRM_BO_HINT_DONT_BLOCK; + + mutex_lock(&dev->struct_mutex); + bo = drm_lookup_buffer_object(file_priv, handle, 1); + mutex_unlock(&dev->struct_mutex); + + if (!bo) + return -EINVAL; + + mutex_lock(&bo->mutex); + ret = drm_bo_wait_unfenced(bo, no_wait, 0); + if (ret) + goto out; + + /* + * If this returns true, we are currently unmapped. + * We need to do this test, because unmapping can + * be done without the bo->mutex held. + */ + + while (1) { + if (atomic_inc_and_test(&bo->mapped)) { + if (no_wait && drm_bo_busy(bo)) { + atomic_dec(&bo->mapped); + ret = -EBUSY; + goto out; + } + ret = drm_bo_wait(bo, 0, 0, no_wait); + if (ret) { + atomic_dec(&bo->mapped); + goto out; + } + + if (bo->mem.flags & DRM_BO_FLAG_CACHED_MAPPED) + drm_bo_evict_cached(bo); + + break; + } else if (bo->mem.flags & DRM_BO_FLAG_CACHED_MAPPED) { + + /* + * We are already mapped with different flags. + * need to wait for unmap. + */ + + ret = drm_bo_wait_unmapped(bo, no_wait); + if (ret) + goto out; + + continue; + } + break; + } + + mutex_lock(&dev->struct_mutex); + ret = drm_add_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1); + mutex_unlock(&dev->struct_mutex); + if (ret) { + if (atomic_add_negative(-1, &bo->mapped)) + DRM_WAKEUP(&bo->event_queue); + + } else + drm_bo_fill_rep_arg(bo, rep); +out: + mutex_unlock(&bo->mutex); + drm_bo_usage_deref_unlocked(&bo); + return ret; +} + +static int drm_buffer_object_unmap(struct drm_file *file_priv, uint32_t handle) +{ + struct drm_device *dev = file_priv->head->dev; + struct drm_buffer_object *bo; + struct drm_ref_object *ro; + int ret = 0; + + mutex_lock(&dev->struct_mutex); + + bo = drm_lookup_buffer_object(file_priv, handle, 1); + if (!bo) { + ret = -EINVAL; + goto out; + } + + ro = drm_lookup_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1); + if (!ro) { + ret = -EINVAL; + goto out; + } + + drm_remove_ref_object(file_priv, ro); + drm_bo_usage_deref_locked(&bo); +out: + mutex_unlock(&dev->struct_mutex); + return ret; +} + +/* + * Call struct-sem locked. 
+ */ + +static void drm_buffer_user_object_unmap(struct drm_file *file_priv, + struct drm_user_object *uo, + enum drm_ref_type action) +{ + struct drm_buffer_object *bo = + drm_user_object_entry(uo, struct drm_buffer_object, base); + + /* + * We DON'T want to take the bo->lock here, because we want to + * hold it when we wait for unmapped buffer. + */ + + BUG_ON(action != _DRM_REF_TYPE1); + + if (atomic_add_negative(-1, &bo->mapped)) + DRM_WAKEUP(&bo->event_queue); +} + +/* + * bo->mutex locked. + * Note that new_mem_flags are NOT transferred to the bo->mem.mask. + */ + +int drm_bo_move_buffer(struct drm_buffer_object *bo, uint64_t new_mem_flags, + int no_wait, int move_unfenced) +{ + struct drm_device *dev = bo->dev; + struct drm_buffer_manager *bm = &dev->bm; + int ret = 0; + struct drm_bo_mem_reg mem; + /* + * Flush outstanding fences. + */ + + drm_bo_busy(bo); + + /* + * Wait for outstanding fences. + */ + + ret = drm_bo_wait(bo, 0, 0, no_wait); + if (ret) + return ret; + + mem.num_pages = bo->num_pages; + mem.size = mem.num_pages << PAGE_SHIFT; + mem.mask = new_mem_flags; + mem.page_alignment = bo->mem.page_alignment; + + mutex_lock(&bm->evict_mutex); + mutex_lock(&dev->struct_mutex); + list_del_init(&bo->lru); + mutex_unlock(&dev->struct_mutex); + + /* + * Determine where to move the buffer. + */ + ret = drm_bo_mem_space(bo, &mem, no_wait); + if (ret) + goto out_unlock; + + ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait); + +out_unlock: + if (ret || !move_unfenced) { + mutex_lock(&dev->struct_mutex); + if (mem.mm_node) { + if (mem.mm_node != bo->pinned_node) + drm_mm_put_block(mem.mm_node); + mem.mm_node = NULL; + } + mutex_unlock(&dev->struct_mutex); + } + + mutex_unlock(&bm->evict_mutex); + return ret; +} + +static int drm_bo_mem_compat(struct drm_bo_mem_reg *mem) +{ + uint32_t flag_diff = (mem->mask ^ mem->flags); + + if ((mem->mask & mem->flags & DRM_BO_MASK_MEM) == 0) + return 0; + if ((flag_diff & DRM_BO_FLAG_CACHED) && + (/* !(mem->mask & DRM_BO_FLAG_CACHED) ||*/ + (mem->mask & DRM_BO_FLAG_FORCE_CACHING))) + return 0; + + if ((flag_diff & DRM_BO_FLAG_MAPPABLE) && + ((mem->mask & DRM_BO_FLAG_MAPPABLE) || + (mem->mask & DRM_BO_FLAG_FORCE_MAPPABLE))) + return 0; + return 1; +} + +/* + * bo locked. + */ + +static int drm_buffer_object_validate(struct drm_buffer_object *bo, + uint32_t fence_class, + int move_unfenced, int no_wait) +{ + struct drm_device *dev = bo->dev; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_bo_driver *driver = dev->driver->bo_driver; + uint32_t ftype; + int ret; + + DRM_DEBUG("New flags 0x%016llx, Old flags 0x%016llx\n", + (unsigned long long) bo->mem.mask, + (unsigned long long) bo->mem.flags); + + ret = driver->fence_type(bo, &fence_class, &ftype); + + if (ret) { + DRM_ERROR("Driver did not support given buffer permissions\n"); + return ret; + } + + /* + * We're switching command submission mechanism, + * or cannot simply rely on the hardware serializing for us. + * + * Wait for buffer idle. + */ + + if ((fence_class != bo->fence_class) || + ((ftype ^ bo->fence_type) & bo->fence_type)) { + + ret = drm_bo_wait(bo, 0, 0, no_wait); + + if (ret) + return ret; + + } + + bo->new_fence_class = fence_class; + bo->new_fence_type = ftype; + + ret = drm_bo_wait_unmapped(bo, no_wait); + if (ret) { + DRM_ERROR("Timed out waiting for buffer unmap.\n"); + return ret; + } + + /* + * Check whether we need to move buffer. 
+ */ + + if (!drm_bo_mem_compat(&bo->mem)) { + ret = drm_bo_move_buffer(bo, bo->mem.mask, no_wait, + move_unfenced); + if (ret) { + if (ret != -EAGAIN) + DRM_ERROR("Failed moving buffer.\n"); + return ret; + } + } + + /* + * Pinned buffers. + */ + + if (bo->mem.mask & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) { + bo->pinned_mem_type = bo->mem.mem_type; + mutex_lock(&dev->struct_mutex); + list_del_init(&bo->pinned_lru); + drm_bo_add_to_pinned_lru(bo); + + if (bo->pinned_node != bo->mem.mm_node) { + if (bo->pinned_node != NULL) + drm_mm_put_block(bo->pinned_node); + bo->pinned_node = bo->mem.mm_node; + } + + mutex_unlock(&dev->struct_mutex); + + } else if (bo->pinned_node != NULL) { + + mutex_lock(&dev->struct_mutex); + + if (bo->pinned_node != bo->mem.mm_node) + drm_mm_put_block(bo->pinned_node); + + list_del_init(&bo->pinned_lru); + bo->pinned_node = NULL; + mutex_unlock(&dev->struct_mutex); + + } + + /* + * We might need to add a TTM. + */ + + if (bo->mem.mem_type == DRM_BO_MEM_LOCAL && bo->ttm == NULL) { + ret = drm_bo_add_ttm(bo); + if (ret) + return ret; + } + DRM_FLAG_MASKED(bo->mem.flags, bo->mem.mask, ~DRM_BO_MASK_MEMTYPE); + + /* + * Finally, adjust lru to be sure. + */ + + mutex_lock(&dev->struct_mutex); + list_del(&bo->lru); + if (move_unfenced) { + list_add_tail(&bo->lru, &bm->unfenced); + DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED, + _DRM_BO_FLAG_UNFENCED); + } else { + drm_bo_add_to_lru(bo); + if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) { + DRM_WAKEUP(&bo->event_queue); + DRM_FLAG_MASKED(bo->priv_flags, 0, + _DRM_BO_FLAG_UNFENCED); + } + } + mutex_unlock(&dev->struct_mutex); + + return 0; +} + +int drm_bo_do_validate(struct drm_buffer_object *bo, + uint64_t flags, uint64_t mask, uint32_t hint, + uint32_t fence_class, + int no_wait, + struct drm_bo_info_rep *rep) +{ + int ret; + + mutex_lock(&bo->mutex); + ret = drm_bo_wait_unfenced(bo, no_wait, 0); + + if (ret) + goto out; + + + DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask); + ret = drm_bo_new_mask(bo, flags, mask); + if (ret) + goto out; + + ret = drm_buffer_object_validate(bo, + fence_class, + !(hint & DRM_BO_HINT_DONT_FENCE), + no_wait); +out: + if (rep) + drm_bo_fill_rep_arg(bo, rep); + + mutex_unlock(&bo->mutex); + return ret; +} +EXPORT_SYMBOL(drm_bo_do_validate); + + +int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle, + uint32_t fence_class, + uint64_t flags, uint64_t mask, + uint32_t hint, + int use_old_fence_class, + struct drm_bo_info_rep *rep, + struct drm_buffer_object **bo_rep) +{ + struct drm_device *dev = file_priv->head->dev; + struct drm_buffer_object *bo; + int ret; + int no_wait = hint & DRM_BO_HINT_DONT_BLOCK; + + mutex_lock(&dev->struct_mutex); + bo = drm_lookup_buffer_object(file_priv, handle, 1); + mutex_unlock(&dev->struct_mutex); + + if (!bo) + return -EINVAL; + + if (use_old_fence_class) + fence_class = bo->fence_class; + + /* + * Only allow creator to change shared buffer mask. 
+ */ + + if (bo->base.owner != file_priv) + mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE); + + + ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class, + no_wait, rep); + + if (!ret && bo_rep) + *bo_rep = bo; + else + drm_bo_usage_deref_unlocked(&bo); + + return ret; +} +EXPORT_SYMBOL(drm_bo_handle_validate); + +static int drm_bo_handle_info(struct drm_file *file_priv, uint32_t handle, + struct drm_bo_info_rep *rep) +{ + struct drm_device *dev = file_priv->head->dev; + struct drm_buffer_object *bo; + + mutex_lock(&dev->struct_mutex); + bo = drm_lookup_buffer_object(file_priv, handle, 1); + mutex_unlock(&dev->struct_mutex); + + if (!bo) + return -EINVAL; + + mutex_lock(&bo->mutex); + if (!(bo->priv_flags & _DRM_BO_FLAG_UNFENCED)) + (void)drm_bo_busy(bo); + drm_bo_fill_rep_arg(bo, rep); + mutex_unlock(&bo->mutex); + drm_bo_usage_deref_unlocked(&bo); + return 0; +} + +static int drm_bo_handle_wait(struct drm_file *file_priv, uint32_t handle, + uint32_t hint, + struct drm_bo_info_rep *rep) +{ + struct drm_device *dev = file_priv->head->dev; + struct drm_buffer_object *bo; + int no_wait = hint & DRM_BO_HINT_DONT_BLOCK; + int ret; + + mutex_lock(&dev->struct_mutex); + bo = drm_lookup_buffer_object(file_priv, handle, 1); + mutex_unlock(&dev->struct_mutex); + + if (!bo) + return -EINVAL; + + mutex_lock(&bo->mutex); + ret = drm_bo_wait_unfenced(bo, no_wait, 0); + if (ret) + goto out; + ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 0, no_wait); + if (ret) + goto out; + + drm_bo_fill_rep_arg(bo, rep); + +out: + mutex_unlock(&bo->mutex); + drm_bo_usage_deref_unlocked(&bo); + return ret; +} + +int drm_buffer_object_create(struct drm_device *dev, + unsigned long size, + enum drm_bo_type type, + uint64_t mask, + uint32_t hint, + uint32_t page_alignment, + unsigned long buffer_start, + struct drm_buffer_object **buf_obj) +{ + struct drm_buffer_manager *bm = &dev->bm; + struct drm_buffer_object *bo; + int ret = 0; + unsigned long num_pages; + + size += buffer_start & ~PAGE_MASK; + num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; + if (num_pages == 0) { + DRM_ERROR("Illegal buffer object size.\n"); + return -EINVAL; + } + + bo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ); + + if (!bo) + return -ENOMEM; + + mutex_init(&bo->mutex); + mutex_lock(&bo->mutex); + + atomic_set(&bo->usage, 1); + atomic_set(&bo->mapped, -1); + DRM_INIT_WAITQUEUE(&bo->event_queue); + INIT_LIST_HEAD(&bo->lru); + INIT_LIST_HEAD(&bo->pinned_lru); + INIT_LIST_HEAD(&bo->ddestroy); + bo->dev = dev; + bo->type = type; + bo->num_pages = num_pages; + bo->mem.mem_type = DRM_BO_MEM_LOCAL; + bo->mem.num_pages = bo->num_pages; + bo->mem.mm_node = NULL; + bo->mem.page_alignment = page_alignment; + bo->buffer_start = buffer_start & PAGE_MASK; + bo->priv_flags = 0; + bo->mem.flags = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED | + DRM_BO_FLAG_MAPPABLE; + bo->mem.mask = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED | + DRM_BO_FLAG_MAPPABLE; + atomic_inc(&bm->count); + ret = drm_bo_new_mask(bo, mask, mask); + if (ret) + goto out_err; + + if (bo->type == drm_bo_type_dc) { + mutex_lock(&dev->struct_mutex); + ret = drm_bo_setup_vm_locked(bo); + mutex_unlock(&dev->struct_mutex); + if (ret) + goto out_err; + } + + ret = drm_buffer_object_validate(bo, 0, 0, hint & DRM_BO_HINT_DONT_BLOCK); + if (ret) + goto out_err; + + mutex_unlock(&bo->mutex); + *buf_obj = bo; + return 0; + +out_err: + mutex_unlock(&bo->mutex); + + drm_bo_usage_deref_unlocked(&bo); + return ret; +} +EXPORT_SYMBOL(drm_buffer_object_create); + + +static int 
drm_bo_add_user_object(struct drm_file *file_priv, + struct drm_buffer_object *bo, int shareable) +{ + struct drm_device *dev = file_priv->head->dev; + int ret; + + mutex_lock(&dev->struct_mutex); + ret = drm_add_user_object(file_priv, &bo->base, shareable); + if (ret) + goto out; + + bo->base.remove = drm_bo_base_deref_locked; + bo->base.type = drm_buffer_type; + bo->base.ref_struct_locked = NULL; + bo->base.unref = drm_buffer_user_object_unmap; + +out: + mutex_unlock(&dev->struct_mutex); + return ret; +} + +int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) +{ + struct drm_bo_create_arg *arg = data; + struct drm_bo_create_req *req = &arg->d.req; + struct drm_bo_info_rep *rep = &arg->d.rep; + struct drm_buffer_object *entry; + enum drm_bo_type bo_type; + int ret = 0; + + DRM_DEBUG("drm_bo_create_ioctl: %dkb, %dkb align\n", + (int)(req->size / 1024), req->page_alignment * 4); + + if (!dev->bm.initialized) { + DRM_ERROR("Buffer object manager is not initialized.\n"); + return -EINVAL; + } + + bo_type = (req->buffer_start) ? drm_bo_type_user : drm_bo_type_dc; + + if (bo_type == drm_bo_type_user) + req->mask &= ~DRM_BO_FLAG_SHAREABLE; + + ret = drm_buffer_object_create(file_priv->head->dev, + req->size, bo_type, req->mask, + req->hint, req->page_alignment, + req->buffer_start, &entry); + if (ret) + goto out; + + ret = drm_bo_add_user_object(file_priv, entry, + req->mask & DRM_BO_FLAG_SHAREABLE); + if (ret) { + drm_bo_usage_deref_unlocked(&entry); + goto out; + } + + mutex_lock(&entry->mutex); + drm_bo_fill_rep_arg(entry, rep); + mutex_unlock(&entry->mutex); + +out: + return ret; +} + +int drm_bo_setstatus_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv) +{ + struct drm_bo_map_wait_idle_arg *arg = data; + struct drm_bo_info_req *req = &arg->d.req; + struct drm_bo_info_rep *rep = &arg->d.rep; + int ret; + + if (!dev->bm.initialized) { + DRM_ERROR("Buffer object manager is not initialized.\n"); + return -EINVAL; + } + + ret = drm_bo_read_lock(&dev->bm.bm_lock); + if (ret) + return ret; + + ret = drm_bo_handle_validate(file_priv, req->handle, req->fence_class, + req->flags, + req->mask, + req->hint | DRM_BO_HINT_DONT_FENCE, + 1, + rep, NULL); + + (void) drm_bo_read_unlock(&dev->bm.bm_lock); + if (ret) + return ret; + + return 0; +} + +int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) +{ + struct drm_bo_map_wait_idle_arg *arg = data; + struct drm_bo_info_req *req = &arg->d.req; + struct drm_bo_info_rep *rep = &arg->d.rep; + int ret; + if (!dev->bm.initialized) { + DRM_ERROR("Buffer object manager is not initialized.\n"); + return -EINVAL; + } + + ret = drm_buffer_object_map(file_priv, req->handle, req->mask, + req->hint, rep); + if (ret) + return ret; + + return 0; +} + +int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) +{ + struct drm_bo_handle_arg *arg = data; + int ret; + if (!dev->bm.initialized) { + DRM_ERROR("Buffer object manager is not initialized.\n"); + return -EINVAL; + } + + ret = drm_buffer_object_unmap(file_priv, arg->handle); + return ret; +} + + +int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) +{ + struct drm_bo_reference_info_arg *arg = data; + struct drm_bo_handle_arg *req = &arg->d.req; + struct drm_bo_info_rep *rep = &arg->d.rep; + struct drm_user_object *uo; + int ret; + + if (!dev->bm.initialized) { + DRM_ERROR("Buffer object manager is not initialized.\n"); + return -EINVAL; + } + + ret = 
drm_user_object_ref(file_priv, req->handle, + drm_buffer_type, &uo); + if (ret) + return ret; + + ret = drm_bo_handle_info(file_priv, req->handle, rep); + if (ret) + return ret; + + return 0; +} + +int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) +{ + struct drm_bo_handle_arg *arg = data; + int ret = 0; + + if (!dev->bm.initialized) { + DRM_ERROR("Buffer object manager is not initialized.\n"); + return -EINVAL; + } + + ret = drm_user_object_unref(file_priv, arg->handle, drm_buffer_type); + return ret; +} + +int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) +{ + struct drm_bo_reference_info_arg *arg = data; + struct drm_bo_handle_arg *req = &arg->d.req; + struct drm_bo_info_rep *rep = &arg->d.rep; + int ret; + + if (!dev->bm.initialized) { + DRM_ERROR("Buffer object manager is not initialized.\n"); + return -EINVAL; + } + + ret = drm_bo_handle_info(file_priv, req->handle, rep); + if (ret) + return ret; + + return 0; +} + +int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) +{ + struct drm_bo_map_wait_idle_arg *arg = data; + struct drm_bo_info_req *req = &arg->d.req; + struct drm_bo_info_rep *rep = &arg->d.rep; + int ret; + if (!dev->bm.initialized) { + DRM_ERROR("Buffer object manager is not initialized.\n"); + return -EINVAL; + } + + ret = drm_bo_handle_wait(file_priv, req->handle, + req->hint, rep); + if (ret) + return ret; + + return 0; +} + +static int drm_bo_leave_list(struct drm_buffer_object *bo, + uint32_t mem_type, + int free_pinned, + int allow_errors) +{ + struct drm_device *dev = bo->dev; + int ret = 0; + + mutex_lock(&bo->mutex); + + ret = drm_bo_expire_fence(bo, allow_errors); + if (ret) + goto out; + + if (free_pinned) { + DRM_FLAG_MASKED(bo->mem.flags, 0, DRM_BO_FLAG_NO_MOVE); + mutex_lock(&dev->struct_mutex); + list_del_init(&bo->pinned_lru); + if (bo->pinned_node == bo->mem.mm_node) + bo->pinned_node = NULL; + if (bo->pinned_node != NULL) { + drm_mm_put_block(bo->pinned_node); + bo->pinned_node = NULL; + } + mutex_unlock(&dev->struct_mutex); + } + + if (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) { + DRM_ERROR("A DRM_BO_NO_EVICT buffer present at " + "cleanup. Removing flag and evicting.\n"); + bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT; + bo->mem.mask &= ~DRM_BO_FLAG_NO_EVICT; + } + + if (bo->mem.mem_type == mem_type) + ret = drm_bo_evict(bo, mem_type, 0); + + if (ret) { + if (allow_errors) { + goto out; + } else { + ret = 0; + DRM_ERROR("Cleanup eviction failed\n"); + } + } + +out: + mutex_unlock(&bo->mutex); + return ret; +} + + +static struct drm_buffer_object *drm_bo_entry(struct list_head *list, + int pinned_list) +{ + if (pinned_list) + return list_entry(list, struct drm_buffer_object, pinned_lru); + else + return list_entry(list, struct drm_buffer_object, lru); +} + +/* + * dev->struct_mutex locked. + */ + +static int drm_bo_force_list_clean(struct drm_device *dev, + struct list_head *head, + unsigned mem_type, + int free_pinned, + int allow_errors, + int pinned_list) +{ + struct list_head *list, *next, *prev; + struct drm_buffer_object *entry, *nentry; + int ret; + int do_restart; + + /* + * The list traversal is a bit odd here, because an item may + * disappear from the list when we release the struct_mutex or + * when we decrease the usage count. Also we're not guaranteed + * to drain pinned lists, so we can't always restart. + */ + +restart: + nentry = NULL; + list_for_each_safe(list, next, head) { + prev = list->prev; + + entry = (nentry != NULL) ? 
nentry: drm_bo_entry(list, pinned_list); + atomic_inc(&entry->usage); + if (nentry) { + atomic_dec(&nentry->usage); + nentry = NULL; + } + + /* + * Protect the next item from destruction, so we can check + * its list pointers later on. + */ + + if (next != head) { + nentry = drm_bo_entry(next, pinned_list); + atomic_inc(&nentry->usage); + } + mutex_unlock(&dev->struct_mutex); + + ret = drm_bo_leave_list(entry, mem_type, free_pinned, + allow_errors); + mutex_lock(&dev->struct_mutex); + + drm_bo_usage_deref_locked(&entry); + if (ret) + return ret; + + /* + * Has the next item disappeared from the list? + */ + + do_restart = ((next->prev != list) && (next->prev != prev)); + + if (nentry != NULL && do_restart) + drm_bo_usage_deref_locked(&nentry); + + if (do_restart) + goto restart; + } + return 0; +} + +int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type) +{ + struct drm_buffer_manager *bm = &dev->bm; + struct drm_mem_type_manager *man = &bm->man[mem_type]; + int ret = -EINVAL; + + if (mem_type >= DRM_BO_MEM_TYPES) { + DRM_ERROR("Illegal memory type %d\n", mem_type); + return ret; + } + + if (!man->has_type) { + DRM_ERROR("Trying to take down uninitialized " + "memory manager type %u\n", mem_type); + return ret; + } + man->use_type = 0; + man->has_type = 0; + + ret = 0; + if (mem_type > 0) { + BUG_ON(!list_empty(&bm->unfenced)); + drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0); + drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1); + + if (drm_mm_clean(&man->manager)) { + drm_mm_takedown(&man->manager); + } else { + ret = -EBUSY; + } + } + + return ret; +} +EXPORT_SYMBOL(drm_bo_clean_mm); + +/** + *Evict all buffers of a particular mem_type, but leave memory manager + *regions for NO_MOVE buffers intact. New buffers cannot be added at this + *point since we have the hardware lock. + */ + +static int drm_bo_lock_mm(struct drm_device *dev, unsigned mem_type) +{ + int ret; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_mem_type_manager *man = &bm->man[mem_type]; + + if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) { + DRM_ERROR("Illegal memory manager memory type %u.\n", mem_type); + return -EINVAL; + } + + if (!man->has_type) { + DRM_ERROR("Memory type %u has not been initialized.\n", + mem_type); + return 0; + } + + ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 0); + if (ret) + return ret; + ret = drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1, 1); + + return ret; +} + +int drm_bo_init_mm(struct drm_device *dev, + unsigned type, + unsigned long p_offset, unsigned long p_size) +{ + struct drm_buffer_manager *bm = &dev->bm; + int ret = -EINVAL; + struct drm_mem_type_manager *man; + + if (type >= DRM_BO_MEM_TYPES) { + DRM_ERROR("Illegal memory type %d\n", type); + return ret; + } + + man = &bm->man[type]; + if (man->has_type) { + DRM_ERROR("Memory manager already initialized for type %d\n", + type); + return ret; + } + + ret = dev->driver->bo_driver->init_mem_type(dev, type, man); + if (ret) + return ret; + + ret = 0; + if (type != DRM_BO_MEM_LOCAL) { + if (!p_size) { + DRM_ERROR("Zero size memory manager type %d\n", type); + return ret; + } + ret = drm_mm_init(&man->manager, p_offset, p_size); + if (ret) + return ret; + } + man->has_type = 1; + man->use_type = 1; + + INIT_LIST_HEAD(&man->lru); + INIT_LIST_HEAD(&man->pinned); + + return 0; +} +EXPORT_SYMBOL(drm_bo_init_mm); + +/* + * This function is intended to be called on drm driver unload. 
+ * If you decide to call it from lastclose, you must protect the call + * from a potentially racing drm_bo_driver_init in firstopen. + * (This may happen on X server restart). + */ + +int drm_bo_driver_finish(struct drm_device *dev) +{ + struct drm_buffer_manager *bm = &dev->bm; + int ret = 0; + unsigned i = DRM_BO_MEM_TYPES; + struct drm_mem_type_manager *man; + + mutex_lock(&dev->struct_mutex); + + if (!bm->initialized) + goto out; + bm->initialized = 0; + + while (i--) { + man = &bm->man[i]; + if (man->has_type) { + man->use_type = 0; + if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i)) { + ret = -EBUSY; + DRM_ERROR("DRM memory manager type %d " + "is not clean.\n", i); + } + man->has_type = 0; + } + } + mutex_unlock(&dev->struct_mutex); + + if (!cancel_delayed_work(&bm->wq)) + flush_scheduled_work(); + + mutex_lock(&dev->struct_mutex); + drm_bo_delayed_delete(dev, 1); + if (list_empty(&bm->ddestroy)) + DRM_DEBUG("Delayed destroy list was clean\n"); + + if (list_empty(&bm->man[0].lru)) + DRM_DEBUG("Swap list was clean\n"); + + if (list_empty(&bm->man[0].pinned)) + DRM_DEBUG("NO_MOVE list was clean\n"); + + if (list_empty(&bm->unfenced)) + DRM_DEBUG("Unfenced list was clean\n"); + + unlock_page(bm->dummy_read_page); + __free_page(bm->dummy_read_page); + +out: + mutex_unlock(&dev->struct_mutex); + return ret; +} + +/* + * This function is intended to be called on drm driver load. + * If you decide to call it from firstopen, you must protect the call + * from a potentially racing drm_bo_driver_finish in lastclose. + * (This may happen on X server restart). + */ + +int drm_bo_driver_init(struct drm_device *dev) +{ + struct drm_bo_driver *driver = dev->driver->bo_driver; + struct drm_buffer_manager *bm = &dev->bm; + int ret = -EINVAL; + + bm->dummy_read_page = NULL; + drm_bo_init_lock(&bm->bm_lock); + mutex_lock(&dev->struct_mutex); + if (!driver) + goto out_unlock; + + bm->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32); + if (!bm->dummy_read_page) { + ret = -ENOMEM; + goto out_unlock; + } + + SetPageLocked(bm->dummy_read_page); + + /* + * Initialize the system memory buffer type. + * Other types need to be driver / IOCTL initialized. + */ + ret = drm_bo_init_mm(dev, DRM_BO_MEM_LOCAL, 0, 0); + if (ret) + goto out_unlock; + + INIT_DELAYED_WORK(&bm->wq, drm_bo_delayed_workqueue); + bm->initialized = 1; + bm->nice_mode = 1; + atomic_set(&bm->count, 0); + bm->cur_pages = 0; + INIT_LIST_HEAD(&bm->unfenced); + INIT_LIST_HEAD(&bm->ddestroy); +out_unlock: + mutex_unlock(&dev->struct_mutex); + return ret; +} +EXPORT_SYMBOL(drm_bo_driver_init); + +int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) +{ + struct drm_mm_init_arg *arg = data; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_bo_driver *driver = dev->driver->bo_driver; + int ret; + + if (!driver) { + DRM_ERROR("Buffer objects are not supported by this driver\n"); + return -EINVAL; + } + + ret = drm_bo_write_lock(&bm->bm_lock, file_priv); + if (ret) + return ret; + + ret = -EINVAL; + if (arg->magic != DRM_BO_INIT_MAGIC) { + DRM_ERROR("You are using an old libdrm that is not compatible with\n" + "\tthe kernel DRM module. Please upgrade your libdrm.\n"); + return -EINVAL; + } + if (arg->major != DRM_BO_INIT_MAJOR) { + DRM_ERROR("libdrm and kernel DRM buffer object interface major\n" + "\tversion don't match. 
Got %d, expected %d.\n", + arg->major, DRM_BO_INIT_MAJOR); + return -EINVAL; + } + + mutex_lock(&dev->struct_mutex); + if (!bm->initialized) { + DRM_ERROR("DRM memory manager was not initialized.\n"); + goto out; + } + if (arg->mem_type == 0) { + DRM_ERROR("System memory buffers already initialized.\n"); + goto out; + } + ret = drm_bo_init_mm(dev, arg->mem_type, + arg->p_offset, arg->p_size); + +out: + mutex_unlock(&dev->struct_mutex); + (void) drm_bo_write_unlock(&bm->bm_lock, file_priv); + + if (ret) + return ret; + + return 0; +} + +int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) +{ + struct drm_mm_type_arg *arg = data; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_bo_driver *driver = dev->driver->bo_driver; + int ret; + + if (!driver) { + DRM_ERROR("Buffer objects are not supported by this driver\n"); + return -EINVAL; + } + + ret = drm_bo_write_lock(&bm->bm_lock, file_priv); + if (ret) + return ret; + + mutex_lock(&dev->struct_mutex); + ret = -EINVAL; + if (!bm->initialized) { + DRM_ERROR("DRM memory manager was not initialized\n"); + goto out; + } + if (arg->mem_type == 0) { + DRM_ERROR("No takedown for System memory buffers.\n"); + goto out; + } + ret = 0; + if (drm_bo_clean_mm(dev, arg->mem_type)) { + DRM_ERROR("Memory manager type %d not clean. " + "Delaying takedown\n", arg->mem_type); + } +out: + mutex_unlock(&dev->struct_mutex); + (void) drm_bo_write_unlock(&bm->bm_lock, file_priv); + + if (ret) + return ret; + + return 0; +} + +int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) +{ + struct drm_mm_type_arg *arg = data; + struct drm_bo_driver *driver = dev->driver->bo_driver; + int ret; + + if (!driver) { + DRM_ERROR("Buffer objects are not supported by this driver\n"); + return -EINVAL; + } + + if (arg->lock_flags & DRM_BO_LOCK_IGNORE_NO_EVICT) { + DRM_ERROR("Lock flag DRM_BO_LOCK_IGNORE_NO_EVICT not supported yet.\n"); + return -EINVAL; + } + + if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) { + ret = drm_bo_write_lock(&dev->bm.bm_lock, file_priv); + if (ret) + return ret; + } + + mutex_lock(&dev->struct_mutex); + ret = drm_bo_lock_mm(dev, arg->mem_type); + mutex_unlock(&dev->struct_mutex); + if (ret) { + (void) drm_bo_write_unlock(&dev->bm.bm_lock, file_priv); + return ret; + } + + return 0; +} + +int drm_mm_unlock_ioctl(struct drm_device *dev, + void *data, + struct drm_file *file_priv) +{ + struct drm_mm_type_arg *arg = data; + struct drm_bo_driver *driver = dev->driver->bo_driver; + int ret; + + if (!driver) { + DRM_ERROR("Buffer objects are not supported by this driver\n"); + return -EINVAL; + } + + if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) { + ret = drm_bo_write_unlock(&dev->bm.bm_lock, file_priv); + if (ret) + return ret; + } + + return 0; +} + +/* + * buffer object vm functions. + */ + +int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg *mem) +{ + struct drm_buffer_manager *bm = &dev->bm; + struct drm_mem_type_manager *man = &bm->man[mem->mem_type]; + + if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) { + if (mem->mem_type == DRM_BO_MEM_LOCAL) + return 0; + + if (man->flags & _DRM_FLAG_MEMTYPE_CMA) + return 0; + + if (mem->flags & DRM_BO_FLAG_CACHED) + return 0; + } + return 1; +} +EXPORT_SYMBOL(drm_mem_reg_is_pci); + +/** + * \c Get the PCI offset for the buffer object memory. + * + * \param bo The buffer object. 
+ * \param bus_base On return the base of the PCI region + * \param bus_offset On return the byte offset into the PCI region + * \param bus_size On return the byte size of the buffer object or zero if + * the buffer object memory is not accessible through a PCI region. + * \return Failure indication. + * + * Returns -EINVAL if the buffer object is currently not mappable. + * Otherwise returns zero. + */ + +int drm_bo_pci_offset(struct drm_device *dev, + struct drm_bo_mem_reg *mem, + unsigned long *bus_base, + unsigned long *bus_offset, unsigned long *bus_size) +{ + struct drm_buffer_manager *bm = &dev->bm; + struct drm_mem_type_manager *man = &bm->man[mem->mem_type]; + + *bus_size = 0; + if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)) + return -EINVAL; + + if (drm_mem_reg_is_pci(dev, mem)) { + *bus_offset = mem->mm_node->start << PAGE_SHIFT; + *bus_size = mem->num_pages << PAGE_SHIFT; + *bus_base = man->io_offset; + } + + return 0; +} + +/** + * \c Kill all user-space virtual mappings of this buffer object. + * + * \param bo The buffer object. + * + * Call bo->mutex locked. + */ + +void drm_bo_unmap_virtual(struct drm_buffer_object *bo) +{ + struct drm_device *dev = bo->dev; + loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT; + loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT; + + if (!dev->dev_mapping) + return; + + unmap_mapping_range(dev->dev_mapping, offset, holelen, 1); +} + +static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo) +{ + struct drm_map_list *list; + drm_local_map_t *map; + struct drm_device *dev = bo->dev; + + DRM_ASSERT_LOCKED(&dev->struct_mutex); + if (bo->type != drm_bo_type_dc) + return; + + list = &bo->map_list; + if (list->user_token) { + drm_ht_remove_item(&dev->map_hash, &list->hash); + list->user_token = 0; + } + if (list->file_offset_node) { + drm_mm_put_block(list->file_offset_node); + list->file_offset_node = NULL; + } + + map = list->map; + if (!map) + return; + + drm_ctl_free(map, sizeof(*map), DRM_MEM_BUFOBJ); + list->map = NULL; + list->user_token = 0ULL; + drm_bo_usage_deref_locked(&bo); +} + +static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo) +{ + struct drm_map_list *list = &bo->map_list; + drm_local_map_t *map; + struct drm_device *dev = bo->dev; + + DRM_ASSERT_LOCKED(&dev->struct_mutex); + list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ); + if (!list->map) + return -ENOMEM; + + map = list->map; + map->offset = 0; + map->type = _DRM_TTM; + map->flags = _DRM_REMOVABLE; + map->size = bo->mem.num_pages * PAGE_SIZE; + atomic_inc(&bo->usage); + map->handle = (void *)bo; + + list->file_offset_node = drm_mm_search_free(&dev->offset_manager, + bo->mem.num_pages, 0, 0); + + if (!list->file_offset_node) { + drm_bo_takedown_vm_locked(bo); + return -ENOMEM; + } + + list->file_offset_node = drm_mm_get_block(list->file_offset_node, + bo->mem.num_pages, 0); + + list->hash.key = list->file_offset_node->start; + if (drm_ht_insert_item(&dev->map_hash, &list->hash)) { + drm_bo_takedown_vm_locked(bo); + return -ENOMEM; + } + + list->user_token = ((uint64_t) list->hash.key) << PAGE_SHIFT; + + return 0; +} + +int drm_bo_version_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_bo_version_arg *arg = (struct drm_bo_version_arg *)data; + + arg->major = DRM_BO_INIT_MAJOR; + arg->minor = DRM_BO_INIT_MINOR; + arg->patchlevel = DRM_BO_INIT_PATCH; + + return 0; +} diff --git a/drivers/char/drm/drm_bo_lock.c b/drivers/char/drm/drm_bo_lock.c new file mode 100644 index 0000000..e4f159d 
--- /dev/null +++ b/drivers/char/drm/drm_bo_lock.c @@ -0,0 +1,175 @@ +/************************************************************************** + * + * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +/* + * Authors: Thomas Hellstrom + */ + +/* + * This file implements a simple replacement for the buffer manager use + * of the heavyweight hardware lock. + * The lock is a read-write lock. Taking it in read mode is fast, and + * intended for in-kernel use only. + * Taking it in write mode is slow. + * + * The write mode is used only when there is a need to block all + * user-space processes from allocating a + * new memory area. + * Typical use in write mode is X server VT switching, and it's allowed + * to leave kernel space with the write lock held. If a user-space process + * dies while having the write-lock, it will be released during the file + * descriptor release. + * + * The read lock is typically placed at the start of an IOCTL- or + * user-space callable function that may end up allocating a memory area. + * This includes setstatus, super-ioctls and no_pfn; the latter may move + * unmappable regions to mappable. It's a bug to leave kernel space with the + * read lock held. + * + * Both read- and write lock taking is interruptible for low signal-delivery + * latency. The locking functions will return -EAGAIN if interrupted by a + * signal. + * + * Locking order: The lock should be taken BEFORE any kernel mutexes + * or spinlocks. 
+ */ + +#include "drmP.h" + +void drm_bo_init_lock(struct drm_bo_lock *lock) +{ + DRM_INIT_WAITQUEUE(&lock->queue); + atomic_set(&lock->write_lock_pending, 0); + atomic_set(&lock->readers, 0); +} + +void drm_bo_read_unlock(struct drm_bo_lock *lock) +{ + if (unlikely(atomic_add_negative(-1, &lock->readers))) + BUG(); + if (atomic_read(&lock->readers) == 0) + wake_up_interruptible(&lock->queue); +} +EXPORT_SYMBOL(drm_bo_read_unlock); + +int drm_bo_read_lock(struct drm_bo_lock *lock) +{ + while (unlikely(atomic_read(&lock->write_lock_pending) != 0)) { + int ret; + ret = wait_event_interruptible + (lock->queue, atomic_read(&lock->write_lock_pending) == 0); + if (ret) + return -EAGAIN; + } + + while (unlikely(!atomic_add_unless(&lock->readers, 1, -1))) { + int ret; + ret = wait_event_interruptible + (lock->queue, atomic_add_unless(&lock->readers, 1, -1)); + if (ret) + return -EAGAIN; + } + return 0; +} +EXPORT_SYMBOL(drm_bo_read_lock); + +static int __drm_bo_write_unlock(struct drm_bo_lock *lock) +{ + if (unlikely(atomic_cmpxchg(&lock->readers, -1, 0) != -1)) + return -EINVAL; + if (unlikely(atomic_cmpxchg(&lock->write_lock_pending, 1, 0) != 1)) + return -EINVAL; + wake_up_interruptible(&lock->queue); + return 0; +} + +static void drm_bo_write_lock_remove(struct drm_file *file_priv, + struct drm_user_object *item) +{ + struct drm_bo_lock *lock = container_of(item, struct drm_bo_lock, base); + int ret; + + ret = __drm_bo_write_unlock(lock); + BUG_ON(ret); +} + +int drm_bo_write_lock(struct drm_bo_lock *lock, struct drm_file *file_priv) +{ + int ret = 0; + struct drm_device *dev; + + if (unlikely(atomic_cmpxchg(&lock->write_lock_pending, 0, 1) != 0)) + return -EINVAL; + + while (unlikely(atomic_cmpxchg(&lock->readers, 0, -1) != 0)) { + ret = wait_event_interruptible + (lock->queue, atomic_cmpxchg(&lock->readers, 0, -1) == 0); + + if (ret) { + atomic_set(&lock->write_lock_pending, 0); + wake_up_interruptible(&lock->queue); + return -EAGAIN; + } + } + + /* + * Add a dummy user-object, the destructor of which will + * make sure the lock is released if the client dies + * while holding it. + */ + + dev = file_priv->head->dev; + mutex_lock(&dev->struct_mutex); + ret = drm_add_user_object(file_priv, &lock->base, 0); + lock->base.remove = &drm_bo_write_lock_remove; + lock->base.type = drm_lock_type; + if (ret) + (void)__drm_bo_write_unlock(lock); + + mutex_unlock(&dev->struct_mutex); + + return ret; +} + +int drm_bo_write_unlock(struct drm_bo_lock *lock, struct drm_file *file_priv) +{ + struct drm_device *dev = file_priv->head->dev; + struct drm_ref_object *ro; + + mutex_lock(&dev->struct_mutex); + + if (lock->base.owner != file_priv) { + mutex_unlock(&dev->struct_mutex); + return -EINVAL; + } + ro = drm_lookup_ref_object(file_priv, &lock->base, _DRM_REF_USE); + BUG_ON(!ro); + drm_remove_ref_object(file_priv, ro); + lock->base.owner = NULL; + + mutex_unlock(&dev->struct_mutex); + return 0; +} diff --git a/drivers/char/drm/drm_bo_move.c b/drivers/char/drm/drm_bo_move.c new file mode 100644 index 0000000..80dd784 --- /dev/null +++ b/drivers/char/drm/drm_bo_move.c @@ -0,0 +1,576 @@ +/************************************************************************** + * + * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +/* + * Authors: Thomas Hellstrom + */ + +#include "drmP.h" + +/** + * Free the old memory node unless it's a pinned region and we + * have not been requested to free also pinned regions. + */ + +static void drm_bo_free_old_node(struct drm_buffer_object *bo) +{ + struct drm_bo_mem_reg *old_mem = &bo->mem; + + if (old_mem->mm_node && (old_mem->mm_node != bo->pinned_node)) { + mutex_lock(&bo->dev->struct_mutex); + drm_mm_put_block(old_mem->mm_node); + old_mem->mm_node = NULL; + mutex_unlock(&bo->dev->struct_mutex); + } + old_mem->mm_node = NULL; +} + +int drm_bo_move_ttm(struct drm_buffer_object *bo, + int evict, int no_wait, struct drm_bo_mem_reg *new_mem) +{ + struct drm_ttm *ttm = bo->ttm; + struct drm_bo_mem_reg *old_mem = &bo->mem; + uint64_t save_flags = old_mem->flags; + uint64_t save_mask = old_mem->mask; + int ret; + + if (old_mem->mem_type == DRM_BO_MEM_TT) { + if (evict) + drm_ttm_evict(ttm); + else + drm_ttm_unbind(ttm); + + drm_bo_free_old_node(bo); + DRM_FLAG_MASKED(old_mem->flags, + DRM_BO_FLAG_CACHED | DRM_BO_FLAG_MAPPABLE | + DRM_BO_FLAG_MEM_LOCAL, DRM_BO_MASK_MEMTYPE); + old_mem->mem_type = DRM_BO_MEM_LOCAL; + save_flags = old_mem->flags; + } + if (new_mem->mem_type != DRM_BO_MEM_LOCAL) { + ret = drm_bind_ttm(ttm, new_mem); + if (ret) + return ret; + } + + *old_mem = *new_mem; + new_mem->mm_node = NULL; + old_mem->mask = save_mask; + DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE); + return 0; +} +EXPORT_SYMBOL(drm_bo_move_ttm); + +/** + * \c Return a kernel virtual address to the buffer object PCI memory. + * + * \param bo The buffer object. + * \return Failure indication. + * + * Returns -EINVAL if the buffer object is currently not mappable. + * Returns -ENOMEM if the ioremap operation failed. + * Otherwise returns zero. + * + * After a successfull call, bo->iomap contains the virtual address, or NULL + * if the buffer object content is not accessible through PCI space. + * Call bo->mutex locked. 
+ */ + +int drm_mem_reg_ioremap(struct drm_device *dev, struct drm_bo_mem_reg *mem, + void **virtual) +{ + struct drm_buffer_manager *bm = &dev->bm; + struct drm_mem_type_manager *man = &bm->man[mem->mem_type]; + unsigned long bus_offset; + unsigned long bus_size; + unsigned long bus_base; + int ret; + void *addr; + + *virtual = NULL; + ret = drm_bo_pci_offset(dev, mem, &bus_base, &bus_offset, &bus_size); + if (ret || bus_size == 0) + return ret; + + if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP)) + addr = (void *)(((u8 *) man->io_addr) + bus_offset); + else { + addr = ioremap_nocache(bus_base + bus_offset, bus_size); + if (!addr) + return -ENOMEM; + } + *virtual = addr; + return 0; +} +EXPORT_SYMBOL(drm_mem_reg_ioremap); + +/** + * \c Unmap mapping obtained using drm_bo_ioremap + * + * \param bo The buffer object. + * + * Call bo->mutex locked. + */ + +void drm_mem_reg_iounmap(struct drm_device *dev, struct drm_bo_mem_reg *mem, + void *virtual) +{ + struct drm_buffer_manager *bm; + struct drm_mem_type_manager *man; + + bm = &dev->bm; + man = &bm->man[mem->mem_type]; + + if (virtual && (man->flags & _DRM_FLAG_NEEDS_IOREMAP)) + iounmap(virtual); +} + +static int drm_copy_io_page(void *dst, void *src, unsigned long page) +{ + uint32_t *dstP = + (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT)); + uint32_t *srcP = + (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT)); + + int i; + for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i) + iowrite32(ioread32(srcP++), dstP++); + return 0; +} + +static int drm_copy_io_ttm_page(struct drm_ttm *ttm, void *src, + unsigned long page) +{ + struct page *d = drm_ttm_get_page(ttm, page); + void *dst; + + if (!d) + return -ENOMEM; + + src = (void *)((unsigned long)src + (page << PAGE_SHIFT)); + dst = kmap(d); + if (!dst) + return -ENOMEM; + + memcpy_fromio(dst, src, PAGE_SIZE); + kunmap(d); + return 0; +} + +static int drm_copy_ttm_io_page(struct drm_ttm *ttm, void *dst, unsigned long page) +{ + struct page *s = drm_ttm_get_page(ttm, page); + void *src; + + if (!s) + return -ENOMEM; + + dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT)); + src = kmap(s); + if (!src) + return -ENOMEM; + + memcpy_toio(dst, src, PAGE_SIZE); + kunmap(s); + return 0; +} + +int drm_bo_move_memcpy(struct drm_buffer_object *bo, + int evict, int no_wait, struct drm_bo_mem_reg *new_mem) +{ + struct drm_device *dev = bo->dev; + struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type]; + struct drm_ttm *ttm = bo->ttm; + struct drm_bo_mem_reg *old_mem = &bo->mem; + struct drm_bo_mem_reg old_copy = *old_mem; + void *old_iomap; + void *new_iomap; + int ret; + uint64_t save_flags = old_mem->flags; + uint64_t save_mask = old_mem->mask; + unsigned long i; + unsigned long page; + unsigned long add = 0; + int dir; + + ret = drm_mem_reg_ioremap(dev, old_mem, &old_iomap); + if (ret) + return ret; + ret = drm_mem_reg_ioremap(dev, new_mem, &new_iomap); + if (ret) + goto out; + + if (old_iomap == NULL && new_iomap == NULL) + goto out2; + if (old_iomap == NULL && ttm == NULL) + goto out2; + + add = 0; + dir = 1; + + if ((old_mem->mem_type == new_mem->mem_type) && + (new_mem->mm_node->start < + old_mem->mm_node->start + old_mem->mm_node->size)) { + dir = -1; + add = new_mem->num_pages - 1; + } + + for (i = 0; i < new_mem->num_pages; ++i) { + page = i * dir + add; + if (old_iomap == NULL) + ret = drm_copy_ttm_io_page(ttm, new_iomap, page); + else if (new_iomap == NULL) + ret = drm_copy_io_ttm_page(ttm, old_iomap, page); + else + ret = drm_copy_io_page(new_iomap, old_iomap, 
page); + if (ret) + goto out1; + } + mb(); +out2: + drm_bo_free_old_node(bo); + + *old_mem = *new_mem; + new_mem->mm_node = NULL; + old_mem->mask = save_mask; + DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE); + + if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (ttm != NULL)) { + drm_ttm_unbind(ttm); + drm_destroy_ttm(ttm); + bo->ttm = NULL; + } + +out1: + drm_mem_reg_iounmap(dev, new_mem, new_iomap); +out: + drm_mem_reg_iounmap(dev, &old_copy, old_iomap); + return ret; +} +EXPORT_SYMBOL(drm_bo_move_memcpy); + +/* + * Transfer a buffer object's memory and LRU status to a newly + * created object. User-space references remains with the old + * object. Call bo->mutex locked. + */ + +int drm_buffer_object_transfer(struct drm_buffer_object *bo, + struct drm_buffer_object **new_obj) +{ + struct drm_buffer_object *fbo; + struct drm_device *dev = bo->dev; + struct drm_buffer_manager *bm = &dev->bm; + + fbo = drm_ctl_calloc(1, sizeof(*fbo), DRM_MEM_BUFOBJ); + if (!fbo) + return -ENOMEM; + + *fbo = *bo; + mutex_init(&fbo->mutex); + mutex_lock(&fbo->mutex); + mutex_lock(&dev->struct_mutex); + + DRM_INIT_WAITQUEUE(&bo->event_queue); + INIT_LIST_HEAD(&fbo->ddestroy); + INIT_LIST_HEAD(&fbo->lru); + INIT_LIST_HEAD(&fbo->pinned_lru); + + drm_fence_reference_unlocked(&fbo->fence, bo->fence); + fbo->pinned_node = NULL; + fbo->mem.mm_node->private = (void *)fbo; + atomic_set(&fbo->usage, 1); + atomic_inc(&bm->count); + mutex_unlock(&dev->struct_mutex); + mutex_unlock(&fbo->mutex); + + *new_obj = fbo; + return 0; +} + +/* + * Since move is underway, we need to block signals in this function. + * We cannot restart until it has finished. + */ + +int drm_bo_move_accel_cleanup(struct drm_buffer_object *bo, + int evict, int no_wait, uint32_t fence_class, + uint32_t fence_type, uint32_t fence_flags, + struct drm_bo_mem_reg *new_mem) +{ + struct drm_device *dev = bo->dev; + struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type]; + struct drm_bo_mem_reg *old_mem = &bo->mem; + int ret; + uint64_t save_flags = old_mem->flags; + uint64_t save_mask = old_mem->mask; + struct drm_buffer_object *old_obj; + + if (bo->fence) + drm_fence_usage_deref_unlocked(&bo->fence); + ret = drm_fence_object_create(dev, fence_class, fence_type, + fence_flags | DRM_FENCE_FLAG_EMIT, + &bo->fence); + bo->fence_type = fence_type; + if (ret) + return ret; + + if (evict || ((bo->mem.mm_node == bo->pinned_node) && + bo->mem.mm_node != NULL)) { + ret = drm_bo_wait(bo, 0, 1, 0); + if (ret) + return ret; + + drm_bo_free_old_node(bo); + + if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm != NULL)) { + drm_ttm_unbind(bo->ttm); + drm_destroy_ttm(bo->ttm); + bo->ttm = NULL; + } + } else { + + /* This should help pipeline ordinary buffer moves. + * + * Hang old buffer memory on a new buffer object, + * and leave it to be released when the GPU + * operation has completed. 
+ */ + + ret = drm_buffer_object_transfer(bo, &old_obj); + + if (ret) + return ret; + + if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) + old_obj->ttm = NULL; + else + bo->ttm = NULL; + + mutex_lock(&dev->struct_mutex); + list_del_init(&old_obj->lru); + DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); + drm_bo_add_to_lru(old_obj); + + drm_bo_usage_deref_locked(&old_obj); + mutex_unlock(&dev->struct_mutex); + + } + + *old_mem = *new_mem; + new_mem->mm_node = NULL; + old_mem->mask = save_mask; + DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE); + return 0; +} +EXPORT_SYMBOL(drm_bo_move_accel_cleanup); + +int drm_bo_same_page(unsigned long offset, + unsigned long offset2) +{ + return (offset & PAGE_MASK) == (offset2 & PAGE_MASK); +} +EXPORT_SYMBOL(drm_bo_same_page); + +unsigned long drm_bo_offset_end(unsigned long offset, + unsigned long end) +{ + offset = (offset + PAGE_SIZE) & PAGE_MASK; + return (end < offset) ? end : offset; +} +EXPORT_SYMBOL(drm_bo_offset_end); + +static pgprot_t drm_kernel_io_prot(uint32_t map_type) +{ + pgprot_t tmp = PAGE_KERNEL; + +#if defined(__i386__) || defined(__x86_64__) +#ifdef USE_PAT_WC +#warning using pat + if (drm_use_pat() && map_type == _DRM_TTM) { + pgprot_val(tmp) |= _PAGE_PAT; + return tmp; + } +#endif + if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) { + pgprot_val(tmp) |= _PAGE_PCD; + pgprot_val(tmp) &= ~_PAGE_PWT; + } +#elif defined(__powerpc__) + pgprot_val(tmp) |= _PAGE_NO_CACHE; + if (map_type == _DRM_REGISTERS) + pgprot_val(tmp) |= _PAGE_GUARDED; +#endif +#if defined(__ia64__) + if (map_type == _DRM_TTM) + tmp = pgprot_writecombine(tmp); + else + tmp = pgprot_noncached(tmp); +#endif + return tmp; +} + +static int drm_bo_ioremap(struct drm_buffer_object *bo, unsigned long bus_base, + unsigned long bus_offset, unsigned long bus_size, + struct drm_bo_kmap_obj *map) +{ + struct drm_device *dev = bo->dev; + struct drm_bo_mem_reg *mem = &bo->mem; + struct drm_mem_type_manager *man = &dev->bm.man[mem->mem_type]; + + if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP)) { + map->bo_kmap_type = bo_map_premapped; + map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset); + } else { + map->bo_kmap_type = bo_map_iomap; + map->virtual = ioremap_nocache(bus_base + bus_offset, bus_size); + } + return (!map->virtual) ? -ENOMEM : 0; +} + +static int drm_bo_kmap_ttm(struct drm_buffer_object *bo, + unsigned long start_page, unsigned long num_pages, + struct drm_bo_kmap_obj *map) +{ + struct drm_device *dev = bo->dev; + struct drm_bo_mem_reg *mem = &bo->mem; + struct drm_mem_type_manager *man = &dev->bm.man[mem->mem_type]; + pgprot_t prot; + struct drm_ttm *ttm = bo->ttm; + struct page *d; + int i; + + BUG_ON(!ttm); + + if (num_pages == 1 && (mem->flags & DRM_BO_FLAG_CACHED)) { + + /* + * We're mapping a single page, and the desired + * page protection is consistent with the bo. + */ + + map->bo_kmap_type = bo_map_kmap; + map->page = drm_ttm_get_page(ttm, start_page); + map->virtual = kmap(map->page); + } else { + /* + * Populate the part we're mapping; + */ + + for (i = start_page; i < start_page + num_pages; ++i) { + d = drm_ttm_get_page(ttm, i); + if (!d) + return -ENOMEM; + } + + /* + * We need to use vmap to get the desired page protection + * or to make the buffer object look contigous. + */ + + prot = (mem->flags & DRM_BO_FLAG_CACHED) ? + PAGE_KERNEL : + drm_kernel_io_prot(man->drm_bus_maptype); + map->bo_kmap_type = bo_map_vmap; + map->virtual = vmap(ttm->pages + start_page, + num_pages, 0, prot); + } + return (!map->virtual) ? 
-ENOMEM : 0; +} + +/* + * This function is to be used for kernel mapping of buffer objects. + * It chooses the appropriate mapping method depending on the memory type + * and caching policy the buffer currently has. + * Mapping multiple pages or buffers that live in io memory is a bit slow and + * consumes vmalloc space. Be restrictive with such mappings. + * Mapping single pages usually returns the logical kernel address, + * (which is fast) + * BUG may use slower temporary mappings for high memory pages or + * uncached / write-combined pages. + * + * The function fills in a drm_bo_kmap_obj which can be used to return the + * kernel virtual address of the buffer. + * + * Code servicing a non-priviliged user request is only allowed to map one + * page at a time. We might need to implement a better scheme to stop such + * processes from consuming all vmalloc space. + */ + +int drm_bo_kmap(struct drm_buffer_object *bo, unsigned long start_page, + unsigned long num_pages, struct drm_bo_kmap_obj *map) +{ + int ret; + unsigned long bus_base; + unsigned long bus_offset; + unsigned long bus_size; + + map->virtual = NULL; + + if (num_pages > bo->num_pages) + return -EINVAL; + if (start_page > bo->num_pages) + return -EINVAL; + ret = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base, + &bus_offset, &bus_size); + + if (ret) + return ret; + + if (bus_size == 0) { + return drm_bo_kmap_ttm(bo, start_page, num_pages, map); + } else { + bus_offset += start_page << PAGE_SHIFT; + bus_size = num_pages << PAGE_SHIFT; + return drm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map); + } +} +EXPORT_SYMBOL(drm_bo_kmap); + +void drm_bo_kunmap(struct drm_bo_kmap_obj *map) +{ + if (!map->virtual) + return; + + switch (map->bo_kmap_type) { + case bo_map_iomap: + iounmap(map->virtual); + break; + case bo_map_vmap: + vunmap(map->virtual); + break; + case bo_map_kmap: + kunmap(map->page); + break; + case bo_map_premapped: + break; + default: + BUG(); + } + map->virtual = NULL; + map->page = NULL; +} +EXPORT_SYMBOL(drm_bo_kunmap); diff --git a/drivers/char/drm/drm_bufs.c b/drivers/char/drm/drm_bufs.c index d24a6c2..36ecbab 100644 --- a/drivers/char/drm/drm_bufs.c +++ b/drivers/char/drm/drm_bufs.c @@ -184,7 +184,7 @@ static int drm_addmap_core(struct drm_device * dev, unsigned int offset, return -ENOMEM; } } - + break; case _DRM_SHM: list = drm_find_matching_map(dev, map); @@ -413,6 +413,8 @@ int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map) dmah.size = map->size; __drm_pci_free(dev, &dmah); break; + case _DRM_TTM: + BUG_ON(1); } drm_free(map, sizeof(*map), DRM_MEM_MAPS); @@ -814,9 +816,9 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request) page_count = 0; while (entry->buf_count < count) { - + dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful); - + if (!dmah) { /* Set count correctly so we free the proper amount. 
*/ entry->buf_count = count; @@ -1592,5 +1594,3 @@ int drm_order(unsigned long size) return order; } EXPORT_SYMBOL(drm_order); - - diff --git a/drivers/char/drm/drm_context.c b/drivers/char/drm/drm_context.c index 17fe69e..d505f69 100644 --- a/drivers/char/drm/drm_context.c +++ b/drivers/char/drm/drm_context.c @@ -159,7 +159,7 @@ int drm_getsareactx(struct drm_device *dev, void *data, request->handle = NULL; list_for_each_entry(_entry, &dev->maplist, head) { if (_entry->map == map) { - request->handle = + request->handle = (void *)(unsigned long)_entry->user_token; break; } diff --git a/drivers/char/drm/drm_drv.c b/drivers/char/drm/drm_drv.c index 44a4626..3d1c160 100644 --- a/drivers/char/drm/drm_drv.c +++ b/drivers/char/drm/drm_drv.c @@ -117,6 +117,34 @@ static struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0), DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + + DRM_IOCTL_DEF(DRM_IOCTL_MM_INIT, drm_mm_init_ioctl, + DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_MM_TAKEDOWN, drm_mm_takedown_ioctl, + DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_MM_LOCK, drm_mm_lock_ioctl, + DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_MM_UNLOCK, drm_mm_unlock_ioctl, + DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + + DRM_IOCTL_DEF(DRM_IOCTL_FENCE_CREATE, drm_fence_create_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_FENCE_REFERENCE, drm_fence_reference_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_FENCE_UNREFERENCE, drm_fence_unreference_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_FENCE_SIGNALED, drm_fence_signaled_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_FENCE_FLUSH, drm_fence_flush_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_FENCE_WAIT, drm_fence_wait_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_FENCE_EMIT, drm_fence_emit_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_FENCE_BUFFERS, drm_fence_buffers_ioctl, DRM_AUTH), + + DRM_IOCTL_DEF(DRM_IOCTL_BO_CREATE, drm_bo_create_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_BO_MAP, drm_bo_map_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_BO_UNMAP, drm_bo_unmap_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_BO_REFERENCE, drm_bo_reference_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_BO_UNREFERENCE, drm_bo_unreference_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_BO_SETSTATUS, drm_bo_setstatus_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_BO_INFO, drm_bo_info_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_BO_WAIT_IDLE, drm_bo_wait_idle_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_BO_VERSION, drm_bo_version_ioctl, 0), }; #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) @@ -139,6 +167,8 @@ int drm_lastclose(struct drm_device * dev) DRM_DEBUG("\n"); + drm_bo_driver_finish(dev); + if (dev->driver->lastclose) dev->driver->lastclose(dev); DRM_DEBUG("driver lastclose completed\n"); @@ -196,7 +226,7 @@ int drm_lastclose(struct drm_device * dev) /* Clear vma list (only built for debugging) */ list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) { list_del(&vma->head); - drm_free(vma, sizeof(*vma), DRM_MEM_VMAS); + drm_ctl_free(vma, sizeof(*vma), DRM_MEM_VMAS); } list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) { @@ -228,6 +258,7 @@ int drm_lastclose(struct drm_device * dev) dev->lock.file_priv = NULL; wake_up_interruptible(&dev->lock.lock_queue); } + dev->dev_mapping = NULL; mutex_unlock(&dev->struct_mutex); DRM_DEBUG("lastclose completed\n"); @@ -292,8 +323,11 @@ static void drm_cleanup(struct drm_device * dev) } drm_lastclose(dev); + 
drm_fence_manager_takedown(dev); drm_ht_remove(&dev->map_hash); + drm_mm_takedown(&dev->offset_manager); + drm_ht_remove(&dev->object_hash); drm_ctxbitmap_cleanup(dev); @@ -356,8 +390,32 @@ static const struct file_operations drm_stub_fops = { static int __init drm_core_init(void) { - int ret = -ENOMEM; + int ret; + struct sysinfo si; + unsigned long avail_memctl_mem; + unsigned long max_memctl_mem; + + si_meminfo(&si); + + /* + * AGP only allows low / DMA32 memory ATM. + */ + + avail_memctl_mem = si.totalram - si.totalhigh; + + /* + * Avoid overflows + */ + + max_memctl_mem = 1UL << (32 - PAGE_SHIFT); + max_memctl_mem = (max_memctl_mem / si.mem_unit) * PAGE_SIZE; + + if (avail_memctl_mem >= max_memctl_mem) + avail_memctl_mem = max_memctl_mem; + + drm_init_memctl(avail_memctl_mem/2, avail_memctl_mem*3/4, si.mem_unit); + ret = -ENOMEM; drm_cards_limit = (drm_cards_limit < DRM_MAX_MINOR + 1 ? drm_cards_limit : DRM_MAX_MINOR + 1); diff --git a/drivers/char/drm/drm_fence.c b/drivers/char/drm/drm_fence.c new file mode 100644 index 0000000..a224e18 --- /dev/null +++ b/drivers/char/drm/drm_fence.c @@ -0,0 +1,847 @@ +/************************************************************************** + * + * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +/* + * Authors: Thomas Hellstrom + */ + +#include "drmP.h" + +/* + * Typically called by the IRQ handler. 
+ */ + +void drm_fence_handler(struct drm_device *dev, uint32_t fence_class, + uint32_t sequence, uint32_t type, uint32_t error) +{ + int wake = 0; + uint32_t diff; + uint32_t relevant; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_class_manager *fc = &fm->fence_class[fence_class]; + struct drm_fence_driver *driver = dev->driver->fence_driver; + struct list_head *head; + struct drm_fence_object *fence, *next; + int found = 0; + int is_exe = (type & DRM_FENCE_TYPE_EXE); + int ge_last_exe; + + + diff = (sequence - fc->exe_flush_sequence) & driver->sequence_mask; + + if (fc->pending_exe_flush && is_exe && diff < driver->wrap_diff) + fc->pending_exe_flush = 0; + + diff = (sequence - fc->last_exe_flush) & driver->sequence_mask; + ge_last_exe = diff < driver->wrap_diff; + + if (is_exe && ge_last_exe) + fc->last_exe_flush = sequence; + + if (list_empty(&fc->ring)) + return; + + list_for_each_entry(fence, &fc->ring, ring) { + diff = (sequence - fence->sequence) & driver->sequence_mask; + if (diff > driver->wrap_diff) { + found = 1; + break; + } + } + + fc->pending_flush &= ~type; + head = (found) ? &fence->ring : &fc->ring; + + list_for_each_entry_safe_reverse(fence, next, head, ring) { + if (&fence->ring == &fc->ring) + break; + + if (error) { + fence->error = error; + fence->signaled = fence->type; + fence->submitted_flush = fence->type; + fence->flush_mask = fence->type; + list_del_init(&fence->ring); + wake = 1; + break; + } + + if (is_exe) + type |= fence->native_type; + + relevant = type & fence->type; + + if ((fence->signaled | relevant) != fence->signaled) { + fence->signaled |= relevant; + fence->flush_mask |= relevant; + fence->submitted_flush |= relevant; + DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n", + fence->base.hash.key, fence->signaled); + wake = 1; + } + + relevant = fence->flush_mask & + ~(fence->submitted_flush | fence->signaled); + + fc->pending_flush |= relevant; + fence->submitted_flush |= relevant; + + if (!(fence->type & ~fence->signaled)) { + DRM_DEBUG("Fence completely signaled 0x%08lx\n", + fence->base.hash.key); + list_del_init(&fence->ring); + } + + } + + /* + * Reinstate lost flush flags. 
+ */ + + if ((fc->pending_flush & type) != type) { + head = head->prev; + list_for_each_entry(fence, head, ring) { + if (&fence->ring == &fc->ring) + break; + diff = (fc->last_exe_flush - fence->sequence) & + driver->sequence_mask; + if (diff > driver->wrap_diff) + break; + + relevant = fence->submitted_flush & ~fence->signaled; + fc->pending_flush |= relevant; + } + } + + if (wake) { + DRM_WAKEUP(&fc->fence_queue); + } +} +EXPORT_SYMBOL(drm_fence_handler); + +static void drm_fence_unring(struct drm_device *dev, struct list_head *ring) +{ + struct drm_fence_manager *fm = &dev->fm; + unsigned long flags; + + write_lock_irqsave(&fm->lock, flags); + list_del_init(ring); + write_unlock_irqrestore(&fm->lock, flags); +} + +void drm_fence_usage_deref_locked(struct drm_fence_object **fence) +{ + struct drm_fence_object *tmp_fence = *fence; + struct drm_device *dev = tmp_fence->dev; + struct drm_fence_manager *fm = &dev->fm; + + DRM_ASSERT_LOCKED(&dev->struct_mutex); + *fence = NULL; + if (atomic_dec_and_test(&tmp_fence->usage)) { + drm_fence_unring(dev, &tmp_fence->ring); + DRM_DEBUG("Destroyed a fence object 0x%08lx\n", + tmp_fence->base.hash.key); + atomic_dec(&fm->count); + BUG_ON(!list_empty(&tmp_fence->base.list)); + drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE); + } +} +EXPORT_SYMBOL(drm_fence_usage_deref_locked); + +void drm_fence_usage_deref_unlocked(struct drm_fence_object **fence) +{ + struct drm_fence_object *tmp_fence = *fence; + struct drm_device *dev = tmp_fence->dev; + struct drm_fence_manager *fm = &dev->fm; + + *fence = NULL; + if (atomic_dec_and_test(&tmp_fence->usage)) { + mutex_lock(&dev->struct_mutex); + if (atomic_read(&tmp_fence->usage) == 0) { + drm_fence_unring(dev, &tmp_fence->ring); + atomic_dec(&fm->count); + BUG_ON(!list_empty(&tmp_fence->base.list)); + drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE); + } + mutex_unlock(&dev->struct_mutex); + } +} +EXPORT_SYMBOL(drm_fence_usage_deref_unlocked); + +struct drm_fence_object +*drm_fence_reference_locked(struct drm_fence_object *src) +{ + DRM_ASSERT_LOCKED(&src->dev->struct_mutex); + + atomic_inc(&src->usage); + return src; +} + +void drm_fence_reference_unlocked(struct drm_fence_object **dst, + struct drm_fence_object *src) +{ + mutex_lock(&src->dev->struct_mutex); + *dst = src; + atomic_inc(&src->usage); + mutex_unlock(&src->dev->struct_mutex); +} +EXPORT_SYMBOL(drm_fence_reference_unlocked); + +static void drm_fence_object_destroy(struct drm_file *priv, + struct drm_user_object *base) +{ + struct drm_fence_object *fence = + drm_user_object_entry(base, struct drm_fence_object, base); + + drm_fence_usage_deref_locked(&fence); +} + +int drm_fence_object_signaled(struct drm_fence_object *fence, + uint32_t mask, int poke_flush) +{ + unsigned long flags; + int signaled; + struct drm_device *dev = fence->dev; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_driver *driver = dev->driver->fence_driver; + + if (poke_flush) + driver->poke_flush(dev, fence->fence_class); + read_lock_irqsave(&fm->lock, flags); + signaled = + (fence->type & mask & fence->signaled) == (fence->type & mask); + read_unlock_irqrestore(&fm->lock, flags); + + return signaled; +} +EXPORT_SYMBOL(drm_fence_object_signaled); + +static void drm_fence_flush_exe(struct drm_fence_class_manager *fc, + struct drm_fence_driver *driver, + uint32_t sequence) +{ + uint32_t diff; + + if (!fc->pending_exe_flush) { + fc->exe_flush_sequence = sequence; + fc->pending_exe_flush = 1; + } else { + diff = (sequence - fc->exe_flush_sequence) & 
driver->sequence_mask; + if (diff < driver->wrap_diff) + fc->exe_flush_sequence = sequence; + } +} + +int drm_fence_object_flush(struct drm_fence_object *fence, + uint32_t type) +{ + struct drm_device *dev = fence->dev; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class]; + struct drm_fence_driver *driver = dev->driver->fence_driver; + unsigned long flags; + + if (type & ~fence->type) { + DRM_ERROR("Flush trying to extend fence type, " + "0x%x, 0x%x\n", type, fence->type); + return -EINVAL; + } + + write_lock_irqsave(&fm->lock, flags); + fence->flush_mask |= type; + if ((fence->submitted_flush & fence->signaled) + == fence->submitted_flush) { + if ((fence->type & DRM_FENCE_TYPE_EXE) && + !(fence->submitted_flush & DRM_FENCE_TYPE_EXE)) { + drm_fence_flush_exe(fc, driver, fence->sequence); + fence->submitted_flush |= DRM_FENCE_TYPE_EXE; + } else { + fc->pending_flush |= (fence->flush_mask & + ~fence->submitted_flush); + fence->submitted_flush = fence->flush_mask; + } + } + write_unlock_irqrestore(&fm->lock, flags); + driver->poke_flush(dev, fence->fence_class); + return 0; +} + +/* + * Make sure old fence objects are signaled before their fence sequences are + * wrapped around and reused. + */ + +void drm_fence_flush_old(struct drm_device *dev, uint32_t fence_class, + uint32_t sequence) +{ + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_class_manager *fc = &fm->fence_class[fence_class]; + struct drm_fence_driver *driver = dev->driver->fence_driver; + uint32_t old_sequence; + unsigned long flags; + struct drm_fence_object *fence; + uint32_t diff; + + write_lock_irqsave(&fm->lock, flags); + old_sequence = (sequence - driver->flush_diff) & driver->sequence_mask; + diff = (old_sequence - fc->last_exe_flush) & driver->sequence_mask; + + if ((diff < driver->wrap_diff) && !fc->pending_exe_flush) { + fc->pending_exe_flush = 1; + fc->exe_flush_sequence = sequence - (driver->flush_diff / 2); + } + write_unlock_irqrestore(&fm->lock, flags); + + mutex_lock(&dev->struct_mutex); + read_lock_irqsave(&fm->lock, flags); + + if (list_empty(&fc->ring)) { + read_unlock_irqrestore(&fm->lock, flags); + mutex_unlock(&dev->struct_mutex); + return; + } + fence = drm_fence_reference_locked(list_entry(fc->ring.next, struct drm_fence_object, ring)); + mutex_unlock(&dev->struct_mutex); + diff = (old_sequence - fence->sequence) & driver->sequence_mask; + read_unlock_irqrestore(&fm->lock, flags); + if (diff < driver->wrap_diff) + drm_fence_object_flush(fence, fence->type); + drm_fence_usage_deref_unlocked(&fence); +} +EXPORT_SYMBOL(drm_fence_flush_old); + +static int drm_fence_lazy_wait(struct drm_fence_object *fence, + int ignore_signals, + uint32_t mask) +{ + struct drm_device *dev = fence->dev; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class]; + int signaled; + unsigned long _end = jiffies + 3*DRM_HZ; + int ret = 0; + + do { + DRM_WAIT_ON(ret, fc->fence_queue, 3 * DRM_HZ, + (signaled = drm_fence_object_signaled(fence, mask, 1))); + if (signaled) + return 0; + if (time_after_eq(jiffies, _end)) + break; + } while (ret == -EINTR && ignore_signals); + if (drm_fence_object_signaled(fence, mask, 0)) + return 0; + if (time_after_eq(jiffies, _end)) + ret = -EBUSY; + if (ret) { + if (ret == -EBUSY) { + DRM_ERROR("Fence timeout. " + "GPU lockup or fence driver was " + "taken down. 
%d 0x%08x 0x%02x 0x%02x 0x%02x\n", + fence->fence_class, + fence->sequence, + fence->type, + mask, + fence->signaled); + DRM_ERROR("Pending exe flush %d 0x%08x\n", + fc->pending_exe_flush, + fc->exe_flush_sequence); + } + return ((ret == -EINTR) ? -EAGAIN : ret); + } + return 0; +} + +int drm_fence_object_wait(struct drm_fence_object *fence, + int lazy, int ignore_signals, uint32_t mask) +{ + struct drm_device *dev = fence->dev; + struct drm_fence_driver *driver = dev->driver->fence_driver; + int ret = 0; + unsigned long _end; + int signaled; + + if (mask & ~fence->type) { + DRM_ERROR("Wait trying to extend fence type" + " 0x%08x 0x%08x\n", mask, fence->type); + BUG(); + return -EINVAL; + } + + if (drm_fence_object_signaled(fence, mask, 0)) + return 0; + + _end = jiffies + 3 * DRM_HZ; + + drm_fence_object_flush(fence, mask); + + if (lazy && driver->lazy_capable) { + + ret = drm_fence_lazy_wait(fence, ignore_signals, mask); + if (ret) + return ret; + + } else { + + if (driver->has_irq(dev, fence->fence_class, + DRM_FENCE_TYPE_EXE)) { + ret = drm_fence_lazy_wait(fence, ignore_signals, + DRM_FENCE_TYPE_EXE); + if (ret) + return ret; + } + + if (driver->has_irq(dev, fence->fence_class, + mask & ~DRM_FENCE_TYPE_EXE)) { + ret = drm_fence_lazy_wait(fence, ignore_signals, + mask); + if (ret) + return ret; + } + } + if (drm_fence_object_signaled(fence, mask, 0)) + return 0; + + /* + * Avoid kernel-space busy-waits. + */ + if (!ignore_signals) + return -EAGAIN; + + do { + schedule(); + signaled = drm_fence_object_signaled(fence, mask, 1); + } while (!signaled && !time_after_eq(jiffies, _end)); + + if (!signaled) + return -EBUSY; + + return 0; +} +EXPORT_SYMBOL(drm_fence_object_wait); + +int drm_fence_object_emit(struct drm_fence_object *fence, uint32_t fence_flags, + uint32_t fence_class, uint32_t type) +{ + struct drm_device *dev = fence->dev; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_driver *driver = dev->driver->fence_driver; + struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class]; + unsigned long flags; + uint32_t sequence; + uint32_t native_type; + int ret; + + drm_fence_unring(dev, &fence->ring); + ret = driver->emit(dev, fence_class, fence_flags, &sequence, + &native_type); + if (ret) + return ret; + + write_lock_irqsave(&fm->lock, flags); + fence->fence_class = fence_class; + fence->type = type; + fence->flush_mask = 0x00; + fence->submitted_flush = 0x00; + fence->signaled = 0x00; + fence->sequence = sequence; + fence->native_type = native_type; + if (list_empty(&fc->ring)) + fc->last_exe_flush = sequence - 1; + list_add_tail(&fence->ring, &fc->ring); + write_unlock_irqrestore(&fm->lock, flags); + return 0; +} +EXPORT_SYMBOL(drm_fence_object_emit); + +static int drm_fence_object_init(struct drm_device *dev, uint32_t fence_class, + uint32_t type, + uint32_t fence_flags, + struct drm_fence_object *fence) +{ + int ret = 0; + unsigned long flags; + struct drm_fence_manager *fm = &dev->fm; + + mutex_lock(&dev->struct_mutex); + atomic_set(&fence->usage, 1); + mutex_unlock(&dev->struct_mutex); + + write_lock_irqsave(&fm->lock, flags); + INIT_LIST_HEAD(&fence->ring); + + /* + * Avoid hitting BUG() for kernel-only fence objects. 
+ */ + + INIT_LIST_HEAD(&fence->base.list); + fence->fence_class = fence_class; + fence->type = type; + fence->flush_mask = 0; + fence->submitted_flush = 0; + fence->signaled = 0; + fence->sequence = 0; + fence->dev = dev; + write_unlock_irqrestore(&fm->lock, flags); + if (fence_flags & DRM_FENCE_FLAG_EMIT) { + ret = drm_fence_object_emit(fence, fence_flags, + fence->fence_class, type); + } + return ret; +} + +int drm_fence_add_user_object(struct drm_file *priv, + struct drm_fence_object *fence, int shareable) +{ + struct drm_device *dev = priv->head->dev; + int ret; + + mutex_lock(&dev->struct_mutex); + ret = drm_add_user_object(priv, &fence->base, shareable); + if (ret) + goto out; + atomic_inc(&fence->usage); + fence->base.type = drm_fence_type; + fence->base.remove = &drm_fence_object_destroy; + DRM_DEBUG("Fence 0x%08lx created\n", fence->base.hash.key); +out: + mutex_unlock(&dev->struct_mutex); + return ret; +} +EXPORT_SYMBOL(drm_fence_add_user_object); + +int drm_fence_object_create(struct drm_device *dev, uint32_t fence_class, + uint32_t type, unsigned flags, + struct drm_fence_object **c_fence) +{ + struct drm_fence_object *fence; + int ret; + struct drm_fence_manager *fm = &dev->fm; + + fence = drm_ctl_calloc(1, sizeof(*fence), DRM_MEM_FENCE); + if (!fence) + return -ENOMEM; + ret = drm_fence_object_init(dev, fence_class, type, flags, fence); + if (ret) { + drm_fence_usage_deref_unlocked(&fence); + return ret; + } + *c_fence = fence; + atomic_inc(&fm->count); + + return 0; +} +EXPORT_SYMBOL(drm_fence_object_create); + +void drm_fence_manager_init(struct drm_device *dev) +{ + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_class_manager *fence_class; + struct drm_fence_driver *fed = dev->driver->fence_driver; + int i; + unsigned long flags; + + rwlock_init(&fm->lock); + write_lock_irqsave(&fm->lock, flags); + fm->initialized = 0; + if (!fed) + goto out_unlock; + + fm->initialized = 1; + fm->num_classes = fed->num_classes; + BUG_ON(fm->num_classes > _DRM_FENCE_CLASSES); + + for (i = 0; i < fm->num_classes; ++i) { + fence_class = &fm->fence_class[i]; + + INIT_LIST_HEAD(&fence_class->ring); + fence_class->pending_flush = 0; + DRM_INIT_WAITQUEUE(&fence_class->fence_queue); + } + + atomic_set(&fm->count, 0); + out_unlock: + write_unlock_irqrestore(&fm->lock, flags); +} + +void drm_fence_fill_arg(struct drm_fence_object *fence, + struct drm_fence_arg *arg) +{ + struct drm_device *dev = fence->dev; + struct drm_fence_manager *fm = &dev->fm; + unsigned long irq_flags; + + read_lock_irqsave(&fm->lock, irq_flags); + arg->handle = fence->base.hash.key; + arg->fence_class = fence->fence_class; + arg->type = fence->type; + arg->signaled = fence->signaled; + arg->error = fence->error; + arg->sequence = fence->sequence; + read_unlock_irqrestore(&fm->lock, irq_flags); +} +EXPORT_SYMBOL(drm_fence_fill_arg); + +void drm_fence_manager_takedown(struct drm_device *dev) +{ +} + +struct drm_fence_object *drm_lookup_fence_object(struct drm_file *priv, + uint32_t handle) +{ + struct drm_device *dev = priv->head->dev; + struct drm_user_object *uo; + struct drm_fence_object *fence; + + mutex_lock(&dev->struct_mutex); + uo = drm_lookup_user_object(priv, handle); + if (!uo || (uo->type != drm_fence_type)) { + mutex_unlock(&dev->struct_mutex); + return NULL; + } + fence = drm_fence_reference_locked(drm_user_object_entry(uo, struct drm_fence_object, base)); + mutex_unlock(&dev->struct_mutex); + return fence; +} + +int drm_fence_create_ioctl(struct drm_device *dev, void *data, struct drm_file 
*file_priv) +{ + int ret; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_arg *arg = data; + struct drm_fence_object *fence; + ret = 0; + + if (!fm->initialized) { + DRM_ERROR("The DRM driver does not support fencing.\n"); + return -EINVAL; + } + + if (arg->flags & DRM_FENCE_FLAG_EMIT) + LOCK_TEST_WITH_RETURN(dev, file_priv); + ret = drm_fence_object_create(dev, arg->fence_class, + arg->type, arg->flags, &fence); + if (ret) + return ret; + ret = drm_fence_add_user_object(file_priv, fence, + arg->flags & + DRM_FENCE_FLAG_SHAREABLE); + if (ret) { + drm_fence_usage_deref_unlocked(&fence); + return ret; + } + + /* + * usage > 0. No need to lock dev->struct_mutex; + */ + + arg->handle = fence->base.hash.key; + + drm_fence_fill_arg(fence, arg); + drm_fence_usage_deref_unlocked(&fence); + + return ret; +} + +int drm_fence_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) +{ + int ret; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_arg *arg = data; + struct drm_fence_object *fence; + struct drm_user_object *uo; + ret = 0; + + if (!fm->initialized) { + DRM_ERROR("The DRM driver does not support fencing.\n"); + return -EINVAL; + } + + ret = drm_user_object_ref(file_priv, arg->handle, drm_fence_type, &uo); + if (ret) + return ret; + fence = drm_lookup_fence_object(file_priv, arg->handle); + drm_fence_fill_arg(fence, arg); + drm_fence_usage_deref_unlocked(&fence); + + return ret; +} + + +int drm_fence_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) +{ + int ret; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_arg *arg = data; + ret = 0; + + if (!fm->initialized) { + DRM_ERROR("The DRM driver does not support fencing.\n"); + return -EINVAL; + } + + return drm_user_object_unref(file_priv, arg->handle, drm_fence_type); +} + +int drm_fence_signaled_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) +{ + int ret; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_arg *arg = data; + struct drm_fence_object *fence; + ret = 0; + + if (!fm->initialized) { + DRM_ERROR("The DRM driver does not support fencing.\n"); + return -EINVAL; + } + + fence = drm_lookup_fence_object(file_priv, arg->handle); + if (!fence) + return -EINVAL; + + drm_fence_fill_arg(fence, arg); + drm_fence_usage_deref_unlocked(&fence); + + return ret; +} + +int drm_fence_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) +{ + int ret; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_arg *arg = data; + struct drm_fence_object *fence; + ret = 0; + + if (!fm->initialized) { + DRM_ERROR("The DRM driver does not support fencing.\n"); + return -EINVAL; + } + + fence = drm_lookup_fence_object(file_priv, arg->handle); + if (!fence) + return -EINVAL; + ret = drm_fence_object_flush(fence, arg->type); + + drm_fence_fill_arg(fence, arg); + drm_fence_usage_deref_unlocked(&fence); + + return ret; +} + + +int drm_fence_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) +{ + int ret; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_arg *arg = data; + struct drm_fence_object *fence; + ret = 0; + + if (!fm->initialized) { + DRM_ERROR("The DRM driver does not support fencing.\n"); + return -EINVAL; + } + + fence = drm_lookup_fence_object(file_priv, arg->handle); + if (!fence) + return -EINVAL; + ret = drm_fence_object_wait(fence, + arg->flags & DRM_FENCE_FLAG_WAIT_LAZY, + 0, arg->type); + + drm_fence_fill_arg(fence, arg); + 
drm_fence_usage_deref_unlocked(&fence); + + return ret; +} + + +int drm_fence_emit_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) +{ + int ret; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_arg *arg = data; + struct drm_fence_object *fence; + ret = 0; + + if (!fm->initialized) { + DRM_ERROR("The DRM driver does not support fencing.\n"); + return -EINVAL; + } + + LOCK_TEST_WITH_RETURN(dev, file_priv); + fence = drm_lookup_fence_object(file_priv, arg->handle); + if (!fence) + return -EINVAL; + ret = drm_fence_object_emit(fence, arg->flags, arg->fence_class, + arg->type); + + drm_fence_fill_arg(fence, arg); + drm_fence_usage_deref_unlocked(&fence); + + return ret; +} + +int drm_fence_buffers_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) +{ + int ret; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_arg *arg = data; + struct drm_fence_object *fence; + ret = 0; + + if (!fm->initialized) { + DRM_ERROR("The DRM driver does not support fencing.\n"); + return -EINVAL; + } + + if (!dev->bm.initialized) { + DRM_ERROR("Buffer object manager is not initialized\n"); + return -EINVAL; + } + LOCK_TEST_WITH_RETURN(dev, file_priv); + ret = drm_fence_buffer_objects(dev, NULL, arg->flags, + NULL, &fence); + if (ret) + return ret; + + if (!(arg->flags & DRM_FENCE_FLAG_NO_USER)) { + ret = drm_fence_add_user_object(file_priv, fence, + arg->flags & + DRM_FENCE_FLAG_SHAREABLE); + if (ret) + return ret; + } + + arg->handle = fence->base.hash.key; + + drm_fence_fill_arg(fence, arg); + drm_fence_usage_deref_unlocked(&fence); + + return ret; +} diff --git a/drivers/char/drm/drm_fops.c b/drivers/char/drm/drm_fops.c index 3992f73..af8ae0b 100644 --- a/drivers/char/drm/drm_fops.c +++ b/drivers/char/drm/drm_fops.c @@ -147,11 +147,18 @@ int drm_open(struct inode *inode, struct file *filp) spin_lock(&dev->count_lock); if (!dev->open_count++) { spin_unlock(&dev->count_lock); - return drm_setup(dev); + retcode = drm_setup(dev); + goto out; } spin_unlock(&dev->count_lock); } - +out: + mutex_lock(&dev->struct_mutex); + BUG_ON((dev->dev_mapping != NULL) && + (dev->dev_mapping != inode->i_mapping)); + if (dev->dev_mapping == NULL) + dev->dev_mapping = inode->i_mapping; + mutex_unlock(&dev->struct_mutex); return retcode; } EXPORT_SYMBOL(drm_open); @@ -228,6 +235,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp, int minor = iminor(inode); struct drm_file *priv; int ret; + int i, j; if (filp->f_flags & O_EXCL) return -EBUSY; /* No exclusive opens */ @@ -253,6 +261,20 @@ static int drm_open_helper(struct inode *inode, struct file *filp, priv->lock_count = 0; INIT_LIST_HEAD(&priv->lhead); + INIT_LIST_HEAD(&priv->refd_objects); + + for (i = 0; i < _DRM_NO_REF_TYPES; ++i) { + ret = drm_ht_create(&priv->refd_object_hash[i], + DRM_FILE_HASH_ORDER); + if (ret) + break; + } + + if (ret) { + for (j = 0; j < i; ++j) + drm_ht_remove(&priv->refd_object_hash[j]); + goto out_free; + } if (dev->driver->open) { ret = dev->driver->open(dev, priv); @@ -309,6 +331,32 @@ int drm_fasync(int fd, struct file *filp, int on) } EXPORT_SYMBOL(drm_fasync); +static void drm_object_release(struct file *filp) +{ + struct drm_file *priv = filp->private_data; + struct list_head *head; + struct drm_ref_object *ref_object; + int i; + + /* + * Free leftover ref objects created by me. Note that we cannot use + * list_for_each() here, as the struct_mutex may be temporarily + * released by the remove_() functions, and thus the lists may be + * altered. 
+ * Also, a drm_remove_ref_object() will not remove it + * from the list unless its refcount is 1. + */ + head = &priv->refd_objects; + while (head->next != head) { + ref_object = list_entry(head->next, struct drm_ref_object, list); + drm_remove_ref_object(priv, ref_object); + head = &priv->refd_objects; + } + + for (i = 0; i < _DRM_NO_REF_TYPES; ++i) + drm_ht_remove(&priv->refd_object_hash[i]); +} + /** * Release file. * @@ -422,6 +470,7 @@ int drm_release(struct inode *inode, struct file *filp) mutex_unlock(&dev->ctxlist_mutex); mutex_lock(&dev->struct_mutex); + drm_object_release(filp); if (file_priv->remove_auth_on_close == 1) { struct drm_file *temp; diff --git a/drivers/char/drm/drm_hashtab.c b/drivers/char/drm/drm_hashtab.c index 4b8e7db..3316067 100644 --- a/drivers/char/drm/drm_hashtab.c +++ b/drivers/char/drm/drm_hashtab.c @@ -80,7 +80,7 @@ void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key) } } -static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht, +static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht, unsigned long key) { struct drm_hash_item *entry; @@ -129,7 +129,7 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item) } /* - * Just insert an item and return any "bits" bit key that hasn't been + * Just insert an item and return any "bits" bit key that hasn't been * used before. */ int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item, @@ -200,4 +200,3 @@ void drm_ht_remove(struct drm_open_hash *ht) ht->table = NULL; } } - diff --git a/drivers/char/drm/drm_hashtab.h b/drivers/char/drm/drm_hashtab.h index 573e333..cd2b189 100644 --- a/drivers/char/drm/drm_hashtab.h +++ b/drivers/char/drm/drm_hashtab.h @@ -65,4 +65,3 @@ extern void drm_ht_remove(struct drm_open_hash *ht); #endif - diff --git a/drivers/char/drm/drm_ioctl.c b/drivers/char/drm/drm_ioctl.c index 3cbebf8..b4d86a3 100644 --- a/drivers/char/drm/drm_ioctl.c +++ b/drivers/char/drm/drm_ioctl.c @@ -234,7 +234,7 @@ int drm_getclient(struct drm_device *dev, void *data, idx = client->idx; mutex_lock(&dev->struct_mutex); - + if (list_empty(&dev->filelist)) { mutex_unlock(&dev->struct_mutex); return -EINVAL; diff --git a/drivers/char/drm/drm_memory.c b/drivers/char/drm/drm_memory.c index 9301990..9a01a91 100644 --- a/drivers/char/drm/drm_memory.c +++ b/drivers/char/drm/drm_memory.c @@ -36,6 +36,75 @@ #include #include "drmP.h" +static struct { + spinlock_t lock; + uint64_t cur_used; + uint64_t low_threshold; + uint64_t high_threshold; +} drm_memctl = { + .lock = __SPIN_LOCK_UNLOCKED(drm_memctl.lock) +}; + +static inline size_t drm_size_align(size_t size) +{ + size_t tmpSize = 4; + if (size > PAGE_SIZE) + return PAGE_ALIGN(size); + + while (tmpSize < size) + tmpSize <<= 1; + + return (size_t) tmpSize; +} + +int drm_alloc_memctl(size_t size) +{ + int ret; + unsigned long a_size = drm_size_align(size); + + spin_lock(&drm_memctl.lock); + ret = ((drm_memctl.cur_used + a_size) > drm_memctl.high_threshold) ? 
+ -ENOMEM : 0; + if (!ret) + drm_memctl.cur_used += a_size; + spin_unlock(&drm_memctl.lock); + return ret; +} +EXPORT_SYMBOL(drm_alloc_memctl); + +void drm_free_memctl(size_t size) +{ + unsigned long a_size = drm_size_align(size); + + spin_lock(&drm_memctl.lock); + drm_memctl.cur_used -= a_size; + spin_unlock(&drm_memctl.lock); +} +EXPORT_SYMBOL(drm_free_memctl); + +void drm_query_memctl(uint64_t *cur_used, + uint64_t *low_threshold, + uint64_t *high_threshold) +{ + spin_lock(&drm_memctl.lock); + *cur_used = drm_memctl.cur_used; + *low_threshold = drm_memctl.low_threshold; + *high_threshold = drm_memctl.high_threshold; + spin_unlock(&drm_memctl.lock); +} +EXPORT_SYMBOL(drm_query_memctl); + +void drm_init_memctl(size_t p_low_threshold, + size_t p_high_threshold, + size_t unit_size) +{ + spin_lock(&drm_memctl.lock); + drm_memctl.cur_used = 0; + drm_memctl.low_threshold = p_low_threshold * unit_size; + drm_memctl.high_threshold = p_high_threshold * unit_size; + spin_unlock(&drm_memctl.lock); +} + #ifdef DEBUG_MEMORY #include "drm_memory_debug.h" #else @@ -179,4 +248,3 @@ void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev) iounmap(map->handle); } EXPORT_SYMBOL(drm_core_ioremapfree); - diff --git a/drivers/char/drm/drm_mm.c b/drivers/char/drm/drm_mm.c index 86f4eb6..369a052 100644 --- a/drivers/char/drm/drm_mm.c +++ b/drivers/char/drm/drm_mm.c @@ -82,7 +82,7 @@ static int drm_mm_create_tail_node(struct drm_mm *mm, struct drm_mm_node *child; child = (struct drm_mm_node *) - drm_alloc(sizeof(*child), DRM_MEM_MM); + drm_ctl_alloc(sizeof(*child), DRM_MEM_MM); if (!child) return -ENOMEM; @@ -118,7 +118,7 @@ static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent, struct drm_mm_node *child; child = (struct drm_mm_node *) - drm_alloc(sizeof(*child), DRM_MEM_MM); + drm_ctl_alloc(sizeof(*child), DRM_MEM_MM); if (!child) return NULL; @@ -200,8 +200,8 @@ void drm_mm_put_block(struct drm_mm_node * cur) prev_node->size += next_node->size; list_del(&next_node->ml_entry); list_del(&next_node->fl_entry); - drm_free(next_node, sizeof(*next_node), - DRM_MEM_MM); + drm_ctl_free(next_node, sizeof(*next_node), + DRM_MEM_MM); } else { next_node->size += cur->size; next_node->start = cur->start; @@ -214,7 +214,7 @@ void drm_mm_put_block(struct drm_mm_node * cur) list_add(&cur->fl_entry, &mm->fl_entry); } else { list_del(&cur->ml_entry); - drm_free(cur, sizeof(*cur), DRM_MEM_MM); + drm_ctl_free(cur, sizeof(*cur), DRM_MEM_MM); } } @@ -291,6 +291,5 @@ void drm_mm_takedown(struct drm_mm * mm) list_del(&entry->fl_entry); list_del(&entry->ml_entry); - drm_free(entry, sizeof(*entry), DRM_MEM_MM); + drm_ctl_free(entry, sizeof(*entry), DRM_MEM_MM); } - diff --git a/drivers/char/drm/drm_object.c b/drivers/char/drm/drm_object.c new file mode 100644 index 0000000..07914ca --- /dev/null +++ b/drivers/char/drm/drm_object.c @@ -0,0 +1,293 @@ +/************************************************************************** + * + * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +/* + * Authors: Thomas Hellstrom + */ + +#include "drmP.h" + +int drm_add_user_object(struct drm_file *priv, struct drm_user_object *item, + int shareable) +{ + struct drm_device *dev = priv->head->dev; + int ret; + + DRM_ASSERT_LOCKED(&dev->struct_mutex); + + /* The refcount will be bumped to 1 when we add the ref object below. */ + atomic_set(&item->refcount, 0); + item->shareable = shareable; + item->owner = priv; + + ret = drm_ht_just_insert_please(&dev->object_hash, &item->hash, + (unsigned long)item, 32, 0, 0); + if (ret) + return ret; + + ret = drm_add_ref_object(priv, item, _DRM_REF_USE); + if (ret) + ret = drm_ht_remove_item(&dev->object_hash, &item->hash); + + return ret; +} +EXPORT_SYMBOL(drm_add_user_object); + +struct drm_user_object *drm_lookup_user_object(struct drm_file *priv, uint32_t key) +{ + struct drm_device *dev = priv->head->dev; + struct drm_hash_item *hash; + int ret; + struct drm_user_object *item; + + DRM_ASSERT_LOCKED(&dev->struct_mutex); + + ret = drm_ht_find_item(&dev->object_hash, key, &hash); + if (ret) + return NULL; + + item = drm_hash_entry(hash, struct drm_user_object, hash); + + if (priv != item->owner) { + struct drm_open_hash *ht = &priv->refd_object_hash[_DRM_REF_USE]; + ret = drm_ht_find_item(ht, (unsigned long)item, &hash); + if (ret) { + DRM_ERROR("Object not registered for usage\n"); + return NULL; + } + } + return item; +} +EXPORT_SYMBOL(drm_lookup_user_object); + +static void drm_deref_user_object(struct drm_file *priv, struct drm_user_object *item) +{ + struct drm_device *dev = priv->head->dev; + int ret; + + if (atomic_dec_and_test(&item->refcount)) { + ret = drm_ht_remove_item(&dev->object_hash, &item->hash); + BUG_ON(ret); + item->remove(priv, item); + } +} + +static int drm_object_ref_action(struct drm_file *priv, struct drm_user_object *ro, + enum drm_ref_type action) +{ + int ret = 0; + + switch (action) { + case _DRM_REF_USE: + atomic_inc(&ro->refcount); + break; + default: + if (!ro->ref_struct_locked) { + break; + } else { + ro->ref_struct_locked(priv, ro, action); + } + } + return ret; +} + +int drm_add_ref_object(struct drm_file *priv, struct drm_user_object *referenced_object, + enum drm_ref_type ref_action) +{ + int ret = 0; + struct drm_ref_object *item; + struct drm_open_hash *ht = 
&priv->refd_object_hash[ref_action]; + + DRM_ASSERT_LOCKED(&priv->head->dev->struct_mutex); + if (!referenced_object->shareable && priv != referenced_object->owner) { + DRM_ERROR("Not allowed to reference this object\n"); + return -EINVAL; + } + + /* + * If this is not a usage reference, Check that usage has been registered + * first. Otherwise strange things may happen on destruction. + */ + + if ((ref_action != _DRM_REF_USE) && priv != referenced_object->owner) { + item = + drm_lookup_ref_object(priv, referenced_object, + _DRM_REF_USE); + if (!item) { + DRM_ERROR + ("Object not registered for usage by this client\n"); + return -EINVAL; + } + } + + if (NULL != + (item = + drm_lookup_ref_object(priv, referenced_object, ref_action))) { + atomic_inc(&item->refcount); + return drm_object_ref_action(priv, referenced_object, + ref_action); + } + + item = drm_ctl_calloc(1, sizeof(*item), DRM_MEM_OBJECTS); + if (item == NULL) { + DRM_ERROR("Could not allocate reference object\n"); + return -ENOMEM; + } + + atomic_set(&item->refcount, 1); + item->hash.key = (unsigned long)referenced_object; + ret = drm_ht_insert_item(ht, &item->hash); + item->unref_action = ref_action; + + if (ret) + goto out; + + list_add(&item->list, &priv->refd_objects); + ret = drm_object_ref_action(priv, referenced_object, ref_action); +out: + return ret; +} + +struct drm_ref_object *drm_lookup_ref_object(struct drm_file *priv, + struct drm_user_object *referenced_object, + enum drm_ref_type ref_action) +{ + struct drm_hash_item *hash; + int ret; + + DRM_ASSERT_LOCKED(&priv->head->dev->struct_mutex); + ret = drm_ht_find_item(&priv->refd_object_hash[ref_action], + (unsigned long)referenced_object, &hash); + if (ret) + return NULL; + + return drm_hash_entry(hash, struct drm_ref_object, hash); +} +EXPORT_SYMBOL(drm_lookup_ref_object); + +static void drm_remove_other_references(struct drm_file *priv, + struct drm_user_object *ro) +{ + int i; + struct drm_open_hash *ht; + struct drm_hash_item *hash; + struct drm_ref_object *item; + + for (i = _DRM_REF_USE + 1; i < _DRM_NO_REF_TYPES; ++i) { + ht = &priv->refd_object_hash[i]; + while (!drm_ht_find_item(ht, (unsigned long)ro, &hash)) { + item = drm_hash_entry(hash, struct drm_ref_object, hash); + drm_remove_ref_object(priv, item); + } + } +} + +void drm_remove_ref_object(struct drm_file *priv, struct drm_ref_object *item) +{ + int ret; + struct drm_user_object *user_object = (struct drm_user_object *) item->hash.key; + struct drm_open_hash *ht = &priv->refd_object_hash[item->unref_action]; + enum drm_ref_type unref_action; + + DRM_ASSERT_LOCKED(&priv->head->dev->struct_mutex); + unref_action = item->unref_action; + if (atomic_dec_and_test(&item->refcount)) { + ret = drm_ht_remove_item(ht, &item->hash); + BUG_ON(ret); + list_del_init(&item->list); + if (unref_action == _DRM_REF_USE) + drm_remove_other_references(priv, user_object); + drm_ctl_free(item, sizeof(*item), DRM_MEM_OBJECTS); + } + + switch (unref_action) { + case _DRM_REF_USE: + drm_deref_user_object(priv, user_object); + break; + default: + BUG_ON(!user_object->unref); + user_object->unref(priv, user_object, unref_action); + break; + } + +} + +int drm_user_object_ref(struct drm_file *priv, uint32_t user_token, + enum drm_object_type type, struct drm_user_object **object) +{ + struct drm_device *dev = priv->head->dev; + struct drm_user_object *uo; + struct drm_hash_item *hash; + int ret; + + mutex_lock(&dev->struct_mutex); + ret = drm_ht_find_item(&dev->object_hash, user_token, &hash); + if (ret) { + DRM_ERROR("Could not 
find user object to reference.\n"); + goto out_err; + } + uo = drm_hash_entry(hash, struct drm_user_object, hash); + if (uo->type != type) { + ret = -EINVAL; + goto out_err; + } + ret = drm_add_ref_object(priv, uo, _DRM_REF_USE); + if (ret) + goto out_err; + mutex_unlock(&dev->struct_mutex); + *object = uo; + return 0; +out_err: + mutex_unlock(&dev->struct_mutex); + return ret; +} + +int drm_user_object_unref(struct drm_file *priv, uint32_t user_token, + enum drm_object_type type) +{ + struct drm_device *dev = priv->head->dev; + struct drm_user_object *uo; + struct drm_ref_object *ro; + int ret; + + mutex_lock(&dev->struct_mutex); + uo = drm_lookup_user_object(priv, user_token); + if (!uo || (uo->type != type)) { + ret = -EINVAL; + goto out_err; + } + ro = drm_lookup_ref_object(priv, uo, _DRM_REF_USE); + if (!ro) { + ret = -EINVAL; + goto out_err; + } + drm_remove_ref_object(priv, ro); + mutex_unlock(&dev->struct_mutex); + return 0; +out_err: + mutex_unlock(&dev->struct_mutex); + return ret; +} diff --git a/drivers/char/drm/drm_objects.h b/drivers/char/drm/drm_objects.h new file mode 100644 index 0000000..d3a157c --- /dev/null +++ b/drivers/char/drm/drm_objects.h @@ -0,0 +1,608 @@ +/************************************************************************** + * + * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +/* + * Authors: Thomas Hellstrom + */ + +#ifndef _DRM_OBJECTS_H +#define _DRM_OBJECTS_H + +struct drm_device; +struct drm_bo_mem_reg; + +/*************************************************** + * User space objects. (drm_object.c) + */ + +#define drm_user_object_entry(_ptr, _type, _member) container_of(_ptr, _type, _member) + +enum drm_object_type { + drm_fence_type, + drm_buffer_type, + drm_lock_type, + /* + * Add other user space object types here. + */ + drm_driver_type0 = 256, + drm_driver_type1, + drm_driver_type2, + drm_driver_type3, + drm_driver_type4 +}; + +/* + * A user object is a structure that helps the drm give out user handles + * to kernel internal objects and to keep track of these objects so that + * they can be destroyed, for example when the user space process exits. + * Designed to be accessible using a user space 32-bit handle. 
+ */ + +struct drm_user_object { + struct drm_hash_item hash; + struct list_head list; + enum drm_object_type type; + atomic_t refcount; + int shareable; + struct drm_file *owner; + void (*ref_struct_locked) (struct drm_file *priv, + struct drm_user_object *obj, + enum drm_ref_type ref_action); + void (*unref) (struct drm_file *priv, struct drm_user_object *obj, + enum drm_ref_type unref_action); + void (*remove) (struct drm_file *priv, struct drm_user_object *obj); +}; + +/* + * A ref object is a structure which is used to + * keep track of references to user objects and to keep track of these + * references so that they can be destroyed for example when the user space + * process exits. Designed to be accessible using a pointer to the _user_ object. + */ + +struct drm_ref_object { + struct drm_hash_item hash; + struct list_head list; + atomic_t refcount; + enum drm_ref_type unref_action; +}; + +/** + * Must be called with the struct_mutex held. + */ + +extern int drm_add_user_object(struct drm_file *priv, struct drm_user_object *item, + int shareable); +/** + * Must be called with the struct_mutex held. + */ + +extern struct drm_user_object *drm_lookup_user_object(struct drm_file *priv, + uint32_t key); + +/* + * Must be called with the struct_mutex held. May temporarily release it. + */ + +extern int drm_add_ref_object(struct drm_file *priv, + struct drm_user_object *referenced_object, + enum drm_ref_type ref_action); + +/* + * Must be called with the struct_mutex held. + */ + +struct drm_ref_object *drm_lookup_ref_object(struct drm_file *priv, + struct drm_user_object *referenced_object, + enum drm_ref_type ref_action); +/* + * Must be called with the struct_mutex held. + * If "item" has been obtained by a call to drm_lookup_ref_object. You may not + * release the struct_mutex before calling drm_remove_ref_object. + * This function may temporarily release the struct_mutex. + */ + +extern void drm_remove_ref_object(struct drm_file *priv, struct drm_ref_object *item); +extern int drm_user_object_ref(struct drm_file *priv, uint32_t user_token, + enum drm_object_type type, + struct drm_user_object **object); +extern int drm_user_object_unref(struct drm_file *priv, uint32_t user_token, + enum drm_object_type type); + +/*************************************************** + * Fence objects. (drm_fence.c) + */ + +struct drm_fence_object { + struct drm_user_object base; + struct drm_device *dev; + atomic_t usage; + + /* + * The below three fields are protected by the fence manager spinlock. 
+ */ + + struct list_head ring; + int fence_class; + uint32_t native_type; + uint32_t type; + uint32_t signaled; + uint32_t sequence; + uint32_t flush_mask; + uint32_t submitted_flush; + uint32_t error; +}; + +#define _DRM_FENCE_CLASSES 8 +#define _DRM_FENCE_TYPE_EXE 0x00 + +struct drm_fence_class_manager { + struct list_head ring; + uint32_t pending_flush; + wait_queue_head_t fence_queue; + int pending_exe_flush; + uint32_t last_exe_flush; + uint32_t exe_flush_sequence; +}; + +struct drm_fence_manager { + int initialized; + rwlock_t lock; + struct drm_fence_class_manager fence_class[_DRM_FENCE_CLASSES]; + uint32_t num_classes; + atomic_t count; +}; + +struct drm_fence_driver { + uint32_t num_classes; + uint32_t wrap_diff; + uint32_t flush_diff; + uint32_t sequence_mask; + int lazy_capable; + int (*has_irq) (struct drm_device *dev, uint32_t fence_class, + uint32_t flags); + int (*emit) (struct drm_device *dev, uint32_t fence_class, + uint32_t flags, uint32_t *breadcrumb, + uint32_t *native_type); + void (*poke_flush) (struct drm_device *dev, uint32_t fence_class); +}; + +extern void drm_fence_handler(struct drm_device *dev, uint32_t fence_class, + uint32_t sequence, uint32_t type, + uint32_t error); +extern void drm_fence_manager_init(struct drm_device *dev); +extern void drm_fence_manager_takedown(struct drm_device *dev); +extern void drm_fence_flush_old(struct drm_device *dev, uint32_t fence_class, + uint32_t sequence); +extern int drm_fence_object_flush(struct drm_fence_object *fence, + uint32_t type); +extern int drm_fence_object_signaled(struct drm_fence_object *fence, + uint32_t type, int flush); +extern void drm_fence_usage_deref_locked(struct drm_fence_object **fence); +extern void drm_fence_usage_deref_unlocked(struct drm_fence_object **fence); +extern struct drm_fence_object *drm_fence_reference_locked(struct drm_fence_object *src); +extern void drm_fence_reference_unlocked(struct drm_fence_object **dst, + struct drm_fence_object *src); +extern int drm_fence_object_wait(struct drm_fence_object *fence, + int lazy, int ignore_signals, uint32_t mask); +extern int drm_fence_object_create(struct drm_device *dev, uint32_t type, + uint32_t fence_flags, uint32_t fence_class, + struct drm_fence_object **c_fence); +extern int drm_fence_object_emit(struct drm_fence_object *fence, + uint32_t fence_flags, uint32_t class, + uint32_t type); +extern void drm_fence_fill_arg(struct drm_fence_object *fence, + struct drm_fence_arg *arg); + +extern int drm_fence_add_user_object(struct drm_file *priv, + struct drm_fence_object *fence, + int shareable); + +extern int drm_fence_create_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_fence_destroy_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_fence_reference_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_fence_unreference_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_fence_signaled_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_fence_flush_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_fence_wait_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_fence_emit_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_fence_buffers_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); + 
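[Editorial aside, not part of the patch: the struct drm_fence_driver declared above is the hook a DRM driver uses to plug its hardware breadcrumb mechanism into the generic fence code — the driver reports how wide its sequence counter is and supplies emit/has_irq/poke_flush callbacks, and drm_fence_handler() does the rest from the IRQ path. A minimal sketch of such a hookup follows; the foo_* names, the foo_emit_breadcrumb() helper and the 24-bit sequence width are illustrative assumptions, not taken from this patch (the real in-tree example added by this series is i915_fence.c).]

static int foo_fence_has_irq(struct drm_device *dev, uint32_t fence_class,
			     uint32_t flags)
{
	/* EXE completion is signaled from the IRQ handler on this hardware. */
	return (flags & DRM_FENCE_TYPE_EXE) != 0;
}

static int foo_fence_emit(struct drm_device *dev, uint32_t fence_class,
			  uint32_t flags, uint32_t *breadcrumb,
			  uint32_t *native_type)
{
	/* Put the next sequence number into the command stream and report it
	 * back so the fence object can be placed on the class ring. */
	*breadcrumb = foo_emit_breadcrumb(dev);	/* hypothetical helper */
	*native_type = DRM_FENCE_TYPE_EXE;
	return 0;
}

static void foo_poke_flush(struct drm_device *dev, uint32_t fence_class)
{
	/* Re-enable the completion interrupt / kick the hardware if needed. */
}

static struct drm_fence_driver foo_fence_driver = {
	.num_classes	= 1,
	.wrap_diff	= (1 << 23),	/* half of the 24-bit sequence space */
	.flush_diff	= (1 << 22),
	.sequence_mask	= 0x00ffffff,
	.lazy_capable	= 1,
	.has_irq	= foo_fence_has_irq,
	.emit		= foo_fence_emit,
	.poke_flush	= foo_poke_flush,
};

[With dev->driver->fence_driver pointing at such a structure, the driver's IRQ handler only has to call drm_fence_handler(dev, 0, sequence, DRM_FENCE_TYPE_EXE, 0) when a breadcrumb completes; the exported ioctls declared above then give user space create/wait/flush access to those fences.]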
+/************************************************** + *TTMs + */ + +/* + * The ttm backend GTT interface. (In our case AGP). + * Any similar type of device (PCIE?) + * needs only to implement these functions to be usable with the TTM interface. + * The AGP backend implementation lives in drm_agpsupport.c + * basically maps these calls to available functions in agpgart. + * Each drm device driver gets an + * additional function pointer that creates these types, + * so that the device can choose the correct aperture. + * (Multiple AGP apertures, etc.) + * Most device drivers will let this point to the standard AGP implementation. + */ + +#define DRM_BE_FLAG_NEEDS_FREE 0x00000001 +#define DRM_BE_FLAG_BOUND_CACHED 0x00000002 + +struct drm_ttm_backend; +struct drm_ttm_backend_func { + int (*needs_ub_cache_adjust) (struct drm_ttm_backend *backend); + int (*populate) (struct drm_ttm_backend *backend, + unsigned long num_pages, struct page **pages); + void (*clear) (struct drm_ttm_backend *backend); + int (*bind) (struct drm_ttm_backend *backend, + struct drm_bo_mem_reg *bo_mem); + int (*unbind) (struct drm_ttm_backend *backend); + void (*destroy) (struct drm_ttm_backend *backend); +}; + + +struct drm_ttm_backend { + struct drm_device *dev; + uint32_t flags; + struct drm_ttm_backend_func *func; +}; + +struct drm_ttm { + struct mm_struct *user_mm; + struct page *dummy_read_page; + struct page **pages; + uint32_t page_flags; + unsigned long num_pages; + atomic_t vma_count; + struct drm_device *dev; + int destroy; + uint32_t mapping_offset; + struct drm_ttm_backend *be; + enum { + ttm_bound, + ttm_evicted, + ttm_unbound, + ttm_unpopulated, + } state; + +}; + +extern struct drm_ttm *drm_ttm_init(struct drm_device *dev, unsigned long size); +extern int drm_bind_ttm(struct drm_ttm *ttm, struct drm_bo_mem_reg *bo_mem); +extern void drm_ttm_unbind(struct drm_ttm *ttm); +extern void drm_ttm_evict(struct drm_ttm *ttm); +extern void drm_ttm_fixup_caching(struct drm_ttm *ttm); +extern struct page *drm_ttm_get_page(struct drm_ttm *ttm, int index); +extern void drm_ttm_cache_flush(void); +extern int drm_ttm_populate(struct drm_ttm *ttm); +extern int drm_ttm_set_user(struct drm_ttm *ttm, + struct task_struct *tsk, + int write, + unsigned long start, + unsigned long num_pages, + struct page *dummy_read_page); + +/* + * Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do + * this which calls this function iff there are no vmas referencing it anymore. + * Otherwise it is called when the last vma exits. + */ + +extern int drm_destroy_ttm(struct drm_ttm *ttm); + +#define DRM_FLAG_MASKED(_old, _new, _mask) {\ +(_old) ^= (((_old) ^ (_new)) & (_mask)); \ +} + +#define DRM_TTM_MASK_FLAGS ((1 << PAGE_SHIFT) - 1) +#define DRM_TTM_MASK_PFN (0xFFFFFFFFU - DRM_TTM_MASK_FLAGS) + +/* + * Page flags. + */ + +#define DRM_TTM_PAGE_UNCACHED (1 << 0) +#define DRM_TTM_PAGE_USED (1 << 1) +#define DRM_TTM_PAGE_BOUND (1 << 2) +#define DRM_TTM_PAGE_PRESENT (1 << 3) +#define DRM_TTM_PAGE_VMALLOC (1 << 4) +#define DRM_TTM_PAGE_USER (1 << 5) +#define DRM_TTM_PAGE_USER_WRITE (1 << 6) +#define DRM_TTM_PAGE_USER_DIRTY (1 << 7) +#define DRM_TTM_PAGE_USER_DMA (1 << 8) + +/*************************************************** + * Buffer objects. 
(drm_bo.c, drm_bo_move.c) + */ + +struct drm_bo_mem_reg { + struct drm_mm_node *mm_node; + unsigned long size; + unsigned long num_pages; + uint32_t page_alignment; + uint32_t mem_type; + uint64_t flags; + uint64_t mask; + uint32_t desired_tile_stride; + uint32_t hw_tile_stride; +}; + +enum drm_bo_type { + drm_bo_type_dc, + drm_bo_type_user, + drm_bo_type_kernel, /* for initial kernel allocations */ +}; + +struct drm_buffer_object { + struct drm_device *dev; + struct drm_user_object base; + + /* + * If there is a possibility that the usage variable is zero, + * then dev->struct_mutext should be locked before incrementing it. + */ + + atomic_t usage; + unsigned long buffer_start; + enum drm_bo_type type; + unsigned long offset; + atomic_t mapped; + struct drm_bo_mem_reg mem; + + struct list_head lru; + struct list_head ddestroy; + + uint32_t fence_type; + uint32_t fence_class; + uint32_t new_fence_type; + uint32_t new_fence_class; + struct drm_fence_object *fence; + uint32_t priv_flags; + wait_queue_head_t event_queue; + struct mutex mutex; + unsigned long num_pages; + + /* For pinned buffers */ + struct drm_mm_node *pinned_node; + uint32_t pinned_mem_type; + struct list_head pinned_lru; + + /* For vm */ + struct drm_ttm *ttm; + struct drm_map_list map_list; + uint32_t memory_type; + unsigned long bus_offset; + uint32_t vm_flags; + void *iomap; + + +}; + +#define _DRM_BO_FLAG_UNFENCED 0x00000001 +#define _DRM_BO_FLAG_EVICTED 0x00000002 + +struct drm_mem_type_manager { + int has_type; + int use_type; + struct drm_mm manager; + struct list_head lru; + struct list_head pinned; + uint32_t flags; + uint32_t drm_bus_maptype; + unsigned long gpu_offset; + unsigned long io_offset; + unsigned long io_size; + void *io_addr; +}; + +struct drm_bo_lock { + struct drm_user_object base; + wait_queue_head_t queue; + atomic_t write_lock_pending; + atomic_t readers; +}; + +#define _DRM_FLAG_MEMTYPE_FIXED 0x00000001 /* Fixed (on-card) PCI memory */ +#define _DRM_FLAG_MEMTYPE_MAPPABLE 0x00000002 /* Memory mappable */ +#define _DRM_FLAG_MEMTYPE_CACHED 0x00000004 /* Cached binding */ +#define _DRM_FLAG_NEEDS_IOREMAP 0x00000008 /* Fixed memory needs ioremap + before kernel access. 
*/ +#define _DRM_FLAG_MEMTYPE_CMA 0x00000010 /* Can't map aperture */ +#define _DRM_FLAG_MEMTYPE_CSELECT 0x00000020 /* Select caching */ + +struct drm_buffer_manager { + struct drm_bo_lock bm_lock; + struct mutex evict_mutex; + int nice_mode; + int initialized; + struct drm_file *last_to_validate; + struct drm_mem_type_manager man[DRM_BO_MEM_TYPES]; + struct list_head unfenced; + struct list_head ddestroy; + struct delayed_work wq; + uint32_t fence_type; + unsigned long cur_pages; + atomic_t count; + struct page *dummy_read_page; +}; + +struct drm_bo_driver { + const uint32_t *mem_type_prio; + const uint32_t *mem_busy_prio; + uint32_t num_mem_type_prio; + uint32_t num_mem_busy_prio; + struct drm_ttm_backend *(*create_ttm_backend_entry) + (struct drm_device *dev); + int (*fence_type) (struct drm_buffer_object *bo, uint32_t *fclass, + uint32_t *type); + int (*invalidate_caches) (struct drm_device *dev, uint64_t flags); + int (*init_mem_type) (struct drm_device *dev, uint32_t type, + struct drm_mem_type_manager *man); + uint32_t(*evict_mask) (struct drm_buffer_object *bo); + int (*move) (struct drm_buffer_object *bo, + int evict, int no_wait, struct drm_bo_mem_reg *new_mem); + void (*ttm_cache_flush)(struct drm_ttm *ttm); +}; + +/* + * buffer objects (drm_bo.c) + */ + +extern int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int drm_bo_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int drm_bo_setstatus_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int drm_mm_unlock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int drm_bo_version_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int drm_bo_driver_finish(struct drm_device *dev); +extern int drm_bo_driver_init(struct drm_device *dev); +extern int drm_bo_pci_offset(struct drm_device *dev, + struct drm_bo_mem_reg *mem, + unsigned long *bus_base, + unsigned long *bus_offset, + unsigned long *bus_size); +extern int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg *mem); + +extern void drm_bo_usage_deref_locked(struct drm_buffer_object **bo); +extern void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo); +extern void drm_putback_buffer_objects(struct drm_device *dev); +extern int drm_fence_buffer_objects(struct drm_device *dev, + struct list_head *list, + uint32_t fence_flags, + struct drm_fence_object *fence, + struct drm_fence_object **used_fence); +extern void drm_bo_add_to_lru(struct drm_buffer_object *bo); +extern int drm_buffer_object_create(struct drm_device *dev, unsigned long 
size, + enum drm_bo_type type, uint64_t mask, + uint32_t hint, uint32_t page_alignment, + unsigned long buffer_start, + struct drm_buffer_object **bo); +extern int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int ignore_signals, + int no_wait); +extern int drm_bo_mem_space(struct drm_buffer_object *bo, + struct drm_bo_mem_reg *mem, int no_wait); +extern int drm_bo_move_buffer(struct drm_buffer_object *bo, + uint64_t new_mem_flags, + int no_wait, int move_unfenced); +extern int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type); +extern int drm_bo_init_mm(struct drm_device *dev, unsigned type, + unsigned long p_offset, unsigned long p_size); +extern int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle, + uint32_t fence_class, uint64_t flags, + uint64_t mask, uint32_t hint, + int use_old_fence_class, + struct drm_bo_info_rep *rep, + struct drm_buffer_object **bo_rep); +extern struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv, + uint32_t handle, + int check_owner); +extern int drm_bo_do_validate(struct drm_buffer_object *bo, + uint64_t flags, uint64_t mask, uint32_t hint, + uint32_t fence_class, + int no_wait, + struct drm_bo_info_rep *rep); + +/* + * Buffer object memory move- and map helpers. + * drm_bo_move.c + */ + +extern int drm_bo_move_ttm(struct drm_buffer_object *bo, + int evict, int no_wait, + struct drm_bo_mem_reg *new_mem); +extern int drm_bo_move_memcpy(struct drm_buffer_object *bo, + int evict, + int no_wait, struct drm_bo_mem_reg *new_mem); +extern int drm_bo_move_accel_cleanup(struct drm_buffer_object *bo, + int evict, int no_wait, + uint32_t fence_class, uint32_t fence_type, + uint32_t fence_flags, + struct drm_bo_mem_reg *new_mem); +extern int drm_bo_same_page(unsigned long offset, unsigned long offset2); +extern unsigned long drm_bo_offset_end(unsigned long offset, + unsigned long end); + +struct drm_bo_kmap_obj { + void *virtual; + struct page *page; + enum { + bo_map_iomap, + bo_map_vmap, + bo_map_kmap, + bo_map_premapped, + } bo_kmap_type; +}; + +static inline void *drm_bmo_virtual(struct drm_bo_kmap_obj *map, int *is_iomem) +{ + *is_iomem = (map->bo_kmap_type == bo_map_iomap || + map->bo_kmap_type == bo_map_premapped); + return map->virtual; +} +extern void drm_bo_kunmap(struct drm_bo_kmap_obj *map); +extern int drm_bo_kmap(struct drm_buffer_object *bo, unsigned long start_page, + unsigned long num_pages, struct drm_bo_kmap_obj *map); + +/* + * drm_bo_lock.c + * Simple replacement for the hardware lock on buffer manager init and clean. 
+ */ + + +extern void drm_bo_init_lock(struct drm_bo_lock *lock); +extern void drm_bo_read_unlock(struct drm_bo_lock *lock); +extern int drm_bo_read_lock(struct drm_bo_lock *lock); +extern int drm_bo_write_lock(struct drm_bo_lock *lock, + struct drm_file *file_priv); + +extern int drm_bo_write_unlock(struct drm_bo_lock *lock, + struct drm_file *file_priv); + +#ifdef CONFIG_DEBUG_MUTEXES +#define DRM_ASSERT_LOCKED(_mutex) \ + BUG_ON(!mutex_is_locked(_mutex) || \ + ((_mutex)->owner != current_thread_info())) +#else +#define DRM_ASSERT_LOCKED(_mutex) +#endif +#endif diff --git a/drivers/char/drm/drm_os_linux.h b/drivers/char/drm/drm_os_linux.h index daa69c9..8dbd257 100644 --- a/drivers/char/drm/drm_os_linux.h +++ b/drivers/char/drm/drm_os_linux.h @@ -69,9 +69,9 @@ static __inline__ int mtrr_del(int reg, unsigned long base, unsigned long size) #define DRM_COPY_TO_USER(arg1, arg2, arg3) \ copy_to_user(arg1, arg2, arg3) /* Macros for copyfrom user, but checking readability only once */ -#define DRM_VERIFYAREA_READ( uaddr, size ) \ +#define DRM_VERIFYAREA_READ( uaddr, size ) \ (access_ok( VERIFY_READ, uaddr, size ) ? 0 : -EFAULT) -#define DRM_COPY_FROM_USER_UNCHECKED(arg1, arg2, arg3) \ +#define DRM_COPY_FROM_USER_UNCHECKED(arg1, arg2, arg3) \ __copy_from_user(arg1, arg2, arg3) #define DRM_COPY_TO_USER_UNCHECKED(arg1, arg2, arg3) \ __copy_to_user(arg1, arg2, arg3) diff --git a/drivers/char/drm/drm_pciids.h b/drivers/char/drm/drm_pciids.h index f359397..508e555 100644 --- a/drivers/char/drm/drm_pciids.h +++ b/drivers/char/drm/drm_pciids.h @@ -311,4 +311,3 @@ {0x8086, 0x2a02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ {0x8086, 0x2a12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ {0, 0, 0} - diff --git a/drivers/char/drm/drm_proc.c b/drivers/char/drm/drm_proc.c index 12dfea8..a644b18 100644 --- a/drivers/char/drm/drm_proc.c +++ b/drivers/char/drm/drm_proc.c @@ -49,6 +49,8 @@ static int drm_queues_info(char *buf, char **start, off_t offset, int request, int *eof, void *data); static int drm_bufs_info(char *buf, char **start, off_t offset, int request, int *eof, void *data); +static int drm_objects_info(char *buf, char **start, off_t offset, + int request, int *eof, void *data); #if DRM_DEBUG_CODE static int drm_vma_info(char *buf, char **start, off_t offset, int request, int *eof, void *data); @@ -67,6 +69,7 @@ static struct drm_proc_list { {"clients", drm_clients_info}, {"queues", drm_queues_info}, {"bufs", drm_bufs_info}, + {"objects", drm_objects_info}, #if DRM_DEBUG_CODE {"vma", drm_vma_info}, #endif @@ -236,11 +239,11 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request, type = "??"; else type = types[map->type]; - DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08x ", + DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08lx ", i, map->offset, map->size, type, map->flags, - r_list->user_token); + (unsigned long) r_list->user_token); if (map->mtrr < 0) { DRM_PROC_PRINT("none\n"); } else { @@ -416,6 +419,93 @@ static int drm_bufs_info(char *buf, char **start, off_t offset, int request, } /** + * Called when "/proc/dri/.../objects" is read. + * + * \param buf output buffer. + * \param start start of output data. + * \param offset requested start offset. + * \param request requested number of bytes. + * \param eof whether there is no more data to return. + * \param data private data. + * \return number of written bytes. 
+ */ +static int drm__objects_info(char *buf, char **start, off_t offset, int request, + int *eof, void *data) +{ + struct drm_device *dev = (struct drm_device *) data; + int len = 0; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_fence_manager *fm = &dev->fm; + uint64_t used_mem; + uint64_t low_mem; + uint64_t high_mem; + + + if (offset > DRM_PROC_LIMIT) { + *eof = 1; + return 0; + } + + *start = &buf[offset]; + *eof = 0; + + DRM_PROC_PRINT("Object accounting:\n\n"); + if (fm->initialized) { + DRM_PROC_PRINT("Number of active fence objects: %d.\n", + atomic_read(&fm->count)); + } else { + DRM_PROC_PRINT("Fence objects are not supported by this driver\n"); + } + + if (bm->initialized) { + DRM_PROC_PRINT("Number of active buffer objects: %d.\n\n", + atomic_read(&bm->count)); + } + DRM_PROC_PRINT("Memory accounting:\n\n"); + if (bm->initialized) { + DRM_PROC_PRINT("Number of locked GATT pages: %lu.\n", bm->cur_pages); + } else { + DRM_PROC_PRINT("Buffer objects are not supported by this driver.\n"); + } + + drm_query_memctl(&used_mem, &low_mem, &high_mem); + + if (used_mem > 16*PAGE_SIZE) { + DRM_PROC_PRINT("Used object memory is %lu pages.\n", + (unsigned long) (used_mem >> PAGE_SHIFT)); + } else { + DRM_PROC_PRINT("Used object memory is %lu bytes.\n", + (unsigned long) used_mem); + } + DRM_PROC_PRINT("Soft object memory usage threshold is %lu pages.\n", + (unsigned long) (low_mem >> PAGE_SHIFT)); + DRM_PROC_PRINT("Hard object memory usage threshold is %lu pages.\n", + (unsigned long) (high_mem >> PAGE_SHIFT)); + + DRM_PROC_PRINT("\n"); + + if (len > request + offset) + return request; + *eof = 1; + return len - offset; +} + +/** + * Simply calls _objects_info() while holding the drm_device::struct_mutex lock. + */ +static int drm_objects_info(char *buf, char **start, off_t offset, int request, + int *eof, void *data) +{ + struct drm_device *dev = (struct drm_device *) data; + int ret; + + mutex_lock(&dev->struct_mutex); + ret = drm__objects_info(buf, start, offset, request, eof, data); + mutex_unlock(&dev->struct_mutex); + return ret; +} + +/** * Called when "/proc/dri/.../clients" is read. * * \param buf output buffer. 
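For orientation between the core and driver parts of this series: the drm_bo_driver hooks declared in drm_objects.h above are what each DRM driver fills in to plug into the new buffer-object core. Below is a minimal sketch of that wiring, modelled on the i915 callbacks added later in this series; the example_* names and the contents of the two priority arrays are illustrative only and are not taken from the patch.

#include "drmP.h"	/* struct drm_bo_driver, DRM_BO_MEM_* */

/* Placement order tried on validation, and the order tried when the
 * preferred pools are busy; example values only. */
static uint32_t example_mem_prios[]  = { DRM_BO_MEM_PRIV0, DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL };
static uint32_t example_busy_prios[] = { DRM_BO_MEM_TT, DRM_BO_MEM_PRIV0, DRM_BO_MEM_LOCAL };

static struct drm_bo_driver example_bo_driver = {
	.mem_type_prio            = example_mem_prios,
	.mem_busy_prio            = example_busy_prios,
	.num_mem_type_prio        = ARRAY_SIZE(example_mem_prios),
	.num_mem_busy_prio        = ARRAY_SIZE(example_busy_prios),
	.create_ttm_backend_entry = i915_create_ttm_backend_entry,
	.fence_type               = i915_fence_types,
	.invalidate_caches        = i915_invalidate_caches,
	.init_mem_type            = i915_init_mem_type,
	.evict_mask               = i915_evict_mask,
	.move                     = i915_move,
	.ttm_cache_flush          = i915_flush_ttm,
};

The driver then points the bo_driver member of its struct drm_driver at such a table (which is how dev->driver->bo_driver in drm_ttm.c below resolves) and calls drm_bo_driver_init() once per device, as i915_driver_firstopen() does at the end of the i915_dma.c changes.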
diff --git a/drivers/char/drm/drm_sarea.h b/drivers/char/drm/drm_sarea.h index e040f47..4800373 100644 --- a/drivers/char/drm/drm_sarea.h +++ b/drivers/char/drm/drm_sarea.h @@ -45,7 +45,7 @@ #endif /** Maximum number of drawables in the SAREA */ -#define SAREA_MAX_DRAWABLES 256 +#define SAREA_MAX_DRAWABLES 256 #define SAREA_DRAWABLE_CLAIMED_ENTRY 0x80000000 diff --git a/drivers/char/drm/drm_stub.c b/drivers/char/drm/drm_stub.c index ee83ff9..ae19213 100644 --- a/drivers/char/drm/drm_stub.c +++ b/drivers/char/drm/drm_stub.c @@ -71,6 +71,7 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev, init_timer(&dev->timer); mutex_init(&dev->struct_mutex); mutex_init(&dev->ctxlist_mutex); + mutex_init(&dev->bm.evict_mutex); idr_init(&dev->drw_idr); @@ -83,7 +84,19 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev, #endif dev->irq = pdev->irq; - if (drm_ht_create(&dev->map_hash, 12)) { + if (drm_ht_create(&dev->map_hash, DRM_MAP_HASH_ORDER)) { + return -ENOMEM; + } + + if (drm_mm_init(&dev->offset_manager, DRM_FILE_PAGE_OFFSET_START, + DRM_FILE_PAGE_OFFSET_SIZE)) { + drm_ht_remove(&dev->map_hash); + return -ENOMEM; + } + + if (drm_ht_create(&dev->object_hash, DRM_OBJECT_HASH_ORDER)) { + drm_ht_remove(&dev->map_hash); + drm_mm_takedown(&dev->offset_manager); return -ENOMEM; } @@ -126,6 +139,7 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev, goto error_out_unreg; } + drm_fence_manager_init(dev); return 0; error_out_unreg: @@ -224,7 +238,7 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent, } if ((ret = drm_get_head(dev, &dev->primary))) goto err_g2; - + DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", driver->name, driver->major, driver->minor, driver->patchlevel, driver->date, dev->primary.minor); diff --git a/drivers/char/drm/drm_ttm.c b/drivers/char/drm/drm_ttm.c new file mode 100644 index 0000000..2cd198b --- /dev/null +++ b/drivers/char/drm/drm_ttm.c @@ -0,0 +1,418 @@ +/************************************************************************** + * + * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + **************************************************************************/ +/* + * Authors: Thomas Hellstrom + */ + +#include "drmP.h" + +static void drm_ttm_ipi_handler(void *null) +{ + flush_agp_cache(); +} + +void drm_ttm_cache_flush(void) +{ + if (on_each_cpu(drm_ttm_ipi_handler, NULL, 1, 1) != 0) + DRM_ERROR("Timed out waiting for drm cache flush.\n"); +} +EXPORT_SYMBOL(drm_ttm_cache_flush); + +/* + * Use kmalloc if possible. Otherwise fall back to vmalloc. + */ + +static void ttm_alloc_pages(struct drm_ttm *ttm) +{ + unsigned long size = ttm->num_pages * sizeof(*ttm->pages); + ttm->pages = NULL; + + if (drm_alloc_memctl(size)) + return; + + if (size <= PAGE_SIZE) + ttm->pages = drm_calloc(1, size, DRM_MEM_TTM); + + if (!ttm->pages) { + ttm->pages = vmalloc_user(size); + if (ttm->pages) + ttm->page_flags |= DRM_TTM_PAGE_VMALLOC; + } + if (!ttm->pages) + drm_free_memctl(size); +} + +static void ttm_free_pages(struct drm_ttm *ttm) +{ + unsigned long size = ttm->num_pages * sizeof(*ttm->pages); + + if (ttm->page_flags & DRM_TTM_PAGE_VMALLOC) { + vfree(ttm->pages); + ttm->page_flags &= ~DRM_TTM_PAGE_VMALLOC; + } else { + drm_free(ttm->pages, size, DRM_MEM_TTM); + } + drm_free_memctl(size); + ttm->pages = NULL; +} + +static struct page *drm_ttm_alloc_page(void) +{ + struct page *page; + + if (drm_alloc_memctl(PAGE_SIZE)) + return NULL; + + page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32); + if (!page) { + drm_free_memctl(PAGE_SIZE); + return NULL; + } + SetPageLocked(page); + return page; +} + +/* + * Change caching policy for the linear kernel map + * for range of pages in a ttm. + */ + +static int drm_set_caching(struct drm_ttm *ttm, int noncached) +{ + int i; + struct page **cur_page; + int do_tlbflush = 0; + + if ((ttm->page_flags & DRM_TTM_PAGE_UNCACHED) == noncached) + return 0; + + if (noncached) + drm_ttm_cache_flush(); + + for (i = 0; i < ttm->num_pages; ++i) { + cur_page = ttm->pages + i; + if (*cur_page) { + if (!PageHighMem(*cur_page)) { + if (noncached) { + map_page_into_agp(*cur_page); + } else { + unmap_page_from_agp(*cur_page); + } + do_tlbflush = 1; + } + } + } + if (do_tlbflush) + flush_agp_mappings(); + + DRM_FLAG_MASKED(ttm->page_flags, noncached, DRM_TTM_PAGE_UNCACHED); + + return 0; +} + + +static void drm_ttm_free_user_pages(struct drm_ttm *ttm) +{ + struct mm_struct *mm = ttm->user_mm; + int write; + int dirty; + struct page *page; + int i; + + BUG_ON(!(ttm->page_flags & DRM_TTM_PAGE_USER)); + write = ((ttm->page_flags & DRM_TTM_PAGE_USER_WRITE) != 0); + dirty = ((ttm->page_flags & DRM_TTM_PAGE_USER_DIRTY) != 0); + + down_read(&mm->mmap_sem); + for (i = 0; i < ttm->num_pages; ++i) { + page = ttm->pages[i]; + if (page == NULL) + continue; + + if (page == ttm->dummy_read_page) { + BUG_ON(write); + continue; + } + + if (write && dirty && !PageReserved(page)) + SetPageDirty(page); + + ttm->pages[i] = NULL; + page_cache_release(page); + } + up_read(&mm->mmap_sem); +} + +static void drm_ttm_free_alloced_pages(struct drm_ttm *ttm) +{ + int i; + struct drm_buffer_manager *bm = &ttm->dev->bm; + struct page **cur_page; + + for (i = 0; i < ttm->num_pages; ++i) { + cur_page = ttm->pages + i; + if (*cur_page) { + unlock_page(*cur_page); + if (page_count(*cur_page) != 1) + DRM_ERROR("Erroneous page count. Leaking pages.\n"); + if (page_mapped(*cur_page)) + DRM_ERROR("Erroneous map count. Leaking page mappings.\n"); + __free_page(*cur_page); + drm_free_memctl(PAGE_SIZE); + --bm->cur_pages; + } + } +} + +/* + * Free all resources associated with a ttm. 
+ */ + +int drm_destroy_ttm(struct drm_ttm *ttm) +{ + struct drm_ttm_backend *be; + + if (!ttm) + return 0; + + be = ttm->be; + if (be) { + be->func->destroy(be); + ttm->be = NULL; + } + + if (ttm->pages) { + if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED) + drm_set_caching(ttm, 0); + + if (ttm->page_flags & DRM_TTM_PAGE_USER) + drm_ttm_free_user_pages(ttm); + else + drm_ttm_free_alloced_pages(ttm); + + ttm_free_pages(ttm); + } + + drm_ctl_free(ttm, sizeof(*ttm), DRM_MEM_TTM); + return 0; +} + +struct page *drm_ttm_get_page(struct drm_ttm *ttm, int index) +{ + struct page *p; + struct drm_buffer_manager *bm = &ttm->dev->bm; + + p = ttm->pages[index]; + if (!p) { + p = drm_ttm_alloc_page(); + if (!p) + return NULL; + ttm->pages[index] = p; + ++bm->cur_pages; + } + return p; +} +EXPORT_SYMBOL(drm_ttm_get_page); + + + + +int drm_ttm_set_user(struct drm_ttm *ttm, + struct task_struct *tsk, + int write, + unsigned long start, + unsigned long num_pages, + struct page *dummy_read_page) +{ + struct mm_struct *mm = tsk->mm; + int ret; + int i; + + BUG_ON(num_pages != ttm->num_pages); + + ttm->user_mm = mm; + ttm->dummy_read_page = dummy_read_page; + ttm->page_flags = DRM_TTM_PAGE_USER | + ((write) ? DRM_TTM_PAGE_USER_WRITE : 0); + + + down_read(&mm->mmap_sem); + ret = get_user_pages(tsk, mm, start, num_pages, + write, 0, ttm->pages, NULL); + up_read(&mm->mmap_sem); + + if (ret != num_pages && write) { + drm_ttm_free_user_pages(ttm); + return -ENOMEM; + } + + for (i = 0; i < num_pages; ++i) { + if (ttm->pages[i] == NULL) + ttm->pages[i] = ttm->dummy_read_page; + } + + return 0; +} + + + +int drm_ttm_populate(struct drm_ttm *ttm) +{ + struct page *page; + unsigned long i; + struct drm_ttm_backend *be; + + if (ttm->state != ttm_unpopulated) + return 0; + + be = ttm->be; + for (i = 0; i < ttm->num_pages; ++i) { + page = drm_ttm_get_page(ttm, i); + if (!page) + return -ENOMEM; + } + be->func->populate(be, ttm->num_pages, ttm->pages); + ttm->state = ttm_unbound; + return 0; +} + +/* + * Initialize a ttm. + */ + +struct drm_ttm *drm_ttm_init(struct drm_device *dev, unsigned long size) +{ + struct drm_bo_driver *bo_driver = dev->driver->bo_driver; + struct drm_ttm *ttm; + + if (!bo_driver) + return NULL; + + ttm = drm_ctl_calloc(1, sizeof(*ttm), DRM_MEM_TTM); + if (!ttm) + return NULL; + + ttm->dev = dev; + atomic_set(&ttm->vma_count, 0); + + ttm->destroy = 0; + ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; + + ttm->page_flags = 0; + + /* + * Account also for AGP module memory usage. + */ + + ttm_alloc_pages(ttm); + if (!ttm->pages) { + drm_destroy_ttm(ttm); + DRM_ERROR("Failed allocating page table\n"); + return NULL; + } + ttm->be = bo_driver->create_ttm_backend_entry(dev); + if (!ttm->be) { + drm_destroy_ttm(ttm); + DRM_ERROR("Failed creating ttm backend entry\n"); + return NULL; + } + ttm->state = ttm_unpopulated; + return ttm; +} + +/* + * Unbind a ttm region from the aperture. 
+ */ + +void drm_ttm_evict(struct drm_ttm *ttm) +{ + struct drm_ttm_backend *be = ttm->be; + int ret; + + if (ttm->state == ttm_bound) { + ret = be->func->unbind(be); + BUG_ON(ret); + } + + ttm->state = ttm_evicted; +} + +void drm_ttm_fixup_caching(struct drm_ttm *ttm) +{ + + if (ttm->state == ttm_evicted) { + struct drm_ttm_backend *be = ttm->be; + if (be->func->needs_ub_cache_adjust(be)) + drm_set_caching(ttm, 0); + ttm->state = ttm_unbound; + } +} + +void drm_ttm_unbind(struct drm_ttm *ttm) +{ + if (ttm->state == ttm_bound) + drm_ttm_evict(ttm); + + drm_ttm_fixup_caching(ttm); +} + +int drm_bind_ttm(struct drm_ttm *ttm, struct drm_bo_mem_reg *bo_mem) +{ + struct drm_bo_driver *bo_driver = ttm->dev->driver->bo_driver; + int ret = 0; + struct drm_ttm_backend *be; + + if (!ttm) + return -EINVAL; + if (ttm->state == ttm_bound) + return 0; + + be = ttm->be; + + ret = drm_ttm_populate(ttm); + if (ret) + return ret; + + if (ttm->state == ttm_unbound && !(bo_mem->flags & DRM_BO_FLAG_CACHED)) + drm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED); + else if ((bo_mem->flags & DRM_BO_FLAG_CACHED_MAPPED) && + bo_driver->ttm_cache_flush) + bo_driver->ttm_cache_flush(ttm); + + ret = be->func->bind(be, bo_mem); + if (ret) { + ttm->state = ttm_evicted; + DRM_ERROR("Couldn't bind backend.\n"); + return ret; + } + + ttm->state = ttm_bound; + if (ttm->page_flags & DRM_TTM_PAGE_USER) + ttm->page_flags |= DRM_TTM_PAGE_USER_DIRTY; + return 0; +} +EXPORT_SYMBOL(drm_bind_ttm); diff --git a/drivers/char/drm/drm_vm.c b/drivers/char/drm/drm_vm.c index e8d50af..82da913 100644 --- a/drivers/char/drm/drm_vm.c +++ b/drivers/char/drm/drm_vm.c @@ -40,6 +40,10 @@ static void drm_vm_open(struct vm_area_struct *vma); static void drm_vm_close(struct vm_area_struct *vma); +static int drm_bo_mmap_locked(struct vm_area_struct *vma, + struct file *filp, + drm_local_map_t *map); + static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma) { @@ -213,7 +217,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma) found_maps++; if (pt->vma == vma) { list_del(&pt->head); - drm_free(pt, sizeof(*pt), DRM_MEM_VMAS); + drm_ctl_free(pt, sizeof(*pt), DRM_MEM_VMAS); } } @@ -255,6 +259,9 @@ static void drm_vm_shm_close(struct vm_area_struct *vma) dmah.size = map->size; __drm_pci_free(dev, &dmah); break; + case _DRM_TTM: + BUG_ON(1); + break; } drm_free(map, sizeof(*map), DRM_MEM_MAPS); } @@ -413,7 +420,7 @@ static void drm_vm_open_locked(struct vm_area_struct *vma) vma->vm_start, vma->vm_end - vma->vm_start); atomic_inc(&dev->vma_count); - vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS); + vma_entry = drm_ctl_alloc(sizeof(*vma_entry), DRM_MEM_VMAS); if (vma_entry) { vma_entry->vma = vma; vma_entry->pid = current->pid; @@ -453,7 +460,7 @@ static void drm_vm_close(struct vm_area_struct *vma) list_for_each_entry_safe(pt, temp, &dev->vmalist, head) { if (pt->vma == vma) { list_del(&pt->head); - drm_free(pt, sizeof(*pt), DRM_MEM_VMAS); + drm_ctl_free(pt, sizeof(*pt), DRM_MEM_VMAS); break; } } @@ -651,6 +658,8 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) vma->vm_private_data = (void *)map; vma->vm_flags |= VM_RESERVED; break; + case _DRM_TTM: + return drm_bo_mmap_locked(vma, filp, map); default: return -EINVAL; /* This should never happen. */ } @@ -674,3 +683,189 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma) return ret; } EXPORT_SYMBOL(drm_mmap); + +/** + * buffer object vm functions. + */ + +/** + * \c Pagefault method for buffer objects. 
+ * + * \param vma Virtual memory area. + * \param address File offset. + * \return Error or refault. The pfn is manually inserted. + * + * It's important that pfns are inserted while holding the bo->mutex lock. + * otherwise we might race with unmap_mapping_range() which is always + * called with the bo->mutex lock held. + * + * We're modifying the page attribute bits of the vma->vm_page_prot field, + * without holding the mmap_sem in write mode. Only in read mode. + * These bits are not used by the mm subsystem code, and we consider them + * protected by the bo->mutex lock. + */ + +static unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma, + unsigned long address) +{ + struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data; + unsigned long page_offset; + struct page *page = NULL; + struct drm_ttm *ttm; + struct drm_device *dev; + unsigned long pfn; + int err; + unsigned long bus_base; + unsigned long bus_offset; + unsigned long bus_size; + unsigned long ret = NOPFN_REFAULT; + + if (address > vma->vm_end) + return NOPFN_SIGBUS; + + dev = bo->dev; + err = drm_bo_read_lock(&dev->bm.bm_lock); + if (err) + return NOPFN_REFAULT; + + err = mutex_lock_interruptible(&bo->mutex); + if (err) { + drm_bo_read_unlock(&dev->bm.bm_lock); + return NOPFN_REFAULT; + } + + err = drm_bo_wait(bo, 0, 0, 0); + if (err) { + ret = (err != -EAGAIN) ? NOPFN_SIGBUS : NOPFN_REFAULT; + goto out_unlock; + } + + /* + * If buffer happens to be in a non-mappable location, + * move it to a mappable. + */ + + if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) { + uint32_t new_mask = bo->mem.mask | + DRM_BO_FLAG_MAPPABLE | + DRM_BO_FLAG_FORCE_MAPPABLE; + err = drm_bo_move_buffer(bo, new_mask, 0, 0); + if (err) { + ret = (err != -EAGAIN) ? NOPFN_SIGBUS : NOPFN_REFAULT; + goto out_unlock; + } + } + + err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset, + &bus_size); + + if (err) { + ret = NOPFN_SIGBUS; + goto out_unlock; + } + + page_offset = (address - vma->vm_start) >> PAGE_SHIFT; + + if (bus_size) { + struct drm_mem_type_manager *man = &dev->bm.man[bo->mem.mem_type]; + + pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset; + vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma); + } else { + ttm = bo->ttm; + + drm_ttm_fixup_caching(ttm); + page = drm_ttm_get_page(ttm, page_offset); + if (!page) { + ret = NOPFN_OOM; + goto out_unlock; + } + pfn = page_to_pfn(page); + vma->vm_page_prot = (bo->mem.flags & DRM_BO_FLAG_CACHED) ? + vm_get_page_prot(vma->vm_flags) : + drm_io_prot(_DRM_TTM, vma); + } + + err = vm_insert_pfn(vma, address, pfn); + if (err) { + ret = (err != -EAGAIN) ? NOPFN_OOM : NOPFN_REFAULT; + goto out_unlock; + } +out_unlock: + mutex_unlock(&bo->mutex); + drm_bo_read_unlock(&dev->bm.bm_lock); + return ret; +} + +static void drm_bo_vm_open_locked(struct vm_area_struct *vma) +{ + struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data; + + drm_vm_open_locked(vma); + atomic_inc(&bo->usage); +} + +/** + * \c vma open method for buffer objects. + * + * \param vma virtual memory area. + */ + +static void drm_bo_vm_open(struct vm_area_struct *vma) +{ + struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data; + struct drm_device *dev = bo->dev; + + mutex_lock(&dev->struct_mutex); + drm_bo_vm_open_locked(vma); + mutex_unlock(&dev->struct_mutex); +} + +/** + * \c vma close method for buffer objects. + * + * \param vma virtual memory area. 
+ */ + +static void drm_bo_vm_close(struct vm_area_struct *vma) +{ + struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data; + struct drm_device *dev = bo->dev; + + drm_vm_close(vma); + if (bo) { + mutex_lock(&dev->struct_mutex); + drm_bo_usage_deref_locked((struct drm_buffer_object **) + &vma->vm_private_data); + mutex_unlock(&dev->struct_mutex); + } + return; +} + +static struct vm_operations_struct drm_bo_vm_ops = { + .nopfn = drm_bo_vm_nopfn, + .open = drm_bo_vm_open, + .close = drm_bo_vm_close, +}; + +/** + * mmap buffer object memory. + * + * \param vma virtual memory area. + * \param file_priv DRM file private. + * \param map The buffer object drm map. + * \return zero on success or a negative number on failure. + */ + +int drm_bo_mmap_locked(struct vm_area_struct *vma, + struct file *filp, + drm_local_map_t *map) +{ + vma->vm_ops = &drm_bo_vm_ops; + vma->vm_private_data = map->handle; + vma->vm_file = filp; + vma->vm_flags |= VM_RESERVED | VM_IO; + vma->vm_flags |= VM_PFNMAP; + drm_bo_vm_open_locked(vma); + return 0; +} diff --git a/drivers/char/drm/i810_dma.c b/drivers/char/drm/i810_dma.c index eb381a7..aa2fa9b 100644 --- a/drivers/char/drm/i810_dma.c +++ b/drivers/char/drm/i810_dma.c @@ -40,7 +40,7 @@ #define I810_BUF_FREE 2 #define I810_BUF_CLIENT 1 -#define I810_BUF_HARDWARE 0 +#define I810_BUF_HARDWARE 0 #define I810_BUF_UNMAPPED 0 #define I810_BUF_MAPPED 1 @@ -848,7 +848,7 @@ static void i810_dma_quiescent(struct drm_device * dev) drm_i810_private_t *dev_priv = dev->dev_private; RING_LOCALS; -/* printk("%s\n", __FUNCTION__); */ +/* printk("%s\n", __FUNCTION__); */ i810_kernel_lost_context(dev); @@ -869,7 +869,7 @@ static int i810_flush_queue(struct drm_device * dev) int i, ret = 0; RING_LOCALS; -/* printk("%s\n", __FUNCTION__); */ +/* printk("%s\n", __FUNCTION__); */ i810_kernel_lost_context(dev); diff --git a/drivers/char/drm/i810_drv.h b/drivers/char/drm/i810_drv.h index 0af4587..4de172b 100644 --- a/drivers/char/drm/i810_drv.h +++ b/drivers/char/drm/i810_drv.h @@ -25,7 +25,7 @@ * DEALINGS IN THE SOFTWARE. * * Authors: Rickard E. 
(Rik) Faith - * Jeff Hartmann + * Jeff Hartmann * */ @@ -134,7 +134,7 @@ extern int i810_max_ioctl; #define I810_ADDR(reg) (I810_BASE(reg) + reg) #define I810_DEREF(reg) *(__volatile__ int *)I810_ADDR(reg) #define I810_READ(reg) I810_DEREF(reg) -#define I810_WRITE(reg,val) do { I810_DEREF(reg) = val; } while (0) +#define I810_WRITE(reg,val) do { I810_DEREF(reg) = val; } while (0) #define I810_DEREF16(reg) *(__volatile__ u16 *)I810_ADDR(reg) #define I810_READ16(reg) I810_DEREF16(reg) #define I810_WRITE16(reg,val) do { I810_DEREF16(reg) = val; } while (0) @@ -155,19 +155,19 @@ extern int i810_max_ioctl; } while (0) #define ADVANCE_LP_RING() do { \ - if (I810_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING\n"); \ - dev_priv->ring.tail = outring; \ + if (I810_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING\n"); \ + dev_priv->ring.tail = outring; \ I810_WRITE(LP_RING + RING_TAIL, outring); \ } while(0) -#define OUT_RING(n) do { \ +#define OUT_RING(n) do { \ if (I810_VERBOSE) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \ *(volatile unsigned int *)(virt + outring) = n; \ outring += 4; \ outring &= ringmask; \ } while (0) -#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23)) +#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23)) #define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23)) #define CMD_REPORT_HEAD (7<<23) #define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1) @@ -184,28 +184,28 @@ extern int i810_max_ioctl; #define I810REG_HWSTAM 0x02098 #define I810REG_INT_IDENTITY_R 0x020a4 -#define I810REG_INT_MASK_R 0x020a8 +#define I810REG_INT_MASK_R 0x020a8 #define I810REG_INT_ENABLE_R 0x020a0 -#define LP_RING 0x2030 -#define HP_RING 0x2040 -#define RING_TAIL 0x00 +#define LP_RING 0x2030 +#define HP_RING 0x2040 +#define RING_TAIL 0x00 #define TAIL_ADDR 0x000FFFF8 -#define RING_HEAD 0x04 -#define HEAD_WRAP_COUNT 0xFFE00000 -#define HEAD_WRAP_ONE 0x00200000 -#define HEAD_ADDR 0x001FFFFC -#define RING_START 0x08 -#define START_ADDR 0x00FFFFF8 -#define RING_LEN 0x0C -#define RING_NR_PAGES 0x000FF000 -#define RING_REPORT_MASK 0x00000006 -#define RING_REPORT_64K 0x00000002 -#define RING_REPORT_128K 0x00000004 -#define RING_NO_REPORT 0x00000000 -#define RING_VALID_MASK 0x00000001 -#define RING_VALID 0x00000001 -#define RING_INVALID 0x00000000 +#define RING_HEAD 0x04 +#define HEAD_WRAP_COUNT 0xFFE00000 +#define HEAD_WRAP_ONE 0x00200000 +#define HEAD_ADDR 0x001FFFFC +#define RING_START 0x08 +#define START_ADDR 0x00FFFFF8 +#define RING_LEN 0x0C +#define RING_NR_PAGES 0x000FF000 +#define RING_REPORT_MASK 0x00000006 +#define RING_REPORT_64K 0x00000002 +#define RING_REPORT_128K 0x00000004 +#define RING_NO_REPORT 0x00000000 +#define RING_VALID_MASK 0x00000001 +#define RING_VALID 0x00000001 +#define RING_INVALID 0x00000000 #define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19)) #define SC_UPDATE_SCISSOR (0x1<<1) diff --git a/drivers/char/drm/i830_dma.c b/drivers/char/drm/i830_dma.c index 69a363e..379cbda 100644 --- a/drivers/char/drm/i830_dma.c +++ b/drivers/char/drm/i830_dma.c @@ -42,7 +42,7 @@ #define I830_BUF_FREE 2 #define I830_BUF_CLIENT 1 -#define I830_BUF_HARDWARE 0 +#define I830_BUF_HARDWARE 0 #define I830_BUF_UNMAPPED 0 #define I830_BUF_MAPPED 1 diff --git a/drivers/char/drm/i830_drm.h b/drivers/char/drm/i830_drm.h index 968a6d9..4b00d2d 100644 --- a/drivers/char/drm/i830_drm.h +++ b/drivers/char/drm/i830_drm.h @@ -12,9 +12,9 @@ #define _I830_DEFINES_ #define I830_DMA_BUF_ORDER 12 -#define I830_DMA_BUF_SZ (1< - * Jeff Hartmann + * Jeff Hartmann * */ @@ -183,7 +183,7 @@ extern int i830_driver_device_is_agp(struct drm_device * dev); extern int 
i830_wait_ring(struct drm_device * dev, int n, const char *caller); -#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23)) +#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23)) #define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23)) #define CMD_REPORT_HEAD (7<<23) #define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1) @@ -203,30 +203,30 @@ extern int i830_wait_ring(struct drm_device * dev, int n, const char *caller); #define I830REG_HWSTAM 0x02098 #define I830REG_INT_IDENTITY_R 0x020a4 -#define I830REG_INT_MASK_R 0x020a8 +#define I830REG_INT_MASK_R 0x020a8 #define I830REG_INT_ENABLE_R 0x020a0 #define I830_IRQ_RESERVED ((1<<13)|(3<<2)) -#define LP_RING 0x2030 -#define HP_RING 0x2040 -#define RING_TAIL 0x00 +#define LP_RING 0x2030 +#define HP_RING 0x2040 +#define RING_TAIL 0x00 #define TAIL_ADDR 0x001FFFF8 -#define RING_HEAD 0x04 -#define HEAD_WRAP_COUNT 0xFFE00000 -#define HEAD_WRAP_ONE 0x00200000 -#define HEAD_ADDR 0x001FFFFC -#define RING_START 0x08 -#define START_ADDR 0x0xFFFFF000 -#define RING_LEN 0x0C -#define RING_NR_PAGES 0x001FF000 -#define RING_REPORT_MASK 0x00000006 -#define RING_REPORT_64K 0x00000002 -#define RING_REPORT_128K 0x00000004 -#define RING_NO_REPORT 0x00000000 -#define RING_VALID_MASK 0x00000001 -#define RING_VALID 0x00000001 -#define RING_INVALID 0x00000000 +#define RING_HEAD 0x04 +#define HEAD_WRAP_COUNT 0xFFE00000 +#define HEAD_WRAP_ONE 0x00200000 +#define HEAD_ADDR 0x001FFFFC +#define RING_START 0x08 +#define START_ADDR 0x0xFFFFF000 +#define RING_LEN 0x0C +#define RING_NR_PAGES 0x001FF000 +#define RING_REPORT_MASK 0x00000006 +#define RING_REPORT_64K 0x00000002 +#define RING_REPORT_128K 0x00000004 +#define RING_NO_REPORT 0x00000000 +#define RING_VALID_MASK 0x00000001 +#define RING_VALID 0x00000001 +#define RING_INVALID 0x00000000 #define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19)) #define SC_UPDATE_SCISSOR (0x1<<1) @@ -279,9 +279,9 @@ extern int i830_wait_ring(struct drm_device * dev, int n, const char *caller); #define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21) #define XY_SRC_COPY_BLT_WRITE_RGB (1<<20) -#define MI_BATCH_BUFFER ((0x30<<23)|1) -#define MI_BATCH_BUFFER_START (0x31<<23) -#define MI_BATCH_BUFFER_END (0xA<<23) +#define MI_BATCH_BUFFER ((0x30<<23)|1) +#define MI_BATCH_BUFFER_START (0x31<<23) +#define MI_BATCH_BUFFER_END (0xA<<23) #define MI_BATCH_NON_SECURE (1) #define MI_WAIT_FOR_EVENT ((0x3<<23)) diff --git a/drivers/char/drm/i830_irq.c b/drivers/char/drm/i830_irq.c index 76403f4..a33db5f 100644 --- a/drivers/char/drm/i830_irq.c +++ b/drivers/char/drm/i830_irq.c @@ -144,7 +144,7 @@ int i830_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_i830_private_t *dev_priv = dev->dev_private; - drm_i830_irq_wait_t *irqwait = data; + drm_i830_irq_wait_t *irqwait = data; if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); diff --git a/drivers/char/drm/i915_buffer.c b/drivers/char/drm/i915_buffer.c new file mode 100644 index 0000000..2cea87d --- /dev/null +++ b/drivers/char/drm/i915_buffer.c @@ -0,0 +1,175 @@ +/************************************************************************** + * + * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * + **************************************************************************/ +/* + * Authors: Thomas Hellström + */ + +#include "drmP.h" +#include "i915_drm.h" +#include "i915_drv.h" + +struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device * dev) +{ + return drm_agp_init_ttm(dev); +} + +int i915_fence_types(struct drm_buffer_object *bo, + uint32_t * fclass, + uint32_t * type) +{ + if (bo->mem.mask & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE)) + *type = 3; + else + *type = 1; + return 0; +} + +int i915_invalidate_caches(struct drm_device * dev, uint64_t flags) +{ + /* + * FIXME: Only emit once per batchbuffer submission. 
+ */ + + uint32_t flush_cmd = MI_NO_WRITE_FLUSH; + + if (flags & DRM_BO_FLAG_READ) + flush_cmd |= MI_READ_FLUSH; + if (flags & DRM_BO_FLAG_EXE) + flush_cmd |= MI_EXE_FLUSH; + + return i915_emit_mi_flush(dev, flush_cmd); +} + +int i915_init_mem_type(struct drm_device * dev, uint32_t type, + struct drm_mem_type_manager * man) +{ + switch (type) { + case DRM_BO_MEM_LOCAL: + man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | + _DRM_FLAG_MEMTYPE_CACHED; + man->drm_bus_maptype = 0; + man->gpu_offset = 0; + break; + case DRM_BO_MEM_TT: + if (!(drm_core_has_AGP(dev) && dev->agp)) { + DRM_ERROR("AGP is not enabled for memory type %u\n", + (unsigned)type); + return -EINVAL; + } + man->io_offset = dev->agp->agp_info.aper_base; + man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024; + man->io_addr = NULL; + man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | + _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_NEEDS_IOREMAP; + man->drm_bus_maptype = _DRM_AGP; + man->gpu_offset = 0; + break; + case DRM_BO_MEM_PRIV0: + if (!(drm_core_has_AGP(dev) && dev->agp)) { + DRM_ERROR("AGP is not enabled for memory type %u\n", + (unsigned)type); + return -EINVAL; + } + man->io_offset = dev->agp->agp_info.aper_base; + man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024; + man->io_addr = NULL; + man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | + _DRM_FLAG_MEMTYPE_FIXED | _DRM_FLAG_NEEDS_IOREMAP; + man->drm_bus_maptype = _DRM_AGP; + man->gpu_offset = 0; + break; + default: + DRM_ERROR("Unsupported memory type %u\n", (unsigned)type); + return -EINVAL; + } + return 0; +} + +uint32_t i915_evict_mask(struct drm_buffer_object *bo) +{ + switch (bo->mem.mem_type) { + case DRM_BO_MEM_LOCAL: + case DRM_BO_MEM_TT: + return DRM_BO_FLAG_MEM_LOCAL; + default: + return DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED; + } +} + + +/* + * Disable i915_move_flip for now, since we can't guarantee that the hardware lock + * is held here. To re-enable we need to make sure either + * a) The X server is using DRM to submit commands to the ring, or + * b) DRM can use the HP ring for these blits. This means i915 needs to implement + * a new ring submission mechanism and fence class. 
+ */ + +int i915_move(struct drm_buffer_object * bo, + int evict, int no_wait, struct drm_bo_mem_reg * new_mem) +{ + struct drm_bo_mem_reg *old_mem = &bo->mem; + + if (old_mem->mem_type == DRM_BO_MEM_LOCAL) { + return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); + } else if (new_mem->mem_type == DRM_BO_MEM_LOCAL) { + if (0 /*i915_move_flip(bo, evict, no_wait, new_mem)*/) + return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); + } else { + if (0 /*i915_move_blit(bo, evict, no_wait, new_mem)*/) + return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); + } + return 0; +} + + +static inline void drm_cache_flush_addr(void *virt) +{ + int i; + + for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size) + clflush(virt+i); +} + +static inline void drm_cache_flush_page(struct page *p) +{ + drm_cache_flush_addr(page_address(p)); +} + +void i915_flush_ttm(struct drm_ttm *ttm) +{ + int i; + + if (!ttm) + return; + + DRM_MEMORYBARRIER(); + for (i = ttm->num_pages-1; i >= 0; i--) + drm_cache_flush_page(drm_ttm_get_page(ttm, i)); + DRM_MEMORYBARRIER(); +} diff --git a/drivers/char/drm/i915_dma.c b/drivers/char/drm/i915_dma.c index e61a43e..eced445 100644 --- a/drivers/char/drm/i915_dma.c +++ b/drivers/char/drm/i915_dma.c @@ -351,7 +351,7 @@ static int validate_cmd(int cmd) { int ret = do_validate_cmd(cmd); -/* printk("validate_cmd( %x ): %d\n", cmd, ret); */ +/* printk("validate_cmd( %x ): %d\n", cmd, ret); */ return ret; } @@ -438,15 +438,17 @@ static int i915_emit_box(struct drm_device * dev, * emit. For now, do it in both places: */ -static void i915_emit_breadcrumb(struct drm_device *dev) +void i915_emit_breadcrumb(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; RING_LOCALS; - dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter; + if (++dev_priv->counter > BREADCRUMB_MASK) { + dev_priv->counter = 1; + DRM_DEBUG("Breadcrumb counter wrapped around\n"); + } - if (dev_priv->counter > 0x7FFFFFFFUL) - dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1; + dev_priv->sarea_priv->last_enqueue = dev_priv->counter; BEGIN_LP_RING(4); OUT_RING(CMD_STORE_DWORD_IDX); @@ -456,9 +458,32 @@ static void i915_emit_breadcrumb(struct drm_device *dev) ADVANCE_LP_RING(); } + +int i915_emit_mi_flush(struct drm_device *dev, uint32_t flush) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + uint32_t flush_cmd = CMD_MI_FLUSH; + RING_LOCALS; + + flush_cmd |= flush; + + i915_kernel_lost_context(dev); + + BEGIN_LP_RING(4); + OUT_RING(flush_cmd); + OUT_RING(0); + OUT_RING(0); + OUT_RING(0); + ADVANCE_LP_RING(); + + return 0; +} + + static int i915_dispatch_cmdbuffer(struct drm_device * dev, drm_i915_cmdbuffer_t * cmd) { + drm_i915_private_t *dev_priv = dev->dev_private; int nbox = cmd->num_cliprects; int i = 0, count, ret; @@ -484,7 +509,8 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev, return ret; } - i915_emit_breadcrumb(dev); + i915_emit_breadcrumb( dev ); + drm_fence_flush_old(dev, 0, dev_priv->counter); return 0; } @@ -534,8 +560,8 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev, } } - i915_emit_breadcrumb(dev); - + i915_emit_breadcrumb( dev ); + drm_fence_flush_old(dev, 0, dev_priv->counter); return 0; } @@ -607,7 +633,6 @@ static int i915_batchbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - u32 *hw_status = dev_priv->hw_status_page; drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) dev_priv->sarea_priv; drm_i915_batchbuffer_t *batch = data; 
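The next hunk adds the relocation machinery behind the new execbuffer path. The layout it parses is the one documented in i915_drm.h further down: a 4-dword header carrying the relocation type, the record count and the handle of the next relocation buffer in the chain, followed by 4-dword type-0 records. As a minimal sketch of what a producer of such a buffer would write into a mapped relocation page — the helper name and calling convention are hypothetical; only the dword layout comes from this series:

#include "i915_drm.h"	/* I915_RELOC_HEADER, I915_RELOC0_STRIDE, I915_RELOC_TYPE_0 */

/*
 * Fill a mapped relocation buffer with 'count' type-0 relocations.
 * Each source record supplies: the byte offset to patch, the delta to
 * add to the target buffer's final offset, and the target's index in
 * the validate list.
 */
static void example_write_reloc_list(uint32_t *page,
				     uint32_t next_reloc_handle,
				     const uint32_t (*relocs)[3],
				     uint32_t count)
{
	uint32_t i;

	page[0] = (I915_RELOC_TYPE_0 << 16) | (count & 0xffff);
	page[1] = next_reloc_handle;	/* 0 ends the chain */
	page[2] = 0;			/* spare */
	page[3] = 0;			/* spare */

	for (i = 0; i < count; i++) {
		uint32_t *r = page + I915_RELOC_HEADER + i * I915_RELOC0_STRIDE;

		r[0] = relocs[i][0];	/* byte offset into the buffer being patched */
		r[1] = relocs[i][1];	/* delta added to the target's offset        */
		r[2] = relocs[i][2];	/* index into the validate buffer list       */
		r[3] = 0;		/* reserved                                  */
	}
}

On the kernel side, i915_apply_reloc() then adds the validated target buffer's offset to the delta and writes the result back at the given byte offset of the buffer being patched.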
@@ -630,7 +655,7 @@ static int i915_batchbuffer(struct drm_device *dev, void *data, ret = i915_dispatch_batchbuffer(dev, batch); - sarea_priv->last_dispatch = (int)hw_status[5]; + sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); return ret; } @@ -638,7 +663,6 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - u32 *hw_status = dev_priv->hw_status_page; drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) dev_priv->sarea_priv; drm_i915_cmdbuffer_t *cmdbuf = data; @@ -663,10 +687,391 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data, return ret; } - sarea_priv->last_dispatch = (int)hw_status[5]; + sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); return 0; } +struct i915_relocatee_info { + struct drm_buffer_object *buf; + unsigned long offset; + u32 *data_page; + unsigned page_offset; + struct drm_bo_kmap_obj kmap; + int is_iomem; +}; + +static void i915_dereference_buffers_locked(struct drm_buffer_object **buffers, + unsigned num_buffers) +{ + while (num_buffers--) + drm_bo_usage_deref_locked(&buffers[num_buffers]); +} + +int i915_apply_reloc(struct drm_file *file_priv, int num_buffers, + struct drm_buffer_object **buffers, + struct i915_relocatee_info *relocatee, + uint32_t *reloc) +{ + unsigned index; + unsigned long new_cmd_offset; + u32 val; + int ret; + + if (reloc[2] >= num_buffers) { + DRM_ERROR("Illegal relocation buffer %08X\n", reloc[2]); + return -EINVAL; + } + + new_cmd_offset = reloc[0]; + if (!relocatee->data_page || + !drm_bo_same_page(relocatee->offset, new_cmd_offset)) { + drm_bo_kunmap(&relocatee->kmap); + relocatee->offset = new_cmd_offset; + ret = drm_bo_kmap(relocatee->buf, new_cmd_offset >> PAGE_SHIFT, + 1, &relocatee->kmap); + if (ret) { + DRM_ERROR("Could not map command buffer to apply relocs\n %08lx", new_cmd_offset); + return ret; + } + + relocatee->data_page = drm_bmo_virtual(&relocatee->kmap, + &relocatee->is_iomem); + relocatee->page_offset = (relocatee->offset & PAGE_MASK); + } + + val = buffers[reloc[2]]->offset; + index = (reloc[0] - relocatee->page_offset) >> 2; + + /* add in validate */ + val = val + reloc[1]; + + relocatee->data_page[index] = val; + return 0; +} + +int i915_process_relocs(struct drm_file *file_priv, + uint32_t buf_handle, + uint32_t *reloc_buf_handle, + struct i915_relocatee_info *relocatee, + struct drm_buffer_object **buffers, + uint32_t num_buffers) +{ + struct drm_device *dev = file_priv->head->dev; + struct drm_buffer_object *reloc_list_object; + uint32_t cur_handle = *reloc_buf_handle; + uint32_t *reloc_page; + int ret, reloc_is_iomem, reloc_stride; + uint32_t num_relocs, reloc_offset, reloc_end, reloc_page_offset, next_offset, cur_offset; + struct drm_bo_kmap_obj reloc_kmap; + + memset(&reloc_kmap, 0, sizeof(reloc_kmap)); + + mutex_lock(&dev->struct_mutex); + reloc_list_object = drm_lookup_buffer_object(file_priv, cur_handle, 1); + mutex_unlock(&dev->struct_mutex); + if (!reloc_list_object) + return -EINVAL; + + ret = drm_bo_kmap(reloc_list_object, 0, 1, &reloc_kmap); + if (ret) { + DRM_ERROR("Could not map relocation buffer.\n"); + goto out; + } + + reloc_page = drm_bmo_virtual(&reloc_kmap, &reloc_is_iomem); + num_relocs = reloc_page[0] & 0xffff; + + if ((reloc_page[0] >> 16) & 0xffff) { + DRM_ERROR("Unsupported relocation type requested\n"); + goto out; + } + + /* get next relocate buffer handle */ + *reloc_buf_handle = reloc_page[1]; + reloc_stride = I915_RELOC0_STRIDE * sizeof(uint32_t); 
/* may be different for other types of relocs */ + + DRM_DEBUG("num relocs is %d, next is %08X\n", num_relocs, reloc_page[1]); + + reloc_page_offset = 0; + reloc_offset = I915_RELOC_HEADER * sizeof(uint32_t); + reloc_end = reloc_offset + (num_relocs * reloc_stride); + + do { + next_offset = drm_bo_offset_end(reloc_offset, reloc_end); + + do { + cur_offset = ((reloc_offset + reloc_page_offset) & ~PAGE_MASK) / sizeof(uint32_t); + ret = i915_apply_reloc(file_priv, num_buffers, + buffers, relocatee, &reloc_page[cur_offset]); + if (ret) + goto out; + + reloc_offset += reloc_stride; + } while (reloc_offset < next_offset); + + drm_bo_kunmap(&reloc_kmap); + + reloc_offset = next_offset; + if (reloc_offset != reloc_end) { + ret = drm_bo_kmap(reloc_list_object, reloc_offset >> PAGE_SHIFT, 1, &reloc_kmap); + if (ret) { + DRM_ERROR("Could not map relocation buffer.\n"); + goto out; + } + + reloc_page = drm_bmo_virtual(&reloc_kmap, &reloc_is_iomem); + reloc_page_offset = reloc_offset & ~PAGE_MASK; + } + + } while (reloc_offset != reloc_end); +out: + drm_bo_kunmap(&relocatee->kmap); + relocatee->data_page = NULL; + + drm_bo_kunmap(&reloc_kmap); + + mutex_lock(&dev->struct_mutex); + drm_bo_usage_deref_locked(&reloc_list_object); + mutex_unlock(&dev->struct_mutex); + + return ret; +} + +static int i915_exec_reloc(struct drm_file *file_priv, drm_handle_t buf_handle, + drm_handle_t buf_reloc_handle, + struct drm_buffer_object **buffers, + uint32_t buf_count) +{ + struct drm_device *dev = file_priv->head->dev; + struct i915_relocatee_info relocatee; + int ret = 0; + + memset(&relocatee, 0, sizeof(relocatee)); + + mutex_lock(&dev->struct_mutex); + relocatee.buf = drm_lookup_buffer_object(file_priv, buf_handle, 1); + mutex_unlock(&dev->struct_mutex); + if (!relocatee.buf) { + DRM_DEBUG("relocatee buffer invalid %08x\n", buf_handle); + ret = -EINVAL; + goto out_err; + } + + while (buf_reloc_handle) { + ret = i915_process_relocs(file_priv, buf_handle, &buf_reloc_handle, &relocatee, buffers, buf_count); + if (ret) { + DRM_ERROR("process relocs failed\n"); + break; + } + } + + mutex_lock(&dev->struct_mutex); + drm_bo_usage_deref_locked(&relocatee.buf); + mutex_unlock(&dev->struct_mutex); + +out_err: + return ret; +} + +/* + * Validate, add fence and relocate a block of bos from a userspace list + */ +int i915_validate_buffer_list(struct drm_file *file_priv, + unsigned int fence_class, uint64_t data, + struct drm_buffer_object **buffers, + uint32_t *num_buffers) +{ + struct drm_i915_op_arg arg; + struct drm_bo_op_req *req = &arg.d.req; + struct drm_bo_arg_rep rep; + unsigned long next = 0; + int ret = 0; + unsigned buf_count = 0; + struct drm_device *dev = file_priv->head->dev; + uint32_t buf_reloc_handle, buf_handle; + + + do { + if (buf_count >= *num_buffers) { + DRM_ERROR("Buffer count exceeded %d\n.", *num_buffers); + ret = -EINVAL; + goto out_err; + } + + buffers[buf_count] = NULL; + + if (copy_from_user(&arg, (void __user *)(unsigned)data, sizeof(arg))) { + ret = -EFAULT; + goto out_err; + } + + if (arg.handled) { + data = arg.next; + mutex_lock(&dev->struct_mutex); + buffers[buf_count] = drm_lookup_buffer_object(file_priv, req->arg_handle, 1); + mutex_unlock(&dev->struct_mutex); + buf_count++; + continue; + } + + rep.ret = 0; + if (req->op != drm_bo_validate) { + DRM_ERROR + ("Buffer object operation wasn't \"validate\".\n"); + rep.ret = -EINVAL; + goto out_err; + } + + buf_handle = req->bo_req.handle; + buf_reloc_handle = arg.reloc_handle; + + if (buf_reloc_handle) { + ret = i915_exec_reloc(file_priv, 
buf_handle, buf_reloc_handle, buffers, buf_count); + if (ret) + goto out_err; + DRM_MEMORYBARRIER(); + } + + rep.ret = drm_bo_handle_validate(file_priv, req->bo_req.handle, + req->bo_req.fence_class, + req->bo_req.flags, + req->bo_req.mask, + req->bo_req.hint, + 0, + &rep.bo_info, + &buffers[buf_count]); + + if (rep.ret) { + DRM_ERROR("error on handle validate %d\n", rep.ret); + goto out_err; + } + + + next = arg.next; + arg.handled = 1; + arg.d.rep = rep; + + if (copy_to_user((void __user *)(unsigned)data, &arg, sizeof(arg))) + return -EFAULT; + + data = next; + buf_count++; + + } while (next != 0); + *num_buffers = buf_count; + return 0; +out_err: + mutex_lock(&dev->struct_mutex); + i915_dereference_buffers_locked(buffers, buf_count); + mutex_unlock(&dev->struct_mutex); + *num_buffers = 0; + return (ret) ? ret : rep.ret; +} + +static int i915_execbuffer(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) + dev_priv->sarea_priv; + struct drm_i915_execbuffer *exec_buf = data; + struct _drm_i915_batchbuffer *batch = &exec_buf->batch; + struct drm_fence_arg *fence_arg = &exec_buf->fence_arg; + int num_buffers; + int ret; + struct drm_buffer_object **buffers; + struct drm_fence_object *fence; + + if (!dev_priv->allow_batchbuffer) { + DRM_ERROR("Batchbuffer ioctl disabled\n"); + return -EINVAL; + } + + + if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects, + batch->num_cliprects * + sizeof(struct drm_clip_rect))) + return -EFAULT; + + if (exec_buf->num_buffers > dev_priv->max_validate_buffers) + return -EINVAL; + + + ret = drm_bo_read_lock(&dev->bm.bm_lock); + if (ret) + return ret; + + /* + * The cmdbuf_mutex makes sure the validate-submit-fence + * operation is atomic. 
+ */ + + ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex); + if (ret) { + drm_bo_read_unlock(&dev->bm.bm_lock); + return -EAGAIN; + } + + num_buffers = exec_buf->num_buffers; + + buffers = drm_calloc(num_buffers, sizeof(struct drm_buffer_object *), DRM_MEM_DRIVER); + if (!buffers) { + drm_bo_read_unlock(&dev->bm.bm_lock); + mutex_unlock(&dev_priv->cmdbuf_mutex); + return -ENOMEM; + } + + /* validate buffer list + fixup relocations */ + ret = i915_validate_buffer_list(file_priv, 0, exec_buf->ops_list, + buffers, &num_buffers); + if (ret) + goto out_free; + + /* make sure all previous memory operations have passed */ + DRM_MEMORYBARRIER(); + + /* submit buffer */ + batch->start = buffers[num_buffers-1]->offset; + + DRM_DEBUG("i915 exec batchbuffer, start %x used %d cliprects %d\n", + batch->start, batch->used, batch->num_cliprects); + + ret = i915_dispatch_batchbuffer(dev, batch); + if (ret) + goto out_err0; + + sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); + + /* fence */ + ret = drm_fence_buffer_objects(dev, NULL, 0, NULL, &fence); + if (ret) + goto out_err0; + + if (!(fence_arg->flags & DRM_FENCE_FLAG_NO_USER)) { + ret = drm_fence_add_user_object(file_priv, fence, fence_arg->flags & DRM_FENCE_FLAG_SHAREABLE); + if (!ret) { + fence_arg->handle = fence->base.hash.key; + fence_arg->fence_class = fence->fence_class; + fence_arg->type = fence->type; + fence_arg->signaled = fence->signaled; + } + } + drm_fence_usage_deref_unlocked(&fence); +out_err0: + + /* handle errors */ + mutex_lock(&dev->struct_mutex); + i915_dereference_buffers_locked(buffers, num_buffers); + mutex_unlock(&dev->struct_mutex); + +out_free: + drm_free(buffers, (exec_buf->num_buffers * sizeof(struct drm_buffer_object *)), DRM_MEM_DRIVER); + + mutex_unlock(&dev_priv->cmdbuf_mutex); + drm_bo_read_unlock(&dev->bm.bm_lock); + return ret; +} + static int i915_flip_bufs(struct drm_device *dev, void *data, struct drm_file *file_priv) { @@ -798,7 +1203,8 @@ void i915_driver_lastclose(struct drm_device * dev) { if (dev->dev_private) { drm_i915_private_t *dev_priv = dev->dev_private; - i915_mem_takedown(&(dev_priv->agp_heap)); + if (dev_priv->agp_heap) + i915_mem_takedown(&(dev_priv->agp_heap)); } i915_dma_cleanup(dev); } @@ -829,6 +1235,7 @@ struct drm_ioctl_desc i915_ioctls[] = { DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH ), DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH), DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH), + DRM_IOCTL_DEF(DRM_I915_EXECBUFFER, i915_execbuffer, DRM_AUTH), }; int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); @@ -848,3 +1255,9 @@ int i915_driver_device_is_agp(struct drm_device * dev) { return 1; } + +int i915_driver_firstopen(struct drm_device *dev) +{ + drm_bo_driver_init(dev); + return 0; +} diff --git a/drivers/char/drm/i915_drm.h b/drivers/char/drm/i915_drm.h index 05c66cf..1e974e4 100644 --- a/drivers/char/drm/i915_drm.h +++ b/drivers/char/drm/i915_drm.h @@ -115,6 +115,16 @@ typedef struct _drm_i915_sarea { int pipeB_h; } drm_i915_sarea_t; +/* Driver specific fence types and classes. 
+ */ + +/* The only fence class we support */ +#define DRM_I915_FENCE_CLASS_ACCEL 0 +/* Fence type that guarantees read-write flush */ +#define DRM_I915_FENCE_TYPE_RW 2 +/* MI_FLUSH programmed just before the fence */ +#define DRM_I915_FENCE_FLAG_FLUSHED 0x01000000 + /* Flags for perf_boxes */ #define I915_BOX_RING_EMPTY 0x1 @@ -143,6 +153,7 @@ typedef struct _drm_i915_sarea { #define DRM_I915_GET_VBLANK_PIPE 0x0e #define DRM_I915_VBLANK_SWAP 0x0f #define DRM_I915_HWS_ADDR 0x11 +#define DRM_I915_EXECBUFFER 0x12 #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) #define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) @@ -160,6 +171,7 @@ typedef struct _drm_i915_sarea { #define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t) #define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t) #define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t) +#define DRM_IOCTL_I915_EXECBUFFER DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_EXECBUFFER, struct drm_i915_execbuffer) /* Allow drivers to submit batchbuffers directly to hardware, relying * on the security mechanisms provided by hardware. @@ -267,4 +279,40 @@ typedef struct drm_i915_hws_addr { uint64_t addr; } drm_i915_hws_addr_t; +/* + * Relocation header is 4 uint32_ts + * 0 - (16-bit relocation type << 16)| 16 bit reloc count + * 1 - buffer handle for another list of relocs + * 2-3 - spare. + */ +#define I915_RELOC_HEADER 4 + +/* + * type 0 relocation has 4-uint32_t stride + * 0 - offset into buffer + * 1 - delta to add in + * 2 - index into buffer list + * 3 - reserved (for optimisations later). + */ +#define I915_RELOC_TYPE_0 0 +#define I915_RELOC0_STRIDE 4 + +struct drm_i915_op_arg { + uint64_t next; + uint32_t reloc_handle; + int handled; + union { + struct drm_bo_op_req req; + struct drm_bo_arg_rep rep; + } d; + +}; + +struct drm_i915_execbuffer { + uint64_t ops_list; + uint32_t num_buffers; + struct _drm_i915_batchbuffer batch; + struct drm_fence_arg fence_arg; +}; + #endif /* _I915_DRM_H_ */ diff --git a/drivers/char/drm/i915_drv.c b/drivers/char/drm/i915_drv.c index 85bcc27..63b6675 100644 --- a/drivers/char/drm/i915_drv.c +++ b/drivers/char/drm/i915_drv.c @@ -77,7 +77,7 @@ static struct drm_driver driver = { .name = DRIVER_NAME, .id_table = pciidlist, }, - + .name = DRIVER_NAME, .desc = DRIVER_DESC, .date = DRIVER_DATE, diff --git a/drivers/char/drm/i915_drv.h b/drivers/char/drm/i915_drv.h index e064292..3b0cf67 100644 --- a/drivers/char/drm/i915_drv.h +++ b/drivers/char/drm/i915_drv.h @@ -53,6 +53,8 @@ #define DRIVER_MINOR 6 #define DRIVER_PATCHLEVEL 0 +#define I915_MAX_VALIDATE_BUFFERS 4096 + typedef struct _drm_i915_ring_buffer { int tail_mask; unsigned long Start; @@ -110,6 +112,19 @@ typedef struct drm_i915_private { struct mem_block *agp_heap; unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds; int vblank_pipe; + spinlock_t user_irq_lock; + int user_irq_refcount; + int fence_irq_on; + uint32_t irq_enable_reg; + int irq_enabled; + + uint32_t flush_sequence; + uint32_t flush_flags; + uint32_t flush_pending; + uint32_t saved_flush_status; + void *agp_iomap; + unsigned int max_validate_buffers; + struct mutex cmdbuf_mutex; spinlock_t swaps_lock; drm_i915_vbl_swap_t vbl_swaps; @@ -128,6 +143,9 @@ extern void i915_driver_preclose(struct drm_device *dev, extern int i915_driver_device_is_agp(struct drm_device * dev); extern long 
i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); +extern void i915_emit_breadcrumb(struct drm_device *dev); +extern int i915_emit_mi_flush(struct drm_device *dev, uint32_t flush); +extern int i915_driver_firstopen(struct drm_device *dev); /* i915_irq.c */ extern int i915_irq_emit(struct drm_device *dev, void *data, @@ -145,6 +163,9 @@ extern int i915_vblank_pipe_set(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int i915_vblank_pipe_get(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int i915_emit_irq(struct drm_device * dev); +extern void i915_user_irq_on(drm_i915_private_t *dev_priv); +extern void i915_user_irq_off(drm_i915_private_t *dev_priv); extern int i915_vblank_swap(struct drm_device *dev, void *data, struct drm_file *file_priv); @@ -159,11 +180,37 @@ extern int i915_mem_destroy_heap(struct drm_device *dev, void *data, struct drm_file *file_priv); extern void i915_mem_takedown(struct mem_block **heap); extern void i915_mem_release(struct drm_device * dev, - struct drm_file *file_priv, struct mem_block *heap); + struct drm_file *file_priv, + struct mem_block *heap); +/* i915_fence.c */ + + +extern void i915_fence_handler(struct drm_device *dev); +extern int i915_fence_emit_sequence(struct drm_device *dev, uint32_t class, + uint32_t flags, + uint32_t *sequence, + uint32_t *native_type); +extern void i915_poke_flush(struct drm_device *dev, uint32_t class); +extern int i915_fence_has_irq(struct drm_device *dev, uint32_t class, uint32_t flags); + +/* i915_buffer.c */ +extern struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device *dev); +extern int i915_fence_types(struct drm_buffer_object *bo, uint32_t *fclass, + uint32_t *type); +extern int i915_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags); +extern int i915_init_mem_type(struct drm_device *dev, uint32_t type, + struct drm_mem_type_manager *man); +extern uint32_t i915_evict_mask(struct drm_buffer_object *bo); +extern int i915_move(struct drm_buffer_object *bo, int evict, + int no_wait, struct drm_bo_mem_reg *new_mem); +void i915_flush_ttm(struct drm_ttm *ttm); + +extern void intel_init_chipset_flush_compat(struct drm_device *dev); +extern void intel_fini_chipset_flush_compat(struct drm_device *dev); #define I915_READ(reg) DRM_READ32(dev_priv->mmio_map, (reg)) #define I915_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, (reg), (val)) -#define I915_READ16(reg) DRM_READ16(dev_priv->mmio_map, (reg)) +#define I915_READ16(reg) DRM_READ16(dev_priv->mmio_map, (reg)) #define I915_WRITE16(reg,val) DRM_WRITE16(dev_priv->mmio_map, (reg), (val)) #define I915_VERBOSE 0 @@ -200,7 +247,7 @@ extern void i915_mem_release(struct drm_device * dev, extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); -#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23)) +#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23)) #define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23)) #define CMD_REPORT_HEAD (7<<23) #define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1) @@ -217,8 +264,9 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); #define I915REG_HWSTAM 0x02098 #define I915REG_INT_IDENTITY_R 0x020a4 -#define I915REG_INT_MASK_R 0x020a8 +#define I915REG_INT_MASK_R 0x020a8 #define I915REG_INT_ENABLE_R 0x020a0 +#define I915REG_INSTPM 0x020c0 #define I915REG_PIPEASTAT 0x70024 #define I915REG_PIPEBSTAT 0x71024 @@ -229,7 +277,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); #define SRX_INDEX 0x3c4 
#define SRX_DATA 0x3c5 #define SR01 1 -#define SR01_SCREEN_OFF (1<<5) +#define SR01_SCREEN_OFF (1<<5) #define PPCR 0x61204 #define PPCR_ON (1<<0) @@ -249,25 +297,25 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); #define ADPA_DPMS_OFF (3<<10) #define NOPID 0x2094 -#define LP_RING 0x2030 -#define HP_RING 0x2040 -#define RING_TAIL 0x00 +#define LP_RING 0x2030 +#define HP_RING 0x2040 +#define RING_TAIL 0x00 #define TAIL_ADDR 0x001FFFF8 -#define RING_HEAD 0x04 -#define HEAD_WRAP_COUNT 0xFFE00000 -#define HEAD_WRAP_ONE 0x00200000 -#define HEAD_ADDR 0x001FFFFC -#define RING_START 0x08 -#define START_ADDR 0x0xFFFFF000 -#define RING_LEN 0x0C -#define RING_NR_PAGES 0x001FF000 -#define RING_REPORT_MASK 0x00000006 -#define RING_REPORT_64K 0x00000002 -#define RING_REPORT_128K 0x00000004 -#define RING_NO_REPORT 0x00000000 -#define RING_VALID_MASK 0x00000001 -#define RING_VALID 0x00000001 -#define RING_INVALID 0x00000000 +#define RING_HEAD 0x04 +#define HEAD_WRAP_COUNT 0xFFE00000 +#define HEAD_WRAP_ONE 0x00200000 +#define HEAD_ADDR 0x001FFFFC +#define RING_START 0x08 +#define START_ADDR 0x0xFFFFF000 +#define RING_LEN 0x0C +#define RING_NR_PAGES 0x001FF000 +#define RING_REPORT_MASK 0x00000006 +#define RING_REPORT_64K 0x00000002 +#define RING_REPORT_128K 0x00000004 +#define RING_NO_REPORT 0x00000000 +#define RING_VALID_MASK 0x00000001 +#define RING_VALID 0x00000001 +#define RING_INVALID 0x00000000 #define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19)) #define SC_UPDATE_SCISSOR (0x1<<1) @@ -294,9 +342,9 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); #define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21) #define XY_SRC_COPY_BLT_WRITE_RGB (1<<20) -#define MI_BATCH_BUFFER ((0x30<<23)|1) -#define MI_BATCH_BUFFER_START (0x31<<23) -#define MI_BATCH_BUFFER_END (0xA<<23) +#define MI_BATCH_BUFFER ((0x30<<23)|1) +#define MI_BATCH_BUFFER_START (0x31<<23) +#define MI_BATCH_BUFFER_END (0xA<<23) #define MI_BATCH_NON_SECURE (1) #define MI_BATCH_NON_SECURE_I965 (1<<8) @@ -311,6 +359,16 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); #define CMD_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1) -#define READ_BREADCRUMB(dev_priv) (((u32 *)(dev_priv->hw_status_page))[5]) +#define CMD_MI_FLUSH (0x04 << 23) +#define MI_NO_WRITE_FLUSH (1 << 2) +#define MI_READ_FLUSH (1 << 0) +#define MI_EXE_FLUSH (1 << 1) +#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */ +#define MI_SCENE_COUNT (1 << 3) /* just increment scene count */ + +#define BREADCRUMB_BITS 31 +#define BREADCRUMB_MASK ((1U << BREADCRUMB_BITS) - 1) +#define READ_BREADCRUMB(dev_priv) (((volatile u32 *)(dev_priv->hw_status_page))[5]) +#define READ_HWSP(dev_priv, reg) (((volatile u32 *)(dev_priv->hw_status_page))[reg]) #endif diff --git a/drivers/char/drm/i915_fence.c b/drivers/char/drm/i915_fence.c new file mode 100644 index 0000000..f2c2982 --- /dev/null +++ b/drivers/char/drm/i915_fence.c @@ -0,0 +1,158 @@ +/************************************************************************** + * + * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * + **************************************************************************/ +/* + * Authors: Thomas Hellström + */ + +#include "drmP.h" +#include "drm.h" +#include "i915_drm.h" +#include "i915_drv.h" + +/* + * Implements an intel sync flush operation. + */ + +static void i915_perform_flush(struct drm_device * dev) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_class_manager *fc = &fm->fence_class[0]; + struct drm_fence_driver *driver = dev->driver->fence_driver; + uint32_t flush_flags = 0; + uint32_t flush_sequence = 0; + uint32_t i_status; + uint32_t diff; + uint32_t sequence; + int rwflush; + + if (!dev_priv) + return; + + if (fc->pending_exe_flush) { + sequence = READ_BREADCRUMB(dev_priv); + + /* + * First update fences with the current breadcrumb. 
+ */ + + diff = (sequence - fc->last_exe_flush) & BREADCRUMB_MASK; + if (diff < driver->wrap_diff && diff != 0) { + drm_fence_handler(dev, 0, sequence, + DRM_FENCE_TYPE_EXE, 0); + } + + if (dev_priv->fence_irq_on && !fc->pending_exe_flush) { + i915_user_irq_off(dev_priv); + dev_priv->fence_irq_on = 0; + } else if (!dev_priv->fence_irq_on && fc->pending_exe_flush) { + i915_user_irq_on(dev_priv); + dev_priv->fence_irq_on = 1; + } + } + + if (dev_priv->flush_pending) { + i_status = READ_HWSP(dev_priv, 0); + if ((i_status & (1 << 12)) != + (dev_priv->saved_flush_status & (1 << 12))) { + flush_flags = dev_priv->flush_flags; + flush_sequence = dev_priv->flush_sequence; + dev_priv->flush_pending = 0; + drm_fence_handler(dev, 0, flush_sequence, flush_flags, 0); + } + } + + rwflush = fc->pending_flush & DRM_I915_FENCE_TYPE_RW; + if (rwflush && !dev_priv->flush_pending) { + dev_priv->flush_sequence = (uint32_t) READ_BREADCRUMB(dev_priv); + dev_priv->flush_flags = fc->pending_flush; + dev_priv->saved_flush_status = READ_HWSP(dev_priv, 0); + I915_WRITE(I915REG_INSTPM, (1 << 5) | (1 << 21)); + dev_priv->flush_pending = 1; + fc->pending_flush &= ~DRM_I915_FENCE_TYPE_RW; + } + + if (dev_priv->flush_pending) { + i_status = READ_HWSP(dev_priv, 0); + if ((i_status & (1 << 12)) != + (dev_priv->saved_flush_status & (1 << 12))) { + flush_flags = dev_priv->flush_flags; + flush_sequence = dev_priv->flush_sequence; + dev_priv->flush_pending = 0; + drm_fence_handler(dev, 0, flush_sequence, flush_flags, 0); + } + } + +} + +void i915_poke_flush(struct drm_device * dev, uint32_t class) +{ + struct drm_fence_manager *fm = &dev->fm; + unsigned long flags; + + write_lock_irqsave(&fm->lock, flags); + i915_perform_flush(dev); + write_unlock_irqrestore(&fm->lock, flags); +} + +int i915_fence_emit_sequence(struct drm_device * dev, uint32_t class, uint32_t flags, + uint32_t * sequence, uint32_t * native_type) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + if (!dev_priv) + return -EINVAL; + + i915_emit_irq(dev); + *sequence = (uint32_t) dev_priv->counter; + *native_type = DRM_FENCE_TYPE_EXE; + if (flags & DRM_I915_FENCE_FLAG_FLUSHED) + *native_type |= DRM_I915_FENCE_TYPE_RW; + + return 0; +} + +void i915_fence_handler(struct drm_device * dev) +{ + struct drm_fence_manager *fm = &dev->fm; + + write_lock(&fm->lock); + i915_perform_flush(dev); + write_unlock(&fm->lock); +} + +int i915_fence_has_irq(struct drm_device *dev, uint32_t class, uint32_t flags) +{ + /* + * We have an irq that tells us when we have a new breadcrumb. 
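
The breadcrumb handling above is easier to follow with a concrete example. READ_BREADCRUMB returns a counter the ring writes into dword 5 of the hardware status page, and i915_perform_flush treats a fence sequence as passed when the masked difference from the last signalled value is non-zero and smaller than the fence driver's wrap limit, which keeps the test correct across the 31-bit wrap. The snippet below restates just that arithmetic; the wrap_diff value used here is an illustrative assumption (the real value comes from the i915 fence driver setup, which is not part of this hunk).

/*
 * Sketch of the wrap-safe breadcrumb comparison used in
 * i915_perform_flush() above.  Constants match i915_drv.h.
 */
#include <assert.h>
#include <stdint.h>

#define BREADCRUMB_BITS 31
#define BREADCRUMB_MASK ((1U << BREADCRUMB_BITS) - 1)

static int breadcrumb_passed(uint32_t sequence, uint32_t last, uint32_t wrap_diff)
{
	uint32_t diff = (sequence - last) & BREADCRUMB_MASK;

	return diff != 0 && diff < wrap_diff;
}

int main(void)
{
	uint32_t wrap_diff = 1U << (BREADCRUMB_BITS - 1);	/* assumed value */

	assert(breadcrumb_passed(10, 5, wrap_diff));		/* plain forward progress */
	assert(breadcrumb_passed(3, BREADCRUMB_MASK - 2, wrap_diff)); /* progress across the wrap */
	assert(!breadcrumb_passed(5, 10, wrap_diff));		/* older sequence: not signalled */
	return 0;
}
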
+ */ + + if (class == 0 && flags == DRM_FENCE_TYPE_EXE) + return 1; + + return 0; +} diff --git a/drivers/char/drm/i915_irq.c b/drivers/char/drm/i915_irq.c index a443f4a..1200dd9 100644 --- a/drivers/char/drm/i915_irq.c +++ b/drivers/char/drm/i915_irq.c @@ -234,8 +234,10 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); - if (temp & USER_INT_FLAG) + if (temp & USER_INT_FLAG) { DRM_WAKEUP(&dev_priv->irq_queue); + i915_fence_handler(dev); + } if (temp & (VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG)) { int vblank_pipe = dev_priv->vblank_pipe; @@ -269,7 +271,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) return IRQ_HANDLED; } -static int i915_emit_irq(struct drm_device * dev) +int i915_emit_irq(struct drm_device * dev) { drm_i915_private_t *dev_priv = dev->dev_private; RING_LOCALS; @@ -278,23 +280,37 @@ static int i915_emit_irq(struct drm_device * dev) DRM_DEBUG("%s\n", __FUNCTION__); - dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter; - - if (dev_priv->counter > 0x7FFFFFFFUL) - dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1; + i915_emit_breadcrumb(dev); - BEGIN_LP_RING(6); - OUT_RING(CMD_STORE_DWORD_IDX); - OUT_RING(20); - OUT_RING(dev_priv->counter); - OUT_RING(0); + BEGIN_LP_RING(2); OUT_RING(0); OUT_RING(GFX_OP_USER_INTERRUPT); ADVANCE_LP_RING(); - + return dev_priv->counter; } +void i915_user_irq_on(drm_i915_private_t *dev_priv) +{ + spin_lock(&dev_priv->user_irq_lock); + if (dev_priv->irq_enabled && (++dev_priv->user_irq_refcount == 1)){ + dev_priv->irq_enable_reg |= USER_INT_FLAG; + I915_WRITE16(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg); + } + spin_unlock(&dev_priv->user_irq_lock); +} + +void i915_user_irq_off(drm_i915_private_t *dev_priv) +{ + spin_lock(&dev_priv->user_irq_lock); + if (dev_priv->irq_enabled && (--dev_priv->user_irq_refcount == 0)) { + // dev_priv->irq_enable_reg &= ~USER_INT_FLAG; + // I915_WRITE16(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg); + } + spin_unlock(&dev_priv->user_irq_lock); +} + + static int i915_wait_irq(struct drm_device * dev, int irq_nr) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; @@ -308,8 +324,10 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr) dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; + i915_user_irq_on(dev_priv); DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ, READ_BREADCRUMB(dev_priv) >= irq_nr); + i915_user_irq_off(dev_priv); if (ret == -EBUSY) { DRM_ERROR("%s: EBUSY -- rec: %d emitted: %d\n", @@ -321,7 +339,8 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr) return ret; } -static int i915_driver_vblank_do_wait(struct drm_device *dev, unsigned int *sequence, +static int i915_driver_vblank_do_wait(struct drm_device *dev, + unsigned int *sequence, atomic_t *counter) { drm_i915_private_t *dev_priv = dev->dev_private; @@ -336,7 +355,7 @@ static int i915_driver_vblank_do_wait(struct drm_device *dev, unsigned int *sequ DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ, (((cur_vblank = atomic_read(counter)) - *sequence) <= (1<<23))); - + *sequence = cur_vblank; return ret; @@ -398,15 +417,15 @@ int i915_irq_wait(struct drm_device *dev, void *data, static void i915_enable_interrupt (struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - u16 flag; - flag = 0; + dev_priv->irq_enable_reg = USER_INT_FLAG; if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A) - flag |= VSYNC_PIPEA_FLAG; + dev_priv->irq_enable_reg |= VSYNC_PIPEA_FLAG; if 
(dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B) - flag |= VSYNC_PIPEB_FLAG; + dev_priv->irq_enable_reg |= VSYNC_PIPEB_FLAG; - I915_WRITE16(I915REG_INT_ENABLE_R, USER_INT_FLAG | flag); + I915_WRITE16(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg); + dev_priv->irq_enabled = 1; } /* Set the vblank monitor pipe @@ -423,7 +442,7 @@ int i915_vblank_pipe_set(struct drm_device *dev, void *data, } if (pipe->pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) { - DRM_ERROR("%s called with invalid pipe 0x%x\n", + DRM_ERROR("%s called with invalid pipe 0x%x\n", __FUNCTION__, pipe->pipe); return -EINVAL; } @@ -569,7 +588,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - I915_WRITE16(I915REG_HWSTAM, 0xfffe); + I915_WRITE16(I915REG_HWSTAM, 0xeffe); I915_WRITE16(I915REG_INT_MASK_R, 0x0); I915_WRITE16(I915REG_INT_ENABLE_R, 0x0); } @@ -584,8 +603,18 @@ void i915_driver_irq_postinstall(struct drm_device * dev) if (!dev_priv->vblank_pipe) dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A; + + spin_lock_init(&dev_priv->user_irq_lock); + dev_priv->user_irq_refcount = 0; + i915_enable_interrupt(dev); DRM_INIT_WAITQUEUE(&dev_priv->irq_queue); + + /* + * Initialize the hardware status page IRQ location. + */ + + I915_WRITE(I915REG_INSTPM, ( 1 << 5) | ( 1 << 21)); } void i915_driver_irq_uninstall(struct drm_device * dev) @@ -596,6 +625,7 @@ void i915_driver_irq_uninstall(struct drm_device * dev) if (!dev_priv) return; + dev_priv->irq_enabled = 0; I915_WRITE16(I915REG_HWSTAM, 0xffff); I915_WRITE16(I915REG_INT_MASK_R, 0xffff); I915_WRITE16(I915REG_INT_ENABLE_R, 0x0); diff --git a/drivers/char/drm/i915_mem.c b/drivers/char/drm/i915_mem.c index 56fb9b3..d360896 100644 --- a/drivers/char/drm/i915_mem.c +++ b/drivers/char/drm/i915_mem.c @@ -375,7 +375,7 @@ int i915_mem_destroy_heap( struct drm_device *dev, void *data, DRM_ERROR("get_heap failed"); return -EFAULT; } - + if (!*heap) { DRM_ERROR("heap not initialized?"); return -EFAULT; @@ -384,4 +384,3 @@ int i915_mem_destroy_heap( struct drm_device *dev, void *data, i915_mem_takedown( heap ); return 0; } - diff --git a/drivers/char/drm/mga_dma.c b/drivers/char/drm/mga_dma.c index c567c34..04998b7 100644 --- a/drivers/char/drm/mga_dma.c +++ b/drivers/char/drm/mga_dma.c @@ -493,7 +493,7 @@ static int mga_do_agp_dma_bootstrap(struct drm_device * dev, dma_bs->agp_size); return err; } - + dev_priv->agp_size = agp_size; dev_priv->agp_handle = agp_req.handle; @@ -550,7 +550,7 @@ static int mga_do_agp_dma_bootstrap(struct drm_device * dev, { struct drm_map_list *_entry; unsigned long agp_token = 0; - + list_for_each_entry(_entry, &dev->maplist, head) { if (_entry->map == dev->agp_buffer_map) agp_token = _entry->user_token; @@ -964,7 +964,7 @@ static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup) free_req.handle = dev_priv->agp_handle; drm_agp_free(dev, &free_req); - + dev_priv->agp_textures = NULL; dev_priv->agp_size = 0; dev_priv->agp_handle = 0; diff --git a/drivers/char/drm/mga_drv.h b/drivers/char/drm/mga_drv.h index cd94c04..44a9e66 100644 --- a/drivers/char/drm/mga_drv.h +++ b/drivers/char/drm/mga_drv.h @@ -216,8 +216,8 @@ static inline u32 _MGA_READ(u32 * addr) #define MGA_WRITE( reg, val ) DRM_WRITE32(dev_priv->mmio, (reg), (val)) #endif -#define DWGREG0 0x1c00 -#define DWGREG0_END 0x1dff +#define DWGREG0 0x1c00 +#define DWGREG0_END 0x1dff #define DWGREG1 0x2c00 #define DWGREG1_END 0x2dff @@ -394,22 +394,22 @@ do { \ #define MGA_VINTCLR (1 << 4) #define MGA_VINTEN 
(1 << 5) -#define MGA_ALPHACTRL 0x2c7c -#define MGA_AR0 0x1c60 -#define MGA_AR1 0x1c64 -#define MGA_AR2 0x1c68 -#define MGA_AR3 0x1c6c -#define MGA_AR4 0x1c70 -#define MGA_AR5 0x1c74 -#define MGA_AR6 0x1c78 +#define MGA_ALPHACTRL 0x2c7c +#define MGA_AR0 0x1c60 +#define MGA_AR1 0x1c64 +#define MGA_AR2 0x1c68 +#define MGA_AR3 0x1c6c +#define MGA_AR4 0x1c70 +#define MGA_AR5 0x1c74 +#define MGA_AR6 0x1c78 #define MGA_CXBNDRY 0x1c80 -#define MGA_CXLEFT 0x1ca0 +#define MGA_CXLEFT 0x1ca0 #define MGA_CXRIGHT 0x1ca4 -#define MGA_DMAPAD 0x1c54 -#define MGA_DSTORG 0x2cb8 -#define MGA_DWGCTL 0x1c00 +#define MGA_DMAPAD 0x1c54 +#define MGA_DSTORG 0x2cb8 +#define MGA_DWGCTL 0x1c00 # define MGA_OPCOD_MASK (15 << 0) # define MGA_OPCOD_TRAP (4 << 0) # define MGA_OPCOD_TEXTURE_TRAP (6 << 0) @@ -455,27 +455,27 @@ do { \ # define MGA_CLIPDIS (1 << 31) #define MGA_DWGSYNC 0x2c4c -#define MGA_FCOL 0x1c24 -#define MGA_FIFOSTATUS 0x1e10 -#define MGA_FOGCOL 0x1cf4 +#define MGA_FCOL 0x1c24 +#define MGA_FIFOSTATUS 0x1e10 +#define MGA_FOGCOL 0x1cf4 #define MGA_FXBNDRY 0x1c84 -#define MGA_FXLEFT 0x1ca8 +#define MGA_FXLEFT 0x1ca8 #define MGA_FXRIGHT 0x1cac -#define MGA_ICLEAR 0x1e18 +#define MGA_ICLEAR 0x1e18 # define MGA_SOFTRAPICLR (1 << 0) # define MGA_VLINEICLR (1 << 5) -#define MGA_IEN 0x1e1c +#define MGA_IEN 0x1e1c # define MGA_SOFTRAPIEN (1 << 0) # define MGA_VLINEIEN (1 << 5) -#define MGA_LEN 0x1c5c +#define MGA_LEN 0x1c5c #define MGA_MACCESS 0x1c04 -#define MGA_PITCH 0x1c8c -#define MGA_PLNWT 0x1c1c -#define MGA_PRIMADDRESS 0x1e58 +#define MGA_PITCH 0x1c8c +#define MGA_PLNWT 0x1c1c +#define MGA_PRIMADDRESS 0x1e58 # define MGA_DMA_GENERAL (0 << 0) # define MGA_DMA_BLIT (1 << 0) # define MGA_DMA_VECTOR (2 << 0) @@ -487,43 +487,43 @@ do { \ # define MGA_PRIMPTREN0 (1 << 0) # define MGA_PRIMPTREN1 (1 << 1) -#define MGA_RST 0x1e40 +#define MGA_RST 0x1e40 # define MGA_SOFTRESET (1 << 0) # define MGA_SOFTEXTRST (1 << 1) -#define MGA_SECADDRESS 0x2c40 -#define MGA_SECEND 0x2c44 -#define MGA_SETUPADDRESS 0x2cd0 -#define MGA_SETUPEND 0x2cd4 +#define MGA_SECADDRESS 0x2c40 +#define MGA_SECEND 0x2c44 +#define MGA_SETUPADDRESS 0x2cd0 +#define MGA_SETUPEND 0x2cd4 #define MGA_SGN 0x1c58 #define MGA_SOFTRAP 0x2c48 -#define MGA_SRCORG 0x2cb4 +#define MGA_SRCORG 0x2cb4 # define MGA_SRMMAP_MASK (1 << 0) # define MGA_SRCMAP_FB (0 << 0) # define MGA_SRCMAP_SYSMEM (1 << 0) # define MGA_SRCACC_MASK (1 << 1) # define MGA_SRCACC_PCI (0 << 1) # define MGA_SRCACC_AGP (1 << 1) -#define MGA_STATUS 0x1e14 +#define MGA_STATUS 0x1e14 # define MGA_SOFTRAPEN (1 << 0) # define MGA_VSYNCPEN (1 << 4) # define MGA_VLINEPEN (1 << 5) # define MGA_DWGENGSTS (1 << 16) # define MGA_ENDPRDMASTS (1 << 17) #define MGA_STENCIL 0x2cc8 -#define MGA_STENCILCTL 0x2ccc +#define MGA_STENCILCTL 0x2ccc -#define MGA_TDUALSTAGE0 0x2cf8 -#define MGA_TDUALSTAGE1 0x2cfc -#define MGA_TEXBORDERCOL 0x2c5c -#define MGA_TEXCTL 0x2c30 +#define MGA_TDUALSTAGE0 0x2cf8 +#define MGA_TDUALSTAGE1 0x2cfc +#define MGA_TEXBORDERCOL 0x2c5c +#define MGA_TEXCTL 0x2c30 #define MGA_TEXCTL2 0x2c3c # define MGA_DUALTEX (1 << 7) # define MGA_G400_TC2_MAGIC (1 << 15) # define MGA_MAP1_ENABLE (1 << 31) -#define MGA_TEXFILTER 0x2c58 -#define MGA_TEXHEIGHT 0x2c2c -#define MGA_TEXORG 0x2c24 +#define MGA_TEXFILTER 0x2c58 +#define MGA_TEXHEIGHT 0x2c2c +#define MGA_TEXORG 0x2c24 # define MGA_TEXORGMAP_MASK (1 << 0) # define MGA_TEXORGMAP_FB (0 << 0) # define MGA_TEXORGMAP_SYSMEM (1 << 0) @@ -534,45 +534,45 @@ do { \ #define MGA_TEXORG2 0x2ca8 #define MGA_TEXORG3 0x2cac #define MGA_TEXORG4 0x2cb0 
-#define MGA_TEXTRANS 0x2c34 -#define MGA_TEXTRANSHIGH 0x2c38 -#define MGA_TEXWIDTH 0x2c28 - -#define MGA_WACCEPTSEQ 0x1dd4 -#define MGA_WCODEADDR 0x1e6c -#define MGA_WFLAG 0x1dc4 -#define MGA_WFLAG1 0x1de0 +#define MGA_TEXTRANS 0x2c34 +#define MGA_TEXTRANSHIGH 0x2c38 +#define MGA_TEXWIDTH 0x2c28 + +#define MGA_WACCEPTSEQ 0x1dd4 +#define MGA_WCODEADDR 0x1e6c +#define MGA_WFLAG 0x1dc4 +#define MGA_WFLAG1 0x1de0 #define MGA_WFLAGNB 0x1e64 -#define MGA_WFLAGNB1 0x1e08 +#define MGA_WFLAGNB1 0x1e08 #define MGA_WGETMSB 0x1dc8 -#define MGA_WIADDR 0x1dc0 +#define MGA_WIADDR 0x1dc0 #define MGA_WIADDR2 0x1dd8 # define MGA_WMODE_SUSPEND (0 << 0) # define MGA_WMODE_RESUME (1 << 0) # define MGA_WMODE_JUMP (2 << 0) # define MGA_WMODE_START (3 << 0) # define MGA_WAGP_ENABLE (1 << 2) -#define MGA_WMISC 0x1e70 +#define MGA_WMISC 0x1e70 # define MGA_WUCODECACHE_ENABLE (1 << 0) # define MGA_WMASTER_ENABLE (1 << 1) # define MGA_WCACHEFLUSH_ENABLE (1 << 3) #define MGA_WVRTXSZ 0x1dcc -#define MGA_YBOT 0x1c9c -#define MGA_YDST 0x1c90 +#define MGA_YBOT 0x1c9c +#define MGA_YDST 0x1c90 #define MGA_YDSTLEN 0x1c88 #define MGA_YDSTORG 0x1c94 -#define MGA_YTOP 0x1c98 +#define MGA_YTOP 0x1c98 -#define MGA_ZORG 0x1c0c +#define MGA_ZORG 0x1c0c /* This finishes the current batch of commands */ -#define MGA_EXEC 0x0100 +#define MGA_EXEC 0x0100 /* AGP PLL encoding (for G200 only). */ -#define MGA_AGP_PLL 0x1e4c +#define MGA_AGP_PLL 0x1e4c # define MGA_AGP2XPLL_DISABLE (0 << 0) # define MGA_AGP2XPLL_ENABLE (1 << 0) diff --git a/drivers/char/drm/mga_state.c b/drivers/char/drm/mga_state.c index 5ec8b61..4cb95ec 100644 --- a/drivers/char/drm/mga_state.c +++ b/drivers/char/drm/mga_state.c @@ -150,8 +150,8 @@ static __inline__ void mga_g400_emit_tex0(drm_mga_private_t * dev_priv) drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[0]; DMA_LOCALS; -/* printk("mga_g400_emit_tex0 %x %x %x\n", tex->texorg, */ -/* tex->texctl, tex->texctl2); */ +/* printk("mga_g400_emit_tex0 %x %x %x\n", tex->texorg, */ +/* tex->texctl, tex->texctl2); */ BEGIN_DMA(6); @@ -190,8 +190,8 @@ static __inline__ void mga_g400_emit_tex1(drm_mga_private_t * dev_priv) drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[1]; DMA_LOCALS; -/* printk("mga_g400_emit_tex1 %x %x %x\n", tex->texorg, */ -/* tex->texctl, tex->texctl2); */ +/* printk("mga_g400_emit_tex1 %x %x %x\n", tex->texorg, */ +/* tex->texctl, tex->texctl2); */ BEGIN_DMA(5); @@ -256,7 +256,7 @@ static __inline__ void mga_g400_emit_pipe(drm_mga_private_t * dev_priv) unsigned int pipe = sarea_priv->warp_pipe; DMA_LOCALS; -/* printk("mga_g400_emit_pipe %x\n", pipe); */ +/* printk("mga_g400_emit_pipe %x\n", pipe); */ BEGIN_DMA(10); diff --git a/drivers/char/drm/r128_cce.c b/drivers/char/drm/r128_cce.c index 7d550ab..90972cc 100644 --- a/drivers/char/drm/r128_cce.c +++ b/drivers/char/drm/r128_cce.c @@ -1,4 +1,4 @@ -/* r128_cce.c -- ATI Rage 128 driver -*- linux-c -*- +/* r128_cce.c -- ATI Rage 128 driver -*- linux-c -*- * Created: Wed Apr 5 19:24:19 2000 by kevin@precisioninsight.com */ /* diff --git a/drivers/char/drm/r128_drv.h b/drivers/char/drm/r128_drv.h index 5041bd8..a53082e 100644 --- a/drivers/char/drm/r128_drv.h +++ b/drivers/char/drm/r128_drv.h @@ -493,7 +493,7 @@ do { \ write * sizeof(u32) ); \ } \ if (((dev_priv->ring.tail + _nr) & tail_mask) != write) { \ - DRM_ERROR( \ + DRM_ERROR( \ "ADVANCE_RING(): mismatch: nr: %x write: %x line: %d\n", \ ((dev_priv->ring.tail + _nr) & tail_mask), \ write, __LINE__); \ diff --git a/drivers/char/drm/r300_cmdbuf.c b/drivers/char/drm/r300_cmdbuf.c index 
59b2944..8cd8271 100644 --- a/drivers/char/drm/r300_cmdbuf.c +++ b/drivers/char/drm/r300_cmdbuf.c @@ -486,7 +486,7 @@ static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv, if (cmd[0] & 0x8000) { u32 offset; - if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL + if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) { offset = cmd[2] << 10; ret = !radeon_check_offset(dev_priv, offset); @@ -504,7 +504,7 @@ static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv, DRM_ERROR("Invalid bitblt second offset is %08X\n", offset); return -EINVAL; } - + } } @@ -723,54 +723,54 @@ static int r300_scratch(drm_radeon_private_t *dev_priv, u32 *ref_age_base; u32 i, buf_idx, h_pending; RING_LOCALS; - - if (cmdbuf->bufsz < + + if (cmdbuf->bufsz < (sizeof(u64) + header.scratch.n_bufs * sizeof(buf_idx))) { return -EINVAL; } - + if (header.scratch.reg >= 5) { return -EINVAL; } - + dev_priv->scratch_ages[header.scratch.reg]++; - + ref_age_base = (u32 *)(unsigned long)*((uint64_t *)cmdbuf->buf); - + cmdbuf->buf += sizeof(u64); cmdbuf->bufsz -= sizeof(u64); - + for (i=0; i < header.scratch.n_bufs; i++) { buf_idx = *(u32 *)cmdbuf->buf; buf_idx *= 2; /* 8 bytes per buf */ - + if (DRM_COPY_TO_USER(ref_age_base + buf_idx, &dev_priv->scratch_ages[header.scratch.reg], sizeof(u32))) { return -EINVAL; } - + if (DRM_COPY_FROM_USER(&h_pending, ref_age_base + buf_idx + 1, sizeof(u32))) { return -EINVAL; } - + if (h_pending == 0) { return -EINVAL; } - + h_pending--; - + if (DRM_COPY_TO_USER(ref_age_base + buf_idx + 1, &h_pending, sizeof(u32))) { return -EINVAL; } - + cmdbuf->buf += sizeof(buf_idx); cmdbuf->bufsz -= sizeof(buf_idx); } - + BEGIN_RING(2); OUT_RING( CP_PACKET0( RADEON_SCRATCH_REG0 + header.scratch.reg * 4, 0 ) ); OUT_RING( dev_priv->scratch_ages[header.scratch.reg] ); ADVANCE_RING(); - + return 0; } @@ -919,7 +919,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev, goto cleanup; } break; - + default: DRM_ERROR("bad cmd_type %i at %p\n", header.header.cmd_type, diff --git a/drivers/char/drm/r300_reg.h b/drivers/char/drm/r300_reg.h index 3ae57ec..9a71457 100644 --- a/drivers/char/drm/r300_reg.h +++ b/drivers/char/drm/r300_reg.h @@ -853,13 +853,13 @@ USE OR OTHER DEALINGS IN THE SOFTWARE. # define R300_TX_FORMAT_W8Z8Y8X8 0xC # define R300_TX_FORMAT_W2Z10Y10X10 0xD # define R300_TX_FORMAT_W16Z16Y16X16 0xE -# define R300_TX_FORMAT_DXT1 0xF -# define R300_TX_FORMAT_DXT3 0x10 -# define R300_TX_FORMAT_DXT5 0x11 +# define R300_TX_FORMAT_DXT1 0xF +# define R300_TX_FORMAT_DXT3 0x10 +# define R300_TX_FORMAT_DXT5 0x11 # define R300_TX_FORMAT_D3DMFT_CxV8U8 0x12 /* no swizzle */ -# define R300_TX_FORMAT_A8R8G8B8 0x13 /* no swizzle */ -# define R300_TX_FORMAT_B8G8_B8G8 0x14 /* no swizzle */ -# define R300_TX_FORMAT_G8R8_G8B8 0x15 /* no swizzle */ +# define R300_TX_FORMAT_A8R8G8B8 0x13 /* no swizzle */ +# define R300_TX_FORMAT_B8G8_B8G8 0x14 /* no swizzle */ +# define R300_TX_FORMAT_G8R8_G8B8 0x15 /* no swizzle */ /* 0x16 - some 16 bit green format.. ?? */ # define R300_TX_FORMAT_UNK25 (1 << 25) /* no swizzle */ # define R300_TX_FORMAT_CUBIC_MAP (1 << 26) @@ -867,19 +867,19 @@ USE OR OTHER DEALINGS IN THE SOFTWARE. 
/* gap */ /* Floating point formats */ /* Note - hardware supports both 16 and 32 bit floating point */ -# define R300_TX_FORMAT_FL_I16 0x18 -# define R300_TX_FORMAT_FL_I16A16 0x19 +# define R300_TX_FORMAT_FL_I16 0x18 +# define R300_TX_FORMAT_FL_I16A16 0x19 # define R300_TX_FORMAT_FL_R16G16B16A16 0x1A -# define R300_TX_FORMAT_FL_I32 0x1B -# define R300_TX_FORMAT_FL_I32A32 0x1C +# define R300_TX_FORMAT_FL_I32 0x1B +# define R300_TX_FORMAT_FL_I32A32 0x1C # define R300_TX_FORMAT_FL_R32G32B32A32 0x1D /* alpha modes, convenience mostly */ /* if you have alpha, pick constant appropriate to the number of channels (1 for I8, 2 for I8A8, 4 for R8G8B8A8, etc */ -# define R300_TX_FORMAT_ALPHA_1CH 0x000 -# define R300_TX_FORMAT_ALPHA_2CH 0x200 -# define R300_TX_FORMAT_ALPHA_4CH 0x600 -# define R300_TX_FORMAT_ALPHA_NONE 0xA00 +# define R300_TX_FORMAT_ALPHA_1CH 0x000 +# define R300_TX_FORMAT_ALPHA_2CH 0x200 +# define R300_TX_FORMAT_ALPHA_4CH 0x600 +# define R300_TX_FORMAT_ALPHA_NONE 0xA00 /* Swizzling */ /* constants */ # define R300_TX_FORMAT_X 0 @@ -1360,11 +1360,11 @@ USE OR OTHER DEALINGS IN THE SOFTWARE. # define R300_RB3D_Z_DISABLED_2 0x00000014 # define R300_RB3D_Z_TEST 0x00000012 # define R300_RB3D_Z_TEST_AND_WRITE 0x00000016 -# define R300_RB3D_Z_WRITE_ONLY 0x00000006 +# define R300_RB3D_Z_WRITE_ONLY 0x00000006 # define R300_RB3D_Z_TEST 0x00000012 # define R300_RB3D_Z_TEST_AND_WRITE 0x00000016 -# define R300_RB3D_Z_WRITE_ONLY 0x00000006 +# define R300_RB3D_Z_WRITE_ONLY 0x00000006 # define R300_RB3D_STENCIL_ENABLE 0x00000001 #define R300_RB3D_ZSTENCIL_CNTL_1 0x4F04 diff --git a/drivers/char/drm/radeon_cp.c b/drivers/char/drm/radeon_cp.c index 24fca8e..df22302 100644 --- a/drivers/char/drm/radeon_cp.c +++ b/drivers/char/drm/radeon_cp.c @@ -1127,7 +1127,7 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev, { u32 ring_start, cur_read_ptr; u32 tmp; - + /* Initialize the memory controller. With new memory map, the fb location * is not changed, it should have been properly initialized already. 
Part * of the problem is that the code below is bogus, assuming the GART is @@ -1358,7 +1358,7 @@ static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on) return; } - tmp = RADEON_READ(RADEON_AIC_CNTL); + tmp = RADEON_READ(RADEON_AIC_CNTL); if (on) { RADEON_WRITE(RADEON_AIC_CNTL, @@ -1583,7 +1583,7 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init) dev_priv->fb_location = (RADEON_READ(RADEON_MC_FB_LOCATION) & 0xffff) << 16; - dev_priv->fb_size = + dev_priv->fb_size = ((RADEON_READ(RADEON_MC_FB_LOCATION) & 0xffff0000u) + 0x10000) - dev_priv->fb_location; @@ -1630,7 +1630,7 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init) ((base + dev_priv->gart_size) & 0xfffffffful) < base) base = dev_priv->fb_location - dev_priv->gart_size; - } + } dev_priv->gart_vm_start = base & 0xffc00000u; if (dev_priv->gart_vm_start != base) DRM_INFO("GART aligned down from 0x%08x to 0x%08x\n", diff --git a/drivers/char/drm/radeon_drm.h b/drivers/char/drm/radeon_drm.h index 5a8e23f..5f8d042 100644 --- a/drivers/char/drm/radeon_drm.h +++ b/drivers/char/drm/radeon_drm.h @@ -223,10 +223,10 @@ typedef union { #define R300_CMD_CP_DELAY 5 #define R300_CMD_DMA_DISCARD 6 #define R300_CMD_WAIT 7 -# define R300_WAIT_2D 0x1 -# define R300_WAIT_3D 0x2 -# define R300_WAIT_2D_CLEAN 0x3 -# define R300_WAIT_3D_CLEAN 0x4 +# define R300_WAIT_2D 0x1 +# define R300_WAIT_3D 0x2 +# define R300_WAIT_2D_CLEAN 0x3 +# define R300_WAIT_3D_CLEAN 0x4 #define R300_CMD_SCRATCH 8 typedef union { @@ -722,7 +722,7 @@ typedef struct drm_radeon_surface_free { unsigned int address; } drm_radeon_surface_free_t; -#define DRM_RADEON_VBLANK_CRTC1 1 -#define DRM_RADEON_VBLANK_CRTC2 2 +#define DRM_RADEON_VBLANK_CRTC1 1 +#define DRM_RADEON_VBLANK_CRTC2 2 #endif diff --git a/drivers/char/drm/radeon_drv.h b/drivers/char/drm/radeon_drv.h index bfbb60a..aae0308 100644 --- a/drivers/char/drm/radeon_drv.h +++ b/drivers/char/drm/radeon_drv.h @@ -429,7 +429,7 @@ extern int r300_do_cp_cmdbuf(struct drm_device * dev, #define RADEON_PCIE_INDEX 0x0030 #define RADEON_PCIE_DATA 0x0034 #define RADEON_PCIE_TX_GART_CNTL 0x10 -# define RADEON_PCIE_TX_GART_EN (1 << 0) +# define RADEON_PCIE_TX_GART_EN (1 << 0) # define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_PASS_THRU (0<<1) # define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_CLAMP_LO (1<<1) # define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD (3<<1) @@ -439,7 +439,7 @@ extern int r300_do_cp_cmdbuf(struct drm_device * dev, # define RADEON_PCIE_TX_GART_INVALIDATE_TLB (1<<8) #define RADEON_PCIE_TX_DISCARD_RD_ADDR_LO 0x11 #define RADEON_PCIE_TX_DISCARD_RD_ADDR_HI 0x12 -#define RADEON_PCIE_TX_GART_BASE 0x13 +#define RADEON_PCIE_TX_GART_BASE 0x13 #define RADEON_PCIE_TX_GART_START_LO 0x14 #define RADEON_PCIE_TX_GART_START_HI 0x15 #define RADEON_PCIE_TX_GART_END_LO 0x16 @@ -512,12 +512,12 @@ extern int r300_do_cp_cmdbuf(struct drm_device * dev, #define RADEON_GEN_INT_STATUS 0x0044 # define RADEON_CRTC_VBLANK_STAT (1 << 0) -# define RADEON_CRTC_VBLANK_STAT_ACK (1 << 0) +# define RADEON_CRTC_VBLANK_STAT_ACK (1 << 0) # define RADEON_CRTC2_VBLANK_STAT (1 << 9) -# define RADEON_CRTC2_VBLANK_STAT_ACK (1 << 9) +# define RADEON_CRTC2_VBLANK_STAT_ACK (1 << 9) # define RADEON_GUI_IDLE_INT_TEST_ACK (1 << 19) # define RADEON_SW_INT_TEST (1 << 25) -# define RADEON_SW_INT_TEST_ACK (1 << 25) +# define RADEON_SW_INT_TEST_ACK (1 << 25) # define RADEON_SW_INT_FIRE (1 << 26) #define RADEON_HOST_PATH_CNTL 0x0130 @@ -1133,7 +1133,7 @@ do { \ write, dev_priv->ring.tail ); \ } \ if 
(((dev_priv->ring.tail + _nr) & mask) != write) { \ - DRM_ERROR( \ + DRM_ERROR( \ "ADVANCE_RING(): mismatch: nr: %x write: %x line: %d\n", \ ((dev_priv->ring.tail + _nr) & mask), \ write, __LINE__); \ diff --git a/drivers/char/drm/savage_state.c b/drivers/char/drm/savage_state.c index bf8e0e1..5f6238f 100644 --- a/drivers/char/drm/savage_state.c +++ b/drivers/char/drm/savage_state.c @@ -512,7 +512,7 @@ static int savage_dispatch_vb_prim(drm_savage_private_t * dev_priv, DMA_DRAW_PRIMITIVE(count, prim, skip); if (vb_stride == vtx_size) { - DMA_COPY(&vtxbuf[vb_stride * start], + DMA_COPY(&vtxbuf[vb_stride * start], vtx_size * count); } else { for (i = start; i < start + count; ++i) { @@ -742,7 +742,7 @@ static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv, while (n != 0) { /* Can emit up to 255 vertices (85 triangles) at once. */ unsigned int count = n > 255 ? 255 : n; - + /* Check indices */ for (i = 0; i < count; ++i) { if (idx[i] > vb_size / (vb_stride * 4)) { @@ -933,7 +933,7 @@ static int savage_dispatch_draw(drm_savage_private_t * dev_priv, /* j was check in savage_bci_cmdbuf */ ret = savage_dispatch_vb_idx(dev_priv, &cmd_header, (const uint16_t *)cmdbuf, - (const uint32_t *)vtxbuf, vb_size, + (const uint32_t *)vtxbuf, vb_size, vb_stride); cmdbuf += j; break; diff --git a/drivers/char/drm/sis_mm.c b/drivers/char/drm/sis_mm.c index a6b7ccd..42836f4 100644 --- a/drivers/char/drm/sis_mm.c +++ b/drivers/char/drm/sis_mm.c @@ -249,7 +249,7 @@ int sis_idle(struct drm_device *dev) return 0; } } - + /* * Implement a device switch here if needed */ diff --git a/drivers/char/drm/via_dmablit.c b/drivers/char/drm/via_dmablit.c index c6fd16f..33c5197 100644 --- a/drivers/char/drm/via_dmablit.c +++ b/drivers/char/drm/via_dmablit.c @@ -1,5 +1,5 @@ /* via_dmablit.c -- PCI DMA BitBlt support for the VIA Unichrome/Pro - * + * * Copyright (C) 2005 Thomas Hellstrom, All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -16,22 +16,22 @@ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. * - * Authors: + * Authors: * Thomas Hellstrom. * Partially based on code obtained from Digeo Inc. */ /* - * Unmaps the DMA mappings. - * FIXME: Is this a NoOp on x86? Also - * FIXME: What happens if this one is called and a pending blit has previously done - * the same DMA mappings? + * Unmaps the DMA mappings. + * FIXME: Is this a NoOp on x86? Also + * FIXME: What happens if this one is called and a pending blit has previously done + * the same DMA mappings? 
*/ #include "drmP.h" @@ -65,7 +65,7 @@ via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg) int num_desc = vsg->num_desc; unsigned cur_descriptor_page = num_desc / vsg->descriptors_per_page; unsigned descriptor_this_page = num_desc % vsg->descriptors_per_page; - drm_via_descriptor_t *desc_ptr = vsg->desc_pages[cur_descriptor_page] + + drm_via_descriptor_t *desc_ptr = vsg->desc_pages[cur_descriptor_page] + descriptor_this_page; dma_addr_t next = vsg->chain_start; @@ -73,7 +73,7 @@ via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg) if (descriptor_this_page-- == 0) { cur_descriptor_page--; descriptor_this_page = vsg->descriptors_per_page - 1; - desc_ptr = vsg->desc_pages[cur_descriptor_page] + + desc_ptr = vsg->desc_pages[cur_descriptor_page] + descriptor_this_page; } dma_unmap_single(&pdev->dev, next, sizeof(*desc_ptr), DMA_TO_DEVICE); @@ -93,7 +93,7 @@ via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg) static void via_map_blit_for_device(struct pci_dev *pdev, const drm_via_dmablit_t *xfer, - drm_via_sg_info_t *vsg, + drm_via_sg_info_t *vsg, int mode) { unsigned cur_descriptor_page = 0; @@ -110,7 +110,7 @@ via_map_blit_for_device(struct pci_dev *pdev, dma_addr_t next = 0 | VIA_DMA_DPR_EC; drm_via_descriptor_t *desc_ptr = NULL; - if (mode == 1) + if (mode == 1) desc_ptr = vsg->desc_pages[cur_descriptor_page]; for (cur_line = 0; cur_line < xfer->num_lines; ++cur_line) { @@ -118,24 +118,24 @@ via_map_blit_for_device(struct pci_dev *pdev, line_len = xfer->line_length; cur_fb = fb_addr; cur_mem = mem_addr; - + while (line_len > 0) { remaining_len = min(PAGE_SIZE-VIA_PGOFF(cur_mem), line_len); line_len -= remaining_len; if (mode == 1) { - desc_ptr->mem_addr = - dma_map_page(&pdev->dev, - vsg->pages[VIA_PFN(cur_mem) - + desc_ptr->mem_addr = + dma_map_page(&pdev->dev, + vsg->pages[VIA_PFN(cur_mem) - VIA_PFN(first_addr)], - VIA_PGOFF(cur_mem), remaining_len, + VIA_PGOFF(cur_mem), remaining_len, vsg->direction); desc_ptr->dev_addr = cur_fb; - + desc_ptr->size = remaining_len; desc_ptr->next = (uint32_t) next; - next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr), + next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr), DMA_TO_DEVICE); desc_ptr++; if (++num_descriptors_this_page >= vsg->descriptors_per_page) { @@ -143,12 +143,12 @@ via_map_blit_for_device(struct pci_dev *pdev, desc_ptr = vsg->desc_pages[++cur_descriptor_page]; } } - + num_desc++; cur_mem += remaining_len; cur_fb += remaining_len; } - + mem_addr += xfer->mem_stride; fb_addr += xfer->fb_stride; } @@ -161,14 +161,14 @@ via_map_blit_for_device(struct pci_dev *pdev, } /* - * Function that frees up all resources for a blit. It is usable even if the + * Function that frees up all resources for a blit. It is usable even if the * blit info has only been partially built as long as the status enum is consistent * with the actual status of the used resources. */ static void -via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg) +via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg) { struct page *page; int i; @@ -185,7 +185,7 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg) case dr_via_pages_locked: for (i=0; inum_pages; ++i) { if ( NULL != (page = vsg->pages[i])) { - if (! PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction)) + if (! 
PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction)) SetPageDirty(page); page_cache_release(page); } @@ -200,7 +200,7 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg) vsg->bounce_buffer = NULL; } vsg->free_on_sequence = 0; -} +} /* * Fire a blit engine. @@ -213,7 +213,7 @@ via_fire_dmablit(struct drm_device *dev, drm_via_sg_info_t *vsg, int engine) VIA_WRITE(VIA_PCI_DMA_MAR0 + engine*0x10, 0); VIA_WRITE(VIA_PCI_DMA_DAR0 + engine*0x10, 0); - VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD | + VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD | VIA_DMA_CSR_DE); VIA_WRITE(VIA_PCI_DMA_MR0 + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE); VIA_WRITE(VIA_PCI_DMA_BCR0 + engine*0x10, 0); @@ -233,9 +233,9 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer) { int ret; unsigned long first_pfn = VIA_PFN(xfer->mem_addr); - vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride -1)) - + vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride -1)) - first_pfn + 1; - + if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages))) return -ENOMEM; memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages); @@ -248,7 +248,7 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer) up_read(¤t->mm->mmap_sem); if (ret != vsg->num_pages) { - if (ret < 0) + if (ret < 0) return ret; vsg->state = dr_via_pages_locked; return -EINVAL; @@ -264,21 +264,21 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer) * quite large for some blits, and pages don't need to be contingous. */ -static int +static int via_alloc_desc_pages(drm_via_sg_info_t *vsg) { int i; - + vsg->descriptors_per_page = PAGE_SIZE / sizeof( drm_via_descriptor_t); - vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) / + vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) / vsg->descriptors_per_page; if (NULL == (vsg->desc_pages = kcalloc(vsg->num_desc_pages, sizeof(void *), GFP_KERNEL))) return -ENOMEM; - + vsg->state = dr_via_desc_pages_alloc; for (i=0; inum_desc_pages; ++i) { - if (NULL == (vsg->desc_pages[i] = + if (NULL == (vsg->desc_pages[i] = (drm_via_descriptor_t *) __get_free_page(GFP_KERNEL))) return -ENOMEM; } @@ -286,7 +286,7 @@ via_alloc_desc_pages(drm_via_sg_info_t *vsg) vsg->num_desc); return 0; } - + static void via_abort_dmablit(struct drm_device *dev, int engine) { @@ -300,7 +300,7 @@ via_dmablit_engine_off(struct drm_device *dev, int engine) { drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; - VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD); + VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD); } @@ -311,7 +311,7 @@ via_dmablit_engine_off(struct drm_device *dev, int engine) * task. Basically the task of the interrupt handler is to submit a new blit to the engine, while * the workqueue task takes care of processing associated with the old blit. 
*/ - + void via_dmablit_handler(struct drm_device *dev, int engine, int from_irq) { @@ -331,19 +331,19 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq) spin_lock_irqsave(&blitq->blit_lock, irqsave); } - done_transfer = blitq->is_active && + done_transfer = blitq->is_active && (( status = VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD); - done_transfer = done_transfer || ( blitq->aborting && !(status & VIA_DMA_CSR_DE)); + done_transfer = done_transfer || ( blitq->aborting && !(status & VIA_DMA_CSR_DE)); cur = blitq->cur; if (done_transfer) { blitq->blits[cur]->aborted = blitq->aborting; blitq->done_blit_handle++; - DRM_WAKEUP(blitq->blit_queue + cur); + DRM_WAKEUP(blitq->blit_queue + cur); cur++; - if (cur >= VIA_NUM_BLIT_SLOTS) + if (cur >= VIA_NUM_BLIT_SLOTS) cur = 0; blitq->cur = cur; @@ -355,7 +355,7 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq) blitq->is_active = 0; blitq->aborting = 0; - schedule_work(&blitq->wq); + schedule_work(&blitq->wq); } else if (blitq->is_active && time_after_eq(jiffies, blitq->end)) { @@ -367,7 +367,7 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq) blitq->aborting = 1; blitq->end = jiffies + DRM_HZ; } - + if (!blitq->is_active) { if (blitq->num_outstanding) { via_fire_dmablit(dev, blitq->blits[cur], engine); @@ -383,14 +383,14 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq) } via_dmablit_engine_off(dev, engine); } - } + } if (from_irq) { spin_unlock(&blitq->blit_lock); } else { spin_unlock_irqrestore(&blitq->blit_lock, irqsave); } -} +} @@ -426,13 +426,13 @@ via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_que return active; } - + /* * Sync. Wait for at least three seconds for the blit to be performed. 
*/ static int -via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine) +via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine) { drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; @@ -441,12 +441,12 @@ via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine) int ret = 0; if (via_dmablit_active(blitq, engine, handle, &queue)) { - DRM_WAIT_ON(ret, *queue, 3 * DRM_HZ, + DRM_WAIT_ON(ret, *queue, 3 * DRM_HZ, !via_dmablit_active(blitq, engine, handle, NULL)); } DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n", handle, engine, ret); - + return ret; } @@ -468,12 +468,12 @@ via_dmablit_timer(unsigned long data) struct drm_device *dev = blitq->dev; int engine = (int) (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues); - - DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine, + + DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine, (unsigned long) jiffies); via_dmablit_handler(dev, engine, 0); - + if (!timer_pending(&blitq->poll_timer)) { mod_timer(&blitq->poll_timer, jiffies + 1); @@ -497,7 +497,7 @@ via_dmablit_timer(unsigned long data) */ -static void +static void via_dmablit_workqueue(struct work_struct *work) { drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq); @@ -505,38 +505,38 @@ via_dmablit_workqueue(struct work_struct *work) unsigned long irqsave; drm_via_sg_info_t *cur_sg; int cur_released; - - - DRM_DEBUG("Workqueue task called for blit engine %ld\n",(unsigned long) + + + DRM_DEBUG("Workqueue task called for blit engine %ld\n",(unsigned long) (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues)); spin_lock_irqsave(&blitq->blit_lock, irqsave); - + while(blitq->serviced != blitq->cur) { cur_released = blitq->serviced++; DRM_DEBUG("Releasing blit slot %d\n", cur_released); - if (blitq->serviced >= VIA_NUM_BLIT_SLOTS) + if (blitq->serviced >= VIA_NUM_BLIT_SLOTS) blitq->serviced = 0; - + cur_sg = blitq->blits[cur_released]; blitq->num_free++; - + spin_unlock_irqrestore(&blitq->blit_lock, irqsave); - + DRM_WAKEUP(&blitq->busy_queue); - + via_free_sg_info(dev->pdev, cur_sg); kfree(cur_sg); - + spin_lock_irqsave(&blitq->blit_lock, irqsave); } spin_unlock_irqrestore(&blitq->blit_lock, irqsave); } - + /* * Init all blit engines. Currently we use two, but some hardware have 4. @@ -550,8 +550,8 @@ via_init_dmablit(struct drm_device *dev) drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; drm_via_blitq_t *blitq; - pci_set_master(dev->pdev); - + pci_set_master(dev->pdev); + for (i=0; i< VIA_NUM_BLIT_ENGINES; ++i) { blitq = dev_priv->blit_queues + i; blitq->dev = dev; @@ -572,20 +572,20 @@ via_init_dmablit(struct drm_device *dev) INIT_WORK(&blitq->wq, via_dmablit_workqueue); setup_timer(&blitq->poll_timer, via_dmablit_timer, (unsigned long)blitq); - } + } } /* * Build all info and do all mappings required for a blit. */ - + static int via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer) { int draw = xfer->to_fb; int ret = 0; - + vsg->direction = (draw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE; vsg->bounce_buffer = NULL; @@ -599,7 +599,7 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli /* * Below check is a driver limitation, not a hardware one. We * don't want to lock unused pages, and don't want to incoporate the - * extra logic of avoiding them. Make sure there are no. + * extra logic of avoiding them. Make sure there are no. * (Not a big limitation anyway.) 
*/ @@ -625,11 +625,11 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli if (xfer->num_lines > 2048 || (xfer->num_lines*xfer->mem_stride > (2048*2048*4))) { DRM_ERROR("Too large PCI DMA bitblt.\n"); return -EINVAL; - } + } - /* + /* * we allow a negative fb stride to allow flipping of images in - * transfer. + * transfer. */ if (xfer->mem_stride < xfer->line_length || @@ -653,11 +653,11 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli #else if ((((unsigned long)xfer->mem_addr & 15) || ((unsigned long)xfer->fb_addr & 3)) || - ((xfer->num_lines > 1) && + ((xfer->num_lines > 1) && ((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) { DRM_ERROR("Invalid DRM bitblt alignment.\n"); return -EINVAL; - } + } #endif if (0 != (ret = via_lock_all_dma_pages(vsg, xfer))) { @@ -673,17 +673,17 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli return ret; } via_map_blit_for_device(dev->pdev, xfer, vsg, 1); - + return 0; } - + /* * Reserve one free slot in the blit queue. Will wait for one second for one * to become available. Otherwise -EBUSY is returned. */ -static int +static int via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine) { int ret=0; @@ -698,10 +698,10 @@ via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine) if (ret) { return (-EINTR == ret) ? -EAGAIN : ret; } - + spin_lock_irqsave(&blitq->blit_lock, irqsave); } - + blitq->num_free--; spin_unlock_irqrestore(&blitq->blit_lock, irqsave); @@ -712,7 +712,7 @@ via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine) * Hand back a free slot if we changed our mind. */ -static void +static void via_dmablit_release_slot(drm_via_blitq_t *blitq) { unsigned long irqsave; @@ -728,8 +728,8 @@ via_dmablit_release_slot(drm_via_blitq_t *blitq) */ -static int -via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer) +static int +via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer) { drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; drm_via_sg_info_t *vsg; @@ -760,15 +760,15 @@ via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer) spin_lock_irqsave(&blitq->blit_lock, irqsave); blitq->blits[blitq->head++] = vsg; - if (blitq->head >= VIA_NUM_BLIT_SLOTS) + if (blitq->head >= VIA_NUM_BLIT_SLOTS) blitq->head = 0; blitq->num_outstanding++; - xfer->sync.sync_handle = ++blitq->cur_blit_handle; + xfer->sync.sync_handle = ++blitq->cur_blit_handle; spin_unlock_irqrestore(&blitq->blit_lock, irqsave); xfer->sync.engine = engine; - via_dmablit_handler(dev, engine, 0); + via_dmablit_handler(dev, engine, 0); return 0; } @@ -776,7 +776,7 @@ via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer) /* * Sync on a previously submitted blit. Note that the X server use signals extensively, and * that there is a very big probability that this IOCTL will be interrupted by a signal. In that - * case it returns with -EAGAIN for the signal to be delivered. + * case it returns with -EAGAIN for the signal to be delivered. * The caller should then reissue the IOCTL. This is similar to what is being done for drmGetLock(). 
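
The comment above spells out the retry protocol for the two blit ioctls: queueing can return -EAGAIN while waiting for a free slot, and the sync wait can return -EAGAIN when a signal arrives, so both are meant to be reissued. A minimal userspace sketch of that loop follows, under stated assumptions: drmCommandWriteRead/drmCommandWrite are the usual libdrm helpers, the DRM_VIA_DMA_BLIT and DRM_VIA_BLIT_SYNC command indices are assumed to come from via_drm.h (they are not shown in this hunk), and the blit ioctl is assumed to copy the filled-in sync handle back to userspace. The struct fields match the drm_via_dmablit_t and drm_via_blitsync_t usage in the handlers above.

/*
 * Illustrative userspace use of the VIA blit queue/sync ioctls.
 */
#include <errno.h>
#include <string.h>
#include <xf86drm.h>
#include "via_drm.h"

static int blit_and_wait(int fd, drm_via_dmablit_t *xfer)
{
	drm_via_blitsync_t sync;
	int ret;

	/* Queue the blit; reissue if interrupted while waiting for a slot. */
	do {
		ret = drmCommandWriteRead(fd, DRM_VIA_DMA_BLIT,
					  xfer, sizeof(*xfer));
	} while (ret == -EAGAIN);
	if (ret)
		return ret;

	/* Wait on the handle the kernel filled in; signals return -EAGAIN. */
	memset(&sync, 0, sizeof(sync));
	sync.sync_handle = xfer->sync.sync_handle;
	sync.engine = xfer->sync.engine;
	do {
		ret = drmCommandWrite(fd, DRM_VIA_BLIT_SYNC,
				      &sync, sizeof(sync));
	} while (ret == -EAGAIN);

	return ret;
}
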
*/ @@ -786,7 +786,7 @@ via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_pri drm_via_blitsync_t *sync = data; int err; - if (sync->engine >= VIA_NUM_BLIT_ENGINES) + if (sync->engine >= VIA_NUM_BLIT_ENGINES) return -EINVAL; err = via_dmablit_sync(dev, sync->sync_handle, sync->engine); @@ -796,15 +796,15 @@ via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_pri return err; } - + /* * Queue a blit and hand back a handle to be used for sync. This IOCTL may be interrupted by a signal - * while waiting for a free slot in the blit queue. In that case it returns with -EAGAIN and should + * while waiting for a free slot in the blit queue. In that case it returns with -EAGAIN and should * be reissued. See the above IOCTL code. */ -int +int via_dma_blit( struct drm_device *dev, void *data, struct drm_file *file_priv ) { drm_via_dmablit_t *xfer = data; diff --git a/drivers/char/drm/via_dmablit.h b/drivers/char/drm/via_dmablit.h index 6f6a513..7408a54 100644 --- a/drivers/char/drm/via_dmablit.h +++ b/drivers/char/drm/via_dmablit.h @@ -1,5 +1,5 @@ /* via_dmablit.h -- PCI DMA BitBlt support for the VIA Unichrome/Pro - * + * * Copyright 2005 Thomas Hellstrom. * All Rights Reserved. * @@ -17,12 +17,12 @@ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. * - * Authors: + * Authors: * Thomas Hellstrom. * Register info from Digeo Inc. */ @@ -67,7 +67,7 @@ typedef struct _drm_via_blitq { unsigned cur; unsigned num_free; unsigned num_outstanding; - unsigned long end; + unsigned long end; int aborting; int is_active; drm_via_sg_info_t *blits[VIA_NUM_BLIT_SLOTS]; @@ -77,46 +77,46 @@ typedef struct _drm_via_blitq { struct work_struct wq; struct timer_list poll_timer; } drm_via_blitq_t; - -/* + +/* * PCI DMA Registers * Channels 2 & 3 don't seem to be implemented in hardware. 
*/ - -#define VIA_PCI_DMA_MAR0 0xE40 /* Memory Address Register of Channel 0 */ -#define VIA_PCI_DMA_DAR0 0xE44 /* Device Address Register of Channel 0 */ -#define VIA_PCI_DMA_BCR0 0xE48 /* Byte Count Register of Channel 0 */ -#define VIA_PCI_DMA_DPR0 0xE4C /* Descriptor Pointer Register of Channel 0 */ - -#define VIA_PCI_DMA_MAR1 0xE50 /* Memory Address Register of Channel 1 */ -#define VIA_PCI_DMA_DAR1 0xE54 /* Device Address Register of Channel 1 */ -#define VIA_PCI_DMA_BCR1 0xE58 /* Byte Count Register of Channel 1 */ -#define VIA_PCI_DMA_DPR1 0xE5C /* Descriptor Pointer Register of Channel 1 */ - -#define VIA_PCI_DMA_MAR2 0xE60 /* Memory Address Register of Channel 2 */ -#define VIA_PCI_DMA_DAR2 0xE64 /* Device Address Register of Channel 2 */ -#define VIA_PCI_DMA_BCR2 0xE68 /* Byte Count Register of Channel 2 */ -#define VIA_PCI_DMA_DPR2 0xE6C /* Descriptor Pointer Register of Channel 2 */ - -#define VIA_PCI_DMA_MAR3 0xE70 /* Memory Address Register of Channel 3 */ -#define VIA_PCI_DMA_DAR3 0xE74 /* Device Address Register of Channel 3 */ -#define VIA_PCI_DMA_BCR3 0xE78 /* Byte Count Register of Channel 3 */ -#define VIA_PCI_DMA_DPR3 0xE7C /* Descriptor Pointer Register of Channel 3 */ - -#define VIA_PCI_DMA_MR0 0xE80 /* Mode Register of Channel 0 */ -#define VIA_PCI_DMA_MR1 0xE84 /* Mode Register of Channel 1 */ -#define VIA_PCI_DMA_MR2 0xE88 /* Mode Register of Channel 2 */ -#define VIA_PCI_DMA_MR3 0xE8C /* Mode Register of Channel 3 */ - -#define VIA_PCI_DMA_CSR0 0xE90 /* Command/Status Register of Channel 0 */ -#define VIA_PCI_DMA_CSR1 0xE94 /* Command/Status Register of Channel 1 */ -#define VIA_PCI_DMA_CSR2 0xE98 /* Command/Status Register of Channel 2 */ -#define VIA_PCI_DMA_CSR3 0xE9C /* Command/Status Register of Channel 3 */ - -#define VIA_PCI_DMA_PTR 0xEA0 /* Priority Type Register */ - -/* Define for DMA engine */ + +#define VIA_PCI_DMA_MAR0 0xE40 /* Memory Address Register of Channel 0 */ +#define VIA_PCI_DMA_DAR0 0xE44 /* Device Address Register of Channel 0 */ +#define VIA_PCI_DMA_BCR0 0xE48 /* Byte Count Register of Channel 0 */ +#define VIA_PCI_DMA_DPR0 0xE4C /* Descriptor Pointer Register of Channel 0 */ + +#define VIA_PCI_DMA_MAR1 0xE50 /* Memory Address Register of Channel 1 */ +#define VIA_PCI_DMA_DAR1 0xE54 /* Device Address Register of Channel 1 */ +#define VIA_PCI_DMA_BCR1 0xE58 /* Byte Count Register of Channel 1 */ +#define VIA_PCI_DMA_DPR1 0xE5C /* Descriptor Pointer Register of Channel 1 */ + +#define VIA_PCI_DMA_MAR2 0xE60 /* Memory Address Register of Channel 2 */ +#define VIA_PCI_DMA_DAR2 0xE64 /* Device Address Register of Channel 2 */ +#define VIA_PCI_DMA_BCR2 0xE68 /* Byte Count Register of Channel 2 */ +#define VIA_PCI_DMA_DPR2 0xE6C /* Descriptor Pointer Register of Channel 2 */ + +#define VIA_PCI_DMA_MAR3 0xE70 /* Memory Address Register of Channel 3 */ +#define VIA_PCI_DMA_DAR3 0xE74 /* Device Address Register of Channel 3 */ +#define VIA_PCI_DMA_BCR3 0xE78 /* Byte Count Register of Channel 3 */ +#define VIA_PCI_DMA_DPR3 0xE7C /* Descriptor Pointer Register of Channel 3 */ + +#define VIA_PCI_DMA_MR0 0xE80 /* Mode Register of Channel 0 */ +#define VIA_PCI_DMA_MR1 0xE84 /* Mode Register of Channel 1 */ +#define VIA_PCI_DMA_MR2 0xE88 /* Mode Register of Channel 2 */ +#define VIA_PCI_DMA_MR3 0xE8C /* Mode Register of Channel 3 */ + +#define VIA_PCI_DMA_CSR0 0xE90 /* Command/Status Register of Channel 0 */ +#define VIA_PCI_DMA_CSR1 0xE94 /* Command/Status Register of Channel 1 */ +#define VIA_PCI_DMA_CSR2 0xE98 /* Command/Status Register of Channel 2 
*/ +#define VIA_PCI_DMA_CSR3 0xE9C /* Command/Status Register of Channel 3 */ + +#define VIA_PCI_DMA_PTR 0xEA0 /* Priority Type Register */ + +/* Define for DMA engine */ /* DPR */ #define VIA_DMA_DPR_EC (1<<1) /* end of chain */ #define VIA_DMA_DPR_DDIE (1<<2) /* descriptor done interrupt enable */ diff --git a/drivers/char/drm/via_drm.h b/drivers/char/drm/via_drm.h index 8f53c76..a3b5c10 100644 --- a/drivers/char/drm/via_drm.h +++ b/drivers/char/drm/via_drm.h @@ -35,7 +35,7 @@ #include "via_drmclient.h" #endif -#define VIA_NR_SAREA_CLIPRECTS 8 +#define VIA_NR_SAREA_CLIPRECTS 8 #define VIA_NR_XVMC_PORTS 10 #define VIA_NR_XVMC_LOCKS 5 #define VIA_MAX_CACHELINE_SIZE 64 @@ -259,7 +259,7 @@ typedef struct drm_via_blitsync { typedef struct drm_via_dmablit { uint32_t num_lines; uint32_t line_length; - + uint32_t fb_addr; uint32_t fb_stride; diff --git a/drivers/char/drm/via_drv.c b/drivers/char/drm/via_drv.c index 2d4957a..80c01cd 100644 --- a/drivers/char/drm/via_drv.c +++ b/drivers/char/drm/via_drv.c @@ -71,7 +71,7 @@ static struct drm_driver driver = { .name = DRIVER_NAME, .id_table = pciidlist, }, - + .name = DRIVER_NAME, .desc = DRIVER_DESC, .date = DRIVER_DATE, diff --git a/drivers/char/drm/via_map.c b/drivers/char/drm/via_map.c index 1009150..f6dcaaf 100644 --- a/drivers/char/drm/via_map.c +++ b/drivers/char/drm/via_map.c @@ -121,4 +121,3 @@ int via_driver_unload(struct drm_device *dev) return 0; } - diff --git a/drivers/char/drm/via_mm.c b/drivers/char/drm/via_mm.c index 3ffbf86..69f6558 100644 --- a/drivers/char/drm/via_mm.c +++ b/drivers/char/drm/via_mm.c @@ -113,7 +113,7 @@ void via_lastclose(struct drm_device *dev) dev_priv->vram_initialized = 0; dev_priv->agp_initialized = 0; mutex_unlock(&dev->struct_mutex); -} +} int via_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv)