From 7e3d8ced3c85ce2d202b1c49bfc759a9e65b10be Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Mon, 19 Mar 2007 09:50:11 +1100 Subject: [PATCH] drm: Add TTM support to the DRM This patch adds the Translation-Table maps support to the DRM driver. --- drivers/char/drm/Makefile | 3 drivers/char/drm/drm.h | 237 ++++ drivers/char/drm/drmP.h | 179 ++- drivers/char/drm/drm_agpsupport.c | 170 +++ drivers/char/drm/drm_bo.c | 2318 +++++++++++++++++++++++++++++++++++++ drivers/char/drm/drm_bo_move.c | 395 ++++++ drivers/char/drm/drm_bufs.c | 2 drivers/char/drm/drm_drv.c | 46 + drivers/char/drm/drm_fence.c | 661 +++++++++++ drivers/char/drm/drm_fops.c | 161 ++- drivers/char/drm/drm_irq.c | 5 drivers/char/drm/drm_lock.c | 133 ++ drivers/char/drm/drm_memory.c | 69 + drivers/char/drm/drm_mm.c | 1 drivers/char/drm/drm_object.c | 289 +++++ drivers/char/drm/drm_objects.h | 460 +++++++ drivers/char/drm/drm_proc.c | 90 + drivers/char/drm/drm_stub.c | 25 drivers/char/drm/drm_ttm.c | 337 +++++ drivers/char/drm/drm_vm.c | 234 +++- 20 files changed, 5689 insertions(+), 126 deletions(-) diff --git a/drivers/char/drm/Makefile b/drivers/char/drm/Makefile index 3ad0f64..7090ba4 100644 --- a/drivers/char/drm/Makefile +++ b/drivers/char/drm/Makefile @@ -6,7 +6,8 @@ drm-objs := drm_auth.o drm_bufs.o drm drm_drv.o drm_fops.o drm_ioctl.o drm_irq.o \ drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \ drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \ - drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o + drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \ + drm_ttm.o drm_bo.o drm_bo_move.o drm_object.o drm_fence.o tdfx-objs := tdfx_drv.o r128-objs := r128_drv.o r128_cce.o r128_state.o r128_irq.o diff --git a/drivers/char/drm/drm.h b/drivers/char/drm/drm.h index 0891984..ac278f8 100644 --- a/drivers/char/drm/drm.h +++ b/drivers/char/drm/drm.h @@ -95,6 +95,7 @@ #define _DRM_LOCK_IS_HELD(lock) ((loc #define _DRM_LOCK_IS_CONT(lock) ((lock) & _DRM_LOCK_CONT) #define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT)) +typedef __u64 drm_u64_t; typedef unsigned int drm_handle_t; typedef unsigned int drm_context_t; typedef unsigned int drm_drawable_t; @@ -208,6 +209,7 @@ typedef enum drm_map_type { _DRM_AGP = 3, /**< AGP/GART */ _DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */ _DRM_CONSISTENT = 5, /**< Consistent memory for PCI DMA */ + _DRM_TTM = 6 } drm_map_type_t; /** @@ -590,6 +592,233 @@ typedef struct drm_set_version { int drm_dd_minor; } drm_set_version_t; + +#define DRM_FENCE_FLAG_EMIT 0x00000001 +#define DRM_FENCE_FLAG_SHAREABLE 0x00000002 +#define DRM_FENCE_FLAG_WAIT_LAZY 0x00000004 +#define DRM_FENCE_FLAG_WAIT_IGNORE_SIGNALS 0x00000008 + +/* Reserved for driver use */ +#define DRM_FENCE_MASK_DRIVER 0xFF000000 + +#define DRM_FENCE_TYPE_EXE 0x00000001 + +typedef struct drm_fence_arg { + unsigned handle; + int class; + unsigned type; + unsigned flags; + unsigned signaled; + unsigned expand_pad[4]; /*Future expansion */ + enum { + drm_fence_create, + drm_fence_destroy, + drm_fence_reference, + drm_fence_unreference, + drm_fence_signaled, + drm_fence_flush, + drm_fence_wait, + drm_fence_emit, + drm_fence_buffers + } op; +} drm_fence_arg_t; + +/* Buffer permissions, referring to how the GPU uses the buffers. + * these translate to fence types used for the buffers. + * Typically a texture buffer is read, A destination buffer is write and + * a command (batch-) buffer is exe. Can be or-ed together. 
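+ *
+ * For example, a buffer that the CPU fills and the GPU then samples
+ * from would typically be validated with
+ *
+ *	DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE
+ *
+ * while a batch buffer would also set DRM_BO_FLAG_EXE.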
+ */ + +#define DRM_BO_FLAG_READ 0x00000001 +#define DRM_BO_FLAG_WRITE 0x00000002 +#define DRM_BO_FLAG_EXE 0x00000004 + +/* + * Status flags. Can be read to determine the actual state of a buffer. + * Can also be set in the buffer mask before validation. + */ + +/* + * Mask: Never evict this buffer. Not even with force. This type of buffer is only + * available to root and must be manually removed before buffer manager shutdown + * or lock. + * Flags: Acknowledge + */ +#define DRM_BO_FLAG_NO_EVICT 0x00000010 + +/* + * Mask: Require that the buffer is placed in mappable memory when validated. + * If not set the buffer may or may not be in mappable memory when validated. + * Flags: If set, the buffer is in mappable memory. + */ +#define DRM_BO_FLAG_MAPPABLE 0x00000020 + +/* Mask: The buffer should be shareable with other processes. + * Flags: The buffer is shareable with other processes. + */ +#define DRM_BO_FLAG_SHAREABLE 0x00000040 + +/* Mask: If set, place the buffer in cache-coherent memory if available. + * If clear, never place the buffer in cache coherent memory if validated. + * Flags: The buffer is currently in cache-coherent memory. + */ +#define DRM_BO_FLAG_CACHED 0x00000080 + +/* Mask: Make sure that every time this buffer is validated, + * it ends up on the same location provided that the memory mask is the same. + * The buffer will also not be evicted when claiming space for + * other buffers. Basically a pinned buffer but it may be thrown out as + * part of buffer manager shutdown or locking. + * Flags: Acknowledge. + */ +#define DRM_BO_FLAG_NO_MOVE 0x00000100 + +/* Mask: Make sure the buffer is in cached memory when mapped for reading. + * Flags: Acknowledge. + */ +#define DRM_BO_FLAG_READ_CACHED 0x00080000 + +/* Mask: Force DRM_BO_FLAG_CACHED flag strictly also if it is set. + * Flags: Acknowledge. + */ +#define DRM_BO_FLAG_FORCE_CACHING 0x00002000 + +/* + * Mask: Force DRM_BO_FLAG_MAPPABLE flag strictly also if it is clear. + * Flags: Acknowledge. + */ +#define DRM_BO_FLAG_FORCE_MAPPABLE 0x00004000 + +/* + * Memory type flags that can be or'ed together in the mask, but only + * one appears in flags. + */ + +/* System memory */ +#define DRM_BO_FLAG_MEM_LOCAL 0x01000000 +/* Translation table memory */ +#define DRM_BO_FLAG_MEM_TT 0x02000000 +/* Vram memory */ +#define DRM_BO_FLAG_MEM_VRAM 0x04000000 +/* Up to the driver to define. 
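+ * A driver could use one of these for, e.g., a chip-private memory
+ * pool; the core only reserves the flag bits.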
*/ +#define DRM_BO_FLAG_MEM_PRIV0 0x08000000 +#define DRM_BO_FLAG_MEM_PRIV1 0x10000000 +#define DRM_BO_FLAG_MEM_PRIV2 0x20000000 +#define DRM_BO_FLAG_MEM_PRIV3 0x40000000 +#define DRM_BO_FLAG_MEM_PRIV4 0x80000000 + +/* Memory flag mask */ +#define DRM_BO_MASK_MEM 0xFF000000 +#define DRM_BO_MASK_MEMTYPE 0xFF0000A0 + +/* Don't block on validate and map */ +#define DRM_BO_HINT_DONT_BLOCK 0x00000002 +/* Don't place this buffer on the unfenced list.*/ +#define DRM_BO_HINT_DONT_FENCE 0x00000004 +#define DRM_BO_HINT_WAIT_LAZY 0x00000008 +#define DRM_BO_HINT_ALLOW_UNFENCED_MAP 0x00000010 + + +typedef enum { + drm_bo_type_dc, + drm_bo_type_user, + drm_bo_type_fake +}drm_bo_type_t; + + +typedef struct drm_bo_arg_request { + unsigned handle; /* User space handle */ + unsigned mask; + unsigned hint; + drm_u64_t size; + drm_bo_type_t type; + unsigned arg_handle; + drm_u64_t buffer_start; + unsigned page_alignment; + unsigned expand_pad[4]; /*Future expansion */ + enum { + drm_bo_create, + drm_bo_validate, + drm_bo_map, + drm_bo_unmap, + drm_bo_fence, + drm_bo_destroy, + drm_bo_reference, + drm_bo_unreference, + drm_bo_info, + drm_bo_wait_idle, + drm_bo_ref_fence + } op; +} drm_bo_arg_request_t; + + +/* + * Reply flags + */ + +#define DRM_BO_REP_BUSY 0x00000001 + +typedef struct drm_bo_arg_reply { + int ret; + unsigned handle; + unsigned flags; + drm_u64_t size; + drm_u64_t offset; + drm_u64_t arg_handle; + unsigned mask; + drm_u64_t buffer_start; + unsigned fence_flags; + unsigned rep_flags; + unsigned page_alignment; + unsigned expand_pad[4]; /*Future expansion */ +}drm_bo_arg_reply_t; + + +typedef struct drm_bo_arg{ + int handled; + drm_u64_t next; + union { + drm_bo_arg_request_t req; + drm_bo_arg_reply_t rep; + } d; +} drm_bo_arg_t; + +#define DRM_BO_MEM_LOCAL 0 +#define DRM_BO_MEM_TT 1 +#define DRM_BO_MEM_VRAM 2 +#define DRM_BO_MEM_PRIV0 3 +#define DRM_BO_MEM_PRIV1 4 +#define DRM_BO_MEM_PRIV2 5 +#define DRM_BO_MEM_PRIV3 6 +#define DRM_BO_MEM_PRIV4 7 + +#define DRM_BO_MEM_TYPES 8 /* For now. 
*/ + +typedef union drm_mm_init_arg{ + struct { + enum { + mm_init, + mm_takedown, + mm_query, + mm_lock, + mm_unlock + } op; + drm_u64_t p_offset; + drm_u64_t p_size; + unsigned mem_type; + unsigned expand_pad[8]; /*Future expansion */ + } req; + struct { + drm_handle_t mm_sarea; + unsigned expand_pad[8]; /*Future expansion */ + } rep; +} drm_mm_init_arg_t; + +/** + * \name Ioctls Definitions + */ +/*@{*/ + #define DRM_IOCTL_BASE 'd' #define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr) #define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type) @@ -650,7 +879,13 @@ #define DRM_IOCTL_SG_FREE DRM_IOW( 0x39 #define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, drm_wait_vblank_t) -#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, drm_update_draw_t) +#define DRM_IOCTL_FENCE DRM_IOWR(0x3b, drm_fence_arg_t) +#define DRM_IOCTL_BUFOBJ DRM_IOWR(0x3d, drm_bo_arg_t) +#define DRM_IOCTL_MM_INIT DRM_IOWR(0x3e, drm_mm_init_arg_t) + +#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, drm_update_draw_t) + +/*@}*/ /** * Device specific ioctls should only be in their respective headers diff --git a/drivers/char/drm/drmP.h b/drivers/char/drm/drmP.h index 09705da..f4abc3e 100644 --- a/drivers/char/drm/drmP.h +++ b/drivers/char/drm/drmP.h @@ -55,6 +55,7 @@ #include #include #include /* For (un)lock_kernel */ #include +#include #include #include #if defined(__alpha__) || defined(__powerpc__) @@ -67,6 +68,7 @@ #ifdef CONFIG_MTRR #include #endif #if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE) +#include #include #include #endif @@ -139,9 +141,18 @@ #define DRM_MEM_SGLISTS 20 #define DRM_MEM_CTXLIST 21 #define DRM_MEM_MM 22 #define DRM_MEM_HASHTAB 23 +#define DRM_MEM_OBJECTS 24 +#define DRM_MEM_FENCE 25 +#define DRM_MEM_TTM 26 +#define DRM_MEM_BUFOBJ 27 #define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8) #define DRM_MAP_HASH_OFFSET 0x10000000 +#define DRM_MAP_HASH_ORDER 12 +#define DRM_OBJECT_HASH_ORDER 12 +#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1) +#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16) +#define DRM_MM_INIT_MAX_PAGES 256 /*@}*/ @@ -282,7 +293,6 @@ typedef struct drm_magic_entry { drm_hash_item_t hash_item; struct list_head head; struct drm_file *priv; - struct drm_magic_entry *next; } drm_magic_entry_t; typedef struct drm_magic_head { @@ -370,6 +380,19 @@ typedef struct drm_buf_entry { drm_freelist_t freelist; } drm_buf_entry_t; +/* + * This should be small enough to allow the use of kmalloc for hash tables + * instead of vmalloc. + */ + +#define DRM_FILE_HASH_ORDER 8 +typedef enum{ + _DRM_REF_USE=0, + _DRM_REF_TYPE1, + _DRM_NO_REF_TYPES +} drm_ref_t; + + /** File private data */ typedef struct drm_file { int authenticated; @@ -384,6 +407,18 @@ typedef struct drm_file { struct drm_head *head; int remove_auth_on_close; unsigned long lock_count; + + /* + * The user object hash table is global and resides in the + * drm_device structure. We protect the lists and hash tables with the + * device struct_mutex. A bit coarse-grained but probably the best + * option. 
+ */ + + struct list_head refd_objects; + struct list_head user_objects; + + drm_open_hash_t refd_object_hash[_DRM_NO_REF_TYPES]; void *driver_priv; } drm_file_t; @@ -414,6 +449,10 @@ typedef struct drm_lock_data { struct file *filp; /**< File descr of lock holder (0=kernel) */ wait_queue_head_t lock_queue; /**< Queue of blocked processes */ unsigned long lock_time; /**< Time of last lock in jiffies */ + spinlock_t spinlock; + uint32_t kernel_waiters; + uint32_t user_waiters; + int idle_has_lock; } drm_lock_data_t; /** @@ -483,6 +522,27 @@ typedef struct drm_sigdata { drm_hw_lock_t *lock; } drm_sigdata_t; + +/* + * Generic memory manager structs + */ + +typedef struct drm_mm_node { + struct list_head fl_entry; + struct list_head ml_entry; + int free; + unsigned long start; + unsigned long size; + struct drm_mm *mm; + void *private; +} drm_mm_node_t; + +typedef struct drm_mm { + struct list_head fl_entry; + struct list_head ml_entry; +} drm_mm_t; + + /** * Mappings list */ @@ -490,7 +550,8 @@ typedef struct drm_map_list { struct list_head head; /**< list head */ drm_hash_item_t hash; drm_map_t *map; /**< mapping */ - unsigned int user_token; + drm_u64_t user_token; + drm_mm_node_t *file_offset_node; } drm_map_list_t; typedef drm_map_t drm_local_map_t; @@ -523,23 +584,8 @@ typedef struct ati_pcigart_info { drm_local_map_t mapping; } drm_ati_pcigart_info; -/* - * Generic memory manager structs - */ -typedef struct drm_mm_node { - struct list_head fl_entry; - struct list_head ml_entry; - int free; - unsigned long start; - unsigned long size; - struct drm_mm *mm; - void *private; -} drm_mm_node_t; -typedef struct drm_mm { - struct list_head fl_entry; - struct list_head ml_entry; -} drm_mm_t; +#include "drm_objects.h" /** * DRM driver structure. This structure represent the common code for @@ -589,11 +635,16 @@ struct drm_driver { void (*irq_uninstall) (struct drm_device * dev); void (*reclaim_buffers) (struct drm_device * dev, struct file * filp); void (*reclaim_buffers_locked) (struct drm_device *dev, - struct file *filp); + struct file * filp); + void (*reclaim_buffers_idlelocked) (struct drm_device *dev, + struct file * filp); unsigned long (*get_map_ofs) (drm_map_t * map); unsigned long (*get_reg_ofs) (struct drm_device * dev); void (*set_version) (struct drm_device * dev, drm_set_version_t * sv); + struct drm_fence_driver *fence_driver; + struct drm_bo_driver *bo_driver; + int major; int minor; int patchlevel; @@ -668,7 +719,11 @@ typedef struct drm_device { /*@{ */ drm_map_list_t *maplist; /**< Linked list of regions */ int map_count; /**< Number of mappable regions */ - drm_open_hash_t map_hash; /**< User token hash table for maps */ + drm_open_hash_t map_hash; /**< User token hash table for maps */ + drm_mm_t offset_manager; /**< User token manager */ + drm_open_hash_t object_hash; /**< User token hash table for objects */ + struct address_space *dev_mapping; /**< For unmap_mapping_range() */ + struct page *ttm_dummy_page; /** \name Context handle management */ /*@{ */ @@ -747,6 +802,9 @@ #endif unsigned int agp_buffer_token; drm_head_t primary; /**< primary screen head */ + drm_fence_manager_t fm; + drm_buffer_manager_t bm; + /** \name Drawable information */ /*@{ */ spinlock_t drw_lock; @@ -757,6 +815,18 @@ #endif /*@} */ } drm_device_t; +#if __OS_HAS_AGP +typedef struct drm_agp_ttm_priv { + DRM_AGP_MEM *mem; + struct agp_bridge_data *bridge; + unsigned alloc_type; + unsigned cached_type; + unsigned uncached_type; + int populated; +} drm_agp_ttm_priv; +#endif + + static __inline__ int 
drm_core_check_feature(struct drm_device *dev, int feature) { @@ -845,12 +915,22 @@ extern void drm_mem_init(void); extern int drm_mem_info(char *buf, char **start, off_t offset, int request, int *eof, void *data); extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area); - -extern DRM_AGP_MEM *drm_alloc_agp(drm_device_t * dev, int pages, u32 type); +extern unsigned long drm_alloc_pages(int order, int area); +extern void drm_free_pages(unsigned long address, int order, int area); +extern DRM_AGP_MEM *drm_alloc_agp(drm_device_t *dev, int pages, u32 type); extern int drm_free_agp(DRM_AGP_MEM * handle, int pages); extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start); extern int drm_unbind_agp(DRM_AGP_MEM * handle); +extern void drm_free_memctl(size_t size); +extern int drm_alloc_memctl(size_t size); +extern void drm_query_memctl(drm_u64_t *cur_used, + drm_u64_t *low_threshold, + drm_u64_t *high_threshold); +extern void drm_init_memctl(size_t low_threshold, + size_t high_threshold, + size_t unit_size); + /* Misc. IOCTL support (drm_ioctl.h) */ extern int drm_irq_by_busid(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); @@ -915,9 +995,18 @@ extern int drm_lock(struct inode *inode, unsigned int cmd, unsigned long arg); extern int drm_unlock(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context); -extern int drm_lock_free(drm_device_t * dev, - __volatile__ unsigned int *lock, unsigned int context); +extern int drm_lock_take(drm_lock_data_t *lock_data, unsigned int context); +extern int drm_lock_free(drm_lock_data_t *lock_data, unsigned int context); +extern void drm_idlelock_take(drm_lock_data_t *lock_data); +extern void drm_idlelock_release(drm_lock_data_t *lock_data); + +/* + * These are exported to drivers so that they can implement fencing using + * DMA quiscent + idle. DMA quiescent usually requires the hardware lock. + */ + +extern int drm_i_have_hw_lock(struct file *filp); +extern int drm_kernel_take_hw_lock(struct file *filp); /* Buffer management support (drm_bufs.h) */ extern int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request); @@ -1000,7 +1089,8 @@ extern DRM_AGP_MEM *drm_agp_allocate_mem extern int drm_agp_free_memory(DRM_AGP_MEM * handle); extern int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start); extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle); - +extern drm_ttm_backend_t *drm_agp_init_ttm(struct drm_device *dev, + drm_ttm_backend_t *backend); /* Stub support (drm_stub.h) */ extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent, struct drm_driver *driver); @@ -1062,6 +1152,11 @@ extern unsigned long drm_mm_tail_space(d extern int drm_mm_remove_space_from_tail(drm_mm_t *mm, unsigned long size); extern int drm_mm_add_space_to_tail(drm_mm_t *mm, unsigned long size); +static inline drm_mm_t *drm_get_mm(drm_mm_node_t *block) +{ + return block->mm; +} + extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev); extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev); @@ -1125,6 +1220,38 @@ #endif extern unsigned long drm_core_get_map_ofs(drm_map_t * map); extern unsigned long drm_core_get_reg_ofs(struct drm_device *dev); +/* + * Accounting variants of standard calls. 
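+ *
+ * A minimal usage sketch (illustrative): the size handed to
+ * drm_ctl_free() must match the size used at allocation time, so
+ * that the memctl accounting stays balanced:
+ *
+ *	bo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
+ *	if (!bo)
+ *		return -ENOMEM;
+ *	...
+ *	drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);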
+ */ + +static inline void *drm_ctl_alloc(size_t size, int area) +{ + void *ret; + if (drm_alloc_memctl(size)) + return NULL; + ret = drm_alloc(size, area); + if (!ret) + drm_free_memctl(size); + return ret; +} + +static inline void *drm_ctl_calloc(size_t nmemb, size_t size, int area) +{ + void *ret; + + if (drm_alloc_memctl(nmemb*size)) + return NULL; + ret = drm_calloc(nmemb, size, area); + if (!ret) + drm_free_memctl(nmemb*size); + return ret; +} + +static inline void drm_ctl_free(void *pt, size_t size, int area) +{ + drm_free(pt, size, area); + drm_free_memctl(size); +} #endif /* __KERNEL__ */ #endif diff --git a/drivers/char/drm/drm_agpsupport.c b/drivers/char/drm/drm_agpsupport.c index 40bfd9b..01c6d61 100644 --- a/drivers/char/drm/drm_agpsupport.c +++ b/drivers/char/drm/drm_agpsupport.c @@ -511,4 +511,174 @@ int drm_agp_unbind_memory(DRM_AGP_MEM * return agp_unbind_memory(handle); } + + +/* + * AGP ttm backend interface. + */ + +#ifndef AGP_USER_TYPES +#define AGP_USER_TYPES (1 << 16) +#define AGP_USER_MEMORY (AGP_USER_TYPES) +#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1) +#endif +#define AGP_REQUIRED_MAJOR 0 +#define AGP_REQUIRED_MINOR 102 + +static int drm_agp_needs_unbind_cache_adjust(drm_ttm_backend_t *backend) { + return ((backend->flags & DRM_BE_FLAG_BOUND_CACHED) ? 0 : 1); +} + + +static int drm_agp_populate(drm_ttm_backend_t *backend, unsigned long num_pages, + struct page **pages) { + + drm_agp_ttm_priv *agp_priv = (drm_agp_ttm_priv *) backend->private; + struct page **cur_page, **last_page = pages + num_pages; + DRM_AGP_MEM *mem; + + if (drm_alloc_memctl(num_pages * sizeof(void *))) + return -1; + + DRM_DEBUG("drm_agp_populate_ttm\n"); + mem = drm_agp_allocate_memory(agp_priv->bridge, num_pages, agp_priv->alloc_type); + if (!mem) { + drm_free_memctl(num_pages *sizeof(void *)); + return -1; + } + + DRM_DEBUG("Current page count is %ld\n", (long) mem->page_count); + mem->page_count = 0; + for (cur_page = pages; cur_page < last_page; ++cur_page) { + mem->memory[mem->page_count++] = phys_to_gart(page_to_phys(*cur_page)); + } + agp_priv->mem = mem; + return 0; +} + +static int drm_agp_bind_ttm(drm_ttm_backend_t *backend, + unsigned long offset, + int cached) +{ + drm_agp_ttm_priv *agp_priv = (drm_agp_ttm_priv *) backend->private; + DRM_AGP_MEM *mem = agp_priv->mem; + int ret; + + DRM_DEBUG("drm_agp_bind_ttm\n"); + DRM_FLAG_MASKED(backend->flags, (cached) ? DRM_BE_FLAG_BOUND_CACHED : 0, + DRM_BE_FLAG_BOUND_CACHED); + mem->is_flushed = TRUE; + mem->type = (cached) ? 
agp_priv->cached_type : agp_priv->uncached_type; + ret = drm_agp_bind_memory(mem, offset); + if (ret) { + DRM_ERROR("AGP Bind memory failed\n"); + } + return ret; +} + +static int drm_agp_unbind_ttm(drm_ttm_backend_t *backend) { + + drm_agp_ttm_priv *agp_priv = (drm_agp_ttm_priv *) backend->private; + + DRM_DEBUG("drm_agp_unbind_ttm\n"); + if (agp_priv->mem->is_bound) + return drm_agp_unbind_memory(agp_priv->mem); + else + return 0; +} + +static void drm_agp_clear_ttm(drm_ttm_backend_t *backend) { + + drm_agp_ttm_priv *agp_priv = (drm_agp_ttm_priv *) backend->private; + DRM_AGP_MEM *mem = agp_priv->mem; + + DRM_DEBUG("drm_agp_clear_ttm\n"); + if (mem) { + unsigned long num_pages = mem->page_count; + backend->unbind(backend); + agp_free_memory(mem); + drm_free_memctl(num_pages *sizeof(void *)); + } + + agp_priv->mem = NULL; +} + +static void drm_agp_destroy_ttm(drm_ttm_backend_t *backend) { + + drm_agp_ttm_priv *agp_priv; + + if (backend) { + DRM_DEBUG("drm_agp_destroy_ttm\n"); + agp_priv = (drm_agp_ttm_priv *) backend->private; + if (agp_priv) { + if (agp_priv->mem) { + backend->clear(backend); + } + drm_ctl_free(agp_priv, sizeof(*agp_priv), DRM_MEM_MAPPINGS); + backend->private = NULL; + } + if (backend->flags & DRM_BE_FLAG_NEEDS_FREE) { + drm_ctl_free(backend, sizeof(*backend), DRM_MEM_MAPPINGS); + } + } +} + +drm_ttm_backend_t *drm_agp_init_ttm(struct drm_device *dev, + drm_ttm_backend_t *backend) +{ + + drm_ttm_backend_t *agp_be; + drm_agp_ttm_priv *agp_priv; + struct agp_kern_info *info; + + if (!dev->agp) { + DRM_ERROR("AGP is not initialized.\n"); + return NULL; + } + info = &dev->agp->agp_info; + + if (info->version.major != AGP_REQUIRED_MAJOR || + info->version.minor < AGP_REQUIRED_MINOR) { + DRM_ERROR("Wrong agpgart version %d.%d\n" + "\tYou need at least version %d.%d.\n", + info->version.major, + info->version.minor, + AGP_REQUIRED_MAJOR, + AGP_REQUIRED_MINOR); + return NULL; + } + + agp_be = (backend != NULL) ? backend: + drm_ctl_calloc(1, sizeof(*agp_be), DRM_MEM_MAPPINGS); + + if (!agp_be) + return NULL; + + agp_priv = drm_ctl_calloc(1, sizeof(*agp_priv), DRM_MEM_MAPPINGS); + + if (!agp_priv) { + drm_ctl_free(agp_be, sizeof(*agp_be), DRM_MEM_MAPPINGS); + return NULL; + } + + agp_priv->mem = NULL; + agp_priv->alloc_type = AGP_USER_MEMORY; + agp_priv->cached_type = AGP_USER_CACHED_MEMORY; + agp_priv->uncached_type = AGP_USER_MEMORY; + agp_priv->bridge = dev->agp->bridge; + agp_priv->populated = FALSE; + agp_be->private = (void *) agp_priv; + agp_be->needs_ub_cache_adjust = drm_agp_needs_unbind_cache_adjust; + agp_be->populate = drm_agp_populate; + agp_be->clear = drm_agp_clear_ttm; + agp_be->bind = drm_agp_bind_ttm; + agp_be->unbind = drm_agp_unbind_ttm; + agp_be->destroy = drm_agp_destroy_ttm; + DRM_FLAG_MASKED(agp_be->flags, (backend == NULL) ? DRM_BE_FLAG_NEEDS_FREE : 0, + DRM_BE_FLAG_NEEDS_FREE); + agp_be->drm_map_type = _DRM_AGP; + return agp_be; +} +EXPORT_SYMBOL(drm_agp_init_ttm); + #endif /* __OS_HAS_AGP */ diff --git a/drivers/char/drm/drm_bo.c b/drivers/char/drm/drm_bo.c new file mode 100644 index 0000000..52dc158 --- /dev/null +++ b/drivers/char/drm/drm_bo.c @@ -0,0 +1,2318 @@ +/************************************************************************** + * + * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +/* + * Authors: Thomas Hellström + */ + +#include "drmP.h" + +/* + * Locking may look a bit complicated but isn't really: + * + * The buffer usage atomic_t needs to be protected by dev->struct_mutex + * when there is a chance that it can be zero before or after the operation. + * + * dev->struct_mutex also protects all lists and list heads. Hash tables and hash + * heads. + * + * bo->mutex protects the buffer object itself excluding the usage field. + * bo->mutex does also protect the buffer list heads, so to manipulate those, we need + * both the bo->mutex and the dev->struct_mutex. + * + * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal is a bit + * complicated. When dev->struct_mutex is released to grab bo->mutex, the list + * traversal will, in general, need to be restarted. + * + */ + +static void drm_bo_destroy_locked(drm_buffer_object_t * bo); +static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo); +static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo); +static void drm_bo_unmap_virtual(drm_buffer_object_t * bo); + +static inline uint32_t drm_bo_type_flags(unsigned type) +{ + return (1 << (24 + type)); +} + +/* + * bo locked. dev->struct_mutex locked. + */ + +void drm_bo_add_to_pinned_lru(drm_buffer_object_t * bo) +{ + drm_mem_type_manager_t *man; + + man = &bo->dev->bm.man[bo->pinned_mem_type]; + list_add_tail(&bo->pinned_lru, &man->pinned); +} + +void drm_bo_add_to_lru(drm_buffer_object_t * bo) +{ + drm_mem_type_manager_t *man; + + if (!(bo->mem.mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))) { + man = &bo->dev->bm.man[bo->mem.mem_type]; + list_add_tail(&bo->lru, &man->lru); + } else { + INIT_LIST_HEAD(&bo->lru); + } +} + +static int drm_bo_vm_pre_move(drm_buffer_object_t * bo, int old_is_pci) +{ + drm_bo_unmap_virtual(bo); + return 0; +} + +static void drm_bo_vm_post_move(drm_buffer_object_t * bo) +{ +} + +/* + * Call bo->mutex locked. 
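+ * Only drm_bo_type_dc buffers get a ttm here; user and fake buffer
+ * types are left without one.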
+ */ + +static int drm_bo_add_ttm(drm_buffer_object_t * bo) +{ + drm_device_t *dev = bo->dev; + int ret = 0; + bo->ttm = NULL; + + switch (bo->type) { + case drm_bo_type_dc: + bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT); + if (!bo->ttm) + ret = -ENOMEM; + break; + case drm_bo_type_user: + case drm_bo_type_fake: + break; + default: + DRM_ERROR("Illegal buffer object type\n"); + ret = -EINVAL; + break; + } + + return ret; +} + +static int drm_bo_handle_move_mem(drm_buffer_object_t * bo, + drm_bo_mem_reg_t * mem, + int evict, int no_wait) +{ + drm_device_t *dev = bo->dev; + drm_buffer_manager_t *bm = &dev->bm; + int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem); + int new_is_pci = drm_mem_reg_is_pci(dev, mem); + drm_mem_type_manager_t *old_man = &bm->man[bo->mem.mem_type]; + drm_mem_type_manager_t *new_man = &bm->man[mem->mem_type]; + int ret = 0; + + if (old_is_pci || new_is_pci) + ret = drm_bo_vm_pre_move(bo, old_is_pci); + if (ret) + return ret; + + /* + * Create and bind a ttm if required. + */ + + if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm == NULL)) { + ret = drm_bo_add_ttm(bo); + if (ret) + goto out_err; + + if (mem->mem_type != DRM_BO_MEM_LOCAL) { + ret = drm_bind_ttm(bo->ttm, new_man->flags & + DRM_BO_FLAG_CACHED, + mem->mm_node->start); + if (ret) + goto out_err; + } + } + + if ((bo->mem.mem_type == DRM_BO_MEM_LOCAL) && bo->ttm == NULL) { + + drm_bo_mem_reg_t *old_mem = &bo->mem; + uint32_t save_flags = old_mem->flags; + uint32_t save_mask = old_mem->mask; + + *old_mem = *mem; + mem->mm_node = NULL; + old_mem->mask = save_mask; + DRM_FLAG_MASKED(save_flags, mem->flags, DRM_BO_MASK_MEMTYPE); + + } else if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && + !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) { + + ret = drm_bo_move_ttm(bo, evict, no_wait, mem); + + } else if (dev->driver->bo_driver->move) { + ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem); + + } else { + + ret = drm_bo_move_memcpy(bo, evict, no_wait, mem); + + } + + if (ret) + goto out_err; + + if (old_is_pci || new_is_pci) + drm_bo_vm_post_move(bo); + + if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) { + ret = + dev->driver->bo_driver->invalidate_caches(dev, + bo->mem.flags); + if (ret) + DRM_ERROR("Can not flush read caches\n"); + } + + DRM_FLAG_MASKED(bo->priv_flags, + (evict) ? _DRM_BO_FLAG_EVICTED : 0, + _DRM_BO_FLAG_EVICTED); + + if (bo->mem.mm_node) + bo->offset = bo->mem.mm_node->start << PAGE_SHIFT; + + return 0; + + out_err: + if (old_is_pci || new_is_pci) + drm_bo_vm_post_move(bo); + + new_man = &bm->man[bo->mem.mem_type]; + if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) { + drm_ttm_unbind(bo->ttm); + drm_destroy_ttm(bo->ttm); + bo->ttm = NULL; + } + + return ret; +} + +/* + * Call bo->mutex locked. + * Wait until the buffer is idle. 
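+ * Returns 0 when the buffer is idle, -EBUSY if no_wait is set while
+ * a fence is still outstanding. Typical call (illustrative):
+ *
+ *	ret = drm_bo_wait(bo, 0, 0, no_wait);
+ *	if (ret)
+ *		return ret;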
+ */ + +int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals, + int no_wait) +{ + + drm_fence_object_t *fence = bo->fence; + int ret; + + if (fence) { + drm_device_t *dev = bo->dev; + if (drm_fence_object_signaled(fence, bo->fence_type)) { + drm_fence_usage_deref_unlocked(dev, fence); + bo->fence = NULL; + return 0; + } + if (no_wait) { + return -EBUSY; + } + ret = + drm_fence_object_wait(dev, fence, lazy, ignore_signals, + bo->fence_type); + if (ret) + return ret; + + drm_fence_usage_deref_unlocked(dev, fence); + bo->fence = NULL; + + } + return 0; +} + +static int drm_bo_expire_fence(drm_buffer_object_t * bo, int allow_errors) +{ + drm_device_t *dev = bo->dev; + drm_buffer_manager_t *bm = &dev->bm; + + if (bo->fence) { + if (bm->nice_mode) { + unsigned long _end = jiffies + 3 * DRM_HZ; + int ret; + do { + ret = drm_bo_wait(bo, 0, 1, 0); + if (ret && allow_errors) + return ret; + + } while (ret && !time_after_eq(jiffies, _end)); + + if (bo->fence) { + bm->nice_mode = 0; + DRM_ERROR("Detected GPU lockup or " + "fence driver was taken down. " + "Evicting buffer.\n"); + } + } + if (bo->fence) { + drm_fence_usage_deref_unlocked(dev, bo->fence); + bo->fence = NULL; + } + } + return 0; +} + +/* + * Call dev->struct_mutex locked. + * Attempts to remove all private references to a buffer by expiring its + * fence object and removing from lru lists and memory managers. + */ + +static void drm_bo_cleanup_refs(drm_buffer_object_t * bo, int remove_all) +{ + drm_device_t *dev = bo->dev; + drm_buffer_manager_t *bm = &dev->bm; + + atomic_inc(&bo->usage); + mutex_unlock(&dev->struct_mutex); + mutex_lock(&bo->mutex); + + DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); + + if (bo->fence && drm_fence_object_signaled(bo->fence, bo->fence_type)) { + drm_fence_usage_deref_locked(dev, bo->fence); + bo->fence = NULL; + } + + if (bo->fence && remove_all) + (void)drm_bo_expire_fence(bo, 0); + + mutex_lock(&dev->struct_mutex); + + if (!atomic_dec_and_test(&bo->usage)) { + goto out; + } + + if (!bo->fence) { + list_del_init(&bo->lru); + if (bo->mem.mm_node) { + drm_mm_put_block(bo->mem.mm_node); + if (bo->pinned_node == bo->mem.mm_node) + bo->pinned_node = NULL; + bo->mem.mm_node = NULL; + } + list_del_init(&bo->pinned_lru); + if (bo->pinned_node) { + drm_mm_put_block(bo->pinned_node); + bo->pinned_node = NULL; + } + list_del_init(&bo->ddestroy); + mutex_unlock(&bo->mutex); + drm_bo_destroy_locked(bo); + return; + } + + if (list_empty(&bo->ddestroy)) { + drm_fence_object_flush(dev, bo->fence, bo->fence_type); + list_add_tail(&bo->ddestroy, &bm->ddestroy); + schedule_delayed_work(&bm->wq, + ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100); + } + + out: + mutex_unlock(&bo->mutex); + return; +} + +/* + * Verify that refcount is 0 and that there are no internal references + * to the buffer object. Then destroy it. + */ + +static void drm_bo_destroy_locked(drm_buffer_object_t * bo) +{ + drm_device_t *dev = bo->dev; + drm_buffer_manager_t *bm = &dev->bm; + + if (list_empty(&bo->lru) && bo->mem.mm_node == NULL && + list_empty(&bo->pinned_lru) && bo->pinned_node == NULL && + list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) { + if (bo->fence != NULL) { + DRM_ERROR("Fence was non-zero.\n"); + drm_bo_cleanup_refs(bo, 0); + return; + } + + if (bo->ttm) { + drm_ttm_unbind(bo->ttm); + drm_destroy_ttm(bo->ttm); + bo->ttm = NULL; + } + + atomic_dec(&bm->count); + + drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ); + + return; + } + + /* + * Some stuff is still trying to reference the buffer object. 
+ * Get rid of those references. + */ + + drm_bo_cleanup_refs(bo, 0); + + return; +} + +/* + * Call dev->struct_mutex locked. + */ + +static void drm_bo_delayed_delete(drm_device_t * dev, int remove_all) +{ + drm_buffer_manager_t *bm = &dev->bm; + + drm_buffer_object_t *entry, *nentry; + struct list_head *list, *next; + + list_for_each_safe(list, next, &bm->ddestroy) { + entry = list_entry(list, drm_buffer_object_t, ddestroy); + + nentry = NULL; + if (next != &bm->ddestroy) { + nentry = list_entry(next, drm_buffer_object_t, + ddestroy); + atomic_inc(&nentry->usage); + } + + drm_bo_cleanup_refs(entry, remove_all); + + if (nentry) { + atomic_dec(&nentry->usage); + } + } +} + +static void drm_bo_delayed_workqueue(struct work_struct *work) +{ + drm_buffer_manager_t *bm = + container_of(work, drm_buffer_manager_t, wq.work); + drm_device_t *dev = container_of(bm, drm_device_t, bm); + + DRM_DEBUG("Delayed delete Worker\n"); + + mutex_lock(&dev->struct_mutex); + if (!bm->initialized) { + mutex_unlock(&dev->struct_mutex); + return; + } + drm_bo_delayed_delete(dev, 0); + if (bm->initialized && !list_empty(&bm->ddestroy)) { + schedule_delayed_work(&bm->wq, + ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100); + } + mutex_unlock(&dev->struct_mutex); +} + +void drm_bo_usage_deref_locked(drm_buffer_object_t * bo) +{ + if (atomic_dec_and_test(&bo->usage)) { + drm_bo_destroy_locked(bo); + } +} + +static void drm_bo_base_deref_locked(drm_file_t * priv, drm_user_object_t * uo) +{ + drm_buffer_object_t *bo = + drm_user_object_entry(uo, drm_buffer_object_t, base); + + drm_bo_takedown_vm_locked(bo); + drm_bo_usage_deref_locked(bo); +} + +static void drm_bo_usage_deref_unlocked(drm_buffer_object_t * bo) +{ + drm_device_t *dev = bo->dev; + + if (atomic_dec_and_test(&bo->usage)) { + mutex_lock(&dev->struct_mutex); + if (atomic_read(&bo->usage) == 0) + drm_bo_destroy_locked(bo); + mutex_unlock(&dev->struct_mutex); + } +} + +/* + * Note. The caller has to register (if applicable) + * and deregister fence object usage. + */ + +int drm_fence_buffer_objects(drm_file_t * priv, + struct list_head *list, + uint32_t fence_flags, + drm_fence_object_t * fence, + drm_fence_object_t ** used_fence) +{ + drm_device_t *dev = priv->head->dev; + drm_buffer_manager_t *bm = &dev->bm; + + drm_buffer_object_t *entry; + uint32_t fence_type = 0; + int count = 0; + int ret = 0; + struct list_head *l; + LIST_HEAD(f_list); + + mutex_lock(&dev->struct_mutex); + + if (!list) + list = &bm->unfenced; + + list_for_each_entry(entry, list, lru) { + BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED)); + fence_type |= entry->fence_type; + if (entry->fence_class != 0) { + DRM_ERROR("Fence class %d is not implemented yet.\n", + entry->fence_class); + ret = -EINVAL; + goto out; + } + count++; + } + + if (!count) { + ret = -EINVAL; + goto out; + } + + /* + * Transfer to a local list before we release the dev->struct_mutex; + * This is so we don't get any new unfenced objects while fencing + * the ones we already have.. 
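+ * (list_splice_init() below moves the whole unfenced list over to
+ * the stack-local f_list in one operation.)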
+ */ + + list_splice_init(list, &f_list); + + if (fence) { + if ((fence_type & fence->type) != fence_type) { + DRM_ERROR("Given fence doesn't match buffers " + "on unfenced list.\n"); + ret = -EINVAL; + goto out; + } + } else { + mutex_unlock(&dev->struct_mutex); + ret = drm_fence_object_create(dev, 0, fence_type, + fence_flags | DRM_FENCE_FLAG_EMIT, + &fence); + mutex_lock(&dev->struct_mutex); + if (ret) + goto out; + } + + count = 0; + l = f_list.next; + while (l != &f_list) { + prefetch(l->next); + entry = list_entry(l, drm_buffer_object_t, lru); + atomic_inc(&entry->usage); + mutex_unlock(&dev->struct_mutex); + mutex_lock(&entry->mutex); + mutex_lock(&dev->struct_mutex); + list_del_init(l); + if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) { + count++; + if (entry->fence) + drm_fence_usage_deref_locked(dev, entry->fence); + entry->fence = fence; + DRM_FLAG_MASKED(entry->priv_flags, 0, + _DRM_BO_FLAG_UNFENCED); + DRM_WAKEUP(&entry->event_queue); + drm_bo_add_to_lru(entry); + } + mutex_unlock(&entry->mutex); + drm_bo_usage_deref_locked(entry); + l = f_list.next; + } + atomic_add(count, &fence->usage); + DRM_DEBUG("Fenced %d buffers\n", count); + out: + mutex_unlock(&dev->struct_mutex); + *used_fence = fence; + return ret; +} + +EXPORT_SYMBOL(drm_fence_buffer_objects); + +/* + * bo->mutex locked + */ + +static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type, + int no_wait) +{ + int ret = 0; + drm_device_t *dev = bo->dev; + drm_bo_mem_reg_t evict_mem; + + /* + * Someone might have modified the buffer before we took the buffer mutex. + */ + + if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) + goto out; + if (bo->mem.mem_type != mem_type) + goto out; + + ret = drm_bo_wait(bo, 0, 0, no_wait); + + if (ret && ret != -EAGAIN) { + DRM_ERROR("Failed to expire fence before " + "buffer eviction.\n"); + goto out; + } + + evict_mem = bo->mem; + evict_mem.mm_node = NULL; + + if (bo->type == drm_bo_type_fake) { + bo->mem.mem_type = DRM_BO_MEM_LOCAL; + bo->mem.mm_node = NULL; + goto out1; + } + + evict_mem = bo->mem; + evict_mem.mask = dev->driver->bo_driver->evict_mask(bo); + ret = drm_bo_mem_space(bo, &evict_mem, no_wait); + + if (ret) { + if (ret != -EAGAIN) + DRM_ERROR("Failed to find memory space for " + "buffer 0x%p eviction.\n", bo); + goto out; + } + + ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait); + + if (ret) { + if (ret != -EAGAIN) + DRM_ERROR("Buffer eviction failed\n"); + goto out; + } + + out1: + mutex_lock(&dev->struct_mutex); + if (evict_mem.mm_node) { + if (evict_mem.mm_node != bo->pinned_node) + drm_mm_put_block(evict_mem.mm_node); + evict_mem.mm_node = NULL; + } + list_del(&bo->lru); + drm_bo_add_to_lru(bo); + mutex_unlock(&dev->struct_mutex); + + DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED, + _DRM_BO_FLAG_EVICTED); + + out: + return ret; +} + +static int drm_bo_mem_force_space(drm_device_t * dev, + drm_bo_mem_reg_t * mem, + uint32_t mem_type, int no_wait) +{ + drm_mm_node_t *node; + drm_buffer_manager_t *bm = &dev->bm; + drm_buffer_object_t *entry; + drm_mem_type_manager_t *man = &bm->man[mem_type]; + struct list_head *lru; + unsigned long num_pages = mem->num_pages; + int ret; + + mutex_lock(&dev->struct_mutex); + do { + node = drm_mm_search_free(&man->manager, num_pages, + mem->page_alignment, 1); + if (node) + break; + + lru = &man->lru; + if (lru->next == lru) + break; + + entry = list_entry(lru->next, drm_buffer_object_t, lru); + atomic_inc(&entry->usage); + mutex_unlock(&dev->struct_mutex); + mutex_lock(&entry->mutex); + BUG_ON(entry->mem.flags & 
(DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT)); + + ret = drm_bo_evict(entry, mem_type, no_wait); + mutex_unlock(&entry->mutex); + drm_bo_usage_deref_unlocked(entry); + if (ret) + return ret; + mutex_lock(&dev->struct_mutex); + } while (1); + + if (!node) { + mutex_unlock(&dev->struct_mutex); + return -ENOMEM; + } + + node = drm_mm_get_block(node, num_pages, mem->page_alignment); + mutex_unlock(&dev->struct_mutex); + mem->mm_node = node; + mem->mem_type = mem_type; + return 0; +} + +static int drm_bo_mt_compatible(drm_mem_type_manager_t * man, + uint32_t mem_type, + uint32_t mask, uint32_t * res_mask) +{ + uint32_t cur_flags = drm_bo_type_flags(mem_type); + uint32_t flag_diff; + + if (man->flags & _DRM_FLAG_MEMTYPE_CACHED) + cur_flags |= DRM_BO_FLAG_CACHED; + if (man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE) + cur_flags |= DRM_BO_FLAG_MAPPABLE; + if (man->flags & _DRM_FLAG_MEMTYPE_CSELECT) + DRM_FLAG_MASKED(cur_flags, mask, DRM_BO_FLAG_CACHED); + + if ((cur_flags & mask & DRM_BO_MASK_MEM) == 0) + return 0; + + if (mem_type == DRM_BO_MEM_LOCAL) { + *res_mask = cur_flags; + return 1; + } + + flag_diff = (mask ^ cur_flags); + if ((flag_diff & DRM_BO_FLAG_CACHED) && + (!(mask & DRM_BO_FLAG_CACHED) || + (mask & DRM_BO_FLAG_FORCE_CACHING))) + return 0; + + if ((flag_diff & DRM_BO_FLAG_MAPPABLE) && + ((mask & DRM_BO_FLAG_MAPPABLE) || + (mask & DRM_BO_FLAG_FORCE_MAPPABLE)) ) + return 0; + + *res_mask = cur_flags; + return 1; +} + +int drm_bo_mem_space(drm_buffer_object_t * bo, + drm_bo_mem_reg_t * mem, int no_wait) +{ + drm_device_t *dev = bo->dev; + drm_buffer_manager_t *bm = &dev->bm; + drm_mem_type_manager_t *man; + + uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio; + const uint32_t *prios = dev->driver->bo_driver->mem_type_prio; + uint32_t i; + uint32_t mem_type = DRM_BO_MEM_LOCAL; + uint32_t cur_flags; + int type_found = 0; + int type_ok = 0; + int has_eagain = 0; + drm_mm_node_t *node = NULL; + int ret; + + mem->mm_node = NULL; + for (i = 0; i < num_prios; ++i) { + mem_type = prios[i]; + man = &bm->man[mem_type]; + + type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask, + &cur_flags); + + if (!type_ok) + continue; + + if (mem_type == DRM_BO_MEM_LOCAL) + break; + + if ((mem_type == bo->pinned_mem_type) && + (bo->pinned_node != NULL)) { + node = bo->pinned_node; + break; + } + + mutex_lock(&dev->struct_mutex); + if (man->has_type && man->use_type) { + type_found = 1; + node = drm_mm_search_free(&man->manager, mem->num_pages, + mem->page_alignment, 1); + if (node) + node = drm_mm_get_block(node, mem->num_pages, + mem->page_alignment); + } + mutex_unlock(&dev->struct_mutex); + if (node) + break; + } + + if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) { + mem->mm_node = node; + mem->mem_type = mem_type; + mem->flags = cur_flags; + return 0; + } + + if (!type_found) + return -EINVAL; + + num_prios = dev->driver->bo_driver->num_mem_busy_prio; + prios = dev->driver->bo_driver->mem_busy_prio; + + for (i = 0; i < num_prios; ++i) { + mem_type = prios[i]; + man = &bm->man[mem_type]; + + if (!drm_bo_mt_compatible(man, mem_type, mem->mask, &cur_flags)) + continue; + + ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait); + + if (ret == 0) { + mem->flags = cur_flags; + return 0; + } + + if (ret == -EAGAIN) + has_eagain = 1; + } + + ret = (has_eagain) ? 
-EAGAIN : -ENOMEM; + return ret; +} + +EXPORT_SYMBOL(drm_bo_mem_space); + +static int drm_bo_new_mask(drm_buffer_object_t * bo, + uint32_t new_mask, uint32_t hint) +{ + uint32_t new_props; + + if (bo->type == drm_bo_type_user) { + DRM_ERROR("User buffers are not supported yet\n"); + return -EINVAL; + } + if (bo->type == drm_bo_type_fake && + !(new_mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))) { + DRM_ERROR("Fake buffers must be pinned.\n"); + return -EINVAL; + } + + if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) { + DRM_ERROR + ("DRM_BO_FLAG_NO_EVICT is only available to priviliged " + "processes\n"); + return -EPERM; + } + + new_props = new_mask & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE | + DRM_BO_FLAG_READ); + + if (!new_props) { + DRM_ERROR("Invalid buffer object rwx properties\n"); + return -EINVAL; + } + + bo->mem.mask = new_mask; + return 0; +} + +/* + * Call dev->struct_mutex locked. + */ + +drm_buffer_object_t *drm_lookup_buffer_object(drm_file_t * priv, + uint32_t handle, int check_owner) +{ + drm_user_object_t *uo; + drm_buffer_object_t *bo; + + uo = drm_lookup_user_object(priv, handle); + + if (!uo || (uo->type != drm_buffer_type)) { + DRM_ERROR("Could not find buffer object 0x%08x\n", handle); + return NULL; + } + + if (check_owner && priv != uo->owner) { + if (!drm_lookup_ref_object(priv, uo, _DRM_REF_USE)) + return NULL; + } + + bo = drm_user_object_entry(uo, drm_buffer_object_t, base); + atomic_inc(&bo->usage); + return bo; +} + +/* + * Call bo->mutex locked. + * Returns 1 if the buffer is currently rendered to or from. 0 otherwise. + * Doesn't do any fence flushing as opposed to the drm_bo_busy function. + */ + +static int drm_bo_quick_busy(drm_buffer_object_t * bo) +{ + drm_fence_object_t *fence = bo->fence; + + BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED); + if (fence) { + drm_device_t *dev = bo->dev; + if (drm_fence_object_signaled(fence, bo->fence_type)) { + drm_fence_usage_deref_unlocked(dev, fence); + bo->fence = NULL; + return 0; + } + return 1; + } + return 0; +} + +/* + * Call bo->mutex locked. + * Returns 1 if the buffer is currently rendered to or from. 0 otherwise. + */ + +static int drm_bo_busy(drm_buffer_object_t * bo) +{ + drm_fence_object_t *fence = bo->fence; + + BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED); + if (fence) { + drm_device_t *dev = bo->dev; + if (drm_fence_object_signaled(fence, bo->fence_type)) { + drm_fence_usage_deref_unlocked(dev, fence); + bo->fence = NULL; + return 0; + } + drm_fence_object_flush(dev, fence, DRM_FENCE_TYPE_EXE); + if (drm_fence_object_signaled(fence, bo->fence_type)) { + drm_fence_usage_deref_unlocked(dev, fence); + bo->fence = NULL; + return 0; + } + return 1; + } + return 0; +} + +static int drm_bo_read_cached(drm_buffer_object_t * bo) +{ + int ret = 0; + + BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED); + if (bo->mem.mm_node) + ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1); + return ret; +} + +/* + * Wait until a buffer is unmapped. 
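+ * bo->mapped is biased to -1: -1 means unmapped, >= 0 means one or
+ * more outstanding mappings, in which case no_wait makes us return
+ * -EBUSY rather than sleep.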
+ */ + +static int drm_bo_wait_unmapped(drm_buffer_object_t * bo, int no_wait) +{ + int ret = 0; + + if ((atomic_read(&bo->mapped) >= 0) && no_wait) + return -EBUSY; + + DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ, + atomic_read(&bo->mapped) == -1); + + if (ret == -EINTR) + ret = -EAGAIN; + + return ret; +} + +static int drm_bo_check_unfenced(drm_buffer_object_t * bo) +{ + int ret; + + mutex_lock(&bo->mutex); + ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED); + mutex_unlock(&bo->mutex); + return ret; +} + +/* + * Wait until a buffer, scheduled to be fenced moves off the unfenced list. + * Until then, we cannot really do anything with it except delete it. + * The unfenced list is a PITA, and the operations + * 1) validating + * 2) submitting commands + * 3) fencing + * Should really be an atomic operation. + * We now "solve" this problem by keeping + * the buffer "unfenced" after validating, but before fencing. + */ + +static int drm_bo_wait_unfenced(drm_buffer_object_t * bo, int no_wait, + int eagain_if_wait) +{ + int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED); + unsigned long _end = jiffies + 3 * DRM_HZ; + + if (ret && no_wait) + return -EBUSY; + else if (!ret) + return 0; + + do { + mutex_unlock(&bo->mutex); + DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ, + !drm_bo_check_unfenced(bo)); + mutex_lock(&bo->mutex); + if (ret == -EINTR) + return -EAGAIN; + if (ret) { + DRM_ERROR + ("Error waiting for buffer to become fenced\n"); + return ret; + } + ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED); + } while (ret && !time_after_eq(jiffies, _end)); + if (ret) { + DRM_ERROR("Timeout waiting for buffer to become fenced\n"); + return ret; + } + if (eagain_if_wait) + return -EAGAIN; + + return 0; +} + +/* + * Fill in the ioctl reply argument with buffer info. + * Bo locked. + */ + +static void drm_bo_fill_rep_arg(drm_buffer_object_t * bo, + drm_bo_arg_reply_t * rep) +{ + rep->handle = bo->base.hash.key; + rep->flags = bo->mem.flags; + rep->size = bo->mem.num_pages * PAGE_SIZE; + rep->offset = bo->offset; + rep->arg_handle = bo->map_list.user_token; + rep->mask = bo->mem.mask; + rep->buffer_start = bo->buffer_start; + rep->fence_flags = bo->fence_type; + rep->rep_flags = 0; + rep->page_alignment = bo->mem.page_alignment; + + if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo)) { + DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY, + DRM_BO_REP_BUSY); + } +} + +/* + * Wait for buffer idle and register that we've mapped the buffer. + * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1, + * so that if the client dies, the mapping is automatically + * unregistered. + */ + +static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle, + uint32_t map_flags, unsigned hint, + drm_bo_arg_reply_t * rep) +{ + drm_buffer_object_t *bo; + drm_device_t *dev = priv->head->dev; + int ret = 0; + int no_wait = hint & DRM_BO_HINT_DONT_BLOCK; + + mutex_lock(&dev->struct_mutex); + bo = drm_lookup_buffer_object(priv, handle, 1); + mutex_unlock(&dev->struct_mutex); + + if (!bo) + return -EINVAL; + + mutex_lock(&bo->mutex); + if (!(hint & DRM_BO_HINT_ALLOW_UNFENCED_MAP)) { + ret = drm_bo_wait_unfenced(bo, no_wait, 0); + if (ret) + goto out; + } + + /* + * If this returns true, we are currently unmapped. + * We need to do this test, because unmapping can + * be done without the bo->mutex held. 
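+ * (atomic_inc_and_test() takes bo->mapped from -1 to 0 and returns
+ * true exactly when we are the first mapper.)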
+ */ + + while (1) { + if (atomic_inc_and_test(&bo->mapped)) { + if (no_wait && drm_bo_busy(bo)) { + atomic_dec(&bo->mapped); + ret = -EBUSY; + goto out; + } + ret = drm_bo_wait(bo, 0, 0, no_wait); + if (ret) { + atomic_dec(&bo->mapped); + goto out; + } + + if ((map_flags & DRM_BO_FLAG_READ) && + (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) && + (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) { + drm_bo_read_cached(bo); + } + break; + } else if ((map_flags & DRM_BO_FLAG_READ) && + (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) && + (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) { + + /* + * We are already mapped with different flags. + * need to wait for unmap. + */ + + ret = drm_bo_wait_unmapped(bo, no_wait); + if (ret) + goto out; + + continue; + } + break; + } + + mutex_lock(&dev->struct_mutex); + ret = drm_add_ref_object(priv, &bo->base, _DRM_REF_TYPE1); + mutex_unlock(&dev->struct_mutex); + if (ret) { + if (atomic_add_negative(-1, &bo->mapped)) + DRM_WAKEUP(&bo->event_queue); + + } else + drm_bo_fill_rep_arg(bo, rep); + out: + mutex_unlock(&bo->mutex); + drm_bo_usage_deref_unlocked(bo); + return ret; +} + +static int drm_buffer_object_unmap(drm_file_t * priv, uint32_t handle) +{ + drm_device_t *dev = priv->head->dev; + drm_buffer_object_t *bo; + drm_ref_object_t *ro; + int ret = 0; + + mutex_lock(&dev->struct_mutex); + + bo = drm_lookup_buffer_object(priv, handle, 1); + if (!bo) { + ret = -EINVAL; + goto out; + } + + ro = drm_lookup_ref_object(priv, &bo->base, _DRM_REF_TYPE1); + if (!ro) { + ret = -EINVAL; + goto out; + } + + drm_remove_ref_object(priv, ro); + drm_bo_usage_deref_locked(bo); + out: + mutex_unlock(&dev->struct_mutex); + return ret; +} + +/* + * Call struct-sem locked. + */ + +static void drm_buffer_user_object_unmap(drm_file_t * priv, + drm_user_object_t * uo, + drm_ref_t action) +{ + drm_buffer_object_t *bo = + drm_user_object_entry(uo, drm_buffer_object_t, base); + + /* + * We DON'T want to take the bo->lock here, because we want to + * hold it when we wait for unmapped buffer. + */ + + BUG_ON(action != _DRM_REF_TYPE1); + + if (atomic_add_negative(-1, &bo->mapped)) + DRM_WAKEUP(&bo->event_queue); +} + +/* + * bo->mutex locked. + * Note that new_mem_flags are NOT transferred to the bo->mem.mask. + */ + +int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, + int no_wait, int move_unfenced) +{ + drm_device_t *dev = bo->dev; + drm_buffer_manager_t *bm = &dev->bm; + int ret = 0; + drm_bo_mem_reg_t mem; + /* + * Flush outstanding fences. + */ + + drm_bo_busy(bo); + + /* + * Wait for outstanding fences. + */ + + ret = drm_bo_wait(bo, 0, 0, no_wait); + if (ret) + return ret; + + mem.num_pages = bo->mem.num_pages; + mem.size = mem.num_pages << PAGE_SHIFT; + mem.mask = new_mem_flags; + mem.page_alignment = bo->mem.page_alignment; + + mutex_lock(&bm->evict_mutex); + mutex_lock(&dev->struct_mutex); + list_del(&bo->lru); + list_add_tail(&bo->lru, &bm->unfenced); + DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED, + _DRM_BO_FLAG_UNFENCED); + mutex_unlock(&dev->struct_mutex); + + /* + * Determine where to move the buffer. 
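+ * drm_bo_mem_space() walks the driver's mem_type_prio list and picks
+ * the first memory type compatible with mem.mask that has free space.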
+ */ + ret = drm_bo_mem_space(bo, &mem, no_wait); + if (ret) + goto out_unlock; + + ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait); + + out_unlock: + if (ret || !move_unfenced) { + mutex_lock(&dev->struct_mutex); + if (mem.mm_node) { + if (mem.mm_node != bo->pinned_node) + drm_mm_put_block(mem.mm_node); + mem.mm_node = NULL; + } + DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); + DRM_WAKEUP(&bo->event_queue); + list_del(&bo->lru); + drm_bo_add_to_lru(bo); + mutex_unlock(&dev->struct_mutex); + } + + mutex_unlock(&bm->evict_mutex); + return ret; +} + +static int drm_bo_mem_compat(drm_bo_mem_reg_t * mem) +{ + uint32_t flag_diff = (mem->mask ^ mem->flags); + + if ((mem->mask & mem->flags & DRM_BO_MASK_MEM) == 0) + return 0; + if ((flag_diff & DRM_BO_FLAG_CACHED) && + (!(mem->mask & DRM_BO_FLAG_CACHED) || + (mem->mask & DRM_BO_FLAG_FORCE_CACHING))) { + return 0; + } + if ((flag_diff & DRM_BO_FLAG_MAPPABLE) && + ((mem->mask & DRM_BO_FLAG_MAPPABLE) || + (mem->mask & DRM_BO_FLAG_FORCE_MAPPABLE))) + return 0; + return 1; +} + +static int drm_bo_check_fake(drm_device_t * dev, drm_bo_mem_reg_t * mem) +{ + drm_buffer_manager_t *bm = &dev->bm; + drm_mem_type_manager_t *man; + uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio; + const uint32_t *prios = dev->driver->bo_driver->mem_type_prio; + uint32_t i; + int type_ok = 0; + uint32_t mem_type = 0; + uint32_t cur_flags; + + if (drm_bo_mem_compat(mem)) + return 0; + + BUG_ON(mem->mm_node); + + for (i = 0; i < num_prios; ++i) { + mem_type = prios[i]; + man = &bm->man[mem_type]; + type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask, + &cur_flags); + if (type_ok) + break; + } + + if (type_ok) { + mem->mm_node = NULL; + mem->mem_type = mem_type; + mem->flags = cur_flags; + DRM_FLAG_MASKED(mem->flags, mem->mask, ~DRM_BO_MASK_MEMTYPE); + return 0; + } + + DRM_ERROR("Illegal fake buffer flags 0x%08x\n", mem->mask); + return -EINVAL; +} + +/* + * bo locked. + */ + +static int drm_buffer_object_validate(drm_buffer_object_t * bo, + int move_unfenced, int no_wait) +{ + drm_device_t *dev = bo->dev; + drm_buffer_manager_t *bm = &dev->bm; + drm_bo_driver_t *driver = dev->driver->bo_driver; + int ret; + + DRM_DEBUG("New flags 0x%08x, Old flags 0x%08x\n", bo->mem.mask, + bo->mem.flags); + ret = + driver->fence_type(bo, &bo->fence_class, &bo->fence_type); + if (ret) { + DRM_ERROR("Driver did not support given buffer permissions\n"); + return ret; + } + + ret = drm_bo_wait_unmapped(bo, no_wait); + if (ret) + return ret; + + if (bo->type == drm_bo_type_fake) { + ret = drm_bo_check_fake(dev, &bo->mem); + if (ret) + return ret; + } + + /* + * Check whether we need to move buffer. + */ + + if (!drm_bo_mem_compat(&bo->mem)) { + ret = drm_bo_move_buffer(bo, bo->mem.mask, no_wait, + move_unfenced); + if (ret) { + if (ret != -EAGAIN) + DRM_ERROR("Failed moving buffer.\n"); + return ret; + } + } + + /* + * Pinned buffers. 
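+ * Buffers validated with DRM_BO_FLAG_NO_EVICT or DRM_BO_FLAG_NO_MOVE
+ * keep their memory node in bo->pinned_node and go on the pinned
+ * lru; a node pinned by an earlier validation is otherwise returned
+ * to the memory manager below.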
+ */ + + if (bo->mem.mask & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) { + bo->pinned_mem_type = bo->mem.mem_type; + mutex_lock(&dev->struct_mutex); + list_del_init(&bo->pinned_lru); + drm_bo_add_to_pinned_lru(bo); + + if (bo->pinned_node != bo->mem.mm_node) { + if (bo->pinned_node != NULL) + drm_mm_put_block(bo->pinned_node); + bo->pinned_node = bo->mem.mm_node; + } + + mutex_unlock(&dev->struct_mutex); + + } else if (bo->pinned_node != NULL) { + + mutex_lock(&dev->struct_mutex); + drm_mm_put_block(bo->pinned_node); + list_del_init(&bo->pinned_lru); + bo->pinned_node = NULL; + mutex_unlock(&dev->struct_mutex); + + } + + /* + * We might need to add a TTM. + */ + + if (bo->mem.mem_type == DRM_BO_MEM_LOCAL && bo->ttm == NULL) { + ret = drm_bo_add_ttm(bo); + if (ret) + return ret; + } + DRM_FLAG_MASKED(bo->mem.flags, bo->mem.mask, ~DRM_BO_MASK_MEMTYPE); + + /* + * Finally, adjust lru to be sure. + */ + + mutex_lock(&dev->struct_mutex); + list_del(&bo->lru); + if (move_unfenced) { + list_add_tail(&bo->lru, &bm->unfenced); + DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED, + _DRM_BO_FLAG_UNFENCED); + } else { + drm_bo_add_to_lru(bo); + if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) { + DRM_WAKEUP(&bo->event_queue); + DRM_FLAG_MASKED(bo->priv_flags, 0, + _DRM_BO_FLAG_UNFENCED); + } + } + mutex_unlock(&dev->struct_mutex); + + return 0; +} + +static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle, + uint32_t flags, uint32_t mask, uint32_t hint, + drm_bo_arg_reply_t * rep) +{ + drm_buffer_object_t *bo; + int ret; + int no_wait = hint & DRM_BO_HINT_DONT_BLOCK; + + bo = drm_lookup_buffer_object(priv, handle, 1); + if (!bo) { + return -EINVAL; + } + + mutex_lock(&bo->mutex); + ret = drm_bo_wait_unfenced(bo, no_wait, 0); + + if (ret) + goto out; + + DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask); + ret = drm_bo_new_mask(bo, flags, hint); + if (ret) + goto out; + + ret = + drm_buffer_object_validate(bo, !(hint & DRM_BO_HINT_DONT_FENCE), + no_wait); + drm_bo_fill_rep_arg(bo, rep); + + out: + + mutex_unlock(&bo->mutex); + + drm_bo_usage_deref_unlocked(bo); + return ret; +} + +static int drm_bo_handle_info(drm_file_t * priv, uint32_t handle, + drm_bo_arg_reply_t * rep) +{ + drm_buffer_object_t *bo; + + bo = drm_lookup_buffer_object(priv, handle, 1); + if (!bo) { + return -EINVAL; + } + mutex_lock(&bo->mutex); + if (!(bo->priv_flags & _DRM_BO_FLAG_UNFENCED)) + (void)drm_bo_busy(bo); + drm_bo_fill_rep_arg(bo, rep); + mutex_unlock(&bo->mutex); + drm_bo_usage_deref_unlocked(bo); + return 0; +} + +static int drm_bo_handle_wait(drm_file_t * priv, uint32_t handle, + uint32_t hint, drm_bo_arg_reply_t * rep) +{ + drm_buffer_object_t *bo; + int no_wait = hint & DRM_BO_HINT_DONT_BLOCK; + int ret; + + bo = drm_lookup_buffer_object(priv, handle, 1); + if (!bo) { + return -EINVAL; + } + + mutex_lock(&bo->mutex); + ret = drm_bo_wait_unfenced(bo, no_wait, 0); + if (ret) + goto out; + ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 0, no_wait); + if (ret) + goto out; + + drm_bo_fill_rep_arg(bo, rep); + + out: + mutex_unlock(&bo->mutex); + drm_bo_usage_deref_unlocked(bo); + return ret; +} + +int drm_buffer_object_create(drm_file_t * priv, + unsigned long size, + drm_bo_type_t type, + uint32_t mask, + uint32_t hint, + uint32_t page_alignment, + unsigned long buffer_start, + drm_buffer_object_t ** buf_obj) +{ + drm_device_t *dev = priv->head->dev; + drm_buffer_manager_t *bm = &dev->bm; + drm_buffer_object_t *bo; + int ret = 0; + unsigned long num_pages; + + if ((buffer_start & ~PAGE_MASK) && (type != 
drm_bo_type_fake)) { + DRM_ERROR("Invalid buffer object start.\n"); + return -EINVAL; + } + num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; + if (num_pages == 0) { + DRM_ERROR("Illegal buffer object size.\n"); + return -EINVAL; + } + + bo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ); + + if (!bo) + return -ENOMEM; + + mutex_init(&bo->mutex); + mutex_lock(&bo->mutex); + + atomic_set(&bo->usage, 1); + atomic_set(&bo->mapped, -1); + DRM_INIT_WAITQUEUE(&bo->event_queue); + INIT_LIST_HEAD(&bo->lru); + INIT_LIST_HEAD(&bo->pinned_lru); + INIT_LIST_HEAD(&bo->ddestroy); + bo->dev = dev; + bo->type = type; + bo->mem.mem_type = DRM_BO_MEM_LOCAL; + bo->mem.num_pages = num_pages; + bo->mem.mm_node = NULL; + bo->mem.page_alignment = page_alignment; + if (bo->type == drm_bo_type_fake) { + bo->offset = buffer_start; + bo->buffer_start = 0; + } else { + bo->buffer_start = buffer_start; + } + bo->priv_flags = 0; + bo->mem.flags = 0; + bo->mem.mask = 0; + atomic_inc(&bm->count); + ret = drm_bo_new_mask(bo, mask, hint); + + if (ret) + goto out_err; + + if (bo->type == drm_bo_type_dc) { + mutex_lock(&dev->struct_mutex); + ret = drm_bo_setup_vm_locked(bo); + mutex_unlock(&dev->struct_mutex); + if (ret) + goto out_err; + } + ret = drm_buffer_object_validate(bo, 0, hint & DRM_BO_HINT_DONT_BLOCK); + if (ret) + goto out_err; + + mutex_unlock(&bo->mutex); + *buf_obj = bo; + return 0; + + out_err: + mutex_unlock(&bo->mutex); + + drm_bo_usage_deref_unlocked(bo); + return ret; +} + +static int drm_bo_add_user_object(drm_file_t * priv, drm_buffer_object_t * bo, + int shareable) +{ + drm_device_t *dev = priv->head->dev; + int ret; + + mutex_lock(&dev->struct_mutex); + ret = drm_add_user_object(priv, &bo->base, shareable); + if (ret) + goto out; + + bo->base.remove = drm_bo_base_deref_locked; + bo->base.type = drm_buffer_type; + bo->base.ref_struct_locked = NULL; + bo->base.unref = drm_buffer_user_object_unmap; + + out: + mutex_unlock(&dev->struct_mutex); + return ret; +} + +static int drm_bo_lock_test(drm_device_t * dev, struct file *filp) +{ + LOCK_TEST_WITH_RETURN(dev, filp); + return 0; +} + +int drm_bo_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + drm_bo_arg_t arg; + drm_bo_arg_request_t *req = &arg.d.req; + drm_bo_arg_reply_t rep; + unsigned long next; + drm_user_object_t *uo; + drm_buffer_object_t *entry; + + if (!dev->bm.initialized) { + DRM_ERROR("Buffer object manager is not initialized.\n"); + return -EINVAL; + } + + do { + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + + if (arg.handled) { + data = arg.next; + continue; + } + + rep.ret = 0; + switch (req->op) { + case drm_bo_create: + rep.ret = + drm_buffer_object_create(priv, req->size, + req->type, + req->mask, + req->hint, + req->page_alignment, + req->buffer_start, &entry); + if (rep.ret) + break; + + rep.ret = + drm_bo_add_user_object(priv, entry, + req-> + mask & + DRM_BO_FLAG_SHAREABLE); + if (rep.ret) + drm_bo_usage_deref_unlocked(entry); + + if (rep.ret) + break; + + mutex_lock(&entry->mutex); + drm_bo_fill_rep_arg(entry, &rep); + mutex_unlock(&entry->mutex); + break; + case drm_bo_unmap: + rep.ret = drm_buffer_object_unmap(priv, req->handle); + break; + case drm_bo_map: + rep.ret = drm_buffer_object_map(priv, req->handle, + req->mask, + req->hint, &rep); + break; + case drm_bo_destroy: + mutex_lock(&dev->struct_mutex); + uo = drm_lookup_user_object(priv, req->handle); + if (!uo || (uo->type != drm_buffer_type) + || uo->owner != priv) { + mutex_unlock(&dev->struct_mutex); + rep.ret = -EINVAL; + break; + } + rep.ret = 
+			    drm_remove_user_object(priv, uo);
+			mutex_unlock(&dev->struct_mutex);
+			break;
+		case drm_bo_reference:
+			rep.ret = drm_user_object_ref(priv, req->handle,
+						      drm_buffer_type, &uo);
+			if (rep.ret)
+				break;
+			mutex_lock(&dev->struct_mutex);
+			uo = drm_lookup_user_object(priv, req->handle);
+			entry =
+			    drm_user_object_entry(uo, drm_buffer_object_t,
+						  base);
+			atomic_dec(&entry->usage);
+			mutex_unlock(&dev->struct_mutex);
+			mutex_lock(&entry->mutex);
+			drm_bo_fill_rep_arg(entry, &rep);
+			mutex_unlock(&entry->mutex);
+			break;
+		case drm_bo_unreference:
+			rep.ret = drm_user_object_unref(priv, req->handle,
+							drm_buffer_type);
+			break;
+		case drm_bo_validate:
+			rep.ret = drm_bo_lock_test(dev, filp);
+
+			if (rep.ret)
+				break;
+			rep.ret =
+			    drm_bo_handle_validate(priv, req->handle, req->mask,
+						   req->arg_handle, req->hint,
+						   &rep);
+			break;
+		case drm_bo_fence:
+			rep.ret = drm_bo_lock_test(dev, filp);
+			if (rep.ret)
+				break;
+			/* Fencing from this ioctl is not implemented yet. */
+			break;
+		case drm_bo_info:
+			rep.ret = drm_bo_handle_info(priv, req->handle, &rep);
+			break;
+		case drm_bo_wait_idle:
+			rep.ret = drm_bo_handle_wait(priv, req->handle,
+						     req->hint, &rep);
+			break;
+		case drm_bo_ref_fence:
+			rep.ret = -EINVAL;
+			DRM_ERROR("Function is not implemented yet.\n");
+			break;
+		default:
+			rep.ret = -EINVAL;
+		}
+		next = arg.next;
+
+		/*
+		 * A signal interrupted us. Make sure the ioctl is restartable.
+		 */
+
+		if (rep.ret == -EAGAIN)
+			return -EAGAIN;
+
+		arg.handled = 1;
+		arg.d.rep = rep;
+		DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
+		data = next;
+	} while (data);
+	return 0;
+}
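+
+/*
+ * Illustrative user-space sketch, not part of the kernel code: since
+ * drm_bo_ioctl() above walks a chain of drm_bo_arg_t structs linked
+ * through the next field, several buffer-object operations can be
+ * submitted with a single ioctl. The exact field types follow the
+ * drm_bo_arg_t definition in drm.h.
+ *
+ *	drm_bo_arg_t args[2];
+ *
+ *	memset(args, 0, sizeof(args));
+ *	args[0].d.req.op = drm_bo_create;
+ *	args[0].next = (unsigned long) &args[1];
+ *	args[1].d.req.op = drm_bo_info;
+ *	ioctl(fd, DRM_IOCTL_BUFOBJ, args);
+ *
+ * The kernel sets arg.handled and writes the reply into arg.d.rep as it
+ * consumes each element, which keeps the chain restartable on -EAGAIN.
+ */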
+
+/**
+ * Clean the unfenced list and put the buffers on the regular LRU.
+ * This is part of the memory manager cleanup and should only be
+ * called with the DRI lock held.
+ * Call dev->struct_mutex locked.
+ */
+
+static void drm_bo_clean_unfenced(drm_device_t *dev)
+{
+	drm_buffer_manager_t *bm = &dev->bm;
+	struct list_head *head, *list;
+	drm_buffer_object_t *entry;
+
+	head = &bm->unfenced;
+
+	list = head->next;
+	while (list != head) {
+		prefetch(list->next);
+		entry = list_entry(list, drm_buffer_object_t, lru);
+
+		atomic_inc(&entry->usage);
+		mutex_unlock(&dev->struct_mutex);
+		mutex_lock(&entry->mutex);
+		mutex_lock(&dev->struct_mutex);
+
+		list_del(&entry->lru);
+		DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
+		drm_bo_add_to_lru(entry);
+		mutex_unlock(&entry->mutex);
+		list = head->next;
+	}
+}
+
+static int drm_bo_leave_list(drm_buffer_object_t * bo,
+			     uint32_t mem_type,
+			     int free_pinned, int allow_errors)
+{
+	drm_device_t *dev = bo->dev;
+	int ret = 0;
+
+	mutex_lock(&bo->mutex);
+
+	ret = drm_bo_expire_fence(bo, allow_errors);
+	if (ret)
+		goto out;
+
+	if (free_pinned) {
+		DRM_FLAG_MASKED(bo->mem.flags, 0, DRM_BO_FLAG_NO_MOVE);
+		mutex_lock(&dev->struct_mutex);
+		list_del_init(&bo->pinned_lru);
+		if (bo->pinned_node == bo->mem.mm_node)
+			bo->pinned_node = NULL;
+		if (bo->pinned_node != NULL) {
+			drm_mm_put_block(bo->pinned_node);
+			bo->pinned_node = NULL;
+		}
+		mutex_unlock(&dev->struct_mutex);
+	}
+
+	if (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) {
+		DRM_ERROR("A DRM_BO_FLAG_NO_EVICT buffer present at "
+			  "cleanup. Removing flag and evicting.\n");
+		bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT;
+		bo->mem.mask &= ~DRM_BO_FLAG_NO_EVICT;
+	}
+
+	if (bo->mem.mem_type == mem_type)
+		ret = drm_bo_evict(bo, mem_type, 0);
+
+	if (ret) {
+		if (allow_errors) {
+			goto out;
+		} else {
+			ret = 0;
+			DRM_ERROR("Cleanup eviction failed\n");
+		}
+	}
+
+      out:
+	mutex_unlock(&bo->mutex);
+	return ret;
+}
+
+static drm_buffer_object_t *drm_bo_entry(struct list_head *list,
+					 int pinned_list)
+{
+	if (pinned_list)
+		return list_entry(list, drm_buffer_object_t, pinned_lru);
+	else
+		return list_entry(list, drm_buffer_object_t, lru);
+}
+
+/*
+ * dev->struct_mutex locked.
+ */
+
+static int drm_bo_force_list_clean(drm_device_t * dev,
+				   struct list_head *head,
+				   unsigned mem_type,
+				   int free_pinned,
+				   int allow_errors,
+				   int pinned_list)
+{
+	struct list_head *list, *next, *prev;
+	drm_buffer_object_t *entry, *nentry;
+	int ret;
+	int do_restart;
+
+	/*
+	 * The list traversal is a bit odd here, because an item may
+	 * disappear from the list when we release the struct_mutex or
+	 * when we decrease the usage count. Also we're not guaranteed
+	 * to drain pinned lists, so we can't always restart.
+	 */
+
+restart:
+	nentry = NULL;
+	list_for_each_safe(list, next, head) {
+		prev = list->prev;
+
+		entry = (nentry != NULL) ? nentry : drm_bo_entry(list, pinned_list);
+		atomic_inc(&entry->usage);
+		if (nentry) {
+			atomic_dec(&nentry->usage);
+			nentry = NULL;
+		}
+
+		/*
+		 * Protect the next item from destruction, so we can check
+		 * its list pointers later on.
+		 */
+
+		if (next != head) {
+			nentry = drm_bo_entry(next, pinned_list);
+			atomic_inc(&nentry->usage);
+		}
+		mutex_unlock(&dev->struct_mutex);
+
+		ret = drm_bo_leave_list(entry, mem_type, free_pinned,
+					allow_errors);
+		mutex_lock(&dev->struct_mutex);
+
+		drm_bo_usage_deref_locked(entry);
+		if (ret)
+			return ret;
+
+		/*
+		 * Has the next item disappeared from the list?
+		 */
+
+		do_restart = ((next->prev != list) && (next->prev != prev));
+
+		if (nentry != NULL && do_restart) {
+			drm_bo_usage_deref_locked(nentry);
+			nentry = NULL;
+		}
+
+		if (do_restart)
+			goto restart;
+	}
+	return 0;
+}
+
+int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type)
+{
+	drm_buffer_manager_t *bm = &dev->bm;
+	drm_mem_type_manager_t *man = &bm->man[mem_type];
+	int ret = -EINVAL;
+
+	if (mem_type >= DRM_BO_MEM_TYPES) {
+		DRM_ERROR("Illegal memory type %d\n", mem_type);
+		return ret;
+	}
+
+	if (!man->has_type) {
+		DRM_ERROR("Trying to take down uninitialized "
+			  "memory manager type\n");
+		return ret;
+	}
+	man->use_type = 0;
+	man->has_type = 0;
+
+	ret = 0;
+	if (mem_type > 0) {
+
+		drm_bo_clean_unfenced(dev);
+		drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0);
+		drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1);
+
+		if (drm_mm_clean(&man->manager)) {
+			drm_mm_takedown(&man->manager);
+		} else {
+			ret = -EBUSY;
+		}
+	}
+
+	return ret;
+}
+
+/**
+ * Evict all buffers of a particular mem_type, but leave memory manager
+ * regions for NO_MOVE buffers intact. New buffers cannot be added at this
+ * point since we have the hardware lock.
+ */ + +static int drm_bo_lock_mm(drm_device_t * dev, unsigned mem_type) +{ + int ret; + drm_buffer_manager_t *bm = &dev->bm; + drm_mem_type_manager_t *man = &bm->man[mem_type]; + + if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) { + DRM_ERROR("Illegal memory manager memory type %u,\n", mem_type); + return -EINVAL; + } + + drm_bo_clean_unfenced(dev); + ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 0); + if (ret) + return ret; + ret = drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1, 1); + + return ret; +} + +int drm_bo_init_mm(drm_device_t * dev, + unsigned type, + unsigned long p_offset, unsigned long p_size) +{ + drm_buffer_manager_t *bm = &dev->bm; + int ret = -EINVAL; + drm_mem_type_manager_t *man; + + if (type >= DRM_BO_MEM_TYPES) { + DRM_ERROR("Illegal memory type %d\n", type); + return ret; + } + + man = &bm->man[type]; + if (man->has_type) { + DRM_ERROR("Memory manager already initialized for type %d\n", + type); + return ret; + } + + ret = dev->driver->bo_driver->init_mem_type(dev, type, man); + if (ret) + return ret; + + ret = 0; + if (type != DRM_BO_MEM_LOCAL) { + if (!p_size) { + DRM_ERROR("Zero size memory manager type %d\n", type); + return ret; + } + ret = drm_mm_init(&man->manager, p_offset, p_size); + if (ret) + return ret; + } + man->has_type = 1; + man->use_type = 1; + + INIT_LIST_HEAD(&man->lru); + INIT_LIST_HEAD(&man->pinned); + + return 0; +} +EXPORT_SYMBOL(drm_bo_init_mm); + +/* + * This is called from lastclose, so we don't need to bother about + * any clients still running when we set the initialized flag to zero. + */ + +int drm_bo_driver_finish(drm_device_t * dev) +{ + drm_buffer_manager_t *bm = &dev->bm; + int ret = 0; + unsigned i = DRM_BO_MEM_TYPES; + drm_mem_type_manager_t *man; + + mutex_lock(&dev->bm.init_mutex); + mutex_lock(&dev->struct_mutex); + + if (!bm->initialized) + goto out; + bm->initialized = 0; + + while (i--) { + man = &bm->man[i]; + if (man->has_type) { + man->use_type = 0; + if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i)) { + ret = -EBUSY; + DRM_ERROR("DRM memory manager type %d " + "is not clean.\n", i); + } + man->has_type = 0; + } + } + mutex_unlock(&dev->struct_mutex); + + if (!cancel_delayed_work(&bm->wq)) { + flush_scheduled_work(); + } + mutex_lock(&dev->struct_mutex); + drm_bo_delayed_delete(dev, 1); + if (list_empty(&bm->ddestroy)) { + DRM_DEBUG("Delayed destroy list was clean\n"); + } + if (list_empty(&bm->man[0].lru)) { + DRM_DEBUG("Swap list was clean\n"); + } + if (list_empty(&bm->man[0].pinned)) { + DRM_DEBUG("NO_MOVE list was clean\n"); + } + if (list_empty(&bm->unfenced)) { + DRM_DEBUG("Unfenced list was clean\n"); + } + out: + mutex_unlock(&dev->struct_mutex); + mutex_unlock(&dev->bm.init_mutex); + return ret; +} + +int drm_bo_driver_init(drm_device_t * dev) +{ + drm_bo_driver_t *driver = dev->driver->bo_driver; + drm_buffer_manager_t *bm = &dev->bm; + int ret = -EINVAL; + + mutex_lock(&dev->bm.init_mutex); + mutex_lock(&dev->struct_mutex); + if (!driver) + goto out_unlock; + + /* + * Initialize the system memory buffer type. + * Other types need to be driver / IOCTL initialized. 
+ */ + + ret = drm_bo_init_mm(dev, 0, 0, 0); + if (ret) + goto out_unlock; + + INIT_DELAYED_WORK(&bm->wq, drm_bo_delayed_workqueue); + + bm->initialized = 1; + bm->nice_mode = 1; + atomic_set(&bm->count, 0); + bm->cur_pages = 0; + INIT_LIST_HEAD(&bm->unfenced); + INIT_LIST_HEAD(&bm->ddestroy); + out_unlock: + mutex_unlock(&dev->struct_mutex); + mutex_unlock(&dev->bm.init_mutex); + return ret; +} + +EXPORT_SYMBOL(drm_bo_driver_init); + +int drm_mm_init_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + + int ret = 0; + drm_mm_init_arg_t arg; + drm_buffer_manager_t *bm = &dev->bm; + drm_bo_driver_t *driver = dev->driver->bo_driver; + + if (!driver) { + DRM_ERROR("Buffer objects are not supported by this driver\n"); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + + switch (arg.req.op) { + case mm_init: + ret = -EINVAL; + mutex_lock(&dev->bm.init_mutex); + mutex_lock(&dev->struct_mutex); + if (!bm->initialized) { + DRM_ERROR("DRM memory manager was not initialized.\n"); + break; + } + if (arg.req.mem_type == 0) { + DRM_ERROR + ("System memory buffers already initialized.\n"); + break; + } + ret = drm_bo_init_mm(dev, arg.req.mem_type, + arg.req.p_offset, arg.req.p_size); + break; + case mm_takedown: + LOCK_TEST_WITH_RETURN(dev, filp); + mutex_lock(&dev->bm.init_mutex); + mutex_lock(&dev->struct_mutex); + ret = -EINVAL; + if (!bm->initialized) { + DRM_ERROR("DRM memory manager was not initialized\n"); + break; + } + if (arg.req.mem_type == 0) { + DRM_ERROR("No takedown for System memory buffers.\n"); + break; + } + ret = 0; + if (drm_bo_clean_mm(dev, arg.req.mem_type)) { + DRM_ERROR("Memory manager type %d not clean. " + "Delaying takedown\n", arg.req.mem_type); + } + break; + case mm_lock: + LOCK_TEST_WITH_RETURN(dev, filp); + mutex_lock(&dev->bm.init_mutex); + mutex_lock(&dev->struct_mutex); + ret = drm_bo_lock_mm(dev, arg.req.mem_type); + break; + case mm_unlock: + LOCK_TEST_WITH_RETURN(dev, filp); + mutex_lock(&dev->bm.init_mutex); + mutex_lock(&dev->struct_mutex); + ret = 0; + break; + default: + DRM_ERROR("Function not implemented yet\n"); + return -EINVAL; + } + + mutex_unlock(&dev->struct_mutex); + mutex_unlock(&dev->bm.init_mutex); + if (ret) + return ret; + + DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); + return 0; +} + +/* + * buffer object vm functions. + */ + +int drm_mem_reg_is_pci(drm_device_t * dev, drm_bo_mem_reg_t * mem) +{ + drm_buffer_manager_t *bm = &dev->bm; + drm_mem_type_manager_t *man = &bm->man[mem->mem_type]; + + if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) { + if (mem->mem_type == DRM_BO_MEM_LOCAL) + return 0; + + if (man->flags & _DRM_FLAG_MEMTYPE_CMA) + return 0; + + if (mem->flags & DRM_BO_FLAG_CACHED) + return 0; + } + return 1; +} + +EXPORT_SYMBOL(drm_mem_reg_is_pci); + +/** + * \c Get the PCI offset for the buffer object memory. + * + * \param bo The buffer object. + * \param bus_base On return the base of the PCI region + * \param bus_offset On return the byte offset into the PCI region + * \param bus_size On return the byte size of the buffer object or zero if + * the buffer object memory is not accessible through a PCI region. + * \return Failure indication. + * + * Returns -EINVAL if the buffer object is currently not mappable. + * Otherwise returns zero. 
+ */ + +int drm_bo_pci_offset(drm_device_t * dev, + drm_bo_mem_reg_t * mem, + unsigned long *bus_base, + unsigned long *bus_offset, unsigned long *bus_size) +{ + drm_buffer_manager_t *bm = &dev->bm; + drm_mem_type_manager_t *man = &bm->man[mem->mem_type]; + + *bus_size = 0; + if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)) + return -EINVAL; + + if (drm_mem_reg_is_pci(dev, mem)) { + *bus_offset = mem->mm_node->start << PAGE_SHIFT; + *bus_size = mem->num_pages << PAGE_SHIFT; + *bus_base = man->io_offset; + } + + return 0; +} + +/** + * \c Kill all user-space virtual mappings of this buffer object. + * + * \param bo The buffer object. + * + * Call bo->mutex locked. + */ + +void drm_bo_unmap_virtual(drm_buffer_object_t * bo) +{ + drm_device_t *dev = bo->dev; + loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT; + loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT; + + unmap_mapping_range(dev->dev_mapping, offset, holelen, 1); +} + +static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo) +{ + drm_map_list_t *list = &bo->map_list; + drm_local_map_t *map; + drm_device_t *dev = bo->dev; + + if (list->user_token) { + drm_ht_remove_item(&dev->map_hash, &list->hash); + list->user_token = 0; + } + if (list->file_offset_node) { + drm_mm_put_block(list->file_offset_node); + list->file_offset_node = NULL; + } + + map = list->map; + if (!map) + return; + + drm_ctl_free(map, sizeof(*map), DRM_MEM_BUFOBJ); + list->map = NULL; + list->user_token = 0ULL; + drm_bo_usage_deref_locked(bo); +} + +static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo) +{ + drm_map_list_t *list = &bo->map_list; + drm_local_map_t *map; + drm_device_t *dev = bo->dev; + + list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ); + if (!list->map) + return -ENOMEM; + + map = list->map; + map->offset = 0; + map->type = _DRM_TTM; + map->flags = _DRM_REMOVABLE; + map->size = bo->mem.num_pages * PAGE_SIZE; + atomic_inc(&bo->usage); + map->handle = (void *)bo; + + list->file_offset_node = drm_mm_search_free(&dev->offset_manager, + bo->mem.num_pages, 0, 0); + + if (!list->file_offset_node) { + drm_bo_takedown_vm_locked(bo); + return -ENOMEM; + } + + list->file_offset_node = drm_mm_get_block(list->file_offset_node, + bo->mem.num_pages, 0); + + list->hash.key = list->file_offset_node->start; + if (drm_ht_insert_item(&dev->map_hash, &list->hash)) { + drm_bo_takedown_vm_locked(bo); + return -ENOMEM; + } + + list->user_token = ((drm_u64_t) list->hash.key) << PAGE_SHIFT; + + return 0; +} diff --git a/drivers/char/drm/drm_bo_move.c b/drivers/char/drm/drm_bo_move.c new file mode 100644 index 0000000..945f0d1 --- /dev/null +++ b/drivers/char/drm/drm_bo_move.c @@ -0,0 +1,395 @@ +/************************************************************************** + * + * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström
+ */
+
+#include "drmP.h"
+
+/**
+ * Free the old memory node unless it's a pinned region and we
+ * have not been requested to free also pinned regions.
+ */
+
+static void drm_bo_free_old_node(drm_buffer_object_t * bo)
+{
+	drm_bo_mem_reg_t *old_mem = &bo->mem;
+
+	if (old_mem->mm_node && (old_mem->mm_node != bo->pinned_node)) {
+		mutex_lock(&bo->dev->struct_mutex);
+		drm_mm_put_block(old_mem->mm_node);
+		old_mem->mm_node = NULL;
+		mutex_unlock(&bo->dev->struct_mutex);
+	}
+	old_mem->mm_node = NULL;
+}
+
+int drm_bo_move_ttm(drm_buffer_object_t * bo,
+		    int evict, int no_wait, drm_bo_mem_reg_t * new_mem)
+{
+	drm_ttm_t *ttm = bo->ttm;
+	drm_bo_mem_reg_t *old_mem = &bo->mem;
+	uint32_t save_flags = old_mem->flags;
+	uint32_t save_mask = old_mem->mask;
+	int ret;
+
+	if (old_mem->mem_type == DRM_BO_MEM_TT) {
+		if (evict)
+			drm_ttm_evict(ttm);
+		else
+			drm_ttm_unbind(ttm);
+
+		drm_bo_free_old_node(bo);
+		DRM_FLAG_MASKED(old_mem->flags,
+				DRM_BO_FLAG_CACHED | DRM_BO_FLAG_MAPPABLE |
+				DRM_BO_FLAG_MEM_LOCAL, DRM_BO_MASK_MEMTYPE);
+		old_mem->mem_type = DRM_BO_MEM_LOCAL;
+		save_flags = old_mem->flags;
+	}
+	if (new_mem->mem_type != DRM_BO_MEM_LOCAL) {
+		ret = drm_bind_ttm(ttm,
+				   new_mem->flags & DRM_BO_FLAG_CACHED,
+				   new_mem->mm_node->start);
+		if (ret)
+			return ret;
+	}
+
+	*old_mem = *new_mem;
+	new_mem->mm_node = NULL;
+	old_mem->mask = save_mask;
+	DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
+	return 0;
+}
+
+EXPORT_SYMBOL(drm_bo_move_ttm);
+
+/**
+ * \c Return a kernel virtual address to the buffer object PCI memory.
+ *
+ * \param dev The DRM device.
+ * \param mem The buffer object memory region.
+ * \param virtual On return, the kernel virtual address.
+ * \return Failure indication.
+ *
+ * Returns -EINVAL if the buffer object is currently not mappable.
+ * Returns -ENOMEM if the ioremap operation failed.
+ * Otherwise returns zero.
+ *
+ * After a successful call, *virtual contains the virtual address, or NULL
+ * if the buffer object memory is not accessible through a PCI region.
+ * Call bo->mutex locked.
+ */
+
+int drm_mem_reg_ioremap(drm_device_t * dev, drm_bo_mem_reg_t * mem,
+			void **virtual)
+{
+	drm_buffer_manager_t *bm = &dev->bm;
+	drm_mem_type_manager_t *man = &bm->man[mem->mem_type];
+	unsigned long bus_offset;
+	unsigned long bus_size;
+	unsigned long bus_base;
+	int ret;
+	void *addr;
+
+	*virtual = NULL;
+	ret = drm_bo_pci_offset(dev, mem, &bus_base, &bus_offset, &bus_size);
+	if (ret || bus_size == 0)
+		return ret;
+
+	if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP))
+		addr = (void *)(((u8 *) man->io_addr) + bus_offset);
+	else {
+		addr = ioremap_nocache(bus_base + bus_offset, bus_size);
+		if (!addr)
+			return -ENOMEM;
+	}
+	*virtual = addr;
+	return 0;
+}
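+
+/*
+ * Minimal usage sketch (illustrative, not called anywhere in this patch):
+ * every successful drm_mem_reg_ioremap() is balanced by a
+ * drm_mem_reg_iounmap() on the same region, e.g.:
+ *
+ *	void *virtual;
+ *
+ *	if (!drm_mem_reg_ioremap(dev, mem, &virtual) && virtual != NULL) {
+ *		... access the region through 'virtual' ...
+ *		drm_mem_reg_iounmap(dev, mem, virtual);
+ *	}
+ */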
+
+/**
+ * \c Unmap a mapping obtained using drm_mem_reg_ioremap().
+ *
+ * \param dev The DRM device.
+ * \param mem The buffer object memory region.
+ * \param virtual The mapping to unmap.
+ *
+ * Call bo->mutex locked.
+ */
+
+void drm_mem_reg_iounmap(drm_device_t * dev, drm_bo_mem_reg_t * mem,
+			 void *virtual)
+{
+	drm_buffer_manager_t *bm;
+	drm_mem_type_manager_t *man;
+
+	bm = &dev->bm;
+	man = &bm->man[mem->mem_type];
+
+	if (virtual && (man->flags & _DRM_FLAG_NEEDS_IOREMAP)) {
+		iounmap(virtual);
+	}
+}
+
+static int drm_copy_io_page(void *dst, void *src, unsigned long page)
+{
+	uint32_t *dstP =
+	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
+	uint32_t *srcP =
+	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
+
+	int i;
+	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
+		iowrite32(ioread32(srcP++), dstP++);
+	return 0;
+}
+
+static int drm_copy_io_ttm_page(drm_ttm_t * ttm, void *src, unsigned long page)
+{
+	struct page *d = drm_ttm_get_page(ttm, page);
+	void *dst;
+
+	if (!d)
+		return -ENOMEM;
+
+	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
+	dst = kmap(d);
+	if (!dst)
+		return -ENOMEM;
+
+	memcpy_fromio(dst, src, PAGE_SIZE);
+	kunmap(d);
+	return 0;
+}
+
+static int drm_copy_ttm_io_page(drm_ttm_t * ttm, void *dst, unsigned long page)
+{
+	struct page *s = drm_ttm_get_page(ttm, page);
+	void *src;
+
+	if (!s)
+		return -ENOMEM;
+
+	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
+	src = kmap(s);
+	if (!src)
+		return -ENOMEM;
+
+	memcpy_toio(dst, src, PAGE_SIZE);
+	kunmap(s);
+	return 0;
+}
+
+int drm_bo_move_memcpy(drm_buffer_object_t * bo,
+		       int evict, int no_wait, drm_bo_mem_reg_t * new_mem)
+{
+	drm_device_t *dev = bo->dev;
+	drm_mem_type_manager_t *man = &dev->bm.man[new_mem->mem_type];
+	drm_ttm_t *ttm = bo->ttm;
+	drm_bo_mem_reg_t *old_mem = &bo->mem;
+	drm_bo_mem_reg_t old_copy = *old_mem;
+	void *old_iomap;
+	void *new_iomap;
+	int ret;
+	uint32_t save_flags = old_mem->flags;
+	uint32_t save_mask = old_mem->mask;
+	unsigned long i;
+	unsigned long page;
+	unsigned long add = 0;
+	int dir;
+
+	ret = drm_mem_reg_ioremap(dev, old_mem, &old_iomap);
+	if (ret)
+		return ret;
+	ret = drm_mem_reg_ioremap(dev, new_mem, &new_iomap);
+	if (ret)
+		goto out;
+
+	if (old_iomap == NULL && new_iomap == NULL)
+		goto out2;
+	if (old_iomap == NULL && ttm == NULL)
+		goto out2;
+
+	add = 0;
+	dir = 1;
+
+	if ((old_mem->mem_type == new_mem->mem_type) &&
+	    (new_mem->mm_node->start <
+	     old_mem->mm_node->start + old_mem->mm_node->size)) {
+		dir = -1;
+		add = new_mem->num_pages - 1;
+	}
+
+	for (i = 0; i < new_mem->num_pages; ++i) {
+		page = i * dir + add;
+		if (old_iomap == NULL)
+			ret = drm_copy_ttm_io_page(ttm, new_iomap, page);
+		else if (new_iomap == NULL)
+			ret = drm_copy_io_ttm_page(ttm, old_iomap, page);
+		else
+			ret = drm_copy_io_page(new_iomap, old_iomap, page);
+		if (ret)
+			goto out1;
+	}
+	mb();
+      out2:
+	drm_bo_free_old_node(bo);
+
+	*old_mem = *new_mem;
+	new_mem->mm_node = NULL;
+	old_mem->mask = save_mask;
+	DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
+
+	if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (ttm != NULL)) {
+		drm_ttm_unbind(ttm);
+		drm_destroy_ttm(ttm);
+		bo->ttm = NULL;
+	}
+
+      out1:
+	drm_mem_reg_iounmap(dev, new_mem, new_iomap);
+      out:
+	drm_mem_reg_iounmap(dev, &old_copy, old_iomap);
+	return ret;
+}
+
+EXPORT_SYMBOL(drm_bo_move_memcpy);
+
+/*
+ * Transfer a buffer object's memory and LRU status to a newly
+ * created object. User-space references remain with the old
+ * object. Call bo->mutex locked.
+ */
+
+int drm_buffer_object_transfer(drm_buffer_object_t * bo,
+			       drm_buffer_object_t ** new_obj)
+{
+	drm_buffer_object_t *fbo;
+	drm_device_t *dev = bo->dev;
+	drm_buffer_manager_t *bm = &dev->bm;
+
+	fbo = drm_ctl_calloc(1, sizeof(*fbo), DRM_MEM_BUFOBJ);
+	if (!fbo)
+		return -ENOMEM;
+
+	*fbo = *bo;
+	mutex_init(&fbo->mutex);
+	mutex_lock(&fbo->mutex);
+	mutex_lock(&dev->struct_mutex);
+
+	DRM_INIT_WAITQUEUE(&fbo->event_queue);
+	INIT_LIST_HEAD(&fbo->ddestroy);
+	INIT_LIST_HEAD(&fbo->lru);
+	INIT_LIST_HEAD(&fbo->pinned_lru);
+
+	atomic_inc(&bo->fence->usage);
+	fbo->pinned_node = NULL;
+	fbo->mem.mm_node->private = (void *)fbo;
+	atomic_set(&fbo->usage, 1);
+	atomic_inc(&bm->count);
+	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&fbo->mutex);
+
+	*new_obj = fbo;
+	return 0;
+}
+
+/*
+ * Since move is underway, we need to block signals in this function.
+ * We cannot restart until it has finished.
+ */
+
+int drm_bo_move_accel_cleanup(drm_buffer_object_t * bo,
+			      int evict,
+			      int no_wait,
+			      uint32_t fence_class,
+			      uint32_t fence_type,
+			      uint32_t fence_flags, drm_bo_mem_reg_t * new_mem)
+{
+	drm_device_t *dev = bo->dev;
+	drm_mem_type_manager_t *man = &dev->bm.man[new_mem->mem_type];
+	drm_bo_mem_reg_t *old_mem = &bo->mem;
+	int ret;
+	uint32_t save_flags = old_mem->flags;
+	uint32_t save_mask = old_mem->mask;
+	drm_buffer_object_t *old_obj;
+
+	if (bo->fence)
+		drm_fence_usage_deref_unlocked(dev, bo->fence);
+	ret = drm_fence_object_create(dev, fence_class, fence_type,
+				      fence_flags | DRM_FENCE_FLAG_EMIT,
+				      &bo->fence);
+	if (ret)
+		return ret;
+
+	if (evict || ((bo->mem.mm_node == bo->pinned_node) &&
+		      bo->mem.mm_node != NULL)) {
+		ret = drm_bo_wait(bo, 0, 1, 0);
+		if (ret)
+			return ret;
+
+		drm_bo_free_old_node(bo);
+
+		if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm != NULL)) {
+			drm_ttm_unbind(bo->ttm);
+			drm_destroy_ttm(bo->ttm);
+			bo->ttm = NULL;
+		}
+	} else {
+
+		/* This should help pipeline ordinary buffer moves.
+		 *
+		 * Hang old buffer memory on a new buffer object,
+		 * and leave it to be released when the GPU
+		 * operation has completed.
+ */ + + ret = drm_buffer_object_transfer(bo, &old_obj); + + if (ret) + return ret; + + if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) + old_obj->ttm = NULL; + else + bo->ttm = NULL; + + mutex_lock(&dev->struct_mutex); + list_del_init(&old_obj->lru); + DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); + drm_bo_add_to_lru(old_obj); + + drm_bo_usage_deref_locked(old_obj); + mutex_unlock(&dev->struct_mutex); + + } + + *old_mem = *new_mem; + new_mem->mm_node = NULL; + old_mem->mask = save_mask; + DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE); + return 0; +} + +EXPORT_SYMBOL(drm_bo_move_accel_cleanup); diff --git a/drivers/char/drm/drm_bufs.c b/drivers/char/drm/drm_bufs.c index c113458..0e5b1e4 100644 --- a/drivers/char/drm/drm_bufs.c +++ b/drivers/char/drm/drm_bufs.c @@ -427,6 +427,8 @@ int drm_rmmap_locked(drm_device_t *dev, dmah.size = map->size; __drm_pci_free(dev, &dmah); break; + case _DRM_TTM: + BUG_ON(1); } drm_free(map, sizeof(*map), DRM_MEM_MAPS); diff --git a/drivers/char/drm/drm_drv.c b/drivers/char/drm/drm_drv.c index 26bec30..a2e5629 100644 --- a/drivers/char/drm/drm_drv.c +++ b/drivers/char/drm/drm_drv.c @@ -116,6 +116,10 @@ #endif [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] = {drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = {drm_wait_vblank, 0}, + [DRM_IOCTL_NR(DRM_IOCTL_FENCE)] = {drm_fence_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_IOCTL_BUFOBJ)] = {drm_bo_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_IOCTL_MM_INIT)] = {drm_mm_init_ioctl, + DRM_AUTH }, [DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW)] = {drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, }; @@ -140,6 +144,12 @@ int drm_lastclose(drm_device_t * dev) DRM_DEBUG("\n"); + /* + * We can't do much about this function failing. + */ + + drm_bo_driver_finish(dev); + if (dev->driver->lastclose) dev->driver->lastclose(dev); DRM_DEBUG("driver lastclose completed\n"); @@ -209,7 +219,7 @@ int drm_lastclose(drm_device_t * dev) if (dev->vmalist) { for (vma = dev->vmalist; vma; vma = vma_next) { vma_next = vma->next; - drm_free(vma, sizeof(*vma), DRM_MEM_VMAS); + drm_ctl_free(vma, sizeof(*vma), DRM_MEM_VMAS); } dev->vmalist = NULL; } @@ -246,6 +256,7 @@ int drm_lastclose(drm_device_t * dev) dev->lock.filp = NULL; wake_up_interruptible(&dev->lock.lock_queue); } + dev->dev_mapping = NULL; mutex_unlock(&dev->struct_mutex); DRM_DEBUG("lastclose completed\n"); @@ -273,8 +284,6 @@ int drm_init(struct drm_driver *driver) DRM_DEBUG("\n"); - drm_mem_init(); - for (i = 0; driver->pci_driver.id_table[i].vendor != 0; i++) { pid = (struct pci_device_id *)&driver->pci_driver.id_table[i]; @@ -310,11 +319,14 @@ static void drm_cleanup(drm_device_t * d } drm_lastclose(dev); + drm_fence_manager_takedown(dev); if (dev->maplist) { drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS); dev->maplist = NULL; drm_ht_remove(&dev->map_hash); + drm_mm_takedown(&dev->offset_manager); + drm_ht_remove(&dev->object_hash); } drm_ctxbitmap_cleanup(dev); @@ -378,8 +390,32 @@ static const struct file_operations drm_ static int __init drm_core_init(void) { - int ret = -ENOMEM; + int ret; + struct sysinfo si; + unsigned long avail_memctl_mem; + unsigned long max_memctl_mem; + + si_meminfo(&si); + + /* + * AGP only allows low / DMA32 memory ATM. 
+ */ + + avail_memctl_mem = si.totalram - si.totalhigh; + /* + * Avoid overflows + */ + + max_memctl_mem = 1UL << (32 - PAGE_SHIFT); + max_memctl_mem = (max_memctl_mem / si.mem_unit) * PAGE_SIZE; + + if (avail_memctl_mem >= max_memctl_mem) + avail_memctl_mem = max_memctl_mem; + + drm_init_memctl(avail_memctl_mem/2, avail_memctl_mem*3/4, si.mem_unit); + + ret = -ENOMEM; drm_cards_limit = (drm_cards_limit < DRM_MAX_MINOR + 1 ? drm_cards_limit : DRM_MAX_MINOR + 1); @@ -405,6 +441,8 @@ static int __init drm_core_init(void) goto err_p3; } + drm_mem_init(); + DRM_INFO("Initialized %s %d.%d.%d %s\n", CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE); return 0; diff --git a/drivers/char/drm/drm_fence.c b/drivers/char/drm/drm_fence.c new file mode 100644 index 0000000..6dd04a3 --- /dev/null +++ b/drivers/char/drm/drm_fence.c @@ -0,0 +1,661 @@ +/************************************************************************** + * + * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +/* + * Authors: Thomas Hellström + */ + +#include "drmP.h" + +/* + * Typically called by the IRQ handler. + */ + +void drm_fence_handler(drm_device_t * dev, uint32_t class, + uint32_t sequence, uint32_t type) +{ + int wake = 0; + uint32_t diff; + uint32_t relevant; + drm_fence_manager_t *fm = &dev->fm; + drm_fence_class_manager_t *fc = &fm->class[class]; + drm_fence_driver_t *driver = dev->driver->fence_driver; + struct list_head *head; + drm_fence_object_t *fence, *next; + int found = 0; + int is_exe = (type & DRM_FENCE_TYPE_EXE); + int ge_last_exe; + + + + diff = (sequence - fc->exe_flush_sequence) & driver->sequence_mask; + + if (fc->pending_exe_flush && is_exe && diff < driver->wrap_diff) + fc->pending_exe_flush = 0; + + diff = (sequence - fc->last_exe_flush) & driver->sequence_mask; + ge_last_exe = diff < driver->wrap_diff; + + if (ge_last_exe) + fc->pending_flush &= ~type; + + if (is_exe && ge_last_exe) { + fc->last_exe_flush = sequence; + } + + if (list_empty(&fc->ring)) + return; + + list_for_each_entry(fence, &fc->ring, ring) { + diff = (sequence - fence->sequence) & driver->sequence_mask; + if (diff > driver->wrap_diff) { + found = 1; + break; + } + } + + head = (found) ? 
&fence->ring : &fc->ring; + + list_for_each_entry_safe_reverse(fence, next, head, ring) { + if (&fence->ring == &fc->ring) + break; + + type |= fence->native_type; + relevant = type & fence->type; + + if ((fence->signaled | relevant) != fence->signaled) { + fence->signaled |= relevant; + DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n", + fence->base.hash.key, fence->signaled); + fence->submitted_flush |= relevant; + wake = 1; + } + + relevant = fence->flush_mask & + ~(fence->signaled | fence->submitted_flush); + + if (relevant) { + fc->pending_flush |= relevant; + fence->submitted_flush = fence->flush_mask; + } + + if (!(fence->type & ~fence->signaled)) { + DRM_DEBUG("Fence completely signaled 0x%08lx\n", + fence->base.hash.key); + list_del_init(&fence->ring); + } + + } + + if (wake) { + DRM_WAKEUP(&fc->fence_queue); + } +} + +EXPORT_SYMBOL(drm_fence_handler); + +static void drm_fence_unring(drm_device_t * dev, struct list_head *ring) +{ + drm_fence_manager_t *fm = &dev->fm; + unsigned long flags; + + write_lock_irqsave(&fm->lock, flags); + list_del_init(ring); + write_unlock_irqrestore(&fm->lock, flags); +} + +void drm_fence_usage_deref_locked(drm_device_t * dev, + drm_fence_object_t * fence) +{ + drm_fence_manager_t *fm = &dev->fm; + + if (atomic_dec_and_test(&fence->usage)) { + drm_fence_unring(dev, &fence->ring); + DRM_DEBUG("Destroyed a fence object 0x%08lx\n", + fence->base.hash.key); + atomic_dec(&fm->count); + drm_ctl_free(fence, sizeof(*fence), DRM_MEM_FENCE); + } +} + +void drm_fence_usage_deref_unlocked(drm_device_t * dev, + drm_fence_object_t * fence) +{ + drm_fence_manager_t *fm = &dev->fm; + + if (atomic_dec_and_test(&fence->usage)) { + mutex_lock(&dev->struct_mutex); + if (atomic_read(&fence->usage) == 0) { + drm_fence_unring(dev, &fence->ring); + atomic_dec(&fm->count); + drm_ctl_free(fence, sizeof(*fence), DRM_MEM_FENCE); + } + mutex_unlock(&dev->struct_mutex); + } +} + +static void drm_fence_object_destroy(drm_file_t * priv, + drm_user_object_t * base) +{ + drm_device_t *dev = priv->head->dev; + drm_fence_object_t *fence = + drm_user_object_entry(base, drm_fence_object_t, base); + + drm_fence_usage_deref_locked(dev, fence); +} + +static int fence_signaled(drm_device_t * dev, + drm_fence_object_t * fence, + uint32_t mask, int poke_flush) +{ + unsigned long flags; + int signaled; + drm_fence_manager_t *fm = &dev->fm; + drm_fence_driver_t *driver = dev->driver->fence_driver; + + if (poke_flush) + driver->poke_flush(dev, fence->class); + read_lock_irqsave(&fm->lock, flags); + signaled = + (fence->type & mask & fence->signaled) == (fence->type & mask); + read_unlock_irqrestore(&fm->lock, flags); + + return signaled; +} + +static void drm_fence_flush_exe(drm_fence_class_manager_t * fc, + drm_fence_driver_t * driver, uint32_t sequence) +{ + uint32_t diff; + + if (!fc->pending_exe_flush) { + fc->exe_flush_sequence = sequence; + fc->pending_exe_flush = 1; + } else { + diff = + (sequence - fc->exe_flush_sequence) & driver->sequence_mask; + if (diff < driver->wrap_diff) { + fc->exe_flush_sequence = sequence; + } + } +} + +int drm_fence_object_signaled(drm_fence_object_t * fence, + uint32_t type) +{ + return ((fence->signaled & type) == type); +} + +int drm_fence_object_flush(drm_device_t * dev, + drm_fence_object_t * fence, + uint32_t type) +{ + drm_fence_manager_t *fm = &dev->fm; + drm_fence_class_manager_t *fc = &fm->class[fence->class]; + drm_fence_driver_t *driver = dev->driver->fence_driver; + unsigned long flags; + + if (type & ~fence->type) { + DRM_ERROR("Flush trying to extend 
fence type, " + "0x%x, 0x%x\n", type, fence->type); + return -EINVAL; + } + + write_lock_irqsave(&fm->lock, flags); + fence->flush_mask |= type; + if (fence->submitted_flush == fence->signaled) { + if ((fence->type & DRM_FENCE_TYPE_EXE) && + !(fence->submitted_flush & DRM_FENCE_TYPE_EXE)) { + drm_fence_flush_exe(fc, driver, fence->sequence); + fence->submitted_flush |= DRM_FENCE_TYPE_EXE; + } else { + fc->pending_flush |= (fence->flush_mask & + ~fence->submitted_flush); + fence->submitted_flush = fence->flush_mask; + } + } + write_unlock_irqrestore(&fm->lock, flags); + driver->poke_flush(dev, fence->class); + return 0; +} + +/* + * Make sure old fence objects are signaled before their fence sequences are + * wrapped around and reused. + */ + +void drm_fence_flush_old(drm_device_t * dev, uint32_t class, uint32_t sequence) +{ + drm_fence_manager_t *fm = &dev->fm; + drm_fence_class_manager_t *fc = &fm->class[class]; + drm_fence_driver_t *driver = dev->driver->fence_driver; + uint32_t old_sequence; + unsigned long flags; + drm_fence_object_t *fence; + uint32_t diff; + + write_lock_irqsave(&fm->lock, flags); + old_sequence = (sequence - driver->flush_diff) & driver->sequence_mask; + diff = (old_sequence - fc->last_exe_flush) & driver->sequence_mask; + + if ((diff < driver->wrap_diff) && !fc->pending_exe_flush) { + fc->pending_exe_flush = 1; + fc->exe_flush_sequence = sequence - (driver->flush_diff / 2); + } + write_unlock_irqrestore(&fm->lock, flags); + + mutex_lock(&dev->struct_mutex); + read_lock_irqsave(&fm->lock, flags); + + if (list_empty(&fc->ring)) { + read_unlock_irqrestore(&fm->lock, flags); + mutex_unlock(&dev->struct_mutex); + return; + } + fence = list_entry(fc->ring.next, drm_fence_object_t, ring); + atomic_inc(&fence->usage); + mutex_unlock(&dev->struct_mutex); + diff = (old_sequence - fence->sequence) & driver->sequence_mask; + read_unlock_irqrestore(&fm->lock, flags); + if (diff < driver->wrap_diff) { + drm_fence_object_flush(dev, fence, fence->type); + } + drm_fence_usage_deref_unlocked(dev, fence); +} + +EXPORT_SYMBOL(drm_fence_flush_old); + +static int drm_fence_lazy_wait(drm_device_t *dev, + drm_fence_object_t *fence, + int ignore_signals, + uint32_t mask) +{ + drm_fence_manager_t *fm = &dev->fm; + drm_fence_class_manager_t *fc = &fm->class[fence->class]; + + unsigned long _end = jiffies + 3*DRM_HZ; + int ret = 0; + + do { + DRM_WAIT_ON(ret, fc->fence_queue, 3 * DRM_HZ, + fence_signaled(dev, fence, mask, 0)); + if (time_after_eq(jiffies, _end)) + break; + } while (ret == -EINTR && ignore_signals); + if (time_after_eq(jiffies, _end) && (ret != 0)) + ret = -EBUSY; + if (ret) { + if (ret == -EBUSY) { + DRM_ERROR("Fence timeout. " + "GPU lockup or fence driver was " + "taken down.\n"); + } + return ((ret == -EINTR) ? 
-EAGAIN : ret); + } + return 0; +} + +int drm_fence_object_wait(drm_device_t * dev, + drm_fence_object_t * fence, + int lazy, int ignore_signals, uint32_t mask) +{ + drm_fence_driver_t *driver = dev->driver->fence_driver; + int ret = 0; + unsigned long _end; + int signaled; + + if (mask & ~fence->type) { + DRM_ERROR("Wait trying to extend fence type" + " 0x%08x 0x%08x\n", mask, fence->type); + return -EINVAL; + } + + if (fence_signaled(dev, fence, mask, 0)) + return 0; + + _end = jiffies + 3 * DRM_HZ; + + drm_fence_object_flush(dev, fence, mask); + + if (lazy && driver->lazy_capable) { + + ret = drm_fence_lazy_wait(dev, fence, ignore_signals, mask); + if (ret) + return ret; + + } else { + + if (driver->has_irq(dev, fence->class, + DRM_FENCE_TYPE_EXE)) { + ret = drm_fence_lazy_wait(dev, fence, ignore_signals, + DRM_FENCE_TYPE_EXE); + if (ret) + return ret; + } + + if (driver->has_irq(dev, fence->class, + mask & ~DRM_FENCE_TYPE_EXE)) { + ret = drm_fence_lazy_wait(dev, fence, ignore_signals, + mask); + if (ret) + return ret; + } + } + if (drm_fence_object_signaled(fence, mask)) + return 0; + + /* + * Avoid kernel-space busy-waits. + */ +#if 1 + if (!ignore_signals) + return -EAGAIN; +#endif + do { + schedule(); + signaled = fence_signaled(dev, fence, mask, 1); + } while (!signaled && !time_after_eq(jiffies, _end)); + + if (!signaled) + return -EBUSY; + + return 0; +} + +int drm_fence_object_emit(drm_device_t * dev, drm_fence_object_t * fence, + uint32_t fence_flags, uint32_t class, uint32_t type) +{ + drm_fence_manager_t *fm = &dev->fm; + drm_fence_driver_t *driver = dev->driver->fence_driver; + drm_fence_class_manager_t *fc = &fm->class[fence->class]; + unsigned long flags; + uint32_t sequence; + uint32_t native_type; + int ret; + + drm_fence_unring(dev, &fence->ring); + ret = driver->emit(dev, class, fence_flags, &sequence, &native_type); + if (ret) + return ret; + + write_lock_irqsave(&fm->lock, flags); + fence->class = class; + fence->type = type; + fence->flush_mask = 0x00; + fence->submitted_flush = 0x00; + fence->signaled = 0x00; + fence->sequence = sequence; + fence->native_type = native_type; + if (list_empty(&fc->ring)) + fc->last_exe_flush = sequence - 1; + list_add_tail(&fence->ring, &fc->ring); + write_unlock_irqrestore(&fm->lock, flags); + return 0; +} + +static int drm_fence_object_init(drm_device_t * dev, uint32_t class, + uint32_t type, + uint32_t fence_flags, + drm_fence_object_t * fence) +{ + int ret = 0; + unsigned long flags; + drm_fence_manager_t *fm = &dev->fm; + + mutex_lock(&dev->struct_mutex); + atomic_set(&fence->usage, 1); + mutex_unlock(&dev->struct_mutex); + + write_lock_irqsave(&fm->lock, flags); + INIT_LIST_HEAD(&fence->ring); + fence->class = class; + fence->type = type; + fence->flush_mask = 0; + fence->submitted_flush = 0; + fence->signaled = 0; + fence->sequence = 0; + write_unlock_irqrestore(&fm->lock, flags); + if (fence_flags & DRM_FENCE_FLAG_EMIT) { + ret = drm_fence_object_emit(dev, fence, fence_flags, + fence->class, type); + } + return ret; +} + +int drm_fence_add_user_object(drm_file_t * priv, drm_fence_object_t * fence, + int shareable) +{ + drm_device_t *dev = priv->head->dev; + int ret; + + mutex_lock(&dev->struct_mutex); + ret = drm_add_user_object(priv, &fence->base, shareable); + mutex_unlock(&dev->struct_mutex); + if (ret) + return ret; + fence->base.type = drm_fence_type; + fence->base.remove = &drm_fence_object_destroy; + DRM_DEBUG("Fence 0x%08lx created\n", fence->base.hash.key); + return 0; +} + +EXPORT_SYMBOL(drm_fence_add_user_object); 
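+
+/*
+ * Illustrative lifecycle sketch, not exercised by this patch: a driver
+ * would typically create and emit a fence for a batch of commands, wait
+ * for it lazily, and then drop its reference. Assumes fence class 0 and
+ * EXE-type fences.
+ *
+ *	drm_fence_object_t *fence;
+ *	int ret;
+ *
+ *	ret = drm_fence_object_create(dev, 0, DRM_FENCE_TYPE_EXE,
+ *				      DRM_FENCE_FLAG_EMIT, &fence);
+ *	if (ret == 0) {
+ *		ret = drm_fence_object_wait(dev, fence, 1, 0,
+ *					    DRM_FENCE_TYPE_EXE);
+ *		drm_fence_usage_deref_unlocked(dev, fence);
+ *	}
+ */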
+
+int drm_fence_object_create(drm_device_t * dev, uint32_t class, uint32_t type,
+			    unsigned flags, drm_fence_object_t ** c_fence)
+{
+	drm_fence_object_t *fence;
+	int ret;
+	drm_fence_manager_t *fm = &dev->fm;
+
+	fence = drm_ctl_alloc(sizeof(*fence), DRM_MEM_FENCE);
+	if (!fence)
+		return -ENOMEM;
+	ret = drm_fence_object_init(dev, class, type, flags, fence);
+	if (ret) {
+		drm_fence_usage_deref_unlocked(dev, fence);
+		return ret;
+	}
+	*c_fence = fence;
+	atomic_inc(&fm->count);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(drm_fence_object_create);
+
+void drm_fence_manager_init(drm_device_t * dev)
+{
+	drm_fence_manager_t *fm = &dev->fm;
+	drm_fence_class_manager_t *class;
+	drm_fence_driver_t *fed = dev->driver->fence_driver;
+	int i;
+
+	fm->lock = RW_LOCK_UNLOCKED;
+	write_lock(&fm->lock);
+	fm->initialized = 0;
+	if (!fed)
+		goto out_unlock;
+
+	fm->initialized = 1;
+	fm->num_classes = fed->num_classes;
+	BUG_ON(fm->num_classes > _DRM_FENCE_CLASSES);
+
+	for (i = 0; i < fm->num_classes; ++i) {
+		class = &fm->class[i];
+
+		INIT_LIST_HEAD(&class->ring);
+		class->pending_flush = 0;
+		DRM_INIT_WAITQUEUE(&class->fence_queue);
+	}
+
+	atomic_set(&fm->count, 0);
+      out_unlock:
+	write_unlock(&fm->lock);
+}
+
+void drm_fence_manager_takedown(drm_device_t * dev)
+{
+}
+
+drm_fence_object_t *drm_lookup_fence_object(drm_file_t * priv, uint32_t handle)
+{
+	drm_device_t *dev = priv->head->dev;
+	drm_user_object_t *uo;
+	drm_fence_object_t *fence;
+
+	mutex_lock(&dev->struct_mutex);
+	uo = drm_lookup_user_object(priv, handle);
+	if (!uo || (uo->type != drm_fence_type)) {
+		mutex_unlock(&dev->struct_mutex);
+		return NULL;
+	}
+	fence = drm_user_object_entry(uo, drm_fence_object_t, base);
+	atomic_inc(&fence->usage);
+	mutex_unlock(&dev->struct_mutex);
+	return fence;
+}
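+
+/*
+ * Reference-counting note with a small sketch (illustrative):
+ * drm_lookup_fence_object() returns the fence with its usage count
+ * raised, so every successful lookup must be balanced by a deref,
+ * as drm_fence_ioctl() below does on all of its paths:
+ *
+ *	drm_fence_object_t *fence = drm_lookup_fence_object(priv, handle);
+ *
+ *	if (fence) {
+ *		... operate on the fence ...
+ *		drm_fence_usage_deref_unlocked(dev, fence);
+ *	}
+ */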
+
+int drm_fence_ioctl(DRM_IOCTL_ARGS)
+{
+	DRM_DEVICE;
+	int ret;
+	drm_fence_manager_t *fm = &dev->fm;
+	drm_fence_arg_t arg;
+	drm_fence_object_t *fence;
+	drm_user_object_t *uo;
+	unsigned long flags;
+	ret = 0;
+
+	if (!fm->initialized) {
+		DRM_ERROR("The DRM driver does not support fencing.\n");
+		return -EINVAL;
+	}
+
+	DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
+	switch (arg.op) {
+	case drm_fence_create:
+		if (arg.flags & DRM_FENCE_FLAG_EMIT)
+			LOCK_TEST_WITH_RETURN(dev, filp);
+		ret = drm_fence_object_create(dev, arg.class,
+					      arg.type, arg.flags, &fence);
+		if (ret)
+			return ret;
+		ret = drm_fence_add_user_object(priv, fence,
+						arg.flags &
+						DRM_FENCE_FLAG_SHAREABLE);
+		if (ret) {
+			drm_fence_usage_deref_unlocked(dev, fence);
+			return ret;
+		}
+
+		/*
+		 * usage > 0. No need to lock dev->struct_mutex;
+		 */
+
+		atomic_inc(&fence->usage);
+		arg.handle = fence->base.hash.key;
+		break;
+	case drm_fence_destroy:
+		mutex_lock(&dev->struct_mutex);
+		uo = drm_lookup_user_object(priv, arg.handle);
+		if (!uo || (uo->type != drm_fence_type) || uo->owner != priv) {
+			mutex_unlock(&dev->struct_mutex);
+			return -EINVAL;
+		}
+		ret = drm_remove_user_object(priv, uo);
+		mutex_unlock(&dev->struct_mutex);
+		return ret;
+	case drm_fence_reference:
+		ret =
+		    drm_user_object_ref(priv, arg.handle, drm_fence_type, &uo);
+		if (ret)
+			return ret;
+		fence = drm_lookup_fence_object(priv, arg.handle);
+		break;
+	case drm_fence_unreference:
+		ret = drm_user_object_unref(priv, arg.handle, drm_fence_type);
+		return ret;
+	case drm_fence_signaled:
+		fence = drm_lookup_fence_object(priv, arg.handle);
+		if (!fence)
+			return -EINVAL;
+		break;
+	case drm_fence_flush:
+		fence = drm_lookup_fence_object(priv, arg.handle);
+		if (!fence)
+			return -EINVAL;
+		ret = drm_fence_object_flush(dev, fence, arg.type);
+		break;
+	case drm_fence_wait:
+		fence = drm_lookup_fence_object(priv, arg.handle);
+		if (!fence)
+			return -EINVAL;
+		ret =
+		    drm_fence_object_wait(dev, fence,
+					  arg.flags & DRM_FENCE_FLAG_WAIT_LAZY,
+					  0, arg.type);
+		break;
+	case drm_fence_emit:
+		LOCK_TEST_WITH_RETURN(dev, filp);
+		fence = drm_lookup_fence_object(priv, arg.handle);
+		if (!fence)
+			return -EINVAL;
+		ret = drm_fence_object_emit(dev, fence, arg.flags, arg.class,
+					    arg.type);
+		break;
+	case drm_fence_buffers:
+		if (!dev->bm.initialized) {
+			DRM_ERROR("Buffer object manager is not initialized\n");
+			return -EINVAL;
+		}
+		LOCK_TEST_WITH_RETURN(dev, filp);
+		ret = drm_fence_buffer_objects(priv, NULL, arg.flags,
+					       NULL, &fence);
+		if (ret)
+			return ret;
+		ret = drm_fence_add_user_object(priv, fence,
+						arg.flags &
+						DRM_FENCE_FLAG_SHAREABLE);
+		if (ret)
+			return ret;
+		atomic_inc(&fence->usage);
+		arg.handle = fence->base.hash.key;
+		break;
+	default:
+		return -EINVAL;
+	}
+	read_lock_irqsave(&fm->lock, flags);
+	arg.class = fence->class;
+	arg.type = fence->type;
+	arg.signaled = fence->signaled;
+	read_unlock_irqrestore(&fm->lock, flags);
+	drm_fence_usage_deref_unlocked(dev, fence);
+
+	DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
+	return ret;
+}
diff --git a/drivers/char/drm/drm_fops.c b/drivers/char/drm/drm_fops.c
index 314abd9..3e8f823 100644
--- a/drivers/char/drm/drm_fops.c
+++ b/drivers/char/drm/drm_fops.c
@@ -158,6 +158,12 @@ int drm_open(struct inode *inode, struct
 		}
 		spin_unlock(&dev->count_lock);
 	}
+	mutex_lock(&dev->struct_mutex);
+	BUG_ON((dev->dev_mapping != NULL) &&
+	       (dev->dev_mapping != inode->i_mapping));
+	if (dev->dev_mapping == NULL)
+		dev->dev_mapping = inode->i_mapping;
+	mutex_unlock(&dev->struct_mutex);
 
 	return retcode;
 }
@@ -235,6 +241,7 @@ static int drm_open_helper(struct inode
 	int minor = iminor(inode);
 	drm_file_t *priv;
 	int ret;
+	int i, j;
 
 	if (filp->f_flags & O_EXCL)
 		return -EBUSY;	/* No exclusive opens */
@@ -258,6 +265,22 @@ static int drm_open_helper(struct inode
 	priv->authenticated = capable(CAP_SYS_ADMIN);
 	priv->lock_count = 0;
 
+	INIT_LIST_HEAD(&priv->user_objects);
+	INIT_LIST_HEAD(&priv->refd_objects);
+
+	for (i = 0; i < _DRM_NO_REF_TYPES; ++i) {
+		ret = drm_ht_create(&priv->refd_object_hash[i], DRM_FILE_HASH_ORDER);
+		if (ret)
+			break;
+	}
+
+	if (ret) {
+		for (j = 0; j < i; ++j) {
+			drm_ht_remove(&priv->refd_object_hash[j]);
+		}
+		goto out_free;
+	}
+
 	if (dev->driver->open) {
 		ret = dev->driver->open(dev, priv);
 		if (ret < 0)
@@ -322,6 +345,53 @@ int drm_fasync(int fd, struct file *filp
 }
EXPORT_SYMBOL(drm_fasync); +static void drm_object_release(struct file *filp) { + + drm_file_t *priv = filp->private_data; + struct list_head *head; + drm_user_object_t *user_object; + drm_ref_object_t *ref_object; + int i; + + /* + * Free leftover ref objects created by me. Note that we cannot use + * list_for_each() here, as the struct_mutex may be temporarily released + * by the remove_() functions, and thus the lists may be altered. + * Also, a drm_remove_ref_object() will not remove it + * from the list unless its refcount is 1. + */ + + head = &priv->refd_objects; + while (head->next != head) { + ref_object = list_entry(head->next, drm_ref_object_t, list); + drm_remove_ref_object(priv, ref_object); + head = &priv->refd_objects; + } + + /* + * Free leftover user objects created by me. + */ + + head = &priv->user_objects; + while (head->next != head) { + user_object = list_entry(head->next, drm_user_object_t, list); + drm_remove_user_object(priv, user_object); + head = &priv->user_objects; + } + + + + + for(i=0; i<_DRM_NO_REF_TYPES; ++i) { + drm_ht_remove(&priv->refd_object_hash[i]); + } +} + + + + + + /** * Release file. * @@ -356,58 +426,56 @@ int drm_release(struct inode *inode, str current->pid, (long)old_encode_dev(priv->head->device), dev->open_count); - if (priv->lock_count && dev->lock.hw_lock && - _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) && - dev->lock.filp == filp) { - DRM_DEBUG("File %p released, freeing lock for context %d\n", - filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); - - if (dev->driver->reclaim_buffers_locked) + if (dev->driver->reclaim_buffers_locked && dev->lock.hw_lock) { + if (drm_i_have_hw_lock(filp)) { dev->driver->reclaim_buffers_locked(dev, filp); - - drm_lock_free(dev, &dev->lock.hw_lock->lock, - _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); - - /* FIXME: may require heavy-handed reset of - hardware at this point, possibly - processed via a callback to the X - server. */ - } else if (dev->driver->reclaim_buffers_locked && priv->lock_count - && dev->lock.hw_lock) { - /* The lock is required to reclaim buffers */ - DECLARE_WAITQUEUE(entry, current); - - add_wait_queue(&dev->lock.lock_queue, &entry); - for (;;) { - __set_current_state(TASK_INTERRUPTIBLE); - if (!dev->lock.hw_lock) { - /* Device has been unregistered */ - retcode = -EINTR; - break; + } else { + unsigned long _end=jiffies + 3*DRM_HZ; + int locked = 0; + + drm_idlelock_take(&dev->lock); + + /* + * Wait for a while. + */ + + do{ + spin_lock(&dev->lock.spinlock); + locked = dev->lock.idle_has_lock; + spin_unlock(&dev->lock.spinlock); + if (locked) + break; + schedule(); + } while (!time_after_eq(jiffies, _end)); + + if (!locked) { + DRM_ERROR("reclaim_buffers_locked() deadlock. 
Please rework this\n" + "\tdriver to use reclaim_buffers_idlelocked() instead.\n" + "\tI will go on reclaiming the buffers anyway.\n"); } - if (drm_lock_take(&dev->lock.hw_lock->lock, - DRM_KERNEL_CONTEXT)) { - dev->lock.filp = filp; - dev->lock.lock_time = jiffies; - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]); - break; /* Got lock */ - } - /* Contention */ - schedule(); - if (signal_pending(current)) { - retcode = -ERESTARTSYS; - break; - } - } - __set_current_state(TASK_RUNNING); - remove_wait_queue(&dev->lock.lock_queue, &entry); - if (!retcode) { + dev->driver->reclaim_buffers_locked(dev, filp); - drm_lock_free(dev, &dev->lock.hw_lock->lock, - DRM_KERNEL_CONTEXT); + drm_idlelock_release(&dev->lock); } } + if (dev->driver->reclaim_buffers_idlelocked && dev->lock.hw_lock) { + + drm_idlelock_take(&dev->lock); + dev->driver->reclaim_buffers_idlelocked(dev, filp); + drm_idlelock_release(&dev->lock); + + } + + if (drm_i_have_hw_lock(filp)) { + DRM_DEBUG("File %p released, freeing lock for context %d\n", + filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); + + drm_lock_free(&dev->lock, + _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); + } + + if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) && !dev->driver->reclaim_buffers_locked) { dev->driver->reclaim_buffers(dev, filp); @@ -437,6 +505,7 @@ int drm_release(struct inode *inode, str mutex_unlock(&dev->ctxlist_mutex); mutex_lock(&dev->struct_mutex); + drm_object_release(filp); if (priv->remove_auth_on_close == 1) { drm_file_t *temp = dev->file_first; while (temp) { diff --git a/drivers/char/drm/drm_irq.c b/drivers/char/drm/drm_irq.c index 9d00c51..6791990 100644 --- a/drivers/char/drm/drm_irq.c +++ b/drivers/char/drm/drm_irq.c @@ -119,6 +119,7 @@ static int drm_irq_install(drm_device_t init_waitqueue_head(&dev->vbl_queue); spin_lock_init(&dev->vbl_lock); + spin_lock_init(&dev->tasklet_lock); INIT_LIST_HEAD(&dev->vbl_sigs.head); INIT_LIST_HEAD(&dev->vbl_sigs2.head); @@ -424,7 +425,7 @@ static void drm_locked_tasklet_func(unsi spin_lock_irqsave(&dev->tasklet_lock, irqflags); if (!dev->locked_tasklet_func || - !drm_lock_take(&dev->lock.hw_lock->lock, + !drm_lock_take(&dev->lock, DRM_KERNEL_CONTEXT)) { spin_unlock_irqrestore(&dev->tasklet_lock, irqflags); return; @@ -435,7 +436,7 @@ static void drm_locked_tasklet_func(unsi dev->locked_tasklet_func(dev); - drm_lock_free(dev, &dev->lock.hw_lock->lock, + drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT); dev->locked_tasklet_func = NULL; diff --git a/drivers/char/drm/drm_lock.c b/drivers/char/drm/drm_lock.c index e9993ba..af1b103 100644 --- a/drivers/char/drm/drm_lock.c +++ b/drivers/char/drm/drm_lock.c @@ -35,9 +35,6 @@ #include "drmP.h" -static int drm_lock_transfer(drm_device_t * dev, - __volatile__ unsigned int *lock, - unsigned int context); static int drm_notifier(void *priv); /** @@ -80,6 +77,9 @@ int drm_lock(struct inode *inode, struct return -EINVAL; add_wait_queue(&dev->lock.lock_queue, &entry); + spin_lock(&dev->lock.spinlock); + dev->lock.user_waiters++; + spin_unlock(&dev->lock.spinlock); for (;;) { __set_current_state(TASK_INTERRUPTIBLE); if (!dev->lock.hw_lock) { @@ -87,7 +87,7 @@ int drm_lock(struct inode *inode, struct ret = -EINTR; break; } - if (drm_lock_take(&dev->lock.hw_lock->lock, lock.context)) { + if (drm_lock_take(&dev->lock, lock.context)) { dev->lock.filp = filp; dev->lock.lock_time = jiffies; atomic_inc(&dev->counts[_DRM_STAT_LOCKS]); @@ -101,12 +101,14 @@ int drm_lock(struct inode *inode, struct break; } } + spin_lock(&dev->lock.spinlock); + dev->lock.user_waiters--; + 
+ spin_unlock(&dev->lock.spinlock);
 __set_current_state(TASK_RUNNING);
 remove_wait_queue(&dev->lock.lock_queue, &entry);
 
 DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock");
 if (ret)
 return ret;
 
 sigemptyset(&dev->sigmask);
 sigaddset(&dev->sigmask, SIGSTOP);
@@ -122,7 +124,7 @@ int drm_lock(struct inode *inode, struct
 if (dev->driver->dma_quiescent && (lock.flags & _DRM_LOCK_QUIESCENT)) {
 if (dev->driver->dma_quiescent(dev)) {
 DRM_DEBUG("%d waiting for DMA quiescent\n", lock.context);
 return DRM_ERR(EBUSY);
 }
 }
@@ -184,12 +186,8 @@ int drm_unlock(struct inode *inode, stru
 if (dev->driver->kernel_context_switch_unlock)
 dev->driver->kernel_context_switch_unlock(dev);
 else {
- drm_lock_transfer(dev, &dev->lock.hw_lock->lock,
- DRM_KERNEL_CONTEXT);
-
- if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
- DRM_KERNEL_CONTEXT)) {
- DRM_ERROR("\n");
+ if (drm_lock_free(&dev->lock, lock.context)) {
+ /* FIXME: Should really bail out here. */
 }
 }
 
@@ -206,18 +204,26 @@ int drm_unlock(struct inode *inode, stru
 *
 * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
 */
-int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)
+int drm_lock_take(drm_lock_data_t *lock_data,
+ unsigned int context)
 {
 unsigned int old, new, prev;
+ volatile unsigned int *lock = &lock_data->hw_lock->lock;
 
+ spin_lock(&lock_data->spinlock);
 do {
 old = *lock;
 if (old & _DRM_LOCK_HELD)
 new = old | _DRM_LOCK_CONT;
- else
- new = context | _DRM_LOCK_HELD;
+ else {
+ new = context | _DRM_LOCK_HELD |
+ ((lock_data->user_waiters + lock_data->kernel_waiters > 1) ?
+ _DRM_LOCK_CONT : 0);
+ }
 prev = cmpxchg(lock, old, new);
 } while (prev != old);
+ spin_unlock(&lock_data->spinlock);
+
 if (_DRM_LOCKING_CONTEXT(old) == context) {
 if (old & _DRM_LOCK_HELD) {
 if (context != DRM_KERNEL_CONTEXT) {
@@ -227,8 +233,10 @@ int drm_lock_take(__volatile__ unsigned
 return 0;
 }
 }
- if (new == (context | _DRM_LOCK_HELD)) {
+
+ if (_DRM_LOCKING_CONTEXT(new) == context && (new & _DRM_LOCK_HELD)) {
 /* Have lock */
 return 1;
 }
 return 0;
@@ -246,13 +254,13 @@ int drm_lock_take(__volatile__ unsigned
 * Resets the lock file pointer.
 * Marks the lock as held by the given context, via the \p cmpxchg instruction.
 */
-static int drm_lock_transfer(drm_device_t * dev,
- __volatile__ unsigned int *lock,
+static int drm_lock_transfer(drm_lock_data_t *lock_data,
 unsigned int context)
 {
 unsigned int old, new, prev;
+ volatile unsigned int *lock = &lock_data->hw_lock->lock;
 
- dev->lock.filp = NULL;
+ lock_data->filp = NULL;
 do {
 old = *lock;
 new = context | _DRM_LOCK_HELD;
@@ -272,23 +280,32 @@ static int drm_lock_transfer(drm_device_
 * Marks the lock as not held, via the \p cmpxchg instruction. Wakes any task
 * waiting on the lock queue.
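 *
 * If a kernel waiter is queued (see drm_idlelock_take() below), the lock is
 * transferred to the kernel context instead of being dropped, so that a
 * pending GPU-idle path gets priority. The intended caller pattern, taken
 * from drm_release() above (illustrative only):
 *
 *	drm_idlelock_take(&dev->lock);
 *	dev->driver->reclaim_buffers_idlelocked(dev, filp);
 *	drm_idlelock_release(&dev->lock);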
*/ -int drm_lock_free(drm_device_t * dev, - __volatile__ unsigned int *lock, unsigned int context) +int drm_lock_free(drm_lock_data_t *lock_data, unsigned int context) { unsigned int old, new, prev; + volatile unsigned int *lock = &lock_data->hw_lock->lock; + + spin_lock(&lock_data->spinlock); + if (lock_data->kernel_waiters != 0) { + drm_lock_transfer(lock_data, 0); + lock_data->idle_has_lock = 1; + spin_unlock(&lock_data->spinlock); + return 1; + } + spin_unlock(&lock_data->spinlock); - dev->lock.filp = NULL; do { old = *lock; - new = 0; + new = _DRM_LOCKING_CONTEXT(old); prev = cmpxchg(lock, old, new); } while (prev != old); + if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) { DRM_ERROR("%d freed heavyweight lock held by %d\n", context, _DRM_LOCKING_CONTEXT(old)); return 1; } - wake_up_interruptible(&dev->lock.lock_queue); + wake_up_interruptible(&lock_data->lock_queue); return 0; } @@ -322,3 +339,67 @@ static int drm_notifier(void *priv) } while (prev != old); return 0; } + +/** + * This function returns immediately and takes the hw lock + * with the kernel context if it is free, otherwise it gets the highest priority when and if + * it is eventually released. + * + * This guarantees that the kernel will _eventually_ have the lock _unless_ it is held + * by a blocked process. (In the latter case an explicit wait for the hardware lock would cause + * a deadlock, which is why the "idlelock" was invented). + * + * This should be sufficient to wait for GPU idle without + * having to worry about starvation. + */ + +void drm_idlelock_take(drm_lock_data_t *lock_data) +{ + int ret = 0; + + spin_lock(&lock_data->spinlock); + lock_data->kernel_waiters++; + if (!lock_data->idle_has_lock) { + + spin_unlock(&lock_data->spinlock); + ret = drm_lock_take(lock_data, DRM_KERNEL_CONTEXT); + spin_lock(&lock_data->spinlock); + + if (ret == 1) + lock_data->idle_has_lock = 1; + } + spin_unlock(&lock_data->spinlock); +} +EXPORT_SYMBOL(drm_idlelock_take); + +void drm_idlelock_release(drm_lock_data_t *lock_data) +{ + unsigned int old, prev; + volatile unsigned int *lock = &lock_data->hw_lock->lock; + + spin_lock(&lock_data->spinlock); + if (--lock_data->kernel_waiters == 0) { + if (lock_data->idle_has_lock) { + do { + old = *lock; + prev = cmpxchg(lock, old, DRM_KERNEL_CONTEXT); + } while (prev != old); + wake_up_interruptible(&lock_data->lock_queue); + lock_data->idle_has_lock = 0; + } + } + spin_unlock(&lock_data->spinlock); +} +EXPORT_SYMBOL(drm_idlelock_release); + + +int drm_i_have_hw_lock(struct file *filp) +{ + DRM_DEVICE; + + return (priv->lock_count && dev->lock.hw_lock && + _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) && + dev->lock.filp == filp); +} + +EXPORT_SYMBOL(drm_i_have_hw_lock); diff --git a/drivers/char/drm/drm_memory.c b/drivers/char/drm/drm_memory.c index 92a8670..e8c3e50 100644 --- a/drivers/char/drm/drm_memory.c +++ b/drivers/char/drm/drm_memory.c @@ -36,6 +36,75 @@ #include #include "drmP.h" +static struct { + spinlock_t lock; + drm_u64_t cur_used; + drm_u64_t low_threshold; + drm_u64_t high_threshold; +} drm_memctl = { + .lock = SPIN_LOCK_UNLOCKED +}; + +static inline size_t drm_size_align(size_t size) { + + register size_t tmpSize = 4; + if (size > PAGE_SIZE) + return PAGE_ALIGN(size); + + while(tmpSize < size) + tmpSize <<= 1; + + return (size_t) tmpSize; +} + +int drm_alloc_memctl(size_t size) +{ + int ret; + unsigned long a_size = drm_size_align(size); + + spin_lock(&drm_memctl.lock); + ret = ((drm_memctl.cur_used + a_size) > drm_memctl.high_threshold) ? 
+ -ENOMEM : 0; + if (!ret) + drm_memctl.cur_used += a_size; + spin_unlock(&drm_memctl.lock); + return ret; +} +EXPORT_SYMBOL(drm_alloc_memctl); + +void drm_free_memctl(size_t size) +{ + unsigned long a_size = drm_size_align(size); + + spin_lock(&drm_memctl.lock); + drm_memctl.cur_used -= a_size; + spin_unlock(&drm_memctl.lock); +} +EXPORT_SYMBOL(drm_free_memctl); + +void drm_query_memctl(drm_u64_t *cur_used, + drm_u64_t *low_threshold, + drm_u64_t *high_threshold) +{ + spin_lock(&drm_memctl.lock); + *cur_used = drm_memctl.cur_used; + *low_threshold = drm_memctl.low_threshold; + *high_threshold = drm_memctl.high_threshold; + spin_unlock(&drm_memctl.lock); +} +EXPORT_SYMBOL(drm_query_memctl); + +void drm_init_memctl(size_t p_low_threshold, + size_t p_high_threshold, + size_t unit_size) +{ + spin_lock(&drm_memctl.lock); + drm_memctl.cur_used = 0; + drm_memctl.low_threshold = p_low_threshold * unit_size; + drm_memctl.high_threshold = p_high_threshold * unit_size; + spin_unlock(&drm_memctl.lock); +} + #ifdef DEBUG_MEMORY #include "drm_memory_debug.h" #else diff --git a/drivers/char/drm/drm_mm.c b/drivers/char/drm/drm_mm.c index 2ec1d9f..5f18b24 100644 --- a/drivers/char/drm/drm_mm.c +++ b/drivers/char/drm/drm_mm.c @@ -217,6 +217,7 @@ void drm_mm_put_block(drm_mm_node_t * cu drm_free(cur, sizeof(*cur), DRM_MEM_MM); } } +EXPORT_SYMBOL(drm_mm_put_block); drm_mm_node_t *drm_mm_search_free(const drm_mm_t * mm, unsigned long size, diff --git a/drivers/char/drm/drm_object.c b/drivers/char/drm/drm_object.c new file mode 100644 index 0000000..939cf0d --- /dev/null +++ b/drivers/char/drm/drm_object.c @@ -0,0 +1,289 @@ +/************************************************************************** + * + * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström
+ */
+
+#include "drmP.h"
+
+int drm_add_user_object(drm_file_t * priv, drm_user_object_t * item,
+ int shareable)
+{
+ drm_device_t *dev = priv->head->dev;
+ int ret;
+
+ atomic_set(&item->refcount, 1);
+ item->shareable = shareable;
+ item->owner = priv;
+
+ ret = drm_ht_just_insert_please(&dev->object_hash, &item->hash,
+ (unsigned long)item, 32, 0, 0);
+ if (ret)
+ return ret;
+
+ list_add_tail(&item->list, &priv->user_objects);
+ return 0;
+}
+
+drm_user_object_t *drm_lookup_user_object(drm_file_t * priv, uint32_t key)
+{
+ drm_device_t *dev = priv->head->dev;
+ drm_hash_item_t *hash;
+ int ret;
+ drm_user_object_t *item;
+
+ ret = drm_ht_find_item(&dev->object_hash, key, &hash);
+ if (ret)
+ return NULL;
+
+ item = drm_hash_entry(hash, drm_user_object_t, hash);
+
+ if (priv != item->owner) {
+ drm_open_hash_t *ht = &priv->refd_object_hash[_DRM_REF_USE];
+ ret = drm_ht_find_item(ht, (unsigned long)item, &hash);
+ if (ret) {
+ DRM_ERROR("Object not registered for usage\n");
+ return NULL;
+ }
+ }
+ return item;
+}
+
+static void drm_deref_user_object(drm_file_t * priv, drm_user_object_t * item)
+{
+ drm_device_t *dev = priv->head->dev;
+ int ret;
+
+ if (atomic_dec_and_test(&item->refcount)) {
+ ret = drm_ht_remove_item(&dev->object_hash, &item->hash);
+ BUG_ON(ret);
+ list_del_init(&item->list);
+ item->remove(priv, item);
+ }
+}
+
+int drm_remove_user_object(drm_file_t * priv, drm_user_object_t * item)
+{
+ if (item->owner != priv) {
+ DRM_ERROR("Cannot destroy object not owned by you.\n");
+ return -EINVAL;
+ }
+ item->owner = NULL;
+ item->shareable = 0;
+ list_del_init(&item->list);
+ drm_deref_user_object(priv, item);
+ return 0;
+}
+
+static int drm_object_ref_action(drm_file_t * priv, drm_user_object_t * ro,
+ drm_ref_t action)
+{
+ int ret = 0;
+
+ switch (action) {
+ case _DRM_REF_USE:
+ atomic_inc(&ro->refcount);
+ break;
+ default:
+ if (ro->ref_struct_locked)
+ ro->ref_struct_locked(priv, ro, action);
+ break;
+ }
+ return ret;
+}
+
+int drm_add_ref_object(drm_file_t * priv, drm_user_object_t * referenced_object,
+ drm_ref_t ref_action)
+{
+ int ret = 0;
+ drm_ref_object_t *item;
+ drm_open_hash_t *ht = &priv->refd_object_hash[ref_action];
+
+ if (!referenced_object->shareable && priv != referenced_object->owner) {
+ DRM_ERROR("Not allowed to reference this object\n");
+ return -EINVAL;
+ }
+
+ /*
+ * If this is not a usage reference, check that a usage reference has
+ * been registered first. Otherwise strange things may happen on
+ * destruction.
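+ *
+ * For a shared object that this client does not own, the expected call
+ * sequence is therefore (illustrative only):
+ *
+ *	drm_add_ref_object(priv, uo, _DRM_REF_USE);
+ *	drm_add_ref_object(priv, uo, ref_action);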
+ */
+
+ if ((ref_action != _DRM_REF_USE) && priv != referenced_object->owner) {
+ item = drm_lookup_ref_object(priv, referenced_object,
+ _DRM_REF_USE);
+ if (!item) {
+ DRM_ERROR("Object not registered for usage by this client\n");
+ return -EINVAL;
+ }
+ }
+
+ item = drm_lookup_ref_object(priv, referenced_object, ref_action);
+ if (item != NULL) {
+ atomic_inc(&item->refcount);
+ return drm_object_ref_action(priv, referenced_object,
+ ref_action);
+ }
+
+ item = drm_ctl_calloc(1, sizeof(*item), DRM_MEM_OBJECTS);
+ if (item == NULL) {
+ DRM_ERROR("Could not allocate reference object\n");
+ return -ENOMEM;
+ }
+
+ atomic_set(&item->refcount, 1);
+ item->hash.key = (unsigned long)referenced_object;
+ item->unref_action = ref_action;
+
+ ret = drm_ht_insert_item(ht, &item->hash);
+ if (ret) {
+ drm_ctl_free(item, sizeof(*item), DRM_MEM_OBJECTS);
+ return ret;
+ }
+
+ list_add(&item->list, &priv->refd_objects);
+ ret = drm_object_ref_action(priv, referenced_object, ref_action);
+ return ret;
+}
+
+drm_ref_object_t *drm_lookup_ref_object(drm_file_t * priv,
+ drm_user_object_t * referenced_object,
+ drm_ref_t ref_action)
+{
+ drm_hash_item_t *hash;
+ int ret;
+
+ ret = drm_ht_find_item(&priv->refd_object_hash[ref_action],
+ (unsigned long)referenced_object, &hash);
+ if (ret)
+ return NULL;
+
+ return drm_hash_entry(hash, drm_ref_object_t, hash);
+}
+
+static void drm_remove_other_references(drm_file_t * priv,
+ drm_user_object_t * ro)
+{
+ int i;
+ drm_open_hash_t *ht;
+ drm_hash_item_t *hash;
+ drm_ref_object_t *item;
+
+ for (i = _DRM_REF_USE + 1; i < _DRM_NO_REF_TYPES; ++i) {
+ ht = &priv->refd_object_hash[i];
+ while (!drm_ht_find_item(ht, (unsigned long)ro, &hash)) {
+ item = drm_hash_entry(hash, drm_ref_object_t, hash);
+ drm_remove_ref_object(priv, item);
+ }
+ }
+}
+
+void drm_remove_ref_object(drm_file_t * priv, drm_ref_object_t * item)
+{
+ int ret;
+ drm_user_object_t *user_object = (drm_user_object_t *) item->hash.key;
+ drm_open_hash_t *ht = &priv->refd_object_hash[item->unref_action];
+ drm_ref_t unref_action;
+
+ unref_action = item->unref_action;
+ if (atomic_dec_and_test(&item->refcount)) {
+ ret = drm_ht_remove_item(ht, &item->hash);
+ BUG_ON(ret);
+ list_del_init(&item->list);
+ if (unref_action == _DRM_REF_USE)
+ drm_remove_other_references(priv, user_object);
+ drm_ctl_free(item, sizeof(*item), DRM_MEM_OBJECTS);
+ }
+
+ switch (unref_action) {
+ case _DRM_REF_USE:
+ drm_deref_user_object(priv, user_object);
+ break;
+ default:
+ BUG_ON(!user_object->unref);
+ user_object->unref(priv, user_object, unref_action);
+ break;
+ }
+}
+
+int drm_user_object_ref(drm_file_t * priv, uint32_t user_token,
+ drm_object_type_t type, drm_user_object_t ** object)
+{
+ drm_device_t *dev = priv->head->dev;
+ drm_user_object_t *uo;
+ int ret;
+
+ mutex_lock(&dev->struct_mutex);
+ uo = drm_lookup_user_object(priv, user_token);
+ if (!uo || (uo->type != type)) {
+ ret = -EINVAL;
+ goto out_err;
+ }
+ ret = drm_add_ref_object(priv, uo, _DRM_REF_USE);
+ if (ret)
+ goto out_err;
+ mutex_unlock(&dev->struct_mutex);
+ *object = uo;
+ DRM_DEBUG("Referenced an object\n");
+ return 0;
+ out_err:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+int drm_user_object_unref(drm_file_t * priv, uint32_t user_token,
+ drm_object_type_t type)
+{
+ drm_device_t *dev = priv->head->dev;
+ drm_user_object_t *uo;
+ drm_ref_object_t *ro;
+ int ret;
+
+ mutex_lock(&dev->struct_mutex);
+ uo = drm_lookup_user_object(priv, user_token);
+ if (!uo || (uo->type != type)) {
+ ret = -EINVAL;
+ goto out_err;
+ }
+ ro = drm_lookup_ref_object(priv, uo, _DRM_REF_USE);
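+ /* No usage reference means the token was never referenced by this client. */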
+ if (!ro) {
+ ret = -EINVAL;
+ goto out_err;
+ }
+ drm_remove_ref_object(priv, ro);
+ mutex_unlock(&dev->struct_mutex);
+ DRM_DEBUG("Unreferenced an object\n");
+ return 0;
+ out_err:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
diff --git a/drivers/char/drm/drm_objects.h b/drivers/char/drm/drm_objects.h
new file mode 100644
index 0000000..050355e
--- /dev/null
+++ b/drivers/char/drm/drm_objects.h
@@ -0,0 +1,460 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström
+ */
+
+#ifndef _DRM_OBJECTS_H
+#define _DRM_OBJECTS_H
+#define DRM_HAS_TTM
+
+struct drm_device;
+
+/***************************************************
+ * User space objects. (drm_object.c)
+ */
+
+#define drm_user_object_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
+
+typedef enum {
+ drm_fence_type,
+ drm_buffer_type,
+ drm_ttm_type
+ /*
+ * Add other user space object types here.
+ */
+} drm_object_type_t;
+
+/*
+ * A user object is a structure that helps the drm give out user handles
+ * to kernel internal objects and to keep track of these objects so that
+ * they can be destroyed, for example when the user space process exits.
+ * Designed to be accessible using a user space 32-bit handle.
+ */
+
+typedef struct drm_user_object {
+ drm_hash_item_t hash;
+ struct list_head list;
+ drm_object_type_t type;
+ atomic_t refcount;
+ int shareable;
+ drm_file_t *owner;
+ void (*ref_struct_locked) (drm_file_t * priv,
+ struct drm_user_object * obj,
+ drm_ref_t ref_action);
+ void (*unref) (drm_file_t * priv, struct drm_user_object * obj,
+ drm_ref_t unref_action);
+ void (*remove) (drm_file_t * priv, struct drm_user_object * obj);
+} drm_user_object_t;
+
+/*
+ * A ref object is a structure used to keep track of references to user
+ * objects, so that the references can be destroyed, for example when the
+ * user space process exits. Designed to be accessible using a pointer to
+ * the _user_ object.
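+ *
+ * The hash key is the pointer value of the referenced user object, so a
+ * lookup boils down to (cf. drm_lookup_ref_object() in drm_object.c):
+ *
+ *	drm_ht_find_item(&priv->refd_object_hash[ref_action],
+ *			 (unsigned long)referenced_object, &hash);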
+ */
+
+typedef struct drm_ref_object {
+ drm_hash_item_t hash;
+ struct list_head list;
+ atomic_t refcount;
+ drm_ref_t unref_action;
+} drm_ref_object_t;
+
+/**
+ * Must be called with the struct_mutex held.
+ */
+
+extern int drm_add_user_object(drm_file_t * priv, drm_user_object_t * item,
+ int shareable);
+/**
+ * Must be called with the struct_mutex held.
+ */
+
+extern drm_user_object_t *drm_lookup_user_object(drm_file_t * priv,
+ uint32_t key);
+
+/*
+ * Must be called with the struct_mutex held. If "item" has been obtained
+ * by a call to drm_lookup_user_object, you may not release the
+ * struct_mutex before calling drm_remove_user_object.
+ * This function may temporarily release the struct_mutex.
+ */
+
+extern int drm_remove_user_object(drm_file_t * priv, drm_user_object_t * item);
+
+/*
+ * Must be called with the struct_mutex held. May temporarily release it.
+ */
+
+extern int drm_add_ref_object(drm_file_t * priv,
+ drm_user_object_t * referenced_object,
+ drm_ref_t ref_action);
+
+/*
+ * Must be called with the struct_mutex held.
+ */
+
+drm_ref_object_t *drm_lookup_ref_object(drm_file_t * priv,
+ drm_user_object_t * referenced_object,
+ drm_ref_t ref_action);
+/*
+ * Must be called with the struct_mutex held. If "item" has been obtained
+ * by a call to drm_lookup_ref_object, you may not release the
+ * struct_mutex before calling drm_remove_ref_object.
+ * This function may temporarily release the struct_mutex.
+ */
+
+extern void drm_remove_ref_object(drm_file_t * priv, drm_ref_object_t * item);
+extern int drm_user_object_ref(drm_file_t * priv, uint32_t user_token,
+ drm_object_type_t type,
+ drm_user_object_t ** object);
+extern int drm_user_object_unref(drm_file_t * priv, uint32_t user_token,
+ drm_object_type_t type);
+
+/***************************************************
+ * Fence objects. (drm_fence.c)
+ */
+
+typedef struct drm_fence_object {
+ drm_user_object_t base;
+ atomic_t usage;
+
+ /*
+ * The fields below are protected by the fence manager spinlock.
+ */
+
+ struct list_head ring;
+ int class;
+ uint32_t native_type;
+ uint32_t type;
+ uint32_t signaled;
+ uint32_t sequence;
+ uint32_t flush_mask;
+ uint32_t submitted_flush;
+} drm_fence_object_t;
+
+#define _DRM_FENCE_CLASSES 8
+#define _DRM_FENCE_TYPE_EXE 0x00
+
+typedef struct drm_fence_class_manager {
+ struct list_head ring;
+ uint32_t pending_flush;
+ wait_queue_head_t fence_queue;
+ int pending_exe_flush;
+ uint32_t last_exe_flush;
+ uint32_t exe_flush_sequence;
+} drm_fence_class_manager_t;
+
+typedef struct drm_fence_manager {
+ int initialized;
+ rwlock_t lock;
+ drm_fence_class_manager_t class[_DRM_FENCE_CLASSES];
+ uint32_t num_classes;
+ atomic_t count;
+} drm_fence_manager_t;
+
+typedef struct drm_fence_driver {
+ uint32_t num_classes;
+ uint32_t wrap_diff;
+ uint32_t flush_diff;
+ uint32_t sequence_mask;
+ int lazy_capable;
+ int (*has_irq) (struct drm_device * dev, uint32_t class,
+ uint32_t flags);
+ int (*emit) (struct drm_device * dev, uint32_t class, uint32_t flags,
+ uint32_t * breadcrumb, uint32_t * native_type);
+ void (*poke_flush) (struct drm_device * dev, uint32_t class);
+} drm_fence_driver_t;
+
+extern void drm_fence_handler(struct drm_device *dev, uint32_t class,
+ uint32_t sequence, uint32_t type);
+extern void drm_fence_manager_init(struct drm_device *dev);
+extern void drm_fence_manager_takedown(struct drm_device *dev);
+extern void drm_fence_flush_old(struct drm_device *dev, uint32_t class,
+ uint32_t sequence);
+extern int drm_fence_object_flush(struct drm_device *dev,
+ drm_fence_object_t * fence, uint32_t type);
+extern int drm_fence_object_signaled(drm_fence_object_t * fence, uint32_t type);
+extern void drm_fence_usage_deref_locked(struct drm_device *dev,
+ drm_fence_object_t * fence);
+extern void drm_fence_usage_deref_unlocked(struct drm_device *dev,
+ drm_fence_object_t * fence);
+extern int drm_fence_object_wait(struct drm_device *dev,
+ drm_fence_object_t * fence,
+ int lazy, int ignore_signals, uint32_t mask);
+extern int drm_fence_object_create(struct drm_device *dev, uint32_t type,
+ uint32_t fence_flags, uint32_t class,
+ drm_fence_object_t ** c_fence);
+extern int drm_fence_add_user_object(drm_file_t * priv,
+ drm_fence_object_t * fence, int shareable);
+extern int drm_fence_ioctl(DRM_IOCTL_ARGS);
+
+/**************************************************
+ * TTMs
+ */
+
+/*
+ * The ttm backend GTT interface (in our case AGP). Any similar type of
+ * device (PCIE?) needs only to implement these functions to be usable with
+ * the TTM interface. The AGP backend implementation lives in
+ * drm_agpsupport.c and basically maps these calls to available functions in
+ * agpgart. Each drm device driver gets an additional function pointer that
+ * creates these types, so that the device can choose the correct aperture
+ * (multiple AGP apertures, etc.). Most device drivers will let this point
+ * to the standard AGP implementation.
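+ *
+ * A driver hook can then be as small as the sketch below; the xyz_ name is
+ * hypothetical, and it is assumed here that the AGP helper exported from
+ * drm_agpsupport.c is used:
+ *
+ *	static drm_ttm_backend_t *xyz_create_ttm_backend_entry(drm_device_t *dev)
+ *	{
+ *		return drm_agp_init_ttm(dev, NULL);
+ *	}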
+ */
+
+#define DRM_BE_FLAG_NEEDS_FREE 0x00000001
+#define DRM_BE_FLAG_BOUND_CACHED 0x00000002
+
+typedef struct drm_ttm_backend {
+ void *private;
+ uint32_t flags;
+ uint32_t drm_map_type;
+ int (*needs_ub_cache_adjust) (struct drm_ttm_backend * backend);
+ int (*populate) (struct drm_ttm_backend * backend,
+ unsigned long num_pages, struct page ** pages);
+ void (*clear) (struct drm_ttm_backend * backend);
+ int (*bind) (struct drm_ttm_backend * backend,
+ unsigned long offset, int cached);
+ int (*unbind) (struct drm_ttm_backend * backend);
+ void (*destroy) (struct drm_ttm_backend * backend);
+} drm_ttm_backend_t;
+
+typedef struct drm_ttm {
+ struct page **pages;
+ uint32_t page_flags;
+ unsigned long num_pages;
+ unsigned long aper_offset;
+ atomic_t vma_count;
+ struct drm_device *dev;
+ int destroy;
+ uint32_t mapping_offset;
+ drm_ttm_backend_t *be;
+ enum {
+ ttm_bound,
+ ttm_evicted,
+ ttm_unbound,
+ ttm_unpopulated,
+ } state;
+} drm_ttm_t;
+
+extern drm_ttm_t *drm_ttm_init(struct drm_device *dev, unsigned long size);
+extern int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset);
+extern void drm_ttm_unbind(drm_ttm_t * ttm);
+extern void drm_ttm_evict(drm_ttm_t * ttm);
+extern void drm_ttm_fixup_caching(drm_ttm_t * ttm);
+extern struct page *drm_ttm_get_page(drm_ttm_t * ttm, int index);
+
+/*
+ * Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do
+ * this, which calls this function if there are no vmas referencing the ttm
+ * anymore; otherwise it is called when the last vma exits.
+ */
+
+extern int drm_destroy_ttm(drm_ttm_t * ttm);
+
+#define DRM_FLAG_MASKED(_old, _new, _mask) {\
+(_old) ^= (((_old) ^ (_new)) & (_mask)); \
+}
+
+#define DRM_TTM_MASK_FLAGS ((1 << PAGE_SHIFT) - 1)
+#define DRM_TTM_MASK_PFN (0xFFFFFFFFU - DRM_TTM_MASK_FLAGS)
+
+/*
+ * Page flags.
+ */
+
+#define DRM_TTM_PAGE_UNCACHED 0x01
+#define DRM_TTM_PAGE_USED 0x02
+#define DRM_TTM_PAGE_BOUND 0x04
+#define DRM_TTM_PAGE_PRESENT 0x08
+#define DRM_TTM_PAGE_VMALLOC 0x10
+
+/***************************************************
+ * Buffer objects. (drm_bo.c, drm_bo_move.c)
+ */
+
+typedef struct drm_bo_mem_reg {
+ drm_mm_node_t *mm_node;
+ unsigned long size;
+ unsigned long num_pages;
+ uint32_t page_alignment;
+ uint32_t mem_type;
+ uint32_t flags;
+ uint32_t mask;
+} drm_bo_mem_reg_t;
+
+typedef struct drm_buffer_object {
+ struct drm_device *dev;
+ drm_user_object_t base;
+
+ /*
+ * If there is a possibility that the usage variable is zero,
+ * then dev->struct_mutex should be locked before incrementing it.
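+ *
+ * That is (illustrative only):
+ *
+ *	mutex_lock(&dev->struct_mutex);
+ *	atomic_inc(&bo->usage);
+ *	mutex_unlock(&dev->struct_mutex);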
+ */ + + atomic_t usage; + unsigned long buffer_start; + drm_bo_type_t type; + unsigned long offset; + atomic_t mapped; + drm_bo_mem_reg_t mem; + + struct list_head lru; + struct list_head ddestroy; + + uint32_t fence_type; + uint32_t fence_class; + drm_fence_object_t *fence; + uint32_t priv_flags; + wait_queue_head_t event_queue; + struct mutex mutex; + + /* For pinned buffers */ + drm_mm_node_t *pinned_node; + uint32_t pinned_mem_type; + struct list_head pinned_lru; + + /* For vm */ + + drm_ttm_t *ttm; + drm_map_list_t map_list; + uint32_t memory_type; + unsigned long bus_offset; + uint32_t vm_flags; + void *iomap; + +} drm_buffer_object_t; + +#define _DRM_BO_FLAG_UNFENCED 0x00000001 +#define _DRM_BO_FLAG_EVICTED 0x00000002 + +typedef struct drm_mem_type_manager { + int has_type; + int use_type; + drm_mm_t manager; + struct list_head lru; + struct list_head pinned; + uint32_t flags; + uint32_t drm_bus_maptype; + unsigned long io_offset; + unsigned long io_size; + void *io_addr; +} drm_mem_type_manager_t; + +#define _DRM_FLAG_MEMTYPE_FIXED 0x00000001 /* Fixed (on-card) PCI memory */ +#define _DRM_FLAG_MEMTYPE_MAPPABLE 0x00000002 /* Memory mappable */ +#define _DRM_FLAG_MEMTYPE_CACHED 0x00000004 /* Cached binding */ +#define _DRM_FLAG_NEEDS_IOREMAP 0x00000008 /* Fixed memory needs ioremap + before kernel access. */ +#define _DRM_FLAG_MEMTYPE_CMA 0x00000010 /* Can't map aperture */ +#define _DRM_FLAG_MEMTYPE_CSELECT 0x00000020 /* Select caching */ + +typedef struct drm_buffer_manager { + struct mutex init_mutex; + struct mutex evict_mutex; + int nice_mode; + int initialized; + drm_file_t *last_to_validate; + drm_mem_type_manager_t man[DRM_BO_MEM_TYPES]; + struct list_head unfenced; + struct list_head ddestroy; + struct delayed_work wq; + uint32_t fence_type; + unsigned long cur_pages; + atomic_t count; +} drm_buffer_manager_t; + +typedef struct drm_bo_driver { + const uint32_t *mem_type_prio; + const uint32_t *mem_busy_prio; + uint32_t num_mem_type_prio; + uint32_t num_mem_busy_prio; + drm_ttm_backend_t *(*create_ttm_backend_entry) + (struct drm_device * dev); + int (*fence_type) (struct drm_buffer_object *bo, uint32_t * class, uint32_t * type); + int (*invalidate_caches) (struct drm_device * dev, uint32_t flags); + int (*init_mem_type) (struct drm_device * dev, uint32_t type, + drm_mem_type_manager_t * man); + uint32_t(*evict_mask) (struct drm_buffer_object *bo); + int (*move) (struct drm_buffer_object * bo, + int evict, int no_wait, struct drm_bo_mem_reg * new_mem); +} drm_bo_driver_t; + +/* + * buffer objects (drm_bo.c) + */ + +extern int drm_bo_ioctl(DRM_IOCTL_ARGS); +extern int drm_mm_init_ioctl(DRM_IOCTL_ARGS); +extern int drm_bo_driver_finish(struct drm_device *dev); +extern int drm_bo_driver_init(struct drm_device *dev); +extern int drm_bo_pci_offset(struct drm_device *dev, + drm_bo_mem_reg_t * mem, + unsigned long *bus_base, + unsigned long *bus_offset, + unsigned long *bus_size); +extern int drm_mem_reg_is_pci(struct drm_device *dev, drm_bo_mem_reg_t * mem); + +extern void drm_bo_usage_deref_locked(drm_buffer_object_t * bo); +extern int drm_fence_buffer_objects(drm_file_t * priv, + struct list_head *list, + uint32_t fence_flags, + drm_fence_object_t * fence, + drm_fence_object_t ** used_fence); +extern void drm_bo_add_to_lru(drm_buffer_object_t * bo); +extern int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals, + int no_wait); +extern int drm_bo_mem_space(drm_buffer_object_t * bo, + drm_bo_mem_reg_t * mem, int no_wait); +extern int 
drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, + int no_wait, int move_unfenced); + +/* + * Buffer object memory move helpers. + * drm_bo_move.c + */ + +extern int drm_bo_move_ttm(drm_buffer_object_t * bo, + int evict, int no_wait, drm_bo_mem_reg_t * new_mem); +extern int drm_bo_move_memcpy(drm_buffer_object_t * bo, + int evict, + int no_wait, drm_bo_mem_reg_t * new_mem); +extern int drm_bo_move_accel_cleanup(drm_buffer_object_t * bo, + int evict, + int no_wait, + uint32_t fence_class, + uint32_t fence_type, + uint32_t fence_flags, + drm_bo_mem_reg_t * new_mem); + +#endif diff --git a/drivers/char/drm/drm_proc.c b/drivers/char/drm/drm_proc.c index b204498..0fdf0a4 100644 --- a/drivers/char/drm/drm_proc.c +++ b/drivers/char/drm/drm_proc.c @@ -49,6 +49,8 @@ static int drm_queues_info(char *buf, ch int request, int *eof, void *data); static int drm_bufs_info(char *buf, char **start, off_t offset, int request, int *eof, void *data); +static int drm_objects_info(char *buf, char **start, off_t offset, + int request, int *eof, void *data); #if DRM_DEBUG_CODE static int drm_vma_info(char *buf, char **start, off_t offset, int request, int *eof, void *data); @@ -67,6 +69,7 @@ static struct drm_proc_list { {"clients", drm_clients_info}, {"queues", drm_queues_info}, {"bufs", drm_bufs_info}, + {"objects", drm_objects_info}, #if DRM_DEBUG_CODE {"vma", drm_vma_info}, #endif @@ -418,6 +421,93 @@ static int drm_bufs_info(char *buf, char } /** + * Called when "/proc/dri/.../objects" is read. + * + * \param buf output buffer. + * \param start start of output data. + * \param offset requested start offset. + * \param request requested number of bytes. + * \param eof whether there is no more data to return. + * \param data private data. + * \return number of written bytes. + */ +static int drm__objects_info(char *buf, char **start, off_t offset, int request, + int *eof, void *data) +{ + drm_device_t *dev = (drm_device_t *) data; + int len = 0; + drm_buffer_manager_t *bm = &dev->bm; + drm_fence_manager_t *fm = &dev->fm; + drm_u64_t used_mem; + drm_u64_t low_mem; + drm_u64_t high_mem; + + + if (offset > DRM_PROC_LIMIT) { + *eof = 1; + return 0; + } + + *start = &buf[offset]; + *eof = 0; + + DRM_PROC_PRINT("Object accounting:\n\n"); + if (fm->initialized) { + DRM_PROC_PRINT("Number of active fence objects: %d.\n", + atomic_read(&fm->count)); + } else { + DRM_PROC_PRINT("Fence objects are not supported by this driver\n"); + } + + if (bm->initialized) { + DRM_PROC_PRINT("Number of active buffer objects: %d.\n\n", + atomic_read(&bm->count)); + } + DRM_PROC_PRINT("Memory accounting:\n\n"); + if (bm->initialized) { + DRM_PROC_PRINT("Number of locked GATT pages: %lu.\n", bm->cur_pages); + } else { + DRM_PROC_PRINT("Buffer objects are not supported by this driver.\n"); + } + + drm_query_memctl(&used_mem, &low_mem, &high_mem); + + if (used_mem > 16*PAGE_SIZE) { + DRM_PROC_PRINT("Used object memory is %lu pages.\n", + (unsigned long) (used_mem >> PAGE_SHIFT)); + } else { + DRM_PROC_PRINT("Used object memory is %lu bytes.\n", + (unsigned long) used_mem); + } + DRM_PROC_PRINT("Soft object memory usage threshold is %lu pages.\n", + (unsigned long) (low_mem >> PAGE_SHIFT)); + DRM_PROC_PRINT("Hard object memory usage threshold is %lu pages.\n", + (unsigned long) (high_mem >> PAGE_SHIFT)); + + DRM_PROC_PRINT("\n"); + + if (len > request + offset) + return request; + *eof = 1; + return len - offset; +} + +/** + * Simply calls _objects_info() while holding the drm_device::struct_mutex lock. 
+ */
+static int drm_objects_info(char *buf, char **start, off_t offset, int request,
+ int *eof, void *data)
+{
+ drm_device_t *dev = (drm_device_t *) data;
+ int ret;
+
+ mutex_lock(&dev->struct_mutex);
+ ret = drm__objects_info(buf, start, offset, request, eof, data);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+/**
 * Called when "/proc/dri/.../clients" is read.
 *
 * \param buf output buffer.
diff --git a/drivers/char/drm/drm_stub.c b/drivers/char/drm/drm_stub.c
index 120d102..d5e45b2 100644
--- a/drivers/char/drm/drm_stub.c
+++ b/drivers/char/drm/drm_stub.c
@@ -62,9 +62,12 @@ static int drm_fill_in_dev(drm_device_t
 spin_lock_init(&dev->count_lock);
 spin_lock_init(&dev->drw_lock);
 spin_lock_init(&dev->tasklet_lock);
+ spin_lock_init(&dev->lock.spinlock);
 init_timer(&dev->timer);
 mutex_init(&dev->struct_mutex);
 mutex_init(&dev->ctxlist_mutex);
+ mutex_init(&dev->bm.init_mutex);
+ mutex_init(&dev->bm.evict_mutex);
 
 dev->pdev = pdev;
 dev->pci_device = pdev->device;
@@ -75,14 +78,27 @@
#ifdef __alpha__
#endif
 dev->irq = pdev->irq;
 
- dev->maplist = drm_calloc(1, sizeof(*dev->maplist), DRM_MEM_MAPS);
- if (dev->maplist == NULL)
+ if (drm_ht_create(&dev->map_hash, DRM_MAP_HASH_ORDER))
 return -ENOMEM;
- INIT_LIST_HEAD(&dev->maplist->head);
- if (drm_ht_create(&dev->map_hash, 12)) {
- drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
+ if (drm_mm_init(&dev->offset_manager, DRM_FILE_PAGE_OFFSET_START,
+ DRM_FILE_PAGE_OFFSET_SIZE)) {
+ drm_ht_remove(&dev->map_hash);
+ return -ENOMEM;
+ }
+
+ if (drm_ht_create(&dev->object_hash, DRM_OBJECT_HASH_ORDER)) {
+ drm_ht_remove(&dev->map_hash);
+ drm_mm_takedown(&dev->offset_manager);
 return -ENOMEM;
 }
+ dev->maplist = drm_calloc(1, sizeof(*dev->maplist), DRM_MEM_MAPS);
+ if (dev->maplist == NULL) {
+ drm_ht_remove(&dev->object_hash);
+ drm_ht_remove(&dev->map_hash);
+ drm_mm_takedown(&dev->offset_manager);
+ return -ENOMEM;
+ }
+ INIT_LIST_HEAD(&dev->maplist->head);
 
 /* the DRM has 6 basic counters */
 dev->counters = 6;
@@ -123,6 +139,7 @@
#endif
 goto error_out_unreg;
 }
 
+ drm_fence_manager_init(dev);
 return 0;
 
 error_out_unreg:
diff --git a/drivers/char/drm/drm_ttm.c b/drivers/char/drm/drm_ttm.c
new file mode 100644
index 0000000..8e57408
--- /dev/null
+++ b/drivers/char/drm/drm_ttm.c
@@ -0,0 +1,337 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +/* + * Authors: Thomas Hellström + */ + +#include "drmP.h" + +static void drm_ttm_ipi_handler(void *null) +{ + flush_agp_cache(); +} + +static void drm_ttm_cache_flush(void) +{ + if (on_each_cpu(drm_ttm_ipi_handler, NULL, 1, 1) != 0) + DRM_ERROR("Timed out waiting for drm cache flush.\n"); +} + +/* + * Use kmalloc if possible. Otherwise fall back to vmalloc. + */ + +static void ttm_alloc_pages(drm_ttm_t * ttm) +{ + unsigned long size = ttm->num_pages * sizeof(*ttm->pages); + ttm->pages = NULL; + + if (drm_alloc_memctl(size)) + return; + + if (size <= PAGE_SIZE) { + ttm->pages = drm_calloc(1, size, DRM_MEM_TTM); + } + if (!ttm->pages) { + ttm->pages = vmalloc_user(size); + if (ttm->pages) + ttm->page_flags |= DRM_TTM_PAGE_VMALLOC; + } + if (!ttm->pages) { + drm_free_memctl(size); + } +} + +static void ttm_free_pages(drm_ttm_t * ttm) +{ + unsigned long size = ttm->num_pages * sizeof(*ttm->pages); + + if (ttm->page_flags & DRM_TTM_PAGE_VMALLOC) { + vfree(ttm->pages); + ttm->page_flags &= ~DRM_TTM_PAGE_VMALLOC; + } else { + drm_free(ttm->pages, size, DRM_MEM_TTM); + } + drm_free_memctl(size); + ttm->pages = NULL; +} + +static struct page *drm_ttm_alloc_page(void) +{ + struct page *page; + + if (drm_alloc_memctl(PAGE_SIZE)) { + return NULL; + } + page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32); + if (!page) { + drm_free_memctl(PAGE_SIZE); + return NULL; + } + SetPageLocked(page); + return page; +} + +/* + * Change caching policy for the linear kernel map + * for range of pages in a ttm. + */ + +static int drm_set_caching(drm_ttm_t * ttm, int noncached) +{ + int i; + struct page **cur_page; + int do_tlbflush = 0; + + if ((ttm->page_flags & DRM_TTM_PAGE_UNCACHED) == noncached) + return 0; + + if (noncached) + drm_ttm_cache_flush(); + + for (i = 0; i < ttm->num_pages; ++i) { + cur_page = ttm->pages + i; + if (*cur_page) { + if (!PageHighMem(*cur_page)) { + if (noncached) { + map_page_into_agp(*cur_page); + } else { + unmap_page_from_agp(*cur_page); + } + do_tlbflush = 1; + } + } + } + if (do_tlbflush) + flush_agp_mappings(); + + DRM_FLAG_MASKED(ttm->page_flags, noncached, DRM_TTM_PAGE_UNCACHED); + + return 0; +} + +/* + * Free all resources associated with a ttm. + */ + +int drm_destroy_ttm(drm_ttm_t * ttm) +{ + + int i; + struct page **cur_page; + drm_ttm_backend_t *be; + + if (!ttm) + return 0; + + be = ttm->be; + if (be) { + be->destroy(be); + ttm->be = NULL; + } + + if (ttm->pages) { + drm_buffer_manager_t *bm = &ttm->dev->bm; + if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED) + drm_set_caching(ttm, 0); + + for (i = 0; i < ttm->num_pages; ++i) { + cur_page = ttm->pages + i; + if (*cur_page) { + unlock_page(*cur_page); + + if (page_count(*cur_page) != 1) { + DRM_ERROR("Erroneous page count. " + "Leaking pages.\n"); + } + if (page_mapped(*cur_page)) { + DRM_ERROR("Erroneous map count. 
" + "Leaking page mappings.\n"); + } + __free_page(*cur_page); + drm_free_memctl(PAGE_SIZE); + --bm->cur_pages; + } + } + ttm_free_pages(ttm); + } + + drm_ctl_free(ttm, sizeof(*ttm), DRM_MEM_TTM); + return 0; +} + +struct page *drm_ttm_get_page(drm_ttm_t * ttm, int index) +{ + struct page *p; + drm_buffer_manager_t *bm = &ttm->dev->bm; + + p = ttm->pages[index]; + if (!p) { + p = drm_ttm_alloc_page(); + if (!p) + return NULL; + ttm->pages[index] = p; + ++bm->cur_pages; + } + return p; +} + +static int drm_ttm_populate(drm_ttm_t * ttm) +{ + struct page *page; + unsigned long i; + drm_ttm_backend_t *be; + + if (ttm->state != ttm_unpopulated) + return 0; + + be = ttm->be; + for (i = 0; i < ttm->num_pages; ++i) { + page = drm_ttm_get_page(ttm, i); + if (!page) + return -ENOMEM; + } + be->populate(be, ttm->num_pages, ttm->pages); + ttm->state = ttm_unbound; + return 0; +} + +/* + * Initialize a ttm. + */ + +drm_ttm_t *drm_ttm_init(struct drm_device * dev, unsigned long size) +{ + drm_bo_driver_t *bo_driver = dev->driver->bo_driver; + drm_ttm_t *ttm; + + if (!bo_driver) + return NULL; + + ttm = drm_ctl_calloc(1, sizeof(*ttm), DRM_MEM_TTM); + if (!ttm) + return NULL; + + ttm->dev = dev; + atomic_set(&ttm->vma_count, 0); + + ttm->destroy = 0; + ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; + + ttm->page_flags = 0; + + /* + * Account also for AGP module memory usage. + */ + + ttm_alloc_pages(ttm); + if (!ttm->pages) { + drm_destroy_ttm(ttm); + DRM_ERROR("Failed allocating page table\n"); + return NULL; + } + ttm->be = bo_driver->create_ttm_backend_entry(dev); + if (!ttm->be) { + drm_destroy_ttm(ttm); + DRM_ERROR("Failed creating ttm backend entry\n"); + return NULL; + } + ttm->state = ttm_unpopulated; + return ttm; +} + +/* + * Unbind a ttm region from the aperture. 
+ */ + +void drm_ttm_evict(drm_ttm_t * ttm) +{ + drm_ttm_backend_t *be = ttm->be; + int ret; + + if (ttm->state == ttm_bound) { + ret = be->unbind(be); + BUG_ON(ret); + } + + ttm->state = ttm_evicted; +} + +void drm_ttm_fixup_caching(drm_ttm_t * ttm) +{ + + if (ttm->state == ttm_evicted) { + drm_ttm_backend_t *be = ttm->be; + if (be->needs_ub_cache_adjust(be)) { + drm_set_caching(ttm, 0); + } + ttm->state = ttm_unbound; + } +} + +void drm_ttm_unbind(drm_ttm_t * ttm) +{ + if (ttm->state == ttm_bound) + drm_ttm_evict(ttm); + + drm_ttm_fixup_caching(ttm); +} + +int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset) +{ + + int ret = 0; + drm_ttm_backend_t *be; + + if (!ttm) + return -EINVAL; + if (ttm->state == ttm_bound) + return 0; + + be = ttm->be; + + ret = drm_ttm_populate(ttm); + if (ret) + return ret; + + if (ttm->state == ttm_unbound && !cached) { + drm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED); + } + + if ((ret = be->bind(be, aper_offset, cached))) { + ttm->state = ttm_evicted; + DRM_ERROR("Couldn't bind backend.\n"); + return ret; + } + + ttm->aper_offset = aper_offset; + ttm->state = ttm_bound; + + return 0; +} + +EXPORT_SYMBOL(drm_bind_ttm); diff --git a/drivers/char/drm/drm_vm.c b/drivers/char/drm/drm_vm.c index 239ac33..c27d384 100644 --- a/drivers/char/drm/drm_vm.c +++ b/drivers/char/drm/drm_vm.c @@ -40,6 +40,9 @@ #endif static void drm_vm_open(struct vm_area_struct *vma); static void drm_vm_close(struct vm_area_struct *vma); +static int drm_bo_mmap_locked(struct vm_area_struct *vma, + struct file *filp, + drm_local_map_t *map); static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma) { @@ -220,7 +223,7 @@ static void drm_vm_shm_close(struct vm_a } else { dev->vmalist = pt->next; } - drm_free(pt, sizeof(*pt), DRM_MEM_VMAS); + drm_ctl_free(pt, sizeof(*pt), DRM_MEM_VMAS); } else { prev = pt; } @@ -265,6 +268,9 @@ static void drm_vm_shm_close(struct vm_a dmah.size = map->size; __drm_pci_free(dev, &dmah); break; + case _DRM_TTM: + BUG_ON(1); + break; } drm_free(map, sizeof(*map), DRM_MEM_MAPS); } @@ -329,6 +335,7 @@ static __inline__ struct page *drm_do_vm unsigned long page_offset; struct page *page; + DRM_DEBUG("\n"); if (!entry) return NOPAGE_SIGBUS; /* Error */ if (address > vma->vm_end) @@ -413,7 +420,7 @@ static struct vm_operations_struct drm_v * Create a new drm_vma_entry structure as the \p vma private data entry and * add it to drm_device::vmalist. */ -static void drm_vm_open(struct vm_area_struct *vma) +static void drm_vm_open_locked(struct vm_area_struct *vma) { drm_file_t *priv = vma->vm_file->private_data; drm_device_t *dev = priv->head->dev; @@ -423,17 +430,25 @@ static void drm_vm_open(struct vm_area_s vma->vm_start, vma->vm_end - vma->vm_start); atomic_inc(&dev->vma_count); - vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS); + vma_entry = drm_ctl_alloc(sizeof(*vma_entry), DRM_MEM_VMAS); if (vma_entry) { - mutex_lock(&dev->struct_mutex); vma_entry->vma = vma; vma_entry->next = dev->vmalist; vma_entry->pid = current->pid; dev->vmalist = vma_entry; - mutex_unlock(&dev->struct_mutex); } } +static void drm_vm_open(struct vm_area_struct *vma) +{ + drm_file_t *priv = vma->vm_file->private_data; + drm_device_t *dev = priv->head->dev; + + mutex_lock(&dev->struct_mutex); + drm_vm_open_locked(vma); + mutex_unlock(&dev->struct_mutex); +} + /** * \c close method for all virtual memory types. 
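 *
 * Removes the vma entry from drm_device::vmalist; note that buffer-object
 * vmas additionally drop a bo usage reference via drm_bo_vm_close() below.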
 */
@@ -460,7 +475,7 @@ static void drm_vm_close(struct vm_area_
 } else {
 dev->vmalist = pt->next;
 }
- drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
+ drm_ctl_free(pt, sizeof(*pt), DRM_MEM_VMAS);
 break;
 }
 }
@@ -484,7 +499,6 @@ static int drm_mmap_dma(struct file *fil
 drm_device_dma_t *dma;
 unsigned long length = vma->vm_end - vma->vm_start;
 
- lock_kernel();
 dev = priv->head->dev;
 dma = dev->dma;
 DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
@@ -492,10 +506,8 @@ static int drm_mmap_dma(struct file *fil
 
 /* Length must match exact page count */
 if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
- unlock_kernel();
 return -EINVAL;
 }
- unlock_kernel();
 
 if (!capable(CAP_SYS_ADMIN) &&
 (dma->flags & _DRM_DMA_USE_PCI_RO)) {
@@ -518,7 +530,7 @@
#endif
 vma->vm_flags |= VM_RESERVED; /* Don't swap */
 
 vma->vm_file = filp; /* Needed for drm_vm_open() */
- drm_vm_open(vma);
+ drm_vm_open_locked(vma);
 return 0;
 }
 
@@ -553,7 +565,7 @@ EXPORT_SYMBOL(drm_core_get_reg_ofs);
 * according to the mapping type and remaps the pages. Finally sets the file
 * pointer and calls vm_open().
 */
-int drm_mmap(struct file *filp, struct vm_area_struct *vma)
+static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
 {
 drm_file_t *priv = filp->private_data;
 drm_device_t *dev = priv->head->dev;
@@ -634,10 +646,10 @@
#ifdef __sparc__
 vma->vm_end - vma->vm_start,
 vma->vm_page_prot))
#else
- if (io_remap_pfn_range(vma, vma->vm_start,
- (map->offset + offset) >> PAGE_SHIFT,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot))
+ if (remap_pfn_range(vma, vma->vm_start,
+ (map->offset + offset) >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot))
#endif
 return -EAGAIN;
 DRM_DEBUG(" Type = %d; start = 0x%lx, end = 0x%lx,"
@@ -661,14 +673,204 @@
#endif
 vma->vm_private_data = (void *)map;
 vma->vm_flags |= VM_RESERVED;
 break;
+ case _DRM_TTM:
+ return drm_bo_mmap_locked(vma, filp, map);
 default:
 return -EINVAL; /* This should never happen. */
 }
 vma->vm_flags |= VM_RESERVED; /* Don't swap */
 
 vma->vm_file = filp; /* Needed for drm_vm_open() */
- drm_vm_open(vma);
+ drm_vm_open_locked(vma);
 return 0;
 }
 
+int drm_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->head->dev;
+ int ret;
+
+ mutex_lock(&dev->struct_mutex);
+ ret = drm_mmap_locked(filp, vma);
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
 EXPORT_SYMBOL(drm_mmap);
+
+/**
+ * buffer object vm functions.
+ */
+
+/**
+ * \c Pagefault method for buffer objects.
+ *
+ * \param vma Virtual memory area.
+ * \param address Faulting virtual address.
+ * \return A NOPFN_ return code; the pfn is inserted manually.
+ *
+ * It's important that pfns are inserted while holding the bo->mutex lock;
+ * otherwise we might race with unmap_mapping_range(), which is always
+ * called with the bo->mutex lock held.
+ *
+ * We're modifying the page attribute bits of the vma->vm_page_prot field
+ * while holding the mmap_sem only in read mode, not in write mode.
+ * These bits are not used by the mm subsystem code, and we consider them
+ * protected by the bo->mutex lock.
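+ *
+ * -EAGAIN from drm_bo_wait(), drm_bo_move_buffer() or vm_insert_pfn() is
+ * mapped to NOPFN_REFAULT, so the fault is simply retried; other errors
+ * become NOPFN_SIGBUS (wait/move/pci_offset) or NOPFN_OOM (page
+ * allocation and pfn insertion).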
+ */ + +static unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma, + unsigned long address) +{ + drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data; + unsigned long page_offset; + struct page *page = NULL; + drm_ttm_t *ttm; + drm_device_t *dev; + unsigned long pfn; + int err; + unsigned long bus_base; + unsigned long bus_offset; + unsigned long bus_size; + int ret = NOPFN_REFAULT; + + if (address > vma->vm_end) + return NOPFN_SIGBUS; + + err = mutex_lock_interruptible(&bo->mutex); + if (err) + return NOPFN_REFAULT; + + err = drm_bo_wait(bo, 0, 0, 0); + if (err) { + ret = (err != -EAGAIN) ? NOPFN_SIGBUS : NOPFN_REFAULT; + goto out_unlock; + } + + /* + * If buffer happens to be in a non-mappable location, + * move it to a mappable. + */ + + if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) { + uint32_t new_mask = bo->mem.mask | + DRM_BO_FLAG_MAPPABLE | + DRM_BO_FLAG_FORCE_MAPPABLE; + err = drm_bo_move_buffer(bo, new_mask, 0, 0); + if (err) { + ret = (err != -EAGAIN) ? NOPFN_SIGBUS : NOPFN_REFAULT; + goto out_unlock; + } + } + + dev = bo->dev; + err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset, + &bus_size); + + if (err) { + ret = NOPFN_SIGBUS; + goto out_unlock; + } + + page_offset = (address - vma->vm_start) >> PAGE_SHIFT; + + if (bus_size) { + drm_mem_type_manager_t *man = &dev->bm.man[bo->mem.mem_type]; + + pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset; + vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma); + } else { + ttm = bo->ttm; + + drm_ttm_fixup_caching(ttm); + page = drm_ttm_get_page(ttm, page_offset); + if (!page) { + ret = NOPFN_OOM; + goto out_unlock; + } + pfn = page_to_pfn(page); + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); + } + + err = vm_insert_pfn(vma, address, pfn); + if (err) { + ret = (err != -EAGAIN) ? NOPFN_OOM : NOPFN_REFAULT; + goto out_unlock; + } +out_unlock: + mutex_unlock(&bo->mutex); + return ret; +} + +static void drm_bo_vm_open_locked(struct vm_area_struct *vma) +{ + drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data; + + drm_vm_open_locked(vma); + atomic_inc(&bo->usage); +} + +/** + * \c vma open method for buffer objects. + * + * \param vma virtual memory area. + */ + +static void drm_bo_vm_open(struct vm_area_struct *vma) +{ + drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data; + drm_device_t *dev = bo->dev; + + mutex_lock(&dev->struct_mutex); + drm_bo_vm_open_locked(vma); + mutex_unlock(&dev->struct_mutex); +} + +/** + * \c vma close method for buffer objects. + * + * \param vma virtual memory area. + */ + +static void drm_bo_vm_close(struct vm_area_struct *vma) +{ + drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data; + drm_device_t *dev = bo->dev; + + drm_vm_close(vma); + if (bo) { + mutex_lock(&dev->struct_mutex); + drm_bo_usage_deref_locked(bo); + mutex_unlock(&dev->struct_mutex); + } + return; +} + +static struct vm_operations_struct drm_bo_vm_ops = { + .nopfn = drm_bo_vm_nopfn, + .open = drm_bo_vm_open, + .close = drm_bo_vm_close, +}; + +/** + * mmap buffer object memory. + * + * \param vma virtual memory area. + * \param filp file pointer. + * \param map The buffer object drm map. + * \return zero on success or a negative number on failure. 
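+ *
+ * VM_PFNMAP is set to match the nopfn handler above, which inserts raw
+ * pfns with vm_insert_pfn(), and drm_bo_vm_open_locked() takes a usage
+ * reference on the buffer object for the lifetime of the vma.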
+ */ + +int drm_bo_mmap_locked(struct vm_area_struct *vma, + struct file *filp, + drm_local_map_t *map) +{ + vma->vm_ops = &drm_bo_vm_ops; + vma->vm_private_data = map->handle; + vma->vm_file = filp; + vma->vm_flags |= VM_RESERVED | VM_IO; + vma->vm_flags |= VM_PFNMAP; + drm_bo_vm_open_locked(vma); + return 0; +} -- 1.4.1