diff -Napur -X /home/jbarnes/dontdiff gpu/drm/drm_drv.c /tmp/drm-master/drivers/gpu/drm/drm_drv.c --- gpu/drm/drm_drv.c 2008-08-21 14:43:42.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/drm_drv.c 2008-08-21 14:34:09.000000000 -0700 @@ -45,10 +45,12 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ - #include "drmP.h" #include "drm_core.h" +static void drm_cleanup(struct drm_device * dev); +int drm_fb_loaded = 0; + static int drm_version(struct drm_device *dev, void *data, struct drm_file *file_priv); @@ -113,14 +115,46 @@ static struct drm_ioctl_desc drm_ioctls[ DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0), + DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0), + DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + + DRM_IOCTL_DEF(DRM_IOCTL_MM_INIT, drm_mm_init_ioctl, + DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_MM_TAKEDOWN, drm_mm_takedown_ioctl, + DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_MM_LOCK, drm_mm_lock_ioctl, + DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_MM_UNLOCK, drm_mm_unlock_ioctl, + DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + + DRM_IOCTL_DEF(DRM_IOCTL_FENCE_CREATE, drm_fence_create_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_FENCE_REFERENCE, drm_fence_reference_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_FENCE_UNREFERENCE, drm_fence_unreference_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_FENCE_SIGNALED, drm_fence_signaled_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_FENCE_FLUSH, drm_fence_flush_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_FENCE_WAIT, drm_fence_wait_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_FENCE_EMIT, drm_fence_emit_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_FENCE_BUFFERS, drm_fence_buffers_ioctl, DRM_AUTH), + + DRM_IOCTL_DEF(DRM_IOCTL_BO_CREATE, drm_bo_create_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_BO_MAP, drm_bo_map_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_BO_UNMAP, drm_bo_unmap_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_BO_REFERENCE, drm_bo_reference_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_BO_UNREFERENCE, drm_bo_unreference_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_BO_SETSTATUS, drm_bo_setstatus_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_BO_INFO, drm_bo_info_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_BO_WAIT_IDLE, drm_bo_wait_idle_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_BO_VERSION, drm_bo_version_ioctl, 0), + + DRM_IOCTL_DEF(DRM_IOCTL_MM_INFO, drm_mm_info_ioctl, 0), }; #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) + /** * Take down the DRM device. * @@ -139,6 +173,12 @@ int drm_lastclose(struct drm_device * de DRM_DEBUG("\n"); + /* + * We can't do much about this function failing. 
+ */ + + drm_bo_driver_finish(dev); + if (dev->driver->lastclose) dev->driver->lastclose(dev); DRM_DEBUG("driver lastclose completed\n"); @@ -152,13 +192,18 @@ int drm_lastclose(struct drm_device * de if (dev->irq_enabled) drm_irq_uninstall(dev); + /* Free drawable information memory */ mutex_lock(&dev->struct_mutex); - /* Free drawable information memory */ drm_drawable_free_all(dev); del_timer(&dev->timer); - /* Clear pid list */ + if (dev->unique) { + drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER); + dev->unique = NULL; + dev->unique_len = 0; + } + if (dev->magicfree.next) { list_for_each_entry_safe(pt, next, &dev->magicfree, head) { list_del(&pt->head); @@ -168,6 +213,7 @@ int drm_lastclose(struct drm_device * de drm_ht_remove(&dev->magiclist); } + /* Clear AGP information */ if (drm_core_has_AGP(dev) && dev->agp) { struct drm_agp_mem *entry, *tempe; @@ -196,7 +242,7 @@ int drm_lastclose(struct drm_device * de /* Clear vma list (only built for debugging) */ list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) { list_del(&vma->head); - drm_free(vma, sizeof(*vma), DRM_MEM_VMAS); + drm_ctl_free(vma, sizeof(*vma), DRM_MEM_VMAS); } list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) { @@ -208,6 +254,7 @@ int drm_lastclose(struct drm_device * de if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist) { for (i = 0; i < dev->queue_count; i++) { + if (dev->queuelist[i]) { drm_free(dev->queuelist[i], sizeof(*dev->queuelist[0]), @@ -230,12 +277,24 @@ int drm_lastclose(struct drm_device * de dev->lock.file_priv = NULL; wake_up_interruptible(&dev->lock.lock_queue); } + dev->dev_mapping = NULL; mutex_unlock(&dev->struct_mutex); DRM_DEBUG("lastclose completed\n"); return 0; } +void drm_cleanup_pci(struct pci_dev *pdev) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + + pci_set_drvdata(pdev, NULL); + pci_release_regions(pdev); + if (dev) + drm_cleanup(dev); +} +EXPORT_SYMBOL(drm_cleanup_pci); + /** * Module initialization. Called via init_module at module load time, or via * linux/init/main.c (this is not currently supported). @@ -249,30 +308,71 @@ int drm_lastclose(struct drm_device * de * Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and * after the initialization for driver customization. */ -int drm_init(struct drm_driver *driver) +int drm_init(struct drm_driver *driver, + struct pci_device_id *pciidlist) { - struct pci_dev *pdev = NULL; + struct pci_dev *pdev; struct pci_device_id *pid; - int i; + int rc, i; DRM_DEBUG("\n"); - for (i = 0; driver->pci_driver.id_table[i].vendor != 0; i++) { - pid = (struct pci_device_id *)&driver->pci_driver.id_table[i]; + for (i = 0; (pciidlist[i].vendor != 0) && !drm_fb_loaded; i++) { + pid = &pciidlist[i]; pdev = NULL; /* pass back in pdev to account for multiple identical cards */ while ((pdev = pci_get_subsys(pid->vendor, pid->device, pid->subvendor, - pid->subdevice, pdev)) != NULL) { - /* stealth mode requires a manual probe */ - pci_dev_get(pdev); - drm_get_dev(pdev, pid, driver); + pid->subdevice, pdev))) { + /* Are there device class requirements? */ + if ((pid->class != 0) + && ((pdev->class & pid->class_mask) != pid->class)) { + continue; + } + /* is there already a driver loaded, or (short circuit saves work) */ + /* does something like VesaFB have control of the memory region? 
*/ + if (pci_dev_driver(pdev) + || pci_request_regions(pdev, "DRM scan")) { + /* go into stealth mode */ + drm_fb_loaded = 1; + pci_dev_put(pdev); + break; + } + /* no fbdev or vesadev, put things back and wait for normal probe */ + pci_release_regions(pdev); } } + + if (!drm_fb_loaded) + return pci_register_driver(&driver->pci_driver); + else { + for (i = 0; pciidlist[i].vendor != 0; i++) { + pid = &pciidlist[i]; + + pdev = NULL; + /* pass back in pdev to account for multiple identical cards */ + while ((pdev = + pci_get_subsys(pid->vendor, pid->device, + pid->subvendor, pid->subdevice, + pdev))) { + /* Are there device class requirements? */ + if ((pid->class != 0) + && ((pdev->class & pid->class_mask) != pid->class)) { + continue; + } + /* stealth mode requires a manual probe */ + pci_dev_get(pdev); + if ((rc = drm_get_dev(pdev, &pciidlist[i], driver))) { + pci_dev_put(pdev); + return rc; + } + } + } + DRM_INFO("Used old pci detect: framebuffer loaded\n"); + } return 0; } - EXPORT_SYMBOL(drm_init); /** @@ -284,17 +384,18 @@ EXPORT_SYMBOL(drm_init); */ static void drm_cleanup(struct drm_device * dev) { - DRM_DEBUG("\n"); + DRM_DEBUG("\n"); if (!dev) { DRM_ERROR("cleanup called no dev\n"); return; } drm_lastclose(dev); + drm_fence_manager_takedown(dev); - if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) && - dev->agp && dev->agp->agp_mtrr >= 0) { + if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) && dev->agp + && dev->agp->agp_mtrr >= 0) { int retval; retval = mtrr_del(dev->agp->agp_mtrr, dev->agp->agp_info.aper_base, @@ -306,19 +407,23 @@ static void drm_cleanup(struct drm_devic drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS); dev->agp = NULL; } - if (dev->driver->unload) dev->driver->unload(dev); - drm_ht_remove(&dev->map_hash); + if (!drm_fb_loaded) + pci_disable_device(dev->pdev); + drm_ctxbitmap_cleanup(dev); + drm_ht_remove(&dev->map_hash); + drm_mm_takedown(&dev->offset_manager); + drm_ht_remove(&dev->object_hash); drm_put_minor(&dev->primary); if (drm_put_dev(dev)) DRM_ERROR("Cannot unload module\n"); } -static int drm_minors_cleanup(int id, void *ptr, void *data) +int drm_minors_cleanup(int id, void *ptr, void *data) { struct drm_minor *minor = ptr; struct drm_device *dev; @@ -340,12 +445,12 @@ static int drm_minors_cleanup(int id, vo void drm_exit(struct drm_driver *driver) { DRM_DEBUG("\n"); - - idr_for_each(&drm_minors_idr, &drm_minors_cleanup, driver); - + if (drm_fb_loaded) { + idr_for_each(&drm_minors_idr, &drm_minors_cleanup, driver); + } else + pci_unregister_driver(&driver->pci_driver); DRM_INFO("Module unloaded\n"); } - EXPORT_SYMBOL(drm_exit); /** File operations structure */ @@ -356,9 +461,33 @@ static const struct file_operations drm_ static int __init drm_core_init(void) { - int ret = -ENOMEM; + int ret; + struct sysinfo si; + unsigned long avail_memctl_mem; + unsigned long max_memctl_mem; idr_init(&drm_minors_idr); + si_meminfo(&si); + + /* + * AGP only allows low / DMA32 memory ATM. 
+ */ + + avail_memctl_mem = si.totalram - si.totalhigh; + + /* + * Avoid overflows + */ + + max_memctl_mem = 1UL << (32 - PAGE_SHIFT); + max_memctl_mem = (max_memctl_mem / si.mem_unit) * PAGE_SIZE; + + if (avail_memctl_mem >= max_memctl_mem) + avail_memctl_mem = max_memctl_mem; + + drm_init_memctl(avail_memctl_mem/2, avail_memctl_mem*3/4, si.mem_unit); + + ret = -ENOMEM; if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops)) goto err_p1; @@ -380,7 +509,8 @@ static int __init drm_core_init(void) drm_mem_init(); DRM_INFO("Initialized %s %d.%d.%d %s\n", - CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE); + CORE_NAME, + CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE); return 0; err_p3: drm_sysfs_destroy(); @@ -409,7 +539,7 @@ module_exit(drm_core_exit); * Get version information * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument, pointing to a drm_version structure. * \return zero on success or negative number on failure. @@ -443,45 +573,64 @@ static int drm_version(struct drm_device * * Looks up the ioctl function in the ::ioctls table, checking for root * previleges if so required, and dispatches to the respective function. + * + * Copies data in and out according to the size and direction given in cmd, + * which must match the ioctl cmd known by the kernel. The kernel uses a 512 + * byte stack buffer to store the ioctl arguments in kernel space. Should we + * ever need much larger ioctl arguments, we may need to allocate memory. */ int drm_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { + return drm_unlocked_ioctl(filp, cmd, arg); +} +EXPORT_SYMBOL(drm_ioctl); + +long drm_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ struct drm_file *file_priv = filp->private_data; struct drm_device *dev = file_priv->minor->dev; struct drm_ioctl_desc *ioctl; drm_ioctl_t *func; unsigned int nr = DRM_IOCTL_NR(cmd); int retcode = -EINVAL; - char *kdata = NULL; + char kdata[512]; atomic_inc(&dev->ioctl_count); atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]); ++file_priv->ioctl_count; DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n", - task_pid_nr(current), cmd, nr, - (long)old_encode_dev(file_priv->minor->device), + current->pid, cmd, nr, (long)old_encode_dev(file_priv->minor->device), file_priv->authenticated); if ((nr >= DRM_CORE_IOCTL_COUNT) && ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END))) goto err_i1; - if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) && - (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) + if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) + && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE]; else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) { ioctl = &drm_ioctls[nr]; cmd = ioctl->cmd; - } else + } else { + retcode = -EINVAL; goto err_i1; + } - /* Do not trust userspace, use our own definition */ func = ioctl->func; /* is there a local override? 
*/ if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl) func = dev->driver->dma_ioctl; + if (cmd & IOC_IN) { + if (copy_from_user(kdata, (void __user *)arg, + _IOC_SIZE(cmd)) != 0) { + retcode = -EACCES; + goto err_i1; + } + } + if (!func) { DRM_DEBUG("no function\n"); retcode = -EINVAL; @@ -490,40 +639,22 @@ int drm_ioctl(struct inode *inode, struc ((ioctl->flags & DRM_MASTER) && !file_priv->master)) { retcode = -EACCES; } else { - if (cmd & (IOC_IN | IOC_OUT)) { - kdata = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL); - if (!kdata) { - retcode = -ENOMEM; - goto err_i1; - } - } - - if (cmd & IOC_IN) { - if (copy_from_user(kdata, (void __user *)arg, - _IOC_SIZE(cmd)) != 0) { - retcode = -EFAULT; - goto err_i1; - } - } retcode = func(dev, kdata, file_priv); + } - if ((retcode == 0) && (cmd & IOC_OUT)) { - if (copy_to_user((void __user *)arg, kdata, - _IOC_SIZE(cmd)) != 0) - retcode = -EFAULT; - } + if (cmd & IOC_OUT) { + if (copy_to_user((void __user *)arg, kdata, + _IOC_SIZE(cmd)) != 0) + retcode = -EACCES; } - err_i1: - if (kdata) - kfree(kdata); +err_i1: atomic_dec(&dev->ioctl_count); if (retcode) - DRM_DEBUG("ret = %x\n", retcode); + DRM_DEBUG("ret = %d\n", retcode); return retcode; } - -EXPORT_SYMBOL(drm_ioctl); +EXPORT_SYMBOL(drm_unlocked_ioctl); drm_local_map_t *drm_getsarea(struct drm_device *dev) { diff -Napur -X /home/jbarnes/dontdiff gpu/drm/drm_fops.c /tmp/drm-master/drivers/gpu/drm/drm_fops.c --- gpu/drm/drm_fops.c 2008-08-21 13:52:27.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/drm_fops.c 2008-08-21 14:34:09.000000000 -0700 @@ -37,7 +37,6 @@ #include "drmP.h" #include "drm_sarea.h" #include -#include static int drm_open_helper(struct inode *inode, struct file *filp, struct drm_device * dev); @@ -47,7 +46,7 @@ static int drm_setup(struct drm_device * drm_local_map_t *map; int i; int ret; - u32 sareapage; + int sareapage; if (dev->driver->firstopen) { ret = dev->driver->firstopen(dev); @@ -58,7 +57,7 @@ static int drm_setup(struct drm_device * dev->magicfree.next = NULL; /* prebuild the SAREA */ - sareapage = max_t(unsigned, SAREA_MAX, PAGE_SIZE); + sareapage = max(SAREA_MAX, PAGE_SIZE); i = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK, &map); if (i != 0) return i; @@ -86,7 +85,6 @@ static int drm_setup(struct drm_device * dev->queue_reserved = 0; dev->queue_slots = 0; dev->queuelist = NULL; - dev->irq_enabled = 0; dev->context_flag = 0; dev->interrupt_flag = 0; dev->dma_flag = 0; @@ -147,11 +145,20 @@ int drm_open(struct inode *inode, struct spin_lock(&dev->count_lock); if (!dev->open_count++) { spin_unlock(&dev->count_lock); - return drm_setup(dev); + retcode = drm_setup(dev); + goto out; } spin_unlock(&dev->count_lock); } +out: + mutex_lock(&dev->struct_mutex); + BUG_ON((dev->dev_mapping != NULL) && + (dev->dev_mapping != inode->i_mapping)); + if (dev->dev_mapping == NULL) + dev->dev_mapping = inode->i_mapping; + mutex_unlock(&dev->struct_mutex); + return retcode; } EXPORT_SYMBOL(drm_open); @@ -175,14 +182,12 @@ int drm_stub_open(struct inode *inode, s DRM_DEBUG("\n"); - /* BKL pushdown: note that nothing else serializes idr_find() */ - lock_kernel(); minor = idr_find(&drm_minors_idr, minor_id); if (!minor) - goto out; - + return -ENODEV; + if (!(dev = minor->dev)) - goto out; + return -ENODEV; old_fops = filp->f_op; filp->f_op = fops_get(&dev->driver->fops); @@ -192,8 +197,6 @@ int drm_stub_open(struct inode *inode, s } fops_put(old_fops); -out: - unlock_kernel(); return err; } @@ -231,13 +234,14 @@ static int drm_open_helper(struct inode int 
minor_id = iminor(inode); struct drm_file *priv; int ret; + int i, j; if (filp->f_flags & O_EXCL) return -EBUSY; /* No exclusive opens */ if (!drm_cpu_valid()) return -EINVAL; - DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id); + DRM_DEBUG("pid = %d, minor = %d\n", current->pid, minor_id); priv = drm_alloc(sizeof(*priv), DRM_MEM_FILES); if (!priv) @@ -247,7 +251,7 @@ static int drm_open_helper(struct inode filp->private_data = priv; priv->filp = filp; priv->uid = current->euid; - priv->pid = task_pid_nr(current); + priv->pid = current->pid; priv->minor = idr_find(&drm_minors_idr, minor_id); priv->ioctl_count = 0; /* for compatibility root is always authenticated */ @@ -255,6 +259,20 @@ static int drm_open_helper(struct inode priv->lock_count = 0; INIT_LIST_HEAD(&priv->lhead); + INIT_LIST_HEAD(&priv->refd_objects); + + for (i = 0; i < _DRM_NO_REF_TYPES; ++i) { + ret = drm_ht_create(&priv->refd_object_hash[i], + DRM_FILE_HASH_ORDER); + if (ret) + break; + } + + if (ret) { + for (j = 0; j < i; ++j) + drm_ht_remove(&priv->refd_object_hash[j]); + goto out_free; + } if (dev->driver->open) { ret = dev->driver->open(dev, priv); @@ -311,6 +329,33 @@ int drm_fasync(int fd, struct file *filp } EXPORT_SYMBOL(drm_fasync); +static void drm_object_release(struct file *filp) +{ + struct drm_file *priv = filp->private_data; + struct list_head *head; + struct drm_ref_object *ref_object; + int i; + + /* + * Free leftover ref objects created by me. Note that we cannot use + * list_for_each() here, as the struct_mutex may be temporarily + * released by the remove_() functions, and thus the lists may be + * altered. + * Also, a drm_remove_ref_object() will not remove it + * from the list unless its refcount is 1. + */ + + head = &priv->refd_objects; + while (head->next != head) { + ref_object = list_entry(head->next, struct drm_ref_object, list); + drm_remove_ref_object(priv, ref_object); + head = &priv->refd_objects; + } + + for (i = 0; i < _DRM_NO_REF_TYPES; ++i) + drm_ht_remove(&priv->refd_object_hash[i]); +} + /** * Release file. * @@ -341,15 +386,14 @@ int drm_release(struct inode *inode, str */ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n", - task_pid_nr(current), - (long)old_encode_dev(file_priv->minor->device), + current->pid, (long)old_encode_dev(file_priv->minor->device), dev->open_count); if (dev->driver->reclaim_buffers_locked && dev->lock.hw_lock) { if (drm_i_have_hw_lock(dev, file_priv)) { dev->driver->reclaim_buffers_locked(dev, file_priv); } else { - unsigned long endtime = jiffies + 3 * DRM_HZ; + unsigned long _end=jiffies + 3*DRM_HZ; int locked = 0; drm_idlelock_take(&dev->lock); @@ -365,7 +409,7 @@ int drm_release(struct inode *inode, str if (locked) break; schedule(); - } while (!time_after_eq(jiffies, endtime)); + } while (!time_after_eq(jiffies, _end)); if (!locked) { DRM_ERROR("reclaim_buffers_locked() deadlock. Please rework this\n" @@ -403,6 +447,7 @@ int drm_release(struct inode *inode, str drm_fasync(-1, filp, 0); mutex_lock(&dev->ctxlist_mutex); + if (!list_empty(&dev->ctxlist)) { struct drm_ctx_list *pos, *n; @@ -424,6 +469,7 @@ int drm_release(struct inode *inode, str mutex_unlock(&dev->ctxlist_mutex); mutex_lock(&dev->struct_mutex); + drm_object_release(filp); if (file_priv->remove_auth_on_close == 1) { struct drm_file *temp; @@ -464,8 +510,17 @@ int drm_release(struct inode *inode, str EXPORT_SYMBOL(drm_release); /** No-op. 
*/ +/* This is to deal with older X servers that believe 0 means data is + * available which is not the correct return for a poll function. + * This cannot be fixed until the Xserver is fixed. Xserver will need + * to set a newer interface version to avoid breaking older Xservers. + * Without fixing the Xserver you get: "WaitForSomething(): select: errno=22" + * http://freedesktop.org/bugzilla/show_bug.cgi?id=1505 if you try + * to return the correct response. + */ unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait) { + /* return (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM); */ return 0; } EXPORT_SYMBOL(drm_poll); diff -Napur -X /home/jbarnes/dontdiff gpu/drm/drm_ioctl.c /tmp/drm-master/drivers/gpu/drm/drm_ioctl.c --- gpu/drm/drm_ioctl.c 2008-08-21 13:52:27.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/drm_ioctl.c 2008-08-21 14:34:09.000000000 -0700 @@ -128,9 +128,8 @@ int drm_setunique(struct drm_device *dev static int drm_set_busid(struct drm_device * dev) { int len; - if (dev->unique != NULL) - return 0; + return -EBUSY; dev->unique_len = 40; dev->unique = drm_alloc(dev->unique_len + 1, DRM_MEM_DRIVER); @@ -138,12 +137,12 @@ static int drm_set_busid(struct drm_devi return -ENOMEM; len = snprintf(dev->unique, dev->unique_len, "pci:%04x:%02x:%02x.%d", - drm_get_pci_domain(dev), dev->pdev->bus->number, + drm_get_pci_domain(dev), + dev->pdev->bus->number, PCI_SLOT(dev->pdev->devfn), PCI_FUNC(dev->pdev->devfn)); - if (len > dev->unique_len) - DRM_ERROR("Unique buffer overflowed\n"); + DRM_ERROR("buffer overflow"); dev->devname = drm_alloc(strlen(dev->driver->pci_driver.name) + dev->unique_len + diff -Napur -X /home/jbarnes/dontdiff gpu/drm/drm_irq.c /tmp/drm-master/drivers/gpu/drm/drm_irq.c --- gpu/drm/drm_irq.c 2008-08-21 14:43:42.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/drm_irq.c 2008-08-21 14:34:09.000000000 -0700 @@ -63,7 +63,7 @@ int drm_irq_by_busid(struct drm_device * p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn)) return -EINVAL; - p->irq = dev->pdev->irq; + p->irq = dev->irq; DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum, p->irq); @@ -71,25 +71,137 @@ int drm_irq_by_busid(struct drm_device * return 0; } +static void vblank_disable_fn(unsigned long arg) +{ + struct drm_device *dev = (struct drm_device *)arg; + unsigned long irqflags; + int i; + + if (!dev->vblank_disable_allowed) + return; + + for (i = 0; i < dev->num_crtcs; i++) { + spin_lock_irqsave(&dev->vbl_lock, irqflags); + if (atomic_read(&dev->vblank_refcount[i]) == 0 && + dev->vblank_enabled[i]) { + DRM_DEBUG("disabling vblank on crtc %d\n", i); + dev->last_vblank[i] = + dev->driver->get_vblank_counter(dev, i); + dev->driver->disable_vblank(dev, i); + dev->vblank_enabled[i] = 0; + } + spin_unlock_irqrestore(&dev->vbl_lock, irqflags); + } +} + +static void drm_vblank_cleanup(struct drm_device *dev) +{ + /* Bail if the driver didn't call drm_vblank_init() */ + if (dev->num_crtcs == 0) + return; + + del_timer(&dev->vblank_disable_timer); + + vblank_disable_fn((unsigned long)dev); + + drm_free(dev->vbl_queue, sizeof(*dev->vbl_queue) * dev->num_crtcs, + DRM_MEM_DRIVER); + drm_free(dev->vbl_sigs, sizeof(*dev->vbl_sigs) * dev->num_crtcs, + DRM_MEM_DRIVER); + drm_free(dev->_vblank_count, sizeof(*dev->_vblank_count) * + dev->num_crtcs, DRM_MEM_DRIVER); + drm_free(dev->vblank_refcount, sizeof(*dev->vblank_refcount) * + dev->num_crtcs, DRM_MEM_DRIVER); + drm_free(dev->vblank_enabled, sizeof(*dev->vblank_enabled) * + dev->num_crtcs, 
DRM_MEM_DRIVER); + drm_free(dev->last_vblank, sizeof(*dev->last_vblank) * dev->num_crtcs, + DRM_MEM_DRIVER); + drm_free(dev->vblank_inmodeset, sizeof(*dev->vblank_inmodeset) * + dev->num_crtcs, DRM_MEM_DRIVER); + + dev->num_crtcs = 0; +} + +int drm_vblank_init(struct drm_device *dev, int num_crtcs) +{ + int i, ret = -ENOMEM; + + setup_timer(&dev->vblank_disable_timer, vblank_disable_fn, + (unsigned long)dev); + spin_lock_init(&dev->vbl_lock); + atomic_set(&dev->vbl_signal_pending, 0); + dev->num_crtcs = num_crtcs; + + dev->vbl_queue = drm_alloc(sizeof(wait_queue_head_t) * num_crtcs, + DRM_MEM_DRIVER); + if (!dev->vbl_queue) + goto err; + + dev->vbl_sigs = drm_alloc(sizeof(struct list_head) * num_crtcs, + DRM_MEM_DRIVER); + if (!dev->vbl_sigs) + goto err; + + dev->_vblank_count = drm_alloc(sizeof(atomic_t) * num_crtcs, + DRM_MEM_DRIVER); + if (!dev->_vblank_count) + goto err; + + dev->vblank_refcount = drm_alloc(sizeof(atomic_t) * num_crtcs, + DRM_MEM_DRIVER); + if (!dev->vblank_refcount) + goto err; + + dev->vblank_enabled = drm_calloc(num_crtcs, sizeof(int), + DRM_MEM_DRIVER); + if (!dev->vblank_enabled) + goto err; + + dev->last_vblank = drm_calloc(num_crtcs, sizeof(u32), DRM_MEM_DRIVER); + if (!dev->last_vblank) + goto err; + + dev->vblank_inmodeset = drm_calloc(num_crtcs, sizeof(int), + DRM_MEM_DRIVER); + if (!dev->vblank_inmodeset) + goto err; + + /* Zero per-crtc vblank stuff */ + for (i = 0; i < num_crtcs; i++) { + init_waitqueue_head(&dev->vbl_queue[i]); + INIT_LIST_HEAD(&dev->vbl_sigs[i]); + atomic_set(&dev->_vblank_count[i], 0); + atomic_set(&dev->vblank_refcount[i], 0); + } + + dev->vblank_disable_allowed = 0; + + return 0; + +err: + drm_vblank_cleanup(dev); + return ret; +} +EXPORT_SYMBOL(drm_vblank_init); + /** * Install IRQ handler. * * \param dev DRM device. - * \param irq IRQ number. * - * Initializes the IRQ related data, and setups drm_device::vbl_queue. Installs the handler, calling the driver + * Initializes the IRQ related data. Installs the handler, calling the driver * \c drm_driver_irq_preinstall() and \c drm_driver_irq_postinstall() functions * before and after the installation. */ -static int drm_irq_install(struct drm_device * dev) +int drm_irq_install(struct drm_device * dev) { - int ret; + int ret = 0; unsigned long sh_flags = 0; if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) return -EINVAL; - if (dev->pdev->irq == 0) + if (dev->irq == 0) return -EINVAL; mutex_lock(&dev->struct_mutex); @@ -107,18 +219,7 @@ static int drm_irq_install(struct drm_de dev->irq_enabled = 1; mutex_unlock(&dev->struct_mutex); - DRM_DEBUG("irq=%d\n", dev->pdev->irq); - - if (drm_core_check_feature(dev, DRIVER_IRQ_VBL)) { - init_waitqueue_head(&dev->vbl_queue); - - spin_lock_init(&dev->vbl_lock); - - INIT_LIST_HEAD(&dev->vbl_sigs); - INIT_LIST_HEAD(&dev->vbl_sigs2); - - dev->vbl_pending = 0; - } + DRM_DEBUG("irq=%d\n", dev->irq); /* Before installing handler */ dev->driver->irq_preinstall(dev); @@ -127,12 +228,8 @@ static int drm_irq_install(struct drm_de if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED)) sh_flags = IRQF_SHARED; - ret = request_irq(dev->pdev->irq, dev->driver->irq_handler, + ret = request_irq(dev->irq, dev->driver->irq_handler, sh_flags, dev->devname, dev); - /* Expose the device irq number to drivers that want to export it for - * whatever reason. 
- */ - dev->irq = dev->pdev->irq; if (ret < 0) { mutex_lock(&dev->struct_mutex); dev->irq_enabled = 0; @@ -141,10 +238,16 @@ static int drm_irq_install(struct drm_de } /* After installing handler */ - dev->driver->irq_postinstall(dev); + ret = dev->driver->irq_postinstall(dev); + if (ret < 0) { + mutex_lock(&dev->struct_mutex); + dev->irq_enabled = 0; + mutex_unlock(&dev->struct_mutex); + } - return 0; + return ret; } +EXPORT_SYMBOL(drm_irq_install); /** * Uninstall the IRQ handler. @@ -168,17 +271,18 @@ int drm_irq_uninstall(struct drm_device if (!irq_enabled) return -EINVAL; - DRM_DEBUG("irq=%d\n", dev->pdev->irq); + DRM_DEBUG("irq=%d\n", dev->irq); dev->driver->irq_uninstall(dev); - free_irq(dev->pdev->irq, dev); + free_irq(dev->irq, dev); + + drm_vblank_cleanup(dev); dev->locked_tasklet_func = NULL; return 0; } - EXPORT_SYMBOL(drm_irq_uninstall); /** @@ -205,7 +309,7 @@ int drm_control(struct drm_device *dev, if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) return 0; if (dev->if_version < DRM_IF_VERSION(1, 2) && - ctl->irq != dev->pdev->irq) + ctl->irq != dev->irq) return -EINVAL; return drm_irq_install(dev); case DRM_UNINST_HANDLER: @@ -218,6 +322,173 @@ int drm_control(struct drm_device *dev, } /** + * drm_vblank_count - retrieve "cooked" vblank counter value + * @dev: DRM device + * @crtc: which counter to retrieve + * + * Fetches the "cooked" vblank count value that represents the number of + * vblank events since the system was booted, including lost events due to + * modesetting activity. + */ +u32 drm_vblank_count(struct drm_device *dev, int crtc) +{ + return atomic_read(&dev->_vblank_count[crtc]); +} +EXPORT_SYMBOL(drm_vblank_count); + +/** + * drm_update_vblank_count - update the master vblank counter + * @dev: DRM device + * @crtc: counter to update + * + * Call back into the driver to update the appropriate vblank counter + * (specified by @crtc). Deal with wraparound, if it occurred, and + * update the last read value so we can deal with wraparound on the next + * call if necessary. + * + * Only necessary when going from off->on, to account for frames we + * didn't get an interrupt for. + * + * Note: caller must hold dev->vbl_lock since this reads & writes + * device vblank fields. + */ +static void drm_update_vblank_count(struct drm_device *dev, int crtc) +{ + u32 cur_vblank, diff; + + /* + * Interrupts were disabled prior to this call, so deal with counter + * wrap if needed. + * NOTE! It's possible we lost a full dev->max_vblank_count events + * here if the register is small or we had vblank interrupts off for + * a long time. + */ + cur_vblank = dev->driver->get_vblank_counter(dev, crtc); + diff = cur_vblank - dev->last_vblank[crtc]; + if (cur_vblank < dev->last_vblank[crtc]) { + diff += dev->max_vblank_count; + + DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n", + crtc, dev->last_vblank[crtc], cur_vblank, diff); + } + + DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n", + crtc, diff); + + atomic_add(diff, &dev->_vblank_count[crtc]); +} + +/** + * drm_vblank_get - get a reference count on vblank events + * @dev: DRM device + * @crtc: which CRTC to own + * + * Acquire a reference count on vblank events to avoid having them disabled + * while in use. + * + * RETURNS + * Zero on success, nonzero on failure. 
+ */ +int drm_vblank_get(struct drm_device *dev, int crtc) +{ + unsigned long irqflags; + int ret = 0; + + spin_lock_irqsave(&dev->vbl_lock, irqflags); + /* Going from 0->1 means we have to enable interrupts again */ + if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1 && + !dev->vblank_enabled[crtc]) { + ret = dev->driver->enable_vblank(dev, crtc); + if (ret) + atomic_dec(&dev->vblank_refcount[crtc]); + else { + dev->vblank_enabled[crtc] = 1; + drm_update_vblank_count(dev, crtc); + } + } + spin_unlock_irqrestore(&dev->vbl_lock, irqflags); + + return ret; +} +EXPORT_SYMBOL(drm_vblank_get); + +/** + * drm_vblank_put - give up ownership of vblank events + * @dev: DRM device + * @crtc: which counter to give up + * + * Release ownership of a given vblank counter, turning off interrupts + * if possible. + */ +void drm_vblank_put(struct drm_device *dev, int crtc) +{ + /* Last user schedules interrupt disable */ + if (atomic_dec_and_test(&dev->vblank_refcount[crtc])) + mod_timer(&dev->vblank_disable_timer, jiffies + 5*DRM_HZ); +} +EXPORT_SYMBOL(drm_vblank_put); + +/** + * drm_modeset_ctl - handle vblank event counter changes across mode switch + * @DRM_IOCTL_ARGS: standard ioctl arguments + * + * Applications should call the %_DRM_PRE_MODESET and %_DRM_POST_MODESET + * ioctls around modesetting so that any lost vblank events are accounted for. + * + * Generally the counter will reset across mode sets. If interrupts are + * enabled around this call, we don't have to do anything since the counter + * will have already been incremented. + */ +int drm_modeset_ctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_modeset_ctl *modeset = data; + unsigned long irqflags; + int crtc, ret = 0; + + /* If drm_vblank_init() hasn't been called yet, just no-op */ + if (!dev->num_crtcs) + goto out; + + crtc = modeset->crtc; + if (crtc >= dev->num_crtcs) { + ret = -EINVAL; + goto out; + } + + /* + * To avoid all the problems that might happen if interrupts + * were enabled/disabled around or between these calls, we just + * have the kernel take a reference on the CRTC (just once though + * to avoid corrupting the count if multiple, mismatch calls occur), + * so that interrupts remain enabled in the interim. + */ + switch (modeset->cmd) { + case _DRM_PRE_MODESET: + if (!dev->vblank_inmodeset[crtc]) { + dev->vblank_inmodeset[crtc] = 1; + drm_vblank_get(dev, crtc); + } + break; + case _DRM_POST_MODESET: + if (dev->vblank_inmodeset[crtc]) { + spin_lock_irqsave(&dev->vbl_lock, irqflags); + dev->vblank_disable_allowed = 1; + dev->vblank_inmodeset[crtc] = 0; + spin_unlock_irqrestore(&dev->vbl_lock, irqflags); + drm_vblank_put(dev, crtc); + } + break; + default: + ret = -EINVAL; + break; + } + +out: + return ret; +} + +/** * Wait for VBLANK. * * \param inode device inode. @@ -236,14 +507,14 @@ int drm_control(struct drm_device *dev, * * If a signal is not requested, then calls vblank_wait(). 
*/ -int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_priv) +int drm_wait_vblank(struct drm_device *dev, void *data, + struct drm_file *file_priv) { union drm_wait_vblank *vblwait = data; - struct timeval now; int ret = 0; - unsigned int flags, seq; + unsigned int flags, seq, crtc; - if ((!dev->pdev->irq) || (!dev->irq_enabled)) + if ((!dev->irq) || (!dev->irq_enabled)) return -EINVAL; if (vblwait->request.type & @@ -255,13 +526,15 @@ int drm_wait_vblank(struct drm_device *d } flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK; + crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0; - if (!drm_core_check_feature(dev, (flags & _DRM_VBLANK_SECONDARY) ? - DRIVER_IRQ_VBL2 : DRIVER_IRQ_VBL)) + if (crtc >= dev->num_crtcs) return -EINVAL; - seq = atomic_read((flags & _DRM_VBLANK_SECONDARY) ? &dev->vbl_received2 - : &dev->vbl_received); + ret = drm_vblank_get(dev, crtc); + if (ret) + return ret; + seq = drm_vblank_count(dev, crtc); switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) { case _DRM_VBLANK_RELATIVE: @@ -270,7 +543,8 @@ int drm_wait_vblank(struct drm_device *d case _DRM_VBLANK_ABSOLUTE: break; default: - return -EINVAL; + ret = -EINVAL; + goto done; } if ((flags & _DRM_VBLANK_NEXTONMISS) && @@ -280,8 +554,7 @@ int drm_wait_vblank(struct drm_device *d if (flags & _DRM_VBLANK_SIGNAL) { unsigned long irqflags; - struct list_head *vbl_sigs = (flags & _DRM_VBLANK_SECONDARY) - ? &dev->vbl_sigs2 : &dev->vbl_sigs; + struct list_head *vbl_sigs = &dev->vbl_sigs[crtc]; struct drm_vbl_sig *vbl_sig; spin_lock_irqsave(&dev->vbl_lock, irqflags); @@ -302,22 +575,29 @@ int drm_wait_vblank(struct drm_device *d } } - if (dev->vbl_pending >= 100) { + if (atomic_read(&dev->vbl_signal_pending) >= 100) { spin_unlock_irqrestore(&dev->vbl_lock, irqflags); - return -EBUSY; + ret = -EBUSY; + goto done; } - dev->vbl_pending++; - spin_unlock_irqrestore(&dev->vbl_lock, irqflags); - if (! - (vbl_sig = - drm_alloc(sizeof(struct drm_vbl_sig), DRM_MEM_DRIVER))) { - return -ENOMEM; + vbl_sig = drm_calloc(1, sizeof(struct drm_vbl_sig), + DRM_MEM_DRIVER); + if (!vbl_sig) { + ret = -ENOMEM; + goto done; + } + + ret = drm_vblank_get(dev, crtc); + if (ret) { + drm_free(vbl_sig, sizeof(struct drm_vbl_sig), + DRM_MEM_DRIVER); + return ret; } - memset((void *)vbl_sig, 0, sizeof(*vbl_sig)); + atomic_inc(&dev->vbl_signal_pending); vbl_sig->sequence = vblwait->request.sequence; vbl_sig->info.si_signo = vblwait->request.signal; @@ -331,20 +611,23 @@ int drm_wait_vblank(struct drm_device *d vblwait->reply.sequence = seq; } else { - if (flags & _DRM_VBLANK_SECONDARY) { - if (dev->driver->vblank_wait2) - ret = dev->driver->vblank_wait2(dev, &vblwait->request.sequence); - } else if (dev->driver->vblank_wait) - ret = - dev->driver->vblank_wait(dev, - &vblwait->request.sequence); - - do_gettimeofday(&now); - vblwait->reply.tval_sec = now.tv_sec; - vblwait->reply.tval_usec = now.tv_usec; + DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ, + ((drm_vblank_count(dev, crtc) + - vblwait->request.sequence) <= (1 << 23))); + + if (ret != -EINTR) { + struct timeval now; + + do_gettimeofday(&now); + + vblwait->reply.tval_sec = now.tv_sec; + vblwait->reply.tval_usec = now.tv_usec; + vblwait->reply.sequence = drm_vblank_count(dev, crtc); + } } - done: +done: + drm_vblank_put(dev, crtc); return ret; } @@ -352,44 +635,57 @@ int drm_wait_vblank(struct drm_device *d * Send the VBLANK signals. * * \param dev DRM device. 
+ * \param crtc CRTC where the vblank event occurred * * Sends a signal for each task in drm_device::vbl_sigs and empties the list. * * If a signal is not requested, then calls vblank_wait(). */ -void drm_vbl_send_signals(struct drm_device * dev) +static void drm_vbl_send_signals(struct drm_device * dev, int crtc) { + struct drm_vbl_sig *vbl_sig, *tmp; + struct list_head *vbl_sigs; + unsigned int vbl_seq; unsigned long flags; - int i; spin_lock_irqsave(&dev->vbl_lock, flags); - for (i = 0; i < 2; i++) { - struct drm_vbl_sig *vbl_sig, *tmp; - struct list_head *vbl_sigs = i ? &dev->vbl_sigs2 : &dev->vbl_sigs; - unsigned int vbl_seq = atomic_read(i ? &dev->vbl_received2 : - &dev->vbl_received); - - list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) { - if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) { - vbl_sig->info.si_code = vbl_seq; - send_sig_info(vbl_sig->info.si_signo, - &vbl_sig->info, vbl_sig->task); - - list_del(&vbl_sig->head); - - drm_free(vbl_sig, sizeof(*vbl_sig), - DRM_MEM_DRIVER); + vbl_sigs = &dev->vbl_sigs[crtc]; + vbl_seq = drm_vblank_count(dev, crtc); - dev->vbl_pending--; - } - } + list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) { + if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) { + vbl_sig->info.si_code = vbl_seq; + send_sig_info(vbl_sig->info.si_signo, + &vbl_sig->info, vbl_sig->task); + + list_del(&vbl_sig->head); + + drm_free(vbl_sig, sizeof(*vbl_sig), + DRM_MEM_DRIVER); + atomic_dec(&dev->vbl_signal_pending); + drm_vblank_put(dev, crtc); + } } spin_unlock_irqrestore(&dev->vbl_lock, flags); } -EXPORT_SYMBOL(drm_vbl_send_signals); +/** + * drm_handle_vblank - handle a vblank event + * @dev: DRM device + * @crtc: where this event occurred + * + * Drivers should call this routine in their vblank interrupt handlers to + * update the vblank counter and send any signals that may be pending. + */ +void drm_handle_vblank(struct drm_device *dev, int crtc) +{ + atomic_inc(&dev->_vblank_count[crtc]); + DRM_WAKEUP(&dev->vbl_queue[crtc]); + drm_vbl_send_signals(dev, crtc); +} +EXPORT_SYMBOL(drm_handle_vblank); /** * Tasklet wrapper function. 
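The drm_irq.c changes above replace the old vbl_queue/vbl_sigs(2) pair with per-CRTC vblank counters that drivers have to opt into. As a rough, hedged sketch of the driver side (the foo_* names and the register accesses they stand in for are invented; only drm_vblank_init(), drm_handle_vblank() and the get_vblank_counter/enable_vblank/disable_vblank hooks come from this patch):

	#include "drmP.h"

	/* Called by the core when a CRTC's vblank refcount goes 0->1, when it
	 * drops back to 0 (via the 5*DRM_HZ disable timer), and when the
	 * software counter needs the raw hardware frame count. */
	static int foo_enable_vblank(struct drm_device *dev, int crtc)
	{
		/* unmask the vblank interrupt for this pipe */
		return 0;
	}

	static void foo_disable_vblank(struct drm_device *dev, int crtc)
	{
		/* mask the vblank interrupt for this pipe */
	}

	static u32 foo_get_vblank_counter(struct drm_device *dev, int crtc)
	{
		/* read the hardware frame counter for this pipe */
		return 0;
	}

	static int foo_driver_load(struct drm_device *dev, unsigned long flags)
	{
		/* allocate the per-CRTC counters, wait queues and disable timer */
		return drm_vblank_init(dev, 2);	/* e.g. two CRTCs */
	}

	static irqreturn_t foo_irq_handler(DRM_IRQ_ARGS)
	{
		struct drm_device *dev = (struct drm_device *) arg;

		/* per interrupt: bump the cooked counter, wake drm_wait_vblank()
		 * sleepers and deliver any queued vblank signals */
		drm_handle_vblank(dev, 0);
		return IRQ_HANDLED;
	}

The core only calls the enable/disable hooks on refcount transitions: drm_vblank_get() enables the interrupt on 0->1 and resynchronizes the cooked counter via get_vblank_counter(), and drm_vblank_put() arms vblank_disable_timer so the interrupt is shut off again roughly five seconds after the last user goes away.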
diff -Napur -X /home/jbarnes/dontdiff gpu/drm/drm_sysfs.c /tmp/drm-master/drivers/gpu/drm/drm_sysfs.c --- gpu/drm/drm_sysfs.c 2008-08-21 13:52:27.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/drm_sysfs.c 2008-08-21 14:34:09.000000000 -0700 @@ -34,6 +34,8 @@ static int drm_sysfs_suspend(struct devi struct drm_minor *drm_minor = to_drm_minor(dev); struct drm_device *drm_dev = drm_minor->dev; + printk(KERN_ERR "%s\n", __FUNCTION__); + if (drm_dev->driver->suspend) return drm_dev->driver->suspend(drm_dev, state); @@ -164,7 +166,7 @@ int drm_sysfs_device_add(struct drm_mino minor->kdev.release = drm_sysfs_device_release; minor->kdev.devt = minor->device; minor_str = "card%d"; - + snprintf(minor->kdev.bus_id, BUS_ID_SIZE, minor_str, minor->index); err = device_register(&minor->kdev); diff -Napur -X /home/jbarnes/dontdiff gpu/drm/i810/i810_dma.c /tmp/drm-master/drivers/gpu/drm/i810/i810_dma.c --- gpu/drm/i810/i810_dma.c 2008-08-21 13:52:27.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/i810/i810_dma.c 2008-08-21 14:34:09.000000000 -0700 @@ -30,13 +30,14 @@ * */ +#include /* For task queue support */ +#include +#include + #include "drmP.h" #include "drm.h" #include "i810_drm.h" #include "i810_drv.h" -#include /* For task queue support */ -#include -#include #define I810_BUF_FREE 2 #define I810_BUF_CLIENT 1 @@ -45,6 +46,24 @@ #define I810_BUF_UNMAPPED 0 #define I810_BUF_MAPPED 1 +static inline void i810_print_status_page(struct drm_device * dev) +{ + struct drm_device_dma *dma = dev->dma; + drm_i810_private_t *dev_priv = dev->dev_private; + u32 *temp = dev_priv->hw_status_page; + int i; + + DRM_DEBUG("hw_status: Interrupt Status : %x\n", temp[0]); + DRM_DEBUG("hw_status: LpRing Head ptr : %x\n", temp[1]); + DRM_DEBUG("hw_status: IRing Head ptr : %x\n", temp[2]); + DRM_DEBUG("hw_status: Reserved : %x\n", temp[3]); + DRM_DEBUG("hw_status: Last Render: %x\n", temp[4]); + DRM_DEBUG("hw_status: Driver Counter : %d\n", temp[5]); + for (i = 6; i < dma->buf_count + 6; i++) { + DRM_DEBUG("buffer status idx : %d used: %d\n", i - 6, temp[i]); + } +} + static struct drm_buf *i810_freelist_get(struct drm_device * dev) { struct drm_device_dma *dma = dev->dma; @@ -892,7 +911,7 @@ static int i810_flush_queue(struct drm_d } /* Must be called with the lock held */ -static void i810_reclaim_buffers(struct drm_device * dev, +static void i810_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv) { struct drm_device_dma *dma = dev->dma; @@ -1019,7 +1038,7 @@ static int i810_getbuf(struct drm_device retcode = i810_dma_get_buffer(dev, d, file_priv); DRM_DEBUG("i810_dma: %d returning %d, granted = %d\n", - task_pid_nr(current), retcode, d->granted); + current->pid, retcode, d->granted); sarea_priv->last_dispatch = (int)hw_status[5]; @@ -1160,7 +1179,6 @@ static int i810_ov0_flip(struct drm_devi drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; LOCK_TEST_WITH_RETURN(dev, file_priv); - //Tell the overlay to update I810_WRITE(0x30000, dev_priv->overlay_physical | 0x80000000); diff -Napur -X /home/jbarnes/dontdiff gpu/drm/i810/i810_drv.c /tmp/drm-master/drivers/gpu/drm/i810/i810_drv.c --- gpu/drm/i810/i810_drv.c 2008-08-21 13:52:27.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/i810/i810_drv.c 2008-08-21 14:34:09.000000000 -0700 @@ -41,9 +41,10 @@ static struct pci_device_id pciidlist[] i810_PCI_IDS }; +static int probe(struct pci_dev *pdev, const struct pci_device_id *ent); static struct drm_driver driver = { .driver_features = - DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | 
DRIVER_USE_MTRR | + DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR | */ DRIVER_HAVE_DMA | DRIVER_DMA_QUEUE, .dev_priv_size = sizeof(drm_i810_buf_priv_t), .load = i810_driver_load, @@ -56,19 +57,20 @@ static struct drm_driver driver = { .get_reg_ofs = drm_core_get_reg_ofs, .ioctls = i810_ioctls, .fops = { - .owner = THIS_MODULE, - .open = drm_open, - .release = drm_release, - .ioctl = drm_ioctl, - .mmap = drm_mmap, - .poll = drm_poll, - .fasync = drm_fasync, - }, - + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .ioctl = drm_ioctl, + .mmap = drm_mmap, + .poll = drm_poll, + .fasync = drm_fasync, + }, .pci_driver = { - .name = DRIVER_NAME, - .id_table = pciidlist, - }, + .name = DRIVER_NAME, + .id_table = pciidlist, + .probe = probe, + .remove = __devexit_p(drm_cleanup_pci), + }, .name = DRIVER_NAME, .desc = DRIVER_DESC, @@ -78,10 +80,15 @@ static struct drm_driver driver = { .patchlevel = DRIVER_PATCHLEVEL, }; +static int probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + return drm_get_dev(pdev, ent, &driver); +} + static int __init i810_init(void) { driver.num_ioctls = i810_max_ioctl; - return drm_init(&driver); + return drm_init(&driver, pciidlist); } static void __exit i810_exit(void) diff -Napur -X /home/jbarnes/dontdiff gpu/drm/i810/i810_drv.h /tmp/drm-master/drivers/gpu/drm/i810/i810_drv.h --- gpu/drm/i810/i810_drv.h 2008-08-21 13:52:27.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/i810/i810_drv.h 2008-08-21 14:34:09.000000000 -0700 @@ -144,7 +144,7 @@ extern int i810_max_ioctl; volatile char *virt; #define BEGIN_LP_RING(n) do { \ - if (I810_VERBOSE) \ + if (I810_VERBOSE) \ DRM_DEBUG("BEGIN_LP_RING(%d)\n", n); \ if (dev_priv->ring.space < n*4) \ i810_wait_ring(dev, n*4); \ @@ -154,17 +154,17 @@ extern int i810_max_ioctl; virt = dev_priv->ring.virtual_start; \ } while (0) -#define ADVANCE_LP_RING() do { \ +#define ADVANCE_LP_RING() do { \ if (I810_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING\n"); \ dev_priv->ring.tail = outring; \ - I810_WRITE(LP_RING + RING_TAIL, outring); \ + I810_WRITE(LP_RING + RING_TAIL, outring); \ } while(0) -#define OUT_RING(n) do { \ +#define OUT_RING(n) do { \ if (I810_VERBOSE) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \ - *(volatile unsigned int *)(virt + outring) = n; \ - outring += 4; \ - outring &= ringmask; \ + *(volatile unsigned int *)(virt + outring) = n; \ + outring += 4; \ + outring &= ringmask; \ } while (0) #define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23)) diff -Napur -X /home/jbarnes/dontdiff gpu/drm/i810/Makefile /tmp/drm-master/drivers/gpu/drm/i810/Makefile --- gpu/drm/i810/Makefile 2008-08-21 13:52:27.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/i810/Makefile 1969-12-31 16:00:00.000000000 -0800 @@ -1,8 +0,0 @@ -# -# Makefile for the drm device driver. This driver provides support for the -# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. - -ccflags-y := -Iinclude/drm -i810-y := i810_drv.o i810_dma.o - -obj-$(CONFIG_DRM_I810) += i810.o diff -Napur -X /home/jbarnes/dontdiff gpu/drm/i915/i915_buffer.c /tmp/drm-master/drivers/gpu/drm/i915/i915_buffer.c --- gpu/drm/i915/i915_buffer.c 1969-12-31 16:00:00.000000000 -0800 +++ /tmp/drm-master/drivers/gpu/drm/i915/i915_buffer.c 2008-08-21 14:34:09.000000000 -0700 @@ -0,0 +1,194 @@ +/************************************************************************** + * + * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * + **************************************************************************/ +/* + * Authors: Thomas Hellström + */ + +#include "drmP.h" +#include "i915_drm.h" +#include "i915_drv.h" + +struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device *dev) +{ + return drm_agp_init_ttm(dev); +} + +int i915_fence_type(struct drm_buffer_object *bo, + uint32_t *fclass, + uint32_t *type) +{ + if (bo->mem.proposed_flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE)) + *type = 3; + else + *type = 1; + return 0; +} + +int i915_invalidate_caches(struct drm_device *dev, uint64_t flags) +{ + /* + * FIXME: Only emit once per batchbuffer submission. + */ + + uint32_t flush_cmd = MI_NO_WRITE_FLUSH; + + if (flags & DRM_BO_FLAG_READ) + flush_cmd |= MI_READ_FLUSH; + if (flags & DRM_BO_FLAG_EXE) + flush_cmd |= MI_EXE_FLUSH; + + return i915_emit_mi_flush(dev, flush_cmd); +} + +int i915_init_mem_type(struct drm_device *dev, uint32_t type, + struct drm_mem_type_manager *man) +{ + switch (type) { + case DRM_BO_MEM_LOCAL: + man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | + _DRM_FLAG_MEMTYPE_CACHED; + man->drm_bus_maptype = 0; + man->gpu_offset = 0; + break; + case DRM_BO_MEM_TT: + if (!(drm_core_has_AGP(dev) && dev->agp)) { + DRM_ERROR("AGP is not enabled for memory type %u\n", + (unsigned)type); + return -EINVAL; + } + man->io_offset = dev->agp->agp_info.aper_base; + man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024; + man->io_addr = NULL; + man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | + _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_NEEDS_IOREMAP; + man->drm_bus_maptype = _DRM_AGP; + man->gpu_offset = 0; + break; + case DRM_BO_MEM_PRIV0: + if (!(drm_core_has_AGP(dev) && dev->agp)) { + DRM_ERROR("AGP is not enabled for memory type %u\n", + (unsigned)type); + return -EINVAL; + } + man->io_offset = dev->agp->agp_info.aper_base; + man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024; + man->io_addr = NULL; + man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | + _DRM_FLAG_MEMTYPE_FIXED | _DRM_FLAG_NEEDS_IOREMAP; + man->drm_bus_maptype = _DRM_AGP; + man->gpu_offset = 0; + break; + default: + DRM_ERROR("Unsupported memory type %u\n", (unsigned)type); + return -EINVAL; + } + return 0; +} + +/* + * i915_evict_flags: + * + * @bo: the buffer object to be evicted + * + * Return the bo flags for a buffer which is not mapped to the hardware. 
+ * These will be placed in proposed_flags so that when the move is + * finished, they'll end up in bo->mem.flags + */ +uint64_t i915_evict_flags(struct drm_buffer_object *bo) +{ + switch (bo->mem.mem_type) { + case DRM_BO_MEM_LOCAL: + case DRM_BO_MEM_TT: + return DRM_BO_FLAG_MEM_LOCAL; + default: + return DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED; + } +} + + +/* + * Disable i915_move_flip for now, since we can't guarantee that the hardware + * lock is held here. To re-enable we need to make sure either + * a) The X server is using DRM to submit commands to the ring, or + * b) DRM can use the HP ring for these blits. This means i915 needs to + * implement a new ring submission mechanism and fence class. + */ +int i915_move(struct drm_buffer_object *bo, + int evict, int no_wait, struct drm_bo_mem_reg *new_mem) +{ + struct drm_bo_mem_reg *old_mem = &bo->mem; + + if (old_mem->mem_type == DRM_BO_MEM_LOCAL) { + return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); + } else if (new_mem->mem_type == DRM_BO_MEM_LOCAL) { + if (1) /*i915_move_flip(bo, evict, no_wait, new_mem)*/ + return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); + } else { + if (1) /*i915_move_blit(bo, evict, no_wait, new_mem)*/ + return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); + } + return 0; +} + + +static inline void drm_cache_flush_addr(void *virt) +{ + int i; + + for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size) + clflush(virt+i); +} + +static inline void drm_cache_flush_page(struct page *p) +{ + drm_cache_flush_addr(page_address(p)); +} + +void i915_flush_ttm(struct drm_ttm *ttm) +{ + int i; + + if (!ttm) + return; + + DRM_MEMORYBARRIER(); + +#ifdef CONFIG_X86_32 + /* Hopefully nobody has built an x86-64 processor without clflush */ + if (!cpu_has_clflush) { + wbinvd(); + DRM_MEMORYBARRIER(); + return; + } +#endif + + for (i = ttm->num_pages - 1; i >= 0; i--) + drm_cache_flush_page(drm_ttm_get_page(ttm, i)); + + DRM_MEMORYBARRIER(); +} diff -Napur -X /home/jbarnes/dontdiff gpu/drm/i915/i915_compat.c /tmp/drm-master/drivers/gpu/drm/i915/i915_compat.c --- gpu/drm/i915/i915_compat.c 1969-12-31 16:00:00.000000000 -0800 +++ /tmp/drm-master/drivers/gpu/drm/i915/i915_compat.c 2008-08-21 14:34:09.000000000 -0700 @@ -0,0 +1,2 @@ +#include "drmP.h" + diff -Napur -X /home/jbarnes/dontdiff gpu/drm/i915/i915_dma.c /tmp/drm-master/drivers/gpu/drm/i915/i915_dma.c --- gpu/drm/i915/i915_dma.c 2008-08-21 14:43:42.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/i915/i915_dma.c 2008-08-21 14:34:09.000000000 -0700 @@ -40,83 +40,27 @@ int i915_wait_ring(struct drm_device * d { drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_ring_buffer_t *ring = &(dev_priv->ring); - u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD; - u32 last_acthd = I915_READ(acthd_reg); - u32 acthd; u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR; int i; - for (i = 0; i < 100000; i++) { + for (i = 0; i < 10000; i++) { ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR; - acthd = I915_READ(acthd_reg); ring->space = ring->head - (ring->tail + 8); if (ring->space < 0) ring->space += ring->Size; if (ring->space >= n) return 0; - dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; - if (ring->head != last_head) i = 0; - if (acthd != last_acthd) - i = 0; last_head = ring->head; - last_acthd = acthd; - msleep_interruptible(10); - + DRM_UDELAY(1); } return -EBUSY; } -/** - * Sets up the hardware status page for devices that need a physical address - * in the register. 
- */ -int i915_init_phys_hws(struct drm_device *dev) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - /* Program Hardware Status Page */ - dev_priv->status_page_dmah = - drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff); - - if (!dev_priv->status_page_dmah) { - DRM_ERROR("Can not allocate hardware status page\n"); - return -ENOMEM; - } - dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr; - dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr; - - memset(dev_priv->hw_status_page, 0, PAGE_SIZE); - - I915_WRITE(HWS_PGA, dev_priv->dma_status_page); - DRM_DEBUG("Enabled hardware status page\n"); - return 0; -} - -/** - * Frees the hardware status page, whether it's a physical address or a virtual - * address set up by the X Server. - */ -void i915_free_hws(struct drm_device *dev) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - if (dev_priv->status_page_dmah) { - drm_pci_free(dev, dev_priv->status_page_dmah); - dev_priv->status_page_dmah = NULL; - } - - if (dev_priv->status_gfx_addr) { - dev_priv->status_gfx_addr = 0; - drm_core_ioremapfree(&dev_priv->hws_map, dev); - } - - /* Need to rewrite hardware status page */ - I915_WRITE(HWS_PGA, 0x1ffff000); -} - void i915_kernel_lost_context(struct drm_device * dev) { drm_i915_private_t *dev_priv = dev->dev_private; @@ -127,9 +71,6 @@ void i915_kernel_lost_context(struct drm ring->space = ring->head - (ring->tail + 8); if (ring->space < 0) ring->space += ring->Size; - - if (ring->head == ring->tail) - dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY; } static int i915_dma_cleanup(struct drm_device * dev) @@ -139,7 +80,7 @@ static int i915_dma_cleanup(struct drm_d * may not have been called from userspace and after dev_private * is freed, it's too late. */ - if (dev->irq_enabled) + if (dev->irq) drm_irq_uninstall(dev); if (dev_priv->ring.virtual_start) { @@ -149,17 +90,87 @@ static int i915_dma_cleanup(struct drm_d dev_priv->ring.map.size = 0; } - /* Clear the HWS virtual address at teardown */ - if (I915_NEED_GFX_HWS(dev)) - i915_free_hws(dev); + if (dev_priv->status_page_dmah) { + drm_pci_free(dev, dev_priv->status_page_dmah); + dev_priv->status_page_dmah = NULL; + /* Need to rewrite hardware status page */ + I915_WRITE(0x02080, 0x1ffff000); + } + + if (dev_priv->status_gfx_addr) { + dev_priv->status_gfx_addr = 0; + drm_core_ioremapfree(&dev_priv->hws_map, dev); + I915_WRITE(0x02080, 0x1ffff000); + } return 0; } -static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) +#define DRI2_SAREA_BLOCK_TYPE(b) ((b) >> 16) +#define DRI2_SAREA_BLOCK_SIZE(b) ((b) & 0xffff) +#define DRI2_SAREA_BLOCK_NEXT(p) \ + ((void *) ((unsigned char *) (p) + \ + DRI2_SAREA_BLOCK_SIZE(*(unsigned int *) p))) + +#define DRI2_SAREA_BLOCK_END 0x0000 +#define DRI2_SAREA_BLOCK_LOCK 0x0001 +#define DRI2_SAREA_BLOCK_EVENT_BUFFER 0x0002 + +static int +setup_dri2_sarea(struct drm_device * dev, + struct drm_file *file_priv, + drm_i915_init_t * init) { drm_i915_private_t *dev_priv = dev->dev_private; + int ret; + unsigned int *p, *end, *next; + + mutex_lock(&dev->struct_mutex); + dev_priv->sarea_bo = + drm_lookup_buffer_object(file_priv, + init->sarea_handle, 1); + mutex_unlock(&dev->struct_mutex); + + if (!dev_priv->sarea_bo) { + DRM_ERROR("did not find sarea bo\n"); + return -EINVAL; + } + + ret = drm_bo_kmap(dev_priv->sarea_bo, 0, + dev_priv->sarea_bo->num_pages, + &dev_priv->sarea_kmap); + if (ret) { + DRM_ERROR("could not map sarea bo\n"); + return ret; + } + p = dev_priv->sarea_kmap.virtual; + end = (void *) p + 
(dev_priv->sarea_bo->num_pages << PAGE_SHIFT); + while (p < end && DRI2_SAREA_BLOCK_TYPE(*p) != DRI2_SAREA_BLOCK_END) { + switch (DRI2_SAREA_BLOCK_TYPE(*p)) { + case DRI2_SAREA_BLOCK_LOCK: + dev->lock.hw_lock = (void *) (p + 1); + dev->sigdata.lock = dev->lock.hw_lock; + break; + } + next = DRI2_SAREA_BLOCK_NEXT(p); + if (next <= p || end < next) { + DRM_ERROR("malformed dri2 sarea: next is %p should be within %p-%p\n", + next, p, end); + return -EINVAL; + } + p = next; + } + + return 0; +} + +static int i915_initialize(struct drm_device * dev, + struct drm_file *file_priv, + drm_i915_init_t * init) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + int ret; dev_priv->sarea = drm_getsarea(dev); if (!dev_priv->sarea) { DRM_ERROR("can not find sarea!\n"); @@ -167,8 +178,24 @@ static int i915_initialize(struct drm_de return -EINVAL; } - dev_priv->sarea_priv = (drm_i915_sarea_t *) - ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset); + if (init->mmio_offset != 0) + dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset); + if (!dev_priv->mmio_map) { + i915_dma_cleanup(dev); + DRM_ERROR("can not find mmio map!\n"); + return -EINVAL; + } + + dev_priv->max_validate_buffers = I915_MAX_VALIDATE_BUFFERS; + + if (init->sarea_priv_offset) + dev_priv->sarea_priv = (drm_i915_sarea_t *) + ((u8 *) dev_priv->sarea->handle + + init->sarea_priv_offset); + else { + /* No sarea_priv for you! */ + dev_priv->sarea_priv = NULL; + } dev_priv->ring.Start = init->ring_start; dev_priv->ring.End = init->ring_end; @@ -193,15 +220,53 @@ static int i915_initialize(struct drm_de dev_priv->ring.virtual_start = dev_priv->ring.map.handle; dev_priv->cpp = init->cpp; - dev_priv->back_offset = init->back_offset; - dev_priv->front_offset = init->front_offset; - dev_priv->current_page = 0; - dev_priv->sarea_priv->pf_current_page = dev_priv->current_page; + + if (dev_priv->sarea_priv) + dev_priv->sarea_priv->pf_current_page = 0; + + /* We are using separate values as placeholders for mechanisms for + * private backbuffer/depthbuffer usage. + */ + dev_priv->use_mi_batchbuffer_start = 0; + if (IS_I965G(dev)) /* 965 doesn't support older method */ + dev_priv->use_mi_batchbuffer_start = 1; /* Allow hardware batchbuffers unless told otherwise. 
*/ dev_priv->allow_batchbuffer = 1; + /* Enable vblank on pipe A for older X servers + */ + dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A; + + /* Program Hardware Status Page */ + if (!I915_NEED_GFX_HWS(dev)) { + dev_priv->status_page_dmah = + drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff); + + if (!dev_priv->status_page_dmah) { + i915_dma_cleanup(dev); + DRM_ERROR("Can not allocate hardware status page\n"); + return -ENOMEM; + } + dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr; + dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr; + + memset(dev_priv->hw_status_page, 0, PAGE_SIZE); + + I915_WRITE(0x02080, dev_priv->dma_status_page); + } + DRM_DEBUG("Enabled hardware status page\n"); + mutex_init(&dev_priv->cmdbuf_mutex); + if (init->func == I915_INIT_DMA2) { + ret = setup_dri2_sarea(dev, file_priv, init); + if (ret) { + i915_dma_cleanup(dev); + DRM_ERROR("could not set up dri2 sarea\n"); + return ret; + } + } + return 0; } @@ -209,13 +274,18 @@ static int i915_dma_resume(struct drm_de { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - DRM_DEBUG("%s\n", __func__); + DRM_DEBUG("\n"); if (!dev_priv->sarea) { DRM_ERROR("can not find sarea!\n"); return -EINVAL; } + if (!dev_priv->mmio_map) { + DRM_ERROR("can not find mmio map!\n"); + return -EINVAL; + } + if (dev_priv->ring.map.handle == NULL) { DRM_ERROR("can not ioremap virtual address for" " ring buffer\n"); @@ -230,9 +300,9 @@ static int i915_dma_resume(struct drm_de DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page); if (dev_priv->status_gfx_addr != 0) - I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); + I915_WRITE(0x02080, dev_priv->status_gfx_addr); else - I915_WRITE(HWS_PGA, dev_priv->dma_status_page); + I915_WRITE(0x02080, dev_priv->dma_status_page); DRM_DEBUG("Enabled hardware status page\n"); return 0; @@ -246,7 +316,8 @@ static int i915_dma_init(struct drm_devi switch (init->func) { case I915_INIT_DMA: - retcode = i915_initialize(dev, init); + case I915_INIT_DMA2: + retcode = i915_initialize(dev, file_priv, init); break; case I915_CLEANUP_DMA: retcode = i915_dma_cleanup(dev); @@ -338,7 +409,8 @@ static int validate_cmd(int cmd) return ret; } -static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwords) +static int i915_emit_cmds(struct drm_device *dev, int __user *buffer, + int dwords) { drm_i915_private_t *dev_priv = dev->dev_private; int i; @@ -420,32 +492,58 @@ static int i915_emit_box(struct drm_devi * emit. 
For now, do it in both places: */ -static void i915_emit_breadcrumb(struct drm_device *dev) +void i915_emit_breadcrumb(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; RING_LOCALS; - dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter; + if (++dev_priv->counter > BREADCRUMB_MASK) { + dev_priv->counter = 1; + DRM_DEBUG("Breadcrumb counter wrapped around\n"); + } - if (dev_priv->counter > 0x7FFFFFFFUL) - dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1; + if (dev_priv->sarea_priv) + dev_priv->sarea_priv->last_enqueue = dev_priv->counter; BEGIN_LP_RING(4); OUT_RING(MI_STORE_DWORD_INDEX); - OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT); + OUT_RING(20); OUT_RING(dev_priv->counter); OUT_RING(0); ADVANCE_LP_RING(); } + +int i915_emit_mi_flush(struct drm_device *dev, uint32_t flush) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + uint32_t flush_cmd = MI_FLUSH; + RING_LOCALS; + + flush_cmd |= flush; + + i915_kernel_lost_context(dev); + + BEGIN_LP_RING(4); + OUT_RING(flush_cmd); + OUT_RING(0); + OUT_RING(0); + OUT_RING(0); + ADVANCE_LP_RING(); + + return 0; +} + + static int i915_dispatch_cmdbuffer(struct drm_device * dev, drm_i915_cmdbuffer_t * cmd) { + drm_i915_private_t *dev_priv = dev->dev_private; int nbox = cmd->num_cliprects; int i = 0, count, ret; if (cmd->sz & 0x3) { - DRM_ERROR("alignment"); + DRM_ERROR("alignment\n"); return -EINVAL; } @@ -467,11 +565,13 @@ static int i915_dispatch_cmdbuffer(struc } i915_emit_breadcrumb(dev); + if (unlikely((dev_priv->counter & 0xFF) == 0)) + drm_fence_flush_old(dev, 0, dev_priv->counter); return 0; } -static int i915_dispatch_batchbuffer(struct drm_device * dev, - drm_i915_batchbuffer_t * batch) +int i915_dispatch_batchbuffer(struct drm_device * dev, + drm_i915_batchbuffer_t * batch) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_clip_rect __user *boxes = batch->cliprects; @@ -480,7 +580,7 @@ static int i915_dispatch_batchbuffer(str RING_LOCALS; if ((batch->start | batch->used) & 0x7) { - DRM_ERROR("alignment"); + DRM_ERROR("alignment\n"); return -EINVAL; } @@ -496,7 +596,7 @@ static int i915_dispatch_batchbuffer(str return ret; } - if (!IS_I830(dev) && !IS_845G(dev)) { + if (dev_priv->use_mi_batchbuffer_start) { BEGIN_LP_RING(2); if (IS_I965G(dev)) { OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965); @@ -506,6 +606,7 @@ static int i915_dispatch_batchbuffer(str OUT_RING(batch->start | MI_BATCH_NON_SECURE); } ADVANCE_LP_RING(); + } else { BEGIN_LP_RING(4); OUT_RING(MI_BATCH_BUFFER); @@ -517,69 +618,94 @@ static int i915_dispatch_batchbuffer(str } i915_emit_breadcrumb(dev); - + if (unlikely((dev_priv->counter & 0xFF) == 0)) + drm_fence_flush_old(dev, 0, dev_priv->counter); return 0; } -static int i915_dispatch_flip(struct drm_device * dev) +static void i915_do_dispatch_flip(struct drm_device * dev, int plane, int sync) { drm_i915_private_t *dev_priv = dev->dev_private; + u32 num_pages, current_page, next_page, dspbase; + int shift = 2 * plane, x, y; RING_LOCALS; - DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n", - __FUNCTION__, - dev_priv->current_page, - dev_priv->sarea_priv->pf_current_page); - - i915_kernel_lost_context(dev); + /* Calculate display base offset */ + num_pages = dev_priv->sarea_priv->third_handle ? 
3 : 2; + current_page = (dev_priv->sarea_priv->pf_current_page >> shift) & 0x3; + next_page = (current_page + 1) % num_pages; - BEGIN_LP_RING(2); - OUT_RING(MI_FLUSH | MI_READ_FLUSH); - OUT_RING(0); - ADVANCE_LP_RING(); + switch (next_page) { + default: + case 0: + dspbase = dev_priv->sarea_priv->front_offset; + break; + case 1: + dspbase = dev_priv->sarea_priv->back_offset; + break; + case 2: + dspbase = dev_priv->sarea_priv->third_offset; + break; + } - BEGIN_LP_RING(6); - OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP); - OUT_RING(0); - if (dev_priv->current_page == 0) { - OUT_RING(dev_priv->back_offset); - dev_priv->current_page = 1; + if (plane == 0) { + x = dev_priv->sarea_priv->planeA_x; + y = dev_priv->sarea_priv->planeA_y; } else { - OUT_RING(dev_priv->front_offset); - dev_priv->current_page = 0; + x = dev_priv->sarea_priv->planeB_x; + y = dev_priv->sarea_priv->planeB_y; } - OUT_RING(0); - ADVANCE_LP_RING(); - BEGIN_LP_RING(2); - OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP); - OUT_RING(0); - ADVANCE_LP_RING(); + dspbase += (y * dev_priv->sarea_priv->pitch + x) * dev_priv->cpp; - dev_priv->sarea_priv->last_enqueue = dev_priv->counter++; + DRM_DEBUG("plane=%d current_page=%d dspbase=0x%x\n", plane, current_page, + dspbase); BEGIN_LP_RING(4); - OUT_RING(MI_STORE_DWORD_INDEX); - OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT); - OUT_RING(dev_priv->counter); - OUT_RING(0); + OUT_RING(sync ? 0 : + (MI_WAIT_FOR_EVENT | (plane ? MI_WAIT_FOR_PLANE_B_FLIP : + MI_WAIT_FOR_PLANE_A_FLIP))); + OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | (sync ? 0 : ASYNC_FLIP) | + (plane ? DISPLAY_PLANE_B : DISPLAY_PLANE_A)); + OUT_RING(dev_priv->sarea_priv->pitch * dev_priv->cpp); + OUT_RING(dspbase); ADVANCE_LP_RING(); - dev_priv->sarea_priv->pf_current_page = dev_priv->current_page; - return 0; + dev_priv->sarea_priv->pf_current_page &= ~(0x3 << shift); + dev_priv->sarea_priv->pf_current_page |= next_page << shift; } -static int i915_quiescent(struct drm_device * dev) +void i915_dispatch_flip(struct drm_device * dev, int planes, int sync) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + int i; + + DRM_DEBUG("planes=0x%x pfCurrentPage=%d\n", + planes, dev_priv->sarea_priv->pf_current_page); + + i915_emit_mi_flush(dev, MI_READ_FLUSH | MI_EXE_FLUSH); + + for (i = 0; i < 2; i++) + if (planes & (1 << i)) + i915_do_dispatch_flip(dev, i, sync); + + i915_emit_breadcrumb(dev); + if (unlikely(!sync && ((dev_priv->counter & 0xFF) == 0))) + drm_fence_flush_old(dev, 0, dev_priv->counter); +} + +int i915_quiescent(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; i915_kernel_lost_context(dev); - return i915_wait_ring(dev, dev_priv->ring.Size - 8, __func__); + return i915_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__); } static int i915_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { + LOCK_TEST_WITH_RETURN(dev, file_priv); return i915_quiescent(dev); @@ -589,7 +715,6 @@ static int i915_batchbuffer(struct drm_d struct drm_file *file_priv) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - u32 *hw_status = dev_priv->hw_status_page; drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) dev_priv->sarea_priv; drm_i915_batchbuffer_t *batch = data; @@ -606,13 +731,13 @@ static int i915_batchbuffer(struct drm_d LOCK_TEST_WITH_RETURN(dev, file_priv); if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects, - batch->num_cliprects * - sizeof(struct drm_clip_rect))) + batch->num_cliprects * + sizeof(struct drm_clip_rect))) return 
-EFAULT; ret = i915_dispatch_batchbuffer(dev, batch); - sarea_priv->last_dispatch = (int)hw_status[5]; + sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); return ret; } @@ -620,7 +745,6 @@ static int i915_cmdbuffer(struct drm_dev struct drm_file *file_priv) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - u32 *hw_status = dev_priv->hw_status_page; drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) dev_priv->sarea_priv; drm_i915_cmdbuffer_t *cmdbuf = data; @@ -645,20 +769,59 @@ static int i915_cmdbuffer(struct drm_dev return ret; } - sarea_priv->last_dispatch = (int)hw_status[5]; + sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); return 0; } -static int i915_flip_bufs(struct drm_device *dev, void *data, - struct drm_file *file_priv) +#if defined(DRM_DEBUG_CODE) +#define DRM_DEBUG_RELOCATION (drm_debug != 0) +#else +#define DRM_DEBUG_RELOCATION 0 +#endif + +static int i915_do_cleanup_pageflip(struct drm_device * dev) { - DRM_DEBUG("%s\n", __FUNCTION__); + drm_i915_private_t *dev_priv = dev->dev_private; + int i, planes, num_pages = dev_priv->sarea_priv->third_handle ? 3 : 2; + + DRM_DEBUG("\n"); + + for (i = 0, planes = 0; i < 2; i++) + if (dev_priv->sarea_priv->pf_current_page & (0x3 << (2 * i))) { + dev_priv->sarea_priv->pf_current_page = + (dev_priv->sarea_priv->pf_current_page & + ~(0x3 << (2 * i))) | ((num_pages - 1) << (2 * i)); + + planes |= 1 << i; + } + + if (planes) + i915_dispatch_flip(dev, planes, 0); + + return 0; +} + +static int i915_flip_bufs(struct drm_device *dev, void *data, struct drm_file *file_priv) +{ + drm_i915_flip_t *param = data; + + DRM_DEBUG("\n"); LOCK_TEST_WITH_RETURN(dev, file_priv); - return i915_dispatch_flip(dev); + /* This is really planes */ + if (param->pipes & ~0x3) { + DRM_ERROR("Invalid planes 0x%x, only <= 0x3 is valid\n", + param->pipes); + return -EINVAL; + } + + i915_dispatch_flip(dev, param->pipes, 0); + + return 0; } + static int i915_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv) { @@ -673,7 +836,7 @@ static int i915_getparam(struct drm_devi switch (param->param) { case I915_PARAM_IRQ_ACTIVE: - value = dev->irq_enabled; + value = dev->irq ? 1 : 0; break; case I915_PARAM_ALLOW_BATCHBUFFER: value = dev_priv->allow_batchbuffer ? 
1 : 0; @@ -681,6 +844,9 @@ static int i915_getparam(struct drm_devi case I915_PARAM_LAST_DISPATCH: value = READ_BREADCRUMB(dev_priv); break; + case I915_PARAM_CHIPSET_ID: + value = dev->pci_device; + break; default: DRM_ERROR("Unknown parameter %d\n", param->param); return -EINVAL; @@ -707,6 +873,8 @@ static int i915_setparam(struct drm_devi switch (param->param) { case I915_SETPARAM_USE_MI_BATCHBUFFER_START: + if (!IS_I965G(dev)) + dev_priv->use_mi_batchbuffer_start = param->value; break; case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY: dev_priv->tex_lru_log_granularity = param->value; @@ -722,6 +890,63 @@ static int i915_setparam(struct drm_devi return 0; } +drm_i915_mmio_entry_t mmio_table[] = { + [MMIO_REGS_PS_DEPTH_COUNT] = { + I915_MMIO_MAY_READ|I915_MMIO_MAY_WRITE, + 0x2350, + 8 + } +}; + +static int mmio_table_size = sizeof(mmio_table)/sizeof(drm_i915_mmio_entry_t); + +static int i915_mmio(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + uint32_t buf[8]; + drm_i915_private_t *dev_priv = dev->dev_private; + drm_i915_mmio_entry_t *e; + drm_i915_mmio_t *mmio = data; + void __iomem *base; + int i; + + if (!dev_priv) { + DRM_ERROR("called with no initialization\n"); + return -EINVAL; + } + + if (mmio->reg >= mmio_table_size) + return -EINVAL; + + e = &mmio_table[mmio->reg]; + base = (u8 *) dev_priv->mmio_map->handle + e->offset; + + switch (mmio->read_write) { + case I915_MMIO_READ: + if (!(e->flag & I915_MMIO_MAY_READ)) + return -EINVAL; + for (i = 0; i < e->size / 4; i++) + buf[i] = I915_READ(e->offset + i * 4); + if (DRM_COPY_TO_USER(mmio->data, buf, e->size)) { + DRM_ERROR("DRM_COPY_TO_USER failed\n"); + return -EFAULT; + } + break; + + case I915_MMIO_WRITE: + if (!(e->flag & I915_MMIO_MAY_WRITE)) + return -EINVAL; + if (DRM_COPY_FROM_USER(buf, mmio->data, e->size)) { + DRM_ERROR("DRM_COPY_TO_USER failed\n"); + return -EFAULT; + } + for (i = 0; i < e->size / 4; i++) + I915_WRITE(e->offset + i * 4, buf[i]); + break; + } + return 0; +} + static int i915_set_status_page(struct drm_device *dev, void *data, struct drm_file *file_priv) { @@ -735,8 +960,7 @@ static int i915_set_status_page(struct d DRM_ERROR("called with no initialization\n"); return -EINVAL; } - - printk(KERN_DEBUG "set status page addr 0x%08x\n", (u32)hws->addr); + DRM_DEBUG("set status page addr 0x%08x\n", (u32)hws->addr); dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12); @@ -758,7 +982,7 @@ static int i915_set_status_page(struct d memset(dev_priv->hw_status_page, 0, PAGE_SIZE); I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); - DRM_DEBUG("load hws HWS_PGA with gfx mem 0x%x\n", + DRM_DEBUG("load hws 0x2080 with gfx mem 0x%x\n", dev_priv->status_gfx_addr); DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page); return 0; @@ -766,7 +990,7 @@ static int i915_set_status_page(struct d int i915_driver_load(struct drm_device *dev, unsigned long flags) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv; unsigned long base, size; int ret = 0, mmio_bar = IS_I9XX(dev) ? 
0 : 1; @@ -790,30 +1014,10 @@ int i915_driver_load(struct drm_device * size = drm_get_resource_len(dev, mmio_bar); ret = drm_addmap(dev, base, size, _DRM_REGISTERS, - _DRM_KERNEL | _DRM_DRIVER, - &dev_priv->mmio_map); - - /* Init HWS */ - if (!I915_NEED_GFX_HWS(dev)) { - ret = i915_init_phys_hws(dev); - if (ret != 0) - return ret; - } - - /* On the 945G/GM, the chipset reports the MSI capability on the - * integrated graphics even though the support isn't actually there - * according to the published specs. It doesn't appear to function - * correctly in testing on 945G. - * This may be a side effect of MSI having been made available for PEG - * and the registers being closely associated. - */ - if (!IS_I945G(dev) && !IS_I945GM(dev)) - pci_enable_msi(dev->pdev); + _DRM_KERNEL | _DRM_DRIVER, &dev_priv->mmio_map); intel_opregion_init(dev); - spin_lock_init(&dev_priv->user_irq_lock); - return ret; } @@ -821,11 +1025,6 @@ int i915_driver_unload(struct drm_device { struct drm_i915_private *dev_priv = dev->dev_private; - if (dev->pdev->msi_enabled) - pci_disable_msi(dev->pdev); - - i915_free_hws(dev); - if (dev_priv->mmio_map) drm_rmmap(dev, dev_priv->mmio_map); @@ -833,7 +1032,6 @@ int i915_driver_unload(struct drm_device drm_free(dev->dev_private, sizeof(drm_i915_private_t), DRM_MEM_DRIVER); - return 0; } @@ -841,12 +1039,32 @@ void i915_driver_lastclose(struct drm_de { drm_i915_private_t *dev_priv = dev->dev_private; + /* agp off can use this to get called before dev_priv */ if (!dev_priv) return; + if (dev_priv->val_bufs) { + vfree(dev_priv->val_bufs); + dev_priv->val_bufs = NULL; + } + + if (drm_getsarea(dev) && dev_priv->sarea_priv) + i915_do_cleanup_pageflip(dev); if (dev_priv->agp_heap) i915_mem_takedown(&(dev_priv->agp_heap)); + if (dev_priv->sarea_kmap.virtual) { + drm_bo_kunmap(&dev_priv->sarea_kmap); + dev_priv->sarea_kmap.virtual = NULL; + dev->lock.hw_lock = NULL; + dev->sigdata.lock = NULL; + } + if (dev_priv->sarea_bo) { + mutex_lock(&dev->struct_mutex); + drm_bo_usage_deref_locked(&dev_priv->sarea_bo); + mutex_unlock(&dev->struct_mutex); + dev_priv->sarea_bo = NULL; + } i915_dma_cleanup(dev); } @@ -873,7 +1091,9 @@ struct drm_ioctl_desc i915_ioctls[] = { DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ), DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH ), DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH), + DRM_IOCTL_DEF(DRM_I915_MMIO, i915_mmio, DRM_AUTH), DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH), + DRM_IOCTL_DEF(DRM_I915_EXECBUFFER, i915_execbuffer, DRM_AUTH), }; int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); @@ -893,3 +1113,9 @@ int i915_driver_device_is_agp(struct drm { return 1; } + +int i915_driver_firstopen(struct drm_device *dev) +{ + drm_bo_driver_init(dev); + return 0; +} diff -Napur -X /home/jbarnes/dontdiff gpu/drm/i915/i915_drv.c /tmp/drm-master/drivers/gpu/drm/i915/i915_drv.c --- gpu/drm/i915/i915_drv.c 2008-08-21 14:43:42.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/i915/i915_drv.c 2008-08-21 14:34:09.000000000 -0700 @@ -38,211 +38,30 @@ static struct pci_device_id pciidlist[] i915_PCI_IDS }; -enum pipe { - PIPE_A = 0, - PIPE_B, -}; - -static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - if (pipe == PIPE_A) - return (I915_READ(DPLL_A) & DPLL_VCO_ENABLE); - else - return (I915_READ(DPLL_B) & DPLL_VCO_ENABLE); -} - -static void i915_save_palette(struct drm_device *dev, 
enum pipe pipe) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B); - u32 *array; - int i; - - if (!i915_pipe_enabled(dev, pipe)) - return; - - if (pipe == PIPE_A) - array = dev_priv->save_palette_a; - else - array = dev_priv->save_palette_b; - - for(i = 0; i < 256; i++) - array[i] = I915_READ(reg + (i << 2)); -} - -static void i915_restore_palette(struct drm_device *dev, enum pipe pipe) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B); - u32 *array; - int i; - - if (!i915_pipe_enabled(dev, pipe)) - return; - - if (pipe == PIPE_A) - array = dev_priv->save_palette_a; - else - array = dev_priv->save_palette_b; - - for(i = 0; i < 256; i++) - I915_WRITE(reg + (i << 2), array[i]); -} - -static u8 i915_read_indexed(u16 index_port, u16 data_port, u8 reg) -{ - outb(reg, index_port); - return inb(data_port); -} - -static u8 i915_read_ar(u16 st01, u8 reg, u16 palette_enable) -{ - inb(st01); - outb(palette_enable | reg, VGA_AR_INDEX); - return inb(VGA_AR_DATA_READ); -} - -static void i915_write_ar(u8 st01, u8 reg, u8 val, u16 palette_enable) -{ - inb(st01); - outb(palette_enable | reg, VGA_AR_INDEX); - outb(val, VGA_AR_DATA_WRITE); -} - -static void i915_write_indexed(u16 index_port, u16 data_port, u8 reg, u8 val) -{ - outb(reg, index_port); - outb(val, data_port); -} - -static void i915_save_vga(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - int i; - u16 cr_index, cr_data, st01; - - /* VGA color palette registers */ - dev_priv->saveDACMASK = inb(VGA_DACMASK); - /* DACCRX automatically increments during read */ - outb(0, VGA_DACRX); - /* Read 3 bytes of color data from each index */ - for (i = 0; i < 256 * 3; i++) - dev_priv->saveDACDATA[i] = inb(VGA_DACDATA); - - /* MSR bits */ - dev_priv->saveMSR = inb(VGA_MSR_READ); - if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) { - cr_index = VGA_CR_INDEX_CGA; - cr_data = VGA_CR_DATA_CGA; - st01 = VGA_ST01_CGA; - } else { - cr_index = VGA_CR_INDEX_MDA; - cr_data = VGA_CR_DATA_MDA; - st01 = VGA_ST01_MDA; - } - - /* CRT controller regs */ - i915_write_indexed(cr_index, cr_data, 0x11, - i915_read_indexed(cr_index, cr_data, 0x11) & - (~0x80)); - for (i = 0; i <= 0x24; i++) - dev_priv->saveCR[i] = - i915_read_indexed(cr_index, cr_data, i); - /* Make sure we don't turn off CR group 0 writes */ - dev_priv->saveCR[0x11] &= ~0x80; - - /* Attribute controller registers */ - inb(st01); - dev_priv->saveAR_INDEX = inb(VGA_AR_INDEX); - for (i = 0; i <= 0x14; i++) - dev_priv->saveAR[i] = i915_read_ar(st01, i, 0); - inb(st01); - outb(dev_priv->saveAR_INDEX, VGA_AR_INDEX); - inb(st01); - - /* Graphics controller registers */ - for (i = 0; i < 9; i++) - dev_priv->saveGR[i] = - i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, i); - - dev_priv->saveGR[0x10] = - i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x10); - dev_priv->saveGR[0x11] = - i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x11); - dev_priv->saveGR[0x18] = - i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x18); - - /* Sequencer registers */ - for (i = 0; i < 8; i++) - dev_priv->saveSR[i] = - i915_read_indexed(VGA_SR_INDEX, VGA_SR_DATA, i); -} +extern struct drm_fence_driver i915_fence_driver; -static void i915_restore_vga(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - int i; - u16 cr_index, cr_data, st01; - - /* MSR bits */ - outb(dev_priv->saveMSR, VGA_MSR_WRITE); - if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) { - 
cr_index = VGA_CR_INDEX_CGA; - cr_data = VGA_CR_DATA_CGA; - st01 = VGA_ST01_CGA; - } else { - cr_index = VGA_CR_INDEX_MDA; - cr_data = VGA_CR_DATA_MDA; - st01 = VGA_ST01_MDA; - } - /* Sequencer registers, don't write SR07 */ - for (i = 0; i < 7; i++) - i915_write_indexed(VGA_SR_INDEX, VGA_SR_DATA, i, - dev_priv->saveSR[i]); - - /* CRT controller regs */ - /* Enable CR group 0 writes */ - i915_write_indexed(cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]); - for (i = 0; i <= 0x24; i++) - i915_write_indexed(cr_index, cr_data, i, dev_priv->saveCR[i]); - - /* Graphics controller regs */ - for (i = 0; i < 9; i++) - i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, i, - dev_priv->saveGR[i]); - - i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x10, - dev_priv->saveGR[0x10]); - i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x11, - dev_priv->saveGR[0x11]); - i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x18, - dev_priv->saveGR[0x18]); - - /* Attribute controller registers */ - inb(st01); - for (i = 0; i <= 0x14; i++) - i915_write_ar(st01, i, dev_priv->saveAR[i], 0); - inb(st01); /* switch back to index mode */ - outb(dev_priv->saveAR_INDEX | 0x20, VGA_AR_INDEX); - inb(st01); - - /* VGA color palette registers */ - outb(dev_priv->saveDACMASK, VGA_DACMASK); - /* DACCRX automatically increments during read */ - outb(0, VGA_DACWX); - /* Read 3 bytes of color data from each index */ - for (i = 0; i < 256 * 3; i++) - outb(dev_priv->saveDACDATA[i], VGA_DACDATA); +static uint32_t i915_mem_prios[] = {DRM_BO_MEM_PRIV0, DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL}; +static uint32_t i915_busy_prios[] = {DRM_BO_MEM_TT, DRM_BO_MEM_PRIV0, DRM_BO_MEM_LOCAL}; -} +static struct drm_bo_driver i915_bo_driver = { + .mem_type_prio = i915_mem_prios, + .mem_busy_prio = i915_busy_prios, + .num_mem_type_prio = sizeof(i915_mem_prios)/sizeof(uint32_t), + .num_mem_busy_prio = sizeof(i915_busy_prios)/sizeof(uint32_t), + .create_ttm_backend_entry = i915_create_ttm_backend_entry, + .fence_type = i915_fence_type, + .invalidate_caches = i915_invalidate_caches, + .init_mem_type = i915_init_mem_type, + .evict_flags = i915_evict_flags, + .move = i915_move, + .ttm_cache_flush = i915_flush_ttm, + .command_stream_barrier = NULL, +}; static int i915_suspend(struct drm_device *dev, pm_message_t state) { struct drm_i915_private *dev_priv = dev->dev_private; - int i; if (!dev || !dev_priv) { printk(KERN_ERR "dev: %p, dev_priv: %p\n", dev, dev_priv); @@ -254,122 +73,8 @@ static int i915_suspend(struct drm_devic return 0; pci_save_state(dev->pdev); - pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB); - /* Display arbitration control */ - dev_priv->saveDSPARB = I915_READ(DSPARB); - - /* Pipe & plane A info */ - dev_priv->savePIPEACONF = I915_READ(PIPEACONF); - dev_priv->savePIPEASRC = I915_READ(PIPEASRC); - dev_priv->saveFPA0 = I915_READ(FPA0); - dev_priv->saveFPA1 = I915_READ(FPA1); - dev_priv->saveDPLL_A = I915_READ(DPLL_A); - if (IS_I965G(dev)) - dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD); - dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A); - dev_priv->saveHBLANK_A = I915_READ(HBLANK_A); - dev_priv->saveHSYNC_A = I915_READ(HSYNC_A); - dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A); - dev_priv->saveVBLANK_A = I915_READ(VBLANK_A); - dev_priv->saveVSYNC_A = I915_READ(VSYNC_A); - dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A); - - dev_priv->saveDSPACNTR = I915_READ(DSPACNTR); - dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE); - dev_priv->saveDSPASIZE = I915_READ(DSPASIZE); - dev_priv->saveDSPAPOS = I915_READ(DSPAPOS); - dev_priv->saveDSPAADDR = 
I915_READ(DSPAADDR); - if (IS_I965G(dev)) { - dev_priv->saveDSPASURF = I915_READ(DSPASURF); - dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF); - } - i915_save_palette(dev, PIPE_A); - dev_priv->savePIPEASTAT = I915_READ(PIPEASTAT); - - /* Pipe & plane B info */ - dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF); - dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC); - dev_priv->saveFPB0 = I915_READ(FPB0); - dev_priv->saveFPB1 = I915_READ(FPB1); - dev_priv->saveDPLL_B = I915_READ(DPLL_B); - if (IS_I965G(dev)) - dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD); - dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B); - dev_priv->saveHBLANK_B = I915_READ(HBLANK_B); - dev_priv->saveHSYNC_B = I915_READ(HSYNC_B); - dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B); - dev_priv->saveVBLANK_B = I915_READ(VBLANK_B); - dev_priv->saveVSYNC_B = I915_READ(VSYNC_B); - dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A); - - dev_priv->saveDSPBCNTR = I915_READ(DSPBCNTR); - dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE); - dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE); - dev_priv->saveDSPBPOS = I915_READ(DSPBPOS); - dev_priv->saveDSPBADDR = I915_READ(DSPBADDR); - if (IS_I965GM(dev) || IS_IGD_GM(dev)) { - dev_priv->saveDSPBSURF = I915_READ(DSPBSURF); - dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF); - } - i915_save_palette(dev, PIPE_B); - dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT); - - /* CRT state */ - dev_priv->saveADPA = I915_READ(ADPA); - - /* LVDS state */ - dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL); - dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS); - dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL); - if (IS_I965G(dev)) - dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2); - if (IS_MOBILE(dev) && !IS_I830(dev)) - dev_priv->saveLVDS = I915_READ(LVDS); - if (!IS_I830(dev) && !IS_845G(dev)) - dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL); - dev_priv->savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS); - dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS); - dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR); - - /* FIXME: save TV & SDVO state */ - - /* FBC state */ - dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE); - dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE); - dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2); - dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL); - - /* Interrupt state */ - dev_priv->saveIIR = I915_READ(IIR); - dev_priv->saveIER = I915_READ(IER); - dev_priv->saveIMR = I915_READ(IMR); - - /* VGA state */ - dev_priv->saveVGA0 = I915_READ(VGA0); - dev_priv->saveVGA1 = I915_READ(VGA1); - dev_priv->saveVGA_PD = I915_READ(VGA_PD); - dev_priv->saveVGACNTRL = I915_READ(VGACNTRL); - - /* Clock gating state */ - dev_priv->saveD_STATE = I915_READ(D_STATE); - dev_priv->saveCG_2D_DIS = I915_READ(CG_2D_DIS); - - /* Cache mode state */ - dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); - - /* Memory Arbitration state */ - dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE); - - /* Scratch space */ - for (i = 0; i < 16; i++) { - dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2)); - dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2)); - } - for (i = 0; i < 3; i++) - dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2)); - - i915_save_vga(dev); + i915_save_state(dev); intel_opregion_free(dev); @@ -384,178 +89,38 @@ static int i915_suspend(struct drm_devic static int i915_resume(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; - int i; - pci_set_power_state(dev->pdev, PCI_D0); pci_restore_state(dev->pdev); if (pci_enable_device(dev->pdev)) return 
-1; pci_set_master(dev->pdev); - pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB); - - I915_WRITE(DSPARB, dev_priv->saveDSPARB); - - /* Pipe & plane A info */ - /* Prime the clock */ - if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) { - I915_WRITE(DPLL_A, dev_priv->saveDPLL_A & - ~DPLL_VCO_ENABLE); - udelay(150); - } - I915_WRITE(FPA0, dev_priv->saveFPA0); - I915_WRITE(FPA1, dev_priv->saveFPA1); - /* Actually enable it */ - I915_WRITE(DPLL_A, dev_priv->saveDPLL_A); - udelay(150); - if (IS_I965G(dev)) - I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD); - udelay(150); - - /* Restore mode */ - I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A); - I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A); - I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A); - I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A); - I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A); - I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A); - I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A); - - /* Restore plane info */ - I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE); - I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS); - I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC); - I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR); - I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE); - if (IS_I965G(dev)) { - I915_WRITE(DSPASURF, dev_priv->saveDSPASURF); - I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF); - } - - I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF); - - i915_restore_palette(dev, PIPE_A); - /* Enable the plane */ - I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR); - I915_WRITE(DSPAADDR, I915_READ(DSPAADDR)); - - /* Pipe & plane B info */ - if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) { - I915_WRITE(DPLL_B, dev_priv->saveDPLL_B & - ~DPLL_VCO_ENABLE); - udelay(150); - } - I915_WRITE(FPB0, dev_priv->saveFPB0); - I915_WRITE(FPB1, dev_priv->saveFPB1); - /* Actually enable it */ - I915_WRITE(DPLL_B, dev_priv->saveDPLL_B); - udelay(150); - if (IS_I965G(dev)) - I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD); - udelay(150); - - /* Restore mode */ - I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B); - I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B); - I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B); - I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B); - I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B); - I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B); - I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B); - - /* Restore plane info */ - I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE); - I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS); - I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC); - I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR); - I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE); - if (IS_I965G(dev)) { - I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF); - I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF); - } - - I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF); - - i915_restore_palette(dev, PIPE_B); - /* Enable the plane */ - I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR); - I915_WRITE(DSPBADDR, I915_READ(DSPBADDR)); - - /* CRT state */ - I915_WRITE(ADPA, dev_priv->saveADPA); - - /* LVDS state */ - if (IS_I965G(dev)) - I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2); - if (IS_MOBILE(dev) && !IS_I830(dev)) - I915_WRITE(LVDS, dev_priv->saveLVDS); - if (!IS_I830(dev) && !IS_845G(dev)) - I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL); - - I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS); - I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL); - I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS); - I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS); - I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR); - 
I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL); - - /* FIXME: restore TV & SDVO state */ - - /* FBC info */ - I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE); - I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE); - I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2); - I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL); - - /* VGA state */ - I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL); - I915_WRITE(VGA0, dev_priv->saveVGA0); - I915_WRITE(VGA1, dev_priv->saveVGA1); - I915_WRITE(VGA_PD, dev_priv->saveVGA_PD); - udelay(150); - - /* Clock gating state */ - I915_WRITE (D_STATE, dev_priv->saveD_STATE); - I915_WRITE(CG_2D_DIS, dev_priv->saveCG_2D_DIS); - - /* Cache mode state */ - I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000); - - /* Memory arbitration state */ - I915_WRITE (MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000); - - for (i = 0; i < 16; i++) { - I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]); - I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i+7]); - } - for (i = 0; i < 3; i++) - I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]); - - i915_restore_vga(dev); + i915_restore_state(dev); intel_opregion_init(dev); return 0; } +static int probe(struct pci_dev *pdev, const struct pci_device_id *ent); static struct drm_driver driver = { /* don't use mtrr's here, the Xserver or user space app should * deal with them for intel hardware. */ .driver_features = - DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/ - DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL | - DRIVER_IRQ_VBL2, + DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR | */ + DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED, .load = i915_driver_load, .unload = i915_driver_unload, + .firstopen = i915_driver_firstopen, .lastclose = i915_driver_lastclose, .preclose = i915_driver_preclose, .suspend = i915_suspend, .resume = i915_resume, .device_is_agp = i915_driver_device_is_agp, - .vblank_wait = i915_driver_vblank_wait, - .vblank_wait2 = i915_driver_vblank_wait2, + .get_vblank_counter = i915_get_vblank_counter, + .enable_vblank = i915_enable_vblank, + .disable_vblank = i915_disable_vblank, .irq_preinstall = i915_driver_irq_preinstall, .irq_postinstall = i915_driver_irq_postinstall, .irq_uninstall = i915_driver_irq_uninstall, @@ -565,23 +130,25 @@ static struct drm_driver driver = { .get_reg_ofs = drm_core_get_reg_ofs, .ioctls = i915_ioctls, .fops = { - .owner = THIS_MODULE, - .open = drm_open, - .release = drm_release, - .ioctl = drm_ioctl, - .mmap = drm_mmap, - .poll = drm_poll, - .fasync = drm_fasync, -#ifdef CONFIG_COMPAT - .compat_ioctl = i915_compat_ioctl, + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .ioctl = drm_ioctl, + .mmap = drm_mmap, + .poll = drm_poll, + .fasync = drm_fasync, +#if defined(CONFIG_COMPAT) && LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9) + .compat_ioctl = i915_compat_ioctl, #endif - }, - + }, .pci_driver = { - .name = DRIVER_NAME, - .id_table = pciidlist, - }, - + .name = DRIVER_NAME, + .id_table = pciidlist, + .probe = probe, + .remove = __devexit_p(drm_cleanup_pci), + }, + .fence_driver = &i915_fence_driver, + .bo_driver = &i915_bo_driver, .name = DRIVER_NAME, .desc = DRIVER_DESC, .date = DRIVER_DATE, @@ -590,10 +157,15 @@ static struct drm_driver driver = { .patchlevel = DRIVER_PATCHLEVEL, }; +static int probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + return drm_get_dev(pdev, ent, &driver); +} + static int __init i915_init(void) { driver.num_ioctls = i915_max_ioctl; - return drm_init(&driver); + return 
drm_init(&driver, pciidlist); } static void __exit i915_exit(void) diff -Napur -X /home/jbarnes/dontdiff gpu/drm/i915/i915_drv.h /tmp/drm-master/drivers/gpu/drm/i915/i915_drv.h --- gpu/drm/i915/i915_drv.h 2008-08-21 14:43:42.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/i915/i915_drv.h 2008-08-21 14:34:09.000000000 -0700 @@ -30,8 +30,6 @@ #ifndef _I915_DRV_H_ #define _I915_DRV_H_ -#include "i915_reg.h" - /* General customization: */ @@ -39,7 +37,10 @@ #define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" -#define DRIVER_DATE "20060119" +#define DRIVER_DATE "20080312" + +#define I915_HAVE_FENCE +#define I915_HAVE_BUFFER /* Interface history: * @@ -50,11 +51,24 @@ * 1.5: Add vblank pipe configuration * 1.6: - New ioctl for scheduling buffer swaps on vertical blank * - Support vertical blank on secondary display pipe + * 1.8: New ioctl for ARB_Occlusion_Query + * 1.9: Usable page flipping and triple buffering + * 1.10: Plane/pipe disentangling + * 1.11: TTM superioctl + * 1.12: TTM relocation optimization */ #define DRIVER_MAJOR 1 -#define DRIVER_MINOR 6 +#define DRIVER_MINOR 13 #define DRIVER_PATCHLEVEL 0 +enum pipe { + PIPE_A = 0, + PIPE_B, +}; + +#define I915_MAX_VALIDATE_BUFFERS 4096 +struct drm_i915_validate_buffer; + typedef struct _drm_i915_ring_buffer { int tail_mask; unsigned long Start; @@ -78,15 +92,22 @@ struct mem_block { typedef struct _drm_i915_vbl_swap { struct list_head head; drm_drawable_t drw_id; - unsigned int pipe; + unsigned int plane; unsigned int sequence; + int flip; } drm_i915_vbl_swap_t; +struct opregion_header; +struct opregion_acpi; +struct opregion_swsci; +struct opregion_asle; + struct intel_opregion { struct opregion_header *header; struct opregion_acpi *acpi; struct opregion_swsci *swsci; struct opregion_asle *asle; + int enabled; }; @@ -100,35 +121,43 @@ typedef struct drm_i915_private { drm_dma_handle_t *status_page_dmah; void *hw_status_page; dma_addr_t dma_status_page; - unsigned long counter; + uint32_t counter; unsigned int status_gfx_addr; drm_local_map_t hws_map; unsigned int cpp; - int back_offset; - int front_offset; - int current_page; - int page_flipping; + int use_mi_batchbuffer_start; wait_queue_head_t irq_queue; atomic_t irq_received; atomic_t irq_emitted; - /** Protects user_irq_refcount and irq_mask_reg */ - spinlock_t user_irq_lock; - /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). 
*/ - int user_irq_refcount; - /** Cached value of IMR to avoid reads in updating the bitfield */ - u32 irq_mask_reg; int tex_lru_log_granularity; int allow_batchbuffer; struct mem_block *agp_heap; unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds; int vblank_pipe; + DRM_SPINTYPE user_irq_lock; + int user_irq_refcount; + int fence_irq_on; + uint32_t irq_enable_reg; + int irq_enabled; + + uint32_t flush_sequence; + uint32_t flush_flags; + uint32_t flush_pending; + uint32_t saved_flush_status; + void *agp_iomap; + unsigned int max_validate_buffers; + struct mutex cmdbuf_mutex; + struct drm_i915_validate_buffer *val_bufs; - spinlock_t swaps_lock; + DRM_SPINTYPE swaps_lock; drm_i915_vbl_swap_t vbl_swaps; unsigned int swaps_pending; + /* DRI2 sarea */ + struct drm_buffer_object *sarea_bo; + struct drm_bo_kmap_obj sarea_kmap; struct intel_opregion opregion; @@ -222,6 +251,13 @@ typedef struct drm_i915_private { u8 saveCR[37]; } drm_i915_private_t; +enum intel_chip_family { + CHIP_I8XX = 0x01, + CHIP_I9XX = 0x02, + CHIP_I915 = 0x04, + CHIP_I965 = 0x08, +}; + extern struct drm_ioctl_desc i915_ioctls[]; extern int i915_max_ioctl; @@ -235,6 +271,13 @@ extern void i915_driver_preclose(struct extern int i915_driver_device_is_agp(struct drm_device * dev); extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); +extern void i915_emit_breadcrumb(struct drm_device *dev); +extern void i915_dispatch_flip(struct drm_device * dev, int pipes, int sync); +extern int i915_emit_mi_flush(struct drm_device *dev, uint32_t flush); +extern int i915_driver_firstopen(struct drm_device *dev); +extern int i915_dispatch_batchbuffer(struct drm_device * dev, + drm_i915_batchbuffer_t * batch); +extern int i915_quiescent(struct drm_device *dev); /* i915_irq.c */ extern int i915_irq_emit(struct drm_device *dev, void *data, @@ -242,19 +285,22 @@ extern int i915_irq_emit(struct drm_devi extern int i915_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv); -extern int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence); -extern int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence); extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS); extern void i915_driver_irq_preinstall(struct drm_device * dev); -extern void i915_driver_irq_postinstall(struct drm_device * dev); +extern int i915_driver_irq_postinstall(struct drm_device * dev); extern void i915_driver_irq_uninstall(struct drm_device * dev); extern int i915_vblank_pipe_set(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int i915_vblank_pipe_get(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int i915_emit_irq(struct drm_device * dev); +extern int i915_enable_vblank(struct drm_device *dev, int crtc); +extern void i915_disable_vblank(struct drm_device *dev, int crtc); +extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc); extern int i915_vblank_swap(struct drm_device *dev, void *data, struct drm_file *file_priv); -extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask); +extern void i915_user_irq_on(drm_i915_private_t *dev_priv); +extern void i915_user_irq_off(drm_i915_private_t *dev_priv); /* i915_mem.c */ extern int i915_mem_alloc(struct drm_device *dev, void *data, @@ -267,7 +313,33 @@ extern int i915_mem_destroy_heap(struct struct drm_file *file_priv); extern void i915_mem_takedown(struct mem_block **heap); extern void i915_mem_release(struct drm_device * dev, - struct drm_file *file_priv, struct mem_block 
*heap); + struct drm_file *file_priv, + struct mem_block *heap); + +/* i915_suspend.c */ +extern int i915_save_state(struct drm_device *dev); +extern int i915_restore_state(struct drm_device *dev); + +/* i915_fence.c */ +extern void i915_fence_handler(struct drm_device *dev); +extern void i915_invalidate_reported_sequence(struct drm_device *dev); + + +/* i915_buffer.c */ +extern struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device *dev); +extern int i915_fence_type(struct drm_buffer_object *bo, uint32_t *fclass, + uint32_t *type); +extern int i915_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags); +extern int i915_init_mem_type(struct drm_device *dev, uint32_t type, + struct drm_mem_type_manager *man); +extern uint64_t i915_evict_flags(struct drm_buffer_object *bo); +extern int i915_move(struct drm_buffer_object *bo, int evict, + int no_wait, struct drm_bo_mem_reg *new_mem); +void i915_flush_ttm(struct drm_ttm *ttm); +/* i915_execbuf.c */ +int i915_execbuffer(struct drm_device *dev, void *data, + struct drm_file *file_priv); + /* i915_opregion.c */ extern int intel_opregion_init(struct drm_device *dev); @@ -275,21 +347,31 @@ extern void intel_opregion_free(struct d extern void opregion_asle_intr(struct drm_device *dev); extern void opregion_enable_asle(struct drm_device *dev); + #define I915_READ(reg) DRM_READ32(dev_priv->mmio_map, (reg)) #define I915_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, (reg), (val)) #define I915_READ16(reg) DRM_READ16(dev_priv->mmio_map, (reg)) #define I915_WRITE16(reg,val) DRM_WRITE16(dev_priv->mmio_map, (reg), (val)) +#define I915_READ8(reg) DRM_READ8(dev_priv->mmio_map, (reg)) +#define I915_WRITE8(reg,val) DRM_WRITE8(dev_priv->mmio_map, (reg), (val)) + +#if defined(__FreeBSD__) +typedef boolean_t bool; +#endif #define I915_VERBOSE 0 +#define PRIMARY_RINGBUFFER_SIZE (128*1024) + #define RING_LOCALS unsigned int outring, ringmask, outcount; \ - volatile char *virt; + volatile char *virt; #define BEGIN_LP_RING(n) do { \ if (I915_VERBOSE) \ - DRM_DEBUG("BEGIN_LP_RING(%d)\n", (n)); \ - if (dev_priv->ring.space < (n)*4) \ - i915_wait_ring(dev, (n)*4, __func__); \ + DRM_DEBUG("BEGIN_LP_RING(%d)\n", \ + (n)); \ + if (dev_priv->ring.space < (n)*4) \ + i915_wait_ring(dev, (n)*4, __FUNCTION__); \ outcount = 0; \ outring = dev_priv->ring.tail; \ ringmask = dev_priv->ring.tail_mask; \ @@ -298,8 +380,8 @@ extern void opregion_enable_asle(struct #define OUT_RING(n) do { \ if (I915_VERBOSE) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \ - *(volatile unsigned int *)(virt + outring) = (n); \ - outcount++; \ + *(volatile unsigned int *)(virt + outring) = (n); \ + outcount++; \ outring += 4; \ outring &= ringmask; \ } while (0) @@ -311,6 +393,119 @@ extern void opregion_enable_asle(struct I915_WRITE(PRB0_TAIL, outring); \ } while(0) +extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); + +/* + * The Bridge device's PCI config space has information about the + * fb aperture size and the amount of pre-reserved memory. 
+ */
+#define INTEL_GMCH_CTRL 0x52
+#define INTEL_GMCH_ENABLED 0x4
+#define INTEL_GMCH_MEM_MASK 0x1
+#define INTEL_GMCH_MEM_64M 0x1
+#define INTEL_GMCH_MEM_128M 0
+
+#define INTEL_855_GMCH_GMS_MASK (0x7 << 4)
+#define INTEL_855_GMCH_GMS_DISABLED (0x0 << 4)
+#define INTEL_855_GMCH_GMS_STOLEN_1M (0x1 << 4)
+#define INTEL_855_GMCH_GMS_STOLEN_4M (0x2 << 4)
+#define INTEL_855_GMCH_GMS_STOLEN_8M (0x3 << 4)
+#define INTEL_855_GMCH_GMS_STOLEN_16M (0x4 << 4)
+#define INTEL_855_GMCH_GMS_STOLEN_32M (0x5 << 4)
+
+#define INTEL_915G_GMCH_GMS_STOLEN_48M (0x6 << 4)
+#define INTEL_915G_GMCH_GMS_STOLEN_64M (0x7 << 4)
+
+/* PCI config space */
+
+#define HPLLCC 0xc0 /* 855 only */
+#define GC_CLOCK_CONTROL_MASK (3 << 0)
+#define GC_CLOCK_133_200 (0 << 0)
+#define GC_CLOCK_100_200 (1 << 0)
+#define GC_CLOCK_100_133 (2 << 0)
+#define GC_CLOCK_166_250 (3 << 0)
+#define GCFGC 0xf0 /* 915+ only */
+#define GC_LOW_FREQUENCY_ENABLE (1 << 7)
+#define GC_DISPLAY_CLOCK_190_200_MHZ (0 << 4)
+#define GC_DISPLAY_CLOCK_333_MHZ (4 << 4)
+#define GC_DISPLAY_CLOCK_MASK (7 << 4)
+#define LBB 0xf4
+
+/* VGA stuff */
+
+#define VGA_ST01_MDA 0x3ba
+#define VGA_ST01_CGA 0x3da
+
+#define VGA_MSR_WRITE 0x3c2
+#define VGA_MSR_READ 0x3cc
+#define VGA_MSR_MEM_EN (1<<1)
+#define VGA_MSR_CGA_MODE (1<<0)
+
+#define VGA_SR_INDEX 0x3c4
+#define VGA_SR_DATA 0x3c5
+
+#define VGA_AR_INDEX 0x3c0
+#define VGA_AR_VID_EN (1<<5)
+#define VGA_AR_DATA_WRITE 0x3c0
+#define VGA_AR_DATA_READ 0x3c1
+
+#define VGA_GR_INDEX 0x3ce
+#define VGA_GR_DATA 0x3cf
+/* GR05 */
+#define VGA_GR_MEM_READ_MODE_SHIFT 3
+#define VGA_GR_MEM_READ_MODE_PLANE 1
+/* GR06 */
+#define VGA_GR_MEM_MODE_MASK 0xc
+#define VGA_GR_MEM_MODE_SHIFT 2
+#define VGA_GR_MEM_A0000_AFFFF 0
+#define VGA_GR_MEM_A0000_BFFFF 1
+#define VGA_GR_MEM_B0000_B7FFF 2
+#define VGA_GR_MEM_B0000_BFFFF 3
+
+#define VGA_DACMASK 0x3c6
+#define VGA_DACRX 0x3c7
+#define VGA_DACWX 0x3c8
+#define VGA_DACDATA 0x3c9
+
+#define VGA_CR_INDEX_MDA 0x3b4
+#define VGA_CR_DATA_MDA 0x3b5
+#define VGA_CR_INDEX_CGA 0x3d4
+#define VGA_CR_DATA_CGA 0x3d5
+
+/*
+ * Memory interface instructions used by the kernel
+ */
+#define MI_INSTR(opcode, flags) (((opcode) << 23) | (flags))
+
+#define MI_NOOP MI_INSTR(0, 0)
+#define MI_USER_INTERRUPT MI_INSTR(0x02, 0)
+#define MI_WAIT_FOR_EVENT MI_INSTR(0x03, 0)
+#define MI_WAIT_FOR_PLANE_B_FLIP (1<<6)
+#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2)
+#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
+#define MI_FLUSH MI_INSTR(0x04, 0)
+#define MI_READ_FLUSH (1 << 0)
+#define MI_EXE_FLUSH (1 << 1)
+#define MI_NO_WRITE_FLUSH (1 << 2)
+#define MI_SCENE_COUNT (1 << 3) /* just increment scene count */
+#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */
+#define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0)
+#define MI_REPORT_HEAD MI_INSTR(0x07, 0)
+#define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0)
+#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
+#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */
+#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
+#define MI_LOAD_REGISTER_IMM MI_INSTR(0x22, 1)
+#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
+#define MI_BATCH_NON_SECURE (1)
+#define MI_BATCH_NON_SECURE_I965 (1<<8)
+#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
+
+#define BREADCRUMB_BITS 31
+#define BREADCRUMB_MASK ((1U << BREADCRUMB_BITS) - 1)
+
+#define READ_BREADCRUMB(dev_priv) (((volatile u32*)(dev_priv->hw_status_page))[5])
+
 /**
  * Reads a dword out of the status page, which is written to from the command
  * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
@@ -325,9 +520,1262 @@ extern void opregion_enable_asle(struct
  * The area from dword 0x10 to 0x3ff is available for driver usage.
  */
 #define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg])
-#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, 5)
+#define I915_GEM_HWS_INDEX 0x10
-extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
+/*
+ * 3D instructions used by the kernel
+ */
+#define GFX_INSTR(opcode, flags) ((0x3 << 29) | ((opcode) << 24) | (flags))
+
+#define GFX_OP_RASTER_RULES ((0x3<<29)|(0x7<<24))
+#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
+#define SC_UPDATE_SCISSOR (0x1<<1)
+#define SC_ENABLE_MASK (0x1<<0)
+#define SC_ENABLE (0x1<<0)
+#define GFX_OP_LOAD_INDIRECT ((0x3<<29)|(0x1d<<24)|(0x7<<16))
+#define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
+#define SCI_YMIN_MASK (0xffff<<16)
+#define SCI_XMIN_MASK (0xffff<<0)
+#define SCI_YMAX_MASK (0xffff<<16)
+#define SCI_XMAX_MASK (0xffff<<0)
+#define GFX_OP_SCISSOR_ENABLE ((0x3<<29)|(0x1c<<24)|(0x10<<19))
+#define GFX_OP_SCISSOR_RECT ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1)
+#define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
+#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
+#define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x4)
+#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
+#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
+#define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2)
+#define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4)
+#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
+#define XY_MONO_SRC_COPY_IMM_BLT ((2<<29)|(0x71<<22)|5)
+#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21)
+#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20)
+#define BLT_DEPTH_8 (0<<24)
+#define BLT_DEPTH_16_565 (1<<24)
+#define BLT_DEPTH_16_1555 (2<<24)
+#define BLT_DEPTH_32 (3<<24)
+#define BLT_ROP_GXCOPY (0xcc<<16)
+#define XY_SRC_COPY_BLT_SRC_TILED (1<<15) /* 965+ only */
+#define XY_SRC_COPY_BLT_DST_TILED (1<<11) /* 965+ only */
+#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
+#define ASYNC_FLIP (1<<22)
+#define DISPLAY_PLANE_A (0<<20)
+#define DISPLAY_PLANE_B (1<<20)
+
+/*
+ * Instruction and interrupt control regs
+ */
+
+#define PRB0_TAIL 0x02030
+#define PRB0_HEAD 0x02034
+#define PRB0_START 0x02038
+#define PRB0_CTL 0x0203c
+#define TAIL_ADDR 0x001FFFF8
+#define HEAD_WRAP_COUNT 0xFFE00000
+#define HEAD_WRAP_ONE 0x00200000
+#define HEAD_ADDR 0x001FFFFC
+#define RING_NR_PAGES 0x001FF000
+#define RING_REPORT_MASK 0x00000006
+#define RING_REPORT_64K 0x00000002
+#define RING_REPORT_128K 0x00000004
+#define RING_NO_REPORT 0x00000000
+#define RING_VALID_MASK 0x00000001
+#define RING_VALID 0x00000001
+#define RING_INVALID 0x00000000
+#define PRB1_TAIL 0x02040 /* 915+ only */
+#define PRB1_HEAD 0x02044 /* 915+ only */
+#define PRB1_START 0x02048 /* 915+ only */
+#define PRB1_CTL 0x0204c /* 915+ only */
+#define HWS_PGA 0x02080
+#define IPEIR 0x02088
+#define NOPID 0x02094
+#define HWSTAM 0x02098
+#define SCPD0 0x0209c /* 915+ only */
+#define IER 0x020a0
+#define IIR 0x020a4
+#define IMR 0x020a8
+#define ISR 0x020ac
+#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
+#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
+#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15)
+#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14)
+#define I915_HWB_OOM_INTERRUPT (1<<13)
+#define I915_SYNC_STATUS_INTERRUPT (1<<12)
+#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11)
+#define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT (1<<10)
+#define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT (1<<9)
+#define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT (1<<8)
+#define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT (1<<7)
+#define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT (1<<6)
+#define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT (1<<5)
+#define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT (1<<4)
+#define I915_DEBUG_INTERRUPT (1<<2)
+#define I915_USER_INTERRUPT (1<<1)
+#define I915_ASLE_INTERRUPT (1<<0)
+#define EIR 0x020b0
+#define EMR 0x020b4
+#define ESR 0x020b8
+#define INSTPM 0x020c0
+#define FW_BLC 0x020d8
+#define FW_BLC_SELF 0x020e0 /* 915+ only */
+#define MI_ARB_STATE 0x020e4 /* 915+ only */
+#define CACHE_MODE_0 0x02120 /* 915+ only */
+#define CM0_MASK_SHIFT 16
+#define CM0_IZ_OPT_DISABLE (1<<6)
+#define CM0_ZR_OPT_DISABLE (1<<5)
+#define CM0_DEPTH_EVICT_DISABLE (1<<4)
+#define CM0_COLOR_EVICT_DISABLE (1<<3)
+#define CM0_DEPTH_WRITE_DISABLE (1<<1)
+#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
+#define GFX_FLSH_CNTL 0x02170 /* 915+ only */
+
+/*
+ * Framebuffer compression (915+ only)
+ */
+
+#define FBC_CFB_BASE 0x03200 /* 4k page aligned */
+#define FBC_LL_BASE 0x03204 /* 4k page aligned */
+#define FBC_CONTROL 0x03208
+#define FBC_CTL_EN (1<<31)
+#define FBC_CTL_PERIODIC (1<<30)
+#define FBC_CTL_INTERVAL_SHIFT (16)
+#define FBC_CTL_UNCOMPRESSIBLE (1<<14)
+#define FBC_CTL_STRIDE_SHIFT (5)
+#define FBC_CTL_FENCENO (1<<0)
+#define FBC_COMMAND 0x0320c
+#define FBC_CMD_COMPRESS (1<<0)
+#define FBC_STATUS 0x03210
+#define FBC_STAT_COMPRESSING (1<<31)
+#define FBC_STAT_COMPRESSED (1<<30)
+#define FBC_STAT_MODIFIED (1<<29)
+#define FBC_STAT_CURRENT_LINE (1<<0)
+#define FBC_CONTROL2 0x03214
+#define FBC_CTL_FENCE_DBL (0<<4)
+#define FBC_CTL_IDLE_IMM (0<<2)
+#define FBC_CTL_IDLE_FULL (1<<2)
+#define FBC_CTL_IDLE_LINE (2<<2)
+#define FBC_CTL_IDLE_DEBUG (3<<2)
+#define FBC_CTL_CPU_FENCE (1<<1)
+#define FBC_CTL_PLANEA (0<<0)
+#define FBC_CTL_PLANEB (1<<0)
+#define FBC_FENCE_OFF 0x0321b
+
+#define FBC_LL_SIZE (1536)
+
+/*
+ * GPIO regs
+ */
+
+#define GPIOA 0x5010
+#define GPIOB 0x5014
+#define GPIOC 0x5018
+#define GPIOD 0x501c
+#define GPIOE 0x5020
+#define GPIOF 0x5024
+#define GPIOG 0x5028
+#define GPIOH 0x502c
+# define GPIO_CLOCK_DIR_MASK (1 << 0)
+# define GPIO_CLOCK_DIR_IN (0 << 1)
+# define GPIO_CLOCK_DIR_OUT (1 << 1)
+# define GPIO_CLOCK_VAL_MASK (1 << 2)
+# define GPIO_CLOCK_VAL_OUT (1 << 3)
+# define GPIO_CLOCK_VAL_IN (1 << 4)
+# define GPIO_CLOCK_PULLUP_DISABLE (1 << 5)
+# define GPIO_DATA_DIR_MASK (1 << 8)
+# define GPIO_DATA_DIR_IN (0 << 9)
+# define GPIO_DATA_DIR_OUT (1 << 9)
+# define GPIO_DATA_VAL_MASK (1 << 10)
+# define GPIO_DATA_VAL_OUT (1 << 11)
+# define GPIO_DATA_VAL_IN (1 << 12)
+# define GPIO_DATA_PULLUP_DISABLE (1 << 13)
+
+/*
+ * Clock control & power management
+ */
+
+#define VGA0 0x6000
+#define VGA1 0x6004
+#define VGA_PD 0x6010
+#define VGA0_PD_P2_DIV_4 (1 << 7)
+#define VGA0_PD_P1_DIV_2 (1 << 5)
+#define VGA0_PD_P1_SHIFT 0
+#define VGA0_PD_P1_MASK (0x1f << 0)
+#define VGA1_PD_P2_DIV_4 (1 << 15)
+#define VGA1_PD_P1_DIV_2 (1 << 13)
+#define VGA1_PD_P1_SHIFT 8
+#define VGA1_PD_P1_MASK (0x1f << 8)
+#define DPLL_A 0x06014
+#define DPLL_B 0x06018
+#define DPLL_VCO_ENABLE (1 << 31)
+#define DPLL_DVO_HIGH_SPEED (1 << 30)
+#define DPLL_SYNCLOCK_ENABLE (1 << 29)
+#define DPLL_VGA_MODE_DIS (1 << 28)
+#define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */
+#define DPLLB_MODE_LVDS (2 << 26) /* i915 */
+#define DPLL_MODE_MASK (3 << 26)
+#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */
+#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */
+#define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */ +#define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */ +#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */ +#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */ + +#define I915_FIFO_UNDERRUN_STATUS (1UL<<31) +#define I915_CRC_ERROR_ENABLE (1UL<<29) +#define I915_CRC_DONE_ENABLE (1UL<<28) +#define I915_GMBUS_EVENT_ENABLE (1UL<<27) +#define I915_VSYNC_INTERRUPT_ENABLE (1UL<<25) +#define I915_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24) +#define I915_DPST_EVENT_ENABLE (1UL<<23) +#define I915_LEGACY_BLC_EVENT_ENABLE (1UL<<22) +#define I915_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21) +#define I915_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20) +#define I915_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */ +#define I915_VBLANK_INTERRUPT_ENABLE (1UL<<17) +#define I915_OVERLAY_UPDATED_ENABLE (1UL<<16) +#define I915_CRC_ERROR_INTERRUPT_STATUS (1UL<<13) +#define I915_CRC_DONE_INTERRUPT_STATUS (1UL<<12) +#define I915_GMBUS_INTERRUPT_STATUS (1UL<<11) +#define I915_VSYNC_INTERRUPT_STATUS (1UL<<9) +#define I915_DISPLAY_LINE_COMPARE_STATUS (1UL<<8) +#define I915_DPST_EVENT_STATUS (1UL<<7) +#define I915_LEGACY_BLC_EVENT_STATUS (1UL<<6) +#define I915_ODD_FIELD_INTERRUPT_STATUS (1UL<<5) +#define I915_EVEN_FIELD_INTERRUPT_STATUS (1UL<<4) +#define I915_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */ +#define I915_VBLANK_INTERRUPT_STATUS (1UL<<1) +#define I915_OVERLAY_UPDATED_STATUS (1UL<<0) + +#define SRX_INDEX 0x3c4 +#define SRX_DATA 0x3c5 +#define SR01 1 +#define SR01_SCREEN_OFF (1<<5) + +#define PPCR 0x61204 +#define PPCR_ON (1<<0) + +#define DVOB 0x61140 +#define DVOB_ON (1<<31) +#define DVOC 0x61160 +#define DVOC_ON (1<<31) +#define LVDS 0x61180 +#define LVDS_ON (1<<31) + +#define ADPA 0x61100 +#define ADPA_DPMS_MASK (~(3<<10)) +#define ADPA_DPMS_ON (0<<10) +#define ADPA_DPMS_SUSPEND (1<<10) +#define ADPA_DPMS_STANDBY (2<<10) +#define ADPA_DPMS_OFF (3<<10) + +#define RING_TAIL 0x00 +#define TAIL_ADDR 0x001FFFF8 +#define RING_HEAD 0x04 +#define HEAD_WRAP_COUNT 0xFFE00000 +#define HEAD_WRAP_ONE 0x00200000 +#define HEAD_ADDR 0x001FFFFC +#define RING_START 0x08 +#define START_ADDR 0xFFFFF000 +#define RING_LEN 0x0C +#define RING_NR_PAGES 0x001FF000 +#define RING_REPORT_MASK 0x00000006 +#define RING_REPORT_64K 0x00000002 +#define RING_REPORT_128K 0x00000004 +#define RING_NO_REPORT 0x00000000 +#define RING_VALID_MASK 0x00000001 +#define RING_VALID 0x00000001 +#define RING_INVALID 0x00000000 + +/* Scratch pad debug 0 reg: + */ +#define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000 +/* + * The i830 generation, in LVDS mode, defines P1 as the bit number set within + * this field (only one bit may be set). + */ +#define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000 +#define DPLL_FPA01_P1_POST_DIV_SHIFT 16 +/* i830, required in DVO non-gang */ +#define PLL_P2_DIVIDE_BY_4 (1 << 23) +#define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */ +#define PLL_REF_INPUT_DREFCLK (0 << 13) +#define PLL_REF_INPUT_TVCLKINA (1 << 13) /* i830 */ +#define PLL_REF_INPUT_TVCLKINBC (2 << 13) /* SDVO TVCLKIN */ +#define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13) +#define PLL_REF_INPUT_MASK (3 << 13) +#define PLL_LOAD_PULSE_PHASE_SHIFT 9 +/* + * Parallel to Serial Load Pulse phase selection. + * Selects the phase for the 10X DPLL clock for the PCIe + * digital display port. The range is 4 to 13; 10 or more + * is just a flip delay. 
The default is 6. + */ +#define PLL_LOAD_PULSE_PHASE_MASK (0xf << PLL_LOAD_PULSE_PHASE_SHIFT) +#define DISPLAY_RATE_SELECT_FPA1 (1 << 8) +/* + * SDVO multiplier for 945G/GM. Not used on 965. + */ +#define SDVO_MULTIPLIER_MASK 0x000000ff +#define SDVO_MULTIPLIER_SHIFT_HIRES 4 +#define SDVO_MULTIPLIER_SHIFT_VGA 0 +#define DPLL_A_MD 0x0601c /* 965+ only */ +/* + * UDI pixel divider, controlling how many pixels are stuffed into a packet. + * + * Value is pixels minus 1. Must be set to 1 pixel for SDVO. + */ +#define DPLL_MD_UDI_DIVIDER_MASK 0x3f000000 +#define DPLL_MD_UDI_DIVIDER_SHIFT 24 +/* UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */ +#define DPLL_MD_VGA_UDI_DIVIDER_MASK 0x003f0000 +#define DPLL_MD_VGA_UDI_DIVIDER_SHIFT 16 +/* + * SDVO/UDI pixel multiplier. + * + * SDVO requires that the bus clock rate be between 1 and 2 GHz, and the bus + * clock rate is 10 times the DPLL clock. At low resolution/refresh rate + * modes, the bus rate would be below the limits, so SDVO allows for stuffing + * dummy bytes in the datastream at an increased clock rate, with both sides of + * the link knowing how many bytes are fill. + * + * So, for a mode with a dotclock of 65 MHz, we would want to double the clock + * rate to 130 MHz to get a bus rate of 1.30 GHz. The DPLL clock rate would be + * set to 130 MHz, and the SDVO multiplier set to 2x in this register and + * through an SDVO command. + * + * This register field has values of multiplication factor minus 1, with + * a maximum multiplier of 5 for SDVO. + */ +#define DPLL_MD_UDI_MULTIPLIER_MASK 0x00003f00 +#define DPLL_MD_UDI_MULTIPLIER_SHIFT 8 +/* + * SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK. + * This is best set to the default value (3), or the CRT won't work. No, + * I don't entirely understand what this does...
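As a worked version of the multiplier rule above (a sketch, not from the patch itself): pick the smallest multiplier that lifts the 10x bus rate into the 1-2 GHz window, then store multiplier-minus-one in the DPLL_MD field.

/* Illustrative only: derive the SDVO pixel multiplier for a given dot clock.
 * The bus rate is 10x the multiplied dot clock and must land in 1-2 GHz. */
static int sample_sdvo_multiplier(int dotclock_khz)
{
	int mult;

	for (mult = 1; mult <= 5; mult++) {
		int bus_khz = dotclock_khz * mult * 10;

		if (bus_khz >= 1000000 && bus_khz <= 2000000)
			return mult;	/* e.g. 65000 kHz -> 2x -> 1.3 GHz bus */
	}
	return -EINVAL;		/* mode cannot be carried over SDVO */
}

/* Field encoding: the register holds (multiplier - 1). */
static uint32_t sample_sdvo_multiplier_bits(int mult)
{
	return ((mult - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT) &
		DPLL_MD_UDI_MULTIPLIER_MASK;
}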
+ */ +#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f +#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0 +#define DPLL_B_MD 0x06020 /* 965+ only */ +#define FPA0 0x06040 +#define FPA1 0x06044 +#define FPB0 0x06048 +#define FPB1 0x0604c +#define FP_N_DIV_MASK 0x003f0000 +#define FP_N_DIV_SHIFT 16 +#define FP_M1_DIV_MASK 0x00003f00 +#define FP_M1_DIV_SHIFT 8 +#define FP_M2_DIV_MASK 0x0000003f +#define FP_M2_DIV_SHIFT 0 +#define DPLL_TEST 0x606c +#define DPLLB_TEST_SDVO_DIV_1 (0 << 22) +#define DPLLB_TEST_SDVO_DIV_2 (1 << 22) +#define DPLLB_TEST_SDVO_DIV_4 (2 << 22) +#define DPLLB_TEST_SDVO_DIV_MASK (3 << 22) +#define DPLLB_TEST_N_BYPASS (1 << 19) +#define DPLLB_TEST_M_BYPASS (1 << 18) +#define DPLLB_INPUT_BUFFER_ENABLE (1 << 16) +#define DPLLA_TEST_N_BYPASS (1 << 3) +#define DPLLA_TEST_M_BYPASS (1 << 2) +#define DPLLA_INPUT_BUFFER_ENABLE (1 << 0) +#define D_STATE 0x6104 +#define CG_2D_DIS 0x6200 +#define CG_3D_DIS 0x6204 + +/* + * Palette regs + */ + +#define PALETTE_A 0x0a000 +#define PALETTE_B 0x0a800 + +/* + * Overlay regs + */ + +#define OVADD 0x30000 +#define DOVSTA 0x30008 +#define OC_BUF (0x3<<20) +#define OGAMC5 0x30010 +#define OGAMC4 0x30014 +#define OGAMC3 0x30018 +#define OGAMC2 0x3001c +#define OGAMC1 0x30020 +#define OGAMC0 0x30024 + +/* + * Display engine regs + */ + +/* Pipe A timing regs */ +#define HTOTAL_A 0x60000 +#define HBLANK_A 0x60004 +#define HSYNC_A 0x60008 +#define VTOTAL_A 0x6000c +#define VBLANK_A 0x60010 +#define VSYNC_A 0x60014 +#define PIPEASRC 0x6001c +#define BCLRPAT_A 0x60020 + +/* Pipe B timing regs */ +#define HTOTAL_B 0x61000 +#define HBLANK_B 0x61004 +#define HSYNC_B 0x61008 +#define VTOTAL_B 0x6100c +#define VBLANK_B 0x61010 +#define VSYNC_B 0x61014 +#define PIPEBSRC 0x6101c +#define BCLRPAT_B 0x61020 + +/* VGA port control */ +#define ADPA 0x61100 +#define ADPA_DAC_ENABLE (1<<31) +#define ADPA_DAC_DISABLE 0 +#define ADPA_PIPE_SELECT_MASK (1<<30) +#define ADPA_PIPE_A_SELECT 0 +#define ADPA_PIPE_B_SELECT (1<<30) +#define ADPA_USE_VGA_HVPOLARITY (1<<15) +#define ADPA_SETS_HVPOLARITY 0 +#define ADPA_VSYNC_CNTL_DISABLE (1<<11) +#define ADPA_VSYNC_CNTL_ENABLE 0 +#define ADPA_HSYNC_CNTL_DISABLE (1<<10) +#define ADPA_HSYNC_CNTL_ENABLE 0 +#define ADPA_VSYNC_ACTIVE_HIGH (1<<4) +#define ADPA_VSYNC_ACTIVE_LOW 0 +#define ADPA_HSYNC_ACTIVE_HIGH (1<<3) +#define ADPA_HSYNC_ACTIVE_LOW 0 +#define ADPA_DPMS_MASK (~(3<<10)) +#define ADPA_DPMS_ON (0<<10) +#define ADPA_DPMS_SUSPEND (1<<10) +#define ADPA_DPMS_STANDBY (2<<10) +#define ADPA_DPMS_OFF (3<<10) + +/* Hotplug control (945+ only) */ +#define PORT_HOTPLUG_EN 0x61110 +#define SDVOB_HOTPLUG_INT_EN (1 << 26) +#define SDVOC_HOTPLUG_INT_EN (1 << 25) +#define TV_HOTPLUG_INT_EN (1 << 18) +#define CRT_HOTPLUG_INT_EN (1 << 9) +#define CRT_HOTPLUG_FORCE_DETECT (1 << 3) + +#define PORT_HOTPLUG_STAT 0x61114 +#define CRT_HOTPLUG_INT_STATUS (1 << 11) +#define TV_HOTPLUG_INT_STATUS (1 << 10) +#define CRT_HOTPLUG_MONITOR_MASK (3 << 8) +#define CRT_HOTPLUG_MONITOR_COLOR (3 << 8) +#define CRT_HOTPLUG_MONITOR_MONO (2 << 8) +#define CRT_HOTPLUG_MONITOR_NONE (0 << 8) +#define SDVOC_HOTPLUG_INT_STATUS (1 << 7) +#define SDVOB_HOTPLUG_INT_STATUS (1 << 6) + +/* SDVO port control */ +#define SDVOB 0x61140 +#define SDVOC 0x61160 +#define SDVO_ENABLE (1 << 31) +#define SDVO_PIPE_B_SELECT (1 << 30) +#define SDVO_STALL_SELECT (1 << 29) +#define SDVO_INTERRUPT_ENABLE (1 << 26) +/** + * 915G/GM SDVO pixel multiplier. + * + * Programmed value is multiplier - 1, up to 5x. 
+ * + * \sa DPLL_MD_UDI_MULTIPLIER_MASK + */ +#define SDVO_PORT_MULTIPLY_MASK (7 << 23) +#define SDVO_PORT_MULTIPLY_SHIFT 23 +#define SDVO_PHASE_SELECT_MASK (15 << 19) +#define SDVO_PHASE_SELECT_DEFAULT (6 << 19) +#define SDVO_CLOCK_OUTPUT_INVERT (1 << 18) +#define SDVOC_GANG_MODE (1 << 16) +#define SDVO_BORDER_ENABLE (1 << 7) +#define SDVOB_PCIE_CONCURRENCY (1 << 3) +#define SDVO_DETECTED (1 << 2) +/* Bits to be preserved when writing */ +#define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14) | (1 << 26)) +#define SDVOC_PRESERVE_MASK ((1 << 17) | (1 << 26)) + +/* DVO port control */ +#define DVOA 0x61120 +#define DVOB 0x61140 +#define DVOC 0x61160 +#define DVO_ENABLE (1 << 31) +#define DVO_PIPE_B_SELECT (1 << 30) +#define DVO_PIPE_STALL_UNUSED (0 << 28) +#define DVO_PIPE_STALL (1 << 28) +#define DVO_PIPE_STALL_TV (2 << 28) +#define DVO_PIPE_STALL_MASK (3 << 28) +#define DVO_USE_VGA_SYNC (1 << 15) +#define DVO_DATA_ORDER_I740 (0 << 14) +#define DVO_DATA_ORDER_FP (1 << 14) +#define DVO_VSYNC_DISABLE (1 << 11) +#define DVO_HSYNC_DISABLE (1 << 10) +#define DVO_VSYNC_TRISTATE (1 << 9) +#define DVO_HSYNC_TRISTATE (1 << 8) +#define DVO_BORDER_ENABLE (1 << 7) +#define DVO_DATA_ORDER_GBRG (1 << 6) +#define DVO_DATA_ORDER_RGGB (0 << 6) +#define DVO_DATA_ORDER_GBRG_ERRATA (0 << 6) +#define DVO_DATA_ORDER_RGGB_ERRATA (1 << 6) +#define DVO_VSYNC_ACTIVE_HIGH (1 << 4) +#define DVO_HSYNC_ACTIVE_HIGH (1 << 3) +#define DVO_BLANK_ACTIVE_HIGH (1 << 2) +#define DVO_OUTPUT_CSTATE_PIXELS (1 << 1) /* SDG only */ +#define DVO_OUTPUT_SOURCE_SIZE_PIXELS (1 << 0) /* SDG only */ +#define DVO_PRESERVE_MASK (0x7<<24) +#define DVOA_SRCDIM 0x61124 +#define DVOB_SRCDIM 0x61144 +#define DVOC_SRCDIM 0x61164 +#define DVO_SRCDIM_HORIZONTAL_SHIFT 12 +#define DVO_SRCDIM_VERTICAL_SHIFT 0 + +/* LVDS port control */ +#define LVDS 0x61180 +/* + * Enables the LVDS port. This bit must be set before DPLLs are enabled, as + * the DPLL semantics change when the LVDS is assigned to that pipe. + */ +#define LVDS_PORT_EN (1 << 31) +/* Selects pipe B for LVDS data. Must be set on pre-965. */ +#define LVDS_PIPEB_SELECT (1 << 30) +/* + * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per + * pixel. + */ +#define LVDS_A0A2_CLKA_POWER_MASK (3 << 8) +#define LVDS_A0A2_CLKA_POWER_DOWN (0 << 8) +#define LVDS_A0A2_CLKA_POWER_UP (3 << 8) +/* + * Controls the A3 data pair, which contains the additional LSBs for 24 bit + * mode. Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be + * on. + */ +#define LVDS_A3_POWER_MASK (3 << 6) +#define LVDS_A3_POWER_DOWN (0 << 6) +#define LVDS_A3_POWER_UP (3 << 6) +/* + * Controls the CLKB pair. This should only be set when LVDS_B0B3_POWER_UP + * is set. + */ +#define LVDS_CLKB_POWER_MASK (3 << 4) +#define LVDS_CLKB_POWER_DOWN (0 << 4) +#define LVDS_CLKB_POWER_UP (3 << 4) +/* + * Controls the B0-B3 data pairs. This must be set to match the DPLL p2 + * setting for whether we are in dual-channel mode. The B3 pair will + * additionally only be powered up when LVDS_A3_POWER_UP is set. 
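A sketch of the ordering and lane-power rules described above, for illustration only; a real mode set involves more steps, and the helper below is hypothetical.

/* Illustrative only: program the LVDS port before enabling the DPLL, as the
 * comments above require, powering the lanes needed for the panel depth. */
static void sample_lvds_enable(struct drm_device *dev, int is_24bpp)
{
	struct drm_i915_private *dev_priv =
		(struct drm_i915_private *) dev->dev_private;
	uint32_t lvds = LVDS_PORT_EN | LVDS_PIPEB_SELECT |	/* pipe B on pre-965 */
			LVDS_A0A2_CLKA_POWER_UP;		/* 18 bpp data pairs */

	if (is_24bpp)
		lvds |= LVDS_A3_POWER_UP;	/* extra LSB pair for 24 bpp */

	I915_WRITE(LVDS, lvds);

	/* Only now enable the DPLL; its semantics change once LVDS owns it. */
	I915_WRITE(DPLL_B, I915_READ(DPLL_B) | DPLL_VCO_ENABLE);
}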
+ */ +#define LVDS_B0B3_POWER_MASK (3 << 2) +#define LVDS_B0B3_POWER_DOWN (0 << 2) +#define LVDS_B0B3_POWER_UP (3 << 2) + +/* Panel power sequencing */ +#define PP_STATUS 0x61200 +#define PP_ON (1 << 31) +/* + * Indicates that all dependencies of the panel are on: + * + * - PLL enabled + * - pipe enabled + * - LVDS/DVOB/DVOC on + */ +#define PP_READY (1 << 30) +#define PP_SEQUENCE_NONE (0 << 28) +#define PP_SEQUENCE_ON (1 << 28) +#define PP_SEQUENCE_OFF (2 << 28) +#define PP_SEQUENCE_MASK 0x30000000 +#define PP_CONTROL 0x61204 +#define POWER_TARGET_ON (1 << 0) +#define PP_ON_DELAYS 0x61208 +#define PP_OFF_DELAYS 0x6120c +#define PP_DIVISOR 0x61210 + +/* Panel fitting */ +#define PFIT_CONTROL 0x61230 +#define PFIT_ENABLE (1 << 31) +#define PFIT_PIPE_MASK (3 << 29) +#define PFIT_PIPE_SHIFT 29 +#define VERT_INTERP_DISABLE (0 << 10) +#define VERT_INTERP_BILINEAR (1 << 10) +#define VERT_INTERP_MASK (3 << 10) +#define VERT_AUTO_SCALE (1 << 9) +#define HORIZ_INTERP_DISABLE (0 << 6) +#define HORIZ_INTERP_BILINEAR (1 << 6) +#define HORIZ_INTERP_MASK (3 << 6) +#define HORIZ_AUTO_SCALE (1 << 5) +#define PANEL_8TO6_DITHER_ENABLE (1 << 3) +#define PFIT_PGM_RATIOS 0x61234 +#define PFIT_VERT_SCALE_MASK 0xfff00000 +#define PFIT_HORIZ_SCALE_MASK 0x0000fff0 +#define PFIT_AUTO_RATIOS 0x61238 + +/* Backlight control */ +#define BLC_PWM_CTL 0x61254 +#define BACKLIGHT_MODULATION_FREQ_SHIFT (17) +#define BLC_PWM_CTL2 0x61250 /* 965+ only */ +/* + * This is the most significant 15 bits of the number of backlight cycles in a + * complete cycle of the modulated backlight control. + * + * The actual value is this field multiplied by two. + */ +#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17) +#define BLM_LEGACY_MODE (1 << 16) +/* + * This is the number of cycles out of the backlight modulation cycle for which + * the backlight is on. + * + * This field must be no greater than the number of cycles in the complete + * backlight modulation cycle. + */ +#define BACKLIGHT_DUTY_CYCLE_SHIFT (0) +#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff) + +/* TV port control */ +#define TV_CTL 0x68000 +/** Enables the TV encoder */ +# define TV_ENC_ENABLE (1 << 31) +/** Sources the TV encoder input from pipe B instead of A. */ +# define TV_ENC_PIPEB_SELECT (1 << 30) +/** Outputs composite video (DAC A only) */ +# define TV_ENC_OUTPUT_COMPOSITE (0 << 28) +/** Outputs SVideo video (DAC B/C) */ +# define TV_ENC_OUTPUT_SVIDEO (1 << 28) +/** Outputs Component video (DAC A/B/C) */ +# define TV_ENC_OUTPUT_COMPONENT (2 << 28) +/** Outputs Composite and SVideo (DAC A/B/C) */ +# define TV_ENC_OUTPUT_SVIDEO_COMPOSITE (3 << 28) +# define TV_TRILEVEL_SYNC (1 << 21) +/** Enables slow sync generation (945GM only) */ +# define TV_SLOW_SYNC (1 << 20) +/** Selects 4x oversampling for 480i and 576p */ +# define TV_OVERSAMPLE_4X (0 << 18) +/** Selects 2x oversampling for 720p and 1080i */ +# define TV_OVERSAMPLE_2X (1 << 18) +/** Selects no oversampling for 1080p */ +# define TV_OVERSAMPLE_NONE (2 << 18) +/** Selects 8x oversampling */ +# define TV_OVERSAMPLE_8X (3 << 18) +/** Selects progressive mode rather than interlaced */ +# define TV_PROGRESSIVE (1 << 17) +/** Sets the colorburst to PAL mode. Required for non-M PAL modes. */ +# define TV_PAL_BURST (1 << 16) +/** Field for setting delay of Y compared to C */ +# define TV_YC_SKEW_MASK (7 << 12) +/** Enables a fix for 480p/576p standard definition modes on the 915GM only */ +# define TV_ENC_SDP_FIX (1 << 11) +/** + * Enables a fix for the 915GM only. + * + * Not sure what it does. 
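To make the BLC_PWM_CTL layout above concrete, here is a small sketch (not from the patch) that rescales the duty-cycle field to a percentage of the modulation period while leaving the frequency field untouched.

/* Illustrative only: set backlight brightness as a percentage of the
 * modulation period encoded in BLC_PWM_CTL. */
static void sample_set_backlight(struct drm_device *dev, int percent)
{
	struct drm_i915_private *dev_priv =
		(struct drm_i915_private *) dev->dev_private;
	uint32_t blc = I915_READ(BLC_PWM_CTL);
	uint32_t freq = (blc & BACKLIGHT_MODULATION_FREQ_MASK) >>
			BACKLIGHT_MODULATION_FREQ_SHIFT;
	/* The field holds the top 15 bits; the full period is twice that. */
	uint32_t period = freq * 2;
	uint32_t duty = period * percent / 100;

	if (duty > period)	/* the duty cycle must not exceed the period */
		duty = period;

	I915_WRITE(BLC_PWM_CTL, (blc & ~BACKLIGHT_DUTY_CYCLE_MASK) |
				(duty & BACKLIGHT_DUTY_CYCLE_MASK));
}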
+ */ +# define TV_ENC_C0_FIX (1 << 10) +/** Bits that must be preserved by software */ +# define TV_CTL_SAVE ((3 << 8) | (3 << 6)) +# define TV_FUSE_STATE_MASK (3 << 4) +/** Read-only state that reports all features enabled */ +# define TV_FUSE_STATE_ENABLED (0 << 4) +/** Read-only state that reports that Macrovision is disabled in hardware*/ +# define TV_FUSE_STATE_NO_MACROVISION (1 << 4) +/** Read-only state that reports that TV-out is disabled in hardware. */ +# define TV_FUSE_STATE_DISABLED (2 << 4) +/** Normal operation */ +# define TV_TEST_MODE_NORMAL (0 << 0) +/** Encoder test pattern 1 - combo pattern */ +# define TV_TEST_MODE_PATTERN_1 (1 << 0) +/** Encoder test pattern 2 - full screen vertical 75% color bars */ +# define TV_TEST_MODE_PATTERN_2 (2 << 0) +/** Encoder test pattern 3 - full screen horizontal 75% color bars */ +# define TV_TEST_MODE_PATTERN_3 (3 << 0) +/** Encoder test pattern 4 - random noise */ +# define TV_TEST_MODE_PATTERN_4 (4 << 0) +/** Encoder test pattern 5 - linear color ramps */ +# define TV_TEST_MODE_PATTERN_5 (5 << 0) +/** + * This test mode forces the DACs to 50% of full output. + * + * This is used for load detection in combination with TVDAC_SENSE_MASK + */ +# define TV_TEST_MODE_MONITOR_DETECT (7 << 0) +# define TV_TEST_MODE_MASK (7 << 0) + +#define TV_DAC 0x68004 +/** + * Reports that DAC state change logic has reported change (RO). + * + * This gets cleared when TV_DAC_STATE_EN is cleared +*/ +# define TVDAC_STATE_CHG (1 << 31) +# define TVDAC_SENSE_MASK (7 << 28) +/** Reports that DAC A voltage is above the detect threshold */ +# define TVDAC_A_SENSE (1 << 30) +/** Reports that DAC B voltage is above the detect threshold */ +# define TVDAC_B_SENSE (1 << 29) +/** Reports that DAC C voltage is above the detect threshold */ +# define TVDAC_C_SENSE (1 << 28) +/** + * Enables DAC state detection logic, for load-based TV detection. + * + * The PLL of the chosen pipe (in TV_CTL) must be running, and the encoder set + * to off, for load detection to work. + */ +# define TVDAC_STATE_CHG_EN (1 << 27) +/** Sets the DAC A sense value to high */ +# define TVDAC_A_SENSE_CTL (1 << 26) +/** Sets the DAC B sense value to high */ +# define TVDAC_B_SENSE_CTL (1 << 25) +/** Sets the DAC C sense value to high */ +# define TVDAC_C_SENSE_CTL (1 << 24) +/** Overrides the ENC_ENABLE and DAC voltage levels */ +# define DAC_CTL_OVERRIDE (1 << 7) +/** Sets the slew rate. Must be preserved in software */ +# define ENC_TVDAC_SLEW_FAST (1 << 6) +# define DAC_A_1_3_V (0 << 4) +# define DAC_A_1_1_V (1 << 4) +# define DAC_A_0_7_V (2 << 4) +# define DAC_A_OFF (3 << 4) +# define DAC_B_1_3_V (0 << 2) +# define DAC_B_1_1_V (1 << 2) +# define DAC_B_0_7_V (2 << 2) +# define DAC_B_OFF (3 << 2) +# define DAC_C_1_3_V (0 << 0) +# define DAC_C_1_1_V (1 << 0) +# define DAC_C_0_7_V (2 << 0) +# define DAC_C_OFF (3 << 0) + +/** + * CSC coefficients are stored in a floating point format with 9 bits of + * mantissa and 2 or 3 bits of exponent. The exponent is represented as 2**-n, + * where 2-bit exponents are unsigned n, and 3-bit exponents are signed n with + * -1 (0x3) being the only legal negative value. + */ +#define TV_CSC_Y 0x68010 +# define TV_RY_MASK 0x07ff0000 +# define TV_RY_SHIFT 16 +# define TV_GY_MASK 0x00000fff +# define TV_GY_SHIFT 0 + +#define TV_CSC_Y2 0x68014 +# define TV_BY_MASK 0x07ff0000 +# define TV_BY_SHIFT 16 +/** + * Y attenuation for component video. + * + * Stored in 1.9 fixed point. 
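A brief sketch of the fixed-point encodings just described. The attenuation fields are plain 1.9 fixed point; for the CSC coefficients, treating the 9-bit mantissa as the fraction scaled by 512 is an assumption read into the comment, not something the patch states.

/* Illustrative only: encode a gain given in thousandths (587 for 0.587)
 * as 1.9 fixed point (value * 512) for the TV_AY/AU/AV attenuation fields. */
static uint32_t sample_tv_atten_1_9(unsigned int gain_milli)
{
	return ((gain_milli * 512 + 500) / 1000) & TV_AY_MASK;
}

/* Illustrative only: pack a CSC coefficient as an exponent (2**-n) above a
 * 9-bit mantissa, assuming the mantissa is the fraction scaled by 512. */
static uint32_t sample_tv_csc_coeff(unsigned int exp_n, unsigned int frac_milli)
{
	uint32_t mantissa = ((frac_milli * 512 + 500) / 1000) & 0x1ff;

	return (exp_n << 9) | mantissa;	/* shift into TV_RY/TV_GY/... as needed */
}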
+ */ +# define TV_AY_MASK 0x000003ff +# define TV_AY_SHIFT 0 + +#define TV_CSC_U 0x68018 +# define TV_RU_MASK 0x07ff0000 +# define TV_RU_SHIFT 16 +# define TV_GU_MASK 0x000007ff +# define TV_GU_SHIFT 0 + +#define TV_CSC_U2 0x6801c +# define TV_BU_MASK 0x07ff0000 +# define TV_BU_SHIFT 16 +/** + * U attenuation for component video. + * + * Stored in 1.9 fixed point. + */ +# define TV_AU_MASK 0x000003ff +# define TV_AU_SHIFT 0 + +#define TV_CSC_V 0x68020 +# define TV_RV_MASK 0x0fff0000 +# define TV_RV_SHIFT 16 +# define TV_GV_MASK 0x000007ff +# define TV_GV_SHIFT 0 + +#define TV_CSC_V2 0x68024 +# define TV_BV_MASK 0x07ff0000 +# define TV_BV_SHIFT 16 +/** + * V attenuation for component video. + * + * Stored in 1.9 fixed point. + */ +# define TV_AV_MASK 0x000007ff +# define TV_AV_SHIFT 0 + +#define TV_CLR_KNOBS 0x68028 +/** 2s-complement brightness adjustment */ +# define TV_BRIGHTNESS_MASK 0xff000000 +# define TV_BRIGHTNESS_SHIFT 24 +/** Contrast adjustment, as a 2.6 unsigned floating point number */ +# define TV_CONTRAST_MASK 0x00ff0000 +# define TV_CONTRAST_SHIFT 16 +/** Saturation adjustment, as a 2.6 unsigned floating point number */ +# define TV_SATURATION_MASK 0x0000ff00 +# define TV_SATURATION_SHIFT 8 +/** Hue adjustment, as an integer phase angle in degrees */ +# define TV_HUE_MASK 0x000000ff +# define TV_HUE_SHIFT 0 + +#define TV_CLR_LEVEL 0x6802c +/** Controls the DAC level for black */ +# define TV_BLACK_LEVEL_MASK 0x01ff0000 +# define TV_BLACK_LEVEL_SHIFT 16 +/** Controls the DAC level for blanking */ +# define TV_BLANK_LEVEL_MASK 0x000001ff +# define TV_BLANK_LEVEL_SHIFT 0 + +#define TV_H_CTL_1 0x68030 +/** Number of pixels in the hsync. */ +# define TV_HSYNC_END_MASK 0x1fff0000 +# define TV_HSYNC_END_SHIFT 16 +/** Total number of pixels minus one in the line (display and blanking). */ +# define TV_HTOTAL_MASK 0x00001fff +# define TV_HTOTAL_SHIFT 0 + +#define TV_H_CTL_2 0x68034 +/** Enables the colorburst (needed for non-component color) */ +# define TV_BURST_ENA (1 << 31) +/** Offset of the colorburst from the start of hsync, in pixels minus one. */ +# define TV_HBURST_START_SHIFT 16 +# define TV_HBURST_START_MASK 0x1fff0000 +/** Length of the colorburst */ +# define TV_HBURST_LEN_SHIFT 0 +# define TV_HBURST_LEN_MASK 0x0001fff + +#define TV_H_CTL_3 0x68038 +/** End of hblank, measured in pixels minus one from start of hsync */ +# define TV_HBLANK_END_SHIFT 16 +# define TV_HBLANK_END_MASK 0x1fff0000 +/** Start of hblank, measured in pixels minus one from start of hsync */ +# define TV_HBLANK_START_SHIFT 0 +# define TV_HBLANK_START_MASK 0x0001fff + +#define TV_V_CTL_1 0x6803c +/** XXX */ +# define TV_NBR_END_SHIFT 16 +# define TV_NBR_END_MASK 0x07ff0000 +/** XXX */ +# define TV_VI_END_F1_SHIFT 8 +# define TV_VI_END_F1_MASK 0x00003f00 +/** XXX */ +# define TV_VI_END_F2_SHIFT 0 +# define TV_VI_END_F2_MASK 0x0000003f + +#define TV_V_CTL_2 0x68040 +/** Length of vsync, in half lines */ +# define TV_VSYNC_LEN_MASK 0x07ff0000 +# define TV_VSYNC_LEN_SHIFT 16 +/** Offset of the start of vsync in field 1, measured in one less than the + * number of half lines. + */ +# define TV_VSYNC_START_F1_MASK 0x00007f00 +# define TV_VSYNC_START_F1_SHIFT 8 +/** + * Offset of the start of vsync in field 2, measured in one less than the + * number of half lines. 
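The TV_CLR_KNOBS fields above mix three encodings; a short sketch follows, taking contrast and saturation in hundredths, which is an arbitrary convention chosen for the example.

/* Illustrative only: pack brightness (-128..127, two's complement),
 * contrast and saturation (2.6 fixed point, i.e. value * 64) and hue
 * (integer degrees) into TV_CLR_KNOBS. */
static uint32_t sample_tv_clr_knobs(int brightness, unsigned int contrast_c,
				    unsigned int saturation_c,
				    unsigned int hue_deg)
{
	uint32_t contrast = (contrast_c * 64 + 50) / 100;	/* 100 -> 0x40 */
	uint32_t saturation = (saturation_c * 64 + 50) / 100;

	return (((uint32_t)brightness << TV_BRIGHTNESS_SHIFT) & TV_BRIGHTNESS_MASK) |
	       ((contrast << TV_CONTRAST_SHIFT) & TV_CONTRAST_MASK) |
	       ((saturation << TV_SATURATION_SHIFT) & TV_SATURATION_MASK) |
	       ((hue_deg << TV_HUE_SHIFT) & TV_HUE_MASK);
}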
+ */ +# define TV_VSYNC_START_F2_MASK 0x0000007f +# define TV_VSYNC_START_F2_SHIFT 0 + +#define TV_V_CTL_3 0x68044 +/** Enables generation of the equalization signal */ +# define TV_EQUAL_ENA (1 << 31) +/** Length of vsync, in half lines */ +# define TV_VEQ_LEN_MASK 0x007f0000 +# define TV_VEQ_LEN_SHIFT 16 +/** Offset of the start of equalization in field 1, measured in one less than + * the number of half lines. + */ +# define TV_VEQ_START_F1_MASK 0x0007f00 +# define TV_VEQ_START_F1_SHIFT 8 +/** + * Offset of the start of equalization in field 2, measured in one less than + * the number of half lines. + */ +# define TV_VEQ_START_F2_MASK 0x000007f +# define TV_VEQ_START_F2_SHIFT 0 + +#define TV_V_CTL_4 0x68048 +/** + * Offset to start of vertical colorburst, measured in one less than the + * number of lines from vertical start. + */ +# define TV_VBURST_START_F1_MASK 0x003f0000 +# define TV_VBURST_START_F1_SHIFT 16 +/** + * Offset to the end of vertical colorburst, measured in one less than the + * number of lines from the start of NBR. + */ +# define TV_VBURST_END_F1_MASK 0x000000ff +# define TV_VBURST_END_F1_SHIFT 0 + +#define TV_V_CTL_5 0x6804c +/** + * Offset to start of vertical colorburst, measured in one less than the + * number of lines from vertical start. + */ +# define TV_VBURST_START_F2_MASK 0x003f0000 +# define TV_VBURST_START_F2_SHIFT 16 +/** + * Offset to the end of vertical colorburst, measured in one less than the + * number of lines from the start of NBR. + */ +# define TV_VBURST_END_F2_MASK 0x000000ff +# define TV_VBURST_END_F2_SHIFT 0 + +#define TV_V_CTL_6 0x68050 +/** + * Offset to start of vertical colorburst, measured in one less than the + * number of lines from vertical start. + */ +# define TV_VBURST_START_F3_MASK 0x003f0000 +# define TV_VBURST_START_F3_SHIFT 16 +/** + * Offset to the end of vertical colorburst, measured in one less than the + * number of lines from the start of NBR. + */ +# define TV_VBURST_END_F3_MASK 0x000000ff +# define TV_VBURST_END_F3_SHIFT 0 + +#define TV_V_CTL_7 0x68054 +/** + * Offset to start of vertical colorburst, measured in one less than the + * number of lines from vertical start. + */ +# define TV_VBURST_START_F4_MASK 0x003f0000 +# define TV_VBURST_START_F4_SHIFT 16 +/** + * Offset to the end of vertical colorburst, measured in one less than the + * number of lines from the start of NBR. 
+ */ +# define TV_VBURST_END_F4_MASK 0x000000ff +# define TV_VBURST_END_F4_SHIFT 0 + +#define TV_SC_CTL_1 0x68060 +/** Turns on the first subcarrier phase generation DDA */ +# define TV_SC_DDA1_EN (1 << 31) +/** Turns on the first subcarrier phase generation DDA */ +# define TV_SC_DDA2_EN (1 << 30) +/** Turns on the first subcarrier phase generation DDA */ +# define TV_SC_DDA3_EN (1 << 29) +/** Sets the subcarrier DDA to reset frequency every other field */ +# define TV_SC_RESET_EVERY_2 (0 << 24) +/** Sets the subcarrier DDA to reset frequency every fourth field */ +# define TV_SC_RESET_EVERY_4 (1 << 24) +/** Sets the subcarrier DDA to reset frequency every eighth field */ +# define TV_SC_RESET_EVERY_8 (2 << 24) +/** Sets the subcarrier DDA to never reset the frequency */ +# define TV_SC_RESET_NEVER (3 << 24) +/** Sets the peak amplitude of the colorburst.*/ +# define TV_BURST_LEVEL_MASK 0x00ff0000 +# define TV_BURST_LEVEL_SHIFT 16 +/** Sets the increment of the first subcarrier phase generation DDA */ +# define TV_SCDDA1_INC_MASK 0x00000fff +# define TV_SCDDA1_INC_SHIFT 0 + +#define TV_SC_CTL_2 0x68064 +/** Sets the rollover for the second subcarrier phase generation DDA */ +# define TV_SCDDA2_SIZE_MASK 0x7fff0000 +# define TV_SCDDA2_SIZE_SHIFT 16 +/** Sets the increent of the second subcarrier phase generation DDA */ +# define TV_SCDDA2_INC_MASK 0x00007fff +# define TV_SCDDA2_INC_SHIFT 0 + +#define TV_SC_CTL_3 0x68068 +/** Sets the rollover for the third subcarrier phase generation DDA */ +# define TV_SCDDA3_SIZE_MASK 0x7fff0000 +# define TV_SCDDA3_SIZE_SHIFT 16 +/** Sets the increent of the third subcarrier phase generation DDA */ +# define TV_SCDDA3_INC_MASK 0x00007fff +# define TV_SCDDA3_INC_SHIFT 0 + +#define TV_WIN_POS 0x68070 +/** X coordinate of the display from the start of horizontal active */ +# define TV_XPOS_MASK 0x1fff0000 +# define TV_XPOS_SHIFT 16 +/** Y coordinate of the display from the start of vertical active (NBR) */ +# define TV_YPOS_MASK 0x00000fff +# define TV_YPOS_SHIFT 0 + +#define TV_WIN_SIZE 0x68074 +/** Horizontal size of the display window, measured in pixels*/ +# define TV_XSIZE_MASK 0x1fff0000 +# define TV_XSIZE_SHIFT 16 +/** + * Vertical size of the display window, measured in pixels. + * + * Must be even for interlaced modes. + */ +# define TV_YSIZE_MASK 0x00000fff +# define TV_YSIZE_SHIFT 0 + +#define TV_FILTER_CTL_1 0x68080 +/** + * Enables automatic scaling calculation. + * + * If set, the rest of the registers are ignored, and the calculated values can + * be read back from the register. + */ +# define TV_AUTO_SCALE (1 << 31) +/** + * Disables the vertical filter. + * + * This is required on modes more than 1024 pixels wide */ +# define TV_V_FILTER_BYPASS (1 << 29) +/** Enables adaptive vertical filtering */ +# define TV_VADAPT (1 << 28) +# define TV_VADAPT_MODE_MASK (3 << 26) +/** Selects the least adaptive vertical filtering mode */ +# define TV_VADAPT_MODE_LEAST (0 << 26) +/** Selects the moderately adaptive vertical filtering mode */ +# define TV_VADAPT_MODE_MODERATE (1 << 26) +/** Selects the most adaptive vertical filtering mode */ +# define TV_VADAPT_MODE_MOST (3 << 26) +/** + * Sets the horizontal scaling factor. + * + * This should be the fractional part of the horizontal scaling factor divided + * by the oversampling rate. 
TV_HSCALE should be less than 1, and set to: + * + * (src width - 1) / ((oversample * dest width) - 1) + */ +# define TV_HSCALE_FRAC_MASK 0x00003fff +# define TV_HSCALE_FRAC_SHIFT 0 + +#define TV_FILTER_CTL_2 0x68084 +/** + * Sets the integer part of the 3.15 fixed-point vertical scaling factor. + * + * TV_VSCALE should be (src height - 1) / ((interlace * dest height) - 1) + */ +# define TV_VSCALE_INT_MASK 0x00038000 +# define TV_VSCALE_INT_SHIFT 15 +/** + * Sets the fractional part of the 3.15 fixed-point vertical scaling factor. + * + * \sa TV_VSCALE_INT_MASK + */ +# define TV_VSCALE_FRAC_MASK 0x00007fff +# define TV_VSCALE_FRAC_SHIFT 0 + +#define TV_FILTER_CTL_3 0x68088 +/** + * Sets the integer part of the 3.15 fixed-point vertical scaling factor. + * + * TV_VSCALE should be (src height - 1) / (1/4 * (dest height - 1)) + * + * For progressive modes, TV_VSCALE_IP_INT should be set to zeroes. + */ +# define TV_VSCALE_IP_INT_MASK 0x00038000 +# define TV_VSCALE_IP_INT_SHIFT 15 +/** + * Sets the fractional part of the 3.15 fixed-point vertical scaling factor. + * + * For progressive modes, TV_VSCALE_IP_INT should be set to zeroes. + * + * \sa TV_VSCALE_IP_INT_MASK + */ +# define TV_VSCALE_IP_FRAC_MASK 0x00007fff +# define TV_VSCALE_IP_FRAC_SHIFT 0 + +#define TV_CC_CONTROL 0x68090 +# define TV_CC_ENABLE (1 << 31) +/** + * Specifies which field to send the CC data in. + * + * CC data is usually sent in field 0. + */ +# define TV_CC_FID_MASK (1 << 27) +# define TV_CC_FID_SHIFT 27 +/** Sets the horizontal position of the CC data. Usually 135. */ +# define TV_CC_HOFF_MASK 0x03ff0000 +# define TV_CC_HOFF_SHIFT 16 +/** Sets the vertical position of the CC data. Usually 21 */ +# define TV_CC_LINE_MASK 0x0000003f +# define TV_CC_LINE_SHIFT 0 + +#define TV_CC_DATA 0x68094 +# define TV_CC_RDY (1 << 31) +/** Second word of CC data to be transmitted. */ +# define TV_CC_DATA_2_MASK 0x007f0000 +# define TV_CC_DATA_2_SHIFT 16 +/** First word of CC data to be transmitted. 
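A sketch of the vertical scale computation spelled out above, for illustration; TV_AUTO_SCALE can be set to let the hardware derive these values instead.

/* Illustrative only: TV_VSCALE = (src_h - 1) / (interlace * dest_h - 1),
 * packed as 3.15 fixed point into TV_FILTER_CTL_2. */
static uint32_t sample_tv_vscale(int src_h, int dest_h, int interlace)
{
	/* Build the 3.15 value: scale the ratio by 2^15 before dividing. */
	uint32_t vscale = ((src_h - 1) << 15) / (interlace * dest_h - 1);

	/* INT occupies bits 17:15 and FRAC bits 14:0, so the 3.15 value maps
	 * straight onto the register fields. */
	return vscale & (TV_VSCALE_INT_MASK | TV_VSCALE_FRAC_MASK);
}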
*/ +# define TV_CC_DATA_1_MASK 0x0000007f +# define TV_CC_DATA_1_SHIFT 0 + +#define TV_H_LUMA_0 0x68100 +#define TV_H_LUMA_59 0x681ec +#define TV_H_CHROMA_0 0x68200 +#define TV_H_CHROMA_59 0x682ec +#define TV_V_LUMA_0 0x68300 +#define TV_V_LUMA_42 0x683a8 +#define TV_V_CHROMA_0 0x68400 +#define TV_V_CHROMA_42 0x684a8 + +/* Display & cursor control */ + +/* Pipe A */ +#define PIPEADSL 0x70000 +#define PIPEACONF 0x70008 +#define PIPEACONF_ENABLE (1<<31) +#define PIPEACONF_DISABLE 0 +#define PIPEACONF_DOUBLE_WIDE (1<<30) +#define I965_PIPECONF_ACTIVE (1<<30) +#define PIPEACONF_SINGLE_WIDE 0 +#define PIPEACONF_PIPE_UNLOCKED 0 +#define PIPEACONF_PIPE_LOCKED (1<<25) +#define PIPEACONF_PALETTE 0 +#define PIPEACONF_GAMMA (1<<24) +#define PIPECONF_FORCE_BORDER (1<<25) +#define PIPECONF_PROGRESSIVE (0 << 21) +#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21) +#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21) +#define PIPEASTAT 0x70024 +#define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31) +#define PIPE_CRC_ERROR_ENABLE (1UL<<29) +#define PIPE_CRC_DONE_ENABLE (1UL<<28) +#define PIPE_GMBUS_EVENT_ENABLE (1UL<<27) +#define PIPE_HOTPLUG_INTERRUPT_ENABLE (1UL<<26) +#define PIPE_VSYNC_INTERRUPT_ENABLE (1UL<<25) +#define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24) +#define PIPE_DPST_EVENT_ENABLE (1UL<<23) +#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL<<22) +#define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21) +#define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20) +#define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL<<18) /* pre-965 */ +#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */ +#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17) +#define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16) +#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13) +#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12) +#define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11) +#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL<<10) +#define PIPE_VSYNC_INTERRUPT_STATUS (1UL<<9) +#define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL<<8) +#define PIPE_DPST_EVENT_STATUS (1UL<<7) +#define PIPE_LEGACY_BLC_EVENT_STATUS (1UL<<6) +#define PIPE_ODD_FIELD_INTERRUPT_STATUS (1UL<<5) +#define PIPE_EVEN_FIELD_INTERRUPT_STATUS (1UL<<4) +#define PIPE_HOTPLUG_TV_INTERRUPT_STATUS (1UL<<2) /* pre-965 */ +#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */ +#define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1) +#define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0) + +#define DSPARB 0x70030 +#define DSPARB_CSTART_MASK (0x7f << 7) +#define DSPARB_CSTART_SHIFT 7 +#define DSPARB_BSTART_MASK (0x7f) +#define DSPARB_BSTART_SHIFT 0 +/* + * The two pipe frame counter registers are not synchronized, so + * reading a stable value is somewhat tricky. 
The following code + * should work: + * + * do { + * high1 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >> + * PIPE_FRAME_HIGH_SHIFT; + * low1 = ((INREG(PIPEAFRAMEPIXEL) & PIPE_FRAME_LOW_MASK) >> + * PIPE_FRAME_LOW_SHIFT); + * high2 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >> + * PIPE_FRAME_HIGH_SHIFT); + * } while (high1 != high2); + * frame = (high1 << 8) | low1; + */ +#define PIPEAFRAMEHIGH 0x70040 +#define PIPE_FRAME_HIGH_MASK 0x0000ffff +#define PIPE_FRAME_HIGH_SHIFT 0 +#define PIPEAFRAMEPIXEL 0x70044 +#define PIPE_FRAME_LOW_MASK 0xff000000 +#define PIPE_FRAME_LOW_SHIFT 24 +#define PIPE_PIXEL_MASK 0x00ffffff +#define PIPE_PIXEL_SHIFT 0 + +/* Cursor A & B regs */ +#define CURACNTR 0x70080 +#define CURSOR_MODE_DISABLE 0x00 +#define CURSOR_MODE_64_32B_AX 0x07 +#define CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX) +#define MCURSOR_GAMMA_ENABLE (1 << 26) +#define CURABASE 0x70084 +#define CURAPOS 0x70088 +#define CURSOR_POS_MASK 0x007FF +#define CURSOR_POS_SIGN 0x8000 +#define CURSOR_X_SHIFT 0 +#define CURSOR_Y_SHIFT 16 +#define CURBCNTR 0x700c0 +#define CURBBASE 0x700c4 +#define CURBPOS 0x700c8 + +/* Display A control */ +#define DSPACNTR 0x70180 +#define DISPLAY_PLANE_ENABLE (1<<31) +#define DISPLAY_PLANE_DISABLE 0 +#define DISPPLANE_GAMMA_ENABLE (1<<30) +#define DISPPLANE_GAMMA_DISABLE 0 +#define DISPPLANE_PIXFORMAT_MASK (0xf<<26) +#define DISPPLANE_8BPP (0x2<<26) +#define DISPPLANE_15_16BPP (0x4<<26) +#define DISPPLANE_16BPP (0x5<<26) +#define DISPPLANE_32BPP_NO_ALPHA (0x6<<26) +#define DISPPLANE_32BPP (0x7<<26) +#define DISPPLANE_STEREO_ENABLE (1<<25) +#define DISPPLANE_STEREO_DISABLE 0 +#define DISPPLANE_SEL_PIPE_MASK (1<<24) +#define DISPPLANE_SEL_PIPE_A 0 +#define DISPPLANE_SEL_PIPE_B (1<<24) +#define DISPPLANE_SRC_KEY_ENABLE (1<<22) +#define DISPPLANE_SRC_KEY_DISABLE 0 +#define DISPPLANE_LINE_DOUBLE (1<<20) +#define DISPPLANE_NO_LINE_DOUBLE 0 +#define DISPPLANE_STEREO_POLARITY_FIRST 0 +#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18) +#define DSPAADDR 0x70184 +#define DSPASTRIDE 0x70188 +#define DSPAPOS 0x7018C /* reserved */ +#define DSPASIZE 0x70190 +#define DSPASURF 0x7019C /* 965+ only */ +#define DSPATILEOFF 0x701A4 /* 965+ only */ + +/* VBIOS flags */ +#define SWF00 0x71410 +#define SWF01 0x71414 +#define SWF02 0x71418 +#define SWF03 0x7141c +#define SWF04 0x71420 +#define SWF05 0x71424 +#define SWF06 0x71428 +#define SWF10 0x70410 +#define SWF11 0x70414 +#define SWF14 0x71420 +#define SWF30 0x72414 +#define SWF31 0x72418 +#define SWF32 0x7241c + +/* Pipe B */ +#define PIPEBDSL 0x71000 +#define PIPEBCONF 0x71008 +#define PIPEBSTAT 0x71024 +#define PIPEBFRAMEHIGH 0x71040 +#define PIPEBFRAMEPIXEL 0x71044 + +/* Display B control */ +#define DSPBCNTR 0x71180 +#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15) +#define DISPPLANE_ALPHA_TRANS_DISABLE 0 +#define DISPPLANE_SPRITE_ABOVE_DISPLAY 0 +#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1) +#define DSPBADDR 0x71184 +#define DSPBSTRIDE 0x71188 +#define DSPBPOS 0x7118C +#define DSPBSIZE 0x71190 +#define DSPBSURF 0x7119C +#define DSPBTILEOFF 0x711A4 + +/* VBIOS regs */ +#define VGACNTRL 0x71400 +# define VGA_DISP_DISABLE (1 << 31) +# define VGA_2X_MODE (1 << 30) +# define VGA_PIPE_B_SELECT (1 << 29) + +/* Chipset type macros */ #define IS_I830(dev) ((dev)->pci_device == 0x3577) #define IS_845G(dev) ((dev)->pci_device == 0x2562) @@ -353,7 +1801,7 @@ extern int i915_wait_ring(struct drm_dev #define IS_I965GM(dev) ((dev)->pci_device == 0x2A02) -#define IS_IGD_GM(dev) ((dev)->pci_device == 0x2A42) +#define IS_GM45(dev) 
((dev)->pci_device == 0x2A42) #define IS_G4X(dev) ((dev)->pci_device == 0x2E02 || \ (dev)->pci_device == 0x2E12 || \ @@ -367,10 +1815,8 @@ extern int i915_wait_ring(struct drm_dev IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev)) #define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \ - IS_I945GM(dev) || IS_I965GM(dev) || IS_IGD_GM(dev)) + IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev)) -#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_IGD_GM(dev) || IS_G4X(dev)) - -#define PRIMARY_RINGBUFFER_SIZE (128*1024) +#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev)) #endif diff -Napur -X /home/jbarnes/dontdiff gpu/drm/i915/i915_execbuf.c /tmp/drm-master/drivers/gpu/drm/i915/i915_execbuf.c --- gpu/drm/i915/i915_execbuf.c 1969-12-31 16:00:00.000000000 -0800 +++ /tmp/drm-master/drivers/gpu/drm/i915/i915_execbuf.c 2008-08-21 14:34:10.000000000 -0700 @@ -0,0 +1,917 @@ +/* + * Copyright 2003-2008 Tungsten Graphics, Inc., Cedar Park, Texas. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. + * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: + * Thomas Hellstrom + * Dave Airlie + * Keith Packard + * ... ? + */ + +#include "drmP.h" +#include "drm.h" +#include "i915_drm.h" +#include "i915_drv.h" + +#if DRM_DEBUG_CODE +#define DRM_DEBUG_RELOCATION (drm_debug != 0) +#else +#define DRM_DEBUG_RELOCATION 0 +#endif + +enum i915_buf_idle { + I915_RELOC_UNCHECKED, + I915_RELOC_IDLE, + I915_RELOC_BUSY +}; + +struct i915_relocatee_info { + struct drm_buffer_object *buf; + unsigned long offset; + uint32_t *data_page; + unsigned page_offset; + struct drm_bo_kmap_obj kmap; + int is_iomem; + int dst; + int idle; + int performed_ring_relocs; +#ifdef DRM_KMAP_ATOMIC_PROT_PFN + unsigned long pfn; + pgprot_t pg_prot; +#endif +}; + +struct drm_i915_validate_buffer { + struct drm_buffer_object *buffer; + int presumed_offset_correct; + void __user *data; + int ret; + enum i915_buf_idle idle; +}; + +/* + * I'd like to use MI_STORE_DATA_IMM here, but I can't make + * it work. Seems like GART writes are broken with that + * instruction. Also I'm not sure that MI_FLUSH will + * act as a memory barrier for that instruction. It will + * for this single dword 2D blit. 
+ */ + +static void i915_emit_ring_reloc(struct drm_device *dev, uint32_t offset, + uint32_t value) +{ + struct drm_i915_private *dev_priv = + (struct drm_i915_private *)dev->dev_private; + + RING_LOCALS; + i915_kernel_lost_context(dev); + BEGIN_LP_RING(6); + OUT_RING((0x02 << 29) | (0x40 << 22) | (0x3 << 20) | (0x3)); + OUT_RING((0x3 << 24) | (0xF0 << 16) | (0x40)); + OUT_RING((0x1 << 16) | (0x4)); + OUT_RING(offset); + OUT_RING(value); + OUT_RING(0); + ADVANCE_LP_RING(); +} + +static void i915_dereference_buffers_locked(struct drm_i915_validate_buffer + *buffers, unsigned num_buffers) +{ + while (num_buffers--) + drm_bo_usage_deref_locked(&buffers[num_buffers].buffer); +} + +int i915_apply_reloc(struct drm_file *file_priv, int num_buffers, + struct drm_i915_validate_buffer *buffers, + struct i915_relocatee_info *relocatee, uint32_t * reloc) +{ + unsigned index; + unsigned long new_cmd_offset; + u32 val; + int ret, i; + int buf_index = -1; + + /* + * FIXME: O(relocs * buffers) complexity. + */ + + for (i = 0; i <= num_buffers; i++) + if (buffers[i].buffer) + if (reloc[2] == buffers[i].buffer->base.hash.key) + buf_index = i; + + if (buf_index == -1) { + DRM_ERROR("Illegal relocation buffer %08X\n", reloc[2]); + return -EINVAL; + } + + /* + * Short-circuit relocations that were correctly + * guessed by the client + */ + if (buffers[buf_index].presumed_offset_correct && !DRM_DEBUG_RELOCATION) + return 0; + + new_cmd_offset = reloc[0]; + if (!relocatee->data_page || + !drm_bo_same_page(relocatee->offset, new_cmd_offset)) { + struct drm_bo_mem_reg *mem = &relocatee->buf->mem; + + drm_bo_kunmap(&relocatee->kmap); + relocatee->data_page = NULL; + relocatee->offset = new_cmd_offset; + + if (unlikely(relocatee->idle == I915_RELOC_UNCHECKED)) { + ret = drm_bo_wait(relocatee->buf, 0, 1, 0, 0); + if (ret) + return ret; + relocatee->idle = I915_RELOC_IDLE; + } + + if (unlikely((mem->mem_type != DRM_BO_MEM_LOCAL) && + (mem->flags & DRM_BO_FLAG_CACHED_MAPPED))) + drm_bo_evict_cached(relocatee->buf); + + ret = drm_bo_kmap(relocatee->buf, new_cmd_offset >> PAGE_SHIFT, + 1, &relocatee->kmap); + if (ret) { + DRM_ERROR + ("Could not map command buffer to apply relocs\n %08lx", + new_cmd_offset); + return ret; + } + relocatee->data_page = drm_bmo_virtual(&relocatee->kmap, + &relocatee->is_iomem); + relocatee->page_offset = (relocatee->offset & PAGE_MASK); + } + + val = buffers[buf_index].buffer->offset; + index = (reloc[0] - relocatee->page_offset) >> 2; + + /* add in validate */ + val = val + reloc[1]; + + if (DRM_DEBUG_RELOCATION) { + if (buffers[buf_index].presumed_offset_correct && + relocatee->data_page[index] != val) { + DRM_DEBUG + ("Relocation mismatch source %d target %d buffer %d user %08x kernel %08x\n", + reloc[0], reloc[1], buf_index, + relocatee->data_page[index], val); + } + } + + if (relocatee->is_iomem) + iowrite32(val, relocatee->data_page + index); + else + relocatee->data_page[index] = val; + return 0; +} + +int i915_process_relocs(struct drm_file *file_priv, + uint32_t buf_handle, + uint32_t __user ** reloc_user_ptr, + struct i915_relocatee_info *relocatee, + struct drm_i915_validate_buffer *buffers, + uint32_t num_buffers) +{ + int ret, reloc_stride; + uint32_t cur_offset; + uint32_t reloc_count; + uint32_t reloc_type; + uint32_t reloc_buf_size; + uint32_t *reloc_buf = NULL; + int i; + + /* do a copy from user from the user ptr */ + ret = get_user(reloc_count, *reloc_user_ptr); + if (ret) { + DRM_ERROR("Could not map relocation buffer.\n"); + goto out; + } + + ret = get_user(reloc_type, 
(*reloc_user_ptr) + 1); + if (ret) { + DRM_ERROR("Could not map relocation buffer.\n"); + goto out; + } + + if (reloc_type != 0) { + DRM_ERROR("Unsupported relocation type requested\n"); + ret = -EINVAL; + goto out; + } + + reloc_buf_size = + (I915_RELOC_HEADER + + (reloc_count * I915_RELOC0_STRIDE)) * sizeof(uint32_t); + reloc_buf = kmalloc(reloc_buf_size, GFP_KERNEL); + if (!reloc_buf) { + DRM_ERROR("Out of memory for reloc buffer\n"); + ret = -ENOMEM; + goto out; + } + + if (copy_from_user(reloc_buf, *reloc_user_ptr, reloc_buf_size)) { + ret = -EFAULT; + goto out; + } + + /* get next relocate buffer handle */ + *reloc_user_ptr = (uint32_t *) * (unsigned long *)&reloc_buf[2]; + + reloc_stride = I915_RELOC0_STRIDE * sizeof(uint32_t); /* may be different for other types of relocs */ + + DRM_DEBUG("num relocs is %d, next is %p\n", reloc_count, + *reloc_user_ptr); + + for (i = 0; i < reloc_count; i++) { + cur_offset = I915_RELOC_HEADER + (i * I915_RELOC0_STRIDE); + + ret = i915_apply_reloc(file_priv, num_buffers, buffers, + relocatee, reloc_buf + cur_offset); + if (ret) + goto out; + } + + out: + if (reloc_buf) + kfree(reloc_buf); + + if (relocatee->data_page) { + drm_bo_kunmap(&relocatee->kmap); + relocatee->data_page = NULL; + } + + return ret; +} + +static int i915_exec_reloc(struct drm_file *file_priv, drm_handle_t buf_handle, + uint32_t __user * reloc_user_ptr, + struct drm_i915_validate_buffer *buffers, + uint32_t buf_count) +{ + struct drm_device *dev = file_priv->minor->dev; + struct i915_relocatee_info relocatee; + int ret = 0; + int b; + + /* + * Short circuit relocations when all previous + * buffers offsets were correctly guessed by + * the client + */ + if (!DRM_DEBUG_RELOCATION) { + for (b = 0; b < buf_count; b++) + if (!buffers[b].presumed_offset_correct) + break; + + if (b == buf_count) + return 0; + } + + memset(&relocatee, 0, sizeof(relocatee)); + relocatee.idle = I915_RELOC_UNCHECKED; + + mutex_lock(&dev->struct_mutex); + relocatee.buf = drm_lookup_buffer_object(file_priv, buf_handle, 1); + mutex_unlock(&dev->struct_mutex); + if (!relocatee.buf) { + DRM_DEBUG("relocatee buffer invalid %08x\n", buf_handle); + ret = -EINVAL; + goto out_err; + } + + mutex_lock(&relocatee.buf->mutex); + while (reloc_user_ptr) { + ret = + i915_process_relocs(file_priv, buf_handle, &reloc_user_ptr, + &relocatee, buffers, buf_count); + if (ret) { + DRM_ERROR("process relocs failed\n"); + goto out_err1; + } + } + + out_err1: + mutex_unlock(&relocatee.buf->mutex); + drm_bo_usage_deref_unlocked(&relocatee.buf); + out_err: + return ret; +} + +static void i915_clear_relocatee(struct i915_relocatee_info *relocatee) +{ + if (relocatee->data_page) { +#ifndef DRM_KMAP_ATOMIC_PROT_PFN + drm_bo_kunmap(&relocatee->kmap); +#else + kunmap_atomic(relocatee->data_page, KM_USER0); +#endif + relocatee->data_page = NULL; + } + relocatee->buf = NULL; + relocatee->dst = ~0; +} + +static int i915_update_relocatee(struct i915_relocatee_info *relocatee, + struct drm_i915_validate_buffer *buffers, + unsigned int dst, unsigned long dst_offset) +{ + int ret; + + if (unlikely(dst != relocatee->dst || NULL == relocatee->buf)) { + i915_clear_relocatee(relocatee); + relocatee->dst = dst; + relocatee->buf = buffers[dst].buffer; + relocatee->idle = buffers[dst].idle; + + /* + * Check for buffer idle. If the buffer is busy, revert to + * ring relocations. 
+ */ + + if (relocatee->idle == I915_RELOC_UNCHECKED) { + preempt_enable(); + mutex_lock(&relocatee->buf->mutex); + + ret = drm_bo_wait(relocatee->buf, 0, 1, 1, 0); + if (ret == 0) + relocatee->idle = I915_RELOC_IDLE; + else { + relocatee->idle = I915_RELOC_BUSY; + relocatee->performed_ring_relocs = 1; + } + mutex_unlock(&relocatee->buf->mutex); + preempt_disable(); + buffers[dst].idle = relocatee->idle; + } + } + + if (relocatee->idle == I915_RELOC_BUSY) + return 0; + + if (unlikely(dst_offset > relocatee->buf->num_pages * PAGE_SIZE)) { + DRM_ERROR("Relocation destination out of bounds.\n"); + return -EINVAL; + } + if (unlikely(!drm_bo_same_page(relocatee->page_offset, dst_offset) || + NULL == relocatee->data_page)) { +#ifdef DRM_KMAP_ATOMIC_PROT_PFN + if (NULL != relocatee->data_page) { + kunmap_atomic(relocatee->data_page, KM_USER0); + relocatee->data_page = NULL; + } + ret = drm_bo_pfn_prot(relocatee->buf, dst_offset, + &relocatee->pfn, &relocatee->pg_prot); + if (ret) { + DRM_ERROR("Can't map relocation destination.\n"); + return -EINVAL; + } + relocatee->data_page = + kmap_atomic_prot_pfn(relocatee->pfn, KM_USER0, + relocatee->pg_prot); +#else + if (NULL != relocatee->data_page) { + drm_bo_kunmap(&relocatee->kmap); + relocatee->data_page = NULL; + } + + ret = drm_bo_kmap(relocatee->buf, dst_offset >> PAGE_SHIFT, + 1, &relocatee->kmap); + if (ret) { + DRM_ERROR("Can't map relocation destination.\n"); + return ret; + } + + relocatee->data_page = drm_bmo_virtual(&relocatee->kmap, + &relocatee->is_iomem); +#endif + relocatee->page_offset = dst_offset & PAGE_MASK; + } + return 0; +} + +static int i915_apply_post_reloc(uint32_t reloc[], + struct drm_i915_validate_buffer *buffers, + uint32_t num_buffers, + struct i915_relocatee_info *relocatee) +{ + uint32_t reloc_buffer = reloc[2]; + uint32_t dst_buffer = reloc[3]; + uint32_t val; + uint32_t index; + int ret; + + if (likely(buffers[reloc_buffer].presumed_offset_correct)) + return 0; + if (unlikely(reloc_buffer >= num_buffers)) { + DRM_ERROR("Invalid reloc buffer index.\n"); + return -EINVAL; + } + if (unlikely(dst_buffer >= num_buffers)) { + DRM_ERROR("Invalid dest buffer index.\n"); + return -EINVAL; + } + + ret = i915_update_relocatee(relocatee, buffers, dst_buffer, reloc[0]); + if (unlikely(ret)) + return ret; + + val = buffers[reloc_buffer].buffer->offset; + index = (reloc[0] - relocatee->page_offset) >> 2; + val = val + reloc[1]; + + if (relocatee->idle == I915_RELOC_BUSY) { + i915_emit_ring_reloc(relocatee->buf->dev, + relocatee->buf->offset + reloc[0], val); + return 0; + } +#ifdef DRM_KMAP_ATOMIC_PROT_PFN + relocatee->data_page[index] = val; +#else + if (likely(relocatee->is_iomem)) + iowrite32(val, relocatee->data_page + index); + else + relocatee->data_page[index] = val; +#endif + + return 0; +} + +static int i915_post_relocs(struct drm_file *file_priv, + uint32_t __user * new_reloc_ptr, + struct drm_i915_validate_buffer *buffers, + unsigned int num_buffers) +{ + uint32_t *reloc; + uint32_t reloc_stride = I915_RELOC0_STRIDE * sizeof(uint32_t); + uint32_t header_size = I915_RELOC_HEADER * sizeof(uint32_t); + struct i915_relocatee_info relocatee; + uint32_t reloc_type; + uint32_t num_relocs; + uint32_t count; + int ret = 0; + int i; + int short_circuit = 1; + uint32_t __user *reloc_ptr; + uint64_t new_reloc_data; + uint32_t reloc_buf_size; + uint32_t *reloc_buf; + + for (i = 0; i < num_buffers; ++i) { + if (unlikely(!buffers[i].presumed_offset_correct)) { + short_circuit = 0; + break; + } + } + + if (likely(short_circuit)) + return 
0; + + memset(&relocatee, 0, sizeof(relocatee)); + + while (new_reloc_ptr) { + reloc_ptr = new_reloc_ptr; + + ret = get_user(num_relocs, reloc_ptr); + if (unlikely(ret)) + goto out; + if (unlikely(!access_ok(VERIFY_READ, reloc_ptr, + header_size + + num_relocs * reloc_stride))) + return -EFAULT; + + ret = __get_user(reloc_type, reloc_ptr + 1); + if (unlikely(ret)) + goto out; + + if (unlikely(reloc_type != 1)) { + DRM_ERROR("Unsupported relocation type requested.\n"); + ret = -EINVAL; + goto out; + } + + ret = __get_user(new_reloc_data, reloc_ptr + 2); + new_reloc_ptr = (uint32_t __user *) (unsigned long) + new_reloc_data; + + reloc_ptr += I915_RELOC_HEADER; + + if (num_relocs == 0) + goto out; + + reloc_buf_size = + (num_relocs * I915_RELOC0_STRIDE) * sizeof(uint32_t); + reloc_buf = kmalloc(reloc_buf_size, GFP_KERNEL); + if (!reloc_buf) { + DRM_ERROR("Out of memory for reloc buffer\n"); + ret = -ENOMEM; + goto out; + } + + if (__copy_from_user(reloc_buf, reloc_ptr, reloc_buf_size)) { + ret = -EFAULT; + goto out; + } + reloc = reloc_buf; + preempt_disable(); + for (count = 0; count < num_relocs; ++count) { + ret = i915_apply_post_reloc(reloc, buffers, + num_buffers, &relocatee); + if (unlikely(ret)) { + preempt_enable(); + goto out; + } + reloc += I915_RELOC0_STRIDE; + } + preempt_enable(); + + if (reloc_buf) { + kfree(reloc_buf); + reloc_buf = NULL; + } + i915_clear_relocatee(&relocatee); + } + + out: + /* + * Flush ring relocs so the command parser will pick them up. + */ + + if (relocatee.performed_ring_relocs) + (void)i915_emit_mi_flush(file_priv->minor->dev, 0); + + i915_clear_relocatee(&relocatee); + if (reloc_buf) { + kfree(reloc_buf); + reloc_buf = NULL; + } + + return ret; +} + +static int i915_check_presumed(struct drm_i915_op_arg *arg, + struct drm_buffer_object *bo, + uint32_t __user * data, int *presumed_ok) +{ + struct drm_bo_op_req *req = &arg->d.req; + uint32_t hint_offset; + uint32_t hint = req->bo_req.hint; + + *presumed_ok = 0; + + if (!(hint & DRM_BO_HINT_PRESUMED_OFFSET)) + return 0; + if (bo->offset == req->bo_req.presumed_offset) { + *presumed_ok = 1; + return 0; + } + + /* + * We need to turn off the HINT_PRESUMED_OFFSET for this buffer in + * the user-space IOCTL argument list, since the buffer has moved, + * we're about to apply relocations and we might subsequently + * hit an -EAGAIN. In that case the argument list will be reused by + * user-space, but the presumed offset is no longer valid. + * + * Needless to say, this is a bit ugly. 
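For context on the hint handling above, here is a rough sketch of the user-space side; the field names come from the structures used in this file, but the helper and its calling sequence are hypothetical.

/* Illustrative only: user space passes the offset it linked the batch
 * against; if the buffer is still there, the kernel can skip relocations. */
void sample_fill_validate_req(struct drm_i915_op_arg *arg,
			      uint32_t bo_handle, uint64_t last_offset)
{
	struct drm_bo_op_req *req = &arg->d.req;

	/* Other req setup (op = drm_bo_validate, flags, mask, ...) omitted. */
	req->bo_req.handle = bo_handle;
	req->bo_req.presumed_offset = last_offset;	/* from prior copyback */
	req->bo_req.hint |= DRM_BO_HINT_PRESUMED_OFFSET;
	/* If the buffer has moved, i915_check_presumed() clears this hint in
	 * the copied-back arg, so a rerun after -EAGAIN won't trust the stale
	 * offset. */
}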
+ */ + + hint_offset = (uint32_t *) & req->bo_req.hint - (uint32_t *) arg; + hint &= ~DRM_BO_HINT_PRESUMED_OFFSET; + return __put_user(hint, data + hint_offset); +} + +/* + * Validate, add fence and relocate a block of bos from a userspace list + */ +int i915_validate_buffer_list(struct drm_file *file_priv, + unsigned int fence_class, uint64_t data, + struct drm_i915_validate_buffer *buffers, + uint32_t * num_buffers, + uint32_t __user ** post_relocs) +{ + struct drm_i915_op_arg arg; + struct drm_bo_op_req *req = &arg.d.req; + int ret = 0; + unsigned buf_count = 0; + uint32_t buf_handle; + uint32_t __user *reloc_user_ptr; + struct drm_i915_validate_buffer *item = buffers; + *post_relocs = NULL; + + do { + if (buf_count >= *num_buffers) { + DRM_ERROR("Buffer count exceeded %d\n.", *num_buffers); + ret = -EINVAL; + goto out_err; + } + item = buffers + buf_count; + item->buffer = NULL; + item->presumed_offset_correct = 0; + item->idle = I915_RELOC_UNCHECKED; + + if (copy_from_user + (&arg, (void __user *)(unsigned long)data, sizeof(arg))) { + ret = -EFAULT; + goto out_err; + } + + ret = 0; + if (req->op != drm_bo_validate) { + DRM_ERROR + ("Buffer object operation wasn't \"validate\".\n"); + ret = -EINVAL; + goto out_err; + } + item->ret = 0; + item->data = (void __user *)(unsigned long)data; + + buf_handle = req->bo_req.handle; + reloc_user_ptr = (uint32_t *) (unsigned long)arg.reloc_ptr; + + /* + * Switch mode to post-validation relocations? + */ + + if (unlikely((buf_count == 0) && (*post_relocs == NULL) && + (reloc_user_ptr != NULL))) { + uint32_t reloc_type; + + ret = get_user(reloc_type, reloc_user_ptr + 1); + if (ret) + goto out_err; + + if (reloc_type == 1) + *post_relocs = reloc_user_ptr; + + } + + if ((*post_relocs == NULL) && (reloc_user_ptr != NULL)) { + ret = + i915_exec_reloc(file_priv, buf_handle, + reloc_user_ptr, buffers, buf_count); + if (ret) + goto out_err; + DRM_MEMORYBARRIER(); + } + + ret = drm_bo_handle_validate(file_priv, req->bo_req.handle, + req->bo_req.flags, + req->bo_req.mask, req->bo_req.hint, + req->bo_req.fence_class, + NULL, &item->buffer); + if (ret) { + DRM_ERROR("error on handle validate %d\n", ret); + goto out_err; + } + + buf_count++; + + ret = i915_check_presumed(&arg, item->buffer, + (uint32_t __user *) + (unsigned long)data, + &item->presumed_offset_correct); + if (ret) + goto out_err; + + data = arg.next; + } while (data != 0); + out_err: + *num_buffers = buf_count; + item->ret = (ret != -EAGAIN) ? ret : 0; + return ret; +} + +/* + * Remove all buffers from the unfenced list. + * If the execbuffer operation was aborted, for example due to a signal, + * this also make sure that buffers retain their original state and + * fence pointers. + * Copy back buffer information to user-space unless we were interrupted + * by a signal. In which case the IOCTL must be rerun. 
+ */ + +static int i915_handle_copyback(struct drm_device *dev, + struct drm_i915_validate_buffer *buffers, + unsigned int num_buffers, int ret) +{ + int err = ret; + int i; + struct drm_i915_op_arg arg; + struct drm_buffer_object *bo; + + if (ret) + drm_putback_buffer_objects(dev); + + if (ret != -EAGAIN) { + for (i = 0; i < num_buffers; ++i) { + arg.handled = 1; + arg.d.rep.ret = buffers->ret; + bo = buffers->buffer; + mutex_lock(&bo->mutex); + drm_bo_fill_rep_arg(bo, &arg.d.rep.bo_info); + mutex_unlock(&bo->mutex); + if (__copy_to_user(buffers->data, &arg, sizeof(arg))) + err = -EFAULT; + buffers++; + } + } + + return err; +} + +/* + * Create a fence object, and if that fails, pretend that everything is + * OK and just idle the GPU. + */ + +void i915_fence_or_sync(struct drm_file *file_priv, + uint32_t fence_flags, + struct drm_fence_arg *fence_arg, + struct drm_fence_object **fence_p) +{ + struct drm_device *dev = file_priv->minor->dev; + int ret; + struct drm_fence_object *fence; + + ret = drm_fence_buffer_objects(dev, NULL, fence_flags, NULL, &fence); + + if (ret) { + + /* + * Fence creation failed. + * Fall back to synchronous operation and idle the engine. + */ + + (void)i915_emit_mi_flush(dev, MI_READ_FLUSH); + (void)i915_quiescent(dev); + + if (!(fence_flags & DRM_FENCE_FLAG_NO_USER)) { + + /* + * Communicate to user-space that + * fence creation has failed and that + * the engine is idle. + */ + + fence_arg->handle = ~0; + fence_arg->error = ret; + } + drm_putback_buffer_objects(dev); + if (fence_p) + *fence_p = NULL; + return; + } + + if (!(fence_flags & DRM_FENCE_FLAG_NO_USER)) { + + ret = drm_fence_add_user_object(file_priv, fence, + fence_flags & + DRM_FENCE_FLAG_SHAREABLE); + if (!ret) + drm_fence_fill_arg(fence, fence_arg); + else { + /* + * Fence user object creation failed. + * We must idle the engine here as well, as user- + * space expects a fence object to wait on. Since we + * have a fence object we wait for it to signal + * to indicate engine "sufficiently" idle. + */ + + (void)drm_fence_object_wait(fence, 0, 1, fence->type); + drm_fence_usage_deref_unlocked(&fence); + fence_arg->handle = ~0; + fence_arg->error = ret; + } + } + + if (fence_p) + *fence_p = fence; + else if (fence) + drm_fence_usage_deref_unlocked(&fence); +} + +int i915_execbuffer(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) + dev_priv->sarea_priv; + struct drm_i915_execbuffer *exec_buf = data; + struct drm_i915_batchbuffer *batch = &exec_buf->batch; + struct drm_fence_arg *fence_arg = &exec_buf->fence_arg; + int num_buffers; + int ret; + uint32_t __user *post_relocs; + + if (!dev_priv->allow_batchbuffer) { + DRM_ERROR("Batchbuffer ioctl disabled\n"); + return -EINVAL; + } + + if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects, + batch->num_cliprects * + sizeof(struct + drm_clip_rect))) + return -EFAULT; + + if (exec_buf->num_buffers > dev_priv->max_validate_buffers) + return -EINVAL; + + ret = drm_bo_read_lock(&dev->bm.bm_lock, 1); + if (ret) + return ret; + + /* + * The cmdbuf_mutex makes sure the validate-submit-fence + * operation is atomic. 
+ */ + + ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex); + if (ret) { + drm_bo_read_unlock(&dev->bm.bm_lock); + return -EAGAIN; + } + + num_buffers = exec_buf->num_buffers; + + if (!dev_priv->val_bufs) { + dev_priv->val_bufs = + vmalloc(sizeof(struct drm_i915_validate_buffer) * + dev_priv->max_validate_buffers); + } + if (!dev_priv->val_bufs) { + drm_bo_read_unlock(&dev->bm.bm_lock); + mutex_unlock(&dev_priv->cmdbuf_mutex); + return -ENOMEM; + } + + /* validate buffer list + fixup relocations */ + ret = i915_validate_buffer_list(file_priv, 0, exec_buf->ops_list, + dev_priv->val_bufs, &num_buffers, + &post_relocs); + if (ret) + goto out_err0; + + if (post_relocs) { + ret = i915_post_relocs(file_priv, post_relocs, + dev_priv->val_bufs, num_buffers); + if (ret) + goto out_err0; + } + + /* make sure all previous memory operations have passed */ + DRM_MEMORYBARRIER(); + + if (!post_relocs) { + drm_agp_chipset_flush(dev); + batch->start = + dev_priv->val_bufs[num_buffers - 1].buffer->offset; + } else { + batch->start += dev_priv->val_bufs[0].buffer->offset; + } + + DRM_DEBUG("i915 exec batchbuffer, start %x used %d cliprects %d\n", + batch->start, batch->used, batch->num_cliprects); + + ret = i915_dispatch_batchbuffer(dev, batch); + if (ret) + goto out_err0; + if (sarea_priv) + sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); + i915_fence_or_sync(file_priv, fence_arg->flags, fence_arg, NULL); + + out_err0: + ret = i915_handle_copyback(dev, dev_priv->val_bufs, num_buffers, ret); + mutex_lock(&dev->struct_mutex); + i915_dereference_buffers_locked(dev_priv->val_bufs, num_buffers); + mutex_unlock(&dev->struct_mutex); + mutex_unlock(&dev_priv->cmdbuf_mutex); + drm_bo_read_unlock(&dev->bm.bm_lock); + return ret; +} diff -Napur -X /home/jbarnes/dontdiff gpu/drm/i915/i915_fence.c /tmp/drm-master/drivers/gpu/drm/i915/i915_fence.c --- gpu/drm/i915/i915_fence.c 1969-12-31 16:00:00.000000000 -0800 +++ /tmp/drm-master/drivers/gpu/drm/i915/i915_fence.c 2008-08-21 14:34:10.000000000 -0700 @@ -0,0 +1,273 @@ +/************************************************************************** + * + * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. 
+ * + * + **************************************************************************/ +/* + * Authors: Thomas Hellström + */ + +#include "drmP.h" +#include "drm.h" +#include "i915_drm.h" +#include "i915_drv.h" + +/* + * Initiate a sync flush if it's not already pending. + */ + +static inline void i915_initiate_rwflush(struct drm_i915_private *dev_priv, + struct drm_fence_class_manager *fc) +{ + if ((fc->pending_flush & DRM_I915_FENCE_TYPE_RW) && + !dev_priv->flush_pending) { + dev_priv->flush_sequence = (uint32_t) READ_BREADCRUMB(dev_priv); + dev_priv->flush_flags = fc->pending_flush; + dev_priv->saved_flush_status = READ_HWSP(dev_priv, 0); + I915_WRITE(INSTPM, (1 << 5) | (1 << 21)); + dev_priv->flush_pending = 1; + fc->pending_flush &= ~DRM_I915_FENCE_TYPE_RW; + } +} + +static inline void i915_report_rwflush(struct drm_device *dev, + struct drm_i915_private *dev_priv) +{ + if (unlikely(dev_priv->flush_pending)) { + + uint32_t flush_flags; + uint32_t i_status; + uint32_t flush_sequence; + + i_status = READ_HWSP(dev_priv, 0); + if ((i_status & (1 << 12)) != + (dev_priv->saved_flush_status & (1 << 12))) { + flush_flags = dev_priv->flush_flags; + flush_sequence = dev_priv->flush_sequence; + dev_priv->flush_pending = 0; + drm_fence_handler(dev, 0, flush_sequence, + flush_flags, 0); + } + } +} + +static void i915_fence_flush(struct drm_device *dev, + uint32_t fence_class) +{ + struct drm_i915_private *dev_priv = + (struct drm_i915_private *) dev->dev_private; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_class_manager *fc = &fm->fence_class[0]; + unsigned long irq_flags; + + if (unlikely(!dev_priv)) + return; + + write_lock_irqsave(&fm->lock, irq_flags); + i915_initiate_rwflush(dev_priv, fc); + write_unlock_irqrestore(&fm->lock, irq_flags); +} + + +static void i915_fence_poll(struct drm_device *dev, uint32_t fence_class, + uint32_t waiting_types) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_class_manager *fc = &fm->fence_class[0]; + uint32_t sequence; + + if (unlikely(!dev_priv)) + return; + + /* + * First, report any executed sync flush: + */ + + i915_report_rwflush(dev, dev_priv); + + /* + * Report A new breadcrumb, and adjust IRQs. + */ + + if (waiting_types & DRM_FENCE_TYPE_EXE) { + + sequence = READ_BREADCRUMB(dev_priv); + drm_fence_handler(dev, 0, sequence, + DRM_FENCE_TYPE_EXE, 0); + + if (dev_priv->fence_irq_on && + !(fc->waiting_types & DRM_FENCE_TYPE_EXE)) { + i915_user_irq_off(dev_priv); + dev_priv->fence_irq_on = 0; + } else if (!dev_priv->fence_irq_on && + (fc->waiting_types & DRM_FENCE_TYPE_EXE)) { + i915_user_irq_on(dev_priv); + dev_priv->fence_irq_on = 1; + } + } + + /* + * There may be new RW flushes pending. Start them. + */ + + i915_initiate_rwflush(dev_priv, fc); + + /* + * And possibly, but unlikely, they finish immediately. 
+ */ + + i915_report_rwflush(dev, dev_priv); + +} + +static int i915_fence_emit_sequence(struct drm_device *dev, uint32_t class, + uint32_t flags, uint32_t *sequence, + uint32_t *native_type) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + if (unlikely(!dev_priv)) + return -EINVAL; + + i915_emit_irq(dev); + *sequence = (uint32_t) dev_priv->counter; + *native_type = DRM_FENCE_TYPE_EXE; + if (flags & DRM_I915_FENCE_FLAG_FLUSHED) + *native_type |= DRM_I915_FENCE_TYPE_RW; + + return 0; +} + +void i915_fence_handler(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_class_manager *fc = &fm->fence_class[0]; + + write_lock(&fm->lock); + if (likely(dev_priv->fence_irq_on)) + i915_fence_poll(dev, 0, fc->waiting_types); + write_unlock(&fm->lock); +} + +/* + * We need a separate wait function since we need to poll for + * sync flushes. + */ + +static int i915_fence_wait(struct drm_fence_object *fence, + int lazy, int interruptible, uint32_t mask) +{ + struct drm_device *dev = fence->dev; + drm_i915_private_t *dev_priv = (struct drm_i915_private *) dev->dev_private; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_class_manager *fc = &fm->fence_class[0]; + int ret; + unsigned long _end = jiffies + 3 * DRM_HZ; + + drm_fence_object_flush(fence, mask); + if (likely(interruptible)) + ret = wait_event_interruptible_timeout + (fc->fence_queue, drm_fence_object_signaled(fence, DRM_FENCE_TYPE_EXE), + 3 * DRM_HZ); + else + ret = wait_event_timeout + (fc->fence_queue, drm_fence_object_signaled(fence, DRM_FENCE_TYPE_EXE), + 3 * DRM_HZ); + + if (unlikely(ret == -ERESTARTSYS)) + return -EAGAIN; + + if (unlikely(ret == 0)) + return -EBUSY; + + if (likely(mask == DRM_FENCE_TYPE_EXE || + drm_fence_object_signaled(fence, mask))) + return 0; + + /* + * Remove this code snippet when fixed. HWSTAM doesn't let + * flush info through... + */ + + if (unlikely(dev_priv && !dev_priv->irq_enabled)) { + unsigned long irq_flags; + + DRM_ERROR("X server disabled IRQs before releasing frame buffer.\n"); + msleep(100); + dev_priv->flush_pending = 0; + write_lock_irqsave(&fm->lock, irq_flags); + drm_fence_handler(dev, fence->fence_class, + fence->sequence, fence->type, 0); + write_unlock_irqrestore(&fm->lock, irq_flags); + } + + /* + * Poll for sync flush completion. 
+ */ + + return drm_fence_wait_polling(fence, lazy, interruptible, mask, _end); +} + +static uint32_t i915_fence_needed_flush(struct drm_fence_object *fence) +{ + uint32_t flush_flags = fence->waiting_types & + ~(DRM_FENCE_TYPE_EXE | fence->signaled_types); + + if (likely(flush_flags == 0 || + ((flush_flags & ~fence->native_types) == 0) || + (fence->signaled_types != DRM_FENCE_TYPE_EXE))) + return 0; + else { + struct drm_device *dev = fence->dev; + struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private; + struct drm_fence_driver *driver = dev->driver->fence_driver; + + if (unlikely(!dev_priv)) + return 0; + + if (dev_priv->flush_pending) { + uint32_t diff = (dev_priv->flush_sequence - fence->sequence) & + driver->sequence_mask; + + if (diff < driver->wrap_diff) + return 0; + } + } + return flush_flags; +} + +struct drm_fence_driver i915_fence_driver = { + .num_classes = 1, + .wrap_diff = (1U << (BREADCRUMB_BITS - 1)), + .flush_diff = (1U << (BREADCRUMB_BITS - 2)), + .sequence_mask = BREADCRUMB_MASK, + .has_irq = NULL, + .emit = i915_fence_emit_sequence, + .flush = i915_fence_flush, + .poll = i915_fence_poll, + .needed_flush = i915_fence_needed_flush, + .wait = i915_fence_wait, +}; diff -Napur -X /home/jbarnes/dontdiff gpu/drm/i915/i915_ioc32.c /tmp/drm-master/drivers/gpu/drm/i915/i915_ioc32.c --- gpu/drm/i915/i915_ioc32.c 2008-08-21 13:52:27.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/i915/i915_ioc32.c 2008-08-21 14:34:09.000000000 -0700 @@ -34,6 +34,7 @@ #include "drmP.h" #include "drm.h" #include "i915_drm.h" +#include "i915_drv.h" typedef struct _drm_i915_batchbuffer32 { int start; /* agp offset */ @@ -41,7 +42,7 @@ typedef struct _drm_i915_batchbuffer32 { int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */ int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */ int num_cliprects; /* mulitpass with multiple cliprects? */ - u32 cliprects; /* pointer to userspace cliprects */ + u32 cliprects; /* pointer to userspace cliprects */ } drm_i915_batchbuffer32_t; static int compat_i915_batchbuffer(struct file *file, unsigned int cmd, @@ -66,18 +67,18 @@ static int compat_i915_batchbuffer(struc &batchbuffer->cliprects)) return -EFAULT; - return drm_ioctl(file->f_path.dentry->d_inode, file, + return drm_ioctl(file->f_dentry->d_inode, file, DRM_IOCTL_I915_BATCHBUFFER, - (unsigned long)batchbuffer); + (unsigned long) batchbuffer); } typedef struct _drm_i915_cmdbuffer32 { - u32 buf; /* pointer to userspace command buffer */ + u32 buf; /* pointer to userspace command buffer */ int sz; /* nr bytes in buf */ int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */ int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */ int num_cliprects; /* mulitpass with multiple cliprects? 
*/ - u32 cliprects; /* pointer to userspace cliprects */ + u32 cliprects; /* pointer to userspace cliprects */ } drm_i915_cmdbuffer32_t; static int compat_i915_cmdbuffer(struct file *file, unsigned int cmd, @@ -102,8 +103,8 @@ static int compat_i915_cmdbuffer(struct &cmdbuffer->cliprects)) return -EFAULT; - return drm_ioctl(file->f_path.dentry->d_inode, file, - DRM_IOCTL_I915_CMDBUFFER, (unsigned long)cmdbuffer); + return drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_I915_CMDBUFFER, (unsigned long) cmdbuffer); } typedef struct drm_i915_irq_emit32 { @@ -116,7 +117,7 @@ static int compat_i915_irq_emit(struct f drm_i915_irq_emit32_t req32; drm_i915_irq_emit_t __user *request; - if (copy_from_user(&req32, (void __user *)arg, sizeof(req32))) + if (copy_from_user(&req32, (void __user *) arg, sizeof(req32))) return -EFAULT; request = compat_alloc_user_space(sizeof(*request)); @@ -125,8 +126,8 @@ static int compat_i915_irq_emit(struct f &request->irq_seq)) return -EFAULT; - return drm_ioctl(file->f_path.dentry->d_inode, file, - DRM_IOCTL_I915_IRQ_EMIT, (unsigned long)request); + return drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_I915_IRQ_EMIT, (unsigned long) request); } typedef struct drm_i915_getparam32 { int param; @@ -139,7 +140,7 @@ static int compat_i915_getparam(struct f drm_i915_getparam32_t req32; drm_i915_getparam_t __user *request; - if (copy_from_user(&req32, (void __user *)arg, sizeof(req32))) + if (copy_from_user(&req32, (void __user *) arg, sizeof(req32))) return -EFAULT; request = compat_alloc_user_space(sizeof(*request)); @@ -149,8 +150,8 @@ static int compat_i915_getparam(struct f &request->value)) return -EFAULT; - return drm_ioctl(file->f_path.dentry->d_inode, file, - DRM_IOCTL_I915_GETPARAM, (unsigned long)request); + return drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_I915_GETPARAM, (unsigned long) request); } typedef struct drm_i915_mem_alloc32 { @@ -166,7 +167,7 @@ static int compat_i915_alloc(struct file drm_i915_mem_alloc32_t req32; drm_i915_mem_alloc_t __user *request; - if (copy_from_user(&req32, (void __user *)arg, sizeof(req32))) + if (copy_from_user(&req32, (void __user *) arg, sizeof(req32))) return -EFAULT; request = compat_alloc_user_space(sizeof(*request)); @@ -178,16 +179,75 @@ static int compat_i915_alloc(struct file &request->region_offset)) return -EFAULT; - return drm_ioctl(file->f_path.dentry->d_inode, file, - DRM_IOCTL_I915_ALLOC, (unsigned long)request); + return drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_I915_ALLOC, (unsigned long) request); } +typedef struct drm_i915_execbuffer32 { + uint64_t ops_list; + uint32_t num_buffers; + struct _drm_i915_batchbuffer32 batch; + drm_context_t context; + struct drm_fence_arg fence_arg; +} drm_i915_execbuffer32_t; + +static int compat_i915_execbuffer(struct file *file, unsigned int cmd, + unsigned long arg) +{ + drm_i915_execbuffer32_t req32; + struct drm_i915_execbuffer __user *request; + int err; + + if (copy_from_user(&req32, (void __user *) arg, sizeof(req32))) + return -EFAULT; + + request = compat_alloc_user_space(sizeof(*request)); + + if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) + || __put_user(req32.ops_list, &request->ops_list) + || __put_user(req32.num_buffers, &request->num_buffers) + || __put_user(req32.context, &request->context) + || __copy_to_user(&request->fence_arg, &req32.fence_arg, + sizeof(req32.fence_arg)) + || __put_user(req32.batch.start, &request->batch.start) + || __put_user(req32.batch.used, &request->batch.used) + || __put_user(req32.batch.DR1, 
&request->batch.DR1) + || __put_user(req32.batch.DR4, &request->batch.DR4) + || __put_user(req32.batch.num_cliprects, + &request->batch.num_cliprects) + || __put_user((int __user *)(unsigned long)req32.batch.cliprects, + &request->batch.cliprects)) + return -EFAULT; + + err = drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_I915_EXECBUFFER, (unsigned long)request); + + if (err) + return err; + + if (__get_user(req32.fence_arg.handle, &request->fence_arg.handle) + || __get_user(req32.fence_arg.fence_class, &request->fence_arg.fence_class) + || __get_user(req32.fence_arg.type, &request->fence_arg.type) + || __get_user(req32.fence_arg.flags, &request->fence_arg.flags) + || __get_user(req32.fence_arg.signaled, &request->fence_arg.signaled) + || __get_user(req32.fence_arg.error, &request->fence_arg.error) + || __get_user(req32.fence_arg.sequence, &request->fence_arg.sequence)) + return -EFAULT; + + if (copy_to_user((void __user *)arg, &req32, sizeof(req32))) + return -EFAULT; + + return 0; +} + + drm_ioctl_compat_t *i915_compat_ioctls[] = { [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer, [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer, [DRM_I915_GETPARAM] = compat_i915_getparam, [DRM_I915_IRQ_EMIT] = compat_i915_irq_emit, - [DRM_I915_ALLOC] = compat_i915_alloc + [DRM_I915_ALLOC] = compat_i915_alloc, + [DRM_I915_EXECBUFFER] = compat_i915_execbuffer, }; /** @@ -213,9 +273,9 @@ long i915_compat_ioctl(struct file *filp lock_kernel(); /* XXX for now */ if (fn != NULL) - ret = (*fn) (filp, cmd, arg); + ret = (*fn)(filp, cmd, arg); else - ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg); + ret = drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg); unlock_kernel(); return ret; diff -Napur -X /home/jbarnes/dontdiff gpu/drm/i915/i915_irq.c /tmp/drm-master/drivers/gpu/drm/i915/i915_irq.c --- gpu/drm/i915/i915_irq.c 2008-08-21 14:43:42.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/i915/i915_irq.c 2008-08-21 14:34:09.000000000 -0700 @@ -33,31 +33,109 @@ #define MAX_NOPID ((u32)~0) -/** These are the interrupts used by the driver */ -#define I915_INTERRUPT_ENABLE_MASK (I915_USER_INTERRUPT | \ - I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | \ - I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT | \ - I915_ASLE_INTERRUPT | \ - I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) - -void -i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask) -{ - if ((dev_priv->irq_mask_reg & mask) != 0) { - dev_priv->irq_mask_reg &= ~mask; - I915_WRITE(IMR, dev_priv->irq_mask_reg); - (void) I915_READ(IMR); - } +/** + * i915_get_pipe - return the the pipe associated with a given plane + * @dev: DRM device + * @plane: plane to look for + * + * The Intel Mesa & 2D drivers call the vblank routines with a plane number + * rather than a pipe number, since they may not always be equal. This routine + * maps the given @plane back to a pipe number. + */ +static int +i915_get_pipe(struct drm_device *dev, int plane) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + u32 dspcntr; + + dspcntr = plane ? I915_READ(DSPBCNTR) : I915_READ(DSPACNTR); + + return dspcntr & DISPPLANE_SEL_PIPE_MASK ? 1 : 0; +} + +/** + * i915_get_plane - return the the plane associated with a given pipe + * @dev: DRM device + * @pipe: pipe to look for + * + * The Intel Mesa & 2D drivers call the vblank routines with a plane number + * rather than a plane number, since they may not always be equal. This routine + * maps the given @pipe back to a plane number. 
+ */ +static int +i915_get_plane(struct drm_device *dev, int pipe) +{ + if (i915_get_pipe(dev, 0) == pipe) + return 0; + return 1; +} + +/** + * i915_pipe_enabled - check if a pipe is enabled + * @dev: DRM device + * @pipe: pipe to check + * + * Reading certain registers when the pipe is disabled can hang the chip. + * Use this routine to make sure the PLL is running and the pipe is active + * before reading such registers if unsure. + */ +static int +i915_pipe_enabled(struct drm_device *dev, int pipe) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF; + + if (I915_READ(pipeconf) & PIPEACONF_ENABLE) + return 1; + + return 0; } -static inline void -i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask) -{ - if ((dev_priv->irq_mask_reg & mask) != mask) { - dev_priv->irq_mask_reg |= mask; - I915_WRITE(IMR, dev_priv->irq_mask_reg); - (void) I915_READ(IMR); +/** + * Emit a synchronous flip. + * + * This function must be called with the drawable spinlock held. + */ +static void +i915_dispatch_vsync_flip(struct drm_device *dev, struct drm_drawable_info *drw, + int plane) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv; + u16 x1, y1, x2, y2; + int pf_planes = 1 << plane; + + DRM_SPINLOCK_ASSERT(&dev->drw_lock); + + /* If the window is visible on the other plane, we have to flip on that + * plane as well. + */ + if (plane == 1) { + x1 = sarea_priv->planeA_x; + y1 = sarea_priv->planeA_y; + x2 = x1 + sarea_priv->planeA_w; + y2 = y1 + sarea_priv->planeA_h; + } else { + x1 = sarea_priv->planeB_x; + y1 = sarea_priv->planeB_y; + x2 = x1 + sarea_priv->planeB_w; + y2 = y1 + sarea_priv->planeB_h; } + + if (x2 > 0 && y2 > 0) { + int i, num_rects = drw->num_rects; + struct drm_clip_rect *rect = drw->rects; + + for (i = 0; i < num_rects; i++) + if (!(rect[i].x1 >= x2 || rect[i].y1 >= y2 || + rect[i].x2 <= x1 || rect[i].y2 <= y1)) { + pf_planes = 0x3; + + break; + } + } + + i915_dispatch_flip(dev, pf_planes, 1); } /** @@ -68,23 +146,22 @@ i915_disable_irq(drm_i915_private_t *dev static void i915_vblank_tasklet(struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - unsigned long irqflags; struct list_head *list, *tmp, hits, *hit; - int nhits, nrects, slice[2], upper[2], lower[2], i; - unsigned counter[2] = { atomic_read(&dev->vbl_received), - atomic_read(&dev->vbl_received2) }; + int nhits, nrects, slice[2], upper[2], lower[2], i, num_pages; + unsigned counter[2]; struct drm_drawable_info *drw; drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv; - u32 cpp = dev_priv->cpp; + u32 cpp = dev_priv->cpp, offsets[3]; u32 cmd = (cpp == 4) ? 
(XY_SRC_COPY_BLT_CMD | XY_SRC_COPY_BLT_WRITE_ALPHA | XY_SRC_COPY_BLT_WRITE_RGB) : XY_SRC_COPY_BLT_CMD; u32 src_pitch = sarea_priv->pitch * cpp; u32 dst_pitch = sarea_priv->pitch * cpp; + /* COPY rop (0xcc), map cpp to magic color depth constants */ u32 ropcpp = (0xcc << 16) | ((cpp - 1) << 24); RING_LOCALS; - + if (IS_I965G(dev) && sarea_priv->front_tiled) { cmd |= XY_SRC_COPY_BLT_DST_TILED; dst_pitch >>= 2; @@ -93,6 +170,9 @@ static void i915_vblank_tasklet(struct d cmd |= XY_SRC_COPY_BLT_SRC_TILED; src_pitch >>= 2; } + + counter[0] = drm_vblank_count(dev, 0); + counter[1] = drm_vblank_count(dev, 1); DRM_DEBUG("\n"); @@ -100,28 +180,35 @@ static void i915_vblank_tasklet(struct d nhits = nrects = 0; - spin_lock_irqsave(&dev_priv->swaps_lock, irqflags); + /* No irqsave/restore necessary. This tasklet may be run in an + * interrupt context or normal context, but we don't have to worry + * about getting interrupted by something acquiring the lock, because + * we are the interrupt context thing that acquires the lock. + */ + DRM_SPINLOCK(&dev_priv->swaps_lock); /* Find buffer swaps scheduled for this vertical blank */ list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) { drm_i915_vbl_swap_t *vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head); + int pipe = i915_get_pipe(dev, vbl_swap->plane); - if ((counter[vbl_swap->pipe] - vbl_swap->sequence) > (1<<23)) + if ((counter[pipe] - vbl_swap->sequence) > (1<<23)) continue; list_del(list); dev_priv->swaps_pending--; + drm_vblank_put(dev, pipe); - spin_unlock(&dev_priv->swaps_lock); - spin_lock(&dev->drw_lock); + DRM_SPINUNLOCK(&dev_priv->swaps_lock); + DRM_SPINLOCK(&dev->drw_lock); drw = drm_get_drawable_info(dev, vbl_swap->drw_id); if (!drw) { - spin_unlock(&dev->drw_lock); + DRM_SPINUNLOCK(&dev->drw_lock); drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER); - spin_lock(&dev_priv->swaps_lock); + DRM_SPINLOCK(&dev_priv->swaps_lock); continue; } @@ -138,7 +225,7 @@ static void i915_vblank_tasklet(struct d } } - spin_unlock(&dev->drw_lock); + DRM_SPINUNLOCK(&dev->drw_lock); /* List of hits was empty, or we reached the end of it */ if (hit == &hits) @@ -146,48 +233,29 @@ static void i915_vblank_tasklet(struct d nhits++; - spin_lock(&dev_priv->swaps_lock); + DRM_SPINLOCK(&dev_priv->swaps_lock); } + DRM_SPINUNLOCK(&dev_priv->swaps_lock); + if (nhits == 0) { - spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags); return; } - spin_unlock(&dev_priv->swaps_lock); - i915_kernel_lost_context(dev); - if (IS_I965G(dev)) { - BEGIN_LP_RING(4); - - OUT_RING(GFX_OP_DRAWRECT_INFO_I965); - OUT_RING(0); - OUT_RING(((sarea_priv->width - 1) & 0xffff) | ((sarea_priv->height - 1) << 16)); - OUT_RING(0); - ADVANCE_LP_RING(); - } else { - BEGIN_LP_RING(6); - - OUT_RING(GFX_OP_DRAWRECT_INFO); - OUT_RING(0); - OUT_RING(0); - OUT_RING(sarea_priv->width | sarea_priv->height << 16); - OUT_RING(sarea_priv->width | sarea_priv->height << 16); - OUT_RING(0); - - ADVANCE_LP_RING(); - } - - sarea_priv->ctxOwner = DRM_KERNEL_CONTEXT; - upper[0] = upper[1] = 0; - slice[0] = max(sarea_priv->pipeA_h / nhits, 1); - slice[1] = max(sarea_priv->pipeB_h / nhits, 1); - lower[0] = sarea_priv->pipeA_y + slice[0]; - lower[1] = sarea_priv->pipeB_y + slice[0]; + slice[0] = max(sarea_priv->planeA_h / nhits, 1); + slice[1] = max(sarea_priv->planeB_h / nhits, 1); + lower[0] = sarea_priv->planeA_y + slice[0]; + lower[1] = sarea_priv->planeB_y + slice[0]; + + offsets[0] = sarea_priv->front_offset; + offsets[1] = sarea_priv->back_offset; + offsets[2] = sarea_priv->third_offset; + num_pages = 
sarea_priv->third_handle ? 3 : 2; - spin_lock(&dev->drw_lock); + DRM_SPINLOCK(&dev->drw_lock); /* Emit blits for buffer swaps, partitioning both outputs into as many * slices as there are buffer swaps scheduled in order to avoid tearing @@ -197,6 +265,8 @@ static void i915_vblank_tasklet(struct d for (i = 0; i++ < nhits; upper[0] = lower[0], lower[0] += slice[0], upper[1] = lower[1], lower[1] += slice[1]) { + int init_drawrect = 1; + if (i == nhits) lower[0] = lower[1] = sarea_priv->height; @@ -204,7 +274,7 @@ static void i915_vblank_tasklet(struct d drm_i915_vbl_swap_t *swap_hit = list_entry(hit, drm_i915_vbl_swap_t, head); struct drm_clip_rect *rect; - int num_rects, pipe; + int num_rects, plane, front, back; unsigned short top, bottom; drw = drm_get_drawable_info(dev, swap_hit->drw_id); @@ -212,10 +282,50 @@ static void i915_vblank_tasklet(struct d if (!drw) continue; + plane = swap_hit->plane; + + if (swap_hit->flip) { + i915_dispatch_vsync_flip(dev, drw, plane); + continue; + } + + if (init_drawrect) { + int width = sarea_priv->width; + int height = sarea_priv->height; + if (IS_I965G(dev)) { + BEGIN_LP_RING(4); + + OUT_RING(GFX_OP_DRAWRECT_INFO_I965); + OUT_RING(0); + OUT_RING(((width - 1) & 0xffff) | ((height - 1) << 16)); + OUT_RING(0); + + ADVANCE_LP_RING(); + } else { + BEGIN_LP_RING(6); + + OUT_RING(GFX_OP_DRAWRECT_INFO); + OUT_RING(0); + OUT_RING(0); + OUT_RING(((width - 1) & 0xffff) | ((height - 1) << 16)); + OUT_RING(0); + OUT_RING(0); + + ADVANCE_LP_RING(); + } + + sarea_priv->ctxOwner = DRM_KERNEL_CONTEXT; + + init_drawrect = 0; + } + rect = drw->rects; - pipe = swap_hit->pipe; - top = upper[pipe]; - bottom = lower[pipe]; + top = upper[plane]; + bottom = lower[plane]; + + front = (dev_priv->sarea_priv->pf_current_page >> + (2 * plane)) & 0x3; + back = (front + 1) % num_pages; for (num_rects = drw->num_rects; num_rects--; rect++) { int y1 = max(rect->y1, top); @@ -230,17 +340,17 @@ static void i915_vblank_tasklet(struct d OUT_RING(ropcpp | dst_pitch); OUT_RING((y1 << 16) | rect->x1); OUT_RING((y2 << 16) | rect->x2); - OUT_RING(sarea_priv->front_offset); + OUT_RING(offsets[front]); OUT_RING((y1 << 16) | rect->x1); OUT_RING(src_pitch); - OUT_RING(sarea_priv->back_offset); + OUT_RING(offsets[back]); ADVANCE_LP_RING(); } } } - spin_unlock_irqrestore(&dev->drw_lock, irqflags); + DRM_SPINUNLOCK(&dev->drw_lock); list_for_each_safe(hit, tmp, &hits) { drm_i915_vbl_swap_t *swap_hit = @@ -252,77 +362,114 @@ static void i915_vblank_tasklet(struct d } } +u32 i915_get_vblank_counter(struct drm_device *dev, int plane) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + unsigned long high_frame; + unsigned long low_frame; + u32 high1, high2, low, count; + int pipe; + + pipe = i915_get_pipe(dev, plane); + high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH; + low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL; + + if (!i915_pipe_enabled(dev, pipe)) { + DRM_ERROR("trying to get vblank count for disabled pipe %d\n", pipe); + return 0; + } + + /* + * High & low register fields aren't synchronized, so make sure + * we get a low value that's stable across two reads of the high + * register. 
+ */ + do { + high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >> + PIPE_FRAME_HIGH_SHIFT); + low = ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >> + PIPE_FRAME_LOW_SHIFT); + high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >> + PIPE_FRAME_HIGH_SHIFT); + } while (high1 != high2); + + count = (high1 << 8) | low; + + return count; +} + irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) { struct drm_device *dev = (struct drm_device *) arg; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - u32 pipea_stats, pipeb_stats; u32 iir; + u32 pipea_stats, pipeb_stats; + int vblank = 0; - pipea_stats = I915_READ(PIPEASTAT); - pipeb_stats = I915_READ(PIPEBSTAT); - - if (dev->pdev->msi_enabled) - I915_WRITE(IMR, ~0); iir = I915_READ(IIR); + if (iir == 0) + return IRQ_NONE; - DRM_DEBUG("iir=%08x\n", iir); + /* + * Clear the PIPE(A|B)STAT regs before the IIR otherwise + * we may get extra interrupts. + */ + if (iir & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT) { + pipea_stats = I915_READ(PIPEASTAT); + if (pipea_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS| + PIPE_VBLANK_INTERRUPT_STATUS)) + { + vblank++; + drm_handle_vblank(dev, i915_get_plane(dev, 0)); + } - if (iir == 0) { - if (dev->pdev->msi_enabled) { - I915_WRITE(IMR, dev_priv->irq_mask_reg); - (void) I915_READ(IMR); + I915_WRITE(PIPEASTAT, pipea_stats); + } + if (iir & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) { + pipeb_stats = I915_READ(PIPEBSTAT); + /* Ack the event */ + I915_WRITE(PIPEBSTAT, pipeb_stats); + + /* The vblank interrupt gets enabled even if we didn't ask for + it, so make sure it's shut down again */ + if (!(dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B)) + pipeb_stats &= ~(I915_VBLANK_INTERRUPT_ENABLE); + + if (pipeb_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS| + PIPE_VBLANK_INTERRUPT_STATUS)) + { + vblank++; + drm_handle_vblank(dev, i915_get_plane(dev, 1)); } - return IRQ_NONE; + + if (pipeb_stats & I915_LEGACY_BLC_EVENT_ENABLE) + opregion_asle_intr(dev); + I915_WRITE(PIPEBSTAT, pipeb_stats); } - I915_WRITE(PIPEASTAT, pipea_stats); - I915_WRITE(PIPEBSTAT, pipeb_stats); + if (iir & I915_ASLE_INTERRUPT) + opregion_asle_intr(dev); - I915_WRITE(IIR, iir); - if (dev->pdev->msi_enabled) - I915_WRITE(IMR, dev_priv->irq_mask_reg); - (void) I915_READ(IIR); /* Flush posted writes */ + if (dev_priv->sarea_priv) + dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); - dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); + I915_WRITE(IIR, iir); + (void) I915_READ(IIR); - if (iir & I915_USER_INTERRUPT) + if (iir & I915_USER_INTERRUPT) { DRM_WAKEUP(&dev_priv->irq_queue); + i915_fence_handler(dev); + } - if (iir & (I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | - I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT)) { - int vblank_pipe = dev_priv->vblank_pipe; - - if ((vblank_pipe & - (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B)) - == (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B)) { - if (iir & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT) - atomic_inc(&dev->vbl_received); - if (iir & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT) - atomic_inc(&dev->vbl_received2); - } else if (((iir & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT) && - (vblank_pipe & DRM_I915_VBLANK_PIPE_A)) || - ((iir & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT) && - (vblank_pipe & DRM_I915_VBLANK_PIPE_B))) - atomic_inc(&dev->vbl_received); - - DRM_WAKEUP(&dev->vbl_queue); - drm_vbl_send_signals(dev); - + if (vblank) { if (dev_priv->swaps_pending > 0) drm_locked_tasklet(dev, i915_vblank_tasklet); } - if (iir & I915_ASLE_INTERRUPT) - opregion_asle_intr(dev); - - if (iir & 
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) - opregion_asle_intr(dev); - return IRQ_HANDLED; } -static int i915_emit_irq(struct drm_device * dev) +int i915_emit_irq(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; RING_LOCALS; @@ -331,16 +478,9 @@ static int i915_emit_irq(struct drm_devi DRM_DEBUG("\n"); - dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter; - - if (dev_priv->counter > 0x7FFFFFFFUL) - dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1; + i915_emit_breadcrumb(dev); - BEGIN_LP_RING(6); - OUT_RING(MI_STORE_DWORD_INDEX); - OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT); - OUT_RING(dev_priv->counter); - OUT_RING(0); + BEGIN_LP_RING(2); OUT_RING(0); OUT_RING(MI_USER_INTERRUPT); ADVANCE_LP_RING(); @@ -348,27 +488,28 @@ static int i915_emit_irq(struct drm_devi return dev_priv->counter; } -static void i915_user_irq_get(struct drm_device *dev) +void i915_user_irq_on(drm_i915_private_t *dev_priv) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + DRM_SPINLOCK(&dev_priv->user_irq_lock); + if (dev_priv->irq_enabled && (++dev_priv->user_irq_refcount == 1)){ + dev_priv->irq_enable_reg |= I915_USER_INTERRUPT; + I915_WRITE(IER, dev_priv->irq_enable_reg); + } + DRM_SPINUNLOCK(&dev_priv->user_irq_lock); - spin_lock(&dev_priv->user_irq_lock); - if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) - i915_enable_irq(dev_priv, I915_USER_INTERRUPT); - spin_unlock(&dev_priv->user_irq_lock); } -static void i915_user_irq_put(struct drm_device *dev) +void i915_user_irq_off(drm_i915_private_t *dev_priv) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - - spin_lock(&dev_priv->user_irq_lock); - BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0); - if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) - i915_disable_irq(dev_priv, I915_USER_INTERRUPT); - spin_unlock(&dev_priv->user_irq_lock); + DRM_SPINLOCK(&dev_priv->user_irq_lock); + if (dev_priv->irq_enabled && (--dev_priv->user_irq_refcount == 0)) { + // dev_priv->irq_enable_reg &= ~I915_USER_INTERRUPT; + // I915_WRITE(IER, dev_priv->irq_enable_reg); + } + DRM_SPINUNLOCK(&dev_priv->user_irq_lock); } + static int i915_wait_irq(struct drm_device * dev, int irq_nr) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; @@ -377,59 +518,25 @@ static int i915_wait_irq(struct drm_devi DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr, READ_BREADCRUMB(dev_priv)); - if (READ_BREADCRUMB(dev_priv) >= irq_nr) { - dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); + if (READ_BREADCRUMB(dev_priv) >= irq_nr) return 0; - } - - dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; - i915_user_irq_get(dev); + i915_user_irq_on(dev_priv); DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ, READ_BREADCRUMB(dev_priv) >= irq_nr); - i915_user_irq_put(dev); + i915_user_irq_off(dev_priv); if (ret == -EBUSY) { DRM_ERROR("EBUSY -- rec: %d emitted: %d\n", READ_BREADCRUMB(dev_priv), (int)dev_priv->counter); } - dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); + if (dev_priv->sarea_priv) + dev_priv->sarea_priv->last_dispatch = + READ_BREADCRUMB(dev_priv); return ret; } -static int i915_driver_vblank_do_wait(struct drm_device *dev, unsigned int *sequence, - atomic_t *counter) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - unsigned int cur_vblank; - int ret = 0; - - if (!dev_priv) { - DRM_ERROR("called with no initialization\n"); - return -EINVAL; - } - - DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ, - (((cur_vblank = 
atomic_read(counter)) - - *sequence) <= (1<<23))); - - *sequence = cur_vblank; - - return ret; -} - - -int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence) -{ - return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received); -} - -int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence) -{ - return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received2); -} - /* Needs the lock as it touches the ring. */ int i915_irq_emit(struct drm_device *dev, void *data, @@ -459,7 +566,7 @@ int i915_irq_emit(struct drm_device *dev /* Doesn't need the hardware lock. */ int i915_irq_wait(struct drm_device *dev, void *data, - struct drm_file *file_priv) + struct drm_file *file_priv) { drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_irq_wait_t *irqwait = data; @@ -472,40 +579,113 @@ int i915_irq_wait(struct drm_device *dev return i915_wait_irq(dev, irqwait->irq_seq); } +int i915_enable_vblank(struct drm_device *dev, int plane) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + int pipe = i915_get_pipe(dev, plane); + u32 pipestat_reg = 0; + u32 pipestat; + + switch (pipe) { + case 0: + pipestat_reg = PIPEASTAT; + dev_priv->irq_enable_reg |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; + break; + case 1: + pipestat_reg = PIPEBSTAT; + dev_priv->irq_enable_reg |= I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; + break; + default: + DRM_ERROR("tried to enable vblank on non-existent pipe %d\n", + pipe); + break; + } + + if (pipestat_reg) + { + pipestat = I915_READ (pipestat_reg); + /* + * Older chips didn't have the start vblank interrupt, + * but + */ + if (IS_I965G (dev)) + pipestat |= PIPE_START_VBLANK_INTERRUPT_ENABLE; + else + pipestat |= PIPE_VBLANK_INTERRUPT_ENABLE; + /* + * Clear any pending status + */ + pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS | + PIPE_VBLANK_INTERRUPT_STATUS); + I915_WRITE(pipestat_reg, pipestat); + } + I915_WRITE(IER, dev_priv->irq_enable_reg); + + return 0; +} + +void i915_disable_vblank(struct drm_device *dev, int plane) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + int pipe = i915_get_pipe(dev, plane); + u32 pipestat_reg = 0; + u32 pipestat; + + switch (pipe) { + case 0: + pipestat_reg = PIPEASTAT; + dev_priv->irq_enable_reg &= ~I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; + break; + case 1: + pipestat_reg = PIPEBSTAT; + dev_priv->irq_enable_reg &= ~I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; + break; + default: + DRM_ERROR("tried to disable vblank on non-existent pipe %d\n", + pipe); + break; + } + + I915_WRITE(IER, dev_priv->irq_enable_reg); + + if (pipestat_reg) + { + pipestat = I915_READ (pipestat_reg); + pipestat &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE | + PIPE_VBLANK_INTERRUPT_ENABLE); + /* + * Clear any pending status + */ + pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS | + PIPE_VBLANK_INTERRUPT_STATUS); + I915_WRITE(pipestat_reg, pipestat); + } +} + +static void i915_enable_interrupt (struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + + dev_priv->irq_enable_reg |= I915_USER_INTERRUPT; + + opregion_enable_asle(dev); + + I915_WRITE(IER, dev_priv->irq_enable_reg); + dev_priv->irq_enabled = 1; +} + /* Set the vblank monitor pipe */ int i915_vblank_pipe_set(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_i915_private_t *dev_priv = dev->dev_private; - drm_i915_vblank_pipe_t *pipe = data; - u32 enable_mask = 0, disable_mask = 0; if (!dev_priv) { DRM_ERROR("called with no initialization\n"); return -EINVAL; } 
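A note on the vblank rework in this hunk: the driver-private vblank counters and wait helpers are dropped in favour of the DRM core's reference-counted vblank support, with i915_enable_vblank()/i915_disable_vblank() above programming PIPEASTAT/PIPEBSTAT on demand, and the swap scheduling further below taking drm_vblank_get()/drm_vblank_put() references. A minimal sketch of that consumption pattern, using made-up helper names rather than the driver's own functions (drm_vblank_get/count/put are the core calls used by the patch itself):

#include "drmP.h"

/*
 * Simplified illustration of the reference-counted vblank pattern this
 * patch converts i915 to: take a reference so the core keeps the vblank
 * interrupt enabled, read the hardware-backed counter, and drop the
 * reference once the scheduled work has completed. The two helpers here
 * are hypothetical, not part of the driver.
 */
static int schedule_for_next_vblank(struct drm_device *dev, int pipe)
{
        unsigned int target;
        int ret;

        ret = drm_vblank_get(dev, pipe);        /* enables the IRQ if needed */
        if (ret)
                return ret;

        target = drm_vblank_count(dev, pipe) + 1;
        /* ... queue a swap or flip to be executed at 'target' ... */
        (void) target;

        return 0;
}

static void vblank_work_completed(struct drm_device *dev, int pipe)
{
        /* Matching put; lets the core disable the interrupt when idle. */
        drm_vblank_put(dev, pipe);
}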
- if (pipe->pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) { - DRM_ERROR("called with invalid pipe 0x%x\n", pipe->pipe); - return -EINVAL; - } - - if (pipe->pipe & DRM_I915_VBLANK_PIPE_A) - enable_mask |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; - else - disable_mask |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; - - if (pipe->pipe & DRM_I915_VBLANK_PIPE_B) - enable_mask |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; - else - disable_mask |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; - - i915_enable_irq(dev_priv, enable_mask); - i915_disable_irq(dev_priv, disable_mask); - - dev_priv->vblank_pipe = pipe->pipe; - return 0; } @@ -514,19 +694,13 @@ int i915_vblank_pipe_get(struct drm_devi { drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_vblank_pipe_t *pipe = data; - u16 flag; if (!dev_priv) { DRM_ERROR("called with no initialization\n"); return -EINVAL; } - flag = I915_READ(IMR); - pipe->pipe = 0; - if (flag & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT) - pipe->pipe |= DRM_I915_VBLANK_PIPE_A; - if (flag & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT) - pipe->pipe |= DRM_I915_VBLANK_PIPE_B; + pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; return 0; } @@ -540,27 +714,30 @@ int i915_vblank_swap(struct drm_device * drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_vblank_swap_t *swap = data; drm_i915_vbl_swap_t *vbl_swap; - unsigned int pipe, seqtype, curseq; + unsigned int pipe, seqtype, curseq, plane; unsigned long irqflags; struct list_head *list; + int ret; if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __func__); return -EINVAL; } - if (dev_priv->sarea_priv->rotation) { + if (!dev_priv->sarea_priv || dev_priv->sarea_priv->rotation) { DRM_DEBUG("Rotation not supported\n"); return -EINVAL; } if (swap->seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE | - _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)) { + _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS | + _DRM_VBLANK_FLIP)) { DRM_ERROR("Invalid sequence type 0x%x\n", swap->seqtype); return -EINVAL; } - pipe = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0; + plane = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0; + pipe = i915_get_pipe(dev, plane); seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE); @@ -569,17 +746,29 @@ int i915_vblank_swap(struct drm_device * return -EINVAL; } - spin_lock_irqsave(&dev->drw_lock, irqflags); + DRM_SPINLOCK_IRQSAVE(&dev->drw_lock, irqflags); + /* It makes no sense to schedule a swap for a drawable that doesn't have + * valid information at this point. E.g. this could mean that the X + * server is too old to push drawable information to the DRM, in which + * case all such swaps would become ineffective. + */ if (!drm_get_drawable_info(dev, swap->drawable)) { - spin_unlock_irqrestore(&dev->drw_lock, irqflags); + DRM_SPINUNLOCK_IRQRESTORE(&dev->drw_lock, irqflags); DRM_DEBUG("Invalid drawable ID %d\n", swap->drawable); return -EINVAL; } - spin_unlock_irqrestore(&dev->drw_lock, irqflags); + DRM_SPINUNLOCK_IRQRESTORE(&dev->drw_lock, irqflags); - curseq = atomic_read(pipe ? &dev->vbl_received2 : &dev->vbl_received); + /* + * We take the ref here and put it when the swap actually completes + * in the tasklet. 
+ */ + ret = drm_vblank_get(dev, pipe); + if (ret) + return ret; + curseq = drm_vblank_count(dev, pipe); if (seqtype == _DRM_VBLANK_RELATIVE) swap->sequence += curseq; @@ -589,28 +778,61 @@ int i915_vblank_swap(struct drm_device * swap->sequence = curseq + 1; } else { DRM_DEBUG("Missed target sequence\n"); + drm_vblank_put(dev, pipe); return -EINVAL; } } - spin_lock_irqsave(&dev_priv->swaps_lock, irqflags); + if (swap->seqtype & _DRM_VBLANK_FLIP) { + swap->sequence--; + + if ((curseq - swap->sequence) <= (1<<23)) { + struct drm_drawable_info *drw; + + LOCK_TEST_WITH_RETURN(dev, file_priv); + + DRM_SPINLOCK_IRQSAVE(&dev->drw_lock, irqflags); + + drw = drm_get_drawable_info(dev, swap->drawable); + + if (!drw) { + DRM_SPINUNLOCK_IRQRESTORE(&dev->drw_lock, + irqflags); + DRM_DEBUG("Invalid drawable ID %d\n", + swap->drawable); + drm_vblank_put(dev, pipe); + return -EINVAL; + } + + i915_dispatch_vsync_flip(dev, drw, plane); + + DRM_SPINUNLOCK_IRQRESTORE(&dev->drw_lock, irqflags); + + drm_vblank_put(dev, pipe); + return 0; + } + } + + DRM_SPINLOCK_IRQSAVE(&dev_priv->swaps_lock, irqflags); list_for_each(list, &dev_priv->vbl_swaps.head) { vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head); if (vbl_swap->drw_id == swap->drawable && - vbl_swap->pipe == pipe && + vbl_swap->plane == plane && vbl_swap->sequence == swap->sequence) { - spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags); + vbl_swap->flip = (swap->seqtype & _DRM_VBLANK_FLIP); + DRM_SPINUNLOCK_IRQRESTORE(&dev_priv->swaps_lock, irqflags); DRM_DEBUG("Already scheduled\n"); return 0; } } - spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags); + DRM_SPINUNLOCK_IRQRESTORE(&dev_priv->swaps_lock, irqflags); if (dev_priv->swaps_pending >= 100) { DRM_DEBUG("Too many swaps queued\n"); + drm_vblank_put(dev, pipe); return -EBUSY; } @@ -618,21 +840,26 @@ int i915_vblank_swap(struct drm_device * if (!vbl_swap) { DRM_ERROR("Failed to allocate memory to queue swap\n"); + drm_vblank_put(dev, pipe); return -ENOMEM; } DRM_DEBUG("\n"); vbl_swap->drw_id = swap->drawable; - vbl_swap->pipe = pipe; + vbl_swap->plane = plane; vbl_swap->sequence = swap->sequence; + vbl_swap->flip = (swap->seqtype & _DRM_VBLANK_FLIP); + + if (vbl_swap->flip) + swap->sequence++; - spin_lock_irqsave(&dev_priv->swaps_lock, irqflags); + DRM_SPINLOCK_IRQSAVE(&dev_priv->swaps_lock, irqflags); list_add_tail(&vbl_swap->head, &dev_priv->vbl_swaps.head); dev_priv->swaps_pending++; - spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags); + DRM_SPINUNLOCK_IRQRESTORE(&dev_priv->swaps_lock, irqflags); return 0; } @@ -643,52 +870,61 @@ void i915_driver_irq_preinstall(struct d { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - I915_WRITE(HWSTAM, 0xfffe); - I915_WRITE(IMR, 0x0); - I915_WRITE(IER, 0x0); + I915_WRITE16(HWSTAM, 0xeffe); + I915_WRITE16(IMR, 0x0); + I915_WRITE16(IER, 0x0); } -void i915_driver_irq_postinstall(struct drm_device * dev) +int i915_driver_irq_postinstall(struct drm_device * dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + int ret, num_pipes = 2; - spin_lock_init(&dev_priv->swaps_lock); + DRM_SPININIT(&dev_priv->swaps_lock, "swap"); INIT_LIST_HEAD(&dev_priv->vbl_swaps.head); dev_priv->swaps_pending = 0; - if (!dev_priv->vblank_pipe) - dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A; + DRM_SPININIT(&dev_priv->user_irq_lock, "userirq"); + dev_priv->user_irq_refcount = 0; + dev_priv->irq_enable_reg = 0; + + ret = drm_vblank_init(dev, num_pipes); + if (ret) + return ret; - /* Set initial unmasked IRQs to just the 
selected vblank pipes. */ - dev_priv->irq_mask_reg = ~0; - if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A) - dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; - if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B) - dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; - - dev_priv->irq_mask_reg &= I915_INTERRUPT_ENABLE_MASK; - - I915_WRITE(IMR, dev_priv->irq_mask_reg); - I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK); - (void) I915_READ(IER); - - opregion_enable_asle(dev); + dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; + dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ + i915_enable_interrupt(dev); DRM_INIT_WAITQUEUE(&dev_priv->irq_queue); + + /* + * Initialize the hardware status page IRQ location. + */ + + I915_WRITE(INSTPM, (1 << 5) | (1 << 21)); + return 0; } void i915_driver_irq_uninstall(struct drm_device * dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - u16 temp; + u32 temp; if (!dev_priv) return; - I915_WRITE(HWSTAM, 0xffff); - I915_WRITE(IMR, 0xffff); + dev_priv->vblank_pipe = 0; + + dev_priv->irq_enabled = 0; + I915_WRITE(HWSTAM, 0xffffffff); + I915_WRITE(IMR, 0xffffffff); I915_WRITE(IER, 0x0); + temp = I915_READ(PIPEASTAT); + I915_WRITE(PIPEASTAT, temp); + temp = I915_READ(PIPEBSTAT); + I915_WRITE(PIPEBSTAT, temp); temp = I915_READ(IIR); I915_WRITE(IIR, temp); } diff -Napur -X /home/jbarnes/dontdiff gpu/drm/i915/i915_opregion.c /tmp/drm-master/drivers/gpu/drm/i915/i915_opregion.c --- gpu/drm/i915/i915_opregion.c 2008-08-21 14:43:42.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/i915/i915_opregion.c 2008-08-21 14:34:09.000000000 -0700 @@ -1,9 +1,10 @@ /* + * * Copyright 2008 Intel Corporation * Copyright 2008 Red Hat * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to @@ -11,8 +12,8 @@ * the following conditions: * * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. + * next paragraph) shall be included in all copies or substantial portions + * of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF @@ -32,7 +33,6 @@ #include "i915_drv.h" #define PCI_ASLE 0xe4 -#define PCI_LBPC 0xf4 #define PCI_ASLS 0xfc #define OPREGION_SZ (8*1024) @@ -47,6 +47,30 @@ #define MBOX_SWSCI (1<<1) #define MBOX_ASLE (1<<2) +/* _DOD id definitions */ +#define OUTPUT_CONNECTOR_MSK 0xf000 +#define OUTPUT_CONNECTOR_OFFSET 12 + +#define OUTPUT_PORT_MSK 0x00f0 +#define OUTPUT_PORT_OFFSET 4 + #define OUTPUT_PORT_ANALOG 0 + #define OUTPUT_PORT_LVDS 1 + #define OUTPUT_PORT_SDVOB 2 + #define OUTPUT_PORT_SDVOC 3 + #define OUTPUT_PORT_TV 4 + +#define OUTPUT_DISPLAY_MSK 0x0f00 +#define OUTPUT_DISPLAY_OFFSET 8 + #define OUTPUT_DISPLAY_OTHER 0 + #define OUTPUT_DISPLAY_VGA 1 + #define OUTPUT_DISPLAY_TV 2 + #define OUTPUT_DISPLAY_DIGI 3 + #define OUTPUT_DISPLAY_FLAT_PANEL 4 + +/* predefined id for integrated LVDS and VGA connector */ +#define OUTPUT_INT_LVDS 0x00000110 +#define OUTPUT_INT_VGA 0x80000100 + struct opregion_header { u8 signature[16]; u32 size; @@ -140,33 +164,25 @@ static u32 asle_set_backlight(struct drm { struct drm_i915_private *dev_priv = dev->dev_private; struct opregion_asle *asle = dev_priv->opregion.asle; - u32 blc_pwm_ctl, blc_pwm_ctl2; - + u32 blc_pwm_ctl; + if (!(bclp & ASLE_BCLP_VALID)) return ASLE_BACKLIGHT_FAIL; - + bclp &= ASLE_BCLP_MSK; if (bclp < 0 || bclp > 255) return ASLE_BACKLIGHT_FAIL; - + blc_pwm_ctl = I915_READ(BLC_PWM_CTL); blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK; - blc_pwm_ctl2 = I915_READ(BLC_PWM_CTL2); - - if (blc_pwm_ctl2 & BLM_COMBINATION_MODE) - pci_write_config_dword(dev->pdev, PCI_LBPC, bclp); - else - I915_WRITE(BLC_PWM_CTL, blc_pwm_ctl | ((bclp * 0x101)-1)); - + I915_WRITE(BLC_PWM_CTL, blc_pwm_ctl | ((bclp * 0x101) -1)); asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID; - + return 0; } static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi) { - /* alsi is the current ALS reading in lux. 0 indicates below sensor - range, 0xffff indicates above sensor range. 1-0xfffe are valid */ return 0; } @@ -178,15 +194,13 @@ static u32 asle_set_pwm_freq(struct drm_ u32 pwm = pfmb & ASLE_PFMB_PWM_MASK; blc_pwm_ctl &= BACKLIGHT_DUTY_CYCLE_MASK; pwm = pwm >> 9; - /* FIXME - what do we do with the PWM? */ + // FIXME - what do we do with the PWM? 
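For reference on the backlight request handling kept above: asle_set_backlight() scales the 8-bit brightness value supplied through the ASLE mailbox into the 16-bit duty-cycle field of BLC_PWM_CTL and reports a 0-100 completion value back via cblv. A small stand-alone sketch of just that arithmetic (the helper names are illustrative only; the register write and the VALID flag are omitted):

#include <stdint.h>
#include <stdio.h>

/* Mirrors the scaling in asle_set_backlight(): an 8-bit brightness
 * request (0-255) becomes a 16-bit duty cycle and a 0-100 readback. */
static uint32_t asle_duty_cycle(uint32_t bclp)
{
        /* bclp * 0x101 replicates the byte into both halves of a 16-bit
         * value (0xab -> 0xabab); the driver then subtracts one before
         * OR-ing the result into BLC_PWM_CTL. */
        return (bclp * 0x101) - 1;
}

static uint32_t asle_cblv(uint32_t bclp)
{
        /* Reported back as 0-100; the driver also sets a VALID bit. */
        return (bclp * 0x64) / 0xff;
}

int main(void)
{
        printf("bclp=255 -> duty=0x%04x cblv=%u\n",
               (unsigned) asle_duty_cycle(255), (unsigned) asle_cblv(255));
        printf("bclp=128 -> duty=0x%04x cblv=%u\n",
               (unsigned) asle_duty_cycle(128), (unsigned) asle_cblv(128));
        return 0;
}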
} return 0; } static u32 asle_set_pfit(struct drm_device *dev, u32 pfit) { - /* Panel fitting is currently controlled by the X code, so this is a - noop until modesetting support works fully */ if (!(pfit & ASLE_PFIT_VALID)) return ASLE_PFIT_FAIL; return 0; @@ -203,7 +217,7 @@ void opregion_asle_intr(struct drm_devic return; asle_req = asle->aslc & ASLE_REQ_MSK; - + if (!asle_req) { DRM_DEBUG("non asle set request??\n"); return; @@ -211,16 +225,16 @@ void opregion_asle_intr(struct drm_devic if (asle_req & ASLE_SET_ALS_ILLUM) asle_stat |= asle_set_als_illum(dev, asle->alsi); - + if (asle_req & ASLE_SET_BACKLIGHT) asle_stat |= asle_set_backlight(dev, asle->bclp); - + if (asle_req & ASLE_SET_PFIT) asle_stat |= asle_set_pfit(dev, asle->pfit); - + if (asle_req & ASLE_SET_PWM_FREQ) asle_stat |= asle_set_pwm_freq(dev, asle->pfmb); - + asle->aslc = asle_stat; } @@ -237,17 +251,18 @@ void opregion_enable_asle(struct drm_dev if (asle) { u32 pipeb_stats = I915_READ(PIPEBSTAT); if (IS_MOBILE(dev)) { - /* Many devices trigger events with a write to the - legacy backlight controller, so we need to ensure - that it's able to generate interrupts */ - I915_WRITE(PIPEBSTAT, pipeb_stats |= + /* Some hardware uses the legacy backlight controller + to signal interrupts, so we need to set up pipe B + to generate an IRQ on writes */ + I915_WRITE(PIPEBSTAT, pipeb_stats |= I915_LEGACY_BLC_EVENT_ENABLE); - i915_enable_irq(dev_priv, I915_ASLE_INTERRUPT | - I915_DISPLAY_PIPE_B_EVENT_INTERRUPT); + dev_priv->irq_enable_reg |= + (I915_ASLE_INTERRUPT + | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT); } else - i915_enable_irq(dev_priv, I915_ASLE_INTERRUPT); - - asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN | + dev_priv->irq_enable_reg |= I915_ASLE_INTERRUPT; + + asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN | ASLE_PFMB_EN; asle->ardy = 1; } @@ -260,7 +275,7 @@ void opregion_enable_asle(struct drm_dev static struct intel_opregion *system_opregion; int intel_opregion_video_event(struct notifier_block *nb, unsigned long val, - void *data) + void *data) { /* The only video events relevant to opregion are 0x80. These indicate either a docking event, lid switch or display switch request. 
In @@ -268,12 +283,12 @@ int intel_opregion_video_event(struct no We might want to fix the video driver to be opregion-aware in future, but right now we just indicate to the firmware that the request has been handled */ - + struct opregion_acpi *acpi; if (!system_opregion) return NOTIFY_DONE; - + acpi = system_opregion->acpi; acpi->csts = 0; @@ -291,25 +306,25 @@ int intel_opregion_init(struct drm_devic void *base; u32 asls, mboxes; int err = 0; - + pci_read_config_dword(dev->pdev, PCI_ASLS, &asls); DRM_DEBUG("graphic opregion physical addr: 0x%x\n", asls); if (asls == 0) { DRM_DEBUG("ACPI OpRegion not supported!\n"); return -ENOTSUPP; } - + base = ioremap(asls, OPREGION_SZ); if (!base) return -ENOMEM; - + opregion->header = base; if (memcmp(opregion->header->signature, OPREGION_SIGNATURE, 16)) { DRM_DEBUG("opregion signature mismatch\n"); err = -EINVAL; goto err_out; } - + mboxes = opregion->header->mboxes; if (mboxes & MBOX_ACPI) { DRM_DEBUG("Public ACPI methods supported\n"); @@ -320,7 +335,7 @@ int intel_opregion_init(struct drm_devic goto err_out; } opregion->enabled = 1; - + if (mboxes & MBOX_SWSCI) { DRM_DEBUG("SWSCI supported\n"); opregion->swsci = base + OPREGION_SWSCI_OFFSET; @@ -329,7 +344,7 @@ int intel_opregion_init(struct drm_devic DRM_DEBUG("ASLE supported\n"); opregion->asle = base + OPREGION_ASLE_OFFSET; } - + /* Notify BIOS we are ready to handle ACPI video ext notifs. * Right now, all the events are handled by the ACPI video module. * We don't actually need to do anything with them. */ @@ -338,9 +353,9 @@ int intel_opregion_init(struct drm_devic system_opregion = opregion; register_acpi_notifier(&intel_opregion_notifier); - + return 0; - + err_out: iounmap(opregion->header); opregion->header = NULL; @@ -351,21 +366,21 @@ void intel_opregion_free(struct drm_devi { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_opregion *opregion = &dev_priv->opregion; - + if (!opregion->enabled) return; - + opregion->acpi->drdy = 0; - + system_opregion = NULL; unregister_acpi_notifier(&intel_opregion_notifier); - + /* just clear all opregion memory pointers now */ iounmap(opregion->header); opregion->header = NULL; opregion->acpi = NULL; opregion->swsci = NULL; opregion->asle = NULL; - + opregion->enabled = 0; } diff -Napur -X /home/jbarnes/dontdiff gpu/drm/i915/i915_reg.h /tmp/drm-master/drivers/gpu/drm/i915/i915_reg.h --- gpu/drm/i915/i915_reg.h 2008-08-21 14:43:42.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/i915/i915_reg.h 1969-12-31 16:00:00.000000000 -0800 @@ -1,1406 +0,0 @@ -/* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR - * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -#ifndef _I915_REG_H_ -#define _I915_REG_H_ - -/* MCH MMIO space */ -/** 915-945 and GM965 MCH register controlling DRAM channel access */ -#define DCC 0x200 -#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0) -#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC (1 << 0) -#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0) -#define DCC_ADDRESSING_MODE_MASK (3 << 0) -#define DCC_CHANNEL_XOR_DISABLE (1 << 10) - -/** 965 MCH register controlling DRAM channel configuration */ -#define CHDECMISC 0x111 -#define CHDECMISC_FLEXMEMORY (1 << 1) - -/* - * The Bridge device's PCI config space has information about the - * fb aperture size and the amount of pre-reserved memory. - */ -#define INTEL_GMCH_CTRL 0x52 -#define INTEL_GMCH_ENABLED 0x4 -#define INTEL_GMCH_MEM_MASK 0x1 -#define INTEL_GMCH_MEM_64M 0x1 -#define INTEL_GMCH_MEM_128M 0 - -#define INTEL_855_GMCH_GMS_MASK (0x7 << 4) -#define INTEL_855_GMCH_GMS_DISABLED (0x0 << 4) -#define INTEL_855_GMCH_GMS_STOLEN_1M (0x1 << 4) -#define INTEL_855_GMCH_GMS_STOLEN_4M (0x2 << 4) -#define INTEL_855_GMCH_GMS_STOLEN_8M (0x3 << 4) -#define INTEL_855_GMCH_GMS_STOLEN_16M (0x4 << 4) -#define INTEL_855_GMCH_GMS_STOLEN_32M (0x5 << 4) - -#define INTEL_915G_GMCH_GMS_STOLEN_48M (0x6 << 4) -#define INTEL_915G_GMCH_GMS_STOLEN_64M (0x7 << 4) - -/* PCI config space */ - -#define HPLLCC 0xc0 /* 855 only */ -#define GC_CLOCK_CONTROL_MASK (3 << 0) -#define GC_CLOCK_133_200 (0 << 0) -#define GC_CLOCK_100_200 (1 << 0) -#define GC_CLOCK_100_133 (2 << 0) -#define GC_CLOCK_166_250 (3 << 0) -#define GCFGC 0xf0 /* 915+ only */ -#define GC_LOW_FREQUENCY_ENABLE (1 << 7) -#define GC_DISPLAY_CLOCK_190_200_MHZ (0 << 4) -#define GC_DISPLAY_CLOCK_333_MHZ (4 << 4) -#define GC_DISPLAY_CLOCK_MASK (7 << 4) -#define LBB 0xf4 - -/* VGA stuff */ - -#define VGA_ST01_MDA 0x3ba -#define VGA_ST01_CGA 0x3da - -#define VGA_MSR_WRITE 0x3c2 -#define VGA_MSR_READ 0x3cc -#define VGA_MSR_MEM_EN (1<<1) -#define VGA_MSR_CGA_MODE (1<<0) - -#define VGA_SR_INDEX 0x3c4 -#define VGA_SR_DATA 0x3c5 - -#define VGA_AR_INDEX 0x3c0 -#define VGA_AR_VID_EN (1<<5) -#define VGA_AR_DATA_WRITE 0x3c0 -#define VGA_AR_DATA_READ 0x3c1 - -#define VGA_GR_INDEX 0x3ce -#define VGA_GR_DATA 0x3cf -/* GR05 */ -#define VGA_GR_MEM_READ_MODE_SHIFT 3 -#define VGA_GR_MEM_READ_MODE_PLANE 1 -/* GR06 */ -#define VGA_GR_MEM_MODE_MASK 0xc -#define VGA_GR_MEM_MODE_SHIFT 2 -#define VGA_GR_MEM_A0000_AFFFF 0 -#define VGA_GR_MEM_A0000_BFFFF 1 -#define VGA_GR_MEM_B0000_B7FFF 2 -#define VGA_GR_MEM_B0000_BFFFF 3 - -#define VGA_DACMASK 0x3c6 -#define VGA_DACRX 0x3c7 -#define VGA_DACWX 0x3c8 -#define VGA_DACDATA 0x3c9 - -#define VGA_CR_INDEX_MDA 0x3b4 -#define VGA_CR_DATA_MDA 0x3b5 -#define VGA_CR_INDEX_CGA 0x3d4 -#define VGA_CR_DATA_CGA 0x3d5 - -/* - * Memory interface instructions used by the kernel - */ -#define MI_INSTR(opcode, flags) (((opcode) << 23) | (flags)) - -#define MI_NOOP MI_INSTR(0, 0) -#define MI_USER_INTERRUPT MI_INSTR(0x02, 0) -#define MI_WAIT_FOR_EVENT MI_INSTR(0x03, 0) -#define MI_WAIT_FOR_PLANE_B_FLIP (1<<6) -#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2) -#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1) -#define MI_FLUSH MI_INSTR(0x04, 0) -#define MI_READ_FLUSH (1 << 0) -#define MI_EXE_FLUSH (1 << 1) -#define MI_NO_WRITE_FLUSH (1 << 2) -#define 
MI_SCENE_COUNT (1 << 3) /* just increment scene count */ -#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */ -#define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0) -#define MI_REPORT_HEAD MI_INSTR(0x07, 0) -#define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0) -#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) -#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */ -#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1) -#define MI_STORE_DWORD_INDEX_SHIFT 2 -#define MI_LOAD_REGISTER_IMM MI_INSTR(0x22, 1) -#define MI_BATCH_BUFFER MI_INSTR(0x30, 1) -#define MI_BATCH_NON_SECURE (1) -#define MI_BATCH_NON_SECURE_I965 (1<<8) -#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0) - -/* - * 3D instructions used by the kernel - */ -#define GFX_INSTR(opcode, flags) ((0x3 << 29) | ((opcode) << 24) | (flags)) - -#define GFX_OP_RASTER_RULES ((0x3<<29)|(0x7<<24)) -#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19)) -#define SC_UPDATE_SCISSOR (0x1<<1) -#define SC_ENABLE_MASK (0x1<<0) -#define SC_ENABLE (0x1<<0) -#define GFX_OP_LOAD_INDIRECT ((0x3<<29)|(0x1d<<24)|(0x7<<16)) -#define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1)) -#define SCI_YMIN_MASK (0xffff<<16) -#define SCI_XMIN_MASK (0xffff<<0) -#define SCI_YMAX_MASK (0xffff<<16) -#define SCI_XMAX_MASK (0xffff<<0) -#define GFX_OP_SCISSOR_ENABLE ((0x3<<29)|(0x1c<<24)|(0x10<<19)) -#define GFX_OP_SCISSOR_RECT ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1) -#define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0) -#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16)) -#define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x4) -#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0) -#define GFX_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1) -#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3)) -#define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2) -#define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4) -#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6) -#define XY_MONO_SRC_COPY_IMM_BLT ((2<<29)|(0x71<<22)|5) -#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21) -#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20) -#define BLT_DEPTH_8 (0<<24) -#define BLT_DEPTH_16_565 (1<<24) -#define BLT_DEPTH_16_1555 (2<<24) -#define BLT_DEPTH_32 (3<<24) -#define BLT_ROP_GXCOPY (0xcc<<16) -#define XY_SRC_COPY_BLT_SRC_TILED (1<<15) /* 965+ only */ -#define XY_SRC_COPY_BLT_DST_TILED (1<<11) /* 965+ only */ -#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2) -#define ASYNC_FLIP (1<<22) -#define DISPLAY_PLANE_A (0<<20) -#define DISPLAY_PLANE_B (1<<20) - -/* - * Instruction and interrupt control regs - */ - -#define PRB0_TAIL 0x02030 -#define PRB0_HEAD 0x02034 -#define PRB0_START 0x02038 -#define PRB0_CTL 0x0203c -#define TAIL_ADDR 0x001FFFF8 -#define HEAD_WRAP_COUNT 0xFFE00000 -#define HEAD_WRAP_ONE 0x00200000 -#define HEAD_ADDR 0x001FFFFC -#define RING_NR_PAGES 0x001FF000 -#define RING_REPORT_MASK 0x00000006 -#define RING_REPORT_64K 0x00000002 -#define RING_REPORT_128K 0x00000004 -#define RING_NO_REPORT 0x00000000 -#define RING_VALID_MASK 0x00000001 -#define RING_VALID 0x00000001 -#define RING_INVALID 0x00000000 -#define PRB1_TAIL 0x02040 /* 915+ only */ -#define PRB1_HEAD 0x02044 /* 915+ only */ -#define PRB1_START 0x02048 /* 915+ only */ -#define PRB1_CTL 0x0204c /* 915+ only */ -#define ACTHD_I965 0x02074 -#define HWS_PGA 0x02080 -#define HWS_ADDRESS_MASK 0xfffff000 -#define HWS_START_ADDRESS_SHIFT 4 -#define IPEIR 0x02088 -#define NOPID 0x02094 -#define HWSTAM 0x02098 -#define SCPD0 0x0209c /* 915+ only */ -#define IER 0x020a0 -#define IIR 0x020a4 -#define 
IMR 0x020a8 -#define ISR 0x020ac -#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18) -#define I915_DISPLAY_PORT_INTERRUPT (1<<17) -#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15) -#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14) -#define I915_HWB_OOM_INTERRUPT (1<<13) -#define I915_SYNC_STATUS_INTERRUPT (1<<12) -#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11) -#define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT (1<<10) -#define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT (1<<9) -#define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT (1<<8) -#define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT (1<<7) -#define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT (1<<6) -#define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT (1<<5) -#define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT (1<<4) -#define I915_DEBUG_INTERRUPT (1<<2) -#define I915_USER_INTERRUPT (1<<1) -#define I915_ASLE_INTERRUPT (1<<0) -#define EIR 0x020b0 -#define EMR 0x020b4 -#define ESR 0x020b8 -#define INSTPM 0x020c0 -#define ACTHD 0x020c8 -#define FW_BLC 0x020d8 -#define FW_BLC_SELF 0x020e0 /* 915+ only */ -#define MI_ARB_STATE 0x020e4 /* 915+ only */ -#define CACHE_MODE_0 0x02120 /* 915+ only */ -#define CM0_MASK_SHIFT 16 -#define CM0_IZ_OPT_DISABLE (1<<6) -#define CM0_ZR_OPT_DISABLE (1<<5) -#define CM0_DEPTH_EVICT_DISABLE (1<<4) -#define CM0_COLOR_EVICT_DISABLE (1<<3) -#define CM0_DEPTH_WRITE_DISABLE (1<<1) -#define CM0_RC_OP_FLUSH_DISABLE (1<<0) -#define GFX_FLSH_CNTL 0x02170 /* 915+ only */ - -/* - * Framebuffer compression (915+ only) - */ - -#define FBC_CFB_BASE 0x03200 /* 4k page aligned */ -#define FBC_LL_BASE 0x03204 /* 4k page aligned */ -#define FBC_CONTROL 0x03208 -#define FBC_CTL_EN (1<<31) -#define FBC_CTL_PERIODIC (1<<30) -#define FBC_CTL_INTERVAL_SHIFT (16) -#define FBC_CTL_UNCOMPRESSIBLE (1<<14) -#define FBC_CTL_STRIDE_SHIFT (5) -#define FBC_CTL_FENCENO (1<<0) -#define FBC_COMMAND 0x0320c -#define FBC_CMD_COMPRESS (1<<0) -#define FBC_STATUS 0x03210 -#define FBC_STAT_COMPRESSING (1<<31) -#define FBC_STAT_COMPRESSED (1<<30) -#define FBC_STAT_MODIFIED (1<<29) -#define FBC_STAT_CURRENT_LINE (1<<0) -#define FBC_CONTROL2 0x03214 -#define FBC_CTL_FENCE_DBL (0<<4) -#define FBC_CTL_IDLE_IMM (0<<2) -#define FBC_CTL_IDLE_FULL (1<<2) -#define FBC_CTL_IDLE_LINE (2<<2) -#define FBC_CTL_IDLE_DEBUG (3<<2) -#define FBC_CTL_CPU_FENCE (1<<1) -#define FBC_CTL_PLANEA (0<<0) -#define FBC_CTL_PLANEB (1<<0) -#define FBC_FENCE_OFF 0x0321b - -#define FBC_LL_SIZE (1536) - -/* - * GPIO regs - */ -#define GPIOA 0x5010 -#define GPIOB 0x5014 -#define GPIOC 0x5018 -#define GPIOD 0x501c -#define GPIOE 0x5020 -#define GPIOF 0x5024 -#define GPIOG 0x5028 -#define GPIOH 0x502c -# define GPIO_CLOCK_DIR_MASK (1 << 0) -# define GPIO_CLOCK_DIR_IN (0 << 1) -# define GPIO_CLOCK_DIR_OUT (1 << 1) -# define GPIO_CLOCK_VAL_MASK (1 << 2) -# define GPIO_CLOCK_VAL_OUT (1 << 3) -# define GPIO_CLOCK_VAL_IN (1 << 4) -# define GPIO_CLOCK_PULLUP_DISABLE (1 << 5) -# define GPIO_DATA_DIR_MASK (1 << 8) -# define GPIO_DATA_DIR_IN (0 << 9) -# define GPIO_DATA_DIR_OUT (1 << 9) -# define GPIO_DATA_VAL_MASK (1 << 10) -# define GPIO_DATA_VAL_OUT (1 << 11) -# define GPIO_DATA_VAL_IN (1 << 12) -# define GPIO_DATA_PULLUP_DISABLE (1 << 13) - -/* - * Clock control & power management - */ - -#define VGA0 0x6000 -#define VGA1 0x6004 -#define VGA_PD 0x6010 -#define VGA0_PD_P2_DIV_4 (1 << 7) -#define VGA0_PD_P1_DIV_2 (1 << 5) -#define VGA0_PD_P1_SHIFT 0 -#define VGA0_PD_P1_MASK (0x1f << 0) -#define VGA1_PD_P2_DIV_4 (1 << 15) -#define VGA1_PD_P1_DIV_2 (1 << 13) -#define VGA1_PD_P1_SHIFT 8 -#define 
VGA1_PD_P1_MASK (0x1f << 8) -#define DPLL_A 0x06014 -#define DPLL_B 0x06018 -#define DPLL_VCO_ENABLE (1 << 31) -#define DPLL_DVO_HIGH_SPEED (1 << 30) -#define DPLL_SYNCLOCK_ENABLE (1 << 29) -#define DPLL_VGA_MODE_DIS (1 << 28) -#define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */ -#define DPLLB_MODE_LVDS (2 << 26) /* i915 */ -#define DPLL_MODE_MASK (3 << 26) -#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */ -#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */ -#define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */ -#define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */ -#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */ -#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */ - -#define I915_FIFO_UNDERRUN_STATUS (1UL<<31) -#define I915_CRC_ERROR_ENABLE (1UL<<29) -#define I915_CRC_DONE_ENABLE (1UL<<28) -#define I915_GMBUS_EVENT_ENABLE (1UL<<27) -#define I915_VSYNC_INTERRUPT_ENABLE (1UL<<25) -#define I915_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24) -#define I915_DPST_EVENT_ENABLE (1UL<<23) -#define I915_LEGACY_BLC_EVENT_ENABLE (1UL<<22) -#define I915_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21) -#define I915_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20) -#define I915_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */ -#define I915_VBLANK_INTERRUPT_ENABLE (1UL<<17) -#define I915_OVERLAY_UPDATED_ENABLE (1UL<<16) -#define I915_CRC_ERROR_INTERRUPT_STATUS (1UL<<13) -#define I915_CRC_DONE_INTERRUPT_STATUS (1UL<<12) -#define I915_GMBUS_INTERRUPT_STATUS (1UL<<11) -#define I915_VSYNC_INTERRUPT_STATUS (1UL<<9) -#define I915_DISPLAY_LINE_COMPARE_STATUS (1UL<<8) -#define I915_DPST_EVENT_STATUS (1UL<<7) -#define I915_LEGACY_BLC_EVENT_STATUS (1UL<<6) -#define I915_ODD_FIELD_INTERRUPT_STATUS (1UL<<5) -#define I915_EVEN_FIELD_INTERRUPT_STATUS (1UL<<4) -#define I915_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */ -#define I915_VBLANK_INTERRUPT_STATUS (1UL<<1) -#define I915_OVERLAY_UPDATED_STATUS (1UL<<0) - -#define SRX_INDEX 0x3c4 -#define SRX_DATA 0x3c5 -#define SR01 1 -#define SR01_SCREEN_OFF (1<<5) - -#define PPCR 0x61204 -#define PPCR_ON (1<<0) - -#define DVOB 0x61140 -#define DVOB_ON (1<<31) -#define DVOC 0x61160 -#define DVOC_ON (1<<31) -#define LVDS 0x61180 -#define LVDS_ON (1<<31) - -#define ADPA 0x61100 -#define ADPA_DPMS_MASK (~(3<<10)) -#define ADPA_DPMS_ON (0<<10) -#define ADPA_DPMS_SUSPEND (1<<10) -#define ADPA_DPMS_STANDBY (2<<10) -#define ADPA_DPMS_OFF (3<<10) - -#define RING_TAIL 0x00 -#define TAIL_ADDR 0x001FFFF8 -#define RING_HEAD 0x04 -#define HEAD_WRAP_COUNT 0xFFE00000 -#define HEAD_WRAP_ONE 0x00200000 -#define HEAD_ADDR 0x001FFFFC -#define RING_START 0x08 -#define START_ADDR 0xFFFFF000 -#define RING_LEN 0x0C -#define RING_NR_PAGES 0x001FF000 -#define RING_REPORT_MASK 0x00000006 -#define RING_REPORT_64K 0x00000002 -#define RING_REPORT_128K 0x00000004 -#define RING_NO_REPORT 0x00000000 -#define RING_VALID_MASK 0x00000001 -#define RING_VALID 0x00000001 -#define RING_INVALID 0x00000000 - -/* Scratch pad debug 0 reg: - */ -#define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000 -/* - * The i830 generation, in LVDS mode, defines P1 as the bit number set within - * this field (only one bit may be set). 
- */ -#define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000 -#define DPLL_FPA01_P1_POST_DIV_SHIFT 16 -/* i830, required in DVO non-gang */ -#define PLL_P2_DIVIDE_BY_4 (1 << 23) -#define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */ -#define PLL_REF_INPUT_DREFCLK (0 << 13) -#define PLL_REF_INPUT_TVCLKINA (1 << 13) /* i830 */ -#define PLL_REF_INPUT_TVCLKINBC (2 << 13) /* SDVO TVCLKIN */ -#define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13) -#define PLL_REF_INPUT_MASK (3 << 13) -#define PLL_LOAD_PULSE_PHASE_SHIFT 9 -/* - * Parallel to Serial Load Pulse phase selection. - * Selects the phase for the 10X DPLL clock for the PCIe - * digital display port. The range is 4 to 13; 10 or more - * is just a flip delay. The default is 6 - */ -#define PLL_LOAD_PULSE_PHASE_MASK (0xf << PLL_LOAD_PULSE_PHASE_SHIFT) -#define DISPLAY_RATE_SELECT_FPA1 (1 << 8) -/* - * SDVO multiplier for 945G/GM. Not used on 965. - */ -#define SDVO_MULTIPLIER_MASK 0x000000ff -#define SDVO_MULTIPLIER_SHIFT_HIRES 4 -#define SDVO_MULTIPLIER_SHIFT_VGA 0 -#define DPLL_A_MD 0x0601c /* 965+ only */ -/* - * UDI pixel divider, controlling how many pixels are stuffed into a packet. - * - * Value is pixels minus 1. Must be set to 1 pixel for SDVO. - */ -#define DPLL_MD_UDI_DIVIDER_MASK 0x3f000000 -#define DPLL_MD_UDI_DIVIDER_SHIFT 24 -/* UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */ -#define DPLL_MD_VGA_UDI_DIVIDER_MASK 0x003f0000 -#define DPLL_MD_VGA_UDI_DIVIDER_SHIFT 16 -/* - * SDVO/UDI pixel multiplier. - * - * SDVO requires that the bus clock rate be between 1 and 2 Ghz, and the bus - * clock rate is 10 times the DPLL clock. At low resolution/refresh rate - * modes, the bus rate would be below the limits, so SDVO allows for stuffing - * dummy bytes in the datastream at an increased clock rate, with both sides of - * the link knowing how many bytes are fill. - * - * So, for a mode with a dotclock of 65Mhz, we would want to double the clock - * rate to 130Mhz to get a bus rate of 1.30Ghz. The DPLL clock rate would be - * set to 130Mhz, and the SDVO multiplier set to 2x in this register and - * through an SDVO command. - * - * This register field has values of multiplication factor minus 1, with - * a maximum multiplier of 5 for SDVO. - */ -#define DPLL_MD_UDI_MULTIPLIER_MASK 0x00003f00 -#define DPLL_MD_UDI_MULTIPLIER_SHIFT 8 -/* - * SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK. - * This best be set to the default value (3) or the CRT won't work. No, - * I don't entirely understand what this does... 
- */ -#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f -#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0 -#define DPLL_B_MD 0x06020 /* 965+ only */ -#define FPA0 0x06040 -#define FPA1 0x06044 -#define FPB0 0x06048 -#define FPB1 0x0604c -#define FP_N_DIV_MASK 0x003f0000 -#define FP_N_DIV_SHIFT 16 -#define FP_M1_DIV_MASK 0x00003f00 -#define FP_M1_DIV_SHIFT 8 -#define FP_M2_DIV_MASK 0x0000003f -#define FP_M2_DIV_SHIFT 0 -#define DPLL_TEST 0x606c -#define DPLLB_TEST_SDVO_DIV_1 (0 << 22) -#define DPLLB_TEST_SDVO_DIV_2 (1 << 22) -#define DPLLB_TEST_SDVO_DIV_4 (2 << 22) -#define DPLLB_TEST_SDVO_DIV_MASK (3 << 22) -#define DPLLB_TEST_N_BYPASS (1 << 19) -#define DPLLB_TEST_M_BYPASS (1 << 18) -#define DPLLB_INPUT_BUFFER_ENABLE (1 << 16) -#define DPLLA_TEST_N_BYPASS (1 << 3) -#define DPLLA_TEST_M_BYPASS (1 << 2) -#define DPLLA_INPUT_BUFFER_ENABLE (1 << 0) -#define D_STATE 0x6104 -#define CG_2D_DIS 0x6200 -#define CG_3D_DIS 0x6204 - -/* - * Palette regs - */ - -#define PALETTE_A 0x0a000 -#define PALETTE_B 0x0a800 - -/* - * Overlay regs - */ - -#define OVADD 0x30000 -#define DOVSTA 0x30008 -#define OC_BUF (0x3<<20) -#define OGAMC5 0x30010 -#define OGAMC4 0x30014 -#define OGAMC3 0x30018 -#define OGAMC2 0x3001c -#define OGAMC1 0x30020 -#define OGAMC0 0x30024 - -/* - * Display engine regs - */ - -/* Pipe A timing regs */ -#define HTOTAL_A 0x60000 -#define HBLANK_A 0x60004 -#define HSYNC_A 0x60008 -#define VTOTAL_A 0x6000c -#define VBLANK_A 0x60010 -#define VSYNC_A 0x60014 -#define PIPEASRC 0x6001c -#define BCLRPAT_A 0x60020 - -/* Pipe B timing regs */ -#define HTOTAL_B 0x61000 -#define HBLANK_B 0x61004 -#define HSYNC_B 0x61008 -#define VTOTAL_B 0x6100c -#define VBLANK_B 0x61010 -#define VSYNC_B 0x61014 -#define PIPEBSRC 0x6101c -#define BCLRPAT_B 0x61020 - -/* VGA port control */ -#define ADPA 0x61100 -#define ADPA_DAC_ENABLE (1<<31) -#define ADPA_DAC_DISABLE 0 -#define ADPA_PIPE_SELECT_MASK (1<<30) -#define ADPA_PIPE_A_SELECT 0 -#define ADPA_PIPE_B_SELECT (1<<30) -#define ADPA_USE_VGA_HVPOLARITY (1<<15) -#define ADPA_SETS_HVPOLARITY 0 -#define ADPA_VSYNC_CNTL_DISABLE (1<<11) -#define ADPA_VSYNC_CNTL_ENABLE 0 -#define ADPA_HSYNC_CNTL_DISABLE (1<<10) -#define ADPA_HSYNC_CNTL_ENABLE 0 -#define ADPA_VSYNC_ACTIVE_HIGH (1<<4) -#define ADPA_VSYNC_ACTIVE_LOW 0 -#define ADPA_HSYNC_ACTIVE_HIGH (1<<3) -#define ADPA_HSYNC_ACTIVE_LOW 0 -#define ADPA_DPMS_MASK (~(3<<10)) -#define ADPA_DPMS_ON (0<<10) -#define ADPA_DPMS_SUSPEND (1<<10) -#define ADPA_DPMS_STANDBY (2<<10) -#define ADPA_DPMS_OFF (3<<10) - -/* Hotplug control (945+ only) */ -#define PORT_HOTPLUG_EN 0x61110 -#define SDVOB_HOTPLUG_INT_EN (1 << 26) -#define SDVOC_HOTPLUG_INT_EN (1 << 25) -#define TV_HOTPLUG_INT_EN (1 << 18) -#define CRT_HOTPLUG_INT_EN (1 << 9) -#define CRT_HOTPLUG_FORCE_DETECT (1 << 3) - -#define PORT_HOTPLUG_STAT 0x61114 -#define CRT_HOTPLUG_INT_STATUS (1 << 11) -#define TV_HOTPLUG_INT_STATUS (1 << 10) -#define CRT_HOTPLUG_MONITOR_MASK (3 << 8) -#define CRT_HOTPLUG_MONITOR_COLOR (3 << 8) -#define CRT_HOTPLUG_MONITOR_MONO (2 << 8) -#define CRT_HOTPLUG_MONITOR_NONE (0 << 8) -#define SDVOC_HOTPLUG_INT_STATUS (1 << 7) -#define SDVOB_HOTPLUG_INT_STATUS (1 << 6) - -/* SDVO port control */ -#define SDVOB 0x61140 -#define SDVOC 0x61160 -#define SDVO_ENABLE (1 << 31) -#define SDVO_PIPE_B_SELECT (1 << 30) -#define SDVO_STALL_SELECT (1 << 29) -#define SDVO_INTERRUPT_ENABLE (1 << 26) -/** - * 915G/GM SDVO pixel multiplier. - * - * Programmed value is multiplier - 1, up to 5x. 
- * - * \sa DPLL_MD_UDI_MULTIPLIER_MASK - */ -#define SDVO_PORT_MULTIPLY_MASK (7 << 23) -#define SDVO_PORT_MULTIPLY_SHIFT 23 -#define SDVO_PHASE_SELECT_MASK (15 << 19) -#define SDVO_PHASE_SELECT_DEFAULT (6 << 19) -#define SDVO_CLOCK_OUTPUT_INVERT (1 << 18) -#define SDVOC_GANG_MODE (1 << 16) -#define SDVO_BORDER_ENABLE (1 << 7) -#define SDVOB_PCIE_CONCURRENCY (1 << 3) -#define SDVO_DETECTED (1 << 2) -/* Bits to be preserved when writing */ -#define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14) | (1 << 26)) -#define SDVOC_PRESERVE_MASK ((1 << 17) | (1 << 26)) - -/* DVO port control */ -#define DVOA 0x61120 -#define DVOB 0x61140 -#define DVOC 0x61160 -#define DVO_ENABLE (1 << 31) -#define DVO_PIPE_B_SELECT (1 << 30) -#define DVO_PIPE_STALL_UNUSED (0 << 28) -#define DVO_PIPE_STALL (1 << 28) -#define DVO_PIPE_STALL_TV (2 << 28) -#define DVO_PIPE_STALL_MASK (3 << 28) -#define DVO_USE_VGA_SYNC (1 << 15) -#define DVO_DATA_ORDER_I740 (0 << 14) -#define DVO_DATA_ORDER_FP (1 << 14) -#define DVO_VSYNC_DISABLE (1 << 11) -#define DVO_HSYNC_DISABLE (1 << 10) -#define DVO_VSYNC_TRISTATE (1 << 9) -#define DVO_HSYNC_TRISTATE (1 << 8) -#define DVO_BORDER_ENABLE (1 << 7) -#define DVO_DATA_ORDER_GBRG (1 << 6) -#define DVO_DATA_ORDER_RGGB (0 << 6) -#define DVO_DATA_ORDER_GBRG_ERRATA (0 << 6) -#define DVO_DATA_ORDER_RGGB_ERRATA (1 << 6) -#define DVO_VSYNC_ACTIVE_HIGH (1 << 4) -#define DVO_HSYNC_ACTIVE_HIGH (1 << 3) -#define DVO_BLANK_ACTIVE_HIGH (1 << 2) -#define DVO_OUTPUT_CSTATE_PIXELS (1 << 1) /* SDG only */ -#define DVO_OUTPUT_SOURCE_SIZE_PIXELS (1 << 0) /* SDG only */ -#define DVO_PRESERVE_MASK (0x7<<24) -#define DVOA_SRCDIM 0x61124 -#define DVOB_SRCDIM 0x61144 -#define DVOC_SRCDIM 0x61164 -#define DVO_SRCDIM_HORIZONTAL_SHIFT 12 -#define DVO_SRCDIM_VERTICAL_SHIFT 0 - -/* LVDS port control */ -#define LVDS 0x61180 -/* - * Enables the LVDS port. This bit must be set before DPLLs are enabled, as - * the DPLL semantics change when the LVDS is assigned to that pipe. - */ -#define LVDS_PORT_EN (1 << 31) -/* Selects pipe B for LVDS data. Must be set on pre-965. */ -#define LVDS_PIPEB_SELECT (1 << 30) -/* - * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per - * pixel. - */ -#define LVDS_A0A2_CLKA_POWER_MASK (3 << 8) -#define LVDS_A0A2_CLKA_POWER_DOWN (0 << 8) -#define LVDS_A0A2_CLKA_POWER_UP (3 << 8) -/* - * Controls the A3 data pair, which contains the additional LSBs for 24 bit - * mode. Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be - * on. - */ -#define LVDS_A3_POWER_MASK (3 << 6) -#define LVDS_A3_POWER_DOWN (0 << 6) -#define LVDS_A3_POWER_UP (3 << 6) -/* - * Controls the CLKB pair. This should only be set when LVDS_B0B3_POWER_UP - * is set. - */ -#define LVDS_CLKB_POWER_MASK (3 << 4) -#define LVDS_CLKB_POWER_DOWN (0 << 4) -#define LVDS_CLKB_POWER_UP (3 << 4) -/* - * Controls the B0-B3 data pairs. This must be set to match the DPLL p2 - * setting for whether we are in dual-channel mode. The B3 pair will - * additionally only be powered up when LVDS_A3_POWER_UP is set. 
- */ -#define LVDS_B0B3_POWER_MASK (3 << 2) -#define LVDS_B0B3_POWER_DOWN (0 << 2) -#define LVDS_B0B3_POWER_UP (3 << 2) - -/* Panel power sequencing */ -#define PP_STATUS 0x61200 -#define PP_ON (1 << 31) -/* - * Indicates that all dependencies of the panel are on: - * - * - PLL enabled - * - pipe enabled - * - LVDS/DVOB/DVOC on - */ -#define PP_READY (1 << 30) -#define PP_SEQUENCE_NONE (0 << 28) -#define PP_SEQUENCE_ON (1 << 28) -#define PP_SEQUENCE_OFF (2 << 28) -#define PP_SEQUENCE_MASK 0x30000000 -#define PP_CONTROL 0x61204 -#define POWER_TARGET_ON (1 << 0) -#define PP_ON_DELAYS 0x61208 -#define PP_OFF_DELAYS 0x6120c -#define PP_DIVISOR 0x61210 - -/* Panel fitting */ -#define PFIT_CONTROL 0x61230 -#define PFIT_ENABLE (1 << 31) -#define PFIT_PIPE_MASK (3 << 29) -#define PFIT_PIPE_SHIFT 29 -#define VERT_INTERP_DISABLE (0 << 10) -#define VERT_INTERP_BILINEAR (1 << 10) -#define VERT_INTERP_MASK (3 << 10) -#define VERT_AUTO_SCALE (1 << 9) -#define HORIZ_INTERP_DISABLE (0 << 6) -#define HORIZ_INTERP_BILINEAR (1 << 6) -#define HORIZ_INTERP_MASK (3 << 6) -#define HORIZ_AUTO_SCALE (1 << 5) -#define PANEL_8TO6_DITHER_ENABLE (1 << 3) -#define PFIT_PGM_RATIOS 0x61234 -#define PFIT_VERT_SCALE_MASK 0xfff00000 -#define PFIT_HORIZ_SCALE_MASK 0x0000fff0 -#define PFIT_AUTO_RATIOS 0x61238 - -/* Backlight control */ -#define BLC_PWM_CTL 0x61254 -#define BACKLIGHT_MODULATION_FREQ_SHIFT (17) -#define BLC_PWM_CTL2 0x61250 /* 965+ only */ -#define BLM_COMBINATION_MODE (1 << 30) -/* - * This is the most significant 15 bits of the number of backlight cycles in a - * complete cycle of the modulated backlight control. - * - * The actual value is this field multiplied by two. - */ -#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17) -#define BLM_LEGACY_MODE (1 << 16) -/* - * This is the number of cycles out of the backlight modulation cycle for which - * the backlight is on. - * - * This field must be no greater than the number of cycles in the complete - * backlight modulation cycle. - */ -#define BACKLIGHT_DUTY_CYCLE_SHIFT (0) -#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff) - -/* TV port control */ -#define TV_CTL 0x68000 -/** Enables the TV encoder */ -# define TV_ENC_ENABLE (1 << 31) -/** Sources the TV encoder input from pipe B instead of A. */ -# define TV_ENC_PIPEB_SELECT (1 << 30) -/** Outputs composite video (DAC A only) */ -# define TV_ENC_OUTPUT_COMPOSITE (0 << 28) -/** Outputs SVideo video (DAC B/C) */ -# define TV_ENC_OUTPUT_SVIDEO (1 << 28) -/** Outputs Component video (DAC A/B/C) */ -# define TV_ENC_OUTPUT_COMPONENT (2 << 28) -/** Outputs Composite and SVideo (DAC A/B/C) */ -# define TV_ENC_OUTPUT_SVIDEO_COMPOSITE (3 << 28) -# define TV_TRILEVEL_SYNC (1 << 21) -/** Enables slow sync generation (945GM only) */ -# define TV_SLOW_SYNC (1 << 20) -/** Selects 4x oversampling for 480i and 576p */ -# define TV_OVERSAMPLE_4X (0 << 18) -/** Selects 2x oversampling for 720p and 1080i */ -# define TV_OVERSAMPLE_2X (1 << 18) -/** Selects no oversampling for 1080p */ -# define TV_OVERSAMPLE_NONE (2 << 18) -/** Selects 8x oversampling */ -# define TV_OVERSAMPLE_8X (3 << 18) -/** Selects progressive mode rather than interlaced */ -# define TV_PROGRESSIVE (1 << 17) -/** Sets the colorburst to PAL mode. Required for non-M PAL modes. */ -# define TV_PAL_BURST (1 << 16) -/** Field for setting delay of Y compared to C */ -# define TV_YC_SKEW_MASK (7 << 12) -/** Enables a fix for 480p/576p standard definition modes on the 915GM only */ -# define TV_ENC_SDP_FIX (1 << 11) -/** - * Enables a fix for the 915GM only. 
- * - * Not sure what it does. - */ -# define TV_ENC_C0_FIX (1 << 10) -/** Bits that must be preserved by software */ -# define TV_CTL_SAVE ((3 << 8) | (3 << 6)) -# define TV_FUSE_STATE_MASK (3 << 4) -/** Read-only state that reports all features enabled */ -# define TV_FUSE_STATE_ENABLED (0 << 4) -/** Read-only state that reports that Macrovision is disabled in hardware*/ -# define TV_FUSE_STATE_NO_MACROVISION (1 << 4) -/** Read-only state that reports that TV-out is disabled in hardware. */ -# define TV_FUSE_STATE_DISABLED (2 << 4) -/** Normal operation */ -# define TV_TEST_MODE_NORMAL (0 << 0) -/** Encoder test pattern 1 - combo pattern */ -# define TV_TEST_MODE_PATTERN_1 (1 << 0) -/** Encoder test pattern 2 - full screen vertical 75% color bars */ -# define TV_TEST_MODE_PATTERN_2 (2 << 0) -/** Encoder test pattern 3 - full screen horizontal 75% color bars */ -# define TV_TEST_MODE_PATTERN_3 (3 << 0) -/** Encoder test pattern 4 - random noise */ -# define TV_TEST_MODE_PATTERN_4 (4 << 0) -/** Encoder test pattern 5 - linear color ramps */ -# define TV_TEST_MODE_PATTERN_5 (5 << 0) -/** - * This test mode forces the DACs to 50% of full output. - * - * This is used for load detection in combination with TVDAC_SENSE_MASK - */ -# define TV_TEST_MODE_MONITOR_DETECT (7 << 0) -# define TV_TEST_MODE_MASK (7 << 0) - -#define TV_DAC 0x68004 -/** - * Reports that DAC state change logic has reported change (RO). - * - * This gets cleared when TV_DAC_STATE_EN is cleared -*/ -# define TVDAC_STATE_CHG (1 << 31) -# define TVDAC_SENSE_MASK (7 << 28) -/** Reports that DAC A voltage is above the detect threshold */ -# define TVDAC_A_SENSE (1 << 30) -/** Reports that DAC B voltage is above the detect threshold */ -# define TVDAC_B_SENSE (1 << 29) -/** Reports that DAC C voltage is above the detect threshold */ -# define TVDAC_C_SENSE (1 << 28) -/** - * Enables DAC state detection logic, for load-based TV detection. - * - * The PLL of the chosen pipe (in TV_CTL) must be running, and the encoder set - * to off, for load detection to work. - */ -# define TVDAC_STATE_CHG_EN (1 << 27) -/** Sets the DAC A sense value to high */ -# define TVDAC_A_SENSE_CTL (1 << 26) -/** Sets the DAC B sense value to high */ -# define TVDAC_B_SENSE_CTL (1 << 25) -/** Sets the DAC C sense value to high */ -# define TVDAC_C_SENSE_CTL (1 << 24) -/** Overrides the ENC_ENABLE and DAC voltage levels */ -# define DAC_CTL_OVERRIDE (1 << 7) -/** Sets the slew rate. Must be preserved in software */ -# define ENC_TVDAC_SLEW_FAST (1 << 6) -# define DAC_A_1_3_V (0 << 4) -# define DAC_A_1_1_V (1 << 4) -# define DAC_A_0_7_V (2 << 4) -# define DAC_A_OFF (3 << 4) -# define DAC_B_1_3_V (0 << 2) -# define DAC_B_1_1_V (1 << 2) -# define DAC_B_0_7_V (2 << 2) -# define DAC_B_OFF (3 << 2) -# define DAC_C_1_3_V (0 << 0) -# define DAC_C_1_1_V (1 << 0) -# define DAC_C_0_7_V (2 << 0) -# define DAC_C_OFF (3 << 0) - -/** - * CSC coefficients are stored in a floating point format with 9 bits of - * mantissa and 2 or 3 bits of exponent. The exponent is represented as 2**-n, - * where 2-bit exponents are unsigned n, and 3-bit exponents are signed n with - * -1 (0x3) being the only legal negative value. - */ -#define TV_CSC_Y 0x68010 -# define TV_RY_MASK 0x07ff0000 -# define TV_RY_SHIFT 16 -# define TV_GY_MASK 0x00000fff -# define TV_GY_SHIFT 0 - -#define TV_CSC_Y2 0x68014 -# define TV_BY_MASK 0x07ff0000 -# define TV_BY_SHIFT 16 -/** - * Y attenuation for component video. - * - * Stored in 1.9 fixed point. 
- */ -# define TV_AY_MASK 0x000003ff -# define TV_AY_SHIFT 0 - -#define TV_CSC_U 0x68018 -# define TV_RU_MASK 0x07ff0000 -# define TV_RU_SHIFT 16 -# define TV_GU_MASK 0x000007ff -# define TV_GU_SHIFT 0 - -#define TV_CSC_U2 0x6801c -# define TV_BU_MASK 0x07ff0000 -# define TV_BU_SHIFT 16 -/** - * U attenuation for component video. - * - * Stored in 1.9 fixed point. - */ -# define TV_AU_MASK 0x000003ff -# define TV_AU_SHIFT 0 - -#define TV_CSC_V 0x68020 -# define TV_RV_MASK 0x0fff0000 -# define TV_RV_SHIFT 16 -# define TV_GV_MASK 0x000007ff -# define TV_GV_SHIFT 0 - -#define TV_CSC_V2 0x68024 -# define TV_BV_MASK 0x07ff0000 -# define TV_BV_SHIFT 16 -/** - * V attenuation for component video. - * - * Stored in 1.9 fixed point. - */ -# define TV_AV_MASK 0x000007ff -# define TV_AV_SHIFT 0 - -#define TV_CLR_KNOBS 0x68028 -/** 2s-complement brightness adjustment */ -# define TV_BRIGHTNESS_MASK 0xff000000 -# define TV_BRIGHTNESS_SHIFT 24 -/** Contrast adjustment, as a 2.6 unsigned floating point number */ -# define TV_CONTRAST_MASK 0x00ff0000 -# define TV_CONTRAST_SHIFT 16 -/** Saturation adjustment, as a 2.6 unsigned floating point number */ -# define TV_SATURATION_MASK 0x0000ff00 -# define TV_SATURATION_SHIFT 8 -/** Hue adjustment, as an integer phase angle in degrees */ -# define TV_HUE_MASK 0x000000ff -# define TV_HUE_SHIFT 0 - -#define TV_CLR_LEVEL 0x6802c -/** Controls the DAC level for black */ -# define TV_BLACK_LEVEL_MASK 0x01ff0000 -# define TV_BLACK_LEVEL_SHIFT 16 -/** Controls the DAC level for blanking */ -# define TV_BLANK_LEVEL_MASK 0x000001ff -# define TV_BLANK_LEVEL_SHIFT 0 - -#define TV_H_CTL_1 0x68030 -/** Number of pixels in the hsync. */ -# define TV_HSYNC_END_MASK 0x1fff0000 -# define TV_HSYNC_END_SHIFT 16 -/** Total number of pixels minus one in the line (display and blanking). */ -# define TV_HTOTAL_MASK 0x00001fff -# define TV_HTOTAL_SHIFT 0 - -#define TV_H_CTL_2 0x68034 -/** Enables the colorburst (needed for non-component color) */ -# define TV_BURST_ENA (1 << 31) -/** Offset of the colorburst from the start of hsync, in pixels minus one. */ -# define TV_HBURST_START_SHIFT 16 -# define TV_HBURST_START_MASK 0x1fff0000 -/** Length of the colorburst */ -# define TV_HBURST_LEN_SHIFT 0 -# define TV_HBURST_LEN_MASK 0x0001fff - -#define TV_H_CTL_3 0x68038 -/** End of hblank, measured in pixels minus one from start of hsync */ -# define TV_HBLANK_END_SHIFT 16 -# define TV_HBLANK_END_MASK 0x1fff0000 -/** Start of hblank, measured in pixels minus one from start of hsync */ -# define TV_HBLANK_START_SHIFT 0 -# define TV_HBLANK_START_MASK 0x0001fff - -#define TV_V_CTL_1 0x6803c -/** XXX */ -# define TV_NBR_END_SHIFT 16 -# define TV_NBR_END_MASK 0x07ff0000 -/** XXX */ -# define TV_VI_END_F1_SHIFT 8 -# define TV_VI_END_F1_MASK 0x00003f00 -/** XXX */ -# define TV_VI_END_F2_SHIFT 0 -# define TV_VI_END_F2_MASK 0x0000003f - -#define TV_V_CTL_2 0x68040 -/** Length of vsync, in half lines */ -# define TV_VSYNC_LEN_MASK 0x07ff0000 -# define TV_VSYNC_LEN_SHIFT 16 -/** Offset of the start of vsync in field 1, measured in one less than the - * number of half lines. - */ -# define TV_VSYNC_START_F1_MASK 0x00007f00 -# define TV_VSYNC_START_F1_SHIFT 8 -/** - * Offset of the start of vsync in field 2, measured in one less than the - * number of half lines. 
- */ -# define TV_VSYNC_START_F2_MASK 0x0000007f -# define TV_VSYNC_START_F2_SHIFT 0 - -#define TV_V_CTL_3 0x68044 -/** Enables generation of the equalization signal */ -# define TV_EQUAL_ENA (1 << 31) -/** Length of vsync, in half lines */ -# define TV_VEQ_LEN_MASK 0x007f0000 -# define TV_VEQ_LEN_SHIFT 16 -/** Offset of the start of equalization in field 1, measured in one less than - * the number of half lines. - */ -# define TV_VEQ_START_F1_MASK 0x0007f00 -# define TV_VEQ_START_F1_SHIFT 8 -/** - * Offset of the start of equalization in field 2, measured in one less than - * the number of half lines. - */ -# define TV_VEQ_START_F2_MASK 0x000007f -# define TV_VEQ_START_F2_SHIFT 0 - -#define TV_V_CTL_4 0x68048 -/** - * Offset to start of vertical colorburst, measured in one less than the - * number of lines from vertical start. - */ -# define TV_VBURST_START_F1_MASK 0x003f0000 -# define TV_VBURST_START_F1_SHIFT 16 -/** - * Offset to the end of vertical colorburst, measured in one less than the - * number of lines from the start of NBR. - */ -# define TV_VBURST_END_F1_MASK 0x000000ff -# define TV_VBURST_END_F1_SHIFT 0 - -#define TV_V_CTL_5 0x6804c -/** - * Offset to start of vertical colorburst, measured in one less than the - * number of lines from vertical start. - */ -# define TV_VBURST_START_F2_MASK 0x003f0000 -# define TV_VBURST_START_F2_SHIFT 16 -/** - * Offset to the end of vertical colorburst, measured in one less than the - * number of lines from the start of NBR. - */ -# define TV_VBURST_END_F2_MASK 0x000000ff -# define TV_VBURST_END_F2_SHIFT 0 - -#define TV_V_CTL_6 0x68050 -/** - * Offset to start of vertical colorburst, measured in one less than the - * number of lines from vertical start. - */ -# define TV_VBURST_START_F3_MASK 0x003f0000 -# define TV_VBURST_START_F3_SHIFT 16 -/** - * Offset to the end of vertical colorburst, measured in one less than the - * number of lines from the start of NBR. - */ -# define TV_VBURST_END_F3_MASK 0x000000ff -# define TV_VBURST_END_F3_SHIFT 0 - -#define TV_V_CTL_7 0x68054 -/** - * Offset to start of vertical colorburst, measured in one less than the - * number of lines from vertical start. - */ -# define TV_VBURST_START_F4_MASK 0x003f0000 -# define TV_VBURST_START_F4_SHIFT 16 -/** - * Offset to the end of vertical colorburst, measured in one less than the - * number of lines from the start of NBR. 
- */ -# define TV_VBURST_END_F4_MASK 0x000000ff -# define TV_VBURST_END_F4_SHIFT 0 - -#define TV_SC_CTL_1 0x68060 -/** Turns on the first subcarrier phase generation DDA */ -# define TV_SC_DDA1_EN (1 << 31) -/** Turns on the first subcarrier phase generation DDA */ -# define TV_SC_DDA2_EN (1 << 30) -/** Turns on the first subcarrier phase generation DDA */ -# define TV_SC_DDA3_EN (1 << 29) -/** Sets the subcarrier DDA to reset frequency every other field */ -# define TV_SC_RESET_EVERY_2 (0 << 24) -/** Sets the subcarrier DDA to reset frequency every fourth field */ -# define TV_SC_RESET_EVERY_4 (1 << 24) -/** Sets the subcarrier DDA to reset frequency every eighth field */ -# define TV_SC_RESET_EVERY_8 (2 << 24) -/** Sets the subcarrier DDA to never reset the frequency */ -# define TV_SC_RESET_NEVER (3 << 24) -/** Sets the peak amplitude of the colorburst.*/ -# define TV_BURST_LEVEL_MASK 0x00ff0000 -# define TV_BURST_LEVEL_SHIFT 16 -/** Sets the increment of the first subcarrier phase generation DDA */ -# define TV_SCDDA1_INC_MASK 0x00000fff -# define TV_SCDDA1_INC_SHIFT 0 - -#define TV_SC_CTL_2 0x68064 -/** Sets the rollover for the second subcarrier phase generation DDA */ -# define TV_SCDDA2_SIZE_MASK 0x7fff0000 -# define TV_SCDDA2_SIZE_SHIFT 16 -/** Sets the increent of the second subcarrier phase generation DDA */ -# define TV_SCDDA2_INC_MASK 0x00007fff -# define TV_SCDDA2_INC_SHIFT 0 - -#define TV_SC_CTL_3 0x68068 -/** Sets the rollover for the third subcarrier phase generation DDA */ -# define TV_SCDDA3_SIZE_MASK 0x7fff0000 -# define TV_SCDDA3_SIZE_SHIFT 16 -/** Sets the increent of the third subcarrier phase generation DDA */ -# define TV_SCDDA3_INC_MASK 0x00007fff -# define TV_SCDDA3_INC_SHIFT 0 - -#define TV_WIN_POS 0x68070 -/** X coordinate of the display from the start of horizontal active */ -# define TV_XPOS_MASK 0x1fff0000 -# define TV_XPOS_SHIFT 16 -/** Y coordinate of the display from the start of vertical active (NBR) */ -# define TV_YPOS_MASK 0x00000fff -# define TV_YPOS_SHIFT 0 - -#define TV_WIN_SIZE 0x68074 -/** Horizontal size of the display window, measured in pixels*/ -# define TV_XSIZE_MASK 0x1fff0000 -# define TV_XSIZE_SHIFT 16 -/** - * Vertical size of the display window, measured in pixels. - * - * Must be even for interlaced modes. - */ -# define TV_YSIZE_MASK 0x00000fff -# define TV_YSIZE_SHIFT 0 - -#define TV_FILTER_CTL_1 0x68080 -/** - * Enables automatic scaling calculation. - * - * If set, the rest of the registers are ignored, and the calculated values can - * be read back from the register. - */ -# define TV_AUTO_SCALE (1 << 31) -/** - * Disables the vertical filter. - * - * This is required on modes more than 1024 pixels wide */ -# define TV_V_FILTER_BYPASS (1 << 29) -/** Enables adaptive vertical filtering */ -# define TV_VADAPT (1 << 28) -# define TV_VADAPT_MODE_MASK (3 << 26) -/** Selects the least adaptive vertical filtering mode */ -# define TV_VADAPT_MODE_LEAST (0 << 26) -/** Selects the moderately adaptive vertical filtering mode */ -# define TV_VADAPT_MODE_MODERATE (1 << 26) -/** Selects the most adaptive vertical filtering mode */ -# define TV_VADAPT_MODE_MOST (3 << 26) -/** - * Sets the horizontal scaling factor. - * - * This should be the fractional part of the horizontal scaling factor divided - * by the oversampling rate. 
TV_HSCALE should be less than 1, and set to: - * - * (src width - 1) / ((oversample * dest width) - 1) - */ -# define TV_HSCALE_FRAC_MASK 0x00003fff -# define TV_HSCALE_FRAC_SHIFT 0 - -#define TV_FILTER_CTL_2 0x68084 -/** - * Sets the integer part of the 3.15 fixed-point vertical scaling factor. - * - * TV_VSCALE should be (src height - 1) / ((interlace * dest height) - 1) - */ -# define TV_VSCALE_INT_MASK 0x00038000 -# define TV_VSCALE_INT_SHIFT 15 -/** - * Sets the fractional part of the 3.15 fixed-point vertical scaling factor. - * - * \sa TV_VSCALE_INT_MASK - */ -# define TV_VSCALE_FRAC_MASK 0x00007fff -# define TV_VSCALE_FRAC_SHIFT 0 - -#define TV_FILTER_CTL_3 0x68088 -/** - * Sets the integer part of the 3.15 fixed-point vertical scaling factor. - * - * TV_VSCALE should be (src height - 1) / (1/4 * (dest height - 1)) - * - * For progressive modes, TV_VSCALE_IP_INT should be set to zeroes. - */ -# define TV_VSCALE_IP_INT_MASK 0x00038000 -# define TV_VSCALE_IP_INT_SHIFT 15 -/** - * Sets the fractional part of the 3.15 fixed-point vertical scaling factor. - * - * For progressive modes, TV_VSCALE_IP_INT should be set to zeroes. - * - * \sa TV_VSCALE_IP_INT_MASK - */ -# define TV_VSCALE_IP_FRAC_MASK 0x00007fff -# define TV_VSCALE_IP_FRAC_SHIFT 0 - -#define TV_CC_CONTROL 0x68090 -# define TV_CC_ENABLE (1 << 31) -/** - * Specifies which field to send the CC data in. - * - * CC data is usually sent in field 0. - */ -# define TV_CC_FID_MASK (1 << 27) -# define TV_CC_FID_SHIFT 27 -/** Sets the horizontal position of the CC data. Usually 135. */ -# define TV_CC_HOFF_MASK 0x03ff0000 -# define TV_CC_HOFF_SHIFT 16 -/** Sets the vertical position of the CC data. Usually 21 */ -# define TV_CC_LINE_MASK 0x0000003f -# define TV_CC_LINE_SHIFT 0 - -#define TV_CC_DATA 0x68094 -# define TV_CC_RDY (1 << 31) -/** Second word of CC data to be transmitted. */ -# define TV_CC_DATA_2_MASK 0x007f0000 -# define TV_CC_DATA_2_SHIFT 16 -/** First word of CC data to be transmitted. 
*/ -# define TV_CC_DATA_1_MASK 0x0000007f -# define TV_CC_DATA_1_SHIFT 0 - -#define TV_H_LUMA_0 0x68100 -#define TV_H_LUMA_59 0x681ec -#define TV_H_CHROMA_0 0x68200 -#define TV_H_CHROMA_59 0x682ec -#define TV_V_LUMA_0 0x68300 -#define TV_V_LUMA_42 0x683a8 -#define TV_V_CHROMA_0 0x68400 -#define TV_V_CHROMA_42 0x684a8 - -/* Display & cursor control */ - -/* Pipe A */ -#define PIPEADSL 0x70000 -#define PIPEACONF 0x70008 -#define PIPEACONF_ENABLE (1<<31) -#define PIPEACONF_DISABLE 0 -#define PIPEACONF_DOUBLE_WIDE (1<<30) -#define I965_PIPECONF_ACTIVE (1<<30) -#define PIPEACONF_SINGLE_WIDE 0 -#define PIPEACONF_PIPE_UNLOCKED 0 -#define PIPEACONF_PIPE_LOCKED (1<<25) -#define PIPEACONF_PALETTE 0 -#define PIPEACONF_GAMMA (1<<24) -#define PIPECONF_FORCE_BORDER (1<<25) -#define PIPECONF_PROGRESSIVE (0 << 21) -#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21) -#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21) -#define PIPEASTAT 0x70024 -#define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31) -#define PIPE_CRC_ERROR_ENABLE (1UL<<29) -#define PIPE_CRC_DONE_ENABLE (1UL<<28) -#define PIPE_GMBUS_EVENT_ENABLE (1UL<<27) -#define PIPE_HOTPLUG_INTERRUPT_ENABLE (1UL<<26) -#define PIPE_VSYNC_INTERRUPT_ENABLE (1UL<<25) -#define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24) -#define PIPE_DPST_EVENT_ENABLE (1UL<<23) -#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL<<22) -#define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21) -#define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20) -#define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL<<18) /* pre-965 */ -#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */ -#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17) -#define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16) -#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13) -#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12) -#define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11) -#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL<<10) -#define PIPE_VSYNC_INTERRUPT_STATUS (1UL<<9) -#define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL<<8) -#define PIPE_DPST_EVENT_STATUS (1UL<<7) -#define PIPE_LEGACY_BLC_EVENT_STATUS (1UL<<6) -#define PIPE_ODD_FIELD_INTERRUPT_STATUS (1UL<<5) -#define PIPE_EVEN_FIELD_INTERRUPT_STATUS (1UL<<4) -#define PIPE_HOTPLUG_TV_INTERRUPT_STATUS (1UL<<2) /* pre-965 */ -#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */ -#define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1) -#define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0) - -#define DSPARB 0x70030 -#define DSPARB_CSTART_MASK (0x7f << 7) -#define DSPARB_CSTART_SHIFT 7 -#define DSPARB_BSTART_MASK (0x7f) -#define DSPARB_BSTART_SHIFT 0 -/* - * The two pipe frame counter registers are not synchronized, so - * reading a stable value is somewhat tricky. 
The following code - * should work: - * - * do { - * high1 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >> - * PIPE_FRAME_HIGH_SHIFT; - * low1 = ((INREG(PIPEAFRAMEPIXEL) & PIPE_FRAME_LOW_MASK) >> - * PIPE_FRAME_LOW_SHIFT); - * high2 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >> - * PIPE_FRAME_HIGH_SHIFT); - * } while (high1 != high2); - * frame = (high1 << 8) | low1; - */ -#define PIPEAFRAMEHIGH 0x70040 -#define PIPE_FRAME_HIGH_MASK 0x0000ffff -#define PIPE_FRAME_HIGH_SHIFT 0 -#define PIPEAFRAMEPIXEL 0x70044 -#define PIPE_FRAME_LOW_MASK 0xff000000 -#define PIPE_FRAME_LOW_SHIFT 24 -#define PIPE_PIXEL_MASK 0x00ffffff -#define PIPE_PIXEL_SHIFT 0 - -/* Cursor A & B regs */ -#define CURACNTR 0x70080 -#define CURSOR_MODE_DISABLE 0x00 -#define CURSOR_MODE_64_32B_AX 0x07 -#define CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX) -#define MCURSOR_GAMMA_ENABLE (1 << 26) -#define CURABASE 0x70084 -#define CURAPOS 0x70088 -#define CURSOR_POS_MASK 0x007FF -#define CURSOR_POS_SIGN 0x8000 -#define CURSOR_X_SHIFT 0 -#define CURSOR_Y_SHIFT 16 -#define CURBCNTR 0x700c0 -#define CURBBASE 0x700c4 -#define CURBPOS 0x700c8 - -/* Display A control */ -#define DSPACNTR 0x70180 -#define DISPLAY_PLANE_ENABLE (1<<31) -#define DISPLAY_PLANE_DISABLE 0 -#define DISPPLANE_GAMMA_ENABLE (1<<30) -#define DISPPLANE_GAMMA_DISABLE 0 -#define DISPPLANE_PIXFORMAT_MASK (0xf<<26) -#define DISPPLANE_8BPP (0x2<<26) -#define DISPPLANE_15_16BPP (0x4<<26) -#define DISPPLANE_16BPP (0x5<<26) -#define DISPPLANE_32BPP_NO_ALPHA (0x6<<26) -#define DISPPLANE_32BPP (0x7<<26) -#define DISPPLANE_STEREO_ENABLE (1<<25) -#define DISPPLANE_STEREO_DISABLE 0 -#define DISPPLANE_SEL_PIPE_MASK (1<<24) -#define DISPPLANE_SEL_PIPE_A 0 -#define DISPPLANE_SEL_PIPE_B (1<<24) -#define DISPPLANE_SRC_KEY_ENABLE (1<<22) -#define DISPPLANE_SRC_KEY_DISABLE 0 -#define DISPPLANE_LINE_DOUBLE (1<<20) -#define DISPPLANE_NO_LINE_DOUBLE 0 -#define DISPPLANE_STEREO_POLARITY_FIRST 0 -#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18) -#define DSPAADDR 0x70184 -#define DSPASTRIDE 0x70188 -#define DSPAPOS 0x7018C /* reserved */ -#define DSPASIZE 0x70190 -#define DSPASURF 0x7019C /* 965+ only */ -#define DSPATILEOFF 0x701A4 /* 965+ only */ - -/* VBIOS flags */ -#define SWF00 0x71410 -#define SWF01 0x71414 -#define SWF02 0x71418 -#define SWF03 0x7141c -#define SWF04 0x71420 -#define SWF05 0x71424 -#define SWF06 0x71428 -#define SWF10 0x70410 -#define SWF11 0x70414 -#define SWF14 0x71420 -#define SWF30 0x72414 -#define SWF31 0x72418 -#define SWF32 0x7241c - -/* Pipe B */ -#define PIPEBDSL 0x71000 -#define PIPEBCONF 0x71008 -#define PIPEBSTAT 0x71024 -#define PIPEBFRAMEHIGH 0x71040 -#define PIPEBFRAMEPIXEL 0x71044 - -/* Display B control */ -#define DSPBCNTR 0x71180 -#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15) -#define DISPPLANE_ALPHA_TRANS_DISABLE 0 -#define DISPPLANE_SPRITE_ABOVE_DISPLAY 0 -#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1) -#define DSPBADDR 0x71184 -#define DSPBSTRIDE 0x71188 -#define DSPBPOS 0x7118C -#define DSPBSIZE 0x71190 -#define DSPBSURF 0x7119C -#define DSPBTILEOFF 0x711A4 - -/* VBIOS regs */ -#define VGACNTRL 0x71400 -# define VGA_DISP_DISABLE (1 << 31) -# define VGA_2X_MODE (1 << 30) -# define VGA_PIPE_B_SELECT (1 << 29) - -#endif /* _I915_REG_H_ */ diff -Napur -X /home/jbarnes/dontdiff gpu/drm/i915/i915_suspend.c /tmp/drm-master/drivers/gpu/drm/i915/i915_suspend.c --- gpu/drm/i915/i915_suspend.c 1969-12-31 16:00:00.000000000 -0800 +++ /tmp/drm-master/drivers/gpu/drm/i915/i915_suspend.c 2008-08-21 14:34:09.000000000 -0700 @@ -0,0 +1,520 @@ 
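(The pipe frame counter comment above, at PIPEAFRAMEHIGH/PIPEAFRAMEPIXEL, gives the read loop only as pseudocode with an unbalanced parenthesis. For reference, a compilable sketch of the same technique is shown below; it is illustrative only — the helper name, the dev_priv parameter and the use of the driver's I915_READ accessor from i915_drv.h are assumptions, while the register and mask names are the ones defined in the header being removed above.)

static u32 i915_read_pipea_frame_count(struct drm_i915_private *dev_priv)
{
	u32 high1, high2, low;

	/*
	 * The high and low halves of the counter live in separate registers
	 * and are not latched together, so re-read the high half until it is
	 * stable across the low-half read.
	 */
	do {
		high1 = (I915_READ(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
			PIPE_FRAME_HIGH_SHIFT;
		low = (I915_READ(PIPEAFRAMEPIXEL) & PIPE_FRAME_LOW_MASK) >>
			PIPE_FRAME_LOW_SHIFT;
		high2 = (I915_READ(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
			PIPE_FRAME_HIGH_SHIFT;
	} while (high1 != high2);

	return (high1 << 8) | low;
}
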
+/* i915_suspend.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*- + */ +/* + * + * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. + * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "drmP.h" +#include "drm.h" +#include "i915_drm.h" +#include "i915_drv.h" + +static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (pipe == PIPE_A) + return (I915_READ(DPLL_A) & DPLL_VCO_ENABLE); + else + return (I915_READ(DPLL_B) & DPLL_VCO_ENABLE); +} + +static void i915_save_palette(struct drm_device *dev, enum pipe pipe) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B); + u32 *array; + int i; + + if (!i915_pipe_enabled(dev, pipe)) + return; + + if (pipe == PIPE_A) + array = dev_priv->save_palette_a; + else + array = dev_priv->save_palette_b; + + for(i = 0; i < 256; i++) + array[i] = I915_READ(reg + (i << 2)); +} + +static void i915_restore_palette(struct drm_device *dev, enum pipe pipe) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + unsigned long reg = (pipe == PIPE_A ? 
PALETTE_A : PALETTE_B); + u32 *array; + int i; + + if (!i915_pipe_enabled(dev, pipe)) + return; + + if (pipe == PIPE_A) + array = dev_priv->save_palette_a; + else + array = dev_priv->save_palette_b; + + for(i = 0; i < 256; i++) + I915_WRITE(reg + (i << 2), array[i]); +} + +static u8 i915_read_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + I915_WRITE8(index_port, reg); + return I915_READ8(data_port); +} + +static u8 i915_read_ar(struct drm_device *dev, u16 st01, u8 reg, u16 palette_enable) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + I915_READ8(st01); + I915_WRITE8(VGA_AR_INDEX, palette_enable | reg); + return I915_READ8(VGA_AR_DATA_READ); +} + +static void i915_write_ar(struct drm_device *dev, u16 st01, u8 reg, u8 val, u16 palette_enable) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + I915_READ8(st01); + I915_WRITE8(VGA_AR_INDEX, palette_enable | reg); + I915_WRITE8(VGA_AR_DATA_WRITE, val); +} + +static void i915_write_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg, u8 val) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + I915_WRITE8(index_port, reg); + I915_WRITE8(data_port, val); +} + +static void i915_save_vga(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int i; + u16 cr_index, cr_data, st01; + + /* VGA color palette registers */ + dev_priv->saveDACMASK = I915_READ8(VGA_DACMASK); + /* DACCRX automatically increments during read */ + I915_WRITE8(VGA_DACRX, 0); + /* Read 3 bytes of color data from each index */ + for (i = 0; i < 256 * 3; i++) + dev_priv->saveDACDATA[i] = I915_READ8(VGA_DACDATA); + + /* MSR bits */ + dev_priv->saveMSR = I915_READ8(VGA_MSR_READ); + if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) { + cr_index = VGA_CR_INDEX_CGA; + cr_data = VGA_CR_DATA_CGA; + st01 = VGA_ST01_CGA; + } else { + cr_index = VGA_CR_INDEX_MDA; + cr_data = VGA_CR_DATA_MDA; + st01 = VGA_ST01_MDA; + } + + /* CRT controller regs */ + i915_write_indexed(dev, cr_index, cr_data, 0x11, + i915_read_indexed(dev, cr_index, cr_data, 0x11) & + (~0x80)); + for (i = 0; i <= 0x24; i++) + dev_priv->saveCR[i] = + i915_read_indexed(dev, cr_index, cr_data, i); + /* Make sure we don't turn off CR group 0 writes */ + dev_priv->saveCR[0x11] &= ~0x80; + + /* Attribute controller registers */ + I915_READ8(st01); + dev_priv->saveAR_INDEX = I915_READ8(VGA_AR_INDEX); + for (i = 0; i <= 0x14; i++) + dev_priv->saveAR[i] = i915_read_ar(dev, st01, i, 0); + I915_READ8(st01); + I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX); + I915_READ8(st01); + + /* Graphics controller registers */ + for (i = 0; i < 9; i++) + dev_priv->saveGR[i] = + i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i); + + dev_priv->saveGR[0x10] = + i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10); + dev_priv->saveGR[0x11] = + i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11); + dev_priv->saveGR[0x18] = + i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18); + + /* Sequencer registers */ + for (i = 0; i < 8; i++) + dev_priv->saveSR[i] = + i915_read_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i); +} + +static void i915_restore_vga(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int i; + u16 cr_index, cr_data, st01; + + /* MSR bits */ + I915_WRITE8(VGA_MSR_WRITE, dev_priv->saveMSR); + if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) { + cr_index = VGA_CR_INDEX_CGA; + cr_data = VGA_CR_DATA_CGA; + st01 = VGA_ST01_CGA; + } else { + 
cr_index = VGA_CR_INDEX_MDA; + cr_data = VGA_CR_DATA_MDA; + st01 = VGA_ST01_MDA; + } + + /* Sequencer registers, don't write SR07 */ + for (i = 0; i < 7; i++) + i915_write_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i, + dev_priv->saveSR[i]); + + /* CRT controller regs */ + /* Enable CR group 0 writes */ + i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]); + for (i = 0; i <= 0x24; i++) + i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->saveCR[i]); + + /* Graphics controller regs */ + for (i = 0; i < 9; i++) + i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i, + dev_priv->saveGR[i]); + + i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10, + dev_priv->saveGR[0x10]); + i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11, + dev_priv->saveGR[0x11]); + i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18, + dev_priv->saveGR[0x18]); + + /* Attribute controller registers */ + I915_READ8(st01); /* switch back to index mode */ + for (i = 0; i <= 0x14; i++) + i915_write_ar(dev, st01, i, dev_priv->saveAR[i], 0); + I915_READ8(st01); /* switch back to index mode */ + I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX | 0x20); + I915_READ8(st01); + + /* VGA color palette registers */ + I915_WRITE8(VGA_DACMASK, dev_priv->saveDACMASK); + /* DACCRX automatically increments during read */ + I915_WRITE8(VGA_DACWX, 0); + /* Read 3 bytes of color data from each index */ + for (i = 0; i < 256 * 3; i++) + I915_WRITE8(VGA_DACDATA, dev_priv->saveDACDATA[i]); + +} + +int i915_save_state(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int i; + +#if defined(__FreeBSD__) + dev_priv->saveLBB = (u8) pci_read_config(dev->device, LBB, 1); +#else + pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB); +#endif + + /* Display arbitration control */ + dev_priv->saveDSPARB = I915_READ(DSPARB); + + /* Pipe & plane A info */ + dev_priv->savePIPEACONF = I915_READ(PIPEACONF); + dev_priv->savePIPEASRC = I915_READ(PIPEASRC); + dev_priv->saveFPA0 = I915_READ(FPA0); + dev_priv->saveFPA1 = I915_READ(FPA1); + dev_priv->saveDPLL_A = I915_READ(DPLL_A); + if (IS_I965G(dev)) + dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD); + dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A); + dev_priv->saveHBLANK_A = I915_READ(HBLANK_A); + dev_priv->saveHSYNC_A = I915_READ(HSYNC_A); + dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A); + dev_priv->saveVBLANK_A = I915_READ(VBLANK_A); + dev_priv->saveVSYNC_A = I915_READ(VSYNC_A); + dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A); + + dev_priv->saveDSPACNTR = I915_READ(DSPACNTR); + dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE); + dev_priv->saveDSPASIZE = I915_READ(DSPASIZE); + dev_priv->saveDSPAPOS = I915_READ(DSPAPOS); + dev_priv->saveDSPAADDR = I915_READ(DSPAADDR); + if (IS_I965G(dev)) { + dev_priv->saveDSPASURF = I915_READ(DSPASURF); + dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF); + } + i915_save_palette(dev, PIPE_A); + dev_priv->savePIPEASTAT = I915_READ(PIPEASTAT); + + /* Pipe & plane B info */ + dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF); + dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC); + dev_priv->saveFPB0 = I915_READ(FPB0); + dev_priv->saveFPB1 = I915_READ(FPB1); + dev_priv->saveDPLL_B = I915_READ(DPLL_B); + if (IS_I965G(dev)) + dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD); + dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B); + dev_priv->saveHBLANK_B = I915_READ(HBLANK_B); + dev_priv->saveHSYNC_B = I915_READ(HSYNC_B); + dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B); + dev_priv->saveVBLANK_B = I915_READ(VBLANK_B); + 
dev_priv->saveVSYNC_B = I915_READ(VSYNC_B); + dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A); + + dev_priv->saveDSPBCNTR = I915_READ(DSPBCNTR); + dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE); + dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE); + dev_priv->saveDSPBPOS = I915_READ(DSPBPOS); + dev_priv->saveDSPBADDR = I915_READ(DSPBADDR); + if (IS_I965GM(dev) || IS_GM45(dev)) { + dev_priv->saveDSPBSURF = I915_READ(DSPBSURF); + dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF); + } + i915_save_palette(dev, PIPE_B); + dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT); + + /* CRT state */ + dev_priv->saveADPA = I915_READ(ADPA); + + /* LVDS state */ + dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL); + dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS); + dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL); + if (IS_I965G(dev)) + dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2); + if (IS_MOBILE(dev) && !IS_I830(dev)) + dev_priv->saveLVDS = I915_READ(LVDS); + if (!IS_I830(dev) && !IS_845G(dev)) + dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL); + dev_priv->savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS); + dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS); + dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR); + + /* FIXME: save TV & SDVO state */ + + /* FBC state */ + dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE); + dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE); + dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2); + dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL); + + /* Interrupt state */ + dev_priv->saveIIR = I915_READ(IIR); + dev_priv->saveIER = I915_READ(IER); + dev_priv->saveIMR = I915_READ(IMR); + + /* VGA state */ + dev_priv->saveVGA0 = I915_READ(VGA0); + dev_priv->saveVGA1 = I915_READ(VGA1); + dev_priv->saveVGA_PD = I915_READ(VGA_PD); + dev_priv->saveVGACNTRL = I915_READ(VGACNTRL); + + /* Clock gating state */ + dev_priv->saveD_STATE = I915_READ(D_STATE); + dev_priv->saveCG_2D_DIS = I915_READ(CG_2D_DIS); + + /* Cache mode state */ + dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); + + /* Memory Arbitration state */ + dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE); + + /* Scratch space */ + for (i = 0; i < 16; i++) { + dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2)); + dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2)); + } + for (i = 0; i < 3; i++) + dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2)); + + i915_save_vga(dev); + + return 0; +} + +int i915_restore_state(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int i; + +#if defined(__FreeBSD__) + pci_write_config(dev->device, LBB, dev_priv->saveLBB, 1); +#else + pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB); +#endif + + I915_WRITE(DSPARB, dev_priv->saveDSPARB); + + /* Pipe & plane A info */ + /* Prime the clock */ + if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) { + I915_WRITE(DPLL_A, dev_priv->saveDPLL_A & + ~DPLL_VCO_ENABLE); + DRM_UDELAY(150); + } + I915_WRITE(FPA0, dev_priv->saveFPA0); + I915_WRITE(FPA1, dev_priv->saveFPA1); + /* Actually enable it */ + I915_WRITE(DPLL_A, dev_priv->saveDPLL_A); + DRM_UDELAY(150); + if (IS_I965G(dev)) + I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD); + DRM_UDELAY(150); + + /* Restore mode */ + I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A); + I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A); + I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A); + I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A); + I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A); + I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A); + I915_WRITE(BCLRPAT_A, 
dev_priv->saveBCLRPAT_A); + + /* Restore plane info */ + I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE); + I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS); + I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC); + I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR); + I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE); + if (IS_I965G(dev)) { + I915_WRITE(DSPASURF, dev_priv->saveDSPASURF); + I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF); + } + + I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF); + + i915_restore_palette(dev, PIPE_A); + /* Enable the plane */ + I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR); + I915_WRITE(DSPAADDR, I915_READ(DSPAADDR)); + + /* Pipe & plane B info */ + if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) { + I915_WRITE(DPLL_B, dev_priv->saveDPLL_B & + ~DPLL_VCO_ENABLE); + DRM_UDELAY(150); + } + I915_WRITE(FPB0, dev_priv->saveFPB0); + I915_WRITE(FPB1, dev_priv->saveFPB1); + /* Actually enable it */ + I915_WRITE(DPLL_B, dev_priv->saveDPLL_B); + DRM_UDELAY(150); + if (IS_I965G(dev)) + I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD); + DRM_UDELAY(150); + + /* Restore mode */ + I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B); + I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B); + I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B); + I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B); + I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B); + I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B); + I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B); + + /* Restore plane info */ + I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE); + I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS); + I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC); + I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR); + I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE); + if (IS_I965G(dev)) { + I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF); + I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF); + } + + I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF); + + i915_restore_palette(dev, PIPE_B); + /* Enable the plane */ + I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR); + I915_WRITE(DSPBADDR, I915_READ(DSPBADDR)); + + /* CRT state */ + I915_WRITE(ADPA, dev_priv->saveADPA); + + /* LVDS state */ + if (IS_I965G(dev)) + I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2); + if (IS_MOBILE(dev) && !IS_I830(dev)) + I915_WRITE(LVDS, dev_priv->saveLVDS); + if (!IS_I830(dev) && !IS_845G(dev)) + I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL); + + I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS); + I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL); + I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS); + I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS); + I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR); + I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL); + + /* FIXME: restore TV & SDVO state */ + + /* FBC info */ + I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE); + I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE); + I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2); + I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL); + + /* VGA state */ + I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL); + I915_WRITE(VGA0, dev_priv->saveVGA0); + I915_WRITE(VGA1, dev_priv->saveVGA1); + I915_WRITE(VGA_PD, dev_priv->saveVGA_PD); + DRM_UDELAY(150); + + /* Clock gating state */ + I915_WRITE (D_STATE, dev_priv->saveD_STATE); + I915_WRITE (CG_2D_DIS, dev_priv->saveCG_2D_DIS); + + /* Cache mode state */ + I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000); + + /* Memory arbitration state */ + I915_WRITE (MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000); + + for (i = 0; i < 16; i++) { + 
I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]); + I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i+7]); + } + for (i = 0; i < 3; i++) + I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]); + + i915_restore_vga(dev); + + return 0; +} + diff -Napur -X /home/jbarnes/dontdiff gpu/drm/i915/Makefile /tmp/drm-master/drivers/gpu/drm/i915/Makefile --- gpu/drm/i915/Makefile 2008-08-21 14:43:42.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/i915/Makefile 1969-12-31 16:00:00.000000000 -0800 @@ -1,10 +0,0 @@ -# -# Makefile for the drm device driver. This driver provides support for the -# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. - -ccflags-y := -Iinclude/drm -i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_opregion.o - -i915-$(CONFIG_COMPAT) += i915_ioc32.o - -obj-$(CONFIG_DRM_I915) += i915.o diff -Napur -X /home/jbarnes/dontdiff gpu/drm/Kconfig /tmp/drm-master/drivers/gpu/drm/Kconfig --- gpu/drm/Kconfig 2008-08-21 13:52:27.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/Kconfig 2008-08-21 14:34:09.000000000 -0700 @@ -4,9 +4,8 @@ # This driver provides support for the # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. # -menuconfig DRM - tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)" - depends on (AGP || AGP=n) && PCI && !EMULATED_CMPXCHG +config DRM + bool "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)" help Kernel-level support for the Direct Rendering Infrastructure (DRI) introduced in XFree86 4.0. If you say Y here, you need to select @@ -37,8 +36,8 @@ config DRM_RADEON help Choose this option if you have an ATI Radeon graphics card. There are both PCI and AGP versions. You don't need to choose this to - run the Radeon in plain VGA mode. - + run the Radeon in plain VGA mode. There is a product page at + . If M is selected, the module will be called radeon. config DRM_I810 @@ -54,54 +53,46 @@ choice depends on DRM && AGP && AGP_INTEL optional -config DRM_I830 - tristate "i830 driver" - help - Choose this option if you have a system that has Intel 830M, 845G, - 852GM, 855GM or 865G integrated graphics. If M is selected, the - module will be called i830. AGP support is required for this driver - to work. This driver is used by the older X releases X.org 6.7 and - XFree86 4.3. If unsure, build this and i915 as modules and the X server - will load the correct one. - config DRM_I915 tristate "i915 driver" help Choose this option if you have a system that has Intel 830M, 845G, - 852GM, 855GM 865G or 915G integrated graphics. If M is selected, the - module will be called i915. AGP support is required for this driver - to work. This driver is used by the Intel driver in X.org 6.8 and - XFree86 4.4 and above. If unsure, build this and i830 as modules and - the X server will load the correct one. - + 852GM, 855GM, 865G, 915G, 915GM, 945G, 945GM and 965G integrated + graphics. If M is selected, the module will be called i915. + AGP support is required for this driver to work. + endchoice config DRM_MGA tristate "Matrox g200/g400" - depends on DRM + depends on DRM && (!X86_64 || BROKEN) && (!PPC || BROKEN) help - Choose this option if you have a Matrox G200, G400 or G450 graphics - card. If M is selected, the module will be called mga. AGP - support is required for this driver to work. + Choose this option if you have a Matrox G200, G400, G450 or G550 + graphics card. If M is selected, the module will be called mga. 
config DRM_SIS tristate "SiS video cards" - depends on DRM && AGP + depends on DRM help - Choose this option if you have a SiS 630 or compatible video - chipset. If M is selected the module will be called sis. AGP - support is required for this driver to work. + Choose this option if you have a SiS 630 or compatible video + chipset. If M is selected the module will be called sis. config DRM_VIA tristate "Via unichrome video cards" - depends on DRM + depends on DRM help - Choose this option if you have a Via unichrome or compatible video + Choose this option if you have a Via unichrome or compatible video chipset. If M is selected the module will be called via. -config DRM_SAVAGE - tristate "Savage video cards" - depends on DRM +config DRM_MACH64 + tristate "ATI Rage Pro (Mach64)" + depends on DRM && PCI help - Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister - chipset. If M is selected the module will be called savage. + Choose this option if you have an ATI Rage Pro (mach64 chipset) + graphics card. Example cards include: 3D Rage Pro, Xpert 98, + 3D Rage LT Pro, 3D Rage XL/XC, and 3D Rage Mobility (P/M, M1). + Cards earlier than ATI Rage Pro (e.g. Rage II) are not supported. + If M is selected, the module will be called mach64. AGP support for + this card is strongly suggested (unless you have a PCI version). + + diff -Napur -X /home/jbarnes/dontdiff gpu/drm/Makefile /tmp/drm-master/drivers/gpu/drm/Makefile --- gpu/drm/Makefile 2008-08-21 13:52:27.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/Makefile 2008-08-21 14:34:09.000000000 -0700 @@ -1,26 +1,70 @@ # # Makefile for the drm device driver. This driver provides support for the # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. +# +# Based on David Woodhouse's mtd build. 
+# +# $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/Makefile.kernel,v 1.18 2003/08/16 17:59:17 dawes Exp $ +# -ccflags-y := -Iinclude/drm - -drm-y := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \ +drm-objs := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \ drm_drv.o drm_fops.o drm_ioctl.o drm_irq.o \ drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \ - drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \ - drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o - -drm-$(CONFIG_COMPAT) += drm_ioc32.o + drm_sysfs.o drm_pci.o drm_agpsupport.o drm_scatter.o \ + drm_memory_debug.o ati_pcigart.o drm_sman.o \ + drm_hashtab.o drm_mm.o drm_object.o drm_compat.o \ + drm_fence.o drm_ttm.o drm_bo.o drm_bo_move.o drm_bo_lock.o \ + drm_regman.o drm_vm_nopage_compat.o +tdfx-objs := tdfx_drv.o +r128-objs := r128_drv.o r128_cce.o r128_state.o r128_irq.o +mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o +i810-objs := i810_drv.o i810_dma.o +i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \ + i915_buffer.o i915_compat.o i915_execbuf.o i915_suspend.o \ + i915_opregion.o +nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \ + nouveau_object.o nouveau_irq.o nouveau_notifier.o nouveau_swmthd.o \ + nouveau_sgdma.o nouveau_dma.o nouveau_bo.o nouveau_fence.o \ + nv04_timer.o \ + nv04_mc.o nv40_mc.o nv50_mc.o \ + nv04_fb.o nv10_fb.o nv40_fb.o \ + nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \ + nv04_graph.o nv10_graph.o nv20_graph.o \ + nv40_graph.o nv50_graph.o \ + nv04_instmem.o nv50_instmem.o +radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o +sis-objs := sis_drv.o sis_mm.o +ffb-objs := ffb_drv.o ffb_context.o +savage-objs := savage_drv.o savage_bci.o savage_state.o +via-objs := via_irq.o via_drv.o via_map.o via_mm.o via_dma.o via_verifier.o \ + via_video.o via_dmablit.o via_fence.o via_buffer.o +mach64-objs := mach64_drv.o mach64_dma.o mach64_irq.o mach64_state.o +nv-objs := nv_drv.o +xgi-objs := xgi_cmdlist.o xgi_drv.o xgi_fb.o xgi_misc.o xgi_pcie.o \ + xgi_fence.o -obj-$(CONFIG_DRM) += drm.o -obj-$(CONFIG_DRM_TDFX) += tdfx/ -obj-$(CONFIG_DRM_R128) += r128/ -obj-$(CONFIG_DRM_RADEON)+= radeon/ -obj-$(CONFIG_DRM_MGA) += mga/ -obj-$(CONFIG_DRM_I810) += i810/ -obj-$(CONFIG_DRM_I830) += i830/ -obj-$(CONFIG_DRM_I915) += i915/ -obj-$(CONFIG_DRM_SIS) += sis/ -obj-$(CONFIG_DRM_SAVAGE)+= savage/ -obj-$(CONFIG_DRM_VIA) +=via/ +ifeq ($(CONFIG_COMPAT),y) +drm-objs += drm_ioc32.o +radeon-objs += radeon_ioc32.o +mga-objs += mga_ioc32.o +r128-objs += r128_ioc32.o +i915-objs += i915_ioc32.o +nouveau-objs += nouveau_ioc32.o +xgi-objs += xgi_ioc32.o +endif +obj-m += drm.o +obj-$(CONFIG_DRM_TDFX) += tdfx.o +obj-$(CONFIG_DRM_R128) += r128.o +obj-$(CONFIG_DRM_RADEON)+= radeon.o +obj-$(CONFIG_DRM_MGA) += mga.o +obj-$(CONFIG_DRM_I810) += i810.o +obj-$(CONFIG_DRM_I915) += i915.o +obj-$(CONFIG_DRM_SIS) += sis.o +obj-$(CONFIG_DRM_FFB) += ffb.o +obj-$(CONFIG_DRM_SAVAGE)+= savage.o +obj-$(CONFIG_DRM_VIA) += via.o +obj-$(CONFIG_DRM_MACH64)+= mach64.o +obj-$(CONFIG_DRM_NV) += nv.o +obj-$(CONFIG_DRM_NOUVEAU) += nouveau.o +obj-$(CONFIG_DRM_XGI) += xgi.o diff -Napur -X /home/jbarnes/dontdiff gpu/drm/mga/Makefile /tmp/drm-master/drivers/gpu/drm/mga/Makefile --- gpu/drm/mga/Makefile 2008-08-21 13:52:27.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/mga/Makefile 1969-12-31 16:00:00.000000000 -0800 @@ -1,11 +0,0 @@ -# -# Makefile for the drm device driver. 
This driver provides support for the -# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. - -ccflags-y := -Iinclude/drm -mga-y := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o - -mga-$(CONFIG_COMPAT) += mga_ioc32.o - -obj-$(CONFIG_DRM_MGA) += mga.o - diff -Napur -X /home/jbarnes/dontdiff gpu/drm/mga/mga_dma.c /tmp/drm-master/drivers/gpu/drm/mga/mga_dma.c --- gpu/drm/mga/mga_dma.c 2008-08-21 13:52:27.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/mga/mga_dma.c 2008-08-21 14:34:10.000000000 -0700 @@ -1,7 +1,7 @@ /* mga_dma.c -- DMA support for mga g200/g400 -*- linux-c -*- * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com - * - * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + */ +/* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * @@ -44,8 +44,8 @@ #define MGA_DEFAULT_USEC_TIMEOUT 10000 #define MGA_FREELIST_DEBUG 0 -#define MINIMAL_CLEANUP 0 -#define FULL_CLEANUP 1 +#define MINIMAL_CLEANUP 0 +#define FULL_CLEANUP 1 static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup); /* ================================================================ @@ -308,23 +308,6 @@ static void mga_freelist_cleanup(struct dev_priv->head = dev_priv->tail = NULL; } -#if 0 -/* FIXME: Still needed? - */ -static void mga_freelist_reset(struct drm_device * dev) -{ - struct drm_device_dma *dma = dev->dma; - struct drm_buf *buf; - drm_mga_buf_priv_t *buf_priv; - int i; - - for (i = 0; i < dma->buf_count; i++) { - buf = dma->buflist[i]; - buf_priv = buf->dev_private; - SET_AGE(&buf_priv->list_entry->age, MGA_BUFFER_FREE, 0); - } -} -#endif static struct drm_buf *mga_freelist_get(struct drm_device * dev) { @@ -393,7 +376,7 @@ int mga_freelist_put(struct drm_device * * DMA initialization, cleanup */ -int mga_driver_load(struct drm_device * dev, unsigned long flags) +int mga_driver_load(struct drm_device *dev, unsigned long flags) { drm_mga_private_t *dev_priv; @@ -418,7 +401,6 @@ int mga_driver_load(struct drm_device * return 0; } -#if __OS_HAS_AGP /** * Bootstrap the driver for AGP DMA. 
* @@ -434,16 +416,16 @@ int mga_driver_load(struct drm_device * * * \sa mga_do_dma_bootstrap, mga_do_pci_dma_bootstrap */ -static int mga_do_agp_dma_bootstrap(struct drm_device * dev, +static int mga_do_agp_dma_bootstrap(struct drm_device *dev, drm_mga_dma_bootstrap_t * dma_bs) { drm_mga_private_t *const dev_priv = - (drm_mga_private_t *) dev->dev_private; + (drm_mga_private_t *)dev->dev_private; unsigned int warp_size = mga_warp_microcode_size(dev_priv); int err; unsigned offset; const unsigned secondary_size = dma_bs->secondary_bin_count - * dma_bs->secondary_bin_size; + * dma_bs->secondary_bin_size; const unsigned agp_size = (dma_bs->agp_size << 20); struct drm_buf_desc req; struct drm_agp_mode mode; @@ -499,7 +481,7 @@ static int mga_do_agp_dma_bootstrap(stru bind_req.handle = agp_req.handle; bind_req.offset = 0; - err = drm_agp_bind(dev, &bind_req); + err = drm_agp_bind( dev, &bind_req ); if (err) { DRM_ERROR("Unable to bind AGP memory: %d\n", err); return err; @@ -521,7 +503,7 @@ static int mga_do_agp_dma_bootstrap(stru offset += warp_size; err = drm_addmap(dev, offset, dma_bs->primary_size, - _DRM_AGP, _DRM_READ_ONLY, &dev_priv->primary); + _DRM_AGP, _DRM_READ_ONLY, & dev_priv->primary); if (err) { DRM_ERROR("Unable to map primary DMA region: %d\n", err); return err; @@ -529,13 +511,13 @@ static int mga_do_agp_dma_bootstrap(stru offset += dma_bs->primary_size; err = drm_addmap(dev, offset, secondary_size, - _DRM_AGP, 0, &dev->agp_buffer_map); + _DRM_AGP, 0, & dev->agp_buffer_map); if (err) { DRM_ERROR("Unable to map secondary DMA region: %d\n", err); return err; } - (void)memset(&req, 0, sizeof(req)); + (void)memset( &req, 0, sizeof(req) ); req.count = dma_bs->secondary_bin_count; req.size = dma_bs->secondary_bin_size; req.flags = _DRM_AGP_BUFFER; @@ -563,9 +545,9 @@ static int mga_do_agp_dma_bootstrap(stru offset += secondary_size; err = drm_addmap(dev, offset, agp_size - offset, - _DRM_AGP, 0, &dev_priv->agp_textures); + _DRM_AGP, 0, & dev_priv->agp_textures); if (err) { - DRM_ERROR("Unable to map AGP texture region %d\n", err); + DRM_ERROR("Unable to map AGP texture region: %d\n", err); return err; } @@ -587,13 +569,6 @@ static int mga_do_agp_dma_bootstrap(stru DRM_INFO("Initialized card for AGP DMA.\n"); return 0; } -#else -static int mga_do_agp_dma_bootstrap(struct drm_device * dev, - drm_mga_dma_bootstrap_t * dma_bs) -{ - return -EINVAL; -} -#endif /** * Bootstrap the driver for PCI DMA. 
@@ -613,13 +588,14 @@ static int mga_do_pci_dma_bootstrap(stru drm_mga_dma_bootstrap_t * dma_bs) { drm_mga_private_t *const dev_priv = - (drm_mga_private_t *) dev->dev_private; + (drm_mga_private_t *) dev->dev_private; unsigned int warp_size = mga_warp_microcode_size(dev_priv); unsigned int primary_size; unsigned int bin_count; int err; struct drm_buf_desc req; + if (dev->dma == NULL) { DRM_ERROR("dev->dma is NULL\n"); return -EFAULT; @@ -646,7 +622,7 @@ static int mga_do_pci_dma_bootstrap(stru */ for (primary_size = dma_bs->primary_size; primary_size != 0; - primary_size >>= 1) { + primary_size >>= 1 ) { /* The proper alignment for this mapping is 0x04 */ err = drm_addmap(dev, 0, primary_size, _DRM_CONSISTENT, _DRM_READ_ONLY, &dev_priv->primary); @@ -667,7 +643,7 @@ static int mga_do_pci_dma_bootstrap(stru } for (bin_count = dma_bs->secondary_bin_count; bin_count > 0; - bin_count--) { + bin_count-- ) { (void)memset(&req, 0, sizeof(req)); req.count = bin_count; req.size = dma_bs->secondary_bin_size; @@ -699,13 +675,15 @@ static int mga_do_pci_dma_bootstrap(stru return 0; } -static int mga_do_dma_bootstrap(struct drm_device * dev, - drm_mga_dma_bootstrap_t * dma_bs) + +static int mga_do_dma_bootstrap(struct drm_device *dev, + drm_mga_dma_bootstrap_t *dma_bs) { const int is_agp = (dma_bs->agp_mode != 0) && drm_device_is_agp(dev); int err; drm_mga_private_t *const dev_priv = - (drm_mga_private_t *) dev->dev_private; + (drm_mga_private_t *) dev->dev_private; + dev_priv->used_new_dma_init = 1; @@ -713,25 +691,28 @@ static int mga_do_dma_bootstrap(struct d * the cards MMIO registers and map a status page. */ err = drm_addmap(dev, dev_priv->mmio_base, dev_priv->mmio_size, - _DRM_REGISTERS, _DRM_READ_ONLY, &dev_priv->mmio); + _DRM_REGISTERS, _DRM_READ_ONLY, & dev_priv->mmio); if (err) { DRM_ERROR("Unable to map MMIO region: %d\n", err); return err; } + err = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM, _DRM_READ_ONLY | _DRM_LOCKED | _DRM_KERNEL, - &dev_priv->status); + & dev_priv->status); if (err) { DRM_ERROR("Unable to map status region: %d\n", err); return err; } + /* The DMA initialization procedure is slightly different for PCI and * AGP cards. AGP cards just allocate a large block of AGP memory and * carve off portions of it for internal uses. The remaining memory * is returned to user-mode to be used for AGP textures. */ + if (is_agp) { err = mga_do_agp_dma_bootstrap(dev, dma_bs); } @@ -744,6 +725,7 @@ static int mga_do_dma_bootstrap(struct d mga_do_cleanup_dma(dev, MINIMAL_CLEANUP); } + /* Not only do we want to try and initialized PCI cards for PCI DMA, * but we also try to initialized AGP cards that could not be * initialized for AGP DMA. 
This covers the case where we have an AGP @@ -756,6 +738,7 @@ static int mga_do_dma_bootstrap(struct d err = mga_do_pci_dma_bootstrap(dev, dma_bs); } + return err; } @@ -768,6 +751,7 @@ int mga_dma_bootstrap(struct drm_device const drm_mga_private_t *const dev_priv = (drm_mga_private_t *) dev->dev_private; + err = mga_do_dma_bootstrap(dev, bootstrap); if (err) { mga_do_cleanup_dma(dev, FULL_CLEANUP); @@ -784,15 +768,17 @@ int mga_dma_bootstrap(struct drm_device bootstrap->agp_mode = modes[bootstrap->agp_mode & 0x07]; - return err; + return 0; } + static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init) { drm_mga_private_t *dev_priv; int ret; DRM_DEBUG("\n"); + dev_priv = dev->dev_private; if (init->sgram) { @@ -850,7 +836,7 @@ static int mga_do_init_dma(struct drm_de } dev->agp_buffer_token = init->buffers_offset; dev->agp_buffer_map = - drm_core_findmap(dev, init->buffers_offset); + drm_core_findmap(dev, init->buffers_offset); if (!dev->agp_buffer_map) { DRM_ERROR("failed to find dma buffer region!\n"); return -EINVAL; @@ -875,14 +861,14 @@ static int mga_do_init_dma(struct drm_de } ret = mga_warp_install_microcode(dev_priv); - if (ret < 0) { - DRM_ERROR("failed to install WARP ucode!: %d\n", ret); + if (ret != 0) { + DRM_ERROR("failed to install WARP ucode: %d!\n", ret); return ret; } ret = mga_warp_init(dev_priv); - if (ret < 0) { - DRM_ERROR("failed to init WARP engine!: %d\n", ret); + if (ret != 0) { + DRM_ERROR("failed to init WARP engine: %d!\n", ret); return ret; } @@ -893,10 +879,6 @@ static int mga_do_init_dma(struct drm_de /* Init the primary DMA registers. */ MGA_WRITE(MGA_PRIMADDRESS, dev_priv->primary->offset | MGA_DMA_GENERAL); -#if 0 - MGA_WRITE(MGA_PRIMPTR, virt_to_bus((void *)dev_priv->prim.status) | MGA_PRIMPTREN0 | /* Soft trap, SECEND, SETUPEND */ - MGA_PRIMPTREN1); /* DWGSYNC */ -#endif dev_priv->prim.start = (u8 *) dev_priv->primary->handle; dev_priv->prim.end = ((u8 *) dev_priv->primary->handle @@ -954,7 +936,6 @@ static int mga_do_cleanup_dma(struct drm drm_core_ioremapfree(dev->agp_buffer_map, dev); if (dev_priv->used_new_dma_init) { -#if __OS_HAS_AGP if (dev_priv->agp_handle != 0) { struct drm_agp_binding unbind_req; struct drm_agp_buffer free_req; @@ -973,7 +954,6 @@ static int mga_do_cleanup_dma(struct drm if ((dev->agp != NULL) && dev->agp->acquired) { err = drm_agp_release(dev); } -#endif } dev_priv->warp = NULL; diff -Napur -X /home/jbarnes/dontdiff gpu/drm/mga/mga_drv.c /tmp/drm-master/drivers/gpu/drm/mga/mga_drv.c --- gpu/drm/mga/mga_drv.c 2008-08-21 13:52:27.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/mga/mga_drv.c 2008-08-21 14:34:09.000000000 -0700 @@ -42,18 +42,20 @@ static struct pci_device_id pciidlist[] mga_PCI_IDS }; +static int probe(struct pci_dev *pdev, const struct pci_device_id *ent); static struct drm_driver driver = { .driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | - DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | - DRIVER_IRQ_VBL, - .dev_priv_size = sizeof(drm_mga_buf_priv_t), + DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED, + .dev_priv_size = sizeof (drm_mga_buf_priv_t), .load = mga_driver_load, .unload = mga_driver_unload, .lastclose = mga_driver_lastclose, .dma_quiescent = mga_driver_dma_quiescent, .device_is_agp = mga_driver_device_is_agp, - .vblank_wait = mga_driver_vblank_wait, + .get_vblank_counter = mga_get_vblank_counter, + .enable_vblank = mga_enable_vblank, + .disable_vblank = mga_disable_vblank, .irq_preinstall = mga_driver_irq_preinstall, .irq_postinstall = 
mga_driver_irq_postinstall, .irq_uninstall = mga_driver_irq_uninstall, @@ -64,20 +66,22 @@ static struct drm_driver driver = { .ioctls = mga_ioctls, .dma_ioctl = mga_dma_buffers, .fops = { - .owner = THIS_MODULE, - .open = drm_open, - .release = drm_release, - .ioctl = drm_ioctl, - .mmap = drm_mmap, - .poll = drm_poll, - .fasync = drm_fasync, -#ifdef CONFIG_COMPAT - .compat_ioctl = mga_compat_ioctl, + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .ioctl = drm_ioctl, + .mmap = drm_mmap, + .poll = drm_poll, + .fasync = drm_fasync, +#if defined(CONFIG_COMPAT) && LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9) + .compat_ioctl = mga_compat_ioctl, #endif - }, + }, .pci_driver = { - .name = DRIVER_NAME, - .id_table = pciidlist, + .name = DRIVER_NAME, + .id_table = pciidlist, + .probe = probe, + .remove = __devexit_p(drm_cleanup_pci), }, .name = DRIVER_NAME, @@ -88,10 +92,16 @@ static struct drm_driver driver = { .patchlevel = DRIVER_PATCHLEVEL, }; +static int probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + return drm_get_dev(pdev, ent, &driver); +} + + static int __init mga_init(void) { driver.num_ioctls = mga_max_ioctl; - return drm_init(&driver); + return drm_init(&driver, pciidlist); } static void __exit mga_exit(void) @@ -120,7 +130,8 @@ MODULE_LICENSE("GPL and additional right */ static int mga_driver_device_is_agp(struct drm_device * dev) { - const struct pci_dev *const pdev = dev->pdev; + const struct pci_dev * const pdev = dev->pdev; + /* There are PCI versions of the G450. These cards have the * same PCI ID as the AGP G450, but have an additional PCI-to-PCI diff -Napur -X /home/jbarnes/dontdiff gpu/drm/mga/mga_drv.h /tmp/drm-master/drivers/gpu/drm/mga/mga_drv.h --- gpu/drm/mga/mga_drv.h 2008-08-21 13:52:27.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/mga/mga_drv.h 2008-08-21 14:34:09.000000000 -0700 @@ -38,11 +38,11 @@ #define DRIVER_NAME "mga" #define DRIVER_DESC "Matrox G200/G400" -#define DRIVER_DATE "20051102" +#define DRIVER_DATE "20060319" #define DRIVER_MAJOR 3 #define DRIVER_MINOR 2 -#define DRIVER_PATCHLEVEL 1 +#define DRIVER_PATCHLEVEL 2 typedef struct drm_mga_primary_buffer { u8 *start; @@ -112,14 +112,15 @@ typedef struct drm_mga_private { * * \sa drm_mga_private_t::mmio */ - /*@{ */ - u32 mmio_base; /**< Bus address of base of MMIO. */ - u32 mmio_size; /**< Size of the MMIO region. */ - /*@} */ + /*@{*/ + u32 mmio_base; /**< Bus address of base of MMIO. */ + u32 mmio_size; /**< Size of the MMIO region. */ + /*@}*/ u32 clear_cmd; u32 maccess; + atomic_t vbl_received; /**< Number of vblanks received. 
*/ wait_queue_head_t fence_queue; atomic_t last_fence_retired; u32 next_fence_to_post; @@ -181,11 +182,14 @@ extern int mga_warp_install_microcode(dr extern int mga_warp_init(drm_mga_private_t * dev_priv); /* mga_irq.c */ +extern int mga_enable_vblank(struct drm_device *dev, int crtc); +extern void mga_disable_vblank(struct drm_device *dev, int crtc); +extern u32 mga_get_vblank_counter(struct drm_device *dev, int crtc); extern int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence); extern int mga_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence); extern irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS); extern void mga_driver_irq_preinstall(struct drm_device * dev); -extern void mga_driver_irq_postinstall(struct drm_device * dev); +extern int mga_driver_irq_postinstall(struct drm_device * dev); extern void mga_driver_irq_uninstall(struct drm_device * dev); extern long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); diff -Napur -X /home/jbarnes/dontdiff gpu/drm/mga/mga_ioc32.c /tmp/drm-master/drivers/gpu/drm/mga/mga_ioc32.c --- gpu/drm/mga/mga_ioc32.c 2008-08-21 13:52:27.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/mga/mga_ioc32.c 2008-08-21 14:34:09.000000000 -0700 @@ -1,3 +1,4 @@ + /** * \file mga_ioc32.c * @@ -90,25 +91,25 @@ static int compat_mga_init(struct file * || __put_user(init32.buffers_offset, &init->buffers_offset)) return -EFAULT; - for (i = 0; i < MGA_NR_TEX_HEAPS; i++) { - err |= - __put_user(init32.texture_offset[i], - &init->texture_offset[i]); - err |= - __put_user(init32.texture_size[i], &init->texture_size[i]); + for (i=0; i<MGA_NR_TEX_HEAPS; i++) { + err |= __put_user(init32.texture_offset[i], &init->texture_offset[i]); + err |= __put_user(init32.texture_size[i], &init->texture_size[i]); } if (err) return -EFAULT; - return drm_ioctl(file->f_path.dentry->d_inode, file, - DRM_IOCTL_MGA_INIT, (unsigned long)init); + return drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_MGA_INIT, (unsigned long) init); } + typedef struct drm_mga_getparam32 { int param; u32 value; } drm_mga_getparam32_t; + static int compat_mga_getparam(struct file *file, unsigned int cmd, unsigned long arg) { @@ -121,11 +122,10 @@ static int compat_mga_getparam(struct fi getparam = compat_alloc_user_space(sizeof(*getparam)); if (!access_ok(VERIFY_WRITE, getparam, sizeof(*getparam)) || __put_user(getparam32.param, &getparam->param) - || __put_user((void __user *)(unsigned long)getparam32.value, - &getparam->value)) + || __put_user((void __user *)(unsigned long)getparam32.value, &getparam->value)) return -EFAULT; - return drm_ioctl(file->f_path.dentry->d_inode, file, + return drm_ioctl(file->f_dentry->d_inode, file, DRM_IOCTL_MGA_GETPARAM, (unsigned long)getparam); } @@ -166,7 +166,7 @@ static int compat_mga_dma_bootstrap(stru || __put_user(dma_bootstrap32.agp_size, &dma_bootstrap->agp_size)) return -EFAULT; - err = drm_ioctl(file->f_path.dentry->d_inode, file, + err = drm_ioctl(file->f_dentry->d_inode, file, DRM_IOCTL_MGA_DMA_BOOTSTRAP, (unsigned long)dma_bootstrap); if (err) @@ -182,8 +182,10 @@ static int compat_mga_dma_bootstrap(stru &dma_bootstrap->secondary_bin_count) || __get_user(dma_bootstrap32.secondary_bin_size, &dma_bootstrap->secondary_bin_size) - || __get_user(dma_bootstrap32.agp_mode, &dma_bootstrap->agp_mode) - || __get_user(dma_bootstrap32.agp_size, &dma_bootstrap->agp_size)) + || __get_user(dma_bootstrap32.agp_mode, + &dma_bootstrap->agp_mode) + || __get_user(dma_bootstrap32.agp_size, + &dma_bootstrap->agp_size)) return -EFAULT; if (copy_to_user((void __user *)arg, &dma_bootstrap32, @@ -208,7
+210,8 @@ drm_ioctl_compat_t *mga_compat_ioctls[] * \param arg user argument. * \return zero on success or negative number on failure. */ -long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +long mga_compat_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) { unsigned int nr = DRM_IOCTL_NR(cmd); drm_ioctl_compat_t *fn = NULL; @@ -222,9 +225,9 @@ long mga_compat_ioctl(struct file *filp, lock_kernel(); /* XXX for now */ if (fn != NULL) - ret = (*fn) (filp, cmd, arg); + ret = (*fn)(filp, cmd, arg); else - ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg); + ret = drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg); unlock_kernel(); return ret; diff -Napur -X /home/jbarnes/dontdiff gpu/drm/mga/mga_irq.c /tmp/drm-master/drivers/gpu/drm/mga/mga_irq.c --- gpu/drm/mga/mga_irq.c 2008-08-21 13:52:27.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/mga/mga_irq.c 2008-08-21 14:34:09.000000000 -0700 @@ -1,5 +1,6 @@ /* mga_irq.c -- IRQ handling for radeon -*- linux-c -*- - * + */ +/* * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved. * * The Weather Channel (TM) funded Tungsten Graphics to develop the @@ -35,6 +36,20 @@ #include "mga_drm.h" #include "mga_drv.h" +u32 mga_get_vblank_counter(struct drm_device *dev, int crtc) +{ + const drm_mga_private_t *const dev_priv = + (drm_mga_private_t *) dev->dev_private; + + if (crtc != 0) { + return 0; + } + + + return atomic_read(&dev_priv->vbl_received); +} + + irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS) { struct drm_device *dev = (struct drm_device *) arg; @@ -47,9 +62,8 @@ irqreturn_t mga_driver_irq_handler(DRM_I /* VBLANK interrupt */ if (status & MGA_VLINEPEN) { MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR); - atomic_inc(&dev->vbl_received); - DRM_WAKEUP(&dev->vbl_queue); - drm_vbl_send_signals(dev); + atomic_inc(&dev_priv->vbl_received); + drm_handle_vblank(dev, 0); handled = 1; } @@ -58,6 +72,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I const u32 prim_start = MGA_READ(MGA_PRIMADDRESS); const u32 prim_end = MGA_READ(MGA_PRIMEND); + MGA_WRITE(MGA_ICLEAR, MGA_SOFTRAPICLR); /* In addition to clearing the interrupt-pending bit, we @@ -72,28 +87,39 @@ irqreturn_t mga_driver_irq_handler(DRM_I handled = 1; } - if (handled) { + if (handled) return IRQ_HANDLED; - } return IRQ_NONE; } -int mga_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence) +int mga_enable_vblank(struct drm_device *dev, int crtc) { - unsigned int cur_vblank; - int ret = 0; + drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; - /* Assume that the user has missed the current sequence number - * by about a day rather than she wants to wait for years - * using vertical blanks... - */ - DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ, - (((cur_vblank = atomic_read(&dev->vbl_received)) - - *sequence) <= (1 << 23))); + if (crtc != 0) { + DRM_ERROR("tried to enable vblank on non-existent crtc %d\n", + crtc); + return 0; + } - *sequence = cur_vblank; + MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN); + return 0; +} - return ret; + +void mga_disable_vblank(struct drm_device *dev, int crtc) +{ + if (crtc != 0) { + DRM_ERROR("tried to disable vblank on non-existent crtc %d\n", + crtc); + } + + /* Do *NOT* disable the vertical refresh interrupt. MGA doesn't have + * a nice hardware counter that tracks the number of refreshes when + * the interrupt is disabled, and the kernel doesn't know the refresh + * rate to calculate an estimate. 
+ */ + /* MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN); */ } int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence) @@ -125,14 +151,22 @@ void mga_driver_irq_preinstall(struct dr MGA_WRITE(MGA_ICLEAR, ~0); } -void mga_driver_irq_postinstall(struct drm_device * dev) +int mga_driver_irq_postinstall(struct drm_device * dev) { drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; + int ret; + + ret = drm_vblank_init(dev, 1); + if (ret) + return ret; DRM_INIT_WAITQUEUE(&dev_priv->fence_queue); - /* Turn on vertical blank interrupt and soft trap interrupt. */ - MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN); + /* Turn on soft trap interrupt. Vertical blank interrupts are enabled + * in mga_enable_vblank. + */ + MGA_WRITE(MGA_IEN, MGA_SOFTRAPEN); + return 0; } void mga_driver_irq_uninstall(struct drm_device * dev) diff -Napur -X /home/jbarnes/dontdiff gpu/drm/mga/mga_state.c /tmp/drm-master/drivers/gpu/drm/mga/mga_state.c --- gpu/drm/mga/mga_state.c 2008-08-21 13:52:27.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/mga/mga_state.c 2008-08-21 14:34:09.000000000 -0700 @@ -1,6 +1,7 @@ /* mga_state.c -- State support for MGA G200/G400 -*- linux-c -*- * Created: Thu Jan 27 02:53:43 2000 by jhartmann@precisioninsight.com - * + */ +/* * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. @@ -99,19 +100,23 @@ static __inline__ void mga_g400_emit_con DMA_BLOCK(MGA_DSTORG, ctx->dstorg, MGA_MACCESS, ctx->maccess, - MGA_PLNWT, ctx->plnwt, MGA_DWGCTL, ctx->dwgctl); + MGA_PLNWT, ctx->plnwt, + MGA_DWGCTL, ctx->dwgctl); DMA_BLOCK(MGA_ALPHACTRL, ctx->alphactrl, MGA_FOGCOL, ctx->fogcolor, - MGA_WFLAG, ctx->wflag, MGA_ZORG, dev_priv->depth_offset); + MGA_WFLAG, ctx->wflag, + MGA_ZORG, dev_priv->depth_offset); DMA_BLOCK(MGA_WFLAG1, ctx->wflag, MGA_TDUALSTAGE0, ctx->tdualstage0, - MGA_TDUALSTAGE1, ctx->tdualstage1, MGA_FCOL, ctx->fcol); + MGA_TDUALSTAGE1, ctx->tdualstage1, + MGA_FCOL, ctx->fcol); DMA_BLOCK(MGA_STENCIL, ctx->stencil, MGA_STENCILCTL, ctx->stencilctl, - MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000); + MGA_DMAPAD, 0x00000000, + MGA_DMAPAD, 0x00000000); ADVANCE_DMA(); } @@ -131,15 +136,18 @@ static __inline__ void mga_g200_emit_tex DMA_BLOCK(MGA_TEXORG, tex->texorg, MGA_TEXORG1, tex->texorg1, - MGA_TEXORG2, tex->texorg2, MGA_TEXORG3, tex->texorg3); + MGA_TEXORG2, tex->texorg2, + MGA_TEXORG3, tex->texorg3); DMA_BLOCK(MGA_TEXORG4, tex->texorg4, MGA_TEXWIDTH, tex->texwidth, - MGA_TEXHEIGHT, tex->texheight, MGA_WR24, tex->texwidth); + MGA_TEXHEIGHT, tex->texheight, + MGA_WR24, tex->texwidth); DMA_BLOCK(MGA_WR34, tex->texheight, MGA_TEXTRANS, 0x0000ffff, - MGA_TEXTRANSHIGH, 0x0000ffff, MGA_DMAPAD, 0x00000000); + MGA_TEXTRANSHIGH, 0x0000ffff, + MGA_DMAPAD, 0x00000000); ADVANCE_DMA(); } @@ -162,15 +170,18 @@ static __inline__ void mga_g400_emit_tex DMA_BLOCK(MGA_TEXORG, tex->texorg, MGA_TEXORG1, tex->texorg1, - MGA_TEXORG2, tex->texorg2, MGA_TEXORG3, tex->texorg3); + MGA_TEXORG2, tex->texorg2, + MGA_TEXORG3, tex->texorg3); DMA_BLOCK(MGA_TEXORG4, tex->texorg4, MGA_TEXWIDTH, tex->texwidth, - MGA_TEXHEIGHT, tex->texheight, MGA_WR49, 0x00000000); + MGA_TEXHEIGHT, tex->texheight, + MGA_WR49, 0x00000000); DMA_BLOCK(MGA_WR57, 0x00000000, MGA_WR53, 0x00000000, - MGA_WR61, 0x00000000, MGA_WR52, MGA_G400_WR_MAGIC); + MGA_WR61, 0x00000000, + MGA_WR52, MGA_G400_WR_MAGIC); DMA_BLOCK(MGA_WR60, MGA_G400_WR_MAGIC, MGA_WR54, tex->texwidth | MGA_G400_WR_MAGIC, @@ -179,7 +190,8 @@ static 
__inline__ void mga_g400_emit_tex DMA_BLOCK(MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000, - MGA_TEXTRANS, 0x0000ffff, MGA_TEXTRANSHIGH, 0x0000ffff); + MGA_TEXTRANS, 0x0000ffff, + MGA_TEXTRANSHIGH, 0x0000ffff); ADVANCE_DMA(); } @@ -204,11 +216,13 @@ static __inline__ void mga_g400_emit_tex DMA_BLOCK(MGA_TEXORG, tex->texorg, MGA_TEXORG1, tex->texorg1, - MGA_TEXORG2, tex->texorg2, MGA_TEXORG3, tex->texorg3); + MGA_TEXORG2, tex->texorg2, + MGA_TEXORG3, tex->texorg3); DMA_BLOCK(MGA_TEXORG4, tex->texorg4, MGA_TEXWIDTH, tex->texwidth, - MGA_TEXHEIGHT, tex->texheight, MGA_WR49, 0x00000000); + MGA_TEXHEIGHT, tex->texheight, + MGA_WR49, 0x00000000); DMA_BLOCK(MGA_WR57, 0x00000000, MGA_WR53, 0x00000000, @@ -233,11 +247,13 @@ static __inline__ void mga_g200_emit_pip DMA_BLOCK(MGA_WIADDR, MGA_WMODE_SUSPEND, MGA_WVRTXSZ, 0x00000007, - MGA_WFLAG, 0x00000000, MGA_WR24, 0x00000000); + MGA_WFLAG, 0x00000000, + MGA_WR24, 0x00000000); DMA_BLOCK(MGA_WR25, 0x00000100, MGA_WR34, 0x00000000, - MGA_WR42, 0x0000ffff, MGA_WR60, 0x0000ffff); + MGA_WR42, 0x0000ffff, + MGA_WR60, 0x0000ffff); /* Padding required to to hardware bug. */ @@ -262,12 +278,14 @@ static __inline__ void mga_g400_emit_pip DMA_BLOCK(MGA_WIADDR2, MGA_WMODE_SUSPEND, MGA_DMAPAD, 0x00000000, - MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000); + MGA_DMAPAD, 0x00000000, + MGA_DMAPAD, 0x00000000); if (pipe & MGA_T2) { DMA_BLOCK(MGA_WVRTXSZ, 0x00001e09, MGA_DMAPAD, 0x00000000, - MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000); + MGA_DMAPAD, 0x00000000, + MGA_DMAPAD, 0x00000000); DMA_BLOCK(MGA_WACCEPTSEQ, 0x00000000, MGA_WACCEPTSEQ, 0x00000000, @@ -295,7 +313,8 @@ static __inline__ void mga_g400_emit_pip DMA_BLOCK(MGA_WVRTXSZ, 0x00001807, MGA_DMAPAD, 0x00000000, - MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000); + MGA_DMAPAD, 0x00000000, + MGA_DMAPAD, 0x00000000); DMA_BLOCK(MGA_WACCEPTSEQ, 0x00000000, MGA_WACCEPTSEQ, 0x00000000, @@ -305,7 +324,8 @@ static __inline__ void mga_g400_emit_pip DMA_BLOCK(MGA_WFLAG, 0x00000000, MGA_WFLAG1, 0x00000000, - MGA_WR56, MGA_G400_WR56_MAGIC, MGA_DMAPAD, 0x00000000); + MGA_WR56, MGA_G400_WR56_MAGIC, + MGA_DMAPAD, 0x00000000); DMA_BLOCK(MGA_WR49, 0x00000000, /* tex0 */ MGA_WR57, 0x00000000, /* tex0 */ @@ -495,7 +515,8 @@ static void mga_dma_dispatch_clear(struc DMA_BLOCK(MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000, - MGA_DWGSYNC, 0x00007100, MGA_DWGSYNC, 0x00007000); + MGA_DWGSYNC, 0x00007100, + MGA_DWGSYNC, 0x00007000); ADVANCE_DMA(); @@ -561,7 +582,8 @@ static void mga_dma_dispatch_clear(struc /* Force reset of DWGCTL */ DMA_BLOCK(MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000, - MGA_PLNWT, ctx->plnwt, MGA_DWGCTL, ctx->dwgctl); + MGA_PLNWT, ctx->plnwt, + MGA_DWGCTL, ctx->dwgctl); ADVANCE_DMA(); @@ -586,7 +608,8 @@ static void mga_dma_dispatch_swap(struct DMA_BLOCK(MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000, - MGA_DWGSYNC, 0x00007100, MGA_DWGSYNC, 0x00007000); + MGA_DWGSYNC, 0x00007100, + MGA_DWGSYNC, 0x00007000); DMA_BLOCK(MGA_DSTORG, dev_priv->front_offset, MGA_MACCESS, dev_priv->maccess, @@ -595,7 +618,8 @@ static void mga_dma_dispatch_swap(struct DMA_BLOCK(MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000, - MGA_PLNWT, 0xffffffff, MGA_DWGCTL, MGA_DWGCTL_COPY); + MGA_PLNWT, 0xffffffff, + MGA_DWGCTL, MGA_DWGCTL_COPY); for (i = 0; i < nbox; i++) { struct drm_clip_rect *box = &pbox[i]; @@ -613,7 +637,8 @@ static void mga_dma_dispatch_swap(struct DMA_BLOCK(MGA_DMAPAD, 0x00000000, MGA_PLNWT, ctx->plnwt, - MGA_SRCORG, dev_priv->front_offset, MGA_DWGCTL, ctx->dwgctl); + MGA_SRCORG, dev_priv->front_offset, + MGA_DWGCTL, 
ctx->dwgctl); ADVANCE_DMA(); @@ -724,8 +749,7 @@ static void mga_dma_dispatch_iload(struc drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_buf_priv_t *buf_priv = buf->dev_private; drm_mga_context_regs_t *ctx = &dev_priv->sarea_priv->context_state; - u32 srcorg = - buf->bus_address | dev_priv->dma_access | MGA_SRCMAP_SYSMEM; + u32 srcorg = buf->bus_address | dev_priv->dma_access | MGA_SRCMAP_SYSMEM; u32 y2; DMA_LOCALS; DRM_DEBUG("buf=%d used=%d\n", buf->idx, buf->used); @@ -736,22 +760,28 @@ static void mga_dma_dispatch_iload(struc DMA_BLOCK(MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000, - MGA_DWGSYNC, 0x00007100, MGA_DWGSYNC, 0x00007000); + MGA_DWGSYNC, 0x00007100, + MGA_DWGSYNC, 0x00007000); DMA_BLOCK(MGA_DSTORG, dstorg, - MGA_MACCESS, 0x00000000, MGA_SRCORG, srcorg, MGA_AR5, 64); + MGA_MACCESS, 0x00000000, + MGA_SRCORG, srcorg, + MGA_AR5, 64); DMA_BLOCK(MGA_PITCH, 64, MGA_PLNWT, 0xffffffff, - MGA_DMAPAD, 0x00000000, MGA_DWGCTL, MGA_DWGCTL_COPY); + MGA_DMAPAD, 0x00000000, + MGA_DWGCTL, MGA_DWGCTL_COPY); DMA_BLOCK(MGA_AR0, 63, MGA_AR3, 0, - MGA_FXBNDRY, (63 << 16) | 0, MGA_YDSTLEN + MGA_EXEC, y2); + MGA_FXBNDRY, (63 << 16) | 0, + MGA_YDSTLEN + MGA_EXEC, y2); DMA_BLOCK(MGA_PLNWT, ctx->plnwt, MGA_SRCORG, dev_priv->front_offset, - MGA_PITCH, dev_priv->front_pitch, MGA_DWGSYNC, 0x00007000); + MGA_PITCH, dev_priv->front_pitch, + MGA_DWGSYNC, 0x00007000); ADVANCE_DMA(); @@ -781,11 +811,13 @@ static void mga_dma_dispatch_blit(struct DMA_BLOCK(MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000, - MGA_DWGSYNC, 0x00007100, MGA_DWGSYNC, 0x00007000); + MGA_DWGSYNC, 0x00007100, + MGA_DWGSYNC, 0x00007000); DMA_BLOCK(MGA_DWGCTL, MGA_DWGCTL_COPY, MGA_PLNWT, blit->planemask, - MGA_SRCORG, blit->srcorg, MGA_DSTORG, blit->dstorg); + MGA_SRCORG, blit->srcorg, + MGA_DSTORG, blit->dstorg); DMA_BLOCK(MGA_SGN, scandir, MGA_MACCESS, dev_priv->maccess, @@ -819,7 +851,8 @@ static void mga_dma_dispatch_blit(struct /* Force reset of DWGCTL */ DMA_BLOCK(MGA_DMAPAD, 0x00000000, MGA_PLNWT, ctx->plnwt, - MGA_PITCH, dev_priv->front_pitch, MGA_DWGCTL, ctx->dwgctl); + MGA_PITCH, dev_priv->front_pitch, + MGA_DWGCTL, ctx->dwgctl); ADVANCE_DMA(); } @@ -952,13 +985,6 @@ static int mga_dma_iload(struct drm_devi LOCK_TEST_WITH_RETURN(dev, file_priv); -#if 0 - if (mga_do_wait_for_idle(dev_priv) < 0) { - if (MGA_DMA_DEBUG) - DRM_INFO("-EBUSY\n"); - return -EBUSY; - } -#endif if (iload->idx < 0 || iload->idx > dma->buf_count) return -EINVAL; @@ -1062,14 +1088,14 @@ static int mga_set_fence(struct drm_devi BEGIN_DMA(1); DMA_BLOCK(MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000, - MGA_DMAPAD, 0x00000000, MGA_SOFTRAP, 0x00000000); + MGA_DMAPAD, 0x00000000, + MGA_SOFTRAP, 0x00000000); ADVANCE_DMA(); return 0; } -static int mga_wait_fence(struct drm_device *dev, void *data, struct drm_file * -file_priv) +static int mga_wait_fence(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_mga_private_t *dev_priv = dev->dev_private; u32 *fence = data; @@ -1082,6 +1108,7 @@ file_priv) DRM_DEBUG("pid=%d\n", DRM_CURRENTPID); mga_driver_fence_wait(dev, fence); + return 0; } @@ -1099,6 +1126,7 @@ struct drm_ioctl_desc mga_ioctls[] = { DRM_IOCTL_DEF(DRM_MGA_SET_FENCE, mga_set_fence, DRM_AUTH), DRM_IOCTL_DEF(DRM_MGA_WAIT_FENCE, mga_wait_fence, DRM_AUTH), DRM_IOCTL_DEF(DRM_MGA_DMA_BOOTSTRAP, mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + }; int mga_max_ioctl = DRM_ARRAY_SIZE(mga_ioctls); diff -Napur -X /home/jbarnes/dontdiff gpu/drm/mga/mga_warp.c /tmp/drm-master/drivers/gpu/drm/mga/mga_warp.c --- gpu/drm/mga/mga_warp.c 
2008-08-21 13:52:27.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/mga/mga_warp.c 2008-08-21 14:34:09.000000000 -0700 @@ -1,6 +1,7 @@ /* mga_warp.c -- Matrox G200/G400 WARP engine management -*- linux-c -*- * Created: Thu Jan 11 21:29:32 2001 by gareth@valinux.com - * + */ +/* * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * @@ -48,30 +49,33 @@ do { \ } while (0) static const unsigned int mga_warp_g400_microcode_size = - (WARP_UCODE_SIZE(warp_g400_tgz) + - WARP_UCODE_SIZE(warp_g400_tgza) + - WARP_UCODE_SIZE(warp_g400_tgzaf) + - WARP_UCODE_SIZE(warp_g400_tgzf) + - WARP_UCODE_SIZE(warp_g400_tgzs) + - WARP_UCODE_SIZE(warp_g400_tgzsa) + - WARP_UCODE_SIZE(warp_g400_tgzsaf) + - WARP_UCODE_SIZE(warp_g400_tgzsf) + - WARP_UCODE_SIZE(warp_g400_t2gz) + - WARP_UCODE_SIZE(warp_g400_t2gza) + - WARP_UCODE_SIZE(warp_g400_t2gzaf) + - WARP_UCODE_SIZE(warp_g400_t2gzf) + - WARP_UCODE_SIZE(warp_g400_t2gzs) + - WARP_UCODE_SIZE(warp_g400_t2gzsa) + - WARP_UCODE_SIZE(warp_g400_t2gzsaf) + WARP_UCODE_SIZE(warp_g400_t2gzsf)); + (WARP_UCODE_SIZE(warp_g400_tgz) + + WARP_UCODE_SIZE(warp_g400_tgza) + + WARP_UCODE_SIZE(warp_g400_tgzaf) + + WARP_UCODE_SIZE(warp_g400_tgzf) + + WARP_UCODE_SIZE(warp_g400_tgzs) + + WARP_UCODE_SIZE(warp_g400_tgzsa) + + WARP_UCODE_SIZE(warp_g400_tgzsaf) + + WARP_UCODE_SIZE(warp_g400_tgzsf) + + WARP_UCODE_SIZE(warp_g400_t2gz) + + WARP_UCODE_SIZE(warp_g400_t2gza) + + WARP_UCODE_SIZE(warp_g400_t2gzaf) + + WARP_UCODE_SIZE(warp_g400_t2gzf) + + WARP_UCODE_SIZE(warp_g400_t2gzs) + + WARP_UCODE_SIZE(warp_g400_t2gzsa) + + WARP_UCODE_SIZE(warp_g400_t2gzsaf) + + WARP_UCODE_SIZE(warp_g400_t2gzsf)); static const unsigned int mga_warp_g200_microcode_size = - (WARP_UCODE_SIZE(warp_g200_tgz) + - WARP_UCODE_SIZE(warp_g200_tgza) + - WARP_UCODE_SIZE(warp_g200_tgzaf) + - WARP_UCODE_SIZE(warp_g200_tgzf) + - WARP_UCODE_SIZE(warp_g200_tgzs) + - WARP_UCODE_SIZE(warp_g200_tgzsa) + - WARP_UCODE_SIZE(warp_g200_tgzsaf) + WARP_UCODE_SIZE(warp_g200_tgzsf)); + (WARP_UCODE_SIZE(warp_g200_tgz) + + WARP_UCODE_SIZE(warp_g200_tgza) + + WARP_UCODE_SIZE(warp_g200_tgzaf) + + WARP_UCODE_SIZE(warp_g200_tgzf) + + WARP_UCODE_SIZE(warp_g200_tgzs) + + WARP_UCODE_SIZE(warp_g200_tgzsa) + + WARP_UCODE_SIZE(warp_g200_tgzsaf) + + WARP_UCODE_SIZE(warp_g200_tgzsf)); + unsigned int mga_warp_microcode_size(const drm_mga_private_t * dev_priv) { @@ -82,6 +86,7 @@ unsigned int mga_warp_microcode_size(con case MGA_CARD_TYPE_G200: return PAGE_ALIGN(mga_warp_g200_microcode_size); default: + DRM_ERROR("Unknown chipset value: 0x%x\n", dev_priv->chipset); return 0; } } diff -Napur -X /home/jbarnes/dontdiff gpu/drm/r128/Makefile /tmp/drm-master/drivers/gpu/drm/r128/Makefile --- gpu/drm/r128/Makefile 2008-08-21 13:52:27.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/r128/Makefile 1969-12-31 16:00:00.000000000 -0800 @@ -1,10 +0,0 @@ -# -# Makefile for the drm device driver. This driver provides support for the -# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. 
- -ccflags-y := -Iinclude/drm -r128-y := r128_drv.o r128_cce.o r128_state.o r128_irq.o - -r128-$(CONFIG_COMPAT) += r128_ioc32.o - -obj-$(CONFIG_DRM_R128) += r128.o diff -Napur -X /home/jbarnes/dontdiff gpu/drm/r128/r128_cce.c /tmp/drm-master/drivers/gpu/drm/r128/r128_cce.c --- gpu/drm/r128/r128_cce.c 2008-08-21 13:52:27.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/r128/r128_cce.c 2008-08-21 14:34:09.000000000 -0700 @@ -325,7 +325,7 @@ static void r128_cce_init_ring_buffer(st else #endif ring_start = dev_priv->cce_ring->offset - - (unsigned long)dev->sg->virtual; + (unsigned long)dev->sg->virtual; R128_WRITE(R128_PM4_BUFFER_OFFSET, ring_start | R128_AGP_OFFSET); @@ -612,10 +612,8 @@ int r128_do_cleanup_cce(struct drm_devic #endif { if (dev_priv->gart_info.bus_addr) - if (!drm_ati_pcigart_cleanup(dev, - &dev_priv->gart_info)) - DRM_ERROR - ("failed to cleanup PCI GART!\n"); + if (!drm_ati_pcigart_cleanup(dev, &dev_priv->gart_info)) + DRM_ERROR("failed to cleanup PCI GART!\n"); } drm_free(dev->dev_private, sizeof(drm_r128_private_t), @@ -757,52 +755,6 @@ int r128_fullscreen(struct drm_device *d #define R128_BUFFER_USED 0xffffffff #define R128_BUFFER_FREE 0 -#if 0 -static int r128_freelist_init(struct drm_device * dev) -{ - struct drm_device_dma *dma = dev->dma; - drm_r128_private_t *dev_priv = dev->dev_private; - struct drm_buf *buf; - drm_r128_buf_priv_t *buf_priv; - drm_r128_freelist_t *entry; - int i; - - dev_priv->head = drm_alloc(sizeof(drm_r128_freelist_t), DRM_MEM_DRIVER); - if (dev_priv->head == NULL) - return -ENOMEM; - - memset(dev_priv->head, 0, sizeof(drm_r128_freelist_t)); - dev_priv->head->age = R128_BUFFER_USED; - - for (i = 0; i < dma->buf_count; i++) { - buf = dma->buflist[i]; - buf_priv = buf->dev_private; - - entry = drm_alloc(sizeof(drm_r128_freelist_t), DRM_MEM_DRIVER); - if (!entry) - return -ENOMEM; - - entry->age = R128_BUFFER_FREE; - entry->buf = buf; - entry->prev = dev_priv->head; - entry->next = dev_priv->head->next; - if (!entry->next) - dev_priv->tail = entry; - - buf_priv->discard = 0; - buf_priv->dispatched = 0; - buf_priv->list_entry = entry; - - dev_priv->head->next = entry; - - if (dev_priv->head->next) - dev_priv->head->next->prev = entry; - } - - return 0; - -} -#endif static struct drm_buf *r128_freelist_get(struct drm_device * dev) { @@ -817,7 +769,7 @@ static struct drm_buf *r128_freelist_get for (i = 0; i < dma->buf_count; i++) { buf = dma->buflist[i]; buf_priv = buf->dev_private; - if (!buf->file_priv) + if (buf->file_priv == 0) return buf; } diff -Napur -X /home/jbarnes/dontdiff gpu/drm/r128/r128_drv.c /tmp/drm-master/drivers/gpu/drm/r128/r128_drv.c --- gpu/drm/r128/r128_drv.c 2008-08-21 13:52:27.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/r128/r128_drv.c 2008-08-21 14:34:09.000000000 -0700 @@ -40,15 +40,17 @@ static struct pci_device_id pciidlist[] r128_PCI_IDS }; +static int probe(struct pci_dev *pdev, const struct pci_device_id *ent); static struct drm_driver driver = { .driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | - DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | - DRIVER_IRQ_VBL, + DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED, .dev_priv_size = sizeof(drm_r128_buf_priv_t), .preclose = r128_driver_preclose, .lastclose = r128_driver_lastclose, - .vblank_wait = r128_driver_vblank_wait, + .get_vblank_counter = r128_get_vblank_counter, + .enable_vblank = r128_enable_vblank, + .disable_vblank = r128_disable_vblank, .irq_preinstall = r128_driver_irq_preinstall, .irq_postinstall = 
r128_driver_irq_postinstall, .irq_uninstall = r128_driver_irq_uninstall, @@ -59,21 +61,22 @@ static struct drm_driver driver = { .ioctls = r128_ioctls, .dma_ioctl = r128_cce_buffers, .fops = { - .owner = THIS_MODULE, - .open = drm_open, - .release = drm_release, - .ioctl = drm_ioctl, - .mmap = drm_mmap, - .poll = drm_poll, - .fasync = drm_fasync, -#ifdef CONFIG_COMPAT - .compat_ioctl = r128_compat_ioctl, + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .ioctl = drm_ioctl, + .mmap = drm_mmap, + .poll = drm_poll, + .fasync = drm_fasync, +#if defined(CONFIG_COMPAT) && LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9) + .compat_ioctl = r128_compat_ioctl, #endif - }, - + }, .pci_driver = { - .name = DRIVER_NAME, - .id_table = pciidlist, + .name = DRIVER_NAME, + .id_table = pciidlist, + .probe = probe, + .remove = __devexit_p(drm_cleanup_pci), }, .name = DRIVER_NAME, @@ -84,10 +87,17 @@ static struct drm_driver driver = { .patchlevel = DRIVER_PATCHLEVEL, }; +static int probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + return drm_get_dev(pdev, ent, &driver); +} + + static int __init r128_init(void) { driver.num_ioctls = r128_max_ioctl; - return drm_init(&driver); + + return drm_init(&driver, pciidlist); } static void __exit r128_exit(void) diff -Napur -X /home/jbarnes/dontdiff gpu/drm/r128/r128_drv.h /tmp/drm-master/drivers/gpu/drm/r128/r128_drv.h --- gpu/drm/r128/r128_drv.h 2008-08-21 13:52:27.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/r128/r128_drv.h 2008-08-21 14:34:09.000000000 -0700 @@ -29,7 +29,7 @@ * Rickard E. (Rik) Faith * Kevin E. Martin * Gareth Hughes - * Michel Dänzer + * Michel D�zer */ #ifndef __R128_DRV_H__ @@ -97,6 +97,8 @@ typedef struct drm_r128_private { u32 crtc_offset; u32 crtc_offset_cntl; + atomic_t vbl_received; + u32 color_fmt; unsigned int front_offset; unsigned int front_pitch; @@ -149,11 +151,12 @@ extern int r128_wait_ring(drm_r128_priva extern int r128_do_cce_idle(drm_r128_private_t * dev_priv); extern int r128_do_cleanup_cce(struct drm_device * dev); -extern int r128_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence); - +extern int r128_enable_vblank(struct drm_device *dev, int crtc); +extern void r128_disable_vblank(struct drm_device *dev, int crtc); +extern u32 r128_get_vblank_counter(struct drm_device *dev, int crtc); extern irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS); extern void r128_driver_irq_preinstall(struct drm_device * dev); -extern void r128_driver_irq_postinstall(struct drm_device * dev); +extern int r128_driver_irq_postinstall(struct drm_device * dev); extern void r128_driver_irq_uninstall(struct drm_device * dev); extern void r128_driver_lastclose(struct drm_device * dev); extern void r128_driver_preclose(struct drm_device * dev, diff -Napur -X /home/jbarnes/dontdiff gpu/drm/r128/r128_ioc32.c /tmp/drm-master/drivers/gpu/drm/r128/r128_ioc32.c --- gpu/drm/r128/r128_ioc32.c 2008-08-21 13:52:27.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/r128/r128_ioc32.c 2008-08-21 14:34:09.000000000 -0700 @@ -95,10 +95,11 @@ static int compat_r128_init(struct file &init->agp_textures_offset)) return -EFAULT; - return drm_ioctl(file->f_path.dentry->d_inode, file, + return drm_ioctl(file->f_dentry->d_inode, file, DRM_IOCTL_R128_INIT, (unsigned long)init); } + typedef struct drm_r128_depth32 { int func; int n; @@ -129,7 +130,7 @@ static int compat_r128_depth(struct file &depth->mask)) return -EFAULT; - return drm_ioctl(file->f_path.dentry->d_inode, file, + return drm_ioctl(file->f_dentry->d_inode, 
file, DRM_IOCTL_R128_DEPTH, (unsigned long)depth); } @@ -153,7 +154,7 @@ static int compat_r128_stipple(struct fi &stipple->mask)) return -EFAULT; - return drm_ioctl(file->f_path.dentry->d_inode, file, + return drm_ioctl(file->f_dentry->d_inode, file, DRM_IOCTL_R128_STIPPLE, (unsigned long)stipple); } @@ -178,7 +179,7 @@ static int compat_r128_getparam(struct f &getparam->value)) return -EFAULT; - return drm_ioctl(file->f_path.dentry->d_inode, file, + return drm_ioctl(file->f_dentry->d_inode, file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam); } @@ -212,9 +213,9 @@ long r128_compat_ioctl(struct file *filp lock_kernel(); /* XXX for now */ if (fn != NULL) - ret = (*fn) (filp, cmd, arg); + ret = (*fn)(filp, cmd, arg); else - ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg); + ret = drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg); unlock_kernel(); return ret; diff -Napur -X /home/jbarnes/dontdiff gpu/drm/r128/r128_irq.c /tmp/drm-master/drivers/gpu/drm/r128/r128_irq.c --- gpu/drm/r128/r128_irq.c 2008-08-21 13:52:27.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/r128/r128_irq.c 2008-08-21 14:34:09.000000000 -0700 @@ -35,6 +35,16 @@ #include "r128_drm.h" #include "r128_drv.h" +u32 r128_get_vblank_counter(struct drm_device *dev, int crtc) +{ + const drm_r128_private_t *dev_priv = dev->dev_private; + + if (crtc != 0) + return 0; + + return atomic_read(&dev_priv->vbl_received); +} + irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS) { struct drm_device *dev = (struct drm_device *) arg; @@ -46,30 +56,38 @@ irqreturn_t r128_driver_irq_handler(DRM_ /* VBLANK interrupt */ if (status & R128_CRTC_VBLANK_INT) { R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK); - atomic_inc(&dev->vbl_received); - DRM_WAKEUP(&dev->vbl_queue); - drm_vbl_send_signals(dev); + atomic_inc(&dev_priv->vbl_received); + drm_handle_vblank(dev, 0); return IRQ_HANDLED; } return IRQ_NONE; } -int r128_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence) +int r128_enable_vblank(struct drm_device *dev, int crtc) { - unsigned int cur_vblank; - int ret = 0; + drm_r128_private_t *dev_priv = dev->dev_private; - /* Assume that the user has missed the current sequence number - * by about a day rather than she wants to wait for years - * using vertical blanks... 
- */ - DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ, - (((cur_vblank = atomic_read(&dev->vbl_received)) - - *sequence) <= (1 << 23))); + if (crtc != 0) { + DRM_ERROR("%s: bad crtc %d\n", __FUNCTION__, crtc); + return -EINVAL; + } - *sequence = cur_vblank; + R128_WRITE(R128_GEN_INT_CNTL, R128_CRTC_VBLANK_INT_EN); + return 0; +} + +void r128_disable_vblank(struct drm_device *dev, int crtc) +{ + if (crtc != 0) + DRM_ERROR("%s: bad crtc %d\n", __FUNCTION__, crtc); - return ret; + /* + * FIXME: implement proper interrupt disable by using the vblank + * counter register (if available) + * + * R128_WRITE(R128_GEN_INT_CNTL, + * R128_READ(R128_GEN_INT_CNTL) & ~R128_CRTC_VBLANK_INT_EN); + */ } void r128_driver_irq_preinstall(struct drm_device * dev) @@ -82,12 +100,9 @@ void r128_driver_irq_preinstall(struct d R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK); } -void r128_driver_irq_postinstall(struct drm_device * dev) +int r128_driver_irq_postinstall(struct drm_device * dev) { - drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private; - - /* Turn on VBL interrupt */ - R128_WRITE(R128_GEN_INT_CNTL, R128_CRTC_VBLANK_INT_EN); + return drm_vblank_init(dev, 1); } void r128_driver_irq_uninstall(struct drm_device * dev) diff -Napur -X /home/jbarnes/dontdiff gpu/drm/r128/r128_state.c /tmp/drm-master/drivers/gpu/drm/r128/r128_state.c --- gpu/drm/r128/r128_state.c 2008-08-21 13:52:27.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/r128/r128_state.c 2008-08-21 14:34:09.000000000 -0700 @@ -1549,9 +1549,6 @@ static int r128_cce_indirect(struct drm_ struct drm_buf *buf; drm_r128_buf_priv_t *buf_priv; drm_r128_indirect_t *indirect = data; -#if 0 - RING_LOCALS; -#endif LOCK_TEST_WITH_RETURN(dev, file_priv); @@ -1595,14 +1592,6 @@ static int r128_cce_indirect(struct drm_ buf->used = indirect->end; buf_priv->discard = indirect->discard; -#if 0 - /* Wait for the 3D stream to idle before the indirect buffer - * containing 2D acceleration commands is processed. - */ - BEGIN_RING(2); - RADEON_WAIT_UNTIL_3D_IDLE(); - ADVANCE_RING(); -#endif /* Dispatch the indirect buffer full of commands from the * X server. This is insecure and is thus only available to diff -Napur -X /home/jbarnes/dontdiff gpu/drm/radeon/Makefile /tmp/drm-master/drivers/gpu/drm/radeon/Makefile --- gpu/drm/radeon/Makefile 2008-08-21 13:52:27.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/radeon/Makefile 1969-12-31 16:00:00.000000000 -0800 @@ -1,10 +0,0 @@ -# -# Makefile for the drm device driver. This driver provides support for the -# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. 
- -ccflags-y := -Iinclude/drm -radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o - -radeon-$(CONFIG_COMPAT) += radeon_ioc32.o - -obj-$(CONFIG_DRM_RADEON)+= radeon.o diff -Napur -X /home/jbarnes/dontdiff gpu/drm/radeon/r300_cmdbuf.c /tmp/drm-master/drivers/gpu/drm/radeon/r300_cmdbuf.c --- gpu/drm/radeon/r300_cmdbuf.c 2008-08-21 14:43:42.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/radeon/r300_cmdbuf.c 2008-08-21 14:34:09.000000000 -0700 @@ -352,6 +352,7 @@ static __inline__ int r300_emit_packet0( sz = header.packet0.count; reg = (header.packet0.reghi << 8) | header.packet0.reglo; + DRM_DEBUG("R300_CMD_PACKET0: reg %04x, sz %d\n", reg, sz); if (!sz) return 0; @@ -919,8 +920,7 @@ static int r300_scratch(drm_radeon_priva u32 i, buf_idx, h_pending; RING_LOCALS; - if (cmdbuf->bufsz < - (sizeof(u64) + header.scratch.n_bufs * sizeof(buf_idx))) { + if (cmdbuf->bufsz < sizeof(uint64_t) + header.scratch.n_bufs * sizeof(buf_idx) ) { return -EINVAL; } @@ -928,12 +928,12 @@ static int r300_scratch(drm_radeon_priva return -EINVAL; } - dev_priv->scratch_ages[header.scratch.reg]++; + dev_priv->scratch_ages[header.scratch.reg] ++; - ref_age_base = (u32 *)(unsigned long)*((uint64_t *)cmdbuf->buf); + ref_age_base = (u32 *)(unsigned long)*((uint64_t *)cmdbuf->buf); - cmdbuf->buf += sizeof(u64); - cmdbuf->bufsz -= sizeof(u64); + cmdbuf->buf += sizeof(uint64_t); + cmdbuf->bufsz -= sizeof(uint64_t); for (i=0; i < header.scratch.n_bufs; i++) { buf_idx = *(u32 *)cmdbuf->buf; @@ -974,7 +974,7 @@ static int r300_scratch(drm_radeon_priva * the graphics card. * Called by r300_do_cp_cmdbuf. */ -static inline int r300_emit_r500fp(drm_radeon_private_t *dev_priv, +static __inline__ int r300_emit_r500fp(drm_radeon_private_t *dev_priv, drm_radeon_kcmd_buffer_t *cmdbuf, drm_r300_cmd_header_t header) { @@ -1054,7 +1054,6 @@ int r300_do_cp_cmdbuf(struct drm_device switch (header.header.cmd_type) { case R300_CMD_PACKET0: - DRM_DEBUG("R300_CMD_PACKET0\n"); ret = r300_emit_packet0(dev_priv, cmdbuf, header); if (ret) { DRM_ERROR("r300_emit_packet0 failed\n"); diff -Napur -X /home/jbarnes/dontdiff gpu/drm/radeon/r300_reg.h /tmp/drm-master/drivers/gpu/drm/radeon/r300_reg.h --- gpu/drm/radeon/r300_reg.h 2008-08-21 14:43:42.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/radeon/r300_reg.h 2008-08-21 14:34:09.000000000 -0700 @@ -23,6 +23,8 @@ USE OR OTHER DEALINGS IN THE SOFTWARE. **************************************************************************/ +/* *INDENT-OFF* */ + #ifndef _R300_REG_H #define _R300_REG_H @@ -36,6 +38,7 @@ USE OR OTHER DEALINGS IN THE SOFTWARE. # define R300_MC_MISC__MC_SAME_PAGE_PRIO_SHIFT 24 # define R300_MC_MISC__MC_GLOBW_INIT_LAT_SHIFT 28 + #define R300_MC_INIT_GFX_LAT_TIMER 0x154 # define R300_MC_MISC__MC_G3D0R_INIT_LAT_SHIFT 0 # define R300_MC_MISC__MC_G3D1R_INIT_LAT_SHIFT 4 @@ -584,7 +587,7 @@ USE OR OTHER DEALINGS IN THE SOFTWARE. #define R300_RE_FOG_START 0x4298 /* Not sure why there are duplicate of factor and constant values. - * My best guess so far is that there are separate zbiases for test and write. + * My best guess so far is that there are seperate zbiases for test and write. * Ordering might be wrong. * Some of the tests indicate that fgl has a fallback implementation of zbias * via pixel shaders. @@ -1771,3 +1774,5 @@ USE OR OTHER DEALINGS IN THE SOFTWARE. 
#define R500_RB3D_CONSTANT_COLOR_AR 0x4ef8 #endif /* _R300_REG_H */ + +/* *INDENT-ON* */ diff -Napur -X /home/jbarnes/dontdiff gpu/drm/radeon/r600_microcode.h /tmp/drm-master/drivers/gpu/drm/radeon/r600_microcode.h --- gpu/drm/radeon/r600_microcode.h 1969-12-31 16:00:00.000000000 -0800 +++ /tmp/drm-master/drivers/gpu/drm/radeon/r600_microcode.h 2008-08-21 14:34:09.000000000 -0700 @@ -0,0 +1,27 @@ +/* + * Copyright 2007 Advanced Micro Devices, Inc. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +/* this file is not used we just have it as a placeholder for future R600 + work */ diff -Napur -X /home/jbarnes/dontdiff gpu/drm/radeon/radeon_cp.c /tmp/drm-master/drivers/gpu/drm/radeon/radeon_cp.c --- gpu/drm/radeon/radeon_cp.c 2008-08-21 14:43:42.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/radeon/radeon_cp.c 2008-08-21 14:34:09.000000000 -0700 @@ -36,7 +36,6 @@ #include "r300_reg.h" #include "radeon_microcode.h" - #define RADEON_FIFO_DEBUG 0 static int radeon_do_cleanup_cp(struct drm_device * dev); @@ -71,10 +70,10 @@ static u32 RS690_READ_MCIND(drm_radeon_p static u32 IGP_READ_MCIND(drm_radeon_private_t *dev_priv, int addr) { - if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) - return RS690_READ_MCIND(dev_priv, addr); + if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) + return RS690_READ_MCIND(dev_priv, addr); else - return RS480_READ_MCIND(dev_priv, addr); + return RS480_READ_MCIND(dev_priv, addr); } u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv) @@ -128,9 +127,10 @@ static void radeon_write_agp_base(drm_ra } else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) { R500_WRITE_MCIND(R520_MC_AGP_BASE, agp_base_lo); R500_WRITE_MCIND(R520_MC_AGP_BASE_2, agp_base_hi); - } else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480) { + } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) || + ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) { RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo); - RADEON_WRITE(RS480_AGP_BASE_2, 0); + RADEON_WRITE(RS480_AGP_BASE_2, agp_base_hi); } else { RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo); if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R200) @@ -155,7 +155,7 @@ static u32 RADEON_READ_PCIE(drm_radeon_p #if RADEON_FIFO_DEBUG static void radeon_status(drm_radeon_private_t * dev_priv) { - printk("%s:\n", __func__); + printk("%s:\n", __FUNCTION__); printk("RBBM_STATUS = 
0x%08x\n", (unsigned int)RADEON_READ(RADEON_RBBM_STATUS)); printk("CP_RB_RTPR = 0x%08x\n", @@ -263,7 +263,7 @@ static int radeon_do_wait_for_idle(drm_r return -EBUSY; } -static void radeon_init_pipes(drm_radeon_private_t *dev_priv) +static void radeon_init_pipes(drm_radeon_private_t * dev_priv) { uint32_t gb_tile_config, gb_pipe_sel = 0; @@ -285,7 +285,7 @@ static void radeon_init_pipes(drm_radeon gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16 /*| R300_SUBPIXEL_1_16*/); - switch (dev_priv->num_gb_pipes) { + switch(dev_priv->num_gb_pipes) { case 2: gb_tile_config |= R300_PIPE_COUNT_R300; break; case 3: gb_tile_config |= R300_PIPE_COUNT_R420_3P; break; case 4: gb_tile_config |= R300_PIPE_COUNT_R420; break; @@ -320,6 +320,7 @@ static void radeon_cp_load_microcode(drm radeon_do_wait_for_idle(dev_priv); RADEON_WRITE(RADEON_CP_ME_RAM_ADDR, 0); + if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R100) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV100) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV200) || @@ -347,6 +348,7 @@ static void radeon_cp_load_microcode(drm ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV350) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV380) || + ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) || ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) { DRM_INFO("Loading R300 Microcode\n"); for (i = 0; i < 256; i++) { @@ -395,12 +397,6 @@ static void radeon_cp_load_microcode(drm static void radeon_do_cp_flush(drm_radeon_private_t * dev_priv) { DRM_DEBUG("\n"); -#if 0 - u32 tmp; - - tmp = RADEON_READ(RADEON_CP_RB_WPTR) | (1 << 31); - RADEON_WRITE(RADEON_CP_RB_WPTR, tmp); -#endif } /* Wait for the CP to go idle. @@ -490,7 +486,7 @@ static int radeon_do_engine_reset(struct radeon_do_pixcache_flush(dev_priv); if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV410) { - /* may need something similar for newer chips */ + /* may need something similar for newer chips */ clock_cntl_index = RADEON_READ(RADEON_CLOCK_CNTL_INDEX); mclk_cntl = RADEON_READ_PLL(dev, RADEON_MCLK_CNTL); @@ -700,9 +696,8 @@ static void radeon_test_writeback(drm_ra } if (!dev_priv->writeback_works) { - /* Disable writeback to avoid unnecessary bus master transfer */ - RADEON_WRITE(RADEON_CP_RB_CNTL, RADEON_READ(RADEON_CP_RB_CNTL) | - RADEON_RB_NO_UPDATE); + /* Disable writeback to avoid unnecessary bus master transfers */ + RADEON_WRITE(RADEON_CP_RB_CNTL, RADEON_READ(RADEON_CP_RB_CNTL) | RADEON_RB_NO_UPDATE); RADEON_WRITE(RADEON_SCRATCH_UMSK, 0); } } @@ -714,11 +709,12 @@ static void radeon_set_igpgart(drm_radeo if (on) { DRM_DEBUG("programming igp gart %08X %08lX %08X\n", - dev_priv->gart_vm_start, - (long)dev_priv->gart_info.bus_addr, - dev_priv->gart_size); + dev_priv->gart_vm_start, + (long)dev_priv->gart_info.bus_addr, + dev_priv->gart_size); temp = IGP_READ_MCIND(dev_priv, RS480_MC_MISC_CNTL); + if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) IGP_WRITE_MCIND(RS480_MC_MISC_CNTL, (RS480_GART_INDEX_REG_EN | RS690_BLOCK_GFX_D3_EN)); @@ -745,8 +741,8 @@ static void radeon_set_igpgart(drm_radeo radeon_write_agp_base(dev_priv, dev_priv->gart_vm_start); dev_priv->gart_size = 32*1024*1024; - temp = (((dev_priv->gart_vm_start - 1 + dev_priv->gart_size) & - 0xffff0000) | (dev_priv->gart_vm_start >> 16)); + temp = (((dev_priv->gart_vm_start - 1 + dev_priv->gart_size) & + 0xffff0000) | (dev_priv->gart_vm_start >> 16)); radeon_write_agp_location(dev_priv, temp); @@ -759,7 +755,7 @@ static void 
radeon_set_igpgart(drm_radeo if ((temp & RS480_GART_CACHE_INVALIDATE) == 0) break; DRM_UDELAY(1); - } while (1); + } while(1); IGP_WRITE_MCIND(RS480_GART_CACHE_CNTRL, RS480_GART_CACHE_INVALIDATE); @@ -769,7 +765,7 @@ static void radeon_set_igpgart(drm_radeo if ((temp & RS480_GART_CACHE_INVALIDATE) == 0) break; DRM_UDELAY(1); - } while (1); + } while(1); IGP_WRITE_MCIND(RS480_GART_CACHE_CNTRL, 0); } else { @@ -861,11 +857,14 @@ static int radeon_do_init_cp(struct drm_ return -EINVAL; } - if (init->is_pci && (dev_priv->flags & RADEON_IS_AGP)) { + if (init->is_pci && (dev_priv->flags & RADEON_IS_AGP)) + { DRM_DEBUG("Forcing AGP card to PCI mode\n"); dev_priv->flags &= ~RADEON_IS_AGP; - } else if (!(dev_priv->flags & (RADEON_IS_AGP | RADEON_IS_PCI | RADEON_IS_PCIE)) - && !init->is_pci) { + } + else if (!(dev_priv->flags & (RADEON_IS_AGP | RADEON_IS_PCI | RADEON_IS_PCIE)) + && !init->is_pci) + { DRM_DEBUG("Restoring AGP flag\n"); dev_priv->flags |= RADEON_IS_AGP; } @@ -888,17 +887,6 @@ static int radeon_do_init_cp(struct drm_ */ dev_priv->vblank_crtc = DRM_RADEON_VBLANK_CRTC1; - switch(init->func) { - case RADEON_INIT_R200_CP: - dev_priv->microcode_version = UCODE_R200; - break; - case RADEON_INIT_R300_CP: - dev_priv->microcode_version = UCODE_R300; - break; - default: - dev_priv->microcode_version = UCODE_R100; - } - dev_priv->do_boxes = 0; dev_priv->cp_mode = init->cp_mode; @@ -946,8 +934,7 @@ static int radeon_do_init_cp(struct drm_ */ dev_priv->depth_clear.rb3d_cntl = (RADEON_PLANE_MASK_ENABLE | (dev_priv->color_fmt << 10) | - (dev_priv->microcode_version == - UCODE_R100 ? RADEON_ZBLOCK16 : 0)); + (dev_priv->chip_family < CHIP_R200 ? RADEON_ZBLOCK16 : 0)); dev_priv->depth_clear.rb3d_zstencilcntl = (dev_priv->depth_fmt | @@ -1131,6 +1118,7 @@ static int radeon_do_init_cp(struct drm_ dev_priv->ring.fetch_size = /* init->fetch_size */ 32; dev_priv->ring.fetch_size_l2ow = drm_order( /* init->fetch_size */ 32 / 16); + dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1; dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK; @@ -1286,7 +1274,7 @@ static int radeon_do_resume_cp(struct dr radeon_cp_init_ring_buffer(dev, dev_priv); radeon_do_engine_reset(dev); - radeon_enable_interrupt(dev); + radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1); DRM_DEBUG("radeon_do_resume_cp() complete\n"); @@ -1389,11 +1377,7 @@ void radeon_do_release(struct drm_device /* Stop the cp */ while ((ret = radeon_do_cp_idle(dev_priv)) != 0) { DRM_DEBUG("radeon_do_cp_idle %d\n", ret); -#ifdef __linux__ schedule(); -#else - tsleep(&ret, PZERO, "rdnrel", 1); -#endif } radeon_do_cp_stop(dev_priv); radeon_do_engine_reset(dev); @@ -1543,41 +1527,6 @@ struct drm_buf *radeon_freelist_get(stru return NULL; } -#if 0 -struct drm_buf *radeon_freelist_get(struct drm_device * dev) -{ - struct drm_device_dma *dma = dev->dma; - drm_radeon_private_t *dev_priv = dev->dev_private; - drm_radeon_buf_priv_t *buf_priv; - struct drm_buf *buf; - int i, t; - int start; - u32 done_age = DRM_READ32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(1)); - - if (++dev_priv->last_buf >= dma->buf_count) - dev_priv->last_buf = 0; - - start = dev_priv->last_buf; - dev_priv->stats.freelist_loops++; - - for (t = 0; t < 2; t++) { - for (i = start; i < dma->buf_count; i++) { - buf = dma->buflist[i]; - buf_priv = buf->dev_private; - if (buf->file_priv == 0 || (buf->pending && - buf_priv->age <= - done_age)) { - dev_priv->stats.requested_bufs++; - buf->pending = 0; - return buf; - } - } - start = 0; - } - - return NULL; -} -#endif void 
radeon_freelist_reset(struct drm_device * dev) { @@ -1720,6 +1669,7 @@ int radeon_driver_load(struct drm_device break; } + dev_priv->chip_family = flags & RADEON_FAMILY_MASK; if (drm_device_is_agp(dev)) dev_priv->flags |= RADEON_IS_AGP; else if (drm_device_is_pcie(dev)) diff -Napur -X /home/jbarnes/dontdiff gpu/drm/radeon/radeon_drv.c /tmp/drm-master/drivers/gpu/drm/radeon/radeon_drv.c --- gpu/drm/radeon/radeon_drv.c 2008-08-21 13:52:27.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/radeon/radeon_drv.c 2008-08-21 14:34:09.000000000 -0700 @@ -41,26 +41,48 @@ int radeon_no_wb; MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers\n"); module_param_named(no_wb, radeon_no_wb, int, 0444); -static int dri_library_name(struct drm_device *dev, char *buf) +static int dri_library_name(struct drm_device * dev, char * buf) { drm_radeon_private_t *dev_priv = dev->dev_private; int family = dev_priv->flags & RADEON_FAMILY_MASK; return snprintf(buf, PAGE_SIZE, "%s\n", - (family < CHIP_R200) ? "radeon" : - ((family < CHIP_R300) ? "r200" : - "r300")); + (family < CHIP_R200) ? "radeon" : + ((family < CHIP_R300) ? "r200" : + "r300")); +} + +static int radeon_suspend(struct drm_device *dev, pm_message_t state) +{ + drm_radeon_private_t *dev_priv = dev->dev_private; + + /* Disable *all* interrupts */ + if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) + RADEON_WRITE(R500_DxMODE_INT_MASK, 0); + RADEON_WRITE(RADEON_GEN_INT_CNTL, 0); + return 0; +} + +static int radeon_resume(struct drm_device *dev) +{ + drm_radeon_private_t *dev_priv = dev->dev_private; + + /* Restore interrupt registers */ + if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) + RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg); + RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg); + return 0; } static struct pci_device_id pciidlist[] = { radeon_PCI_IDS }; +static int probe(struct pci_dev *pdev, const struct pci_device_id *ent); static struct drm_driver driver = { .driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | - DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED | - DRIVER_IRQ_VBL | DRIVER_IRQ_VBL2, + DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED, .dev_priv_size = sizeof(drm_radeon_buf_priv_t), .load = radeon_driver_load, .firstopen = radeon_driver_firstopen, @@ -69,8 +91,11 @@ static struct drm_driver driver = { .postclose = radeon_driver_postclose, .lastclose = radeon_driver_lastclose, .unload = radeon_driver_unload, - .vblank_wait = radeon_driver_vblank_wait, - .vblank_wait2 = radeon_driver_vblank_wait2, + .suspend = radeon_suspend, + .resume = radeon_resume, + .get_vblank_counter = radeon_get_vblank_counter, + .enable_vblank = radeon_enable_vblank, + .disable_vblank = radeon_disable_vblank, .dri_library_name = dri_library_name, .irq_preinstall = radeon_driver_irq_preinstall, .irq_postinstall = radeon_driver_irq_postinstall, @@ -82,21 +107,22 @@ static struct drm_driver driver = { .ioctls = radeon_ioctls, .dma_ioctl = radeon_cp_buffers, .fops = { - .owner = THIS_MODULE, - .open = drm_open, - .release = drm_release, - .ioctl = drm_ioctl, - .mmap = drm_mmap, - .poll = drm_poll, - .fasync = drm_fasync, -#ifdef CONFIG_COMPAT - .compat_ioctl = radeon_compat_ioctl, + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .ioctl = drm_ioctl, + .mmap = drm_mmap, + .poll = drm_poll, + .fasync = drm_fasync, +#if defined(CONFIG_COMPAT) && LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9) + .compat_ioctl = radeon_compat_ioctl, #endif - }, - + }, 
.pci_driver = { - .name = DRIVER_NAME, - .id_table = pciidlist, + .name = DRIVER_NAME, + .id_table = pciidlist, + .probe = probe, + .remove = __devexit_p(drm_cleanup_pci), }, .name = DRIVER_NAME, @@ -107,10 +133,15 @@ static struct drm_driver driver = { .patchlevel = DRIVER_PATCHLEVEL, }; +static int probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + return drm_get_dev(pdev, ent, &driver); +} + static int __init radeon_init(void) { driver.num_ioctls = radeon_max_ioctl; - return drm_init(&driver); + return drm_init(&driver, pciidlist); } static void __exit radeon_exit(void) diff -Napur -X /home/jbarnes/dontdiff gpu/drm/radeon/radeon_drv.h /tmp/drm-master/drivers/gpu/drm/radeon/radeon_drv.h --- gpu/drm/radeon/radeon_drv.h 2008-08-21 14:43:42.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/radeon/radeon_drv.h 2008-08-21 14:34:09.000000000 -0700 @@ -38,7 +38,7 @@ #define DRIVER_NAME "radeon" #define DRIVER_DESC "ATI Radeon" -#define DRIVER_DATE "20080528" +#define DRIVER_DATE "20080613" /* Interface history: * @@ -100,6 +100,7 @@ * 1.28- Add support for VBL on CRTC2 * 1.29- R500 3D cmd buffer support */ + #define DRIVER_MAJOR 1 #define DRIVER_MINOR 29 #define DRIVER_PATCHLEVEL 0 @@ -123,6 +124,7 @@ enum radeon_family { CHIP_RV380, CHIP_R420, CHIP_RV410, + CHIP_RS400, CHIP_RS480, CHIP_RS690, CHIP_RV515, @@ -134,12 +136,6 @@ enum radeon_family { CHIP_LAST, }; -enum radeon_cp_microcode_version { - UCODE_R100, - UCODE_R200, - UCODE_R300, -}; - /* * Chip flags */ @@ -171,8 +167,8 @@ typedef struct drm_radeon_freelist { typedef struct drm_radeon_ring_buffer { u32 *start; u32 *end; - int size; - int size_l2qw; + int size; /* Double Words */ + int size_l2qw; /* log2 Quad Words */ int rptr_update; /* Double Words */ int rptr_update_l2qw; /* log2 Quad Words */ @@ -224,6 +220,7 @@ struct radeon_virt_surface { #define RADEON_PURGE_EMITED (1 < 1) typedef struct drm_radeon_private { + drm_radeon_ring_buffer_t ring; drm_radeon_sarea_t *sarea_priv; @@ -246,8 +243,6 @@ typedef struct drm_radeon_private { int usec_timeout; - int microcode_version; - struct { u32 boxes; int freelist_timeouts; @@ -309,12 +304,16 @@ typedef struct drm_radeon_private { u32 scratch_ages[5]; + unsigned int crtc_last_cnt; + unsigned int crtc2_last_cnt; + /* starting from here on, data is preserved accross an open */ uint32_t flags; /* see radeon_chip_flags */ unsigned long fb_aper_offset; int num_gb_pipes; int track_flush; + uint32_t chip_family; /* extract from flags */ } drm_radeon_private_t; typedef struct drm_radeon_buf_priv { @@ -366,10 +365,6 @@ extern int radeon_wait_ring(drm_radeon_p extern int radeon_do_cp_idle(drm_radeon_private_t * dev_priv); -extern int radeon_driver_preinit(struct drm_device *dev, unsigned long flags); -extern int radeon_presetup(struct drm_device *dev); -extern int radeon_driver_postcleanup(struct drm_device *dev); - extern int radeon_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int radeon_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int radeon_mem_init_heap(struct drm_device *dev, void *data, struct drm_file *file_priv); @@ -378,38 +373,40 @@ extern void radeon_mem_release(struct dr struct mem_block *heap); /* radeon_irq.c */ +extern void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state); extern int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv); extern void 
radeon_do_release(struct drm_device * dev); -extern int radeon_driver_vblank_wait(struct drm_device * dev, - unsigned int *sequence); -extern int radeon_driver_vblank_wait2(struct drm_device * dev, - unsigned int *sequence); +extern u32 radeon_get_vblank_counter(struct drm_device *dev, int crtc); +extern int radeon_enable_vblank(struct drm_device *dev, int crtc); +extern void radeon_disable_vblank(struct drm_device *dev, int crtc); extern irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS); extern void radeon_driver_irq_preinstall(struct drm_device * dev); -extern void radeon_driver_irq_postinstall(struct drm_device * dev); +extern int radeon_driver_irq_postinstall(struct drm_device * dev); extern void radeon_driver_irq_uninstall(struct drm_device * dev); -extern void radeon_enable_interrupt(struct drm_device *dev); extern int radeon_vblank_crtc_get(struct drm_device *dev); extern int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value); extern int radeon_driver_load(struct drm_device *dev, unsigned long flags); extern int radeon_driver_unload(struct drm_device *dev); extern int radeon_driver_firstopen(struct drm_device *dev); -extern void radeon_driver_preclose(struct drm_device * dev, struct drm_file *file_priv); -extern void radeon_driver_postclose(struct drm_device * dev, struct drm_file * filp); +extern void radeon_driver_preclose(struct drm_device * dev, + struct drm_file *file_priv); +extern void radeon_driver_postclose(struct drm_device * dev, + struct drm_file *file_priv); extern void radeon_driver_lastclose(struct drm_device * dev); -extern int radeon_driver_open(struct drm_device * dev, struct drm_file * filp_priv); +extern int radeon_driver_open(struct drm_device * dev, + struct drm_file * file_priv); extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd, - unsigned long arg); + unsigned long arg); /* r300_cmdbuf.c */ extern void r300_init_reg_flags(struct drm_device *dev); -extern int r300_do_cp_cmdbuf(struct drm_device * dev, +extern int r300_do_cp_cmdbuf(struct drm_device *dev, struct drm_file *file_priv, - drm_radeon_kcmd_buffer_t * cmdbuf); + drm_radeon_kcmd_buffer_t *cmdbuf); /* Flags for stats.boxes */ @@ -422,10 +419,9 @@ extern int r300_do_cp_cmdbuf(struct drm_ /* Register definitions, register access macros and drmAddMap constants * for Radeon kernel driver. */ - #define RADEON_AGP_COMMAND 0x0f60 -#define RADEON_AGP_COMMAND_PCI_CONFIG 0x0060 /* offset in PCI config */ -# define RADEON_AGP_ENABLE (1<<8) +#define RADEON_AGP_COMMAND_PCI_CONFIG 0x0060 /* offset in PCI config */ +# define RADEON_AGP_ENABLE (1<<8) #define RADEON_AUX_SCISSOR_CNTL 0x26f0 # define RADEON_EXCLUSIVE_SCISSOR_0 (1 << 24) # define RADEON_EXCLUSIVE_SCISSOR_1 (1 << 25) @@ -441,7 +437,7 @@ extern int r300_do_cp_cmdbuf(struct drm_ # define RADEON_PLL_WR_EN (1 << 7) #define RADEON_CLOCK_CNTL_INDEX 0x0008 #define RADEON_CONFIG_APER_SIZE 0x0108 -#define RADEON_CONFIG_MEMSIZE 0x00f8 +#define RADEON_CONFIG_MEMSIZE 0x00f8 #define RADEON_CRTC_OFFSET 0x0224 #define RADEON_CRTC_OFFSET_CNTL 0x0228 # define RADEON_CRTC_TILE_EN (1 << 15) @@ -608,6 +604,12 @@ extern int r300_do_cp_cmdbuf(struct drm_ ? 
DRM_READ32( dev_priv->ring_rptr, RADEON_SCRATCHOFF(x) ) \ : RADEON_READ( RADEON_SCRATCH_REG0 + 4*(x) ) ) +#define RADEON_CRTC_CRNT_FRAME 0x0214 +#define RADEON_CRTC2_CRNT_FRAME 0x0314 + +#define RADEON_CRTC_STATUS 0x005c +#define RADEON_CRTC2_STATUS 0x03fc + #define RADEON_GEN_INT_CNTL 0x0040 # define RADEON_CRTC_VBLANK_MASK (1 << 0) # define RADEON_CRTC2_VBLANK_MASK (1 << 9) @@ -623,6 +625,8 @@ extern int r300_do_cp_cmdbuf(struct drm_ # define RADEON_SW_INT_TEST (1 << 25) # define RADEON_SW_INT_TEST_ACK (1 << 25) # define RADEON_SW_INT_FIRE (1 << 26) +# define R500_DISPLAY_INT_STATUS (1 << 0) + #define RADEON_HOST_PATH_CNTL 0x0130 # define RADEON_HDP_SOFT_RESET (1 << 26) @@ -698,7 +702,7 @@ extern int r300_do_cp_cmdbuf(struct drm_ # define R300_ZC_FLUSH (1 << 0) # define R300_ZC_FREE (1 << 1) # define R300_ZC_BUSY (1 << 31) -#define RADEON_RB3D_DSTCACHE_CTLSTAT 0x325c +#define RADEON_RB3D_DSTCACHE_CTLSTAT 0x325c # define RADEON_RB3D_DC_FLUSH (3 << 0) # define RADEON_RB3D_DC_FREE (3 << 2) # define RADEON_RB3D_DC_FLUSH_ALL 0xf @@ -710,15 +714,15 @@ extern int r300_do_cp_cmdbuf(struct drm_ #define RADEON_RB3D_ZSTENCILCNTL 0x1c2c # define RADEON_Z_TEST_MASK (7 << 4) # define RADEON_Z_TEST_ALWAYS (7 << 4) -# define RADEON_Z_HIERARCHY_ENABLE (1 << 8) +# define RADEON_Z_HIERARCHY_ENABLE (1 << 8) # define RADEON_STENCIL_TEST_ALWAYS (7 << 12) # define RADEON_STENCIL_S_FAIL_REPLACE (2 << 16) # define RADEON_STENCIL_ZPASS_REPLACE (2 << 20) # define RADEON_STENCIL_ZFAIL_REPLACE (2 << 24) -# define RADEON_Z_COMPRESSION_ENABLE (1 << 28) -# define RADEON_FORCE_Z_DIRTY (1 << 29) +# define RADEON_Z_COMPRESSION_ENABLE (1 << 28) +# define RADEON_FORCE_Z_DIRTY (1 << 29) # define RADEON_Z_WRITE_ENABLE (1 << 30) -# define RADEON_Z_DECOMPRESSION_ENABLE (1 << 31) +# define RADEON_Z_DECOMPRESSION_ENABLE (1 << 31) #define RADEON_RBBM_SOFT_RESET 0x00f0 # define RADEON_SOFT_RESET_CP (1 << 0) # define RADEON_SOFT_RESET_HI (1 << 1) @@ -924,7 +928,7 @@ extern int r300_do_cp_cmdbuf(struct drm_ # define RADEON_CP_NEXT_CHAR 0x00001900 # define RADEON_CP_PLY_NEXTSCAN 0x00001D00 # define RADEON_CP_SET_SCISSORS 0x00001E00 - /* GEN_INDX_PRIM is unsupported starting with R300 */ + /* GEN_INDX_PRIM is unsupported starting with R300 */ # define RADEON_3D_RNDR_GEN_INDX_PRIM 0x00002300 # define RADEON_WAIT_FOR_IDLE 0x00002600 # define RADEON_3D_DRAW_VBUF 0x00002800 @@ -1109,13 +1113,41 @@ extern int r300_do_cp_cmdbuf(struct drm_ #define R200_SE_TCL_POINT_SPRITE_CNTL 0x22c4 -#define R200_PP_TRI_PERF 0x2cf8 +#define R200_PP_TRI_PERF 0x2cf8 #define R200_PP_AFS_0 0x2f80 -#define R200_PP_AFS_1 0x2f00 /* same as txcblend_0 */ +#define R200_PP_AFS_1 0x2f00 /* same as txcblend_0 */ #define R200_VAP_PVS_CNTL_1 0x22D0 +/* MPEG settings from VHA code */ +#define RADEON_VHA_SETTO16_1 0x2694 +#define RADEON_VHA_SETTO16_2 0x2680 +#define RADEON_VHA_SETTO0_1 0x1840 +#define RADEON_VHA_FB_OFFSET 0x19e4 +#define RADEON_VHA_SETTO1AND70S 0x19d8 +#define RADEON_VHA_DST_PITCH 0x1408 + +// set as reference header +#define RADEON_VHA_BACKFRAME0_OFF_Y 0x1840 +#define RADEON_VHA_BACKFRAME1_OFF_PITCH_Y 0x1844 +#define RADEON_VHA_BACKFRAME0_OFF_U 0x1848 +#define RADEON_VHA_BACKFRAME1_OFF_PITCH_U 0x184c +#define RADOEN_VHA_BACKFRAME0_OFF_V 0x1850 +#define RADEON_VHA_BACKFRAME1_OFF_PITCH_V 0x1854 +#define RADEON_VHA_FORWFRAME0_OFF_Y 0x1858 +#define RADEON_VHA_FORWFRAME1_OFF_PITCH_Y 0x185c +#define RADEON_VHA_FORWFRAME0_OFF_U 0x1860 +#define RADEON_VHA_FORWFRAME1_OFF_PITCH_U 0x1864 +#define RADEON_VHA_FORWFRAME0_OFF_V 0x1868 +#define 
RADEON_VHA_FORWFRAME0_OFF_PITCH_V 0x1880 +#define RADEON_VHA_BACKFRAME0_OFF_Y_2 0x1884 +#define RADEON_VHA_BACKFRAME1_OFF_PITCH_Y_2 0x1888 +#define RADEON_VHA_BACKFRAME0_OFF_U_2 0x188c +#define RADEON_VHA_BACKFRAME1_OFF_PITCH_U_2 0x1890 +#define RADEON_VHA_BACKFRAME0_OFF_V_2 0x1894 +#define RADEON_VHA_BACKFRAME1_OFF_PITCH_V_2 0x1898 + #define R500_D1CRTC_STATUS 0x609c #define R500_D2CRTC_STATUS 0x689c #define R500_CRTC_V_BLANK (1<<0) @@ -1157,53 +1189,53 @@ extern int r300_do_cp_cmdbuf(struct drm_ #define RADEON_PCIGART_TABLE_SIZE (32*1024) -#define RADEON_READ(reg) DRM_READ32( dev_priv->mmio, (reg) ) -#define RADEON_WRITE(reg,val) DRM_WRITE32( dev_priv->mmio, (reg), (val) ) +#define RADEON_READ(reg) DRM_READ32( dev_priv->mmio, (reg) ) +#define RADEON_WRITE(reg,val) DRM_WRITE32( dev_priv->mmio, (reg), (val) ) #define RADEON_READ8(reg) DRM_READ8( dev_priv->mmio, (reg) ) #define RADEON_WRITE8(reg,val) DRM_WRITE8( dev_priv->mmio, (reg), (val) ) -#define RADEON_WRITE_PLL(addr, val) \ +#define RADEON_WRITE_PLL( addr, val ) \ do { \ - RADEON_WRITE8(RADEON_CLOCK_CNTL_INDEX, \ + RADEON_WRITE8( RADEON_CLOCK_CNTL_INDEX, \ ((addr) & 0x1f) | RADEON_PLL_WR_EN ); \ - RADEON_WRITE(RADEON_CLOCK_CNTL_DATA, (val)); \ + RADEON_WRITE( RADEON_CLOCK_CNTL_DATA, (val) ); \ } while (0) -#define RADEON_WRITE_PCIE(addr, val) \ +#define RADEON_WRITE_PCIE( addr, val ) \ do { \ - RADEON_WRITE8(RADEON_PCIE_INDEX, \ + RADEON_WRITE8( RADEON_PCIE_INDEX, \ ((addr) & 0xff)); \ - RADEON_WRITE(RADEON_PCIE_DATA, (val)); \ + RADEON_WRITE( RADEON_PCIE_DATA, (val) ); \ } while (0) -#define R500_WRITE_MCIND(addr, val) \ +#define R500_WRITE_MCIND( addr, val ) \ do { \ RADEON_WRITE(R520_MC_IND_INDEX, 0xff0000 | ((addr) & 0xff)); \ RADEON_WRITE(R520_MC_IND_DATA, (val)); \ RADEON_WRITE(R520_MC_IND_INDEX, 0); \ } while (0) -#define RS480_WRITE_MCIND(addr, val) \ +#define RS480_WRITE_MCIND( addr, val ) \ do { \ - RADEON_WRITE(RS480_NB_MC_INDEX, \ + RADEON_WRITE( RS480_NB_MC_INDEX, \ ((addr) & 0xff) | RS480_NB_MC_IND_WR_EN); \ - RADEON_WRITE(RS480_NB_MC_DATA, (val)); \ - RADEON_WRITE(RS480_NB_MC_INDEX, 0xff); \ + RADEON_WRITE( RS480_NB_MC_DATA, (val) ); \ + RADEON_WRITE( RS480_NB_MC_INDEX, 0xff ); \ } while (0) -#define RS690_WRITE_MCIND(addr, val) \ +#define RS690_WRITE_MCIND( addr, val ) \ do { \ RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_WR_EN | ((addr) & RS690_MC_INDEX_MASK)); \ RADEON_WRITE(RS690_MC_DATA, val); \ RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_WR_ACK); \ } while (0) -#define IGP_WRITE_MCIND(addr, val) \ +#define IGP_WRITE_MCIND( addr, val ) \ do { \ - if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) \ - RS690_WRITE_MCIND(addr, val); \ - else \ - RS480_WRITE_MCIND(addr, val); \ + if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) \ + RS690_WRITE_MCIND( addr, val ); \ + else \ + RS480_WRITE_MCIND( addr, val ); \ } while (0) #define CP_PACKET0( reg, n ) \ @@ -1247,42 +1279,42 @@ do { \ #define RADEON_FLUSH_CACHE() do { \ if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { \ - OUT_RING(CP_PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0)); \ - OUT_RING(RADEON_RB3D_DC_FLUSH); \ + OUT_RING(CP_PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0)); \ + OUT_RING(RADEON_RB3D_DC_FLUSH); \ } else { \ - OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); \ - OUT_RING(R300_RB3D_DC_FLUSH); \ - } \ + OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); \ + OUT_RING(R300_RB3D_DC_FLUSH); \ + } \ } while (0) #define RADEON_PURGE_CACHE() do { \ if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { \ - 
OUT_RING(CP_PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0)); \ - OUT_RING(RADEON_RB3D_DC_FLUSH | RADEON_RB3D_DC_FREE); \ + OUT_RING(CP_PACKET0( RADEON_RB3D_DSTCACHE_CTLSTAT, 0)); \ + OUT_RING(RADEON_RB3D_DC_FLUSH | RADEON_RB3D_DC_FREE); \ } else { \ - OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); \ - OUT_RING(R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE); \ - } \ + OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); \ + OUT_RING(R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE ); \ + } \ } while (0) #define RADEON_FLUSH_ZCACHE() do { \ if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { \ - OUT_RING(CP_PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0)); \ - OUT_RING(RADEON_RB3D_ZC_FLUSH); \ + OUT_RING( CP_PACKET0( RADEON_RB3D_ZCACHE_CTLSTAT, 0 ) ); \ + OUT_RING( RADEON_RB3D_ZC_FLUSH ); \ } else { \ - OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0)); \ - OUT_RING(R300_ZC_FLUSH); \ - } \ + OUT_RING( CP_PACKET0( R300_ZB_ZCACHE_CTLSTAT, 0 ) ); \ + OUT_RING( R300_ZC_FLUSH ); \ + } \ } while (0) #define RADEON_PURGE_ZCACHE() do { \ if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { \ - OUT_RING(CP_PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0)); \ - OUT_RING(RADEON_RB3D_ZC_FLUSH | RADEON_RB3D_ZC_FREE); \ + OUT_RING(CP_PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0)); \ + OUT_RING(RADEON_RB3D_ZC_FLUSH | RADEON_RB3D_ZC_FREE); \ } else { \ - OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0)); \ - OUT_RING(R300_ZC_FLUSH | R300_ZC_FREE); \ - } \ + OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0)); \ + OUT_RING(R300_ZC_FLUSH | R300_ZC_FREE); \ + } \ } while (0) /* ================================================================ @@ -1339,7 +1371,7 @@ do { \ DRM_INFO( "BEGIN_RING( %d )\n", (n)); \ } \ if ( dev_priv->ring.space <= (n) * sizeof(u32) ) { \ - COMMIT_RING(); \ + COMMIT_RING(); \ radeon_wait_ring( dev_priv, (n) * sizeof(u32) ); \ } \ _nr = n; dev_priv->ring.space -= (n) * sizeof(u32); \ @@ -1385,14 +1417,14 @@ do { \ OUT_RING( val ); \ } while (0) -#define OUT_RING_TABLE( tab, sz ) do { \ +#define OUT_RING_TABLE( tab, sz ) do { \ int _size = (sz); \ int *_tab = (int *)(tab); \ \ if (write + _size > mask) { \ int _i = (mask+1) - write; \ _size -= _i; \ - while (_i > 0 ) { \ + while (_i > 0) { \ *(int *)(ring + write) = *_tab++; \ write++; \ _i--; \ diff -Napur -X /home/jbarnes/dontdiff gpu/drm/radeon/radeon_ioc32.c /tmp/drm-master/drivers/gpu/drm/radeon/radeon_ioc32.c --- gpu/drm/radeon/radeon_ioc32.c 2008-08-21 13:52:27.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/radeon/radeon_ioc32.c 2008-08-21 14:34:09.000000000 -0700 @@ -92,8 +92,8 @@ static int compat_radeon_cp_init(struct &init->gart_textures_offset)) return -EFAULT; - return drm_ioctl(file->f_path.dentry->d_inode, file, - DRM_IOCTL_RADEON_CP_INIT, (unsigned long)init); + return drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_RADEON_CP_INIT, (unsigned long) init); } typedef struct drm_radeon_clear32 { @@ -101,8 +101,8 @@ typedef struct drm_radeon_clear32 { unsigned int clear_color; unsigned int clear_depth; unsigned int color_mask; - unsigned int depth_mask; /* misnamed field: should be stencil */ - u32 depth_boxes; + unsigned int depth_mask; /* misnamed field: should be stencil */ + u32 depth_boxes; } drm_radeon_clear32_t; static int compat_radeon_cp_clear(struct file *file, unsigned int cmd, @@ -125,8 +125,8 @@ static int compat_radeon_cp_clear(struct &clr->depth_boxes)) return -EFAULT; - return drm_ioctl(file->f_path.dentry->d_inode, file, - DRM_IOCTL_RADEON_CLEAR, (unsigned long)clr); + return drm_ioctl(file->f_dentry->d_inode, file, + 
DRM_IOCTL_RADEON_CLEAR, (unsigned long) clr); } typedef struct drm_radeon_stipple32 { @@ -145,16 +145,16 @@ static int compat_radeon_cp_stipple(stru request = compat_alloc_user_space(sizeof(*request)); if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) - || __put_user((unsigned int __user *)(unsigned long)mask, + || __put_user((unsigned int __user *)(unsigned long) mask, &request->mask)) return -EFAULT; - return drm_ioctl(file->f_path.dentry->d_inode, file, - DRM_IOCTL_RADEON_STIPPLE, (unsigned long)request); + return drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_RADEON_STIPPLE, (unsigned long) request); } typedef struct drm_radeon_tex_image32 { - unsigned int x, y; /* Blit coordinates */ + unsigned int x, y; /* Blit coordinates */ unsigned int width, height; u32 data; } drm_radeon_tex_image32_t; @@ -163,7 +163,7 @@ typedef struct drm_radeon_texture32 { unsigned int offset; int pitch; int format; - int width; /* Texture image coordinates */ + int width; /* Texture image coordinates */ int height; u32 image; } drm_radeon_texture32_t; @@ -204,13 +204,13 @@ static int compat_radeon_cp_texture(stru &image->data)) return -EFAULT; - return drm_ioctl(file->f_path.dentry->d_inode, file, - DRM_IOCTL_RADEON_TEXTURE, (unsigned long)request); + return drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_RADEON_TEXTURE, (unsigned long) request); } typedef struct drm_radeon_vertex2_32 { - int idx; /* Index of vertex buffer */ - int discard; /* Client finished with buffer? */ + int idx; /* Index of vertex buffer */ + int discard; /* Client finished with buffer? */ int nr_states; u32 state; int nr_prims; @@ -238,8 +238,8 @@ static int compat_radeon_cp_vertex2(stru &request->prim)) return -EFAULT; - return drm_ioctl(file->f_path.dentry->d_inode, file, - DRM_IOCTL_RADEON_VERTEX2, (unsigned long)request); + return drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_RADEON_VERTEX2, (unsigned long) request); } typedef struct drm_radeon_cmd_buffer32 { @@ -268,8 +268,8 @@ static int compat_radeon_cp_cmdbuf(struc &request->boxes)) return -EFAULT; - return drm_ioctl(file->f_path.dentry->d_inode, file, - DRM_IOCTL_RADEON_CMDBUF, (unsigned long)request); + return drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_RADEON_CMDBUF, (unsigned long) request); } typedef struct drm_radeon_getparam32 { @@ -293,8 +293,8 @@ static int compat_radeon_cp_getparam(str &request->value)) return -EFAULT; - return drm_ioctl(file->f_path.dentry->d_inode, file, - DRM_IOCTL_RADEON_GETPARAM, (unsigned long)request); + return drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_RADEON_GETPARAM, (unsigned long) request); } typedef struct drm_radeon_mem_alloc32 { @@ -322,8 +322,8 @@ static int compat_radeon_mem_alloc(struc &request->region_offset)) return -EFAULT; - return drm_ioctl(file->f_path.dentry->d_inode, file, - DRM_IOCTL_RADEON_ALLOC, (unsigned long)request); + return drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_RADEON_ALLOC, (unsigned long) request); } typedef struct drm_radeon_irq_emit32 { @@ -345,8 +345,8 @@ static int compat_radeon_irq_emit(struct &request->irq_seq)) return -EFAULT; - return drm_ioctl(file->f_path.dentry->d_inode, file, - DRM_IOCTL_RADEON_IRQ_EMIT, (unsigned long)request); + return drm_ioctl(file->f_dentry->d_inode, file, + DRM_IOCTL_RADEON_IRQ_EMIT, (unsigned long) request); } /* The two 64-bit arches where alignof(u64)==4 in 32-bit code */ @@ -362,7 +362,7 @@ static int compat_radeon_cp_setparam(str drm_radeon_setparam32_t req32; drm_radeon_setparam_t __user *request; - if 
(copy_from_user(&req32, (void __user *) arg, sizeof(req32))) + if (copy_from_user(&req32, (void __user *)arg, sizeof(req32))) return -EFAULT; request = compat_alloc_user_space(sizeof(*request)); @@ -415,9 +415,9 @@ long radeon_compat_ioctl(struct file *fi lock_kernel(); /* XXX for now */ if (fn != NULL) - ret = (*fn) (filp, cmd, arg); + ret = (*fn)(filp, cmd, arg); else - ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg); + ret = drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg); unlock_kernel(); return ret; diff -Napur -X /home/jbarnes/dontdiff gpu/drm/radeon/radeon_irq.c /tmp/drm-master/drivers/gpu/drm/radeon/radeon_irq.c --- gpu/drm/radeon/radeon_irq.c 2008-08-21 13:52:27.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/radeon/radeon_irq.c 2008-08-21 14:34:09.000000000 -0700 @@ -27,7 +27,7 @@ * * Authors: * Keith Whitwell - * Michel Dänzer + * Michel D�zer */ #include "drmP.h" @@ -35,12 +35,130 @@ #include "radeon_drm.h" #include "radeon_drv.h" -static __inline__ u32 radeon_acknowledge_irqs(drm_radeon_private_t * dev_priv, - u32 mask) +void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state) { - u32 irqs = RADEON_READ(RADEON_GEN_INT_STATUS) & mask; + drm_radeon_private_t *dev_priv = dev->dev_private; + + if (state) + dev_priv->irq_enable_reg |= mask; + else + dev_priv->irq_enable_reg &= ~mask; + + RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg); +} + +static void r500_vbl_irq_set_state(struct drm_device *dev, u32 mask, int state) +{ + drm_radeon_private_t *dev_priv = dev->dev_private; + + if (state) + dev_priv->r500_disp_irq_reg |= mask; + else + dev_priv->r500_disp_irq_reg &= ~mask; + + RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg); +} + +int radeon_enable_vblank(struct drm_device *dev, int crtc) +{ + drm_radeon_private_t *dev_priv = dev->dev_private; + + if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) { + switch (crtc) { + case 0: + r500_vbl_irq_set_state(dev, R500_D1MODE_INT_MASK, 1); + break; + case 1: + r500_vbl_irq_set_state(dev, R500_D2MODE_INT_MASK, 1); + break; + default: + DRM_ERROR("tried to enable vblank on non-existent crtc %d\n", + crtc); + return EINVAL; + } + } else { + switch (crtc) { + case 0: + radeon_irq_set_state(dev, RADEON_CRTC_VBLANK_MASK, 1); + break; + case 1: + radeon_irq_set_state(dev, RADEON_CRTC2_VBLANK_MASK, 1); + break; + default: + DRM_ERROR("tried to enable vblank on non-existent crtc %d\n", + crtc); + return EINVAL; + } + } + + return 0; +} + +void radeon_disable_vblank(struct drm_device *dev, int crtc) +{ + drm_radeon_private_t *dev_priv = dev->dev_private; + + if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) { + switch (crtc) { + case 0: + r500_vbl_irq_set_state(dev, R500_D1MODE_INT_MASK, 0); + break; + case 1: + r500_vbl_irq_set_state(dev, R500_D2MODE_INT_MASK, 0); + break; + default: + DRM_ERROR("tried to enable vblank on non-existent crtc %d\n", + crtc); + break; + } + } else { + switch (crtc) { + case 0: + radeon_irq_set_state(dev, RADEON_CRTC_VBLANK_MASK, 0); + break; + case 1: + radeon_irq_set_state(dev, RADEON_CRTC2_VBLANK_MASK, 0); + break; + default: + DRM_ERROR("tried to enable vblank on non-existent crtc %d\n", + crtc); + break; + } + } +} + +static __inline__ u32 radeon_acknowledge_irqs(drm_radeon_private_t * dev_priv, u32 *r500_disp_int) +{ + u32 irqs = RADEON_READ(RADEON_GEN_INT_STATUS); + u32 irq_mask = RADEON_SW_INT_TEST; + + *r500_disp_int = 0; + if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) { + /* vbl interrupts in a different place */ + + if 
(irqs & R500_DISPLAY_INT_STATUS) { + /* if a display interrupt */ + u32 disp_irq; + + disp_irq = RADEON_READ(R500_DISP_INTERRUPT_STATUS); + + *r500_disp_int = disp_irq; + if (disp_irq & R500_D1_VBLANK_INTERRUPT) { + RADEON_WRITE(R500_D1MODE_VBLANK_STATUS, R500_VBLANK_ACK); + } + if (disp_irq & R500_D2_VBLANK_INTERRUPT) { + RADEON_WRITE(R500_D2MODE_VBLANK_STATUS, R500_VBLANK_ACK); + } + } + irq_mask |= R500_DISPLAY_INT_STATUS; + } else + irq_mask |= RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT; + + irqs &= irq_mask; + if (irqs) RADEON_WRITE(RADEON_GEN_INT_STATUS, irqs); + return irqs; } @@ -68,44 +186,33 @@ irqreturn_t radeon_driver_irq_handler(DR drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private; u32 stat; + u32 r500_disp_int; /* Only consider the bits we're interested in - others could be used * outside the DRM */ - stat = radeon_acknowledge_irqs(dev_priv, (RADEON_SW_INT_TEST_ACK | - RADEON_CRTC_VBLANK_STAT | - RADEON_CRTC2_VBLANK_STAT)); + stat = radeon_acknowledge_irqs(dev_priv, &r500_disp_int); if (!stat) return IRQ_NONE; stat &= dev_priv->irq_enable_reg; /* SW interrupt */ - if (stat & RADEON_SW_INT_TEST) { + if (stat & RADEON_SW_INT_TEST) DRM_WAKEUP(&dev_priv->swi_queue); - } /* VBLANK interrupt */ - if (stat & (RADEON_CRTC_VBLANK_STAT|RADEON_CRTC2_VBLANK_STAT)) { - int vblank_crtc = dev_priv->vblank_crtc; - - if ((vblank_crtc & - (DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) == - (DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) { - if (stat & RADEON_CRTC_VBLANK_STAT) - atomic_inc(&dev->vbl_received); - if (stat & RADEON_CRTC2_VBLANK_STAT) - atomic_inc(&dev->vbl_received2); - } else if (((stat & RADEON_CRTC_VBLANK_STAT) && - (vblank_crtc & DRM_RADEON_VBLANK_CRTC1)) || - ((stat & RADEON_CRTC2_VBLANK_STAT) && - (vblank_crtc & DRM_RADEON_VBLANK_CRTC2))) - atomic_inc(&dev->vbl_received); - - DRM_WAKEUP(&dev->vbl_queue); - drm_vbl_send_signals(dev); + if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) { + if (r500_disp_int & R500_D1_VBLANK_INTERRUPT) + drm_handle_vblank(dev, 0); + if (r500_disp_int & R500_D2_VBLANK_INTERRUPT) + drm_handle_vblank(dev, 1); + } else { + if (stat & RADEON_CRTC_VBLANK_STAT) + drm_handle_vblank(dev, 0); + if (stat & RADEON_CRTC2_VBLANK_STAT) + drm_handle_vblank(dev, 1); } - return IRQ_HANDLED; } @@ -144,54 +251,31 @@ static int radeon_wait_irq(struct drm_de return ret; } -static int radeon_driver_vblank_do_wait(struct drm_device * dev, - unsigned int *sequence, int crtc) +u32 radeon_get_vblank_counter(struct drm_device *dev, int crtc) { - drm_radeon_private_t *dev_priv = - (drm_radeon_private_t *) dev->dev_private; - unsigned int cur_vblank; - int ret = 0; - int ack = 0; - atomic_t *counter; + drm_radeon_private_t *dev_priv = dev->dev_private; + if (!dev_priv) { DRM_ERROR("called with no initialization\n"); return -EINVAL; } - if (crtc == DRM_RADEON_VBLANK_CRTC1) { - counter = &dev->vbl_received; - ack |= RADEON_CRTC_VBLANK_STAT; - } else if (crtc == DRM_RADEON_VBLANK_CRTC2) { - counter = &dev->vbl_received2; - ack |= RADEON_CRTC2_VBLANK_STAT; - } else + if (crtc < 0 || crtc > 1) { + DRM_ERROR("Invalid crtc %d\n", crtc); return -EINVAL; + } - radeon_acknowledge_irqs(dev_priv, ack); - - dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; - - /* Assume that the user has missed the current sequence number - * by about a day rather than she wants to wait for years - * using vertical blanks... 
- */ - DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ, - (((cur_vblank = atomic_read(counter)) - - *sequence) <= (1 << 23))); - - *sequence = cur_vblank; - - return ret; -} - -int radeon_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence) -{ - return radeon_driver_vblank_do_wait(dev, sequence, DRM_RADEON_VBLANK_CRTC1); -} - -int radeon_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence) -{ - return radeon_driver_vblank_do_wait(dev, sequence, DRM_RADEON_VBLANK_CRTC2); + if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) { + if (crtc == 0) + return RADEON_READ(R500_D1CRTC_FRAME_COUNT); + else + return RADEON_READ(R500_D2CRTC_FRAME_COUNT); + } else { + if (crtc == 0) + return RADEON_READ(RADEON_CRTC_CRNT_FRAME); + else + return RADEON_READ(RADEON_CRTC2_CRNT_FRAME); + } } /* Needs the lock as it touches the ring. @@ -234,46 +318,41 @@ int radeon_irq_wait(struct drm_device *d return radeon_wait_irq(dev, irqwait->irq_seq); } -void radeon_enable_interrupt(struct drm_device *dev) -{ - drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private; - - dev_priv->irq_enable_reg = RADEON_SW_INT_ENABLE; - if (dev_priv->vblank_crtc & DRM_RADEON_VBLANK_CRTC1) - dev_priv->irq_enable_reg |= RADEON_CRTC_VBLANK_MASK; - - if (dev_priv->vblank_crtc & DRM_RADEON_VBLANK_CRTC2) - dev_priv->irq_enable_reg |= RADEON_CRTC2_VBLANK_MASK; - - RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg); - dev_priv->irq_enabled = 1; -} - /* drm_dma.h hooks */ void radeon_driver_irq_preinstall(struct drm_device * dev) { drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private; + u32 dummy; /* Disable *all* interrupts */ + if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) + RADEON_WRITE(R500_DxMODE_INT_MASK, 0); RADEON_WRITE(RADEON_GEN_INT_CNTL, 0); /* Clear bits if they're already high */ - radeon_acknowledge_irqs(dev_priv, (RADEON_SW_INT_TEST_ACK | - RADEON_CRTC_VBLANK_STAT | - RADEON_CRTC2_VBLANK_STAT)); + radeon_acknowledge_irqs(dev_priv, &dummy); } -void radeon_driver_irq_postinstall(struct drm_device * dev) +int radeon_driver_irq_postinstall(struct drm_device * dev) { drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private; + int ret; atomic_set(&dev_priv->swi_emitted, 0); DRM_INIT_WAITQUEUE(&dev_priv->swi_queue); - radeon_enable_interrupt(dev); + ret = drm_vblank_init(dev, 2); + if (ret) + return ret; + + dev->max_vblank_count = 0x001fffff; + + radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1); + + return 0; } void radeon_driver_irq_uninstall(struct drm_device * dev) @@ -285,6 +364,8 @@ void radeon_driver_irq_uninstall(struct dev_priv->irq_enabled = 0; + if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) + RADEON_WRITE(R500_DxMODE_INT_MASK, 0); /* Disable *all* interrupts */ RADEON_WRITE(RADEON_GEN_INT_CNTL, 0); } @@ -293,18 +374,8 @@ void radeon_driver_irq_uninstall(struct int radeon_vblank_crtc_get(struct drm_device *dev) { drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private; - u32 flag; - u32 value; - - flag = RADEON_READ(RADEON_GEN_INT_CNTL); - value = 0; - - if (flag & RADEON_CRTC_VBLANK_MASK) - value |= DRM_RADEON_VBLANK_CRTC1; - if (flag & RADEON_CRTC2_VBLANK_MASK) - value |= DRM_RADEON_VBLANK_CRTC2; - return value; + return dev_priv->vblank_crtc; } int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value) @@ -315,6 +386,5 @@ int radeon_vblank_crtc_set(struct drm_de return -EINVAL; } dev_priv->vblank_crtc = (unsigned int)value; - radeon_enable_interrupt(dev); return 0; } diff 
-Napur -X /home/jbarnes/dontdiff gpu/drm/radeon/radeon_mem.c /tmp/drm-master/drivers/gpu/drm/radeon/radeon_mem.c --- gpu/drm/radeon/radeon_mem.c 2008-08-21 13:52:27.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/radeon/radeon_mem.c 2008-08-21 14:34:09.000000000 -0700 @@ -100,8 +100,8 @@ static struct mem_block *find_block(stru struct mem_block *p; list_for_each(p, heap) - if (p->start == start) - return p; + if (p->start == start) + return p; return NULL; } diff -Napur -X /home/jbarnes/dontdiff gpu/drm/radeon/radeon_microcode.h /tmp/drm-master/drivers/gpu/drm/radeon/radeon_microcode.h --- gpu/drm/radeon/radeon_microcode.h 2008-08-21 13:52:27.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/radeon/radeon_microcode.h 2008-08-21 14:34:09.000000000 -0700 @@ -27,7 +27,7 @@ #define RADEON_MICROCODE_H /* production radeon ucode r1xx-r6xx */ -static const u32 R100_cp_microcode[][2] = { +static const u32 R100_cp_microcode[][2]={ { 0x21007000, 0000000000 }, { 0x20007000, 0000000000 }, { 0x000000b4, 0x00000004 }, @@ -286,7 +286,7 @@ static const u32 R100_cp_microcode[][2] { 0000000000, 0000000000 }, }; -static const u32 R200_cp_microcode[][2] = { +static const u32 R200_cp_microcode[][2]={ { 0x21007000, 0000000000 }, { 0x20007000, 0000000000 }, { 0x000000bf, 0x00000004 }, @@ -545,7 +545,7 @@ static const u32 R200_cp_microcode[][2] { 0000000000, 0000000000 }, }; -static const u32 R300_cp_microcode[][2] = { +static const u32 R300_cp_microcode[][2]={ { 0x4200e000, 0000000000 }, { 0x4000e000, 0000000000 }, { 0x000000ae, 0x00000008 }, @@ -804,7 +804,7 @@ static const u32 R300_cp_microcode[][2] { 0000000000, 0000000000 }, }; -static const u32 R420_cp_microcode[][2] = { +static const u32 R420_cp_microcode[][2]={ { 0x4200e000, 0000000000 }, { 0x4000e000, 0000000000 }, { 0x00000099, 0x00000008 }, @@ -1063,7 +1063,7 @@ static const u32 R420_cp_microcode[][2] { 0000000000, 0000000000 }, }; -static const u32 RS600_cp_microcode[][2] = { +static const u32 RS600_cp_microcode[][2]={ { 0x4200e000, 0000000000 }, { 0x4000e000, 0000000000 }, { 0x000000a0, 0x00000008 }, @@ -1322,7 +1322,7 @@ static const u32 RS600_cp_microcode[][2] { 0000000000, 0000000000 }, }; -static const u32 RS690_cp_microcode[][2] = { +static const u32 RS690_cp_microcode[][2]={ { 0x000000dd, 0x00000008 }, { 0x000000df, 0x00000008 }, { 0x000000a0, 0x00000008 }, @@ -1581,7 +1581,7 @@ static const u32 RS690_cp_microcode[][2] { 0000000000, 0000000000 }, }; -static const u32 R520_cp_microcode[][2] = { +static const u32 R520_cp_microcode[][2]={ { 0x4200e000, 0000000000 }, { 0x4000e000, 0000000000 }, { 0x00000099, 0x00000008 }, diff -Napur -X /home/jbarnes/dontdiff gpu/drm/radeon/radeon_state.c /tmp/drm-master/drivers/gpu/drm/radeon/radeon_state.c --- gpu/drm/radeon/radeon_state.c 2008-08-21 13:52:27.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/radeon/radeon_state.c 2008-08-21 14:34:09.000000000 -0700 @@ -39,8 +39,8 @@ static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t * dev_priv, - struct drm_file * file_priv, - u32 *offset) + struct drm_file *file_priv, + u32 * offset) { u64 off = *offset; u32 fb_end = dev_priv->fb_location + dev_priv->fb_size - 1; @@ -169,7 +169,7 @@ static __inline__ int radeon_check_and_f } break; - case R200_EMIT_VAP_CTL:{ + case R200_EMIT_VAP_CTL: { RING_LOCALS; BEGIN_RING(2); OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0); @@ -305,8 +305,9 @@ static __inline__ int radeon_check_and_f case RADEON_CP_3D_DRAW_INDX_2: case RADEON_3D_CLEAR_HIZ: /* safe but r200 only */ - if (dev_priv->microcode_version != 
UCODE_R200) { - DRM_ERROR("Invalid 3d packet for r100-class chip\n"); + if ((dev_priv->chip_family < CHIP_R200) || + (dev_priv->chip_family > CHIP_RV280)) { + DRM_ERROR("Invalid 3d packet for non r200-class chip\n"); return -EINVAL; } break; @@ -359,8 +360,8 @@ static __inline__ int radeon_check_and_f break; case RADEON_3D_RNDR_GEN_INDX_PRIM: - if (dev_priv->microcode_version != UCODE_R100) { - DRM_ERROR("Invalid 3d packet for r200-class chip\n"); + if (dev_priv->chip_family > CHIP_RS200) { + DRM_ERROR("Invalid 3d packet for non-r100-class chip\n"); return -EINVAL; } if (radeon_check_and_fixup_offset(dev_priv, file_priv, &cmd[1])) { @@ -370,8 +371,10 @@ static __inline__ int radeon_check_and_f break; case RADEON_CP_INDX_BUFFER: - if (dev_priv->microcode_version != UCODE_R200) { - DRM_ERROR("Invalid 3d packet for r100-class chip\n"); + /* safe but r200 only */ + if ((dev_priv->chip_family < CHIP_R200) || + (dev_priv->chip_family > CHIP_RV280)) { + DRM_ERROR("Invalid 3d packet for non-r200-class chip\n"); return -EINVAL; } if ((cmd[1] & 0x8000ffff) != 0x80000810) { @@ -1015,7 +1018,7 @@ static void radeon_cp_dispatch_clear(str int tileoffset, nrtilesx, nrtilesy, j; /* it looks like r200 needs rv-style clears, at least if hierz is not enabled? */ if ((dev_priv->flags & RADEON_HAS_HIERZ) - && !(dev_priv->microcode_version == UCODE_R200)) { + && (dev_priv->chip_family < CHIP_R200)) { /* FIXME : figure this out for r200 (when hierz is enabled). Or maybe r200 actually doesn't need to put the low-res z value into the tile cache like r100, but just needs to clear the hi-level z-buffer? @@ -1044,7 +1047,8 @@ static void radeon_cp_dispatch_clear(str ADVANCE_RING(); tileoffset += depthpixperline >> 6; } - } else if (dev_priv->microcode_version == UCODE_R200) { + } else if ((dev_priv->chip_family >= CHIP_R200) && + (dev_priv->chip_family <= CHIP_RV280)) { /* works for rv250. */ /* find first macro tile (8x2 4x4 z-pixels on rv250) */ tileoffset = @@ -1099,7 +1103,8 @@ static void radeon_cp_dispatch_clear(str /* TODO don't always clear all hi-level z tiles */ if ((dev_priv->flags & RADEON_HAS_HIERZ) - && (dev_priv->microcode_version == UCODE_R200) + && ((dev_priv->chip_family >= CHIP_R200) && + (dev_priv->chip_family <= CHIP_RV280)) && (flags & RADEON_USE_HIERZ)) /* r100 and cards without hierarchical z-buffer have no high-level z-buffer */ /* FIXME : the mask supposedly contains low-res z values. So can't set @@ -1119,8 +1124,9 @@ static void radeon_cp_dispatch_clear(str * rendering a quad into just those buffers. Thus, we have to * make sure the 3D engine is configured correctly. 
*/ - else if ((dev_priv->microcode_version == UCODE_R200) && - (flags & (RADEON_DEPTH | RADEON_STENCIL))) { + else if ((dev_priv->chip_family >= CHIP_R200) && + (dev_priv->chip_family <= CHIP_RV280) && + (flags & (RADEON_DEPTH | RADEON_STENCIL))) { int tempPP_CNTL; int tempRE_CNTL; @@ -2090,6 +2096,11 @@ static int radeon_surface_alloc(struct d drm_radeon_private_t *dev_priv = dev->dev_private; drm_radeon_surface_alloc_t *alloc = data; + if (!dev_priv) { + DRM_ERROR("called with no initialization\n"); + return -EINVAL; + } + if (alloc_surface(alloc, dev_priv, file_priv) == -1) return -EINVAL; else @@ -2101,6 +2112,11 @@ static int radeon_surface_free(struct dr drm_radeon_private_t *dev_priv = dev->dev_private; drm_radeon_surface_free_t *memfree = data; + if (!dev_priv) { + DRM_ERROR("called with no initialization\n"); + return -EINVAL; + } + if (free_surface(file_priv, dev_priv, memfree->address)) return -EINVAL; else @@ -2203,7 +2219,7 @@ static int radeon_cp_swap(struct drm_dev static int radeon_cp_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_radeon_private_t *dev_priv = dev->dev_private; - drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; + drm_radeon_sarea_t *sarea_priv; struct drm_device_dma *dma = dev->dma; struct drm_buf *buf; drm_radeon_vertex_t *vertex = data; @@ -2211,6 +2227,13 @@ static int radeon_cp_vertex(struct drm_d LOCK_TEST_WITH_RETURN(dev, file_priv); + if (!dev_priv) { + DRM_ERROR("called with no initialization\n"); + return -EINVAL; + } + + sarea_priv = dev_priv->sarea_priv; + DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n", DRM_CURRENTPID, vertex->idx, vertex->count, vertex->discard); @@ -2279,7 +2302,7 @@ static int radeon_cp_vertex(struct drm_d static int radeon_cp_indices(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_radeon_private_t *dev_priv = dev->dev_private; - drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; + drm_radeon_sarea_t *sarea_priv; struct drm_device_dma *dma = dev->dma; struct drm_buf *buf; drm_radeon_indices_t *elts = data; @@ -2288,6 +2311,12 @@ static int radeon_cp_indices(struct drm_ LOCK_TEST_WITH_RETURN(dev, file_priv); + if (!dev_priv) { + DRM_ERROR("called with no initialization\n"); + return -EINVAL; + } + sarea_priv = dev_priv->sarea_priv; + DRM_DEBUG("pid=%d index=%d start=%d end=%d discard=%d\n", DRM_CURRENTPID, elts->idx, elts->start, elts->end, elts->discard); @@ -2420,6 +2449,11 @@ static int radeon_cp_indirect(struct drm LOCK_TEST_WITH_RETURN(dev, file_priv); + if (!dev_priv) { + DRM_ERROR("called with no initialization\n"); + return -EINVAL; + } + DRM_DEBUG("idx=%d s=%d e=%d d=%d\n", indirect->idx, indirect->start, indirect->end, indirect->discard); @@ -2478,7 +2512,7 @@ static int radeon_cp_indirect(struct drm static int radeon_cp_vertex2(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_radeon_private_t *dev_priv = dev->dev_private; - drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv; + drm_radeon_sarea_t *sarea_priv; struct drm_device_dma *dma = dev->dma; struct drm_buf *buf; drm_radeon_vertex2_t *vertex = data; @@ -2487,6 +2521,13 @@ static int radeon_cp_vertex2(struct drm_ LOCK_TEST_WITH_RETURN(dev, file_priv); + if (!dev_priv) { + DRM_ERROR("called with no initialization\n"); + return -EINVAL; + } + + sarea_priv = dev_priv->sarea_priv; + DRM_DEBUG("pid=%d index=%d discard=%d\n", DRM_CURRENTPID, vertex->idx, vertex->discard); @@ -2675,10 +2716,10 @@ static __inline__ int radeon_emit_veclin int start = header.veclinear.addr_lo | 
(header.veclinear.addr_hi << 8); RING_LOCALS; - if (!sz) - return 0; - if (sz * 4 > cmdbuf->bufsz) - return -EINVAL; + if (!sz) + return 0; + if (sz * 4 > cmdbuf->bufsz) + return -EINVAL; BEGIN_RING(5 + sz); OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0); @@ -2823,6 +2864,11 @@ static int radeon_cp_cmdbuf(struct drm_d LOCK_TEST_WITH_RETURN(dev, file_priv); + if (!dev_priv) { + DRM_ERROR("called with no initialization\n"); + return -EINVAL; + } + RING_SPACE_TEST_WITH_RETURN(dev_priv); VB_AGE_TEST_WITH_RETURN(dev_priv); @@ -2849,7 +2895,7 @@ static int radeon_cp_cmdbuf(struct drm_d orig_nbox = cmdbuf->nbox; - if (dev_priv->microcode_version == UCODE_R300) { + if (dev_priv->chip_family >= CHIP_R300) { int temp; temp = r300_do_cp_cmdbuf(dev, file_priv, cmdbuf); @@ -2979,6 +3025,11 @@ static int radeon_cp_getparam(struct drm drm_radeon_getparam_t *param = data; int value; + if (!dev_priv) { + DRM_ERROR("called with no initialization\n"); + return -EINVAL; + } + DRM_DEBUG("pid=%d\n", DRM_CURRENTPID); switch (param->param) { @@ -3008,7 +3059,7 @@ static int radeon_cp_getparam(struct drm case RADEON_PARAM_STATUS_HANDLE: value = dev_priv->ring_rptr_offset; break; -#if BITS_PER_LONG == 32 +#ifndef __LP64__ /* * This ioctl() doesn't work on 64-bit platforms because hw_lock is a * pointer which can't fit into an int-sized variable. According to @@ -3031,6 +3082,7 @@ static int radeon_cp_getparam(struct drm return -EINVAL; value = RADEON_SCRATCH_REG_OFFSET; break; + case RADEON_PARAM_CARD_TYPE: if (dev_priv->flags & RADEON_IS_PCIE) value = RADEON_CARD_PCIE; @@ -3049,7 +3101,7 @@ static int radeon_cp_getparam(struct drm value = dev_priv->num_gb_pipes; break; default: - DRM_DEBUG("Invalid parameter %d\n", param->param); + DRM_DEBUG( "Invalid parameter %d\n", param->param ); return -EINVAL; } @@ -3067,6 +3119,11 @@ static int radeon_cp_setparam(struct drm drm_radeon_setparam_t *sp = data; struct drm_radeon_driver_file_fields *radeon_priv; + if (!dev_priv) { + DRM_ERROR("called with no initialization\n"); + return -EINVAL; + } + switch (sp->param) { case RADEON_SETPARAM_FB_LOCATION: radeon_priv = file_priv->driver_priv; @@ -3078,12 +3135,14 @@ static int radeon_cp_setparam(struct drm DRM_DEBUG("color tiling disabled\n"); dev_priv->front_pitch_offset &= ~RADEON_DST_TILE_MACRO; dev_priv->back_pitch_offset &= ~RADEON_DST_TILE_MACRO; - dev_priv->sarea_priv->tiling_enabled = 0; + if (dev_priv->sarea_priv) + dev_priv->sarea_priv->tiling_enabled = 0; } else if (sp->value == 1) { DRM_DEBUG("color tiling enabled\n"); dev_priv->front_pitch_offset |= RADEON_DST_TILE_MACRO; dev_priv->back_pitch_offset |= RADEON_DST_TILE_MACRO; - dev_priv->sarea_priv->tiling_enabled = 1; + if (dev_priv->sarea_priv) + dev_priv->sarea_priv->tiling_enabled = 1; } break; case RADEON_SETPARAM_PCIGART_LOCATION: @@ -3116,7 +3175,8 @@ static int radeon_cp_setparam(struct drm * * DRM infrastructure takes care of reclaiming dma buffers. 
*/ -void radeon_driver_preclose(struct drm_device *dev, struct drm_file *file_priv) +void radeon_driver_preclose(struct drm_device *dev, + struct drm_file *file_priv) { if (dev->dev_private) { drm_radeon_private_t *dev_priv = dev->dev_private; diff -Napur -X /home/jbarnes/dontdiff gpu/drm/README.drm /tmp/drm-master/drivers/gpu/drm/README.drm --- gpu/drm/README.drm 2008-08-21 13:52:27.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/README.drm 1969-12-31 16:00:00.000000000 -0800 @@ -1,43 +0,0 @@ -************************************************************ -* For the very latest on DRI development, please see: * -* http://dri.freedesktop.org/ * -************************************************************ - -The Direct Rendering Manager (drm) is a device-independent kernel-level -device driver that provides support for the XFree86 Direct Rendering -Infrastructure (DRI). - -The DRM supports the Direct Rendering Infrastructure (DRI) in four major -ways: - - 1. The DRM provides synchronized access to the graphics hardware via - the use of an optimized two-tiered lock. - - 2. The DRM enforces the DRI security policy for access to the graphics - hardware by only allowing authenticated X11 clients access to - restricted regions of memory. - - 3. The DRM provides a generic DMA engine, complete with multiple - queues and the ability to detect the need for an OpenGL context - switch. - - 4. The DRM is extensible via the use of small device-specific modules - that rely extensively on the API exported by the DRM module. - - -Documentation on the DRI is available from: - http://dri.freedesktop.org/wiki/Documentation - http://sourceforge.net/project/showfiles.php?group_id=387 - http://dri.sourceforge.net/doc/ - -For specific information about kernel-level support, see: - - The Direct Rendering Manager, Kernel Support for the Direct Rendering - Infrastructure - http://dri.sourceforge.net/doc/drm_low_level.html - - Hardware Locking for the Direct Rendering Infrastructure - http://dri.sourceforge.net/doc/hardware_locking_low_level.html - - A Security Analysis of the Direct Rendering Infrastructure - http://dri.sourceforge.net/doc/security_low_level.html diff -Napur -X /home/jbarnes/dontdiff gpu/drm/savage/Makefile /tmp/drm-master/drivers/gpu/drm/savage/Makefile --- gpu/drm/savage/Makefile 2008-08-21 13:52:27.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/savage/Makefile 1969-12-31 16:00:00.000000000 -0800 @@ -1,9 +0,0 @@ -# -# Makefile for the drm device driver. This driver provides support for the -# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. - -ccflags-y = -Iinclude/drm -savage-y := savage_drv.o savage_bci.o savage_state.o - -obj-$(CONFIG_DRM_SAVAGE)+= savage.o - diff -Napur -X /home/jbarnes/dontdiff gpu/drm/savage/savage_bci.c /tmp/drm-master/drivers/gpu/drm/savage/savage_bci.c --- gpu/drm/savage/savage_bci.c 2008-08-21 13:52:27.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/savage/savage_bci.c 2008-08-21 14:34:09.000000000 -0700 @@ -28,14 +28,14 @@ /* Need a long timeout for shadow status updates can take a while * and so can waiting for events when the queue is full. 
*/ -#define SAVAGE_DEFAULT_USEC_TIMEOUT 1000000 /* 1s */ -#define SAVAGE_EVENT_USEC_TIMEOUT 5000000 /* 5s */ +#define SAVAGE_DEFAULT_USEC_TIMEOUT 1000000 /* 1s */ +#define SAVAGE_EVENT_USEC_TIMEOUT 5000000 /* 5s */ #define SAVAGE_FREELIST_DEBUG 0 static int savage_do_cleanup_bci(struct drm_device *dev); static int -savage_bci_wait_fifo_shadow(drm_savage_private_t * dev_priv, unsigned int n) +savage_bci_wait_fifo_shadow(drm_savage_private_t *dev_priv, unsigned int n) { uint32_t mask = dev_priv->status_used_mask; uint32_t threshold = dev_priv->bci_threshold_hi; @@ -64,7 +64,7 @@ savage_bci_wait_fifo_shadow(drm_savage_p } static int -savage_bci_wait_fifo_s3d(drm_savage_private_t * dev_priv, unsigned int n) +savage_bci_wait_fifo_s3d(drm_savage_private_t *dev_priv, unsigned int n) { uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n; uint32_t status; @@ -85,7 +85,7 @@ savage_bci_wait_fifo_s3d(drm_savage_priv } static int -savage_bci_wait_fifo_s4(drm_savage_private_t * dev_priv, unsigned int n) +savage_bci_wait_fifo_s4(drm_savage_private_t *dev_priv, unsigned int n) { uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n; uint32_t status; @@ -117,7 +117,7 @@ savage_bci_wait_fifo_s4(drm_savage_priva * rule. Otherwise there may be glitches every 2^16 events. */ static int -savage_bci_wait_event_shadow(drm_savage_private_t * dev_priv, uint16_t e) +savage_bci_wait_event_shadow(drm_savage_private_t *dev_priv, uint16_t e) { uint32_t status; int i; @@ -140,7 +140,7 @@ savage_bci_wait_event_shadow(drm_savage_ } static int -savage_bci_wait_event_reg(drm_savage_private_t * dev_priv, uint16_t e) +savage_bci_wait_event_reg(drm_savage_private_t *dev_priv, uint16_t e) { uint32_t status; int i; @@ -161,7 +161,7 @@ savage_bci_wait_event_reg(drm_savage_pri return -EBUSY; } -uint16_t savage_bci_emit_event(drm_savage_private_t * dev_priv, +uint16_t savage_bci_emit_event(drm_savage_private_t *dev_priv, unsigned int flags) { uint16_t count; @@ -177,12 +177,12 @@ uint16_t savage_bci_emit_event(drm_savag } count = (count + 1) & 0xffff; if (count == 0) { - count++; /* See the comment above savage_wait_event_*. */ + count++; /* See the comment above savage_wait_event_*. 
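The savage changes above keep the existing event-tag scheme: the hardware reports a 16-bit event count, and the driver pairs it with a software wrap counter (event_wrap) that is bumped every time the 16-bit tag rolls over. When the status word is read back, a tag that is numerically ahead of event_counter means the engine has not crossed the latest wrap yet, which is why the readers below do wrap-- before comparing. A small sketch of the comparison, equivalent to the TEST_AGE() macro in savage_drv.h (struct and function names here are illustrative):

/* A (16-bit event, wrap) pair, as stored in drm_savage_age_t. */
struct example_age {
	uint16_t event;
	unsigned int wrap;
};

/* Has the engine, last observed at (hw_event, hw_wrap), completed 'age'? */
static int example_age_done(const struct example_age *age,
			    uint16_t hw_event, unsigned int hw_wrap)
{
	return age->wrap < hw_wrap ||
	       (age->wrap == hw_wrap && age->event <= hw_event);
}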
*/ dev_priv->event_wrap++; } dev_priv->event_counter = count; if (dev_priv->status_ptr) - dev_priv->status_ptr[1023] = (uint32_t) count; + dev_priv->status_ptr[1023] = (uint32_t)count; if ((flags & (SAVAGE_WAIT_2D | SAVAGE_WAIT_3D))) { unsigned int wait_cmd = BCI_CMD_WAIT; @@ -195,7 +195,7 @@ uint16_t savage_bci_emit_event(drm_savag } else { BEGIN_BCI(1); } - BCI_WRITE(BCI_CMD_UPDATE_EVENT_TAG | (uint32_t) count); + BCI_WRITE(BCI_CMD_UPDATE_EVENT_TAG | (uint32_t)count); return count; } @@ -203,7 +203,7 @@ uint16_t savage_bci_emit_event(drm_savag /* * Freelist management */ -static int savage_freelist_init(struct drm_device * dev) +static int savage_freelist_init(struct drm_device *dev) { drm_savage_private_t *dev_priv = dev->dev_private; struct drm_device_dma *dma = dev->dma; @@ -236,7 +236,7 @@ static int savage_freelist_init(struct d return 0; } -static struct drm_buf *savage_freelist_get(struct drm_device * dev) +static struct drm_buf *savage_freelist_get(struct drm_device *dev) { drm_savage_private_t *dev_priv = dev->dev_private; drm_savage_buf_priv_t *tail = dev_priv->tail.prev; @@ -251,7 +251,7 @@ static struct drm_buf *savage_freelist_g event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff; wrap = dev_priv->event_wrap; if (event > dev_priv->event_counter) - wrap--; /* hardware hasn't passed the last wrap yet */ + wrap--; /* hardware hasn't passed the last wrap yet */ DRM_DEBUG(" tail=0x%04x %d\n", tail->age.event, tail->age.wrap); DRM_DEBUG(" head=0x%04x %d\n", event, wrap); @@ -269,7 +269,7 @@ static struct drm_buf *savage_freelist_g return NULL; } -void savage_freelist_put(struct drm_device * dev, struct drm_buf * buf) +void savage_freelist_put(struct drm_device *dev, struct drm_buf *buf) { drm_savage_private_t *dev_priv = dev->dev_private; drm_savage_buf_priv_t *entry = buf->dev_private, *prev, *next; @@ -292,12 +292,12 @@ void savage_freelist_put(struct drm_devi /* * Command DMA */ -static int savage_dma_init(drm_savage_private_t * dev_priv) +static int savage_dma_init(drm_savage_private_t *dev_priv) { unsigned int i; dev_priv->nr_dma_pages = dev_priv->cmd_dma->size / - (SAVAGE_DMA_PAGE_SIZE * 4); + (SAVAGE_DMA_PAGE_SIZE*4); dev_priv->dma_pages = drm_alloc(sizeof(drm_savage_dma_page_t) * dev_priv->nr_dma_pages, DRM_MEM_DRIVER); if (dev_priv->dma_pages == NULL) @@ -316,7 +316,7 @@ static int savage_dma_init(drm_savage_pr return 0; } -void savage_dma_reset(drm_savage_private_t * dev_priv) +void savage_dma_reset(drm_savage_private_t *dev_priv) { uint16_t event; unsigned int wrap, i; @@ -331,7 +331,7 @@ void savage_dma_reset(drm_savage_private dev_priv->first_dma_page = dev_priv->current_dma_page = 0; } -void savage_dma_wait(drm_savage_private_t * dev_priv, unsigned int page) +void savage_dma_wait(drm_savage_private_t *dev_priv, unsigned int page) { uint16_t event; unsigned int wrap; @@ -347,7 +347,7 @@ void savage_dma_wait(drm_savage_private_ event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff; wrap = dev_priv->event_wrap; if (event > dev_priv->event_counter) - wrap--; /* hardware hasn't passed the last wrap yet */ + wrap--; /* hardware hasn't passed the last wrap yet */ if (dev_priv->dma_pages[page].age.wrap > wrap || (dev_priv->dma_pages[page].age.wrap == wrap && @@ -359,13 +359,13 @@ void savage_dma_wait(drm_savage_private_ } } -uint32_t *savage_dma_alloc(drm_savage_private_t * dev_priv, unsigned int n) +uint32_t *savage_dma_alloc(drm_savage_private_t *dev_priv, unsigned int n) { unsigned int cur = dev_priv->current_dma_page; unsigned int rest = SAVAGE_DMA_PAGE_SIZE - - 
dev_priv->dma_pages[cur].used; + dev_priv->dma_pages[cur].used; unsigned int nr_pages = (n - rest + SAVAGE_DMA_PAGE_SIZE - 1) / - SAVAGE_DMA_PAGE_SIZE; + SAVAGE_DMA_PAGE_SIZE; uint32_t *dma_ptr; unsigned int i; @@ -373,7 +373,7 @@ uint32_t *savage_dma_alloc(drm_savage_pr cur, dev_priv->dma_pages[cur].used, n, rest, nr_pages); if (cur + nr_pages < dev_priv->nr_dma_pages) { - dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle + + dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle + cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used; if (n < rest) rest = n; @@ -389,7 +389,7 @@ uint32_t *savage_dma_alloc(drm_savage_pr dev_priv->dma_pages[i].used = 0; dev_priv->dma_pages[i].flushed = 0; } - dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle; + dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle; dev_priv->first_dma_page = cur = 0; } for (i = cur; nr_pages > 0; ++i, --nr_pages) { @@ -415,7 +415,7 @@ uint32_t *savage_dma_alloc(drm_savage_pr return dma_ptr; } -static void savage_dma_flush(drm_savage_private_t * dev_priv) +static void savage_dma_flush(drm_savage_private_t *dev_priv) { unsigned int first = dev_priv->first_dma_page; unsigned int cur = dev_priv->current_dma_page; @@ -440,7 +440,7 @@ static void savage_dma_flush(drm_savage_ /* pad with noops */ if (pad) { - uint32_t *dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle + + uint32_t *dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle + cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used; dev_priv->dma_pages[cur].used += pad; while (pad != 0) { @@ -453,8 +453,8 @@ static void savage_dma_flush(drm_savage_ /* do flush ... */ phys_addr = dev_priv->cmd_dma->offset + - (first * SAVAGE_DMA_PAGE_SIZE + - dev_priv->dma_pages[first].flushed) * 4; + (first * SAVAGE_DMA_PAGE_SIZE + + dev_priv->dma_pages[first].flushed) * 4; len = (cur - first) * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used - dev_priv->dma_pages[first].flushed; @@ -498,7 +498,7 @@ static void savage_dma_flush(drm_savage_ dev_priv->dma_pages[cur].flushed); } -static void savage_fake_dma_flush(drm_savage_private_t * dev_priv) +static void savage_fake_dma_flush(drm_savage_private_t *dev_priv) { unsigned int i, j; BCI_LOCALS; @@ -514,8 +514,8 @@ static void savage_fake_dma_flush(drm_sa for (i = dev_priv->first_dma_page; i <= dev_priv->current_dma_page && dev_priv->dma_pages[i].used; ++i) { - uint32_t *dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle + - i * SAVAGE_DMA_PAGE_SIZE; + uint32_t *dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle + + i * SAVAGE_DMA_PAGE_SIZE; #if SAVAGE_DMA_DEBUG /* Sanity check: all pages except the last one must be full. */ if (i < dev_priv->current_dma_page && @@ -551,7 +551,6 @@ int savage_driver_load(struct drm_device return 0; } - /* * Initalize mappings. 
On Savage4 and SavageIX the alignment * and size of the aperture is not suitable for automatic MTRR setup @@ -587,7 +586,7 @@ int savage_driver_firstopen(struct drm_d dev_priv->mtrr[0].size = 0x01000000; dev_priv->mtrr[0].handle = drm_mtrr_add(dev_priv->mtrr[0].base, - dev_priv->mtrr[0].size, DRM_MTRR_WC); + dev_priv->mtrr[0].size, DRM_MTRR_WC); dev_priv->mtrr[1].base = fb_base + 0x02000000; dev_priv->mtrr[1].size = 0x02000000; dev_priv->mtrr[1].handle = @@ -597,7 +596,7 @@ int savage_driver_firstopen(struct drm_d dev_priv->mtrr[2].size = 0x04000000; dev_priv->mtrr[2].handle = drm_mtrr_add(dev_priv->mtrr[2].base, - dev_priv->mtrr[2].size, DRM_MTRR_WC); + dev_priv->mtrr[2].size, DRM_MTRR_WC); } else { DRM_ERROR("strange pci_resource_len %08lx\n", drm_get_resource_len(dev, 0)); @@ -663,8 +662,8 @@ void savage_driver_lastclose(struct drm_ for (i = 0; i < 3; ++i) if (dev_priv->mtrr[i].handle >= 0) drm_mtrr_del(dev_priv->mtrr[i].handle, - dev_priv->mtrr[i].base, - dev_priv->mtrr[i].size, DRM_MTRR_WC); + dev_priv->mtrr[i].base, + dev_priv->mtrr[i].size, DRM_MTRR_WC); } int savage_driver_unload(struct drm_device *dev) @@ -676,7 +675,7 @@ int savage_driver_unload(struct drm_devi return 0; } -static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init) +static int savage_do_init_bci(struct drm_device *dev, drm_savage_init_t *init) { drm_savage_private_t *dev_priv = dev->dev_private; @@ -745,7 +744,7 @@ static int savage_do_init_bci(struct drm } if (init->agp_textures_offset) { dev_priv->agp_textures = - drm_core_findmap(dev, init->agp_textures_offset); + drm_core_findmap(dev, init->agp_textures_offset); if (!dev_priv->agp_textures) { DRM_ERROR("could not find agp texture region!\n"); savage_do_cleanup_bci(dev); @@ -816,8 +815,8 @@ static int savage_do_init_bci(struct drm } dev_priv->sarea_priv = - (drm_savage_sarea_t *) ((uint8_t *) dev_priv->sarea->handle + - init->sarea_priv_offset); + (drm_savage_sarea_t *)((uint8_t *)dev_priv->sarea->handle + + init->sarea_priv_offset); /* setup bitmap descriptors */ { @@ -826,9 +825,9 @@ static int savage_do_init_bci(struct drm unsigned int front_stride, back_stride, depth_stride; if (dev_priv->chipset <= S3_SAVAGE4) { color_tile_format = dev_priv->fb_bpp == 16 ? - SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP; + SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP; depth_tile_format = dev_priv->depth_bpp == 16 ? 
- SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP; + SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP; } else { color_tile_format = SAVAGE_BD_TILE_DEST; depth_tile_format = SAVAGE_BD_TILE_DEST; @@ -839,23 +838,23 @@ static int savage_do_init_bci(struct drm dev_priv->depth_pitch / (dev_priv->depth_bpp / 8); dev_priv->front_bd = front_stride | SAVAGE_BD_BW_DISABLE | - (dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) | - (color_tile_format << SAVAGE_BD_TILE_SHIFT); + (dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) | + (color_tile_format << SAVAGE_BD_TILE_SHIFT); - dev_priv->back_bd = back_stride | SAVAGE_BD_BW_DISABLE | - (dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) | - (color_tile_format << SAVAGE_BD_TILE_SHIFT); + dev_priv-> back_bd = back_stride | SAVAGE_BD_BW_DISABLE | + (dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) | + (color_tile_format << SAVAGE_BD_TILE_SHIFT); dev_priv->depth_bd = depth_stride | SAVAGE_BD_BW_DISABLE | - (dev_priv->depth_bpp << SAVAGE_BD_BPP_SHIFT) | - (depth_tile_format << SAVAGE_BD_TILE_SHIFT); + (dev_priv->depth_bpp << SAVAGE_BD_BPP_SHIFT) | + (depth_tile_format << SAVAGE_BD_TILE_SHIFT); } /* setup status and bci ptr */ dev_priv->event_counter = 0; dev_priv->event_wrap = 0; dev_priv->bci_ptr = (volatile uint32_t *) - ((uint8_t *) dev_priv->mmio->handle + SAVAGE_BCI_OFFSET); + ((uint8_t *)dev_priv->mmio->handle + SAVAGE_BCI_OFFSET); if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S3D; } else { @@ -863,7 +862,7 @@ static int savage_do_init_bci(struct drm } if (dev_priv->status != NULL) { dev_priv->status_ptr = - (volatile uint32_t *)dev_priv->status->handle; + (volatile uint32_t *)dev_priv->status->handle; dev_priv->wait_fifo = savage_bci_wait_fifo_shadow; dev_priv->wait_evnt = savage_bci_wait_event_shadow; dev_priv->status_ptr[1023] = dev_priv->event_counter; @@ -898,7 +897,7 @@ static int savage_do_init_bci(struct drm return 0; } -static int savage_do_cleanup_bci(struct drm_device * dev) +static int savage_do_cleanup_bci(struct drm_device *dev) { drm_savage_private_t *dev_priv = dev->dev_private; @@ -922,7 +921,7 @@ static int savage_do_cleanup_bci(struct if (dev_priv->dma_pages) drm_free(dev_priv->dma_pages, - sizeof(drm_savage_dma_page_t) * dev_priv->nr_dma_pages, + sizeof(drm_savage_dma_page_t)*dev_priv->nr_dma_pages, DRM_MEM_DRIVER); return 0; @@ -975,7 +974,7 @@ static int savage_bci_event_wait(struct hw_e = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff; hw_w = dev_priv->event_wrap; if (hw_e > dev_priv->event_counter) - hw_w--; /* hardware hasn't passed the last wrap yet */ + hw_w--; /* hardware hasn't passed the last wrap yet */ event_e = event->count & 0xffff; event_w = event->count >> 16; @@ -1066,8 +1065,6 @@ void savage_reclaim_buffers(struct drm_d if (!dma->buflist) return; - /*i830_flush_queue(dev); */ - for (i = 0; i < dma->buf_count; i++) { struct drm_buf *buf = dma->buflist[i]; drm_savage_buf_priv_t *buf_priv = buf->dev_private; diff -Napur -X /home/jbarnes/dontdiff gpu/drm/savage/savage_drv.c /tmp/drm-master/drivers/gpu/drm/savage/savage_drv.c --- gpu/drm/savage/savage_drv.c 2008-08-21 13:52:27.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/savage/savage_drv.c 2008-08-21 14:34:09.000000000 -0700 @@ -33,9 +33,11 @@ static struct pci_device_id pciidlist[] savage_PCI_IDS }; +static int probe(struct pci_dev *pdev, const struct pci_device_id *ent); static struct drm_driver driver = { .driver_features = - DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_DMA | DRIVER_PCI_DMA, + DRIVER_USE_AGP | DRIVER_USE_MTRR | + DRIVER_HAVE_DMA | 
DRIVER_PCI_DMA, .dev_priv_size = sizeof(drm_savage_buf_priv_t), .load = savage_driver_load, .firstopen = savage_driver_firstopen, @@ -47,18 +49,19 @@ static struct drm_driver driver = { .ioctls = savage_ioctls, .dma_ioctl = savage_bci_buffers, .fops = { - .owner = THIS_MODULE, - .open = drm_open, - .release = drm_release, - .ioctl = drm_ioctl, - .mmap = drm_mmap, - .poll = drm_poll, - .fasync = drm_fasync, + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .ioctl = drm_ioctl, + .mmap = drm_mmap, + .poll = drm_poll, + .fasync = drm_fasync, }, - .pci_driver = { - .name = DRIVER_NAME, - .id_table = pciidlist, + .name = DRIVER_NAME, + .id_table = pciidlist, + .probe = probe, + .remove = __devexit_p(drm_cleanup_pci), }, .name = DRIVER_NAME, @@ -69,10 +72,15 @@ static struct drm_driver driver = { .patchlevel = DRIVER_PATCHLEVEL, }; +static int probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + return drm_get_dev(pdev, ent, &driver); +} + static int __init savage_init(void) { driver.num_ioctls = savage_max_ioctl; - return drm_init(&driver); + return drm_init(&driver, pciidlist); } static void __exit savage_exit(void) @@ -83,6 +91,6 @@ static void __exit savage_exit(void) module_init(savage_init); module_exit(savage_exit); -MODULE_AUTHOR(DRIVER_AUTHOR); -MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_AUTHOR( DRIVER_AUTHOR ); +MODULE_DESCRIPTION( DRIVER_DESC ); MODULE_LICENSE("GPL and additional rights"); diff -Napur -X /home/jbarnes/dontdiff gpu/drm/savage/savage_drv.h /tmp/drm-master/drivers/gpu/drm/savage/savage_drv.h --- gpu/drm/savage/savage_drv.h 2008-08-21 13:52:27.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/savage/savage_drv.h 2008-08-21 14:34:09.000000000 -0700 @@ -65,7 +65,7 @@ typedef struct drm_savage_dma_page { drm_savage_age_t age; unsigned int used, flushed; } drm_savage_dma_page_t; -#define SAVAGE_DMA_PAGE_SIZE 1024 /* in dwords */ +#define SAVAGE_DMA_PAGE_SIZE 1024 /* in dwords */ /* Fake DMA buffer size in bytes. 4 pages. Allows a maximum command * size of 16kbytes or 4k entries. Minimum requirement would be * 10kbytes for 255 40-byte vertices in one drawing command. */ @@ -187,13 +187,13 @@ typedef struct drm_savage_private { unsigned int waiting; /* config/hardware-dependent function pointers */ - int (*wait_fifo) (struct drm_savage_private * dev_priv, unsigned int n); - int (*wait_evnt) (struct drm_savage_private * dev_priv, uint16_t e); + int (*wait_fifo)(struct drm_savage_private *dev_priv, unsigned int n); + int (*wait_evnt)(struct drm_savage_private *dev_priv, uint16_t e); /* Err, there is a macro wait_event in include/linux/wait.h. * Avoid unwanted macro expansion. 
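The savage_drv.c hunk above is the driver-side half of the new registration scheme from drm_drv.c: instead of letting the core walk pci_driver.id_table, each driver now supplies its own probe()/remove() callbacks and hands the id list to drm_init() explicitly. A sketch of the pattern, which repeats almost verbatim for sis, tdfx and via later in this patch (the init function name is a placeholder):

static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	/* Let the DRM core take over the freshly matched device. */
	return drm_get_dev(pdev, ent, &driver);
}

static int __init example_init(void)
{
	driver.num_ioctls = example_max_ioctl;
	/* The id list is passed separately so drm_init() can probe the ids
	 * itself when a framebuffer driver already owns the hardware. */
	return drm_init(&driver, pciidlist);
}

The matching teardown goes through drm_cleanup_pci(), wired up as the pci_driver .remove callback via __devexit_p().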
*/ - void (*emit_clip_rect) (struct drm_savage_private * dev_priv, - const struct drm_clip_rect * pbox); - void (*dma_flush) (struct drm_savage_private * dev_priv); + void (*emit_clip_rect)(struct drm_savage_private *dev_priv, + const struct drm_clip_rect *pbox); + void (*dma_flush)(struct drm_savage_private *dev_priv); } drm_savage_private_t; /* ioctls */ @@ -201,12 +201,12 @@ extern int savage_bci_cmdbuf(struct drm_ extern int savage_bci_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv); /* BCI functions */ -extern uint16_t savage_bci_emit_event(drm_savage_private_t * dev_priv, +extern uint16_t savage_bci_emit_event(drm_savage_private_t *dev_priv, unsigned int flags); -extern void savage_freelist_put(struct drm_device * dev, struct drm_buf * buf); -extern void savage_dma_reset(drm_savage_private_t * dev_priv); -extern void savage_dma_wait(drm_savage_private_t * dev_priv, unsigned int page); -extern uint32_t *savage_dma_alloc(drm_savage_private_t * dev_priv, +extern void savage_freelist_put(struct drm_device *dev, struct drm_buf *buf); +extern void savage_dma_reset(drm_savage_private_t *dev_priv); +extern void savage_dma_wait(drm_savage_private_t *dev_priv, unsigned int page); +extern uint32_t *savage_dma_alloc(drm_savage_private_t *dev_priv, unsigned int n); extern int savage_driver_load(struct drm_device *dev, unsigned long chipset); extern int savage_driver_firstopen(struct drm_device *dev); @@ -216,10 +216,10 @@ extern void savage_reclaim_buffers(struc struct drm_file *file_priv); /* state functions */ -extern void savage_emit_clip_rect_s3d(drm_savage_private_t * dev_priv, - const struct drm_clip_rect * pbox); -extern void savage_emit_clip_rect_s4(drm_savage_private_t * dev_priv, - const struct drm_clip_rect * pbox); +extern void savage_emit_clip_rect_s3d(drm_savage_private_t *dev_priv, + const struct drm_clip_rect *pbox); +extern void savage_emit_clip_rect_s4(drm_savage_private_t *dev_priv, + const struct drm_clip_rect *pbox); #define SAVAGE_FB_SIZE_S3 0x01000000 /* 16MB */ #define SAVAGE_FB_SIZE_S4 0x02000000 /* 32MB */ @@ -227,17 +227,17 @@ extern void savage_emit_clip_rect_s4(drm #define SAVAGE_APERTURE_OFFSET 0x02000000 /* 32MB */ #define SAVAGE_APERTURE_SIZE 0x05000000 /* 5 tiled surfaces, 16MB each */ -#define SAVAGE_BCI_OFFSET 0x00010000 /* offset of the BCI region +#define SAVAGE_BCI_OFFSET 0x00010000 /* offset of the BCI region * inside the MMIO region */ -#define SAVAGE_BCI_FIFO_SIZE 32 /* number of entries in on-chip - * BCI FIFO */ +#define SAVAGE_BCI_FIFO_SIZE 32 /* number of entries in on-chip + * BCI FIFO */ /* * MMIO registers */ #define SAVAGE_STATUS_WORD0 0x48C00 #define SAVAGE_STATUS_WORD1 0x48C04 -#define SAVAGE_ALT_STATUS_WORD0 0x48C60 +#define SAVAGE_ALT_STATUS_WORD0 0x48C60 #define SAVAGE_FIFO_USED_MASK_S3D 0x0001ffff #define SAVAGE_FIFO_USED_MASK_S4 0x001fffff @@ -283,7 +283,7 @@ extern void savage_emit_clip_rect_s4(drm #define SAVAGE_TEXADDR1_S4 0x23 #define SAVAGE_TEXBLEND0_S4 0x24 #define SAVAGE_TEXBLEND1_S4 0x25 -#define SAVAGE_TEXXPRCLR_S4 0x26 /* never used */ +#define SAVAGE_TEXXPRCLR_S4 0x26 /* never used */ #define SAVAGE_TEXDESCR_S4 0x27 #define SAVAGE_FOGTABLE_S4 0x28 #define SAVAGE_FOGCTRL_S4 0x30 @@ -298,7 +298,7 @@ extern void savage_emit_clip_rect_s4(drm #define SAVAGE_TEXBLENDCOLOR_S4 0x39 /* Savage3D/MX/IX 3D registers */ #define SAVAGE_TEXPALADDR_S3D 0x18 -#define SAVAGE_TEXXPRCLR_S3D 0x19 /* never used */ +#define SAVAGE_TEXXPRCLR_S3D 0x19 /* never used */ #define SAVAGE_TEXADDR_S3D 0x1A #define SAVAGE_TEXDESCR_S3D 0x1B 
#define SAVAGE_TEXCTRL_S3D 0x1C @@ -318,9 +318,9 @@ extern void savage_emit_clip_rect_s4(drm #define SAVAGE_DMABUFADDR 0x51 /* texture enable bits (needed for tex addr checking) */ -#define SAVAGE_TEXCTRL_TEXEN_MASK 0x00010000 /* S3D */ -#define SAVAGE_TEXDESCR_TEX0EN_MASK 0x02000000 /* S4 */ -#define SAVAGE_TEXDESCR_TEX1EN_MASK 0x04000000 /* S4 */ +#define SAVAGE_TEXCTRL_TEXEN_MASK 0x00010000 /* S3D */ +#define SAVAGE_TEXDESCR_TEX0EN_MASK 0x02000000 /* S4 */ +#define SAVAGE_TEXDESCR_TEX1EN_MASK 0x04000000 /* S4 */ /* Global fields in Savage4/Twister/ProSavage 3D registers: * @@ -572,4 +572,4 @@ extern void savage_emit_clip_rect_s4(drm #define TEST_AGE( age, e, w ) \ ( (age)->wrap < (w) || ( (age)->wrap == (w) && (age)->event <= (e) ) ) -#endif /* __SAVAGE_DRV_H__ */ +#endif /* __SAVAGE_DRV_H__ */ diff -Napur -X /home/jbarnes/dontdiff gpu/drm/savage/savage_state.c /tmp/drm-master/drivers/gpu/drm/savage/savage_state.c --- gpu/drm/savage/savage_state.c 2008-08-21 13:52:27.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/savage/savage_state.c 2008-08-21 14:34:09.000000000 -0700 @@ -26,19 +26,19 @@ #include "savage_drm.h" #include "savage_drv.h" -void savage_emit_clip_rect_s3d(drm_savage_private_t * dev_priv, - const struct drm_clip_rect * pbox) +void savage_emit_clip_rect_s3d(drm_savage_private_t *dev_priv, + const struct drm_clip_rect *pbox) { uint32_t scstart = dev_priv->state.s3d.new_scstart; uint32_t scend = dev_priv->state.s3d.new_scend; scstart = (scstart & ~SAVAGE_SCISSOR_MASK_S3D) | - ((uint32_t) pbox->x1 & 0x000007ff) | - (((uint32_t) pbox->y1 << 16) & 0x07ff0000); - scend = (scend & ~SAVAGE_SCISSOR_MASK_S3D) | - (((uint32_t) pbox->x2 - 1) & 0x000007ff) | - ((((uint32_t) pbox->y2 - 1) << 16) & 0x07ff0000); + ((uint32_t)pbox->x1 & 0x000007ff) | + (((uint32_t)pbox->y1 << 16) & 0x07ff0000); + scend = (scend & ~SAVAGE_SCISSOR_MASK_S3D) | + (((uint32_t)pbox->x2 - 1) & 0x000007ff) | + ((((uint32_t)pbox->y2 - 1) << 16) & 0x07ff0000); if (scstart != dev_priv->state.s3d.scstart || - scend != dev_priv->state.s3d.scend) { + scend != dev_priv->state.s3d.scend) { DMA_LOCALS; BEGIN_DMA(4); DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D); @@ -52,17 +52,17 @@ void savage_emit_clip_rect_s3d(drm_savag } } -void savage_emit_clip_rect_s4(drm_savage_private_t * dev_priv, - const struct drm_clip_rect * pbox) +void savage_emit_clip_rect_s4(drm_savage_private_t *dev_priv, + const struct drm_clip_rect *pbox) { uint32_t drawctrl0 = dev_priv->state.s4.new_drawctrl0; uint32_t drawctrl1 = dev_priv->state.s4.new_drawctrl1; drawctrl0 = (drawctrl0 & ~SAVAGE_SCISSOR_MASK_S4) | - ((uint32_t) pbox->x1 & 0x000007ff) | - (((uint32_t) pbox->y1 << 12) & 0x00fff000); + ((uint32_t)pbox->x1 & 0x000007ff) | + (((uint32_t)pbox->y1 << 12) & 0x00fff000); drawctrl1 = (drawctrl1 & ~SAVAGE_SCISSOR_MASK_S4) | - (((uint32_t) pbox->x2 - 1) & 0x000007ff) | - ((((uint32_t) pbox->y2 - 1) << 12) & 0x00fff000); + (((uint32_t)pbox->x2 - 1) & 0x000007ff) | + ((((uint32_t)pbox->y2 - 1) << 12) & 0x00fff000); if (drawctrl0 != dev_priv->state.s4.drawctrl0 || drawctrl1 != dev_priv->state.s4.drawctrl1) { DMA_LOCALS; @@ -78,14 +78,14 @@ void savage_emit_clip_rect_s4(drm_savage } } -static int savage_verify_texaddr(drm_savage_private_t * dev_priv, int unit, +static int savage_verify_texaddr(drm_savage_private_t *dev_priv, int unit, uint32_t addr) { - if ((addr & 6) != 2) { /* reserved bits */ + if ((addr & 6) != 2) { /* reserved bits */ DRM_ERROR("bad texAddr%d %08x (reserved bits)\n", unit, addr); return -EINVAL; } - if (!(addr & 1)) { /* local */ + 
if (!(addr & 1)) { /* local */ addr &= ~7; if (addr < dev_priv->texture_offset || addr >= dev_priv->texture_offset + dev_priv->texture_size) { @@ -94,7 +94,7 @@ static int savage_verify_texaddr(drm_sav unit, addr); return -EINVAL; } - } else { /* AGP */ + } else { /* AGP */ if (!dev_priv->agp_textures) { DRM_ERROR("bad texAddr%d %08x (AGP not available)\n", unit, addr); @@ -114,18 +114,17 @@ static int savage_verify_texaddr(drm_sav } #define SAVE_STATE(reg,where) \ - if(start <= reg && start+count > reg) \ + if(start <= reg && start + count > reg) \ dev_priv->state.where = regs[reg - start] #define SAVE_STATE_MASK(reg,where,mask) do { \ - if(start <= reg && start+count > reg) { \ + if(start <= reg && start + count > reg) { \ uint32_t tmp; \ tmp = regs[reg - start]; \ dev_priv->state.where = (tmp & (mask)) | \ (dev_priv->state.where & ~(mask)); \ } \ } while (0) - -static int savage_verify_state_s3d(drm_savage_private_t * dev_priv, +static int savage_verify_state_s3d(drm_savage_private_t *dev_priv, unsigned int start, unsigned int count, const uint32_t *regs) { @@ -155,7 +154,7 @@ static int savage_verify_state_s3d(drm_s return 0; } -static int savage_verify_state_s4(drm_savage_private_t * dev_priv, +static int savage_verify_state_s4(drm_savage_private_t *dev_priv, unsigned int start, unsigned int count, const uint32_t *regs) { @@ -190,12 +189,11 @@ static int savage_verify_state_s4(drm_sa return ret; } - #undef SAVE_STATE #undef SAVE_STATE_MASK -static int savage_dispatch_state(drm_savage_private_t * dev_priv, - const drm_savage_cmd_header_t * cmd_header, +static int savage_dispatch_state(drm_savage_private_t *dev_priv, + const drm_savage_cmd_header_t *cmd_header, const uint32_t *regs) { unsigned int count = cmd_header->state.count; @@ -275,9 +273,9 @@ static int savage_dispatch_state(drm_sav return 0; } -static int savage_dispatch_dma_prim(drm_savage_private_t * dev_priv, - const drm_savage_cmd_header_t * cmd_header, - const struct drm_buf * dmabuf) +static int savage_dispatch_dma_prim(drm_savage_private_t *dev_priv, + const drm_savage_cmd_header_t *cmd_header, + const struct drm_buf *dmabuf) { unsigned char reorder = 0; unsigned int prim = cmd_header->prim.prim; @@ -310,8 +308,8 @@ static int savage_dispatch_dma_prim(drm_ case SAVAGE_PRIM_TRIFAN: if (n < 3) { DRM_ERROR - ("wrong number of vertices %u in TRIFAN/STRIP\n", - n); + ("wrong number of vertices %u in TRIFAN/STRIP\n", + n); return -EINVAL; } break; @@ -327,8 +325,8 @@ static int savage_dispatch_dma_prim(drm_ } } else { unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) - - (skip >> 2 & 1) - (skip >> 3 & 1) - (skip >> 4 & 1) - - (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1); + (skip >> 2 & 1) - (skip >> 3 & 1) - (skip >> 4 & 1) - + (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1); if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) { DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip); return -EINVAL; @@ -415,8 +413,8 @@ static int savage_dispatch_dma_prim(drm_ return 0; } -static int savage_dispatch_vb_prim(drm_savage_private_t * dev_priv, - const drm_savage_cmd_header_t * cmd_header, +static int savage_dispatch_vb_prim(drm_savage_private_t *dev_priv, + const drm_savage_cmd_header_t *cmd_header, const uint32_t *vtxbuf, unsigned int vb_size, unsigned int vb_stride) { @@ -462,18 +460,18 @@ static int savage_dispatch_vb_prim(drm_s DRM_ERROR("invalid skip flags 0x%04x\n", skip); return -EINVAL; } - vtx_size = 8; /* full vertex */ + vtx_size = 8; /* full vertex */ } else { if (skip > SAVAGE_SKIP_ALL_S4) { DRM_ERROR("invalid 
skip flags 0x%04x\n", skip); return -EINVAL; } - vtx_size = 10; /* full vertex */ + vtx_size = 10; /* full vertex */ } vtx_size -= (skip & 1) + (skip >> 1 & 1) + - (skip >> 2 & 1) + (skip >> 3 & 1) + (skip >> 4 & 1) + - (skip >> 5 & 1) + (skip >> 6 & 1) + (skip >> 7 & 1); + (skip >> 2 & 1) + (skip >> 3 & 1) + (skip >> 4 & 1) + + (skip >> 5 & 1) + (skip >> 6 & 1) + (skip >> 7 & 1); if (vtx_size > vb_stride) { DRM_ERROR("vertex size greater than vb stride (%u > %u)\n", @@ -516,7 +514,7 @@ static int savage_dispatch_vb_prim(drm_s vtx_size * count); } else { for (i = start; i < start + count; ++i) { - DMA_COPY(&vtxbuf [vb_stride * i], + DMA_COPY(&vtxbuf[vb_stride * i], vtx_size); } } @@ -533,10 +531,10 @@ static int savage_dispatch_vb_prim(drm_s return 0; } -static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv, - const drm_savage_cmd_header_t * cmd_header, +static int savage_dispatch_dma_idx(drm_savage_private_t *dev_priv, + const drm_savage_cmd_header_t *cmd_header, const uint16_t *idx, - const struct drm_buf * dmabuf) + const struct drm_buf *dmabuf) { unsigned char reorder = 0; unsigned int prim = cmd_header->idx.prim; @@ -583,8 +581,8 @@ static int savage_dispatch_dma_idx(drm_s } } else { unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) - - (skip >> 2 & 1) - (skip >> 3 & 1) - (skip >> 4 & 1) - - (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1); + (skip >> 2 & 1) - (skip >> 3 & 1) - (skip >> 4 & 1) - + (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1); if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) { DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip); return -EINVAL; @@ -674,8 +672,8 @@ static int savage_dispatch_dma_idx(drm_s return 0; } -static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv, - const drm_savage_cmd_header_t * cmd_header, +static int savage_dispatch_vb_idx(drm_savage_private_t *dev_priv, + const drm_savage_cmd_header_t *cmd_header, const uint16_t *idx, const uint32_t *vtxbuf, unsigned int vb_size, unsigned int vb_stride) @@ -719,18 +717,18 @@ static int savage_dispatch_vb_idx(drm_sa DRM_ERROR("invalid skip flags 0x%04x\n", skip); return -EINVAL; } - vtx_size = 8; /* full vertex */ + vtx_size = 8; /* full vertex */ } else { if (skip > SAVAGE_SKIP_ALL_S4) { DRM_ERROR("invalid skip flags 0x%04x\n", skip); return -EINVAL; } - vtx_size = 10; /* full vertex */ + vtx_size = 10; /* full vertex */ } vtx_size -= (skip & 1) + (skip >> 1 & 1) + - (skip >> 2 & 1) + (skip >> 3 & 1) + (skip >> 4 & 1) + - (skip >> 5 & 1) + (skip >> 6 & 1) + (skip >> 7 & 1); + (skip >> 2 & 1) + (skip >> 3 & 1) + (skip >> 4 & 1) + + (skip >> 5 & 1) + (skip >> 6 & 1) + (skip >> 7 & 1); if (vtx_size > vb_stride) { DRM_ERROR("vertex size greater than vb stride (%u > %u)\n", @@ -747,7 +745,7 @@ static int savage_dispatch_vb_idx(drm_sa for (i = 0; i < count; ++i) { if (idx[i] > vb_size / (vb_stride * 4)) { DRM_ERROR("idx[%u]=%u out of range (0-%u)\n", - i, idx[i], vb_size / (vb_stride * 4)); + i, idx[i], vb_size / (vb_stride * 4)); return -EINVAL; } } @@ -788,8 +786,8 @@ static int savage_dispatch_vb_idx(drm_sa return 0; } -static int savage_dispatch_clear(drm_savage_private_t * dev_priv, - const drm_savage_cmd_header_t * cmd_header, +static int savage_dispatch_clear(drm_savage_private_t *dev_priv, + const drm_savage_cmd_header_t *cmd_header, const drm_savage_cmd_header_t *data, unsigned int nbox, const struct drm_clip_rect *boxes) @@ -803,8 +801,8 @@ static int savage_dispatch_clear(drm_sav return 0; clear_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP | - 
BCI_CMD_SEND_COLOR | BCI_CMD_DEST_PBD_NEW; - BCI_CMD_SET_ROP(clear_cmd, 0xCC); + BCI_CMD_SEND_COLOR | BCI_CMD_DEST_PBD_NEW; + BCI_CMD_SET_ROP(clear_cmd,0xCC); nbufs = ((flags & SAVAGE_FRONT) ? 1 : 0) + ((flags & SAVAGE_BACK) ? 1 : 0) + ((flags & SAVAGE_DEPTH) ? 1 : 0); @@ -821,6 +819,7 @@ static int savage_dispatch_clear(drm_sav for (i = 0; i < nbox; ++i) { unsigned int x, y, w, h; unsigned int buf; + x = boxes[i].x1, y = boxes[i].y1; w = boxes[i].x2 - boxes[i].x1; h = boxes[i].y2 - boxes[i].y1; @@ -860,7 +859,7 @@ static int savage_dispatch_clear(drm_sav return 0; } -static int savage_dispatch_swap(drm_savage_private_t * dev_priv, +static int savage_dispatch_swap(drm_savage_private_t *dev_priv, unsigned int nbox, const struct drm_clip_rect *boxes) { unsigned int swap_cmd; @@ -871,8 +870,8 @@ static int savage_dispatch_swap(drm_sava return 0; swap_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP | - BCI_CMD_SRC_PBD_COLOR_NEW | BCI_CMD_DEST_GBD; - BCI_CMD_SET_ROP(swap_cmd, 0xCC); + BCI_CMD_SRC_PBD_COLOR_NEW | BCI_CMD_DEST_GBD; + BCI_CMD_SET_ROP(swap_cmd,0xCC); for (i = 0; i < nbox; ++i) { BEGIN_DMA(6); @@ -889,10 +888,10 @@ static int savage_dispatch_swap(drm_sava return 0; } -static int savage_dispatch_draw(drm_savage_private_t * dev_priv, +static int savage_dispatch_draw(drm_savage_private_t *dev_priv, const drm_savage_cmd_header_t *start, const drm_savage_cmd_header_t *end, - const struct drm_buf * dmabuf, + const struct drm_buf *dmabuf, const unsigned int *vtxbuf, unsigned int vb_size, unsigned int vb_stride, unsigned int nbox, @@ -1015,19 +1014,21 @@ int savage_bci_cmdbuf(struct drm_device cmdbuf->vb_addr = kvb_addr; } if (cmdbuf->nbox) { - kbox_addr = drm_alloc(cmdbuf->nbox * sizeof(struct drm_clip_rect), - DRM_MEM_DRIVER); + kbox_addr = drm_alloc(cmdbuf->nbox * + sizeof(struct drm_clip_rect), + DRM_MEM_DRIVER); if (kbox_addr == NULL) { ret = -ENOMEM; goto done; } if (DRM_COPY_FROM_USER(kbox_addr, cmdbuf->box_addr, - cmdbuf->nbox * sizeof(struct drm_clip_rect))) { + cmdbuf->nbox * + sizeof(struct drm_clip_rect))) { ret = -EFAULT; goto done; } - cmdbuf->box_addr = kbox_addr; + cmdbuf->box_addr = kbox_addr; } /* Make sure writes to DMA buffers are finished before sending @@ -1070,11 +1071,12 @@ int savage_bci_cmdbuf(struct drm_device default: if (first_draw_cmd) { ret = savage_dispatch_draw( - dev_priv, first_draw_cmd, - cmdbuf->cmd_addr - 1, - dmabuf, cmdbuf->vb_addr, cmdbuf->vb_size, - cmdbuf->vb_stride, - cmdbuf->nbox, cmdbuf->box_addr); + dev_priv, first_draw_cmd, + cmdbuf->cmd_addr - 1, + dmabuf, cmdbuf->vb_addr, + cmdbuf->vb_size, + cmdbuf->vb_stride, + cmdbuf->nbox, cmdbuf->box_addr); if (ret != 0) return ret; first_draw_cmd = NULL; @@ -1132,7 +1134,7 @@ int savage_bci_cmdbuf(struct drm_device } if (first_draw_cmd) { - ret = savage_dispatch_draw ( + ret = savage_dispatch_draw( dev_priv, first_draw_cmd, cmdbuf->cmd_addr, dmabuf, cmdbuf->vb_addr, cmdbuf->vb_size, cmdbuf->vb_stride, cmdbuf->nbox, cmdbuf->box_addr); diff -Napur -X /home/jbarnes/dontdiff gpu/drm/sis/Makefile /tmp/drm-master/drivers/gpu/drm/sis/Makefile --- gpu/drm/sis/Makefile 2008-08-21 13:52:27.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/sis/Makefile 1969-12-31 16:00:00.000000000 -0800 @@ -1,10 +0,0 @@ -# -# Makefile for the drm device driver. This driver provides support for the -# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. 
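Just above, savage_bci_cmdbuf() copies the user-supplied clip rectangles into a kernel buffer before any drawing command is dispatched, so the dispatch helpers only ever see kernel memory. A condensed sketch of that copy-in step, assuming the cmdbuf->nbox / cmdbuf->box_addr fields of the savage ioctl (the real code releases the buffer through its common 'done' path rather than inline as shown here):

struct drm_clip_rect *kbox_addr = NULL;

if (cmdbuf->nbox) {
	kbox_addr = drm_alloc(cmdbuf->nbox * sizeof(struct drm_clip_rect),
			      DRM_MEM_DRIVER);
	if (kbox_addr == NULL)
		return -ENOMEM;
	if (DRM_COPY_FROM_USER(kbox_addr, cmdbuf->box_addr,
			       cmdbuf->nbox * sizeof(struct drm_clip_rect))) {
		drm_free(kbox_addr,
			 cmdbuf->nbox * sizeof(struct drm_clip_rect),
			 DRM_MEM_DRIVER);
		return -EFAULT;
	}
	/* From here on the dispatch code works on the kernel copy. */
	cmdbuf->box_addr = kbox_addr;
}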
- -ccflags-y = -Iinclude/drm -sis-y := sis_drv.o sis_mm.o - -obj-$(CONFIG_DRM_SIS) += sis.o - - diff -Napur -X /home/jbarnes/dontdiff gpu/drm/sis/sis_drv.c /tmp/drm-master/drivers/gpu/drm/sis/sis_drv.c --- gpu/drm/sis/sis_drv.c 2008-08-21 13:52:27.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/sis/sis_drv.c 2008-08-21 14:34:09.000000000 -0700 @@ -32,9 +32,10 @@ #include "drm_pciids.h" static struct pci_device_id pciidlist[] = { - sisdrv_PCI_IDS + sis_PCI_IDS }; + static int sis_driver_load(struct drm_device *dev, unsigned long chipset) { drm_sis_private_t *dev_priv; @@ -64,6 +65,8 @@ static int sis_driver_unload(struct drm_ return 0; } + +static int probe(struct pci_dev *pdev, const struct pci_device_id *ent); static struct drm_driver driver = { .driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR, .load = sis_driver_load, @@ -77,17 +80,19 @@ static struct drm_driver driver = { .get_reg_ofs = drm_core_get_reg_ofs, .ioctls = sis_ioctls, .fops = { - .owner = THIS_MODULE, - .open = drm_open, - .release = drm_release, - .ioctl = drm_ioctl, - .mmap = drm_mmap, - .poll = drm_poll, - .fasync = drm_fasync, - }, + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .ioctl = drm_ioctl, + .mmap = drm_mmap, + .poll = drm_poll, + .fasync = drm_fasync, + }, .pci_driver = { - .name = DRIVER_NAME, - .id_table = pciidlist, + .name = DRIVER_NAME, + .id_table = pciidlist, + .probe = probe, + .remove = __devexit_p(drm_cleanup_pci), }, .name = DRIVER_NAME, @@ -98,10 +103,15 @@ static struct drm_driver driver = { .patchlevel = DRIVER_PATCHLEVEL, }; +static int probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + return drm_get_dev(pdev, ent, &driver); +} + static int __init sis_init(void) { driver.num_ioctls = sis_max_ioctl; - return drm_init(&driver); + return drm_init(&driver, pciidlist); } static void __exit sis_exit(void) diff -Napur -X /home/jbarnes/dontdiff gpu/drm/sis/sis_drv.h /tmp/drm-master/drivers/gpu/drm/sis/sis_drv.h --- gpu/drm/sis/sis_drv.h 2008-08-21 13:52:27.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/sis/sis_drv.h 2008-08-21 14:34:09.000000000 -0700 @@ -44,11 +44,12 @@ enum sis_family { SIS_CHIP_315 = 1, }; -#include "drm_sman.h" +#define SIS_HAVE_CORE_MM +#include "drm_sman.h" #define SIS_BASE (dev_priv->mmio) -#define SIS_READ(reg) DRM_READ32(SIS_BASE, reg); +#define SIS_READ(reg) DRM_READ32(SIS_BASE, reg); #define SIS_WRITE(reg, val) DRM_WRITE32(SIS_BASE, reg, val); typedef struct drm_sis_private { @@ -67,6 +68,7 @@ extern void sis_reclaim_buffers_locked(s struct drm_file *file_priv); extern void sis_lastclose(struct drm_device *dev); + extern struct drm_ioctl_desc sis_ioctls[]; extern int sis_max_ioctl; diff -Napur -X /home/jbarnes/dontdiff gpu/drm/sis/sis_mm.c /tmp/drm-master/drivers/gpu/drm/sis/sis_mm.c --- gpu/drm/sis/sis_mm.c 2008-08-21 14:43:42.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/sis/sis_mm.c 2008-08-21 14:34:09.000000000 -0700 @@ -28,7 +28,7 @@ /* * Authors: - * Thomas Hellström + * Thomas Hellström */ #include "drmP.h" @@ -40,8 +40,10 @@ #define VIDEO_TYPE 0 #define AGP_TYPE 1 +#define SIS_MM_ALIGN_SHIFT 4 +#define SIS_MM_ALIGN_MASK ( (1 << SIS_MM_ALIGN_SHIFT) - 1) -#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE) +#if defined(__linux__) && defined(CONFIG_FB_SIS) /* fb management via fb device */ #define SIS_MM_ALIGN_SHIFT 0 @@ -57,7 +59,7 @@ static void *sis_sman_mm_allocate(void * if (req.size == 0) return NULL; else - return (void *)(unsigned long)~req.offset; + return (void *)~req.offset; } static void 
sis_sman_mm_free(void *private, void *ref) @@ -75,12 +77,7 @@ static unsigned long sis_sman_mm_offset( return ~((unsigned long)ref); } -#else /* CONFIG_FB_SIS[_MODULE] */ - -#define SIS_MM_ALIGN_SHIFT 4 -#define SIS_MM_ALIGN_MASK ( (1 << SIS_MM_ALIGN_SHIFT) - 1) - -#endif /* CONFIG_FB_SIS[_MODULE] */ +#endif static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv) { @@ -89,7 +86,7 @@ static int sis_fb_init(struct drm_device int ret; mutex_lock(&dev->struct_mutex); -#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE) +#if defined(__linux__) && defined(CONFIG_FB_SIS) { struct drm_sman_mm sman_mm; sman_mm.private = (void *)0xFFFFFFFF; diff -Napur -X /home/jbarnes/dontdiff gpu/drm/tdfx/Makefile /tmp/drm-master/drivers/gpu/drm/tdfx/Makefile --- gpu/drm/tdfx/Makefile 2008-08-21 13:52:27.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/tdfx/Makefile 1969-12-31 16:00:00.000000000 -0800 @@ -1,8 +0,0 @@ -# -# Makefile for the drm device driver. This driver provides support for the -# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. - -ccflags-y := -Iinclude/drm -tdfx-y := tdfx_drv.o - -obj-$(CONFIG_DRM_TDFX) += tdfx.o diff -Napur -X /home/jbarnes/dontdiff gpu/drm/tdfx/tdfx_drv.c /tmp/drm-master/drivers/gpu/drm/tdfx/tdfx_drv.c --- gpu/drm/tdfx/tdfx_drv.c 2008-08-21 13:52:27.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/tdfx/tdfx_drv.c 2008-08-21 14:34:09.000000000 -0700 @@ -39,23 +39,26 @@ static struct pci_device_id pciidlist[] tdfx_PCI_IDS }; +static int probe(struct pci_dev *pdev, const struct pci_device_id *ent); static struct drm_driver driver = { .driver_features = DRIVER_USE_MTRR, .reclaim_buffers = drm_core_reclaim_buffers, .get_map_ofs = drm_core_get_map_ofs, .get_reg_ofs = drm_core_get_reg_ofs, .fops = { - .owner = THIS_MODULE, - .open = drm_open, - .release = drm_release, - .ioctl = drm_ioctl, - .mmap = drm_mmap, - .poll = drm_poll, - .fasync = drm_fasync, - }, + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .ioctl = drm_ioctl, + .mmap = drm_mmap, + .poll = drm_poll, + .fasync = drm_fasync, + }, .pci_driver = { - .name = DRIVER_NAME, - .id_table = pciidlist, + .name = DRIVER_NAME, + .id_table = pciidlist, + .probe = probe, + .remove = __devexit_p(drm_cleanup_pci), }, .name = DRIVER_NAME, @@ -66,9 +69,15 @@ static struct drm_driver driver = { .patchlevel = DRIVER_PATCHLEVEL, }; +static int probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + return drm_get_dev(pdev, ent, &driver); +} + + static int __init tdfx_init(void) { - return drm_init(&driver); + return drm_init(&driver, pciidlist); } static void __exit tdfx_exit(void) diff -Napur -X /home/jbarnes/dontdiff gpu/drm/via/Makefile /tmp/drm-master/drivers/gpu/drm/via/Makefile --- gpu/drm/via/Makefile 2008-08-21 13:52:27.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/via/Makefile 1969-12-31 16:00:00.000000000 -0800 @@ -1,8 +0,0 @@ -# -# Makefile for the drm device driver. This driver provides support for the -# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. 
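The sis_mm.c hunk above hoists SIS_MM_ALIGN_SHIFT/SIS_MM_ALIGN_MASK out of the #else branch, so the 16-byte allocation granularity applies whenever the DRM memory manager, rather than the SiS framebuffer driver, owns video RAM. The shift/mask pair is the usual round-up idiom; a small illustrative helper (not part of the patch) showing how a byte count becomes manager units:

#define SIS_MM_ALIGN_SHIFT 4
#define SIS_MM_ALIGN_MASK  ((1 << SIS_MM_ALIGN_SHIFT) - 1)	/* 0xf */

/* Round a request up to whole 16-byte units, which is what the
 * simple memory manager tracks internally. */
static unsigned long example_sis_mm_units(unsigned long bytes)
{
	return (bytes + SIS_MM_ALIGN_MASK) >> SIS_MM_ALIGN_SHIFT;
}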
- -ccflags-y := -Iinclude/drm -via-y := via_irq.o via_drv.o via_map.o via_mm.o via_dma.o via_verifier.o via_video.o via_dmablit.o - -obj-$(CONFIG_DRM_VIA) +=via.o diff -Napur -X /home/jbarnes/dontdiff gpu/drm/via/via_buffer.c /tmp/drm-master/drivers/gpu/drm/via/via_buffer.c --- gpu/drm/via/via_buffer.c 1969-12-31 16:00:00.000000000 -0800 +++ /tmp/drm-master/drivers/gpu/drm/via/via_buffer.c 2008-08-21 14:34:09.000000000 -0700 @@ -0,0 +1,163 @@ +/************************************************************************** + * + * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA, + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +/* + * Authors: Thomas Hellström + */ + +#include "drmP.h" +#include "via_drm.h" +#include "via_drv.h" + +struct drm_ttm_backend *via_create_ttm_backend_entry(struct drm_device * dev) +{ + return drm_agp_init_ttm(dev); +} + +int via_fence_types(struct drm_buffer_object *bo, uint32_t * fclass, + uint32_t * type) +{ + *type = 3; + return 0; +} + +int via_invalidate_caches(struct drm_device * dev, uint64_t flags) +{ + /* + * FIXME: Invalidate texture caches here. 
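The new via_buffer.c that starts above (and continues below with via_vram_info()) locates the card's video RAM by scanning the PCI BARs for a prefetchable memory resource instead of hard-coding an aperture. A sketch of that scan; pci_resource_len() is used here for brevity and is equivalent to the end - start + 1 arithmetic in the actual function:

int bar, found = -1;
unsigned long flags;

for (bar = 0; bar < 6; ++bar) {
	flags = pci_resource_flags(pdev, bar);
	/* VRAM is the first BAR that is both memory and prefetchable. */
	if ((flags & (IORESOURCE_MEM | IORESOURCE_PREFETCH)) ==
	    (IORESOURCE_MEM | IORESOURCE_PREFETCH)) {
		found = bar;
		break;
	}
}
if (found < 0)
	return -EINVAL;	/* no VRAM BAR: DRM_BO_MEM_VRAM cannot be set up */
*offset = pci_resource_start(pdev, found);
*size   = pci_resource_len(pdev, found);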
+ */ + + return 0; +} + + +static int via_vram_info(struct drm_device *dev, + unsigned long *offset, + unsigned long *size) +{ + struct pci_dev *pdev = dev->pdev; + unsigned long flags; + + int ret = -EINVAL; + int i; + for (i=0; i<6; ++i) { + flags = pci_resource_flags(pdev, i); + if ((flags & (IORESOURCE_MEM | IORESOURCE_PREFETCH)) == + (IORESOURCE_MEM | IORESOURCE_PREFETCH)) { + ret = 0; + break; + } + } + + if (ret) { + DRM_ERROR("Could not find VRAM PCI resource\n"); + return ret; + } + + *offset = pci_resource_start(pdev, i); + *size = pci_resource_end(pdev, i) - *offset + 1; + return 0; +} + +int via_init_mem_type(struct drm_device * dev, uint32_t type, + struct drm_mem_type_manager * man) +{ + switch (type) { + case DRM_BO_MEM_LOCAL: + /* System memory */ + + man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | + _DRM_FLAG_MEMTYPE_CACHED; + man->drm_bus_maptype = 0; + break; + + case DRM_BO_MEM_TT: + /* Dynamic agpgart memory */ + + if (!(drm_core_has_AGP(dev) && dev->agp)) { + DRM_ERROR("AGP is not enabled for memory type %u\n", + (unsigned)type); + return -EINVAL; + } + man->io_offset = dev->agp->agp_info.aper_base; + man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024; + man->io_addr = NULL; + man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | _DRM_FLAG_NEEDS_IOREMAP; + + /* Only to get pte protection right. */ + + man->drm_bus_maptype = _DRM_AGP; + break; + + case DRM_BO_MEM_VRAM: + /* "On-card" video ram */ + + man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | _DRM_FLAG_NEEDS_IOREMAP; + man->drm_bus_maptype = _DRM_FRAME_BUFFER; + man->io_addr = NULL; + return via_vram_info(dev, &man->io_offset, &man->io_size); + break; + + case DRM_BO_MEM_PRIV0: + /* Pre-bound agpgart memory */ + + if (!(drm_core_has_AGP(dev) && dev->agp)) { + DRM_ERROR("AGP is not enabled for memory type %u\n", + (unsigned)type); + return -EINVAL; + } + man->io_offset = dev->agp->agp_info.aper_base; + man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024; + man->io_addr = NULL; + man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | + _DRM_FLAG_MEMTYPE_FIXED | _DRM_FLAG_NEEDS_IOREMAP; + man->drm_bus_maptype = _DRM_AGP; + break; + + default: + DRM_ERROR("Unsupported memory type %u\n", (unsigned)type); + return -EINVAL; + } + return 0; +} + +uint64_t via_evict_flags(struct drm_buffer_object *bo) +{ + switch (bo->mem.mem_type) { + case DRM_BO_MEM_LOCAL: + case DRM_BO_MEM_TT: + return DRM_BO_FLAG_MEM_LOCAL; /* Evict TT to local */ + case DRM_BO_MEM_PRIV0: /* Evict pre-bound AGP to TT */ + return DRM_BO_MEM_TT; + case DRM_BO_MEM_VRAM: + if (bo->mem.num_pages > 128) + return DRM_BO_MEM_TT; + else + return DRM_BO_MEM_LOCAL; + default: + return DRM_BO_MEM_LOCAL; + } +} diff -Napur -X /home/jbarnes/dontdiff gpu/drm/via/via_dmablit.c /tmp/drm-master/drivers/gpu/drm/via/via_dmablit.c --- gpu/drm/via/via_dmablit.c 2008-08-21 13:52:27.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/via/via_dmablit.c 2008-08-21 14:34:09.000000000 -0700 @@ -41,9 +41,9 @@ #include -#define VIA_PGDN(x) (((unsigned long)(x)) & PAGE_MASK) -#define VIA_PGOFF(x) (((unsigned long)(x)) & ~PAGE_MASK) -#define VIA_PFN(x) ((unsigned long)(x) >> PAGE_SHIFT) +#define VIA_PGDN(x) (((unsigned long)(x)) & PAGE_MASK) +#define VIA_PGOFF(x) (((unsigned long)(x)) & ~PAGE_MASK) +#define VIA_PFN(x) ((unsigned long)(x) >> PAGE_SHIFT) typedef struct _drm_via_descriptor { uint32_t mem_addr; @@ -125,12 +125,11 @@ via_map_blit_for_device(struct pci_dev * line_len -= remaining_len; if (mode == 1) { - desc_ptr->mem_addr = - dma_map_page(&pdev->dev, - vsg->pages[VIA_PFN(cur_mem) - - 
VIA_PFN(first_addr)], - VIA_PGOFF(cur_mem), remaining_len, - vsg->direction); + desc_ptr->mem_addr = dma_map_page(&pdev->dev, + vsg->pages[VIA_PFN(cur_mem) - + VIA_PFN(first_addr)], + VIA_PGOFF(cur_mem), remaining_len, + vsg->direction); desc_ptr->dev_addr = cur_fb; desc_ptr->size = remaining_len; @@ -273,9 +272,10 @@ via_alloc_desc_pages(drm_via_sg_info_t * vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) / vsg->descriptors_per_page; - if (NULL == (vsg->desc_pages = kcalloc(vsg->num_desc_pages, sizeof(void *), GFP_KERNEL))) + if (NULL == (vsg->desc_pages = kmalloc(sizeof(void *) * vsg->num_desc_pages, GFP_KERNEL))) return -ENOMEM; + memset(vsg->desc_pages, 0, sizeof(void *) * vsg->num_desc_pages); vsg->state = dr_via_desc_pages_alloc; for (i=0; inum_desc_pages; ++i) { if (NULL == (vsg->desc_pages[i] = @@ -375,8 +375,10 @@ via_dmablit_handler(struct drm_device *d blitq->cur = cur; blitq->num_outstanding--; blitq->end = jiffies + DRM_HZ; - if (!timer_pending(&blitq->poll_timer)) - mod_timer(&blitq->poll_timer, jiffies + 1); + if (!timer_pending(&blitq->poll_timer)) { + blitq->poll_timer.expires = jiffies+1; + add_timer(&blitq->poll_timer); + } } else { if (timer_pending(&blitq->poll_timer)) { del_timer(&blitq->poll_timer); @@ -475,15 +477,15 @@ via_dmablit_timer(unsigned long data) via_dmablit_handler(dev, engine, 0); if (!timer_pending(&blitq->poll_timer)) { - mod_timer(&blitq->poll_timer, jiffies + 1); - - /* - * Rerun handler to delete timer if engines are off, and - * to shorten abort latency. This is a little nasty. - */ + blitq->poll_timer.expires = jiffies+1; + add_timer(&blitq->poll_timer); - via_dmablit_handler(dev, engine, 0); + /* + * Rerun handler to delete timer if engines are off, and + * to shorten abort latency. This is a little nasty. 
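The via_dmablit.c hunks above and below swap the mainline timer helpers back to the open-coded form used by the external DRM tree: init_timer() plus manual .function/.data assignment at setup, and add_timer() guarded by timer_pending() when (re)arming, since add_timer() must never be called on a timer that is still pending. The two arming styles are equivalent; a short comparison (blitq->poll_timer is the struct timer_list embedded in the blit queue):

/* Open-coded arming, as in the hunks above: */
if (!timer_pending(&blitq->poll_timer)) {
	blitq->poll_timer.expires = jiffies + 1;
	add_timer(&blitq->poll_timer);
}

/* The one-liner it replaces; mod_timer() re-arms safely even if the
 * timer is already pending, so there the guard is only an optimization: */
if (!timer_pending(&blitq->poll_timer))
	mod_timer(&blitq->poll_timer, jiffies + 1);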
+ */ + via_dmablit_handler(dev, engine, 0); } } @@ -570,8 +572,9 @@ via_init_dmablit(struct drm_device *dev) } DRM_INIT_WAITQUEUE(&blitq->busy_queue); INIT_WORK(&blitq->wq, via_dmablit_workqueue); - setup_timer(&blitq->poll_timer, via_dmablit_timer, - (unsigned long)blitq); + init_timer(&blitq->poll_timer); + blitq->poll_timer.function = &via_dmablit_timer; + blitq->poll_timer.data = (unsigned long) blitq; } } @@ -610,7 +613,7 @@ via_build_sg_info(struct drm_device *dev } if ((xfer->mem_stride == xfer->line_length) && - (xfer->fb_stride == xfer->line_length)) { + (xfer->fb_stride == xfer->line_length)) { xfer->mem_stride *= xfer->num_lines; xfer->line_length = xfer->mem_stride; xfer->fb_stride = xfer->mem_stride; @@ -633,7 +636,7 @@ via_build_sg_info(struct drm_device *dev */ if (xfer->mem_stride < xfer->line_length || - abs(xfer->fb_stride) < xfer->line_length) { + abs(xfer->fb_stride) < xfer->line_length) { DRM_ERROR("Invalid frame-buffer / memory stride.\n"); return -EINVAL; } @@ -651,10 +654,8 @@ via_build_sg_info(struct drm_device *dev return -EINVAL; } #else - if ((((unsigned long)xfer->mem_addr & 15) || - ((unsigned long)xfer->fb_addr & 3)) || - ((xfer->num_lines > 1) && - ((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) { + if ((((unsigned long)xfer->mem_addr & 15) || ((unsigned long)xfer->fb_addr & 3)) || + ((xfer->num_lines > 1) && ((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) { DRM_ERROR("Invalid DRM bitblt alignment.\n"); return -EINVAL; } diff -Napur -X /home/jbarnes/dontdiff gpu/drm/via/via_dmablit.h /tmp/drm-master/drivers/gpu/drm/via/via_dmablit.h --- gpu/drm/via/via_dmablit.h 2008-08-21 13:52:27.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/via/via_dmablit.h 2008-08-21 14:34:09.000000000 -0700 @@ -45,12 +45,12 @@ typedef struct _drm_via_sg_info { int num_desc; enum dma_data_direction direction; unsigned char *bounce_buffer; - dma_addr_t chain_start; + dma_addr_t chain_start; uint32_t free_on_sequence; - unsigned int descriptors_per_page; + unsigned int descriptors_per_page; int aborted; enum { - dr_via_device_mapped, + dr_via_device_mapped, dr_via_desc_pages_alloc, dr_via_pages_locked, dr_via_pages_alloc, @@ -68,7 +68,7 @@ typedef struct _drm_via_blitq { unsigned num_free; unsigned num_outstanding; unsigned long end; - int aborting; + int aborting; int is_active; drm_via_sg_info_t *blits[VIA_NUM_BLIT_SLOTS]; spinlock_t blit_lock; diff -Napur -X /home/jbarnes/dontdiff gpu/drm/via/via_dma.c /tmp/drm-master/drivers/gpu/drm/via/via_dma.c --- gpu/drm/via/via_dma.c 2008-08-21 13:52:27.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/via/via_dma.c 2008-08-21 14:34:09.000000000 -0700 @@ -40,20 +40,6 @@ #include "via_drv.h" #include "via_3d_reg.h" -#define CMDBUF_ALIGNMENT_SIZE (0x100) -#define CMDBUF_ALIGNMENT_MASK (0x0ff) - -/* defines for VIA 3D registers */ -#define VIA_REG_STATUS 0x400 -#define VIA_REG_TRANSET 0x43C -#define VIA_REG_TRANSPACE 0x440 - -/* VIA_REG_STATUS(0x400): Engine Status */ -#define VIA_CMD_RGTR_BUSY 0x00000080 /* Command Regulator is busy */ -#define VIA_2D_ENG_BUSY 0x00000001 /* 2D Engine is busy */ -#define VIA_3D_ENG_BUSY 0x00000002 /* 3D Engine is busy */ -#define VIA_VR_QUEUE_BUSY 0x00020000 /* Virtual Queue is busy */ - #define SetReg2DAGP(nReg, nData) { \ *((uint32_t *)(vb)) = ((nReg) >> 2) | HALCYON_HEADER1; \ *((uint32_t *)(vb) + 1) = (nData); \ @@ -68,18 +54,19 @@ *vb++ = (w2); \ dev_priv->dma_low += 8; -static void via_cmdbuf_start(drm_via_private_t * dev_priv); -static void via_cmdbuf_pause(drm_via_private_t * dev_priv); 
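The via_dma.c hunks in this area reshuffle the command-buffer helpers whose core job is tracking how far the hardware command regulator lags behind the software write pointer in the AGP ring. Free space is the distance from dma_low back around to the hardware read offset, minus a reserve so the writer never runs into the reader. A generic sketch of that computation, with names loosely following via_cmdbuf_space() (the real driver folds the reserve in slightly differently):

/* hw_addr:  hardware read offset into the ring
 * dma_low:  software write offset
 * dma_high: ring size in bytes
 * reserved: safety margin kept free at all times */
static uint32_t example_ring_space(uint32_t hw_addr, uint32_t dma_low,
				   uint32_t dma_high, uint32_t reserved)
{
	if (hw_addr <= dma_low)
		/* reader is behind the writer: free space wraps around */
		return dma_high + hw_addr - dma_low - reserved;
	else
		/* reader is ahead: free space is the gap between them */
		return hw_addr - dma_low - reserved;
}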
-static void via_cmdbuf_reset(drm_via_private_t * dev_priv); -static void via_cmdbuf_rewind(drm_via_private_t * dev_priv); -static int via_wait_idle(drm_via_private_t * dev_priv); -static void via_pad_cache(drm_via_private_t * dev_priv, int qwords); +static void via_cmdbuf_start(drm_via_private_t *dev_priv); +static void via_cmdbuf_pause(drm_via_private_t *dev_priv); +static void via_cmdbuf_reset(drm_via_private_t *dev_priv); +static void via_cmdbuf_rewind(drm_via_private_t *dev_priv); +static int via_wait_idle(drm_via_private_t *dev_priv); +static void via_pad_cache(drm_via_private_t *dev_priv, int qwords); + /* * Free space in command buffer. */ -static uint32_t via_cmdbuf_space(drm_via_private_t * dev_priv) +static uint32_t via_cmdbuf_space(drm_via_private_t *dev_priv) { uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr; uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base; @@ -93,7 +80,7 @@ static uint32_t via_cmdbuf_space(drm_via * How much does the command regulator lag behind? */ -static uint32_t via_cmdbuf_lag(drm_via_private_t * dev_priv) +static uint32_t via_cmdbuf_lag(drm_via_private_t *dev_priv) { uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr; uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base; @@ -132,6 +119,7 @@ via_cmdbuf_wait(drm_via_private_t * dev_ return 0; } + /* * Checks whether buffer head has reach the end. Rewind the ring buffer * when necessary. @@ -157,7 +145,7 @@ int via_dma_cleanup(struct drm_device * { if (dev->dev_private) { drm_via_private_t *dev_priv = - (drm_via_private_t *) dev->dev_private; + (drm_via_private_t *) dev->dev_private; if (dev_priv->ring.virtual_start) { via_cmdbuf_reset(dev_priv); @@ -258,6 +246,8 @@ static int via_dma_init(struct drm_devic return retcode; } + + static int via_dispatch_cmdbuffer(struct drm_device * dev, drm_via_cmdbuffer_t * cmd) { drm_via_private_t *dev_priv; @@ -285,7 +275,7 @@ static int via_dispatch_cmdbuffer(struct */ if ((ret = - via_verify_command_stream((uint32_t *) dev_priv->pci_buf, + via_verify_command_stream((uint32_t *)dev_priv->pci_buf, cmd->size, dev, 1))) { return ret; } @@ -397,7 +387,7 @@ static inline uint32_t *via_align_buffer } /* - * This function is used internally by ring buffer management code. + * This function is used internally by ring buffer mangement code. * * Returns virtual pointer to ring buffer. */ @@ -474,10 +464,13 @@ static int via_hook_segment(drm_via_priv VIA_READ(VIA_REG_TRANSPACE); } } + return paused; } -static int via_wait_idle(drm_via_private_t * dev_priv) + + +static int via_wait_idle(drm_via_private_t *dev_priv) { int count = 10000000; @@ -489,9 +482,9 @@ static int via_wait_idle(drm_via_private return count; } -static uint32_t *via_align_cmd(drm_via_private_t * dev_priv, uint32_t cmd_type, - uint32_t addr, uint32_t * cmd_addr_hi, - uint32_t * cmd_addr_lo, int skip_wait) +static uint32_t *via_align_cmd(drm_via_private_t *dev_priv, uint32_t cmd_type, + uint32_t addr, uint32_t *cmd_addr_hi, + uint32_t *cmd_addr_lo, int skip_wait) { uint32_t agp_base; uint32_t cmd_addr, addr_lo, addr_hi; @@ -504,12 +497,13 @@ static uint32_t *via_align_cmd(drm_via_p vb = via_get_dma(dev_priv); VIA_OUT_RING_QW(HC_HEADER2 | ((VIA_REG_TRANSET >> 2) << 12) | (VIA_REG_TRANSPACE >> 2), HC_ParaType_PreCR << 16); + agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr; qw_pad_count = (CMDBUF_ALIGNMENT_SIZE >> 3) - - ((dev_priv->dma_low & CMDBUF_ALIGNMENT_MASK) >> 3); + ((dev_priv->dma_low & CMDBUF_ALIGNMENT_MASK) >> 3); cmd_addr = (addr) ? 
addr : - agp_base + dev_priv->dma_low - 8 + (qw_pad_count << 3); + agp_base + dev_priv->dma_low - 8 + (qw_pad_count << 3); addr_lo = ((HC_SubA_HAGPBpL << 24) | (cmd_type & HC_HAGPBpID_MASK) | (cmd_addr & HC_HAGPBpL_MASK)); addr_hi = ((HC_SubA_HAGPBpH << 24) | (cmd_addr >> 24)); @@ -542,8 +536,8 @@ static void via_cmdbuf_start(drm_via_pri ((end_addr & 0xff000000) >> 16)); dev_priv->last_pause_ptr = - via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, - &pause_addr_hi, &pause_addr_lo, 1) - 1; + via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, + &pause_addr_hi, & pause_addr_lo, 1) - 1; via_flush_write_combine(); (void) *(volatile uint32_t *)dev_priv->last_pause_ptr; @@ -578,7 +572,7 @@ static void via_cmdbuf_start(drm_via_pri dev_priv->dma_diff = ptr - reader; } -static void via_pad_cache(drm_via_private_t * dev_priv, int qwords) +static void via_pad_cache(drm_via_private_t *dev_priv, int qwords) { uint32_t *vb; @@ -603,7 +597,7 @@ static void via_cmdbuf_jump(drm_via_priv uint32_t jump_addr_lo, jump_addr_hi; volatile uint32_t *last_pause_ptr; uint32_t dma_low_save1, dma_low_save2; - + agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr; via_align_cmd(dev_priv, HC_HAGPBpID_JUMP, 0, &jump_addr_hi, &jump_addr_lo, 0); @@ -623,14 +617,14 @@ static void via_cmdbuf_jump(drm_via_priv via_dummy_bitblt(dev_priv); last_pause_ptr = - via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi, + via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi, &pause_addr_lo, 0) - 1; via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi, &pause_addr_lo, 0); *last_pause_ptr = pause_addr_lo; dma_low_save1 = dev_priv->dma_low; - + /* * Now, set a trap that will pause the regulator if it tries to rerun the old * command buffer. (Which may happen if via_hook_segment detecs a command regulator pause @@ -646,7 +640,7 @@ static void via_cmdbuf_jump(drm_via_priv via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi, &pause_addr_lo, 0); *last_pause_ptr = pause_addr_lo; - + dma_low_save2 = dev_priv->dma_low; dev_priv->dma_low = dma_low_save1; via_hook_segment(dev_priv, jump_addr_hi, jump_addr_lo, 0); @@ -668,6 +662,7 @@ static void via_cmdbuf_flush(drm_via_pri via_hook_segment(dev_priv, pause_addr_hi, pause_addr_lo, 0); } + static void via_cmdbuf_pause(drm_via_private_t * dev_priv) { via_cmdbuf_flush(dev_priv, HC_HAGPBpID_PAUSE); @@ -735,6 +730,7 @@ static int via_cmdbuf_size(struct drm_de return ret; } + struct drm_ioctl_desc via_ioctls[] = { DRM_IOCTL_DEF(DRM_VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH), DRM_IOCTL_DEF(DRM_VIA_FREEMEM, via_mem_free, DRM_AUTH), diff -Napur -X /home/jbarnes/dontdiff gpu/drm/via/via_drv.c /tmp/drm-master/drivers/gpu/drm/via/via_drv.c --- gpu/drm/via/via_drv.c 2008-08-21 13:52:27.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/via/via_drv.c 2008-08-21 14:34:09.000000000 -0700 @@ -28,23 +28,58 @@ #include "drm_pciids.h" -static int dri_library_name(struct drm_device *dev, char *buf) + +static int dri_library_name(struct drm_device * dev, char * buf) { - return snprintf(buf, PAGE_SIZE, "unichrome"); + return snprintf(buf, PAGE_SIZE, "unichrome\n"); } static struct pci_device_id pciidlist[] = { viadrv_PCI_IDS }; + +extern struct drm_fence_driver via_fence_driver; + + +/** + * If there's no thrashing. This is the preferred memory type order. 
+ */ +static uint32_t via_mem_prios[] = {DRM_BO_MEM_PRIV0, DRM_BO_MEM_VRAM, DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL}; + +/** + * If we have thrashing, most memory will be evicted to TT anyway, so we might as well + * just move the new buffer into TT from the start. + */ +static uint32_t via_busy_prios[] = {DRM_BO_MEM_TT, DRM_BO_MEM_PRIV0, DRM_BO_MEM_VRAM, DRM_BO_MEM_LOCAL}; + + +static struct drm_bo_driver via_bo_driver = { + .mem_type_prio = via_mem_prios, + .mem_busy_prio = via_busy_prios, + .num_mem_type_prio = ARRAY_SIZE(via_mem_prios), + .num_mem_busy_prio = ARRAY_SIZE(via_busy_prios), + .create_ttm_backend_entry = via_create_ttm_backend_entry, + .fence_type = via_fence_types, + .invalidate_caches = via_invalidate_caches, + .init_mem_type = via_init_mem_type, + .evict_flags = via_evict_flags, + .move = NULL, + .ttm_cache_flush = NULL, + .command_stream_barrier = NULL +}; + +static int probe(struct pci_dev *pdev, const struct pci_device_id *ent); static struct drm_driver driver = { .driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_IRQ | - DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL, + DRIVER_IRQ_SHARED, .load = via_driver_load, .unload = via_driver_unload, .context_dtor = via_final_context, - .vblank_wait = via_driver_vblank_wait, + .get_vblank_counter = via_get_vblank_counter, + .enable_vblank = via_enable_vblank, + .disable_vblank = via_disable_vblank, .irq_preinstall = via_driver_irq_preinstall, .irq_postinstall = via_driver_irq_postinstall, .irq_uninstall = via_driver_irq_uninstall, @@ -59,32 +94,41 @@ static struct drm_driver driver = { .get_reg_ofs = drm_core_get_reg_ofs, .ioctls = via_ioctls, .fops = { - .owner = THIS_MODULE, - .open = drm_open, - .release = drm_release, - .ioctl = drm_ioctl, - .mmap = drm_mmap, - .poll = drm_poll, - .fasync = drm_fasync, - }, + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .ioctl = drm_ioctl, + .mmap = drm_mmap, + .poll = drm_poll, + .fasync = drm_fasync, + }, .pci_driver = { - .name = DRIVER_NAME, - .id_table = pciidlist, + .name = DRIVER_NAME, + .id_table = pciidlist, + .probe = probe, + .remove = __devexit_p(drm_cleanup_pci), }, - + .fence_driver = &via_fence_driver, + .bo_driver = &via_bo_driver, .name = DRIVER_NAME, .desc = DRIVER_DESC, - .date = DRIVER_DATE, - .major = DRIVER_MAJOR, - .minor = DRIVER_MINOR, - .patchlevel = DRIVER_PATCHLEVEL, + .date = VIA_DRM_DRIVER_DATE, + .major = VIA_DRM_DRIVER_MAJOR, + .minor = VIA_DRM_DRIVER_MINOR, + .patchlevel = VIA_DRM_DRIVER_PATCHLEVEL }; +static int probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + return drm_get_dev(pdev, ent, &driver); +} + static int __init via_init(void) { driver.num_ioctls = via_max_ioctl; + via_init_command_verifier(); - return drm_init(&driver); + return drm_init(&driver, pciidlist); } static void __exit via_exit(void) diff -Napur -X /home/jbarnes/dontdiff gpu/drm/via/via_drv.h /tmp/drm-master/drivers/gpu/drm/via/via_drv.h --- gpu/drm/via/via_drv.h 2008-08-21 13:52:27.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/via/via_drv.h 2008-08-21 14:34:09.000000000 -0700 @@ -29,16 +29,41 @@ #define DRIVER_NAME "via" #define DRIVER_DESC "VIA Unichrome / Pro" -#define DRIVER_DATE "20070202" - -#define DRIVER_MAJOR 2 -#define DRIVER_MINOR 11 -#define DRIVER_PATCHLEVEL 1 #include "via_verifier.h" +/* + * Registers go here. 
+ */ + + +#define CMDBUF_ALIGNMENT_SIZE (0x100) +#define CMDBUF_ALIGNMENT_MASK (0x0ff) + +/* defines for VIA 3D registers */ +#define VIA_REG_STATUS 0x400 +#define VIA_REG_TRANSET 0x43C +#define VIA_REG_TRANSPACE 0x440 + +/* VIA_REG_STATUS(0x400): Engine Status */ +#define VIA_CMD_RGTR_BUSY 0x00000080 /* Command Regulator is busy */ +#define VIA_2D_ENG_BUSY 0x00000001 /* 2D Engine is busy */ +#define VIA_3D_ENG_BUSY 0x00000002 /* 3D Engine is busy */ +#define VIA_VR_QUEUE_BUSY 0x00020000 /* Virtual Queue is busy */ + + + #include "via_dmablit.h" +/* + * This define and all its references can be removed when + * the DMA blit code has been implemented for FreeBSD. + */ +#define VIA_HAVE_DMABLIT 1 +#define VIA_HAVE_CORE_MM 1 +#define VIA_HAVE_FENCE 1 +#define VIA_HAVE_BUFFER 1 + #define VIA_PCI_BUF_SIZE 60000 #define VIA_FIRE_BUF_SIZE 1024 #define VIA_NUM_IRQS 4 @@ -75,6 +100,7 @@ typedef struct drm_via_private { struct timeval last_vblank; int last_vblank_valid; unsigned usec_per_vblank; + atomic_t vbl_received; drm_via_state_t hc_state; char pci_buf[VIA_PCI_BUF_SIZE]; const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE]; @@ -86,6 +112,7 @@ typedef struct drm_via_private { uint32_t irq_enable_mask; uint32_t irq_pending_mask; int *irq_map; + /* Memory manager stuff */ unsigned int idle_fault; struct drm_sman sman; int vram_initialized; @@ -93,7 +120,11 @@ typedef struct drm_via_private { unsigned long vram_offset; unsigned long agp_offset; drm_via_blitq_t blit_queues[VIA_NUM_BLIT_ENGINES]; - uint32_t dma_diff; + uint32_t dma_diff; + spinlock_t fence_lock; + uint32_t emit_0_sequence; + int have_idlelock; + struct timer_list fence_timer; } drm_via_private_t; enum via_family { @@ -125,29 +156,40 @@ extern int via_dma_blit( struct drm_devi extern int via_driver_load(struct drm_device *dev, unsigned long chipset); extern int via_driver_unload(struct drm_device *dev); - -extern int via_init_context(struct drm_device * dev, int context); extern int via_final_context(struct drm_device * dev, int context); extern int via_do_cleanup_map(struct drm_device * dev); -extern int via_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence); +extern u32 via_get_vblank_counter(struct drm_device *dev, int crtc); +extern int via_enable_vblank(struct drm_device *dev, int crtc); +extern void via_disable_vblank(struct drm_device *dev, int crtc); extern irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS); extern void via_driver_irq_preinstall(struct drm_device * dev); -extern void via_driver_irq_postinstall(struct drm_device * dev); +extern int via_driver_irq_postinstall(struct drm_device * dev); extern void via_driver_irq_uninstall(struct drm_device * dev); extern int via_dma_cleanup(struct drm_device * dev); extern void via_init_command_verifier(void); extern int via_driver_dma_quiescent(struct drm_device * dev); -extern void via_init_futex(drm_via_private_t * dev_priv); -extern void via_cleanup_futex(drm_via_private_t * dev_priv); -extern void via_release_futex(drm_via_private_t * dev_priv, int context); +extern void via_init_futex(drm_via_private_t *dev_priv); +extern void via_cleanup_futex(drm_via_private_t *dev_priv); +extern void via_release_futex(drm_via_private_t *dev_priv, int context); -extern void via_reclaim_buffers_locked(struct drm_device *dev, struct drm_file *file_priv); +extern void via_reclaim_buffers_locked(struct drm_device *dev, + struct drm_file *file_priv); extern void via_lastclose(struct drm_device *dev); extern void via_dmablit_handler(struct drm_device *dev, int engine, int from_irq); 
extern void via_init_dmablit(struct drm_device *dev); +extern struct drm_ttm_backend *via_create_ttm_backend_entry(struct drm_device *dev); +extern int via_fence_types(struct drm_buffer_object *bo, uint32_t *fclass, + uint32_t *type); +extern int via_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags); +extern int via_init_mem_type(struct drm_device *dev, uint32_t type, + struct drm_mem_type_manager *man); +extern uint64_t via_evict_flags(struct drm_buffer_object *bo); +extern int via_move(struct drm_buffer_object *bo, int evict, + int no_wait, struct drm_bo_mem_reg *new_mem); + #endif diff -Napur -X /home/jbarnes/dontdiff gpu/drm/via/via_fence.c /tmp/drm-master/drivers/gpu/drm/via/via_fence.c --- gpu/drm/via/via_fence.c 1969-12-31 16:00:00.000000000 -0800 +++ /tmp/drm-master/drivers/gpu/drm/via/via_fence.c 2008-08-21 14:34:09.000000000 -0700 @@ -0,0 +1,169 @@ +/************************************************************************** + * + * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA, + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * + **************************************************************************/ +/* + * Authors: Thomas Hellström + */ + +#include "drmP.h" +#include "via_drm.h" +#include "via_drv.h" + +/* + * DRM_FENCE_TYPE_EXE guarantees that all command buffers can be evicted. + * DRM_VIA_FENCE_TYPE_ACCEL guarantees that all 2D & 3D rendering is complete. + */ + +static void via_fence_poll(struct drm_device *dev, uint32_t class, + uint32_t waiting_types) +{ + drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; + uint32_t signaled_flush_types = 0; + uint32_t status; + + if (class != 0) + return; + + if (unlikely(!dev_priv)) + return; + + spin_lock(&dev_priv->fence_lock); + if (waiting_types) { + + /* + * Take the idlelock. This guarantees that the next time a client tries + * to grab the lock, it will stall until the idlelock is released. This + * guarantees that eventually, the GPU engines will be idle, but nothing + * else. It cannot be used to protect the hardware. + */ + + + if (!dev_priv->have_idlelock) { + drm_idlelock_take(&dev->lock); + dev_priv->have_idlelock = 1; + } + + /* + * Check if AGP command reader is idle. 
+ */ + + if (waiting_types & DRM_FENCE_TYPE_EXE) + if (VIA_READ(0x41C) & 0x80000000) + signaled_flush_types |= DRM_FENCE_TYPE_EXE; + + /* + * Check VRAM command queue empty and 2D + 3D engines idle. + */ + + if (waiting_types & DRM_VIA_FENCE_TYPE_ACCEL) { + status = VIA_READ(VIA_REG_STATUS); + if ((status & VIA_VR_QUEUE_BUSY) && + !(status & (VIA_CMD_RGTR_BUSY | VIA_2D_ENG_BUSY | VIA_3D_ENG_BUSY))) + signaled_flush_types |= DRM_VIA_FENCE_TYPE_ACCEL; + } + + if (signaled_flush_types) { + waiting_types &= ~signaled_flush_types; + if (!waiting_types && dev_priv->have_idlelock) { + drm_idlelock_release(&dev->lock); + dev_priv->have_idlelock = 0; + } + drm_fence_handler(dev, 0, dev_priv->emit_0_sequence, + signaled_flush_types, 0); + } + } + + spin_unlock(&dev_priv->fence_lock); + + return; +} + + +/** + * Emit a fence sequence. + */ + +static int via_fence_emit_sequence(struct drm_device * dev, uint32_t class, uint32_t flags, + uint32_t * sequence, uint32_t * native_type) +{ + drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; + int ret = 0; + + if (!dev_priv) + return -EINVAL; + + switch(class) { + case 0: /* AGP command stream */ + + /* + * The sequence number isn't really used by the hardware yet. + */ + + spin_lock(&dev_priv->fence_lock); + *sequence = ++dev_priv->emit_0_sequence; + spin_unlock(&dev_priv->fence_lock); + + /* + * When drm_fence_handler() is called with flush type 0x01, and a + * sequence number, That means that the EXE flag is expired. + * Nothing else. No implicit flushing or other engines idle. + */ + + *native_type = DRM_FENCE_TYPE_EXE; + break; + default: + ret = -EINVAL; + break; + } + return ret; +} + +/** + * No irq fence expirations implemented yet. + * Although both the HQV engines and PCI dmablit engines signal + * idle with an IRQ, we haven't implemented this yet. + * This means that the drm fence manager will always poll for engine idle, + * unless the caller wanting to wait for a fence object has indicated a lazy wait. 
+ */ + +static int via_fence_has_irq(struct drm_device * dev, uint32_t class, + uint32_t flags) +{ + return 0; +} + +struct drm_fence_driver via_fence_driver = { + .num_classes = 1, + .wrap_diff = (1 << 30), + .flush_diff = (1 << 20), + .sequence_mask = 0xffffffffU, + .has_irq = via_fence_has_irq, + .emit = via_fence_emit_sequence, + .poll = via_fence_poll, + .needed_flush = NULL, + .wait = NULL +}; diff -Napur -X /home/jbarnes/dontdiff gpu/drm/via/via_irq.c /tmp/drm-master/drivers/gpu/drm/via/via_irq.c --- gpu/drm/via/via_irq.c 2008-08-21 13:52:27.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/via/via_irq.c 2008-08-21 14:34:09.000000000 -0700 @@ -43,7 +43,7 @@ #define VIA_REG_INTERRUPT 0x200 /* VIA_REG_INTERRUPT */ -#define VIA_IRQ_GLOBAL (1 << 31) +#define VIA_IRQ_GLOBAL (1 << 31) #define VIA_IRQ_VBLANK_ENABLE (1 << 19) #define VIA_IRQ_VBLANK_PENDING (1 << 3) #define VIA_IRQ_HQV0_ENABLE (1 << 11) @@ -68,16 +68,15 @@ static maskarray_t via_pro_group_a_irqs[] = { {VIA_IRQ_HQV0_ENABLE, VIA_IRQ_HQV0_PENDING, 0x000003D0, 0x00008010, - 0x00000000}, + 0x00000000 }, {VIA_IRQ_HQV1_ENABLE, VIA_IRQ_HQV1_PENDING, 0x000013D0, 0x00008010, - 0x00000000}, + 0x00000000 }, {VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0, VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008}, {VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1, VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008}, }; -static int via_num_pro_group_a = - sizeof(via_pro_group_a_irqs) / sizeof(maskarray_t); +static int via_num_pro_group_a = ARRAY_SIZE(via_pro_group_a_irqs); static int via_irqmap_pro_group_a[] = {0, 1, -1, 2, -1, 3}; static maskarray_t via_unichrome_irqs[] = { @@ -86,14 +85,24 @@ static maskarray_t via_unichrome_irqs[] {VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1, VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008} }; -static int via_num_unichrome = sizeof(via_unichrome_irqs) / sizeof(maskarray_t); +static int via_num_unichrome = ARRAY_SIZE(via_unichrome_irqs); static int via_irqmap_unichrome[] = {-1, -1, -1, 0, -1, 1}; -static unsigned time_diff(struct timeval *now, struct timeval *then) + +static unsigned time_diff(struct timeval *now,struct timeval *then) { return (now->tv_usec >= then->tv_usec) ? 
- now->tv_usec - then->tv_usec : - 1000000 - (then->tv_usec - now->tv_usec); + now->tv_usec - then->tv_usec : + 1000000 - (then->tv_usec - now->tv_usec); +} + +u32 via_get_vblank_counter(struct drm_device *dev, int crtc) +{ + drm_via_private_t *dev_priv = dev->dev_private; + if (crtc != 0) + return 0; + + return atomic_read(&dev_priv->vbl_received); } irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS) @@ -108,23 +117,22 @@ irqreturn_t via_driver_irq_handler(DRM_I status = VIA_READ(VIA_REG_INTERRUPT); if (status & VIA_IRQ_VBLANK_PENDING) { - atomic_inc(&dev->vbl_received); - if (!(atomic_read(&dev->vbl_received) & 0x0F)) { + atomic_inc(&dev_priv->vbl_received); + if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) { do_gettimeofday(&cur_vblank); if (dev_priv->last_vblank_valid) { dev_priv->usec_per_vblank = - time_diff(&cur_vblank, - &dev_priv->last_vblank) >> 4; + time_diff(&cur_vblank, + &dev_priv->last_vblank) >> 4; } dev_priv->last_vblank = cur_vblank; dev_priv->last_vblank_valid = 1; } - if (!(atomic_read(&dev->vbl_received) & 0xFF)) { + if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) { DRM_DEBUG("US per vblank is: %u\n", dev_priv->usec_per_vblank); } - DRM_WAKEUP(&dev->vbl_queue); - drm_vbl_send_signals(dev); + drm_handle_vblank(dev, 0); handled = 1; } @@ -145,6 +153,7 @@ irqreturn_t via_driver_irq_handler(DRM_I /* Acknowlege interrupts */ VIA_WRITE(VIA_REG_INTERRUPT, status); + if (handled) return IRQ_HANDLED; else @@ -163,31 +172,34 @@ static __inline__ void viadrv_acknowledg } } -int via_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence) +int via_enable_vblank(struct drm_device *dev, int crtc) { - drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; - unsigned int cur_vblank; - int ret = 0; + drm_via_private_t *dev_priv = dev->dev_private; + u32 status; - DRM_DEBUG("\n"); - if (!dev_priv) { - DRM_ERROR("called with no initialization\n"); + if (crtc != 0) { + DRM_ERROR("%s: bad crtc %d\n", __FUNCTION__, crtc); return -EINVAL; } - viadrv_acknowledge_irqs(dev_priv); + status = VIA_READ(VIA_REG_INTERRUPT); + VIA_WRITE(VIA_REG_INTERRUPT, status & VIA_IRQ_VBLANK_ENABLE); - /* Assume that the user has missed the current sequence number - * by about a day rather than she wants to wait for years - * using vertical blanks... - */ - - DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ, - (((cur_vblank = atomic_read(&dev->vbl_received)) - - *sequence) <= (1 << 23))); + VIA_WRITE8(0x83d4, 0x11); + VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30); - *sequence = cur_vblank; - return ret; + return 0; +} + +void via_disable_vblank(struct drm_device *dev, int crtc) +{ + drm_via_private_t *dev_priv = dev->dev_private; + + VIA_WRITE8(0x83d4, 0x11); + VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) & ~0x30); + + if (crtc != 0) + DRM_ERROR("%s: bad crtc %d\n", __FUNCTION__, crtc); } static int @@ -239,6 +251,7 @@ via_driver_irq_wait(struct drm_device * return ret; } + /* * drm_dma.h hooks */ @@ -292,23 +305,25 @@ void via_driver_irq_preinstall(struct dr } } -void via_driver_irq_postinstall(struct drm_device * dev) +int via_driver_irq_postinstall(struct drm_device * dev) { drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; u32 status; - DRM_DEBUG("\n"); - if (dev_priv) { - status = VIA_READ(VIA_REG_INTERRUPT); - VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL - | dev_priv->irq_enable_mask); + DRM_DEBUG("via_driver_irq_postinstall\n"); + if (!dev_priv) + return -EINVAL; - /* Some magic, oh for some data sheets ! 
*/ + drm_vblank_init(dev, 1); + status = VIA_READ(VIA_REG_INTERRUPT); + VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL + | dev_priv->irq_enable_mask); - VIA_WRITE8(0x83d4, 0x11); - VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30); + /* Some magic, oh for some data sheets ! */ + VIA_WRITE8(0x83d4, 0x11); + VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30); - } + return 0; } void via_driver_irq_uninstall(struct drm_device * dev) @@ -352,7 +367,8 @@ int via_wait_irq(struct drm_device *dev, switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) { case VIA_IRQ_RELATIVE: - irqwait->request.sequence += atomic_read(&cur_irq->irq_received); + irqwait->request.sequence += + atomic_read(&cur_irq->irq_received); irqwait->request.type &= ~_DRM_VBLANK_RELATIVE; case VIA_IRQ_ABSOLUTE: break; diff -Napur -X /home/jbarnes/dontdiff gpu/drm/via/via_map.c /tmp/drm-master/drivers/gpu/drm/via/via_map.c --- gpu/drm/via/via_map.c 2008-08-21 13:52:27.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/via/via_map.c 2008-08-21 14:34:09.000000000 -0700 @@ -28,6 +28,7 @@ static int via_do_init_map(struct drm_device * dev, drm_via_init_t * init) { drm_via_private_t *dev_priv = dev->dev_private; + int ret = 0; DRM_DEBUG("\n"); @@ -60,12 +61,17 @@ static int via_do_init_map(struct drm_de dev_priv->agpAddr = init->agpAddr; - via_init_futex(dev_priv); - - via_init_dmablit(dev); - + via_init_futex( dev_priv ); + via_init_dmablit( dev ); + dev_priv->emit_0_sequence = 0; + dev_priv->have_idlelock = 0; + spin_lock_init(&dev_priv->fence_lock); dev->dev_private = (void *)dev_priv; - return 0; + ret = drm_bo_driver_init(dev); + if (ret) + DRM_ERROR("Could not initialize buffer object driver.\n"); + return ret; + } int via_do_cleanup_map(struct drm_device * dev) @@ -75,6 +81,7 @@ int via_do_cleanup_map(struct drm_device return 0; } + int via_map_init(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_via_init_t *init = data; @@ -116,7 +123,6 @@ int via_driver_unload(struct drm_device drm_via_private_t *dev_priv = dev->dev_private; drm_sman_takedown(&dev_priv->sman); - drm_free(dev_priv, sizeof(drm_via_private_t), DRM_MEM_DRIVER); return 0; diff -Napur -X /home/jbarnes/dontdiff gpu/drm/via/via_mm.c /tmp/drm-master/drivers/gpu/drm/via/via_mm.c --- gpu/drm/via/via_mm.c 2008-08-21 13:52:27.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/via/via_mm.c 2008-08-21 14:34:09.000000000 -0700 @@ -22,7 +22,7 @@ * DEALINGS IN THE SOFTWARE. */ /* - * Authors: Thomas Hellström + * Authors: Thomas Hellström */ #include "drmP.h" diff -Napur -X /home/jbarnes/dontdiff gpu/drm/via/via_mm.h /tmp/drm-master/drivers/gpu/drm/via/via_mm.h --- gpu/drm/via/via_mm.h 1969-12-31 16:00:00.000000000 -0800 +++ /tmp/drm-master/drivers/gpu/drm/via/via_mm.h 2008-08-21 14:34:09.000000000 -0700 @@ -0,0 +1,40 @@ +/* + * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved. + * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sub license, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _via_drm_mm_h_ +#define _via_drm_mm_h_ + +typedef struct { + unsigned int context; + unsigned int size; + unsigned long offset; + unsigned long free; +} drm_via_mm_t; + +typedef struct { + unsigned int size; + unsigned long handle; + void *virtual; +} drm_via_dma_t; + +#endif diff -Napur -X /home/jbarnes/dontdiff gpu/drm/via/via_verifier.c /tmp/drm-master/drivers/gpu/drm/via/via_verifier.c --- gpu/drm/via/via_verifier.c 2008-08-21 13:52:27.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/via/via_verifier.c 2008-08-21 14:34:09.000000000 -0700 @@ -250,9 +250,9 @@ eat_words(const uint32_t ** buf, const u */ static __inline__ drm_local_map_t *via_drm_lookup_agp_map(drm_via_state_t *seq, - unsigned long offset, - unsigned long size, - struct drm_device * dev) + unsigned long offset, + unsigned long size, + struct drm_device *dev) { struct drm_map_list *r_list; drm_local_map_t *map = seq->map_cache; @@ -261,7 +261,6 @@ static __inline__ drm_local_map_t *via_d && (offset + size) <= (map->offset + map->size)) { return map; } - list_for_each_entry(r_list, &dev->maplist, head) { map = r_list->map; if (!map) diff -Napur -X /home/jbarnes/dontdiff gpu/drm/via/via_verifier.h /tmp/drm-master/drivers/gpu/drm/via/via_verifier.h --- gpu/drm/via/via_verifier.h 2008-08-21 13:52:27.000000000 -0700 +++ /tmp/drm-master/drivers/gpu/drm/via/via_verifier.h 2008-08-21 14:34:09.000000000 -0700 @@ -20,7 +20,7 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * - * Author: Thomas Hellström 2004. + * Author: Thomas Hellström 2004. */ #ifndef _VIA_VERIFIER_H_ @@ -54,9 +54,9 @@ typedef struct { const uint32_t *buf_start; } drm_via_state_t; -extern int via_verify_command_stream(const uint32_t * buf, unsigned int size, - struct drm_device * dev, int agp); +extern int via_verify_command_stream(const uint32_t *buf, unsigned int size, + struct drm_device *dev, int agp); extern int via_parse_command_stream(struct drm_device *dev, const uint32_t *buf, - unsigned int size); + unsigned int size); #endif
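
The via_fence.c code added above bases fence signaling on two hardware tests: bit 31 of register 0x41C for AGP command reader idle (DRM_FENCE_TYPE_EXE), and the VIA_REG_STATUS busy bits for command-regulator/2D/3D idle (DRM_VIA_FENCE_TYPE_ACCEL). The standalone C sketch below is not part of the patch; it only mirrors those tests so the polling logic can be read in isolation. The status bits are copied from the via_drv.h hunk in this diff; the 0x01 value for the EXE type follows the patch's own "flush type 0x01" comment, while the ACCEL value used here is only an illustrative placeholder.

/*
 * Illustrative sketch of the signaling test performed by via_fence_poll().
 * Not part of the patch; register bits copied from the via_drv.h hunk above.
 */
#include <stdint.h>
#include <stdio.h>

#define VIA_CMD_RGTR_BUSY	0x00000080	/* Command Regulator is busy */
#define VIA_2D_ENG_BUSY		0x00000001	/* 2D Engine is busy */
#define VIA_3D_ENG_BUSY		0x00000002	/* 3D Engine is busy */
#define VIA_VR_QUEUE_BUSY	0x00020000	/* Virtual Queue is busy */

#define FENCE_TYPE_EXE		0x01		/* matches the patch's "flush type 0x01" comment */
#define FENCE_TYPE_ACCEL	0x02		/* placeholder for DRM_VIA_FENCE_TYPE_ACCEL */

/*
 * reg_41c is the value the patch reads from register 0x41C (bit 31 set when
 * the AGP command reader is idle); status is the VIA_REG_STATUS (0x400) value.
 * Returns the fence types via_fence_poll() would treat as signaled.
 */
static uint32_t via_signaled_types(uint32_t reg_41c, uint32_t status)
{
	uint32_t signaled = 0;

	if (reg_41c & 0x80000000)
		signaled |= FENCE_TYPE_EXE;

	if ((status & VIA_VR_QUEUE_BUSY) &&
	    !(status & (VIA_CMD_RGTR_BUSY | VIA_2D_ENG_BUSY | VIA_3D_ENG_BUSY)))
		signaled |= FENCE_TYPE_ACCEL;

	return signaled;
}

int main(void)
{
	/* Example: command reader idle, VR queue flag set, all engines idle. */
	uint32_t types = via_signaled_types(0x80000000, VIA_VR_QUEUE_BUSY);

	printf("signaled fence types: 0x%02x\n", (unsigned int)types);
	return 0;
}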