Update drm kernel drivers.

This is a sync to mesa/drm pre-gem, with a few fixes on top of that.
It also contains one local patch supplied by kib@ that I can't apply to
git.master shared code.

Approved by:	flz
Obtained from:	mesa/drm git.master
MFC after:	2 weeks
Robert Noland	2008-08-23 20:59:12 +00:00
commit a2a2d95441 (parent 993a741ac6)
70 changed files with 13381 additions and 6402 deletions


@ -572,6 +572,7 @@ dev/drm/i915_dma.c optional i915drm
dev/drm/i915_drv.c optional i915drm
dev/drm/i915_irq.c optional i915drm
dev/drm/i915_mem.c optional i915drm
dev/drm/i915_suspend.c optional i915drm
dev/drm/mach64_dma.c optional mach64drm
dev/drm/mach64_drv.c optional mach64drm
dev/drm/mach64_irq.c optional mach64drm


@ -1,6 +1,3 @@
/* ati_pcigart.h -- ATI PCI GART support -*- linux-c -*-
* Created: Wed Dec 13 21:52:19 2000 by gareth@valinux.com
*/
/*-
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
@ -32,72 +29,133 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/** @file ati_pcigart.c
* Implementation of ATI's PCIGART, which provides an aperture in card virtual
* address space with addresses remapped to system memory.
*/
#include "dev/drm/drmP.h"
#define ATI_PCIGART_PAGE_SIZE 4096 /* PCI GART page size */
#define ATI_MAX_PCIGART_PAGES 8192 /* 32 MB aperture, 4K pages */
#define ATI_PCIGART_TABLE_SIZE 32768
#define ATI_PCIGART_PAGE_MASK (~(ATI_PCIGART_PAGE_SIZE-1))
int drm_ati_pcigart_init(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
#define ATI_PCIE_WRITE 0x4
#define ATI_PCIE_READ 0x8
static int drm_ati_alloc_pcigart_table(struct drm_device *dev,
struct drm_ati_pcigart_info *gart_info)
{
unsigned long pages;
u32 *pci_gart = NULL, page_base;
int i, j;
dev->sg->dmah = drm_pci_alloc(dev, gart_info->table_size,
PAGE_SIZE,
gart_info->table_mask);
if (dev->sg->dmah == NULL)
return ENOMEM;
return 0;
}
static void drm_ati_free_pcigart_table(struct drm_device *dev,
struct drm_ati_pcigart_info *gart_info)
{
drm_pci_free(dev, dev->sg->dmah);
dev->sg->dmah = NULL;
}
int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
{
/* we need to support large memory configurations */
if (dev->sg == NULL) {
DRM_ERROR( "no scatter/gather memory!\n" );
DRM_ERROR("no scatter/gather memory!\n");
return 0;
}
if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
/* GART table in system memory */
dev->sg->dmah = drm_pci_alloc(dev, ATI_PCIGART_TABLE_SIZE, 0,
0xfffffffful);
if (dev->sg->dmah == NULL) {
DRM_ERROR("cannot allocate PCI GART table!\n");
return 0;
if (gart_info->bus_addr) {
if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
gart_info->bus_addr = 0;
if (dev->sg->dmah)
drm_ati_free_pcigart_table(dev, gart_info);
}
gart_info->addr = (void *)dev->sg->dmah->vaddr;
gart_info->bus_addr = dev->sg->dmah->busaddr;
pci_gart = (u32 *)dev->sg->dmah->vaddr;
} else {
/* GART table in framebuffer memory */
pci_gart = gart_info->addr;
}
pages = DRM_MIN(dev->sg->pages, ATI_MAX_PCIGART_PAGES);
bzero(pci_gart, ATI_PCIGART_TABLE_SIZE);
return 1;
}
int drm_ati_pcigart_init(struct drm_device *dev,
struct drm_ati_pcigart_info *gart_info)
{
void *address = NULL;
unsigned long pages;
u32 *pci_gart, page_base;
dma_addr_t bus_address = 0;
int i, j, ret = 0;
int max_pages;
dma_addr_t entry_addr;
/* we need to support large memory configurations */
if (dev->sg == NULL) {
DRM_ERROR("no scatter/gather memory!\n");
goto done;
}
if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n");
ret = drm_ati_alloc_pcigart_table(dev, gart_info);
if (ret) {
DRM_ERROR("cannot allocate PCI GART page!\n");
goto done;
}
address = (void *)dev->sg->dmah->vaddr;
bus_address = dev->sg->dmah->busaddr;
} else {
address = gart_info->addr;
bus_address = gart_info->bus_addr;
DRM_DEBUG("PCI: Gart Table: VRAM %08X mapped at %08lX\n",
(unsigned int)bus_address, (unsigned long)address);
}
pci_gart = (u32 *) address;
max_pages = (gart_info->table_size / sizeof(u32));
pages = (dev->sg->pages <= max_pages)
? dev->sg->pages : max_pages;
memset(pci_gart, 0, max_pages * sizeof(u32));
KASSERT(PAGE_SIZE >= ATI_PCIGART_PAGE_SIZE, ("page size too small"));
for ( i = 0 ; i < pages ; i++ ) {
page_base = (u32) dev->sg->busaddr[i];
for (i = 0; i < pages; i++) {
entry_addr = dev->sg->busaddr[i];
for (j = 0; j < (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); j++) {
if (gart_info->is_pcie)
*pci_gart = (cpu_to_le32(page_base) >> 8) | 0xc;
else
*pci_gart = cpu_to_le32(page_base);
page_base = (u32) entry_addr & ATI_PCIGART_PAGE_MASK;
switch(gart_info->gart_reg_if) {
case DRM_ATI_GART_IGP:
page_base |= (upper_32_bits(entry_addr) & 0xff) << 4;
page_base |= 0xc;
break;
case DRM_ATI_GART_PCIE:
page_base >>= 8;
page_base |= (upper_32_bits(entry_addr) & 0xff) << 24;
page_base |= ATI_PCIE_READ | ATI_PCIE_WRITE;
break;
default:
case DRM_ATI_GART_PCI:
break;
}
*pci_gart = cpu_to_le32(page_base);
pci_gart++;
page_base += ATI_PCIGART_PAGE_SIZE;
entry_addr += ATI_PCIGART_PAGE_SIZE;
}
}
DRM_MEMORYBARRIER();
return 1;
}
int drm_ati_pcigart_cleanup(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
{
if (dev->sg == NULL) {
DRM_ERROR( "no scatter/gather memory!\n" );
return 0;
}
drm_pci_free(dev, dev->sg->dmah);
return 1;
ret = 1;
done:
gart_info->addr = address;
gart_info->bus_addr = bus_address;
return ret;
}
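
For context, a minimal sketch of how a radeon-style driver would use the reworked interface above: it fills in the new struct drm_ati_pcigart_info fields (table_size, table_mask and gart_reg_if are the additions in this sync) and checks the boolean-style return of drm_ati_pcigart_init(). The struct example_private layout and EXAMPLE_PCIGART_TABLE_SIZE are illustrative assumptions, not part of this commit.

#define EXAMPLE_PCIGART_TABLE_SIZE	32768	/* illustrative only */

static int example_gart_setup(struct drm_device *dev,
    struct example_private *dev_priv)		/* hypothetical private struct */
{
	dev_priv->gart_info.table_size = EXAMPLE_PCIGART_TABLE_SIZE;
	dev_priv->gart_info.gart_table_location = DRM_ATI_GART_MAIN;
	dev_priv->gart_info.table_mask = DMA_BIT_MASK(32);
	dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCIE;	/* or _PCI / _IGP */
	dev_priv->gart_info.addr = NULL;
	dev_priv->gart_info.bus_addr = 0;

	if (!drm_ati_pcigart_init(dev, &dev_priv->gart_info)) {
		DRM_ERROR("failed to init PCI GART!\n");
		return ENOMEM;
	}
	/* the driver then programs its GART base register from
	 * dev_priv->gart_info.bus_addr; drm_ati_pcigart_cleanup()
	 * undoes the allocation at teardown. */
	return 0;
}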


@ -37,6 +37,7 @@ while (<>) {
s/#include "(.*)_drm.h/#include "dev\/drm\/\1_drm.h/;
s/#include "mga_ucode.h/#include "dev\/drm\/mga_ucode.h/;
s/#include "r300_reg.h/#include "dev\/drm\/r300_reg.h/;
s/#include "radeon_microcode.h/#include "dev\/drm\/radeon_microcode.h/;
s/#include "sis_ds.h/#include "dev\/drm\/sis_ds.h/;
s/#include "drm/#include "dev\/drm\/drm/;
print;
@ -47,4 +48,4 @@ while (<>) {
# if we never found the copyright header, then we're still a line behind.
if (!$foundopening) {
print $lastline;
}
}

File diff suppressed because it is too large.


@ -39,7 +39,7 @@ __FBSDID("$FreeBSD$");
#if defined(_KERNEL) || defined(__KERNEL__)
typedef struct drm_device drm_device_t;
struct drm_device;
typedef struct drm_file drm_file_t;
#include <sys/param.h>
@ -62,6 +62,8 @@ typedef struct drm_file drm_file_t;
#include <sys/bus.h>
#include <sys/signalvar.h>
#include <sys/poll.h>
#include <sys/tree.h>
#include <sys/taskqueue.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
@ -79,9 +81,9 @@ typedef struct drm_file drm_file_t;
#include <sys/memrange.h>
#if __FreeBSD_version >= 800004
#include <dev/agp/agpvar.h>
#else
#else /* __FreeBSD_version >= 800004 */
#include <pci/agpvar.h>
#endif
#endif /* __FreeBSD_version >= 800004 */
#include <sys/agpio.h>
#if __FreeBSD_version >= 500000
#include <sys/mutex.h>
@ -113,6 +115,7 @@ typedef struct drm_file drm_file_t;
#include "dev/drm/drm.h"
#include "dev/drm/drm_linux_list.h"
#include "dev/drm/drm_atomic.h"
#include "dev/drm/drm_internal.h"
#ifdef __FreeBSD__
#include <opt_drm.h>
@ -159,6 +162,7 @@ typedef struct drm_file drm_file_t;
#define DRM_MEM_CTXBITMAP 17
#define DRM_MEM_STUB 18
#define DRM_MEM_SGLISTS 19
#define DRM_MEM_DRAWABLE 20
#define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8)
@ -191,10 +195,15 @@ MALLOC_DECLARE(M_DRM);
#define DRM_CURPROC curthread
#define DRM_STRUCTPROC struct thread
#define DRM_SPINTYPE struct mtx
#define DRM_SPININIT(l,name) mtx_init(&l, name, NULL, MTX_DEF)
#define DRM_SPINUNINIT(l) mtx_destroy(&l)
#define DRM_SPININIT(l,name) mtx_init(l, name, NULL, MTX_DEF)
#define DRM_SPINUNINIT(l) mtx_destroy(l)
#define DRM_SPINLOCK(l) mtx_lock(l)
#define DRM_SPINUNLOCK(u) mtx_unlock(u);
#define DRM_SPINUNLOCK(u) mtx_unlock(u)
#define DRM_SPINLOCK_IRQSAVE(l, irqflags) do { \
mtx_lock(l); \
(void)irqflags; \
} while (0)
#define DRM_SPINUNLOCK_IRQRESTORE(u, irqflags) mtx_unlock(u)
#define DRM_SPINLOCK_ASSERT(l) mtx_assert(l, MA_OWNED)
#define DRM_CURRENTPID curthread->td_proc->p_pid
#define DRM_LOCK() mtx_lock(&dev->dev_lock)
@ -216,10 +225,6 @@ MALLOC_DECLARE(M_DRM);
#define spldrm() spltty()
#endif /* __NetBSD__ || __OpenBSD__ */
/* Currently our DRMFILE (filp) is a void * which is actually the pid
* of the current process. It should be a per-open unique pointer, but
* code for that is not yet written */
#define DRMFILE void *
#define DRM_IRQ_ARGS void *arg
typedef void irqreturn_t;
#define IRQ_HANDLED /* nothing */
@ -233,15 +238,19 @@ enum {
#define DRM_AGP_MEM struct agp_memory_info
#if defined(__FreeBSD__)
#define DRM_DEVICE \
drm_device_t *dev = kdev->si_drv1
#define DRM_IOCTL_ARGS struct cdev *kdev, u_long cmd, caddr_t data, \
int flags, DRM_STRUCTPROC *p, DRMFILE filp
#define drm_get_device_from_kdev(_kdev) (_kdev->si_drv1)
#elif defined(__NetBSD__)
#define drm_get_device_from_kdev(_kdev) device_lookup(&drm_cd, minor(_kdev))
#elif defined(__OpenBSD__)
#define drm_get_device_from_kdev(_kdev) device_lookup(&drm_cd, \
minor(_kdev)))->dv_cfdata->cf_driver->cd_devs[minor(_kdev)]
#endif
#if defined(__FreeBSD__)
#define PAGE_ALIGN(addr) round_page(addr)
/* DRM_SUSER returns true if the user is superuser */
#if __FreeBSD_version >= 700000
#define DRM_SUSER(p) (priv_check(p, PRIV_DRIVER) == 0)
#define DRM_SUSER(p) (priv_check(p, PRIV_DRIVER) == 0)
#else
#define DRM_SUSER(p) (suser(p) == 0)
#endif
@ -251,17 +260,6 @@ enum {
#else /* __FreeBSD__ */
#if defined(__NetBSD__)
#define DRM_DEVICE \
drm_device_t *dev = device_lookup(&drm_cd, minor(kdev))
#elif defined(__OpenBSD__)
#define DRM_DEVICE \
drm_device_t *dev = (device_lookup(&drm_cd, \
minor(kdev)))->dv_cfdata->cf_driver->cd_devs[minor(kdev)]
#endif /* __OpenBSD__ */
#define DRM_IOCTL_ARGS dev_t kdev, u_long cmd, caddr_t data, \
int flags, DRM_STRUCTPROC *p, DRMFILE filp
#define CDEV_MAJOR 34
#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
/* DRM_SUSER returns true if the user is superuser */
@ -270,7 +268,7 @@ enum {
#define DRM_MTRR_WC MTRR_TYPE_WC
#define jiffies hardclock_ticks
typedef drm_device_t *device_t;
typedef struct drm_device *device_t;
extern struct cfdriver drm_cd;
#endif /* !__FreeBSD__ */
@ -349,14 +347,6 @@ typedef vaddr_t vm_offset_t;
(!uvm_useracc((caddr_t)uaddr, size, VM_PROT_READ))
#endif /* !__FreeBSD__ */
#define DRM_COPY_TO_USER_IOCTL(user, kern, size) \
if ( IOCPARM_LEN(cmd) != size) \
return EINVAL; \
*user = kern;
#define DRM_COPY_FROM_USER_IOCTL(kern, user, size) \
if ( IOCPARM_LEN(cmd) != size) \
return EINVAL; \
kern = *user;
#define DRM_COPY_TO_USER(user, kern, size) \
copyout(kern, user, size)
#define DRM_COPY_FROM_USER(kern, user, size) \
@ -376,7 +366,6 @@ typedef vaddr_t vm_offset_t;
#define cpu_to_le32(x) htole32(x)
#define le32_to_cpu(x) le32toh(x)
#define DRM_ERR(v) v
#define DRM_HZ hz
#define DRM_UDELAY(udelay) DELAY(udelay)
#define DRM_TIME_SLICE (hz/20) /* Time slice for GLXContexts */
@ -385,59 +374,35 @@ typedef vaddr_t vm_offset_t;
(_map) = (_dev)->context_sareas[_ctx]; \
} while(0)
#define DRM_GET_PRIV_WITH_RETURN(_priv, _filp) \
do { \
if (_filp != (DRMFILE)(intptr_t)DRM_CURRENTPID) { \
DRM_ERROR("filp doesn't match curproc\n"); \
return EINVAL; \
} \
_priv = drm_find_file_by_proc(dev, DRM_CURPROC); \
if (_priv == NULL) { \
DRM_ERROR("can't find authenticator\n"); \
return EINVAL; \
} \
} while (0)
#define LOCK_TEST_WITH_RETURN(dev, filp) \
#define LOCK_TEST_WITH_RETURN(dev, file_priv) \
do { \
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) || \
dev->lock.filp != filp) { \
dev->lock.file_priv != file_priv) { \
DRM_ERROR("%s called without lock held\n", \
__FUNCTION__); \
return EINVAL; \
} \
} while (0)
#define DRM_GETSAREA() \
do { \
drm_local_map_t *map; \
DRM_SPINLOCK_ASSERT(&dev->dev_lock); \
TAILQ_FOREACH(map, &dev->maplist, link) { \
if (map->type == _DRM_SHM && \
map->flags & _DRM_CONTAINS_LOCK) { \
dev_priv->sarea = map; \
break; \
} \
} \
} while (0)
#if defined(__FreeBSD__) && __FreeBSD_version > 500000
/* Returns -errno to shared code */
#define DRM_WAIT_ON( ret, queue, timeout, condition ) \
for ( ret = 0 ; !ret && !(condition) ; ) { \
DRM_UNLOCK(); \
mtx_lock(&dev->irq_lock); \
if (!(condition)) \
ret = msleep(&(queue), &dev->irq_lock, \
ret = -mtx_sleep(&(queue), &dev->irq_lock, \
PZERO | PCATCH, "drmwtq", (timeout)); \
mtx_unlock(&dev->irq_lock); \
DRM_LOCK(); \
}
#else
/* Returns -errno to shared code */
#define DRM_WAIT_ON( ret, queue, timeout, condition ) \
for ( ret = 0 ; !ret && !(condition) ; ) { \
int s = spldrm(); \
if (!(condition)) \
ret = tsleep( &(queue), PZERO | PCATCH, \
ret = -tsleep( &(queue), PZERO | PCATCH, \
"drmwtq", (timeout) ); \
splx(s); \
}
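
A hedged sketch of the sign-convention change above: DRM_WAIT_ON now hands shared code a negated errno (0 on success, or -EINTR/-ERESTART/-EWOULDBLOCK when the sleep is interrupted or times out). The wrapper function, the 3-second timeout, and the (1 << 23) wrap window are illustrative; the caller is assumed to hold DRM_LOCK(), which the FreeBSD variant drops and retakes around the sleep.

static int example_wait_vblank(struct drm_device *dev, int crtc, u32 requested)
{
	int ret;

	/* sleep until the requested vblank sequence has passed */
	DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
	    (drm_vblank_count(dev, crtc) - requested) <= (1 << 23));
	if (ret != 0)
		DRM_DEBUG("vblank wait ended early: %d\n", ret);
	return ret;
}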
@ -467,9 +432,17 @@ typedef struct drm_pci_id_list
#define DRM_MASTER 0x2
#define DRM_ROOT_ONLY 0x4
typedef struct drm_ioctl_desc {
int (*func)(DRM_IOCTL_ARGS);
unsigned long cmd;
int (*func)(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int flags;
} drm_ioctl_desc_t;
/**
* Creates a driver or general drm_ioctl_desc array entry for the given
* ioctl, for use by drm_ioctl().
*/
#define DRM_IOCTL_DEF(ioctl, func, flags) \
[DRM_IOCTL_NR(ioctl)] = {ioctl, func, flags}
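
To make the convention this macro supports concrete, here is a hedged sketch of an ioctl under the new scheme used throughout this commit: the handler receives (dev, data, file_priv), data already points at the copied-in argument block (the generic ioctl path copies it back out for _IOR/_IOWR requests), and the dispatch table entry is generated with DRM_IOCTL_DEF. The "foo" names and argument struct are hypothetical, not part of this header.

struct drm_foo_arg {				/* hypothetical argument struct */
	int value;
};

static int foo_example_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv)
{
	struct drm_foo_arg *arg = data;

	LOCK_TEST_WITH_RETURN(dev, file_priv);
	arg->value = 42;	/* copied back to userland by the ioctl path */
	return 0;
}

/* entry in the driver's drm_ioctl_desc array:
 *	DRM_IOCTL_DEF(DRM_IOCTL_FOO_EXAMPLE, foo_example_ioctl, DRM_AUTH),
 */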
typedef struct drm_magic_entry {
drm_magic_t magic;
@ -492,7 +465,7 @@ typedef struct drm_buf {
unsigned long bus_address; /* Bus address of buffer */
struct drm_buf *next; /* Kernel-only: used for free list */
__volatile__ int pending; /* On hardware DMA queue */
DRMFILE filp; /* Unique identifier of holding process */
struct drm_file *file_priv; /* Unique identifier of holding process */
int context; /* Kernel queue for this buffer */
enum {
DRM_LIST_NONE = 0,
@ -554,12 +527,13 @@ struct drm_file {
typedef struct drm_lock_data {
drm_hw_lock_t *hw_lock; /* Hardware lock */
DRMFILE filp; /* Unique identifier of holding process (NULL is kernel)*/
struct drm_file *file_priv; /* Unique identifier of holding process (NULL is kernel)*/
int lock_queue; /* Queue of blocked processes */
unsigned long lock_time; /* Time of last lock in jiffies */
} drm_lock_data_t;
/* This structure, in the drm_device_t, is always initialized while the device
/* This structure, in the struct drm_device, is always initialized while the
* device
* is open. dev->dma_lock protects the incrementing of dev->buf_use, which
* when set marks that no further bufs may be allocated until device teardown
* occurs (when the last open of the device has closed). The high/low
@ -637,28 +611,57 @@ typedef struct drm_vbl_sig {
int pid;
} drm_vbl_sig_t;
struct drm_vblank_info {
wait_queue_head_t queue; /* vblank wait queue */
atomic_t count; /* number of VBLANK interrupts */
/* (driver must alloc the right number of counters) */
struct drm_vbl_sig_list sigs; /* signal list to send on VBLANK */
atomic_t refcount; /* number of users of vblank interrupts */
u32 last; /* protected by dev->vbl_lock, used */
/* for wraparound handling */
int enabled; /* so we don't call enable more than */
/* once per disable */
int inmodeset; /* Display driver is setting mode */
};
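
This structure is the core of the per-CRTC vblank machinery the sync brings in: drm_vblank_init() allocates one per CRTC, and drm_vblank_get()/drm_vblank_put() reference-count interrupt enablement through the new driver hooks. A hedged sketch of the driver side follows; EXAMPLE_READ/EXAMPLE_WRITE and the register names are placeholders, not real hardware.

static u32 example_get_vblank_counter(struct drm_device *dev, int crtc)
{
	/* hand back the hardware frame counter for this crtc */
	return EXAMPLE_READ(dev, EXAMPLE_FRAME_COUNT_REG(crtc));
}

static int example_enable_vblank(struct drm_device *dev, int crtc)
{
	EXAMPLE_WRITE(dev, EXAMPLE_VBLANK_IRQ_REG(crtc), 1);
	return 0;
}

static void example_disable_vblank(struct drm_device *dev, int crtc)
{
	EXAMPLE_WRITE(dev, EXAMPLE_VBLANK_IRQ_REG(crtc), 0);
}

/* wired up where the driver fills in its drm_driver_info, e.g.:
 *	dev->driver.get_vblank_counter = example_get_vblank_counter;
 *	dev->driver.enable_vblank      = example_enable_vblank;
 *	dev->driver.disable_vblank     = example_disable_vblank;
 * the driver also calls drm_vblank_init(dev, num_crtcs) once at load time
 * and drm_handle_vblank(dev, crtc) from its interrupt handler whenever a
 * vblank for that crtc fires.
 */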
/* location of GART table */
#define DRM_ATI_GART_MAIN 1
#define DRM_ATI_GART_FB 2
typedef struct ati_pcigart_info {
#define DRM_ATI_GART_PCI 1
#define DRM_ATI_GART_PCIE 2
#define DRM_ATI_GART_IGP 3
struct drm_ati_pcigart_info {
int gart_table_location;
int is_pcie;
int gart_reg_if;
void *addr;
dma_addr_t bus_addr;
dma_addr_t table_mask;
dma_addr_t member_mask;
struct drm_dma_handle *table_handle;
drm_local_map_t mapping;
} drm_ati_pcigart_info;
int table_size;
};
#ifndef DMA_BIT_MASK
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : (1ULL<<(n)) - 1)
#endif
#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
struct drm_driver_info {
int (*load)(struct drm_device *, unsigned long flags);
int (*firstopen)(struct drm_device *);
int (*open)(struct drm_device *, drm_file_t *);
void (*preclose)(struct drm_device *, void *filp);
void (*preclose)(struct drm_device *, struct drm_file *file_priv);
void (*postclose)(struct drm_device *, drm_file_t *);
void (*lastclose)(struct drm_device *);
int (*unload)(struct drm_device *);
void (*reclaim_buffers_locked)(struct drm_device *, void *filp);
int (*dma_ioctl)(DRM_IOCTL_ARGS);
void (*reclaim_buffers_locked)(struct drm_device *,
struct drm_file *file_priv);
int (*dma_ioctl)(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void (*dma_ready)(struct drm_device *);
int (*dma_quiescent)(struct drm_device *);
int (*dma_flush_block_and_flush)(struct drm_device *, int context,
@ -670,11 +673,13 @@ struct drm_driver_info {
int (*kernel_context_switch)(struct drm_device *dev, int old,
int new);
int (*kernel_context_switch_unlock)(struct drm_device *dev);
void (*irq_preinstall)(drm_device_t *dev);
void (*irq_postinstall)(drm_device_t *dev);
void (*irq_uninstall)(drm_device_t *dev);
void (*irq_preinstall)(struct drm_device *dev);
int (*irq_postinstall)(struct drm_device *dev);
void (*irq_uninstall)(struct drm_device *dev);
void (*irq_handler)(DRM_IRQ_ARGS);
int (*vblank_wait)(drm_device_t *dev, unsigned int *sequence);
u32 (*get_vblank_counter)(struct drm_device *dev, int crtc);
int (*enable_vblank)(struct drm_device *dev, int crtc);
void (*disable_vblank)(struct drm_device *dev, int crtc);
drm_pci_id_list_t *id_entry; /* PCI ID, name, and chipset private */
@ -711,6 +716,7 @@ struct drm_driver_info {
unsigned use_dma_queue :1;
unsigned use_irq :1;
unsigned use_vbl_irq :1;
unsigned use_vbl_irq2 :1;
unsigned use_mtrr :1;
};
@ -743,10 +749,14 @@ struct drm_device {
/* Locks */
#if defined(__FreeBSD__) && __FreeBSD_version > 500000
struct mtx vbl_lock; /* protects vblank operations */
struct mtx dma_lock; /* protects dev->dma */
struct mtx irq_lock; /* protects irq condition checks */
struct mtx dev_lock; /* protects everything else */
#endif
DRM_SPINTYPE drw_lock;
DRM_SPINTYPE tsk_lock;
/* Usage Counters */
int open_count; /* Outstanding files open */
int buf_use; /* Buffers in use -- cannot alloc */
@ -793,8 +803,13 @@ struct drm_device {
atomic_t context_flag; /* Context swapping flag */
int last_context; /* Last current context */
int vbl_queue; /* vbl wait channel */
atomic_t vbl_received;
int vblank_disable_allowed;
atomic_t vbl_signal_pending; /* number of signals pending on all crtcs */
struct callout vblank_disable_timer;
u32 max_vblank_count; /* size of vblank counter register */
struct drm_vblank_info *vblank; /* per crtc vblank info */
int num_crtcs;
#ifdef __FreeBSD__
struct sigio *buf_sigio; /* Processes waiting for SIGIO */
@ -811,6 +826,13 @@ struct drm_device {
void *dev_private;
unsigned int agp_buffer_token;
drm_local_map_t *agp_buffer_map;
struct unrhdr *drw_unrhdr;
/* RB tree of drawable infos */
RB_HEAD(drawable_tree, bsd_drm_drawable_info) drw_head;
struct task locked_task;
void (*locked_task_call)(struct drm_device *dev);
};
extern int drm_debug_flag;
@ -836,17 +858,20 @@ dev_type_read(drm_read);
dev_type_poll(drm_poll);
dev_type_mmap(drm_mmap);
#endif
extern drm_local_map_t *drm_getsarea(struct drm_device *dev);
/* File operations helpers (drm_fops.c) */
#ifdef __FreeBSD__
extern int drm_open_helper(struct cdev *kdev, int flags, int fmt,
DRM_STRUCTPROC *p, drm_device_t *dev);
extern drm_file_t *drm_find_file_by_proc(drm_device_t *dev,
DRM_STRUCTPROC *p);
extern int drm_open_helper(struct cdev *kdev, int flags, int fmt,
DRM_STRUCTPROC *p,
struct drm_device *dev);
extern drm_file_t *drm_find_file_by_proc(struct drm_device *dev,
DRM_STRUCTPROC *p);
#elif defined(__NetBSD__) || defined(__OpenBSD__)
extern int drm_open_helper(dev_t kdev, int flags, int fmt,
DRM_STRUCTPROC *p, drm_device_t *dev);
extern drm_file_t *drm_find_file_by_proc(drm_device_t *dev,
extern int drm_open_helper(dev_t kdev, int flags, int fmt,
DRM_STRUCTPROC *p,
struct drm_device *dev);
extern drm_file_t *drm_find_file_by_proc(struct drm_device *dev,
DRM_STRUCTPROC *p);
#endif /* __NetBSD__ || __OpenBSD__ */
@ -858,171 +883,238 @@ void *drm_calloc(size_t nmemb, size_t size, int area);
void *drm_realloc(void *oldpt, size_t oldsize, size_t size,
int area);
void drm_free(void *pt, size_t size, int area);
void *drm_ioremap(drm_device_t *dev, drm_local_map_t *map);
void *drm_ioremap(struct drm_device *dev, drm_local_map_t *map);
void drm_ioremapfree(drm_local_map_t *map);
int drm_mtrr_add(unsigned long offset, size_t size, int flags);
int drm_mtrr_del(int handle, unsigned long offset, size_t size, int flags);
int drm_context_switch(drm_device_t *dev, int old, int new);
int drm_context_switch_complete(drm_device_t *dev, int new);
int drm_context_switch(struct drm_device *dev, int old, int new);
int drm_context_switch_complete(struct drm_device *dev, int new);
int drm_ctxbitmap_init(drm_device_t *dev);
void drm_ctxbitmap_cleanup(drm_device_t *dev);
void drm_ctxbitmap_free(drm_device_t *dev, int ctx_handle);
int drm_ctxbitmap_next(drm_device_t *dev);
int drm_ctxbitmap_init(struct drm_device *dev);
void drm_ctxbitmap_cleanup(struct drm_device *dev);
void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle);
int drm_ctxbitmap_next(struct drm_device *dev);
/* Locking IOCTL support (drm_lock.c) */
int drm_lock_take(__volatile__ unsigned int *lock,
unsigned int context);
int drm_lock_transfer(drm_device_t *dev,
__volatile__ unsigned int *lock,
unsigned int context);
int drm_lock_free(drm_device_t *dev,
__volatile__ unsigned int *lock,
unsigned int context);
int drm_lock_transfer(struct drm_device *dev,
__volatile__ unsigned int *lock,
unsigned int context);
int drm_lock_free(struct drm_device *dev,
__volatile__ unsigned int *lock,
unsigned int context);
/* Buffer management support (drm_bufs.c) */
unsigned long drm_get_resource_start(drm_device_t *dev, unsigned int resource);
unsigned long drm_get_resource_len(drm_device_t *dev, unsigned int resource);
void drm_rmmap(drm_device_t *dev, drm_local_map_t *map);
unsigned long drm_get_resource_start(struct drm_device *dev,
unsigned int resource);
unsigned long drm_get_resource_len(struct drm_device *dev,
unsigned int resource);
void drm_rmmap(struct drm_device *dev, drm_local_map_t *map);
int drm_order(unsigned long size);
int drm_addmap(drm_device_t * dev, unsigned long offset, unsigned long size,
int drm_addmap(struct drm_device *dev, unsigned long offset,
unsigned long size,
drm_map_type_t type, drm_map_flags_t flags,
drm_local_map_t **map_ptr);
int drm_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request);
int drm_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request);
int drm_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request);
int drm_addbufs_pci(struct drm_device *dev, drm_buf_desc_t *request);
int drm_addbufs_sg(struct drm_device *dev, drm_buf_desc_t *request);
int drm_addbufs_agp(struct drm_device *dev, drm_buf_desc_t *request);
/* DMA support (drm_dma.c) */
int drm_dma_setup(drm_device_t *dev);
void drm_dma_takedown(drm_device_t *dev);
void drm_free_buffer(drm_device_t *dev, drm_buf_t *buf);
void drm_reclaim_buffers(drm_device_t *dev, DRMFILE filp);
int drm_dma_setup(struct drm_device *dev);
void drm_dma_takedown(struct drm_device *dev);
void drm_free_buffer(struct drm_device *dev, drm_buf_t *buf);
void drm_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv);
#define drm_core_reclaim_buffers drm_reclaim_buffers
/* IRQ support (drm_irq.c) */
int drm_irq_install(drm_device_t *dev);
int drm_irq_uninstall(drm_device_t *dev);
int drm_irq_install(struct drm_device *dev);
int drm_irq_uninstall(struct drm_device *dev);
irqreturn_t drm_irq_handler(DRM_IRQ_ARGS);
void drm_driver_irq_preinstall(drm_device_t *dev);
void drm_driver_irq_postinstall(drm_device_t *dev);
void drm_driver_irq_uninstall(drm_device_t *dev);
int drm_vblank_wait(drm_device_t *dev, unsigned int *vbl_seq);
void drm_vbl_send_signals(drm_device_t *dev);
void drm_driver_irq_preinstall(struct drm_device *dev);
void drm_driver_irq_postinstall(struct drm_device *dev);
void drm_driver_irq_uninstall(struct drm_device *dev);
void drm_handle_vblank(struct drm_device *dev, int crtc);
u32 drm_vblank_count(struct drm_device *dev, int crtc);
int drm_vblank_get(struct drm_device *dev, int crtc);
void drm_vblank_put(struct drm_device *dev, int crtc);
int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq);
int drm_vblank_init(struct drm_device *dev, int num_crtcs);
void drm_vbl_send_signals(struct drm_device *dev, int crtc);
int drm_modeset_ctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
/* AGP/PCI Express/GART support (drm_agpsupport.c) */
int drm_device_is_agp(drm_device_t *dev);
int drm_device_is_pcie(drm_device_t *dev);
int drm_device_is_agp(struct drm_device *dev);
int drm_device_is_pcie(struct drm_device *dev);
drm_agp_head_t *drm_agp_init(void);
int drm_agp_acquire(drm_device_t *dev);
int drm_agp_release(drm_device_t *dev);
int drm_agp_info(drm_device_t * dev, drm_agp_info_t *info);
int drm_agp_enable(drm_device_t *dev, drm_agp_mode_t mode);
int drm_agp_acquire(struct drm_device *dev);
int drm_agp_release(struct drm_device *dev);
int drm_agp_info(struct drm_device * dev, drm_agp_info_t *info);
int drm_agp_enable(struct drm_device *dev, drm_agp_mode_t mode);
void *drm_agp_allocate_memory(size_t pages, u32 type);
int drm_agp_free_memory(void *handle);
int drm_agp_bind_memory(void *handle, off_t start);
int drm_agp_unbind_memory(void *handle);
int drm_agp_alloc(drm_device_t *dev, drm_agp_buffer_t *request);
int drm_agp_free(drm_device_t *dev, drm_agp_buffer_t *request);
int drm_agp_bind(drm_device_t *dev, drm_agp_binding_t *request);
int drm_agp_unbind(drm_device_t *dev, drm_agp_binding_t *request);
int drm_agp_alloc(struct drm_device *dev, drm_agp_buffer_t *request);
int drm_agp_free(struct drm_device *dev, drm_agp_buffer_t *request);
int drm_agp_bind(struct drm_device *dev, drm_agp_binding_t *request);
int drm_agp_unbind(struct drm_device *dev, drm_agp_binding_t *request);
/* Scatter Gather Support (drm_scatter.c) */
void drm_sg_cleanup(drm_sg_mem_t *entry);
int drm_sg_alloc(struct drm_device *dev, drm_scatter_gather_t * request);
#ifdef __FreeBSD__
/* sysctl support (drm_sysctl.h) */
extern int drm_sysctl_init(drm_device_t *dev);
extern int drm_sysctl_cleanup(drm_device_t *dev);
extern int drm_sysctl_init(struct drm_device *dev);
extern int drm_sysctl_cleanup(struct drm_device *dev);
#endif /* __FreeBSD__ */
/* ATI PCIGART support (ati_pcigart.c) */
int drm_ati_pcigart_init(drm_device_t *dev,
drm_ati_pcigart_info *gart_info);
int drm_ati_pcigart_cleanup(drm_device_t *dev,
drm_ati_pcigart_info *gart_info);
int drm_ati_pcigart_init(struct drm_device *dev,
struct drm_ati_pcigart_info *gart_info);
int drm_ati_pcigart_cleanup(struct drm_device *dev,
struct drm_ati_pcigart_info *gart_info);
/* Locking IOCTL support (drm_drv.c) */
int drm_lock(DRM_IOCTL_ARGS);
int drm_unlock(DRM_IOCTL_ARGS);
int drm_version(DRM_IOCTL_ARGS);
int drm_setversion(DRM_IOCTL_ARGS);
int drm_lock(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_unlock(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_version(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_setversion(struct drm_device *dev, void *data,
struct drm_file *file_priv);
/* Misc. IOCTL support (drm_ioctl.c) */
int drm_irq_by_busid(DRM_IOCTL_ARGS);
int drm_getunique(DRM_IOCTL_ARGS);
int drm_setunique(DRM_IOCTL_ARGS);
int drm_getmap(DRM_IOCTL_ARGS);
int drm_getclient(DRM_IOCTL_ARGS);
int drm_getstats(DRM_IOCTL_ARGS);
int drm_noop(DRM_IOCTL_ARGS);
int drm_irq_by_busid(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_getunique(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_setunique(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_getmap(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_getclient(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_getstats(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_noop(struct drm_device *dev, void *data,
struct drm_file *file_priv);
/* Context IOCTL support (drm_context.c) */
int drm_resctx(DRM_IOCTL_ARGS);
int drm_addctx(DRM_IOCTL_ARGS);
int drm_modctx(DRM_IOCTL_ARGS);
int drm_getctx(DRM_IOCTL_ARGS);
int drm_switchctx(DRM_IOCTL_ARGS);
int drm_newctx(DRM_IOCTL_ARGS);
int drm_rmctx(DRM_IOCTL_ARGS);
int drm_setsareactx(DRM_IOCTL_ARGS);
int drm_getsareactx(DRM_IOCTL_ARGS);
int drm_resctx(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_addctx(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_modctx(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_getctx(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_switchctx(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_newctx(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_rmctx(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_setsareactx(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_getsareactx(struct drm_device *dev, void *data,
struct drm_file *file_priv);
/* Drawable IOCTL support (drm_drawable.c) */
int drm_adddraw(DRM_IOCTL_ARGS);
int drm_rmdraw(DRM_IOCTL_ARGS);
int drm_adddraw(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_rmdraw(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_update_draw(struct drm_device *dev, void *data,
struct drm_file *file_priv);
struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev,
int handle);
/* Drawable support (drm_drawable.c) */
void drm_drawable_free_all(struct drm_device *dev);
/* Authentication IOCTL support (drm_auth.c) */
int drm_getmagic(DRM_IOCTL_ARGS);
int drm_authmagic(DRM_IOCTL_ARGS);
int drm_getmagic(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_authmagic(struct drm_device *dev, void *data,
struct drm_file *file_priv);
/* Buffer management support (drm_bufs.c) */
int drm_addmap_ioctl(DRM_IOCTL_ARGS);
int drm_rmmap_ioctl(DRM_IOCTL_ARGS);
int drm_addbufs_ioctl(DRM_IOCTL_ARGS);
int drm_infobufs(DRM_IOCTL_ARGS);
int drm_markbufs(DRM_IOCTL_ARGS);
int drm_freebufs(DRM_IOCTL_ARGS);
int drm_mapbufs(DRM_IOCTL_ARGS);
int drm_addmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_rmmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_addbufs_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_infobufs(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_markbufs(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_freebufs(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_mapbufs(struct drm_device *dev, void *data,
struct drm_file *file_priv);
/* DMA support (drm_dma.c) */
int drm_dma(DRM_IOCTL_ARGS);
int drm_dma(struct drm_device *dev, void *data, struct drm_file *file_priv);
/* IRQ support (drm_irq.c) */
int drm_control(DRM_IOCTL_ARGS);
int drm_wait_vblank(DRM_IOCTL_ARGS);
int drm_control(struct drm_device *dev, void *data, struct drm_file *file_priv);
int drm_wait_vblank(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void drm_locked_tasklet(struct drm_device *dev,
void (*tasklet)(struct drm_device *dev));
/* AGP/GART support (drm_agpsupport.c) */
int drm_agp_acquire_ioctl(DRM_IOCTL_ARGS);
int drm_agp_release_ioctl(DRM_IOCTL_ARGS);
int drm_agp_enable_ioctl(DRM_IOCTL_ARGS);
int drm_agp_info_ioctl(DRM_IOCTL_ARGS);
int drm_agp_alloc_ioctl(DRM_IOCTL_ARGS);
int drm_agp_free_ioctl(DRM_IOCTL_ARGS);
int drm_agp_unbind_ioctl(DRM_IOCTL_ARGS);
int drm_agp_bind_ioctl(DRM_IOCTL_ARGS);
int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_agp_release_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_agp_info_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_agp_free_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
/* Scatter Gather Support (drm_scatter.c) */
int drm_sg_alloc(DRM_IOCTL_ARGS);
int drm_sg_free(DRM_IOCTL_ARGS);
int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_sg_free(struct drm_device *dev, void *data,
struct drm_file *file_priv);
/* consistent PCI memory functions (drm_pci.c) */
drm_dma_handle_t *drm_pci_alloc(drm_device_t *dev, size_t size, size_t align,
dma_addr_t maxaddr);
void drm_pci_free(drm_device_t *dev, drm_dma_handle_t *dmah);
drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size,
size_t align, dma_addr_t maxaddr);
void drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah);
#define drm_core_ioremap_wc drm_core_ioremap
/* Inline replacements for DRM_IOREMAP macros */
static __inline__ void drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev)
static __inline__ void
drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev)
{
map->handle = drm_ioremap(dev, map);
}
static __inline__ void drm_core_ioremapfree(struct drm_local_map *map, struct drm_device *dev)
static __inline__ void
drm_core_ioremapfree(struct drm_local_map *map, struct drm_device *dev)
{
if ( map->handle && map->size )
drm_ioremapfree(map);
}
static __inline__ struct drm_local_map *drm_core_findmap(struct drm_device *dev, unsigned long offset)
static __inline__ struct drm_local_map *
drm_core_findmap(struct drm_device *dev, unsigned long offset)
{
drm_local_map_t *map;


@ -1,6 +1,3 @@
/* drm_agpsupport.h -- DRM support for AGP/GART backend -*- linux-c -*-
* Created: Mon Dec 13 09:56:45 1999 by faith@precisioninsight.com
*/
/*-
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
@ -34,20 +31,25 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/** @file drm_agpsupport.c
* Support code for tying the kernel AGP support to DRM drivers and
* the DRM's AGP ioctls.
*/
#include "dev/drm/drmP.h"
#ifdef __FreeBSD__
#if __FreeBSD_version >= 800004
#include <dev/agp/agpreg.h>
#else
#else /* __FreeBSD_version >= 800004 */
#include <pci/agpreg.h>
#endif
#endif /* __FreeBSD_version >= 800004 */
#include <dev/pci/pcireg.h>
#endif
/* Returns 1 if AGP or 0 if not. */
static int
drm_device_find_capability(drm_device_t *dev, int cap)
drm_device_find_capability(struct drm_device *dev, int cap)
{
#ifdef __FreeBSD__
#if __FreeBSD_version >= 602102
@ -89,7 +91,7 @@ drm_device_find_capability(drm_device_t *dev, int cap)
#endif
}
int drm_device_is_agp(drm_device_t *dev)
int drm_device_is_agp(struct drm_device *dev)
{
if (dev->driver.device_is_agp != NULL) {
int ret;
@ -105,12 +107,12 @@ int drm_device_is_agp(drm_device_t *dev)
return (drm_device_find_capability(dev, PCIY_AGP));
}
int drm_device_is_pcie(drm_device_t *dev)
int drm_device_is_pcie(struct drm_device *dev)
{
return (drm_device_find_capability(dev, PCIY_EXPRESS));
}
int drm_agp_info(drm_device_t * dev, drm_agp_info_t *info)
int drm_agp_info(struct drm_device * dev, drm_agp_info_t *info)
{
struct agp_info *kern;
@ -132,11 +134,11 @@ int drm_agp_info(drm_device_t * dev, drm_agp_info_t *info)
return 0;
}
int drm_agp_info_ioctl(DRM_IOCTL_ARGS)
int drm_agp_info_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
int err;
drm_agp_info_t info;
DRM_DEVICE;
err = drm_agp_info(dev, &info);
if (err != 0)
@ -146,14 +148,14 @@ int drm_agp_info_ioctl(DRM_IOCTL_ARGS)
return 0;
}
int drm_agp_acquire_ioctl(DRM_IOCTL_ARGS)
int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
DRM_DEVICE;
return drm_agp_acquire(dev);
}
int drm_agp_acquire(drm_device_t *dev)
int drm_agp_acquire(struct drm_device *dev)
{
int retcode;
@ -168,14 +170,14 @@ int drm_agp_acquire(drm_device_t *dev)
return 0;
}
int drm_agp_release_ioctl(DRM_IOCTL_ARGS)
int drm_agp_release_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
DRM_DEVICE;
return drm_agp_release(dev);
}
int drm_agp_release(drm_device_t * dev)
int drm_agp_release(struct drm_device * dev)
{
if (!dev->agp || !dev->agp->acquired)
return EINVAL;
@ -184,7 +186,7 @@ int drm_agp_release(drm_device_t * dev)
return 0;
}
int drm_agp_enable(drm_device_t *dev, drm_agp_mode_t mode)
int drm_agp_enable(struct drm_device *dev, drm_agp_mode_t mode)
{
if (!dev->agp || !dev->agp->acquired)
@ -192,22 +194,21 @@ int drm_agp_enable(drm_device_t *dev, drm_agp_mode_t mode)
dev->agp->mode = mode.mode;
agp_enable(dev->agp->agpdev, mode.mode);
dev->agp->base = dev->agp->info.ai_aperture_base;
dev->agp->enabled = 1;
return 0;
}
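
For orientation, AGP bring-up is normally driven from user space rather than from the kernel driver: the X server or DRI driver goes through libdrm, and each call lands in the matching *_ioctl() wrapper in this file, which is a thin shim over the helpers above. A hedged sketch; fd is an open DRM device node, and reusing the reported mode is just for illustration.

#include "xf86drm.h"	/* libdrm entry points */

static void example_agp_bringup(int fd)
{
	if (drmAgpAcquire(fd) == 0) {
		unsigned long mode = drmAgpGetMode(fd);	/* what the chipset reports */

		drmAgpEnable(fd, mode);	/* reaches drm_agp_enable() above */
	}
}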
int drm_agp_enable_ioctl(DRM_IOCTL_ARGS)
int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_agp_mode_t mode;
DRM_DEVICE;
mode = *(drm_agp_mode_t *) data;
return drm_agp_enable(dev, mode);
}
int drm_agp_alloc(drm_device_t *dev, drm_agp_buffer_t *request)
int drm_agp_alloc(struct drm_device *dev, drm_agp_buffer_t *request)
{
drm_agp_mem_t *entry;
void *handle;
@ -250,9 +251,9 @@ int drm_agp_alloc(drm_device_t *dev, drm_agp_buffer_t *request)
return 0;
}
int drm_agp_alloc_ioctl(DRM_IOCTL_ARGS)
int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
DRM_DEVICE;
drm_agp_buffer_t request;
int retcode;
@ -267,7 +268,8 @@ int drm_agp_alloc_ioctl(DRM_IOCTL_ARGS)
return retcode;
}
static drm_agp_mem_t * drm_agp_lookup_entry(drm_device_t *dev, void *handle)
static drm_agp_mem_t * drm_agp_lookup_entry(struct drm_device *dev,
void *handle)
{
drm_agp_mem_t *entry;
@ -277,7 +279,7 @@ static drm_agp_mem_t * drm_agp_lookup_entry(drm_device_t *dev, void *handle)
return NULL;
}
int drm_agp_unbind(drm_device_t *dev, drm_agp_binding_t *request)
int drm_agp_unbind(struct drm_device *dev, drm_agp_binding_t *request)
{
drm_agp_mem_t *entry;
int retcode;
@ -299,9 +301,9 @@ int drm_agp_unbind(drm_device_t *dev, drm_agp_binding_t *request)
return retcode;
}
int drm_agp_unbind_ioctl(DRM_IOCTL_ARGS)
int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
DRM_DEVICE;
drm_agp_binding_t request;
int retcode;
@ -314,7 +316,7 @@ int drm_agp_unbind_ioctl(DRM_IOCTL_ARGS)
return retcode;
}
int drm_agp_bind(drm_device_t *dev, drm_agp_binding_t *request)
int drm_agp_bind(struct drm_device *dev, drm_agp_binding_t *request)
{
drm_agp_mem_t *entry;
int retcode;
@ -340,9 +342,9 @@ int drm_agp_bind(drm_device_t *dev, drm_agp_binding_t *request)
return retcode;
}
int drm_agp_bind_ioctl(DRM_IOCTL_ARGS)
int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
DRM_DEVICE;
drm_agp_binding_t request;
int retcode;
@ -355,7 +357,7 @@ int drm_agp_bind_ioctl(DRM_IOCTL_ARGS)
return retcode;
}
int drm_agp_free(drm_device_t *dev, drm_agp_buffer_t *request)
int drm_agp_free(struct drm_device *dev, drm_agp_buffer_t *request)
{
drm_agp_mem_t *entry;
@ -385,9 +387,9 @@ int drm_agp_free(drm_device_t *dev, drm_agp_buffer_t *request)
}
int drm_agp_free_ioctl(DRM_IOCTL_ARGS)
int drm_agp_free_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
DRM_DEVICE;
drm_agp_buffer_t request;
int retcode;
@ -418,6 +420,7 @@ drm_agp_head_t *drm_agp_init(void)
return NULL;
head->agpdev = agpdev;
agp_get_info(agpdev, &head->info);
head->base = head->info.ai_aperture_base;
head->memory = NULL;
DRM_INFO("AGP at 0x%08lx %dMB\n",
(long)head->info.ai_aperture_base,


@ -1,6 +1,3 @@
/* drm_auth.h -- IOCTLs for authentication -*- linux-c -*-
* Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
*/
/*-
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
@ -34,6 +31,11 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/** @file drm_auth.c
* Implementation of the get/authmagic ioctls implementing the authentication
* scheme between the master and clients.
*/
#include "dev/drm/drmP.h"
static int drm_hash_magic(drm_magic_t magic)
@ -41,40 +43,46 @@ static int drm_hash_magic(drm_magic_t magic)
return magic & (DRM_HASH_SIZE-1);
}
static drm_file_t *drm_find_file(drm_device_t *dev, drm_magic_t magic)
/**
* Returns the file private associated with the given magic number.
*/
static drm_file_t *drm_find_file(struct drm_device *dev, drm_magic_t magic)
{
drm_file_t *retval = NULL;
drm_magic_entry_t *pt;
int hash;
int hash = drm_hash_magic(magic);
hash = drm_hash_magic(magic);
DRM_SPINLOCK_ASSERT(&dev->dev_lock);
DRM_LOCK();
for (pt = dev->magiclist[hash].head; pt; pt = pt->next) {
if (pt->magic == magic) {
retval = pt->priv;
break;
return pt->priv;
}
}
DRM_UNLOCK();
return retval;
return NULL;
}
static int drm_add_magic(drm_device_t *dev, drm_file_t *priv, drm_magic_t magic)
/**
* Inserts the given magic number into the hash table of used magic number
* lists.
*/
static int drm_add_magic(struct drm_device *dev, drm_file_t *priv,
drm_magic_t magic)
{
int hash;
drm_magic_entry_t *entry;
DRM_DEBUG("%d\n", magic);
DRM_SPINLOCK_ASSERT(&dev->dev_lock);
hash = drm_hash_magic(magic);
entry = malloc(sizeof(*entry), M_DRM, M_ZERO | M_NOWAIT);
if (!entry) return DRM_ERR(ENOMEM);
if (!entry) return ENOMEM;
entry->magic = magic;
entry->priv = priv;
entry->next = NULL;
DRM_LOCK();
if (dev->magiclist[hash].tail) {
dev->magiclist[hash].tail->next = entry;
dev->magiclist[hash].tail = entry;
@ -82,21 +90,25 @@ static int drm_add_magic(drm_device_t *dev, drm_file_t *priv, drm_magic_t magic)
dev->magiclist[hash].head = entry;
dev->magiclist[hash].tail = entry;
}
DRM_UNLOCK();
return 0;
}
static int drm_remove_magic(drm_device_t *dev, drm_magic_t magic)
/**
* Removes the given magic number from the hash table of used magic number
* lists.
*/
static int drm_remove_magic(struct drm_device *dev, drm_magic_t magic)
{
drm_magic_entry_t *prev = NULL;
drm_magic_entry_t *pt;
int hash;
DRM_SPINLOCK_ASSERT(&dev->dev_lock);
DRM_DEBUG("%d\n", magic);
hash = drm_hash_magic(magic);
DRM_LOCK();
for (pt = dev->magiclist[hash].head; pt; prev = pt, pt = pt->next) {
if (pt->magic == magic) {
if (dev->magiclist[hash].head == pt) {
@ -108,68 +120,70 @@ static int drm_remove_magic(drm_device_t *dev, drm_magic_t magic)
if (prev) {
prev->next = pt->next;
}
DRM_UNLOCK();
free(pt, M_DRM);
return 0;
}
}
DRM_UNLOCK();
free(pt, M_DRM);
return DRM_ERR(EINVAL);
return EINVAL;
}
int drm_getmagic(DRM_IOCTL_ARGS)
/**
* Called by the client, this returns a unique magic number to be authorized
* by the master.
*
* The master may use its own knowledge of the client (such as the X
* connection that the magic is passed over) to determine if the magic number
* should be authenticated.
*/
int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEVICE;
static drm_magic_t sequence = 0;
drm_auth_t auth;
drm_file_t *priv;
DRM_LOCK();
priv = drm_find_file_by_proc(dev, p);
DRM_UNLOCK();
if (priv == NULL) {
DRM_ERROR("can't find authenticator\n");
return EINVAL;
}
drm_auth_t *auth = data;
/* Find unique magic */
if (priv->magic) {
auth.magic = priv->magic;
if (file_priv->magic) {
auth->magic = file_priv->magic;
} else {
DRM_LOCK();
do {
int old = sequence;
auth.magic = old+1;
if (!atomic_cmpset_int(&sequence, old, auth.magic))
auth->magic = old+1;
if (!atomic_cmpset_int(&sequence, old, auth->magic))
continue;
} while (drm_find_file(dev, auth.magic));
priv->magic = auth.magic;
drm_add_magic(dev, priv, auth.magic);
} while (drm_find_file(dev, auth->magic));
file_priv->magic = auth->magic;
drm_add_magic(dev, file_priv, auth->magic);
DRM_UNLOCK();
}
DRM_DEBUG("%u\n", auth.magic);
DRM_COPY_TO_USER_IOCTL((drm_auth_t *)data, auth, sizeof(auth));
DRM_DEBUG("%u\n", auth->magic);
return 0;
}
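
A hedged userspace-side sketch of the handshake the comment above describes (normally wrapped by libdrm's drmGetMagic()/drmAuthMagic()); client_fd and master_fd are assumed to be open descriptors on the same DRM device, and error handling is trimmed.

#include <sys/ioctl.h>
#include "drm.h"	/* drm_auth_t and the ioctl numbers */

static int example_authenticate(int client_fd, int master_fd)
{
	drm_auth_t auth;

	/* client: ask the kernel for a magic cookie */
	if (ioctl(client_fd, DRM_IOCTL_GET_MAGIC, &auth) != 0)
		return -1;

	/* the client passes auth.magic to the master out of band,
	 * historically over the X connection; here both ends are local */

	/* master: authorize that client once it decides it is acceptable */
	return ioctl(master_fd, DRM_IOCTL_AUTH_MAGIC, &auth);
}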
int drm_authmagic(DRM_IOCTL_ARGS)
/**
* Marks the client associated with the given magic number as authenticated.
*/
int drm_authmagic(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_auth_t auth;
drm_file_t *file;
DRM_DEVICE;
drm_auth_t *auth = data;
drm_file_t *priv;
DRM_COPY_FROM_USER_IOCTL(auth, (drm_auth_t *)data, sizeof(auth));
DRM_DEBUG("%u\n", auth->magic);
DRM_DEBUG("%u\n", auth.magic);
if ((file = drm_find_file(dev, auth.magic))) {
file->authenticated = 1;
drm_remove_magic(dev, auth.magic);
DRM_LOCK();
priv = drm_find_file(dev, auth->magic);
if (priv != NULL) {
priv->authenticated = 1;
drm_remove_magic(dev, auth->magic);
DRM_UNLOCK();
return 0;
} else {
DRM_UNLOCK();
return EINVAL;
}
return DRM_ERR(EINVAL);
}


@ -1,6 +1,3 @@
/* drm_bufs.h -- Generic buffer template -*- linux-c -*-
* Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
*/
/*-
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
@ -34,6 +31,10 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/** @file drm_bufs.c
* Implementation of the ioctls for setup of DRM mappings and DMA buffers.
*/
#include "dev/pci/pcireg.h"
#include "dev/drm/drmP.h"
@ -58,7 +59,7 @@ int drm_order(unsigned long size)
* drm_get_resource_*. Note that they are not RF_ACTIVE, so there's no virtual
* address for accessing them. Cleaned up at unload.
*/
static int drm_alloc_resource(drm_device_t *dev, int resource)
static int drm_alloc_resource(struct drm_device *dev, int resource)
{
if (resource >= DRM_MAX_PCI_RESOURCE) {
DRM_ERROR("Resource %d too large\n", resource);
@ -84,7 +85,8 @@ static int drm_alloc_resource(drm_device_t *dev, int resource)
return 0;
}
unsigned long drm_get_resource_start(drm_device_t *dev, unsigned int resource)
unsigned long drm_get_resource_start(struct drm_device *dev,
unsigned int resource)
{
if (drm_alloc_resource(dev, resource) != 0)
return 0;
@ -92,7 +94,8 @@ unsigned long drm_get_resource_start(drm_device_t *dev, unsigned int resource)
return rman_get_start(dev->pcir[resource]);
}
unsigned long drm_get_resource_len(drm_device_t *dev, unsigned int resource)
unsigned long drm_get_resource_len(struct drm_device *dev,
unsigned int resource)
{
if (drm_alloc_resource(dev, resource) != 0)
return 0;
@ -100,7 +103,8 @@ unsigned long drm_get_resource_len(drm_device_t *dev, unsigned int resource)
return rman_get_size(dev->pcir[resource]);
}
int drm_addmap(drm_device_t * dev, unsigned long offset, unsigned long size,
int drm_addmap(struct drm_device * dev, unsigned long offset,
unsigned long size,
drm_map_type_t type, drm_map_flags_t flags, drm_local_map_t **map_ptr)
{
drm_local_map_t *map;
@ -151,8 +155,10 @@ int drm_addmap(drm_device_t * dev, unsigned long offset, unsigned long size,
* initialization necessary.
*/
map = malloc(sizeof(*map), M_DRM, M_ZERO | M_NOWAIT);
if ( !map )
return DRM_ERR(ENOMEM);
if ( !map ) {
DRM_LOCK();
return ENOMEM;
}
map->offset = offset;
map->size = size;
@ -175,7 +181,8 @@ int drm_addmap(drm_device_t * dev, unsigned long offset, unsigned long size,
map->size, drm_order(map->size), map->handle );
if ( !map->handle ) {
free(map, M_DRM);
return DRM_ERR(ENOMEM);
DRM_LOCK();
return ENOMEM;
}
map->offset = (unsigned long)map->handle;
if ( map->flags & _DRM_CONTAINS_LOCK ) {
@ -185,7 +192,7 @@ int drm_addmap(drm_device_t * dev, unsigned long offset, unsigned long size,
DRM_UNLOCK();
free(map->handle, M_DRM);
free(map, M_DRM);
return DRM_ERR(EBUSY);
return EBUSY;
}
dev->lock.hw_lock = map->handle; /* Pointer to lock */
DRM_UNLOCK();
@ -193,7 +200,17 @@ int drm_addmap(drm_device_t * dev, unsigned long offset, unsigned long size,
break;
case _DRM_AGP:
/*valid = 0;*/
map->offset += dev->agp->base;
/* In some cases (i810 driver), user space may have already
* added the AGP base itself, because dev->agp->base previously
* only got set during AGP enable. So, only add the base
* address if the map's offset isn't already within the
* aperture.
*/
if (map->offset < dev->agp->base ||
map->offset > dev->agp->base +
dev->agp->info.ai_aperture_size - 1) {
map->offset += dev->agp->base;
}
map->mtrr = dev->agp->mtrr; /* for getmap */
/*for (entry = dev->agp->memory; entry; entry = entry->next) {
if ((map->offset >= entry->bound) &&
@ -205,13 +222,15 @@ int drm_addmap(drm_device_t * dev, unsigned long offset, unsigned long size,
}
if (!valid) {
free(map, M_DRM);
return DRM_ERR(EACCES);
DRM_LOCK();
return EACCES;
}*/
break;
case _DRM_SCATTER_GATHER:
if (!dev->sg) {
free(map, M_DRM);
return DRM_ERR(EINVAL);
DRM_LOCK();
return EINVAL;
}
map->offset = map->offset + dev->sg->handle;
break;
@ -228,7 +247,8 @@ int drm_addmap(drm_device_t * dev, unsigned long offset, unsigned long size,
map->dmah = drm_pci_alloc(dev, map->size, align, 0xfffffffful);
if (map->dmah == NULL) {
free(map, M_DRM);
return DRM_ERR(ENOMEM);
DRM_LOCK();
return ENOMEM;
}
map->handle = map->dmah->vaddr;
map->offset = map->dmah->busaddr;
@ -236,7 +256,8 @@ int drm_addmap(drm_device_t * dev, unsigned long offset, unsigned long size,
default:
DRM_ERROR("Bad map type %d\n", map->type);
free(map, M_DRM);
return DRM_ERR(EINVAL);
DRM_LOCK();
return EINVAL;
}
DRM_LOCK();
@ -253,44 +274,41 @@ done:
return 0;
}
int drm_addmap_ioctl(DRM_IOCTL_ARGS)
int drm_addmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_map_t request;
drm_map_t *request = data;
drm_local_map_t *map;
int err;
DRM_DEVICE;
if (!(dev->flags & (FREAD|FWRITE)))
return DRM_ERR(EACCES); /* Require read/write */
return EACCES; /* Require read/write */
DRM_COPY_FROM_USER_IOCTL(request, (drm_map_t *)data, sizeof(drm_map_t));
if (!DRM_SUSER(p) && request.type != _DRM_AGP)
return DRM_ERR(EACCES);
if (!DRM_SUSER(DRM_CURPROC) && request->type != _DRM_AGP)
return EACCES;
DRM_LOCK();
err = drm_addmap(dev, request.offset, request.size, request.type,
request.flags, &map);
err = drm_addmap(dev, request->offset, request->size, request->type,
request->flags, &map);
DRM_UNLOCK();
if (err != 0)
return err;
request.offset = map->offset;
request.size = map->size;
request.type = map->type;
request.flags = map->flags;
request.mtrr = map->mtrr;
request.handle = map->handle;
request->offset = map->offset;
request->size = map->size;
request->type = map->type;
request->flags = map->flags;
request->mtrr = map->mtrr;
request->handle = map->handle;
if (request.type != _DRM_SHM) {
request.handle = (void *)request.offset;
if (request->type != _DRM_SHM) {
request->handle = (void *)request->offset;
}
DRM_COPY_TO_USER_IOCTL((drm_map_t *)data, request, sizeof(drm_map_t));
return 0;
}
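
As a hedged counterpart to the ioctl path above, roughly how a driver maps its register BAR from inside the kernel: drm_addmap() is entered with DRM_LOCK() held, exactly as drm_addmap_ioctl() does it. The helper name, the resource index (0) and the flags value are illustrative assumptions.

static int example_map_registers(struct drm_device *dev,
    drm_local_map_t **regs)			/* hypothetical helper */
{
	int ret;

	DRM_LOCK();
	ret = drm_addmap(dev, drm_get_resource_start(dev, 0),
	    drm_get_resource_len(dev, 0), _DRM_REGISTERS, 0, regs);
	DRM_UNLOCK();
	/* on success the map can be looked up again later with
	 * drm_core_findmap(dev, (*regs)->offset) */
	return ret;
}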
void drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
void drm_rmmap(struct drm_device *dev, drm_local_map_t *map)
{
DRM_SPINLOCK_ASSERT(&dev->dev_lock);
@ -319,6 +337,9 @@ void drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
case _DRM_CONSISTENT:
drm_pci_free(dev, map->dmah);
break;
default:
DRM_ERROR("Bad map type %d\n", map->type);
break;
}
if (map->bsr != NULL) {
@ -333,17 +354,15 @@ void drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
* isn't in use.
*/
int drm_rmmap_ioctl(DRM_IOCTL_ARGS)
int drm_rmmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
DRM_DEVICE;
drm_local_map_t *map;
drm_map_t request;
DRM_COPY_FROM_USER_IOCTL( request, (drm_map_t *)data, sizeof(request) );
drm_map_t *request = data;
DRM_LOCK();
TAILQ_FOREACH(map, &dev->maplist, link) {
if (map->handle == request.handle &&
if (map->handle == request->handle &&
map->flags & _DRM_REMOVABLE)
break;
}
@ -351,7 +370,7 @@ int drm_rmmap_ioctl(DRM_IOCTL_ARGS)
/* No match found. */
if (map == NULL) {
DRM_UNLOCK();
return DRM_ERR(EINVAL);
return EINVAL;
}
drm_rmmap(dev, map);
@ -362,7 +381,8 @@ int drm_rmmap_ioctl(DRM_IOCTL_ARGS)
}
static void drm_cleanup_buf_error(drm_device_t *dev, drm_buf_entry_t *entry)
static void drm_cleanup_buf_error(struct drm_device *dev,
drm_buf_entry_t *entry)
{
int i;
@ -385,7 +405,7 @@ static void drm_cleanup_buf_error(drm_device_t *dev, drm_buf_entry_t *entry)
}
}
static int drm_do_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
static int drm_do_addbufs_agp(struct drm_device *dev, drm_buf_desc_t *request)
{
drm_device_dma_t *dma = dev->dma;
drm_buf_entry_t *entry;
@ -441,7 +461,7 @@ static int drm_do_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
}
if (!valid) {
DRM_DEBUG("zone invalid\n");
return DRM_ERR(EINVAL);
return EINVAL;
}*/
entry = &dma->bufs[order];
@ -449,7 +469,7 @@ static int drm_do_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
entry->buflist = malloc(count * sizeof(*entry->buflist), M_DRM,
M_NOWAIT | M_ZERO);
if ( !entry->buflist ) {
return DRM_ERR(ENOMEM);
return ENOMEM;
}
entry->buf_size = size;
@ -469,7 +489,7 @@ static int drm_do_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
buf->address = (void *)(agp_offset + offset);
buf->next = NULL;
buf->pending = 0;
buf->filp = NULL;
buf->file_priv = NULL;
buf->dev_priv_size = dev->driver.buf_priv_size;
buf->dev_private = malloc(buf->dev_priv_size, M_DRM,
@ -478,7 +498,7 @@ static int drm_do_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
/* Set count correctly so we free the proper amount. */
entry->buf_count = count;
drm_cleanup_buf_error(dev, entry);
return DRM_ERR(ENOMEM);
return ENOMEM;
}
offset += alignment;
@ -494,7 +514,7 @@ static int drm_do_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
if (temp_buflist == NULL) {
/* Free the entry because it isn't valid */
drm_cleanup_buf_error(dev, entry);
return DRM_ERR(ENOMEM);
return ENOMEM;
}
dma->buflist = temp_buflist;
@ -516,7 +536,7 @@ static int drm_do_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
return 0;
}
static int drm_do_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request)
static int drm_do_addbufs_pci(struct drm_device *dev, drm_buf_desc_t *request)
{
drm_device_dma_t *dma = dev->dma;
int count;
@ -563,7 +583,7 @@ static int drm_do_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request)
temp_pagelist == NULL) {
free(entry->buflist, M_DRM);
free(entry->seglist, M_DRM);
return DRM_ERR(ENOMEM);
return ENOMEM;
}
memcpy(temp_pagelist, dma->pagelist, dma->page_count *
@ -586,7 +606,7 @@ static int drm_do_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request)
entry->seg_count = count;
drm_cleanup_buf_error(dev, entry);
free(temp_pagelist, M_DRM);
return DRM_ERR(ENOMEM);
return ENOMEM;
}
entry->seglist[entry->seg_count++] = dmah;
@ -610,7 +630,7 @@ static int drm_do_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request)
buf->bus_address = dmah->busaddr + offset;
buf->next = NULL;
buf->pending = 0;
buf->filp = NULL;
buf->file_priv = NULL;
buf->dev_priv_size = dev->driver.buf_priv_size;
buf->dev_private = malloc(buf->dev_priv_size, M_DRM,
@ -621,7 +641,7 @@ static int drm_do_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request)
entry->seg_count = count;
drm_cleanup_buf_error(dev, entry);
free(temp_pagelist, M_DRM);
return DRM_ERR(ENOMEM);
return ENOMEM;
}
DRM_DEBUG( "buffer %d @ %p\n",
@ -637,7 +657,7 @@ static int drm_do_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request)
/* Free the entry because it isn't valid */
drm_cleanup_buf_error(dev, entry);
free(temp_pagelist, M_DRM);
return DRM_ERR(ENOMEM);
return ENOMEM;
}
dma->buflist = temp_buflist;
@ -663,7 +683,7 @@ static int drm_do_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request)
}
static int drm_do_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
static int drm_do_addbufs_sg(struct drm_device *dev, drm_buf_desc_t *request)
{
drm_device_dma_t *dma = dev->dma;
drm_buf_entry_t *entry;
@ -705,7 +725,7 @@ static int drm_do_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
entry->buflist = malloc(count * sizeof(*entry->buflist), M_DRM,
M_NOWAIT | M_ZERO);
if (entry->buflist == NULL)
return DRM_ERR(ENOMEM);
return ENOMEM;
entry->buf_size = size;
entry->page_order = page_order;
@ -724,7 +744,7 @@ static int drm_do_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
buf->address = (void *)(agp_offset + offset + dev->sg->handle);
buf->next = NULL;
buf->pending = 0;
buf->filp = NULL;
buf->file_priv = NULL;
buf->dev_priv_size = dev->driver.buf_priv_size;
buf->dev_private = malloc(buf->dev_priv_size, M_DRM,
@ -733,7 +753,7 @@ static int drm_do_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
/* Set count correctly so we free the proper amount. */
entry->buf_count = count;
drm_cleanup_buf_error(dev, entry);
return DRM_ERR(ENOMEM);
return ENOMEM;
}
DRM_DEBUG( "buffer %d @ %p\n",
@ -752,7 +772,7 @@ static int drm_do_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
if (temp_buflist == NULL) {
/* Free the entry because it isn't valid */
drm_cleanup_buf_error(dev, entry);
return DRM_ERR(ENOMEM);
return ENOMEM;
}
dma->buflist = temp_buflist;
@ -774,28 +794,28 @@ static int drm_do_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
return 0;
}
int drm_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
int drm_addbufs_agp(struct drm_device *dev, drm_buf_desc_t *request)
{
int order, ret;
DRM_SPINLOCK(&dev->dma_lock);
if (request->count < 0 || request->count > 4096)
return DRM_ERR(EINVAL);
return EINVAL;
order = drm_order(request->size);
if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
return DRM_ERR(EINVAL);
return EINVAL;
DRM_SPINLOCK(&dev->dma_lock);
/* No more allocations after first buffer-using ioctl. */
if (dev->buf_use != 0) {
DRM_SPINUNLOCK(&dev->dma_lock);
return DRM_ERR(EBUSY);
return EBUSY;
}
/* No more than one allocation per order */
if (dev->dma->bufs[order].buf_count != 0) {
DRM_SPINUNLOCK(&dev->dma_lock);
return DRM_ERR(ENOMEM);
return ENOMEM;
}
ret = drm_do_addbufs_agp(dev, request);
@ -805,31 +825,31 @@ int drm_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
return ret;
}
int drm_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
int drm_addbufs_sg(struct drm_device *dev, drm_buf_desc_t *request)
{
int order, ret;
DRM_SPINLOCK(&dev->dma_lock);
if (!DRM_SUSER(DRM_CURPROC))
return DRM_ERR(EACCES);
return EACCES;
if (request->count < 0 || request->count > 4096)
return DRM_ERR(EINVAL);
return EINVAL;
order = drm_order(request->size);
if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
return DRM_ERR(EINVAL);
return EINVAL;
DRM_SPINLOCK(&dev->dma_lock);
/* No more allocations after first buffer-using ioctl. */
if (dev->buf_use != 0) {
DRM_SPINUNLOCK(&dev->dma_lock);
return DRM_ERR(EBUSY);
return EBUSY;
}
/* No more than one allocation per order */
if (dev->dma->bufs[order].buf_count != 0) {
DRM_SPINUNLOCK(&dev->dma_lock);
return DRM_ERR(ENOMEM);
return ENOMEM;
}
ret = drm_do_addbufs_sg(dev, request);
@ -839,31 +859,31 @@ int drm_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
return ret;
}
int drm_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request)
int drm_addbufs_pci(struct drm_device *dev, drm_buf_desc_t *request)
{
int order, ret;
DRM_SPINLOCK(&dev->dma_lock);
if (!DRM_SUSER(DRM_CURPROC))
return DRM_ERR(EACCES);
return EACCES;
if (request->count < 0 || request->count > 4096)
return DRM_ERR(EINVAL);
return EINVAL;
order = drm_order(request->size);
if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
return DRM_ERR(EINVAL);
return EINVAL;
DRM_SPINLOCK(&dev->dma_lock);
/* No more allocations after first buffer-using ioctl. */
if (dev->buf_use != 0) {
DRM_SPINUNLOCK(&dev->dma_lock);
return DRM_ERR(EBUSY);
return EBUSY;
}
/* No more than one allocation per order */
if (dev->dma->bufs[order].buf_count != 0) {
DRM_SPINUNLOCK(&dev->dma_lock);
return DRM_ERR(ENOMEM);
return ENOMEM;
}
ret = drm_do_addbufs_pci(dev, request);
@ -873,39 +893,30 @@ int drm_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request)
return ret;
}
int drm_addbufs_ioctl(DRM_IOCTL_ARGS)
int drm_addbufs_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
DRM_DEVICE;
drm_buf_desc_t request;
drm_buf_desc_t *request = data;
int err;
DRM_COPY_FROM_USER_IOCTL(request, (drm_buf_desc_t *)data,
sizeof(request));
if (request.flags & _DRM_AGP_BUFFER)
err = drm_addbufs_agp(dev, &request);
else if (request.flags & _DRM_SG_BUFFER)
err = drm_addbufs_sg(dev, &request);
if (request->flags & _DRM_AGP_BUFFER)
err = drm_addbufs_agp(dev, request);
else if (request->flags & _DRM_SG_BUFFER)
err = drm_addbufs_sg(dev, request);
else
err = drm_addbufs_pci(dev, &request);
DRM_COPY_TO_USER_IOCTL((drm_buf_desc_t *)data, request,
sizeof(request));
err = drm_addbufs_pci(dev, request);
return err;
}
int drm_infobufs(DRM_IOCTL_ARGS)
int drm_infobufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEVICE;
drm_device_dma_t *dma = dev->dma;
drm_buf_info_t request;
drm_buf_info_t *request = data;
int i;
int count;
int retcode = 0;
DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_info_t *)data, sizeof(request) );
DRM_SPINLOCK(&dev->dma_lock);
++dev->buf_use; /* Can't allocate more after this call */
DRM_SPINUNLOCK(&dev->dma_lock);
@ -916,7 +927,7 @@ int drm_infobufs(DRM_IOCTL_ARGS)
DRM_DEBUG( "count = %d\n", count );
if ( request.count >= count ) {
if ( request->count >= count ) {
for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
if ( dma->bufs[i].buf_count ) {
drm_buf_desc_t from;
@ -926,9 +937,9 @@ int drm_infobufs(DRM_IOCTL_ARGS)
from.low_mark = dma->bufs[i].freelist.low_mark;
from.high_mark = dma->bufs[i].freelist.high_mark;
if (DRM_COPY_TO_USER(&request.list[count], &from,
if (DRM_COPY_TO_USER(&request->list[count], &from,
sizeof(drm_buf_desc_t)) != 0) {
retcode = DRM_ERR(EFAULT);
retcode = EFAULT;
break;
}
@ -942,76 +953,69 @@ int drm_infobufs(DRM_IOCTL_ARGS)
}
}
}
request.count = count;
DRM_COPY_TO_USER_IOCTL( (drm_buf_info_t *)data, request, sizeof(request) );
request->count = count;
return retcode;
}
int drm_markbufs(DRM_IOCTL_ARGS)
int drm_markbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEVICE;
drm_device_dma_t *dma = dev->dma;
drm_buf_desc_t request;
drm_buf_desc_t *request = data;
int order;
DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_desc_t *)data, sizeof(request) );
DRM_DEBUG( "%d, %d, %d\n",
request.size, request.low_mark, request.high_mark );
request->size, request->low_mark, request->high_mark );
order = drm_order(request.size);
order = drm_order(request->size);
if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ||
request.low_mark < 0 || request.high_mark < 0) {
return DRM_ERR(EINVAL);
request->low_mark < 0 || request->high_mark < 0) {
return EINVAL;
}
DRM_SPINLOCK(&dev->dma_lock);
if (request.low_mark > dma->bufs[order].buf_count ||
request.high_mark > dma->bufs[order].buf_count) {
return DRM_ERR(EINVAL);
if (request->low_mark > dma->bufs[order].buf_count ||
request->high_mark > dma->bufs[order].buf_count) {
DRM_SPINUNLOCK(&dev->dma_lock);
return EINVAL;
}
dma->bufs[order].freelist.low_mark = request.low_mark;
dma->bufs[order].freelist.high_mark = request.high_mark;
dma->bufs[order].freelist.low_mark = request->low_mark;
dma->bufs[order].freelist.high_mark = request->high_mark;
DRM_SPINUNLOCK(&dev->dma_lock);
return 0;
}
int drm_freebufs(DRM_IOCTL_ARGS)
int drm_freebufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEVICE;
drm_device_dma_t *dma = dev->dma;
drm_buf_free_t request;
drm_buf_free_t *request = data;
int i;
int idx;
drm_buf_t *buf;
int retcode = 0;
DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_free_t *)data, sizeof(request) );
DRM_DEBUG( "%d\n", request.count );
DRM_DEBUG( "%d\n", request->count );
DRM_SPINLOCK(&dev->dma_lock);
for ( i = 0 ; i < request.count ; i++ ) {
if (DRM_COPY_FROM_USER(&idx, &request.list[i], sizeof(idx))) {
retcode = DRM_ERR(EFAULT);
for ( i = 0 ; i < request->count ; i++ ) {
if (DRM_COPY_FROM_USER(&idx, &request->list[i], sizeof(idx))) {
retcode = EFAULT;
break;
}
if ( idx < 0 || idx >= dma->buf_count ) {
DRM_ERROR( "Index %d (of %d max)\n",
idx, dma->buf_count - 1 );
retcode = DRM_ERR(EINVAL);
retcode = EINVAL;
break;
}
buf = dma->buflist[idx];
if ( buf->filp != filp ) {
if ( buf->file_priv != file_priv ) {
DRM_ERROR("Process %d freeing buffer not owned\n",
DRM_CURRENTPID);
retcode = DRM_ERR(EINVAL);
retcode = EINVAL;
break;
}
drm_free_buffer(dev, buf);
@ -1021,9 +1025,8 @@ int drm_freebufs(DRM_IOCTL_ARGS)
return retcode;
}
int drm_mapbufs(DRM_IOCTL_ARGS)
int drm_mapbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEVICE;
drm_device_dma_t *dma = dev->dma;
int retcode = 0;
const int zero = 0;
@ -1040,27 +1043,25 @@ int drm_mapbufs(DRM_IOCTL_ARGS)
vaddr_t vaddr;
#endif /* __NetBSD__ || __OpenBSD__ */
drm_buf_map_t request;
drm_buf_map_t *request = data;
int i;
DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_map_t *)data, sizeof(request) );
#if defined(__NetBSD__) || defined(__OpenBSD__)
if (!vfinddev(kdev, VCHR, &vn))
return 0; /* FIXME: Shouldn't this be EINVAL or something? */
#endif /* __NetBSD__ || __OpenBSD */
#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
vms = p->td_proc->p_vmspace;
vms = DRM_CURPROC->td_proc->p_vmspace;
#else
vms = p->p_vmspace;
vms = DRM_CURPROC->p_vmspace;
#endif
DRM_SPINLOCK(&dev->dma_lock);
dev->buf_use++; /* Can't allocate more after this call */
DRM_SPINUNLOCK(&dev->dma_lock);
if (request.count < dma->buf_count)
if (request->count < dma->buf_count)
goto done;
if ((dev->driver.use_agp && (dma->flags & _DRM_DMA_USE_AGP)) ||
@ -1082,10 +1083,11 @@ int drm_mapbufs(DRM_IOCTL_ARGS)
vaddr = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
#if __FreeBSD_version >= 600023
retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
VM_PROT_ALL, MAP_SHARED, OBJT_DEVICE, kdev, foff );
VM_PROT_ALL, MAP_SHARED, OBJT_DEVICE, dev->devnode, foff);
#else
retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
VM_PROT_ALL, MAP_SHARED, SLIST_FIRST(&kdev->si_hlist), foff );
VM_PROT_ALL, MAP_SHARED, SLIST_FIRST(&dev->devnode->si_hlist),
foff);
#endif
#elif defined(__NetBSD__) || defined(__OpenBSD__)
vaddr = round_page((vaddr_t)vms->vm_daddr + MAXDSIZ);
@ -1096,26 +1098,26 @@ int drm_mapbufs(DRM_IOCTL_ARGS)
if (retcode)
goto done;
request.virtual = (void *)vaddr;
request->virtual = (void *)vaddr;
for ( i = 0 ; i < dma->buf_count ; i++ ) {
if (DRM_COPY_TO_USER(&request.list[i].idx,
&dma->buflist[i]->idx, sizeof(request.list[0].idx))) {
if (DRM_COPY_TO_USER(&request->list[i].idx,
&dma->buflist[i]->idx, sizeof(request->list[0].idx))) {
retcode = EFAULT;
goto done;
}
if (DRM_COPY_TO_USER(&request.list[i].total,
&dma->buflist[i]->total, sizeof(request.list[0].total))) {
if (DRM_COPY_TO_USER(&request->list[i].total,
&dma->buflist[i]->total, sizeof(request->list[0].total))) {
retcode = EFAULT;
goto done;
}
if (DRM_COPY_TO_USER(&request.list[i].used, &zero,
if (DRM_COPY_TO_USER(&request->list[i].used, &zero,
sizeof(zero))) {
retcode = EFAULT;
goto done;
}
address = vaddr + dma->buflist[i]->offset; /* *** */
if (DRM_COPY_TO_USER(&request.list[i].address, &address,
if (DRM_COPY_TO_USER(&request->list[i].address, &address,
sizeof(address))) {
retcode = EFAULT;
goto done;
@ -1123,11 +1125,9 @@ int drm_mapbufs(DRM_IOCTL_ARGS)
}
done:
request.count = dma->buf_count;
request->count = dma->buf_count;
DRM_DEBUG( "%d buffers, retcode = %d\n", request.count, retcode );
DRM_DEBUG( "%d buffers, retcode = %d\n", request->count, retcode );
DRM_COPY_TO_USER_IOCTL((drm_buf_map_t *)data, request, sizeof(request));
return DRM_ERR(retcode);
return retcode;
}
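
/*
 * Illustrative sketch (not part of this change): the buffer ioctls above all
 * move from the old DRM_IOCTL_ARGS convention, where every handler did its
 * own DRM_COPY_FROM_USER_IOCTL/DRM_COPY_TO_USER_IOCTL and returned
 * DRM_ERR(errno), to handlers of the form
 * (struct drm_device *dev, void *data, struct drm_file *file_priv), where
 * drm_ioctl() pre-copies the argument into *data, copies it back afterwards,
 * and plain positive errno values are returned.  drm_example_t and
 * drm_example_info() are made-up names showing the shape of a handler under
 * the new convention.
 */
int drm_example_info(struct drm_device *dev, void *data,
    struct drm_file *file_priv)
{
	drm_example_t *request = data;	/* already copied in by drm_ioctl() */

	if (request->count < 0)
		return EINVAL;		/* plain errno, no DRM_ERR() wrapper */

	DRM_SPINLOCK(&dev->dma_lock);
	request->count = dev->dma->buf_count;
	DRM_SPINUNLOCK(&dev->dma_lock);

	return 0;			/* *data is copied back out on return */
}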

View File

@ -1,6 +1,3 @@
/* drm_context.h -- IOCTLs for generic contexts -*- linux-c -*-
* Created: Fri Nov 24 18:31:37 2000 by gareth@valinux.com
*/
/*-
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
@ -34,13 +31,17 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/** @file drm_context.c
* Implementation of the context management ioctls.
*/
#include "dev/drm/drmP.h"
/* ================================================================
* Context bitmap support
*/
void drm_ctxbitmap_free(drm_device_t *dev, int ctx_handle)
void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle)
{
if (ctx_handle < 0 || ctx_handle >= DRM_MAX_CTXBITMAP ||
dev->ctx_bitmap == NULL) {
@ -56,7 +57,7 @@ void drm_ctxbitmap_free(drm_device_t *dev, int ctx_handle)
return;
}
int drm_ctxbitmap_next(drm_device_t *dev)
int drm_ctxbitmap_next(struct drm_device *dev)
{
int bit;
@ -103,7 +104,7 @@ int drm_ctxbitmap_next(drm_device_t *dev)
return bit;
}
int drm_ctxbitmap_init(drm_device_t *dev)
int drm_ctxbitmap_init(struct drm_device *dev)
{
int i;
int temp;
@ -112,7 +113,7 @@ int drm_ctxbitmap_init(drm_device_t *dev)
dev->ctx_bitmap = malloc(PAGE_SIZE, M_DRM, M_NOWAIT | M_ZERO);
if ( dev->ctx_bitmap == NULL ) {
DRM_UNLOCK();
return DRM_ERR(ENOMEM);
return ENOMEM;
}
dev->context_sareas = NULL;
dev->max_context = -1;
@ -126,7 +127,7 @@ int drm_ctxbitmap_init(drm_device_t *dev)
return 0;
}
void drm_ctxbitmap_cleanup(drm_device_t *dev)
void drm_ctxbitmap_cleanup(struct drm_device *dev)
{
DRM_LOCK();
if (dev->context_sareas != NULL)
@ -139,48 +140,41 @@ void drm_ctxbitmap_cleanup(drm_device_t *dev)
* Per Context SAREA Support
*/
int drm_getsareactx( DRM_IOCTL_ARGS )
int drm_getsareactx(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
DRM_DEVICE;
drm_ctx_priv_map_t request;
drm_ctx_priv_map_t *request = data;
drm_local_map_t *map;
DRM_COPY_FROM_USER_IOCTL( request, (drm_ctx_priv_map_t *)data,
sizeof(request) );
DRM_LOCK();
if (dev->max_context < 0 || request.ctx_id >= (unsigned) dev->max_context) {
if (dev->max_context < 0 ||
request->ctx_id >= (unsigned) dev->max_context) {
DRM_UNLOCK();
return DRM_ERR(EINVAL);
return EINVAL;
}
map = dev->context_sareas[request.ctx_id];
map = dev->context_sareas[request->ctx_id];
DRM_UNLOCK();
request.handle = map->handle;
DRM_COPY_TO_USER_IOCTL( (drm_ctx_priv_map_t *)data, request, sizeof(request) );
request->handle = map->handle;
return 0;
}
int drm_setsareactx( DRM_IOCTL_ARGS )
int drm_setsareactx(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
DRM_DEVICE;
drm_ctx_priv_map_t request;
drm_ctx_priv_map_t *request = data;
drm_local_map_t *map = NULL;
DRM_COPY_FROM_USER_IOCTL( request, (drm_ctx_priv_map_t *)data,
sizeof(request) );
DRM_LOCK();
TAILQ_FOREACH(map, &dev->maplist, link) {
if (map->handle == request.handle) {
if (map->handle == request->handle) {
if (dev->max_context < 0)
goto bad;
if (request.ctx_id >= (unsigned) dev->max_context)
if (request->ctx_id >= (unsigned) dev->max_context)
goto bad;
dev->context_sareas[request.ctx_id] = map;
dev->context_sareas[request->ctx_id] = map;
DRM_UNLOCK();
return 0;
}
@ -188,18 +182,18 @@ int drm_setsareactx( DRM_IOCTL_ARGS )
bad:
DRM_UNLOCK();
return DRM_ERR(EINVAL);
return EINVAL;
}
/* ================================================================
* The actual DRM context handling routines
*/
int drm_context_switch(drm_device_t *dev, int old, int new)
int drm_context_switch(struct drm_device *dev, int old, int new)
{
if ( test_and_set_bit( 0, &dev->context_flag ) ) {
DRM_ERROR( "Reentering -- FIXME\n" );
return DRM_ERR(EBUSY);
return EBUSY;
}
DRM_DEBUG( "Context switch from %d to %d\n", old, new );
@ -212,7 +206,7 @@ int drm_context_switch(drm_device_t *dev, int old, int new)
return 0;
}
int drm_context_switch_complete(drm_device_t *dev, int new)
int drm_context_switch_complete(struct drm_device *dev, int new)
{
dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
@ -228,120 +222,99 @@ int drm_context_switch_complete(drm_device_t *dev, int new)
return 0;
}
int drm_resctx(DRM_IOCTL_ARGS)
int drm_resctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_ctx_res_t res;
drm_ctx_res_t *res = data;
drm_ctx_t ctx;
int i;
DRM_COPY_FROM_USER_IOCTL( res, (drm_ctx_res_t *)data, sizeof(res) );
if ( res.count >= DRM_RESERVED_CONTEXTS ) {
if ( res->count >= DRM_RESERVED_CONTEXTS ) {
bzero(&ctx, sizeof(ctx));
for ( i = 0 ; i < DRM_RESERVED_CONTEXTS ; i++ ) {
ctx.handle = i;
if ( DRM_COPY_TO_USER( &res.contexts[i],
if ( DRM_COPY_TO_USER( &res->contexts[i],
&ctx, sizeof(ctx) ) )
return DRM_ERR(EFAULT);
return EFAULT;
}
}
res.count = DRM_RESERVED_CONTEXTS;
DRM_COPY_TO_USER_IOCTL( (drm_ctx_res_t *)data, res, sizeof(res) );
res->count = DRM_RESERVED_CONTEXTS;
return 0;
}
int drm_addctx(DRM_IOCTL_ARGS)
int drm_addctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEVICE;
drm_ctx_t ctx;
drm_ctx_t *ctx = data;
DRM_COPY_FROM_USER_IOCTL( ctx, (drm_ctx_t *)data, sizeof(ctx) );
ctx.handle = drm_ctxbitmap_next(dev);
if ( ctx.handle == DRM_KERNEL_CONTEXT ) {
ctx->handle = drm_ctxbitmap_next(dev);
if ( ctx->handle == DRM_KERNEL_CONTEXT ) {
/* Skip kernel's context and get a new one. */
ctx.handle = drm_ctxbitmap_next(dev);
ctx->handle = drm_ctxbitmap_next(dev);
}
DRM_DEBUG( "%d\n", ctx.handle );
if ( ctx.handle == -1 ) {
DRM_DEBUG( "%d\n", ctx->handle );
if ( ctx->handle == -1 ) {
DRM_DEBUG( "Not enough free contexts.\n" );
/* Should this return -EBUSY instead? */
return DRM_ERR(ENOMEM);
return ENOMEM;
}
if (dev->driver.context_ctor && ctx.handle != DRM_KERNEL_CONTEXT) {
if (dev->driver.context_ctor && ctx->handle != DRM_KERNEL_CONTEXT) {
DRM_LOCK();
dev->driver.context_ctor(dev, ctx.handle);
dev->driver.context_ctor(dev, ctx->handle);
DRM_UNLOCK();
}
DRM_COPY_TO_USER_IOCTL( (drm_ctx_t *)data, ctx, sizeof(ctx) );
return 0;
}
int drm_modctx(DRM_IOCTL_ARGS)
int drm_modctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
/* This does nothing */
return 0;
}
int drm_getctx(DRM_IOCTL_ARGS)
int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_ctx_t ctx;
DRM_COPY_FROM_USER_IOCTL( ctx, (drm_ctx_t *)data, sizeof(ctx) );
drm_ctx_t *ctx = data;
/* This is 0, because we don't handle any context flags */
ctx.flags = 0;
DRM_COPY_TO_USER_IOCTL( (drm_ctx_t *)data, ctx, sizeof(ctx) );
ctx->flags = 0;
return 0;
}
int drm_switchctx(DRM_IOCTL_ARGS)
int drm_switchctx(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
DRM_DEVICE;
drm_ctx_t ctx;
drm_ctx_t *ctx = data;
DRM_COPY_FROM_USER_IOCTL( ctx, (drm_ctx_t *)data, sizeof(ctx) );
DRM_DEBUG( "%d\n", ctx.handle );
return drm_context_switch(dev, dev->last_context, ctx.handle);
DRM_DEBUG( "%d\n", ctx->handle );
return drm_context_switch(dev, dev->last_context, ctx->handle);
}
int drm_newctx(DRM_IOCTL_ARGS)
int drm_newctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEVICE;
drm_ctx_t ctx;
drm_ctx_t *ctx = data;
DRM_COPY_FROM_USER_IOCTL( ctx, (drm_ctx_t *)data, sizeof(ctx) );
DRM_DEBUG( "%d\n", ctx.handle );
drm_context_switch_complete(dev, ctx.handle);
DRM_DEBUG( "%d\n", ctx->handle );
drm_context_switch_complete(dev, ctx->handle);
return 0;
}
int drm_rmctx(DRM_IOCTL_ARGS)
int drm_rmctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEVICE;
drm_ctx_t ctx;
drm_ctx_t *ctx = data;
DRM_COPY_FROM_USER_IOCTL( ctx, (drm_ctx_t *)data, sizeof(ctx) );
DRM_DEBUG( "%d\n", ctx.handle );
if ( ctx.handle != DRM_KERNEL_CONTEXT ) {
DRM_DEBUG( "%d\n", ctx->handle );
if ( ctx->handle != DRM_KERNEL_CONTEXT ) {
if (dev->driver.context_dtor) {
DRM_LOCK();
dev->driver.context_dtor(dev, ctx.handle);
dev->driver.context_dtor(dev, ctx->handle);
DRM_UNLOCK();
}
drm_ctxbitmap_free(dev, ctx.handle);
drm_ctxbitmap_free(dev, ctx->handle);
}
return 0;
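
/*
 * Illustrative userland sketch (not part of this change): the context ioctls
 * keep their user-visible interface, only the in-kernel calling convention
 * changed.  fd is assumed to be an authenticated master descriptor and the
 * drm.h include path depends on where the headers are installed.
 */
#include <sys/ioctl.h>
#include "drm.h"

static int drm_example_context_roundtrip(int fd)
{
	struct drm_ctx ctx = { 0 };

	if (ioctl(fd, DRM_IOCTL_ADD_CTX, &ctx) != 0)
		return -1;
	/* ctx.handle now holds the number handed out by drm_ctxbitmap_next() */
	return ioctl(fd, DRM_IOCTL_RM_CTX, &ctx);
}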

View File

@ -1,6 +1,3 @@
/* drm_dma.c -- DMA IOCTL and function support -*- linux-c -*-
* Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
*/
/*-
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
@ -34,21 +31,29 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/** @file drm_dma.c
* Support code for DMA buffer management.
*
* The implementation used to be significantly more complicated, but the
* complexity has been moved into the drivers as different buffer management
* schemes evolved.
*/
#include "dev/drm/drmP.h"
int drm_dma_setup(drm_device_t *dev)
int drm_dma_setup(struct drm_device *dev)
{
dev->dma = malloc(sizeof(*dev->dma), M_DRM, M_NOWAIT | M_ZERO);
if (dev->dma == NULL)
return DRM_ERR(ENOMEM);
return ENOMEM;
DRM_SPININIT(dev->dma_lock, "drmdma");
DRM_SPININIT(&dev->dma_lock, "drmdma");
return 0;
}
void drm_dma_takedown(drm_device_t *dev)
void drm_dma_takedown(struct drm_device *dev)
{
drm_device_dma_t *dma = dev->dma;
int i, j;
@ -83,27 +88,27 @@ void drm_dma_takedown(drm_device_t *dev)
free(dma->pagelist, M_DRM);
free(dev->dma, M_DRM);
dev->dma = NULL;
DRM_SPINUNINIT(dev->dma_lock);
DRM_SPINUNINIT(&dev->dma_lock);
}
void drm_free_buffer(drm_device_t *dev, drm_buf_t *buf)
void drm_free_buffer(struct drm_device *dev, drm_buf_t *buf)
{
if (!buf) return;
buf->pending = 0;
buf->filp = NULL;
buf->file_priv = NULL;
buf->used = 0;
}
void drm_reclaim_buffers(drm_device_t *dev, DRMFILE filp)
void drm_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
{
drm_device_dma_t *dma = dev->dma;
int i;
if (!dma) return;
for (i = 0; i < dma->buf_count; i++) {
if (dma->buflist[i]->filp == filp) {
if (dma->buflist[i]->file_priv == file_priv) {
switch (dma->buflist[i]->list) {
case DRM_LIST_NONE:
drm_free_buffer(dev, dma->buflist[i]);
@ -120,12 +125,12 @@ void drm_reclaim_buffers(drm_device_t *dev, DRMFILE filp)
}
/* Call into the driver-specific DMA handler */
int drm_dma(DRM_IOCTL_ARGS)
int drm_dma(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEVICE;
if (dev->driver.dma_ioctl) {
return dev->driver.dma_ioctl(kdev, cmd, data, flags, p, filp);
/* shared code returns -errno */
return -dev->driver.dma_ioctl(dev, data, file_priv);
} else {
DRM_DEBUG("DMA ioctl on driver with no dma handler\n");
return EINVAL;

View File

@ -1,6 +1,3 @@
/* drm_drawable.h -- IOCTLs for drawables -*- linux-c -*-
* Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
*/
/*-
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
@ -34,21 +31,145 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/** @file drm_drawable.c
* This file implements ioctls to store information along with DRM drawables,
* such as the current set of cliprects for vblank-synced buffer swaps.
*/
#include "dev/drm/drmP.h"
int drm_adddraw(DRM_IOCTL_ARGS)
{
drm_draw_t draw;
struct bsd_drm_drawable_info {
struct drm_drawable_info info;
int handle;
RB_ENTRY(bsd_drm_drawable_info) tree;
};
draw.handle = 0; /* NOOP */
DRM_DEBUG("%d\n", draw.handle);
DRM_COPY_TO_USER_IOCTL( (drm_draw_t *)data, draw, sizeof(draw) );
static int
drm_drawable_compare(struct bsd_drm_drawable_info *a,
struct bsd_drm_drawable_info *b)
{
if (a->handle > b->handle)
return 1;
if (a->handle < b->handle)
return -1;
return 0;
}
RB_GENERATE_STATIC(drawable_tree, bsd_drm_drawable_info, tree,
drm_drawable_compare);
struct drm_drawable_info *
drm_get_drawable_info(struct drm_device *dev, int handle)
{
struct bsd_drm_drawable_info find, *result;
find.handle = handle;
result = RB_FIND(drawable_tree, &dev->drw_head, &find);
return &result->info;
}
int drm_adddraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_draw_t *draw = data;
struct bsd_drm_drawable_info *info;
info = drm_calloc(1, sizeof(struct bsd_drm_drawable_info),
DRM_MEM_DRAWABLE);
if (info == NULL)
return ENOMEM;
info->handle = alloc_unr(dev->drw_unrhdr);
DRM_SPINLOCK(&dev->drw_lock);
RB_INSERT(drawable_tree, &dev->drw_head, info);
draw->handle = info->handle;
DRM_SPINUNLOCK(&dev->drw_lock);
DRM_DEBUG("%d\n", draw->handle);
return 0;
}
int drm_rmdraw(DRM_IOCTL_ARGS)
int drm_rmdraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
return 0; /* NOOP */
drm_draw_t *draw = (drm_draw_t *)data;
struct drm_drawable_info *info;
DRM_SPINLOCK(&dev->drw_lock);
info = drm_get_drawable_info(dev, draw->handle);
if (info != NULL) {
RB_REMOVE(drawable_tree, &dev->drw_head,
(struct bsd_drm_drawable_info *)info);
DRM_SPINUNLOCK(&dev->drw_lock);
free_unr(dev->drw_unrhdr, draw->handle);
drm_free(info, sizeof(struct bsd_drm_drawable_info),
DRM_MEM_DRAWABLE);
return 0;
} else {
DRM_SPINUNLOCK(&dev->drw_lock);
return EINVAL;
}
}
int drm_update_draw(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_drawable_info *info;
struct drm_update_draw *update = (struct drm_update_draw *)data;
int ret;
info = drm_get_drawable_info(dev, update->handle);
if (info == NULL)
return EINVAL;
switch (update->type) {
case DRM_DRAWABLE_CLIPRECTS:
DRM_SPINLOCK(&dev->drw_lock);
if (update->num != info->num_rects) {
drm_free(info->rects,
sizeof(*info->rects) * info->num_rects,
DRM_MEM_DRAWABLE);
info->rects = NULL;
info->num_rects = 0;
}
if (update->num == 0) {
DRM_SPINUNLOCK(&dev->drw_lock);
return 0;
}
if (info->rects == NULL) {
info->rects = drm_alloc(sizeof(*info->rects) *
update->num, DRM_MEM_DRAWABLE);
if (info->rects == NULL) {
DRM_SPINUNLOCK(&dev->drw_lock);
return ENOMEM;
}
info->num_rects = update->num;
}
/* For some reason the pointer arg is unsigned long long. */
ret = copyin((void *)(intptr_t)update->data, info->rects,
sizeof(*info->rects) * info->num_rects);
DRM_SPINUNLOCK(&dev->drw_lock);
return ret;
default:
return EINVAL;
}
}
void drm_drawable_free_all(struct drm_device *dev)
{
struct bsd_drm_drawable_info *info, *next;
DRM_SPINLOCK(&dev->drw_lock);
for (info = RB_MIN(drawable_tree, &dev->drw_head);
info != NULL ; info = next) {
next = RB_NEXT(drawable_tree, &dev->drw_head, info);
RB_REMOVE(drawable_tree, &dev->drw_head,
(struct bsd_drm_drawable_info *)info);
DRM_SPINUNLOCK(&dev->drw_lock);
free_unr(dev->drw_unrhdr, info->handle);
drm_free(info, sizeof(struct bsd_drm_drawable_info),
DRM_MEM_DRAWABLE);
DRM_SPINLOCK(&dev->drw_lock);
}
DRM_SPINUNLOCK(&dev->drw_lock);
}
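
/*
 * Illustrative userland sketch (not part of this change): the new drawable
 * code gives DRM_IOCTL_ADD_DRAW real handles and lets clients attach
 * cliprects with DRM_IOCTL_UPDATE_DRAW, which lands in drm_update_draw()
 * above and is pulled in with copyin().  fd is assumed to be an
 * authenticated master descriptor; error handling is omitted.
 */
#include <sys/ioctl.h>
#include <stdint.h>
#include "drm.h"

static int drm_example_set_cliprect(int fd)
{
	struct drm_draw draw = { 0 };
	struct drm_clip_rect rect = { .x1 = 0, .y1 = 0, .x2 = 640, .y2 = 480 };
	struct drm_update_draw update;

	if (ioctl(fd, DRM_IOCTL_ADD_DRAW, &draw) != 0)
		return -1;

	update.handle = draw.handle;
	update.type = DRM_DRAWABLE_CLIPRECTS;
	update.num = 1;
	update.data = (unsigned long long)(uintptr_t)&rect;

	return ioctl(fd, DRM_IOCTL_UPDATE_DRAW, &update);
}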

View File

@ -1,6 +1,3 @@
/* drm_drv.h -- Generic driver template -*- linux-c -*-
* Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
*/
/*-
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
@ -34,6 +31,13 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/** @file drm_drv.c
* The catch-all file for DRM device support, including module setup/teardown,
* open/close, and ioctl dispatch.
*/
#include <sys/limits.h>
#include "dev/drm/drmP.h"
#include "dev/drm/drm.h"
#include "dev/drm/drm_sarea.h"
@ -44,14 +48,14 @@ int drm_debug_flag = 1;
int drm_debug_flag = 0;
#endif
static int drm_load(drm_device_t *dev);
static void drm_unload(drm_device_t *dev);
static int drm_load(struct drm_device *dev);
static void drm_unload(struct drm_device *dev);
static drm_pci_id_list_t *drm_find_description(int vendor, int device,
drm_pci_id_list_t *idlist);
#ifdef __FreeBSD__
#define DRIVER_SOFTC(unit) \
((drm_device_t *)devclass_get_softc(drm_devclass, unit))
((struct drm_device *)devclass_get_softc(drm_devclass, unit))
MODULE_VERSION(drm, 1);
MODULE_DEPEND(drm, agp, 1, 1, 1);
@ -63,67 +67,69 @@ MODULE_DEPEND(drm, mem, 1, 1, 1);
#if defined(__NetBSD__) || defined(__OpenBSD__)
#define DRIVER_SOFTC(unit) \
((drm_device_t *)device_lookup(&drm_cd, unit))
((struct drm_device *)device_lookup(&drm_cd, unit))
#endif /* __NetBSD__ || __OpenBSD__ */
static drm_ioctl_desc_t drm_ioctls[256] = {
[DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { drm_version, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { drm_getunique, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { drm_getmagic, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = { drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY},
[DRM_IOCTL_NR(DRM_IOCTL_GET_MAP)] = { drm_getmap, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT)] = { drm_getclient, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_GET_STATS)] = { drm_getstats, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_SET_VERSION)] = { drm_setversion, DRM_MASTER|DRM_ROOT_ONLY },
DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER|DRM_ROOT_ONLY),
[DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
[DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
[DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
[DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
[DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)] = { drm_rmmap_ioctl, DRM_AUTH },
DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),
[DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = { drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
[DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = { drm_getsareactx, DRM_AUTH },
DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH),
[DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { drm_addctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
[DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
[DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
[DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { drm_getctx, DRM_AUTH },
[DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
[DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
[DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { drm_resctx, DRM_AUTH },
DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH),
[DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
[DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
[DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { drm_lock, DRM_AUTH },
[DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { drm_unlock, DRM_AUTH },
[DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { drm_noop, DRM_AUTH },
DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),
[DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { drm_addbufs_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
[DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = { drm_markbufs, DRM_AUTH|DRM_MASTER },
[DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = { drm_infobufs, DRM_AUTH },
[DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = { drm_mapbufs, DRM_AUTH },
[DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = { drm_freebufs, DRM_AUTH },
[DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { drm_dma, DRM_AUTH },
DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_dma, DRM_AUTH),
[DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = { drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = { drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = { drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = { drm_agp_info_ioctl, DRM_AUTH },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = { drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = { drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
[DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = { drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
[DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC)] = { drm_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
[DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] = { drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
[DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = { drm_wait_vblank, 0 },
DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_draw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
};
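
/*
 * Illustrative sketch (not part of this change): DRM_IOCTL_DEF replaces the
 * open-coded designated initializers above.  The real macro lives in drmP.h;
 * a definition along the following lines would preserve the array indexing
 * while also recording the ioctl command in the entry so it can be
 * cross-checked at dispatch time.
 */
#ifdef notdef	/* sketch only -- the actual definition is in drmP.h */
#define DRM_IOCTL_DEF(ioctl, _func, _flags)		\
	[DRM_IOCTL_NR(ioctl)] = {ioctl, _func, _flags}
#endif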
#ifdef __FreeBSD__
@ -178,7 +184,7 @@ int drm_probe(device_t dev, drm_pci_id_list_t *idlist)
int drm_attach(device_t nbdev, drm_pci_id_list_t *idlist)
{
drm_device_t *dev;
struct drm_device *dev;
drm_pci_id_list_t *id_entry;
int unit;
@ -200,7 +206,11 @@ int drm_attach(device_t nbdev, drm_pci_id_list_t *idlist)
DRM_DEV_MODE,
"dri/card%d", unit);
#if __FreeBSD_version >= 500000
mtx_init(&dev->dev_lock, "drm device", NULL, MTX_DEF);
mtx_init(&dev->dev_lock, "drmdev", NULL, MTX_DEF);
mtx_init(&dev->irq_lock, "drmirq", NULL, MTX_DEF);
mtx_init(&dev->vbl_lock, "drmvbl", NULL, MTX_DEF);
mtx_init(&dev->drw_lock, "drmdrw", NULL, MTX_DEF);
mtx_init(&dev->tsk_lock, "drmtsk", NULL, MTX_DEF);
#endif
id_entry = drm_find_description(pci_get_vendor(dev->device),
@ -316,14 +326,15 @@ void drm_attach(struct pci_attach_args *pa, dev_t kdev,
drm_pci_id_list_t *idlist)
{
int i;
drm_device_t *dev;
struct drm_device *dev;
drm_pci_id_list_t *id_entry;
config_makeroom(kdev, &drm_cd);
drm_cd.cd_devs[(kdev)] = malloc(sizeof(drm_device_t), M_DRM, M_WAITOK);
drm_cd.cd_devs[(kdev)] = malloc(sizeof(struct drm_device),
M_DRM, M_WAITOK);
dev = DRIVER_SOFTC(kdev);
memset(dev, 0, sizeof(drm_device_t));
memset(dev, 0, sizeof(struct drm_device));
memcpy(&dev->pa, pa, sizeof(dev->pa));
dev->irq = pa->pa_intrline;
@ -343,7 +354,7 @@ void drm_attach(struct pci_attach_args *pa, dev_t kdev,
int drm_detach(struct device *self, int flags)
{
drm_unload((drm_device_t *)self);
drm_unload((struct drm_device *)self);
return 0;
}
@ -376,7 +387,7 @@ drm_pci_id_list_t *drm_find_description(int vendor, int device,
return NULL;
}
static int drm_firstopen(drm_device_t *dev)
static int drm_firstopen(struct drm_device *dev)
{
drm_local_map_t *map;
int i;
@ -400,17 +411,6 @@ static int drm_firstopen(drm_device_t *dev)
return i;
}
dev->counters = 6;
dev->types[0] = _DRM_STAT_LOCK;
dev->types[1] = _DRM_STAT_OPENS;
dev->types[2] = _DRM_STAT_CLOSES;
dev->types[3] = _DRM_STAT_IOCTLS;
dev->types[4] = _DRM_STAT_LOCKS;
dev->types[5] = _DRM_STAT_UNLOCKS;
for ( i = 0 ; i < DRM_ARRAY_SIZE(dev->counts) ; i++ )
atomic_set( &dev->counts[i], 0 );
for ( i = 0 ; i < DRM_HASH_SIZE ; i++ ) {
dev->magiclist[i].head = NULL;
dev->magiclist[i].tail = NULL;
@ -433,7 +433,7 @@ static int drm_firstopen(drm_device_t *dev)
return 0;
}
static int drm_lastclose(drm_device_t *dev)
static int drm_lastclose(struct drm_device *dev)
{
drm_magic_entry_t *pt, *next;
drm_local_map_t *map, *mapsave;
@ -463,6 +463,8 @@ static int drm_lastclose(drm_device_t *dev)
dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
}
drm_drawable_free_all(dev);
/* Clear AGP information */
if ( dev->agp ) {
drm_agp_mem_t *entry;
@ -492,29 +494,32 @@ static int drm_lastclose(drm_device_t *dev)
}
TAILQ_FOREACH_SAFE(map, &dev->maplist, link, mapsave) {
drm_rmmap(dev, map);
if (!(map->flags & _DRM_DRIVER))
drm_rmmap(dev, map);
}
drm_dma_takedown(dev);
if ( dev->lock.hw_lock ) {
dev->lock.hw_lock = NULL; /* SHM removed */
dev->lock.filp = NULL;
dev->lock.file_priv = NULL;
DRM_WAKEUP_INT((void *)&dev->lock.lock_queue);
}
return 0;
}
static int drm_load(drm_device_t *dev)
static int drm_load(struct drm_device *dev)
{
int retcode;
int i, retcode;
DRM_DEBUG( "\n" );
dev->irq = pci_get_irq(dev->device);
/* XXX Fix domain number (alpha hoses) */
#if defined(__FreeBSD__) && __FreeBSD_version >= 700053
dev->pci_domain = pci_get_domain(dev->device);
#else
dev->pci_domain = 0;
#endif
dev->pci_bus = pci_get_bus(dev->device);
dev->pci_slot = pci_get_slot(dev->device);
dev->pci_func = pci_get_function(dev->device);
@ -530,9 +535,24 @@ static int drm_load(drm_device_t *dev)
#endif
TAILQ_INIT(&dev->files);
dev->counters = 6;
dev->types[0] = _DRM_STAT_LOCK;
dev->types[1] = _DRM_STAT_OPENS;
dev->types[2] = _DRM_STAT_CLOSES;
dev->types[3] = _DRM_STAT_IOCTLS;
dev->types[4] = _DRM_STAT_LOCKS;
dev->types[5] = _DRM_STAT_UNLOCKS;
for ( i = 0 ; i < DRM_ARRAY_SIZE(dev->counts) ; i++ )
atomic_set( &dev->counts[i], 0 );
if (dev->driver.load != NULL) {
DRM_LOCK();
retcode = dev->driver.load(dev, dev->id_entry->driver_private);
/* Shared code returns -errno. */
retcode = -dev->driver.load(dev,
dev->id_entry->driver_private);
if (pci_enable_busmaster(dev->device))
DRM_ERROR("Request to enable bus-master failed.\n");
DRM_UNLOCK();
if (retcode != 0)
goto error;
@ -544,7 +564,7 @@ static int drm_load(drm_device_t *dev)
if (dev->driver.require_agp && dev->agp == NULL) {
DRM_ERROR("Card isn't AGP, or couldn't initialize "
"AGP.\n");
retcode = DRM_ERR(ENOMEM);
retcode = ENOMEM;
goto error;
}
if (dev->agp != NULL) {
@ -559,7 +579,13 @@ static int drm_load(drm_device_t *dev)
DRM_ERROR("Cannot allocate memory for context bitmap.\n");
goto error;
}
dev->drw_unrhdr = new_unrhdr(1, INT_MAX, NULL);
if (dev->drw_unrhdr == NULL) {
DRM_ERROR("Couldn't allocate drawable number allocator\n");
goto error;
}
DRM_INFO("Initialized %s %d.%d.%d %s\n",
dev->driver.name,
dev->driver.major,
@ -579,13 +605,17 @@ error:
#ifdef __FreeBSD__
destroy_dev(dev->devnode);
#if __FreeBSD_version >= 500000
mtx_destroy(&dev->tsk_lock);
mtx_destroy(&dev->drw_lock);
mtx_destroy(&dev->vbl_lock);
mtx_destroy(&dev->irq_lock);
mtx_destroy(&dev->dev_lock);
#endif
#endif
return retcode;
}
static void drm_unload(drm_device_t *dev)
static void drm_unload(struct drm_device *dev)
{
int i;
@ -631,46 +661,51 @@ static void drm_unload(drm_device_t *dev)
if (dev->driver.unload != NULL)
dev->driver.unload(dev);
delete_unrhdr(dev->drw_unrhdr);
drm_mem_uninit();
if (pci_disable_busmaster(dev->device))
DRM_ERROR("Request to disable bus-master failed.\n");
#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
mtx_destroy(&dev->tsk_lock);
mtx_destroy(&dev->drw_lock);
mtx_destroy(&dev->vbl_lock);
mtx_destroy(&dev->irq_lock);
mtx_destroy(&dev->dev_lock);
#endif
}
int drm_version(DRM_IOCTL_ARGS)
int drm_version(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEVICE;
drm_version_t version;
drm_version_t *version = data;
int len;
DRM_COPY_FROM_USER_IOCTL( version, (drm_version_t *)data, sizeof(version) );
#define DRM_COPY( name, value ) \
len = strlen( value ); \
if ( len > name##_len ) len = name##_len; \
name##_len = strlen( value ); \
if ( len && name ) { \
if ( DRM_COPY_TO_USER( name, value, len ) ) \
return DRM_ERR(EFAULT); \
return EFAULT; \
}
version.version_major = dev->driver.major;
version.version_minor = dev->driver.minor;
version.version_patchlevel = dev->driver.patchlevel;
version->version_major = dev->driver.major;
version->version_minor = dev->driver.minor;
version->version_patchlevel = dev->driver.patchlevel;
DRM_COPY(version.name, dev->driver.name);
DRM_COPY(version.date, dev->driver.date);
DRM_COPY(version.desc, dev->driver.desc);
DRM_COPY_TO_USER_IOCTL( (drm_version_t *)data, version, sizeof(version) );
DRM_COPY(version->name, dev->driver.name);
DRM_COPY(version->date, dev->driver.date);
DRM_COPY(version->desc, dev->driver.desc);
return 0;
}
int drm_open(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p)
{
drm_device_t *dev = NULL;
struct drm_device *dev = NULL;
int retcode = 0;
dev = DRIVER_SOFTC(minor(kdev));
@ -695,27 +730,26 @@ int drm_open(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p)
int drm_close(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p)
{
drm_file_t *priv;
DRM_DEVICE;
struct drm_device *dev = drm_get_device_from_kdev(kdev);
drm_file_t *file_priv;
int retcode = 0;
DRMFILE filp = (void *)(uintptr_t)(DRM_CURRENTPID);
DRM_DEBUG( "open_count = %d\n", dev->open_count );
DRM_LOCK();
priv = drm_find_file_by_proc(dev, p);
if (!priv) {
file_priv = drm_find_file_by_proc(dev, p);
if (!file_priv) {
DRM_UNLOCK();
DRM_ERROR("can't find authenticator\n");
return EINVAL;
}
if (--priv->refs != 0)
if (--file_priv->refs != 0)
goto done;
if (dev->driver.preclose != NULL)
dev->driver.preclose(dev, filp);
dev->driver.preclose(dev, file_priv);
/* ========================================================
* Begin inline drm_release
@ -730,12 +764,12 @@ int drm_close(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p)
#endif
if (dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)
&& dev->lock.filp == filp) {
&& dev->lock.file_priv == file_priv) {
DRM_DEBUG("Process %d dead, freeing lock for context %d\n",
DRM_CURRENTPID,
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
if (dev->driver.reclaim_buffers_locked != NULL)
dev->driver.reclaim_buffers_locked(dev, filp);
dev->driver.reclaim_buffers_locked(dev, file_priv);
drm_lock_free(dev, &dev->lock.hw_lock->lock,
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
@ -750,19 +784,19 @@ int drm_close(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p)
for (;;) {
if ( !dev->lock.hw_lock ) {
/* Device has been unregistered */
retcode = DRM_ERR(EINTR);
retcode = EINTR;
break;
}
if (drm_lock_take(&dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT)) {
dev->lock.filp = filp;
dev->lock.file_priv = file_priv;
dev->lock.lock_time = jiffies;
atomic_inc( &dev->counts[_DRM_STAT_LOCKS] );
break; /* Got lock */
}
/* Contention */
#if defined(__FreeBSD__) && __FreeBSD_version > 500000
retcode = msleep((void *)&dev->lock.lock_queue,
retcode = mtx_sleep((void *)&dev->lock.lock_queue,
&dev->dev_lock, PZERO | PCATCH, "drmlk2", 0);
#else
retcode = tsleep((void *)&dev->lock.lock_queue,
@ -772,14 +806,14 @@ int drm_close(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p)
break;
}
if (retcode == 0) {
dev->driver.reclaim_buffers_locked(dev, filp);
dev->driver.reclaim_buffers_locked(dev, file_priv);
drm_lock_free(dev, &dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT);
}
}
if (dev->driver.use_dma && !dev->driver.reclaim_buffers_locked)
drm_reclaim_buffers(dev, filp);
drm_reclaim_buffers(dev, file_priv);
#if defined (__FreeBSD__) && (__FreeBSD_version >= 500000)
funsetown(&dev->buf_sigio);
@ -790,16 +824,15 @@ int drm_close(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p)
#endif /* __NetBSD__ || __OpenBSD__ */
if (dev->driver.postclose != NULL)
dev->driver.postclose(dev, priv);
TAILQ_REMOVE(&dev->files, priv, link);
free(priv, M_DRM);
dev->driver.postclose(dev, file_priv);
TAILQ_REMOVE(&dev->files, file_priv, link);
free(file_priv, M_DRM);
/* ========================================================
* End inline drm_release
*/
done:
done:
atomic_inc( &dev->counts[_DRM_STAT_CLOSES] );
#ifdef __FreeBSD__
device_unbusy(dev->device);
@ -818,32 +851,33 @@ int drm_close(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p)
int drm_ioctl(struct cdev *kdev, u_long cmd, caddr_t data, int flags,
DRM_STRUCTPROC *p)
{
DRM_DEVICE;
struct drm_device *dev = drm_get_device_from_kdev(kdev);
int retcode = 0;
drm_ioctl_desc_t *ioctl;
int (*func)(DRM_IOCTL_ARGS);
int (*func)(struct drm_device *dev, void *data, struct drm_file *file_priv);
int nr = DRM_IOCTL_NR(cmd);
int is_driver_ioctl = 0;
drm_file_t *priv;
DRMFILE filp = (DRMFILE)(uintptr_t)DRM_CURRENTPID;
drm_file_t *file_priv;
DRM_LOCK();
priv = drm_find_file_by_proc(dev, p);
file_priv = drm_find_file_by_proc(dev, p);
DRM_UNLOCK();
if (priv == NULL) {
if (file_priv == NULL) {
DRM_ERROR("can't find authenticator\n");
return EINVAL;
}
atomic_inc( &dev->counts[_DRM_STAT_IOCTLS] );
++priv->ioctl_count;
++file_priv->ioctl_count;
#ifdef __FreeBSD__
DRM_DEBUG( "pid=%d, cmd=0x%02lx, nr=0x%02x, dev 0x%lx, auth=%d\n",
DRM_CURRENTPID, cmd, nr, (long)dev->device, priv->authenticated );
DRM_CURRENTPID, cmd, nr, (long)dev->device,
file_priv->authenticated );
#elif defined(__NetBSD__) || defined(__OpenBSD__)
DRM_DEBUG( "pid=%d, cmd=0x%02lx, nr=0x%02x, dev 0x%lx, auth=%d\n",
DRM_CURRENTPID, cmd, nr, (long)&dev->device, priv->authenticated );
DRM_CURRENTPID, cmd, nr, (long)&dev->device,
file_priv->authenticated );
#endif
switch (cmd) {
@ -898,26 +932,39 @@ int drm_ioctl(struct cdev *kdev, u_long cmd, caddr_t data, int flags,
DRM_DEBUG( "no function\n" );
return EINVAL;
}
/* ioctl->master check should be against something in the filp set up
* for the first opener, but it doesn't matter yet.
*/
if (((ioctl->flags & DRM_ROOT_ONLY) && !DRM_SUSER(p)) ||
((ioctl->flags & DRM_AUTH) && !priv->authenticated) ||
((ioctl->flags & DRM_MASTER) && !priv->master))
((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) ||
((ioctl->flags & DRM_MASTER) && !file_priv->master))
return EACCES;
if (is_driver_ioctl)
if (is_driver_ioctl) {
DRM_LOCK();
retcode = func(kdev, cmd, data, flags, p, filp);
if (is_driver_ioctl)
/* shared code returns -errno */
retcode = -func(dev, data, file_priv);
DRM_UNLOCK();
} else {
retcode = func(dev, data, file_priv);
}
if (retcode != 0)
DRM_DEBUG(" returning %d\n", retcode);
return DRM_ERR(retcode);
return retcode;
}
drm_local_map_t *drm_getsarea(struct drm_device *dev)
{
drm_local_map_t *map;
DRM_SPINLOCK_ASSERT(&dev->dev_lock);
TAILQ_FOREACH(map, &dev->maplist, link) {
if (map->type == _DRM_SHM && (map->flags & _DRM_CONTAINS_LOCK))
return map;
}
return NULL;
}
#if DRM_LINUX

View File

@ -1,6 +1,3 @@
/* drm_fops.h -- File operations for DRM -*- linux-c -*-
* Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
*/
/*-
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
@ -35,9 +32,14 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/** @file drm_fops.c
* Support code for dealing with the file privates associated with each
* open of the DRM device.
*/
#include "dev/drm/drmP.h"
drm_file_t *drm_find_file_by_proc(drm_device_t *dev, DRM_STRUCTPROC *p)
drm_file_t *drm_find_file_by_proc(struct drm_device *dev, DRM_STRUCTPROC *p)
{
#if __FreeBSD_version >= 500021
uid_t uid = p->td_ucred->cr_svuid;
@ -58,7 +60,7 @@ drm_file_t *drm_find_file_by_proc(drm_device_t *dev, DRM_STRUCTPROC *p)
/* drm_open_helper is called whenever a process opens /dev/drm. */
int drm_open_helper(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p,
drm_device_t *dev)
struct drm_device *dev)
{
int m = minor(kdev);
drm_file_t *priv;
@ -78,7 +80,7 @@ int drm_open_helper(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p,
priv = malloc(sizeof(*priv), M_DRM, M_NOWAIT | M_ZERO);
if (priv == NULL) {
DRM_UNLOCK();
return DRM_ERR(ENOMEM);
return ENOMEM;
}
#if __FreeBSD_version >= 500000
priv->uid = p->td_ucred->cr_svuid;
@ -96,7 +98,8 @@ int drm_open_helper(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p,
priv->authenticated = DRM_SUSER(p);
if (dev->driver.open) {
retcode = dev->driver.open(dev, priv);
/* shared code returns -errno */
retcode = -dev->driver.open(dev, priv);
if (retcode != 0) {
free(priv, M_DRM);
DRM_UNLOCK();

View File

@ -0,0 +1,43 @@
/*-
* Copyright 2007 Red Hat, Inc
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/* This header file holds function prototypes and data types that are
* internal to the drm (not exported to user space) but shared across
* drivers and platforms. */
#ifndef __DRM_INTERNAL_H__
#define __DRM_INTERNAL_H__
/**
* Drawable information.
*/
struct drm_drawable_info {
unsigned int num_rects;
struct drm_clip_rect *rects;
};
#endif
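
/*
 * Illustrative sketch (not part of this change): a driver consuming the
 * cliprects attached to a drawable, e.g. before a vblank-synced swap.
 * drm_get_drawable_info() is the lookup added in drm_drawable.c;
 * example_emit_blit() is a made-up driver function, and the caller is
 * assumed to hold dev->drw_lock while the rects are walked.
 */
static void example_clip_blit(struct drm_device *dev, int handle)
{
	struct drm_drawable_info *info;
	unsigned int i;

	info = drm_get_drawable_info(dev, handle);
	if (info == NULL || info->rects == NULL)
		return;

	for (i = 0; i < info->num_rects; i++)
		example_emit_blit(dev, &info->rects[i]);
}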

View File

@ -1,6 +1,3 @@
/* drm_ioctl.h -- IOCTL processing for DRM -*- linux-c -*-
* Created: Fri Jan 8 09:01:26 1999 by faith@valinux.com
*/
/*-
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
@ -34,6 +31,11 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/** @file drm_ioctl.c
* Various minor DRM ioctls not applicable to other files, such as versioning
* information and reporting DRM information to userland.
*/
#include "dev/drm/drmP.h"
/*
@ -42,20 +44,16 @@ __FBSDID("$FreeBSD$");
* before setunique has been called. The format for the bus-specific part of
* the unique is not defined for any other bus.
*/
int drm_getunique(DRM_IOCTL_ARGS)
int drm_getunique(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
DRM_DEVICE;
drm_unique_t u;
drm_unique_t *u = data;
DRM_COPY_FROM_USER_IOCTL( u, (drm_unique_t *)data, sizeof(u) );
if (u.unique_len >= dev->unique_len) {
if (DRM_COPY_TO_USER(u.unique, dev->unique, dev->unique_len))
return DRM_ERR(EFAULT);
if (u->unique_len >= dev->unique_len) {
if (DRM_COPY_TO_USER(u->unique, dev->unique, dev->unique_len))
return EFAULT;
}
u.unique_len = dev->unique_len;
DRM_COPY_TO_USER_IOCTL( (drm_unique_t *)data, u, sizeof(u) );
u->unique_len = dev->unique_len;
return 0;
}
@ -63,28 +61,26 @@ int drm_getunique(DRM_IOCTL_ARGS)
/* Deprecated in DRM version 1.1, and will return EBUSY when setversion has
* requested version 1.1 or greater.
*/
int drm_setunique(DRM_IOCTL_ARGS)
int drm_setunique(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
DRM_DEVICE;
drm_unique_t u;
drm_unique_t *u = data;
int domain, bus, slot, func, ret;
char *busid;
DRM_COPY_FROM_USER_IOCTL( u, (drm_unique_t *)data, sizeof(u) );
/* Check and copy in the submitted Bus ID */
if (!u.unique_len || u.unique_len > 1024)
return DRM_ERR(EINVAL);
if (!u->unique_len || u->unique_len > 1024)
return EINVAL;
busid = malloc(u.unique_len + 1, M_DRM, M_WAITOK);
busid = malloc(u->unique_len + 1, M_DRM, M_WAITOK);
if (busid == NULL)
return DRM_ERR(ENOMEM);
return ENOMEM;
if (DRM_COPY_FROM_USER(busid, u.unique, u.unique_len)) {
if (DRM_COPY_FROM_USER(busid, u->unique, u->unique_len)) {
free(busid, M_DRM);
return DRM_ERR(EFAULT);
return EFAULT;
}
busid[u.unique_len] = '\0';
busid[u->unique_len] = '\0';
/* Return error if the busid submitted doesn't match the device's actual
* busid.
@ -92,7 +88,7 @@ int drm_setunique(DRM_IOCTL_ARGS)
ret = sscanf(busid, "PCI:%d:%d:%d", &bus, &slot, &func);
if (ret != 3) {
free(busid, M_DRM);
return DRM_ERR(EINVAL);
return EINVAL;
}
domain = bus >> 8;
bus &= 0xff;
@ -102,17 +98,17 @@ int drm_setunique(DRM_IOCTL_ARGS)
(slot != dev->pci_slot) ||
(func != dev->pci_func)) {
free(busid, M_DRM);
return DRM_ERR(EINVAL);
return EINVAL;
}
/* Actually set the device's busid now. */
DRM_LOCK();
if (dev->unique_len || dev->unique) {
DRM_UNLOCK();
return DRM_ERR(EBUSY);
return EBUSY;
}
dev->unique_len = u.unique_len;
dev->unique_len = u->unique_len;
dev->unique = busid;
DRM_UNLOCK();
@ -121,7 +117,7 @@ int drm_setunique(DRM_IOCTL_ARGS)
static int
drm_set_busid(drm_device_t *dev)
drm_set_busid(struct drm_device *dev)
{
DRM_LOCK();
@ -146,32 +142,29 @@ drm_set_busid(drm_device_t *dev)
return 0;
}
int drm_getmap(DRM_IOCTL_ARGS)
int drm_getmap(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEVICE;
drm_map_t map;
drm_map_t *map = data;
drm_local_map_t *mapinlist;
int idx;
int i = 0;
DRM_COPY_FROM_USER_IOCTL( map, (drm_map_t *)data, sizeof(map) );
idx = map.offset;
idx = map->offset;
DRM_LOCK();
if (idx < 0) {
DRM_UNLOCK();
return DRM_ERR(EINVAL);
return EINVAL;
}
TAILQ_FOREACH(mapinlist, &dev->maplist, link) {
if (i==idx) {
map.offset = mapinlist->offset;
map.size = mapinlist->size;
map.type = mapinlist->type;
map.flags = mapinlist->flags;
map.handle = mapinlist->handle;
map.mtrr = mapinlist->mtrr;
map->offset = mapinlist->offset;
map->size = mapinlist->size;
map->type = mapinlist->type;
map->flags = mapinlist->flags;
map->handle = mapinlist->handle;
map->mtrr = mapinlist->mtrr;
break;
}
i++;
@ -182,100 +175,91 @@ int drm_getmap(DRM_IOCTL_ARGS)
if (mapinlist == NULL)
return EINVAL;
DRM_COPY_TO_USER_IOCTL( (drm_map_t *)data, map, sizeof(map) );
return 0;
}
int drm_getclient(DRM_IOCTL_ARGS)
int drm_getclient(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
DRM_DEVICE;
drm_client_t client;
drm_client_t *client = data;
drm_file_t *pt;
int idx;
int i = 0;
DRM_COPY_FROM_USER_IOCTL( client, (drm_client_t *)data, sizeof(client) );
idx = client.idx;
idx = client->idx;
DRM_LOCK();
TAILQ_FOREACH(pt, &dev->files, link) {
if (i==idx)
{
client.auth = pt->authenticated;
client.pid = pt->pid;
client.uid = pt->uid;
client.magic = pt->magic;
client.iocs = pt->ioctl_count;
client->auth = pt->authenticated;
client->pid = pt->pid;
client->uid = pt->uid;
client->magic = pt->magic;
client->iocs = pt->ioctl_count;
DRM_UNLOCK();
*(drm_client_t *)data = client;
return 0;
}
i++;
}
DRM_UNLOCK();
DRM_COPY_TO_USER_IOCTL( (drm_client_t *)data, client, sizeof(client) );
return 0;
return EINVAL;
}
int drm_getstats(DRM_IOCTL_ARGS)
int drm_getstats(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEVICE;
drm_stats_t stats;
drm_stats_t *stats = data;
int i;
memset(&stats, 0, sizeof(stats));
memset(stats, 0, sizeof(drm_stats_t));
DRM_LOCK();
for (i = 0; i < dev->counters; i++) {
if (dev->types[i] == _DRM_STAT_LOCK)
stats.data[i].value
stats->data[i].value
= (dev->lock.hw_lock
? dev->lock.hw_lock->lock : 0);
else
stats.data[i].value = atomic_read(&dev->counts[i]);
stats.data[i].type = dev->types[i];
stats->data[i].value = atomic_read(&dev->counts[i]);
stats->data[i].type = dev->types[i];
}
stats.count = dev->counters;
stats->count = dev->counters;
DRM_UNLOCK();
DRM_COPY_TO_USER_IOCTL( (drm_stats_t *)data, stats, sizeof(stats) );
return 0;
}
#define DRM_IF_MAJOR 1
#define DRM_IF_MINOR 2
int drm_setversion(DRM_IOCTL_ARGS)
int drm_setversion(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
DRM_DEVICE;
drm_set_version_t sv;
drm_set_version_t retv;
drm_set_version_t *sv = data;
drm_set_version_t ver;
int if_version;
DRM_COPY_FROM_USER_IOCTL(sv, (drm_set_version_t *)data, sizeof(sv));
/* Save the incoming data, and set the response before continuing
* any further.
*/
ver = *sv;
sv->drm_di_major = DRM_IF_MAJOR;
sv->drm_di_minor = DRM_IF_MINOR;
sv->drm_dd_major = dev->driver.major;
sv->drm_dd_minor = dev->driver.minor;
retv.drm_di_major = DRM_IF_MAJOR;
retv.drm_di_minor = DRM_IF_MINOR;
retv.drm_dd_major = dev->driver.major;
retv.drm_dd_minor = dev->driver.minor;
DRM_COPY_TO_USER_IOCTL((drm_set_version_t *)data, retv, sizeof(sv));
if (sv.drm_di_major != -1) {
if (sv.drm_di_major != DRM_IF_MAJOR ||
sv.drm_di_minor < 0 || sv.drm_di_minor > DRM_IF_MINOR)
if (ver.drm_di_major != -1) {
if (ver.drm_di_major != DRM_IF_MAJOR ||
ver.drm_di_minor < 0 || ver.drm_di_minor > DRM_IF_MINOR) {
return EINVAL;
if_version = DRM_IF_VERSION(sv.drm_di_major, sv.drm_dd_minor);
}
if_version = DRM_IF_VERSION(ver.drm_di_major,
ver.drm_dd_minor);
dev->if_version = DRM_MAX(if_version, dev->if_version);
if (sv.drm_di_minor >= 1) {
if (ver.drm_di_minor >= 1) {
/*
* Version 1.1 includes tying of DRM to a specific device
*/
@ -283,16 +267,20 @@ int drm_setversion(DRM_IOCTL_ARGS)
}
}
if (sv.drm_dd_major != -1) {
if (sv.drm_dd_major != dev->driver.major ||
sv.drm_dd_minor < 0 || sv.drm_dd_minor > dev->driver.minor)
if (ver.drm_dd_major != -1) {
if (ver.drm_dd_major != dev->driver.major ||
ver.drm_dd_minor < 0 ||
ver.drm_dd_minor > dev->driver.minor)
{
return EINVAL;
}
}
return 0;
}
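
/*
 * Illustrative userland sketch (not part of this change): drm_setversion()
 * now fills the reply into *data before validating the request, so the
 * caller always gets the supported interface and driver versions back even
 * when the requested combination is rejected.  fd is assumed to be a master
 * descriptor; the drm.h include path depends on the installation.
 */
#include <sys/ioctl.h>
#include "drm.h"

static int drm_example_negotiate(int fd)
{
	struct drm_set_version sv;

	sv.drm_di_major = 1;	/* ask for interface 1.1 */
	sv.drm_di_minor = 1;
	sv.drm_dd_major = -1;	/* don't constrain the driver version */
	sv.drm_dd_minor = -1;

	return ioctl(fd, DRM_IOCTL_SET_VERSION, &sv);
}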
int drm_noop(DRM_IOCTL_ARGS)
int drm_noop(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEBUG("\n");
return 0;

View File

@ -1,6 +1,3 @@
/* drm_irq.c -- IRQ IOCTL and function support
* Created: Fri Oct 18 2003 by anholt@FreeBSD.org
*/
/*-
* Copyright 2003 Eric Anholt
* All Rights Reserved.
@ -31,28 +28,31 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/** @file drm_irq.c
* Support code for handling setup/teardown of interrupt handlers and
* handing interrupt handlers off to the drivers.
*/
#include "dev/drm/drmP.h"
#include "dev/drm/drm.h"
int drm_irq_by_busid(DRM_IOCTL_ARGS)
static void drm_locked_task(void *context, int pending __unused);
int drm_irq_by_busid(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
DRM_DEVICE;
drm_irq_busid_t irq;
drm_irq_busid_t *irq = data;
DRM_COPY_FROM_USER_IOCTL(irq, (drm_irq_busid_t *)data, sizeof(irq));
if ((irq.busnum >> 8) != dev->pci_domain ||
(irq.busnum & 0xff) != dev->pci_bus ||
irq.devnum != dev->pci_slot ||
irq.funcnum != dev->pci_func)
if ((irq->busnum >> 8) != dev->pci_domain ||
(irq->busnum & 0xff) != dev->pci_bus ||
irq->devnum != dev->pci_slot ||
irq->funcnum != dev->pci_func)
return EINVAL;
irq.irq = dev->irq;
irq->irq = dev->irq;
DRM_DEBUG("%d:%d:%d => IRQ %d\n",
irq.busnum, irq.devnum, irq.funcnum, irq.irq);
DRM_COPY_TO_USER_IOCTL( (drm_irq_busid_t *)data, irq, sizeof(irq) );
irq->busnum, irq->devnum, irq->funcnum, irq->irq);
return 0;
}
@ -61,7 +61,7 @@ int drm_irq_by_busid(DRM_IOCTL_ARGS)
static irqreturn_t
drm_irq_handler_wrap(DRM_IRQ_ARGS)
{
drm_device_t *dev = (drm_device_t *)arg;
struct drm_device *dev = arg;
DRM_SPINLOCK(&dev->irq_lock);
dev->driver.irq_handler(arg);
@ -69,7 +69,89 @@ drm_irq_handler_wrap(DRM_IRQ_ARGS)
}
#endif
int drm_irq_install(drm_device_t *dev)
static void vblank_disable_fn(void *arg)
{
struct drm_device *dev = (struct drm_device *)arg;
int i;
if (callout_pending(&dev->vblank_disable_timer)) {
/* callout was reset */
return;
}
if (!callout_active(&dev->vblank_disable_timer)) {
/* callout was stopped */
return;
}
callout_deactivate(&dev->vblank_disable_timer);
if (!dev->vblank_disable_allowed)
return;
for (i = 0; i < dev->num_crtcs; i++) {
if (atomic_read(&dev->vblank[i].refcount) == 0 &&
dev->vblank[i].enabled) {
DRM_DEBUG("disabling vblank on crtc %d\n", i);
dev->vblank[i].last =
dev->driver.get_vblank_counter(dev, i);
dev->driver.disable_vblank(dev, i);
dev->vblank[i].enabled = 0;
}
}
}
static void drm_vblank_cleanup(struct drm_device *dev)
{
unsigned long irqflags;
/* Bail if the driver didn't call drm_vblank_init() */
if (dev->num_crtcs == 0)
return;
DRM_SPINLOCK_IRQSAVE(&dev->vbl_lock, irqflags);
callout_stop(&dev->vblank_disable_timer);
DRM_SPINUNLOCK_IRQRESTORE(&dev->vbl_lock, irqflags);
callout_drain(&dev->vblank_disable_timer);
vblank_disable_fn((void *)dev);
drm_free(dev->vblank, sizeof(struct drm_vblank_info) * dev->num_crtcs,
DRM_MEM_DRIVER);
dev->num_crtcs = 0;
}
int drm_vblank_init(struct drm_device *dev, int num_crtcs)
{
int i, ret = ENOMEM;
callout_init_mtx(&dev->vblank_disable_timer, &dev->vbl_lock, 0);
atomic_set(&dev->vbl_signal_pending, 0);
dev->num_crtcs = num_crtcs;
dev->vblank = drm_calloc(num_crtcs, sizeof(struct drm_vblank_info),
DRM_MEM_DRIVER);
if (!dev->vblank)
goto err;
/* Zero per-crtc vblank stuff */
for (i = 0; i < num_crtcs; i++) {
DRM_INIT_WAITQUEUE(&dev->vblank[i].queue);
TAILQ_INIT(&dev->vblank[i].sigs);
atomic_set(&dev->vblank[i].count, 0);
atomic_set(&dev->vblank[i].refcount, 0);
}
dev->vblank_disable_allowed = 0;
return 0;
err:
drm_vblank_cleanup(dev);
return ret;
}
int drm_irq_install(struct drm_device *dev)
{
int retcode;
#ifdef __NetBSD__
@ -77,21 +159,19 @@ int drm_irq_install(drm_device_t *dev)
#endif
if (dev->irq == 0 || dev->dev_private == NULL)
return DRM_ERR(EINVAL);
return EINVAL;
DRM_DEBUG( "%s: irq=%d\n", __FUNCTION__, dev->irq );
DRM_LOCK();
if (dev->irq_enabled) {
DRM_UNLOCK();
return DRM_ERR(EBUSY);
return EBUSY;
}
dev->irq_enabled = 1;
dev->context_flag = 0;
DRM_SPININIT(dev->irq_lock, "DRM IRQ lock");
/* Before installing handler */
dev->driver.irq_preinstall(dev);
DRM_UNLOCK();
@ -105,12 +185,14 @@ int drm_irq_install(drm_device_t *dev)
retcode = ENOENT;
goto err;
}
#if __FreeBSD_version < 500000
retcode = bus_setup_intr(dev->device, dev->irqr, INTR_TYPE_TTY,
dev->irq_handler, dev, &dev->irqh);
#else
retcode = bus_setup_intr(dev->device, dev->irqr, INTR_TYPE_TTY | INTR_MPSAFE,
#if __FreeBSD_version >= 700031
retcode = bus_setup_intr(dev->device, dev->irqr,
INTR_TYPE_TTY | INTR_MPSAFE,
NULL, drm_irq_handler_wrap, dev, &dev->irqh);
#else
retcode = bus_setup_intr(dev->device, dev->irqr,
INTR_TYPE_TTY | INTR_MPSAFE,
drm_irq_handler_wrap, dev, &dev->irqh);
#endif
if (retcode != 0)
goto err;
@ -132,6 +214,7 @@ int drm_irq_install(drm_device_t *dev)
dev->driver.irq_postinstall(dev);
DRM_UNLOCK();
TASK_INIT(&dev->locked_task, 0, drm_locked_task, dev);
return 0;
err:
DRM_LOCK();
@ -143,19 +226,18 @@ err:
dev->irqrid = 0;
}
#endif
DRM_SPINUNINIT(dev->irq_lock);
DRM_UNLOCK();
return retcode;
}
int drm_irq_uninstall(drm_device_t *dev)
int drm_irq_uninstall(struct drm_device *dev)
{
#ifdef __FreeBSD__
int irqrid;
#endif
if (!dev->irq_enabled)
return DRM_ERR(EINVAL);
return EINVAL;
dev->irq_enabled = 0;
#ifdef __FreeBSD__
@ -175,20 +257,17 @@ int drm_irq_uninstall(drm_device_t *dev)
#elif defined(__NetBSD__) || defined(__OpenBSD__)
pci_intr_disestablish(&dev->pa.pa_pc, dev->irqh);
#endif
DRM_SPINUNINIT(dev->irq_lock);
drm_vblank_cleanup(dev);
return 0;
}
int drm_control(DRM_IOCTL_ARGS)
int drm_control(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEVICE;
drm_control_t ctl;
drm_control_t *ctl = data;
int err;
DRM_COPY_FROM_USER_IOCTL( ctl, (drm_control_t *) data, sizeof(ctl) );
switch ( ctl.func ) {
switch ( ctl->func ) {
case DRM_INST_HANDLER:
/* Handle drivers whose DRM used to require IRQ setup but
 * no longer does.
@ -196,8 +275,8 @@ int drm_control(DRM_IOCTL_ARGS)
if (!dev->driver.use_irq)
return 0;
if (dev->if_version < DRM_IF_VERSION(1, 2) &&
ctl.irq != dev->irq)
return DRM_ERR(EINVAL);
ctl->irq != dev->irq)
return EINVAL;
return drm_irq_install(dev);
case DRM_UNINST_HANDLER:
if (!dev->driver.use_irq)
@ -207,29 +286,170 @@ int drm_control(DRM_IOCTL_ARGS)
DRM_UNLOCK();
return err;
default:
return DRM_ERR(EINVAL);
return EINVAL;
}
}
int drm_wait_vblank(DRM_IOCTL_ARGS)
u32 drm_vblank_count(struct drm_device *dev, int crtc)
{
DRM_DEVICE;
drm_wait_vblank_t vblwait;
struct timeval now;
int ret;
return atomic_read(&dev->vblank[crtc].count);
}
if (!dev->irq_enabled)
return DRM_ERR(EINVAL);
static void drm_update_vblank_count(struct drm_device *dev, int crtc)
{
u32 cur_vblank, diff;
DRM_COPY_FROM_USER_IOCTL( vblwait, (drm_wait_vblank_t *)data,
sizeof(vblwait) );
/*
* Interrupts were disabled prior to this call, so deal with counter
* wrap if needed.
* NOTE! It's possible we lost a full dev->max_vblank_count events
* here if the register is small or we had vblank interrupts off for
* a long time.
*/
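/*
 * Hypothetical worked example of the wrap handling below: with a 24-bit
 * hardware counter (max_vblank_count == 0x1000000), a last value of
 * 0xfffffe and a current reading of 0x000003, the plain unsigned
 * subtraction gives a bogus, huge diff; because cur_vblank is less than
 * last we add max_vblank_count, yielding diff == 5, the number of
 * vblanks that actually elapsed.
 */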
cur_vblank = dev->driver.get_vblank_counter(dev, crtc);
diff = cur_vblank - dev->vblank[crtc].last;
if (cur_vblank < dev->vblank[crtc].last) {
diff += dev->max_vblank_count;
if (vblwait.request.type & _DRM_VBLANK_RELATIVE) {
vblwait.request.sequence += atomic_read(&dev->vbl_received);
vblwait.request.type &= ~_DRM_VBLANK_RELATIVE;
DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
crtc, dev->vblank[crtc].last, cur_vblank, diff);
}
DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
crtc, diff);
atomic_add(diff, &dev->vblank[crtc].count);
}
int drm_vblank_get(struct drm_device *dev, int crtc)
{
unsigned long irqflags;
int ret = 0;
DRM_SPINLOCK_IRQSAVE(&dev->vbl_lock, irqflags);
/* Going from 0->1 means we have to enable interrupts again */
atomic_add_acq_int(&dev->vblank[crtc].refcount, 1);
if (dev->vblank[crtc].refcount == 1 &&
!dev->vblank[crtc].enabled) {
ret = dev->driver.enable_vblank(dev, crtc);
if (ret)
atomic_dec(&dev->vblank[crtc].refcount);
else {
dev->vblank[crtc].enabled = 1;
drm_update_vblank_count(dev, crtc);
}
}
DRM_SPINUNLOCK_IRQRESTORE(&dev->vbl_lock, irqflags);
return ret;
}
void drm_vblank_put(struct drm_device *dev, int crtc)
{
unsigned long irqflags;
DRM_SPINLOCK_IRQSAVE(&dev->vbl_lock, irqflags);
/* Last user schedules interrupt disable */
atomic_subtract_acq_int(&dev->vblank[crtc].refcount, 1);
if (dev->vblank[crtc].refcount == 0)
callout_reset(&dev->vblank_disable_timer, 5 * DRM_HZ,
(timeout_t *)vblank_disable_fn, (void *)dev);
DRM_SPINUNLOCK_IRQRESTORE(&dev->vbl_lock, irqflags);
}
int drm_modeset_ctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_modeset_ctl *modeset = data;
unsigned long irqflags;
int crtc, ret = 0;
/* If drm_vblank_init() hasn't been called yet, just no-op */
if (!dev->num_crtcs)
goto out;
crtc = modeset->crtc;
if (crtc >= dev->num_crtcs) {
ret = EINVAL;
goto out;
}
/*
* To avoid all the problems that might happen if interrupts
* were enabled/disabled around or between these calls, we just
* have the kernel take a reference on the CRTC (just once though
* to avoid corrupting the count if multiple, mismatched calls occur),
* so that interrupts remain enabled in the interim.
*/
switch (modeset->cmd) {
case _DRM_PRE_MODESET:
if (!dev->vblank[crtc].inmodeset) {
dev->vblank[crtc].inmodeset = 1;
drm_vblank_get(dev, crtc);
}
break;
case _DRM_POST_MODESET:
if (dev->vblank[crtc].inmodeset) {
DRM_SPINLOCK_IRQSAVE(&dev->vbl_lock, irqflags);
dev->vblank_disable_allowed = 1;
dev->vblank[crtc].inmodeset = 0;
DRM_SPINUNLOCK_IRQRESTORE(&dev->vbl_lock, irqflags);
drm_vblank_put(dev, crtc);
}
break;
default:
ret = EINVAL;
break;
}
out:
return ret;
}
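A minimal userland sketch of how this ioctl is meant to bracket a mode set (illustrative only; struct drm_modeset_ctl and DRM_IOCTL_MODESET_CTL are the standard drm.h definitions):

#include <sys/ioctl.h>
#include "drm.h"

static void example_modeset(int fd)
{
	struct drm_modeset_ctl ctl;

	ctl.crtc = 0;
	ctl.cmd = _DRM_PRE_MODESET;	/* kernel holds a vblank reference */
	ioctl(fd, DRM_IOCTL_MODESET_CTL, &ctl);

	/* ... reprogram the CRTC timings ... */

	ctl.cmd = _DRM_POST_MODESET;	/* vblank disable allowed again */
	ioctl(fd, DRM_IOCTL_MODESET_CTL, &ctl);
}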
int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_wait_vblank_t *vblwait = data;
int ret = 0;
int flags, seq, crtc;
if (!dev->irq_enabled)
return EINVAL;
if (vblwait->request.type &
~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) {
DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n",
vblwait->request.type,
(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK));
return EINVAL;
}
flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
if (crtc >= dev->num_crtcs)
return EINVAL;
ret = drm_vblank_get(dev, crtc);
if (ret)
return ret;
seq = drm_vblank_count(dev, crtc);
switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
case _DRM_VBLANK_RELATIVE:
vblwait->request.sequence += seq;
vblwait->request.type &= ~_DRM_VBLANK_RELATIVE;
case _DRM_VBLANK_ABSOLUTE:
break;
default:
ret = EINVAL;
goto done;
}
if ((flags & _DRM_VBLANK_NEXTONMISS) &&
(seq - vblwait->request.sequence) <= (1<<23)) {
vblwait->request.sequence = seq + 1;
}
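/*
 * The unsigned test above treats "seq - requested" values of up to
 * (1<<23), half the counter range, as "requested sequence already
 * reached or missed"; in that case NEXTONMISS retargets the wait to the
 * next vblank, seq + 1.  A larger difference means the request is still
 * in the future and is left untouched.
 */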
flags = vblwait.request.type & _DRM_VBLANK_FLAGS_MASK;
if (flags & _DRM_VBLANK_SIGNAL) {
#if 0 /* disabled */
drm_vbl_sig_t *vbl_sig = malloc(sizeof(drm_vbl_sig_t), M_DRM,
@ -237,40 +457,48 @@ int drm_wait_vblank(DRM_IOCTL_ARGS)
if (vbl_sig == NULL)
return ENOMEM;
vbl_sig->sequence = vblwait.request.sequence;
vbl_sig->signo = vblwait.request.signal;
vbl_sig->sequence = vblwait->request.sequence;
vbl_sig->signo = vblwait->request.signal;
vbl_sig->pid = DRM_CURRENTPID;
vblwait.reply.sequence = atomic_read(&dev->vbl_received);
vblwait->reply.sequence = atomic_read(&dev->vbl_received);
DRM_SPINLOCK(&dev->irq_lock);
DRM_SPINLOCK(&dev->vbl_lock);
TAILQ_INSERT_HEAD(&dev->vbl_sig_list, vbl_sig, link);
DRM_SPINUNLOCK(&dev->irq_lock);
DRM_SPINUNLOCK(&dev->vbl_lock);
ret = 0;
#endif
ret = EINVAL;
} else {
DRM_LOCK();
ret = dev->driver.vblank_wait(dev, &vblwait.request.sequence);
/* shared code returns -errno */
DRM_WAIT_ON(ret, dev->vblank[crtc].queue, 3 * DRM_HZ,
((drm_vblank_count(dev, crtc)
- vblwait->request.sequence) <= (1 << 23)));
DRM_UNLOCK();
microtime(&now);
vblwait.reply.tval_sec = now.tv_sec;
vblwait.reply.tval_usec = now.tv_usec;
if (ret != EINTR) {
struct timeval now;
microtime(&now);
vblwait->reply.tval_sec = now.tv_sec;
vblwait->reply.tval_usec = now.tv_usec;
vblwait->reply.sequence = drm_vblank_count(dev, crtc);
}
}
DRM_COPY_TO_USER_IOCTL( (drm_wait_vblank_t *)data, vblwait,
sizeof(vblwait) );
done:
drm_vblank_put(dev, crtc);
return ret;
}
void drm_vbl_send_signals(drm_device_t *dev)
void drm_vbl_send_signals(struct drm_device *dev, int crtc)
{
}
#if 0 /* disabled */
void drm_vbl_send_signals( drm_device_t *dev )
void drm_vbl_send_signals(struct drm_device *dev, int crtc )
{
drm_vbl_sig_t *vbl_sig;
unsigned int vbl_seq = atomic_read( &dev->vbl_received );
@ -292,3 +520,54 @@ void drm_vbl_send_signals( drm_device_t *dev )
}
}
#endif
void drm_handle_vblank(struct drm_device *dev, int crtc)
{
atomic_inc(&dev->vblank[crtc].count);
DRM_WAKEUP(&dev->vblank[crtc].queue);
drm_vbl_send_signals(dev, crtc);
}
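To show how the pieces above fit together, a hedged sketch of the driver side (the foo_* names are placeholders, not part of this change):

/* Assumes the usual drmP.h driver context. */
static int foo_load(struct drm_device *dev)
{
	/* One counter, wait queue and refcount per CRTC. */
	return drm_vblank_init(dev, 2);
}

static void foo_irq_handler(void *arg)
{
	struct drm_device *dev = arg;

	/* A vblank interrupt fired on crtc 0: bump the software counter
	 * and wake any process sleeping in drm_wait_vblank().
	 */
	drm_handle_vblank(dev, 0);
}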
static void drm_locked_task(void *context, int pending __unused)
{
struct drm_device *dev = context;
DRM_SPINLOCK(&dev->tsk_lock);
DRM_LOCK(); /* XXX drm_lock_take() should do its own locking */
if (dev->locked_task_call == NULL ||
drm_lock_take(&dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT) == 0) {
DRM_UNLOCK();
DRM_SPINUNLOCK(&dev->tsk_lock);
return;
}
dev->lock.file_priv = NULL; /* kernel owned */
dev->lock.lock_time = jiffies;
atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
DRM_UNLOCK();
dev->locked_task_call(dev);
drm_lock_free(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT);
dev->locked_task_call = NULL;
DRM_SPINUNLOCK(&dev->tsk_lock);
}
void
drm_locked_tasklet(struct drm_device *dev,
void (*tasklet)(struct drm_device *dev))
{
DRM_SPINLOCK(&dev->tsk_lock);
if (dev->locked_task_call != NULL) {
DRM_SPINUNLOCK(&dev->tsk_lock);
return;
}
dev->locked_task_call = tasklet;
DRM_SPINUNLOCK(&dev->tsk_lock);
taskqueue_enqueue(taskqueue_swi, &dev->locked_task);
}


@ -1,6 +1,3 @@
/* lock.c -- IOCTLs for locking -*- linux-c -*-
* Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
*/
/*-
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
@ -34,6 +31,25 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/** @file drm_lock.c
* Implementation of the ioctls and other support code for dealing with the
* hardware lock.
*
* The DRM hardware lock is a shared structure between the kernel and userland.
*
* On uncontended access where the new context was the last context, the
* client may take the lock without dropping down into the kernel, using atomic
* compare-and-set.
*
* If the client finds during compare-and-set that it was not the last owner
* of the lock, it calls the DRM lock ioctl, which may sleep waiting for the
* lock, and may have side-effects of kernel-managed context switching.
*
* When the client releases the lock, if the lock is marked as being contended
* by another client, then the DRM unlock ioctl is called so that the
* contending client may be woken up.
*/
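A loose userland sketch of the fast path described above (illustrative only: it ignores the contention flag and error handling, and substitutes a GCC builtin for whatever compare-and-set the real client library uses):

#include <sys/ioctl.h>
#include "drm.h"

static void example_take_lock(int fd, volatile unsigned int *lock,
    unsigned int context)
{
	drm_lock_t req;
	unsigned int old, new;

	old = *lock;
	new = context | _DRM_LOCK_HELD;

	/* Uncontended and we were the last holder: claim the lock with a
	 * single compare-and-set, never entering the kernel.
	 */
	if (old == context &&
	    __sync_bool_compare_and_swap(lock, old, new))
		return;

	/* Otherwise fall back to the lock ioctl, which may sleep until
	 * the current holder releases the lock.
	 */
	req.context = context;
	req.flags = 0;
	ioctl(fd, DRM_IOCTL_LOCK, &req);
}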
#include "dev/drm/drmP.h"
int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)
@ -64,12 +80,12 @@ int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)
/* This takes a lock forcibly and hands it to context. Should ONLY be used
inside *_unlock to give lock to kernel before calling *_dma_schedule. */
int drm_lock_transfer(drm_device_t *dev,
int drm_lock_transfer(struct drm_device *dev,
__volatile__ unsigned int *lock, unsigned int context)
{
unsigned int old, new;
dev->lock.filp = NULL;
dev->lock.file_priv = NULL;
do {
old = *lock;
new = context | _DRM_LOCK_HELD;
@ -78,12 +94,12 @@ int drm_lock_transfer(drm_device_t *dev,
return 1;
}
int drm_lock_free(drm_device_t *dev,
int drm_lock_free(struct drm_device *dev,
__volatile__ unsigned int *lock, unsigned int context)
{
unsigned int old, new;
dev->lock.filp = NULL;
dev->lock.file_priv = NULL;
do {
old = *lock;
new = 0;
@ -98,30 +114,28 @@ int drm_lock_free(drm_device_t *dev,
return 0;
}
int drm_lock(DRM_IOCTL_ARGS)
int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEVICE;
drm_lock_t lock;
drm_lock_t *lock = data;
int ret = 0;
DRM_COPY_FROM_USER_IOCTL(lock, (drm_lock_t *)data, sizeof(lock));
if (lock.context == DRM_KERNEL_CONTEXT) {
if (lock->context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
DRM_CURRENTPID, lock.context);
DRM_CURRENTPID, lock->context);
return EINVAL;
}
DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
lock.context, DRM_CURRENTPID, dev->lock.hw_lock->lock, lock.flags);
lock->context, DRM_CURRENTPID, dev->lock.hw_lock->lock,
lock->flags);
if (dev->driver.use_dma_queue && lock.context < 0)
if (dev->driver.use_dma_queue && lock->context < 0)
return EINVAL;
DRM_LOCK();
for (;;) {
if (drm_lock_take(&dev->lock.hw_lock->lock, lock.context)) {
dev->lock.filp = (void *)(uintptr_t)DRM_CURRENTPID;
if (drm_lock_take(&dev->lock.hw_lock->lock, lock->context)) {
dev->lock.file_priv = file_priv;
dev->lock.lock_time = jiffies;
atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
break; /* Got lock */
@ -129,7 +143,7 @@ int drm_lock(DRM_IOCTL_ARGS)
/* Contention */
#if defined(__FreeBSD__) && __FreeBSD_version > 500000
ret = msleep((void *)&dev->lock.lock_queue, &dev->dev_lock,
ret = mtx_sleep((void *)&dev->lock.lock_queue, &dev->dev_lock,
PZERO | PCATCH, "drmlk2", 0);
#else
ret = tsleep((void *)&dev->lock.lock_queue, PZERO | PCATCH,
@ -139,7 +153,7 @@ int drm_lock(DRM_IOCTL_ARGS)
break;
}
DRM_UNLOCK();
DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock");
DRM_DEBUG("%d %s\n", lock->context, ret ? "interrupted" : "has lock");
if (ret != 0)
return ret;
@ -147,24 +161,34 @@ int drm_lock(DRM_IOCTL_ARGS)
/* XXX: Add signal blocking here */
if (dev->driver.dma_quiescent != NULL &&
(lock.flags & _DRM_LOCK_QUIESCENT))
(lock->flags & _DRM_LOCK_QUIESCENT))
dev->driver.dma_quiescent(dev);
return 0;
}
int drm_unlock(DRM_IOCTL_ARGS)
int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEVICE;
drm_lock_t lock;
drm_lock_t *lock = data;
DRM_COPY_FROM_USER_IOCTL(lock, (drm_lock_t *)data, sizeof(lock));
if (lock.context == DRM_KERNEL_CONTEXT) {
if (lock->context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
DRM_CURRENTPID, lock.context);
DRM_CURRENTPID, lock->context);
return EINVAL;
}
/* Check that the context unlock being requested actually matches
* who currently holds the lock.
*/
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) ||
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) != lock->context)
return EINVAL;
DRM_SPINLOCK(&dev->tsk_lock);
if (dev->locked_task_call != NULL) {
dev->locked_task_call(dev);
dev->locked_task_call = NULL;
}
DRM_SPINUNLOCK(&dev->tsk_lock);
atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);


@ -1,6 +1,3 @@
/* drm_memory.h -- Memory management wrappers for DRM -*- linux-c -*-
* Created: Thu Feb 4 14:00:34 1999 by faith@valinux.com
*/
/*-
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
@ -34,6 +31,14 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/** @file drm_memory.c
* Wrappers for kernel memory allocation routines, and MTRR management support.
*
* This file previously implemented a memory consumption tracking system using
* the "area" argument for various different types of allocations, but that
* has been stripped out for now.
*/
#include "dev/drm/drmP.h"
MALLOC_DEFINE(M_DRM, "drm", "DRM Data Structures");
@ -78,7 +83,7 @@ void drm_free(void *pt, size_t size, int area)
free(pt, M_DRM);
}
void *drm_ioremap(drm_device_t *dev, drm_local_map_t *map)
void *drm_ioremap(struct drm_device *dev, drm_local_map_t *map)
{
#ifdef __FreeBSD__
return pmap_mapdev(map->offset, map->size);


@ -1,10 +1,3 @@
/**
* \file drm_pci.h
* \brief PCI consistent, DMA-accessible memory functions.
*
* \author Eric Anholt <anholt@FreeBSD.org>
*/
/*-
* Copyright 2003 Eric Anholt.
* All Rights Reserved.
@ -31,6 +24,13 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/**
* \file drm_pci.h
* \brief PCI consistent, DMA-accessible memory allocation.
*
* \author Eric Anholt <anholt@FreeBSD.org>
*/
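A hedged sketch of how a driver might call the allocator defined below (assuming the usual drmP.h context and the vaddr/busaddr members of drm_dma_handle_t):

static int example_alloc_ring(struct drm_device *dev, drm_dma_handle_t **dmahp)
{
	/* 64 kB, page aligned, reachable through 32-bit bus addresses. */
	*dmahp = drm_pci_alloc(dev, 64 * 1024, PAGE_SIZE, 0xfffffffful);
	if (*dmahp == NULL)
		return ENOMEM;
	/* (*dmahp)->vaddr is the kernel mapping; (*dmahp)->busaddr is the
	 * address to program into the device.  Undo with drm_pci_free().
	 */
	return 0;
}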
#include "dev/drm/drmP.h"
/**********************************************************************/
@ -56,7 +56,8 @@ drm_pci_busdma_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error
* memory block.
*/
drm_dma_handle_t *
drm_pci_alloc(drm_device_t *dev, size_t size, size_t align, dma_addr_t maxaddr)
drm_pci_alloc(struct drm_device *dev, size_t size,
size_t align, dma_addr_t maxaddr)
{
drm_dma_handle_t *dmah;
int ret;
@ -73,6 +74,7 @@ drm_pci_alloc(drm_device_t *dev, size_t size, size_t align, dma_addr_t maxaddr)
return NULL;
#ifdef __FreeBSD__
DRM_UNLOCK();
ret = bus_dma_tag_create(NULL, align, 0, /* tag, align, boundary */
maxaddr, BUS_SPACE_MAXADDR, /* lowaddr, highaddr */
NULL, NULL, /* filtfunc, filtfuncargs */
@ -81,6 +83,7 @@ drm_pci_alloc(drm_device_t *dev, size_t size, size_t align, dma_addr_t maxaddr)
&dmah->tag);
if (ret != 0) {
free(dmah, M_DRM);
DRM_LOCK();
return NULL;
}
@ -89,9 +92,10 @@ drm_pci_alloc(drm_device_t *dev, size_t size, size_t align, dma_addr_t maxaddr)
if (ret != 0) {
bus_dma_tag_destroy(dmah->tag);
free(dmah, M_DRM);
DRM_LOCK();
return NULL;
}
DRM_LOCK();
ret = bus_dmamap_load(dmah->tag, dmah->map, dmah->vaddr, size,
drm_pci_busdma_callback, dmah, 0);
if (ret != 0) {
@ -126,7 +130,7 @@ drm_pci_alloc(drm_device_t *dev, size_t size, size_t align, dma_addr_t maxaddr)
* \brief Free a DMA-accessible consistent memory block.
*/
void
drm_pci_free(drm_device_t *dev, drm_dma_handle_t *dmah)
drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah)
{
if (dmah == NULL)
return;


@ -6,13 +6,13 @@
Please contact dri-devel@lists.sf.net to add new cards to this list
*/
#define radeon_PCI_IDS \
{0x1002, 0x3150, CHIP_RV380|CHIP_IS_MOBILITY, "ATI Radeon Mobility X600 M24"}, \
{0x1002, 0x3152, CHIP_RV380|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP, "ATI Radeon Mobility X300 M24"}, \
{0x1002, 0x3154, CHIP_RV380|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP, "ATI FireGL M24 GL"}, \
{0x1002, 0x3E50, CHIP_RV380|CHIP_NEW_MEMMAP, "ATI Radeon RV380 X600"}, \
{0x1002, 0x3E54, CHIP_RV380|CHIP_NEW_MEMMAP, "ATI FireGL V3200 RV380"}, \
{0x1002, 0x4136, CHIP_RS100|CHIP_IS_IGP, "ATI Radeon RS100 IGP 320"}, \
{0x1002, 0x4137, CHIP_RS200|CHIP_IS_IGP, "ATI Radeon RS200 IGP 340"}, \
{0x1002, 0x3150, CHIP_RV380|RADEON_IS_MOBILITY, "ATI Radeon Mobility X600 M24"}, \
{0x1002, 0x3152, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Radeon Mobility X300 M24"}, \
{0x1002, 0x3154, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI FireGL M24 GL"}, \
{0x1002, 0x3E50, CHIP_RV380|RADEON_NEW_MEMMAP, "ATI Radeon RV380 X600"}, \
{0x1002, 0x3E54, CHIP_RV380|RADEON_NEW_MEMMAP, "ATI FireGL V3200 RV380"}, \
{0x1002, 0x4136, CHIP_RS100|RADEON_IS_IGP, "ATI Radeon RS100 IGP 320"}, \
{0x1002, 0x4137, CHIP_RS200|RADEON_IS_IGP, "ATI Radeon RS200 IGP 340"}, \
{0x1002, 0x4144, CHIP_R300, "ATI Radeon AD 9500"}, \
{0x1002, 0x4145, CHIP_R300, "ATI Radeon AE 9700 Pro"}, \
{0x1002, 0x4146, CHIP_R300, "ATI Radeon AF R300 9600TX"}, \
@ -28,35 +28,35 @@
{0x1002, 0x4154, CHIP_RV350, "ATI FireGL AT T2"}, \
{0x1002, 0x4155, CHIP_RV350, "ATI Radeon 9650"}, \
{0x1002, 0x4156, CHIP_RV350, "ATI FireGL AV RV360 T2"}, \
{0x1002, 0x4237, CHIP_RS200|CHIP_IS_IGP, "ATI Radeon RS250 IGP"}, \
{0x1002, 0x4237, CHIP_RS200|RADEON_IS_IGP, "ATI Radeon RS250 IGP"}, \
{0x1002, 0x4242, CHIP_R200, "ATI Radeon BB R200 AIW 8500DV"}, \
{0x1002, 0x4243, CHIP_R200, "ATI Radeon BC R200"}, \
{0x1002, 0x4336, CHIP_RS100|CHIP_IS_IGP|CHIP_IS_MOBILITY, "ATI Radeon RS100 Mobility U1"}, \
{0x1002, 0x4337, CHIP_RS200|CHIP_IS_IGP|CHIP_IS_MOBILITY, "ATI Radeon RS200 Mobility IGP 340M"}, \
{0x1002, 0x4437, CHIP_RS200|CHIP_IS_IGP|CHIP_IS_MOBILITY, "ATI Radeon RS250 Mobility IGP"}, \
{0x1002, 0x4336, CHIP_RS100|RADEON_IS_IGP|RADEON_IS_MOBILITY, "ATI Radeon RS100 Mobility U1"}, \
{0x1002, 0x4337, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY, "ATI Radeon RS200 Mobility IGP 340M"}, \
{0x1002, 0x4437, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY, "ATI Radeon RS250 Mobility IGP"}, \
{0x1002, 0x4966, CHIP_RV250, "ATI Radeon If RV250 9000"}, \
{0x1002, 0x4967, CHIP_RV250, "ATI Radeon Ig RV250 9000"}, \
{0x1002, 0x4A48, CHIP_R420|CHIP_NEW_MEMMAP, "ATI Radeon JH R420 X800"}, \
{0x1002, 0x4A49, CHIP_R420|CHIP_NEW_MEMMAP, "ATI Radeon JI R420 X800 Pro"}, \
{0x1002, 0x4A4A, CHIP_R420|CHIP_NEW_MEMMAP, "ATI Radeon JJ R420 X800 SE"}, \
{0x1002, 0x4A4B, CHIP_R420|CHIP_NEW_MEMMAP, "ATI Radeon JK R420 X800 XT"}, \
{0x1002, 0x4A4C, CHIP_R420|CHIP_NEW_MEMMAP, "ATI Radeon JL R420 X800"}, \
{0x1002, 0x4A4D, CHIP_R420|CHIP_NEW_MEMMAP, "ATI FireGL JM X3-256"}, \
{0x1002, 0x4A4E, CHIP_R420|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP, "ATI Radeon JN R420 Mobility M18"}, \
{0x1002, 0x4A4F, CHIP_R420|CHIP_NEW_MEMMAP, "ATI Radeon JO R420 X800 SE"}, \
{0x1002, 0x4A50, CHIP_R420|CHIP_NEW_MEMMAP, "ATI Radeon JP R420 X800 XT PE"}, \
{0x1002, 0x4A54, CHIP_R420|CHIP_NEW_MEMMAP, "ATI Radeon JT R420 AIW X800 VE"}, \
{0x1002, 0x4B49, CHIP_R420|CHIP_NEW_MEMMAP, "ATI Radeon R481 X850 XT"}, \
{0x1002, 0x4B4A, CHIP_R420|CHIP_NEW_MEMMAP, "ATI Radeon R481 X850 SE"}, \
{0x1002, 0x4B4B, CHIP_R420|CHIP_NEW_MEMMAP, "ATI Radeon R481 X850 Pro"}, \
{0x1002, 0x4B4C, CHIP_R420|CHIP_NEW_MEMMAP, "ATI Radeon R481 X850 XT PE"}, \
{0x1002, 0x4C57, CHIP_RV200|CHIP_IS_MOBILITY, "ATI Radeon LW RV200 Mobility 7500 M7"}, \
{0x1002, 0x4C58, CHIP_RV200|CHIP_IS_MOBILITY, "ATI Radeon LX RV200 Mobility FireGL 7800 M7"}, \
{0x1002, 0x4C59, CHIP_RV100|CHIP_IS_MOBILITY, "ATI Radeon LY RV100 Mobility M6"}, \
{0x1002, 0x4C5A, CHIP_RV100|CHIP_IS_MOBILITY, "ATI Radeon LZ RV100 Mobility M6"}, \
{0x1002, 0x4C64, CHIP_RV250|CHIP_IS_MOBILITY, "ATI Radeon Ld RV250 Mobility 9000 M9"}, \
{0x1002, 0x4A48, CHIP_R420|RADEON_NEW_MEMMAP, "ATI Radeon JH R420 X800"}, \
{0x1002, 0x4A49, CHIP_R420|RADEON_NEW_MEMMAP, "ATI Radeon JI R420 X800 Pro"}, \
{0x1002, 0x4A4A, CHIP_R420|RADEON_NEW_MEMMAP, "ATI Radeon JJ R420 X800 SE"}, \
{0x1002, 0x4A4B, CHIP_R420|RADEON_NEW_MEMMAP, "ATI Radeon JK R420 X800 XT"}, \
{0x1002, 0x4A4C, CHIP_R420|RADEON_NEW_MEMMAP, "ATI Radeon JL R420 X800"}, \
{0x1002, 0x4A4D, CHIP_R420|RADEON_NEW_MEMMAP, "ATI FireGL JM X3-256"}, \
{0x1002, 0x4A4E, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Radeon JN R420 Mobility M18"}, \
{0x1002, 0x4A4F, CHIP_R420|RADEON_NEW_MEMMAP, "ATI Radeon JO R420 X800 SE"}, \
{0x1002, 0x4A50, CHIP_R420|RADEON_NEW_MEMMAP, "ATI Radeon JP R420 X800 XT PE"}, \
{0x1002, 0x4A54, CHIP_R420|RADEON_NEW_MEMMAP, "ATI Radeon JT R420 AIW X800 VE"}, \
{0x1002, 0x4B49, CHIP_R420|RADEON_NEW_MEMMAP, "ATI Radeon R481 X850 XT"}, \
{0x1002, 0x4B4A, CHIP_R420|RADEON_NEW_MEMMAP, "ATI Radeon R481 X850 SE"}, \
{0x1002, 0x4B4B, CHIP_R420|RADEON_NEW_MEMMAP, "ATI Radeon R481 X850 Pro"}, \
{0x1002, 0x4B4C, CHIP_R420|RADEON_NEW_MEMMAP, "ATI Radeon R481 X850 XT PE"}, \
{0x1002, 0x4C57, CHIP_RV200|RADEON_IS_MOBILITY, "ATI Radeon LW RV200 Mobility 7500 M7"}, \
{0x1002, 0x4C58, CHIP_RV200|RADEON_IS_MOBILITY, "ATI Radeon LX RV200 Mobility FireGL 7800 M7"}, \
{0x1002, 0x4C59, CHIP_RV100|RADEON_IS_MOBILITY, "ATI Radeon LY RV100 Mobility M6"}, \
{0x1002, 0x4C5A, CHIP_RV100|RADEON_IS_MOBILITY, "ATI Radeon LZ RV100 Mobility M6"}, \
{0x1002, 0x4C64, CHIP_RV250|RADEON_IS_MOBILITY, "ATI Radeon Ld RV250 Mobility 9000 M9"}, \
{0x1002, 0x4C66, CHIP_RV250, "ATI Radeon Lf RV250 Mobility 9000 M9 / FireMV 2400 PCI"}, \
{0x1002, 0x4C67, CHIP_RV250|CHIP_IS_MOBILITY, "ATI Radeon Lg RV250 Mobility 9000 M9"}, \
{0x1002, 0x4C67, CHIP_RV250|RADEON_IS_MOBILITY, "ATI Radeon Lg RV250 Mobility 9000 M9"}, \
{0x1002, 0x4E44, CHIP_R300, "ATI Radeon ND R300 9700 Pro"}, \
{0x1002, 0x4E45, CHIP_R300, "ATI Radeon NE R300 9500 Pro / 9700"}, \
{0x1002, 0x4E46, CHIP_R300, "ATI Radeon NF R300 9600TX"}, \
@ -65,16 +65,16 @@
{0x1002, 0x4E49, CHIP_R350, "ATI Radeon NI R350 9800"}, \
{0x1002, 0x4E4A, CHIP_R350, "ATI Radeon NJ R360 9800 XT"}, \
{0x1002, 0x4E4B, CHIP_R350, "ATI FireGL NK X2"}, \
{0x1002, 0x4E50, CHIP_RV350|CHIP_IS_MOBILITY, "ATI Radeon RV350 Mobility 9600 M10 NP"}, \
{0x1002, 0x4E51, CHIP_RV350|CHIP_IS_MOBILITY, "ATI Radeon RV350 Mobility 9600 M10 NQ"}, \
{0x1002, 0x4E52, CHIP_RV350|CHIP_IS_MOBILITY, "ATI Radeon RV350 Mobility 9600 M11 NR"}, \
{0x1002, 0x4E53, CHIP_RV350|CHIP_IS_MOBILITY, "ATI Radeon RV350 Mobility 9600 M10 NS"}, \
{0x1002, 0x4E54, CHIP_RV350|CHIP_IS_MOBILITY, "ATI FireGL T2/T2e"}, \
{0x1002, 0x4E56, CHIP_RV350|CHIP_IS_MOBILITY, "ATI Radeon Mobility 9550"}, \
{0x1002, 0x5144, CHIP_R100|CHIP_SINGLE_CRTC, "ATI Radeon QD R100"}, \
{0x1002, 0x5145, CHIP_R100|CHIP_SINGLE_CRTC, "ATI Radeon QE R100"}, \
{0x1002, 0x5146, CHIP_R100|CHIP_SINGLE_CRTC, "ATI Radeon QF R100"}, \
{0x1002, 0x5147, CHIP_R100|CHIP_SINGLE_CRTC, "ATI Radeon QG R100"}, \
{0x1002, 0x4E50, CHIP_RV350|RADEON_IS_MOBILITY, "ATI Radeon RV350 Mobility 9600 M10 NP"}, \
{0x1002, 0x4E51, CHIP_RV350|RADEON_IS_MOBILITY, "ATI Radeon RV350 Mobility 9600 M10 NQ"}, \
{0x1002, 0x4E52, CHIP_RV350|RADEON_IS_MOBILITY, "ATI Radeon RV350 Mobility 9600 M11 NR"}, \
{0x1002, 0x4E53, CHIP_RV350|RADEON_IS_MOBILITY, "ATI Radeon RV350 Mobility 9600 M10 NS"}, \
{0x1002, 0x4E54, CHIP_RV350|RADEON_IS_MOBILITY, "ATI FireGL T2/T2e"}, \
{0x1002, 0x4E56, CHIP_RV350|RADEON_IS_MOBILITY, "ATI Radeon Mobility 9550"}, \
{0x1002, 0x5144, CHIP_R100|RADEON_SINGLE_CRTC, "ATI Radeon QD R100"}, \
{0x1002, 0x5145, CHIP_R100|RADEON_SINGLE_CRTC, "ATI Radeon QE R100"}, \
{0x1002, 0x5146, CHIP_R100|RADEON_SINGLE_CRTC, "ATI Radeon QF R100"}, \
{0x1002, 0x5147, CHIP_R100|RADEON_SINGLE_CRTC, "ATI Radeon QG R100"}, \
{0x1002, 0x5148, CHIP_R200, "ATI Radeon QH R200 8500"}, \
{0x1002, 0x514C, CHIP_R200, "ATI Radeon QL R200 8500 LE"}, \
{0x1002, 0x514D, CHIP_R200, "ATI Radeon QM R200 9100"}, \
@ -83,59 +83,165 @@
{0x1002, 0x5159, CHIP_RV100, "ATI Radeon QY RV100 7000/VE"}, \
{0x1002, 0x515A, CHIP_RV100, "ATI Radeon QZ RV100 7000/VE"}, \
{0x1002, 0x515E, CHIP_RV100, "ATI ES1000 RN50"}, \
{0x1002, 0x5460, CHIP_RV380|CHIP_IS_MOBILITY, "ATI Radeon Mobility X300 M22"}, \
{0x1002, 0x5462, CHIP_RV380|CHIP_IS_MOBILITY, "ATI Radeon Mobility X600 SE M24C"}, \
{0x1002, 0x5464, CHIP_RV380|CHIP_IS_MOBILITY, "ATI FireGL M22 GL 5464"}, \
{0x1002, 0x5548, CHIP_R420|CHIP_NEW_MEMMAP, "ATI Radeon R423 X800"}, \
{0x1002, 0x5549, CHIP_R420|CHIP_NEW_MEMMAP, "ATI Radeon R423 X800 Pro"}, \
{0x1002, 0x554A, CHIP_R420|CHIP_NEW_MEMMAP, "ATI Radeon R423 X800 XT PE"}, \
{0x1002, 0x554B, CHIP_R420|CHIP_NEW_MEMMAP, "ATI Radeon R423 X800 SE"}, \
{0x1002, 0x554C, CHIP_R420|CHIP_NEW_MEMMAP, "ATI Radeon R430 X800 XTP"}, \
{0x1002, 0x554D, CHIP_R420|CHIP_NEW_MEMMAP, "ATI Radeon R430 X800 XL"}, \
{0x1002, 0x554E, CHIP_R420|CHIP_NEW_MEMMAP, "ATI Radeon R430 X800 SE"}, \
{0x1002, 0x554F, CHIP_R420|CHIP_NEW_MEMMAP, "ATI Radeon R430 X800"}, \
{0x1002, 0x5550, CHIP_R420|CHIP_NEW_MEMMAP, "ATI FireGL V7100 R423"}, \
{0x1002, 0x5551, CHIP_R420|CHIP_NEW_MEMMAP, "ATI FireGL V5100 R423 UQ"}, \
{0x1002, 0x5552, CHIP_R420|CHIP_NEW_MEMMAP, "ATI FireGL unknown R423 UR"}, \
{0x1002, 0x5554, CHIP_R420|CHIP_NEW_MEMMAP, "ATI FireGL unknown R423 UT"}, \
{0x1002, 0x564A, CHIP_RV410|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP, "ATI Mobility FireGL V5000 M26"}, \
{0x1002, 0x564B, CHIP_RV410|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP, "ATI Mobility FireGL V5000 M26"}, \
{0x1002, 0x564F, CHIP_RV410|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP, "ATI Radeon Mobility X700 XL M26"}, \
{0x1002, 0x5652, CHIP_RV410|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP, "ATI Radeon Mobility X700 M26"}, \
{0x1002, 0x5653, CHIP_RV410|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP, "ATI Radeon Mobility X700 M26"}, \
{0x1002, 0x5834, CHIP_RS300|CHIP_IS_IGP, "ATI Radeon RS300 9100 IGP"}, \
{0x1002, 0x5835, CHIP_RS300|CHIP_IS_IGP|CHIP_IS_MOBILITY, "ATI Radeon RS300 Mobility IGP"}, \
{0x1002, 0x5460, CHIP_RV380|RADEON_IS_MOBILITY, "ATI Radeon Mobility X300 M22"}, \
{0x1002, 0x5462, CHIP_RV380|RADEON_IS_MOBILITY, "ATI Radeon Mobility X600 SE M24C"}, \
{0x1002, 0x5464, CHIP_RV380|RADEON_IS_MOBILITY, "ATI FireGL M22 GL 5464"}, \
{0x1002, 0x5548, CHIP_R420|RADEON_NEW_MEMMAP, "ATI Radeon R423 X800"}, \
{0x1002, 0x5549, CHIP_R420|RADEON_NEW_MEMMAP, "ATI Radeon R423 X800 Pro"}, \
{0x1002, 0x554A, CHIP_R420|RADEON_NEW_MEMMAP, "ATI Radeon R423 X800 XT PE"}, \
{0x1002, 0x554B, CHIP_R420|RADEON_NEW_MEMMAP, "ATI Radeon R423 X800 SE"}, \
{0x1002, 0x554C, CHIP_R420|RADEON_NEW_MEMMAP, "ATI Radeon R430 X800 XTP"}, \
{0x1002, 0x554D, CHIP_R420|RADEON_NEW_MEMMAP, "ATI Radeon R430 X800 XL"}, \
{0x1002, 0x554E, CHIP_R420|RADEON_NEW_MEMMAP, "ATI Radeon R430 X800 SE"}, \
{0x1002, 0x554F, CHIP_R420|RADEON_NEW_MEMMAP, "ATI Radeon R430 X800"}, \
{0x1002, 0x5550, CHIP_R420|RADEON_NEW_MEMMAP, "ATI FireGL V7100 R423"}, \
{0x1002, 0x5551, CHIP_R420|RADEON_NEW_MEMMAP, "ATI FireGL V5100 R423 UQ"}, \
{0x1002, 0x5552, CHIP_R420|RADEON_NEW_MEMMAP, "ATI FireGL unknown R423 UR"}, \
{0x1002, 0x5554, CHIP_R420|RADEON_NEW_MEMMAP, "ATI FireGL unknown R423 UT"}, \
{0x1002, 0x564A, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility FireGL V5000 M26"}, \
{0x1002, 0x564B, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility FireGL V5000 M26"}, \
{0x1002, 0x564F, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Radeon Mobility X700 XL M26"}, \
{0x1002, 0x5652, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Radeon Mobility X700 M26"}, \
{0x1002, 0x5653, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Radeon Mobility X700 M26"}, \
{0x1002, 0x5657, CHIP_RV410|RADEON_NEW_MEMMAP, "ATI Radeon X550XTX"}, \
{0x1002, 0x5834, CHIP_RS300|RADEON_IS_IGP, "ATI Radeon RS300 9100 IGP"}, \
{0x1002, 0x5835, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY, "ATI Radeon RS300 Mobility IGP"}, \
{0x1002, 0x5954, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_IGPGART, "ATI RS480 XPRESS 200G"}, \
{0x1002, 0x5955, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART, "ATI Radeon XPRESS 200M 5955"}, \
{0x1002, 0x5974, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART, "ATI Radeon RS482 XPRESS 200"}, \
{0x1002, 0x5975, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART, "ATI Radeon RS485 XPRESS 1100 IGP"}, \
{0x1002, 0x5960, CHIP_RV280, "ATI Radeon RV280 9250"}, \
{0x1002, 0x5961, CHIP_RV280, "ATI Radeon RV280 9200"}, \
{0x1002, 0x5962, CHIP_RV280, "ATI Radeon RV280 9200"}, \
{0x1002, 0x5964, CHIP_RV280, "ATI Radeon RV280 9200 SE"}, \
{0x1002, 0x5965, CHIP_RV280, "ATI FireMV 2200 PCI"}, \
{0x1002, 0x5969, CHIP_RV100, "ATI ES1000 RN50"}, \
{0x1002, 0x5b60, CHIP_RV380|CHIP_NEW_MEMMAP, "ATI Radeon RV370 X300 SE"}, \
{0x1002, 0x5b62, CHIP_RV380|CHIP_NEW_MEMMAP, "ATI Radeon RV370 X600 Pro"}, \
{0x1002, 0x5b63, CHIP_RV380|CHIP_NEW_MEMMAP, "ATI Radeon RV370 X550"}, \
{0x1002, 0x5b64, CHIP_RV380|CHIP_NEW_MEMMAP, "ATI FireGL V3100 (RV370) 5B64"}, \
{0x1002, 0x5b65, CHIP_RV380|CHIP_NEW_MEMMAP, "ATI FireMV 2200 PCIE (RV370) 5B65"}, \
{0x1002, 0x5c61, CHIP_RV280|CHIP_IS_MOBILITY, "ATI Radeon RV280 Mobility"}, \
{0x1002, 0x5c63, CHIP_RV280|CHIP_IS_MOBILITY, "ATI Radeon RV280 Mobility"}, \
{0x1002, 0x5d48, CHIP_R420|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP, "ATI Mobility Radeon X800 XT M28"}, \
{0x1002, 0x5d49, CHIP_R420|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP, "ATI Mobility FireGL V5100 M28"}, \
{0x1002, 0x5d4a, CHIP_R420|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP, "ATI Mobility Radeon X800 M28"}, \
{0x1002, 0x5d4c, CHIP_R420|CHIP_NEW_MEMMAP, "ATI Radeon R480 X850"}, \
{0x1002, 0x5d4d, CHIP_R420|CHIP_NEW_MEMMAP, "ATI Radeon R480 X850 XT PE"}, \
{0x1002, 0x5d4e, CHIP_R420|CHIP_NEW_MEMMAP, "ATI Radeon R480 X850 SE"}, \
{0x1002, 0x5d4f, CHIP_R420|CHIP_NEW_MEMMAP, "ATI Radeon R480 X850 Pro"}, \
{0x1002, 0x5d50, CHIP_R420|CHIP_NEW_MEMMAP, "ATI unknown Radeon / FireGL R480"}, \
{0x1002, 0x5d52, CHIP_R420|CHIP_NEW_MEMMAP, "ATI Radeon R480 X850 XT"}, \
{0x1002, 0x5d57, CHIP_R420|CHIP_NEW_MEMMAP, "ATI Radeon R423 X800 XT"}, \
{0x1002, 0x5e48, CHIP_RV410|CHIP_NEW_MEMMAP, "ATI FireGL V5000 RV410"}, \
{0x1002, 0x5e4a, CHIP_RV410|CHIP_NEW_MEMMAP, "ATI Radeon RV410 X700 XT"}, \
{0x1002, 0x5e4b, CHIP_RV410|CHIP_NEW_MEMMAP, "ATI Radeon RV410 X700 Pro"}, \
{0x1002, 0x5e4c, CHIP_RV410|CHIP_NEW_MEMMAP, "ATI Radeon RV410 X700 SE"}, \
{0x1002, 0x5e4d, CHIP_RV410|CHIP_NEW_MEMMAP, "ATI Radeon RV410 X700"}, \
{0x1002, 0x5e4f, CHIP_RV410|CHIP_NEW_MEMMAP, "ATI Radeon RV410 X700 SE"}, \
{0x1002, 0x7834, CHIP_RS300|CHIP_IS_IGP|CHIP_NEW_MEMMAP, "ATI Radeon RS350 9000/9100 IGP"}, \
{0x1002, 0x7835, CHIP_RS300|CHIP_IS_IGP|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP, "ATI Radeon RS350 Mobility IGP"}, \
{0x1002, 0x5a41, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART, "ATI Radeon XPRESS 200 5A41 (PCIE)"}, \
{0x1002, 0x5a42, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART, "ATI Radeon XPRESS 200M 5A42 (PCIE)"}, \
{0x1002, 0x5a61, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART, "ATI Radeon RC410 XPRESS 200"}, \
{0x1002, 0x5a62, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART, "ATI Radeon RC410 XPRESS 200M"}, \
{0x1002, 0x5b60, CHIP_RV380|RADEON_NEW_MEMMAP, "ATI Radeon RV370 X300 SE"}, \
{0x1002, 0x5b62, CHIP_RV380|RADEON_NEW_MEMMAP, "ATI Radeon RV370 X600 Pro"}, \
{0x1002, 0x5b63, CHIP_RV380|RADEON_NEW_MEMMAP, "ATI Radeon RV370 X550"}, \
{0x1002, 0x5b64, CHIP_RV380|RADEON_NEW_MEMMAP, "ATI FireGL V3100 (RV370) 5B64"}, \
{0x1002, 0x5b65, CHIP_RV380|RADEON_NEW_MEMMAP, "ATI FireMV 2200 PCIE (RV370) 5B65"}, \
{0x1002, 0x5c61, CHIP_RV280|RADEON_IS_MOBILITY, "ATI Radeon RV280 Mobility"}, \
{0x1002, 0x5c63, CHIP_RV280|RADEON_IS_MOBILITY, "ATI Radeon RV280 Mobility"}, \
{0x1002, 0x5d48, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X800 XT M28"}, \
{0x1002, 0x5d49, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility FireGL V5100 M28"}, \
{0x1002, 0x5d4a, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X800 M28"}, \
{0x1002, 0x5d4c, CHIP_R420|RADEON_NEW_MEMMAP, "ATI Radeon R480 X850"}, \
{0x1002, 0x5d4d, CHIP_R420|RADEON_NEW_MEMMAP, "ATI Radeon R480 X850 XT PE"}, \
{0x1002, 0x5d4e, CHIP_R420|RADEON_NEW_MEMMAP, "ATI Radeon R480 X850 SE"}, \
{0x1002, 0x5d4f, CHIP_R420|RADEON_NEW_MEMMAP, "ATI Radeon R480 X850 Pro"}, \
{0x1002, 0x5d50, CHIP_R420|RADEON_NEW_MEMMAP, "ATI unknown Radeon / FireGL R480"}, \
{0x1002, 0x5d52, CHIP_R420|RADEON_NEW_MEMMAP, "ATI Radeon R480 X850 XT"}, \
{0x1002, 0x5d57, CHIP_R420|RADEON_NEW_MEMMAP, "ATI Radeon R423 X800 XT"}, \
{0x1002, 0x5e48, CHIP_RV410|RADEON_NEW_MEMMAP, "ATI FireGL V5000 RV410"}, \
{0x1002, 0x5e4a, CHIP_RV410|RADEON_NEW_MEMMAP, "ATI Radeon RV410 X700 XT"}, \
{0x1002, 0x5e4b, CHIP_RV410|RADEON_NEW_MEMMAP, "ATI Radeon RV410 X700 Pro"}, \
{0x1002, 0x5e4c, CHIP_RV410|RADEON_NEW_MEMMAP, "ATI Radeon RV410 X700 SE"}, \
{0x1002, 0x5e4d, CHIP_RV410|RADEON_NEW_MEMMAP, "ATI Radeon RV410 X700"}, \
{0x1002, 0x5e4f, CHIP_RV410|RADEON_NEW_MEMMAP, "ATI Radeon RV410 X700 SE"}, \
{0x1002, 0x7100, CHIP_R520|RADEON_NEW_MEMMAP, "ATI Radeon X1800"}, \
{0x1002, 0x7101, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X1800 XT"}, \
{0x1002, 0x7102, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X1800"}, \
{0x1002, 0x7103, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility FireGL V7200"}, \
{0x1002, 0x7104, CHIP_R520|RADEON_NEW_MEMMAP, "ATI FireGL V7200"}, \
{0x1002, 0x7105, CHIP_R520|RADEON_NEW_MEMMAP, "ATI FireGL V5300"}, \
{0x1002, 0x7106, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility FireGL V7100"}, \
{0x1002, 0x7108, CHIP_R520|RADEON_NEW_MEMMAP, "ATI Radeon X1800"}, \
{0x1002, 0x7109, CHIP_R520|RADEON_NEW_MEMMAP, "ATI Radeon X1800"}, \
{0x1002, 0x710A, CHIP_R520|RADEON_NEW_MEMMAP, "ATI Radeon X1800"}, \
{0x1002, 0x710B, CHIP_R520|RADEON_NEW_MEMMAP, "ATI Radeon X1800"}, \
{0x1002, 0x710C, CHIP_R520|RADEON_NEW_MEMMAP, "ATI Radeon X1800"}, \
{0x1002, 0x710E, CHIP_R520|RADEON_NEW_MEMMAP, "ATI FireGL V7300"}, \
{0x1002, 0x710F, CHIP_R520|RADEON_NEW_MEMMAP, "ATI FireGL V7350"}, \
{0x1002, 0x7140, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI Radeon X1600"}, \
{0x1002, 0x7141, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI RV505"}, \
{0x1002, 0x7142, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI Radeon X1300/X1550"}, \
{0x1002, 0x7143, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI Radeon X1550"}, \
{0x1002, 0x7144, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI M54-GL"}, \
{0x1002, 0x7145, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X1400"}, \
{0x1002, 0x7146, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI Radeon X1300/X1550"}, \
{0x1002, 0x7147, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI Radeon X1550 64-bit"}, \
{0x1002, 0x7149, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X1300"}, \
{0x1002, 0x714A, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X1300"}, \
{0x1002, 0x714B, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X1300"}, \
{0x1002, 0x714C, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X1300"}, \
{0x1002, 0x714D, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI Radeon X1300"}, \
{0x1002, 0x714E, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI Radeon X1300"}, \
{0x1002, 0x714F, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI RV505"}, \
{0x1002, 0x7151, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI RV505"}, \
{0x1002, 0x7152, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI FireGL V3300"}, \
{0x1002, 0x7153, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI FireGL V3350"}, \
{0x1002, 0x715E, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI Radeon X1300"}, \
{0x1002, 0x715F, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI Radeon X1550 64-bit"}, \
{0x1002, 0x7180, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI Radeon X1300/X1550"}, \
{0x1002, 0x7181, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI Radeon X1600"}, \
{0x1002, 0x7183, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI Radeon X1300/X1550"}, \
{0x1002, 0x7186, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X1450"}, \
{0x1002, 0x7187, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI Radeon X1300/X1550"}, \
{0x1002, 0x7188, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X2300"}, \
{0x1002, 0x718A, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X2300"}, \
{0x1002, 0x718B, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X1350"}, \
{0x1002, 0x718C, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X1350"}, \
{0x1002, 0x718D, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X1450"}, \
{0x1002, 0x718F, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI Radeon X1300"}, \
{0x1002, 0x7193, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI Radeon X1550"}, \
{0x1002, 0x7196, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X1350"}, \
{0x1002, 0x719B, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI FireMV 2250"}, \
{0x1002, 0x719F, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI Radeon X1550 64-bit"}, \
{0x1002, 0x71C0, CHIP_RV530|RADEON_NEW_MEMMAP, "ATI Radeon X1600"}, \
{0x1002, 0x71C1, CHIP_RV530|RADEON_NEW_MEMMAP, "ATI Radeon X1650"}, \
{0x1002, 0x71C2, CHIP_RV530|RADEON_NEW_MEMMAP, "ATI Radeon X1600"}, \
{0x1002, 0x71C3, CHIP_RV530|RADEON_NEW_MEMMAP, "ATI Radeon X1600"}, \
{0x1002, 0x71C4, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility FireGL V5200"}, \
{0x1002, 0x71C5, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X1600"}, \
{0x1002, 0x71C6, CHIP_RV530|RADEON_NEW_MEMMAP, "ATI Radeon X1650"}, \
{0x1002, 0x71C7, CHIP_RV530|RADEON_NEW_MEMMAP, "ATI Radeon X1650"}, \
{0x1002, 0x71CD, CHIP_RV530|RADEON_NEW_MEMMAP, "ATI Radeon X1600"}, \
{0x1002, 0x71CE, CHIP_RV530|RADEON_NEW_MEMMAP, "ATI Radeon X1300 XT/X1600 Pro"}, \
{0x1002, 0x71D2, CHIP_RV530|RADEON_NEW_MEMMAP, "ATI FireGL V3400"}, \
{0x1002, 0x71D4, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility FireGL V5250"}, \
{0x1002, 0x71D5, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X1700"}, \
{0x1002, 0x71D6, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X1700 XT"}, \
{0x1002, 0x71DA, CHIP_RV530|RADEON_NEW_MEMMAP, "ATI FireGL V5200"}, \
{0x1002, 0x71DE, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X1700"}, \
{0x1002, 0x7200, CHIP_RV515|RADEON_NEW_MEMMAP, "ATI Radeon X2300HD"}, \
{0x1002, 0x7210, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 2300"}, \
{0x1002, 0x7211, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon HD 2300"}, \
{0x1002, 0x7240, CHIP_R580|RADEON_NEW_MEMMAP, "ATI Radeon X1950"}, \
{0x1002, 0x7243, CHIP_R580|RADEON_NEW_MEMMAP, "ATI Radeon X1900"}, \
{0x1002, 0x7244, CHIP_R580|RADEON_NEW_MEMMAP, "ATI Radeon X1950"}, \
{0x1002, 0x7245, CHIP_R580|RADEON_NEW_MEMMAP, "ATI Radeon X1900"}, \
{0x1002, 0x7246, CHIP_R580|RADEON_NEW_MEMMAP, "ATI Radeon X1900"}, \
{0x1002, 0x7247, CHIP_R580|RADEON_NEW_MEMMAP, "ATI Radeon X1900"}, \
{0x1002, 0x7248, CHIP_R580|RADEON_NEW_MEMMAP, "ATI Radeon X1900"}, \
{0x1002, 0x7249, CHIP_R580|RADEON_NEW_MEMMAP, "ATI Radeon X1900"}, \
{0x1002, 0x724A, CHIP_R580|RADEON_NEW_MEMMAP, "ATI Radeon X1900"}, \
{0x1002, 0x724B, CHIP_R580|RADEON_NEW_MEMMAP, "ATI Radeon X1900"}, \
{0x1002, 0x724C, CHIP_R580|RADEON_NEW_MEMMAP, "ATI Radeon X1900"}, \
{0x1002, 0x724D, CHIP_R580|RADEON_NEW_MEMMAP, "ATI Radeon X1900"}, \
{0x1002, 0x724E, CHIP_R580|RADEON_NEW_MEMMAP, "ATI AMD Stream Processor"}, \
{0x1002, 0x724F, CHIP_R580|RADEON_NEW_MEMMAP, "ATI Radeon X1900"}, \
{0x1002, 0x7280, CHIP_RV570|RADEON_NEW_MEMMAP, "ATI Radeon X1950"}, \
{0x1002, 0x7281, CHIP_RV560|RADEON_NEW_MEMMAP, "ATI RV560"}, \
{0x1002, 0x7283, CHIP_RV560|RADEON_NEW_MEMMAP, "ATI RV560"}, \
{0x1002, 0x7284, CHIP_R580|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Mobility Radeon X1900"}, \
{0x1002, 0x7287, CHIP_RV560|RADEON_NEW_MEMMAP, "ATI RV560"}, \
{0x1002, 0x7288, CHIP_RV570|RADEON_NEW_MEMMAP, "ATI Radeon X1950 GT"}, \
{0x1002, 0x7289, CHIP_RV570|RADEON_NEW_MEMMAP, "ATI RV570"}, \
{0x1002, 0x728B, CHIP_RV570|RADEON_NEW_MEMMAP, "ATI RV570"}, \
{0x1002, 0x728C, CHIP_RV570|RADEON_NEW_MEMMAP, "ATI FireGL V7400"}, \
{0x1002, 0x7290, CHIP_RV560|RADEON_NEW_MEMMAP, "ATI RV560"}, \
{0x1002, 0x7291, CHIP_RV560|RADEON_NEW_MEMMAP, "ATI Radeon X1650"}, \
{0x1002, 0x7293, CHIP_RV560|RADEON_NEW_MEMMAP, "ATI Radeon X1650"}, \
{0x1002, 0x7297, CHIP_RV560|RADEON_NEW_MEMMAP, "ATI RV560"}, \
{0x1002, 0x7834, CHIP_RS300|RADEON_IS_IGP|RADEON_NEW_MEMMAP, "ATI Radeon RS350 9000/9100 IGP"}, \
{0x1002, 0x7835, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "ATI Radeon RS350 Mobility IGP"}, \
{0x1002, 0x791e, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART, "ATI Radeon RS690 X1250 IGP"}, \
{0x1002, 0x791f, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART, "ATI Radeon RS690 X1270 IGP"}, \
{0, 0, 0, NULL}
#define r128_PCI_IDS \
@ -212,7 +318,10 @@
{0x1039, 0x0300, 0, "SiS 300/305"}, \
{0x1039, 0x5300, 0, "SiS 540"}, \
{0x1039, 0x6300, 0, "SiS 630"}, \
{0x1039, 0x6330, SIS_CHIP_315, "SiS 661"}, \
{0x1039, 0x7300, 0, "SiS 730"}, \
{0x18CA, 0x0040, SIS_CHIP_315, "Volari V3XT/V5/V8"}, \
{0x18CA, 0x0042, SIS_CHIP_315, "Volari Unknown"}, \
{0, 0, 0, NULL}
#define tdfx_PCI_IDS \
@ -230,7 +339,11 @@
{0x1106, 0x3122, 0, "VIA CLE266"}, \
{0x1106, 0x7205, 0, "VIA KM400"}, \
{0x1106, 0x3108, 0, "VIA K8M800"}, \
{0x1106, 0x3344, 0, "VIA P4VM800PRO"}, \
{0x1106, 0x3344, 0, "VIA CN700 / VM800 / P4M800Pro"}, \
{0x1106, 0x3343, 0, "VIA P4M890"}, \
{0x1106, 0x3230, VIA_DX9_0, "VIA K8M890"}, \
{0x1106, 0x3157, VIA_PRO_GROUP_A, "VIA CX700"}, \
{0x1106, 0x3371, VIA_DX9_0, "VIA P4M900 / VN896"}, \
{0, 0, 0, NULL}
#define i810_PCI_IDS \
@ -281,20 +394,29 @@
{0, 0, 0, NULL}
#define i915_PCI_IDS \
{0x8086, 0x3577, 0, "Intel i830M GMCH"}, \
{0x8086, 0x2562, 0, "Intel i845G GMCH"}, \
{0x8086, 0x3582, 0, "Intel i852GM/i855GM GMCH"}, \
{0x8086, 0x2572, 0, "Intel i865G GMCH"}, \
{0x8086, 0x2582, 0, "Intel i915G"}, \
{0x8086, 0x2592, 0, "Intel i915GM"}, \
{0x8086, 0x2772, 0, "Intel i945G"}, \
{0x8086, 0x27A2, 0, "Intel i945GM"}, \
{0x8086, 0x2972, 0, "Intel i946GZ"}, \
{0x8086, 0x2982, 0, "Intel i965G"}, \
{0x8086, 0x2992, 0, "Intel i965Q"}, \
{0x8086, 0x29A2, 0, "Intel i965G"}, \
{0x8086, 0x2A02, 0, "Intel i965GM"}, \
{0x8086, 0x2A12, 0, "Intel i965GME"}, \
{0x8086, 0x3577, CHIP_I8XX, "Intel i830M GMCH"}, \
{0x8086, 0x2562, CHIP_I8XX, "Intel i845G GMCH"}, \
{0x8086, 0x3582, CHIP_I8XX, "Intel i852GM/i855GM GMCH"}, \
{0x8086, 0x2572, CHIP_I8XX, "Intel i865G GMCH"}, \
{0x8086, 0x2582, CHIP_I9XX|CHIP_I915, "Intel i915G"}, \
{0x8086, 0x258a, CHIP_I9XX|CHIP_I915, "Intel E7221 (i915)"}, \
{0x8086, 0x2592, CHIP_I9XX|CHIP_I915, "Intel i915GM"}, \
{0x8086, 0x2772, CHIP_I9XX|CHIP_I915, "Intel i945G"}, \
{0x8086, 0x27A2, CHIP_I9XX|CHIP_I915, "Intel i945GM"}, \
{0x8086, 0x27AE, CHIP_I9XX|CHIP_I915, "Intel i945GME"}, \
{0x8086, 0x2972, CHIP_I9XX|CHIP_I965, "Intel i946GZ"}, \
{0x8086, 0x2982, CHIP_I9XX|CHIP_I965, "Intel i965G"}, \
{0x8086, 0x2992, CHIP_I9XX|CHIP_I965, "Intel i965Q"}, \
{0x8086, 0x29A2, CHIP_I9XX|CHIP_I965, "Intel i965G"}, \
{0x8086, 0x2A02, CHIP_I9XX|CHIP_I965, "Intel i965GM"}, \
{0x8086, 0x2A12, CHIP_I9XX|CHIP_I965, "Intel i965GME/GLE"}, \
{0x8086, 0x29C2, CHIP_I9XX|CHIP_I915, "Intel G33"}, \
{0x8086, 0x29B2, CHIP_I9XX|CHIP_I915, "Intel Q35"}, \
{0x8086, 0x29D2, CHIP_I9XX|CHIP_I915, "Intel Q33"}, \
{0x8086, 0x2A42, CHIP_I9XX|CHIP_I965, "Mobile Intel® GM45 Express Chipset"}, \
{0x8086, 0x2E02, CHIP_I9XX|CHIP_I965, "Intel Integrated Graphics Device"}, \
{0x8086, 0x2E12, CHIP_I9XX|CHIP_I965, "Intel Q45/Q43"}, \
{0x8086, 0x2E22, CHIP_I9XX|CHIP_I965, "Intel G45/G43"}, \
{0, 0, 0, NULL}
#define imagine_PCI_IDS \
@ -374,7 +496,7 @@
{0x10DE, 0x0309, NV30, "NVidia Quadro FX 1000"}, \
{0x10DE, 0x0311, NV30, "NVidia GeForce FX 5600 Ultra"}, \
{0x10DE, 0x0312, NV30, "NVidia GeForce FX 5600"}, \
{0x10DE, 0x0313, NV30, "NVidia 0x0313"},}, \
{0x10DE, 0x0313, NV30, "NVidia 0x0313"}, \
{0x10DE, 0x0314, NV30, "NVidia GeForce FX 5600SE"}, \
{0x10DE, 0x0316, NV30, "NVidia 0x0316"}, \
{0x10DE, 0x0317, NV30, "NVidia 0x0317"}, \
@ -484,3 +606,8 @@
{0x10DE, 0x009D, NV40, "NVidia Quadro FX 4500"}, \
{0x10DE, 0x009E, NV40, "NVidia 0x009E"}, \
{0, 0, 0, NULL}
#define xgi_PCI_IDS \
{0x18ca, 0x2200, 0, "XP5"}, \
{0x18ca, 0x0047, 0, "XP10 / XG47"}, \
{0, 0, 0, NULL}


@ -44,38 +44,44 @@ __FBSDID("$FreeBSD$");
#define SAREA_MAX 0x10000 /* 64kB */
#else
/* Intel 830M driver needs at least 8k SAREA */
#define SAREA_MAX 0x2000
#define SAREA_MAX 0x2000UL
#endif
/** Maximum number of drawables in the SAREA */
#define SAREA_MAX_DRAWABLES 256
#define SAREA_MAX_DRAWABLES 256
#define SAREA_DRAWABLE_CLAIMED_ENTRY 0x80000000
/** SAREA drawable */
typedef struct drm_sarea_drawable {
struct drm_sarea_drawable {
unsigned int stamp;
unsigned int flags;
} drm_sarea_drawable_t;
};
/** SAREA frame */
typedef struct drm_sarea_frame {
struct drm_sarea_frame {
unsigned int x;
unsigned int y;
unsigned int width;
unsigned int height;
unsigned int fullscreen;
} drm_sarea_frame_t;
};
/** SAREA */
typedef struct drm_sarea {
struct drm_sarea {
/** first thing is always the DRM locking structure */
drm_hw_lock_t lock;
struct drm_hw_lock lock;
/** \todo Use readers/writer lock for drm_sarea::drawable_lock */
drm_hw_lock_t drawable_lock;
drm_sarea_drawable_t drawableTable[SAREA_MAX_DRAWABLES]; /**< drawables */
drm_sarea_frame_t frame; /**< frame */
struct drm_hw_lock drawable_lock;
struct drm_sarea_drawable drawableTable[SAREA_MAX_DRAWABLES]; /**< drawables */
struct drm_sarea_frame frame; /**< frame */
drm_context_t dummy_context;
} drm_sarea_t;
};
#ifndef __KERNEL__
typedef struct drm_sarea_drawable drm_sarea_drawable_t;
typedef struct drm_sarea_frame drm_sarea_frame_t;
typedef struct drm_sarea drm_sarea_t;
#endif
#endif /* _DRM_SAREA_H_ */


@ -1,5 +1,3 @@
/* drm_scatter.h -- IOCTLs to manage scatter/gather memory -*- linux-c -*-
* Created: Mon Dec 18 23:20:54 2000 by gareth@valinux.com */
/*-
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
@ -32,6 +30,13 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/** @file drm_scatter.c
* Allocation of memory for scatter-gather mappings by the graphics chip.
*
* The memory allocated here is then made into an aperture in the card
* by drm_ati_pcigart_init().
*/
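To put this file in context, a hedged sketch of the driver-side flow (sizes and names here are illustrative): allocate scatter-gather backing with drm_sg_alloc(), then let the GART code turn dev->sg into an aperture.

static int example_setup_gart(struct drm_device *dev)
{
	drm_scatter_gather_t request;
	int ret;

	request.size = 32 * 1024 * 1024;	/* back a 32 MB aperture */
	ret = drm_sg_alloc(dev, &request);	/* sets request.handle */
	if (ret != 0)
		return ret;
	/* dev->sg now describes the pages; drm_ati_pcigart_init() (or a
	 * driver-specific equivalent) builds the card's GART table from it.
	 */
	return 0;
}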
#include "dev/drm/drmP.h"
#define DEBUG_SCATTER 0
@ -43,28 +48,21 @@ void drm_sg_cleanup(drm_sg_mem_t *entry)
free(entry, M_DRM);
}
int drm_sg_alloc(DRM_IOCTL_ARGS)
int drm_sg_alloc(struct drm_device * dev, drm_scatter_gather_t * request)
{
DRM_DEVICE;
drm_scatter_gather_t request;
drm_sg_mem_t *entry;
unsigned long pages;
int i;
DRM_DEBUG( "%s\n", __FUNCTION__ );
if ( dev->sg )
return EINVAL;
DRM_COPY_FROM_USER_IOCTL(request, (drm_scatter_gather_t *)data,
sizeof(request) );
entry = malloc(sizeof(*entry), M_DRM, M_WAITOK | M_ZERO);
if ( !entry )
return ENOMEM;
pages = round_page(request.size) / PAGE_SIZE;
DRM_DEBUG( "sg size=%ld pages=%ld\n", request.size, pages );
pages = round_page(request->size) / PAGE_SIZE;
DRM_DEBUG( "sg size=%ld pages=%ld\n", request->size, pages );
entry->pages = pages;
@ -89,11 +87,7 @@ int drm_sg_alloc(DRM_IOCTL_ARGS)
DRM_DEBUG( "sg alloc handle = %08lx\n", entry->handle );
entry->virtual = (void *)entry->handle;
request.handle = entry->handle;
DRM_COPY_TO_USER_IOCTL( (drm_scatter_gather_t *)data,
request,
sizeof(request) );
request->handle = entry->handle;
DRM_LOCK();
if (dev->sg) {
@ -107,21 +101,29 @@ int drm_sg_alloc(DRM_IOCTL_ARGS)
return 0;
}
int drm_sg_free(DRM_IOCTL_ARGS)
int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
DRM_DEVICE;
drm_scatter_gather_t request;
drm_sg_mem_t *entry;
drm_scatter_gather_t *request = data;
int ret;
DRM_COPY_FROM_USER_IOCTL( request, (drm_scatter_gather_t *)data,
sizeof(request) );
DRM_DEBUG( "%s\n", __FUNCTION__ );
ret = drm_sg_alloc(dev, request);
return ret;
}
int drm_sg_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_scatter_gather_t *request = data;
drm_sg_mem_t *entry;
DRM_LOCK();
entry = dev->sg;
dev->sg = NULL;
DRM_UNLOCK();
if ( !entry || entry->handle != request.handle )
if ( !entry || entry->handle != request->handle )
return EINVAL;
DRM_DEBUG( "sg free virtual = 0x%lx\n", entry->handle );


@ -24,6 +24,11 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/** @file drm_sysctl.c
* Implementation of various sysctls for controlling DRM behavior and reporting
* debug information.
*/
#include "dev/drm/drmP.h"
#include "dev/drm/drm.h"
@ -50,7 +55,7 @@ struct drm_sysctl_info {
char name[2];
};
int drm_sysctl_init(drm_device_t *dev)
int drm_sysctl_init(struct drm_device *dev)
{
struct drm_sysctl_info *info;
struct sysctl_oid *oid;
@ -104,7 +109,7 @@ int drm_sysctl_init(drm_device_t *dev)
return 0;
}
int drm_sysctl_cleanup(drm_device_t *dev)
int drm_sysctl_cleanup(struct drm_device *dev)
{
int error;
error = sysctl_ctx_free( &dev->sysctl->ctx );
@ -125,7 +130,7 @@ do { \
static int drm_name_info DRM_SYSCTL_HANDLER_ARGS
{
drm_device_t *dev = arg1;
struct drm_device *dev = arg1;
char buf[128];
int retcode;
int hasunique = 0;
@ -150,7 +155,7 @@ done:
static int drm_vm_info DRM_SYSCTL_HANDLER_ARGS
{
drm_device_t *dev = arg1;
struct drm_device *dev = arg1;
drm_local_map_t *map, *tempmaps;
const char *types[] = { "FB", "REG", "SHM", "AGP", "SG" };
const char *type, *yesno;
@ -209,7 +214,7 @@ done:
static int drm_bufs_info DRM_SYSCTL_HANDLER_ARGS
{
drm_device_t *dev = arg1;
struct drm_device *dev = arg1;
drm_device_dma_t *dma = dev->dma;
drm_device_dma_t tempdma;
int *templists;
@ -265,7 +270,7 @@ done:
static int drm_clients_info DRM_SYSCTL_HANDLER_ARGS
{
drm_device_t *dev = arg1;
struct drm_device *dev = arg1;
drm_file_t *priv, *tempprivs;
char buf[128];
int retcode;


@ -24,6 +24,10 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/** @file drm_vm.c
* Support code for mmaping of DRM maps.
*/
#include "dev/drm/drmP.h"
#include "dev/drm/drm.h"
@ -36,7 +40,7 @@ int drm_mmap(dev_t kdev, vm_offset_t offset, int prot)
paddr_t drm_mmap(dev_t kdev, off_t offset, int prot)
#endif
{
DRM_DEVICE;
struct drm_device *dev = drm_get_device_from_kdev(kdev);
drm_local_map_t *map;
drm_file_t *priv;
drm_map_type_t type;
@ -55,7 +59,7 @@ paddr_t drm_mmap(dev_t kdev, off_t offset, int prot)
}
if (!priv->authenticated)
return DRM_ERR(EACCES);
return EACCES;
if (dev->dma && offset >= 0 && offset < ptoa(dev->dma->page_count)) {
drm_device_dma_t *dma = dev->dma;
@ -66,9 +70,9 @@ paddr_t drm_mmap(dev_t kdev, off_t offset, int prot)
unsigned long page = offset >> PAGE_SHIFT;
unsigned long phys = dma->pagelist[page];
DRM_SPINUNLOCK(&dev->dma_lock);
#if defined(__FreeBSD__) && __FreeBSD_version >= 500102
*paddr = phys;
DRM_SPINUNLOCK(&dev->dma_lock);
return 0;
#else
return atop(phys);
@ -77,7 +81,6 @@ paddr_t drm_mmap(dev_t kdev, off_t offset, int prot)
DRM_SPINUNLOCK(&dev->dma_lock);
return -1;
}
DRM_SPINUNLOCK(&dev->dma_lock);
}
/* A sequential search of a linked list is

File diff suppressed because it is too large


@ -1,7 +1,7 @@
/*-
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
@ -9,11 +9,11 @@
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
@ -21,7 +21,7 @@
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*
*/
#include <sys/cdefs.h>
@ -46,7 +46,12 @@ typedef struct _drm_i915_init {
enum {
I915_INIT_DMA = 0x01,
I915_CLEANUP_DMA = 0x02,
I915_RESUME_DMA = 0x03
I915_RESUME_DMA = 0x03,
/* Since this struct isn't versioned, just used a new
* 'func' code to indicate the presence of dri2 sarea
* info. */
I915_INIT_DMA2 = 0x04
} func;
unsigned int mmio_offset;
int sarea_priv_offset;
@ -64,10 +69,11 @@ typedef struct _drm_i915_init {
unsigned int depth_pitch;
unsigned int cpp;
unsigned int chipset;
unsigned int sarea_handle;
} drm_i915_init_t;
typedef struct _drm_i915_sarea {
drm_tex_region_t texList[I915_NR_TEX_REGIONS + 1];
typedef struct drm_i915_sarea {
struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
int last_upload; /* last time texture was uploaded */
int last_enqueue; /* last time a buffer was enqueued */
int last_dispatch; /* age of the most recently dispatched buffer */
@ -107,8 +113,42 @@ typedef struct _drm_i915_sarea {
unsigned int depth_tiled;
unsigned int rotated_tiled;
unsigned int rotated2_tiled;
int planeA_x;
int planeA_y;
int planeA_w;
int planeA_h;
int planeB_x;
int planeB_y;
int planeB_w;
int planeB_h;
/* Triple buffering */
drm_handle_t third_handle;
int third_offset;
int third_size;
unsigned int third_tiled;
/* buffer object handles for the static buffers. May change
* over the lifetime of the client, though it doesn't in our current
* implementation.
*/
unsigned int front_bo_handle;
unsigned int back_bo_handle;
unsigned int third_bo_handle;
unsigned int depth_bo_handle;
} drm_i915_sarea_t;
/* Driver specific fence types and classes.
*/
/* The only fence class we support */
#define DRM_I915_FENCE_CLASS_ACCEL 0
/* Fence type that guarantees read-write flush */
#define DRM_I915_FENCE_TYPE_RW 2
/* MI_FLUSH programmed just before the fence */
#define DRM_I915_FENCE_FLAG_FLUSHED 0x01000000
/* Flags for perf_boxes
*/
#define I915_BOX_RING_EMPTY 0x1
@ -135,10 +175,14 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_DESTROY_HEAP 0x0c
#define DRM_I915_SET_VBLANK_PIPE 0x0d
#define DRM_I915_GET_VBLANK_PIPE 0x0e
#define DRM_I915_VBLANK_SWAP 0x0f
#define DRM_I915_MMIO 0x10
#define DRM_I915_HWS_ADDR 0x11
#define DRM_I915_EXECBUFFER 0x12
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
#define DRM_IOCTL_I915_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP)
#define DRM_IOCTL_I915_FLIP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FLIP, drm_i915_flip_t)
#define DRM_IOCTL_I915_BATCHBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
#define DRM_IOCTL_I915_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
#define DRM_IOCTL_I915_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
@ -151,18 +195,32 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_DESTROY_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t)
#define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
#define DRM_IOCTL_I915_MMIO DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_MMIO, drm_i915_mmio)
#define DRM_IOCTL_I915_EXECBUFFER DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_EXECBUFFER, struct drm_i915_execbuffer)
/* Asynchronous page flipping:
*/
typedef struct drm_i915_flip {
/*
* This is really talking about planes, and we could rename it
* except for the fact that some of the duplicated i915_drm.h files
* out there check for HAVE_I915_FLIP and so might pick up this
* version.
*/
int pipes;
} drm_i915_flip_t;
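A minimal sketch of driving this ioctl from userspace with libdrm's drmCommandWrite() (the file descriptor and the choice of plane 0 are assumptions made for the example):

#include <xf86drm.h>		/* drmCommandWrite() */
#include "i915_drm.h"		/* this header */

static int
request_flip(int fd)
{
	drm_i915_flip_t flip;

	flip.pipes = 0x1;	/* hypothetical: flip plane/pipe 0 */
	return drmCommandWrite(fd, DRM_I915_FLIP, &flip, sizeof(flip));
}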
/* Allow drivers to submit batchbuffers directly to hardware, relying
* on the security mechanisms provided by hardware.
*/
typedef struct _drm_i915_batchbuffer {
typedef struct drm_i915_batchbuffer {
int start; /* agp offset */
int used; /* nr bytes in use */
int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
int num_cliprects; /* multipass with multiple cliprects? */
drm_clip_rect_t __user *cliprects; /* pointer to userspace cliprects */
struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */
} drm_i915_batchbuffer_t;
/* As above, but pass a pointer to userspace buffer which can be
@ -174,7 +232,7 @@ typedef struct _drm_i915_cmdbuffer {
int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
int num_cliprects; /* multipass with multiple cliprects? */
drm_clip_rect_t __user *cliprects; /* pointer to userspace cliprects */
struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */
} drm_i915_cmdbuffer_t;
/* Userspace can request & wait on irq's:
@ -192,6 +250,7 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_IRQ_ACTIVE 1
#define I915_PARAM_ALLOW_BATCHBUFFER 2
#define I915_PARAM_LAST_DISPATCH 3
#define I915_PARAM_CHIPSET_ID 4
typedef struct drm_i915_getparam {
int param;
@ -235,7 +294,7 @@ typedef struct drm_i915_mem_init_heap {
* rotate):
*/
typedef struct drm_i915_mem_destroy_heap {
int region;
int region;
} drm_i915_mem_destroy_heap_t;
/* Allow X server to configure which pipes to monitor for vblank signals
@ -247,4 +306,96 @@ typedef struct drm_i915_vblank_pipe {
int pipe;
} drm_i915_vblank_pipe_t;
/* Schedule buffer swap at given vertical blank:
*/
typedef struct drm_i915_vblank_swap {
drm_drawable_t drawable;
enum drm_vblank_seq_type seqtype;
unsigned int sequence;
} drm_i915_vblank_swap_t;
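A hedged usage sketch (the drawable id, the relative sequence of 1, and the helper name are invented for illustration): the client fills in the drawable, a seqtype, and a sequence count, and on success the driver writes back the absolute vblank at which the swap was scheduled.

#include <xf86drm.h>		/* drmCommandWriteRead() */
#include "i915_drm.h"		/* this header */

static int
schedule_swap(int fd, drm_drawable_t draw)
{
	drm_i915_vblank_swap_t swap;

	swap.drawable = draw;
	swap.seqtype = _DRM_VBLANK_RELATIVE;	/* one vblank from now */
	swap.sequence = 1;
	/* On success, swap.sequence holds the absolute target vblank. */
	return drmCommandWriteRead(fd, DRM_I915_VBLANK_SWAP, &swap,
	    sizeof(swap));
}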
#define I915_MMIO_READ 0
#define I915_MMIO_WRITE 1
#define I915_MMIO_MAY_READ 0x1
#define I915_MMIO_MAY_WRITE 0x2
#define MMIO_REGS_IA_PRIMATIVES_COUNT 0
#define MMIO_REGS_IA_VERTICES_COUNT 1
#define MMIO_REGS_VS_INVOCATION_COUNT 2
#define MMIO_REGS_GS_PRIMITIVES_COUNT 3
#define MMIO_REGS_GS_INVOCATION_COUNT 4
#define MMIO_REGS_CL_PRIMITIVES_COUNT 5
#define MMIO_REGS_CL_INVOCATION_COUNT 6
#define MMIO_REGS_PS_INVOCATION_COUNT 7
#define MMIO_REGS_PS_DEPTH_COUNT 8
typedef struct drm_i915_mmio_entry {
unsigned int flag;
unsigned int offset;
unsigned int size;
} drm_i915_mmio_entry_t;
typedef struct drm_i915_mmio {
unsigned int read_write:1;
unsigned int reg:31;
void __user *data;
} drm_i915_mmio_t;
typedef struct drm_i915_hws_addr {
uint64_t addr;
} drm_i915_hws_addr_t;
/*
* Relocation header is 4 uint32_ts
* 0 - 32 bit reloc count
* 1 - 32-bit relocation type
* 2-3 - 64-bit user buffer handle ptr for another list of relocs.
*/
#define I915_RELOC_HEADER 4
/*
* type 0 relocation has 4-uint32_t stride
* 0 - offset into buffer
* 1 - delta to add in
* 2 - buffer handle
* 3 - reserved (for optimisations later).
*/
/*
* type 1 relocation has 4-uint32_t stride.
* Hangs off the first item in the op list.
* Performed after all validations are done.
* Try to group relocs into the same relocatee together for
* performance reasons.
* 0 - offset into buffer
* 1 - delta to add in
* 2 - buffer index in op list.
* 3 - relocatee index in op list.
*/
#define I915_RELOC_TYPE_0 0
#define I915_RELOC0_STRIDE 4
#define I915_RELOC_TYPE_1 1
#define I915_RELOC1_STRIDE 4
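To make the layout described above concrete, here is a hedged sketch of how userspace might assemble a type-0 relocation list (only the header and stride layout come from the comments above; the helper name and the caller-supplied reloc entries are invented):

#include <stdint.h>
#include <string.h>

/* Write a type-0 relocation list into 'dst', which must have room for
 * I915_RELOC_HEADER + n * I915_RELOC0_STRIDE dwords.  Each entry in
 * 'relocs' is {offset, delta, buffer handle, reserved}. */
static void
build_type0_relocs(uint32_t *dst, const uint32_t (*relocs)[I915_RELOC0_STRIDE],
    uint32_t n, uint64_t next_list_handle)
{
	dst[0] = n;			/* dword 0: reloc count */
	dst[1] = I915_RELOC_TYPE_0;	/* dword 1: relocation type */
	/* dwords 2-3: user handle of the next reloc list, if any */
	memcpy(&dst[2], &next_list_handle, sizeof(next_list_handle));
	memcpy(dst + I915_RELOC_HEADER, relocs,
	    n * I915_RELOC0_STRIDE * sizeof(uint32_t));
}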
struct drm_i915_op_arg {
uint64_t next;
uint64_t reloc_ptr;
int handled;
unsigned int pad64;
union {
struct drm_bo_op_req req;
struct drm_bo_arg_rep rep;
} d;
};
struct drm_i915_execbuffer {
uint64_t ops_list;
uint32_t num_buffers;
struct drm_i915_batchbuffer batch;
drm_context_t context; /* for lockless use in the future */
struct drm_fence_arg fence_arg;
};
#endif /* _I915_DRM_H_ */


@ -1,4 +1,4 @@
/* i915_drv.c -- ATI Radeon driver -*- linux-c -*-
/* i915_drv.c -- Intel i915 driver -*- linux-c -*-
* Created: Wed Feb 14 17:10:04 2001 by gareth@valinux.com
*/
/*-
@ -43,14 +43,44 @@ static drm_pci_id_list_t i915_pciidlist[] = {
i915_PCI_IDS
};
static void i915_configure(drm_device_t *dev)
static int i915_suspend(device_t nbdev)
{
dev->driver.buf_priv_size = 1; /* No dev_priv */
struct drm_device *dev = device_get_softc(nbdev);
struct drm_i915_private *dev_priv = dev->dev_private;
if (!dev || !dev_priv) {
DRM_ERROR("dev: 0x%lx, dev_priv: 0x%lx\n",
(unsigned long) dev, (unsigned long) dev_priv);
DRM_ERROR("DRM not initialized, aborting suspend.\n");
return -ENODEV;
}
i915_save_state(dev);
return (bus_generic_suspend(nbdev));
}
static int i915_resume(device_t nbdev)
{
struct drm_device *dev = device_get_softc(nbdev);
i915_restore_state(dev);
return (bus_generic_resume(nbdev));
}
static void i915_configure(struct drm_device *dev)
{
dev->driver.buf_priv_size = sizeof(drm_i915_private_t);
dev->driver.load = i915_driver_load;
dev->driver.unload = i915_driver_unload;
dev->driver.firstopen = i915_driver_firstopen;
dev->driver.preclose = i915_driver_preclose;
dev->driver.lastclose = i915_driver_lastclose;
dev->driver.device_is_agp = i915_driver_device_is_agp,
dev->driver.vblank_wait = i915_driver_vblank_wait;
dev->driver.device_is_agp = i915_driver_device_is_agp;
dev->driver.get_vblank_counter = i915_get_vblank_counter;
dev->driver.enable_vblank = i915_enable_vblank;
dev->driver.disable_vblank = i915_disable_vblank;
dev->driver.irq_preinstall = i915_driver_irq_preinstall;
dev->driver.irq_postinstall = i915_driver_irq_postinstall;
dev->driver.irq_uninstall = i915_driver_irq_uninstall;
@ -71,6 +101,7 @@ static void i915_configure(drm_device_t *dev)
dev->driver.use_mtrr = 1;
dev->driver.use_irq = 1;
dev->driver.use_vbl_irq = 1;
dev->driver.use_vbl_irq2 = 1;
}
#ifdef __FreeBSD__
@ -83,9 +114,9 @@ i915_probe(device_t dev)
static int
i915_attach(device_t nbdev)
{
drm_device_t *dev = device_get_softc(nbdev);
struct drm_device *dev = device_get_softc(nbdev);
bzero(dev, sizeof(drm_device_t));
bzero(dev, sizeof(struct drm_device));
i915_configure(dev);
return drm_attach(nbdev, i915_pciidlist);
}
@ -94,6 +125,8 @@ static device_method_t i915_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, i915_probe),
DEVMETHOD(device_attach, i915_attach),
DEVMETHOD(device_suspend, i915_suspend),
DEVMETHOD(device_resume, i915_resume),
DEVMETHOD(device_detach, drm_detach),
{ 0, 0 }
@ -106,7 +139,7 @@ static driver_t i915_driver = {
"drmsub",
#endif
i915_methods,
sizeof(drm_device_t)
sizeof(struct drm_device)
};
extern devclass_t drm_devclass;

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@ -46,11 +46,11 @@ __FBSDID("$FreeBSD$");
* block to allocate, and the ring is drained prior to allocations --
* in other words allocation is expensive.
*/
static void mark_block(drm_device_t * dev, struct mem_block *p, int in_use)
static void mark_block(struct drm_device * dev, struct mem_block *p, int in_use)
{
drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_tex_region_t *list;
struct drm_tex_region *list;
unsigned shift, nr;
unsigned start;
unsigned end;
@ -92,7 +92,7 @@ static void mark_block(drm_device_t * dev, struct mem_block *p, int in_use)
*/
static struct mem_block *split_block(struct mem_block *p, int start, int size,
DRMFILE filp)
struct drm_file *file_priv)
{
/* Maybe cut off the start of an existing block */
if (start > p->start) {
@ -102,7 +102,7 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size,
goto out;
newblock->start = start;
newblock->size = p->size - (start - p->start);
newblock->filp = NULL;
newblock->file_priv = NULL;
newblock->next = p->next;
newblock->prev = p;
p->next->prev = newblock;
@ -119,7 +119,7 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size,
goto out;
newblock->start = start + size;
newblock->size = p->size - size;
newblock->filp = NULL;
newblock->file_priv = NULL;
newblock->next = p->next;
newblock->prev = p;
p->next->prev = newblock;
@ -129,20 +129,20 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size,
out:
/* Our block is in the middle */
p->filp = filp;
p->file_priv = file_priv;
return p;
}
static struct mem_block *alloc_block(struct mem_block *heap, int size,
int align2, DRMFILE filp)
int align2, struct drm_file *file_priv)
{
struct mem_block *p;
int mask = (1 << align2) - 1;
for (p = heap->next; p != heap; p = p->next) {
int start = (p->start + mask) & ~mask;
if (p->filp == NULL && start + size <= p->start + p->size)
return split_block(p, start, size, filp);
if (p->file_priv == NULL && start + size <= p->start + p->size)
return split_block(p, start, size, file_priv);
}
return NULL;
@ -161,12 +161,12 @@ static struct mem_block *find_block(struct mem_block *heap, int start)
static void free_block(struct mem_block *p)
{
p->filp = NULL;
p->file_priv = NULL;
/* Assumes a single contiguous range. Needs a special filp in
/* Assumes a single contiguous range. Needs a special file_priv in
* 'heap' to stop it being subsumed.
*/
if (p->next->filp == NULL) {
if (p->next->file_priv == NULL) {
struct mem_block *q = p->next;
p->size += q->size;
p->next = q->next;
@ -174,7 +174,7 @@ static void free_block(struct mem_block *p)
drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
}
if (p->prev->filp == NULL) {
if (p->prev->file_priv == NULL) {
struct mem_block *q = p->prev;
q->size += p->size;
q->next = p->next;
@ -200,18 +200,19 @@ static int init_heap(struct mem_block **heap, int start, int size)
blocks->start = start;
blocks->size = size;
blocks->filp = NULL;
blocks->file_priv = NULL;
blocks->next = blocks->prev = *heap;
memset(*heap, 0, sizeof(**heap));
(*heap)->filp = (DRMFILE) - 1;
(*heap)->file_priv = (struct drm_file *) - 1;
(*heap)->next = (*heap)->prev = blocks;
return 0;
}
/* Free all blocks associated with the releasing file.
*/
void i915_mem_release(drm_device_t * dev, DRMFILE filp, struct mem_block *heap)
void i915_mem_release(struct drm_device * dev, struct drm_file *file_priv,
struct mem_block *heap)
{
struct mem_block *p;
@ -219,17 +220,17 @@ void i915_mem_release(drm_device_t * dev, DRMFILE filp, struct mem_block *heap)
return;
for (p = heap->next; p != heap; p = p->next) {
if (p->filp == filp) {
p->filp = NULL;
if (p->file_priv == file_priv) {
p->file_priv = NULL;
mark_block(dev, p, 0);
}
}
/* Assumes a single contiguous range. Needs a special filp in
/* Assumes a single contiguous range. Needs a special file_priv in
* 'heap' to stop it being subsumed.
*/
for (p = heap->next; p != heap; p = p->next) {
while (p->filp == NULL && p->next->filp == NULL) {
while (p->file_priv == NULL && p->next->file_priv == NULL) {
struct mem_block *q = p->next;
p->size += q->size;
p->next = q->next;
@ -270,129 +271,117 @@ static struct mem_block **get_heap(drm_i915_private_t * dev_priv, int region)
/* IOCTL HANDLERS */
int i915_mem_alloc(DRM_IOCTL_ARGS)
int i915_mem_alloc(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
DRM_DEVICE;
drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_mem_alloc_t alloc;
drm_i915_mem_alloc_t *alloc = data;
struct mem_block *block, **heap;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
return DRM_ERR(EINVAL);
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
DRM_COPY_FROM_USER_IOCTL(alloc, (drm_i915_mem_alloc_t __user *) data,
sizeof(alloc));
heap = get_heap(dev_priv, alloc.region);
heap = get_heap(dev_priv, alloc->region);
if (!heap || !*heap)
return DRM_ERR(EFAULT);
return -EFAULT;
/* Make things easier on ourselves: all allocations at least
* 4k aligned.
*/
if (alloc.alignment < 12)
alloc.alignment = 12;
if (alloc->alignment < 12)
alloc->alignment = 12;
block = alloc_block(*heap, alloc.size, alloc.alignment, filp);
block = alloc_block(*heap, alloc->size, alloc->alignment, file_priv);
if (!block)
return DRM_ERR(ENOMEM);
return -ENOMEM;
mark_block(dev, block, 1);
if (DRM_COPY_TO_USER(alloc.region_offset, &block->start, sizeof(int))) {
if (DRM_COPY_TO_USER(alloc->region_offset, &block->start,
sizeof(int))) {
DRM_ERROR("copy_to_user\n");
return DRM_ERR(EFAULT);
return -EFAULT;
}
return 0;
}
int i915_mem_free(DRM_IOCTL_ARGS)
int i915_mem_free(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
DRM_DEVICE;
drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_mem_free_t memfree;
drm_i915_mem_free_t *memfree = data;
struct mem_block *block, **heap;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
return DRM_ERR(EINVAL);
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
DRM_COPY_FROM_USER_IOCTL(memfree, (drm_i915_mem_free_t __user *) data,
sizeof(memfree));
heap = get_heap(dev_priv, memfree.region);
heap = get_heap(dev_priv, memfree->region);
if (!heap || !*heap)
return DRM_ERR(EFAULT);
return -EFAULT;
block = find_block(*heap, memfree.region_offset);
block = find_block(*heap, memfree->region_offset);
if (!block)
return DRM_ERR(EFAULT);
return -EFAULT;
if (block->filp != filp)
return DRM_ERR(EPERM);
if (block->file_priv != file_priv)
return -EPERM;
mark_block(dev, block, 0);
free_block(block);
return 0;
}
int i915_mem_init_heap(DRM_IOCTL_ARGS)
int i915_mem_init_heap(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
DRM_DEVICE;
drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_mem_init_heap_t initheap;
drm_i915_mem_init_heap_t *initheap = data;
struct mem_block **heap;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
return DRM_ERR(EINVAL);
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
DRM_COPY_FROM_USER_IOCTL(initheap,
(drm_i915_mem_init_heap_t __user *) data,
sizeof(initheap));
heap = get_heap(dev_priv, initheap.region);
heap = get_heap(dev_priv, initheap->region);
if (!heap)
return DRM_ERR(EFAULT);
return -EFAULT;
if (*heap) {
DRM_ERROR("heap already initialized?");
return DRM_ERR(EFAULT);
return -EFAULT;
}
return init_heap(heap, initheap.start, initheap.size);
return init_heap(heap, initheap->start, initheap->size);
}
int i915_mem_destroy_heap( DRM_IOCTL_ARGS )
int i915_mem_destroy_heap( struct drm_device *dev, void *data,
struct drm_file *file_priv )
{
DRM_DEVICE;
drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_mem_destroy_heap_t destroyheap;
drm_i915_mem_destroy_heap_t *destroyheap = data;
struct mem_block **heap;
if ( !dev_priv ) {
DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
return DRM_ERR(EINVAL);
DRM_ERROR( "called with no initialization\n" );
return -EINVAL;
}
DRM_COPY_FROM_USER_IOCTL( destroyheap, (drm_i915_mem_destroy_heap_t *)data,
sizeof(destroyheap) );
heap = get_heap( dev_priv, destroyheap.region );
heap = get_heap( dev_priv, destroyheap->region );
if (!heap) {
DRM_ERROR("get_heap failed");
return DRM_ERR(EFAULT);
return -EFAULT;
}
if (!*heap) {
DRM_ERROR("heap not initialized?");
return DRM_ERR(EFAULT);
return -EFAULT;
}
i915_mem_takedown( heap );

sys/dev/drm/i915_suspend.c (new file, 521 lines)

@ -0,0 +1,521 @@
/*-
*
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "dev/drm/drmP.h"
#include "dev/drm/drm.h"
#include "dev/drm/i915_drm.h"
#include "dev/drm/i915_drv.h"
static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (pipe == PIPE_A)
return (I915_READ(DPLL_A) & DPLL_VCO_ENABLE);
else
return (I915_READ(DPLL_B) & DPLL_VCO_ENABLE);
}
static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
u32 *array;
int i;
if (!i915_pipe_enabled(dev, pipe))
return;
if (pipe == PIPE_A)
array = dev_priv->save_palette_a;
else
array = dev_priv->save_palette_b;
for(i = 0; i < 256; i++)
array[i] = I915_READ(reg + (i << 2));
}
static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
u32 *array;
int i;
if (!i915_pipe_enabled(dev, pipe))
return;
if (pipe == PIPE_A)
array = dev_priv->save_palette_a;
else
array = dev_priv->save_palette_b;
for(i = 0; i < 256; i++)
I915_WRITE(reg + (i << 2), array[i]);
}
static u8 i915_read_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg)
{
struct drm_i915_private *dev_priv = dev->dev_private;
I915_WRITE8(index_port, reg);
return I915_READ8(data_port);
}
static u8 i915_read_ar(struct drm_device *dev, u16 st01, u8 reg, u16 palette_enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
I915_READ8(st01);
I915_WRITE8(VGA_AR_INDEX, palette_enable | reg);
return I915_READ8(VGA_AR_DATA_READ);
}
static void i915_write_ar(struct drm_device *dev, u16 st01, u8 reg, u8 val, u16 palette_enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
I915_READ8(st01);
I915_WRITE8(VGA_AR_INDEX, palette_enable | reg);
I915_WRITE8(VGA_AR_DATA_WRITE, val);
}
static void i915_write_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg, u8 val)
{
struct drm_i915_private *dev_priv = dev->dev_private;
I915_WRITE8(index_port, reg);
I915_WRITE8(data_port, val);
}
static void i915_save_vga(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
u16 cr_index, cr_data, st01;
/* VGA color palette registers */
dev_priv->saveDACMASK = I915_READ8(VGA_DACMASK);
/* DACCRX automatically increments during read */
I915_WRITE8(VGA_DACRX, 0);
/* Read 3 bytes of color data from each index */
for (i = 0; i < 256 * 3; i++)
dev_priv->saveDACDATA[i] = I915_READ8(VGA_DACDATA);
/* MSR bits */
dev_priv->saveMSR = I915_READ8(VGA_MSR_READ);
if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
cr_index = VGA_CR_INDEX_CGA;
cr_data = VGA_CR_DATA_CGA;
st01 = VGA_ST01_CGA;
} else {
cr_index = VGA_CR_INDEX_MDA;
cr_data = VGA_CR_DATA_MDA;
st01 = VGA_ST01_MDA;
}
/* CRT controller regs */
i915_write_indexed(dev, cr_index, cr_data, 0x11,
i915_read_indexed(dev, cr_index, cr_data, 0x11) &
(~0x80));
for (i = 0; i <= 0x24; i++)
dev_priv->saveCR[i] =
i915_read_indexed(dev, cr_index, cr_data, i);
/* Make sure we don't turn off CR group 0 writes */
dev_priv->saveCR[0x11] &= ~0x80;
/* Attribute controller registers */
I915_READ8(st01);
dev_priv->saveAR_INDEX = I915_READ8(VGA_AR_INDEX);
for (i = 0; i <= 0x14; i++)
dev_priv->saveAR[i] = i915_read_ar(dev, st01, i, 0);
I915_READ8(st01);
I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX);
I915_READ8(st01);
/* Graphics controller registers */
for (i = 0; i < 9; i++)
dev_priv->saveGR[i] =
i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i);
dev_priv->saveGR[0x10] =
i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10);
dev_priv->saveGR[0x11] =
i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11);
dev_priv->saveGR[0x18] =
i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18);
/* Sequencer registers */
for (i = 0; i < 8; i++)
dev_priv->saveSR[i] =
i915_read_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i);
}
static void i915_restore_vga(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
u16 cr_index, cr_data, st01;
/* MSR bits */
I915_WRITE8(VGA_MSR_WRITE, dev_priv->saveMSR);
if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
cr_index = VGA_CR_INDEX_CGA;
cr_data = VGA_CR_DATA_CGA;
st01 = VGA_ST01_CGA;
} else {
cr_index = VGA_CR_INDEX_MDA;
cr_data = VGA_CR_DATA_MDA;
st01 = VGA_ST01_MDA;
}
/* Sequencer registers, don't write SR07 */
for (i = 0; i < 7; i++)
i915_write_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i,
dev_priv->saveSR[i]);
/* CRT controller regs */
/* Enable CR group 0 writes */
i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]);
for (i = 0; i <= 0x24; i++)
i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->saveCR[i]);
/* Graphics controller regs */
for (i = 0; i < 9; i++)
i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i,
dev_priv->saveGR[i]);
i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10,
dev_priv->saveGR[0x10]);
i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11,
dev_priv->saveGR[0x11]);
i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18,
dev_priv->saveGR[0x18]);
/* Attribute controller registers */
I915_READ8(st01); /* switch back to index mode */
for (i = 0; i <= 0x14; i++)
i915_write_ar(dev, st01, i, dev_priv->saveAR[i], 0);
I915_READ8(st01); /* switch back to index mode */
I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX | 0x20);
I915_READ8(st01);
/* VGA color palette registers */
I915_WRITE8(VGA_DACMASK, dev_priv->saveDACMASK);
/* DACCRX automatically increments during read */
I915_WRITE8(VGA_DACWX, 0);
/* Read 3 bytes of color data from each index */
for (i = 0; i < 256 * 3; i++)
I915_WRITE8(VGA_DACDATA, dev_priv->saveDACDATA[i]);
}
int i915_save_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
#if defined(__FreeBSD__)
dev_priv->saveLBB = (u8) pci_read_config(dev->device, LBB, 1);
#else
pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
#endif
/* Display arbitration control */
dev_priv->saveDSPARB = I915_READ(DSPARB);
/* Pipe & plane A info */
dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
dev_priv->saveFPA0 = I915_READ(FPA0);
dev_priv->saveFPA1 = I915_READ(FPA1);
dev_priv->saveDPLL_A = I915_READ(DPLL_A);
if (IS_I965G(dev))
dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
dev_priv->saveHSYNC_A = I915_READ(HSYNC_A);
dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A);
dev_priv->saveVBLANK_A = I915_READ(VBLANK_A);
dev_priv->saveVSYNC_A = I915_READ(VSYNC_A);
dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
dev_priv->saveDSPACNTR = I915_READ(DSPACNTR);
dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE);
dev_priv->saveDSPASIZE = I915_READ(DSPASIZE);
dev_priv->saveDSPAPOS = I915_READ(DSPAPOS);
dev_priv->saveDSPAADDR = I915_READ(DSPAADDR);
if (IS_I965G(dev)) {
dev_priv->saveDSPASURF = I915_READ(DSPASURF);
dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF);
}
i915_save_palette(dev, PIPE_A);
dev_priv->savePIPEASTAT = I915_READ(PIPEASTAT);
/* Pipe & plane B info */
dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC);
dev_priv->saveFPB0 = I915_READ(FPB0);
dev_priv->saveFPB1 = I915_READ(FPB1);
dev_priv->saveDPLL_B = I915_READ(DPLL_B);
if (IS_I965G(dev))
dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
dev_priv->saveHSYNC_B = I915_READ(HSYNC_B);
dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B);
dev_priv->saveVBLANK_B = I915_READ(VBLANK_B);
dev_priv->saveVSYNC_B = I915_READ(VSYNC_B);
dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
dev_priv->saveDSPBCNTR = I915_READ(DSPBCNTR);
dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE);
dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE);
dev_priv->saveDSPBPOS = I915_READ(DSPBPOS);
dev_priv->saveDSPBADDR = I915_READ(DSPBADDR);
if (IS_I965GM(dev) || IS_GM45(dev)) {
dev_priv->saveDSPBSURF = I915_READ(DSPBSURF);
dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF);
}
i915_save_palette(dev, PIPE_B);
dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT);
/* CRT state */
dev_priv->saveADPA = I915_READ(ADPA);
/* LVDS state */
dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL);
dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
if (IS_I965G(dev))
dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
if (IS_MOBILE(dev) && !IS_I830(dev))
dev_priv->saveLVDS = I915_READ(LVDS);
if (!IS_I830(dev) && !IS_845G(dev))
dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
dev_priv->savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR);
/* FIXME: save TV & SDVO state */
/* FBC state */
dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
/* Interrupt state */
dev_priv->saveIIR = I915_READ(IIR);
dev_priv->saveIER = I915_READ(IER);
dev_priv->saveIMR = I915_READ(IMR);
/* VGA state */
dev_priv->saveVGA0 = I915_READ(VGA0);
dev_priv->saveVGA1 = I915_READ(VGA1);
dev_priv->saveVGA_PD = I915_READ(VGA_PD);
dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
/* Clock gating state */
dev_priv->saveD_STATE = I915_READ(D_STATE);
dev_priv->saveCG_2D_DIS = I915_READ(CG_2D_DIS);
/* Cache mode state */
dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
/* Memory Arbitration state */
dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
/* Scratch space */
for (i = 0; i < 16; i++) {
dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2));
dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
}
for (i = 0; i < 3; i++)
dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
i915_save_vga(dev);
return 0;
}
int i915_restore_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
#if defined(__FreeBSD__)
pci_write_config(dev->device, LBB, dev_priv->saveLBB, 1);
#else
pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
#endif
I915_WRITE(DSPARB, dev_priv->saveDSPARB);
/* Pipe & plane A info */
/* Prime the clock */
if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
I915_WRITE(DPLL_A, dev_priv->saveDPLL_A &
~DPLL_VCO_ENABLE);
DRM_UDELAY(150);
}
I915_WRITE(FPA0, dev_priv->saveFPA0);
I915_WRITE(FPA1, dev_priv->saveFPA1);
/* Actually enable it */
I915_WRITE(DPLL_A, dev_priv->saveDPLL_A);
DRM_UDELAY(150);
if (IS_I965G(dev))
I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
DRM_UDELAY(150);
/* Restore mode */
I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A);
I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A);
I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A);
I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
/* Restore plane info */
I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE);
I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS);
I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC);
I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR);
I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE);
if (IS_I965G(dev)) {
I915_WRITE(DSPASURF, dev_priv->saveDSPASURF);
I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF);
}
I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF);
i915_restore_palette(dev, PIPE_A);
/* Enable the plane */
I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR);
I915_WRITE(DSPAADDR, I915_READ(DSPAADDR));
/* Pipe & plane B info */
if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
I915_WRITE(DPLL_B, dev_priv->saveDPLL_B &
~DPLL_VCO_ENABLE);
DRM_UDELAY(150);
}
I915_WRITE(FPB0, dev_priv->saveFPB0);
I915_WRITE(FPB1, dev_priv->saveFPB1);
/* Actually enable it */
I915_WRITE(DPLL_B, dev_priv->saveDPLL_B);
DRM_UDELAY(150);
if (IS_I965G(dev))
I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
DRM_UDELAY(150);
/* Restore mode */
I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B);
I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B);
I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B);
I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
/* Restore plane info */
I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE);
I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS);
I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC);
I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR);
I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
if (IS_I965G(dev)) {
I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF);
I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
}
I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF);
i915_restore_palette(dev, PIPE_B);
/* Enable the plane */
I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
I915_WRITE(DSPBADDR, I915_READ(DSPBADDR));
/* CRT state */
I915_WRITE(ADPA, dev_priv->saveADPA);
/* LVDS state */
if (IS_I965G(dev))
I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
if (IS_MOBILE(dev) && !IS_I830(dev))
I915_WRITE(LVDS, dev_priv->saveLVDS);
if (!IS_I830(dev) && !IS_845G(dev))
I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR);
I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
/* FIXME: restore TV & SDVO state */
/* FBC info */
I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
/* VGA state */
I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
I915_WRITE(VGA0, dev_priv->saveVGA0);
I915_WRITE(VGA1, dev_priv->saveVGA1);
I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
DRM_UDELAY(150);
/* Clock gating state */
I915_WRITE (D_STATE, dev_priv->saveD_STATE);
I915_WRITE (CG_2D_DIS, dev_priv->saveCG_2D_DIS);
/* Cache mode state */
I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
/* Memory arbitration state */
I915_WRITE (MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000);
for (i = 0; i < 16; i++) {
I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]);
I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i+7]);
}
for (i = 0; i < 3; i++)
I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
i915_restore_vga(dev);
return 0;
}

File diff suppressed because it is too large.


@ -133,7 +133,7 @@ typedef struct drm_mach64_sarea {
/* The current cliprects, or a subset thereof.
*/
drm_clip_rect_t boxes[MACH64_NR_SAREA_CLIPRECTS];
struct drm_clip_rect boxes[MACH64_NR_SAREA_CLIPRECTS];
unsigned int nbox;
/* Counters for client-side throttling of rendering clients.
@ -142,7 +142,7 @@ typedef struct drm_mach64_sarea {
/* Texture memory LRU.
*/
drm_tex_region_t tex_list[MACH64_NR_TEX_HEAPS][MACH64_NR_TEX_REGIONS +
struct drm_tex_region tex_list[MACH64_NR_TEX_HEAPS][MACH64_NR_TEX_REGIONS +
1];
unsigned int tex_age[MACH64_NR_TEX_HEAPS];
int ctx_owner;
@ -240,7 +240,7 @@ typedef struct drm_mach64_vertex {
} drm_mach64_vertex_t;
typedef struct drm_mach64_blit {
int idx;
void *buf;
int pitch;
int offset;
int format;


@ -47,11 +47,13 @@ static drm_pci_id_list_t mach64_pciidlist[] = {
mach64_PCI_IDS
};
static void mach64_configure(drm_device_t *dev)
static void mach64_configure(struct drm_device *dev)
{
dev->driver.buf_priv_size = 1; /* No dev_priv */
dev->driver.lastclose = mach64_driver_lastclose;
dev->driver.vblank_wait = mach64_driver_vblank_wait;
dev->driver.get_vblank_counter = mach64_get_vblank_counter;
dev->driver.enable_vblank = mach64_enable_vblank;
dev->driver.disable_vblank = mach64_disable_vblank;
dev->driver.irq_preinstall = mach64_driver_irq_preinstall;
dev->driver.irq_postinstall = mach64_driver_irq_postinstall;
dev->driver.irq_uninstall = mach64_driver_irq_uninstall;
@ -86,9 +88,9 @@ mach64_probe(device_t dev)
static int
mach64_attach(device_t nbdev)
{
drm_device_t *dev = device_get_softc(nbdev);
struct drm_device *dev = device_get_softc(nbdev);
bzero(dev, sizeof(drm_device_t));
bzero(dev, sizeof(struct drm_device));
mach64_configure(dev);
return drm_attach(nbdev, mach64_pciidlist);
}
@ -105,7 +107,7 @@ static device_method_t mach64_methods[] = {
static driver_t mach64_driver = {
"drm",
mach64_methods,
sizeof(drm_device_t)
sizeof(struct drm_device)
};
extern devclass_t drm_devclass;


@ -29,7 +29,7 @@
* Gareth Hughes <gareth@valinux.com>
* Frank C. Earl <fearl@airmail.net>
* Leif Delgass <ldelgass@retinalburn.net>
* Jos<EFBFBD>Fonseca <j_r_fonseca@yahoo.co.uk>
* José Fonseca <j_r_fonseca@yahoo.co.uk>
*/
#include <sys/cdefs.h>
@ -45,9 +45,9 @@ __FBSDID("$FreeBSD$");
#define DRIVER_NAME "mach64"
#define DRIVER_DESC "DRM module for the ATI Rage Pro"
#define DRIVER_DATE "20020904"
#define DRIVER_DATE "20060718"
#define DRIVER_MAJOR 1
#define DRIVER_MAJOR 2
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0
@ -58,13 +58,12 @@ __FBSDID("$FreeBSD$");
typedef struct drm_mach64_freelist {
struct list_head list; /* List pointers for free_list, placeholders, or pending list */
drm_buf_t *buf; /* Pointer to the buffer */
struct drm_buf *buf; /* Pointer to the buffer */
int discard; /* This flag is set when we're done (re)using a buffer */
u32 ring_ofs; /* dword offset in ring of last descriptor for this buffer */
} drm_mach64_freelist_t;
typedef struct drm_mach64_descriptor_ring {
drm_dma_handle_t *dmah; /* Handle to pci dma memory */
void *start; /* write pointer (cpu address) to start of descriptor ring */
u32 start_addr; /* bus address of beginning of descriptor ring */
int size; /* size of ring in bytes */
@ -100,6 +99,8 @@ typedef struct drm_mach64_private {
unsigned int depth_bpp;
unsigned int depth_offset, depth_pitch;
atomic_t vbl_received; /**< Number of vblanks received. */
u32 front_offset_pitch;
u32 back_offset_pitch;
u32 depth_offset_pitch;
@ -112,20 +113,27 @@ typedef struct drm_mach64_private {
drm_local_map_t *agp_textures;
} drm_mach64_private_t;
extern drm_ioctl_desc_t mach64_ioctls[];
extern struct drm_ioctl_desc mach64_ioctls[];
extern int mach64_max_ioctl;
/* mach64_dma.c */
extern int mach64_dma_init(DRM_IOCTL_ARGS);
extern int mach64_dma_idle(DRM_IOCTL_ARGS);
extern int mach64_dma_flush(DRM_IOCTL_ARGS);
extern int mach64_engine_reset(DRM_IOCTL_ARGS);
extern int mach64_dma_buffers(DRM_IOCTL_ARGS);
extern void mach64_driver_lastclose(drm_device_t * dev);
extern int mach64_dma_init(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int mach64_dma_idle(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int mach64_dma_flush(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int mach64_engine_reset(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int mach64_dma_buffers(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern void mach64_driver_lastclose(struct drm_device * dev);
extern int mach64_init_freelist(drm_device_t * dev);
extern void mach64_destroy_freelist(drm_device_t * dev);
extern drm_buf_t *mach64_freelist_get(drm_mach64_private_t * dev_priv);
extern int mach64_init_freelist(struct drm_device * dev);
extern void mach64_destroy_freelist(struct drm_device * dev);
extern struct drm_buf *mach64_freelist_get(drm_mach64_private_t * dev_priv);
extern int mach64_freelist_put(drm_mach64_private_t * dev_priv,
struct drm_buf * copy_buf);
extern int mach64_do_wait_for_fifo(drm_mach64_private_t * dev_priv,
int entries);
@ -137,23 +145,34 @@ extern void mach64_dump_engine_info(drm_mach64_private_t * dev_priv);
extern void mach64_dump_ring_info(drm_mach64_private_t * dev_priv);
extern int mach64_do_engine_reset(drm_mach64_private_t * dev_priv);
extern int mach64_add_buf_to_ring(drm_mach64_private_t *dev_priv,
drm_mach64_freelist_t *_entry);
extern int mach64_add_hostdata_buf_to_ring(drm_mach64_private_t *dev_priv,
drm_mach64_freelist_t *_entry);
extern int mach64_do_dma_idle(drm_mach64_private_t * dev_priv);
extern int mach64_do_dma_flush(drm_mach64_private_t * dev_priv);
extern int mach64_do_cleanup_dma(drm_device_t * dev);
extern int mach64_do_cleanup_dma(struct drm_device * dev);
/* mach64_state.c */
extern int mach64_dma_clear(DRM_IOCTL_ARGS);
extern int mach64_dma_swap(DRM_IOCTL_ARGS);
extern int mach64_dma_vertex(DRM_IOCTL_ARGS);
extern int mach64_dma_blit(DRM_IOCTL_ARGS);
extern int mach64_get_param(DRM_IOCTL_ARGS);
extern int mach64_driver_vblank_wait(drm_device_t * dev,
unsigned int *sequence);
extern int mach64_dma_clear(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int mach64_dma_swap(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int mach64_dma_vertex(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int mach64_dma_blit(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int mach64_get_param(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern u32 mach64_get_vblank_counter(struct drm_device *dev, int crtc);
extern int mach64_enable_vblank(struct drm_device *dev, int crtc);
extern void mach64_disable_vblank(struct drm_device *dev, int crtc);
extern irqreturn_t mach64_driver_irq_handler(DRM_IRQ_ARGS);
extern void mach64_driver_irq_preinstall(drm_device_t * dev);
extern void mach64_driver_irq_postinstall(drm_device_t * dev);
extern void mach64_driver_irq_uninstall(drm_device_t * dev);
extern void mach64_driver_irq_preinstall(struct drm_device *dev);
extern int mach64_driver_irq_postinstall(struct drm_device *dev);
extern void mach64_driver_irq_uninstall(struct drm_device *dev);
/* ================================================================
* Registers
@ -163,14 +182,14 @@ extern void mach64_driver_irq_uninstall(drm_device_t * dev);
#define MACH64_AGP_CNTL 0x014c
#define MACH64_ALPHA_TST_CNTL 0x0550
#define MACH64_DSP_CONFIG 0x0420
#define MACH64_DSP_ON_OFF 0x0424
#define MACH64_EXT_MEM_CNTL 0x04ac
#define MACH64_GEN_TEST_CNTL 0x04d0
#define MACH64_HW_DEBUG 0x047c
#define MACH64_MEM_ADDR_CONFIG 0x0434
#define MACH64_MEM_BUF_CNTL 0x042c
#define MACH64_MEM_CNTL 0x04b0
#define MACH64_DSP_CONFIG 0x0420
#define MACH64_DSP_ON_OFF 0x0424
#define MACH64_EXT_MEM_CNTL 0x04ac
#define MACH64_GEN_TEST_CNTL 0x04d0
#define MACH64_HW_DEBUG 0x047c
#define MACH64_MEM_ADDR_CONFIG 0x0434
#define MACH64_MEM_BUF_CNTL 0x042c
#define MACH64_MEM_CNTL 0x04b0
#define MACH64_BM_ADDR 0x0648
#define MACH64_BM_COMMAND 0x0188
@ -197,16 +216,16 @@ extern void mach64_driver_irq_uninstall(drm_device_t * dev);
#define MACH64_CLR_CMP_CLR 0x0700
#define MACH64_CLR_CMP_CNTL 0x0708
#define MACH64_CLR_CMP_MASK 0x0704
#define MACH64_CONFIG_CHIP_ID 0x04e0
#define MACH64_CONFIG_CNTL 0x04dc
#define MACH64_CONFIG_STAT0 0x04e4
#define MACH64_CONFIG_STAT1 0x0494
#define MACH64_CONFIG_STAT2 0x0498
#define MACH64_CONFIG_CHIP_ID 0x04e0
#define MACH64_CONFIG_CNTL 0x04dc
#define MACH64_CONFIG_STAT0 0x04e4
#define MACH64_CONFIG_STAT1 0x0494
#define MACH64_CONFIG_STAT2 0x0498
#define MACH64_CONTEXT_LOAD_CNTL 0x072c
#define MACH64_CONTEXT_MASK 0x0720
#define MACH64_COMPOSITE_SHADOW_ID 0x0798
#define MACH64_CRC_SIG 0x04e8
#define MACH64_CUSTOM_MACRO_CNTL 0x04d4
#define MACH64_CRC_SIG 0x04e8
#define MACH64_CUSTOM_MACRO_CNTL 0x04d4
#define MACH64_DP_BKGD_CLR 0x06c0
#define MACH64_DP_FOG_CLR 0x06c4
@ -350,7 +369,7 @@ extern void mach64_driver_irq_uninstall(drm_device_t * dev);
#define MACH64_TEX_0_OFF 0x05c0
#define MACH64_TEX_CNTL 0x0774
#define MACH64_TEX_SIZE_PITCH 0x0770
#define MACH64_TIMER_CONFIG 0x0428
#define MACH64_TIMER_CONFIG 0x0428
#define MACH64_VERTEX_1_ARGB 0x0254
#define MACH64_VERTEX_1_S 0x0240
@ -513,95 +532,17 @@ extern void mach64_driver_irq_uninstall(drm_device_t * dev);
#define MACH64_APERTURE_OFFSET 0x7ff800 /* frame-buffer offset for gui-masters */
/* ================================================================
* Misc helper macros
* Ring operations
*
* Since the Mach64 bus master engine requires polling, these functions end
* up being called frequently, hence being inline.
*/
static __inline__ void mach64_set_dma_eol(volatile u32 * addr)
{
#if defined(__i386__)
int nr = 31;
/* Taken from include/asm-i386/bitops.h linux header */
__asm__ __volatile__("lock;" "btsl %1,%0":"=m"(*addr)
:"Ir"(nr));
#elif defined(__powerpc__)
u32 old;
u32 mask = cpu_to_le32(MACH64_DMA_EOL);
/* Taken from the include/asm-ppc/bitops.h linux header */
__asm__ __volatile__("\n\
1: lwarx %0,0,%3 \n\
or %0,%0,%2 \n\
stwcx. %0,0,%3 \n\
bne- 1b":"=&r"(old), "=m"(*addr)
:"r"(mask), "r"(addr), "m"(*addr)
:"cc");
#elif defined(__alpha__)
u32 temp;
u32 mask = MACH64_DMA_EOL;
/* Taken from the include/asm-alpha/bitops.h linux header */
__asm__ __volatile__("1: ldl_l %0,%3\n"
" bis %0,%2,%0\n"
" stl_c %0,%1\n"
" beq %0,2f\n"
".subsection 2\n"
"2: br 1b\n"
".previous":"=&r"(temp), "=m"(*addr)
:"Ir"(mask), "m"(*addr));
#else
u32 mask = cpu_to_le32(MACH64_DMA_EOL);
*addr |= mask;
#endif
}
static __inline__ void mach64_clear_dma_eol(volatile u32 * addr)
{
#if defined(__i386__)
int nr = 31;
/* Taken from include/asm-i386/bitops.h linux header */
__asm__ __volatile__("lock;" "btrl %1,%0":"=m"(*addr)
:"Ir"(nr));
#elif defined(__powerpc__)
u32 old;
u32 mask = cpu_to_le32(MACH64_DMA_EOL);
/* Taken from the include/asm-ppc/bitops.h linux header */
__asm__ __volatile__("\n\
1: lwarx %0,0,%3 \n\
andc %0,%0,%2 \n\
stwcx. %0,0,%3 \n\
bne- 1b":"=&r"(old), "=m"(*addr)
:"r"(mask), "r"(addr), "m"(*addr)
:"cc");
#elif defined(__alpha__)
u32 temp;
u32 mask = ~MACH64_DMA_EOL;
/* Taken from the include/asm-alpha/bitops.h linux header */
__asm__ __volatile__("1: ldl_l %0,%3\n"
" and %0,%2,%0\n"
" stl_c %0,%1\n"
" beq %0,2f\n"
".subsection 2\n"
"2: br 1b\n"
".previous":"=&r"(temp), "=m"(*addr)
:"Ir"(mask), "m"(*addr));
#else
u32 mask = cpu_to_le32(~MACH64_DMA_EOL);
*addr &= mask;
#endif
}
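The two helpers above exist only to set or clear the MACH64_DMA_EOL bit atomically in a live ring descriptor. As a purely illustrative aside (not part of the driver, which predates C11, and glossing over the cpu_to_le32() byte swapping done above), the same effect could be written portably with C11 atomics:

#include <stdatomic.h>
#include <stdint.h>

/* Atomically set or clear the end-of-list flag in a ring descriptor.
 * 'eol_mask' would be the (byte-swapped) MACH64_DMA_EOL bit. */
static inline void
dma_eol_set(_Atomic uint32_t *addr, uint32_t eol_mask)
{
	atomic_fetch_or(addr, eol_mask);
}

static inline void
dma_eol_clear(_Atomic uint32_t *addr, uint32_t eol_mask)
{
	atomic_fetch_and(addr, ~eol_mask);
}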
static __inline__ void mach64_ring_start(drm_mach64_private_t * dev_priv)
{
drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
DRM_DEBUG("%s: head_addr: 0x%08x head: %d tail: %d space: %d\n",
__FUNCTION__,
DRM_DEBUG("head_addr: 0x%08x head: %d tail: %d space: %d\n",
ring->head_addr, ring->head, ring->tail, ring->space);
if (mach64_do_wait_for_idle(dev_priv) < 0) {
@ -627,8 +568,7 @@ static __inline__ void mach64_ring_start(drm_mach64_private_t * dev_priv)
static __inline__ void mach64_ring_resume(drm_mach64_private_t * dev_priv,
drm_mach64_descriptor_ring_t * ring)
{
DRM_DEBUG("%s: head_addr: 0x%08x head: %d tail: %d space: %d\n",
__FUNCTION__,
DRM_DEBUG("head_addr: 0x%08x head: %d tail: %d space: %d\n",
ring->head_addr, ring->head, ring->tail, ring->space);
/* reset descriptor table ring head */
@ -647,8 +587,7 @@ static __inline__ void mach64_ring_resume(drm_mach64_private_t * dev_priv,
MACH64_WRITE(MACH64_DST_HEIGHT_WIDTH, 0);
if (dev_priv->driver_mode == MACH64_MODE_DMA_SYNC) {
if ((mach64_do_wait_for_idle(dev_priv)) < 0) {
DRM_ERROR("%s: idle failed, resetting engine\n",
__FUNCTION__);
DRM_ERROR("idle failed, resetting engine\n");
mach64_dump_engine_info(dev_priv);
mach64_do_engine_reset(dev_priv);
return;
@ -658,11 +597,22 @@ static __inline__ void mach64_ring_resume(drm_mach64_private_t * dev_priv,
}
}
/**
* Poll the ring head and make sure the bus master is alive.
*
* Mach64's bus master engine will stop if there are no more entries to process.
* This function polls the engine for the last processed entry and calls
* mach64_ring_resume if there is an unprocessed entry.
*
* Note also that, since we update the ring tail while the bus master engine is
* in operation, it is possible that the last tail update was too late to be
* processed, and the bus master engine stops at the previous tail position.
* Therefore it is important to call this function frequently.
*/
static __inline__ void mach64_ring_tick(drm_mach64_private_t * dev_priv,
drm_mach64_descriptor_ring_t * ring)
{
DRM_DEBUG("%s: head_addr: 0x%08x head: %d tail: %d space: %d\n",
__FUNCTION__,
DRM_DEBUG("head_addr: 0x%08x head: %d tail: %d space: %d\n",
ring->head_addr, ring->head, ring->tail, ring->space);
if (!dev_priv->ring_running) {
@ -709,8 +659,7 @@ static __inline__ void mach64_ring_tick(drm_mach64_private_t * dev_priv,
static __inline__ void mach64_ring_stop(drm_mach64_private_t * dev_priv)
{
DRM_DEBUG("%s: head_addr: 0x%08x head: %d tail: %d space: %d\n",
__FUNCTION__,
DRM_DEBUG("head_addr: 0x%08x head: %d tail: %d space: %d\n",
dev_priv->ring.head_addr, dev_priv->ring.head,
dev_priv->ring.tail, dev_priv->ring.space);
@ -731,7 +680,7 @@ mach64_update_ring_snapshot(drm_mach64_private_t * dev_priv)
{
drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
DRM_DEBUG("%s\n", __FUNCTION__);
DRM_DEBUG("\n");
mach64_ring_tick(dev_priv, ring);
@ -741,71 +690,23 @@ mach64_update_ring_snapshot(drm_mach64_private_t * dev_priv)
}
}
/* ================================================================
* DMA descriptor ring macros
*/
#define RING_LOCALS \
int _ring_tail, _ring_write; unsigned int _ring_mask; volatile u32 *_ring
#define RING_WRITE_OFS _ring_write
#define BEGIN_RING( n ) \
do { \
if ( MACH64_VERBOSE ) { \
DRM_INFO( "BEGIN_RING( %d ) in %s\n", \
(n), __FUNCTION__ ); \
} \
if ( dev_priv->ring.space <= (n) * sizeof(u32) ) { \
int ret; \
if ((ret=mach64_wait_ring( dev_priv, (n) * sizeof(u32))) < 0 ) { \
DRM_ERROR( "wait_ring failed, resetting engine\n"); \
mach64_dump_engine_info( dev_priv ); \
mach64_do_engine_reset( dev_priv ); \
return ret; \
} \
} \
dev_priv->ring.space -= (n) * sizeof(u32); \
_ring = (u32 *) dev_priv->ring.start; \
_ring_tail = _ring_write = dev_priv->ring.tail; \
_ring_mask = dev_priv->ring.tail_mask; \
} while (0)
#define OUT_RING( x ) \
do { \
if ( MACH64_VERBOSE ) { \
DRM_INFO( " OUT_RING( 0x%08x ) at 0x%x\n", \
(unsigned int)(x), _ring_write ); \
} \
_ring[_ring_write++] = cpu_to_le32( x ); \
_ring_write &= _ring_mask; \
} while (0)
#define ADVANCE_RING() \
do { \
if ( MACH64_VERBOSE ) { \
DRM_INFO( "ADVANCE_RING() wr=0x%06x tail=0x%06x\n", \
_ring_write, _ring_tail ); \
} \
DRM_MEMORYBARRIER(); \
mach64_clear_dma_eol( &_ring[(_ring_tail - 2) & _ring_mask] ); \
DRM_MEMORYBARRIER(); \
dev_priv->ring.tail = _ring_write; \
mach64_ring_tick( dev_priv, &(dev_priv)->ring ); \
} while (0)
/* ================================================================
* DMA macros
*
* Mach64's ring buffer doesn't take register writes directly. These
* have to be written indirectly in DMA buffers. These macros simplify
* the task of setting up a buffer, writing commands to it, and
* queuing the buffer in the ring.
*/
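These macros expand to statements that can return on error, so they are used inside int-returning driver functions. A hedged sketch of the typical pattern follows; the helper name and the register write are invented, and DMAOUTREG is the register-write macro defined further below.

/* Hypothetical helper: emit a single register write through the DMA path. */
static int
mach64_hypothetical_emit(struct drm_device *dev, struct drm_file *file_priv)
{
	drm_mach64_private_t *dev_priv = dev->dev_private;
	DMALOCALS;

	DMAGETPTR(file_priv, dev_priv, 1);	/* grab a free buffer */
	DMAOUTREG(MACH64_CLR_CMP_CNTL, 0);	/* queue one register write */
	DMAADVANCE(dev_priv, 1);		/* submit; discard when done */

	return 0;
}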
#define DMALOCALS \
drm_mach64_freelist_t *_entry = NULL; \
drm_buf_t *_buf = NULL; \
struct drm_buf *_buf = NULL; \
u32 *_buf_wptr; int _outcount
#define GETBUFPTR( __buf ) \
((dev_priv->is_pci) ? \
((u32 *)(__buf)->address) : \
((dev_priv->is_pci) ? \
((u32 *)(__buf)->address) : \
((u32 *)((char *)dev_priv->dev_buffers->handle + (__buf)->offset)))
#define GETBUFADDR( __buf ) ((u32)(__buf)->bus_address)
@ -815,20 +716,20 @@ do { \
static __inline__ int mach64_find_pending_buf_entry(drm_mach64_private_t *
dev_priv,
drm_mach64_freelist_t **
entry, drm_buf_t * buf)
entry, struct drm_buf * buf)
{
struct list_head *ptr;
#if MACH64_EXTRA_CHECKING
if (list_empty(&dev_priv->pending)) {
DRM_ERROR("Empty pending list in %s\n", __FUNCTION__);
return DRM_ERR(EINVAL);
DRM_ERROR("Empty pending list in \n");
return -EINVAL;
}
#endif
ptr = dev_priv->pending.prev;
*entry = list_entry(ptr, drm_mach64_freelist_t, list);
while ((*entry)->buf != buf) {
if (ptr == &dev_priv->pending) {
return DRM_ERR(EFAULT);
return -EFAULT;
}
ptr = ptr->prev;
*entry = list_entry(ptr, drm_mach64_freelist_t, list);
@ -836,7 +737,7 @@ static __inline__ int mach64_find_pending_buf_entry(drm_mach64_private_t *
return 0;
}
#define DMASETPTR( _p ) \
#define DMASETPTR( _p ) \
do { \
_buf = (_p); \
_outcount = 0; \
@ -844,24 +745,21 @@ do { \
} while(0)
/* FIXME: use a private set of smaller buffers for state emits, clears, and swaps? */
#define DMAGETPTR( filp, dev_priv, n ) \
#define DMAGETPTR( file_priv, dev_priv, n ) \
do { \
if ( MACH64_VERBOSE ) { \
DRM_INFO( "DMAGETPTR( %d ) in %s\n", \
n, __FUNCTION__ ); \
DRM_INFO( "DMAGETPTR( %d )\n", (n) ); \
} \
_buf = mach64_freelist_get( dev_priv ); \
if (_buf == NULL) { \
DRM_ERROR("%s: couldn't get buffer in DMAGETPTR\n", \
__FUNCTION__ ); \
return DRM_ERR(EAGAIN); \
DRM_ERROR("couldn't get buffer in DMAGETPTR\n"); \
return -EAGAIN; \
} \
if (_buf->pending) { \
DRM_ERROR("%s: pending buf in DMAGETPTR\n", \
__FUNCTION__ ); \
return DRM_ERR(EFAULT); \
DRM_ERROR("pending buf in DMAGETPTR\n"); \
return -EFAULT; \
} \
_buf->filp = filp; \
_buf->file_priv = file_priv; \
_outcount = 0; \
\
_buf_wptr = GETBUFPTR( _buf ); \
@ -878,173 +776,87 @@ do { \
_buf->used += 8; \
} while (0)
#define DMAADVANCE( dev_priv, _discard ) \
do { \
struct list_head *ptr; \
RING_LOCALS; \
\
if ( MACH64_VERBOSE ) { \
DRM_INFO( "DMAADVANCE() in %s\n", __FUNCTION__ ); \
} \
\
if (_buf->used <= 0) { \
DRM_ERROR( "DMAADVANCE() in %s: sending empty buf %d\n", \
__FUNCTION__, _buf->idx ); \
return DRM_ERR(EFAULT); \
} \
if (_buf->pending) { \
/* This is a reused buffer, so we need to find it in the pending list */ \
int ret; \
if ( (ret=mach64_find_pending_buf_entry(dev_priv, &_entry, _buf)) ) { \
DRM_ERROR( "DMAADVANCE() in %s: couldn't find pending buf %d\n", \
__FUNCTION__, _buf->idx ); \
return ret; \
} \
if (_entry->discard) { \
DRM_ERROR( "DMAADVANCE() in %s: sending discarded pending buf %d\n", \
__FUNCTION__, _buf->idx ); \
return DRM_ERR(EFAULT); \
} \
} else { \
if (list_empty(&dev_priv->placeholders)) { \
DRM_ERROR( "DMAADVANCE() in %s: empty placeholder list\n", \
__FUNCTION__ ); \
return DRM_ERR(EFAULT); \
} \
ptr = dev_priv->placeholders.next; \
list_del(ptr); \
_entry = list_entry(ptr, drm_mach64_freelist_t, list); \
_buf->pending = 1; \
_entry->buf = _buf; \
list_add_tail(ptr, &dev_priv->pending); \
} \
_entry->discard = (_discard); \
ADD_BUF_TO_RING( dev_priv ); \
} while (0)
#define DMAADVANCE( dev_priv, _discard ) \
do { \
struct list_head *ptr; \
int ret; \
\
if ( MACH64_VERBOSE ) { \
DRM_INFO( "DMAADVANCE() in \n" ); \
} \
\
if (_buf->used <= 0) { \
DRM_ERROR( "DMAADVANCE(): sending empty buf %d\n", \
_buf->idx ); \
return -EFAULT; \
} \
if (_buf->pending) { \
/* This is a reused buffer, so we need to find it in the pending list */ \
if ((ret = mach64_find_pending_buf_entry(dev_priv, &_entry, _buf))) { \
DRM_ERROR( "DMAADVANCE(): couldn't find pending buf %d\n", _buf->idx ); \
return ret; \
} \
if (_entry->discard) { \
DRM_ERROR( "DMAADVANCE(): sending discarded pending buf %d\n", _buf->idx ); \
return -EFAULT; \
} \
} else { \
if (list_empty(&dev_priv->placeholders)) { \
DRM_ERROR( "DMAADVANCE(): empty placeholder list\n"); \
return -EFAULT; \
} \
ptr = dev_priv->placeholders.next; \
list_del(ptr); \
_entry = list_entry(ptr, drm_mach64_freelist_t, list); \
_buf->pending = 1; \
_entry->buf = _buf; \
list_add_tail(ptr, &dev_priv->pending); \
} \
_entry->discard = (_discard); \
if ((ret = mach64_add_buf_to_ring( dev_priv, _entry ))) \
return ret; \
} while (0)
#define DMADISCARDBUF() \
do { \
if (_entry == NULL) { \
int ret; \
if ( (ret=mach64_find_pending_buf_entry(dev_priv, &_entry, _buf)) ) { \
DRM_ERROR( "%s: couldn't find pending buf %d\n", \
__FUNCTION__, _buf->idx ); \
return ret; \
} \
} \
_entry->discard = 1; \
} while(0)
#define DMADISCARDBUF() \
do { \
if (_entry == NULL) { \
int ret; \
if ((ret = mach64_find_pending_buf_entry(dev_priv, &_entry, _buf))) { \
DRM_ERROR( "couldn't find pending buf %d\n", \
_buf->idx ); \
return ret; \
} \
} \
_entry->discard = 1; \
} while(0)
#define ADD_BUF_TO_RING( dev_priv ) \
do { \
int bytes, pages, remainder; \
u32 address, page; \
int i; \
\
bytes = _buf->used; \
address = GETBUFADDR( _buf ); \
\
pages = (bytes + MACH64_DMA_CHUNKSIZE - 1) / MACH64_DMA_CHUNKSIZE; \
\
BEGIN_RING( pages * 4 ); \
\
for ( i = 0 ; i < pages-1 ; i++ ) { \
page = address + i * MACH64_DMA_CHUNKSIZE; \
OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_ADDR ); \
OUT_RING( page ); \
OUT_RING( MACH64_DMA_CHUNKSIZE | MACH64_DMA_HOLD_OFFSET ); \
OUT_RING( 0 ); \
} \
\
/* generate the final descriptor for any remaining commands in this buffer */ \
page = address + i * MACH64_DMA_CHUNKSIZE; \
remainder = bytes - i * MACH64_DMA_CHUNKSIZE; \
\
/* Save dword offset of last descriptor for this buffer. \
* This is needed to check for completion of the buffer in freelist_get \
*/ \
_entry->ring_ofs = RING_WRITE_OFS; \
\
OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_ADDR ); \
OUT_RING( page ); \
OUT_RING( remainder | MACH64_DMA_HOLD_OFFSET | MACH64_DMA_EOL ); \
OUT_RING( 0 ); \
\
ADVANCE_RING(); \
} while(0)
#define DMAADVANCEHOSTDATA( dev_priv ) \
do { \
struct list_head *ptr; \
RING_LOCALS; \
\
if ( MACH64_VERBOSE ) { \
DRM_INFO( "DMAADVANCEHOSTDATA() in %s\n", __FUNCTION__ ); \
} \
\
if (_buf->used <= 0) { \
DRM_ERROR( "DMAADVANCEHOSTDATA() in %s: sending empty buf %d\n", \
__FUNCTION__, _buf->idx ); \
return DRM_ERR(EFAULT); \
} \
if (list_empty(&dev_priv->placeholders)) { \
DRM_ERROR( "%s: empty placeholder list in DMAADVANCEHOSTDATA()\n", \
__FUNCTION__ ); \
return DRM_ERR(EFAULT); \
} \
\
ptr = dev_priv->placeholders.next; \
list_del(ptr); \
_entry = list_entry(ptr, drm_mach64_freelist_t, list); \
_entry->buf = _buf; \
_entry->buf->pending = 1; \
list_add_tail(ptr, &dev_priv->pending); \
_entry->discard = 1; \
ADD_HOSTDATA_BUF_TO_RING( dev_priv ); \
} while (0)
#define ADD_HOSTDATA_BUF_TO_RING( dev_priv ) \
do { \
int bytes, pages, remainder; \
u32 address, page; \
int i; \
\
bytes = _buf->used - MACH64_HOSTDATA_BLIT_OFFSET; \
pages = (bytes + MACH64_DMA_CHUNKSIZE - 1) / MACH64_DMA_CHUNKSIZE; \
address = GETBUFADDR( _buf ); \
\
BEGIN_RING( 4 + pages * 4 ); \
\
OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_ADDR ); \
OUT_RING( address ); \
OUT_RING( MACH64_HOSTDATA_BLIT_OFFSET | MACH64_DMA_HOLD_OFFSET ); \
OUT_RING( 0 ); \
\
address += MACH64_HOSTDATA_BLIT_OFFSET; \
\
for ( i = 0 ; i < pages-1 ; i++ ) { \
page = address + i * MACH64_DMA_CHUNKSIZE; \
OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_HOSTDATA ); \
OUT_RING( page ); \
OUT_RING( MACH64_DMA_CHUNKSIZE | MACH64_DMA_HOLD_OFFSET ); \
OUT_RING( 0 ); \
} \
\
/* generate the final descriptor for any remaining commands in this buffer */ \
page = address + i * MACH64_DMA_CHUNKSIZE; \
remainder = bytes - i * MACH64_DMA_CHUNKSIZE; \
\
/* Save dword offset of last descriptor for this buffer. \
* This is needed to check for completion of the buffer in freelist_get \
*/ \
_entry->ring_ofs = RING_WRITE_OFS; \
\
OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_HOSTDATA ); \
OUT_RING( page ); \
OUT_RING( remainder | MACH64_DMA_HOLD_OFFSET | MACH64_DMA_EOL ); \
OUT_RING( 0 ); \
\
ADVANCE_RING(); \
} while(0)
#define DMAADVANCEHOSTDATA( dev_priv ) \
do { \
struct list_head *ptr; \
int ret; \
\
if ( MACH64_VERBOSE ) { \
DRM_INFO( "DMAADVANCEHOSTDATA() in \n" ); \
} \
\
if (_buf->used <= 0) { \
DRM_ERROR( "DMAADVANCEHOSTDATA(): sending empty buf %d\n", _buf->idx ); \
return -EFAULT; \
} \
if (list_empty(&dev_priv->placeholders)) { \
DRM_ERROR( "empty placeholder list in DMAADVANCEHOSTDATA()\n" ); \
return -EFAULT; \
} \
\
ptr = dev_priv->placeholders.next; \
list_del(ptr); \
_entry = list_entry(ptr, drm_mach64_freelist_t, list); \
_entry->buf = _buf; \
_entry->buf->pending = 1; \
list_add_tail(ptr, &dev_priv->pending); \
_entry->discard = 1; \
if ((ret = mach64_add_hostdata_buf_to_ring( dev_priv, _entry ))) \
return ret; \
} while (0)
#endif /* __MACH64_DRV_H__ */
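
As a minimal sketch of the descriptor arithmetic that ADD_BUF_TO_RING and ADD_HOSTDATA_BUF_TO_RING above implement: a buffer of `used` bytes is split into fixed-size chunks, and only the final descriptor carries the remaining byte count plus an end-of-list flag. The constants below are illustrative stand-ins, not the real MACH64_* register definitions.

/* sketch.c -- descriptor chunking, standalone user-space illustration */
#include <stdio.h>

#define CHUNKSIZE   0x1000u     /* stand-in for MACH64_DMA_CHUNKSIZE */
#define HOLD_OFFSET 0x40000000u /* stand-in for MACH64_DMA_HOLD_OFFSET */
#define EOL         0x80000000u /* stand-in for MACH64_DMA_EOL */

static void emit_descriptors(unsigned int address, unsigned int used)
{
	unsigned int pages, i;

	if (used == 0)			/* DMAADVANCE rejects empty buffers */
		return;

	pages = (used + CHUNKSIZE - 1) / CHUNKSIZE;

	for (i = 0; i < pages - 1; i++)	/* full chunks */
		printf("desc %u: addr=0x%08x len=0x%08x\n",
		    i, address + i * CHUNKSIZE, CHUNKSIZE | HOLD_OFFSET);

	/* final descriptor: remaining bytes, marked end-of-list */
	printf("desc %u: addr=0x%08x len=0x%08x\n",
	    i, address + i * CHUNKSIZE,
	    (used - i * CHUNKSIZE) | HOLD_OFFSET | EOL);
}

int main(void)
{
	emit_descriptors(0x00100000u, 0x2345u);	/* sample buffer */
	return 0;
}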

View File

@ -45,9 +45,8 @@ __FBSDID("$FreeBSD$");
irqreturn_t mach64_driver_irq_handler(DRM_IRQ_ARGS)
{
drm_device_t *dev = (drm_device_t *) arg;
drm_mach64_private_t *dev_priv =
(drm_mach64_private_t *) dev->dev_private;
struct drm_device *dev = arg;
drm_mach64_private_t *dev_priv = dev->dev_private;
int status;
status = MACH64_READ(MACH64_CRTC_INT_CNTL);
@ -65,74 +64,98 @@ irqreturn_t mach64_driver_irq_handler(DRM_IRQ_ARGS)
(status & ~MACH64_CRTC_INT_ACKS)
| MACH64_CRTC_VBLANK_INT);
atomic_inc(&dev->vbl_received);
DRM_WAKEUP(&dev->vbl_queue);
drm_vbl_send_signals(dev);
atomic_inc(&dev_priv->vbl_received);
drm_handle_vblank(dev, 0);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
int mach64_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence)
u32 mach64_get_vblank_counter(struct drm_device * dev, int crtc)
{
unsigned int cur_vblank;
int ret = 0;
const drm_mach64_private_t *const dev_priv = dev->dev_private;
/* Assume that the user has missed the current sequence number
* by about a day rather than she wants to wait for years
* using vertical blanks...
*/
DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
(((cur_vblank = atomic_read(&dev->vbl_received))
- *sequence) <= (1 << 23)));
if (crtc != 0)
return 0;
*sequence = cur_vblank;
return ret;
return atomic_read(&dev_priv->vbl_received);
}
/* drm_dma.h hooks
*/
void mach64_driver_irq_preinstall(drm_device_t * dev)
int mach64_enable_vblank(struct drm_device * dev, int crtc)
{
drm_mach64_private_t *dev_priv =
(drm_mach64_private_t *) dev->dev_private;
drm_mach64_private_t *dev_priv = dev->dev_private;
u32 status = MACH64_READ(MACH64_CRTC_INT_CNTL);
DRM_DEBUG("before install CRTC_INT_CTNL: 0x%08x\n", status);
if (crtc != 0) {
DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
crtc);
return -EINVAL;
}
DRM_DEBUG("before enable vblank CRTC_INT_CTNL: 0x%08x\n", status);
/* Turn on VBLANK interrupt */
MACH64_WRITE(MACH64_CRTC_INT_CNTL, MACH64_READ(MACH64_CRTC_INT_CNTL)
| MACH64_CRTC_VBLANK_INT_EN);
return 0;
}
void mach64_disable_vblank(struct drm_device * dev, int crtc)
{
if (crtc != 0) {
DRM_ERROR("tried to disable vblank on non-existent crtc %d\n",
crtc);
return;
}
/*
* FIXME: implement proper interrupt disable by using the vblank
* counter register (if available).
*/
}
static void mach64_disable_vblank_local(struct drm_device * dev, int crtc)
{
drm_mach64_private_t *dev_priv = dev->dev_private;
u32 status = MACH64_READ(MACH64_CRTC_INT_CNTL);
if (crtc != 0) {
DRM_ERROR("tried to disable vblank on non-existent crtc %d\n",
crtc);
return;
}
DRM_DEBUG("before disable vblank CRTC_INT_CTNL: 0x%08x\n", status);
/* Disable and clear VBLANK interrupt */
MACH64_WRITE(MACH64_CRTC_INT_CNTL, (status & ~MACH64_CRTC_VBLANK_INT_EN)
| MACH64_CRTC_VBLANK_INT);
}
void mach64_driver_irq_postinstall(drm_device_t * dev)
void mach64_driver_irq_preinstall(struct drm_device * dev)
{
drm_mach64_private_t *dev_priv =
(drm_mach64_private_t *) dev->dev_private;
drm_mach64_private_t *dev_priv = dev->dev_private;
/* Turn on VBLANK interrupt */
MACH64_WRITE(MACH64_CRTC_INT_CNTL, MACH64_READ(MACH64_CRTC_INT_CNTL)
| MACH64_CRTC_VBLANK_INT_EN);
u32 status = MACH64_READ(MACH64_CRTC_INT_CNTL);
DRM_DEBUG("after install CRTC_INT_CTNL: 0x%08x\n",
MACH64_READ(MACH64_CRTC_INT_CNTL));
DRM_DEBUG("before install CRTC_INT_CTNL: 0x%08x\n", status);
mach64_disable_vblank_local(dev, 0);
}
void mach64_driver_irq_uninstall(drm_device_t * dev)
int mach64_driver_irq_postinstall(struct drm_device * dev)
{
drm_mach64_private_t *dev_priv =
(drm_mach64_private_t *) dev->dev_private;
return drm_vblank_init(dev, 1);
}
void mach64_driver_irq_uninstall(struct drm_device * dev)
{
drm_mach64_private_t *dev_priv = dev->dev_private;
if (!dev_priv)
return;
/* Disable and clear VBLANK interrupt */
MACH64_WRITE(MACH64_CRTC_INT_CNTL,
(MACH64_READ(MACH64_CRTC_INT_CNTL) &
~MACH64_CRTC_VBLANK_INT_EN)
| MACH64_CRTC_VBLANK_INT);
mach64_disable_vblank_local(dev, 0);
DRM_DEBUG("after uninstall CRTC_INT_CTNL: 0x%08x\n",
MACH64_READ(MACH64_CRTC_INT_CNTL));
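
A minimal sketch of the vblank scheme the mach64 IRQ changes above follow: the driver keeps its own counter, bumps it in the interrupt handler, and the core queries it through a get_vblank_counter() hook. Plain user-space C with stand-in types, not the actual drmP.h interfaces or kernel atomics.

#include <stdio.h>

struct fake_dev_priv {
	unsigned int vbl_received;	/* stands in for atomic_t vbl_received */
};

/* called when a VBLANK interrupt has been acknowledged in the handler */
static void handle_vblank_irq(struct fake_dev_priv *p)
{
	p->vbl_received++;		/* atomic_inc(&dev_priv->vbl_received) */
	/* drm_handle_vblank(dev, 0) would wake any waiters here */
}

/* the driver's get_vblank_counter() hook */
static unsigned int get_vblank_counter(const struct fake_dev_priv *p, int crtc)
{
	if (crtc != 0)			/* mach64 only drives one CRTC */
		return 0;
	return p->vbl_received;
}

int main(void)
{
	struct fake_dev_priv priv = { 0 };

	handle_vblank_irq(&priv);
	handle_vblank_irq(&priv);
	printf("vblank counter: %u\n", get_vblank_counter(&priv, 0));
	return 0;
}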

View File

@ -27,7 +27,7 @@
* Authors:
* Gareth Hughes <gareth@valinux.com>
* Leif Delgass <ldelgass@retinalburn.net>
* Jos<EFBFBD>Fonseca <j_r_fonseca@yahoo.co.uk>
* José Fonseca <j_r_fonseca@yahoo.co.uk>
*/
#include <sys/cdefs.h>
@ -43,16 +43,16 @@ __FBSDID("$FreeBSD$");
* 1.0 - Initial mach64 DRM
*
*/
drm_ioctl_desc_t mach64_ioctls[] = {
[DRM_IOCTL_NR(DRM_MACH64_INIT)] = {mach64_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
[DRM_IOCTL_NR(DRM_MACH64_CLEAR)] = {mach64_dma_clear, DRM_AUTH},
[DRM_IOCTL_NR(DRM_MACH64_SWAP)] = {mach64_dma_swap, DRM_AUTH},
[DRM_IOCTL_NR(DRM_MACH64_IDLE)] = {mach64_dma_idle, DRM_AUTH},
[DRM_IOCTL_NR(DRM_MACH64_RESET)] = {mach64_engine_reset, DRM_AUTH},
[DRM_IOCTL_NR(DRM_MACH64_VERTEX)] = {mach64_dma_vertex, DRM_AUTH},
[DRM_IOCTL_NR(DRM_MACH64_BLIT)] = {mach64_dma_blit, DRM_AUTH},
[DRM_IOCTL_NR(DRM_MACH64_FLUSH)] = {mach64_dma_flush, DRM_AUTH},
[DRM_IOCTL_NR(DRM_MACH64_GETPARAM)] = {mach64_get_param, DRM_AUTH},
struct drm_ioctl_desc mach64_ioctls[] = {
DRM_IOCTL_DEF(DRM_MACH64_INIT, mach64_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_MACH64_CLEAR, mach64_dma_clear, DRM_AUTH),
DRM_IOCTL_DEF(DRM_MACH64_SWAP, mach64_dma_swap, DRM_AUTH),
DRM_IOCTL_DEF(DRM_MACH64_IDLE, mach64_dma_idle, DRM_AUTH),
DRM_IOCTL_DEF(DRM_MACH64_RESET, mach64_engine_reset, DRM_AUTH),
DRM_IOCTL_DEF(DRM_MACH64_VERTEX, mach64_dma_vertex, DRM_AUTH),
DRM_IOCTL_DEF(DRM_MACH64_BLIT, mach64_dma_blit, DRM_AUTH),
DRM_IOCTL_DEF(DRM_MACH64_FLUSH, mach64_dma_flush, DRM_AUTH),
DRM_IOCTL_DEF(DRM_MACH64_GETPARAM, mach64_get_param, DRM_AUTH),
};
int mach64_max_ioctl = DRM_ARRAY_SIZE(mach64_ioctls);
@ -88,16 +88,17 @@ static void mach64_print_dirty(const char *msg, unsigned int flags)
/* This function returns 0 on success, 1 for no intersection, and
* negative for an error
*/
static int mach64_emit_cliprect(DRMFILE filp, drm_mach64_private_t * dev_priv,
drm_clip_rect_t * box)
static int mach64_emit_cliprect(struct drm_file *file_priv,
drm_mach64_private_t * dev_priv,
struct drm_clip_rect * box)
{
u32 sc_left_right, sc_top_bottom;
drm_clip_rect_t scissor;
struct drm_clip_rect scissor;
drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mach64_context_regs_t *regs = &sarea_priv->context_state;
DMALOCALS;
DRM_DEBUG("%s: box=%p\n", __FUNCTION__, box);
DRM_DEBUG("box=%p\n", box);
/* Get GL scissor */
/* FIXME: store scissor in SAREA as a cliprect instead of in
@ -123,7 +124,7 @@ static int mach64_emit_cliprect(DRMFILE filp, drm_mach64_private_t * dev_priv,
if (scissor.y1 >= scissor.y2)
return 1;
DMAGETPTR(filp, dev_priv, 2); /* returns on failure to get buffer */
DMAGETPTR(file_priv, dev_priv, 2); /* returns on failure to get buffer */
sc_left_right = ((scissor.x1 << 0) | (scissor.x2 << 16));
sc_top_bottom = ((scissor.y1 << 0) | (scissor.y2 << 16));
@ -136,7 +137,7 @@ static int mach64_emit_cliprect(DRMFILE filp, drm_mach64_private_t * dev_priv,
return 0;
}
static __inline__ int mach64_emit_state(DRMFILE filp,
static __inline__ int mach64_emit_state(struct drm_file *file_priv,
drm_mach64_private_t * dev_priv)
{
drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
@ -148,10 +149,10 @@ static __inline__ int mach64_emit_state(DRMFILE filp,
if (MACH64_VERBOSE) {
mach64_print_dirty(__FUNCTION__, dirty);
} else {
DRM_DEBUG("%s: dirty=0x%08x\n", __FUNCTION__, dirty);
DRM_DEBUG("dirty=0x%08x\n", dirty);
}
DMAGETPTR(filp, dev_priv, 17); /* returns on failure to get buffer */
DMAGETPTR(file_priv, dev_priv, 17); /* returns on failure to get buffer */
if (dirty & MACH64_UPLOAD_MISC) {
DMAOUTREG(MACH64_DP_MIX, regs->dp_mix);
@ -215,7 +216,8 @@ static __inline__ int mach64_emit_state(DRMFILE filp,
* DMA command dispatch functions
*/
static int mach64_dma_dispatch_clear(DRMFILE filp, drm_device_t * dev,
static int mach64_dma_dispatch_clear(struct drm_device * dev,
struct drm_file *file_priv,
unsigned int flags,
int cx, int cy, int cw, int ch,
unsigned int clear_color,
@ -225,12 +227,12 @@ static int mach64_dma_dispatch_clear(DRMFILE filp, drm_device_t * dev,
drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mach64_context_regs_t *ctx = &sarea_priv->context_state;
int nbox = sarea_priv->nbox;
drm_clip_rect_t *pbox = sarea_priv->boxes;
struct drm_clip_rect *pbox = sarea_priv->boxes;
u32 fb_bpp, depth_bpp;
int i;
DMALOCALS;
DRM_DEBUG("%s\n", __FUNCTION__);
DRM_DEBUG("\n");
switch (dev_priv->fb_bpp) {
case 16:
@ -240,7 +242,7 @@ static int mach64_dma_dispatch_clear(DRMFILE filp, drm_device_t * dev,
fb_bpp = MACH64_DATATYPE_ARGB8888;
break;
default:
return DRM_ERR(EINVAL);
return -EINVAL;
}
switch (dev_priv->depth_bpp) {
case 16:
@ -251,13 +253,13 @@ static int mach64_dma_dispatch_clear(DRMFILE filp, drm_device_t * dev,
depth_bpp = MACH64_DATATYPE_ARGB8888;
break;
default:
return DRM_ERR(EINVAL);
return -EINVAL;
}
if (!nbox)
return 0;
DMAGETPTR(filp, dev_priv, nbox * 31); /* returns on failure to get buffer */
DMAGETPTR(file_priv, dev_priv, nbox * 31); /* returns on failure to get buffer */
for (i = 0; i < nbox; i++) {
int x = pbox[i].x1;
@ -358,17 +360,18 @@ static int mach64_dma_dispatch_clear(DRMFILE filp, drm_device_t * dev,
return 0;
}
static int mach64_dma_dispatch_swap(DRMFILE filp, drm_device_t * dev)
static int mach64_dma_dispatch_swap(struct drm_device * dev,
struct drm_file *file_priv)
{
drm_mach64_private_t *dev_priv = dev->dev_private;
drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
int nbox = sarea_priv->nbox;
drm_clip_rect_t *pbox = sarea_priv->boxes;
struct drm_clip_rect *pbox = sarea_priv->boxes;
u32 fb_bpp;
int i;
DMALOCALS;
DRM_DEBUG("%s\n", __FUNCTION__);
DRM_DEBUG("\n");
switch (dev_priv->fb_bpp) {
case 16:
@ -383,7 +386,7 @@ static int mach64_dma_dispatch_swap(DRMFILE filp, drm_device_t * dev)
if (!nbox)
return 0;
DMAGETPTR(filp, dev_priv, 13 + nbox * 4); /* returns on failure to get buffer */
DMAGETPTR(file_priv, dev_priv, 13 + nbox * 4); /* returns on failure to get buffer */
DMAOUTREG(MACH64_Z_CNTL, 0);
DMAOUTREG(MACH64_SCALE_3D_CNTL, 0);
@ -445,7 +448,7 @@ static int mach64_do_get_frames_queued(drm_mach64_private_t * dev_priv)
int i, start;
u32 head, tail, ofs;
DRM_DEBUG("%s\n", __FUNCTION__);
DRM_DEBUG("\n");
if (sarea_priv->frames_queued == 0)
return 0;
@ -483,20 +486,20 @@ static int mach64_do_get_frames_queued(drm_mach64_private_t * dev_priv)
/* Copy and verify a client submited buffer.
* FIXME: Make an assembly optimized version
*/
static __inline__ int copy_and_verify_from_user(u32 *to,
const u32 __user *ufrom,
unsigned long bytes)
static __inline__ int copy_from_user_vertex(u32 *to,
const u32 __user *ufrom,
unsigned long bytes)
{
unsigned long n = bytes; /* dwords remaining in buffer */
u32 *from, *orig_from;
from = drm_alloc(bytes, DRM_MEM_DRIVER);
if (from == NULL)
return ENOMEM;
return -ENOMEM;
if (DRM_COPY_FROM_USER(from, ufrom, bytes)) {
drm_free(from, bytes, DRM_MEM_DRIVER);
return DRM_ERR(EFAULT);
return -EFAULT;
}
orig_from = from; /* we'll be modifying the "from" ptr, so save it */
@ -525,17 +528,16 @@ static __inline__ int copy_and_verify_from_user(u32 *to,
from += count;
to += count;
} else {
DRM_ERROR("%s: Got bad command: 0x%04x\n",
__FUNCTION__, reg);
DRM_ERROR("Got bad command: 0x%04x\n", reg);
drm_free(orig_from, bytes, DRM_MEM_DRIVER);
return DRM_ERR(EACCES);
return -EACCES;
}
} else {
DRM_ERROR
("%s: Got bad command count(=%u) dwords remaining=%lu\n",
__FUNCTION__, count, n);
("Got bad command count(=%u) dwords remaining=%lu\n",
count, n);
drm_free(orig_from, bytes, DRM_MEM_DRIVER);
return DRM_ERR(EINVAL);
return -EINVAL;
}
}
@ -543,119 +545,120 @@ static __inline__ int copy_and_verify_from_user(u32 *to,
if (n == 0)
return 0;
else {
DRM_ERROR("%s: Bad buf->used(=%lu)\n", __FUNCTION__, bytes);
return DRM_ERR(EINVAL);
DRM_ERROR("Bad buf->used(=%lu)\n", bytes);
return -EINVAL;
}
}
static int mach64_dma_dispatch_vertex(DRMFILE filp, drm_device_t * dev,
int prim, void *buf, unsigned long used,
int discard)
static int mach64_dma_dispatch_vertex(struct drm_device * dev,
struct drm_file *file_priv,
drm_mach64_vertex_t * vertex)
{
drm_mach64_private_t *dev_priv = dev->dev_private;
drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_buf_t *copy_buf;
struct drm_buf *copy_buf;
void *buf = vertex->buf;
unsigned long used = vertex->used;
int ret = 0;
int i = 0;
int done = 0;
int verify_ret = 0;
DMALOCALS;
DRM_DEBUG("%s: buf=%p used=%lu nbox=%d\n",
__FUNCTION__, buf, used, sarea_priv->nbox);
DRM_DEBUG("buf=%p used=%lu nbox=%d\n",
buf, used, sarea_priv->nbox);
if (used) {
int ret = 0;
int i = 0;
if (!used)
goto _vertex_done;
copy_buf = mach64_freelist_get(dev_priv);
if (copy_buf == NULL) {
DRM_ERROR("%s: couldn't get buffer in DMAGETPTR\n",
__FUNCTION__);
return DRM_ERR(EAGAIN);
}
copy_buf = mach64_freelist_get(dev_priv);
if (copy_buf == NULL) {
DRM_ERROR("couldn't get buffer\n");
return -EAGAIN;
}
if ((verify_ret =
copy_and_verify_from_user(GETBUFPTR(copy_buf), buf,
used)) == 0) {
/* Mach64's vertex data is actually register writes. To avoid security
* compromises these register writes have to be verified and copied from
* user space into a private DMA buffer.
*/
verify_ret = copy_from_user_vertex(GETBUFPTR(copy_buf), buf, used);
copy_buf->used = used;
if (verify_ret != 0) {
mach64_freelist_put(dev_priv, copy_buf);
goto _vertex_done;
}
DMASETPTR(copy_buf);
copy_buf->used = used;
if (sarea_priv->dirty & ~MACH64_UPLOAD_CLIPRECTS) {
ret = mach64_emit_state(filp, dev_priv);
if (ret < 0)
return ret;
DMASETPTR(copy_buf);
if (sarea_priv->dirty & ~MACH64_UPLOAD_CLIPRECTS) {
ret = mach64_emit_state(file_priv, dev_priv);
if (ret < 0)
return ret;
}
do {
/* Emit the next cliprect */
if (i < sarea_priv->nbox) {
ret = mach64_emit_cliprect(file_priv, dev_priv,
&sarea_priv->boxes[i]);
if (ret < 0) {
/* failed to get buffer */
return ret;
} else if (ret != 0) {
/* null intersection with scissor */
continue;
}
do {
/* Emit the next cliprect */
if (i < sarea_priv->nbox) {
ret =
mach64_emit_cliprect(filp, dev_priv,
&sarea_priv->
boxes[i]);
if (ret < 0) {
/* failed to get buffer */
return ret;
} else if (ret != 0) {
/* null intersection with scissor */
continue;
}
}
if ((i >= sarea_priv->nbox - 1))
done = 1;
/* Add the buffer to the DMA queue */
DMAADVANCE(dev_priv, done);
} while (++i < sarea_priv->nbox);
}
if ((i >= sarea_priv->nbox - 1))
done = 1;
if (copy_buf->pending && !done) {
/* Add the buffer to the DMA queue */
DMAADVANCE(dev_priv, done);
} while (++i < sarea_priv->nbox);
if (!done) {
if (copy_buf->pending) {
DMADISCARDBUF();
} else if (!done) {
/* This buffer wasn't used (no cliprects or verify failed), so place it back
* on the free list
} else {
/* This buffer wasn't used (no cliprects), so place it
* back on the free list
*/
struct list_head *ptr;
drm_mach64_freelist_t *entry;
#if MACH64_EXTRA_CHECKING
list_for_each(ptr, &dev_priv->pending) {
entry =
list_entry(ptr, drm_mach64_freelist_t,
list);
if (copy_buf == entry->buf) {
DRM_ERROR
("%s: Trying to release a pending buf\n",
__FUNCTION__);
return DRM_ERR(EFAULT);
}
}
#endif
ptr = dev_priv->placeholders.next;
entry = list_entry(ptr, drm_mach64_freelist_t, list);
copy_buf->pending = 0;
copy_buf->used = 0;
entry->buf = copy_buf;
entry->discard = 1;
list_del(ptr);
list_add_tail(ptr, &dev_priv->free_list);
mach64_freelist_put(dev_priv, copy_buf);
}
}
_vertex_done:
sarea_priv->dirty &= ~MACH64_UPLOAD_CLIPRECTS;
sarea_priv->nbox = 0;
return verify_ret;
}
static int mach64_dma_dispatch_blit(DRMFILE filp, drm_device_t * dev,
static __inline__ int copy_from_user_blit(u32 *to,
const u32 __user *ufrom,
unsigned long bytes)
{
to = (u32 *)((char *)to + MACH64_HOSTDATA_BLIT_OFFSET);
if (DRM_COPY_FROM_USER(to, ufrom, bytes)) {
return -EFAULT;
}
return 0;
}
static int mach64_dma_dispatch_blit(struct drm_device * dev,
struct drm_file *file_priv,
drm_mach64_blit_t * blit)
{
drm_mach64_private_t *dev_priv = dev->dev_private;
drm_device_dma_t *dma = dev->dma;
int dword_shift, dwords;
drm_buf_t *buf;
unsigned long used;
struct drm_buf *copy_buf;
int verify_ret = 0;
DMALOCALS;
/* The compiler won't optimize away a division by a variable,
@ -679,22 +682,7 @@ static int mach64_dma_dispatch_blit(DRMFILE filp, drm_device_t * dev,
break;
default:
DRM_ERROR("invalid blit format %d\n", blit->format);
return DRM_ERR(EINVAL);
}
/* Dispatch the blit buffer.
*/
buf = dma->buflist[blit->idx];
if (buf->filp != filp) {
DRM_ERROR("process %d (filp %p) using buffer with filp %p\n",
DRM_CURRENTPID, filp, buf->filp);
return DRM_ERR(EINVAL);
}
if (buf->pending) {
DRM_ERROR("sending pending buffer %d\n", blit->idx);
return DRM_ERR(EINVAL);
return -EINVAL;
}
/* Set buf->used to the bytes of blit data based on the blit dimensions
@ -703,13 +691,38 @@ static int mach64_dma_dispatch_blit(DRMFILE filp, drm_device_t * dev,
* used for setup as well as the blit data.
*/
dwords = (blit->width * blit->height) >> dword_shift;
buf->used = dwords << 2;
if (buf->used <= 0 ||
buf->used > MACH64_BUFFER_SIZE - MACH64_HOSTDATA_BLIT_OFFSET) {
DRM_ERROR("Invalid blit size: %d bytes\n", buf->used);
return DRM_ERR(EINVAL);
used = dwords << 2;
if (used <= 0 ||
used > MACH64_BUFFER_SIZE - MACH64_HOSTDATA_BLIT_OFFSET) {
DRM_ERROR("Invalid blit size: %lu bytes\n", used);
return -EINVAL;
}
copy_buf = mach64_freelist_get(dev_priv);
if (copy_buf == NULL) {
DRM_ERROR("couldn't get buffer\n");
return -EAGAIN;
}
/* Copy the blit data from userspace.
*
* XXX: This is overkill. The most efficient solution would be having
* two sets of buffers (one set private for vertex data, the other set
* client-writable for blits). However that would bring more complexity
 * and would break backward compatibility. The solution currently
 * implemented is keeping all buffers private, which allows the driver to
 * be secured without increasing complexity, at the expense of some speed
 * transferring data.
*/
verify_ret = copy_from_user_blit(GETBUFPTR(copy_buf), blit->buf, used);
if (verify_ret != 0) {
mach64_freelist_put(dev_priv, copy_buf);
goto _blit_done;
}
copy_buf->used = used;
/* FIXME: Use a last buffer flag and reduce the state emitted for subsequent,
* continuation buffers?
*/
@ -718,7 +731,7 @@ static int mach64_dma_dispatch_blit(DRMFILE filp, drm_device_t * dev,
* a register command every 16 dwords. State setup is added at the start of the
* buffer -- the client leaves space for this based on MACH64_HOSTDATA_BLIT_OFFSET
*/
DMASETPTR(buf);
DMASETPTR(copy_buf);
DMAOUTREG(MACH64_Z_CNTL, 0);
DMAOUTREG(MACH64_SCALE_3D_CNTL, 0);
@ -748,39 +761,38 @@ static int mach64_dma_dispatch_blit(DRMFILE filp, drm_device_t * dev,
DMAOUTREG(MACH64_DST_X_Y, (blit->y << 16) | blit->x);
DMAOUTREG(MACH64_DST_WIDTH_HEIGHT, (blit->height << 16) | blit->width);
DRM_DEBUG("%s: %d bytes\n", __FUNCTION__, buf->used);
DRM_DEBUG("%lu bytes\n", used);
/* Add the buffer to the queue */
DMAADVANCEHOSTDATA(dev_priv);
return 0;
_blit_done:
return verify_ret;
}
/* ================================================================
* IOCTL functions
*/
int mach64_dma_clear(DRM_IOCTL_ARGS)
int mach64_dma_clear(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
DRM_DEVICE;
drm_mach64_private_t *dev_priv = dev->dev_private;
drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mach64_clear_t clear;
drm_mach64_clear_t *clear = data;
int ret;
DRM_DEBUG("%s: pid=%d\n", __FUNCTION__, DRM_CURRENTPID);
DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
LOCK_TEST_WITH_RETURN(dev, filp);
DRM_COPY_FROM_USER_IOCTL(clear, (drm_mach64_clear_t *) data,
sizeof(clear));
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (sarea_priv->nbox > MACH64_NR_SAREA_CLIPRECTS)
sarea_priv->nbox = MACH64_NR_SAREA_CLIPRECTS;
ret = mach64_dma_dispatch_clear(filp, dev, clear.flags,
clear.x, clear.y, clear.w, clear.h,
clear.clear_color, clear.clear_depth);
ret = mach64_dma_dispatch_clear(dev, file_priv, clear->flags,
clear->x, clear->y, clear->w, clear->h,
clear->clear_color,
clear->clear_depth);
/* Make sure we restore the 3D state next time.
*/
@ -788,21 +800,21 @@ int mach64_dma_clear(DRM_IOCTL_ARGS)
return ret;
}
int mach64_dma_swap(DRM_IOCTL_ARGS)
int mach64_dma_swap(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
DRM_DEVICE;
drm_mach64_private_t *dev_priv = dev->dev_private;
drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
int ret;
DRM_DEBUG("%s: pid=%d\n", __FUNCTION__, DRM_CURRENTPID);
DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
LOCK_TEST_WITH_RETURN(dev, filp);
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (sarea_priv->nbox > MACH64_NR_SAREA_CLIPRECTS)
sarea_priv->nbox = MACH64_NR_SAREA_CLIPRECTS;
ret = mach64_dma_dispatch_swap(filp, dev);
ret = mach64_dma_dispatch_swap(dev, file_priv);
/* Make sure we restore the 3D state next time.
*/
@ -810,69 +822,52 @@ int mach64_dma_swap(DRM_IOCTL_ARGS)
return ret;
}
int mach64_dma_vertex(DRM_IOCTL_ARGS)
int mach64_dma_vertex(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
DRM_DEVICE;
drm_mach64_private_t *dev_priv = dev->dev_private;
drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mach64_vertex_t vertex;
drm_mach64_vertex_t *vertex = data;
LOCK_TEST_WITH_RETURN(dev, filp);
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
return DRM_ERR(EINVAL);
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
DRM_COPY_FROM_USER_IOCTL(vertex, (drm_mach64_vertex_t *) data,
sizeof(vertex));
DRM_DEBUG("pid=%d buf=%p used=%lu discard=%d\n",
DRM_CURRENTPID,
vertex->buf, vertex->used, vertex->discard);
DRM_DEBUG("%s: pid=%d buf=%p used=%lu discard=%d\n",
__FUNCTION__, DRM_CURRENTPID,
vertex.buf, vertex.used, vertex.discard);
if (vertex.prim < 0 || vertex.prim > MACH64_PRIM_POLYGON) {
DRM_ERROR("buffer prim %d\n", vertex.prim);
return DRM_ERR(EINVAL);
if (vertex->prim < 0 || vertex->prim > MACH64_PRIM_POLYGON) {
DRM_ERROR("buffer prim %d\n", vertex->prim);
return -EINVAL;
}
if (vertex.used > MACH64_BUFFER_SIZE || (vertex.used & 3) != 0) {
if (vertex->used > MACH64_BUFFER_SIZE || (vertex->used & 3) != 0) {
DRM_ERROR("Invalid vertex buffer size: %lu bytes\n",
vertex.used);
return DRM_ERR(EINVAL);
vertex->used);
return -EINVAL;
}
if (sarea_priv->nbox > MACH64_NR_SAREA_CLIPRECTS)
sarea_priv->nbox = MACH64_NR_SAREA_CLIPRECTS;
return mach64_dma_dispatch_vertex(filp, dev, vertex.prim, vertex.buf,
vertex.used, vertex.discard);
return mach64_dma_dispatch_vertex(dev, file_priv, vertex);
}
int mach64_dma_blit(DRM_IOCTL_ARGS)
int mach64_dma_blit(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
DRM_DEVICE;
drm_device_dma_t *dma = dev->dma;
drm_mach64_private_t *dev_priv = dev->dev_private;
drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mach64_blit_t blit;
drm_mach64_blit_t *blit = data;
int ret;
LOCK_TEST_WITH_RETURN(dev, filp);
LOCK_TEST_WITH_RETURN(dev, file_priv);
DRM_COPY_FROM_USER_IOCTL(blit, (drm_mach64_blit_t *) data,
sizeof(blit));
DRM_DEBUG("%s: pid=%d index=%d\n",
__FUNCTION__, DRM_CURRENTPID, blit.idx);
if (blit.idx < 0 || blit.idx >= dma->buf_count) {
DRM_ERROR("buffer index %d (of %d max)\n",
blit.idx, dma->buf_count - 1);
return DRM_ERR(EINVAL);
}
ret = mach64_dma_dispatch_blit(filp, dev, &blit);
ret = mach64_dma_dispatch_blit(dev, file_priv, blit);
/* Make sure we restore the 3D state next time.
*/
@ -882,39 +877,36 @@ int mach64_dma_blit(DRM_IOCTL_ARGS)
return ret;
}
int mach64_get_param(DRM_IOCTL_ARGS)
int mach64_get_param(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
DRM_DEVICE;
drm_mach64_private_t *dev_priv = dev->dev_private;
drm_mach64_getparam_t param;
drm_mach64_getparam_t *param = data;
int value;
DRM_DEBUG("%s\n", __FUNCTION__);
DRM_DEBUG("\n");
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
return DRM_ERR(EINVAL);
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
DRM_COPY_FROM_USER_IOCTL(param, (drm_mach64_getparam_t *) data,
sizeof(param));
switch (param.param) {
switch (param->param) {
case MACH64_PARAM_FRAMES_QUEUED:
/* Needs lock since it calls mach64_ring_tick() */
LOCK_TEST_WITH_RETURN(dev, filp);
LOCK_TEST_WITH_RETURN(dev, file_priv);
value = mach64_do_get_frames_queued(dev_priv);
break;
case MACH64_PARAM_IRQ_NR:
value = dev->irq;
break;
default:
return DRM_ERR(EINVAL);
return -EINVAL;
}
if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) {
if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
DRM_ERROR("copy_to_user\n");
return DRM_ERR(EFAULT);
return -EFAULT;
}
return 0;

View File

@ -31,7 +31,7 @@ __FBSDID("$FreeBSD$");
/**
* \file mga_dma.c
* DMA support for MGA G200 / G400.
*
*
* \author Rickard E. (Rik) Faith <faith@valinux.com>
* \author Jeff Hartmann <jhartmann@valinux.com>
* \author Keith Whitwell <keith@tungstengraphics.com>
@ -49,7 +49,7 @@ __FBSDID("$FreeBSD$");
#define MINIMAL_CLEANUP 0
#define FULL_CLEANUP 1
static int mga_do_cleanup_dma(drm_device_t * dev, int full_cleanup);
static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup);
/* ================================================================
* Engine control
@ -74,7 +74,7 @@ int mga_do_wait_for_idle(drm_mga_private_t * dev_priv)
DRM_ERROR("failed!\n");
DRM_INFO(" status=0x%08x\n", status);
#endif
return DRM_ERR(EBUSY);
return -EBUSY;
}
static int mga_do_dma_reset(drm_mga_private_t * dev_priv)
@ -227,7 +227,7 @@ void mga_do_dma_wrap_end(drm_mga_private_t * dev_priv)
#define MGA_BUFFER_FREE 0
#if MGA_FREELIST_DEBUG
static void mga_freelist_print(drm_device_t * dev)
static void mga_freelist_print(struct drm_device * dev)
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_freelist_t *entry;
@ -248,10 +248,10 @@ static void mga_freelist_print(drm_device_t * dev)
}
#endif
static int mga_freelist_init(drm_device_t * dev, drm_mga_private_t * dev_priv)
static int mga_freelist_init(struct drm_device * dev, drm_mga_private_t * dev_priv)
{
drm_device_dma_t *dma = dev->dma;
drm_buf_t *buf;
struct drm_device_dma *dma = dev->dma;
struct drm_buf *buf;
drm_mga_buf_priv_t *buf_priv;
drm_mga_freelist_t *entry;
int i;
@ -259,7 +259,7 @@ static int mga_freelist_init(drm_device_t * dev, drm_mga_private_t * dev_priv)
dev_priv->head = drm_alloc(sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER);
if (dev_priv->head == NULL)
return DRM_ERR(ENOMEM);
return -ENOMEM;
memset(dev_priv->head, 0, sizeof(drm_mga_freelist_t));
SET_AGE(&dev_priv->head->age, MGA_BUFFER_USED, 0);
@ -270,7 +270,7 @@ static int mga_freelist_init(drm_device_t * dev, drm_mga_private_t * dev_priv)
entry = drm_alloc(sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER);
if (entry == NULL)
return DRM_ERR(ENOMEM);
return -ENOMEM;
memset(entry, 0, sizeof(drm_mga_freelist_t));
@ -294,7 +294,7 @@ static int mga_freelist_init(drm_device_t * dev, drm_mga_private_t * dev_priv)
return 0;
}
static void mga_freelist_cleanup(drm_device_t * dev)
static void mga_freelist_cleanup(struct drm_device * dev)
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_freelist_t *entry;
@ -314,10 +314,10 @@ static void mga_freelist_cleanup(drm_device_t * dev)
#if 0
/* FIXME: Still needed?
*/
static void mga_freelist_reset(drm_device_t * dev)
static void mga_freelist_reset(struct drm_device * dev)
{
drm_device_dma_t *dma = dev->dma;
drm_buf_t *buf;
struct drm_buf *buf;
drm_mga_buf_priv_t *buf_priv;
int i;
@ -329,7 +329,7 @@ static void mga_freelist_reset(drm_device_t * dev)
}
#endif
static drm_buf_t *mga_freelist_get(drm_device_t * dev)
static struct drm_buf *mga_freelist_get(struct drm_device * dev)
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_freelist_t *next;
@ -362,7 +362,7 @@ static drm_buf_t *mga_freelist_get(drm_device_t * dev)
return NULL;
}
int mga_freelist_put(drm_device_t * dev, drm_buf_t * buf)
int mga_freelist_put(struct drm_device * dev, struct drm_buf * buf)
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_buf_priv_t *buf_priv = buf->dev_private;
@ -396,13 +396,13 @@ int mga_freelist_put(drm_device_t * dev, drm_buf_t * buf)
* DMA initialization, cleanup
*/
int mga_driver_load(drm_device_t *dev, unsigned long flags)
int mga_driver_load(struct drm_device *dev, unsigned long flags)
{
drm_mga_private_t * dev_priv;
drm_mga_private_t *dev_priv;
dev_priv = drm_alloc(sizeof(drm_mga_private_t), DRM_MEM_DRIVER);
if (!dev_priv)
return DRM_ERR(ENOMEM);
return -ENOMEM;
dev->dev_private = (void *)dev_priv;
memset(dev_priv, 0, sizeof(drm_mga_private_t));
@ -423,7 +423,7 @@ int mga_driver_load(drm_device_t *dev, unsigned long flags)
/**
* Bootstrap the driver for AGP DMA.
*
*
* \todo
 * Investigate whether there is any benefit to storing the WARP microcode in
* AGP memory. If not, the microcode may as well always be put in PCI
@ -436,21 +436,22 @@ int mga_driver_load(drm_device_t *dev, unsigned long flags)
*
* \sa mga_do_dma_bootstrap, mga_do_pci_dma_bootstrap
*/
static int mga_do_agp_dma_bootstrap(drm_device_t * dev,
static int mga_do_agp_dma_bootstrap(struct drm_device *dev,
drm_mga_dma_bootstrap_t * dma_bs)
{
drm_mga_private_t * const dev_priv = (drm_mga_private_t *) dev->dev_private;
drm_mga_private_t *const dev_priv =
(drm_mga_private_t *)dev->dev_private;
unsigned int warp_size = mga_warp_microcode_size(dev_priv);
int err;
unsigned offset;
unsigned offset;
const unsigned secondary_size = dma_bs->secondary_bin_count
* dma_bs->secondary_bin_size;
const unsigned agp_size = (dma_bs->agp_size << 20);
drm_buf_desc_t req;
drm_agp_mode_t mode;
drm_agp_info_t info;
drm_agp_buffer_t agp_req;
drm_agp_binding_t bind_req;
struct drm_buf_desc req;
struct drm_agp_mode mode;
struct drm_agp_info info;
struct drm_agp_buffer agp_req;
struct drm_agp_binding bind_req;
/* Acquire AGP. */
err = drm_agp_acquire(dev);
@ -484,11 +485,10 @@ static int mga_do_agp_dma_bootstrap(drm_device_t * dev,
}
}
/* Allocate and bind AGP memory. */
agp_req.size = agp_size;
agp_req.type = 0;
err = drm_agp_alloc( dev, & agp_req );
err = drm_agp_alloc(dev, &agp_req);
if (err) {
dev_priv->agp_size = 0;
DRM_ERROR("Unable to allocate %uMB AGP memory\n",
@ -514,36 +514,36 @@ static int mga_do_agp_dma_bootstrap(drm_device_t * dev,
warp_size = PAGE_SIZE;
offset = 0;
err = drm_addmap( dev, offset, warp_size,
_DRM_AGP, _DRM_READ_ONLY, & dev_priv->warp );
err = drm_addmap(dev, offset, warp_size,
_DRM_AGP, _DRM_READ_ONLY, &dev_priv->warp);
if (err) {
DRM_ERROR("Unable to map WARP microcode: %d\n", err);
return err;
}
offset += warp_size;
err = drm_addmap( dev, offset, dma_bs->primary_size,
_DRM_AGP, _DRM_READ_ONLY, & dev_priv->primary );
err = drm_addmap(dev, offset, dma_bs->primary_size,
_DRM_AGP, _DRM_READ_ONLY, & dev_priv->primary);
if (err) {
DRM_ERROR("Unable to map primary DMA region: %d\n", err);
return err;
}
offset += dma_bs->primary_size;
err = drm_addmap( dev, offset, secondary_size,
_DRM_AGP, 0, & dev->agp_buffer_map );
err = drm_addmap(dev, offset, secondary_size,
_DRM_AGP, 0, & dev->agp_buffer_map);
if (err) {
DRM_ERROR("Unable to map secondary DMA region: %d\n", err);
return err;
}
(void) memset( &req, 0, sizeof(req) );
(void)memset( &req, 0, sizeof(req) );
req.count = dma_bs->secondary_bin_count;
req.size = dma_bs->secondary_bin_size;
req.flags = _DRM_AGP_BUFFER;
req.agp_start = offset;
err = drm_addbufs_agp( dev, & req );
err = drm_addbufs_agp(dev, &req);
if (err) {
DRM_ERROR("Unable to add secondary DMA buffers: %d\n", err);
return err;
@ -551,10 +551,10 @@ static int mga_do_agp_dma_bootstrap(drm_device_t * dev,
#ifdef __linux__
{
drm_map_list_t *_entry;
struct drm_map_list *_entry;
unsigned long agp_token = 0;
list_for_each_entry(_entry, &dev->maplist->head, head) {
list_for_each_entry(_entry, &dev->maplist, head) {
if (_entry->map == dev->agp_buffer_map)
agp_token = _entry->user_token;
}
@ -566,8 +566,8 @@ static int mga_do_agp_dma_bootstrap(drm_device_t * dev,
#endif
offset += secondary_size;
err = drm_addmap( dev, offset, agp_size - offset,
_DRM_AGP, 0, & dev_priv->agp_textures );
err = drm_addmap(dev, offset, agp_size - offset,
_DRM_AGP, 0, & dev_priv->agp_textures);
if (err) {
DRM_ERROR("Unable to map AGP texture region: %d\n", err);
return err;
@ -582,7 +582,7 @@ static int mga_do_agp_dma_bootstrap(drm_device_t * dev,
DRM_ERROR("failed to ioremap agp regions! (%p, %p, %p)\n",
dev_priv->warp->handle, dev_priv->primary->handle,
dev->agp_buffer_map->handle);
return DRM_ERR(ENOMEM);
return -ENOMEM;
}
dev_priv->dma_access = MGA_PAGPXFER;
@ -594,7 +594,7 @@ static int mga_do_agp_dma_bootstrap(drm_device_t * dev,
/**
* Bootstrap the driver for PCI DMA.
*
*
* \todo
* The algorithm for decreasing the size of the primary DMA buffer could be
* better. The size should be rounded up to the nearest page size, then
@ -603,23 +603,24 @@ static int mga_do_agp_dma_bootstrap(drm_device_t * dev,
* \todo
* Determine whether the maximum address passed to drm_pci_alloc is correct.
* The same goes for drm_addbufs_pci.
*
*
* \sa mga_do_dma_bootstrap, mga_do_agp_dma_bootstrap
*/
static int mga_do_pci_dma_bootstrap(drm_device_t * dev,
static int mga_do_pci_dma_bootstrap(struct drm_device * dev,
drm_mga_dma_bootstrap_t * dma_bs)
{
drm_mga_private_t * const dev_priv = (drm_mga_private_t *) dev->dev_private;
drm_mga_private_t *const dev_priv =
(drm_mga_private_t *) dev->dev_private;
unsigned int warp_size = mga_warp_microcode_size(dev_priv);
unsigned int primary_size;
unsigned int bin_count;
int err;
drm_buf_desc_t req;
struct drm_buf_desc req;
if (dev->dma == NULL) {
DRM_ERROR("dev->dma is NULL\n");
return DRM_ERR(EFAULT);
return -EFAULT;
}
/* Make drm_addbufs happy by not trying to create a mapping for less
@ -642,9 +643,8 @@ static int mga_do_pci_dma_bootstrap(drm_device_t * dev,
* alignment of the primary or secondary DMA buffers.
*/
for ( primary_size = dma_bs->primary_size
; primary_size != 0
; primary_size >>= 1 ) {
for (primary_size = dma_bs->primary_size; primary_size != 0;
primary_size >>= 1 ) {
/* The proper alignment for this mapping is 0x04 */
err = drm_addmap(dev, 0, primary_size, _DRM_CONSISTENT,
_DRM_READ_ONLY, &dev_priv->primary);
@ -654,29 +654,28 @@ static int mga_do_pci_dma_bootstrap(drm_device_t * dev,
if (err != 0) {
DRM_ERROR("Unable to allocate primary DMA region: %d\n", err);
return DRM_ERR(ENOMEM);
return -ENOMEM;
}
if (dev_priv->primary->size != dma_bs->primary_size) {
DRM_INFO("Primary DMA buffer size reduced from %u to %u.\n",
dma_bs->primary_size,
(unsigned) dev_priv->primary->size);
dma_bs->primary_size,
(unsigned)dev_priv->primary->size);
dma_bs->primary_size = dev_priv->primary->size;
}
for ( bin_count = dma_bs->secondary_bin_count
; bin_count > 0
; bin_count-- ) {
(void) memset( &req, 0, sizeof(req) );
for (bin_count = dma_bs->secondary_bin_count; bin_count > 0;
bin_count-- ) {
(void)memset(&req, 0, sizeof(req));
req.count = bin_count;
req.size = dma_bs->secondary_bin_size;
err = drm_addbufs_pci( dev, & req );
err = drm_addbufs_pci(dev, &req);
if (!err) {
break;
}
}
if (bin_count == 0) {
DRM_ERROR("Unable to add secondary DMA buffers: %d\n", err);
return err;
@ -699,12 +698,12 @@ static int mga_do_pci_dma_bootstrap(drm_device_t * dev,
}
static int mga_do_dma_bootstrap(drm_device_t * dev,
drm_mga_dma_bootstrap_t * dma_bs)
static int mga_do_dma_bootstrap(struct drm_device *dev,
drm_mga_dma_bootstrap_t *dma_bs)
{
const int is_agp = (dma_bs->agp_mode != 0) && drm_device_is_agp(dev);
int err;
drm_mga_private_t * const dev_priv =
drm_mga_private_t *const dev_priv =
(drm_mga_private_t *) dev->dev_private;
@ -713,17 +712,17 @@ static int mga_do_dma_bootstrap(drm_device_t * dev,
/* The first steps are the same for both PCI and AGP based DMA. Map
* the cards MMIO registers and map a status page.
*/
err = drm_addmap( dev, dev_priv->mmio_base, dev_priv->mmio_size,
_DRM_REGISTERS, _DRM_READ_ONLY, & dev_priv->mmio );
err = drm_addmap(dev, dev_priv->mmio_base, dev_priv->mmio_size,
_DRM_REGISTERS, _DRM_READ_ONLY, & dev_priv->mmio);
if (err) {
DRM_ERROR("Unable to map MMIO region: %d\n", err);
return err;
}
err = drm_addmap( dev, 0, SAREA_MAX, _DRM_SHM,
_DRM_READ_ONLY | _DRM_LOCKED | _DRM_KERNEL,
& dev_priv->status );
err = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM,
_DRM_READ_ONLY | _DRM_LOCKED | _DRM_KERNEL,
& dev_priv->status);
if (err) {
DRM_ERROR("Unable to map status region: %d\n", err);
return err;
@ -739,7 +738,7 @@ static int mga_do_dma_bootstrap(drm_device_t * dev,
if (is_agp) {
err = mga_do_agp_dma_bootstrap(dev, dma_bs);
}
/* If we attempted to initialize the card for AGP DMA but failed,
* clean-up any mess that may have been created.
*/
@ -765,44 +764,37 @@ static int mga_do_dma_bootstrap(drm_device_t * dev,
return err;
}
int mga_dma_bootstrap(DRM_IOCTL_ARGS)
int mga_dma_bootstrap(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
DRM_DEVICE;
drm_mga_dma_bootstrap_t bootstrap;
drm_mga_dma_bootstrap_t *bootstrap = data;
int err;
static const int modes[] = { 0, 1, 2, 2, 4, 4, 4, 4 };
const drm_mga_private_t * const dev_priv =
const drm_mga_private_t *const dev_priv =
(drm_mga_private_t *) dev->dev_private;
DRM_COPY_FROM_USER_IOCTL(bootstrap,
(drm_mga_dma_bootstrap_t __user *) data,
sizeof(bootstrap));
err = mga_do_dma_bootstrap(dev, & bootstrap);
err = mga_do_dma_bootstrap(dev, bootstrap);
if (err) {
mga_do_cleanup_dma(dev, FULL_CLEANUP);
return err;
}
if (dev_priv->agp_textures != NULL) {
bootstrap.texture_handle = dev_priv->agp_textures->offset;
bootstrap.texture_size = dev_priv->agp_textures->size;
bootstrap->texture_handle = dev_priv->agp_textures->offset;
bootstrap->texture_size = dev_priv->agp_textures->size;
} else {
bootstrap.texture_handle = 0;
bootstrap.texture_size = 0;
bootstrap->texture_handle = 0;
bootstrap->texture_size = 0;
}
bootstrap.agp_mode = modes[bootstrap.agp_mode & 0x07];
DRM_COPY_TO_USER_IOCTL((drm_mga_dma_bootstrap_t __user *)data,
bootstrap, sizeof(bootstrap));
bootstrap->agp_mode = modes[bootstrap->agp_mode & 0x07];
return 0;
}
static int mga_do_init_dma(drm_device_t * dev, drm_mga_init_t * init)
static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init)
{
drm_mga_private_t *dev_priv;
int ret;
@ -833,14 +825,13 @@ static int mga_do_init_dma(drm_device_t * dev, drm_mga_init_t * init)
dev_priv->texture_offset = init->texture_offset[0];
dev_priv->texture_size = init->texture_size[0];
DRM_GETSAREA();
dev_priv->sarea = drm_getsarea(dev);
if (!dev_priv->sarea) {
DRM_ERROR("failed to find sarea!\n");
return DRM_ERR(EINVAL);
return -EINVAL;
}
if (! dev_priv->used_new_dma_init) {
if (!dev_priv->used_new_dma_init) {
dev_priv->dma_access = MGA_PAGPXFER;
dev_priv->wagp_enable = MGA_WAGP_ENABLE;
@ -848,28 +839,29 @@ static int mga_do_init_dma(drm_device_t * dev, drm_mga_init_t * init)
dev_priv->status = drm_core_findmap(dev, init->status_offset);
if (!dev_priv->status) {
DRM_ERROR("failed to find status page!\n");
return DRM_ERR(EINVAL);
return -EINVAL;
}
dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
if (!dev_priv->mmio) {
DRM_ERROR("failed to find mmio region!\n");
return DRM_ERR(EINVAL);
return -EINVAL;
}
dev_priv->warp = drm_core_findmap(dev, init->warp_offset);
if (!dev_priv->warp) {
DRM_ERROR("failed to find warp microcode region!\n");
return DRM_ERR(EINVAL);
return -EINVAL;
}
dev_priv->primary = drm_core_findmap(dev, init->primary_offset);
if (!dev_priv->primary) {
DRM_ERROR("failed to find primary dma region!\n");
return DRM_ERR(EINVAL);
return -EINVAL;
}
dev->agp_buffer_token = init->buffers_offset;
dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
dev->agp_buffer_map =
drm_core_findmap(dev, init->buffers_offset);
if (!dev->agp_buffer_map) {
DRM_ERROR("failed to find dma buffer region!\n");
return DRM_ERR(EINVAL);
return -EINVAL;
}
drm_core_ioremap(dev_priv->warp, dev);
@ -887,7 +879,7 @@ static int mga_do_init_dma(drm_device_t * dev, drm_mga_init_t * init)
((dev->agp_buffer_map == NULL) ||
(dev->agp_buffer_map->handle == NULL)))) {
DRM_ERROR("failed to ioremap agp regions!\n");
return DRM_ERR(ENOMEM);
return -ENOMEM;
}
ret = mga_warp_install_microcode(dev_priv);
@ -909,10 +901,6 @@ static int mga_do_init_dma(drm_device_t * dev, drm_mga_init_t * init)
/* Init the primary DMA registers.
*/
MGA_WRITE(MGA_PRIMADDRESS, dev_priv->primary->offset | MGA_DMA_GENERAL);
#if 0
MGA_WRITE(MGA_PRIMPTR, virt_to_bus((void *)dev_priv->prim.status) | MGA_PRIMPTREN0 | /* Soft trap, SECEND, SETUPEND */
MGA_PRIMPTREN1); /* DWGSYNC */
#endif
dev_priv->prim.start = (u8 *) dev_priv->primary->handle;
dev_priv->prim.end = ((u8 *) dev_priv->primary->handle
@ -937,13 +925,13 @@ static int mga_do_init_dma(drm_device_t * dev, drm_mga_init_t * init)
if (mga_freelist_init(dev, dev_priv) < 0) {
DRM_ERROR("could not initialize freelist\n");
return DRM_ERR(ENOMEM);
return -ENOMEM;
}
return 0;
}
static int mga_do_cleanup_dma(drm_device_t * dev, int full_cleanup)
static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup)
{
int err = 0;
DRM_DEBUG("\n");
@ -962,7 +950,7 @@ static int mga_do_cleanup_dma(drm_device_t * dev, int full_cleanup)
&& (dev_priv->warp->type != _DRM_CONSISTENT))
drm_core_ioremapfree(dev_priv->warp, dev);
if ((dev_priv->primary != NULL)
if ((dev_priv->primary != NULL)
&& (dev_priv->primary->type != _DRM_CONSISTENT))
drm_core_ioremapfree(dev_priv->primary, dev);
@ -971,8 +959,8 @@ static int mga_do_cleanup_dma(drm_device_t * dev, int full_cleanup)
if (dev_priv->used_new_dma_init) {
if (dev_priv->agp_handle != 0) {
drm_agp_binding_t unbind_req;
drm_agp_buffer_t free_req;
struct drm_agp_binding unbind_req;
struct drm_agp_buffer free_req;
unbind_req.handle = dev_priv->agp_handle;
drm_agp_unbind(dev, &unbind_req);
@ -1004,72 +992,67 @@ static int mga_do_cleanup_dma(drm_device_t * dev, int full_cleanup)
memset(&dev_priv->prim, 0, sizeof(dev_priv->prim));
dev_priv->warp_pipe = 0;
memset(dev_priv->warp_pipe_phys, 0, sizeof(dev_priv->warp_pipe_phys));
memset(dev_priv->warp_pipe_phys, 0,
sizeof(dev_priv->warp_pipe_phys));
if (dev_priv->head != NULL) {
mga_freelist_cleanup(dev);
}
}
return 0;
return err;
}
int mga_dma_init(DRM_IOCTL_ARGS)
int mga_dma_init(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
DRM_DEVICE;
drm_mga_init_t init;
drm_mga_init_t *init = data;
int err;
LOCK_TEST_WITH_RETURN(dev, filp);
LOCK_TEST_WITH_RETURN(dev, file_priv);
DRM_COPY_FROM_USER_IOCTL(init, (drm_mga_init_t __user *) data,
sizeof(init));
switch (init.func) {
switch (init->func) {
case MGA_INIT_DMA:
err = mga_do_init_dma(dev, &init);
err = mga_do_init_dma(dev, init);
if (err) {
(void) mga_do_cleanup_dma(dev, FULL_CLEANUP);
(void)mga_do_cleanup_dma(dev, FULL_CLEANUP);
}
return err;
case MGA_CLEANUP_DMA:
return mga_do_cleanup_dma(dev, FULL_CLEANUP);
}
return DRM_ERR(EINVAL);
return -EINVAL;
}
/* ================================================================
* Primary DMA stream management
*/
int mga_dma_flush(DRM_IOCTL_ARGS)
int mga_dma_flush(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
DRM_DEVICE;
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
drm_lock_t lock;
struct drm_lock *lock = data;
LOCK_TEST_WITH_RETURN(dev, filp);
DRM_COPY_FROM_USER_IOCTL(lock, (drm_lock_t __user *) data,
sizeof(lock));
LOCK_TEST_WITH_RETURN(dev, file_priv);
DRM_DEBUG("%s%s%s\n",
(lock.flags & _DRM_LOCK_FLUSH) ? "flush, " : "",
(lock.flags & _DRM_LOCK_FLUSH_ALL) ? "flush all, " : "",
(lock.flags & _DRM_LOCK_QUIESCENT) ? "idle, " : "");
(lock->flags & _DRM_LOCK_FLUSH) ? "flush, " : "",
(lock->flags & _DRM_LOCK_FLUSH_ALL) ? "flush all, " : "",
(lock->flags & _DRM_LOCK_QUIESCENT) ? "idle, " : "");
WRAP_WAIT_WITH_RETURN(dev_priv);
if (lock.flags & (_DRM_LOCK_FLUSH | _DRM_LOCK_FLUSH_ALL)) {
if (lock->flags & (_DRM_LOCK_FLUSH | _DRM_LOCK_FLUSH_ALL)) {
mga_do_dma_flush(dev_priv);
}
if (lock.flags & _DRM_LOCK_QUIESCENT) {
if (lock->flags & _DRM_LOCK_QUIESCENT) {
#if MGA_DMA_DEBUG
int ret = mga_do_wait_for_idle(dev_priv);
if (ret < 0)
DRM_INFO("%s: -EBUSY\n", __FUNCTION__);
DRM_INFO("-EBUSY\n");
return ret;
#else
return mga_do_wait_for_idle(dev_priv);
@ -1079,12 +1062,12 @@ int mga_dma_flush(DRM_IOCTL_ARGS)
}
}
int mga_dma_reset(DRM_IOCTL_ARGS)
int mga_dma_reset(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
DRM_DEVICE;
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
LOCK_TEST_WITH_RETURN(dev, filp);
LOCK_TEST_WITH_RETURN(dev, file_priv);
return mga_do_dma_reset(dev_priv);
}
@ -1093,76 +1076,72 @@ int mga_dma_reset(DRM_IOCTL_ARGS)
* DMA buffer management
*/
static int mga_dma_get_buffers(DRMFILE filp, drm_device_t * dev, drm_dma_t * d)
static int mga_dma_get_buffers(struct drm_device * dev,
struct drm_file *file_priv, struct drm_dma * d)
{
drm_buf_t *buf;
struct drm_buf *buf;
int i;
for (i = d->granted_count; i < d->request_count; i++) {
buf = mga_freelist_get(dev);
if (!buf)
return DRM_ERR(EAGAIN);
return -EAGAIN;
buf->filp = filp;
buf->file_priv = file_priv;
if (DRM_COPY_TO_USER(&d->request_indices[i],
&buf->idx, sizeof(buf->idx)))
return DRM_ERR(EFAULT);
return -EFAULT;
if (DRM_COPY_TO_USER(&d->request_sizes[i],
&buf->total, sizeof(buf->total)))
return DRM_ERR(EFAULT);
return -EFAULT;
d->granted_count++;
}
return 0;
}
int mga_dma_buffers(DRM_IOCTL_ARGS)
int mga_dma_buffers(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
DRM_DEVICE;
drm_device_dma_t *dma = dev->dma;
struct drm_device_dma *dma = dev->dma;
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
drm_dma_t __user *argp = (void __user *)data;
drm_dma_t d;
struct drm_dma *d = data;
int ret = 0;
LOCK_TEST_WITH_RETURN(dev, filp);
DRM_COPY_FROM_USER_IOCTL(d, argp, sizeof(d));
LOCK_TEST_WITH_RETURN(dev, file_priv);
/* Please don't send us buffers.
*/
if (d.send_count != 0) {
if (d->send_count != 0) {
DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
DRM_CURRENTPID, d.send_count);
return DRM_ERR(EINVAL);
DRM_CURRENTPID, d->send_count);
return -EINVAL;
}
/* We'll send you buffers.
*/
if (d.request_count < 0 || d.request_count > dma->buf_count) {
if (d->request_count < 0 || d->request_count > dma->buf_count) {
DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
DRM_CURRENTPID, d.request_count, dma->buf_count);
return DRM_ERR(EINVAL);
DRM_CURRENTPID, d->request_count, dma->buf_count);
return -EINVAL;
}
WRAP_TEST_WITH_RETURN(dev_priv);
d.granted_count = 0;
d->granted_count = 0;
if (d.request_count) {
ret = mga_dma_get_buffers(filp, dev, &d);
if (d->request_count) {
ret = mga_dma_get_buffers(dev, file_priv, d);
}
DRM_COPY_TO_USER_IOCTL(argp, d, sizeof(d));
return ret;
}
/**
* Called just before the module is unloaded.
*/
int mga_driver_unload(drm_device_t * dev)
int mga_driver_unload(struct drm_device * dev)
{
drm_free(dev->dev_private, sizeof(drm_mga_private_t), DRM_MEM_DRIVER);
dev->dev_private = NULL;
@ -1173,12 +1152,12 @@ int mga_driver_unload(drm_device_t * dev)
/**
* Called when the last opener of the device is closed.
*/
void mga_driver_lastclose(drm_device_t * dev)
void mga_driver_lastclose(struct drm_device * dev)
{
mga_do_cleanup_dma(dev, FULL_CLEANUP);
}
int mga_driver_dma_quiescent(drm_device_t * dev)
int mga_driver_dma_quiescent(struct drm_device * dev)
{
drm_mga_private_t *dev_priv = dev->dev_private;
return mga_do_wait_for_idle(dev_priv);

View File

@ -184,7 +184,7 @@ typedef struct _drm_mga_sarea {
/* The current cliprects, or a subset thereof.
*/
drm_clip_rect_t boxes[MGA_NR_SAREA_CLIPRECTS];
struct drm_clip_rect boxes[MGA_NR_SAREA_CLIPRECTS];
unsigned int nbox;
/* Information about the most recently used 3d drawable. The
@ -205,7 +205,7 @@ typedef struct _drm_mga_sarea {
unsigned int exported_nback;
int exported_back_x, exported_front_x, exported_w;
int exported_back_y, exported_front_y, exported_h;
drm_clip_rect_t exported_boxes[MGA_NR_SAREA_CLIPRECTS];
struct drm_clip_rect exported_boxes[MGA_NR_SAREA_CLIPRECTS];
/* Counters for aging textures and for client-side throttling.
*/
@ -219,7 +219,7 @@ typedef struct _drm_mga_sarea {
/* LRU lists for texture memory in agp space and on the card.
*/
drm_tex_region_t texList[MGA_NR_TEX_HEAPS][MGA_NR_TEX_REGIONS + 1];
struct drm_tex_region texList[MGA_NR_TEX_HEAPS][MGA_NR_TEX_REGIONS + 1];
unsigned int texAge[MGA_NR_TEX_HEAPS];
/* Mechanism to validate card state.
@ -305,10 +305,10 @@ typedef struct drm_mga_init {
typedef struct drm_mga_dma_bootstrap {
/**
* \name AGP texture region
*
*
* On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, these fields will
* be filled in with the actual AGP texture settings.
*
*
* \warning
* If these fields are non-zero, but dma_mga_dma_bootstrap::agp_mode
* is zero, it means that PCI memory (most likely through the use of
@ -322,7 +322,7 @@ typedef struct drm_mga_dma_bootstrap {
/**
* Requested size of the primary DMA region.
*
*
* On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
* filled in with the actual AGP mode. If AGP was not available
*/
@ -331,18 +331,18 @@ typedef struct drm_mga_dma_bootstrap {
/**
* Requested number of secondary DMA buffers.
*
*
* On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
* filled in with the actual number of secondary DMA buffers
* allocated. Particularly when PCI DMA is used, this may be
 * (substantially) less than the number requested.
*/
uint32_t secondary_bin_count;
/**
* Requested size of each secondary DMA buffer.
*
*
* While the kernel \b is free to reduce
* dma_mga_dma_bootstrap::secondary_bin_count, it is \b not allowed
* to reduce dma_mga_dma_bootstrap::secondary_bin_size.
@ -355,7 +355,7 @@ typedef struct drm_mga_dma_bootstrap {
* \c AGPSTAT2_2X, and \c AGPSTAT2_4X are supported. If this value is
* zero, it means that PCI DMA should be used, even if AGP is
* possible.
*
*
* On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
* filled in with the actual AGP mode. If AGP was not available
* (i.e., PCI DMA was used), this value will be zero.
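
A small sketch of the negotiation described above and performed by mga_do_pci_dma_bootstrap(): the kernel is free to shrink secondary_bin_count (and the primary DMA region) when memory is tight, but never secondary_bin_size. try_alloc() is a hypothetical stand-in for the real drm_addbufs_pci()/drm_addmap() calls.

#include <stdio.h>

/* pretend only 384 KiB of DMA-able memory is available */
static int try_alloc(unsigned int bytes)
{
	return bytes <= 384 * 1024 ? 0 : -1;
}

int main(void)
{
	unsigned int bin_size = 64 * 1024;	/* must not be reduced */
	unsigned int bin_count;

	/* fall back bin by bin until the allocation fits */
	for (bin_count = 16; bin_count > 0; bin_count--) {
		if (try_alloc(bin_count * bin_size) == 0)
			break;
	}
	if (bin_count == 0) {
		printf("unable to add secondary DMA buffers\n");
		return 1;
	}
	printf("allocated %u bins of %u bytes\n", bin_count, bin_size);
	return 0;
}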

View File

@ -62,7 +62,7 @@ static drm_pci_id_list_t mga_pciidlist[] = {
* This function needs to be filled in! The implementation in
* linux-core/mga_drv.c shows what needs to be done.
*/
static int mga_driver_device_is_agp(drm_device_t * dev)
static int mga_driver_device_is_agp(struct drm_device * dev)
{
device_t bus;
@ -87,13 +87,15 @@ static int mga_driver_device_is_agp(drm_device_t * dev)
return DRM_MIGHT_BE_AGP;
}
static void mga_configure(drm_device_t *dev)
static void mga_configure(struct drm_device *dev)
{
dev->driver.buf_priv_size = sizeof(drm_mga_buf_priv_t);
dev->driver.load = mga_driver_load;
dev->driver.unload = mga_driver_unload;
dev->driver.lastclose = mga_driver_lastclose;
dev->driver.vblank_wait = mga_driver_vblank_wait;
dev->driver.get_vblank_counter = mga_get_vblank_counter;
dev->driver.enable_vblank = mga_enable_vblank;
dev->driver.disable_vblank = mga_disable_vblank;
dev->driver.irq_preinstall = mga_driver_irq_preinstall;
dev->driver.irq_postinstall = mga_driver_irq_postinstall;
dev->driver.irq_uninstall = mga_driver_irq_uninstall;
@ -132,9 +134,9 @@ mga_probe(device_t dev)
static int
mga_attach(device_t nbdev)
{
drm_device_t *dev = device_get_softc(nbdev);
struct drm_device *dev = device_get_softc(nbdev);
bzero(dev, sizeof(drm_device_t));
bzero(dev, sizeof(struct drm_device));
mga_configure(dev);
return drm_attach(nbdev, mga_pciidlist);
}
@ -151,7 +153,7 @@ static device_method_t mga_methods[] = {
static driver_t mga_driver = {
"drm",
mga_methods,
sizeof(drm_device_t)
sizeof(struct drm_device)
};
extern devclass_t drm_devclass;
@ -166,7 +168,7 @@ MODULE_DEPEND(mga, drm, 1, 1, 1);
#ifdef _LKM
CFDRIVER_DECL(mga, DV_TTY, NULL);
#else
CFATTACH_DECL(mga, sizeof(drm_device_t), drm_probe, drm_attach, drm_detach,
CFATTACH_DECL(mga, sizeof(struct drm_device), drm_probe, drm_attach, drm_detach,
drm_activate);
#endif
#endif
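
A sketch of the configure-time hook wiring shown in mga_configure() above: the bus attach path fills a table of function pointers (including the new enable_vblank/disable_vblank/get_vblank_counter hooks) before handing the device to the DRM core. The structures below are simplified stand-ins, not the real driver descriptor.

#include <stdio.h>

struct fake_drm_device;

struct fake_driver_hooks {
	int  (*enable_vblank)(struct fake_drm_device *dev, int crtc);
	void (*disable_vblank)(struct fake_drm_device *dev, int crtc);
	unsigned int (*get_vblank_counter)(struct fake_drm_device *dev, int crtc);
};

struct fake_drm_device {
	struct fake_driver_hooks driver;
	unsigned int vbl_count;
};

static int fake_enable_vblank(struct fake_drm_device *dev, int crtc)
{
	(void)dev;
	return crtc == 0 ? 0 : -1;	/* single-CRTC hardware */
}

static void fake_disable_vblank(struct fake_drm_device *dev, int crtc)
{
	(void)dev;
	(void)crtc;
}

static unsigned int fake_get_vblank_counter(struct fake_drm_device *dev, int crtc)
{
	return crtc == 0 ? dev->vbl_count : 0;
}

/* analogue of mga_configure(): fill in the hook table at attach time */
static void fake_configure(struct fake_drm_device *dev)
{
	dev->driver.enable_vblank = fake_enable_vblank;
	dev->driver.disable_vblank = fake_disable_vblank;
	dev->driver.get_vblank_counter = fake_get_vblank_counter;
}

int main(void)
{
	struct fake_drm_device dev = { { 0 }, 42 };

	fake_configure(&dev);
	printf("vblank counter: %u\n", dev.driver.get_vblank_counter(&dev, 0));
	return 0;
}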

View File

@ -68,7 +68,7 @@ typedef struct drm_mga_freelist {
struct drm_mga_freelist *next;
struct drm_mga_freelist *prev;
drm_mga_age_t age;
drm_buf_t *buf;
struct drm_buf *buf;
} drm_mga_freelist_t;
typedef struct {
@ -112,17 +112,18 @@ typedef struct drm_mga_private {
/**
* \name MMIO region parameters.
*
*
* \sa drm_mga_private_t::mmio
*/
/*@{*/
u32 mmio_base; /**< Bus address of base of MMIO. */
u32 mmio_size; /**< Size of the MMIO region. */
u32 mmio_base; /**< Bus address of base of MMIO. */
u32 mmio_size; /**< Size of the MMIO region. */
/*@}*/
u32 clear_cmd;
u32 maccess;
atomic_t vbl_received; /**< Number of vblanks received. */
wait_queue_head_t fence_queue;
atomic_t last_fence_retired;
u32 next_fence_to_post;
@ -146,24 +147,29 @@ typedef struct drm_mga_private {
drm_local_map_t *warp;
drm_local_map_t *primary;
drm_local_map_t *agp_textures;
unsigned long agp_handle;
unsigned int agp_size;
} drm_mga_private_t;
extern drm_ioctl_desc_t mga_ioctls[];
extern struct drm_ioctl_desc mga_ioctls[];
extern int mga_max_ioctl;
/* mga_dma.c */
extern int mga_dma_bootstrap(DRM_IOCTL_ARGS);
extern int mga_dma_init(DRM_IOCTL_ARGS);
extern int mga_dma_flush(DRM_IOCTL_ARGS);
extern int mga_dma_reset(DRM_IOCTL_ARGS);
extern int mga_dma_buffers(DRM_IOCTL_ARGS);
extern int mga_driver_load(drm_device_t *dev, unsigned long flags);
extern int mga_driver_unload(drm_device_t * dev);
extern void mga_driver_lastclose(drm_device_t * dev);
extern int mga_driver_dma_quiescent(drm_device_t * dev);
extern int mga_dma_bootstrap(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int mga_dma_init(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int mga_dma_flush(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int mga_dma_reset(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int mga_dma_buffers(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int mga_driver_load(struct drm_device *dev, unsigned long flags);
extern int mga_driver_unload(struct drm_device * dev);
extern void mga_driver_lastclose(struct drm_device * dev);
extern int mga_driver_dma_quiescent(struct drm_device * dev);
extern int mga_do_wait_for_idle(drm_mga_private_t * dev_priv);
@ -171,7 +177,7 @@ extern void mga_do_dma_flush(drm_mga_private_t * dev_priv);
extern void mga_do_dma_wrap_start(drm_mga_private_t * dev_priv);
extern void mga_do_dma_wrap_end(drm_mga_private_t * dev_priv);
extern int mga_freelist_put(drm_device_t * dev, drm_buf_t * buf);
extern int mga_freelist_put(struct drm_device * dev, struct drm_buf * buf);
/* mga_warp.c */
extern unsigned int mga_warp_microcode_size(const drm_mga_private_t * dev_priv);
@ -179,12 +185,15 @@ extern int mga_warp_install_microcode(drm_mga_private_t * dev_priv);
extern int mga_warp_init(drm_mga_private_t * dev_priv);
/* mga_irq.c */
extern int mga_driver_fence_wait(drm_device_t * dev, unsigned int *sequence);
extern int mga_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence);
extern int mga_enable_vblank(struct drm_device *dev, int crtc);
extern void mga_disable_vblank(struct drm_device *dev, int crtc);
extern u32 mga_get_vblank_counter(struct drm_device *dev, int crtc);
extern int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence);
extern int mga_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence);
extern irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS);
extern void mga_driver_irq_preinstall(drm_device_t * dev);
extern void mga_driver_irq_postinstall(drm_device_t * dev);
extern void mga_driver_irq_uninstall(drm_device_t * dev);
extern void mga_driver_irq_preinstall(struct drm_device * dev);
extern int mga_driver_irq_postinstall(struct drm_device * dev);
extern void mga_driver_irq_uninstall(struct drm_device * dev);
extern long mga_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
@ -214,8 +223,8 @@ static inline u32 _MGA_READ(u32 * addr)
#define MGA_WRITE( reg, val ) DRM_WRITE32(dev_priv->mmio, (reg), (val))
#endif
#define DWGREG0 0x1c00
#define DWGREG0_END 0x1dff
#define DWGREG0 0x1c00
#define DWGREG0_END 0x1dff
#define DWGREG1 0x2c00
#define DWGREG1_END 0x2dff
@ -247,8 +256,8 @@ do { \
} else if ( dev_priv->prim.space < \
dev_priv->prim.high_mark ) { \
if ( MGA_DMA_DEBUG ) \
DRM_INFO( "%s: wrap...\n", __FUNCTION__ ); \
return DRM_ERR(EBUSY); \
DRM_INFO( "wrap...\n"); \
return -EBUSY; \
} \
} \
} while (0)
@ -258,8 +267,8 @@ do { \
if ( test_bit( 0, &dev_priv->prim.wrapped ) ) { \
if ( mga_do_wait_for_idle( dev_priv ) < 0 ) { \
if ( MGA_DMA_DEBUG ) \
DRM_INFO( "%s: wrap...\n", __FUNCTION__ ); \
return DRM_ERR(EBUSY); \
DRM_INFO( "wrap...\n"); \
return -EBUSY; \
} \
mga_do_dma_wrap_end( dev_priv ); \
} \
@ -278,8 +287,7 @@ do { \
#define BEGIN_DMA( n ) \
do { \
if ( MGA_VERBOSE ) { \
DRM_INFO( "BEGIN_DMA( %d ) in %s\n", \
(n), __FUNCTION__ ); \
DRM_INFO( "BEGIN_DMA( %d )\n", (n) ); \
DRM_INFO( " space=0x%x req=0x%Zx\n", \
dev_priv->prim.space, (n) * DMA_BLOCK_SIZE ); \
} \
@ -290,7 +298,7 @@ do { \
#define BEGIN_DMA_WRAP() \
do { \
if ( MGA_VERBOSE ) { \
DRM_INFO( "BEGIN_DMA() in %s\n", __FUNCTION__ ); \
DRM_INFO( "BEGIN_DMA()\n" ); \
DRM_INFO( " space=0x%x\n", dev_priv->prim.space ); \
} \
prim = dev_priv->prim.start; \
@ -309,7 +317,7 @@ do { \
#define FLUSH_DMA() \
do { \
if ( 0 ) { \
DRM_INFO( "%s:\n", __FUNCTION__ ); \
DRM_INFO( "\n" ); \
DRM_INFO( " tail=0x%06x head=0x%06lx\n", \
dev_priv->prim.tail, \
MGA_READ( MGA_PRIMADDRESS ) - \
@ -392,22 +400,22 @@ do { \
#define MGA_VINTCLR (1 << 4)
#define MGA_VINTEN (1 << 5)
#define MGA_ALPHACTRL 0x2c7c
#define MGA_AR0 0x1c60
#define MGA_AR1 0x1c64
#define MGA_AR2 0x1c68
#define MGA_AR3 0x1c6c
#define MGA_AR4 0x1c70
#define MGA_AR5 0x1c74
#define MGA_AR6 0x1c78
#define MGA_ALPHACTRL 0x2c7c
#define MGA_AR0 0x1c60
#define MGA_AR1 0x1c64
#define MGA_AR2 0x1c68
#define MGA_AR3 0x1c6c
#define MGA_AR4 0x1c70
#define MGA_AR5 0x1c74
#define MGA_AR6 0x1c78
#define MGA_CXBNDRY 0x1c80
#define MGA_CXLEFT 0x1ca0
#define MGA_CXLEFT 0x1ca0
#define MGA_CXRIGHT 0x1ca4
#define MGA_DMAPAD 0x1c54
#define MGA_DSTORG 0x2cb8
#define MGA_DWGCTL 0x1c00
#define MGA_DMAPAD 0x1c54
#define MGA_DSTORG 0x2cb8
#define MGA_DWGCTL 0x1c00
# define MGA_OPCOD_MASK (15 << 0)
# define MGA_OPCOD_TRAP (4 << 0)
# define MGA_OPCOD_TEXTURE_TRAP (6 << 0)
@ -453,27 +461,27 @@ do { \
# define MGA_CLIPDIS (1 << 31)
#define MGA_DWGSYNC 0x2c4c
#define MGA_FCOL 0x1c24
#define MGA_FIFOSTATUS 0x1e10
#define MGA_FOGCOL 0x1cf4
#define MGA_FCOL 0x1c24
#define MGA_FIFOSTATUS 0x1e10
#define MGA_FOGCOL 0x1cf4
#define MGA_FXBNDRY 0x1c84
#define MGA_FXLEFT 0x1ca8
#define MGA_FXLEFT 0x1ca8
#define MGA_FXRIGHT 0x1cac
#define MGA_ICLEAR 0x1e18
#define MGA_ICLEAR 0x1e18
# define MGA_SOFTRAPICLR (1 << 0)
# define MGA_VLINEICLR (1 << 5)
#define MGA_IEN 0x1e1c
#define MGA_IEN 0x1e1c
# define MGA_SOFTRAPIEN (1 << 0)
# define MGA_VLINEIEN (1 << 5)
#define MGA_LEN 0x1c5c
#define MGA_LEN 0x1c5c
#define MGA_MACCESS 0x1c04
#define MGA_PITCH 0x1c8c
#define MGA_PLNWT 0x1c1c
#define MGA_PRIMADDRESS 0x1e58
#define MGA_PITCH 0x1c8c
#define MGA_PLNWT 0x1c1c
#define MGA_PRIMADDRESS 0x1e58
# define MGA_DMA_GENERAL (0 << 0)
# define MGA_DMA_BLIT (1 << 0)
# define MGA_DMA_VECTOR (2 << 0)
@ -485,43 +493,43 @@ do { \
# define MGA_PRIMPTREN0 (1 << 0)
# define MGA_PRIMPTREN1 (1 << 1)
#define MGA_RST 0x1e40
#define MGA_RST 0x1e40
# define MGA_SOFTRESET (1 << 0)
# define MGA_SOFTEXTRST (1 << 1)
#define MGA_SECADDRESS 0x2c40
#define MGA_SECEND 0x2c44
#define MGA_SETUPADDRESS 0x2cd0
#define MGA_SETUPEND 0x2cd4
#define MGA_SECADDRESS 0x2c40
#define MGA_SECEND 0x2c44
#define MGA_SETUPADDRESS 0x2cd0
#define MGA_SETUPEND 0x2cd4
#define MGA_SGN 0x1c58
#define MGA_SOFTRAP 0x2c48
#define MGA_SRCORG 0x2cb4
#define MGA_SRCORG 0x2cb4
# define MGA_SRMMAP_MASK (1 << 0)
# define MGA_SRCMAP_FB (0 << 0)
# define MGA_SRCMAP_SYSMEM (1 << 0)
# define MGA_SRCACC_MASK (1 << 1)
# define MGA_SRCACC_PCI (0 << 1)
# define MGA_SRCACC_AGP (1 << 1)
#define MGA_STATUS 0x1e14
#define MGA_STATUS 0x1e14
# define MGA_SOFTRAPEN (1 << 0)
# define MGA_VSYNCPEN (1 << 4)
# define MGA_VLINEPEN (1 << 5)
# define MGA_DWGENGSTS (1 << 16)
# define MGA_ENDPRDMASTS (1 << 17)
#define MGA_STENCIL 0x2cc8
#define MGA_STENCILCTL 0x2ccc
#define MGA_STENCILCTL 0x2ccc
#define MGA_TDUALSTAGE0 0x2cf8
#define MGA_TDUALSTAGE1 0x2cfc
#define MGA_TEXBORDERCOL 0x2c5c
#define MGA_TEXCTL 0x2c30
#define MGA_TDUALSTAGE0 0x2cf8
#define MGA_TDUALSTAGE1 0x2cfc
#define MGA_TEXBORDERCOL 0x2c5c
#define MGA_TEXCTL 0x2c30
#define MGA_TEXCTL2 0x2c3c
# define MGA_DUALTEX (1 << 7)
# define MGA_G400_TC2_MAGIC (1 << 15)
# define MGA_MAP1_ENABLE (1 << 31)
#define MGA_TEXFILTER 0x2c58
#define MGA_TEXHEIGHT 0x2c2c
#define MGA_TEXORG 0x2c24
#define MGA_TEXFILTER 0x2c58
#define MGA_TEXHEIGHT 0x2c2c
#define MGA_TEXORG 0x2c24
# define MGA_TEXORGMAP_MASK (1 << 0)
# define MGA_TEXORGMAP_FB (0 << 0)
# define MGA_TEXORGMAP_SYSMEM (1 << 0)
@ -532,45 +540,45 @@ do { \
#define MGA_TEXORG2 0x2ca8
#define MGA_TEXORG3 0x2cac
#define MGA_TEXORG4 0x2cb0
#define MGA_TEXTRANS 0x2c34
#define MGA_TEXTRANSHIGH 0x2c38
#define MGA_TEXWIDTH 0x2c28
#define MGA_TEXTRANS 0x2c34
#define MGA_TEXTRANSHIGH 0x2c38
#define MGA_TEXWIDTH 0x2c28
#define MGA_WACCEPTSEQ 0x1dd4
#define MGA_WCODEADDR 0x1e6c
#define MGA_WFLAG 0x1dc4
#define MGA_WFLAG1 0x1de0
#define MGA_WACCEPTSEQ 0x1dd4
#define MGA_WCODEADDR 0x1e6c
#define MGA_WFLAG 0x1dc4
#define MGA_WFLAG1 0x1de0
#define MGA_WFLAGNB 0x1e64
#define MGA_WFLAGNB1 0x1e08
#define MGA_WFLAGNB1 0x1e08
#define MGA_WGETMSB 0x1dc8
#define MGA_WIADDR 0x1dc0
#define MGA_WIADDR 0x1dc0
#define MGA_WIADDR2 0x1dd8
# define MGA_WMODE_SUSPEND (0 << 0)
# define MGA_WMODE_RESUME (1 << 0)
# define MGA_WMODE_JUMP (2 << 0)
# define MGA_WMODE_START (3 << 0)
# define MGA_WAGP_ENABLE (1 << 2)
#define MGA_WMISC 0x1e70
#define MGA_WMISC 0x1e70
# define MGA_WUCODECACHE_ENABLE (1 << 0)
# define MGA_WMASTER_ENABLE (1 << 1)
# define MGA_WCACHEFLUSH_ENABLE (1 << 3)
#define MGA_WVRTXSZ 0x1dcc
#define MGA_YBOT 0x1c9c
#define MGA_YDST 0x1c90
#define MGA_YBOT 0x1c9c
#define MGA_YDST 0x1c90
#define MGA_YDSTLEN 0x1c88
#define MGA_YDSTORG 0x1c94
#define MGA_YTOP 0x1c98
#define MGA_YTOP 0x1c98
#define MGA_ZORG 0x1c0c
#define MGA_ZORG 0x1c0c
/* This finishes the current batch of commands
*/
#define MGA_EXEC 0x0100
#define MGA_EXEC 0x0100
/* AGP PLL encoding (for G200 only).
*/
#define MGA_AGP_PLL 0x1e4c
#define MGA_AGP_PLL 0x1e4c
# define MGA_AGP2XPLL_DISABLE (0 << 0)
# define MGA_AGP2XPLL_ENABLE (1 << 0)

View File

@ -39,9 +39,23 @@ __FBSDID("$FreeBSD$");
#include "dev/drm/mga_drm.h"
#include "dev/drm/mga_drv.h"
u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
{
const drm_mga_private_t *const dev_priv =
(drm_mga_private_t *) dev->dev_private;
if (crtc != 0) {
return 0;
}
return atomic_read(&dev_priv->vbl_received);
}
irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
{
drm_device_t *dev = (drm_device_t *) arg;
struct drm_device *dev = (struct drm_device *) arg;
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
int status;
int handled = 0;
@ -51,16 +65,15 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
/* VBLANK interrupt */
if (status & MGA_VLINEPEN) {
MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
atomic_inc(&dev->vbl_received);
DRM_WAKEUP(&dev->vbl_queue);
drm_vbl_send_signals(dev);
atomic_inc(&dev_priv->vbl_received);
drm_handle_vblank(dev, 0);
handled = 1;
}
/* SOFTRAP interrupt */
if (status & MGA_SOFTRAPEN) {
const u32 prim_start = MGA_READ(MGA_PRIMADDRESS);
const u32 prim_end = MGA_READ(MGA_PRIMEND);
const u32 prim_end = MGA_READ(MGA_PRIMEND);
MGA_WRITE(MGA_ICLEAR, MGA_SOFTRAPICLR);
@ -68,7 +81,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
/* In addition to clearing the interrupt-pending bit, we
* have to write to MGA_PRIMEND to re-start the DMA operation.
*/
if ( (prim_start & ~0x03) != (prim_end & ~0x03) ) {
if ((prim_start & ~0x03) != (prim_end & ~0x03)) {
MGA_WRITE(MGA_PRIMEND, prim_end);
}
@ -77,31 +90,42 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
handled = 1;
}
if ( handled ) {
if (handled)
return IRQ_HANDLED;
}
return IRQ_NONE;
}
int mga_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence)
int mga_enable_vblank(struct drm_device *dev, int crtc)
{
unsigned int cur_vblank;
int ret = 0;
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
/* Assume that the user has missed the current sequence number
* by about a day rather than she wants to wait for years
* using vertical blanks...
*/
DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
(((cur_vblank = atomic_read(&dev->vbl_received))
- *sequence) <= (1 << 23)));
if (crtc != 0) {
DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
crtc);
return 0;
}
*sequence = cur_vblank;
return ret;
MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN);
return 0;
}
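The DRM_WAIT_ON condition removed above encodes the wraparound handling the old comment alludes to: with 32-bit unsigned arithmetic, (counter - requested) stays small once the requested sequence has been reached and becomes huge while the request is still in the future, with 1 << 23 as the cutoff. A minimal stand-alone sketch of that comparison; the helper name and the sample values are made up for illustration:

#include <stdio.h>
#include <stdint.h>

/* Same shape as the old DRM_WAIT_ON condition: "done" once the counter
 * has caught up with (or passed) the requested sequence, treating a
 * difference larger than 1 << 23 as "still in the future". */
static int vblank_passed(uint32_t counter, uint32_t requested)
{
	return (uint32_t)(counter - requested) <= (1u << 23);
}

int main(void)
{
	printf("%d\n", vblank_passed(100, 90));        /* 1: already retired */
	printf("%d\n", vblank_passed(90, 100));        /* 0: keep waiting */
	printf("%d\n", vblank_passed(5, 0xfffffff0u)); /* 1: counter wrapped past the request */
	return 0;
}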
int mga_driver_fence_wait(drm_device_t * dev, unsigned int *sequence)
void mga_disable_vblank(struct drm_device *dev, int crtc)
{
if (crtc != 0) {
DRM_ERROR("tried to disable vblank on non-existent crtc %d\n",
crtc);
}
/* Do *NOT* disable the vertical refresh interrupt. MGA doesn't have
* a nice hardware counter that tracks the number of refreshes when
* the interrupt is disabled, and the kernel doesn't know the refresh
* rate to calculate an estimate.
*/
/* MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN); */
}
int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence)
{
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
unsigned int cur_fence;
@ -120,7 +144,7 @@ int mga_driver_fence_wait(drm_device_t * dev, unsigned int *sequence)
return ret;
}
void mga_driver_irq_preinstall(drm_device_t * dev)
void mga_driver_irq_preinstall(struct drm_device * dev)
{
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
@ -130,17 +154,25 @@ void mga_driver_irq_preinstall(drm_device_t * dev)
MGA_WRITE(MGA_ICLEAR, ~0);
}
void mga_driver_irq_postinstall(drm_device_t * dev)
int mga_driver_irq_postinstall(struct drm_device * dev)
{
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
int ret;
DRM_INIT_WAITQUEUE( &dev_priv->fence_queue );
ret = drm_vblank_init(dev, 1);
if (ret)
return ret;
/* Turn on vertical blank interrupt and soft trap interrupt. */
MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN);
DRM_INIT_WAITQUEUE(&dev_priv->fence_queue);
/* Turn on soft trap interrupt. Vertical blank interrupts are enabled
* in mga_enable_vblank.
*/
MGA_WRITE(MGA_IEN, MGA_SOFTRAPEN);
return 0;
}
void mga_driver_irq_uninstall(drm_device_t * dev)
void mga_driver_irq_uninstall(struct drm_device * dev)
{
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
if (!dev_priv)
@ -148,6 +180,6 @@ void mga_driver_irq_uninstall(drm_device_t * dev)
/* Disable *all* interrupts */
MGA_WRITE(MGA_IEN, 0);
dev->irq_enabled = 0;
}

View File

@ -46,7 +46,7 @@ __FBSDID("$FreeBSD$");
*/
static void mga_emit_clip_rect(drm_mga_private_t * dev_priv,
drm_clip_rect_t * box)
struct drm_clip_rect * box)
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
@ -65,8 +65,7 @@ static void mga_emit_clip_rect(drm_mga_private_t * dev_priv,
}
DMA_BLOCK(MGA_DMAPAD, 0x00000000,
MGA_CXBNDRY, ((box->x2 - 1) << 16) | box->x1,
MGA_YTOP, box->y1 * pitch,
MGA_YBOT, (box->y2 - 1) * pitch);
MGA_YTOP, box->y1 * pitch, MGA_YBOT, (box->y2 - 1) * pitch);
ADVANCE_DMA();
}
@ -81,18 +80,15 @@ static __inline__ void mga_g200_emit_context(drm_mga_private_t * dev_priv)
DMA_BLOCK(MGA_DSTORG, ctx->dstorg,
MGA_MACCESS, ctx->maccess,
MGA_PLNWT, ctx->plnwt,
MGA_DWGCTL, ctx->dwgctl);
MGA_PLNWT, ctx->plnwt, MGA_DWGCTL, ctx->dwgctl);
DMA_BLOCK(MGA_ALPHACTRL, ctx->alphactrl,
MGA_FOGCOL, ctx->fogcolor,
MGA_WFLAG, ctx->wflag,
MGA_ZORG, dev_priv->depth_offset);
MGA_WFLAG, ctx->wflag, MGA_ZORG, dev_priv->depth_offset);
DMA_BLOCK(MGA_FCOL, ctx->fcol,
MGA_DMAPAD, 0x00000000,
MGA_DMAPAD, 0x00000000,
MGA_DMAPAD, 0x00000000);
MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);
ADVANCE_DMA();
}
@ -165,8 +161,8 @@ static __inline__ void mga_g400_emit_tex0(drm_mga_private_t * dev_priv)
drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[0];
DMA_LOCALS;
/* printk("mga_g400_emit_tex0 %x %x %x\n", tex->texorg, */
/* tex->texctl, tex->texctl2); */
/* printk("mga_g400_emit_tex0 %x %x %x\n", tex->texorg, */
/* tex->texctl, tex->texctl2); */
BEGIN_DMA(6);
@ -209,8 +205,8 @@ static __inline__ void mga_g400_emit_tex1(drm_mga_private_t * dev_priv)
drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[1];
DMA_LOCALS;
/* printk("mga_g400_emit_tex1 %x %x %x\n", tex->texorg, */
/* tex->texctl, tex->texctl2); */
/* printk("mga_g400_emit_tex1 %x %x %x\n", tex->texorg, */
/* tex->texctl, tex->texctl2); */
BEGIN_DMA(5);
@ -279,7 +275,7 @@ static __inline__ void mga_g400_emit_pipe(drm_mga_private_t * dev_priv)
unsigned int pipe = sarea_priv->warp_pipe;
DMA_LOCALS;
/* printk("mga_g400_emit_pipe %x\n", pipe); */
/* printk("mga_g400_emit_pipe %x\n", pipe); */
BEGIN_DMA(10);
@ -419,7 +415,7 @@ static int mga_verify_context(drm_mga_private_t * dev_priv)
ctx->dstorg, dev_priv->front_offset,
dev_priv->back_offset);
ctx->dstorg = 0;
return DRM_ERR(EINVAL);
return -EINVAL;
}
return 0;
@ -438,7 +434,7 @@ static int mga_verify_tex(drm_mga_private_t * dev_priv, int unit)
if (org == (MGA_TEXORGMAP_SYSMEM | MGA_TEXORGACC_PCI)) {
DRM_ERROR("*** bad TEXORG: 0x%x, unit %d\n", tex->texorg, unit);
tex->texorg = 0;
return DRM_ERR(EINVAL);
return -EINVAL;
}
return 0;
@ -480,13 +476,13 @@ static int mga_verify_iload(drm_mga_private_t * dev_priv,
dstorg + length > (dev_priv->texture_offset +
dev_priv->texture_size)) {
DRM_ERROR("*** bad iload DSTORG: 0x%x\n", dstorg);
return DRM_ERR(EINVAL);
return -EINVAL;
}
if (length & MGA_ILOAD_MASK) {
DRM_ERROR("*** bad iload length: 0x%x\n",
length & MGA_ILOAD_MASK);
return DRM_ERR(EINVAL);
return -EINVAL;
}
return 0;
@ -498,7 +494,7 @@ static int mga_verify_blit(drm_mga_private_t * dev_priv,
if ((srcorg & 0x3) == (MGA_SRCACC_PCI | MGA_SRCMAP_SYSMEM) ||
(dstorg & 0x3) == (MGA_SRCACC_PCI | MGA_SRCMAP_SYSMEM)) {
DRM_ERROR("*** bad blit: src=0x%x dst=0x%x\n", srcorg, dstorg);
return DRM_ERR(EINVAL);
return -EINVAL;
}
return 0;
}
@ -507,12 +503,12 @@ static int mga_verify_blit(drm_mga_private_t * dev_priv,
*
*/
static void mga_dma_dispatch_clear(drm_device_t * dev, drm_mga_clear_t * clear)
static void mga_dma_dispatch_clear(struct drm_device * dev, drm_mga_clear_t * clear)
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
drm_clip_rect_t *pbox = sarea_priv->boxes;
struct drm_clip_rect *pbox = sarea_priv->boxes;
int nbox = sarea_priv->nbox;
int i;
DMA_LOCALS;
@ -528,7 +524,7 @@ static void mga_dma_dispatch_clear(drm_device_t * dev, drm_mga_clear_t * clear)
ADVANCE_DMA();
for (i = 0; i < nbox; i++) {
drm_clip_rect_t *box = &pbox[i];
struct drm_clip_rect *box = &pbox[i];
u32 height = box->y2 - box->y1;
DRM_DEBUG(" from=%d,%d to=%d,%d\n",
@ -597,12 +593,12 @@ static void mga_dma_dispatch_clear(drm_device_t * dev, drm_mga_clear_t * clear)
FLUSH_DMA();
}
static void mga_dma_dispatch_swap(drm_device_t * dev)
static void mga_dma_dispatch_swap(struct drm_device * dev)
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
drm_clip_rect_t *pbox = sarea_priv->boxes;
struct drm_clip_rect *pbox = sarea_priv->boxes;
int nbox = sarea_priv->nbox;
int i;
DMA_LOCALS;
@ -629,7 +625,7 @@ static void mga_dma_dispatch_swap(drm_device_t * dev)
MGA_DWGCTL, MGA_DWGCTL_COPY);
for (i = 0; i < nbox; i++) {
drm_clip_rect_t *box = &pbox[i];
struct drm_clip_rect *box = &pbox[i];
u32 height = box->y2 - box->y1;
u32 start = box->y1 * dev_priv->front_pitch;
@ -651,10 +647,10 @@ static void mga_dma_dispatch_swap(drm_device_t * dev)
FLUSH_DMA();
DRM_DEBUG("%s... done.\n", __FUNCTION__);
DRM_DEBUG("... done.\n");
}
static void mga_dma_dispatch_vertex(drm_device_t * dev, drm_buf_t * buf)
static void mga_dma_dispatch_vertex(struct drm_device * dev, struct drm_buf * buf)
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_buf_priv_t *buf_priv = buf->dev_private;
@ -663,7 +659,7 @@ static void mga_dma_dispatch_vertex(drm_device_t * dev, drm_buf_t * buf)
u32 length = (u32) buf->used;
int i = 0;
DMA_LOCALS;
DRM_DEBUG("vertex: buf=%d used=%d\n", buf->idx, buf->used);
DRM_DEBUG("buf=%d used=%d\n", buf->idx, buf->used);
if (buf->used) {
buf_priv->dispatched = 1;
@ -701,7 +697,7 @@ static void mga_dma_dispatch_vertex(drm_device_t * dev, drm_buf_t * buf)
FLUSH_DMA();
}
static void mga_dma_dispatch_indices(drm_device_t * dev, drm_buf_t * buf,
static void mga_dma_dispatch_indices(struct drm_device * dev, struct drm_buf * buf,
unsigned int start, unsigned int end)
{
drm_mga_private_t *dev_priv = dev->dev_private;
@ -710,7 +706,7 @@ static void mga_dma_dispatch_indices(drm_device_t * dev, drm_buf_t * buf,
u32 address = (u32) buf->bus_address;
int i = 0;
DMA_LOCALS;
DRM_DEBUG("indices: buf=%d start=%d end=%d\n", buf->idx, start, end);
DRM_DEBUG("buf=%d start=%d end=%d\n", buf->idx, start, end);
if (start != end) {
buf_priv->dispatched = 1;
@ -750,7 +746,7 @@ static void mga_dma_dispatch_indices(drm_device_t * dev, drm_buf_t * buf,
/* This copies a 64 byte aligned agp region to the framebuffer with a
* standard blit, the ioctl needs to do checking.
*/
static void mga_dma_dispatch_iload(drm_device_t * dev, drm_buf_t * buf,
static void mga_dma_dispatch_iload(struct drm_device * dev, struct drm_buf * buf,
unsigned int dstorg, unsigned int length)
{
drm_mga_private_t *dev_priv = dev->dev_private;
@ -803,12 +799,12 @@ static void mga_dma_dispatch_iload(drm_device_t * dev, drm_buf_t * buf,
FLUSH_DMA();
}
static void mga_dma_dispatch_blit(drm_device_t * dev, drm_mga_blit_t * blit)
static void mga_dma_dispatch_blit(struct drm_device * dev, drm_mga_blit_t * blit)
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
drm_clip_rect_t *pbox = sarea_priv->boxes;
struct drm_clip_rect *pbox = sarea_priv->boxes;
int nbox = sarea_priv->nbox;
u32 scandir = 0, i;
DMA_LOCALS;
@ -868,24 +864,20 @@ static void mga_dma_dispatch_blit(drm_device_t * dev, drm_mga_blit_t * blit)
*
*/
static int mga_dma_clear(DRM_IOCTL_ARGS)
static int mga_dma_clear(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEVICE;
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mga_clear_t clear;
drm_mga_clear_t *clear = data;
LOCK_TEST_WITH_RETURN(dev, filp);
DRM_COPY_FROM_USER_IOCTL(clear, (drm_mga_clear_t __user *) data,
sizeof(clear));
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS)
sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;
WRAP_TEST_WITH_RETURN(dev_priv);
mga_dma_dispatch_clear(dev, &clear);
mga_dma_dispatch_clear(dev, clear);
/* Make sure we restore the 3D state next time.
*/
@ -894,13 +886,12 @@ static int mga_dma_clear(DRM_IOCTL_ARGS)
return 0;
}
static int mga_dma_swap(DRM_IOCTL_ARGS)
static int mga_dma_swap(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEVICE;
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
LOCK_TEST_WITH_RETURN(dev, filp);
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS)
sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;
@ -916,37 +907,32 @@ static int mga_dma_swap(DRM_IOCTL_ARGS)
return 0;
}
static int mga_dma_vertex(DRM_IOCTL_ARGS)
static int mga_dma_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEVICE;
drm_mga_private_t *dev_priv = dev->dev_private;
drm_device_dma_t *dma = dev->dma;
drm_buf_t *buf;
struct drm_device_dma *dma = dev->dma;
struct drm_buf *buf;
drm_mga_buf_priv_t *buf_priv;
drm_mga_vertex_t vertex;
drm_mga_vertex_t *vertex = data;
LOCK_TEST_WITH_RETURN(dev, filp);
LOCK_TEST_WITH_RETURN(dev, file_priv);
DRM_COPY_FROM_USER_IOCTL(vertex,
(drm_mga_vertex_t __user *) data,
sizeof(vertex));
if (vertex.idx < 0 || vertex.idx > dma->buf_count)
return DRM_ERR(EINVAL);
buf = dma->buflist[vertex.idx];
if (vertex->idx < 0 || vertex->idx > dma->buf_count)
return -EINVAL;
buf = dma->buflist[vertex->idx];
buf_priv = buf->dev_private;
buf->used = vertex.used;
buf_priv->discard = vertex.discard;
buf->used = vertex->used;
buf_priv->discard = vertex->discard;
if (!mga_verify_state(dev_priv)) {
if (vertex.discard) {
if (vertex->discard) {
if (buf_priv->dispatched == 1)
AGE_BUFFER(buf_priv);
buf_priv->dispatched = 0;
mga_freelist_put(dev, buf);
}
return DRM_ERR(EINVAL);
return -EINVAL;
}
WRAP_TEST_WITH_RETURN(dev_priv);
@ -956,82 +942,73 @@ static int mga_dma_vertex(DRM_IOCTL_ARGS)
return 0;
}
static int mga_dma_indices(DRM_IOCTL_ARGS)
static int mga_dma_indices(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEVICE;
drm_mga_private_t *dev_priv = dev->dev_private;
drm_device_dma_t *dma = dev->dma;
drm_buf_t *buf;
struct drm_device_dma *dma = dev->dma;
struct drm_buf *buf;
drm_mga_buf_priv_t *buf_priv;
drm_mga_indices_t indices;
drm_mga_indices_t *indices = data;
LOCK_TEST_WITH_RETURN(dev, filp);
LOCK_TEST_WITH_RETURN(dev, file_priv);
DRM_COPY_FROM_USER_IOCTL(indices,
(drm_mga_indices_t __user *) data,
sizeof(indices));
if (indices->idx < 0 || indices->idx > dma->buf_count)
return -EINVAL;
if (indices.idx < 0 || indices.idx > dma->buf_count)
return DRM_ERR(EINVAL);
buf = dma->buflist[indices.idx];
buf = dma->buflist[indices->idx];
buf_priv = buf->dev_private;
buf_priv->discard = indices.discard;
buf_priv->discard = indices->discard;
if (!mga_verify_state(dev_priv)) {
if (indices.discard) {
if (indices->discard) {
if (buf_priv->dispatched == 1)
AGE_BUFFER(buf_priv);
buf_priv->dispatched = 0;
mga_freelist_put(dev, buf);
}
return DRM_ERR(EINVAL);
return -EINVAL;
}
WRAP_TEST_WITH_RETURN(dev_priv);
mga_dma_dispatch_indices(dev, buf, indices.start, indices.end);
mga_dma_dispatch_indices(dev, buf, indices->start, indices->end);
return 0;
}
static int mga_dma_iload(DRM_IOCTL_ARGS)
static int mga_dma_iload(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEVICE;
drm_device_dma_t *dma = dev->dma;
struct drm_device_dma *dma = dev->dma;
drm_mga_private_t *dev_priv = dev->dev_private;
drm_buf_t *buf;
struct drm_buf *buf;
drm_mga_buf_priv_t *buf_priv;
drm_mga_iload_t iload;
drm_mga_iload_t *iload = data;
DRM_DEBUG("\n");
LOCK_TEST_WITH_RETURN(dev, filp);
DRM_COPY_FROM_USER_IOCTL(iload, (drm_mga_iload_t __user *) data,
sizeof(iload));
LOCK_TEST_WITH_RETURN(dev, file_priv);
#if 0
if (mga_do_wait_for_idle(dev_priv) < 0) {
if (MGA_DMA_DEBUG)
DRM_INFO("%s: -EBUSY\n", __FUNCTION__);
return DRM_ERR(EBUSY);
DRM_INFO("-EBUSY\n");
return -EBUSY;
}
#endif
if (iload.idx < 0 || iload.idx > dma->buf_count)
return DRM_ERR(EINVAL);
if (iload->idx < 0 || iload->idx > dma->buf_count)
return -EINVAL;
buf = dma->buflist[iload.idx];
buf = dma->buflist[iload->idx];
buf_priv = buf->dev_private;
if (mga_verify_iload(dev_priv, iload.dstorg, iload.length)) {
if (mga_verify_iload(dev_priv, iload->dstorg, iload->length)) {
mga_freelist_put(dev, buf);
return DRM_ERR(EINVAL);
return -EINVAL;
}
WRAP_TEST_WITH_RETURN(dev_priv);
mga_dma_dispatch_iload(dev, buf, iload.dstorg, iload.length);
mga_dma_dispatch_iload(dev, buf, iload->dstorg, iload->length);
/* Make sure we restore the 3D state next time.
*/
@ -1040,28 +1017,24 @@ static int mga_dma_iload(DRM_IOCTL_ARGS)
return 0;
}
static int mga_dma_blit(DRM_IOCTL_ARGS)
static int mga_dma_blit(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEVICE;
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mga_blit_t blit;
drm_mga_blit_t *blit = data;
DRM_DEBUG("\n");
LOCK_TEST_WITH_RETURN(dev, filp);
DRM_COPY_FROM_USER_IOCTL(blit, (drm_mga_blit_t __user *) data,
sizeof(blit));
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS)
sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;
if (mga_verify_blit(dev_priv, blit.srcorg, blit.dstorg))
return DRM_ERR(EINVAL);
if (mga_verify_blit(dev_priv, blit->srcorg, blit->dstorg))
return -EINVAL;
WRAP_TEST_WITH_RETURN(dev_priv);
mga_dma_dispatch_blit(dev, &blit);
mga_dma_dispatch_blit(dev, blit);
/* Make sure we restore the 3D state next time.
*/
@ -1070,24 +1043,20 @@ static int mga_dma_blit(DRM_IOCTL_ARGS)
return 0;
}
static int mga_getparam(DRM_IOCTL_ARGS)
static int mga_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEVICE;
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_getparam_t param;
drm_mga_getparam_t *param = data;
int value;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
return DRM_ERR(EINVAL);
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
DRM_COPY_FROM_USER_IOCTL(param, (drm_mga_getparam_t __user *) data,
sizeof(param));
DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
switch (param.param) {
switch (param->param) {
case MGA_PARAM_IRQ_NR:
value = dev->irq;
break;
@ -1095,36 +1064,35 @@ static int mga_getparam(DRM_IOCTL_ARGS)
value = dev_priv->chipset;
break;
default:
return DRM_ERR(EINVAL);
return -EINVAL;
}
if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) {
if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
DRM_ERROR("copy_to_user\n");
return DRM_ERR(EFAULT);
return -EFAULT;
}
return 0;
}
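On the consumer side, MGA_PARAM_IRQ_NR is typically queried through libdrm's command interface. A hedged sketch of what such a caller might look like; the struct layout is taken from mga_drm.h, and drmCommandWriteRead() from libdrm (as well as the helper name mga_query_irq) is assumed here rather than shown by this commit:

#include <xf86drm.h>		/* drmCommandWriteRead(), assumed from libdrm */
#include "mga_drm.h"		/* drm_mga_getparam_t, MGA_PARAM_IRQ_NR, DRM_MGA_GETPARAM */

/* Ask the driver which IRQ line it is using; fd is an open DRM node. */
static int mga_query_irq(int fd)
{
	drm_mga_getparam_t gp;
	int value = 0;

	gp.param = MGA_PARAM_IRQ_NR;
	gp.value = &value;	/* the handler copies the answer back here */

	if (drmCommandWriteRead(fd, DRM_MGA_GETPARAM, &gp, sizeof(gp)) != 0)
		return -1;
	return value;
}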
static int mga_set_fence(DRM_IOCTL_ARGS)
static int mga_set_fence(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEVICE;
drm_mga_private_t *dev_priv = dev->dev_private;
u32 temp;
u32 *fence = data;
DMA_LOCALS;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
return DRM_ERR(EINVAL);
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
/* I would normal do this assignment in the declaration of temp,
/* I would normally do this assignment in the declaration of fence,
* but dev_priv may be NULL.
*/
temp = dev_priv->next_fence_to_post;
*fence = dev_priv->next_fence_to_post;
dev_priv->next_fence_to_post++;
BEGIN_DMA(1);
@ -1134,47 +1102,40 @@ static int mga_set_fence(DRM_IOCTL_ARGS)
MGA_SOFTRAP, 0x00000000);
ADVANCE_DMA();
DRM_COPY_TO_USER_IOCTL((u32 __user *)data, temp, sizeof(u32));
return 0;
}
static int mga_wait_fence(DRM_IOCTL_ARGS)
static int mga_wait_fence(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEVICE;
drm_mga_private_t *dev_priv = dev->dev_private;
u32 fence;
u32 *fence = data;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
return DRM_ERR(EINVAL);
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
DRM_COPY_FROM_USER_IOCTL(fence, (u32 __user *) data, sizeof(u32));
DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
mga_driver_fence_wait(dev, & fence);
DRM_COPY_TO_USER_IOCTL((u32 __user *)data, fence, sizeof(u32));
mga_driver_fence_wait(dev, fence);
return 0;
}
drm_ioctl_desc_t mga_ioctls[] = {
[DRM_IOCTL_NR(DRM_MGA_INIT)] = {mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
[DRM_IOCTL_NR(DRM_MGA_FLUSH)] = {mga_dma_flush, DRM_AUTH},
[DRM_IOCTL_NR(DRM_MGA_RESET)] = {mga_dma_reset, DRM_AUTH},
[DRM_IOCTL_NR(DRM_MGA_SWAP)] = {mga_dma_swap, DRM_AUTH},
[DRM_IOCTL_NR(DRM_MGA_CLEAR)] = {mga_dma_clear, DRM_AUTH},
[DRM_IOCTL_NR(DRM_MGA_VERTEX)] = {mga_dma_vertex, DRM_AUTH},
[DRM_IOCTL_NR(DRM_MGA_INDICES)] = {mga_dma_indices, DRM_AUTH},
[DRM_IOCTL_NR(DRM_MGA_ILOAD)] = {mga_dma_iload, DRM_AUTH},
[DRM_IOCTL_NR(DRM_MGA_BLIT)] = {mga_dma_blit, DRM_AUTH},
[DRM_IOCTL_NR(DRM_MGA_GETPARAM)] = {mga_getparam, DRM_AUTH},
[DRM_IOCTL_NR(DRM_MGA_SET_FENCE)] = {mga_set_fence, DRM_AUTH},
[DRM_IOCTL_NR(DRM_MGA_WAIT_FENCE)] = {mga_wait_fence, DRM_AUTH},
[DRM_IOCTL_NR(DRM_MGA_DMA_BOOTSTRAP)] = {mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
struct drm_ioctl_desc mga_ioctls[] = {
DRM_IOCTL_DEF(DRM_MGA_INIT, mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_MGA_FLUSH, mga_dma_flush, DRM_AUTH),
DRM_IOCTL_DEF(DRM_MGA_RESET, mga_dma_reset, DRM_AUTH),
DRM_IOCTL_DEF(DRM_MGA_SWAP, mga_dma_swap, DRM_AUTH),
DRM_IOCTL_DEF(DRM_MGA_CLEAR, mga_dma_clear, DRM_AUTH),
DRM_IOCTL_DEF(DRM_MGA_VERTEX, mga_dma_vertex, DRM_AUTH),
DRM_IOCTL_DEF(DRM_MGA_INDICES, mga_dma_indices, DRM_AUTH),
DRM_IOCTL_DEF(DRM_MGA_ILOAD, mga_dma_iload, DRM_AUTH),
DRM_IOCTL_DEF(DRM_MGA_BLIT, mga_dma_blit, DRM_AUTH),
DRM_IOCTL_DEF(DRM_MGA_GETPARAM, mga_getparam, DRM_AUTH),
DRM_IOCTL_DEF(DRM_MGA_SET_FENCE, mga_set_fence, DRM_AUTH),
DRM_IOCTL_DEF(DRM_MGA_WAIT_FENCE, mga_wait_fence, DRM_AUTH),
DRM_IOCTL_DEF(DRM_MGA_DMA_BOOTSTRAP, mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
};
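This table records the same conversion applied to every MGA ioctl: handlers now take (dev, data, file_priv) directly, the DRM core performs the user copy of the argument block, and errors become plain negative errno values instead of DRM_ERR(). A rough, self-contained sketch of the new shape; the struct drm_device, struct drm_file and toy_getparam_t definitions below are toy stand-ins, not the real drmP.h types, and the table above registers the real handler with DRM_IOCTL_DEF(DRM_MGA_GETPARAM, mga_getparam, DRM_AUTH):

#include <errno.h>

/* Toy stand-ins so this sketch compiles on its own; the real handler
 * would use DRM_COPY_TO_USER() to return the result. */
struct drm_device { int irq; };
struct drm_file  { int authenticated; };
typedef struct { int param; int *value; } toy_getparam_t;

/* New-style handler: "data" is already a kernel copy of the argument
 * block, and the caller is identified by file_priv. */
static int toy_getparam(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	toy_getparam_t *param = data;

	if (param->param != 0)
		return -EINVAL;		/* was: return DRM_ERR(EINVAL); */
	*param->value = dev->irq;
	return 0;
}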

View File

@ -149,7 +149,7 @@ int mga_warp_install_microcode(drm_mga_private_t * dev_priv)
if (size > dev_priv->warp->size) {
DRM_ERROR("microcode too large! (%u > %lu)\n",
size, dev_priv->warp->size);
return DRM_ERR(ENOMEM);
return -ENOMEM;
}
switch (dev_priv->chipset) {
@ -159,7 +159,7 @@ int mga_warp_install_microcode(drm_mga_private_t * dev_priv)
case MGA_CARD_TYPE_G200:
return mga_warp_install_g200_microcode(dev_priv);
default:
return DRM_ERR(EINVAL);
return -EINVAL;
}
}
@ -185,7 +185,7 @@ int mga_warp_init(drm_mga_private_t * dev_priv)
MGA_WRITE(MGA_WVRTXSZ, 7);
break;
default:
return DRM_ERR(EINVAL);
return -EINVAL;
}
MGA_WRITE(MGA_WMISC, (MGA_WUCODECACHE_ENABLE |
@ -194,7 +194,7 @@ int mga_warp_init(drm_mga_private_t * dev_priv)
if (wmisc != WMISC_EXPECTED) {
DRM_ERROR("WARP engine config failed! 0x%x != 0x%x\n",
wmisc, WMISC_EXPECTED);
return DRM_ERR(EINVAL);
return -EINVAL;
}
return 0;

View File

@ -84,7 +84,7 @@ static u32 r128_cce_microcode[] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
static int R128_READ_PLL(drm_device_t * dev, int addr)
static int R128_READ_PLL(struct drm_device * dev, int addr)
{
drm_r128_private_t *dev_priv = dev->dev_private;
@ -132,7 +132,7 @@ static int r128_do_pixcache_flush(drm_r128_private_t * dev_priv)
#if R128_FIFO_DEBUG
DRM_ERROR("failed!\n");
#endif
return DRM_ERR(EBUSY);
return -EBUSY;
}
static int r128_do_wait_for_fifo(drm_r128_private_t * dev_priv, int entries)
@ -149,7 +149,7 @@ static int r128_do_wait_for_fifo(drm_r128_private_t * dev_priv, int entries)
#if R128_FIFO_DEBUG
DRM_ERROR("failed!\n");
#endif
return DRM_ERR(EBUSY);
return -EBUSY;
}
static int r128_do_wait_for_idle(drm_r128_private_t * dev_priv)
@ -171,7 +171,7 @@ static int r128_do_wait_for_idle(drm_r128_private_t * dev_priv)
#if R128_FIFO_DEBUG
DRM_ERROR("failed!\n");
#endif
return DRM_ERR(EBUSY);
return -EBUSY;
}
/* ================================================================
@ -230,7 +230,7 @@ int r128_do_cce_idle(drm_r128_private_t * dev_priv)
DRM_ERROR("failed!\n");
r128_status(dev_priv);
#endif
return DRM_ERR(EBUSY);
return -EBUSY;
}
/* Start the Concurrent Command Engine.
@ -274,7 +274,7 @@ static void r128_do_cce_stop(drm_r128_private_t * dev_priv)
/* Reset the engine. This will stop the CCE if it is running.
*/
static int r128_do_engine_reset(drm_device_t * dev)
static int r128_do_engine_reset(struct drm_device * dev)
{
drm_r128_private_t *dev_priv = dev->dev_private;
u32 clock_cntl_index, mclk_cntl, gen_reset_cntl;
@ -311,7 +311,7 @@ static int r128_do_engine_reset(drm_device_t * dev)
return 0;
}
static void r128_cce_init_ring_buffer(drm_device_t * dev,
static void r128_cce_init_ring_buffer(struct drm_device * dev,
drm_r128_private_t * dev_priv)
{
u32 ring_start;
@ -327,7 +327,7 @@ static void r128_cce_init_ring_buffer(drm_device_t * dev,
ring_start = dev_priv->cce_ring->offset - dev->agp->base;
else
#endif
ring_start = dev_priv->cce_ring->offset -
ring_start = dev_priv->cce_ring->offset -
(unsigned long)dev->sg->virtual;
R128_WRITE(R128_PM4_BUFFER_OFFSET, ring_start | R128_AGP_OFFSET);
@ -350,7 +350,7 @@ static void r128_cce_init_ring_buffer(drm_device_t * dev,
R128_WRITE(R128_BUS_CNTL, tmp);
}
static int r128_do_init_cce(drm_device_t * dev, drm_r128_init_t * init)
static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
{
drm_r128_private_t *dev_priv;
@ -358,7 +358,7 @@ static int r128_do_init_cce(drm_device_t * dev, drm_r128_init_t * init)
dev_priv = drm_alloc(sizeof(drm_r128_private_t), DRM_MEM_DRIVER);
if (dev_priv == NULL)
return DRM_ERR(ENOMEM);
return -ENOMEM;
memset(dev_priv, 0, sizeof(drm_r128_private_t));
@ -368,7 +368,7 @@ static int r128_do_init_cce(drm_device_t * dev, drm_r128_init_t * init)
DRM_ERROR("PCI GART memory not allocated!\n");
dev->dev_private = (void *)dev_priv;
r128_do_cleanup_cce(dev);
return DRM_ERR(EINVAL);
return -EINVAL;
}
dev_priv->usec_timeout = init->usec_timeout;
@ -377,7 +377,7 @@ static int r128_do_init_cce(drm_device_t * dev, drm_r128_init_t * init)
DRM_DEBUG("TIMEOUT problem!\n");
dev->dev_private = (void *)dev_priv;
r128_do_cleanup_cce(dev);
return DRM_ERR(EINVAL);
return -EINVAL;
}
dev_priv->cce_mode = init->cce_mode;
@ -397,7 +397,7 @@ static int r128_do_init_cce(drm_device_t * dev, drm_r128_init_t * init)
DRM_DEBUG("Bad cce_mode!\n");
dev->dev_private = (void *)dev_priv;
r128_do_cleanup_cce(dev);
return DRM_ERR(EINVAL);
return -EINVAL;
}
switch (init->cce_mode) {
@ -459,13 +459,12 @@ static int r128_do_init_cce(drm_device_t * dev, drm_r128_init_t * init)
dev_priv->span_pitch_offset_c = (((dev_priv->depth_pitch / 8) << 21) |
(dev_priv->span_offset >> 5));
DRM_GETSAREA();
dev_priv->sarea = drm_getsarea(dev);
if (!dev_priv->sarea) {
DRM_ERROR("could not find sarea!\n");
dev->dev_private = (void *)dev_priv;
r128_do_cleanup_cce(dev);
return DRM_ERR(EINVAL);
return -EINVAL;
}
dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
@ -473,21 +472,21 @@ static int r128_do_init_cce(drm_device_t * dev, drm_r128_init_t * init)
DRM_ERROR("could not find mmio region!\n");
dev->dev_private = (void *)dev_priv;
r128_do_cleanup_cce(dev);
return DRM_ERR(EINVAL);
return -EINVAL;
}
dev_priv->cce_ring = drm_core_findmap(dev, init->ring_offset);
if (!dev_priv->cce_ring) {
DRM_ERROR("could not find cce ring region!\n");
dev->dev_private = (void *)dev_priv;
r128_do_cleanup_cce(dev);
return DRM_ERR(EINVAL);
return -EINVAL;
}
dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset);
if (!dev_priv->ring_rptr) {
DRM_ERROR("could not find ring read pointer!\n");
dev->dev_private = (void *)dev_priv;
r128_do_cleanup_cce(dev);
return DRM_ERR(EINVAL);
return -EINVAL;
}
dev->agp_buffer_token = init->buffers_offset;
dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
@ -495,7 +494,7 @@ static int r128_do_init_cce(drm_device_t * dev, drm_r128_init_t * init)
DRM_ERROR("could not find dma buffer region!\n");
dev->dev_private = (void *)dev_priv;
r128_do_cleanup_cce(dev);
return DRM_ERR(EINVAL);
return -EINVAL;
}
if (!dev_priv->is_pci) {
@ -505,7 +504,7 @@ static int r128_do_init_cce(drm_device_t * dev, drm_r128_init_t * init)
DRM_ERROR("could not find agp texture region!\n");
dev->dev_private = (void *)dev_priv;
r128_do_cleanup_cce(dev);
return DRM_ERR(EINVAL);
return -EINVAL;
}
}
@ -524,7 +523,7 @@ static int r128_do_init_cce(drm_device_t * dev, drm_r128_init_t * init)
DRM_ERROR("Could not ioremap agp regions!\n");
dev->dev_private = (void *)dev_priv;
r128_do_cleanup_cce(dev);
return DRM_ERR(ENOMEM);
return -ENOMEM;
}
} else
#endif
@ -562,15 +561,17 @@ static int r128_do_init_cce(drm_device_t * dev, drm_r128_init_t * init)
#if __OS_HAS_AGP
if (dev_priv->is_pci) {
#endif
dev_priv->gart_info.table_mask = DMA_BIT_MASK(32);
dev_priv->gart_info.gart_table_location = DRM_ATI_GART_MAIN;
dev_priv->gart_info.table_size = R128_PCIGART_TABLE_SIZE;
dev_priv->gart_info.addr = NULL;
dev_priv->gart_info.bus_addr = 0;
dev_priv->gart_info.is_pcie = 0;
dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI;
if (!drm_ati_pcigart_init(dev, &dev_priv->gart_info)) {
DRM_ERROR("failed to init PCI GART!\n");
dev->dev_private = (void *)dev_priv;
r128_do_cleanup_cce(dev);
return DRM_ERR(ENOMEM);
return -ENOMEM;
}
R128_WRITE(R128_PCI_GART_PAGE, dev_priv->gart_info.bus_addr);
#if __OS_HAS_AGP
@ -587,7 +588,7 @@ static int r128_do_init_cce(drm_device_t * dev, drm_r128_init_t * init)
return 0;
}
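The GART setup above now goes through the shared ATI PCIGART helper, with the chip-specific knobs collected in gart_info instead of being hard-coded. A stand-in mirror of the fields r128_do_init_cce fills in (the real struct drm_ati_pcigart_info lives in the shared DRM headers; field types here are approximate), annotated with what each one means for this chip:

/* Stand-in only; field names match the assignments above. */
struct toy_ati_pcigart_info {
	int           gart_table_location;	/* DRM_ATI_GART_MAIN: table kept in system RAM */
	int           gart_reg_if;		/* DRM_ATI_GART_PCI: Rage 128 register interface */
	int           table_size;		/* R128_PCIGART_TABLE_SIZE (32768 bytes) */
	unsigned long table_mask;		/* DMA_BIT_MASK(32): table must be reachable below 4 GB */
	void         *addr;			/* CPU mapping of the table, set by drm_ati_pcigart_init() */
	unsigned long bus_addr;			/* bus address of the table, written to R128_PCI_GART_PAGE */
	int           is_pcie;			/* 0 here: Rage 128 parts are PCI/AGP, not PCI Express */
};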
int r128_do_cleanup_cce(drm_device_t * dev)
int r128_do_cleanup_cce(struct drm_device * dev)
{
/* Make sure interrupts are disabled here because the uninstall ioctl
@ -626,38 +627,33 @@ int r128_do_cleanup_cce(drm_device_t * dev)
return 0;
}
int r128_cce_init(DRM_IOCTL_ARGS)
int r128_cce_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEVICE;
drm_r128_init_t init;
drm_r128_init_t *init = data;
DRM_DEBUG("\n");
LOCK_TEST_WITH_RETURN(dev, filp);
LOCK_TEST_WITH_RETURN(dev, file_priv);
DRM_COPY_FROM_USER_IOCTL(init, (drm_r128_init_t __user *) data,
sizeof(init));
switch (init.func) {
switch (init->func) {
case R128_INIT_CCE:
return r128_do_init_cce(dev, &init);
return r128_do_init_cce(dev, init);
case R128_CLEANUP_CCE:
return r128_do_cleanup_cce(dev);
}
return DRM_ERR(EINVAL);
return -EINVAL;
}
int r128_cce_start(DRM_IOCTL_ARGS)
int r128_cce_start(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEVICE;
drm_r128_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("\n");
LOCK_TEST_WITH_RETURN(dev, filp);
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (dev_priv->cce_running || dev_priv->cce_mode == R128_PM4_NONPM4) {
DRM_DEBUG("%s while CCE running\n", __FUNCTION__);
DRM_DEBUG("while CCE running\n");
return 0;
}
@ -669,30 +665,26 @@ int r128_cce_start(DRM_IOCTL_ARGS)
/* Stop the CCE. The engine must have been idled before calling this
* routine.
*/
int r128_cce_stop(DRM_IOCTL_ARGS)
int r128_cce_stop(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEVICE;
drm_r128_private_t *dev_priv = dev->dev_private;
drm_r128_cce_stop_t stop;
drm_r128_cce_stop_t *stop = data;
int ret;
DRM_DEBUG("\n");
LOCK_TEST_WITH_RETURN(dev, filp);
DRM_COPY_FROM_USER_IOCTL(stop, (drm_r128_cce_stop_t __user *) data,
sizeof(stop));
LOCK_TEST_WITH_RETURN(dev, file_priv);
/* Flush any pending CCE commands. This ensures any outstanding
* commands are executed by the engine before we turn it off.
*/
if (stop.flush) {
if (stop->flush) {
r128_do_cce_flush(dev_priv);
}
/* If we fail to make the engine go idle, we return an error
* code so that the DRM ioctl wrapper can try again.
*/
if (stop.idle) {
if (stop->idle) {
ret = r128_do_cce_idle(dev_priv);
if (ret)
return ret;
@ -712,17 +704,16 @@ int r128_cce_stop(DRM_IOCTL_ARGS)
/* Just reset the CCE ring. Called as part of an X Server engine reset.
*/
int r128_cce_reset(DRM_IOCTL_ARGS)
int r128_cce_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEVICE;
drm_r128_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("\n");
LOCK_TEST_WITH_RETURN(dev, filp);
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (!dev_priv) {
DRM_DEBUG("%s called before init done\n", __FUNCTION__);
return DRM_ERR(EINVAL);
DRM_DEBUG("called before init done\n");
return -EINVAL;
}
r128_do_cce_reset(dev_priv);
@ -733,13 +724,12 @@ int r128_cce_reset(DRM_IOCTL_ARGS)
return 0;
}
int r128_cce_idle(DRM_IOCTL_ARGS)
int r128_cce_idle(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEVICE;
drm_r128_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("\n");
LOCK_TEST_WITH_RETURN(dev, filp);
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (dev_priv->cce_running) {
r128_do_cce_flush(dev_priv);
@ -748,19 +738,18 @@ int r128_cce_idle(DRM_IOCTL_ARGS)
return r128_do_cce_idle(dev_priv);
}
int r128_engine_reset(DRM_IOCTL_ARGS)
int r128_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEVICE;
DRM_DEBUG("\n");
LOCK_TEST_WITH_RETURN(dev, filp);
LOCK_TEST_WITH_RETURN(dev, file_priv);
return r128_do_engine_reset(dev);
}
int r128_fullscreen(DRM_IOCTL_ARGS)
int r128_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
return DRM_ERR(EINVAL);
return -EINVAL;
}
/* ================================================================
@ -770,18 +759,18 @@ int r128_fullscreen(DRM_IOCTL_ARGS)
#define R128_BUFFER_FREE 0
#if 0
static int r128_freelist_init(drm_device_t * dev)
static int r128_freelist_init(struct drm_device * dev)
{
drm_device_dma_t *dma = dev->dma;
struct drm_device_dma *dma = dev->dma;
drm_r128_private_t *dev_priv = dev->dev_private;
drm_buf_t *buf;
struct drm_buf *buf;
drm_r128_buf_priv_t *buf_priv;
drm_r128_freelist_t *entry;
int i;
dev_priv->head = drm_alloc(sizeof(drm_r128_freelist_t), DRM_MEM_DRIVER);
if (dev_priv->head == NULL)
return DRM_ERR(ENOMEM);
return -ENOMEM;
memset(dev_priv->head, 0, sizeof(drm_r128_freelist_t));
dev_priv->head->age = R128_BUFFER_USED;
@ -792,7 +781,7 @@ static int r128_freelist_init(drm_device_t * dev)
entry = drm_alloc(sizeof(drm_r128_freelist_t), DRM_MEM_DRIVER);
if (!entry)
return DRM_ERR(ENOMEM);
return -ENOMEM;
entry->age = R128_BUFFER_FREE;
entry->buf = buf;
@ -816,12 +805,12 @@ static int r128_freelist_init(drm_device_t * dev)
}
#endif
static drm_buf_t *r128_freelist_get(drm_device_t * dev)
static struct drm_buf *r128_freelist_get(struct drm_device * dev)
{
drm_device_dma_t *dma = dev->dma;
struct drm_device_dma *dma = dev->dma;
drm_r128_private_t *dev_priv = dev->dev_private;
drm_r128_buf_priv_t *buf_priv;
drm_buf_t *buf;
struct drm_buf *buf;
int i, t;
/* FIXME: Optimize -- use freelist code */
@ -829,7 +818,7 @@ static drm_buf_t *r128_freelist_get(drm_device_t * dev)
for (i = 0; i < dma->buf_count; i++) {
buf = dma->buflist[i];
buf_priv = buf->dev_private;
if (buf->filp == 0)
if (buf->file_priv == 0)
return buf;
}
@ -854,13 +843,13 @@ static drm_buf_t *r128_freelist_get(drm_device_t * dev)
return NULL;
}
void r128_freelist_reset(drm_device_t * dev)
void r128_freelist_reset(struct drm_device * dev)
{
drm_device_dma_t *dma = dev->dma;
struct drm_device_dma *dma = dev->dma;
int i;
for (i = 0; i < dma->buf_count; i++) {
drm_buf_t *buf = dma->buflist[i];
struct drm_buf *buf = dma->buflist[i];
drm_r128_buf_priv_t *buf_priv = buf->dev_private;
buf_priv->age = 0;
}
@ -884,68 +873,64 @@ int r128_wait_ring(drm_r128_private_t * dev_priv, int n)
/* FIXME: This is being ignored... */
DRM_ERROR("failed!\n");
return DRM_ERR(EBUSY);
return -EBUSY;
}
static int r128_cce_get_buffers(DRMFILE filp, drm_device_t * dev, drm_dma_t * d)
static int r128_cce_get_buffers(struct drm_device * dev,
struct drm_file *file_priv,
struct drm_dma * d)
{
int i;
drm_buf_t *buf;
struct drm_buf *buf;
for (i = d->granted_count; i < d->request_count; i++) {
buf = r128_freelist_get(dev);
if (!buf)
return DRM_ERR(EAGAIN);
return -EAGAIN;
buf->filp = filp;
buf->file_priv = file_priv;
if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx,
sizeof(buf->idx)))
return DRM_ERR(EFAULT);
return -EFAULT;
if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total,
sizeof(buf->total)))
return DRM_ERR(EFAULT);
return -EFAULT;
d->granted_count++;
}
return 0;
}
int r128_cce_buffers(DRM_IOCTL_ARGS)
int r128_cce_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEVICE;
drm_device_dma_t *dma = dev->dma;
struct drm_device_dma *dma = dev->dma;
int ret = 0;
drm_dma_t __user *argp = (void __user *)data;
drm_dma_t d;
struct drm_dma *d = data;
LOCK_TEST_WITH_RETURN(dev, filp);
DRM_COPY_FROM_USER_IOCTL(d, argp, sizeof(d));
LOCK_TEST_WITH_RETURN(dev, file_priv);
/* Please don't send us buffers.
*/
if (d.send_count != 0) {
if (d->send_count != 0) {
DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
DRM_CURRENTPID, d.send_count);
return DRM_ERR(EINVAL);
DRM_CURRENTPID, d->send_count);
return -EINVAL;
}
/* We'll send you buffers.
*/
if (d.request_count < 0 || d.request_count > dma->buf_count) {
if (d->request_count < 0 || d->request_count > dma->buf_count) {
DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
DRM_CURRENTPID, d.request_count, dma->buf_count);
return DRM_ERR(EINVAL);
DRM_CURRENTPID, d->request_count, dma->buf_count);
return -EINVAL;
}
d.granted_count = 0;
d->granted_count = 0;
if (d.request_count) {
ret = r128_cce_get_buffers(filp, dev, &d);
if (d->request_count) {
ret = r128_cce_get_buffers(dev, file_priv, d);
}
DRM_COPY_TO_USER_IOCTL(argp, d, sizeof(d));
return ret;
}
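For readers tracing the drmDMA path, the fields of struct drm_dma that this handler actually uses are worth spelling out. A stand-in mirror, not the real drm.h definition and omitting fields this handler never touches:

/* Protocol: the client asks for request_count buffers; for each one
 * granted, the kernel writes the buffer index and size back through the
 * user-supplied arrays and bumps granted_count. */
struct toy_drm_dma {
	int  send_count;	/* must be 0: r128 never accepts buffers back */
	int  request_count;	/* how many buffers the client wants */
	int *request_indices;	/* user array: filled with granted buffer indices */
	int *request_sizes;	/* user array: filled with granted buffer sizes */
	int  granted_count;	/* how many buffers were actually handed out */
};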

View File

@ -156,7 +156,7 @@ typedef struct drm_r128_sarea {
/* The current cliprects, or a subset thereof.
*/
drm_clip_rect_t boxes[R128_NR_SAREA_CLIPRECTS];
struct drm_clip_rect boxes[R128_NR_SAREA_CLIPRECTS];
unsigned int nbox;
/* Counters for client-side throttling of rendering clients.
@ -164,7 +164,7 @@ typedef struct drm_r128_sarea {
unsigned int last_frame;
unsigned int last_dispatch;
drm_tex_region_t tex_list[R128_NR_TEX_HEAPS][R128_NR_TEX_REGIONS + 1];
struct drm_tex_region tex_list[R128_NR_TEX_HEAPS][R128_NR_TEX_REGIONS + 1];
unsigned int tex_age[R128_NR_TEX_HEAPS];
int ctx_owner;
int pfAllowPageFlip; /* number of 3d windows (0,1,2 or more) */
@ -225,11 +225,7 @@ typedef struct drm_r128_init {
R128_INIT_CCE = 0x01,
R128_CLEANUP_CCE = 0x02
} func;
#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4,1,0,0)
int sarea_priv_offset;
#else
unsigned long sarea_priv_offset;
#endif
int is_pci;
int cce_mode;
int cce_secure;
@ -243,21 +239,12 @@ typedef struct drm_r128_init {
unsigned int depth_offset, depth_pitch;
unsigned int span_offset;
#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4,1,0,0)
unsigned int fb_offset;
unsigned int mmio_offset;
unsigned int ring_offset;
unsigned int ring_rptr_offset;
unsigned int buffers_offset;
unsigned int agp_textures_offset;
#else
unsigned long fb_offset;
unsigned long mmio_offset;
unsigned long ring_offset;
unsigned long ring_rptr_offset;
unsigned long buffers_offset;
unsigned long agp_textures_offset;
#endif
} drm_r128_init_t;
typedef struct drm_r128_cce_stop {
@ -267,15 +254,10 @@ typedef struct drm_r128_cce_stop {
typedef struct drm_r128_clear {
unsigned int flags;
#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4,1,0,0)
int x, y, w, h;
#endif
unsigned int clear_color;
unsigned int clear_depth;
#if CONFIG_XFREE86_VERSION >= XFREE86_VERSION(4,1,0,0)
unsigned int color_mask;
unsigned int depth_mask;
#endif
} drm_r128_clear_t;
typedef struct drm_r128_vertex {

View File

@ -45,12 +45,14 @@ static drm_pci_id_list_t r128_pciidlist[] = {
r128_PCI_IDS
};
static void r128_configure(drm_device_t *dev)
static void r128_configure(struct drm_device *dev)
{
dev->driver.buf_priv_size = sizeof(drm_r128_buf_priv_t);
dev->driver.preclose = r128_driver_preclose;
dev->driver.lastclose = r128_driver_lastclose;
dev->driver.vblank_wait = r128_driver_vblank_wait;
dev->driver.get_vblank_counter = r128_get_vblank_counter;
dev->driver.enable_vblank = r128_enable_vblank;
dev->driver.disable_vblank = r128_disable_vblank;
dev->driver.irq_preinstall = r128_driver_irq_preinstall;
dev->driver.irq_postinstall = r128_driver_irq_postinstall;
dev->driver.irq_uninstall = r128_driver_irq_uninstall;
@ -86,9 +88,9 @@ r128_probe(device_t dev)
static int
r128_attach(device_t nbdev)
{
drm_device_t *dev = device_get_softc(nbdev);
struct drm_device *dev = device_get_softc(nbdev);
bzero(dev, sizeof(drm_device_t));
bzero(dev, sizeof(struct drm_device));
r128_configure(dev);
return drm_attach(nbdev, r128_pciidlist);
}
@ -105,7 +107,7 @@ static device_method_t r128_methods[] = {
static driver_t r128_driver = {
"drm",
r128_methods,
sizeof(drm_device_t)
sizeof(struct drm_device)
};
extern devclass_t drm_devclass;
@ -120,7 +122,7 @@ MODULE_DEPEND(r128, drm, 1, 1, 1);
#ifdef _LKM
CFDRIVER_DECL(r128, DV_TTY, NULL);
#else
CFATTACH_DECL(r128, sizeof(drm_device_t), drm_probe, drm_attach, drm_detach,
drm_activate);
CFATTACH_DECL(r128, sizeof(struct drm_device), drm_probe, drm_attach,
drm_detach, drm_activate);
#endif
#endif

View File

@ -60,7 +60,7 @@ __FBSDID("$FreeBSD$");
typedef struct drm_r128_freelist {
unsigned int age;
drm_buf_t *buf;
struct drm_buf *buf;
struct drm_r128_freelist *next;
struct drm_r128_freelist *prev;
} drm_r128_freelist_t;
@ -100,6 +100,8 @@ typedef struct drm_r128_private {
u32 crtc_offset;
u32 crtc_offset_cntl;
atomic_t vbl_received;
u32 color_fmt;
unsigned int front_offset;
unsigned int front_pitch;
@ -121,7 +123,7 @@ typedef struct drm_r128_private {
drm_local_map_t *cce_ring;
drm_local_map_t *ring_rptr;
drm_local_map_t *agp_textures;
drm_ati_pcigart_info gart_info;
struct drm_ati_pcigart_info gart_info;
} drm_r128_private_t;
typedef struct drm_r128_buf_priv {
@ -132,34 +134,36 @@ typedef struct drm_r128_buf_priv {
drm_r128_freelist_t *list_entry;
} drm_r128_buf_priv_t;
extern drm_ioctl_desc_t r128_ioctls[];
extern struct drm_ioctl_desc r128_ioctls[];
extern int r128_max_ioctl;
/* r128_cce.c */
extern int r128_cce_init(DRM_IOCTL_ARGS);
extern int r128_cce_start(DRM_IOCTL_ARGS);
extern int r128_cce_stop(DRM_IOCTL_ARGS);
extern int r128_cce_reset(DRM_IOCTL_ARGS);
extern int r128_cce_idle(DRM_IOCTL_ARGS);
extern int r128_engine_reset(DRM_IOCTL_ARGS);
extern int r128_fullscreen(DRM_IOCTL_ARGS);
extern int r128_cce_buffers(DRM_IOCTL_ARGS);
extern int r128_cce_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int r128_cce_start(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int r128_cce_stop(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int r128_cce_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int r128_cce_idle(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int r128_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int r128_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int r128_cce_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern void r128_freelist_reset(drm_device_t * dev);
extern void r128_freelist_reset(struct drm_device * dev);
extern int r128_wait_ring(drm_r128_private_t * dev_priv, int n);
extern int r128_do_cce_idle(drm_r128_private_t * dev_priv);
extern int r128_do_cleanup_cce(drm_device_t * dev);
extern int r128_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence);
extern int r128_do_cleanup_cce(struct drm_device * dev);
extern int r128_enable_vblank(struct drm_device *dev, int crtc);
extern void r128_disable_vblank(struct drm_device *dev, int crtc);
extern u32 r128_get_vblank_counter(struct drm_device *dev, int crtc);
extern irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS);
extern void r128_driver_irq_preinstall(drm_device_t * dev);
extern void r128_driver_irq_postinstall(drm_device_t * dev);
extern void r128_driver_irq_uninstall(drm_device_t * dev);
extern void r128_driver_lastclose(drm_device_t * dev);
extern void r128_driver_preclose(drm_device_t * dev, DRMFILE filp);
extern void r128_driver_irq_preinstall(struct drm_device * dev);
extern int r128_driver_irq_postinstall(struct drm_device * dev);
extern void r128_driver_irq_uninstall(struct drm_device * dev);
extern void r128_driver_lastclose(struct drm_device * dev);
extern void r128_driver_preclose(struct drm_device * dev,
struct drm_file *file_priv);
extern long r128_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
@ -386,6 +390,8 @@ extern long r128_compat_ioctl(struct file *filp, unsigned int cmd,
#define R128_PERFORMANCE_BOXES 0
#define R128_PCIGART_TABLE_SIZE 32768
#define R128_READ(reg) DRM_READ32( dev_priv->mmio, (reg) )
#define R128_WRITE(reg,val) DRM_WRITE32( dev_priv->mmio, (reg), (val) )
#define R128_READ8(reg) DRM_READ8( dev_priv->mmio, (reg) )
@ -429,7 +435,7 @@ do { \
DRM_UDELAY(1); \
} \
DRM_ERROR( "ring space check failed!\n" ); \
return DRM_ERR(EBUSY); \
return -EBUSY; \
} \
__ring_space_done: \
; \
@ -462,8 +468,7 @@ do { \
#define BEGIN_RING( n ) do { \
if ( R128_VERBOSE ) { \
DRM_INFO( "BEGIN_RING( %d ) in %s\n", \
(n), __FUNCTION__ ); \
DRM_INFO( "BEGIN_RING( %d )\n", (n)); \
} \
if ( dev_priv->ring.space <= (n) * sizeof(u32) ) { \
COMMIT_RING(); \
@ -493,7 +498,7 @@ do { \
write * sizeof(u32) ); \
} \
if (((dev_priv->ring.tail + _nr) & tail_mask) != write) { \
DRM_ERROR( \
DRM_ERROR( \
"ADVANCE_RING(): mismatch: nr: %x write: %x line: %d\n", \
((dev_priv->ring.tail + _nr) & tail_mask), \
write, __LINE__); \

View File

@ -1,5 +1,4 @@
/* r128_irq.c -- IRQ handling for radeon -*- linux-c -*-
*/
/* r128_irq.c -- IRQ handling for radeon -*- linux-c -*- */
/*-
* Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
*
@ -39,9 +38,19 @@ __FBSDID("$FreeBSD$");
#include "dev/drm/r128_drm.h"
#include "dev/drm/r128_drv.h"
u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
{
const drm_r128_private_t *dev_priv = dev->dev_private;
if (crtc != 0)
return 0;
return atomic_read(&dev_priv->vbl_received);
}
irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
{
drm_device_t *dev = (drm_device_t *) arg;
struct drm_device *dev = (struct drm_device *) arg;
drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
int status;
@ -50,33 +59,41 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
/* VBLANK interrupt */
if (status & R128_CRTC_VBLANK_INT) {
R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
atomic_inc(&dev->vbl_received);
DRM_WAKEUP(&dev->vbl_queue);
drm_vbl_send_signals(dev);
atomic_inc(&dev_priv->vbl_received);
drm_handle_vblank(dev, 0);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
int r128_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence)
int r128_enable_vblank(struct drm_device *dev, int crtc)
{
unsigned int cur_vblank;
int ret = 0;
drm_r128_private_t *dev_priv = dev->dev_private;
/* Assume that the user has missed the current sequence number
* by about a day rather than she wants to wait for years
* using vertical blanks...
*/
DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
(((cur_vblank = atomic_read(&dev->vbl_received))
- *sequence) <= (1 << 23)));
if (crtc != 0) {
DRM_ERROR("%s: bad crtc %d\n", __FUNCTION__, crtc);
return -EINVAL;
}
*sequence = cur_vblank;
return ret;
R128_WRITE(R128_GEN_INT_CNTL, R128_CRTC_VBLANK_INT_EN);
return 0;
}
void r128_driver_irq_preinstall(drm_device_t * dev)
void r128_disable_vblank(struct drm_device *dev, int crtc)
{
if (crtc != 0)
DRM_ERROR("%s: bad crtc %d\n", __FUNCTION__, crtc);
/*
* FIXME: implement proper interrupt disable by using the vblank
* counter register (if available)
*
* R128_WRITE(R128_GEN_INT_CNTL,
* R128_READ(R128_GEN_INT_CNTL) & ~R128_CRTC_VBLANK_INT_EN);
*/
}
void r128_driver_irq_preinstall(struct drm_device * dev)
{
drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
@ -86,15 +103,12 @@ void r128_driver_irq_preinstall(drm_device_t * dev)
R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
}
void r128_driver_irq_postinstall(drm_device_t * dev)
int r128_driver_irq_postinstall(struct drm_device * dev)
{
drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
/* Turn on VBL interrupt */
R128_WRITE(R128_GEN_INT_CNTL, R128_CRTC_VBLANK_INT_EN);
return drm_vblank_init(dev, 1);
}
void r128_driver_irq_uninstall(drm_device_t * dev)
void r128_driver_irq_uninstall(struct drm_device * dev)
{
drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
if (!dev_priv)

View File

@ -41,11 +41,11 @@ __FBSDID("$FreeBSD$");
*/
static void r128_emit_clip_rects(drm_r128_private_t * dev_priv,
drm_clip_rect_t * boxes, int count)
struct drm_clip_rect * boxes, int count)
{
u32 aux_sc_cntl = 0x00000000;
RING_LOCALS;
DRM_DEBUG(" %s\n", __FUNCTION__);
DRM_DEBUG("\n");
BEGIN_RING((count < 3 ? count : 3) * 5 + 2);
@ -88,7 +88,7 @@ static __inline__ void r128_emit_core(drm_r128_private_t * dev_priv)
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
RING_LOCALS;
DRM_DEBUG(" %s\n", __FUNCTION__);
DRM_DEBUG("\n");
BEGIN_RING(2);
@ -103,7 +103,7 @@ static __inline__ void r128_emit_context(drm_r128_private_t * dev_priv)
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
RING_LOCALS;
DRM_DEBUG(" %s\n", __FUNCTION__);
DRM_DEBUG("\n");
BEGIN_RING(13);
@ -129,7 +129,7 @@ static __inline__ void r128_emit_setup(drm_r128_private_t * dev_priv)
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
RING_LOCALS;
DRM_DEBUG(" %s\n", __FUNCTION__);
DRM_DEBUG("\n");
BEGIN_RING(3);
@ -145,7 +145,7 @@ static __inline__ void r128_emit_masks(drm_r128_private_t * dev_priv)
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
RING_LOCALS;
DRM_DEBUG(" %s\n", __FUNCTION__);
DRM_DEBUG("\n");
BEGIN_RING(5);
@ -164,7 +164,7 @@ static __inline__ void r128_emit_window(drm_r128_private_t * dev_priv)
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
RING_LOCALS;
DRM_DEBUG(" %s\n", __FUNCTION__);
DRM_DEBUG("\n");
BEGIN_RING(2);
@ -181,7 +181,7 @@ static __inline__ void r128_emit_tex0(drm_r128_private_t * dev_priv)
drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[0];
int i;
RING_LOCALS;
DRM_DEBUG(" %s\n", __FUNCTION__);
DRM_DEBUG("\n");
BEGIN_RING(7 + R128_MAX_TEXTURE_LEVELS);
@ -207,7 +207,7 @@ static __inline__ void r128_emit_tex1(drm_r128_private_t * dev_priv)
drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[1];
int i;
RING_LOCALS;
DRM_DEBUG(" %s\n", __FUNCTION__);
DRM_DEBUG("\n");
BEGIN_RING(5 + R128_MAX_TEXTURE_LEVELS);
@ -224,12 +224,12 @@ static __inline__ void r128_emit_tex1(drm_r128_private_t * dev_priv)
ADVANCE_RING();
}
static __inline__ void r128_emit_state(drm_r128_private_t * dev_priv)
static void r128_emit_state(drm_r128_private_t * dev_priv)
{
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
unsigned int dirty = sarea_priv->dirty;
DRM_DEBUG("%s: dirty=0x%08x\n", __FUNCTION__, dirty);
DRM_DEBUG("dirty=0x%08x\n", dirty);
if (dirty & R128_UPLOAD_CORE) {
r128_emit_core(dev_priv);
@ -355,17 +355,17 @@ static void r128_print_dirty(const char *msg, unsigned int flags)
(flags & R128_REQUIRE_QUIESCENCE) ? "quiescence, " : "");
}
static void r128_cce_dispatch_clear(drm_device_t * dev,
static void r128_cce_dispatch_clear(struct drm_device * dev,
drm_r128_clear_t * clear)
{
drm_r128_private_t *dev_priv = dev->dev_private;
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
int nbox = sarea_priv->nbox;
drm_clip_rect_t *pbox = sarea_priv->boxes;
struct drm_clip_rect *pbox = sarea_priv->boxes;
unsigned int flags = clear->flags;
int i;
RING_LOCALS;
DRM_DEBUG("%s\n", __FUNCTION__);
DRM_DEBUG("\n");
if (dev_priv->page_flipping && dev_priv->current_page == 1) {
unsigned int tmp = flags;
@ -461,15 +461,15 @@ static void r128_cce_dispatch_clear(drm_device_t * dev,
}
}
static void r128_cce_dispatch_swap(drm_device_t * dev)
static void r128_cce_dispatch_swap(struct drm_device * dev)
{
drm_r128_private_t *dev_priv = dev->dev_private;
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
int nbox = sarea_priv->nbox;
drm_clip_rect_t *pbox = sarea_priv->boxes;
struct drm_clip_rect *pbox = sarea_priv->boxes;
int i;
RING_LOCALS;
DRM_DEBUG("%s\n", __FUNCTION__);
DRM_DEBUG("\n");
#if R128_PERFORMANCE_BOXES
/* Do some trivial performance monitoring...
@ -527,12 +527,11 @@ static void r128_cce_dispatch_swap(drm_device_t * dev)
ADVANCE_RING();
}
static void r128_cce_dispatch_flip(drm_device_t * dev)
static void r128_cce_dispatch_flip(struct drm_device * dev)
{
drm_r128_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
__FUNCTION__,
DRM_DEBUG("page=%d pfCurrentPage=%d\n",
dev_priv->current_page, dev_priv->sarea_priv->pfCurrentPage);
#if R128_PERFORMANCE_BOXES
@ -570,7 +569,7 @@ static void r128_cce_dispatch_flip(drm_device_t * dev)
ADVANCE_RING();
}
static void r128_cce_dispatch_vertex(drm_device_t * dev, drm_buf_t * buf)
static void r128_cce_dispatch_vertex(struct drm_device * dev, struct drm_buf * buf)
{
drm_r128_private_t *dev_priv = dev->dev_private;
drm_r128_buf_priv_t *buf_priv = buf->dev_private;
@ -640,8 +639,8 @@ static void r128_cce_dispatch_vertex(drm_device_t * dev, drm_buf_t * buf)
sarea_priv->nbox = 0;
}
static void r128_cce_dispatch_indirect(drm_device_t * dev,
drm_buf_t * buf, int start, int end)
static void r128_cce_dispatch_indirect(struct drm_device * dev,
struct drm_buf * buf, int start, int end)
{
drm_r128_private_t *dev_priv = dev->dev_private;
drm_r128_buf_priv_t *buf_priv = buf->dev_private;
@ -695,8 +694,8 @@ static void r128_cce_dispatch_indirect(drm_device_t * dev,
dev_priv->sarea_priv->last_dispatch++;
}
static void r128_cce_dispatch_indices(drm_device_t * dev,
drm_buf_t * buf,
static void r128_cce_dispatch_indices(struct drm_device * dev,
struct drm_buf * buf,
int start, int end, int count)
{
drm_r128_private_t *dev_priv = dev->dev_private;
@ -779,12 +778,13 @@ static void r128_cce_dispatch_indices(drm_device_t * dev,
sarea_priv->nbox = 0;
}
static int r128_cce_dispatch_blit(DRMFILE filp,
drm_device_t * dev, drm_r128_blit_t * blit)
static int r128_cce_dispatch_blit(struct drm_device * dev,
struct drm_file *file_priv,
drm_r128_blit_t * blit)
{
drm_r128_private_t *dev_priv = dev->dev_private;
drm_device_dma_t *dma = dev->dma;
drm_buf_t *buf;
struct drm_device_dma *dma = dev->dma;
struct drm_buf *buf;
drm_r128_buf_priv_t *buf_priv;
u32 *data;
int dword_shift, dwords;
@ -812,7 +812,7 @@ static int r128_cce_dispatch_blit(DRMFILE filp,
break;
default:
DRM_ERROR("invalid blit format %d\n", blit->format);
return DRM_ERR(EINVAL);
return -EINVAL;
}
/* Flush the pixel cache, and mark the contents as Read Invalid.
@ -832,14 +832,14 @@ static int r128_cce_dispatch_blit(DRMFILE filp,
buf = dma->buflist[blit->idx];
buf_priv = buf->dev_private;
if (buf->filp != filp) {
if (buf->file_priv != file_priv) {
DRM_ERROR("process %d using buffer owned by %p\n",
DRM_CURRENTPID, buf->filp);
return DRM_ERR(EINVAL);
DRM_CURRENTPID, buf->file_priv);
return -EINVAL;
}
if (buf->pending) {
DRM_ERROR("sending pending buffer %d\n", blit->idx);
return DRM_ERR(EINVAL);
return -EINVAL;
}
buf_priv->discard = 1;
@ -890,7 +890,7 @@ static int r128_cce_dispatch_blit(DRMFILE filp,
* have hardware stencil support.
*/
static int r128_cce_dispatch_write_span(drm_device_t * dev,
static int r128_cce_dispatch_write_span(struct drm_device * dev,
drm_r128_depth_t * depth)
{
drm_r128_private_t *dev_priv = dev->dev_private;
@ -903,22 +903,22 @@ static int r128_cce_dispatch_write_span(drm_device_t * dev,
count = depth->n;
if (count > 4096 || count <= 0)
return DRM_ERR(EMSGSIZE);
return -EMSGSIZE;
if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x))) {
return DRM_ERR(EFAULT);
return -EFAULT;
}
if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y))) {
return DRM_ERR(EFAULT);
return -EFAULT;
}
buffer_size = depth->n * sizeof(u32);
buffer = drm_alloc(buffer_size, DRM_MEM_BUFS);
if (buffer == NULL)
return DRM_ERR(ENOMEM);
return -ENOMEM;
if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) {
drm_free(buffer, buffer_size, DRM_MEM_BUFS);
return DRM_ERR(EFAULT);
return -EFAULT;
}
mask_size = depth->n * sizeof(u8);
@ -926,12 +926,12 @@ static int r128_cce_dispatch_write_span(drm_device_t * dev,
mask = drm_alloc(mask_size, DRM_MEM_BUFS);
if (mask == NULL) {
drm_free(buffer, buffer_size, DRM_MEM_BUFS);
return DRM_ERR(ENOMEM);
return -ENOMEM;
}
if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) {
drm_free(buffer, buffer_size, DRM_MEM_BUFS);
drm_free(mask, mask_size, DRM_MEM_BUFS);
return DRM_ERR(EFAULT);
return -EFAULT;
}
for (i = 0; i < count; i++, x++) {
@ -986,7 +986,7 @@ static int r128_cce_dispatch_write_span(drm_device_t * dev,
return 0;
}
static int r128_cce_dispatch_write_pixels(drm_device_t * dev,
static int r128_cce_dispatch_write_pixels(struct drm_device * dev,
drm_r128_depth_t * depth)
{
drm_r128_private_t *dev_priv = dev->dev_private;
@ -999,28 +999,28 @@ static int r128_cce_dispatch_write_pixels(drm_device_t * dev,
count = depth->n;
if (count > 4096 || count <= 0)
return DRM_ERR(EMSGSIZE);
return -EMSGSIZE;
xbuf_size = count * sizeof(*x);
ybuf_size = count * sizeof(*y);
x = drm_alloc(xbuf_size, DRM_MEM_BUFS);
if (x == NULL) {
return DRM_ERR(ENOMEM);
return -ENOMEM;
}
y = drm_alloc(ybuf_size, DRM_MEM_BUFS);
if (y == NULL) {
drm_free(x, xbuf_size, DRM_MEM_BUFS);
return DRM_ERR(ENOMEM);
return -ENOMEM;
}
if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) {
drm_free(x, xbuf_size, DRM_MEM_BUFS);
drm_free(y, ybuf_size, DRM_MEM_BUFS);
return DRM_ERR(EFAULT);
return -EFAULT;
}
if (DRM_COPY_FROM_USER(y, depth->y, xbuf_size)) {
drm_free(x, xbuf_size, DRM_MEM_BUFS);
drm_free(y, ybuf_size, DRM_MEM_BUFS);
return DRM_ERR(EFAULT);
return -EFAULT;
}
buffer_size = depth->n * sizeof(u32);
@ -1028,13 +1028,13 @@ static int r128_cce_dispatch_write_pixels(drm_device_t * dev,
if (buffer == NULL) {
drm_free(x, xbuf_size, DRM_MEM_BUFS);
drm_free(y, ybuf_size, DRM_MEM_BUFS);
return DRM_ERR(ENOMEM);
return -ENOMEM;
}
if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) {
drm_free(x, xbuf_size, DRM_MEM_BUFS);
drm_free(y, ybuf_size, DRM_MEM_BUFS);
drm_free(buffer, buffer_size, DRM_MEM_BUFS);
return DRM_ERR(EFAULT);
return -EFAULT;
}
if (depth->mask) {
@ -1044,14 +1044,14 @@ static int r128_cce_dispatch_write_pixels(drm_device_t * dev,
drm_free(x, xbuf_size, DRM_MEM_BUFS);
drm_free(y, ybuf_size, DRM_MEM_BUFS);
drm_free(buffer, buffer_size, DRM_MEM_BUFS);
return DRM_ERR(ENOMEM);
return -ENOMEM;
}
if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) {
drm_free(x, xbuf_size, DRM_MEM_BUFS);
drm_free(y, ybuf_size, DRM_MEM_BUFS);
drm_free(buffer, buffer_size, DRM_MEM_BUFS);
drm_free(mask, mask_size, DRM_MEM_BUFS);
return DRM_ERR(EFAULT);
return -EFAULT;
}
for (i = 0; i < count; i++) {
@ -1108,7 +1108,7 @@ static int r128_cce_dispatch_write_pixels(drm_device_t * dev,
return 0;
}
static int r128_cce_dispatch_read_span(drm_device_t * dev,
static int r128_cce_dispatch_read_span(struct drm_device * dev,
drm_r128_depth_t * depth)
{
drm_r128_private_t *dev_priv = dev->dev_private;
@ -1118,13 +1118,13 @@ static int r128_cce_dispatch_read_span(drm_device_t * dev,
count = depth->n;
if (count > 4096 || count <= 0)
return DRM_ERR(EMSGSIZE);
return -EMSGSIZE;
if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x))) {
return DRM_ERR(EFAULT);
return -EFAULT;
}
if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y))) {
return DRM_ERR(EFAULT);
return -EFAULT;
}
BEGIN_RING(7);
@ -1151,18 +1151,18 @@ static int r128_cce_dispatch_read_span(drm_device_t * dev,
return 0;
}
static int r128_cce_dispatch_read_pixels(drm_device_t * dev,
static int r128_cce_dispatch_read_pixels(struct drm_device * dev,
drm_r128_depth_t * depth)
{
drm_r128_private_t *dev_priv = dev->dev_private;
int count, *x, *y;
int i, xbuf_size, ybuf_size;
RING_LOCALS;
DRM_DEBUG("%s\n", __FUNCTION__);
DRM_DEBUG("\n");
count = depth->n;
if (count > 4096 || count <= 0)
return DRM_ERR(EMSGSIZE);
return -EMSGSIZE;
if (count > dev_priv->depth_pitch) {
count = dev_priv->depth_pitch;
@ -1172,22 +1172,22 @@ static int r128_cce_dispatch_read_pixels(drm_device_t * dev,
ybuf_size = count * sizeof(*y);
x = drm_alloc(xbuf_size, DRM_MEM_BUFS);
if (x == NULL) {
return DRM_ERR(ENOMEM);
return -ENOMEM;
}
y = drm_alloc(ybuf_size, DRM_MEM_BUFS);
if (y == NULL) {
drm_free(x, xbuf_size, DRM_MEM_BUFS);
return DRM_ERR(ENOMEM);
return -ENOMEM;
}
if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) {
drm_free(x, xbuf_size, DRM_MEM_BUFS);
drm_free(y, ybuf_size, DRM_MEM_BUFS);
return DRM_ERR(EFAULT);
return -EFAULT;
}
if (DRM_COPY_FROM_USER(y, depth->y, ybuf_size)) {
drm_free(x, xbuf_size, DRM_MEM_BUFS);
drm_free(y, ybuf_size, DRM_MEM_BUFS);
return DRM_ERR(EFAULT);
return -EFAULT;
}
for (i = 0; i < count; i++) {
@ -1223,12 +1223,12 @@ static int r128_cce_dispatch_read_pixels(drm_device_t * dev,
* Polygon stipple
*/
static void r128_cce_dispatch_stipple(drm_device_t * dev, u32 * stipple)
static void r128_cce_dispatch_stipple(struct drm_device * dev, u32 * stipple)
{
drm_r128_private_t *dev_priv = dev->dev_private;
int i;
RING_LOCALS;
DRM_DEBUG("%s\n", __FUNCTION__);
DRM_DEBUG("\n");
BEGIN_RING(33);
@ -1244,25 +1244,21 @@ static void r128_cce_dispatch_stipple(drm_device_t * dev, u32 * stipple)
* IOCTL functions
*/
static int r128_cce_clear(DRM_IOCTL_ARGS)
static int r128_cce_clear(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEVICE;
drm_r128_private_t *dev_priv = dev->dev_private;
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_r128_clear_t clear;
drm_r128_clear_t *clear = data;
DRM_DEBUG("\n");
LOCK_TEST_WITH_RETURN(dev, filp);
DRM_COPY_FROM_USER_IOCTL(clear, (drm_r128_clear_t __user *) data,
sizeof(clear));
LOCK_TEST_WITH_RETURN(dev, file_priv);
RING_SPACE_TEST_WITH_RETURN(dev_priv);
if (sarea_priv->nbox > R128_NR_SAREA_CLIPRECTS)
sarea_priv->nbox = R128_NR_SAREA_CLIPRECTS;
r128_cce_dispatch_clear(dev, &clear);
r128_cce_dispatch_clear(dev, clear);
COMMIT_RING();
/* Make sure we restore the 3D state next time.
@ -1272,7 +1268,7 @@ static int r128_cce_clear(DRM_IOCTL_ARGS)
return 0;
}
static int r128_do_init_pageflip(drm_device_t * dev)
static int r128_do_init_pageflip(struct drm_device * dev)
{
drm_r128_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("\n");
@ -1291,7 +1287,7 @@ static int r128_do_init_pageflip(drm_device_t * dev)
return 0;
}
static int r128_do_cleanup_pageflip(drm_device_t * dev)
static int r128_do_cleanup_pageflip(struct drm_device * dev)
{
drm_r128_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("\n");
@ -1312,13 +1308,12 @@ static int r128_do_cleanup_pageflip(drm_device_t * dev)
* They can & should be intermixed to support multiple 3d windows.
*/
static int r128_cce_flip(DRM_IOCTL_ARGS)
static int r128_cce_flip(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEVICE;
drm_r128_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("%s\n", __FUNCTION__);
DRM_DEBUG("\n");
LOCK_TEST_WITH_RETURN(dev, filp);
LOCK_TEST_WITH_RETURN(dev, file_priv);
RING_SPACE_TEST_WITH_RETURN(dev_priv);
@ -1331,14 +1326,13 @@ static int r128_cce_flip(DRM_IOCTL_ARGS)
return 0;
}
static int r128_cce_swap(DRM_IOCTL_ARGS)
static int r128_cce_swap(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEVICE;
drm_r128_private_t *dev_priv = dev->dev_private;
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
DRM_DEBUG("%s\n", __FUNCTION__);
DRM_DEBUG("\n");
LOCK_TEST_WITH_RETURN(dev, filp);
LOCK_TEST_WITH_RETURN(dev, file_priv);
RING_SPACE_TEST_WITH_RETURN(dev_priv);
@ -1353,58 +1347,54 @@ static int r128_cce_swap(DRM_IOCTL_ARGS)
return 0;
}
static int r128_cce_vertex(DRM_IOCTL_ARGS)
static int r128_cce_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEVICE;
drm_r128_private_t *dev_priv = dev->dev_private;
drm_device_dma_t *dma = dev->dma;
drm_buf_t *buf;
struct drm_device_dma *dma = dev->dma;
struct drm_buf *buf;
drm_r128_buf_priv_t *buf_priv;
drm_r128_vertex_t vertex;
drm_r128_vertex_t *vertex = data;
LOCK_TEST_WITH_RETURN(dev, filp);
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
return DRM_ERR(EINVAL);
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
DRM_COPY_FROM_USER_IOCTL(vertex, (drm_r128_vertex_t __user *) data,
sizeof(vertex));
DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n",
DRM_CURRENTPID, vertex.idx, vertex.count, vertex.discard);
DRM_CURRENTPID, vertex->idx, vertex->count, vertex->discard);
if (vertex.idx < 0 || vertex.idx >= dma->buf_count) {
if (vertex->idx < 0 || vertex->idx >= dma->buf_count) {
DRM_ERROR("buffer index %d (of %d max)\n",
vertex.idx, dma->buf_count - 1);
return DRM_ERR(EINVAL);
vertex->idx, dma->buf_count - 1);
return -EINVAL;
}
if (vertex.prim < 0 ||
vertex.prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) {
DRM_ERROR("buffer prim %d\n", vertex.prim);
return DRM_ERR(EINVAL);
if (vertex->prim < 0 ||
vertex->prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) {
DRM_ERROR("buffer prim %d\n", vertex->prim);
return -EINVAL;
}
RING_SPACE_TEST_WITH_RETURN(dev_priv);
VB_AGE_TEST_WITH_RETURN(dev_priv);
buf = dma->buflist[vertex.idx];
buf = dma->buflist[vertex->idx];
buf_priv = buf->dev_private;
if (buf->filp != filp) {
if (buf->file_priv != file_priv) {
DRM_ERROR("process %d using buffer owned by %p\n",
DRM_CURRENTPID, buf->filp);
return DRM_ERR(EINVAL);
DRM_CURRENTPID, buf->file_priv);
return -EINVAL;
}
if (buf->pending) {
DRM_ERROR("sending pending buffer %d\n", vertex.idx);
return DRM_ERR(EINVAL);
DRM_ERROR("sending pending buffer %d\n", vertex->idx);
return -EINVAL;
}
buf->used = vertex.count;
buf_priv->prim = vertex.prim;
buf_priv->discard = vertex.discard;
buf->used = vertex->count;
buf_priv->prim = vertex->prim;
buf_priv->discard = vertex->discard;
r128_cce_dispatch_vertex(dev, buf);
@ -1412,134 +1402,123 @@ static int r128_cce_vertex(DRM_IOCTL_ARGS)
return 0;
}
static int r128_cce_indices(DRM_IOCTL_ARGS)
static int r128_cce_indices(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEVICE;
drm_r128_private_t *dev_priv = dev->dev_private;
drm_device_dma_t *dma = dev->dma;
drm_buf_t *buf;
struct drm_device_dma *dma = dev->dma;
struct drm_buf *buf;
drm_r128_buf_priv_t *buf_priv;
drm_r128_indices_t elts;
drm_r128_indices_t *elts = data;
int count;
LOCK_TEST_WITH_RETURN(dev, filp);
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
return DRM_ERR(EINVAL);
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
DRM_COPY_FROM_USER_IOCTL(elts, (drm_r128_indices_t __user *) data,
sizeof(elts));
DRM_DEBUG("pid=%d buf=%d s=%d e=%d d=%d\n", DRM_CURRENTPID,
elts.idx, elts.start, elts.end, elts.discard);
elts->idx, elts->start, elts->end, elts->discard);
if (elts.idx < 0 || elts.idx >= dma->buf_count) {
if (elts->idx < 0 || elts->idx >= dma->buf_count) {
DRM_ERROR("buffer index %d (of %d max)\n",
elts.idx, dma->buf_count - 1);
return DRM_ERR(EINVAL);
elts->idx, dma->buf_count - 1);
return -EINVAL;
}
if (elts.prim < 0 || elts.prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) {
DRM_ERROR("buffer prim %d\n", elts.prim);
return DRM_ERR(EINVAL);
if (elts->prim < 0 ||
elts->prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) {
DRM_ERROR("buffer prim %d\n", elts->prim);
return -EINVAL;
}
RING_SPACE_TEST_WITH_RETURN(dev_priv);
VB_AGE_TEST_WITH_RETURN(dev_priv);
buf = dma->buflist[elts.idx];
buf = dma->buflist[elts->idx];
buf_priv = buf->dev_private;
if (buf->filp != filp) {
if (buf->file_priv != file_priv) {
DRM_ERROR("process %d using buffer owned by %p\n",
DRM_CURRENTPID, buf->filp);
return DRM_ERR(EINVAL);
DRM_CURRENTPID, buf->file_priv);
return -EINVAL;
}
if (buf->pending) {
DRM_ERROR("sending pending buffer %d\n", elts.idx);
return DRM_ERR(EINVAL);
DRM_ERROR("sending pending buffer %d\n", elts->idx);
return -EINVAL;
}
count = (elts.end - elts.start) / sizeof(u16);
elts.start -= R128_INDEX_PRIM_OFFSET;
count = (elts->end - elts->start) / sizeof(u16);
elts->start -= R128_INDEX_PRIM_OFFSET;
if (elts.start & 0x7) {
DRM_ERROR("misaligned buffer 0x%x\n", elts.start);
return DRM_ERR(EINVAL);
if (elts->start & 0x7) {
DRM_ERROR("misaligned buffer 0x%x\n", elts->start);
return -EINVAL;
}
if (elts.start < buf->used) {
DRM_ERROR("no header 0x%x - 0x%x\n", elts.start, buf->used);
return DRM_ERR(EINVAL);
if (elts->start < buf->used) {
DRM_ERROR("no header 0x%x - 0x%x\n", elts->start, buf->used);
return -EINVAL;
}
buf->used = elts.end;
buf_priv->prim = elts.prim;
buf_priv->discard = elts.discard;
buf->used = elts->end;
buf_priv->prim = elts->prim;
buf_priv->discard = elts->discard;
r128_cce_dispatch_indices(dev, buf, elts.start, elts.end, count);
r128_cce_dispatch_indices(dev, buf, elts->start, elts->end, count);
COMMIT_RING();
return 0;
}
static int r128_cce_blit(DRM_IOCTL_ARGS)
static int r128_cce_blit(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEVICE;
drm_device_dma_t *dma = dev->dma;
struct drm_device_dma *dma = dev->dma;
drm_r128_private_t *dev_priv = dev->dev_private;
drm_r128_blit_t blit;
drm_r128_blit_t *blit = data;
int ret;
LOCK_TEST_WITH_RETURN(dev, filp);
LOCK_TEST_WITH_RETURN(dev, file_priv);
DRM_COPY_FROM_USER_IOCTL(blit, (drm_r128_blit_t __user *) data,
sizeof(blit));
DRM_DEBUG("pid=%d index=%d\n", DRM_CURRENTPID, blit->idx);
DRM_DEBUG("pid=%d index=%d\n", DRM_CURRENTPID, blit.idx);
if (blit.idx < 0 || blit.idx >= dma->buf_count) {
if (blit->idx < 0 || blit->idx >= dma->buf_count) {
DRM_ERROR("buffer index %d (of %d max)\n",
blit.idx, dma->buf_count - 1);
return DRM_ERR(EINVAL);
blit->idx, dma->buf_count - 1);
return -EINVAL;
}
RING_SPACE_TEST_WITH_RETURN(dev_priv);
VB_AGE_TEST_WITH_RETURN(dev_priv);
ret = r128_cce_dispatch_blit(filp, dev, &blit);
ret = r128_cce_dispatch_blit(dev, file_priv, blit);
COMMIT_RING();
return ret;
}
static int r128_cce_depth(DRM_IOCTL_ARGS)
static int r128_cce_depth(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEVICE;
drm_r128_private_t *dev_priv = dev->dev_private;
drm_r128_depth_t depth;
drm_r128_depth_t *depth = data;
int ret;
LOCK_TEST_WITH_RETURN(dev, filp);
DRM_COPY_FROM_USER_IOCTL(depth, (drm_r128_depth_t __user *) data,
sizeof(depth));
LOCK_TEST_WITH_RETURN(dev, file_priv);
RING_SPACE_TEST_WITH_RETURN(dev_priv);
ret = DRM_ERR(EINVAL);
switch (depth.func) {
ret = -EINVAL;
switch (depth->func) {
case R128_WRITE_SPAN:
ret = r128_cce_dispatch_write_span(dev, &depth);
ret = r128_cce_dispatch_write_span(dev, depth);
break;
case R128_WRITE_PIXELS:
ret = r128_cce_dispatch_write_pixels(dev, &depth);
ret = r128_cce_dispatch_write_pixels(dev, depth);
break;
case R128_READ_SPAN:
ret = r128_cce_dispatch_read_span(dev, &depth);
ret = r128_cce_dispatch_read_span(dev, depth);
break;
case R128_READ_PIXELS:
ret = r128_cce_dispatch_read_pixels(dev, &depth);
ret = r128_cce_dispatch_read_pixels(dev, depth);
break;
}
@ -1547,20 +1526,16 @@ static int r128_cce_depth(DRM_IOCTL_ARGS)
return ret;
}
static int r128_cce_stipple(DRM_IOCTL_ARGS)
static int r128_cce_stipple(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEVICE;
drm_r128_private_t *dev_priv = dev->dev_private;
drm_r128_stipple_t stipple;
drm_r128_stipple_t *stipple = data;
u32 mask[32];
LOCK_TEST_WITH_RETURN(dev, filp);
LOCK_TEST_WITH_RETURN(dev, file_priv);
DRM_COPY_FROM_USER_IOCTL(stipple, (drm_r128_stipple_t __user *) data,
sizeof(stipple));
if (DRM_COPY_FROM_USER(&mask, stipple.mask, 32 * sizeof(u32)))
return DRM_ERR(EFAULT);
if (DRM_COPY_FROM_USER(&mask, stipple->mask, 32 * sizeof(u32)))
return -EFAULT;
RING_SPACE_TEST_WITH_RETURN(dev_priv);
@ -1570,61 +1545,58 @@ static int r128_cce_stipple(DRM_IOCTL_ARGS)
return 0;
}
static int r128_cce_indirect(DRM_IOCTL_ARGS)
static int r128_cce_indirect(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEVICE;
drm_r128_private_t *dev_priv = dev->dev_private;
drm_device_dma_t *dma = dev->dma;
drm_buf_t *buf;
struct drm_device_dma *dma = dev->dma;
struct drm_buf *buf;
drm_r128_buf_priv_t *buf_priv;
drm_r128_indirect_t indirect;
drm_r128_indirect_t *indirect = data;
#if 0
RING_LOCALS;
#endif
LOCK_TEST_WITH_RETURN(dev, filp);
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
return DRM_ERR(EINVAL);
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
DRM_COPY_FROM_USER_IOCTL(indirect, (drm_r128_indirect_t __user *) data,
sizeof(indirect));
DRM_DEBUG("idx=%d s=%d e=%d d=%d\n",
indirect->idx, indirect->start, indirect->end,
indirect->discard);
DRM_DEBUG("indirect: idx=%d s=%d e=%d d=%d\n",
indirect.idx, indirect.start, indirect.end, indirect.discard);
if (indirect.idx < 0 || indirect.idx >= dma->buf_count) {
if (indirect->idx < 0 || indirect->idx >= dma->buf_count) {
DRM_ERROR("buffer index %d (of %d max)\n",
indirect.idx, dma->buf_count - 1);
return DRM_ERR(EINVAL);
indirect->idx, dma->buf_count - 1);
return -EINVAL;
}
buf = dma->buflist[indirect.idx];
buf = dma->buflist[indirect->idx];
buf_priv = buf->dev_private;
if (buf->filp != filp) {
if (buf->file_priv != file_priv) {
DRM_ERROR("process %d using buffer owned by %p\n",
DRM_CURRENTPID, buf->filp);
return DRM_ERR(EINVAL);
DRM_CURRENTPID, buf->file_priv);
return -EINVAL;
}
if (buf->pending) {
DRM_ERROR("sending pending buffer %d\n", indirect.idx);
return DRM_ERR(EINVAL);
DRM_ERROR("sending pending buffer %d\n", indirect->idx);
return -EINVAL;
}
if (indirect.start < buf->used) {
if (indirect->start < buf->used) {
DRM_ERROR("reusing indirect: start=0x%x actual=0x%x\n",
indirect.start, buf->used);
return DRM_ERR(EINVAL);
indirect->start, buf->used);
return -EINVAL;
}
RING_SPACE_TEST_WITH_RETURN(dev_priv);
VB_AGE_TEST_WITH_RETURN(dev_priv);
buf->used = indirect.end;
buf_priv->discard = indirect.discard;
buf->used = indirect->end;
buf_priv->discard = indirect->discard;
#if 0
/* Wait for the 3D stream to idle before the indirect buffer
@ -1639,46 +1611,42 @@ static int r128_cce_indirect(DRM_IOCTL_ARGS)
* X server. This is insecure and is thus only available to
* privileged clients.
*/
r128_cce_dispatch_indirect(dev, buf, indirect.start, indirect.end);
r128_cce_dispatch_indirect(dev, buf, indirect->start, indirect->end);
COMMIT_RING();
return 0;
}
static int r128_getparam(DRM_IOCTL_ARGS)
static int r128_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEVICE;
drm_r128_private_t *dev_priv = dev->dev_private;
drm_r128_getparam_t param;
drm_r128_getparam_t *param = data;
int value;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
return DRM_ERR(EINVAL);
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
DRM_COPY_FROM_USER_IOCTL(param, (drm_r128_getparam_t __user *) data,
sizeof(param));
DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
switch (param.param) {
switch (param->param) {
case R128_PARAM_IRQ_NR:
value = dev->irq;
break;
default:
return DRM_ERR(EINVAL);
return -EINVAL;
}
if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) {
if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
DRM_ERROR("copy_to_user\n");
return DRM_ERR(EFAULT);
return -EFAULT;
}
return 0;
}
void r128_driver_preclose(drm_device_t * dev, DRMFILE filp)
void r128_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
if (dev->dev_private) {
drm_r128_private_t *dev_priv = dev->dev_private;
@ -1688,29 +1656,29 @@ void r128_driver_preclose(drm_device_t * dev, DRMFILE filp)
}
}
void r128_driver_lastclose(drm_device_t * dev)
void r128_driver_lastclose(struct drm_device * dev)
{
r128_do_cleanup_cce(dev);
}
drm_ioctl_desc_t r128_ioctls[] = {
[DRM_IOCTL_NR(DRM_R128_INIT)] = {r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
[DRM_IOCTL_NR(DRM_R128_CCE_START)] = {r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
[DRM_IOCTL_NR(DRM_R128_CCE_STOP)] = {r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
[DRM_IOCTL_NR(DRM_R128_CCE_RESET)] = {r128_cce_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
[DRM_IOCTL_NR(DRM_R128_CCE_IDLE)] = {r128_cce_idle, DRM_AUTH},
[DRM_IOCTL_NR(DRM_R128_RESET)] = {r128_engine_reset, DRM_AUTH},
[DRM_IOCTL_NR(DRM_R128_FULLSCREEN)] = {r128_fullscreen, DRM_AUTH},
[DRM_IOCTL_NR(DRM_R128_SWAP)] = {r128_cce_swap, DRM_AUTH},
[DRM_IOCTL_NR(DRM_R128_FLIP)] = {r128_cce_flip, DRM_AUTH},
[DRM_IOCTL_NR(DRM_R128_CLEAR)] = {r128_cce_clear, DRM_AUTH},
[DRM_IOCTL_NR(DRM_R128_VERTEX)] = {r128_cce_vertex, DRM_AUTH},
[DRM_IOCTL_NR(DRM_R128_INDICES)] = {r128_cce_indices, DRM_AUTH},
[DRM_IOCTL_NR(DRM_R128_BLIT)] = {r128_cce_blit, DRM_AUTH},
[DRM_IOCTL_NR(DRM_R128_DEPTH)] = {r128_cce_depth, DRM_AUTH},
[DRM_IOCTL_NR(DRM_R128_STIPPLE)] = {r128_cce_stipple, DRM_AUTH},
[DRM_IOCTL_NR(DRM_R128_INDIRECT)] = {r128_cce_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
[DRM_IOCTL_NR(DRM_R128_GETPARAM)] = {r128_getparam, DRM_AUTH},
struct drm_ioctl_desc r128_ioctls[] = {
DRM_IOCTL_DEF(DRM_R128_INIT, r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_R128_CCE_START, r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_R128_CCE_STOP, r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_R128_CCE_RESET, r128_cce_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_R128_CCE_IDLE, r128_cce_idle, DRM_AUTH),
DRM_IOCTL_DEF(DRM_R128_RESET, r128_engine_reset, DRM_AUTH),
DRM_IOCTL_DEF(DRM_R128_FULLSCREEN, r128_fullscreen, DRM_AUTH),
DRM_IOCTL_DEF(DRM_R128_SWAP, r128_cce_swap, DRM_AUTH),
DRM_IOCTL_DEF(DRM_R128_FLIP, r128_cce_flip, DRM_AUTH),
DRM_IOCTL_DEF(DRM_R128_CLEAR, r128_cce_clear, DRM_AUTH),
DRM_IOCTL_DEF(DRM_R128_VERTEX, r128_cce_vertex, DRM_AUTH),
DRM_IOCTL_DEF(DRM_R128_INDICES, r128_cce_indices, DRM_AUTH),
DRM_IOCTL_DEF(DRM_R128_BLIT, r128_cce_blit, DRM_AUTH),
DRM_IOCTL_DEF(DRM_R128_DEPTH, r128_cce_depth, DRM_AUTH),
DRM_IOCTL_DEF(DRM_R128_STIPPLE, r128_cce_stipple, DRM_AUTH),
DRM_IOCTL_DEF(DRM_R128_INDIRECT, r128_cce_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_R128_GETPARAM, r128_getparam, DRM_AUTH),
};
int r128_max_ioctl = DRM_ARRAY_SIZE(r128_ioctls);

View File

@ -58,7 +58,7 @@ static const int r300_cliprect_cntl[4] = {
static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
drm_radeon_kcmd_buffer_t *cmdbuf, int n)
{
drm_clip_rect_t box;
struct drm_clip_rect box;
int nr;
int i;
RING_LOCALS;
@ -77,26 +77,37 @@ static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
if (DRM_COPY_FROM_USER_UNCHECKED
(&box, &cmdbuf->boxes[n + i], sizeof(box))) {
DRM_ERROR("copy cliprect faulted\n");
return DRM_ERR(EFAULT);
return -EFAULT;
}
box.x1 =
(box.x1 +
R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
box.y1 =
(box.y1 +
R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
box.x2 =
(box.x2 +
R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
box.y2 =
(box.y2 +
R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
box.x2--; /* Hardware expects inclusive bottom-right corner */
box.y2--;
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
box.x1 = (box.x1) &
R300_CLIPRECT_MASK;
box.y1 = (box.y1) &
R300_CLIPRECT_MASK;
box.x2 = (box.x2) &
R300_CLIPRECT_MASK;
box.y2 = (box.y2) &
R300_CLIPRECT_MASK;
} else {
box.x1 = (box.x1 + R300_CLIPRECT_OFFSET) &
R300_CLIPRECT_MASK;
box.y1 = (box.y1 + R300_CLIPRECT_OFFSET) &
R300_CLIPRECT_MASK;
box.x2 = (box.x2 + R300_CLIPRECT_OFFSET) &
R300_CLIPRECT_MASK;
box.y2 = (box.y2 + R300_CLIPRECT_OFFSET) &
R300_CLIPRECT_MASK;
}
OUT_RING((box.x1 << R300_CLIPRECT_X_SHIFT) |
(box.y1 << R300_CLIPRECT_Y_SHIFT));
OUT_RING((box.x2 << R300_CLIPRECT_X_SHIFT) |
(box.y2 << R300_CLIPRECT_Y_SHIFT));
}
OUT_RING_REG(R300_RE_CLIPRECT_CNTL, r300_cliprect_cntl[nr - 1]);
@ -131,14 +142,28 @@ static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
ADVANCE_RING();
}
	/* flush cache and wait for idle clean after cliprect change */
BEGIN_RING(2);
OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
OUT_RING(R300_RB3D_DC_FLUSH);
ADVANCE_RING();
BEGIN_RING(2);
OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
ADVANCE_RING();
/* set flush flag */
dev_priv->track_flush |= RADEON_FLUSH_EMITED;
return 0;
}
static u8 r300_reg_flags[0x10000 >> 2];
void r300_init_reg_flags(void)
void r300_init_reg_flags(struct drm_device *dev)
{
int i;
drm_radeon_private_t *dev_priv = dev->dev_private;
memset(r300_reg_flags, 0, 0x10000 >> 2);
#define ADD_RANGE_MARK(reg, count,mark) \
for(i=((reg)>>2);i<((reg)>>2)+(count);i++)\
@ -151,70 +176,63 @@ void r300_init_reg_flags(void)
/* these match cmducs() command in r300_driver/r300/r300_cmdbuf.c */
ADD_RANGE(R300_SE_VPORT_XSCALE, 6);
ADD_RANGE(0x2080, 1);
ADD_RANGE(R300_VAP_CNTL, 1);
ADD_RANGE(R300_SE_VTE_CNTL, 2);
ADD_RANGE(0x2134, 2);
ADD_RANGE(0x2140, 1);
ADD_RANGE(R300_VAP_CNTL_STATUS, 1);
ADD_RANGE(R300_VAP_INPUT_CNTL_0, 2);
ADD_RANGE(0x21DC, 1);
ADD_RANGE(0x221C, 1);
ADD_RANGE(0x2220, 4);
ADD_RANGE(0x2288, 1);
ADD_RANGE(R300_VAP_UNKNOWN_221C, 1);
ADD_RANGE(R300_VAP_CLIP_X_0, 4);
ADD_RANGE(R300_VAP_PVS_STATE_FLUSH_REG, 1);
ADD_RANGE(R300_VAP_UNKNOWN_2288, 1);
ADD_RANGE(R300_VAP_OUTPUT_VTX_FMT_0, 2);
ADD_RANGE(R300_VAP_PVS_CNTL_1, 3);
ADD_RANGE(R300_GB_ENABLE, 1);
ADD_RANGE(R300_GB_MSPOS0, 5);
ADD_RANGE(R300_TX_CNTL, 1);
ADD_RANGE(R300_TX_INVALTAGS, 1);
ADD_RANGE(R300_TX_ENABLE, 1);
ADD_RANGE(0x4200, 4);
ADD_RANGE(0x4214, 1);
ADD_RANGE(R300_RE_POINTSIZE, 1);
ADD_RANGE(0x4230, 3);
ADD_RANGE(R300_RE_LINE_CNT, 1);
ADD_RANGE(0x4238, 1);
ADD_RANGE(R300_RE_UNK4238, 1);
ADD_RANGE(0x4260, 3);
ADD_RANGE(0x4274, 4);
ADD_RANGE(0x4288, 5);
ADD_RANGE(0x42A0, 1);
ADD_RANGE(R300_RE_SHADE, 4);
ADD_RANGE(R300_RE_POLYGON_MODE, 5);
ADD_RANGE(R300_RE_ZBIAS_CNTL, 1);
ADD_RANGE(R300_RE_ZBIAS_T_FACTOR, 4);
ADD_RANGE(0x42B4, 1);
ADD_RANGE(R300_RE_OCCLUSION_CNTL, 1);
ADD_RANGE(R300_RE_CULL_CNTL, 1);
ADD_RANGE(0x42C0, 2);
ADD_RANGE(R300_RS_CNTL_0, 2);
ADD_RANGE(R300_RS_INTERP_0, 8);
ADD_RANGE(R300_RS_ROUTE_0, 8);
ADD_RANGE(0x43A4, 2);
ADD_RANGE(R300_SC_HYPERZ, 2);
ADD_RANGE(0x43E8, 1);
ADD_RANGE(R300_PFS_CNTL_0, 3);
ADD_RANGE(R300_PFS_NODE_0, 4);
ADD_RANGE(R300_PFS_TEXI_0, 64);
ADD_RANGE(0x46A4, 5);
ADD_RANGE(R300_PFS_INSTR0_0, 64);
ADD_RANGE(R300_PFS_INSTR1_0, 64);
ADD_RANGE(R300_PFS_INSTR2_0, 64);
ADD_RANGE(R300_PFS_INSTR3_0, 64);
ADD_RANGE(0x4BC0, 1);
ADD_RANGE(0x4BC8, 3);
ADD_RANGE(R300_RE_FOG_STATE, 1);
ADD_RANGE(R300_FOG_COLOR_R, 3);
ADD_RANGE(R300_PP_ALPHA_TEST, 2);
ADD_RANGE(0x4BD8, 1);
ADD_RANGE(R300_PFS_PARAM_0_X, 64);
ADD_RANGE(0x4E00, 1);
ADD_RANGE(R300_RB3D_CBLEND, 2);
ADD_RANGE(R300_RB3D_COLORMASK, 1);
ADD_RANGE(0x4E10, 3);
ADD_RANGE(R300_RB3D_BLEND_COLOR, 3);
ADD_RANGE_MARK(R300_RB3D_COLOROFFSET0, 1, MARK_CHECK_OFFSET); /* check offset */
ADD_RANGE(R300_RB3D_COLORPITCH0, 1);
ADD_RANGE(0x4E50, 9);
ADD_RANGE(0x4E88, 1);
ADD_RANGE(0x4EA0, 2);
ADD_RANGE(R300_RB3D_ZSTENCIL_CNTL_0, 3);
ADD_RANGE(0x4F10, 4);
ADD_RANGE_MARK(R300_RB3D_DEPTHOFFSET, 1, MARK_CHECK_OFFSET); /* check offset */
ADD_RANGE(R300_RB3D_DEPTHPITCH, 1);
ADD_RANGE(0x4F28, 1);
ADD_RANGE(0x4F30, 2);
ADD_RANGE(0x4F44, 1);
ADD_RANGE(0x4F54, 1);
ADD_RANGE(R300_ZB_CNTL, 3);
ADD_RANGE(R300_ZB_FORMAT, 4);
ADD_RANGE_MARK(R300_ZB_DEPTHOFFSET, 1, MARK_CHECK_OFFSET); /* check offset */
ADD_RANGE(R300_ZB_DEPTHPITCH, 1);
ADD_RANGE(R300_ZB_DEPTHCLEARVALUE, 1);
ADD_RANGE(R300_ZB_ZMASK_OFFSET, 13);
ADD_RANGE(R300_TX_FILTER_0, 16);
ADD_RANGE(R300_TX_FILTER1_0, 16);
@ -227,11 +245,33 @@ void r300_init_reg_flags(void)
ADD_RANGE(R300_TX_BORDER_COLOR_0, 16);
/* Sporadic registers used as primitives are emitted */
ADD_RANGE(0x4f18, 1);
ADD_RANGE(R300_ZB_ZCACHE_CTLSTAT, 1);
ADD_RANGE(R300_RB3D_DSTCACHE_CTLSTAT, 1);
ADD_RANGE(R300_VAP_INPUT_ROUTE_0_0, 8);
ADD_RANGE(R300_VAP_INPUT_ROUTE_1_0, 8);
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
ADD_RANGE(R500_VAP_INDEX_OFFSET, 1);
ADD_RANGE(R500_US_CONFIG, 2);
ADD_RANGE(R500_US_CODE_ADDR, 3);
ADD_RANGE(R500_US_FC_CTRL, 1);
ADD_RANGE(R500_RS_IP_0, 16);
ADD_RANGE(R500_RS_INST_0, 16);
ADD_RANGE(R500_RB3D_COLOR_CLEAR_VALUE_AR, 2);
ADD_RANGE(R500_RB3D_CONSTANT_COLOR_AR, 2);
ADD_RANGE(R500_ZB_FIFO_SIZE, 2);
} else {
ADD_RANGE(R300_PFS_CNTL_0, 3);
ADD_RANGE(R300_PFS_NODE_0, 4);
ADD_RANGE(R300_PFS_TEXI_0, 64);
ADD_RANGE(R300_PFS_INSTR0_0, 64);
ADD_RANGE(R300_PFS_INSTR1_0, 64);
ADD_RANGE(R300_PFS_INSTR2_0, 64);
ADD_RANGE(R300_PFS_INSTR3_0, 64);
ADD_RANGE(R300_RS_INTERP_0, 8);
ADD_RANGE(R300_RS_ROUTE_0, 8);
}
}
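
r300_init_reg_flags() fills a one-byte-per-register table that later lets r300_check_range() and the packet0 path reject writes to registers that were never whitelisted, and force offset validation on the marked ones. A self-contained sketch of that table idea, using made-up mark values and register addresses:

#include <stdio.h>
#include <string.h>

#define MARK_SAFE		1
#define MARK_CHECK_OFFSET	2

/* one flag byte per 32-bit register, as in r300_reg_flags[0x10000 >> 2] */
static unsigned char reg_flags[0x10000 >> 2];

#define ADD_RANGE_MARK(reg, count, mark)				\
	do {								\
		for (int i = ((reg) >> 2); i < ((reg) >> 2) + (count); i++) \
			reg_flags[i] = (mark);				\
	} while (0)

#define ADD_RANGE(reg, count) ADD_RANGE_MARK(reg, count, MARK_SAFE)

/* reject any register in [reg, reg + count*4) that was never whitelisted */
static int check_range(unsigned reg, int count)
{
	if (reg & ~0xffffu)
		return -1;
	for (int i = (int)(reg >> 2); i < (int)(reg >> 2) + count; i++)
		if (reg_flags[i] == 0)
			return -1;
	return 0;
}

int main(void)
{
	memset(reg_flags, 0, sizeof(reg_flags));
	ADD_RANGE(0x2080, 1);				/* hypothetical safe register */
	ADD_RANGE_MARK(0x4e28, 1, MARK_CHECK_OFFSET);	/* needs an offset check */

	printf("0x2080 -> %d, 0x2084 -> %d\n",
	    check_range(0x2080, 1), check_range(0x2084, 1));
	return 0;
}
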
static __inline__ int r300_check_range(unsigned reg, int count)
@ -245,26 +285,6 @@ static __inline__ int r300_check_range(unsigned reg, int count)
return 0;
}
/*
* we expect offsets passed to the framebuffer to be either within video
* memory or within AGP space
*/
static __inline__ int r300_check_offset(drm_radeon_private_t *dev_priv,
u32 offset)
{
/* we realy want to check against end of video aperture
but this value is not being kept.
This code is correct for now (does the same thing as the
code that sets MC_FB_LOCATION) in radeon_cp.c */
if (offset >= dev_priv->fb_location &&
offset < (dev_priv->fb_location + dev_priv->fb_size))
return 0;
if (offset >= dev_priv->gart_vm_start &&
offset < (dev_priv->gart_vm_start + dev_priv->gart_size))
return 0;
return 1;
}
static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t *
dev_priv,
drm_radeon_kcmd_buffer_t
@ -285,7 +305,7 @@ static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t *
DRM_ERROR
("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n",
reg, sz);
return DRM_ERR(EINVAL);
return -EINVAL;
}
for (i = 0; i < sz; i++) {
values[i] = ((int *)cmdbuf->buf)[i];
@ -293,17 +313,17 @@ static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t *
case MARK_SAFE:
break;
case MARK_CHECK_OFFSET:
if (r300_check_offset(dev_priv, (u32) values[i])) {
if (!radeon_check_offset(dev_priv, (u32) values[i])) {
DRM_ERROR
("Offset failed range check (reg=%04x sz=%d)\n",
reg, sz);
return DRM_ERR(EINVAL);
return -EINVAL;
}
break;
default:
DRM_ERROR("Register %04x failed check as flag=%02x\n",
reg + i * 4, r300_reg_flags[(reg >> 2) + i]);
return DRM_ERR(EINVAL);
return -EINVAL;
}
}
@ -335,16 +355,17 @@ static __inline__ int r300_emit_packet0(drm_radeon_private_t *dev_priv,
sz = header.packet0.count;
reg = (header.packet0.reghi << 8) | header.packet0.reglo;
DRM_DEBUG("R300_CMD_PACKET0: reg %04x, sz %d\n", reg, sz);
if (!sz)
return 0;
if (sz * 4 > cmdbuf->bufsz)
return DRM_ERR(EINVAL);
return -EINVAL;
if (reg + sz * 4 >= 0x10000) {
DRM_ERROR("No such registers in hardware reg=%04x sz=%d\n", reg,
sz);
return DRM_ERR(EINVAL);
return -EINVAL;
}
if (r300_check_range(reg, sz)) {
@ -384,17 +405,30 @@ static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv,
if (!sz)
return 0;
if (sz * 16 > cmdbuf->bufsz)
return DRM_ERR(EINVAL);
return -EINVAL;
BEGIN_RING(5 + sz * 4);
/* Wait for VAP to come to senses.. */
/* there is no need to emit it multiple times, (only once before VAP is programmed,
but this optimization is for later */
OUT_RING_REG(R300_VAP_PVS_WAITIDLE, 0);
/* VAP is very sensitive so we purge cache before we program it
* and we also flush its state before & after */
BEGIN_RING(6);
OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
OUT_RING(R300_RB3D_DC_FLUSH);
OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
OUT_RING(CP_PACKET0(R300_VAP_PVS_STATE_FLUSH_REG, 0));
OUT_RING(0);
ADVANCE_RING();
/* set flush flag */
dev_priv->track_flush |= RADEON_FLUSH_EMITED;
BEGIN_RING(3 + sz * 4);
OUT_RING_REG(R300_VAP_PVS_UPLOAD_ADDRESS, addr);
OUT_RING(CP_PACKET0_TABLE(R300_VAP_PVS_UPLOAD_DATA, sz * 4 - 1));
OUT_RING_TABLE((int *)cmdbuf->buf, sz * 4);
ADVANCE_RING();
BEGIN_RING(2);
OUT_RING(CP_PACKET0(R300_VAP_PVS_STATE_FLUSH_REG, 0));
OUT_RING(0);
ADVANCE_RING();
cmdbuf->buf += sz * 16;
@ -413,7 +447,7 @@ static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv,
RING_LOCALS;
if (8 * 4 > cmdbuf->bufsz)
return DRM_ERR(EINVAL);
return -EINVAL;
BEGIN_RING(10);
OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 8));
@ -422,6 +456,15 @@ static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv,
OUT_RING_TABLE((int *)cmdbuf->buf, 8);
ADVANCE_RING();
BEGIN_RING(4);
OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
OUT_RING(R300_RB3D_DC_FLUSH);
OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
ADVANCE_RING();
/* set flush flag */
dev_priv->track_flush |= RADEON_FLUSH_EMITED;
cmdbuf->buf += 8 * 4;
cmdbuf->bufsz -= 8 * 4;
@ -443,7 +486,7 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
if ((count + 1) > MAX_ARRAY_PACKET) {
DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n",
count);
return DRM_ERR(EINVAL);
return -EINVAL;
}
memset(payload, 0, MAX_ARRAY_PACKET * 4);
memcpy(payload, cmdbuf->buf + 4, (count + 1) * 4);
@ -455,22 +498,22 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
i = 1;
while ((k < narrays) && (i < (count + 1))) {
i++; /* skip attribute field */
if (r300_check_offset(dev_priv, payload[i])) {
if (!radeon_check_offset(dev_priv, payload[i])) {
DRM_ERROR
("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
k, i);
return DRM_ERR(EINVAL);
return -EINVAL;
}
k++;
i++;
if (k == narrays)
break;
/* have one more to process, they come in pairs */
if (r300_check_offset(dev_priv, payload[i])) {
if (!radeon_check_offset(dev_priv, payload[i])) {
DRM_ERROR
("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
k, i);
return DRM_ERR(EINVAL);
return -EINVAL;
}
k++;
i++;
@ -480,7 +523,7 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
DRM_ERROR
("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n",
k, i, narrays, count + 1);
return DRM_ERR(EINVAL);
return -EINVAL;
}
/* all clear, output packet */
@ -508,25 +551,25 @@ static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
if (cmd[0] & 0x8000) {
u32 offset;
if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
| RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
offset = cmd[2] << 10;
ret = r300_check_offset(dev_priv, offset);
ret = !radeon_check_offset(dev_priv, offset);
if (ret) {
DRM_ERROR("Invalid bitblt first offset is %08X\n", offset);
return DRM_ERR(EINVAL);
return -EINVAL;
}
}
if ((cmd[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
(cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
offset = cmd[3] << 10;
ret = r300_check_offset(dev_priv, offset);
ret = !radeon_check_offset(dev_priv, offset);
if (ret) {
DRM_ERROR("Invalid bitblt second offset is %08X\n", offset);
return DRM_ERR(EINVAL);
return -EINVAL;
}
}
}
@ -541,6 +584,81 @@ static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
return 0;
}
static __inline__ int r300_emit_draw_indx_2(drm_radeon_private_t *dev_priv,
drm_radeon_kcmd_buffer_t *cmdbuf)
{
u32 *cmd;
int count;
int expected_count;
RING_LOCALS;
cmd = (u32 *) cmdbuf->buf;
count = (cmd[0]>>16) & 0x3fff;
expected_count = cmd[1] >> 16;
if (!(cmd[1] & R300_VAP_VF_CNTL__INDEX_SIZE_32bit))
expected_count = (expected_count+1)/2;
if (count && count != expected_count) {
DRM_ERROR("3D_DRAW_INDX_2: packet size %i, expected %i\n",
count, expected_count);
return -EINVAL;
}
BEGIN_RING(count+2);
OUT_RING(cmd[0]);
OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
ADVANCE_RING();
cmdbuf->buf += (count+2)*4;
cmdbuf->bufsz -= (count+2)*4;
if (!count) {
drm_r300_cmd_header_t header;
if (cmdbuf->bufsz < 4*4 + sizeof(header)) {
DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER, but stream is too short.\n");
return -EINVAL;
}
header.u = *(unsigned int *)cmdbuf->buf;
cmdbuf->buf += sizeof(header);
cmdbuf->bufsz -= sizeof(header);
cmd = (u32 *) cmdbuf->buf;
if (header.header.cmd_type != R300_CMD_PACKET3 ||
header.packet3.packet != R300_CMD_PACKET3_RAW ||
cmd[0] != CP_PACKET3(RADEON_CP_INDX_BUFFER, 2)) {
DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER.\n");
return -EINVAL;
}
if ((cmd[1] & 0x8000ffff) != 0x80000810) {
DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]);
return -EINVAL;
}
if (!radeon_check_offset(dev_priv, cmd[2])) {
DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]);
return -EINVAL;
}
if (cmd[3] != expected_count) {
DRM_ERROR("INDX_BUFFER: buffer size %i, expected %i\n",
cmd[3], expected_count);
return -EINVAL;
}
BEGIN_RING(4);
OUT_RING(cmd[0]);
OUT_RING_TABLE((int *)(cmdbuf->buf + 4), 3);
ADVANCE_RING();
cmdbuf->buf += 4*4;
cmdbuf->bufsz -= 4*4;
}
return 0;
}
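
r300_emit_draw_indx_2() cross-checks the packet length against the index count carried in the VAP_VF_CNTL dword: 16-bit indices are packed two per dword, so the expected payload is (indices + 1) / 2 dwords. A tiny self-contained illustration of just that rounding rule (the INDEX_SIZE bit position is a placeholder):

#include <stdio.h>

/* placeholder for R300_VAP_VF_CNTL__INDEX_SIZE_32bit */
#define INDEX_SIZE_32BIT	(1u << 11)

/* dwords of index data expected after the 3D_DRAW_INDX_2 header,
 * mirroring the check in r300_emit_draw_indx_2() */
static int expected_index_dwords(unsigned vap_vf_cntl)
{
	int expected = (int)(vap_vf_cntl >> 16);	/* number of indices */

	if (!(vap_vf_cntl & INDEX_SIZE_32BIT))
		expected = (expected + 1) / 2;	/* 16-bit indices come in pairs */
	return expected;
}

int main(void)
{
	/* 5 sixteen-bit indices -> 3 dwords; 5 thirty-two-bit indices -> 5 dwords */
	printf("%d %d\n",
	    expected_index_dwords(5u << 16),
	    expected_index_dwords((5u << 16) | INDEX_SIZE_32BIT));
	return 0;
}
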
static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
drm_radeon_kcmd_buffer_t *cmdbuf)
{
@ -549,7 +667,7 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
RING_LOCALS;
if (4 > cmdbuf->bufsz)
return DRM_ERR(EINVAL);
return -EINVAL;
/* Fixme !! This simply emits a packet without much checking.
We need to be smarter. */
@ -560,7 +678,7 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
/* Is it packet 3 ? */
if ((header >> 30) != 0x3) {
DRM_ERROR("Not a packet3 header (0x%08x)\n", header);
return DRM_ERR(EINVAL);
return -EINVAL;
}
count = (header >> 16) & 0x3fff;
@ -570,7 +688,7 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
DRM_ERROR
("Expected packet3 of length %d but have only %d bytes left\n",
(count + 2) * 4, cmdbuf->bufsz);
return DRM_ERR(EINVAL);
return -EINVAL;
}
/* Is it a packet type we know about ? */
@ -581,17 +699,29 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
case RADEON_CNTL_BITBLT_MULTI:
return r300_emit_bitblt_multi(dev_priv, cmdbuf);
case RADEON_CP_3D_DRAW_IMMD_2: /* triggers drawing using in-packet vertex data */
case RADEON_CP_3D_DRAW_VBUF_2: /* triggers drawing of vertex buffers setup elsewhere */
case RADEON_CP_3D_DRAW_INDX_2: /* triggers drawing using indices to vertex buffer */
case RADEON_CP_INDX_BUFFER: /* DRAW_INDX_2 without INDX_BUFFER seems to lock up the gpu */
case RADEON_CP_INDX_BUFFER:
DRM_ERROR("packet3 INDX_BUFFER without preceding 3D_DRAW_INDX_2 is illegal.\n");
return -EINVAL;
case RADEON_CP_3D_DRAW_IMMD_2:
/* triggers drawing using in-packet vertex data */
case RADEON_CP_3D_DRAW_VBUF_2:
/* triggers drawing of vertex buffers setup elsewhere */
dev_priv->track_flush &= ~(RADEON_FLUSH_EMITED |
RADEON_PURGE_EMITED);
break;
case RADEON_CP_3D_DRAW_INDX_2:
/* triggers drawing using indices to vertex buffer */
/* whenever we send vertices we clear the flush & purge flags */
dev_priv->track_flush &= ~(RADEON_FLUSH_EMITED |
RADEON_PURGE_EMITED);
return r300_emit_draw_indx_2(dev_priv, cmdbuf);
case RADEON_WAIT_FOR_IDLE:
case RADEON_CP_NOP:
/* these packets are safe */
break;
default:
DRM_ERROR("Unknown packet3 header (0x%08x)\n", header);
return DRM_ERR(EINVAL);
return -EINVAL;
}
BEGIN_RING(count + 2);
@ -655,7 +785,7 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
DRM_ERROR("bad packet3 type %i at %p\n",
header.packet3.packet,
cmdbuf->buf - sizeof(header));
return DRM_ERR(EINVAL);
return -EINVAL;
}
n += R300_SIMULTANEOUS_CLIPRECTS;
@ -680,16 +810,53 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
*/
static __inline__ void r300_pacify(drm_radeon_private_t *dev_priv)
{
uint32_t cache_z, cache_3d, cache_2d;
RING_LOCALS;
BEGIN_RING(6);
OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
OUT_RING(0xa);
OUT_RING(CP_PACKET0(0x4f18, 0));
OUT_RING(0x3);
OUT_RING(CP_PACKET3(RADEON_CP_NOP, 0));
OUT_RING(0x0);
cache_z = R300_ZC_FLUSH;
cache_2d = R300_RB2D_DC_FLUSH;
cache_3d = R300_RB3D_DC_FLUSH;
if (!(dev_priv->track_flush & RADEON_PURGE_EMITED)) {
/* we can purge; primitives were drawn since the last purge */
cache_z |= R300_ZC_FREE;
cache_2d |= R300_RB2D_DC_FREE;
cache_3d |= R300_RB3D_DC_FREE;
}
/* flush & purge zbuffer */
BEGIN_RING(2);
OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0));
OUT_RING(cache_z);
ADVANCE_RING();
/* flush & purge 3d */
BEGIN_RING(2);
OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
OUT_RING(cache_3d);
ADVANCE_RING();
/* flush & purge texture */
BEGIN_RING(2);
OUT_RING(CP_PACKET0(R300_TX_INVALTAGS, 0));
OUT_RING(0);
ADVANCE_RING();
/* FIXME: is this one really needed ? */
BEGIN_RING(2);
OUT_RING(CP_PACKET0(R300_RB3D_AARESOLVE_CTL, 0));
OUT_RING(0);
ADVANCE_RING();
BEGIN_RING(2);
OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
ADVANCE_RING();
/* flush & purge 2d through E2 as RB2D will trigger lockup */
BEGIN_RING(4);
OUT_RING(CP_PACKET0(R300_DSTCACHE_CTLSTAT, 0));
OUT_RING(cache_2d);
OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
OUT_RING(RADEON_WAIT_2D_IDLECLEAN |
RADEON_WAIT_HOST_IDLECLEAN);
ADVANCE_RING();
/* set flush & purge flags */
dev_priv->track_flush |= RADEON_FLUSH_EMITED | RADEON_PURGE_EMITED;
}
/**
@ -697,7 +864,7 @@ static __inline__ void r300_pacify(drm_radeon_private_t *dev_priv)
* The actual age emit is done by r300_do_cp_cmdbuf, which is why you must
* be careful about how this function is called.
*/
static void r300_discard_buffer(drm_device_t * dev, drm_buf_t * buf)
static void r300_discard_buffer(struct drm_device * dev, struct drm_buf * buf)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
@ -707,6 +874,47 @@ static void r300_discard_buffer(drm_device_t * dev, drm_buf_t * buf)
buf->used = 0;
}
static void r300_cmd_wait(drm_radeon_private_t * dev_priv,
drm_r300_cmd_header_t header)
{
u32 wait_until;
RING_LOCALS;
if (!header.wait.flags)
return;
wait_until = 0;
switch(header.wait.flags) {
case R300_WAIT_2D:
wait_until = RADEON_WAIT_2D_IDLE;
break;
case R300_WAIT_3D:
wait_until = RADEON_WAIT_3D_IDLE;
break;
case R300_NEW_WAIT_2D_3D:
wait_until = RADEON_WAIT_2D_IDLE|RADEON_WAIT_3D_IDLE;
break;
case R300_NEW_WAIT_2D_2D_CLEAN:
wait_until = RADEON_WAIT_2D_IDLE|RADEON_WAIT_2D_IDLECLEAN;
break;
case R300_NEW_WAIT_3D_3D_CLEAN:
wait_until = RADEON_WAIT_3D_IDLE|RADEON_WAIT_3D_IDLECLEAN;
break;
case R300_NEW_WAIT_2D_2D_CLEAN_3D_3D_CLEAN:
wait_until = RADEON_WAIT_2D_IDLE|RADEON_WAIT_2D_IDLECLEAN;
wait_until |= RADEON_WAIT_3D_IDLE|RADEON_WAIT_3D_IDLECLEAN;
break;
default:
return;
}
BEGIN_RING(2);
OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
OUT_RING(wait_until);
ADVANCE_RING();
}
static int r300_scratch(drm_radeon_private_t *dev_priv,
drm_radeon_kcmd_buffer_t *cmdbuf,
drm_r300_cmd_header_t header)
@ -714,76 +922,122 @@ static int r300_scratch(drm_radeon_private_t *dev_priv,
u32 *ref_age_base;
u32 i, buf_idx, h_pending;
RING_LOCALS;
if (cmdbuf->bufsz < sizeof(uint64_t) + header.scratch.n_bufs * sizeof(buf_idx) ) {
return DRM_ERR(EINVAL);
return -EINVAL;
}
if (header.scratch.reg >= 5) {
return DRM_ERR(EINVAL);
return -EINVAL;
}
dev_priv->scratch_ages[header.scratch.reg] ++;
ref_age_base = (u32 *)(unsigned long)*((uint64_t *)cmdbuf->buf);
cmdbuf->buf += sizeof(uint64_t);
cmdbuf->bufsz -= sizeof(uint64_t);
for (i=0; i < header.scratch.n_bufs; i++) {
buf_idx = *(u32 *)cmdbuf->buf;
buf_idx *= 2; /* 8 bytes per buf */
if (DRM_COPY_TO_USER(ref_age_base + buf_idx, &dev_priv->scratch_ages[header.scratch.reg], sizeof(u32))) {
return DRM_ERR(EINVAL);
return -EINVAL;
}
if (DRM_COPY_FROM_USER(&h_pending, ref_age_base + buf_idx + 1, sizeof(u32))) {
return DRM_ERR(EINVAL);
return -EINVAL;
}
if (h_pending == 0) {
return DRM_ERR(EINVAL);
return -EINVAL;
}
h_pending--;
if (DRM_COPY_TO_USER(ref_age_base + buf_idx + 1, &h_pending, sizeof(u32))) {
return DRM_ERR(EINVAL);
return -EINVAL;
}
cmdbuf->buf += sizeof(buf_idx);
cmdbuf->bufsz -= sizeof(buf_idx);
}
BEGIN_RING(2);
OUT_RING( CP_PACKET0( RADEON_SCRATCH_REG0 + header.scratch.reg * 4, 0 ) );
OUT_RING( dev_priv->scratch_ages[header.scratch.reg] );
ADVANCE_RING();
return 0;
}
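
The scratch handler works against a user-supplied array holding two u32 words per buffer, an age the kernel writes back and a pending count the client set before submitting, and then emits the same age into the chosen scratch register. A self-contained model of that bookkeeping, with plain stores standing in for DRM_COPY_TO_USER/DRM_COPY_FROM_USER:

#include <stdio.h>
#include <stdint.h>

/* two words per buffer, as implied by "buf_idx *= 2; 8 bytes per buf" */
struct ref_age {
	uint32_t age;		/* last age the kernel wrote for this buffer */
	uint32_t pending;	/* submissions the client is still waiting on */
};

/* Mirrors the per-buffer loop in r300_scratch(): publish the new age
 * and retire one pending submission for each listed buffer. */
static int fake_scratch_update(struct ref_age *base, uint32_t buf_idx,
    uint32_t new_age)
{
	if (base[buf_idx].pending == 0)
		return -1;		/* nothing outstanding: reject */
	base[buf_idx].age = new_age;
	base[buf_idx].pending--;
	return 0;
}

int main(void)
{
	struct ref_age bufs[4] = { [2] = { .age = 0, .pending = 1 } };

	if (fake_scratch_update(bufs, 2, 7) == 0)
		printf("buf 2: age %u, pending %u\n",
		    bufs[2].age, bufs[2].pending);
	return 0;
}
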
/**
* Uploads user-supplied vertex program instructions or parameters onto
* the graphics card.
* Called by r300_do_cp_cmdbuf.
*/
static __inline__ int r300_emit_r500fp(drm_radeon_private_t *dev_priv,
drm_radeon_kcmd_buffer_t *cmdbuf,
drm_r300_cmd_header_t header)
{
int sz;
int addr;
int type;
int clamp;
int stride;
RING_LOCALS;
sz = header.r500fp.count;
/* address is 9 bits 0 - 8, bit 1 of flags is part of address */
addr = ((header.r500fp.adrhi_flags & 1) << 8) | header.r500fp.adrlo;
type = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_TYPE);
clamp = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_CLAMP);
addr |= (type << 16);
addr |= (clamp << 17);
stride = type ? 4 : 6;
DRM_DEBUG("r500fp %d %d type: %d\n", sz, addr, type);
if (!sz)
return 0;
if (sz * stride * 4 > cmdbuf->bufsz)
return -EINVAL;
BEGIN_RING(3 + sz * stride);
OUT_RING_REG(R500_GA_US_VECTOR_INDEX, addr);
OUT_RING(CP_PACKET0_TABLE(R500_GA_US_VECTOR_DATA, sz * stride - 1));
OUT_RING_TABLE((int *)cmdbuf->buf, sz * stride);
ADVANCE_RING();
cmdbuf->buf += sz * stride * 4;
cmdbuf->bufsz -= sz * stride * 4;
return 0;
}
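
r300_emit_r500fp() rebuilds the constant upload address from the packed header: adrlo holds bits 0-7, bit 0 of adrhi_flags supplies bit 8, and the TYPE/CLAMP flags from the interface header land at bits 16/17 of the value written to the vector index register. A self-contained check of that bit packing:

#include <stdio.h>

#define R500FP_CONSTANT_TYPE	(1 << 1)
#define R500FP_CONSTANT_CLAMP	(1 << 2)

/* Rebuild the vector-index value the same way r300_emit_r500fp() does. */
static unsigned int r500fp_addr(unsigned char adrhi_flags, unsigned char adrlo)
{
	unsigned int addr = ((adrhi_flags & 1) << 8) | adrlo;
	int type = !!(adrhi_flags & R500FP_CONSTANT_TYPE);
	int clamp = !!(adrhi_flags & R500FP_CONSTANT_CLAMP);

	addr |= (unsigned int)type << 16;
	addr |= (unsigned int)clamp << 17;
	return addr;
}

int main(void)
{
	/* address 0x123 (bit 8 comes from the flags byte), constant type, no clamp */
	printf("0x%05x\n", r500fp_addr(1 | R500FP_CONSTANT_TYPE, 0x23));
	return 0;
}
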
/**
* Parses and validates a user-supplied command buffer and emits appropriate
* commands on the DMA ring buffer.
* Called by the ioctl handler function radeon_cp_cmdbuf.
*/
int r300_do_cp_cmdbuf(drm_device_t *dev,
DRMFILE filp,
drm_file_t *filp_priv,
int r300_do_cp_cmdbuf(struct drm_device *dev,
struct drm_file *file_priv,
drm_radeon_kcmd_buffer_t *cmdbuf)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_device_dma_t *dma = dev->dma;
drm_buf_t *buf = NULL;
struct drm_device_dma *dma = dev->dma;
struct drm_buf *buf = NULL;
int emit_dispatch_age = 0;
int ret = 0;
DRM_DEBUG("\n");
/* See the comment above r300_emit_begin3d for why this call must be here,
* and what the cleanup gotos are for. */
/* pacify */
r300_pacify(dev_priv);
if (cmdbuf->nbox <= R300_SIMULTANEOUS_CLIPRECTS) {
@ -803,7 +1057,6 @@ int r300_do_cp_cmdbuf(drm_device_t *dev,
switch (header.header.cmd_type) {
case R300_CMD_PACKET0:
DRM_DEBUG("R300_CMD_PACKET0\n");
ret = r300_emit_packet0(dev_priv, cmdbuf, header);
if (ret) {
DRM_ERROR("r300_emit_packet0 failed\n");
@ -869,15 +1122,16 @@ int r300_do_cp_cmdbuf(drm_device_t *dev,
if (idx < 0 || idx >= dma->buf_count) {
DRM_ERROR("buffer index %d (of %d max)\n",
idx, dma->buf_count - 1);
ret = DRM_ERR(EINVAL);
ret = -EINVAL;
goto cleanup;
}
buf = dma->buflist[idx];
if (buf->filp != filp || buf->pending) {
if (buf->file_priv != file_priv || buf->pending) {
DRM_ERROR("bad buffer %p %p %d\n",
buf->filp, filp, buf->pending);
ret = DRM_ERR(EINVAL);
buf->file_priv, file_priv,
buf->pending);
ret = -EINVAL;
goto cleanup;
}
@ -886,19 +1140,8 @@ int r300_do_cp_cmdbuf(drm_device_t *dev,
break;
case R300_CMD_WAIT:
/* simple enough, we can do it here */
DRM_DEBUG("R300_CMD_WAIT\n");
if (header.wait.flags == 0)
break; /* nothing to do */
{
RING_LOCALS;
BEGIN_RING(2);
OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
OUT_RING((header.wait.flags & 0xf) << 14);
ADVANCE_RING();
}
r300_cmd_wait(dev_priv, header);
break;
case R300_CMD_SCRATCH:
@ -909,12 +1152,25 @@ int r300_do_cp_cmdbuf(drm_device_t *dev,
goto cleanup;
}
break;
case R300_CMD_R500FP:
if ((dev_priv->flags & RADEON_FAMILY_MASK) < CHIP_RV515) {
DRM_ERROR("Calling r500 command on r300 card\n");
ret = -EINVAL;
goto cleanup;
}
DRM_DEBUG("R300_CMD_R500FP\n");
ret = r300_emit_r500fp(dev_priv, cmdbuf, header);
if (ret) {
DRM_ERROR("r300_emit_r500fp failed\n");
goto cleanup;
}
break;
default:
DRM_ERROR("bad cmd_type %i at %p\n",
header.header.cmd_type,
cmdbuf->buf - sizeof(header));
ret = DRM_ERR(EINVAL);
ret = -EINVAL;
goto cleanup;
}
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -226,11 +226,24 @@ typedef union {
#define R300_CMD_CP_DELAY 5
#define R300_CMD_DMA_DISCARD 6
#define R300_CMD_WAIT 7
# define R300_WAIT_2D 0x1
# define R300_WAIT_3D 0x2
# define R300_WAIT_2D_CLEAN 0x3
# define R300_WAIT_3D_CLEAN 0x4
# define R300_WAIT_2D 0x1
# define R300_WAIT_3D 0x2
/* these two defines are DOING IT WRONG - however
 * we have userspace which relies on using these.
 * The wait interface is kept for backwards compatibility;
 * new code should use the NEW_WAIT defines below.
 * THESE ARE NOT BIT FIELDS
 */
# define R300_WAIT_2D_CLEAN 0x3
# define R300_WAIT_3D_CLEAN 0x4
# define R300_NEW_WAIT_2D_3D 0x3
# define R300_NEW_WAIT_2D_2D_CLEAN 0x4
# define R300_NEW_WAIT_3D_3D_CLEAN 0x6
# define R300_NEW_WAIT_2D_2D_CLEAN_3D_3D_CLEAN 0x8
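
The numeric collision is visible right here: legacy R300_WAIT_2D_CLEAN and R300_WAIT_3D_CLEAN reuse 0x3 and 0x4, the same values as the new combined waits, which is why r300_cmd_wait() must switch on the whole value rather than test individual bits. A small self-contained check using only the values defined above:

#include <stdio.h>

#define R300_WAIT_2D				0x1
#define R300_WAIT_3D				0x2
#define R300_WAIT_2D_CLEAN			0x3	/* legacy, same value as... */
#define R300_NEW_WAIT_2D_3D			0x3	/* ...this combined wait */
#define R300_WAIT_3D_CLEAN			0x4
#define R300_NEW_WAIT_2D_2D_CLEAN		0x4
#define R300_NEW_WAIT_3D_3D_CLEAN		0x6
#define R300_NEW_WAIT_2D_2D_CLEAN_3D_3D_CLEAN	0x8

int main(void)
{
	/* ORing the two simple waits gives 0x3, which the legacy interface
	 * already uses for "2D clean" -- so the flags cannot be bit fields. */
	printf("2D|3D = 0x%x, legacy 2D_CLEAN = 0x%x\n",
	    R300_WAIT_2D | R300_WAIT_3D, R300_WAIT_2D_CLEAN);
	return 0;
}
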
#define R300_CMD_SCRATCH 8
#define R300_CMD_R500FP 9
typedef union {
unsigned int u;
@ -259,6 +272,9 @@ typedef union {
struct {
unsigned char cmd_type, reg, n_bufs, flags;
} scratch;
struct {
unsigned char cmd_type, count, adrlo, adrhi_flags;
} r500fp;
} drm_r300_cmd_header_t;
#define RADEON_FRONT 0x1
@ -269,6 +285,9 @@ typedef union {
#define RADEON_USE_HIERZ 0x40000000
#define RADEON_USE_COMP_ZBUF 0x20000000
#define R500FP_CONSTANT_TYPE (1 << 1)
#define R500FP_CONSTANT_CLAMP (1 << 2)
/* Primitive types
*/
#define RADEON_POINTS 0x1
@ -420,7 +439,7 @@ typedef struct {
/* The current cliprects, or a subset thereof.
*/
drm_clip_rect_t boxes[RADEON_NR_SAREA_CLIPRECTS];
struct drm_clip_rect boxes[RADEON_NR_SAREA_CLIPRECTS];
unsigned int nbox;
/* Counters for client-side throttling of rendering clients.
@ -429,7 +448,7 @@ typedef struct {
unsigned int last_dispatch;
unsigned int last_clear;
drm_tex_region_t tex_list[RADEON_NR_TEX_HEAPS][RADEON_NR_TEX_REGIONS +
struct drm_tex_region tex_list[RADEON_NR_TEX_HEAPS][RADEON_NR_TEX_REGIONS +
1];
unsigned int tex_age[RADEON_NR_TEX_HEAPS];
int ctx_owner;
@ -607,7 +626,7 @@ typedef struct drm_radeon_cmd_buffer {
int bufsz;
char __user *buf;
int nbox;
drm_clip_rect_t __user *boxes;
struct drm_clip_rect __user *boxes;
} drm_radeon_cmd_buffer_t;
typedef struct drm_radeon_tex_image {
@ -658,6 +677,9 @@ typedef struct drm_radeon_indirect {
#define RADEON_PARAM_GART_TEX_HANDLE 10
#define RADEON_PARAM_SCRATCH_OFFSET 11
#define RADEON_PARAM_CARD_TYPE 12
#define RADEON_PARAM_VBLANK_CRTC 13 /* VBLANK CRTC */
#define RADEON_PARAM_FB_LOCATION 14 /* FB location */
#define RADEON_PARAM_NUM_GB_PIPES 15 /* num GB pipes */
typedef struct drm_radeon_getparam {
int param;
@ -711,7 +733,8 @@ typedef struct drm_radeon_setparam {
#define RADEON_SETPARAM_PCIGART_LOCATION 3 /* PCI Gart Location */
#define RADEON_SETPARAM_NEW_MEMMAP 4 /* Use new memory map */
#define RADEON_SETPARAM_PCIGART_TABLE_SIZE 5 /* PCI GART Table Size */
#define RADEON_SETPARAM_VBLANK_CRTC 6 /* VBLANK CRTC */
/* 1.14: Clients can allocate/free a surface
*/
typedef struct drm_radeon_surface_alloc {
@ -724,5 +747,7 @@ typedef struct drm_radeon_surface_free {
unsigned int address;
} drm_radeon_surface_free_t;
#define DRM_RADEON_VBLANK_CRTC1 1
#define DRM_RADEON_VBLANK_CRTC2 2
#endif


@ -45,7 +45,7 @@ static drm_pci_id_list_t radeon_pciidlist[] = {
radeon_PCI_IDS
};
static void radeon_configure(drm_device_t *dev)
static void radeon_configure(struct drm_device *dev)
{
dev->driver.buf_priv_size = sizeof(drm_radeon_buf_priv_t);
dev->driver.load = radeon_driver_load;
@ -55,7 +55,9 @@ static void radeon_configure(drm_device_t *dev)
dev->driver.preclose = radeon_driver_preclose;
dev->driver.postclose = radeon_driver_postclose;
dev->driver.lastclose = radeon_driver_lastclose;
dev->driver.vblank_wait = radeon_driver_vblank_wait;
dev->driver.get_vblank_counter = radeon_get_vblank_counter;
dev->driver.enable_vblank = radeon_enable_vblank;
dev->driver.disable_vblank = radeon_disable_vblank;
dev->driver.irq_preinstall = radeon_driver_irq_preinstall;
dev->driver.irq_postinstall = radeon_driver_irq_postinstall;
dev->driver.irq_uninstall = radeon_driver_irq_uninstall;
@ -79,6 +81,7 @@ static void radeon_configure(drm_device_t *dev)
dev->driver.use_dma = 1;
dev->driver.use_irq = 1;
dev->driver.use_vbl_irq = 1;
dev->driver.use_vbl_irq2 = 1;
}
#ifdef __FreeBSD__
@ -91,9 +94,9 @@ radeon_probe(device_t dev)
static int
radeon_attach(device_t nbdev)
{
drm_device_t *dev = device_get_softc(nbdev);
struct drm_device *dev = device_get_softc(nbdev);
bzero(dev, sizeof(drm_device_t));
bzero(dev, sizeof(struct drm_device));
radeon_configure(dev);
return drm_attach(nbdev, radeon_pciidlist);
}
@ -110,7 +113,7 @@ static device_method_t radeon_methods[] = {
static driver_t radeon_driver = {
"drm",
radeon_methods,
sizeof(drm_device_t)
sizeof(struct drm_device)
};
extern devclass_t drm_devclass;
@ -125,7 +128,7 @@ MODULE_DEPEND(radeon, drm, 1, 1, 1);
#ifdef _LKM
CFDRIVER_DECL(radeon, DV_TTY, NULL);
#else
CFATTACH_DECL(radeon, sizeof(drm_device_t), drm_probe, drm_attach, drm_detach,
drm_activate);
CFATTACH_DECL(radeon, sizeof(struct drm_device), drm_probe, drm_attach,
drm_detach, drm_activate);
#endif
#endif /* __FreeBSD__ */


@ -41,7 +41,7 @@ __FBSDID("$FreeBSD$");
#define DRIVER_NAME "radeon"
#define DRIVER_DESC "ATI Radeon"
#define DRIVER_DATE "20060524"
#define DRIVER_DATE "20080613"
/* Interface history:
*
@ -98,10 +98,14 @@ __FBSDID("$FreeBSD$");
* 1.24- Add general-purpose packet for manipulating scratch registers (r300)
* 1.25- Add support for r200 vertex programs (R200_EMIT_VAP_PVS_CNTL,
* new packet type)
* 1.26- Add support for variable size PCI(E) gart aperture
* 1.27- Add support for IGP GART
* 1.28- Add support for VBL on CRTC2
* 1.29- R500 3D cmd buffer support
*/
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 25
#define DRIVER_MINOR 29
#define DRIVER_PATCHLEVEL 0
/*
@ -124,29 +128,32 @@ enum radeon_family {
CHIP_R420,
CHIP_RV410,
CHIP_RS400,
CHIP_RS480,
CHIP_RS690,
CHIP_RV515,
CHIP_R520,
CHIP_RV530,
CHIP_RV560,
CHIP_RV570,
CHIP_R580,
CHIP_LAST,
};
enum radeon_cp_microcode_version {
UCODE_R100,
UCODE_R200,
UCODE_R300,
};
/*
* Chip flags
*/
enum radeon_chip_flags {
CHIP_FAMILY_MASK = 0x0000ffffUL,
CHIP_FLAGS_MASK = 0xffff0000UL,
CHIP_IS_MOBILITY = 0x00010000UL,
CHIP_IS_IGP = 0x00020000UL,
CHIP_SINGLE_CRTC = 0x00040000UL,
CHIP_IS_AGP = 0x00080000UL,
CHIP_HAS_HIERZ = 0x00100000UL,
CHIP_IS_PCIE = 0x00200000UL,
CHIP_NEW_MEMMAP = 0x00400000UL,
CHIP_IS_PCI = 0x00800000UL,
RADEON_FAMILY_MASK = 0x0000ffffUL,
RADEON_FLAGS_MASK = 0xffff0000UL,
RADEON_IS_MOBILITY = 0x00010000UL,
RADEON_IS_IGP = 0x00020000UL,
RADEON_SINGLE_CRTC = 0x00040000UL,
RADEON_IS_AGP = 0x00080000UL,
RADEON_HAS_HIERZ = 0x00100000UL,
RADEON_IS_PCIE = 0x00200000UL,
RADEON_NEW_MEMMAP = 0x00400000UL,
RADEON_IS_PCI = 0x00800000UL,
RADEON_IS_IGPGART = 0x01000000UL,
};
#define GET_RING_HEAD(dev_priv) (dev_priv->writeback_works ? \
@ -155,7 +162,7 @@ enum radeon_chip_flags {
typedef struct drm_radeon_freelist {
unsigned int age;
drm_buf_t *buf;
struct drm_buf *buf;
struct drm_radeon_freelist *next;
struct drm_radeon_freelist *prev;
} drm_radeon_freelist_t;
@ -163,8 +170,14 @@ typedef struct drm_radeon_freelist {
typedef struct drm_radeon_ring_buffer {
u32 *start;
u32 *end;
int size;
int size_l2qw;
int size; /* Double Words */
int size_l2qw; /* log2 Quad Words */
int rptr_update; /* Double Words */
int rptr_update_l2qw; /* log2 Quad Words */
int fetch_size; /* Double Words */
int fetch_size_l2ow; /* log2 Oct Words */
u32 tail;
u32 tail_mask;
@ -188,7 +201,7 @@ struct mem_block {
struct mem_block *prev;
int start;
int size;
DRMFILE filp; /* 0: free, -1: heap, other: real files */
struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
};
struct radeon_surface {
@ -203,9 +216,12 @@ struct radeon_virt_surface {
u32 lower;
u32 upper;
u32 flags;
DRMFILE filp;
struct drm_file *file_priv;
};
#define RADEON_FLUSH_EMITED (1 << 0)
#define RADEON_PURGE_EMITED (1 << 1)
typedef struct drm_radeon_private {
drm_radeon_ring_buffer_t ring;
@ -230,8 +246,6 @@ typedef struct drm_radeon_private {
int usec_timeout;
int microcode_version;
struct {
u32 boxes;
int freelist_timeouts;
@ -245,7 +259,6 @@ typedef struct drm_radeon_private {
int do_boxes;
int page_flipping;
int current_page;
u32 color_fmt;
unsigned int front_offset;
@ -280,18 +293,30 @@ typedef struct drm_radeon_private {
/* SW interrupt */
wait_queue_head_t swi_queue;
atomic_t swi_emitted;
int vblank_crtc;
uint32_t irq_enable_reg;
int irq_enabled;
uint32_t r500_disp_irq_reg;
struct radeon_surface surfaces[RADEON_MAX_SURFACES];
struct radeon_virt_surface virt_surfaces[2*RADEON_MAX_SURFACES];
struct radeon_virt_surface virt_surfaces[2 * RADEON_MAX_SURFACES];
unsigned long pcigart_offset;
drm_ati_pcigart_info gart_info;
unsigned int pcigart_offset_set;
struct drm_ati_pcigart_info gart_info;
u32 scratch_ages[5];
unsigned int crtc_last_cnt;
unsigned int crtc2_last_cnt;
/* starting from here on, data is preserved across an open */
uint32_t flags; /* see radeon_chip_flags */
unsigned long fb_aper_offset;
int num_gb_pipes;
int track_flush;
uint32_t chip_family; /* extract from flags */
} drm_radeon_private_t;
typedef struct drm_radeon_buf_priv {
@ -302,65 +327,89 @@ typedef struct drm_radeon_kcmd_buffer {
int bufsz;
char *buf;
int nbox;
drm_clip_rect_t __user *boxes;
struct drm_clip_rect __user *boxes;
} drm_radeon_kcmd_buffer_t;
extern int radeon_no_wb;
extern drm_ioctl_desc_t radeon_ioctls[];
extern struct drm_ioctl_desc radeon_ioctls[];
extern int radeon_max_ioctl;
/* radeon_cp.c */
extern int radeon_cp_init(DRM_IOCTL_ARGS);
extern int radeon_cp_start(DRM_IOCTL_ARGS);
extern int radeon_cp_stop(DRM_IOCTL_ARGS);
extern int radeon_cp_reset(DRM_IOCTL_ARGS);
extern int radeon_cp_idle(DRM_IOCTL_ARGS);
extern int radeon_cp_resume(DRM_IOCTL_ARGS);
extern int radeon_engine_reset(DRM_IOCTL_ARGS);
extern int radeon_fullscreen(DRM_IOCTL_ARGS);
extern int radeon_cp_buffers(DRM_IOCTL_ARGS);
/* Check whether the given hardware address is inside the framebuffer or the
* GART area.
*/
static __inline__ int radeon_check_offset(drm_radeon_private_t *dev_priv,
u64 off)
{
u32 fb_start = dev_priv->fb_location;
u32 fb_end = fb_start + dev_priv->fb_size - 1;
u32 gart_start = dev_priv->gart_vm_start;
u32 gart_end = gart_start + dev_priv->gart_size - 1;
extern void radeon_freelist_reset(drm_device_t * dev);
extern drm_buf_t *radeon_freelist_get(drm_device_t * dev);
return ((off >= fb_start && off <= fb_end) ||
(off >= gart_start && off <= gart_end));
}
/* radeon_cp.c */
extern int radeon_cp_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int radeon_cp_start(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int radeon_cp_stop(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int radeon_cp_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int radeon_cp_idle(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int radeon_cp_resume(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int radeon_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv);
extern void radeon_freelist_reset(struct drm_device * dev);
extern struct drm_buf *radeon_freelist_get(struct drm_device * dev);
extern int radeon_wait_ring(drm_radeon_private_t * dev_priv, int n);
extern int radeon_do_cp_idle(drm_radeon_private_t * dev_priv);
extern int radeon_mem_alloc(DRM_IOCTL_ARGS);
extern int radeon_mem_free(DRM_IOCTL_ARGS);
extern int radeon_mem_init_heap(DRM_IOCTL_ARGS);
extern int radeon_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int radeon_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int radeon_mem_init_heap(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern void radeon_mem_takedown(struct mem_block **heap);
extern void radeon_mem_release(DRMFILE filp, struct mem_block *heap);
extern void radeon_mem_release(struct drm_file *file_priv,
struct mem_block *heap);
/* radeon_irq.c */
extern int radeon_irq_emit(DRM_IOCTL_ARGS);
extern int radeon_irq_wait(DRM_IOCTL_ARGS);
extern void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state);
extern int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern void radeon_do_release(drm_device_t * dev);
extern int radeon_driver_vblank_wait(drm_device_t * dev,
unsigned int *sequence);
extern void radeon_do_release(struct drm_device * dev);
extern u32 radeon_get_vblank_counter(struct drm_device *dev, int crtc);
extern int radeon_enable_vblank(struct drm_device *dev, int crtc);
extern void radeon_disable_vblank(struct drm_device *dev, int crtc);
extern irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS);
extern void radeon_driver_irq_preinstall(drm_device_t * dev);
extern void radeon_driver_irq_postinstall(drm_device_t * dev);
extern void radeon_driver_irq_uninstall(drm_device_t * dev);
extern void radeon_driver_irq_preinstall(struct drm_device * dev);
extern int radeon_driver_irq_postinstall(struct drm_device * dev);
extern void radeon_driver_irq_uninstall(struct drm_device * dev);
extern int radeon_vblank_crtc_get(struct drm_device *dev);
extern int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value);
extern int radeon_driver_load(struct drm_device *dev, unsigned long flags);
extern int radeon_driver_unload(struct drm_device *dev);
extern int radeon_driver_firstopen(struct drm_device *dev);
extern void radeon_driver_preclose(drm_device_t * dev, DRMFILE filp);
extern void radeon_driver_postclose(drm_device_t * dev, drm_file_t * filp);
extern void radeon_driver_lastclose(drm_device_t * dev);
extern int radeon_driver_open(drm_device_t * dev, drm_file_t * filp_priv);
extern void radeon_driver_preclose(struct drm_device * dev,
struct drm_file *file_priv);
extern void radeon_driver_postclose(struct drm_device * dev,
struct drm_file *file_priv);
extern void radeon_driver_lastclose(struct drm_device * dev);
extern int radeon_driver_open(struct drm_device * dev,
struct drm_file * file_priv);
extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
/* r300_cmdbuf.c */
extern void r300_init_reg_flags(void);
extern void r300_init_reg_flags(struct drm_device *dev);
extern int r300_do_cp_cmdbuf(drm_device_t *dev, DRMFILE filp,
drm_file_t* filp_priv,
drm_radeon_kcmd_buffer_t* cmdbuf);
extern int r300_do_cp_cmdbuf(struct drm_device *dev,
struct drm_file *file_priv,
drm_radeon_kcmd_buffer_t *cmdbuf);
/* Flags for stats.boxes
*/
@ -402,30 +451,122 @@ extern int r300_do_cp_cmdbuf(drm_device_t *dev, DRMFILE filp,
#define RADEON_PCIE_INDEX 0x0030
#define RADEON_PCIE_DATA 0x0034
#define RADEON_PCIE_TX_GART_CNTL 0x10
# define RADEON_PCIE_TX_GART_EN (1 << 0)
# define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_PASS_THRU (0<<1)
# define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_CLAMP_LO (1<<1)
# define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD (3<<1)
# define RADEON_PCIE_TX_GART_MODE_32_128_CACHE (0<<3)
# define RADEON_PCIE_TX_GART_MODE_8_4_128_CACHE (1<<3)
# define RADEON_PCIE_TX_GART_CHK_RW_VALID_EN (1<<5)
# define RADEON_PCIE_TX_GART_INVALIDATE_TLB (1<<8)
# define RADEON_PCIE_TX_GART_EN (1 << 0)
# define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_PASS_THRU (0 << 1)
# define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_CLAMP_LO (1 << 1)
# define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD (3 << 1)
# define RADEON_PCIE_TX_GART_MODE_32_128_CACHE (0 << 3)
# define RADEON_PCIE_TX_GART_MODE_8_4_128_CACHE (1 << 3)
# define RADEON_PCIE_TX_GART_CHK_RW_VALID_EN (1 << 5)
# define RADEON_PCIE_TX_GART_INVALIDATE_TLB (1 << 8)
#define RADEON_PCIE_TX_DISCARD_RD_ADDR_LO 0x11
#define RADEON_PCIE_TX_DISCARD_RD_ADDR_HI 0x12
#define RADEON_PCIE_TX_GART_BASE 0x13
#define RADEON_PCIE_TX_GART_BASE 0x13
#define RADEON_PCIE_TX_GART_START_LO 0x14
#define RADEON_PCIE_TX_GART_START_HI 0x15
#define RADEON_PCIE_TX_GART_END_LO 0x16
#define RADEON_PCIE_TX_GART_END_HI 0x17
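/* Editor's sketch, not this patch's radeon_set_pciegart(): how the
 * indirect PCIE registers above are typically programmed to place and
 * enable the GART aperture.  RADEON_WRITE_PCIE is the index/data helper
 * defined further down in this header; the table address and aperture
 * bounds are placeholders supplied by the caller.
 */
static void radeon_pciegart_enable_sketch(drm_radeon_private_t *dev_priv,
					  u32 table_addr, u32 aper_start,
					  u32 aper_end)
{
	RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr);
	RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_START_LO, aper_start);
	RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
	RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_END_LO, aper_end);
	RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
	RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL,
			  RADEON_PCIE_TX_GART_EN |
			  RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD);
}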
#define RS480_NB_MC_INDEX 0x168
# define RS480_NB_MC_IND_WR_EN (1 << 8)
#define RS480_NB_MC_DATA 0x16c
#define RS690_MC_INDEX 0x78
# define RS690_MC_INDEX_MASK 0x1ff
# define RS690_MC_INDEX_WR_EN (1 << 9)
# define RS690_MC_INDEX_WR_ACK 0x7f
#define RS690_MC_DATA 0x7c
/* MC indirect registers */
#define RS480_MC_MISC_CNTL 0x18
# define RS480_DISABLE_GTW (1 << 1)
/* switch between MCIND GART and MM GART registers. 0 = mmgart, 1 = mcind gart */
# define RS480_GART_INDEX_REG_EN (1 << 12)
# define RS690_BLOCK_GFX_D3_EN (1 << 14)
#define RS480_K8_FB_LOCATION 0x1e
#define RS480_GART_FEATURE_ID 0x2b
# define RS480_HANG_EN (1 << 11)
# define RS480_TLB_ENABLE (1 << 18)
# define RS480_P2P_ENABLE (1 << 19)
# define RS480_GTW_LAC_EN (1 << 25)
# define RS480_2LEVEL_GART (0 << 30)
# define RS480_1LEVEL_GART (1 << 30)
# define RS480_PDC_EN (1 << 31)
#define RS480_GART_BASE 0x2c
#define RS480_GART_CACHE_CNTRL 0x2e
# define RS480_GART_CACHE_INVALIDATE (1 << 0) /* wait for it to clear */
#define RS480_AGP_ADDRESS_SPACE_SIZE 0x38
# define RS480_GART_EN (1 << 0)
# define RS480_VA_SIZE_32MB (0 << 1)
# define RS480_VA_SIZE_64MB (1 << 1)
# define RS480_VA_SIZE_128MB (2 << 1)
# define RS480_VA_SIZE_256MB (3 << 1)
# define RS480_VA_SIZE_512MB (4 << 1)
# define RS480_VA_SIZE_1GB (5 << 1)
# define RS480_VA_SIZE_2GB (6 << 1)
#define RS480_AGP_MODE_CNTL 0x39
# define RS480_POST_GART_Q_SIZE (1 << 18)
# define RS480_NONGART_SNOOP (1 << 19)
# define RS480_AGP_RD_BUF_SIZE (1 << 20)
# define RS480_REQ_TYPE_SNOOP_SHIFT 22
# define RS480_REQ_TYPE_SNOOP_MASK 0x3
# define RS480_REQ_TYPE_SNOOP_DIS (1 << 24)
#define RS480_MC_MISC_UMA_CNTL 0x5f
#define RS480_MC_MCLK_CNTL 0x7a
#define RS480_MC_UMA_DUALCH_CNTL 0x86
#define RS690_MC_FB_LOCATION 0x100
#define RS690_MC_AGP_LOCATION 0x101
#define RS690_MC_AGP_BASE 0x102
#define RS690_MC_AGP_BASE_2 0x103
#define R520_MC_IND_INDEX 0x70
#define R520_MC_IND_WR_EN (1 << 24)
#define R520_MC_IND_DATA 0x74
#define RV515_MC_FB_LOCATION 0x01
#define RV515_MC_AGP_LOCATION 0x02
#define RV515_MC_AGP_BASE 0x03
#define RV515_MC_AGP_BASE_2 0x04
#define R520_MC_FB_LOCATION 0x04
#define R520_MC_AGP_LOCATION 0x05
#define R520_MC_AGP_BASE 0x06
#define R520_MC_AGP_BASE_2 0x07
#define RADEON_MPP_TB_CONFIG 0x01c0
#define RADEON_MEM_CNTL 0x0140
#define RADEON_MEM_SDRAM_MODE_REG 0x0158
#define RADEON_AGP_BASE_2 0x015c /* r200+ only */
#define RS480_AGP_BASE_2 0x0164
#define RADEON_AGP_BASE 0x0170
/* pipe config regs */
#define R400_GB_PIPE_SELECT 0x402c
#define R500_DYN_SCLK_PWMEM_PIPE 0x000d /* PLL */
#define R500_SU_REG_DEST 0x42c8
#define R300_GB_TILE_CONFIG 0x4018
# define R300_ENABLE_TILING (1 << 0)
# define R300_PIPE_COUNT_RV350 (0 << 1)
# define R300_PIPE_COUNT_R300 (3 << 1)
# define R300_PIPE_COUNT_R420_3P (6 << 1)
# define R300_PIPE_COUNT_R420 (7 << 1)
# define R300_TILE_SIZE_8 (0 << 4)
# define R300_TILE_SIZE_16 (1 << 4)
# define R300_TILE_SIZE_32 (2 << 4)
# define R300_SUBPIXEL_1_12 (0 << 16)
# define R300_SUBPIXEL_1_16 (1 << 16)
#define R300_DST_PIPE_CONFIG 0x170c
# define R300_PIPE_AUTO_CONFIG (1 << 31)
#define R300_RB2D_DSTCACHE_MODE 0x3428
# define R300_DC_AUTOFLUSH_ENABLE (1 << 8)
# define R300_DC_DC_DISABLE_IGNORE_PE (1 << 17)
#define RADEON_RB3D_COLOROFFSET 0x1c40
#define RADEON_RB3D_COLORPITCH 0x1c48
#define RADEON_SRC_X_Y 0x1590
#define RADEON_DP_GUI_MASTER_CNTL 0x146c
# define RADEON_GMC_SRC_PITCH_OFFSET_CNTL (1 << 0)
# define RADEON_GMC_DST_PITCH_OFFSET_CNTL (1 << 1)
@ -443,6 +584,7 @@ extern int r300_do_cp_cmdbuf(drm_device_t *dev, DRMFILE filp,
# define RADEON_ROP3_S 0x00cc0000
# define RADEON_ROP3_P 0x00f00000
#define RADEON_DP_WRITE_MASK 0x16cc
#define RADEON_SRC_PITCH_OFFSET 0x1428
#define RADEON_DST_PITCH_OFFSET 0x142c
#define RADEON_DST_PITCH_OFFSET_C 0x1c80
# define RADEON_DST_TILE_LINEAR (0 << 30)
@ -465,18 +607,29 @@ extern int r300_do_cp_cmdbuf(drm_device_t *dev, DRMFILE filp,
? DRM_READ32( dev_priv->ring_rptr, RADEON_SCRATCHOFF(x) ) \
: RADEON_READ( RADEON_SCRATCH_REG0 + 4*(x) ) )
#define RADEON_CRTC_CRNT_FRAME 0x0214
#define RADEON_CRTC2_CRNT_FRAME 0x0314
#define RADEON_CRTC_STATUS 0x005c
#define RADEON_CRTC2_STATUS 0x03fc
#define RADEON_GEN_INT_CNTL 0x0040
# define RADEON_CRTC_VBLANK_MASK (1 << 0)
# define RADEON_CRTC2_VBLANK_MASK (1 << 9)
# define RADEON_GUI_IDLE_INT_ENABLE (1 << 19)
# define RADEON_SW_INT_ENABLE (1 << 25)
#define RADEON_GEN_INT_STATUS 0x0044
# define RADEON_CRTC_VBLANK_STAT (1 << 0)
# define RADEON_CRTC_VBLANK_STAT_ACK (1 << 0)
# define RADEON_CRTC_VBLANK_STAT_ACK (1 << 0)
# define RADEON_CRTC2_VBLANK_STAT (1 << 9)
# define RADEON_CRTC2_VBLANK_STAT_ACK (1 << 9)
# define RADEON_GUI_IDLE_INT_TEST_ACK (1 << 19)
# define RADEON_SW_INT_TEST (1 << 25)
# define RADEON_SW_INT_TEST_ACK (1 << 25)
# define RADEON_SW_INT_TEST_ACK (1 << 25)
# define RADEON_SW_INT_FIRE (1 << 26)
# define R500_DISPLAY_INT_STATUS (1 << 0)
#define RADEON_HOST_PATH_CNTL 0x0130
# define RADEON_HDP_SOFT_RESET (1 << 26)
@ -520,11 +673,12 @@ extern int r300_do_cp_cmdbuf(drm_device_t *dev, DRMFILE filp,
#define RADEON_PP_TXFILTER_1 0x1c6c
#define RADEON_PP_TXFILTER_2 0x1c84
#define RADEON_RB2D_DSTCACHE_CTLSTAT 0x342c
# define RADEON_RB2D_DC_FLUSH (3 << 0)
# define RADEON_RB2D_DC_FREE (3 << 2)
# define RADEON_RB2D_DC_FLUSH_ALL 0xf
# define RADEON_RB2D_DC_BUSY (1 << 31)
#define R300_RB2D_DSTCACHE_CTLSTAT 0x342c /* use R300_DSTCACHE_CTLSTAT */
#define R300_DSTCACHE_CTLSTAT 0x1714
# define R300_RB2D_DC_FLUSH (3 << 0)
# define R300_RB2D_DC_FREE (3 << 2)
# define R300_RB2D_DC_FLUSH_ALL 0xf
# define R300_RB2D_DC_BUSY (1 << 31)
#define RADEON_RB3D_CNTL 0x1c3c
# define RADEON_ALPHA_BLEND_ENABLE (1 << 0)
# define RADEON_PLANE_MASK_ENABLE (1 << 1)
@ -547,11 +701,19 @@ extern int r300_do_cp_cmdbuf(drm_device_t *dev, DRMFILE filp,
# define RADEON_RB3D_ZC_FREE (1 << 2)
# define RADEON_RB3D_ZC_FLUSH_ALL 0x5
# define RADEON_RB3D_ZC_BUSY (1 << 31)
#define R300_ZB_ZCACHE_CTLSTAT 0x4f18
# define R300_ZC_FLUSH (1 << 0)
# define R300_ZC_FREE (1 << 1)
# define R300_ZC_BUSY (1 << 31)
#define RADEON_RB3D_DSTCACHE_CTLSTAT 0x325c
# define RADEON_RB3D_DC_FLUSH (3 << 0)
# define RADEON_RB3D_DC_FREE (3 << 2)
# define RADEON_RB3D_DC_FLUSH_ALL 0xf
# define RADEON_RB3D_DC_BUSY (1 << 31)
#define R300_RB3D_DSTCACHE_CTLSTAT 0x4e4c
# define R300_RB3D_DC_FLUSH (2 << 0)
# define R300_RB3D_DC_FREE (2 << 2)
# define R300_RB3D_DC_FINISH (1 << 4)
#define RADEON_RB3D_ZSTENCILCNTL 0x1c2c
# define RADEON_Z_TEST_MASK (7 << 4)
# define RADEON_Z_TEST_ALWAYS (7 << 4)
@ -573,9 +735,51 @@ extern int r300_do_cp_cmdbuf(drm_device_t *dev, DRMFILE filp,
# define RADEON_SOFT_RESET_E2 (1 << 5)
# define RADEON_SOFT_RESET_RB (1 << 6)
# define RADEON_SOFT_RESET_HDP (1 << 7)
/*
* 6:0 Available slots in the FIFO
* 8 Host Interface active
* 9 CP request active
* 10 FIFO request active
* 11 Host Interface retry active
* 12 CP retry active
* 13 FIFO retry active
* 14 FIFO pipeline busy
* 15 Event engine busy
* 16 CP command stream busy
* 17 2D engine busy
* 18 2D portion of render backend busy
* 20 3D setup engine busy
* 26 GA engine busy
* 27 CBA 2D engine busy
* 31 2D engine busy or 3D engine busy or FIFO not empty or CP busy or
* command stream queue not empty or Ring Buffer not empty
*/
#define RADEON_RBBM_STATUS 0x0e40
/* Same as the previous RADEON_RBBM_STATUS; this is a mirror of that register. */
/* #define RADEON_RBBM_STATUS 0x1740 */
/* bits 6:0 are dword slots available in the cmd fifo */
# define RADEON_RBBM_FIFOCNT_MASK 0x007f
# define RADEON_RBBM_ACTIVE (1 << 31)
# define RADEON_HIRQ_ON_RBB (1 << 8)
# define RADEON_CPRQ_ON_RBB (1 << 9)
# define RADEON_CFRQ_ON_RBB (1 << 10)
# define RADEON_HIRQ_IN_RTBUF (1 << 11)
# define RADEON_CPRQ_IN_RTBUF (1 << 12)
# define RADEON_CFRQ_IN_RTBUF (1 << 13)
# define RADEON_PIPE_BUSY (1 << 14)
# define RADEON_ENG_EV_BUSY (1 << 15)
# define RADEON_CP_CMDSTRM_BUSY (1 << 16)
# define RADEON_E2_BUSY (1 << 17)
# define RADEON_RB2D_BUSY (1 << 18)
# define RADEON_RB3D_BUSY (1 << 19) /* not used on r300 */
# define RADEON_VAP_BUSY (1 << 20)
# define RADEON_RE_BUSY (1 << 21) /* not used on r300 */
# define RADEON_TAM_BUSY (1 << 22) /* not used on r300 */
# define RADEON_TDM_BUSY (1 << 23) /* not used on r300 */
# define RADEON_PB_BUSY (1 << 24) /* not used on r300 */
# define RADEON_TIM_BUSY (1 << 25) /* not used on r300 */
# define RADEON_GA_BUSY (1 << 26)
# define RADEON_CBA2D_BUSY (1 << 27)
# define RADEON_RBBM_ACTIVE (1 << 31)
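/* Editor's sketch, not this patch's radeon_do_wait_for_idle(): a minimal
 * busy-wait on the RBBM_STATUS bits documented above, using the driver's
 * existing usec_timeout field.
 */
static int radeon_engine_idle_sketch(drm_radeon_private_t *dev_priv)
{
	int i;

	for (i = 0; i < dev_priv->usec_timeout; i++) {
		if (!(RADEON_READ(RADEON_RBBM_STATUS) & RADEON_RBBM_ACTIVE))
			return 0;		/* every engine reports idle */
		DRM_UDELAY(1);			/* back off for one microsecond */
	}
	return -EBUSY;
}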
#define RADEON_RE_LINE_PATTERN 0x1cd0
#define RADEON_RE_MISC 0x26c4
#define RADEON_RE_TOP_LEFT 0x26c0
@ -947,7 +1151,30 @@ extern int r300_do_cp_cmdbuf(drm_device_t *dev, DRMFILE filp,
#define RADEON_VHA_BACKFRAME0_OFF_V_2 0x1894
#define RADEON_VHA_BACKFRAME1_OFF_PITCH_V_2 0x1898
#define R500_D1CRTC_STATUS 0x609c
#define R500_D2CRTC_STATUS 0x689c
#define R500_CRTC_V_BLANK (1<<0)
#define R500_D1CRTC_FRAME_COUNT 0x60a4
#define R500_D2CRTC_FRAME_COUNT 0x68a4
#define R500_D1MODE_V_COUNTER 0x6530
#define R500_D2MODE_V_COUNTER 0x6d30
#define R500_D1MODE_VBLANK_STATUS 0x6534
#define R500_D2MODE_VBLANK_STATUS 0x6d34
#define R500_VBLANK_OCCURED (1<<0)
#define R500_VBLANK_ACK (1<<4)
#define R500_VBLANK_STAT (1<<12)
#define R500_VBLANK_INT (1<<16)
#define R500_DxMODE_INT_MASK 0x6540
#define R500_D1MODE_INT_MASK (1<<0)
#define R500_D2MODE_INT_MASK (1<<8)
#define R500_DISP_INTERRUPT_STATUS 0x7edc
#define R500_D1_VBLANK_INTERRUPT (1 << 4)
#define R500_D2_VBLANK_INTERRUPT (1 << 5)
/* Constants */
#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */
@ -965,8 +1192,8 @@ extern int r300_do_cp_cmdbuf(drm_device_t *dev, DRMFILE filp,
#define RADEON_PCIGART_TABLE_SIZE (32*1024)
#define RADEON_READ(reg) DRM_READ32( dev_priv->mmio, (reg) )
#define RADEON_WRITE(reg,val) DRM_WRITE32( dev_priv->mmio, (reg), (val) )
#define RADEON_READ(reg) DRM_READ32( dev_priv->mmio, (reg) )
#define RADEON_WRITE(reg,val) DRM_WRITE32( dev_priv->mmio, (reg), (val) )
#define RADEON_READ8(reg) DRM_READ8( dev_priv->mmio, (reg) )
#define RADEON_WRITE8(reg,val) DRM_WRITE8( dev_priv->mmio, (reg), (val) )
@ -984,6 +1211,36 @@ do { \
RADEON_WRITE( RADEON_PCIE_DATA, (val) ); \
} while (0)
#define R500_WRITE_MCIND( addr, val ) \
do { \
RADEON_WRITE(R520_MC_IND_INDEX, 0xff0000 | ((addr) & 0xff)); \
RADEON_WRITE(R520_MC_IND_DATA, (val)); \
RADEON_WRITE(R520_MC_IND_INDEX, 0); \
} while (0)
#define RS480_WRITE_MCIND( addr, val ) \
do { \
RADEON_WRITE( RS480_NB_MC_INDEX, \
((addr) & 0xff) | RS480_NB_MC_IND_WR_EN); \
RADEON_WRITE( RS480_NB_MC_DATA, (val) ); \
RADEON_WRITE( RS480_NB_MC_INDEX, 0xff ); \
} while (0)
#define RS690_WRITE_MCIND( addr, val ) \
do { \
RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_WR_EN | ((addr) & RS690_MC_INDEX_MASK)); \
RADEON_WRITE(RS690_MC_DATA, val); \
RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_WR_ACK); \
} while (0)
#define IGP_WRITE_MCIND( addr, val ) \
do { \
if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) \
RS690_WRITE_MCIND( addr, val ); \
else \
RS480_WRITE_MCIND( addr, val ); \
} while (0)
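/* Editor's note: the three *_WRITE_MCIND macros share the same
 * index/data pattern, and IGP_WRITE_MCIND merely picks the RS690 or
 * RS480 flavour from dev_priv->flags.  A hedged usage example, with the
 * GART table bus address supplied by the caller:
 */
static void radeon_igpgart_set_base_sketch(drm_radeon_private_t *dev_priv,
					   u32 table_bus_addr)
{
	IGP_WRITE_MCIND(RS480_GART_BASE, table_bus_addr);
}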
#define CP_PACKET0( reg, n ) \
(RADEON_CP_PACKET0 | ((n) << 16) | ((reg) >> 2))
#define CP_PACKET0_TABLE( reg, n ) \
@ -1024,23 +1281,43 @@ do { \
} while (0)
#define RADEON_FLUSH_CACHE() do { \
OUT_RING( CP_PACKET0( RADEON_RB3D_DSTCACHE_CTLSTAT, 0 ) ); \
OUT_RING( RADEON_RB3D_DC_FLUSH ); \
if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { \
OUT_RING(CP_PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0)); \
OUT_RING(RADEON_RB3D_DC_FLUSH); \
} else { \
OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); \
OUT_RING(R300_RB3D_DC_FLUSH); \
} \
} while (0)
#define RADEON_PURGE_CACHE() do { \
OUT_RING( CP_PACKET0( RADEON_RB3D_DSTCACHE_CTLSTAT, 0 ) ); \
OUT_RING( RADEON_RB3D_DC_FLUSH_ALL ); \
if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { \
OUT_RING(CP_PACKET0( RADEON_RB3D_DSTCACHE_CTLSTAT, 0)); \
OUT_RING(RADEON_RB3D_DC_FLUSH | RADEON_RB3D_DC_FREE); \
} else { \
OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); \
OUT_RING(R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE ); \
} \
} while (0)
#define RADEON_FLUSH_ZCACHE() do { \
OUT_RING( CP_PACKET0( RADEON_RB3D_ZCACHE_CTLSTAT, 0 ) ); \
OUT_RING( RADEON_RB3D_ZC_FLUSH ); \
if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { \
OUT_RING( CP_PACKET0( RADEON_RB3D_ZCACHE_CTLSTAT, 0 ) ); \
OUT_RING( RADEON_RB3D_ZC_FLUSH ); \
} else { \
OUT_RING( CP_PACKET0( R300_ZB_ZCACHE_CTLSTAT, 0 ) ); \
OUT_RING( R300_ZC_FLUSH ); \
} \
} while (0)
#define RADEON_PURGE_ZCACHE() do { \
OUT_RING( CP_PACKET0( RADEON_RB3D_ZCACHE_CTLSTAT, 0 ) ); \
OUT_RING( RADEON_RB3D_ZC_FLUSH_ALL ); \
if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { \
OUT_RING(CP_PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0)); \
OUT_RING(RADEON_RB3D_ZC_FLUSH | RADEON_RB3D_ZC_FREE); \
} else { \
OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0)); \
OUT_RING(R300_ZC_FLUSH | R300_ZC_FREE); \
} \
} while (0)
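/* Editor's sketch: the cache macros above expand to CP ring packets, so
 * they are only valid between BEGIN_RING() and ADVANCE_RING().  Each
 * macro emits two dwords whichever branch it takes, e.g.:
 */
static void radeon_emit_cache_flush_sketch(drm_radeon_private_t *dev_priv)
{
	RING_LOCALS;

	BEGIN_RING(4);
	RADEON_FLUSH_CACHE();		/* 2 dwords: flush destination cache */
	RADEON_FLUSH_ZCACHE();		/* 2 dwords: flush Z cache */
	ADVANCE_RING();
}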
/* ================================================================
@ -1094,8 +1371,7 @@ do { \
#define BEGIN_RING( n ) do { \
if ( RADEON_VERBOSE ) { \
DRM_INFO( "BEGIN_RING( %d ) in %s\n", \
n, __FUNCTION__ ); \
DRM_INFO( "BEGIN_RING( %d )\n", (n)); \
} \
if ( dev_priv->ring.space <= (n) * sizeof(u32) ) { \
COMMIT_RING(); \
@ -1113,7 +1389,7 @@ do { \
write, dev_priv->ring.tail ); \
} \
if (((dev_priv->ring.tail + _nr) & mask) != write) { \
DRM_ERROR( \
DRM_ERROR( \
"ADVANCE_RING(): mismatch: nr: %x write: %x line: %d\n", \
((dev_priv->ring.tail + _nr) & mask), \
write, __LINE__); \


@ -38,12 +38,130 @@ __FBSDID("$FreeBSD$");
#include "dev/drm/radeon_drm.h"
#include "dev/drm/radeon_drv.h"
static __inline__ u32 radeon_acknowledge_irqs(drm_radeon_private_t * dev_priv,
u32 mask)
void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state)
{
u32 irqs = RADEON_READ(RADEON_GEN_INT_STATUS) & mask;
drm_radeon_private_t *dev_priv = dev->dev_private;
if (state)
dev_priv->irq_enable_reg |= mask;
else
dev_priv->irq_enable_reg &= ~mask;
RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
}
static void r500_vbl_irq_set_state(struct drm_device *dev, u32 mask, int state)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
if (state)
dev_priv->r500_disp_irq_reg |= mask;
else
dev_priv->r500_disp_irq_reg &= ~mask;
RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg);
}
int radeon_enable_vblank(struct drm_device *dev, int crtc)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) {
switch (crtc) {
case 0:
r500_vbl_irq_set_state(dev, R500_D1MODE_INT_MASK, 1);
break;
case 1:
r500_vbl_irq_set_state(dev, R500_D2MODE_INT_MASK, 1);
break;
default:
DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
crtc);
return EINVAL;
}
} else {
switch (crtc) {
case 0:
radeon_irq_set_state(dev, RADEON_CRTC_VBLANK_MASK, 1);
break;
case 1:
radeon_irq_set_state(dev, RADEON_CRTC2_VBLANK_MASK, 1);
break;
default:
DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
crtc);
return EINVAL;
}
}
return 0;
}
void radeon_disable_vblank(struct drm_device *dev, int crtc)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) {
switch (crtc) {
case 0:
r500_vbl_irq_set_state(dev, R500_D1MODE_INT_MASK, 0);
break;
case 1:
r500_vbl_irq_set_state(dev, R500_D2MODE_INT_MASK, 0);
break;
default:
DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
crtc);
break;
}
} else {
switch (crtc) {
case 0:
radeon_irq_set_state(dev, RADEON_CRTC_VBLANK_MASK, 0);
break;
case 1:
radeon_irq_set_state(dev, RADEON_CRTC2_VBLANK_MASK, 0);
break;
default:
DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
crtc);
break;
}
}
}
static __inline__ u32 radeon_acknowledge_irqs(drm_radeon_private_t * dev_priv, u32 *r500_disp_int)
{
u32 irqs = RADEON_READ(RADEON_GEN_INT_STATUS);
u32 irq_mask = RADEON_SW_INT_TEST;
*r500_disp_int = 0;
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) {
/* vbl interrupts in a different place */
if (irqs & R500_DISPLAY_INT_STATUS) {
/* if a display interrupt */
u32 disp_irq;
disp_irq = RADEON_READ(R500_DISP_INTERRUPT_STATUS);
*r500_disp_int = disp_irq;
if (disp_irq & R500_D1_VBLANK_INTERRUPT) {
RADEON_WRITE(R500_D1MODE_VBLANK_STATUS, R500_VBLANK_ACK);
}
if (disp_irq & R500_D2_VBLANK_INTERRUPT) {
RADEON_WRITE(R500_D2MODE_VBLANK_STATUS, R500_VBLANK_ACK);
}
}
irq_mask |= R500_DISPLAY_INT_STATUS;
} else
irq_mask |= RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT;
irqs &= irq_mask;
if (irqs)
RADEON_WRITE(RADEON_GEN_INT_STATUS, irqs);
return irqs;
}
@ -67,35 +185,41 @@ static __inline__ u32 radeon_acknowledge_irqs(drm_radeon_private_t * dev_priv,
irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS)
{
drm_device_t *dev = (drm_device_t *) arg;
struct drm_device *dev = (struct drm_device *) arg;
drm_radeon_private_t *dev_priv =
(drm_radeon_private_t *) dev->dev_private;
u32 stat;
u32 r500_disp_int;
/* Only consider the bits we're interested in - others could be used
* outside the DRM
*/
stat = radeon_acknowledge_irqs(dev_priv, (RADEON_SW_INT_TEST_ACK |
RADEON_CRTC_VBLANK_STAT));
stat = radeon_acknowledge_irqs(dev_priv, &r500_disp_int);
if (!stat)
return IRQ_NONE;
stat &= dev_priv->irq_enable_reg;
/* SW interrupt */
if (stat & RADEON_SW_INT_TEST) {
if (stat & RADEON_SW_INT_TEST)
DRM_WAKEUP(&dev_priv->swi_queue);
}
/* VBLANK interrupt */
if (stat & RADEON_CRTC_VBLANK_STAT) {
atomic_inc(&dev->vbl_received);
DRM_WAKEUP(&dev->vbl_queue);
drm_vbl_send_signals(dev);
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) {
if (r500_disp_int & R500_D1_VBLANK_INTERRUPT)
drm_handle_vblank(dev, 0);
if (r500_disp_int & R500_D2_VBLANK_INTERRUPT)
drm_handle_vblank(dev, 1);
} else {
if (stat & RADEON_CRTC_VBLANK_STAT)
drm_handle_vblank(dev, 0);
if (stat & RADEON_CRTC2_VBLANK_STAT)
drm_handle_vblank(dev, 1);
}
return IRQ_HANDLED;
}
static int radeon_emit_irq(drm_device_t * dev)
static int radeon_emit_irq(struct drm_device * dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
unsigned int ret;
@ -113,7 +237,7 @@ static int radeon_emit_irq(drm_device_t * dev)
return ret;
}
static int radeon_wait_irq(drm_device_t * dev, int swi_nr)
static int radeon_wait_irq(struct drm_device * dev, int swi_nr)
{
drm_radeon_private_t *dev_priv =
(drm_radeon_private_t *) dev->dev_private;
@ -130,59 +254,53 @@ static int radeon_wait_irq(drm_device_t * dev, int swi_nr)
return ret;
}
int radeon_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence)
u32 radeon_get_vblank_counter(struct drm_device *dev, int crtc)
{
drm_radeon_private_t *dev_priv =
(drm_radeon_private_t *) dev->dev_private;
unsigned int cur_vblank;
int ret = 0;
drm_radeon_private_t *dev_priv = dev->dev_private;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
return DRM_ERR(EINVAL);
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
radeon_acknowledge_irqs(dev_priv, RADEON_CRTC_VBLANK_STAT);
if (crtc < 0 || crtc > 1) {
DRM_ERROR("Invalid crtc %d\n", crtc);
return -EINVAL;
}
dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
/* Assume that the user has missed the current sequence number
* by about a day rather than she wants to wait for years
* using vertical blanks...
*/
DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
(((cur_vblank = atomic_read(&dev->vbl_received))
- *sequence) <= (1 << 23)));
*sequence = cur_vblank;
return ret;
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) {
if (crtc == 0)
return RADEON_READ(R500_D1CRTC_FRAME_COUNT);
else
return RADEON_READ(R500_D2CRTC_FRAME_COUNT);
} else {
if (crtc == 0)
return RADEON_READ(RADEON_CRTC_CRNT_FRAME);
else
return RADEON_READ(RADEON_CRTC2_CRNT_FRAME);
}
}
/* Needs the lock as it touches the ring.
*/
int radeon_irq_emit(DRM_IOCTL_ARGS)
int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEVICE;
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_radeon_irq_emit_t emit;
drm_radeon_irq_emit_t *emit = data;
int result;
LOCK_TEST_WITH_RETURN(dev, filp);
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
return DRM_ERR(EINVAL);
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
DRM_COPY_FROM_USER_IOCTL(emit, (drm_radeon_irq_emit_t __user *) data,
sizeof(emit));
result = radeon_emit_irq(dev);
if (DRM_COPY_TO_USER(emit.irq_seq, &result, sizeof(int))) {
if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
DRM_ERROR("copy_to_user\n");
return DRM_ERR(EFAULT);
return -EFAULT;
}
return 0;
@ -190,58 +308,86 @@ int radeon_irq_emit(DRM_IOCTL_ARGS)
/* Doesn't need the hardware lock.
*/
int radeon_irq_wait(DRM_IOCTL_ARGS)
int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEVICE;
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_radeon_irq_wait_t irqwait;
drm_radeon_irq_wait_t *irqwait = data;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
return DRM_ERR(EINVAL);
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
DRM_COPY_FROM_USER_IOCTL(irqwait, (drm_radeon_irq_wait_t __user *) data,
sizeof(irqwait));
return radeon_wait_irq(dev, irqwait.irq_seq);
return radeon_wait_irq(dev, irqwait->irq_seq);
}
/* drm_dma.h hooks
*/
void radeon_driver_irq_preinstall(drm_device_t * dev)
void radeon_driver_irq_preinstall(struct drm_device * dev)
{
drm_radeon_private_t *dev_priv =
(drm_radeon_private_t *) dev->dev_private;
u32 dummy;
/* Disable *all* interrupts */
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690)
RADEON_WRITE(R500_DxMODE_INT_MASK, 0);
RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
/* Clear bits if they're already high */
radeon_acknowledge_irqs(dev_priv, (RADEON_SW_INT_TEST_ACK |
RADEON_CRTC_VBLANK_STAT));
radeon_acknowledge_irqs(dev_priv, &dummy);
}
void radeon_driver_irq_postinstall(drm_device_t * dev)
int radeon_driver_irq_postinstall(struct drm_device * dev)
{
drm_radeon_private_t *dev_priv =
(drm_radeon_private_t *) dev->dev_private;
int ret;
atomic_set(&dev_priv->swi_emitted, 0);
DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
/* Turn on SW and VBL ints */
RADEON_WRITE(RADEON_GEN_INT_CNTL,
RADEON_CRTC_VBLANK_MASK | RADEON_SW_INT_ENABLE);
ret = drm_vblank_init(dev, 2);
if (ret)
return ret;
dev->max_vblank_count = 0x001fffff;
radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1);
return 0;
}
void radeon_driver_irq_uninstall(drm_device_t * dev)
void radeon_driver_irq_uninstall(struct drm_device * dev)
{
drm_radeon_private_t *dev_priv =
(drm_radeon_private_t *) dev->dev_private;
if (!dev_priv)
return;
dev_priv->irq_enabled = 0;
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690)
RADEON_WRITE(R500_DxMODE_INT_MASK, 0);
/* Disable *all* interrupts */
RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
}
int radeon_vblank_crtc_get(struct drm_device *dev)
{
drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
return dev_priv->vblank_crtc;
}
int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value)
{
drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
if (value & ~(DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) {
DRM_ERROR("called with invalid crtc 0x%x\n", (unsigned int)value);
return -EINVAL;
}
dev_priv->vblank_crtc = (unsigned int)value;
return 0;
}


@ -42,7 +42,7 @@ __FBSDID("$FreeBSD$");
*/
static struct mem_block *split_block(struct mem_block *p, int start, int size,
DRMFILE filp)
struct drm_file *file_priv)
{
/* Maybe cut off the start of an existing block */
if (start > p->start) {
@ -52,7 +52,7 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size,
goto out;
newblock->start = start;
newblock->size = p->size - (start - p->start);
newblock->filp = NULL;
newblock->file_priv = NULL;
newblock->next = p->next;
newblock->prev = p;
p->next->prev = newblock;
@ -69,7 +69,7 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size,
goto out;
newblock->start = start + size;
newblock->size = p->size - size;
newblock->filp = NULL;
newblock->file_priv = NULL;
newblock->next = p->next;
newblock->prev = p;
p->next->prev = newblock;
@ -79,20 +79,20 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size,
out:
/* Our block is in the middle */
p->filp = filp;
p->file_priv = file_priv;
return p;
}
static struct mem_block *alloc_block(struct mem_block *heap, int size,
int align2, DRMFILE filp)
int align2, struct drm_file *file_priv)
{
struct mem_block *p;
int mask = (1 << align2) - 1;
list_for_each(p, heap) {
int start = (p->start + mask) & ~mask;
if (p->filp == 0 && start + size <= p->start + p->size)
return split_block(p, start, size, filp);
if (p->file_priv == NULL && start + size <= p->start + p->size)
return split_block(p, start, size, file_priv);
}
return NULL;
@ -111,12 +111,12 @@ static struct mem_block *find_block(struct mem_block *heap, int start)
static void free_block(struct mem_block *p)
{
p->filp = NULL;
p->file_priv = NULL;
/* Assumes a single contiguous range. Needs a special filp in
/* Assumes a single contiguous range. Needs a special file_priv in
* 'heap' to stop it being subsumed.
*/
if (p->next->filp == 0) {
if (p->next->file_priv == NULL) {
struct mem_block *q = p->next;
p->size += q->size;
p->next = q->next;
@ -124,7 +124,7 @@ static void free_block(struct mem_block *p)
drm_free(q, sizeof(*q), DRM_MEM_BUFS);
}
if (p->prev->filp == 0) {
if (p->prev->file_priv == NULL) {
struct mem_block *q = p->prev;
q->size += p->size;
q->next = p->next;
@ -140,28 +140,28 @@ static int init_heap(struct mem_block **heap, int start, int size)
struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFS);
if (!blocks)
return DRM_ERR(ENOMEM);
return -ENOMEM;
*heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFS);
if (!*heap) {
drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFS);
return DRM_ERR(ENOMEM);
return -ENOMEM;
}
blocks->start = start;
blocks->size = size;
blocks->filp = NULL;
blocks->file_priv = NULL;
blocks->next = blocks->prev = *heap;
memset(*heap, 0, sizeof(**heap));
(*heap)->filp = (DRMFILE) - 1;
(*heap)->file_priv = (struct drm_file *) - 1;
(*heap)->next = (*heap)->prev = blocks;
return 0;
}
/* Free all blocks associated with the releasing file.
*/
void radeon_mem_release(DRMFILE filp, struct mem_block *heap)
void radeon_mem_release(struct drm_file *file_priv, struct mem_block *heap)
{
struct mem_block *p;
@ -169,15 +169,15 @@ void radeon_mem_release(DRMFILE filp, struct mem_block *heap)
return;
list_for_each(p, heap) {
if (p->filp == filp)
p->filp = NULL;
if (p->file_priv == file_priv)
p->file_priv = NULL;
}
/* Assumes a single contiguous range. Needs a special filp in
/* Assumes a single contiguous range. Needs a special file_priv in
* 'heap' to stop it being subsumed.
*/
list_for_each(p, heap) {
while (p->filp == 0 && p->next->filp == 0) {
while (p->file_priv == NULL && p->next->file_priv == NULL) {
struct mem_block *q = p->next;
p->size += q->size;
p->next = q->next;
@ -220,98 +220,86 @@ static struct mem_block **get_heap(drm_radeon_private_t * dev_priv, int region)
}
}
int radeon_mem_alloc(DRM_IOCTL_ARGS)
int radeon_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEVICE;
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_radeon_mem_alloc_t alloc;
drm_radeon_mem_alloc_t *alloc = data;
struct mem_block *block, **heap;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
return DRM_ERR(EINVAL);
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
DRM_COPY_FROM_USER_IOCTL(alloc, (drm_radeon_mem_alloc_t __user *) data,
sizeof(alloc));
heap = get_heap(dev_priv, alloc.region);
heap = get_heap(dev_priv, alloc->region);
if (!heap || !*heap)
return DRM_ERR(EFAULT);
return -EFAULT;
/* Make things easier on ourselves: all allocations at least
* 4k aligned.
*/
if (alloc.alignment < 12)
alloc.alignment = 12;
if (alloc->alignment < 12)
alloc->alignment = 12;
block = alloc_block(*heap, alloc.size, alloc.alignment, filp);
block = alloc_block(*heap, alloc->size, alloc->alignment, file_priv);
if (!block)
return DRM_ERR(ENOMEM);
return -ENOMEM;
if (DRM_COPY_TO_USER(alloc.region_offset, &block->start, sizeof(int))) {
if (DRM_COPY_TO_USER(alloc->region_offset, &block->start,
sizeof(int))) {
DRM_ERROR("copy_to_user\n");
return DRM_ERR(EFAULT);
return -EFAULT;
}
return 0;
}
int radeon_mem_free(DRM_IOCTL_ARGS)
int radeon_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEVICE;
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_radeon_mem_free_t memfree;
drm_radeon_mem_free_t *memfree = data;
struct mem_block *block, **heap;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
return DRM_ERR(EINVAL);
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
DRM_COPY_FROM_USER_IOCTL(memfree, (drm_radeon_mem_free_t __user *) data,
sizeof(memfree));
heap = get_heap(dev_priv, memfree.region);
heap = get_heap(dev_priv, memfree->region);
if (!heap || !*heap)
return DRM_ERR(EFAULT);
return -EFAULT;
block = find_block(*heap, memfree.region_offset);
block = find_block(*heap, memfree->region_offset);
if (!block)
return DRM_ERR(EFAULT);
return -EFAULT;
if (block->filp != filp)
return DRM_ERR(EPERM);
if (block->file_priv != file_priv)
return -EPERM;
free_block(block);
return 0;
}
int radeon_mem_init_heap(DRM_IOCTL_ARGS)
int radeon_mem_init_heap(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEVICE;
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_radeon_mem_init_heap_t initheap;
drm_radeon_mem_init_heap_t *initheap = data;
struct mem_block **heap;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
return DRM_ERR(EINVAL);
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
DRM_COPY_FROM_USER_IOCTL(initheap,
(drm_radeon_mem_init_heap_t __user *) data,
sizeof(initheap));
heap = get_heap(dev_priv, initheap.region);
heap = get_heap(dev_priv, initheap->region);
if (!heap)
return DRM_ERR(EFAULT);
return -EFAULT;
if (*heap) {
DRM_ERROR("heap already initialized?");
return DRM_ERR(EFAULT);
return -EFAULT;
}
return init_heap(heap, initheap.start, initheap.size);
return init_heap(heap, initheap->start, initheap->size);
}

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -35,6 +35,8 @@ __FBSDID("$FreeBSD$");
#define SAVAGE_EVENT_USEC_TIMEOUT 5000000 /* 5s */
#define SAVAGE_FREELIST_DEBUG 0
static int savage_do_cleanup_bci(struct drm_device *dev);
static int
savage_bci_wait_fifo_shadow(drm_savage_private_t *dev_priv, unsigned int n)
{
@ -61,7 +63,7 @@ savage_bci_wait_fifo_shadow(drm_savage_private_t *dev_priv, unsigned int n)
DRM_ERROR("failed!\n");
DRM_INFO(" status=0x%08x, threshold=0x%08x\n", status, threshold);
#endif
return DRM_ERR(EBUSY);
return -EBUSY;
}
static int
@ -82,7 +84,7 @@ savage_bci_wait_fifo_s3d(drm_savage_private_t *dev_priv, unsigned int n)
DRM_ERROR("failed!\n");
DRM_INFO(" status=0x%08x\n", status);
#endif
return DRM_ERR(EBUSY);
return -EBUSY;
}
static int
@ -103,7 +105,7 @@ savage_bci_wait_fifo_s4(drm_savage_private_t *dev_priv, unsigned int n)
DRM_ERROR("failed!\n");
DRM_INFO(" status=0x%08x\n", status);
#endif
return DRM_ERR(EBUSY);
return -EBUSY;
}
/*
@ -137,7 +139,7 @@ savage_bci_wait_event_shadow(drm_savage_private_t *dev_priv, uint16_t e)
DRM_INFO(" status=0x%08x, e=0x%04x\n", status, e);
#endif
return DRM_ERR(EBUSY);
return -EBUSY;
}
static int
@ -159,7 +161,7 @@ savage_bci_wait_event_reg(drm_savage_private_t *dev_priv, uint16_t e)
DRM_INFO(" status=0x%08x, e=0x%04x\n", status, e);
#endif
return DRM_ERR(EBUSY);
return -EBUSY;
}
uint16_t savage_bci_emit_event(drm_savage_private_t *dev_priv,
@ -204,11 +206,11 @@ uint16_t savage_bci_emit_event(drm_savage_private_t *dev_priv,
/*
* Freelist management
*/
static int savage_freelist_init(drm_device_t *dev)
static int savage_freelist_init(struct drm_device *dev)
{
drm_savage_private_t *dev_priv = dev->dev_private;
drm_device_dma_t *dma = dev->dma;
drm_buf_t *buf;
struct drm_device_dma *dma = dev->dma;
struct drm_buf *buf;
drm_savage_buf_priv_t *entry;
int i;
DRM_DEBUG("count=%d\n", dma->buf_count);
@ -237,7 +239,7 @@ static int savage_freelist_init(drm_device_t *dev)
return 0;
}
static drm_buf_t *savage_freelist_get(drm_device_t *dev)
static struct drm_buf *savage_freelist_get(struct drm_device *dev)
{
drm_savage_private_t *dev_priv = dev->dev_private;
drm_savage_buf_priv_t *tail = dev_priv->tail.prev;
@ -270,7 +272,7 @@ static drm_buf_t *savage_freelist_get(drm_device_t *dev)
return NULL;
}
void savage_freelist_put(drm_device_t *dev, drm_buf_t *buf)
void savage_freelist_put(struct drm_device *dev, struct drm_buf *buf)
{
drm_savage_private_t *dev_priv = dev->dev_private;
drm_savage_buf_priv_t *entry = buf->dev_private, *prev, *next;
@ -302,7 +304,7 @@ static int savage_dma_init(drm_savage_private_t *dev_priv)
dev_priv->dma_pages = drm_alloc(sizeof(drm_savage_dma_page_t) *
dev_priv->nr_dma_pages, DRM_MEM_DRIVER);
if (dev_priv->dma_pages == NULL)
return DRM_ERR(ENOMEM);
return -ENOMEM;
for (i = 0; i < dev_priv->nr_dma_pages; ++i) {
SET_AGE(&dev_priv->dma_pages[i].age, 0, 0);
@ -365,7 +367,7 @@ uint32_t *savage_dma_alloc(drm_savage_private_t *dev_priv, unsigned int n)
unsigned int cur = dev_priv->current_dma_page;
unsigned int rest = SAVAGE_DMA_PAGE_SIZE -
dev_priv->dma_pages[cur].used;
unsigned int nr_pages = (n - rest + SAVAGE_DMA_PAGE_SIZE-1) /
unsigned int nr_pages = (n - rest + SAVAGE_DMA_PAGE_SIZE - 1) /
SAVAGE_DMA_PAGE_SIZE;
uint32_t *dma_ptr;
unsigned int i;
@ -375,7 +377,7 @@ uint32_t *savage_dma_alloc(drm_savage_private_t *dev_priv, unsigned int n)
if (cur + nr_pages < dev_priv->nr_dma_pages) {
dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle +
cur*SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used;
cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used;
if (n < rest)
rest = n;
dev_priv->dma_pages[cur].used += rest;
@ -384,7 +386,7 @@ uint32_t *savage_dma_alloc(drm_savage_private_t *dev_priv, unsigned int n)
} else {
dev_priv->dma_flush(dev_priv);
nr_pages =
(n + SAVAGE_DMA_PAGE_SIZE-1) / SAVAGE_DMA_PAGE_SIZE;
(n + SAVAGE_DMA_PAGE_SIZE - 1) / SAVAGE_DMA_PAGE_SIZE;
for (i = cur; i < dev_priv->nr_dma_pages; ++i) {
dev_priv->dma_pages[i].age = dev_priv->last_dma_age;
dev_priv->dma_pages[i].used = 0;
@ -444,7 +446,7 @@ static void savage_dma_flush(drm_savage_private_t *dev_priv)
uint32_t *dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle +
cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used;
dev_priv->dma_pages[cur].used += pad;
while(pad != 0) {
while (pad != 0) {
*dma_ptr++ = BCI_CMD_WAIT;
pad--;
}
@ -536,13 +538,13 @@ static void savage_fake_dma_flush(drm_savage_private_t *dev_priv)
dev_priv->first_dma_page = dev_priv->current_dma_page = 0;
}
int savage_driver_load(drm_device_t *dev, unsigned long chipset)
int savage_driver_load(struct drm_device *dev, unsigned long chipset)
{
drm_savage_private_t *dev_priv;
dev_priv = drm_alloc(sizeof(drm_savage_private_t), DRM_MEM_DRIVER);
if (dev_priv == NULL)
return DRM_ERR(ENOMEM);
return -ENOMEM;
memset(dev_priv, 0, sizeof(drm_savage_private_t));
dev->dev_private = (void *)dev_priv;
@ -558,7 +560,7 @@ int savage_driver_load(drm_device_t *dev, unsigned long chipset)
* in drm_addmap. Therefore we add them manually before the maps are
* initialized, and tear them down on last close.
*/
int savage_driver_firstopen(drm_device_t *dev)
int savage_driver_firstopen(struct drm_device *dev)
{
drm_savage_private_t *dev_priv = dev->dev_private;
unsigned long mmio_base, fb_base, fb_size, aperture_base;
@ -585,18 +587,18 @@ int savage_driver_firstopen(drm_device_t *dev)
* MTRRs. */
dev_priv->mtrr[0].base = fb_base;
dev_priv->mtrr[0].size = 0x01000000;
dev_priv->mtrr[0].handle =
dev_priv->mtrr[0].handle =
drm_mtrr_add(dev_priv->mtrr[0].base,
dev_priv->mtrr[0].size, DRM_MTRR_WC);
dev_priv->mtrr[1].base = fb_base+0x02000000;
dev_priv->mtrr[1].base = fb_base + 0x02000000;
dev_priv->mtrr[1].size = 0x02000000;
dev_priv->mtrr[1].handle =
drm_mtrr_add(dev_priv->mtrr[1].base,
dev_priv->mtrr[1].size, DRM_MTRR_WC);
dev_priv->mtrr[2].base = fb_base+0x04000000;
dev_priv->mtrr[2].base = fb_base + 0x04000000;
dev_priv->mtrr[2].size = 0x04000000;
dev_priv->mtrr[2].handle =
drm_mtrr_add(dev_priv->mtrr[2].base,
drm_mtrr_add(dev_priv->mtrr[2].base,
dev_priv->mtrr[2].size, DRM_MTRR_WC);
} else {
DRM_ERROR("strange pci_resource_len %08lx\n",
@ -616,7 +618,7 @@ int savage_driver_firstopen(drm_device_t *dev)
* aperture. */
dev_priv->mtrr[0].base = fb_base;
dev_priv->mtrr[0].size = 0x08000000;
dev_priv->mtrr[0].handle =
dev_priv->mtrr[0].handle =
drm_mtrr_add(dev_priv->mtrr[0].base,
dev_priv->mtrr[0].size, DRM_MTRR_WC);
} else {
@ -655,7 +657,7 @@ int savage_driver_firstopen(drm_device_t *dev)
/*
* Delete MTRRs and free device-private data.
*/
void savage_driver_lastclose(drm_device_t *dev)
void savage_driver_lastclose(struct drm_device *dev)
{
drm_savage_private_t *dev_priv = dev->dev_private;
int i;
@ -667,7 +669,7 @@ void savage_driver_lastclose(drm_device_t *dev)
dev_priv->mtrr[i].size, DRM_MTRR_WC);
}
int savage_driver_unload(drm_device_t *dev)
int savage_driver_unload(struct drm_device *dev)
{
drm_savage_private_t *dev_priv = dev->dev_private;
@ -676,22 +678,22 @@ int savage_driver_unload(drm_device_t *dev)
return 0;
}
static int savage_do_init_bci(drm_device_t *dev, drm_savage_init_t *init)
static int savage_do_init_bci(struct drm_device *dev, drm_savage_init_t *init)
{
drm_savage_private_t *dev_priv = dev->dev_private;
if (init->fb_bpp != 16 && init->fb_bpp != 32) {
DRM_ERROR("invalid frame buffer bpp %d!\n", init->fb_bpp);
return DRM_ERR(EINVAL);
return -EINVAL;
}
if (init->depth_bpp != 16 && init->depth_bpp != 32) {
DRM_ERROR("invalid depth buffer bpp %d!\n", init->fb_bpp);
return DRM_ERR(EINVAL);
return -EINVAL;
}
if (init->dma_type != SAVAGE_DMA_AGP &&
init->dma_type != SAVAGE_DMA_PCI) {
DRM_ERROR("invalid dma memory type %d!\n", init->dma_type);
return DRM_ERR(EINVAL);
return -EINVAL;
}
dev_priv->cob_size = init->cob_size;
@ -711,35 +713,36 @@ static int savage_do_init_bci(drm_device_t *dev, drm_savage_init_t *init)
dev_priv->texture_offset = init->texture_offset;
dev_priv->texture_size = init->texture_size;
DRM_GETSAREA();
dev_priv->sarea = drm_getsarea(dev);
if (!dev_priv->sarea) {
DRM_ERROR("could not find sarea!\n");
savage_do_cleanup_bci(dev);
return DRM_ERR(EINVAL);
return -EINVAL;
}
if (init->status_offset != 0) {
dev_priv->status = drm_core_findmap(dev, init->status_offset);
if (!dev_priv->status) {
DRM_ERROR("could not find shadow status region!\n");
savage_do_cleanup_bci(dev);
return DRM_ERR(EINVAL);
return -EINVAL;
}
} else {
dev_priv->status = NULL;
}
if (dev_priv->dma_type == SAVAGE_DMA_AGP && init->buffers_offset) {
dev->agp_buffer_token = init->buffers_offset;
dev->agp_buffer_map = drm_core_findmap(dev,
init->buffers_offset);
if (!dev->agp_buffer_map) {
DRM_ERROR("could not find DMA buffer region!\n");
savage_do_cleanup_bci(dev);
return DRM_ERR(EINVAL);
return -EINVAL;
}
drm_core_ioremap(dev->agp_buffer_map, dev);
if (!dev->agp_buffer_map) {
DRM_ERROR("failed to ioremap DMA buffer region!\n");
savage_do_cleanup_bci(dev);
return DRM_ERR(ENOMEM);
return -ENOMEM;
}
}
if (init->agp_textures_offset) {
@ -748,7 +751,7 @@ static int savage_do_init_bci(drm_device_t *dev, drm_savage_init_t *init)
if (!dev_priv->agp_textures) {
DRM_ERROR("could not find agp texture region!\n");
savage_do_cleanup_bci(dev);
return DRM_ERR(EINVAL);
return -EINVAL;
}
} else {
dev_priv->agp_textures = NULL;
@ -759,39 +762,39 @@ static int savage_do_init_bci(drm_device_t *dev, drm_savage_init_t *init)
DRM_ERROR("command DMA not supported on "
"Savage3D/MX/IX.\n");
savage_do_cleanup_bci(dev);
return DRM_ERR(EINVAL);
return -EINVAL;
}
if (dev->dma && dev->dma->buflist) {
DRM_ERROR("command and vertex DMA not supported "
"at the same time.\n");
savage_do_cleanup_bci(dev);
return DRM_ERR(EINVAL);
return -EINVAL;
}
dev_priv->cmd_dma = drm_core_findmap(dev, init->cmd_dma_offset);
if (!dev_priv->cmd_dma) {
DRM_ERROR("could not find command DMA region!\n");
savage_do_cleanup_bci(dev);
return DRM_ERR(EINVAL);
return -EINVAL;
}
if (dev_priv->dma_type == SAVAGE_DMA_AGP) {
if (dev_priv->cmd_dma->type != _DRM_AGP) {
DRM_ERROR("AGP command DMA region is not a "
"_DRM_AGP map!\n");
savage_do_cleanup_bci(dev);
return DRM_ERR(EINVAL);
return -EINVAL;
}
drm_core_ioremap(dev_priv->cmd_dma, dev);
if (!dev_priv->cmd_dma->handle) {
DRM_ERROR("failed to ioremap command "
"DMA region!\n");
savage_do_cleanup_bci(dev);
return DRM_ERR(ENOMEM);
return -ENOMEM;
}
} else if (dev_priv->cmd_dma->type != _DRM_CONSISTENT) {
DRM_ERROR("PCI command DMA region is not a "
"_DRM_CONSISTENT map!\n");
savage_do_cleanup_bci(dev);
return DRM_ERR(EINVAL);
return -EINVAL;
}
} else {
dev_priv->cmd_dma = NULL;
@ -808,7 +811,7 @@ static int savage_do_init_bci(drm_device_t *dev, drm_savage_init_t *init)
if (!dev_priv->fake_dma.handle) {
DRM_ERROR("could not allocate faked DMA buffer!\n");
savage_do_cleanup_bci(dev);
return DRM_ERR(ENOMEM);
return -ENOMEM;
}
dev_priv->cmd_dma = &dev_priv->fake_dma;
dev_priv->dma_flush = savage_fake_dma_flush;
@ -833,8 +836,8 @@ static int savage_do_init_bci(drm_device_t *dev, drm_savage_init_t *init)
depth_tile_format = SAVAGE_BD_TILE_DEST;
}
front_stride = dev_priv->front_pitch / (dev_priv->fb_bpp / 8);
back_stride = dev_priv->back_pitch / (dev_priv->fb_bpp / 8);
depth_stride =
back_stride = dev_priv->back_pitch / (dev_priv->fb_bpp / 8);
depth_stride =
dev_priv->depth_pitch / (dev_priv->depth_bpp / 8);
dev_priv->front_bd = front_stride | SAVAGE_BD_BW_DISABLE |
@ -885,19 +888,19 @@ static int savage_do_init_bci(drm_device_t *dev, drm_savage_init_t *init)
if (savage_freelist_init(dev) < 0) {
DRM_ERROR("could not initialize freelist\n");
savage_do_cleanup_bci(dev);
return DRM_ERR(ENOMEM);
return -ENOMEM;
}
if (savage_dma_init(dev_priv) < 0) {
if (savage_dma_init(dev_priv) < 0) {
DRM_ERROR("could not initialize command DMA\n");
savage_do_cleanup_bci(dev);
return DRM_ERR(ENOMEM);
return -ENOMEM;
}
return 0;
}
int savage_do_cleanup_bci(drm_device_t *dev)
static int savage_do_cleanup_bci(struct drm_device *dev)
{
drm_savage_private_t *dev_priv = dev->dev_private;
@ -927,59 +930,46 @@ int savage_do_cleanup_bci(drm_device_t *dev)
return 0;
}
static int savage_bci_init(DRM_IOCTL_ARGS)
static int savage_bci_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEVICE;
drm_savage_init_t init;
drm_savage_init_t *init = data;
LOCK_TEST_WITH_RETURN(dev, filp);
LOCK_TEST_WITH_RETURN(dev, file_priv);
DRM_COPY_FROM_USER_IOCTL(init, (drm_savage_init_t __user *)data,
sizeof(init));
switch (init.func) {
switch (init->func) {
case SAVAGE_INIT_BCI:
return savage_do_init_bci(dev, &init);
return savage_do_init_bci(dev, init);
case SAVAGE_CLEANUP_BCI:
return savage_do_cleanup_bci(dev);
}
return DRM_ERR(EINVAL);
return -EINVAL;
}
static int savage_bci_event_emit(DRM_IOCTL_ARGS)
static int savage_bci_event_emit(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEVICE;
drm_savage_private_t *dev_priv = dev->dev_private;
drm_savage_event_emit_t event;
drm_savage_event_emit_t *event = data;
DRM_DEBUG("\n");
LOCK_TEST_WITH_RETURN(dev, filp);
LOCK_TEST_WITH_RETURN(dev, file_priv);
DRM_COPY_FROM_USER_IOCTL(event, (drm_savage_event_emit_t __user *)data,
sizeof(event));
event->count = savage_bci_emit_event(dev_priv, event->flags);
event->count |= dev_priv->event_wrap << 16;
event.count = savage_bci_emit_event(dev_priv, event.flags);
event.count |= dev_priv->event_wrap << 16;
DRM_COPY_TO_USER_IOCTL((drm_savage_event_emit_t __user *)data,
event, sizeof(event));
return 0;
}
static int savage_bci_event_wait(DRM_IOCTL_ARGS)
static int savage_bci_event_wait(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEVICE;
drm_savage_private_t *dev_priv = dev->dev_private;
drm_savage_event_wait_t event;
drm_savage_event_wait_t *event = data;
unsigned int event_e, hw_e;
unsigned int event_w, hw_w;
DRM_DEBUG("\n");
DRM_COPY_FROM_USER_IOCTL(event, (drm_savage_event_wait_t __user *)data,
sizeof(event));
UPDATE_EVENT_COUNTER();
if (dev_priv->status_ptr)
hw_e = dev_priv->status_ptr[1] & 0xffff;
@ -989,14 +979,14 @@ static int savage_bci_event_wait(DRM_IOCTL_ARGS)
if (hw_e > dev_priv->event_counter)
hw_w--; /* hardware hasn't passed the last wrap yet */
event_e = event.count & 0xffff;
event_w = event.count >> 16;
event_e = event->count & 0xffff;
event_w = event->count >> 16;
/* Don't need to wait if
* - event counter wrapped since the event was emitted or
* - the hardware has advanced up to or over the event to wait for.
*/
if (event_w < hw_w || (event_w == hw_w && event_e <= hw_e) )
if (event_w < hw_w || (event_w == hw_w && event_e <= hw_e))
return 0;
else
return dev_priv->wait_evnt(dev_priv, event_e);
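
The wait test above compares a 32-bit event cookie, with the wrap count in the high 16 bits and the event number in the low 16 bits, against the hardware's current counter and the driver's wrap count. A stand-alone model of that decision (not driver code, but the same packing and comparison):

/* Decide whether a wait is still required for a packed event cookie. */
static int savage_event_pending(unsigned int cookie,
				unsigned int hw_e, unsigned int hw_w)
{
	unsigned int event_e = cookie & 0xffff;	/* event number (low 16 bits) */
	unsigned int event_w = cookie >> 16;	/* wrap count (high 16 bits) */

	/* no wait if the counter wrapped past the event, or the hardware
	 * has reached it within the same wrap */
	if (event_w < hw_w || (event_w == hw_w && event_e <= hw_e))
		return 0;
	return 1;
}

For example, a cookie of 0x00020005 (wrap 2, event 5) no longer requires a wait once the hardware reports wrap 2 with its counter at 7.
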
@ -1006,71 +996,68 @@ static int savage_bci_event_wait(DRM_IOCTL_ARGS)
* DMA buffer management
*/
static int savage_bci_get_buffers(DRMFILE filp, drm_device_t *dev, drm_dma_t *d)
static int savage_bci_get_buffers(struct drm_device *dev,
struct drm_file *file_priv,
struct drm_dma *d)
{
drm_buf_t *buf;
struct drm_buf *buf;
int i;
for (i = d->granted_count; i < d->request_count; i++) {
buf = savage_freelist_get(dev);
if (!buf)
return DRM_ERR(EAGAIN);
return -EAGAIN;
buf->filp = filp;
buf->file_priv = file_priv;
if (DRM_COPY_TO_USER(&d->request_indices[i],
&buf->idx, sizeof(buf->idx)))
return DRM_ERR(EFAULT);
return -EFAULT;
if (DRM_COPY_TO_USER(&d->request_sizes[i],
&buf->total, sizeof(buf->total)))
return DRM_ERR(EFAULT);
return -EFAULT;
d->granted_count++;
}
return 0;
}
int savage_bci_buffers(DRM_IOCTL_ARGS)
int savage_bci_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEVICE;
drm_device_dma_t *dma = dev->dma;
drm_dma_t d;
struct drm_device_dma *dma = dev->dma;
struct drm_dma *d = data;
int ret = 0;
LOCK_TEST_WITH_RETURN(dev, filp);
DRM_COPY_FROM_USER_IOCTL(d, (drm_dma_t __user *)data, sizeof(d));
LOCK_TEST_WITH_RETURN(dev, file_priv);
/* Please don't send us buffers.
*/
if (d.send_count != 0) {
if (d->send_count != 0) {
DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
DRM_CURRENTPID, d.send_count);
return DRM_ERR(EINVAL);
DRM_CURRENTPID, d->send_count);
return -EINVAL;
}
/* We'll send you buffers.
*/
if (d.request_count < 0 || d.request_count > dma->buf_count) {
if (d->request_count < 0 || d->request_count > dma->buf_count) {
DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
DRM_CURRENTPID, d.request_count, dma->buf_count);
return DRM_ERR(EINVAL);
DRM_CURRENTPID, d->request_count, dma->buf_count);
return -EINVAL;
}
d.granted_count = 0;
d->granted_count = 0;
if (d.request_count) {
ret = savage_bci_get_buffers(filp, dev, &d);
if (d->request_count) {
ret = savage_bci_get_buffers(dev, file_priv, d);
}
DRM_COPY_TO_USER_IOCTL((drm_dma_t __user *)data, d, sizeof(d));
return ret;
}
void savage_reclaim_buffers(drm_device_t *dev, DRMFILE filp)
void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
{
drm_device_dma_t *dma = dev->dma;
struct drm_device_dma *dma = dev->dma;
drm_savage_private_t *dev_priv = dev->dev_private;
int i;
@ -1081,13 +1068,11 @@ void savage_reclaim_buffers(drm_device_t *dev, DRMFILE filp)
if (!dma->buflist)
return;
/*i830_flush_queue(dev);*/
for (i = 0; i < dma->buf_count; i++) {
drm_buf_t *buf = dma->buflist[i];
struct drm_buf *buf = dma->buflist[i];
drm_savage_buf_priv_t *buf_priv = buf->dev_private;
if (buf->filp == filp && buf_priv &&
if (buf->file_priv == file_priv && buf_priv &&
buf_priv->next == NULL && buf_priv->prev == NULL) {
uint16_t event;
DRM_DEBUG("reclaimed from client\n");
@ -1097,15 +1082,14 @@ void savage_reclaim_buffers(drm_device_t *dev, DRMFILE filp)
}
}
drm_core_reclaim_buffers(dev, filp);
drm_core_reclaim_buffers(dev, file_priv);
}
drm_ioctl_desc_t savage_ioctls[] = {
[DRM_IOCTL_NR(DRM_SAVAGE_BCI_INIT)] = {savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
[DRM_IOCTL_NR(DRM_SAVAGE_BCI_CMDBUF)] = {savage_bci_cmdbuf, DRM_AUTH},
[DRM_IOCTL_NR(DRM_SAVAGE_BCI_EVENT_EMIT)] = {savage_bci_event_emit, DRM_AUTH},
[DRM_IOCTL_NR(DRM_SAVAGE_BCI_EVENT_WAIT)] = {savage_bci_event_wait, DRM_AUTH},
struct drm_ioctl_desc savage_ioctls[] = {
DRM_IOCTL_DEF(DRM_SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH),
DRM_IOCTL_DEF(DRM_SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH),
DRM_IOCTL_DEF(DRM_SAVAGE_BCI_EVENT_WAIT, savage_bci_event_wait, DRM_AUTH),
};
int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls);
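
The ioctl table itself moves from open-coded designated initializers to the DRM_IOCTL_DEF() macro, which still keys each entry by DRM_IOCTL_NR() but also records the ioctl command alongside the handler and the DRM_AUTH/DRM_MASTER/DRM_ROOT_ONLY permission flags the dispatcher enforces. Roughly, under the assumption that the macro keeps the old array layout (the authoritative definition lives in drmP.h and may differ in detail):

/* approximate expansion of the macro used above */
#define DRM_IOCTL_DEF(ioctl, _func, _flags)	\
	[DRM_IOCTL_NR(ioctl)] = { ioctl, _func, _flags }
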

@ -50,7 +50,7 @@ __FBSDID("$FreeBSD$");
typedef struct _drm_savage_sarea {
/* LRU lists for texture memory in agp space and on the card.
*/
drm_tex_region_t texList[SAVAGE_NR_TEX_HEAPS][SAVAGE_NR_TEX_REGIONS+1];
struct drm_tex_region texList[SAVAGE_NR_TEX_HEAPS][SAVAGE_NR_TEX_REGIONS+1];
unsigned int texAge[SAVAGE_NR_TEX_HEAPS];
/* Mechanism to validate card state.
@ -115,7 +115,7 @@ typedef struct drm_savage_cmdbuf {
unsigned int vb_size; /* size of client vertex buffer in bytes */
unsigned int vb_stride; /* stride of vertices in 32bit words */
/* boxes in client's address space */
drm_clip_rect_t __user *box_addr;
struct drm_clip_rect __user *box_addr;
unsigned int nbox; /* number of clipping boxes */
} drm_savage_cmdbuf_t;

@ -40,7 +40,7 @@ static drm_pci_id_list_t savage_pciidlist[] = {
savage_PCI_IDS
};
static void savage_configure(drm_device_t *dev)
static void savage_configure(struct drm_device *dev)
{
dev->driver.buf_priv_size = sizeof(drm_savage_buf_priv_t);
dev->driver.load = savage_driver_load;
@ -76,9 +76,9 @@ savage_probe(device_t dev)
static int
savage_attach(device_t nbdev)
{
drm_device_t *dev = device_get_softc(nbdev);
struct drm_device *dev = device_get_softc(nbdev);
bzero(dev, sizeof(drm_device_t));
bzero(dev, sizeof(struct drm_device));
savage_configure(dev);
return drm_attach(nbdev, savage_pciidlist);
}
@ -95,7 +95,7 @@ static device_method_t savage_methods[] = {
static driver_t savage_driver = {
"drm",
savage_methods,
sizeof(drm_device_t)
sizeof(struct drm_device)
};
extern devclass_t drm_devclass;

@ -61,7 +61,7 @@ typedef struct drm_savage_buf_priv {
struct drm_savage_buf_priv *next;
struct drm_savage_buf_priv *prev;
drm_savage_age_t age;
drm_buf_t *buf;
struct drm_buf *buf;
} drm_savage_buf_priv_t;
typedef struct drm_savage_dma_page {
@ -107,7 +107,7 @@ enum savage_family {
S3_LAST
};
extern drm_ioctl_desc_t savage_ioctls[];
extern struct drm_ioctl_desc savage_ioctls[];
extern int savage_max_ioctl;
#define S3_SAVAGE3D_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE_MX))
@ -195,34 +195,34 @@ typedef struct drm_savage_private {
/* Err, there is a macro wait_event in include/linux/wait.h.
* Avoid unwanted macro expansion. */
void (*emit_clip_rect)(struct drm_savage_private *dev_priv,
const drm_clip_rect_t *pbox);
const struct drm_clip_rect *pbox);
void (*dma_flush)(struct drm_savage_private *dev_priv);
} drm_savage_private_t;
/* ioctls */
extern int savage_bci_cmdbuf(DRM_IOCTL_ARGS);
extern int savage_bci_buffers(DRM_IOCTL_ARGS);
extern int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int savage_bci_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv);
/* BCI functions */
extern uint16_t savage_bci_emit_event(drm_savage_private_t *dev_priv,
unsigned int flags);
extern void savage_freelist_put(drm_device_t *dev, drm_buf_t *buf);
extern void savage_freelist_put(struct drm_device *dev, struct drm_buf *buf);
extern void savage_dma_reset(drm_savage_private_t *dev_priv);
extern void savage_dma_wait(drm_savage_private_t *dev_priv, unsigned int page);
extern uint32_t *savage_dma_alloc(drm_savage_private_t *dev_priv,
unsigned int n);
extern int savage_driver_load(drm_device_t *dev, unsigned long chipset);
extern int savage_driver_firstopen(drm_device_t *dev);
extern void savage_driver_lastclose(drm_device_t *dev);
extern int savage_driver_unload(drm_device_t *dev);
extern int savage_do_cleanup_bci(drm_device_t *dev);
extern void savage_reclaim_buffers(drm_device_t *dev, DRMFILE filp);
extern int savage_driver_load(struct drm_device *dev, unsigned long chipset);
extern int savage_driver_firstopen(struct drm_device *dev);
extern void savage_driver_lastclose(struct drm_device *dev);
extern int savage_driver_unload(struct drm_device *dev);
extern void savage_reclaim_buffers(struct drm_device *dev,
struct drm_file *file_priv);
/* state functions */
extern void savage_emit_clip_rect_s3d(drm_savage_private_t *dev_priv,
const drm_clip_rect_t *pbox);
const struct drm_clip_rect *pbox);
extern void savage_emit_clip_rect_s4(drm_savage_private_t *dev_priv,
const drm_clip_rect_t *pbox);
const struct drm_clip_rect *pbox);
#define SAVAGE_FB_SIZE_S3 0x01000000 /* 16MB */
#define SAVAGE_FB_SIZE_S4 0x02000000 /* 32MB */
@ -240,7 +240,7 @@ extern void savage_emit_clip_rect_s4(drm_savage_private_t *dev_priv,
*/
#define SAVAGE_STATUS_WORD0 0x48C00
#define SAVAGE_STATUS_WORD1 0x48C04
#define SAVAGE_ALT_STATUS_WORD0 0x48C60
#define SAVAGE_ALT_STATUS_WORD0 0x48C60
#define SAVAGE_FIFO_USED_MASK_S3D 0x0001ffff
#define SAVAGE_FIFO_USED_MASK_S4 0x001fffff
@ -313,7 +313,7 @@ extern void savage_emit_clip_rect_s4(drm_savage_private_t *dev_priv,
#define SAVAGE_DESTCTRL_S3D 0x34
#define SAVAGE_SCSTART_S3D 0x35
#define SAVAGE_SCEND_S3D 0x36
#define SAVAGE_ZWATERMARK_S3D 0x37
#define SAVAGE_ZWATERMARK_S3D 0x37
#define SAVAGE_DESTTEXRWWATERMARK_S3D 0x38
/* common stuff */
#define SAVAGE_VERTBUFADDR 0x3e

@ -30,33 +30,33 @@ __FBSDID("$FreeBSD$");
#include "dev/drm/savage_drv.h"
void savage_emit_clip_rect_s3d(drm_savage_private_t *dev_priv,
const drm_clip_rect_t *pbox)
const struct drm_clip_rect *pbox)
{
uint32_t scstart = dev_priv->state.s3d.new_scstart;
uint32_t scend = dev_priv->state.s3d.new_scend;
uint32_t scend = dev_priv->state.s3d.new_scend;
scstart = (scstart & ~SAVAGE_SCISSOR_MASK_S3D) |
((uint32_t)pbox->x1 & 0x000007ff) |
((uint32_t)pbox->x1 & 0x000007ff) |
(((uint32_t)pbox->y1 << 16) & 0x07ff0000);
scend = (scend & ~SAVAGE_SCISSOR_MASK_S3D) |
(((uint32_t)pbox->x2-1) & 0x000007ff) |
((((uint32_t)pbox->y2-1) << 16) & 0x07ff0000);
scend = (scend & ~SAVAGE_SCISSOR_MASK_S3D) |
(((uint32_t)pbox->x2 - 1) & 0x000007ff) |
((((uint32_t)pbox->y2 - 1) << 16) & 0x07ff0000);
if (scstart != dev_priv->state.s3d.scstart ||
scend != dev_priv->state.s3d.scend) {
DMA_LOCALS;
BEGIN_DMA(4);
DMA_WRITE(BCI_CMD_WAIT|BCI_CMD_WAIT_3D);
DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D);
DMA_SET_REGISTERS(SAVAGE_SCSTART_S3D, 2);
DMA_WRITE(scstart);
DMA_WRITE(scend);
dev_priv->state.s3d.scstart = scstart;
dev_priv->state.s3d.scend = scend;
dev_priv->state.s3d.scend = scend;
dev_priv->waiting = 1;
DMA_COMMIT();
}
}
void savage_emit_clip_rect_s4(drm_savage_private_t *dev_priv,
const drm_clip_rect_t *pbox)
const struct drm_clip_rect *pbox)
{
uint32_t drawctrl0 = dev_priv->state.s4.new_drawctrl0;
uint32_t drawctrl1 = dev_priv->state.s4.new_drawctrl1;
@ -64,13 +64,13 @@ void savage_emit_clip_rect_s4(drm_savage_private_t *dev_priv,
((uint32_t)pbox->x1 & 0x000007ff) |
(((uint32_t)pbox->y1 << 12) & 0x00fff000);
drawctrl1 = (drawctrl1 & ~SAVAGE_SCISSOR_MASK_S4) |
(((uint32_t)pbox->x2-1) & 0x000007ff) |
((((uint32_t)pbox->y2-1) << 12) & 0x00fff000);
(((uint32_t)pbox->x2 - 1) & 0x000007ff) |
((((uint32_t)pbox->y2 - 1) << 12) & 0x00fff000);
if (drawctrl0 != dev_priv->state.s4.drawctrl0 ||
drawctrl1 != dev_priv->state.s4.drawctrl1) {
DMA_LOCALS;
BEGIN_DMA(4);
DMA_WRITE(BCI_CMD_WAIT|BCI_CMD_WAIT_3D);
DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D);
DMA_SET_REGISTERS(SAVAGE_DRAWCTRL0_S4, 2);
DMA_WRITE(drawctrl0);
DMA_WRITE(drawctrl1);
@ -86,22 +86,22 @@ static int savage_verify_texaddr(drm_savage_private_t *dev_priv, int unit,
{
if ((addr & 6) != 2) { /* reserved bits */
DRM_ERROR("bad texAddr%d %08x (reserved bits)\n", unit, addr);
return DRM_ERR(EINVAL);
return -EINVAL;
}
if (!(addr & 1)) { /* local */
addr &= ~7;
if (addr < dev_priv->texture_offset ||
addr >= dev_priv->texture_offset+dev_priv->texture_size) {
if (addr < dev_priv->texture_offset ||
addr >= dev_priv->texture_offset + dev_priv->texture_size) {
DRM_ERROR
("bad texAddr%d %08x (local addr out of range)\n",
unit, addr);
return DRM_ERR(EINVAL);
return -EINVAL;
}
} else { /* AGP */
if (!dev_priv->agp_textures) {
DRM_ERROR("bad texAddr%d %08x (AGP not available)\n",
unit, addr);
return DRM_ERR(EINVAL);
return -EINVAL;
}
addr &= ~7;
if (addr < dev_priv->agp_textures->offset ||
@ -110,17 +110,17 @@ static int savage_verify_texaddr(drm_savage_private_t *dev_priv, int unit,
DRM_ERROR
("bad texAddr%d %08x (AGP addr out of range)\n",
unit, addr);
return DRM_ERR(EINVAL);
return -EINVAL;
}
}
return 0;
}
#define SAVE_STATE(reg,where) \
if(start <= reg && start+count > reg) \
if(start <= reg && start + count > reg) \
dev_priv->state.where = regs[reg - start]
#define SAVE_STATE_MASK(reg,where,mask) do { \
if(start <= reg && start+count > reg) { \
if(start <= reg && start + count > reg) { \
uint32_t tmp; \
tmp = regs[reg - start]; \
dev_priv->state.where = (tmp & (mask)) | \
@ -132,10 +132,10 @@ static int savage_verify_state_s3d(drm_savage_private_t *dev_priv,
const uint32_t *regs)
{
if (start < SAVAGE_TEXPALADDR_S3D ||
start+count-1 > SAVAGE_DESTTEXRWWATERMARK_S3D) {
start + count - 1 > SAVAGE_DESTTEXRWWATERMARK_S3D) {
DRM_ERROR("invalid register range (0x%04x-0x%04x)\n",
start, start+count-1);
return DRM_ERR(EINVAL);
start, start + count - 1);
return -EINVAL;
}
SAVE_STATE_MASK(SAVAGE_SCSTART_S3D, s3d.new_scstart,
@ -145,7 +145,7 @@ static int savage_verify_state_s3d(drm_savage_private_t *dev_priv,
/* if any texture regs were changed ... */
if (start <= SAVAGE_TEXCTRL_S3D &&
start+count > SAVAGE_TEXPALADDR_S3D) {
start + count > SAVAGE_TEXPALADDR_S3D) {
/* ... check texture state */
SAVE_STATE(SAVAGE_TEXCTRL_S3D, s3d.texctrl);
SAVE_STATE(SAVAGE_TEXADDR_S3D, s3d.texaddr);
@ -164,10 +164,10 @@ static int savage_verify_state_s4(drm_savage_private_t *dev_priv,
int ret = 0;
if (start < SAVAGE_DRAWLOCALCTRL_S4 ||
start+count-1 > SAVAGE_TEXBLENDCOLOR_S4) {
start + count - 1 > SAVAGE_TEXBLENDCOLOR_S4) {
DRM_ERROR("invalid register range (0x%04x-0x%04x)\n",
start, start+count-1);
return DRM_ERR(EINVAL);
start, start + count - 1);
return -EINVAL;
}
SAVE_STATE_MASK(SAVAGE_DRAWCTRL0_S4, s4.new_drawctrl0,
@ -215,14 +215,14 @@ static int savage_dispatch_state(drm_savage_private_t *dev_priv,
return ret;
/* scissor regs are emitted in savage_dispatch_draw */
if (start < SAVAGE_SCSTART_S3D) {
if (start+count > SAVAGE_SCEND_S3D+1)
count2 = count - (SAVAGE_SCEND_S3D+1 - start);
if (start+count > SAVAGE_SCSTART_S3D)
if (start + count > SAVAGE_SCEND_S3D + 1)
count2 = count - (SAVAGE_SCEND_S3D + 1 - start);
if (start + count > SAVAGE_SCSTART_S3D)
count = SAVAGE_SCSTART_S3D - start;
} else if (start <= SAVAGE_SCEND_S3D) {
if (start+count > SAVAGE_SCEND_S3D+1) {
count -= SAVAGE_SCEND_S3D+1 - start;
start = SAVAGE_SCEND_S3D+1;
if (start + count > SAVAGE_SCEND_S3D + 1) {
count -= SAVAGE_SCEND_S3D + 1 - start;
start = SAVAGE_SCEND_S3D + 1;
} else
return 0;
}
@ -232,24 +232,24 @@ static int savage_dispatch_state(drm_savage_private_t *dev_priv,
return ret;
/* scissor regs are emitted in savage_dispatch_draw */
if (start < SAVAGE_DRAWCTRL0_S4) {
if (start+count > SAVAGE_DRAWCTRL1_S4+1)
if (start + count > SAVAGE_DRAWCTRL1_S4 + 1)
count2 = count -
(SAVAGE_DRAWCTRL1_S4 + 1 - start);
if (start+count > SAVAGE_DRAWCTRL0_S4)
if (start + count > SAVAGE_DRAWCTRL0_S4)
count = SAVAGE_DRAWCTRL0_S4 - start;
} else if (start <= SAVAGE_DRAWCTRL1_S4) {
if (start+count > SAVAGE_DRAWCTRL1_S4+1) {
count -= SAVAGE_DRAWCTRL1_S4+1 - start;
start = SAVAGE_DRAWCTRL1_S4+1;
if (start + count > SAVAGE_DRAWCTRL1_S4 + 1) {
count -= SAVAGE_DRAWCTRL1_S4 + 1 - start;
start = SAVAGE_DRAWCTRL1_S4 + 1;
} else
return 0;
}
}
bci_size = count + (count+254)/255 + count2 + (count2+254)/255;
bci_size = count + (count + 254) / 255 + count2 + (count2 + 254) / 255;
if (cmd_header->state.global) {
BEGIN_DMA(bci_size+1);
BEGIN_DMA(bci_size + 1);
DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D);
dev_priv->waiting = 1;
} else {
@ -278,7 +278,7 @@ static int savage_dispatch_state(drm_savage_private_t *dev_priv,
static int savage_dispatch_dma_prim(drm_savage_private_t *dev_priv,
const drm_savage_cmd_header_t *cmd_header,
const drm_buf_t *dmabuf)
const struct drm_buf *dmabuf)
{
unsigned char reorder = 0;
unsigned int prim = cmd_header->prim.prim;
@ -289,8 +289,8 @@ static int savage_dispatch_dma_prim(drm_savage_private_t *dev_priv,
BCI_LOCALS;
if (!dmabuf) {
DRM_ERROR("called without dma buffers!\n");
return DRM_ERR(EINVAL);
DRM_ERROR("called without dma buffers!\n");
return -EINVAL;
}
if (!n)
@ -304,7 +304,7 @@ static int savage_dispatch_dma_prim(drm_savage_private_t *dev_priv,
if (n % 3 != 0) {
DRM_ERROR("wrong number of vertices %u in TRILIST\n",
n);
return DRM_ERR(EINVAL);
return -EINVAL;
}
break;
case SAVAGE_PRIM_TRISTRIP:
@ -313,18 +313,18 @@ static int savage_dispatch_dma_prim(drm_savage_private_t *dev_priv,
DRM_ERROR
("wrong number of vertices %u in TRIFAN/STRIP\n",
n);
return DRM_ERR(EINVAL);
return -EINVAL;
}
break;
default:
DRM_ERROR("invalid primitive type %u\n", prim);
return DRM_ERR(EINVAL);
return -EINVAL;
}
if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
if (skip != 0) {
DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
return DRM_ERR(EINVAL);
return -EINVAL;
}
} else {
unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) -
@ -332,18 +332,18 @@ static int savage_dispatch_dma_prim(drm_savage_private_t *dev_priv,
(skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1);
if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) {
DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
return DRM_ERR(EINVAL);
return -EINVAL;
}
if (reorder) {
DRM_ERROR("TRILIST_201 used on Savage4 hardware\n");
return DRM_ERR(EINVAL);
return -EINVAL;
}
}
if (start + n > dmabuf->total/32) {
if (start + n > dmabuf->total / 32) {
DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n",
start, start + n - 1, dmabuf->total/32);
return DRM_ERR(EINVAL);
start, start + n - 1, dmabuf->total / 32);
return -EINVAL;
}
/* Vertex DMA doesn't work with command DMA at the same time,
@ -377,33 +377,33 @@ static int savage_dispatch_dma_prim(drm_savage_private_t *dev_priv,
/* Need to reorder indices for correct flat
* shading while preserving the clock sense
* for correct culling. Only on Savage3D. */
int reorder[3] = {-1, -1, -1};
reorder[start%3] = 2;
int reorder[3] = { -1, -1, -1 };
reorder[start % 3] = 2;
BEGIN_BCI((count+1+1)/2);
BCI_DRAW_INDICES_S3D(count, prim, start+2);
BEGIN_BCI((count + 1 + 1) / 2);
BCI_DRAW_INDICES_S3D(count, prim, start + 2);
for (i = start+1; i+1 < start+count; i += 2)
for (i = start + 1; i + 1 < start + count; i += 2)
BCI_WRITE((i + reorder[i % 3]) |
((i + 1 +
reorder[(i + 1) % 3]) << 16));
if (i < start+count)
BCI_WRITE(i + reorder[i%3]);
if (i < start + count)
BCI_WRITE(i + reorder[i % 3]);
} else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
BEGIN_BCI((count+1+1)/2);
BEGIN_BCI((count + 1 + 1) / 2);
BCI_DRAW_INDICES_S3D(count, prim, start);
for (i = start+1; i+1 < start+count; i += 2)
BCI_WRITE(i | ((i+1) << 16));
if (i < start+count)
for (i = start + 1; i + 1 < start + count; i += 2)
BCI_WRITE(i | ((i + 1) << 16));
if (i < start + count)
BCI_WRITE(i);
} else {
BEGIN_BCI((count+2+1)/2);
BEGIN_BCI((count + 2 + 1) / 2);
BCI_DRAW_INDICES_S4(count, prim, skip);
for (i = start; i+1 < start+count; i += 2)
BCI_WRITE(i | ((i+1) << 16));
if (i < start+count)
for (i = start; i + 1 < start + count; i += 2)
BCI_WRITE(i | ((i + 1) << 16));
if (i < start + count)
BCI_WRITE(i);
}
@ -441,7 +441,7 @@ static int savage_dispatch_vb_prim(drm_savage_private_t *dev_priv,
if (n % 3 != 0) {
DRM_ERROR("wrong number of vertices %u in TRILIST\n",
n);
return DRM_ERR(EINVAL);
return -EINVAL;
}
break;
case SAVAGE_PRIM_TRISTRIP:
@ -450,24 +450,24 @@ static int savage_dispatch_vb_prim(drm_savage_private_t *dev_priv,
DRM_ERROR
("wrong number of vertices %u in TRIFAN/STRIP\n",
n);
return DRM_ERR(EINVAL);
return -EINVAL;
}
break;
default:
DRM_ERROR("invalid primitive type %u\n", prim);
return DRM_ERR(EINVAL);
return -EINVAL;
}
if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
if (skip > SAVAGE_SKIP_ALL_S3D) {
DRM_ERROR("invalid skip flags 0x%04x\n", skip);
return DRM_ERR(EINVAL);
return -EINVAL;
}
vtx_size = 8; /* full vertex */
} else {
if (skip > SAVAGE_SKIP_ALL_S4) {
DRM_ERROR("invalid skip flags 0x%04x\n", skip);
return DRM_ERR(EINVAL);
return -EINVAL;
}
vtx_size = 10; /* full vertex */
}
@ -479,13 +479,13 @@ static int savage_dispatch_vb_prim(drm_savage_private_t *dev_priv,
if (vtx_size > vb_stride) {
DRM_ERROR("vertex size greater than vb stride (%u > %u)\n",
vtx_size, vb_stride);
return DRM_ERR(EINVAL);
return -EINVAL;
}
if (start + n > vb_size / (vb_stride*4)) {
if (start + n > vb_size / (vb_stride * 4)) {
DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n",
start, start + n - 1, vb_size / (vb_stride*4));
return DRM_ERR(EINVAL);
start, start + n - 1, vb_size / (vb_stride * 4));
return -EINVAL;
}
prim <<= 25;
@ -496,28 +496,28 @@ static int savage_dispatch_vb_prim(drm_savage_private_t *dev_priv,
/* Need to reorder vertices for correct flat
* shading while preserving the clock sense
* for correct culling. Only on Savage3D. */
int reorder[3] = {-1, -1, -1};
reorder[start%3] = 2;
int reorder[3] = { -1, -1, -1 };
reorder[start % 3] = 2;
BEGIN_DMA(count*vtx_size+1);
BEGIN_DMA(count * vtx_size + 1);
DMA_DRAW_PRIMITIVE(count, prim, skip);
for (i = start; i < start+count; ++i) {
for (i = start; i < start + count; ++i) {
unsigned int j = i + reorder[i % 3];
DMA_COPY(&vtxbuf[vb_stride*j], vtx_size);
DMA_COPY(&vtxbuf[vb_stride * j], vtx_size);
}
DMA_COMMIT();
} else {
BEGIN_DMA(count*vtx_size+1);
BEGIN_DMA(count * vtx_size + 1);
DMA_DRAW_PRIMITIVE(count, prim, skip);
if (vb_stride == vtx_size) {
DMA_COPY(&vtxbuf[vb_stride*start],
vtx_size*count);
DMA_COPY(&vtxbuf[vb_stride * start],
vtx_size * count);
} else {
for (i = start; i < start+count; ++i) {
DMA_COPY(&vtxbuf[vb_stride*i],
for (i = start; i < start + count; ++i) {
DMA_COPY(&vtxbuf[vb_stride * i],
vtx_size);
}
}
@ -537,7 +537,7 @@ static int savage_dispatch_vb_prim(drm_savage_private_t *dev_priv,
static int savage_dispatch_dma_idx(drm_savage_private_t *dev_priv,
const drm_savage_cmd_header_t *cmd_header,
const uint16_t *idx,
const drm_buf_t *dmabuf)
const struct drm_buf *dmabuf)
{
unsigned char reorder = 0;
unsigned int prim = cmd_header->idx.prim;
@ -547,8 +547,8 @@ static int savage_dispatch_dma_idx(drm_savage_private_t *dev_priv,
BCI_LOCALS;
if (!dmabuf) {
DRM_ERROR("called without dma buffers!\n");
return DRM_ERR(EINVAL);
DRM_ERROR("called without dma buffers!\n");
return -EINVAL;
}
if (!n)
@ -561,7 +561,7 @@ static int savage_dispatch_dma_idx(drm_savage_private_t *dev_priv,
case SAVAGE_PRIM_TRILIST:
if (n % 3 != 0) {
DRM_ERROR("wrong number of indices %u in TRILIST\n", n);
return DRM_ERR(EINVAL);
return -EINVAL;
}
break;
case SAVAGE_PRIM_TRISTRIP:
@ -569,18 +569,18 @@ static int savage_dispatch_dma_idx(drm_savage_private_t *dev_priv,
if (n < 3) {
DRM_ERROR
("wrong number of indices %u in TRIFAN/STRIP\n", n);
return DRM_ERR(EINVAL);
return -EINVAL;
}
break;
default:
DRM_ERROR("invalid primitive type %u\n", prim);
return DRM_ERR(EINVAL);
return -EINVAL;
}
if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
if (skip != 0) {
DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
return DRM_ERR(EINVAL);
return -EINVAL;
}
} else {
unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) -
@ -588,11 +588,11 @@ static int savage_dispatch_dma_idx(drm_savage_private_t *dev_priv,
(skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1);
if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) {
DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
return DRM_ERR(EINVAL);
return -EINVAL;
}
if (reorder) {
DRM_ERROR("TRILIST_201 used on Savage4 hardware\n");
return DRM_ERR(EINVAL);
return -EINVAL;
}
}
@ -626,10 +626,10 @@ static int savage_dispatch_dma_idx(drm_savage_private_t *dev_priv,
/* check indices */
for (i = 0; i < count; ++i) {
if (idx[i] > dmabuf->total/32) {
if (idx[i] > dmabuf->total / 32) {
DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
i, idx[i], dmabuf->total/32);
return DRM_ERR(EINVAL);
i, idx[i], dmabuf->total / 32);
return -EINVAL;
}
}
@ -637,31 +637,31 @@ static int savage_dispatch_dma_idx(drm_savage_private_t *dev_priv,
/* Need to reorder indices for correct flat
* shading while preserving the clock sense
* for correct culling. Only on Savage3D. */
int reorder[3] = {2, -1, -1};
int reorder[3] = { 2, -1, -1 };
BEGIN_BCI((count+1+1)/2);
BEGIN_BCI((count + 1 + 1) / 2);
BCI_DRAW_INDICES_S3D(count, prim, idx[2]);
for (i = 1; i+1 < count; i += 2)
for (i = 1; i + 1 < count; i += 2)
BCI_WRITE(idx[i + reorder[i % 3]] |
(idx[i + 1 +
reorder[(i + 1) % 3]] << 16));
if (i < count)
BCI_WRITE(idx[i + reorder[i%3]]);
BCI_WRITE(idx[i + reorder[i % 3]]);
} else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
BEGIN_BCI((count+1+1)/2);
BEGIN_BCI((count + 1 + 1) / 2);
BCI_DRAW_INDICES_S3D(count, prim, idx[0]);
for (i = 1; i+1 < count; i += 2)
BCI_WRITE(idx[i] | (idx[i+1] << 16));
for (i = 1; i + 1 < count; i += 2)
BCI_WRITE(idx[i] | (idx[i + 1] << 16));
if (i < count)
BCI_WRITE(idx[i]);
} else {
BEGIN_BCI((count+2+1)/2);
BEGIN_BCI((count + 2 + 1) / 2);
BCI_DRAW_INDICES_S4(count, prim, skip);
for (i = 0; i+1 < count; i += 2)
BCI_WRITE(idx[i] | (idx[i+1] << 16));
for (i = 0; i + 1 < count; i += 2)
BCI_WRITE(idx[i] | (idx[i + 1] << 16));
if (i < count)
BCI_WRITE(idx[i]);
}
@ -699,7 +699,7 @@ static int savage_dispatch_vb_idx(drm_savage_private_t *dev_priv,
case SAVAGE_PRIM_TRILIST:
if (n % 3 != 0) {
DRM_ERROR("wrong number of indices %u in TRILIST\n", n);
return DRM_ERR(EINVAL);
return -EINVAL;
}
break;
case SAVAGE_PRIM_TRISTRIP:
@ -707,24 +707,24 @@ static int savage_dispatch_vb_idx(drm_savage_private_t *dev_priv,
if (n < 3) {
DRM_ERROR
("wrong number of indices %u in TRIFAN/STRIP\n", n);
return DRM_ERR(EINVAL);
return -EINVAL;
}
break;
default:
DRM_ERROR("invalid primitive type %u\n", prim);
return DRM_ERR(EINVAL);
return -EINVAL;
}
if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
if (skip > SAVAGE_SKIP_ALL_S3D) {
DRM_ERROR("invalid skip flags 0x%04x\n", skip);
return DRM_ERR(EINVAL);
return -EINVAL;
}
vtx_size = 8; /* full vertex */
} else {
if (skip > SAVAGE_SKIP_ALL_S4) {
DRM_ERROR("invalid skip flags 0x%04x\n", skip);
return DRM_ERR(EINVAL);
return -EINVAL;
}
vtx_size = 10; /* full vertex */
}
@ -736,7 +736,7 @@ static int savage_dispatch_vb_idx(drm_savage_private_t *dev_priv,
if (vtx_size > vb_stride) {
DRM_ERROR("vertex size greater than vb stride (%u > %u)\n",
vtx_size, vb_stride);
return DRM_ERR(EINVAL);
return -EINVAL;
}
prim <<= 25;
@ -746,10 +746,10 @@ static int savage_dispatch_vb_idx(drm_savage_private_t *dev_priv,
/* Check indices */
for (i = 0; i < count; ++i) {
if (idx[i] > vb_size / (vb_stride*4)) {
if (idx[i] > vb_size / (vb_stride * 4)) {
DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
i, idx[i], vb_size / (vb_stride*4));
return DRM_ERR(EINVAL);
i, idx[i], vb_size / (vb_stride * 4));
return -EINVAL;
}
}
@ -757,24 +757,24 @@ static int savage_dispatch_vb_idx(drm_savage_private_t *dev_priv,
/* Need to reorder vertices for correct flat
* shading while preserving the clock sense
* for correct culling. Only on Savage3D. */
int reorder[3] = {2, -1, -1};
int reorder[3] = { 2, -1, -1 };
BEGIN_DMA(count*vtx_size+1);
BEGIN_DMA(count * vtx_size + 1);
DMA_DRAW_PRIMITIVE(count, prim, skip);
for (i = 0; i < count; ++i) {
unsigned int j = idx[i + reorder[i % 3]];
DMA_COPY(&vtxbuf[vb_stride*j], vtx_size);
DMA_COPY(&vtxbuf[vb_stride * j], vtx_size);
}
DMA_COMMIT();
} else {
BEGIN_DMA(count*vtx_size+1);
BEGIN_DMA(count * vtx_size + 1);
DMA_DRAW_PRIMITIVE(count, prim, skip);
for (i = 0; i < count; ++i) {
unsigned int j = idx[i];
DMA_COPY(&vtxbuf[vb_stride*j], vtx_size);
DMA_COPY(&vtxbuf[vb_stride * j], vtx_size);
}
DMA_COMMIT();
@ -793,7 +793,7 @@ static int savage_dispatch_clear(drm_savage_private_t *dev_priv,
const drm_savage_cmd_header_t *cmd_header,
const drm_savage_cmd_header_t *data,
unsigned int nbox,
const drm_clip_rect_t *boxes)
const struct drm_clip_rect *boxes)
{
unsigned int flags = cmd_header->clear0.flags;
unsigned int clear_cmd;
@ -826,12 +826,12 @@ static int savage_dispatch_clear(drm_savage_private_t *dev_priv,
x = boxes[i].x1, y = boxes[i].y1;
w = boxes[i].x2 - boxes[i].x1;
h = boxes[i].y2 - boxes[i].y1;
BEGIN_DMA(nbufs*6);
BEGIN_DMA(nbufs * 6);
for (buf = SAVAGE_FRONT; buf <= SAVAGE_DEPTH; buf <<= 1) {
if (!(flags & buf))
continue;
DMA_WRITE(clear_cmd);
switch(buf) {
switch (buf) {
case SAVAGE_FRONT:
DMA_WRITE(dev_priv->front_offset);
DMA_WRITE(dev_priv->front_bd);
@ -863,7 +863,7 @@ static int savage_dispatch_clear(drm_savage_private_t *dev_priv,
}
static int savage_dispatch_swap(drm_savage_private_t *dev_priv,
unsigned int nbox, const drm_clip_rect_t *boxes)
unsigned int nbox, const struct drm_clip_rect *boxes)
{
unsigned int swap_cmd;
unsigned int i;
@ -883,8 +883,8 @@ static int savage_dispatch_swap(drm_savage_private_t *dev_priv,
DMA_WRITE(dev_priv->back_bd);
DMA_WRITE(BCI_X_Y(boxes[i].x1, boxes[i].y1));
DMA_WRITE(BCI_X_Y(boxes[i].x1, boxes[i].y1));
DMA_WRITE(BCI_W_H(boxes[i].x2-boxes[i].x1,
boxes[i].y2-boxes[i].y1));
DMA_WRITE(BCI_W_H(boxes[i].x2 - boxes[i].x1,
boxes[i].y2 - boxes[i].y1));
DMA_COMMIT();
}
@ -894,11 +894,11 @@ static int savage_dispatch_swap(drm_savage_private_t *dev_priv,
static int savage_dispatch_draw(drm_savage_private_t *dev_priv,
const drm_savage_cmd_header_t *start,
const drm_savage_cmd_header_t *end,
const drm_buf_t *dmabuf,
const struct drm_buf *dmabuf,
const unsigned int *vtxbuf,
unsigned int vb_size, unsigned int vb_stride,
unsigned int nbox,
const drm_clip_rect_t *boxes)
const struct drm_clip_rect *boxes)
{
unsigned int i, j;
int ret;
@ -944,7 +944,7 @@ static int savage_dispatch_draw(drm_savage_private_t *dev_priv,
DRM_ERROR("IMPLEMENTATION ERROR: "
"non-drawing-command %d\n",
cmd_header.cmd.cmd);
return DRM_ERR(EINVAL);
return -EINVAL;
}
if (ret != 0)
@ -955,35 +955,31 @@ static int savage_dispatch_draw(drm_savage_private_t *dev_priv,
return 0;
}
int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEVICE;
drm_savage_private_t *dev_priv = dev->dev_private;
drm_device_dma_t *dma = dev->dma;
drm_buf_t *dmabuf;
drm_savage_cmdbuf_t cmdbuf;
struct drm_device_dma *dma = dev->dma;
struct drm_buf *dmabuf;
drm_savage_cmdbuf_t *cmdbuf = data;
drm_savage_cmd_header_t *kcmd_addr = NULL;
drm_savage_cmd_header_t *first_draw_cmd;
unsigned int *kvb_addr = NULL;
drm_clip_rect_t *kbox_addr = NULL;
struct drm_clip_rect *kbox_addr = NULL;
unsigned int i, j;
int ret = 0;
DRM_DEBUG("\n");
LOCK_TEST_WITH_RETURN(dev, filp);
DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_savage_cmdbuf_t __user *)data,
sizeof(cmdbuf));
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (dma && dma->buflist) {
if (cmdbuf.dma_idx > dma->buf_count) {
if (cmdbuf->dma_idx > dma->buf_count) {
DRM_ERROR
("vertex buffer index %u out of range (0-%u)\n",
cmdbuf.dma_idx, dma->buf_count-1);
return DRM_ERR(EINVAL);
cmdbuf->dma_idx, dma->buf_count - 1);
return -EINVAL;
}
dmabuf = dma->buflist[cmdbuf.dma_idx];
dmabuf = dma->buflist[cmdbuf->dma_idx];
} else {
dmabuf = NULL;
}
@ -993,47 +989,49 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
* COPY_FROM_USER_UNCHECKED when done in other drivers, and is correct
* for locking on FreeBSD.
*/
if (cmdbuf.size) {
kcmd_addr = drm_alloc(cmdbuf.size * 8, DRM_MEM_DRIVER);
if (cmdbuf->size) {
kcmd_addr = drm_alloc(cmdbuf->size * 8, DRM_MEM_DRIVER);
if (kcmd_addr == NULL)
return ENOMEM;
return -ENOMEM;
if (DRM_COPY_FROM_USER(kcmd_addr, cmdbuf.cmd_addr,
cmdbuf.size * 8))
if (DRM_COPY_FROM_USER(kcmd_addr, cmdbuf->cmd_addr,
cmdbuf->size * 8))
{
drm_free(kcmd_addr, cmdbuf.size * 8, DRM_MEM_DRIVER);
return DRM_ERR(EFAULT);
drm_free(kcmd_addr, cmdbuf->size * 8, DRM_MEM_DRIVER);
return -EFAULT;
}
cmdbuf.cmd_addr = kcmd_addr;
cmdbuf->cmd_addr = kcmd_addr;
}
if (cmdbuf.vb_size) {
kvb_addr = drm_alloc(cmdbuf.vb_size, DRM_MEM_DRIVER);
if (cmdbuf->vb_size) {
kvb_addr = drm_alloc(cmdbuf->vb_size, DRM_MEM_DRIVER);
if (kvb_addr == NULL) {
ret = DRM_ERR(ENOMEM);
ret = -ENOMEM;
goto done;
}
if (DRM_COPY_FROM_USER(kvb_addr, cmdbuf.vb_addr,
cmdbuf.vb_size)) {
ret = DRM_ERR(EFAULT);
if (DRM_COPY_FROM_USER(kvb_addr, cmdbuf->vb_addr,
cmdbuf->vb_size)) {
ret = -EFAULT;
goto done;
}
cmdbuf.vb_addr = kvb_addr;
cmdbuf->vb_addr = kvb_addr;
}
if (cmdbuf.nbox) {
kbox_addr = drm_alloc(cmdbuf.nbox * sizeof(drm_clip_rect_t),
DRM_MEM_DRIVER);
if (cmdbuf->nbox) {
kbox_addr = drm_alloc(cmdbuf->nbox *
sizeof(struct drm_clip_rect),
DRM_MEM_DRIVER);
if (kbox_addr == NULL) {
ret = DRM_ERR(ENOMEM);
ret = -ENOMEM;
goto done;
}
if (DRM_COPY_FROM_USER(kbox_addr, cmdbuf.box_addr,
cmdbuf.nbox * sizeof(drm_clip_rect_t))) {
ret = DRM_ERR(EFAULT);
if (DRM_COPY_FROM_USER(kbox_addr, cmdbuf->box_addr,
cmdbuf->nbox *
sizeof(struct drm_clip_rect))) {
ret = -EFAULT;
goto done;
}
cmdbuf.box_addr = kbox_addr;
cmdbuf->box_addr = kbox_addr;
}
/* Make sure writes to DMA buffers are finished before sending
@ -1046,10 +1044,10 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
i = 0;
first_draw_cmd = NULL;
while (i < cmdbuf.size) {
while (i < cmdbuf->size) {
drm_savage_cmd_header_t cmd_header;
cmd_header = *(drm_savage_cmd_header_t *)cmdbuf.cmd_addr;
cmdbuf.cmd_addr++;
cmd_header = *(drm_savage_cmd_header_t *)cmdbuf->cmd_addr;
cmdbuf->cmd_addr++;
i++;
/* Group drawing commands with same state to minimize
@ -1059,28 +1057,29 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
case SAVAGE_CMD_DMA_IDX:
case SAVAGE_CMD_VB_IDX:
j = (cmd_header.idx.count + 3) / 4;
if (i + j > cmdbuf.size) {
if (i + j > cmdbuf->size) {
DRM_ERROR("indexed drawing command extends "
"beyond end of command buffer\n");
DMA_FLUSH();
return DRM_ERR(EINVAL);
return -EINVAL;
}
/* fall through */
case SAVAGE_CMD_DMA_PRIM:
case SAVAGE_CMD_VB_PRIM:
if (!first_draw_cmd)
first_draw_cmd = cmdbuf.cmd_addr-1;
cmdbuf.cmd_addr += j;
first_draw_cmd = cmdbuf->cmd_addr - 1;
cmdbuf->cmd_addr += j;
i += j;
break;
default:
if (first_draw_cmd) {
ret = savage_dispatch_draw (
ret = savage_dispatch_draw(
dev_priv, first_draw_cmd,
cmdbuf.cmd_addr-1,
dmabuf, cmdbuf.vb_addr, cmdbuf.vb_size,
cmdbuf.vb_stride,
cmdbuf.nbox, cmdbuf.box_addr);
cmdbuf->cmd_addr - 1,
dmabuf, cmdbuf->vb_addr,
cmdbuf->vb_size,
cmdbuf->vb_stride,
cmdbuf->nbox, cmdbuf->box_addr);
if (ret != 0)
return ret;
first_draw_cmd = NULL;
@ -1092,40 +1091,42 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
switch (cmd_header.cmd.cmd) {
case SAVAGE_CMD_STATE:
j = (cmd_header.state.count + 1) / 2;
if (i + j > cmdbuf.size) {
if (i + j > cmdbuf->size) {
DRM_ERROR("command SAVAGE_CMD_STATE extends "
"beyond end of command buffer\n");
DMA_FLUSH();
ret = DRM_ERR(EINVAL);
ret = -EINVAL;
goto done;
}
ret = savage_dispatch_state(dev_priv, &cmd_header,
(const uint32_t *)cmdbuf.cmd_addr);
cmdbuf.cmd_addr += j;
(const uint32_t *)cmdbuf->cmd_addr);
cmdbuf->cmd_addr += j;
i += j;
break;
case SAVAGE_CMD_CLEAR:
if (i + 1 > cmdbuf.size) {
if (i + 1 > cmdbuf->size) {
DRM_ERROR("command SAVAGE_CMD_CLEAR extends "
"beyond end of command buffer\n");
DMA_FLUSH();
ret = DRM_ERR(EINVAL);
ret = -EINVAL;
goto done;
}
ret = savage_dispatch_clear(dev_priv, &cmd_header,
cmdbuf.cmd_addr,
cmdbuf.nbox, cmdbuf.box_addr);
cmdbuf.cmd_addr++;
cmdbuf->cmd_addr,
cmdbuf->nbox,
cmdbuf->box_addr);
cmdbuf->cmd_addr++;
i++;
break;
case SAVAGE_CMD_SWAP:
ret = savage_dispatch_swap(dev_priv, cmdbuf.nbox,
cmdbuf.box_addr);
ret = savage_dispatch_swap(dev_priv, cmdbuf->nbox,
cmdbuf->box_addr);
break;
default:
DRM_ERROR("invalid command 0x%x\n", cmd_header.cmd.cmd);
DRM_ERROR("invalid command 0x%x\n",
cmd_header.cmd.cmd);
DMA_FLUSH();
ret = DRM_ERR(EINVAL);
ret = -EINVAL;
goto done;
}
@ -1136,10 +1137,10 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
}
if (first_draw_cmd) {
ret = savage_dispatch_draw (
dev_priv, first_draw_cmd, cmdbuf.cmd_addr, dmabuf,
cmdbuf.vb_addr, cmdbuf.vb_size, cmdbuf.vb_stride,
cmdbuf.nbox, cmdbuf.box_addr);
ret = savage_dispatch_draw(
dev_priv, first_draw_cmd, cmdbuf->cmd_addr, dmabuf,
cmdbuf->vb_addr, cmdbuf->vb_size, cmdbuf->vb_stride,
cmdbuf->nbox, cmdbuf->box_addr);
if (ret != 0) {
DMA_FLUSH();
goto done;
@ -1148,7 +1149,7 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
DMA_FLUSH();
if (dmabuf && cmdbuf.discard) {
if (dmabuf && cmdbuf->discard) {
drm_savage_buf_priv_t *buf_priv = dmabuf->dev_private;
uint16_t event;
event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D);
@ -1158,9 +1159,9 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
done:
/* If we didn't need to allocate them, these'll be NULL */
drm_free(kcmd_addr, cmdbuf.size * 8, DRM_MEM_DRIVER);
drm_free(kvb_addr, cmdbuf.vb_size, DRM_MEM_DRIVER);
drm_free(kbox_addr, cmdbuf.nbox * sizeof(drm_clip_rect_t),
drm_free(kcmd_addr, cmdbuf->size * 8, DRM_MEM_DRIVER);
drm_free(kvb_addr, cmdbuf->vb_size, DRM_MEM_DRIVER);
drm_free(kbox_addr, cmdbuf->nbox * sizeof(struct drm_clip_rect),
DRM_MEM_DRIVER);
return ret;
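
Even with the new calling convention, savage_bci_cmdbuf still copies by hand: `data` carries a kernel copy of drm_savage_cmdbuf_t, but cmd_addr, vb_addr and box_addr remain user pointers to arrays whose sizes are only known at run time, so each is duplicated into a drm_alloc()'d buffer, the struct's pointer is repointed at the copy, and everything is released at the `done:` label on every exit path. The shape of that idiom, reduced to the command stream only (the vertex and clip-rect buffers follow the same pattern):

	drm_savage_cmd_header_t *kcmd_addr = NULL;
	int ret = 0;

	if (cmdbuf->size) {
		/* duplicate the user-space command stream into kernel memory */
		kcmd_addr = drm_alloc(cmdbuf->size * 8, DRM_MEM_DRIVER);
		if (kcmd_addr == NULL)
			return -ENOMEM;
		if (DRM_COPY_FROM_USER(kcmd_addr, cmdbuf->cmd_addr,
				       cmdbuf->size * 8)) {
			ret = -EFAULT;
			goto done;
		}
		cmdbuf->cmd_addr = kcmd_addr;	/* dispatch reads the copy */
	}

	/* ... parse and dispatch cmdbuf->cmd_addr ... */

done:
	/* never-allocated buffers are still NULL here, as noted above */
	drm_free(kcmd_addr, cmdbuf->size * 8, DRM_MEM_DRIVER);
	return ret;
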

@ -39,7 +39,7 @@ static drm_pci_id_list_t sis_pciidlist[] = {
sis_PCI_IDS
};
static void sis_configure(drm_device_t *dev)
static void sis_configure(struct drm_device *dev)
{
dev->driver.buf_priv_size = 1; /* No dev_priv */
dev->driver.context_ctor = sis_init_context;
@ -69,9 +69,9 @@ sis_probe(device_t dev)
static int
sis_attach(device_t nbdev)
{
drm_device_t *dev = device_get_softc(nbdev);
struct drm_device *dev = device_get_softc(nbdev);
bzero(dev, sizeof(drm_device_t));
bzero(dev, sizeof(struct drm_device));
sis_configure(dev);
return drm_attach(nbdev, sis_pciidlist);
}
@ -88,7 +88,7 @@ static device_method_t sis_methods[] = {
static driver_t sis_driver = {
"drm",
sis_methods,
sizeof(drm_device_t)
sizeof(struct drm_device)
};
extern devclass_t drm_devclass;
@ -103,7 +103,7 @@ MODULE_DEPEND(sisdrm, drm, 1, 1, 1);
#ifdef _LKM
CFDRIVER_DECL(sis, DV_TTY, NULL);
#else
CFATTACH_DECL(sis, sizeof(drm_device_t), drm_probe, drm_attach, drm_detach,
CFATTACH_DECL(sis, sizeof(struct drm_device), drm_probe, drm_attach, drm_detach,
drm_activate);
#endif
#endif

@ -34,14 +34,47 @@ __FBSDID("$FreeBSD$");
/* General customization:
*/
#define DRIVER_AUTHOR "SIS"
#define DRIVER_AUTHOR "SIS, Tungsten Graphics"
#define DRIVER_NAME "sis"
#define DRIVER_DESC "SIS 300/630/540"
#define DRIVER_DATE "20030826"
#define DRIVER_DESC "SIS 300/630/540 and XGI V3XE/V5/V8"
#define DRIVER_DATE "20070626"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 1
#define DRIVER_MINOR 3
#define DRIVER_PATCHLEVEL 0
enum sis_family {
SIS_OTHER = 0,
SIS_CHIP_315 = 1,
};
#if defined(__linux__)
#define SIS_HAVE_CORE_MM
#endif
#ifdef SIS_HAVE_CORE_MM
#include "dev/drm/drm_sman.h"
#define SIS_BASE (dev_priv->mmio)
#define SIS_READ(reg) DRM_READ32(SIS_BASE, reg);
#define SIS_WRITE(reg, val) DRM_WRITE32(SIS_BASE, reg, val);
typedef struct drm_sis_private {
drm_local_map_t *mmio;
unsigned int idle_fault;
struct drm_sman sman;
unsigned int chipset;
int vram_initialized;
int agp_initialized;
unsigned long vram_offset;
unsigned long agp_offset;
} drm_sis_private_t;
extern int sis_idle(struct drm_device *dev);
extern void sis_reclaim_buffers_locked(struct drm_device *dev,
struct drm_file *file_priv);
extern void sis_lastclose(struct drm_device *dev);
#else
#include "dev/drm/sis_ds.h"
typedef struct drm_sis_private {
@ -49,10 +82,12 @@ typedef struct drm_sis_private {
memHeap_t *FBHeap;
} drm_sis_private_t;
extern int sis_init_context(drm_device_t * dev, int context);
extern int sis_final_context(drm_device_t * dev, int context);
extern int sis_init_context(struct drm_device * dev, int context);
extern int sis_final_context(struct drm_device * dev, int context);
extern drm_ioctl_desc_t sis_ioctls[];
#endif
extern struct drm_ioctl_desc sis_ioctls[];
extern int sis_max_ioctl;
#endif

@ -84,59 +84,52 @@ static int del_alloc_set(int context, int type, unsigned int val)
/* fb management via fb device */
#if defined(__linux__) && defined(CONFIG_FB_SIS)
static int sis_fb_init(DRM_IOCTL_ARGS)
static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
return 0;
}
static int sis_fb_alloc(DRM_IOCTL_ARGS)
static int sis_fb_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_sis_mem_t fb;
drm_sis_mem_t *fb = data;
struct sis_memreq req;
drm_sis_mem_t __user *argp = (drm_sis_mem_t __user *)data;
int retval = 0;
DRM_COPY_FROM_USER_IOCTL(fb, argp, sizeof(fb));
req.size = fb.size;
req.size = fb->size;
sis_malloc(&req);
if (req.offset) {
/* TODO */
fb.offset = req.offset;
fb.free = req.offset;
if (!add_alloc_set(fb.context, VIDEO_TYPE, fb.free)) {
fb->offset = req.offset;
fb->free = req.offset;
if (!add_alloc_set(fb->context, VIDEO_TYPE, fb->free)) {
DRM_DEBUG("adding to allocation set fails\n");
sis_free(req.offset);
retval = DRM_ERR(EINVAL);
retval = -EINVAL;
}
} else {
fb.offset = 0;
fb.size = 0;
fb.free = 0;
fb->offset = 0;
fb->size = 0;
fb->free = 0;
}
DRM_COPY_TO_USER_IOCTL(argp, fb, sizeof(fb));
DRM_DEBUG("alloc fb, size = %d, offset = %ld\n", fb.size, req.offset);
DRM_DEBUG("alloc fb, size = %d, offset = %ld\n", fb->size, req.offset);
return retval;
}
static int sis_fb_free(DRM_IOCTL_ARGS)
static int sis_fb_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_sis_mem_t fb;
int retval = 0;
DRM_COPY_FROM_USER_IOCTL(fb, (drm_sis_mem_t __user *) data, sizeof(fb));
if (!fb->free)
return -EINVAL;
if (!fb.free)
return DRM_ERR(EINVAL);
if (!del_alloc_set(fb->context, VIDEO_TYPE, fb->free))
retval = -EINVAL;
sis_free(fb->free);
if (!del_alloc_set(fb.context, VIDEO_TYPE, fb.free))
retval = DRM_ERR(EINVAL);
sis_free(fb.free);
DRM_DEBUG("free fb, offset = 0x%lx\n", fb.free);
DRM_DEBUG("free fb, offset = 0x%lx\n", fb->free);
return retval;
}
@ -153,13 +146,10 @@ static int sis_fb_free(DRM_IOCTL_ARGS)
* X driver/sisfb HW- Command-
* framebuffer memory DRI heap Cursor queue
*/
static int sis_fb_init(DRM_IOCTL_ARGS)
static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEVICE;
drm_sis_private_t *dev_priv = dev->dev_private;
drm_sis_fb_t fb;
DRM_COPY_FROM_USER_IOCTL(fb, (drm_sis_fb_t __user *) data, sizeof(fb));
drm_sis_fb_t *fb = data;
if (dev_priv == NULL) {
dev->dev_private = drm_calloc(1, sizeof(drm_sis_private_t),
@ -170,71 +160,62 @@ static int sis_fb_init(DRM_IOCTL_ARGS)
}
if (dev_priv->FBHeap != NULL)
return DRM_ERR(EINVAL);
return -EINVAL;
dev_priv->FBHeap = mmInit(fb.offset, fb.size);
dev_priv->FBHeap = mmInit(fb->offset, fb->size);
DRM_DEBUG("offset = %u, size = %u", fb.offset, fb.size);
DRM_DEBUG("offset = %u, size = %u", fb->offset, fb->size);
return 0;
}
static int sis_fb_alloc(DRM_IOCTL_ARGS)
static int sis_fb_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEVICE;
drm_sis_private_t *dev_priv = dev->dev_private;
drm_sis_mem_t __user *argp = (drm_sis_mem_t __user *)data;
drm_sis_mem_t fb;
drm_sis_mem_t *fb = data;
PMemBlock block;
int retval = 0;
if (dev_priv == NULL || dev_priv->FBHeap == NULL)
return DRM_ERR(EINVAL);
return -EINVAL;
DRM_COPY_FROM_USER_IOCTL(fb, argp, sizeof(fb));
block = mmAllocMem(dev_priv->FBHeap, fb.size, 0, 0);
block = mmAllocMem(dev_priv->FBHeap, fb->size, 0, 0);
if (block) {
/* TODO */
fb.offset = block->ofs;
fb.free = (unsigned long)block;
if (!add_alloc_set(fb.context, VIDEO_TYPE, fb.free)) {
fb->offset = block->ofs;
fb->free = (unsigned long)block;
if (!add_alloc_set(fb->context, VIDEO_TYPE, fb->free)) {
DRM_DEBUG("adding to allocation set fails\n");
mmFreeMem((PMemBlock) fb.free);
retval = DRM_ERR(EINVAL);
mmFreeMem((PMemBlock) fb->free);
retval = -EINVAL;
}
} else {
fb.offset = 0;
fb.size = 0;
fb.free = 0;
fb->offset = 0;
fb->size = 0;
fb->free = 0;
}
DRM_COPY_TO_USER_IOCTL(argp, fb, sizeof(fb));
DRM_DEBUG("alloc fb, size = %d, offset = %d\n", fb.size, fb.offset);
DRM_DEBUG("alloc fb, size = %d, offset = %d\n", fb->size, fb->offset);
return retval;
}
static int sis_fb_free(DRM_IOCTL_ARGS)
static int sis_fb_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEVICE;
drm_sis_private_t *dev_priv = dev->dev_private;
drm_sis_mem_t fb;
drm_sis_mem_t *fb = data;
if (dev_priv == NULL || dev_priv->FBHeap == NULL)
return DRM_ERR(EINVAL);
return -EINVAL;
DRM_COPY_FROM_USER_IOCTL(fb, (drm_sis_mem_t __user *) data, sizeof(fb));
if (!mmBlockInHeap(dev_priv->FBHeap, (PMemBlock) fb->free))
return -EINVAL;
if (!mmBlockInHeap(dev_priv->FBHeap, (PMemBlock) fb.free))
return DRM_ERR(EINVAL);
if (!del_alloc_set(fb->context, VIDEO_TYPE, fb->free))
return -EINVAL;
mmFreeMem((PMemBlock) fb->free);
if (!del_alloc_set(fb.context, VIDEO_TYPE, fb.free))
return DRM_ERR(EINVAL);
mmFreeMem((PMemBlock) fb.free);
DRM_DEBUG("free fb, free = 0x%lx\n", fb.free);
DRM_DEBUG("free fb, free = 0x%lx\n", fb->free);
return 0;
}
@ -243,11 +224,10 @@ static int sis_fb_free(DRM_IOCTL_ARGS)
/* agp memory management */
static int sis_ioctl_agp_init(DRM_IOCTL_ARGS)
static int sis_ioctl_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEVICE;
drm_sis_private_t *dev_priv = dev->dev_private;
drm_sis_agp_t agp;
drm_sis_agp_t *agp = data;
if (dev_priv == NULL) {
dev->dev_private = drm_calloc(1, sizeof(drm_sis_private_t),
@ -258,75 +238,63 @@ static int sis_ioctl_agp_init(DRM_IOCTL_ARGS)
}
if (dev_priv->AGPHeap != NULL)
return DRM_ERR(EINVAL);
return -EINVAL;
DRM_COPY_FROM_USER_IOCTL(agp, (drm_sis_agp_t __user *) data,
sizeof(agp));
dev_priv->AGPHeap = mmInit(agp->offset, agp->size);
dev_priv->AGPHeap = mmInit(agp.offset, agp.size);
DRM_DEBUG("offset = %u, size = %u", agp.offset, agp.size);
DRM_DEBUG("offset = %u, size = %u", agp->offset, agp->size);
return 0;
}
static int sis_ioctl_agp_alloc(DRM_IOCTL_ARGS)
static int sis_ioctl_agp_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEVICE;
drm_sis_private_t *dev_priv = dev->dev_private;
drm_sis_mem_t __user *argp = (drm_sis_mem_t __user *)data;
drm_sis_mem_t agp;
drm_sis_mem_t *agp = data;
PMemBlock block;
int retval = 0;
if (dev_priv == NULL || dev_priv->AGPHeap == NULL)
return DRM_ERR(EINVAL);
return -EINVAL;
DRM_COPY_FROM_USER_IOCTL(agp, argp, sizeof(agp));
block = mmAllocMem(dev_priv->AGPHeap, agp.size, 0, 0);
block = mmAllocMem(dev_priv->AGPHeap, agp->size, 0, 0);
if (block) {
/* TODO */
agp.offset = block->ofs;
agp.free = (unsigned long)block;
if (!add_alloc_set(agp.context, AGP_TYPE, agp.free)) {
agp->offset = block->ofs;
agp->free = (unsigned long)block;
if (!add_alloc_set(agp->context, AGP_TYPE, agp->free)) {
DRM_DEBUG("adding to allocation set fails\n");
mmFreeMem((PMemBlock) agp.free);
mmFreeMem((PMemBlock) agp->free);
retval = -1;
}
} else {
agp.offset = 0;
agp.size = 0;
agp.free = 0;
agp->offset = 0;
agp->size = 0;
agp->free = 0;
}
DRM_COPY_TO_USER_IOCTL(argp, agp, sizeof(agp));
DRM_DEBUG("alloc agp, size = %d, offset = %d\n", agp.size, agp.offset);
DRM_DEBUG("alloc agp, size = %d, offset = %d\n", agp->size,
agp->offset);
return retval;
}
static int sis_ioctl_agp_free(DRM_IOCTL_ARGS)
static int sis_ioctl_agp_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEVICE;
drm_sis_private_t *dev_priv = dev->dev_private;
drm_sis_mem_t agp;
drm_sis_mem_t *agp = data;
if (dev_priv == NULL || dev_priv->AGPHeap == NULL)
return DRM_ERR(EINVAL);
return -EINVAL;
DRM_COPY_FROM_USER_IOCTL(agp, (drm_sis_mem_t __user *) data,
sizeof(agp));
if (!mmBlockInHeap(dev_priv->AGPHeap, (PMemBlock) agp->free))
return -EINVAL;
if (!mmBlockInHeap(dev_priv->AGPHeap, (PMemBlock) agp.free))
return DRM_ERR(EINVAL);
mmFreeMem((PMemBlock) agp->free);
if (!del_alloc_set(agp->context, AGP_TYPE, agp->free))
return -EINVAL;
mmFreeMem((PMemBlock) agp.free);
if (!del_alloc_set(agp.context, AGP_TYPE, agp.free))
return DRM_ERR(EINVAL);
DRM_DEBUG("free agp, free = 0x%lx\n", agp.free);
DRM_DEBUG("free agp, free = 0x%lx\n", agp->free);
return 0;
}
@ -410,12 +378,12 @@ int sis_final_context(struct drm_device *dev, int context)
}
drm_ioctl_desc_t sis_ioctls[] = {
[DRM_IOCTL_NR(DRM_SIS_FB_ALLOC)] = {sis_fb_alloc, DRM_AUTH},
[DRM_IOCTL_NR(DRM_SIS_FB_FREE)] = {sis_fb_free, DRM_AUTH},
[DRM_IOCTL_NR(DRM_SIS_AGP_INIT)] = {sis_ioctl_agp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
[DRM_IOCTL_NR(DRM_SIS_AGP_ALLOC)] = {sis_ioctl_agp_alloc, DRM_AUTH},
[DRM_IOCTL_NR(DRM_SIS_AGP_FREE)] = {sis_ioctl_agp_free, DRM_AUTH},
[DRM_IOCTL_NR(DRM_SIS_FB_INIT)] = {sis_fb_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}
DRM_IOCTL_DEF(DRM_SIS_FB_ALLOC, sis_fb_alloc, DRM_AUTH),
DRM_IOCTL_DEF(DRM_SIS_FB_FREE, sis_fb_free, DRM_AUTH),
DRM_IOCTL_DEF(DRM_SIS_AGP_INIT, sis_ioctl_agp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_SIS_AGP_ALLOC, sis_ioctl_agp_alloc, DRM_AUTH),
DRM_IOCTL_DEF(DRM_SIS_AGP_FREE, sis_ioctl_agp_free, DRM_AUTH),
DRM_IOCTL_DEF(DRM_SIS_FB_INIT, sis_fb_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY)
};
int sis_max_ioctl = DRM_ARRAY_SIZE(sis_ioctls);

@ -44,7 +44,7 @@ static drm_pci_id_list_t tdfx_pciidlist[] = {
tdfx_PCI_IDS
};
static void tdfx_configure(drm_device_t *dev)
static void tdfx_configure(struct drm_device *dev)
{
dev->driver.buf_priv_size = 1; /* No dev_priv */
@ -70,9 +70,9 @@ tdfx_probe(device_t dev)
static int
tdfx_attach(device_t nbdev)
{
drm_device_t *dev = device_get_softc(nbdev);
struct drm_device *dev = device_get_softc(nbdev);
bzero(dev, sizeof(drm_device_t));
bzero(dev, sizeof(struct drm_device));
tdfx_configure(dev);
return drm_attach(nbdev, tdfx_pciidlist);
}
@ -89,7 +89,7 @@ static device_method_t tdfx_methods[] = {
static driver_t tdfx_driver = {
"drm",
tdfx_methods,
sizeof(drm_device_t)
sizeof(struct drm_device)
};
extern devclass_t drm_devclass;
@ -104,7 +104,7 @@ MODULE_DEPEND(tdfx, drm, 1, 1, 1);
#ifdef _LKM
CFDRIVER_DECL(tdfx, DV_TTY, NULL);
#else
CFATTACH_DECL(tdfx, sizeof(drm_device_t), drm_probe, drm_attach, drm_detach,
drm_activate);
CFATTACH_DECL(tdfx, sizeof(struct drm_device), drm_probe, drm_attach,
drm_detach, drm_activate);
#endif
#endif

@ -2,7 +2,7 @@
.PATH: ${.CURDIR}/../../../dev/drm
KMOD = i915
SRCS = i915_dma.c i915_drv.c i915_irq.c i915_mem.c
SRCS = i915_dma.c i915_drv.c i915_irq.c i915_mem.c i915_suspend.c
SRCS +=device_if.h bus_if.h pci_if.h opt_drm.h
.include <bsd.kmod.mk>