Update the DRM to the latest from DRI CVS. This includes some bugfixes and
removes the infrastructure for the gamma driver, which was itself removed a
while back. The DRM_LINUX option is removed because the handler is now
provided by the linux compat code itself.
anholt 2003-04-25 01:18:47 +00:00
parent a521aa5aa4
commit 6afbdfe8ea
34 changed files with 436 additions and 2556 deletions

@@ -394,7 +394,6 @@ NETGRAPH_UI opt_netgraph.h
NETGRAPH_VJC opt_netgraph.h NETGRAPH_VJC opt_netgraph.h
# DRM options # DRM options
DRM_LINUX opt_drm.h
DRM_DEBUG opt_drm.h DRM_DEBUG opt_drm.h
ZERO_COPY_SOCKETS opt_zero.h ZERO_COPY_SOCKETS opt_zero.h
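With DRM_LINUX dropped from the option table above, a kernel configuration keeps only the remaining DRM option. A hypothetical config fragment after this commit (comment text is illustrative, not from the tree):

options 	DRM_DEBUG	# still accepted: extra DRM debugging output
#options 	DRM_LINUX	# obsolete: the linux compat code now provides the handler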

@@ -52,15 +52,6 @@
#ifndef __HAVE_DMA_IRQ #ifndef __HAVE_DMA_IRQ
#define __HAVE_DMA_IRQ 0 #define __HAVE_DMA_IRQ 0
#endif #endif
#ifndef __HAVE_DMA_WAITLIST
#define __HAVE_DMA_WAITLIST 0
#endif
#ifndef __HAVE_DMA_FREELIST
#define __HAVE_DMA_FREELIST 0
#endif
#ifndef __HAVE_DMA_HISTOGRAM
#define __HAVE_DMA_HISTOGRAM 0
#endif
#define DRM_DEBUG_CODE 0 /* Include debugging code (if > 1, then #define DRM_DEBUG_CODE 0 /* Include debugging code (if > 1, then
also include looping detection. */ also include looping detection. */
@@ -83,12 +74,8 @@ typedef struct drm_file drm_file_t;
#define DRM_HASH_SIZE 16 /* Size of key hash table */ #define DRM_HASH_SIZE 16 /* Size of key hash table */
#define DRM_KERNEL_CONTEXT 0 /* Change drm_resctx if changed */ #define DRM_KERNEL_CONTEXT 0 /* Change drm_resctx if changed */
#define DRM_RESERVED_CONTEXTS 1 /* Change drm_resctx if changed */ #define DRM_RESERVED_CONTEXTS 1 /* Change drm_resctx if changed */
#define DRM_LOOPING_LIMIT 5000000
#define DRM_BSZ 1024 /* Buffer size for /dev/drm? output */
#define DRM_LOCK_SLICE 1 /* Time slice for lock, in jiffies */
#define DRM_FLAG_DEBUG 0x01 #define DRM_FLAG_DEBUG 0x01
#define DRM_FLAG_NOCTX 0x02
#define DRM_MEM_DMA 0 #define DRM_MEM_DMA 0
#define DRM_MEM_SAREA 1 #define DRM_MEM_SAREA 1
@@ -96,30 +83,23 @@
#define DRM_MEM_MAGIC 3 #define DRM_MEM_MAGIC 3
#define DRM_MEM_IOCTLS 4 #define DRM_MEM_IOCTLS 4
#define DRM_MEM_MAPS 5 #define DRM_MEM_MAPS 5
#define DRM_MEM_VMAS 6 #define DRM_MEM_BUFS 6
#define DRM_MEM_BUFS 7 #define DRM_MEM_SEGS 7
#define DRM_MEM_SEGS 8 #define DRM_MEM_PAGES 8
#define DRM_MEM_PAGES 9 #define DRM_MEM_FILES 9
#define DRM_MEM_FILES 10 #define DRM_MEM_QUEUES 10
#define DRM_MEM_QUEUES 11 #define DRM_MEM_CMDS 11
#define DRM_MEM_CMDS 12 #define DRM_MEM_MAPPINGS 12
#define DRM_MEM_MAPPINGS 13 #define DRM_MEM_BUFLISTS 13
#define DRM_MEM_BUFLISTS 14 #define DRM_MEM_AGPLISTS 14
#define DRM_MEM_AGPLISTS 15 #define DRM_MEM_TOTALAGP 15
#define DRM_MEM_TOTALAGP 16 #define DRM_MEM_BOUNDAGP 16
#define DRM_MEM_BOUNDAGP 17 #define DRM_MEM_CTXBITMAP 17
#define DRM_MEM_CTXBITMAP 18 #define DRM_MEM_STUB 18
#define DRM_MEM_STUB 19 #define DRM_MEM_SGLISTS 19
#define DRM_MEM_SGLISTS 20
#define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8) #define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8)
/* Backward compatibility section */
/* _PAGE_WT changed to _PAGE_PWT in 2.2.6 */
#ifndef _PAGE_PWT
#define _PAGE_PWT _PAGE_WT
#endif
/* Mapping helper macros */ /* Mapping helper macros */
#define DRM_IOREMAP(map) \ #define DRM_IOREMAP(map) \
(map)->handle = DRM(ioremap)( dev, map ) (map)->handle = DRM(ioremap)( dev, map )
@@ -147,22 +127,12 @@ typedef struct drm_file drm_file_t;
} while(0) } while(0)
typedef struct drm_pci_list {
u16 vendor;
u16 device;
} drm_pci_list_t;
typedef struct drm_ioctl_desc { typedef struct drm_ioctl_desc {
d_ioctl_t *func; int (*func)(DRM_IOCTL_ARGS);
int auth_needed; int auth_needed;
int root_only; int root_only;
} drm_ioctl_desc_t; } drm_ioctl_desc_t;
typedef struct drm_devstate {
pid_t owner; /* X server pid holding x_lock */
} drm_devstate_t;
typedef struct drm_magic_entry { typedef struct drm_magic_entry {
drm_magic_t magic; drm_magic_t magic;
struct drm_file *priv; struct drm_file *priv;
@@ -174,12 +144,6 @@ typedef struct drm_magic_head {
struct drm_magic_entry *tail; struct drm_magic_entry *tail;
} drm_magic_head_t; } drm_magic_head_t;
typedef struct drm_vma_entry {
struct vm_area_struct *vma;
struct drm_vma_entry *next;
pid_t pid;
} drm_vma_entry_t;
typedef struct drm_buf { typedef struct drm_buf {
int idx; /* Index into master buflist */ int idx; /* Index into master buflist */
int total; /* Buffer size */ int total; /* Buffer size */
@@ -189,12 +153,9 @@ typedef struct drm_buf {
void *address; /* Address of buffer */ void *address; /* Address of buffer */
unsigned long bus_address; /* Bus address of buffer */ unsigned long bus_address; /* Bus address of buffer */
struct drm_buf *next; /* Kernel-only: used for free list */ struct drm_buf *next; /* Kernel-only: used for free list */
__volatile__ int waiting; /* On kernel DMA queue */
__volatile__ int pending; /* On hardware DMA queue */ __volatile__ int pending; /* On hardware DMA queue */
wait_queue_head_t dma_wait; /* Processes waiting */ DRMFILE filp; /* Unique identifier of holding process */
pid_t pid; /* PID of holding process */
int context; /* Kernel queue for this buffer */ int context; /* Kernel queue for this buffer */
int while_locked;/* Dispatch this buffer while locked */
enum { enum {
DRM_LIST_NONE = 0, DRM_LIST_NONE = 0,
DRM_LIST_FREE = 1, DRM_LIST_FREE = 1,
@@ -204,39 +165,10 @@ typedef struct drm_buf {
DRM_LIST_RECLAIM = 5 DRM_LIST_RECLAIM = 5
} list; /* Which list we're on */ } list; /* Which list we're on */
#if DRM_DMA_HISTOGRAM
cycles_t time_queued; /* Queued to kernel DMA queue */
cycles_t time_dispatched; /* Dispatched to hardware */
cycles_t time_completed; /* Completed by hardware */
cycles_t time_freed; /* Back on freelist */
#endif
int dev_priv_size; /* Size of buffer private stoarge */ int dev_priv_size; /* Size of buffer private stoarge */
void *dev_private; /* Per-buffer private storage */ void *dev_private; /* Per-buffer private storage */
} drm_buf_t; } drm_buf_t;
#if DRM_DMA_HISTOGRAM
#define DRM_DMA_HISTOGRAM_SLOTS 9
#define DRM_DMA_HISTOGRAM_INITIAL 10
#define DRM_DMA_HISTOGRAM_NEXT(current) ((current)*10)
typedef struct drm_histogram {
atomic_t total;
atomic_t queued_to_dispatched[DRM_DMA_HISTOGRAM_SLOTS];
atomic_t dispatched_to_completed[DRM_DMA_HISTOGRAM_SLOTS];
atomic_t completed_to_freed[DRM_DMA_HISTOGRAM_SLOTS];
atomic_t queued_to_completed[DRM_DMA_HISTOGRAM_SLOTS];
atomic_t queued_to_freed[DRM_DMA_HISTOGRAM_SLOTS];
atomic_t dma[DRM_DMA_HISTOGRAM_SLOTS];
atomic_t schedule[DRM_DMA_HISTOGRAM_SLOTS];
atomic_t ctx[DRM_DMA_HISTOGRAM_SLOTS];
atomic_t lacq[DRM_DMA_HISTOGRAM_SLOTS];
atomic_t lhld[DRM_DMA_HISTOGRAM_SLOTS];
} drm_histogram_t;
#endif
/* bufs is one longer than it has to be */ /* bufs is one longer than it has to be */
typedef struct drm_waitlist { typedef struct drm_waitlist {
int count; /* Number of possible buffers */ int count; /* Number of possible buffers */
@@ -253,10 +185,8 @@ typedef struct drm_freelist {
atomic_t count; /* Number of free buffers */ atomic_t count; /* Number of free buffers */
drm_buf_t *next; /* End pointer */ drm_buf_t *next; /* End pointer */
wait_queue_head_t waiting; /* Processes waiting on free bufs */
int low_mark; /* Low water mark */ int low_mark; /* Low water mark */
int high_mark; /* High water mark */ int high_mark; /* High water mark */
atomic_t wfh; /* If waiting for high mark */
DRM_SPINTYPE lock; DRM_SPINTYPE lock;
} drm_freelist_t; } drm_freelist_t;
@@ -289,48 +219,14 @@ struct drm_file {
struct drm_device *devXX; struct drm_device *devXX;
}; };
typedef struct drm_queue {
atomic_t use_count; /* Outstanding uses (+1) */
atomic_t finalization; /* Finalization in progress */
atomic_t block_count; /* Count of processes waiting */
atomic_t block_read; /* Queue blocked for reads */
wait_queue_head_t read_queue; /* Processes waiting on block_read */
atomic_t block_write; /* Queue blocked for writes */
wait_queue_head_t write_queue; /* Processes waiting on block_write */
#if 1
atomic_t total_queued; /* Total queued statistic */
atomic_t total_flushed;/* Total flushes statistic */
atomic_t total_locks; /* Total locks statistics */
#endif
drm_ctx_flags_t flags; /* Context preserving and 2D-only */
drm_waitlist_t waitlist; /* Pending buffers */
wait_queue_head_t flush_queue; /* Processes waiting until flush */
} drm_queue_t;
typedef struct drm_lock_data { typedef struct drm_lock_data {
drm_hw_lock_t *hw_lock; /* Hardware lock */ drm_hw_lock_t *hw_lock; /* Hardware lock */
pid_t pid; /* PID of lock holder (0=kernel) */ DRMFILE filp; /* Unique identifier of holding process (NULL is kernel)*/
wait_queue_head_t lock_queue; /* Queue of blocked processes */ wait_queue_head_t lock_queue; /* Queue of blocked processes */
unsigned long lock_time; /* Time of last lock in jiffies */ unsigned long lock_time; /* Time of last lock in jiffies */
} drm_lock_data_t; } drm_lock_data_t;
typedef struct drm_device_dma { typedef struct drm_device_dma {
#if 0
/* Performance Counters */
atomic_t total_prio; /* Total DRM_DMA_PRIORITY */
atomic_t total_bytes; /* Total bytes DMA'd */
atomic_t total_dmas; /* Total DMA buffers dispatched */
atomic_t total_missed_dma; /* Missed drm_do_dma */
atomic_t total_missed_lock; /* Missed lock in drm_do_dma */
atomic_t total_missed_free; /* Missed drm_free_this_buffer */
atomic_t total_missed_sched;/* Missed drm_dma_schedule */
atomic_t total_tried; /* Tried next_buffer */
atomic_t total_hit; /* Sent next_buffer */
atomic_t total_lost; /* Lost interrupt */
#endif
drm_buf_entry_t bufs[DRM_MAX_ORDER+1]; drm_buf_entry_t bufs[DRM_MAX_ORDER+1];
int buf_count; int buf_count;
drm_buf_t **buflist; /* Vector of pointers info bufs */ drm_buf_t **buflist; /* Vector of pointers info bufs */
@@ -346,8 +242,6 @@ typedef struct drm_device_dma {
/* DMA support */ /* DMA support */
drm_buf_t *this_buffer; /* Buffer being sent */ drm_buf_t *this_buffer; /* Buffer being sent */
drm_buf_t *next_buffer; /* Selected buffer to send */ drm_buf_t *next_buffer; /* Selected buffer to send */
drm_queue_t *next_queue; /* Queue from which buffer selected*/
wait_queue_head_t waiting; /* Processes waiting on free bufs */
} drm_device_dma_t; } drm_device_dma_t;
#if __REALLY_HAVE_AGP #if __REALLY_HAVE_AGP
@@ -381,11 +275,6 @@ typedef struct drm_sg_mem {
dma_addr_t *busaddr; dma_addr_t *busaddr;
} drm_sg_mem_t; } drm_sg_mem_t;
typedef struct drm_sigdata {
int context;
drm_hw_lock_t *lock;
} drm_sigdata_t;
typedef struct drm_local_map { typedef struct drm_local_map {
unsigned long offset; /* Physical address (0 for SAREA)*/ unsigned long offset; /* Physical address (0 for SAREA)*/
unsigned long size; /* Physical size (bytes) */ unsigned long size; /* Physical size (bytes) */
@@ -424,21 +313,16 @@ struct drm_device {
device_t device; /* Device instance from newbus */ device_t device; /* Device instance from newbus */
#endif #endif
dev_t devnode; /* Device number for mknod */ dev_t devnode; /* Device number for mknod */
char *devname; /* For /proc/interrupts */
int blocked; /* Blocked due to VC switch? */
int flags; /* Flags to open(2) */ int flags; /* Flags to open(2) */
int writable; /* Opened with FWRITE */
/* Locks */ /* Locks */
DRM_SPINTYPE count_lock; /* For inuse, open_count, buf_use */ DRM_SPINTYPE count_lock; /* For open_count, buf_use, buf_alloc */
struct lock dev_lock; /* For others */ struct lock dev_lock; /* For others */
/* Usage Counters */ /* Usage Counters */
int open_count; /* Outstanding files open */ int open_count; /* Outstanding files open */
atomic_t ioctl_count; /* Outstanding IOCTLs pending */
atomic_t vma_count; /* Outstanding vma areas open */
int buf_use; /* Buffers in use -- cannot alloc */ int buf_use; /* Buffers in use -- cannot alloc */
atomic_t buf_alloc; /* Buffer allocation in progress */ int buf_alloc; /* Buffer allocation in progress */
/* Performance counters */ /* Performance counters */
unsigned long counters; unsigned long counters;
@@ -451,19 +335,13 @@ struct drm_device {
/* Memory management */ /* Memory management */
drm_map_list_t *maplist; /* Linked list of regions */ drm_map_list_t *maplist; /* Linked list of regions */
int map_count; /* Number of mappable regions */
drm_local_map_t **context_sareas; drm_local_map_t **context_sareas;
int max_context; int max_context;
drm_vma_entry_t *vmalist; /* List of vmas (for debugging) */
drm_lock_data_t lock; /* Information on hardware lock */ drm_lock_data_t lock; /* Information on hardware lock */
/* DMA queues (contexts) */ /* DMA queues (contexts) */
int queue_count; /* Number of active DMA queues */
int queue_reserved; /* Number of reserved DMA queues */
int queue_slots; /* Actual length of queuelist */
drm_queue_t **queuelist; /* Vector of pointers to DMA queues */
drm_device_dma_t *dma; /* Optional pointer for DMA support */ drm_device_dma_t *dma; /* Optional pointer for DMA support */
/* Context support */ /* Context support */
@@ -477,45 +355,25 @@ struct drm_device {
#endif #endif
void *irqh; /* Handle from bus_setup_intr */ void *irqh; /* Handle from bus_setup_intr */
atomic_t context_flag; /* Context swapping flag */ atomic_t context_flag; /* Context swapping flag */
atomic_t interrupt_flag; /* Interruption handler flag */
atomic_t dma_flag; /* DMA dispatch flag */
struct callout timer; /* Timer for delaying ctx switch */ struct callout timer; /* Timer for delaying ctx switch */
wait_queue_head_t context_wait; /* Processes waiting on ctx switch */
int last_checked; /* Last context checked for DMA */
int last_context; /* Last current context */ int last_context; /* Last current context */
unsigned long last_switch; /* jiffies at last context switch */
#if __FreeBSD_version >= 400005 #if __FreeBSD_version >= 400005
struct task task; struct task task;
#endif #endif
#if __HAVE_VBL_IRQ #if __HAVE_VBL_IRQ
wait_queue_head_t vbl_queue; /* vbl wait channel */ wait_queue_head_t vbl_queue; /* vbl wait channel */
atomic_t vbl_received; atomic_t vbl_received;
#if 0 /* vbl signals are untested, ntested */ #if 0 /* vbl signals are untested */
struct drm_vbl_sig_list vbl_sig_list; struct drm_vbl_sig_list vbl_sig_list;
DRM_SPINTYPE vbl_lock; DRM_SPINTYPE vbl_lock;
#endif #endif
#endif
cycles_t ctx_start;
cycles_t lck_start;
#if __HAVE_DMA_HISTOGRAM
drm_histogram_t histo;
#endif #endif
/* Callback to X server for context switch
and for heavy-handed reset. */
char buf[DRM_BSZ]; /* Output buffer */
char *buf_rp; /* Read pointer */
char *buf_wp; /* Write pointer */
char *buf_end; /* End pointer */
#ifdef __FreeBSD__ #ifdef __FreeBSD__
struct sigio *buf_sigio; /* Processes waiting for SIGIO */ struct sigio *buf_sigio; /* Processes waiting for SIGIO */
#elif defined(__NetBSD__) #elif defined(__NetBSD__)
pid_t buf_pgid; pid_t buf_pgid;
#endif #endif
struct selinfo buf_sel; /* Workspace for select/poll */
int buf_selecting;/* True if poll sleeper */
wait_queue_head_t buf_readers; /* Processes waiting to read */
wait_queue_head_t buf_writers; /* Processes waiting to ctx switch */
/* Sysctl support */ /* Sysctl support */
struct drm_sysctl_info *sysctl; struct drm_sysctl_info *sysctl;
@@ -526,13 +384,9 @@ struct drm_device {
drm_sg_mem_t *sg; /* Scatter gather memory */ drm_sg_mem_t *sg; /* Scatter gather memory */
atomic_t *ctx_bitmap; atomic_t *ctx_bitmap;
void *dev_private; void *dev_private;
drm_sigdata_t sigdata; /* For block_all_signals */
sigset_t sigmask;
}; };
extern int DRM(flags); extern int DRM(flags);
extern void DRM(parse_options)( char *s );
extern int DRM(cpu_valid)( void );
/* Authentication (drm_auth.h) */ /* Authentication (drm_auth.h) */
extern int DRM(add_magic)(drm_device_t *dev, drm_file_t *priv, extern int DRM(add_magic)(drm_device_t *dev, drm_file_t *priv,
@@ -541,7 +395,6 @@ extern int DRM(remove_magic)(drm_device_t *dev, drm_magic_t magic);
/* Driver support (drm_drv.h) */ /* Driver support (drm_drv.h) */
extern int DRM(version)( DRM_IOCTL_ARGS ); extern int DRM(version)( DRM_IOCTL_ARGS );
extern int DRM(write_string)(drm_device_t *dev, const char *s);
/* Memory management support (drm_memory.h) */ /* Memory management support (drm_memory.h) */
extern void DRM(mem_init)(void); extern void DRM(mem_init)(void);
@@ -582,11 +435,6 @@ extern int DRM(lock_transfer)(drm_device_t *dev,
extern int DRM(lock_free)(drm_device_t *dev, extern int DRM(lock_free)(drm_device_t *dev,
__volatile__ unsigned int *lock, __volatile__ unsigned int *lock,
unsigned int context); unsigned int context);
extern int DRM(flush_unblock)(drm_device_t *dev, int context,
drm_lock_flags_t flags);
extern int DRM(flush_block_and_flush)(drm_device_t *dev, int context,
drm_lock_flags_t flags);
extern int DRM(notifier)(void *priv);
/* Buffer management support (drm_bufs.h) */ /* Buffer management support (drm_bufs.h) */
extern int DRM(order)( unsigned long size ); extern int DRM(order)( unsigned long size );
@@ -596,16 +444,7 @@ extern int DRM(order)( unsigned long size );
extern int DRM(dma_setup)(drm_device_t *dev); extern int DRM(dma_setup)(drm_device_t *dev);
extern void DRM(dma_takedown)(drm_device_t *dev); extern void DRM(dma_takedown)(drm_device_t *dev);
extern void DRM(free_buffer)(drm_device_t *dev, drm_buf_t *buf); extern void DRM(free_buffer)(drm_device_t *dev, drm_buf_t *buf);
extern void DRM(reclaim_buffers)(drm_device_t *dev, pid_t pid); extern void DRM(reclaim_buffers)(drm_device_t *dev, DRMFILE filp);
#if __HAVE_OLD_DMA
/* GH: This is a dirty hack for now...
*/
extern void DRM(clear_next_buffer)(drm_device_t *dev);
extern int DRM(select_queue)(drm_device_t *dev,
void (*wrapper)(unsigned long));
extern int DRM(dma_enqueue)(drm_device_t *dev, drm_dma_t *dma);
extern int DRM(dma_get_buffers)(drm_device_t *dev, drm_dma_t *dma);
#endif
#if __HAVE_DMA_IRQ #if __HAVE_DMA_IRQ
extern int DRM(irq_install)( drm_device_t *dev, int irq ); extern int DRM(irq_install)( drm_device_t *dev, int irq );
extern int DRM(irq_uninstall)( drm_device_t *dev ); extern int DRM(irq_uninstall)( drm_device_t *dev );
@@ -616,10 +455,6 @@ extern void DRM(driver_irq_uninstall)( drm_device_t *dev );
#if __HAVE_DMA_IRQ_BH #if __HAVE_DMA_IRQ_BH
extern void DRM(dma_immediate_bh)( DRM_TASKQUEUE_ARGS ); extern void DRM(dma_immediate_bh)( DRM_TASKQUEUE_ARGS );
#endif #endif
#endif
#if DRM_DMA_HISTOGRAM
extern int DRM(histogram_slot)(unsigned long count);
extern void DRM(histogram_compute)(drm_device_t *dev, drm_buf_t *buf);
#endif #endif
/* Buffer list support (drm_lists.h) */ /* Buffer list support (drm_lists.h) */
@@ -629,13 +464,6 @@ extern int DRM(waitlist_destroy)(drm_waitlist_t *bl);
extern int DRM(waitlist_put)(drm_waitlist_t *bl, drm_buf_t *buf); extern int DRM(waitlist_put)(drm_waitlist_t *bl, drm_buf_t *buf);
extern drm_buf_t *DRM(waitlist_get)(drm_waitlist_t *bl); extern drm_buf_t *DRM(waitlist_get)(drm_waitlist_t *bl);
#endif #endif
#if __HAVE_DMA_FREELIST
extern int DRM(freelist_create)(drm_freelist_t *bl, int count);
extern int DRM(freelist_destroy)(drm_freelist_t *bl);
extern int DRM(freelist_put)(drm_device_t *dev, drm_freelist_t *bl,
drm_buf_t *buf);
extern drm_buf_t *DRM(freelist_get)(drm_freelist_t *bl, int block);
#endif
#endif /* __HAVE_DMA */ #endif /* __HAVE_DMA */
#if __HAVE_VBL_IRQ #if __HAVE_VBL_IRQ
extern int DRM(vblank_wait)(drm_device_t *dev, unsigned int *vbl_seq); extern int DRM(vblank_wait)(drm_device_t *dev, unsigned int *vbl_seq);
@@ -668,5 +496,75 @@ extern int DRM(ati_pcigart_cleanup)(drm_device_t *dev,
dma_addr_t bus_addr); dma_addr_t bus_addr);
#endif #endif
/* Locking IOCTL support (drm_drv.h) */
extern int DRM(lock)(DRM_IOCTL_ARGS);
extern int DRM(unlock)(DRM_IOCTL_ARGS);
/* Misc. IOCTL support (drm_ioctl.h) */
extern int DRM(irq_busid)(DRM_IOCTL_ARGS);
extern int DRM(getunique)(DRM_IOCTL_ARGS);
extern int DRM(setunique)(DRM_IOCTL_ARGS);
extern int DRM(getmap)(DRM_IOCTL_ARGS);
extern int DRM(getclient)(DRM_IOCTL_ARGS);
extern int DRM(getstats)(DRM_IOCTL_ARGS);
extern int DRM(noop)(DRM_IOCTL_ARGS);
/* Context IOCTL support (drm_context.h) */
extern int DRM(resctx)(DRM_IOCTL_ARGS);
extern int DRM(addctx)(DRM_IOCTL_ARGS);
extern int DRM(modctx)(DRM_IOCTL_ARGS);
extern int DRM(getctx)(DRM_IOCTL_ARGS);
extern int DRM(switchctx)(DRM_IOCTL_ARGS);
extern int DRM(newctx)(DRM_IOCTL_ARGS);
extern int DRM(rmctx)(DRM_IOCTL_ARGS);
extern int DRM(setsareactx)(DRM_IOCTL_ARGS);
extern int DRM(getsareactx)(DRM_IOCTL_ARGS);
/* Drawable IOCTL support (drm_drawable.h) */
extern int DRM(adddraw)(DRM_IOCTL_ARGS);
extern int DRM(rmdraw)(DRM_IOCTL_ARGS);
/* Authentication IOCTL support (drm_auth.h) */
extern int DRM(getmagic)(DRM_IOCTL_ARGS);
extern int DRM(authmagic)(DRM_IOCTL_ARGS);
/* Buffer management support (drm_bufs.h) */
extern int DRM(addmap)(DRM_IOCTL_ARGS);
extern int DRM(rmmap)(DRM_IOCTL_ARGS);
#if __HAVE_DMA
extern int DRM(addbufs)(DRM_IOCTL_ARGS);
extern int DRM(infobufs)(DRM_IOCTL_ARGS);
extern int DRM(markbufs)(DRM_IOCTL_ARGS);
extern int DRM(freebufs)(DRM_IOCTL_ARGS);
extern int DRM(mapbufs)(DRM_IOCTL_ARGS);
#endif
/* DMA support (drm_dma.h) */
#if __HAVE_DMA
extern int DRM(control)(DRM_IOCTL_ARGS);
#endif
#if __HAVE_VBL_IRQ
extern int DRM(wait_vblank)(DRM_IOCTL_ARGS);
#endif
/* AGP/GART support (drm_agpsupport.h) */
#if __REALLY_HAVE_AGP
extern int DRM(agp_acquire)(DRM_IOCTL_ARGS);
extern int DRM(agp_release)(DRM_IOCTL_ARGS);
extern int DRM(agp_enable)(DRM_IOCTL_ARGS);
extern int DRM(agp_info)(DRM_IOCTL_ARGS);
extern int DRM(agp_alloc)(DRM_IOCTL_ARGS);
extern int DRM(agp_free)(DRM_IOCTL_ARGS);
extern int DRM(agp_unbind)(DRM_IOCTL_ARGS);
extern int DRM(agp_bind)(DRM_IOCTL_ARGS);
#endif
/* Scatter Gather Support (drm_scatter.h) */
#if __HAVE_SG
extern int DRM(sg_alloc)(DRM_IOCTL_ARGS);
extern int DRM(sg_free)(DRM_IOCTL_ARGS);
#endif
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
#endif /* _DRM_P_H_ */ #endif /* _DRM_P_H_ */
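The header changes above converge on a single ioctl interface: every handler is now declared with the common DRM_IOCTL_ARGS signature, and drm_ioctl_desc_t holds a pointer of that type instead of a d_ioctl_t. A minimal compilable sketch of table-driven dispatch in that style; SKETCH_IOCTL_ARGS is a simplified stand-in, since the real DRM_IOCTL_ARGS macro is platform-specific:

#include <errno.h>
#include <stddef.h>

/* Illustrative stand-ins for the real DRM definitions. */
typedef struct drm_device drm_device_t;
typedef void *DRMFILE;
#define SKETCH_IOCTL_ARGS drm_device_t *dev, void *data, DRMFILE filp

typedef struct drm_ioctl_desc_sketch {
        int (*func)(SKETCH_IOCTL_ARGS);         /* was: d_ioctl_t *func */
        int auth_needed;
        int root_only;
} drm_ioctl_desc_sketch_t;

static int noop_sketch(SKETCH_IOCTL_ARGS)
{
        (void)dev; (void)data; (void)filp;
        return 0;
}

static const drm_ioctl_desc_sketch_t ioctls[] = {
        { noop_sketch, 1, 0 },                  /* auth needed, not root-only */
};

/* Dispatch by table index, as a generic core loop might do it. */
static int dispatch_sketch(unsigned int nr, SKETCH_IOCTL_ARGS)
{
        if (nr >= sizeof(ioctls) / sizeof(ioctls[0]) || ioctls[nr].func == NULL)
                return EINVAL;
        return ioctls[nr].func(dev, data, filp);
}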

@@ -310,20 +310,14 @@ static void DRM(cleanup_buf_error)(drm_buf_entry_t *entry)
sizeof(*entry->buflist), sizeof(*entry->buflist),
DRM_MEM_BUFS); DRM_MEM_BUFS);
#if __HAVE_DMA_FREELIST
DRM(freelist_destroy)(&entry->freelist);
#endif
entry->buf_count = 0; entry->buf_count = 0;
} }
} }
#if __REALLY_HAVE_AGP #if __REALLY_HAVE_AGP
int DRM(addbufs_agp)( DRM_IOCTL_ARGS ) static int DRM(addbufs_agp)(drm_device_t *dev, drm_buf_desc_t *request)
{ {
DRM_DEVICE;
drm_device_dma_t *dma = dev->dma; drm_device_dma_t *dma = dev->dma;
drm_buf_desc_t request;
drm_buf_entry_t *entry; drm_buf_entry_t *entry;
drm_buf_t *buf; drm_buf_t *buf;
unsigned long offset; unsigned long offset;
@@ -338,21 +332,17 @@ int DRM(addbufs_agp)( DRM_IOCTL_ARGS )
int i; int i;
drm_buf_t **temp_buflist; drm_buf_t **temp_buflist;
if ( !dma ) return DRM_ERR(EINVAL); count = request->count;
order = DRM(order)(request->size);
DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_desc_t *)data, sizeof(request) );
count = request.count;
order = DRM(order)( request.size );
size = 1 << order; size = 1 << order;
alignment = (request.flags & _DRM_PAGE_ALIGN) alignment = (request->flags & _DRM_PAGE_ALIGN)
? round_page(size) : size; ? round_page(size) : size;
page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0; page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
total = PAGE_SIZE << page_order; total = PAGE_SIZE << page_order;
byte_count = 0; byte_count = 0;
agp_offset = dev->agp->base + request.agp_start; agp_offset = dev->agp->base + request->agp_start;
DRM_DEBUG( "count: %d\n", count ); DRM_DEBUG( "count: %d\n", count );
DRM_DEBUG( "order: %d\n", order ); DRM_DEBUG( "order: %d\n", order );
@@ -364,36 +354,18 @@ int DRM(addbufs_agp)( DRM_IOCTL_ARGS )
if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER )
return DRM_ERR(EINVAL); return DRM_ERR(EINVAL);
if ( dev->queue_count )
return DRM_ERR(EBUSY); /* Not while in use */
DRM_SPINLOCK( &dev->count_lock );
if ( dev->buf_use ) {
DRM_SPINUNLOCK( &dev->count_lock );
return DRM_ERR(EBUSY);
}
atomic_inc( &dev->buf_alloc );
DRM_SPINUNLOCK( &dev->count_lock );
DRM_LOCK; DRM_LOCK;
entry = &dma->bufs[order]; entry = &dma->bufs[order];
if ( entry->buf_count ) { if ( entry->buf_count ) {
DRM_UNLOCK; DRM_UNLOCK;
atomic_dec( &dev->buf_alloc );
return DRM_ERR(ENOMEM); /* May only call once for each order */ return DRM_ERR(ENOMEM); /* May only call once for each order */
} }
if (count < 0 || count > 4096) {
DRM_UNLOCK;
atomic_dec( &dev->buf_alloc );
return DRM_ERR(EINVAL);
}
entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist), entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
DRM_MEM_BUFS ); DRM_MEM_BUFS );
if ( !entry->buflist ) { if ( !entry->buflist ) {
DRM_UNLOCK; DRM_UNLOCK;
atomic_dec( &dev->buf_alloc );
return DRM_ERR(ENOMEM); return DRM_ERR(ENOMEM);
} }
memset( entry->buflist, 0, count * sizeof(*entry->buflist) ); memset( entry->buflist, 0, count * sizeof(*entry->buflist) );
@@ -414,10 +386,8 @@ int DRM(addbufs_agp)( DRM_IOCTL_ARGS )
buf->bus_address = agp_offset + offset; buf->bus_address = agp_offset + offset;
buf->address = (void *)(agp_offset + offset); buf->address = (void *)(agp_offset + offset);
buf->next = NULL; buf->next = NULL;
buf->waiting = 0;
buf->pending = 0; buf->pending = 0;
buf->dma_wait = 0; buf->filp = NULL;
buf->pid = 0;
buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T); buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T), buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),
@@ -429,13 +399,6 @@ int DRM(addbufs_agp)( DRM_IOCTL_ARGS )
} }
memset( buf->dev_private, 0, buf->dev_priv_size ); memset( buf->dev_private, 0, buf->dev_priv_size );
#if __HAVE_DMA_HISTOGRAM
buf->time_queued = 0;
buf->time_dispatched = 0;
buf->time_completed = 0;
buf->time_freed = 0;
#endif
offset += alignment; offset += alignment;
entry->buf_count++; entry->buf_count++;
byte_count += PAGE_SIZE << page_order; byte_count += PAGE_SIZE << page_order;
@@ -452,7 +415,6 @@ int DRM(addbufs_agp)( DRM_IOCTL_ARGS )
/* Free the entry because it isn't valid */ /* Free the entry because it isn't valid */
DRM(cleanup_buf_error)(entry); DRM(cleanup_buf_error)(entry);
DRM_UNLOCK; DRM_UNLOCK;
atomic_dec( &dev->buf_alloc );
return DRM_ERR(ENOMEM); return DRM_ERR(ENOMEM);
} }
dma->buflist = temp_buflist; dma->buflist = temp_buflist;
@@ -467,32 +429,21 @@ int DRM(addbufs_agp)( DRM_IOCTL_ARGS )
DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count ); DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count ); DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );
#if __HAVE_DMA_FREELIST
DRM(freelist_create)( &entry->freelist, entry->buf_count );
for ( i = 0 ; i < entry->buf_count ; i++ ) {
DRM(freelist_put)( dev, &entry->freelist, &entry->buflist[i] );
}
#endif
DRM_UNLOCK; DRM_UNLOCK;
request.count = entry->buf_count; request->count = entry->buf_count;
request.size = size; request->size = size;
DRM_COPY_TO_USER_IOCTL( (drm_buf_desc_t *)data, request, sizeof(request) );
dma->flags = _DRM_DMA_USE_AGP; dma->flags = _DRM_DMA_USE_AGP;
atomic_dec( &dev->buf_alloc );
return 0; return 0;
} }
#endif /* __REALLY_HAVE_AGP */ #endif /* __REALLY_HAVE_AGP */
#if __HAVE_PCI_DMA #if __HAVE_PCI_DMA
int DRM(addbufs_pci)( DRM_IOCTL_ARGS ) static int DRM(addbufs_pci)(drm_device_t *dev, drm_buf_desc_t *request)
{ {
DRM_DEVICE;
drm_device_dma_t *dma = dev->dma; drm_device_dma_t *dma = dev->dma;
drm_buf_desc_t request;
int count; int count;
int order; int order;
int size; int size;
@@ -509,55 +460,32 @@ int DRM(addbufs_pci)( DRM_IOCTL_ARGS )
unsigned long *temp_pagelist; unsigned long *temp_pagelist;
drm_buf_t **temp_buflist; drm_buf_t **temp_buflist;
if ( !dma ) return DRM_ERR(EINVAL); count = request->count;
order = DRM(order)(request->size);
DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_desc_t *)data, sizeof(request) );
count = request.count;
order = DRM(order)( request.size );
size = 1 << order; size = 1 << order;
DRM_DEBUG( "count=%d, size=%d (%d), order=%d, queue_count=%d\n", DRM_DEBUG( "count=%d, size=%d (%d), order=%d\n",
request.count, request.size, size, request->count, request->size, size, order );
order, dev->queue_count );
if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER )
return DRM_ERR(EINVAL); return DRM_ERR(EINVAL);
if ( dev->queue_count )
return DRM_ERR(EBUSY); /* Not while in use */
alignment = (request.flags & _DRM_PAGE_ALIGN) alignment = (request->flags & _DRM_PAGE_ALIGN)
? round_page(size) : size; ? round_page(size) : size;
page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0; page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
total = PAGE_SIZE << page_order; total = PAGE_SIZE << page_order;
DRM_SPINLOCK( &dev->count_lock );
if ( dev->buf_use ) {
DRM_SPINUNLOCK( &dev->count_lock );
return DRM_ERR(EBUSY);
}
atomic_inc( &dev->buf_alloc );
DRM_SPINUNLOCK( &dev->count_lock );
DRM_LOCK; DRM_LOCK;
entry = &dma->bufs[order]; entry = &dma->bufs[order];
if ( entry->buf_count ) { if ( entry->buf_count ) {
DRM_UNLOCK; DRM_UNLOCK;
atomic_dec( &dev->buf_alloc );
return DRM_ERR(ENOMEM); /* May only call once for each order */ return DRM_ERR(ENOMEM); /* May only call once for each order */
} }
if (count < 0 || count > 4096) {
DRM_UNLOCK;
atomic_dec( &dev->buf_alloc );
return DRM_ERR(EINVAL);
}
entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist), entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
DRM_MEM_BUFS ); DRM_MEM_BUFS );
if ( !entry->buflist ) { if ( !entry->buflist ) {
DRM_UNLOCK; DRM_UNLOCK;
atomic_dec( &dev->buf_alloc );
return DRM_ERR(ENOMEM); return DRM_ERR(ENOMEM);
} }
memset( entry->buflist, 0, count * sizeof(*entry->buflist) ); memset( entry->buflist, 0, count * sizeof(*entry->buflist) );
@@ -569,7 +497,6 @@ int DRM(addbufs_pci)( DRM_IOCTL_ARGS )
count * sizeof(*entry->buflist), count * sizeof(*entry->buflist),
DRM_MEM_BUFS ); DRM_MEM_BUFS );
DRM_UNLOCK; DRM_UNLOCK;
atomic_dec( &dev->buf_alloc );
return DRM_ERR(ENOMEM); return DRM_ERR(ENOMEM);
} }
memset( entry->seglist, 0, count * sizeof(*entry->seglist) ); memset( entry->seglist, 0, count * sizeof(*entry->seglist) );
@@ -587,7 +514,6 @@ int DRM(addbufs_pci)( DRM_IOCTL_ARGS )
count * sizeof(*entry->seglist), count * sizeof(*entry->seglist),
DRM_MEM_SEGS ); DRM_MEM_SEGS );
DRM_UNLOCK; DRM_UNLOCK;
atomic_dec( &dev->buf_alloc );
return DRM_ERR(ENOMEM); return DRM_ERR(ENOMEM);
} }
@@ -622,16 +548,8 @@ int DRM(addbufs_pci)( DRM_IOCTL_ARGS )
buf->offset = (dma->byte_count + byte_count + offset); buf->offset = (dma->byte_count + byte_count + offset);
buf->address = (void *)(page + offset); buf->address = (void *)(page + offset);
buf->next = NULL; buf->next = NULL;
buf->waiting = 0;
buf->pending = 0; buf->pending = 0;
buf->dma_wait = 0; buf->filp = NULL;
buf->pid = 0;
#if __HAVE_DMA_HISTOGRAM
buf->time_queued = 0;
buf->time_dispatched = 0;
buf->time_completed = 0;
buf->time_freed = 0;
#endif
DRM_DEBUG( "buffer %d @ %p\n", DRM_DEBUG( "buffer %d @ %p\n",
entry->buf_count, buf->address ); entry->buf_count, buf->address );
} }
@@ -647,7 +565,6 @@ int DRM(addbufs_pci)( DRM_IOCTL_ARGS )
/* Free the entry because it isn't valid */ /* Free the entry because it isn't valid */
DRM(cleanup_buf_error)(entry); DRM(cleanup_buf_error)(entry);
DRM_UNLOCK; DRM_UNLOCK;
atomic_dec( &dev->buf_alloc );
return DRM_ERR(ENOMEM); return DRM_ERR(ENOMEM);
} }
dma->buflist = temp_buflist; dma->buflist = temp_buflist;
@@ -661,31 +578,20 @@ int DRM(addbufs_pci)( DRM_IOCTL_ARGS )
dma->page_count += entry->seg_count << page_order; dma->page_count += entry->seg_count << page_order;
dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order); dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
#if __HAVE_DMA_FREELIST
DRM(freelist_create)( &entry->freelist, entry->buf_count );
for ( i = 0 ; i < entry->buf_count ; i++ ) {
DRM(freelist_put)( dev, &entry->freelist, &entry->buflist[i] );
}
#endif
DRM_UNLOCK; DRM_UNLOCK;
request.count = entry->buf_count; request->count = entry->buf_count;
request.size = size; request->size = size;
DRM_COPY_TO_USER_IOCTL( (drm_buf_desc_t *)data, request, sizeof(request) );
atomic_dec( &dev->buf_alloc );
return 0; return 0;
} }
#endif /* __HAVE_PCI_DMA */ #endif /* __HAVE_PCI_DMA */
#if __REALLY_HAVE_SG #if __REALLY_HAVE_SG
int DRM(addbufs_sg)( DRM_IOCTL_ARGS ) static int DRM(addbufs_sg)(drm_device_t *dev, drm_buf_desc_t *request)
{ {
DRM_DEVICE;
drm_device_dma_t *dma = dev->dma; drm_device_dma_t *dma = dev->dma;
drm_buf_desc_t request;
drm_buf_entry_t *entry; drm_buf_entry_t *entry;
drm_buf_t *buf; drm_buf_t *buf;
unsigned long offset; unsigned long offset;
@@ -700,21 +606,17 @@ int DRM(addbufs_sg)( DRM_IOCTL_ARGS )
int i; int i;
drm_buf_t **temp_buflist; drm_buf_t **temp_buflist;
if ( !dma ) return DRM_ERR(EINVAL); count = request->count;
order = DRM(order)(request->size);
DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_desc_t *)data, sizeof(request) );
count = request.count;
order = DRM(order)( request.size );
size = 1 << order; size = 1 << order;
alignment = (request.flags & _DRM_PAGE_ALIGN) alignment = (request->flags & _DRM_PAGE_ALIGN)
? round_page(size) : size; ? round_page(size) : size;
page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0; page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
total = PAGE_SIZE << page_order; total = PAGE_SIZE << page_order;
byte_count = 0; byte_count = 0;
agp_offset = request.agp_start; agp_offset = request->agp_start;
DRM_DEBUG( "count: %d\n", count ); DRM_DEBUG( "count: %d\n", count );
DRM_DEBUG( "order: %d\n", order ); DRM_DEBUG( "order: %d\n", order );
@@ -726,35 +628,18 @@ int DRM(addbufs_sg)( DRM_IOCTL_ARGS )
if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER )
return DRM_ERR(EINVAL); return DRM_ERR(EINVAL);
if ( dev->queue_count ) return DRM_ERR(EBUSY); /* Not while in use */
DRM_SPINLOCK( &dev->count_lock );
if ( dev->buf_use ) {
DRM_SPINUNLOCK( &dev->count_lock );
return DRM_ERR(EBUSY);
}
atomic_inc( &dev->buf_alloc );
DRM_SPINUNLOCK( &dev->count_lock );
DRM_LOCK; DRM_LOCK;
entry = &dma->bufs[order]; entry = &dma->bufs[order];
if ( entry->buf_count ) { if ( entry->buf_count ) {
DRM_UNLOCK; DRM_UNLOCK;
atomic_dec( &dev->buf_alloc );
return DRM_ERR(ENOMEM); /* May only call once for each order */ return DRM_ERR(ENOMEM); /* May only call once for each order */
} }
if (count < 0 || count > 4096) {
DRM_UNLOCK;
atomic_dec( &dev->buf_alloc );
return DRM_ERR(EINVAL);
}
entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist), entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
DRM_MEM_BUFS ); DRM_MEM_BUFS );
if ( !entry->buflist ) { if ( !entry->buflist ) {
DRM_UNLOCK; DRM_UNLOCK;
atomic_dec( &dev->buf_alloc );
return DRM_ERR(ENOMEM); return DRM_ERR(ENOMEM);
} }
memset( entry->buflist, 0, count * sizeof(*entry->buflist) ); memset( entry->buflist, 0, count * sizeof(*entry->buflist) );
@@ -775,10 +660,8 @@ int DRM(addbufs_sg)( DRM_IOCTL_ARGS )
buf->bus_address = agp_offset + offset; buf->bus_address = agp_offset + offset;
buf->address = (void *)(agp_offset + offset + dev->sg->handle); buf->address = (void *)(agp_offset + offset + dev->sg->handle);
buf->next = NULL; buf->next = NULL;
buf->waiting = 0;
buf->pending = 0; buf->pending = 0;
buf->dma_wait = 0; buf->filp = NULL;
buf->pid = 0;
buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T); buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T), buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),
@@ -788,18 +671,11 @@ int DRM(addbufs_sg)( DRM_IOCTL_ARGS )
entry->buf_count = count; entry->buf_count = count;
DRM(cleanup_buf_error)(entry); DRM(cleanup_buf_error)(entry);
DRM_UNLOCK; DRM_UNLOCK;
atomic_dec( &dev->buf_alloc );
return DRM_ERR(ENOMEM); return DRM_ERR(ENOMEM);
} }
memset( buf->dev_private, 0, buf->dev_priv_size ); memset( buf->dev_private, 0, buf->dev_priv_size );
# if __HAVE_DMA_HISTOGRAM
buf->time_queued = 0;
buf->time_dispatched = 0;
buf->time_completed = 0;
buf->time_freed = 0;
# endif
DRM_DEBUG( "buffer %d @ %p\n", DRM_DEBUG( "buffer %d @ %p\n",
entry->buf_count, buf->address ); entry->buf_count, buf->address );
@@ -819,7 +695,6 @@ int DRM(addbufs_sg)( DRM_IOCTL_ARGS )
/* Free the entry because it isn't valid */ /* Free the entry because it isn't valid */
DRM(cleanup_buf_error)(entry); DRM(cleanup_buf_error)(entry);
DRM_UNLOCK; DRM_UNLOCK;
atomic_dec( &dev->buf_alloc );
return DRM_ERR(ENOMEM); return DRM_ERR(ENOMEM);
} }
dma->buflist = temp_buflist; dma->buflist = temp_buflist;
@@ -834,47 +709,65 @@ int DRM(addbufs_sg)( DRM_IOCTL_ARGS )
DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count ); DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count ); DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );
#if __HAVE_DMA_FREELIST
DRM(freelist_create)( &entry->freelist, entry->buf_count );
for ( i = 0 ; i < entry->buf_count ; i++ ) {
DRM(freelist_put)( dev, &entry->freelist, &entry->buflist[i] );
}
#endif
DRM_UNLOCK; DRM_UNLOCK;
request.count = entry->buf_count; request->count = entry->buf_count;
request.size = size; request->size = size;
DRM_COPY_TO_USER_IOCTL( (drm_buf_desc_t *)data, request, sizeof(request) );
dma->flags = _DRM_DMA_USE_SG; dma->flags = _DRM_DMA_USE_SG;
atomic_dec( &dev->buf_alloc );
return 0; return 0;
} }
#endif /* __REALLY_HAVE_SG */ #endif /* __REALLY_HAVE_SG */
int DRM(addbufs)( DRM_IOCTL_ARGS ) int DRM(addbufs)( DRM_IOCTL_ARGS )
{ {
DRM_DEVICE;
drm_buf_desc_t request; drm_buf_desc_t request;
int err;
DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_desc_t *)data, sizeof(request) ); DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_desc_t *)data, sizeof(request) );
if (dev->dma == NULL)
return DRM_ERR(EINVAL);
if (request.count < 0 || request.count > 4096)
return DRM_ERR(EINVAL);
DRM_SPINLOCK(&dev->count_lock);
if (dev->buf_use) {
DRM_SPINUNLOCK(&dev->count_lock);
return DRM_ERR(EBUSY);
}
/* dev->buf_alloc acts as a lock to prevent infobufs/mapbufs from
* trying to read from the dma->bufs while buffers are being allocated */
dev->buf_alloc++;
DRM_SPINUNLOCK(&dev->count_lock);
#if __REALLY_HAVE_AGP #if __REALLY_HAVE_AGP
if ( request.flags & _DRM_AGP_BUFFER ) if ( request.flags & _DRM_AGP_BUFFER )
return DRM(addbufs_agp)( kdev, cmd, data, flags, p ); err = DRM(addbufs_agp)(dev, &request);
else else
#endif #endif
#if __REALLY_HAVE_SG #if __REALLY_HAVE_SG
if ( request.flags & _DRM_SG_BUFFER ) if ( request.flags & _DRM_SG_BUFFER )
return DRM(addbufs_sg)( kdev, cmd, data, flags, p ); err = DRM(addbufs_sg)(dev, &request);
else else
#endif #endif
#if __HAVE_PCI_DMA #if __HAVE_PCI_DMA
return DRM(addbufs_pci)( kdev, cmd, data, flags, p ); err = DRM(addbufs_pci)(dev, &request);
#else #else
return DRM_ERR(EINVAL); err = DRM_ERR(EINVAL);
#endif #endif
DRM_COPY_TO_USER_IOCTL((drm_buf_desc_t *)data, request, sizeof(request));
DRM_SPINLOCK(&dev->count_lock);
dev->buf_alloc--;
DRM_SPINUNLOCK(&dev->count_lock);
return err;
} }
int DRM(infobufs)( DRM_IOCTL_ARGS ) int DRM(infobufs)( DRM_IOCTL_ARGS )
@@ -888,7 +781,7 @@ int DRM(infobufs)( DRM_IOCTL_ARGS )
if ( !dma ) return DRM_ERR(EINVAL); if ( !dma ) return DRM_ERR(EINVAL);
DRM_SPINLOCK( &dev->count_lock ); DRM_SPINLOCK( &dev->count_lock );
if ( atomic_read( &dev->buf_alloc ) ) { if (dev->buf_alloc != 0) {
DRM_SPINUNLOCK( &dev->count_lock ); DRM_SPINUNLOCK( &dev->count_lock );
return DRM_ERR(EBUSY); return DRM_ERR(EBUSY);
} }
@@ -906,21 +799,15 @@ int DRM(infobufs)( DRM_IOCTL_ARGS )
if ( request.count >= count ) { if ( request.count >= count ) {
for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) { for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
if ( dma->bufs[i].buf_count ) { if ( dma->bufs[i].buf_count ) {
drm_buf_desc_t *to = &request.list[count]; drm_buf_desc_t from;
drm_buf_entry_t *from = &dma->bufs[i];
drm_freelist_t *list = &dma->bufs[i].freelist; from.count = dma->bufs[i].buf_count;
if ( DRM_COPY_TO_USER( &to->count, from.size = dma->bufs[i].buf_size;
&from->buf_count, from.low_mark = dma->bufs[i].freelist.low_mark;
sizeof(from->buf_count) ) || from.high_mark = dma->bufs[i].freelist.high_mark;
DRM_COPY_TO_USER( &to->size,
&from->buf_size, if (DRM_COPY_TO_USER(&request.list[count], &from,
sizeof(from->buf_size) ) || sizeof(drm_buf_desc_t)) != 0)
DRM_COPY_TO_USER( &to->low_mark,
&list->low_mark,
sizeof(list->low_mark) ) ||
DRM_COPY_TO_USER( &to->high_mark,
&list->high_mark,
sizeof(list->high_mark) ) )
return DRM_ERR(EFAULT); return DRM_ERR(EFAULT);
DRM_DEBUG( "%d %d %d %d %d\n", DRM_DEBUG( "%d %d %d %d %d\n",
@@ -995,9 +882,9 @@ int DRM(freebufs)( DRM_IOCTL_ARGS )
return DRM_ERR(EINVAL); return DRM_ERR(EINVAL);
} }
buf = dma->buflist[idx]; buf = dma->buflist[idx];
if ( buf->pid != DRM_CURRENTPID ) { if ( buf->filp != filp ) {
DRM_ERROR( "Process %d freeing buffer owned by %d\n", DRM_ERROR("Process %d freeing buffer not owned\n",
DRM_CURRENTPID, buf->pid ); DRM_CURRENTPID);
return DRM_ERR(EINVAL); return DRM_ERR(EINVAL);
} }
DRM(free_buffer)( dev, buf ); DRM(free_buffer)( dev, buf );
@@ -1031,7 +918,7 @@ int DRM(mapbufs)( DRM_IOCTL_ARGS )
if ( !dma ) return DRM_ERR(EINVAL); if ( !dma ) return DRM_ERR(EINVAL);
DRM_SPINLOCK( &dev->count_lock ); DRM_SPINLOCK( &dev->count_lock );
if ( atomic_read( &dev->buf_alloc ) ) { if (dev->buf_alloc != 0) {
DRM_SPINUNLOCK( &dev->count_lock ); DRM_SPINUNLOCK( &dev->count_lock );
return DRM_ERR(EBUSY); return DRM_ERR(EBUSY);
} }

@@ -32,7 +32,9 @@
#include "dev/drm/drmP.h" #include "dev/drm/drmP.h"
#if __HAVE_CTX_BITMAP #if !__HAVE_CTX_BITMAP
#error "__HAVE_CTX_BITMAP must be defined"
#endif
/* ================================================================ /* ================================================================
* Context bitmap support * Context bitmap support
@@ -206,17 +208,11 @@ int DRM(setsareactx)( DRM_IOCTL_ARGS )
int DRM(context_switch)( drm_device_t *dev, int old, int new ) int DRM(context_switch)( drm_device_t *dev, int old, int new )
{ {
char buf[64];
if ( test_and_set_bit( 0, &dev->context_flag ) ) { if ( test_and_set_bit( 0, &dev->context_flag ) ) {
DRM_ERROR( "Reentering -- FIXME\n" ); DRM_ERROR( "Reentering -- FIXME\n" );
return DRM_ERR(EBUSY); return DRM_ERR(EBUSY);
} }
#if __HAVE_DMA_HISTOGRAM
dev->ctx_start = get_cycles();
#endif
DRM_DEBUG( "Context switch from %d to %d\n", old, new ); DRM_DEBUG( "Context switch from %d to %d\n", old, new );
if ( new == dev->last_context ) { if ( new == dev->last_context ) {
@@ -224,20 +220,12 @@ int DRM(context_switch)( drm_device_t *dev, int old, int new )
return 0; return 0;
} }
if ( DRM(flags) & DRM_FLAG_NOCTX ) {
DRM(context_switch_complete)( dev, new );
} else {
sprintf( buf, "C %d %d\n", old, new );
DRM(write_string)( dev, buf );
}
return 0; return 0;
} }
int DRM(context_switch_complete)( drm_device_t *dev, int new ) int DRM(context_switch_complete)( drm_device_t *dev, int new )
{ {
dev->last_context = new; /* PRE/POST: This is the _only_ writer. */ dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
dev->last_switch = jiffies;
if ( !_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) ) { if ( !_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) ) {
DRM_ERROR( "Lock isn't held after context switch\n" ); DRM_ERROR( "Lock isn't held after context switch\n" );
@@ -246,13 +234,7 @@ int DRM(context_switch_complete)( drm_device_t *dev, int new )
/* If a context switch is ever initiated /* If a context switch is ever initiated
when the kernel holds the lock, release when the kernel holds the lock, release
that lock here. */ that lock here. */
#if __HAVE_DMA_HISTOGRAM
atomic_inc( &dev->histo.ctx[DRM(histogram_slot)(get_cycles()
- dev->ctx_start)] );
#endif
clear_bit( 0, &dev->context_flag ); clear_bit( 0, &dev->context_flag );
DRM_WAKEUP( (void *)&dev->context_wait );
return 0; return 0;
} }
@@ -363,364 +345,3 @@ int DRM(rmctx)( DRM_IOCTL_ARGS )
return 0; return 0;
} }
#else /* __HAVE_CTX_BITMAP */
/* ================================================================
* Old-style context support
*/
int DRM(context_switch)(drm_device_t *dev, int old, int new)
{
char buf[64];
drm_queue_t *q;
#if 0
atomic_inc(&dev->total_ctx);
#endif
if (test_and_set_bit(0, &dev->context_flag)) {
DRM_ERROR("Reentering -- FIXME\n");
return DRM_ERR(EBUSY);
}
#if __HAVE_DMA_HISTOGRAM
dev->ctx_start = get_cycles();
#endif
DRM_DEBUG("Context switch from %d to %d\n", old, new);
if (new >= dev->queue_count) {
clear_bit(0, &dev->context_flag);
return DRM_ERR(EINVAL);
}
if (new == dev->last_context) {
clear_bit(0, &dev->context_flag);
return 0;
}
q = dev->queuelist[new];
atomic_inc(&q->use_count);
if (atomic_read(&q->use_count) == 1) {
atomic_dec(&q->use_count);
clear_bit(0, &dev->context_flag);
return DRM_ERR(EINVAL);
}
if (DRM(flags) & DRM_FLAG_NOCTX) {
DRM(context_switch_complete)(dev, new);
} else {
sprintf(buf, "C %d %d\n", old, new);
DRM(write_string)(dev, buf);
}
atomic_dec(&q->use_count);
return 0;
}
int DRM(context_switch_complete)(drm_device_t *dev, int new)
{
drm_device_dma_t *dma = dev->dma;
dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
dev->last_switch = jiffies;
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
DRM_ERROR("Lock isn't held after context switch\n");
}
if (!dma || !(dma->next_buffer && dma->next_buffer->while_locked)) {
if (DRM(lock_free)(dev, &dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT)) {
DRM_ERROR("Cannot free lock\n");
}
}
#if __HAVE_DMA_HISTOGRAM
atomic_inc(&dev->histo.ctx[DRM(histogram_slot)(get_cycles()
- dev->ctx_start)]);
#endif
clear_bit(0, &dev->context_flag);
DRM_WAKEUP_INT(&dev->context_wait);
return 0;
}
static int DRM(init_queue)(drm_device_t *dev, drm_queue_t *q, drm_ctx_t *ctx)
{
DRM_DEBUG("\n");
if (atomic_read(&q->use_count) != 1
|| atomic_read(&q->finalization)
|| atomic_read(&q->block_count)) {
DRM_ERROR("New queue is already in use: u%ld f%ld b%ld\n",
(unsigned long)atomic_read(&q->use_count),
(unsigned long)atomic_read(&q->finalization),
(unsigned long)atomic_read(&q->block_count));
}
atomic_set(&q->finalization, 0);
atomic_set(&q->block_count, 0);
atomic_set(&q->block_read, 0);
atomic_set(&q->block_write, 0);
atomic_set(&q->total_queued, 0);
atomic_set(&q->total_flushed, 0);
atomic_set(&q->total_locks, 0);
q->write_queue = 0;
q->read_queue = 0;
q->flush_queue = 0;
q->flags = ctx->flags;
DRM(waitlist_create)(&q->waitlist, dev->dma->buf_count);
return 0;
}
/* drm_alloc_queue:
PRE: 1) dev->queuelist[0..dev->queue_count] is allocated and will not
disappear (so all deallocation must be done after IOCTLs are off)
2) dev->queue_count < dev->queue_slots
3) dev->queuelist[i].use_count == 0 and
dev->queuelist[i].finalization == 0 if i not in use
POST: 1) dev->queuelist[i].use_count == 1
2) dev->queue_count < dev->queue_slots */
static int DRM(alloc_queue)(drm_device_t *dev)
{
int i;
drm_queue_t *queue;
int oldslots;
int newslots;
/* Check for a free queue */
for (i = 0; i < dev->queue_count; i++) {
atomic_inc(&dev->queuelist[i]->use_count);
if (atomic_read(&dev->queuelist[i]->use_count) == 1
&& !atomic_read(&dev->queuelist[i]->finalization)) {
DRM_DEBUG("%d (free)\n", i);
return i;
}
atomic_dec(&dev->queuelist[i]->use_count);
}
/* Allocate a new queue */
DRM_LOCK;
queue = gamma_alloc(sizeof(*queue), DRM_MEM_QUEUES);
memset(queue, 0, sizeof(*queue));
atomic_set(&queue->use_count, 1);
++dev->queue_count;
if (dev->queue_count >= dev->queue_slots) {
oldslots = dev->queue_slots * sizeof(*dev->queuelist);
if (!dev->queue_slots) dev->queue_slots = 1;
dev->queue_slots *= 2;
newslots = dev->queue_slots * sizeof(*dev->queuelist);
dev->queuelist = DRM(realloc)(dev->queuelist,
oldslots,
newslots,
DRM_MEM_QUEUES);
if (!dev->queuelist) {
DRM_UNLOCK;
DRM_DEBUG("out of memory\n");
return DRM_ERR(ENOMEM);
}
}
dev->queuelist[dev->queue_count-1] = queue;
DRM_UNLOCK;
DRM_DEBUG("%d (new)\n", dev->queue_count - 1);
return dev->queue_count - 1;
}
int DRM(resctx)( DRM_IOCTL_ARGS )
{
drm_ctx_res_t res;
drm_ctx_t ctx;
int i;
DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS);
DRM_COPY_FROM_USER_IOCTL( res, (drm_ctx_res_t *)data, sizeof(res) );
if (res.count >= DRM_RESERVED_CONTEXTS) {
memset(&ctx, 0, sizeof(ctx));
for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
ctx.handle = i;
if (DRM_COPY_TO_USER(&res.contexts[i],
&i,
sizeof(i)))
return DRM_ERR(EFAULT);
}
}
res.count = DRM_RESERVED_CONTEXTS;
DRM_COPY_TO_USER_IOCTL( (drm_ctx_res_t *)data, res, sizeof(res) );
return 0;
}
int DRM(addctx)( DRM_IOCTL_ARGS )
{
DRM_DEVICE;
drm_ctx_t ctx;
DRM_COPY_FROM_USER_IOCTL( ctx, (drm_ctx_t *)data, sizeof(ctx) );
if ((ctx.handle = DRM(alloc_queue)(dev)) == DRM_KERNEL_CONTEXT) {
/* Init kernel's context and get a new one. */
DRM(init_queue)(dev, dev->queuelist[ctx.handle], &ctx);
ctx.handle = DRM(alloc_queue)(dev);
}
DRM(init_queue)(dev, dev->queuelist[ctx.handle], &ctx);
DRM_DEBUG("%d\n", ctx.handle);
DRM_COPY_TO_USER_IOCTL( (drm_ctx_t *)data, ctx, sizeof(ctx) );
return 0;
}
int DRM(modctx)( DRM_IOCTL_ARGS )
{
DRM_DEVICE;
drm_ctx_t ctx;
drm_queue_t *q;
DRM_COPY_FROM_USER_IOCTL( ctx, (drm_ctx_t *)data, sizeof(ctx) );
DRM_DEBUG("%d\n", ctx.handle);
if (ctx.handle < 0 || ctx.handle >= dev->queue_count)
return DRM_ERR(EINVAL);
q = dev->queuelist[ctx.handle];
atomic_inc(&q->use_count);
if (atomic_read(&q->use_count) == 1) {
/* No longer in use */
atomic_dec(&q->use_count);
return DRM_ERR(EINVAL);
}
if (DRM_BUFCOUNT(&q->waitlist)) {
atomic_dec(&q->use_count);
return DRM_ERR(EBUSY);
}
q->flags = ctx.flags;
atomic_dec(&q->use_count);
return 0;
}
int DRM(getctx)( DRM_IOCTL_ARGS )
{
DRM_DEVICE;
drm_ctx_t ctx;
drm_queue_t *q;
DRM_COPY_FROM_USER_IOCTL( ctx, (drm_ctx_t *)data, sizeof(ctx) );
DRM_DEBUG("%d\n", ctx.handle);
if (ctx.handle >= dev->queue_count)
return DRM_ERR(EINVAL);
q = dev->queuelist[ctx.handle];
atomic_inc(&q->use_count);
if (atomic_read(&q->use_count) == 1) {
/* No longer in use */
atomic_dec(&q->use_count);
return DRM_ERR(EINVAL);
}
ctx.flags = q->flags;
atomic_dec(&q->use_count);
DRM_COPY_TO_USER_IOCTL( (drm_ctx_t *)data, ctx, sizeof(ctx) );
return 0;
}
int DRM(switchctx)( DRM_IOCTL_ARGS )
{
DRM_DEVICE;
drm_ctx_t ctx;
DRM_COPY_FROM_USER_IOCTL( ctx, (drm_ctx_t *)data, sizeof(ctx) );
DRM_DEBUG("%d\n", ctx.handle);
return DRM(context_switch)(dev, dev->last_context, ctx.handle);
}
int DRM(newctx)( DRM_IOCTL_ARGS )
{
DRM_DEVICE;
drm_ctx_t ctx;
DRM_COPY_FROM_USER_IOCTL( ctx, (drm_ctx_t *)data, sizeof(ctx) );
DRM_DEBUG("%d\n", ctx.handle);
DRM(context_switch_complete)(dev, ctx.handle);
return 0;
}
int DRM(rmctx)( DRM_IOCTL_ARGS )
{
DRM_DEVICE;
drm_ctx_t ctx;
drm_queue_t *q;
drm_buf_t *buf;
DRM_COPY_FROM_USER_IOCTL( ctx, (drm_ctx_t *)data, sizeof(ctx) );
DRM_DEBUG("%d\n", ctx.handle);
if (ctx.handle >= dev->queue_count) return DRM_ERR(EINVAL);
q = dev->queuelist[ctx.handle];
atomic_inc(&q->use_count);
if (atomic_read(&q->use_count) == 1) {
/* No longer in use */
atomic_dec(&q->use_count);
return DRM_ERR(EINVAL);
}
atomic_inc(&q->finalization); /* Mark queue in finalization state */
atomic_sub(2, &q->use_count); /* Mark queue as unused (pending
finalization) */
while (test_and_set_bit(0, &dev->interrupt_flag)) {
static int never;
int retcode;
retcode = tsleep(&never, PZERO|PCATCH, "never", 1);
if (retcode)
return retcode;
}
/* Remove queued buffers */
while ((buf = DRM(waitlist_get)(&q->waitlist))) {
DRM(free_buffer)(dev, buf);
}
clear_bit(0, &dev->interrupt_flag);
/* Wakeup blocked processes */
wakeup( &q->block_read );
wakeup( &q->block_write );
DRM_WAKEUP_INT( &q->flush_queue );
/* Finalization over. Queue is made
available when both use_count and
finalization become 0, which won't
happen until all the waiting processes
stop waiting. */
atomic_dec(&q->finalization);
return 0;
}
#endif /* __HAVE_CTX_BITMAP */
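With the old queue-based context code deleted, context handles come only from the bitmap path, hence the new #error when __HAVE_CTX_BITMAP is unset. A simplified, non-atomic sketch of first-free-bit allocation over such a bitmap; the real code uses atomic_t words and up to DRM_MAX_CTXBITMAP (PAGE_SIZE * 8) bits:

#include <limits.h>

#define CTX_BITS        128             /* real limit: DRM_MAX_CTXBITMAP */
#define BITS_PER_WORD   (sizeof(unsigned long) * CHAR_BIT)

static unsigned long ctx_bitmap_sketch[CTX_BITS / BITS_PER_WORD];

static int ctx_alloc_sketch(void)
{
        unsigned int i;

        for (i = 0; i < CTX_BITS; i++) {
                unsigned long *word = &ctx_bitmap_sketch[i / BITS_PER_WORD];
                unsigned long mask = 1UL << (i % BITS_PER_WORD);

                if ((*word & mask) == 0) {      /* free slot */
                        *word |= mask;          /* claim it; no SMP safety here */
                        return (int)i;          /* the context handle */
                }
        }
        return -1;                              /* caller maps this to ENOMEM */
}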

@@ -98,9 +98,6 @@ void DRM(dma_takedown)(drm_device_t *dev)
dma->bufs[i].buf_count * dma->bufs[i].buf_count *
sizeof(*dma->bufs[0].buflist), sizeof(*dma->bufs[0].buflist),
DRM_MEM_BUFS); DRM_MEM_BUFS);
#if __HAVE_DMA_FREELIST
DRM(freelist_destroy)(&dma->bufs[i].freelist);
#endif
} }
} }
@@ -120,99 +117,24 @@ void DRM(dma_takedown)(drm_device_t *dev)
} }
#if __HAVE_DMA_HISTOGRAM
/* This is slow, but is useful for debugging. */
int DRM(histogram_slot)(unsigned long count)
{
int value = DRM_DMA_HISTOGRAM_INITIAL;
int slot;
for (slot = 0;
slot < DRM_DMA_HISTOGRAM_SLOTS;
++slot, value = DRM_DMA_HISTOGRAM_NEXT(value)) {
if (count < value) return slot;
}
return DRM_DMA_HISTOGRAM_SLOTS - 1;
}
void DRM(histogram_compute)(drm_device_t *dev, drm_buf_t *buf)
{
cycles_t queued_to_dispatched;
cycles_t dispatched_to_completed;
cycles_t completed_to_freed;
int q2d, d2c, c2f, q2c, q2f;
if (buf->time_queued) {
queued_to_dispatched = (buf->time_dispatched
- buf->time_queued);
dispatched_to_completed = (buf->time_completed
- buf->time_dispatched);
completed_to_freed = (buf->time_freed
- buf->time_completed);
q2d = DRM(histogram_slot)(queued_to_dispatched);
d2c = DRM(histogram_slot)(dispatched_to_completed);
c2f = DRM(histogram_slot)(completed_to_freed);
q2c = DRM(histogram_slot)(queued_to_dispatched
+ dispatched_to_completed);
q2f = DRM(histogram_slot)(queued_to_dispatched
+ dispatched_to_completed
+ completed_to_freed);
atomic_inc(&dev->histo.total);
atomic_inc(&dev->histo.queued_to_dispatched[q2d]);
atomic_inc(&dev->histo.dispatched_to_completed[d2c]);
atomic_inc(&dev->histo.completed_to_freed[c2f]);
atomic_inc(&dev->histo.queued_to_completed[q2c]);
atomic_inc(&dev->histo.queued_to_freed[q2f]);
}
buf->time_queued = 0;
buf->time_dispatched = 0;
buf->time_completed = 0;
buf->time_freed = 0;
}
#endif
void DRM(free_buffer)(drm_device_t *dev, drm_buf_t *buf) void DRM(free_buffer)(drm_device_t *dev, drm_buf_t *buf)
{ {
if (!buf) return; if (!buf) return;
buf->waiting = 0;
buf->pending = 0; buf->pending = 0;
buf->pid = 0; buf->filp = NULL;
buf->used = 0; buf->used = 0;
#if __HAVE_DMA_HISTOGRAM
buf->time_completed = get_cycles();
#endif
if ( buf->dma_wait ) {
wakeup( (void *)&buf->dma_wait );
buf->dma_wait = 0;
}
#if __HAVE_DMA_FREELIST
else {
drm_device_dma_t *dma = dev->dma;
/* If processes are waiting, the last one
to wake will put the buffer on the free
list. If no processes are waiting, we
put the buffer on the freelist here. */
DRM(freelist_put)(dev, &dma->bufs[buf->order].freelist, buf);
}
#endif
} }
#if !__HAVE_DMA_RECLAIM #if !__HAVE_DMA_RECLAIM
void DRM(reclaim_buffers)(drm_device_t *dev, pid_t pid) void DRM(reclaim_buffers)(drm_device_t *dev, DRMFILE filp)
{ {
drm_device_dma_t *dma = dev->dma; drm_device_dma_t *dma = dev->dma;
int i; int i;
if (!dma) return; if (!dma) return;
for (i = 0; i < dma->buf_count; i++) { for (i = 0; i < dma->buf_count; i++) {
if (dma->buflist[i]->pid == pid) { if (dma->buflist[i]->filp == filp) {
switch (dma->buflist[i]->list) { switch (dma->buflist[i]->list) {
case DRM_LIST_NONE: case DRM_LIST_NONE:
DRM(free_buffer)(dev, dma->buflist[i]); DRM(free_buffer)(dev, dma->buflist[i]);
@@ -230,271 +152,6 @@ void DRM(reclaim_buffers)(drm_device_t *dev, pid_t pid)
#endif #endif
/* GH: This is a big hack for now...
*/
#if __HAVE_OLD_DMA
void DRM(clear_next_buffer)(drm_device_t *dev)
{
drm_device_dma_t *dma = dev->dma;
dma->next_buffer = NULL;
if (dma->next_queue && !DRM_BUFCOUNT(&dma->next_queue->waitlist)) {
DRM_WAKEUP_INT(&dma->next_queue->flush_queue);
}
dma->next_queue = NULL;
}
int DRM(select_queue)(drm_device_t *dev, void (*wrapper)(unsigned long))
{
int i;
int candidate = -1;
int j = jiffies;
if (!dev) {
DRM_ERROR("No device\n");
return -1;
}
if (!dev->queuelist || !dev->queuelist[DRM_KERNEL_CONTEXT]) {
/* This only happens between the time the
interrupt is initialized and the time
the queues are initialized. */
return -1;
}
/* Doing "while locked" DMA? */
if (DRM_WAITCOUNT(dev, DRM_KERNEL_CONTEXT)) {
return DRM_KERNEL_CONTEXT;
}
/* If there are buffers on the last_context
queue, and we have not been executing
this context very long, continue to
execute this context. */
if (dev->last_switch <= j
&& dev->last_switch + DRM_TIME_SLICE > j
&& DRM_WAITCOUNT(dev, dev->last_context)) {
return dev->last_context;
}
/* Otherwise, find a candidate */
for (i = dev->last_checked + 1; i < dev->queue_count; i++) {
if (DRM_WAITCOUNT(dev, i)) {
candidate = dev->last_checked = i;
break;
}
}
if (candidate < 0) {
for (i = 0; i < dev->queue_count; i++) {
if (DRM_WAITCOUNT(dev, i)) {
candidate = dev->last_checked = i;
break;
}
}
}
if (wrapper
&& candidate >= 0
&& candidate != dev->last_context
&& dev->last_switch <= j
&& dev->last_switch + DRM_TIME_SLICE > j) {
int s = splclock();
if (dev->timer.c_time != dev->last_switch + DRM_TIME_SLICE) {
callout_reset(&dev->timer,
dev->last_switch + DRM_TIME_SLICE - j,
(void (*)(void *))wrapper,
dev);
}
splx(s);
return -1;
}
return candidate;
}
int DRM(dma_enqueue)(drm_device_t *dev, drm_dma_t *d)
{
int i;
drm_queue_t *q;
drm_buf_t *buf;
int idx;
int while_locked = 0;
drm_device_dma_t *dma = dev->dma;
int error;
DRM_DEBUG("%d\n", d->send_count);
if (d->flags & _DRM_DMA_WHILE_LOCKED) {
int context = dev->lock.hw_lock->lock;
if (!_DRM_LOCK_IS_HELD(context)) {
DRM_ERROR("No lock held during \"while locked\""
" request\n");
return DRM_ERR(EINVAL);
}
if (d->context != _DRM_LOCKING_CONTEXT(context)
&& _DRM_LOCKING_CONTEXT(context) != DRM_KERNEL_CONTEXT) {
DRM_ERROR("Lock held by %d while %d makes"
" \"while locked\" request\n",
_DRM_LOCKING_CONTEXT(context),
d->context);
return DRM_ERR(EINVAL);
}
q = dev->queuelist[DRM_KERNEL_CONTEXT];
while_locked = 1;
} else {
q = dev->queuelist[d->context];
}
atomic_inc(&q->use_count);
if (atomic_read(&q->block_write)) {
atomic_inc(&q->block_count);
for (;;) {
if (!atomic_read(&q->block_write)) break;
error = tsleep(&q->block_write, PZERO|PCATCH,
"dmawr", 0);
if (error) {
atomic_dec(&q->use_count);
return error;
}
}
atomic_dec(&q->block_count);
}
for (i = 0; i < d->send_count; i++) {
idx = d->send_indices[i];
if (idx < 0 || idx >= dma->buf_count) {
atomic_dec(&q->use_count);
DRM_ERROR("Index %d (of %d max)\n",
d->send_indices[i], dma->buf_count - 1);
return DRM_ERR(EINVAL);
}
buf = dma->buflist[ idx ];
if (buf->pid != DRM_CURRENTPID) {
atomic_dec(&q->use_count);
DRM_ERROR("Process %d using buffer owned by %d\n",
DRM_CURRENTPID, buf->pid);
return DRM_ERR(EINVAL);
}
if (buf->list != DRM_LIST_NONE) {
atomic_dec(&q->use_count);
DRM_ERROR("Process %d using buffer %d on list %d\n",
DRM_CURRENTPID, buf->idx, buf->list);
}
buf->used = d->send_sizes[i];
buf->while_locked = while_locked;
buf->context = d->context;
if (!buf->used) {
DRM_ERROR("Queueing 0 length buffer\n");
}
if (buf->pending) {
atomic_dec(&q->use_count);
DRM_ERROR("Queueing pending buffer:"
" buffer %d, offset %d\n",
d->send_indices[i], i);
return DRM_ERR(EINVAL);
}
if (buf->waiting) {
atomic_dec(&q->use_count);
DRM_ERROR("Queueing waiting buffer:"
" buffer %d, offset %d\n",
d->send_indices[i], i);
return DRM_ERR(EINVAL);
}
buf->waiting = 1;
if (atomic_read(&q->use_count) == 1
|| atomic_read(&q->finalization)) {
DRM(free_buffer)(dev, buf);
} else {
DRM(waitlist_put)(&q->waitlist, buf);
atomic_inc(&q->total_queued);
}
}
atomic_dec(&q->use_count);
return 0;
}
static int DRM(dma_get_buffers_of_order)(drm_device_t *dev, drm_dma_t *d,
int order)
{
int i;
drm_buf_t *buf;
drm_device_dma_t *dma = dev->dma;
for (i = d->granted_count; i < d->request_count; i++) {
buf = DRM(freelist_get)(&dma->bufs[order].freelist,
d->flags & _DRM_DMA_WAIT);
if (!buf) break;
if (buf->pending || buf->waiting) {
DRM_ERROR("Free buffer %d in use by %d (w%d, p%d)\n",
buf->idx,
buf->pid,
buf->waiting,
buf->pending);
}
buf->pid = DRM_CURRENTPID;
if (DRM_COPY_TO_USER(&d->request_indices[i],
&buf->idx,
sizeof(buf->idx)))
return DRM_ERR(EFAULT);
if (DRM_COPY_TO_USER(&d->request_sizes[i],
&buf->total,
sizeof(buf->total)))
return DRM_ERR(EFAULT);
++d->granted_count;
}
return 0;
}
int DRM(dma_get_buffers)(drm_device_t *dev, drm_dma_t *dma)
{
int order;
int retcode = 0;
int tmp_order;
order = DRM(order)(dma->request_size);
dma->granted_count = 0;
retcode = DRM(dma_get_buffers_of_order)(dev, dma, order);
if (dma->granted_count < dma->request_count
&& (dma->flags & _DRM_DMA_SMALLER_OK)) {
for (tmp_order = order - 1;
!retcode
&& dma->granted_count < dma->request_count
&& tmp_order >= DRM_MIN_ORDER;
--tmp_order) {
retcode = DRM(dma_get_buffers_of_order)(dev, dma,
tmp_order);
}
}
if (dma->granted_count < dma->request_count
&& (dma->flags & _DRM_DMA_LARGER_OK)) {
for (tmp_order = order + 1;
!retcode
&& dma->granted_count < dma->request_count
&& tmp_order <= DRM_MAX_ORDER;
++tmp_order) {
retcode = DRM(dma_get_buffers_of_order)(dev, dma,
tmp_order);
}
}
return 0;
}
#endif /* __HAVE_OLD_DMA */
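/* The fallback loops above work on power-of-two size classes ("orders"):
 * the exact class is tried first, then smaller ones (_DRM_DMA_SMALLER_OK)
 * or larger ones (_DRM_DMA_LARGER_OK) when it runs dry.  A user-space
 * sketch (not from this commit) of the order computation DRM(order) is
 * understood to perform -- the smallest n such that 2^n >= size:
 */
#include <stdio.h>

static int
drm_order_sketch(unsigned long size)
{
	int order;
	unsigned long tmp;

	for (order = 0, tmp = size; tmp >>= 1; ++order)
		;
	if (size & (size - 1))
		++order;		/* not a power of two: round up */
	return order;
}

int
main(void)
{
	/* prints: order(4096)=12 order(4097)=13 order(1)=0 */
	printf("order(4096)=%d order(4097)=%d order(1)=%d\n",
	    drm_order_sketch(4096), drm_order_sketch(4097),
	    drm_order_sketch(1));
	return 0;
}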
#if __HAVE_DMA_IRQ #if __HAVE_DMA_IRQ
int DRM(irq_install)( drm_device_t *dev, int irq ) int DRM(irq_install)( drm_device_t *dev, int irq )
@ -515,11 +172,8 @@ int DRM(irq_install)( drm_device_t *dev, int irq )
DRM_DEBUG( "%s: irq=%d\n", __FUNCTION__, irq ); DRM_DEBUG( "%s: irq=%d\n", __FUNCTION__, irq );
dev->context_flag = 0; dev->context_flag = 0;
dev->interrupt_flag = 0;
dev->dma_flag = 0;
dev->dma->next_buffer = NULL; dev->dma->next_buffer = NULL;
dev->dma->next_queue = NULL;
dev->dma->this_buffer = NULL; dev->dma->this_buffer = NULL;
#if __HAVE_DMA_IRQ_BH #if __HAVE_DMA_IRQ_BH


@ -62,18 +62,9 @@
#ifndef __HAVE_DMA_QUEUE #ifndef __HAVE_DMA_QUEUE
#define __HAVE_DMA_QUEUE 0 #define __HAVE_DMA_QUEUE 0
#endif #endif
#ifndef __HAVE_MULTIPLE_DMA_QUEUES
#define __HAVE_MULTIPLE_DMA_QUEUES 0
#endif
#ifndef __HAVE_DMA_SCHEDULE #ifndef __HAVE_DMA_SCHEDULE
#define __HAVE_DMA_SCHEDULE 0 #define __HAVE_DMA_SCHEDULE 0
#endif #endif
#ifndef __HAVE_DMA_FLUSH
#define __HAVE_DMA_FLUSH 0
#endif
#ifndef __HAVE_DMA_READY
#define __HAVE_DMA_READY 0
#endif
#ifndef __HAVE_DMA_QUIESCENT #ifndef __HAVE_DMA_QUIESCENT
#define __HAVE_DMA_QUIESCENT 0 #define __HAVE_DMA_QUIESCENT 0
#endif #endif
@ -86,12 +77,6 @@
#ifndef __HAVE_SG #ifndef __HAVE_SG
#define __HAVE_SG 0 #define __HAVE_SG 0
#endif #endif
#ifndef __HAVE_KERNEL_CTX_SWITCH
#define __HAVE_KERNEL_CTX_SWITCH 0
#endif
#ifndef PCI_ANY_ID
#define PCI_ANY_ID ~0
#endif
#ifndef DRIVER_PREINIT #ifndef DRIVER_PREINIT
#define DRIVER_PREINIT() #define DRIVER_PREINIT()
@ -120,11 +105,10 @@
#ifndef DRIVER_FOPS #ifndef DRIVER_FOPS
#endif #endif
/*
 * The default number of instances (minor numbers) to initialize.
 */
#ifndef DRIVER_NUM_CARDS
#define DRIVER_NUM_CARDS 1
#endif
#if 1 && DRM_DEBUG_CODE
int DRM(flags) = DRM_FLAG_DEBUG;
#else
int DRM(flags) = 0;
#endif
static int DRM(init)(device_t nbdev); static int DRM(init)(device_t nbdev);
@ -137,9 +121,6 @@ static void DRM(cleanup)(device_t nbdev);
#if __REALLY_HAVE_AGP #if __REALLY_HAVE_AGP
MODULE_DEPEND(DRIVER_NAME, agp, 1, 1, 1); MODULE_DEPEND(DRIVER_NAME, agp, 1, 1, 1);
#endif #endif
#if DRM_LINUX
MODULE_DEPEND(DRIVER_NAME, linux, 1, 1, 1);
#endif
#endif /* __FreeBSD__ */ #endif /* __FreeBSD__ */
#ifdef __NetBSD__ #ifdef __NetBSD__
@ -157,8 +138,8 @@ static drm_ioctl_desc_t DRM(ioctls)[] = {
[DRM_IOCTL_NR(DRM_IOCTL_GET_STATS)] = { DRM(getstats), 0, 0 }, [DRM_IOCTL_NR(DRM_IOCTL_GET_STATS)] = { DRM(getstats), 0, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { DRM(setunique), 1, 1 }, [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { DRM(setunique), 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { DRM(block), 1, 1 }, [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { DRM(noop), 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { DRM(unblock), 1, 1 }, [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { DRM(noop), 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { DRM(authmagic), 1, 1 }, [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { DRM(authmagic), 1, 1 },
[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { DRM(addmap), 1, 1 }, [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { DRM(addmap), 1, 1 },
@ -182,7 +163,7 @@ static drm_ioctl_desc_t DRM(ioctls)[] = {
[DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { DRM(lock), 1, 0 }, [DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { DRM(lock), 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { DRM(unlock), 1, 0 }, [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { DRM(unlock), 1, 0 },
[DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { DRM(finish), 1, 0 }, [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { DRM(noop), 1, 0 },
#if __HAVE_DMA #if __HAVE_DMA
[DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { DRM(addbufs), 1, 1 }, [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { DRM(addbufs), 1, 1 },
@ -228,7 +209,6 @@ static struct cdevsw DRM(cdevsw) = {
.d_open = DRM( open ), .d_open = DRM( open ),
.d_close = DRM( close ), .d_close = DRM( close ),
.d_read = DRM( read ), .d_read = DRM( read ),
.d_write = DRM( write ),
.d_ioctl = DRM( ioctl ), .d_ioctl = DRM( ioctl ),
.d_poll = DRM( poll ), .d_poll = DRM( poll ),
.d_mmap = DRM( mmap ), .d_mmap = DRM( mmap ),
@ -290,7 +270,7 @@ static struct cdevsw DRM(cdevsw) = {
DRM(open), DRM(open),
DRM(close), DRM(close),
DRM(read), DRM(read),
DRM(write), nowrite,
DRM(ioctl), DRM(ioctl),
nostop, nostop,
notty, notty,
@ -432,10 +412,8 @@ static int DRM(setup)( drm_device_t *dev )
int i; int i;
DRIVER_PRESETUP(); DRIVER_PRESETUP();
atomic_set( &dev->ioctl_count, 0 );
atomic_set( &dev->vma_count, 0 );
dev->buf_use = 0; dev->buf_use = 0;
atomic_set( &dev->buf_alloc, 0 ); dev->buf_alloc = 0;
#if __HAVE_DMA #if __HAVE_DMA
i = DRM(dma_setup)( dev ); i = DRM(dma_setup)( dev );
@ -494,52 +472,26 @@ static int DRM(setup)( drm_device_t *dev )
if(dev->maplist == NULL) return DRM_ERR(ENOMEM); if(dev->maplist == NULL) return DRM_ERR(ENOMEM);
memset(dev->maplist, 0, sizeof(*dev->maplist)); memset(dev->maplist, 0, sizeof(*dev->maplist));
TAILQ_INIT(dev->maplist); TAILQ_INIT(dev->maplist);
dev->map_count = 0;
dev->vmalist = NULL;
dev->lock.hw_lock = NULL; dev->lock.hw_lock = NULL;
dev->lock.lock_queue = 0; dev->lock.lock_queue = 0;
dev->queue_count = 0;
dev->queue_reserved = 0;
dev->queue_slots = 0;
dev->queuelist = NULL;
dev->irq = 0; dev->irq = 0;
dev->context_flag = 0; dev->context_flag = 0;
dev->interrupt_flag = 0;
dev->dma_flag = 0;
dev->last_context = 0; dev->last_context = 0;
dev->last_switch = 0;
dev->last_checked = 0;
#if __FreeBSD_version >= 500000 #if __FreeBSD_version >= 500000
callout_init( &dev->timer, 1 ); callout_init( &dev->timer, 1 );
#else #else
callout_init( &dev->timer ); callout_init( &dev->timer );
#endif #endif
dev->context_wait = 0;
dev->ctx_start = 0;
dev->lck_start = 0;
dev->buf_rp = dev->buf;
dev->buf_wp = dev->buf;
dev->buf_end = dev->buf + DRM_BSZ;
#ifdef __FreeBSD__ #ifdef __FreeBSD__
dev->buf_sigio = NULL; dev->buf_sigio = NULL;
#elif defined(__NetBSD__) #elif defined(__NetBSD__)
dev->buf_pgid = 0; dev->buf_pgid = 0;
#endif #endif
dev->buf_readers = 0;
dev->buf_writers = 0;
dev->buf_selecting = 0;
DRM_DEBUG( "\n" ); DRM_DEBUG( "\n" );
/* The kernel's context could be created here, but is now created
* in drm_dma_enqueue. This is more resource-efficient for
* hardware that does not do DMA, but may mean that
* drm_select_queue fails between the time the interrupt is
* initialized and the time the queues are initialized.
*/
DRIVER_POSTSETUP(); DRIVER_POSTSETUP();
return 0; return 0;
} }
@ -550,7 +502,6 @@ static int DRM(takedown)( drm_device_t *dev )
drm_magic_entry_t *pt, *next; drm_magic_entry_t *pt, *next;
drm_local_map_t *map; drm_local_map_t *map;
drm_map_list_entry_t *list; drm_map_list_entry_t *list;
drm_vma_entry_t *vma, *vma_next;
int i; int i;
DRM_DEBUG( "\n" ); DRM_DEBUG( "\n" );
@ -563,12 +514,6 @@ static int DRM(takedown)( drm_device_t *dev )
DRM_LOCK; DRM_LOCK;
callout_stop( &dev->timer ); callout_stop( &dev->timer );
if ( dev->devname ) {
DRM(free)( dev->devname, strlen( dev->devname ) + 1,
DRM_MEM_DRIVER );
dev->devname = NULL;
}
if ( dev->unique ) { if ( dev->unique ) {
DRM(free)( dev->unique, strlen( dev->unique ) + 1, DRM(free)( dev->unique, strlen( dev->unique ) + 1,
DRM_MEM_DRIVER ); DRM_MEM_DRIVER );
@ -607,15 +552,6 @@ static int DRM(takedown)( drm_device_t *dev )
} }
#endif #endif
/* Clear vma list (only built for debugging) */
if ( dev->vmalist ) {
for ( vma = dev->vmalist ; vma ; vma = vma_next ) {
vma_next = vma->next;
DRM(free)( vma, sizeof(*vma), DRM_MEM_VMAS );
}
dev->vmalist = NULL;
}
if( dev->maplist ) { if( dev->maplist ) {
while ((list=TAILQ_FIRST(dev->maplist))) { while ((list=TAILQ_FIRST(dev->maplist))) {
map = list->map; map = list->map;
@ -681,31 +617,12 @@ static int DRM(takedown)( drm_device_t *dev )
dev->maplist = NULL; dev->maplist = NULL;
} }
#if __HAVE_DMA_QUEUE || __HAVE_MULTIPLE_DMA_QUEUES
if ( dev->queuelist ) {
for ( i = 0 ; i < dev->queue_count ; i++ ) {
DRM(waitlist_destroy)( &dev->queuelist[i]->waitlist );
if ( dev->queuelist[i] ) {
DRM(free)( dev->queuelist[i],
sizeof(*dev->queuelist[0]),
DRM_MEM_QUEUES );
dev->queuelist[i] = NULL;
}
}
DRM(free)( dev->queuelist,
dev->queue_slots * sizeof(*dev->queuelist),
DRM_MEM_QUEUES );
dev->queuelist = NULL;
}
dev->queue_count = 0;
#endif
#if __HAVE_DMA #if __HAVE_DMA
DRM(dma_takedown)( dev ); DRM(dma_takedown)( dev );
#endif #endif
if ( dev->lock.hw_lock ) { if ( dev->lock.hw_lock ) {
dev->lock.hw_lock = NULL; /* SHM removed */ dev->lock.hw_lock = NULL; /* SHM removed */
dev->lock.pid = 0; dev->lock.filp = NULL;
DRM_WAKEUP_INT((void *)&dev->lock.lock_queue); DRM_WAKEUP_INT((void *)&dev->lock.lock_queue);
} }
DRM_UNLOCK; DRM_UNLOCK;
@ -929,7 +846,8 @@ int DRM(close)(dev_t kdev, int flags, int fmt, DRM_STRUCTPROC *p)
drm_file_t *priv; drm_file_t *priv;
DRM_DEVICE; DRM_DEVICE;
int retcode = 0; int retcode = 0;
DRMFILE __unused filp = (void *)(DRM_CURRENTPID);
DRM_DEBUG( "open_count = %d\n", dev->open_count ); DRM_DEBUG( "open_count = %d\n", dev->open_count );
priv = DRM(find_file_by_proc)(dev, p); priv = DRM(find_file_by_proc)(dev, p);
if (!priv) { if (!priv) {
@ -952,7 +870,7 @@ int DRM(close)(dev_t kdev, int flags, int fmt, DRM_STRUCTPROC *p)
#endif #endif
if (dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) if (dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)
&& dev->lock.pid == DRM_CURRENTPID) { && dev->lock.filp == (void *)DRM_CURRENTPID) {
DRM_DEBUG("Process %d dead, freeing lock for context %d\n", DRM_DEBUG("Process %d dead, freeing lock for context %d\n",
DRM_CURRENTPID, DRM_CURRENTPID,
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
@ -1002,7 +920,7 @@ int DRM(close)(dev_t kdev, int flags, int fmt, DRM_STRUCTPROC *p)
} }
} }
#elif __HAVE_DMA #elif __HAVE_DMA
DRM(reclaim_buffers)( dev, priv->pid ); DRM(reclaim_buffers)( dev, (void *)priv->pid );
#endif #endif
#if defined (__FreeBSD__) && (__FreeBSD_version >= 500000) #if defined (__FreeBSD__) && (__FreeBSD_version >= 500000)
@ -1035,13 +953,6 @@ int DRM(close)(dev_t kdev, int flags, int fmt, DRM_STRUCTPROC *p)
device_unbusy(dev->device); device_unbusy(dev->device);
#endif #endif
if ( !--dev->open_count ) { if ( !--dev->open_count ) {
if ( atomic_read( &dev->ioctl_count ) || dev->blocked ) {
DRM_ERROR( "Device busy: %ld %d\n",
(unsigned long)atomic_read( &dev->ioctl_count ),
dev->blocked );
DRM_SPINUNLOCK( &dev->count_lock );
return DRM_ERR(EBUSY);
}
DRM_SPINUNLOCK( &dev->count_lock ); DRM_SPINUNLOCK( &dev->count_lock );
return DRM(takedown)( dev ); return DRM(takedown)( dev );
} }
@ -1052,16 +963,16 @@ int DRM(close)(dev_t kdev, int flags, int fmt, DRM_STRUCTPROC *p)
/* DRM(ioctl) is called whenever a process performs an ioctl on /dev/drm. /* DRM(ioctl) is called whenever a process performs an ioctl on /dev/drm.
*/ */
int DRM(ioctl)( DRM_IOCTL_ARGS ) int DRM(ioctl)(dev_t kdev, u_long cmd, caddr_t data, int flags,
DRM_STRUCTPROC *p)
{ {
DRM_DEVICE; DRM_DEVICE;
int retcode = 0; int retcode = 0;
drm_ioctl_desc_t *ioctl; drm_ioctl_desc_t *ioctl;
d_ioctl_t *func; int (*func)(DRM_IOCTL_ARGS);
int nr = DRM_IOCTL_NR(cmd); int nr = DRM_IOCTL_NR(cmd);
DRM_PRIV; DRM_PRIV;
atomic_inc( &dev->ioctl_count );
atomic_inc( &dev->counts[_DRM_STAT_IOCTLS] ); atomic_inc( &dev->counts[_DRM_STAT_IOCTLS] );
++priv->ioctl_count; ++priv->ioctl_count;
@ -1075,21 +986,17 @@ int DRM(ioctl)( DRM_IOCTL_ARGS )
switch (cmd) { switch (cmd) {
case FIONBIO: case FIONBIO:
atomic_dec(&dev->ioctl_count);
return 0; return 0;
case FIOASYNC: case FIOASYNC:
atomic_dec(&dev->ioctl_count);
dev->flags |= FASYNC; dev->flags |= FASYNC;
return 0; return 0;
#ifdef __FreeBSD__ #ifdef __FreeBSD__
case FIOSETOWN: case FIOSETOWN:
atomic_dec(&dev->ioctl_count);
return fsetown(*(int *)data, &dev->buf_sigio); return fsetown(*(int *)data, &dev->buf_sigio);
case FIOGETOWN: case FIOGETOWN:
atomic_dec(&dev->ioctl_count);
#if (__FreeBSD_version >= 500000) #if (__FreeBSD_version >= 500000)
*(int *) data = fgetown(&dev->buf_sigio); *(int *) data = fgetown(&dev->buf_sigio);
#else #else
@ -1099,12 +1006,10 @@ int DRM(ioctl)( DRM_IOCTL_ARGS )
#endif /* __FreeBSD__ */ #endif /* __FreeBSD__ */
#ifdef __NetBSD__ #ifdef __NetBSD__
case TIOCSPGRP: case TIOCSPGRP:
atomic_dec(&dev->ioctl_count);
dev->buf_pgid = *(int *)data; dev->buf_pgid = *(int *)data;
return 0; return 0;
case TIOCGPGRP: case TIOCGPGRP:
atomic_dec(&dev->ioctl_count);
*(int *)data = dev->buf_pgid; *(int *)data = dev->buf_pgid;
return 0; return 0;
#endif /* __NetBSD__ */ #endif /* __NetBSD__ */
@ -1123,11 +1028,10 @@ int DRM(ioctl)( DRM_IOCTL_ARGS )
|| ( ioctl->auth_needed && !priv->authenticated ) ) { || ( ioctl->auth_needed && !priv->authenticated ) ) {
retcode = EACCES; retcode = EACCES;
} else { } else {
retcode = func( kdev, cmd, data, flags, p ); retcode = func(kdev, cmd, data, flags, p, (void *)DRM_CURRENTPID);
} }
} }
atomic_dec( &dev->ioctl_count );
return DRM_ERR(retcode); return DRM_ERR(retcode);
} }
@ -1136,14 +1040,6 @@ int DRM(lock)( DRM_IOCTL_ARGS )
DRM_DEVICE; DRM_DEVICE;
drm_lock_t lock; drm_lock_t lock;
int ret = 0; int ret = 0;
#if __HAVE_MULTIPLE_DMA_QUEUES
drm_queue_t *q;
#endif
#if __HAVE_DMA_HISTOGRAM
cycles_t start;
dev->lck_start = start = get_cycles();
#endif
DRM_COPY_FROM_USER_IOCTL( lock, (drm_lock_t *)data, sizeof(lock) ); DRM_COPY_FROM_USER_IOCTL( lock, (drm_lock_t *)data, sizeof(lock) );
@ -1160,15 +1056,8 @@ int DRM(lock)( DRM_IOCTL_ARGS )
#if __HAVE_DMA_QUEUE #if __HAVE_DMA_QUEUE
if ( lock.context < 0 ) if ( lock.context < 0 )
return DRM_ERR(EINVAL); return DRM_ERR(EINVAL);
#elif __HAVE_MULTIPLE_DMA_QUEUES
if ( lock.context < 0 || lock.context >= dev->queue_count )
return DRM_ERR(EINVAL);
q = dev->queuelist[lock.context];
#endif #endif
#if __HAVE_DMA_FLUSH
ret = DRM(flush_block_and_flush)( dev, lock.context, lock.flags );
#endif
if ( !ret ) { if ( !ret ) {
for (;;) { for (;;) {
if ( !dev->lock.hw_lock ) { if ( !dev->lock.hw_lock ) {
@ -1178,7 +1067,7 @@ int DRM(lock)( DRM_IOCTL_ARGS )
} }
if ( DRM(lock_take)( &dev->lock.hw_lock->lock, if ( DRM(lock_take)( &dev->lock.hw_lock->lock,
lock.context ) ) { lock.context ) ) {
dev->lock.pid = DRM_CURRENTPID; dev->lock.filp = (void *)DRM_CURRENTPID;
dev->lock.lock_time = jiffies; dev->lock.lock_time = jiffies;
atomic_inc( &dev->counts[_DRM_STAT_LOCKS] ); atomic_inc( &dev->counts[_DRM_STAT_LOCKS] );
break; /* Got lock */ break; /* Got lock */
@ -1194,36 +1083,18 @@ int DRM(lock)( DRM_IOCTL_ARGS )
} }
} }
#if __HAVE_DMA_FLUSH
DRM(flush_unblock)( dev, lock.context, lock.flags ); /* cleanup phase */
#endif
if ( !ret ) { if ( !ret ) {
/* FIXME: Add signal blocking here */
#if __HAVE_DMA_READY
if ( lock.flags & _DRM_LOCK_READY ) {
DRIVER_DMA_READY();
}
#endif
#if __HAVE_DMA_QUIESCENT #if __HAVE_DMA_QUIESCENT
if ( lock.flags & _DRM_LOCK_QUIESCENT ) { if ( lock.flags & _DRM_LOCK_QUIESCENT ) {
DRIVER_DMA_QUIESCENT(); DRIVER_DMA_QUIESCENT();
} }
#endif
#if __HAVE_KERNEL_CTX_SWITCH
if ( dev->last_context != lock.context ) {
DRM(context_switch)(dev, dev->last_context,
lock.context);
}
#endif #endif
} }
DRM_DEBUG( "%d %s\n", lock.context, ret ? "interrupted" : "has lock" ); DRM_DEBUG( "%d %s\n", lock.context, ret ? "interrupted" : "has lock" );
#if __HAVE_DMA_HISTOGRAM
atomic_inc(&dev->histo.lacq[DRM(histogram_slot)(get_cycles()-start)]);
#endif
return DRM_ERR(ret); return DRM_ERR(ret);
} }
@ -1243,25 +1114,6 @@ int DRM(unlock)( DRM_IOCTL_ARGS )
atomic_inc( &dev->counts[_DRM_STAT_UNLOCKS] ); atomic_inc( &dev->counts[_DRM_STAT_UNLOCKS] );
#if __HAVE_KERNEL_CTX_SWITCH
/* We no longer really hold it, but if we are the next
* agent to request it then we should just be able to
* take it immediately and not eat the ioctl.
*/
dev->lock.pid = 0;
{
__volatile__ unsigned int *plock = &dev->lock.hw_lock->lock;
unsigned int old, new, prev, ctx;
ctx = lock.context;
do {
old = *plock;
new = ctx;
prev = cmpxchg(plock, old, new);
} while (prev != old);
}
wake_up_interruptible(&dev->lock.lock_queue);
#else
DRM(lock_transfer)( dev, &dev->lock.hw_lock->lock, DRM(lock_transfer)( dev, &dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT ); DRM_KERNEL_CONTEXT );
#if __HAVE_DMA_SCHEDULE #if __HAVE_DMA_SCHEDULE
@ -1276,89 +1128,46 @@ int DRM(unlock)( DRM_IOCTL_ARGS )
DRM_ERROR( "\n" ); DRM_ERROR( "\n" );
} }
} }
#endif /* !__HAVE_KERNEL_CTX_SWITCH */
return 0; return 0;
} }
#if DRM_LINUX #if DRM_LINUX
#include <sys/sysproto.h>
MODULE_DEPEND(DRIVER_NAME, linux, 1, 1, 1);
#define LINUX_IOCTL_DRM_MIN 0x6400 #define LINUX_IOCTL_DRM_MIN 0x6400
#define LINUX_IOCTL_DRM_MAX 0x64ff #define LINUX_IOCTL_DRM_MAX 0x64ff
static linux_ioctl_function_t DRM( linux_ioctl); static linux_ioctl_function_t DRM(linux_ioctl);
static struct linux_ioctl_handler DRM( handler) = {DRM( linux_ioctl), LINUX_IOCTL_DRM_MIN, LINUX_IOCTL_DRM_MAX}; static struct linux_ioctl_handler DRM(handler) = {DRM(linux_ioctl),
SYSINIT (DRM( register), SI_SUB_KLD, SI_ORDER_MIDDLE, linux_ioctl_register_handler, &DRM( handler)); LINUX_IOCTL_DRM_MIN, LINUX_IOCTL_DRM_MAX};
SYSUNINIT(DRM( unregister), SI_SUB_KLD, SI_ORDER_MIDDLE, linux_ioctl_unregister_handler, &DRM( handler));
#define LINUX_IOC_VOID IOC_VOID SYSINIT(DRM(register), SI_SUB_KLD, SI_ORDER_MIDDLE,
#define LINUX_IOC_IN IOC_OUT /* Linux has the values the other way around */ linux_ioctl_register_handler, &DRM(handler));
SYSUNINIT(DRM(unregister), SI_SUB_KLD, SI_ORDER_MIDDLE,
linux_ioctl_unregister_handler, &DRM(handler));
/* The bits for in/out are switched on Linux */
#define LINUX_IOC_IN IOC_OUT
#define LINUX_IOC_OUT IOC_IN #define LINUX_IOC_OUT IOC_IN
/*
 * Linux emulation IOCTL
 */
static int
DRM(linux_ioctl)(DRM_STRUCTPROC *p, struct linux_ioctl_args* args)
{
	u_long		cmd = args->cmd;
#define STK_PARAMS	128
	union {
		char stkbuf[STK_PARAMS];
		long align;
	} ubuf;
	caddr_t		data = NULL, memp = NULL;
	u_int		size = IOCPARM_LEN(cmd);
	int		error;
#if (__FreeBSD_version >= 500000)
	struct file	*fp;
#else
	struct file	*fp = p->p_fd->fd_ofiles[args->fd];
#endif

	if ( size > STK_PARAMS ) {
		if ( size > IOCPARM_MAX )
			return EINVAL;
		memp = malloc( (u_long)size, DRM(M_DRM), M_WAITOK );
		data = memp;
	} else {
		data = ubuf.stkbuf;
	}

	if ( cmd & LINUX_IOC_IN ) {
		if ( size ) {
			error = copyin( (caddr_t)args->arg, data, (u_int)size );
			if (error) {
				if ( memp )
					free( data, DRM(M_DRM) );
				return error;
			}
		} else {
			data = (caddr_t)args->arg;
		}
	} else if ( (cmd & LINUX_IOC_OUT) && size ) {
		/*
		 * Zero the buffer so the user always
		 * gets back something deterministic.
		 */
		bzero( data, size );
	} else if ( cmd & LINUX_IOC_VOID ) {
		*(caddr_t *)data = (caddr_t)args->arg;
	}

#if (__FreeBSD_version >= 500000)
	if ( (error = fget( p, args->fd, &fp )) != 0 ) {
		if ( memp )
			free( memp, DRM(M_DRM) );
		return (error);
	}
	error = fo_ioctl( fp, cmd, data, p->td_ucred, p );
	fdrop( fp, p );
#else
	error = fo_ioctl( fp, cmd, data, p );
#endif
	if ( error == 0 && (cmd & LINUX_IOC_OUT) && size )
		error = copyout( data, (caddr_t)args->arg, (u_int)size );
	if ( memp )
		free( memp, DRM(M_DRM) );
	return error;
}

static int
DRM(linux_ioctl)(DRM_STRUCTPROC *p, struct linux_ioctl_args* args)
{
	int error;
	int cmd = args->cmd;

	args->cmd &= ~(LINUX_IOC_IN | LINUX_IOC_OUT);
	if (cmd & LINUX_IOC_IN)
		args->cmd |= IOC_IN;
	if (cmd & LINUX_IOC_OUT)
		args->cmd |= IOC_OUT;

	error = ioctl(p, (struct ioctl_args *)args);

	return error;
}
#endif /* DRM_LINUX */ #endif /* DRM_LINUX */
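/* Why the rewritten DRM(linux_ioctl) above only has to flip two bits: both
 * kernels keep the transfer direction in the top bits of the command word,
 * with the meanings of the two bits exchanged.  A self-contained sketch
 * (not part of this commit); the IOC_* values are copied from FreeBSD's
 * <sys/ioccom.h>, and the example command number is made up.
 */
#include <stdio.h>

#define IOC_OUT		0x40000000UL	/* FreeBSD: copy out, kernel -> user */
#define IOC_IN		0x80000000UL	/* FreeBSD: copy in,  user -> kernel */
#define LINUX_IOC_IN	IOC_OUT		/* Linux "in" is FreeBSD's "out" bit */
#define LINUX_IOC_OUT	IOC_IN		/* and vice versa */

static unsigned long
linux_to_bsd_cmd(unsigned long cmd)
{
	unsigned long bsd = cmd & ~(LINUX_IOC_IN | LINUX_IOC_OUT);

	if (cmd & LINUX_IOC_IN)
		bsd |= IOC_IN;
	if (cmd & LINUX_IOC_OUT)
		bsd |= IOC_OUT;
	return bsd;
}

int
main(void)
{
	/* A user -> kernel Linux command becomes an IOC_IN FreeBSD command:
	 * prints 0x40006440 -> 0x80006440. */
	printf("0x%08lx -> 0x%08lx\n", 0x40006440UL,
	    linux_to_bsd_cmd(0x40006440UL));
	return 0;
}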


@ -62,8 +62,6 @@ int DRM(open_helper)(dev_t kdev, int flags, int fmt, DRM_STRUCTPROC *p,
if (flags & O_EXCL) if (flags & O_EXCL)
return EBUSY; /* No exclusive opens */ return EBUSY; /* No exclusive opens */
dev->flags = flags; dev->flags = flags;
if (!DRM(cpu_valid)())
return DRM_ERR(EINVAL);
DRM_DEBUG("pid = %d, minor = %d\n", DRM_CURRENTPID, m); DRM_DEBUG("pid = %d, minor = %d\n", DRM_CURRENTPID, m);
@ -98,148 +96,15 @@ int DRM(open_helper)(dev_t kdev, int flags, int fmt, DRM_STRUCTPROC *p,
} }
/* The drm_read and drm_write_string code (especially that which manages
   the circular buffer), is based on Alessandro Rubini's LINUX DEVICE
   DRIVERS (Cambridge: O'Reilly, 1998), pages 111-113. */

/* The DRM(read) and DRM(poll) are stubs to prevent spurious errors
 * on older X Servers (4.3.0 and earlier) */
int DRM(read)(dev_t kdev, struct uio *uio, int ioflag) int DRM(read)(dev_t kdev, struct uio *uio, int ioflag)
{ {
DRM_DEVICE;
int left;
int avail;
int send;
int cur;
int error = 0;
DRM_DEBUG("%p, %p\n", dev->buf_rp, dev->buf_wp);
while (dev->buf_rp == dev->buf_wp) {
DRM_DEBUG(" sleeping\n");
if (dev->flags & FASYNC)
return EWOULDBLOCK;
error = tsleep(&dev->buf_rp, PZERO|PCATCH, "drmrd", 0);
if (error) {
DRM_DEBUG(" interrupted\n");
return error;
}
DRM_DEBUG(" awake\n");
}
left = (dev->buf_rp + DRM_BSZ - dev->buf_wp) % DRM_BSZ;
avail = DRM_BSZ - left;
send = DRM_MIN(avail, uio->uio_resid);
while (send) {
if (dev->buf_wp > dev->buf_rp) {
cur = DRM_MIN(send, dev->buf_wp - dev->buf_rp);
} else {
cur = DRM_MIN(send, dev->buf_end - dev->buf_rp);
}
error = uiomove(dev->buf_rp, cur, uio);
if (error)
break;
dev->buf_rp += cur;
if (dev->buf_rp == dev->buf_end) dev->buf_rp = dev->buf;
send -= cur;
}
wakeup(&dev->buf_wp);
return error;
}
int DRM(write_string)(drm_device_t *dev, const char *s)
{
int left = (dev->buf_rp + DRM_BSZ - dev->buf_wp) % DRM_BSZ;
int send = strlen(s);
int count;
#ifdef __NetBSD__
struct proc *p;
#endif /* __NetBSD__ */
DRM_DEBUG("%d left, %d to send (%p, %p)\n",
left, send, dev->buf_rp, dev->buf_wp);
if (left == 1 || dev->buf_wp != dev->buf_rp) {
DRM_ERROR("Buffer not empty (%d left, wp = %p, rp = %p)\n",
left,
dev->buf_wp,
dev->buf_rp);
}
while (send) {
if (dev->buf_wp >= dev->buf_rp) {
count = DRM_MIN(send, dev->buf_end - dev->buf_wp);
if (count == left) --count; /* Leave a hole */
} else {
count = DRM_MIN(send, dev->buf_rp - dev->buf_wp - 1);
}
strncpy(dev->buf_wp, s, count);
dev->buf_wp += count;
if (dev->buf_wp == dev->buf_end) dev->buf_wp = dev->buf;
send -= count;
}
if (dev->buf_selecting) {
dev->buf_selecting = 0;
selwakeup(&dev->buf_sel);
}
#ifdef __FreeBSD__
DRM_DEBUG("dev->buf_sigio=%p\n", dev->buf_sigio);
if (dev->buf_sigio) {
DRM_DEBUG("dev->buf_sigio->sio_pgid=%d\n", dev->buf_sigio->sio_pgid);
#if __FreeBSD_version >= 500000
pgsigio(&dev->buf_sigio, SIGIO, 0);
#else
pgsigio(dev->buf_sigio, SIGIO, 0);
#endif /* __FreeBSD_version */
}
#endif /* __FreeBSD__ */
#ifdef __NetBSD__
if (dev->buf_pgid) {
DRM_DEBUG("dev->buf_pgid=%d\n", dev->buf_pgid);
if(dev->buf_pgid > 0)
gsignal(dev->buf_pgid, SIGIO);
else if(dev->buf_pgid && (p = pfind(-dev->buf_pgid)) != NULL)
psignal(p, SIGIO);
}
#endif /* __NetBSD__ */
DRM_DEBUG("waking\n");
wakeup(&dev->buf_rp);
return 0; return 0;
} }
int DRM(poll)(dev_t kdev, int events, DRM_STRUCTPROC *p) int DRM(poll)(dev_t kdev, int events, DRM_STRUCTPROC *p)
{ {
DRM_DEVICE;
int s;
int revents = 0;
s = spldrm();
if (events & (POLLIN | POLLRDNORM)) {
int left = (dev->buf_rp + DRM_BSZ - dev->buf_wp) % DRM_BSZ;
if (left > 0)
revents |= events & (POLLIN | POLLRDNORM);
else
selrecord(p, &dev->buf_sel);
}
splx(s);
return revents;
}
int DRM(write)(dev_t kdev, struct uio *uio, int ioflag)
{
#if DRM_DEBUG_CODE
DRM_DEVICE;
#endif
#ifdef __FreeBSD__
DRM_DEBUG("pid = %d, device = %p, open_count = %d\n",
curproc->p_pid, dev->device, dev->open_count);
#elif defined(__NetBSD__)
DRM_DEBUG("pid = %d, device = %p, open_count = %d\n",
curproc->p_pid, &dev->device, dev->open_count);
#endif
return 0; return 0;
} }


@ -1,111 +0,0 @@
/* drm_init.h -- Setup/Cleanup for DRM -*- linux-c -*-
* Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
* Gareth Hughes <gareth@valinux.com>
*
* $FreeBSD$
*/
#include "dev/drm/drmP.h"
#if 1 && DRM_DEBUG_CODE
int DRM(flags) = DRM_FLAG_DEBUG;
#else
int DRM(flags) = 0;
#endif
/* drm_parse_option parses a single option. See description for
* drm_parse_options for details.
*/
static void DRM(parse_option)(char *s)
{
char *c, *r;
DRM_DEBUG("\"%s\"\n", s);
if (!s || !*s) return;
for (c = s; *c && *c != ':'; c++); /* find : or \0 */
if (*c) r = c + 1; else r = NULL; /* remember remainder */
*c = '\0'; /* terminate */
if (!strcmp(s, "noctx")) {
DRM(flags) |= DRM_FLAG_NOCTX;
DRM_INFO("Server-mediated context switching OFF\n");
return;
}
if (!strcmp(s, "debug")) {
DRM(flags) |= DRM_FLAG_DEBUG;
DRM_INFO("Debug messages ON\n");
return;
}
DRM_ERROR("\"%s\" is not a valid option\n", s);
return;
}
/* drm_parse_options parse the insmod "drm_opts=" options, or the command-line
* options passed to the kernel via LILO. The grammar of the format is as
* follows:
*
* drm ::= 'drm_opts=' option_list
* option_list ::= option [ ';' option_list ]
* option ::= 'device:' major
* | 'debug'
* | 'noctx'
* major ::= INTEGER
*
* Note that 's' contains option_list without the 'drm_opts=' part.
*
* device=major,minor specifies the device number used for /dev/drm
* if major == 0 then the misc device is used
* if major == 0 and minor == 0 then dynamic misc allocation is used
* debug=on specifies that debugging messages will be printk'd
* debug=trace specifies that each function call will be logged via printk
* debug=off turns off all debugging options
*
*/
void DRM(parse_options)(char *s)
{
char *h, *t, *n;
DRM_DEBUG("\"%s\"\n", s ?: "");
if (!s || !*s) return;
for (h = t = n = s; h && *h; h = n) {
for (; *t && *t != ';'; t++); /* find ; or \0 */
if (*t) n = t + 1; else n = NULL; /* remember next */
*t = '\0'; /* terminate */
DRM(parse_option)(h); /* parse */
}
}
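/* A runnable user-space sketch (not from this commit) of the grammar just
 * described: the option list splits on ';' and each piece is dispatched.
 * The pointer walk is a compact equivalent of the loop above, with the
 * scan cursor re-synced to the start of each option; option handling is
 * stubbed out with printf.
 */
#include <stdio.h>

static void
parse_option_sketch(const char *s)
{
	printf("option: \"%s\"\n", s);	/* stands in for the flag setting */
}

static void
parse_options_sketch(char *s)
{
	char *h, *t, *n;

	if (!s || !*s)
		return;
	for (h = t = n = s; h && *h; h = n, t = h) {
		for (; *t && *t != ';'; t++);	/* find ; or \0 */
		if (*t) n = t + 1; else n = NULL; /* remember next */
		*t = '\0';			/* terminate */
		parse_option_sketch(h);		/* parse */
	}
}

int
main(void)
{
	char opts[] = "debug;noctx";

	parse_options_sketch(opts);	/* option: "debug", option: "noctx" */
	return 0;
}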
/* drm_cpu_valid returns non-zero if the DRI will run on this CPU, and 0
* otherwise.
*/
int DRM(cpu_valid)(void)
{
return 1;
}


@ -121,15 +121,6 @@ int DRM(setunique)( DRM_IOCTL_ARGS )
dev->unique[dev->unique_len] = '\0'; dev->unique[dev->unique_len] = '\0';
dev->devname = DRM(alloc)(strlen(dev->name) + strlen(dev->unique) + 2,
DRM_MEM_DRIVER);
if(!dev->devname) {
DRM(free)(dev->devname, sizeof(*dev->devname), DRM_MEM_DRIVER);
return DRM_ERR(ENOMEM);
}
sprintf(dev->devname, "%s@%s", dev->name, dev->unique);
return 0; return 0;
} }
@ -148,7 +139,7 @@ int DRM(getmap)( DRM_IOCTL_ARGS )
idx = map.offset; idx = map.offset;
DRM_LOCK; DRM_LOCK;
if (idx < 0 || idx >= dev->map_count) { if (idx < 0) {
DRM_UNLOCK; DRM_UNLOCK;
return DRM_ERR(EINVAL); return DRM_ERR(EINVAL);
} }
@ -239,3 +230,9 @@ int DRM(getstats)( DRM_IOCTL_ARGS )
return 0; return 0;
} }
int DRM(noop)(DRM_IOCTL_ARGS)
{
DRM_DEBUG("\n");
return 0;
}


@ -1,242 +0,0 @@
/* drm_lists.h -- Buffer list handling routines -*- linux-c -*-
* Created: Mon Apr 19 20:54:22 1999 by faith@valinux.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
* Gareth Hughes <gareth@valinux.com>
*
* $FreeBSD$
*/
#include "dev/drm/drmP.h"
#if __HAVE_DMA_WAITLIST
int DRM(waitlist_create)(drm_waitlist_t *bl, int count)
{
if (bl->count)
return DRM_ERR( EINVAL );
bl->bufs = DRM(alloc)((bl->count + 2) * sizeof(*bl->bufs),
DRM_MEM_BUFLISTS);
if(!bl->bufs) return DRM_ERR(ENOMEM);
bzero(bl->bufs, sizeof(*bl->bufs));
bl->count = count;
bl->rp = bl->bufs;
bl->wp = bl->bufs;
bl->end = &bl->bufs[bl->count+1];
DRM_SPININIT( bl->write_lock, "writelock" );
DRM_SPININIT( bl->read_lock, "readlock" );
return 0;
}
int DRM(waitlist_destroy)(drm_waitlist_t *bl)
{
if (bl->rp != bl->wp)
return DRM_ERR( EINVAL );
if (bl->bufs) DRM(free)(bl->bufs,
(bl->count + 2) * sizeof(*bl->bufs),
DRM_MEM_BUFLISTS);
bl->count = 0;
bl->bufs = NULL;
bl->rp = NULL;
bl->wp = NULL;
bl->end = NULL;
DRM_SPINUNINIT( bl->write_lock );
DRM_SPINUNINIT( bl->read_lock );
return 0;
}
int DRM(waitlist_put)(drm_waitlist_t *bl, drm_buf_t *buf)
{
int left;
int s;
left = DRM_LEFTCOUNT(bl);
if (!left) {
DRM_ERROR("Overflow while adding buffer %d from pid %d\n",
buf->idx, buf->pid);
return DRM_ERR( EINVAL );
}
#if __HAVE_DMA_HISTOGRAM
getnanotime(&buf->time_queued);
#endif
buf->list = DRM_LIST_WAIT;
DRM_SPINLOCK(&bl->write_lock);
s = spldrm();
*bl->wp = buf;
if (++bl->wp >= bl->end) bl->wp = bl->bufs;
splx(s);
DRM_SPINUNLOCK(&bl->write_lock);
return 0;
}
drm_buf_t *DRM(waitlist_get)(drm_waitlist_t *bl)
{
drm_buf_t *buf;
int s;
DRM_SPINLOCK(&bl->read_lock);
s = spldrm();
buf = *bl->rp;
if (bl->rp == bl->wp) {
splx(s);
DRM_SPINUNLOCK(&bl->read_lock);
return NULL;
}
if (++bl->rp >= bl->end) bl->rp = bl->bufs;
splx(s);
DRM_SPINUNLOCK(&bl->read_lock);
return buf;
}
#endif /* __HAVE_DMA_WAITLIST */
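/* The waitlist above is a fixed-size ring of buffer pointers with read
 * (rp) and write (wp) cursors; one slot always stays empty ("leave a
 * hole") so rp == wp can only mean "empty", never "full".  A generic
 * single-reader/single-writer sketch of that discipline (not the exact
 * DRM macros, and without the spinlocks):
 */
#include <stdio.h>

#define SLOTS 4				/* holds at most SLOTS - 1 items */

static void *ring[SLOTS];
static int rp, wp;

static int
ring_put(void *item)
{
	if ((wp + 1) % SLOTS == rp)
		return -1;		/* full: writing would overtake rp */
	ring[wp] = item;
	wp = (wp + 1) % SLOTS;
	return 0;
}

static void *
ring_get(void)
{
	void *item;

	if (rp == wp)
		return NULL;		/* empty */
	item = ring[rp];
	rp = (rp + 1) % SLOTS;
	return item;
}

int
main(void)
{
	int a = 1, b = 2;

	ring_put(&a);
	ring_put(&b);
	/* prints: 1 2 */
	printf("%d %d\n", *(int *)ring_get(), *(int *)ring_get());
	return 0;
}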
#if __HAVE_DMA_FREELIST
int DRM(freelist_create)(drm_freelist_t *bl, int count)
{
atomic_set(&bl->count, 0);
bl->next = NULL;
bl->waiting = 0;
bl->low_mark = 0;
bl->high_mark = 0;
atomic_set(&bl->wfh, 0);
DRM_SPININIT( bl->lock, "freelistlock" );
++bl->initialized;
return 0;
}
int DRM(freelist_destroy)(drm_freelist_t *bl)
{
atomic_set(&bl->count, 0);
bl->next = NULL;
DRM_SPINUNINIT( bl->lock );
return 0;
}
int DRM(freelist_put)(drm_device_t *dev, drm_freelist_t *bl, drm_buf_t *buf)
{
drm_device_dma_t *dma = dev->dma;
if (!dma) {
DRM_ERROR("No DMA support\n");
return 1;
}
if (buf->waiting || buf->pending || buf->list == DRM_LIST_FREE) {
DRM_ERROR("Freed buffer %d: w%d, p%d, l%d\n",
buf->idx, buf->waiting, buf->pending, buf->list);
}
if (!bl) return 1;
#if __HAVE_DMA_HISTOGRAM
getnanotime(&buf->time_queued);
DRM(histogram_compute)(dev, buf);
#endif
buf->list = DRM_LIST_FREE;
DRM_SPINLOCK( &bl->lock );
buf->next = bl->next;
bl->next = buf;
DRM_SPINUNLOCK( &bl->lock );
atomic_inc(&bl->count);
if (atomic_read(&bl->count) > dma->buf_count) {
DRM_ERROR("%ld of %d buffers free after addition of %d\n",
(unsigned long)atomic_read(&bl->count),
dma->buf_count, buf->idx);
return 1;
}
/* Check for high water mark */
if (atomic_read(&bl->wfh) && atomic_read(&bl->count)>=bl->high_mark) {
atomic_set(&bl->wfh, 0);
DRM_WAKEUP_INT((void *)&bl->waiting);
}
return 0;
}
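/* The low/high mark handling above and in DRM(freelist_get) below is a
 * hysteresis: once the free count drops to low_mark, getters start
 * blocking (wfh, "wait for high"), and they are only released when puts
 * have refilled the list to high_mark, which avoids thrashing around a
 * single threshold.  A user-space sketch (not from this commit), with the
 * sleep/wakeup reduced to a flag and a printf:
 */
#include <stdio.h>

struct freelist_sketch {
	int count, low_mark, high_mark, wfh;
};

static void
sketch_put(struct freelist_sketch *bl)
{
	bl->count++;
	if (bl->wfh && bl->count >= bl->high_mark) {
		bl->wfh = 0;		/* DRM_WAKEUP_INT() in the kernel */
		printf("refilled to %d: wake blocked getters\n", bl->count);
	}
}

static int
sketch_get(struct freelist_sketch *bl)
{
	if (bl->count <= bl->low_mark)
		bl->wfh = 1;		/* became low: hold getters back */
	if (bl->wfh)
		return -1;		/* the kernel would tsleep() here */
	bl->count--;
	return 0;
}

int
main(void)
{
	struct freelist_sketch bl = { 2, 2, 4, 0 };
	int i;

	printf("get: %d\n", sketch_get(&bl));	/* hits low mark: -1 */
	for (i = 0; i < 3; i++)
		sketch_put(&bl);		/* wakes at count == 4 */
	printf("get: %d\n", sketch_get(&bl));	/* succeeds: 0 */
	return 0;
}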
static drm_buf_t *DRM(freelist_try)(drm_freelist_t *bl)
{
drm_buf_t *buf;
if (!bl) return NULL;
/* Get buffer */
DRM_SPINLOCK(&bl->lock);
if (!bl->next) {
DRM_SPINUNLOCK(&bl->lock);
return NULL;
}
buf = bl->next;
bl->next = bl->next->next;
DRM_SPINUNLOCK(&bl->lock);
atomic_dec(&bl->count);
buf->next = NULL;
buf->list = DRM_LIST_NONE;
if (buf->waiting || buf->pending) {
DRM_ERROR("Free buffer %d: w%d, p%d, l%d\n",
buf->idx, buf->waiting, buf->pending, buf->list);
}
return buf;
}
drm_buf_t *DRM(freelist_get)(drm_freelist_t *bl, int block)
{
drm_buf_t *buf = NULL;
int error;
if (!bl || !bl->initialized) return NULL;
/* Check for low water mark */
if (atomic_read(&bl->count) <= bl->low_mark) /* Became low */
atomic_set(&bl->wfh, 1);
if (atomic_read(&bl->wfh)) {
if (block) {
for (;;) {
if (!atomic_read(&bl->wfh)
&& (buf = DRM(freelist_try(bl)))) break;
error = tsleep((void *)&bl->waiting, PZERO|PCATCH,
"drmfg", 0);
if (error)
break;
}
}
return buf;
}
return DRM(freelist_try)(bl);
}
#endif /* __HAVE_DMA_FREELIST */


@ -33,18 +33,6 @@
#include "dev/drm/drmP.h" #include "dev/drm/drmP.h"
int DRM(block)( DRM_IOCTL_ARGS )
{
DRM_DEBUG("\n");
return 0;
}
int DRM(unblock)( DRM_IOCTL_ARGS )
{
DRM_DEBUG("\n");
return 0;
}
int DRM(lock_take)(__volatile__ unsigned int *lock, unsigned int context) int DRM(lock_take)(__volatile__ unsigned int *lock, unsigned int context)
{ {
unsigned int old, new; unsigned int old, new;
@ -78,7 +66,7 @@ int DRM(lock_transfer)(drm_device_t *dev,
{ {
unsigned int old, new; unsigned int old, new;
dev->lock.pid = 0; dev->lock.filp = NULL;
do { do {
old = *lock; old = *lock;
new = context | _DRM_LOCK_HELD; new = context | _DRM_LOCK_HELD;
@ -91,149 +79,19 @@ int DRM(lock_free)(drm_device_t *dev,
__volatile__ unsigned int *lock, unsigned int context) __volatile__ unsigned int *lock, unsigned int context)
{ {
unsigned int old, new; unsigned int old, new;
pid_t pid = dev->lock.pid;
dev->lock.pid = 0; dev->lock.filp = NULL;
do { do {
old = *lock; old = *lock;
new = 0; new = 0;
} while (!atomic_cmpset_int(lock, old, new)); } while (!atomic_cmpset_int(lock, old, new));
if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) { if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
DRM_ERROR("%d freed heavyweight lock held by %d (pid %d)\n", DRM_ERROR("%d freed heavyweight lock held by %d\n",
context, context, _DRM_LOCKING_CONTEXT(old));
_DRM_LOCKING_CONTEXT(old),
pid);
return 1; return 1;
} }
DRM_WAKEUP_INT((void *)&dev->lock.lock_queue); DRM_WAKEUP_INT((void *)&dev->lock.lock_queue);
return 0; return 0;
} }
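/* The hardware lock that lock_take/lock_transfer/lock_free manipulate is a
 * single 32-bit word: the top bit marks the lock held, the next bit marks
 * contention, and the low bits carry the owning context -- which is what
 * _DRM_LOCK_IS_HELD() and _DRM_LOCKING_CONTEXT() unpack.  A user-space
 * sketch using GCC atomics (bit values believed to match the shared drm.h;
 * contention handling omitted):
 */
#include <stdio.h>

#define LOCK_HELD	0x80000000U
#define LOCK_CONT	0x40000000U
#define LOCK_IS_HELD(l)		((l) & LOCK_HELD)
#define LOCKING_CONTEXT(l)	((l) & ~(LOCK_HELD | LOCK_CONT))

static volatile unsigned int hw_lock;

static int
lock_take_sketch(volatile unsigned int *lock, unsigned int context)
{
	unsigned int old, new;

	do {
		old = *lock;
		if (LOCK_IS_HELD(old))
			return 0;	/* owned: the caller would sleep */
		new = context | LOCK_HELD;
	} while (__sync_val_compare_and_swap(lock, old, new) != old);
	return 1;
}

static void
lock_free_sketch(volatile unsigned int *lock)
{
	unsigned int old;

	do {
		old = *lock;
	} while (__sync_val_compare_and_swap(lock, old, 0) != old);
	/* the kernel would DRM_WAKEUP_INT() the lock_queue here */
}

int
main(void)
{
	/* prints: took: 1, owner: 3 */
	printf("took: %d, owner: %u\n", lock_take_sketch(&hw_lock, 3),
	    LOCKING_CONTEXT(hw_lock));
	lock_free_sketch(&hw_lock);
	return 0;
}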
static int DRM(flush_queue)(drm_device_t *dev, int context)
{
int error;
int ret = 0;
drm_queue_t *q = dev->queuelist[context];
DRM_DEBUG("\n");
atomic_inc(&q->use_count);
if (atomic_read(&q->use_count) > 1) {
atomic_inc(&q->block_write);
atomic_inc(&q->block_count);
error = tsleep((void *)&q->flush_queue, PZERO|PCATCH, "drmfq", 0);
if (error)
return error;
atomic_dec(&q->block_count);
}
atomic_dec(&q->use_count);
/* NOTE: block_write is still incremented!
Use drm_flush_unlock_queue to decrement. */
return ret;
}
static int DRM(flush_unblock_queue)(drm_device_t *dev, int context)
{
drm_queue_t *q = dev->queuelist[context];
DRM_DEBUG("\n");
atomic_inc(&q->use_count);
if (atomic_read(&q->use_count) > 1) {
if (atomic_read(&q->block_write)) {
atomic_dec(&q->block_write);
DRM_WAKEUP_INT((void *)&q->write_queue);
}
}
atomic_dec(&q->use_count);
return 0;
}
int DRM(flush_block_and_flush)(drm_device_t *dev, int context,
drm_lock_flags_t flags)
{
int ret = 0;
int i;
DRM_DEBUG("\n");
if (flags & _DRM_LOCK_FLUSH) {
ret = DRM(flush_queue)(dev, DRM_KERNEL_CONTEXT);
if (!ret) ret = DRM(flush_queue)(dev, context);
}
if (flags & _DRM_LOCK_FLUSH_ALL) {
for (i = 0; !ret && i < dev->queue_count; i++) {
ret = DRM(flush_queue)(dev, i);
}
}
return ret;
}
int DRM(flush_unblock)(drm_device_t *dev, int context, drm_lock_flags_t flags)
{
int ret = 0;
int i;
DRM_DEBUG("\n");
if (flags & _DRM_LOCK_FLUSH) {
ret = DRM(flush_unblock_queue)(dev, DRM_KERNEL_CONTEXT);
if (!ret) ret = DRM(flush_unblock_queue)(dev, context);
}
if (flags & _DRM_LOCK_FLUSH_ALL) {
for (i = 0; !ret && i < dev->queue_count; i++) {
ret = DRM(flush_unblock_queue)(dev, i);
}
}
return ret;
}
int DRM(finish)( DRM_IOCTL_ARGS )
{
DRM_DEVICE;
int ret = 0;
drm_lock_t lock;
DRM_DEBUG("\n");
DRM_COPY_FROM_USER_IOCTL( lock, (drm_lock_t *)data, sizeof(lock) );
ret = DRM(flush_block_and_flush)(dev, lock.context, lock.flags);
DRM(flush_unblock)(dev, lock.context, lock.flags);
return ret;
}
/* If we get here, it means that the process has called DRM_IOCTL_LOCK
without calling DRM_IOCTL_UNLOCK.
If the lock is not held, then let the signal proceed as usual.
If the lock is held, then set the contended flag and keep the signal
blocked.
Return 1 if the signal should be delivered normally.
Return 0 if the signal should be blocked. */
int DRM(notifier)(void *priv)
{
drm_sigdata_t *s = (drm_sigdata_t *)priv;
unsigned int old, new;
/* Allow signal delivery if lock isn't held */
if (!_DRM_LOCK_IS_HELD(s->lock->lock)
|| _DRM_LOCKING_CONTEXT(s->lock->lock) != s->context) return 1;
/* Otherwise, set flag to force call to
drmUnlock */
do {
old = s->lock->lock;
new = old | _DRM_LOCK_CONT;
} while (!atomic_cmpset_int(&s->lock->lock, old, new));
return 0;
}


@ -61,7 +61,6 @@ static drm_mem_stats_t DRM(mem_stats)[] = {
[DRM_MEM_MAGIC] = { "magic" }, [DRM_MEM_MAGIC] = { "magic" },
[DRM_MEM_IOCTLS] = { "ioctltab" }, [DRM_MEM_IOCTLS] = { "ioctltab" },
[DRM_MEM_MAPS] = { "maplist" }, [DRM_MEM_MAPS] = { "maplist" },
[DRM_MEM_VMAS] = { "vmalist" },
[DRM_MEM_BUFS] = { "buflist" }, [DRM_MEM_BUFS] = { "buflist" },
[DRM_MEM_SEGS] = { "seglist" }, [DRM_MEM_SEGS] = { "seglist" },
[DRM_MEM_PAGES] = { "pagelist" }, [DRM_MEM_PAGES] = { "pagelist" },


@ -48,7 +48,11 @@
#define __REALLY_HAVE_AGP __HAVE_AGP #define __REALLY_HAVE_AGP __HAVE_AGP
#endif #endif
#ifdef __i386__
#define __REALLY_HAVE_MTRR (__HAVE_MTRR) #define __REALLY_HAVE_MTRR (__HAVE_MTRR)
#else
#define __REALLY_HAVE_MTRR 0
#endif
#define __REALLY_HAVE_SG (__HAVE_SG) #define __REALLY_HAVE_SG (__HAVE_SG)
#if __REALLY_HAVE_AGP #if __REALLY_HAVE_AGP
@ -97,7 +101,11 @@
#define DRM_CURRENTPID curproc->p_pid #define DRM_CURRENTPID curproc->p_pid
#endif #endif
#define DRM_IOCTL_ARGS	dev_t kdev, u_long cmd, caddr_t data, int flags, DRM_STRUCTPROC *p

/* Currently our DRMFILE (filp) is a void * which is actually the pid
 * of the current process.  It should be a per-open unique pointer, but
 * code for that is not yet written */
#define DRMFILE		void *
#define DRM_IOCTL_ARGS	dev_t kdev, u_long cmd, caddr_t data, int flags, DRM_STRUCTPROC *p, DRMFILE filp
#define DRM_LOCK lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, DRM_CURPROC) #define DRM_LOCK lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, DRM_CURPROC)
#define DRM_UNLOCK lockmgr(&dev->dev_lock, LK_RELEASE, 0, DRM_CURPROC) #define DRM_UNLOCK lockmgr(&dev->dev_lock, LK_RELEASE, 0, DRM_CURPROC)
#define DRM_SUSER(p) suser(p) #define DRM_SUSER(p) suser(p)
@ -129,6 +137,16 @@
return EINVAL; \ return EINVAL; \
} }
#define LOCK_TEST_WITH_RETURN(dev, filp) \
do { \
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) || \
dev->lock.filp != filp) { \
DRM_ERROR("%s called without lock held\n", \
__FUNCTION__); \
return EINVAL; \
} \
} while (0)
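/* Hypothetical usage of the macro above (foo_dma_flush is made up; the MGA
 * conversions later in this commit follow the same pattern): a driver
 * ioctl opens by rejecting callers that do not hold the hardware lock,
 * with filp identifying the caller where a pid used to.
 */
int foo_dma_flush( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;

	LOCK_TEST_WITH_RETURN( dev, filp );

	/* ... work that requires the hardware lock ... */
	return 0;
}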
#define DRM_UDELAY( udelay ) \ #define DRM_UDELAY( udelay ) \
do { \ do { \
struct timeval tv1, tv2; \ struct timeval tv1, tv2; \
@ -200,7 +218,7 @@ while (!condition) { \
#endif #endif
#define malloctype DRM(M_DRM) #define malloctype DRM(M_DRM)
/* The macros confliced in the MALLOC_DEFINE */ /* The macros conflicted in the MALLOC_DEFINE */
MALLOC_DECLARE(malloctype); MALLOC_DECLARE(malloctype);
#undef malloctype #undef malloctype
@ -214,10 +232,8 @@ typedef struct drm_chipinfo
#define cpu_to_le32(x) (x) /* FIXME */ #define cpu_to_le32(x) (x) /* FIXME */
typedef u_int32_t dma_addr_t; typedef unsigned long dma_addr_t;
typedef u_int32_t atomic_t; typedef u_int32_t atomic_t;
typedef u_int32_t cycles_t;
typedef u_int32_t spinlock_t;
typedef u_int32_t u32; typedef u_int32_t u32;
typedef u_int16_t u16; typedef u_int16_t u16;
typedef u_int8_t u8; typedef u_int8_t u8;
@ -301,10 +317,8 @@ find_first_zero_bit(volatile void *p, int max)
* exist. * exist.
*/ */
#if (__FreeBSD_version < 500002 && __FreeBSD_version > 500000) || __FreeBSD_version < 420000 #if (__FreeBSD_version < 500002 && __FreeBSD_version > 500000) || __FreeBSD_version < 420000
/* FIXME: again, what's the exact date? */
#define MODULE_VERSION(a,b) struct __hack #define MODULE_VERSION(a,b) struct __hack
#define MODULE_DEPEND(a,b,c,d,e) struct __hack #define MODULE_DEPEND(a,b,c,d,e) struct __hack
#endif #endif
/* Redefinitions to make templating easy */ /* Redefinitions to make templating easy */
@ -330,8 +344,6 @@ find_first_zero_bit(volatile void *p, int max)
#define DRM_DEBUG(fmt, arg...) do { } while (0) #define DRM_DEBUG(fmt, arg...) do { } while (0)
#endif #endif
#define DRM_PROC_LIMIT (PAGE_SIZE-80)
#if (__FreeBSD_version >= 500000) || ((__FreeBSD_version < 500000) && (__FreeBSD_version >= 410002)) #if (__FreeBSD_version >= 500000) || ((__FreeBSD_version < 500000) && (__FreeBSD_version >= 410002))
#define DRM_SYSCTL_HANDLER_ARGS (SYSCTL_HANDLER_ARGS) #define DRM_SYSCTL_HANDLER_ARGS (SYSCTL_HANDLER_ARGS)
#else #else
@ -365,12 +377,9 @@ find_first_zero_bit(volatile void *p, int max)
/* drm_drv.h */ /* drm_drv.h */
extern d_ioctl_t DRM(ioctl); extern d_ioctl_t DRM(ioctl);
extern d_ioctl_t DRM(lock);
extern d_ioctl_t DRM(unlock);
extern d_open_t DRM(open); extern d_open_t DRM(open);
extern d_close_t DRM(close); extern d_close_t DRM(close);
extern d_read_t DRM(read); extern d_read_t DRM(read);
extern d_write_t DRM(write);
extern d_poll_t DRM(poll); extern d_poll_t DRM(poll);
extern d_mmap_t DRM(mmap); extern d_mmap_t DRM(mmap);
extern int DRM(open_helper)(dev_t kdev, int flags, int fmt, extern int DRM(open_helper)(dev_t kdev, int flags, int fmt,
@ -378,81 +387,9 @@ extern int DRM(open_helper)(dev_t kdev, int flags, int fmt,
extern drm_file_t *DRM(find_file_by_proc)(drm_device_t *dev, extern drm_file_t *DRM(find_file_by_proc)(drm_device_t *dev,
DRM_STRUCTPROC *p); DRM_STRUCTPROC *p);
/* Misc. IOCTL support (drm_ioctl.h) */ /* sysctl support (drm_sysctl.h) */
extern d_ioctl_t DRM(irq_busid);
extern d_ioctl_t DRM(getunique);
extern d_ioctl_t DRM(setunique);
extern d_ioctl_t DRM(getmap);
extern d_ioctl_t DRM(getclient);
extern d_ioctl_t DRM(getstats);
/* Context IOCTL support (drm_context.h) */
extern d_ioctl_t DRM(resctx);
extern d_ioctl_t DRM(addctx);
extern d_ioctl_t DRM(modctx);
extern d_ioctl_t DRM(getctx);
extern d_ioctl_t DRM(switchctx);
extern d_ioctl_t DRM(newctx);
extern d_ioctl_t DRM(rmctx);
extern d_ioctl_t DRM(setsareactx);
extern d_ioctl_t DRM(getsareactx);
/* Drawable IOCTL support (drm_drawable.h) */
extern d_ioctl_t DRM(adddraw);
extern d_ioctl_t DRM(rmdraw);
/* Authentication IOCTL support (drm_auth.h) */
extern d_ioctl_t DRM(getmagic);
extern d_ioctl_t DRM(authmagic);
/* Locking IOCTL support (drm_lock.h) */
extern d_ioctl_t DRM(block);
extern d_ioctl_t DRM(unblock);
extern d_ioctl_t DRM(finish);
/* Buffer management support (drm_bufs.h) */
extern d_ioctl_t DRM(addmap);
extern d_ioctl_t DRM(rmmap);
#if __HAVE_DMA
extern d_ioctl_t DRM(addbufs_agp);
extern d_ioctl_t DRM(addbufs_pci);
extern d_ioctl_t DRM(addbufs_sg);
extern d_ioctl_t DRM(addbufs);
extern d_ioctl_t DRM(infobufs);
extern d_ioctl_t DRM(markbufs);
extern d_ioctl_t DRM(freebufs);
extern d_ioctl_t DRM(mapbufs);
#endif
/* Memory management support (drm_memory.h) */
extern int DRM(mem_info) DRM_SYSCTL_HANDLER_ARGS;
/* DMA support (drm_dma.h) */
#if __HAVE_DMA
extern d_ioctl_t DRM(control);
#endif
#if __HAVE_VBL_IRQ
extern d_ioctl_t DRM(wait_vblank);
#endif
/* AGP/GART support (drm_agpsupport.h) */
#if __REALLY_HAVE_AGP
extern d_ioctl_t DRM(agp_acquire);
extern d_ioctl_t DRM(agp_release);
extern d_ioctl_t DRM(agp_enable);
extern d_ioctl_t DRM(agp_info);
extern d_ioctl_t DRM(agp_alloc);
extern d_ioctl_t DRM(agp_free);
extern d_ioctl_t DRM(agp_unbind);
extern d_ioctl_t DRM(agp_bind);
#endif
/* Scatter Gather Support (drm_scatter.h) */
#if __HAVE_SG
extern d_ioctl_t DRM(sg_alloc);
extern d_ioctl_t DRM(sg_free);
#endif
/* SysCtl Support (drm_sysctl.h) */
extern int DRM(sysctl_init)(drm_device_t *dev); extern int DRM(sysctl_init)(drm_device_t *dev);
extern int DRM(sysctl_cleanup)(drm_device_t *dev); extern int DRM(sysctl_cleanup)(drm_device_t *dev);
/* Memory info sysctl (drm_memory.h) */
extern int DRM(mem_info) DRM_SYSCTL_HANDLER_ARGS;


@ -9,14 +9,7 @@
static int DRM(name_info)DRM_SYSCTL_HANDLER_ARGS; static int DRM(name_info)DRM_SYSCTL_HANDLER_ARGS;
static int DRM(vm_info)DRM_SYSCTL_HANDLER_ARGS; static int DRM(vm_info)DRM_SYSCTL_HANDLER_ARGS;
static int DRM(clients_info)DRM_SYSCTL_HANDLER_ARGS; static int DRM(clients_info)DRM_SYSCTL_HANDLER_ARGS;
static int DRM(queues_info)DRM_SYSCTL_HANDLER_ARGS;
static int DRM(bufs_info)DRM_SYSCTL_HANDLER_ARGS; static int DRM(bufs_info)DRM_SYSCTL_HANDLER_ARGS;
#if DRM_DEBUG_CODExx
static int DRM(vma_info)DRM_SYSCTL_HANDLER_ARGS;
#endif
#if DRM_DMA_HISTOGRAM
static int DRM(histo_info)DRM_SYSCTL_HANDLER_ARGS;
#endif
struct DRM(sysctl_list) { struct DRM(sysctl_list) {
const char *name; const char *name;
@ -26,14 +19,7 @@ struct DRM(sysctl_list) {
{ "mem", DRM(mem_info) }, { "mem", DRM(mem_info) },
{ "vm", DRM(vm_info) }, { "vm", DRM(vm_info) },
{ "clients", DRM(clients_info) }, { "clients", DRM(clients_info) },
{ "queues", DRM(queues_info) },
{ "bufs", DRM(bufs_info) }, { "bufs", DRM(bufs_info) },
#if DRM_DEBUG_CODExx
{ "vma", DRM(vma_info) },
#endif
#if DRM_DMA_HISTOGRAM
{ "histo", drm_histo_info) },
#endif
}; };
#define DRM_SYSCTL_ENTRIES (sizeof(DRM(sysctl_list))/sizeof(DRM(sysctl_list)[0])) #define DRM_SYSCTL_ENTRIES (sizeof(DRM(sysctl_list))/sizeof(DRM(sysctl_list)[0]))
@ -176,55 +162,6 @@ static int DRM(vm_info)DRM_SYSCTL_HANDLER_ARGS
} }
static int DRM(_queues_info)DRM_SYSCTL_HANDLER_ARGS
{
drm_device_t *dev = arg1;
int i;
drm_queue_t *q;
char buf[128];
int error;
DRM_SYSCTL_PRINT(" ctx/flags use fin"
" blk/rw/rwf wait flushed queued"
" locks\n\n");
for (i = 0; i < dev->queue_count; i++) {
q = dev->queuelist[i];
atomic_inc(&q->use_count);
DRM_SYSCTL_PRINT_RET(atomic_dec(&q->use_count),
"%5d/0x%03x %5d %5d"
" %5d/%c%c/%c%c%c %5d %10d %10d %10d\n",
i,
q->flags,
atomic_read(&q->use_count),
atomic_read(&q->finalization),
atomic_read(&q->block_count),
atomic_read(&q->block_read) ? 'r' : '-',
atomic_read(&q->block_write) ? 'w' : '-',
q->read_queue ? 'r':'-',
q->write_queue ? 'w':'-',
q->flush_queue ? 'f':'-',
(int)DRM_BUFCOUNT(&q->waitlist),
atomic_read(&q->total_flushed),
atomic_read(&q->total_queued),
atomic_read(&q->total_locks));
atomic_dec(&q->use_count);
}
SYSCTL_OUT(req, "", 1);
return 0;
}
static int DRM(queues_info) DRM_SYSCTL_HANDLER_ARGS
{
drm_device_t *dev = arg1;
int ret;
DRM_LOCK;
ret = DRM(_queues_info)(oidp, arg1, arg2, req);
DRM_UNLOCK;
return ret;
}
/* drm_bufs_info is called whenever a process reads /* drm_bufs_info is called whenever a process reads
hw.dri.0.bufs. */ hw.dri.0.bufs. */
@ -309,220 +246,6 @@ static int DRM(clients_info)DRM_SYSCTL_HANDLER_ARGS
return ret; return ret;
} }
#if DRM_DEBUG_CODExx
static int DRM(_vma_info)DRM_SYSCTL_HANDLER_ARGS
{
drm_device_t *dev = arg1;
drm_vma_entry_t *pt;
pgd_t *pgd;
pmd_t *pmd;
pte_t *pte;
unsigned long i;
struct vm_area_struct *vma;
unsigned long address;
#if defined(__i386__)
unsigned int pgprot;
#endif
char buf[128];
int error;
DRM_SYSCTL_PRINT("vma use count: %d, high_memory = %p, 0x%08lx\n",
atomic_read(&dev->vma_count),
high_memory, virt_to_phys(high_memory));
for (pt = dev->vmalist; pt; pt = pt->next) {
if (!(vma = pt->vma)) continue;
DRM_SYSCTL_PRINT("\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx",
pt->pid,
vma->vm_start,
vma->vm_end,
vma->vm_flags & VM_READ ? 'r' : '-',
vma->vm_flags & VM_WRITE ? 'w' : '-',
vma->vm_flags & VM_EXEC ? 'x' : '-',
vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
vma->vm_flags & VM_LOCKED ? 'l' : '-',
vma->vm_flags & VM_IO ? 'i' : '-',
vma->vm_offset );
#if defined(__i386__)
pgprot = pgprot_val(vma->vm_page_prot);
DRM_SYSCTL_PRINT(" %c%c%c%c%c%c%c%c%c",
pgprot & _PAGE_PRESENT ? 'p' : '-',
pgprot & _PAGE_RW ? 'w' : 'r',
pgprot & _PAGE_USER ? 'u' : 's',
pgprot & _PAGE_PWT ? 't' : 'b',
pgprot & _PAGE_PCD ? 'u' : 'c',
pgprot & _PAGE_ACCESSED ? 'a' : '-',
pgprot & _PAGE_DIRTY ? 'd' : '-',
pgprot & _PAGE_4M ? 'm' : 'k',
pgprot & _PAGE_GLOBAL ? 'g' : 'l' );
#endif
DRM_SYSCTL_PRINT("\n");
for (i = vma->vm_start; i < vma->vm_end; i += PAGE_SIZE) {
pgd = pgd_offset(vma->vm_mm, i);
pmd = pmd_offset(pgd, i);
pte = pte_offset(pmd, i);
if (pte_present(*pte)) {
address = __pa(pte_page(*pte))
+ (i & (PAGE_SIZE-1));
DRM_SYSCTL_PRINT(" 0x%08lx -> 0x%08lx"
" %c%c%c%c%c\n",
i,
address,
pte_read(*pte) ? 'r' : '-',
pte_write(*pte) ? 'w' : '-',
pte_exec(*pte) ? 'x' : '-',
pte_dirty(*pte) ? 'd' : '-',
pte_young(*pte) ? 'a' : '-' );
} else {
DRM_SYSCTL_PRINT(" 0x%08lx\n", i);
}
}
}
SYSCTL_OUT(req, "", 1);
return 0;
}
static int DRM(vma_info)DRM_SYSCTL_HANDLER_ARGS
{
drm_device_t *dev = arg1;
int ret;
DRM_LOCK;
ret = DRM(_vma_info)(oidp, arg1, arg2, req);
DRM_UNLOCK;
return ret;
}
#endif
#if DRM_DMA_HISTOGRAM
static int DRM(_histo_info)DRM_SYSCTL_HANDLER_ARGS
{
drm_device_t *dev = arg1;
drm_device_dma_t *dma = dev->dma;
int i;
unsigned long slot_value = DRM_DMA_HISTOGRAM_INITIAL;
unsigned long prev_value = 0;
drm_buf_t *buffer;
char buf[128];
int error;
DRM_SYSCTL_PRINT("general statistics:\n");
DRM_SYSCTL_PRINT("total %10u\n", atomic_read(&dev->histo.total));
DRM_SYSCTL_PRINT("open %10u\n", atomic_read(&dev->total_open));
DRM_SYSCTL_PRINT("close %10u\n", atomic_read(&dev->total_close));
DRM_SYSCTL_PRINT("ioctl %10u\n", atomic_read(&dev->total_ioctl));
DRM_SYSCTL_PRINT("irq %10u\n", atomic_read(&dev->total_irq));
DRM_SYSCTL_PRINT("ctx %10u\n", atomic_read(&dev->total_ctx));
DRM_SYSCTL_PRINT("\nlock statistics:\n");
DRM_SYSCTL_PRINT("locks %10u\n", atomic_read(&dev->total_locks));
DRM_SYSCTL_PRINT("unlocks %10u\n", atomic_read(&dev->total_unlocks));
DRM_SYSCTL_PRINT("contends %10u\n", atomic_read(&dev->total_contends));
DRM_SYSCTL_PRINT("sleeps %10u\n", atomic_read(&dev->total_sleeps));
if (dma) {
DRM_SYSCTL_PRINT("\ndma statistics:\n");
DRM_SYSCTL_PRINT("prio %10u\n",
atomic_read(&dma->total_prio));
DRM_SYSCTL_PRINT("bytes %10u\n",
atomic_read(&dma->total_bytes));
DRM_SYSCTL_PRINT("dmas %10u\n",
atomic_read(&dma->total_dmas));
DRM_SYSCTL_PRINT("missed:\n");
DRM_SYSCTL_PRINT(" dma %10u\n",
atomic_read(&dma->total_missed_dma));
DRM_SYSCTL_PRINT(" lock %10u\n",
atomic_read(&dma->total_missed_lock));
DRM_SYSCTL_PRINT(" free %10u\n",
atomic_read(&dma->total_missed_free));
DRM_SYSCTL_PRINT(" sched %10u\n",
atomic_read(&dma->total_missed_sched));
DRM_SYSCTL_PRINT("tried %10u\n",
atomic_read(&dma->total_tried));
DRM_SYSCTL_PRINT("hit %10u\n",
atomic_read(&dma->total_hit));
DRM_SYSCTL_PRINT("lost %10u\n",
atomic_read(&dma->total_lost));
buffer = dma->next_buffer;
if (buffer) {
DRM_SYSCTL_PRINT("next_buffer %7d\n", buffer->idx);
} else {
DRM_SYSCTL_PRINT("next_buffer none\n");
}
buffer = dma->this_buffer;
if (buffer) {
DRM_SYSCTL_PRINT("this_buffer %7d\n", buffer->idx);
} else {
DRM_SYSCTL_PRINT("this_buffer none\n");
}
}
DRM_SYSCTL_PRINT("\nvalues:\n");
if (dev->lock.hw_lock) {
DRM_SYSCTL_PRINT("lock 0x%08x\n",
dev->lock.hw_lock->lock);
} else {
DRM_SYSCTL_PRINT("lock none\n");
}
DRM_SYSCTL_PRINT("context_flag 0x%08x\n", dev->context_flag);
DRM_SYSCTL_PRINT("interrupt_flag 0x%08x\n", dev->interrupt_flag);
DRM_SYSCTL_PRINT("dma_flag 0x%08x\n", dev->dma_flag);
DRM_SYSCTL_PRINT("queue_count %10d\n", dev->queue_count);
DRM_SYSCTL_PRINT("last_context %10d\n", dev->last_context);
DRM_SYSCTL_PRINT("last_switch %10u\n", dev->last_switch);
DRM_SYSCTL_PRINT("last_checked %10d\n", dev->last_checked);
DRM_SYSCTL_PRINT("\n q2d d2c c2f"
" q2c q2f dma sch"
" ctx lacq lhld\n\n");
for (i = 0; i < DRM_DMA_HISTOGRAM_SLOTS; i++) {
DRM_SYSCTL_PRINT("%s %10lu %10u %10u %10u %10u %10u"
" %10u %10u %10u %10u %10u\n",
i == DRM_DMA_HISTOGRAM_SLOTS - 1 ? ">=" : "< ",
i == DRM_DMA_HISTOGRAM_SLOTS - 1
? prev_value : slot_value ,
atomic_read(&dev->histo
.queued_to_dispatched[i]),
atomic_read(&dev->histo
.dispatched_to_completed[i]),
atomic_read(&dev->histo
.completed_to_freed[i]),
atomic_read(&dev->histo
.queued_to_completed[i]),
atomic_read(&dev->histo
.queued_to_freed[i]),
atomic_read(&dev->histo.dma[i]),
atomic_read(&dev->histo.schedule[i]),
atomic_read(&dev->histo.ctx[i]),
atomic_read(&dev->histo.lacq[i]),
atomic_read(&dev->histo.lhld[i]));
prev_value = slot_value;
slot_value = DRM_DMA_HISTOGRAM_NEXT(slot_value);
}
SYSCTL_OUT(req, "", 1);
return 0;
}
static int DRM(histo_info)DRM_SYSCTL_HANDLER_ARGS
{
drm_device_t *dev = arg1;
int ret;
DRM_LOCK;
ret = _drm_histo_info(oidp, arg1, arg2, req);
DRM_UNLOCK;
return ret;
}
#endif
#elif defined(__NetBSD__) #elif defined(__NetBSD__)
/* stub it out for now, sysctl is only for debugging */ /* stub it out for now, sysctl is only for debugging */


@ -45,15 +45,14 @@ paddr_t DRM(mmap)(dev_t kdev, off_t offset, int prot)
drm_map_list_entry_t *listentry=NULL; drm_map_list_entry_t *listentry=NULL;
drm_file_t *priv; drm_file_t *priv;
/* DRM_DEBUG("offset = 0x%x\n", offset);*/
priv = DRM(find_file_by_proc)(dev, DRM_CURPROC); priv = DRM(find_file_by_proc)(dev, DRM_CURPROC);
if (!priv) { if (!priv) {
DRM_DEBUG("can't find authenticator\n"); DRM_DEBUG("can't find authenticator\n");
return EINVAL; return EINVAL;
} }
if (!priv->authenticated) return DRM_ERR(EACCES); if (!priv->authenticated)
return DRM_ERR(EACCES);
if (dev->dma if (dev->dma
&& offset >= 0 && offset >= 0


@@ -644,9 +644,12 @@ int mga_do_cleanup_dma( drm_device_t *dev )
 	if ( dev->dev_private ) {
 		drm_mga_private_t *dev_priv = dev->dev_private;
-		DRM_IOREMAPFREE( dev_priv->warp );
-		DRM_IOREMAPFREE( dev_priv->primary );
-		DRM_IOREMAPFREE( dev_priv->buffers );
+		if ( dev_priv->warp != NULL )
+			DRM_IOREMAPFREE( dev_priv->warp );
+		if ( dev_priv->primary != NULL )
+			DRM_IOREMAPFREE( dev_priv->primary );
+		if ( dev_priv->buffers != NULL )
+			DRM_IOREMAPFREE( dev_priv->buffers );
 		if ( dev_priv->head != NULL ) {
 			mga_freelist_cleanup( dev );
@@ -688,7 +691,7 @@ int mga_dma_flush( DRM_IOCTL_ARGS )
 	drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
 	drm_lock_t lock;
-	LOCK_TEST_WITH_RETURN( dev );
+	LOCK_TEST_WITH_RETURN( dev, filp );
 	DRM_COPY_FROM_USER_IOCTL( lock, (drm_lock_t *)data, sizeof(lock) );
@@ -722,7 +725,7 @@ int mga_dma_reset( DRM_IOCTL_ARGS )
 	DRM_DEVICE;
 	drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
-	LOCK_TEST_WITH_RETURN( dev );
+	LOCK_TEST_WITH_RETURN( dev, filp );
 	return mga_do_dma_reset( dev_priv );
 }
@@ -732,7 +735,8 @@ int mga_dma_reset( DRM_IOCTL_ARGS )
  * DMA buffer management
  */
-static int mga_dma_get_buffers( drm_device_t *dev, drm_dma_t *d )
+static int mga_dma_get_buffers( DRMFILE filp,
+				drm_device_t *dev, drm_dma_t *d )
 {
 	drm_buf_t *buf;
 	int i;
@@ -741,7 +745,7 @@ static int mga_dma_get_buffers( drm_device_t *dev, drm_dma_t *d )
 		buf = mga_freelist_get( dev );
 		if ( !buf ) return DRM_ERR(EAGAIN);
-		buf->pid = DRM_CURRENTPID;
+		buf->filp = filp;
 		if ( DRM_COPY_TO_USER( &d->request_indices[i],
 				       &buf->idx, sizeof(buf->idx) ) )
@@ -763,7 +767,7 @@ int mga_dma_buffers( DRM_IOCTL_ARGS )
 	drm_dma_t d;
 	int ret = 0;
-	LOCK_TEST_WITH_RETURN( dev );
+	LOCK_TEST_WITH_RETURN( dev, filp );
 	DRM_COPY_FROM_USER_IOCTL( d, (drm_dma_t *)data, sizeof(d) );
@@ -788,7 +792,7 @@ int mga_dma_buffers( DRM_IOCTL_ARGS )
 	d.granted_count = 0;
 	if ( d.request_count ) {
-		ret = mga_dma_get_buffers( dev, &d );
+		ret = mga_dma_get_buffers( filp, dev, &d );
 	}
 	DRM_COPY_TO_USER_IOCTL( (drm_dma_t *)data, d, sizeof(d) );
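The mga hunks above set the pattern that the r128 and radeon changes below repeat: the ioctl entry point receives an opaque per-open DRMFILE handle and threads it down to the buffer allocator, which stamps it on every granted buffer. A minimal sketch of that shape, assuming DRM_IOCTL_ARGS now supplies filp to each handler; the example_ names are illustrative, not from these sources:

/* Threading the file handle down to the allocator.  A pid can be
 * recycled or shared by several contexts, so the per-open DRMFILE is
 * the reliable owner key for reclaiming buffers on close. */
static int example_get_buffers( DRMFILE filp, drm_device_t *dev, drm_dma_t *d )
{
	int i;

	for ( i = d->granted_count ; i < d->request_count ; i++ ) {
		drm_buf_t *buf = example_freelist_get( dev );	/* hypothetical */
		if ( !buf ) return DRM_ERR(EAGAIN);

		buf->filp = filp;	/* owner is the open file, not a pid */

		if ( DRM_COPY_TO_USER( &d->request_indices[i],
				       &buf->idx, sizeof(buf->idx) ) )
			return DRM_ERR(EFAULT);
		d->granted_count++;
	}
	return 0;
}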


@@ -56,7 +56,6 @@ drm_chipinfo_t DRM(devicelist)[] = {
 #include "dev/drm/drm_drawable.h"
 #include "dev/drm/drm_drv.h"
 #include "dev/drm/drm_fops.h"
-#include "dev/drm/drm_init.h"
 #include "dev/drm/drm_ioctl.h"
 #include "dev/drm/drm_lock.h"
 #include "dev/drm/drm_memory.h"


@@ -186,16 +186,6 @@ do { \
 	} \
 } while (0)
-#define LOCK_TEST_WITH_RETURN( dev ) \
-do { \
-	if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) || \
-	     dev->lock.pid != DRM_CURRENTPID ) { \
-		DRM_ERROR( "%s called without lock held\n", \
-			   __FUNCTION__ ); \
-		return DRM_ERR(EINVAL); \
-	} \
-} while (0)
 #define WRAP_TEST_WITH_RETURN( dev_priv ) \
 do { \
 	if ( test_bit( 0, &dev_priv->prim.wrapped ) ) { \
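The per-driver copies of this macro (removed here and from the r128 and radeon headers below) all compared dev->lock.pid against DRM_CURRENTPID. They give way to one shared two-argument macro; a plausible reconstruction of it, assuming the common code now records the lock holder as dev->lock.filp:

#define LOCK_TEST_WITH_RETURN( dev, filp ) \
do { \
	if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) || \
	     dev->lock.filp != filp ) { \
		DRM_ERROR( "%s called without lock held\n", \
			   __FUNCTION__ ); \
		return DRM_ERR(EINVAL); \
	} \
} while (0)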


@@ -889,7 +889,7 @@ int mga_dma_clear( DRM_IOCTL_ARGS )
 	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
 	drm_mga_clear_t clear;
-	LOCK_TEST_WITH_RETURN( dev );
+	LOCK_TEST_WITH_RETURN( dev, filp );
 	DRM_COPY_FROM_USER_IOCTL( clear, (drm_mga_clear_t *)data, sizeof(clear) );
@@ -913,7 +913,7 @@ int mga_dma_swap( DRM_IOCTL_ARGS )
 	drm_mga_private_t *dev_priv = dev->dev_private;
 	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
-	LOCK_TEST_WITH_RETURN( dev );
+	LOCK_TEST_WITH_RETURN( dev, filp );
 	if ( sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS )
 		sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;
@@ -938,7 +938,7 @@ int mga_dma_vertex( DRM_IOCTL_ARGS )
 	drm_mga_buf_priv_t *buf_priv;
 	drm_mga_vertex_t vertex;
-	LOCK_TEST_WITH_RETURN( dev );
+	LOCK_TEST_WITH_RETURN( dev, filp );
 	DRM_COPY_FROM_USER_IOCTL( vertex,
 				  (drm_mga_vertex_t *)data,
@@ -977,7 +977,7 @@ int mga_dma_indices( DRM_IOCTL_ARGS )
 	drm_mga_buf_priv_t *buf_priv;
 	drm_mga_indices_t indices;
-	LOCK_TEST_WITH_RETURN( dev );
+	LOCK_TEST_WITH_RETURN( dev, filp );
 	DRM_COPY_FROM_USER_IOCTL( indices,
 				  (drm_mga_indices_t *)data,
@@ -1017,7 +1017,7 @@ int mga_dma_iload( DRM_IOCTL_ARGS )
 	drm_mga_iload_t iload;
 	DRM_DEBUG( "\n" );
-	LOCK_TEST_WITH_RETURN( dev );
+	LOCK_TEST_WITH_RETURN( dev, filp );
 	DRM_COPY_FROM_USER_IOCTL( iload, (drm_mga_iload_t *)data, sizeof(iload) );
@@ -1057,7 +1057,7 @@ int mga_dma_blit( DRM_IOCTL_ARGS )
 	drm_mga_blit_t blit;
 	DRM_DEBUG( "\n" );
-	LOCK_TEST_WITH_RETURN( dev );
+	LOCK_TEST_WITH_RETURN( dev, filp );
 	DRM_COPY_FROM_USER_IOCTL( blit, (drm_mga_blit_t *)data, sizeof(blit) );


@@ -352,7 +352,7 @@ static void r128_cce_init_ring_buffer( drm_device_t *dev,
 	R128_WRITE( R128_PM4_BUFFER_DL_RPTR_ADDR,
 		    entry->busaddr[page_ofs]);
-	DRM_DEBUG( "ring rptr: offset=0x%08x handle=0x%08lx\n",
+	DRM_DEBUG( "ring rptr: offset=0x%08lx handle=0x%08lx\n",
 		   entry->busaddr[page_ofs],
 		   entry->handle + tmp_ofs );
 }
@@ -621,9 +621,12 @@ int r128_do_cleanup_cce( drm_device_t *dev )
 #if __REALLY_HAVE_SG
 	if ( !dev_priv->is_pci ) {
 #endif
-		DRM_IOREMAPFREE( dev_priv->cce_ring );
-		DRM_IOREMAPFREE( dev_priv->ring_rptr );
-		DRM_IOREMAPFREE( dev_priv->buffers );
+		if ( dev_priv->cce_ring != NULL )
+			DRM_IOREMAPFREE( dev_priv->cce_ring );
+		if ( dev_priv->ring_rptr != NULL )
+			DRM_IOREMAPFREE( dev_priv->ring_rptr );
+		if ( dev_priv->buffers != NULL )
+			DRM_IOREMAPFREE( dev_priv->buffers );
 #if __REALLY_HAVE_SG
 	} else {
 		if (!DRM(ati_pcigart_cleanup)( dev,
@@ -666,7 +669,7 @@ int r128_cce_start( DRM_IOCTL_ARGS )
 	drm_r128_private_t *dev_priv = dev->dev_private;
 	DRM_DEBUG( "\n" );
-	LOCK_TEST_WITH_RETURN( dev );
+	LOCK_TEST_WITH_RETURN( dev, filp );
 	if ( dev_priv->cce_running || dev_priv->cce_mode == R128_PM4_NONPM4 ) {
 		DRM_DEBUG( "%s while CCE running\n", __FUNCTION__ );
@@ -689,7 +692,7 @@ int r128_cce_stop( DRM_IOCTL_ARGS )
 	int ret;
 	DRM_DEBUG( "\n" );
-	LOCK_TEST_WITH_RETURN( dev );
+	LOCK_TEST_WITH_RETURN( dev, filp );
 	DRM_COPY_FROM_USER_IOCTL(stop, (drm_r128_cce_stop_t *)data, sizeof(stop) );
@@ -728,7 +731,7 @@ int r128_cce_reset( DRM_IOCTL_ARGS )
 	drm_r128_private_t *dev_priv = dev->dev_private;
 	DRM_DEBUG( "\n" );
-	LOCK_TEST_WITH_RETURN( dev );
+	LOCK_TEST_WITH_RETURN( dev, filp );
 	if ( !dev_priv ) {
 		DRM_DEBUG( "%s called before init done\n", __FUNCTION__ );
@@ -749,7 +752,7 @@ int r128_cce_idle( DRM_IOCTL_ARGS )
 	drm_r128_private_t *dev_priv = dev->dev_private;
 	DRM_DEBUG( "\n" );
-	LOCK_TEST_WITH_RETURN( dev );
+	LOCK_TEST_WITH_RETURN( dev, filp );
 	if ( dev_priv->cce_running ) {
 		r128_do_cce_flush( dev_priv );
@@ -763,7 +766,7 @@ int r128_engine_reset( DRM_IOCTL_ARGS )
 	DRM_DEVICE;
 	DRM_DEBUG( "\n" );
-	LOCK_TEST_WITH_RETURN( dev );
+	LOCK_TEST_WITH_RETURN( dev, filp );
 	return r128_do_engine_reset( dev );
 }
@@ -810,7 +813,7 @@ int r128_fullscreen( DRM_IOCTL_ARGS )
 	DRM_DEVICE;
 	drm_r128_fullscreen_t fs;
-	LOCK_TEST_WITH_RETURN( dev );
+	LOCK_TEST_WITH_RETURN( dev, filp );
 	DRM_COPY_FROM_USER_IOCTL( fs, (drm_r128_fullscreen_t *)data, sizeof(fs) );
@@ -892,7 +895,7 @@ drm_buf_t *r128_freelist_get( drm_device_t *dev )
 	for ( i = 0 ; i < dma->buf_count ; i++ ) {
 		buf = dma->buflist[i];
 		buf_priv = buf->dev_private;
-		if ( buf->pid == 0 )
+		if ( buf->filp == 0 )
 			return buf;
 	}
@@ -951,7 +954,7 @@ int r128_wait_ring( drm_r128_private_t *dev_priv, int n )
 	return DRM_ERR(EBUSY);
 }
-static int r128_cce_get_buffers( drm_device_t *dev, drm_dma_t *d )
+static int r128_cce_get_buffers( DRMFILE filp, drm_device_t *dev, drm_dma_t *d )
 {
 	int i;
 	drm_buf_t *buf;
@@ -960,7 +963,7 @@ static int r128_cce_get_buffers( drm_device_t *dev, drm_dma_t *d )
 		buf = r128_freelist_get( dev );
 		if ( !buf ) return DRM_ERR(EAGAIN);
-		buf->pid = DRM_CURRENTPID;
+		buf->filp = filp;
 		if ( DRM_COPY_TO_USER( &d->request_indices[i], &buf->idx,
 				       sizeof(buf->idx) ) )
@@ -981,7 +984,7 @@ int r128_cce_buffers( DRM_IOCTL_ARGS )
 	int ret = 0;
 	drm_dma_t d;
-	LOCK_TEST_WITH_RETURN( dev );
+	LOCK_TEST_WITH_RETURN( dev, filp );
 	DRM_COPY_FROM_USER_IOCTL( d, (drm_dma_t *) data, sizeof(d) );
@@ -1004,7 +1007,7 @@ int r128_cce_buffers( DRM_IOCTL_ARGS )
 	d.granted_count = 0;
 	if ( d.request_count ) {
-		ret = r128_cce_get_buffers( dev, &d );
+		ret = r128_cce_get_buffers( filp, dev, &d );
 	}
 	DRM_COPY_TO_USER_IOCTL((drm_dma_t *) data, d, sizeof(d) );


@@ -72,7 +72,6 @@ drm_chipinfo_t DRM(devicelist)[] = {
 #include "dev/drm/drm_drawable.h"
 #include "dev/drm/drm_drv.h"
 #include "dev/drm/drm_fops.h"
-#include "dev/drm/drm_init.h"
 #include "dev/drm/drm_ioctl.h"
 #include "dev/drm/drm_lock.h"
 #include "dev/drm/drm_memory.h"


@@ -401,15 +401,6 @@ extern int R128_READ_PLL(drm_device_t *dev, int addr);
  * Misc helper macros
  */
-#define LOCK_TEST_WITH_RETURN( dev ) \
-do { \
-	if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) || \
-	     dev->lock.pid != DRM_CURRENTPID ) { \
-		DRM_ERROR( "%s called without lock held\n", __FUNCTION__ ); \
-		return DRM_ERR(EINVAL); \
-	} \
-} while (0)
 #define RING_SPACE_TEST_WITH_RETURN( dev_priv ) \
 do { \
 	drm_r128_ring_buffer_t *ring = &dev_priv->ring; int i; \


@@ -780,7 +780,8 @@ static void r128_cce_dispatch_indices( drm_device_t *dev,
 	sarea_priv->nbox = 0;
 }
-static int r128_cce_dispatch_blit( drm_device_t *dev,
+static int r128_cce_dispatch_blit( DRMFILE filp,
+				   drm_device_t *dev,
 				   drm_r128_blit_t *blit )
 {
 	drm_r128_private_t *dev_priv = dev->dev_private;
@@ -831,9 +832,9 @@ static int r128_cce_dispatch_blit( drm_device_t *dev,
 	buf = dma->buflist[blit->idx];
 	buf_priv = buf->dev_private;
-	if ( buf->pid != DRM_CURRENTPID ) {
-		DRM_ERROR( "process %d using buffer owned by %d\n",
-			   DRM_CURRENTPID, buf->pid );
+	if ( buf->filp != filp ) {
+		DRM_ERROR( "process %d using buffer owned by %p\n",
+			   DRM_CURRENTPID, buf->filp );
 		return DRM_ERR(EINVAL);
 	}
 	if ( buf->pending ) {
@@ -1246,7 +1247,7 @@ int r128_cce_clear( DRM_IOCTL_ARGS )
 	drm_r128_clear_t clear;
 	DRM_DEBUG( "\n" );
-	LOCK_TEST_WITH_RETURN( dev );
+	LOCK_TEST_WITH_RETURN( dev, filp );
 	DRM_COPY_FROM_USER_IOCTL( clear, (drm_r128_clear_t *) data,
 				  sizeof(clear) );
@@ -1272,7 +1273,7 @@ int r128_cce_swap( DRM_IOCTL_ARGS )
 	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
 	DRM_DEBUG( "%s\n", __FUNCTION__ );
-	LOCK_TEST_WITH_RETURN( dev );
+	LOCK_TEST_WITH_RETURN( dev, filp );
 	RING_SPACE_TEST_WITH_RETURN( dev_priv );
@@ -1299,7 +1300,7 @@ int r128_cce_vertex( DRM_IOCTL_ARGS )
 	drm_r128_buf_priv_t *buf_priv;
 	drm_r128_vertex_t vertex;
-	LOCK_TEST_WITH_RETURN( dev );
+	LOCK_TEST_WITH_RETURN( dev, filp );
 	if ( !dev_priv ) {
 		DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
@@ -1330,9 +1331,9 @@ int r128_cce_vertex( DRM_IOCTL_ARGS )
 	buf = dma->buflist[vertex.idx];
 	buf_priv = buf->dev_private;
-	if ( buf->pid != DRM_CURRENTPID ) {
-		DRM_ERROR( "process %d using buffer owned by %d\n",
-			   DRM_CURRENTPID, buf->pid );
+	if ( buf->filp != filp ) {
+		DRM_ERROR( "process %d using buffer owned by %p\n",
+			   DRM_CURRENTPID, buf->filp );
 		return DRM_ERR(EINVAL);
 	}
 	if ( buf->pending ) {
@@ -1359,7 +1360,7 @@ int r128_cce_indices( DRM_IOCTL_ARGS )
 	drm_r128_indices_t elts;
 	int count;
-	LOCK_TEST_WITH_RETURN( dev );
+	LOCK_TEST_WITH_RETURN( dev, filp );
 	if ( !dev_priv ) {
 		DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
@@ -1389,9 +1390,9 @@ int r128_cce_indices( DRM_IOCTL_ARGS )
 	buf = dma->buflist[elts.idx];
 	buf_priv = buf->dev_private;
-	if ( buf->pid != DRM_CURRENTPID ) {
-		DRM_ERROR( "process %d using buffer owned by %d\n",
-			   DRM_CURRENTPID, buf->pid );
+	if ( buf->filp != filp ) {
+		DRM_ERROR( "process %d using buffer owned by %p\n",
+			   DRM_CURRENTPID, buf->filp );
 		return DRM_ERR(EINVAL);
 	}
 	if ( buf->pending ) {
@@ -1427,7 +1428,7 @@ int r128_cce_blit( DRM_IOCTL_ARGS )
 	drm_r128_private_t *dev_priv = dev->dev_private;
 	drm_r128_blit_t blit;
-	LOCK_TEST_WITH_RETURN( dev );
+	LOCK_TEST_WITH_RETURN( dev, filp );
 	DRM_COPY_FROM_USER_IOCTL( blit, (drm_r128_blit_t *) data,
 				  sizeof(blit) );
@@ -1443,7 +1444,7 @@ int r128_cce_blit( DRM_IOCTL_ARGS )
 	RING_SPACE_TEST_WITH_RETURN( dev_priv );
 	VB_AGE_TEST_WITH_RETURN( dev_priv );
-	return r128_cce_dispatch_blit( dev, &blit );
+	return r128_cce_dispatch_blit( filp, dev, &blit );
 }
 int r128_cce_depth( DRM_IOCTL_ARGS )
@@ -1452,7 +1453,7 @@ int r128_cce_depth( DRM_IOCTL_ARGS )
 	drm_r128_private_t *dev_priv = dev->dev_private;
 	drm_r128_depth_t depth;
-	LOCK_TEST_WITH_RETURN( dev );
+	LOCK_TEST_WITH_RETURN( dev, filp );
 	DRM_COPY_FROM_USER_IOCTL( depth, (drm_r128_depth_t *) data,
 				  sizeof(depth) );
@@ -1480,7 +1481,7 @@ int r128_cce_stipple( DRM_IOCTL_ARGS )
 	drm_r128_stipple_t stipple;
 	u32 mask[32];
-	LOCK_TEST_WITH_RETURN( dev );
+	LOCK_TEST_WITH_RETURN( dev, filp );
 	DRM_COPY_FROM_USER_IOCTL( stipple, (drm_r128_stipple_t *) data,
 				  sizeof(stipple) );
@@ -1508,7 +1509,7 @@ int r128_cce_indirect( DRM_IOCTL_ARGS )
 	RING_LOCALS;
 #endif
-	LOCK_TEST_WITH_RETURN( dev );
+	LOCK_TEST_WITH_RETURN( dev, filp );
 	if ( !dev_priv ) {
 		DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
@@ -1531,9 +1532,9 @@ int r128_cce_indirect( DRM_IOCTL_ARGS )
 	buf = dma->buflist[indirect.idx];
 	buf_priv = buf->dev_private;
-	if ( buf->pid != DRM_CURRENTPID ) {
-		DRM_ERROR( "process %d using buffer owned by %d\n",
-			   DRM_CURRENTPID, buf->pid );
+	if ( buf->filp != filp ) {
+		DRM_ERROR( "process %d using buffer owned by %p\n",
+			   DRM_CURRENTPID, buf->filp );
 		return DRM_ERR(EINVAL);
 	}
 	if ( buf->pending ) {

@@ -80,6 +80,7 @@
  *       Added packets R200_EMIT_PP_CUBIC_FACES_[0..5] and
  *       R200_EMIT_PP_CUBIC_OFFSETS_[0..5].  (brian)
  * 1.8 - Remove need to call cleanup ioctls on last client exit (keith)
+ *       Add 'GET' queries for starting additional clients on different VT's.
  */
 #define DRIVER_IOCTLS \
 	[DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { radeon_cp_buffers, 1, 0 }, \
@@ -122,19 +123,17 @@ do { \
 		if ( dev_priv->page_flipping ) { \
 			radeon_do_cleanup_pageflip( dev ); \
 		} \
-		radeon_mem_release( dev_priv->agp_heap ); \
-		radeon_mem_release( dev_priv->fb_heap ); \
+		radeon_mem_release( filp, dev_priv->agp_heap ); \
+		radeon_mem_release( filp, dev_priv->fb_heap ); \
 	} \
 } while (0)
 /* When the last client dies, shut down the CP and free dev->dev_priv.
  */
-#define __HAVE_RELEASE		1
-#define DRIVER_RELEASE() \
+/* #define __HAVE_RELEASE	1 */
+#define DRIVER_PRETAKEDOWN() \
 do { \
-	DRM(reclaim_buffers)( dev, priv->pid ); \
-	if ( dev->open_count == 1) \
-		radeon_do_release( dev ); \
+	radeon_do_release( dev ); \
 } while (0)
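DRIVER_RELEASE() ran on every close and had to test open_count itself; DRIVER_PRETAKEDOWN() instead runs once from the shared takedown path, after the common code has already reclaimed buffers. A sketch of how the shared template presumably consumes the hook, following the empty-default idiom the other DRIVER_* hooks use (the real takedown body is elided):

/* Assumed shape of the consumer in the shared drm_drv template. */
#ifndef DRIVER_PRETAKEDOWN
#define DRIVER_PRETAKEDOWN()	/* empty default */
#endif

static int DRM(takedown)( drm_device_t *dev )
{
	DRIVER_PRETAKEDOWN();	/* radeon: shut down the CP, free heaps */
	/* ... common teardown of maps, DMA buffers and AGP follows ... */
	return 0;
}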


@@ -38,12 +38,6 @@
 #define RADEON_FIFO_DEBUG	0
-#if defined(__alpha__) || defined(__powerpc__)
-# define PCIGART_ENABLED
-#else
-# undef PCIGART_ENABLED
-#endif
 /* CP microcode (from ATI) */
 static u32 R200_cp_microcode[][2] = {
@@ -779,7 +773,7 @@ static void radeon_do_cp_reset( drm_radeon_private_t *dev_priv )
 	cur_read_ptr = RADEON_READ( RADEON_CP_RB_RPTR );
 	RADEON_WRITE( RADEON_CP_RB_WPTR, cur_read_ptr );
-	*dev_priv->ring.head = cur_read_ptr;
+	SET_RING_HEAD( dev_priv, cur_read_ptr );
 	dev_priv->ring.tail = cur_read_ptr;
 }
@@ -891,13 +885,18 @@ static void radeon_cp_init_ring_buffer( drm_device_t *dev,
 	/* Initialize the ring buffer's read and write pointers */
 	cur_read_ptr = RADEON_READ( RADEON_CP_RB_RPTR );
 	RADEON_WRITE( RADEON_CP_RB_WPTR, cur_read_ptr );
-	*dev_priv->ring.head = cur_read_ptr;
+	SET_RING_HEAD( dev_priv, cur_read_ptr );
 	dev_priv->ring.tail = cur_read_ptr;
+#if __REALLY_HAVE_AGP
 	if ( !dev_priv->is_pci ) {
 		RADEON_WRITE( RADEON_CP_RB_RPTR_ADDR,
-			      dev_priv->ring_rptr->offset );
-	} else {
+			      dev_priv->ring_rptr->offset
+			      - dev->agp->base
+			      + dev_priv->agp_vm_start);
+	} else
+#endif
+	{
 		drm_sg_mem_t *entry = dev->sg;
 		unsigned long tmp_ofs, page_ofs;
@@ -906,7 +905,7 @@ static void radeon_cp_init_ring_buffer( drm_device_t *dev,
 		RADEON_WRITE( RADEON_CP_RB_RPTR_ADDR,
 			      entry->busaddr[page_ofs]);
-		DRM_DEBUG( "ring rptr: offset=0x%08x handle=0x%08lx\n",
+		DRM_DEBUG( "ring rptr: offset=0x%08lx handle=0x%08lx\n",
 			   entry->busaddr[page_ofs],
 			   entry->handle + tmp_ofs );
 	}
@@ -922,7 +921,7 @@ static void radeon_cp_init_ring_buffer( drm_device_t *dev,
 					 + RADEON_SCRATCH_REG_OFFSET );
 	dev_priv->scratch = ((__volatile__ u32 *)
-			     dev_priv->ring.head +
+			     dev_priv->ring_rptr->handle +
 			     (RADEON_SCRATCH_REG_OFFSET / sizeof(u32)));
 	RADEON_WRITE( RADEON_SCRATCH_UMSK, 0x7 );
@@ -992,17 +991,6 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
 	dev_priv->is_pci = init->is_pci;
-#if !defined(PCIGART_ENABLED)
-	/* PCI support is not 100% working, so we disable it here.
-	 */
-	if ( dev_priv->is_pci ) {
-		DRM_ERROR( "PCI GART not yet supported for Radeon!\n" );
-		dev->dev_private = (void *)dev_priv;
-		radeon_do_cleanup_cp(dev);
-		return DRM_ERR(EINVAL);
-	}
-#endif
 	if ( dev_priv->is_pci && !dev->sg ) {
 		DRM_ERROR( "PCI GART memory not allocated!\n" );
 		dev->dev_private = (void *)dev_priv;
@@ -1099,6 +1087,13 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
 					   RADEON_ROUND_PREC_8TH_PIX);
 	DRM_GETSAREA();
+	dev_priv->fb_offset = init->fb_offset;
+	dev_priv->mmio_offset = init->mmio_offset;
+	dev_priv->ring_offset = init->ring_offset;
+	dev_priv->ring_rptr_offset = init->ring_rptr_offset;
+	dev_priv->buffers_offset = init->buffers_offset;
+	dev_priv->agp_textures_offset = init->agp_textures_offset;
 	if(!dev_priv->sarea) {
 		DRM_ERROR("could not find sarea!\n");
@@ -1206,9 +1201,6 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
 	DRM_DEBUG( "dev_priv->agp_buffers_offset 0x%lx\n",
 		   dev_priv->agp_buffers_offset );
-	dev_priv->ring.head = ((__volatile__ u32 *)
-			       dev_priv->ring_rptr->handle);
 	dev_priv->ring.start = (u32 *)dev_priv->cp_ring->handle;
 	dev_priv->ring.end = ((u32 *)dev_priv->cp_ring->handle
 			      + init->ring_size / sizeof(u32));
@@ -1219,7 +1211,6 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
 		(dev_priv->ring.size / sizeof(u32)) - 1;
 	dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK;
-	dev_priv->ring.ring_rptr = dev_priv->ring_rptr;
 #if __REALLY_HAVE_SG
 	if ( dev_priv->is_pci ) {
@@ -1281,9 +1272,12 @@ int radeon_do_cleanup_cp( drm_device_t *dev )
 	drm_radeon_private_t *dev_priv = dev->dev_private;
 	if ( !dev_priv->is_pci ) {
-		DRM_IOREMAPFREE( dev_priv->cp_ring );
-		DRM_IOREMAPFREE( dev_priv->ring_rptr );
-		DRM_IOREMAPFREE( dev_priv->buffers );
+		if ( dev_priv->cp_ring != NULL )
+			DRM_IOREMAPFREE( dev_priv->cp_ring );
+		if ( dev_priv->ring_rptr != NULL )
+			DRM_IOREMAPFREE( dev_priv->ring_rptr );
+		if ( dev_priv->buffers != NULL )
+			DRM_IOREMAPFREE( dev_priv->buffers );
 	} else {
 #if __REALLY_HAVE_SG
 		if (!DRM(ati_pcigart_cleanup)( dev,
@@ -1325,7 +1319,7 @@ int radeon_cp_start( DRM_IOCTL_ARGS )
 	drm_radeon_private_t *dev_priv = dev->dev_private;
 	DRM_DEBUG( "\n" );
-	LOCK_TEST_WITH_RETURN( dev );
+	LOCK_TEST_WITH_RETURN( dev, filp );
 	if ( dev_priv->cp_running ) {
 		DRM_DEBUG( "%s while CP running\n", __FUNCTION__ );
@@ -1353,7 +1347,7 @@ int radeon_cp_stop( DRM_IOCTL_ARGS )
 	int ret;
 	DRM_DEBUG( "\n" );
-	LOCK_TEST_WITH_RETURN( dev );
+	LOCK_TEST_WITH_RETURN( dev, filp );
 	DRM_COPY_FROM_USER_IOCTL( stop, (drm_radeon_cp_stop_t *)data, sizeof(stop) );
@@ -1428,7 +1422,7 @@ int radeon_cp_reset( DRM_IOCTL_ARGS )
 	drm_radeon_private_t *dev_priv = dev->dev_private;
 	DRM_DEBUG( "\n" );
-	LOCK_TEST_WITH_RETURN( dev );
+	LOCK_TEST_WITH_RETURN( dev, filp );
 	if ( !dev_priv ) {
 		DRM_DEBUG( "%s called before init done\n", __FUNCTION__ );
@@ -1449,7 +1443,7 @@ int radeon_cp_idle( DRM_IOCTL_ARGS )
 	drm_radeon_private_t *dev_priv = dev->dev_private;
 	DRM_DEBUG( "\n" );
-	LOCK_TEST_WITH_RETURN( dev );
+	LOCK_TEST_WITH_RETURN( dev, filp );
 	return radeon_do_cp_idle( dev_priv );
 }
@@ -1459,7 +1453,7 @@ int radeon_engine_reset( DRM_IOCTL_ARGS )
 	DRM_DEVICE;
 	DRM_DEBUG( "\n" );
-	LOCK_TEST_WITH_RETURN( dev );
+	LOCK_TEST_WITH_RETURN( dev, filp );
 	return radeon_do_engine_reset( dev );
 }
@@ -1518,7 +1512,7 @@ drm_buf_t *radeon_freelist_get( drm_device_t *dev )
 		for ( i = start ; i < dma->buf_count ; i++ ) {
 			buf = dma->buflist[i];
 			buf_priv = buf->dev_private;
-			if ( buf->pid == 0 || (buf->pending &&
+			if ( buf->filp == 0 || (buf->pending &&
 					       buf_priv->age <= done_age) ) {
 				dev_priv->stats.requested_bufs++;
 				buf->pending = 0;
@@ -1557,7 +1551,7 @@ drm_buf_t *radeon_freelist_get( drm_device_t *dev )
 	for ( i = start ; i < dma->buf_count ; i++ ) {
 		buf = dma->buflist[i];
 		buf_priv = buf->dev_private;
-		if ( buf->pid == 0 || (buf->pending &&
+		if ( buf->filp == 0 || (buf->pending &&
 				       buf_priv->age <= done_age) ) {
 			dev_priv->stats.requested_bufs++;
 			buf->pending = 0;
@@ -1594,10 +1588,10 @@ int radeon_wait_ring( drm_radeon_private_t *dev_priv, int n )
 {
 	drm_radeon_ring_buffer_t *ring = &dev_priv->ring;
 	int i;
-	u32 last_head = GET_RING_HEAD(ring);
+	u32 last_head = GET_RING_HEAD( dev_priv );
 	for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) {
-		u32 head = GET_RING_HEAD(ring);
+		u32 head = GET_RING_HEAD( dev_priv );
 		ring->space = (head - ring->tail) * sizeof(u32);
 		if ( ring->space <= 0 )
@@ -1622,7 +1616,7 @@ int radeon_wait_ring( drm_radeon_private_t *dev_priv, int n )
 	return DRM_ERR(EBUSY);
 }
-static int radeon_cp_get_buffers( drm_device_t *dev, drm_dma_t *d )
+static int radeon_cp_get_buffers( DRMFILE filp, drm_device_t *dev, drm_dma_t *d )
 {
 	int i;
 	drm_buf_t *buf;
@@ -1631,7 +1625,7 @@ static int radeon_cp_get_buffers( drm_device_t *dev, drm_dma_t *d )
 		buf = radeon_freelist_get( dev );
 		if ( !buf ) return DRM_ERR(EBUSY); /* NOTE: broken client */
-		buf->pid = DRM_CURRENTPID;
+		buf->filp = filp;
 		if ( DRM_COPY_TO_USER( &d->request_indices[i], &buf->idx,
 				       sizeof(buf->idx) ) )
@@ -1652,7 +1646,7 @@ int radeon_cp_buffers( DRM_IOCTL_ARGS )
 	int ret = 0;
 	drm_dma_t d;
-	LOCK_TEST_WITH_RETURN( dev );
+	LOCK_TEST_WITH_RETURN( dev, filp );
 	DRM_COPY_FROM_USER_IOCTL( d, (drm_dma_t *)data, sizeof(d) );
@@ -1675,7 +1669,7 @@ int radeon_cp_buffers( DRM_IOCTL_ARGS )
 	d.granted_count = 0;
 	if ( d.request_count ) {
-		ret = radeon_cp_get_buffers( dev, &d );
+		ret = radeon_cp_get_buffers( filp, dev, &d );
 	}
 	DRM_COPY_TO_USER_IOCTL( (drm_dma_t *)data, d, sizeof(d) );


@@ -534,6 +534,10 @@ typedef struct drm_radeon_indirect {
 #define RADEON_PARAM_LAST_CLEAR      4
 #define RADEON_PARAM_IRQ_NR          5
 #define RADEON_PARAM_AGP_BASE        6 /* card offset of agp base */
+#define RADEON_PARAM_REGISTER_HANDLE 7 /* for drmMap() */
+#define RADEON_PARAM_STATUS_HANDLE   8
+#define RADEON_PARAM_SAREA_HANDLE    9
+#define RADEON_PARAM_AGP_TEX_HANDLE  10
 typedef struct drm_radeon_getparam {
 	int param;


@@ -85,7 +85,6 @@ drm_chipinfo_t DRM(devicelist)[] = {
 #include "dev/drm/drm_drawable.h"
 #include "dev/drm/drm_drv.h"
 #include "dev/drm/drm_fops.h"
-#include "dev/drm/drm_init.h"
 #include "dev/drm/drm_ioctl.h"
 #include "dev/drm/drm_lock.h"
 #include "dev/drm/drm_memory.h"


@@ -33,8 +33,8 @@
 #ifndef __RADEON_DRV_H__
 #define __RADEON_DRV_H__
-#define GET_RING_HEAD(ring)		DRM_READ32(  (ring)->ring_rptr, 0 ) /* (ring)->head */
-#define SET_RING_HEAD(ring,val)	DRM_WRITE32( (ring)->ring_rptr, 0, (val) ) /* (ring)->head */
+#define GET_RING_HEAD(dev_priv)	DRM_READ32(  (dev_priv)->ring_rptr, 0 )
+#define SET_RING_HEAD(dev_priv,val)	DRM_WRITE32( (dev_priv)->ring_rptr, 0, (val) )
 typedef struct drm_radeon_freelist {
 	unsigned int age;
@@ -49,13 +49,11 @@ typedef struct drm_radeon_ring_buffer {
 	int size;
 	int size_l2qw;
-	volatile u32 *head;
 	u32 tail;
 	u32 tail_mask;
 	int space;
 	int high_mark;
-	drm_local_map_t *ring_rptr;
 } drm_radeon_ring_buffer_t;
 typedef struct drm_radeon_depth_clear_t {
@@ -70,7 +68,7 @@ struct mem_block {
 	struct mem_block *prev;
 	int start;
 	int size;
-	int pid;		/* 0: free, -1: heap, other: real pids */
+	DRMFILE filp;		/* 0: free, -1: heap, other: real files */
 };
 typedef struct drm_radeon_private {
@@ -128,6 +126,13 @@ typedef struct drm_radeon_private {
 	u32 depth_pitch_offset;
 	drm_radeon_depth_clear_t depth_clear;
+	unsigned long fb_offset;
+	unsigned long mmio_offset;
+	unsigned long ring_offset;
+	unsigned long ring_rptr_offset;
+	unsigned long buffers_offset;
+	unsigned long agp_textures_offset;
 	drm_local_map_t *sarea;
 	drm_local_map_t *fb;
@@ -186,7 +191,7 @@ extern int radeon_mem_alloc( DRM_IOCTL_ARGS );
 extern int radeon_mem_free( DRM_IOCTL_ARGS );
 extern int radeon_mem_init_heap( DRM_IOCTL_ARGS );
 extern void radeon_mem_takedown( struct mem_block **heap );
-extern void radeon_mem_release( struct mem_block *heap );
+extern void radeon_mem_release( DRMFILE filp, struct mem_block *heap );
 /* radeon_irq.c */
 extern int radeon_irq_emit( DRM_IOCTL_ARGS );
@@ -772,22 +777,12 @@ extern int RADEON_READ_PLL( drm_device_t *dev, int addr );
  * Misc helper macros
  */
-#define LOCK_TEST_WITH_RETURN( dev ) \
-do { \
-	if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) || \
-	     dev->lock.pid != DRM_CURRENTPID ) { \
-		DRM_ERROR( "%s called without lock held\n", __FUNCTION__ ); \
-		return DRM_ERR(EINVAL); \
-	} \
-} while (0)
 /* Perfbox functionality only.
  */
 #define RING_SPACE_TEST_WITH_RETURN( dev_priv ) \
 do { \
 	if (!(dev_priv->stats.boxes & RADEON_BOX_DMA_IDLE)) { \
-		u32 head = GET_RING_HEAD(&dev_priv->ring); \
+		u32 head = GET_RING_HEAD( dev_priv ); \
 		if (head == dev_priv->ring.tail) \
 			dev_priv->stats.boxes |= RADEON_BOX_DMA_IDLE; \
 	} \
@@ -859,8 +854,8 @@ do { \
 #define COMMIT_RING() do { \
 	/* Flush writes to ring */ \
-	DRM_READMEMORYBARRIER(dev_priv->mmio); \
-	GET_RING_HEAD( &dev_priv->ring ); \
+	DRM_READMEMORYBARRIER( dev_priv->mmio ); \
+	GET_RING_HEAD( dev_priv ); \
 	RADEON_WRITE( RADEON_CP_RB_WPTR, dev_priv->ring.tail ); \
 	/* read from PCI bus to ensure correct posting */ \
 	RADEON_READ( RADEON_CP_RB_RPTR ); \
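With the cached volatile head pointer gone from the ring structure, every read or update of the ring head goes through the ring_rptr map, and COMMIT_RING's dummy GET_RING_HEAD read doubles as the flush before the write pointer is bumped. The accessors bottom out in the OS layer; roughly like this, though the exact drm_os_*.h expansion is an assumption here:

/* Assumed OS-layer expansion: a volatile dereference of the mapped
 * read-pointer page, so the compiler cannot cache the value. */
#define DRM_READ32(map, offset) \
	(*(volatile u_int32_t *)(((unsigned long)(map)->handle) + (offset)))
#define DRM_WRITE32(map, offset, val) \
	(*(volatile u_int32_t *)(((unsigned long)(map)->handle) + (offset)) = (val))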


@@ -181,7 +181,7 @@ int radeon_irq_emit( DRM_IOCTL_ARGS )
 	drm_radeon_irq_emit_t emit;
 	int result;
-	LOCK_TEST_WITH_RETURN( dev );
+	LOCK_TEST_WITH_RETURN( dev, filp );
 	if ( !dev_priv ) {
 		DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );


@@ -42,7 +42,7 @@
  */
 static struct mem_block *split_block(struct mem_block *p, int start, int size,
-				     int pid )
+				     DRMFILE filp )
 {
 	/* Maybe cut off the start of an existing block */
 	if (start > p->start) {
@@ -51,7 +51,7 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size,
 			goto out;
 		newblock->start = start;
 		newblock->size = p->size - (start - p->start);
-		newblock->pid = 0;
+		newblock->filp = 0;
 		newblock->next = p->next;
 		newblock->prev = p;
 		p->next->prev = newblock;
@@ -67,7 +67,7 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size,
 			goto out;
 		newblock->start = start + size;
 		newblock->size = p->size - size;
-		newblock->pid = 0;
+		newblock->filp = 0;
 		newblock->next = p->next;
 		newblock->prev = p;
 		p->next->prev = newblock;
@@ -77,20 +77,20 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size,
 out:
 	/* Our block is in the middle */
-	p->pid = pid;
+	p->filp = filp;
 	return p;
 }
 static struct mem_block *alloc_block( struct mem_block *heap, int size,
-				      int align2, int pid )
+				      int align2, DRMFILE filp )
 {
 	struct mem_block *p;
 	int mask = (1 << align2)-1;
 	for (p = heap->next ; p != heap ; p = p->next) {
 		int start = (p->start + mask) & ~mask;
-		if (p->pid == 0 && start + size <= p->start + p->size)
-			return split_block( p, start, size, pid );
+		if (p->filp == 0 && start + size <= p->start + p->size)
+			return split_block( p, start, size, filp );
 	}
 	return NULL;
@@ -110,12 +110,12 @@ static struct mem_block *find_block( struct mem_block *heap, int start )
 static void free_block( struct mem_block *p )
 {
-	p->pid = 0;
-	/* Assumes a single contiguous range.  Needs a special pid in
+	p->filp = 0;
+	/* Assumes a single contiguous range.  Needs a special filp in
 	 * 'heap' to stop it being subsumed.
 	 */
-	if (p->next->pid == 0) {
+	if (p->next->filp == 0) {
 		struct mem_block *q = p->next;
 		p->size += q->size;
 		p->next = q->next;
@@ -123,7 +123,7 @@ static void free_block( struct mem_block *p )
 		DRM_FREE(q, sizeof(*q));
 	}
-	if (p->prev->pid == 0) {
+	if (p->prev->filp == 0) {
 		struct mem_block *q = p->prev;
 		q->size += p->size;
 		q->next = p->next;
@@ -149,36 +149,35 @@ static int init_heap(struct mem_block **heap, int start, int size)
 	blocks->start = start;
 	blocks->size = size;
-	blocks->pid = 0;
+	blocks->filp = 0;
 	blocks->next = blocks->prev = *heap;
 	memset( *heap, 0, sizeof(**heap) );
-	(*heap)->pid = -1;
+	(*heap)->filp = (DRMFILE) -1;
 	(*heap)->next = (*heap)->prev = blocks;
 	return 0;
 }
-/* Free all blocks associated with the releasing pid.
+/* Free all blocks associated with the releasing file.
  */
-void radeon_mem_release( struct mem_block *heap )
+void radeon_mem_release( DRMFILE filp, struct mem_block *heap )
 {
-	int pid = DRM_CURRENTPID;
 	struct mem_block *p;
 	if (!heap || !heap->next)
 		return;
 	for (p = heap->next ; p != heap ; p = p->next) {
-		if (p->pid == pid)
-			p->pid = 0;
+		if (p->filp == filp)
+			p->filp = 0;
 	}
-	/* Assumes a single contiguous range.  Needs a special pid in
+	/* Assumes a single contiguous range.  Needs a special filp in
 	 * 'heap' to stop it being subsumed.
 	 */
 	for (p = heap->next ; p != heap ; p = p->next) {
-		while (p->pid == 0 && p->next->pid == 0) {
+		while (p->filp == 0 && p->next->filp == 0) {
 			struct mem_block *q = p->next;
 			p->size += q->size;
 			p->next = q->next;
@@ -250,7 +249,7 @@ int radeon_mem_alloc( DRM_IOCTL_ARGS )
 		alloc.alignment = 12;
 	block = alloc_block( *heap, alloc.size, alloc.alignment,
-			     DRM_CURRENTPID );
+			     filp );
 	if (!block)
 		return DRM_ERR(ENOMEM);
@@ -289,7 +288,7 @@ int radeon_mem_free( DRM_IOCTL_ARGS )
 	if (!block)
 		return DRM_ERR(EFAULT);
-	if (block->pid != DRM_CURRENTPID)
+	if (block->filp != filp)
 		return DRM_ERR(EPERM);
 	free_block( block );
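Taken together, the heap lifecycle is: init_heap() builds one all-free block plus a sentinel header, alloc_block() splits a free block and stamps the caller's handle on the middle piece, and radeon_mem_release() clears every block the dying file still owns and coalesces adjacent free runs. A condensed sketch of that flow, written as if inside radeon_mem.c (the first two calls are the file-local helpers above, not an exported API):

static void example_heap_lifecycle( DRMFILE filp, int start, int size )
{
	struct mem_block *heap = NULL, *block;

	if ( init_heap( &heap, start, size ) )		/* free block + sentinel */
		return;
	block = alloc_block( heap, 4096, 12, filp );	/* 2^12-aligned, owner = filp */
	if ( block != NULL ) {
		/* ... client uses [block->start, block->start + block->size) ... */
	}
	radeon_mem_release( filp, heap );	/* clears block->filp, coalesces */
	radeon_mem_takedown( &heap );		/* frees the whole list */
}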


@@ -1065,7 +1065,8 @@ static void radeon_cp_dispatch_indices( drm_device_t *dev,
 #define RADEON_MAX_TEXTURE_SIZE (RADEON_BUFFER_SIZE - 8 * sizeof(u32))
-static int radeon_cp_dispatch_texture( drm_device_t *dev,
+static int radeon_cp_dispatch_texture( DRMFILE filp,
+				       drm_device_t *dev,
 				       drm_radeon_texture_t *tex,
 				       drm_radeon_tex_image_t *image )
 {
@@ -1218,7 +1219,7 @@ static int radeon_cp_dispatch_texture( drm_device_t *dev,
 		}
 	}
-	buf->pid = DRM_CURRENTPID;
+	buf->filp = filp;
 	buf->used = (dwords + 8) * sizeof(u32);
 	radeon_cp_dispatch_indirect( dev, buf, 0, buf->used );
 	radeon_cp_discard_buffer( dev, buf );
@@ -1275,7 +1276,7 @@ int radeon_cp_clear( DRM_IOCTL_ARGS )
 	drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS];
 	DRM_DEBUG( "\n" );
-	LOCK_TEST_WITH_RETURN( dev );
+	LOCK_TEST_WITH_RETURN( dev, filp );
 	DRM_COPY_FROM_USER_IOCTL( clear, (drm_radeon_clear_t *)data,
 				  sizeof(clear) );
@@ -1344,7 +1345,7 @@ int radeon_cp_flip( DRM_IOCTL_ARGS )
 	drm_radeon_private_t *dev_priv = dev->dev_private;
 	DRM_DEBUG( "\n" );
-	LOCK_TEST_WITH_RETURN( dev );
+	LOCK_TEST_WITH_RETURN( dev, filp );
 	RING_SPACE_TEST_WITH_RETURN( dev_priv );
@@ -1364,7 +1365,7 @@ int radeon_cp_swap( DRM_IOCTL_ARGS )
 	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
 	DRM_DEBUG( "\n" );
-	LOCK_TEST_WITH_RETURN( dev );
+	LOCK_TEST_WITH_RETURN( dev, filp );
 	RING_SPACE_TEST_WITH_RETURN( dev_priv );
@@ -1388,7 +1389,7 @@ int radeon_cp_vertex( DRM_IOCTL_ARGS )
 	drm_radeon_vertex_t vertex;
 	drm_radeon_tcl_prim_t prim;
-	LOCK_TEST_WITH_RETURN( dev );
+	LOCK_TEST_WITH_RETURN( dev, filp );
 	if ( !dev_priv ) {
 		DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
@@ -1418,9 +1419,9 @@ int radeon_cp_vertex( DRM_IOCTL_ARGS )
 	buf = dma->buflist[vertex.idx];
-	if ( buf->pid != DRM_CURRENTPID ) {
-		DRM_ERROR( "process %d using buffer owned by %d\n",
-			   DRM_CURRENTPID, buf->pid );
+	if ( buf->filp != filp ) {
+		DRM_ERROR( "process %d using buffer owned by %p\n",
+			   DRM_CURRENTPID, buf->filp );
 		return DRM_ERR(EINVAL);
 	}
 	if ( buf->pending ) {
@@ -1475,7 +1476,7 @@ int radeon_cp_indices( DRM_IOCTL_ARGS )
 	drm_radeon_tcl_prim_t prim;
 	int count;
-	LOCK_TEST_WITH_RETURN( dev );
+	LOCK_TEST_WITH_RETURN( dev, filp );
 	if ( !dev_priv ) {
 		DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
@@ -1505,9 +1506,9 @@ int radeon_cp_indices( DRM_IOCTL_ARGS )
 	buf = dma->buflist[elts.idx];
-	if ( buf->pid != DRM_CURRENTPID ) {
-		DRM_ERROR( "process %d using buffer owned by %d\n",
-			   DRM_CURRENTPID, buf->pid );
+	if ( buf->filp != filp ) {
+		DRM_ERROR( "process %d using buffer owned by %p\n",
+			   DRM_CURRENTPID, buf->filp );
 		return DRM_ERR(EINVAL);
 	}
 	if ( buf->pending ) {
@@ -1570,7 +1571,7 @@ int radeon_cp_texture( DRM_IOCTL_ARGS )
 	drm_radeon_tex_image_t image;
 	int ret;
-	LOCK_TEST_WITH_RETURN( dev );
+	LOCK_TEST_WITH_RETURN( dev, filp );
 	DRM_COPY_FROM_USER_IOCTL( tex, (drm_radeon_texture_t *)data, sizeof(tex) );
@@ -1587,7 +1588,7 @@ int radeon_cp_texture( DRM_IOCTL_ARGS )
 	RING_SPACE_TEST_WITH_RETURN( dev_priv );
 	VB_AGE_TEST_WITH_RETURN( dev_priv );
-	ret = radeon_cp_dispatch_texture( dev, &tex, &image );
+	ret = radeon_cp_dispatch_texture( filp, dev, &tex, &image );
 	COMMIT_RING();
 	return ret;
@@ -1600,7 +1601,7 @@ int radeon_cp_stipple( DRM_IOCTL_ARGS )
 	drm_radeon_stipple_t stipple;
 	u32 mask[32];
-	LOCK_TEST_WITH_RETURN( dev );
+	LOCK_TEST_WITH_RETURN( dev, filp );
 	DRM_COPY_FROM_USER_IOCTL( stipple, (drm_radeon_stipple_t *)data,
 				  sizeof(stipple) );
@@ -1625,7 +1626,7 @@ int radeon_cp_indirect( DRM_IOCTL_ARGS )
 	drm_radeon_indirect_t indirect;
 	RING_LOCALS;
-	LOCK_TEST_WITH_RETURN( dev );
+	LOCK_TEST_WITH_RETURN( dev, filp );
 	if ( !dev_priv ) {
 		DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
@@ -1647,9 +1648,9 @@ int radeon_cp_indirect( DRM_IOCTL_ARGS )
 	buf = dma->buflist[indirect.idx];
-	if ( buf->pid != DRM_CURRENTPID ) {
-		DRM_ERROR( "process %d using buffer owned by %d\n",
-			   DRM_CURRENTPID, buf->pid );
+	if ( buf->filp != filp ) {
+		DRM_ERROR( "process %d using buffer owned by %p\n",
+			   DRM_CURRENTPID, buf->filp );
 		return DRM_ERR(EINVAL);
 	}
 	if ( buf->pending ) {
@@ -1702,7 +1703,7 @@ int radeon_cp_vertex2( DRM_IOCTL_ARGS )
 	int i;
 	unsigned char laststate;
-	LOCK_TEST_WITH_RETURN( dev );
+	LOCK_TEST_WITH_RETURN( dev, filp );
 	if ( !dev_priv ) {
 		DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
@@ -1727,9 +1728,9 @@ int radeon_cp_vertex2( DRM_IOCTL_ARGS )
 	buf = dma->buflist[vertex.idx];
-	if ( buf->pid != DRM_CURRENTPID ) {
-		DRM_ERROR( "process %d using buffer owned by %d\n",
-			   DRM_CURRENTPID, buf->pid );
+	if ( buf->filp != filp ) {
+		DRM_ERROR( "process %d using buffer owned by %p\n",
+			   DRM_CURRENTPID, buf->filp );
 		return DRM_ERR(EINVAL);
 	}
@@ -2029,7 +2030,7 @@ int radeon_cp_cmdbuf( DRM_IOCTL_ARGS )
 	drm_radeon_cmd_header_t header;
 	int orig_nbox;
-	LOCK_TEST_WITH_RETURN( dev );
+	LOCK_TEST_WITH_RETURN( dev, filp );
 	if ( !dev_priv ) {
 		DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
@@ -2098,8 +2099,9 @@ int radeon_cp_cmdbuf( DRM_IOCTL_ARGS )
 			}
 			buf = dma->buflist[idx];
-			if ( buf->pid != DRM_CURRENTPID || buf->pending ) {
-				DRM_ERROR( "bad buffer\n" );
+			if ( buf->filp != filp || buf->pending ) {
+				DRM_ERROR( "bad buffer %p %p %d\n",
+					   buf->filp, filp, buf->pending);
 				return DRM_ERR(EINVAL);
 			}
@@ -2191,6 +2193,19 @@ int radeon_cp_getparam( DRM_IOCTL_ARGS )
 	case RADEON_PARAM_AGP_BASE:
 		value = dev_priv->agp_vm_start;
 		break;
+	case RADEON_PARAM_REGISTER_HANDLE:
+		value = dev_priv->mmio_offset;
+		break;
+	case RADEON_PARAM_STATUS_HANDLE:
+		value = dev_priv->ring_rptr_offset;
+		break;
+	case RADEON_PARAM_SAREA_HANDLE:
+		/* The lock is the first dword in the sarea. */
+		value = (int)dev->lock.hw_lock;
+		break;
+	case RADEON_PARAM_AGP_TEX_HANDLE:
+		value = dev_priv->agp_textures_offset;
+		break;
 	default:
 		return DRM_ERR(EINVAL);
 	}
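These handles let a client look up the mappable offsets after initialization instead of re-deriving them; the sarea case returns the kernel address that doubles as its map handle, with the hardware lock as its first dword. A hedged userland sketch, assuming libdrm's drmCommandWriteRead()/drmMap() and the updated radeon_drm.h; the size parameter is the caller's to supply:

/* Userland sketch: fetch a map handle, then hand it to drmMap().
 * fd is an open, authenticated DRM file descriptor. */
static drmAddress example_map_registers( int fd, drmSize size )
{
	drm_radeon_getparam_t gp;
	int handle = 0;
	drmAddress regs = NULL;

	gp.param = RADEON_PARAM_REGISTER_HANDLE;	/* new query above */
	gp.value = &handle;
	if ( drmCommandWriteRead( fd, DRM_RADEON_GETPARAM,
				  &gp, sizeof(gp) ) == 0 )
		drmMap( fd, handle, size, &regs );	/* handle feeds drmMap() */
	return regs;
}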


@@ -86,7 +86,6 @@ drm_chipinfo_t DRM(devicelist)[] = {
 #include "dev/drm/drm_fops.h"
-#include "dev/drm/drm_init.h"
 #include "dev/drm/drm_ioctl.h"
 #include "dev/drm/drm_lock.h"
 #include "dev/drm/drm_memory.h"


@@ -431,7 +431,6 @@ options ACPI_DEBUG
 # tdfxdrm:    3dfx Voodoo 3/4/5 and Banshee
 # r128drm:    ATI Rage 128
 # radeondrm:  ATI Radeon up to 9000/9100
-# DRM_LINUX:  include linux compatibility, requires COMPAT_LINUX
 # DRM_DEBUG:  include debug printfs, very slow
 #
 # mga requires AGP in the kernel, and it is recommended
@@ -443,7 +442,6 @@ device radeondrm
 device		tdfxdrm
 options 	DRM_DEBUG
-options 	DRM_LINUX
 # M-systems DiskOnchip products see src/sys/contrib/dev/fla/README
 device		fla


@@ -376,7 +376,6 @@ options TDFX_LINUX	# Enable Linuxulator support
 # tdfxdrm:    3dfx Voodoo 3/4/5 and Banshee
 # r128drm:    ATI Rage 128
 # radeondrm:  ATI Radeon up to 9000/9100
-# DRM_LINUX:  include linux compatibility, requires COMPAT_LINUX
 # DRM_DEBUG:  include debug printfs, very slow
 #
 # mga requires AGP in the kernel, and it is recommended
@@ -388,7 +387,6 @@ device radeondrm
 device		tdfxdrm
 options 	DRM_DEBUG
-options 	DRM_LINUX
 #
 # Bus mouse