Remove legacy drm and drm2 from tree

As discussed on the mailing lists, drm2 conflicts with the ports' version,
and there is no upstream for most, if not all, of drm. Both have been
merged into a single port.

Users on powerpc, on 32-bit hardware, or with GPUs predating Radeon
and i915 will need to install the graphics/drm-legacy-kmod port. All
other users should be able to use one of the LinuxKPI-based ports:
graphics/drm-stable-kmod, graphics/drm-next-kmod, or graphics/drm-devel-kmod.

MFC: never
Approved by: core@
Matt Macy 2018-08-22 01:50:12 +00:00
parent 058c692e15
commit d157fbd5b4
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=338172
462 changed files with 8 additions and 279549 deletions
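
For reference, migrating an amd64 desktop to one of the ports named above
might look like the following. This is an illustrative sketch only, not part
of the commit: the package name (drm-stable-kmod) and kernel module (i915kms)
are examples and depend on the GPU and on which port is chosen.

	# Install one of the LinuxKPI-based DRM ports as a package
	pkg install drm-stable-kmod

	# Load the port's module at boot; the full path ensures the module
	# from /boot/modules is used (i915kms shown as an Intel example)
	sysrc kld_list+="/boot/modules/i915kms.ko"

	# Or load it immediately for the current session
	kldload /boot/modules/i915kms.ko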


@@ -31,6 +31,13 @@ NOTE TO PEOPLE WHO THINK THAT FreeBSD 12.x IS SLOW:
disable the most expensive debugging functionality run
"ln -s 'abort:false,junk:false' /etc/malloc.conf".)
20180821:
drm and drm2 have been removed. Users on powerpc, 32-bit hardware,
or with GPUs predating Radeon and i915 will need to install the
graphics/drm-legacy-kmod. All other users should be able to use
one of the LinuxKPI-based ports: graphics/drm-stable-kmod,
graphics/drm-next-kmod, graphics/drm-devel-kmod.
20180818:
The default interpreter has been switched from 4th to Lua.
LOADER_DEFAULT_INTERP, documented in build(7), will override the default


@@ -286,17 +286,6 @@ options ACPI_DEBUG
# The cpufreq(4) driver provides support for non-ACPI CPU frequency control
device cpufreq
# Direct Rendering modules for 3D acceleration.
device drm # DRM core module required by DRM drivers
device mach64drm # ATI Rage Pro, Rage Mobility P/M, Rage XL
device mgadrm # AGP Matrox G200, G400, G450, G550
device r128drm # ATI Rage 128
device savagedrm # S3 Savage3D, Savage4
device sisdrm # SiS 300/305, 540, 630
device tdfxdrm # 3dfx Voodoo 3/4/5 and Banshee
device viadrm # VIA
options DRM_DEBUG # Include debug printfs (slow)
#
# Network interfaces:
#


@@ -1,227 +0,0 @@
/*-
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Gareth Hughes <gareth@valinux.com>
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/** @file ati_pcigart.c
* Implementation of ATI's PCIGART, which provides an aperture in card virtual
* address space with addresses remapped to system memory.
*/
#include "dev/drm/drmP.h"
#define ATI_PCIGART_PAGE_SIZE 4096 /* PCI GART page size */
#define ATI_PCIGART_PAGE_MASK (~(ATI_PCIGART_PAGE_SIZE-1))
#define ATI_GART_NOSNOOP 0x1
#define ATI_GART_WRITE 0x4
#define ATI_GART_READ 0x8
static void
drm_ati_alloc_pcigart_table_cb(void *arg, bus_dma_segment_t *segs,
int nsegs, int error)
{
struct drm_dma_handle *dmah = arg;
if (error != 0)
return;
KASSERT(nsegs == 1,
("drm_ati_alloc_pcigart_table_cb: bad dma segment count"));
dmah->busaddr = segs[0].ds_addr;
}
static int
drm_ati_alloc_pcigart_table(struct drm_device *dev,
struct drm_ati_pcigart_info *gart_info)
{
struct drm_dma_handle *dmah;
int flags, ret;
dmah = malloc(sizeof(struct drm_dma_handle), DRM_MEM_DMA,
M_ZERO | M_NOWAIT);
if (dmah == NULL)
return ENOMEM;
DRM_UNLOCK();
ret = bus_dma_tag_create(NULL, PAGE_SIZE, 0, /* tag, align, boundary */
gart_info->table_mask, BUS_SPACE_MAXADDR, /* lowaddr, highaddr */
NULL, NULL, /* filtfunc, filtfuncargs */
gart_info->table_size, 1, /* maxsize, nsegs */
gart_info->table_size, /* maxsegsize */
0, NULL, NULL, /* flags, lockfunc, lockfuncargs */
&dmah->tag);
if (ret != 0) {
free(dmah, DRM_MEM_DMA);
return ENOMEM;
}
flags = BUS_DMA_WAITOK | BUS_DMA_ZERO;
if (gart_info->gart_reg_if == DRM_ATI_GART_IGP)
flags |= BUS_DMA_NOCACHE;
ret = bus_dmamem_alloc(dmah->tag, &dmah->vaddr, flags, &dmah->map);
if (ret != 0) {
bus_dma_tag_destroy(dmah->tag);
free(dmah, DRM_MEM_DMA);
return ENOMEM;
}
DRM_LOCK();
ret = bus_dmamap_load(dmah->tag, dmah->map, dmah->vaddr,
gart_info->table_size, drm_ati_alloc_pcigart_table_cb, dmah,
BUS_DMA_NOWAIT);
if (ret != 0) {
bus_dmamem_free(dmah->tag, dmah->vaddr, dmah->map);
bus_dma_tag_destroy(dmah->tag);
free(dmah, DRM_MEM_DMA);
return ENOMEM;
}
gart_info->dmah = dmah;
return 0;
}
static void
drm_ati_free_pcigart_table(struct drm_device *dev,
struct drm_ati_pcigart_info *gart_info)
{
struct drm_dma_handle *dmah = gart_info->dmah;
bus_dmamap_unload(dmah->tag, dmah->map);
bus_dmamem_free(dmah->tag, dmah->vaddr, dmah->map);
bus_dma_tag_destroy(dmah->tag);
free(dmah, DRM_MEM_DMA);
gart_info->dmah = NULL;
}
int
drm_ati_pcigart_cleanup(struct drm_device *dev,
struct drm_ati_pcigart_info *gart_info)
{
/* we need to support large memory configurations */
if (dev->sg == NULL) {
DRM_ERROR("no scatter/gather memory!\n");
return 0;
}
if (gart_info->bus_addr) {
if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
gart_info->bus_addr = 0;
if (gart_info->dmah)
drm_ati_free_pcigart_table(dev, gart_info);
}
}
return 1;
}
int
drm_ati_pcigart_init(struct drm_device *dev,
struct drm_ati_pcigart_info *gart_info)
{
void *address = NULL;
unsigned long pages;
u32 *pci_gart, page_base;
dma_addr_t bus_address = 0;
dma_addr_t entry_addr;
int i, j, ret = 0;
int max_pages;
/* we need to support large memory configurations */
if (dev->sg == NULL) {
DRM_ERROR("no scatter/gather memory!\n");
goto done;
}
if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n");
ret = drm_ati_alloc_pcigart_table(dev, gart_info);
if (ret) {
DRM_ERROR("cannot allocate PCI GART page!\n");
goto done;
}
address = (void *)gart_info->dmah->vaddr;
bus_address = gart_info->dmah->busaddr;
} else {
address = gart_info->addr;
bus_address = gart_info->bus_addr;
DRM_DEBUG("PCI: Gart Table: VRAM %08X mapped at %08lX\n",
(unsigned int)bus_address, (unsigned long)address);
}
pci_gart = (u32 *) address;
max_pages = (gart_info->table_size / sizeof(u32));
pages = (dev->sg->pages <= max_pages)
? dev->sg->pages : max_pages;
memset(pci_gart, 0, max_pages * sizeof(u32));
KASSERT(PAGE_SIZE >= ATI_PCIGART_PAGE_SIZE, ("page size too small"));
for (i = 0; i < pages; i++) {
entry_addr = dev->sg->busaddr[i];
for (j = 0; j < (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); j++) {
page_base = (u32) entry_addr & ATI_PCIGART_PAGE_MASK;
switch(gart_info->gart_reg_if) {
case DRM_ATI_GART_IGP:
page_base |=
(upper_32_bits(entry_addr) & 0xff) << 4;
page_base |= ATI_GART_READ | ATI_GART_WRITE;
page_base |= ATI_GART_NOSNOOP;
break;
case DRM_ATI_GART_PCIE:
page_base >>= 8;
page_base |=
(upper_32_bits(entry_addr) & 0xff) << 24;
page_base |= ATI_GART_READ | ATI_GART_WRITE;
page_base |= ATI_GART_NOSNOOP;
break;
default:
case DRM_ATI_GART_PCI:
break;
}
*pci_gart = cpu_to_le32(page_base);
pci_gart++;
entry_addr += ATI_PCIGART_PAGE_SIZE;
}
}
ret = 1;
done:
gart_info->addr = address;
gart_info->bus_addr = bus_address;
return ret;
}

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@@ -1,434 +0,0 @@
/*-
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Author:
* Rickard E. (Rik) Faith <faith@valinux.com>
* Gareth Hughes <gareth@valinux.com>
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/** @file drm_agpsupport.c
* Support code for tying the kernel AGP support to DRM drivers and
* the DRM's AGP ioctls.
*/
#include "dev/drm/drmP.h"
#include <dev/agp/agpreg.h>
#include <dev/pci/pcireg.h>
/* Returns 1 if AGP or 0 if not. */
static int
drm_device_find_capability(struct drm_device *dev, int cap)
{
return (pci_find_cap(dev->device, cap, NULL) == 0);
}
int drm_device_is_agp(struct drm_device *dev)
{
if (dev->driver->device_is_agp != NULL) {
int ret;
/* device_is_agp returns a tristate, 0 = not AGP, 1 = definitely
* AGP, 2 = fall back to PCI capability
*/
ret = (*dev->driver->device_is_agp)(dev);
if (ret != DRM_MIGHT_BE_AGP)
return ret;
}
return (drm_device_find_capability(dev, PCIY_AGP));
}
int drm_device_is_pcie(struct drm_device *dev)
{
return (drm_device_find_capability(dev, PCIY_EXPRESS));
}
int drm_agp_info(struct drm_device * dev, struct drm_agp_info *info)
{
struct agp_info *kern;
if (!dev->agp || !dev->agp->acquired)
return EINVAL;
kern = &dev->agp->info;
agp_get_info(dev->agp->agpdev, kern);
info->agp_version_major = 1;
info->agp_version_minor = 0;
info->mode = kern->ai_mode;
info->aperture_base = kern->ai_aperture_base;
info->aperture_size = kern->ai_aperture_size;
info->memory_allowed = kern->ai_memory_allowed;
info->memory_used = kern->ai_memory_used;
info->id_vendor = kern->ai_devid & 0xffff;
info->id_device = kern->ai_devid >> 16;
return 0;
}
int drm_agp_info_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
int err;
struct drm_agp_info info;
err = drm_agp_info(dev, &info);
if (err != 0)
return err;
*(struct drm_agp_info *) data = info;
return 0;
}
int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
return drm_agp_acquire(dev);
}
int drm_agp_acquire(struct drm_device *dev)
{
int retcode;
if (!dev->agp || dev->agp->acquired)
return EINVAL;
retcode = agp_acquire(dev->agp->agpdev);
if (retcode)
return retcode;
dev->agp->acquired = 1;
return 0;
}
int drm_agp_release_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
return drm_agp_release(dev);
}
int drm_agp_release(struct drm_device * dev)
{
if (!dev->agp || !dev->agp->acquired)
return EINVAL;
agp_release(dev->agp->agpdev);
dev->agp->acquired = 0;
return 0;
}
int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode)
{
if (!dev->agp || !dev->agp->acquired)
return EINVAL;
dev->agp->mode = mode.mode;
agp_enable(dev->agp->agpdev, mode.mode);
dev->agp->enabled = 1;
return 0;
}
int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_agp_mode mode;
mode = *(struct drm_agp_mode *) data;
return drm_agp_enable(dev, mode);
}
int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
{
drm_agp_mem_t *entry;
void *handle;
unsigned long pages;
u_int32_t type;
struct agp_memory_info info;
if (!dev->agp || !dev->agp->acquired)
return EINVAL;
entry = malloc(sizeof(*entry), DRM_MEM_AGPLISTS, M_NOWAIT | M_ZERO);
if (entry == NULL)
return ENOMEM;
pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
type = (u_int32_t) request->type;
DRM_UNLOCK();
handle = drm_agp_allocate_memory(pages, type);
DRM_LOCK();
if (handle == NULL) {
free(entry, DRM_MEM_AGPLISTS);
return ENOMEM;
}
entry->handle = handle;
entry->bound = 0;
entry->pages = pages;
entry->prev = NULL;
entry->next = dev->agp->memory;
if (dev->agp->memory)
dev->agp->memory->prev = entry;
dev->agp->memory = entry;
agp_memory_info(dev->agp->agpdev, entry->handle, &info);
request->handle = (unsigned long) entry->handle;
request->physical = info.ami_physical;
return 0;
}
int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_agp_buffer request;
int retcode;
request = *(struct drm_agp_buffer *) data;
DRM_LOCK();
retcode = drm_agp_alloc(dev, &request);
DRM_UNLOCK();
*(struct drm_agp_buffer *) data = request;
return retcode;
}
static drm_agp_mem_t * drm_agp_lookup_entry(struct drm_device *dev,
void *handle)
{
drm_agp_mem_t *entry;
for (entry = dev->agp->memory; entry; entry = entry->next) {
if (entry->handle == handle) return entry;
}
return NULL;
}
int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request)
{
drm_agp_mem_t *entry;
int retcode;
if (!dev->agp || !dev->agp->acquired)
return EINVAL;
entry = drm_agp_lookup_entry(dev, (void *)request->handle);
if (entry == NULL || !entry->bound)
return EINVAL;
DRM_UNLOCK();
retcode = drm_agp_unbind_memory(entry->handle);
DRM_LOCK();
if (retcode == 0)
entry->bound = 0;
return retcode;
}
int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_agp_binding request;
int retcode;
request = *(struct drm_agp_binding *) data;
DRM_LOCK();
retcode = drm_agp_unbind(dev, &request);
DRM_UNLOCK();
return retcode;
}
int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request)
{
drm_agp_mem_t *entry;
int retcode;
int page;
if (!dev->agp || !dev->agp->acquired)
return EINVAL;
DRM_DEBUG("agp_bind, page_size=%x\n", (int)PAGE_SIZE);
entry = drm_agp_lookup_entry(dev, (void *)request->handle);
if (entry == NULL || entry->bound)
return EINVAL;
page = (request->offset + PAGE_SIZE - 1) / PAGE_SIZE;
DRM_UNLOCK();
retcode = drm_agp_bind_memory(entry->handle, page);
DRM_LOCK();
if (retcode == 0)
entry->bound = dev->agp->base + (page << PAGE_SHIFT);
return retcode;
}
int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_agp_binding request;
int retcode;
request = *(struct drm_agp_binding *) data;
DRM_LOCK();
retcode = drm_agp_bind(dev, &request);
DRM_UNLOCK();
return retcode;
}
int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request)
{
drm_agp_mem_t *entry;
if (!dev->agp || !dev->agp->acquired)
return EINVAL;
entry = drm_agp_lookup_entry(dev, (void*)request->handle);
if (entry == NULL)
return EINVAL;
if (entry->prev)
entry->prev->next = entry->next;
else
dev->agp->memory = entry->next;
if (entry->next)
entry->next->prev = entry->prev;
DRM_UNLOCK();
if (entry->bound)
drm_agp_unbind_memory(entry->handle);
drm_agp_free_memory(entry->handle);
DRM_LOCK();
free(entry, DRM_MEM_AGPLISTS);
return 0;
}
int drm_agp_free_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_agp_buffer request;
int retcode;
request = *(struct drm_agp_buffer *) data;
DRM_LOCK();
retcode = drm_agp_free(dev, &request);
DRM_UNLOCK();
return retcode;
}
drm_agp_head_t *drm_agp_init(void)
{
device_t agpdev;
drm_agp_head_t *head = NULL;
int agp_available = 1;
agpdev = DRM_AGP_FIND_DEVICE();
if (!agpdev)
agp_available = 0;
DRM_DEBUG("agp_available = %d\n", agp_available);
if (agp_available) {
head = malloc(sizeof(*head), DRM_MEM_AGPLISTS,
M_NOWAIT | M_ZERO);
if (head == NULL)
return NULL;
head->agpdev = agpdev;
agp_get_info(agpdev, &head->info);
head->base = head->info.ai_aperture_base;
head->memory = NULL;
DRM_INFO("AGP at 0x%08lx %dMB\n",
(long)head->info.ai_aperture_base,
(int)(head->info.ai_aperture_size >> 20));
}
return head;
}
void *drm_agp_allocate_memory(size_t pages, u32 type)
{
device_t agpdev;
agpdev = DRM_AGP_FIND_DEVICE();
if (!agpdev)
return NULL;
return agp_alloc_memory(agpdev, type, pages << AGP_PAGE_SHIFT);
}
int drm_agp_free_memory(void *handle)
{
device_t agpdev;
agpdev = DRM_AGP_FIND_DEVICE();
if (!agpdev || !handle)
return 0;
agp_free_memory(agpdev, handle);
return 1;
}
int drm_agp_bind_memory(void *handle, off_t start)
{
device_t agpdev;
agpdev = DRM_AGP_FIND_DEVICE();
if (!agpdev || !handle)
return EINVAL;
return agp_bind_memory(agpdev, handle, start * PAGE_SIZE);
}
int drm_agp_unbind_memory(void *handle)
{
device_t agpdev;
agpdev = DRM_AGP_FIND_DEVICE();
if (!agpdev || !handle)
return EINVAL;
return agp_unbind_memory(agpdev, handle);
}


@@ -1,91 +0,0 @@
/**
* \file drm_atomic.h
* Atomic operations used in the DRM which may or may not be provided by the OS.
*
* \author Eric Anholt <anholt@FreeBSD.org>
*/
/*-
* Copyright 2004 Eric Anholt
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/* Many of these implementations are rather fake, but good enough. */
typedef u_int32_t atomic_t;
#define atomic_set(p, v) (*(p) = (v))
#define atomic_read(p) (*(p))
#define atomic_inc(p) atomic_add_int(p, 1)
#define atomic_dec(p) atomic_subtract_int(p, 1)
#define atomic_add(n, p) atomic_add_int(p, n)
#define atomic_sub(n, p) atomic_subtract_int(p, n)
static __inline atomic_t
test_and_set_bit(int b, volatile void *p)
{
int s = splhigh();
unsigned int m = 1<<b;
unsigned int r = *(volatile int *)p & m;
*(volatile int *)p |= m;
splx(s);
return r;
}
static __inline void
clear_bit(int b, volatile void *p)
{
atomic_clear_int(((volatile int *)p) + (b >> 5), 1 << (b & 0x1f));
}
static __inline void
set_bit(int b, volatile void *p)
{
atomic_set_int(((volatile int *)p) + (b >> 5), 1 << (b & 0x1f));
}
static __inline int
test_bit(int b, volatile void *p)
{
return ((volatile int *)p)[b >> 5] & (1 << (b & 0x1f));
}
static __inline int
find_first_zero_bit(volatile void *p, int max)
{
int b;
volatile int *ptr = (volatile int *)p;
for (b = 0; b < max; b += 32) {
if (ptr[b >> 5] != ~0) {
for (;;) {
if ((ptr[b >> 5] & (1 << (b & 0x1f))) == 0)
return b;
b++;
}
}
}
return max;
}


@@ -1,190 +0,0 @@
/*-
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
* Gareth Hughes <gareth@valinux.com>
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/** @file drm_auth.c
* Implementation of the get/authmagic ioctls implementing the authentication
* scheme between the master and clients.
*/
#include "dev/drm/drmP.h"
static int drm_hash_magic(drm_magic_t magic)
{
return magic & (DRM_HASH_SIZE-1);
}
/**
* Returns the file private associated with the given magic number.
*/
static struct drm_file *drm_find_file(struct drm_device *dev, drm_magic_t magic)
{
drm_magic_entry_t *pt;
int hash = drm_hash_magic(magic);
DRM_SPINLOCK_ASSERT(&dev->dev_lock);
for (pt = dev->magiclist[hash].head; pt; pt = pt->next) {
if (pt->magic == magic) {
return pt->priv;
}
}
return NULL;
}
/**
* Inserts the given magic number into the hash table of used magic number
* lists.
*/
static int drm_add_magic(struct drm_device *dev, struct drm_file *priv,
drm_magic_t magic)
{
int hash;
drm_magic_entry_t *entry;
DRM_DEBUG("%d\n", magic);
DRM_SPINLOCK_ASSERT(&dev->dev_lock);
hash = drm_hash_magic(magic);
entry = malloc(sizeof(*entry), DRM_MEM_MAGIC, M_ZERO | M_NOWAIT);
if (!entry)
return ENOMEM;
entry->magic = magic;
entry->priv = priv;
entry->next = NULL;
if (dev->magiclist[hash].tail) {
dev->magiclist[hash].tail->next = entry;
dev->magiclist[hash].tail = entry;
} else {
dev->magiclist[hash].head = entry;
dev->magiclist[hash].tail = entry;
}
return 0;
}
/**
* Removes the given magic number from the hash table of used magic number
* lists.
*/
static int drm_remove_magic(struct drm_device *dev, drm_magic_t magic)
{
drm_magic_entry_t *prev = NULL;
drm_magic_entry_t *pt;
int hash;
DRM_SPINLOCK_ASSERT(&dev->dev_lock);
DRM_DEBUG("%d\n", magic);
hash = drm_hash_magic(magic);
for (pt = dev->magiclist[hash].head; pt; prev = pt, pt = pt->next) {
if (pt->magic == magic) {
if (dev->magiclist[hash].head == pt) {
dev->magiclist[hash].head = pt->next;
}
if (dev->magiclist[hash].tail == pt) {
dev->magiclist[hash].tail = prev;
}
if (prev) {
prev->next = pt->next;
}
free(pt, DRM_MEM_MAGIC);
return 0;
}
}
return EINVAL;
}
/**
* Called by the client, this returns a unique magic number to be authorized
* by the master.
*
* The master may use its own knowledge of the client (such as the X
* connection that the magic is passed over) to determine if the magic number
* should be authenticated.
*/
int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
static drm_magic_t sequence = 0;
struct drm_auth *auth = data;
/* Find unique magic */
if (file_priv->magic) {
auth->magic = file_priv->magic;
} else {
DRM_LOCK();
do {
int old = sequence;
auth->magic = old+1;
if (!atomic_cmpset_int(&sequence, old, auth->magic))
continue;
} while (drm_find_file(dev, auth->magic));
file_priv->magic = auth->magic;
drm_add_magic(dev, file_priv, auth->magic);
DRM_UNLOCK();
}
DRM_DEBUG("%u\n", auth->magic);
return 0;
}
/**
* Marks the client associated with the given magic number as authenticated.
*/
int drm_authmagic(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_auth *auth = data;
struct drm_file *priv;
DRM_DEBUG("%u\n", auth->magic);
DRM_LOCK();
priv = drm_find_file(dev, auth->magic);
if (priv != NULL) {
priv->authenticated = 1;
drm_remove_magic(dev, auth->magic);
DRM_UNLOCK();
return 0;
} else {
DRM_UNLOCK();
return EINVAL;
}
}

File diff suppressed because it is too large.


@@ -1,312 +0,0 @@
/*-
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
* Gareth Hughes <gareth@valinux.com>
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/** @file drm_context.c
* Implementation of the context management ioctls.
*/
#include "dev/drm/drmP.h"
/* ================================================================
* Context bitmap support
*/
void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle)
{
if (ctx_handle < 0 || ctx_handle >= DRM_MAX_CTXBITMAP ||
dev->ctx_bitmap == NULL) {
DRM_ERROR("Attempt to free invalid context handle: %d\n",
ctx_handle);
return;
}
DRM_LOCK();
clear_bit(ctx_handle, dev->ctx_bitmap);
dev->context_sareas[ctx_handle] = NULL;
DRM_UNLOCK();
return;
}
int drm_ctxbitmap_next(struct drm_device *dev)
{
int bit;
if (dev->ctx_bitmap == NULL)
return -1;
DRM_LOCK();
bit = find_first_zero_bit(dev->ctx_bitmap, DRM_MAX_CTXBITMAP);
if (bit >= DRM_MAX_CTXBITMAP) {
DRM_UNLOCK();
return -1;
}
set_bit(bit, dev->ctx_bitmap);
DRM_DEBUG("bit : %d\n", bit);
if ((bit+1) > dev->max_context) {
drm_local_map_t **ctx_sareas;
int max_ctx = (bit+1);
ctx_sareas = realloc(dev->context_sareas,
max_ctx * sizeof(*dev->context_sareas),
DRM_MEM_SAREA, M_NOWAIT);
if (ctx_sareas == NULL) {
clear_bit(bit, dev->ctx_bitmap);
DRM_DEBUG("failed to allocate bit : %d\n", bit);
DRM_UNLOCK();
return -1;
}
dev->max_context = max_ctx;
dev->context_sareas = ctx_sareas;
dev->context_sareas[bit] = NULL;
}
DRM_UNLOCK();
return bit;
}
int drm_ctxbitmap_init(struct drm_device *dev)
{
int i;
int temp;
DRM_LOCK();
dev->ctx_bitmap = malloc(PAGE_SIZE, DRM_MEM_CTXBITMAP,
M_NOWAIT | M_ZERO);
if (dev->ctx_bitmap == NULL) {
DRM_UNLOCK();
return ENOMEM;
}
dev->context_sareas = NULL;
dev->max_context = -1;
DRM_UNLOCK();
for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
temp = drm_ctxbitmap_next(dev);
DRM_DEBUG("drm_ctxbitmap_init : %d\n", temp);
}
return 0;
}
void drm_ctxbitmap_cleanup(struct drm_device *dev)
{
DRM_LOCK();
if (dev->context_sareas != NULL)
free(dev->context_sareas, DRM_MEM_SAREA);
free(dev->ctx_bitmap, DRM_MEM_CTXBITMAP);
DRM_UNLOCK();
}
/* ================================================================
* Per Context SAREA Support
*/
int drm_getsareactx(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_ctx_priv_map *request = data;
drm_local_map_t *map;
DRM_LOCK();
if (dev->max_context < 0 ||
request->ctx_id >= (unsigned) dev->max_context) {
DRM_UNLOCK();
return EINVAL;
}
map = dev->context_sareas[request->ctx_id];
DRM_UNLOCK();
request->handle = (void *)map->handle;
return 0;
}
int drm_setsareactx(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_ctx_priv_map *request = data;
drm_local_map_t *map = NULL;
DRM_LOCK();
TAILQ_FOREACH(map, &dev->maplist, link) {
if (map->handle == request->handle) {
if (dev->max_context < 0)
goto bad;
if (request->ctx_id >= (unsigned) dev->max_context)
goto bad;
dev->context_sareas[request->ctx_id] = map;
DRM_UNLOCK();
return 0;
}
}
bad:
DRM_UNLOCK();
return EINVAL;
}
/* ================================================================
* The actual DRM context handling routines
*/
int drm_context_switch(struct drm_device *dev, int old, int new)
{
if (test_and_set_bit(0, &dev->context_flag)) {
DRM_ERROR("Reentering -- FIXME\n");
return EBUSY;
}
DRM_DEBUG("Context switch from %d to %d\n", old, new);
if (new == dev->last_context) {
clear_bit(0, &dev->context_flag);
return 0;
}
return 0;
}
int drm_context_switch_complete(struct drm_device *dev, int new)
{
dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
DRM_ERROR("Lock isn't held after context switch\n");
}
/* If a context switch is ever initiated
when the kernel holds the lock, release
that lock here. */
clear_bit(0, &dev->context_flag);
return 0;
}
int drm_resctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_ctx_res *res = data;
struct drm_ctx ctx;
int i;
if (res->count >= DRM_RESERVED_CONTEXTS) {
bzero(&ctx, sizeof(ctx));
for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
ctx.handle = i;
if (DRM_COPY_TO_USER(&res->contexts[i],
&ctx, sizeof(ctx)))
return EFAULT;
}
}
res->count = DRM_RESERVED_CONTEXTS;
return 0;
}
int drm_addctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_ctx *ctx = data;
ctx->handle = drm_ctxbitmap_next(dev);
if (ctx->handle == DRM_KERNEL_CONTEXT) {
/* Skip kernel's context and get a new one. */
ctx->handle = drm_ctxbitmap_next(dev);
}
DRM_DEBUG("%d\n", ctx->handle);
if (ctx->handle == -1) {
DRM_DEBUG("Not enough free contexts.\n");
/* Should this return -EBUSY instead? */
return ENOMEM;
}
if (dev->driver->context_ctor && ctx->handle != DRM_KERNEL_CONTEXT) {
DRM_LOCK();
dev->driver->context_ctor(dev, ctx->handle);
DRM_UNLOCK();
}
return 0;
}
int drm_modctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
/* This does nothing */
return 0;
}
int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_ctx *ctx = data;
/* This is 0, because we don't handle any context flags */
ctx->flags = 0;
return 0;
}
int drm_switchctx(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_ctx *ctx = data;
DRM_DEBUG("%d\n", ctx->handle);
return drm_context_switch(dev, dev->last_context, ctx->handle);
}
int drm_newctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_ctx *ctx = data;
DRM_DEBUG("%d\n", ctx->handle);
drm_context_switch_complete(dev, ctx->handle);
return 0;
}
int drm_rmctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_ctx *ctx = data;
DRM_DEBUG("%d\n", ctx->handle);
if (ctx->handle != DRM_KERNEL_CONTEXT) {
if (dev->driver->context_dtor) {
DRM_LOCK();
dev->driver->context_dtor(dev, ctx->handle);
DRM_UNLOCK();
}
drm_ctxbitmap_free(dev, ctx->handle);
}
return 0;
}


@@ -1,139 +0,0 @@
/*-
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
* Gareth Hughes <gareth@valinux.com>
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/** @file drm_dma.c
* Support code for DMA buffer management.
*
* The implementation used to be significantly more complicated, but the
* complexity has been moved into the drivers as different buffer management
* schemes evolved.
*/
#include "dev/drm/drmP.h"
int drm_dma_setup(struct drm_device *dev)
{
dev->dma = malloc(sizeof(*dev->dma), DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
if (dev->dma == NULL)
return ENOMEM;
DRM_SPININIT(&dev->dma_lock, "drmdma");
return 0;
}
void drm_dma_takedown(struct drm_device *dev)
{
drm_device_dma_t *dma = dev->dma;
int i, j;
if (dma == NULL)
return;
/* Clear dma buffers */
for (i = 0; i <= DRM_MAX_ORDER; i++) {
if (dma->bufs[i].seg_count) {
DRM_DEBUG("order %d: buf_count = %d,"
" seg_count = %d\n", i, dma->bufs[i].buf_count,
dma->bufs[i].seg_count);
for (j = 0; j < dma->bufs[i].seg_count; j++) {
drm_pci_free(dev, dma->bufs[i].seglist[j]);
}
free(dma->bufs[i].seglist, DRM_MEM_SEGS);
}
if (dma->bufs[i].buf_count) {
for (j = 0; j < dma->bufs[i].buf_count; j++) {
free(dma->bufs[i].buflist[j].dev_private,
DRM_MEM_BUFS);
}
free(dma->bufs[i].buflist, DRM_MEM_BUFS);
}
}
free(dma->buflist, DRM_MEM_BUFS);
free(dma->pagelist, DRM_MEM_PAGES);
free(dev->dma, DRM_MEM_DRIVER);
dev->dma = NULL;
DRM_SPINUNINIT(&dev->dma_lock);
}
void drm_free_buffer(struct drm_device *dev, drm_buf_t *buf)
{
if (!buf)
return;
buf->pending = 0;
buf->file_priv= NULL;
buf->used = 0;
}
void drm_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
{
drm_device_dma_t *dma = dev->dma;
int i;
if (!dma)
return;
for (i = 0; i < dma->buf_count; i++) {
if (dma->buflist[i]->file_priv == file_priv) {
switch (dma->buflist[i]->list) {
case DRM_LIST_NONE:
drm_free_buffer(dev, dma->buflist[i]);
break;
case DRM_LIST_WAIT:
dma->buflist[i]->list = DRM_LIST_RECLAIM;
break;
default:
/* Buffer already on hardware. */
break;
}
}
}
}
/* Call into the driver-specific DMA handler */
int drm_dma(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
if (dev->driver->dma_ioctl) {
/* shared code returns -errno */
return -dev->driver->dma_ioctl(dev, data, file_priv);
} else {
DRM_DEBUG("DMA ioctl on driver with no dma handler\n");
return EINVAL;
}
}


@@ -1,173 +0,0 @@
/*-
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
* Gareth Hughes <gareth@valinux.com>
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/** @file drm_drawable.c
* This file implements ioctls to store information along with DRM drawables,
* such as the current set of cliprects for vblank-synced buffer swaps.
*/
#include "dev/drm/drmP.h"
struct bsd_drm_drawable_info {
struct drm_drawable_info info;
int handle;
RB_ENTRY(bsd_drm_drawable_info) tree;
};
static int
drm_drawable_compare(struct bsd_drm_drawable_info *a,
struct bsd_drm_drawable_info *b)
{
if (a->handle > b->handle)
return 1;
if (a->handle < b->handle)
return -1;
return 0;
}
RB_GENERATE_STATIC(drawable_tree, bsd_drm_drawable_info, tree,
drm_drawable_compare);
struct drm_drawable_info *
drm_get_drawable_info(struct drm_device *dev, int handle)
{
struct bsd_drm_drawable_info find, *result;
find.handle = handle;
result = RB_FIND(drawable_tree, &dev->drw_head, &find);
return &result->info;
}
int drm_adddraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_draw *draw = data;
struct bsd_drm_drawable_info *info;
info = malloc(sizeof(struct bsd_drm_drawable_info), DRM_MEM_DRAWABLE,
M_NOWAIT | M_ZERO);
if (info == NULL)
return ENOMEM;
info->handle = alloc_unr(dev->drw_unrhdr);
DRM_SPINLOCK(&dev->drw_lock);
RB_INSERT(drawable_tree, &dev->drw_head, info);
draw->handle = info->handle;
DRM_SPINUNLOCK(&dev->drw_lock);
DRM_DEBUG("%d\n", draw->handle);
return 0;
}
int drm_rmdraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_draw *draw = (struct drm_draw *)data;
struct drm_drawable_info *info;
DRM_SPINLOCK(&dev->drw_lock);
info = drm_get_drawable_info(dev, draw->handle);
if (info != NULL) {
RB_REMOVE(drawable_tree, &dev->drw_head,
(struct bsd_drm_drawable_info *)info);
DRM_SPINUNLOCK(&dev->drw_lock);
free_unr(dev->drw_unrhdr, draw->handle);
free(info->rects, DRM_MEM_DRAWABLE);
free(info, DRM_MEM_DRAWABLE);
return 0;
} else {
DRM_SPINUNLOCK(&dev->drw_lock);
return EINVAL;
}
}
int drm_update_draw(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_drawable_info *info;
struct drm_update_draw *update = (struct drm_update_draw *)data;
int ret;
info = drm_get_drawable_info(dev, update->handle);
if (info == NULL)
return EINVAL;
switch (update->type) {
case DRM_DRAWABLE_CLIPRECTS:
DRM_SPINLOCK(&dev->drw_lock);
if (update->num != info->num_rects) {
free(info->rects, DRM_MEM_DRAWABLE);
info->rects = NULL;
info->num_rects = 0;
}
if (update->num == 0) {
DRM_SPINUNLOCK(&dev->drw_lock);
return 0;
}
if (info->rects == NULL) {
info->rects = malloc(sizeof(*info->rects) *
update->num, DRM_MEM_DRAWABLE, M_NOWAIT);
if (info->rects == NULL) {
DRM_SPINUNLOCK(&dev->drw_lock);
return ENOMEM;
}
info->num_rects = update->num;
}
/* For some reason the pointer arg is unsigned long long. */
ret = copyin((void *)(intptr_t)update->data, info->rects,
sizeof(*info->rects) * info->num_rects);
DRM_SPINUNLOCK(&dev->drw_lock);
return ret;
default:
return EINVAL;
}
}
void drm_drawable_free_all(struct drm_device *dev)
{
struct bsd_drm_drawable_info *info, *next;
DRM_SPINLOCK(&dev->drw_lock);
for (info = RB_MIN(drawable_tree, &dev->drw_head);
info != NULL ; info = next) {
next = RB_NEXT(drawable_tree, &dev->drw_head, info);
RB_REMOVE(drawable_tree, &dev->drw_head,
(struct bsd_drm_drawable_info *)info);
DRM_SPINUNLOCK(&dev->drw_lock);
free_unr(dev->drw_unrhdr, info->handle);
free(info->info.rects, DRM_MEM_DRAWABLE);
free(info, DRM_MEM_DRAWABLE);
DRM_SPINLOCK(&dev->drw_lock);
}
DRM_SPINUNLOCK(&dev->drw_lock);
}


@@ -1,833 +0,0 @@
/*-
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
* Gareth Hughes <gareth@valinux.com>
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/** @file drm_drv.c
* The catch-all file for DRM device support, including module setup/teardown,
* open/close, and ioctl dispatch.
*/
#include <sys/limits.h>
#include "dev/drm/drmP.h"
#include "dev/drm/drm.h"
#include "dev/drm/drm_sarea.h"
#ifdef DRM_DEBUG_DEFAULT_ON
int drm_debug_flag = 1;
#else
int drm_debug_flag = 0;
#endif
static int drm_load(struct drm_device *dev);
static void drm_unload(struct drm_device *dev);
static drm_pci_id_list_t *drm_find_description(int vendor, int device,
drm_pci_id_list_t *idlist);
MODULE_VERSION(drm, 1);
MODULE_DEPEND(drm, agp, 1, 1, 1);
MODULE_DEPEND(drm, pci, 1, 1, 1);
MODULE_DEPEND(drm, mem, 1, 1, 1);
static drm_ioctl_desc_t drm_ioctls[256] = {
DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_dma, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_draw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
};
static struct cdevsw drm_cdevsw = {
.d_version = D_VERSION,
.d_open = drm_open,
.d_read = drm_read,
.d_ioctl = drm_ioctl,
.d_poll = drm_poll,
.d_mmap = drm_mmap,
.d_name = "drm",
.d_flags = D_TRACKCLOSE
};
static int drm_msi = 1; /* Enable by default. */
SYSCTL_NODE(_hw, OID_AUTO, drm, CTLFLAG_RW, NULL, "DRM device");
SYSCTL_INT(_hw_drm, OID_AUTO, msi, CTLFLAG_RDTUN, &drm_msi, 1,
"Enable MSI interrupts for drm devices");
static struct drm_msi_blacklist_entry drm_msi_blacklist[] = {
{0x8086, 0x2772}, /* Intel i945G */ \
{0x8086, 0x27A2}, /* Intel i945GM */ \
{0x8086, 0x27AE}, /* Intel i945GME */ \
{0, 0}
};
static int drm_msi_is_blacklisted(int vendor, int device)
{
int i = 0;
for (i = 0; drm_msi_blacklist[i].vendor != 0; i++) {
if ((drm_msi_blacklist[i].vendor == vendor) &&
(drm_msi_blacklist[i].device == device)) {
return 1;
}
}
return 0;
}
int drm_probe(device_t kdev, drm_pci_id_list_t *idlist)
{
drm_pci_id_list_t *id_entry;
int vendor, device;
vendor = pci_get_vendor(kdev);
device = pci_get_device(kdev);
if (pci_get_class(kdev) != PCIC_DISPLAY
|| pci_get_subclass(kdev) != PCIS_DISPLAY_VGA)
return ENXIO;
id_entry = drm_find_description(vendor, device, idlist);
if (id_entry != NULL) {
if (!device_get_desc(kdev)) {
DRM_DEBUG("desc : %s\n", device_get_desc(kdev));
device_set_desc(kdev, id_entry->name);
}
return 0;
}
return ENXIO;
}
int drm_attach(device_t kdev, drm_pci_id_list_t *idlist)
{
struct drm_device *dev;
drm_pci_id_list_t *id_entry;
int unit, msicount;
unit = device_get_unit(kdev);
dev = device_get_softc(kdev);
dev->device = kdev;
dev->devnode = make_dev(&drm_cdevsw,
0,
DRM_DEV_UID,
DRM_DEV_GID,
DRM_DEV_MODE,
"dri/card%d", unit);
dev->devnode->si_drv1 = dev;
dev->pci_domain = pci_get_domain(dev->device);
dev->pci_bus = pci_get_bus(dev->device);
dev->pci_slot = pci_get_slot(dev->device);
dev->pci_func = pci_get_function(dev->device);
dev->pci_vendor = pci_get_vendor(dev->device);
dev->pci_device = pci_get_device(dev->device);
if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) {
if (drm_msi &&
!drm_msi_is_blacklisted(dev->pci_vendor, dev->pci_device)) {
msicount = pci_msi_count(dev->device);
DRM_DEBUG("MSI count = %d\n", msicount);
if (msicount > 1)
msicount = 1;
if (pci_alloc_msi(dev->device, &msicount) == 0) {
DRM_INFO("MSI enabled %d message(s)\n",
msicount);
dev->msi_enabled = 1;
dev->irqrid = 1;
}
}
dev->irqr = bus_alloc_resource_any(dev->device, SYS_RES_IRQ,
&dev->irqrid, RF_SHAREABLE);
if (!dev->irqr) {
return ENOENT;
}
dev->irq = (int) rman_get_start(dev->irqr);
}
mtx_init(&dev->dev_lock, "drmdev", NULL, MTX_DEF);
mtx_init(&dev->irq_lock, "drmirq", NULL, MTX_DEF);
mtx_init(&dev->vbl_lock, "drmvbl", NULL, MTX_DEF);
mtx_init(&dev->drw_lock, "drmdrw", NULL, MTX_DEF);
id_entry = drm_find_description(dev->pci_vendor,
dev->pci_device, idlist);
dev->id_entry = id_entry;
return drm_load(dev);
}
int drm_detach(device_t kdev)
{
struct drm_device *dev;
dev = device_get_softc(kdev);
drm_unload(dev);
if (dev->irqr) {
bus_release_resource(dev->device, SYS_RES_IRQ, dev->irqrid,
dev->irqr);
if (dev->msi_enabled) {
pci_release_msi(dev->device);
DRM_INFO("MSI released\n");
}
}
return 0;
}
#ifndef DRM_DEV_NAME
#define DRM_DEV_NAME "drm"
#endif
devclass_t drm_devclass;
drm_pci_id_list_t *drm_find_description(int vendor, int device,
drm_pci_id_list_t *idlist)
{
int i = 0;
for (i = 0; idlist[i].vendor != 0; i++) {
if ((idlist[i].vendor == vendor) &&
((idlist[i].device == device) ||
(idlist[i].device == 0))) {
return &idlist[i];
}
}
return NULL;
}
static int drm_firstopen(struct drm_device *dev)
{
drm_local_map_t *map;
int i;
DRM_SPINLOCK_ASSERT(&dev->dev_lock);
/* prebuild the SAREA */
i = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM,
_DRM_CONTAINS_LOCK, &map);
if (i != 0)
return i;
if (dev->driver->firstopen)
dev->driver->firstopen(dev);
dev->buf_use = 0;
if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) {
i = drm_dma_setup(dev);
if (i != 0)
return i;
}
for (i = 0; i < DRM_HASH_SIZE; i++) {
dev->magiclist[i].head = NULL;
dev->magiclist[i].tail = NULL;
}
dev->lock.lock_queue = 0;
dev->irq_enabled = 0;
dev->context_flag = 0;
dev->last_context = 0;
dev->if_version = 0;
dev->buf_sigio = NULL;
DRM_DEBUG("\n");
return 0;
}
static int drm_lastclose(struct drm_device *dev)
{
drm_magic_entry_t *pt, *next;
drm_local_map_t *map, *mapsave;
int i;
DRM_SPINLOCK_ASSERT(&dev->dev_lock);
DRM_DEBUG("\n");
if (dev->driver->lastclose != NULL)
dev->driver->lastclose(dev);
if (dev->irq_enabled)
drm_irq_uninstall(dev);
if (dev->unique) {
free(dev->unique, DRM_MEM_DRIVER);
dev->unique = NULL;
dev->unique_len = 0;
}
/* Clear pid list */
for (i = 0; i < DRM_HASH_SIZE; i++) {
for (pt = dev->magiclist[i].head; pt; pt = next) {
next = pt->next;
free(pt, DRM_MEM_MAGIC);
}
dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
}
DRM_UNLOCK();
drm_drawable_free_all(dev);
DRM_LOCK();
/* Clear AGP information */
if (dev->agp) {
drm_agp_mem_t *entry;
drm_agp_mem_t *nexte;
/* Remove AGP resources, but leave dev->agp intact until
* drm_unload is called.
*/
for (entry = dev->agp->memory; entry; entry = nexte) {
nexte = entry->next;
if (entry->bound)
drm_agp_unbind_memory(entry->handle);
drm_agp_free_memory(entry->handle);
free(entry, DRM_MEM_AGPLISTS);
}
dev->agp->memory = NULL;
if (dev->agp->acquired)
drm_agp_release(dev);
dev->agp->acquired = 0;
dev->agp->enabled = 0;
}
if (dev->sg != NULL) {
drm_sg_cleanup(dev->sg);
dev->sg = NULL;
}
TAILQ_FOREACH_SAFE(map, &dev->maplist, link, mapsave) {
if (!(map->flags & _DRM_DRIVER))
drm_rmmap(dev, map);
}
drm_dma_takedown(dev);
if (dev->lock.hw_lock) {
dev->lock.hw_lock = NULL; /* SHM removed */
dev->lock.file_priv = NULL;
DRM_WAKEUP_INT((void *)&dev->lock.lock_queue);
}
return 0;
}
static int drm_load(struct drm_device *dev)
{
int i, retcode;
DRM_DEBUG("\n");
TAILQ_INIT(&dev->maplist);
dev->map_unrhdr = new_unrhdr(1, ((1 << DRM_MAP_HANDLE_BITS) - 1), NULL);
if (dev->map_unrhdr == NULL) {
DRM_ERROR("Couldn't allocate map number allocator\n");
return EINVAL;
}
drm_mem_init();
drm_sysctl_init(dev);
TAILQ_INIT(&dev->files);
dev->counters = 6;
dev->types[0] = _DRM_STAT_LOCK;
dev->types[1] = _DRM_STAT_OPENS;
dev->types[2] = _DRM_STAT_CLOSES;
dev->types[3] = _DRM_STAT_IOCTLS;
dev->types[4] = _DRM_STAT_LOCKS;
dev->types[5] = _DRM_STAT_UNLOCKS;
for (i = 0; i < DRM_ARRAY_SIZE(dev->counts); i++)
atomic_set(&dev->counts[i], 0);
if (dev->driver->load != NULL) {
DRM_LOCK();
/* Shared code returns -errno. */
retcode = -dev->driver->load(dev,
dev->id_entry->driver_private);
if (pci_enable_busmaster(dev->device))
DRM_ERROR("Request to enable bus-master failed.\n");
DRM_UNLOCK();
if (retcode != 0)
goto error;
}
if (drm_core_has_AGP(dev)) {
if (drm_device_is_agp(dev))
dev->agp = drm_agp_init();
if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP) &&
dev->agp == NULL) {
DRM_ERROR("Card isn't AGP, or couldn't initialize "
"AGP.\n");
retcode = ENOMEM;
goto error;
}
if (dev->agp != NULL && dev->agp->info.ai_aperture_base != 0) {
if (drm_mtrr_add(dev->agp->info.ai_aperture_base,
dev->agp->info.ai_aperture_size, DRM_MTRR_WC) == 0)
dev->agp->mtrr = 1;
}
}
retcode = drm_ctxbitmap_init(dev);
if (retcode != 0) {
DRM_ERROR("Cannot allocate memory for context bitmap.\n");
goto error;
}
dev->drw_unrhdr = new_unrhdr(1, INT_MAX, NULL);
if (dev->drw_unrhdr == NULL) {
DRM_ERROR("Couldn't allocate drawable number allocator\n");
goto error;
}
DRM_INFO("Initialized %s %d.%d.%d %s\n",
dev->driver->name,
dev->driver->major,
dev->driver->minor,
dev->driver->patchlevel,
dev->driver->date);
return 0;
error:
drm_sysctl_cleanup(dev);
DRM_LOCK();
drm_lastclose(dev);
DRM_UNLOCK();
destroy_dev(dev->devnode);
mtx_destroy(&dev->drw_lock);
mtx_destroy(&dev->vbl_lock);
mtx_destroy(&dev->irq_lock);
mtx_destroy(&dev->dev_lock);
return retcode;
}
static void drm_unload(struct drm_device *dev)
{
int i;
DRM_DEBUG("\n");
drm_sysctl_cleanup(dev);
destroy_dev(dev->devnode);
drm_ctxbitmap_cleanup(dev);
if (dev->agp && dev->agp->mtrr) {
int __unused retcode;
retcode = drm_mtrr_del(0, dev->agp->info.ai_aperture_base,
dev->agp->info.ai_aperture_size, DRM_MTRR_WC);
DRM_DEBUG("mtrr_del = %d", retcode);
}
drm_vblank_cleanup(dev);
DRM_LOCK();
drm_lastclose(dev);
DRM_UNLOCK();
/* Clean up PCI resources allocated by drm_bufs.c. We're not really
* worried about resource consumption while the DRM is inactive (between
* lastclose and firstopen or unload) because these aren't actually
* taking up KVA, just keeping the PCI resource allocated.
*/
for (i = 0; i < DRM_MAX_PCI_RESOURCE; i++) {
if (dev->pcir[i] == NULL)
continue;
bus_release_resource(dev->device, SYS_RES_MEMORY,
dev->pcirid[i], dev->pcir[i]);
dev->pcir[i] = NULL;
}
if (dev->agp) {
free(dev->agp, DRM_MEM_AGPLISTS);
dev->agp = NULL;
}
if (dev->driver->unload != NULL) {
DRM_LOCK();
dev->driver->unload(dev);
DRM_UNLOCK();
}
delete_unrhdr(dev->drw_unrhdr);
delete_unrhdr(dev->map_unrhdr);
drm_mem_uninit();
if (pci_disable_busmaster(dev->device))
DRM_ERROR("Request to disable bus-master failed.\n");
mtx_destroy(&dev->drw_lock);
mtx_destroy(&dev->vbl_lock);
mtx_destroy(&dev->irq_lock);
mtx_destroy(&dev->dev_lock);
}
int drm_version(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_version *version = data;
int len;
#define DRM_COPY( name, value ) \
len = strlen( value ); \
if ( len > name##_len ) len = name##_len; \
name##_len = strlen( value ); \
if ( len && name ) { \
if ( DRM_COPY_TO_USER( name, value, len ) ) \
return EFAULT; \
}
version->version_major = dev->driver->major;
version->version_minor = dev->driver->minor;
version->version_patchlevel = dev->driver->patchlevel;
DRM_COPY(version->name, dev->driver->name);
DRM_COPY(version->date, dev->driver->date);
DRM_COPY(version->desc, dev->driver->desc);
return 0;
}
int drm_open(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p)
{
struct drm_device *dev = NULL;
int retcode = 0;
dev = kdev->si_drv1;
DRM_DEBUG("open_count = %d\n", dev->open_count);
retcode = drm_open_helper(kdev, flags, fmt, p, dev);
if (!retcode) {
atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
DRM_LOCK();
device_busy(dev->device);
if (!dev->open_count++)
retcode = drm_firstopen(dev);
DRM_UNLOCK();
}
return retcode;
}
void drm_close(void *data)
{
struct drm_file *file_priv = data;
struct drm_device *dev = file_priv->dev;
int retcode = 0;
DRM_DEBUG("open_count = %d\n", dev->open_count);
DRM_LOCK();
if (dev->driver->preclose != NULL)
dev->driver->preclose(dev, file_priv);
/* ========================================================
* Begin inline drm_release
*/
DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
DRM_CURRENTPID, (long)dev->device, dev->open_count);
if (dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)
&& dev->lock.file_priv == file_priv) {
DRM_DEBUG("Process %d dead, freeing lock for context %d\n",
DRM_CURRENTPID,
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
if (dev->driver->reclaim_buffers_locked != NULL)
dev->driver->reclaim_buffers_locked(dev, file_priv);
drm_lock_free(&dev->lock,
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
/* FIXME: may require heavy-handed reset of
hardware at this point, possibly
processed via a callback to the X
server. */
} else if (dev->driver->reclaim_buffers_locked != NULL &&
dev->lock.hw_lock != NULL) {
/* The lock is required to reclaim buffers */
for (;;) {
if (!dev->lock.hw_lock) {
/* Device has been unregistered */
retcode = EINTR;
break;
}
if (drm_lock_take(&dev->lock, DRM_KERNEL_CONTEXT)) {
dev->lock.file_priv = file_priv;
dev->lock.lock_time = jiffies;
atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
break; /* Got lock */
}
/* Contention */
retcode = mtx_sleep((void *)&dev->lock.lock_queue,
&dev->dev_lock, PCATCH, "drmlk2", 0);
if (retcode)
break;
}
if (retcode == 0) {
dev->driver->reclaim_buffers_locked(dev, file_priv);
drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT);
}
}
if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
!dev->driver->reclaim_buffers_locked)
drm_reclaim_buffers(dev, file_priv);
funsetown(&dev->buf_sigio);
if (dev->driver->postclose != NULL)
dev->driver->postclose(dev, file_priv);
TAILQ_REMOVE(&dev->files, file_priv, link);
free(file_priv, DRM_MEM_FILES);
/* ========================================================
* End inline drm_release
*/
atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
device_unbusy(dev->device);
if (--dev->open_count == 0) {
retcode = drm_lastclose(dev);
}
DRM_UNLOCK();
}
/* drm_ioctl is called whenever a process performs an ioctl on /dev/drm.
*/
int drm_ioctl(struct cdev *kdev, u_long cmd, caddr_t data, int flags,
DRM_STRUCTPROC *p)
{
struct drm_device *dev = drm_get_device_from_kdev(kdev);
int retcode = 0;
drm_ioctl_desc_t *ioctl;
int (*func)(struct drm_device *dev, void *data, struct drm_file *file_priv);
int nr = DRM_IOCTL_NR(cmd);
int is_driver_ioctl = 0;
struct drm_file *file_priv;
retcode = devfs_get_cdevpriv((void **)&file_priv);
if (retcode != 0) {
DRM_ERROR("can't find authenticator\n");
return EINVAL;
}
atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
++file_priv->ioctl_count;
DRM_DEBUG("pid=%d, cmd=0x%02lx, nr=0x%02x, dev 0x%lx, auth=%d\n",
DRM_CURRENTPID, cmd, nr, (long)dev->device,
file_priv->authenticated);
switch (cmd) {
case FIONBIO:
case FIOASYNC:
return 0;
case FIOSETOWN:
return fsetown(*(int *)data, &dev->buf_sigio);
case FIOGETOWN:
*(int *) data = fgetown(&dev->buf_sigio);
return 0;
}
if (IOCGROUP(cmd) != DRM_IOCTL_BASE) {
DRM_DEBUG("Bad ioctl group 0x%x\n", (int)IOCGROUP(cmd));
return EINVAL;
}
ioctl = &drm_ioctls[nr];
/* It's not a core DRM ioctl, try driver-specific. */
if (ioctl->func == NULL && nr >= DRM_COMMAND_BASE) {
/* The array entries begin at DRM_COMMAND_BASE ioctl nr */
nr -= DRM_COMMAND_BASE;
if (nr > dev->driver->max_ioctl) {
DRM_DEBUG("Bad driver ioctl number, 0x%x (of 0x%x)\n",
nr, dev->driver->max_ioctl);
return EINVAL;
}
ioctl = &dev->driver->ioctls[nr];
is_driver_ioctl = 1;
}
func = ioctl->func;
if (func == NULL) {
DRM_DEBUG("no function\n");
return EINVAL;
}
if (((ioctl->flags & DRM_ROOT_ONLY) && !DRM_SUSER(p)) ||
((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) ||
((ioctl->flags & DRM_MASTER) && !file_priv->master))
return EACCES;
if (is_driver_ioctl) {
DRM_LOCK();
/* shared code returns -errno */
retcode = -func(dev, data, file_priv);
DRM_UNLOCK();
} else {
retcode = func(dev, data, file_priv);
}
if (retcode != 0)
DRM_DEBUG(" returning %d\n", retcode);
return retcode;
}
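For context, the version and ioctl paths above are normally driven from userland roughly as follows. This is a minimal, hedged sketch and not part of the removed sources: the device node name and the drm.h include path vary by system, and error handling is abbreviated.

#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <drm.h>	/* struct drm_version, DRM_IOCTL_VERSION; path depends on the libdrm install */

int
main(void)
{
	struct drm_version v;
	int fd = open("/dev/dri/card0", O_RDWR);	/* node name is an assumption */

	if (fd < 0)
		return 1;
	memset(&v, 0, sizeof(v));
	/* First call: drm_version() only reports the string lengths. */
	if (ioctl(fd, DRM_IOCTL_VERSION, &v) != 0)
		return 1;
	v.name = malloc(v.name_len + 1);
	v.date = malloc(v.date_len + 1);
	v.desc = malloc(v.desc_len + 1);
	/* Second call: the DRM_COPY() macro fills the buffers we provided. */
	if (ioctl(fd, DRM_IOCTL_VERSION, &v) != 0)
		return 1;
	printf("%.*s %d.%d.%d\n", (int)v.name_len, v.name,
	    v.version_major, v.version_minor, v.version_patchlevel);
	return 0;
}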
drm_local_map_t *drm_getsarea(struct drm_device *dev)
{
drm_local_map_t *map;
DRM_SPINLOCK_ASSERT(&dev->dev_lock);
TAILQ_FOREACH(map, &dev->maplist, link) {
if (map->type == _DRM_SHM && (map->flags & _DRM_CONTAINS_LOCK))
return map;
}
return NULL;
}
#if DRM_LINUX
#include <sys/sysproto.h>
MODULE_DEPEND(DRIVER_NAME, linux, 1, 1, 1);
#define LINUX_IOCTL_DRM_MIN 0x6400
#define LINUX_IOCTL_DRM_MAX 0x64ff
static linux_ioctl_function_t drm_linux_ioctl;
static struct linux_ioctl_handler drm_handler = {drm_linux_ioctl,
LINUX_IOCTL_DRM_MIN, LINUX_IOCTL_DRM_MAX};
SYSINIT(drm_register, SI_SUB_KLD, SI_ORDER_MIDDLE,
linux_ioctl_register_handler, &drm_handler);
SYSUNINIT(drm_unregister, SI_SUB_KLD, SI_ORDER_MIDDLE,
linux_ioctl_unregister_handler, &drm_handler);
/* The bits for in/out are switched on Linux */
#define LINUX_IOC_IN IOC_OUT
#define LINUX_IOC_OUT IOC_IN
static int
drm_linux_ioctl(DRM_STRUCTPROC *p, struct linux_ioctl_args* args)
{
int error;
int cmd = args->cmd;
args->cmd &= ~(LINUX_IOC_IN | LINUX_IOC_OUT);
if (cmd & LINUX_IOC_IN)
args->cmd |= IOC_IN;
if (cmd & LINUX_IOC_OUT)
args->cmd |= IOC_OUT;
error = ioctl(p, (struct ioctl_args *)args);
return error;
}
#endif /* DRM_LINUX */

View File

@ -1,105 +0,0 @@
/*-
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
* Daryll Strauss <daryll@valinux.com>
* Gareth Hughes <gareth@valinux.com>
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/** @file drm_fops.c
* Support code for dealing with the file privates associated with each
* open of the DRM device.
*/
#include "dev/drm/drmP.h"
/* drm_open_helper is called whenever a process opens /dev/drm. */
int drm_open_helper(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p,
struct drm_device *dev)
{
struct drm_file *priv;
int retcode;
if (flags & O_EXCL)
return EBUSY; /* No exclusive opens */
dev->flags = flags;
DRM_DEBUG("pid = %d, device = %s\n", DRM_CURRENTPID, devtoname(kdev));
priv = malloc(sizeof(*priv), DRM_MEM_FILES, M_NOWAIT | M_ZERO);
if (priv == NULL) {
return ENOMEM;
}
DRM_LOCK();
priv->dev = dev;
priv->uid = p->td_ucred->cr_svuid;
priv->pid = p->td_proc->p_pid;
priv->ioctl_count = 0;
/* for compatibility root is always authenticated */
priv->authenticated = DRM_SUSER(p);
if (dev->driver->open) {
/* shared code returns -errno */
retcode = -dev->driver->open(dev, priv);
if (retcode != 0) {
free(priv, DRM_MEM_FILES);
DRM_UNLOCK();
return retcode;
}
}
/* first opener automatically becomes master */
priv->master = TAILQ_EMPTY(&dev->files);
TAILQ_INSERT_TAIL(&dev->files, priv, link);
DRM_UNLOCK();
kdev->si_drv1 = dev;
retcode = devfs_set_cdevpriv(priv, drm_close);
if (retcode != 0)
drm_close(priv);
return (retcode);
}
/* The drm_read and drm_poll functions are stubs to prevent spurious errors
* on older X Servers (4.3.0 and earlier) */
int drm_read(struct cdev *kdev, struct uio *uio, int ioflag)
{
return 0;
}
int drm_poll(struct cdev *kdev, int events, DRM_STRUCTPROC *p)
{
return 0;
}

View File

@ -1,181 +0,0 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*
**************************************************************************/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
* Simple open hash tab implementation.
*
* Authors:
* Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#include "dev/drm/drmP.h"
#include "dev/drm/drm_hashtab.h"
#include <sys/hash.h>
int drm_ht_create(struct drm_open_hash *ht, unsigned int order)
{
ht->size = 1 << order;
ht->order = order;
ht->table = NULL;
ht->table = hashinit_flags(ht->size, DRM_MEM_HASHTAB, &ht->mask,
HASH_NOWAIT);
if (!ht->table) {
DRM_ERROR("Out of memory for hash table\n");
return -ENOMEM;
}
return 0;
}
void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
{
struct drm_hash_item *entry;
struct drm_hash_item_list *h_list;
unsigned int hashed_key;
int count = 0;
hashed_key = hash32_buf(&key, sizeof(key), ht->order);
DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key);
h_list = &ht->table[hashed_key & ht->mask];
LIST_FOREACH(entry, h_list, head)
DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key);
}
static struct drm_hash_item *
drm_ht_find_key(struct drm_open_hash *ht, unsigned long key)
{
struct drm_hash_item *entry;
struct drm_hash_item_list *h_list;
unsigned int hashed_key;
hashed_key = hash32_buf(&key, sizeof(key), ht->order);
h_list = &ht->table[hashed_key & ht->mask];
LIST_FOREACH(entry, h_list, head) {
if (entry->key == key)
return entry;
if (entry->key > key)
break;
}
return NULL;
}
int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
{
struct drm_hash_item *entry, *parent;
struct drm_hash_item_list *h_list;
unsigned int hashed_key;
unsigned long key = item->key;
hashed_key = hash32_buf(&key, sizeof(key), ht->order);
h_list = &ht->table[hashed_key & ht->mask];
parent = NULL;
LIST_FOREACH(entry, h_list, head) {
if (entry->key == key)
return -EINVAL;
if (entry->key > key)
break;
parent = entry;
}
if (parent) {
LIST_INSERT_AFTER(parent, item, head);
} else {
LIST_INSERT_HEAD(h_list, item, head);
}
return 0;
}
/*
* Just insert an item and return any "bits" bit key that hasn't been
* used before.
*/
int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
unsigned long seed, int bits, int shift,
unsigned long add)
{
int ret;
unsigned long mask = (1 << bits) - 1;
unsigned long first, unshifted_key = 0;
unshifted_key = hash32_buf(&seed, sizeof(seed), unshifted_key);
first = unshifted_key;
do {
item->key = (unshifted_key << shift) + add;
ret = drm_ht_insert_item(ht, item);
if (ret)
unshifted_key = (unshifted_key + 1) & mask;
} while(ret && (unshifted_key != first));
if (ret) {
DRM_ERROR("Available key bit space exhausted\n");
return -EINVAL;
}
return 0;
}
int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
struct drm_hash_item **item)
{
struct drm_hash_item *entry;
entry = drm_ht_find_key(ht, key);
if (!entry)
return -EINVAL;
*item = entry;
return 0;
}
int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
{
struct drm_hash_item *entry;
entry = drm_ht_find_key(ht, key);
if (entry) {
LIST_REMOVE(entry, head);
return 0;
}
return -EINVAL;
}
int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
{
LIST_REMOVE(item, head);
return 0;
}
void drm_ht_remove(struct drm_open_hash *ht)
{
if (ht->table) {
hashdestroy(ht->table, DRM_MEM_HASHTAB, ht->mask);
ht->table = NULL;
}
}

View File

@ -1,68 +0,0 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*
**************************************************************************/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
* Simple open hash tab implementation.
*
* Authors:
* Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#ifndef DRM_HASHTAB_H
#define DRM_HASHTAB_H
#define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
struct drm_hash_item {
LIST_ENTRY(drm_hash_item) head;
unsigned long key;
};
struct drm_open_hash {
LIST_HEAD(drm_hash_item_list, drm_hash_item) *table;
unsigned int size;
unsigned int order;
unsigned long mask;
};
extern int drm_ht_create(struct drm_open_hash *ht, unsigned int order);
extern int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item);
extern int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
unsigned long seed, int bits, int shift,
unsigned long add);
extern int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, struct drm_hash_item **item);
extern void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key);
extern int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key);
extern int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item);
extern void drm_ht_remove(struct drm_open_hash *ht);
#endif
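As a quick illustration of how a consumer embeds these structures, here is a hedged, kernel-style sketch that is not part of the removed sources; struct obj, its field names, and the reuse of the DRM_MEM_DRIVER malloc type are purely illustrative.

struct obj {
	int			payload;
	struct drm_hash_item	hash;		/* the key lives in hash.key */
};

static int
obj_hash_demo(void)
{
	struct drm_open_hash ht;
	struct drm_hash_item *found;
	struct obj *o;
	int ret;

	ret = drm_ht_create(&ht, 8);			/* 1 << 8 buckets */
	if (ret != 0)
		return ret;
	o = malloc(sizeof(*o), DRM_MEM_DRIVER, M_WAITOK | M_ZERO);
	o->hash.key = 42;				/* caller picks the key */
	ret = drm_ht_insert_item(&ht, &o->hash);	/* -EINVAL if the key is taken */
	if (ret == 0) {
		if (drm_ht_find_item(&ht, 42, &found) == 0)
			o = drm_hash_entry(found, struct obj, hash);
		drm_ht_remove_item(&ht, &o->hash);
	}
	drm_ht_remove(&ht);
	free(o, DRM_MEM_DRIVER);
	return ret;
}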

View File

@ -1,43 +0,0 @@
/*-
* Copyright 2007 Red Hat, Inc
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/* This header file holds function prototypes and data types that are
* internal to the drm (not exported to user space) but shared across
* drivers and platforms */
#ifndef __DRM_INTERNAL_H__
#define __DRM_INTERNAL_H__
/**
* Drawable information.
*/
struct drm_drawable_info {
unsigned int num_rects;
struct drm_clip_rect *rects;
};
#endif

View File

@ -1,286 +0,0 @@
/*-
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
* Gareth Hughes <gareth@valinux.com>
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/** @file drm_ioctl.c
* Various minor DRM ioctls not applicable to other files, such as versioning
* information and reporting DRM information to userland.
*/
#include "dev/drm/drmP.h"
/*
* Beginning in revision 1.1 of the DRM interface, getunique will return
* a unique in the form pci:oooo:bb:dd.f (o=domain, b=bus, d=device, f=function)
* before setunique has been called. The format for the bus-specific part of
* the unique is not defined for any other bus.
*/
int drm_getunique(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_unique *u = data;
if (u->unique_len >= dev->unique_len) {
if (DRM_COPY_TO_USER(u->unique, dev->unique, dev->unique_len))
return EFAULT;
}
u->unique_len = dev->unique_len;
return 0;
}
/* Deprecated in DRM version 1.1, and will return EBUSY when setversion has
* requested version 1.1 or greater.
*/
int drm_setunique(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_unique *u = data;
int domain, bus, slot, func, ret;
char *busid;
/* Check and copy in the submitted Bus ID */
if (!u->unique_len || u->unique_len > 1024)
return EINVAL;
busid = malloc(u->unique_len + 1, DRM_MEM_DRIVER, M_WAITOK);
if (busid == NULL)
return ENOMEM;
if (DRM_COPY_FROM_USER(busid, u->unique, u->unique_len)) {
free(busid, DRM_MEM_DRIVER);
return EFAULT;
}
busid[u->unique_len] = '\0';
/* Return error if the busid submitted doesn't match the device's actual
* busid.
*/
ret = sscanf(busid, "PCI:%d:%d:%d", &bus, &slot, &func);
if (ret != 3) {
free(busid, DRM_MEM_DRIVER);
return EINVAL;
}
domain = bus >> 8;
bus &= 0xff;
if ((domain != dev->pci_domain) ||
(bus != dev->pci_bus) ||
(slot != dev->pci_slot) ||
(func != dev->pci_func)) {
free(busid, DRM_MEM_DRIVER);
return EINVAL;
}
/* Actually set the device's busid now. */
DRM_LOCK();
if (dev->unique_len || dev->unique) {
DRM_UNLOCK();
free(busid, DRM_MEM_DRIVER);
return EBUSY;
}
dev->unique_len = u->unique_len;
dev->unique = busid;
DRM_UNLOCK();
return 0;
}
static int
drm_set_busid(struct drm_device *dev)
{
DRM_LOCK();
if (dev->unique != NULL) {
DRM_UNLOCK();
return EBUSY;
}
dev->unique_len = 20;
dev->unique = malloc(dev->unique_len + 1, DRM_MEM_DRIVER, M_NOWAIT);
if (dev->unique == NULL) {
DRM_UNLOCK();
return ENOMEM;
}
snprintf(dev->unique, dev->unique_len, "pci:%04x:%02x:%02x.%1x",
dev->pci_domain, dev->pci_bus, dev->pci_slot, dev->pci_func);
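	/* e.g. "pci:0000:01:00.0" for domain 0, bus 1, slot 0, function 0 */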
DRM_UNLOCK();
return 0;
}
int drm_getmap(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_map *map = data;
drm_local_map_t *mapinlist;
int idx;
int i = 0;
idx = map->offset;
DRM_LOCK();
if (idx < 0) {
DRM_UNLOCK();
return EINVAL;
}
TAILQ_FOREACH(mapinlist, &dev->maplist, link) {
if (i == idx) {
map->offset = mapinlist->offset;
map->size = mapinlist->size;
map->type = mapinlist->type;
map->flags = mapinlist->flags;
map->handle = mapinlist->handle;
map->mtrr = mapinlist->mtrr;
break;
}
i++;
}
DRM_UNLOCK();
if (mapinlist == NULL)
return EINVAL;
return 0;
}
int drm_getclient(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_client *client = data;
struct drm_file *pt;
int idx;
int i = 0;
idx = client->idx;
DRM_LOCK();
TAILQ_FOREACH(pt, &dev->files, link) {
if (i == idx) {
client->auth = pt->authenticated;
client->pid = pt->pid;
client->uid = pt->uid;
client->magic = pt->magic;
client->iocs = pt->ioctl_count;
DRM_UNLOCK();
return 0;
}
i++;
}
DRM_UNLOCK();
return EINVAL;
}
int drm_getstats(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_stats *stats = data;
int i;
memset(stats, 0, sizeof(struct drm_stats));
DRM_LOCK();
for (i = 0; i < dev->counters; i++) {
if (dev->types[i] == _DRM_STAT_LOCK)
stats->data[i].value =
(dev->lock.hw_lock ? dev->lock.hw_lock->lock : 0);
else
stats->data[i].value = atomic_read(&dev->counts[i]);
stats->data[i].type = dev->types[i];
}
stats->count = dev->counters;
DRM_UNLOCK();
return 0;
}
#define DRM_IF_MAJOR 1
#define DRM_IF_MINOR 2
int drm_setversion(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_set_version *sv = data;
struct drm_set_version ver;
int if_version;
/* Save the incoming data, and set the response before continuing
* any further.
*/
ver = *sv;
sv->drm_di_major = DRM_IF_MAJOR;
sv->drm_di_minor = DRM_IF_MINOR;
sv->drm_dd_major = dev->driver->major;
sv->drm_dd_minor = dev->driver->minor;
if (ver.drm_di_major != -1) {
if (ver.drm_di_major != DRM_IF_MAJOR ||
ver.drm_di_minor < 0 || ver.drm_di_minor > DRM_IF_MINOR) {
return EINVAL;
}
if_version = DRM_IF_VERSION(ver.drm_di_major,
ver.drm_dd_minor);
dev->if_version = DRM_MAX(if_version, dev->if_version);
if (ver.drm_di_minor >= 1) {
/*
* Version 1.1 includes tying of DRM to specific device
*/
drm_set_busid(dev);
}
}
if (ver.drm_dd_major != -1) {
if (ver.drm_dd_major != dev->driver->major ||
ver.drm_dd_minor < 0 ||
ver.drm_dd_minor > dev->driver->minor)
{
return EINVAL;
}
}
return 0;
}
int drm_noop(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEBUG("\n");
return 0;
}

View File

@ -1,491 +0,0 @@
/*-
* Copyright 2003 Eric Anholt
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* ERIC ANHOLT BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <anholt@FreeBSD.org>
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/** @file drm_irq.c
* Support code for handling setup/teardown of interrupt handlers and
* handing interrupt handlers off to the drivers.
*/
#include "dev/drm/drmP.h"
#include "dev/drm/drm.h"
int drm_irq_by_busid(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_irq_busid *irq = data;
if ((irq->busnum >> 8) != dev->pci_domain ||
(irq->busnum & 0xff) != dev->pci_bus ||
irq->devnum != dev->pci_slot ||
irq->funcnum != dev->pci_func)
return EINVAL;
irq->irq = dev->irq;
DRM_DEBUG("%d:%d:%d => IRQ %d\n",
irq->busnum, irq->devnum, irq->funcnum, irq->irq);
return 0;
}
static irqreturn_t
drm_irq_handler_wrap(DRM_IRQ_ARGS)
{
struct drm_device *dev = arg;
DRM_SPINLOCK(&dev->irq_lock);
dev->driver->irq_handler(arg);
DRM_SPINUNLOCK(&dev->irq_lock);
}
static void vblank_disable_fn(void *arg)
{
struct drm_device *dev = (struct drm_device *)arg;
int i;
/* Make sure that we are called with the lock held */
mtx_assert(&dev->vbl_lock, MA_OWNED);
if (callout_pending(&dev->vblank_disable_timer)) {
/* callout was reset */
return;
}
if (!callout_active(&dev->vblank_disable_timer)) {
/* callout was stopped */
return;
}
callout_deactivate(&dev->vblank_disable_timer);
DRM_DEBUG("vblank_disable: %s\n", dev->vblank_disable_allowed ?
"allowed" : "denied");
if (!dev->vblank_disable_allowed)
return;
for (i = 0; i < dev->num_crtcs; i++) {
if (dev->vblank[i].refcount == 0 &&
dev->vblank[i].enabled && !dev->vblank[i].inmodeset) {
DRM_DEBUG("disabling vblank on crtc %d\n", i);
dev->vblank[i].last =
dev->driver->get_vblank_counter(dev, i);
dev->driver->disable_vblank(dev, i);
dev->vblank[i].enabled = 0;
}
}
}
void drm_vblank_cleanup(struct drm_device *dev)
{
/* Bail if the driver didn't call drm_vblank_init() */
if (dev->num_crtcs == 0)
return;
DRM_SPINLOCK(&dev->vbl_lock);
callout_stop(&dev->vblank_disable_timer);
DRM_SPINUNLOCK(&dev->vbl_lock);
callout_drain(&dev->vblank_disable_timer);
DRM_SPINLOCK(&dev->vbl_lock);
vblank_disable_fn((void *)dev);
DRM_SPINUNLOCK(&dev->vbl_lock);
free(dev->vblank, DRM_MEM_DRIVER);
dev->num_crtcs = 0;
}
int drm_vblank_init(struct drm_device *dev, int num_crtcs)
{
int i, ret = ENOMEM;
callout_init_mtx(&dev->vblank_disable_timer, &dev->vbl_lock, 0);
dev->num_crtcs = num_crtcs;
dev->vblank = malloc(sizeof(struct drm_vblank_info) * num_crtcs,
DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
if (!dev->vblank)
goto err;
DRM_DEBUG("\n");
/* Zero per-crtc vblank stuff */
DRM_SPINLOCK(&dev->vbl_lock);
for (i = 0; i < num_crtcs; i++) {
DRM_INIT_WAITQUEUE(&dev->vblank[i].queue);
dev->vblank[i].refcount = 0;
atomic_store_rel_32(&dev->vblank[i].count, 0);
}
dev->vblank_disable_allowed = 0;
DRM_SPINUNLOCK(&dev->vbl_lock);
return 0;
err:
drm_vblank_cleanup(dev);
return ret;
}
int drm_irq_install(struct drm_device *dev)
{
int crtc, retcode;
if (dev->irq == 0 || dev->dev_private == NULL)
return EINVAL;
DRM_DEBUG("irq=%d\n", dev->irq);
DRM_LOCK();
if (dev->irq_enabled) {
DRM_UNLOCK();
return EBUSY;
}
dev->irq_enabled = 1;
dev->context_flag = 0;
/* Before installing handler */
dev->driver->irq_preinstall(dev);
DRM_UNLOCK();
/* Install handler */
retcode = bus_setup_intr(dev->device, dev->irqr,
INTR_TYPE_TTY | INTR_MPSAFE,
NULL, drm_irq_handler_wrap, dev, &dev->irqh);
if (retcode != 0)
goto err;
/* After installing handler */
DRM_LOCK();
dev->driver->irq_postinstall(dev);
DRM_UNLOCK();
if (dev->driver->enable_vblank) {
DRM_SPINLOCK(&dev->vbl_lock);
for( crtc = 0 ; crtc < dev->num_crtcs ; crtc++) {
if (dev->driver->enable_vblank(dev, crtc) == 0) {
dev->vblank[crtc].enabled = 1;
}
}
callout_reset(&dev->vblank_disable_timer, 5 * DRM_HZ,
(timeout_t *)vblank_disable_fn, (void *)dev);
DRM_SPINUNLOCK(&dev->vbl_lock);
}
return 0;
err:
DRM_LOCK();
dev->irq_enabled = 0;
DRM_UNLOCK();
return retcode;
}
int drm_irq_uninstall(struct drm_device *dev)
{
int crtc;
if (!dev->irq_enabled)
return EINVAL;
dev->irq_enabled = 0;
/*
* Wake up any waiters so they don't hang.
*/
DRM_SPINLOCK(&dev->vbl_lock);
for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
if (dev->vblank[crtc].enabled) {
DRM_WAKEUP(&dev->vblank[crtc].queue);
dev->vblank[crtc].last =
dev->driver->get_vblank_counter(dev, crtc);
dev->vblank[crtc].enabled = 0;
}
}
DRM_SPINUNLOCK(&dev->vbl_lock);
DRM_DEBUG("irq=%d\n", dev->irq);
dev->driver->irq_uninstall(dev);
DRM_UNLOCK();
bus_teardown_intr(dev->device, dev->irqr, dev->irqh);
DRM_LOCK();
return 0;
}
int drm_control(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_control *ctl = data;
int err;
switch (ctl->func) {
case DRM_INST_HANDLER:
/* Handle drivers whose DRM used to require IRQ setup but the driver
* no longer does.
*/
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return 0;
if (dev->if_version < DRM_IF_VERSION(1, 2) &&
ctl->irq != dev->irq)
return EINVAL;
return drm_irq_install(dev);
case DRM_UNINST_HANDLER:
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return 0;
DRM_LOCK();
err = drm_irq_uninstall(dev);
DRM_UNLOCK();
return err;
default:
return EINVAL;
}
}
u32 drm_vblank_count(struct drm_device *dev, int crtc)
{
return atomic_load_acq_32(&dev->vblank[crtc].count);
}
static void drm_update_vblank_count(struct drm_device *dev, int crtc)
{
u32 cur_vblank, diff;
/*
* Interrupts were disabled prior to this call, so deal with counter
* wrap if needed.
* NOTE! It's possible we lost a full dev->max_vblank_count events
* here if the register is small or we had vblank interrupts off for
* a long time.
*/
cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
diff = cur_vblank - dev->vblank[crtc].last;
if (cur_vblank < dev->vblank[crtc].last) {
diff += dev->max_vblank_count;
DRM_DEBUG("vblank[%d].last=0x%x, cur_vblank=0x%x => diff=0x%x\n",
crtc, dev->vblank[crtc].last, cur_vblank, diff);
}
DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
crtc, diff);
atomic_add_rel_32(&dev->vblank[crtc].count, diff);
}
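The wrap handling above is easiest to see with concrete numbers. The following standalone sketch is not part of the removed code; it assumes a hypothetical 24-bit hardware frame counter.

#include <assert.h>
#include <stdint.h>

int
main(void)
{
	/* Hypothetical 24-bit hardware counter: it wraps at 0x1000000. */
	uint32_t max_vblank_count = 0x1000000;
	uint32_t last = 0xfffffe;	/* value cached at the previous update */
	uint32_t cur = 0x000002;	/* counter wrapped past zero since then */
	uint32_t diff;

	diff = cur - last;		/* 32-bit unsigned wrap: 0xff000004 */
	if (cur < last)
		diff += max_vblank_count;
	assert(diff == 4);		/* four vblanks elapsed across the wrap */
	return 0;
}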
int drm_vblank_get(struct drm_device *dev, int crtc)
{
int ret = 0;
/* Make sure that we are called with the lock held */
mtx_assert(&dev->vbl_lock, MA_OWNED);
/* Going from 0->1 means we have to enable interrupts again */
if (++dev->vblank[crtc].refcount == 1 &&
!dev->vblank[crtc].enabled) {
ret = dev->driver->enable_vblank(dev, crtc);
DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
if (ret)
--dev->vblank[crtc].refcount;
else {
dev->vblank[crtc].enabled = 1;
drm_update_vblank_count(dev, crtc);
}
}
if (dev->vblank[crtc].enabled)
dev->vblank[crtc].last =
dev->driver->get_vblank_counter(dev, crtc);
return ret;
}
void drm_vblank_put(struct drm_device *dev, int crtc)
{
/* Make sure that we are called with the lock held */
mtx_assert(&dev->vbl_lock, MA_OWNED);
KASSERT(dev->vblank[crtc].refcount > 0,
("invalid refcount"));
/* Last user schedules interrupt disable */
if (--dev->vblank[crtc].refcount == 0)
callout_reset(&dev->vblank_disable_timer, 5 * DRM_HZ,
(timeout_t *)vblank_disable_fn, (void *)dev);
}
int drm_modeset_ctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_modeset_ctl *modeset = data;
int crtc, ret = 0;
/* If drm_vblank_init() hasn't been called yet, just no-op */
if (!dev->num_crtcs)
goto out;
crtc = modeset->crtc;
if (crtc < 0 || crtc >= dev->num_crtcs) {
ret = EINVAL;
goto out;
}
/*
* To avoid all the problems that might happen if interrupts
* were enabled/disabled around or between these calls, we just
* have the kernel take a reference on the CRTC (just once though
* to avoid corrupting the count if multiple, mismatched calls occur),
* so that interrupts remain enabled in the interim.
*/
switch (modeset->cmd) {
case _DRM_PRE_MODESET:
DRM_DEBUG("pre-modeset, crtc %d\n", crtc);
DRM_SPINLOCK(&dev->vbl_lock);
if (!dev->vblank[crtc].inmodeset) {
dev->vblank[crtc].inmodeset = 0x1;
if (drm_vblank_get(dev, crtc) == 0)
dev->vblank[crtc].inmodeset |= 0x2;
}
DRM_SPINUNLOCK(&dev->vbl_lock);
break;
case _DRM_POST_MODESET:
DRM_DEBUG("post-modeset, crtc %d\n", crtc);
DRM_SPINLOCK(&dev->vbl_lock);
if (dev->vblank[crtc].inmodeset) {
if (dev->vblank[crtc].inmodeset & 0x2)
drm_vblank_put(dev, crtc);
dev->vblank[crtc].inmodeset = 0;
}
dev->vblank_disable_allowed = 1;
DRM_SPINUNLOCK(&dev->vbl_lock);
break;
default:
ret = EINVAL;
break;
}
out:
return ret;
}
int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
union drm_wait_vblank *vblwait = data;
unsigned int flags, seq, crtc;
int ret = 0;
if (!dev->irq_enabled)
return EINVAL;
if (vblwait->request.type &
~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) {
DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n",
vblwait->request.type,
(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK));
return EINVAL;
}
flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
if (crtc >= dev->num_crtcs)
return EINVAL;
DRM_SPINLOCK(&dev->vbl_lock);
ret = drm_vblank_get(dev, crtc);
DRM_SPINUNLOCK(&dev->vbl_lock);
if (ret) {
DRM_ERROR("failed to acquire vblank counter, %d\n", ret);
return ret;
}
seq = drm_vblank_count(dev, crtc);
switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
case _DRM_VBLANK_RELATIVE:
vblwait->request.sequence += seq;
vblwait->request.type &= ~_DRM_VBLANK_RELATIVE;
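		/* FALLTHROUGH */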
case _DRM_VBLANK_ABSOLUTE:
break;
default:
ret = EINVAL;
goto done;
}
if ((flags & _DRM_VBLANK_NEXTONMISS) &&
(seq - vblwait->request.sequence) <= (1<<23)) {
vblwait->request.sequence = seq + 1;
}
if (flags & _DRM_VBLANK_SIGNAL) {
/* There have never been any consumers */
ret = EINVAL;
} else {
DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
vblwait->request.sequence, crtc);
for ( ret = 0 ; !ret && !(((drm_vblank_count(dev, crtc) -
vblwait->request.sequence) <= (1 << 23)) ||
!dev->irq_enabled) ; ) {
mtx_lock(&dev->irq_lock);
if (!(((drm_vblank_count(dev, crtc) -
vblwait->request.sequence) <= (1 << 23)) ||
!dev->irq_enabled))
ret = mtx_sleep(&dev->vblank[crtc].queue,
&dev->irq_lock, PCATCH, "vblwtq",
DRM_HZ);
mtx_unlock(&dev->irq_lock);
}
if (ret != EINTR && ret != ERESTART) {
struct timeval now;
microtime(&now);
vblwait->reply.tval_sec = now.tv_sec;
vblwait->reply.tval_usec = now.tv_usec;
vblwait->reply.sequence = drm_vblank_count(dev, crtc);
DRM_DEBUG("returning %d to client, irq_enabled %d\n",
vblwait->reply.sequence, dev->irq_enabled);
} else {
DRM_DEBUG("vblank wait interrupted by signal\n");
}
}
done:
DRM_SPINLOCK(&dev->vbl_lock);
drm_vblank_put(dev, crtc);
DRM_SPINUNLOCK(&dev->vbl_lock);
return ret;
}
void drm_handle_vblank(struct drm_device *dev, int crtc)
{
atomic_add_rel_32(&dev->vblank[crtc].count, 1);
DRM_WAKEUP(&dev->vblank[crtc].queue);
}

View File

@ -1,110 +0,0 @@
/* drm_linux_list.h -- linux list functions for the BSDs.
* Created: Mon Apr 7 14:30:16 1999 by anholt@FreeBSD.org
*/
/*-
* Copyright 2003 Eric Anholt
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <anholt@FreeBSD.org>
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#ifndef _DRM_LINUX_LIST_H_
#define _DRM_LINUX_LIST_H_
struct list_head {
struct list_head *next, *prev;
};
#define list_entry(ptr, type, member) container_of(ptr,type,member)
#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
static __inline__ void
INIT_LIST_HEAD(struct list_head *head) {
(head)->next = head;
(head)->prev = head;
}
static __inline__ int
list_empty(struct list_head *head) {
return (head)->next == head;
}
static __inline__ void
list_add(struct list_head *new, struct list_head *head) {
(head)->next->prev = new;
(new)->next = (head)->next;
(new)->prev = head;
(head)->next = new;
}
static __inline__ void
list_add_tail(struct list_head *entry, struct list_head *head) {
(entry)->prev = (head)->prev;
(entry)->next = head;
(head)->prev->next = entry;
(head)->prev = entry;
}
static __inline__ void
list_del(struct list_head *entry) {
(entry)->next->prev = (entry)->prev;
(entry)->prev->next = (entry)->next;
}
static __inline__ void
list_del_init(struct list_head *entry) {
(entry)->next->prev = (entry)->prev;
(entry)->prev->next = (entry)->next;
INIT_LIST_HEAD(entry);
}
#define list_for_each(entry, head) \
for (entry = (head)->next; entry != head; entry = (entry)->next)
#define list_for_each_prev(entry, head) \
for (entry = (head)->prev; entry != (head); \
entry = entry->prev)
#define list_for_each_safe(entry, temp, head) \
for (entry = (head)->next, temp = (entry)->next; \
entry != head; \
entry = temp, temp = entry->next)
/**
* list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
* @pos: the type * to use as a loop cursor.
* @n: another type * to use as temporary storage
* @head: the head for your list.
* @member: the name of the list_struct within the struct.
*/
#define list_for_each_entry_safe(pos, n, head, member) \
for (pos = list_entry((head)->next, __typeof(*pos), member), \
n = list_entry(pos->member.next, __typeof(*pos), member); \
&pos->member != (head); \
pos = n, n = list_entry(n->member.next, __typeof(*n), member))
#endif /* _DRM_LINUX_LIST_H_ */
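To show how these macros are typically consumed, here is a hedged sketch of draining a list; struct job and the reuse of the DRM_MEM_DRIVER malloc type are illustrative stand-ins, not part of the removed code.

struct job {
	int			id;
	struct list_head	link;
};

/* Free every entry on a pending list; the _safe variant caches the next
 * pointer before the current node is unlinked and freed. */
static void
drain_jobs(struct list_head *pending)
{
	struct job *j, *tmp;

	list_for_each_entry_safe(j, tmp, pending, link) {
		list_del(&j->link);
		free(j, DRM_MEM_DRIVER);
	}
}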

View File

@ -1,199 +0,0 @@
/*-
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
* Gareth Hughes <gareth@valinux.com>
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/** @file drm_lock.c
* Implementation of the ioctls and other support code for dealing with the
* hardware lock.
*
* The DRM hardware lock is a shared structure between the kernel and userland.
*
* On uncontended access where the new context was the last context, the
* client may take the lock without dropping down into the kernel, using atomic
* compare-and-set.
*
* If the client finds during compare-and-set that it was not the last owner
* of the lock, it calls the DRM lock ioctl, which may sleep waiting for the
* lock, and may have side-effects of kernel-managed context switching.
*
* When the client releases the lock, if the lock is marked as being contended
* by another client, then the DRM unlock ioctl is called so that the
* contending client may be woken up.
*/
#include "dev/drm/drmP.h"
int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_lock *lock = data;
int ret = 0;
if (lock->context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
DRM_CURRENTPID, lock->context);
return EINVAL;
}
DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
lock->context, DRM_CURRENTPID, dev->lock.hw_lock->lock,
lock->flags);
if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) &&
lock->context < 0)
return EINVAL;
DRM_LOCK();
for (;;) {
if (drm_lock_take(&dev->lock, lock->context)) {
dev->lock.file_priv = file_priv;
dev->lock.lock_time = jiffies;
atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
break; /* Got lock */
}
/* Contention */
ret = mtx_sleep((void *)&dev->lock.lock_queue, &dev->dev_lock,
PCATCH, "drmlk2", 0);
if (ret != 0)
break;
}
DRM_UNLOCK();
if (ret == ERESTART)
DRM_DEBUG("restarting syscall\n");
else
DRM_DEBUG("%d %s\n", lock->context,
ret ? "interrupted" : "has lock");
if (ret != 0)
return ret;
/* XXX: Add signal blocking here */
if (dev->driver->dma_quiescent != NULL &&
(lock->flags & _DRM_LOCK_QUIESCENT))
dev->driver->dma_quiescent(dev);
return 0;
}
int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_lock *lock = data;
DRM_DEBUG("%d (pid %d) requests unlock (0x%08x), flags = 0x%08x\n",
lock->context, DRM_CURRENTPID, dev->lock.hw_lock->lock,
lock->flags);
if (lock->context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
DRM_CURRENTPID, lock->context);
return EINVAL;
}
atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
DRM_LOCK();
drm_lock_transfer(&dev->lock, DRM_KERNEL_CONTEXT);
if (drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT)) {
DRM_ERROR("\n");
}
DRM_UNLOCK();
return 0;
}
int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context)
{
volatile unsigned int *lock = &lock_data->hw_lock->lock;
unsigned int old, new;
do {
old = *lock;
if (old & _DRM_LOCK_HELD)
new = old | _DRM_LOCK_CONT;
else
new = context | _DRM_LOCK_HELD;
} while (!atomic_cmpset_int(lock, old, new));
if (_DRM_LOCKING_CONTEXT(old) == context) {
if (old & _DRM_LOCK_HELD) {
if (context != DRM_KERNEL_CONTEXT) {
DRM_ERROR("%d holds heavyweight lock\n",
context);
}
return 0;
}
}
if (new == (context | _DRM_LOCK_HELD)) {
/* Have lock */
return 1;
}
return 0;
}
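The file comment above describes a userland fast path that never enters the kernel on an uncontended take. The sketch below is a hedged illustration of that idea only; libdrm's real fast path differs in detail, and the GCC __sync builtin stands in for whatever atomic the client library actually uses.

/* Userland mirror of drm_lock_take(): try to claim the shared lock word in
 * the SAREA with one compare-and-swap, and fall back to the DRM lock ioctl
 * on contention. */
static int
client_try_lock(volatile unsigned int *lock, unsigned int context)
{
	unsigned int old = *lock;

	if (old & _DRM_LOCK_HELD)
		return 0;	/* already held: caller issues the lock ioctl */
	return __sync_bool_compare_and_swap(lock, old,
	    context | _DRM_LOCK_HELD);
}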
/* This takes a lock forcibly and hands it to context. Should ONLY be used
inside *_unlock to give lock to kernel before calling *_dma_schedule. */
int drm_lock_transfer(struct drm_lock_data *lock_data, unsigned int context)
{
volatile unsigned int *lock = &lock_data->hw_lock->lock;
unsigned int old, new;
lock_data->file_priv = NULL;
do {
old = *lock;
new = context | _DRM_LOCK_HELD;
} while (!atomic_cmpset_int(lock, old, new));
return 1;
}
int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context)
{
volatile unsigned int *lock = &lock_data->hw_lock->lock;
unsigned int old, new;
lock_data->file_priv = NULL;
do {
old = *lock;
new = 0;
} while (!atomic_cmpset_int(lock, old, new));
if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
DRM_ERROR("%d freed heavyweight lock held by %d\n",
context, _DRM_LOCKING_CONTEXT(old));
return 1;
}
DRM_WAKEUP_INT((void *)&lock_data->lock_queue);
return 0;
}

View File

@ -1,115 +0,0 @@
/*-
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
* Gareth Hughes <gareth@valinux.com>
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/** @file drm_memory.c
* Wrappers for kernel memory allocation routines, and MTRR management support.
*
* This file previously implemented a memory consumption tracking system using
* the "area" argument for various different types of allocations, but that
* has been stripped out for now.
*/
#include "dev/drm/drmP.h"
MALLOC_DEFINE(DRM_MEM_DMA, "drm_dma", "DRM DMA Data Structures");
MALLOC_DEFINE(DRM_MEM_SAREA, "drm_sarea", "DRM SAREA Data Structures");
MALLOC_DEFINE(DRM_MEM_DRIVER, "drm_driver", "DRM DRIVER Data Structures");
MALLOC_DEFINE(DRM_MEM_MAGIC, "drm_magic", "DRM MAGIC Data Structures");
MALLOC_DEFINE(DRM_MEM_IOCTLS, "drm_ioctls", "DRM IOCTL Data Structures");
MALLOC_DEFINE(DRM_MEM_MAPS, "drm_maps", "DRM MAP Data Structures");
MALLOC_DEFINE(DRM_MEM_BUFS, "drm_bufs", "DRM BUFFER Data Structures");
MALLOC_DEFINE(DRM_MEM_SEGS, "drm_segs", "DRM SEGMENTS Data Structures");
MALLOC_DEFINE(DRM_MEM_PAGES, "drm_pages", "DRM PAGES Data Structures");
MALLOC_DEFINE(DRM_MEM_FILES, "drm_files", "DRM FILE Data Structures");
MALLOC_DEFINE(DRM_MEM_QUEUES, "drm_queues", "DRM QUEUE Data Structures");
MALLOC_DEFINE(DRM_MEM_CMDS, "drm_cmds", "DRM COMMAND Data Structures");
MALLOC_DEFINE(DRM_MEM_MAPPINGS, "drm_mapping", "DRM MAPPING Data Structures");
MALLOC_DEFINE(DRM_MEM_BUFLISTS, "drm_buflists", "DRM BUFLISTS Data Structures");
MALLOC_DEFINE(DRM_MEM_AGPLISTS, "drm_agplists", "DRM AGPLISTS Data Structures");
MALLOC_DEFINE(DRM_MEM_CTXBITMAP, "drm_ctxbitmap",
"DRM CTXBITMAP Data Structures");
MALLOC_DEFINE(DRM_MEM_SGLISTS, "drm_sglists", "DRM SGLISTS Data Structures");
MALLOC_DEFINE(DRM_MEM_DRAWABLE, "drm_drawable", "DRM DRAWABLE Data Structures");
MALLOC_DEFINE(DRM_MEM_MM, "drm_sman", "DRM MEMORY MANAGER Data Structures");
MALLOC_DEFINE(DRM_MEM_HASHTAB, "drm_hashtab", "DRM HASHTABLE Data Structures");
void drm_mem_init(void)
{
}
void drm_mem_uninit(void)
{
}
void *drm_ioremap_wc(struct drm_device *dev, drm_local_map_t *map)
{
return pmap_mapdev_attr(map->offset, map->size, VM_MEMATTR_WRITE_COMBINING);
}
void *drm_ioremap(struct drm_device *dev, drm_local_map_t *map)
{
return pmap_mapdev(map->offset, map->size);
}
void drm_ioremapfree(drm_local_map_t *map)
{
pmap_unmapdev((vm_offset_t) map->virtual, map->size);
}
int
drm_mtrr_add(unsigned long offset, size_t size, int flags)
{
int act;
struct mem_range_desc mrdesc;
mrdesc.mr_base = offset;
mrdesc.mr_len = size;
mrdesc.mr_flags = flags;
act = MEMRANGE_SET_UPDATE;
strlcpy(mrdesc.mr_owner, "drm", sizeof(mrdesc.mr_owner));
return mem_range_attr_set(&mrdesc, &act);
}
int
drm_mtrr_del(int __unused handle, unsigned long offset, size_t size, int flags)
{
int act;
struct mem_range_desc mrdesc;
mrdesc.mr_base = offset;
mrdesc.mr_len = size;
mrdesc.mr_flags = flags;
act = MEMRANGE_SET_REMOVE;
strlcpy(mrdesc.mr_owner, "drm", sizeof(mrdesc.mr_owner));
return mem_range_attr_set(&mrdesc, &act);
}

View File

@ -1,369 +0,0 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*
**************************************************************************/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
* Generic simple memory manager implementation. Intended to be used as a base
* class implementation for more advanced memory managers.
*
* Note that the algorithm used is quite simple and there might be substantial
* performance gains if a smarter free list is implemented. Currently it is just an
* unordered stack of free regions. This could easily be improved with an RB-tree
* instead, at least if heavy fragmentation is expected.
*
* Aligned allocations can also see improvement.
*
* Authors:
* Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#include "dev/drm/drmP.h"
#include "dev/drm/drm_mm.h"
#define MM_UNUSED_TARGET 4
unsigned long drm_mm_tail_space(struct drm_mm *mm)
{
struct list_head *tail_node;
struct drm_mm_node *entry;
tail_node = mm->ml_entry.prev;
entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
if (!entry->free)
return 0;
return entry->size;
}
int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
{
struct list_head *tail_node;
struct drm_mm_node *entry;
tail_node = mm->ml_entry.prev;
entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
if (!entry->free)
return -ENOMEM;
if (entry->size <= size)
return -ENOMEM;
entry->size -= size;
return 0;
}
static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
struct drm_mm_node *child;
if (atomic)
child = malloc(sizeof(*child), DRM_MEM_MM, M_NOWAIT);
else
child = malloc(sizeof(*child), DRM_MEM_MM, M_WAITOK);
if (unlikely(child == NULL)) {
mtx_lock(&mm->unused_lock);
if (list_empty(&mm->unused_nodes))
child = NULL;
else {
child =
list_entry(mm->unused_nodes.next,
struct drm_mm_node, fl_entry);
list_del(&child->fl_entry);
--mm->num_unused;
}
mtx_unlock(&mm->unused_lock);
}
return child;
}
int drm_mm_pre_get(struct drm_mm *mm)
{
struct drm_mm_node *node;
mtx_lock(&mm->unused_lock);
while (mm->num_unused < MM_UNUSED_TARGET) {
mtx_unlock(&mm->unused_lock);
node = malloc(sizeof(*node), DRM_MEM_MM, M_WAITOK);
mtx_lock(&mm->unused_lock);
if (unlikely(node == NULL)) {
int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
mtx_unlock(&mm->unused_lock);
return ret;
}
++mm->num_unused;
list_add_tail(&node->fl_entry, &mm->unused_nodes);
}
mtx_unlock(&mm->unused_lock);
return 0;
}
static int drm_mm_create_tail_node(struct drm_mm *mm,
unsigned long start,
unsigned long size, int atomic)
{
struct drm_mm_node *child;
child = drm_mm_kmalloc(mm, atomic);
if (unlikely(child == NULL))
return -ENOMEM;
child->free = 1;
child->size = size;
child->start = start;
child->mm = mm;
list_add_tail(&child->ml_entry, &mm->ml_entry);
list_add_tail(&child->fl_entry, &mm->fl_entry);
return 0;
}
int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size, int atomic)
{
struct list_head *tail_node;
struct drm_mm_node *entry;
tail_node = mm->ml_entry.prev;
entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
if (!entry->free) {
return drm_mm_create_tail_node(mm, entry->start + entry->size,
size, atomic);
}
entry->size += size;
return 0;
}
static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
unsigned long size,
int atomic)
{
struct drm_mm_node *child;
child = drm_mm_kmalloc(parent->mm, atomic);
if (unlikely(child == NULL))
return NULL;
INIT_LIST_HEAD(&child->fl_entry);
child->free = 0;
child->size = size;
child->start = parent->start;
child->mm = parent->mm;
list_add_tail(&child->ml_entry, &parent->ml_entry);
INIT_LIST_HEAD(&child->fl_entry);
parent->size -= size;
parent->start += size;
return child;
}
struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
unsigned long size,
unsigned alignment,
int atomic)
{
struct drm_mm_node *align_splitoff = NULL;
unsigned tmp = 0;
if (alignment)
tmp = node->start % alignment;
if (tmp) {
align_splitoff =
drm_mm_split_at_start(node, alignment - tmp, atomic);
if (unlikely(align_splitoff == NULL))
return NULL;
}
if (node->size == size) {
list_del_init(&node->fl_entry);
node->free = 0;
} else {
node = drm_mm_split_at_start(node, size, atomic);
}
if (align_splitoff)
drm_mm_put_block(align_splitoff);
return node;
}
/*
* Put a block. Merge with the previous and / or next block if they are free.
* Otherwise add to the free stack.
*/
void drm_mm_put_block(struct drm_mm_node *cur)
{
struct drm_mm *mm = cur->mm;
struct list_head *cur_head = &cur->ml_entry;
struct list_head *root_head = &mm->ml_entry;
struct drm_mm_node *prev_node = NULL;
struct drm_mm_node *next_node;
int merged = 0;
if (cur_head->prev != root_head) {
prev_node =
list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
if (prev_node->free) {
prev_node->size += cur->size;
merged = 1;
}
}
if (cur_head->next != root_head) {
next_node =
list_entry(cur_head->next, struct drm_mm_node, ml_entry);
if (next_node->free) {
if (merged) {
prev_node->size += next_node->size;
list_del(&next_node->ml_entry);
list_del(&next_node->fl_entry);
if (mm->num_unused < MM_UNUSED_TARGET) {
list_add(&next_node->fl_entry,
&mm->unused_nodes);
++mm->num_unused;
} else
free(next_node, DRM_MEM_MM);
} else {
next_node->size += cur->size;
next_node->start = cur->start;
merged = 1;
}
}
}
if (!merged) {
cur->free = 1;
list_add(&cur->fl_entry, &mm->fl_entry);
} else {
list_del(&cur->ml_entry);
if (mm->num_unused < MM_UNUSED_TARGET) {
list_add(&cur->fl_entry, &mm->unused_nodes);
++mm->num_unused;
} else
free(cur, DRM_MEM_MM);
}
}
struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
unsigned long size,
unsigned alignment, int best_match)
{
struct list_head *list;
const struct list_head *free_stack = &mm->fl_entry;
struct drm_mm_node *entry;
struct drm_mm_node *best;
unsigned long best_size;
unsigned wasted;
best = NULL;
best_size = ~0UL;
list_for_each(list, free_stack) {
entry = list_entry(list, struct drm_mm_node, fl_entry);
wasted = 0;
if (entry->size < size)
continue;
if (alignment) {
register unsigned tmp = entry->start % alignment;
if (tmp)
wasted += alignment - tmp;
}
if (entry->size >= size + wasted) {
if (!best_match)
return entry;
if (entry->size < best_size) {
best = entry;
best_size = entry->size;
}
}
}
return best;
}
int drm_mm_clean(struct drm_mm * mm)
{
struct list_head *head = &mm->ml_entry;
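	/* "clean" means the list holds exactly one node: the original free block */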
return (head->next->next == head);
}
int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
{
INIT_LIST_HEAD(&mm->ml_entry);
INIT_LIST_HEAD(&mm->fl_entry);
INIT_LIST_HEAD(&mm->unused_nodes);
mm->num_unused = 0;
mtx_init(&mm->unused_lock, "drm_unused", NULL, MTX_DEF);
/* XXX This could be non-atomic but gets called from a locked path */
return drm_mm_create_tail_node(mm, start, size, 1);
}
void drm_mm_takedown(struct drm_mm * mm)
{
struct list_head *bnode = mm->fl_entry.next;
struct drm_mm_node *entry;
struct drm_mm_node *next;
entry = list_entry(bnode, struct drm_mm_node, fl_entry);
if (entry->ml_entry.next != &mm->ml_entry ||
entry->fl_entry.next != &mm->fl_entry) {
DRM_ERROR("Memory manager not clean. Delaying takedown\n");
return;
}
list_del(&entry->fl_entry);
list_del(&entry->ml_entry);
free(entry, DRM_MEM_MM);
mtx_lock(&mm->unused_lock);
list_for_each_entry_safe(entry, next, &mm->unused_nodes, fl_entry) {
list_del(&entry->fl_entry);
free(entry, DRM_MEM_MM);
--mm->num_unused;
}
mtx_unlock(&mm->unused_lock);
mtx_destroy(&mm->unused_lock);
KASSERT(mm->num_unused == 0, ("num_unused != 0"));
}

View File

@ -1,100 +0,0 @@
/**************************************************************************
*
* Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX. USA.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*
**************************************************************************/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
* Authors:
* Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
*/
#ifndef _DRM_MM_H_
#define _DRM_MM_H_
#include "dev/drm/drm_linux_list.h"
struct drm_mm_node {
struct list_head fl_entry;
struct list_head ml_entry;
int free;
unsigned long start;
unsigned long size;
struct drm_mm *mm;
void *private;
};
struct drm_mm {
struct list_head fl_entry;
struct list_head ml_entry;
struct list_head unused_nodes;
int num_unused;
struct mtx unused_lock;
};
/*
* Basic range manager support (drm_mm.c)
*/
extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
unsigned long size,
unsigned alignment,
int atomic);
static inline struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
unsigned long size,
unsigned alignment)
{
return drm_mm_get_block_generic(parent, size, alignment, 0);
}
static inline struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *parent,
unsigned long size,
unsigned alignment)
{
return drm_mm_get_block_generic(parent, size, alignment, 1);
}
extern void drm_mm_put_block(struct drm_mm_node *cur);
extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
unsigned long size,
unsigned alignment,
int best_match);
extern int drm_mm_init(struct drm_mm *mm, unsigned long start,
unsigned long size);
extern void drm_mm_takedown(struct drm_mm *mm);
extern int drm_mm_clean(struct drm_mm *mm);
extern unsigned long drm_mm_tail_space(struct drm_mm *mm);
extern int drm_mm_remove_space_from_tail(struct drm_mm *mm,
unsigned long size);
extern int drm_mm_add_space_to_tail(struct drm_mm *mm,
unsigned long size, int atomic);
extern int drm_mm_pre_get(struct drm_mm *mm);
static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
{
return block->mm;
}
#endif
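The _atomic variants above exist so callers that already hold a lock can avoid sleeping allocations. A hedged sketch of that pattern follows; it assumes drm_mm_pre_get() tops up the manager's cached nodes (as the implementation being removed does) and that the example_alloc_locked wrapper and its mtx argument are purely illustrative.

#include "dev/drm/drmP.h"
#include "dev/drm/drm_mm.h"

/* Sketch: refill the node cache while sleeping is allowed, allocate locked. */
static struct drm_mm_node *
example_alloc_locked(struct drm_mm *mm, struct mtx *lock,
    unsigned long size, unsigned alignment)
{
	struct drm_mm_node *hole, *node = NULL;

	if (drm_mm_pre_get(mm) != 0)	/* may sleep; assumed 0 on success */
		return (NULL);

	mtx_lock(lock);
	hole = drm_mm_search_free(mm, size, alignment, 0);
	if (hole != NULL)
		node = drm_mm_get_block_atomic(hole, size, alignment);
	mtx_unlock(lock);

	return (node);
}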

View File

@ -1,129 +0,0 @@
/*-
* Copyright 2003 Eric Anholt.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/**
* \file drm_pci.h
* \brief PCI consistent, DMA-accessible memory allocation.
*
* \author Eric Anholt <anholt@FreeBSD.org>
*/
#include "dev/drm/drmP.h"
/**********************************************************************/
/** \name PCI memory */
/*@{*/
static void
drm_pci_busdma_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
drm_dma_handle_t *dmah = arg;
if (error != 0)
return;
KASSERT(nsegs == 1, ("drm_pci_busdma_callback: bad dma segment count"));
dmah->busaddr = segs[0].ds_addr;
}
/**
* \brief Allocate a physically contiguous DMA-accessible consistent
* memory block.
*/
drm_dma_handle_t *
drm_pci_alloc(struct drm_device *dev, size_t size,
size_t align, dma_addr_t maxaddr)
{
drm_dma_handle_t *dmah;
int ret;
/* Need power-of-two alignment, so fail the allocation if it isn't. */
if ((align & (align - 1)) != 0) {
DRM_ERROR("drm_pci_alloc with non-power-of-two alignment %d\n",
(int)align);
return NULL;
}
dmah = malloc(sizeof(drm_dma_handle_t), DRM_MEM_DMA, M_ZERO | M_NOWAIT);
if (dmah == NULL)
return NULL;
/* Make sure we aren't holding locks here */
mtx_assert(&dev->dev_lock, MA_NOTOWNED);
if (mtx_owned(&dev->dev_lock))
DRM_ERROR("called while holding dev_lock\n");
mtx_assert(&dev->dma_lock, MA_NOTOWNED);
if (mtx_owned(&dev->dma_lock))
DRM_ERROR("called while holding dma_lock\n");
ret = bus_dma_tag_create(NULL, align, 0, /* tag, align, boundary */
maxaddr, BUS_SPACE_MAXADDR, /* lowaddr, highaddr */
NULL, NULL, /* filtfunc, filtfuncargs */
size, 1, size, /* maxsize, nsegs, maxsegsize */
0, NULL, NULL, /* flags, lockfunc, lockfuncargs */
&dmah->tag);
if (ret != 0) {
free(dmah, DRM_MEM_DMA);
return NULL;
}
ret = bus_dmamem_alloc(dmah->tag, &dmah->vaddr,
BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_NOCACHE, &dmah->map);
if (ret != 0) {
bus_dma_tag_destroy(dmah->tag);
free(dmah, DRM_MEM_DMA);
return NULL;
}
ret = bus_dmamap_load(dmah->tag, dmah->map, dmah->vaddr, size,
drm_pci_busdma_callback, dmah, BUS_DMA_NOWAIT);
if (ret != 0) {
bus_dmamem_free(dmah->tag, dmah->vaddr, dmah->map);
bus_dma_tag_destroy(dmah->tag);
free(dmah, DRM_MEM_DMA);
return NULL;
}
return dmah;
}
/**
* \brief Free a DMA-accessible consistent memory block.
*/
void
drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah)
{
if (dmah == NULL)
return;
bus_dmamap_unload(dmah->tag, dmah->map);
bus_dmamem_free(dmah->tag, dmah->vaddr, dmah->map);
bus_dma_tag_destroy(dmah->tag);
free(dmah, DRM_MEM_DMA);
}
/*@}*/
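As a usage note for the removed helpers: a minimal sketch of allocating and releasing a consistent DMA buffer with drm_pci_alloc()/drm_pci_free(). The example_pci_dma name, the 16-byte alignment, and the 32-bit address ceiling are assumptions for illustration.

#include "dev/drm/drmP.h"

/* Sketch: one page of DMA-coherent memory kept below 4 GB. */
static int
example_pci_dma(struct drm_device *dev)
{
	drm_dma_handle_t *dmah;

	dmah = drm_pci_alloc(dev, PAGE_SIZE, 16, BUS_SPACE_MAXADDR_32BIT);
	if (dmah == NULL)
		return (ENOMEM);

	/* dmah->vaddr is the kernel mapping, dmah->busaddr the bus address. */
	bzero(dmah->vaddr, PAGE_SIZE);

	drm_pci_free(dev, dmah);
	return (0);
}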

View File

@ -1,348 +0,0 @@
/*
* $FreeBSD$
*/
/*
This file is auto-generated from the drm_pciids.txt in the DRM CVS
Please contact dri-devel@lists.sf.net to add new cards to this list
*/
#define r128_PCI_IDS \
{0x1002, 0x4c45, 0, "ATI Rage 128 Mobility LE (PCI)"}, \
{0x1002, 0x4c46, 0, "ATI Rage 128 Mobility LF (AGP)"}, \
{0x1002, 0x4d46, 0, "ATI Rage 128 Mobility MF (AGP)"}, \
{0x1002, 0x4d4c, 0, "ATI Rage 128 Mobility ML (AGP)"}, \
{0x1002, 0x5041, 0, "ATI Rage 128 Pro PA (PCI)"}, \
{0x1002, 0x5042, 0, "ATI Rage 128 Pro PB (AGP)"}, \
{0x1002, 0x5043, 0, "ATI Rage 128 Pro PC (AGP)"}, \
{0x1002, 0x5044, 0, "ATI Rage 128 Pro PD (PCI)"}, \
{0x1002, 0x5045, 0, "ATI Rage 128 Pro PE (AGP)"}, \
{0x1002, 0x5046, 0, "ATI Rage 128 Pro PF (AGP)"}, \
{0x1002, 0x5047, 0, "ATI Rage 128 Pro PG (PCI)"}, \
{0x1002, 0x5048, 0, "ATI Rage 128 Pro PH (AGP)"}, \
{0x1002, 0x5049, 0, "ATI Rage 128 Pro PI (AGP)"}, \
{0x1002, 0x504A, 0, "ATI Rage 128 Pro PJ (PCI)"}, \
{0x1002, 0x504B, 0, "ATI Rage 128 Pro PK (AGP)"}, \
{0x1002, 0x504C, 0, "ATI Rage 128 Pro PL (AGP)"}, \
{0x1002, 0x504D, 0, "ATI Rage 128 Pro PM (PCI)"}, \
{0x1002, 0x504E, 0, "ATI Rage 128 Pro PN (AGP)"}, \
{0x1002, 0x504F, 0, "ATI Rage 128 Pro PO (AGP)"}, \
{0x1002, 0x5050, 0, "ATI Rage 128 Pro PP (PCI)"}, \
{0x1002, 0x5051, 0, "ATI Rage 128 Pro PQ (AGP)"}, \
{0x1002, 0x5052, 0, "ATI Rage 128 Pro PR (PCI)"}, \
{0x1002, 0x5053, 0, "ATI Rage 128 Pro PS (PCI)"}, \
{0x1002, 0x5054, 0, "ATI Rage 128 Pro PT (AGP)"}, \
{0x1002, 0x5055, 0, "ATI Rage 128 Pro PU (AGP)"}, \
{0x1002, 0x5056, 0, "ATI Rage 128 Pro PV (PCI)"}, \
{0x1002, 0x5057, 0, "ATI Rage 128 Pro PW (AGP)"}, \
{0x1002, 0x5058, 0, "ATI Rage 128 Pro PX (AGP)"}, \
{0x1002, 0x5245, 0, "ATI Rage 128 RE (PCI)"}, \
{0x1002, 0x5246, 0, "ATI Rage 128 RF (AGP)"}, \
{0x1002, 0x5247, 0, "ATI Rage 128 RG (AGP)"}, \
{0x1002, 0x524b, 0, "ATI Rage 128 RK (PCI)"}, \
{0x1002, 0x524c, 0, "ATI Rage 128 RL (AGP)"}, \
{0x1002, 0x534d, 0, "ATI Rage 128 SM (AGP)"}, \
{0x1002, 0x5446, 0, "ATI Rage 128 Pro Ultra TF (AGP)"}, \
{0x1002, 0x544C, 0, "ATI Rage 128 Pro Ultra TL (AGP)"}, \
{0x1002, 0x5452, 0, "ATI Rage 128 Pro Ultra TR (AGP)"}, \
{0, 0, 0, NULL}
#define mga_PCI_IDS \
{0x102b, 0x0520, MGA_CARD_TYPE_G200, "Matrox G200 (PCI)"}, \
{0x102b, 0x0521, MGA_CARD_TYPE_G200, "Matrox G200 (AGP)"}, \
{0x102b, 0x0525, MGA_CARD_TYPE_G400, "Matrox G400/G450 (AGP)"}, \
{0x102b, 0x2527, MGA_CARD_TYPE_G550, "Matrox G550 (AGP)"}, \
{0, 0, 0, NULL}
#define mach64_PCI_IDS \
{0x1002, 0x4749, 0, "3D Rage Pro"}, \
{0x1002, 0x4750, 0, "3D Rage Pro 215GP"}, \
{0x1002, 0x4751, 0, "3D Rage Pro 215GQ"}, \
{0x1002, 0x4742, 0, "3D Rage Pro AGP 1X/2X"}, \
{0x1002, 0x4744, 0, "3D Rage Pro AGP 1X"}, \
{0x1002, 0x4c49, 0, "3D Rage LT Pro"}, \
{0x1002, 0x4c50, 0, "3D Rage LT Pro"}, \
{0x1002, 0x4c51, 0, "3D Rage LT Pro"}, \
{0x1002, 0x4c42, 0, "3D Rage LT Pro AGP-133"}, \
{0x1002, 0x4c44, 0, "3D Rage LT Pro AGP-66"}, \
{0x1002, 0x474c, 0, "Rage XC"}, \
{0x1002, 0x474f, 0, "Rage XL"}, \
{0x1002, 0x4752, 0, "Rage XL"}, \
{0x1002, 0x4753, 0, "Rage XC"}, \
{0x1002, 0x474d, 0, "Rage XL AGP 2X"}, \
{0x1002, 0x474e, 0, "Rage XC AGP"}, \
{0x1002, 0x4c52, 0, "Rage Mobility P/M"}, \
{0x1002, 0x4c53, 0, "Rage Mobility L"}, \
{0x1002, 0x4c4d, 0, "Rage Mobility P/M AGP 2X"}, \
{0x1002, 0x4c4e, 0, "Rage Mobility L AGP 2X"}, \
{0, 0, 0, NULL}
#define sis_PCI_IDS \
{0x1039, 0x0300, 0, "SiS 300/305"}, \
{0x1039, 0x5300, 0, "SiS 540"}, \
{0x1039, 0x6300, 0, "SiS 630"}, \
{0x1039, 0x6330, SIS_CHIP_315, "SiS 661"}, \
{0x1039, 0x7300, 0, "SiS 730"}, \
{0x18CA, 0x0040, SIS_CHIP_315, "Volari V3XT/V5/V8"}, \
{0x18CA, 0x0042, SIS_CHIP_315, "Volari Unknown"}, \
{0, 0, 0, NULL}
#define tdfx_PCI_IDS \
{0x121a, 0x0003, 0, "3dfx Voodoo Banshee"}, \
{0x121a, 0x0004, 0, "3dfx Voodoo3 2000"}, \
{0x121a, 0x0005, 0, "3dfx Voodoo3 3000"}, \
{0x121a, 0x0007, 0, "3dfx Voodoo4 4500"}, \
{0x121a, 0x0009, 0, "3dfx Voodoo5 5500"}, \
{0x121a, 0x000b, 0, "3dfx Voodoo4 4200"}, \
{0, 0, 0, NULL}
#define viadrv_PCI_IDS \
{0x1106, 0x3022, 0, "VIA CLE266 3022"}, \
{0x1106, 0x3118, VIA_PRO_GROUP_A, "VIA CN400 / PM8X0"}, \
{0x1106, 0x3122, 0, "VIA CLE266"}, \
{0x1106, 0x7205, 0, "VIA KM400"}, \
{0x1106, 0x3108, 0, "VIA K8M800"}, \
{0x1106, 0x3344, 0, "VIA CN700 / VM800 / P4M800Pro"}, \
{0x1106, 0x3343, 0, "VIA P4M890"}, \
{0x1106, 0x3230, VIA_DX9_0, "VIA K8M890"}, \
{0x1106, 0x3157, VIA_PRO_GROUP_A, "VIA CX700"}, \
{0x1106, 0x3371, VIA_DX9_0, "VIA P4M900 / VN896"}, \
{0, 0, 0, NULL}
#define i810_PCI_IDS \
{0x8086, 0x7121, 0, "Intel i810 GMCH"}, \
{0x8086, 0x7123, 0, "Intel i810-DC100 GMCH"}, \
{0x8086, 0x7125, 0, "Intel i810E GMCH"}, \
{0x8086, 0x1132, 0, "Intel i815 GMCH"}, \
{0, 0, 0, NULL}
#define i830_PCI_IDS \
{0x8086, 0x3577, 0, "Intel i830M GMCH"}, \
{0x8086, 0x2562, 0, "Intel i845G GMCH"}, \
{0x8086, 0x3582, 0, "Intel i852GM/i855GM GMCH"}, \
{0x8086, 0x2572, 0, "Intel i865G GMCH"}, \
{0, 0, 0, NULL}
#define gamma_PCI_IDS \
{0x3d3d, 0x0008, 0, "3DLabs GLINT Gamma G1"}, \
{0, 0, 0, NULL}
#define savage_PCI_IDS \
{0x5333, 0x8a20, S3_SAVAGE3D, "Savage 3D"}, \
{0x5333, 0x8a21, S3_SAVAGE3D, "Savage 3D/MV"}, \
{0x5333, 0x8a22, S3_SAVAGE4, "Savage4"}, \
{0x5333, 0x8a23, S3_SAVAGE4, "Savage4"}, \
{0x5333, 0x8c10, S3_SAVAGE_MX, "Savage/MX-MV"}, \
{0x5333, 0x8c11, S3_SAVAGE_MX, "Savage/MX"}, \
{0x5333, 0x8c12, S3_SAVAGE_MX, "Savage/IX-MV"}, \
{0x5333, 0x8c13, S3_SAVAGE_MX, "Savage/IX"}, \
{0x5333, 0x8c22, S3_SUPERSAVAGE, "SuperSavage MX/128"}, \
{0x5333, 0x8c24, S3_SUPERSAVAGE, "SuperSavage MX/64"}, \
{0x5333, 0x8c26, S3_SUPERSAVAGE, "SuperSavage MX/64C"}, \
{0x5333, 0x8c2a, S3_SUPERSAVAGE, "SuperSavage IX/128 SDR"}, \
{0x5333, 0x8c2b, S3_SUPERSAVAGE, "SuperSavage IX/128 DDR"}, \
{0x5333, 0x8c2c, S3_SUPERSAVAGE, "SuperSavage IX/64 SDR"}, \
{0x5333, 0x8c2d, S3_SUPERSAVAGE, "SuperSavage IX/64 DDR"}, \
{0x5333, 0x8c2e, S3_SUPERSAVAGE, "SuperSavage IX/C SDR"}, \
{0x5333, 0x8c2f, S3_SUPERSAVAGE, "SuperSavage IX/C DDR"}, \
{0x5333, 0x8a25, S3_PROSAVAGE, "ProSavage PM133"}, \
{0x5333, 0x8a26, S3_PROSAVAGE, "ProSavage KM133"}, \
{0x5333, 0x8d01, S3_TWISTER, "ProSavage Twister PN133"}, \
{0x5333, 0x8d02, S3_TWISTER, "ProSavage Twister KN133"}, \
{0x5333, 0x8d03, S3_PROSAVAGEDDR, "ProSavage DDR"}, \
{0x5333, 0x8d04, S3_PROSAVAGEDDR, "ProSavage DDR-K"}, \
{0, 0, 0, NULL}
#define ffb_PCI_IDS \
{0, 0, 0, NULL}
#define imagine_PCI_IDS \
{0x105d, 0x2309, IMAGINE_128, "Imagine 128"}, \
{0x105d, 0x2339, IMAGINE_128_2, "Imagine 128-II"}, \
{0x105d, 0x493d, IMAGINE_T2R, "Ticket to Ride"}, \
{0x105d, 0x5348, IMAGINE_REV4, "Revolution IV"}, \
{0, 0, 0, NULL}
#define nv_PCI_IDS \
{0x10DE, 0x0020, NV04, "NVidia RIVA TNT"}, \
{0x10DE, 0x0028, NV04, "NVidia RIVA TNT2"}, \
{0x10DE, 0x002A, NV04, "NVidia Unknown TNT2"}, \
{0x10DE, 0x002C, NV04, "NVidia Vanta"}, \
{0x10DE, 0x0029, NV04, "NVidia RIVA TNT2 Ultra"}, \
{0x10DE, 0x002D, NV04, "NVidia RIVA TNT2 Model 64"}, \
{0x10DE, 0x00A0, NV04, "NVidia Aladdin TNT2"}, \
{0x10DE, 0x0100, NV10, "NVidia GeForce 256"}, \
{0x10DE, 0x0101, NV10, "NVidia GeForce DDR"}, \
{0x10DE, 0x0103, NV10, "NVidia Quadro"}, \
{0x10DE, 0x0110, NV10, "NVidia GeForce2 MX/MX 400"}, \
{0x10DE, 0x0111, NV10, "NVidia GeForce2 MX 100/200"}, \
{0x10DE, 0x0112, NV10, "NVidia GeForce2 Go"}, \
{0x10DE, 0x0113, NV10, "NVidia Quadro2 MXR/EX/Go"}, \
{0x10DE, 0x0150, NV10, "NVidia GeForce2 GTS"}, \
{0x10DE, 0x0151, NV10, "NVidia GeForce2 Ti"}, \
{0x10DE, 0x0152, NV10, "NVidia GeForce2 Ultra"}, \
{0x10DE, 0x0153, NV10, "NVidia Quadro2 Pro"}, \
{0x10DE, 0x0170, NV10, "NVidia GeForce4 MX 460"}, \
{0x10DE, 0x0171, NV10, "NVidia GeForce4 MX 440"}, \
{0x10DE, 0x0172, NV10, "NVidia GeForce4 MX 420"}, \
{0x10DE, 0x0173, NV10, "NVidia GeForce4 MX 440-SE"}, \
{0x10DE, 0x0174, NV10, "NVidia GeForce4 440 Go"}, \
{0x10DE, 0x0175, NV10, "NVidia GeForce4 420 Go"}, \
{0x10DE, 0x0176, NV10, "NVidia GeForce4 420 Go 32M"}, \
{0x10DE, 0x0177, NV10, "NVidia GeForce4 460 Go"}, \
{0x10DE, 0x0178, NV10, "NVidia Quadro4 550 XGL"}, \
{0x10DE, 0x0179, NV10, "NVidia GeForce4"}, \
{0x10DE, 0x017A, NV10, "NVidia Quadro4 NVS"}, \
{0x10DE, 0x017C, NV10, "NVidia Quadro4 500 GoGL"}, \
{0x10DE, 0x017D, NV10, "NVidia GeForce4 410 Go 16M"}, \
{0x10DE, 0x0181, NV10, "NVidia GeForce4 MX 440 with AGP8X"}, \
{0x10DE, 0x0182, NV10, "NVidia GeForce4 MX 440SE with AGP8X"}, \
{0x10DE, 0x0183, NV10, "NVidia GeForce4 MX 420 with AGP8X"}, \
{0x10DE, 0x0185, NV10, "NVidia GeForce4 MX 4000"}, \
{0x10DE, 0x0186, NV10, "NVidia GeForce4 448 Go"}, \
{0x10DE, 0x0187, NV10, "NVidia GeForce4 488 Go"}, \
{0x10DE, 0x0188, NV10, "NVidia Quadro4 580 XGL"}, \
{0x10DE, 0x0189, NV10, "NVidia GeForce4 MX with AGP8X (Mac)"}, \
{0x10DE, 0x018A, NV10, "NVidia Quadro4 280 NVS"}, \
{0x10DE, 0x018B, NV10, "NVidia Quadro4 380 XGL"}, \
{0x10DE, 0x018C, NV10, "NVidia Quadro NVS 50 PCI"}, \
{0x10DE, 0x018D, NV10, "NVidia GeForce4 448 Go"}, \
{0x10DE, 0x01A0, NV10, "NVidia GeForce2 Integrated GPU"}, \
{0x10DE, 0x01F0, NV10, "NVidia GeForce4 MX Integrated GPU"}, \
{0x10DE, 0x0200, NV20, "NVidia GeForce3"}, \
{0x10DE, 0x0201, NV20, "NVidia GeForce3 Ti 200"}, \
{0x10DE, 0x0202, NV20, "NVidia GeForce3 Ti 500"}, \
{0x10DE, 0x0203, NV20, "NVidia Quadro DCC"}, \
{0x10DE, 0x0250, NV20, "NVidia GeForce4 Ti 4600"}, \
{0x10DE, 0x0251, NV20, "NVidia GeForce4 Ti 4400"}, \
{0x10DE, 0x0252, NV20, "NVidia 0x0252"}, \
{0x10DE, 0x0253, NV20, "NVidia GeForce4 Ti 4200"}, \
{0x10DE, 0x0258, NV20, "NVidia Quadro4 900 XGL"}, \
{0x10DE, 0x0259, NV20, "NVidia Quadro4 750 XGL"}, \
{0x10DE, 0x025B, NV20, "NVidia Quadro4 700 XGL"}, \
{0x10DE, 0x0280, NV20, "NVidia GeForce4 Ti 4800"}, \
{0x10DE, 0x0281, NV20, "NVidia GeForce4 Ti 4200 with AGP8X"}, \
{0x10DE, 0x0282, NV20, "NVidia GeForce4 Ti 4800 SE"}, \
{0x10DE, 0x0286, NV20, "NVidia GeForce4 4200 Go"}, \
{0x10DE, 0x028C, NV20, "NVidia Quadro4 700 GoGL"}, \
{0x10DE, 0x0288, NV20, "NVidia Quadro4 980 XGL"}, \
{0x10DE, 0x0289, NV20, "NVidia Quadro4 780 XGL"}, \
{0x10DE, 0x0301, NV30, "NVidia GeForce FX 5800 Ultra"}, \
{0x10DE, 0x0302, NV30, "NVidia GeForce FX 5800"}, \
{0x10DE, 0x0308, NV30, "NVidia Quadro FX 2000"}, \
{0x10DE, 0x0309, NV30, "NVidia Quadro FX 1000"}, \
{0x10DE, 0x0311, NV30, "NVidia GeForce FX 5600 Ultra"}, \
{0x10DE, 0x0312, NV30, "NVidia GeForce FX 5600"}, \
{0x10DE, 0x0313, NV30, "NVidia 0x0313"}, \
{0x10DE, 0x0314, NV30, "NVidia GeForce FX 5600SE"}, \
{0x10DE, 0x0316, NV30, "NVidia 0x0316"}, \
{0x10DE, 0x0317, NV30, "NVidia 0x0317"}, \
{0x10DE, 0x031A, NV30, "NVidia GeForce FX Go5600"}, \
{0x10DE, 0x031B, NV30, "NVidia GeForce FX Go5650"}, \
{0x10DE, 0x031C, NV30, "NVidia Quadro FX Go700"}, \
{0x10DE, 0x031D, NV30, "NVidia 0x031D"}, \
{0x10DE, 0x031E, NV30, "NVidia 0x031E"}, \
{0x10DE, 0x031F, NV30, "NVidia 0x031F"}, \
{0x10DE, 0x0320, NV30, "NVidia GeForce FX 5200"}, \
{0x10DE, 0x0321, NV30, "NVidia GeForce FX 5200 Ultra"}, \
{0x10DE, 0x0322, NV30, "NVidia GeForce FX 5200"}, \
{0x10DE, 0x0323, NV30, "NVidia GeForce FX 5200SE"}, \
{0x10DE, 0x0324, NV30, "NVidia GeForce FX Go5200"}, \
{0x10DE, 0x0325, NV30, "NVidia GeForce FX Go5250"}, \
{0x10DE, 0x0326, NV30, "NVidia GeForce FX 5500"}, \
{0x10DE, 0x0327, NV30, "NVidia GeForce FX 5100"}, \
{0x10DE, 0x0328, NV30, "NVidia GeForce FX Go5200 32M/64M"}, \
{0x10DE, 0x0329, NV30, "NVidia GeForce FX 5200 (Mac)"}, \
{0x10DE, 0x032A, NV30, "NVidia Quadro NVS 280 PCI"}, \
{0x10DE, 0x032B, NV30, "NVidia Quadro FX 500/600 PCI"}, \
{0x10DE, 0x032C, NV30, "NVidia GeForce FX Go53xx Series"}, \
{0x10DE, 0x032D, NV30, "NVidia GeForce FX Go5100"}, \
{0x10DE, 0x032F, NV30, "NVidia 0x032F"}, \
{0x10DE, 0x0330, NV30, "NVidia GeForce FX 5900 Ultra"}, \
{0x10DE, 0x0331, NV30, "NVidia GeForce FX 5900"}, \
{0x10DE, 0x0332, NV30, "NVidia GeForce FX 5900XT"}, \
{0x10DE, 0x0333, NV30, "NVidia GeForce FX 5950 Ultra"}, \
{0x10DE, 0x033F, NV30, "NVidia Quadro FX 700"}, \
{0x10DE, 0x0334, NV30, "NVidia GeForce FX 5900ZT"}, \
{0x10DE, 0x0338, NV30, "NVidia Quadro FX 3000"}, \
{0x10DE, 0x0341, NV30, "NVidia GeForce FX 5700 Ultra"}, \
{0x10DE, 0x0342, NV30, "NVidia GeForce FX 5700"}, \
{0x10DE, 0x0343, NV30, "NVidia GeForce FX 5700LE"}, \
{0x10DE, 0x0344, NV30, "NVidia GeForce FX 5700VE"}, \
{0x10DE, 0x0345, NV30, "NVidia 0x0345"}, \
{0x10DE, 0x0347, NV30, "NVidia GeForce FX Go5700"}, \
{0x10DE, 0x0348, NV30, "NVidia GeForce FX Go5700"}, \
{0x10DE, 0x0349, NV30, "NVidia 0x0349"}, \
{0x10DE, 0x034B, NV30, "NVidia 0x034B"}, \
{0x10DE, 0x034C, NV30, "NVidia Quadro FX Go1000"}, \
{0x10DE, 0x034E, NV30, "NVidia Quadro FX 1100"}, \
{0x10DE, 0x034F, NV30, "NVidia 0x034F"}, \
{0x10DE, 0x0040, NV40, "NVidia GeForce 6800 Ultra"}, \
{0x10DE, 0x0041, NV40, "NVidia GeForce 6800"}, \
{0x10DE, 0x0042, NV40, "NVidia GeForce 6800 LE"}, \
{0x10DE, 0x0043, NV40, "NVidia 0x0043"}, \
{0x10DE, 0x0045, NV40, "NVidia GeForce 6800 GT"}, \
{0x10DE, 0x0046, NV40, "NVidia GeForce 6800 GT"}, \
{0x10DE, 0x0049, NV40, "NVidia 0x0049"}, \
{0x10DE, 0x004E, NV40, "NVidia Quadro FX 4000"}, \
{0x10DE, 0x00C0, NV40, "NVidia 0x00C0"}, \
{0x10DE, 0x00C1, NV40, "NVidia GeForce 6800"}, \
{0x10DE, 0x00C2, NV40, "NVidia GeForce 6800 LE"}, \
{0x10DE, 0x00C8, NV40, "NVidia GeForce Go 6800"}, \
{0x10DE, 0x00C9, NV40, "NVidia GeForce Go 6800 Ultra"}, \
{0x10DE, 0x00CC, NV40, "NVidia Quadro FX Go1400"}, \
{0x10DE, 0x00CD, NV40, "NVidia Quadro FX 3450/4000 SDI"}, \
{0x10DE, 0x00CE, NV40, "NVidia Quadro FX 1400"}, \
{0x10de, 0x00f0, NV40, "Nvidia GeForce 6600 GT"}, \
{0x10de, 0x00f1, NV40, "Nvidia GeForce 6600 GT"}, \
{0x10DE, 0x0140, NV40, "NVidia GeForce 6600 GT"}, \
{0x10DE, 0x0141, NV40, "NVidia GeForce 6600"}, \
{0x10DE, 0x0142, NV40, "NVidia GeForce 6600 LE"}, \
{0x10DE, 0x0143, NV40, "NVidia 0x0143"}, \
{0x10DE, 0x0144, NV40, "NVidia GeForce Go 6600"}, \
{0x10DE, 0x0145, NV40, "NVidia GeForce 6610 XL"}, \
{0x10DE, 0x0146, NV40, "NVidia GeForce Go 6600 TE/6200 TE"}, \
{0x10DE, 0x0147, NV40, "NVidia GeForce 6700 XL"}, \
{0x10DE, 0x0148, NV40, "NVidia GeForce Go 6600"}, \
{0x10DE, 0x0149, NV40, "NVidia GeForce Go 6600 GT"}, \
{0x10DE, 0x014B, NV40, "NVidia 0x014B"}, \
{0x10DE, 0x014C, NV40, "NVidia 0x014C"}, \
{0x10DE, 0x014D, NV40, "NVidia 0x014D"}, \
{0x10DE, 0x014E, NV40, "NVidia Quadro FX 540"}, \
{0x10DE, 0x014F, NV40, "NVidia GeForce 6200"}, \
{0x10DE, 0x0160, NV40, "NVidia 0x0160"}, \
{0x10DE, 0x0161, NV40, "NVidia GeForce 6200 TurboCache(TM)"}, \
{0x10DE, 0x0162, NV40, "NVidia GeForce 6200SE TurboCache(TM)"}, \
{0x10DE, 0x0163, NV40, "NVidia 0x0163"}, \
{0x10DE, 0x0164, NV40, "NVidia GeForce Go 6200"}, \
{0x10DE, 0x0165, NV40, "NVidia Quadro NVS 285"}, \
{0x10DE, 0x0166, NV40, "NVidia GeForce Go 6400"}, \
{0x10DE, 0x0167, NV40, "NVidia GeForce Go 6200"}, \
{0x10DE, 0x0168, NV40, "NVidia GeForce Go 6400"}, \
{0x10DE, 0x0169, NV40, "NVidia 0x0169"}, \
{0x10DE, 0x016B, NV40, "NVidia 0x016B"}, \
{0x10DE, 0x016C, NV40, "NVidia 0x016C"}, \
{0x10DE, 0x016D, NV40, "NVidia 0x016D"}, \
{0x10DE, 0x016E, NV40, "NVidia 0x016E"}, \
{0x10DE, 0x0210, NV40, "NVidia 0x0210"}, \
{0x10DE, 0x0211, NV40, "NVidia GeForce 6800"}, \
{0x10DE, 0x0212, NV40, "NVidia GeForce 6800 LE"}, \
{0x10DE, 0x0215, NV40, "NVidia GeForce 6800 GT"}, \
{0x10DE, 0x0220, NV40, "NVidia 0x0220"}, \
{0x10DE, 0x0221, NV40, "NVidia GeForce 6200"}, \
{0x10DE, 0x0222, NV40, "NVidia 0x0222"}, \
{0x10DE, 0x0228, NV40, "NVidia 0x0228"}, \
{0x10DE, 0x0090, NV40, "NVidia 0x0090"}, \
{0x10DE, 0x0091, NV40, "NVidia GeForce 7800 GTX"}, \
{0x10DE, 0x0092, NV40, "NVidia 0x0092"}, \
{0x10DE, 0x0093, NV40, "NVidia 0x0093"}, \
{0x10DE, 0x0094, NV40, "NVidia 0x0094"}, \
{0x10DE, 0x0098, NV40, "NVidia 0x0098"}, \
{0x10DE, 0x0099, NV40, "NVidia GeForce Go 7800 GTX"}, \
{0x10DE, 0x009C, NV40, "NVidia 0x009C"}, \
{0x10DE, 0x009D, NV40, "NVidia Quadro FX 4500"}, \
{0x10DE, 0x009E, NV40, "NVidia 0x009E"}, \
{0, 0, 0, NULL}
#define xgi_PCI_IDS \
{0x18ca, 0x2200, 0, "XP5"}, \
{0x18ca, 0x0047, 0, "XP10 / XG47"}, \
{0, 0, 0, NULL}
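Each table above is a list of {vendor, device, driver-private flags, name} entries terminated by an all-zero row. A hypothetical sketch of scanning one of them during probe: struct example_pciid and example_lookup() are stand-ins (the real entry type lived in the removed drmP.h), and r128_PCI_IDS is used only because its flag column is plain zero.

#include <sys/param.h>

/* Assumed entry shape; the real definition lived in the removed drmP.h. */
struct example_pciid {
	int vendor;
	int device;
	long driver_private;
	const char *name;
};

static const struct example_pciid example_ids[] = { r128_PCI_IDS };

/* Return the matching entry, or NULL when the device is not in the table. */
static const struct example_pciid *
example_lookup(int vendor, int device)
{
	const struct example_pciid *id;

	for (id = example_ids; id->vendor != 0; id++)
		if (id->vendor == vendor && id->device == device)
			return (id);
	return (NULL);
}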

View File

@ -1,85 +0,0 @@
/**
* \file drm_sarea.h
* \brief SAREA definitions
*
 * \author Michel Dänzer <michel@daenzer.net>
*/
/*-
* Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#ifndef _DRM_SAREA_H_
#define _DRM_SAREA_H_
#include "dev/drm/drm.h"
/* SAREA area needs to be at least a page */
#if defined(__alpha__)
#define SAREA_MAX 0x2000
#else
/* Intel 830M driver needs at least 8k SAREA */
#define SAREA_MAX 0x2000UL
#endif
/** Maximum number of drawables in the SAREA */
#define SAREA_MAX_DRAWABLES 256
#define SAREA_DRAWABLE_CLAIMED_ENTRY 0x80000000
/** SAREA drawable */
struct drm_sarea_drawable {
unsigned int stamp;
unsigned int flags;
};
/** SAREA frame */
struct drm_sarea_frame {
unsigned int x;
unsigned int y;
unsigned int width;
unsigned int height;
unsigned int fullscreen;
};
/** SAREA */
struct drm_sarea {
/** first thing is always the DRM locking structure */
struct drm_hw_lock lock;
/** \todo Use readers/writer lock for drm_sarea::drawable_lock */
struct drm_hw_lock drawable_lock;
struct drm_sarea_drawable drawableTable[SAREA_MAX_DRAWABLES]; /**< drawables */
struct drm_sarea_frame frame; /**< frame */
drm_context_t dummy_context;
};
#ifndef __KERNEL__
typedef struct drm_sarea_drawable drm_sarea_drawable_t;
typedef struct drm_sarea_frame drm_sarea_frame_t;
typedef struct drm_sarea drm_sarea_t;
#endif
#endif /* _DRM_SAREA_H_ */
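A small illustrative cross-check that could accompany these definitions: the generic SAREA layout has to fit in the SAREA_MAX mapping, which the comments above require to be at least a page. CTASSERT() is the in-tree compile-time assert; placing it here is purely a sketch.

#include <sys/param.h>
#include <sys/systm.h>		/* CTASSERT() */
#include "dev/drm/drm_sarea.h"

/* The lock, drawable table and frame state must fit in the shared mapping. */
CTASSERT(sizeof(struct drm_sarea) <= SAREA_MAX);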

View File

@ -1,129 +0,0 @@
/*-
* Copyright (c) 2009 Robert C. Noland III <rnoland@FreeBSD.org>
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/** @file drm_scatter.c
* Allocation of memory for scatter-gather mappings by the graphics chip.
* The memory allocated here is then made into an aperture in the card
* by mapping the pages into the GART.
*/
#include "dev/drm/drmP.h"
int
drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather *request)
{
struct drm_sg_mem *entry;
vm_size_t size;
vm_pindex_t pindex;
if (dev->sg)
return EINVAL;
DRM_DEBUG("request size=%ld\n", request->size);
entry = malloc(sizeof(*entry), DRM_MEM_DRIVER, M_WAITOK | M_ZERO);
size = round_page(request->size);
entry->pages = atop(size);
entry->busaddr = malloc(entry->pages * sizeof(*entry->busaddr),
DRM_MEM_SGLISTS, M_WAITOK | M_ZERO);
entry->vaddr = kmem_alloc_attr(size, M_WAITOK | M_ZERO, 0,
BUS_SPACE_MAXADDR_32BIT, VM_MEMATTR_WRITE_COMBINING);
if (entry->vaddr == 0) {
drm_sg_cleanup(entry);
return (ENOMEM);
}
for(pindex = 0; pindex < entry->pages; pindex++) {
entry->busaddr[pindex] =
vtophys(entry->vaddr + IDX_TO_OFF(pindex));
}
DRM_LOCK();
if (dev->sg) {
DRM_UNLOCK();
drm_sg_cleanup(entry);
return (EINVAL);
}
dev->sg = entry;
DRM_UNLOCK();
request->handle = entry->vaddr;
DRM_DEBUG("allocated %ju pages @ 0x%08zx, contents=%08lx\n",
entry->pages, entry->vaddr, *(unsigned long *)entry->vaddr);
return (0);
}
int
drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_scatter_gather *request = data;
DRM_DEBUG("\n");
return (drm_sg_alloc(dev, request));
}
void
drm_sg_cleanup(struct drm_sg_mem *entry)
{
if (entry == NULL)
return;
if (entry->vaddr != 0)
kmem_free(kernel_arena, entry->vaddr, IDX_TO_OFF(entry->pages));
free(entry->busaddr, DRM_MEM_SGLISTS);
free(entry, DRM_MEM_DRIVER);
return;
}
int
drm_sg_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_scatter_gather *request = data;
struct drm_sg_mem *entry;
DRM_LOCK();
entry = dev->sg;
dev->sg = NULL;
DRM_UNLOCK();
if (!entry || entry->vaddr != request->handle)
return (EINVAL);
DRM_DEBUG("free 0x%zx\n", entry->vaddr);
drm_sg_cleanup(entry);
return (0);
}
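To make the request/handle flow concrete, a hedged kernel-side sketch of driving drm_sg_alloc()/drm_sg_free() directly; example_sg, the 8 MB size, and the NULL file_priv (unused by drm_sg_free() above) are assumptions.

#include "dev/drm/drmP.h"

/* Sketch: build an 8 MB scatter-gather aperture and tear it down again. */
static int
example_sg(struct drm_device *dev)
{
	struct drm_scatter_gather request;
	int ret;

	memset(&request, 0, sizeof(request));
	request.size = 8 * 1024 * 1024;

	ret = drm_sg_alloc(dev, &request);	/* fills request.handle */
	if (ret != 0)
		return (ret);

	/* ... program the GART from dev->sg->busaddr[] here ... */

	return (drm_sg_free(dev, &request, NULL));
}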

View File

@ -1,352 +0,0 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck., ND., USA.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
 * Simple memory manager interface that keeps track of allocated regions on a
 * per-"owner" basis.  All regions associated with an "owner" can be released
 * with a single call, typically when that "owner" goes away.  The owner is any
 * "unsigned long" identifier, commonly a pointer to a file private struct or a
 * context identifier.
*
* Authors:
* Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#include "dev/drm/drmP.h"
#include "dev/drm/drm_sman.h"
struct drm_owner_item {
struct drm_hash_item owner_hash;
struct list_head sman_list;
struct list_head mem_blocks;
};
void drm_sman_takedown(struct drm_sman * sman)
{
drm_ht_remove(&sman->user_hash_tab);
drm_ht_remove(&sman->owner_hash_tab);
if (sman->mm)
drm_free(sman->mm, sman->num_managers * sizeof(*sman->mm),
DRM_MEM_MM);
}
int
drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
unsigned int user_order, unsigned int owner_order)
{
int ret = 0;
sman->mm = (struct drm_sman_mm *) drm_calloc(num_managers,
sizeof(*sman->mm), DRM_MEM_MM);
if (!sman->mm) {
ret = -ENOMEM;
goto out;
}
sman->num_managers = num_managers;
INIT_LIST_HEAD(&sman->owner_items);
ret = drm_ht_create(&sman->owner_hash_tab, owner_order);
if (ret)
goto out1;
ret = drm_ht_create(&sman->user_hash_tab, user_order);
if (!ret)
goto out;
drm_ht_remove(&sman->owner_hash_tab);
out1:
drm_free(sman->mm, num_managers * sizeof(*sman->mm), DRM_MEM_MM);
out:
return ret;
}
static void *drm_sman_mm_allocate(void *private, unsigned long size,
unsigned alignment)
{
struct drm_mm *mm = (struct drm_mm *) private;
struct drm_mm_node *tmp;
tmp = drm_mm_search_free(mm, size, alignment, 1);
if (!tmp) {
return NULL;
}
/* This could be non-atomic, but we are called from a locked path */
tmp = drm_mm_get_block_atomic(tmp, size, alignment);
return tmp;
}
static void drm_sman_mm_free(void *private, void *ref)
{
struct drm_mm_node *node = (struct drm_mm_node *) ref;
drm_mm_put_block(node);
}
static void drm_sman_mm_destroy(void *private)
{
struct drm_mm *mm = (struct drm_mm *) private;
drm_mm_takedown(mm);
drm_free(mm, sizeof(*mm), DRM_MEM_MM);
}
static unsigned long drm_sman_mm_offset(void *private, void *ref)
{
struct drm_mm_node *node = (struct drm_mm_node *) ref;
return node->start;
}
int
drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
unsigned long start, unsigned long size)
{
struct drm_sman_mm *sman_mm;
struct drm_mm *mm;
int ret;
KASSERT(manager < sman->num_managers, ("Invalid manager"));
sman_mm = &sman->mm[manager];
mm = malloc(sizeof(*mm), DRM_MEM_MM, M_NOWAIT | M_ZERO);
if (!mm) {
return -ENOMEM;
}
sman_mm->private = mm;
ret = drm_mm_init(mm, start, size);
if (ret) {
drm_free(mm, sizeof(*mm), DRM_MEM_MM);
return ret;
}
sman_mm->allocate = drm_sman_mm_allocate;
sman_mm->free = drm_sman_mm_free;
sman_mm->destroy = drm_sman_mm_destroy;
sman_mm->offset = drm_sman_mm_offset;
return 0;
}
int
drm_sman_set_manager(struct drm_sman * sman, unsigned int manager,
struct drm_sman_mm * allocator)
{
KASSERT(manager < sman->num_managers, ("Invalid manager"));
sman->mm[manager] = *allocator;
return 0;
}
static struct drm_owner_item *drm_sman_get_owner_item(struct drm_sman * sman,
unsigned long owner)
{
int ret;
struct drm_hash_item *owner_hash_item;
struct drm_owner_item *owner_item;
ret = drm_ht_find_item(&sman->owner_hash_tab, owner, &owner_hash_item);
if (!ret) {
return drm_hash_entry(owner_hash_item, struct drm_owner_item,
owner_hash);
}
owner_item = malloc(sizeof(*owner_item), DRM_MEM_MM, M_NOWAIT | M_ZERO);
if (!owner_item)
goto out;
INIT_LIST_HEAD(&owner_item->mem_blocks);
owner_item->owner_hash.key = owner;
DRM_DEBUG("owner_item = %p, mem_blocks = %p\n", owner_item, &owner_item->mem_blocks);
if (drm_ht_insert_item(&sman->owner_hash_tab, &owner_item->owner_hash))
goto out1;
list_add_tail(&owner_item->sman_list, &sman->owner_items);
return owner_item;
out1:
drm_free(owner_item, sizeof(*owner_item), DRM_MEM_MM);
out:
return NULL;
}
struct drm_memblock_item *drm_sman_alloc(struct drm_sman *sman, unsigned int manager,
unsigned long size, unsigned alignment,
unsigned long owner)
{
void *tmp;
struct drm_sman_mm *sman_mm;
struct drm_owner_item *owner_item;
struct drm_memblock_item *memblock;
KASSERT(manager < sman->num_managers, ("Invalid manager"));
sman_mm = &sman->mm[manager];
tmp = sman_mm->allocate(sman_mm->private, size, alignment);
if (!tmp) {
return NULL;
}
memblock = malloc(sizeof(*memblock), DRM_MEM_MM, M_NOWAIT | M_ZERO);
DRM_DEBUG("allocated mem_block %p\n", memblock);
if (!memblock)
goto out;
memblock->mm_info = tmp;
memblock->mm = sman_mm;
memblock->sman = sman;
INIT_LIST_HEAD(&memblock->owner_list);
if (drm_ht_just_insert_please
(&sman->user_hash_tab, &memblock->user_hash,
(unsigned long)memblock, 32, 0, 0))
goto out1;
owner_item = drm_sman_get_owner_item(sman, owner);
if (!owner_item)
goto out2;
DRM_DEBUG("owner_item = %p, mem_blocks = %p\n", owner_item, &owner_item->mem_blocks);
DRM_DEBUG("owner_list.prev = %p, mem_blocks.prev = %p\n", memblock->owner_list.prev, owner_item->mem_blocks.prev);
DRM_DEBUG("owner_list.next = %p, mem_blocks.next = %p\n", memblock->owner_list.next, owner_item->mem_blocks.next);
list_add_tail(&memblock->owner_list, &owner_item->mem_blocks);
DRM_DEBUG("Complete\n");
return memblock;
out2:
drm_ht_remove_item(&sman->user_hash_tab, &memblock->user_hash);
out1:
drm_free(memblock, sizeof(*memblock), DRM_MEM_MM);
out:
sman_mm->free(sman_mm->private, tmp);
return NULL;
}
static void drm_sman_free(struct drm_memblock_item *item)
{
struct drm_sman *sman = item->sman;
list_del(&item->owner_list);
drm_ht_remove_item(&sman->user_hash_tab, &item->user_hash);
item->mm->free(item->mm->private, item->mm_info);
drm_free(item, sizeof(*item), DRM_MEM_MM);
}
int drm_sman_free_key(struct drm_sman *sman, unsigned int key)
{
struct drm_hash_item *hash_item;
struct drm_memblock_item *memblock_item;
if (drm_ht_find_item(&sman->user_hash_tab, key, &hash_item))
return -EINVAL;
memblock_item = drm_hash_entry(hash_item, struct drm_memblock_item,
user_hash);
drm_sman_free(memblock_item);
return 0;
}
static void drm_sman_remove_owner(struct drm_sman *sman,
struct drm_owner_item *owner_item)
{
list_del(&owner_item->sman_list);
drm_ht_remove_item(&sman->owner_hash_tab, &owner_item->owner_hash);
drm_free(owner_item, sizeof(*owner_item), DRM_MEM_MM);
}
int drm_sman_owner_clean(struct drm_sman *sman, unsigned long owner)
{
struct drm_hash_item *hash_item;
struct drm_owner_item *owner_item;
if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) {
return -1;
}
owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash);
DRM_DEBUG("cleaning owner_item %p\n", owner_item);
if (owner_item->mem_blocks.next == &owner_item->mem_blocks) {
drm_sman_remove_owner(sman, owner_item);
return -1;
}
return 0;
}
static void drm_sman_do_owner_cleanup(struct drm_sman *sman,
struct drm_owner_item *owner_item)
{
struct drm_memblock_item *entry, *next;
list_for_each_entry_safe(entry, next, &owner_item->mem_blocks,
owner_list) {
DRM_DEBUG("freeing mem_block %p\n", entry);
drm_sman_free(entry);
}
drm_sman_remove_owner(sman, owner_item);
}
void drm_sman_owner_cleanup(struct drm_sman *sman, unsigned long owner)
{
struct drm_hash_item *hash_item;
struct drm_owner_item *owner_item;
if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) {
return;
}
owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash);
drm_sman_do_owner_cleanup(sman, owner_item);
}
void drm_sman_cleanup(struct drm_sman *sman)
{
struct drm_owner_item *entry, *next;
unsigned int i;
struct drm_sman_mm *sman_mm;
DRM_DEBUG("sman = %p, owner_items = %p\n",
sman, &sman->owner_items);
list_for_each_entry_safe(entry, next, &sman->owner_items, sman_list) {
DRM_DEBUG("cleaning owner_item = %p\n", entry);
drm_sman_do_owner_cleanup(sman, entry);
}
if (sman->mm) {
for (i = 0; i < sman->num_managers; ++i) {
sman_mm = &sman->mm[i];
if (sman_mm->private) {
sman_mm->destroy(sman_mm->private);
sman_mm->private = NULL;
}
}
}
}
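A hedged sketch of how a driver typically wired this manager up: one VRAM pool, ownership keyed on the file-private pointer, and cleanup on close. EX_POOL_VRAM, ex_sman, the hash orders (12/8) and the example_* wrappers are illustrative assumptions.

#include "dev/drm/drmP.h"
#include "dev/drm/drm_sman.h"

#define EX_POOL_VRAM	0

static struct drm_sman ex_sman;

/* Sketch: one pool managed by the built-in drm_mm-backed allocator. */
static int
example_sman_setup(unsigned long vram_start, unsigned long vram_size)
{
	int ret;

	ret = drm_sman_init(&ex_sman, 1, 12, 8);
	if (ret != 0)
		return (ret);
	return (drm_sman_set_range(&ex_sman, EX_POOL_VRAM,
	    vram_start, vram_size));
}

/* Allocations are tagged with the client so they can be reclaimed later. */
static struct drm_memblock_item *
example_sman_alloc(struct drm_file *file_priv, unsigned long size)
{
	return (drm_sman_alloc(&ex_sman, EX_POOL_VRAM, size, 0,
	    (unsigned long)file_priv));
}

/* On close: release every block still owned by this client. */
static void
example_sman_close(struct drm_file *file_priv)
{
	drm_sman_owner_cleanup(&ex_sman, (unsigned long)file_priv);
}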

View File

@ -1,181 +0,0 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*
**************************************************************************/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
 * Simple memory MANager (sman) interface that keeps track of allocated regions
 * on a per-"owner" basis.  All regions associated with an "owner" can be
 * released with a single call, typically when that "owner" goes away.  The
 * owner is any "unsigned long" identifier, commonly a pointer to a file
 * private struct or a context identifier.
*
* Authors:
* Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#ifndef DRM_SMAN_H
#define DRM_SMAN_H
#include "dev/drm/drm_hashtab.h"
#include "dev/drm/drm_linux_list.h"
#include "dev/drm/drm_mm.h"
/*
 * A class that is an abstraction of a simple memory allocator.
* The sman implementation provides a default such allocator
* using the drm_mm.c implementation. But the user can replace it.
* See the SiS implementation, which may use the SiS FB kernel module
* for memory management.
*/
struct drm_sman_mm {
/* private info. If allocated, needs to be destroyed by the destroy
function */
void *private;
/* Allocate a memory block with given size and alignment.
Return an opaque reference to the memory block */
void *(*allocate) (void *private, unsigned long size,
unsigned alignment);
/* Free a memory block. "ref" is the opaque reference that we got from
the "alloc" function */
void (*free) (void *private, void *ref);
/* Free all resources associated with this allocator */
void (*destroy) (void *private);
/* Return a memory offset from the opaque reference returned from the
"alloc" function */
unsigned long (*offset) (void *private, void *ref);
};
struct drm_memblock_item {
struct list_head owner_list;
struct drm_hash_item user_hash;
void *mm_info;
struct drm_sman_mm *mm;
struct drm_sman *sman;
};
struct drm_sman {
struct drm_sman_mm *mm;
int num_managers;
struct drm_open_hash owner_hash_tab;
struct drm_open_hash user_hash_tab;
struct list_head owner_items;
};
/*
* Take down a memory manager. This function should only be called after a
* successful init and after a call to drm_sman_cleanup.
*/
extern void drm_sman_takedown(struct drm_sman * sman);
/*
* Allocate structures for a manager.
 * num_managers is the number of memory pools to manage (VRAM, AGP, ...).
* user_order is the log2 of the number of buckets in the user hash table.
* set this to approximately log2 of the max number of memory regions
* that will be allocated for _all_ pools together.
* owner_order is the log2 of the number of buckets in the owner hash table.
* set this to approximately log2 of
* the number of client file connections that will
* be using the manager.
*
*/
extern int drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
unsigned int user_order, unsigned int owner_order);
/*
* Initialize a drm_mm.c allocator. Should be called only once for each
 * manager unless a customized allocator is used.
*/
extern int drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
unsigned long start, unsigned long size);
/*
* Initialize a customized allocator for one of the managers.
* (See the SiS module). The object pointed to by "allocator" is copied,
* so it can be destroyed after this call.
*/
extern int drm_sman_set_manager(struct drm_sman * sman, unsigned int manager,
struct drm_sman_mm * allocator);
/*
 * Allocate a memory block. Alignment is not implemented yet.
*/
extern struct drm_memblock_item *drm_sman_alloc(struct drm_sman * sman,
unsigned int manager,
unsigned long size,
unsigned alignment,
unsigned long owner);
/*
* Free a memory block identified by its user hash key.
*/
extern int drm_sman_free_key(struct drm_sman * sman, unsigned int key);
/*
 * Returns nonzero iff there are no stale memory blocks associated with this
 * owner.  Typically called to determine whether the hardware must be idled
 * before calling drm_sman_owner_cleanup.  When there are no stale memory
 * blocks, it also removes all resources associated with the owner.
*/
extern int drm_sman_owner_clean(struct drm_sman * sman, unsigned long owner);
/*
* Frees all stale memory blocks associated with this owner. Note that this
* requires that the hardware is finished with all blocks, so the graphics engine
* should be idled before this call is made. This function also frees
* any resources associated with "owner" and should be called when owner
* is not going to be referenced anymore.
*/
extern void drm_sman_owner_cleanup(struct drm_sman * sman, unsigned long owner);
/*
* Frees all stale memory blocks associated with the memory manager.
* See idling above.
*/
extern void drm_sman_cleanup(struct drm_sman * sman);
#endif
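For the customized-allocator path (drm_sman_set_manager), a deliberately tiny sketch of a replacement drm_sman_mm: a bump allocator that never reclaims, which a real driver (such as the SiS FB hook mentioned above) would not use as-is. All ex_bump_* names are assumptions.

#include "dev/drm/drmP.h"
#include "dev/drm/drm_sman.h"

struct ex_bump {
	unsigned long next;	/* next free offset */
	unsigned long end;	/* end of the managed range */
};

static void *
ex_bump_allocate(void *private, unsigned long size, unsigned alignment)
{
	struct ex_bump *b = private;
	unsigned long start = b->next;

	if (alignment != 0 && (start % alignment) != 0)
		start += alignment - (start % alignment);
	if (start + size > b->end)
		return (NULL);
	b->next = start + size;
	/* Offsets are stored +1 so a block at offset 0 is not mistaken for NULL. */
	return ((void *)(uintptr_t)(start + 1));
}

static void
ex_bump_free(void *private, void *ref)
{
	/* A bump allocator never reclaims; good enough for a sketch only. */
}

static void
ex_bump_destroy(void *private)
{
	free(private, DRM_MEM_MM);
}

static unsigned long
ex_bump_offset(void *private, void *ref)
{
	return ((unsigned long)(uintptr_t)ref - 1);
}

A struct drm_sman_mm filled in with these callbacks is handed to drm_sman_set_manager(), which copies it, so the struct itself may live on the stack.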

View File

@ -1,343 +0,0 @@
/*-
* Copyright 2003 Eric Anholt
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* ERIC ANHOLT BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/** @file drm_sysctl.c
* Implementation of various sysctls for controlling DRM behavior and reporting
* debug information.
*/
#include "dev/drm/drmP.h"
#include "dev/drm/drm.h"
#include <sys/sysctl.h>
static int drm_name_info DRM_SYSCTL_HANDLER_ARGS;
static int drm_vm_info DRM_SYSCTL_HANDLER_ARGS;
static int drm_clients_info DRM_SYSCTL_HANDLER_ARGS;
static int drm_bufs_info DRM_SYSCTL_HANDLER_ARGS;
static int drm_vblank_info DRM_SYSCTL_HANDLER_ARGS;
struct drm_sysctl_list {
const char *name;
int (*f) DRM_SYSCTL_HANDLER_ARGS;
} drm_sysctl_list[] = {
{"name", drm_name_info},
{"vm", drm_vm_info},
{"clients", drm_clients_info},
{"bufs", drm_bufs_info},
{"vblank", drm_vblank_info},
};
#define DRM_SYSCTL_ENTRIES (sizeof(drm_sysctl_list)/sizeof(drm_sysctl_list[0]))
struct drm_sysctl_info {
struct sysctl_ctx_list ctx;
char name[2];
};
int drm_sysctl_init(struct drm_device *dev)
{
struct drm_sysctl_info *info;
struct sysctl_oid *oid;
struct sysctl_oid *top, *drioid;
int i;
info = malloc(sizeof *info, DRM_MEM_DRIVER, M_WAITOK | M_ZERO);
if ( !info )
return 1;
dev->sysctl = info;
/* Add the sysctl node for DRI if it doesn't already exist */
drioid = SYSCTL_ADD_NODE(&info->ctx, SYSCTL_CHILDREN(&sysctl___hw), OID_AUTO, "dri", CTLFLAG_RW, NULL, "DRI Graphics");
if (!drioid)
return 1;
/* Find the next free slot under hw.dri */
i = 0;
SLIST_FOREACH(oid, SYSCTL_CHILDREN(drioid), oid_link) {
if (i <= oid->oid_arg2)
i = oid->oid_arg2 + 1;
}
if (i>9)
return 1;
/* Add the hw.dri.x for our device */
info->name[0] = '0' + i;
info->name[1] = 0;
top = SYSCTL_ADD_NODE( &info->ctx, SYSCTL_CHILDREN(drioid), OID_AUTO, info->name, CTLFLAG_RW, NULL, NULL);
if (!top)
return 1;
for (i = 0; i < DRM_SYSCTL_ENTRIES; i++) {
oid = SYSCTL_ADD_OID(&info->ctx,
SYSCTL_CHILDREN(top),
OID_AUTO,
drm_sysctl_list[i].name,
CTLTYPE_STRING | CTLFLAG_RD,
dev,
0,
drm_sysctl_list[i].f,
"A",
NULL);
if (!oid)
return 1;
}
SYSCTL_ADD_INT(&info->ctx, SYSCTL_CHILDREN(top), OID_AUTO, "debug",
CTLFLAG_RW, &drm_debug_flag, sizeof(drm_debug_flag),
"Enable debugging output");
return 0;
}
int drm_sysctl_cleanup(struct drm_device *dev)
{
int error;
error = sysctl_ctx_free( &dev->sysctl->ctx );
free(dev->sysctl, DRM_MEM_DRIVER);
dev->sysctl = NULL;
return error;
}
#define DRM_SYSCTL_PRINT(fmt, arg...) \
do { \
snprintf(buf, sizeof(buf), fmt, ##arg); \
retcode = SYSCTL_OUT(req, buf, strlen(buf)); \
if (retcode) \
goto done; \
} while (0)
static int drm_name_info DRM_SYSCTL_HANDLER_ARGS
{
struct drm_device *dev = arg1;
char buf[128];
int retcode;
int hasunique = 0;
DRM_SYSCTL_PRINT("%s 0x%jx", dev->driver->name,
(uintmax_t)dev2udev(dev->devnode));
DRM_LOCK();
if (dev->unique) {
snprintf(buf, sizeof(buf), " %s", dev->unique);
hasunique = 1;
}
DRM_UNLOCK();
if (hasunique)
SYSCTL_OUT(req, buf, strlen(buf));
SYSCTL_OUT(req, "", 1);
done:
return retcode;
}
static int drm_vm_info DRM_SYSCTL_HANDLER_ARGS
{
struct drm_device *dev = arg1;
drm_local_map_t *map, *tempmaps;
const char *types[] = { "FB", "REG", "SHM", "AGP", "SG" };
const char *type, *yesno;
int i, mapcount;
char buf[128];
int retcode;
/* We can't hold the lock while doing SYSCTL_OUTs, so allocate a
* temporary copy of all the map entries and then SYSCTL_OUT that.
*/
DRM_LOCK();
mapcount = 0;
TAILQ_FOREACH(map, &dev->maplist, link)
mapcount++;
tempmaps = malloc(sizeof(drm_local_map_t) * mapcount, DRM_MEM_DRIVER,
M_NOWAIT);
if (tempmaps == NULL) {
DRM_UNLOCK();
return ENOMEM;
}
i = 0;
TAILQ_FOREACH(map, &dev->maplist, link)
tempmaps[i++] = *map;
DRM_UNLOCK();
DRM_SYSCTL_PRINT("\nslot offset size "
"type flags address handle mtrr\n");
for (i = 0; i < mapcount; i++) {
map = &tempmaps[i];
if (map->type > 4)
type = "??";
else
type = types[map->type];
if (!map->mtrr)
yesno = "no";
else
yesno = "yes";
DRM_SYSCTL_PRINT(
"%4d 0x%016lx 0x%08lx %4.4s 0x%02x 0x%016lx %6d %s\n",
i, map->offset, map->size, type, map->flags,
(unsigned long)map->virtual,
(unsigned int)((unsigned long)map->handle >>
DRM_MAP_HANDLE_SHIFT), yesno);
}
SYSCTL_OUT(req, "", 1);
done:
free(tempmaps, DRM_MEM_DRIVER);
return retcode;
}
static int drm_bufs_info DRM_SYSCTL_HANDLER_ARGS
{
struct drm_device *dev = arg1;
drm_device_dma_t *dma = dev->dma;
drm_device_dma_t tempdma;
int *templists;
int i;
char buf[128];
int retcode;
/* We can't hold the locks around DRM_SYSCTL_PRINT, so make a temporary
* copy of the whole structure and the relevant data from buflist.
*/
DRM_LOCK();
if (dma == NULL) {
DRM_UNLOCK();
return 0;
}
DRM_SPINLOCK(&dev->dma_lock);
tempdma = *dma;
templists = malloc(sizeof(int) * dma->buf_count, DRM_MEM_DRIVER,
M_NOWAIT);
for (i = 0; i < dma->buf_count; i++)
templists[i] = dma->buflist[i]->list;
dma = &tempdma;
DRM_SPINUNLOCK(&dev->dma_lock);
DRM_UNLOCK();
DRM_SYSCTL_PRINT("\n o size count free segs pages kB\n");
for (i = 0; i <= DRM_MAX_ORDER; i++) {
if (dma->bufs[i].buf_count)
DRM_SYSCTL_PRINT("%2d %8d %5d %5d %5d %5d %5d\n",
i,
dma->bufs[i].buf_size,
dma->bufs[i].buf_count,
atomic_read(&dma->bufs[i]
.freelist.count),
dma->bufs[i].seg_count,
dma->bufs[i].seg_count
*(1 << dma->bufs[i].page_order),
(dma->bufs[i].seg_count
* (1 << dma->bufs[i].page_order))
* (int)PAGE_SIZE / 1024);
}
DRM_SYSCTL_PRINT("\n");
for (i = 0; i < dma->buf_count; i++) {
if (i && !(i%32)) DRM_SYSCTL_PRINT("\n");
DRM_SYSCTL_PRINT(" %d", templists[i]);
}
DRM_SYSCTL_PRINT("\n");
SYSCTL_OUT(req, "", 1);
done:
free(templists, DRM_MEM_DRIVER);
return retcode;
}
static int drm_clients_info DRM_SYSCTL_HANDLER_ARGS
{
struct drm_device *dev = arg1;
struct drm_file *priv, *tempprivs;
char buf[128];
int retcode;
int privcount, i;
DRM_LOCK();
privcount = 0;
TAILQ_FOREACH(priv, &dev->files, link)
privcount++;
tempprivs = malloc(sizeof(struct drm_file) * privcount, DRM_MEM_DRIVER,
M_NOWAIT);
if (tempprivs == NULL) {
DRM_UNLOCK();
return ENOMEM;
}
i = 0;
TAILQ_FOREACH(priv, &dev->files, link)
tempprivs[i++] = *priv;
DRM_UNLOCK();
DRM_SYSCTL_PRINT(
"\na dev pid uid magic ioctls\n");
for (i = 0; i < privcount; i++) {
priv = &tempprivs[i];
DRM_SYSCTL_PRINT("%c %-12s %5d %5d %10u %10lu\n",
priv->authenticated ? 'y' : 'n',
devtoname(priv->dev->devnode),
priv->pid,
priv->uid,
priv->magic,
priv->ioctl_count);
}
SYSCTL_OUT(req, "", 1);
done:
free(tempprivs, DRM_MEM_DRIVER);
return retcode;
}
static int drm_vblank_info DRM_SYSCTL_HANDLER_ARGS
{
struct drm_device *dev = arg1;
char buf[128];
int retcode;
int i;
DRM_SYSCTL_PRINT("\ncrtc ref count last enabled inmodeset\n");
for(i = 0 ; i < dev->num_crtcs ; i++) {
DRM_SYSCTL_PRINT(" %02d %02d %08d %08d %02d %02d\n",
i, atomic_load_acq_32(&dev->vblank[i].refcount),
atomic_load_acq_32(&dev->vblank[i].count),
atomic_load_acq_32(&dev->vblank[i].last),
atomic_load_acq_int(&dev->vblank[i].enabled),
atomic_load_acq_int(&dev->vblank[i].inmodeset));
}
	SYSCTL_OUT(req, "", 1);
done:
return retcode;
}
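From userland, the handlers registered above appear under hw.dri.<unit>. A hedged sketch of reading one of the string nodes with sysctlbyname(3); the unit number 0 is an assumption.

/* Sketch: read hw.dri.0.name from a small userland tool. */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	char buf[256];
	size_t len = sizeof(buf);

	if (sysctlbyname("hw.dri.0.name", buf, &len, NULL, 0) == -1) {
		perror("sysctlbyname");
		return (1);
	}
	printf("%.*s\n", (int)len, buf);
	return (0);
}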

View File

@ -1,133 +0,0 @@
/*-
* Copyright 2003 Eric Anholt
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* ERIC ANHOLT BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/** @file drm_vm.c
* Support code for mmaping of DRM maps.
*/
#include "dev/drm/drmP.h"
#include "dev/drm/drm.h"
int drm_mmap(struct cdev *kdev, vm_ooffset_t offset, vm_paddr_t *paddr,
int prot, vm_memattr_t *memattr)
{
struct drm_device *dev = drm_get_device_from_kdev(kdev);
struct drm_file *file_priv = NULL;
drm_local_map_t *map;
enum drm_map_type type;
vm_paddr_t phys;
int error;
	/* d_mmap gets called twice; we can only reference file_priv during
	 * the first call.  We have to assume that if the error is EBADF, the
	 * first call succeeded and the client is authenticated.
*/
error = devfs_get_cdevpriv((void **)&file_priv);
if (error == ENOENT) {
DRM_ERROR("Could not find authenticator!\n");
return EINVAL;
}
if (file_priv && !file_priv->authenticated)
return EACCES;
DRM_DEBUG("called with offset %016jx\n", offset);
if (dev->dma && offset < ptoa(dev->dma->page_count)) {
drm_device_dma_t *dma = dev->dma;
DRM_SPINLOCK(&dev->dma_lock);
if (dma->pagelist != NULL) {
unsigned long page = offset >> PAGE_SHIFT;
unsigned long phys = dma->pagelist[page];
DRM_SPINUNLOCK(&dev->dma_lock);
*paddr = phys;
return 0;
} else {
DRM_SPINUNLOCK(&dev->dma_lock);
return -1;
}
}
/* A sequential search of a linked list is
fine here because: 1) there will only be
about 5-10 entries in the list and, 2) a
DRI client only has to do this mapping
once, so it doesn't have to be optimized
for performance, even if the list was a
bit longer.
*/
DRM_LOCK();
TAILQ_FOREACH(map, &dev->maplist, link) {
if (offset >> DRM_MAP_HANDLE_SHIFT ==
(unsigned long)map->handle >> DRM_MAP_HANDLE_SHIFT)
break;
}
if (map == NULL) {
DRM_DEBUG("Can't find map, request offset = %016jx\n", offset);
TAILQ_FOREACH(map, &dev->maplist, link) {
DRM_DEBUG("map offset = %016lx, handle = %016lx\n",
map->offset, (unsigned long)map->handle);
}
DRM_UNLOCK();
return -1;
}
if (((map->flags & _DRM_RESTRICTED) && !DRM_SUSER(DRM_CURPROC))) {
DRM_UNLOCK();
DRM_DEBUG("restricted map\n");
return -1;
}
type = map->type;
DRM_UNLOCK();
offset = offset & ((1ULL << DRM_MAP_HANDLE_SHIFT) - 1);
switch (type) {
case _DRM_FRAME_BUFFER:
case _DRM_AGP:
*memattr = VM_MEMATTR_WRITE_COMBINING;
/* FALLTHROUGH */
case _DRM_REGISTERS:
phys = map->offset + offset;
break;
case _DRM_SCATTER_GATHER:
*memattr = VM_MEMATTR_WRITE_COMBINING;
/* FALLTHROUGH */
case _DRM_CONSISTENT:
case _DRM_SHM:
phys = vtophys((char *)map->virtual + offset);
break;
default:
DRM_ERROR("bad map type %d\n", type);
return -1; /* This should never happen. */
}
*paddr = phys;
return 0;
}
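What userland sees from the lookup above: the map handle handed out when the map was created is passed back as the mmap(2) offset, and drm_mmap() translates it page by page. A hedged sketch; example_map and its parameters are illustrative.

/* Sketch: map a DRM map into the client, using its handle as the offset. */
#include <sys/types.h>
#include <sys/mman.h>
#include <stdint.h>
#include <stdio.h>

static void *
example_map(int drm_fd, uintptr_t map_handle, size_t map_size)
{
	void *p;

	p = mmap(NULL, map_size, PROT_READ | PROT_WRITE, MAP_SHARED,
	    drm_fd, (off_t)map_handle);
	if (p == MAP_FAILED) {
		perror("mmap");
		return (NULL);
	}
	return (p);
}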

File diff suppressed because it is too large

View File

@ -1,259 +0,0 @@
/* mach64_drm.h -- Public header for the mach64 driver -*- linux-c -*-
* Created: Thu Nov 30 20:04:32 2000 by gareth@valinux.com
*/
/*-
* Copyright 2000 Gareth Hughes
* Copyright 2002 Frank C. Earl
* Copyright 2002-2003 Leif Delgass
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT OWNER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Gareth Hughes <gareth@valinux.com>
* Frank C. Earl <fearl@airmail.net>
* Leif Delgass <ldelgass@retinalburn.net>
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#ifndef __MACH64_DRM_H__
#define __MACH64_DRM_H__
/* WARNING: If you change any of these defines, make sure to change the
* defines in the Xserver file (mach64_sarea.h)
*/
#ifndef __MACH64_SAREA_DEFINES__
#define __MACH64_SAREA_DEFINES__
/* What needs to be changed for the current vertex buffer?
* GH: We're going to be pedantic about this. We want the card to do as
* little as possible, so let's avoid having it fetch a whole bunch of
* register values that don't change all that often, if at all.
*/
#define MACH64_UPLOAD_DST_OFF_PITCH 0x0001
#define MACH64_UPLOAD_Z_OFF_PITCH 0x0002
#define MACH64_UPLOAD_Z_ALPHA_CNTL 0x0004
#define MACH64_UPLOAD_SCALE_3D_CNTL 0x0008
#define MACH64_UPLOAD_DP_FOG_CLR 0x0010
#define MACH64_UPLOAD_DP_WRITE_MASK 0x0020
#define MACH64_UPLOAD_DP_PIX_WIDTH 0x0040
#define MACH64_UPLOAD_SETUP_CNTL 0x0080
#define MACH64_UPLOAD_MISC 0x0100
#define MACH64_UPLOAD_TEXTURE 0x0200
#define MACH64_UPLOAD_TEX0IMAGE 0x0400
#define MACH64_UPLOAD_TEX1IMAGE 0x0800
#define MACH64_UPLOAD_CLIPRECTS 0x1000 /* handled client-side */
#define MACH64_UPLOAD_CONTEXT 0x00ff
#define MACH64_UPLOAD_ALL 0x1fff
/* DMA buffer size
*/
#define MACH64_BUFFER_SIZE 16384
/* Max number of swaps allowed on the ring
* before the client must wait
*/
#define MACH64_MAX_QUEUED_FRAMES 3U
/* Byte offsets for host blit buffer data
*/
#define MACH64_HOSTDATA_BLIT_OFFSET 104
/* Keep these small for testing.
*/
#define MACH64_NR_SAREA_CLIPRECTS 8
#define MACH64_CARD_HEAP 0
#define MACH64_AGP_HEAP 1
#define MACH64_NR_TEX_HEAPS 2
#define MACH64_NR_TEX_REGIONS 64
#define MACH64_LOG_TEX_GRANULARITY 16
#define MACH64_TEX_MAXLEVELS 1
#define MACH64_NR_CONTEXT_REGS 15
#define MACH64_NR_TEXTURE_REGS 4
#endif /* __MACH64_SAREA_DEFINES__ */
typedef struct {
unsigned int dst_off_pitch;
unsigned int z_off_pitch;
unsigned int z_cntl;
unsigned int alpha_tst_cntl;
unsigned int scale_3d_cntl;
unsigned int sc_left_right;
unsigned int sc_top_bottom;
unsigned int dp_fog_clr;
unsigned int dp_write_mask;
unsigned int dp_pix_width;
unsigned int dp_mix;
unsigned int dp_src;
unsigned int clr_cmp_cntl;
unsigned int gui_traj_cntl;
unsigned int setup_cntl;
unsigned int tex_size_pitch;
unsigned int tex_cntl;
unsigned int secondary_tex_off;
unsigned int tex_offset;
} drm_mach64_context_regs_t;
typedef struct drm_mach64_sarea {
/* The channel for communication of state information to the kernel
* on firing a vertex dma buffer.
*/
drm_mach64_context_regs_t context_state;
unsigned int dirty;
unsigned int vertsize;
/* The current cliprects, or a subset thereof.
*/
struct drm_clip_rect boxes[MACH64_NR_SAREA_CLIPRECTS];
unsigned int nbox;
/* Counters for client-side throttling of rendering clients.
*/
unsigned int frames_queued;
/* Texture memory LRU.
*/
struct drm_tex_region tex_list[MACH64_NR_TEX_HEAPS][MACH64_NR_TEX_REGIONS +
1];
unsigned int tex_age[MACH64_NR_TEX_HEAPS];
int ctx_owner;
} drm_mach64_sarea_t;
/* WARNING: If you change any of these defines, make sure to change the
* defines in the Xserver file (mach64_common.h)
*/
/* Mach64 specific ioctls
* The device specific ioctl range is 0x40 to 0x79.
*/
#define DRM_MACH64_INIT 0x00
#define DRM_MACH64_IDLE 0x01
#define DRM_MACH64_RESET 0x02
#define DRM_MACH64_SWAP 0x03
#define DRM_MACH64_CLEAR 0x04
#define DRM_MACH64_VERTEX 0x05
#define DRM_MACH64_BLIT 0x06
#define DRM_MACH64_FLUSH 0x07
#define DRM_MACH64_GETPARAM 0x08
#define DRM_IOCTL_MACH64_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_MACH64_INIT, drm_mach64_init_t)
#define DRM_IOCTL_MACH64_IDLE DRM_IO( DRM_COMMAND_BASE + DRM_MACH64_IDLE )
#define DRM_IOCTL_MACH64_RESET DRM_IO( DRM_COMMAND_BASE + DRM_MACH64_RESET )
#define DRM_IOCTL_MACH64_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_MACH64_SWAP )
#define DRM_IOCTL_MACH64_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_MACH64_CLEAR, drm_mach64_clear_t)
#define DRM_IOCTL_MACH64_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_MACH64_VERTEX, drm_mach64_vertex_t)
#define DRM_IOCTL_MACH64_BLIT DRM_IOW( DRM_COMMAND_BASE + DRM_MACH64_BLIT, drm_mach64_blit_t)
#define DRM_IOCTL_MACH64_FLUSH DRM_IO( DRM_COMMAND_BASE + DRM_MACH64_FLUSH )
#define DRM_IOCTL_MACH64_GETPARAM DRM_IOWR( DRM_COMMAND_BASE + DRM_MACH64_GETPARAM, drm_mach64_getparam_t)
/* Buffer flags for clears
*/
#define MACH64_FRONT 0x1
#define MACH64_BACK 0x2
#define MACH64_DEPTH 0x4
/* Primitive types for vertex buffers
*/
#define MACH64_PRIM_POINTS 0x00000000
#define MACH64_PRIM_LINES 0x00000001
#define MACH64_PRIM_LINE_LOOP 0x00000002
#define MACH64_PRIM_LINE_STRIP 0x00000003
#define MACH64_PRIM_TRIANGLES 0x00000004
#define MACH64_PRIM_TRIANGLE_STRIP 0x00000005
#define MACH64_PRIM_TRIANGLE_FAN 0x00000006
#define MACH64_PRIM_QUADS 0x00000007
#define MACH64_PRIM_QUAD_STRIP 0x00000008
#define MACH64_PRIM_POLYGON 0x00000009
typedef enum _drm_mach64_dma_mode_t {
MACH64_MODE_DMA_ASYNC,
MACH64_MODE_DMA_SYNC,
MACH64_MODE_MMIO
} drm_mach64_dma_mode_t;
typedef struct drm_mach64_init {
enum {
DRM_MACH64_INIT_DMA = 0x01,
DRM_MACH64_CLEANUP_DMA = 0x02
} func;
unsigned long sarea_priv_offset;
int is_pci;
drm_mach64_dma_mode_t dma_mode;
unsigned int fb_bpp;
unsigned int front_offset, front_pitch;
unsigned int back_offset, back_pitch;
unsigned int depth_bpp;
unsigned int depth_offset, depth_pitch;
unsigned long fb_offset;
unsigned long mmio_offset;
unsigned long ring_offset;
unsigned long buffers_offset;
unsigned long agp_textures_offset;
} drm_mach64_init_t;
typedef struct drm_mach64_clear {
unsigned int flags;
int x, y, w, h;
unsigned int clear_color;
unsigned int clear_depth;
} drm_mach64_clear_t;
typedef struct drm_mach64_vertex {
int prim;
void *buf; /* Address of vertex buffer */
unsigned long used; /* Number of bytes in buffer */
int discard; /* Client finished with buffer? */
} drm_mach64_vertex_t;
typedef struct drm_mach64_blit {
void *buf;
int pitch;
int offset;
int format;
unsigned short x, y;
unsigned short width, height;
} drm_mach64_blit_t;
typedef struct drm_mach64_getparam {
enum {
MACH64_PARAM_FRAMES_QUEUED = 0x01,
MACH64_PARAM_IRQ_NR = 0x02
} param;
void *value;
} drm_mach64_getparam_t;
#endif
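The MACH64_UPLOAD_* bits above implement a simple dirty-flag protocol between client and kernel: the client updates register values in the shared context_state and ORs in the matching bits, and the kernel side (mach64_emit_state() in mach64_state.c further down) emits only the flagged registers and then clears the bits. A trimmed stand-alone sketch of the client half follows; the struct and the ex_-prefixed names are stand-ins for illustration, not the real drm_mach64_sarea_t.

#include <stdio.h>

#define EX_UPLOAD_DST_OFF_PITCH	0x0001	/* mirrors MACH64_UPLOAD_DST_OFF_PITCH */

struct ex_sarea {
	unsigned int dst_off_pitch;	/* shadow of the register value */
	unsigned int dirty;		/* which registers need re-emitting */
};

static void
ex_set_dst_off_pitch(struct ex_sarea *s, unsigned int v)
{
	s->dst_off_pitch = v;
	s->dirty |= EX_UPLOAD_DST_OFF_PITCH;	/* kernel will re-emit it */
}

int
main(void)
{
	struct ex_sarea s = { 0, 0 };

	ex_set_dst_off_pitch(&s, 0x00400100);
	printf("dirty mask now 0x%04x\n", s.dirty);
	return (0);
}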


@@ -1,134 +0,0 @@
/* mach64_drv.c -- ATI Rage Pro driver -*- linux-c -*-
* Created: Mon Dec 13 09:47:27 1999 by faith@precisioninsight.com
*/
/*-
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
* Gareth Hughes <gareth@valinux.com>
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/types.h>
#include "dev/drm/drmP.h"
#include "dev/drm/drm.h"
#include "dev/drm/mach64_drm.h"
#include "dev/drm/mach64_drv.h"
#include "dev/drm/drm_pciids.h"
/* drv_PCI_IDs comes from drm_pciids.h, generated from drm_pciids.txt. */
static drm_pci_id_list_t mach64_pciidlist[] = {
mach64_PCI_IDS
};
static void mach64_configure(struct drm_device *dev)
{
dev->driver->driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA |
DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ;
dev->driver->buf_priv_size = 1; /* No dev_priv */
dev->driver->load = mach64_driver_load;
dev->driver->lastclose = mach64_driver_lastclose;
dev->driver->get_vblank_counter = mach64_get_vblank_counter;
dev->driver->enable_vblank = mach64_enable_vblank;
dev->driver->disable_vblank = mach64_disable_vblank;
dev->driver->irq_preinstall = mach64_driver_irq_preinstall;
dev->driver->irq_postinstall = mach64_driver_irq_postinstall;
dev->driver->irq_uninstall = mach64_driver_irq_uninstall;
dev->driver->irq_handler = mach64_driver_irq_handler;
dev->driver->dma_ioctl = mach64_dma_buffers;
dev->driver->ioctls = mach64_ioctls;
dev->driver->max_ioctl = mach64_max_ioctl;
dev->driver->name = DRIVER_NAME;
dev->driver->desc = DRIVER_DESC;
dev->driver->date = DRIVER_DATE;
dev->driver->major = DRIVER_MAJOR;
dev->driver->minor = DRIVER_MINOR;
dev->driver->patchlevel = DRIVER_PATCHLEVEL;
}
static int
mach64_probe(device_t kdev)
{
return drm_probe(kdev, mach64_pciidlist);
}
static int
mach64_attach(device_t kdev)
{
struct drm_device *dev = device_get_softc(kdev);
dev->driver = malloc(sizeof(struct drm_driver_info), DRM_MEM_DRIVER,
M_WAITOK | M_ZERO);
mach64_configure(dev);
return drm_attach(kdev, mach64_pciidlist);
}
int
mach64_driver_load(struct drm_device * dev, unsigned long flags)
{
return drm_vblank_init(dev, 1);
}
static int
mach64_detach(device_t kdev)
{
struct drm_device *dev = device_get_softc(kdev);
int ret;
ret = drm_detach(kdev);
free(dev->driver, DRM_MEM_DRIVER);
return ret;
}
static device_method_t mach64_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, mach64_probe),
DEVMETHOD(device_attach, mach64_attach),
DEVMETHOD(device_detach, mach64_detach),
{ 0, 0 }
};
static driver_t mach64_driver = {
"drm",
mach64_methods,
sizeof(struct drm_device)
};
extern devclass_t drm_devclass;
DRIVER_MODULE(mach64, vgapci, mach64_driver, drm_devclass, 0, 0);
MODULE_DEPEND(mach64, drm, 1, 1, 1);


@@ -1,863 +0,0 @@
/* mach64_drv.h -- Private header for mach64 driver -*- linux-c -*-
* Created: Fri Nov 24 22:07:58 2000 by gareth@valinux.com
*/
/*-
* Copyright 2000 Gareth Hughes
* Copyright 2002 Frank C. Earl
* Copyright 2002-2003 Leif Delgass
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT OWNER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Gareth Hughes <gareth@valinux.com>
* Frank C. Earl <fearl@airmail.net>
* Leif Delgass <ldelgass@retinalburn.net>
* José Fonseca <j_r_fonseca@yahoo.co.uk>
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#ifndef __MACH64_DRV_H__
#define __MACH64_DRV_H__
/* General customization:
*/
#define DRIVER_AUTHOR "Gareth Hughes, Leif Delgass, José Fonseca"
#define DRIVER_NAME "mach64"
#define DRIVER_DESC "DRM module for the ATI Rage Pro"
#define DRIVER_DATE "20060718"
#define DRIVER_MAJOR 2
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0
/* FIXME: remove these when not needed */
/* Development driver options */
#define MACH64_EXTRA_CHECKING 0 /* Extra sanity checks for DMA/freelist management */
#define MACH64_VERBOSE 0 /* Verbose debugging output */
typedef struct drm_mach64_freelist {
struct list_head list; /* List pointers for free_list, placeholders, or pending list */
struct drm_buf *buf; /* Pointer to the buffer */
int discard; /* This flag is set when we're done (re)using a buffer */
u32 ring_ofs; /* dword offset in ring of last descriptor for this buffer */
} drm_mach64_freelist_t;
typedef struct drm_mach64_descriptor_ring {
void *start; /* write pointer (cpu address) to start of descriptor ring */
u32 start_addr; /* bus address of beginning of descriptor ring */
int size; /* size of ring in bytes */
u32 head_addr; /* bus address of descriptor ring head */
u32 head; /* dword offset of descriptor ring head */
u32 tail; /* dword offset of descriptor ring tail */
u32 tail_mask; /* mask used to wrap ring */
int space; /* number of free bytes in ring */
} drm_mach64_descriptor_ring_t;
typedef struct drm_mach64_private {
drm_mach64_sarea_t *sarea_priv;
int is_pci;
drm_mach64_dma_mode_t driver_mode; /* Async DMA, sync DMA, or MMIO */
int usec_timeout; /* Timeout for the wait functions */
drm_mach64_descriptor_ring_t ring; /* DMA descriptor table (ring buffer) */
int ring_running; /* Is bus mastering enabled */
struct list_head free_list; /* Free-list head */
struct list_head placeholders; /* Placeholder list for buffers held by clients */
struct list_head pending; /* Buffers pending completion */
u32 frame_ofs[MACH64_MAX_QUEUED_FRAMES]; /* dword ring offsets of most recent frame swaps */
unsigned int fb_bpp;
unsigned int front_offset, front_pitch;
unsigned int back_offset, back_pitch;
unsigned int depth_bpp;
unsigned int depth_offset, depth_pitch;
atomic_t vbl_received; /**< Number of vblanks received. */
u32 front_offset_pitch;
u32 back_offset_pitch;
u32 depth_offset_pitch;
drm_local_map_t *sarea;
drm_local_map_t *fb;
drm_local_map_t *mmio;
drm_local_map_t *ring_map;
drm_local_map_t *dev_buffers; /* this is a pointer to a structure in dev */
drm_local_map_t *agp_textures;
} drm_mach64_private_t;
extern struct drm_ioctl_desc mach64_ioctls[];
extern int mach64_max_ioctl;
/* mach64_dma.c */
extern int mach64_dma_init(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int mach64_dma_idle(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int mach64_dma_flush(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int mach64_engine_reset(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int mach64_dma_buffers(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern void mach64_driver_lastclose(struct drm_device * dev);
extern int mach64_init_freelist(struct drm_device * dev);
extern void mach64_destroy_freelist(struct drm_device * dev);
extern struct drm_buf *mach64_freelist_get(drm_mach64_private_t * dev_priv);
extern int mach64_freelist_put(drm_mach64_private_t * dev_priv,
struct drm_buf * copy_buf);
extern int mach64_do_wait_for_fifo(drm_mach64_private_t * dev_priv,
int entries);
extern int mach64_do_wait_for_idle(drm_mach64_private_t * dev_priv);
extern int mach64_wait_ring(drm_mach64_private_t * dev_priv, int n);
extern int mach64_do_dispatch_pseudo_dma(drm_mach64_private_t * dev_priv);
extern int mach64_do_release_used_buffers(drm_mach64_private_t * dev_priv);
extern void mach64_dump_engine_info(drm_mach64_private_t * dev_priv);
extern void mach64_dump_ring_info(drm_mach64_private_t * dev_priv);
extern int mach64_do_engine_reset(drm_mach64_private_t * dev_priv);
extern int mach64_add_buf_to_ring(drm_mach64_private_t *dev_priv,
drm_mach64_freelist_t *_entry);
extern int mach64_add_hostdata_buf_to_ring(drm_mach64_private_t *dev_priv,
drm_mach64_freelist_t *_entry);
extern int mach64_do_dma_idle(drm_mach64_private_t * dev_priv);
extern int mach64_do_dma_flush(drm_mach64_private_t * dev_priv);
extern int mach64_do_cleanup_dma(struct drm_device * dev);
/* mach64_state.c */
extern int mach64_dma_clear(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int mach64_dma_swap(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int mach64_dma_vertex(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int mach64_dma_blit(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int mach64_get_param(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int mach64_driver_load(struct drm_device * dev, unsigned long flags);
extern u32 mach64_get_vblank_counter(struct drm_device *dev, int crtc);
extern int mach64_enable_vblank(struct drm_device *dev, int crtc);
extern void mach64_disable_vblank(struct drm_device *dev, int crtc);
extern irqreturn_t mach64_driver_irq_handler(DRM_IRQ_ARGS);
extern void mach64_driver_irq_preinstall(struct drm_device *dev);
extern int mach64_driver_irq_postinstall(struct drm_device *dev);
extern void mach64_driver_irq_uninstall(struct drm_device *dev);
/* ================================================================
* Registers
*/
#define MACH64_AGP_BASE 0x0148
#define MACH64_AGP_CNTL 0x014c
#define MACH64_ALPHA_TST_CNTL 0x0550
#define MACH64_DSP_CONFIG 0x0420
#define MACH64_DSP_ON_OFF 0x0424
#define MACH64_EXT_MEM_CNTL 0x04ac
#define MACH64_GEN_TEST_CNTL 0x04d0
#define MACH64_HW_DEBUG 0x047c
#define MACH64_MEM_ADDR_CONFIG 0x0434
#define MACH64_MEM_BUF_CNTL 0x042c
#define MACH64_MEM_CNTL 0x04b0
#define MACH64_BM_ADDR 0x0648
#define MACH64_BM_COMMAND 0x0188
#define MACH64_BM_DATA 0x0648
#define MACH64_BM_FRAME_BUF_OFFSET 0x0180
#define MACH64_BM_GUI_TABLE 0x01b8
#define MACH64_BM_GUI_TABLE_CMD 0x064c
# define MACH64_CIRCULAR_BUF_SIZE_16KB (0 << 0)
# define MACH64_CIRCULAR_BUF_SIZE_32KB (1 << 0)
# define MACH64_CIRCULAR_BUF_SIZE_64KB (2 << 0)
# define MACH64_CIRCULAR_BUF_SIZE_128KB (3 << 0)
# define MACH64_LAST_DESCRIPTOR (1U << 31)
#define MACH64_BM_HOSTDATA 0x0644
#define MACH64_BM_STATUS 0x018c
#define MACH64_BM_SYSTEM_MEM_ADDR 0x0184
#define MACH64_BM_SYSTEM_TABLE 0x01bc
#define MACH64_BUS_CNTL 0x04a0
# define MACH64_BUS_MSTR_RESET (1 << 1)
# define MACH64_BUS_APER_REG_DIS (1 << 4)
# define MACH64_BUS_FLUSH_BUF (1 << 2)
# define MACH64_BUS_MASTER_DIS (1 << 6)
# define MACH64_BUS_EXT_REG_EN (1 << 27)
#define MACH64_CLR_CMP_CLR 0x0700
#define MACH64_CLR_CMP_CNTL 0x0708
#define MACH64_CLR_CMP_MASK 0x0704
#define MACH64_CONFIG_CHIP_ID 0x04e0
#define MACH64_CONFIG_CNTL 0x04dc
#define MACH64_CONFIG_STAT0 0x04e4
#define MACH64_CONFIG_STAT1 0x0494
#define MACH64_CONFIG_STAT2 0x0498
#define MACH64_CONTEXT_LOAD_CNTL 0x072c
#define MACH64_CONTEXT_MASK 0x0720
#define MACH64_COMPOSITE_SHADOW_ID 0x0798
#define MACH64_CRC_SIG 0x04e8
#define MACH64_CUSTOM_MACRO_CNTL 0x04d4
#define MACH64_DP_BKGD_CLR 0x06c0
#define MACH64_DP_FOG_CLR 0x06c4
#define MACH64_DP_FGRD_BKGD_CLR 0x06e0
#define MACH64_DP_FRGD_CLR 0x06c4
#define MACH64_DP_FGRD_CLR_MIX 0x06dc
#define MACH64_DP_MIX 0x06d4
# define BKGD_MIX_NOT_D (0 << 0)
# define BKGD_MIX_ZERO (1 << 0)
# define BKGD_MIX_ONE (2 << 0)
# define MACH64_BKGD_MIX_D (3 << 0)
# define BKGD_MIX_NOT_S (4 << 0)
# define BKGD_MIX_D_XOR_S (5 << 0)
# define BKGD_MIX_NOT_D_XOR_S (6 << 0)
# define MACH64_BKGD_MIX_S (7 << 0)
# define BKGD_MIX_NOT_D_OR_NOT_S (8 << 0)
# define BKGD_MIX_D_OR_NOT_S (9 << 0)
# define BKGD_MIX_NOT_D_OR_S (10 << 0)
# define BKGD_MIX_D_OR_S (11 << 0)
# define BKGD_MIX_D_AND_S (12 << 0)
# define BKGD_MIX_NOT_D_AND_S (13 << 0)
# define BKGD_MIX_D_AND_NOT_S (14 << 0)
# define BKGD_MIX_NOT_D_AND_NOT_S (15 << 0)
# define BKGD_MIX_D_PLUS_S_DIV2 (23 << 0)
# define FRGD_MIX_NOT_D (0 << 16)
# define FRGD_MIX_ZERO (1 << 16)
# define FRGD_MIX_ONE (2 << 16)
# define FRGD_MIX_D (3 << 16)
# define FRGD_MIX_NOT_S (4 << 16)
# define FRGD_MIX_D_XOR_S (5 << 16)
# define FRGD_MIX_NOT_D_XOR_S (6 << 16)
# define MACH64_FRGD_MIX_S (7 << 16)
# define FRGD_MIX_NOT_D_OR_NOT_S (8 << 16)
# define FRGD_MIX_D_OR_NOT_S (9 << 16)
# define FRGD_MIX_NOT_D_OR_S (10 << 16)
# define FRGD_MIX_D_OR_S (11 << 16)
# define FRGD_MIX_D_AND_S (12 << 16)
# define FRGD_MIX_NOT_D_AND_S (13 << 16)
# define FRGD_MIX_D_AND_NOT_S (14 << 16)
# define FRGD_MIX_NOT_D_AND_NOT_S (15 << 16)
# define FRGD_MIX_D_PLUS_S_DIV2 (23 << 16)
#define MACH64_DP_PIX_WIDTH 0x06d0
# define MACH64_HOST_TRIPLE_ENABLE (1 << 13)
# define MACH64_BYTE_ORDER_MSB_TO_LSB (0 << 24)
# define MACH64_BYTE_ORDER_LSB_TO_MSB (1 << 24)
#define MACH64_DP_SRC 0x06d8
# define MACH64_BKGD_SRC_BKGD_CLR (0 << 0)
# define MACH64_BKGD_SRC_FRGD_CLR (1 << 0)
# define MACH64_BKGD_SRC_HOST (2 << 0)
# define MACH64_BKGD_SRC_BLIT (3 << 0)
# define MACH64_BKGD_SRC_PATTERN (4 << 0)
# define MACH64_BKGD_SRC_3D (5 << 0)
# define MACH64_FRGD_SRC_BKGD_CLR (0 << 8)
# define MACH64_FRGD_SRC_FRGD_CLR (1 << 8)
# define MACH64_FRGD_SRC_HOST (2 << 8)
# define MACH64_FRGD_SRC_BLIT (3 << 8)
# define MACH64_FRGD_SRC_PATTERN (4 << 8)
# define MACH64_FRGD_SRC_3D (5 << 8)
# define MACH64_MONO_SRC_ONE (0 << 16)
# define MACH64_MONO_SRC_PATTERN (1 << 16)
# define MACH64_MONO_SRC_HOST (2 << 16)
# define MACH64_MONO_SRC_BLIT (3 << 16)
#define MACH64_DP_WRITE_MASK 0x06c8
#define MACH64_DST_CNTL 0x0530
# define MACH64_DST_X_RIGHT_TO_LEFT (0 << 0)
# define MACH64_DST_X_LEFT_TO_RIGHT (1 << 0)
# define MACH64_DST_Y_BOTTOM_TO_TOP (0 << 1)
# define MACH64_DST_Y_TOP_TO_BOTTOM (1 << 1)
# define MACH64_DST_X_MAJOR (0 << 2)
# define MACH64_DST_Y_MAJOR (1 << 2)
# define MACH64_DST_X_TILE (1 << 3)
# define MACH64_DST_Y_TILE (1 << 4)
# define MACH64_DST_LAST_PEL (1 << 5)
# define MACH64_DST_POLYGON_ENABLE (1 << 6)
# define MACH64_DST_24_ROTATION_ENABLE (1 << 7)
#define MACH64_DST_HEIGHT_WIDTH 0x0518
#define MACH64_DST_OFF_PITCH 0x0500
#define MACH64_DST_WIDTH_HEIGHT 0x06ec
#define MACH64_DST_X_Y 0x06e8
#define MACH64_DST_Y_X 0x050c
#define MACH64_FIFO_STAT 0x0710
# define MACH64_FIFO_SLOT_MASK 0x0000ffff
# define MACH64_FIFO_ERR (1U << 31)
#define MACH64_GEN_TEST_CNTL 0x04d0
# define MACH64_GUI_ENGINE_ENABLE (1 << 8)
#define MACH64_GUI_CMDFIFO_DEBUG 0x0170
#define MACH64_GUI_CMDFIFO_DATA 0x0174
#define MACH64_GUI_CNTL 0x0178
# define MACH64_CMDFIFO_SIZE_MASK 0x00000003ul
# define MACH64_CMDFIFO_SIZE_192 0x00000000ul
# define MACH64_CMDFIFO_SIZE_128 0x00000001ul
# define MACH64_CMDFIFO_SIZE_64 0x00000002ul
#define MACH64_GUI_STAT 0x0738
# define MACH64_GUI_ACTIVE (1 << 0)
#define MACH64_GUI_TRAJ_CNTL 0x0730
#define MACH64_HOST_CNTL 0x0640
#define MACH64_HOST_DATA0 0x0600
#define MACH64_ONE_OVER_AREA 0x029c
#define MACH64_ONE_OVER_AREA_UC 0x0300
#define MACH64_PAT_REG0 0x0680
#define MACH64_PAT_REG1 0x0684
#define MACH64_SC_LEFT 0x06a0
#define MACH64_SC_RIGHT 0x06a4
#define MACH64_SC_LEFT_RIGHT 0x06a8
#define MACH64_SC_TOP 0x06ac
#define MACH64_SC_BOTTOM 0x06b0
#define MACH64_SC_TOP_BOTTOM 0x06b4
#define MACH64_SCALE_3D_CNTL 0x05fc
#define MACH64_SCRATCH_REG0 0x0480
#define MACH64_SCRATCH_REG1 0x0484
#define MACH64_SECONDARY_TEX_OFF 0x0778
#define MACH64_SETUP_CNTL 0x0304
#define MACH64_SRC_CNTL 0x05b4
# define MACH64_SRC_BM_ENABLE (1 << 8)
# define MACH64_SRC_BM_SYNC (1 << 9)
# define MACH64_SRC_BM_OP_FRAME_TO_SYSTEM (0 << 10)
# define MACH64_SRC_BM_OP_SYSTEM_TO_FRAME (1 << 10)
# define MACH64_SRC_BM_OP_REG_TO_SYSTEM (2 << 10)
# define MACH64_SRC_BM_OP_SYSTEM_TO_REG (3 << 10)
#define MACH64_SRC_HEIGHT1 0x0594
#define MACH64_SRC_HEIGHT2 0x05ac
#define MACH64_SRC_HEIGHT1_WIDTH1 0x0598
#define MACH64_SRC_HEIGHT2_WIDTH2 0x05b0
#define MACH64_SRC_OFF_PITCH 0x0580
#define MACH64_SRC_WIDTH1 0x0590
#define MACH64_SRC_Y_X 0x058c
#define MACH64_TEX_0_OFF 0x05c0
#define MACH64_TEX_CNTL 0x0774
#define MACH64_TEX_SIZE_PITCH 0x0770
#define MACH64_TIMER_CONFIG 0x0428
#define MACH64_VERTEX_1_ARGB 0x0254
#define MACH64_VERTEX_1_S 0x0240
#define MACH64_VERTEX_1_SECONDARY_S 0x0328
#define MACH64_VERTEX_1_SECONDARY_T 0x032c
#define MACH64_VERTEX_1_SECONDARY_W 0x0330
#define MACH64_VERTEX_1_SPEC_ARGB 0x024c
#define MACH64_VERTEX_1_T 0x0244
#define MACH64_VERTEX_1_W 0x0248
#define MACH64_VERTEX_1_X_Y 0x0258
#define MACH64_VERTEX_1_Z 0x0250
#define MACH64_VERTEX_2_ARGB 0x0274
#define MACH64_VERTEX_2_S 0x0260
#define MACH64_VERTEX_2_SECONDARY_S 0x0334
#define MACH64_VERTEX_2_SECONDARY_T 0x0338
#define MACH64_VERTEX_2_SECONDARY_W 0x033c
#define MACH64_VERTEX_2_SPEC_ARGB 0x026c
#define MACH64_VERTEX_2_T 0x0264
#define MACH64_VERTEX_2_W 0x0268
#define MACH64_VERTEX_2_X_Y 0x0278
#define MACH64_VERTEX_2_Z 0x0270
#define MACH64_VERTEX_3_ARGB 0x0294
#define MACH64_VERTEX_3_S 0x0280
#define MACH64_VERTEX_3_SECONDARY_S 0x02a0
#define MACH64_VERTEX_3_SECONDARY_T 0x02a4
#define MACH64_VERTEX_3_SECONDARY_W 0x02a8
#define MACH64_VERTEX_3_SPEC_ARGB 0x028c
#define MACH64_VERTEX_3_T 0x0284
#define MACH64_VERTEX_3_W 0x0288
#define MACH64_VERTEX_3_X_Y 0x0298
#define MACH64_VERTEX_3_Z 0x0290
#define MACH64_Z_CNTL 0x054c
#define MACH64_Z_OFF_PITCH 0x0548
#define MACH64_CRTC_VLINE_CRNT_VLINE 0x0410
# define MACH64_CRTC_VLINE_MASK 0x000007ff
# define MACH64_CRTC_CRNT_VLINE_MASK 0x07ff0000
#define MACH64_CRTC_OFF_PITCH 0x0414
#define MACH64_CRTC_INT_CNTL 0x0418
# define MACH64_CRTC_VBLANK (1 << 0)
# define MACH64_CRTC_VBLANK_INT_EN (1 << 1)
# define MACH64_CRTC_VBLANK_INT (1 << 2)
# define MACH64_CRTC_VLINE_INT_EN (1 << 3)
# define MACH64_CRTC_VLINE_INT (1 << 4)
# define MACH64_CRTC_VLINE_SYNC (1 << 5) /* 0=even, 1=odd */
# define MACH64_CRTC_FRAME (1 << 6) /* 0=even, 1=odd */
# define MACH64_CRTC_SNAPSHOT_INT_EN (1 << 7)
# define MACH64_CRTC_SNAPSHOT_INT (1 << 8)
# define MACH64_CRTC_I2C_INT_EN (1 << 9)
# define MACH64_CRTC_I2C_INT (1 << 10)
# define MACH64_CRTC2_VBLANK (1 << 11) /* LT Pro */
# define MACH64_CRTC2_VBLANK_INT_EN (1 << 12) /* LT Pro */
# define MACH64_CRTC2_VBLANK_INT (1 << 13) /* LT Pro */
# define MACH64_CRTC2_VLINE_INT_EN (1 << 14) /* LT Pro */
# define MACH64_CRTC2_VLINE_INT (1 << 15) /* LT Pro */
# define MACH64_CRTC_CAPBUF0_INT_EN (1 << 16)
# define MACH64_CRTC_CAPBUF0_INT (1 << 17)
# define MACH64_CRTC_CAPBUF1_INT_EN (1 << 18)
# define MACH64_CRTC_CAPBUF1_INT (1 << 19)
# define MACH64_CRTC_OVERLAY_EOF_INT_EN (1 << 20)
# define MACH64_CRTC_OVERLAY_EOF_INT (1 << 21)
# define MACH64_CRTC_ONESHOT_CAP_INT_EN (1 << 22)
# define MACH64_CRTC_ONESHOT_CAP_INT (1 << 23)
# define MACH64_CRTC_BUSMASTER_EOL_INT_EN (1 << 24)
# define MACH64_CRTC_BUSMASTER_EOL_INT (1 << 25)
# define MACH64_CRTC_GP_INT_EN (1 << 26)
# define MACH64_CRTC_GP_INT (1 << 27)
# define MACH64_CRTC2_VLINE_SYNC (1 << 28) /* LT Pro */ /* 0=even, 1=odd */
# define MACH64_CRTC_SNAPSHOT2_INT_EN (1 << 29) /* LT Pro */
# define MACH64_CRTC_SNAPSHOT2_INT (1 << 30) /* LT Pro */
# define MACH64_CRTC_VBLANK2_INT (1U << 31)
# define MACH64_CRTC_INT_ENS \
( \
MACH64_CRTC_VBLANK_INT_EN | \
MACH64_CRTC_VLINE_INT_EN | \
MACH64_CRTC_SNAPSHOT_INT_EN | \
MACH64_CRTC_I2C_INT_EN | \
MACH64_CRTC2_VBLANK_INT_EN | \
MACH64_CRTC2_VLINE_INT_EN | \
MACH64_CRTC_CAPBUF0_INT_EN | \
MACH64_CRTC_CAPBUF1_INT_EN | \
MACH64_CRTC_OVERLAY_EOF_INT_EN | \
MACH64_CRTC_ONESHOT_CAP_INT_EN | \
MACH64_CRTC_BUSMASTER_EOL_INT_EN | \
MACH64_CRTC_GP_INT_EN | \
MACH64_CRTC_SNAPSHOT2_INT_EN | \
0 \
)
# define MACH64_CRTC_INT_ACKS \
( \
MACH64_CRTC_VBLANK_INT | \
MACH64_CRTC_VLINE_INT | \
MACH64_CRTC_SNAPSHOT_INT | \
MACH64_CRTC_I2C_INT | \
MACH64_CRTC2_VBLANK_INT | \
MACH64_CRTC2_VLINE_INT | \
MACH64_CRTC_CAPBUF0_INT | \
MACH64_CRTC_CAPBUF1_INT | \
MACH64_CRTC_OVERLAY_EOF_INT | \
MACH64_CRTC_ONESHOT_CAP_INT | \
MACH64_CRTC_BUSMASTER_EOL_INT | \
MACH64_CRTC_GP_INT | \
MACH64_CRTC_SNAPSHOT2_INT | \
MACH64_CRTC_VBLANK2_INT | \
0 \
)
#define MACH64_DATATYPE_CI8 2
#define MACH64_DATATYPE_ARGB1555 3
#define MACH64_DATATYPE_RGB565 4
#define MACH64_DATATYPE_ARGB8888 6
#define MACH64_DATATYPE_RGB332 7
#define MACH64_DATATYPE_Y8 8
#define MACH64_DATATYPE_RGB8 9
#define MACH64_DATATYPE_VYUY422 11
#define MACH64_DATATYPE_YVYU422 12
#define MACH64_DATATYPE_AYUV444 14
#define MACH64_DATATYPE_ARGB4444 15
#define MACH64_READ(reg) DRM_READ32(dev_priv->mmio, (reg) )
#define MACH64_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio, (reg), (val) )
#define DWMREG0 0x0400
#define DWMREG0_END 0x07ff
#define DWMREG1 0x0000
#define DWMREG1_END 0x03ff
#define ISREG0(r) (((r) >= DWMREG0) && ((r) <= DWMREG0_END))
#define DMAREG0(r) (((r) - DWMREG0) >> 2)
#define DMAREG1(r) ((((r) - DWMREG1) >> 2 ) | 0x0100)
#define DMAREG(r) (ISREG0(r) ? DMAREG0(r) : DMAREG1(r))
#define MMREG0 0x0000
#define MMREG0_END 0x00ff
#define ISMMREG0(r) (((r) >= MMREG0) && ((r) <= MMREG0_END))
#define MMSELECT0(r) (((r) << 2) + DWMREG0)
#define MMSELECT1(r) (((((r) & 0xff) << 2) + DWMREG1))
#define MMSELECT(r) (ISMMREG0(r) ? MMSELECT0(r) : MMSELECT1(r))
/* ================================================================
* DMA constants
*/
/* DMA descriptor field indices:
* The descriptor fields are loaded into the read-only
* BM_* system bus master registers during a bus-master operation
*/
#define MACH64_DMA_FRAME_BUF_OFFSET 0 /* BM_FRAME_BUF_OFFSET */
#define MACH64_DMA_SYS_MEM_ADDR 1 /* BM_SYSTEM_MEM_ADDR */
#define MACH64_DMA_COMMAND 2 /* BM_COMMAND */
#define MACH64_DMA_RESERVED 3 /* BM_STATUS */
/* BM_COMMAND descriptor field flags */
#define MACH64_DMA_HOLD_OFFSET (1<<30) /* Don't increment DMA_FRAME_BUF_OFFSET */
#define MACH64_DMA_EOL (1<<31) /* End of descriptor list flag */
#define MACH64_DMA_CHUNKSIZE 0x1000 /* 4kB per DMA descriptor */
#define MACH64_APERTURE_OFFSET 0x7ff800 /* frame-buffer offset for gui-masters */
/* ================================================================
* Ring operations
*
* Since the Mach64 bus master engine requires polling, these functions end
* up being called frequently, hence they are inlined.
*/
static __inline__ void mach64_ring_start(drm_mach64_private_t * dev_priv)
{
drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
DRM_DEBUG("head_addr: 0x%08x head: %d tail: %d space: %d\n",
ring->head_addr, ring->head, ring->tail, ring->space);
if (mach64_do_wait_for_idle(dev_priv) < 0) {
mach64_do_engine_reset(dev_priv);
}
if (dev_priv->driver_mode != MACH64_MODE_MMIO) {
/* enable bus mastering and block 1 registers */
MACH64_WRITE(MACH64_BUS_CNTL,
(MACH64_READ(MACH64_BUS_CNTL) &
~MACH64_BUS_MASTER_DIS)
| MACH64_BUS_EXT_REG_EN);
mach64_do_wait_for_idle(dev_priv);
}
/* reset descriptor table ring head */
MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD,
ring->head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB);
dev_priv->ring_running = 1;
}
static __inline__ void mach64_ring_resume(drm_mach64_private_t * dev_priv,
drm_mach64_descriptor_ring_t * ring)
{
DRM_DEBUG("head_addr: 0x%08x head: %d tail: %d space: %d\n",
ring->head_addr, ring->head, ring->tail, ring->space);
/* reset descriptor table ring head */
MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD,
ring->head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB);
if (dev_priv->driver_mode == MACH64_MODE_MMIO) {
mach64_do_dispatch_pseudo_dma(dev_priv);
} else {
/* enable GUI bus mastering, and sync the bus master to the GUI */
MACH64_WRITE(MACH64_SRC_CNTL,
MACH64_SRC_BM_ENABLE | MACH64_SRC_BM_SYNC |
MACH64_SRC_BM_OP_SYSTEM_TO_REG);
/* kick off the transfer */
MACH64_WRITE(MACH64_DST_HEIGHT_WIDTH, 0);
if (dev_priv->driver_mode == MACH64_MODE_DMA_SYNC) {
if ((mach64_do_wait_for_idle(dev_priv)) < 0) {
DRM_ERROR("idle failed, resetting engine\n");
mach64_dump_engine_info(dev_priv);
mach64_do_engine_reset(dev_priv);
return;
}
mach64_do_release_used_buffers(dev_priv);
}
}
}
/**
* Poll the ring head and make sure the bus master is alive.
*
* Mach64's bus master engine will stop if there are no more entries to process.
* This function polls the engine for the last processed entry and calls
* mach64_ring_resume if there is an unprocessed entry.
*
* Note also that, since we update the ring tail while the bus master engine is
* in operation, it is possible that the last tail update was too late to be
* processed, and the bus master engine stops at the previous tail position.
* Therefore it is important to call this function frequently.
*/
static __inline__ void mach64_ring_tick(drm_mach64_private_t * dev_priv,
drm_mach64_descriptor_ring_t * ring)
{
DRM_DEBUG("head_addr: 0x%08x head: %d tail: %d space: %d\n",
ring->head_addr, ring->head, ring->tail, ring->space);
if (!dev_priv->ring_running) {
mach64_ring_start(dev_priv);
if (ring->head != ring->tail) {
mach64_ring_resume(dev_priv, ring);
}
} else {
/* GUI_ACTIVE must be read before BM_GUI_TABLE to
* correctly determine the ring head
*/
int gui_active =
MACH64_READ(MACH64_GUI_STAT) & MACH64_GUI_ACTIVE;
ring->head_addr = MACH64_READ(MACH64_BM_GUI_TABLE) & 0xfffffff0;
if (gui_active) {
/* If not idle, BM_GUI_TABLE points one descriptor
* past the current head
*/
if (ring->head_addr == ring->start_addr) {
ring->head_addr += ring->size;
}
ring->head_addr -= 4 * sizeof(u32);
}
if (ring->head_addr < ring->start_addr ||
ring->head_addr >= ring->start_addr + ring->size) {
DRM_ERROR("bad ring head address: 0x%08x\n",
ring->head_addr);
mach64_dump_ring_info(dev_priv);
mach64_do_engine_reset(dev_priv);
return;
}
ring->head = (ring->head_addr - ring->start_addr) / sizeof(u32);
if (!gui_active && ring->head != ring->tail) {
mach64_ring_resume(dev_priv, ring);
}
}
}
static __inline__ void mach64_ring_stop(drm_mach64_private_t * dev_priv)
{
DRM_DEBUG("head_addr: 0x%08x head: %d tail: %d space: %d\n",
dev_priv->ring.head_addr, dev_priv->ring.head,
dev_priv->ring.tail, dev_priv->ring.space);
/* restore previous SRC_CNTL to disable busmastering */
mach64_do_wait_for_fifo(dev_priv, 1);
MACH64_WRITE(MACH64_SRC_CNTL, 0);
/* disable busmastering but keep the block 1 registers enabled */
mach64_do_wait_for_idle(dev_priv);
MACH64_WRITE(MACH64_BUS_CNTL, MACH64_READ(MACH64_BUS_CNTL)
| MACH64_BUS_MASTER_DIS | MACH64_BUS_EXT_REG_EN);
dev_priv->ring_running = 0;
}
static __inline__ void
mach64_update_ring_snapshot(drm_mach64_private_t * dev_priv)
{
drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
DRM_DEBUG("\n");
mach64_ring_tick(dev_priv, ring);
ring->space = (ring->head - ring->tail) * sizeof(u32);
if (ring->space <= 0) {
ring->space += ring->size;
}
}
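/*
 * Example of the wrap handling above: with the 16KB descriptor ring that
 * mach64_ring_start() programs, head == 10 and tail == 50 (dword offsets)
 * gives (10 - 50) * 4 == -160 bytes, which is <= 0, so 16384 is added and
 * 16224 bytes remain free.  When head and tail are equal the engine has
 * caught up and the whole ring is free.
 */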
/* ================================================================
* DMA macros
*
* Mach64's ring buffer doesn't take register writes directly. These
* have to be written indirectly in DMA buffers. These macros simplify
* the task of setting up a buffer, writing commands to it, and
* queuing the buffer in the ring.
*/
#define DMALOCALS \
drm_mach64_freelist_t *_entry = NULL; \
struct drm_buf *_buf = NULL; \
u32 *_buf_wptr; int _outcount
#define GETBUFPTR( __buf ) \
((dev_priv->is_pci) ? \
((u32 *)(__buf)->address) : \
((u32 *)((char *)dev_priv->dev_buffers->handle + (__buf)->offset)))
#define GETBUFADDR( __buf ) ((u32)(__buf)->bus_address)
#define GETRINGOFFSET() (_entry->ring_ofs)
static __inline__ int mach64_find_pending_buf_entry(drm_mach64_private_t *
dev_priv,
drm_mach64_freelist_t **
entry, struct drm_buf * buf)
{
struct list_head *ptr;
#if MACH64_EXTRA_CHECKING
if (list_empty(&dev_priv->pending)) {
DRM_ERROR("Empty pending list\n");
return -EINVAL;
}
#endif
ptr = dev_priv->pending.prev;
*entry = list_entry(ptr, drm_mach64_freelist_t, list);
while ((*entry)->buf != buf) {
if (ptr == &dev_priv->pending) {
return -EFAULT;
}
ptr = ptr->prev;
*entry = list_entry(ptr, drm_mach64_freelist_t, list);
}
return 0;
}
#define DMASETPTR( _p ) \
do { \
_buf = (_p); \
_outcount = 0; \
_buf_wptr = GETBUFPTR( _buf ); \
} while(0)
/* FIXME: use a private set of smaller buffers for state emits, clears, and swaps? */
#define DMAGETPTR( file_priv, dev_priv, n ) \
do { \
if ( MACH64_VERBOSE ) { \
DRM_INFO( "DMAGETPTR( %d )\n", (n) ); \
} \
_buf = mach64_freelist_get( dev_priv ); \
if (_buf == NULL) { \
DRM_ERROR("couldn't get buffer in DMAGETPTR\n"); \
return -EAGAIN; \
} \
if (_buf->pending) { \
DRM_ERROR("pending buf in DMAGETPTR\n"); \
return -EFAULT; \
} \
_buf->file_priv = file_priv; \
_outcount = 0; \
\
_buf_wptr = GETBUFPTR( _buf ); \
} while (0)
#define DMAOUTREG( reg, val ) \
do { \
if ( MACH64_VERBOSE ) { \
DRM_INFO( " DMAOUTREG( 0x%x = 0x%08x )\n", \
reg, val ); \
} \
_buf_wptr[_outcount++] = cpu_to_le32(DMAREG(reg)); \
_buf_wptr[_outcount++] = cpu_to_le32((val)); \
_buf->used += 8; \
} while (0)
#define DMAADVANCE( dev_priv, _discard ) \
do { \
struct list_head *ptr; \
int ret; \
\
if ( MACH64_VERBOSE ) { \
DRM_INFO( "DMAADVANCE()\n" ); \
} \
\
if (_buf->used <= 0) { \
DRM_ERROR( "DMAADVANCE(): sending empty buf %d\n", \
_buf->idx ); \
return -EFAULT; \
} \
if (_buf->pending) { \
/* This is a reused buffer, so we need to find it in the pending list */ \
if ((ret = mach64_find_pending_buf_entry(dev_priv, &_entry, _buf))) { \
DRM_ERROR( "DMAADVANCE(): couldn't find pending buf %d\n", _buf->idx ); \
return ret; \
} \
if (_entry->discard) { \
DRM_ERROR( "DMAADVANCE(): sending discarded pending buf %d\n", _buf->idx ); \
return -EFAULT; \
} \
} else { \
if (list_empty(&dev_priv->placeholders)) { \
DRM_ERROR( "DMAADVANCE(): empty placeholder list\n"); \
return -EFAULT; \
} \
ptr = dev_priv->placeholders.next; \
list_del(ptr); \
_entry = list_entry(ptr, drm_mach64_freelist_t, list); \
_buf->pending = 1; \
_entry->buf = _buf; \
list_add_tail(ptr, &dev_priv->pending); \
} \
_entry->discard = (_discard); \
if ((ret = mach64_add_buf_to_ring( dev_priv, _entry ))) \
return ret; \
} while (0)
#define DMADISCARDBUF() \
do { \
if (_entry == NULL) { \
int ret; \
if ((ret = mach64_find_pending_buf_entry(dev_priv, &_entry, _buf))) { \
DRM_ERROR( "couldn't find pending buf %d\n", \
_buf->idx ); \
return ret; \
} \
} \
_entry->discard = 1; \
} while(0)
#define DMAADVANCEHOSTDATA( dev_priv ) \
do { \
struct list_head *ptr; \
int ret; \
\
if ( MACH64_VERBOSE ) { \
DRM_INFO( "DMAADVANCEHOSTDATA()\n" ); \
} \
\
if (_buf->used <= 0) { \
DRM_ERROR( "DMAADVANCEHOSTDATA(): sending empty buf %d\n", _buf->idx ); \
return -EFAULT; \
} \
if (list_empty(&dev_priv->placeholders)) { \
DRM_ERROR( "empty placeholder list in DMAADVANCEHOSTDATA()\n" ); \
return -EFAULT; \
} \
\
ptr = dev_priv->placeholders.next; \
list_del(ptr); \
_entry = list_entry(ptr, drm_mach64_freelist_t, list); \
_entry->buf = _buf; \
_entry->buf->pending = 1; \
list_add_tail(ptr, &dev_priv->pending); \
_entry->discard = 1; \
if ((ret = mach64_add_hostdata_buf_to_ring( dev_priv, _entry ))) \
return ret; \
} while (0)
#endif /* __MACH64_DRV_H__ */
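The DMA macros defined at the end of this header write register-index/value pairs into a DMA buffer, with DMAREG() translating an MMIO byte offset into the index the bus-master engine expects and MMSELECT() inverting it. The stand-alone sketch below (user-space, illustration only) copies the translation macros from above, checks the round trip for one block-0 and one block-1 register, and shows the two-dword encoding that a single DMAOUTREG() produces; the register value written is a placeholder.

#include <assert.h>
#include <stdint.h>

#define DWMREG0		0x0400
#define DWMREG0_END	0x07ff
#define DWMREG1		0x0000
#define ISREG0(r)	(((r) >= DWMREG0) && ((r) <= DWMREG0_END))
#define DMAREG0(r)	(((r) - DWMREG0) >> 2)
#define DMAREG1(r)	((((r) - DWMREG1) >> 2) | 0x0100)
#define DMAREG(r)	(ISREG0(r) ? DMAREG0(r) : DMAREG1(r))
#define MMREG0		0x0000
#define MMREG0_END	0x00ff
#define ISMMREG0(r)	(((r) >= MMREG0) && ((r) <= MMREG0_END))
#define MMSELECT0(r)	(((r) << 2) + DWMREG0)
#define MMSELECT1(r)	(((((r) & 0xff) << 2) + DWMREG1))
#define MMSELECT(r)	(ISMMREG0(r) ? MMSELECT0(r) : MMSELECT1(r))

int
main(void)
{
	uint32_t buf[8];
	int outcount = 0;

	/* Block-0 register MACH64_DST_OFF_PITCH (0x0500) <-> index 0x40. */
	assert(DMAREG(0x0500) == 0x40 && MMSELECT(0x40) == 0x0500);
	/* Block-1 register MACH64_VERTEX_1_X_Y (0x0258) <-> index 0x196. */
	assert(DMAREG(0x0258) == 0x196 && MMSELECT(0x196) == 0x0258);

	/* DMAOUTREG(MACH64_DST_OFF_PITCH, value) emits two dwords: the DMA
	 * index, then the value (both little-endian in the real buffer). */
	buf[outcount++] = DMAREG(0x0500);
	buf[outcount++] = 0x00400100;		/* placeholder value */
	assert(outcount == 2 && buf[0] == 0x40);
	return (0);
}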


@@ -1,162 +0,0 @@
/* mach64_irq.c -- IRQ handling for ATI Mach64 -*- linux-c -*-
* Created: Tue Feb 25, 2003 by Leif Delgass, based on radeon_irq.c/r128_irq.c
*/
/*-
* Copyright (C) The Weather Channel, Inc. 2002.
* Copyright 2003 Leif Delgass
* All Rights Reserved.
*
* The Weather Channel (TM) funded Tungsten Graphics to develop the
* initial release of the Radeon 8500 driver under the XFree86 license.
* This notice must be preserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Keith Whitwell <keith@tungstengraphics.com>
* Eric Anholt <anholt@FreeBSD.org>
* Leif Delgass <ldelgass@retinalburn.net>
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "dev/drm/drmP.h"
#include "dev/drm/drm.h"
#include "dev/drm/mach64_drm.h"
#include "dev/drm/mach64_drv.h"
irqreturn_t mach64_driver_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = arg;
drm_mach64_private_t *dev_priv = dev->dev_private;
int status;
status = MACH64_READ(MACH64_CRTC_INT_CNTL);
/* VBLANK interrupt */
if (status & MACH64_CRTC_VBLANK_INT) {
/* Mask off all interrupt ack bits before setting the ack bit, since
* there may be other handlers outside the DRM.
*
* NOTE: On mach64, you need to keep the enable bits set when doing
* the ack, despite what the docs say about not acking and enabling
* in a single write.
*/
MACH64_WRITE(MACH64_CRTC_INT_CNTL,
(status & ~MACH64_CRTC_INT_ACKS)
| MACH64_CRTC_VBLANK_INT);
atomic_inc(&dev_priv->vbl_received);
drm_handle_vblank(dev, 0);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
u32 mach64_get_vblank_counter(struct drm_device * dev, int crtc)
{
const drm_mach64_private_t *const dev_priv = dev->dev_private;
if (crtc != 0)
return 0;
return atomic_read(&dev_priv->vbl_received);
}
int mach64_enable_vblank(struct drm_device * dev, int crtc)
{
drm_mach64_private_t *dev_priv = dev->dev_private;
u32 status = MACH64_READ(MACH64_CRTC_INT_CNTL);
if (crtc != 0) {
DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
crtc);
return -EINVAL;
}
DRM_DEBUG("before enable vblank CRTC_INT_CNTL: 0x%08x\n", status);
/* Turn on VBLANK interrupt */
MACH64_WRITE(MACH64_CRTC_INT_CNTL, MACH64_READ(MACH64_CRTC_INT_CNTL)
| MACH64_CRTC_VBLANK_INT_EN);
return 0;
}
void mach64_disable_vblank(struct drm_device * dev, int crtc)
{
if (crtc != 0) {
DRM_ERROR("tried to disable vblank on non-existent crtc %d\n",
crtc);
return;
}
/*
* FIXME: implement proper interrupt disable by using the vblank
* counter register (if available).
*/
}
static void mach64_disable_vblank_local(struct drm_device * dev, int crtc)
{
drm_mach64_private_t *dev_priv = dev->dev_private;
u32 status = MACH64_READ(MACH64_CRTC_INT_CNTL);
if (crtc != 0) {
DRM_ERROR("tried to disable vblank on non-existent crtc %d\n",
crtc);
return;
}
DRM_DEBUG("before disable vblank CRTC_INT_CNTL: 0x%08x\n", status);
/* Disable and clear VBLANK interrupt */
MACH64_WRITE(MACH64_CRTC_INT_CNTL, (status & ~MACH64_CRTC_VBLANK_INT_EN)
| MACH64_CRTC_VBLANK_INT);
}
void mach64_driver_irq_preinstall(struct drm_device * dev)
{
drm_mach64_private_t *dev_priv = dev->dev_private;
u32 status = MACH64_READ(MACH64_CRTC_INT_CNTL);
DRM_DEBUG("before install CRTC_INT_CNTL: 0x%08x\n", status);
mach64_disable_vblank_local(dev, 0);
}
int mach64_driver_irq_postinstall(struct drm_device * dev)
{
return 0;
}
void mach64_driver_irq_uninstall(struct drm_device * dev)
{
drm_mach64_private_t *dev_priv = dev->dev_private;
if (!dev_priv)
return;
mach64_disable_vblank_local(dev, 0);
DRM_DEBUG("after uninstall CRTC_INT_CNTL: 0x%08x\n",
MACH64_READ(MACH64_CRTC_INT_CNTL));
}
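The ack in mach64_driver_irq_handler() first masks every ack bit out of the status it read and then sets only the VBLANK ack bit, so other pending sources are left untouched and the enable bits survive the write. The stand-alone sketch below reproduces that bit arithmetic; the EX_* defines mirror three of the CRTC_INT_CNTL bits from mach64_drv.h, and EX_INT_ACKS is deliberately only a two-bit subset of the real MACH64_CRTC_INT_ACKS mask.

#include <assert.h>
#include <stdint.h>

#define EX_VBLANK_INT_EN	(1U << 1)	/* MACH64_CRTC_VBLANK_INT_EN */
#define EX_VBLANK_INT		(1U << 2)	/* MACH64_CRTC_VBLANK_INT */
#define EX_SNAPSHOT_INT		(1U << 8)	/* MACH64_CRTC_SNAPSHOT_INT */
#define EX_INT_ACKS		(EX_VBLANK_INT | EX_SNAPSHOT_INT)

int
main(void)
{
	/* VBLANK enabled; both VBLANK and SNAPSHOT currently pending. */
	uint32_t status = EX_VBLANK_INT_EN | EX_VBLANK_INT | EX_SNAPSHOT_INT;
	uint32_t ack = (status & ~EX_INT_ACKS) | EX_VBLANK_INT;

	assert(ack & EX_VBLANK_INT_EN);		/* enable bit preserved */
	assert(ack & EX_VBLANK_INT);		/* vblank acknowledged */
	assert(!(ack & EX_SNAPSHOT_INT));	/* snapshot left pending */
	return (0);
}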


@@ -1,914 +0,0 @@
/* mach64_state.c -- State support for mach64 (Rage Pro) driver -*- linux-c -*-
* Created: Sun Dec 03 19:20:26 2000 by gareth@valinux.com
*/
/*-
* Copyright 2000 Gareth Hughes
* Copyright 2002-2003 Leif Delgass
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT OWNER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Gareth Hughes <gareth@valinux.com>
* Leif Delgass <ldelgass@retinalburn.net>
* José Fonseca <j_r_fonseca@yahoo.co.uk>
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "dev/drm/drmP.h"
#include "dev/drm/drm.h"
#include "dev/drm/mach64_drm.h"
#include "dev/drm/mach64_drv.h"
/* Interface history:
*
* 1.0 - Initial mach64 DRM
*
*/
struct drm_ioctl_desc mach64_ioctls[] = {
DRM_IOCTL_DEF(DRM_MACH64_INIT, mach64_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_MACH64_CLEAR, mach64_dma_clear, DRM_AUTH),
DRM_IOCTL_DEF(DRM_MACH64_SWAP, mach64_dma_swap, DRM_AUTH),
DRM_IOCTL_DEF(DRM_MACH64_IDLE, mach64_dma_idle, DRM_AUTH),
DRM_IOCTL_DEF(DRM_MACH64_RESET, mach64_engine_reset, DRM_AUTH),
DRM_IOCTL_DEF(DRM_MACH64_VERTEX, mach64_dma_vertex, DRM_AUTH),
DRM_IOCTL_DEF(DRM_MACH64_BLIT, mach64_dma_blit, DRM_AUTH),
DRM_IOCTL_DEF(DRM_MACH64_FLUSH, mach64_dma_flush, DRM_AUTH),
DRM_IOCTL_DEF(DRM_MACH64_GETPARAM, mach64_get_param, DRM_AUTH),
};
int mach64_max_ioctl = DRM_ARRAY_SIZE(mach64_ioctls);
/* ================================================================
* DMA hardware state programming functions
*/
static void mach64_print_dirty(const char *msg, unsigned int flags)
{
DRM_DEBUG("%s: (0x%x) %s%s%s%s%s%s%s%s%s%s%s%s\n",
msg,
flags,
(flags & MACH64_UPLOAD_DST_OFF_PITCH) ? "dst_off_pitch, " :
"",
(flags & MACH64_UPLOAD_Z_ALPHA_CNTL) ? "z_alpha_cntl, " : "",
(flags & MACH64_UPLOAD_SCALE_3D_CNTL) ? "scale_3d_cntl, " :
"", (flags & MACH64_UPLOAD_DP_FOG_CLR) ? "dp_fog_clr, " : "",
(flags & MACH64_UPLOAD_DP_WRITE_MASK) ? "dp_write_mask, " :
"",
(flags & MACH64_UPLOAD_DP_PIX_WIDTH) ? "dp_pix_width, " : "",
(flags & MACH64_UPLOAD_SETUP_CNTL) ? "setup_cntl, " : "",
(flags & MACH64_UPLOAD_MISC) ? "misc, " : "",
(flags & MACH64_UPLOAD_TEXTURE) ? "texture, " : "",
(flags & MACH64_UPLOAD_TEX0IMAGE) ? "tex0 image, " : "",
(flags & MACH64_UPLOAD_TEX1IMAGE) ? "tex1 image, " : "",
(flags & MACH64_UPLOAD_CLIPRECTS) ? "cliprects, " : "");
}
/* Mach64 doesn't have hardware cliprects, just one hardware scissor,
* so the GL scissor is intersected with each cliprect here
*/
/* This function returns 0 on success, 1 for no intersection, and
* negative for an error
*/
static int mach64_emit_cliprect(struct drm_file *file_priv,
drm_mach64_private_t * dev_priv,
struct drm_clip_rect * box)
{
u32 sc_left_right, sc_top_bottom;
struct drm_clip_rect scissor;
drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mach64_context_regs_t *regs = &sarea_priv->context_state;
DMALOCALS;
DRM_DEBUG("box=%p\n", box);
/* Get GL scissor */
/* FIXME: store scissor in SAREA as a cliprect instead of in
* hardware format, or do intersection client-side
*/
scissor.x1 = regs->sc_left_right & 0xffff;
scissor.x2 = (regs->sc_left_right & 0xffff0000) >> 16;
scissor.y1 = regs->sc_top_bottom & 0xffff;
scissor.y2 = (regs->sc_top_bottom & 0xffff0000) >> 16;
/* Intersect GL scissor with cliprect */
if (box->x1 > scissor.x1)
scissor.x1 = box->x1;
if (box->y1 > scissor.y1)
scissor.y1 = box->y1;
if (box->x2 < scissor.x2)
scissor.x2 = box->x2;
if (box->y2 < scissor.y2)
scissor.y2 = box->y2;
/* positive return means skip */
if (scissor.x1 >= scissor.x2)
return 1;
if (scissor.y1 >= scissor.y2)
return 1;
DMAGETPTR(file_priv, dev_priv, 2); /* returns on failure to get buffer */
sc_left_right = ((scissor.x1 << 0) | (scissor.x2 << 16));
sc_top_bottom = ((scissor.y1 << 0) | (scissor.y2 << 16));
DMAOUTREG(MACH64_SC_LEFT_RIGHT, sc_left_right);
DMAOUTREG(MACH64_SC_TOP_BOTTOM, sc_top_bottom);
DMAADVANCE(dev_priv, 1);
return 0;
}
static __inline__ int mach64_emit_state(struct drm_file *file_priv,
drm_mach64_private_t * dev_priv)
{
drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mach64_context_regs_t *regs = &sarea_priv->context_state;
unsigned int dirty = sarea_priv->dirty;
u32 offset = ((regs->tex_size_pitch & 0xf0) >> 2);
DMALOCALS;
if (MACH64_VERBOSE) {
mach64_print_dirty(__FUNCTION__, dirty);
} else {
DRM_DEBUG("dirty=0x%08x\n", dirty);
}
DMAGETPTR(file_priv, dev_priv, 17); /* returns on failure to get buffer */
if (dirty & MACH64_UPLOAD_MISC) {
DMAOUTREG(MACH64_DP_MIX, regs->dp_mix);
DMAOUTREG(MACH64_DP_SRC, regs->dp_src);
DMAOUTREG(MACH64_CLR_CMP_CNTL, regs->clr_cmp_cntl);
DMAOUTREG(MACH64_GUI_TRAJ_CNTL, regs->gui_traj_cntl);
sarea_priv->dirty &= ~MACH64_UPLOAD_MISC;
}
if (dirty & MACH64_UPLOAD_DST_OFF_PITCH) {
DMAOUTREG(MACH64_DST_OFF_PITCH, regs->dst_off_pitch);
sarea_priv->dirty &= ~MACH64_UPLOAD_DST_OFF_PITCH;
}
if (dirty & MACH64_UPLOAD_Z_OFF_PITCH) {
DMAOUTREG(MACH64_Z_OFF_PITCH, regs->z_off_pitch);
sarea_priv->dirty &= ~MACH64_UPLOAD_Z_OFF_PITCH;
}
if (dirty & MACH64_UPLOAD_Z_ALPHA_CNTL) {
DMAOUTREG(MACH64_Z_CNTL, regs->z_cntl);
DMAOUTREG(MACH64_ALPHA_TST_CNTL, regs->alpha_tst_cntl);
sarea_priv->dirty &= ~MACH64_UPLOAD_Z_ALPHA_CNTL;
}
if (dirty & MACH64_UPLOAD_SCALE_3D_CNTL) {
DMAOUTREG(MACH64_SCALE_3D_CNTL, regs->scale_3d_cntl);
sarea_priv->dirty &= ~MACH64_UPLOAD_SCALE_3D_CNTL;
}
if (dirty & MACH64_UPLOAD_DP_FOG_CLR) {
DMAOUTREG(MACH64_DP_FOG_CLR, regs->dp_fog_clr);
sarea_priv->dirty &= ~MACH64_UPLOAD_DP_FOG_CLR;
}
if (dirty & MACH64_UPLOAD_DP_WRITE_MASK) {
DMAOUTREG(MACH64_DP_WRITE_MASK, regs->dp_write_mask);
sarea_priv->dirty &= ~MACH64_UPLOAD_DP_WRITE_MASK;
}
if (dirty & MACH64_UPLOAD_DP_PIX_WIDTH) {
DMAOUTREG(MACH64_DP_PIX_WIDTH, regs->dp_pix_width);
sarea_priv->dirty &= ~MACH64_UPLOAD_DP_PIX_WIDTH;
}
if (dirty & MACH64_UPLOAD_SETUP_CNTL) {
DMAOUTREG(MACH64_SETUP_CNTL, regs->setup_cntl);
sarea_priv->dirty &= ~MACH64_UPLOAD_SETUP_CNTL;
}
if (dirty & MACH64_UPLOAD_TEXTURE) {
DMAOUTREG(MACH64_TEX_SIZE_PITCH, regs->tex_size_pitch);
DMAOUTREG(MACH64_TEX_CNTL, regs->tex_cntl);
DMAOUTREG(MACH64_SECONDARY_TEX_OFF, regs->secondary_tex_off);
DMAOUTREG(MACH64_TEX_0_OFF + offset, regs->tex_offset);
sarea_priv->dirty &= ~MACH64_UPLOAD_TEXTURE;
}
DMAADVANCE(dev_priv, 1);
sarea_priv->dirty &= MACH64_UPLOAD_CLIPRECTS;
return 0;
}
/* ================================================================
* DMA command dispatch functions
*/
static int mach64_dma_dispatch_clear(struct drm_device * dev,
struct drm_file *file_priv,
unsigned int flags,
int cx, int cy, int cw, int ch,
unsigned int clear_color,
unsigned int clear_depth)
{
drm_mach64_private_t *dev_priv = dev->dev_private;
drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mach64_context_regs_t *ctx = &sarea_priv->context_state;
int nbox = sarea_priv->nbox;
struct drm_clip_rect *pbox = sarea_priv->boxes;
u32 fb_bpp, depth_bpp;
int i;
DMALOCALS;
DRM_DEBUG("\n");
switch (dev_priv->fb_bpp) {
case 16:
fb_bpp = MACH64_DATATYPE_RGB565;
break;
case 32:
fb_bpp = MACH64_DATATYPE_ARGB8888;
break;
default:
return -EINVAL;
}
switch (dev_priv->depth_bpp) {
case 16:
depth_bpp = MACH64_DATATYPE_RGB565;
break;
case 24:
case 32:
depth_bpp = MACH64_DATATYPE_ARGB8888;
break;
default:
return -EINVAL;
}
if (!nbox)
return 0;
DMAGETPTR(file_priv, dev_priv, nbox * 31); /* returns on failure to get buffer */
for (i = 0; i < nbox; i++) {
int x = pbox[i].x1;
int y = pbox[i].y1;
int w = pbox[i].x2 - x;
int h = pbox[i].y2 - y;
DRM_DEBUG("dispatch clear %d,%d-%d,%d flags 0x%x\n",
pbox[i].x1, pbox[i].y1,
pbox[i].x2, pbox[i].y2, flags);
if (flags & (MACH64_FRONT | MACH64_BACK)) {
/* Setup for color buffer clears
*/
DMAOUTREG(MACH64_Z_CNTL, 0);
DMAOUTREG(MACH64_SCALE_3D_CNTL, 0);
DMAOUTREG(MACH64_SC_LEFT_RIGHT, ctx->sc_left_right);
DMAOUTREG(MACH64_SC_TOP_BOTTOM, ctx->sc_top_bottom);
DMAOUTREG(MACH64_CLR_CMP_CNTL, 0);
DMAOUTREG(MACH64_GUI_TRAJ_CNTL,
(MACH64_DST_X_LEFT_TO_RIGHT |
MACH64_DST_Y_TOP_TO_BOTTOM));
DMAOUTREG(MACH64_DP_PIX_WIDTH, ((fb_bpp << 0) |
(fb_bpp << 4) |
(fb_bpp << 8) |
(fb_bpp << 16) |
(fb_bpp << 28)));
DMAOUTREG(MACH64_DP_FRGD_CLR, clear_color);
DMAOUTREG(MACH64_DP_WRITE_MASK, ctx->dp_write_mask);
DMAOUTREG(MACH64_DP_MIX, (MACH64_BKGD_MIX_D |
MACH64_FRGD_MIX_S));
DMAOUTREG(MACH64_DP_SRC, (MACH64_BKGD_SRC_FRGD_CLR |
MACH64_FRGD_SRC_FRGD_CLR |
MACH64_MONO_SRC_ONE));
}
if (flags & MACH64_FRONT) {
DMAOUTREG(MACH64_DST_OFF_PITCH,
dev_priv->front_offset_pitch);
DMAOUTREG(MACH64_DST_X_Y, (y << 16) | x);
DMAOUTREG(MACH64_DST_WIDTH_HEIGHT, (h << 16) | w);
}
if (flags & MACH64_BACK) {
DMAOUTREG(MACH64_DST_OFF_PITCH,
dev_priv->back_offset_pitch);
DMAOUTREG(MACH64_DST_X_Y, (y << 16) | x);
DMAOUTREG(MACH64_DST_WIDTH_HEIGHT, (h << 16) | w);
}
if (flags & MACH64_DEPTH) {
/* Setup for depth buffer clear
*/
DMAOUTREG(MACH64_Z_CNTL, 0);
DMAOUTREG(MACH64_SCALE_3D_CNTL, 0);
DMAOUTREG(MACH64_SC_LEFT_RIGHT, ctx->sc_left_right);
DMAOUTREG(MACH64_SC_TOP_BOTTOM, ctx->sc_top_bottom);
DMAOUTREG(MACH64_CLR_CMP_CNTL, 0);
DMAOUTREG(MACH64_GUI_TRAJ_CNTL,
(MACH64_DST_X_LEFT_TO_RIGHT |
MACH64_DST_Y_TOP_TO_BOTTOM));
DMAOUTREG(MACH64_DP_PIX_WIDTH, ((depth_bpp << 0) |
(depth_bpp << 4) |
(depth_bpp << 8) |
(depth_bpp << 16) |
(depth_bpp << 28)));
DMAOUTREG(MACH64_DP_FRGD_CLR, clear_depth);
DMAOUTREG(MACH64_DP_WRITE_MASK, 0xffffffff);
DMAOUTREG(MACH64_DP_MIX, (MACH64_BKGD_MIX_D |
MACH64_FRGD_MIX_S));
DMAOUTREG(MACH64_DP_SRC, (MACH64_BKGD_SRC_FRGD_CLR |
MACH64_FRGD_SRC_FRGD_CLR |
MACH64_MONO_SRC_ONE));
DMAOUTREG(MACH64_DST_OFF_PITCH,
dev_priv->depth_offset_pitch);
DMAOUTREG(MACH64_DST_X_Y, (y << 16) | x);
DMAOUTREG(MACH64_DST_WIDTH_HEIGHT, (h << 16) | w);
}
}
DMAADVANCE(dev_priv, 1);
return 0;
}
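/*
 * Example of the DP_PIX_WIDTH value built in the clear path above: for a
 * 16bpp front buffer fb_bpp is MACH64_DATATYPE_RGB565 (4), and replicating
 * it into each of the pixel-width fields gives
 * (4 << 0) | (4 << 4) | (4 << 8) | (4 << 16) | (4 << 28) == 0x40040444.
 */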
static int mach64_dma_dispatch_swap(struct drm_device * dev,
struct drm_file *file_priv)
{
drm_mach64_private_t *dev_priv = dev->dev_private;
drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
int nbox = sarea_priv->nbox;
struct drm_clip_rect *pbox = sarea_priv->boxes;
u32 fb_bpp;
int i;
DMALOCALS;
DRM_DEBUG("\n");
switch (dev_priv->fb_bpp) {
case 16:
fb_bpp = MACH64_DATATYPE_RGB565;
break;
case 32:
default:
fb_bpp = MACH64_DATATYPE_ARGB8888;
break;
}
if (!nbox)
return 0;
DMAGETPTR(file_priv, dev_priv, 13 + nbox * 4); /* returns on failure to get buffer */
DMAOUTREG(MACH64_Z_CNTL, 0);
DMAOUTREG(MACH64_SCALE_3D_CNTL, 0);
DMAOUTREG(MACH64_SC_LEFT_RIGHT, 0 | (8191 << 16)); /* no scissor */
DMAOUTREG(MACH64_SC_TOP_BOTTOM, 0 | (16383 << 16));
DMAOUTREG(MACH64_CLR_CMP_CNTL, 0);
DMAOUTREG(MACH64_GUI_TRAJ_CNTL, (MACH64_DST_X_LEFT_TO_RIGHT |
MACH64_DST_Y_TOP_TO_BOTTOM));
DMAOUTREG(MACH64_DP_PIX_WIDTH, ((fb_bpp << 0) |
(fb_bpp << 4) |
(fb_bpp << 8) |
(fb_bpp << 16) | (fb_bpp << 28)));
DMAOUTREG(MACH64_DP_WRITE_MASK, 0xffffffff);
DMAOUTREG(MACH64_DP_MIX, (MACH64_BKGD_MIX_D | MACH64_FRGD_MIX_S));
DMAOUTREG(MACH64_DP_SRC, (MACH64_BKGD_SRC_BKGD_CLR |
MACH64_FRGD_SRC_BLIT | MACH64_MONO_SRC_ONE));
DMAOUTREG(MACH64_SRC_OFF_PITCH, dev_priv->back_offset_pitch);
DMAOUTREG(MACH64_DST_OFF_PITCH, dev_priv->front_offset_pitch);
for (i = 0; i < nbox; i++) {
int x = pbox[i].x1;
int y = pbox[i].y1;
int w = pbox[i].x2 - x;
int h = pbox[i].y2 - y;
DRM_DEBUG("dispatch swap %d,%d-%d,%d\n",
pbox[i].x1, pbox[i].y1, pbox[i].x2, pbox[i].y2);
DMAOUTREG(MACH64_SRC_WIDTH1, w);
DMAOUTREG(MACH64_SRC_Y_X, (x << 16) | y);
DMAOUTREG(MACH64_DST_Y_X, (x << 16) | y);
DMAOUTREG(MACH64_DST_WIDTH_HEIGHT, (h << 16) | w);
}
DMAADVANCE(dev_priv, 1);
if (dev_priv->driver_mode == MACH64_MODE_DMA_ASYNC) {
for (i = 0; i < MACH64_MAX_QUEUED_FRAMES - 1; i++) {
dev_priv->frame_ofs[i] = dev_priv->frame_ofs[i + 1];
}
dev_priv->frame_ofs[i] = GETRINGOFFSET();
dev_priv->sarea_priv->frames_queued++;
}
return 0;
}
static int mach64_do_get_frames_queued(drm_mach64_private_t * dev_priv)
{
drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
int i, start;
u32 head, tail, ofs;
DRM_DEBUG("\n");
if (sarea_priv->frames_queued == 0)
return 0;
tail = ring->tail;
mach64_ring_tick(dev_priv, ring);
head = ring->head;
start = (MACH64_MAX_QUEUED_FRAMES -
DRM_MIN(MACH64_MAX_QUEUED_FRAMES, sarea_priv->frames_queued));
if (head == tail) {
sarea_priv->frames_queued = 0;
for (i = start; i < MACH64_MAX_QUEUED_FRAMES; i++) {
dev_priv->frame_ofs[i] = ~0;
}
return 0;
}
for (i = start; i < MACH64_MAX_QUEUED_FRAMES; i++) {
ofs = dev_priv->frame_ofs[i];
DRM_DEBUG("frame_ofs[%d] ofs: %d\n", i, ofs);
if (ofs == ~0 ||
(head < tail && (ofs < head || ofs >= tail)) ||
(head > tail && (ofs < head && ofs >= tail))) {
sarea_priv->frames_queued =
(MACH64_MAX_QUEUED_FRAMES - 1) - i;
dev_priv->frame_ofs[i] = ~0;
}
}
return sarea_priv->frames_queued;
}
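/*
 * Example of the pending test above: with no wrap (head == 100, tail == 300,
 * in dwords) a saved swap offset of 50 lies outside [head, tail) and has
 * been processed, while 200 is still pending.  With a wrapped ring
 * (head == 300, tail == 100) the pending region is [300, end) plus
 * [0, 100), so 200 has been processed and 50 is still pending.
 */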
/* Copy and verify a client submitted buffer.
* FIXME: Make an assembly optimized version
*/
static __inline__ int copy_from_user_vertex(u32 *to,
const u32 __user *ufrom,
unsigned long bytes)
{
unsigned long n = bytes; /* bytes, then dwords, remaining in buffer */
u32 *from, *orig_from;
from = drm_alloc(bytes, DRM_MEM_DRIVER);
if (from == NULL)
return -ENOMEM;
if (DRM_COPY_FROM_USER(from, ufrom, bytes)) {
drm_free(from, bytes, DRM_MEM_DRIVER);
return -EFAULT;
}
orig_from = from; /* we'll be modifying the "from" ptr, so save it */
n >>= 2;
while (n > 1) {
u32 data, reg, count;
data = *from++;
n--;
reg = le32_to_cpu(data);
count = (reg >> 16) + 1;
if (count <= n) {
n -= count;
reg &= 0xffff;
/* This is an exact match of Mach64's Setup Engine registers,
* excluding SETUP_CNTL (1_C1).
*/
if ((reg >= 0x0190 && reg < 0x01c1) ||
(reg >= 0x01ca && reg <= 0x01cf)) {
*to++ = data;
memcpy(to, from, count << 2);
from += count;
to += count;
} else {
DRM_ERROR("Got bad command: 0x%04x\n", reg);
drm_free(orig_from, bytes, DRM_MEM_DRIVER);
return -EACCES;
}
} else {
DRM_ERROR
("Got bad command count(=%u) dwords remaining=%lu\n",
count, n);
drm_free(orig_from, bytes, DRM_MEM_DRIVER);
return -EINVAL;
}
}
drm_free(orig_from, bytes, DRM_MEM_DRIVER);
if (n == 0)
return 0;
else {
DRM_ERROR("Bad buf->used(=%lu)\n", bytes);
return -EINVAL;
}
}
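
copy_from_user_vertex() expects the client buffer to be a stream of (header, data...) groups: the low 16 bits of each header select a Setup Engine register and the high 16 bits hold the number of following data dwords minus one. The fragment below is an editor's sketch (not part of the driver) that builds a header the same way the verifier decodes it, purely to illustrate the layout:

#include <stdio.h>
#include <stdint.h>

/* Mach64-style command header: low 16 bits = register offset,
 * high 16 bits = (number of data dwords - 1).  copy_from_user_vertex()
 * decodes it as count = (header >> 16) + 1, reg = header & 0xffff.
 */
static uint32_t mach64_cmd_header(uint16_t reg, uint32_t ndwords)
{
	return ((ndwords - 1) << 16) | reg;
}

int main(void)
{
	/* hypothetical: write 3 dwords starting at Setup Engine reg 0x01a0 */
	uint32_t hdr = mach64_cmd_header(0x01a0, 3);

	printf("header=0x%08x count=%u reg=0x%04x\n",
	       hdr, (hdr >> 16) + 1, hdr & 0xffff);
	return 0;
}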
static int mach64_dma_dispatch_vertex(struct drm_device * dev,
struct drm_file *file_priv,
drm_mach64_vertex_t * vertex)
{
drm_mach64_private_t *dev_priv = dev->dev_private;
drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
struct drm_buf *copy_buf;
void *buf = vertex->buf;
unsigned long used = vertex->used;
int ret = 0;
int i = 0;
int done = 0;
int verify_ret = 0;
DMALOCALS;
DRM_DEBUG("buf=%p used=%lu nbox=%d\n",
buf, used, sarea_priv->nbox);
if (!used)
goto _vertex_done;
copy_buf = mach64_freelist_get(dev_priv);
if (copy_buf == NULL) {
DRM_ERROR("couldn't get buffer\n");
return -EAGAIN;
}
/* Mach64's vertex data is actually register writes. To avoid security
* compromises these register writes have to be verified and copied from
* user space into a private DMA buffer.
*/
verify_ret = copy_from_user_vertex(GETBUFPTR(copy_buf), buf, used);
if (verify_ret != 0) {
mach64_freelist_put(dev_priv, copy_buf);
goto _vertex_done;
}
copy_buf->used = used;
DMASETPTR(copy_buf);
if (sarea_priv->dirty & ~MACH64_UPLOAD_CLIPRECTS) {
ret = mach64_emit_state(file_priv, dev_priv);
if (ret < 0)
return ret;
}
do {
/* Emit the next cliprect */
if (i < sarea_priv->nbox) {
ret = mach64_emit_cliprect(file_priv, dev_priv,
&sarea_priv->boxes[i]);
if (ret < 0) {
/* failed to get buffer */
return ret;
} else if (ret != 0) {
/* null intersection with scissor */
continue;
}
}
if ((i >= sarea_priv->nbox - 1))
done = 1;
/* Add the buffer to the DMA queue */
DMAADVANCE(dev_priv, done);
} while (++i < sarea_priv->nbox);
if (!done) {
if (copy_buf->pending) {
DMADISCARDBUF();
} else {
/* This buffer wasn't used (no cliprects), so place it
* back on the free list
*/
mach64_freelist_put(dev_priv, copy_buf);
}
}
_vertex_done:
sarea_priv->dirty &= ~MACH64_UPLOAD_CLIPRECTS;
sarea_priv->nbox = 0;
return verify_ret;
}
static __inline__ int copy_from_user_blit(u32 *to,
const u32 __user *ufrom,
unsigned long bytes)
{
to = (u32 *)((char *)to + MACH64_HOSTDATA_BLIT_OFFSET);
if (DRM_COPY_FROM_USER(to, ufrom, bytes)) {
return -EFAULT;
}
return 0;
}
static int mach64_dma_dispatch_blit(struct drm_device * dev,
struct drm_file *file_priv,
drm_mach64_blit_t * blit)
{
drm_mach64_private_t *dev_priv = dev->dev_private;
int dword_shift, dwords;
unsigned long used;
struct drm_buf *copy_buf;
int verify_ret = 0;
DMALOCALS;
/* The compiler won't optimize away a division by a variable,
 * even if the only legal values are powers of two, so a shift is
 * used instead (a small worked example follows this function).
 */
switch (blit->format) {
case MACH64_DATATYPE_ARGB8888:
dword_shift = 0;
break;
case MACH64_DATATYPE_ARGB1555:
case MACH64_DATATYPE_RGB565:
case MACH64_DATATYPE_VYUY422:
case MACH64_DATATYPE_YVYU422:
case MACH64_DATATYPE_ARGB4444:
dword_shift = 1;
break;
case MACH64_DATATYPE_CI8:
case MACH64_DATATYPE_RGB8:
dword_shift = 2;
break;
default:
DRM_ERROR("invalid blit format %d\n", blit->format);
return -EINVAL;
}
/* Set buf->used to the bytes of blit data based on the blit dimensions
* and verify the size. When the setup is emitted to the buffer with
* the DMA* macros below, buf->used is incremented to include the bytes
* used for setup as well as the blit data.
*/
dwords = (blit->width * blit->height) >> dword_shift;
used = dwords << 2;
if (used <= 0 ||
used > MACH64_BUFFER_SIZE - MACH64_HOSTDATA_BLIT_OFFSET) {
DRM_ERROR("Invalid blit size: %lu bytes\n", used);
return -EINVAL;
}
copy_buf = mach64_freelist_get(dev_priv);
if (copy_buf == NULL) {
DRM_ERROR("couldn't get buffer\n");
return -EAGAIN;
}
/* Copy the blit data from userspace.
*
* XXX: This is overkill. The most efficient solution would be having
* two sets of buffers (one set private for vertex data, the other set
* client-writable for blits). However that would bring more complexity
* and would break backward compatibility. The solution currently
* implemented is keeping all buffers private, allowing to secure the
* driver, without increasing complexity at the expense of some speed
* transferring data.
*/
verify_ret = copy_from_user_blit(GETBUFPTR(copy_buf), blit->buf, used);
if (verify_ret != 0) {
mach64_freelist_put(dev_priv, copy_buf);
goto _blit_done;
}
copy_buf->used = used;
/* FIXME: Use a last buffer flag and reduce the state emitted for subsequent,
* continuation buffers?
*/
/* Blit via BM_HOSTDATA (gui-master) - like HOST_DATA[0-15], but doesn't require
* a register command every 16 dwords. State setup is added at the start of the
* buffer -- the client leaves space for this based on MACH64_HOSTDATA_BLIT_OFFSET
*/
DMASETPTR(copy_buf);
DMAOUTREG(MACH64_Z_CNTL, 0);
DMAOUTREG(MACH64_SCALE_3D_CNTL, 0);
DMAOUTREG(MACH64_SC_LEFT_RIGHT, 0 | (8191 << 16)); /* no scissor */
DMAOUTREG(MACH64_SC_TOP_BOTTOM, 0 | (16383 << 16));
DMAOUTREG(MACH64_CLR_CMP_CNTL, 0); /* disable */
DMAOUTREG(MACH64_GUI_TRAJ_CNTL,
MACH64_DST_X_LEFT_TO_RIGHT | MACH64_DST_Y_TOP_TO_BOTTOM);
DMAOUTREG(MACH64_DP_PIX_WIDTH, (blit->format << 0) /* dst pix width */
|(blit->format << 4) /* composite pix width */
|(blit->format << 8) /* src pix width */
|(blit->format << 16) /* host data pix width */
|(blit->format << 28) /* scaler/3D pix width */
);
DMAOUTREG(MACH64_DP_WRITE_MASK, 0xffffffff); /* enable all planes */
DMAOUTREG(MACH64_DP_MIX, MACH64_BKGD_MIX_D | MACH64_FRGD_MIX_S);
DMAOUTREG(MACH64_DP_SRC,
MACH64_BKGD_SRC_BKGD_CLR
| MACH64_FRGD_SRC_HOST | MACH64_MONO_SRC_ONE);
DMAOUTREG(MACH64_DST_OFF_PITCH,
(blit->pitch << 22) | (blit->offset >> 3));
DMAOUTREG(MACH64_DST_X_Y, (blit->y << 16) | blit->x);
DMAOUTREG(MACH64_DST_WIDTH_HEIGHT, (blit->height << 16) | blit->width);
DRM_DEBUG("%lu bytes\n", used);
/* Add the buffer to the queue */
DMAADVANCEHOSTDATA(dev_priv);
_blit_done:
return verify_ret;
}
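
As noted above mach64_dma_dispatch_blit()'s switch statement, the driver turns the blit's pixel count into a dword count with a shift rather than a division. A minimal stand-alone sketch of that mapping (an editor's illustration with hypothetical names): 32-bpp formats pack one pixel per dword (shift 0), 16-bpp formats two (shift 1), and 8-bpp formats four (shift 2).

#include <stdio.h>

/* Pixels per dword expressed as a shift: 32 bpp -> 0, 16 bpp -> 1, 8 bpp -> 2.
 * dwords = (width * height) >> shift, as in mach64_dma_dispatch_blit().
 */
static int bpp_to_dword_shift(int bits_per_pixel)
{
	switch (bits_per_pixel) {
	case 32: return 0;
	case 16: return 1;
	case 8:  return 2;
	default: return -1;		/* unsupported format */
	}
}

int main(void)
{
	int w = 64, h = 16;
	int shift = bpp_to_dword_shift(16);	/* e.g. MACH64_DATATYPE_RGB565 */

	printf("blit payload = %d dwords (%d bytes)\n",
	       (w * h) >> shift, ((w * h) >> shift) << 2);
	return 0;
}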
/* ================================================================
* IOCTL functions
*/
int mach64_dma_clear(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_mach64_private_t *dev_priv = dev->dev_private;
drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mach64_clear_t *clear = data;
int ret;
DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (sarea_priv->nbox > MACH64_NR_SAREA_CLIPRECTS)
sarea_priv->nbox = MACH64_NR_SAREA_CLIPRECTS;
ret = mach64_dma_dispatch_clear(dev, file_priv, clear->flags,
clear->x, clear->y, clear->w, clear->h,
clear->clear_color,
clear->clear_depth);
/* Make sure we restore the 3D state next time.
*/
sarea_priv->dirty |= (MACH64_UPLOAD_CONTEXT | MACH64_UPLOAD_MISC);
return ret;
}
int mach64_dma_swap(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_mach64_private_t *dev_priv = dev->dev_private;
drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
int ret;
DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (sarea_priv->nbox > MACH64_NR_SAREA_CLIPRECTS)
sarea_priv->nbox = MACH64_NR_SAREA_CLIPRECTS;
ret = mach64_dma_dispatch_swap(dev, file_priv);
/* Make sure we restore the 3D state next time.
*/
sarea_priv->dirty |= (MACH64_UPLOAD_CONTEXT | MACH64_UPLOAD_MISC);
return ret;
}
int mach64_dma_vertex(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_mach64_private_t *dev_priv = dev->dev_private;
drm_mach64_sarea_t *sarea_priv;
drm_mach64_vertex_t *vertex = data;
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (!dev_priv) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
sarea_priv = dev_priv->sarea_priv;
DRM_DEBUG("pid=%d buf=%p used=%lu discard=%d\n",
DRM_CURRENTPID,
vertex->buf, vertex->used, vertex->discard);
if (vertex->prim < 0 || vertex->prim > MACH64_PRIM_POLYGON) {
DRM_ERROR("buffer prim %d\n", vertex->prim);
return -EINVAL;
}
if (vertex->used > MACH64_BUFFER_SIZE || (vertex->used & 3) != 0) {
DRM_ERROR("Invalid vertex buffer size: %lu bytes\n",
vertex->used);
return -EINVAL;
}
if (sarea_priv->nbox > MACH64_NR_SAREA_CLIPRECTS)
sarea_priv->nbox = MACH64_NR_SAREA_CLIPRECTS;
return mach64_dma_dispatch_vertex(dev, file_priv, vertex);
}
int mach64_dma_blit(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_mach64_private_t *dev_priv = dev->dev_private;
drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mach64_blit_t *blit = data;
int ret;
LOCK_TEST_WITH_RETURN(dev, file_priv);
ret = mach64_dma_dispatch_blit(dev, file_priv, blit);
/* Make sure we restore the 3D state next time.
*/
sarea_priv->dirty |= (MACH64_UPLOAD_CONTEXT |
MACH64_UPLOAD_MISC | MACH64_UPLOAD_CLIPRECTS);
return ret;
}
int mach64_get_param(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_mach64_private_t *dev_priv = dev->dev_private;
drm_mach64_getparam_t *param = data;
int value;
DRM_DEBUG("\n");
if (!dev_priv) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
switch (param->param) {
case MACH64_PARAM_FRAMES_QUEUED:
/* Needs lock since it calls mach64_ring_tick() */
LOCK_TEST_WITH_RETURN(dev, file_priv);
value = mach64_do_get_frames_queued(dev_priv);
break;
case MACH64_PARAM_IRQ_NR:
value = dev->irq;
break;
default:
return -EINVAL;
}
if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
DRM_ERROR("copy_to_user\n");
return -EFAULT;
}
return 0;
}

File diff suppressed because it is too large

View File

@ -1,428 +0,0 @@
/* mga_drm.h -- Public header for the Matrox g200/g400 driver -*- linux-c -*-
* Created: Tue Jan 25 01:50:01 1999 by jhartmann@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Jeff Hartmann <jhartmann@valinux.com>
* Keith Whitwell <keith@tungstengraphics.com>
*
* Rewritten by:
* Gareth Hughes <gareth@valinux.com>
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#ifndef __MGA_DRM_H__
#define __MGA_DRM_H__
/* WARNING: If you change any of these defines, make sure to change the
* defines in the Xserver file (mga_sarea.h)
*/
#ifndef __MGA_SAREA_DEFINES__
#define __MGA_SAREA_DEFINES__
/* WARP pipe flags
*/
#define MGA_F 0x1 /* fog */
#define MGA_A 0x2 /* alpha */
#define MGA_S 0x4 /* specular */
#define MGA_T2 0x8 /* multitexture */
#define MGA_WARP_TGZ 0
#define MGA_WARP_TGZF (MGA_F)
#define MGA_WARP_TGZA (MGA_A)
#define MGA_WARP_TGZAF (MGA_F|MGA_A)
#define MGA_WARP_TGZS (MGA_S)
#define MGA_WARP_TGZSF (MGA_S|MGA_F)
#define MGA_WARP_TGZSA (MGA_S|MGA_A)
#define MGA_WARP_TGZSAF (MGA_S|MGA_F|MGA_A)
#define MGA_WARP_T2GZ (MGA_T2)
#define MGA_WARP_T2GZF (MGA_T2|MGA_F)
#define MGA_WARP_T2GZA (MGA_T2|MGA_A)
#define MGA_WARP_T2GZAF (MGA_T2|MGA_A|MGA_F)
#define MGA_WARP_T2GZS (MGA_T2|MGA_S)
#define MGA_WARP_T2GZSF (MGA_T2|MGA_S|MGA_F)
#define MGA_WARP_T2GZSA (MGA_T2|MGA_S|MGA_A)
#define MGA_WARP_T2GZSAF (MGA_T2|MGA_S|MGA_F|MGA_A)
#define MGA_MAX_G200_PIPES 8 /* no multitex */
#define MGA_MAX_G400_PIPES 16
#define MGA_MAX_WARP_PIPES MGA_MAX_G400_PIPES
#define MGA_WARP_UCODE_SIZE 32768 /* in bytes */
#define MGA_CARD_TYPE_G200 1
#define MGA_CARD_TYPE_G400 2
#define MGA_CARD_TYPE_G450 3 /* not currently used */
#define MGA_CARD_TYPE_G550 4
#define MGA_FRONT 0x1
#define MGA_BACK 0x2
#define MGA_DEPTH 0x4
/* What needs to be changed for the current vertex dma buffer?
*/
#define MGA_UPLOAD_CONTEXT 0x1
#define MGA_UPLOAD_TEX0 0x2
#define MGA_UPLOAD_TEX1 0x4
#define MGA_UPLOAD_PIPE 0x8
#define MGA_UPLOAD_TEX0IMAGE 0x10 /* handled client-side */
#define MGA_UPLOAD_TEX1IMAGE 0x20 /* handled client-side */
#define MGA_UPLOAD_2D 0x40
#define MGA_WAIT_AGE 0x80 /* handled client-side */
#define MGA_UPLOAD_CLIPRECTS 0x100 /* handled client-side */
#if 0
#define MGA_DMA_FLUSH 0x200 /* set when someone gets the lock
quiescent */
#endif
/* 128 buffers of 64k each, total 8 meg.
*/
#define MGA_BUFFER_SIZE (1 << 16)
#define MGA_NUM_BUFFERS 128
/* Keep these small for testing.
*/
#define MGA_NR_SAREA_CLIPRECTS 8
/* 2 heaps (1 for card, 1 for agp), each divided into up to 128
* regions, subject to a minimum region size of (1<<16) == 64k.
*
* Clients may subdivide regions internally, but when sharing between
* clients, the region size is the minimum granularity.
*/
#define MGA_CARD_HEAP 0
#define MGA_AGP_HEAP 1
#define MGA_NR_TEX_HEAPS 2
#define MGA_NR_TEX_REGIONS 16
#define MGA_LOG_MIN_TEX_REGION_SIZE 16
#define DRM_MGA_IDLE_RETRY 2048
#endif /* __MGA_SAREA_DEFINES__ */
/* Setup registers for 3D context
*/
typedef struct {
unsigned int dstorg;
unsigned int maccess;
unsigned int plnwt;
unsigned int dwgctl;
unsigned int alphactrl;
unsigned int fogcolor;
unsigned int wflag;
unsigned int tdualstage0;
unsigned int tdualstage1;
unsigned int fcol;
unsigned int stencil;
unsigned int stencilctl;
} drm_mga_context_regs_t;
/* Setup registers for 2D, X server
*/
typedef struct {
unsigned int pitch;
} drm_mga_server_regs_t;
/* Setup registers for each texture unit
*/
typedef struct {
unsigned int texctl;
unsigned int texctl2;
unsigned int texfilter;
unsigned int texbordercol;
unsigned int texorg;
unsigned int texwidth;
unsigned int texheight;
unsigned int texorg1;
unsigned int texorg2;
unsigned int texorg3;
unsigned int texorg4;
} drm_mga_texture_regs_t;
/* General aging mechanism
*/
typedef struct {
unsigned int head; /* Position of head pointer */
unsigned int wrap; /* Primary DMA wrap count */
} drm_mga_age_t;
typedef struct _drm_mga_sarea {
/* The channel for communication of state information to the kernel
* on firing a vertex dma buffer.
*/
drm_mga_context_regs_t context_state;
drm_mga_server_regs_t server_state;
drm_mga_texture_regs_t tex_state[2];
unsigned int warp_pipe;
unsigned int dirty;
unsigned int vertsize;
/* The current cliprects, or a subset thereof.
*/
struct drm_clip_rect boxes[MGA_NR_SAREA_CLIPRECTS];
unsigned int nbox;
/* Information about the most recently used 3d drawable. The
* client fills in the req_* fields, the server fills in the
* exported_ fields and puts the cliprects into boxes, above.
*
* The client clears the exported_drawable field before
* clobbering the boxes data.
*/
unsigned int req_drawable; /* the X drawable id */
unsigned int req_draw_buffer; /* MGA_FRONT or MGA_BACK */
unsigned int exported_drawable;
unsigned int exported_index;
unsigned int exported_stamp;
unsigned int exported_buffers;
unsigned int exported_nfront;
unsigned int exported_nback;
int exported_back_x, exported_front_x, exported_w;
int exported_back_y, exported_front_y, exported_h;
struct drm_clip_rect exported_boxes[MGA_NR_SAREA_CLIPRECTS];
/* Counters for aging textures and for client-side throttling.
*/
unsigned int status[4];
unsigned int last_wrap;
drm_mga_age_t last_frame;
unsigned int last_enqueue; /* last time a buffer was enqueued */
unsigned int last_dispatch; /* age of the most recently dispatched buffer */
unsigned int last_quiescent; /* */
/* LRU lists for texture memory in agp space and on the card.
*/
struct drm_tex_region texList[MGA_NR_TEX_HEAPS][MGA_NR_TEX_REGIONS + 1];
unsigned int texAge[MGA_NR_TEX_HEAPS];
/* Mechanism to validate card state.
*/
int ctxOwner;
} drm_mga_sarea_t;
/* MGA specific ioctls
* The device specific ioctl range is 0x40 to 0x79.
*/
#define DRM_MGA_INIT 0x00
#define DRM_MGA_FLUSH 0x01
#define DRM_MGA_RESET 0x02
#define DRM_MGA_SWAP 0x03
#define DRM_MGA_CLEAR 0x04
#define DRM_MGA_VERTEX 0x05
#define DRM_MGA_INDICES 0x06
#define DRM_MGA_ILOAD 0x07
#define DRM_MGA_BLIT 0x08
#define DRM_MGA_GETPARAM 0x09
/* 3.2:
* ioctls for operating on fences.
*/
#define DRM_MGA_SET_FENCE 0x0a
#define DRM_MGA_WAIT_FENCE 0x0b
#define DRM_MGA_DMA_BOOTSTRAP 0x0c
#define DRM_IOCTL_MGA_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_INIT, drm_mga_init_t)
#define DRM_IOCTL_MGA_FLUSH DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_FLUSH, drm_lock_t)
#define DRM_IOCTL_MGA_RESET DRM_IO( DRM_COMMAND_BASE + DRM_MGA_RESET)
#define DRM_IOCTL_MGA_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_MGA_SWAP)
#define DRM_IOCTL_MGA_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_CLEAR, drm_mga_clear_t)
#define DRM_IOCTL_MGA_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_VERTEX, drm_mga_vertex_t)
#define DRM_IOCTL_MGA_INDICES DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_INDICES, drm_mga_indices_t)
#define DRM_IOCTL_MGA_ILOAD DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_ILOAD, drm_mga_iload_t)
#define DRM_IOCTL_MGA_BLIT DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_BLIT, drm_mga_blit_t)
#define DRM_IOCTL_MGA_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_GETPARAM, drm_mga_getparam_t)
#define DRM_IOCTL_MGA_SET_FENCE DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_SET_FENCE, uint32_t)
#define DRM_IOCTL_MGA_WAIT_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_WAIT_FENCE, uint32_t)
#define DRM_IOCTL_MGA_DMA_BOOTSTRAP DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_DMA_BOOTSTRAP, drm_mga_dma_bootstrap_t)
typedef struct _drm_mga_warp_index {
int installed;
unsigned long phys_addr;
int size;
} drm_mga_warp_index_t;
typedef struct drm_mga_init {
enum {
MGA_INIT_DMA = 0x01,
MGA_CLEANUP_DMA = 0x02
} func;
unsigned long sarea_priv_offset;
int chipset;
int sgram;
unsigned int maccess;
unsigned int fb_cpp;
unsigned int front_offset, front_pitch;
unsigned int back_offset, back_pitch;
unsigned int depth_cpp;
unsigned int depth_offset, depth_pitch;
unsigned int texture_offset[MGA_NR_TEX_HEAPS];
unsigned int texture_size[MGA_NR_TEX_HEAPS];
unsigned long fb_offset;
unsigned long mmio_offset;
unsigned long status_offset;
unsigned long warp_offset;
unsigned long primary_offset;
unsigned long buffers_offset;
} drm_mga_init_t;
typedef struct drm_mga_dma_bootstrap {
/**
* \name AGP texture region
*
* On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, these fields will
* be filled in with the actual AGP texture settings.
*
* \warning
 * If these fields are non-zero, but drm_mga_dma_bootstrap::agp_mode
* is zero, it means that PCI memory (most likely through the use of
* an IOMMU) is being used for "AGP" textures.
*/
/*@{*/
unsigned long texture_handle; /**< Handle used to map AGP textures. */
uint32_t texture_size; /**< Size of the AGP texture region. */
/*@}*/
/**
* Requested size of the primary DMA region.
*
* On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
 * filled in with the actual size of the primary DMA region allocated.
*/
uint32_t primary_size;
/**
* Requested number of secondary DMA buffers.
*
* On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
* filled in with the actual number of secondary DMA buffers
* allocated. Particularly when PCI DMA is used, this may be
 * (substantially) less than the number requested.
*/
uint32_t secondary_bin_count;
/**
* Requested size of each secondary DMA buffer.
*
* While the kernel \b is free to reduce
 * drm_mga_dma_bootstrap::secondary_bin_count, it is \b not allowed
 * to reduce drm_mga_dma_bootstrap::secondary_bin_size.
*/
uint32_t secondary_bin_size;
/**
* Bit-wise mask of AGPSTAT2_* values. Currently only \c AGPSTAT2_1X,
* \c AGPSTAT2_2X, and \c AGPSTAT2_4X are supported. If this value is
* zero, it means that PCI DMA should be used, even if AGP is
* possible.
*
* On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
* filled in with the actual AGP mode. If AGP was not available
* (i.e., PCI DMA was used), this value will be zero.
*/
uint32_t agp_mode;
/**
* Desired AGP GART size, measured in megabytes.
*/
uint8_t agp_size;
} drm_mga_dma_bootstrap_t;
typedef struct drm_mga_clear {
unsigned int flags;
unsigned int clear_color;
unsigned int clear_depth;
unsigned int color_mask;
unsigned int depth_mask;
} drm_mga_clear_t;
typedef struct drm_mga_vertex {
int idx; /* buffer to queue */
int used; /* bytes in use */
int discard; /* client finished with buffer? */
} drm_mga_vertex_t;
typedef struct drm_mga_indices {
int idx; /* buffer to queue */
unsigned int start;
unsigned int end;
int discard; /* client finished with buffer? */
} drm_mga_indices_t;
typedef struct drm_mga_iload {
int idx;
unsigned int dstorg;
unsigned int length;
} drm_mga_iload_t;
typedef struct _drm_mga_blit {
unsigned int planemask;
unsigned int srcorg;
unsigned int dstorg;
int src_pitch, dst_pitch;
int delta_sx, delta_sy;
int delta_dx, delta_dy;
int height, ydir; /* flip image vertically */
int source_pitch, dest_pitch;
} drm_mga_blit_t;
/* 3.1: An ioctl to get parameters that aren't available to the 3d
* client any other way.
*/
#define MGA_PARAM_IRQ_NR 1
/* 3.2: Query the actual card type. The DDX only distinguishes between
* G200 chips and non-G200 chips, which it calls G400. It turns out that
 * there are some very subtle differences between the G4x0 chips and the G550
* chips. Using this parameter query, a client-side driver can detect the
* difference between a G4x0 and a G550.
*/
#define MGA_PARAM_CARD_TYPE 2
typedef struct drm_mga_getparam {
int param;
void __user *value;
} drm_mga_getparam_t;
#endif
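
For reference, the MGA_PARAM_* values defined above are queried from user space through DRM_IOCTL_MGA_GETPARAM. The snippet below is a hedged sketch of such a query using libdrm's drmCommandWriteRead(); the include paths, device path, and error handling are illustrative only and are not part of this header.

/* Hypothetical user-space query of MGA_PARAM_IRQ_NR via libdrm.
 * Build against libdrm (e.g. cc mga_param.c -ldrm) -- a sketch only.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <xf86drm.h>
#include "mga_drm.h"

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);	/* path is illustrative */
	if (fd < 0)
		return 1;

	int value = 0;
	drm_mga_getparam_t gp = { .param = MGA_PARAM_IRQ_NR, .value = &value };

	/* DRM_IOCTL_MGA_GETPARAM copies the result back through gp.value */
	if (drmCommandWriteRead(fd, DRM_MGA_GETPARAM, &gp, sizeof(gp)) == 0)
		printf("mga irq = %d\n", value);

	close(fd);
	return 0;
}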

View File

@ -1,167 +0,0 @@
/* mga_drv.c -- Matrox G200/G400 driver -*- linux-c -*-
* Created: Mon Dec 13 01:56:22 1999 by jhartmann@precisioninsight.com
*/
/*-
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
* Gareth Hughes <gareth@valinux.com>
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "dev/drm/drmP.h"
#include "dev/drm/drm.h"
#include "dev/drm/mga_drm.h"
#include "dev/drm/mga_drv.h"
#include "dev/drm/drm_pciids.h"
/* drv_PCI_IDs comes from drm_pciids.h, generated from drm_pciids.txt. */
static drm_pci_id_list_t mga_pciidlist[] = {
mga_PCI_IDS
};
/**
* Determine if the device really is AGP or not.
*
* In addition to the usual tests performed by \c drm_device_is_agp, this
* function detects PCI G450 cards that appear to the system exactly like
* AGP G450 cards.
*
* \param dev The device to be tested.
*
* \returns
* If the device is a PCI G450, zero is returned. Otherwise non-zero is
* returned.
*
* \bug
* This function needs to be filled in! The implementation in
* linux-core/mga_drv.c shows what needs to be done.
*/
static int mga_driver_device_is_agp(struct drm_device * dev)
{
device_t bus;
/* There are PCI versions of the G450. These cards have the
* same PCI ID as the AGP G450, but have an additional PCI-to-PCI
* bridge chip. We detect these cards, which are not currently
* supported by this driver, by looking at the device ID of the
* bus the "card" is on. If vendor is 0x3388 (Hint Corp) and the
* device is 0x0021 (HB6 Universal PCI-PCI bridge), we reject the
* device.
*/
bus = device_get_parent(device_get_parent(dev->device));
if (pci_get_device(dev->device) == 0x0525 &&
pci_get_vendor(bus) == 0x3388 &&
pci_get_device(bus) == 0x0021)
return DRM_IS_NOT_AGP;
else
return DRM_MIGHT_BE_AGP;
}
static void mga_configure(struct drm_device *dev)
{
dev->driver->driver_features =
DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ;
dev->driver->buf_priv_size = sizeof(drm_mga_buf_priv_t);
dev->driver->load = mga_driver_load;
dev->driver->unload = mga_driver_unload;
dev->driver->lastclose = mga_driver_lastclose;
dev->driver->get_vblank_counter = mga_get_vblank_counter;
dev->driver->enable_vblank = mga_enable_vblank;
dev->driver->disable_vblank = mga_disable_vblank;
dev->driver->irq_preinstall = mga_driver_irq_preinstall;
dev->driver->irq_postinstall = mga_driver_irq_postinstall;
dev->driver->irq_uninstall = mga_driver_irq_uninstall;
dev->driver->irq_handler = mga_driver_irq_handler;
dev->driver->dma_ioctl = mga_dma_buffers;
dev->driver->dma_quiescent = mga_driver_dma_quiescent;
dev->driver->device_is_agp = mga_driver_device_is_agp;
dev->driver->ioctls = mga_ioctls;
dev->driver->max_ioctl = mga_max_ioctl;
dev->driver->name = DRIVER_NAME;
dev->driver->desc = DRIVER_DESC;
dev->driver->date = DRIVER_DATE;
dev->driver->major = DRIVER_MAJOR;
dev->driver->minor = DRIVER_MINOR;
dev->driver->patchlevel = DRIVER_PATCHLEVEL;
}
static int
mga_probe(device_t kdev)
{
return drm_probe(kdev, mga_pciidlist);
}
static int
mga_attach(device_t kdev)
{
struct drm_device *dev = device_get_softc(kdev);
dev->driver = malloc(sizeof(struct drm_driver_info), DRM_MEM_DRIVER,
M_WAITOK | M_ZERO);
mga_configure(dev);
return drm_attach(kdev, mga_pciidlist);
}
static int
mga_detach(device_t kdev)
{
struct drm_device *dev = device_get_softc(kdev);
int ret;
ret = drm_detach(kdev);
free(dev->driver, DRM_MEM_DRIVER);
return ret;
}
static device_method_t mga_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, mga_probe),
DEVMETHOD(device_attach, mga_attach),
DEVMETHOD(device_detach, mga_detach),
{ 0, 0 }
};
static driver_t mga_driver = {
"drm",
mga_methods,
sizeof(struct drm_device)
};
extern devclass_t drm_devclass;
DRIVER_MODULE(mga, vgapci, mga_driver, drm_devclass, 0, 0);
MODULE_DEPEND(mga, drm, 1, 1, 1);

View File

@ -1,694 +0,0 @@
/* mga_drv.h -- Private header for the Matrox G200/G400 driver -*- linux-c -*-
* Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Gareth Hughes <gareth@valinux.com>
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#ifndef __MGA_DRV_H__
#define __MGA_DRV_H__
/* General customization:
*/
#define DRIVER_AUTHOR "Gareth Hughes, VA Linux Systems Inc."
#define DRIVER_NAME "mga"
#define DRIVER_DESC "Matrox G200/G400"
#define DRIVER_DATE "20060319"
#define DRIVER_MAJOR 3
#define DRIVER_MINOR 2
#define DRIVER_PATCHLEVEL 2
typedef struct drm_mga_primary_buffer {
u8 *start;
u8 *end;
int size;
u32 tail;
int space;
volatile long wrapped;
volatile u32 *status;
u32 last_flush;
u32 last_wrap;
u32 high_mark;
} drm_mga_primary_buffer_t;
typedef struct drm_mga_freelist {
struct drm_mga_freelist *next;
struct drm_mga_freelist *prev;
drm_mga_age_t age;
struct drm_buf *buf;
} drm_mga_freelist_t;
typedef struct {
drm_mga_freelist_t *list_entry;
int discard;
int dispatched;
} drm_mga_buf_priv_t;
typedef struct drm_mga_private {
drm_mga_primary_buffer_t prim;
drm_mga_sarea_t *sarea_priv;
drm_mga_freelist_t *head;
drm_mga_freelist_t *tail;
unsigned int warp_pipe;
unsigned long warp_pipe_phys[MGA_MAX_WARP_PIPES];
int chipset;
int usec_timeout;
/**
* If set, the new DMA initialization sequence was used. This is
 * primarily used to select how the driver should uninitialize its
* internal DMA structures.
*/
int used_new_dma_init;
/**
* If AGP memory is used for DMA buffers, this will be the value
* \c MGA_PAGPXFER. Otherwise, it will be zero (for a PCI transfer).
*/
u32 dma_access;
/**
* If AGP memory is used for DMA buffers, this will be the value
* \c MGA_WAGP_ENABLE. Otherwise, it will be zero (for a PCI
* transfer).
*/
u32 wagp_enable;
/**
* \name MMIO region parameters.
*
* \sa drm_mga_private_t::mmio
*/
/*@{*/
u32 mmio_base; /**< Bus address of base of MMIO. */
u32 mmio_size; /**< Size of the MMIO region. */
/*@}*/
u32 clear_cmd;
u32 maccess;
atomic_t vbl_received; /**< Number of vblanks received. */
wait_queue_head_t fence_queue;
atomic_t last_fence_retired;
u32 next_fence_to_post;
unsigned int fb_cpp;
unsigned int front_offset;
unsigned int front_pitch;
unsigned int back_offset;
unsigned int back_pitch;
unsigned int depth_cpp;
unsigned int depth_offset;
unsigned int depth_pitch;
unsigned int texture_offset;
unsigned int texture_size;
drm_local_map_t *sarea;
drm_local_map_t *mmio;
drm_local_map_t *status;
drm_local_map_t *warp;
drm_local_map_t *primary;
drm_local_map_t *agp_textures;
unsigned long agp_handle;
unsigned int agp_size;
} drm_mga_private_t;
extern struct drm_ioctl_desc mga_ioctls[];
extern int mga_max_ioctl;
/* mga_dma.c */
extern int mga_dma_bootstrap(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int mga_dma_init(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int mga_dma_flush(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int mga_dma_reset(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int mga_dma_buffers(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int mga_driver_load(struct drm_device *dev, unsigned long flags);
extern int mga_driver_unload(struct drm_device * dev);
extern void mga_driver_lastclose(struct drm_device * dev);
extern int mga_driver_dma_quiescent(struct drm_device * dev);
extern int mga_do_wait_for_idle(drm_mga_private_t * dev_priv);
extern void mga_do_dma_flush(drm_mga_private_t * dev_priv);
extern void mga_do_dma_wrap_start(drm_mga_private_t * dev_priv);
extern void mga_do_dma_wrap_end(drm_mga_private_t * dev_priv);
extern int mga_freelist_put(struct drm_device * dev, struct drm_buf * buf);
/* mga_warp.c */
extern unsigned int mga_warp_microcode_size(const drm_mga_private_t * dev_priv);
extern int mga_warp_install_microcode(drm_mga_private_t * dev_priv);
extern int mga_warp_init(drm_mga_private_t * dev_priv);
/* mga_irq.c */
extern int mga_enable_vblank(struct drm_device *dev, int crtc);
extern void mga_disable_vblank(struct drm_device *dev, int crtc);
extern u32 mga_get_vblank_counter(struct drm_device *dev, int crtc);
extern int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence);
extern int mga_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence);
extern irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS);
extern void mga_driver_irq_preinstall(struct drm_device * dev);
extern int mga_driver_irq_postinstall(struct drm_device * dev);
extern void mga_driver_irq_uninstall(struct drm_device * dev);
extern long mga_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
#define mga_flush_write_combine() DRM_WRITEMEMORYBARRIER()
#if defined(__linux__) && defined(__alpha__)
#define MGA_BASE( reg ) ((unsigned long)(dev_priv->mmio->handle))
#define MGA_ADDR( reg ) (MGA_BASE(reg) + reg)
#define MGA_DEREF( reg ) *(volatile u32 *)MGA_ADDR( reg )
#define MGA_DEREF8( reg ) *(volatile u8 *)MGA_ADDR( reg )
#define MGA_READ( reg ) (_MGA_READ((u32 *)MGA_ADDR(reg)))
#define MGA_READ8( reg ) (_MGA_READ((u8 *)MGA_ADDR(reg)))
#define MGA_WRITE( reg, val ) do { DRM_WRITEMEMORYBARRIER(); MGA_DEREF( reg ) = val; } while (0)
#define MGA_WRITE8( reg, val ) do { DRM_WRITEMEMORYBARRIER(); MGA_DEREF8( reg ) = val; } while (0)
static inline u32 _MGA_READ(u32 * addr)
{
DRM_MEMORYBARRIER();
return *(volatile u32 *)addr;
}
#else
#define MGA_READ8( reg ) DRM_READ8(dev_priv->mmio, (reg))
#define MGA_READ( reg ) DRM_READ32(dev_priv->mmio, (reg))
#define MGA_WRITE8( reg, val ) DRM_WRITE8(dev_priv->mmio, (reg), (val))
#define MGA_WRITE( reg, val ) DRM_WRITE32(dev_priv->mmio, (reg), (val))
#endif
#define DWGREG0 0x1c00
#define DWGREG0_END 0x1dff
#define DWGREG1 0x2c00
#define DWGREG1_END 0x2dff
#define ISREG0(r) (r >= DWGREG0 && r <= DWGREG0_END)
#define DMAREG0(r) (u8)((r - DWGREG0) >> 2)
#define DMAREG1(r) (u8)(((r - DWGREG1) >> 2) | 0x80)
#define DMAREG(r) (ISREG0(r) ? DMAREG0(r) : DMAREG1(r))
/* ================================================================
 * Helper macros...
*/
#define MGA_EMIT_STATE( dev_priv, dirty ) \
do { \
if ( (dirty) & ~MGA_UPLOAD_CLIPRECTS ) { \
if ( dev_priv->chipset >= MGA_CARD_TYPE_G400 ) { \
mga_g400_emit_state( dev_priv ); \
} else { \
mga_g200_emit_state( dev_priv ); \
} \
} \
} while (0)
#define WRAP_TEST_WITH_RETURN( dev_priv ) \
do { \
if ( test_bit( 0, &dev_priv->prim.wrapped ) ) { \
if ( mga_is_idle( dev_priv ) ) { \
mga_do_dma_wrap_end( dev_priv ); \
} else if ( dev_priv->prim.space < \
dev_priv->prim.high_mark ) { \
if ( MGA_DMA_DEBUG ) \
DRM_INFO( "wrap...\n"); \
return -EBUSY; \
} \
} \
} while (0)
#define WRAP_WAIT_WITH_RETURN( dev_priv ) \
do { \
if ( test_bit( 0, &dev_priv->prim.wrapped ) ) { \
if ( mga_do_wait_for_idle( dev_priv ) < 0 ) { \
if ( MGA_DMA_DEBUG ) \
DRM_INFO( "wrap...\n"); \
return -EBUSY; \
} \
mga_do_dma_wrap_end( dev_priv ); \
} \
} while (0)
/* ================================================================
* Primary DMA command stream
*/
#define MGA_VERBOSE 0
#define DMA_LOCALS unsigned int write; volatile u8 *prim;
#define DMA_BLOCK_SIZE (5 * sizeof(u32))
#define BEGIN_DMA( n ) \
do { \
if ( MGA_VERBOSE ) { \
DRM_INFO( "BEGIN_DMA( %d )\n", (n) ); \
DRM_INFO( " space=0x%x req=0x%zx\n", \
dev_priv->prim.space, (n) * DMA_BLOCK_SIZE ); \
} \
prim = dev_priv->prim.start; \
write = dev_priv->prim.tail; \
} while (0)
#define BEGIN_DMA_WRAP() \
do { \
if ( MGA_VERBOSE ) { \
DRM_INFO( "BEGIN_DMA()\n" ); \
DRM_INFO( " space=0x%x\n", dev_priv->prim.space ); \
} \
prim = dev_priv->prim.start; \
write = dev_priv->prim.tail; \
} while (0)
#define ADVANCE_DMA() \
do { \
dev_priv->prim.tail = write; \
if ( MGA_VERBOSE ) { \
DRM_INFO( "ADVANCE_DMA() tail=0x%05x sp=0x%x\n", \
write, dev_priv->prim.space ); \
} \
} while (0)
#define FLUSH_DMA() \
do { \
if ( 0 ) { \
DRM_INFO( "\n" ); \
DRM_INFO( " tail=0x%06x head=0x%06lx\n", \
dev_priv->prim.tail, \
MGA_READ( MGA_PRIMADDRESS ) - \
dev_priv->primary->offset ); \
} \
if ( !test_bit( 0, &dev_priv->prim.wrapped ) ) { \
if ( dev_priv->prim.space < \
dev_priv->prim.high_mark ) { \
mga_do_dma_wrap_start( dev_priv ); \
} else { \
mga_do_dma_flush( dev_priv ); \
} \
} \
} while (0)
/* Never use this, always use DMA_BLOCK(...) for primary DMA output.
*/
#define DMA_WRITE( offset, val ) \
do { \
if ( MGA_VERBOSE ) { \
DRM_INFO( " DMA_WRITE( 0x%08x ) at 0x%04zx\n", \
(u32)(val), write + (offset) * sizeof(u32) ); \
} \
*(volatile u32 *)(prim + write + (offset) * sizeof(u32)) = val; \
} while (0)
#define DMA_BLOCK( reg0, val0, reg1, val1, reg2, val2, reg3, val3 ) \
do { \
DMA_WRITE( 0, ((DMAREG( reg0 ) << 0) | \
(DMAREG( reg1 ) << 8) | \
(DMAREG( reg2 ) << 16) | \
(DMAREG( reg3 ) << 24)) ); \
DMA_WRITE( 1, val0 ); \
DMA_WRITE( 2, val1 ); \
DMA_WRITE( 3, val2 ); \
DMA_WRITE( 4, val3 ); \
write += DMA_BLOCK_SIZE; \
} while (0)
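
DMA_BLOCK() packs four register tags into the first dword of a five-dword block, one byte per register, followed by the four values. The stand-alone sketch below (editor's illustration; register offsets copied from the defines above) reproduces the DMAREG() tag encoding and the packing of that first dword:

#include <stdio.h>
#include <stdint.h>

#define DWGREG0     0x1c00
#define DWGREG0_END 0x1dff
#define DWGREG1     0x2c00

/* One-byte tag per register, as in the DMAREG()/DMA_BLOCK() macros:
 * registers in the 0x1c00 range map to 0x00-0x7f, registers in the
 * 0x2c00 range map to 0x80-0xff.
 */
static uint8_t dmareg(uint16_t reg)
{
	if (reg >= DWGREG0 && reg <= DWGREG0_END)
		return (uint8_t)((reg - DWGREG0) >> 2);
	return (uint8_t)(((reg - DWGREG1) >> 2) | 0x80);
}

int main(void)
{
	/* first dword of DMA_BLOCK(MGA_DWGCTL, ..., MGA_FCOL, ...,
	 * MGA_DSTORG, ..., MGA_PLNWT, ...) */
	uint32_t tags = dmareg(0x1c00) | (dmareg(0x1c24) << 8) |
	    (dmareg(0x2cb8) << 16) | ((uint32_t)dmareg(0x1c1c) << 24);

	printf("tag dword = 0x%08x\n", tags);
	return 0;
}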
/* Buffer aging via primary DMA stream head pointer.
*/
#define SET_AGE( age, h, w ) \
do { \
(age)->head = h; \
(age)->wrap = w; \
} while (0)
#define TEST_AGE( age, h, w ) ( (age)->wrap < w || \
( (age)->wrap == w && \
(age)->head < h ) )
#define AGE_BUFFER( buf_priv ) \
do { \
drm_mga_freelist_t *entry = (buf_priv)->list_entry; \
if ( (buf_priv)->dispatched ) { \
entry->age.head = (dev_priv->prim.tail + \
dev_priv->primary->offset); \
entry->age.wrap = dev_priv->sarea_priv->last_wrap; \
} else { \
entry->age.head = 0; \
entry->age.wrap = 0; \
} \
} while (0)
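
SET_AGE()/TEST_AGE() above implement a two-word timestamp: the primary DMA wrap count is the major component and the stream head pointer the minor one. The sketch below (editor's illustration, not driver code) expresses the TEST_AGE() comparison as a plain function; a buffer whose recorded age is strictly older than the current (head, wrap) position has been passed by the hardware and can be reused.

#include <stdio.h>

struct age { unsigned int head, wrap; };

/* Equivalent of TEST_AGE(): has the primary DMA stream moved past 'a'? */
static int age_passed(const struct age *a, unsigned int head, unsigned int wrap)
{
	return a->wrap < wrap || (a->wrap == wrap && a->head < head);
}

int main(void)
{
	struct age a = { .head = 0x4000, .wrap = 2 };

	printf("%d\n", age_passed(&a, 0x1000, 2)); /* 0: same wrap, head not reached */
	printf("%d\n", age_passed(&a, 0x5000, 2)); /* 1: same wrap, head moved past */
	printf("%d\n", age_passed(&a, 0x0100, 3)); /* 1: a later wrap always wins */
	return 0;
}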
#define MGA_ENGINE_IDLE_MASK (MGA_SOFTRAPEN | \
MGA_DWGENGSTS | \
MGA_ENDPRDMASTS)
#define MGA_DMA_IDLE_MASK (MGA_SOFTRAPEN | \
MGA_ENDPRDMASTS)
#define MGA_DMA_DEBUG 0
/* A reduced set of the mga registers.
*/
#define MGA_CRTC_INDEX 0x1fd4
#define MGA_CRTC_DATA 0x1fd5
/* CRTC11 */
#define MGA_VINTCLR (1 << 4)
#define MGA_VINTEN (1 << 5)
#define MGA_ALPHACTRL 0x2c7c
#define MGA_AR0 0x1c60
#define MGA_AR1 0x1c64
#define MGA_AR2 0x1c68
#define MGA_AR3 0x1c6c
#define MGA_AR4 0x1c70
#define MGA_AR5 0x1c74
#define MGA_AR6 0x1c78
#define MGA_CXBNDRY 0x1c80
#define MGA_CXLEFT 0x1ca0
#define MGA_CXRIGHT 0x1ca4
#define MGA_DMAPAD 0x1c54
#define MGA_DSTORG 0x2cb8
#define MGA_DWGCTL 0x1c00
# define MGA_OPCOD_MASK (15 << 0)
# define MGA_OPCOD_TRAP (4 << 0)
# define MGA_OPCOD_TEXTURE_TRAP (6 << 0)
# define MGA_OPCOD_BITBLT (8 << 0)
# define MGA_OPCOD_ILOAD (9 << 0)
# define MGA_ATYPE_MASK (7 << 4)
# define MGA_ATYPE_RPL (0 << 4)
# define MGA_ATYPE_RSTR (1 << 4)
# define MGA_ATYPE_ZI (3 << 4)
# define MGA_ATYPE_BLK (4 << 4)
# define MGA_ATYPE_I (7 << 4)
# define MGA_LINEAR (1 << 7)
# define MGA_ZMODE_MASK (7 << 8)
# define MGA_ZMODE_NOZCMP (0 << 8)
# define MGA_ZMODE_ZE (2 << 8)
# define MGA_ZMODE_ZNE (3 << 8)
# define MGA_ZMODE_ZLT (4 << 8)
# define MGA_ZMODE_ZLTE (5 << 8)
# define MGA_ZMODE_ZGT (6 << 8)
# define MGA_ZMODE_ZGTE (7 << 8)
# define MGA_SOLID (1 << 11)
# define MGA_ARZERO (1 << 12)
# define MGA_SGNZERO (1 << 13)
# define MGA_SHIFTZERO (1 << 14)
# define MGA_BOP_MASK (15 << 16)
# define MGA_BOP_ZERO (0 << 16)
# define MGA_BOP_DST (10 << 16)
# define MGA_BOP_SRC (12 << 16)
# define MGA_BOP_ONE (15 << 16)
# define MGA_TRANS_SHIFT 20
# define MGA_TRANS_MASK (15 << 20)
# define MGA_BLTMOD_MASK (15 << 25)
# define MGA_BLTMOD_BMONOLEF (0 << 25)
# define MGA_BLTMOD_BMONOWF (4 << 25)
# define MGA_BLTMOD_PLAN (1 << 25)
# define MGA_BLTMOD_BFCOL (2 << 25)
# define MGA_BLTMOD_BU32BGR (3 << 25)
# define MGA_BLTMOD_BU32RGB (7 << 25)
# define MGA_BLTMOD_BU24BGR (11 << 25)
# define MGA_BLTMOD_BU24RGB (15 << 25)
# define MGA_PATTERN (1 << 29)
# define MGA_TRANSC (1 << 30)
# define MGA_CLIPDIS (1U << 31)
#define MGA_DWGSYNC 0x2c4c
#define MGA_FCOL 0x1c24
#define MGA_FIFOSTATUS 0x1e10
#define MGA_FOGCOL 0x1cf4
#define MGA_FXBNDRY 0x1c84
#define MGA_FXLEFT 0x1ca8
#define MGA_FXRIGHT 0x1cac
#define MGA_ICLEAR 0x1e18
# define MGA_SOFTRAPICLR (1 << 0)
# define MGA_VLINEICLR (1 << 5)
#define MGA_IEN 0x1e1c
# define MGA_SOFTRAPIEN (1 << 0)
# define MGA_VLINEIEN (1 << 5)
#define MGA_LEN 0x1c5c
#define MGA_MACCESS 0x1c04
#define MGA_PITCH 0x1c8c
#define MGA_PLNWT 0x1c1c
#define MGA_PRIMADDRESS 0x1e58
# define MGA_DMA_GENERAL (0 << 0)
# define MGA_DMA_BLIT (1 << 0)
# define MGA_DMA_VECTOR (2 << 0)
# define MGA_DMA_VERTEX (3 << 0)
#define MGA_PRIMEND 0x1e5c
# define MGA_PRIMNOSTART (1 << 0)
# define MGA_PAGPXFER (1 << 1)
#define MGA_PRIMPTR 0x1e50
# define MGA_PRIMPTREN0 (1 << 0)
# define MGA_PRIMPTREN1 (1 << 1)
#define MGA_RST 0x1e40
# define MGA_SOFTRESET (1 << 0)
# define MGA_SOFTEXTRST (1 << 1)
#define MGA_SECADDRESS 0x2c40
#define MGA_SECEND 0x2c44
#define MGA_SETUPADDRESS 0x2cd0
#define MGA_SETUPEND 0x2cd4
#define MGA_SGN 0x1c58
#define MGA_SOFTRAP 0x2c48
#define MGA_SRCORG 0x2cb4
# define MGA_SRMMAP_MASK (1 << 0)
# define MGA_SRCMAP_FB (0 << 0)
# define MGA_SRCMAP_SYSMEM (1 << 0)
# define MGA_SRCACC_MASK (1 << 1)
# define MGA_SRCACC_PCI (0 << 1)
# define MGA_SRCACC_AGP (1 << 1)
#define MGA_STATUS 0x1e14
# define MGA_SOFTRAPEN (1 << 0)
# define MGA_VSYNCPEN (1 << 4)
# define MGA_VLINEPEN (1 << 5)
# define MGA_DWGENGSTS (1 << 16)
# define MGA_ENDPRDMASTS (1 << 17)
#define MGA_STENCIL 0x2cc8
#define MGA_STENCILCTL 0x2ccc
#define MGA_TDUALSTAGE0 0x2cf8
#define MGA_TDUALSTAGE1 0x2cfc
#define MGA_TEXBORDERCOL 0x2c5c
#define MGA_TEXCTL 0x2c30
#define MGA_TEXCTL2 0x2c3c
# define MGA_DUALTEX (1 << 7)
# define MGA_G400_TC2_MAGIC (1 << 15)
# define MGA_MAP1_ENABLE (1U << 31)
#define MGA_TEXFILTER 0x2c58
#define MGA_TEXHEIGHT 0x2c2c
#define MGA_TEXORG 0x2c24
# define MGA_TEXORGMAP_MASK (1 << 0)
# define MGA_TEXORGMAP_FB (0 << 0)
# define MGA_TEXORGMAP_SYSMEM (1 << 0)
# define MGA_TEXORGACC_MASK (1 << 1)
# define MGA_TEXORGACC_PCI (0 << 1)
# define MGA_TEXORGACC_AGP (1 << 1)
#define MGA_TEXORG1 0x2ca4
#define MGA_TEXORG2 0x2ca8
#define MGA_TEXORG3 0x2cac
#define MGA_TEXORG4 0x2cb0
#define MGA_TEXTRANS 0x2c34
#define MGA_TEXTRANSHIGH 0x2c38
#define MGA_TEXWIDTH 0x2c28
#define MGA_WACCEPTSEQ 0x1dd4
#define MGA_WCODEADDR 0x1e6c
#define MGA_WFLAG 0x1dc4
#define MGA_WFLAG1 0x1de0
#define MGA_WFLAGNB 0x1e64
#define MGA_WFLAGNB1 0x1e08
#define MGA_WGETMSB 0x1dc8
#define MGA_WIADDR 0x1dc0
#define MGA_WIADDR2 0x1dd8
# define MGA_WMODE_SUSPEND (0 << 0)
# define MGA_WMODE_RESUME (1 << 0)
# define MGA_WMODE_JUMP (2 << 0)
# define MGA_WMODE_START (3 << 0)
# define MGA_WAGP_ENABLE (1 << 2)
#define MGA_WMISC 0x1e70
# define MGA_WUCODECACHE_ENABLE (1 << 0)
# define MGA_WMASTER_ENABLE (1 << 1)
# define MGA_WCACHEFLUSH_ENABLE (1 << 3)
#define MGA_WVRTXSZ 0x1dcc
#define MGA_YBOT 0x1c9c
#define MGA_YDST 0x1c90
#define MGA_YDSTLEN 0x1c88
#define MGA_YDSTORG 0x1c94
#define MGA_YTOP 0x1c98
#define MGA_ZORG 0x1c0c
/* This finishes the current batch of commands
*/
#define MGA_EXEC 0x0100
/* AGP PLL encoding (for G200 only).
*/
#define MGA_AGP_PLL 0x1e4c
# define MGA_AGP2XPLL_DISABLE (0 << 0)
# define MGA_AGP2XPLL_ENABLE (1 << 0)
/* Warp registers
*/
#define MGA_WR0 0x2d00
#define MGA_WR1 0x2d04
#define MGA_WR2 0x2d08
#define MGA_WR3 0x2d0c
#define MGA_WR4 0x2d10
#define MGA_WR5 0x2d14
#define MGA_WR6 0x2d18
#define MGA_WR7 0x2d1c
#define MGA_WR8 0x2d20
#define MGA_WR9 0x2d24
#define MGA_WR10 0x2d28
#define MGA_WR11 0x2d2c
#define MGA_WR12 0x2d30
#define MGA_WR13 0x2d34
#define MGA_WR14 0x2d38
#define MGA_WR15 0x2d3c
#define MGA_WR16 0x2d40
#define MGA_WR17 0x2d44
#define MGA_WR18 0x2d48
#define MGA_WR19 0x2d4c
#define MGA_WR20 0x2d50
#define MGA_WR21 0x2d54
#define MGA_WR22 0x2d58
#define MGA_WR23 0x2d5c
#define MGA_WR24 0x2d60
#define MGA_WR25 0x2d64
#define MGA_WR26 0x2d68
#define MGA_WR27 0x2d6c
#define MGA_WR28 0x2d70
#define MGA_WR29 0x2d74
#define MGA_WR30 0x2d78
#define MGA_WR31 0x2d7c
#define MGA_WR32 0x2d80
#define MGA_WR33 0x2d84
#define MGA_WR34 0x2d88
#define MGA_WR35 0x2d8c
#define MGA_WR36 0x2d90
#define MGA_WR37 0x2d94
#define MGA_WR38 0x2d98
#define MGA_WR39 0x2d9c
#define MGA_WR40 0x2da0
#define MGA_WR41 0x2da4
#define MGA_WR42 0x2da8
#define MGA_WR43 0x2dac
#define MGA_WR44 0x2db0
#define MGA_WR45 0x2db4
#define MGA_WR46 0x2db8
#define MGA_WR47 0x2dbc
#define MGA_WR48 0x2dc0
#define MGA_WR49 0x2dc4
#define MGA_WR50 0x2dc8
#define MGA_WR51 0x2dcc
#define MGA_WR52 0x2dd0
#define MGA_WR53 0x2dd4
#define MGA_WR54 0x2dd8
#define MGA_WR55 0x2ddc
#define MGA_WR56 0x2de0
#define MGA_WR57 0x2de4
#define MGA_WR58 0x2de8
#define MGA_WR59 0x2dec
#define MGA_WR60 0x2df0
#define MGA_WR61 0x2df4
#define MGA_WR62 0x2df8
#define MGA_WR63 0x2dfc
# define MGA_G400_WR_MAGIC (1 << 6)
# define MGA_G400_WR56_MAGIC 0x46480000 /* 12800.0f */
#define MGA_ILOAD_ALIGN 64
#define MGA_ILOAD_MASK (MGA_ILOAD_ALIGN - 1)
#define MGA_DWGCTL_FLUSH (MGA_OPCOD_TEXTURE_TRAP | \
MGA_ATYPE_I | \
MGA_ZMODE_NOZCMP | \
MGA_ARZERO | \
MGA_SGNZERO | \
MGA_BOP_SRC | \
(15 << MGA_TRANS_SHIFT))
#define MGA_DWGCTL_CLEAR (MGA_OPCOD_TRAP | \
MGA_ZMODE_NOZCMP | \
MGA_SOLID | \
MGA_ARZERO | \
MGA_SGNZERO | \
MGA_SHIFTZERO | \
MGA_BOP_SRC | \
(0 << MGA_TRANS_SHIFT) | \
MGA_BLTMOD_BMONOLEF | \
MGA_TRANSC | \
MGA_CLIPDIS)
#define MGA_DWGCTL_COPY (MGA_OPCOD_BITBLT | \
MGA_ATYPE_RPL | \
MGA_SGNZERO | \
MGA_SHIFTZERO | \
MGA_BOP_SRC | \
(0 << MGA_TRANS_SHIFT) | \
MGA_BLTMOD_BFCOL | \
MGA_CLIPDIS)
/* Simple idle test.
*/
static __inline__ int mga_is_idle(drm_mga_private_t * dev_priv)
{
u32 status = MGA_READ(MGA_STATUS) & MGA_ENGINE_IDLE_MASK;
return (status == MGA_ENDPRDMASTS);
}
#endif

View File

@ -1,183 +0,0 @@
/* mga_irq.c -- IRQ handling for the Matrox MGA -*- linux-c -*-
*/
/*-
* Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
*
* The Weather Channel (TM) funded Tungsten Graphics to develop the
* initial release of the Radeon 8500 driver under the XFree86 license.
* This notice must be preserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Keith Whitwell <keith@tungstengraphics.com>
* Eric Anholt <anholt@FreeBSD.org>
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "dev/drm/drmP.h"
#include "dev/drm/drm.h"
#include "dev/drm/mga_drm.h"
#include "dev/drm/mga_drv.h"
u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
{
const drm_mga_private_t *const dev_priv =
(drm_mga_private_t *) dev->dev_private;
if (crtc != 0) {
return 0;
}
return atomic_read(&dev_priv->vbl_received);
}
irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
int status;
int handled = 0;
status = MGA_READ(MGA_STATUS);
/* VBLANK interrupt */
if (status & MGA_VLINEPEN) {
MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
atomic_inc(&dev_priv->vbl_received);
drm_handle_vblank(dev, 0);
handled = 1;
}
/* SOFTRAP interrupt */
if (status & MGA_SOFTRAPEN) {
const u32 prim_start = MGA_READ(MGA_PRIMADDRESS);
const u32 prim_end = MGA_READ(MGA_PRIMEND);
MGA_WRITE(MGA_ICLEAR, MGA_SOFTRAPICLR);
/* In addition to clearing the interrupt-pending bit, we
* have to write to MGA_PRIMEND to re-start the DMA operation.
*/
if ((prim_start & ~0x03) != (prim_end & ~0x03)) {
MGA_WRITE(MGA_PRIMEND, prim_end);
}
atomic_inc(&dev_priv->last_fence_retired);
DRM_WAKEUP(&dev_priv->fence_queue);
handled = 1;
}
if (handled)
return IRQ_HANDLED;
return IRQ_NONE;
}
int mga_enable_vblank(struct drm_device *dev, int crtc)
{
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
if (crtc != 0) {
DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
crtc);
return 0;
}
MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN);
return 0;
}
void mga_disable_vblank(struct drm_device *dev, int crtc)
{
if (crtc != 0) {
DRM_ERROR("tried to disable vblank on non-existent crtc %d\n",
crtc);
}
/* Do *NOT* disable the vertical refresh interrupt. MGA doesn't have
* a nice hardware counter that tracks the number of refreshes when
* the interrupt is disabled, and the kernel doesn't know the refresh
* rate to calculate an estimate.
*/
/* MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN); */
}
int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence)
{
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
unsigned int cur_fence;
int ret = 0;
/* Assume that the user has missed the current sequence number
 * by about a day rather than wanting to wait for years
* using fences.
*/
DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
(((cur_fence = atomic_read(&dev_priv->last_fence_retired))
- *sequence) <= (1 << 23)));
if (ret == -ERESTART)
DRM_DEBUG("restarting syscall\n");
*sequence = cur_fence;
return ret;
}
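
The DRM_WAIT_ON() condition above relies on unsigned wrap-around: the difference (retired - wanted), taken modulo 2^32, is small once the requested fence has been retired, even if the 32-bit counter wrapped in between. A minimal stand-alone illustration (editor's sketch, names hypothetical):

#include <stdio.h>
#include <stdint.h>

/* Wrap-safe "has fence 'wanted' been retired?" test, as in the
 * DRM_WAIT_ON() condition of mga_driver_fence_wait().
 */
static int fence_retired(uint32_t retired, uint32_t wanted)
{
	return (uint32_t)(retired - wanted) <= (1u << 23);
}

int main(void)
{
	printf("%d\n", fence_retired(100, 90));		/* 1: already retired */
	printf("%d\n", fence_retired(90, 100));		/* 0: still in the future */
	printf("%d\n", fence_retired(5, 0xfffffff0u));	/* 1: retired across wrap */
	return 0;
}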
void mga_driver_irq_preinstall(struct drm_device * dev)
{
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
/* Disable *all* interrupts */
MGA_WRITE(MGA_IEN, 0);
/* Clear bits if they're already high */
MGA_WRITE(MGA_ICLEAR, ~0);
}
int mga_driver_irq_postinstall(struct drm_device * dev)
{
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
DRM_INIT_WAITQUEUE(&dev_priv->fence_queue);
/* Turn on soft trap interrupt. Vertical blank interrupts are enabled
* in mga_enable_vblank.
*/
MGA_WRITE(MGA_IEN, MGA_SOFTRAPEN);
return 0;
}
void mga_driver_irq_uninstall(struct drm_device * dev)
{
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
if (!dev_priv)
return;
/* Disable *all* interrupts */
MGA_WRITE(MGA_IEN, 0);
dev->irq_enabled = 0;
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1,201 +0,0 @@
/* mga_warp.c -- Matrox G200/G400 WARP engine management -*- linux-c -*-
* Created: Thu Jan 11 21:29:32 2001 by gareth@valinux.com
*/
/*-
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Gareth Hughes <gareth@valinux.com>
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "dev/drm/drmP.h"
#include "dev/drm/drm.h"
#include "dev/drm/mga_drm.h"
#include "dev/drm/mga_drv.h"
#include "dev/drm/mga_ucode.h"
#define MGA_WARP_CODE_ALIGN 256 /* in bytes */
#define WARP_UCODE_SIZE( which ) \
((sizeof(which) / MGA_WARP_CODE_ALIGN + 1) * MGA_WARP_CODE_ALIGN)
#define WARP_UCODE_INSTALL( which, where ) \
do { \
DRM_DEBUG( " pcbase = 0x%08lx vcbase = %p\n", pcbase, vcbase );\
dev_priv->warp_pipe_phys[where] = pcbase; \
memcpy( vcbase, which, sizeof(which) ); \
pcbase += WARP_UCODE_SIZE( which ); \
vcbase += WARP_UCODE_SIZE( which ); \
} while (0)
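
WARP_UCODE_SIZE() above places each pipe's microcode on a 256-byte boundary by integer-dividing the size by MGA_WARP_CODE_ALIGN and adding one block. A short worked example of that arithmetic (editor's sketch); note that a size which is already a multiple of 256 still gets an extra 256-byte block.

#include <stdio.h>

#define MGA_WARP_CODE_ALIGN 256 /* in bytes */

/* Same arithmetic as WARP_UCODE_SIZE(): round up to the alignment,
 * always leaving at least one extra block of slack.
 */
static unsigned int warp_ucode_size(unsigned int size)
{
	return (size / MGA_WARP_CODE_ALIGN + 1) * MGA_WARP_CODE_ALIGN;
}

int main(void)
{
	printf("%u\n", warp_ucode_size(1000)); /* 1024 */
	printf("%u\n", warp_ucode_size(1024)); /* 1280: exact multiples over-allocate */
	return 0;
}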
static const unsigned int mga_warp_g400_microcode_size =
(WARP_UCODE_SIZE(warp_g400_tgz) +
WARP_UCODE_SIZE(warp_g400_tgza) +
WARP_UCODE_SIZE(warp_g400_tgzaf) +
WARP_UCODE_SIZE(warp_g400_tgzf) +
WARP_UCODE_SIZE(warp_g400_tgzs) +
WARP_UCODE_SIZE(warp_g400_tgzsa) +
WARP_UCODE_SIZE(warp_g400_tgzsaf) +
WARP_UCODE_SIZE(warp_g400_tgzsf) +
WARP_UCODE_SIZE(warp_g400_t2gz) +
WARP_UCODE_SIZE(warp_g400_t2gza) +
WARP_UCODE_SIZE(warp_g400_t2gzaf) +
WARP_UCODE_SIZE(warp_g400_t2gzf) +
WARP_UCODE_SIZE(warp_g400_t2gzs) +
WARP_UCODE_SIZE(warp_g400_t2gzsa) +
WARP_UCODE_SIZE(warp_g400_t2gzsaf) +
WARP_UCODE_SIZE(warp_g400_t2gzsf));
static const unsigned int mga_warp_g200_microcode_size =
(WARP_UCODE_SIZE(warp_g200_tgz) +
WARP_UCODE_SIZE(warp_g200_tgza) +
WARP_UCODE_SIZE(warp_g200_tgzaf) +
WARP_UCODE_SIZE(warp_g200_tgzf) +
WARP_UCODE_SIZE(warp_g200_tgzs) +
WARP_UCODE_SIZE(warp_g200_tgzsa) +
WARP_UCODE_SIZE(warp_g200_tgzsaf) +
WARP_UCODE_SIZE(warp_g200_tgzsf));
unsigned int mga_warp_microcode_size(const drm_mga_private_t * dev_priv)
{
switch (dev_priv->chipset) {
case MGA_CARD_TYPE_G400:
case MGA_CARD_TYPE_G550:
return PAGE_ALIGN(mga_warp_g400_microcode_size);
case MGA_CARD_TYPE_G200:
return PAGE_ALIGN(mga_warp_g200_microcode_size);
default:
DRM_ERROR("Unknown chipset value: 0x%x\n", dev_priv->chipset);
return 0;
}
}
static int mga_warp_install_g400_microcode(drm_mga_private_t * dev_priv)
{
unsigned char *vcbase = dev_priv->warp->virtual;
unsigned long pcbase = dev_priv->warp->offset;
memset(dev_priv->warp_pipe_phys, 0, sizeof(dev_priv->warp_pipe_phys));
WARP_UCODE_INSTALL(warp_g400_tgz, MGA_WARP_TGZ);
WARP_UCODE_INSTALL(warp_g400_tgzf, MGA_WARP_TGZF);
WARP_UCODE_INSTALL(warp_g400_tgza, MGA_WARP_TGZA);
WARP_UCODE_INSTALL(warp_g400_tgzaf, MGA_WARP_TGZAF);
WARP_UCODE_INSTALL(warp_g400_tgzs, MGA_WARP_TGZS);
WARP_UCODE_INSTALL(warp_g400_tgzsf, MGA_WARP_TGZSF);
WARP_UCODE_INSTALL(warp_g400_tgzsa, MGA_WARP_TGZSA);
WARP_UCODE_INSTALL(warp_g400_tgzsaf, MGA_WARP_TGZSAF);
WARP_UCODE_INSTALL(warp_g400_t2gz, MGA_WARP_T2GZ);
WARP_UCODE_INSTALL(warp_g400_t2gzf, MGA_WARP_T2GZF);
WARP_UCODE_INSTALL(warp_g400_t2gza, MGA_WARP_T2GZA);
WARP_UCODE_INSTALL(warp_g400_t2gzaf, MGA_WARP_T2GZAF);
WARP_UCODE_INSTALL(warp_g400_t2gzs, MGA_WARP_T2GZS);
WARP_UCODE_INSTALL(warp_g400_t2gzsf, MGA_WARP_T2GZSF);
WARP_UCODE_INSTALL(warp_g400_t2gzsa, MGA_WARP_T2GZSA);
WARP_UCODE_INSTALL(warp_g400_t2gzsaf, MGA_WARP_T2GZSAF);
return 0;
}
static int mga_warp_install_g200_microcode(drm_mga_private_t * dev_priv)
{
unsigned char *vcbase = dev_priv->warp->virtual;
unsigned long pcbase = dev_priv->warp->offset;
memset(dev_priv->warp_pipe_phys, 0, sizeof(dev_priv->warp_pipe_phys));
WARP_UCODE_INSTALL(warp_g200_tgz, MGA_WARP_TGZ);
WARP_UCODE_INSTALL(warp_g200_tgzf, MGA_WARP_TGZF);
WARP_UCODE_INSTALL(warp_g200_tgza, MGA_WARP_TGZA);
WARP_UCODE_INSTALL(warp_g200_tgzaf, MGA_WARP_TGZAF);
WARP_UCODE_INSTALL(warp_g200_tgzs, MGA_WARP_TGZS);
WARP_UCODE_INSTALL(warp_g200_tgzsf, MGA_WARP_TGZSF);
WARP_UCODE_INSTALL(warp_g200_tgzsa, MGA_WARP_TGZSA);
WARP_UCODE_INSTALL(warp_g200_tgzsaf, MGA_WARP_TGZSAF);
return 0;
}
int mga_warp_install_microcode(drm_mga_private_t * dev_priv)
{
const unsigned int size = mga_warp_microcode_size(dev_priv);
DRM_DEBUG("MGA ucode size = %d bytes\n", size);
if (size > dev_priv->warp->size) {
DRM_ERROR("microcode too large! (%u > %lu)\n",
size, dev_priv->warp->size);
return -ENOMEM;
}
switch (dev_priv->chipset) {
case MGA_CARD_TYPE_G400:
case MGA_CARD_TYPE_G550:
return mga_warp_install_g400_microcode(dev_priv);
case MGA_CARD_TYPE_G200:
return mga_warp_install_g200_microcode(dev_priv);
default:
return -EINVAL;
}
}
#define WMISC_EXPECTED (MGA_WUCODECACHE_ENABLE | MGA_WMASTER_ENABLE)
int mga_warp_init(drm_mga_private_t * dev_priv)
{
u32 wmisc;
/* FIXME: Get rid of these damned magic numbers...
*/
switch (dev_priv->chipset) {
case MGA_CARD_TYPE_G400:
case MGA_CARD_TYPE_G550:
MGA_WRITE(MGA_WIADDR2, MGA_WMODE_SUSPEND);
MGA_WRITE(MGA_WGETMSB, 0x00000E00);
MGA_WRITE(MGA_WVRTXSZ, 0x00001807);
MGA_WRITE(MGA_WACCEPTSEQ, 0x18000000);
break;
case MGA_CARD_TYPE_G200:
MGA_WRITE(MGA_WIADDR, MGA_WMODE_SUSPEND);
MGA_WRITE(MGA_WGETMSB, 0x1606);
MGA_WRITE(MGA_WVRTXSZ, 7);
break;
default:
return -EINVAL;
}
MGA_WRITE(MGA_WMISC, (MGA_WUCODECACHE_ENABLE |
MGA_WMASTER_ENABLE | MGA_WCACHEFLUSH_ENABLE));
wmisc = MGA_READ(MGA_WMISC);
if (wmisc != WMISC_EXPECTED) {
DRM_ERROR("WARP engine config failed! 0x%x != 0x%x\n",
wmisc, WMISC_EXPECTED);
return -EINVAL;
}
return 0;
}

View File

@ -1,936 +0,0 @@
/* r128_cce.c -- ATI Rage 128 driver -*- linux-c -*-
* Created: Wed Apr 5 19:24:19 2000 by kevin@precisioninsight.com
*/
/*-
* Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Gareth Hughes <gareth@valinux.com>
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "dev/drm/drmP.h"
#include "dev/drm/drm.h"
#include "dev/drm/r128_drm.h"
#include "dev/drm/r128_drv.h"
#define R128_FIFO_DEBUG 0
/* CCE microcode (from ATI) */
static u32 r128_cce_microcode[] = {
0, 276838400, 0, 268449792, 2, 142, 2, 145, 0, 1076765731, 0,
1617039951, 0, 774592877, 0, 1987540286, 0, 2307490946U, 0,
599558925, 0, 589505315, 0, 596487092, 0, 589505315, 1,
11544576, 1, 206848, 1, 311296, 1, 198656, 2, 912273422, 11,
262144, 0, 0, 1, 33559837, 1, 7438, 1, 14809, 1, 6615, 12, 28,
1, 6614, 12, 28, 2, 23, 11, 18874368, 0, 16790922, 1, 409600, 9,
30, 1, 147854772, 16, 420483072, 3, 8192, 0, 10240, 1, 198656,
1, 15630, 1, 51200, 10, 34858, 9, 42, 1, 33559823, 2, 10276, 1,
15717, 1, 15718, 2, 43, 1, 15936948, 1, 570480831, 1, 14715071,
12, 322123831, 1, 33953125, 12, 55, 1, 33559908, 1, 15718, 2,
46, 4, 2099258, 1, 526336, 1, 442623, 4, 4194365, 1, 509952, 1,
459007, 3, 0, 12, 92, 2, 46, 12, 176, 1, 15734, 1, 206848, 1,
18432, 1, 133120, 1, 100670734, 1, 149504, 1, 165888, 1,
15975928, 1, 1048576, 6, 3145806, 1, 15715, 16, 2150645232U, 2,
268449859, 2, 10307, 12, 176, 1, 15734, 1, 15735, 1, 15630, 1,
15631, 1, 5253120, 6, 3145810, 16, 2150645232U, 1, 15864, 2, 82,
1, 343310, 1, 1064207, 2, 3145813, 1, 15728, 1, 7817, 1, 15729,
3, 15730, 12, 92, 2, 98, 1, 16168, 1, 16167, 1, 16002, 1, 16008,
1, 15974, 1, 15975, 1, 15990, 1, 15976, 1, 15977, 1, 15980, 0,
15981, 1, 10240, 1, 5253120, 1, 15720, 1, 198656, 6, 110, 1,
180224, 1, 103824738, 2, 112, 2, 3145839, 0, 536885440, 1,
114880, 14, 125, 12, 206975, 1, 33559995, 12, 198784, 0,
33570236, 1, 15803, 0, 15804, 3, 294912, 1, 294912, 3, 442370,
1, 11544576, 0, 811612160, 1, 12593152, 1, 11536384, 1,
14024704, 7, 310382726, 0, 10240, 1, 14796, 1, 14797, 1, 14793,
1, 14794, 0, 14795, 1, 268679168, 1, 9437184, 1, 268449792, 1,
198656, 1, 9452827, 1, 1075854602, 1, 1075854603, 1, 557056, 1,
114880, 14, 159, 12, 198784, 1, 1109409213, 12, 198783, 1,
1107312059, 12, 198784, 1, 1109409212, 2, 162, 1, 1075854781, 1,
1073757627, 1, 1075854780, 1, 540672, 1, 10485760, 6, 3145894,
16, 274741248, 9, 168, 3, 4194304, 3, 4209949, 0, 0, 0, 256, 14,
174, 1, 114857, 1, 33560007, 12, 176, 0, 10240, 1, 114858, 1,
33560018, 1, 114857, 3, 33560007, 1, 16008, 1, 114874, 1,
33560360, 1, 114875, 1, 33560154, 0, 15963, 0, 256, 0, 4096, 1,
409611, 9, 188, 0, 10240, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
static int R128_READ_PLL(struct drm_device * dev, int addr)
{
drm_r128_private_t *dev_priv = dev->dev_private;
R128_WRITE8(R128_CLOCK_CNTL_INDEX, addr & 0x1f);
return R128_READ(R128_CLOCK_CNTL_DATA);
}
#if R128_FIFO_DEBUG
static void r128_status(drm_r128_private_t * dev_priv)
{
printk("GUI_STAT = 0x%08x\n",
(unsigned int)R128_READ(R128_GUI_STAT));
printk("PM4_STAT = 0x%08x\n",
(unsigned int)R128_READ(R128_PM4_STAT));
printk("PM4_BUFFER_DL_WPTR = 0x%08x\n",
(unsigned int)R128_READ(R128_PM4_BUFFER_DL_WPTR));
printk("PM4_BUFFER_DL_RPTR = 0x%08x\n",
(unsigned int)R128_READ(R128_PM4_BUFFER_DL_RPTR));
printk("PM4_MICRO_CNTL = 0x%08x\n",
(unsigned int)R128_READ(R128_PM4_MICRO_CNTL));
printk("PM4_BUFFER_CNTL = 0x%08x\n",
(unsigned int)R128_READ(R128_PM4_BUFFER_CNTL));
}
#endif
/* ================================================================
* Engine, FIFO control
*/
static int r128_do_pixcache_flush(drm_r128_private_t * dev_priv)
{
u32 tmp;
int i;
tmp = R128_READ(R128_PC_NGUI_CTLSTAT) | R128_PC_FLUSH_ALL;
R128_WRITE(R128_PC_NGUI_CTLSTAT, tmp);
for (i = 0; i < dev_priv->usec_timeout; i++) {
if (!(R128_READ(R128_PC_NGUI_CTLSTAT) & R128_PC_BUSY)) {
return 0;
}
DRM_UDELAY(1);
}
#if R128_FIFO_DEBUG
DRM_ERROR("failed!\n");
#endif
return -EBUSY;
}
static int r128_do_wait_for_fifo(drm_r128_private_t * dev_priv, int entries)
{
int i;
for (i = 0; i < dev_priv->usec_timeout; i++) {
int slots = R128_READ(R128_GUI_STAT) & R128_GUI_FIFOCNT_MASK;
if (slots >= entries)
return 0;
DRM_UDELAY(1);
}
#if R128_FIFO_DEBUG
DRM_ERROR("failed!\n");
#endif
return -EBUSY;
}
static int r128_do_wait_for_idle(drm_r128_private_t * dev_priv)
{
int i, ret;
ret = r128_do_wait_for_fifo(dev_priv, 64);
if (ret)
return ret;
for (i = 0; i < dev_priv->usec_timeout; i++) {
if (!(R128_READ(R128_GUI_STAT) & R128_GUI_ACTIVE)) {
r128_do_pixcache_flush(dev_priv);
return 0;
}
DRM_UDELAY(1);
}
#if R128_FIFO_DEBUG
DRM_ERROR("failed!\n");
#endif
return -EBUSY;
}
/* ================================================================
* CCE control, initialization
*/
/* Load the microcode for the CCE */
static void r128_cce_load_microcode(drm_r128_private_t * dev_priv)
{
int i;
DRM_DEBUG("\n");
r128_do_wait_for_idle(dev_priv);
R128_WRITE(R128_PM4_MICROCODE_ADDR, 0);
for (i = 0; i < 256; i++) {
R128_WRITE(R128_PM4_MICROCODE_DATAH, r128_cce_microcode[i * 2]);
R128_WRITE(R128_PM4_MICROCODE_DATAL,
r128_cce_microcode[i * 2 + 1]);
}
}
/* Flush any pending commands to the CCE. This should only be used just
* prior to a wait for idle, as it informs the engine that the command
* stream is ending.
*/
static void r128_do_cce_flush(drm_r128_private_t * dev_priv)
{
u32 tmp;
tmp = R128_READ(R128_PM4_BUFFER_DL_WPTR) | R128_PM4_BUFFER_DL_DONE;
R128_WRITE(R128_PM4_BUFFER_DL_WPTR, tmp);
}
/* Wait for the CCE to go idle.
*/
int r128_do_cce_idle(drm_r128_private_t * dev_priv)
{
int i;
for (i = 0; i < dev_priv->usec_timeout; i++) {
if (GET_RING_HEAD(dev_priv) == dev_priv->ring.tail) {
int pm4stat = R128_READ(R128_PM4_STAT);
if (((pm4stat & R128_PM4_FIFOCNT_MASK) >=
dev_priv->cce_fifo_size) &&
!(pm4stat & (R128_PM4_BUSY |
R128_PM4_GUI_ACTIVE))) {
return r128_do_pixcache_flush(dev_priv);
}
}
DRM_UDELAY(1);
}
#if R128_FIFO_DEBUG
DRM_ERROR("failed!\n");
r128_status(dev_priv);
#endif
return -EBUSY;
}
/* Start the Concurrent Command Engine.
*/
static void r128_do_cce_start(drm_r128_private_t * dev_priv)
{
r128_do_wait_for_idle(dev_priv);
R128_WRITE(R128_PM4_BUFFER_CNTL,
dev_priv->cce_mode | dev_priv->ring.size_l2qw
| R128_PM4_BUFFER_CNTL_NOUPDATE);
R128_READ(R128_PM4_BUFFER_ADDR); /* as per the sample code */
R128_WRITE(R128_PM4_MICRO_CNTL, R128_PM4_MICRO_FREERUN);
dev_priv->cce_running = 1;
}
/* Reset the Concurrent Command Engine. This will not flush any pending
* commands, so you must wait for the CCE command stream to complete
* before calling this routine.
*/
static void r128_do_cce_reset(drm_r128_private_t * dev_priv)
{
R128_WRITE(R128_PM4_BUFFER_DL_WPTR, 0);
R128_WRITE(R128_PM4_BUFFER_DL_RPTR, 0);
dev_priv->ring.tail = 0;
}
/* Stop the Concurrent Command Engine. This will not flush any pending
* commands, so you must flush the command stream and wait for the CCE
* to go idle before calling this routine.
*/
static void r128_do_cce_stop(drm_r128_private_t * dev_priv)
{
R128_WRITE(R128_PM4_MICRO_CNTL, 0);
R128_WRITE(R128_PM4_BUFFER_CNTL,
R128_PM4_NONPM4 | R128_PM4_BUFFER_CNTL_NOUPDATE);
dev_priv->cce_running = 0;
}
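/* Illustrative sketch (not part of the original driver): the comments above
 * imply a strict shutdown order -- flush the command stream, wait for the
 * engine to go idle, and only then stop it.  The r128_cce_stop() ioctl
 * handler further down follows exactly this order; a minimal in-kernel
 * caller might look like the hypothetical helper below.
 */
#if 0
static int r128_example_quiesce_cce(struct drm_device *dev)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	int ret;

	r128_do_cce_flush(dev_priv);	/* tell the engine the stream ends */
	ret = r128_do_cce_idle(dev_priv);	/* wait for it to drain */
	if (ret)
		return ret;
	r128_do_cce_stop(dev_priv);	/* now it is safe to turn the CCE off */
	return 0;
}
#endif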
/* Reset the engine. This will stop the CCE if it is running.
*/
static int r128_do_engine_reset(struct drm_device * dev)
{
drm_r128_private_t *dev_priv = dev->dev_private;
u32 clock_cntl_index, mclk_cntl, gen_reset_cntl;
r128_do_pixcache_flush(dev_priv);
clock_cntl_index = R128_READ(R128_CLOCK_CNTL_INDEX);
mclk_cntl = R128_READ_PLL(dev, R128_MCLK_CNTL);
R128_WRITE_PLL(R128_MCLK_CNTL,
mclk_cntl | R128_FORCE_GCP | R128_FORCE_PIPE3D_CP);
gen_reset_cntl = R128_READ(R128_GEN_RESET_CNTL);
/* Taken from the sample code - do not change */
R128_WRITE(R128_GEN_RESET_CNTL, gen_reset_cntl | R128_SOFT_RESET_GUI);
R128_READ(R128_GEN_RESET_CNTL);
R128_WRITE(R128_GEN_RESET_CNTL, gen_reset_cntl & ~R128_SOFT_RESET_GUI);
R128_READ(R128_GEN_RESET_CNTL);
R128_WRITE_PLL(R128_MCLK_CNTL, mclk_cntl);
R128_WRITE(R128_CLOCK_CNTL_INDEX, clock_cntl_index);
R128_WRITE(R128_GEN_RESET_CNTL, gen_reset_cntl);
/* Reset the CCE ring */
r128_do_cce_reset(dev_priv);
/* The CCE is no longer running after an engine reset */
dev_priv->cce_running = 0;
/* Reset any pending vertex, indirect buffers */
r128_freelist_reset(dev);
return 0;
}
static void r128_cce_init_ring_buffer(struct drm_device * dev,
drm_r128_private_t * dev_priv)
{
u32 ring_start;
u32 tmp;
DRM_DEBUG("\n");
/* The manual (p. 2) says this address is in "VM space". This
* means it's an offset from the start of AGP space.
*/
#if __OS_HAS_AGP
if (!dev_priv->is_pci)
ring_start = dev_priv->cce_ring->offset - dev->agp->base;
else
#endif
ring_start = dev_priv->cce_ring->offset - dev->sg->vaddr;
R128_WRITE(R128_PM4_BUFFER_OFFSET, ring_start | R128_AGP_OFFSET);
R128_WRITE(R128_PM4_BUFFER_DL_WPTR, 0);
R128_WRITE(R128_PM4_BUFFER_DL_RPTR, 0);
/* Set watermark control */
R128_WRITE(R128_PM4_BUFFER_WM_CNTL,
((R128_WATERMARK_L / 4) << R128_WMA_SHIFT)
| ((R128_WATERMARK_M / 4) << R128_WMB_SHIFT)
| ((R128_WATERMARK_N / 4) << R128_WMC_SHIFT)
| ((R128_WATERMARK_K / 64) << R128_WB_WM_SHIFT));
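/* Worked example (illustrative): with the driver defaults
 * R128_WATERMARK_L/M/N/K = 16/8/8/128 this packs to
 * ((16/4) << 0) | ((8/4) << 8) | ((8/4) << 16) | ((128/64) << 24) = 0x02020204.
 */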
/* Force read. Why? Because it's in the examples... */
R128_READ(R128_PM4_BUFFER_ADDR);
/* Turn on bus mastering */
tmp = R128_READ(R128_BUS_CNTL) & ~R128_BUS_MASTER_DIS;
R128_WRITE(R128_BUS_CNTL, tmp);
}
static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
{
drm_r128_private_t *dev_priv;
DRM_DEBUG("\n");
dev_priv = drm_alloc(sizeof(drm_r128_private_t), DRM_MEM_DRIVER);
if (dev_priv == NULL)
return -ENOMEM;
memset(dev_priv, 0, sizeof(drm_r128_private_t));
dev_priv->is_pci = init->is_pci;
if (dev_priv->is_pci && !dev->sg) {
DRM_ERROR("PCI GART memory not allocated!\n");
dev->dev_private = (void *)dev_priv;
r128_do_cleanup_cce(dev);
return -EINVAL;
}
dev_priv->usec_timeout = init->usec_timeout;
if (dev_priv->usec_timeout < 1 ||
dev_priv->usec_timeout > R128_MAX_USEC_TIMEOUT) {
DRM_DEBUG("TIMEOUT problem!\n");
dev->dev_private = (void *)dev_priv;
r128_do_cleanup_cce(dev);
return -EINVAL;
}
dev_priv->cce_mode = init->cce_mode;
/* GH: Simple idle check.
*/
atomic_set(&dev_priv->idle_count, 0);
/* We don't support anything other than bus-mastering ring mode,
* but the ring can be in either AGP or PCI space for the ring
* read pointer.
*/
if ((init->cce_mode != R128_PM4_192BM) &&
(init->cce_mode != R128_PM4_128BM_64INDBM) &&
(init->cce_mode != R128_PM4_64BM_128INDBM) &&
(init->cce_mode != R128_PM4_64BM_64VCBM_64INDBM)) {
DRM_DEBUG("Bad cce_mode!\n");
dev->dev_private = (void *)dev_priv;
r128_do_cleanup_cce(dev);
return -EINVAL;
}
switch (init->cce_mode) {
case R128_PM4_NONPM4:
dev_priv->cce_fifo_size = 0;
break;
case R128_PM4_192PIO:
case R128_PM4_192BM:
dev_priv->cce_fifo_size = 192;
break;
case R128_PM4_128PIO_64INDBM:
case R128_PM4_128BM_64INDBM:
dev_priv->cce_fifo_size = 128;
break;
case R128_PM4_64PIO_128INDBM:
case R128_PM4_64BM_128INDBM:
case R128_PM4_64PIO_64VCBM_64INDBM:
case R128_PM4_64BM_64VCBM_64INDBM:
case R128_PM4_64PIO_64VCPIO_64INDPIO:
dev_priv->cce_fifo_size = 64;
break;
}
switch (init->fb_bpp) {
case 16:
dev_priv->color_fmt = R128_DATATYPE_RGB565;
break;
case 32:
default:
dev_priv->color_fmt = R128_DATATYPE_ARGB8888;
break;
}
dev_priv->front_offset = init->front_offset;
dev_priv->front_pitch = init->front_pitch;
dev_priv->back_offset = init->back_offset;
dev_priv->back_pitch = init->back_pitch;
switch (init->depth_bpp) {
case 16:
dev_priv->depth_fmt = R128_DATATYPE_RGB565;
break;
case 24:
case 32:
default:
dev_priv->depth_fmt = R128_DATATYPE_ARGB8888;
break;
}
dev_priv->depth_offset = init->depth_offset;
dev_priv->depth_pitch = init->depth_pitch;
dev_priv->span_offset = init->span_offset;
dev_priv->front_pitch_offset_c = (((dev_priv->front_pitch / 8) << 21) |
(dev_priv->front_offset >> 5));
dev_priv->back_pitch_offset_c = (((dev_priv->back_pitch / 8) << 21) |
(dev_priv->back_offset >> 5));
dev_priv->depth_pitch_offset_c = (((dev_priv->depth_pitch / 8) << 21) |
(dev_priv->depth_offset >> 5) |
R128_DST_TILE);
dev_priv->span_pitch_offset_c = (((dev_priv->depth_pitch / 8) << 21) |
(dev_priv->span_offset >> 5));
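/* Worked example (illustrative; assumes pitch and offset are the byte values
 * handed in by the DDX): front_pitch = 1024 and front_offset = 0x100000 pack
 * to ((1024 / 8) << 21) | (0x100000 >> 5) = 0x10000000 | 0x8000 = 0x10008000,
 * i.e. the pitch in 8-byte units in the high bits and the offset in 32-byte
 * units in the low bits.
 */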
dev_priv->sarea = drm_getsarea(dev);
if (!dev_priv->sarea) {
DRM_ERROR("could not find sarea!\n");
dev->dev_private = (void *)dev_priv;
r128_do_cleanup_cce(dev);
return -EINVAL;
}
dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
if (!dev_priv->mmio) {
DRM_ERROR("could not find mmio region!\n");
dev->dev_private = (void *)dev_priv;
r128_do_cleanup_cce(dev);
return -EINVAL;
}
dev_priv->cce_ring = drm_core_findmap(dev, init->ring_offset);
if (!dev_priv->cce_ring) {
DRM_ERROR("could not find cce ring region!\n");
dev->dev_private = (void *)dev_priv;
r128_do_cleanup_cce(dev);
return -EINVAL;
}
dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset);
if (!dev_priv->ring_rptr) {
DRM_ERROR("could not find ring read pointer!\n");
dev->dev_private = (void *)dev_priv;
r128_do_cleanup_cce(dev);
return -EINVAL;
}
dev->agp_buffer_token = init->buffers_offset;
dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
if (!dev->agp_buffer_map) {
DRM_ERROR("could not find dma buffer region!\n");
dev->dev_private = (void *)dev_priv;
r128_do_cleanup_cce(dev);
return -EINVAL;
}
if (!dev_priv->is_pci) {
dev_priv->agp_textures =
drm_core_findmap(dev, init->agp_textures_offset);
if (!dev_priv->agp_textures) {
DRM_ERROR("could not find agp texture region!\n");
dev->dev_private = (void *)dev_priv;
r128_do_cleanup_cce(dev);
return -EINVAL;
}
}
dev_priv->sarea_priv =
(drm_r128_sarea_t *) ((u8 *) dev_priv->sarea->virtual +
init->sarea_priv_offset);
#if __OS_HAS_AGP
if (!dev_priv->is_pci) {
drm_core_ioremap(dev_priv->cce_ring, dev);
drm_core_ioremap(dev_priv->ring_rptr, dev);
drm_core_ioremap(dev->agp_buffer_map, dev);
if (!dev_priv->cce_ring->virtual ||
!dev_priv->ring_rptr->virtual ||
!dev->agp_buffer_map->virtual) {
DRM_ERROR("Could not ioremap agp regions!\n");
dev->dev_private = (void *)dev_priv;
r128_do_cleanup_cce(dev);
return -ENOMEM;
}
} else
#endif
{
dev_priv->cce_ring->virtual =
(void *)dev_priv->cce_ring->offset;
dev_priv->ring_rptr->virtual =
(void *)dev_priv->ring_rptr->offset;
dev->agp_buffer_map->virtual =
(void *)dev->agp_buffer_map->offset;
}
#if __OS_HAS_AGP
if (!dev_priv->is_pci)
dev_priv->cce_buffers_offset = dev->agp->base;
else
#endif
dev_priv->cce_buffers_offset = dev->sg->vaddr;
dev_priv->ring.start = (u32 *) dev_priv->cce_ring->virtual;
dev_priv->ring.end = ((u32 *) dev_priv->cce_ring->virtual
+ init->ring_size / sizeof(u32));
dev_priv->ring.size = init->ring_size;
dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8);
dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
dev_priv->ring.high_mark = 128;
dev_priv->sarea_priv->last_frame = 0;
R128_WRITE(R128_LAST_FRAME_REG, dev_priv->sarea_priv->last_frame);
dev_priv->sarea_priv->last_dispatch = 0;
R128_WRITE(R128_LAST_DISPATCH_REG, dev_priv->sarea_priv->last_dispatch);
#if __OS_HAS_AGP
if (dev_priv->is_pci) {
#endif
dev_priv->gart_info.table_mask = DMA_BIT_MASK(32);
dev_priv->gart_info.gart_table_location = DRM_ATI_GART_MAIN;
dev_priv->gart_info.table_size = R128_PCIGART_TABLE_SIZE;
dev_priv->gart_info.addr = NULL;
dev_priv->gart_info.bus_addr = 0;
dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI;
if (!drm_ati_pcigart_init(dev, &dev_priv->gart_info)) {
DRM_ERROR("failed to init PCI GART!\n");
dev->dev_private = (void *)dev_priv;
r128_do_cleanup_cce(dev);
return -ENOMEM;
}
R128_WRITE(R128_PCI_GART_PAGE, dev_priv->gart_info.bus_addr);
#if __OS_HAS_AGP
}
#endif
r128_cce_init_ring_buffer(dev, dev_priv);
r128_cce_load_microcode(dev_priv);
dev->dev_private = (void *)dev_priv;
r128_do_engine_reset(dev);
return 0;
}
int r128_do_cleanup_cce(struct drm_device * dev)
{
/* Make sure interrupts are disabled here because the uninstall ioctl
* may not have been called from userspace and after dev_private
* is freed, it's too late.
*/
if (dev->irq_enabled)
drm_irq_uninstall(dev);
if (dev->dev_private) {
drm_r128_private_t *dev_priv = dev->dev_private;
#if __OS_HAS_AGP
if (!dev_priv->is_pci) {
if (dev_priv->cce_ring != NULL)
drm_core_ioremapfree(dev_priv->cce_ring, dev);
if (dev_priv->ring_rptr != NULL)
drm_core_ioremapfree(dev_priv->ring_rptr, dev);
if (dev->agp_buffer_map != NULL) {
drm_core_ioremapfree(dev->agp_buffer_map, dev);
dev->agp_buffer_map = NULL;
}
} else
#endif
{
if (dev_priv->gart_info.bus_addr)
if (!drm_ati_pcigart_cleanup(dev, &dev_priv->gart_info))
DRM_ERROR("failed to cleanup PCI GART!\n");
}
drm_free(dev->dev_private, sizeof(drm_r128_private_t),
DRM_MEM_DRIVER);
dev->dev_private = NULL;
}
return 0;
}
int r128_cce_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_r128_init_t *init = data;
DRM_DEBUG("\n");
LOCK_TEST_WITH_RETURN(dev, file_priv);
switch (init->func) {
case R128_INIT_CCE:
return r128_do_init_cce(dev, init);
case R128_CLEANUP_CCE:
return r128_do_cleanup_cce(dev);
}
return -EINVAL;
}
int r128_cce_start(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_r128_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("\n");
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (dev_priv->cce_running || dev_priv->cce_mode == R128_PM4_NONPM4) {
DRM_DEBUG("while CCE running\n");
return 0;
}
r128_do_cce_start(dev_priv);
return 0;
}
/* Stop the CCE. The engine must have been idled before calling this
* routine.
*/
int r128_cce_stop(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_r128_private_t *dev_priv = dev->dev_private;
drm_r128_cce_stop_t *stop = data;
int ret;
DRM_DEBUG("\n");
LOCK_TEST_WITH_RETURN(dev, file_priv);
/* Flush any pending CCE commands. This ensures any outstanding
* commands are executed by the engine before we turn it off.
*/
if (stop->flush) {
r128_do_cce_flush(dev_priv);
}
/* If we fail to make the engine go idle, we return an error
* code so that the DRM ioctl wrapper can try again.
*/
if (stop->idle) {
ret = r128_do_cce_idle(dev_priv);
if (ret)
return ret;
}
/* Finally, we can turn off the CCE. If the engine isn't idle,
* we will get some dropped triangles as they won't be fully
* rendered before the CCE is shut down.
*/
r128_do_cce_stop(dev_priv);
/* Reset the engine */
r128_do_engine_reset(dev);
return 0;
}
/* Just reset the CCE ring. Called as part of an X Server engine reset.
*/
int r128_cce_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_r128_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("\n");
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (!dev_priv) {
DRM_DEBUG("called before init done\n");
return -EINVAL;
}
r128_do_cce_reset(dev_priv);
/* The CCE is no longer running after an engine reset */
dev_priv->cce_running = 0;
return 0;
}
int r128_cce_idle(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_r128_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("\n");
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (dev_priv->cce_running) {
r128_do_cce_flush(dev_priv);
}
return r128_do_cce_idle(dev_priv);
}
int r128_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEBUG("\n");
LOCK_TEST_WITH_RETURN(dev, file_priv);
return r128_do_engine_reset(dev);
}
int r128_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
return -EINVAL;
}
/* ================================================================
* Freelist management
*/
#define R128_BUFFER_USED 0xffffffff
#define R128_BUFFER_FREE 0
#if 0
static int r128_freelist_init(struct drm_device * dev)
{
struct drm_device_dma *dma = dev->dma;
drm_r128_private_t *dev_priv = dev->dev_private;
struct drm_buf *buf;
drm_r128_buf_priv_t *buf_priv;
drm_r128_freelist_t *entry;
int i;
dev_priv->head = drm_alloc(sizeof(drm_r128_freelist_t), DRM_MEM_DRIVER);
if (dev_priv->head == NULL)
return -ENOMEM;
memset(dev_priv->head, 0, sizeof(drm_r128_freelist_t));
dev_priv->head->age = R128_BUFFER_USED;
for (i = 0; i < dma->buf_count; i++) {
buf = dma->buflist[i];
buf_priv = buf->dev_private;
entry = drm_alloc(sizeof(drm_r128_freelist_t), DRM_MEM_DRIVER);
if (!entry)
return -ENOMEM;
entry->age = R128_BUFFER_FREE;
entry->buf = buf;
entry->prev = dev_priv->head;
entry->next = dev_priv->head->next;
if (!entry->next)
dev_priv->tail = entry;
buf_priv->discard = 0;
buf_priv->dispatched = 0;
buf_priv->list_entry = entry;
dev_priv->head->next = entry;
if (dev_priv->head->next)
dev_priv->head->next->prev = entry;
}
return 0;
}
#endif
static struct drm_buf *r128_freelist_get(struct drm_device * dev)
{
struct drm_device_dma *dma = dev->dma;
drm_r128_private_t *dev_priv = dev->dev_private;
drm_r128_buf_priv_t *buf_priv;
struct drm_buf *buf;
int i, t;
/* FIXME: Optimize -- use freelist code */
for (i = 0; i < dma->buf_count; i++) {
buf = dma->buflist[i];
buf_priv = buf->dev_private;
if (buf->file_priv == NULL)
return buf;
}
for (t = 0; t < dev_priv->usec_timeout; t++) {
u32 done_age = R128_READ(R128_LAST_DISPATCH_REG);
for (i = 0; i < dma->buf_count; i++) {
buf = dma->buflist[i];
buf_priv = buf->dev_private;
if (buf->pending && buf_priv->age <= done_age) {
/* The buffer has been processed, so it
* can now be used.
*/
buf->pending = 0;
return buf;
}
}
DRM_UDELAY(1);
}
DRM_DEBUG("returning NULL!\n");
return NULL;
}
void r128_freelist_reset(struct drm_device * dev)
{
struct drm_device_dma *dma = dev->dma;
int i;
for (i = 0; i < dma->buf_count; i++) {
struct drm_buf *buf = dma->buflist[i];
drm_r128_buf_priv_t *buf_priv = buf->dev_private;
buf_priv->age = 0;
}
}
/* ================================================================
* CCE command submission
*/
int r128_wait_ring(drm_r128_private_t * dev_priv, int n)
{
drm_r128_ring_buffer_t *ring = &dev_priv->ring;
int i;
for (i = 0; i < dev_priv->usec_timeout; i++) {
r128_update_ring_snapshot(dev_priv);
if (ring->space >= n)
return 0;
DRM_UDELAY(1);
}
/* FIXME: This is being ignored... */
DRM_ERROR("failed!\n");
return -EBUSY;
}
static int r128_cce_get_buffers(struct drm_device * dev,
struct drm_file *file_priv,
struct drm_dma * d)
{
int i;
struct drm_buf *buf;
for (i = d->granted_count; i < d->request_count; i++) {
buf = r128_freelist_get(dev);
if (!buf)
return -EAGAIN;
buf->file_priv = file_priv;
if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx,
sizeof(buf->idx)))
return -EFAULT;
if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total,
sizeof(buf->total)))
return -EFAULT;
d->granted_count++;
}
return 0;
}
int r128_cce_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_device_dma *dma = dev->dma;
int ret = 0;
struct drm_dma *d = data;
LOCK_TEST_WITH_RETURN(dev, file_priv);
/* Please don't send us buffers.
*/
if (d->send_count != 0) {
DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
DRM_CURRENTPID, d->send_count);
return -EINVAL;
}
/* We'll send you buffers.
*/
if (d->request_count < 0 || d->request_count > dma->buf_count) {
DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
DRM_CURRENTPID, d->request_count, dma->buf_count);
return -EINVAL;
}
d->granted_count = 0;
if (d->request_count) {
ret = r128_cce_get_buffers(dev, file_priv, d);
}
return ret;
}

View File

@@ -1,329 +0,0 @@
/* r128_drm.h -- Public header for the r128 driver -*- linux-c -*-
* Created: Wed Apr 5 19:24:19 2000 by kevin@precisioninsight.com
*/
/*-
* Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Gareth Hughes <gareth@valinux.com>
* Kevin E. Martin <martin@valinux.com>
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#ifndef __R128_DRM_H__
#define __R128_DRM_H__
/* WARNING: If you change any of these defines, make sure to change the
* defines in the X server file (r128_sarea.h)
*/
#ifndef __R128_SAREA_DEFINES__
#define __R128_SAREA_DEFINES__
/* What needs to be changed for the current vertex buffer?
*/
#define R128_UPLOAD_CONTEXT 0x001
#define R128_UPLOAD_SETUP 0x002
#define R128_UPLOAD_TEX0 0x004
#define R128_UPLOAD_TEX1 0x008
#define R128_UPLOAD_TEX0IMAGES 0x010
#define R128_UPLOAD_TEX1IMAGES 0x020
#define R128_UPLOAD_CORE 0x040
#define R128_UPLOAD_MASKS 0x080
#define R128_UPLOAD_WINDOW 0x100
#define R128_UPLOAD_CLIPRECTS 0x200 /* handled client-side */
#define R128_REQUIRE_QUIESCENCE 0x400
#define R128_UPLOAD_ALL 0x7ff
#define R128_FRONT 0x1
#define R128_BACK 0x2
#define R128_DEPTH 0x4
/* Primitive types
*/
#define R128_POINTS 0x1
#define R128_LINES 0x2
#define R128_LINE_STRIP 0x3
#define R128_TRIANGLES 0x4
#define R128_TRIANGLE_FAN 0x5
#define R128_TRIANGLE_STRIP 0x6
/* Vertex/indirect buffer size
*/
#define R128_BUFFER_SIZE 16384
/* Byte offsets for indirect buffer data
*/
#define R128_INDEX_PRIM_OFFSET 20
#define R128_HOSTDATA_BLIT_OFFSET 32
/* Keep these small for testing.
*/
#define R128_NR_SAREA_CLIPRECTS 12
/* There are 2 heaps (local/AGP). Each region within a heap is a
* minimum of 64k, and there are at most 64 of them per heap.
*/
#define R128_LOCAL_TEX_HEAP 0
#define R128_AGP_TEX_HEAP 1
#define R128_NR_TEX_HEAPS 2
#define R128_NR_TEX_REGIONS 64
#define R128_LOG_TEX_GRANULARITY 16
#define R128_NR_CONTEXT_REGS 12
#define R128_MAX_TEXTURE_LEVELS 11
#define R128_MAX_TEXTURE_UNITS 2
#endif /* __R128_SAREA_DEFINES__ */
typedef struct {
/* Context state - can be written in one large chunk */
unsigned int dst_pitch_offset_c;
unsigned int dp_gui_master_cntl_c;
unsigned int sc_top_left_c;
unsigned int sc_bottom_right_c;
unsigned int z_offset_c;
unsigned int z_pitch_c;
unsigned int z_sten_cntl_c;
unsigned int tex_cntl_c;
unsigned int misc_3d_state_cntl_reg;
unsigned int texture_clr_cmp_clr_c;
unsigned int texture_clr_cmp_msk_c;
unsigned int fog_color_c;
/* Texture state */
unsigned int tex_size_pitch_c;
unsigned int constant_color_c;
/* Setup state */
unsigned int pm4_vc_fpu_setup;
unsigned int setup_cntl;
/* Mask state */
unsigned int dp_write_mask;
unsigned int sten_ref_mask_c;
unsigned int plane_3d_mask_c;
/* Window state */
unsigned int window_xy_offset;
/* Core state */
unsigned int scale_3d_cntl;
} drm_r128_context_regs_t;
/* Setup registers for each texture unit
*/
typedef struct {
unsigned int tex_cntl;
unsigned int tex_combine_cntl;
unsigned int tex_size_pitch;
unsigned int tex_offset[R128_MAX_TEXTURE_LEVELS];
unsigned int tex_border_color;
} drm_r128_texture_regs_t;
typedef struct drm_r128_sarea {
/* The channel for communication of state information to the kernel
* on firing a vertex buffer.
*/
drm_r128_context_regs_t context_state;
drm_r128_texture_regs_t tex_state[R128_MAX_TEXTURE_UNITS];
unsigned int dirty;
unsigned int vertsize;
unsigned int vc_format;
/* The current cliprects, or a subset thereof.
*/
struct drm_clip_rect boxes[R128_NR_SAREA_CLIPRECTS];
unsigned int nbox;
/* Counters for client-side throttling of rendering clients.
*/
unsigned int last_frame;
unsigned int last_dispatch;
struct drm_tex_region tex_list[R128_NR_TEX_HEAPS][R128_NR_TEX_REGIONS + 1];
unsigned int tex_age[R128_NR_TEX_HEAPS];
int ctx_owner;
int pfAllowPageFlip; /* number of 3d windows (0,1,2 or more) */
int pfCurrentPage; /* which buffer is being displayed? */
} drm_r128_sarea_t;
/* WARNING: If you change any of these defines, make sure to change the
* defines in the Xserver file (xf86drmR128.h)
*/
/* Rage 128 specific ioctls
* The device specific ioctl range is 0x40 to 0x79.
*/
#define DRM_R128_INIT 0x00
#define DRM_R128_CCE_START 0x01
#define DRM_R128_CCE_STOP 0x02
#define DRM_R128_CCE_RESET 0x03
#define DRM_R128_CCE_IDLE 0x04
/* 0x05 not used */
#define DRM_R128_RESET 0x06
#define DRM_R128_SWAP 0x07
#define DRM_R128_CLEAR 0x08
#define DRM_R128_VERTEX 0x09
#define DRM_R128_INDICES 0x0a
#define DRM_R128_BLIT 0x0b
#define DRM_R128_DEPTH 0x0c
#define DRM_R128_STIPPLE 0x0d
/* 0x0e not used */
#define DRM_R128_INDIRECT 0x0f
#define DRM_R128_FULLSCREEN 0x10
#define DRM_R128_CLEAR2 0x11
#define DRM_R128_GETPARAM 0x12
#define DRM_R128_FLIP 0x13
#define DRM_IOCTL_R128_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_R128_INIT, drm_r128_init_t)
#define DRM_IOCTL_R128_CCE_START DRM_IO( DRM_COMMAND_BASE + DRM_R128_CCE_START)
#define DRM_IOCTL_R128_CCE_STOP DRM_IOW( DRM_COMMAND_BASE + DRM_R128_CCE_STOP, drm_r128_cce_stop_t)
#define DRM_IOCTL_R128_CCE_RESET DRM_IO( DRM_COMMAND_BASE + DRM_R128_CCE_RESET)
#define DRM_IOCTL_R128_CCE_IDLE DRM_IO( DRM_COMMAND_BASE + DRM_R128_CCE_IDLE)
/* 0x05 not used */
#define DRM_IOCTL_R128_RESET DRM_IO( DRM_COMMAND_BASE + DRM_R128_RESET)
#define DRM_IOCTL_R128_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_R128_SWAP)
#define DRM_IOCTL_R128_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_R128_CLEAR, drm_r128_clear_t)
#define DRM_IOCTL_R128_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_R128_VERTEX, drm_r128_vertex_t)
#define DRM_IOCTL_R128_INDICES DRM_IOW( DRM_COMMAND_BASE + DRM_R128_INDICES, drm_r128_indices_t)
#define DRM_IOCTL_R128_BLIT DRM_IOW( DRM_COMMAND_BASE + DRM_R128_BLIT, drm_r128_blit_t)
#define DRM_IOCTL_R128_DEPTH DRM_IOW( DRM_COMMAND_BASE + DRM_R128_DEPTH, drm_r128_depth_t)
#define DRM_IOCTL_R128_STIPPLE DRM_IOW( DRM_COMMAND_BASE + DRM_R128_STIPPLE, drm_r128_stipple_t)
/* 0x0e not used */
#define DRM_IOCTL_R128_INDIRECT DRM_IOWR(DRM_COMMAND_BASE + DRM_R128_INDIRECT, drm_r128_indirect_t)
#define DRM_IOCTL_R128_FULLSCREEN DRM_IOW( DRM_COMMAND_BASE + DRM_R128_FULLSCREEN, drm_r128_fullscreen_t)
#define DRM_IOCTL_R128_CLEAR2 DRM_IOW( DRM_COMMAND_BASE + DRM_R128_CLEAR2, drm_r128_clear2_t)
#define DRM_IOCTL_R128_GETPARAM DRM_IOWR( DRM_COMMAND_BASE + DRM_R128_GETPARAM, drm_r128_getparam_t)
#define DRM_IOCTL_R128_FLIP DRM_IO( DRM_COMMAND_BASE + DRM_R128_FLIP)
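/* Illustrative userspace sketch (not part of this header): a DRM client that
 * has opened the device node and holds the hardware lock could kick the CCE
 * with the DRM_IOCTL_R128_CCE_START request defined above.  The device path,
 * helper name and error handling are assumptions made for the example only.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

int r128_example_start_cce(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);	/* path is an assumption */
	int ret;

	if (fd < 0)
		return -1;
	ret = ioctl(fd, DRM_IOCTL_R128_CCE_START);	/* DRM_IO: no argument */
	close(fd);
	return ret;
}
#endif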
typedef struct drm_r128_init {
enum {
R128_INIT_CCE = 0x01,
R128_CLEANUP_CCE = 0x02
} func;
unsigned long sarea_priv_offset;
int is_pci;
int cce_mode;
int cce_secure;
int ring_size;
int usec_timeout;
unsigned int fb_bpp;
unsigned int front_offset, front_pitch;
unsigned int back_offset, back_pitch;
unsigned int depth_bpp;
unsigned int depth_offset, depth_pitch;
unsigned int span_offset;
unsigned long fb_offset;
unsigned long mmio_offset;
unsigned long ring_offset;
unsigned long ring_rptr_offset;
unsigned long buffers_offset;
unsigned long agp_textures_offset;
} drm_r128_init_t;
typedef struct drm_r128_cce_stop {
int flush;
int idle;
} drm_r128_cce_stop_t;
typedef struct drm_r128_clear {
unsigned int flags;
unsigned int clear_color;
unsigned int clear_depth;
unsigned int color_mask;
unsigned int depth_mask;
} drm_r128_clear_t;
typedef struct drm_r128_vertex {
int prim;
int idx; /* Index of vertex buffer */
int count; /* Number of vertices in buffer */
int discard; /* Client finished with buffer? */
} drm_r128_vertex_t;
typedef struct drm_r128_indices {
int prim;
int idx;
int start;
int end;
int discard; /* Client finished with buffer? */
} drm_r128_indices_t;
typedef struct drm_r128_blit {
int idx;
int pitch;
int offset;
int format;
unsigned short x, y;
unsigned short width, height;
} drm_r128_blit_t;
typedef struct drm_r128_depth {
enum {
R128_WRITE_SPAN = 0x01,
R128_WRITE_PIXELS = 0x02,
R128_READ_SPAN = 0x03,
R128_READ_PIXELS = 0x04
} func;
int n;
int __user *x;
int __user *y;
unsigned int __user *buffer;
unsigned char __user *mask;
} drm_r128_depth_t;
typedef struct drm_r128_stipple {
unsigned int __user *mask;
} drm_r128_stipple_t;
typedef struct drm_r128_indirect {
int idx;
int start;
int end;
int discard;
} drm_r128_indirect_t;
typedef struct drm_r128_fullscreen {
enum {
R128_INIT_FULLSCREEN = 0x01,
R128_CLEANUP_FULLSCREEN = 0x02
} func;
} drm_r128_fullscreen_t;
/* 2.3: An ioctl to get parameters that aren't available to the 3d
* client any other way.
*/
#define R128_PARAM_IRQ_NR 1
typedef struct drm_r128_getparam {
int param;
void __user *value;
} drm_r128_getparam_t;
#endif

View File

@@ -1,132 +0,0 @@
/* r128_drv.c -- ATI Rage 128 driver -*- linux-c -*-
* Created: Mon Dec 13 09:47:27 1999 by faith@precisioninsight.com
*/
/*-
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
* Gareth Hughes <gareth@valinux.com>
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "dev/drm/drmP.h"
#include "dev/drm/drm.h"
#include "dev/drm/r128_drm.h"
#include "dev/drm/r128_drv.h"
#include "dev/drm/drm_pciids.h"
/* drv_PCI_IDs comes from drm_pciids.h, generated from drm_pciids.txt. */
static drm_pci_id_list_t r128_pciidlist[] = {
r128_PCI_IDS
};
static void r128_configure(struct drm_device *dev)
{
dev->driver->driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA |
DRIVER_SG | DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ;
dev->driver->buf_priv_size = sizeof(drm_r128_buf_priv_t);
dev->driver->load = r128_driver_load;
dev->driver->preclose = r128_driver_preclose;
dev->driver->lastclose = r128_driver_lastclose;
dev->driver->get_vblank_counter = r128_get_vblank_counter;
dev->driver->enable_vblank = r128_enable_vblank;
dev->driver->disable_vblank = r128_disable_vblank;
dev->driver->irq_preinstall = r128_driver_irq_preinstall;
dev->driver->irq_postinstall = r128_driver_irq_postinstall;
dev->driver->irq_uninstall = r128_driver_irq_uninstall;
dev->driver->irq_handler = r128_driver_irq_handler;
dev->driver->dma_ioctl = r128_cce_buffers;
dev->driver->ioctls = r128_ioctls;
dev->driver->max_ioctl = r128_max_ioctl;
dev->driver->name = DRIVER_NAME;
dev->driver->desc = DRIVER_DESC;
dev->driver->date = DRIVER_DATE;
dev->driver->major = DRIVER_MAJOR;
dev->driver->minor = DRIVER_MINOR;
dev->driver->patchlevel = DRIVER_PATCHLEVEL;
}
static int
r128_probe(device_t kdev)
{
return drm_probe(kdev, r128_pciidlist);
}
static int
r128_attach(device_t kdev)
{
struct drm_device *dev = device_get_softc(kdev);
dev->driver = malloc(sizeof(struct drm_driver_info), DRM_MEM_DRIVER,
M_WAITOK | M_ZERO);
r128_configure(dev);
return drm_attach(kdev, r128_pciidlist);
}
int r128_driver_load(struct drm_device * dev, unsigned long flags)
{
return drm_vblank_init(dev, 1);
}
static int
r128_detach(device_t kdev)
{
struct drm_device *dev = device_get_softc(kdev);
int ret;
ret = drm_detach(kdev);
free(dev->driver, DRM_MEM_DRIVER);
return ret;
}
static device_method_t r128_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, r128_probe),
DEVMETHOD(device_attach, r128_attach),
DEVMETHOD(device_detach, r128_detach),
{ 0, 0 }
};
static driver_t r128_driver = {
"drm",
r128_methods,
sizeof(struct drm_device)
};
extern devclass_t drm_devclass;
DRIVER_MODULE(r128, vgapci, r128_driver, drm_devclass, 0, 0);
MODULE_DEPEND(r128, drm, 1, 1, 1);

View File

@@ -1,529 +0,0 @@
/* r128_drv.h -- Private header for r128 driver -*- linux-c -*-
* Created: Mon Dec 13 09:51:11 1999 by faith@precisioninsight.com
*/
/*-
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
* Kevin E. Martin <martin@valinux.com>
* Gareth Hughes <gareth@valinux.com>
* Michel Dänzer <daenzerm@student.ethz.ch>
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#ifndef __R128_DRV_H__
#define __R128_DRV_H__
/* General customization:
*/
#define DRIVER_AUTHOR "Gareth Hughes, VA Linux Systems Inc."
#define DRIVER_NAME "r128"
#define DRIVER_DESC "ATI Rage 128"
#define DRIVER_DATE "20030725"
/* Interface history:
*
* ?? - ??
* 2.4 - Add support for ycbcr textures (no new ioctls)
* 2.5 - Add FLIP ioctl, disable FULLSCREEN.
*/
#define DRIVER_MAJOR 2
#define DRIVER_MINOR 5
#define DRIVER_PATCHLEVEL 0
#define GET_RING_HEAD(dev_priv) R128_READ( R128_PM4_BUFFER_DL_RPTR )
typedef struct drm_r128_freelist {
unsigned int age;
struct drm_buf *buf;
struct drm_r128_freelist *next;
struct drm_r128_freelist *prev;
} drm_r128_freelist_t;
typedef struct drm_r128_ring_buffer {
u32 *start;
u32 *end;
int size;
int size_l2qw;
u32 tail;
u32 tail_mask;
int space;
int high_mark;
} drm_r128_ring_buffer_t;
typedef struct drm_r128_private {
drm_r128_ring_buffer_t ring;
drm_r128_sarea_t *sarea_priv;
int cce_mode;
int cce_fifo_size;
int cce_running;
drm_r128_freelist_t *head;
drm_r128_freelist_t *tail;
int usec_timeout;
int is_pci;
unsigned long cce_buffers_offset;
atomic_t idle_count;
int page_flipping;
int current_page;
u32 crtc_offset;
u32 crtc_offset_cntl;
atomic_t vbl_received;
u32 color_fmt;
unsigned int front_offset;
unsigned int front_pitch;
unsigned int back_offset;
unsigned int back_pitch;
u32 depth_fmt;
unsigned int depth_offset;
unsigned int depth_pitch;
unsigned int span_offset;
u32 front_pitch_offset_c;
u32 back_pitch_offset_c;
u32 depth_pitch_offset_c;
u32 span_pitch_offset_c;
drm_local_map_t *sarea;
drm_local_map_t *mmio;
drm_local_map_t *cce_ring;
drm_local_map_t *ring_rptr;
drm_local_map_t *agp_textures;
struct drm_ati_pcigart_info gart_info;
} drm_r128_private_t;
typedef struct drm_r128_buf_priv {
u32 age;
int prim;
int discard;
int dispatched;
drm_r128_freelist_t *list_entry;
} drm_r128_buf_priv_t;
extern struct drm_ioctl_desc r128_ioctls[];
extern int r128_max_ioctl;
/* r128_cce.c */
extern int r128_cce_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int r128_cce_start(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int r128_cce_stop(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int r128_cce_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int r128_cce_idle(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int r128_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int r128_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int r128_cce_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern void r128_freelist_reset(struct drm_device * dev);
extern int r128_wait_ring(drm_r128_private_t * dev_priv, int n);
extern int r128_do_cce_idle(drm_r128_private_t * dev_priv);
extern int r128_do_cleanup_cce(struct drm_device * dev);
extern int r128_enable_vblank(struct drm_device *dev, int crtc);
extern void r128_disable_vblank(struct drm_device *dev, int crtc);
extern u32 r128_get_vblank_counter(struct drm_device *dev, int crtc);
extern irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS);
extern void r128_driver_irq_preinstall(struct drm_device * dev);
extern int r128_driver_irq_postinstall(struct drm_device * dev);
extern void r128_driver_irq_uninstall(struct drm_device * dev);
extern void r128_driver_lastclose(struct drm_device * dev);
extern int r128_driver_load(struct drm_device * dev, unsigned long flags);
extern void r128_driver_preclose(struct drm_device * dev,
struct drm_file *file_priv);
extern long r128_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
/* Register definitions, register access macros and drmAddMap constants
* for Rage 128 kernel driver.
*/
#define R128_AUX_SC_CNTL 0x1660
# define R128_AUX1_SC_EN (1 << 0)
# define R128_AUX1_SC_MODE_OR (0 << 1)
# define R128_AUX1_SC_MODE_NAND (1 << 1)
# define R128_AUX2_SC_EN (1 << 2)
# define R128_AUX2_SC_MODE_OR (0 << 3)
# define R128_AUX2_SC_MODE_NAND (1 << 3)
# define R128_AUX3_SC_EN (1 << 4)
# define R128_AUX3_SC_MODE_OR (0 << 5)
# define R128_AUX3_SC_MODE_NAND (1 << 5)
#define R128_AUX1_SC_LEFT 0x1664
#define R128_AUX1_SC_RIGHT 0x1668
#define R128_AUX1_SC_TOP 0x166c
#define R128_AUX1_SC_BOTTOM 0x1670
#define R128_AUX2_SC_LEFT 0x1674
#define R128_AUX2_SC_RIGHT 0x1678
#define R128_AUX2_SC_TOP 0x167c
#define R128_AUX2_SC_BOTTOM 0x1680
#define R128_AUX3_SC_LEFT 0x1684
#define R128_AUX3_SC_RIGHT 0x1688
#define R128_AUX3_SC_TOP 0x168c
#define R128_AUX3_SC_BOTTOM 0x1690
#define R128_BRUSH_DATA0 0x1480
#define R128_BUS_CNTL 0x0030
# define R128_BUS_MASTER_DIS (1 << 6)
#define R128_CLOCK_CNTL_INDEX 0x0008
#define R128_CLOCK_CNTL_DATA 0x000c
# define R128_PLL_WR_EN (1 << 7)
#define R128_CONSTANT_COLOR_C 0x1d34
#define R128_CRTC_OFFSET 0x0224
#define R128_CRTC_OFFSET_CNTL 0x0228
# define R128_CRTC_OFFSET_FLIP_CNTL (1 << 16)
#define R128_DP_GUI_MASTER_CNTL 0x146c
# define R128_GMC_SRC_PITCH_OFFSET_CNTL (1 << 0)
# define R128_GMC_DST_PITCH_OFFSET_CNTL (1 << 1)
# define R128_GMC_BRUSH_SOLID_COLOR (13 << 4)
# define R128_GMC_BRUSH_NONE (15 << 4)
# define R128_GMC_DST_16BPP (4 << 8)
# define R128_GMC_DST_24BPP (5 << 8)
# define R128_GMC_DST_32BPP (6 << 8)
# define R128_GMC_DST_DATATYPE_SHIFT 8
# define R128_GMC_SRC_DATATYPE_COLOR (3 << 12)
# define R128_DP_SRC_SOURCE_MEMORY (2 << 24)
# define R128_DP_SRC_SOURCE_HOST_DATA (3 << 24)
# define R128_GMC_CLR_CMP_CNTL_DIS (1 << 28)
# define R128_GMC_AUX_CLIP_DIS (1 << 29)
# define R128_GMC_WR_MSK_DIS (1 << 30)
# define R128_ROP3_S 0x00cc0000
# define R128_ROP3_P 0x00f00000
#define R128_DP_WRITE_MASK 0x16cc
#define R128_DST_PITCH_OFFSET_C 0x1c80
# define R128_DST_TILE (1U << 31)
#define R128_GEN_INT_CNTL 0x0040
# define R128_CRTC_VBLANK_INT_EN (1 << 0)
#define R128_GEN_INT_STATUS 0x0044
# define R128_CRTC_VBLANK_INT (1 << 0)
# define R128_CRTC_VBLANK_INT_AK (1 << 0)
#define R128_GEN_RESET_CNTL 0x00f0
# define R128_SOFT_RESET_GUI (1 << 0)
#define R128_GUI_SCRATCH_REG0 0x15e0
#define R128_GUI_SCRATCH_REG1 0x15e4
#define R128_GUI_SCRATCH_REG2 0x15e8
#define R128_GUI_SCRATCH_REG3 0x15ec
#define R128_GUI_SCRATCH_REG4 0x15f0
#define R128_GUI_SCRATCH_REG5 0x15f4
#define R128_GUI_STAT 0x1740
# define R128_GUI_FIFOCNT_MASK 0x0fff
# define R128_GUI_ACTIVE (1U << 31)
#define R128_MCLK_CNTL 0x000f
# define R128_FORCE_GCP (1 << 16)
# define R128_FORCE_PIPE3D_CP (1 << 17)
# define R128_FORCE_RCP (1 << 18)
#define R128_PC_GUI_CTLSTAT 0x1748
#define R128_PC_NGUI_CTLSTAT 0x0184
# define R128_PC_FLUSH_GUI (3 << 0)
# define R128_PC_RI_GUI (1 << 2)
# define R128_PC_FLUSH_ALL 0x00ff
# define R128_PC_BUSY (1U << 31)
#define R128_PCI_GART_PAGE 0x017c
#define R128_PRIM_TEX_CNTL_C 0x1cb0
#define R128_SCALE_3D_CNTL 0x1a00
#define R128_SEC_TEX_CNTL_C 0x1d00
#define R128_SEC_TEXTURE_BORDER_COLOR_C 0x1d3c
#define R128_SETUP_CNTL 0x1bc4
#define R128_STEN_REF_MASK_C 0x1d40
#define R128_TEX_CNTL_C 0x1c9c
# define R128_TEX_CACHE_FLUSH (1 << 23)
#define R128_WAIT_UNTIL 0x1720
# define R128_EVENT_CRTC_OFFSET (1 << 0)
#define R128_WINDOW_XY_OFFSET 0x1bcc
/* CCE registers
*/
#define R128_PM4_BUFFER_OFFSET 0x0700
#define R128_PM4_BUFFER_CNTL 0x0704
# define R128_PM4_MASK (15 << 28)
# define R128_PM4_NONPM4 (0 << 28)
# define R128_PM4_192PIO (1 << 28)
# define R128_PM4_192BM (2 << 28)
# define R128_PM4_128PIO_64INDBM (3 << 28)
# define R128_PM4_128BM_64INDBM (4 << 28)
# define R128_PM4_64PIO_128INDBM (5 << 28)
# define R128_PM4_64BM_128INDBM (6 << 28)
# define R128_PM4_64PIO_64VCBM_64INDBM (7 << 28)
# define R128_PM4_64BM_64VCBM_64INDBM (8 << 28)
# define R128_PM4_64PIO_64VCPIO_64INDPIO (15 << 28)
# define R128_PM4_BUFFER_CNTL_NOUPDATE (1 << 27)
#define R128_PM4_BUFFER_WM_CNTL 0x0708
# define R128_WMA_SHIFT 0
# define R128_WMB_SHIFT 8
# define R128_WMC_SHIFT 16
# define R128_WB_WM_SHIFT 24
#define R128_PM4_BUFFER_DL_RPTR_ADDR 0x070c
#define R128_PM4_BUFFER_DL_RPTR 0x0710
#define R128_PM4_BUFFER_DL_WPTR 0x0714
# define R128_PM4_BUFFER_DL_DONE (1U << 31)
#define R128_PM4_VC_FPU_SETUP 0x071c
#define R128_PM4_IW_INDOFF 0x0738
#define R128_PM4_IW_INDSIZE 0x073c
#define R128_PM4_STAT 0x07b8
# define R128_PM4_FIFOCNT_MASK 0x0fff
# define R128_PM4_BUSY (1 << 16)
# define R128_PM4_GUI_ACTIVE (1U << 31)
#define R128_PM4_MICROCODE_ADDR 0x07d4
#define R128_PM4_MICROCODE_RADDR 0x07d8
#define R128_PM4_MICROCODE_DATAH 0x07dc
#define R128_PM4_MICROCODE_DATAL 0x07e0
#define R128_PM4_BUFFER_ADDR 0x07f0
#define R128_PM4_MICRO_CNTL 0x07fc
# define R128_PM4_MICRO_FREERUN (1 << 30)
#define R128_PM4_FIFO_DATA_EVEN 0x1000
#define R128_PM4_FIFO_DATA_ODD 0x1004
/* CCE command packets
*/
#define R128_CCE_PACKET0 0x00000000
#define R128_CCE_PACKET1 0x40000000
#define R128_CCE_PACKET2 0x80000000
#define R128_CCE_PACKET3 0xC0000000
# define R128_CNTL_HOSTDATA_BLT 0x00009400
# define R128_CNTL_PAINT_MULTI 0x00009A00
# define R128_CNTL_BITBLT_MULTI 0x00009B00
# define R128_3D_RNDR_GEN_INDX_PRIM 0x00002300
#define R128_CCE_PACKET_MASK 0xC0000000
#define R128_CCE_PACKET_COUNT_MASK 0x3fff0000
#define R128_CCE_PACKET0_REG_MASK 0x000007ff
#define R128_CCE_PACKET1_REG0_MASK 0x000007ff
#define R128_CCE_PACKET1_REG1_MASK 0x003ff800
#define R128_CCE_VC_CNTL_PRIM_TYPE_NONE 0x00000000
#define R128_CCE_VC_CNTL_PRIM_TYPE_POINT 0x00000001
#define R128_CCE_VC_CNTL_PRIM_TYPE_LINE 0x00000002
#define R128_CCE_VC_CNTL_PRIM_TYPE_POLY_LINE 0x00000003
#define R128_CCE_VC_CNTL_PRIM_TYPE_TRI_LIST 0x00000004
#define R128_CCE_VC_CNTL_PRIM_TYPE_TRI_FAN 0x00000005
#define R128_CCE_VC_CNTL_PRIM_TYPE_TRI_STRIP 0x00000006
#define R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2 0x00000007
#define R128_CCE_VC_CNTL_PRIM_WALK_IND 0x00000010
#define R128_CCE_VC_CNTL_PRIM_WALK_LIST 0x00000020
#define R128_CCE_VC_CNTL_PRIM_WALK_RING 0x00000030
#define R128_CCE_VC_CNTL_NUM_SHIFT 16
#define R128_DATATYPE_VQ 0
#define R128_DATATYPE_CI4 1
#define R128_DATATYPE_CI8 2
#define R128_DATATYPE_ARGB1555 3
#define R128_DATATYPE_RGB565 4
#define R128_DATATYPE_RGB888 5
#define R128_DATATYPE_ARGB8888 6
#define R128_DATATYPE_RGB332 7
#define R128_DATATYPE_Y8 8
#define R128_DATATYPE_RGB8 9
#define R128_DATATYPE_CI16 10
#define R128_DATATYPE_YVYU422 11
#define R128_DATATYPE_VYUY422 12
#define R128_DATATYPE_AYUV444 14
#define R128_DATATYPE_ARGB4444 15
/* Constants */
#define R128_AGP_OFFSET 0x02000000
#define R128_WATERMARK_L 16
#define R128_WATERMARK_M 8
#define R128_WATERMARK_N 8
#define R128_WATERMARK_K 128
#define R128_MAX_USEC_TIMEOUT 100000 /* 100 ms */
#define R128_LAST_FRAME_REG R128_GUI_SCRATCH_REG0
#define R128_LAST_DISPATCH_REG R128_GUI_SCRATCH_REG1
#define R128_MAX_VB_AGE 0x7fffffff
#define R128_MAX_VB_VERTS (0xffff)
#define R128_RING_HIGH_MARK 128
#define R128_PERFORMANCE_BOXES 0
#define R128_PCIGART_TABLE_SIZE 32768
#define R128_READ(reg) DRM_READ32( dev_priv->mmio, (reg) )
#define R128_WRITE(reg,val) DRM_WRITE32( dev_priv->mmio, (reg), (val) )
#define R128_READ8(reg) DRM_READ8( dev_priv->mmio, (reg) )
#define R128_WRITE8(reg,val) DRM_WRITE8( dev_priv->mmio, (reg), (val) )
#define R128_WRITE_PLL(addr,val) \
do { \
R128_WRITE8(R128_CLOCK_CNTL_INDEX, \
((addr) & 0x1f) | R128_PLL_WR_EN); \
R128_WRITE(R128_CLOCK_CNTL_DATA, (val)); \
} while (0)
#define CCE_PACKET0( reg, n ) (R128_CCE_PACKET0 | \
((n) << 16) | ((reg) >> 2))
#define CCE_PACKET1( reg0, reg1 ) (R128_CCE_PACKET1 | \
(((reg1) >> 2) << 11) | ((reg0) >> 2))
#define CCE_PACKET2() (R128_CCE_PACKET2)
#define CCE_PACKET3( pkt, n ) (R128_CCE_PACKET3 | \
(pkt) | ((n) << 16))
static __inline__ void r128_update_ring_snapshot(drm_r128_private_t * dev_priv)
{
drm_r128_ring_buffer_t *ring = &dev_priv->ring;
ring->space = (GET_RING_HEAD(dev_priv) - ring->tail) * sizeof(u32);
if (ring->space <= 0)
ring->space += ring->size;
}
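/* Worked example (illustrative): with a 64 KB ring (size = 0x10000 bytes), a
 * hardware read pointer of 0x100 dwords and a software tail of 0x3f00 dwords,
 * space = (0x100 - 0x3f00) * 4 = -0xf800 bytes; since that is <= 0 the ring
 * size is added back, leaving 0x800 bytes (512 dwords) before the writer
 * would catch up with the reader.
 */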
/* ================================================================
* Misc helper macros
*/
#define RING_SPACE_TEST_WITH_RETURN( dev_priv ) \
do { \
drm_r128_ring_buffer_t *ring = &dev_priv->ring; int i; \
if ( ring->space < ring->high_mark ) { \
for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) { \
r128_update_ring_snapshot( dev_priv ); \
if ( ring->space >= ring->high_mark ) \
goto __ring_space_done; \
DRM_UDELAY(1); \
} \
DRM_ERROR( "ring space check failed!\n" ); \
return -EBUSY; \
} \
__ring_space_done: \
; \
} while (0)
#define VB_AGE_TEST_WITH_RETURN( dev_priv ) \
do { \
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; \
if ( sarea_priv->last_dispatch >= R128_MAX_VB_AGE ) { \
int __ret = r128_do_cce_idle( dev_priv ); \
if ( __ret ) return __ret; \
sarea_priv->last_dispatch = 0; \
r128_freelist_reset( dev ); \
} \
} while (0)
#define R128_WAIT_UNTIL_PAGE_FLIPPED() do { \
OUT_RING( CCE_PACKET0( R128_WAIT_UNTIL, 0 ) ); \
OUT_RING( R128_EVENT_CRTC_OFFSET ); \
} while (0)
/* ================================================================
* Ring control
*/
#define R128_VERBOSE 0
#define RING_LOCALS \
int write, _nr; unsigned int tail_mask; volatile u32 *ring;
#define BEGIN_RING( n ) do { \
if ( R128_VERBOSE ) { \
DRM_INFO( "BEGIN_RING( %d )\n", (n)); \
} \
if ( dev_priv->ring.space <= (n) * sizeof(u32) ) { \
COMMIT_RING(); \
r128_wait_ring( dev_priv, (n) * sizeof(u32) ); \
} \
_nr = n; dev_priv->ring.space -= (n) * sizeof(u32); \
ring = dev_priv->ring.start; \
write = dev_priv->ring.tail; \
tail_mask = dev_priv->ring.tail_mask; \
} while (0)
/* You can set this to zero if you want. If the card locks up, you'll
* need to keep this set. It works around a bug in early revs of the
* Rage 128 chipset, where the CCE would read 32 dwords past the end of
* the ring buffer before wrapping around.
*/
#define R128_BROKEN_CCE 1
#define ADVANCE_RING() do { \
if ( R128_VERBOSE ) { \
DRM_INFO( "ADVANCE_RING() wr=0x%06x tail=0x%06x\n", \
write, dev_priv->ring.tail ); \
} \
if ( R128_BROKEN_CCE && write < 32 ) { \
memcpy( dev_priv->ring.end, \
dev_priv->ring.start, \
write * sizeof(u32) ); \
} \
if (((dev_priv->ring.tail + _nr) & tail_mask) != write) { \
DRM_ERROR( \
"ADVANCE_RING(): mismatch: nr: %x write: %x line: %d\n", \
((dev_priv->ring.tail + _nr) & tail_mask), \
write, __LINE__); \
} else \
dev_priv->ring.tail = write; \
} while (0)
#define COMMIT_RING() do { \
if ( R128_VERBOSE ) { \
DRM_INFO( "COMMIT_RING() tail=0x%06x\n", \
dev_priv->ring.tail ); \
} \
DRM_MEMORYBARRIER(); \
R128_WRITE( R128_PM4_BUFFER_DL_WPTR, dev_priv->ring.tail ); \
R128_READ( R128_PM4_BUFFER_DL_WPTR ); \
} while (0)
#define OUT_RING( x ) do { \
if ( R128_VERBOSE ) { \
DRM_INFO( " OUT_RING( 0x%08x ) at 0x%x\n", \
(unsigned int)(x), write ); \
} \
ring[write++] = cpu_to_le32( x ); \
write &= tail_mask; \
} while (0)
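/* Illustrative sketch (not part of the original header): the ring macros
 * above are used in the order BEGIN_RING / OUT_RING ... / ADVANCE_RING /
 * COMMIT_RING, with RING_LOCALS declaring the scratch variables they share.
 * The hypothetical helper below emits a single PACKET0 register write; the
 * register and value are arbitrary examples.
 */
#if 0
static void r128_example_emit_scratch(drm_r128_private_t *dev_priv)
{
	RING_LOCALS;

	BEGIN_RING(2);				/* header plus one data dword */
	OUT_RING(CCE_PACKET0(R128_GUI_SCRATCH_REG2, 0));
	OUT_RING(0x12345678);			/* value written to the register */
	ADVANCE_RING();
	COMMIT_RING();				/* publish the new write pointer */
}
#endif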
#endif /* __R128_DRV_H__ */

View File

@@ -1,119 +0,0 @@
/* r128_irq.c -- IRQ handling for r128 -*- linux-c -*- */
/*-
* Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
*
* The Weather Channel (TM) funded Tungsten Graphics to develop the
* initial release of the Radeon 8500 driver under the XFree86 license.
* This notice must be preserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Keith Whitwell <keith@tungstengraphics.com>
* Eric Anholt <anholt@FreeBSD.org>
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "dev/drm/drmP.h"
#include "dev/drm/drm.h"
#include "dev/drm/r128_drm.h"
#include "dev/drm/r128_drv.h"
u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
{
const drm_r128_private_t *dev_priv = dev->dev_private;
if (crtc != 0)
return 0;
return atomic_read(&dev_priv->vbl_received);
}
irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
int status;
status = R128_READ(R128_GEN_INT_STATUS);
/* VBLANK interrupt */
if (status & R128_CRTC_VBLANK_INT) {
R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
atomic_inc(&dev_priv->vbl_received);
drm_handle_vblank(dev, 0);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
int r128_enable_vblank(struct drm_device *dev, int crtc)
{
drm_r128_private_t *dev_priv = dev->dev_private;
if (crtc != 0) {
DRM_ERROR("%s: bad crtc %d\n", __FUNCTION__, crtc);
return -EINVAL;
}
R128_WRITE(R128_GEN_INT_CNTL, R128_CRTC_VBLANK_INT_EN);
return 0;
}
void r128_disable_vblank(struct drm_device *dev, int crtc)
{
if (crtc != 0)
DRM_ERROR("%s: bad crtc %d\n", __FUNCTION__, crtc);
/*
* FIXME: implement proper interrupt disable by using the vblank
* counter register (if available)
*
* R128_WRITE(R128_GEN_INT_CNTL,
* R128_READ(R128_GEN_INT_CNTL) & ~R128_CRTC_VBLANK_INT_EN);
*/
}
void r128_driver_irq_preinstall(struct drm_device * dev)
{
drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
/* Disable *all* interrupts */
R128_WRITE(R128_GEN_INT_CNTL, 0);
/* Clear vblank bit if it's already high */
R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
}
int r128_driver_irq_postinstall(struct drm_device * dev)
{
return 0;
}
void r128_driver_irq_uninstall(struct drm_device * dev)
{
drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
if (!dev_priv)
return;
/* Disable *all* interrupts */
R128_WRITE(R128_GEN_INT_CNTL, 0);
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1,212 +0,0 @@
/* savage_drm.h -- Public header for the savage driver
*
* Copyright 2004 Felix Kuehling
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
* CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#ifndef __SAVAGE_DRM_H__
#define __SAVAGE_DRM_H__
#ifndef __SAVAGE_SAREA_DEFINES__
#define __SAVAGE_SAREA_DEFINES__
/* 2 heaps (1 for card, 1 for agp), each divided into up to 128
* regions, subject to a minimum region size of (1<<16) == 64k.
*
* Clients may subdivide regions internally, but when sharing between
* clients, the region size is the minimum granularity.
*/
#define SAVAGE_CARD_HEAP 0
#define SAVAGE_AGP_HEAP 1
#define SAVAGE_NR_TEX_HEAPS 2
#define SAVAGE_NR_TEX_REGIONS 16
#define SAVAGE_LOG_MIN_TEX_REGION_SIZE 16
#endif /* __SAVAGE_SAREA_DEFINES__ */
typedef struct _drm_savage_sarea {
/* LRU lists for texture memory in agp space and on the card.
*/
struct drm_tex_region texList[SAVAGE_NR_TEX_HEAPS][SAVAGE_NR_TEX_REGIONS+1];
unsigned int texAge[SAVAGE_NR_TEX_HEAPS];
/* Mechanism to validate card state.
*/
int ctxOwner;
} drm_savage_sarea_t, *drm_savage_sarea_ptr;
/* Savage-specific ioctls
*/
#define DRM_SAVAGE_BCI_INIT 0x00
#define DRM_SAVAGE_BCI_CMDBUF 0x01
#define DRM_SAVAGE_BCI_EVENT_EMIT 0x02
#define DRM_SAVAGE_BCI_EVENT_WAIT 0x03
#define DRM_IOCTL_SAVAGE_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_INIT, drm_savage_init_t)
#define DRM_IOCTL_SAVAGE_CMDBUF DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_CMDBUF, drm_savage_cmdbuf_t)
#define DRM_IOCTL_SAVAGE_EVENT_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_EMIT, drm_savage_event_emit_t)
#define DRM_IOCTL_SAVAGE_EVENT_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_WAIT, drm_savage_event_wait_t)
#define SAVAGE_DMA_PCI 1
#define SAVAGE_DMA_AGP 3
typedef struct drm_savage_init {
enum {
SAVAGE_INIT_BCI = 1,
SAVAGE_CLEANUP_BCI = 2
} func;
unsigned int sarea_priv_offset;
/* some parameters */
unsigned int cob_size;
unsigned int bci_threshold_lo, bci_threshold_hi;
unsigned int dma_type;
/* frame buffer layout */
unsigned int fb_bpp;
unsigned int front_offset, front_pitch;
unsigned int back_offset, back_pitch;
unsigned int depth_bpp;
unsigned int depth_offset, depth_pitch;
/* local textures */
unsigned int texture_offset;
unsigned int texture_size;
/* physical locations of non-permanent maps */
unsigned long status_offset;
unsigned long buffers_offset;
unsigned long agp_textures_offset;
unsigned long cmd_dma_offset;
} drm_savage_init_t;
typedef union drm_savage_cmd_header drm_savage_cmd_header_t;
typedef struct drm_savage_cmdbuf {
/* command buffer in client's address space */
drm_savage_cmd_header_t __user *cmd_addr;
unsigned int size; /* size of the command buffer in 64bit units */
unsigned int dma_idx; /* DMA buffer index to use */
int discard; /* discard DMA buffer when done */
/* vertex buffer in client's address space */
unsigned int __user *vb_addr;
unsigned int vb_size; /* size of client vertex buffer in bytes */
unsigned int vb_stride; /* stride of vertices in 32bit words */
/* boxes in client's address space */
struct drm_clip_rect __user *box_addr;
unsigned int nbox; /* number of clipping boxes */
} drm_savage_cmdbuf_t;
#define SAVAGE_WAIT_2D 0x1 /* wait for 2D idle before updating event tag */
#define SAVAGE_WAIT_3D 0x2 /* wait for 3D idle before updating event tag */
#define SAVAGE_WAIT_IRQ 0x4 /* emit or wait for IRQ, not implemented yet */
typedef struct drm_savage_event {
unsigned int count;
unsigned int flags;
} drm_savage_event_emit_t, drm_savage_event_wait_t;
/* Commands for the cmdbuf ioctl
*/
#define SAVAGE_CMD_STATE 0 /* a range of state registers */
#define SAVAGE_CMD_DMA_PRIM 1 /* vertices from DMA buffer */
#define SAVAGE_CMD_VB_PRIM 2 /* vertices from client vertex buffer */
#define SAVAGE_CMD_DMA_IDX 3 /* indexed vertices from DMA buffer */
#define SAVAGE_CMD_VB_IDX 4 /* indexed vertices client vertex buffer */
#define SAVAGE_CMD_CLEAR 5 /* clear buffers */
#define SAVAGE_CMD_SWAP 6 /* swap buffers */
/* Primitive types
*/
#define SAVAGE_PRIM_TRILIST 0 /* triangle list */
#define SAVAGE_PRIM_TRISTRIP 1 /* triangle strip */
#define SAVAGE_PRIM_TRIFAN 2 /* triangle fan */
#define SAVAGE_PRIM_TRILIST_201 3 /* reorder verts for correct flat
* shading on s3d */
/* Skip flags (vertex format)
*/
#define SAVAGE_SKIP_Z 0x01
#define SAVAGE_SKIP_W 0x02
#define SAVAGE_SKIP_C0 0x04
#define SAVAGE_SKIP_C1 0x08
#define SAVAGE_SKIP_S0 0x10
#define SAVAGE_SKIP_T0 0x20
#define SAVAGE_SKIP_ST0 0x30
#define SAVAGE_SKIP_S1 0x40
#define SAVAGE_SKIP_T1 0x80
#define SAVAGE_SKIP_ST1 0xc0
#define SAVAGE_SKIP_ALL_S3D 0x3f
#define SAVAGE_SKIP_ALL_S4 0xff
/* Buffer names for clear command
*/
#define SAVAGE_FRONT 0x1
#define SAVAGE_BACK 0x2
#define SAVAGE_DEPTH 0x4
/* 64-bit command header
*/
union drm_savage_cmd_header {
struct {
unsigned char cmd; /* command */
unsigned char pad0;
unsigned short pad1;
unsigned short pad2;
unsigned short pad3;
} cmd; /* generic */
struct {
unsigned char cmd;
unsigned char global; /* need idle engine? */
unsigned short count; /* number of consecutive registers */
unsigned short start; /* first register */
unsigned short pad3;
} state; /* SAVAGE_CMD_STATE */
struct {
unsigned char cmd;
unsigned char prim; /* primitive type */
unsigned short skip; /* vertex format (skip flags) */
unsigned short count; /* number of vertices */
unsigned short start; /* first vertex in DMA/vertex buffer */
} prim; /* SAVAGE_CMD_DMA_PRIM, SAVAGE_CMD_VB_PRIM */
struct {
unsigned char cmd;
unsigned char prim;
unsigned short skip;
unsigned short count; /* number of indices that follow */
unsigned short pad3;
} idx; /* SAVAGE_CMD_DMA_IDX, SAVAGE_CMD_VB_IDX */
struct {
unsigned char cmd;
unsigned char pad0;
unsigned short pad1;
unsigned int flags;
} clear0; /* SAVAGE_CMD_CLEAR */
struct {
unsigned int mask;
unsigned int value;
} clear1; /* SAVAGE_CMD_CLEAR data */
};
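/*
 * Illustrative sketch only, not part of the original header: filling in one
 * 64-bit command header for a triangle-list draw from a DMA buffer.  The
 * helper name and the chosen skip flags are made up for the example.
 */
static __inline__ union drm_savage_cmd_header
savage_example_prim_header(unsigned short nverts, unsigned short first)
{
	union drm_savage_cmd_header h;

	h.prim.cmd = SAVAGE_CMD_DMA_PRIM;	/* vertices come from a DMA buffer */
	h.prim.prim = SAVAGE_PRIM_TRILIST;	/* draw independent triangles */
	h.prim.skip = SAVAGE_SKIP_ALL_S4;	/* example vertex format on Savage4 */
	h.prim.count = nverts;			/* number of vertices */
	h.prim.start = first;			/* first vertex in the DMA buffer */
	return h;
}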
#endif

View File

@ -1,117 +0,0 @@
/* savage_drv.c -- Savage DRI driver
*/
/*-
* Copyright 2005 Eric Anholt
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* ERIC ANHOLT BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <anholt@FreeBSD.org>
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "dev/drm/drmP.h"
#include "dev/drm/drm.h"
#include "dev/drm/savage_drm.h"
#include "dev/drm/savage_drv.h"
#include "dev/drm/drm_pciids.h"
/* drv_PCI_IDs comes from drm_pciids.h, generated from drm_pciids.txt. */
static drm_pci_id_list_t savage_pciidlist[] = {
savage_PCI_IDS
};
static void savage_configure(struct drm_device *dev)
{
dev->driver->driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA |
DRIVER_HAVE_DMA;
dev->driver->buf_priv_size = sizeof(drm_savage_buf_priv_t);
dev->driver->load = savage_driver_load;
dev->driver->firstopen = savage_driver_firstopen;
dev->driver->lastclose = savage_driver_lastclose;
dev->driver->unload = savage_driver_unload;
dev->driver->reclaim_buffers_locked = savage_reclaim_buffers;
dev->driver->dma_ioctl = savage_bci_buffers;
dev->driver->ioctls = savage_ioctls;
dev->driver->max_ioctl = savage_max_ioctl;
dev->driver->name = DRIVER_NAME;
dev->driver->desc = DRIVER_DESC;
dev->driver->date = DRIVER_DATE;
dev->driver->major = DRIVER_MAJOR;
dev->driver->minor = DRIVER_MINOR;
dev->driver->patchlevel = DRIVER_PATCHLEVEL;
}
static int
savage_probe(device_t kdev)
{
return drm_probe(kdev, savage_pciidlist);
}
static int
savage_attach(device_t kdev)
{
struct drm_device *dev = device_get_softc(kdev);
dev->driver = malloc(sizeof(struct drm_driver_info), DRM_MEM_DRIVER,
M_WAITOK | M_ZERO);
savage_configure(dev);
return drm_attach(kdev, savage_pciidlist);
}
static int
savage_detach(device_t kdev)
{
struct drm_device *dev = device_get_softc(kdev);
int ret;
ret = drm_detach(kdev);
free(dev->driver, DRM_MEM_DRIVER);
return ret;
}
static device_method_t savage_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, savage_probe),
DEVMETHOD(device_attach, savage_attach),
DEVMETHOD(device_detach, savage_detach),
{ 0, 0 }
};
static driver_t savage_driver = {
"drm",
savage_methods,
sizeof(struct drm_device)
};
extern devclass_t drm_devclass;
DRIVER_MODULE(savage, vgapci, savage_driver, drm_devclass, 0, 0);
MODULE_DEPEND(savage, drm, 1, 1, 1);

View File

@ -1,578 +0,0 @@
/* savage_drv.h -- Private header for the savage driver */
/*-
* Copyright 2004 Felix Kuehling
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
* CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#ifndef __SAVAGE_DRV_H__
#define __SAVAGE_DRV_H__
#define DRIVER_AUTHOR "Felix Kuehling"
#define DRIVER_NAME "savage"
#define DRIVER_DESC "Savage3D/MX/IX, Savage4, SuperSavage, Twister, ProSavage[DDR]"
#define DRIVER_DATE "20050313"
#define DRIVER_MAJOR 2
#define DRIVER_MINOR 4
#define DRIVER_PATCHLEVEL 1
/* Interface history:
*
* 1.x The DRM driver from the VIA/S3 code drop, basically a dummy
* 2.0 The first real DRM
* 2.1 Scissors registers managed by the DRM, 3D operations clipped by
* cliprects of the cmdbuf ioctl
* 2.2 Implemented SAVAGE_CMD_DMA_IDX and SAVAGE_CMD_VB_IDX
* 2.3 Event counters used by BCI_EVENT_EMIT/WAIT ioctls are now 32 bits
* wide and thus very long lived (unlikely to ever wrap). The size
* in the struct was 32 bits before, but only 16 bits were used
* 2.4 Implemented command DMA. Now drm_savage_init_t.cmd_dma_offset is
* actually used
*/
typedef struct drm_savage_age {
uint16_t event;
unsigned int wrap;
} drm_savage_age_t;
typedef struct drm_savage_buf_priv {
struct drm_savage_buf_priv *next;
struct drm_savage_buf_priv *prev;
drm_savage_age_t age;
struct drm_buf *buf;
} drm_savage_buf_priv_t;
typedef struct drm_savage_dma_page {
drm_savage_age_t age;
unsigned int used, flushed;
} drm_savage_dma_page_t;
#define SAVAGE_DMA_PAGE_SIZE 1024 /* in dwords */
/* Fake DMA buffer size in bytes. 4 pages. Allows a maximum command
* size of 16kbytes or 4k entries. Minimum requirement would be
* 10kbytes for 255 40-byte vertices in one drawing command. */
#define SAVAGE_FAKE_DMA_SIZE (SAVAGE_DMA_PAGE_SIZE*4*4)
/* interesting bits of hardware state that are saved in dev_priv */
typedef union {
struct drm_savage_common_state {
uint32_t vbaddr;
} common;
struct {
unsigned char pad[sizeof(struct drm_savage_common_state)];
uint32_t texctrl, texaddr;
uint32_t scstart, new_scstart;
uint32_t scend, new_scend;
} s3d;
struct {
unsigned char pad[sizeof(struct drm_savage_common_state)];
uint32_t texdescr, texaddr0, texaddr1;
uint32_t drawctrl0, new_drawctrl0;
uint32_t drawctrl1, new_drawctrl1;
} s4;
} drm_savage_state_t;
/* these chip tags should match the ones in the 2D driver in savage_regs.h. */
enum savage_family {
S3_UNKNOWN = 0,
S3_SAVAGE3D,
S3_SAVAGE_MX,
S3_SAVAGE4,
S3_PROSAVAGE,
S3_TWISTER,
S3_PROSAVAGEDDR,
S3_SUPERSAVAGE,
S3_SAVAGE2000,
S3_LAST
};
extern struct drm_ioctl_desc savage_ioctls[];
extern int savage_max_ioctl;
#define S3_SAVAGE3D_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE_MX))
#define S3_SAVAGE4_SERIES(chip) ((chip==S3_SAVAGE4) \
|| (chip==S3_PROSAVAGE) \
|| (chip==S3_TWISTER) \
|| (chip==S3_PROSAVAGEDDR))
#define S3_SAVAGE_MOBILE_SERIES(chip) ((chip==S3_SAVAGE_MX) || (chip==S3_SUPERSAVAGE))
#define S3_SAVAGE_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE2000))
#define S3_MOBILE_TWISTER_SERIES(chip) ((chip==S3_TWISTER) \
||(chip==S3_PROSAVAGEDDR))
/* flags */
#define SAVAGE_IS_AGP 1
typedef struct drm_savage_private {
drm_savage_sarea_t *sarea_priv;
drm_savage_buf_priv_t head, tail;
/* who am I? */
enum savage_family chipset;
unsigned int cob_size;
unsigned int bci_threshold_lo, bci_threshold_hi;
unsigned int dma_type;
/* frame buffer layout */
unsigned int fb_bpp;
unsigned int front_offset, front_pitch;
unsigned int back_offset, back_pitch;
unsigned int depth_bpp;
unsigned int depth_offset, depth_pitch;
/* bitmap descriptors for swap and clear */
unsigned int front_bd, back_bd, depth_bd;
/* local textures */
unsigned int texture_offset;
unsigned int texture_size;
/* memory regions in physical memory */
drm_local_map_t *sarea;
drm_local_map_t *mmio;
drm_local_map_t *fb;
drm_local_map_t *aperture;
drm_local_map_t *status;
drm_local_map_t *agp_textures;
drm_local_map_t *cmd_dma;
drm_local_map_t fake_dma;
struct {
int handle;
unsigned long base, size;
} mtrr[3];
/* BCI and status-related stuff */
volatile uint32_t *status_ptr, *bci_ptr;
uint32_t status_used_mask;
uint16_t event_counter;
unsigned int event_wrap;
/* Savage4 command DMA */
drm_savage_dma_page_t *dma_pages;
unsigned int nr_dma_pages, first_dma_page, current_dma_page;
drm_savage_age_t last_dma_age;
/* saved hw state for global/local check on S3D */
uint32_t hw_draw_ctrl, hw_zbuf_ctrl;
/* and for scissors (global, so don't emit if not changed) */
uint32_t hw_scissors_start, hw_scissors_end;
drm_savage_state_t state;
/* after emitting a wait cmd Savage3D needs 63 nops before next DMA */
unsigned int waiting;
/* config/hardware-dependent function pointers */
int (*wait_fifo)(struct drm_savage_private *dev_priv, unsigned int n);
int (*wait_evnt)(struct drm_savage_private *dev_priv, uint16_t e);
/* Err, there is a macro wait_event in include/linux/wait.h.
* Avoid unwanted macro expansion. */
void (*emit_clip_rect)(struct drm_savage_private *dev_priv,
const struct drm_clip_rect *pbox);
void (*dma_flush)(struct drm_savage_private *dev_priv);
} drm_savage_private_t;
/* ioctls */
extern int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int savage_bci_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv);
/* BCI functions */
extern uint16_t savage_bci_emit_event(drm_savage_private_t *dev_priv,
unsigned int flags);
extern void savage_freelist_put(struct drm_device *dev, struct drm_buf *buf);
extern void savage_dma_reset(drm_savage_private_t *dev_priv);
extern void savage_dma_wait(drm_savage_private_t *dev_priv, unsigned int page);
extern uint32_t *savage_dma_alloc(drm_savage_private_t *dev_priv,
unsigned int n);
extern int savage_driver_load(struct drm_device *dev, unsigned long chipset);
extern int savage_driver_firstopen(struct drm_device *dev);
extern void savage_driver_lastclose(struct drm_device *dev);
extern int savage_driver_unload(struct drm_device *dev);
extern void savage_reclaim_buffers(struct drm_device *dev,
struct drm_file *file_priv);
/* state functions */
extern void savage_emit_clip_rect_s3d(drm_savage_private_t *dev_priv,
const struct drm_clip_rect *pbox);
extern void savage_emit_clip_rect_s4(drm_savage_private_t *dev_priv,
const struct drm_clip_rect *pbox);
#define SAVAGE_FB_SIZE_S3 0x01000000 /* 16MB */
#define SAVAGE_FB_SIZE_S4 0x02000000 /* 32MB */
#define SAVAGE_MMIO_SIZE 0x00080000 /* 512kB */
#define SAVAGE_APERTURE_OFFSET 0x02000000 /* 32MB */
#define SAVAGE_APERTURE_SIZE 0x05000000 /* 5 tiled surfaces, 16MB each */
#define SAVAGE_BCI_OFFSET 0x00010000 /* offset of the BCI region
* inside the MMIO region */
#define SAVAGE_BCI_FIFO_SIZE 32 /* number of entries in on-chip
* BCI FIFO */
/*
* MMIO registers
*/
#define SAVAGE_STATUS_WORD0 0x48C00
#define SAVAGE_STATUS_WORD1 0x48C04
#define SAVAGE_ALT_STATUS_WORD0 0x48C60
#define SAVAGE_FIFO_USED_MASK_S3D 0x0001ffff
#define SAVAGE_FIFO_USED_MASK_S4 0x001fffff
/* Copied from savage_bci.h in the 2D driver with some renaming. */
/* Bitmap descriptors */
#define SAVAGE_BD_STRIDE_SHIFT 0
#define SAVAGE_BD_BPP_SHIFT 16
#define SAVAGE_BD_TILE_SHIFT 24
#define SAVAGE_BD_BW_DISABLE (1<<28)
/* common: */
#define SAVAGE_BD_TILE_LINEAR 0
/* savage4, MX, IX, 3D */
#define SAVAGE_BD_TILE_16BPP 2
#define SAVAGE_BD_TILE_32BPP 3
/* twister, prosavage, DDR, supersavage, 2000 */
#define SAVAGE_BD_TILE_DEST 1
#define SAVAGE_BD_TILE_TEXTURE 2
/* GBD - BCI enable */
/* savage4, MX, IX, 3D */
#define SAVAGE_GBD_BCI_ENABLE 8
/* twister, prosavage, DDR, supersavage, 2000 */
#define SAVAGE_GBD_BCI_ENABLE_TWISTER 0
#define SAVAGE_GBD_BIG_ENDIAN 4
#define SAVAGE_GBD_LITTLE_ENDIAN 0
#define SAVAGE_GBD_64 1
/* Global Bitmap Descriptor */
#define SAVAGE_BCI_GLB_BD_LOW 0x8168
#define SAVAGE_BCI_GLB_BD_HIGH 0x816C
/*
* BCI registers
*/
/* Savage4/Twister/ProSavage 3D registers */
#define SAVAGE_DRAWLOCALCTRL_S4 0x1e
#define SAVAGE_TEXPALADDR_S4 0x1f
#define SAVAGE_TEXCTRL0_S4 0x20
#define SAVAGE_TEXCTRL1_S4 0x21
#define SAVAGE_TEXADDR0_S4 0x22
#define SAVAGE_TEXADDR1_S4 0x23
#define SAVAGE_TEXBLEND0_S4 0x24
#define SAVAGE_TEXBLEND1_S4 0x25
#define SAVAGE_TEXXPRCLR_S4 0x26 /* never used */
#define SAVAGE_TEXDESCR_S4 0x27
#define SAVAGE_FOGTABLE_S4 0x28
#define SAVAGE_FOGCTRL_S4 0x30
#define SAVAGE_STENCILCTRL_S4 0x31
#define SAVAGE_ZBUFCTRL_S4 0x32
#define SAVAGE_ZBUFOFF_S4 0x33
#define SAVAGE_DESTCTRL_S4 0x34
#define SAVAGE_DRAWCTRL0_S4 0x35
#define SAVAGE_DRAWCTRL1_S4 0x36
#define SAVAGE_ZWATERMARK_S4 0x37
#define SAVAGE_DESTTEXRWWATERMARK_S4 0x38
#define SAVAGE_TEXBLENDCOLOR_S4 0x39
/* Savage3D/MX/IX 3D registers */
#define SAVAGE_TEXPALADDR_S3D 0x18
#define SAVAGE_TEXXPRCLR_S3D 0x19 /* never used */
#define SAVAGE_TEXADDR_S3D 0x1A
#define SAVAGE_TEXDESCR_S3D 0x1B
#define SAVAGE_TEXCTRL_S3D 0x1C
#define SAVAGE_FOGTABLE_S3D 0x20
#define SAVAGE_FOGCTRL_S3D 0x30
#define SAVAGE_DRAWCTRL_S3D 0x31
#define SAVAGE_ZBUFCTRL_S3D 0x32
#define SAVAGE_ZBUFOFF_S3D 0x33
#define SAVAGE_DESTCTRL_S3D 0x34
#define SAVAGE_SCSTART_S3D 0x35
#define SAVAGE_SCEND_S3D 0x36
#define SAVAGE_ZWATERMARK_S3D 0x37
#define SAVAGE_DESTTEXRWWATERMARK_S3D 0x38
/* common stuff */
#define SAVAGE_VERTBUFADDR 0x3e
#define SAVAGE_BITPLANEWTMASK 0xd7
#define SAVAGE_DMABUFADDR 0x51
/* texture enable bits (needed for tex addr checking) */
#define SAVAGE_TEXCTRL_TEXEN_MASK 0x00010000 /* S3D */
#define SAVAGE_TEXDESCR_TEX0EN_MASK 0x02000000 /* S4 */
#define SAVAGE_TEXDESCR_TEX1EN_MASK 0x04000000 /* S4 */
/* Global fields in Savage4/Twister/ProSavage 3D registers:
*
* All texture registers and DrawLocalCtrl are local. All other
* registers are global. */
/* Global fields in Savage3D/MX/IX 3D registers:
*
* All texture registers are local. DrawCtrl and ZBufCtrl are
* partially local. All other registers are global.
*
* DrawCtrl global fields: cullMode, alphaTestCmpFunc, alphaTestEn, alphaRefVal
* ZBufCtrl global fields: zCmpFunc, zBufEn
*/
#define SAVAGE_DRAWCTRL_S3D_GLOBAL 0x03f3c00c
#define SAVAGE_ZBUFCTRL_S3D_GLOBAL 0x00000027
/* Masks for scissor bits (drawCtrl[01] on s4, scissorStart/End on s3d)
*/
#define SAVAGE_SCISSOR_MASK_S4 0x00fff7ff
#define SAVAGE_SCISSOR_MASK_S3D 0x07ff07ff
/*
* BCI commands
*/
#define BCI_CMD_NOP 0x40000000
#define BCI_CMD_RECT 0x48000000
#define BCI_CMD_RECT_XP 0x01000000
#define BCI_CMD_RECT_YP 0x02000000
#define BCI_CMD_SCANLINE 0x50000000
#define BCI_CMD_LINE 0x5C000000
#define BCI_CMD_LINE_LAST_PIXEL 0x58000000
#define BCI_CMD_BYTE_TEXT 0x63000000
#define BCI_CMD_NT_BYTE_TEXT 0x67000000
#define BCI_CMD_BIT_TEXT 0x6C000000
#define BCI_CMD_GET_ROP(cmd) (((cmd) >> 16) & 0xFF)
#define BCI_CMD_SET_ROP(cmd, rop) ((cmd) |= ((rop & 0xFF) << 16))
#define BCI_CMD_SEND_COLOR 0x00008000
#define BCI_CMD_CLIP_NONE 0x00000000
#define BCI_CMD_CLIP_CURRENT 0x00002000
#define BCI_CMD_CLIP_LR 0x00004000
#define BCI_CMD_CLIP_NEW 0x00006000
#define BCI_CMD_DEST_GBD 0x00000000
#define BCI_CMD_DEST_PBD 0x00000800
#define BCI_CMD_DEST_PBD_NEW 0x00000C00
#define BCI_CMD_DEST_SBD 0x00001000
#define BCI_CMD_DEST_SBD_NEW 0x00001400
#define BCI_CMD_SRC_TRANSPARENT 0x00000200
#define BCI_CMD_SRC_SOLID 0x00000000
#define BCI_CMD_SRC_GBD 0x00000020
#define BCI_CMD_SRC_COLOR 0x00000040
#define BCI_CMD_SRC_MONO 0x00000060
#define BCI_CMD_SRC_PBD_COLOR 0x00000080
#define BCI_CMD_SRC_PBD_MONO 0x000000A0
#define BCI_CMD_SRC_PBD_COLOR_NEW 0x000000C0
#define BCI_CMD_SRC_PBD_MONO_NEW 0x000000E0
#define BCI_CMD_SRC_SBD_COLOR 0x00000100
#define BCI_CMD_SRC_SBD_MONO 0x00000120
#define BCI_CMD_SRC_SBD_COLOR_NEW 0x00000140
#define BCI_CMD_SRC_SBD_MONO_NEW 0x00000160
#define BCI_CMD_PAT_TRANSPARENT 0x00000010
#define BCI_CMD_PAT_NONE 0x00000000
#define BCI_CMD_PAT_COLOR 0x00000002
#define BCI_CMD_PAT_MONO 0x00000003
#define BCI_CMD_PAT_PBD_COLOR 0x00000004
#define BCI_CMD_PAT_PBD_MONO 0x00000005
#define BCI_CMD_PAT_PBD_COLOR_NEW 0x00000006
#define BCI_CMD_PAT_PBD_MONO_NEW 0x00000007
#define BCI_CMD_PAT_SBD_COLOR 0x00000008
#define BCI_CMD_PAT_SBD_MONO 0x00000009
#define BCI_CMD_PAT_SBD_COLOR_NEW 0x0000000A
#define BCI_CMD_PAT_SBD_MONO_NEW 0x0000000B
#define BCI_BD_BW_DISABLE 0x10000000
#define BCI_BD_TILE_MASK 0x03000000
#define BCI_BD_TILE_NONE 0x00000000
#define BCI_BD_TILE_16 0x02000000
#define BCI_BD_TILE_32 0x03000000
#define BCI_BD_GET_BPP(bd) (((bd) >> 16) & 0xFF)
#define BCI_BD_SET_BPP(bd, bpp) ((bd) |= (((bpp) & 0xFF) << 16))
#define BCI_BD_GET_STRIDE(bd) ((bd) & 0xFFFF)
#define BCI_BD_SET_STRIDE(bd, st) ((bd) |= ((st) & 0xFFFF))
#define BCI_CMD_SET_REGISTER 0x96000000
#define BCI_CMD_WAIT 0xC0000000
#define BCI_CMD_WAIT_3D 0x00010000
#define BCI_CMD_WAIT_2D 0x00020000
#define BCI_CMD_UPDATE_EVENT_TAG 0x98000000
#define BCI_CMD_DRAW_PRIM 0x80000000
#define BCI_CMD_DRAW_INDEXED_PRIM 0x88000000
#define BCI_CMD_DRAW_CONT 0x01000000
#define BCI_CMD_DRAW_TRILIST 0x00000000
#define BCI_CMD_DRAW_TRISTRIP 0x02000000
#define BCI_CMD_DRAW_TRIFAN 0x04000000
#define BCI_CMD_DRAW_SKIPFLAGS 0x000000ff
#define BCI_CMD_DRAW_NO_Z 0x00000001
#define BCI_CMD_DRAW_NO_W 0x00000002
#define BCI_CMD_DRAW_NO_CD 0x00000004
#define BCI_CMD_DRAW_NO_CS 0x00000008
#define BCI_CMD_DRAW_NO_U0 0x00000010
#define BCI_CMD_DRAW_NO_V0 0x00000020
#define BCI_CMD_DRAW_NO_UV0 0x00000030
#define BCI_CMD_DRAW_NO_U1 0x00000040
#define BCI_CMD_DRAW_NO_V1 0x00000080
#define BCI_CMD_DRAW_NO_UV1 0x000000c0
#define BCI_CMD_DMA 0xa8000000
#define BCI_W_H(w, h) ((((h) << 16) | (w)) & 0x0FFF0FFF)
#define BCI_X_Y(x, y) ((((y) << 16) | (x)) & 0x0FFF0FFF)
#define BCI_X_W(x, w) ((((w) << 16) | (x)) & 0x0FFF0FFF)
#define BCI_CLIP_LR(l, r) ((((r) << 16) | (l)) & 0x0FFF0FFF)
#define BCI_CLIP_TL(t, l) ((((t) << 16) | (l)) & 0x0FFF0FFF)
#define BCI_CLIP_BR(b, r) ((((b) << 16) | (r)) & 0x0FFF0FFF)
#define BCI_LINE_X_Y(x, y) (((y) << 16) | ((x) & 0xFFFF))
#define BCI_LINE_STEPS(diag, axi) (((axi) << 16) | ((diag) & 0xFFFF))
#define BCI_LINE_MISC(maj, ym, xp, yp, err) \
(((maj) & 0x1FFF) | \
((ym) ? 1<<13 : 0) | \
((xp) ? 1<<14 : 0) | \
((yp) ? 1<<15 : 0) | \
((err) << 16))
/*
* common commands
*/
#define BCI_SET_REGISTERS( first, n ) \
BCI_WRITE(BCI_CMD_SET_REGISTER | \
((uint32_t)(n) & 0xff) << 16 | \
((uint32_t)(first) & 0xffff))
#define DMA_SET_REGISTERS( first, n ) \
DMA_WRITE(BCI_CMD_SET_REGISTER | \
((uint32_t)(n) & 0xff) << 16 | \
((uint32_t)(first) & 0xffff))
#define BCI_DRAW_PRIMITIVE(n, type, skip) \
BCI_WRITE(BCI_CMD_DRAW_PRIM | (type) | (skip) | \
((n) << 16))
#define DMA_DRAW_PRIMITIVE(n, type, skip) \
DMA_WRITE(BCI_CMD_DRAW_PRIM | (type) | (skip) | \
((n) << 16))
#define BCI_DRAW_INDICES_S3D(n, type, i0) \
BCI_WRITE(BCI_CMD_DRAW_INDEXED_PRIM | (type) | \
((n) << 16) | (i0))
#define BCI_DRAW_INDICES_S4(n, type, skip) \
BCI_WRITE(BCI_CMD_DRAW_INDEXED_PRIM | (type) | \
(skip) | ((n) << 16))
#define BCI_DMA(n) \
BCI_WRITE(BCI_CMD_DMA | (((n) >> 1) - 1))
/*
* access to MMIO
*/
#define SAVAGE_READ(reg) DRM_READ32( dev_priv->mmio, (reg) )
#define SAVAGE_WRITE(reg) DRM_WRITE32( dev_priv->mmio, (reg) )
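/*
 * Illustrative sketch only, not part of the original driver: reading the
 * current FIFO fill level through the MMIO access macro above.  Whether
 * STATUS_WORD0 or ALT_STATUS_WORD0 applies depends on the chip family; the
 * mask comes from dev_priv->status_used_mask.
 */
static __inline__ uint32_t savage_example_fifo_used(drm_savage_private_t *dev_priv)
{
	return SAVAGE_READ(SAVAGE_STATUS_WORD0) & dev_priv->status_used_mask;
}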
/*
* access to the burst command interface (BCI)
*/
#define SAVAGE_BCI_DEBUG 1
#define BCI_LOCALS volatile uint32_t *bci_ptr;
#define BEGIN_BCI( n ) do { \
dev_priv->wait_fifo(dev_priv, (n)); \
bci_ptr = dev_priv->bci_ptr; \
} while(0)
#define BCI_WRITE( val ) *bci_ptr++ = (uint32_t)(val)
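/*
 * Illustrative sketch only, not part of the original driver: the usual
 * pattern for the BCI macros above -- reserve FIFO entries, then write the
 * command and data words.  The register offset and value are caller-supplied
 * and purely illustrative.
 */
static __inline__ void savage_example_set_one_reg(drm_savage_private_t *dev_priv,
						  unsigned int reg, uint32_t val)
{
	BCI_LOCALS;			/* declares bci_ptr */

	BEGIN_BCI(2);			/* wait for two free FIFO entries */
	BCI_SET_REGISTERS(reg, 1);	/* command word: write one register */
	BCI_WRITE(val);			/* the register value itself */
}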
/*
* command DMA support
*/
#define SAVAGE_DMA_DEBUG 1
#define DMA_LOCALS uint32_t *dma_ptr;
#define BEGIN_DMA( n ) do { \
unsigned int cur = dev_priv->current_dma_page; \
unsigned int rest = SAVAGE_DMA_PAGE_SIZE - \
dev_priv->dma_pages[cur].used; \
if ((n) > rest) { \
dma_ptr = savage_dma_alloc(dev_priv, (n)); \
} else { /* fast path for small allocations */ \
dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle + \
cur * SAVAGE_DMA_PAGE_SIZE + \
dev_priv->dma_pages[cur].used; \
if (dev_priv->dma_pages[cur].used == 0) \
savage_dma_wait(dev_priv, cur); \
dev_priv->dma_pages[cur].used += (n); \
} \
} while(0)
#define DMA_WRITE( val ) *dma_ptr++ = (uint32_t)(val)
#define DMA_COPY(src, n) do { \
memcpy(dma_ptr, (src), (n)*4); \
dma_ptr += n; \
} while(0)
#if SAVAGE_DMA_DEBUG
#define DMA_COMMIT() do { \
unsigned int cur = dev_priv->current_dma_page; \
uint32_t *expected = (uint32_t *)dev_priv->cmd_dma->handle + \
cur * SAVAGE_DMA_PAGE_SIZE + \
dev_priv->dma_pages[cur].used; \
if (dma_ptr != expected) { \
DRM_ERROR("DMA allocation and use don't match: " \
"%p != %p\n", expected, dma_ptr); \
savage_dma_reset(dev_priv); \
} \
} while(0)
#else
#define DMA_COMMIT() do {/* nothing */} while(0)
#endif
#define DMA_FLUSH() dev_priv->dma_flush(dev_priv)
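/*
 * Illustrative sketch only, not part of the original driver: the usual
 * pattern for the command-DMA macros above.  The single command word written
 * here is only a placeholder.
 */
static __inline__ void savage_example_emit_wait(drm_savage_private_t *dev_priv)
{
	DMA_LOCALS;				/* declares dma_ptr */

	BEGIN_DMA(1);				/* reserve one dword in the DMA page */
	DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D);	/* placeholder command word */
	DMA_COMMIT();				/* debug check: allocation matches usage */
	DMA_FLUSH();				/* hand the buffered commands to the chip */
}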
/* Buffer aging via event tag
*/
#define UPDATE_EVENT_COUNTER( ) do { \
if (dev_priv->status_ptr) { \
uint16_t count; \
/* coordinate with Xserver */ \
count = dev_priv->status_ptr[1023]; \
if (count < dev_priv->event_counter) \
dev_priv->event_wrap++; \
dev_priv->event_counter = count; \
} \
} while(0)
#define SET_AGE( age, e, w ) do { \
(age)->event = e; \
(age)->wrap = w; \
} while(0)
#define TEST_AGE( age, e, w ) \
( (age)->wrap < (w) || ( (age)->wrap == (w) && (age)->event <= (e) ) )
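/*
 * Illustrative sketch only, not part of the original driver: deciding whether
 * a buffer whose age was recorded with SET_AGE() has been retired, using the
 * event counter kept on the status page.
 */
static __inline__ int savage_example_age_done(drm_savage_private_t *dev_priv,
					      const drm_savage_age_t *age)
{
	UPDATE_EVENT_COUNTER();		/* refresh event_counter/event_wrap */
	return TEST_AGE(age, dev_priv->event_counter, dev_priv->event_wrap);
}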
#endif /* __SAVAGE_DRV_H__ */

File diff suppressed because it is too large

View File

@ -1,70 +0,0 @@
/* sis_drv.h -- Private header for sis driver -*- linux-c -*- */
/*-
* Copyright 2005 Eric Anholt
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#ifndef __SIS_DRM_H__
#define __SIS_DRM_H__
/* SiS specific ioctls */
#define NOT_USED_0_3
#define DRM_SIS_FB_ALLOC 0x04
#define DRM_SIS_FB_FREE 0x05
#define NOT_USED_6_12
#define DRM_SIS_AGP_INIT 0x13
#define DRM_SIS_AGP_ALLOC 0x14
#define DRM_SIS_AGP_FREE 0x15
#define DRM_SIS_FB_INIT 0x16
#define DRM_IOCTL_SIS_FB_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_SIS_FB_ALLOC, drm_sis_mem_t)
#define DRM_IOCTL_SIS_FB_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_SIS_FB_FREE, drm_sis_mem_t)
#define DRM_IOCTL_SIS_AGP_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_SIS_AGP_INIT, drm_sis_agp_t)
#define DRM_IOCTL_SIS_AGP_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_SIS_AGP_ALLOC, drm_sis_mem_t)
#define DRM_IOCTL_SIS_AGP_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_SIS_AGP_FREE, drm_sis_mem_t)
#define DRM_IOCTL_SIS_FB_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_SIS_FB_INIT, drm_sis_fb_t)
/*
#define DRM_IOCTL_SIS_FLIP DRM_IOW( 0x48, drm_sis_flip_t)
#define DRM_IOCTL_SIS_FLIP_INIT DRM_IO( 0x49)
#define DRM_IOCTL_SIS_FLIP_FINAL DRM_IO( 0x50)
*/
typedef struct {
int context;
unsigned int offset;
unsigned int size;
unsigned long free;
} drm_sis_mem_t;
typedef struct {
unsigned int offset, size;
} drm_sis_agp_t;
typedef struct {
unsigned int offset, size;
} drm_sis_fb_t;
#endif /* __SIS_DRM_H__ */

View File

@ -1,111 +0,0 @@
/* sis.c -- sis driver -*- linux-c -*-
*/
/*-
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "dev/drm/drmP.h"
#include "dev/drm/sis_drm.h"
#include "dev/drm/sis_drv.h"
#include "dev/drm/drm_pciids.h"
/* drv_PCI_IDs comes from drm_pciids.h, generated from drm_pciids.txt. */
static drm_pci_id_list_t sis_pciidlist[] = {
sis_PCI_IDS
};
static void sis_configure(struct drm_device *dev)
{
dev->driver->driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR;
dev->driver->buf_priv_size = 1; /* No dev_priv */
dev->driver->context_ctor = sis_init_context;
dev->driver->context_dtor = sis_final_context;
dev->driver->ioctls = sis_ioctls;
dev->driver->max_ioctl = sis_max_ioctl;
dev->driver->name = DRIVER_NAME;
dev->driver->desc = DRIVER_DESC;
dev->driver->date = DRIVER_DATE;
dev->driver->major = DRIVER_MAJOR;
dev->driver->minor = DRIVER_MINOR;
dev->driver->patchlevel = DRIVER_PATCHLEVEL;
}
static int
sis_probe(device_t kdev)
{
return drm_probe(kdev, sis_pciidlist);
}
static int
sis_attach(device_t kdev)
{
struct drm_device *dev = device_get_softc(kdev);
dev->driver = malloc(sizeof(struct drm_driver_info), DRM_MEM_DRIVER,
M_WAITOK | M_ZERO);
sis_configure(dev);
return drm_attach(kdev, sis_pciidlist);
}
static int
sis_detach(device_t kdev)
{
struct drm_device *dev = device_get_softc(kdev);
int ret;
ret = drm_detach(kdev);
free(dev->driver, DRM_MEM_DRIVER);
return ret;
}
static device_method_t sis_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, sis_probe),
DEVMETHOD(device_attach, sis_attach),
DEVMETHOD(device_detach, sis_detach),
{ 0, 0 }
};
static driver_t sis_driver = {
"drm",
sis_methods,
sizeof(struct drm_device)
};
extern devclass_t drm_devclass;
DRIVER_MODULE(sisdrm, vgapci, sis_driver, drm_devclass, 0, 0);
MODULE_DEPEND(sisdrm, drm, 1, 1, 1);

View File

@ -1,93 +0,0 @@
/* sis_drv.h -- Private header for sis driver -*- linux-c -*- */
/*-
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#ifndef _SIS_DRV_H_
#define _SIS_DRV_H_
/* General customization:
*/
#define DRIVER_AUTHOR "SIS, Tungsten Graphics"
#define DRIVER_NAME "sis"
#define DRIVER_DESC "SIS 300/630/540 and XGI V3XE/V5/V8"
#define DRIVER_DATE "20070626"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 3
#define DRIVER_PATCHLEVEL 0
enum sis_family {
SIS_OTHER = 0,
SIS_CHIP_315 = 1,
};
#if defined(__linux__)
#define SIS_HAVE_CORE_MM
#endif
#ifdef SIS_HAVE_CORE_MM
#include "dev/drm/drm_sman.h"
#define SIS_BASE (dev_priv->mmio)
#define SIS_READ(reg) DRM_READ32(SIS_BASE, reg)
#define SIS_WRITE(reg, val) DRM_WRITE32(SIS_BASE, reg, val)
typedef struct drm_sis_private {
drm_local_map_t *mmio;
unsigned int idle_fault;
struct drm_sman sman;
unsigned int chipset;
int vram_initialized;
int agp_initialized;
unsigned long vram_offset;
unsigned long agp_offset;
} drm_sis_private_t;
extern int sis_idle(struct drm_device *dev);
extern void sis_reclaim_buffers_locked(struct drm_device *dev,
struct drm_file *file_priv);
extern void sis_lastclose(struct drm_device *dev);
#else
#include "dev/drm/sis_ds.h"
typedef struct drm_sis_private {
memHeap_t *AGPHeap;
memHeap_t *FBHeap;
} drm_sis_private_t;
extern int sis_init_context(struct drm_device * dev, int context);
extern int sis_final_context(struct drm_device * dev, int context);
#endif
extern struct drm_ioctl_desc sis_ioctls[];
extern int sis_max_ioctl;
#endif

View File

@ -1,302 +0,0 @@
/* sis_ds.c -- Private header for Direct Rendering Manager -*- linux-c -*-
* Created: Mon Jan 4 10:05:05 1999 by sclin@sis.com.tw
*
* Copyright 2000 Silicon Integrated Systems Corp, Inc., HsinChu, Taiwan.
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Sung-Ching Lin <sclin@sis.com.tw>
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "dev/drm/drmP.h"
#include "dev/drm/drm.h"
#include "dev/drm/sis_ds.h"
/* Simple set data structure; duplicate values are not checked for.
 * Only used temporarily.
 */
set_t *setInit(void)
{
int i;
set_t *set;
set = (set_t *) drm_alloc(sizeof(set_t), DRM_MEM_DRIVER);
if (set != NULL) {
for (i = 0; i < SET_SIZE; i++) {
set->list[i].free_next = i + 1;
set->list[i].alloc_next = -1;
}
set->list[SET_SIZE - 1].free_next = -1;
set->free = 0;
set->alloc = -1;
set->trace = -1;
}
return set;
}
int setAdd(set_t * set, ITEM_TYPE item)
{
int free = set->free;
if (free != -1) {
set->list[free].val = item;
set->free = set->list[free].free_next;
} else {
return 0;
}
set->list[free].alloc_next = set->alloc;
set->alloc = free;
set->list[free].free_next = -1;
return 1;
}
int setDel(set_t * set, ITEM_TYPE item)
{
int alloc = set->alloc;
int prev = -1;
while (alloc != -1) {
if (set->list[alloc].val == item) {
if (prev != -1)
set->list[prev].alloc_next =
set->list[alloc].alloc_next;
else
set->alloc = set->list[alloc].alloc_next;
break;
}
prev = alloc;
alloc = set->list[alloc].alloc_next;
}
if (alloc == -1)
return 0;
set->list[alloc].free_next = set->free;
set->free = alloc;
set->list[alloc].alloc_next = -1;
return 1;
}
/* Calling setAdd() between setFirst() and setNext() invalidates the iteration. */
int setFirst(set_t * set, ITEM_TYPE * item)
{
if (set->alloc == -1)
return 0;
*item = set->list[set->alloc].val;
set->trace = set->list[set->alloc].alloc_next;
return 1;
}
int setNext(set_t * set, ITEM_TYPE * item)
{
if (set->trace == -1)
return 0;
*item = set->list[set->trace].val;
set->trace = set->list[set->trace].alloc_next;
return 1;
}
int setDestroy(set_t * set)
{
drm_free(set, sizeof(set_t), DRM_MEM_DRIVER);
return 1;
}
/*
* GLX Hardware Device Driver common code
* Copyright (C) 1999 Wittawat Yamwong
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* WITTAWAT YAMWONG, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
* OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#define ISFREE(bptr) ((bptr)->free)
memHeap_t *mmInit(int ofs, int size)
{
PMemBlock blocks;
if (size <= 0)
return NULL;
blocks = (TMemBlock *) drm_calloc(1, sizeof(TMemBlock), DRM_MEM_DRIVER);
if (blocks != NULL) {
blocks->ofs = ofs;
blocks->size = size;
blocks->free = 1;
return (memHeap_t *) blocks;
} else
return NULL;
}
/* Checks if a pointer 'b' is part of the heap 'heap' */
int mmBlockInHeap(memHeap_t * heap, PMemBlock b)
{
TMemBlock *p;
if (heap == NULL || b == NULL)
return 0;
p = heap;
while (p != NULL && p != b) {
p = p->next;
}
if (p == b)
return 1;
else
return 0;
}
static TMemBlock *SliceBlock(TMemBlock * p,
int startofs, int size,
int reserved, int alignment)
{
TMemBlock *newblock;
/* break left */
if (startofs > p->ofs) {
newblock = (TMemBlock *) drm_calloc(1, sizeof(TMemBlock),
DRM_MEM_DRIVER);
newblock->ofs = startofs;
newblock->size = p->size - (startofs - p->ofs);
newblock->free = 1;
newblock->next = p->next;
p->size -= newblock->size;
p->next = newblock;
p = newblock;
}
/* break right */
if (size < p->size) {
newblock = (TMemBlock *) drm_calloc(1, sizeof(TMemBlock),
DRM_MEM_DRIVER);
newblock->ofs = startofs + size;
newblock->size = p->size - size;
newblock->free = 1;
newblock->next = p->next;
p->size = size;
p->next = newblock;
}
/* p = middle block */
p->align = alignment;
p->free = 0;
p->reserved = reserved;
return p;
}
PMemBlock mmAllocMem(memHeap_t * heap, int size, int align2, int startSearch)
{
int mask, startofs, endofs;
TMemBlock *p;
if (heap == NULL || align2 < 0 || size <= 0)
return NULL;
mask = (1 << align2) - 1;
startofs = 0;
p = (TMemBlock *) heap;
while (p != NULL) {
if (ISFREE(p)) {
startofs = (p->ofs + mask) & ~mask;
if (startofs < startSearch) {
startofs = startSearch;
}
endofs = startofs + size;
if (endofs <= (p->ofs + p->size))
break;
}
p = p->next;
}
if (p == NULL)
return NULL;
p = SliceBlock(p, startofs, size, 0, mask + 1);
p->heap = heap;
return p;
}
static __inline__ int Join2Blocks(TMemBlock * p)
{
if (p->free && p->next && p->next->free) {
TMemBlock *q = p->next;
p->size += q->size;
p->next = q->next;
drm_free(q, sizeof(TMemBlock), DRM_MEM_DRIVER);
return 1;
}
return 0;
}
int mmFreeMem(PMemBlock b)
{
TMemBlock *p, *prev;
if (b == NULL)
return 0;
if (b->heap == NULL)
return -1;
p = b->heap;
prev = NULL;
while (p != NULL && p != b) {
prev = p;
p = p->next;
}
if (p == NULL || p->free || p->reserved)
return -1;
p->free = 1;
Join2Blocks(p);
if (prev)
Join2Blocks(prev);
return 0;
}

View File

@ -1,149 +0,0 @@
/* sis_ds.h -- Private header for Direct Rendering Manager -*- linux-c -*-
* Created: Mon Jan 4 10:05:05 1999 by sclin@sis.com.tw
*/
/*-
* Copyright 2000 Silicon Integrated Systems Corp, Inc., HsinChu, Taiwan.
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Sung-Ching Lin <sclin@sis.com.tw>
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#ifndef __SIS_DS_H__
#define __SIS_DS_H__
/* Set Data Structure */
#define SET_SIZE 5000
typedef unsigned long ITEM_TYPE;
typedef struct {
ITEM_TYPE val;
int alloc_next, free_next;
} list_item_t;
typedef struct {
int alloc;
int free;
int trace;
list_item_t list[SET_SIZE];
} set_t;
set_t *setInit(void);
int setAdd(set_t * set, ITEM_TYPE item);
int setDel(set_t * set, ITEM_TYPE item);
int setFirst(set_t * set, ITEM_TYPE * item);
int setNext(set_t * set, ITEM_TYPE * item);
int setDestroy(set_t * set);
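/*
 * Illustrative sketch only, not part of the original header: typical
 * iteration over a set.  Note that sis_ds.c warns that calling setAdd()
 * between setFirst() and setNext() invalidates the iteration.
 */
static __inline__ unsigned long sis_example_count_items(set_t *set)
{
	ITEM_TYPE item;
	unsigned long n = 0;
	int more;

	for (more = setFirst(set, &item); more; more = setNext(set, &item))
		n++;
	return n;
}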
/*
* GLX Hardware Device Driver common code
* Copyright (C) 1999 Wittawat Yamwong
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* WITTAWAT YAMWONG, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
* OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
struct mem_block_t {
struct mem_block_t *next;
struct mem_block_t *heap;
int ofs, size;
int align;
unsigned int free:1;
unsigned int reserved:1;
};
typedef struct mem_block_t TMemBlock;
typedef struct mem_block_t *PMemBlock;
/* a heap is just the first block in a chain */
typedef struct mem_block_t memHeap_t;
static __inline__ int mmBlockSize(PMemBlock b)
{
return b->size;
}
static __inline__ int mmOffset(PMemBlock b)
{
return b->ofs;
}
static __inline__ void mmMarkReserved(PMemBlock b)
{
b->reserved = 1;
}
/*
* input: total size in bytes
* return: a heap pointer if OK, NULL if error
*/
memHeap_t *mmInit(int ofs, int size);
/*
* Allocate 'size' bytes with 2^align2 bytes alignment,
* restrict the search to free memory after 'startSearch'
* depth and back buffers should be in different 4mb banks
* to get better page hits if possible
* input: size = size of block
* align2 = 2^align2 bytes alignment
* startSearch = linear offset from start of heap to begin search
* return: pointer to the allocated block, 0 if error
*/
PMemBlock mmAllocMem(memHeap_t * heap, int size, int align2, int startSearch);
/*
* Returns 1 if the block 'b' is part of the heap 'heap'
*/
int mmBlockInHeap(PMemBlock heap, PMemBlock b);
/*
* Free block starts at offset
* input: pointer to a block
* return: 0 if OK, -1 if error
*/
int mmFreeMem(PMemBlock b);
/* For debugging purposes. */
void mmDumpMemInfo(memHeap_t * mmInit);
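/*
 * Illustrative sketch only, not part of the original header: a heap covering
 * 1 MB at offset 0, one 64 kB allocation aligned to 2^12 = 4 kB, then freed
 * again.  The sizes are made up for the example; the heap itself is never
 * torn down here since no destroy routine is exposed.
 */
static __inline__ int sis_example_heap_roundtrip(void)
{
	memHeap_t *heap = mmInit(0, 1024 * 1024);
	PMemBlock block;

	if (heap == NULL)
		return -1;
	block = mmAllocMem(heap, 64 * 1024, 12, 0);
	if (block == NULL)
		return -1;
	return mmFreeMem(block);	/* 0 on success, marks the block free */
}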
#endif /* __SIS_DS_H__ */

View File

@ -1,389 +0,0 @@
/* sis_mm.c -- Private header for Direct Rendering Manager -*- linux-c -*-
* Created: Mon Jan 4 10:05:05 1999 by sclin@sis.com.tw
*
* Copyright 2000 Silicon Integrated Systems Corp, Inc., HsinChu, Taiwan.
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Sung-Ching Lin <sclin@sis.com.tw>
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#if defined(__linux__) && defined(CONFIG_FB_SIS)
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
#include <video/sisfb.h>
#else
#include <linux/sisfb.h>
#endif
#endif
#include "dev/drm/drmP.h"
#include "dev/drm/sis_drm.h"
#include "dev/drm/sis_drv.h"
#include "dev/drm/sis_ds.h"
#define MAX_CONTEXT 100
#define VIDEO_TYPE 0
#define AGP_TYPE 1
typedef struct {
int used;
int context;
set_t *sets[2]; /* 0 for video, 1 for AGP */
} sis_context_t;
static sis_context_t global_ppriv[MAX_CONTEXT];
static int add_alloc_set(int context, int type, unsigned int val)
{
int i, retval = 0;
for (i = 0; i < MAX_CONTEXT; i++) {
if (global_ppriv[i].used && global_ppriv[i].context == context) {
retval = setAdd(global_ppriv[i].sets[type], val);
break;
}
}
return retval;
}
static int del_alloc_set(int context, int type, unsigned int val)
{
int i, retval = 0;
for (i = 0; i < MAX_CONTEXT; i++) {
if (global_ppriv[i].used && global_ppriv[i].context == context) {
retval = setDel(global_ppriv[i].sets[type], val);
break;
}
}
return retval;
}
/* fb management via fb device */
#if defined(__linux__) && defined(CONFIG_FB_SIS)
static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
return 0;
}
static int sis_fb_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_sis_mem_t *fb = data;
struct sis_memreq req;
int retval = 0;
req.size = fb->size;
sis_malloc(&req);
if (req.offset) {
/* TODO */
fb->offset = req.offset;
fb->free = req.offset;
if (!add_alloc_set(fb->context, VIDEO_TYPE, fb->free)) {
DRM_DEBUG("adding to allocation set fails\n");
sis_free(req.offset);
retval = -EINVAL;
}
} else {
fb->offset = 0;
fb->size = 0;
fb->free = 0;
}
DRM_DEBUG("alloc fb, size = %d, offset = %ld\n", fb->size, req.offset);
return retval;
}
static int sis_fb_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_sis_mem_t *fb = data;
int retval = 0;
if (!fb->free)
return -EINVAL;
if (!del_alloc_set(fb->context, VIDEO_TYPE, fb->free))
retval = -EINVAL;
sis_free(fb->free);
DRM_DEBUG("free fb, offset = 0x%lx\n", fb->free);
return retval;
}
#else
/* Called by the X Server to initialize the FB heap. Allocations will fail
* unless this is called. Offset is the beginning of the heap from the
* framebuffer offset (MaxXFBMem in XFree86).
*
* Memory layout according to Thomas Winischofer:
*
* |------------------|DDDDDDDDDDDDDDDDDDDDDDDDDDDDD|HHHH|CCCCCCCCCCC|
*
*   first segment: X driver/sisfb framebuffer memory
*   D:             DRI heap
*   H:             HW cursor
*   C:             command queue
*/
static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_sis_private_t *dev_priv = dev->dev_private;
drm_sis_fb_t *fb = data;
if (dev_priv == NULL) {
dev->dev_private = drm_calloc(1, sizeof(drm_sis_private_t),
DRM_MEM_DRIVER);
dev_priv = dev->dev_private;
if (dev_priv == NULL)
return ENOMEM;
}
if (dev_priv->FBHeap != NULL)
return -EINVAL;
dev_priv->FBHeap = mmInit(fb->offset, fb->size);
DRM_DEBUG("offset = %u, size = %u", fb->offset, fb->size);
return 0;
}
static int sis_fb_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_sis_private_t *dev_priv = dev->dev_private;
drm_sis_mem_t *fb = data;
PMemBlock block;
int retval = 0;
if (dev_priv == NULL || dev_priv->FBHeap == NULL)
return -EINVAL;
block = mmAllocMem(dev_priv->FBHeap, fb->size, 0, 0);
if (block) {
/* TODO */
fb->offset = block->ofs;
fb->free = (unsigned long)block;
if (!add_alloc_set(fb->context, VIDEO_TYPE, fb->free)) {
DRM_DEBUG("adding to allocation set fails\n");
mmFreeMem((PMemBlock) fb->free);
retval = -EINVAL;
}
} else {
fb->offset = 0;
fb->size = 0;
fb->free = 0;
}
DRM_DEBUG("alloc fb, size = %d, offset = %d\n", fb->size, fb->offset);
return retval;
}
static int sis_fb_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_sis_private_t *dev_priv = dev->dev_private;
drm_sis_mem_t *fb = data;
if (dev_priv == NULL || dev_priv->FBHeap == NULL)
return -EINVAL;
if (!mmBlockInHeap(dev_priv->FBHeap, (PMemBlock) fb->free))
return -EINVAL;
if (!del_alloc_set(fb->context, VIDEO_TYPE, fb->free))
return -EINVAL;
mmFreeMem((PMemBlock) fb->free);
DRM_DEBUG("free fb, free = 0x%lx\n", fb->free);
return 0;
}
#endif
/* agp memory management */
static int sis_ioctl_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_sis_private_t *dev_priv = dev->dev_private;
drm_sis_agp_t *agp = data;
if (dev_priv == NULL) {
dev->dev_private = drm_calloc(1, sizeof(drm_sis_private_t),
DRM_MEM_DRIVER);
dev_priv = dev->dev_private;
if (dev_priv == NULL)
return ENOMEM;
}
if (dev_priv->AGPHeap != NULL)
return -EINVAL;
dev_priv->AGPHeap = mmInit(agp->offset, agp->size);
DRM_DEBUG("offset = %u, size = %u", agp->offset, agp->size);
return 0;
}
static int sis_ioctl_agp_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_sis_private_t *dev_priv = dev->dev_private;
drm_sis_mem_t *agp = data;
PMemBlock block;
int retval = 0;
if (dev_priv == NULL || dev_priv->AGPHeap == NULL)
return -EINVAL;
block = mmAllocMem(dev_priv->AGPHeap, agp->size, 0, 0);
if (block) {
/* TODO */
agp->offset = block->ofs;
agp->free = (unsigned long)block;
if (!add_alloc_set(agp->context, AGP_TYPE, agp->free)) {
DRM_DEBUG("adding to allocation set fails\n");
mmFreeMem((PMemBlock) agp->free);
retval = -1;
}
} else {
agp->offset = 0;
agp->size = 0;
agp->free = 0;
}
DRM_DEBUG("alloc agp, size = %d, offset = %d\n", agp->size,
agp->offset);
return retval;
}
static int sis_ioctl_agp_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_sis_private_t *dev_priv = dev->dev_private;
drm_sis_mem_t *agp = data;
if (dev_priv == NULL || dev_priv->AGPHeap == NULL)
return -EINVAL;
if (!mmBlockInHeap(dev_priv->AGPHeap, (PMemBlock) agp->free))
return -EINVAL;
mmFreeMem((PMemBlock) agp->free);
if (!del_alloc_set(agp->context, AGP_TYPE, agp->free))
return -EINVAL;
DRM_DEBUG("free agp, free = 0x%lx\n", agp->free);
return 0;
}
int sis_init_context(struct drm_device *dev, int context)
{
int i;
for (i = 0; i < MAX_CONTEXT; i++) {
if (global_ppriv[i].used &&
(global_ppriv[i].context == context))
break;
}
if (i >= MAX_CONTEXT) {
for (i = 0; i < MAX_CONTEXT; i++) {
if (!global_ppriv[i].used) {
global_ppriv[i].context = context;
global_ppriv[i].used = 1;
global_ppriv[i].sets[0] = setInit();
global_ppriv[i].sets[1] = setInit();
DRM_DEBUG("init allocation set, socket=%d, "
"context = %d\n", i, context);
break;
}
}
if ((i >= MAX_CONTEXT) || (global_ppriv[i].sets[0] == NULL) ||
(global_ppriv[i].sets[1] == NULL)) {
return 0;
}
}
return 1;
}
int sis_final_context(struct drm_device *dev, int context)
{
int i;
for (i = 0; i < MAX_CONTEXT; i++) {
if (global_ppriv[i].used &&
(global_ppriv[i].context == context))
break;
}
if (i < MAX_CONTEXT) {
set_t *set;
ITEM_TYPE item;
int retval;
DRM_DEBUG("find socket %d, context = %d\n", i, context);
/* Video Memory */
set = global_ppriv[i].sets[0];
retval = setFirst(set, &item);
while (retval) {
DRM_DEBUG("free video memory 0x%lx\n", item);
#if defined(__linux__) && defined(CONFIG_FB_SIS)
sis_free(item);
#else
mmFreeMem((PMemBlock) item);
#endif
retval = setNext(set, &item);
}
setDestroy(set);
/* AGP Memory */
set = global_ppriv[i].sets[1];
retval = setFirst(set, &item);
while (retval) {
DRM_DEBUG("free agp memory 0x%lx\n", item);
mmFreeMem((PMemBlock) item);
retval = setNext(set, &item);
}
setDestroy(set);
global_ppriv[i].used = 0;
}
return 1;
}
drm_ioctl_desc_t sis_ioctls[] = {
DRM_IOCTL_DEF(DRM_SIS_FB_ALLOC, sis_fb_alloc, DRM_AUTH),
DRM_IOCTL_DEF(DRM_SIS_FB_FREE, sis_fb_free, DRM_AUTH),
DRM_IOCTL_DEF(DRM_SIS_AGP_INIT, sis_ioctl_agp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_SIS_AGP_ALLOC, sis_ioctl_agp_alloc, DRM_AUTH),
DRM_IOCTL_DEF(DRM_SIS_AGP_FREE, sis_ioctl_agp_free, DRM_AUTH),
DRM_IOCTL_DEF(DRM_SIS_FB_INIT, sis_fb_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY)
};
int sis_max_ioctl = DRM_ARRAY_SIZE(sis_ioctls);

View File

@ -1,113 +0,0 @@
/* tdfx_drv.c -- tdfx driver -*- linux-c -*-
* Created: Thu Oct 7 10:38:32 1999 by faith@precisioninsight.com
*/
/*-
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
* Daryll Strauss <daryll@valinux.com>
* Gareth Hughes <gareth@valinux.com>
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "dev/drm/tdfx_drv.h"
#include "dev/drm/drmP.h"
#include "dev/drm/drm_pciids.h"
/* drv_PCI_IDs comes from drm_pciids.h, generated from drm_pciids.txt. */
static drm_pci_id_list_t tdfx_pciidlist[] = {
tdfx_PCI_IDS
};
static void tdfx_configure(struct drm_device *dev)
{
dev->driver->driver_features =
DRIVER_USE_MTRR;
dev->driver->buf_priv_size = 1; /* No dev_priv */
dev->driver->max_ioctl = 0;
dev->driver->name = DRIVER_NAME;
dev->driver->desc = DRIVER_DESC;
dev->driver->date = DRIVER_DATE;
dev->driver->major = DRIVER_MAJOR;
dev->driver->minor = DRIVER_MINOR;
dev->driver->patchlevel = DRIVER_PATCHLEVEL;
}
static int
tdfx_probe(device_t kdev)
{
return drm_probe(kdev, tdfx_pciidlist);
}
static int
tdfx_attach(device_t kdev)
{
struct drm_device *dev = device_get_softc(kdev);
dev->driver = malloc(sizeof(struct drm_driver_info), DRM_MEM_DRIVER,
M_WAITOK | M_ZERO);
tdfx_configure(dev);
return drm_attach(kdev, tdfx_pciidlist);
}
static int
tdfx_detach(device_t kdev)
{
struct drm_device *dev = device_get_softc(kdev);
int ret;
ret = drm_detach(kdev);
free(dev->driver, DRM_MEM_DRIVER);
return ret;
}
static device_method_t tdfx_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, tdfx_probe),
DEVMETHOD(device_attach, tdfx_attach),
DEVMETHOD(device_detach, tdfx_detach),
{ 0, 0 }
};
static driver_t tdfx_driver = {
"drm",
tdfx_methods,
sizeof(struct drm_device)
};
extern devclass_t drm_devclass;
DRIVER_MODULE(tdfx, vgapci, tdfx_driver, drm_devclass, 0, 0);
MODULE_DEPEND(tdfx, drm, 1, 1, 1);
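/*
 * Explanatory note (added for clarity; not part of the original source):
 * DRIVER_MODULE() registers tdfx_driver -- whose newbus name is "drm" --
 * on the vgapci bus in the shared drm devclass, so newbus calls
 * tdfx_probe()/tdfx_attach() for candidate devices, and those in turn
 * delegate to the drm core's drm_probe()/drm_attach() with the 3dfx PCI
 * ID table. MODULE_DEPEND() records the load-time dependency on the drm
 * core module.
 */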

View File

@ -1,50 +0,0 @@
/* tdfx.h -- 3dfx DRM template customization -*- linux-c -*-
* Created: Wed Feb 14 12:32:32 2001 by gareth@valinux.com
*/
/*-
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Gareth Hughes <gareth@valinux.com>
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#ifndef __TDFX_H__
#define __TDFX_H__
/* General customization:
*/
#define DRIVER_AUTHOR "VA Linux Systems Inc."
#define DRIVER_NAME "tdfx"
#define DRIVER_DESC "3dfx Banshee/Voodoo3+"
#define DRIVER_DATE "20010216"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0
#endif

File diff suppressed because it is too large

View File

@ -1,767 +0,0 @@
/* via_dma.c -- DMA support for the VIA Unichrome/Pro
*
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Copyright 2004 Digeo, Inc., Palo Alto, CA, U.S.A.
* All Rights Reserved.
*
* Copyright 2004 The Unichrome project.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Tungsten Graphics,
* Erdi Chen,
* Thomas Hellstrom.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "dev/drm/drmP.h"
#include "dev/drm/drm.h"
#include "dev/drm/via_drm.h"
#include "dev/drm/via_drv.h"
#include "dev/drm/via_3d_reg.h"
#define CMDBUF_ALIGNMENT_SIZE (0x100)
#define CMDBUF_ALIGNMENT_MASK (0x0ff)
/* defines for VIA 3D registers */
#define VIA_REG_STATUS 0x400
#define VIA_REG_TRANSET 0x43C
#define VIA_REG_TRANSPACE 0x440
/* VIA_REG_STATUS(0x400): Engine Status */
#define VIA_CMD_RGTR_BUSY 0x00000080 /* Command Regulator is busy */
#define VIA_2D_ENG_BUSY 0x00000001 /* 2D Engine is busy */
#define VIA_3D_ENG_BUSY 0x00000002 /* 3D Engine is busy */
#define VIA_VR_QUEUE_BUSY 0x00020000 /* Virtual Queue is busy */
#define SetReg2DAGP(nReg, nData) { \
*((uint32_t *)(vb)) = ((nReg) >> 2) | HALCYON_HEADER1; \
*((uint32_t *)(vb) + 1) = (nData); \
vb = ((uint32_t *)vb) + 2; \
dev_priv->dma_low +=8; \
}
#define via_flush_write_combine() DRM_MEMORYBARRIER()
#define VIA_OUT_RING_QW(w1,w2) \
*vb++ = (w1); \
*vb++ = (w2); \
dev_priv->dma_low += 8;
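/*
 * Explanatory note (added for clarity; not part of the original source):
 * both macros above emit one quadword into the AGP command ring through
 * the local pointer 'vb' and advance dev_priv->dma_low by 8 bytes.
 * SetReg2DAGP prepends a HALCYON_HEADER1 command word carrying the
 * register index (nReg >> 2), so the pair is consumed as "write nData to
 * 2D register nReg" (see via_dummy_bitblt() below), while VIA_OUT_RING_QW
 * stores the two words verbatim. Both require a uint32_t *vb pointing
 * into the ring, which is why they are only used inside the ring helpers
 * in this file.
 */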
static void via_cmdbuf_start(drm_via_private_t * dev_priv);
static void via_cmdbuf_pause(drm_via_private_t * dev_priv);
static void via_cmdbuf_reset(drm_via_private_t * dev_priv);
static void via_cmdbuf_rewind(drm_via_private_t * dev_priv);
static int via_wait_idle(drm_via_private_t * dev_priv);
static void via_pad_cache(drm_via_private_t * dev_priv, int qwords);
/*
* Free space in command buffer.
*/
static uint32_t via_cmdbuf_space(drm_via_private_t * dev_priv)
{
uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base;
return ((hw_addr <= dev_priv->dma_low) ?
(dev_priv->dma_high + hw_addr - dev_priv->dma_low) :
(hw_addr - dev_priv->dma_low));
}
/*
* How much does the command regulator lag behind?
*/
static uint32_t via_cmdbuf_lag(drm_via_private_t * dev_priv)
{
uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base;
return ((hw_addr <= dev_priv->dma_low) ?
(dev_priv->dma_low - hw_addr) :
(dev_priv->dma_wrap + dev_priv->dma_low - hw_addr));
}
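/*
 * Illustrative sketch (hypothetical helpers, not part of the original
 * driver): the two functions above treat the AGP command buffer as a
 * circular ring. Writing 'low' for the CPU write offset (dma_low), 'hw'
 * for the hardware read offset relative to the AGP base, 'high' for the
 * ring size and 'wrap' for the offset of the last jump, the arithmetic
 * reduces to the following, which makes the wrap-around cases easier to
 * follow.
 */
static inline uint32_t
example_ring_space(uint32_t hw, uint32_t low, uint32_t high)
{
	/* Free space: distance from the write offset forward to the reader. */
	return ((hw <= low) ? (high + hw - low) : (hw - low));
}

static inline uint32_t
example_ring_lag(uint32_t hw, uint32_t low, uint32_t wrap)
{
	/* Lag: distance from the reader forward to the write offset. */
	return ((hw <= low) ? (low - hw) : (wrap + low - hw));
}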
/*
* Check that the given size fits in the buffer, otherwise wait.
*/
static inline int
via_cmdbuf_wait(drm_via_private_t * dev_priv, unsigned int size)
{
uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
uint32_t cur_addr, hw_addr, next_addr;
volatile uint32_t *hw_addr_ptr;
uint32_t count;
hw_addr_ptr = dev_priv->hw_addr_ptr;
cur_addr = dev_priv->dma_low;
next_addr = cur_addr + size + 512 * 1024;
count = 1000000;
do {
hw_addr = *hw_addr_ptr - agp_base;
if (count-- == 0) {
DRM_ERROR
("via_cmdbuf_wait timed out hw %x cur_addr %x next_addr %x\n",
hw_addr, cur_addr, next_addr);
return -1;
}
if ((cur_addr < hw_addr) && (next_addr >= hw_addr))
DRM_UDELAY(1000);
} while ((cur_addr < hw_addr) && (next_addr >= hw_addr));
return 0;
}
/*
* Checks whether the buffer head has reached the end. Rewinds the ring
* buffer when necessary.
*
* Returns virtual pointer to ring buffer.
*/
static inline uint32_t *via_check_dma(drm_via_private_t * dev_priv,
unsigned int size)
{
if ((dev_priv->dma_low + size + 4 * CMDBUF_ALIGNMENT_SIZE) >
dev_priv->dma_high) {
via_cmdbuf_rewind(dev_priv);
}
if (via_cmdbuf_wait(dev_priv, size) != 0) {
return NULL;
}
return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low);
}
int via_dma_cleanup(struct drm_device * dev)
{
drm_via_blitq_t *blitq;
int i;
if (dev->dev_private) {
drm_via_private_t *dev_priv =
(drm_via_private_t *) dev->dev_private;
if (dev_priv->ring.virtual_start) {
via_cmdbuf_reset(dev_priv);
drm_core_ioremapfree(&dev_priv->ring.map, dev);
dev_priv->ring.virtual_start = NULL;
}
for (i=0; i< VIA_NUM_BLIT_ENGINES; ++i) {
blitq = dev_priv->blit_queues + i;
mtx_destroy(&blitq->blit_lock);
}
}
return 0;
}
static int via_initialize(struct drm_device * dev,
drm_via_private_t * dev_priv,
drm_via_dma_init_t * init)
{
if (!dev_priv || !dev_priv->mmio) {
DRM_ERROR("via_dma_init called before via_map_init\n");
return -EFAULT;
}
if (dev_priv->ring.virtual_start != NULL) {
DRM_ERROR("called again without calling cleanup\n");
return -EFAULT;
}
if (!dev->agp || !dev->agp->base) {
DRM_ERROR("called with no agp memory available\n");
return -EFAULT;
}
if (dev_priv->chipset == VIA_DX9_0) {
DRM_ERROR("AGP DMA is not supported on this chip\n");
return -EINVAL;
}
dev_priv->ring.map.offset = dev->agp->base + init->offset;
dev_priv->ring.map.size = init->size;
dev_priv->ring.map.type = 0;
dev_priv->ring.map.flags = 0;
dev_priv->ring.map.mtrr = 0;
drm_core_ioremap_wc(&dev_priv->ring.map, dev);
if (dev_priv->ring.map.virtual == NULL) {
via_dma_cleanup(dev);
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
return -ENOMEM;
}
dev_priv->ring.virtual_start = dev_priv->ring.map.virtual;
dev_priv->dma_ptr = dev_priv->ring.virtual_start;
dev_priv->dma_low = 0;
dev_priv->dma_high = init->size;
dev_priv->dma_wrap = init->size;
dev_priv->dma_offset = init->offset;
dev_priv->last_pause_ptr = NULL;
dev_priv->hw_addr_ptr =
(volatile uint32_t *)((char *)dev_priv->mmio->virtual +
init->reg_pause_addr);
via_cmdbuf_start(dev_priv);
return 0;
}
static int via_dma_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
drm_via_dma_init_t *init = data;
int retcode = 0;
switch (init->func) {
case VIA_INIT_DMA:
if (!DRM_SUSER(DRM_CURPROC))
retcode = -EPERM;
else
retcode = via_initialize(dev, dev_priv, init);
break;
case VIA_CLEANUP_DMA:
if (!DRM_SUSER(DRM_CURPROC))
retcode = -EPERM;
else
retcode = via_dma_cleanup(dev);
break;
case VIA_DMA_INITIALIZED:
retcode = (dev_priv->ring.virtual_start != NULL) ?
0 : -EFAULT;
break;
default:
retcode = -EINVAL;
break;
}
return retcode;
}
static int via_dispatch_cmdbuffer(struct drm_device * dev, drm_via_cmdbuffer_t * cmd)
{
drm_via_private_t *dev_priv;
uint32_t *vb;
int ret;
dev_priv = (drm_via_private_t *) dev->dev_private;
if (dev_priv->ring.virtual_start == NULL) {
DRM_ERROR("called without initializing AGP ring buffer.\n");
return -EFAULT;
}
if (cmd->size > VIA_PCI_BUF_SIZE) {
return -ENOMEM;
}
if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size))
return -EFAULT;
/*
* Running this function on AGP memory is dead slow. Therefore
* we run it on a temporary cacheable system memory buffer and
* copy it to AGP memory when ready.
*/
if ((ret =
via_verify_command_stream((uint32_t *) dev_priv->pci_buf,
cmd->size, dev, 1))) {
return ret;
}
vb = via_check_dma(dev_priv, (cmd->size < 0x100) ? 0x102 : cmd->size);
if (vb == NULL) {
return -EAGAIN;
}
memcpy(vb, dev_priv->pci_buf, cmd->size);
dev_priv->dma_low += cmd->size;
/*
* Small submissions somehow stall the CPU (AGP cache effects?), so
* pad them to a greater size.
*/
if (cmd->size < 0x100)
via_pad_cache(dev_priv, (0x100 - cmd->size) >> 3);
via_cmdbuf_pause(dev_priv);
return 0;
}
int via_driver_dma_quiescent(struct drm_device * dev)
{
drm_via_private_t *dev_priv = dev->dev_private;
if (!via_wait_idle(dev_priv)) {
return -EBUSY;
}
return 0;
}
static int via_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
LOCK_TEST_WITH_RETURN(dev, file_priv);
return via_driver_dma_quiescent(dev);
}
static int via_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_via_cmdbuffer_t *cmdbuf = data;
int ret;
LOCK_TEST_WITH_RETURN(dev, file_priv);
DRM_DEBUG("buf %p size %lu\n", cmdbuf->buf, cmdbuf->size);
ret = via_dispatch_cmdbuffer(dev, cmdbuf);
if (ret) {
return ret;
}
return 0;
}
static int via_dispatch_pci_cmdbuffer(struct drm_device * dev,
drm_via_cmdbuffer_t * cmd)
{
drm_via_private_t *dev_priv = dev->dev_private;
int ret;
if (cmd->size > VIA_PCI_BUF_SIZE) {
return -ENOMEM;
}
if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size))
return -EFAULT;
if ((ret =
via_verify_command_stream((uint32_t *) dev_priv->pci_buf,
cmd->size, dev, 0))) {
return ret;
}
ret =
via_parse_command_stream(dev, (const uint32_t *)dev_priv->pci_buf,
cmd->size);
return ret;
}
static int via_pci_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_via_cmdbuffer_t *cmdbuf = data;
int ret;
LOCK_TEST_WITH_RETURN(dev, file_priv);
DRM_DEBUG("buf %p size %lu\n", cmdbuf->buf, cmdbuf->size);
ret = via_dispatch_pci_cmdbuffer(dev, cmdbuf);
if (ret) {
return ret;
}
return 0;
}
static inline uint32_t *via_align_buffer(drm_via_private_t * dev_priv,
uint32_t * vb, int qw_count)
{
for (; qw_count > 0; --qw_count) {
VIA_OUT_RING_QW(HC_DUMMY, HC_DUMMY);
}
return vb;
}
/*
* This function is used internally by ring buffer management code.
*
* Returns virtual pointer to ring buffer.
*/
static inline uint32_t *via_get_dma(drm_via_private_t * dev_priv)
{
return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low);
}
/*
* Hooks a segment of data into the tail of the ring-buffer by
* modifying the pause address stored in the buffer itself. If
* the regulator has already paused, restart it.
*/
static int via_hook_segment(drm_via_private_t * dev_priv,
uint32_t pause_addr_hi, uint32_t pause_addr_lo,
int no_pci_fire)
{
int paused, count;
volatile uint32_t *paused_at = dev_priv->last_pause_ptr;
uint32_t reader,ptr;
uint32_t diff;
paused = 0;
via_flush_write_combine();
(void) *(volatile uint32_t *)(via_get_dma(dev_priv) -1);
*paused_at = pause_addr_lo;
via_flush_write_combine();
(void) *paused_at;
reader = *(dev_priv->hw_addr_ptr);
ptr = ((volatile char *)paused_at - dev_priv->dma_ptr) +
dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4;
dev_priv->last_pause_ptr = via_get_dma(dev_priv) - 1;
/*
* If there is a possibility that the command reader will
* miss the new pause address and pause on the old one,
* we need to program the new start address using PCI.
*/
diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
count = 10000000;
while(diff == 0 && count--) {
paused = (VIA_READ(0x41c) & 0x80000000);
if (paused)
break;
reader = *(dev_priv->hw_addr_ptr);
diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
}
paused = VIA_READ(0x41c) & 0x80000000;
if (paused && !no_pci_fire) {
reader = *(dev_priv->hw_addr_ptr);
diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
diff &= (dev_priv->dma_high - 1);
if (diff != 0 && diff < (dev_priv->dma_high >> 1)) {
DRM_ERROR("Paused at incorrect address. "
"0x%08x, 0x%08x 0x%08x\n",
ptr, reader, dev_priv->dma_diff);
} else if (diff == 0) {
/*
* There is a concern that these writes may stall the PCI bus
* if the GPU is not idle. However, idling the GPU first
* doesn't make a difference.
*/
VIA_WRITE(VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi);
VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo);
(void)VIA_READ(VIA_REG_TRANSPACE);
}
}
return paused;
}
static int via_wait_idle(drm_via_private_t * dev_priv)
{
int count = 10000000;
while (!(VIA_READ(VIA_REG_STATUS) & VIA_VR_QUEUE_BUSY) && --count)
;
while (count && (VIA_READ(VIA_REG_STATUS) &
(VIA_CMD_RGTR_BUSY | VIA_2D_ENG_BUSY |
VIA_3D_ENG_BUSY)))
--count;
return count;
}
static uint32_t *via_align_cmd(drm_via_private_t * dev_priv, uint32_t cmd_type,
uint32_t addr, uint32_t * cmd_addr_hi,
uint32_t * cmd_addr_lo, int skip_wait)
{
uint32_t agp_base;
uint32_t cmd_addr, addr_lo, addr_hi;
uint32_t *vb;
uint32_t qw_pad_count;
if (!skip_wait)
via_cmdbuf_wait(dev_priv, 2 * CMDBUF_ALIGNMENT_SIZE);
vb = via_get_dma(dev_priv);
VIA_OUT_RING_QW(HC_HEADER2 | ((VIA_REG_TRANSET >> 2) << 12) |
(VIA_REG_TRANSPACE >> 2), HC_ParaType_PreCR << 16);
agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
qw_pad_count = (CMDBUF_ALIGNMENT_SIZE >> 3) -
((dev_priv->dma_low & CMDBUF_ALIGNMENT_MASK) >> 3);
cmd_addr = (addr) ? addr :
agp_base + dev_priv->dma_low - 8 + (qw_pad_count << 3);
addr_lo = ((HC_SubA_HAGPBpL << 24) | (cmd_type & HC_HAGPBpID_MASK) |
(cmd_addr & HC_HAGPBpL_MASK));
addr_hi = ((HC_SubA_HAGPBpH << 24) | (cmd_addr >> 24));
vb = via_align_buffer(dev_priv, vb, qw_pad_count - 1);
VIA_OUT_RING_QW(*cmd_addr_hi = addr_hi, *cmd_addr_lo = addr_lo);
return vb;
}
static void via_cmdbuf_start(drm_via_private_t * dev_priv)
{
uint32_t pause_addr_lo, pause_addr_hi;
uint32_t start_addr, start_addr_lo;
uint32_t end_addr, end_addr_lo;
uint32_t command;
uint32_t agp_base;
uint32_t ptr;
uint32_t reader;
int count;
dev_priv->dma_low = 0;
agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
start_addr = agp_base;
end_addr = agp_base + dev_priv->dma_high;
start_addr_lo = ((HC_SubA_HAGPBstL << 24) | (start_addr & 0xFFFFFF));
end_addr_lo = ((HC_SubA_HAGPBendL << 24) | (end_addr & 0xFFFFFF));
command = ((HC_SubA_HAGPCMNT << 24) | (start_addr >> 24) |
((end_addr & 0xff000000) >> 16));
dev_priv->last_pause_ptr =
via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0,
&pause_addr_hi, &pause_addr_lo, 1) - 1;
via_flush_write_combine();
(void) *(volatile uint32_t *)dev_priv->last_pause_ptr;
VIA_WRITE(VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
VIA_WRITE(VIA_REG_TRANSPACE, command);
VIA_WRITE(VIA_REG_TRANSPACE, start_addr_lo);
VIA_WRITE(VIA_REG_TRANSPACE, end_addr_lo);
VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi);
VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo);
DRM_WRITEMEMORYBARRIER();
VIA_WRITE(VIA_REG_TRANSPACE, command | HC_HAGPCMNT_MASK);
(void)VIA_READ(VIA_REG_TRANSPACE);
dev_priv->dma_diff = 0;
count = 10000000;
while (!(VIA_READ(0x41c) & 0x80000000) && count--);
reader = *(dev_priv->hw_addr_ptr);
ptr = ((volatile char *)dev_priv->last_pause_ptr - dev_priv->dma_ptr) +
dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4;
/*
* This is the difference between where we tell the
* command reader to pause and where it actually pauses.
* This differs between hw implementation so we need to
* detect it.
*/
dev_priv->dma_diff = ptr - reader;
}
static void via_pad_cache(drm_via_private_t * dev_priv, int qwords)
{
uint32_t *vb;
via_cmdbuf_wait(dev_priv, qwords + 2);
vb = via_get_dma(dev_priv);
VIA_OUT_RING_QW(HC_HEADER2, HC_ParaType_NotTex << 16);
via_align_buffer(dev_priv, vb, qwords);
}
static inline void via_dummy_bitblt(drm_via_private_t * dev_priv)
{
uint32_t *vb = via_get_dma(dev_priv);
SetReg2DAGP(0x0C, (0 | (0 << 16)));
SetReg2DAGP(0x10, 0 | (0 << 16));
SetReg2DAGP(0x0, 0x1 | 0x2000 | 0xAA000000);
}
static void via_cmdbuf_jump(drm_via_private_t * dev_priv)
{
uint32_t agp_base;
uint32_t pause_addr_lo, pause_addr_hi;
uint32_t jump_addr_lo, jump_addr_hi;
volatile uint32_t *last_pause_ptr;
uint32_t dma_low_save1, dma_low_save2;
agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
via_align_cmd(dev_priv, HC_HAGPBpID_JUMP, 0, &jump_addr_hi,
&jump_addr_lo, 0);
dev_priv->dma_wrap = dev_priv->dma_low;
/*
* Wrap command buffer to the beginning.
*/
dev_priv->dma_low = 0;
if (via_cmdbuf_wait(dev_priv, CMDBUF_ALIGNMENT_SIZE) != 0) {
DRM_ERROR("via_cmdbuf_jump failed\n");
}
via_dummy_bitblt(dev_priv);
via_dummy_bitblt(dev_priv);
last_pause_ptr =
via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
&pause_addr_lo, 0) - 1;
via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
&pause_addr_lo, 0);
*last_pause_ptr = pause_addr_lo;
dma_low_save1 = dev_priv->dma_low;
/*
* Now, set a trap that will pause the regulator if it tries to rerun the old
* command buffer. (Which may happen if via_hook_segment detects a command regulator pause
* and reissues the jump command over PCI, while the regulator has already taken the jump
* and actually paused at the current buffer end).
* There appears to be no other way to detect this condition, since the hw_addr_pointer
* does not seem to get updated immediately when a jump occurs.
*/
last_pause_ptr =
via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
&pause_addr_lo, 0) - 1;
via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
&pause_addr_lo, 0);
*last_pause_ptr = pause_addr_lo;
dma_low_save2 = dev_priv->dma_low;
dev_priv->dma_low = dma_low_save1;
via_hook_segment(dev_priv, jump_addr_hi, jump_addr_lo, 0);
dev_priv->dma_low = dma_low_save2;
via_hook_segment(dev_priv, pause_addr_hi, pause_addr_lo, 0);
}
static void via_cmdbuf_rewind(drm_via_private_t * dev_priv)
{
via_cmdbuf_jump(dev_priv);
}
static void via_cmdbuf_flush(drm_via_private_t * dev_priv, uint32_t cmd_type)
{
uint32_t pause_addr_lo, pause_addr_hi;
via_align_cmd(dev_priv, cmd_type, 0, &pause_addr_hi, &pause_addr_lo, 0);
via_hook_segment(dev_priv, pause_addr_hi, pause_addr_lo, 0);
}
static void via_cmdbuf_pause(drm_via_private_t * dev_priv)
{
via_cmdbuf_flush(dev_priv, HC_HAGPBpID_PAUSE);
}
static void via_cmdbuf_reset(drm_via_private_t * dev_priv)
{
via_cmdbuf_flush(dev_priv, HC_HAGPBpID_STOP);
via_wait_idle(dev_priv);
}
/*
* User interface to the space and lag functions.
*/
static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_via_cmdbuf_size_t *d_siz = data;
int ret = 0;
uint32_t tmp_size, count;
drm_via_private_t *dev_priv;
DRM_DEBUG("\n");
LOCK_TEST_WITH_RETURN(dev, file_priv);
dev_priv = (drm_via_private_t *) dev->dev_private;
if (dev_priv->ring.virtual_start == NULL) {
DRM_ERROR("called without initializing AGP ring buffer.\n");
return -EFAULT;
}
count = 1000000;
tmp_size = d_siz->size;
switch (d_siz->func) {
case VIA_CMDBUF_SPACE:
while (((tmp_size = via_cmdbuf_space(dev_priv)) < d_siz->size)
&& --count) {
if (!d_siz->wait) {
break;
}
}
if (!count) {
DRM_ERROR("VIA_CMDBUF_SPACE timed out.\n");
ret = -EAGAIN;
}
break;
case VIA_CMDBUF_LAG:
while (((tmp_size = via_cmdbuf_lag(dev_priv)) > d_siz->size)
&& --count) {
if (!d_siz->wait) {
break;
}
}
if (!count) {
DRM_ERROR("VIA_CMDBUF_LAG timed out.\n");
ret = -EAGAIN;
}
break;
default:
ret = -EFAULT;
}
d_siz->size = tmp_size;
return ret;
}
struct drm_ioctl_desc via_ioctls[] = {
DRM_IOCTL_DEF(DRM_VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH),
DRM_IOCTL_DEF(DRM_VIA_FREEMEM, via_mem_free, DRM_AUTH),
DRM_IOCTL_DEF(DRM_VIA_AGP_INIT, via_agp_init, DRM_AUTH|DRM_MASTER),
DRM_IOCTL_DEF(DRM_VIA_FB_INIT, via_fb_init, DRM_AUTH|DRM_MASTER),
DRM_IOCTL_DEF(DRM_VIA_MAP_INIT, via_map_init, DRM_AUTH|DRM_MASTER),
DRM_IOCTL_DEF(DRM_VIA_DEC_FUTEX, via_decoder_futex, DRM_AUTH),
DRM_IOCTL_DEF(DRM_VIA_DMA_INIT, via_dma_init, DRM_AUTH),
DRM_IOCTL_DEF(DRM_VIA_CMDBUFFER, via_cmdbuffer, DRM_AUTH),
DRM_IOCTL_DEF(DRM_VIA_FLUSH, via_flush_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_VIA_PCICMD, via_pci_cmdbuffer, DRM_AUTH),
DRM_IOCTL_DEF(DRM_VIA_CMDBUF_SIZE, via_cmdbuf_size, DRM_AUTH),
DRM_IOCTL_DEF(DRM_VIA_WAIT_IRQ, via_wait_irq, DRM_AUTH),
DRM_IOCTL_DEF(DRM_VIA_DMA_BLIT, via_dma_blit, DRM_AUTH),
DRM_IOCTL_DEF(DRM_VIA_BLIT_SYNC, via_dma_blit_sync, DRM_AUTH)
};
int via_max_ioctl = DRM_ARRAY_SIZE(via_ioctls);

View File

@ -1,790 +0,0 @@
/* via_dmablit.c -- PCI DMA BitBlt support for the VIA Unichrome/Pro
*
* Copyright (C) 2005 Thomas Hellstrom, All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Thomas Hellstrom.
* Partially based on code obtained from Digeo Inc.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
* Unmaps the DMA mappings.
* FIXME: Is this a NoOp on x86? Also
* FIXME: What happens if this one is called and a pending blit has previously done
* the same DMA mappings?
*/
#include "dev/drm/drmP.h"
#include "dev/drm/via_drm.h"
#include "dev/drm/via_drv.h"
#include "dev/drm/via_dmablit.h"
#define VIA_PGDN(x) (((unsigned long)(x)) & ~PAGE_MASK)
#define VIA_PGOFF(x) (((unsigned long)(x)) & PAGE_MASK)
#define VIA_PFN(x) ((unsigned long)(x) >> PAGE_SHIFT)
typedef struct _drm_via_descriptor {
uint32_t mem_addr;
uint32_t dev_addr;
uint32_t size;
uint32_t next;
} drm_via_descriptor_t;
static void via_dmablit_timer(void *arg);
/*
* Unmap a DMA mapping.
*/
static void
via_unmap_blit_from_device(drm_via_sg_info_t *vsg)
{
int num_desc = vsg->num_desc;
unsigned cur_descriptor_page = num_desc / vsg->descriptors_per_page;
unsigned descriptor_this_page = num_desc % vsg->descriptors_per_page;
drm_via_descriptor_t *desc_ptr = vsg->desc_pages[cur_descriptor_page] +
descriptor_this_page;
dma_addr_t next = vsg->chain_start;
while(num_desc--) {
if (descriptor_this_page-- == 0) {
cur_descriptor_page--;
descriptor_this_page = vsg->descriptors_per_page - 1;
desc_ptr = vsg->desc_pages[cur_descriptor_page] +
descriptor_this_page;
}
next = (dma_addr_t) desc_ptr->next;
desc_ptr--;
}
}
/*
* If mode = 0, count how many descriptors are needed.
* If mode = 1, map the DMA pages for the device, and also assemble and map the descriptors.
* Descriptors are run in reverse order by the hardware because we are not allowed to update the
* 'next' field without syncing calls when the descriptor is already mapped.
*/
static void
via_map_blit_for_device(const drm_via_dmablit_t *xfer,
drm_via_sg_info_t *vsg, int mode)
{
unsigned cur_descriptor_page = 0;
unsigned num_descriptors_this_page = 0;
unsigned char *mem_addr = xfer->mem_addr;
unsigned char *cur_mem;
unsigned char *first_addr = (unsigned char *)VIA_PGDN(mem_addr);
uint32_t fb_addr = xfer->fb_addr;
uint32_t cur_fb;
unsigned long line_len;
unsigned remaining_len;
int num_desc = 0;
int cur_line;
dma_addr_t next = 0 | VIA_DMA_DPR_EC;
drm_via_descriptor_t *desc_ptr = NULL;
if (mode == 1)
desc_ptr = vsg->desc_pages[cur_descriptor_page];
for (cur_line = 0; cur_line < xfer->num_lines; ++cur_line) {
line_len = xfer->line_length;
cur_fb = fb_addr;
cur_mem = mem_addr;
while (line_len > 0) {
remaining_len = min(PAGE_SIZE - VIA_PGOFF(cur_mem),
line_len);
line_len -= remaining_len;
if (mode == 1) {
desc_ptr->mem_addr =
VM_PAGE_TO_PHYS(
vsg->pages[VIA_PFN(cur_mem) -
VIA_PFN(first_addr)]) + VIA_PGOFF(cur_mem);
desc_ptr->dev_addr = cur_fb;
desc_ptr->size = remaining_len;
desc_ptr->next = (uint32_t) next;
next = vtophys(desc_ptr);
desc_ptr++;
if (++num_descriptors_this_page >= vsg->descriptors_per_page) {
num_descriptors_this_page = 0;
desc_ptr = vsg->desc_pages[++cur_descriptor_page];
}
}
num_desc++;
cur_mem += remaining_len;
cur_fb += remaining_len;
}
mem_addr += xfer->mem_stride;
fb_addr += xfer->fb_stride;
}
if (mode == 1) {
vsg->chain_start = next;
vsg->state = dr_via_device_mapped;
}
vsg->num_desc = num_desc;
}
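/*
 * Illustrative sketch (hypothetical helper, not used by the driver): the
 * mapping loop above emits one descriptor per (line, page) intersection,
 * so the count produced by the mode == 0 pass is just the number of pages
 * each line touches, summed over all lines. Assumes line_length > 0, as
 * enforced by via_build_sg_info() below.
 */
static inline int
example_count_descriptors(unsigned long mem_addr, unsigned long line_length,
    unsigned long mem_stride, int num_lines)
{
	unsigned long addr;
	int line, count = 0;

	for (line = 0; line < num_lines; ++line) {
		addr = mem_addr + (unsigned long)line * mem_stride;
		/* Pages touched by the half-open range [addr, addr + line_length). */
		count += VIA_PFN(addr + line_length - 1) - VIA_PFN(addr) + 1;
	}
	return count;
}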
/*
* Function that frees up all resources for a blit. It is usable even if the
* blit info has only been partially built as long as the status enum is consistent
* with the actual status of the used resources.
*/
static void
via_free_sg_info(drm_via_sg_info_t *vsg)
{
vm_page_t page;
int i;
switch(vsg->state) {
case dr_via_device_mapped:
via_unmap_blit_from_device(vsg);
case dr_via_desc_pages_alloc:
for (i=0; i<vsg->num_desc_pages; ++i) {
if (vsg->desc_pages[i] != NULL)
free(vsg->desc_pages[i], DRM_MEM_PAGES);
}
free(vsg->desc_pages, DRM_MEM_DRIVER);
case dr_via_pages_locked:
for (i=0; i < vsg->num_pages; ++i) {
page = vsg->pages[i];
vm_page_lock(page);
vm_page_unwire(page, PQ_INACTIVE);
vm_page_unlock(page);
}
case dr_via_pages_alloc:
free(vsg->pages, DRM_MEM_DRIVER);
default:
vsg->state = dr_via_sg_init;
}
free(vsg->bounce_buffer, DRM_MEM_DRIVER);
vsg->bounce_buffer = NULL;
vsg->free_on_sequence = 0;
}
/*
* Fire a blit engine.
*/
static void
via_fire_dmablit(struct drm_device *dev, drm_via_sg_info_t *vsg, int engine)
{
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
VIA_WRITE(VIA_PCI_DMA_MAR0 + engine*0x10, 0);
VIA_WRITE(VIA_PCI_DMA_DAR0 + engine*0x10, 0);
VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |
VIA_DMA_CSR_DE);
VIA_WRITE(VIA_PCI_DMA_MR0 + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
VIA_WRITE(VIA_PCI_DMA_BCR0 + engine*0x10, 0);
VIA_WRITE(VIA_PCI_DMA_DPR0 + engine*0x10, vsg->chain_start);
DRM_WRITEMEMORYBARRIER();
VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS);
(void)VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04);
}
/*
* Obtain a page pointer array and lock all pages into system memory. A segmentation violation will
* occur here if the calling user does not have access to the submitted address.
*/
static int
via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
{
unsigned long first_pfn = VIA_PFN(xfer->mem_addr);
vm_page_t m;
int i;
vsg->num_pages = VIA_PFN(xfer->mem_addr +
(xfer->num_lines * xfer->mem_stride -1)) - first_pfn + 1;
if (NULL == (vsg->pages = malloc(sizeof(vm_page_t) * vsg->num_pages,
DRM_MEM_DRIVER, M_NOWAIT)))
return -ENOMEM;
vsg->state = dr_via_pages_alloc;
if (vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
(vm_offset_t)xfer->mem_addr, vsg->num_pages * PAGE_SIZE,
VM_PROT_READ | VM_PROT_WRITE, vsg->pages, vsg->num_pages) < 0)
return -EACCES;
for (i = 0; i < vsg->num_pages; i++) {
m = vsg->pages[i];
vm_page_lock(m);
vm_page_wire(m);
vm_page_unhold(m);
vm_page_unlock(m);
}
vsg->state = dr_via_pages_locked;
DRM_DEBUG("DMA pages locked\n");
return 0;
}
/*
* Allocate DMA-capable memory for the blit descriptor chain, and an array that keeps track of the
* pages we allocate. We don't want to use kmalloc for the descriptor chain because it may be
* quite large for some blits, and the pages don't need to be contiguous.
*/
static int
via_alloc_desc_pages(drm_via_sg_info_t *vsg)
{
int i;
vsg->descriptors_per_page = PAGE_SIZE / sizeof(drm_via_descriptor_t);
vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
vsg->descriptors_per_page;
if (NULL == (vsg->desc_pages = malloc(vsg->num_desc_pages *
sizeof(void *), DRM_MEM_DRIVER, M_NOWAIT | M_ZERO)))
return -ENOMEM;
vsg->state = dr_via_desc_pages_alloc;
for (i = 0; i < vsg->num_desc_pages; ++i) {
if (NULL == (vsg->desc_pages[i] =
(drm_via_descriptor_t *)malloc(PAGE_SIZE, DRM_MEM_PAGES,
M_NOWAIT | M_ZERO)))
return -ENOMEM;
}
DRM_DEBUG("Allocated %d pages for %d descriptors.\n",
vsg->num_desc_pages, vsg->num_desc);
return 0;
}
static void
via_abort_dmablit(struct drm_device *dev, int engine)
{
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TA);
}
static void
via_dmablit_engine_off(struct drm_device *dev, int engine)
{
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);
}
/*
* The dmablit part of the IRQ handler. Trying to do only reasonably fast things here.
* The rest, like unmapping and freeing memory for done blits, is done in a separate workqueue
* task. Basically the task of the interrupt handler is to submit a new blit to the engine, while
* the workqueue task takes care of processing associated with the old blit.
*/
void
via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
{
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
int cur;
int done_transfer;
uint32_t status = 0;
DRM_DEBUG("DMA blit handler called. engine = %d, from_irq = %d, blitq = 0x%lx\n",
engine, from_irq, (unsigned long) blitq);
mtx_lock(&blitq->blit_lock);
done_transfer = blitq->is_active &&
(( status = VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD);
done_transfer = done_transfer || ( blitq->aborting && !(status & VIA_DMA_CSR_DE));
cur = blitq->cur;
if (done_transfer) {
blitq->blits[cur]->aborted = blitq->aborting;
blitq->done_blit_handle++;
DRM_WAKEUP(&blitq->blit_queue[cur]);
cur++;
if (cur >= VIA_NUM_BLIT_SLOTS)
cur = 0;
blitq->cur = cur;
/*
* Clear transfer done flag.
*/
VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD);
blitq->is_active = 0;
blitq->aborting = 0;
taskqueue_enqueue(taskqueue_swi, &blitq->wq);
} else if (blitq->is_active && (ticks >= blitq->end)) {
/*
* Abort transfer after one second.
*/
via_abort_dmablit(dev, engine);
blitq->aborting = 1;
blitq->end = ticks + DRM_HZ;
}
if (!blitq->is_active) {
if (blitq->num_outstanding) {
via_fire_dmablit(dev, blitq->blits[cur], engine);
blitq->is_active = 1;
blitq->cur = cur;
blitq->num_outstanding--;
blitq->end = ticks + DRM_HZ;
if (!callout_pending(&blitq->poll_timer))
callout_reset(&blitq->poll_timer,
1, (timeout_t *)via_dmablit_timer,
(void *)blitq);
} else {
if (callout_pending(&blitq->poll_timer)) {
callout_stop(&blitq->poll_timer);
}
via_dmablit_engine_off(dev, engine);
}
}
mtx_unlock(&blitq->blit_lock);
}
/*
* Check whether this blit is still active, performing necessary locking.
*/
static int
via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_queue_head_t **queue)
{
uint32_t slot;
int active;
mtx_lock(&blitq->blit_lock);
/*
* Allow for handle wraparounds.
*/
active = ((blitq->done_blit_handle - handle) > (1 << 23)) &&
((blitq->cur_blit_handle - handle) <= (1 << 23));
if (queue && active) {
slot = handle - blitq->done_blit_handle + blitq->cur -1;
if (slot >= VIA_NUM_BLIT_SLOTS) {
slot -= VIA_NUM_BLIT_SLOTS;
}
*queue = blitq->blit_queue + slot;
}
mtx_unlock(&blitq->blit_lock);
return active;
}
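/*
 * Illustrative sketch (hypothetical helper): the "active" test above uses
 * modulo-2^32 arithmetic with a 2^23 window so the comparison keeps
 * working after the 32-bit handle counters wrap. Roughly, a handle counts
 * as active when it lies in the interval (done_blit_handle,
 * cur_blit_handle], i.e. it has been handed out but not yet retired.
 */
static inline int
example_handle_active(uint32_t done, uint32_t cur, uint32_t handle)
{
	return (((done - handle) > (1 << 23)) &&
	    ((cur - handle) <= (1 << 23)));
}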
/*
* Sync. Wait for at least three seconds for the blit to be performed.
*/
static int
via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
{
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
wait_queue_head_t *queue;
int ret = 0;
if (via_dmablit_active(blitq, engine, handle, &queue)) {
DRM_WAIT_ON(ret, *queue, 3 * DRM_HZ,
!via_dmablit_active(blitq, engine, handle, NULL));
}
DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n",
handle, engine, ret);
return ret;
}
/*
* A timer that regularly polls the blit engine in cases where we don't have interrupts:
* a) Broken hardware (typically those that don't have any video capture facility).
* b) Blit abort. The hardware doesn't send an interrupt when a blit is aborted.
* The timer and hardware IRQs can and do work in parallel. If the hardware has
* IRQs, it will shorten the latency somewhat.
*/
static void
via_dmablit_timer(void *arg)
{
drm_via_blitq_t *blitq = (drm_via_blitq_t *)arg;
struct drm_device *dev = blitq->dev;
int engine = (int)
(blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues);
DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine,
(unsigned long) jiffies);
via_dmablit_handler(dev, engine, 0);
if (!callout_pending(&blitq->poll_timer)) {
callout_schedule(&blitq->poll_timer, 1);
/*
* Rerun handler to delete timer if engines are off, and
* to shorten abort latency. This is a little nasty.
*/
via_dmablit_handler(dev, engine, 0);
}
}
/*
* Workqueue task that frees data and mappings associated with a blit.
* Also wakes up waiting processes. Each of these tasks handles one
* blit engine only and may not be called on each interrupt.
*/
static void
via_dmablit_workqueue(void *arg, int pending)
{
drm_via_blitq_t *blitq = (drm_via_blitq_t *)arg;
struct drm_device *dev = blitq->dev;
drm_via_sg_info_t *cur_sg;
int cur_released;
DRM_DEBUG("task called for blit engine %ld\n",(unsigned long)
(blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues));
mtx_lock(&blitq->blit_lock);
while(blitq->serviced != blitq->cur) {
cur_released = blitq->serviced++;
DRM_DEBUG("Releasing blit slot %d\n", cur_released);
if (blitq->serviced >= VIA_NUM_BLIT_SLOTS)
blitq->serviced = 0;
cur_sg = blitq->blits[cur_released];
blitq->num_free++;
mtx_unlock(&blitq->blit_lock);
DRM_WAKEUP(&blitq->busy_queue);
via_free_sg_info(cur_sg);
free(cur_sg, DRM_MEM_DRIVER);
mtx_lock(&blitq->blit_lock);
}
mtx_unlock(&blitq->blit_lock);
}
/*
* Init all blit engines. Currently we use two, but some hardware has four.
*/
void
via_init_dmablit(struct drm_device *dev)
{
int i,j;
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
drm_via_blitq_t *blitq;
for (i=0; i< VIA_NUM_BLIT_ENGINES; ++i) {
blitq = dev_priv->blit_queues + i;
blitq->dev = dev;
blitq->cur_blit_handle = 0;
blitq->done_blit_handle = 0;
blitq->head = 0;
blitq->cur = 0;
blitq->serviced = 0;
blitq->num_free = VIA_NUM_BLIT_SLOTS - 1;
blitq->num_outstanding = 0;
blitq->is_active = 0;
blitq->aborting = 0;
mtx_init(&blitq->blit_lock, "via_blit_lk", NULL, MTX_DEF);
for (j=0; j<VIA_NUM_BLIT_SLOTS; ++j) {
DRM_INIT_WAITQUEUE(blitq->blit_queue + j);
}
DRM_INIT_WAITQUEUE(&blitq->busy_queue);
TASK_INIT(&blitq->wq, 0, via_dmablit_workqueue, blitq);
callout_init(&blitq->poll_timer, 0);
}
}
/*
* Build all info and do all mappings required for a blit.
*/
static int
via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg,
drm_via_dmablit_t *xfer)
{
int ret = 0;
vsg->bounce_buffer = NULL;
vsg->state = dr_via_sg_init;
if (xfer->num_lines <= 0 || xfer->line_length <= 0) {
DRM_ERROR("Zero size bitblt.\n");
return -EINVAL;
}
/*
* The check below is a driver limitation, not a hardware one. We
* don't want to lock unused pages, and don't want to incorporate the
* extra logic of avoiding them. Make sure there are none.
* (Not a big limitation anyway.)
*/
if ((xfer->mem_stride - xfer->line_length) > 2 * PAGE_SIZE) {
DRM_ERROR("Too large system memory stride. Stride: %d, "
"Length: %d\n", xfer->mem_stride, xfer->line_length);
return -EINVAL;
}
if ((xfer->mem_stride == xfer->line_length) &&
(xfer->fb_stride == xfer->line_length)) {
xfer->mem_stride *= xfer->num_lines;
xfer->line_length = xfer->mem_stride;
xfer->fb_stride = xfer->mem_stride;
xfer->num_lines = 1;
}
/*
* Don't lock an arbitrarily large number of pages, since that causes a
* DoS security hole.
*/
if (xfer->num_lines > 2048 ||
(xfer->num_lines*xfer->mem_stride > (2048*2048*4))) {
DRM_ERROR("Too large PCI DMA bitblt.\n");
return -EINVAL;
}
/*
* We allow a negative fb stride to allow flipping of images in
* a transfer.
*/
if (xfer->mem_stride < xfer->line_length ||
abs(xfer->fb_stride) < xfer->line_length) {
DRM_ERROR("Invalid frame-buffer / memory stride.\n");
return -EINVAL;
}
/*
* A hardware bug seems to be worked around if system memory addresses
* start on 16-byte boundaries. This seems a bit restrictive, however.
* VIA has been contacted about this. Meanwhile, impose the following
* restrictions:
*/
#ifdef VIA_BUGFREE
if ((((unsigned long)xfer->mem_addr & 3) !=
((unsigned long)xfer->fb_addr & 3)) ||
((xfer->num_lines > 1) && ((xfer->mem_stride & 3) !=
(xfer->fb_stride & 3)))) {
DRM_ERROR("Invalid DRM bitblt alignment.\n");
return -EINVAL;
}
#else
if ((((unsigned long)xfer->mem_addr & 15) ||
((unsigned long)xfer->fb_addr & 3)) ||
((xfer->num_lines > 1) &&
((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) {
DRM_ERROR("Invalid DRM bitblt alignment.\n");
return -EINVAL;
}
#endif
if (0 != (ret = via_lock_all_dma_pages(vsg, xfer))) {
DRM_ERROR("Could not lock DMA pages.\n");
via_free_sg_info(vsg);
return ret;
}
via_map_blit_for_device(xfer, vsg, 0);
if (0 != (ret = via_alloc_desc_pages(vsg))) {
DRM_ERROR("Could not allocate DMA descriptor pages.\n");
via_free_sg_info(vsg);
return ret;
}
via_map_blit_for_device(xfer, vsg, 1);
return 0;
}
/*
* Reserve one free slot in the blit queue. Will wait for one second for one
* to become available. Otherwise -EBUSY is returned.
*/
static int
via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
{
struct drm_device *dev = blitq->dev;
int ret=0;
DRM_DEBUG("Num free is %d\n", blitq->num_free);
mtx_lock(&blitq->blit_lock);
while(blitq->num_free == 0) {
mtx_unlock(&blitq->blit_lock);
DRM_WAIT_ON(ret, blitq->busy_queue, DRM_HZ,
blitq->num_free > 0);
if (ret) {
return (-EINTR == ret) ? -EAGAIN : ret;
}
mtx_lock(&blitq->blit_lock);
}
blitq->num_free--;
mtx_unlock(&blitq->blit_lock);
return 0;
}
/*
* Hand back a free slot if we changed our mind.
*/
static void
via_dmablit_release_slot(drm_via_blitq_t *blitq)
{
mtx_lock(&blitq->blit_lock);
blitq->num_free++;
mtx_unlock(&blitq->blit_lock);
DRM_WAKEUP( &blitq->busy_queue );
}
/*
* Grab a free slot. Build blit info and queue a blit.
*/
static int
via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
{
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
drm_via_sg_info_t *vsg;
drm_via_blitq_t *blitq;
int ret;
int engine;
if (dev_priv == NULL) {
DRM_ERROR("Called without initialization.\n");
return -EINVAL;
}
engine = (xfer->to_fb) ? 0 : 1;
blitq = dev_priv->blit_queues + engine;
if (0 != (ret = via_dmablit_grab_slot(blitq, engine))) {
return ret;
}
if (NULL == (vsg = malloc(sizeof(*vsg), DRM_MEM_DRIVER,
M_NOWAIT | M_ZERO))) {
via_dmablit_release_slot(blitq);
return -ENOMEM;
}
if (0 != (ret = via_build_sg_info(dev, vsg, xfer))) {
via_dmablit_release_slot(blitq);
free(vsg, DRM_MEM_DRIVER);
return ret;
}
mtx_lock(&blitq->blit_lock);
blitq->blits[blitq->head++] = vsg;
if (blitq->head >= VIA_NUM_BLIT_SLOTS)
blitq->head = 0;
blitq->num_outstanding++;
xfer->sync.sync_handle = ++blitq->cur_blit_handle;
mtx_unlock(&blitq->blit_lock);
xfer->sync.engine = engine;
via_dmablit_handler(dev, engine, 0);
return 0;
}
/*
* Sync on a previously submitted blit. Note that the X server uses signals
* extensively, and that there is a very big probability that this IOCTL will
* be interrupted by a signal. In that case it returns with -EAGAIN for the
* signal to be delivered. The caller should then reissue the IOCTL. This is
* similar to what is being done for drmGetLock().
*/
int
via_dma_blit_sync( struct drm_device *dev, void *data,
struct drm_file *file_priv )
{
drm_via_blitsync_t *sync = data;
int err;
if (sync->engine >= VIA_NUM_BLIT_ENGINES)
return -EINVAL;
err = via_dmablit_sync(dev, sync->sync_handle, sync->engine);
if (-EINTR == err)
err = -EAGAIN;
return err;
}
/*
* Queue a blit and hand back a handle to be used for sync. This IOCTL may be
* interrupted by a signal while waiting for a free slot in the blit queue.
* In that case it returns with -EAGAIN and should be reissued. See the above
* IOCTL code.
*/
int
via_dma_blit( struct drm_device *dev, void *data, struct drm_file *file_priv )
{
drm_via_dmablit_t *xfer = data;
int err;
err = via_dmablit(dev, xfer);
return err;
}

View File

@ -1,140 +0,0 @@
/* via_dmablit.h -- PCI DMA BitBlt support for the VIA Unichrome/Pro
*
* Copyright 2005 Thomas Hellstrom.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Thomas Hellstrom.
* Register info from Digeo Inc.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#ifndef _VIA_DMABLIT_H
#define _VIA_DMABLIT_H
#define VIA_NUM_BLIT_ENGINES 2
#define VIA_NUM_BLIT_SLOTS 8
struct _drm_via_descriptor;
typedef struct _drm_via_sg_info {
vm_page_t *pages;
unsigned long num_pages;
struct _drm_via_descriptor **desc_pages;
int num_desc_pages;
int num_desc;
unsigned char *bounce_buffer;
dma_addr_t chain_start;
uint32_t free_on_sequence;
unsigned int descriptors_per_page;
int aborted;
enum {
dr_via_device_mapped,
dr_via_desc_pages_alloc,
dr_via_pages_locked,
dr_via_pages_alloc,
dr_via_sg_init
} state;
} drm_via_sg_info_t;
typedef struct _drm_via_blitq {
struct drm_device *dev;
uint32_t cur_blit_handle;
uint32_t done_blit_handle;
unsigned serviced;
unsigned head;
unsigned cur;
unsigned num_free;
unsigned num_outstanding;
unsigned long end;
int aborting;
int is_active;
drm_via_sg_info_t *blits[VIA_NUM_BLIT_SLOTS];
struct mtx blit_lock;
wait_queue_head_t blit_queue[VIA_NUM_BLIT_SLOTS];
wait_queue_head_t busy_queue;
struct task wq;
struct callout poll_timer;
} drm_via_blitq_t;
/*
* PCI DMA Registers
* Channels 2 & 3 don't seem to be implemented in hardware.
*/
#define VIA_PCI_DMA_MAR0 0xE40 /* Memory Address Register of Channel 0 */
#define VIA_PCI_DMA_DAR0 0xE44 /* Device Address Register of Channel 0 */
#define VIA_PCI_DMA_BCR0 0xE48 /* Byte Count Register of Channel 0 */
#define VIA_PCI_DMA_DPR0 0xE4C /* Descriptor Pointer Register of Channel 0 */
#define VIA_PCI_DMA_MAR1 0xE50 /* Memory Address Register of Channel 1 */
#define VIA_PCI_DMA_DAR1 0xE54 /* Device Address Register of Channel 1 */
#define VIA_PCI_DMA_BCR1 0xE58 /* Byte Count Register of Channel 1 */
#define VIA_PCI_DMA_DPR1 0xE5C /* Descriptor Pointer Register of Channel 1 */
#define VIA_PCI_DMA_MAR2 0xE60 /* Memory Address Register of Channel 2 */
#define VIA_PCI_DMA_DAR2 0xE64 /* Device Address Register of Channel 2 */
#define VIA_PCI_DMA_BCR2 0xE68 /* Byte Count Register of Channel 2 */
#define VIA_PCI_DMA_DPR2 0xE6C /* Descriptor Pointer Register of Channel 2 */
#define VIA_PCI_DMA_MAR3 0xE70 /* Memory Address Register of Channel 3 */
#define VIA_PCI_DMA_DAR3 0xE74 /* Device Address Register of Channel 3 */
#define VIA_PCI_DMA_BCR3 0xE78 /* Byte Count Register of Channel 3 */
#define VIA_PCI_DMA_DPR3 0xE7C /* Descriptor Pointer Register of Channel 3 */
#define VIA_PCI_DMA_MR0 0xE80 /* Mode Register of Channel 0 */
#define VIA_PCI_DMA_MR1 0xE84 /* Mode Register of Channel 1 */
#define VIA_PCI_DMA_MR2 0xE88 /* Mode Register of Channel 2 */
#define VIA_PCI_DMA_MR3 0xE8C /* Mode Register of Channel 3 */
#define VIA_PCI_DMA_CSR0 0xE90 /* Command/Status Register of Channel 0 */
#define VIA_PCI_DMA_CSR1 0xE94 /* Command/Status Register of Channel 1 */
#define VIA_PCI_DMA_CSR2 0xE98 /* Command/Status Register of Channel 2 */
#define VIA_PCI_DMA_CSR3 0xE9C /* Command/Status Register of Channel 3 */
#define VIA_PCI_DMA_PTR 0xEA0 /* Priority Type Register */
/* Define for DMA engine */
/* DPR */
#define VIA_DMA_DPR_EC (1<<1) /* end of chain */
#define VIA_DMA_DPR_DDIE (1<<2) /* descriptor done interrupt enable */
#define VIA_DMA_DPR_DT (1<<3) /* direction of transfer (RO) */
/* MR */
#define VIA_DMA_MR_CM (1<<0) /* chaining mode */
#define VIA_DMA_MR_TDIE (1<<1) /* transfer done interrupt enable */
#define VIA_DMA_MR_HENDMACMD (1<<7) /* ? */
/* CSR */
#define VIA_DMA_CSR_DE (1<<0) /* DMA enable */
#define VIA_DMA_CSR_TS (1<<1) /* transfer start */
#define VIA_DMA_CSR_TA (1<<2) /* transfer abort */
#define VIA_DMA_CSR_TD (1<<3) /* transfer done */
#define VIA_DMA_CSR_DD (1<<4) /* descriptor done */
#define VIA_DMA_DPR_EC (1<<1) /* end of chain */
#endif

View File

@ -1,279 +0,0 @@
/*-
* Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
* Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#ifndef _VIA_DRM_H_
#define _VIA_DRM_H_
/* WARNING: These defines must be the same as what the Xserver uses.
* If you change them, you must change the defines in the Xserver.
*/
#ifndef _VIA_DEFINES_
#define _VIA_DEFINES_
#if !defined(__KERNEL__) && !defined(_KERNEL)
#include "via_drmclient.h"
#endif
#define VIA_NR_SAREA_CLIPRECTS 8
#define VIA_NR_XVMC_PORTS 10
#define VIA_NR_XVMC_LOCKS 5
#define VIA_MAX_CACHELINE_SIZE 64
#define XVMCLOCKPTR(saPriv,lockNo) \
((volatile struct drm_hw_lock *)(((((unsigned long) (saPriv)->XvMCLockArea) + \
(VIA_MAX_CACHELINE_SIZE - 1)) & \
~(VIA_MAX_CACHELINE_SIZE - 1)) + \
VIA_MAX_CACHELINE_SIZE*(lockNo)))
/* Each region is a minimum of 64k, and there are at most 64 of them.
*/
#define VIA_NR_TEX_REGIONS 64
#define VIA_LOG_MIN_TEX_REGION_SIZE 16
#endif
#define VIA_UPLOAD_TEX0IMAGE 0x1 /* handled clientside */
#define VIA_UPLOAD_TEX1IMAGE 0x2 /* handled clientside */
#define VIA_UPLOAD_CTX 0x4
#define VIA_UPLOAD_BUFFERS 0x8
#define VIA_UPLOAD_TEX0 0x10
#define VIA_UPLOAD_TEX1 0x20
#define VIA_UPLOAD_CLIPRECTS 0x40
#define VIA_UPLOAD_ALL 0xff
/* VIA specific ioctls */
#define DRM_VIA_ALLOCMEM 0x00
#define DRM_VIA_FREEMEM 0x01
#define DRM_VIA_AGP_INIT 0x02
#define DRM_VIA_FB_INIT 0x03
#define DRM_VIA_MAP_INIT 0x04
#define DRM_VIA_DEC_FUTEX 0x05
#define NOT_USED
#define DRM_VIA_DMA_INIT 0x07
#define DRM_VIA_CMDBUFFER 0x08
#define DRM_VIA_FLUSH 0x09
#define DRM_VIA_PCICMD 0x0a
#define DRM_VIA_CMDBUF_SIZE 0x0b
#define NOT_USED
#define DRM_VIA_WAIT_IRQ 0x0d
#define DRM_VIA_DMA_BLIT 0x0e
#define DRM_VIA_BLIT_SYNC 0x0f
#define DRM_IOCTL_VIA_ALLOCMEM DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_ALLOCMEM, drm_via_mem_t)
#define DRM_IOCTL_VIA_FREEMEM DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_FREEMEM, drm_via_mem_t)
#define DRM_IOCTL_VIA_AGP_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_AGP_INIT, drm_via_agp_t)
#define DRM_IOCTL_VIA_FB_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_FB_INIT, drm_via_fb_t)
#define DRM_IOCTL_VIA_MAP_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_MAP_INIT, drm_via_init_t)
#define DRM_IOCTL_VIA_DEC_FUTEX DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_DEC_FUTEX, drm_via_futex_t)
#define DRM_IOCTL_VIA_DMA_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_DMA_INIT, drm_via_dma_init_t)
#define DRM_IOCTL_VIA_CMDBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_CMDBUFFER, drm_via_cmdbuffer_t)
#define DRM_IOCTL_VIA_FLUSH DRM_IO( DRM_COMMAND_BASE + DRM_VIA_FLUSH)
#define DRM_IOCTL_VIA_PCICMD DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_PCICMD, drm_via_cmdbuffer_t)
#define DRM_IOCTL_VIA_CMDBUF_SIZE DRM_IOWR( DRM_COMMAND_BASE + DRM_VIA_CMDBUF_SIZE, \
drm_via_cmdbuf_size_t)
#define DRM_IOCTL_VIA_WAIT_IRQ DRM_IOWR( DRM_COMMAND_BASE + DRM_VIA_WAIT_IRQ, drm_via_irqwait_t)
#define DRM_IOCTL_VIA_DMA_BLIT DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_DMA_BLIT, drm_via_dmablit_t)
#define DRM_IOCTL_VIA_BLIT_SYNC DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_BLIT_SYNC, drm_via_blitsync_t)
/* Indices into buf.Setup where various bits of state are mirrored per
* context and per buffer. These can be fired at the card as a unit,
* or in a piecewise fashion as required.
*/
#define VIA_TEX_SETUP_SIZE 8
/* Flags for clear ioctl
*/
#define VIA_FRONT 0x1
#define VIA_BACK 0x2
#define VIA_DEPTH 0x4
#define VIA_STENCIL 0x8
#define VIA_MEM_VIDEO 0 /* matches drm constant */
#define VIA_MEM_AGP 1 /* matches drm constant */
#define VIA_MEM_SYSTEM 2
#define VIA_MEM_MIXED 3
#define VIA_MEM_UNKNOWN 4
typedef struct {
u32 offset;
u32 size;
} drm_via_agp_t;
typedef struct {
u32 offset;
u32 size;
} drm_via_fb_t;
typedef struct {
u32 context;
u32 type;
u32 size;
unsigned long index;
unsigned long offset;
} drm_via_mem_t;
typedef struct _drm_via_init {
enum {
VIA_INIT_MAP = 0x01,
VIA_CLEANUP_MAP = 0x02
} func;
unsigned long sarea_priv_offset;
unsigned long fb_offset;
unsigned long mmio_offset;
unsigned long agpAddr;
} drm_via_init_t;
typedef struct _drm_via_futex {
enum {
VIA_FUTEX_WAIT = 0x00,
VIA_FUTEX_WAKE = 0X01
} func;
u32 ms;
u32 lock;
u32 val;
} drm_via_futex_t;
typedef struct _drm_via_dma_init {
enum {
VIA_INIT_DMA = 0x01,
VIA_CLEANUP_DMA = 0x02,
VIA_DMA_INITIALIZED = 0x03
} func;
unsigned long offset;
unsigned long size;
unsigned long reg_pause_addr;
} drm_via_dma_init_t;
typedef struct _drm_via_cmdbuffer {
char __user *buf;
unsigned long size;
} drm_via_cmdbuffer_t;
/* Warning: If you change the SAREA structure, you must change the Xserver
* structure as well */
typedef struct _drm_via_tex_region {
unsigned char next, prev; /* indices to form a circular LRU */
unsigned char inUse; /* owned by a client, or free? */
int age; /* tracked by clients to update local LRU's */
} drm_via_tex_region_t;
typedef struct _drm_via_sarea {
unsigned int dirty;
unsigned int nbox;
struct drm_clip_rect boxes[VIA_NR_SAREA_CLIPRECTS];
drm_via_tex_region_t texList[VIA_NR_TEX_REGIONS + 1];
int texAge; /* last time texture was uploaded */
int ctxOwner; /* last context to upload state */
int vertexPrim;
/*
* Below is for XvMC.
* We want the lock integers alone on, and aligned to, a cache line.
* Therefore this somewhat strange construct.
*/
char XvMCLockArea[VIA_MAX_CACHELINE_SIZE * (VIA_NR_XVMC_LOCKS + 1)];
unsigned int XvMCDisplaying[VIA_NR_XVMC_PORTS];
unsigned int XvMCSubPicOn[VIA_NR_XVMC_PORTS];
unsigned int XvMCCtxNoGrabbed; /* Last context to hold decoder */
/* Used by the 3d driver only at this point, for pageflipping:
*/
unsigned int pfCurrentOffset;
} drm_via_sarea_t;
typedef struct _drm_via_cmdbuf_size {
enum {
VIA_CMDBUF_SPACE = 0x01,
VIA_CMDBUF_LAG = 0x02
} func;
int wait;
u32 size;
} drm_via_cmdbuf_size_t;
typedef enum {
VIA_IRQ_ABSOLUTE = 0x0,
VIA_IRQ_RELATIVE = 0x1,
VIA_IRQ_SIGNAL = 0x10000000,
VIA_IRQ_FORCE_SEQUENCE = 0x20000000
} via_irq_seq_type_t;
#define VIA_IRQ_FLAGS_MASK 0xF0000000
enum drm_via_irqs {
drm_via_irq_hqv0 = 0,
drm_via_irq_hqv1,
drm_via_irq_dma0_dd,
drm_via_irq_dma0_td,
drm_via_irq_dma1_dd,
drm_via_irq_dma1_td,
drm_via_irq_num
};
struct drm_via_wait_irq_request {
unsigned irq;
via_irq_seq_type_t type;
u32 sequence;
u32 signal;
};
typedef union drm_via_irqwait {
struct drm_via_wait_irq_request request;
struct drm_wait_vblank_reply reply;
} drm_via_irqwait_t;
typedef struct drm_via_blitsync {
u32 sync_handle;
unsigned engine;
} drm_via_blitsync_t;
/*
 * Below, "flags" is currently unused but will be used for possible future
* extensions like kernel space bounce buffers for bad alignments and
* blit engine busy-wait polling for better latency in the absence of
* interrupts.
*/
typedef struct drm_via_dmablit {
u32 num_lines;
u32 line_length;
u32 fb_addr;
u32 fb_stride;
unsigned char *mem_addr;
u32 mem_stride;
u32 flags;
int to_fb;
drm_via_blitsync_t sync;
} drm_via_dmablit_t;
#endif /* _VIA_DRM_H_ */


@ -1,123 +0,0 @@
/* via_drv.c -- VIA unichrome driver -*- linux-c -*-
* Created: Fri Aug 12 2005 by anholt@FreeBSD.org
*/
/*-
* Copyright 2005 Eric Anholt
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* ERIC ANHOLT BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <anholt@FreeBSD.org>
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "dev/drm/drmP.h"
#include "dev/drm/drm.h"
#include "dev/drm/via_drm.h"
#include "dev/drm/via_drv.h"
#include "dev/drm/drm_pciids.h"
/* drv_PCI_IDs comes from drm_pciids.h, generated from drm_pciids.txt. */
static drm_pci_id_list_t via_pciidlist[] = {
viadrv_PCI_IDS
};
static void via_configure(struct drm_device *dev)
{
dev->driver->driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_IRQ;
dev->driver->buf_priv_size = sizeof(drm_via_private_t);
dev->driver->load = via_driver_load;
dev->driver->unload = via_driver_unload;
dev->driver->lastclose = via_lastclose;
dev->driver->get_vblank_counter = via_get_vblank_counter;
dev->driver->enable_vblank = via_enable_vblank;
dev->driver->disable_vblank = via_disable_vblank;
dev->driver->irq_preinstall = via_driver_irq_preinstall;
dev->driver->irq_postinstall = via_driver_irq_postinstall;
dev->driver->irq_uninstall = via_driver_irq_uninstall;
dev->driver->irq_handler = via_driver_irq_handler;
dev->driver->dma_quiescent = via_driver_dma_quiescent;
dev->driver->ioctls = via_ioctls;
dev->driver->max_ioctl = via_max_ioctl;
dev->driver->name = DRIVER_NAME;
dev->driver->desc = DRIVER_DESC;
dev->driver->date = DRIVER_DATE;
dev->driver->major = DRIVER_MAJOR;
dev->driver->minor = DRIVER_MINOR;
dev->driver->patchlevel = DRIVER_PATCHLEVEL;
}
static int
via_probe(device_t kdev)
{
return drm_probe(kdev, via_pciidlist);
}
static int
via_attach(device_t kdev)
{
struct drm_device *dev = device_get_softc(kdev);
dev->driver = malloc(sizeof(struct drm_driver_info), DRM_MEM_DRIVER,
M_WAITOK | M_ZERO);
via_configure(dev);
return drm_attach(kdev, via_pciidlist);
}
static int
via_detach(device_t kdev)
{
struct drm_device *dev = device_get_softc(kdev);
int ret;
ret = drm_detach(kdev);
free(dev->driver, DRM_MEM_DRIVER);
return ret;
}
static device_method_t via_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, via_probe),
DEVMETHOD(device_attach, via_attach),
DEVMETHOD(device_detach, via_detach),
{ 0, 0 }
};
static driver_t via_driver = {
"drm",
via_methods,
sizeof(struct drm_device)
};
extern devclass_t drm_devclass;
DRIVER_MODULE(via, vgapci, via_driver, drm_devclass, 0, 0);
MODULE_DEPEND(via, drm, 1, 1, 1);


@ -1,161 +0,0 @@
/*-
* Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
* Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#ifndef _VIA_DRV_H_
#define _VIA_DRV_H_
#include "dev/drm/drm_sman.h"
#define DRIVER_AUTHOR "Various"
#define DRIVER_NAME "via"
#define DRIVER_DESC "VIA Unichrome / Pro"
#define DRIVER_DATE "20070202"
#define DRIVER_MAJOR 2
#define DRIVER_MINOR 11
#define DRIVER_PATCHLEVEL 1
#include "dev/drm/via_verifier.h"
#include "dev/drm/via_dmablit.h"
#define VIA_PCI_BUF_SIZE 60000
#define VIA_FIRE_BUF_SIZE 1024
#define VIA_NUM_IRQS 4
typedef struct drm_via_ring_buffer {
drm_local_map_t map;
char *virtual_start;
} drm_via_ring_buffer_t;
typedef uint32_t maskarray_t[5];
typedef struct drm_via_irq {
atomic_t irq_received;
uint32_t pending_mask;
uint32_t enable_mask;
wait_queue_head_t irq_queue;
} drm_via_irq_t;
typedef struct drm_via_private {
drm_via_sarea_t *sarea_priv;
drm_local_map_t *sarea;
drm_local_map_t *fb;
drm_local_map_t *mmio;
unsigned long agpAddr;
wait_queue_head_t decoder_queue[VIA_NR_XVMC_LOCKS];
char *dma_ptr;
unsigned int dma_low;
unsigned int dma_high;
unsigned int dma_offset;
uint32_t dma_wrap;
volatile uint32_t *last_pause_ptr;
volatile uint32_t *hw_addr_ptr;
drm_via_ring_buffer_t ring;
struct timeval last_vblank;
int last_vblank_valid;
unsigned usec_per_vblank;
atomic_t vbl_received;
drm_via_state_t hc_state;
char pci_buf[VIA_PCI_BUF_SIZE];
const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
uint32_t num_fire_offsets;
int chipset;
drm_via_irq_t via_irqs[VIA_NUM_IRQS];
unsigned num_irqs;
maskarray_t *irq_masks;
uint32_t irq_enable_mask;
uint32_t irq_pending_mask;
int *irq_map;
unsigned int idle_fault;
struct drm_sman sman;
int vram_initialized;
int agp_initialized;
unsigned long vram_offset;
unsigned long agp_offset;
drm_via_blitq_t blit_queues[VIA_NUM_BLIT_ENGINES];
uint32_t dma_diff;
} drm_via_private_t;
enum via_family {
VIA_OTHER = 0, /* Baseline */
VIA_PRO_GROUP_A, /* Another video engine and DMA commands */
VIA_DX9_0 /* Same video as pro_group_a, but 3D is unsupported */
};
/* VIA MMIO register access */
#define VIA_BASE ((dev_priv->mmio))
#define VIA_READ(reg) DRM_READ32(VIA_BASE, reg)
#define VIA_WRITE(reg,val) DRM_WRITE32(VIA_BASE, reg, val)
#define VIA_READ8(reg) DRM_READ8(VIA_BASE, reg)
#define VIA_WRITE8(reg,val) DRM_WRITE8(VIA_BASE, reg, val)
extern struct drm_ioctl_desc via_ioctls[];
extern int via_max_ioctl;
extern int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int via_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int via_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int via_map_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int via_decoder_futex(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_priv );
extern int via_dma_blit( struct drm_device *dev, void *data, struct drm_file *file_priv );
extern int via_driver_load(struct drm_device *dev, unsigned long chipset);
extern int via_driver_unload(struct drm_device *dev);
extern int via_init_context(struct drm_device * dev, int context);
extern int via_final_context(struct drm_device * dev, int context);
extern int via_do_cleanup_map(struct drm_device * dev);
extern u32 via_get_vblank_counter(struct drm_device *dev, int crtc);
extern int via_enable_vblank(struct drm_device *dev, int crtc);
extern void via_disable_vblank(struct drm_device *dev, int crtc);
extern irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS);
extern void via_driver_irq_preinstall(struct drm_device * dev);
extern int via_driver_irq_postinstall(struct drm_device *dev);
extern void via_driver_irq_uninstall(struct drm_device * dev);
extern int via_dma_cleanup(struct drm_device * dev);
extern void via_init_command_verifier(void);
extern int via_driver_dma_quiescent(struct drm_device * dev);
extern void via_init_futex(drm_via_private_t *dev_priv);
extern void via_cleanup_futex(drm_via_private_t *dev_priv);
extern void via_release_futex(drm_via_private_t *dev_priv, int context);
extern void via_reclaim_buffers_locked(struct drm_device *dev,
struct drm_file *file_priv);
extern void via_lastclose(struct drm_device *dev);
extern void via_dmablit_handler(struct drm_device *dev, int engine, int from_irq);
extern void via_init_dmablit(struct drm_device *dev);
#endif


@ -1,392 +0,0 @@
/* via_irq.c
*
* Copyright 2004 BEAM Ltd.
* Copyright 2002 Tungsten Graphics, Inc.
* Copyright 2005 Thomas Hellstrom.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* BEAM LTD, TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Terry Barnaby <terry1@beam.ltd.uk>
* Keith Whitwell <keith@tungstengraphics.com>
* Thomas Hellstrom <unichrome@shipmail.org>
*
* This code provides standard DRM access to the Via Unichrome / Pro Vertical blank
* interrupt, as well as an infrastructure to handle other interrupts of the chip.
* The refresh rate is also calculated for video playback sync purposes.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "dev/drm/drmP.h"
#include "dev/drm/drm.h"
#include "dev/drm/via_drm.h"
#include "dev/drm/via_drv.h"
#define VIA_REG_INTERRUPT 0x200
/* VIA_REG_INTERRUPT */
#define VIA_IRQ_GLOBAL (1U << 31)
#define VIA_IRQ_VBLANK_ENABLE (1 << 19)
#define VIA_IRQ_VBLANK_PENDING (1 << 3)
#define VIA_IRQ_HQV0_ENABLE (1 << 11)
#define VIA_IRQ_HQV1_ENABLE (1 << 25)
#define VIA_IRQ_HQV0_PENDING (1 << 9)
#define VIA_IRQ_HQV1_PENDING (1 << 10)
#define VIA_IRQ_DMA0_DD_ENABLE (1 << 20)
#define VIA_IRQ_DMA0_TD_ENABLE (1 << 21)
#define VIA_IRQ_DMA1_DD_ENABLE (1 << 22)
#define VIA_IRQ_DMA1_TD_ENABLE (1 << 23)
#define VIA_IRQ_DMA0_DD_PENDING (1 << 4)
#define VIA_IRQ_DMA0_TD_PENDING (1 << 5)
#define VIA_IRQ_DMA1_DD_PENDING (1 << 6)
#define VIA_IRQ_DMA1_TD_PENDING (1 << 7)
/*
* Device-specific IRQs go here. This type might need to be extended with
* the register if there are multiple IRQ control registers.
* Currently we activate the HQV interrupts of Unichrome Pro group A.
*/
static maskarray_t via_pro_group_a_irqs[] = {
{VIA_IRQ_HQV0_ENABLE, VIA_IRQ_HQV0_PENDING, 0x000003D0, 0x00008010,
0x00000000 },
{VIA_IRQ_HQV1_ENABLE, VIA_IRQ_HQV1_PENDING, 0x000013D0, 0x00008010,
0x00000000 },
{VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0,
VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
{VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
};
static int via_num_pro_group_a = DRM_ARRAY_SIZE(via_pro_group_a_irqs);
static int via_irqmap_pro_group_a[] = {0, 1, -1, 2, -1, 3};
static maskarray_t via_unichrome_irqs[] = {
{VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0,
VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
{VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008}
};
static int via_num_unichrome = DRM_ARRAY_SIZE(via_unichrome_irqs);
static int via_irqmap_unichrome[] = {-1, -1, -1, 0, -1, 1};
static unsigned time_diff(struct timeval *now, struct timeval *then)
{
return (now->tv_usec >= then->tv_usec) ?
now->tv_usec - then->tv_usec :
1000000 - (then->tv_usec - now->tv_usec);
}
u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
{
drm_via_private_t *dev_priv = dev->dev_private;
if (crtc != 0)
return 0;
return atomic_read(&dev_priv->vbl_received);
}
irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
u32 status;
int handled = 0;
struct timeval cur_vblank;
drm_via_irq_t *cur_irq = dev_priv->via_irqs;
int i;
status = VIA_READ(VIA_REG_INTERRUPT);
if (status & VIA_IRQ_VBLANK_PENDING) {
atomic_inc(&dev_priv->vbl_received);
if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
microtime(&cur_vblank);
if (dev_priv->last_vblank_valid) {
dev_priv->usec_per_vblank =
time_diff(&cur_vblank,
&dev_priv->last_vblank) >> 4;
}
dev_priv->last_vblank = cur_vblank;
dev_priv->last_vblank_valid = 1;
}
if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
DRM_DEBUG("US per vblank is: %u\n",
dev_priv->usec_per_vblank);
}
drm_handle_vblank(dev, 0);
handled = 1;
}
for (i = 0; i < dev_priv->num_irqs; ++i) {
if (status & cur_irq->pending_mask) {
atomic_inc(&cur_irq->irq_received);
DRM_WAKEUP(&cur_irq->irq_queue);
handled = 1;
if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
via_dmablit_handler(dev, 0, 1);
} else if (dev_priv->irq_map[drm_via_irq_dma1_td] == i) {
via_dmablit_handler(dev, 1, 1);
}
}
cur_irq++;
}
/* Acknowledge interrupts */
VIA_WRITE(VIA_REG_INTERRUPT, status);
if (handled)
return IRQ_HANDLED;
else
return IRQ_NONE;
}
static __inline__ void viadrv_acknowledge_irqs(drm_via_private_t * dev_priv)
{
u32 status;
if (dev_priv) {
/* Acknowledge interrupts */
status = VIA_READ(VIA_REG_INTERRUPT);
VIA_WRITE(VIA_REG_INTERRUPT, status |
dev_priv->irq_pending_mask);
}
}
int via_enable_vblank(struct drm_device *dev, int crtc)
{
drm_via_private_t *dev_priv = dev->dev_private;
u32 status;
if (crtc != 0) {
DRM_ERROR("%s: bad crtc %d\n", __func__, crtc);
return -EINVAL;
}
status = VIA_READ(VIA_REG_INTERRUPT);
VIA_WRITE(VIA_REG_INTERRUPT, status & VIA_IRQ_VBLANK_ENABLE);
VIA_WRITE8(0x83d4, 0x11);
VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30);
return 0;
}
void via_disable_vblank(struct drm_device *dev, int crtc)
{
drm_via_private_t *dev_priv = dev->dev_private;
VIA_WRITE8(0x83d4, 0x11);
VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) & ~0x30);
if (crtc != 0)
DRM_ERROR("%s: bad crtc %d\n", __func__, crtc);
}
static int
via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequence,
unsigned int *sequence)
{
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
unsigned int cur_irq_sequence;
drm_via_irq_t *cur_irq;
int ret = 0;
maskarray_t *masks;
int real_irq;
DRM_DEBUG("\n");
if (!dev_priv) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
if (irq >= drm_via_irq_num) {
DRM_ERROR("Trying to wait on unknown irq %d\n", irq);
return -EINVAL;
}
real_irq = dev_priv->irq_map[irq];
if (real_irq < 0) {
DRM_ERROR("Video IRQ %d not available on this hardware.\n",
irq);
return -EINVAL;
}
masks = dev_priv->irq_masks;
cur_irq = dev_priv->via_irqs + real_irq;
if (masks[real_irq][2] && !force_sequence) {
DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
masks[irq][4]));
cur_irq_sequence = atomic_read(&cur_irq->irq_received);
} else {
DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
(((cur_irq_sequence =
atomic_read(&cur_irq->irq_received)) -
*sequence) <= (1 << 23)));
}
*sequence = cur_irq_sequence;
return ret;
}
/*
* drm_dma.h hooks
*/
void via_driver_irq_preinstall(struct drm_device * dev)
{
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
u32 status;
drm_via_irq_t *cur_irq;
int i;
DRM_DEBUG("dev_priv: %p\n", dev_priv);
if (dev_priv) {
cur_irq = dev_priv->via_irqs;
dev_priv->irq_enable_mask = VIA_IRQ_VBLANK_ENABLE;
dev_priv->irq_pending_mask = VIA_IRQ_VBLANK_PENDING;
if (dev_priv->chipset == VIA_PRO_GROUP_A ||
dev_priv->chipset == VIA_DX9_0) {
dev_priv->irq_masks = via_pro_group_a_irqs;
dev_priv->num_irqs = via_num_pro_group_a;
dev_priv->irq_map = via_irqmap_pro_group_a;
} else {
dev_priv->irq_masks = via_unichrome_irqs;
dev_priv->num_irqs = via_num_unichrome;
dev_priv->irq_map = via_irqmap_unichrome;
}
for (i = 0; i < dev_priv->num_irqs; ++i) {
atomic_set(&cur_irq->irq_received, 0);
cur_irq->enable_mask = dev_priv->irq_masks[i][0];
cur_irq->pending_mask = dev_priv->irq_masks[i][1];
DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
dev_priv->irq_enable_mask |= cur_irq->enable_mask;
dev_priv->irq_pending_mask |= cur_irq->pending_mask;
cur_irq++;
DRM_DEBUG("Initializing IRQ %d\n", i);
}
dev_priv->last_vblank_valid = 0;
/* Clear VSync interrupt regs */
status = VIA_READ(VIA_REG_INTERRUPT);
VIA_WRITE(VIA_REG_INTERRUPT, status &
~(dev_priv->irq_enable_mask));
/* Clear bits if they're already high */
viadrv_acknowledge_irqs(dev_priv);
}
}
int via_driver_irq_postinstall(struct drm_device *dev)
{
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
u32 status;
DRM_DEBUG("via_driver_irq_postinstall\n");
if (!dev_priv)
return -EINVAL;
status = VIA_READ(VIA_REG_INTERRUPT);
VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL
| dev_priv->irq_enable_mask);
/* Some magic, oh for some data sheets ! */
VIA_WRITE8(0x83d4, 0x11);
VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30);
return 0;
}
void via_driver_irq_uninstall(struct drm_device * dev)
{
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
u32 status;
DRM_DEBUG("\n");
if (dev_priv) {
/* Some more magic, oh for some data sheets ! */
VIA_WRITE8(0x83d4, 0x11);
VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) & ~0x30);
status = VIA_READ(VIA_REG_INTERRUPT);
VIA_WRITE(VIA_REG_INTERRUPT, status &
~(VIA_IRQ_VBLANK_ENABLE | dev_priv->irq_enable_mask));
}
}
int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_via_irqwait_t *irqwait = data;
struct timeval now;
int ret = 0;
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
drm_via_irq_t *cur_irq = dev_priv->via_irqs;
int force_sequence;
if (irqwait->request.irq >= dev_priv->num_irqs) {
DRM_ERROR("Trying to wait on unknown irq %d\n",
irqwait->request.irq);
return -EINVAL;
}
cur_irq += irqwait->request.irq;
switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
case VIA_IRQ_RELATIVE:
irqwait->request.sequence +=
atomic_read(&cur_irq->irq_received);
irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
case VIA_IRQ_ABSOLUTE:
break;
default:
return -EINVAL;
}
if (irqwait->request.type & VIA_IRQ_SIGNAL) {
DRM_ERROR("Signals on Via IRQs not implemented yet.\n");
return -EINVAL;
}
force_sequence = (irqwait->request.type & VIA_IRQ_FORCE_SEQUENCE);
ret = via_driver_irq_wait(dev, irqwait->request.irq, force_sequence,
&irqwait->request.sequence);
microtime(&now);
irqwait->reply.tval_sec = now.tv_sec;
irqwait->reply.tval_usec = now.tv_usec;
return ret;
}


@ -1,137 +0,0 @@
/*-
* Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
* Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "dev/drm/drmP.h"
#include "dev/drm/via_drm.h"
#include "dev/drm/via_drv.h"
static int via_do_init_map(struct drm_device * dev, drm_via_init_t * init)
{
drm_via_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("\n");
dev_priv->sarea = drm_getsarea(dev);
if (!dev_priv->sarea) {
DRM_ERROR("could not find sarea!\n");
dev->dev_private = (void *)dev_priv;
via_do_cleanup_map(dev);
return -EINVAL;
}
dev_priv->fb = drm_core_findmap(dev, init->fb_offset);
if (!dev_priv->fb) {
DRM_ERROR("could not find framebuffer!\n");
dev->dev_private = (void *)dev_priv;
via_do_cleanup_map(dev);
return -EINVAL;
}
dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
if (!dev_priv->mmio) {
DRM_ERROR("could not find mmio region!\n");
dev->dev_private = (void *)dev_priv;
via_do_cleanup_map(dev);
return -EINVAL;
}
dev_priv->sarea_priv =
(drm_via_sarea_t *) ((u8 *) dev_priv->sarea->virtual +
init->sarea_priv_offset);
dev_priv->agpAddr = init->agpAddr;
via_init_futex(dev_priv);
via_init_dmablit(dev);
dev->dev_private = (void *)dev_priv;
return 0;
}
int via_do_cleanup_map(struct drm_device * dev)
{
via_dma_cleanup(dev);
return 0;
}
int via_map_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_via_init_t *init = data;
DRM_DEBUG("\n");
switch (init->func) {
case VIA_INIT_MAP:
return via_do_init_map(dev, init);
case VIA_CLEANUP_MAP:
return via_do_cleanup_map(dev);
}
return -EINVAL;
}
int via_driver_load(struct drm_device *dev, unsigned long chipset)
{
drm_via_private_t *dev_priv;
int ret = 0;
dev_priv = drm_alloc(sizeof(drm_via_private_t), DRM_MEM_DRIVER);
if (dev_priv == NULL)
return -ENOMEM;
dev->dev_private = (void *)dev_priv;
dev_priv->chipset = chipset;
ret = drm_sman_init(&dev_priv->sman, 2, 12, 8);
if (ret) {
drm_free(dev_priv, sizeof(drm_via_private_t), DRM_MEM_DRIVER);
return ret;
}
ret = drm_vblank_init(dev, 1);
if (ret) {
drm_sman_takedown(&dev_priv->sman);
drm_free(dev_priv, sizeof(drm_via_private_t), DRM_MEM_DRIVER);
return ret;
}
return 0;
}
int via_driver_unload(struct drm_device *dev)
{
drm_via_private_t *dev_priv = dev->dev_private;
drm_sman_takedown(&dev_priv->sman);
drm_free(dev_priv, sizeof(drm_via_private_t), DRM_MEM_DRIVER);
dev->dev_private = NULL;
return 0;
}


@ -1,180 +0,0 @@
/*-
* Copyright 2006 Tungsten Graphics Inc., Bismarck, ND., USA.
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "dev/drm/drmP.h"
#include "dev/drm/via_drm.h"
#include "dev/drm/via_drv.h"
#include "dev/drm/drm_sman.h"
#define VIA_MM_ALIGN_SHIFT 4
#define VIA_MM_ALIGN_MASK ( (1 << VIA_MM_ALIGN_SHIFT) - 1)
int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_via_agp_t *agp = data;
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
int ret;
ret = drm_sman_set_range(&dev_priv->sman, VIA_MEM_AGP, 0,
agp->size >> VIA_MM_ALIGN_SHIFT);
if (ret) {
DRM_ERROR("AGP memory manager initialisation error\n");
return ret;
}
dev_priv->agp_initialized = 1;
dev_priv->agp_offset = agp->offset;
DRM_DEBUG("offset = %u, size = %u\n", agp->offset, agp->size);
return 0;
}
int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_via_fb_t *fb = data;
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
int ret;
ret = drm_sman_set_range(&dev_priv->sman, VIA_MEM_VIDEO, 0,
fb->size >> VIA_MM_ALIGN_SHIFT);
if (ret) {
DRM_ERROR("VRAM memory manager initialisation error\n");
return ret;
}
dev_priv->vram_initialized = 1;
dev_priv->vram_offset = fb->offset;
DRM_DEBUG("offset = %u, size = %u\n", fb->offset, fb->size);
return 0;
}
int via_final_context(struct drm_device *dev, int context)
{
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
via_release_futex(dev_priv, context);
#ifdef __linux__
/* Linux specific until context tracking code gets ported to BSD */
/* Last context, perform cleanup */
if (dev->ctx_count == 1 && dev->dev_private) {
DRM_DEBUG("Last Context\n");
drm_irq_uninstall(dev);
via_cleanup_futex(dev_priv);
via_do_cleanup_map(dev);
}
#endif
return 1;
}
void via_lastclose(struct drm_device *dev)
{
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
if (!dev_priv)
return;
drm_sman_cleanup(&dev_priv->sman);
dev_priv->vram_initialized = 0;
dev_priv->agp_initialized = 0;
}
int via_mem_alloc(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_via_mem_t *mem = data;
int retval = 0;
struct drm_memblock_item *item;
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
unsigned long tmpSize;
if (mem->type > VIA_MEM_AGP) {
DRM_ERROR("Unknown memory type allocation\n");
return -EINVAL;
}
if (0 == ((mem->type == VIA_MEM_VIDEO) ? dev_priv->vram_initialized :
dev_priv->agp_initialized)) {
DRM_ERROR
("Attempt to allocate from uninitialized memory manager.\n");
return -EINVAL;
}
tmpSize = (mem->size + VIA_MM_ALIGN_MASK) >> VIA_MM_ALIGN_SHIFT;
item = drm_sman_alloc(&dev_priv->sman, mem->type, tmpSize, 0,
(unsigned long)file_priv);
if (item) {
mem->offset = ((mem->type == VIA_MEM_VIDEO) ?
dev_priv->vram_offset : dev_priv->agp_offset) +
(item->mm->
offset(item->mm, item->mm_info) << VIA_MM_ALIGN_SHIFT);
mem->index = item->user_hash.key;
} else {
mem->offset = 0;
mem->size = 0;
mem->index = 0;
DRM_DEBUG("Video memory allocation failed\n");
retval = -ENOMEM;
}
return retval;
}
int via_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_via_private_t *dev_priv = dev->dev_private;
drm_via_mem_t *mem = data;
int ret;
ret = drm_sman_free_key(&dev_priv->sman, mem->index);
DRM_DEBUG("free = 0x%lx\n", mem->index);
return ret;
}
void via_reclaim_buffers_locked(struct drm_device * dev,
struct drm_file *file_priv)
{
drm_via_private_t *dev_priv = dev->dev_private;
if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)file_priv))
return;
if (dev->driver->dma_quiescent)
dev->driver->dma_quiescent(dev);
drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)file_priv);
return;
}

File diff suppressed because it is too large


@ -1,65 +0,0 @@
/*-
* Copyright 2004 The Unichrome Project. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE UNICHROME PROJECT, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Author: Thomas Hellström 2004.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#ifndef _VIA_VERIFIER_H_
#define _VIA_VERIFIER_H_
typedef enum {
no_sequence = 0,
z_address,
dest_address,
tex_address
} drm_via_sequence_t;
typedef struct {
unsigned texture;
uint32_t z_addr;
uint32_t d_addr;
uint32_t t_addr[2][10];
uint32_t pitch[2][10];
uint32_t height[2][10];
uint32_t tex_level_lo[2];
uint32_t tex_level_hi[2];
uint32_t tex_palette_size[2];
uint32_t tex_npot[2];
drm_via_sequence_t unfinished;
int agp_texture;
int multitex;
struct drm_device *dev;
drm_local_map_t *map_cache;
uint32_t vertex_count;
int agp;
const uint32_t *buf_start;
} drm_via_state_t;
extern int via_verify_command_stream(const uint32_t * buf, unsigned int size,
struct drm_device * dev, int agp);
extern int via_parse_command_stream(struct drm_device *dev, const uint32_t *buf,
unsigned int size);
#endif


@ -1,96 +0,0 @@
/*-
* Copyright 2005 Thomas Hellstrom. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE AUTHOR(S), AND/OR THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Author: Thomas Hellstrom 2005.
*
* Video and XvMC related functions.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "dev/drm/drmP.h"
#include "dev/drm/via_drm.h"
#include "dev/drm/via_drv.h"
void via_init_futex(drm_via_private_t * dev_priv)
{
unsigned int i;
DRM_DEBUG("\n");
for (i = 0; i < VIA_NR_XVMC_LOCKS; ++i) {
DRM_INIT_WAITQUEUE(&(dev_priv->decoder_queue[i]));
XVMCLOCKPTR(dev_priv->sarea_priv, i)->lock = 0;
}
}
void via_cleanup_futex(drm_via_private_t * dev_priv)
{
}
void via_release_futex(drm_via_private_t * dev_priv, int context)
{
unsigned int i;
volatile int *lock;
if (!dev_priv->sarea_priv)
return;
for (i = 0; i < VIA_NR_XVMC_LOCKS; ++i) {
lock = (volatile int *)XVMCLOCKPTR(dev_priv->sarea_priv, i);
if ((_DRM_LOCKING_CONTEXT(*lock) == context)) {
if (_DRM_LOCK_IS_HELD(*lock)
&& (*lock & _DRM_LOCK_CONT)) {
DRM_WAKEUP(&(dev_priv->decoder_queue[i]));
}
*lock = 0;
}
}
}
int via_decoder_futex(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_via_futex_t *fx = data;
volatile int *lock;
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
drm_via_sarea_t *sAPriv = dev_priv->sarea_priv;
int ret = 0;
DRM_DEBUG("\n");
if (fx->lock >= VIA_NR_XVMC_LOCKS)
return -EFAULT;
lock = (volatile int *)XVMCLOCKPTR(sAPriv, fx->lock);
switch (fx->func) {
case VIA_FUTEX_WAIT:
DRM_WAIT_ON(ret, dev_priv->decoder_queue[fx->lock],
(fx->ms / 10) * (DRM_HZ / 100), *lock != fx->val);
return ret;
case VIA_FUTEX_WAKE:
DRM_WAKEUP(&(dev_priv->decoder_queue[fx->lock]));
return 0;
}
return 0;
}


@ -1,212 +0,0 @@
/**
* \file ati_pcigart.c
* ATI PCI GART support
*
* \author Gareth Hughes <gareth@valinux.com>
*/
/*
* Created: Wed Dec 13 21:52:19 2000 by gareth@valinux.com
*
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <dev/drm2/drmP.h>
# define ATI_PCIGART_PAGE_SIZE 4096 /**< PCI GART page size */
static int drm_ati_alloc_pcigart_table(struct drm_device *dev,
struct drm_ati_pcigart_info *gart_info)
{
gart_info->table_handle = drm_pci_alloc(dev, gart_info->table_size,
PAGE_SIZE, BUS_SPACE_MAXADDR);
if (gart_info->table_handle == NULL)
return -ENOMEM;
return 0;
}
static void drm_ati_free_pcigart_table(struct drm_device *dev,
struct drm_ati_pcigart_info *gart_info)
{
drm_pci_free(dev, gart_info->table_handle);
gart_info->table_handle = NULL;
}
int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
{
struct drm_sg_mem *entry = dev->sg;
#ifdef __linux__
unsigned long pages;
int i;
int max_pages;
#endif
/* we need to support large memory configurations */
if (!entry) {
DRM_ERROR("no scatter/gather memory!\n");
return 0;
}
if (gart_info->bus_addr) {
#ifdef __linux__
max_pages = (gart_info->table_size / sizeof(u32));
pages = (entry->pages <= max_pages)
? entry->pages : max_pages;
for (i = 0; i < pages; i++) {
if (!entry->busaddr[i])
break;
pci_unmap_page(dev->pdev, entry->busaddr[i],
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
}
#endif
if (gart_info->gart_table_location == DRM_ATI_GART_MAIN)
gart_info->bus_addr = 0;
}
if (gart_info->gart_table_location == DRM_ATI_GART_MAIN &&
gart_info->table_handle) {
drm_ati_free_pcigart_table(dev, gart_info);
}
return 1;
}
EXPORT_SYMBOL(drm_ati_pcigart_cleanup);
int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
{
struct drm_local_map *map = &gart_info->mapping;
struct drm_sg_mem *entry = dev->sg;
void *address = NULL;
unsigned long pages;
u32 *pci_gart = NULL, page_base, gart_idx;
dma_addr_t bus_address = 0;
int i, j, ret = 0;
int max_ati_pages, max_real_pages;
if (!entry) {
DRM_ERROR("no scatter/gather memory!\n");
goto done;
}
if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n");
#ifdef __linux__
if (pci_set_dma_mask(dev->pdev, gart_info->table_mask)) {
DRM_ERROR("fail to set dma mask to 0x%Lx\n",
(unsigned long long)gart_info->table_mask);
ret = 1;
goto done;
}
#endif
ret = drm_ati_alloc_pcigart_table(dev, gart_info);
if (ret) {
DRM_ERROR("cannot allocate PCI GART page!\n");
goto done;
}
pci_gart = gart_info->table_handle->vaddr;
address = gart_info->table_handle->vaddr;
bus_address = gart_info->table_handle->busaddr;
} else {
address = gart_info->addr;
bus_address = gart_info->bus_addr;
DRM_DEBUG("PCI: Gart Table: VRAM %08LX mapped at %08lX\n",
(unsigned long long)bus_address,
(unsigned long)address);
}
max_ati_pages = (gart_info->table_size / sizeof(u32));
max_real_pages = max_ati_pages / (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE);
pages = (entry->pages <= max_real_pages)
? entry->pages : max_real_pages;
if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
memset(pci_gart, 0, max_ati_pages * sizeof(u32));
} else {
memset_io((void __iomem *)map->handle, 0, max_ati_pages * sizeof(u32));
}
gart_idx = 0;
for (i = 0; i < pages; i++) {
#ifdef __linux__
/* we need to support large memory configurations */
entry->busaddr[i] = pci_map_page(dev->pdev, entry->pagelist[i],
0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(dev->pdev, entry->busaddr[i])) {
DRM_ERROR("unable to map PCIGART pages!\n");
drm_ati_pcigart_cleanup(dev, gart_info);
address = NULL;
bus_address = 0;
goto done;
}
#endif
page_base = (u32) entry->busaddr[i];
for (j = 0; j < (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); j++) {
u32 val;
switch(gart_info->gart_reg_if) {
case DRM_ATI_GART_IGP:
val = page_base | 0xc;
break;
case DRM_ATI_GART_PCIE:
val = (page_base >> 8) | 0xc;
break;
default:
case DRM_ATI_GART_PCI:
val = page_base;
break;
}
if (gart_info->gart_table_location ==
DRM_ATI_GART_MAIN)
pci_gart[gart_idx] = cpu_to_le32(val);
else
DRM_WRITE32(map, gart_idx * sizeof(u32), val);
gart_idx++;
page_base += ATI_PCIGART_PAGE_SIZE;
}
}
ret = 1;
#if defined(__i386__) || defined(__x86_64__)
wbinvd();
#else
mb();
#endif
done:
gart_info->addr = address;
gart_info->bus_addr = bus_address;
return ret;
}
EXPORT_SYMBOL(drm_ati_pcigart_init);


@ -1,835 +0,0 @@
/**
* \file drm.h
* Header for the Direct Rendering Manager
*
* \author Rickard E. (Rik) Faith <faith@valinux.com>
*
* \par Acknowledgments:
* Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic \c cmpxchg.
*/
/*-
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#ifndef _DRM_H_
#define _DRM_H_
#if defined(__linux__)
#include <linux/types.h>
#include <asm/ioctl.h>
typedef unsigned int drm_handle_t;
#else /* One of the BSDs */
#include <sys/ioccom.h>
#include <sys/types.h>
typedef int8_t __s8;
typedef uint8_t __u8;
typedef int16_t __s16;
typedef uint16_t __u16;
typedef int32_t __s32;
typedef uint32_t __u32;
typedef int64_t __s64;
typedef uint64_t __u64;
typedef unsigned long drm_handle_t;
#include <dev/drm2/drm_os_freebsd.h>
#endif
#define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */
#define DRM_MIN_ORDER 5 /**< At least 2^5 bytes = 32 bytes */
#define DRM_MAX_ORDER 22 /**< Up to 2^22 bytes = 4MB */
#define DRM_RAM_PERCENT 10 /**< How much system ram can we lock? */
#define _DRM_LOCK_HELD 0x80000000U /**< Hardware lock is held */
#define _DRM_LOCK_CONT 0x40000000U /**< Hardware lock is contended */
#define _DRM_LOCK_IS_HELD(lock) ((lock) & _DRM_LOCK_HELD)
#define _DRM_LOCK_IS_CONT(lock) ((lock) & _DRM_LOCK_CONT)
#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
typedef unsigned int drm_context_t;
typedef unsigned int drm_drawable_t;
typedef unsigned int drm_magic_t;
/**
* Cliprect.
*
* \warning: If you change this structure, make sure you change
* XF86DRIClipRectRec in the server as well
*
* \note KW: Actually it's illegal to change either for
* backwards-compatibility reasons.
*/
struct drm_clip_rect {
unsigned short x1;
unsigned short y1;
unsigned short x2;
unsigned short y2;
};
/**
* Drawable information.
*/
struct drm_drawable_info {
unsigned int num_rects;
struct drm_clip_rect *rects;
};
/**
* Texture region,
*/
struct drm_tex_region {
unsigned char next;
unsigned char prev;
unsigned char in_use;
unsigned char padding;
unsigned int age;
};
/**
* Hardware lock.
*
* The lock structure is a simple cache-line aligned integer. To avoid
* processor bus contention on a multiprocessor system, there should not be any
* other data stored in the same cache line.
*/
struct drm_hw_lock {
__volatile__ unsigned int lock; /**< lock variable */
char padding[60]; /**< Pad to cache line */
};
/**
* DRM_IOCTL_VERSION ioctl argument type.
*
* \sa drmGetVersion().
*/
struct drm_version {
int version_major; /**< Major version */
int version_minor; /**< Minor version */
int version_patchlevel; /**< Patch level */
size_t name_len; /**< Length of name buffer */
char __user *name; /**< Name of driver */
size_t date_len; /**< Length of date buffer */
char __user *date; /**< User-space buffer to hold date */
size_t desc_len; /**< Length of desc buffer */
char __user *desc; /**< User-space buffer to hold desc */
};
/**
* DRM_IOCTL_GET_UNIQUE ioctl argument type.
*
* \sa drmGetBusid() and drmSetBusId().
*/
struct drm_unique {
size_t unique_len; /**< Length of unique */
char __user *unique; /**< Unique name for driver instantiation */
};
struct drm_list {
int count; /**< Length of user-space structures */
struct drm_version __user *version;
};
struct drm_block {
int unused;
};
/**
* DRM_IOCTL_CONTROL ioctl argument type.
*
* \sa drmCtlInstHandler() and drmCtlUninstHandler().
*/
struct drm_control {
enum {
DRM_ADD_COMMAND,
DRM_RM_COMMAND,
DRM_INST_HANDLER,
DRM_UNINST_HANDLER
} func;
int irq;
};
/**
* Type of memory to map.
*/
enum drm_map_type {
_DRM_FRAME_BUFFER = 0, /**< WC (no caching), no core dump */
_DRM_REGISTERS = 1, /**< no caching, no core dump */
_DRM_SHM = 2, /**< shared, cached */
_DRM_AGP = 3, /**< AGP/GART */
_DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */
_DRM_CONSISTENT = 5, /**< Consistent memory for PCI DMA */
_DRM_GEM = 6, /**< GEM object */
};
/**
* Memory mapping flags.
*/
enum drm_map_flags {
_DRM_RESTRICTED = 0x01, /**< Cannot be mapped to user-virtual */
_DRM_READ_ONLY = 0x02,
_DRM_LOCKED = 0x04, /**< shared, cached, locked */
_DRM_KERNEL = 0x08, /**< kernel requires access */
_DRM_WRITE_COMBINING = 0x10, /**< use write-combining if available */
_DRM_CONTAINS_LOCK = 0x20, /**< SHM page that contains lock */
_DRM_REMOVABLE = 0x40, /**< Removable mapping */
_DRM_DRIVER = 0x80 /**< Managed by driver */
};
struct drm_ctx_priv_map {
unsigned int ctx_id; /**< Context requesting private mapping */
void *handle; /**< Handle of map */
};
/**
* DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls
* argument type.
*
* \sa drmAddMap().
*/
struct drm_map {
unsigned long offset; /**< Requested physical address (0 for SAREA)*/
unsigned long size; /**< Requested physical size (bytes) */
enum drm_map_type type; /**< Type of memory to map */
enum drm_map_flags flags; /**< Flags */
void *handle; /**< User-space: "Handle" to pass to mmap() */
/**< Kernel-space: kernel-virtual address */
int mtrr; /**< MTRR slot used */
/* Private data */
};
/**
* DRM_IOCTL_GET_CLIENT ioctl argument type.
*/
struct drm_client {
int idx; /**< Which client desired? */
int auth; /**< Is client authenticated? */
unsigned long pid; /**< Process ID */
unsigned long uid; /**< User ID */
unsigned long magic; /**< Magic */
unsigned long iocs; /**< Ioctl count */
};
enum drm_stat_type {
_DRM_STAT_LOCK,
_DRM_STAT_OPENS,
_DRM_STAT_CLOSES,
_DRM_STAT_IOCTLS,
_DRM_STAT_LOCKS,
_DRM_STAT_UNLOCKS,
_DRM_STAT_VALUE, /**< Generic value */
_DRM_STAT_BYTE, /**< Generic byte counter (1024bytes/K) */
_DRM_STAT_COUNT, /**< Generic non-byte counter (1000/k) */
_DRM_STAT_IRQ, /**< IRQ */
_DRM_STAT_PRIMARY, /**< Primary DMA bytes */
_DRM_STAT_SECONDARY, /**< Secondary DMA bytes */
_DRM_STAT_DMA, /**< DMA */
_DRM_STAT_SPECIAL, /**< Special DMA (e.g., priority or polled) */
_DRM_STAT_MISSED /**< Missed DMA opportunity */
/* Add to the *END* of the list */
};
/**
* DRM_IOCTL_GET_STATS ioctl argument type.
*/
struct drm_stats {
unsigned long count;
struct {
unsigned long value;
enum drm_stat_type type;
} data[15];
};
/**
* Hardware locking flags.
*/
enum drm_lock_flags {
_DRM_LOCK_READY = 0x01, /**< Wait until hardware is ready for DMA */
_DRM_LOCK_QUIESCENT = 0x02, /**< Wait until hardware quiescent */
_DRM_LOCK_FLUSH = 0x04, /**< Flush this context's DMA queue first */
_DRM_LOCK_FLUSH_ALL = 0x08, /**< Flush all DMA queues first */
/* These *HALT* flags aren't supported yet
-- they will be used to support the
full-screen DGA-like mode. */
_DRM_HALT_ALL_QUEUES = 0x10, /**< Halt all current and future queues */
_DRM_HALT_CUR_QUEUES = 0x20 /**< Halt all current queues */
};
/**
* DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type.
*
* \sa drmGetLock() and drmUnlock().
*/
struct drm_lock {
int context;
enum drm_lock_flags flags;
};
/**
* DMA flags
*
* \warning
* These values \e must match xf86drm.h.
*
* \sa drm_dma.
*/
enum drm_dma_flags {
/* Flags for DMA buffer dispatch */
_DRM_DMA_BLOCK = 0x01, /**<
* Block until buffer dispatched.
*
* \note The buffer may not yet have
* been processed by the hardware --
* getting a hardware lock with the
* hardware quiescent will ensure
* that the buffer has been
* processed.
*/
_DRM_DMA_WHILE_LOCKED = 0x02, /**< Dispatch while lock held */
_DRM_DMA_PRIORITY = 0x04, /**< High priority dispatch */
/* Flags for DMA buffer request */
_DRM_DMA_WAIT = 0x10, /**< Wait for free buffers */
_DRM_DMA_SMALLER_OK = 0x20, /**< Smaller-than-requested buffers OK */
_DRM_DMA_LARGER_OK = 0x40 /**< Larger-than-requested buffers OK */
};
/**
* DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type.
*
* \sa drmAddBufs().
*/
struct drm_buf_desc {
int count; /**< Number of buffers of this size */
int size; /**< Size in bytes */
int low_mark; /**< Low water mark */
int high_mark; /**< High water mark */
enum {
_DRM_PAGE_ALIGN = 0x01, /**< Align on page boundaries for DMA */
_DRM_AGP_BUFFER = 0x02, /**< Buffer is in AGP space */
_DRM_SG_BUFFER = 0x04, /**< Scatter/gather memory buffer */
_DRM_FB_BUFFER = 0x08, /**< Buffer is in frame buffer */
_DRM_PCI_BUFFER_RO = 0x10 /**< Map PCI DMA buffer read-only */
} flags;
unsigned long agp_start; /**<
* Start address of where the AGP buffers are
* in the AGP aperture
*/
};
/**
* DRM_IOCTL_INFO_BUFS ioctl argument type.
*/
struct drm_buf_info {
int count; /**< Entries in list */
struct drm_buf_desc __user *list;
};
/**
* DRM_IOCTL_FREE_BUFS ioctl argument type.
*/
struct drm_buf_free {
int count;
int __user *list;
};
/**
* Buffer information
*
* \sa drm_buf_map.
*/
struct drm_buf_pub {
int idx; /**< Index into the master buffer list */
int total; /**< Buffer size */
int used; /**< Amount of buffer in use (for DMA) */
void __user *address; /**< Address of buffer */
};
/**
* DRM_IOCTL_MAP_BUFS ioctl argument type.
*/
struct drm_buf_map {
int count; /**< Length of the buffer list */
void __user *virtual; /**< Mmap'd area in user-virtual */
struct drm_buf_pub __user *list; /**< Buffer information */
};
/**
* DRM_IOCTL_DMA ioctl argument type.
*
* Indices here refer to the offset into the buffer list in drm_buf_get.
*
* \sa drmDMA().
*/
struct drm_dma {
int context; /**< Context handle */
int send_count; /**< Number of buffers to send */
int __user *send_indices; /**< List of handles to buffers */
int __user *send_sizes; /**< Lengths of data to send */
enum drm_dma_flags flags; /**< Flags */
int request_count; /**< Number of buffers requested */
int request_size; /**< Desired size for buffers */
int __user *request_indices; /**< Buffer information */
int __user *request_sizes;
int granted_count; /**< Number of buffers granted */
};
enum drm_ctx_flags {
_DRM_CONTEXT_PRESERVED = 0x01,
_DRM_CONTEXT_2DONLY = 0x02
};
/**
* DRM_IOCTL_ADD_CTX ioctl argument type.
*
* \sa drmCreateContext() and drmDestroyContext().
*/
struct drm_ctx {
drm_context_t handle;
enum drm_ctx_flags flags;
};
/**
* DRM_IOCTL_RES_CTX ioctl argument type.
*/
struct drm_ctx_res {
int count;
struct drm_ctx __user *contexts;
};
/**
* DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type.
*/
struct drm_draw {
drm_drawable_t handle;
};
/**
* DRM_IOCTL_UPDATE_DRAW ioctl argument type.
*/
typedef enum {
DRM_DRAWABLE_CLIPRECTS,
} drm_drawable_info_type_t;
struct drm_update_draw {
drm_drawable_t handle;
unsigned int type;
unsigned int num;
unsigned long long data;
};
/**
* DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type.
*/
struct drm_auth {
drm_magic_t magic;
};
/**
* DRM_IOCTL_IRQ_BUSID ioctl argument type.
*
* \sa drmGetInterruptFromBusID().
*/
struct drm_irq_busid {
int irq; /**< IRQ number */
int busnum; /**< bus number */
int devnum; /**< device number */
int funcnum; /**< function number */
};
enum drm_vblank_seq_type {
_DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */
_DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */
/* bits 1-6 are reserved for high crtcs */
_DRM_VBLANK_HIGH_CRTC_MASK = 0x0000003e,
_DRM_VBLANK_EVENT = 0x4000000, /**< Send event instead of blocking */
_DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */
_DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */
_DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */
_DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking, unsupported */
};
#define _DRM_VBLANK_HIGH_CRTC_SHIFT 1
#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE)
#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_EVENT | _DRM_VBLANK_SIGNAL | \
_DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)
struct drm_wait_vblank_request {
enum drm_vblank_seq_type type;
unsigned int sequence;
unsigned long signal;
};
struct drm_wait_vblank_reply {
enum drm_vblank_seq_type type;
unsigned int sequence;
long tval_sec;
long tval_usec;
};
/**
* DRM_IOCTL_WAIT_VBLANK ioctl argument type.
*
* \sa drmWaitVBlank().
*/
union drm_wait_vblank {
struct drm_wait_vblank_request request;
struct drm_wait_vblank_reply reply;
};
#define _DRM_PRE_MODESET 1
#define _DRM_POST_MODESET 2
/**
* DRM_IOCTL_MODESET_CTL ioctl argument type
*
* \sa drmModesetCtl().
*/
struct drm_modeset_ctl {
__u32 crtc;
__u32 cmd;
};
/**
* DRM_IOCTL_AGP_ENABLE ioctl argument type.
*
* \sa drmAgpEnable().
*/
struct drm_agp_mode {
unsigned long mode; /**< AGP mode */
};
/**
* DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type.
*
* \sa drmAgpAlloc() and drmAgpFree().
*/
struct drm_agp_buffer {
unsigned long size; /**< In bytes -- will round to page boundary */
unsigned long handle; /**< Used for binding / unbinding */
unsigned long type; /**< Type of memory to allocate */
unsigned long physical; /**< Physical used by i810 */
};
/**
* DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type.
*
* \sa drmAgpBind() and drmAgpUnbind().
*/
struct drm_agp_binding {
unsigned long handle; /**< From drm_agp_buffer */
unsigned long offset; /**< In bytes -- will round to page boundary */
};
/**
* DRM_IOCTL_AGP_INFO ioctl argument type.
*
* \sa drmAgpVersionMajor(), drmAgpVersionMinor(), drmAgpGetMode(),
* drmAgpBase(), drmAgpSize(), drmAgpMemoryUsed(), drmAgpMemoryAvail(),
* drmAgpVendorId() and drmAgpDeviceId().
*/
struct drm_agp_info {
int agp_version_major;
int agp_version_minor;
unsigned long mode;
unsigned long aperture_base; /* physical address */
unsigned long aperture_size; /* bytes */
unsigned long memory_allowed; /* bytes */
unsigned long memory_used;
/* PCI information */
unsigned short id_vendor;
unsigned short id_device;
};
/**
* DRM_IOCTL_SG_ALLOC ioctl argument type.
*/
struct drm_scatter_gather {
unsigned long size; /**< In bytes -- will round to page boundary */
unsigned long handle; /**< Used for mapping / unmapping */
};
/**
* DRM_IOCTL_SET_VERSION ioctl argument type.
*/
struct drm_set_version {
int drm_di_major;
int drm_di_minor;
int drm_dd_major;
int drm_dd_minor;
};
/** DRM_IOCTL_GEM_CLOSE ioctl argument type */
struct drm_gem_close {
/** Handle of the object to be closed. */
__u32 handle;
__u32 pad;
};
/** DRM_IOCTL_GEM_FLINK ioctl argument type */
struct drm_gem_flink {
/** Handle for the object being named */
__u32 handle;
/** Returned global name */
__u32 name;
};
/** DRM_IOCTL_GEM_OPEN ioctl argument type */
struct drm_gem_open {
/** Name of object being opened */
__u32 name;
/** Returned handle for the object */
__u32 handle;
/** Returned size of the object */
__u64 size;
};
/** DRM_IOCTL_GET_CAP ioctl argument type */
struct drm_get_cap {
__u64 capability;
__u64 value;
};
#define DRM_CLOEXEC O_CLOEXEC
struct drm_prime_handle {
__u32 handle;
/** Flags.. only applicable for handle->fd */
__u32 flags;
/** Returned dmabuf file descriptor */
__s32 fd;
};
#include <dev/drm2/drm_mode.h>
#define DRM_IOCTL_BASE 'd'
#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr)
#define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type)
#define DRM_IOW(nr,type) _IOW(DRM_IOCTL_BASE,nr,type)
#define DRM_IOWR(nr,type) _IOWR(DRM_IOCTL_BASE,nr,type)
#define DRM_IOCTL_VERSION DRM_IOWR(0x00, struct drm_version)
#define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, struct drm_unique)
#define DRM_IOCTL_GET_MAGIC DRM_IOR( 0x02, struct drm_auth)
#define DRM_IOCTL_IRQ_BUSID DRM_IOWR(0x03, struct drm_irq_busid)
#define DRM_IOCTL_GET_MAP DRM_IOWR(0x04, struct drm_map)
#define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, struct drm_client)
#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats)
#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version)
#define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl)
#define DRM_IOCTL_GEM_CLOSE DRM_IOW (0x09, struct drm_gem_close)
#define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0a, struct drm_gem_flink)
#define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, struct drm_gem_open)
#define DRM_IOCTL_GET_CAP DRM_IOWR(0x0c, struct drm_get_cap)
#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique)
#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth)
#define DRM_IOCTL_BLOCK DRM_IOWR(0x12, struct drm_block)
#define DRM_IOCTL_UNBLOCK DRM_IOWR(0x13, struct drm_block)
#define DRM_IOCTL_CONTROL DRM_IOW( 0x14, struct drm_control)
#define DRM_IOCTL_ADD_MAP DRM_IOWR(0x15, struct drm_map)
#define DRM_IOCTL_ADD_BUFS DRM_IOWR(0x16, struct drm_buf_desc)
#define DRM_IOCTL_MARK_BUFS DRM_IOW( 0x17, struct drm_buf_desc)
#define DRM_IOCTL_INFO_BUFS DRM_IOWR(0x18, struct drm_buf_info)
#define DRM_IOCTL_MAP_BUFS DRM_IOWR(0x19, struct drm_buf_map)
#define DRM_IOCTL_FREE_BUFS DRM_IOW( 0x1a, struct drm_buf_free)
#define DRM_IOCTL_RM_MAP DRM_IOW( 0x1b, struct drm_map)
#define DRM_IOCTL_SET_SAREA_CTX DRM_IOW( 0x1c, struct drm_ctx_priv_map)
#define DRM_IOCTL_GET_SAREA_CTX DRM_IOWR(0x1d, struct drm_ctx_priv_map)
#define DRM_IOCTL_SET_MASTER DRM_IO(0x1e)
#define DRM_IOCTL_DROP_MASTER DRM_IO(0x1f)
#define DRM_IOCTL_ADD_CTX DRM_IOWR(0x20, struct drm_ctx)
#define DRM_IOCTL_RM_CTX DRM_IOWR(0x21, struct drm_ctx)
#define DRM_IOCTL_MOD_CTX DRM_IOW( 0x22, struct drm_ctx)
#define DRM_IOCTL_GET_CTX DRM_IOWR(0x23, struct drm_ctx)
#define DRM_IOCTL_SWITCH_CTX DRM_IOW( 0x24, struct drm_ctx)
#define DRM_IOCTL_NEW_CTX DRM_IOW( 0x25, struct drm_ctx)
#define DRM_IOCTL_RES_CTX DRM_IOWR(0x26, struct drm_ctx_res)
#define DRM_IOCTL_ADD_DRAW DRM_IOWR(0x27, struct drm_draw)
#define DRM_IOCTL_RM_DRAW DRM_IOWR(0x28, struct drm_draw)
#define DRM_IOCTL_DMA DRM_IOWR(0x29, struct drm_dma)
#define DRM_IOCTL_LOCK DRM_IOW( 0x2a, struct drm_lock)
#define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, struct drm_lock)
#define DRM_IOCTL_FINISH DRM_IOW( 0x2c, struct drm_lock)
#define DRM_IOCTL_PRIME_HANDLE_TO_FD DRM_IOWR(0x2d, struct drm_prime_handle)
#define DRM_IOCTL_PRIME_FD_TO_HANDLE DRM_IOWR(0x2e, struct drm_prime_handle)
#define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30)
#define DRM_IOCTL_AGP_RELEASE DRM_IO( 0x31)
#define DRM_IOCTL_AGP_ENABLE DRM_IOW( 0x32, struct drm_agp_mode)
#define DRM_IOCTL_AGP_INFO DRM_IOR( 0x33, struct drm_agp_info)
#define DRM_IOCTL_AGP_ALLOC DRM_IOWR(0x34, struct drm_agp_buffer)
#define DRM_IOCTL_AGP_FREE DRM_IOW( 0x35, struct drm_agp_buffer)
#define DRM_IOCTL_AGP_BIND DRM_IOW( 0x36, struct drm_agp_binding)
#define DRM_IOCTL_AGP_UNBIND DRM_IOW( 0x37, struct drm_agp_binding)
#define DRM_IOCTL_SG_ALLOC DRM_IOWR(0x38, struct drm_scatter_gather)
#define DRM_IOCTL_SG_FREE DRM_IOW( 0x39, struct drm_scatter_gather)
#define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, union drm_wait_vblank)
#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw)
#define DRM_IOCTL_MODE_GETRESOURCES DRM_IOWR(0xA0, struct drm_mode_card_res)
#define DRM_IOCTL_MODE_GETCRTC DRM_IOWR(0xA1, struct drm_mode_crtc)
#define DRM_IOCTL_MODE_SETCRTC DRM_IOWR(0xA2, struct drm_mode_crtc)
#define DRM_IOCTL_MODE_CURSOR DRM_IOWR(0xA3, struct drm_mode_cursor)
#define DRM_IOCTL_MODE_GETGAMMA DRM_IOWR(0xA4, struct drm_mode_crtc_lut)
#define DRM_IOCTL_MODE_SETGAMMA DRM_IOWR(0xA5, struct drm_mode_crtc_lut)
#define DRM_IOCTL_MODE_GETENCODER DRM_IOWR(0xA6, struct drm_mode_get_encoder)
#define DRM_IOCTL_MODE_GETCONNECTOR DRM_IOWR(0xA7, struct drm_mode_get_connector)
#define DRM_IOCTL_MODE_ATTACHMODE DRM_IOWR(0xA8, struct drm_mode_mode_cmd)
#define DRM_IOCTL_MODE_DETACHMODE DRM_IOWR(0xA9, struct drm_mode_mode_cmd)
#define DRM_IOCTL_MODE_GETPROPERTY DRM_IOWR(0xAA, struct drm_mode_get_property)
#define DRM_IOCTL_MODE_SETPROPERTY DRM_IOWR(0xAB, struct drm_mode_connector_set_property)
#define DRM_IOCTL_MODE_GETPROPBLOB DRM_IOWR(0xAC, struct drm_mode_get_blob)
#define DRM_IOCTL_MODE_GETFB DRM_IOWR(0xAD, struct drm_mode_fb_cmd)
#define DRM_IOCTL_MODE_ADDFB DRM_IOWR(0xAE, struct drm_mode_fb_cmd)
#define DRM_IOCTL_MODE_RMFB DRM_IOWR(0xAF, unsigned int)
#define DRM_IOCTL_MODE_PAGE_FLIP DRM_IOWR(0xB0, struct drm_mode_crtc_page_flip)
#define DRM_IOCTL_MODE_DIRTYFB DRM_IOWR(0xB1, struct drm_mode_fb_dirty_cmd)
#define DRM_IOCTL_MODE_CREATE_DUMB DRM_IOWR(0xB2, struct drm_mode_create_dumb)
#define DRM_IOCTL_MODE_MAP_DUMB DRM_IOWR(0xB3, struct drm_mode_map_dumb)
#define DRM_IOCTL_MODE_DESTROY_DUMB DRM_IOWR(0xB4, struct drm_mode_destroy_dumb)
#define DRM_IOCTL_MODE_GETPLANERESOURCES DRM_IOWR(0xB5, struct drm_mode_get_plane_res)
#define DRM_IOCTL_MODE_GETPLANE DRM_IOWR(0xB6, struct drm_mode_get_plane)
#define DRM_IOCTL_MODE_SETPLANE DRM_IOWR(0xB7, struct drm_mode_set_plane)
#define DRM_IOCTL_MODE_ADDFB2 DRM_IOWR(0xB8, struct drm_mode_fb_cmd2)
#define DRM_IOCTL_MODE_OBJ_GETPROPERTIES DRM_IOWR(0xB9, struct drm_mode_obj_get_properties)
#define DRM_IOCTL_MODE_OBJ_SETPROPERTY DRM_IOWR(0xBA, struct drm_mode_obj_set_property)
/**
* Device specific ioctls should only be in their respective headers
* The device specific ioctl range is from 0x40 to 0x99.
* Generic IOCTLS restart at 0xA0.
*
* \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and
* drmCommandReadWrite().
*/
#define DRM_COMMAND_BASE 0x40
#define DRM_COMMAND_END 0xA0
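/*
 * Illustrative sketch, not part of the original header: a driver-private
 * ioctl is numbered relative to DRM_COMMAND_BASE.  "FOO" and its payload
 * struct are hypothetical placeholders that only show the convention.
 */
#if 0	/* example only */
#define DRM_FOO_INIT		0x00
#define DRM_IOCTL_FOO_INIT	DRM_IOWR(DRM_COMMAND_BASE + DRM_FOO_INIT, \
					 struct drm_foo_init)
#endif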
/**
* Header for events written back to userspace on the drm fd. The
* type defines the type of event, the length specifies the total
* length of the event (including the header), and user_data is
* typically a 64 bit value passed with the ioctl that triggered the
* event. A read on the drm fd will always only return complete
* events, that is, if for example the read buffer is 100 bytes, and
* there are two 64 byte events pending, only one will be returned.
*
* Event types 0 - 0x7fffffff are generic drm events, 0x80000000 and
* up are chipset specific.
*/
struct drm_event {
__u32 type;
__u32 length;
};
#define DRM_EVENT_VBLANK 0x01
#define DRM_EVENT_FLIP_COMPLETE 0x02
struct drm_event_vblank {
struct drm_event base;
__u64 user_data;
__u32 tv_sec;
__u32 tv_usec;
__u32 sequence;
__u32 reserved;
};
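/*
 * Illustrative userspace sketch, not part of the original header: walking
 * the events returned by a read() on the drm fd.  Each iteration advances
 * by drm_event::length, which includes the header itself.
 */
#if 0	/* example only */
static void example_read_events(int fd)
{
	char buf[4096];
	ssize_t len = read(fd, buf, sizeof(buf));	/* whole events only */
	ssize_t off = 0;

	while (len > 0 && off + (ssize_t)sizeof(struct drm_event) <= len) {
		const struct drm_event *e = (const void *)(buf + off);

		if (e->type == DRM_EVENT_VBLANK ||
		    e->type == DRM_EVENT_FLIP_COMPLETE) {
			const struct drm_event_vblank *vb = (const void *)e;
			/* vb->sequence, vb->tv_sec/tv_usec and vb->user_data
			 * identify which request completed and when. */
			(void)vb;
		}
		off += e->length;
	}
}
#endif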
#define DRM_CAP_DUMB_BUFFER 0x1
#define DRM_CAP_VBLANK_HIGH_CRTC 0x2
#define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3
#define DRM_CAP_DUMB_PREFER_SHADOW 0x4
#define DRM_CAP_PRIME 0x5
#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6
#define DRM_PRIME_CAP_IMPORT 0x1
#define DRM_PRIME_CAP_EXPORT 0x2
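/*
 * Illustrative userspace sketch, not part of the original header: probing
 * one of the DRM_CAP_* capabilities with DRM_IOCTL_GET_CAP.
 */
#if 0	/* example only */
static int example_supports_dumb_buffers(int fd)
{
	struct drm_get_cap cap = { .capability = DRM_CAP_DUMB_BUFFER };

	if (ioctl(fd, DRM_IOCTL_GET_CAP, &cap) != 0)
		return 0;
	return cap.value != 0;
}
#endif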
/* typedef area */
#ifndef __KERNEL__
typedef struct drm_clip_rect drm_clip_rect_t;
typedef struct drm_drawable_info drm_drawable_info_t;
typedef struct drm_tex_region drm_tex_region_t;
typedef struct drm_hw_lock drm_hw_lock_t;
typedef struct drm_version drm_version_t;
typedef struct drm_unique drm_unique_t;
typedef struct drm_list drm_list_t;
typedef struct drm_block drm_block_t;
typedef struct drm_control drm_control_t;
typedef enum drm_map_type drm_map_type_t;
typedef enum drm_map_flags drm_map_flags_t;
typedef struct drm_ctx_priv_map drm_ctx_priv_map_t;
typedef struct drm_map drm_map_t;
typedef struct drm_client drm_client_t;
typedef enum drm_stat_type drm_stat_type_t;
typedef struct drm_stats drm_stats_t;
typedef enum drm_lock_flags drm_lock_flags_t;
typedef struct drm_lock drm_lock_t;
typedef enum drm_dma_flags drm_dma_flags_t;
typedef struct drm_buf_desc drm_buf_desc_t;
typedef struct drm_buf_info drm_buf_info_t;
typedef struct drm_buf_free drm_buf_free_t;
typedef struct drm_buf_pub drm_buf_pub_t;
typedef struct drm_buf_map drm_buf_map_t;
typedef struct drm_dma drm_dma_t;
typedef union drm_wait_vblank drm_wait_vblank_t;
typedef struct drm_agp_mode drm_agp_mode_t;
typedef enum drm_ctx_flags drm_ctx_flags_t;
typedef struct drm_ctx drm_ctx_t;
typedef struct drm_ctx_res drm_ctx_res_t;
typedef struct drm_draw drm_draw_t;
typedef struct drm_update_draw drm_update_draw_t;
typedef struct drm_auth drm_auth_t;
typedef struct drm_irq_busid drm_irq_busid_t;
typedef enum drm_vblank_seq_type drm_vblank_seq_type_t;
typedef struct drm_agp_buffer drm_agp_buffer_t;
typedef struct drm_agp_binding drm_agp_binding_t;
typedef struct drm_agp_info drm_agp_info_t;
typedef struct drm_scatter_gather drm_scatter_gather_t;
typedef struct drm_set_version drm_set_version_t;
#endif
#endif

File diff suppressed because it is too large


@ -1,468 +0,0 @@
/**
* \file drm_agpsupport.c
* DRM support for AGP/GART backend
*
* \author Rickard E. (Rik) Faith <faith@valinux.com>
* \author Gareth Hughes <gareth@valinux.com>
*/
/*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <dev/drm2/drmP.h>
#if __OS_HAS_AGP
/**
* Get AGP information.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
 * \param arg pointer to an (output) drm_agp_info structure.
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device has been initialized and acquired and fills in the
* drm_agp_info structure with the information in drm_agp_head::agp_info.
*/
int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info)
{
DRM_AGP_KERN *kern;
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
kern = &dev->agp->agp_info;
agp_get_info(dev->agp->bridge, kern);
info->agp_version_major = 1;
info->agp_version_minor = 0;
info->mode = kern->ai_mode;
info->aperture_base = kern->ai_aperture_base;
info->aperture_size = kern->ai_aperture_size;
info->memory_allowed = kern->ai_memory_allowed;
info->memory_used = kern->ai_memory_used;
info->id_vendor = kern->ai_devid & 0xffff;
info->id_device = kern->ai_devid >> 16;
return 0;
}
EXPORT_SYMBOL(drm_agp_info);
int drm_agp_info_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_agp_info *info = data;
int err;
err = drm_agp_info(dev, info);
if (err)
return err;
return 0;
}
/**
* Acquire the AGP device.
*
* \param dev DRM device that is to acquire AGP.
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device hasn't been acquired before and calls
* \c agp_backend_acquire.
*/
int drm_agp_acquire(struct drm_device * dev)
{
int retcode;
if (!dev->agp)
return -ENODEV;
if (dev->agp->acquired)
return -EBUSY;
retcode = agp_acquire(dev->agp->bridge);
if (retcode)
return -retcode;
dev->agp->acquired = 1;
return 0;
}
EXPORT_SYMBOL(drm_agp_acquire);
/**
* Acquire the AGP device (ioctl).
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument.
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device hasn't been acquired before and calls
* \c agp_backend_acquire.
*/
int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
return drm_agp_acquire((struct drm_device *) file_priv->minor->dev);
}
/**
* Release the AGP device.
*
* \param dev DRM device that is to release AGP.
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device has been acquired and calls \c agp_backend_release.
*/
int drm_agp_release(struct drm_device * dev)
{
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
agp_release(dev->agp->bridge);
dev->agp->acquired = 0;
return 0;
}
EXPORT_SYMBOL(drm_agp_release);
int drm_agp_release_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
return drm_agp_release(dev);
}
/**
* Enable the AGP bus.
*
* \param dev DRM device that has previously acquired AGP.
* \param mode Requested AGP mode.
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device has been acquired but not enabled, and calls
* \c agp_enable.
*/
int drm_agp_enable(struct drm_device * dev, struct drm_agp_mode mode)
{
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
dev->agp->mode = mode.mode;
agp_enable(dev->agp->bridge, mode.mode);
dev->agp->enabled = 1;
return 0;
}
EXPORT_SYMBOL(drm_agp_enable);
int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_agp_mode *mode = data;
return drm_agp_enable(dev, *mode);
}
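/*
 * Illustrative sketch, not part of the original file: the order in which a
 * driver typically uses the helpers above -- acquire the bridge, query it,
 * then enable a mode.  drm_agp_release() on the error paths is omitted.
 */
#if 0	/* example only */
static int example_agp_setup(struct drm_device *dev)
{
	struct drm_agp_info info;
	struct drm_agp_mode mode;
	int ret;

	ret = drm_agp_acquire(dev);
	if (ret)
		return ret;
	ret = drm_agp_info(dev, &info);
	if (ret)
		return ret;
	mode.mode = info.mode;		/* reuse the mode reported by the bridge */
	return drm_agp_enable(dev, mode);
}
#endif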
/**
* Allocate AGP memory.
*
* \param inode device inode.
* \param file_priv file private pointer.
* \param cmd command.
* \param arg pointer to a drm_agp_buffer structure.
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device is present and has been acquired, allocates the
* memory via agp_allocate_memory() and creates a drm_agp_mem entry for it.
*/
int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
{
struct drm_agp_mem *entry;
DRM_AGP_MEM *memory;
unsigned long pages;
u32 type;
struct agp_memory_info info;
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
if (!(entry = malloc(sizeof(*entry), DRM_MEM_AGPLISTS, M_NOWAIT)))
return -ENOMEM;
memset(entry, 0, sizeof(*entry));
pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
type = (u32) request->type;
if (!(memory = agp_alloc_memory(dev->agp->bridge, type, pages << PAGE_SHIFT))) {
free(entry, DRM_MEM_AGPLISTS);
return -ENOMEM;
}
entry->handle = (unsigned long)memory;
entry->memory = memory;
entry->bound = 0;
entry->pages = pages;
list_add(&entry->head, &dev->agp->memory);
agp_memory_info(dev->agp->bridge, entry->memory, &info);
request->handle = entry->handle;
request->physical = info.ami_physical;
return 0;
}
EXPORT_SYMBOL(drm_agp_alloc);
int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_agp_buffer *request = data;
return drm_agp_alloc(dev, request);
}
/**
* Search for the AGP memory entry associated with a handle.
*
* \param dev DRM device structure.
* \param handle AGP memory handle.
* \return pointer to the drm_agp_mem structure associated with \p handle.
*
* Walks through drm_agp_head::memory until finding a matching handle.
*/
static struct drm_agp_mem *drm_agp_lookup_entry(struct drm_device * dev,
unsigned long handle)
{
struct drm_agp_mem *entry;
list_for_each_entry(entry, &dev->agp->memory, head) {
if (entry->handle == handle)
return entry;
}
return NULL;
}
/**
* Unbind AGP memory from the GATT (ioctl).
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg pointer to a drm_agp_binding structure.
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device is present and acquired, looks-up the AGP memory
* entry and passes it to the unbind_agp() function.
*/
int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request)
{
struct drm_agp_mem *entry;
int ret;
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
return -EINVAL;
if (!entry->bound)
return -EINVAL;
ret = drm_unbind_agp(entry->memory);
if (ret == 0)
entry->bound = 0;
return ret;
}
EXPORT_SYMBOL(drm_agp_unbind);
int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_agp_binding *request = data;
return drm_agp_unbind(dev, request);
}
/**
* Bind AGP memory into the GATT (ioctl)
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg pointer to a drm_agp_binding structure.
* \return zero on success or a negative number on failure.
*
 * Verifies the AGP device is present and has been acquired and that the memory
 * is not currently bound into the GATT. Looks up the AGP memory entry and passes
 * it to the bind_agp() function.
*/
int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request)
{
struct drm_agp_mem *entry;
int retcode;
int page;
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
return -EINVAL;
if (entry->bound)
return -EINVAL;
page = (request->offset + PAGE_SIZE - 1) / PAGE_SIZE;
if ((retcode = drm_bind_agp(entry->memory, page)))
return retcode;
entry->bound = dev->agp->base + (page << PAGE_SHIFT);
DRM_DEBUG("base = 0x%lx entry->bound = 0x%lx\n",
dev->agp->base, entry->bound);
return 0;
}
EXPORT_SYMBOL(drm_agp_bind);
int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_agp_binding *request = data;
return drm_agp_bind(dev, request);
}
/**
* Free AGP memory (ioctl).
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg pointer to a drm_agp_buffer structure.
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device is present and has been acquired and looks up the
 * AGP memory entry. If the memory is currently bound, unbinds it via
 * unbind_agp(). Frees it via free_agp(), as well as the entry itself,
 * and unlinks it from the doubly linked list it is inserted in.
*/
int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request)
{
struct drm_agp_mem *entry;
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
return -EINVAL;
if (entry->bound)
drm_unbind_agp(entry->memory);
list_del(&entry->head);
drm_free_agp(entry->memory, entry->pages);
free(entry, DRM_MEM_AGPLISTS);
return 0;
}
EXPORT_SYMBOL(drm_agp_free);
int drm_agp_free_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_agp_buffer *request = data;
return drm_agp_free(dev, request);
}
/**
* Initialize the AGP resources.
*
* \return pointer to a drm_agp_head structure.
*
* Gets the drm_agp_t structure which is made available by the agpgart module
* via the inter_module_* functions. Creates and initializes a drm_agp_head
* structure.
*/
struct drm_agp_head *drm_agp_init(struct drm_device *dev)
{
struct drm_agp_head *head = NULL;
if (!(head = malloc(sizeof(*head), DRM_MEM_AGPLISTS, M_NOWAIT)))
return NULL;
memset((void *)head, 0, sizeof(*head));
head->bridge = agp_find_device();
if (!head->bridge) {
free(head, DRM_MEM_AGPLISTS);
return NULL;
} else {
agp_get_info(head->bridge, &head->agp_info);
}
INIT_LIST_HEAD(&head->memory);
head->cant_use_aperture = 0;
head->base = head->agp_info.ai_aperture_base;
return head;
}
#ifdef FREEBSD_NOTYET
/**
* Binds a collection of pages into AGP memory at the given offset, returning
* the AGP memory structure containing them.
*
* No reference is held on the pages during this time -- it is up to the
* caller to handle that.
*/
DRM_AGP_MEM *
drm_agp_bind_pages(struct drm_device *dev,
struct page **pages,
unsigned long num_pages,
uint32_t gtt_offset,
u32 type)
{
DRM_AGP_MEM *mem;
int ret, i;
DRM_DEBUG("\n");
mem = agp_allocate_memory(dev->agp->bridge, num_pages,
type);
if (mem == NULL) {
DRM_ERROR("Failed to allocate memory for %ld pages\n",
num_pages);
return NULL;
}
for (i = 0; i < num_pages; i++)
mem->pages[i] = pages[i];
mem->page_count = num_pages;
mem->is_flushed = true;
ret = agp_bind_memory(mem, gtt_offset / PAGE_SIZE);
if (ret != 0) {
DRM_ERROR("Failed to bind AGP memory: %d\n", ret);
agp_free_memory(mem);
return NULL;
}
return mem;
}
EXPORT_SYMBOL(drm_agp_bind_pages);
#endif /* FREEBSD_NOTYET */
#endif /* __OS_HAS_AGP */


@ -1,96 +0,0 @@
/**
* \file drm_atomic.h
* Atomic operations used in the DRM which may or may not be provided by the OS.
*
* \author Eric Anholt <anholt@FreeBSD.org>
*/
/*-
* Copyright 2004 Eric Anholt
* Copyright 2013 Jung-uk Kim <jkim@FreeBSD.org>
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
typedef u_int atomic_t;
typedef uint64_t atomic64_t;
#define NB_BITS_PER_LONG (sizeof(long) * NBBY)
#define BITS_TO_LONGS(x) howmany(x, NB_BITS_PER_LONG)
#define atomic_read(p) atomic_load_acq_int(p)
#define atomic_set(p, v) atomic_store_rel_int(p, v)
#define atomic64_read(p) atomic_load_acq_64(p)
#define atomic64_set(p, v) atomic_store_rel_64(p, v)
#define atomic_add(v, p) atomic_add_int(p, v)
#define atomic_sub(v, p) atomic_subtract_int(p, v)
#define atomic_inc(p) atomic_add(1, p)
#define atomic_dec(p) atomic_sub(1, p)
#define atomic_add_return(v, p) (atomic_fetchadd_int(p, v) + (v))
#define atomic_sub_return(v, p) (atomic_fetchadd_int(p, -(v)) - (v))
#define atomic_inc_return(p) atomic_add_return(1, p)
#define atomic_dec_return(p) atomic_sub_return(1, p)
#define atomic_add_and_test(v, p) (atomic_add_return(v, p) == 0)
#define atomic_sub_and_test(v, p) (atomic_sub_return(v, p) == 0)
#define atomic_inc_and_test(p) (atomic_inc_return(p) == 0)
#define atomic_dec_and_test(p) (atomic_dec_return(p) == 0)
#define atomic_xchg(p, v) atomic_swap_int(p, v)
#define atomic64_xchg(p, v) atomic_swap_64(p, v)
#define __bit_word(b) ((b) / NB_BITS_PER_LONG)
#define __bit_mask(b) (1UL << (b) % NB_BITS_PER_LONG)
#define __bit_addr(p, b) ((volatile u_long *)(p) + __bit_word(b))
#define clear_bit(b, p) \
atomic_clear_long(__bit_addr(p, b), __bit_mask(b))
#define set_bit(b, p) \
atomic_set_long(__bit_addr(p, b), __bit_mask(b))
#define test_bit(b, p) \
((*__bit_addr(p, b) & __bit_mask(b)) != 0)
#define test_and_set_bit(b, p) \
(atomic_xchg((p), 1) != b)
#define cmpxchg(ptr, old, new) \
(atomic_cmpset_int((volatile u_int *)(ptr),(old),(new)) ? (old) : (0))
#define atomic_inc_not_zero(p) atomic_inc(p)
#define atomic_clear_mask(b, p) atomic_clear_int((p), (b))
static __inline u_long
find_first_zero_bit(const u_long *p, u_long max)
{
u_long i, n;
KASSERT(max % NB_BITS_PER_LONG == 0, ("invalid bitmap size %lu", max));
for (i = 0; i < max / NB_BITS_PER_LONG; i++) {
n = ~p[i];
if (n != 0)
return (i * NB_BITS_PER_LONG + ffsl(n) - 1);
}
return (max);
}
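/*
 * Illustrative sketch, not part of the original header: a reference count
 * built on the wrappers above.  "struct example_obj" is a hypothetical
 * placeholder; only the atomic_inc()/atomic_dec_and_test() pattern matters.
 */
#if 0	/* example only */
struct example_obj {
	atomic_t refcount;
};

static inline void example_obj_get(struct example_obj *obj)
{
	atomic_inc(&obj->refcount);
}

static inline void example_obj_put(struct example_obj *obj)
{
	if (atomic_dec_and_test(&obj->refcount)) {
		/* last reference dropped; tear the object down here */
	}
}
#endif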


@ -1,216 +0,0 @@
/**
* \file drm_auth.c
* IOCTLs for authentication
*
* \author Rickard E. (Rik) Faith <faith@valinux.com>
* \author Gareth Hughes <gareth@valinux.com>
*/
/*
* Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <dev/drm2/drmP.h>
static struct mtx drm_magic_lock;
/**
* Find the file with the given magic number.
*
* \param dev DRM device.
* \param magic magic number.
*
* Searches in drm_device::magiclist within all files with the same hash key
* the one with matching magic number, while holding the drm_device::struct_mutex
* lock.
*/
static struct drm_file *drm_find_file(struct drm_master *master, drm_magic_t magic)
{
struct drm_file *retval = NULL;
struct drm_magic_entry *pt;
struct drm_hash_item *hash;
struct drm_device *dev = master->minor->dev;
DRM_LOCK(dev);
if (!drm_ht_find_item(&master->magiclist, (unsigned long)magic, &hash)) {
pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item);
retval = pt->priv;
}
DRM_UNLOCK(dev);
return retval;
}
/**
* Adds a magic number.
*
* \param dev DRM device.
* \param priv file private data.
* \param magic magic number.
*
 * Creates a drm_magic_entry structure and appends it to the linked list
 * associated with the magic number hash key in drm_device::magiclist, while holding
* the drm_device::struct_mutex lock.
*/
static int drm_add_magic(struct drm_master *master, struct drm_file *priv,
drm_magic_t magic)
{
struct drm_magic_entry *entry;
struct drm_device *dev = master->minor->dev;
DRM_DEBUG("%d\n", magic);
entry = malloc(sizeof(*entry), DRM_MEM_MAGIC, M_ZERO | M_NOWAIT);
if (!entry)
return -ENOMEM;
entry->priv = priv;
entry->hash_item.key = (unsigned long)magic;
DRM_LOCK(dev);
drm_ht_insert_item(&master->magiclist, &entry->hash_item);
list_add_tail(&entry->head, &master->magicfree);
DRM_UNLOCK(dev);
return 0;
}
/**
* Remove a magic number.
*
* \param dev DRM device.
* \param magic magic number.
*
* Searches and unlinks the entry in drm_device::magiclist with the magic
* number hash key, while holding the drm_device::struct_mutex lock.
*/
int drm_remove_magic(struct drm_master *master, drm_magic_t magic)
{
struct drm_magic_entry *pt;
struct drm_hash_item *hash;
struct drm_device *dev = master->minor->dev;
DRM_DEBUG("%d\n", magic);
DRM_LOCK(dev);
if (drm_ht_find_item(&master->magiclist, (unsigned long)magic, &hash)) {
DRM_UNLOCK(dev);
return -EINVAL;
}
pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item);
drm_ht_remove_item(&master->magiclist, hash);
list_del(&pt->head);
DRM_UNLOCK(dev);
free(pt, DRM_MEM_MAGIC);
return 0;
}
/**
* Get a unique magic number (ioctl).
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg pointer to a resulting drm_auth structure.
* \return zero on success, or a negative number on failure.
*
 * If there is a magic number in drm_file::magic then use it, otherwise
 * search for a unique, non-zero magic number and add it, associating it with \p
 * file_priv.
* This ioctl needs protection by the drm_global_mutex, which protects
* struct drm_file::magic and struct drm_magic_entry::priv.
*/
int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
static drm_magic_t sequence = 0;
struct drm_auth *auth = data;
/* Find unique magic */
if (file_priv->magic) {
auth->magic = file_priv->magic;
} else {
do {
mtx_lock(&drm_magic_lock);
if (!sequence)
++sequence; /* reserve 0 */
auth->magic = sequence++;
mtx_unlock(&drm_magic_lock);
} while (drm_find_file(file_priv->master, auth->magic));
file_priv->magic = auth->magic;
drm_add_magic(file_priv->master, file_priv, auth->magic);
}
DRM_DEBUG("%u\n", auth->magic);
return 0;
}
/**
* Authenticate with a magic.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg pointer to a drm_auth structure.
 * \return zero if authentication succeeded, or a negative number otherwise.
*
* Checks if \p file_priv is associated with the magic number passed in \arg.
* This ioctl needs protection by the drm_global_mutex, which protects
* struct drm_file::magic and struct drm_magic_entry::priv.
*/
int drm_authmagic(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_auth *auth = data;
struct drm_file *file;
DRM_DEBUG("%u\n", auth->magic);
if ((file = drm_find_file(file_priv->master, auth->magic))) {
file->authenticated = 1;
drm_remove_magic(file_priv->master, auth->magic);
return 0;
}
return -EINVAL;
}
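/*
 * Illustrative userspace sketch, not part of the original file: the classic
 * DRI authentication handshake served by the two ioctls above.  A client
 * fetches a magic with DRM_IOCTL_GET_MAGIC and forwards it (normally via the
 * display server) to the DRM master, which validates it with
 * DRM_IOCTL_AUTH_MAGIC.
 */
#if 0	/* example only */
static int example_authenticate(int client_fd, int master_fd)
{
	struct drm_auth auth;

	if (ioctl(client_fd, DRM_IOCTL_GET_MAGIC, &auth) != 0)
		return -1;
	/* ... auth.magic travels over some out-of-band channel ... */
	return ioctl(master_fd, DRM_IOCTL_AUTH_MAGIC, &auth);
}
#endif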
static int
drm_magic_init(void *arg)
{
mtx_init(&drm_magic_lock, "drm_getmagic__lock", NULL, MTX_DEF);
return (0);
}
static void
drm_magic_fini(void *arg)
{
mtx_destroy(&drm_magic_lock);
}
SYSINIT(drm_magic_init, SI_SUB_KLD, SI_ORDER_MIDDLE, drm_magic_init, NULL);
SYSUNINIT(drm_magic_fini, SI_SUB_KLD, SI_ORDER_MIDDLE, drm_magic_fini, NULL);


@ -1,187 +0,0 @@
/**************************************************************************
*
* Copyright 2010 Pauli Nieminen.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*
**************************************************************************/
/*
 * Multipart buffer for copying data which is larger than the page size.
*
* Authors:
* Pauli Nieminen <suokkos-at-gmail-dot-com>
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <dev/drm2/drm_buffer.h>
/**
* Allocate the drm buffer object.
*
* buf: Pointer to a pointer where the object is stored.
* size: The number of bytes to allocate.
*/
int drm_buffer_alloc(struct drm_buffer **buf, int size)
{
int nr_pages = size / PAGE_SIZE + 1;
int idx;
/* Allocating pointer table to end of structure makes drm_buffer
* variable sized */
*buf = malloc(sizeof(struct drm_buffer) + nr_pages*sizeof(char *),
DRM_MEM_DRIVER, M_ZERO | M_WAITOK);
if (*buf == NULL) {
DRM_ERROR("Failed to allocate drm buffer object to hold"
" %d bytes in %d pages.\n",
size, nr_pages);
return -ENOMEM;
}
(*buf)->size = size;
for (idx = 0; idx < nr_pages; ++idx) {
(*buf)->data[idx] =
malloc(min(PAGE_SIZE, size - idx * PAGE_SIZE),
DRM_MEM_DRIVER, M_WAITOK);
if ((*buf)->data[idx] == NULL) {
DRM_ERROR("Failed to allocate %dth page for drm"
" buffer with %d bytes and %d pages.\n",
idx + 1, size, nr_pages);
goto error_out;
}
}
return 0;
error_out:
/* Only last element can be null pointer so check for it first. */
if ((*buf)->data[idx])
free((*buf)->data[idx], DRM_MEM_DRIVER);
for (--idx; idx >= 0; --idx)
free((*buf)->data[idx], DRM_MEM_DRIVER);
free(*buf, DRM_MEM_DRIVER);
return -ENOMEM;
}
EXPORT_SYMBOL(drm_buffer_alloc);
/**
 * Copy the user data to the beginning of the buffer and reset the processing
 * iterator.
 *
 * user_data: A pointer to the data that is copied to the buffer.
 * size: The number of bytes to copy.
*/
int drm_buffer_copy_from_user(struct drm_buffer *buf,
void __user *user_data, int size)
{
int nr_pages = size / PAGE_SIZE + 1;
int idx;
if (size > buf->size) {
DRM_ERROR("Requesting to copy %d bytes to a drm buffer with"
" %d bytes space\n",
size, buf->size);
return -EFAULT;
}
for (idx = 0; idx < nr_pages; ++idx) {
if (DRM_COPY_FROM_USER(buf->data[idx],
(char *)user_data + idx * PAGE_SIZE,
min(PAGE_SIZE, size - idx * PAGE_SIZE))) {
DRM_ERROR("Failed to copy user data (%p) to drm buffer"
" (%p) %dth page.\n",
user_data, buf, idx);
return -EFAULT;
}
}
buf->iterator = 0;
return 0;
}
EXPORT_SYMBOL(drm_buffer_copy_from_user);
/**
* Free the drm buffer object
*/
void drm_buffer_free(struct drm_buffer *buf)
{
if (buf != NULL) {
int nr_pages = buf->size / PAGE_SIZE + 1;
int idx;
for (idx = 0; idx < nr_pages; ++idx)
free(buf->data[idx], DRM_MEM_DRIVER);
free(buf, DRM_MEM_DRIVER);
}
}
EXPORT_SYMBOL(drm_buffer_free);
/**
 * Read an object from a buffer that may be split into multiple parts. If the
 * object is not split, the function just returns a pointer to the object in the
 * buffer. If the object is split, its data is copied into the stack object
 * supplied by the caller.
*
* The processing location of the buffer is also advanced to the next byte
* after the object.
*
 * objsize: The size of the object in bytes.
* stack_obj: A pointer to a memory location where object can be copied.
*/
void *drm_buffer_read_object(struct drm_buffer *buf,
int objsize, void *stack_obj)
{
int idx = drm_buffer_index(buf);
int page = drm_buffer_page(buf);
void *obj = NULL;
if (idx + objsize <= PAGE_SIZE) {
obj = &buf->data[page][idx];
} else {
/* The object is split which forces copy to temporary object.*/
int beginsz = PAGE_SIZE - idx;
memcpy(stack_obj, &buf->data[page][idx], beginsz);
memcpy((char *)stack_obj + beginsz, &buf->data[page + 1][0],
objsize - beginsz);
obj = stack_obj;
}
drm_buffer_advance(buf, objsize);
return obj;
}
EXPORT_SYMBOL(drm_buffer_read_object);
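/*
 * Illustrative sketch, not part of the original file: typical use of the
 * multipart buffer -- copy a user-space command stream in, then walk it
 * record by record.  "struct example_cmd" is a hypothetical fixed-size
 * record type standing in for whatever the caller expects.
 */
#if 0	/* example only */
static int example_process_cmds(void __user *user_data, int size)
{
	struct drm_buffer *buf;
	struct example_cmd cmd, *c;
	int ret;

	ret = drm_buffer_alloc(&buf, size);
	if (ret)
		return ret;
	ret = drm_buffer_copy_from_user(buf, user_data, size);
	if (ret)
		goto out;
	while (drm_buffer_unprocessed(buf) >= (int)sizeof(cmd)) {
		c = drm_buffer_read_object(buf, sizeof(cmd), &cmd);
		/* ... act on *c; it may point into buf or at the stack copy ... */
	}
out:
	drm_buffer_free(buf);
	return ret;
}
#endif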


@ -1,151 +0,0 @@
/**************************************************************************
*
* Copyright 2010 Pauli Nieminen.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*
**************************************************************************/
/*
 * Multipart buffer for copying data which is larger than the page size.
*
* Authors:
* Pauli Nieminen <suokkos-at-gmail-dot-com>
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#ifndef _DRM_BUFFER_H_
#define _DRM_BUFFER_H_
#include <dev/drm2/drmP.h>
struct drm_buffer {
int iterator;
int size;
char *data[];
};
/**
* Return the index of page that buffer is currently pointing at.
*/
static inline int drm_buffer_page(struct drm_buffer *buf)
{
return buf->iterator / PAGE_SIZE;
}
/**
* Return the index of the current byte in the page
*/
static inline int drm_buffer_index(struct drm_buffer *buf)
{
return buf->iterator & (PAGE_SIZE - 1);
}
/**
* Return number of bytes that is left to process
*/
static inline int drm_buffer_unprocessed(struct drm_buffer *buf)
{
return buf->size - buf->iterator;
}
/**
* Advance the buffer iterator number of bytes that is given.
*/
static inline void drm_buffer_advance(struct drm_buffer *buf, int bytes)
{
buf->iterator += bytes;
}
/**
* Allocate the drm buffer object.
*
* buf: A pointer to a pointer where the object is stored.
* size: The number of bytes to allocate.
*/
extern int drm_buffer_alloc(struct drm_buffer **buf, int size);
/**
 * Copy the user data to the beginning of the buffer and reset the processing
 * iterator.
 *
 * user_data: A pointer to the data that is copied to the buffer.
 * size: The number of bytes to copy.
*/
extern int drm_buffer_copy_from_user(struct drm_buffer *buf,
void __user *user_data, int size);
/**
* Free the drm buffer object
*/
extern void drm_buffer_free(struct drm_buffer *buf);
/**
 * Read an object from a buffer that may be split into multiple parts. If the
 * object is not split, the function just returns a pointer to the object in the
 * buffer. If the object is split, its data is copied into the stack object
 * supplied by the caller.
*
* The processing location of the buffer is also advanced to the next byte
* after the object.
*
 * objsize: The size of the object in bytes.
* stack_obj: A pointer to a memory location where object can be copied.
*/
extern void *drm_buffer_read_object(struct drm_buffer *buf,
int objsize, void *stack_obj);
/**
 * Returns a pointer to the dword which is offset dwords from the
 * current processing location.
 *
 * The caller must make sure that the dword is not split in the buffer. This
 * requirement is easily met if all the sizes of objects in the buffer are
 * multiples of the dword size and PAGE_SIZE is a multiple of the dword size.
 *
 * A call to this function doesn't change the processing location.
 *
 * offset: The index of the dword relative to the internal iterator.
*/
static inline void *drm_buffer_pointer_to_dword(struct drm_buffer *buffer,
int offset)
{
int iter = buffer->iterator + offset * 4;
return &buffer->data[iter / PAGE_SIZE][iter & (PAGE_SIZE - 1)];
}
/**
 * Returns a pointer to the byte which is offset bytes from
 * the current processing location.
 *
 * A call to this function doesn't change the processing location.
 *
 * offset: The index of the byte relative to the internal iterator.
*/
static inline void *drm_buffer_pointer_to_byte(struct drm_buffer *buffer,
int offset)
{
int iter = buffer->iterator + offset;
return &buffer->data[iter / PAGE_SIZE][iter & (PAGE_SIZE - 1)];
}
#endif

File diff suppressed because it is too large


@ -1,494 +0,0 @@
/**
* \file drm_context.c
* IOCTLs for generic contexts
*
* \author Rickard E. (Rik) Faith <faith@valinux.com>
* \author Gareth Hughes <gareth@valinux.com>
*/
/*
* Created: Fri Nov 24 18:31:37 2000 by gareth@valinux.com
*
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
* ChangeLog:
* 2001-11-16 Torsten Duwe <duwe@caldera.de>
* added context constructor/destructor hooks,
* needed by SiS driver's memory management.
*/
#include <dev/drm2/drmP.h>
/******************************************************************/
/** \name Context bitmap support */
/*@{*/
/**
* Free a handle from the context bitmap.
*
* \param dev DRM device.
* \param ctx_handle context handle.
*
* Clears the bit specified by \p ctx_handle in drm_device::ctx_bitmap and the entry
* in drm_device::ctx_idr, while holding the drm_device::struct_mutex
* lock.
*/
void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
{
if (ctx_handle < 0 || ctx_handle >= DRM_MAX_CTXBITMAP ||
dev->ctx_bitmap == NULL) {
DRM_ERROR("Attempt to free invalid context handle: %d\n",
ctx_handle);
return;
}
DRM_LOCK(dev);
clear_bit(ctx_handle, dev->ctx_bitmap);
dev->context_sareas[ctx_handle] = NULL;
DRM_UNLOCK(dev);
}
/**
* Context bitmap allocation.
*
* \param dev DRM device.
* \return (non-negative) context handle on success or a negative number on failure.
*
* Allocate a new idr from drm_device::ctx_idr while holding the
* drm_device::struct_mutex lock.
*/
static int drm_ctxbitmap_next(struct drm_device * dev)
{
int bit;
if (dev->ctx_bitmap == NULL)
return -1;
DRM_LOCK(dev);
bit = find_first_zero_bit(dev->ctx_bitmap, DRM_MAX_CTXBITMAP);
if (bit >= DRM_MAX_CTXBITMAP) {
DRM_UNLOCK(dev);
return -1;
}
set_bit(bit, dev->ctx_bitmap);
DRM_DEBUG("bit : %d\n", bit);
if ((bit+1) > dev->max_context) {
struct drm_local_map **ctx_sareas;
int max_ctx = (bit+1);
ctx_sareas = realloc(dev->context_sareas,
max_ctx * sizeof(*dev->context_sareas),
DRM_MEM_SAREA, M_NOWAIT);
if (ctx_sareas == NULL) {
clear_bit(bit, dev->ctx_bitmap);
DRM_DEBUG("failed to allocate bit : %d\n", bit);
DRM_UNLOCK(dev);
return -1;
}
dev->max_context = max_ctx;
dev->context_sareas = ctx_sareas;
dev->context_sareas[bit] = NULL;
}
DRM_UNLOCK(dev);
return bit;
}
/**
* Context bitmap initialization.
*
* \param dev DRM device.
*
* Initialise the drm_device::ctx_idr
*/
int drm_ctxbitmap_init(struct drm_device * dev)
{
int i;
int temp;
DRM_LOCK(dev);
dev->ctx_bitmap = malloc(PAGE_SIZE, DRM_MEM_CTXBITMAP,
M_NOWAIT | M_ZERO);
if (dev->ctx_bitmap == NULL) {
DRM_UNLOCK(dev);
return ENOMEM;
}
dev->context_sareas = NULL;
dev->max_context = -1;
DRM_UNLOCK(dev);
for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
temp = drm_ctxbitmap_next(dev);
DRM_DEBUG("drm_ctxbitmap_init : %d\n", temp);
}
return 0;
}
/**
* Context bitmap cleanup.
*
* \param dev DRM device.
*
* Free all idr members using drm_ctx_sarea_free helper function
* while holding the drm_device::struct_mutex lock.
*/
void drm_ctxbitmap_cleanup(struct drm_device * dev)
{
DRM_LOCK(dev);
if (dev->context_sareas != NULL)
free(dev->context_sareas, DRM_MEM_SAREA);
free(dev->ctx_bitmap, DRM_MEM_CTXBITMAP);
DRM_UNLOCK(dev);
}
/*@}*/
/******************************************************************/
/** \name Per Context SAREA Support */
/*@{*/
/**
* Get per-context SAREA.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument pointing to a drm_ctx_priv_map structure.
* \return zero on success or a negative number on failure.
*
* Gets the map from drm_device::ctx_idr with the handle specified and
* returns its handle.
*/
int drm_getsareactx(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_ctx_priv_map *request = data;
struct drm_local_map *map;
DRM_LOCK(dev);
if (dev->max_context < 0 ||
request->ctx_id >= (unsigned) dev->max_context) {
DRM_UNLOCK(dev);
return EINVAL;
}
map = dev->context_sareas[request->ctx_id];
DRM_UNLOCK(dev);
request->handle = (void *)map->handle;
return 0;
}
/**
* Set per-context SAREA.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument pointing to a drm_ctx_priv_map structure.
* \return zero on success or a negative number on failure.
*
 * Searches for the mapping specified in \p arg and updates the entry in
* drm_device::ctx_idr with it.
*/
int drm_setsareactx(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_ctx_priv_map *request = data;
struct drm_local_map *map = NULL;
struct drm_map_list *r_list = NULL;
DRM_LOCK(dev);
list_for_each_entry(r_list, &dev->maplist, head) {
if (r_list->map
&& r_list->user_token == (unsigned long) request->handle) {
if (dev->max_context < 0)
goto bad;
if (request->ctx_id >= (unsigned) dev->max_context)
goto bad;
dev->context_sareas[request->ctx_id] = map;
DRM_UNLOCK(dev);
return 0;
}
}
bad:
DRM_UNLOCK(dev);
return EINVAL;
}
/*@}*/
/******************************************************************/
/** \name The actual DRM context handling routines */
/*@{*/
/**
* Switch context.
*
* \param dev DRM device.
* \param old old context handle.
* \param new new context handle.
* \return zero on success or a negative number on failure.
*
* Attempt to set drm_device::context_flag.
*/
static int drm_context_switch(struct drm_device * dev, int old, int new)
{
if (test_and_set_bit(0, &dev->context_flag)) {
DRM_ERROR("Reentering -- FIXME\n");
return -EBUSY;
}
DRM_DEBUG("Context switch from %d to %d\n", old, new);
if (new == dev->last_context) {
clear_bit(0, &dev->context_flag);
return 0;
}
return 0;
}
/**
* Complete context switch.
*
* \param dev DRM device.
* \param new new context handle.
* \return zero on success or a negative number on failure.
*
* Updates drm_device::last_context and drm_device::last_switch. Verifies the
* hardware lock is held, clears the drm_device::context_flag and wakes up
* drm_device::context_wait.
*/
static int drm_context_switch_complete(struct drm_device *dev,
struct drm_file *file_priv, int new)
{
dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
dev->last_switch = jiffies;
if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) {
DRM_ERROR("Lock isn't held after context switch\n");
}
/* If a context switch is ever initiated
when the kernel holds the lock, release
that lock here. */
clear_bit(0, &dev->context_flag);
wakeup(&dev->context_wait);
return 0;
}
/**
* Reserve contexts.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument pointing to a drm_ctx_res structure.
* \return zero on success or a negative number on failure.
*/
int drm_resctx(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_ctx_res *res = data;
struct drm_ctx ctx;
int i;
if (res->count >= DRM_RESERVED_CONTEXTS) {
memset(&ctx, 0, sizeof(ctx));
for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
ctx.handle = i;
if (copy_to_user(&res->contexts[i], &ctx, sizeof(ctx)))
return -EFAULT;
}
}
res->count = DRM_RESERVED_CONTEXTS;
return 0;
}
/**
* Add context.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument pointing to a drm_ctx structure.
* \return zero on success or a negative number on failure.
*
* Get a new handle for the context and copy to userspace.
*/
int drm_addctx(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_ctx_list *ctx_entry;
struct drm_ctx *ctx = data;
ctx->handle = drm_ctxbitmap_next(dev);
if (ctx->handle == DRM_KERNEL_CONTEXT) {
/* Skip kernel's context and get a new one. */
ctx->handle = drm_ctxbitmap_next(dev);
}
DRM_DEBUG("%d\n", ctx->handle);
if (ctx->handle == -1) {
DRM_DEBUG("Not enough free contexts.\n");
/* Should this return -EBUSY instead? */
return -ENOMEM;
}
ctx_entry = malloc(sizeof(*ctx_entry), DRM_MEM_CTXBITMAP, M_NOWAIT);
if (!ctx_entry) {
DRM_DEBUG("out of memory\n");
return -ENOMEM;
}
INIT_LIST_HEAD(&ctx_entry->head);
ctx_entry->handle = ctx->handle;
ctx_entry->tag = file_priv;
DRM_LOCK(dev);
list_add(&ctx_entry->head, &dev->ctxlist);
++dev->ctx_count;
DRM_UNLOCK(dev);
return 0;
}
int drm_modctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
/* This does nothing */
return 0;
}
/**
* Get context.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument pointing to a drm_ctx structure.
* \return zero on success or a negative number on failure.
*/
int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_ctx *ctx = data;
/* This is 0, because we don't handle any context flags */
ctx->flags = 0;
return 0;
}
/**
* Switch context.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument pointing to a drm_ctx structure.
* \return zero on success or a negative number on failure.
*
* Calls context_switch().
*/
int drm_switchctx(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_ctx *ctx = data;
DRM_DEBUG("%d\n", ctx->handle);
return drm_context_switch(dev, dev->last_context, ctx->handle);
}
/**
* New context.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument pointing to a drm_ctx structure.
* \return zero on success or a negative number on failure.
*
* Calls context_switch_complete().
*/
int drm_newctx(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_ctx *ctx = data;
DRM_DEBUG("%d\n", ctx->handle);
drm_context_switch_complete(dev, file_priv, ctx->handle);
return 0;
}
/**
* Remove context.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument pointing to a drm_ctx structure.
* \return zero on success or a negative number on failure.
*
* If not the special kernel context, calls ctxbitmap_free() to free the specified context.
*/
int drm_rmctx(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_ctx *ctx = data;
DRM_DEBUG("%d\n", ctx->handle);
if (ctx->handle != DRM_KERNEL_CONTEXT) {
if (dev->driver->context_dtor)
dev->driver->context_dtor(dev, ctx->handle);
drm_ctxbitmap_free(dev, ctx->handle);
}
DRM_LOCK(dev);
if (!list_empty(&dev->ctxlist)) {
struct drm_ctx_list *pos, *n;
list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
if (pos->handle == ctx->handle) {
list_del(&pos->head);
free(pos, DRM_MEM_CTXBITMAP);
--dev->ctx_count;
}
}
}
DRM_UNLOCK(dev);
return 0;
}
/*@}*/


@ -1,38 +0,0 @@
/*
* Copyright 2004 Jon Smirl <jonsmirl@gmail.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#define CORE_AUTHOR "Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl"
#define CORE_NAME "drm"
#define CORE_DESC "DRM shared core routines"
#define CORE_DATE "20060810"
#define DRM_IF_MAJOR 1
#define DRM_IF_MINOR 4
#define CORE_MAJOR 1
#define CORE_MINOR 1
#define CORE_PATCHLEVEL 0

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -1,167 +0,0 @@
/*
* Copyright © 2006 Keith Packard
* Copyright © 2007-2008 Dave Airlie
* Copyright © 2007-2008 Intel Corporation
* Jesse Barnes <jesse.barnes@intel.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* $FreeBSD$
*/
/*
* The DRM mode setting helper functions are common code for drivers to use if
 * they wish. Drivers are not forced to use this code in their
 * implementations, but it would be useful if the code they do use at least
 * provides a consistent interface and operation to userspace.
*/
#ifndef __DRM_CRTC_HELPER_H__
#define __DRM_CRTC_HELPER_H__
enum mode_set_atomic {
LEAVE_ATOMIC_MODE_SET,
ENTER_ATOMIC_MODE_SET,
};
/**
* drm_crtc_helper_funcs - helper operations for CRTCs
* @mode_fixup: try to fixup proposed mode for this connector
* @mode_set: set this mode
*
* The helper operations are called by the mid-layer CRTC helper.
*/
struct drm_crtc_helper_funcs {
/*
* Control power levels on the CRTC. If the mode passed in is
* unsupported, the provider must use the next lowest power level.
*/
void (*dpms)(struct drm_crtc *crtc, int mode);
void (*prepare)(struct drm_crtc *crtc);
void (*commit)(struct drm_crtc *crtc);
/* Provider can fixup or change mode timings before modeset occurs */
bool (*mode_fixup)(struct drm_crtc *crtc,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
/* Actually set the mode */
int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode, int x, int y,
struct drm_framebuffer *old_fb);
/* Move the crtc on the current fb to the given position *optional* */
int (*mode_set_base)(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb);
int (*mode_set_base_atomic)(struct drm_crtc *crtc,
struct drm_framebuffer *fb, int x, int y,
enum mode_set_atomic);
/* reload the current crtc LUT */
void (*load_lut)(struct drm_crtc *crtc);
/* disable crtc when not in use - more explicit than dpms off */
void (*disable)(struct drm_crtc *crtc);
};
/**
* drm_encoder_helper_funcs - helper operations for encoders
* @mode_fixup: try to fixup proposed mode for this connector
* @mode_set: set this mode
*
* The helper operations are called by the mid-layer CRTC helper.
*/
struct drm_encoder_helper_funcs {
void (*dpms)(struct drm_encoder *encoder, int mode);
void (*save)(struct drm_encoder *encoder);
void (*restore)(struct drm_encoder *encoder);
bool (*mode_fixup)(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
void (*prepare)(struct drm_encoder *encoder);
void (*commit)(struct drm_encoder *encoder);
void (*mode_set)(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
struct drm_crtc *(*get_crtc)(struct drm_encoder *encoder);
/* detect for DAC style encoders */
enum drm_connector_status (*detect)(struct drm_encoder *encoder,
struct drm_connector *connector);
/* disable encoder when not in use - more explicit than dpms off */
void (*disable)(struct drm_encoder *encoder);
};
/**
* drm_connector_helper_funcs - helper operations for connectors
* @get_modes: get mode list for this connector
* @mode_valid: is this mode valid on the given connector?
*
* The helper operations are called by the mid-layer CRTC helper.
*/
struct drm_connector_helper_funcs {
int (*get_modes)(struct drm_connector *connector);
int (*mode_valid)(struct drm_connector *connector,
struct drm_display_mode *mode);
struct drm_encoder *(*best_encoder)(struct drm_connector *connector);
};
extern int drm_helper_probe_single_connector_modes(struct drm_connector *connector, uint32_t maxX, uint32_t maxY);
extern void drm_helper_disable_unused_functions(struct drm_device *dev);
extern int drm_crtc_helper_set_config(struct drm_mode_set *set);
extern bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
struct drm_display_mode *mode,
int x, int y,
struct drm_framebuffer *old_fb);
extern bool drm_helper_crtc_in_use(struct drm_crtc *crtc);
extern bool drm_helper_encoder_in_use(struct drm_encoder *encoder);
extern void drm_helper_connector_dpms(struct drm_connector *connector, int mode);
extern void drm_helper_move_panel_connectors_to_head(struct drm_device *);
extern int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
struct drm_mode_fb_cmd2 *mode_cmd);
static inline void drm_crtc_helper_add(struct drm_crtc *crtc,
const struct drm_crtc_helper_funcs *funcs)
{
crtc->helper_private = __DECONST(void *, funcs);
}
static inline void drm_encoder_helper_add(struct drm_encoder *encoder,
const struct drm_encoder_helper_funcs *funcs)
{
encoder->helper_private = __DECONST(void *, funcs);
}
static inline void drm_connector_helper_add(struct drm_connector *connector,
const struct drm_connector_helper_funcs *funcs)
{
connector->helper_private = __DECONST(void *, funcs);
}
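/*
 * Illustrative sketch (not part of the original header): one plausible way a
 * driver could wire these helpers up.  The "foo_" names and callbacks are
 * hypothetical placeholders, not a real driver.
 */
#ifdef DRM_CRTC_HELPER_EXAMPLE	/* never defined; example only */
static const struct drm_crtc_helper_funcs foo_crtc_helper_funcs = {
	.dpms		= foo_crtc_dpms,
	.prepare	= foo_crtc_prepare,
	.commit		= foo_crtc_commit,
	.mode_fixup	= foo_crtc_mode_fixup,
	.mode_set	= foo_crtc_mode_set,
};

static void
foo_crtc_init(struct drm_crtc *crtc)
{
	/* Attach the helper vtable; the mid-layer CRTC helper then uses it. */
	drm_crtc_helper_add(crtc, &foo_crtc_helper_funcs);
}
#endif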
extern int drm_helper_resume_force_mode(struct drm_device *dev);
extern void drm_kms_helper_poll_init(struct drm_device *dev);
extern void drm_kms_helper_poll_fini(struct drm_device *dev);
extern void drm_helper_hpd_irq_event(struct drm_device *dev);
extern void drm_kms_helper_hotplug_event(struct drm_device *dev);
extern void drm_kms_helper_poll_disable(struct drm_device *dev);
extern void drm_kms_helper_poll_enable(struct drm_device *dev);
#endif

View File

@ -1,160 +0,0 @@
/**
* \file drm_dma.c
* DMA IOCTL and function support
*
* \author Rickard E. (Rik) Faith <faith@valinux.com>
* \author Gareth Hughes <gareth@valinux.com>
*/
/*-
* Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
*
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <dev/drm2/drmP.h>
/**
* Initialize the DMA data.
*
* \param dev DRM device.
* \return zero on success or a negative value on failure.
*
* Allocate and initialize a drm_device_dma structure.
*/
int drm_dma_setup(struct drm_device *dev)
{
int i;
dev->dma = malloc(sizeof(*dev->dma), DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
if (!dev->dma)
return -ENOMEM;
for (i = 0; i <= DRM_MAX_ORDER; i++)
memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0]));
return 0;
}
/**
* Cleanup the DMA resources.
*
* \param dev DRM device.
*
* Free all pages associated with DMA buffers, the buffers and pages lists, and
* finally the drm_device::dma structure itself.
*/
void drm_dma_takedown(struct drm_device *dev)
{
struct drm_device_dma *dma = dev->dma;
int i, j;
if (!dma)
return;
/* Clear dma buffers */
for (i = 0; i <= DRM_MAX_ORDER; i++) {
if (dma->bufs[i].seg_count) {
DRM_DEBUG("order %d: buf_count = %d,"
" seg_count = %d\n",
i,
dma->bufs[i].buf_count,
dma->bufs[i].seg_count);
for (j = 0; j < dma->bufs[i].seg_count; j++) {
if (dma->bufs[i].seglist[j]) {
drm_pci_free(dev, dma->bufs[i].seglist[j]);
}
}
free(dma->bufs[i].seglist, DRM_MEM_SEGS);
}
if (dma->bufs[i].buf_count) {
for (j = 0; j < dma->bufs[i].buf_count; j++) {
free(dma->bufs[i].buflist[j].dev_private,
DRM_MEM_BUFS);
}
free(dma->bufs[i].buflist, DRM_MEM_BUFS);
}
}
free(dma->buflist, DRM_MEM_BUFS);
free(dma->pagelist, DRM_MEM_PAGES);
free(dev->dma, DRM_MEM_DRIVER);
dev->dma = NULL;
}
/**
* Free a buffer.
*
* \param dev DRM device.
* \param buf buffer to free.
*
* Resets the fields of \p buf.
*/
void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf)
{
if (!buf)
return;
buf->waiting = 0;
buf->pending = 0;
buf->file_priv = NULL;
buf->used = 0;
}
/**
* Reclaim the buffers.
*
* \param file_priv DRM file private.
*
* Frees each buffer associated with \p file_priv not already on the hardware.
*/
void drm_core_reclaim_buffers(struct drm_device *dev,
struct drm_file *file_priv)
{
struct drm_device_dma *dma = dev->dma;
int i;
if (!dma)
return;
for (i = 0; i < dma->buf_count; i++) {
if (dma->buflist[i]->file_priv == file_priv) {
switch (dma->buflist[i]->list) {
case DRM_LIST_NONE:
drm_free_buffer(dev, dma->buflist[i]);
break;
case DRM_LIST_WAIT:
dma->buflist[i]->list = DRM_LIST_RECLAIM;
break;
default:
/* Buffer already on hardware. */
break;
}
}
}
}
EXPORT_SYMBOL(drm_core_reclaim_buffers);

View File

@ -1,156 +0,0 @@
/*
* Copyright © 2009 Keith Packard
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that copyright
* notice and this permission notice appear in supporting documentation, and
* that the name of the copyright holders not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. The copyright holders make no representations
* about the suitability of this software for any purpose. It is provided "as
* is" without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THIS SOFTWARE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <dev/drm2/drmP.h>
#include <dev/drm2/drm_dp_helper.h>
/**
* DOC: dp helpers
*
* These functions contain some common logic and helpers at various abstraction
* levels to deal with Display Port sink devices and related things like DP aux
* channel transfers, EDID reading over DP aux channels, decoding certain DPCD
* blocks, ...
*/
/* Helpers for DP link training */
static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r)
{
return link_status[r - DP_LANE0_1_STATUS];
}
static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
int lane)
{
int i = DP_LANE0_1_STATUS + (lane >> 1);
int s = (lane & 1) * 4;
u8 l = dp_link_status(link_status, i);
return (l >> s) & 0xf;
}
bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
int lane_count)
{
u8 lane_align;
u8 lane_status;
int lane;
lane_align = dp_link_status(link_status,
DP_LANE_ALIGN_STATUS_UPDATED);
if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
return false;
for (lane = 0; lane < lane_count; lane++) {
lane_status = dp_get_lane_status(link_status, lane);
if ((lane_status & DP_CHANNEL_EQ_BITS) != DP_CHANNEL_EQ_BITS)
return false;
}
return true;
}
EXPORT_SYMBOL(drm_dp_channel_eq_ok);
bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
int lane_count)
{
int lane;
u8 lane_status;
for (lane = 0; lane < lane_count; lane++) {
lane_status = dp_get_lane_status(link_status, lane);
if ((lane_status & DP_LANE_CR_DONE) == 0)
return false;
}
return true;
}
EXPORT_SYMBOL(drm_dp_clock_recovery_ok);
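/*
 * Illustrative sketch (not part of the original file): how a caller might
 * interpret the 6-byte DPCD link-status block with the helpers above.  The
 * sample values are invented; only the bit layout follows the code.
 */
#ifdef DRM_DP_HELPER_EXAMPLE	/* never defined; example only */
static void
example_check_link_status(void)
{
	u8 link_status[DP_LINK_STATUS_SIZE] = {
		0x77,	/* DP_LANE0_1_STATUS: CR, EQ, symbol lock on lanes 0/1 */
		0x77,	/* DP_LANE2_3_STATUS: same for lanes 2/3 */
		0x01,	/* DP_LANE_ALIGN_STATUS_UPDATED: interlane align done */
		0x00,	/* DP_SINK_STATUS */
		0x00,	/* DP_ADJUST_REQUEST_LANE0_1 */
		0x00,	/* DP_ADJUST_REQUEST_LANE2_3 */
	};

	if (drm_dp_clock_recovery_ok(link_status, 4) &&
	    drm_dp_channel_eq_ok(link_status, 4))
		DRM_DEBUG_KMS("example: 4-lane link fully trained\n");
}
#endif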
u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
int lane)
{
int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
int s = ((lane & 1) ?
DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
u8 l = dp_link_status(link_status, i);
return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
}
EXPORT_SYMBOL(drm_dp_get_adjust_request_voltage);
u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
int lane)
{
int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
int s = ((lane & 1) ?
DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
u8 l = dp_link_status(link_status, i);
return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
}
EXPORT_SYMBOL(drm_dp_get_adjust_request_pre_emphasis);
void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
udelay(100);
else
mdelay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4);
}
EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay);
void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
udelay(400);
else
mdelay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4);
}
EXPORT_SYMBOL(drm_dp_link_train_channel_eq_delay);
u8 drm_dp_link_rate_to_bw_code(int link_rate)
{
switch (link_rate) {
case 162000:
default:
return DP_LINK_BW_1_62;
case 270000:
return DP_LINK_BW_2_7;
case 540000:
return DP_LINK_BW_5_4;
}
}
EXPORT_SYMBOL(drm_dp_link_rate_to_bw_code);
int drm_dp_bw_code_to_link_rate(u8 link_bw)
{
switch (link_bw) {
case DP_LINK_BW_1_62:
default:
return 162000;
case DP_LINK_BW_2_7:
return 270000;
case DP_LINK_BW_5_4:
return 540000;
}
}
EXPORT_SYMBOL(drm_dp_bw_code_to_link_rate);

View File

@ -1,356 +0,0 @@
/*
* Copyright © 2008 Keith Packard
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that copyright
* notice and this permission notice appear in supporting documentation, and
* that the name of the copyright holders not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. The copyright holders make no representations
* about the suitability of this software for any purpose. It is provided "as
* is" without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THIS SOFTWARE.
*
* $FreeBSD$
*/
#ifndef _DRM_DP_HELPER_H_
#define _DRM_DP_HELPER_H_
/*
* Unless otherwise noted, all values are from the DP 1.1a spec. Note that
 * DP and DPCD versions are independent. Differences from 1.0 are not noted;
 * 1.0 devices basically don't exist in the wild.
*
* Abbreviations, in chronological order:
*
* eDP: Embedded DisplayPort version 1
* DPI: DisplayPort Interoperability Guideline v1.1a
* 1.2: DisplayPort 1.2
*
* 1.2 formally includes both eDP and DPI definitions.
*/
#define AUX_NATIVE_WRITE 0x8
#define AUX_NATIVE_READ 0x9
#define AUX_I2C_WRITE 0x0
#define AUX_I2C_READ 0x1
#define AUX_I2C_STATUS 0x2
#define AUX_I2C_MOT 0x4
#define AUX_NATIVE_REPLY_ACK (0x0 << 4)
#define AUX_NATIVE_REPLY_NACK (0x1 << 4)
#define AUX_NATIVE_REPLY_DEFER (0x2 << 4)
#define AUX_NATIVE_REPLY_MASK (0x3 << 4)
#define AUX_I2C_REPLY_ACK (0x0 << 6)
#define AUX_I2C_REPLY_NACK (0x1 << 6)
#define AUX_I2C_REPLY_DEFER (0x2 << 6)
#define AUX_I2C_REPLY_MASK (0x3 << 6)
/* AUX CH addresses */
/* DPCD */
#define DP_DPCD_REV 0x000
#define DP_MAX_LINK_RATE 0x001
#define DP_MAX_LANE_COUNT 0x002
# define DP_MAX_LANE_COUNT_MASK 0x1f
# define DP_TPS3_SUPPORTED (1 << 6) /* 1.2 */
# define DP_ENHANCED_FRAME_CAP (1 << 7)
#define DP_MAX_DOWNSPREAD 0x003
# define DP_NO_AUX_HANDSHAKE_LINK_TRAINING (1 << 6)
#define DP_NORP 0x004
#define DP_DOWNSTREAMPORT_PRESENT 0x005
# define DP_DWN_STRM_PORT_PRESENT (1 << 0)
# define DP_DWN_STRM_PORT_TYPE_MASK 0x06
/* 00b = DisplayPort */
/* 01b = Analog */
/* 10b = TMDS or HDMI */
/* 11b = Other */
# define DP_FORMAT_CONVERSION (1 << 3)
# define DP_DETAILED_CAP_INFO_AVAILABLE (1 << 4) /* DPI */
#define DP_MAIN_LINK_CHANNEL_CODING 0x006
#define DP_DOWN_STREAM_PORT_COUNT 0x007
# define DP_PORT_COUNT_MASK 0x0f
# define DP_MSA_TIMING_PAR_IGNORED (1 << 6) /* eDP */
# define DP_OUI_SUPPORT (1 << 7)
#define DP_I2C_SPEED_CAP 0x00c /* DPI */
# define DP_I2C_SPEED_1K 0x01
# define DP_I2C_SPEED_5K 0x02
# define DP_I2C_SPEED_10K 0x04
# define DP_I2C_SPEED_100K 0x08
# define DP_I2C_SPEED_400K 0x10
# define DP_I2C_SPEED_1M 0x20
#define DP_EDP_CONFIGURATION_CAP 0x00d /* XXX 1.2? */
#define DP_TRAINING_AUX_RD_INTERVAL 0x00e /* XXX 1.2? */
/* Multiple stream transport */
#define DP_MSTM_CAP 0x021 /* 1.2 */
# define DP_MST_CAP (1 << 0)
#define DP_PSR_SUPPORT 0x070 /* XXX 1.2? */
# define DP_PSR_IS_SUPPORTED 1
#define DP_PSR_CAPS 0x071 /* XXX 1.2? */
# define DP_PSR_NO_TRAIN_ON_EXIT 1
# define DP_PSR_SETUP_TIME_330 (0 << 1)
# define DP_PSR_SETUP_TIME_275 (1 << 1)
# define DP_PSR_SETUP_TIME_220 (2 << 1)
# define DP_PSR_SETUP_TIME_165 (3 << 1)
# define DP_PSR_SETUP_TIME_110 (4 << 1)
# define DP_PSR_SETUP_TIME_55 (5 << 1)
# define DP_PSR_SETUP_TIME_0 (6 << 1)
# define DP_PSR_SETUP_TIME_MASK (7 << 1)
# define DP_PSR_SETUP_TIME_SHIFT 1
/*
* 0x80-0x8f describe downstream port capabilities, but there are two layouts
* based on whether DP_DETAILED_CAP_INFO_AVAILABLE was set. If it was not,
 * each port's descriptor is one byte wide. If it was set, each port's
 * descriptor is four bytes wide, starting with the one byte from the base info. As of
* DP interop v1.1a only VGA defines additional detail.
*/
/* offset 0 */
#define DP_DOWNSTREAM_PORT_0 0x80
# define DP_DS_PORT_TYPE_MASK (7 << 0)
# define DP_DS_PORT_TYPE_DP 0
# define DP_DS_PORT_TYPE_VGA 1
# define DP_DS_PORT_TYPE_DVI 2
# define DP_DS_PORT_TYPE_HDMI 3
# define DP_DS_PORT_TYPE_NON_EDID 4
# define DP_DS_PORT_HPD (1 << 3)
/* offset 1 for VGA is maximum megapixels per second / 8 */
/* offset 2 */
# define DP_DS_VGA_MAX_BPC_MASK (3 << 0)
# define DP_DS_VGA_8BPC 0
# define DP_DS_VGA_10BPC 1
# define DP_DS_VGA_12BPC 2
# define DP_DS_VGA_16BPC 3
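/*
 * Illustrative sketch (not part of the original header): walking the
 * 0x80-0x8f block in both layouts described above.  This is one reading of
 * the comment, not a reference implementation; "dpcd" and "ds" are
 * caller-supplied buffers in this example.
 */
#ifdef DRM_DP_HELPER_EXAMPLE	/* never defined; example only */
static inline int
example_ds_port_is_vga(const u8 *dpcd, const u8 *ds, int port)
{
	int stride;

	/* One byte per port normally, four if detailed caps are present. */
	stride = (dpcd[DP_DOWNSTREAMPORT_PRESENT] &
	    DP_DETAILED_CAP_INFO_AVAILABLE) ? 4 : 1;
	return ((ds[port * stride] & DP_DS_PORT_TYPE_MASK) ==
	    DP_DS_PORT_TYPE_VGA);
}
#endif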
/* link configuration */
#define DP_LINK_BW_SET 0x100
# define DP_LINK_BW_1_62 0x06
# define DP_LINK_BW_2_7 0x0a
# define DP_LINK_BW_5_4 0x14 /* 1.2 */
#define DP_LANE_COUNT_SET 0x101
# define DP_LANE_COUNT_MASK 0x0f
# define DP_LANE_COUNT_ENHANCED_FRAME_EN (1 << 7)
#define DP_TRAINING_PATTERN_SET 0x102
# define DP_TRAINING_PATTERN_DISABLE 0
# define DP_TRAINING_PATTERN_1 1
# define DP_TRAINING_PATTERN_2 2
# define DP_TRAINING_PATTERN_3 3 /* 1.2 */
# define DP_TRAINING_PATTERN_MASK 0x3
# define DP_LINK_QUAL_PATTERN_DISABLE (0 << 2)
# define DP_LINK_QUAL_PATTERN_D10_2 (1 << 2)
# define DP_LINK_QUAL_PATTERN_ERROR_RATE (2 << 2)
# define DP_LINK_QUAL_PATTERN_PRBS7 (3 << 2)
# define DP_LINK_QUAL_PATTERN_MASK (3 << 2)
# define DP_RECOVERED_CLOCK_OUT_EN (1 << 4)
# define DP_LINK_SCRAMBLING_DISABLE (1 << 5)
# define DP_SYMBOL_ERROR_COUNT_BOTH (0 << 6)
# define DP_SYMBOL_ERROR_COUNT_DISPARITY (1 << 6)
# define DP_SYMBOL_ERROR_COUNT_SYMBOL (2 << 6)
# define DP_SYMBOL_ERROR_COUNT_MASK (3 << 6)
#define DP_TRAINING_LANE0_SET 0x103
#define DP_TRAINING_LANE1_SET 0x104
#define DP_TRAINING_LANE2_SET 0x105
#define DP_TRAINING_LANE3_SET 0x106
# define DP_TRAIN_VOLTAGE_SWING_MASK 0x3
# define DP_TRAIN_VOLTAGE_SWING_SHIFT 0
# define DP_TRAIN_MAX_SWING_REACHED (1 << 2)
# define DP_TRAIN_VOLTAGE_SWING_400 (0 << 0)
# define DP_TRAIN_VOLTAGE_SWING_600 (1 << 0)
# define DP_TRAIN_VOLTAGE_SWING_800 (2 << 0)
# define DP_TRAIN_VOLTAGE_SWING_1200 (3 << 0)
# define DP_TRAIN_PRE_EMPHASIS_MASK (3 << 3)
# define DP_TRAIN_PRE_EMPHASIS_0 (0 << 3)
# define DP_TRAIN_PRE_EMPHASIS_3_5 (1 << 3)
# define DP_TRAIN_PRE_EMPHASIS_6 (2 << 3)
# define DP_TRAIN_PRE_EMPHASIS_9_5 (3 << 3)
# define DP_TRAIN_PRE_EMPHASIS_SHIFT 3
# define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED (1 << 5)
#define DP_DOWNSPREAD_CTRL 0x107
# define DP_SPREAD_AMP_0_5 (1 << 4)
# define DP_MSA_TIMING_PAR_IGNORE_EN (1 << 7) /* eDP */
#define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108
# define DP_SET_ANSI_8B10B (1 << 0)
#define DP_I2C_SPEED_CONTROL_STATUS 0x109 /* DPI */
/* bitmask as for DP_I2C_SPEED_CAP */
#define DP_EDP_CONFIGURATION_SET 0x10a /* XXX 1.2? */
#define DP_MSTM_CTRL 0x111 /* 1.2 */
# define DP_MST_EN (1 << 0)
# define DP_UP_REQ_EN (1 << 1)
# define DP_UPSTREAM_IS_SRC (1 << 2)
#define DP_PSR_EN_CFG 0x170 /* XXX 1.2? */
# define DP_PSR_ENABLE (1 << 0)
# define DP_PSR_MAIN_LINK_ACTIVE (1 << 1)
# define DP_PSR_CRC_VERIFICATION (1 << 2)
# define DP_PSR_FRAME_CAPTURE (1 << 3)
#define DP_SINK_COUNT 0x200
/* prior to 1.2 bit 7 was reserved mbz */
# define DP_GET_SINK_COUNT(x) ((((x) & 0x80) >> 1) | ((x) & 0x3f))
# define DP_SINK_CP_READY (1 << 6)
#define DP_DEVICE_SERVICE_IRQ_VECTOR 0x201
# define DP_REMOTE_CONTROL_COMMAND_PENDING (1 << 0)
# define DP_AUTOMATED_TEST_REQUEST (1 << 1)
# define DP_CP_IRQ (1 << 2)
# define DP_SINK_SPECIFIC_IRQ (1 << 6)
#define DP_LANE0_1_STATUS 0x202
#define DP_LANE2_3_STATUS 0x203
# define DP_LANE_CR_DONE (1 << 0)
# define DP_LANE_CHANNEL_EQ_DONE (1 << 1)
# define DP_LANE_SYMBOL_LOCKED (1 << 2)
#define DP_CHANNEL_EQ_BITS (DP_LANE_CR_DONE | \
DP_LANE_CHANNEL_EQ_DONE | \
DP_LANE_SYMBOL_LOCKED)
#define DP_LANE_ALIGN_STATUS_UPDATED 0x204
#define DP_INTERLANE_ALIGN_DONE (1 << 0)
#define DP_DOWNSTREAM_PORT_STATUS_CHANGED (1 << 6)
#define DP_LINK_STATUS_UPDATED (1 << 7)
#define DP_SINK_STATUS 0x205
#define DP_RECEIVE_PORT_0_STATUS (1 << 0)
#define DP_RECEIVE_PORT_1_STATUS (1 << 1)
#define DP_ADJUST_REQUEST_LANE0_1 0x206
#define DP_ADJUST_REQUEST_LANE2_3 0x207
# define DP_ADJUST_VOLTAGE_SWING_LANE0_MASK 0x03
# define DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT 0
# define DP_ADJUST_PRE_EMPHASIS_LANE0_MASK 0x0c
# define DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT 2
# define DP_ADJUST_VOLTAGE_SWING_LANE1_MASK 0x30
# define DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT 4
# define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK 0xc0
# define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT 6
#define DP_TEST_REQUEST 0x218
# define DP_TEST_LINK_TRAINING (1 << 0)
# define DP_TEST_LINK_PATTERN (1 << 1)
# define DP_TEST_LINK_EDID_READ (1 << 2)
# define DP_TEST_LINK_PHY_TEST_PATTERN (1 << 3) /* DPCD >= 1.1 */
#define DP_TEST_LINK_RATE 0x219
# define DP_LINK_RATE_162 (0x6)
# define DP_LINK_RATE_27 (0xa)
#define DP_TEST_LANE_COUNT 0x220
#define DP_TEST_PATTERN 0x221
#define DP_TEST_RESPONSE 0x260
# define DP_TEST_ACK (1 << 0)
# define DP_TEST_NAK (1 << 1)
# define DP_TEST_EDID_CHECKSUM_WRITE (1 << 2)
#define DP_SOURCE_OUI 0x300
#define DP_SINK_OUI 0x400
#define DP_BRANCH_OUI 0x500
#define DP_SET_POWER 0x600
# define DP_SET_POWER_D0 0x1
# define DP_SET_POWER_D3 0x2
#define DP_PSR_ERROR_STATUS 0x2006 /* XXX 1.2? */
# define DP_PSR_LINK_CRC_ERROR (1 << 0)
# define DP_PSR_RFB_STORAGE_ERROR (1 << 1)
#define DP_PSR_ESI 0x2007 /* XXX 1.2? */
# define DP_PSR_CAPS_CHANGE (1 << 0)
#define DP_PSR_STATUS 0x2008 /* XXX 1.2? */
# define DP_PSR_SINK_INACTIVE 0
# define DP_PSR_SINK_ACTIVE_SRC_SYNCED 1
# define DP_PSR_SINK_ACTIVE_RFB 2
# define DP_PSR_SINK_ACTIVE_SINK_SYNCED 3
# define DP_PSR_SINK_ACTIVE_RESYNC 4
# define DP_PSR_SINK_INTERNAL_ERROR 7
# define DP_PSR_SINK_STATE_MASK 0x07
#define MODE_I2C_START 1
#define MODE_I2C_WRITE 2
#define MODE_I2C_READ 4
#define MODE_I2C_STOP 8
struct iic_dp_aux_data {
bool running;
u16 address;
void *priv;
int (*aux_ch)(device_t adapter, int mode, uint8_t write_byte,
uint8_t *read_byte);
device_t port;
};
int iic_dp_aux_add_bus(device_t dev, const char *name,
int (*ch)(device_t idev, int mode, uint8_t write_byte, uint8_t *read_byte),
void *priv, device_t *bus, device_t *adapter);
#define DP_LINK_STATUS_SIZE 6
bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
int lane_count);
bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
int lane_count);
u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
int lane);
u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
int lane);
#define DP_RECEIVER_CAP_SIZE 0xf
void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]);
void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]);
u8 drm_dp_link_rate_to_bw_code(int link_rate);
int drm_dp_bw_code_to_link_rate(u8 link_bw);
static inline int
drm_dp_max_link_rate(u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
return drm_dp_bw_code_to_link_rate(dpcd[DP_MAX_LINK_RATE]);
}
static inline u8
drm_dp_max_lane_count(u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
}
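/*
 * Illustrative sketch (not part of the original header): deriving the
 * maximum link configuration from a receiver capability block using the
 * inlines above.  The DPCD values are invented for the example.
 */
#ifdef DRM_DP_HELPER_EXAMPLE	/* never defined; example only */
static inline void
example_max_link_config(void)
{
	u8 dpcd[DP_RECEIVER_CAP_SIZE] = {
		[DP_DPCD_REV]		= 0x11,
		[DP_MAX_LINK_RATE]	= DP_LINK_BW_2_7,
		[DP_MAX_LANE_COUNT]	= DP_ENHANCED_FRAME_CAP | 0x4,
	};
	int rate = drm_dp_max_link_rate(dpcd);	/* 270000 */
	u8 lanes = drm_dp_max_lane_count(dpcd);	/* 4 */

	(void)rate;
	(void)lanes;
}
#endif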
#endif /* _DRM_DP_HELPER_H_ */

View File

@ -1,278 +0,0 @@
/*
* Copyright © 2009 Keith Packard
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that copyright
* notice and this permission notice appear in supporting documentation, and
* that the name of the copyright holders not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. The copyright holders make no representations
* about the suitability of this software for any purpose. It is provided "as
* is" without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THIS SOFTWARE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/types.h>
#include <sys/kobj.h>
#include <sys/bus.h>
#include <dev/iicbus/iic.h>
#include "iicbus_if.h"
#include <dev/iicbus/iiconf.h>
#include <dev/drm2/drmP.h>
#include <dev/drm2/drm_dp_helper.h>
static int
iic_dp_aux_transaction(device_t idev, int mode, uint8_t write_byte,
uint8_t *read_byte)
{
struct iic_dp_aux_data *aux_data;
int ret;
aux_data = device_get_softc(idev);
ret = (*aux_data->aux_ch)(idev, mode, write_byte, read_byte);
if (ret < 0)
return (ret);
return (0);
}
/*
* I2C over AUX CH
*/
/*
* Send the address. If the I2C link is running, this 'restarts'
 * the connection with the new address; this is used for doing
* a write followed by a read (as needed for DDC)
*/
static int
iic_dp_aux_address(device_t idev, u16 address, bool reading)
{
struct iic_dp_aux_data *aux_data;
int mode, ret;
aux_data = device_get_softc(idev);
mode = MODE_I2C_START;
if (reading)
mode |= MODE_I2C_READ;
else
mode |= MODE_I2C_WRITE;
aux_data->address = address;
aux_data->running = true;
ret = iic_dp_aux_transaction(idev, mode, 0, NULL);
return (ret);
}
/*
* Stop the I2C transaction. This closes out the link, sending
* a bare address packet with the MOT bit turned off
*/
static void
iic_dp_aux_stop(device_t idev, bool reading)
{
struct iic_dp_aux_data *aux_data;
int mode;
aux_data = device_get_softc(idev);
mode = MODE_I2C_STOP;
if (reading)
mode |= MODE_I2C_READ;
else
mode |= MODE_I2C_WRITE;
if (aux_data->running) {
(void)iic_dp_aux_transaction(idev, mode, 0, NULL);
aux_data->running = false;
}
}
/*
 * Write a single byte to the current I2C address; the
 * I2C link must be running or this returns -EIO
*/
static int
iic_dp_aux_put_byte(device_t idev, u8 byte)
{
struct iic_dp_aux_data *aux_data;
int ret;
aux_data = device_get_softc(idev);
if (!aux_data->running)
return (-EIO);
ret = iic_dp_aux_transaction(idev, MODE_I2C_WRITE, byte, NULL);
return (ret);
}
/*
 * Read a single byte from the current I2C address; the
* I2C link must be running or this returns -EIO
*/
static int
iic_dp_aux_get_byte(device_t idev, u8 *byte_ret)
{
struct iic_dp_aux_data *aux_data;
int ret;
aux_data = device_get_softc(idev);
if (!aux_data->running)
return (-EIO);
ret = iic_dp_aux_transaction(idev, MODE_I2C_READ, 0, byte_ret);
return (ret);
}
static int
iic_dp_aux_xfer(device_t idev, struct iic_msg *msgs, uint32_t num)
{
u8 *buf;
int b, m, ret;
u16 len;
bool reading;
ret = 0;
reading = false;
for (m = 0; m < num; m++) {
len = msgs[m].len;
buf = msgs[m].buf;
reading = (msgs[m].flags & IIC_M_RD) != 0;
ret = iic_dp_aux_address(idev, msgs[m].slave >> 1, reading);
if (ret < 0)
break;
if (reading) {
for (b = 0; b < len; b++) {
ret = iic_dp_aux_get_byte(idev, &buf[b]);
if (ret != 0)
break;
}
} else {
for (b = 0; b < len; b++) {
ret = iic_dp_aux_put_byte(idev, buf[b]);
if (ret < 0)
break;
}
}
if (ret != 0)
break;
}
iic_dp_aux_stop(idev, reading);
DRM_DEBUG_KMS("dp_aux_xfer return %d\n", ret);
return (-ret);
}
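/*
 * Illustrative sketch (not part of the original file): the message pair a
 * caller might hand to iicbus_transfer() for a DDC/EDID read through this
 * adapter.  The buffer handling around it is hypothetical; only the 8-bit
 * slave-address convention matches the xfer routine above.
 */
#ifdef DRM_IIC_DP_AUX_EXAMPLE	/* never defined; example only */
static int
example_edid_read(device_t iicbus_dev, uint8_t *edid_buf)
{
	uint8_t offset = 0;
	struct iic_msg msgs[2] = {
		/* Set the EDID offset at the DDC address (0x50, shifted). */
		{ 0x50 << 1, IIC_M_WR, sizeof(offset), &offset },
		/* Restart and read one 128-byte EDID block. */
		{ 0x50 << 1, IIC_M_RD, 128, edid_buf },
	};

	return (iicbus_transfer(iicbus_dev, msgs, 2));
}
#endif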
static void
iic_dp_aux_reset_bus(device_t idev)
{
(void)iic_dp_aux_address(idev, 0, false);
(void)iic_dp_aux_stop(idev, false);
}
static int
iic_dp_aux_reset(device_t idev, u_char speed, u_char addr, u_char *oldaddr)
{
iic_dp_aux_reset_bus(idev);
return (0);
}
static int
iic_dp_aux_prepare_bus(device_t idev)
{
/* adapter->retries = 3; */
iic_dp_aux_reset_bus(idev);
return (0);
}
static int
iic_dp_aux_probe(device_t idev)
{
return (BUS_PROBE_DEFAULT);
}
static int
iic_dp_aux_attach(device_t idev)
{
struct iic_dp_aux_data *aux_data;
aux_data = device_get_softc(idev);
aux_data->port = device_add_child(idev, "iicbus", -1);
if (aux_data->port == NULL)
return (ENXIO);
device_quiet(aux_data->port);
bus_generic_attach(idev);
return (0);
}
int
iic_dp_aux_add_bus(device_t dev, const char *name,
int (*ch)(device_t idev, int mode, uint8_t write_byte, uint8_t *read_byte),
void *priv, device_t *bus, device_t *adapter)
{
device_t ibus;
struct iic_dp_aux_data *data;
int idx, error;
static int dp_bus_counter;
mtx_lock(&Giant);
idx = atomic_fetchadd_int(&dp_bus_counter, 1);
ibus = device_add_child(dev, "drm_iic_dp_aux", idx);
if (ibus == NULL) {
mtx_unlock(&Giant);
DRM_ERROR("drm_iic_dp_aux bus %d creation error\n", idx);
return (-ENXIO);
}
device_quiet(ibus);
error = device_probe_and_attach(ibus);
if (error != 0) {
device_delete_child(dev, ibus);
mtx_unlock(&Giant);
DRM_ERROR("drm_iic_dp_aux bus %d attach failed, %d\n",
idx, error);
return (-error);
}
data = device_get_softc(ibus);
data->running = false;
data->address = 0;
data->aux_ch = ch;
data->priv = priv;
error = iic_dp_aux_prepare_bus(ibus);
if (error == 0) {
*bus = ibus;
*adapter = data->port;
}
mtx_unlock(&Giant);
return (-error);
}
static device_method_t drm_iic_dp_aux_methods[] = {
DEVMETHOD(device_probe, iic_dp_aux_probe),
DEVMETHOD(device_attach, iic_dp_aux_attach),
DEVMETHOD(device_detach, bus_generic_detach),
DEVMETHOD(iicbus_reset, iic_dp_aux_reset),
DEVMETHOD(iicbus_transfer, iic_dp_aux_xfer),
DEVMETHOD_END
};
static driver_t drm_iic_dp_aux_driver = {
"drm_iic_dp_aux",
drm_iic_dp_aux_methods,
sizeof(struct iic_dp_aux_data)
};
static devclass_t drm_iic_dp_aux_devclass;
DRIVER_MODULE_ORDERED(drm_iic_dp_aux, drmn, drm_iic_dp_aux_driver,
drm_iic_dp_aux_devclass, 0, 0, SI_ORDER_SECOND);

View File

@ -1,511 +0,0 @@
/**
* \file drm_drv.c
* Generic driver template
*
* \author Rickard E. (Rik) Faith <faith@valinux.com>
* \author Gareth Hughes <gareth@valinux.com>
*
* To use this template, you must at least define the following (samples
* given for the MGA driver):
*
* \code
* #define DRIVER_AUTHOR "VA Linux Systems, Inc."
*
* #define DRIVER_NAME "mga"
* #define DRIVER_DESC "Matrox G200/G400"
* #define DRIVER_DATE "20001127"
*
* #define drm_x mga_##x
* \endcode
*/
/*
* Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
*
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/sysent.h>
#include <dev/drm2/drmP.h>
#include <dev/drm2/drm_core.h>
#include <dev/drm2/drm_global.h>
struct sx drm_global_mutex;
/** Ioctl table */
static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
/* The DRM_IOCTL_DMA ioctl should be defined by the driver. */
DRM_IOCTL_DEF(DRM_IOCTL_DMA, NULL, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
#if __OS_HAS_AGP
DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
#endif
DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
#ifdef FREEBSD_NOTYET
DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED),
#endif /* FREEBSD_NOTYET */
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
};
#ifdef COMPAT_FREEBSD32
extern struct drm_ioctl_desc drm_compat_ioctls[];
#endif
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
/**
* Take down the DRM device.
*
* \param dev DRM device structure.
*
* Frees every resource in \p dev.
*
* \sa drm_device
*/
int drm_lastclose(struct drm_device * dev)
{
#ifdef __linux__
struct drm_vma_entry *vma, *vma_temp;
#endif
DRM_DEBUG("\n");
if (dev->driver->lastclose)
dev->driver->lastclose(dev);
DRM_DEBUG("driver lastclose completed\n");
if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET))
drm_irq_uninstall(dev);
DRM_LOCK(dev);
/* Clear AGP information */
if (drm_core_has_AGP(dev) && dev->agp &&
!drm_core_check_feature(dev, DRIVER_MODESET)) {
struct drm_agp_mem *entry, *tempe;
/* Remove AGP resources, but leave dev->agp
intact until drv_cleanup is called. */
list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
if (entry->bound)
drm_unbind_agp(entry->memory);
drm_free_agp(entry->memory, entry->pages);
free(entry, DRM_MEM_AGPLISTS);
}
INIT_LIST_HEAD(&dev->agp->memory);
if (dev->agp->acquired)
drm_agp_release(dev);
dev->agp->acquired = 0;
dev->agp->enabled = 0;
}
if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg &&
!drm_core_check_feature(dev, DRIVER_MODESET)) {
drm_sg_cleanup(dev->sg);
dev->sg = NULL;
}
#ifdef __linux__
/* Clear vma list (only built for debugging) */
list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
list_del(&vma->head);
kfree(vma);
}
#endif
if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
!drm_core_check_feature(dev, DRIVER_MODESET))
drm_dma_takedown(dev);
DRM_UNLOCK(dev);
DRM_DEBUG("lastclose completed\n");
return 0;
}
#ifdef __linux__
/** File operations structure */
static const struct file_operations drm_stub_fops = {
.owner = THIS_MODULE,
.open = drm_stub_open,
.llseek = noop_llseek,
};
#endif
static int __init drm_core_init(void)
{
sx_init(&drm_global_mutex, "drm_global_mutex");
drm_global_init();
#if DRM_LINUX
linux_ioctl_register_handler(&drm_handler);
#endif /* DRM_LINUX */
DRM_INFO("Initialized %s %d.%d.%d %s\n",
CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
return 0;
}
static void __exit drm_core_exit(void)
{
#if DRM_LINUX
linux_ioctl_unregister_handler(&drm_handler);
#endif /* DRM_LINUX */
drm_global_release();
sx_destroy(&drm_global_mutex);
}
SYSINIT(drm_register, SI_SUB_KLD, SI_ORDER_MIDDLE,
drm_core_init, NULL);
SYSUNINIT(drm_unregister, SI_SUB_KLD, SI_ORDER_MIDDLE,
drm_core_exit, NULL);
/**
 * Copy an IOCTL return string to user space
*/
static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
{
int len;
/* don't overflow userbuf */
len = strlen(value);
if (len > *buf_len)
len = *buf_len;
/* let userspace know exact length of driver value (which could be
* larger than the userspace-supplied buffer) */
*buf_len = strlen(value);
/* finally, try filling in the userbuf */
if (len && buf)
if (copy_to_user(buf, value, len))
return -EFAULT;
return 0;
}
/**
* Get version information
*
* \param inode device inode.
* \param filp file pointer.
* \param cmd command.
* \param arg user argument, pointing to a drm_version structure.
* \return zero on success or negative number on failure.
*
* Fills in the version information in \p arg.
*/
int drm_version(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_version *version = data;
int err;
version->version_major = dev->driver->major;
version->version_minor = dev->driver->minor;
version->version_patchlevel = dev->driver->patchlevel;
err = drm_copy_field(version->name, &version->name_len,
dev->driver->name);
if (!err)
err = drm_copy_field(version->date, &version->date_len,
dev->driver->date);
if (!err)
err = drm_copy_field(version->desc, &version->desc_len,
dev->driver->desc);
return err;
}
/**
* Called whenever a process performs an ioctl on /dev/drm.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument.
* \return zero on success or negative number on failure.
*
* Looks up the ioctl function in the ::ioctls table, checking for root
 * privileges if so required, and dispatches to the respective function.
*/
int drm_ioctl(struct cdev *kdev, u_long cmd, caddr_t data, int flags,
DRM_STRUCTPROC *p)
{
struct drm_file *file_priv;
struct drm_device *dev;
struct drm_ioctl_desc *ioctl;
drm_ioctl_t *func;
unsigned int nr = DRM_IOCTL_NR(cmd);
int retcode;
dev = drm_get_device_from_kdev(kdev);
retcode = devfs_get_cdevpriv((void **)&file_priv);
if (retcode != 0) {
DRM_ERROR("can't find authenticator\n");
return EINVAL;
}
retcode = -EINVAL;
atomic_inc(&dev->ioctl_count);
atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
++file_priv->ioctl_count;
DRM_DEBUG("pid=%d, cmd=0x%02lx, nr=0x%02x, dev 0x%lx, auth=%d\n",
DRM_CURRENTPID, cmd, nr,
(long)file_priv->minor->device,
file_priv->authenticated);
switch (cmd) {
case FIONBIO:
case FIOASYNC:
atomic_dec(&dev->ioctl_count);
return 0;
case FIOSETOWN:
atomic_dec(&dev->ioctl_count);
return fsetown(*(int *)data, &file_priv->minor->buf_sigio);
case FIOGETOWN:
atomic_dec(&dev->ioctl_count);
*(int *) data = fgetown(&file_priv->minor->buf_sigio);
return 0;
}
if (IOCGROUP(cmd) != DRM_IOCTL_BASE) {
atomic_dec(&dev->ioctl_count);
DRM_DEBUG("Bad ioctl group 0x%x\n", (int)IOCGROUP(cmd));
return EINVAL;
}
if ((nr >= DRM_CORE_IOCTL_COUNT) &&
((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
goto err_i1;
#ifdef COMPAT_FREEBSD32
if (SV_CURPROC_FLAG(SV_ILP32) &&
(nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) &&
(nr < DRM_COMMAND_BASE + *dev->driver->num_compat_ioctls) &&
(dev->driver->compat_ioctls[nr - DRM_COMMAND_BASE].func != NULL)) {
ioctl = &dev->driver->compat_ioctls[nr - DRM_COMMAND_BASE];
} else
#endif
if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) &&
(nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
}
else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
#ifdef COMPAT_FREEBSD32
/*
* Called whenever a 32-bit process running under a 64-bit
* kernel performs an ioctl on /dev/drm.
*/
if (SV_CURPROC_FLAG(SV_ILP32) && drm_compat_ioctls[nr].func != NULL)
/*
* Assume that ioctls without an explicit compat
* routine will just work. This may not always be a
* good assumption, but it's better than always
* failing.
*/
ioctl = &drm_compat_ioctls[nr];
else
#endif
ioctl = &drm_ioctls[nr];
} else
goto err_i1;
/* Do not trust userspace, use our own definition */
func = ioctl->func;
/* is there a local override? */
#ifdef FREEBSD_NOTYET
#ifdef COMPAT_FREEBSD32
if (SV_CURPROC_FLAG(SV_ILP32) &&
(nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_compat_ioctl)
func = dev->driver->dma_compat_ioctl;
else
#endif
#endif /* FREEBSD_NOTYET */
if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl)
func = dev->driver->dma_ioctl;
if (!func) {
DRM_DEBUG("no function\n");
retcode = -EINVAL;
} else if (((ioctl->flags & DRM_ROOT_ONLY) && !DRM_SUSER(p)) ||
((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) ||
((ioctl->flags & DRM_MASTER) && !file_priv->is_master) ||
(!(ioctl->flags & DRM_CONTROL_ALLOW) && (file_priv->minor->type == DRM_MINOR_CONTROL))) {
retcode = -EACCES;
} else {
if (ioctl->flags & DRM_UNLOCKED)
retcode = func(dev, data, file_priv);
else {
sx_xlock(&drm_global_mutex);
retcode = func(dev, data, file_priv);
sx_xunlock(&drm_global_mutex);
}
}
err_i1:
atomic_dec(&dev->ioctl_count);
if (retcode == -ERESTARTSYS) {
/*
* FIXME: Find where in i915 ERESTARTSYS should be
* converted to EINTR.
*/
DRM_DEBUG("ret = %d -> %d\n", retcode, -EINTR);
retcode = -EINTR;
}
if (retcode)
DRM_DEBUG("ret = %d\n", retcode);
if (retcode != 0 &&
(drm_debug & DRM_DEBUGBITS_FAILED_IOCTL) != 0) {
printf(
"pid %d, cmd 0x%02lx, nr 0x%02x, dev 0x%lx, auth %d, res %d\n",
DRM_CURRENTPID, cmd, nr, (long)file_priv->minor->device,
file_priv->authenticated, -retcode);
}
return -retcode;
}
EXPORT_SYMBOL(drm_ioctl);
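/*
 * Illustrative sketch (not part of the original file): the table-selection
 * rule drm_ioctl() applies above, reduced to a lookup.  Compat handling,
 * permission checks and locking are deliberately omitted.
 */
#ifdef DRM_DRV_EXAMPLE	/* never defined; example only */
static struct drm_ioctl_desc *
example_lookup_ioctl(struct drm_device *dev, unsigned int nr)
{
	/* Driver-private range: [DRM_COMMAND_BASE, DRM_COMMAND_END). */
	if (nr >= DRM_COMMAND_BASE && nr < DRM_COMMAND_END) {
		if (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)
			return (&dev->driver->ioctls[nr - DRM_COMMAND_BASE]);
		return (NULL);
	}
	/* Everything else comes from the core table. */
	if (nr < DRM_CORE_IOCTL_COUNT)
		return (&drm_ioctls[nr]);
	return (NULL);
}
#endif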
struct drm_local_map *drm_getsarea(struct drm_device *dev)
{
struct drm_map_list *entry;
list_for_each_entry(entry, &dev->maplist, head) {
if (entry->map && entry->map->type == _DRM_SHM &&
(entry->map->flags & _DRM_CONTAINS_LOCK)) {
return entry->map;
}
}
return NULL;
}
EXPORT_SYMBOL(drm_getsarea);

File diff suppressed because it is too large

View File

@ -1,257 +0,0 @@
/*
* Copyright © 2007-2008 Intel Corporation
* Jesse Barnes <jesse.barnes@intel.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* $FreeBSD$
*/
#ifndef __DRM_EDID_H__
#define __DRM_EDID_H__
#define EDID_LENGTH 128
#define DDC_ADDR 0x50
#define CEA_EXT 0x02
#define VTB_EXT 0x10
#define DI_EXT 0x40
#define LS_EXT 0x50
#define MI_EXT 0x60
struct est_timings {
u8 t1;
u8 t2;
u8 mfg_rsvd;
} __attribute__((packed));
/* 00=16:10, 01=4:3, 10=5:4, 11=16:9 */
#define EDID_TIMING_ASPECT_SHIFT 6
#define EDID_TIMING_ASPECT_MASK (0x3 << EDID_TIMING_ASPECT_SHIFT)
/* need to add 60 */
#define EDID_TIMING_VFREQ_SHIFT 0
#define EDID_TIMING_VFREQ_MASK (0x3f << EDID_TIMING_VFREQ_SHIFT)
struct std_timing {
u8 hsize; /* need to multiply by 8 then add 248 */
u8 vfreq_aspect;
} __attribute__((packed));
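/*
 * Illustrative sketch (not part of the original header): decoding a
 * standard timing descriptor using the rules noted in the comments above.
 * Aspect-ratio handling is left out to keep the example short.
 */
#ifdef DRM_EDID_EXAMPLE	/* never defined; example only */
static inline void
example_decode_std_timing(const struct std_timing *t, int *hsize, int *vfreq)
{
	/* Horizontal size is stored as (pixels / 8) - 31. */
	*hsize = (t->hsize * 8) + 248;
	/* Refresh rate is stored as Hz - 60 in the low six bits. */
	*vfreq = ((t->vfreq_aspect & EDID_TIMING_VFREQ_MASK) >>
	    EDID_TIMING_VFREQ_SHIFT) + 60;
}
#endif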
#define DRM_EDID_PT_HSYNC_POSITIVE (1 << 1)
#define DRM_EDID_PT_VSYNC_POSITIVE (1 << 2)
#define DRM_EDID_PT_SEPARATE_SYNC (3 << 3)
#define DRM_EDID_PT_STEREO (1 << 5)
#define DRM_EDID_PT_INTERLACED (1 << 7)
/* If detailed data is pixel timing */
struct detailed_pixel_timing {
u8 hactive_lo;
u8 hblank_lo;
u8 hactive_hblank_hi;
u8 vactive_lo;
u8 vblank_lo;
u8 vactive_vblank_hi;
u8 hsync_offset_lo;
u8 hsync_pulse_width_lo;
u8 vsync_offset_pulse_width_lo;
u8 hsync_vsync_offset_pulse_width_hi;
u8 width_mm_lo;
u8 height_mm_lo;
u8 width_height_mm_hi;
u8 hborder;
u8 vborder;
u8 misc;
} __attribute__((packed));
/* If it's not pixel timing, it'll be one of the below */
struct detailed_data_string {
u8 str[13];
} __attribute__((packed));
struct detailed_data_monitor_range {
u8 min_vfreq;
u8 max_vfreq;
u8 min_hfreq_khz;
u8 max_hfreq_khz;
u8 pixel_clock_mhz; /* need to multiply by 10 */
u8 flags;
union {
struct {
u8 reserved;
u8 hfreq_start_khz; /* need to multiply by 2 */
u8 c; /* need to divide by 2 */
__le16 m;
u8 k;
u8 j; /* need to divide by 2 */
} __attribute__((packed)) gtf2;
struct {
u8 version;
u8 data1; /* high 6 bits: extra clock resolution */
u8 data2; /* plus low 2 of above: max hactive */
u8 supported_aspects;
u8 flags; /* preferred aspect and blanking support */
u8 supported_scalings;
u8 preferred_refresh;
} __attribute__((packed)) cvt;
} formula;
} __attribute__((packed));
struct detailed_data_wpindex {
u8 white_yx_lo; /* Lower 2 bits each */
u8 white_x_hi;
u8 white_y_hi;
u8 gamma; /* need to divide by 100 then add 1 */
} __attribute__((packed));
struct detailed_data_color_point {
u8 windex1;
u8 wpindex1[3];
u8 windex2;
u8 wpindex2[3];
} __attribute__((packed));
struct cvt_timing {
u8 code[3];
} __attribute__((packed));
struct detailed_non_pixel {
u8 pad1;
u8 type; /* ff=serial, fe=string, fd=monitor range, fc=monitor name
fb=color point data, fa=standard timing data,
f9=undefined, f8=mfg. reserved */
u8 pad2;
union {
struct detailed_data_string str;
struct detailed_data_monitor_range range;
struct detailed_data_wpindex color;
struct std_timing timings[6];
struct cvt_timing cvt[4];
} data;
} __attribute__((packed));
#define EDID_DETAIL_EST_TIMINGS 0xf7
#define EDID_DETAIL_CVT_3BYTE 0xf8
#define EDID_DETAIL_COLOR_MGMT_DATA 0xf9
#define EDID_DETAIL_STD_MODES 0xfa
#define EDID_DETAIL_MONITOR_CPDATA 0xfb
#define EDID_DETAIL_MONITOR_NAME 0xfc
#define EDID_DETAIL_MONITOR_RANGE 0xfd
#define EDID_DETAIL_MONITOR_STRING 0xfe
#define EDID_DETAIL_MONITOR_SERIAL 0xff
struct detailed_timing {
__le16 pixel_clock; /* need to multiply by 10 kHz */
union {
struct detailed_pixel_timing pixel_data;
struct detailed_non_pixel other_data;
} data;
} __attribute__((packed));
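/*
 * Illustrative sketch (not part of the original header): pulling the active
 * width/height and pixel clock out of a detailed (pixel) timing, following
 * the lo/hi split implied by the field names above.  The le16toh() byte-swap
 * is an assumption about how the __le16 field would be read on FreeBSD.
 */
#ifdef DRM_EDID_EXAMPLE	/* never defined; example only */
static inline void
example_decode_detailed_timing(const struct detailed_timing *dt,
    int *hactive, int *vactive, int *clock_khz)
{
	const struct detailed_pixel_timing *pt = &dt->data.pixel_data;

	/* Stored in units of 10 kHz. */
	*clock_khz = le16toh(dt->pixel_clock) * 10;
	/* Low 8 bits plus the high nibble of the shared hi byte. */
	*hactive = pt->hactive_lo | ((pt->hactive_hblank_hi & 0xf0) << 4);
	*vactive = pt->vactive_lo | ((pt->vactive_vblank_hi & 0xf0) << 4);
}
#endif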
#define DRM_EDID_INPUT_SERRATION_VSYNC (1 << 0)
#define DRM_EDID_INPUT_SYNC_ON_GREEN (1 << 1)
#define DRM_EDID_INPUT_COMPOSITE_SYNC (1 << 2)
#define DRM_EDID_INPUT_SEPARATE_SYNCS (1 << 3)
#define DRM_EDID_INPUT_BLANK_TO_BLACK (1 << 4)
#define DRM_EDID_INPUT_VIDEO_LEVEL (3 << 5)
#define DRM_EDID_INPUT_DIGITAL (1 << 7)
#define DRM_EDID_DIGITAL_DEPTH_MASK (7 << 4)
#define DRM_EDID_DIGITAL_DEPTH_UNDEF (0 << 4)
#define DRM_EDID_DIGITAL_DEPTH_6 (1 << 4)
#define DRM_EDID_DIGITAL_DEPTH_8 (2 << 4)
#define DRM_EDID_DIGITAL_DEPTH_10 (3 << 4)
#define DRM_EDID_DIGITAL_DEPTH_12 (4 << 4)
#define DRM_EDID_DIGITAL_DEPTH_14 (5 << 4)
#define DRM_EDID_DIGITAL_DEPTH_16 (6 << 4)
#define DRM_EDID_DIGITAL_DEPTH_RSVD (7 << 4)
#define DRM_EDID_DIGITAL_TYPE_UNDEF (0)
#define DRM_EDID_DIGITAL_TYPE_DVI (1)
#define DRM_EDID_DIGITAL_TYPE_HDMI_A (2)
#define DRM_EDID_DIGITAL_TYPE_HDMI_B (3)
#define DRM_EDID_DIGITAL_TYPE_MDDI (4)
#define DRM_EDID_DIGITAL_TYPE_DP (5)
#define DRM_EDID_FEATURE_DEFAULT_GTF (1 << 0)
#define DRM_EDID_FEATURE_PREFERRED_TIMING (1 << 1)
#define DRM_EDID_FEATURE_STANDARD_COLOR (1 << 2)
/* If analog */
#define DRM_EDID_FEATURE_DISPLAY_TYPE (3 << 3) /* 00=mono, 01=rgb, 10=non-rgb, 11=unknown */
/* If digital */
#define DRM_EDID_FEATURE_COLOR_MASK (3 << 3)
#define DRM_EDID_FEATURE_RGB (0 << 3)
#define DRM_EDID_FEATURE_RGB_YCRCB444 (1 << 3)
#define DRM_EDID_FEATURE_RGB_YCRCB422 (2 << 3)
#define DRM_EDID_FEATURE_RGB_YCRCB (3 << 3) /* both 4:4:4 and 4:2:2 */
#define DRM_EDID_FEATURE_PM_ACTIVE_OFF (1 << 5)
#define DRM_EDID_FEATURE_PM_SUSPEND (1 << 6)
#define DRM_EDID_FEATURE_PM_STANDBY (1 << 7)
struct edid {
u8 header[8];
/* Vendor & product info */
u8 mfg_id[2];
u8 prod_code[2];
u32 serial; /* FIXME: byte order */
u8 mfg_week;
u8 mfg_year;
/* EDID version */
u8 version;
u8 revision;
/* Display info: */
u8 input;
u8 width_cm;
u8 height_cm;
u8 gamma;
u8 features;
/* Color characteristics */
u8 red_green_lo;
u8 black_white_lo;
u8 red_x;
u8 red_y;
u8 green_x;
u8 green_y;
u8 blue_x;
u8 blue_y;
u8 white_x;
u8 white_y;
/* Est. timings and mfg rsvd timings */
struct est_timings established_timings;
/* Standard timings 1-8*/
struct std_timing standard_timings[8];
/* Detailed timings 1-4 */
struct detailed_timing detailed_timings[4];
/* Number of 128 byte ext. blocks */
u8 extensions;
/* Checksum */
u8 checksum;
} __attribute__((packed));
#define EDID_PRODUCT_ID(e) ((e)->prod_code[0] | ((e)->prod_code[1] << 8))
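/*
 * Illustrative sketch (not part of the original header): decoding the packed
 * 3-letter manufacturer ID and validating the block checksum.  This mirrors
 * the usual EDID 1.x rules and is offered as a reading aid only.
 */
#ifdef DRM_EDID_EXAMPLE	/* never defined; example only */
static inline void
example_edid_vendor(const struct edid *e, char vendor[4])
{
	/* Three 5-bit letters, 'A' == 1, packed big-endian in mfg_id. */
	vendor[0] = '@' + ((e->mfg_id[0] & 0x7c) >> 2);
	vendor[1] = '@' + (((e->mfg_id[0] & 0x03) << 3) |
	    ((e->mfg_id[1] & 0xe0) >> 5));
	vendor[2] = '@' + (e->mfg_id[1] & 0x1f);
	vendor[3] = '\0';
}

static inline int
example_edid_block_valid(const u8 *block)
{
	u8 sum = 0;
	int i;

	/* All 128 bytes of a block, checksum included, sum to 0 (mod 256). */
	for (i = 0; i < EDID_LENGTH; i++)
		sum += block[i];
	return (sum == 0);
}
#endif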
struct drm_encoder;
struct drm_connector;
struct drm_display_mode;
void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid);
int drm_av_sync_delay(struct drm_connector *connector,
struct drm_display_mode *mode);
struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
struct drm_display_mode *mode);
int drm_load_edid_firmware(struct drm_connector *connector);
#endif /* __DRM_EDID_H__ */

Some files were not shown because too many files have changed in this diff