drm: Update the device-independent code to match Linux 3.8.13

This update brings a few features:
    o  Support for the setmaster/dropmaster ioctls. For instance, they
       are used to run multiple X servers simultaneously.
    o  Support for minor devices. The only user-visible change is a new
       entry in /dev/dri but it is useless at the moment. This is a
       first step to support render nodes [1].

The main benefit is to greatly reduce the diff with Linux (at the
expense of an unreadable commit diff). Hopefully, next upgrades will be
easier.

No updates were made to the drivers, besides adapting them to API
changes.

[1] https://en.wikipedia.org/wiki/Direct_Rendering_Manager#Render_nodes

Tested by:	Many people
MFC after:	1 month
Relnotes:	yes
This commit is contained in:
Jean-Sébastien Pédron 2015-03-17 18:50:33 +00:00
parent 758cc3dcd5
commit 455fa6518a
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=280183
158 changed files with 10892 additions and 8744 deletions

View File

@ -42,7 +42,7 @@ static int drm_ati_alloc_pcigart_table(struct drm_device *dev,
struct drm_ati_pcigart_info *gart_info)
{
gart_info->table_handle = drm_pci_alloc(dev, gart_info->table_size,
PAGE_SIZE, 0xFFFFFFFFUL);
PAGE_SIZE, BUS_SPACE_MAXADDR);
if (gart_info->table_handle == NULL)
return -ENOMEM;
@ -97,6 +97,7 @@ int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info
return 1;
}
EXPORT_SYMBOL(drm_ati_pcigart_cleanup);
int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
{
@ -197,7 +198,7 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
}
ret = 1;
#if defined(__i386) || defined(__amd64)
#if defined(__i386__) || defined(__x86_64__)
wbinvd();
#else
mb();
@ -208,3 +209,4 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
gart_info->bus_addr = bus_address;
return ret;
}
EXPORT_SYMBOL(drm_ati_pcigart_init);

View File

@ -36,69 +36,31 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/**
* \mainpage
*
* The Direct Rendering Manager (DRM) is a device-independent kernel-level
* device driver that provides support for the XFree86 Direct Rendering
* Infrastructure (DRI).
*
* The DRM supports the Direct Rendering Infrastructure (DRI) in four major
* ways:
* -# The DRM provides synchronized access to the graphics hardware via
* the use of an optimized two-tiered lock.
* -# The DRM enforces the DRI security policy for access to the graphics
* hardware by only allowing authenticated X11 clients access to
* restricted regions of memory.
* -# The DRM provides a generic DMA engine, complete with multiple
* queues and the ability to detect the need for an OpenGL context
* switch.
* -# The DRM is extensible via the use of small device-specific modules
* that rely extensively on the API exported by the DRM module.
*
*/
#ifndef _DRM_H_
#define _DRM_H_
#ifndef __user
#define __user
#endif
#ifndef __iomem
#define __iomem
#endif
#ifdef __GNUC__
# define DEPRECATED __attribute__ ((deprecated))
#else
# define DEPRECATED
#endif
#if defined(__linux__)
#include <asm/ioctl.h> /* For _IO* macros */
#define DRM_IOCTL_NR(n) _IOC_NR(n)
#define DRM_IOC_VOID _IOC_NONE
#define DRM_IOC_READ _IOC_READ
#define DRM_IOC_WRITE _IOC_WRITE
#define DRM_IOC_READWRITE _IOC_READ|_IOC_WRITE
#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__DragonFly__)
#include <sys/ioccom.h>
#define DRM_IOCTL_NR(n) ((n) & 0xff)
#define DRM_IOC_VOID IOC_VOID
#define DRM_IOC_READ IOC_OUT
#define DRM_IOC_WRITE IOC_IN
#define DRM_IOC_READWRITE IOC_INOUT
#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
#endif
#ifdef __OpenBSD__
#define DRM_MAJOR 81
#include <linux/types.h>
#include <asm/ioctl.h>
typedef unsigned int drm_handle_t;
#else /* One of the BSDs */
#include <sys/ioccom.h>
#include <sys/types.h>
typedef int8_t __s8;
typedef uint8_t __u8;
typedef int16_t __s16;
typedef uint16_t __u16;
typedef int32_t __s32;
typedef uint32_t __u32;
typedef int64_t __s64;
typedef uint64_t __u64;
typedef unsigned long drm_handle_t;
#include <dev/drm2/drm_os_freebsd.h>
#endif
#if defined(__linux__) || defined(__NetBSD__)
#define DRM_MAJOR 226
#endif
#define DRM_MAX_MINOR 15
#define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */
#define DRM_MIN_ORDER 5 /**< At least 2^5 bytes = 32 bytes */
@ -111,20 +73,14 @@ __FBSDID("$FreeBSD$");
#define _DRM_LOCK_IS_CONT(lock) ((lock) & _DRM_LOCK_CONT)
#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
#if defined(__linux__)
typedef unsigned int drm_handle_t;
#else
#include <sys/types.h>
typedef unsigned long drm_handle_t; /**< To mapped regions */
#endif
typedef unsigned int drm_context_t; /**< GLXContext handle */
typedef unsigned int drm_context_t;
typedef unsigned int drm_drawable_t;
typedef unsigned int drm_magic_t; /**< Magic for authentication */
typedef unsigned int drm_magic_t;
/**
* Cliprect.
*
* \warning If you change this structure, make sure you change
* \warning: If you change this structure, make sure you change
* XF86DRIClipRectRec in the server as well
*
* \note KW: Actually it's illegal to change either for
@ -137,6 +93,14 @@ struct drm_clip_rect {
unsigned short y2;
};
/**
* Drawable information.
*/
struct drm_drawable_info {
unsigned int num_rects;
struct drm_clip_rect *rects;
};
/**
* Texture region,
*/
@ -160,22 +124,6 @@ struct drm_hw_lock {
char padding[60]; /**< Pad to cache line */
};
/* This is beyond ugly, and only works on GCC. However, it allows me to use
* drm.h in places (i.e., in the X-server) where I can't use size_t. The real
* fix is to use uint32_t instead of size_t, but that fix will break existing
* LP64 (i.e., PowerPC64, SPARC64, Alpha, etc.) systems. That *will*
* eventually happen, though. I chose 'unsigned long' to be the fallback type
* because that works on all the platforms I know about. Hopefully, the
* real fix will happen before that bites us.
*/
#ifdef __SIZE_TYPE__
# define DRM_SIZE_T __SIZE_TYPE__
#else
# warning "__SIZE_TYPE__ not defined. Assuming sizeof(size_t) == sizeof(unsigned long)!"
# define DRM_SIZE_T unsigned long
#endif
/**
* DRM_IOCTL_VERSION ioctl argument type.
*
@ -185,12 +133,12 @@ struct drm_version {
int version_major; /**< Major version */
int version_minor; /**< Minor version */
int version_patchlevel; /**< Patch level */
DRM_SIZE_T name_len; /**< Length of name buffer */
char __user *name; /**< Name of driver */
DRM_SIZE_T date_len; /**< Length of date buffer */
char __user *date; /**< User-space buffer to hold date */
DRM_SIZE_T desc_len; /**< Length of desc buffer */
char __user *desc; /**< User-space buffer to hold desc */
size_t name_len; /**< Length of name buffer */
char __user *name; /**< Name of driver */
size_t date_len; /**< Length of date buffer */
char __user *date; /**< User-space buffer to hold date */
size_t desc_len; /**< Length of desc buffer */
char __user *desc; /**< User-space buffer to hold desc */
};
/**
@ -199,12 +147,10 @@ struct drm_version {
* \sa drmGetBusid() and drmSetBusId().
*/
struct drm_unique {
DRM_SIZE_T unique_len; /**< Length of unique */
char __user *unique; /**< Unique name for driver instantiation */
size_t unique_len; /**< Length of unique */
char __user *unique; /**< Unique name for driver instantiation */
};
#undef DRM_SIZE_T
struct drm_list {
int count; /**< Length of user-space structures */
struct drm_version __user *version;
@ -239,7 +185,7 @@ enum drm_map_type {
_DRM_AGP = 3, /**< AGP/GART */
_DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */
_DRM_CONSISTENT = 5, /**< Consistent memory for PCI DMA */
_DRM_GEM = 6 /**< GEM */
_DRM_GEM = 6, /**< GEM object */
};
/**
@ -388,8 +334,8 @@ struct drm_buf_desc {
enum {
_DRM_PAGE_ALIGN = 0x01, /**< Align on page boundaries for DMA */
_DRM_AGP_BUFFER = 0x02, /**< Buffer is in AGP space */
_DRM_SG_BUFFER = 0x04, /**< Scatter/gather memory buffer */
_DRM_FB_BUFFER = 0x08, /**< Buffer is in frame buffer */
_DRM_SG_BUFFER = 0x04, /**< Scatter/gather memory buffer */
_DRM_FB_BUFFER = 0x08, /**< Buffer is in frame buffer */
_DRM_PCI_BUFFER_RO = 0x10 /**< Map PCI DMA buffer read-only */
} flags;
unsigned long agp_start; /**<
@ -402,8 +348,8 @@ struct drm_buf_desc {
* DRM_IOCTL_INFO_BUFS ioctl argument type.
*/
struct drm_buf_info {
int count; /**< Number of buffers described in list */
struct drm_buf_desc __user *list; /**< List of buffer descriptions */
int count; /**< Entries in list */
struct drm_buf_desc __user *list;
};
/**
@ -431,11 +377,7 @@ struct drm_buf_pub {
*/
struct drm_buf_map {
int count; /**< Length of the buffer list */
#if defined(__cplusplus)
void __user *c_virtual;
#else
void __user *virtual; /**< Mmap'd area in user-virtual */
#endif
struct drm_buf_pub __user *list; /**< Buffer information */
};
@ -454,7 +396,7 @@ struct drm_dma {
enum drm_dma_flags flags; /**< Flags */
int request_count; /**< Number of buffers requested */
int request_size; /**< Desired size for buffers */
int __user *request_indices; /**< Buffer information */
int __user *request_indices; /**< Buffer information */
int __user *request_sizes;
int granted_count; /**< Number of buffers granted */
};
@ -525,12 +467,13 @@ struct drm_irq_busid {
enum drm_vblank_seq_type {
_DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */
_DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */
/* bits 1-6 are reserved for high crtcs */
_DRM_VBLANK_HIGH_CRTC_MASK = 0x0000003e,
_DRM_VBLANK_EVENT = 0x4000000, /**< Send event instead of blocking */
_DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */
_DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */
_DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */
_DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */
_DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking */
_DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking, unsupported */
};
#define _DRM_VBLANK_HIGH_CRTC_SHIFT 1
@ -561,7 +504,6 @@ union drm_wait_vblank {
struct drm_wait_vblank_reply reply;
};
#define _DRM_PRE_MODESET 1
#define _DRM_POST_MODESET 2
@ -571,8 +513,8 @@ union drm_wait_vblank {
* \sa drmModesetCtl().
*/
struct drm_modeset_ctl {
uint32_t crtc;
uint32_t cmd;
__u32 crtc;
__u32 cmd;
};
/**
@ -617,16 +559,14 @@ struct drm_agp_info {
int agp_version_major;
int agp_version_minor;
unsigned long mode;
unsigned long aperture_base; /**< physical address */
unsigned long aperture_size; /**< bytes */
unsigned long memory_allowed; /**< bytes */
unsigned long aperture_base; /* physical address */
unsigned long aperture_size; /* bytes */
unsigned long memory_allowed; /* bytes */
unsigned long memory_used;
/** \name PCI information */
/*@{ */
/* PCI information */
unsigned short id_vendor;
unsigned short id_device;
/*@} */
};
/**
@ -647,386 +587,52 @@ struct drm_set_version {
int drm_dd_minor;
};
#define DRM_FENCE_FLAG_EMIT 0x00000001
#define DRM_FENCE_FLAG_SHAREABLE 0x00000002
/**
* On hardware with no interrupt events for operation completion,
* indicates that the kernel should sleep while waiting for any blocking
* operation to complete rather than spinning.
*
* Has no effect otherwise.
*/
#define DRM_FENCE_FLAG_WAIT_LAZY 0x00000004
#define DRM_FENCE_FLAG_NO_USER 0x00000010
/* Reserved for driver use */
#define DRM_FENCE_MASK_DRIVER 0xFF000000
#define DRM_FENCE_TYPE_EXE 0x00000001
struct drm_fence_arg {
unsigned int handle;
unsigned int fence_class;
unsigned int type;
unsigned int flags;
unsigned int signaled;
unsigned int error;
unsigned int sequence;
unsigned int pad64;
uint64_t expand_pad[2]; /* Future expansion */
};
/* Buffer permissions, referring to how the GPU uses the buffers.
* these translate to fence types used for the buffers.
* Typically a texture buffer is read, A destination buffer is write and
* a command (batch-) buffer is exe. Can be or-ed together.
*/
#define DRM_BO_FLAG_READ (1ULL << 0)
#define DRM_BO_FLAG_WRITE (1ULL << 1)
#define DRM_BO_FLAG_EXE (1ULL << 2)
/*
* All of the bits related to access mode
*/
#define DRM_BO_MASK_ACCESS (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_EXE)
/*
* Status flags. Can be read to determine the actual state of a buffer.
* Can also be set in the buffer mask before validation.
*/
/*
* Mask: Never evict this buffer. Not even with force. This type of buffer is only
* available to root and must be manually removed before buffer manager shutdown
* or lock.
* Flags: Acknowledge
*/
#define DRM_BO_FLAG_NO_EVICT (1ULL << 4)
/*
* Mask: Require that the buffer is placed in mappable memory when validated.
* If not set the buffer may or may not be in mappable memory when validated.
* Flags: If set, the buffer is in mappable memory.
*/
#define DRM_BO_FLAG_MAPPABLE (1ULL << 5)
/* Mask: The buffer should be shareable with other processes.
* Flags: The buffer is shareable with other processes.
*/
#define DRM_BO_FLAG_SHAREABLE (1ULL << 6)
/* Mask: If set, place the buffer in cache-coherent memory if available.
* If clear, never place the buffer in cache coherent memory if validated.
* Flags: The buffer is currently in cache-coherent memory.
*/
#define DRM_BO_FLAG_CACHED (1ULL << 7)
/* Mask: Make sure that every time this buffer is validated,
* it ends up on the same location provided that the memory mask is the same.
* The buffer will also not be evicted when claiming space for
* other buffers. Basically a pinned buffer but it may be thrown out as
* part of buffer manager shutdown or locking.
* Flags: Acknowledge.
*/
#define DRM_BO_FLAG_NO_MOVE (1ULL << 8)
/* Mask: Make sure the buffer is in cached memory when mapped. In conjunction
* with DRM_BO_FLAG_CACHED it also allows the buffer to be bound into the GART
* with unsnooped PTEs instead of snooped, by using chipset-specific cache
* flushing at bind time. A better name might be DRM_BO_FLAG_TT_UNSNOOPED,
* as the eviction to local memory (TTM unbind) on map is just a side effect
* to prevent aggressive cache prefetch from the GPU disturbing the cache
* management that the DRM is doing.
*
* Flags: Acknowledge.
* Buffers allocated with this flag should not be used for suballocators
* This type may have issues on CPUs with over-aggressive caching
* http://marc.info/?l=linux-kernel&m=102376926732464&w=2
*/
#define DRM_BO_FLAG_CACHED_MAPPED (1ULL << 19)
/* Mask: Force DRM_BO_FLAG_CACHED flag strictly also if it is set.
* Flags: Acknowledge.
*/
#define DRM_BO_FLAG_FORCE_CACHING (1ULL << 13)
/*
* Mask: Force DRM_BO_FLAG_MAPPABLE flag strictly also if it is clear.
* Flags: Acknowledge.
*/
#define DRM_BO_FLAG_FORCE_MAPPABLE (1ULL << 14)
#define DRM_BO_FLAG_TILE (1ULL << 15)
/*
* Memory type flags that can be or'ed together in the mask, but only
* one appears in flags.
*/
/* System memory */
#define DRM_BO_FLAG_MEM_LOCAL (1ULL << 24)
/* Translation table memory */
#define DRM_BO_FLAG_MEM_TT (1ULL << 25)
/* Vram memory */
#define DRM_BO_FLAG_MEM_VRAM (1ULL << 26)
/* Up to the driver to define. */
#define DRM_BO_FLAG_MEM_PRIV0 (1ULL << 27)
#define DRM_BO_FLAG_MEM_PRIV1 (1ULL << 28)
#define DRM_BO_FLAG_MEM_PRIV2 (1ULL << 29)
#define DRM_BO_FLAG_MEM_PRIV3 (1ULL << 30)
#define DRM_BO_FLAG_MEM_PRIV4 (1ULL << 31)
/* We can add more of these now with a 64-bit flag type */
/*
* This is a mask covering all of the memory type flags; easier to just
* use a single constant than a bunch of | values. It covers
* DRM_BO_FLAG_MEM_LOCAL through DRM_BO_FLAG_MEM_PRIV4
*/
#define DRM_BO_MASK_MEM 0x00000000FF000000ULL
/*
* This adds all of the CPU-mapping options in with the memory
* type to label all bits which change how the page gets mapped
*/
#define DRM_BO_MASK_MEMTYPE (DRM_BO_MASK_MEM | \
DRM_BO_FLAG_CACHED_MAPPED | \
DRM_BO_FLAG_CACHED | \
DRM_BO_FLAG_MAPPABLE)
/* Driver-private flags */
#define DRM_BO_MASK_DRIVER 0xFFFF000000000000ULL
/*
* Don't block on validate and map. Instead, return EBUSY.
*/
#define DRM_BO_HINT_DONT_BLOCK 0x00000002
/*
* Don't place this buffer on the unfenced list. This means
* that the buffer will not end up having a fence associated
* with it as a result of this operation
*/
#define DRM_BO_HINT_DONT_FENCE 0x00000004
/**
* On hardware with no interrupt events for operation completion,
* indicates that the kernel should sleep while waiting for any blocking
* operation to complete rather than spinning.
*
* Has no effect otherwise.
*/
#define DRM_BO_HINT_WAIT_LAZY 0x00000008
/*
* The client has compute relocations refering to this buffer using the
* offset in the presumed_offset field. If that offset ends up matching
* where this buffer lands, the kernel is free to skip executing those
* relocations
*/
#define DRM_BO_HINT_PRESUMED_OFFSET 0x00000010
#define DRM_BO_INIT_MAGIC 0xfe769812
#define DRM_BO_INIT_MAJOR 1
#define DRM_BO_INIT_MINOR 0
#define DRM_BO_INIT_PATCH 0
struct drm_bo_info_req {
uint64_t mask;
uint64_t flags;
unsigned int handle;
unsigned int hint;
unsigned int fence_class;
unsigned int desired_tile_stride;
unsigned int tile_info;
unsigned int pad64;
uint64_t presumed_offset;
};
struct drm_bo_create_req {
uint64_t flags;
uint64_t size;
uint64_t buffer_start;
unsigned int hint;
unsigned int page_alignment;
};
/*
* Reply flags
*/
#define DRM_BO_REP_BUSY 0x00000001
struct drm_bo_info_rep {
uint64_t flags;
uint64_t proposed_flags;
uint64_t size;
uint64_t offset;
uint64_t arg_handle;
uint64_t buffer_start;
unsigned int handle;
unsigned int fence_flags;
unsigned int rep_flags;
unsigned int page_alignment;
unsigned int desired_tile_stride;
unsigned int hw_tile_stride;
unsigned int tile_info;
unsigned int pad64;
uint64_t expand_pad[4]; /*Future expansion */
};
struct drm_bo_arg_rep {
struct drm_bo_info_rep bo_info;
int ret;
unsigned int pad64;
};
struct drm_bo_create_arg {
union {
struct drm_bo_create_req req;
struct drm_bo_info_rep rep;
} d;
};
struct drm_bo_handle_arg {
unsigned int handle;
};
struct drm_bo_reference_info_arg {
union {
struct drm_bo_handle_arg req;
struct drm_bo_info_rep rep;
} d;
};
struct drm_bo_map_wait_idle_arg {
union {
struct drm_bo_info_req req;
struct drm_bo_info_rep rep;
} d;
};
struct drm_bo_op_req {
enum {
drm_bo_validate,
drm_bo_fence,
drm_bo_ref_fence,
} op;
unsigned int arg_handle;
struct drm_bo_info_req bo_req;
};
struct drm_bo_op_arg {
uint64_t next;
union {
struct drm_bo_op_req req;
struct drm_bo_arg_rep rep;
} d;
int handled;
unsigned int pad64;
};
#define DRM_BO_MEM_LOCAL 0
#define DRM_BO_MEM_TT 1
#define DRM_BO_MEM_VRAM 2
#define DRM_BO_MEM_PRIV0 3
#define DRM_BO_MEM_PRIV1 4
#define DRM_BO_MEM_PRIV2 5
#define DRM_BO_MEM_PRIV3 6
#define DRM_BO_MEM_PRIV4 7
#define DRM_BO_MEM_TYPES 8 /* For now. */
#define DRM_BO_LOCK_UNLOCK_BM (1 << 0)
#define DRM_BO_LOCK_IGNORE_NO_EVICT (1 << 1)
struct drm_bo_version_arg {
uint32_t major;
uint32_t minor;
uint32_t patchlevel;
};
struct drm_mm_type_arg {
unsigned int mem_type;
unsigned int lock_flags;
};
struct drm_mm_init_arg {
unsigned int magic;
unsigned int major;
unsigned int minor;
unsigned int mem_type;
uint64_t p_offset;
uint64_t p_size;
};
struct drm_mm_info_arg {
unsigned int mem_type;
uint64_t p_size;
};
/** DRM_IOCTL_GEM_CLOSE ioctl argument type */
struct drm_gem_close {
/** Handle of the object to be closed. */
uint32_t handle;
uint32_t pad;
__u32 handle;
__u32 pad;
};
/** DRM_IOCTL_GEM_FLINK ioctl argument type */
struct drm_gem_flink {
/** Handle for the object being named */
uint32_t handle;
__u32 handle;
/** Returned global name */
uint32_t name;
__u32 name;
};
/** DRM_IOCTL_GEM_OPEN ioctl argument type */
struct drm_gem_open {
/** Name of object being opened */
uint32_t name;
__u32 name;
/** Returned handle for the object */
uint32_t handle;
__u32 handle;
/** Returned size of the object */
uint64_t size;
__u64 size;
};
/** DRM_IOCTL_GET_CAP ioctl argument type */
struct drm_get_cap {
uint64_t capability;
uint64_t value;
__u64 capability;
__u64 value;
};
struct drm_event {
uint32_t type;
uint32_t length;
#define DRM_CLOEXEC O_CLOEXEC
struct drm_prime_handle {
__u32 handle;
/** Flags.. only applicable for handle->fd */
__u32 flags;
/** Returned dmabuf file descriptor */
__s32 fd;
};
#define DRM_EVENT_VBLANK 0x01
#define DRM_EVENT_FLIP_COMPLETE 0x02
struct drm_event_vblank {
struct drm_event base;
uint64_t user_data;
uint32_t tv_sec;
uint32_t tv_usec;
uint32_t sequence;
uint32_t reserved;
};
#define DRM_CAP_DUMB_BUFFER 0x1
#define DRM_CAP_VBLANK_HIGH_CRTC 0x2
#define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3
#define DRM_CAP_DUMB_PREFER_SHADOW 0x4
#define DRM_CAP_PRIME 0x5
#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6
#define DRM_PRIME_CAP_IMPORT 0x1
#define DRM_PRIME_CAP_EXPORT 0x2
#include "drm_mode.h"
/**
* \name Ioctls Definitions
*/
/*@{*/
#include <dev/drm2/drm_mode.h>
#define DRM_IOCTL_BASE 'd'
#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr)
@ -1042,12 +648,10 @@ struct drm_event_vblank {
#define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, struct drm_client)
#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats)
#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version)
#define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl)
#define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl)
#define DRM_IOCTL_GEM_CLOSE DRM_IOW (0x09, struct drm_gem_close)
#define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0a, struct drm_gem_flink)
#define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, struct drm_gem_open)
#define DRM_IOCTL_GET_CAP DRM_IOWR(0x0c, struct drm_get_cap)
#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique)
@ -1065,7 +669,7 @@ struct drm_event_vblank {
#define DRM_IOCTL_RM_MAP DRM_IOW( 0x1b, struct drm_map)
#define DRM_IOCTL_SET_SAREA_CTX DRM_IOW( 0x1c, struct drm_ctx_priv_map)
#define DRM_IOCTL_GET_SAREA_CTX DRM_IOWR(0x1d, struct drm_ctx_priv_map)
#define DRM_IOCTL_GET_SAREA_CTX DRM_IOWR(0x1d, struct drm_ctx_priv_map)
#define DRM_IOCTL_SET_MASTER DRM_IO(0x1e)
#define DRM_IOCTL_DROP_MASTER DRM_IO(0x1f)
@ -1084,7 +688,8 @@ struct drm_event_vblank {
#define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, struct drm_lock)
#define DRM_IOCTL_FINISH DRM_IOW( 0x2c, struct drm_lock)
#define DRM_IOCTL_GEM_PRIME_OPEN DRM_IOWR(0x2e, struct drm_gem_open)
#define DRM_IOCTL_PRIME_HANDLE_TO_FD DRM_IOWR(0x2d, struct drm_prime_handle)
#define DRM_IOCTL_PRIME_FD_TO_HANDLE DRM_IOWR(0x2e, struct drm_prime_handle)
#define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30)
#define DRM_IOCTL_AGP_RELEASE DRM_IO( 0x31)
@ -1100,7 +705,7 @@ struct drm_event_vblank {
#define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, union drm_wait_vblank)
#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw)
#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw)
#define DRM_IOCTL_MODE_GETRESOURCES DRM_IOWR(0xA0, struct drm_mode_card_res)
#define DRM_IOCTL_MODE_GETCRTC DRM_IOWR(0xA1, struct drm_mode_crtc)
@ -1122,43 +727,16 @@ struct drm_event_vblank {
#define DRM_IOCTL_MODE_PAGE_FLIP DRM_IOWR(0xB0, struct drm_mode_crtc_page_flip)
#define DRM_IOCTL_MODE_DIRTYFB DRM_IOWR(0xB1, struct drm_mode_fb_dirty_cmd)
#define DRM_IOCTL_MODE_CREATE_DUMB DRM_IOWR(0xB2, struct drm_mode_create_dumb)
#define DRM_IOCTL_MODE_MAP_DUMB DRM_IOWR(0xB3, struct drm_mode_map_dumb)
#define DRM_IOCTL_MODE_DESTROY_DUMB DRM_IOWR(0xB4, struct drm_mode_destroy_dumb)
#define DRM_IOCTL_MODE_CREATE_DUMB DRM_IOWR(0xB2, struct drm_mode_create_dumb)
#define DRM_IOCTL_MODE_MAP_DUMB DRM_IOWR(0xB3, struct drm_mode_map_dumb)
#define DRM_IOCTL_MODE_DESTROY_DUMB DRM_IOWR(0xB4, struct drm_mode_destroy_dumb)
#define DRM_IOCTL_MODE_GETPLANERESOURCES DRM_IOWR(0xB5, struct drm_mode_get_plane_res)
#define DRM_IOCTL_MODE_GETPLANE DRM_IOWR(0xB6, struct drm_mode_get_plane)
#define DRM_IOCTL_MODE_SETPLANE DRM_IOWR(0xB7, struct drm_mode_set_plane)
#define DRM_IOCTL_MODE_GETPLANE DRM_IOWR(0xB6, struct drm_mode_get_plane)
#define DRM_IOCTL_MODE_SETPLANE DRM_IOWR(0xB7, struct drm_mode_set_plane)
#define DRM_IOCTL_MODE_ADDFB2 DRM_IOWR(0xB8, struct drm_mode_fb_cmd2)
#define DRM_IOCTL_MODE_OBJ_GETPROPERTIES DRM_IOWR(0xB9, struct drm_mode_obj_get_properties)
#define DRM_IOCTL_MODE_OBJ_SETPROPERTY DRM_IOWR(0xBA, struct drm_mode_obj_set_property)
#define DRM_IOCTL_MM_INIT DRM_IOWR(0xc0, struct drm_mm_init_arg)
#define DRM_IOCTL_MM_TAKEDOWN DRM_IOWR(0xc1, struct drm_mm_type_arg)
#define DRM_IOCTL_MM_LOCK DRM_IOWR(0xc2, struct drm_mm_type_arg)
#define DRM_IOCTL_MM_UNLOCK DRM_IOWR(0xc3, struct drm_mm_type_arg)
#define DRM_IOCTL_FENCE_CREATE DRM_IOWR(0xc4, struct drm_fence_arg)
#define DRM_IOCTL_FENCE_REFERENCE DRM_IOWR(0xc6, struct drm_fence_arg)
#define DRM_IOCTL_FENCE_UNREFERENCE DRM_IOWR(0xc7, struct drm_fence_arg)
#define DRM_IOCTL_FENCE_SIGNALED DRM_IOWR(0xc8, struct drm_fence_arg)
#define DRM_IOCTL_FENCE_FLUSH DRM_IOWR(0xc9, struct drm_fence_arg)
#define DRM_IOCTL_FENCE_WAIT DRM_IOWR(0xca, struct drm_fence_arg)
#define DRM_IOCTL_FENCE_EMIT DRM_IOWR(0xcb, struct drm_fence_arg)
#define DRM_IOCTL_FENCE_BUFFERS DRM_IOWR(0xcc, struct drm_fence_arg)
#define DRM_IOCTL_BO_CREATE DRM_IOWR(0xcd, struct drm_bo_create_arg)
#define DRM_IOCTL_BO_MAP DRM_IOWR(0xcf, struct drm_bo_map_wait_idle_arg)
#define DRM_IOCTL_BO_UNMAP DRM_IOWR(0xd0, struct drm_bo_handle_arg)
#define DRM_IOCTL_BO_REFERENCE DRM_IOWR(0xd1, struct drm_bo_reference_info_arg)
#define DRM_IOCTL_BO_UNREFERENCE DRM_IOWR(0xd2, struct drm_bo_handle_arg)
#define DRM_IOCTL_BO_SETSTATUS DRM_IOWR(0xd3, struct drm_bo_map_wait_idle_arg)
#define DRM_IOCTL_BO_INFO DRM_IOWR(0xd4, struct drm_bo_reference_info_arg)
#define DRM_IOCTL_BO_WAIT_IDLE DRM_IOWR(0xd5, struct drm_bo_map_wait_idle_arg)
#define DRM_IOCTL_BO_VERSION DRM_IOR(0xd6, struct drm_bo_version_arg)
#define DRM_IOCTL_MM_INFO DRM_IOWR(0xd7, struct drm_mm_info_arg)
/*@}*/
/**
* Device specific ioctls should only be in their respective headers
* The device specific ioctl range is from 0x40 to 0x99.
@ -1168,11 +746,51 @@ struct drm_event_vblank {
* drmCommandReadWrite().
*/
#define DRM_COMMAND_BASE 0x40
#define DRM_COMMAND_END 0xA0
#define DRM_COMMAND_END 0xA0
/**
* Header for events written back to userspace on the drm fd. The
* type defines the type of event, the length specifies the total
* length of the event (including the header), and user_data is
* typically a 64 bit value passed with the ioctl that triggered the
* event. A read on the drm fd will always only return complete
* events, that is, if for example the read buffer is 100 bytes, and
* there are two 64 byte events pending, only one will be returned.
*
* Event types 0 - 0x7fffffff are generic drm events, 0x80000000 and
* up are chipset specific.
*/
struct drm_event {
__u32 type;
__u32 length;
};
#define DRM_EVENT_VBLANK 0x01
#define DRM_EVENT_FLIP_COMPLETE 0x02
struct drm_event_vblank {
struct drm_event base;
__u64 user_data;
__u32 tv_sec;
__u32 tv_usec;
__u32 sequence;
__u32 reserved;
};
#define DRM_CAP_DUMB_BUFFER 0x1
#define DRM_CAP_VBLANK_HIGH_CRTC 0x2
#define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3
#define DRM_CAP_DUMB_PREFER_SHADOW 0x4
#define DRM_CAP_PRIME 0x5
#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6
#define DRM_PRIME_CAP_IMPORT 0x1
#define DRM_PRIME_CAP_EXPORT 0x2
/* typedef area */
#ifndef __KERNEL__
typedef struct drm_clip_rect drm_clip_rect_t;
typedef struct drm_drawable_info drm_drawable_info_t;
typedef struct drm_tex_region drm_tex_region_t;
typedef struct drm_hw_lock drm_hw_lock_t;
typedef struct drm_version drm_version_t;
@ -1206,16 +824,12 @@ typedef struct drm_update_draw drm_update_draw_t;
typedef struct drm_auth drm_auth_t;
typedef struct drm_irq_busid drm_irq_busid_t;
typedef enum drm_vblank_seq_type drm_vblank_seq_type_t;
typedef struct drm_agp_buffer drm_agp_buffer_t;
typedef struct drm_agp_binding drm_agp_binding_t;
typedef struct drm_agp_info drm_agp_info_t;
typedef struct drm_scatter_gather drm_scatter_gather_t;
typedef struct drm_set_version drm_set_version_t;
typedef struct drm_fence_arg drm_fence_arg_t;
typedef struct drm_mm_type_arg drm_mm_type_arg_t;
typedef struct drm_mm_init_arg drm_mm_init_arg_t;
typedef enum drm_bo_type drm_bo_type_t;
#endif
#endif

File diff suppressed because it is too large Load Diff

View File

@ -1,4 +1,12 @@
/*-
/**
* \file drm_agpsupport.c
* DRM support for AGP/GART backend
*
* \author Rickard E. (Rik) Faith <faith@valinux.com>
* \author Gareth Hughes <gareth@valinux.com>
*/
/*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
@ -21,64 +29,36 @@
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Author:
* Rickard E. (Rik) Faith <faith@valinux.com>
* Gareth Hughes <gareth@valinux.com>
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/** @file drm_agpsupport.c
* Support code for tying the kernel AGP support to DRM drivers and
* the DRM's AGP ioctls.
*/
#include <dev/drm2/drmP.h>
#include <dev/agp/agpreg.h>
#include <dev/pci/pcireg.h>
#if __OS_HAS_AGP
/* Returns 1 if AGP or 0 if not. */
static int
drm_device_find_capability(struct drm_device *dev, int cap)
/**
* Get AGP information.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg pointer to a (output) drm_agp_info structure.
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device has been initialized and acquired and fills in the
* drm_agp_info structure with the information in drm_agp_head::agp_info.
*/
int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info)
{
return (pci_find_cap(dev->device, cap, NULL) == 0);
}
int drm_device_is_agp(struct drm_device *dev)
{
if (dev->driver->device_is_agp != NULL) {
int ret;
/* device_is_agp returns a tristate, 0 = not AGP, 1 = definitely
* AGP, 2 = fall back to PCI capability
*/
ret = (*dev->driver->device_is_agp)(dev);
if (ret != DRM_MIGHT_BE_AGP)
return ret;
}
return (drm_device_find_capability(dev, PCIY_AGP));
}
int drm_device_is_pcie(struct drm_device *dev)
{
return (drm_device_find_capability(dev, PCIY_EXPRESS));
}
int drm_agp_info(struct drm_device * dev, struct drm_agp_info *info)
{
struct agp_info *kern;
DRM_AGP_KERN *kern;
if (!dev->agp || !dev->agp->acquired)
return EINVAL;
return -EINVAL;
kern = &dev->agp->info;
agp_get_info(dev->agp->agpdev, kern);
kern = &dev->agp->agp_info;
agp_get_info(dev->agp->bridge, kern);
info->agp_version_major = 1;
info->agp_version_minor = 0;
info->mode = kern->ai_mode;
@ -92,343 +72,397 @@ int drm_agp_info(struct drm_device * dev, struct drm_agp_info *info)
return 0;
}
EXPORT_SYMBOL(drm_agp_info);
int drm_agp_info_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_agp_info *info = data;
int err;
struct drm_agp_info info;
err = drm_agp_info(dev, &info);
if (err != 0)
err = drm_agp_info(dev, info);
if (err)
return err;
*(struct drm_agp_info *) data = info;
return 0;
}
int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
return drm_agp_acquire(dev);
}
int drm_agp_acquire(struct drm_device *dev)
/**
* Acquire the AGP device.
*
* \param dev DRM device that is to acquire AGP.
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device hasn't been acquired before and calls
* \c agp_backend_acquire.
*/
int drm_agp_acquire(struct drm_device * dev)
{
int retcode;
if (!dev->agp || dev->agp->acquired)
return EINVAL;
retcode = agp_acquire(dev->agp->agpdev);
if (!dev->agp)
return -ENODEV;
if (dev->agp->acquired)
return -EBUSY;
retcode = agp_acquire(dev->agp->bridge);
if (retcode)
return retcode;
return -retcode;
dev->agp->acquired = 1;
return 0;
}
int drm_agp_release_ioctl(struct drm_device *dev, void *data,
EXPORT_SYMBOL(drm_agp_acquire);
/**
* Acquire the AGP device (ioctl).
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument.
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device hasn't been acquired before and calls
* \c agp_backend_acquire.
*/
int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
return drm_agp_release(dev);
return drm_agp_acquire((struct drm_device *) file_priv->minor->dev);
}
/**
* Release the AGP device.
*
* \param dev DRM device that is to release AGP.
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device has been acquired and calls \c agp_backend_release.
*/
int drm_agp_release(struct drm_device * dev)
{
if (!dev->agp || !dev->agp->acquired)
return EINVAL;
agp_release(dev->agp->agpdev);
return -EINVAL;
agp_release(dev->agp->bridge);
dev->agp->acquired = 0;
return 0;
}
EXPORT_SYMBOL(drm_agp_release);
int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode)
int drm_agp_release_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
return drm_agp_release(dev);
}
/**
* Enable the AGP bus.
*
* \param dev DRM device that has previously acquired AGP.
* \param mode Requested AGP mode.
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device has been acquired but not enabled, and calls
* \c agp_enable.
*/
int drm_agp_enable(struct drm_device * dev, struct drm_agp_mode mode)
{
if (!dev->agp || !dev->agp->acquired)
return EINVAL;
dev->agp->mode = mode.mode;
agp_enable(dev->agp->agpdev, mode.mode);
return -EINVAL;
dev->agp->mode = mode.mode;
agp_enable(dev->agp->bridge, mode.mode);
dev->agp->enabled = 1;
return 0;
}
EXPORT_SYMBOL(drm_agp_enable);
int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_agp_mode mode;
struct drm_agp_mode *mode = data;
mode = *(struct drm_agp_mode *) data;
return drm_agp_enable(dev, mode);
return drm_agp_enable(dev, *mode);
}
/**
* Allocate AGP memory.
*
* \param inode device inode.
* \param file_priv file private pointer.
* \param cmd command.
* \param arg pointer to a drm_agp_buffer structure.
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device is present and has been acquired, allocates the
* memory via agp_allocate_memory() and creates a drm_agp_mem entry for it.
*/
int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
{
drm_agp_mem_t *entry;
void *handle;
unsigned long pages;
u_int32_t type;
struct drm_agp_mem *entry;
DRM_AGP_MEM *memory;
unsigned long pages;
u32 type;
struct agp_memory_info info;
if (!dev->agp || !dev->agp->acquired)
return EINVAL;
return -EINVAL;
if (!(entry = malloc(sizeof(*entry), DRM_MEM_AGPLISTS, M_NOWAIT)))
return -ENOMEM;
entry = malloc(sizeof(*entry), DRM_MEM_AGPLISTS, M_NOWAIT | M_ZERO);
if (entry == NULL)
return ENOMEM;
memset(entry, 0, sizeof(*entry));
pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
type = (u_int32_t) request->type;
DRM_UNLOCK(dev);
handle = drm_agp_allocate_memory(pages, type);
DRM_LOCK(dev);
if (handle == NULL) {
type = (u32) request->type;
if (!(memory = agp_alloc_memory(dev->agp->bridge, type, pages << PAGE_SHIFT))) {
free(entry, DRM_MEM_AGPLISTS);
return ENOMEM;
return -ENOMEM;
}
entry->handle = handle;
entry->bound = 0;
entry->pages = pages;
entry->prev = NULL;
entry->next = dev->agp->memory;
if (dev->agp->memory)
dev->agp->memory->prev = entry;
dev->agp->memory = entry;
agp_memory_info(dev->agp->agpdev, entry->handle, &info);
entry->handle = (unsigned long)memory;
entry->memory = memory;
entry->bound = 0;
entry->pages = pages;
list_add(&entry->head, &dev->agp->memory);
request->handle = (unsigned long) entry->handle;
request->physical = info.ami_physical;
agp_memory_info(dev->agp->bridge, entry->memory, &info);
request->handle = entry->handle;
request->physical = info.ami_physical;
return 0;
}
EXPORT_SYMBOL(drm_agp_alloc);
int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_agp_buffer request;
int retcode;
struct drm_agp_buffer *request = data;
request = *(struct drm_agp_buffer *) data;
DRM_LOCK(dev);
retcode = drm_agp_alloc(dev, &request);
DRM_UNLOCK(dev);
*(struct drm_agp_buffer *) data = request;
return retcode;
return drm_agp_alloc(dev, request);
}
static drm_agp_mem_t * drm_agp_lookup_entry(struct drm_device *dev,
void *handle)
/**
* Search for the AGP memory entry associated with a handle.
*
* \param dev DRM device structure.
* \param handle AGP memory handle.
* \return pointer to the drm_agp_mem structure associated with \p handle.
*
* Walks through drm_agp_head::memory until finding a matching handle.
*/
static struct drm_agp_mem *drm_agp_lookup_entry(struct drm_device * dev,
unsigned long handle)
{
drm_agp_mem_t *entry;
struct drm_agp_mem *entry;
for (entry = dev->agp->memory; entry; entry = entry->next) {
if (entry->handle == handle) return entry;
list_for_each_entry(entry, &dev->agp->memory, head) {
if (entry->handle == handle)
return entry;
}
return NULL;
}
/**
* Unbind AGP memory from the GATT (ioctl).
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg pointer to a drm_agp_binding structure.
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device is present and acquired, looks-up the AGP memory
* entry and passes it to the unbind_agp() function.
*/
int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request)
{
drm_agp_mem_t *entry;
int retcode;
struct drm_agp_mem *entry;
int ret;
if (!dev->agp || !dev->agp->acquired)
return EINVAL;
entry = drm_agp_lookup_entry(dev, (void *)request->handle);
if (entry == NULL || !entry->bound)
return EINVAL;
DRM_UNLOCK(dev);
retcode = drm_agp_unbind_memory(entry->handle);
DRM_LOCK(dev);
if (retcode == 0)
return -EINVAL;
if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
return -EINVAL;
if (!entry->bound)
return -EINVAL;
ret = drm_unbind_agp(entry->memory);
if (ret == 0)
entry->bound = 0;
return retcode;
return ret;
}
EXPORT_SYMBOL(drm_agp_unbind);
int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_agp_binding request;
int retcode;
struct drm_agp_binding *request = data;
request = *(struct drm_agp_binding *) data;
DRM_LOCK(dev);
retcode = drm_agp_unbind(dev, &request);
DRM_UNLOCK(dev);
return retcode;
return drm_agp_unbind(dev, request);
}
/**
* Bind AGP memory into the GATT (ioctl)
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg pointer to a drm_agp_binding structure.
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device is present and has been acquired and that no memory
* is currently bound into the GATT. Looks-up the AGP memory entry and passes
* it to bind_agp() function.
*/
int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request)
{
drm_agp_mem_t *entry;
int retcode;
int page;
struct drm_agp_mem *entry;
int retcode;
int page;
if (!dev->agp || !dev->agp->acquired)
return EINVAL;
DRM_DEBUG("agp_bind, page_size=%x\n", (int)PAGE_SIZE);
entry = drm_agp_lookup_entry(dev, (void *)request->handle);
if (entry == NULL || entry->bound)
return EINVAL;
return -EINVAL;
if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
return -EINVAL;
if (entry->bound)
return -EINVAL;
page = (request->offset + PAGE_SIZE - 1) / PAGE_SIZE;
DRM_UNLOCK(dev);
retcode = drm_agp_bind_memory(entry->handle, page);
DRM_LOCK(dev);
if (retcode == 0)
entry->bound = dev->agp->base + (page << PAGE_SHIFT);
return retcode;
if ((retcode = drm_bind_agp(entry->memory, page)))
return retcode;
entry->bound = dev->agp->base + (page << PAGE_SHIFT);
DRM_DEBUG("base = 0x%lx entry->bound = 0x%lx\n",
dev->agp->base, entry->bound);
return 0;
}
EXPORT_SYMBOL(drm_agp_bind);
int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_agp_binding request;
int retcode;
struct drm_agp_binding *request = data;
request = *(struct drm_agp_binding *) data;
DRM_LOCK(dev);
retcode = drm_agp_bind(dev, &request);
DRM_UNLOCK(dev);
return retcode;
return drm_agp_bind(dev, request);
}
/**
* Free AGP memory (ioctl).
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg pointer to a drm_agp_buffer structure.
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device is present and has been acquired and looks up the
* AGP memory entry. If the memory it's currently bound, unbind it via
* unbind_agp(). Frees it via free_agp() as well as the entry itself
* and unlinks from the doubly linked list it's inserted in.
*/
int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request)
{
drm_agp_mem_t *entry;
struct drm_agp_mem *entry;
if (!dev->agp || !dev->agp->acquired)
return EINVAL;
entry = drm_agp_lookup_entry(dev, (void*)request->handle);
if (entry == NULL)
return EINVAL;
if (entry->prev)
entry->prev->next = entry->next;
else
dev->agp->memory = entry->next;
if (entry->next)
entry->next->prev = entry->prev;
DRM_UNLOCK(dev);
return -EINVAL;
if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
return -EINVAL;
if (entry->bound)
drm_agp_unbind_memory(entry->handle);
drm_agp_free_memory(entry->handle);
DRM_LOCK(dev);
drm_unbind_agp(entry->memory);
list_del(&entry->head);
drm_free_agp(entry->memory, entry->pages);
free(entry, DRM_MEM_AGPLISTS);
return 0;
}
EXPORT_SYMBOL(drm_agp_free);
int drm_agp_free_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_agp_buffer request;
int retcode;
struct drm_agp_buffer *request = data;
request = *(struct drm_agp_buffer *) data;
DRM_LOCK(dev);
retcode = drm_agp_free(dev, &request);
DRM_UNLOCK(dev);
return retcode;
return drm_agp_free(dev, request);
}
drm_agp_head_t *drm_agp_init(void)
/**
* Initialize the AGP resources.
*
* \return pointer to a drm_agp_head structure.
*
* Gets the drm_agp_t structure which is made available by the agpgart module
* via the inter_module_* functions. Creates and initializes a drm_agp_head
* structure.
*/
struct drm_agp_head *drm_agp_init(struct drm_device *dev)
{
device_t agpdev;
drm_agp_head_t *head = NULL;
int agp_available = 1;
agpdev = DRM_AGP_FIND_DEVICE();
if (!agpdev)
agp_available = 0;
struct drm_agp_head *head = NULL;
DRM_DEBUG("agp_available = %d\n", agp_available);
if (agp_available) {
head = malloc(sizeof(*head), DRM_MEM_AGPLISTS,
M_NOWAIT | M_ZERO);
if (head == NULL)
return NULL;
head->agpdev = agpdev;
agp_get_info(agpdev, &head->info);
head->base = head->info.ai_aperture_base;
head->memory = NULL;
DRM_INFO("AGP at 0x%08lx %dMB\n",
(long)head->info.ai_aperture_base,
(int)(head->info.ai_aperture_size >> 20));
if (!(head = malloc(sizeof(*head), DRM_MEM_AGPLISTS, M_NOWAIT)))
return NULL;
memset((void *)head, 0, sizeof(*head));
head->bridge = agp_find_device();
if (!head->bridge) {
free(head, DRM_MEM_AGPLISTS);
return NULL;
} else {
agp_get_info(head->bridge, &head->agp_info);
}
INIT_LIST_HEAD(&head->memory);
head->cant_use_aperture = 0;
head->base = head->agp_info.ai_aperture_base;
return head;
}
void *drm_agp_allocate_memory(size_t pages, u32 type)
#ifdef FREEBSD_NOTYET
/**
* Binds a collection of pages into AGP memory at the given offset, returning
* the AGP memory structure containing them.
*
* No reference is held on the pages during this time -- it is up to the
* caller to handle that.
*/
DRM_AGP_MEM *
drm_agp_bind_pages(struct drm_device *dev,
struct page **pages,
unsigned long num_pages,
uint32_t gtt_offset,
u32 type)
{
device_t agpdev;
DRM_AGP_MEM *mem;
int ret, i;
agpdev = DRM_AGP_FIND_DEVICE();
if (!agpdev)
DRM_DEBUG("\n");
mem = agp_allocate_memory(dev->agp->bridge, num_pages,
type);
if (mem == NULL) {
DRM_ERROR("Failed to allocate memory for %ld pages\n",
num_pages);
return NULL;
}
return agp_alloc_memory(agpdev, type, pages << PAGE_SHIFT);
for (i = 0; i < num_pages; i++)
mem->pages[i] = pages[i];
mem->page_count = num_pages;
mem->is_flushed = true;
ret = agp_bind_memory(mem, gtt_offset / PAGE_SIZE);
if (ret != 0) {
DRM_ERROR("Failed to bind AGP memory: %d\n", ret);
agp_free_memory(mem);
return NULL;
}
return mem;
}
EXPORT_SYMBOL(drm_agp_bind_pages);
#endif /* FREEBSD_NOTYET */
int drm_agp_free_memory(void *handle)
{
device_t agpdev;
agpdev = DRM_AGP_FIND_DEVICE();
if (!agpdev || !handle)
return 0;
agp_free_memory(agpdev, handle);
return 1;
}
int drm_agp_bind_memory(void *handle, off_t start)
{
device_t agpdev;
agpdev = DRM_AGP_FIND_DEVICE();
if (!agpdev || !handle)
return EINVAL;
return agp_bind_memory(agpdev, handle, start * PAGE_SIZE);
}
int drm_agp_unbind_memory(void *handle)
{
device_t agpdev;
agpdev = DRM_AGP_FIND_DEVICE();
if (!agpdev || !handle)
return EINVAL;
return agp_unbind_memory(agpdev, handle);
}
#endif /* __OS_HAS_AGP */

View File

@ -36,12 +36,15 @@ __FBSDID("$FreeBSD$");
typedef u_int atomic_t;
typedef uint64_t atomic64_t;
#define BITS_PER_LONG (sizeof(long) * NBBY)
#define BITS_TO_LONGS(x) howmany(x, BITS_PER_LONG)
#define NB_BITS_PER_LONG (sizeof(long) * NBBY)
#define BITS_TO_LONGS(x) howmany(x, NB_BITS_PER_LONG)
#define atomic_read(p) (*(volatile u_int *)(p))
#define atomic_set(p, v) do { *(u_int *)(p) = (v); } while (0)
#define atomic64_read(p) atomic_load_acq_64(p)
#define atomic64_set(p, v) atomic_store_rel_64(p, v)
#define atomic_add(v, p) atomic_add_int(p, v)
#define atomic_sub(v, p) atomic_subtract_int(p, v)
#define atomic_inc(p) atomic_add(1, p)
@ -60,8 +63,8 @@ typedef uint64_t atomic64_t;
#define atomic_xchg(p, v) atomic_swap_int(p, v)
#define atomic64_xchg(p, v) atomic_swap_64(p, v)
#define __bit_word(b) ((b) / BITS_PER_LONG)
#define __bit_mask(b) (1UL << (b) % BITS_PER_LONG)
#define __bit_word(b) ((b) / NB_BITS_PER_LONG)
#define __bit_mask(b) (1UL << (b) % NB_BITS_PER_LONG)
#define __bit_addr(p, b) ((volatile u_long *)(p) + __bit_word(b))
#define clear_bit(b, p) \
@ -70,17 +73,21 @@ typedef uint64_t atomic64_t;
atomic_set_long(__bit_addr(p, b), __bit_mask(b))
#define test_bit(b, p) \
((*__bit_addr(p, b) & __bit_mask(b)) != 0)
#define test_and_set_bit(b, p) \
(atomic_xchg((p), 1) != b)
#define cmpxchg(ptr, old, new) \
(atomic_cmpset_int((volatile u_int *)(ptr),(old),(new)) ? (old) : (0))
static __inline u_long
find_first_zero_bit(const u_long *p, u_long max)
{
u_long i, n;
KASSERT(max % BITS_PER_LONG == 0, ("invalid bitmap size %lu", max));
for (i = 0; i < max / BITS_PER_LONG; i++) {
KASSERT(max % NB_BITS_PER_LONG == 0, ("invalid bitmap size %lu", max));
for (i = 0; i < max / NB_BITS_PER_LONG; i++) {
n = ~p[i];
if (n != 0)
return (i * BITS_PER_LONG + ffsl(n) - 1);
return (i * NB_BITS_PER_LONG + ffsl(n) - 1);
}
return (max);
}

View File

@ -1,4 +1,14 @@
/*-
/**
* \file drm_auth.c
* IOCTLs for authentication
*
* \author Rickard E. (Rik) Faith <faith@valinux.com>
* \author Gareth Hughes <gareth@valinux.com>
*/
/*
* Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
@ -21,121 +31,118 @@
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
* Gareth Hughes <gareth@valinux.com>
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/** @file drm_auth.c
* Implementation of the get/authmagic ioctls implementing the authentication
* scheme between the master and clients.
*/
#include <dev/drm2/drmP.h>
static int drm_hash_magic(drm_magic_t magic)
{
return magic & (DRM_HASH_SIZE-1);
}
static struct mtx drm_magic_lock;
/**
* Returns the file private associated with the given magic number.
* Find the file with the given magic number.
*
* \param dev DRM device.
* \param magic magic number.
*
* Searches in drm_device::magiclist within all files with the same hash key
* the one with matching magic number, while holding the drm_device::struct_mutex
* lock.
*/
static struct drm_file *drm_find_file(struct drm_device *dev, drm_magic_t magic)
static struct drm_file *drm_find_file(struct drm_master *master, drm_magic_t magic)
{
drm_magic_entry_t *pt;
int hash = drm_hash_magic(magic);
struct drm_file *retval = NULL;
struct drm_magic_entry *pt;
struct drm_hash_item *hash;
struct drm_device *dev = master->minor->dev;
DRM_LOCK_ASSERT(dev);
for (pt = dev->magiclist[hash].head; pt; pt = pt->next) {
if (pt->magic == magic) {
return pt->priv;
}
DRM_LOCK(dev);
if (!drm_ht_find_item(&master->magiclist, (unsigned long)magic, &hash)) {
pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item);
retval = pt->priv;
}
return NULL;
DRM_UNLOCK(dev);
return retval;
}
/**
* Inserts the given magic number into the hash table of used magic number
* lists.
* Adds a magic number.
*
* \param dev DRM device.
* \param priv file private data.
* \param magic magic number.
*
* Creates a drm_magic_entry structure and appends to the linked list
* associated the magic number hash key in drm_device::magiclist, while holding
* the drm_device::struct_mutex lock.
*/
static int drm_add_magic(struct drm_device *dev, struct drm_file *priv,
static int drm_add_magic(struct drm_master *master, struct drm_file *priv,
drm_magic_t magic)
{
int hash;
drm_magic_entry_t *entry;
struct drm_magic_entry *entry;
struct drm_device *dev = master->minor->dev;
DRM_DEBUG("%d\n", magic);
DRM_LOCK_ASSERT(dev);
hash = drm_hash_magic(magic);
entry = malloc(sizeof(*entry), DRM_MEM_MAGIC, M_ZERO | M_NOWAIT);
if (!entry)
return ENOMEM;
entry->magic = magic;
entry->priv = priv;
entry->next = NULL;
if (dev->magiclist[hash].tail) {
dev->magiclist[hash].tail->next = entry;
dev->magiclist[hash].tail = entry;
} else {
dev->magiclist[hash].head = entry;
dev->magiclist[hash].tail = entry;
}
return -ENOMEM;
entry->priv = priv;
entry->hash_item.key = (unsigned long)magic;
DRM_LOCK(dev);
drm_ht_insert_item(&master->magiclist, &entry->hash_item);
list_add_tail(&entry->head, &master->magicfree);
DRM_UNLOCK(dev);
return 0;
}
/**
* Removes the given magic number from the hash table of used magic number
* lists.
* Remove a magic number.
*
* \param dev DRM device.
* \param magic magic number.
*
* Searches and unlinks the entry in drm_device::magiclist with the magic
* number hash key, while holding the drm_device::struct_mutex lock.
*/
static int drm_remove_magic(struct drm_device *dev, drm_magic_t magic)
int drm_remove_magic(struct drm_master *master, drm_magic_t magic)
{
drm_magic_entry_t *prev = NULL;
drm_magic_entry_t *pt;
int hash;
DRM_LOCK_ASSERT(dev);
struct drm_magic_entry *pt;
struct drm_hash_item *hash;
struct drm_device *dev = master->minor->dev;
DRM_DEBUG("%d\n", magic);
hash = drm_hash_magic(magic);
for (pt = dev->magiclist[hash].head; pt; prev = pt, pt = pt->next) {
if (pt->magic == magic) {
if (dev->magiclist[hash].head == pt) {
dev->magiclist[hash].head = pt->next;
}
if (dev->magiclist[hash].tail == pt) {
dev->magiclist[hash].tail = prev;
}
if (prev) {
prev->next = pt->next;
}
free(pt, DRM_MEM_MAGIC);
return 0;
}
DRM_LOCK(dev);
if (drm_ht_find_item(&master->magiclist, (unsigned long)magic, &hash)) {
DRM_UNLOCK(dev);
return -EINVAL;
}
pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item);
drm_ht_remove_item(&master->magiclist, hash);
list_del(&pt->head);
DRM_UNLOCK(dev);
return EINVAL;
free(pt, DRM_MEM_MAGIC);
return 0;
}
/**
* Called by the client, this returns a unique magic number to be authorized
* by the master.
* Get a unique magic number (ioctl).
*
* The master may use its own knowledge of the client (such as the X
* connection that the magic is passed over) to determine if the magic number
* should be authenticated.
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg pointer to a resulting drm_auth structure.
* \return zero on success, or a negative number on failure.
*
* If there is a magic number in drm_file::magic then use it, otherwise
* searches an unique non-zero magic number and add it associating it with \p
* file_priv.
* This ioctl needs protection by the drm_global_mutex, which protects
* struct drm_file::magic and struct drm_magic_entry::priv.
*/
int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
@ -146,18 +153,15 @@ int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
if (file_priv->magic) {
auth->magic = file_priv->magic;
} else {
DRM_LOCK(dev);
do {
int old = sequence;
auth->magic = old+1;
if (!atomic_cmpset_int(&sequence, old, auth->magic))
continue;
} while (drm_find_file(dev, auth->magic));
mtx_lock(&drm_magic_lock);
if (!sequence)
++sequence; /* reserve 0 */
auth->magic = sequence++;
mtx_unlock(&drm_magic_lock);
} while (drm_find_file(file_priv->master, auth->magic));
file_priv->magic = auth->magic;
drm_add_magic(dev, file_priv, auth->magic);
DRM_UNLOCK(dev);
drm_add_magic(file_priv->master, file_priv, auth->magic);
}
DRM_DEBUG("%u\n", auth->magic);
@ -166,25 +170,47 @@ int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
}
/**
* Marks the client associated with the given magic number as authenticated.
* Authenticate with a magic.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg pointer to a drm_auth structure.
* \return zero if authentication successed, or a negative number otherwise.
*
* Checks if \p file_priv is associated with the magic number passed in \arg.
* This ioctl needs protection by the drm_global_mutex, which protects
* struct drm_file::magic and struct drm_magic_entry::priv.
*/
int drm_authmagic(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_auth *auth = data;
struct drm_file *priv;
struct drm_file *file;
DRM_DEBUG("%u\n", auth->magic);
DRM_LOCK(dev);
priv = drm_find_file(dev, auth->magic);
if (priv != NULL) {
priv->authenticated = 1;
drm_remove_magic(dev, auth->magic);
DRM_UNLOCK(dev);
if ((file = drm_find_file(file_priv->master, auth->magic))) {
file->authenticated = 1;
drm_remove_magic(file_priv->master, auth->magic);
return 0;
} else {
DRM_UNLOCK(dev);
return EINVAL;
}
return -EINVAL;
}
static int
drm_magic_init(void *arg)
{
mtx_init(&drm_magic_lock, "drm_getmagic__lock", NULL, MTX_DEF);
return (0);
}
static void
drm_magic_fini(void *arg)
{
mtx_destroy(&drm_magic_lock);
}
SYSINIT(drm_magic_init, SI_SUB_KLD, SI_ORDER_MIDDLE, drm_magic_init, NULL);
SYSUNINIT(drm_magic_fini, SI_SUB_KLD, SI_ORDER_MIDDLE, drm_magic_fini, NULL);

View File

@ -92,6 +92,7 @@ int drm_buffer_alloc(struct drm_buffer **buf, int size)
free(*buf, DRM_MEM_DRIVER);
return -ENOMEM;
}
EXPORT_SYMBOL(drm_buffer_alloc);
/**
* Copy the user data to the begin of the buffer and reset the processing
@ -128,6 +129,7 @@ int drm_buffer_copy_from_user(struct drm_buffer *buf,
buf->iterator = 0;
return 0;
}
EXPORT_SYMBOL(drm_buffer_copy_from_user);
/**
* Free the drm buffer object
@ -145,6 +147,7 @@ void drm_buffer_free(struct drm_buffer *buf)
free(buf, DRM_MEM_DRIVER);
}
}
EXPORT_SYMBOL(drm_buffer_free);
/**
* Read an object from buffer that may be split to multiple parts. If object
@ -181,3 +184,4 @@ void *drm_buffer_read_object(struct drm_buffer *buf,
drm_buffer_advance(buf, objsize);
return obj;
}
EXPORT_SYMBOL(drm_buffer_read_object);

File diff suppressed because it is too large Load Diff

View File

@ -1,4 +1,14 @@
/*-
/**
* \file drm_context.c
* IOCTLs for generic contexts
*
* \author Rickard E. (Rik) Faith <faith@valinux.com>
* \author Gareth Hughes <gareth@valinux.com>
*/
/*
* Created: Fri Nov 24 18:31:37 2000 by gareth@valinux.com
*
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
@ -21,29 +31,37 @@
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
* Gareth Hughes <gareth@valinux.com>
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/** @file drm_context.c
* Implementation of the context management ioctls.
/*
* ChangeLog:
* 2001-11-16 Torsten Duwe <duwe@caldera.de>
* added context constructor/destructor hooks,
* needed by SiS driver's memory management.
*/
#include <dev/drm2/drmP.h>
/* ================================================================
* Context bitmap support
*/
/******************************************************************/
/** \name Context bitmap support */
/*@{*/
void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle)
/**
* Free a handle from the context bitmap.
*
* \param dev DRM device.
* \param ctx_handle context handle.
*
* Clears the bit specified by \p ctx_handle in drm_device::ctx_bitmap and the entry
* in drm_device::ctx_idr, while holding the drm_device::struct_mutex
* lock.
*/
void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
{
if (ctx_handle < 0 || ctx_handle >= DRM_MAX_CTXBITMAP ||
if (ctx_handle < 0 || ctx_handle >= DRM_MAX_CTXBITMAP ||
dev->ctx_bitmap == NULL) {
DRM_ERROR("Attempt to free invalid context handle: %d\n",
ctx_handle);
@ -54,10 +72,18 @@ void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle)
clear_bit(ctx_handle, dev->ctx_bitmap);
dev->context_sareas[ctx_handle] = NULL;
DRM_UNLOCK(dev);
return;
}
int drm_ctxbitmap_next(struct drm_device *dev)
/**
* Context bitmap allocation.
*
* \param dev DRM device.
* \return (non-negative) context handle on success or a negative number on failure.
*
* Allocate a new idr from drm_device::ctx_idr while holding the
* drm_device::struct_mutex lock.
*/
static int drm_ctxbitmap_next(struct drm_device * dev)
{
int bit;
@ -74,7 +100,7 @@ int drm_ctxbitmap_next(struct drm_device *dev)
set_bit(bit, dev->ctx_bitmap);
DRM_DEBUG("bit : %d\n", bit);
if ((bit+1) > dev->max_context) {
drm_local_map_t **ctx_sareas;
struct drm_local_map **ctx_sareas;
int max_ctx = (bit+1);
ctx_sareas = realloc(dev->context_sareas,
@ -94,7 +120,14 @@ int drm_ctxbitmap_next(struct drm_device *dev)
return bit;
}
int drm_ctxbitmap_init(struct drm_device *dev)
/**
* Context bitmap initialization.
*
* \param dev DRM device.
*
* Initialise the drm_device::ctx_idr
*/
int drm_ctxbitmap_init(struct drm_device * dev)
{
int i;
int temp;
@ -118,7 +151,15 @@ int drm_ctxbitmap_init(struct drm_device *dev)
return 0;
}
void drm_ctxbitmap_cleanup(struct drm_device *dev)
/**
* Context bitmap cleanup.
*
* \param dev DRM device.
*
* Free all idr members using drm_ctx_sarea_free helper function
* while holding the drm_device::struct_mutex lock.
*/
void drm_ctxbitmap_cleanup(struct drm_device * dev)
{
DRM_LOCK(dev);
if (dev->context_sareas != NULL)
@ -127,15 +168,29 @@ void drm_ctxbitmap_cleanup(struct drm_device *dev)
DRM_UNLOCK(dev);
}
/* ================================================================
* Per Context SAREA Support
*/
/*@}*/
/******************************************************************/
/** \name Per Context SAREA Support */
/*@{*/
/**
* Get per-context SAREA.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument pointing to a drm_ctx_priv_map structure.
* \return zero on success or a negative number on failure.
*
* Gets the map from drm_device::ctx_idr with the handle specified and
* returns its handle.
*/
int drm_getsareactx(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_ctx_priv_map *request = data;
drm_local_map_t *map;
struct drm_local_map *map;
DRM_LOCK(dev);
if (dev->max_context < 0 ||
@ -152,15 +207,29 @@ int drm_getsareactx(struct drm_device *dev, void *data,
return 0;
}
/**
* Set per-context SAREA.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument pointing to a drm_ctx_priv_map structure.
* \return zero on success or a negative number on failure.
*
* Searches the mapping specified in \p arg and update the entry in
* drm_device::ctx_idr with it.
*/
int drm_setsareactx(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_ctx_priv_map *request = data;
drm_local_map_t *map = NULL;
struct drm_local_map *map = NULL;
struct drm_map_list *r_list = NULL;
DRM_LOCK(dev);
TAILQ_FOREACH(map, &dev->maplist, link) {
if (map->handle == request->handle) {
list_for_each_entry(r_list, &dev->maplist, head) {
if (r_list->map
&& r_list->user_token == (unsigned long) request->handle) {
if (dev->max_context < 0)
goto bad;
if (request->ctx_id >= (unsigned) dev->max_context)
@ -176,56 +245,91 @@ int drm_setsareactx(struct drm_device *dev, void *data,
return EINVAL;
}
/* ================================================================
* The actual DRM context handling routines
*/
/*@}*/
int drm_context_switch(struct drm_device *dev, int old, int new)
/******************************************************************/
/** \name The actual DRM context handling routines */
/*@{*/
/**
* Switch context.
*
* \param dev DRM device.
* \param old old context handle.
* \param new new context handle.
* \return zero on success or a negative number on failure.
*
* Attempt to set drm_device::context_flag.
*/
static int drm_context_switch(struct drm_device * dev, int old, int new)
{
if (atomic_xchg(&dev->context_flag, 1) != 0) {
if (test_and_set_bit(0, &dev->context_flag)) {
DRM_ERROR("Reentering -- FIXME\n");
return EBUSY;
return -EBUSY;
}
DRM_DEBUG("Context switch from %d to %d\n", old, new);
if (new == dev->last_context) {
atomic_xchg(&dev->context_flag, 0);
clear_bit(0, &dev->context_flag);
return 0;
}
return 0;
}
int drm_context_switch_complete(struct drm_device *dev, int new)
/**
* Complete context switch.
*
* \param dev DRM device.
* \param new new context handle.
* \return zero on success or a negative number on failure.
*
* Updates drm_device::last_context and drm_device::last_switch. Verifies the
* hardware lock is held, clears the drm_device::context_flag and wakes up
* drm_device::context_wait.
*/
static int drm_context_switch_complete(struct drm_device *dev,
struct drm_file *file_priv, int new)
{
dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
dev->last_switch = jiffies;
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) {
DRM_ERROR("Lock isn't held after context switch\n");
}
/* If a context switch is ever initiated
when the kernel holds the lock, release
that lock here. */
atomic_xchg(&dev->context_flag, 0);
clear_bit(0, &dev->context_flag);
wakeup(&dev->context_wait);
return 0;
}
int drm_resctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
/**
* Reserve contexts.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument pointing to a drm_ctx_res structure.
* \return zero on success or a negative number on failure.
*/
int drm_resctx(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_ctx_res *res = data;
struct drm_ctx ctx;
int i;
if (res->count >= DRM_RESERVED_CONTEXTS) {
bzero(&ctx, sizeof(ctx));
memset(&ctx, 0, sizeof(ctx));
for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
ctx.handle = i;
if (DRM_COPY_TO_USER(&res->contexts[i],
&ctx, sizeof(ctx)))
return EFAULT;
if (copy_to_user(&res->contexts[i], &ctx, sizeof(ctx)))
return -EFAULT;
}
}
res->count = DRM_RESERVED_CONTEXTS;
@ -233,8 +337,21 @@ int drm_resctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
return 0;
}
int drm_addctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
/**
* Add context.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument pointing to a drm_ctx structure.
* \return zero on success or a negative number on failure.
*
* Get a new handle for the context and copy to userspace.
*/
int drm_addctx(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_ctx_list *ctx_entry;
struct drm_ctx *ctx = data;
ctx->handle = drm_ctxbitmap_next(dev);
@ -246,15 +363,24 @@ int drm_addctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
if (ctx->handle == -1) {
DRM_DEBUG("Not enough free contexts.\n");
/* Should this return -EBUSY instead? */
return ENOMEM;
return -ENOMEM;
}
if (dev->driver->context_ctor && ctx->handle != DRM_KERNEL_CONTEXT) {
DRM_LOCK(dev);
dev->driver->context_ctor(dev, ctx->handle);
DRM_UNLOCK(dev);
ctx_entry = malloc(sizeof(*ctx_entry), DRM_MEM_CTXBITMAP, M_NOWAIT);
if (!ctx_entry) {
DRM_DEBUG("out of memory\n");
return -ENOMEM;
}
INIT_LIST_HEAD(&ctx_entry->head);
ctx_entry->handle = ctx->handle;
ctx_entry->tag = file_priv;
DRM_LOCK(dev);
list_add(&ctx_entry->head, &dev->ctxlist);
++dev->ctx_count;
DRM_UNLOCK(dev);
return 0;
}
@ -264,6 +390,15 @@ int drm_modctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
return 0;
}
/**
* Get context.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument pointing to a drm_ctx structure.
* \return zero on success or a negative number on failure.
*/
int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_ctx *ctx = data;
@ -274,6 +409,17 @@ int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
return 0;
}
/**
* Switch context.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument pointing to a drm_ctx structure.
* \return zero on success or a negative number on failure.
*
* Calls context_switch().
*/
int drm_switchctx(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@ -283,30 +429,66 @@ int drm_switchctx(struct drm_device *dev, void *data,
return drm_context_switch(dev, dev->last_context, ctx->handle);
}
int drm_newctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
/**
* New context.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument pointing to a drm_ctx structure.
* \return zero on success or a negative number on failure.
*
* Calls context_switch_complete().
*/
int drm_newctx(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_ctx *ctx = data;
DRM_DEBUG("%d\n", ctx->handle);
drm_context_switch_complete(dev, ctx->handle);
drm_context_switch_complete(dev, file_priv, ctx->handle);
return 0;
}
int drm_rmctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
/**
* Remove context.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument pointing to a drm_ctx structure.
* \return zero on success or a negative number on failure.
*
* If not the special kernel context, calls ctxbitmap_free() to free the specified context.
*/
int drm_rmctx(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_ctx *ctx = data;
DRM_DEBUG("%d\n", ctx->handle);
if (ctx->handle != DRM_KERNEL_CONTEXT) {
if (dev->driver->context_dtor) {
DRM_LOCK(dev);
if (dev->driver->context_dtor)
dev->driver->context_dtor(dev, ctx->handle);
DRM_UNLOCK(dev);
}
drm_ctxbitmap_free(dev, ctx->handle);
}
DRM_LOCK(dev);
if (!list_empty(&dev->ctxlist)) {
struct drm_ctx_list *pos, *n;
list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
if (pos->handle == ctx->handle) {
list_del(&pos->head);
free(pos, DRM_MEM_CTXBITMAP);
--dev->ctx_count;
}
}
}
DRM_UNLOCK(dev);
return 0;
}
/*@}*/

File diff suppressed because it is too large Load Diff

View File

@ -27,13 +27,15 @@
#ifndef __DRM_CRTC_H__
#define __DRM_CRTC_H__
#include <dev/drm2/drm_gem_names.h>
#include <dev/drm2/drm_mode.h>
#include <dev/drm2/drm_fourcc.h>
struct drm_device;
struct drm_mode_set;
struct drm_framebuffer;
struct i2c_adapter;
struct drm_object_properties;
#define DRM_MODE_OBJECT_CRTC 0xcccccccc
#define DRM_MODE_OBJECT_CONNECTOR 0xc0c0c0c0
@ -50,7 +52,7 @@ struct drm_mode_object {
struct drm_object_properties *properties;
};
#define DRM_OBJECT_MAX_PROPERTY 16
#define DRM_OBJECT_MAX_PROPERTY 24
struct drm_object_properties {
int count;
uint32_t ids[DRM_OBJECT_MAX_PROPERTY];
@ -72,7 +74,7 @@ enum drm_mode_status {
MODE_H_ILLEGAL, /* mode has illegal horizontal timings */
MODE_V_ILLEGAL, /* mode has illegal horizontal timings */
MODE_BAD_WIDTH, /* requires an unsupported linepitch */
MODE_NOMODE, /* no mode with a maching name */
MODE_NOMODE, /* no mode with a matching name */
MODE_NO_INTERLACE, /* interlaced mode not supported */
MODE_NO_DBLESCAN, /* doublescan mode not supported */
MODE_NO_VSCAN, /* multiscan mode not supported */
@ -114,7 +116,8 @@ enum drm_mode_status {
.hdisplay = (hd), .hsync_start = (hss), .hsync_end = (hse), \
.htotal = (ht), .hskew = (hsk), .vdisplay = (vd), \
.vsync_start = (vss), .vsync_end = (vse), .vtotal = (vt), \
.vscan = (vs), .flags = (f), .vrefresh = 0
.vscan = (vs), .flags = (f), .vrefresh = 0, \
.base.type = DRM_MODE_OBJECT_MODE
#define CRTC_INTERLACE_HALVE_V 0x1 /* halve V values for interlacing */
@ -125,9 +128,8 @@ struct drm_display_mode {
char name[DRM_DISPLAY_MODE_LEN];
int connector_count;
enum drm_mode_status status;
int type;
unsigned int type;
/* Proposed mode values */
int clock; /* in kHz */
@ -163,8 +165,6 @@ struct drm_display_mode {
int crtc_vsync_start;
int crtc_vsync_end;
int crtc_vtotal;
int crtc_hadjusted;
int crtc_vadjusted;
/* Driver private mode info */
int private_size;
@ -213,11 +213,10 @@ struct drm_display_info {
u32 color_formats;
u8 cea_rev;
char *raw_edid; /* if any */
};
struct drm_framebuffer_funcs {
/* note: use drm_framebuffer_remove() */
void (*destroy)(struct drm_framebuffer *framebuffer);
int (*create_handle)(struct drm_framebuffer *fb,
struct drm_file *file_priv,
@ -242,6 +241,16 @@ struct drm_framebuffer_funcs {
struct drm_framebuffer {
struct drm_device *dev;
/*
* Note that the fb is refcounted for the benefit of driver internals,
* for example some hw, disabling a CRTC/plane is asynchronous, and
* scanout does not actually complete until the next vblank. So some
* cleanup (like releasing the reference(s) on the backing GEM bo(s))
* should be deferred. In cases like this, the driver would like to
* hold a ref to the fb even though it has already been removed from
* userspace perspective.
*/
unsigned int refcount;
struct list_head head;
struct drm_mode_object base;
const struct drm_framebuffer_funcs *funcs;
@ -291,20 +300,16 @@ struct drm_plane;
/**
* drm_crtc_funcs - control CRTCs for a given device
* @reset: reset CRTC after state has been invalidate (e.g. resume)
* @dpms: control display power levels
* @save: save CRTC state
* @resore: restore CRTC state
* @lock: lock the CRTC
* @unlock: unlock the CRTC
* @shadow_allocate: allocate shadow pixmap
* @shadow_create: create shadow pixmap for rotation support
* @shadow_destroy: free shadow pixmap
* @mode_fixup: fixup proposed mode
* @mode_set: set the desired mode on the CRTC
* @restore: restore CRTC state
* @reset: reset CRTC after state has been invalidate (e.g. resume)
* @cursor_set: setup the cursor
* @cursor_move: move the cursor
* @gamma_set: specify color ramp for CRTC
* @destroy: deinit and free object
* @set_property: called when a property is changed
* @set_config: apply a new CRTC configuration
* @page_flip: initiate a page flip
*
* The drm_crtc_funcs structure is the central CRTC management structure
* in the DRM. Each CRTC controls one or more connectors (note that the name
@ -338,7 +343,7 @@ struct drm_crtc_funcs {
/*
* Flip to the given framebuffer. This implements the page
* flip ioctl descibed in drm_mode.h, specifically, the
* flip ioctl described in drm_mode.h, specifically, the
* implementation must return immediately and block all
* rendering to the current fb until the flip has completed.
* If userspace set the event flag in the ioctl, the event
@ -348,16 +353,31 @@ struct drm_crtc_funcs {
int (*page_flip)(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event);
int (*set_property)(struct drm_crtc *crtc,
struct drm_property *property, uint64_t val);
};
/**
* drm_crtc - central CRTC control structure
* @dev: parent DRM device
* @head: list management
* @base: base KMS object for ID tracking etc.
* @enabled: is this CRTC enabled?
* @mode: current mode timings
* @hwmode: mode timings as programmed to hw regs
* @invert_dimensions: for purposes of error checking crtc vs fb sizes,
* invert the width/height of the crtc. This is used if the driver
* is performing 90 or 270 degree rotated scanout
* @x: x position on screen
* @y: y position on screen
* @funcs: CRTC control functions
* @gamma_size: size of gamma ramp
* @gamma_store: gamma ramp values
* @framedur_ns: precise frame timing
* @framedur_ns: precise line timing
* @pixeldur_ns: precise pixel timing
* @helper_private: mid-layer private data
* @properties: property tracking for this CRTC
*
* Each CRTC may have one or more connectors associated with it. This structure
@ -382,6 +402,8 @@ struct drm_crtc {
*/
struct drm_display_mode hwmode;
bool invert_dimensions;
int x, y;
const struct drm_crtc_funcs *funcs;
@ -390,7 +412,7 @@ struct drm_crtc {
uint16_t *gamma_store;
/* Constants needed for precise vblank and swap timestamping. */
int64_t framedur_ns, linedur_ns, pixeldur_ns;
s64 framedur_ns, linedur_ns, pixeldur_ns;
/* if you are using the helper */
void *helper_private;
@ -405,11 +427,8 @@ struct drm_crtc {
* @save: save connector state
* @restore: restore connector state
* @reset: reset connector after state has been invalidate (e.g. resume)
* @mode_valid: is this mode valid on the given connector?
* @mode_fixup: try to fixup proposed mode for this connector
* @mode_set: set this mode
* @detect: is this connector active?
* @get_modes: get mode list for this connector
* @fill_modes: fill mode list for this connector
* @set_property: property for this connector may need update
* @destroy: make object go away
* @force: notify the driver the connector is forced on
@ -439,6 +458,13 @@ struct drm_connector_funcs {
void (*force)(struct drm_connector *connector);
};
/**
* drm_encoder_funcs - encoder controls
* @reset: reset state (e.g. at init or resume time)
* @destroy: cleanup and free associated data
*
* Encoders sit between CRTCs and connectors.
*/
struct drm_encoder_funcs {
void (*reset)(struct drm_encoder *encoder);
void (*destroy)(struct drm_encoder *encoder);
@ -446,10 +472,22 @@ struct drm_encoder_funcs {
#define DRM_CONNECTOR_MAX_UMODES 16
#define DRM_CONNECTOR_LEN 32
#define DRM_CONNECTOR_MAX_ENCODER 2
#define DRM_CONNECTOR_MAX_ENCODER 3
/**
* drm_encoder - central DRM encoder structure
* @dev: parent DRM device
* @head: list management
* @base: base KMS object
* @encoder_type: one of the %DRM_MODE_ENCODER_<foo> types in drm_mode.h
* @possible_crtcs: bitmask of potential CRTC bindings
* @possible_clones: bitmask of potential sibling encoders for cloning
* @crtc: currently bound CRTC
* @funcs: control functions
* @helper_private: mid-layer private data
*
* CRTCs drive pixels to encoders, which convert them into signals
* appropriate for a given connector or set of connectors.
*/
struct drm_encoder {
struct drm_device *dev;
@ -485,14 +523,36 @@ enum drm_connector_force {
/**
* drm_connector - central DRM connector control structure
* @crtc: CRTC this connector is currently connected to, NULL if none
* @dev: parent DRM device
* @kdev: kernel device for sysfs attributes
* @attr: sysfs attributes
* @head: list management
* @base: base KMS object
* @connector_type: one of the %DRM_MODE_CONNECTOR_<foo> types from drm_mode.h
* @connector_type_id: index into connector type enum
* @interlace_allowed: can this connector handle interlaced modes?
* @doublescan_allowed: can this connector handle doublescan?
* @available_modes: modes available on this connector (from get_modes() + user)
* @initial_x: initial x position for this connector
* @initial_y: initial y position for this connector
* @status: connector connected?
* @modes: modes available on this connector (from fill_modes() + user)
* @status: one of the drm_connector_status enums (connected, not, or unknown)
* @probed_modes: list of modes derived directly from the display
* @display_info: information about attached display (e.g. from EDID)
* @funcs: connector control functions
* @user_modes: user added mode list
* @edid_blob_ptr: DRM property containing EDID if present
* @properties: property tracking for this connector
* @polled: a %DRM_CONNECTOR_POLL_<foo> value for core driven polling
* @dpms: current dpms state
* @helper_private: mid-layer private data
* @force: a %DRM_FORCE_<foo> state for forced mode sets
* @encoder_ids: valid encoders for this connector
* @encoder: encoder driving this connector, if any
* @eld: EDID-like data, if present
* @dvi_dual: dual link DVI, if found
* @max_tmds_clock: max clock rate, if found
* @latency_present: AV delay info from ELD, if found
* @video_latency: video latency info from ELD, if found
* @audio_latency: audio latency info from ELD, if found
* @null_edid_counter: track sinks that give us all zeros for the EDID
*
* Each connector may be connected to one or more CRTCs, or may be clonable by
* another connector if they can share a CRTC. Each connector also has a specific
@ -501,7 +561,9 @@ enum drm_connector_force {
*/
struct drm_connector {
struct drm_device *dev;
/* struct device kdev; XXXKIB */
#ifdef FREEBSD_NOTYET
struct device kdev;
#endif /* FREEBSD_NOTYET */
struct device_attribute *attr;
struct list_head head;
@ -513,7 +575,6 @@ struct drm_connector {
bool doublescan_allowed;
struct list_head modes; /* list of modes on this connector */
int initial_x, initial_y;
enum drm_connector_status status;
/* these are modes added by probing with DDC or the BIOS */
@ -536,7 +597,6 @@ struct drm_connector {
/* forced on connector */
enum drm_connector_force force;
uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER];
uint32_t force_encoder_id;
struct drm_encoder *encoder; /* currently active encoder */
/* EDID bits */
@ -546,8 +606,8 @@ struct drm_connector {
bool latency_present[2];
int video_latency[2]; /* [0]: progressive, [1]: interlaced */
int audio_latency[2];
int null_edid_counter; /* needed to workaround some HW bugs where we get all 0s */
unsigned bad_edid_counter;
};
/**
@ -566,6 +626,7 @@ struct drm_plane_funcs {
uint32_t src_w, uint32_t src_h);
int (*disable_plane)(struct drm_plane *plane);
void (*destroy)(struct drm_plane *plane);
int (*set_property)(struct drm_plane *plane,
struct drm_property *property, uint64_t val);
};
@ -585,7 +646,7 @@ struct drm_plane_funcs {
* @enabled: enabled flag
* @funcs: helper functions
* @helper_private: storage for drver layer
@properties: property tracking for this plane
* @properties: property tracking for this plane
*/
struct drm_plane {
struct drm_device *dev;
@ -613,7 +674,15 @@ struct drm_plane {
};
/**
* struct drm_mode_set
* drm_mode_set - new values for a CRTC config change
* @head: list management
* @fb: framebuffer to use for new config
* @crtc: CRTC whose configuration we're about to change
* @mode: mode timings to use
* @x: position of this CRTC relative to @fb
* @y: position of this CRTC relative to @fb
* @connectors: array of connectors to drive with this CRTC if possible
* @num_connectors: size of @connectors array
*
* Represents a single crtc the connectors that it drives with what mode
* and from which framebuffer it scans out from.
@ -621,8 +690,6 @@ struct drm_plane {
* This is used to set modes.
*/
struct drm_mode_set {
struct list_head head;
struct drm_framebuffer *fb;
struct drm_crtc *crtc;
struct drm_display_mode *mode;
@ -635,15 +702,34 @@ struct drm_mode_set {
};
/**
* struct drm_mode_config_funcs - configure CRTCs for a given screen layout
* struct drm_mode_config_funcs - basic driver provided mode setting functions
* @fb_create: create a new framebuffer object
* @output_poll_changed: function to handle output configuration changes
*
* Some global (i.e. not per-CRTC, connector, etc) mode setting functions that
* involve drivers.
*/
struct drm_mode_config_funcs {
int (*fb_create)(struct drm_device *dev,
struct drm_file *file_priv, struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_framebuffer **res);
struct drm_file *file_priv,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_framebuffer **fb);
void (*output_poll_changed)(struct drm_device *dev);
};
/**
* drm_mode_group - group of mode setting resources for potential sub-grouping
* @num_crtcs: CRTC count
* @num_encoders: encoder count
* @num_connectors: connector count
* @id_list: list of KMS object IDs in this group
*
* Currently this simply tracks the global mode setting state. But in the
* future it could allow groups of objects to be set aside into independent
* control groups for use by different user level processes (e.g. two X servers
* running simultaneously on different heads, each with their own mode
* configuration and freedom of mode setting).
*/
struct drm_mode_group {
uint32_t num_crtcs;
uint32_t num_encoders;
@ -655,11 +741,34 @@ struct drm_mode_group {
/**
* drm_mode_config - Mode configuration control structure
* @mutex: mutex protecting KMS related lists and structures
* @idr_mutex: mutex for KMS ID allocation and management
* @crtc_idr: main KMS ID tracking object
* @num_fb: number of fbs available
* @fb_list: list of framebuffers available
* @num_connector: number of connectors on this device
* @connector_list: list of connector objects
* @num_encoder: number of encoders on this device
* @encoder_list: list of encoder objects
* @num_crtc: number of CRTCs on this device
* @crtc_list: list of CRTC objects
* @min_width: minimum pixel width on this device
* @min_height: minimum pixel height on this device
* @max_width: maximum pixel width on this device
* @max_height: maximum pixel height on this device
* @funcs: core driver provided mode setting functions
* @fb_base: base address of the framebuffer
* @poll_enabled: track polling status for this device
* @output_poll_work: delayed work for polling in process context
* @*_property: core property tracking
*
* Core mode resource tracking structure. All CRTC, encoders, and connectors
* enumerated by the driver are added here, as are global properties. Some
* global restrictions are also here, e.g. dimension restrictions.
*/
struct drm_mode_config {
struct sx mutex; /* protects configuration (mode lists etc.) */
struct drm_gem_names crtc_names; /* use this idr for all IDs, fb, crtc, connector, modes */
struct drm_gem_names crtc_names; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */
/* this is limited to one for now */
int num_fb;
struct list_head fb_list;
@ -682,7 +791,8 @@ struct drm_mode_config {
/* output poll support */
bool poll_enabled;
struct timeout_task output_poll_task;
bool poll_running;
struct timeout_task output_poll_work;
/* pointers to standard properties */
struct list_head property_blob_list;
@ -731,16 +841,6 @@ struct drm_prop_enum_list {
char *name;
};
#if defined(MODE_SETTING_LOCKING_IS_NOT_BROKEN)
#define DRM_MODE_CONFIG_ASSERT_LOCKED(dev) \
sx_assert(&dev->mode_config.mutex, SA_XLOCKED)
#else
#define DRM_MODE_CONFIG_ASSERT_LOCKED(dev)
#endif
extern char *drm_get_dirty_info_name(int val);
extern char *drm_get_connector_status_name(enum drm_connector_status status);
extern int drm_crtc_init(struct drm_device *dev,
struct drm_crtc *crtc,
const struct drm_crtc_funcs *funcs);
@ -752,6 +852,8 @@ extern int drm_connector_init(struct drm_device *dev,
int connector_type);
extern void drm_connector_cleanup(struct drm_connector *connector);
/* helper to unplug all connectors from sysfs for device */
extern void drm_connector_unplug_all(struct drm_device *dev);
extern int drm_encoder_init(struct drm_device *dev,
struct drm_encoder *encoder,
@ -769,28 +871,34 @@ extern void drm_plane_cleanup(struct drm_plane *plane);
extern void drm_encoder_cleanup(struct drm_encoder *encoder);
extern char *drm_get_connector_name(struct drm_connector *connector);
extern char *drm_get_connector_status_name(enum drm_connector_status status);
extern char *drm_get_dpms_name(int val);
extern char *drm_get_dvi_i_subconnector_name(int val);
extern char *drm_get_dvi_i_select_name(int val);
extern char *drm_get_tv_subconnector_name(int val);
extern char *drm_get_dirty_info_name(int val);
extern char *drm_get_tv_select_name(int val);
extern void drm_fb_release(struct drm_file *file_priv);
extern int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group);
extern void drm_mode_group_free(struct drm_mode_group *group);
extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group);
extern bool drm_probe_ddc(device_t adapter);
extern struct edid *drm_get_edid(struct drm_connector *connector,
device_t adapter);
extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
extern void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode);
extern void drm_mode_remove(struct drm_connector *connector, struct drm_display_mode *mode);
extern void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src);
extern struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
const struct drm_display_mode *mode);
extern void drm_mode_debug_printmodeline(struct drm_display_mode *mode);
const struct drm_display_mode *mode);
extern void drm_mode_debug_printmodeline(const struct drm_display_mode *mode);
extern void drm_mode_config_init(struct drm_device *dev);
extern void drm_mode_config_reset(struct drm_device *dev);
extern void drm_mode_config_cleanup(struct drm_device *dev);
extern void drm_mode_set_name(struct drm_display_mode *mode);
extern bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2);
extern int drm_mode_width(struct drm_display_mode *mode);
extern int drm_mode_height(struct drm_display_mode *mode);
extern bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2);
extern int drm_mode_width(const struct drm_display_mode *mode);
extern int drm_mode_height(const struct drm_display_mode *mode);
/* for us by fb module */
extern int drm_mode_attachmode_crtc(struct drm_device *dev,
@ -806,8 +914,8 @@ extern void drm_mode_validate_size(struct drm_device *dev,
struct list_head *mode_list,
int maxX, int maxY, int maxPitch);
extern void drm_mode_validate_clocks(struct drm_device *dev,
struct list_head *mode_list,
int *min, int *max, int n_ranges);
struct list_head *mode_list,
int *min, int *max, int n_ranges);
extern void drm_mode_prune_invalid(struct drm_device *dev,
struct list_head *mode_list, bool verbose);
extern void drm_mode_sort(struct list_head *mode_list);
@ -818,15 +926,6 @@ extern void drm_mode_set_crtcinfo(struct drm_display_mode *p,
extern void drm_mode_connector_list_update(struct drm_connector *connector);
extern int drm_mode_connector_update_edid_property(struct drm_connector *connector,
struct edid *edid);
extern int drm_connector_property_set_value(struct drm_connector *connector,
struct drm_property *property,
uint64_t value);
extern int drm_connector_property_get_value(struct drm_connector *connector,
struct drm_property *property,
uint64_t *value);
void drm_object_attach_property(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t init_val);
extern int drm_object_property_set_value(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t val);
@ -839,14 +938,18 @@ extern void drm_framebuffer_set_object(struct drm_device *dev,
extern int drm_framebuffer_init(struct drm_device *dev,
struct drm_framebuffer *fb,
const struct drm_framebuffer_funcs *funcs);
extern void drm_framebuffer_unreference(struct drm_framebuffer *fb);
extern void drm_framebuffer_reference(struct drm_framebuffer *fb);
extern void drm_framebuffer_remove(struct drm_framebuffer *fb);
extern void drm_framebuffer_cleanup(struct drm_framebuffer *fb);
extern int drmfb_probe(struct drm_device *dev, struct drm_crtc *crtc);
extern int drmfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
extern void drm_crtc_probe_connector_modes(struct drm_device *dev, int maxX, int maxY);
extern bool drm_crtc_in_use(struct drm_crtc *crtc);
extern void drm_connector_attach_property(struct drm_connector *connector,
struct drm_property *property, uint64_t init_val);
extern void drm_object_attach_property(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t init_val);
extern struct drm_property *drm_property_create(struct drm_device *dev, int flags,
const char *name, int num_values);
extern struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
@ -854,9 +957,9 @@ extern struct drm_property *drm_property_create_enum(struct drm_device *dev, int
const struct drm_prop_enum_list *props,
int num_values);
struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
int flags, const char *name,
const struct drm_prop_enum_list *props,
int num_values);
int flags, const char *name,
const struct drm_prop_enum_list *props,
int num_values);
struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
const char *name,
uint64_t min, uint64_t max);
@ -932,6 +1035,8 @@ extern int drm_mode_gamma_get_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_gamma_set_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern u8 *drm_find_cea_extension(struct edid *edid);
extern u8 drm_match_cea_mode(struct drm_display_mode *to_match);
extern bool drm_detect_hdmi_monitor(struct edid *edid);
extern bool drm_detect_monitor_audio(struct edid *edid);
extern int drm_mode_page_flip_ioctl(struct drm_device *dev,
@ -948,8 +1053,10 @@ extern struct drm_display_mode *drm_gtf_mode_complex(struct drm_device *dev,
int GTF_2C, int GTF_K, int GTF_2J);
extern int drm_add_modes_noedid(struct drm_connector *connector,
int hdisplay, int vdisplay);
extern uint8_t drm_mode_cea_vic(const struct drm_display_mode *mode);
extern int drm_edid_header_is_valid(const u8 *raw_edid);
extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid);
extern bool drm_edid_is_valid(struct edid *edid);
struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
int hsize, int vsize, int fresh,

View File

@ -32,48 +32,54 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <dev/drm2/drmP.h>
#include <dev/drm2/drm_crtc.h>
#include <dev/drm2/drm_fourcc.h>
#include <dev/drm2/drm_crtc_helper.h>
#include <dev/drm2/drm_fb_helper.h>
#include <dev/drm2/drm_edid.h>
bool
drm_fetch_cmdline_mode_from_kenv(struct drm_connector *connector,
struct drm_cmdline_mode *cmdline_mode)
/**
* drm_helper_move_panel_connectors_to_head() - move panels to the front in the
* connector list
* @dev: drm device to operate on
*
* Some userspace presumes that the first connected connector is the main
* display, where it's supposed to display e.g. the login screen. For
* laptops, this should be the main panel. Use this function to sort all
* (eDP/LVDS) panels to the front of the connector list, instead of
* painstakingly trying to initialize them in the right order.
*/
void drm_helper_move_panel_connectors_to_head(struct drm_device *dev)
{
char *tun_var_name, *tun_mode;
static const char tun_prefix[] = "drm_mode.";
bool res;
struct drm_connector *connector, *tmp;
struct list_head panel_list;
res = false;
tun_var_name = malloc(sizeof(tun_prefix) +
strlen(drm_get_connector_name(connector)), M_TEMP, M_WAITOK);
strcpy(tun_var_name, tun_prefix);
strcat(tun_var_name, drm_get_connector_name(connector));
tun_mode = kern_getenv(tun_var_name);
if (tun_mode != NULL) {
res = drm_mode_parse_command_line_for_connector(tun_mode,
connector, cmdline_mode);
freeenv(tun_mode);
INIT_LIST_HEAD(&panel_list);
list_for_each_entry_safe(connector, tmp,
&dev->mode_config.connector_list, head) {
if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS ||
connector->connector_type == DRM_MODE_CONNECTOR_eDP)
list_move_tail(&connector->head, &panel_list);
}
free(tun_var_name, M_TEMP);
return (res);
list_splice(&panel_list, &dev->mode_config.connector_list);
}
EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head);
static bool drm_kms_helper_poll = true;
module_param_named(poll, drm_kms_helper_poll, bool, 0600);
static void drm_mode_validate_flag(struct drm_connector *connector,
int flags)
{
struct drm_display_mode *mode, *t;
struct drm_display_mode *mode;
if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE))
return;
list_for_each_entry_safe(mode, t, &connector->modes, head) {
list_for_each_entry(mode, &connector->modes, head) {
if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
!(flags & DRM_MODE_FLAG_INTERLACE))
mode->status = MODE_NO_INTERLACE;
@ -87,22 +93,21 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
/**
* drm_helper_probe_single_connector_modes - get complete set of display modes
* @dev: DRM device
* @connector: connector to probe
* @maxX: max width for modes
* @maxY: max height for modes
*
* LOCKING:
* Caller must hold mode config lock.
*
* Based on @dev's mode_config layout, scan all the connectors and try to detect
* modes on them. Modes will first be added to the connector's probed_modes
* list, then culled (based on validity and the @maxX, @maxY parameters) and
* put into the normal modes list.
* Based on the helper callbacks implemented by @connector try to detect all
* valid modes. Modes will first be added to the connector's probed_modes list,
* then culled (based on validity and the @maxX, @maxY parameters) and put into
* the normal modes list.
*
* Intended to be used either at bootup time or when major configuration
* changes have occurred.
*
* FIXME: take into account monitor limits
* Intended to be use as a generic implementation of the ->probe() @connector
* callback for drivers that use the crtc helpers for output mode filtering and
* detection.
*
* RETURNS:
* Number of modes found on @connector.
@ -111,17 +116,16 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
uint32_t maxX, uint32_t maxY)
{
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode, *t;
struct drm_display_mode *mode;
struct drm_connector_helper_funcs *connector_funcs =
connector->helper_private;
struct drm_cmdline_mode cmdline_mode;
int count = 0;
int mode_flags = 0;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
drm_get_connector_name(connector));
/* set all modes to the unverified state */
list_for_each_entry_safe(mode, t, &connector->modes, head)
list_for_each_entry(mode, &connector->modes, head)
mode->status = MODE_UNVERIFIED;
if (connector->force) {
@ -133,9 +137,14 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
connector->funcs->force(connector);
} else {
connector->status = connector->funcs->detect(connector, true);
drm_kms_helper_poll_enable(dev);
}
/* Re-enable polling in case the global poll config changed. */
if (drm_kms_helper_poll != dev->mode_config.poll_running)
drm_kms_helper_poll_enable(dev);
dev->mode_config.poll_running = drm_kms_helper_poll;
if (connector->status == connector_status_disconnected) {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
connector->base.id, drm_get_connector_name(connector));
@ -143,26 +152,14 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
goto prune;
}
count = (*connector_funcs->get_modes)(connector);
if (count == 0 && drm_fetch_cmdline_mode_from_kenv(connector,
&cmdline_mode)) {
mode = drm_mode_create_from_cmdline_mode(dev,
&cmdline_mode);
if (mode != NULL) {
DRM_DEBUG_KMS(
"[CONNECTOR:%d:%s] found manual override ",
connector->base.id,
drm_get_connector_name(connector));
drm_mode_debug_printmodeline(mode);
drm_mode_probed_add(connector, mode);
count++;
} else {
DRM_ERROR(
"[CONNECTOR:%d:%s] manual override mode: parse error\n",
connector->base.id,
drm_get_connector_name(connector));
}
}
#ifdef FREEBSD_NOTYET
#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE
count = drm_load_edid_firmware(connector);
if (count == 0)
#endif
#endif /* FREEBSD_NOTYET */
count = (*connector_funcs->get_modes)(connector);
if (count == 0 && connector->status == connector_status_connected)
count = drm_add_modes_noedid(connector, 1024, 768);
if (count == 0)
@ -180,7 +177,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
mode_flags |= DRM_MODE_FLAG_DBLSCAN;
drm_mode_validate_flag(connector, mode_flags);
list_for_each_entry_safe(mode, t, &connector->modes, head) {
list_for_each_entry(mode, &connector->modes, head) {
if (mode->status == MODE_OK)
mode->status = connector_funcs->mode_valid(connector,
mode);
@ -196,7 +193,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] probed modes :\n", connector->base.id,
drm_get_connector_name(connector));
list_for_each_entry_safe(mode, t, &connector->modes, head) {
list_for_each_entry(mode, &connector->modes, head) {
mode->vrefresh = drm_mode_vrefresh(mode);
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
@ -205,6 +202,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
return count;
}
EXPORT_SYMBOL(drm_helper_probe_single_connector_modes);
/**
* drm_helper_encoder_in_use - check if a given encoder is in use
@ -227,6 +225,7 @@ bool drm_helper_encoder_in_use(struct drm_encoder *encoder)
return true;
return false;
}
EXPORT_SYMBOL(drm_helper_encoder_in_use);
/**
* drm_helper_crtc_in_use - check if a given CRTC is in a mode_config
@ -250,6 +249,7 @@ bool drm_helper_crtc_in_use(struct drm_crtc *crtc)
return true;
return false;
}
EXPORT_SYMBOL(drm_helper_crtc_in_use);
static void
drm_encoder_disable(struct drm_encoder *encoder)
@ -305,6 +305,7 @@ void drm_helper_disable_unused_functions(struct drm_device *dev)
}
}
}
EXPORT_SYMBOL(drm_helper_disable_unused_functions);
/**
* drm_encoder_crtc_ok - can a given crtc drive a given encoder?
@ -360,17 +361,24 @@ drm_crtc_prepare_encoders(struct drm_device *dev)
}
/**
* drm_crtc_set_mode - set a mode
* drm_crtc_helper_set_mode - internal helper to set a mode
* @crtc: CRTC to program
* @mode: mode to use
* @x: width of mode
* @y: height of mode
* @x: horizontal offset into the surface
* @y: vertical offset into the surface
* @old_fb: old framebuffer, for cleanup
*
* LOCKING:
* Caller must hold mode config lock.
*
* Try to set @mode on @crtc. Give @crtc and its associated connectors a chance
* to fixup or reject the mode prior to trying to set it.
* to fixup or reject the mode prior to trying to set it. This is an internal
* helper that drivers could e.g. use to update properties that require the
* entire output pipe to be disabled and re-enabled in a new configuration. For
* example for changing whether audio is enabled on a hdmi link or for changing
* panel fitter or dither attributes. It is also called by the
* drm_crtc_helper_set_config() helper function to drive the mode setting
* sequence.
*
* RETURNS:
* True if the mode was set successfully, or false otherwise.
@ -419,11 +427,13 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
encoder_funcs = encoder->helper_private;
if (!(ret = encoder_funcs->mode_fixup(encoder, mode,
adjusted_mode))) {
DRM_DEBUG_KMS("Encoder fixup failed\n");
goto done;
}
}
if (!(ret = crtc_funcs->mode_fixup(crtc, mode, adjusted_mode))) {
DRM_DEBUG_KMS("CRTC fixup failed\n");
goto done;
}
DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
@ -495,6 +505,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
return ret;
}
EXPORT_SYMBOL(drm_crtc_helper_set_mode);
static int
drm_crtc_helper_disable(struct drm_crtc *crtc)
@ -522,20 +534,19 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
/**
* drm_crtc_helper_set_config - set a new config from userspace
* @crtc: CRTC to setup
* @crtc_info: user provided configuration
* @new_mode: new mode to set
* @connector_set: set of connectors for the new config
* @fb: new framebuffer
* @set: mode set configuration
*
* LOCKING:
* Caller must hold mode config lock.
*
* Setup a new configuration, provided by the user in @crtc_info, and enable
* it.
* Setup a new configuration, provided by the upper layers (either an ioctl call
* from userspace or internally e.g. from the fbdev suppport code) in @set, and
* enable it. This is the main helper functions for drivers that implement
* kernel mode setting with the crtc helper functions and the assorted
* ->prepare(), ->modeset() and ->commit() helper callbacks.
*
* RETURNS:
* Zero. (FIXME)
* Returns 0 on success, -ERRNO on failure.
*/
int drm_crtc_helper_set_config(struct drm_mode_set *set)
{
@ -581,12 +592,25 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
/* Allocate space for the backup of all (non-pointer) crtc, encoder and
* connector data. */
save_crtcs = malloc(dev->mode_config.num_crtc * sizeof(struct drm_crtc),
DRM_MEM_KMS, M_WAITOK | M_ZERO);
save_crtcs = malloc(dev->mode_config.num_crtc *
sizeof(struct drm_crtc), DRM_MEM_KMS, M_NOWAIT | M_ZERO);
if (!save_crtcs)
return -ENOMEM;
save_encoders = malloc(dev->mode_config.num_encoder *
sizeof(struct drm_encoder), DRM_MEM_KMS, M_WAITOK | M_ZERO);
sizeof(struct drm_encoder), DRM_MEM_KMS, M_NOWAIT | M_ZERO);
if (!save_encoders) {
free(save_crtcs, DRM_MEM_KMS);
return -ENOMEM;
}
save_connectors = malloc(dev->mode_config.num_connector *
sizeof(struct drm_connector), DRM_MEM_KMS, M_WAITOK | M_ZERO);
sizeof(struct drm_connector), DRM_MEM_KMS, M_NOWAIT | M_ZERO);
if (!save_connectors) {
free(save_crtcs, DRM_MEM_KMS);
free(save_encoders, DRM_MEM_KMS);
return -ENOMEM;
}
/* Copy data. Note that driver private data is not affected.
* Should anything bad happen only the expected state is
@ -622,6 +646,11 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
mode_changed = true;
} else if (set->fb == NULL) {
mode_changed = true;
} else if (set->fb->depth != set->crtc->fb->depth) {
mode_changed = true;
} else if (set->fb->bits_per_pixel !=
set->crtc->fb->bits_per_pixel) {
mode_changed = true;
} else
fb_changed = true;
}
@ -732,7 +761,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
for (i = 0; i < set->num_connectors; i++) {
DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
drm_get_connector_name(set->connectors[i]));
set->connectors[i]->dpms = DRM_MODE_DPMS_ON;
set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON);
}
}
drm_helper_disable_unused_functions(dev);
@ -784,6 +813,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
free(save_crtcs, DRM_MEM_KMS);
return ret;
}
EXPORT_SYMBOL(drm_crtc_helper_set_config);
static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder)
{
@ -812,12 +842,14 @@ static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
}
/**
* drm_helper_connector_dpms
* @connector affected connector
* @mode DPMS mode
* drm_helper_connector_dpms() - connector dpms helper implementation
* @connector: affected connector
* @mode: DPMS mode
*
* Calls the low-level connector DPMS function, then
* calls appropriate encoder and crtc DPMS functions as well
* This is the main helper function provided by the crtc helper framework for
* implementing the DPMS connector attribute. It computes the new desired DPMS
* state for all encoders and crtcs in the output mesh and calls the ->dpms()
* callback provided by the driver appropriately.
*/
void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
{
@ -865,6 +897,7 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
return;
}
EXPORT_SYMBOL(drm_helper_connector_dpms);
int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
struct drm_mode_fb_cmd2 *mode_cmd)
@ -883,6 +916,7 @@ int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
return 0;
}
EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct);
int drm_helper_resume_force_mode(struct drm_device *dev)
{
@ -900,7 +934,7 @@ int drm_helper_resume_force_mode(struct drm_device *dev)
ret = drm_crtc_helper_set_mode(crtc, &crtc->mode,
crtc->x, crtc->y, crtc->fb);
if (!ret)
if (ret == false)
DRM_ERROR("failed to set mode on crtc %p\n", crtc);
/* Turn off outputs that were already powered off */
@ -913,24 +947,36 @@ int drm_helper_resume_force_mode(struct drm_device *dev)
encoder_funcs = encoder->helper_private;
if (encoder_funcs->dpms)
(*encoder_funcs->dpms) (encoder,
drm_helper_choose_encoder_dpms(encoder));
drm_helper_choose_encoder_dpms(encoder));
}
crtc_funcs = crtc->helper_private;
if (crtc_funcs->dpms)
(*crtc_funcs->dpms) (crtc,
drm_helper_choose_crtc_dpms(crtc));
drm_helper_choose_crtc_dpms(crtc));
}
}
/* disable the unused connectors while restoring the modesetting */
drm_helper_disable_unused_functions(dev);
return 0;
}
EXPORT_SYMBOL(drm_helper_resume_force_mode);
#define DRM_OUTPUT_POLL_PERIOD (10 * hz)
void drm_kms_helper_hotplug_event(struct drm_device *dev)
{
/* send a uevent + call fbdev */
#ifdef FREEBSD_NOTYET
drm_sysfs_hotplug_event(dev);
#endif /* FREEBSD_NOTYET */
if (dev->mode_config.funcs->output_poll_changed)
dev->mode_config.funcs->output_poll_changed(dev);
}
EXPORT_SYMBOL(drm_kms_helper_hotplug_event);
#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
static void output_poll_execute(void *ctx, int pending)
{
struct drm_device *dev;
struct drm_device *dev = ctx;
struct drm_connector *connector;
enum drm_connector_status old_status;
bool repoll = false, changed = false;
@ -938,26 +984,25 @@ static void output_poll_execute(void *ctx, int pending)
if (!drm_kms_helper_poll)
return;
dev = ctx;
sx_xlock(&dev->mode_config.mutex);
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
/* if this is HPD or polled don't check it -
TV out for instance */
if (!connector->polled)
/* Ignore forced connectors. */
if (connector->force)
continue;
else if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT |
DRM_CONNECTOR_POLL_DISCONNECT))
repoll = true;
/* Ignore HPD capable connectors and connectors where we don't
* want any hotplug detection at all for polling. */
if (!connector->polled || connector->polled == DRM_CONNECTOR_POLL_HPD)
continue;
repoll = true;
old_status = connector->status;
/* if we are connected and don't want to poll for disconnect
skip it */
if (old_status == connector_status_connected &&
!(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT) &&
!(connector->polled & DRM_CONNECTOR_POLL_HPD))
!(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT))
continue;
connector->status = connector->funcs->detect(connector, false);
@ -971,20 +1016,13 @@ static void output_poll_execute(void *ctx, int pending)
sx_xunlock(&dev->mode_config.mutex);
if (changed) {
#if 0
/* send a uevent + call fbdev */
drm_sysfs_hotplug_event(dev);
#endif
if (dev->mode_config.funcs->output_poll_changed)
dev->mode_config.funcs->output_poll_changed(dev);
}
if (changed)
drm_kms_helper_hotplug_event(dev);
if (repoll) {
if (repoll)
taskqueue_enqueue_timeout(taskqueue_thread,
&dev->mode_config.output_poll_task,
&dev->mode_config.output_poll_work,
DRM_OUTPUT_POLL_PERIOD);
}
}
void drm_kms_helper_poll_disable(struct drm_device *dev)
@ -992,8 +1030,9 @@ void drm_kms_helper_poll_disable(struct drm_device *dev)
if (!dev->mode_config.poll_enabled)
return;
taskqueue_cancel_timeout(taskqueue_thread,
&dev->mode_config.output_poll_task, NULL);
&dev->mode_config.output_poll_work, NULL);
}
EXPORT_SYMBOL(drm_kms_helper_poll_disable);
void drm_kms_helper_poll_enable(struct drm_device *dev)
{
@ -1004,40 +1043,63 @@ void drm_kms_helper_poll_enable(struct drm_device *dev)
return;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (connector->polled)
if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT |
DRM_CONNECTOR_POLL_DISCONNECT))
poll = true;
}
if (poll) {
if (poll)
taskqueue_enqueue_timeout(taskqueue_thread,
&dev->mode_config.output_poll_task, DRM_OUTPUT_POLL_PERIOD);
}
&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
}
EXPORT_SYMBOL(drm_kms_helper_poll_enable);
void drm_kms_helper_poll_init(struct drm_device *dev)
{
TIMEOUT_TASK_INIT(taskqueue_thread, &dev->mode_config.output_poll_task,
TIMEOUT_TASK_INIT(taskqueue_thread, &dev->mode_config.output_poll_work,
0, output_poll_execute, dev);
dev->mode_config.poll_enabled = true;
drm_kms_helper_poll_enable(dev);
}
EXPORT_SYMBOL(drm_kms_helper_poll_init);
void drm_kms_helper_poll_fini(struct drm_device *dev)
{
drm_kms_helper_poll_disable(dev);
}
EXPORT_SYMBOL(drm_kms_helper_poll_fini);
void drm_helper_hpd_irq_event(struct drm_device *dev)
{
struct drm_connector *connector;
enum drm_connector_status old_status;
bool changed = false;
if (!dev->mode_config.poll_enabled)
return;
/* kill timer and schedule immediate execution, this doesn't block */
taskqueue_cancel_timeout(taskqueue_thread,
&dev->mode_config.output_poll_task, NULL);
if (drm_kms_helper_poll)
taskqueue_enqueue_timeout(taskqueue_thread,
&dev->mode_config.output_poll_task, 0);
sx_xlock(&dev->mode_config.mutex);
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
/* Only handle HPD capable connectors. */
if (!(connector->polled & DRM_CONNECTOR_POLL_HPD))
continue;
old_status = connector->status;
connector->status = connector->funcs->detect(connector, false);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
connector->base.id,
drm_get_connector_name(connector),
old_status, connector->status);
if (old_status != connector->status)
changed = true;
}
sx_xunlock(&dev->mode_config.mutex);
if (changed)
drm_kms_helper_hotplug_event(dev);
}
EXPORT_SYMBOL(drm_helper_hpd_irq_event);

View File

@ -40,6 +40,13 @@ enum mode_set_atomic {
ENTER_ATOMIC_MODE_SET,
};
/**
* drm_crtc_helper_funcs - helper operations for CRTCs
* @mode_fixup: try to fixup proposed mode for this connector
* @mode_set: set this mode
*
* The helper operations are called by the mid-layer CRTC helper.
*/
struct drm_crtc_helper_funcs {
/*
* Control power levels on the CRTC. If the mode passed in is
@ -72,13 +79,20 @@ struct drm_crtc_helper_funcs {
void (*disable)(struct drm_crtc *crtc);
};
/**
* drm_encoder_helper_funcs - helper operations for encoders
* @mode_fixup: try to fixup proposed mode for this connector
* @mode_set: set this mode
*
* The helper operations are called by the mid-layer CRTC helper.
*/
struct drm_encoder_helper_funcs {
void (*dpms)(struct drm_encoder *encoder, int mode);
void (*save)(struct drm_encoder *encoder);
void (*restore)(struct drm_encoder *encoder);
bool (*mode_fixup)(struct drm_encoder *encoder,
struct drm_display_mode *mode,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
void (*prepare)(struct drm_encoder *encoder);
void (*commit)(struct drm_encoder *encoder);
@ -93,6 +107,13 @@ struct drm_encoder_helper_funcs {
void (*disable)(struct drm_encoder *encoder);
};
/**
* drm_connector_helper_funcs - helper operations for connectors
* @get_modes: get mode list for this connector
* @mode_valid: is this mode valid on the given connector?
*
* The helper operations are called by the mid-layer CRTC helper.
*/
struct drm_connector_helper_funcs {
int (*get_modes)(struct drm_connector *connector);
int (*mode_valid)(struct drm_connector *connector,
@ -112,6 +133,8 @@ extern bool drm_helper_encoder_in_use(struct drm_encoder *encoder);
extern void drm_helper_connector_dpms(struct drm_connector *connector, int mode);
extern void drm_helper_move_panel_connectors_to_head(struct drm_device *);
extern int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
struct drm_mode_fb_cmd2 *mode_cmd);
@ -137,10 +160,8 @@ extern int drm_helper_resume_force_mode(struct drm_device *dev);
extern void drm_kms_helper_poll_init(struct drm_device *dev);
extern void drm_kms_helper_poll_fini(struct drm_device *dev);
extern void drm_helper_hpd_irq_event(struct drm_device *dev);
extern void drm_kms_helper_hotplug_event(struct drm_device *dev);
extern void drm_kms_helper_poll_disable(struct drm_device *dev);
extern void drm_kms_helper_poll_enable(struct drm_device *dev);
extern bool drm_fetch_cmdline_mode_from_kenv(struct drm_connector *connector,
struct drm_cmdline_mode *cmdline_mode);
#endif

View File

@ -1,4 +1,14 @@
/**
* \file drm_dma.c
* DMA IOCTL and function support
*
* \author Rickard E. (Rik) Faith <faith@valinux.com>
* \author Gareth Hughes <gareth@valinux.com>
*/
/*-
* Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
*
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
@ -21,64 +31,72 @@
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
* Gareth Hughes <gareth@valinux.com>
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/** @file drm_dma.c
* Support code for DMA buffer management.
*
* The implementation used to be significantly more complicated, but the
* complexity has been moved into the drivers as different buffer management
* schemes evolved.
*/
#include <dev/drm2/drmP.h>
/**
* Initialize the DMA data.
*
* \param dev DRM device.
* \return zero on success or a negative value on failure.
*
* Allocate and initialize a drm_device_dma structure.
*/
int drm_dma_setup(struct drm_device *dev)
{
int i;
dev->dma = malloc(sizeof(*dev->dma), DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
if (dev->dma == NULL)
return ENOMEM;
if (!dev->dma)
return -ENOMEM;
DRM_SPININIT(&dev->dma_lock, "drmdma");
for (i = 0; i <= DRM_MAX_ORDER; i++)
memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0]));
return 0;
}
/**
* Cleanup the DMA resources.
*
* \param dev DRM device.
*
* Free all pages associated with DMA buffers, the buffers and pages lists, and
* finally the drm_device::dma structure itself.
*/
void drm_dma_takedown(struct drm_device *dev)
{
drm_device_dma_t *dma = dev->dma;
int i, j;
struct drm_device_dma *dma = dev->dma;
int i, j;
if (dma == NULL)
if (!dma)
return;
/* Clear dma buffers */
for (i = 0; i <= DRM_MAX_ORDER; i++) {
if (dma->bufs[i].seg_count) {
DRM_DEBUG("order %d: buf_count = %d,"
" seg_count = %d\n", i, dma->bufs[i].buf_count,
dma->bufs[i].seg_count);
" seg_count = %d\n",
i,
dma->bufs[i].buf_count,
dma->bufs[i].seg_count);
for (j = 0; j < dma->bufs[i].seg_count; j++) {
drm_pci_free(dev, dma->bufs[i].seglist[j]);
if (dma->bufs[i].seglist[j]) {
drm_pci_free(dev, dma->bufs[i].seglist[j]);
}
}
free(dma->bufs[i].seglist, DRM_MEM_SEGS);
}
if (dma->bufs[i].buf_count) {
for (j = 0; j < dma->bufs[i].buf_count; j++) {
if (dma->bufs[i].buf_count) {
for (j = 0; j < dma->bufs[i].buf_count; j++) {
free(dma->bufs[i].buflist[j].dev_private,
DRM_MEM_BUFS);
}
free(dma->bufs[i].buflist, DRM_MEM_BUFS);
free(dma->bufs[i].buflist, DRM_MEM_BUFS);
}
}
@ -86,28 +104,42 @@ void drm_dma_takedown(struct drm_device *dev)
free(dma->pagelist, DRM_MEM_PAGES);
free(dev->dma, DRM_MEM_DRIVER);
dev->dma = NULL;
DRM_SPINUNINIT(&dev->dma_lock);
}
void drm_free_buffer(struct drm_device *dev, drm_buf_t *buf)
/**
* Free a buffer.
*
* \param dev DRM device.
* \param buf buffer to free.
*
* Resets the fields of \p buf.
*/
void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf)
{
if (!buf)
return;
buf->pending = 0;
buf->file_priv= NULL;
buf->used = 0;
buf->waiting = 0;
buf->pending = 0;
buf->file_priv = NULL;
buf->used = 0;
}
void drm_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
/**
* Reclaim the buffers.
*
* \param file_priv DRM file private.
*
* Frees each buffer associated with \p file_priv not already on the hardware.
*/
void drm_core_reclaim_buffers(struct drm_device *dev,
struct drm_file *file_priv)
{
drm_device_dma_t *dma = dev->dma;
int i;
struct drm_device_dma *dma = dev->dma;
int i;
if (!dma)
return;
for (i = 0; i < dma->buf_count; i++) {
if (dma->buflist[i]->file_priv == file_priv) {
switch (dma->buflist[i]->list) {
@ -125,15 +157,4 @@ void drm_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
}
}
/* Call into the driver-specific DMA handler */
int drm_dma(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
if (dev->driver->dma_ioctl) {
/* shared code returns -errno */
return -dev->driver->dma_ioctl(dev, data, file_priv);
} else {
DRM_DEBUG("DMA ioctl on driver with no dma handler\n");
return EINVAL;
}
}
EXPORT_SYMBOL(drm_core_reclaim_buffers);

View File

@ -35,6 +35,7 @@ __FBSDID("$FreeBSD$");
* blocks, ...
*/
/* Helpers for DP link training */
static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r)
{
return link_status[r - DP_LANE0_1_STATUS];
@ -67,6 +68,7 @@ bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
}
return true;
}
EXPORT_SYMBOL(drm_dp_channel_eq_ok);
bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
int lane_count)
@ -81,6 +83,7 @@ bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
}
return true;
}
EXPORT_SYMBOL(drm_dp_clock_recovery_ok);
u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
int lane)
@ -93,6 +96,7 @@ u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
}
EXPORT_SYMBOL(drm_dp_get_adjust_request_voltage);
u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
int lane)
@ -105,20 +109,23 @@ u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
}
EXPORT_SYMBOL(drm_dp_get_adjust_request_pre_emphasis);
void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
DRM_UDELAY(100);
udelay(100);
else
DRM_MDELAY(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4);
mdelay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4);
}
EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay);
void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
DRM_UDELAY(400);
udelay(400);
else
DRM_MDELAY(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4);
mdelay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4);
}
EXPORT_SYMBOL(drm_dp_link_train_channel_eq_delay);
u8 drm_dp_link_rate_to_bw_code(int link_rate)
{
@ -132,6 +139,7 @@ u8 drm_dp_link_rate_to_bw_code(int link_rate)
return DP_LINK_BW_5_4;
}
}
EXPORT_SYMBOL(drm_dp_link_rate_to_bw_code);
int drm_dp_bw_code_to_link_rate(u8 link_bw)
{
@ -145,3 +153,4 @@ int drm_dp_bw_code_to_link_rate(u8 link_bw)
return 540000;
}
}
EXPORT_SYMBOL(drm_dp_bw_code_to_link_rate);

View File

@ -41,7 +41,9 @@ iic_dp_aux_transaction(device_t idev, int mode, uint8_t write_byte,
aux_data = device_get_softc(idev);
ret = (*aux_data->aux_ch)(idev, mode, write_byte, read_byte);
return (ret);
if (ret < 0)
return (ret);
return (0);
}
/*
@ -106,7 +108,7 @@ iic_dp_aux_put_byte(device_t idev, u8 byte)
aux_data = device_get_softc(idev);
if (!aux_data->running)
return (EIO);
return (-EIO);
ret = iic_dp_aux_transaction(idev, MODE_I2C_WRITE, byte, NULL);
return (ret);
@ -125,7 +127,7 @@ iic_dp_aux_get_byte(device_t idev, u8 *byte_ret)
aux_data = device_get_softc(idev);
if (!aux_data->running)
return (EIO);
return (-EIO);
ret = iic_dp_aux_transaction(idev, MODE_I2C_READ, 0, byte_ret);
return (ret);
@ -167,7 +169,7 @@ iic_dp_aux_xfer(device_t idev, struct iic_msg *msgs, uint32_t num)
}
iic_dp_aux_stop(idev, reading);
DRM_DEBUG_KMS("dp_aux_xfer return %d\n", ret);
return (ret);
return (-ret);
}
static void
@ -183,7 +185,7 @@ iic_dp_aux_reset(device_t idev, u_char speed, u_char addr, u_char *oldaddr)
{
iic_dp_aux_reset_bus(idev);
return (0);
return (0);
}
static int
@ -255,7 +257,7 @@ iic_dp_aux_add_bus(device_t dev, const char *name,
*adapter = data->port;
}
mtx_unlock(&Giant);
return (error);
return (-error);
}
static device_method_t drm_iic_dp_aux_methods[] = {

View File

@ -1,173 +0,0 @@
/*-
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
* Gareth Hughes <gareth@valinux.com>
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/** @file drm_drawable.c
* This file implements ioctls to store information along with DRM drawables,
* such as the current set of cliprects for vblank-synced buffer swaps.
*/
#include <dev/drm2/drmP.h>
struct bsd_drm_drawable_info {
struct drm_drawable_info info;
int handle;
RB_ENTRY(bsd_drm_drawable_info) tree;
};
static int
drm_drawable_compare(struct bsd_drm_drawable_info *a,
struct bsd_drm_drawable_info *b)
{
if (a->handle > b->handle)
return 1;
if (a->handle < b->handle)
return -1;
return 0;
}
RB_GENERATE_STATIC(drawable_tree, bsd_drm_drawable_info, tree,
drm_drawable_compare);
struct drm_drawable_info *
drm_get_drawable_info(struct drm_device *dev, int handle)
{
struct bsd_drm_drawable_info find, *result;
find.handle = handle;
result = RB_FIND(drawable_tree, &dev->drw_head, &find);
return &result->info;
}
int drm_adddraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_draw *draw = data;
struct bsd_drm_drawable_info *info;
info = malloc(sizeof(struct bsd_drm_drawable_info), DRM_MEM_DRAWABLE,
M_NOWAIT | M_ZERO);
if (info == NULL)
return ENOMEM;
info->handle = alloc_unr(dev->drw_unrhdr);
DRM_SPINLOCK(&dev->drw_lock);
RB_INSERT(drawable_tree, &dev->drw_head, info);
draw->handle = info->handle;
DRM_SPINUNLOCK(&dev->drw_lock);
DRM_DEBUG("%d\n", draw->handle);
return 0;
}
int drm_rmdraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_draw *draw = (struct drm_draw *)data;
struct drm_drawable_info *info;
DRM_SPINLOCK(&dev->drw_lock);
info = drm_get_drawable_info(dev, draw->handle);
if (info != NULL) {
RB_REMOVE(drawable_tree, &dev->drw_head,
(struct bsd_drm_drawable_info *)info);
DRM_SPINUNLOCK(&dev->drw_lock);
free_unr(dev->drw_unrhdr, draw->handle);
free(info->rects, DRM_MEM_DRAWABLE);
free(info, DRM_MEM_DRAWABLE);
return 0;
} else {
DRM_SPINUNLOCK(&dev->drw_lock);
return EINVAL;
}
}
int drm_update_draw(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_drawable_info *info;
struct drm_update_draw *update = (struct drm_update_draw *)data;
int ret;
info = drm_get_drawable_info(dev, update->handle);
if (info == NULL)
return EINVAL;
switch (update->type) {
case DRM_DRAWABLE_CLIPRECTS:
DRM_SPINLOCK(&dev->drw_lock);
if (update->num != info->num_rects) {
free(info->rects, DRM_MEM_DRAWABLE);
info->rects = NULL;
info->num_rects = 0;
}
if (update->num == 0) {
DRM_SPINUNLOCK(&dev->drw_lock);
return 0;
}
if (info->rects == NULL) {
info->rects = malloc(sizeof(*info->rects) *
update->num, DRM_MEM_DRAWABLE, M_NOWAIT);
if (info->rects == NULL) {
DRM_SPINUNLOCK(&dev->drw_lock);
return ENOMEM;
}
info->num_rects = update->num;
}
/* For some reason the pointer arg is unsigned long long. */
ret = copyin((void *)(intptr_t)update->data, info->rects,
sizeof(*info->rects) * info->num_rects);
DRM_SPINUNLOCK(&dev->drw_lock);
return ret;
default:
return EINVAL;
}
}
void drm_drawable_free_all(struct drm_device *dev)
{
struct bsd_drm_drawable_info *info, *next;
DRM_SPINLOCK(&dev->drw_lock);
for (info = RB_MIN(drawable_tree, &dev->drw_head);
info != NULL ; info = next) {
next = RB_NEXT(drawable_tree, &dev->drw_head, info);
RB_REMOVE(drawable_tree, &dev->drw_head,
(struct bsd_drm_drawable_info *)info);
DRM_SPINUNLOCK(&dev->drw_lock);
free_unr(dev->drw_unrhdr, info->handle);
free(info->info.rects, DRM_MEM_DRAWABLE);
free(info, DRM_MEM_DRAWABLE);
DRM_SPINLOCK(&dev->drw_lock);
}
DRM_SPINUNLOCK(&dev->drw_lock);
}

File diff suppressed because it is too large Load Diff

View File

@ -33,7 +33,7 @@ __FBSDID("$FreeBSD$");
#include <dev/drm2/drmP.h>
#include <dev/drm2/drm_edid.h>
#include <dev/drm2/drm_edid_modes.h>
#include "drm_edid_modes.h"
#include <dev/iicbus/iic.h>
#include <dev/iicbus/iiconf.h>
#include "iicbus_if.h"
@ -69,6 +69,8 @@ __FBSDID("$FreeBSD$");
#define EDID_QUIRK_FIRST_DETAILED_PREFERRED (1 << 5)
/* use +hsync +vsync for detailed mode */
#define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6)
/* Force reduced-blanking timings for detailed modes */
#define EDID_QUIRK_FORCE_REDUCED_BLANKING (1 << 7)
struct detailed_mode_closure {
struct drm_connector *connector;
@ -123,6 +125,9 @@ static struct edid_quirk {
/* Samsung SyncMaster 22[5-6]BW */
{ "SAM", 596, EDID_QUIRK_PREFER_LARGE_60 },
{ "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 },
/* ViewSonic VA2026w */
{ "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING },
};
/*** DDC fetch and block validation ***/
@ -145,22 +150,30 @@ int drm_edid_header_is_valid(const u8 *raw_edid)
return score;
}
EXPORT_SYMBOL(drm_edid_header_is_valid);
static int edid_fixup __read_mostly = 6;
module_param_named(edid_fixup, edid_fixup, int, 0400);
MODULE_PARM_DESC(edid_fixup,
"Minimum number of valid EDID header bytes (0-8, default 6)");
/*
* Sanity check the EDID block (base or extension). Return 0 if the block
* doesn't check out, or 1 if it's valid.
*/
static bool
drm_edid_block_valid(u8 *raw_edid, int block)
bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid)
{
int i;
u8 csum = 0;
struct edid *edid = (struct edid *)raw_edid;
if (edid_fixup > 8 || edid_fixup < 0)
edid_fixup = 6;
if (block == 0) {
int score = drm_edid_header_is_valid(raw_edid);
if (score == 8) ;
else if (score >= 6) {
else if (score >= edid_fixup) {
DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
memcpy(raw_edid, edid_header, sizeof(edid_header));
} else {
@ -171,7 +184,9 @@ drm_edid_block_valid(u8 *raw_edid, int block)
for (i = 0; i < EDID_LENGTH; i++)
csum += raw_edid[i];
if (csum) {
DRM_DEBUG_KMS("EDID checksum is invalid, remainder is %d\n", csum);
if (print_bad_edid) {
DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum);
}
/* allow CEA to slide through, switches mangle this */
if (raw_edid[0] != 0x02)
@ -197,23 +212,22 @@ drm_edid_block_valid(u8 *raw_edid, int block)
return 1;
bad:
if (raw_edid) {
if (raw_edid && print_bad_edid) {
DRM_DEBUG_KMS("Raw EDID:\n");
if ((drm_debug_flag & DRM_DEBUGBITS_KMS) != 0) {
for (i = 0; i < EDID_LENGTH; ) {
printf("%02x", raw_edid[i]);
i++;
if (i % 16 == 0 || i == EDID_LENGTH)
printf("\n");
else if (i % 8 == 0)
printf(" ");
else
printf(" ");
}
for (i = 0; i < EDID_LENGTH; ) {
printf("%02x", raw_edid[i]);
i++;
if (i % 16 == 0 || i == EDID_LENGTH)
printf("\n");
else if (i % 8 == 0)
printf(" ");
else
printf(" ");
}
}
return 0;
}
EXPORT_SYMBOL(drm_edid_block_valid);
/**
* drm_edid_is_valid - sanity check EDID data
@ -230,13 +244,13 @@ bool drm_edid_is_valid(struct edid *edid)
return false;
for (i = 0; i <= edid->extensions; i++)
if (!drm_edid_block_valid(raw + i * EDID_LENGTH, i))
if (!drm_edid_block_valid(raw + i * EDID_LENGTH, i, true))
return false;
return true;
}
EXPORT_SYMBOL(drm_edid_is_valid);
#define DDC_ADDR 0x50
#define DDC_SEGMENT_ADDR 0x30
/**
* Get EDID information via I2C.
@ -266,13 +280,13 @@ drm_do_probe_ddc_edid(device_t adapter, unsigned char *buf,
do {
struct iic_msg msgs[] = {
{
.slave = DDC_SEGMENT_ADDR << 1,
.flags = 0,
.len = 1,
.buf = &segment,
.slave = DDC_SEGMENT_ADDR << 1,
.flags = 0,
.len = 1,
.buf = &segment,
}, {
.slave = DDC_ADDR << 1,
.flags = IIC_M_WR,
.flags = 0,
.len = 1,
.buf = &start,
}, {
@ -294,7 +308,7 @@ drm_do_probe_ddc_edid(device_t adapter, unsigned char *buf,
retries, ret);
} while (ret != 0 && --retries);
return (ret == 0 ? 0 : -1);
return ret == 0 ? 0 : -1;
}
static bool drm_edid_is_zero(u8 *in_edid, int length)
@ -305,6 +319,7 @@ static bool drm_edid_is_zero(u8 *in_edid, int length)
for (i = 0; i < length / 4; i++)
if (*(raw_edid + i) != 0)
return false;
return true;
}
@ -313,14 +328,16 @@ drm_do_get_edid(struct drm_connector *connector, device_t adapter)
{
int i, j = 0, valid_extensions = 0;
u8 *block, *new;
bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_DEBUGBITS_KMS);
block = malloc(EDID_LENGTH, DRM_MEM_KMS, M_WAITOK | M_ZERO);
if ((block = malloc(EDID_LENGTH, DRM_MEM_KMS, M_NOWAIT)) == NULL)
return NULL;
/* base block fetch */
for (i = 0; i < 4; i++) {
if (drm_do_probe_ddc_edid(adapter, block, 0, EDID_LENGTH))
goto out;
if (drm_edid_block_valid(block, 0))
if (drm_edid_block_valid(block, 0, print_bad_edid))
break;
if (i == 0 && drm_edid_is_zero(block, EDID_LENGTH)) {
connector->null_edid_counter++;
@ -335,7 +352,9 @@ drm_do_get_edid(struct drm_connector *connector, device_t adapter)
return block;
new = reallocf(block, (block[0x7e] + 1) * EDID_LENGTH, DRM_MEM_KMS,
M_WAITOK);
M_NOWAIT);
if (!new)
goto out;
block = new;
for (j = 1; j <= block[0x7e]; j++) {
@ -344,30 +363,39 @@ drm_do_get_edid(struct drm_connector *connector, device_t adapter)
block + (valid_extensions + 1) * EDID_LENGTH,
j, EDID_LENGTH))
goto out;
if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH, j)) {
if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH, j, print_bad_edid)) {
valid_extensions++;
break;
}
}
if (i == 4)
DRM_DEBUG_KMS("%s: Ignoring invalid EDID block %d.\n",
drm_get_connector_name(connector), j);
if (i == 4 && print_bad_edid) {
dev_warn(connector->dev->dev,
"%s: Ignoring invalid EDID block %d.\n",
drm_get_connector_name(connector), j);
connector->bad_edid_counter++;
}
}
if (valid_extensions != block[0x7e]) {
block[EDID_LENGTH-1] += block[0x7e] - valid_extensions;
block[0x7e] = valid_extensions;
new = reallocf(block, (valid_extensions + 1) * EDID_LENGTH,
DRM_MEM_KMS, M_WAITOK);
DRM_MEM_KMS, M_NOWAIT);
if (!new)
goto out;
block = new;
}
DRM_DEBUG_KMS("got EDID from %s\n", drm_get_connector_name(connector));
return block;
carp:
DRM_DEBUG_KMS("%s: EDID block %d invalid.\n",
drm_get_connector_name(connector), j);
if (print_bad_edid) {
dev_warn(connector->dev->dev, "%s: EDID block %d invalid.\n",
drm_get_connector_name(connector), j);
}
connector->bad_edid_counter++;
out:
free(block, DRM_MEM_KMS);
@ -380,13 +408,14 @@ drm_do_get_edid(struct drm_connector *connector, device_t adapter)
* \param adapter : i2c device adaptor
* \return 1 on success
*/
static bool
bool
drm_probe_ddc(device_t adapter)
{
unsigned char out;
return (drm_do_probe_ddc_edid(adapter, &out, 0, 1) == 0);
}
EXPORT_SYMBOL(drm_probe_ddc);
/**
* drm_get_edid - get EDID data, if available
@ -406,11 +435,9 @@ struct edid *drm_get_edid(struct drm_connector *connector,
if (drm_probe_ddc(adapter))
edid = (struct edid *)drm_do_get_edid(connector, adapter);
connector->display_info.raw_edid = (char *)edid;
return edid;
}
EXPORT_SYMBOL(drm_get_edid);
/*** EDID parsing ***/
@ -444,7 +471,7 @@ static u32 edid_get_quirks(struct edid *edid)
struct edid_quirk *quirk;
int i;
for (i = 0; i < DRM_ARRAY_SIZE(edid_quirk_list); i++) {
for (i = 0; i < ARRAY_SIZE(edid_quirk_list); i++) {
quirk = &edid_quirk_list[i];
if (edid_vendor(edid, quirk->vendor) &&
@ -531,7 +558,7 @@ struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
int i;
for (i = 0; i < drm_num_dmt_modes; i++) {
struct drm_display_mode *ptr = &drm_dmt_modes[i];
const struct drm_display_mode *ptr = &drm_dmt_modes[i];
if (hsize != ptr->hdisplay)
continue;
if (vsize != ptr->vdisplay)
@ -546,6 +573,7 @@ struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
return NULL;
}
EXPORT_SYMBOL(drm_mode_find_dmt);
typedef void detailed_cb(struct detailed_timing *timing, void *closure);
@ -553,25 +581,10 @@ static void
cea_for_each_detailed_block(u8 *ext, detailed_cb *cb, void *closure)
{
int i, n = 0;
u8 rev = ext[0x01], d = ext[0x02];
u8 d = ext[0x02];
u8 *det_base = ext + d;
switch (rev) {
case 0:
/* can't happen */
return;
case 1:
/* have to infer how many blocks we have, check pixel clock */
for (i = 0; i < 6; i++)
if (det_base[18*i] || det_base[18*i+1])
n++;
break;
default:
/* explicit count */
n = min(ext[0x03] & 0x0f, 6);
break;
}
n = (127 - d) / 18;
for (i = 0; i < n; i++)
cb((struct detailed_timing *)(det_base + 18 * i), closure);
}
@ -630,7 +643,7 @@ static bool
drm_monitor_supports_rb(struct edid *edid)
{
if (edid->revision >= 4) {
bool ret;
bool ret = false;
drm_for_each_detailed_block((u8 *)edid, is_rb, &ret);
return ret;
}
@ -814,7 +827,7 @@ drm_mode_std(struct drm_connector *connector, struct edid *edid,
if (!mode)
return NULL;
if (drm_mode_hsync(mode) > drm_gtf2_hbreak(edid)) {
free(mode, DRM_MEM_KMS);
drm_mode_destroy(dev, mode);
mode = drm_gtf_mode_complex(dev, hsize, vsize,
vrefresh_rate, 0, 0,
drm_gtf2_m(edid),
@ -859,7 +872,7 @@ drm_mode_do_interlace_quirk(struct drm_display_mode *mode,
if (!(pt->misc & DRM_EDID_PT_INTERLACED))
return;
for (i = 0; i < DRM_ARRAY_SIZE(cea_interlaced); i++) {
for (i = 0; i < ARRAY_SIZE(cea_interlaced); i++) {
if ((mode->hdisplay == cea_interlaced[i].w) &&
(mode->vdisplay == cea_interlaced[i].h / 2)) {
mode->vdisplay *= 2;
@ -896,7 +909,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo;
unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) << 2 | pt->hsync_offset_lo;
unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) << 4 | pt->hsync_pulse_width_lo;
unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) >> 2 | pt->vsync_offset_pulse_width_lo >> 4;
unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) << 2 | pt->vsync_offset_pulse_width_lo >> 4;
unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 4 | (pt->vsync_offset_pulse_width_lo & 0xf);
/* ignore tiny modes */
@ -917,16 +930,23 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
"Wrong Hsync/Vsync pulse width\n");
return NULL;
}
if (quirks & EDID_QUIRK_FORCE_REDUCED_BLANKING) {
mode = drm_cvt_mode(dev, hactive, vactive, 60, true, false, false);
if (!mode)
return NULL;
goto set_size;
}
mode = drm_mode_create(dev);
if (!mode)
return NULL;
mode->type = DRM_MODE_TYPE_DRIVER;
if (quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH)
timing->pixel_clock = htole16(1088);
timing->pixel_clock = cpu_to_le16(1088);
mode->clock = le16toh(timing->pixel_clock) * 10;
mode->clock = le16_to_cpu(timing->pixel_clock) * 10;
mode->hdisplay = hactive;
mode->hsync_start = mode->hdisplay + hsync_offset;
@ -946,8 +966,6 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
drm_mode_do_interlace_quirk(mode, pt);
drm_mode_set_name(mode);
if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
}
@ -957,6 +975,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
set_size:
mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4;
mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf) << 8;
@ -970,11 +989,15 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
mode->height_mm = edid->height_cm * 10;
}
mode->type = DRM_MODE_TYPE_DRIVER;
mode->vrefresh = drm_mode_vrefresh(mode);
drm_mode_set_name(mode);
return mode;
}
static bool
mode_in_hsync_range(struct drm_display_mode *mode,
mode_in_hsync_range(const struct drm_display_mode *mode,
struct edid *edid, u8 *t)
{
int hsync, hmin, hmax;
@ -991,7 +1014,7 @@ mode_in_hsync_range(struct drm_display_mode *mode,
}
static bool
mode_in_vsync_range(struct drm_display_mode *mode,
mode_in_vsync_range(const struct drm_display_mode *mode,
struct edid *edid, u8 *t)
{
int vsync, vmin, vmax;
@ -1023,7 +1046,7 @@ range_pixel_clock(struct edid *edid, u8 *t)
}
static bool
mode_in_range(struct drm_display_mode *mode, struct edid *edid,
mode_in_range(const struct drm_display_mode *mode, struct edid *edid,
struct detailed_timing *timing)
{
u32 max_clock;
@ -1050,6 +1073,24 @@ mode_in_range(struct drm_display_mode *mode, struct edid *edid,
return true;
}
static bool valid_inferred_mode(const struct drm_connector *connector,
const struct drm_display_mode *mode)
{
struct drm_display_mode *m;
bool ok = false;
list_for_each_entry(m, &connector->probed_modes, head) {
if (mode->hdisplay == m->hdisplay &&
mode->vdisplay == m->vdisplay &&
drm_mode_vrefresh(mode) == drm_mode_vrefresh(m))
return false; /* duplicated */
if (mode->hdisplay <= m->hdisplay &&
mode->vdisplay <= m->vdisplay)
ok = true;
}
return ok;
}
static int
drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid,
struct detailed_timing *timing)
@ -1059,7 +1100,8 @@ drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid,
struct drm_device *dev = connector->dev;
for (i = 0; i < drm_num_dmt_modes; i++) {
if (mode_in_range(drm_dmt_modes + i, edid, timing)) {
if (mode_in_range(drm_dmt_modes + i, edid, timing) &&
valid_inferred_mode(connector, drm_dmt_modes + i)) {
newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
if (newmode) {
drm_mode_probed_add(connector, newmode);
@ -1099,7 +1141,8 @@ drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
return modes;
fixup_mode_1366x768(newmode);
if (!mode_in_range(newmode, edid, timing)) {
if (!mode_in_range(newmode, edid, timing) ||
!valid_inferred_mode(connector, newmode)) {
drm_mode_destroy(dev, newmode);
continue;
}
@ -1127,7 +1170,8 @@ drm_cvt_modes_for_range(struct drm_connector *connector, struct edid *edid,
return modes;
fixup_mode_1366x768(newmode);
if (!mode_in_range(newmode, edid, timing)) {
if (!mode_in_range(newmode, edid, timing) ||
!valid_inferred_mode(connector, newmode)) {
drm_mode_destroy(dev, newmode);
continue;
}
@ -1201,7 +1245,7 @@ drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing)
for (i = 0; i < 6; i++) {
for (j = 7; j > 0; j--) {
m = (i * 8) + (7 - j);
if (m >= DRM_ARRAY_SIZE(est3_modes))
if (m >= ARRAY_SIZE(est3_modes))
break;
if (est[i] & (1 << j)) {
mode = drm_mode_find_dmt(connector->dev,
@ -1453,6 +1497,7 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
#define HDMI_IDENTIFIER 0x000C03
#define AUDIO_BLOCK 0x01
#define VIDEO_BLOCK 0x02
#define VENDOR_BLOCK 0x03
#define SPEAKER_BLOCK 0x04
#define EDID_BASIC_AUDIO (1 << 6)
@ -1483,21 +1528,133 @@ u8 *drm_find_cea_extension(struct edid *edid)
return edid_ext;
}
EXPORT_SYMBOL(drm_find_cea_extension);
/*
* Looks for a CEA mode matching given drm_display_mode.
* Returns its CEA Video ID code, or 0 if not found.
*/
u8 drm_match_cea_mode(struct drm_display_mode *to_match)
{
struct drm_display_mode *cea_mode;
u8 mode;
for (mode = 0; mode < drm_num_cea_modes; mode++) {
cea_mode = (struct drm_display_mode *)&edid_cea_modes[mode];
if (drm_mode_equal(to_match, cea_mode))
return mode + 1;
}
return 0;
}
EXPORT_SYMBOL(drm_match_cea_mode);
static int
do_cea_modes (struct drm_connector *connector, u8 *db, u8 len)
{
struct drm_device *dev = connector->dev;
u8 * mode, cea_mode;
int modes = 0;
for (mode = db; mode < db + len; mode++) {
cea_mode = (*mode & 127) - 1; /* CEA modes are numbered 1..127 */
if (cea_mode < drm_num_cea_modes) {
struct drm_display_mode *newmode;
newmode = drm_mode_duplicate(dev,
&edid_cea_modes[cea_mode]);
if (newmode) {
drm_mode_probed_add(connector, newmode);
modes++;
}
}
}
return modes;
}
static int
cea_db_payload_len(const u8 *db)
{
return db[0] & 0x1f;
}
static int
cea_db_tag(const u8 *db)
{
return db[0] >> 5;
}
static int
cea_revision(const u8 *cea)
{
return cea[1];
}
static int
cea_db_offsets(const u8 *cea, int *start, int *end)
{
/* Data block offset in CEA extension block */
*start = 4;
*end = cea[2];
if (*end == 0)
*end = 127;
if (*end < 4 || *end > 127)
return -ERANGE;
return 0;
}
#define for_each_cea_db(cea, i, start, end) \
for ((i) = (start); (i) < (end) && (i) + cea_db_payload_len(&(cea)[(i)]) < (end); (i) += cea_db_payload_len(&(cea)[(i)]) + 1)
static int
add_cea_modes(struct drm_connector *connector, struct edid *edid)
{
u8 * cea = drm_find_cea_extension(edid);
u8 * db, dbl;
int modes = 0;
if (cea && cea_revision(cea) >= 3) {
int i, start, end;
if (cea_db_offsets(cea, &start, &end))
return 0;
for_each_cea_db(cea, i, start, end) {
db = &cea[i];
dbl = cea_db_payload_len(db);
if (cea_db_tag(db) == VIDEO_BLOCK)
modes += do_cea_modes (connector, db+1, dbl);
}
}
return modes;
}
static void
parse_hdmi_vsdb(struct drm_connector *connector, uint8_t *db)
parse_hdmi_vsdb(struct drm_connector *connector, const u8 *db)
{
connector->eld[5] |= (db[6] >> 7) << 1; /* Supports_AI */
u8 len = cea_db_payload_len(db);
connector->dvi_dual = db[6] & 1;
connector->max_tmds_clock = db[7] * 5;
connector->latency_present[0] = db[8] >> 7;
connector->latency_present[1] = (db[8] >> 6) & 1;
connector->video_latency[0] = db[9];
connector->audio_latency[0] = db[10];
connector->video_latency[1] = db[11];
connector->audio_latency[1] = db[12];
if (len >= 6) {
connector->eld[5] |= (db[6] >> 7) << 1; /* Supports_AI */
connector->dvi_dual = db[6] & 1;
}
if (len >= 7)
connector->max_tmds_clock = db[7] * 5;
if (len >= 8) {
connector->latency_present[0] = db[8] >> 7;
connector->latency_present[1] = (db[8] >> 6) & 1;
}
if (len >= 9)
connector->video_latency[0] = db[9];
if (len >= 10)
connector->audio_latency[0] = db[10];
if (len >= 11)
connector->video_latency[1] = db[11];
if (len >= 12)
connector->audio_latency[1] = db[12];
DRM_DEBUG_KMS("HDMI: DVI dual %d, "
"max TMDS clock %d, "
@ -1521,6 +1678,21 @@ monitor_name(struct detailed_timing *t, void *data)
*(u8 **)data = t->data.other_data.data.str.str;
}
static bool cea_db_is_hdmi_vsdb(const u8 *db)
{
int hdmi_id;
if (cea_db_tag(db) != VENDOR_BLOCK)
return false;
if (cea_db_payload_len(db) < 5)
return false;
hdmi_id = db[1] | (db[2] << 8) | (db[3] << 16);
return hdmi_id == HDMI_IDENTIFIER;
}
/**
* drm_edid_to_eld - build ELD from EDID
* @connector: connector corresponding to the HDMI/DP sink
@ -1567,24 +1739,38 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
eld[18] = edid->prod_code[0];
eld[19] = edid->prod_code[1];
for (db = cea + 4; db < cea + cea[2]; db += dbl + 1) {
dbl = db[0] & 0x1f;
if (cea_revision(cea) >= 3) {
int i, start, end;
switch ((db[0] & 0xe0) >> 5) {
case AUDIO_BLOCK: /* Audio Data Block, contains SADs */
sad_count = dbl / 3;
memcpy(eld + 20 + mnl, &db[1], dbl);
break;
case SPEAKER_BLOCK: /* Speaker Allocation Data Block */
eld[7] = db[1];
break;
case VENDOR_BLOCK:
/* HDMI Vendor-Specific Data Block */
if (db[1] == 0x03 && db[2] == 0x0c && db[3] == 0)
parse_hdmi_vsdb(connector, db);
break;
default:
break;
if (cea_db_offsets(cea, &start, &end)) {
start = 0;
end = 0;
}
for_each_cea_db(cea, i, start, end) {
db = &cea[i];
dbl = cea_db_payload_len(db);
switch (cea_db_tag(db)) {
case AUDIO_BLOCK:
/* Audio Data Block, contains SADs */
sad_count = dbl / 3;
if (dbl >= 1)
memcpy(eld + 20 + mnl, &db[1], dbl);
break;
case SPEAKER_BLOCK:
/* Speaker Allocation Data Block */
if (dbl >= 1)
eld[7] = db[1];
break;
case VENDOR_BLOCK:
/* HDMI Vendor-Specific Data Block */
if (cea_db_is_hdmi_vsdb(db))
parse_hdmi_vsdb(connector, db);
break;
default:
break;
}
}
}
eld[5] |= sad_count << 4;
@ -1592,6 +1778,7 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
DRM_DEBUG_KMS("ELD size %d, SAD count %d\n", (int)eld[2], sad_count);
}
EXPORT_SYMBOL(drm_edid_to_eld);
/**
* drm_av_sync_delay - HDMI/DP sink audio-video sync delay in millisecond
@ -1629,6 +1816,7 @@ int drm_av_sync_delay(struct drm_connector *connector,
return max(v - a, 0);
}
EXPORT_SYMBOL(drm_av_sync_delay);
/**
* drm_select_eld - select one ELD from multiple HDMI/DP sinks
@ -1650,6 +1838,7 @@ struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
return NULL;
}
EXPORT_SYMBOL(drm_select_eld);
/**
* drm_detect_hdmi_monitor - detect whether monitor is hdmi.
@ -1661,39 +1850,28 @@ struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
bool drm_detect_hdmi_monitor(struct edid *edid)
{
u8 *edid_ext;
int i, hdmi_id;
int i;
int start_offset, end_offset;
bool is_hdmi = false;
edid_ext = drm_find_cea_extension(edid);
if (!edid_ext)
goto end;
return false;
/* Data block offset in CEA extension block */
start_offset = 4;
end_offset = edid_ext[2];
if (cea_db_offsets(edid_ext, &start_offset, &end_offset))
return false;
/*
* Because HDMI identifier is in Vendor Specific Block,
* search it from all data blocks of CEA extension.
*/
for (i = start_offset; i < end_offset;
/* Increased by data block len */
i += ((edid_ext[i] & 0x1f) + 1)) {
/* Find vendor specific block */
if ((edid_ext[i] >> 5) == VENDOR_BLOCK) {
hdmi_id = edid_ext[i + 1] | (edid_ext[i + 2] << 8) |
edid_ext[i + 3] << 16;
/* Find HDMI identifier */
if (hdmi_id == HDMI_IDENTIFIER)
is_hdmi = true;
break;
}
for_each_cea_db(edid_ext, i, start_offset, end_offset) {
if (cea_db_is_hdmi_vsdb(&edid_ext[i]))
return true;
}
end:
return is_hdmi;
return false;
}
EXPORT_SYMBOL(drm_detect_hdmi_monitor);
/**
* drm_detect_monitor_audio - check monitor audio capability
@ -1723,15 +1901,13 @@ bool drm_detect_monitor_audio(struct edid *edid)
goto end;
}
/* Data block offset in CEA extension block */
start_offset = 4;
end_offset = edid_ext[2];
if (cea_db_offsets(edid_ext, &start_offset, &end_offset))
goto end;
for (i = start_offset; i < end_offset;
i += ((edid_ext[i] & 0x1f) + 1)) {
if ((edid_ext[i] >> 5) == AUDIO_BLOCK) {
for_each_cea_db(edid_ext, i, start_offset, end_offset) {
if (cea_db_tag(&edid_ext[i]) == AUDIO_BLOCK) {
has_audio = true;
for (j = 1; j < (edid_ext[i] & 0x1f); j += 3)
for (j = 1; j < cea_db_payload_len(&edid_ext[i]) + 1; j += 3)
DRM_DEBUG_KMS("CEA audio format %d\n",
(edid_ext[i + j] >> 3) & 0xf);
goto end;
@ -1740,6 +1916,7 @@ bool drm_detect_monitor_audio(struct edid *edid)
end:
return has_audio;
}
EXPORT_SYMBOL(drm_detect_monitor_audio);
/**
* drm_add_display_info - pull display info out if present
@ -1835,7 +2012,7 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
return 0;
}
if (!drm_edid_is_valid(edid)) {
device_printf(connector->dev->device, "%s: EDID invalid.\n",
dev_warn(connector->dev->dev, "%s: EDID invalid.\n",
drm_get_connector_name(connector));
return 0;
}
@ -1860,7 +2037,9 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
num_modes += add_cvt_modes(connector, edid);
num_modes += add_standard_modes(connector, edid);
num_modes += add_established_modes(connector, edid);
num_modes += add_inferred_modes(connector, edid);
if (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)
num_modes += add_inferred_modes(connector, edid);
num_modes += add_cea_modes(connector, edid);
if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
edid_fixup_preferred(connector, quirks);
@ -1869,6 +2048,7 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
return num_modes;
}
EXPORT_SYMBOL(drm_add_edid_modes);
/**
* drm_add_modes_noedid - add modes for the connectors without EDID
@ -1895,7 +2075,7 @@ int drm_add_modes_noedid(struct drm_connector *connector,
vdisplay = 0;
for (i = 0; i < count; i++) {
struct drm_display_mode *ptr = &drm_dmt_modes[i];
const struct drm_display_mode *ptr = &drm_dmt_modes[i];
if (hdisplay && vdisplay) {
/*
* Only when two are valid, they will be used to check
@ -1916,3 +2096,23 @@ int drm_add_modes_noedid(struct drm_connector *connector,
}
return num_modes;
}
EXPORT_SYMBOL(drm_add_modes_noedid);
/**
* drm_mode_cea_vic - return the CEA-861 VIC of a given mode
* @mode: mode
*
* RETURNS:
* The VIC number, 0 in case it's not a CEA-861 mode.
*/
uint8_t drm_mode_cea_vic(const struct drm_display_mode *mode)
{
uint8_t i;
for (i = 0; i < drm_num_cea_modes; i++)
if (drm_mode_equal(mode, &edid_cea_modes[i]))
return i + 1;
return 0;
}
EXPORT_SYMBOL(drm_mode_cea_vic);

View File

@ -25,9 +25,6 @@
#ifndef __DRM_EDID_H__
#define __DRM_EDID_H__
#include <sys/types.h>
#include <dev/drm2/drmP.h>
#define EDID_LENGTH 128
#define DDC_ADDR 0x50
@ -99,7 +96,7 @@ struct detailed_data_monitor_range {
u8 reserved;
u8 hfreq_start_khz; /* need to multiply by 2 */
u8 c; /* need to divide by 2 */
u16 m;
__le16 m;
u8 k;
u8 j; /* need to divide by 2 */
} __attribute__((packed)) gtf2;
@ -159,7 +156,7 @@ struct detailed_non_pixel {
#define EDID_DETAIL_MONITOR_SERIAL 0xff
struct detailed_timing {
u16 pixel_clock; /* need to multiply by 10 KHz */
__le16 pixel_clock; /* need to multiply by 10 KHz */
union {
struct detailed_pixel_timing pixel_data;
struct detailed_non_pixel other_data;
@ -192,6 +189,7 @@ struct detailed_timing {
#define DRM_EDID_FEATURE_DEFAULT_GTF (1 << 0)
#define DRM_EDID_FEATURE_PREFERRED_TIMING (1 << 1)
#define DRM_EDID_FEATURE_STANDARD_COLOR (1 << 2)
/* If analog */
#define DRM_EDID_FEATURE_DISPLAY_TYPE (3 << 3) /* 00=mono, 01=rgb, 10=non-rgb, 11=unknown */
/* If digital */
#define DRM_EDID_FEATURE_COLOR_MASK (3 << 3)
@ -254,5 +252,6 @@ int drm_av_sync_delay(struct drm_connector *connector,
struct drm_display_mode *mode);
struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
struct drm_display_mode *mode);
int drm_load_edid_firmware(struct drm_connector *connector);
#endif /* __DRM_EDID_H__ */

View File

@ -32,7 +32,7 @@
* Autogenerated from the DMT spec.
* This table is copied from xfree86/modes/xf86EdidModes.c.
*/
static struct drm_display_mode drm_dmt_modes[] = {
static const struct drm_display_mode drm_dmt_modes[] = {
/* 640x350@85Hz */
{ DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
736, 832, 0, 350, 382, 385, 445, 0,
@ -90,7 +90,7 @@ static struct drm_display_mode drm_dmt_modes[] = {
976, 1088, 0, 480, 486, 494, 517, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1024x768@43Hz, interlace */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
{ DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
1208, 1264, 0, 768, 768, 772, 817, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE) },
@ -359,7 +359,7 @@ static struct drm_display_mode drm_dmt_modes[] = {
static const int drm_num_dmt_modes =
sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
static struct drm_display_mode edid_est_modes[] = {
static const struct drm_display_mode edid_est_modes[] = {
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
968, 1056, 0, 600, 601, 605, 628, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@60Hz */
@ -396,7 +396,7 @@ static struct drm_display_mode edid_est_modes[] = {
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
1184, 1344, 0, 768, 771, 777, 806, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@60Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032,
{ DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032,
1208, 1264, 0, 768, 768, 776, 817, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 1024x768@43Hz */
{ DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 57284, 832, 864,
@ -472,7 +472,7 @@ static const struct minimode est3_modes[] = {
{ 1920, 1440, 60, 0 },
{ 1920, 1440, 75, 0 },
};
static const int num_est3_modes = DRM_ARRAY_SIZE(est3_modes);
static const int num_est3_modes = ARRAY_SIZE(est3_modes);
static const struct minimode extra_modes[] = {
{ 1024, 576, 60, 0 },
@ -483,7 +483,7 @@ static const struct minimode extra_modes[] = {
{ 2048, 1152, 60, 0 },
{ 2048, 1536, 60, 0 },
};
static const int num_extra_modes = DRM_ARRAY_SIZE(extra_modes);
static const int num_extra_modes = ARRAY_SIZE(extra_modes);
/*
* Probably taken from CEA-861 spec.
@ -507,17 +507,17 @@ static const struct drm_display_mode edid_cea_modes[] = {
1430, 1650, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 5 - 1920x1080i@60Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 6 - 1440x480i@60Hz */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
/* 7 - 1440x480i@60Hz */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
@ -532,12 +532,12 @@ static const struct drm_display_mode edid_cea_modes[] = {
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK) },
/* 10 - 2880x480i@60Hz */
{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
{ DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 11 - 2880x480i@60Hz */
{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
{ DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE) },
@ -574,17 +574,17 @@ static const struct drm_display_mode edid_cea_modes[] = {
1760, 1980, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 20 - 1920x1080i@50Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 21 - 1440x576i@50Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
/* 22 - 1440x576i@50Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
@ -599,12 +599,12 @@ static const struct drm_display_mode edid_cea_modes[] = {
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK) },
/* 25 - 2880x576i@50Hz */
{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
{ DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 26 - 2880x576i@50Hz */
{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
{ DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE) },
@ -657,12 +657,12 @@ static const struct drm_display_mode edid_cea_modes[] = {
3184, 3456, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 39 - 1920x1080i@50Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
2120, 2304, 0, 1080, 1126, 1136, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 40 - 1920x1080i@100Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE) },
@ -689,7 +689,7 @@ static const struct drm_display_mode edid_cea_modes[] = {
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK) },
/* 46 - 1920x1080i@120Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE) },
@ -706,12 +706,12 @@ static const struct drm_display_mode edid_cea_modes[] = {
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 50 - 1440x480i@120Hz */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
/* 51 - 1440x480i@120Hz */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
@ -724,12 +724,12 @@ static const struct drm_display_mode edid_cea_modes[] = {
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 54 - 1440x576i@200Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
/* 55 - 1440x576i@200Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
@ -742,12 +742,12 @@ static const struct drm_display_mode edid_cea_modes[] = {
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 58 - 1440x480i@240 */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
/* 59 - 1440x480i@240 */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
@ -772,4 +772,4 @@ static const struct drm_display_mode edid_cea_modes[] = {
2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
};
static const int drm_num_cea_modes = DRM_ARRAY_SIZE(edid_cea_modes);
static const int drm_num_cea_modes = ARRAY_SIZE(edid_cea_modes);

View File

@ -31,23 +31,28 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <dev/drm2/drmP.h>
#include <dev/drm2/drm_crtc.h>
#include <dev/drm2/drm_fb_helper.h>
#include <dev/drm2/drm_crtc_helper.h>
MODULE_AUTHOR("David Airlie, Jesse Barnes");
MODULE_DESCRIPTION("DRM KMS helper");
MODULE_LICENSE("GPL and additional rights");
static DRM_LIST_HEAD(kernel_fb_helper_list);
#include <sys/kdb.h>
#include <sys/param.h>
#include <sys/systm.h>
struct vt_kms_softc {
struct drm_fb_helper *fb_helper;
struct task fb_mode_task;
struct drm_fb_helper *fb_helper;
struct task fb_mode_task;
};
static fb_enter_t vt_kms_postswitch;
static void vt_restore_fbdev_mode(void *, int);
/* Call restore out of vt(9) locks. */
static void
vt_restore_fbdev_mode(void *arg, int pending)
@ -77,25 +82,29 @@ vt_kms_postswitch(void *arg)
return (0);
}
static DRM_LIST_HEAD(kernel_fb_helper_list);
/* simple single crtc case helper function */
int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
struct fb_info *
framebuffer_alloc()
{
struct drm_device *dev = fb_helper->dev;
struct drm_connector *connector;
struct fb_info *info;
struct vt_kms_softc *sc;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct drm_fb_helper_connector *fb_helper_connector;
info = malloc(sizeof(*info), DRM_MEM_KMS, M_WAITOK | M_ZERO);
fb_helper_connector = malloc(
sizeof(struct drm_fb_helper_connector), DRM_MEM_KMS,
M_WAITOK | M_ZERO);
sc = malloc(sizeof(*sc), DRM_MEM_KMS, M_WAITOK | M_ZERO);
TASK_INIT(&sc->fb_mode_task, 0, vt_restore_fbdev_mode, sc);
fb_helper_connector->connector = connector;
fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector;
}
return 0;
info->fb_priv = sc;
info->enter = &vt_kms_postswitch;
return (info);
}
void
framebuffer_release(struct fb_info *info)
{
free(info->fb_priv, DRM_MEM_KMS);
free(info, DRM_MEM_KMS);
}
static int
@ -125,9 +134,47 @@ fb_get_options(const char *connector_name, char **option)
if (*option == NULL)
*option = kern_getenv("kern.vt.fb.default_mode");
return (*option != NULL ? 0 : 1);
return (*option != NULL ? 0 : -ENOENT);
}
/**
* DOC: fbdev helpers
*
* The fb helper functions are useful to provide an fbdev on top of a drm kernel
* mode setting driver. They can be used mostly independantely from the crtc
* helper functions used by many drivers to implement the kernel mode setting
* interfaces.
*/
/* simple single crtc case helper function */
int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
{
struct drm_device *dev = fb_helper->dev;
struct drm_connector *connector;
int i;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct drm_fb_helper_connector *fb_helper_connector;
fb_helper_connector = malloc(sizeof(struct drm_fb_helper_connector),
DRM_MEM_KMS, M_NOWAIT | M_ZERO);
if (!fb_helper_connector)
goto fail;
fb_helper_connector->connector = connector;
fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector;
}
return 0;
fail:
for (i = 0; i < fb_helper->connector_count; i++) {
free(fb_helper->connector_info[i], DRM_MEM_KMS);
fb_helper->connector_info[i] = NULL;
}
fb_helper->connector_count = 0;
return -ENOMEM;
}
EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors);
static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper)
{
struct drm_fb_helper_connector *fb_helper_conn;
@ -169,7 +216,7 @@ static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper)
connector->force = mode->force;
}
DRM_INFO("cmdline mode for connector %s %dx%d@%dHz%s%s%s\n",
DRM_DEBUG_KMS("cmdline mode for connector %s %dx%d@%dHz%s%s%s\n",
drm_get_connector_name(connector),
mode->xres, mode->yres,
mode->refresh_specified ? mode->refresh : 60,
@ -179,12 +226,11 @@ static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper)
}
freeenv(option);
}
return 0;
}
#if 0
#if 0 && defined(FREEBSD_NOTYET)
static void drm_fb_helper_save_lut_atomic(struct drm_crtc *crtc, struct drm_fb_helper *helper)
{
uint16_t *r_base, *g_base, *b_base;
@ -211,9 +257,7 @@ static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc)
crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size);
}
#endif
#if 0
int drm_fb_helper_debug_enter(struct fb_info *info)
{
struct drm_fb_helper *helper = info->par;
@ -243,9 +287,8 @@ int drm_fb_helper_debug_enter(struct fb_info *info)
return 0;
}
#endif
EXPORT_SYMBOL(drm_fb_helper_debug_enter);
#if 0
/* Find the real fb for a given fb helper CRTC */
static struct drm_framebuffer *drm_mode_config_fb(struct drm_crtc *crtc)
{
@ -259,9 +302,7 @@ static struct drm_framebuffer *drm_mode_config_fb(struct drm_crtc *crtc)
return NULL;
}
#endif
#if 0
int drm_fb_helper_debug_leave(struct fb_info *info)
{
struct drm_fb_helper *helper = info->par;
@ -291,7 +332,8 @@ int drm_fb_helper_debug_leave(struct fb_info *info)
return 0;
}
#endif
EXPORT_SYMBOL(drm_fb_helper_debug_leave);
#endif /* FREEBSD_NOTYET */
bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper)
{
@ -299,15 +341,15 @@ bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper)
int i, ret;
for (i = 0; i < fb_helper->crtc_count; i++) {
struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set;
ret = drm_crtc_helper_set_config(mode_set);
ret = mode_set->crtc->funcs->set_config(mode_set);
if (ret)
error = true;
}
return error;
}
EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode);
#if 0
bool drm_fb_helper_force_kernel_mode(void)
static bool drm_fb_helper_force_kernel_mode(void)
{
bool ret, error = false;
struct drm_fb_helper *helper;
@ -325,20 +367,27 @@ bool drm_fb_helper_force_kernel_mode(void)
}
return error;
}
#endif
#if 0
#if 0 && defined(FREEBSD_NOTYET)
int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed,
void *panic_str)
{
printf("panic occurred, switching back to text console\n");
/*
* It's a waste of time and effort to switch back to text console
* if the kernel should reboot before panic messages can be seen.
*/
if (panic_timeout < 0)
return 0;
pr_err("panic occurred, switching back to text console\n");
return drm_fb_helper_force_kernel_mode();
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_panic);
static struct notifier_block paniced = {
.notifier_call = drm_fb_helper_panic,
};
#endif /* FREEBSD_NOTYET */
/**
* drm_fb_helper_restore - restore the framebuffer console (kernel) config
@ -352,7 +401,9 @@ void drm_fb_helper_restore(void)
if (ret == true)
DRM_ERROR("Failed to restore crtc configuration\n");
}
EXPORT_SYMBOL(drm_fb_helper_restore);
#ifdef __linux__
#ifdef CONFIG_MAGIC_SYSRQ
static void drm_fb_helper_restore_work_fn(struct work_struct *ignored)
{
@ -375,127 +426,64 @@ static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { };
#endif
#endif
#if 0
static void drm_fb_helper_on(struct fb_info *info)
#if 0 && defined(FREEBSD_NOTYET)
static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
{
struct drm_fb_helper *fb_helper = info->par;
struct drm_device *dev = fb_helper->dev;
struct drm_crtc *crtc;
struct drm_crtc_helper_funcs *crtc_funcs;
struct drm_connector *connector;
struct drm_encoder *encoder;
int i, j;
/*
* For each CRTC in this fb, turn the crtc on then,
* find all associated encoders and turn them on.
* For each CRTC in this fb, turn the connectors on/off.
*/
sx_xlock(&dev->mode_config.mutex);
for (i = 0; i < fb_helper->crtc_count; i++) {
crtc = fb_helper->crtc_info[i].mode_set.crtc;
crtc_funcs = crtc->helper_private;
if (!crtc->enabled)
continue;
crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
/* Walk the connectors & encoders on this fb turning them on */
/* Walk the connectors & encoders on this fb turning them on/off */
for (j = 0; j < fb_helper->connector_count; j++) {
connector = fb_helper->connector_info[j]->connector;
connector->dpms = DRM_MODE_DPMS_ON;
drm_connector_property_set_value(connector,
dev->mode_config.dpms_property,
DRM_MODE_DPMS_ON);
}
/* Found a CRTC on this fb, now find encoders */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc == crtc) {
struct drm_encoder_helper_funcs *encoder_funcs;
encoder_funcs = encoder->helper_private;
encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
}
connector->funcs->dpms(connector, dpms_mode);
drm_object_property_set_value(&connector->base,
dev->mode_config.dpms_property, dpms_mode);
}
}
sx_xunlock(&dev->mode_config.mutex);
}
#endif
#if 0
static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
{
struct drm_fb_helper *fb_helper = info->par;
struct drm_device *dev = fb_helper->dev;
struct drm_crtc *crtc;
struct drm_crtc_helper_funcs *crtc_funcs;
struct drm_connector *connector;
struct drm_encoder *encoder;
int i, j;
/*
* For each CRTC in this fb, find all associated encoders
* and turn them off, then turn off the CRTC.
*/
sx_xlock(&dev->mode_config.mutex);
for (i = 0; i < fb_helper->crtc_count; i++) {
crtc = fb_helper->crtc_info[i].mode_set.crtc;
crtc_funcs = crtc->helper_private;
if (!crtc->enabled)
continue;
/* Walk the connectors on this fb and mark them off */
for (j = 0; j < fb_helper->connector_count; j++) {
connector = fb_helper->connector_info[j]->connector;
connector->dpms = dpms_mode;
drm_connector_property_set_value(connector,
dev->mode_config.dpms_property,
dpms_mode);
}
/* Found a CRTC on this fb, now find encoders */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc == crtc) {
struct drm_encoder_helper_funcs *encoder_funcs;
encoder_funcs = encoder->helper_private;
encoder_funcs->dpms(encoder, dpms_mode);
}
}
crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
}
sx_xunlock(&dev->mode_config.mutex);
}
#endif
#if 0
int drm_fb_helper_blank(int blank, struct fb_info *info)
{
switch (blank) {
/* Display: On; HSync: On, VSync: On */
case FB_BLANK_UNBLANK:
drm_fb_helper_on(info);
drm_fb_helper_dpms(info, DRM_MODE_DPMS_ON);
break;
/* Display: Off; HSync: On, VSync: On */
case FB_BLANK_NORMAL:
drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY);
drm_fb_helper_dpms(info, DRM_MODE_DPMS_STANDBY);
break;
/* Display: Off; HSync: Off, VSync: On */
case FB_BLANK_HSYNC_SUSPEND:
drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY);
drm_fb_helper_dpms(info, DRM_MODE_DPMS_STANDBY);
break;
/* Display: Off; HSync: On, VSync: Off */
case FB_BLANK_VSYNC_SUSPEND:
drm_fb_helper_off(info, DRM_MODE_DPMS_SUSPEND);
drm_fb_helper_dpms(info, DRM_MODE_DPMS_SUSPEND);
break;
/* Display: Off; HSync: Off, VSync: Off */
case FB_BLANK_POWERDOWN:
drm_fb_helper_off(info, DRM_MODE_DPMS_OFF);
drm_fb_helper_dpms(info, DRM_MODE_DPMS_OFF);
break;
}
return 0;
}
#endif
EXPORT_SYMBOL(drm_fb_helper_blank);
#endif /* FREEBSD_NOTYET */
static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
{
@ -523,52 +511,64 @@ int drm_fb_helper_init(struct drm_device *dev,
INIT_LIST_HEAD(&fb_helper->kernel_fb_list);
fb_helper->crtc_info = malloc(crtc_count *
sizeof(struct drm_fb_helper_crtc), DRM_MEM_KMS, M_WAITOK | M_ZERO);
fb_helper->crtc_info = malloc(crtc_count * sizeof(struct drm_fb_helper_crtc),
DRM_MEM_KMS, M_NOWAIT | M_ZERO);
if (!fb_helper->crtc_info)
return -ENOMEM;
fb_helper->crtc_count = crtc_count;
fb_helper->connector_info = malloc(dev->mode_config.num_connector *
sizeof(struct drm_fb_helper_connector *), DRM_MEM_KMS,
M_WAITOK | M_ZERO);
fb_helper->connector_info = malloc(dev->mode_config.num_connector * sizeof(struct drm_fb_helper_connector *),
DRM_MEM_KMS, M_NOWAIT | M_ZERO);
if (!fb_helper->connector_info) {
free(fb_helper->crtc_info, DRM_MEM_KMS);
return -ENOMEM;
}
fb_helper->connector_count = 0;
for (i = 0; i < crtc_count; i++) {
fb_helper->crtc_info[i].mode_set.connectors =
malloc(max_conn_count * sizeof(struct drm_connector *),
DRM_MEM_KMS, M_WAITOK | M_ZERO);
malloc(max_conn_count *
sizeof(struct drm_connector *),
DRM_MEM_KMS, M_NOWAIT | M_ZERO);
if (!fb_helper->crtc_info[i].mode_set.connectors)
goto out_free;
fb_helper->crtc_info[i].mode_set.num_connectors = 0;
}
i = 0;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
fb_helper->crtc_info[i].crtc_id = crtc->base.id;
fb_helper->crtc_info[i].mode_set.crtc = crtc;
i++;
}
fb_helper->conn_limit = max_conn_count;
return 0;
out_free:
drm_fb_helper_crtc_free(fb_helper);
return -ENOMEM;
}
EXPORT_SYMBOL(drm_fb_helper_init);
void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
{
if (!list_empty(&fb_helper->kernel_fb_list)) {
list_del(&fb_helper->kernel_fb_list);
#if 0 && defined(FREEBSD_NOTYET)
if (list_empty(&kernel_fb_helper_list)) {
#if 0
printk(KERN_INFO "drm: unregistered panic notifier\n");
pr_info("drm: unregistered panic notifier\n");
atomic_notifier_chain_unregister(&panic_notifier_list,
&paniced);
unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
#endif
}
#endif /* FREEBSD_NOTYET */
}
drm_fb_helper_crtc_free(fb_helper);
}
EXPORT_SYMBOL(drm_fb_helper_fini);
#if 0
#if 0 && defined(FREEBSD_NOTYET)
static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, u16 regno, struct fb_info *info)
{
@ -576,7 +576,7 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
struct drm_framebuffer *fb = fb_helper->fb;
int pindex;
if (info->fix.visual == FB_VISUAL_trueCOLOR) {
if (info->fix.visual == FB_VISUAL_TRUECOLOR) {
u32 *palette;
u32 value;
/* place color in psuedopalette */
@ -632,9 +632,7 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
fb_helper->funcs->gamma_set(crtc, red, green, blue, pindex);
return 0;
}
#endif
#if 0
int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
@ -672,9 +670,8 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
}
return rc;
}
#endif
EXPORT_SYMBOL(drm_fb_helper_setcmap);
#if 0
int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
struct fb_info *info)
{
@ -765,9 +762,8 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
}
return 0;
}
#endif
EXPORT_SYMBOL(drm_fb_helper_check_var);
#if 0
/* this will let fbcon do the mode init */
int drm_fb_helper_set_par(struct fb_info *info)
{
@ -783,16 +779,16 @@ int drm_fb_helper_set_par(struct fb_info *info)
return -EINVAL;
}
mutex_lock(&dev->mode_config.mutex);
sx_xlock(&dev->mode_config.mutex);
for (i = 0; i < fb_helper->crtc_count; i++) {
crtc = fb_helper->crtc_info[i].mode_set.crtc;
ret = crtc->funcs->set_config(&fb_helper->crtc_info[i].mode_set);
if (ret) {
mutex_unlock(&dev->mode_config.mutex);
sx_xunlock(&dev->mode_config.mutex);
return ret;
}
}
mutex_unlock(&dev->mode_config.mutex);
sx_xunlock(&dev->mode_config.mutex);
if (fb_helper->delayed_hotplug) {
fb_helper->delayed_hotplug = false;
@ -800,9 +796,8 @@ int drm_fb_helper_set_par(struct fb_info *info)
}
return 0;
}
#endif
EXPORT_SYMBOL(drm_fb_helper_set_par);
#if 0
int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
@ -813,7 +808,7 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
int ret = 0;
int i;
mutex_lock(&dev->mode_config.mutex);
sx_xlock(&dev->mode_config.mutex);
for (i = 0; i < fb_helper->crtc_count; i++) {
crtc = fb_helper->crtc_info[i].mode_set.crtc;
@ -830,10 +825,11 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
}
}
}
mutex_unlock(&dev->mode_config.mutex);
sx_xunlock(&dev->mode_config.mutex);
return ret;
}
#endif
EXPORT_SYMBOL(drm_fb_helper_pan_display);
#endif /* FREEBSD_NOTYET */
int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
int preferred_bpp)
@ -844,8 +840,9 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
struct fb_info *info;
struct drm_fb_helper_surface_size sizes;
int gamma_size = 0;
struct vt_kms_softc *sc;
#if defined(__FreeBSD__)
device_t kdev;
#endif
memset(&sizes, 0, sizeof(struct drm_fb_helper_surface_size));
sizes.surface_depth = 24;
@ -855,9 +852,9 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
/* if driver picks 8 or 16 by default use that
for both depth/bpp */
if (preferred_bpp != sizes.surface_bpp) {
if (preferred_bpp != sizes.surface_bpp)
sizes.surface_depth = sizes.surface_bpp = preferred_bpp;
}
/* first up get a count of crtcs now in use and new min/maxes width/heights */
for (i = 0; i < fb_helper->connector_count; i++) {
struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i];
@ -922,29 +919,16 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
if (new_fb < 0)
return new_fb;
sc = malloc(sizeof(struct vt_kms_softc), DRM_MEM_KMS,
M_WAITOK | M_ZERO);
sc->fb_helper = fb_helper;
TASK_INIT(&sc->fb_mode_task, 0, vt_restore_fbdev_mode, sc);
info = fb_helper->fbdev;
info->fb_name = device_get_nameunit(fb_helper->dev->device);
info->fb_depth = fb_helper->fb->bits_per_pixel;
info->fb_height = fb_helper->fb->height;
info->fb_width = fb_helper->fb->width;
info->fb_stride = fb_helper->fb->pitches[0];
info->fb_priv = sc;
info->enter = &vt_kms_postswitch;
kdev = fb_helper->dev->device;
kdev = fb_helper->dev->dev;
info->fb_video_dev = device_get_parent(kdev);
/* set the fb pointer */
for (i = 0; i < fb_helper->crtc_count; i++) {
for (i = 0; i < fb_helper->crtc_count; i++)
fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb;
}
#if defined(__FreeBSD__)
if (new_fb) {
int ret;
@ -959,102 +943,61 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
DRM_ERROR("Failed to attach fbd device: %d\n", ret);
#endif
}
#else
if (new_fb) {
info->var.pixclock = 0;
if (register_framebuffer(info) < 0)
return -EINVAL;
dev_info(fb_helper->dev->dev, "fb%d: %s frame buffer device\n",
info->node, info->fix.id);
} else {
drm_fb_helper_set_par(info);
}
#endif
#if 0 && defined(FREEBSD_NOTYET)
/* Switch back to kernel console on panic */
/* multi card linked list maybe */
if (list_empty(&kernel_fb_helper_list)) {
dev_info(fb_helper->dev->dev, "registered panic notifier\n");
atomic_notifier_chain_register(&panic_notifier_list,
&paniced);
register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
}
#endif /* FREEBSD_NOTYET */
if (new_fb)
list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list);
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_single_fb_probe);
#if 0
void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
uint32_t depth)
{
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.visual = depth == 8 ? FB_VISUAL_PSEUDOCOLOR :
FB_VISUAL_trueCOLOR;
info->fix.mmio_start = 0;
info->fix.mmio_len = 0;
info->fix.type_aux = 0;
info->fix.xpanstep = 1; /* doing it in hw */
info->fix.ypanstep = 1; /* doing it in hw */
info->fix.ywrapstep = 0;
info->fix.accel = FB_ACCEL_NONE;
info->fix.type_aux = 0;
info->fb_stride = pitch;
info->fix.line_length = pitch;
return;
}
EXPORT_SYMBOL(drm_fb_helper_fill_fix);
void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
uint32_t fb_width, uint32_t fb_height)
{
struct drm_framebuffer *fb = fb_helper->fb;
info->pseudo_palette = fb_helper->pseudo_palette;
info->var.xres_virtual = fb->width;
info->var.yres_virtual = fb->height;
info->var.bits_per_pixel = fb->bits_per_pixel;
info->var.accel_flags = FB_ACCELF_TEXT;
info->var.xoffset = 0;
info->var.yoffset = 0;
info->var.activate = FB_ACTIVATE_NOW;
info->var.height = -1;
info->var.width = -1;
struct vt_kms_softc *sc;
switch (fb->depth) {
case 8:
info->var.red.offset = 0;
info->var.green.offset = 0;
info->var.blue.offset = 0;
info->var.red.length = 8; /* 8bit DAC */
info->var.green.length = 8;
info->var.blue.length = 8;
info->var.transp.offset = 0;
info->var.transp.length = 0;
break;
case 15:
info->var.red.offset = 10;
info->var.green.offset = 5;
info->var.blue.offset = 0;
info->var.red.length = 5;
info->var.green.length = 5;
info->var.blue.length = 5;
info->var.transp.offset = 15;
info->var.transp.length = 1;
break;
case 16:
info->var.red.offset = 11;
info->var.green.offset = 5;
info->var.blue.offset = 0;
info->var.red.length = 5;
info->var.green.length = 6;
info->var.blue.length = 5;
info->var.transp.offset = 0;
break;
case 24:
info->var.red.offset = 16;
info->var.green.offset = 8;
info->var.blue.offset = 0;
info->var.red.length = 8;
info->var.green.length = 8;
info->var.blue.length = 8;
info->var.transp.offset = 0;
info->var.transp.length = 0;
break;
case 32:
info->var.red.offset = 16;
info->var.green.offset = 8;
info->var.blue.offset = 0;
info->var.red.length = 8;
info->var.green.length = 8;
info->var.blue.length = 8;
info->var.transp.offset = 24;
info->var.transp.length = 8;
break;
default:
break;
}
info->fb_name = device_get_nameunit(fb_helper->dev->dev);
info->fb_width = fb->width;
info->fb_height = fb->height;
info->fb_depth = fb->bits_per_pixel;
info->var.xres = fb_width;
info->var.yres = fb_height;
sc = (struct vt_kms_softc *)info->fb_priv;
sc->fb_helper = fb_helper;
}
#endif
EXPORT_SYMBOL(drm_fb_helper_fill_var);
static int drm_fb_helper_probe_connector_modes(struct drm_fb_helper *fb_helper,
uint32_t maxX,
@ -1101,7 +1044,7 @@ static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_conne
cmdline_mode = &fb_helper_conn->cmdline_mode;
if (cmdline_mode->specified == false)
return (NULL);
return mode;
/* attempt to find a matching mode in the list of modes
* we have gotten so far, if not add a CVT mode that conforms
@ -1128,19 +1071,8 @@ static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_conne
}
create_mode:
if (cmdline_mode->cvt)
mode = drm_cvt_mode(fb_helper_conn->connector->dev,
cmdline_mode->xres, cmdline_mode->yres,
cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
cmdline_mode->rb, cmdline_mode->interlace,
cmdline_mode->margins);
else
mode = drm_gtf_mode(fb_helper_conn->connector->dev,
cmdline_mode->xres, cmdline_mode->yres,
cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
cmdline_mode->interlace,
cmdline_mode->margins);
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
mode = drm_mode_create_from_cmdline_mode(fb_helper_conn->connector->dev,
cmdline_mode);
list_add(&mode->head, &fb_helper_conn->connector->modes);
return mode;
}
@ -1149,11 +1081,11 @@ static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
{
bool enable;
if (strict) {
if (strict)
enable = connector->status == connector_status_connected;
} else {
else
enable = connector->status != connector_status_disconnected;
}
return enable;
}
@ -1316,8 +1248,9 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
return best_score;
crtcs = malloc(dev->mode_config.num_connector *
sizeof(struct drm_fb_helper_crtc *), DRM_MEM_KMS,
M_WAITOK | M_ZERO);
sizeof(struct drm_fb_helper_crtc *), DRM_MEM_KMS, M_NOWAIT | M_ZERO);
if (!crtcs)
return best_score;
my_score = 1;
if (connector->status == connector_status_connected)
@ -1337,9 +1270,8 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
for (c = 0; c < fb_helper->crtc_count; c++) {
crtc = &fb_helper->crtc_info[c];
if ((encoder->possible_crtcs & (1 << c)) == 0) {
if ((encoder->possible_crtcs & (1 << c)) == 0)
continue;
}
for (o = 0; o < n; o++)
if (best_crtcs[o] == crtc)
@ -1376,7 +1308,6 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
struct drm_device *dev = fb_helper->dev;
struct drm_fb_helper_crtc **crtcs;
struct drm_display_mode **modes;
struct drm_encoder *encoder;
struct drm_mode_set *modeset;
bool *enabled;
int width, height;
@ -1387,19 +1318,17 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
width = dev->mode_config.max_width;
height = dev->mode_config.max_height;
/* clean out all the encoder/crtc combos */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
encoder->crtc = NULL;
crtcs = malloc(dev->mode_config.num_connector *
sizeof(struct drm_fb_helper_crtc *), DRM_MEM_KMS, M_NOWAIT | M_ZERO);
modes = malloc(dev->mode_config.num_connector *
sizeof(struct drm_display_mode *), DRM_MEM_KMS, M_NOWAIT | M_ZERO);
enabled = malloc(dev->mode_config.num_connector *
sizeof(bool), DRM_MEM_KMS, M_NOWAIT | M_ZERO);
if (!crtcs || !modes || !enabled) {
DRM_ERROR("Memory allocation failed\n");
goto out;
}
crtcs = malloc(dev->mode_config.num_connector *
sizeof(struct drm_fb_helper_crtc *), DRM_MEM_KMS,
M_WAITOK | M_ZERO);
modes = malloc(dev->mode_config.num_connector *
sizeof(struct drm_display_mode *), DRM_MEM_KMS,
M_WAITOK | M_ZERO);
enabled = malloc(dev->mode_config.num_connector *
sizeof(bool), DRM_MEM_KMS, M_WAITOK | M_ZERO);
drm_enable_connectors(fb_helper, enabled);
@ -1438,6 +1367,7 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
}
}
out:
free(crtcs, DRM_MEM_KMS);
free(modes, DRM_MEM_KMS);
free(enabled, DRM_MEM_KMS);
@ -1445,12 +1375,14 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
/**
* drm_helper_initial_config - setup a sane initial connector configuration
* @dev: DRM device
* @fb_helper: fb_helper device struct
* @bpp_sel: bpp value to use for the framebuffer configuration
*
* LOCKING:
* Called at init time, must take mode config lock.
* Called at init time by the driver to set up the @fb_helper initial
* configuration, must take the mode config lock.
*
* Scan the CRTCs and connectors and try to put together an initial setup.
* Scans the CRTCs and connectors and tries to put together an initial setup.
* At the moment, this is a cloned configuration across all heads with
* a new framebuffer object as the backing store.
*
@ -1473,20 +1405,35 @@ bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
/*
* we shouldn't end up with no modes here.
*/
if (count == 0) {
printf("No connectors reported connected with modes\n");
}
if (count == 0)
dev_info(fb_helper->dev->dev, "No connectors reported connected with modes\n");
drm_setup_crtcs(fb_helper);
return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
}
EXPORT_SYMBOL(drm_fb_helper_initial_config);
/**
* drm_fb_helper_hotplug_event - respond to a hotplug notification by
* probing all the outputs attached to the fb
* @fb_helper: the drm_fb_helper
*
* LOCKING:
* Called at runtime, must take mode config lock.
*
* Scan the connectors attached to the fb_helper and try to put together a
* setup after *notification of a change in output configuration.
*
* RETURNS:
* 0 on success and a non-zero error code otherwise.
*/
int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
{
struct drm_device *dev = fb_helper->dev;
int count = 0;
u32 max_width, max_height, bpp_sel;
bool bound = false, crtcs_bound = false;
int bound = 0, crtcs_bound = 0;
struct drm_crtc *crtc;
if (!fb_helper->fb)
@ -1495,12 +1442,12 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
sx_xlock(&dev->mode_config.mutex);
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (crtc->fb)
crtcs_bound = true;
crtcs_bound++;
if (crtc->fb == fb_helper->fb)
bound = true;
bound++;
}
if (!bound && crtcs_bound) {
if (bound < crtcs_bound) {
fb_helper->delayed_hotplug = true;
sx_xunlock(&dev->mode_config.mutex);
return 0;
@ -1518,4 +1465,4 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
}
EXPORT_SYMBOL(drm_fb_helper_hotplug_event);

View File

@ -35,7 +35,6 @@
struct drm_fb_helper;
struct drm_fb_helper_crtc {
uint32_t crtc_id;
struct drm_mode_set mode_set;
struct drm_display_mode *desired_mode;
};
@ -74,7 +73,6 @@ struct drm_fb_helper {
int connector_count;
struct drm_fb_helper_connector **connector_info;
struct drm_fb_helper_funcs *funcs;
int conn_limit;
struct fb_info *fbdev;
u32 pseudo_palette[17];
struct list_head kernel_fb_list;
@ -84,9 +82,6 @@ struct drm_fb_helper {
bool delayed_hotplug;
};
struct fb_var_screeninfo;
struct fb_cmap;
int drm_fb_helper_single_fb_probe(struct drm_fb_helper *helper,
int preferred_bpp);
@ -95,11 +90,15 @@ int drm_fb_helper_init(struct drm_device *dev,
int max_conn);
void drm_fb_helper_fini(struct drm_fb_helper *helper);
int drm_fb_helper_blank(int blank, struct fb_info *info);
#ifdef FREEBSD_NOTYET
int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info);
#endif /* FREEBSD_NOTYET */
int drm_fb_helper_set_par(struct fb_info *info);
#ifdef FREEBSD_NOTYET
int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
struct fb_info *info);
#endif /* FREEBSD_NOTYET */
int drm_fb_helper_setcolreg(unsigned regno,
unsigned red,
unsigned green,
@ -114,13 +113,14 @@ void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helpe
void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
uint32_t depth);
#ifdef FREEBSD_NOTYET
int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info);
#endif /* FREEBSD_NOTYET */
int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper);
bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel);
int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper);
int drm_fb_helper_debug_enter(struct fb_info *info);
int drm_fb_helper_debug_leave(struct fb_info *info);
bool drm_fb_helper_force_kernel_mode(void);
#endif

View File

@ -1,4 +1,15 @@
/*-
/**
* \file drm_fops.c
* File operations for DRM
*
* \author Rickard E. (Rik) Faith <faith@valinux.com>
* \author Daryll Strauss <daryll@valinux.com>
* \author Gareth Hughes <gareth@valinux.com>
*/
/*
* Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
@ -21,51 +32,177 @@
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
* Daryll Strauss <daryll@valinux.com>
* Gareth Hughes <gareth@valinux.com>
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/** @file drm_fops.c
* Support code for dealing with the file privates associated with each
* open of the DRM device.
*/
#include <dev/drm2/drmP.h>
/* drm_open_helper is called whenever a process opens /dev/drm. */
int drm_open_helper(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p,
struct drm_device *dev)
static int drm_open_helper(struct cdev *kdev, int flags, int fmt,
DRM_STRUCTPROC *p, struct drm_device *dev);
static int drm_setup(struct drm_device * dev)
{
int i;
int ret;
if (dev->driver->firstopen) {
ret = dev->driver->firstopen(dev);
if (ret != 0)
return ret;
}
atomic_set(&dev->ioctl_count, 0);
atomic_set(&dev->vma_count, 0);
if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
!drm_core_check_feature(dev, DRIVER_MODESET)) {
dev->buf_use = 0;
atomic_set(&dev->buf_alloc, 0);
i = drm_dma_setup(dev);
if (i < 0)
return i;
}
/*
* FIXME Linux<->FreeBSD: counter incremented in drm_open() and
* reset to 0 here.
*/
#if 0
for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
atomic_set(&dev->counts[i], 0);
#endif
dev->sigdata.lock = NULL;
dev->context_flag = 0;
dev->interrupt_flag = 0;
dev->dma_flag = 0;
dev->last_context = 0;
dev->last_switch = 0;
dev->last_checked = 0;
DRM_INIT_WAITQUEUE(&dev->context_wait);
dev->if_version = 0;
#ifdef FREEBSD_NOTYET
dev->ctx_start = 0;
dev->lck_start = 0;
dev->buf_async = NULL;
DRM_INIT_WAITQUEUE(&dev->buf_readers);
DRM_INIT_WAITQUEUE(&dev->buf_writers);
#endif /* FREEBSD_NOTYET */
DRM_DEBUG("\n");
/*
* The kernel's context could be created here, but is now created
* in drm_dma_enqueue. This is more resource-efficient for
* hardware that does not do DMA, but may mean that
* drm_select_queue fails between the time the interrupt is
* initialized and the time the queues are initialized.
*/
return 0;
}
/**
* Open file.
*
* \param inode device inode
* \param filp file pointer.
* \return zero on success or a negative number on failure.
*
* Searches the DRM device with the same minor number, calls open_helper(), and
* increments the device open count. If the open count was previous at zero,
* i.e., it's the first that the device is open, then calls setup().
*/
int drm_open(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p)
{
struct drm_device *dev = NULL;
struct drm_minor *minor;
int retcode = 0;
int need_setup = 0;
minor = kdev->si_drv1;
if (!minor)
return ENODEV;
if (!(dev = minor->dev))
return ENODEV;
sx_xlock(&drm_global_mutex);
/*
* FIXME Linux<->FreeBSD: On Linux, counter updated outisde
* global mutex.
*/
if (!dev->open_count++)
need_setup = 1;
retcode = drm_open_helper(kdev, flags, fmt, p, dev);
if (retcode) {
sx_xunlock(&drm_global_mutex);
return (-retcode);
}
atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
if (need_setup) {
retcode = drm_setup(dev);
if (retcode)
goto err_undo;
}
sx_xunlock(&drm_global_mutex);
return 0;
err_undo:
mtx_lock(&Giant); /* FIXME: Giant required? */
device_unbusy(dev->dev);
mtx_unlock(&Giant);
dev->open_count--;
sx_xunlock(&drm_global_mutex);
return -retcode;
}
EXPORT_SYMBOL(drm_open);
/**
* Called whenever a process opens /dev/drm.
*
* \param inode device inode.
* \param filp file pointer.
* \param dev device.
* \return zero on success or a negative number on failure.
*
* Creates and initializes a drm_file structure for the file private data in \p
* filp and add it into the double linked list in \p dev.
*/
static int drm_open_helper(struct cdev *kdev, int flags, int fmt,
DRM_STRUCTPROC *p, struct drm_device *dev)
{
struct drm_file *priv;
int retcode;
int ret;
if (flags & O_EXCL)
return EBUSY; /* No exclusive opens */
dev->flags = flags;
return -EBUSY; /* No exclusive opens */
if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
return -EINVAL;
DRM_DEBUG("pid = %d, device = %s\n", DRM_CURRENTPID, devtoname(kdev));
priv = malloc(sizeof(*priv), DRM_MEM_FILES, M_NOWAIT | M_ZERO);
if (priv == NULL) {
return ENOMEM;
}
DRM_LOCK(dev);
priv->dev = dev;
priv->uid = p->td_ucred->cr_svuid;
priv->pid = p->td_proc->p_pid;
priv->ioctl_count = 0;
if (!priv)
return -ENOMEM;
priv->uid = p->td_ucred->cr_svuid;
priv->pid = p->td_proc->p_pid;
priv->minor = kdev->si_drv1;
priv->ioctl_count = 0;
/* for compatibility root is always authenticated */
priv->authenticated = DRM_SUSER(p);
priv->authenticated = DRM_SUSER(p);
priv->lock_count = 0;
INIT_LIST_HEAD(&priv->lhead);
INIT_LIST_HEAD(&priv->fbs);
INIT_LIST_HEAD(&priv->event_list);
priv->event_space = 4096; /* set aside 4k for event buffer */
@ -73,47 +210,289 @@ int drm_open_helper(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p,
if (dev->driver->driver_features & DRIVER_GEM)
drm_gem_open(dev, priv);
#ifdef FREEBSD_NOTYET
if (drm_core_check_feature(dev, DRIVER_PRIME))
drm_prime_init_file_private(&priv->prime);
#endif /* FREEBSD_NOTYET */
if (dev->driver->open) {
/* shared code returns -errno */
retcode = -dev->driver->open(dev, priv);
if (retcode != 0) {
free(priv, DRM_MEM_FILES);
ret = dev->driver->open(dev, priv);
if (ret < 0)
goto out_free;
}
/* if there is no current master make this fd it */
DRM_LOCK(dev);
if (!priv->minor->master) {
/* create a new master */
priv->minor->master = drm_master_create(priv->minor);
if (!priv->minor->master) {
DRM_UNLOCK(dev);
return retcode;
ret = -ENOMEM;
goto out_free;
}
priv->is_master = 1;
/* take another reference for the copy in the local file priv */
priv->master = drm_master_get(priv->minor->master);
priv->authenticated = 1;
DRM_UNLOCK(dev);
if (dev->driver->master_create) {
ret = dev->driver->master_create(dev, priv->master);
if (ret) {
DRM_LOCK(dev);
/* drop both references if this fails */
drm_master_put(&priv->minor->master);
drm_master_put(&priv->master);
DRM_UNLOCK(dev);
goto out_free;
}
}
DRM_LOCK(dev);
if (dev->driver->master_set) {
ret = dev->driver->master_set(dev, priv, true);
if (ret) {
/* drop both references if this fails */
drm_master_put(&priv->minor->master);
drm_master_put(&priv->master);
DRM_UNLOCK(dev);
goto out_free;
}
}
DRM_UNLOCK(dev);
} else {
/* get a reference to the master */
priv->master = drm_master_get(priv->minor->master);
DRM_UNLOCK(dev);
}
DRM_LOCK(dev);
list_add(&priv->lhead, &dev->filelist);
DRM_UNLOCK(dev);
mtx_lock(&Giant); /* FIXME: Giant required? */
device_busy(dev->dev);
mtx_unlock(&Giant);
ret = devfs_set_cdevpriv(priv, drm_release);
if (ret != 0)
drm_release(priv);
return ret;
out_free:
free(priv, DRM_MEM_FILES);
return ret;
}
static void drm_master_release(struct drm_device *dev, struct drm_file *file_priv)
{
if (drm_i_have_hw_lock(dev, file_priv)) {
DRM_DEBUG("File %p released, freeing lock for context %d\n",
file_priv, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
drm_lock_free(&file_priv->master->lock,
_DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
}
}
static void drm_events_release(struct drm_file *file_priv)
{
struct drm_device *dev = file_priv->minor->dev;
struct drm_pending_event *e, *et;
struct drm_pending_vblank_event *v, *vt;
unsigned long flags;
DRM_SPINLOCK_IRQSAVE(&dev->event_lock, flags);
/* Remove pending flips */
list_for_each_entry_safe(v, vt, &dev->vblank_event_list, base.link)
if (v->base.file_priv == file_priv) {
list_del(&v->base.link);
drm_vblank_put(dev, v->pipe);
v->base.destroy(&v->base);
}
/* Remove unconsumed events */
list_for_each_entry_safe(e, et, &file_priv->event_list, link)
e->destroy(e);
DRM_SPINUNLOCK_IRQRESTORE(&dev->event_lock, flags);
}
/**
* Release file.
*
* \param inode device inode
* \param file_priv DRM file private.
* \return zero on success or a negative number on failure.
*
* If the hardware lock is held then free it, and take it again for the kernel
* context since it's necessary to reclaim buffers. Unlink the file private
* data from its list and free it. Decreases the open count and if it reaches
* zero calls drm_lastclose().
*/
void drm_release(void *data)
{
struct drm_file *file_priv = data;
struct drm_device *dev = file_priv->minor->dev;
sx_xlock(&drm_global_mutex);
DRM_DEBUG("open_count = %d\n", dev->open_count);
if (dev->driver->preclose)
dev->driver->preclose(dev, file_priv);
/* ========================================================
* Begin inline drm_release
*/
DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
DRM_CURRENTPID,
(long)file_priv->minor->device,
dev->open_count);
/* Release any auth tokens that might point to this file_priv,
(do that under the drm_global_mutex) */
if (file_priv->magic)
(void) drm_remove_magic(file_priv->master, file_priv->magic);
/* if the master has gone away we can't do anything with the lock */
if (file_priv->minor->master)
drm_master_release(dev, file_priv);
if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
drm_core_reclaim_buffers(dev, file_priv);
drm_events_release(file_priv);
seldrain(&file_priv->event_poll);
if (dev->driver->driver_features & DRIVER_MODESET)
drm_fb_release(file_priv);
if (dev->driver->driver_features & DRIVER_GEM)
drm_gem_release(dev, file_priv);
#ifdef FREEBSD_NOTYET
mutex_lock(&dev->ctxlist_mutex);
if (!list_empty(&dev->ctxlist)) {
struct drm_ctx_list *pos, *n;
list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
if (pos->tag == file_priv &&
pos->handle != DRM_KERNEL_CONTEXT) {
if (dev->driver->context_dtor)
dev->driver->context_dtor(dev,
pos->handle);
drm_ctxbitmap_free(dev, pos->handle);
list_del(&pos->head);
kfree(pos);
--dev->ctx_count;
}
}
}
mutex_unlock(&dev->ctxlist_mutex);
#endif /* FREEBSD_NOTYET */
DRM_LOCK(dev);
if (file_priv->is_master) {
struct drm_master *master = file_priv->master;
struct drm_file *temp;
list_for_each_entry(temp, &dev->filelist, lhead) {
if ((temp->master == file_priv->master) &&
(temp != file_priv))
temp->authenticated = 0;
}
/**
* Since the master is disappearing, so is the
* possibility to lock.
*/
if (master->lock.hw_lock) {
if (dev->sigdata.lock == master->lock.hw_lock)
dev->sigdata.lock = NULL;
master->lock.hw_lock = NULL;
master->lock.file_priv = NULL;
DRM_WAKEUP_INT(&master->lock.lock_queue);
}
if (file_priv->minor->master == file_priv->master) {
/* drop the reference held my the minor */
if (dev->driver->master_drop)
dev->driver->master_drop(dev, file_priv, true);
drm_master_put(&file_priv->minor->master);
}
}
/* first opener automatically becomes master */
priv->master = TAILQ_EMPTY(&dev->files);
TAILQ_INSERT_TAIL(&dev->files, priv, link);
/* drop the reference held my the file priv */
drm_master_put(&file_priv->master);
file_priv->is_master = 0;
list_del(&file_priv->lhead);
DRM_UNLOCK(dev);
kdev->si_drv1 = dev;
retcode = devfs_set_cdevpriv(priv, drm_close);
if (retcode != 0)
drm_close(priv);
if (dev->driver->postclose)
dev->driver->postclose(dev, file_priv);
return (retcode);
#ifdef FREEBSD_NOTYET
if (drm_core_check_feature(dev, DRIVER_PRIME))
drm_prime_destroy_file_private(&file_priv->prime);
#endif /* FREEBSD_NOTYET */
free(file_priv, DRM_MEM_FILES);
/* ========================================================
* End inline drm_release
*/
atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
mtx_lock(&Giant);
device_unbusy(dev->dev);
mtx_unlock(&Giant);
if (!--dev->open_count) {
if (atomic_read(&dev->ioctl_count)) {
DRM_ERROR("Device busy: %d\n",
atomic_read(&dev->ioctl_count));
} else
drm_lastclose(dev);
}
sx_xunlock(&drm_global_mutex);
}
EXPORT_SYMBOL(drm_release);
static bool
drm_dequeue_event(struct drm_device *dev, struct drm_file *file_priv,
struct uio *uio, struct drm_pending_event **out)
drm_dequeue_event(struct drm_file *file_priv, struct uio *uio,
struct drm_pending_event **out)
{
struct drm_pending_event *e;
bool ret = false;
/* Already locked in drm_read(). */
/* DRM_SPINLOCK_IRQSAVE(&dev->event_lock, flags); */
*out = NULL;
if (list_empty(&file_priv->event_list))
return (false);
goto out;
e = list_first_entry(&file_priv->event_list,
struct drm_pending_event, link);
struct drm_pending_event, link);
if (e->event->length > uio->uio_resid)
return (false);
goto out;
file_priv->event_space += e->event->length;
list_del(&e->link);
*out = e;
return (true);
ret = true;
out:
/* DRM_SPINUNLOCK_IRQRESTORE(&dev->event_lock, flags); */
return ret;
}
int
@ -122,13 +501,14 @@ drm_read(struct cdev *kdev, struct uio *uio, int ioflag)
struct drm_file *file_priv;
struct drm_device *dev;
struct drm_pending_event *e;
int error;
ssize_t error;
error = devfs_get_cdevpriv((void **)&file_priv);
if (error != 0) {
DRM_ERROR("can't find authenticator\n");
return (EINVAL);
}
dev = drm_get_device_from_kdev(kdev);
mtx_lock(&dev->event_lock);
while (list_empty(&file_priv->event_list)) {
@ -141,20 +521,24 @@ drm_read(struct cdev *kdev, struct uio *uio, int ioflag)
if (error != 0)
goto out;
}
while (drm_dequeue_event(dev, file_priv, uio, &e)) {
while (drm_dequeue_event(file_priv, uio, &e)) {
mtx_unlock(&dev->event_lock);
error = uiomove(e->event, e->event->length, uio);
CTR3(KTR_DRM, "drm_event_dequeued %d %d %d", curproc->p_pid,
e->event->type, e->event->length);
e->destroy(e);
if (error != 0)
return (error);
mtx_lock(&dev->event_lock);
}
out:
mtx_unlock(&dev->event_lock);
return (error);
}
EXPORT_SYMBOL(drm_read);
void
drm_event_wakeup(struct drm_pending_event *e)
@ -163,7 +547,7 @@ drm_event_wakeup(struct drm_pending_event *e)
struct drm_device *dev;
file_priv = e->file_priv;
dev = file_priv->dev;
dev = file_priv->minor->dev;
mtx_assert(&dev->event_lock, MA_OWNED);
wakeup(&file_priv->event_space);
@ -182,6 +566,7 @@ drm_poll(struct cdev *kdev, int events, struct thread *td)
DRM_ERROR("can't find authenticator\n");
return (EINVAL);
}
dev = drm_get_device_from_kdev(kdev);
revents = 0;
@ -198,3 +583,21 @@ drm_poll(struct cdev *kdev, int events, struct thread *td)
mtx_unlock(&dev->event_lock);
return (revents);
}
EXPORT_SYMBOL(drm_poll);
int
drm_mmap_single(struct cdev *kdev, vm_ooffset_t *offset, vm_size_t size,
struct vm_object **obj_res, int nprot)
{
struct drm_device *dev;
dev = drm_get_device_from_kdev(kdev);
if (dev->drm_ttm_bdev != NULL) {
return (-ttm_bo_mmap_single(dev->drm_ttm_bdev, offset, size,
obj_res, nprot));
} else if ((dev->driver->driver_features & DRIVER_GEM) != 0) {
return (-drm_gem_mmap_single(dev, offset, size, obj_res, nprot));
} else {
return (ENODEV);
}
}

View File

@ -26,10 +26,8 @@
#ifndef DRM_FOURCC_H
#define DRM_FOURCC_H
#include <sys/types.h>
#define fourcc_code(a, b, c, d) ((uint32_t)(a) | ((uint32_t)(b) << 8) | \
((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))
#define fourcc_code(a, b, c, d) ((__u32)(a) | ((__u32)(b) << 8) | \
((__u32)(c) << 16) | ((__u32)(d) << 24))
#define DRM_FORMAT_BIG_ENDIAN (1<<31) /* format is big endian instead of little endian */
@ -108,9 +106,10 @@
#define DRM_FORMAT_NV21 fourcc_code('N', 'V', '2', '1') /* 2x2 subsampled Cb:Cr plane */
#define DRM_FORMAT_NV16 fourcc_code('N', 'V', '1', '6') /* 2x1 subsampled Cr:Cb plane */
#define DRM_FORMAT_NV61 fourcc_code('N', 'V', '6', '1') /* 2x1 subsampled Cb:Cr plane */
#define DRM_FORMAT_NV24 fourcc_code('N', 'V', '2', '4') /* non-subsampled Cr:Cb plane */
#define DRM_FORMAT_NV42 fourcc_code('N', 'V', '4', '2') /* non-subsampled Cb:Cr plane */
/* 2 non contiguous plane YCbCr */
#define DRM_FORMAT_NV12M fourcc_code('N', 'M', '1', '2') /* 2x2 subsampled Cr:Cb plane */
/* special NV12 tiled format */
#define DRM_FORMAT_NV12MT fourcc_code('T', 'M', '1', '2') /* 2x2 subsampled Cr:Cb plane 64x32 macroblocks */
/*
@ -133,7 +132,4 @@
#define DRM_FORMAT_YUV444 fourcc_code('Y', 'U', '2', '4') /* non-subsampled Cb (1) and Cr (2) planes */
#define DRM_FORMAT_YVU444 fourcc_code('Y', 'V', '2', '4') /* non-subsampled Cr (1) and Cb (2) planes */
/* 3 non contiguous plane YCbCr */
#define DRM_FORMAT_YUV420M fourcc_code('Y', 'M', '1', '2') /* 2x2 subsampled Cb (1) and Cr (2) planes */
#endif /* DRM_FOURCC_H */

View File

@ -54,7 +54,7 @@ __FBSDID("$FreeBSD$");
* the faked up offset will fit
*/
#if ULONG_MAX == UINT64_MAX
#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
@ -62,28 +62,40 @@ __FBSDID("$FreeBSD$");
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif
/**
* Initialize the GEM device fields
*/
int
drm_gem_init(struct drm_device *dev)
{
struct drm_gem_mm *mm;
drm_gem_names_init(&dev->object_names);
mm = malloc(sizeof(*mm), DRM_MEM_DRIVER, M_WAITOK);
dev->mm_private = mm;
if (drm_ht_create(&mm->offset_hash, 19) != 0) {
free(mm, DRM_MEM_DRIVER);
return (ENOMEM);
mm = malloc(sizeof(*mm), DRM_MEM_DRIVER, M_NOWAIT);
if (!mm) {
DRM_ERROR("out of memory\n");
return -ENOMEM;
}
dev->mm_private = mm;
if (drm_ht_create(&mm->offset_hash, 19)) {
free(mm, DRM_MEM_DRIVER);
return -ENOMEM;
}
mm->idxunr = new_unrhdr(0, DRM_GEM_MAX_IDX, NULL);
return (0);
return 0;
}
void
drm_gem_destroy(struct drm_device *dev)
{
struct drm_gem_mm *mm;
struct drm_gem_mm *mm = dev->mm_private;
mm = dev->mm_private;
dev->mm_private = NULL;
drm_ht_remove(&mm->offset_hash);
delete_unrhdr(mm->idxunr);
@ -91,11 +103,9 @@ drm_gem_destroy(struct drm_device *dev)
drm_gem_names_fini(&dev->object_names);
}
int
drm_gem_object_init(struct drm_device *dev, struct drm_gem_object *obj,
size_t size)
int drm_gem_object_init(struct drm_device *dev,
struct drm_gem_object *obj, size_t size)
{
KASSERT((size & (PAGE_SIZE - 1)) == 0,
("Bad size %ju", (uintmax_t)size));
@ -107,14 +117,18 @@ drm_gem_object_init(struct drm_device *dev, struct drm_gem_object *obj,
obj->handle_count = 0;
obj->size = size;
return (0);
return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);
int
drm_gem_private_object_init(struct drm_device *dev, struct drm_gem_object *obj,
size_t size)
/**
* Initialize an already allocated GEM object of the specified size with
* no GEM provided backing store. Instead the caller is responsible for
* backing the object and handling it.
*/
int drm_gem_private_object_init(struct drm_device *dev,
struct drm_gem_object *obj, size_t size)
{
MPASS((size & (PAGE_SIZE - 1)) == 0);
obj->dev = dev;
@ -124,159 +138,264 @@ drm_gem_private_object_init(struct drm_device *dev, struct drm_gem_object *obj,
atomic_store_rel_int(&obj->handle_count, 0);
obj->size = size;
return (0);
return 0;
}
EXPORT_SYMBOL(drm_gem_private_object_init);
struct drm_gem_object *
drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
struct drm_gem_object *obj;
obj = malloc(sizeof(*obj), DRM_MEM_DRIVER, M_WAITOK | M_ZERO);
obj = malloc(sizeof(*obj), DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
if (!obj)
goto free;
if (drm_gem_object_init(dev, obj, size) != 0)
goto free;
if (dev->driver->gem_init_object != NULL &&
dev->driver->gem_init_object(obj) != 0)
dev->driver->gem_init_object(obj) != 0) {
goto dealloc;
return (obj);
}
return obj;
dealloc:
vm_object_deallocate(obj->vm_obj);
free:
free(obj, DRM_MEM_DRIVER);
return (NULL);
return NULL;
}
EXPORT_SYMBOL(drm_gem_object_alloc);
void
drm_gem_object_free(struct drm_gem_object *obj)
#if defined(FREEBSD_NOTYET)
static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
struct drm_device *dev;
dev = obj->dev;
DRM_LOCK_ASSERT(dev);
if (dev->driver->gem_free_object != NULL)
dev->driver->gem_free_object(obj);
}
void
drm_gem_object_reference(struct drm_gem_object *obj)
{
KASSERT(obj->refcount > 0, ("Dangling obj %p", obj));
refcount_acquire(&obj->refcount);
}
void
drm_gem_object_unreference(struct drm_gem_object *obj)
{
if (obj == NULL)
return;
if (refcount_release(&obj->refcount))
drm_gem_object_free(obj);
}
void
drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
{
struct drm_device *dev;
if (obj == NULL)
return;
dev = obj->dev;
DRM_LOCK(dev);
drm_gem_object_unreference(obj);
DRM_UNLOCK(dev);
}
void
drm_gem_object_handle_reference(struct drm_gem_object *obj)
{
drm_gem_object_reference(obj);
atomic_add_rel_int(&obj->handle_count, 1);
}
void
drm_gem_object_handle_free(struct drm_gem_object *obj)
{
struct drm_device *dev;
struct drm_gem_object *obj1;
dev = obj->dev;
if (obj->name != 0) {
obj1 = drm_gem_names_remove(&dev->object_names, obj->name);
obj->name = 0;
drm_gem_object_unreference(obj1);
if (obj->import_attach) {
drm_prime_remove_buf_handle(&filp->prime,
obj->import_attach->dmabuf);
}
if (obj->export_dma_buf) {
drm_prime_remove_buf_handle(&filp->prime,
obj->export_dma_buf);
}
}
#endif
void
drm_gem_object_handle_unreference(struct drm_gem_object *obj)
{
if (obj == NULL ||
atomic_load_acq_int(&obj->handle_count) == 0)
return;
if (atomic_fetchadd_int(&obj->handle_count, -1) == 1)
drm_gem_object_handle_free(obj);
drm_gem_object_unreference(obj);
}
void
drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
{
if (obj == NULL ||
atomic_load_acq_int(&obj->handle_count) == 0)
return;
if (atomic_fetchadd_int(&obj->handle_count, -1) == 1)
drm_gem_object_handle_free(obj);
drm_gem_object_unreference_unlocked(obj);
}
/**
* Removes the mapping from handle to filp for this object.
*/
int
drm_gem_handle_create(struct drm_file *file_priv, struct drm_gem_object *obj,
uint32_t *handle)
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
struct drm_device *dev;
struct drm_gem_object *obj;
obj = drm_gem_names_remove(&filp->object_names, handle);
if (obj == NULL) {
return -EINVAL;
}
dev = obj->dev;
#if defined(FREEBSD_NOTYET)
drm_gem_remove_prime_handles(obj, filp);
#endif
if (dev->driver->gem_close_object)
dev->driver->gem_close_object(obj, filp);
drm_gem_object_handle_unreference_unlocked(obj);
return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);
/**
* Create a handle for this object. This adds a handle reference
* to the object, which includes a regular reference count. Callers
* will likely want to dereference the object afterwards.
*/
int
drm_gem_handle_create(struct drm_file *file_priv,
struct drm_gem_object *obj,
u32 *handlep)
{
struct drm_device *dev = obj->dev;
int ret;
ret = drm_gem_name_create(&file_priv->object_names, obj, handle);
*handlep = 0;
ret = drm_gem_name_create(&file_priv->object_names, obj, handlep);
if (ret != 0)
return (ret);
return ret;
drm_gem_object_handle_reference(obj);
if (dev->driver->gem_open_object) {
ret = dev->driver->gem_open_object(obj, file_priv);
if (ret) {
drm_gem_handle_delete(file_priv, *handle);
drm_gem_handle_delete(file_priv, *handlep);
return ret;
}
}
return (0);
return 0;
}
EXPORT_SYMBOL(drm_gem_handle_create);
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
struct drm_gem_mm *mm = dev->mm_private;
struct drm_hash_item *list = &obj->map_list;
if (!obj->on_map)
return;
drm_ht_remove_item(&mm->offset_hash, list);
free_unr(mm->idxunr, list->key);
obj->on_map = false;
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);
int
drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
struct drm_gem_mm *mm = dev->mm_private;
int ret;
if (obj->on_map)
return 0;
obj->map_list.key = alloc_unr(mm->idxunr);
ret = drm_ht_insert_item(&mm->offset_hash, &obj->map_list);
if (ret) {
DRM_ERROR("failed to add to map hash\n");
free_unr(mm->idxunr, obj->map_list.key);
return ret;
}
obj->on_map = true;
return 0;
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
u32 handle)
{
struct drm_gem_object *obj;
obj = drm_gem_name_ref(&filp->object_names, handle,
(void (*)(void *))drm_gem_object_reference);
return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_gem_close *args = data;
int ret;
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
ret = drm_gem_handle_delete(file_priv, args->handle);
return ret;
}
int
drm_gem_handle_delete(struct drm_file *file_priv, uint32_t handle)
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_device *dev;
struct drm_gem_flink *args = data;
struct drm_gem_object *obj;
int ret;
obj = drm_gem_names_remove(&file_priv->object_names, handle);
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
return (EINVAL);
return -ENOENT;
ret = drm_gem_name_create(&dev->object_names, obj, &obj->name);
if (ret != 0) {
if (ret == -EALREADY)
ret = 0;
drm_gem_object_unreference_unlocked(obj);
}
if (ret == 0)
args->name = obj->name;
return ret;
}
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_gem_open *args = data;
struct drm_gem_object *obj;
int ret;
u32 handle;
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
obj = drm_gem_name_ref(&dev->object_names, args->name,
(void (*)(void *))drm_gem_object_reference);
if (!obj)
return -ENOENT;
ret = drm_gem_handle_create(file_priv, obj, &handle);
drm_gem_object_unreference_unlocked(obj);
if (ret)
return ret;
args->handle = handle;
args->size = obj->size;
return 0;
}
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
drm_gem_names_init(&file_private->object_names);
}
static int
drm_gem_object_release_handle(uint32_t name, void *ptr, void *data)
{
struct drm_file *file_priv = data;
struct drm_gem_object *obj = ptr;
struct drm_device *dev = obj->dev;
#if defined(FREEBSD_NOTYET)
drm_gem_remove_prime_handles(obj, file_priv);
#endif
dev = obj->dev;
if (dev->driver->gem_close_object)
dev->driver->gem_close_object(obj, file_priv);
drm_gem_object_handle_unreference_unlocked(obj);
return (0);
return 0;
}
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
drm_gem_names_foreach(&file_private->object_names,
drm_gem_object_release_handle, file_private);
drm_gem_names_fini(&file_private->object_names);
}
void
@ -288,119 +407,29 @@ drm_gem_object_release(struct drm_gem_object *obj)
*/
vm_object_deallocate(obj->vm_obj);
}
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_gem_open *args;
struct drm_gem_object *obj;
int ret;
uint32_t handle;
if (!drm_core_check_feature(dev, DRIVER_GEM))
return (ENODEV);
args = data;
obj = drm_gem_name_ref(&dev->object_names, args->name,
(void (*)(void *))drm_gem_object_reference);
if (obj == NULL)
return (ENOENT);
handle = 0;
ret = drm_gem_handle_create(file_priv, obj, &handle);
drm_gem_object_unreference_unlocked(obj);
if (ret != 0)
return (ret);
args->handle = handle;
args->size = obj->size;
return (0);
}
EXPORT_SYMBOL(drm_gem_object_release);
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_priv)
drm_gem_object_free(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
drm_gem_names_init(&file_priv->object_names);
DRM_LOCK_ASSERT(dev);
if (dev->driver->gem_free_object != NULL)
dev->driver->gem_free_object(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);
static int
drm_gem_object_release_handle(uint32_t name, void *ptr, void *arg)
void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
struct drm_file *file_priv;
struct drm_gem_object *obj;
struct drm_device *dev;
struct drm_device *dev = obj->dev;
struct drm_gem_object *obj1;
file_priv = arg;
obj = ptr;
dev = obj->dev;
if (dev->driver->gem_close_object)
dev->driver->gem_close_object(obj, file_priv);
drm_gem_object_handle_unreference(obj);
return (0);
}
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_priv)
{
drm_gem_names_foreach(&file_priv->object_names,
drm_gem_object_release_handle, file_priv);
drm_gem_names_fini(&file_priv->object_names);
}
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_gem_close *args;
if (!drm_core_check_feature(dev, DRIVER_GEM))
return (ENODEV);
args = data;
return (drm_gem_handle_delete(file_priv, args->handle));
}
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_gem_flink *args;
struct drm_gem_object *obj;
int error;
if (!drm_core_check_feature(dev, DRIVER_GEM))
return (ENODEV);
args = data;
obj = drm_gem_name_ref(&file_priv->object_names, args->handle,
(void (*)(void *))drm_gem_object_reference);
if (obj == NULL)
return (ENOENT);
error = drm_gem_name_create(&dev->object_names, obj, &obj->name);
if (error != 0) {
if (error == EALREADY)
error = 0;
drm_gem_object_unreference_unlocked(obj);
if (obj->name) {
obj1 = drm_gem_names_remove(&dev->object_names, obj->name);
obj->name = 0;
drm_gem_object_unreference(obj1);
}
if (error == 0)
args->name = obj->name;
return (error);
}
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *file_priv,
uint32_t handle)
{
struct drm_gem_object *obj;
obj = drm_gem_name_ref(&file_priv->object_names, handle,
(void (*)(void *))drm_gem_object_reference);
return (obj);
}
static struct drm_gem_object *
@ -424,46 +453,6 @@ drm_gem_object_from_offset(struct drm_device *dev, vm_ooffset_t offset)
return (obj);
}
int
drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
struct drm_device *dev;
struct drm_gem_mm *mm;
int ret;
if (obj->on_map)
return (0);
dev = obj->dev;
mm = dev->mm_private;
ret = 0;
obj->map_list.key = alloc_unr(mm->idxunr);
ret = drm_ht_insert_item(&mm->offset_hash, &obj->map_list);
if (ret != 0) {
DRM_ERROR("failed to add to map hash\n");
free_unr(mm->idxunr, obj->map_list.key);
return (ret);
}
obj->on_map = true;
return (0);
}
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
struct drm_hash_item *list;
struct drm_gem_mm *mm;
if (!obj->on_map)
return;
mm = obj->dev->mm_private;
list = &obj->map_list;
drm_ht_remove_item(&mm->offset_hash, list);
free_unr(mm->idxunr, list->key);
obj->on_map = false;
}
int
drm_gem_mmap_single(struct drm_device *dev, vm_ooffset_t *offset, vm_size_t size,
struct vm_object **obj_res, int nprot)
@ -475,7 +464,7 @@ drm_gem_mmap_single(struct drm_device *dev, vm_ooffset_t *offset, vm_size_t size
gem_obj = drm_gem_object_from_offset(dev, *offset);
if (gem_obj == NULL) {
DRM_UNLOCK(dev);
return (ENODEV);
return (-ENODEV);
}
drm_gem_object_reference(gem_obj);
DRM_UNLOCK(dev);
@ -484,7 +473,7 @@ drm_gem_mmap_single(struct drm_device *dev, vm_ooffset_t *offset, vm_size_t size
DRM_GEM_MAPPING_MAPOFF(*offset), curthread->td_ucred);
if (vm_obj == NULL) {
drm_gem_object_unreference_unlocked(gem_obj);
return (EINVAL);
return (-EINVAL);
}
*offset = DRM_GEM_MAPPING_MAPOFF(*offset);
*obj_res = vm_obj;

View File

@ -151,7 +151,7 @@ drm_gem_name_create(struct drm_gem_names *names, void *p, uint32_t *name)
struct drm_gem_name *np;
if (*name != 0) {
return (EALREADY);
return (-EALREADY);
}
np = malloc(sizeof(struct drm_gem_name), M_GEM_NAMES, M_WAITOK);
@ -160,7 +160,7 @@ drm_gem_name_create(struct drm_gem_names *names, void *p, uint32_t *name)
if (np->name == -1) {
mtx_unlock(&names->lock);
free(np, M_GEM_NAMES);
return (ENOMEM);
return (-ENOMEM);
}
*name = np->name;
np->ptr = p;

View File

@ -76,7 +76,11 @@ int drm_global_item_ref(struct drm_global_reference *ref)
sx_xlock(&item->mutex);
if (item->refcount == 0) {
item->object = malloc(ref->size, M_DRM_GLOBAL,
M_WAITOK | M_ZERO);
M_NOWAIT | M_ZERO);
if (unlikely(item->object == NULL)) {
ret = -ENOMEM;
goto out_err;
}
ref->object = item->object;
ret = ref->init(ref);
@ -94,6 +98,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
item->object = NULL;
return ret;
}
EXPORT_SYMBOL(drm_global_item_ref);
void drm_global_item_unref(struct drm_global_reference *ref)
{
@ -109,3 +114,4 @@ void drm_global_item_unref(struct drm_global_reference *ref)
}
sx_xunlock(&item->mutex);
}
EXPORT_SYMBOL(drm_global_item_unref);

View File

@ -54,6 +54,7 @@ int drm_ht_create(struct drm_open_hash *ht, unsigned int order)
}
return 0;
}
EXPORT_SYMBOL(drm_ht_create);
void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
{
@ -69,8 +70,8 @@ void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key);
}
static struct drm_hash_item *
drm_ht_find_key(struct drm_open_hash *ht, unsigned long key)
static struct drm_hash_item *drm_ht_find_key(struct drm_open_hash *ht,
unsigned long key)
{
struct drm_hash_item *entry;
struct drm_hash_item_list *h_list;
@ -112,6 +113,7 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
}
return 0;
}
EXPORT_SYMBOL(drm_ht_insert_item);
/*
* Just insert an item and return any "bits" bit key that hasn't been
@ -140,6 +142,7 @@ int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *it
}
return 0;
}
EXPORT_SYMBOL(drm_ht_just_insert_please);
int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
struct drm_hash_item **item)
@ -153,6 +156,7 @@ int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
*item = entry;
return 0;
}
EXPORT_SYMBOL(drm_ht_find_item);
int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
{
@ -171,6 +175,7 @@ int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
LIST_REMOVE(item, head);
return 0;
}
EXPORT_SYMBOL(drm_ht_remove_item);
void drm_ht_remove(struct drm_open_hash *ht)
{
@ -179,3 +184,4 @@ void drm_ht_remove(struct drm_open_hash *ht)
ht->table = NULL;
}
}
EXPORT_SYMBOL(drm_ht_remove);

View File

@ -1,43 +0,0 @@
/*-
* Copyright 2007 Red Hat, Inc
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/* This header file holds function prototypes and data types that are
* internal to the drm (not exported to user space) but shared across
* drivers and platforms */
#ifndef __DRM_INTERNAL_H__
#define __DRM_INTERNAL_H__
/**
* Drawable information.
*/
struct drm_drawable_info {
unsigned int num_rects;
struct drm_clip_rect *rects;
};
#endif

View File

@ -20,9 +20,6 @@
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Paul Mackerras <paulus@samba.org>
*/
#include <sys/cdefs.h>
@ -35,10 +32,6 @@ __FBSDID("$FreeBSD$");
#include <dev/drm2/drmP.h>
#include <dev/drm2/drm.h>
/** @file drm_ioc32.c
* 32-bit ioctl compatibility routines for the DRM.
*/
#define DRM_IOCTL_VERSION32 DRM_IOWR(0x00, drm_version32_t)
#define DRM_IOCTL_GET_UNIQUE32 DRM_IOWR(0x01, drm_unique32_t)
#define DRM_IOCTL_GET_MAP32 DRM_IOWR(0x04, drm_map32_t)
@ -87,7 +80,8 @@ typedef struct drm_version_32 {
u32 desc; /**< User-space buffer to hold desc */
} drm_version32_t;
static int compat_drm_version(struct drm_device *dev, void *data, struct drm_file *file_priv)
static int compat_drm_version(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_version32_t *v32 = data;
struct drm_version version;
@ -99,7 +93,7 @@ static int compat_drm_version(struct drm_device *dev, void *data, struct drm_fil
version.date = (void *)(unsigned long)v32->date;
version.desc_len = v32->desc_len;
version.desc = (void *)(unsigned long)v32->desc;
err = drm_version(dev, (void *)&version, file_priv);
if (err)
return err;
@ -119,7 +113,8 @@ typedef struct drm_unique32 {
u32 unique; /**< Unique name for driver instantiation */
} drm_unique32_t;
static int compat_drm_getunique(struct drm_device *dev, void *data, struct drm_file *file_priv)
static int compat_drm_getunique(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_unique32_t *uq32 = data;
struct drm_unique u;
@ -137,7 +132,8 @@ static int compat_drm_getunique(struct drm_device *dev, void *data, struct drm_f
return 0;
}
static int compat_drm_setunique(struct drm_device *dev, void *data, struct drm_file *file_priv)
static int compat_drm_setunique(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_unique32_t *uq32 = data;
struct drm_unique u;
@ -157,7 +153,8 @@ typedef struct drm_map32 {
int mtrr; /**< MTRR slot used */
} drm_map32_t;
static int compat_drm_getmap(struct drm_device *dev, void *data, struct drm_file *file_priv)
static int compat_drm_getmap(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_map32_t *m32 = data;
struct drm_map map;
@ -183,13 +180,14 @@ static int compat_drm_getmap(struct drm_device *dev, void *data, struct drm_file
}
static int compat_drm_addmap(struct drm_device *dev, void *data, struct drm_file *file_priv)
static int compat_drm_addmap(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_map32_t *m32 = data;
struct drm_map map;
int err;
void *handle;
map.offset = (unsigned long)m32->offset;
map.size = (unsigned long)m32->size;
map.type = m32->type;
@ -202,7 +200,7 @@ static int compat_drm_addmap(struct drm_device *dev, void *data, struct drm_file
m32->offset = map.offset;
m32->mtrr = map.mtrr;
handle = map.handle;
m32->handle = (unsigned long)handle;
if (m32->handle != (unsigned long)handle)
DRM_DEBUG("compat_drm_addmap truncated handle"
@ -212,7 +210,8 @@ static int compat_drm_addmap(struct drm_device *dev, void *data, struct drm_file
return 0;
}
static int compat_drm_rmmap(struct drm_device *dev, void *data, struct drm_file *file_priv)
static int compat_drm_rmmap(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_map32_t *m32 = data;
struct drm_map map;
@ -231,7 +230,8 @@ typedef struct drm_client32 {
u32 iocs; /**< Ioctl count */
} drm_client32_t;
static int compat_drm_getclient(struct drm_device *dev, void *data, struct drm_file *file_priv)
static int compat_drm_getclient(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_client32_t *c32 = data;
struct drm_client client;
@ -261,7 +261,8 @@ typedef struct drm_stats32 {
} data[15];
} drm_stats32_t;
static int compat_drm_getstats(struct drm_device *dev, void *data, struct drm_file *file_priv)
static int compat_drm_getstats(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_stats32_t *s32 = data;
struct drm_stats stats;
@ -289,7 +290,8 @@ typedef struct drm_buf_desc32 {
u32 agp_start; /**< Start address in the AGP aperture */
} drm_buf_desc32_t;
static int compat_drm_addbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
static int compat_drm_addbufs(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_buf_desc32_t *b32 = data;
struct drm_buf_desc buf;
@ -312,11 +314,12 @@ static int compat_drm_addbufs(struct drm_device *dev, void *data, struct drm_fil
b32->high_mark = buf.high_mark;
b32->flags = buf.flags;
b32->agp_start = buf.agp_start;
return 0;
}
static int compat_drm_markbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
static int compat_drm_markbufs(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_buf_desc32_t *b32 = data;
struct drm_buf_desc buf;
@ -324,7 +327,7 @@ static int compat_drm_markbufs(struct drm_device *dev, void *data, struct drm_fi
buf.size = b32->size;
buf.low_mark = b32->low_mark;
buf.high_mark = b32->high_mark;
return drm_markbufs(dev, (void *)&buf, file_priv);
}
@ -333,7 +336,8 @@ typedef struct drm_buf_info32 {
u32 list;
} drm_buf_info32_t;
static int compat_drm_infobufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
static int compat_drm_infobufs(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_buf_info32_t *req32 = data;
drm_buf_desc32_t *to;
@ -351,7 +355,7 @@ static int compat_drm_infobufs(struct drm_device *dev, void *data, struct drm_fi
nbytes = sizeof(*request) + count * sizeof(struct drm_buf_desc);
request = malloc(nbytes, DRM_MEM_BUFLISTS, M_ZERO | M_NOWAIT);
if (!request)
return -EFAULT;
return -ENOMEM;
list = (struct drm_buf_desc *) (request + 1);
request->count = count;
@ -389,7 +393,8 @@ typedef struct drm_buf_map32 {
u32 list; /**< Buffer information */
} drm_buf_map32_t;
static int compat_drm_mapbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
static int compat_drm_mapbufs(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_buf_map32_t *req32 = data;
drm_buf_pub32_t *list32;
@ -407,7 +412,7 @@ static int compat_drm_mapbufs(struct drm_device *dev, void *data, struct drm_fil
nbytes = sizeof(*request) + count * sizeof(struct drm_buf_pub);
request = malloc(nbytes, DRM_MEM_BUFLISTS, M_ZERO | M_NOWAIT);
if (!request)
return -EFAULT;
return -ENOMEM;
list = (struct drm_buf_pub *) (request + 1);
request->count = count;
@ -437,7 +442,8 @@ typedef struct drm_buf_free32 {
u32 list;
} drm_buf_free32_t;
static int compat_drm_freebufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
static int compat_drm_freebufs(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_buf_free32_t *req32 = data;
struct drm_buf_free request;
@ -453,7 +459,8 @@ typedef struct drm_ctx_priv_map32 {
u32 handle; /**< Handle of map */
} drm_ctx_priv_map32_t;
static int compat_drm_setsareactx(struct drm_device *dev, void *data, struct drm_file *file_priv)
static int compat_drm_setsareactx(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_ctx_priv_map32_t *req32 = data;
struct drm_ctx_priv_map request;
@ -464,7 +471,8 @@ static int compat_drm_setsareactx(struct drm_device *dev, void *data, struct drm
return drm_setsareactx(dev, (void *)&request, file_priv);
}
static int compat_drm_getsareactx(struct drm_device *dev, void *data, struct drm_file *file_priv)
static int compat_drm_getsareactx(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_ctx_priv_map32_t *req32 = data;
struct drm_ctx_priv_map request;
@ -486,7 +494,8 @@ typedef struct drm_ctx_res32 {
u32 contexts;
} drm_ctx_res32_t;
static int compat_drm_resctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
static int compat_drm_resctx(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_ctx_res32_t *res32 = data;
struct drm_ctx_res res;
@ -517,12 +526,18 @@ typedef struct drm_dma32 {
int granted_count; /**< Number of buffers granted */
} drm_dma32_t;
static int compat_drm_dma(struct drm_device *dev, void *data, struct drm_file *file_priv)
static int compat_drm_dma(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_dma32_t *d32 = data;
struct drm_dma d;
int err;
if (!dev->driver->dma_ioctl) {
DRM_DEBUG("DMA ioctl on driver with no dma handler\n");
return -EINVAL;
}
d.context = d32->context;
d.send_count = d32->send_count;
d.send_indices = (int *)(unsigned long)d32->send_indices;
@ -532,7 +547,7 @@ static int compat_drm_dma(struct drm_device *dev, void *data, struct drm_file *f
d.request_indices = (int *)(unsigned long)d32->request_indices;
d.request_sizes = (int *)(unsigned long)d32->request_sizes;
err = drm_dma(dev, (void *)&d, file_priv);
err = dev->driver->dma_ioctl(dev, (void *)&d, file_priv);
if (err)
return err;
@ -542,11 +557,13 @@ static int compat_drm_dma(struct drm_device *dev, void *data, struct drm_file *f
return 0;
}
#if __OS_HAS_AGP
typedef struct drm_agp_mode32 {
u32 mode; /**< AGP mode */
} drm_agp_mode32_t;
static int compat_drm_agp_enable(struct drm_device *dev, void *data, struct drm_file *file_priv)
static int compat_drm_agp_enable(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_agp_mode32_t *m32 = data;
struct drm_agp_mode mode;
@ -570,7 +587,8 @@ typedef struct drm_agp_info32 {
unsigned short id_device;
} drm_agp_info32_t;
static int compat_drm_agp_info(struct drm_device *dev, void *data, struct drm_file *file_priv)
static int compat_drm_agp_info(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_agp_info32_t *i32 = data;
struct drm_agp_info info;
@ -600,7 +618,8 @@ typedef struct drm_agp_buffer32 {
u32 physical; /**< Physical used by i810 */
} drm_agp_buffer32_t;
static int compat_drm_agp_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv)
static int compat_drm_agp_alloc(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_agp_buffer32_t *req32 = data;
struct drm_agp_buffer request;
@ -619,7 +638,8 @@ static int compat_drm_agp_alloc(struct drm_device *dev, void *data, struct drm_f
return 0;
}
static int compat_drm_agp_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
static int compat_drm_agp_free(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_agp_buffer32_t *req32 = data;
struct drm_agp_buffer request;
@ -634,7 +654,8 @@ typedef struct drm_agp_binding32 {
u32 offset; /**< In bytes -- will round to page boundary */
} drm_agp_binding32_t;
static int compat_drm_agp_bind(struct drm_device *dev, void *data, struct drm_file *file_priv)
static int compat_drm_agp_bind(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_agp_binding32_t *req32 = data;
struct drm_agp_binding request;
@ -645,22 +666,25 @@ static int compat_drm_agp_bind(struct drm_device *dev, void *data, struct drm_fi
return drm_agp_bind_ioctl(dev, (void *)&request, file_priv);
}
static int compat_drm_agp_unbind(struct drm_device *dev, void *data, struct drm_file *file_priv)
static int compat_drm_agp_unbind(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_agp_binding32_t *req32 = data;
struct drm_agp_binding request;
request.handle = req32->handle;
return drm_agp_unbind_ioctl(dev, (void *)&request, file_priv);
}
#endif /* __OS_HAS_AGP */
typedef struct drm_scatter_gather32 {
u32 size; /**< In bytes -- will round to page boundary */
u32 handle; /**< Used for mapping / unmapping */
} drm_scatter_gather32_t;
static int compat_drm_sg_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv)
static int compat_drm_sg_alloc(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_scatter_gather32_t *req32 = data;
struct drm_scatter_gather request;
@ -678,7 +702,8 @@ static int compat_drm_sg_alloc(struct drm_device *dev, void *data, struct drm_fi
return 0;
}
static int compat_drm_sg_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
static int compat_drm_sg_free(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_scatter_gather32_t *req32 = data;
struct drm_scatter_gather request;
@ -688,6 +713,7 @@ static int compat_drm_sg_free(struct drm_device *dev, void *data, struct drm_fil
return drm_sg_free(dev, (void *)&request, file_priv);
}
#if defined(CONFIG_X86) || defined(CONFIG_IA64)
typedef struct drm_update_draw32 {
drm_drawable_t handle;
unsigned int type;
@ -695,21 +721,7 @@ typedef struct drm_update_draw32 {
/* 64-bit version has a 32-bit pad here */
u64 data; /**< Pointer */
} __attribute__((packed)) drm_update_draw32_t;
static int compat_drm_update_draw(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_update_draw32_t *update32 = data;
struct drm_update_draw request;
int err;
request.handle = update32->handle;
request.type = update32->type;
request.num = update32->num;
request.data = update32->data;
err = drm_update_draw(dev, (void *)&request, file_priv);
return err;
}
#endif
struct drm_wait_vblank_request32 {
enum drm_vblank_seq_type type;
@ -729,7 +741,8 @@ typedef union drm_wait_vblank32 {
struct drm_wait_vblank_reply32 reply;
} drm_wait_vblank32_t;
static int compat_drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_priv)
static int compat_drm_wait_vblank(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_wait_vblank32_t *req32 = data;
union drm_wait_vblank request;
@ -751,12 +764,12 @@ static int compat_drm_wait_vblank(struct drm_device *dev, void *data, struct drm
return 0;
}
drm_ioctl_desc_t drm_compat_ioctls[256] = {
DRM_IOCTL_DEF(DRM_IOCTL_VERSION32, compat_drm_version, 0),
struct drm_ioctl_desc drm_compat_ioctls[256] = {
DRM_IOCTL_DEF(DRM_IOCTL_VERSION32, compat_drm_version, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE32, compat_drm_getunique, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP32, compat_drm_getmap, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT32, compat_drm_getclient, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS32, compat_drm_getstats, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP32, compat_drm_getmap, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT32, compat_drm_getclient, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS32, compat_drm_getstats, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE32, compat_drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP32, compat_drm_addmap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS32, compat_drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@ -769,19 +782,19 @@ drm_ioctl_desc_t drm_compat_ioctls[256] = {
DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX32, compat_drm_getsareactx, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX32, compat_drm_resctx, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_DMA32, compat_drm_dma, DRM_AUTH),
#if __OS_HAS_AGP
DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE32, compat_drm_agp_enable, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO32, compat_drm_agp_info, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC32, compat_drm_agp_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE32, compat_drm_agp_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND32, compat_drm_agp_bind, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND32, compat_drm_agp_unbind, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
#endif
DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC32, compat_drm_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE32, compat_drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW32, compat_drm_update_draw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
#if defined(CONFIG_X86) || defined(CONFIG_IA64)
DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW32, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
#endif
DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK32, compat_drm_wait_vblank, DRM_UNLOCKED),
};

View File

@ -1,4 +1,14 @@
/*-
/**
* \file drm_ioctl.c
* IOCTL processing for DRM
*
* \author Rickard E. (Rik) Faith <faith@valinux.com>
* \author Gareth Hughes <gareth@valinux.com>
*/
/*
* Created: Fri Jan 8 09:01:26 1999 by faith@valinux.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
@ -21,216 +31,237 @@
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
* Gareth Hughes <gareth@valinux.com>
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/** @file drm_ioctl.c
* Varios minor DRM ioctls not applicable to other files, such as versioning
* information and reporting DRM information to userland.
*/
#include <dev/drm2/drmP.h>
#include <dev/drm2/drm_core.h>
/*
* Beginning in revision 1.1 of the DRM interface, getunique will return
* a unique in the form pci:oooo:bb:dd.f (o=domain, b=bus, d=device, f=function)
* before setunique has been called. The format for the bus-specific part of
* the unique is not defined for any other bus.
/**
* Get the bus id.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument, pointing to a drm_unique structure.
* \return zero on success or a negative number on failure.
*
* Copies the bus id from drm_device::unique into user space.
*/
int drm_getunique(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_unique *u = data;
struct drm_master *master = file_priv->master;
if (u->unique_len >= dev->unique_len) {
if (DRM_COPY_TO_USER(u->unique, dev->unique, dev->unique_len))
return EFAULT;
if (u->unique_len >= master->unique_len) {
if (copy_to_user(u->unique, master->unique, master->unique_len))
return -EFAULT;
}
u->unique_len = dev->unique_len;
u->unique_len = master->unique_len;
return 0;
}
/* Deprecated in DRM version 1.1, and will return EBUSY when setversion has
* requested version 1.1 or greater.
static void
drm_unset_busid(struct drm_device *dev,
struct drm_master *master)
{
free(master->unique, DRM_MEM_DRIVER);
master->unique = NULL;
master->unique_len = 0;
master->unique_size = 0;
}
/**
* Set the bus id.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument, pointing to a drm_unique structure.
* \return zero on success or a negative number on failure.
*
* Copies the bus id from userspace into drm_device::unique, and verifies that
* it matches the device this DRM is attached to (EINVAL otherwise). Deprecated
* in interface version 1.1 and will return EBUSY when setversion has requested
* version 1.1 or greater.
*/
int drm_setunique(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_unique *u = data;
int domain, bus, slot, func, ret;
char *busid;
struct drm_master *master = file_priv->master;
int ret;
if (master->unique_len || master->unique)
return -EBUSY;
/* Check and copy in the submitted Bus ID */
if (!u->unique_len || u->unique_len > 1024)
return EINVAL;
return -EINVAL;
busid = malloc(u->unique_len + 1, DRM_MEM_DRIVER, M_WAITOK);
if (busid == NULL)
return ENOMEM;
if (!dev->driver->bus->set_unique)
return -EINVAL;
if (DRM_COPY_FROM_USER(busid, u->unique, u->unique_len)) {
free(busid, DRM_MEM_DRIVER);
return EFAULT;
}
busid[u->unique_len] = '\0';
/* Return error if the busid submitted doesn't match the device's actual
* busid.
*/
ret = sscanf(busid, "PCI:%d:%d:%d", &bus, &slot, &func);
if (ret != 3) {
free(busid, DRM_MEM_DRIVER);
return EINVAL;
}
domain = bus >> 8;
bus &= 0xff;
if ((domain != dev->pci_domain) ||
(bus != dev->pci_bus) ||
(slot != dev->pci_slot) ||
(func != dev->pci_func)) {
free(busid, DRM_MEM_DRIVER);
return EINVAL;
}
/* Actually set the device's busid now. */
DRM_LOCK(dev);
if (dev->unique_len || dev->unique) {
DRM_UNLOCK(dev);
return EBUSY;
}
dev->unique_len = u->unique_len;
dev->unique = busid;
DRM_UNLOCK(dev);
ret = dev->driver->bus->set_unique(dev, master, u);
if (ret)
goto err;
return 0;
err:
drm_unset_busid(dev, master);
return ret;
}
static int
drm_set_busid(struct drm_device *dev)
static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
{
struct drm_master *master = file_priv->master;
int ret;
DRM_LOCK(dev);
if (dev->unique != NULL) {
DRM_UNLOCK(dev);
return EBUSY;
}
dev->unique_len = 20;
dev->unique = malloc(dev->unique_len + 1, DRM_MEM_DRIVER, M_NOWAIT);
if (dev->unique == NULL) {
DRM_UNLOCK(dev);
return ENOMEM;
}
snprintf(dev->unique, dev->unique_len, "pci:%04x:%02x:%02x.%1x",
dev->pci_domain, dev->pci_bus, dev->pci_slot, dev->pci_func);
DRM_UNLOCK(dev);
if (master->unique != NULL)
drm_unset_busid(dev, master);
ret = dev->driver->bus->set_busid(dev, master);
if (ret)
goto err;
return 0;
err:
drm_unset_busid(dev, master);
return ret;
}
int drm_getmap(struct drm_device *dev, void *data, struct drm_file *file_priv)
/**
* Get a mapping information.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument, pointing to a drm_map structure.
*
* \return zero on success or a negative number on failure.
*
* Searches for the mapping with the specified offset and copies its information
* into userspace
*/
int drm_getmap(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_map *map = data;
drm_local_map_t *mapinlist;
int idx;
int i = 0;
struct drm_map *map = data;
struct drm_map_list *r_list = NULL;
struct list_head *list;
int idx;
int i;
idx = map->offset;
if (idx < 0)
return -EINVAL;
i = 0;
DRM_LOCK(dev);
if (idx < 0) {
DRM_UNLOCK(dev);
return EINVAL;
}
TAILQ_FOREACH(mapinlist, &dev->maplist, link) {
list_for_each(list, &dev->maplist) {
if (i == idx) {
map->offset = mapinlist->offset;
map->size = mapinlist->size;
map->type = mapinlist->type;
map->flags = mapinlist->flags;
map->handle = mapinlist->handle;
map->mtrr = mapinlist->mtrr;
r_list = list_entry(list, struct drm_map_list, head);
break;
}
i++;
}
if (!r_list || !r_list->map) {
DRM_UNLOCK(dev);
return -EINVAL;
}
map->offset = r_list->map->offset;
map->size = r_list->map->size;
map->type = r_list->map->type;
map->flags = r_list->map->flags;
map->handle = (void *)(unsigned long) r_list->user_token;
map->mtrr = r_list->map->mtrr;
DRM_UNLOCK(dev);
if (mapinlist == NULL)
return EINVAL;
return 0;
}
/**
* Get client information.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument, pointing to a drm_client structure.
*
* \return zero on success or a negative number on failure.
*
* Searches for the client with the specified index and copies its information
* into userspace
*/
int drm_getclient(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_client *client = data;
struct drm_file *pt;
int idx;
int i = 0;
int i;
idx = client->idx;
i = 0;
DRM_LOCK(dev);
TAILQ_FOREACH(pt, &dev->files, link) {
if (i == idx) {
client->auth = pt->authenticated;
client->pid = pt->pid;
client->uid = pt->uid;
list_for_each_entry(pt, &dev->filelist, lhead) {
if (i++ >= idx) {
client->auth = pt->authenticated;
client->pid = pt->pid;
client->uid = pt->uid;
client->magic = pt->magic;
client->iocs = pt->ioctl_count;
client->iocs = pt->ioctl_count;
DRM_UNLOCK(dev);
return 0;
}
i++;
}
DRM_UNLOCK(dev);
return EINVAL;
return -EINVAL;
}
int drm_getstats(struct drm_device *dev, void *data, struct drm_file *file_priv)
/**
* Get statistics information.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument, pointing to a drm_stats structure.
*
* \return zero on success or a negative number on failure.
*/
int drm_getstats(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_stats *stats = data;
int i;
int i;
memset(stats, 0, sizeof(struct drm_stats));
DRM_LOCK(dev);
memset(stats, 0, sizeof(*stats));
for (i = 0; i < dev->counters; i++) {
if (dev->types[i] == _DRM_STAT_LOCK)
stats->data[i].value =
(dev->lock.hw_lock ? dev->lock.hw_lock->lock : 0);
else
(file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
else
stats->data[i].value = atomic_read(&dev->counts[i]);
stats->data[i].type = dev->types[i];
}
stats->count = dev->counters;
DRM_UNLOCK(dev);
stats->count = dev->counters;
return 0;
}
/**
* Get device/driver capabilities
*/
int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_get_cap *req = data;
@ -258,67 +289,73 @@ int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
req->value = drm_timestamp_monotonic;
break;
default:
return EINVAL;
return -EINVAL;
}
return 0;
}
int drm_setversion(struct drm_device *dev, void *data,
struct drm_file *file_priv)
/**
* Setversion ioctl.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument, pointing to a drm_lock structure.
* \return zero on success or negative number on failure.
*
* Sets the requested interface version
*/
int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_set_version *sv = data;
struct drm_set_version ver;
int if_version;
int if_version, retcode = 0;
/* Save the incoming data, and set the response before continuing
* any further.
*/
ver = *sv;
if (sv->drm_di_major != -1) {
if (sv->drm_di_major != DRM_IF_MAJOR ||
sv->drm_di_minor < 0 || sv->drm_di_minor > DRM_IF_MINOR) {
retcode = -EINVAL;
goto done;
}
if_version = DRM_IF_VERSION(sv->drm_di_major,
sv->drm_di_minor);
dev->if_version = max(if_version, dev->if_version);
if (sv->drm_di_minor >= 1) {
/*
* Version 1.1 includes tying of DRM to specific device
* Version 1.4 has proper PCI domain support
*/
retcode = drm_set_busid(dev, file_priv);
if (retcode)
goto done;
}
}
if (sv->drm_dd_major != -1) {
if (sv->drm_dd_major != dev->driver->major ||
sv->drm_dd_minor < 0 || sv->drm_dd_minor >
dev->driver->minor) {
retcode = -EINVAL;
goto done;
}
if (dev->driver->set_version)
dev->driver->set_version(dev, sv);
}
done:
sv->drm_di_major = DRM_IF_MAJOR;
sv->drm_di_minor = DRM_IF_MINOR;
sv->drm_dd_major = dev->driver->major;
sv->drm_dd_minor = dev->driver->minor;
DRM_DEBUG("ver.drm_di_major %d ver.drm_di_minor %d "
"ver.drm_dd_major %d ver.drm_dd_minor %d\n",
ver.drm_di_major, ver.drm_di_minor, ver.drm_dd_major,
ver.drm_dd_minor);
DRM_DEBUG("sv->drm_di_major %d sv->drm_di_minor %d "
"sv->drm_dd_major %d sv->drm_dd_minor %d\n",
sv->drm_di_major, sv->drm_di_minor, sv->drm_dd_major,
sv->drm_dd_minor);
if (ver.drm_di_major != -1) {
if (ver.drm_di_major != DRM_IF_MAJOR ||
ver.drm_di_minor < 0 || ver.drm_di_minor > DRM_IF_MINOR) {
return EINVAL;
}
if_version = DRM_IF_VERSION(ver.drm_di_major,
ver.drm_dd_minor);
dev->if_version = DRM_MAX(if_version, dev->if_version);
if (ver.drm_di_minor >= 1) {
/*
* Version 1.1 includes tying of DRM to specific device
*/
drm_set_busid(dev);
}
}
if (ver.drm_dd_major != -1) {
if (ver.drm_dd_major != dev->driver->major ||
ver.drm_dd_minor < 0 ||
ver.drm_dd_minor > dev->driver->minor)
{
return EINVAL;
}
}
return 0;
return retcode;
}
int drm_noop(struct drm_device *dev, void *data, struct drm_file *file_priv)
/** No-op ioctl. */
int drm_noop(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
DRM_DEBUG("\n");
return 0;
}
EXPORT_SYMBOL(drm_noop);

File diff suppressed because it is too large Load Diff

View File

@ -1,4 +1,14 @@
/*-
/**
* \file drm_lock.c
* IOCTLs for locking
*
* \author Rickard E. (Rik) Faith <faith@valinux.com>
* \author Gareth Hughes <gareth@valinux.com>
*/
/*
* Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
@ -21,179 +31,346 @@
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
* Gareth Hughes <gareth@valinux.com>
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/** @file drm_lock.c
* Implementation of the ioctls and other support code for dealing with the
* hardware lock.
*
* The DRM hardware lock is a shared structure between the kernel and userland.
*
* On uncontended access where the new context was the last context, the
* client may take the lock without dropping down into the kernel, using atomic
* compare-and-set.
*
* If the client finds during compare-and-set that it was not the last owner
* of the lock, it calls the DRM lock ioctl, which may sleep waiting for the
* lock, and may have side-effects of kernel-managed context switching.
*
* When the client releases the lock, if the lock is marked as being contended
* by another client, then the DRM unlock ioctl is called so that the
* contending client may be woken up.
*/
#include <dev/drm2/drmP.h>
static int drm_notifier(void *priv);
static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
/**
* Lock ioctl.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument, pointing to a drm_lock structure.
* \return zero on success or negative number on failure.
*
* Add the current task to the lock wait queue, and attempt to take to lock.
*/
int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_lock *lock = data;
struct drm_master *master = file_priv->master;
int ret = 0;
++file_priv->lock_count;
if (lock->context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
DRM_CURRENTPID, lock->context);
return EINVAL;
DRM_CURRENTPID, lock->context);
return -EINVAL;
}
DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
lock->context, DRM_CURRENTPID, dev->lock.hw_lock->lock,
lock->flags);
lock->context, DRM_CURRENTPID,
master->lock.hw_lock->lock, lock->flags);
if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) &&
lock->context < 0)
return EINVAL;
mtx_lock(&master->lock.spinlock);
master->lock.user_waiters++;
mtx_unlock(&master->lock.spinlock);
DRM_LOCK(dev);
for (;;) {
if (drm_lock_take(&dev->lock, lock->context)) {
dev->lock.file_priv = file_priv;
dev->lock.lock_time = jiffies;
#if defined(__linux__)
if (!master->lock.hw_lock) {
/* Device has been unregistered */
send_sig(SIGTERM, current, 0);
ret = -EINTR;
break;
}
#endif
if (drm_lock_take(&master->lock, lock->context)) {
master->lock.file_priv = file_priv;
master->lock.lock_time = jiffies;
atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
break; /* Got lock */
break; /* Got lock */
}
/* Contention */
ret = DRM_LOCK_SLEEP(dev, &dev->lock.lock_queue,
DRM_UNLOCK_ASSERT(dev);
ret = -sx_sleep(&master->lock.lock_queue, &drm_global_mutex,
PCATCH, "drmlk2", 0);
if (ret == -ERESTART)
ret = -ERESTARTSYS;
if (ret != 0)
break;
}
DRM_UNLOCK(dev);
mtx_lock(&master->lock.spinlock);
master->lock.user_waiters--;
mtx_unlock(&master->lock.spinlock);
if (ret == ERESTART)
DRM_DEBUG("restarting syscall\n");
else
DRM_DEBUG("%d %s\n", lock->context,
ret ? "interrupted" : "has lock");
DRM_DEBUG("%d %s\n", lock->context,
ret ? "interrupted" : "has lock");
if (ret) return ret;
if (ret != 0)
return ret;
#if defined(__linux__)
/* don't set the block all signals on the master process for now
* really probably not the correct answer but lets us debug xkb
* xserver for now */
if (!file_priv->is_master) {
sigemptyset(&dev->sigmask);
sigaddset(&dev->sigmask, SIGSTOP);
sigaddset(&dev->sigmask, SIGTSTP);
sigaddset(&dev->sigmask, SIGTTIN);
sigaddset(&dev->sigmask, SIGTTOU);
dev->sigdata.context = lock->context;
dev->sigdata.lock = master->lock.hw_lock;
block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);
}
#endif
/* XXX: Add signal blocking here */
if (dev->driver->dma_quiescent != NULL &&
(lock->flags & _DRM_LOCK_QUIESCENT))
dev->driver->dma_quiescent(dev);
if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT))
{
if (dev->driver->dma_quiescent(dev)) {
DRM_DEBUG("%d waiting for DMA quiescent\n",
lock->context);
return -EBUSY;
}
}
return 0;
}
/**
* Unlock ioctl.
*
* \param inode device inode.
* \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument, pointing to a drm_lock structure.
* \return zero on success or negative number on failure.
*
* Transfer and free the lock.
*/
int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_lock *lock = data;
DRM_DEBUG("%d (pid %d) requests unlock (0x%08x), flags = 0x%08x\n",
lock->context, DRM_CURRENTPID, dev->lock.hw_lock->lock,
lock->flags);
struct drm_master *master = file_priv->master;
if (lock->context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
DRM_CURRENTPID, lock->context);
return EINVAL;
DRM_CURRENTPID, lock->context);
return -EINVAL;
}
atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
DRM_LOCK(dev);
drm_lock_transfer(&dev->lock, DRM_KERNEL_CONTEXT);
if (drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT)) {
DRM_ERROR("\n");
if (drm_lock_free(&master->lock, lock->context)) {
/* FIXME: Should really bail out here. */
}
DRM_UNLOCK(dev);
#if defined(__linux__)
unblock_all_signals();
#endif
return 0;
}
int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context)
/**
* Take the heavyweight lock.
*
* \param lock lock pointer.
* \param context locking context.
* \return one if the lock is held, or zero otherwise.
*
* Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
*/
static
int drm_lock_take(struct drm_lock_data *lock_data,
unsigned int context)
{
unsigned int old, new, prev;
volatile unsigned int *lock = &lock_data->hw_lock->lock;
unsigned int old, new;
mtx_lock(&lock_data->spinlock);
do {
old = *lock;
if (old & _DRM_LOCK_HELD)
new = old | _DRM_LOCK_CONT;
else
new = context | _DRM_LOCK_HELD;
} while (!atomic_cmpset_int(lock, old, new));
else {
new = context | _DRM_LOCK_HELD |
((lock_data->user_waiters + lock_data->kernel_waiters > 1) ?
_DRM_LOCK_CONT : 0);
}
prev = cmpxchg(lock, old, new);
} while (prev != old);
mtx_unlock(&lock_data->spinlock);
if (_DRM_LOCKING_CONTEXT(old) == context) {
if (old & _DRM_LOCK_HELD) {
if (context != DRM_KERNEL_CONTEXT) {
DRM_ERROR("%d holds heavyweight lock\n",
context);
context);
}
return 0;
}
}
if (new == (context | _DRM_LOCK_HELD)) {
if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) {
/* Have lock */
return 1;
}
return 0;
}
/* This takes a lock forcibly and hands it to context. Should ONLY be used
inside *_unlock to give lock to kernel before calling *_dma_schedule. */
int drm_lock_transfer(struct drm_lock_data *lock_data, unsigned int context)
/**
* This takes a lock forcibly and hands it to context. Should ONLY be used
* inside *_unlock to give lock to kernel before calling *_dma_schedule.
*
* \param dev DRM device.
* \param lock lock pointer.
* \param context locking context.
* \return always one.
*
* Resets the lock file pointer.
* Marks the lock as held by the given context, via the \p cmpxchg instruction.
*/
static int drm_lock_transfer(struct drm_lock_data *lock_data,
unsigned int context)
{
unsigned int old, new, prev;
volatile unsigned int *lock = &lock_data->hw_lock->lock;
unsigned int old, new;
lock_data->file_priv = NULL;
do {
old = *lock;
new = context | _DRM_LOCK_HELD;
} while (!atomic_cmpset_int(lock, old, new));
prev = cmpxchg(lock, old, new);
} while (prev != old);
return 1;
}
/**
* Free lock.
*
* \param dev DRM device.
* \param lock lock.
* \param context context.
*
* Resets the lock file pointer.
* Marks the lock as not held, via the \p cmpxchg instruction. Wakes any task
* waiting on the lock queue.
*/
int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context)
{
unsigned int old, new, prev;
volatile unsigned int *lock = &lock_data->hw_lock->lock;
unsigned int old, new;
lock_data->file_priv = NULL;
mtx_lock(&lock_data->spinlock);
if (lock_data->kernel_waiters != 0) {
drm_lock_transfer(lock_data, 0);
lock_data->idle_has_lock = 1;
mtx_unlock(&lock_data->spinlock);
return 1;
}
mtx_unlock(&lock_data->spinlock);
do {
old = *lock;
new = 0;
} while (!atomic_cmpset_int(lock, old, new));
new = _DRM_LOCKING_CONTEXT(old);
prev = cmpxchg(lock, old, new);
} while (prev != old);
if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
DRM_ERROR("%d freed heavyweight lock held by %d\n",
context, _DRM_LOCKING_CONTEXT(old));
context, _DRM_LOCKING_CONTEXT(old));
return 1;
}
DRM_WAKEUP_INT((void *)&lock_data->lock_queue);
wake_up_interruptible(&lock_data->lock_queue);
return 0;
}
/**
* If we get here, it means that the process has called DRM_IOCTL_LOCK
* without calling DRM_IOCTL_UNLOCK.
*
* If the lock is not held, then let the signal proceed as usual. If the lock
* is held, then set the contended flag and keep the signal blocked.
*
* \param priv pointer to a drm_sigdata structure.
* \return one if the signal should be delivered normally, or zero if the
* signal should be blocked.
*/
static int drm_notifier(void *priv)
{
struct drm_sigdata *s = (struct drm_sigdata *) priv;
unsigned int old, new, prev;
/* Allow signal delivery if lock isn't held */
if (!s->lock || !_DRM_LOCK_IS_HELD(s->lock->lock)
|| _DRM_LOCKING_CONTEXT(s->lock->lock) != s->context)
return 1;
/* Otherwise, set flag to force call to
drmUnlock */
do {
old = s->lock->lock;
new = old | _DRM_LOCK_CONT;
prev = cmpxchg(&s->lock->lock, old, new);
} while (prev != old);
return 0;
}
/**
* This function returns immediately and takes the hw lock
* with the kernel context if it is free, otherwise it gets the highest priority when and if
* it is eventually released.
*
* This guarantees that the kernel will _eventually_ have the lock _unless_ it is held
* by a blocked process. (In the latter case an explicit wait for the hardware lock would cause
* a deadlock, which is why the "idlelock" was invented).
*
* This should be sufficient to wait for GPU idle without
* having to worry about starvation.
*/
void drm_idlelock_take(struct drm_lock_data *lock_data)
{
int ret;
mtx_lock(&lock_data->spinlock);
lock_data->kernel_waiters++;
if (!lock_data->idle_has_lock) {
mtx_unlock(&lock_data->spinlock);
ret = drm_lock_take(lock_data, DRM_KERNEL_CONTEXT);
mtx_lock(&lock_data->spinlock);
if (ret == 1)
lock_data->idle_has_lock = 1;
}
mtx_unlock(&lock_data->spinlock);
}
EXPORT_SYMBOL(drm_idlelock_take);
void drm_idlelock_release(struct drm_lock_data *lock_data)
{
unsigned int old, prev;
volatile unsigned int *lock = &lock_data->hw_lock->lock;
mtx_lock(&lock_data->spinlock);
if (--lock_data->kernel_waiters == 0) {
if (lock_data->idle_has_lock) {
do {
old = *lock;
prev = cmpxchg(lock, old, DRM_KERNEL_CONTEXT);
} while (prev != old);
wake_up_interruptible(&lock_data->lock_queue);
lock_data->idle_has_lock = 0;
}
}
mtx_unlock(&lock_data->spinlock);
}
EXPORT_SYMBOL(drm_idlelock_release);
int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
{
struct drm_master *master = file_priv->master;
return (file_priv->lock_count && master->lock.hw_lock &&
_DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) &&
master->lock.file_priv == file_priv);
}

View File

@ -1,11 +1,17 @@
/*-
*Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* Copyright (c) 2011 The FreeBSD Foundation
* All rights reserved.
/**
* \file drm_memory.c
* Memory management wrappers for DRM
*
* Portions of this software were developed by Konstantin Belousov
* under sponsorship from the FreeBSD Foundation.
* \author Rickard E. (Rik) Faith <faith@valinux.com>
* \author Gareth Hughes <gareth@valinux.com>
*/
/*
* Created: Thu Feb 4 14:00:34 1999 by faith@valinux.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@ -25,111 +31,104 @@
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
* Gareth Hughes <gareth@valinux.com>
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/** @file drm_memory.c
* Wrappers for kernel memory allocation routines, and MTRR management support.
*
* This file previously implemented a memory consumption tracking system using
* the "area" argument for various different types of allocations, but that
* has been stripped out for now.
*/
#include <dev/drm2/drmP.h>
MALLOC_DEFINE(DRM_MEM_DMA, "drm_dma", "DRM DMA Data Structures");
MALLOC_DEFINE(DRM_MEM_SAREA, "drm_sarea", "DRM SAREA Data Structures");
MALLOC_DEFINE(DRM_MEM_DRIVER, "drm_driver", "DRM DRIVER Data Structures");
MALLOC_DEFINE(DRM_MEM_MAGIC, "drm_magic", "DRM MAGIC Data Structures");
MALLOC_DEFINE(DRM_MEM_IOCTLS, "drm_ioctls", "DRM IOCTL Data Structures");
MALLOC_DEFINE(DRM_MEM_MAPS, "drm_maps", "DRM MAP Data Structures");
MALLOC_DEFINE(DRM_MEM_BUFS, "drm_bufs", "DRM BUFFER Data Structures");
MALLOC_DEFINE(DRM_MEM_SEGS, "drm_segs", "DRM SEGMENTS Data Structures");
MALLOC_DEFINE(DRM_MEM_PAGES, "drm_pages", "DRM PAGES Data Structures");
MALLOC_DEFINE(DRM_MEM_FILES, "drm_files", "DRM FILE Data Structures");
MALLOC_DEFINE(DRM_MEM_QUEUES, "drm_queues", "DRM QUEUE Data Structures");
MALLOC_DEFINE(DRM_MEM_CMDS, "drm_cmds", "DRM COMMAND Data Structures");
MALLOC_DEFINE(DRM_MEM_MAPPINGS, "drm_mapping", "DRM MAPPING Data Structures");
MALLOC_DEFINE(DRM_MEM_BUFLISTS, "drm_buflists", "DRM BUFLISTS Data Structures");
MALLOC_DEFINE(DRM_MEM_AGPLISTS, "drm_agplists", "DRM AGPLISTS Data Structures");
MALLOC_DEFINE(DRM_MEM_CTXBITMAP, "drm_ctxbitmap",
"DRM CTXBITMAP Data Structures");
MALLOC_DEFINE(DRM_MEM_SGLISTS, "drm_sglists", "DRM SGLISTS Data Structures");
MALLOC_DEFINE(DRM_MEM_DRAWABLE, "drm_drawable", "DRM DRAWABLE Data Structures");
MALLOC_DEFINE(DRM_MEM_MM, "drm_sman", "DRM MEMORY MANAGER Data Structures");
MALLOC_DEFINE(DRM_MEM_HASHTAB, "drm_hashtab", "DRM HASHTABLE Data Structures");
MALLOC_DEFINE(DRM_MEM_KMS, "drm_kms", "DRM KMS Data Structures");
void drm_mem_init(void)
#if __OS_HAS_AGP
static void *agp_remap(unsigned long offset, unsigned long size,
struct drm_device * dev)
{
/*
* FIXME Linux<->FreeBSD: Not implemented. This is never called
* on FreeBSD anyway, because drm_agp_mem->cant_use_aperture is
* set to 0.
*/
return NULL;
}
void drm_mem_uninit(void)
#define vunmap(handle)
/** Wrapper around agp_free_memory() */
void drm_free_agp(DRM_AGP_MEM * handle, int pages)
{
device_t agpdev;
agpdev = agp_find_device();
if (!agpdev || !handle)
return;
agp_free_memory(agpdev, handle);
}
EXPORT_SYMBOL(drm_free_agp);
/** Wrapper around agp_bind_memory() */
int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
{
device_t agpdev;
agpdev = agp_find_device();
if (!agpdev || !handle)
return -EINVAL;
return -agp_bind_memory(agpdev, handle, start * PAGE_SIZE);
}
void *drm_ioremap_wc(struct drm_device *dev, drm_local_map_t *map)
/** Wrapper around agp_unbind_memory() */
int drm_unbind_agp(DRM_AGP_MEM * handle)
{
return pmap_mapdev_attr(map->offset, map->size, VM_MEMATTR_WRITE_COMBINING);
device_t agpdev;
agpdev = agp_find_device();
if (!agpdev || !handle)
return -EINVAL;
return -agp_unbind_memory(agpdev, handle);
}
EXPORT_SYMBOL(drm_unbind_agp);
#else /* __OS_HAS_AGP */
static inline void *agp_remap(unsigned long offset, unsigned long size,
struct drm_device * dev)
{
return NULL;
}
void *drm_ioremap(struct drm_device *dev, drm_local_map_t *map)
#endif /* agp */
void drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev)
{
return pmap_mapdev(map->offset, map->size);
if (drm_core_has_AGP(dev) &&
dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
map->handle = agp_remap(map->offset, map->size, dev);
else
map->handle = pmap_mapdev(map->offset, map->size);
}
EXPORT_SYMBOL(drm_core_ioremap);
void drm_ioremapfree(drm_local_map_t *map)
void drm_core_ioremap_wc(struct drm_local_map *map, struct drm_device *dev)
{
pmap_unmapdev((vm_offset_t) map->virtual, map->size);
if (drm_core_has_AGP(dev) &&
dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
map->handle = agp_remap(map->offset, map->size, dev);
else
map->handle = pmap_mapdev_attr(map->offset, map->size,
VM_MEMATTR_WRITE_COMBINING);
}
EXPORT_SYMBOL(drm_core_ioremap_wc);
int
drm_mtrr_add(unsigned long offset, size_t size, int flags)
void drm_core_ioremapfree(struct drm_local_map *map, struct drm_device *dev)
{
int act;
struct mem_range_desc mrdesc;
if (!map->handle || !map->size)
return;
mrdesc.mr_base = offset;
mrdesc.mr_len = size;
mrdesc.mr_flags = flags;
act = MEMRANGE_SET_UPDATE;
strlcpy(mrdesc.mr_owner, "drm", sizeof(mrdesc.mr_owner));
return mem_range_attr_set(&mrdesc, &act);
}
int
drm_mtrr_del(int __unused handle, unsigned long offset, size_t size, int flags)
{
int act;
struct mem_range_desc mrdesc;
mrdesc.mr_base = offset;
mrdesc.mr_len = size;
mrdesc.mr_flags = flags;
act = MEMRANGE_SET_REMOVE;
strlcpy(mrdesc.mr_owner, "drm", sizeof(mrdesc.mr_owner));
return mem_range_attr_set(&mrdesc, &act);
}
void
drm_clflush_pages(vm_page_t *pages, unsigned long num_pages)
{
pmap_invalidate_cache_pages(pages, num_pages);
}
void
drm_clflush_virt_range(char *addr, unsigned long length)
{
pmap_invalidate_cache_range((vm_offset_t)addr,
(vm_offset_t)addr + length, TRUE);
if (drm_core_has_AGP(dev) &&
dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
vunmap(map->handle);
else
pmap_unmapdev((vm_offset_t)map->handle, map->size);
}
EXPORT_SYMBOL(drm_core_ioremapfree);

View File

@ -53,8 +53,7 @@ static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
struct drm_mm_node *child;
child = malloc(sizeof(*child), DRM_MEM_MM, M_ZERO |
(atomic ? M_NOWAIT : M_WAITOK));
child = malloc(sizeof(*child), DRM_MEM_MM, M_NOWAIT | M_ZERO);
if (unlikely(child == NULL)) {
mtx_lock(&mm->unused_lock);
@ -72,6 +71,11 @@ static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
return child;
}
/* drm_mm_pre_get() - pre allocate drm_mm_node structure
* drm_mm: memory manager struct we are pre-allocating for
*
* Returns 0 on success or -ENOMEM if allocation fails.
*/
int drm_mm_pre_get(struct drm_mm *mm)
{
struct drm_mm_node *node;
@ -79,7 +83,7 @@ int drm_mm_pre_get(struct drm_mm *mm)
mtx_lock(&mm->unused_lock);
while (mm->num_unused < MM_UNUSED_TARGET) {
mtx_unlock(&mm->unused_lock);
node = malloc(sizeof(*node), DRM_MEM_MM, M_WAITOK);
node = malloc(sizeof(*node), DRM_MEM_MM, M_NOWAIT | M_ZERO);
mtx_lock(&mm->unused_lock);
if (unlikely(node == NULL)) {
@ -93,6 +97,7 @@ int drm_mm_pre_get(struct drm_mm *mm)
mtx_unlock(&mm->unused_lock);
return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
@ -110,45 +115,53 @@ static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
struct drm_mm_node *node,
unsigned long size, unsigned alignment)
unsigned long size, unsigned alignment,
unsigned long color)
{
struct drm_mm *mm = hole_node->mm;
unsigned long tmp = 0, wasted = 0;
unsigned long hole_start = drm_mm_hole_node_start(hole_node);
unsigned long hole_end = drm_mm_hole_node_end(hole_node);
unsigned long adj_start = hole_start;
unsigned long adj_end = hole_end;
KASSERT(hole_node->hole_follows && !node->allocated, ("hole_node"));
BUG_ON(!hole_node->hole_follows || node->allocated);
if (alignment)
tmp = hole_start % alignment;
if (mm->color_adjust)
mm->color_adjust(hole_node, color, &adj_start, &adj_end);
if (!tmp) {
if (alignment) {
unsigned tmp = adj_start % alignment;
if (tmp)
adj_start += alignment - tmp;
}
if (adj_start == hole_start) {
hole_node->hole_follows = 0;
list_del_init(&hole_node->hole_stack);
} else
wasted = alignment - tmp;
list_del(&hole_node->hole_stack);
}
node->start = hole_start + wasted;
node->start = adj_start;
node->size = size;
node->mm = mm;
node->color = color;
node->allocated = 1;
INIT_LIST_HEAD(&node->hole_stack);
list_add(&node->node_list, &hole_node->node_list);
KASSERT(node->start + node->size <= hole_end, ("hole pos"));
BUG_ON(node->start + node->size > adj_end);
node->hole_follows = 0;
if (node->start + node->size < hole_end) {
list_add(&node->hole_stack, &mm->hole_stack);
node->hole_follows = 1;
} else {
node->hole_follows = 0;
}
}
struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
unsigned long size,
unsigned alignment,
unsigned long color,
int atomic)
{
struct drm_mm_node *node;
@ -157,72 +170,96 @@ struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
if (unlikely(node == NULL))
return NULL;
drm_mm_insert_helper(hole_node, node, size, alignment);
drm_mm_insert_helper(hole_node, node, size, alignment, color);
return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);
/**
* Search for free space and insert a preallocated memory node. Returns
* -ENOSPC if no suitable free area is available. The preallocated memory node
* must be cleared.
*/
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
unsigned long size, unsigned alignment,
unsigned long color)
{
struct drm_mm_node *hole_node;
hole_node = drm_mm_search_free_generic(mm, size, alignment,
color, 0);
if (!hole_node)
return -ENOSPC;
drm_mm_insert_helper(hole_node, node, size, alignment, color);
return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_generic);
int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
unsigned long size, unsigned alignment)
{
struct drm_mm_node *hole_node;
hole_node = drm_mm_search_free(mm, size, alignment, 0);
if (!hole_node)
return -ENOSPC;
drm_mm_insert_helper(hole_node, node, size, alignment);
return 0;
return drm_mm_insert_node_generic(mm, node, size, alignment, 0);
}
EXPORT_SYMBOL(drm_mm_insert_node);
static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
struct drm_mm_node *node,
unsigned long size, unsigned alignment,
unsigned long color,
unsigned long start, unsigned long end)
{
struct drm_mm *mm = hole_node->mm;
unsigned long tmp = 0, wasted = 0;
unsigned long hole_start = drm_mm_hole_node_start(hole_node);
unsigned long hole_end = drm_mm_hole_node_end(hole_node);
unsigned long adj_start = hole_start;
unsigned long adj_end = hole_end;
KASSERT(hole_node->hole_follows && !node->allocated, ("hole_node"));
BUG_ON(!hole_node->hole_follows || node->allocated);
if (hole_start < start)
wasted += start - hole_start;
if (alignment)
tmp = (hole_start + wasted) % alignment;
if (adj_start < start)
adj_start = start;
if (adj_end > end)
adj_end = end;
if (tmp)
wasted += alignment - tmp;
if (mm->color_adjust)
mm->color_adjust(hole_node, color, &adj_start, &adj_end);
if (!wasted) {
hole_node->hole_follows = 0;
list_del_init(&hole_node->hole_stack);
if (alignment) {
unsigned tmp = adj_start % alignment;
if (tmp)
adj_start += alignment - tmp;
}
node->start = hole_start + wasted;
if (adj_start == hole_start) {
hole_node->hole_follows = 0;
list_del(&hole_node->hole_stack);
}
node->start = adj_start;
node->size = size;
node->mm = mm;
node->color = color;
node->allocated = 1;
INIT_LIST_HEAD(&node->hole_stack);
list_add(&node->node_list, &hole_node->node_list);
KASSERT(node->start + node->size <= hole_end, ("hole_end"));
KASSERT(node->start + node->size <= end, ("end"));
BUG_ON(node->start + node->size > adj_end);
BUG_ON(node->start + node->size > end);
node->hole_follows = 0;
if (node->start + node->size < hole_end) {
list_add(&node->hole_stack, &mm->hole_stack);
node->hole_follows = 1;
} else {
node->hole_follows = 0;
}
}
struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
unsigned long size,
unsigned alignment,
unsigned long color,
unsigned long start,
unsigned long end,
int atomic)
@ -233,47 +270,66 @@ struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node
if (unlikely(node == NULL))
return NULL;
drm_mm_insert_helper_range(hole_node, node, size, alignment,
drm_mm_insert_helper_range(hole_node, node, size, alignment, color,
start, end);
return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);
/**
* Search for free space and insert a preallocated memory node. Returns
* -ENOSPC if no suitable free area is available. This is for range
* restricted allocations. The preallocated memory node must be cleared.
*/
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
unsigned long size, unsigned alignment, unsigned long color,
unsigned long start, unsigned long end)
{
struct drm_mm_node *hole_node;
hole_node = drm_mm_search_free_in_range_generic(mm,
size, alignment, color,
start, end, 0);
if (!hole_node)
return -ENOSPC;
drm_mm_insert_helper_range(hole_node, node,
size, alignment, color,
start, end);
return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
unsigned long size, unsigned alignment,
unsigned long start, unsigned long end)
{
struct drm_mm_node *hole_node;
hole_node = drm_mm_search_free_in_range(mm, size, alignment,
start, end, 0);
if (!hole_node)
return -ENOSPC;
drm_mm_insert_helper_range(hole_node, node, size, alignment,
start, end);
return 0;
return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, 0, start, end);
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
/**
* Remove a memory node from the allocator.
*/
void drm_mm_remove_node(struct drm_mm_node *node)
{
struct drm_mm *mm = node->mm;
struct drm_mm_node *prev_node;
KASSERT(!node->scanned_block && !node->scanned_prev_free
&& !node->scanned_next_free, ("node"));
BUG_ON(node->scanned_block || node->scanned_prev_free
|| node->scanned_next_free);
prev_node =
list_entry(node->node_list.prev, struct drm_mm_node, node_list);
if (node->hole_follows) {
KASSERT(drm_mm_hole_node_start(node)
!= drm_mm_hole_node_end(node), ("hole_follows"));
BUG_ON(drm_mm_hole_node_start(node)
== drm_mm_hole_node_end(node));
list_del(&node->hole_stack);
} else
KASSERT(drm_mm_hole_node_start(node)
== drm_mm_hole_node_end(node), ("!hole_follows"));
BUG_ON(drm_mm_hole_node_start(node)
!= drm_mm_hole_node_end(node));
if (!prev_node->hole_follows) {
prev_node->hole_follows = 1;
@ -284,14 +340,16 @@ void drm_mm_remove_node(struct drm_mm_node *node)
list_del(&node->node_list);
node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);
/*
* Put a block. Merge with the previous and / or next block if they are free.
* Otherwise add to the free stack.
* Remove a memory node from the allocator and free the allocated struct
* drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
* drm_mm_get_block functions.
*/
void drm_mm_put_block(struct drm_mm_node *node)
{
struct drm_mm *mm = node->mm;
drm_mm_remove_node(node);
@ -304,82 +362,49 @@ void drm_mm_put_block(struct drm_mm_node *node)
free(node, DRM_MEM_MM);
mtx_unlock(&mm->unused_lock);
}
EXPORT_SYMBOL(drm_mm_put_block);
static int check_free_hole(unsigned long start, unsigned long end,
unsigned long size, unsigned alignment)
{
unsigned wasted = 0;
if (end - start < size)
return 0;
if (alignment) {
unsigned tmp = start % alignment;
if (tmp)
wasted = alignment - tmp;
start += alignment - tmp;
}
if (end >= start + size + wasted) {
return 1;
}
return 0;
return end >= start + size;
}
struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
unsigned long size,
unsigned alignment, int best_match)
struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
unsigned long size,
unsigned alignment,
unsigned long color,
bool best_match)
{
struct drm_mm_node *entry;
struct drm_mm_node *best;
unsigned long best_size;
BUG_ON(mm->scanned_blocks);
best = NULL;
best_size = ~0UL;
list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
KASSERT(entry->hole_follows, ("hole_follows"));
if (!check_free_hole(drm_mm_hole_node_start(entry),
drm_mm_hole_node_end(entry),
size, alignment))
continue;
unsigned long adj_start = drm_mm_hole_node_start(entry);
unsigned long adj_end = drm_mm_hole_node_end(entry);
if (!best_match)
return entry;
if (entry->size < best_size) {
best = entry;
best_size = entry->size;
if (mm->color_adjust) {
mm->color_adjust(entry, color, &adj_start, &adj_end);
if (adj_end <= adj_start)
continue;
}
}
return best;
}
struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
unsigned long size,
unsigned alignment,
unsigned long start,
unsigned long end,
int best_match)
{
struct drm_mm_node *entry;
struct drm_mm_node *best;
unsigned long best_size;
KASSERT(!mm->scanned_blocks, ("scanned"));
best = NULL;
best_size = ~0UL;
list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
unsigned long adj_start = drm_mm_hole_node_start(entry) < start ?
start : drm_mm_hole_node_start(entry);
unsigned long adj_end = drm_mm_hole_node_end(entry) > end ?
end : drm_mm_hole_node_end(entry);
KASSERT(entry->hole_follows, ("hole_follows"));
BUG_ON(!entry->hole_follows);
if (!check_free_hole(adj_start, adj_end, size, alignment))
continue;
@ -394,7 +419,58 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
return best;
}
EXPORT_SYMBOL(drm_mm_search_free_generic);
struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
unsigned long size,
unsigned alignment,
unsigned long color,
unsigned long start,
unsigned long end,
bool best_match)
{
struct drm_mm_node *entry;
struct drm_mm_node *best;
unsigned long best_size;
BUG_ON(mm->scanned_blocks);
best = NULL;
best_size = ~0UL;
list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
unsigned long adj_start = drm_mm_hole_node_start(entry) < start ?
start : drm_mm_hole_node_start(entry);
unsigned long adj_end = drm_mm_hole_node_end(entry) > end ?
end : drm_mm_hole_node_end(entry);
BUG_ON(!entry->hole_follows);
if (mm->color_adjust) {
mm->color_adjust(entry, color, &adj_start, &adj_end);
if (adj_end <= adj_start)
continue;
}
if (!check_free_hole(adj_start, adj_end, size, alignment))
continue;
if (!best_match)
return entry;
if (entry->size < best_size) {
best = entry;
best_size = entry->size;
}
}
return best;
}
EXPORT_SYMBOL(drm_mm_search_free_in_range_generic);
/**
* Moves an allocation. To be used with embedded struct drm_mm_node.
*/
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
list_replace(&old->node_list, &new->node_list);
@ -403,50 +479,83 @@ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
new->mm = old->mm;
new->start = old->start;
new->size = old->size;
new->color = old->color;
old->allocated = 0;
new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);
void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
unsigned alignment)
/**
* Initializa lru scanning.
*
* This simply sets up the scanning routines with the parameters for the desired
* hole.
*
* Warning: As long as the scan list is non-empty, no other operations than
* adding/removing nodes to/from the scan list are allowed.
*/
void drm_mm_init_scan(struct drm_mm *mm,
unsigned long size,
unsigned alignment,
unsigned long color)
{
mm->scan_color = color;
mm->scan_alignment = alignment;
mm->scan_size = size;
mm->scanned_blocks = 0;
mm->scan_hit_start = 0;
mm->scan_hit_size = 0;
mm->scan_hit_end = 0;
mm->scan_check_range = 0;
mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);
void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
/**
* Initializa lru scanning.
*
* This simply sets up the scanning routines with the parameters for the desired
* hole. This version is for range-restricted scans.
*
* Warning: As long as the scan list is non-empty, no other operations than
* adding/removing nodes to/from the scan list are allowed.
*/
void drm_mm_init_scan_with_range(struct drm_mm *mm,
unsigned long size,
unsigned alignment,
unsigned long color,
unsigned long start,
unsigned long end)
{
mm->scan_color = color;
mm->scan_alignment = alignment;
mm->scan_size = size;
mm->scanned_blocks = 0;
mm->scan_hit_start = 0;
mm->scan_hit_size = 0;
mm->scan_hit_end = 0;
mm->scan_start = start;
mm->scan_end = end;
mm->scan_check_range = 1;
mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);
/**
* Add a node to the scan list that might be freed to make space for the desired
* hole.
*
* Returns non-zero, if a hole has been found, zero otherwise.
*/
int drm_mm_scan_add_block(struct drm_mm_node *node)
{
struct drm_mm *mm = node->mm;
struct drm_mm_node *prev_node;
unsigned long hole_start, hole_end;
unsigned long adj_start;
unsigned long adj_end;
unsigned long adj_start, adj_end;
mm->scanned_blocks++;
KASSERT(!node->scanned_block, ("node->scanned_block"));
BUG_ON(node->scanned_block);
node->scanned_block = 1;
prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
@ -459,29 +568,45 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
node->node_list.next = &mm->prev_scanned_node->node_list;
mm->prev_scanned_node = node;
hole_start = drm_mm_hole_node_start(prev_node);
hole_end = drm_mm_hole_node_end(prev_node);
adj_start = hole_start = drm_mm_hole_node_start(prev_node);
adj_end = hole_end = drm_mm_hole_node_end(prev_node);
if (mm->scan_check_range) {
adj_start = hole_start < mm->scan_start ?
mm->scan_start : hole_start;
adj_end = hole_end > mm->scan_end ?
mm->scan_end : hole_end;
} else {
adj_start = hole_start;
adj_end = hole_end;
if (adj_start < mm->scan_start)
adj_start = mm->scan_start;
if (adj_end > mm->scan_end)
adj_end = mm->scan_end;
}
if (check_free_hole(adj_start , adj_end,
if (mm->color_adjust)
mm->color_adjust(prev_node, mm->scan_color,
&adj_start, &adj_end);
if (check_free_hole(adj_start, adj_end,
mm->scan_size, mm->scan_alignment)) {
mm->scan_hit_start = hole_start;
mm->scan_hit_size = hole_end;
mm->scan_hit_end = hole_end;
return 1;
}
return 0;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);
/**
* Remove a node from the scan list.
*
* Nodes _must_ be removed in the exact same order from the scan list as they
* have been added, otherwise the internal state of the memory manager will be
* corrupted.
*
* When the scan list is empty, the selected memory nodes can be freed. An
* immediately following drm_mm_search_free with best_match = 0 will then return
* the just freed block (because its at the top of the free_stack list).
*
* Returns one if this block should be evicted, zero otherwise. Will always
* return zero when no hole has been found.
*/
int drm_mm_scan_remove_block(struct drm_mm_node *node)
{
struct drm_mm *mm = node->mm;
@ -489,27 +614,19 @@ int drm_mm_scan_remove_block(struct drm_mm_node *node)
mm->scanned_blocks--;
KASSERT(node->scanned_block, ("scanned_block"));
BUG_ON(!node->scanned_block);
node->scanned_block = 0;
prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
node_list);
prev_node->hole_follows = node->scanned_preceeds_hole;
INIT_LIST_HEAD(&node->node_list);
list_add(&node->node_list, &prev_node->node_list);
/* Only need to check for containement because start&size for the
* complete resulting free block (not just the desired part) is
* stored. */
if (node->start >= mm->scan_hit_start &&
node->start + node->size
<= mm->scan_hit_start + mm->scan_hit_size) {
return 1;
}
return 0;
return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
node->start < mm->scan_hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);
int drm_mm_clean(struct drm_mm * mm)
{
@ -517,6 +634,7 @@ int drm_mm_clean(struct drm_mm * mm)
return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);
int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
{
@ -526,6 +644,7 @@ int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
mm->scanned_blocks = 0;
mtx_init(&mm->unused_lock, "drm_unused", NULL, MTX_DEF);
/* Clever trick to avoid a special case in the free hole tracking. */
INIT_LIST_HEAD(&mm->head_node.node_list);
INIT_LIST_HEAD(&mm->head_node.hole_stack);
mm->head_node.hole_follows = 1;
@ -537,8 +656,11 @@ int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
mm->head_node.size = start - mm->head_node.start;
list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);
mm->color_adjust = NULL;
return 0;
}
EXPORT_SYMBOL(drm_mm_init);
void drm_mm_takedown(struct drm_mm * mm)
{
@ -557,10 +679,9 @@ void drm_mm_takedown(struct drm_mm * mm)
}
mtx_unlock(&mm->unused_lock);
mtx_destroy(&mm->unused_lock);
KASSERT(mm->num_unused == 0, ("num_unused != 0"));
BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
@ -572,13 +693,13 @@ void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
hole_end = drm_mm_hole_node_end(&mm->head_node);
hole_size = hole_end - hole_start;
if (hole_size)
printf("%s 0x%08lx-0x%08lx: %8lu: free\n",
printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
prefix, hole_start, hole_end,
hole_size);
total_free += hole_size;
drm_mm_for_each_node(entry, mm) {
printf("%s 0x%08lx-0x%08lx: %8lu: used\n",
printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
prefix, entry->start, entry->start + entry->size,
entry->size);
total_used += entry->size;
@ -587,7 +708,7 @@ void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
hole_start = drm_mm_hole_node_start(entry);
hole_end = drm_mm_hole_node_end(entry);
hole_size = hole_end - hole_start;
printf("%s 0x%08lx-0x%08lx: %8lu: free\n",
printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
prefix, hole_start, hole_end,
hole_size);
total_free += hole_size;
@ -595,6 +716,7 @@ void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
}
total = total_free + total_used;
printf("%s total: %lu, used %lu free %lu\n", prefix, total,
printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

View File

@ -25,18 +25,20 @@
*
*
**************************************************************************/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
* Authors:
* Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#ifndef _DRM_MM_H_
#define _DRM_MM_H_
/*
* Generic range manager structs
*/
#include <dev/drm2/drm_linux_list.h>
struct drm_mm_node {
@ -48,27 +50,34 @@ struct drm_mm_node {
unsigned scanned_next_free : 1;
unsigned scanned_preceeds_hole : 1;
unsigned allocated : 1;
unsigned long color;
unsigned long start;
unsigned long size;
struct drm_mm *mm;
void *private;
};
struct drm_mm {
/* List of all memory nodes that immediately precede a free hole. */
struct list_head hole_stack;
/* head_node.node_list is the list of all memory nodes, ordered
* according to the (increasing) start address of the memory node. */
struct drm_mm_node head_node;
struct list_head unused_nodes;
int num_unused;
struct mtx unused_lock;
unsigned int scan_check_range : 1;
unsigned scan_alignment;
unsigned long scan_color;
unsigned long scan_size;
unsigned long scan_hit_start;
unsigned scan_hit_size;
unsigned long scan_hit_end;
unsigned scanned_blocks;
unsigned long scan_start;
unsigned long scan_end;
struct drm_mm_node *prev_scanned_node;
void (*color_adjust)(struct drm_mm_node *node, unsigned long color,
unsigned long *start, unsigned long *end);
};
static inline bool drm_mm_node_allocated(struct drm_mm_node *node)
@ -78,7 +87,7 @@ static inline bool drm_mm_node_allocated(struct drm_mm_node *node)
static inline bool drm_mm_initialized(struct drm_mm *mm)
{
return (mm->hole_stack.next != NULL);
return mm->hole_stack.next;
}
#define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \
&(mm)->head_node.node_list, \
@ -89,19 +98,20 @@ static inline bool drm_mm_initialized(struct drm_mm *mm)
struct drm_mm_node, node_list) : NULL; \
entry != NULL; entry = next, \
next = entry ? list_entry(entry->node_list.next, \
struct drm_mm_node, node_list) : NULL)
struct drm_mm_node, node_list) : NULL) \
/*
* Basic range manager support (drm_mm.c)
*/
extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
unsigned long size,
unsigned alignment,
unsigned long color,
int atomic);
extern struct drm_mm_node *drm_mm_get_block_range_generic(
struct drm_mm_node *node,
unsigned long size,
unsigned alignment,
unsigned long color,
unsigned long start,
unsigned long end,
int atomic);
@ -109,13 +119,13 @@ static inline struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
unsigned long size,
unsigned alignment)
{
return drm_mm_get_block_generic(parent, size, alignment, 0);
return drm_mm_get_block_generic(parent, size, alignment, 0, 0);
}
static inline struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *parent,
unsigned long size,
unsigned alignment)
{
return drm_mm_get_block_generic(parent, size, alignment, 1);
return drm_mm_get_block_generic(parent, size, alignment, 0, 1);
}
static inline struct drm_mm_node *drm_mm_get_block_range(
struct drm_mm_node *parent,
@ -124,8 +134,19 @@ static inline struct drm_mm_node *drm_mm_get_block_range(
unsigned long start,
unsigned long end)
{
return drm_mm_get_block_range_generic(parent, size, alignment,
start, end, 0);
return drm_mm_get_block_range_generic(parent, size, alignment, 0,
start, end, 0);
}
static inline struct drm_mm_node *drm_mm_get_color_block_range(
struct drm_mm_node *parent,
unsigned long size,
unsigned alignment,
unsigned long color,
unsigned long start,
unsigned long end)
{
return drm_mm_get_block_range_generic(parent, size, alignment, color,
start, end, 0);
}
static inline struct drm_mm_node *drm_mm_get_block_atomic_range(
struct drm_mm_node *parent,
@ -134,38 +155,91 @@ static inline struct drm_mm_node *drm_mm_get_block_atomic_range(
unsigned long start,
unsigned long end)
{
return drm_mm_get_block_range_generic(parent, size, alignment,
return drm_mm_get_block_range_generic(parent, size, alignment, 0,
start, end, 1);
}
extern int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
unsigned long size, unsigned alignment);
extern int drm_mm_insert_node(struct drm_mm *mm,
struct drm_mm_node *node,
unsigned long size,
unsigned alignment);
extern int drm_mm_insert_node_in_range(struct drm_mm *mm,
struct drm_mm_node *node,
unsigned long size, unsigned alignment,
unsigned long start, unsigned long end);
unsigned long size,
unsigned alignment,
unsigned long start,
unsigned long end);
extern int drm_mm_insert_node_generic(struct drm_mm *mm,
struct drm_mm_node *node,
unsigned long size,
unsigned alignment,
unsigned long color);
extern int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
struct drm_mm_node *node,
unsigned long size,
unsigned alignment,
unsigned long color,
unsigned long start,
unsigned long end);
extern void drm_mm_put_block(struct drm_mm_node *cur);
extern void drm_mm_remove_node(struct drm_mm_node *node);
extern void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
unsigned long size,
unsigned alignment,
int best_match);
extern struct drm_mm_node *drm_mm_search_free_in_range(
extern struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
unsigned long size,
unsigned alignment,
unsigned long color,
bool best_match);
extern struct drm_mm_node *drm_mm_search_free_in_range_generic(
const struct drm_mm *mm,
unsigned long size,
unsigned alignment,
unsigned long color,
unsigned long start,
unsigned long end,
bool best_match);
static inline struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
unsigned long size,
unsigned alignment,
bool best_match)
{
return drm_mm_search_free_generic(mm,size, alignment, 0, best_match);
}
static inline struct drm_mm_node *drm_mm_search_free_in_range(
const struct drm_mm *mm,
unsigned long size,
unsigned alignment,
unsigned long start,
unsigned long end,
int best_match);
extern int drm_mm_init(struct drm_mm *mm, unsigned long start,
bool best_match)
{
return drm_mm_search_free_in_range_generic(mm, size, alignment, 0,
start, end, best_match);
}
static inline struct drm_mm_node *drm_mm_search_free_color(const struct drm_mm *mm,
unsigned long size,
unsigned alignment,
unsigned long color,
bool best_match)
{
return drm_mm_search_free_generic(mm,size, alignment, color, best_match);
}
static inline struct drm_mm_node *drm_mm_search_free_in_range_color(
const struct drm_mm *mm,
unsigned long size,
unsigned alignment,
unsigned long color,
unsigned long start,
unsigned long end,
bool best_match)
{
return drm_mm_search_free_in_range_generic(mm, size, alignment, color,
start, end, best_match);
}
extern int drm_mm_init(struct drm_mm *mm,
unsigned long start,
unsigned long size);
extern void drm_mm_takedown(struct drm_mm *mm);
extern int drm_mm_clean(struct drm_mm *mm);
extern unsigned long drm_mm_tail_space(struct drm_mm *mm);
extern int drm_mm_remove_space_from_tail(struct drm_mm *mm,
unsigned long size);
extern int drm_mm_add_space_to_tail(struct drm_mm *mm,
unsigned long size, int atomic);
extern int drm_mm_pre_get(struct drm_mm *mm);
static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
@ -173,15 +247,19 @@ static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
return block->mm;
}
void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
unsigned alignment);
void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
void drm_mm_init_scan(struct drm_mm *mm,
unsigned long size,
unsigned alignment,
unsigned long color);
void drm_mm_init_scan_with_range(struct drm_mm *mm,
unsigned long size,
unsigned alignment,
unsigned long color,
unsigned long start,
unsigned long end);
int drm_mm_scan_add_block(struct drm_mm_node *node);
int drm_mm_scan_remove_block(struct drm_mm_node *node);
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix);
extern void drm_mm_debug_table(struct drm_mm *mm, const char *prefix);
#endif

View File

@ -29,6 +29,8 @@
#ifndef _DRM_MODE_H
#define _DRM_MODE_H
#include <dev/drm2/drm_os_freebsd.h>
#define DRM_DISPLAY_INFO_LEN 32
#define DRM_CONNECTOR_NAME_LEN 32
#define DRM_DISPLAY_MODE_LEN 32
@ -84,41 +86,41 @@
#define DRM_MODE_DIRTY_ANNOTATE 2
struct drm_mode_modeinfo {
uint32_t clock;
uint16_t hdisplay, hsync_start, hsync_end, htotal, hskew;
uint16_t vdisplay, vsync_start, vsync_end, vtotal, vscan;
__u32 clock;
__u16 hdisplay, hsync_start, hsync_end, htotal, hskew;
__u16 vdisplay, vsync_start, vsync_end, vtotal, vscan;
uint32_t vrefresh;
__u32 vrefresh;
uint32_t flags;
uint32_t type;
__u32 flags;
__u32 type;
char name[DRM_DISPLAY_MODE_LEN];
};
struct drm_mode_card_res {
uint64_t fb_id_ptr;
uint64_t crtc_id_ptr;
uint64_t connector_id_ptr;
uint64_t encoder_id_ptr;
uint32_t count_fbs;
uint32_t count_crtcs;
uint32_t count_connectors;
uint32_t count_encoders;
uint32_t min_width, max_width;
uint32_t min_height, max_height;
__u64 fb_id_ptr;
__u64 crtc_id_ptr;
__u64 connector_id_ptr;
__u64 encoder_id_ptr;
__u32 count_fbs;
__u32 count_crtcs;
__u32 count_connectors;
__u32 count_encoders;
__u32 min_width, max_width;
__u32 min_height, max_height;
};
struct drm_mode_crtc {
uint64_t set_connectors_ptr;
uint32_t count_connectors;
__u64 set_connectors_ptr;
__u32 count_connectors;
uint32_t crtc_id; /**< Id */
uint32_t fb_id; /**< Id of framebuffer */
__u32 crtc_id; /**< Id */
__u32 fb_id; /**< Id of framebuffer */
uint32_t x, y; /**< Position on the frameuffer */
__u32 x, y; /**< Position on the frameuffer */
uint32_t gamma_size;
uint32_t mode_valid;
__u32 gamma_size;
__u32 mode_valid;
struct drm_mode_modeinfo mode;
};
@ -127,36 +129,36 @@ struct drm_mode_crtc {
/* Planes blend with or override other bits on the CRTC */
struct drm_mode_set_plane {
uint32_t plane_id;
uint32_t crtc_id;
uint32_t fb_id; /* fb object contains surface format type */
uint32_t flags; /* see above flags */
__u32 plane_id;
__u32 crtc_id;
__u32 fb_id; /* fb object contains surface format type */
__u32 flags; /* see above flags */
/* Signed dest location allows it to be partially off screen */
int32_t crtc_x, crtc_y;
uint32_t crtc_w, crtc_h;
__s32 crtc_x, crtc_y;
__u32 crtc_w, crtc_h;
/* Source values are 16.16 fixed point */
uint32_t src_x, src_y;
uint32_t src_h, src_w;
__u32 src_x, src_y;
__u32 src_h, src_w;
};
struct drm_mode_get_plane {
uint32_t plane_id;
__u32 plane_id;
uint32_t crtc_id;
uint32_t fb_id;
__u32 crtc_id;
__u32 fb_id;
uint32_t possible_crtcs;
uint32_t gamma_size;
__u32 possible_crtcs;
__u32 gamma_size;
uint32_t count_format_types;
uint64_t format_type_ptr;
__u32 count_format_types;
__u64 format_type_ptr;
};
struct drm_mode_get_plane_res {
uint64_t plane_id_ptr;
uint32_t count_planes;
__u64 plane_id_ptr;
__u32 count_planes;
};
#define DRM_MODE_ENCODER_NONE 0
@ -164,15 +166,16 @@ struct drm_mode_get_plane_res {
#define DRM_MODE_ENCODER_TMDS 2
#define DRM_MODE_ENCODER_LVDS 3
#define DRM_MODE_ENCODER_TVDAC 4
#define DRM_MODE_ENCODER_VIRTUAL 5
struct drm_mode_get_encoder {
uint32_t encoder_id;
uint32_t encoder_type;
__u32 encoder_id;
__u32 encoder_type;
uint32_t crtc_id; /**< Id of crtc */
__u32 crtc_id; /**< Id of crtc */
uint32_t possible_crtcs;
uint32_t possible_clones;
__u32 possible_crtcs;
__u32 possible_clones;
};
/* This is for connectors with multiple signal types. */
@ -201,26 +204,27 @@ struct drm_mode_get_encoder {
#define DRM_MODE_CONNECTOR_HDMIB 12
#define DRM_MODE_CONNECTOR_TV 13
#define DRM_MODE_CONNECTOR_eDP 14
#define DRM_MODE_CONNECTOR_VIRTUAL 15
struct drm_mode_get_connector {
uint64_t encoders_ptr;
uint64_t modes_ptr;
uint64_t props_ptr;
uint64_t prop_values_ptr;
__u64 encoders_ptr;
__u64 modes_ptr;
__u64 props_ptr;
__u64 prop_values_ptr;
uint32_t count_modes;
uint32_t count_props;
uint32_t count_encoders;
__u32 count_modes;
__u32 count_props;
__u32 count_encoders;
uint32_t encoder_id; /**< Current Encoder */
uint32_t connector_id; /**< Id */
uint32_t connector_type;
uint32_t connector_type_id;
__u32 encoder_id; /**< Current Encoder */
__u32 connector_id; /**< Id */
__u32 connector_type;
__u32 connector_type_id;
uint32_t connection;
uint32_t mm_width, mm_height; /**< HxW in millimeters */
uint32_t subpixel;
__u32 connection;
__u32 mm_width, mm_height; /**< HxW in millimeters */
__u32 subpixel;
};
#define DRM_MODE_PROP_PENDING (1<<0)
@ -231,66 +235,66 @@ struct drm_mode_get_connector {
#define DRM_MODE_PROP_BITMASK (1<<5) /* bitmask of enumerated types */
struct drm_mode_property_enum {
uint64_t value;
__u64 value;
char name[DRM_PROP_NAME_LEN];
};
struct drm_mode_get_property {
uint64_t values_ptr; /* values and blob lengths */
uint64_t enum_blob_ptr; /* enum and blob id ptrs */
__u64 values_ptr; /* values and blob lengths */
__u64 enum_blob_ptr; /* enum and blob id ptrs */
uint32_t prop_id;
uint32_t flags;
__u32 prop_id;
__u32 flags;
char name[DRM_PROP_NAME_LEN];
uint32_t count_values;
uint32_t count_enum_blobs;
__u32 count_values;
__u32 count_enum_blobs;
};
struct drm_mode_connector_set_property {
uint64_t value;
uint32_t prop_id;
uint32_t connector_id;
__u64 value;
__u32 prop_id;
__u32 connector_id;
};
struct drm_mode_obj_get_properties {
uint64_t props_ptr;
uint64_t prop_values_ptr;
uint32_t count_props;
uint32_t obj_id;
uint32_t obj_type;
__u64 props_ptr;
__u64 prop_values_ptr;
__u32 count_props;
__u32 obj_id;
__u32 obj_type;
};
struct drm_mode_obj_set_property {
uint64_t value;
uint32_t prop_id;
uint32_t obj_id;
uint32_t obj_type;
__u64 value;
__u32 prop_id;
__u32 obj_id;
__u32 obj_type;
};
struct drm_mode_get_blob {
uint32_t blob_id;
uint32_t length;
uint64_t data;
__u32 blob_id;
__u32 length;
__u64 data;
};
struct drm_mode_fb_cmd {
uint32_t fb_id;
uint32_t width, height;
uint32_t pitch;
uint32_t bpp;
uint32_t depth;
__u32 fb_id;
__u32 width, height;
__u32 pitch;
__u32 bpp;
__u32 depth;
/* driver specific handle */
uint32_t handle;
__u32 handle;
};
#define DRM_MODE_FB_INTERLACED (1<<0 /* for interlaced framebuffers */
#define DRM_MODE_FB_INTERLACED (1<<0) /* for interlaced framebuffers */
struct drm_mode_fb_cmd2 {
uint32_t fb_id;
uint32_t width, height;
uint32_t pixel_format; /* fourcc code from drm_fourcc.h */
uint32_t flags; /* see above flags */
__u32 fb_id;
__u32 width, height;
__u32 pixel_format; /* fourcc code from drm_fourcc.h */
__u32 flags; /* see above flags */
/*
* In case of planar formats, this ioctl allows up to 4
@ -306,9 +310,9 @@ struct drm_mode_fb_cmd2 {
* offeset[1]. Note that offset[0] will generally
* be 0.
*/
uint32_t handles[4];
uint32_t pitches[4]; /* pitch for each plane */
uint32_t offsets[4]; /* offset of each plane */
__u32 handles[4];
__u32 pitches[4]; /* pitch for each plane */
__u32 offsets[4]; /* offset of each plane */
};
#define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01
@ -345,23 +349,24 @@ struct drm_mode_fb_cmd2 {
*/
struct drm_mode_fb_dirty_cmd {
uint32_t fb_id;
uint32_t flags;
uint32_t color;
uint32_t num_clips;
uint64_t clips_ptr;
__u32 fb_id;
__u32 flags;
__u32 color;
__u32 num_clips;
__u64 clips_ptr;
};
struct drm_mode_mode_cmd {
uint32_t connector_id;
__u32 connector_id;
struct drm_mode_modeinfo mode;
};
#define DRM_MODE_CURSOR_BO (1<<0)
#define DRM_MODE_CURSOR_MOVE (1<<1)
#define DRM_MODE_CURSOR_BO 0x01
#define DRM_MODE_CURSOR_MOVE 0x02
#define DRM_MODE_CURSOR_FLAGS 0x03
/*
* depending on the value in flags diffrent members are used.
* depending on the value in flags different members are used.
*
* CURSOR_BO uses
* crtc
@ -375,24 +380,24 @@ struct drm_mode_mode_cmd {
* y
*/
struct drm_mode_cursor {
uint32_t flags;
uint32_t crtc_id;
int32_t x;
int32_t y;
uint32_t width;
uint32_t height;
__u32 flags;
__u32 crtc_id;
__s32 x;
__s32 y;
__u32 width;
__u32 height;
/* driver specific handle */
uint32_t handle;
__u32 handle;
};
struct drm_mode_crtc_lut {
uint32_t crtc_id;
uint32_t gamma_size;
__u32 crtc_id;
__u32 gamma_size;
/* pointers to arrays */
uint64_t red;
uint64_t green;
uint64_t blue;
__u64 red;
__u64 green;
__u64 blue;
};
#define DRM_MODE_PAGE_FLIP_EVENT 0x01
@ -421,11 +426,11 @@ struct drm_mode_crtc_lut {
*/
struct drm_mode_crtc_page_flip {
uint32_t crtc_id;
uint32_t fb_id;
uint32_t flags;
uint32_t reserved;
uint64_t user_data;
__u32 crtc_id;
__u32 fb_id;
__u32 flags;
__u32 reserved;
__u64 user_data;
};
/* create a dumb scanout buffer */
@ -443,14 +448,14 @@ struct drm_mode_create_dumb {
/* set up for mmap of a dumb scanout buffer */
struct drm_mode_map_dumb {
/** Handle for the object being mapped. */
uint32_t handle;
uint32_t pad;
__u32 handle;
__u32 pad;
/**
* Fake offset to use for subsequent mmap call
*
* This is a fixed-size type for 32/64 compatibility.
*/
uint64_t offset;
__u64 offset;
};
struct drm_mode_destroy_dumb {

View File

@ -34,11 +34,8 @@
__FBSDID("$FreeBSD$");
#include <dev/drm2/drmP.h>
#include <dev/drm2/drm.h>
#include <dev/drm2/drm_crtc.h>
#define KHZ2PICOS(a) (1000000000UL/(a))
/**
* drm_mode_debug_printmodeline - debug print a mode
* @dev: DRM device
@ -49,7 +46,7 @@ __FBSDID("$FreeBSD$");
*
* Describe @mode using DRM_DEBUG.
*/
void drm_mode_debug_printmodeline(struct drm_display_mode *mode)
void drm_mode_debug_printmodeline(const struct drm_display_mode *mode)
{
DRM_DEBUG_KMS("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d "
"0x%x 0x%x\n",
@ -59,6 +56,7 @@ void drm_mode_debug_printmodeline(struct drm_display_mode *mode)
mode->vdisplay, mode->vsync_start,
mode->vsync_end, mode->vtotal, mode->type, mode->flags);
}
EXPORT_SYMBOL(drm_mode_debug_printmodeline);
/**
* drm_cvt_mode -create a modeline based on CVT algorithm
@ -278,6 +276,7 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
return drm_mode;
}
EXPORT_SYMBOL(drm_cvt_mode);
/**
* drm_gtf_mode_complex - create the modeline based on full GTF algorithm
@ -463,6 +462,7 @@ drm_gtf_mode_complex(struct drm_device *dev, int hdisplay, int vdisplay,
return drm_mode;
}
EXPORT_SYMBOL(drm_gtf_mode_complex);
/**
* drm_gtf_mode - create the modeline based on GTF algorithm
@ -502,6 +502,7 @@ drm_gtf_mode(struct drm_device *dev, int hdisplay, int vdisplay, int vrefresh,
return drm_gtf_mode_complex(dev, hdisplay, vdisplay, vrefresh, lace,
margins, 600, 40 * 2, 128, 20 * 2);
}
EXPORT_SYMBOL(drm_gtf_mode);
/**
* drm_mode_set_name - set the name on a mode
@ -520,6 +521,7 @@ void drm_mode_set_name(struct drm_display_mode *mode)
mode->hdisplay, mode->vdisplay,
interlaced ? "i" : "");
}
EXPORT_SYMBOL(drm_mode_set_name);
/**
* drm_mode_list_concat - move modes from one list to another
@ -540,6 +542,7 @@ void drm_mode_list_concat(struct list_head *head, struct list_head *new)
list_move_tail(entry, new);
}
}
EXPORT_SYMBOL(drm_mode_list_concat);
/**
* drm_mode_width - get the width of a mode
@ -555,11 +558,12 @@ void drm_mode_list_concat(struct list_head *head, struct list_head *new)
* RETURNS:
* @mode->hdisplay
*/
int drm_mode_width(struct drm_display_mode *mode)
int drm_mode_width(const struct drm_display_mode *mode)
{
return mode->hdisplay;
}
EXPORT_SYMBOL(drm_mode_width);
/**
* drm_mode_height - get the height of a mode
@ -575,10 +579,11 @@ int drm_mode_width(struct drm_display_mode *mode)
* RETURNS:
* @mode->vdisplay
*/
int drm_mode_height(struct drm_display_mode *mode)
int drm_mode_height(const struct drm_display_mode *mode)
{
return mode->vdisplay;
}
EXPORT_SYMBOL(drm_mode_height);
/** drm_mode_hsync - get the hsync of a mode
* @mode: mode
@ -604,6 +609,7 @@ int drm_mode_hsync(const struct drm_display_mode *mode)
return calc_val;
}
EXPORT_SYMBOL(drm_mode_hsync);
/**
* drm_mode_vrefresh - get the vrefresh of a mode
@ -645,6 +651,7 @@ int drm_mode_vrefresh(const struct drm_display_mode *mode)
}
return refresh;
}
EXPORT_SYMBOL(drm_mode_vrefresh);
/**
* drm_mode_set_crtcinfo - set CRTC modesetting parameters
@ -678,8 +685,6 @@ void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
p->crtc_vsync_end /= 2;
p->crtc_vtotal /= 2;
}
p->crtc_vtotal |= 1;
}
if (p->flags & DRM_MODE_FLAG_DBLSCAN) {
@ -700,12 +705,31 @@ void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal);
p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay);
p->crtc_hblank_end = max(p->crtc_hsync_end, p->crtc_htotal);
p->crtc_hadjusted = false;
p->crtc_vadjusted = false;
}
EXPORT_SYMBOL(drm_mode_set_crtcinfo);
/**
* drm_mode_copy - copy the mode
* @dst: mode to overwrite
* @src: mode to copy
*
* LOCKING:
* None.
*
* Copy an existing mode into another mode, preserving the object id
* of the destination mode.
*/
void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src)
{
int id = dst->base.id;
*dst = *src;
dst->base.id = id;
INIT_LIST_HEAD(&dst->head);
}
EXPORT_SYMBOL(drm_mode_copy);
/**
* drm_mode_duplicate - allocate and duplicate an existing mode
* @m: mode to duplicate
@ -720,18 +744,16 @@ struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
const struct drm_display_mode *mode)
{
struct drm_display_mode *nmode;
int new_id;
nmode = drm_mode_create(dev);
if (!nmode)
return NULL;
new_id = nmode->base.id;
*nmode = *mode;
nmode->base.id = new_id;
INIT_LIST_HEAD(&nmode->head);
drm_mode_copy(nmode, mode);
return nmode;
}
EXPORT_SYMBOL(drm_mode_duplicate);
/**
* drm_mode_equal - test modes for equality
@ -744,9 +766,9 @@ struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
* Check to see if @mode1 and @mode2 are equivalent.
*
* RETURNS:
* true if the modes are equal, false otherwise.
* True if the modes are equal, false otherwise.
*/
bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2)
bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2)
{
/* do clock check convert to PICOS so fb modes get matched
* the same */
@ -771,6 +793,7 @@ bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mod
return false;
}
EXPORT_SYMBOL(drm_mode_equal);
/**
* drm_mode_validate_size - make sure modes adhere to size constraints
@ -804,6 +827,7 @@ void drm_mode_validate_size(struct drm_device *dev,
mode->status = MODE_VIRTUAL_Y;
}
}
EXPORT_SYMBOL(drm_mode_validate_size);
/**
* drm_mode_validate_clocks - validate modes against clock limits
@ -840,6 +864,7 @@ void drm_mode_validate_clocks(struct drm_device *dev,
mode->status = MODE_CLOCK_RANGE;
}
}
EXPORT_SYMBOL(drm_mode_validate_clocks);
/**
* drm_mode_prune_invalid - remove invalid modes from mode list
@ -871,6 +896,7 @@ void drm_mode_prune_invalid(struct drm_device *dev,
}
}
}
EXPORT_SYMBOL(drm_mode_prune_invalid);
/**
* drm_mode_compare - compare modes for favorability
@ -918,6 +944,7 @@ void drm_mode_sort(struct list_head *mode_list)
{
drm_list_sort(NULL, mode_list, drm_mode_compare);
}
EXPORT_SYMBOL(drm_mode_sort);
/**
* drm_mode_connector_list_update - update the mode list for the connector
@ -959,6 +986,7 @@ void drm_mode_connector_list_update(struct drm_connector *connector)
}
}
}
EXPORT_SYMBOL(drm_mode_connector_list_update);
/**
* drm_mode_parse_command_line_for_connector - parse command line for connector
@ -986,7 +1014,7 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
int i;
enum drm_connector_force force = DRM_FORCE_UNSPECIFIED;
#ifdef XXX_CONFIG_FB
#ifdef CONFIG_FB
if (!mode_option)
mode_option = fb_mode_option;
#endif
@ -1003,7 +1031,7 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
case '@':
if (!refresh_specified && !bpp_specified &&
!yres_specified && !cvt && !rb && was_digit) {
refresh = strtol(&name[i+1], NULL, 10);
refresh = simple_strtol(&name[i+1], NULL, 10);
refresh_specified = true;
was_digit = false;
} else
@ -1012,7 +1040,7 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
case '-':
if (!bpp_specified && !yres_specified && !cvt &&
!rb && was_digit) {
bpp = strtol(&name[i+1], NULL, 10);
bpp = simple_strtol(&name[i+1], NULL, 10);
bpp_specified = true;
was_digit = false;
} else
@ -1020,7 +1048,7 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
break;
case 'x':
if (!yres_specified && was_digit) {
yres = strtol(&name[i+1], NULL, 10);
yres = simple_strtol(&name[i+1], NULL, 10);
yres_specified = true;
was_digit = false;
} else
@ -1080,7 +1108,7 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
if (i < 0 && yres_specified) {
char *ch;
xres = strtol(name, &ch, 10);
xres = simple_strtol(name, &ch, 10);
if ((ch != NULL) && (*ch == 'x'))
res_specified = true;
else
@ -1091,7 +1119,8 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
}
done:
if (i >= 0) {
printf("parse error at position %i in video mode '%s'\n",
DRM_WARNING(
"parse error at position %i in video mode '%s'\n",
i, name);
mode->specified = false;
return false;
@ -1120,6 +1149,7 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
return true;
}
EXPORT_SYMBOL(drm_mode_parse_command_line_for_connector);
struct drm_display_mode *
drm_mode_create_from_cmdline_mode(struct drm_device *dev,
@ -1145,3 +1175,4 @@ drm_mode_create_from_cmdline_mode(struct drm_device *dev,
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
return mode;
}
EXPORT_SYMBOL(drm_mode_create_from_cmdline_mode);

View File

@ -0,0 +1,393 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <dev/drm2/drmP.h>
#include <dev/agp/agpreg.h>
#include <dev/pci/pcireg.h>
devclass_t drm_devclass;
MALLOC_DEFINE(DRM_MEM_DMA, "drm_dma", "DRM DMA Data Structures");
MALLOC_DEFINE(DRM_MEM_SAREA, "drm_sarea", "DRM SAREA Data Structures");
MALLOC_DEFINE(DRM_MEM_DRIVER, "drm_driver", "DRM DRIVER Data Structures");
MALLOC_DEFINE(DRM_MEM_MAGIC, "drm_magic", "DRM MAGIC Data Structures");
MALLOC_DEFINE(DRM_MEM_MINOR, "drm_minor", "DRM MINOR Data Structures");
MALLOC_DEFINE(DRM_MEM_IOCTLS, "drm_ioctls", "DRM IOCTL Data Structures");
MALLOC_DEFINE(DRM_MEM_MAPS, "drm_maps", "DRM MAP Data Structures");
MALLOC_DEFINE(DRM_MEM_BUFS, "drm_bufs", "DRM BUFFER Data Structures");
MALLOC_DEFINE(DRM_MEM_SEGS, "drm_segs", "DRM SEGMENTS Data Structures");
MALLOC_DEFINE(DRM_MEM_PAGES, "drm_pages", "DRM PAGES Data Structures");
MALLOC_DEFINE(DRM_MEM_FILES, "drm_files", "DRM FILE Data Structures");
MALLOC_DEFINE(DRM_MEM_QUEUES, "drm_queues", "DRM QUEUE Data Structures");
MALLOC_DEFINE(DRM_MEM_CMDS, "drm_cmds", "DRM COMMAND Data Structures");
MALLOC_DEFINE(DRM_MEM_MAPPINGS, "drm_mapping", "DRM MAPPING Data Structures");
MALLOC_DEFINE(DRM_MEM_BUFLISTS, "drm_buflists", "DRM BUFLISTS Data Structures");
MALLOC_DEFINE(DRM_MEM_AGPLISTS, "drm_agplists", "DRM AGPLISTS Data Structures");
MALLOC_DEFINE(DRM_MEM_CTXBITMAP, "drm_ctxbitmap",
"DRM CTXBITMAP Data Structures");
MALLOC_DEFINE(DRM_MEM_SGLISTS, "drm_sglists", "DRM SGLISTS Data Structures");
MALLOC_DEFINE(DRM_MEM_MM, "drm_sman", "DRM MEMORY MANAGER Data Structures");
MALLOC_DEFINE(DRM_MEM_HASHTAB, "drm_hashtab", "DRM HASHTABLE Data Structures");
MALLOC_DEFINE(DRM_MEM_KMS, "drm_kms", "DRM KMS Data Structures");
MALLOC_DEFINE(DRM_MEM_VBLANK, "drm_vblank", "DRM VBLANK Handling Data");
const char *fb_mode_option = NULL;
#define	NSEC_PER_USEC	1000L
#define	NSEC_PER_SEC	1000000000L

/*
 * Convert a struct timeval to a count of nanoseconds.
 * Mirrors the Linux timeval_to_ns() helper.
 */
int64_t
timeval_to_ns(const struct timeval *tv)
{

	return ((int64_t)tv->tv_sec * NSEC_PER_SEC +
	    tv->tv_usec * NSEC_PER_USEC);
}

/*
 * Convert a count of nanoseconds back to a struct timeval,
 * rounding toward minus infinity so that negative values get a
 * normalized (non-negative) tv_usec.  Mirrors Linux ns_to_timeval().
 */
struct timeval
ns_to_timeval(const int64_t nsec)
{
	struct timeval tv = { .tv_sec = 0, .tv_usec = 0 };
	long rem;

	if (nsec != 0) {
		tv.tv_sec = nsec / NSEC_PER_SEC;
		rem = nsec % NSEC_PER_SEC;
		if (rem < 0) {
			/* Normalize: borrow one second so 0 <= rem < 1s. */
			tv.tv_sec--;
			rem += NSEC_PER_SEC;
		}
		tv.tv_usec = rem / 1000;
	}
	return (tv);
}
/*
 * Look up the given PCI vendor/device pair in a driver-supplied ID
 * list.  An entry with device == 0 acts as a wildcard for that vendor.
 * Returns NULL when no entry matches.
 */
static drm_pci_id_list_t *
drm_find_description(int vendor, int device, drm_pci_id_list_t *idlist)
{
	drm_pci_id_list_t *entry;

	for (entry = idlist; entry->vendor != 0; entry++) {
		if (entry->vendor != vendor)
			continue;
		if (entry->device == device || entry->device == 0)
			return (entry);
	}
	return (NULL);
}
/*
 * drm_probe_helper: called by a driver at the end of its probe
 * method.  Matches the PCI function against the driver's ID list and,
 * on success, installs a device description.  Returns 0 on a match or
 * -ENXIO when the function is not a display adapter we recognize.
 */
int
drm_probe_helper(device_t kdev, drm_pci_id_list_t *idlist)
{
	drm_pci_id_list_t *id_entry;

	/* Only consider VGA / other display-class PCI functions. */
	if (pci_get_class(kdev) != PCIC_DISPLAY ||
	    (pci_get_subclass(kdev) != PCIS_DISPLAY_VGA &&
	    pci_get_subclass(kdev) != PCIS_DISPLAY_OTHER))
		return (-ENXIO);

	id_entry = drm_find_description(pci_get_vendor(kdev),
	    pci_get_device(kdev), idlist);
	if (id_entry == NULL)
		return (-ENXIO);

	/* Install a description unless the bus glue already set one. */
	if (device_get_desc(kdev) == NULL) {
		DRM_DEBUG("%s desc: %s\n",
		    device_get_nameunit(kdev), id_entry->name);
		device_set_desc(kdev, id_entry->name);
	}
	return (0);
}
/*
 * drm_attach_helper: called by a driver at the end of its attach
 * method.  Records the matching ID-list entry in the softc and hands
 * control to the generic PCI attach path.
 */
int
drm_attach_helper(device_t kdev, drm_pci_id_list_t *idlist,
    struct drm_driver *driver)
{
	struct drm_device *dev;

	dev = device_get_softc(kdev);
	dev->id_entry = drm_find_description(pci_get_vendor(kdev),
	    pci_get_device(kdev), idlist);

	return (drm_get_pci_dev(kdev, dev, driver));
}
/*
 * Generic detach method shared by the DRM drivers: tear down the DRM
 * device, then release the PCI memory resources drm_bufs.c may have
 * allocated and disable bus mastering.
 */
int
drm_generic_detach(device_t kdev)
{
	struct drm_device *dev;
	int i;

	dev = device_get_softc(kdev);

	drm_put_dev(dev);

	/* Clean up PCI resources allocated by drm_bufs.c.  We're not really
	 * worried about resource consumption while the DRM is inactive
	 * (between lastclose and firstopen or unload) because these aren't
	 * actually taking up KVA, just keeping the PCI resource allocated.
	 */
	for (i = 0; i < DRM_MAX_PCI_RESOURCE; i++) {
		if (dev->pcir[i] != NULL) {
			bus_release_resource(dev->dev, SYS_RES_MEMORY,
			    dev->pcirid[i], dev->pcir[i]);
			dev->pcir[i] = NULL;
		}
	}

	if (pci_disable_busmaster(dev->dev))
		DRM_ERROR("Request to disable bus-master failed.\n");

	return (0);
}
/*
 * Publish per-device sysctl nodes under @top: the PCI bus ID string
 * ("busid") and whether the driver runs in modesetting/KMS mode
 * ("modesetting").  Returns 0 on success or -ENOMEM when a node
 * cannot be created.
 */
int
drm_add_busid_modesetting(struct drm_device *dev, struct sysctl_ctx_list *ctx,
    struct sysctl_oid *top)
{
	struct sysctl_oid *node;

	/* Format the bus ID, e.g. "pci:0000:01:00.0". */
	snprintf(dev->busid_str, sizeof(dev->busid_str),
	    "pci:%04x:%02x:%02x.%d", dev->pci_domain, dev->pci_bus,
	    dev->pci_slot, dev->pci_func);
	node = SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "busid",
	    CTLFLAG_RD, dev->busid_str, 0, NULL);
	if (node == NULL)
		return (-ENOMEM);

	dev->modesetting = (dev->driver->driver_features & DRIVER_MODESET) != 0;
	node = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO,
	    "modesetting", CTLFLAG_RD, &dev->modesetting, 0, NULL);
	if (node == NULL)
		return (-ENOMEM);

	return (0);
}
/*
 * Return non-zero when the underlying PCI device advertises the given
 * capability; pci_find_cap() returns 0 on success.
 */
static int
drm_device_find_capability(struct drm_device *dev, int cap)
{
return (pci_find_cap(dev->dev, cap, NULL) == 0);
}
/*
 * Decide whether the device should be treated as AGP.  A driver may
 * override the generic test with its device_is_agp() hook; otherwise
 * (or when the hook asks for a fallback) probe the PCI AGP capability.
 */
int
drm_pci_device_is_agp(struct drm_device *dev)
{
	int ret;

	if (dev->driver->device_is_agp != NULL) {
		/* The hook returns a tristate: 0 = not AGP, 1 = definitely
		 * AGP, 2 (DRM_MIGHT_BE_AGP) = fall back to the PCI
		 * capability check below.
		 */
		ret = (*dev->driver->device_is_agp)(dev);
		if (ret != DRM_MIGHT_BE_AGP)
			return ret;
	}

	return (drm_device_find_capability(dev, PCIY_AGP));
}
/*
 * Return non-zero when the device is PCI Express, i.e. it exposes the
 * PCIe capability.
 */
int
drm_pci_device_is_pcie(struct drm_device *dev)
{
return (drm_device_find_capability(dev, PCIY_EXPRESS));
}
/*
 * Check whether a single DMI match entry applies to the running system.
 *
 * Board strings come from the kernel environment variables
 * "smbios.planar.maker" and "smbios.planar.product".  Every populated
 * slot in dsi->matches must compare equal for the entry to match;
 * DMI_NONE slots are skipped.
 */
static bool
dmi_found(const struct dmi_system_id *dsi)
{
	char *hw_vendor, *hw_prod;
	int i, slot;
	bool res;

	hw_vendor = kern_getenv("smbios.planar.maker");
	hw_prod = kern_getenv("smbios.planar.product");
	res = true;
	for (i = 0; i < nitems(dsi->matches); i++) {
		slot = dsi->matches[i].slot;
		switch (slot) {
		case DMI_NONE:
			/* Unused match slot — nothing to compare. */
			break;
		case DMI_SYS_VENDOR:
		case DMI_BOARD_VENDOR:
			/* Both vendor slots compare against the planar maker. */
			if (hw_vendor != NULL &&
			    !strcmp(hw_vendor, dsi->matches[i].substr)) {
				break;
			} else {
				res = false;
				goto out;
			}
		case DMI_PRODUCT_NAME:
		case DMI_BOARD_NAME:
			/* Both product slots compare against the planar product. */
			if (hw_prod != NULL &&
			    !strcmp(hw_prod, dsi->matches[i].substr)) {
				break;
			} else {
				res = false;
				goto out;
			}
		default:
			/* Unsupported match type: treat as no match. */
			res = false;
			goto out;
		}
	}
out:
	/*
	 * NOTE(review): kern_getenv() returns NULL for unset variables;
	 * this relies on freeenv() tolerating NULL — confirm.
	 */
	freeenv(hw_vendor);
	freeenv(hw_prod);
	return (res);
}
/*
 * Walk a DMI_NONE-terminated table of system IDs and report whether any
 * entry matches this machine.  A matching entry's callback, if present,
 * may stop the scan early by returning non-zero.
 */
bool
dmi_check_system(const struct dmi_system_id *sysid)
{
	const struct dmi_system_id *entry;
	bool found;

	found = false;
	for (entry = sysid; entry->matches[0].slot != 0; entry++) {
		if (!dmi_found(entry))
			continue;
		found = true;
		if (entry->callback != NULL && entry->callback(entry))
			break;
	}
	return (found);
}
int
drm_mtrr_add(unsigned long offset, unsigned long size, unsigned int flags)
{
int act;
struct mem_range_desc mrdesc;
mrdesc.mr_base = offset;
mrdesc.mr_len = size;
mrdesc.mr_flags = flags;
act = MEMRANGE_SET_UPDATE;
strlcpy(mrdesc.mr_owner, "drm", sizeof(mrdesc.mr_owner));
return (-mem_range_attr_set(&mrdesc, &act));
}
/*
 * Remove a memory-range attribute previously installed by drm_mtrr_add().
 * The handle is unused on FreeBSD; ranges are identified by base/len.
 */
int
drm_mtrr_del(int handle __unused, unsigned long offset, unsigned long size,
    unsigned int flags)
{
	struct mem_range_desc desc;
	int action;

	desc.mr_base = offset;
	desc.mr_len = size;
	desc.mr_flags = flags;
	strlcpy(desc.mr_owner, "drm", sizeof(desc.mr_owner));
	action = MEMRANGE_SET_REMOVE;
	return (-mem_range_attr_set(&desc, &action));
}
/*
 * Flush CPU caches covering the given physical pages so their contents
 * become visible to the device.
 */
void
drm_clflush_pages(vm_page_t *pages, unsigned long num_pages)
{
#if defined(__i386__) || defined(__amd64__)
	pmap_invalidate_cache_pages(pages, num_pages);
#else
	/* No cache-flush primitive wired up for this architecture. */
	DRM_ERROR("drm_clflush_pages not implemented on this architecture");
#endif
}
/*
 * Flush CPU caches covering a kernel virtual address range.
 * NOTE(review): the third (TRUE) argument selects a variant of
 * pmap_invalidate_cache_range() — confirm its meaning (forced flush?)
 * against the pmap implementation.
 */
void
drm_clflush_virt_range(char *addr, unsigned long length)
{
#if defined(__i386__) || defined(__amd64__)
	pmap_invalidate_cache_range((vm_offset_t)addr,
	    (vm_offset_t)addr + length, TRUE);
#else
	/* No cache-flush primitive wired up for this architecture. */
	DRM_ERROR("drm_clflush_virt_range not implemented on this architecture");
#endif
}
#if DRM_LINUX

#include <sys/sysproto.h>

MODULE_DEPEND(DRIVER_NAME, linux, 1, 1, 1);

/* Linux reserves ioctl group 'd' (0x64) for DRM. */
#define LINUX_IOCTL_DRM_MIN 0x6400
#define LINUX_IOCTL_DRM_MAX 0x64ff

static linux_ioctl_function_t drm_linux_ioctl;
static struct linux_ioctl_handler drm_handler = {drm_linux_ioctl,
    LINUX_IOCTL_DRM_MIN, LINUX_IOCTL_DRM_MAX};

/* The bits for in/out are switched on Linux */
#define LINUX_IOC_IN IOC_OUT
#define LINUX_IOC_OUT IOC_IN

/*
 * Linuxulator hook: rewrite the direction bits of a Linux DRM ioctl
 * command into the native FreeBSD encoding, then forward the request to
 * the regular ioctl(2) path.
 */
static int
drm_linux_ioctl(DRM_STRUCTPROC *p, struct linux_ioctl_args* args)
{
	int error;
	int cmd = args->cmd;

	/* Clear the (Linux-encoded) direction bits, then set the native ones. */
	args->cmd &= ~(LINUX_IOC_IN | LINUX_IOC_OUT);
	if (cmd & LINUX_IOC_IN)
		args->cmd |= IOC_IN;
	if (cmd & LINUX_IOC_OUT)
		args->cmd |= IOC_OUT;

	error = ioctl(p, (struct ioctl_args *)args);

	return error;
}
#endif /* DRM_LINUX */
/*
 * Module event handler for the DRM core: fetch the debugging tunables
 * when the module is loaded.  All other events are accepted silently.
 */
static int
drm_modevent(module_t mod, int type, void *data)
{

	if (type == MOD_LOAD) {
		TUNABLE_INT_FETCH("drm.debug", &drm_debug);
		TUNABLE_INT_FETCH("drm.notyet", &drm_notyet);
	}
	return (0);
}
/* Kernel module glue for the "drmn" (new DRM) core module. */
static moduledata_t drm_mod = {
	"drmn",
	drm_modevent,
	0
};
DECLARE_MODULE(drmn, drm_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
MODULE_VERSION(drmn, 1);
/*
 * Core dependencies: agp, pci, and /dev/mem (for MTRR updates).
 * NOTE(review): iicbus is presumably for DDC/EDID probing — confirm.
 */
MODULE_DEPEND(drmn, agp, 1, 1, 1);
MODULE_DEPEND(drmn, pci, 1, 1, 1);
MODULE_DEPEND(drmn, mem, 1, 1, 1);
MODULE_DEPEND(drmn, iicbus, 1, 1, 1);

View File

@ -6,6 +6,9 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#ifndef _DRM_OS_FREEBSD_H_
#define _DRM_OS_FREEBSD_H_
#include <sys/fbio.h>
#if _BYTE_ORDER == _BIG_ENDIAN
@ -14,6 +17,19 @@ __FBSDID("$FreeBSD$");
#define __LITTLE_ENDIAN 1234
#endif
#ifdef __LP64__
#define BITS_PER_LONG 64
#else
#define BITS_PER_LONG 32
#endif
#ifndef __user
#define __user
#endif
#ifndef __iomem
#define __iomem
#endif
#define cpu_to_le16(x) htole16(x)
#define le16_to_cpu(x) le16toh(x)
#define cpu_to_le32(x) htole32(x)
@ -26,31 +42,96 @@ __FBSDID("$FreeBSD$");
#define be32_to_cpup(x) be32toh(*x)
typedef vm_paddr_t dma_addr_t;
typedef vm_paddr_t resource_size_t;
#define wait_queue_head_t atomic_t
typedef uint64_t u64;
typedef uint32_t u32;
typedef uint16_t u16;
typedef uint8_t u8;
typedef uint8_t u8;
typedef int64_t s64;
typedef int32_t s32;
typedef int16_t s16;
typedef int8_t s8;
typedef int32_t __be32;
typedef int8_t s8;
typedef uint16_t __le16;
typedef uint32_t __le32;
typedef uint64_t __le64;
typedef uint16_t __be16;
typedef uint32_t __be32;
typedef uint64_t __be64;
#define DRM_IRQ_ARGS void *arg
typedef void irqreturn_t;
#define IRQ_HANDLED /* nothing */
#define IRQ_NONE /* nothing */
#define __init
#define __exit
#define __read_mostly
#define WARN_ON(cond) KASSERT(!(cond), ("WARN ON: " #cond))
#define WARN_ON_SMP(cond) WARN_ON(cond)
#define BUG_ON(cond) KASSERT(!(cond), ("BUG ON: " #cond))
#define unlikely(x) __builtin_expect(!!(x), 0)
#define likely(x) __builtin_expect(!!(x), 1)
#define container_of(ptr, type, member) ({ \
__typeof( ((type *)0)->member ) *__mptr = (ptr); \
(type *)( (char *)__mptr - offsetof(type,member) );})
#define DRM_HZ hz
#define DRM_UDELAY(udelay) DELAY(udelay)
#define DRM_MDELAY(msecs) do { int loops = (msecs); \
#define KHZ2PICOS(a) (1000000000UL/(a))
#define ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0]))
#define HZ hz
#define DRM_HZ hz
#define DRM_CURRENTPID curthread->td_proc->p_pid
#define DRM_SUSER(p) (priv_check(p, PRIV_DRIVER) == 0)
#define udelay(usecs) DELAY(usecs)
#define mdelay(msecs) do { int loops = (msecs); \
while (loops--) DELAY(1000); \
} while (0)
#define DRM_MSLEEP(msecs) drm_msleep((msecs), "drm_msleep")
#define DRM_TIME_SLICE (hz/20) /* Time slice for GLXContexts */
#define DRM_UDELAY(udelay) DELAY(udelay)
#define drm_msleep(x, msg) pause((msg), ((int64_t)(x)) * hz / 1000)
#define DRM_MSLEEP(msecs) drm_msleep((msecs), "drm_msleep")
#define DRM_READ8(map, offset) \
*(volatile u_int8_t *)(((vm_offset_t)(map)->handle) + \
(vm_offset_t)(offset))
#define DRM_READ16(map, offset) \
le16toh(*(volatile u_int16_t *)(((vm_offset_t)(map)->handle) + \
(vm_offset_t)(offset)))
#define DRM_READ32(map, offset) \
le32toh(*(volatile u_int32_t *)(((vm_offset_t)(map)->handle) + \
(vm_offset_t)(offset)))
#define DRM_READ64(map, offset) \
le64toh(*(volatile u_int64_t *)(((vm_offset_t)(map)->handle) + \
(vm_offset_t)(offset)))
#define DRM_WRITE8(map, offset, val) \
*(volatile u_int8_t *)(((vm_offset_t)(map)->handle) + \
(vm_offset_t)(offset)) = val
#define DRM_WRITE16(map, offset, val) \
*(volatile u_int16_t *)(((vm_offset_t)(map)->handle) + \
(vm_offset_t)(offset)) = htole16(val)
#define DRM_WRITE32(map, offset, val) \
*(volatile u_int32_t *)(((vm_offset_t)(map)->handle) + \
(vm_offset_t)(offset)) = htole32(val)
#define DRM_WRITE64(map, offset, val) \
*(volatile u_int64_t *)(((vm_offset_t)(map)->handle) + \
(vm_offset_t)(offset)) = htole64(val)
/* DRM_READMEMORYBARRIER() prevents reordering of reads.
* DRM_WRITEMEMORYBARRIER() prevents reordering of writes.
* DRM_MEMORYBARRIER() prevents reordering of reads and writes.
*/
#define DRM_READMEMORYBARRIER() rmb()
#define DRM_WRITEMEMORYBARRIER() wmb()
#define DRM_MEMORYBARRIER() mb()
#define smp_rmb() rmb()
#define smp_mb__before_atomic_inc() mb()
#define smp_mb__after_atomic_inc() mb()
#define do_div(a, b) ((a) /= (b))
#define div64_u64(a, b) ((a) / (b))
#define lower_32_bits(n) ((u32)(n))
#define min_t(type, x, y) ({ \
@ -70,11 +151,14 @@ typedef int32_t __be32;
/* XXXKIB what is the right code for the FreeBSD ? */
/* kib@ used ENXIO here -- dumbbell@ */
#define EREMOTEIO EIO
#define ERESTARTSYS ERESTART
#define ERESTARTSYS 512 /* Same value as Linux. */
#define KTR_DRM KTR_DEV
#define KTR_DRM_REG KTR_SPARE3
#define DRM_AGP_KERN struct agp_info
#define DRM_AGP_MEM void
#define PCI_VENDOR_ID_APPLE 0x106b
#define PCI_VENDOR_ID_ASUSTEK 0x1043
#define PCI_VENDOR_ID_ATI 0x1002
@ -92,6 +176,7 @@ typedef int32_t __be32;
static inline unsigned long
roundup_pow_of_two(unsigned long x)
{
return (1UL << flsl(x - 1));
}
@ -102,8 +187,10 @@ roundup_pow_of_two(unsigned long x)
*
* Source: include/linux/bitops.h
*/
static inline uint32_t ror32(uint32_t word, unsigned int shift)
static inline uint32_t
ror32(uint32_t word, unsigned int shift)
{
return (word >> shift) | (word << (32 - shift));
}
@ -116,31 +203,184 @@ static inline uint32_t ror32(uint32_t word, unsigned int shift)
/* Taken from linux/include/linux/unaligned/le_struct.h. */
struct __una_u32 { u32 x; } __packed;
static inline u32 __get_unaligned_cpu32(const void *p)
static inline u32
__get_unaligned_cpu32(const void *p)
{
const struct __una_u32 *ptr = (const struct __una_u32 *)p;
return ptr->x;
return (ptr->x);
}
static inline u32 get_unaligned_le32(const void *p)
static inline u32
get_unaligned_le32(const void *p)
{
return __get_unaligned_cpu32((const u8 *)p);
return (__get_unaligned_cpu32((const u8 *)p));
}
#else
/* Taken from linux/include/linux/unaligned/le_byteshift.h. */
static inline u32 __get_unaligned_le32(const u8 *p)
static inline u32
__get_unaligned_le32(const u8 *p)
{
return p[0] | p[1] << 8 | p[2] << 16 | p[3] << 24;
return (p[0] | p[1] << 8 | p[2] << 16 | p[3] << 24);
}
static inline u32 get_unaligned_le32(const void *p)
static inline u32
get_unaligned_le32(const void *p)
{
return __get_unaligned_le32((const u8 *)p);
return (__get_unaligned_le32((const u8 *)p));
}
#endif
/*
 * Integer base-2 logarithm via flsl() (which is 1-based, hence the -1).
 * NOTE(review): flsl(0) == 0, so ilog2(0) wraps to ULONG_MAX — callers
 * must not pass 0.
 */
static inline unsigned long
ilog2(unsigned long x)
{
	return (flsl(x) - 1);
}
/*
 * Absolute value for 64-bit signed integers.
 * NOTE(review): INT64_MIN has no positive counterpart; negating it is
 * undefined, matching the original expression's behavior.
 */
static inline int64_t
abs64(int64_t x)
{

	if (x < 0)
		return (-x);
	return (x);
}
int64_t timeval_to_ns(const struct timeval *tv);
struct timeval ns_to_timeval(const int64_t nsec);
#define PAGE_ALIGN(addr) round_page(addr)
#define drm_get_device_from_kdev(_kdev) (((struct drm_minor *)(_kdev)->si_drv1)->dev)
#define DRM_IOC_VOID IOC_VOID
#define DRM_IOC_READ IOC_OUT
#define DRM_IOC_WRITE IOC_IN
#define DRM_IOC_READWRITE IOC_INOUT
#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
/*
 * Copy n bytes from kernel space to user space.  Linux semantics:
 * returns the number of bytes NOT copied — all-or-nothing here, so n on
 * any copyout() failure and 0 on success.
 */
static inline long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	return (copyout(from, to, n) != 0 ? n : 0);
}
#define copy_to_user(to, from, n) __copy_to_user((to), (from), (n))
/*
 * Store a size-byte object located at x into the user pointer ptr.
 * Returns 0 on success, -EFAULT on fault.
 */
static inline int
__put_user(size_t size, void *ptr, void *x)
{
	size = copy_to_user(ptr, x, size);
	return (size ? -EFAULT : size);
}
#define put_user(x, ptr) __put_user(sizeof(*ptr), (ptr), &(x))
/*
 * Copy n bytes from user space into kernel space.  Linux semantics:
 * returns the number of bytes NOT copied — all-or-nothing here, so n on
 * any copyin() failure and 0 on success.
 */
static inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	return ((copyin(__DECONST(void *, from), to, n) != 0 ? n : 0));
}
#define copy_from_user(to, from, n) __copy_from_user((to), (from), (n))
/*
 * Fetch a size-byte object from the user pointer ptr into x.
 * Returns 0 on success, -EFAULT on fault.
 */
static inline int
__get_user(size_t size, const void *ptr, void *x)
{
	size = copy_from_user(x, ptr, size);
	return (size ? -EFAULT : size);
}
#define get_user(x, ptr) __get_user(sizeof(*ptr), (ptr), &(x))
#define sigemptyset(set) SIGEMPTYSET(set)
#define sigaddset(set, sig) SIGADDSET(set, sig)
#define DRM_LOCK(dev) sx_xlock(&(dev)->dev_struct_lock)
#define DRM_UNLOCK(dev) sx_xunlock(&(dev)->dev_struct_lock)
#define jiffies ticks
#define jiffies_to_msecs(x) (((int64_t)(x)) * 1000 / hz)
#define msecs_to_jiffies(x) (((int64_t)(x)) * hz / 1000)
#define time_after(a,b) ((long)(b) - (long)(a) < 0)
#define time_after_eq(a,b) ((long)(b) - (long)(a) <= 0)
#define wake_up(queue) wakeup((void *)queue)
#define wake_up_interruptible(queue) wakeup((void *)queue)
MALLOC_DECLARE(DRM_MEM_DMA);
MALLOC_DECLARE(DRM_MEM_SAREA);
MALLOC_DECLARE(DRM_MEM_DRIVER);
MALLOC_DECLARE(DRM_MEM_MAGIC);
MALLOC_DECLARE(DRM_MEM_MINOR);
MALLOC_DECLARE(DRM_MEM_IOCTLS);
MALLOC_DECLARE(DRM_MEM_MAPS);
MALLOC_DECLARE(DRM_MEM_BUFS);
MALLOC_DECLARE(DRM_MEM_SEGS);
MALLOC_DECLARE(DRM_MEM_PAGES);
MALLOC_DECLARE(DRM_MEM_FILES);
MALLOC_DECLARE(DRM_MEM_QUEUES);
MALLOC_DECLARE(DRM_MEM_CMDS);
MALLOC_DECLARE(DRM_MEM_MAPPINGS);
MALLOC_DECLARE(DRM_MEM_BUFLISTS);
MALLOC_DECLARE(DRM_MEM_AGPLISTS);
MALLOC_DECLARE(DRM_MEM_CTXBITMAP);
MALLOC_DECLARE(DRM_MEM_SGLISTS);
MALLOC_DECLARE(DRM_MEM_MM);
MALLOC_DECLARE(DRM_MEM_HASHTAB);
MALLOC_DECLARE(DRM_MEM_KMS);
MALLOC_DECLARE(DRM_MEM_VBLANK);
#define simple_strtol(a, b, c) strtol((a), (b), (c))
typedef struct drm_pci_id_list
{
int vendor;
int device;
long driver_private;
char *name;
} drm_pci_id_list_t;
#ifdef __i386__
#define CONFIG_X86 1
#endif
#ifdef __amd64__
#define CONFIG_X86 1
#define CONFIG_X86_64 1
#endif
#ifdef __ia64__
#define CONFIG_IA64 1
#endif
#if defined(__i386__) || defined(__amd64__)
#define CONFIG_ACPI
#endif
#define CONFIG_AGP 1
#define CONFIG_MTRR 1
#define CONFIG_FB 1
extern const char *fb_mode_option;
#define EXPORT_SYMBOL(x)
#define MODULE_AUTHOR(author)
#define MODULE_DESCRIPTION(desc)
#define MODULE_LICENSE(license)
#define MODULE_PARM_DESC(name, desc)
#define module_param_named(name, var, type, perm)
#define printk printf
#define KERN_DEBUG ""
struct fb_info * framebuffer_alloc(void);
void framebuffer_release(struct fb_info *info);
#define KIB_NOTYET() \
do { \
if (drm_debug_flag && drm_notyet_flag) \
if (drm_debug && drm_notyet) \
printf("NOTYET: %s at %s:%d\n", __func__, __FILE__, __LINE__); \
} while (0)
#endif /* _DRM_OS_FREEBSD_H_ */

View File

@ -1,5 +1,20 @@
/*-
* Copyright 2003 Eric Anholt.
/* drm_pci.h -- PCI DMA memory management wrappers for DRM -*- linux-c -*- */
/**
* \file drm_pci.c
* \brief Functions and ioctls to manage PCI memory
*
* \warning These interfaces aren't stable yet.
*
* \todo Implement the remaining ioctl's for the PCI pools.
* \todo The wrappers here are so thin that they would be better off inlined..
*
* \author José Fonseca <jrfonseca@tungstengraphics.com>
* \author Leif Delgass <ldelgass@retinalburn.net>
*/
/*
* Copyright 2003 José Fonseca.
* Copyright 2003 Leif Delgass.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -16,7 +31,7 @@
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
@ -24,15 +39,13 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/**
* \file drm_pci.h
* \brief PCI consistent, DMA-accessible memory allocation.
*
* \author Eric Anholt <anholt@FreeBSD.org>
*/
#include <dev/drm2/drmP.h>
static int drm_msi = 1; /* Enable by default. */
SYSCTL_NODE(_hw, OID_AUTO, drm, CTLFLAG_RW, NULL, "DRM device");
SYSCTL_INT(_hw_drm, OID_AUTO, msi, CTLFLAG_RDTUN, &drm_msi, 1,
"Enable MSI interrupts for drm devices");
/**********************************************************************/
/** \name PCI memory */
/*@{*/
@ -50,12 +63,10 @@ drm_pci_busdma_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error
}
/**
* \brief Allocate a physically contiguous DMA-accessible consistent
* memory block.
* \brief Allocate a PCI consistent memory block, for DMA.
*/
drm_dma_handle_t *
drm_pci_alloc(struct drm_device *dev, size_t size,
size_t align, dma_addr_t maxaddr)
drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size,
size_t align, dma_addr_t maxaddr)
{
drm_dma_handle_t *dmah;
int ret;
@ -77,7 +88,7 @@ drm_pci_alloc(struct drm_device *dev, size_t size,
DRM_ERROR("called while holding dma_lock\n");
ret = bus_dma_tag_create(
bus_get_dma_tag(dev->device), /* parent */
bus_get_dma_tag(dev->dev), /* parent */
align, 0, /* align, boundary */
maxaddr, BUS_SPACE_MAXADDR, /* lowaddr, highaddr */
NULL, NULL, /* filtfunc, filtfuncargs */
@ -109,11 +120,14 @@ drm_pci_alloc(struct drm_device *dev, size_t size,
return dmah;
}
EXPORT_SYMBOL(drm_pci_alloc);
/**
* \brief Free a DMA-accessible consistent memory block.
* \brief Free a PCI consistent memory block without freeing its descriptor.
*
* This function is for internal use in the Linux-specific DRM core code.
*/
void
drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah)
void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
{
if (dmah == NULL)
return;
@ -121,11 +135,306 @@ drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah)
bus_dmamap_unload(dmah->tag, dmah->map);
bus_dmamem_free(dmah->tag, dmah->vaddr, dmah->map);
bus_dma_tag_destroy(dmah->tag);
}
/**
* \brief Free a PCI consistent memory block
*/
void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
{
__drm_pci_free(dev, dmah);
free(dmah, DRM_MEM_DMA);
}
/*@}*/
EXPORT_SYMBOL(drm_pci_free);
/* PCI domain (segment) number of the device, cached at attach time. */
static int drm_get_pci_domain(struct drm_device *dev)
{
	return dev->pci_domain;
}
/*
 * Lazily allocate the device's IRQ resource and return the IRQ number.
 * Returns 0 when the resource cannot be allocated.  dev->irqrid was
 * preset (e.g. to 1 by drm_pci_enable_msi()) to select MSI vs INTx.
 */
static int drm_pci_get_irq(struct drm_device *dev)
{

	if (dev->irqr)
		return (dev->irq);

	dev->irqr = bus_alloc_resource_any(dev->dev, SYS_RES_IRQ,
	    &dev->irqrid, RF_SHAREABLE);
	if (!dev->irqr) {
		dev_err(dev->dev, "Failed to allocate IRQ\n");
		return (0);
	}

	dev->irq = (int) rman_get_start(dev->irqr);

	return (dev->irq);
}
/* Release the IRQ resource allocated by drm_pci_get_irq(), if any. */
static void drm_pci_free_irq(struct drm_device *dev)
{
	if (dev->irqr == NULL)
		return;

	bus_release_resource(dev->dev, SYS_RES_IRQ,
	    dev->irqrid, dev->irqr);

	dev->irqr = NULL;
	dev->irq = 0;
}
/* Name of the DRM driver bound to this device. */
static const char *drm_pci_get_name(struct drm_device *dev)
{
	return dev->driver->name;
}
/*
 * Build the canonical "pci:domain:bus:slot.func" bus ID string for the
 * master.  Allocates master->unique (40 bytes, M_NOWAIT).  Returns 0 on
 * success, -ENOMEM on allocation failure, -EINVAL if the formatted
 * string would not fit.  NOTE(review): on the -EINVAL path the buffer
 * stays in master->unique — presumably freed by master teardown;
 * confirm.
 */
int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
{
	int len, ret;

	master->unique_len = 40;
	master->unique_size = master->unique_len;
	master->unique = malloc(master->unique_size, DRM_MEM_DRIVER, M_NOWAIT);
	if (master->unique == NULL)
		return -ENOMEM;

	len = snprintf(master->unique, master->unique_len,
	    "pci:%04x:%02x:%02x.%d",
	    dev->pci_domain,
	    dev->pci_bus,
	    dev->pci_slot,
	    dev->pci_func);

	if (len >= master->unique_len) {
		DRM_ERROR("buffer overflow");
		ret = -EINVAL;
		goto err;
	} else
		master->unique_len = len;

	return 0;
err:
	return ret;
}
/*
 * Adopt a user-supplied bus ID string ("PCI:bus:slot:func") for the
 * master and verify it names this very device.  The high bits of the
 * parsed bus number carry the PCI domain.  Returns 0 on success,
 * -EFAULT if the string cannot be copied in, -EINVAL on parse failure
 * or device mismatch.  NOTE(review): master->unique is not freed on the
 * error paths — presumably cleaned up by master teardown; confirm.
 */
int drm_pci_set_unique(struct drm_device *dev,
		       struct drm_master *master,
		       struct drm_unique *u)
{
	int domain, bus, slot, func, ret;

	master->unique_len = u->unique_len;
	master->unique_size = u->unique_len + 1;
	master->unique = malloc(master->unique_size, DRM_MEM_DRIVER, M_WAITOK);
	if (!master->unique) {
		/* NOTE(review): malloc(M_WAITOK) should not return NULL;
		 * check kept for safety. */
		ret = -ENOMEM;
		goto err;
	}

	if (copy_from_user(master->unique, u->unique, master->unique_len)) {
		ret = -EFAULT;
		goto err;
	}

	master->unique[master->unique_len] = '\0';

	/* Return error if the busid submitted doesn't match the device's actual
	 * busid.
	 */
	ret = sscanf(master->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
	if (ret != 3) {
		ret = -EINVAL;
		goto err;
	}

	/* The domain is encoded in the upper bits of the bus field. */
	domain = bus >> 8;
	bus &= 0xff;

	if ((domain != dev->pci_domain) ||
	    (bus != dev->pci_bus) ||
	    (slot != dev->pci_slot) ||
	    (func != dev->pci_func)) {
		ret = -EINVAL;
		goto err;
	}

	return 0;
err:
	return ret;
}
/*
 * Report the device IRQ if the caller's domain/bus/slot/function match
 * this device; -EINVAL otherwise.  The domain rides in the upper bits
 * of p->busnum.
 */
static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
{
	if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
	    (p->busnum & 0xff) != dev->pci_bus ||
	    p->devnum != dev->pci_slot || p->funcnum != dev->pci_func)
		return -EINVAL;

	p->irq = dev->irq;

	DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
	    p->irq);

	return 0;
}
/*
 * AGP setup for the PCI bus backend.  If the device is AGP, initialize
 * dev->agp; a DRIVER_REQUIRE_AGP driver fails with -EINVAL when that
 * does not work.  With MTRR support, mark the AGP aperture
 * write-combining (agp_mtrr = 1 on success, -1 on failure).
 */
int drm_pci_agp_init(struct drm_device *dev)
{
	if (drm_core_has_AGP(dev)) {
		if (drm_pci_device_is_agp(dev))
			dev->agp = drm_agp_init(dev);
		if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP)
		    && (dev->agp == NULL)) {
			DRM_ERROR("Cannot initialize the agpgart module.\n");
			return -EINVAL;
		}
		if (drm_core_has_MTRR(dev)) {
			if (dev->agp && dev->agp->agp_info.ai_aperture_base != 0) {
				if (drm_mtrr_add(dev->agp->agp_info.ai_aperture_base,
				    dev->agp->agp_info.ai_aperture_size, DRM_MTRR_WC) == 0)
					dev->agp->agp_mtrr = 1;
				else
					dev->agp->agp_mtrr = -1;
			}
		}
	}
	return 0;
}
/* Bus abstraction hooks for PCI-attached DRM devices. */
static struct drm_bus drm_pci_bus = {
	.bus_type = DRIVER_BUS_PCI,
	.get_irq = drm_pci_get_irq,
	.free_irq = drm_pci_free_irq,
	.get_name = drm_pci_get_name,
	.set_busid = drm_pci_set_busid,
	.set_unique = drm_pci_set_unique,
	.irq_by_busid = drm_pci_irq_by_busid,
	.agp_init = drm_pci_agp_init,
};
/**
* Register.
*
* \param pdev - PCI device structure
* \param ent entry from the PCI ID table with device type flags
* \return zero on success or a negative number on failure.
*
* Attempt to gets inter module "drm" information. If we are first
* then register the character device and inter module information.
* Try and register, if we fail to register, backout previous work.
*/
/*
 * Attach a PCI DRM device: cache its PCI identity, fill in the generic
 * DRM state, create the control (KMS only) and legacy minors, run the
 * driver's load hook, and set up the legacy mode group.  All under
 * drm_global_mutex.  Returns 0 or a negative errno; the err_g* labels
 * unwind in exact reverse order of setup.
 */
int drm_get_pci_dev(device_t kdev, struct drm_device *dev,
    struct drm_driver *driver)
{
	int ret;

	DRM_DEBUG("\n");

	driver->bus = &drm_pci_bus;

	dev->dev = kdev;

	/* Cache the PCI address and IDs for busid/ioctl use. */
	dev->pci_domain = pci_get_domain(dev->dev);
	dev->pci_bus = pci_get_bus(dev->dev);
	dev->pci_slot = pci_get_slot(dev->dev);
	dev->pci_func = pci_get_function(dev->dev);

	dev->pci_vendor = pci_get_vendor(dev->dev);
	dev->pci_device = pci_get_device(dev->dev);
	dev->pci_subvendor = pci_get_subvendor(dev->dev);
	dev->pci_subdevice = pci_get_subdevice(dev->dev);

	sx_xlock(&drm_global_mutex);

	if ((ret = drm_fill_in_dev(dev, driver))) {
		DRM_ERROR("Failed to fill in dev: %d\n", ret);
		goto err_g1;
	}

	/* KMS drivers additionally get a control minor. */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
		if (ret)
			goto err_g2;
	}

	if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY)))
		goto err_g3;

	if (dev->driver->load) {
		ret = dev->driver->load(dev,
		    dev->id_entry->driver_private);
		if (ret)
			goto err_g4;
	}

	/* setup the grouping for the legacy output */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = drm_mode_group_init_legacy_group(dev,
		    &dev->primary->mode_group);
		if (ret)
			goto err_g5;
	}

#ifdef FREEBSD_NOTYET
	list_add_tail(&dev->driver_item, &driver->device_list);
#endif /* FREEBSD_NOTYET */

	DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
	    driver->name, driver->major, driver->minor, driver->patchlevel,
	    driver->date, device_get_nameunit(dev->dev), dev->primary->index);

	sx_xunlock(&drm_global_mutex);
	return 0;

err_g5:
	if (dev->driver->unload)
		dev->driver->unload(dev);
err_g4:
	drm_put_minor(&dev->primary);
err_g3:
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		drm_put_minor(&dev->control);
err_g2:
	drm_cancel_fill_in_dev(dev);
err_g1:
	sx_xunlock(&drm_global_mutex);
	return ret;
}
EXPORT_SYMBOL(drm_get_pci_dev);
/*
 * Try to switch the device to MSI interrupts, capped at one message.
 * Honours the hw.drm.msi tunable (drm_msi).  On success, msi_enabled is
 * set and irqrid becomes 1 so the next IRQ resource allocation picks
 * the MSI vector.  Returns 0 or a negative errno.
 */
int
drm_pci_enable_msi(struct drm_device *dev)
{
	int msicount, ret;

	if (!drm_msi)
		return (-ENOENT);

	msicount = pci_msi_count(dev->dev);
	DRM_DEBUG("MSI count = %d\n", msicount);
	/* The DRM core only uses a single interrupt message. */
	if (msicount > 1)
		msicount = 1;

	ret = pci_alloc_msi(dev->dev, &msicount);
	if (ret == 0) {
		DRM_INFO("MSI enabled %d message(s)\n", msicount);
		dev->msi_enabled = 1;
		dev->irqrid = 1;
	}

	return (-ret);
}
/*
 * Release MSI resources and fall back to rid 0 (legacy INTx) for
 * subsequent IRQ allocations.  No-op when MSI was never enabled.
 */
void
drm_pci_disable_msi(struct drm_device *dev)
{

	if (!dev->msi_enabled)
		return;

	pci_release_msi(dev->dev);
	dev->msi_enabled = 0;
	dev->irqrid = 0;
}
int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
{
@ -134,14 +443,14 @@ int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
u32 lnkcap = 0, lnkcap2 = 0;
*mask = 0;
if (!drm_device_is_pcie(dev))
if (!drm_pci_device_is_pcie(dev))
return -EINVAL;
root =
device_get_parent( /* pcib */
device_get_parent( /* `-- pci */
device_get_parent( /* `-- vgapci */
dev->device))); /* `-- drmn */
dev->dev))); /* `-- drmn */
pos = 0;
pci_find_cap(root, PCIY_EXPRESS, &pos);
@ -180,3 +489,4 @@ int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
DRM_INFO("probing gen 2 caps for device %x:%x = %x/%x\n", pci_get_vendor(root), pci_get_device(root), lnkcap, lnkcap2);
return 0;
}
EXPORT_SYMBOL(drm_pcie_get_speed_cap_mask);

View File

@ -49,12 +49,12 @@
{0x8086, 0x0166, CHIP_I9XX|CHIP_I915, "Intel IvyBridge (M)"}, \
{0x8086, 0x016A, CHIP_I9XX|CHIP_I915, "Intel IvyBridge (S)"}, \
{0x8086, 0x0402, CHIP_I9XX|CHIP_I915, "Intel Haswell"}, \
{0x8086, 0x0412, CHIP_I9XX|CHIP_I915, "Intel Haswell"}, \
{0x8086, 0x040a, CHIP_I9XX|CHIP_I915, "Intel Haswell (S)"}, \
{0x8086, 0x041a, CHIP_I9XX|CHIP_I915, "Intel Haswell (S)"}, \
{0x8086, 0x0406, CHIP_I9XX|CHIP_I915, "Intel Haswell (M)"}, \
{0x8086, 0x040A, CHIP_I9XX|CHIP_I915, "Intel Haswell (S)"}, \
{0x8086, 0x0412, CHIP_I9XX|CHIP_I915, "Intel Haswell"}, \
{0x8086, 0x0416, CHIP_I9XX|CHIP_I915, "Intel Haswell (M)"}, \
{0x8086, 0x0c16, CHIP_I9XX|CHIP_I915, "Intel Haswell (SDV)"}, \
{0x8086, 0x041A, CHIP_I9XX|CHIP_I915, "Intel Haswell (S)"}, \
{0x8086, 0x0C16, CHIP_I9XX|CHIP_I915, "Intel Haswell (SDV)"}, \
{0x8086, 0x2562, CHIP_I8XX, "Intel i845G GMCH"}, \
{0x8086, 0x2572, CHIP_I8XX, "Intel i865G GMCH"}, \
{0x8086, 0x2582, CHIP_I9XX|CHIP_I915, "Intel i915G"}, \
@ -574,6 +574,7 @@
{0x1002, 0x6819, CHIP_PITCAIRN|RADEON_NEW_MEMMAP, "Pitcairn PRO [Radeon HD 7800]"}, \
{0x1002, 0x6820, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Radeon HD 8800M Series"}, \
{0x1002, 0x6821, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Radeon HD 8800M Series"}, \
{0x1002, 0x6822, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Venus PRO [Radeon E8860]"}, \
{0x1002, 0x6823, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Radeon HD 8800M Series"}, \
{0x1002, 0x6824, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Chelsea [Radeon HD 7700M Series]"}, \
{0x1002, 0x6825, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Cape Verde [Radeon HD 7800M Series]"}, \
@ -581,11 +582,13 @@
{0x1002, 0x6827, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Cape Verde [Radeon HD 7800M Series]"}, \
{0x1002, 0x6828, CHIP_VERDE|RADEON_NEW_MEMMAP, "Cape Verde"}, \
{0x1002, 0x6829, CHIP_VERDE|RADEON_NEW_MEMMAP, "Cape Verde"}, \
{0x1002, 0x682A, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Venus PRO"}, \
{0x1002, 0x682B, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Radeon HD 8800M Series"}, \
{0x1002, 0x682D, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Unknown device name"}, \
{0x1002, 0x682F, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Cape Verde [Radeon HD 7700M Series]"}, \
{0x1002, 0x6830, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Cape Verde [Radeon HD 7800M Series]"}, \
{0x1002, 0x6831, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP, "Cape Verde [AMD Radeon HD 7700M Series]"}, \
{0x1002, 0x6835, CHIP_VERDE|RADEON_NEW_MEMMAP, "Cape Verde PRX [Radeon R9 255 OEM]"}, \
{0x1002, 0x6837, CHIP_VERDE|RADEON_NEW_MEMMAP, "Cape Verde LE [Radeon HD 7700 Series]"}, \
{0x1002, 0x6838, CHIP_VERDE|RADEON_NEW_MEMMAP, "Cape Verde"}, \
{0x1002, 0x6839, CHIP_VERDE|RADEON_NEW_MEMMAP, "Cape Verde"}, \
@ -915,6 +918,10 @@
{0x1002, 0x9908, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Trinity [Radeon HD 7600G]"}, \
{0x1002, 0x9909, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Trinity [Radeon HD 7500G]"}, \
{0x1002, 0x990A, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Trinity [Radeon HD 7500G]"}, \
{0x1002, 0x990B, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Richland [Radeon HD 8650G]"}, \
{0x1002, 0x990C, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Richland [Radeon HD 8670D]"}, \
{0x1002, 0x990D, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Richland [Radeon HD 8550G]"}, \
{0x1002, 0x990E, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Richland [Radeon HD 8570D]"}, \
{0x1002, 0x990F, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Unknown device name"}, \
{0x1002, 0x9910, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Trinity [Radeon HD 7660G]"}, \
{0x1002, 0x9913, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Trinity [Radeon HD 7640G]"}, \
@ -926,6 +933,15 @@
{0x1002, 0x9992, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Trinity [Radeon HD 7420G]"}, \
{0x1002, 0x9993, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Trinity [Radeon HD 7480D]"}, \
{0x1002, 0x9994, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Trinity [Radeon HD 7400G]"}, \
{0x1002, 0x9995, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Richland [Radeon HD 8450G]"}, \
{0x1002, 0x9996, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Richland [Radeon HD 8470D]"}, \
{0x1002, 0x9997, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Richland [Radeon HD 8350G]"}, \
{0x1002, 0x9998, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Richland [Radeon HD 8370D]"}, \
{0x1002, 0x9999, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Richland [Radeon HD 8510G]"}, \
{0x1002, 0x999A, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Richland [Radeon HD 8410G]"}, \
{0x1002, 0x999B, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Richland [Radeon HD 8310G]"}, \
{0x1002, 0x999C, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Richland"}, \
{0x1002, 0x999D, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Richland [Radeon HD 8550D]"}, \
{0x1002, 0x99A0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Trinity [Radeon HD 7520G]"}, \
{0x1002, 0x99A2, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Trinity [Radeon HD 7420G]"}, \
{0x1002, 0x99A4, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP, "Trinity [Radeon HD 7400G]"}, \

View File

@ -39,10 +39,12 @@ __FBSDID("$FreeBSD$");
/* SAREA area needs to be at least a page */
#if defined(__alpha__)
#define SAREA_MAX 0x2000
#define SAREA_MAX 0x2000U
#elif defined(__mips__)
#define SAREA_MAX 0x4000U
#else
/* Intel 830M driver needs at least 8k SAREA */
#define SAREA_MAX 0x2000UL
#define SAREA_MAX 0x2000U
#endif
/** Maximum number of drawables in the SAREA */

View File

@ -33,67 +33,15 @@ __FBSDID("$FreeBSD$");
#include <dev/drm2/drmP.h>
int
drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather *request)
#define DEBUG_SCATTER 0
static inline vm_offset_t drm_vmalloc_dma(vm_size_t size)
{
struct drm_sg_mem *entry;
vm_size_t size;
vm_pindex_t pindex;
if (dev->sg)
return EINVAL;
DRM_DEBUG("request size=%ld\n", request->size);
entry = malloc(sizeof(*entry), DRM_MEM_DRIVER, M_WAITOK | M_ZERO);
size = round_page(request->size);
entry->pages = OFF_TO_IDX(size);
entry->busaddr = malloc(entry->pages * sizeof(*entry->busaddr),
DRM_MEM_SGLISTS, M_WAITOK | M_ZERO);
entry->vaddr = kmem_alloc_attr(kernel_arena, size, M_WAITOK | M_ZERO,
return kmem_alloc_attr(kernel_arena, size, M_NOWAIT | M_ZERO,
0, BUS_SPACE_MAXADDR_32BIT, VM_MEMATTR_WRITE_COMBINING);
if (entry->vaddr == 0) {
drm_sg_cleanup(entry);
return (ENOMEM);
}
for(pindex = 0; pindex < entry->pages; pindex++) {
entry->busaddr[pindex] =
vtophys(entry->vaddr + IDX_TO_OFF(pindex));
}
DRM_LOCK(dev);
if (dev->sg) {
DRM_UNLOCK(dev);
drm_sg_cleanup(entry);
return (EINVAL);
}
dev->sg = entry;
DRM_UNLOCK(dev);
request->handle = entry->vaddr;
DRM_DEBUG("allocated %ju pages @ 0x%08zx, contents=%08lx\n",
entry->pages, entry->vaddr, *(unsigned long *)entry->vaddr);
return (0);
}
int
drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_scatter_gather *request = data;
DRM_DEBUG("\n");
return (drm_sg_alloc(dev, request));
}
void
drm_sg_cleanup(struct drm_sg_mem *entry)
void drm_sg_cleanup(struct drm_sg_mem * entry)
{
if (entry == NULL)
return;
@ -103,27 +51,86 @@ drm_sg_cleanup(struct drm_sg_mem *entry)
free(entry->busaddr, DRM_MEM_SGLISTS);
free(entry, DRM_MEM_DRIVER);
return;
}
int
drm_sg_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
/*
 * Allocate scatter-gather memory for DMA and publish it as dev->sg.
 *
 * The backing pages come from drm_vmalloc_dma() (zeroed,
 * write-combining, below 4 GB); each page's physical address is
 * recorded in entry->busaddr[].  The kernel virtual address is handed
 * back to the caller through request->handle.
 *
 * Returns 0 on success, -EINVAL if the driver lacks DRIVER_SG support
 * or an SG area already exists, -ENOMEM on allocation failure.
 */
int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
{
	struct drm_sg_mem *entry;
	vm_size_t size;
	vm_pindex_t pindex;

	DRM_DEBUG("\n");

	if (!drm_core_check_feature(dev, DRIVER_SG))
		return -EINVAL;

	if (dev->sg)
		return -EINVAL;

	entry = malloc(sizeof(*entry), DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
	if (!entry)
		return -ENOMEM;

	DRM_DEBUG("request size=%ld\n", request->size);

	/* NOTE(review): request->size comes from an ioctl; a huge value
	 * could overflow round_page()/the busaddr size product — confirm
	 * upper bound enforced by callers. */
	size = round_page(request->size);
	entry->pages = OFF_TO_IDX(size);
	entry->busaddr = malloc(entry->pages * sizeof(*entry->busaddr),
	    DRM_MEM_SGLISTS, M_NOWAIT | M_ZERO);
	if (!entry->busaddr) {
		free(entry, DRM_MEM_DRIVER);
		return -ENOMEM;
	}

	entry->vaddr = drm_vmalloc_dma(size);
	if (entry->vaddr == 0) {
		/*
		 * busaddr was allocated with DRM_MEM_SGLISTS; free it with
		 * the same malloc type (the previous code passed
		 * DRM_MEM_DRIVER here, skewing malloc(9) accounting).
		 */
		free(entry->busaddr, DRM_MEM_SGLISTS);
		free(entry, DRM_MEM_DRIVER);
		return -ENOMEM;
	}

	/* Record each page's physical address for the driver's use. */
	for (pindex = 0; pindex < entry->pages; pindex++) {
		entry->busaddr[pindex] =
		    vtophys(entry->vaddr + IDX_TO_OFF(pindex));
	}

	request->handle = entry->vaddr;
	dev->sg = entry;

	DRM_DEBUG("allocated %ju pages @ 0x%08zx, contents=%08lx\n",
	    entry->pages, entry->vaddr, *(unsigned long *)entry->vaddr);

	return 0;
}
/*
 * DRM_IOCTL_SG_ALLOC entry point: the ioctl payload is a
 * struct drm_scatter_gather; hand it straight to drm_sg_alloc().
 */
int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_scatter_gather *sg_request;

	sg_request = data;

	return drm_sg_alloc(dev, sg_request);
}
int drm_sg_free(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_scatter_gather *request = data;
struct drm_sg_mem *entry;
DRM_LOCK(dev);
if (!drm_core_check_feature(dev, DRIVER_SG))
return -EINVAL;
entry = dev->sg;
dev->sg = NULL;
DRM_UNLOCK(dev);
if (!entry || entry->vaddr != request->handle)
return (EINVAL);
return -EINVAL;
DRM_DEBUG("free 0x%zx\n", entry->vaddr);
drm_sg_cleanup(entry);
return (0);
return 0;
}

View File

@ -1,352 +0,0 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck., ND., USA.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
 * Simple memory manager interface that keeps track of allocated regions on a
* per "owner" basis. All regions associated with an "owner" can be released
* with a simple call. Typically if the "owner" exists. The owner is any
* "unsigned long" identifier. Can typically be a pointer to a file private
* struct or a context identifier.
*
* Authors:
* Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#include <dev/drm2/drmP.h>
#include <dev/drm2/drm_sman.h>
/*
 * Per-owner bookkeeping record: hashed by the owner key in
 * sman->owner_hash_tab, linked on sman->owner_items, and holding the list
 * of memory blocks allocated on behalf of that owner.
 */
struct drm_owner_item {
	struct drm_hash_item owner_hash;	/* .key is the owner id */
	struct list_head sman_list;		/* entry in sman->owner_items */
	struct list_head mem_blocks;		/* drm_memblock_item.owner_list heads */
};
void drm_sman_takedown(struct drm_sman * sman)
{
drm_ht_remove(&sman->user_hash_tab);
drm_ht_remove(&sman->owner_hash_tab);
if (sman->mm)
drm_free(sman->mm, sman->num_managers * sizeof(*sman->mm),
DRM_MEM_MM);
}
/*
 * Initialize a simple memory manager: allocate the per-pool manager array
 * (num_managers entries) and create the owner and user hash tables, whose
 * sizes are 2^owner_order and 2^user_order buckets respectively.
 * Returns 0 on success or a negative errno; on failure nothing is left
 * allocated.
 */
int
drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
	      unsigned int user_order, unsigned int owner_order)
{
	int ret = 0;

	sman->mm = (struct drm_sman_mm *) drm_calloc(num_managers,
						     sizeof(*sman->mm), DRM_MEM_MM);
	if (!sman->mm) {
		ret = -ENOMEM;
		goto out;
	}
	sman->num_managers = num_managers;
	INIT_LIST_HEAD(&sman->owner_items);
	ret = drm_ht_create(&sman->owner_hash_tab, owner_order);
	if (ret)
		goto out1;
	ret = drm_ht_create(&sman->user_hash_tab, user_order);
	if (!ret)
		goto out;	/* success: both tables created */

	/* user_hash_tab creation failed: unwind the owner table too. */
	drm_ht_remove(&sman->owner_hash_tab);
out1:
	drm_free(sman->mm, num_managers * sizeof(*sman->mm), DRM_MEM_MM);
out:
	return ret;
}
/*
 * Default allocator backend: carve a block of the requested size and
 * alignment out of a drm_mm range.  Returns the drm_mm_node as the opaque
 * block reference, or NULL when no suitable hole exists.
 */
static void *drm_sman_mm_allocate(void *private, unsigned long size,
				  unsigned alignment)
{
	struct drm_mm *range = (struct drm_mm *) private;
	struct drm_mm_node *hole;

	hole = drm_mm_search_free(range, size, alignment, 1);
	if (hole == NULL)
		return NULL;

	/* This could be non-atomic, but we are called from a locked path */
	return drm_mm_get_block_atomic(hole, size, alignment);
}
/* Default allocator backend: return a drm_mm block to its range. */
static void drm_sman_mm_free(void *private, void *ref)
{
	drm_mm_put_block((struct drm_mm_node *) ref);
}
/* Default allocator backend: tear down and free the drm_mm range itself. */
static void drm_sman_mm_destroy(void *private)
{
	struct drm_mm *range = (struct drm_mm *) private;

	drm_mm_takedown(range);
	drm_free(range, sizeof(*range), DRM_MEM_MM);
}
/* Default allocator backend: start offset of a block within its range. */
static unsigned long drm_sman_mm_offset(void *private, void *ref)
{
	return ((struct drm_mm_node *) ref)->start;
}
int
drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
unsigned long start, unsigned long size)
{
struct drm_sman_mm *sman_mm;
struct drm_mm *mm;
int ret;
KASSERT(manager < sman->num_managers, ("Invalid manager"));
sman_mm = &sman->mm[manager];
mm = malloc(sizeof(*mm), DRM_MEM_MM, M_NOWAIT | M_ZERO);
if (!mm) {
return -ENOMEM;
}
sman_mm->private = mm;
ret = drm_mm_init(mm, start, size);
if (ret) {
drm_free(mm, sizeof(*mm), DRM_MEM_MM);
return ret;
}
sman_mm->allocate = drm_sman_mm_allocate;
sman_mm->free = drm_sman_mm_free;
sman_mm->destroy = drm_sman_mm_destroy;
sman_mm->offset = drm_sman_mm_offset;
return 0;
}
/*
 * Install a caller-supplied allocator backend for manager slot "manager".
 * The structure is copied, so the caller may discard *allocator afterwards.
 * Always returns 0.
 */
int
drm_sman_set_manager(struct drm_sman * sman, unsigned int manager,
		     struct drm_sman_mm * allocator)
{
	KASSERT(manager < sman->num_managers, ("Invalid manager"));

	sman->mm[manager] = *allocator;

	return 0;
}
/*
 * Look up the drm_owner_item for "owner", lazily creating one (registered
 * in owner_hash_tab and appended to sman->owner_items) if none exists yet.
 * Returns NULL on allocation or hash-insertion failure.
 */
static struct drm_owner_item *drm_sman_get_owner_item(struct drm_sman * sman,
						      unsigned long owner)
{
	int ret;
	struct drm_hash_item *owner_hash_item;
	struct drm_owner_item *owner_item;

	ret = drm_ht_find_item(&sman->owner_hash_tab, owner, &owner_hash_item);
	if (!ret) {
		/* Already known: recover the embedding drm_owner_item. */
		return drm_hash_entry(owner_hash_item, struct drm_owner_item,
				      owner_hash);
	}

	owner_item = malloc(sizeof(*owner_item), DRM_MEM_MM, M_NOWAIT | M_ZERO);
	if (!owner_item)
		goto out;

	INIT_LIST_HEAD(&owner_item->mem_blocks);
	owner_item->owner_hash.key = owner;
	DRM_DEBUG("owner_item = %p, mem_blocks = %p\n", owner_item, &owner_item->mem_blocks);
	if (drm_ht_insert_item(&sman->owner_hash_tab, &owner_item->owner_hash))
		goto out1;

	list_add_tail(&owner_item->sman_list, &sman->owner_items);
	return owner_item;

out1:
	drm_free(owner_item, sizeof(*owner_item), DRM_MEM_MM);
out:
	return NULL;
}
/*
 * Allocate a block of "size" bytes from pool "manager" on behalf of "owner".
 * The block is registered in the user hash table (for lookup/free by key)
 * and on the owner's mem_blocks list (for bulk cleanup).  Returns the new
 * drm_memblock_item, or NULL on failure with everything unwound.
 */
struct drm_memblock_item *drm_sman_alloc(struct drm_sman *sman, unsigned int manager,
					 unsigned long size, unsigned alignment,
					 unsigned long owner)
{
	void *tmp;
	struct drm_sman_mm *sman_mm;
	struct drm_owner_item *owner_item;
	struct drm_memblock_item *memblock;

	KASSERT(manager < sman->num_managers, ("Invalid manager"));

	sman_mm = &sman->mm[manager];
	/* Backend-opaque reference to the underlying memory region. */
	tmp = sman_mm->allocate(sman_mm->private, size, alignment);

	if (!tmp) {
		return NULL;
	}

	memblock = malloc(sizeof(*memblock), DRM_MEM_MM, M_NOWAIT | M_ZERO);
	DRM_DEBUG("allocated mem_block %p\n", memblock);
	if (!memblock)
		goto out;

	memblock->mm_info = tmp;
	memblock->mm = sman_mm;
	memblock->sman = sman;
	INIT_LIST_HEAD(&memblock->owner_list);

	/* Assign a fresh 32-bit user key and insert into the user hash. */
	if (drm_ht_just_insert_please
	    (&sman->user_hash_tab, &memblock->user_hash,
	     (unsigned long)memblock, 32, 0, 0))
		goto out1;

	owner_item = drm_sman_get_owner_item(sman, owner);
	if (!owner_item)
		goto out2;

	DRM_DEBUG("owner_item = %p, mem_blocks = %p\n", owner_item, &owner_item->mem_blocks);
	DRM_DEBUG("owner_list.prev = %p, mem_blocks.prev = %p\n", memblock->owner_list.prev, owner_item->mem_blocks.prev);
	DRM_DEBUG("owner_list.next = %p, mem_blocks.next = %p\n", memblock->owner_list.next, owner_item->mem_blocks.next);

	list_add_tail(&memblock->owner_list, &owner_item->mem_blocks);

	DRM_DEBUG("Complete\n");
	return memblock;

	/* Unwind in reverse order of the steps above. */
out2:
	drm_ht_remove_item(&sman->user_hash_tab, &memblock->user_hash);
out1:
	drm_free(memblock, sizeof(*memblock), DRM_MEM_MM);
out:
	sman_mm->free(sman_mm->private, tmp);

	return NULL;
}
/*
 * Release one block: unlink it from its owner's list and from the user
 * hash, hand the memory back to the backend allocator, then free the item.
 */
static void drm_sman_free(struct drm_memblock_item *item)
{
	struct drm_sman *sman = item->sman;

	list_del(&item->owner_list);
	drm_ht_remove_item(&sman->user_hash_tab, &item->user_hash);
	item->mm->free(item->mm->private, item->mm_info);
	drm_free(item, sizeof(*item), DRM_MEM_MM);
}
/*
 * Free the memory block identified by its user-hash key.
 * Returns 0 on success or -EINVAL when the key is unknown.
 */
int drm_sman_free_key(struct drm_sman *sman, unsigned int key)
{
	struct drm_hash_item *hash_item;

	if (drm_ht_find_item(&sman->user_hash_tab, key, &hash_item) != 0)
		return -EINVAL;

	drm_sman_free(drm_hash_entry(hash_item, struct drm_memblock_item,
				     user_hash));
	return 0;
}
/*
 * Unregister an owner item (sman list + owner hash) and free it.
 * Caller must have emptied or be done with its mem_blocks list.
 */
static void drm_sman_remove_owner(struct drm_sman *sman,
				  struct drm_owner_item *owner_item)
{
	list_del(&owner_item->sman_list);
	drm_ht_remove_item(&sman->owner_hash_tab, &owner_item->owner_hash);
	drm_free(owner_item, sizeof(*owner_item), DRM_MEM_MM);
}
/*
 * Check whether "owner" still holds stale memory blocks.
 * Returns 0 when blocks remain (caller should idle the hardware and call
 * drm_sman_owner_cleanup()); returns -1 when the owner is unknown or its
 * block list is empty — in the latter case the owner item is removed here.
 *
 * NOTE(review): the header prototype's comment says this "returns 1 iff
 * there are no stale memory blocks", but the implementation returns -1;
 * callers must test for non-zero, not for 1.
 */
int drm_sman_owner_clean(struct drm_sman *sman, unsigned long owner)
{
	struct drm_hash_item *hash_item;
	struct drm_owner_item *owner_item;

	if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) {
		return -1;
	}

	owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash);
	DRM_DEBUG("cleaning owner_item %p\n", owner_item);
	/* Empty list check: next pointing at the head means no blocks. */
	if (owner_item->mem_blocks.next == &owner_item->mem_blocks) {
		drm_sman_remove_owner(sman, owner_item);
		return -1;
	}

	return 0;
}
/* Free every block still held by owner_item, then remove the owner itself. */
static void drm_sman_do_owner_cleanup(struct drm_sman *sman,
				      struct drm_owner_item *owner_item)
{
	struct drm_memblock_item *block, *tmp;

	list_for_each_entry_safe(block, tmp, &owner_item->mem_blocks,
				 owner_list) {
		DRM_DEBUG("freeing mem_block %p\n", block);
		drm_sman_free(block);
	}

	drm_sman_remove_owner(sman, owner_item);
}
/*
 * Free all stale blocks belonging to "owner" and drop the owner record.
 * Silently does nothing when the owner is unknown.
 */
void drm_sman_owner_cleanup(struct drm_sman *sman, unsigned long owner)
{
	struct drm_hash_item *hash_item;
	struct drm_owner_item *found;

	if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item) != 0)
		return;

	found = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash);
	drm_sman_do_owner_cleanup(sman, found);
}
/*
 * Free every remaining block of every owner, then destroy each manager's
 * backend allocator state.  The hash tables and the manager array remain
 * allocated; drm_sman_takedown() releases those.
 */
void drm_sman_cleanup(struct drm_sman *sman)
{
	struct drm_owner_item *entry, *next;
	unsigned int i;
	struct drm_sman_mm *sman_mm;

	DRM_DEBUG("sman = %p, owner_items = %p\n",
	    sman, &sman->owner_items);
	list_for_each_entry_safe(entry, next, &sman->owner_items, sman_list) {
		DRM_DEBUG("cleaning owner_item = %p\n", entry);
		drm_sman_do_owner_cleanup(sman, entry);
	}
	if (sman->mm) {
		for (i = 0; i < sman->num_managers; ++i) {
			sman_mm = &sman->mm[i];
			/* private is NULLed so a second cleanup is a no-op. */
			if (sman_mm->private) {
				sman_mm->destroy(sman_mm->private);
				sman_mm->private = NULL;
			}
		}
	}
}

View File

@ -1,181 +0,0 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*
**************************************************************************/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
 * Simple memory MANager interface that keeps track of allocated regions on a
* per "owner" basis. All regions associated with an "owner" can be released
* with a simple call. Typically if the "owner" exists. The owner is any
* "unsigned long" identifier. Can typically be a pointer to a file private
* struct or a context identifier.
*
* Authors:
* Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#ifndef DRM_SMAN_H
#define DRM_SMAN_H
#include <dev/drm2/drm_hashtab.h>
#include <dev/drm2/drm_linux_list.h>
#include <dev/drm2/drm_mm.h>
/*
 * A class that is an abstraction of a simple memory allocator.
* The sman implementation provides a default such allocator
* using the drm_mm.c implementation. But the user can replace it.
* See the SiS implementation, which may use the SiS FB kernel module
* for memory management.
*/
struct drm_sman_mm {
/* private info. If allocated, needs to be destroyed by the destroy
function */
void *private;
/* Allocate a memory block with given size and alignment.
Return an opaque reference to the memory block */
void *(*allocate) (void *private, unsigned long size,
unsigned alignment);
/* Free a memory block. "ref" is the opaque reference that we got from
the "alloc" function */
void (*free) (void *private, void *ref);
/* Free all resources associated with this allocator */
void (*destroy) (void *private);
/* Return a memory offset from the opaque reference returned from the
"alloc" function */
unsigned long (*offset) (void *private, void *ref);
};
struct drm_memblock_item {
struct list_head owner_list;
struct drm_hash_item user_hash;
void *mm_info;
struct drm_sman_mm *mm;
struct drm_sman *sman;
};
struct drm_sman {
struct drm_sman_mm *mm;
int num_managers;
struct drm_open_hash owner_hash_tab;
struct drm_open_hash user_hash_tab;
struct list_head owner_items;
};
/*
* Take down a memory manager. This function should only be called after a
* successful init and after a call to drm_sman_cleanup.
*/
extern void drm_sman_takedown(struct drm_sman * sman);
/*
* Allocate structures for a manager.
* num_managers are the number of memory pools to manage. (VRAM, AGP, ....)
* user_order is the log2 of the number of buckets in the user hash table.
* set this to approximately log2 of the max number of memory regions
* that will be allocated for _all_ pools together.
* owner_order is the log2 of the number of buckets in the owner hash table.
* set this to approximately log2 of
* the number of client file connections that will
* be using the manager.
*
*/
extern int drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
unsigned int user_order, unsigned int owner_order);
/*
* Initialize a drm_mm.c allocator. Should be called only once for each
 * manager unless a customized allocator is used.
*/
extern int drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
unsigned long start, unsigned long size);
/*
* Initialize a customized allocator for one of the managers.
* (See the SiS module). The object pointed to by "allocator" is copied,
* so it can be destroyed after this call.
*/
extern int drm_sman_set_manager(struct drm_sman * sman, unsigned int mananger,
struct drm_sman_mm * allocator);
/*
 * Allocate a memory block. Alignment is not implemented yet.
*/
extern struct drm_memblock_item *drm_sman_alloc(struct drm_sman * sman,
unsigned int manager,
unsigned long size,
unsigned alignment,
unsigned long owner);
/*
* Free a memory block identified by its user hash key.
*/
extern int drm_sman_free_key(struct drm_sman * sman, unsigned int key);
/*
* returns 1 iff there are no stale memory blocks associated with this owner.
* Typically called to determine if we need to idle the hardware and call
* drm_sman_owner_cleanup. If there are no stale memory blocks, it removes all
* resources associated with owner.
*/
extern int drm_sman_owner_clean(struct drm_sman * sman, unsigned long owner);
/*
* Frees all stale memory blocks associated with this owner. Note that this
* requires that the hardware is finished with all blocks, so the graphics engine
* should be idled before this call is made. This function also frees
* any resources associated with "owner" and should be called when owner
* is not going to be referenced anymore.
*/
extern void drm_sman_owner_cleanup(struct drm_sman * sman, unsigned long owner);
/*
* Frees all stale memory blocks associated with the memory manager.
* See idling above.
*/
extern void drm_sman_cleanup(struct drm_sman * sman);
#endif

View File

@ -34,27 +34,468 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "drmP.h"
#include <dev/drm2/drmP.h>
#include <dev/drm2/drm_core.h>
int
drm_setmaster_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
#ifdef DRM_DEBUG_DEFAULT_ON
unsigned int drm_debug = (DRM_DEBUGBITS_DEBUG | DRM_DEBUGBITS_KMS |
DRM_DEBUGBITS_FAILED_IOCTL);
#else
unsigned int drm_debug = 0; /* 1 to enable debug output */
#endif
EXPORT_SYMBOL(drm_debug);
unsigned int drm_notyet = 0;
unsigned int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */
EXPORT_SYMBOL(drm_vblank_offdelay);
unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */
EXPORT_SYMBOL(drm_timestamp_precision);
/*
* Default to use monotonic timestamps for wait-for-vblank and page-flip
* complete events.
*/
unsigned int drm_timestamp_monotonic = 1;
MODULE_AUTHOR(CORE_AUTHOR);
MODULE_DESCRIPTION(CORE_DESC);
MODULE_LICENSE("GPL and additional rights");
MODULE_PARM_DESC(debug, "Enable debug output");
MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]");
MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
module_param_named(debug, drm_debug, int, 0600);
module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
static struct cdevsw drm_cdevsw = {
.d_version = D_VERSION,
.d_open = drm_open,
.d_read = drm_read,
.d_ioctl = drm_ioctl,
.d_poll = drm_poll,
.d_mmap_single = drm_mmap_single,
.d_name = "drm",
.d_flags = D_TRACKCLOSE
};
static int drm_minor_get_id(struct drm_device *dev, int type)
{
int new_id;
DRM_DEBUG("setmaster\n");
new_id = device_get_unit(dev->dev);
if (file_priv->master != 0)
return (0);
return (EPERM);
if (new_id >= 64)
return -EINVAL;
if (type == DRM_MINOR_CONTROL) {
new_id += 64;
} else if (type == DRM_MINOR_RENDER) {
new_id += 128;
}
return new_id;
}
int
drm_dropmaster_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
struct drm_master *drm_master_create(struct drm_minor *minor)
{
struct drm_master *master;
DRM_DEBUG("dropmaster\n");
if (file_priv->master != 0)
return (EINVAL);
return (0);
master = malloc(sizeof(*master), DRM_MEM_KMS, M_NOWAIT | M_ZERO);
if (!master)
return NULL;
refcount_init(&master->refcount, 1);
mtx_init(&master->lock.spinlock, "drm_master__lock__spinlock",
NULL, MTX_DEF);
DRM_INIT_WAITQUEUE(&master->lock.lock_queue);
drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER);
INIT_LIST_HEAD(&master->magicfree);
master->minor = minor;
list_add_tail(&master->head, &minor->master_list);
return master;
}
/* Take an additional reference on a master object; returns the same pointer. */
struct drm_master *drm_master_get(struct drm_master *master)
{
	refcount_acquire(&master->refcount);
	return master;
}
EXPORT_SYMBOL(drm_master_get);
/*
 * Final destruction of a master object (called when its refcount hits
 * zero): notify the driver, remove the maps this master created, free its
 * unique string and all pending auth-magic entries, then free the object.
 */
static void drm_master_destroy(struct drm_master *master)
{
	struct drm_magic_entry *pt, *next;
	struct drm_device *dev = master->minor->dev;
	struct drm_map_list *r_list, *list_temp;

	list_del(&master->head);

	if (dev->driver->master_destroy)
		dev->driver->master_destroy(dev, master);

	/* Tear down every map owned by this master. */
	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
		if (r_list->master == master) {
			drm_rmmap_locked(dev, r_list->map);
			r_list = NULL;
		}
	}

	if (master->unique) {
		free(master->unique, DRM_MEM_DRIVER);
		master->unique = NULL;
		master->unique_len = 0;
	}

	/* Drain outstanding authentication magic tokens. */
	list_for_each_entry_safe(pt, next, &master->magicfree, head) {
		list_del(&pt->head);
		drm_ht_remove_item(&master->magiclist, &pt->hash_item);
		free(pt, DRM_MEM_MAGIC);
	}

	drm_ht_remove(&master->magiclist);

	free(master, DRM_MEM_KMS);
}
/*
 * Drop one reference on *master, destroying the object on the last
 * release, and always NULL out the caller's pointer.
 */
void drm_master_put(struct drm_master **master)
{
	struct drm_master *m = *master;

	if (refcount_release(&m->refcount))
		drm_master_destroy(m);
	*master = NULL;
}
EXPORT_SYMBOL(drm_master_put);
/*
 * DRM_IOCTL_SET_MASTER: promote this file handle to DRM-Master of its
 * minor.  Succeeds trivially if already master; fails with -EINVAL if the
 * minor already has a (different) master.  The driver's master_set hook may
 * veto the promotion, in which case the reference is rolled back.
 */
int drm_setmaster_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	int ret;

	if (file_priv->is_master)
		return 0;

	if (file_priv->minor->master && file_priv->minor->master != file_priv->master)
		return -EINVAL;

	if (!file_priv->master)
		return -EINVAL;

	if (file_priv->minor->master)
		return -EINVAL;

	DRM_LOCK(dev);
	file_priv->minor->master = drm_master_get(file_priv->master);
	file_priv->is_master = 1;
	if (dev->driver->master_set) {
		ret = dev->driver->master_set(dev, file_priv, false);
		if (unlikely(ret != 0)) {
			/* Driver refused: undo the promotion. */
			file_priv->is_master = 0;
			drm_master_put(&file_priv->minor->master);
		}
	}
	DRM_UNLOCK(dev);

	return 0;
}
/*
 * DRM_IOCTL_DROP_MASTER: relinquish DRM-Master status.  Fails with -EINVAL
 * unless the caller is the current master of a minor that has one.
 */
int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	if (!file_priv->is_master || !file_priv->minor->master)
		return -EINVAL;

	DRM_LOCK(dev);
	if (dev->driver->master_drop)
		dev->driver->master_drop(dev, file_priv, false);
	drm_master_put(&file_priv->minor->master);
	file_priv->is_master = 0;
	DRM_UNLOCK(dev);

	return 0;
}
/*
 * One-time initialization of a drm_device: lists, locks, the map hash,
 * the statistics counters, AGP/PCI state, the context bitmap, GEM (when
 * the driver advertises DRIVER_GEM) and the hw.dri sysctl tree.
 * Returns 0 on success or a negative errno; on failure everything set up
 * so far is undone via drm_cancel_fill_in_dev().
 */
int drm_fill_in_dev(struct drm_device *dev,
		    struct drm_driver *driver)
{
	int retcode, i;

	INIT_LIST_HEAD(&dev->filelist);
	INIT_LIST_HEAD(&dev->ctxlist);
	INIT_LIST_HEAD(&dev->maplist);
	INIT_LIST_HEAD(&dev->vblank_event_list);

	mtx_init(&dev->irq_lock, "drmirq", NULL, MTX_DEF);
	mtx_init(&dev->count_lock, "drmcount", NULL, MTX_DEF);
	mtx_init(&dev->event_lock, "drmev", NULL, MTX_DEF);
	sx_init(&dev->dev_struct_lock, "drmslk");
	mtx_init(&dev->ctxlist_mutex, "drmctxlist", NULL, MTX_DEF);
	mtx_init(&dev->pcir_lock, "drmpcir", NULL, MTX_DEF);

	if (drm_ht_create(&dev->map_hash, 12)) {
		return -ENOMEM;
	}

	/* the DRM has 6 basic counters */
	dev->counters = 6;
	dev->types[0] = _DRM_STAT_LOCK;
	dev->types[1] = _DRM_STAT_OPENS;
	dev->types[2] = _DRM_STAT_CLOSES;
	dev->types[3] = _DRM_STAT_IOCTLS;
	dev->types[4] = _DRM_STAT_LOCKS;
	dev->types[5] = _DRM_STAT_UNLOCKS;

	/*
	 * FIXME Linux<->FreeBSD: this is done in drm_setup() on Linux.
	 */
	for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
		atomic_set(&dev->counts[i], 0);

	dev->driver = driver;

	retcode = drm_pci_agp_init(dev);
	if (retcode)
		goto error_out_unreg;

	retcode = drm_ctxbitmap_init(dev);
	if (retcode) {
		DRM_ERROR("Cannot allocate memory for context bitmap.\n");
		goto error_out_unreg;
	}

	if (driver->driver_features & DRIVER_GEM) {
		retcode = drm_gem_init(dev);
		if (retcode) {
			DRM_ERROR("Cannot initialize graphics execution "
				  "manager (GEM)\n");
			goto error_out_unreg;
		}
	}

	/* Sysctl failure is logged but deliberately not fatal. */
	retcode = drm_sysctl_init(dev);
	if (retcode != 0) {
		DRM_ERROR("Failed to create hw.dri sysctl entry: %d\n",
		    retcode);
	}

	return 0;

error_out_unreg:
	drm_cancel_fill_in_dev(dev);
	return retcode;
}
EXPORT_SYMBOL(drm_fill_in_dev);
/*
 * Undo drm_fill_in_dev() after a mid-initialization failure: tear down
 * sysctl, GEM, the context bitmap, the AGP MTRR/state, the map hash and
 * every lock created there, in roughly reverse order.
 */
void drm_cancel_fill_in_dev(struct drm_device *dev)
{
	struct drm_driver *driver;

	driver = dev->driver;

	drm_sysctl_cleanup(dev);
	if (driver->driver_features & DRIVER_GEM)
		drm_gem_destroy(dev);
	drm_ctxbitmap_cleanup(dev);

	if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) &&
	    dev->agp && dev->agp->agp_mtrr >= 0) {
		int retval;
		retval = drm_mtrr_del(dev->agp->agp_mtrr,
		    dev->agp->agp_info.ai_aperture_base,
		    dev->agp->agp_info.ai_aperture_size,
		    DRM_MTRR_WC);
		DRM_DEBUG("mtrr_del=%d\n", retval);
	}
	free(dev->agp, DRM_MEM_AGPLISTS);
	dev->agp = NULL;

	drm_ht_remove(&dev->map_hash);

	mtx_destroy(&dev->irq_lock);
	mtx_destroy(&dev->count_lock);
	mtx_destroy(&dev->event_lock);
	sx_destroy(&dev->dev_struct_lock);
	mtx_destroy(&dev->ctxlist_mutex);
	mtx_destroy(&dev->pcir_lock);
}
/**
* Get a secondary minor number.
*
* \param dev device data structure
* \param sec-minor structure to hold the assigned minor
* \return negative number on failure.
*
* Search an empty entry and initialize it to the given parameters, and
* create the proc init entry via proc_init(). This routines assigns
* minor numbers to secondary heads of multi-headed cards
*/
/*
 * Allocate and register a drm_minor of the given type (primary, control or
 * render), creating the matching /dev/dri/{card,controlD,renderD}%d cdev.
 * On success *minor points at the new structure; on failure *minor is NULL
 * and a negative errno is returned.
 */
int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
{
	struct drm_minor *new_minor;
	int ret;
	int minor_id;
	const char *minor_devname;

	DRM_DEBUG("\n");

	minor_id = drm_minor_get_id(dev, type);
	if (minor_id < 0)
		return minor_id;

	new_minor = malloc(sizeof(struct drm_minor), DRM_MEM_MINOR,
	    M_NOWAIT | M_ZERO);
	if (!new_minor) {
		ret = -ENOMEM;
		goto err_idr;
	}

	new_minor->type = type;
	new_minor->dev = dev;
	new_minor->index = minor_id;
	INIT_LIST_HEAD(&new_minor->master_list);
	new_minor->buf_sigio = NULL;

	/* Device node name depends on the minor's role. */
	switch (type) {
	case DRM_MINOR_CONTROL:
		minor_devname = "dri/controlD%d";
		break;
	case DRM_MINOR_RENDER:
		minor_devname = "dri/renderD%d";
		break;
	default:
		minor_devname = "dri/card%d";
		break;
	}

	ret = make_dev_p(MAKEDEV_WAITOK | MAKEDEV_CHECKNAME, &new_minor->device,
	    &drm_cdevsw, 0, DRM_DEV_UID, DRM_DEV_GID,
	    DRM_DEV_MODE, minor_devname, minor_id);
	if (ret) {
		DRM_ERROR("Failed to create cdev: %d\n", ret);
		goto err_mem;
	}
	/* Back-pointer used by the cdevsw handlers to find the minor. */
	new_minor->device->si_drv1 = new_minor;
	*minor = new_minor;

	DRM_DEBUG("new minor assigned %d\n", minor_id);
	return 0;

err_mem:
	free(new_minor, DRM_MEM_MINOR);
err_idr:
	*minor = NULL;
	return ret;
}
EXPORT_SYMBOL(drm_get_minor);
/**
* Put a secondary minor number.
*
* \param sec_minor - structure to be released
* \return always zero
*
* Cleans up the proc resources. Not legal for this to be the
* last minor released.
*
*/
/*
 * Release a drm_minor: clear SIGIO ownership, destroy its cdev, free the
 * structure and NULL out the caller's pointer.  Always returns 0.
 */
int drm_put_minor(struct drm_minor **minor_p)
{
	struct drm_minor *m = *minor_p;

	DRM_DEBUG("release secondary minor %d\n", m->index);

	funsetown(&m->buf_sigio);
	destroy_dev(m->device);
	free(m, DRM_MEM_MINOR);
	*minor_p = NULL;

	return 0;
}
EXPORT_SYMBOL(drm_put_minor);
/**
* Called via drm_exit() at module unload time or when pci device is
* unplugged.
*
* Cleans up all DRM device, calling drm_lastclose().
*
*/
/*
 * Full teardown of a drm_device at driver unload/unplug time: run
 * lastclose, remove the AGP MTRR, call the driver unload hook, free AGP
 * state, vblank state, all maps, the context bitmap, GEM, every minor,
 * and finally the device's locks.
 */
void drm_put_dev(struct drm_device *dev)
{
	struct drm_driver *driver;
	struct drm_map_list *r_list, *list_temp;

	DRM_DEBUG("\n");

	if (!dev) {
		DRM_ERROR("cleanup called no dev\n");
		return;
	}
	driver = dev->driver;

	drm_lastclose(dev);

	if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) &&
	    dev->agp && dev->agp->agp_mtrr >= 0) {
		int retval;
		retval = drm_mtrr_del(dev->agp->agp_mtrr,
		    dev->agp->agp_info.ai_aperture_base,
		    dev->agp->agp_info.ai_aperture_size,
		    DRM_MTRR_WC);
		DRM_DEBUG("mtrr_del=%d\n", retval);
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		drm_mode_group_free(&dev->primary->mode_group);

	if (dev->driver->unload)
		dev->driver->unload(dev);

	drm_sysctl_cleanup(dev);

	if (drm_core_has_AGP(dev) && dev->agp) {
		free(dev->agp, DRM_MEM_AGPLISTS);
		dev->agp = NULL;
	}

	drm_vblank_cleanup(dev);

	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
		drm_rmmap(dev, r_list->map);
	drm_ht_remove(&dev->map_hash);

	drm_ctxbitmap_cleanup(dev);

	/* KMS drivers also carry a control minor. */
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		drm_put_minor(&dev->control);

	if (driver->driver_features & DRIVER_GEM)
		drm_gem_destroy(dev);

	drm_put_minor(&dev->primary);

	mtx_destroy(&dev->irq_lock);
	mtx_destroy(&dev->count_lock);
	mtx_destroy(&dev->event_lock);
	sx_destroy(&dev->dev_struct_lock);
	mtx_destroy(&dev->ctxlist_mutex);
	mtx_destroy(&dev->pcir_lock);

#ifdef FREEBSD_NOTYET
	list_del(&dev->driver_item);
#endif /* FREEBSD_NOTYET */
}
EXPORT_SYMBOL(drm_put_dev);

View File

@ -8,11 +8,11 @@
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
@ -70,8 +70,11 @@ int drm_sysctl_init(struct drm_device *dev)
/* Add the sysctl node for DRI if it doesn't already exist */
drioid = SYSCTL_ADD_NODE(&info->ctx, SYSCTL_CHILDREN(&sysctl___hw), OID_AUTO,
"dri", CTLFLAG_RW, NULL, "DRI Graphics");
if (!drioid)
return 1;
if (!drioid) {
free(dev->sysctl, DRM_MEM_DRIVER);
dev->sysctl = NULL;
return (-ENOMEM);
}
/* Find the next free slot under hw.dri */
i = 0;
@ -79,18 +82,22 @@ int drm_sysctl_init(struct drm_device *dev)
if (i <= oid->oid_arg2)
i = oid->oid_arg2 + 1;
}
if (i > 9)
return (1);
if (i > 9) {
drm_sysctl_cleanup(dev);
return (-ENOSPC);
}
dev->sysctl_node_idx = i;
/* Add the hw.dri.x for our device */
info->name[0] = '0' + i;
info->name[1] = 0;
top = SYSCTL_ADD_NODE(&info->ctx, SYSCTL_CHILDREN(drioid),
OID_AUTO, info->name, CTLFLAG_RW, NULL, NULL);
if (!top)
return 1;
if (!top) {
drm_sysctl_cleanup(dev);
return (-ENOMEM);
}
for (i = 0; i < DRM_SYSCTL_ENTRIES; i++) {
oid = SYSCTL_ADD_OID(&info->ctx,
SYSCTL_CHILDREN(top),
@ -102,14 +109,16 @@ int drm_sysctl_init(struct drm_device *dev)
drm_sysctl_list[i].f,
"A",
NULL);
if (!oid)
return 1;
if (!oid) {
drm_sysctl_cleanup(dev);
return (-ENOMEM);
}
}
SYSCTL_ADD_INT(&info->ctx, SYSCTL_CHILDREN(drioid), OID_AUTO, "debug",
CTLFLAG_RW, &drm_debug_flag, sizeof(drm_debug_flag),
CTLFLAG_RW, &drm_debug, sizeof(drm_debug),
"Enable debugging output");
SYSCTL_ADD_INT(&info->ctx, SYSCTL_CHILDREN(drioid), OID_AUTO, "notyet",
CTLFLAG_RW, &drm_notyet_flag, sizeof(drm_debug_flag),
CTLFLAG_RW, &drm_notyet, sizeof(drm_debug),
"Enable notyet reminders");
if (dev->driver->sysctl_init != NULL)
@ -131,13 +140,16 @@ int drm_sysctl_cleanup(struct drm_device *dev)
{
int error;
if (dev->sysctl == NULL)
return (0);
error = sysctl_ctx_free(&dev->sysctl->ctx);
free(dev->sysctl, DRM_MEM_DRIVER);
dev->sysctl = NULL;
if (dev->driver->sysctl_cleanup != NULL)
dev->driver->sysctl_cleanup(dev);
return (error);
return (-error);
}
#define DRM_SYSCTL_PRINT(fmt, arg...) \
@ -151,20 +163,25 @@ do { \
static int drm_name_info DRM_SYSCTL_HANDLER_ARGS
{
struct drm_device *dev = arg1;
struct drm_minor *minor;
struct drm_master *master;
char buf[128];
int retcode;
int hasunique = 0;
/* FIXME: This still uses primary minor. */
minor = dev->primary;
DRM_SYSCTL_PRINT("%s 0x%jx", dev->driver->name,
(uintmax_t)dev2udev(dev->devnode));
(uintmax_t)dev2udev(minor->device));
DRM_LOCK(dev);
if (dev->unique) {
snprintf(buf, sizeof(buf), " %s", dev->unique);
master = minor->master;
if (master != NULL && master->unique) {
snprintf(buf, sizeof(buf), " %s", master->unique);
hasunique = 1;
}
DRM_UNLOCK(dev);
if (hasunique)
SYSCTL_OUT(req, buf, strlen(buf));
@ -177,7 +194,8 @@ static int drm_name_info DRM_SYSCTL_HANDLER_ARGS
static int drm_vm_info DRM_SYSCTL_HANDLER_ARGS
{
struct drm_device *dev = arg1;
drm_local_map_t *map, *tempmaps;
struct drm_map_list *entry;
struct drm_local_map *map, *tempmaps;
const char *types[] = {
[_DRM_FRAME_BUFFER] = "FB",
[_DRM_REGISTERS] = "REG",
@ -198,10 +216,12 @@ static int drm_vm_info DRM_SYSCTL_HANDLER_ARGS
DRM_LOCK(dev);
mapcount = 0;
TAILQ_FOREACH(map, &dev->maplist, link)
mapcount++;
list_for_each_entry(entry, &dev->maplist, head) {
if (entry->map != NULL)
mapcount++;
}
tempmaps = malloc(sizeof(drm_local_map_t) * mapcount, DRM_MEM_DRIVER,
tempmaps = malloc(sizeof(*tempmaps) * mapcount, DRM_MEM_DRIVER,
M_NOWAIT);
if (tempmaps == NULL) {
DRM_UNLOCK(dev);
@ -209,13 +229,15 @@ static int drm_vm_info DRM_SYSCTL_HANDLER_ARGS
}
i = 0;
TAILQ_FOREACH(map, &dev->maplist, link)
tempmaps[i++] = *map;
list_for_each_entry(entry, &dev->maplist, head) {
if (entry->map != NULL)
tempmaps[i++] = *entry->map;
}
DRM_UNLOCK(dev);
DRM_SYSCTL_PRINT("\nslot offset size "
"type flags address handle mtrr\n");
"type flags address mtrr\n");
for (i = 0; i < mapcount; i++) {
map = &tempmaps[i];
@ -235,17 +257,15 @@ static int drm_vm_info DRM_SYSCTL_HANDLER_ARGS
break;
}
if (!map->mtrr)
if (map->mtrr < 0)
yesno = "no";
else
yesno = "yes";
DRM_SYSCTL_PRINT(
"%4d 0x%016lx 0x%08lx %4.4s 0x%02x 0x%016lx %6d %s\n",
i, map->offset, map->size, type, map->flags,
(unsigned long)map->virtual,
(unsigned int)((unsigned long)map->handle >>
DRM_MAP_HANDLE_SHIFT), yesno);
"%4d 0x%016llx 0x%08lx %4.4s 0x%02x 0x%016lx %s\n",
i, (unsigned long long)map->offset, map->size, type,
map->flags, (unsigned long)map->handle, yesno);
}
SYSCTL_OUT(req, "", 1);
@ -257,8 +277,8 @@ static int drm_vm_info DRM_SYSCTL_HANDLER_ARGS
static int drm_bufs_info DRM_SYSCTL_HANDLER_ARGS
{
struct drm_device *dev = arg1;
drm_device_dma_t *dma = dev->dma;
drm_device_dma_t tempdma;
struct drm_device_dma *dma = dev->dma;
struct drm_device_dma tempdma;
int *templists;
int i;
char buf[128];
@ -322,7 +342,7 @@ static int drm_clients_info DRM_SYSCTL_HANDLER_ARGS
DRM_LOCK(dev);
privcount = 0;
TAILQ_FOREACH(priv, &dev->files, link)
list_for_each_entry(priv, &dev->filelist, lhead)
privcount++;
tempprivs = malloc(sizeof(struct drm_file) * privcount, DRM_MEM_DRIVER,
@ -332,7 +352,7 @@ static int drm_clients_info DRM_SYSCTL_HANDLER_ARGS
return ENOMEM;
}
i = 0;
TAILQ_FOREACH(priv, &dev->files, link)
list_for_each_entry(priv, &dev->filelist, lhead)
tempprivs[i++] = *priv;
DRM_UNLOCK(dev);
@ -343,7 +363,7 @@ static int drm_clients_info DRM_SYSCTL_HANDLER_ARGS
priv = &tempprivs[i];
DRM_SYSCTL_PRINT("%c %-12s %5d %5d %10u %10lu\n",
priv->authenticated ? 'y' : 'n',
devtoname(priv->dev->devnode),
devtoname(priv->minor->device),
priv->pid,
priv->uid,
priv->magic,

View File

@ -1,5 +1,16 @@
/*-
* Copyright 2003 Eric Anholt
/**
* \file drm_vm.c
* Memory mapping for DRM
*
* \author Rickard E. (Rik) Faith <faith@valinux.com>
* \author Gareth Hughes <gareth@valinux.com>
*/
/*
* Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@ -8,17 +19,18 @@
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* ERIC ANHOLT BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <sys/cdefs.h>
@ -31,6 +43,7 @@ __FBSDID("$FreeBSD$");
#include <dev/drm2/drmP.h>
#include <dev/drm2/drm.h>
#ifdef FREEBSD_NOTYET
int
drm_mmap(struct cdev *kdev, vm_ooffset_t offset, vm_paddr_t *paddr,
int prot, vm_memattr_t *memattr)
@ -131,4 +144,4 @@ drm_mmap(struct cdev *kdev, vm_ooffset_t offset, vm_paddr_t *paddr,
*paddr = phys;
return 0;
}
#endif /* FREEBSD_NOTYET */

View File

@ -45,14 +45,12 @@ enum {
PINNED_LIST,
};
static const char *
yesno(int v)
static const char *yesno(int v)
{
return (v ? "yes" : "no");
return v ? "yes" : "no";
}
static int
i915_capabilities(struct drm_device *dev, struct sbuf *m, void *data)
static int i915_capabilities(struct drm_device *dev, struct sbuf *m, void *data)
{
const struct intel_device_info *info = INTEL_INFO(dev);
@ -80,11 +78,10 @@ i915_capabilities(struct drm_device *dev, struct sbuf *m, void *data)
B(has_llc);
#undef B
return (0);
return 0;
}
static const char *
get_pin_flag(struct drm_i915_gem_object *obj)
static const char *get_pin_flag(struct drm_i915_gem_object *obj)
{
if (obj->user_pin_count > 0)
return "P";
@ -94,25 +91,23 @@ get_pin_flag(struct drm_i915_gem_object *obj)
return " ";
}
static const char *
get_tiling_flag(struct drm_i915_gem_object *obj)
static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
{
switch (obj->tiling_mode) {
default:
case I915_TILING_NONE: return (" ");
case I915_TILING_X: return ("X");
case I915_TILING_Y: return ("Y");
case I915_TILING_NONE: return " ";
case I915_TILING_X: return "X";
case I915_TILING_Y: return "Y";
}
}
static const char *
cache_level_str(int type)
static const char *cache_level_str(int type)
{
switch (type) {
case I915_CACHE_NONE: return " uncached";
case I915_CACHE_LLC: return " snooped (LLC)";
case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)";
default: return ("");
default: return "";
}
}
@ -154,8 +149,7 @@ describe_obj(struct sbuf *m, struct drm_i915_gem_object *obj)
sbuf_printf(m, " (%s)", obj->ring->name);
}
static int
i915_gem_object_list_info(struct drm_device *dev, struct sbuf *m, void *data)
static int i915_gem_object_list_info(struct drm_device *dev, struct sbuf *m, void *data)
{
uintptr_t list = (uintptr_t)data;
struct list_head *head;
@ -165,7 +159,7 @@ i915_gem_object_list_info(struct drm_device *dev, struct sbuf *m, void *data)
int count;
if (sx_xlock_sig(&dev->dev_struct_lock))
return (EINTR);
return -EINTR;
switch (list) {
case ACTIVE_LIST:
@ -182,7 +176,7 @@ i915_gem_object_list_info(struct drm_device *dev, struct sbuf *m, void *data)
break;
default:
DRM_UNLOCK(dev);
return (EINVAL);
return -EINVAL;
}
total_obj_size = total_gtt_size = count = 0;
@ -198,7 +192,7 @@ i915_gem_object_list_info(struct drm_device *dev, struct sbuf *m, void *data)
sbuf_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
count, total_obj_size, total_gtt_size);
return (0);
return 0;
}
#define count_objects(list, member) do { \
@ -212,8 +206,7 @@ i915_gem_object_list_info(struct drm_device *dev, struct sbuf *m, void *data)
} \
} while (0)
static int
i915_gem_object_info(struct drm_device *dev, struct sbuf *m, void *data)
static int i915_gem_object_info(struct drm_device *dev, struct sbuf *m, void *data)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 count, mappable_count;
@ -221,7 +214,7 @@ i915_gem_object_info(struct drm_device *dev, struct sbuf *m, void *data)
struct drm_i915_gem_object *obj;
if (sx_xlock_sig(&dev->dev_struct_lock))
return (EINTR);
return -EINTR;
sbuf_printf(m, "%u objects, %zu bytes\n",
dev_priv->mm.object_count,
dev_priv->mm.object_memory);
@ -260,13 +253,13 @@ i915_gem_object_info(struct drm_device *dev, struct sbuf *m, void *data)
sbuf_printf(m, "%zu [%zu] gtt total\n",
dev_priv->mm.gtt_total, dev_priv->mm.mappable_gtt_total);
DRM_UNLOCK(dev);
return (0);
return 0;
}
static int
i915_gem_gtt_info(struct drm_device *dev, struct sbuf *m, void *data)
static int i915_gem_gtt_info(struct drm_device *dev, struct sbuf *m, void *data)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uintptr_t list = (uintptr_t)data;
@ -275,7 +268,7 @@ i915_gem_gtt_info(struct drm_device *dev, struct sbuf *m, void *data)
int count;
if (sx_xlock_sig(&dev->dev_struct_lock))
return (EINTR);
return -EINTR;
total_obj_size = total_gtt_size = count = 0;
list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
@ -295,11 +288,10 @@ i915_gem_gtt_info(struct drm_device *dev, struct sbuf *m, void *data)
sbuf_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
count, total_obj_size, total_gtt_size);
return (0);
return 0;
}
static int
i915_gem_pageflip_info(struct drm_device *dev, struct sbuf *m, void *data)
static int i915_gem_pageflip_info(struct drm_device *dev, struct sbuf *m, void *data)
{
struct intel_crtc *crtc;
struct drm_i915_gem_object *obj;
@ -307,8 +299,6 @@ i915_gem_pageflip_info(struct drm_device *dev, struct sbuf *m, void *data)
char pipe;
char plane;
if ((dev->driver->driver_features & DRIVER_MODESET) == 0)
return (0);
list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
pipe = pipe_name(crtc->pipe);
plane = plane_name(crtc->plane);
@ -346,18 +336,17 @@ i915_gem_pageflip_info(struct drm_device *dev, struct sbuf *m, void *data)
mtx_unlock(&dev->event_lock);
}
return (0);
return 0;
}
static int
i915_gem_request_info(struct drm_device *dev, struct sbuf *m, void *data)
static int i915_gem_request_info(struct drm_device *dev, struct sbuf *m, void *data)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_request *gem_request;
int count;
if (sx_xlock_sig(&dev->dev_struct_lock))
return (EINTR);
return -EINTR;
count = 0;
if (!list_empty(&dev_priv->rings[RCS].request_list)) {
@ -401,8 +390,7 @@ i915_gem_request_info(struct drm_device *dev, struct sbuf *m, void *data)
return 0;
}
static void
i915_ring_seqno_info(struct sbuf *m, struct intel_ring_buffer *ring)
static void i915_ring_seqno_info(struct sbuf *m, struct intel_ring_buffer *ring)
{
if (ring->get_seqno) {
sbuf_printf(m, "Current sequence (%s): %d\n",
@ -410,29 +398,30 @@ i915_ring_seqno_info(struct sbuf *m, struct intel_ring_buffer *ring)
}
}
static int
i915_gem_seqno_info(struct drm_device *dev, struct sbuf *m, void *data)
static int i915_gem_seqno_info(struct drm_device *dev, struct sbuf *m, void *data)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int i;
if (sx_xlock_sig(&dev->dev_struct_lock))
return (EINTR);
return -EINTR;
for (i = 0; i < I915_NUM_RINGS; i++)
i915_ring_seqno_info(m, &dev_priv->rings[i]);
DRM_UNLOCK(dev);
return (0);
return 0;
}
static int
i915_interrupt_info(struct drm_device *dev, struct sbuf *m, void *data)
static int i915_interrupt_info(struct drm_device *dev, struct sbuf *m, void *data)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int i, pipe;
if (sx_xlock_sig(&dev->dev_struct_lock))
return (EINTR);
return -EINTR;
if (IS_VALLEYVIEW(dev)) {
sbuf_printf(m, "Display IER:\t%08x\n",
@ -515,17 +504,16 @@ i915_interrupt_info(struct drm_device *dev, struct sbuf *m, void *data)
}
DRM_UNLOCK(dev);
return (0);
return 0;
}
static int
i915_gem_fence_regs_info(struct drm_device *dev, struct sbuf *m, void *data)
static int i915_gem_fence_regs_info(struct drm_device *dev, struct sbuf *m, void *data)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int i;
if (sx_xlock_sig(&dev->dev_struct_lock))
return (EINTR);
return -EINTR;
sbuf_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
sbuf_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
@ -541,50 +529,47 @@ i915_gem_fence_regs_info(struct drm_device *dev, struct sbuf *m, void *data)
}
DRM_UNLOCK(dev);
return (0);
return 0;
}
static int
i915_hws_info(struct drm_device *dev, struct sbuf *m, void *data)
static int i915_hws_info(struct drm_device *dev, struct sbuf *m, void *data)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
const volatile u32 *hws;
const volatile u32 __iomem *hws;
int i;
ring = &dev_priv->rings[(uintptr_t)data];
hws = (volatile u32 *)ring->status_page.page_addr;
if (hws == NULL)
return (0);
return 0;
for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
sbuf_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
i * 4,
hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
}
return (0);
return 0;
}
static const char *
ring_str(int ring)
static const char *ring_str(int ring)
{
switch (ring) {
case RCS: return (" render");
case VCS: return (" bsd");
case BCS: return (" blt");
default: return ("");
case RCS: return " render";
case VCS: return " bsd";
case BCS: return " blt";
default: return "";
}
}
static const char *
pin_flag(int pinned)
static const char *pin_flag(int pinned)
{
if (pinned > 0)
return (" P");
return " P";
else if (pinned < 0)
return (" p");
return " p";
else
return ("");
return "";
}
static const char *tiling_flag(int tiling)
@ -607,8 +592,10 @@ static const char *purgeable_flag(int purgeable)
return purgeable ? " purgeable" : "";
}
static void print_error_buffers(struct sbuf *m, const char *name,
struct drm_i915_error_buffer *err, int count)
static void print_error_buffers(struct sbuf *m,
const char *name,
struct drm_i915_error_buffer *err,
int count)
{
sbuf_printf(m, "%s [%d]:\n", name, count);
@ -638,9 +625,10 @@ static void print_error_buffers(struct sbuf *m, const char *name,
}
}
static void
i915_ring_error_state(struct sbuf *m, struct drm_device *dev,
struct drm_i915_error_state *error, unsigned ring)
static void i915_ring_error_state(struct sbuf *m,
struct drm_device *dev,
struct drm_i915_error_state *error,
unsigned ring)
{
MPASS((ring < I915_NUM_RINGS)); /* shut up confused gcc */
@ -672,8 +660,7 @@ i915_ring_error_state(struct sbuf *m, struct drm_device *dev,
sbuf_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
}
static int
i915_error_state(struct drm_device *dev, struct sbuf *m,
static int i915_error_state(struct drm_device *dev, struct sbuf *m,
void *unused)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@ -686,9 +673,9 @@ i915_error_state(struct drm_device *dev, struct sbuf *m,
if (error != NULL)
refcount_acquire(&error->ref);
mtx_unlock(&dev_priv->error_lock);
if (error == NULL) {
if (!error) {
sbuf_printf(m, "no error state collected\n");
return (0);
return 0;
}
error = dev_priv->first_error;
@ -722,9 +709,9 @@ i915_error_state(struct drm_device *dev, struct sbuf *m,
error->pinned_bo,
error->pinned_bo_count);
for (i = 0; i < DRM_ARRAY_SIZE(error->ring); i++) {
for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
struct drm_i915_error_object *obj;
if ((obj = error->ring[i].batchbuffer)) {
sbuf_printf(m, "%s --- gtt_offset = 0x%08x\n",
dev_priv->rings[i].name,
@ -776,7 +763,7 @@ i915_error_state(struct drm_device *dev, struct sbuf *m,
if (refcount_release(&error->ref))
i915_error_state_free(error);
return (0);
return 0;
}
static int
@ -802,8 +789,10 @@ i915_rstdby_delays(struct drm_device *dev, struct sbuf *m, void *unused)
u16 crstanddelay;
if (sx_xlock_sig(&dev->dev_struct_lock))
return (EINTR);
return -EINTR;
crstanddelay = I915_READ16(CRSTANDVID);
DRM_UNLOCK(dev);
sbuf_printf(m, "w/ctx: %d, w/o ctx: %d\n",
@ -812,8 +801,7 @@ i915_rstdby_delays(struct drm_device *dev, struct sbuf *m, void *unused)
return 0;
}
static int
i915_cur_delayinfo(struct drm_device *dev, struct sbuf *m, void *unused)
static int i915_cur_delayinfo(struct drm_device *dev, struct sbuf *m, void *unused)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@ -838,7 +826,7 @@ i915_cur_delayinfo(struct drm_device *dev, struct sbuf *m, void *unused)
/* RPSTAT1 is in the GT power well */
if (sx_xlock_sig(&dev->dev_struct_lock))
return (EINTR);
return -EINTR;
gen6_gt_force_wake_get(dev_priv);
rpstat = I915_READ(GEN6_RPSTAT1);
@ -893,50 +881,51 @@ i915_cur_delayinfo(struct drm_device *dev, struct sbuf *m, void *unused)
return 0;
}
static int
i915_delayfreq_table(struct drm_device *dev, struct sbuf *m, void *unused)
static int i915_delayfreq_table(struct drm_device *dev, struct sbuf *m, void *unused)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u32 delayfreq;
int i;
if (sx_xlock_sig(&dev->dev_struct_lock))
return (EINTR);
return -EINTR;
for (i = 0; i < 16; i++) {
delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
sbuf_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
(delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
}
DRM_UNLOCK(dev);
return (0);
return 0;
}
static inline int
MAP_TO_MV(int map)
static inline int MAP_TO_MV(int map)
{
return 1250 - (map * 25);
}
static int
i915_inttoext_table(struct drm_device *dev, struct sbuf *m, void *unused)
static int i915_inttoext_table(struct drm_device *dev, struct sbuf *m, void *unused)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u32 inttoext;
int i;
if (sx_xlock_sig(&dev->dev_struct_lock))
return (EINTR);
return -EINTR;
for (i = 1; i <= 32; i++) {
inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
sbuf_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
}
DRM_UNLOCK(dev);
return (0);
return 0;
}
static int
ironlake_drpc_info(struct drm_device *dev, struct sbuf *m)
static int ironlake_drpc_info(struct drm_device *dev, struct sbuf *m)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u32 rgvmodectl;
@ -944,10 +933,12 @@ ironlake_drpc_info(struct drm_device *dev, struct sbuf *m)
u16 crstandvid;
if (sx_xlock_sig(&dev->dev_struct_lock))
return (EINTR);
return -EINTR;
rgvmodectl = I915_READ(MEMMODECTL);
rstdbyctl = I915_READ(RSTDBYCTL);
crstandvid = I915_READ16(CRSTANDVID);
DRM_UNLOCK(dev);
sbuf_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
@ -998,16 +989,16 @@ ironlake_drpc_info(struct drm_device *dev, struct sbuf *m)
return 0;
}
static int
gen6_drpc_info(struct drm_device *dev, struct sbuf *m)
static int gen6_drpc_info(struct drm_device *dev, struct sbuf *m)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u32 rpmodectl1, gt_core_status, rcctl1;
unsigned forcewake_count;
int count=0;
if (sx_xlock_sig(&dev->dev_struct_lock))
return (EINTR);
return -EINTR;
mtx_lock(&dev_priv->gt_lock);
forcewake_count = dev_priv->forcewake_count;
@ -1019,7 +1010,7 @@ gen6_drpc_info(struct drm_device *dev, struct sbuf *m)
} else {
/* NB: we cannot use forcewake, else we read the wrong values */
while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
DRM_UDELAY(10);
udelay(10);
sbuf_printf(m, "RC information accurate: %s\n", yesno(count < 51));
}
@ -1087,12 +1078,12 @@ static int i915_drpc_info(struct drm_device *dev, struct sbuf *m, void *unused)
{
if (IS_GEN6(dev) || IS_GEN7(dev))
return (gen6_drpc_info(dev, m));
return gen6_drpc_info(dev, m);
else
return (ironlake_drpc_info(dev, m));
return ironlake_drpc_info(dev, m);
}
static int
i915_fbc_status(struct drm_device *dev, struct sbuf *m, void *unused)
static int i915_fbc_status(struct drm_device *dev, struct sbuf *m, void *unused)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@ -1134,8 +1125,7 @@ i915_fbc_status(struct drm_device *dev, struct sbuf *m, void *unused)
return 0;
}
static int
i915_sr_status(struct drm_device *dev, struct sbuf *m, void *unused)
static int i915_sr_status(struct drm_device *dev, struct sbuf *m, void *unused)
{
drm_i915_private_t *dev_priv = dev->dev_private;
bool sr_enabled = false;
@ -1152,7 +1142,31 @@ i915_sr_status(struct drm_device *dev, struct sbuf *m, void *unused)
sbuf_printf(m, "self-refresh: %s",
sr_enabled ? "enabled" : "disabled");
return (0);
return 0;
}
static int i915_emon_status(struct drm_device *dev, struct sbuf *m, void *unused)
{
drm_i915_private_t *dev_priv = dev->dev_private;
unsigned long temp, chipset, gfx;
if (!IS_GEN5(dev))
return -ENODEV;
if (sx_xlock_sig(&dev->dev_struct_lock))
return -EINTR;
temp = i915_mch_val(dev_priv);
chipset = i915_chipset_val(dev_priv);
gfx = i915_gfx_val(dev_priv);
DRM_UNLOCK(dev);
sbuf_printf(m, "GMCH temp: %ld\n", temp);
sbuf_printf(m, "Chipset power: %ld\n", chipset);
sbuf_printf(m, "GFX power: %ld\n", gfx);
sbuf_printf(m, "Total power: %ld\n", chipset + gfx);
return 0;
}
static int i915_ring_freq_table(struct drm_device *dev, struct sbuf *m,
@ -1163,11 +1177,11 @@ static int i915_ring_freq_table(struct drm_device *dev, struct sbuf *m,
if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
sbuf_printf(m, "unsupported on this chipset");
return (0);
return 0;
}
if (sx_xlock_sig(&dev->dev_struct_lock))
return (EINTR);
return -EINTR;
sbuf_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n");
@ -1188,79 +1202,54 @@ static int i915_ring_freq_table(struct drm_device *dev, struct sbuf *m,
DRM_UNLOCK(dev);
return (0);
return 0;
}
static int
i915_emon_status(struct drm_device *dev, struct sbuf *m, void *unused)
{
drm_i915_private_t *dev_priv = dev->dev_private;
unsigned long temp, chipset, gfx;
if (!IS_GEN5(dev)) {
sbuf_printf(m, "Not supported\n");
return (0);
}
if (sx_xlock_sig(&dev->dev_struct_lock))
return (EINTR);
temp = i915_mch_val(dev_priv);
chipset = i915_chipset_val(dev_priv);
gfx = i915_gfx_val(dev_priv);
DRM_UNLOCK(dev);
sbuf_printf(m, "GMCH temp: %ld\n", temp);
sbuf_printf(m, "Chipset power: %ld\n", chipset);
sbuf_printf(m, "GFX power: %ld\n", gfx);
sbuf_printf(m, "Total power: %ld\n", chipset + gfx);
return (0);
}
static int
i915_gfxec(struct drm_device *dev, struct sbuf *m, void *unused)
static int i915_gfxec(struct drm_device *dev, struct sbuf *m, void *unused)
{
drm_i915_private_t *dev_priv = dev->dev_private;
if (sx_xlock_sig(&dev->dev_struct_lock))
return (EINTR);
return -EINTR;
sbuf_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
DRM_UNLOCK(dev);
return (0);
return 0;
}
#if 0
static int
i915_opregion(struct drm_device *dev, struct sbuf *m, void *unused)
static int i915_opregion(struct drm_device *dev, struct sbuf *m, void *unused)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
if (sx_xlock_sig(&dev->dev_struct_lock))
return (EINTR);
return -EINTR;
if (opregion->header)
seq_write(m, opregion->header, OPREGION_SIZE);
DRM_UNLOCK(dev);
return 0;
}
#endif
static int
i915_gem_framebuffer_info(struct drm_device *dev, struct sbuf *m, void *data)
static int i915_gem_framebuffer_info(struct drm_device *dev, struct sbuf *m, void *data)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_fbdev *ifbdev;
struct intel_framebuffer *fb;
if (sx_xlock_sig(&dev->dev_struct_lock))
return (EINTR);
return -EINTR;
ifbdev = dev_priv->fbdev;
if (ifbdev == NULL) {
DRM_UNLOCK(dev);
return (0);
return 0;
}
fb = to_intel_framebuffer(ifbdev->helper.fb);
@ -1287,22 +1276,18 @@ i915_gem_framebuffer_info(struct drm_device *dev, struct sbuf *m, void *data)
DRM_UNLOCK(dev);
return (0);
return 0;
}
static int
i915_context_status(struct drm_device *dev, struct sbuf *m, void *data)
static int i915_context_status(struct drm_device *dev, struct sbuf *m, void *data)
{
drm_i915_private_t *dev_priv;
int ret;
if ((dev->driver->driver_features & DRIVER_MODESET) == 0)
return (0);
dev_priv = dev->dev_private;
ret = sx_xlock_sig(&dev->mode_config.mutex);
if (ret != 0)
return (EINTR);
return -EINTR;
if (dev_priv->pwrctx != NULL) {
sbuf_printf(m, "power context ");
@ -1318,11 +1303,10 @@ i915_context_status(struct drm_device *dev, struct sbuf *m, void *data)
sx_xunlock(&dev->mode_config.mutex);
return (0);
return 0;
}
static int
i915_gen6_forcewake_count_info(struct drm_device *dev, struct sbuf *m,
static int i915_gen6_forcewake_count_info(struct drm_device *dev, struct sbuf *m,
void *data)
{
struct drm_i915_private *dev_priv;
@ -1335,11 +1319,10 @@ i915_gen6_forcewake_count_info(struct drm_device *dev, struct sbuf *m,
sbuf_printf(m, "forcewake count = %u\n", forcewake_count);
return (0);
return 0;
}
static const char *
swizzle_string(unsigned swizzle)
static const char *swizzle_string(unsigned swizzle)
{
switch(swizzle) {
@ -1364,8 +1347,7 @@ swizzle_string(unsigned swizzle)
return "bug";
}
static int
i915_swizzle_info(struct drm_device *dev, struct sbuf *m, void *data)
static int i915_swizzle_info(struct drm_device *dev, struct sbuf *m, void *data)
{
struct drm_i915_private *dev_priv;
int ret;
@ -1373,7 +1355,7 @@ i915_swizzle_info(struct drm_device *dev, struct sbuf *m, void *data)
dev_priv = dev->dev_private;
ret = sx_xlock_sig(&dev->dev_struct_lock);
if (ret != 0)
return (EINTR);
return -EINTR;
sbuf_printf(m, "bit6 swizzle for X-tiling = %s\n",
swizzle_string(dev_priv->mm.bit_6_swizzle_x));
@ -1400,14 +1382,13 @@ i915_swizzle_info(struct drm_device *dev, struct sbuf *m, void *data)
I915_READ(ARB_MODE));
sbuf_printf(m, "DISP_ARB_CTL = 0x%08x\n",
I915_READ(DISP_ARB_CTL));
}
}
DRM_UNLOCK(dev);
return (0);
return 0;
}
static int
i915_ppgtt_info(struct drm_device *dev, struct sbuf *m, void *data)
static int i915_ppgtt_info(struct drm_device *dev, struct sbuf *m, void *data)
{
struct drm_i915_private *dev_priv;
struct intel_ring_buffer *ring;
@ -1417,7 +1398,7 @@ i915_ppgtt_info(struct drm_device *dev, struct sbuf *m, void *data)
ret = sx_xlock_sig(&dev->dev_struct_lock);
if (ret != 0)
return (EINTR);
return -EINTR;
if (INTEL_INFO(dev)->gen == 6)
sbuf_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
@ -1440,7 +1421,7 @@ i915_ppgtt_info(struct drm_device *dev, struct sbuf *m, void *data)
sbuf_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
DRM_UNLOCK(dev);
return (0);
return 0;
}
static int i915_dpio_info(struct drm_device *dev, struct sbuf *m, void *data)
@ -1457,7 +1438,7 @@ static int i915_dpio_info(struct drm_device *dev, struct sbuf *m, void *data)
ret = sx_xlock_sig(&dev->mode_config.mutex);
if (ret != 0)
return (EINTR);
return -EINTR;
sbuf_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL));
@ -1663,7 +1644,7 @@ i915_info_sysctl_handler(SYSCTL_HANDLER_ARGS)
if (error != 0)
return (error);
sbuf_new_for_sysctl(&m, NULL, 128, req);
error = i915_info_sysctl_list[thunk->idx].ptr(dev, &m,
error = -i915_info_sysctl_list[thunk->idx].ptr(dev, &m,
thunk->arg);
if (error == 0)
error = sbuf_finish(&m);
@ -1697,9 +1678,9 @@ i915_sysctl_init(struct drm_device *dev, struct sysctl_ctx_list *ctx,
struct i915_info_sysctl_thunk *thunks;
int i, error;
thunks = malloc(sizeof(*thunks) * DRM_ARRAY_SIZE(i915_info_sysctl_list),
thunks = malloc(sizeof(*thunks) * ARRAY_SIZE(i915_info_sysctl_list),
DRM_MEM_DRIVER, M_WAITOK | M_ZERO);
for (i = 0; i < DRM_ARRAY_SIZE(i915_info_sysctl_list); i++) {
for (i = 0; i < ARRAY_SIZE(i915_info_sysctl_list); i++) {
thunks[i].dev = dev;
thunks[i].idx = i;
thunks[i].arg = i915_info_sysctl_list[i].data;
@ -1708,15 +1689,15 @@ i915_sysctl_init(struct drm_device *dev, struct sysctl_ctx_list *ctx,
info = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "info",
CTLFLAG_RW, NULL, NULL);
if (info == NULL)
return (ENOMEM);
for (i = 0; i < DRM_ARRAY_SIZE(i915_info_sysctl_list); i++) {
return (-ENOMEM);
for (i = 0; i < ARRAY_SIZE(i915_info_sysctl_list); i++) {
oid = SYSCTL_ADD_OID(ctx, SYSCTL_CHILDREN(info), OID_AUTO,
i915_info_sysctl_list[i].name, CTLTYPE_STRING |
(i915_info_sysctl_list[i].ptr_w != NULL ? CTLFLAG_RW :
CTLFLAG_RD),
&thunks[i], 0, i915_info_sysctl_handler, "A", NULL);
if (oid == NULL)
return (ENOMEM);
return (-ENOMEM);
}
oid = SYSCTL_ADD_LONG(ctx, SYSCTL_CHILDREN(info), OID_AUTO,
"i915_gem_wired_pages", CTLFLAG_RD, &i915_gem_wired_pages_cnt,
@ -1725,34 +1706,34 @@ i915_sysctl_init(struct drm_device *dev, struct sysctl_ctx_list *ctx,
CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, dev, 0,
i915_debug_set_wedged, "I", NULL);
if (oid == NULL)
return (ENOMEM);
return (-ENOMEM);
oid = SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "max_freq",
CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, dev, 0, i915_max_freq,
"I", NULL);
if (oid == NULL)
return (ENOMEM);
return (-ENOMEM);
oid = SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(top), OID_AUTO,
"cache_sharing", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, dev,
0, i915_cache_sharing, "I", NULL);
if (oid == NULL)
return (ENOMEM);
return (-ENOMEM);
oid = SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(top), OID_AUTO,
"stop_rings", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, dev,
0, i915_stop_rings, "I", NULL);
if (oid == NULL)
return (ENOMEM);
return (-ENOMEM);
oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "sync_exec",
CTLFLAG_RW, &i915_gem_sync_exec_requests, 0, NULL);
if (oid == NULL)
return (ENOMEM);
return (-ENOMEM);
oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "fix_mi",
CTLFLAG_RW, &i915_fix_mi_batchbuffer_end, 0, NULL);
if (oid == NULL)
return (ENOMEM);
return (-ENOMEM);
oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "intr_pf",
CTLFLAG_RW, &i915_intr_pf, 0, NULL);
if (oid == NULL)
return (ENOMEM);
return (-ENOMEM);
error = drm_add_busid_modesetting(dev, ctx, top);
if (error != 0)

View File

@ -65,12 +65,9 @@ intel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg)
#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
#define I915_BREADCRUMB_INDEX 0x21
static int i915_driver_unload_int(struct drm_device *dev, bool locked);
void i915_update_dri1_breadcrumb(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
#if 0
struct drm_i915_master_private *master_priv;
if (dev->primary->master) {
@ -79,11 +76,6 @@ void i915_update_dri1_breadcrumb(struct drm_device *dev)
master_priv->sarea_priv->last_dispatch =
READ_BREADCRUMB(dev_priv);
}
#else
if (dev_priv->sarea_priv)
dev_priv->sarea_priv->last_dispatch =
READ_BREADCRUMB(dev_priv);
#endif
}
static void i915_write_hws_pga(struct drm_device *dev)
@ -112,10 +104,8 @@ static int i915_init_phys_hws(struct drm_device *dev)
* of allocation is used on <= 965 hardware, that has several
* erratas regarding the use of physical memory > 4 GB.
*/
DRM_UNLOCK(dev);
dev_priv->status_page_dmah =
drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
DRM_LOCK(dev);
drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, BUS_SPACE_MAXADDR);
if (!dev_priv->status_page_dmah) {
DRM_ERROR("Can not allocate hardware status page\n");
return -ENOMEM;
@ -160,6 +150,7 @@ static void i915_free_hws(struct drm_device *dev)
void i915_kernel_lost_context(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv;
struct intel_ring_buffer *ring = LP_RING(dev_priv);
/*
@ -175,15 +166,12 @@ void i915_kernel_lost_context(struct drm_device * dev)
if (ring->space < 0)
ring->space += ring->size;
#if 1
KIB_NOTYET();
#else
if (!dev->primary->master)
return;
#endif
if (ring->head == ring->tail && dev_priv->sarea_priv)
dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
master_priv = dev->primary->master->driver_priv;
if (ring->head == ring->tail && master_priv->sarea_priv)
master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}
static int i915_dma_cleanup(struct drm_device * dev)
@ -199,8 +187,10 @@ static int i915_dma_cleanup(struct drm_device * dev)
if (dev->irq_enabled)
drm_irq_uninstall(dev);
DRM_LOCK(dev);
for (i = 0; i < I915_NUM_RINGS; i++)
intel_cleanup_ring_buffer(&dev_priv->rings[i]);
DRM_UNLOCK(dev);
/* Clear the HWS virtual address at teardown */
if (I915_NEED_GFX_HWS(dev))
@ -212,18 +202,17 @@ static int i915_dma_cleanup(struct drm_device * dev)
static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
int ret;
dev_priv->sarea = drm_getsarea(dev);
if (!dev_priv->sarea) {
DRM_ERROR("can not find sarea!\n");
i915_dma_cleanup(dev);
return -EINVAL;
master_priv->sarea = drm_getsarea(dev);
if (master_priv->sarea) {
master_priv->sarea_priv = (drm_i915_sarea_t *)
((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
} else {
DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
}
dev_priv->sarea_priv = (drm_i915_sarea_t *)
((u8 *) dev_priv->sarea->virtual + init->sarea_priv_offset);
if (init->ring_size != 0) {
if (LP_RING(dev_priv)->obj != NULL) {
i915_dma_cleanup(dev);
@ -245,7 +234,8 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
dev_priv->back_offset = init->back_offset;
dev_priv->front_offset = init->front_offset;
dev_priv->current_page = 0;
dev_priv->sarea_priv->pf_current_page = 0;
if (master_priv->sarea_priv)
master_priv->sarea_priv->pf_current_page = 0;
/* Allow hardware batchbuffers unless told otherwise.
*/
@ -485,11 +475,12 @@ i915_emit_box_p(struct drm_device *dev, struct drm_clip_rect *box,
static void i915_emit_breadcrumb(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
if (++dev_priv->counter > 0x7FFFFFFFUL)
dev_priv->counter = 0;
if (dev_priv->sarea_priv)
dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
if (master_priv->sarea_priv)
master_priv->sarea_priv->last_enqueue = dev_priv->counter;
if (BEGIN_LP_RING(4) == 0) {
OUT_RING(MI_STORE_DWORD_INDEX);
@ -594,15 +585,17 @@ i915_dispatch_batchbuffer(struct drm_device * dev,
static int i915_dispatch_flip(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv =
dev->primary->master->driver_priv;
int ret;
if (!dev_priv->sarea_priv)
if (!master_priv->sarea_priv)
return -EINVAL;
DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
__func__,
dev_priv->current_page,
dev_priv->sarea_priv->pf_current_page);
master_priv->sarea_priv->pf_current_page);
i915_kernel_lost_context(dev);
@ -628,10 +621,7 @@ static int i915_dispatch_flip(struct drm_device * dev)
ADVANCE_LP_RING();
if (++dev_priv->counter > 0x7FFFFFFFUL)
dev_priv->counter = 0;
if (dev_priv->sarea_priv)
dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
master_priv->sarea_priv->last_enqueue = dev_priv->counter++;
if (BEGIN_LP_RING(4) == 0) {
OUT_RING(MI_STORE_DWORD_INDEX);
@ -641,7 +631,7 @@ static int i915_dispatch_flip(struct drm_device * dev)
ADVANCE_LP_RING();
}
dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
master_priv->sarea_priv->pf_current_page = dev_priv->current_page;
return 0;
}
@ -675,7 +665,9 @@ int i915_batchbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
drm_i915_sarea_t *sarea_priv;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
master_priv->sarea_priv;
drm_i915_batchbuffer_t *batch = data;
struct drm_clip_rect *cliprects;
size_t cliplen;
@ -685,7 +677,6 @@ int i915_batchbuffer(struct drm_device *dev, void *data,
DRM_ERROR("Batchbuffer ioctl disabled\n");
return -EINVAL;
}
DRM_UNLOCK(dev);
DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
batch->start, batch->used, batch->num_cliprects);
@ -700,18 +691,16 @@ int i915_batchbuffer(struct drm_device *dev, void *data,
ret = -copyin(batch->cliprects, cliprects,
batch->num_cliprects * sizeof(struct drm_clip_rect));
if (ret != 0) {
DRM_LOCK(dev);
if (ret != 0)
goto fail_free;
}
} else
cliprects = NULL;
DRM_LOCK(dev);
RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
DRM_UNLOCK(dev);
sarea_priv = (drm_i915_sarea_t *)dev_priv->sarea_priv;
if (sarea_priv)
sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
@ -724,7 +713,9 @@ int i915_cmdbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
drm_i915_sarea_t *sarea_priv;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
master_priv->sarea_priv;
drm_i915_cmdbuffer_t *cmdbuf = data;
struct drm_clip_rect *cliprects = NULL;
void *batch_data;
@ -739,15 +730,11 @@ int i915_cmdbuffer(struct drm_device *dev, void *data,
if (cmdbuf->num_cliprects < 0)
return -EINVAL;
DRM_UNLOCK(dev);
batch_data = malloc(cmdbuf->sz, DRM_MEM_DMA, M_WAITOK);
ret = -copyin(cmdbuf->buf, batch_data, cmdbuf->sz);
if (ret != 0) {
DRM_LOCK(dev);
if (ret != 0)
goto fail_batch_free;
}
if (cmdbuf->num_cliprects) {
cliprects = malloc(cmdbuf->num_cliprects *
@ -755,21 +742,19 @@ int i915_cmdbuffer(struct drm_device *dev, void *data,
M_WAITOK | M_ZERO);
ret = -copyin(cmdbuf->cliprects, cliprects,
cmdbuf->num_cliprects * sizeof(struct drm_clip_rect));
if (ret != 0) {
DRM_LOCK(dev);
if (ret != 0)
goto fail_clip_free;
}
}
DRM_LOCK(dev);
RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
DRM_UNLOCK(dev);
if (ret) {
DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
goto fail_clip_free;
}
sarea_priv = (drm_i915_sarea_t *)dev_priv->sarea_priv;
if (sarea_priv)
sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
@ -783,9 +768,7 @@ int i915_cmdbuffer(struct drm_device *dev, void *data,
static int i915_emit_irq(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
#if 0
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
#endif
i915_kernel_lost_context(dev);
@ -794,13 +777,8 @@ static int i915_emit_irq(struct drm_device * dev)
dev_priv->counter++;
if (dev_priv->counter > 0x7FFFFFFFUL)
dev_priv->counter = 1;
#if 0
if (master_priv->sarea_priv)
master_priv->sarea_priv->last_enqueue = dev_priv->counter;
#else
if (dev_priv->sarea_priv)
dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
#endif
if (BEGIN_LP_RING(4) == 0) {
OUT_RING(MI_STORE_DWORD_INDEX);
@ -816,16 +794,13 @@ static int i915_emit_irq(struct drm_device * dev)
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
#if 0
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
#endif
int ret;
struct intel_ring_buffer *ring = LP_RING(dev_priv);
DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
READ_BREADCRUMB(dev_priv));
#if 0
if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
if (master_priv->sarea_priv)
master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
@ -834,30 +809,18 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
if (master_priv->sarea_priv)
master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
#else
if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
if (dev_priv->sarea_priv) {
dev_priv->sarea_priv->last_dispatch =
READ_BREADCRUMB(dev_priv);
}
return 0;
}
if (dev_priv->sarea_priv)
dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
#endif
ret = 0;
mtx_lock(&dev_priv->irq_lock);
if (ring->irq_get(ring)) {
DRM_UNLOCK(dev);
while (ret == 0 && READ_BREADCRUMB(dev_priv) < irq_nr) {
ret = -msleep(ring, &dev_priv->irq_lock, PCATCH,
"915wtq", 3 * hz);
if (ret == -ERESTART)
ret = -ERESTARTSYS;
}
ring->irq_put(ring);
mtx_unlock(&dev_priv->irq_lock);
DRM_LOCK(dev);
} else {
mtx_unlock(&dev_priv->irq_lock);
if (_intel_wait_for(dev, READ_BREADCRUMB(dev_priv) >= irq_nr,
@ -977,7 +940,9 @@ static int i915_flip_bufs(struct drm_device *dev, void *data,
RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
DRM_LOCK(dev);
ret = i915_dispatch_flip(dev);
DRM_UNLOCK(dev);
return ret;
}
@ -1200,6 +1165,31 @@ i915_load_modeset_init(struct drm_device *dev)
return (ret);
}
int i915_master_create(struct drm_device *dev, struct drm_master *master)
{
struct drm_i915_master_private *master_priv;
master_priv = malloc(sizeof(*master_priv), DRM_MEM_DMA,
M_NOWAIT | M_ZERO);
if (!master_priv)
return -ENOMEM;
master->driver_priv = master_priv;
return 0;
}
void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
{
struct drm_i915_master_private *master_priv = master->driver_priv;
if (!master_priv)
return;
free(master_priv, DRM_MEM_DMA);
master->driver_priv = NULL;
}
static int
i915_get_bridge_dev(struct drm_device *dev)
{
@ -1250,10 +1240,10 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
#endif
/* Get some space for it */
vga = device_get_parent(dev->device);
vga = device_get_parent(dev->dev);
dev_priv->mch_res_rid = 0x100;
dev_priv->mch_res = BUS_ALLOC_RESOURCE(device_get_parent(vga),
dev->device, SYS_RES_MEMORY, &dev_priv->mch_res_rid, 0, ~0UL,
dev->dev, SYS_RES_MEMORY, &dev_priv->mch_res_rid, 0, ~0UL,
MCHBAR_SIZE, RF_ACTIVE | RF_SHAREABLE);
if (dev_priv->mch_res == NULL) {
DRM_ERROR("failed mchbar resource alloc\n");
@ -1340,10 +1330,10 @@ intel_teardown_mchbar(struct drm_device *dev)
}
if (dev_priv->mch_res != NULL) {
vga = device_get_parent(dev->device);
BUS_DEACTIVATE_RESOURCE(device_get_parent(vga), dev->device,
vga = device_get_parent(dev->dev);
BUS_DEACTIVATE_RESOURCE(device_get_parent(vga), dev->dev,
SYS_RES_MEMORY, dev_priv->mch_res_rid, dev_priv->mch_res);
BUS_RELEASE_RESOURCE(device_get_parent(vga), dev->device,
BUS_RELEASE_RESOURCE(device_get_parent(vga), dev->dev,
SYS_RES_MEMORY, dev_priv->mch_res_rid, dev_priv->mch_res);
dev_priv->mch_res = NULL;
}
@ -1393,6 +1383,11 @@ i915_driver_load(struct drm_device *dev, unsigned long flags)
ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
_DRM_KERNEL | _DRM_DRIVER, &dev_priv->mmio_map);
if (ret != 0) {
DRM_ERROR("Failed to allocate mmio_map: %d\n", ret);
free(dev_priv, DRM_MEM_DRIVER);
return (ret);
}
dev_priv->tq = taskqueue_create("915", M_WAITOK,
taskqueue_thread_enqueue, &dev_priv->tq);
@ -1413,13 +1408,26 @@ i915_driver_load(struct drm_device *dev, unsigned long flags)
i915_gem_load(dev);
/* On the 945G/GM, the chipset reports the MSI capability on the
* integrated graphics even though the support isn't actually there
* according to the published specs. It doesn't appear to function
* correctly in testing on 945G.
* This may be a side effect of MSI having been made available for PEG
* and the registers being closely associated.
*
* According to chipset errata, on the 965GM, MSI interrupts may
* be lost or delayed, but we use them anyways to avoid
* stuck interrupts on some machines.
*/
if (!IS_I945G(dev) && !IS_I945GM(dev))
drm_pci_enable_msi(dev);
/* Init HWS */
if (!I915_NEED_GFX_HWS(dev)) {
ret = i915_init_phys_hws(dev);
if (ret != 0) {
drm_rmmap(dev, dev_priv->mmio_map);
drm_free(dev_priv, sizeof(struct drm_i915_private),
DRM_MEM_DRIVER);
free(dev_priv, DRM_MEM_DRIVER);
return ret;
}
}
@ -1443,9 +1451,7 @@ i915_driver_load(struct drm_device *dev, unsigned long flags)
intel_detect_pch(dev);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
DRM_UNLOCK(dev);
ret = i915_load_modeset_init(dev);
DRM_LOCK(dev);
if (ret < 0) {
DRM_ERROR("failed to init modeset\n");
goto out_gem_unload;
@ -1465,31 +1471,27 @@ i915_driver_load(struct drm_device *dev, unsigned long flags)
out_gem_unload:
/* XXXKIB */
(void) i915_driver_unload_int(dev, true);
(void) i915_driver_unload(dev);
return (ret);
}
static int
i915_driver_unload_int(struct drm_device *dev, bool locked)
int
i915_driver_unload(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
if (!locked)
DRM_LOCK(dev);
DRM_LOCK(dev);
ret = i915_gpu_idle(dev);
if (ret)
DRM_ERROR("failed to idle hardware: %d\n", ret);
i915_gem_retire_requests(dev);
if (!locked)
DRM_UNLOCK(dev);
DRM_UNLOCK(dev);
i915_free_hws(dev);
intel_teardown_mchbar(dev);
if (locked)
DRM_UNLOCK(dev);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
intel_fbdev_fini(dev);
intel_modeset_cleanup(dev);
@ -1501,19 +1503,17 @@ i915_driver_unload_int(struct drm_device *dev, bool locked)
i915_destroy_error_state(dev);
if (dev->msi_enabled)
drm_pci_disable_msi(dev);
intel_opregion_fini(dev);
if (locked)
DRM_LOCK(dev);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
if (!locked)
DRM_LOCK(dev);
DRM_LOCK(dev);
i915_gem_free_all_phys_object(dev);
i915_gem_cleanup_ringbuffer(dev);
i915_gem_context_fini(dev);
if (!locked)
DRM_UNLOCK(dev);
DRM_UNLOCK(dev);
i915_gem_cleanup_aliasing_ppgtt(dev);
#if 1
KIB_NOTYET();
@ -1536,7 +1536,7 @@ i915_driver_unload_int(struct drm_device *dev, bool locked)
if (dev_priv->tq != NULL)
taskqueue_free(dev_priv->tq);
bus_generic_detach(dev->device);
bus_generic_detach(dev->dev);
drm_rmmap(dev, dev_priv->mmio_map);
intel_teardown_gmbus(dev);
@ -1544,19 +1544,11 @@ i915_driver_unload_int(struct drm_device *dev, bool locked)
mtx_destroy(&dev_priv->error_lock);
mtx_destroy(&dev_priv->error_completion_lock);
mtx_destroy(&dev_priv->rps_lock);
drm_free(dev->dev_private, sizeof(drm_i915_private_t),
DRM_MEM_DRIVER);
free(dev->dev_private, DRM_MEM_DRIVER);
return (0);
}
int
i915_driver_unload(struct drm_device *dev)
{
return (i915_driver_unload_int(dev, true));
}
int
i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
@ -1604,7 +1596,7 @@ void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
mtx_destroy(&i915_file_priv->mm.lck);
drm_free(i915_file_priv, sizeof(*i915_file_priv), DRM_MEM_FILES);
free(i915_file_priv, DRM_MEM_FILES);
}
struct drm_ioctl_desc i915_ioctls[] = {
@ -1625,24 +1617,24 @@ struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH ),
DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH | DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH | DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, 0),
DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0),
DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0),
DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0),
DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
@ -1655,14 +1647,18 @@ struct drm_ioctl_desc i915_ioctls[] = {
};
#ifdef COMPAT_FREEBSD32
extern drm_ioctl_desc_t i915_compat_ioctls[];
extern struct drm_ioctl_desc i915_compat_ioctls[];
extern int i915_compat_ioctls_nr;
#endif
struct drm_driver_info i915_driver_info = {
.driver_features = DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
DRIVER_USE_MTRR | DRIVER_HAVE_IRQ | DRIVER_LOCKLESS_IRQ |
DRIVER_GEM /*| DRIVER_MODESET*/,
struct drm_driver i915_driver_info = {
/*
* FIXME Linux<->FreeBSD: DRIVER_USE_MTRR is commented out on
* Linux.
*/
.driver_features =
DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME,
.buf_priv_size = sizeof(drm_i915_private_t),
.load = i915_driver_load,
@ -1672,6 +1668,8 @@ struct drm_driver_info i915_driver_info = {
.lastclose = i915_driver_lastclose,
.postclose = i915_driver_postclose,
.device_is_agp = i915_driver_device_is_agp,
.master_create = i915_master_create,
.master_destroy = i915_master_destroy,
.gem_init_object = i915_gem_init_object,
.gem_free_object = i915_gem_free_object,
.gem_pager_ops = &i915_gem_pager_ops,
@ -1684,9 +1682,9 @@ struct drm_driver_info i915_driver_info = {
.ioctls = i915_ioctls,
#ifdef COMPAT_FREEBSD32
.compat_ioctls = i915_compat_ioctls,
.compat_ioctls_nr = &i915_compat_ioctls_nr,
.num_compat_ioctls = &i915_compat_ioctls_nr,
#endif
.max_ioctl = DRM_ARRAY_SIZE(i915_ioctls),
.num_ioctls = ARRAY_SIZE(i915_ioctls),
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
@ -1705,4 +1703,3 @@ int i915_driver_device_is_agp(struct drm_device * dev)
{
return 1;
}

View File

@ -72,7 +72,7 @@ typedef struct _drm_i915_init {
unsigned int sarea_handle;
} drm_i915_init_t;
typedef struct drm_i915_sarea {
typedef struct _drm_i915_sarea {
struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
int last_upload; /* last time texture was uploaded */
int last_enqueue; /* last time a buffer was enqueued */
@ -114,14 +114,14 @@ typedef struct drm_i915_sarea {
unsigned int rotated_tiled;
unsigned int rotated2_tiled;
int planeA_x;
int planeA_y;
int planeA_w;
int planeA_h;
int planeB_x;
int planeB_y;
int planeB_w;
int planeB_h;
int pipeA_x;
int pipeA_y;
int pipeA_w;
int pipeA_h;
int pipeB_x;
int pipeB_y;
int pipeB_w;
int pipeB_h;
/* Triple buffering */
drm_handle_t third_handle;
@ -139,6 +139,16 @@ typedef struct drm_i915_sarea {
unsigned int depth_bo_handle;
} drm_i915_sarea_t;
/* due to userspace building against these headers we need some compat here */
#define planeA_x pipeA_x
#define planeA_y pipeA_y
#define planeA_w pipeA_w
#define planeA_h pipeA_h
#define planeB_x pipeB_x
#define planeB_y pipeB_y
#define planeB_w pipeB_w
#define planeB_h pipeB_h
/* Driver specific fence types and classes.
*/
@ -224,7 +234,6 @@ typedef struct drm_i915_sarea {
#define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
#define DRM_IOCTL_I915_MMIO DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_MMIO, drm_i915_mmio)
#define DRM_IOCTL_I915_EXECBUFFER DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_EXECBUFFER, struct drm_i915_execbuffer)
#define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
#define DRM_IOCTL_I915_GEM_EXECBUFFER2 DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
@ -447,26 +456,6 @@ typedef struct drm_i915_hws_addr {
#define I915_RELOC1_STRIDE 4
struct drm_i915_op_arg {
uint64_t next;
uint64_t reloc_ptr;
int handled;
unsigned int pad64;
union {
struct drm_bo_op_req req;
struct drm_bo_arg_rep rep;
} d;
};
struct drm_i915_execbuffer {
uint64_t ops_list;
uint32_t num_buffers;
struct drm_i915_batchbuffer batch;
drm_context_t context; /* for lockless use in the future */
struct drm_fence_arg fence_arg;
};
struct drm_i915_gem_init {
/**
* Beginning offset in the GTT to be managed by the DRM memory

View File

@ -298,13 +298,11 @@ static int i915_drm_freeze(struct drm_device *dev)
pci_save_state(dev->pdev);
#endif
DRM_LOCK(dev);
/* If KMS is active, we do the leavevt stuff here */
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
error = -i915_gem_idle(dev);
error = i915_gem_idle(dev);
if (error) {
DRM_UNLOCK(dev);
device_printf(dev->device,
device_printf(dev->dev,
"GEM idle failed, resume might fail\n");
return (error);
}
@ -317,7 +315,6 @@ static int i915_drm_freeze(struct drm_device *dev)
/* Modeset on resume, not lid events */
dev_priv->modeset_on_lid = 0;
DRM_UNLOCK(dev);
return 0;
}
@ -331,13 +328,13 @@ i915_suspend(device_t kdev)
dev = device_get_softc(kdev);
if (dev == NULL || dev->dev_private == NULL) {
DRM_ERROR("DRM not initialized, aborting suspend.\n");
return -ENODEV;
return ENODEV;
}
DRM_DEBUG_KMS("starting suspend\n");
error = i915_drm_freeze(dev);
if (error)
return (error);
return (-error);
error = bus_generic_suspend(kdev);
DRM_DEBUG_KMS("finished suspend %d\n", error);
@ -349,9 +346,10 @@ static int i915_drm_thaw(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int error = 0;
DRM_LOCK(dev);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
DRM_LOCK(dev);
i915_gem_restore_gtt_mappings(dev);
DRM_UNLOCK(dev);
}
i915_restore_state(dev);
@ -362,6 +360,7 @@ static int i915_drm_thaw(struct drm_device *dev)
if (HAS_PCH_SPLIT(dev))
ironlake_init_pch_refclk(dev);
DRM_LOCK(dev);
dev_priv->mm.suspended = 0;
error = i915_gem_init_hw(dev);
@ -377,15 +376,12 @@ static int i915_drm_thaw(struct drm_device *dev)
/* Resume the modeset for every activated CRTC */
drm_helper_resume_force_mode(dev);
sx_xunlock(&dev->mode_config.mutex);
DRM_LOCK(dev);
}
intel_opregion_init(dev);
dev_priv->modeset_on_lid = 0;
DRM_UNLOCK(dev);
return error;
}
@ -404,9 +400,9 @@ i915_resume(device_t kdev)
pci_set_master(dev->pdev);
#endif
ret = -i915_drm_thaw(dev);
ret = i915_drm_thaw(dev);
if (ret != 0)
return (ret);
return (-ret);
drm_kms_helper_poll_enable(dev);
ret = bus_generic_resume(kdev);
@ -420,9 +416,9 @@ i915_probe(device_t kdev)
const struct intel_device_info *info;
int error;
error = drm_probe(kdev, i915_pciidlist);
error = drm_probe_helper(kdev, i915_pciidlist);
if (error != 0)
return (error);
return (-error);
info = i915_get_device_id(pci_get_device(kdev));
if (info == NULL)
return (ENXIO);
@ -434,13 +430,10 @@ int i915_modeset;
static int
i915_attach(device_t kdev)
{
struct drm_device *dev;
dev = device_get_softc(kdev);
if (i915_modeset == 1)
i915_driver_info.driver_features |= DRIVER_MODESET;
dev->driver = &i915_driver_info;
return (drm_attach(kdev, i915_pciidlist));
return (-drm_attach_helper(kdev, i915_pciidlist, &i915_driver_info));
}
static struct fb_info *
@ -483,7 +476,7 @@ static device_method_t i915_methods[] = {
DEVMETHOD(device_attach, i915_attach),
DEVMETHOD(device_suspend, i915_suspend),
DEVMETHOD(device_resume, i915_resume),
DEVMETHOD(device_detach, drm_detach),
DEVMETHOD(device_detach, drm_generic_detach),
/* Framebuffer service methods */
DEVMETHOD(fb_getinfo, i915_fb_helper_getinfo),
@ -773,7 +766,7 @@ i965_reset_complete(struct drm_device *dev)
{
u8 gdrst;
gdrst = pci_read_config(dev->device, I965_GDRST, 1);
gdrst = pci_read_config(dev->dev, I965_GDRST, 1);
return (gdrst & GRDOM_RESET_ENABLE) == 0;
}
@ -788,8 +781,8 @@ i965_do_reset(struct drm_device *dev)
* well as the reset bit (GR/bit 0). Setting the GR bit
* triggers the reset; when done, the hardware will clear it.
*/
gdrst = pci_read_config(dev->device, I965_GDRST, 1);
pci_write_config(dev->device, I965_GDRST,
gdrst = pci_read_config(dev->dev, I965_GDRST, 1);
pci_write_config(dev->dev, I965_GDRST,
gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE, 1);
ret = wait_for(i965_reset_complete(dev), 500);
@ -797,8 +790,8 @@ i965_do_reset(struct drm_device *dev)
return ret;
/* We can't reset render&media without also resetting display ... */
gdrst = pci_read_config(dev->device, I965_GDRST, 1);
pci_write_config(dev->device, I965_GDRST,
gdrst = pci_read_config(dev->dev, I965_GDRST, 1);
pci_write_config(dev->dev, I965_GDRST,
gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE, 1);
return wait_for(i965_reset_complete(dev), 500);
@ -949,9 +942,7 @@ int i915_reset(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_MODESET))
intel_modeset_init_hw(dev);
DRM_LOCK(dev);
drm_irq_uninstall(dev);
DRM_UNLOCK(dev);
drm_irq_install(dev);
} else
DRM_UNLOCK(dev);

View File

@ -248,6 +248,10 @@ struct intel_opregion {
};
#define OPREGION_SIZE (8*1024)
struct drm_i915_master_private {
drm_local_map_t *sarea;
struct _drm_i915_sarea *sarea_priv;
};
#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 16
/* 16 fences + sign bit for FENCE_REG_NONE */
@ -295,7 +299,6 @@ typedef struct drm_i915_private {
int relative_constants_mode;
drm_local_map_t *sarea;
drm_local_map_t *mmio_map;
/** gt_fifo_count and the subsequent register write are synchronized
@ -306,7 +309,6 @@ typedef struct drm_i915_private {
/** gt_lock is also taken in irq contexts. */
struct mtx gt_lock;
drm_i915_sarea_t *sarea_priv;
/* drm_i915_ring_buffer_t ring; */
struct intel_ring_buffer rings[I915_NUM_RINGS];
uint32_t next_seqno;
@ -1067,7 +1069,7 @@ struct drm_i915_error_state {
extern int intel_iommu_enabled;
extern struct drm_ioctl_desc i915_ioctls[];
extern struct drm_driver_info i915_driver_info;
extern struct drm_driver i915_driver_info;
extern struct cdev_pager_ops i915_gem_pager_ops;
extern unsigned int i915_fbpercrtc;
extern int i915_panel_ignore_lid;
@ -1094,6 +1096,9 @@ int i915_sysctl_init(struct drm_device *dev, struct sysctl_ctx_list *ctx,
struct sysctl_oid *top);
void i915_sysctl_cleanup(struct drm_device *dev);
extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
/* i915_dma.c */
int i915_batchbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv);

View File

@ -161,6 +161,8 @@ i915_gem_wait_for_error(struct drm_device *dev)
while (dev_priv->error_completion == 0) {
ret = -msleep(&dev_priv->error_completion,
&dev_priv->error_completion_lock, PCATCH, "915wco", 0);
if (ret == -ERESTART)
ret = -ERESTARTSYS;
if (ret != 0) {
mtx_unlock(&dev_priv->error_completion_lock);
return (ret);
@ -299,6 +301,7 @@ i915_gem_init_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_init *args;
drm_i915_private_t *dev_priv;
int error;
if (drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
@ -321,8 +324,11 @@ i915_gem_init_ioctl(struct drm_device *dev, void *data,
* XXXKIB. The second-time initialization should be guarded
* against.
*/
return (i915_gem_init_global_gtt(dev, args->gtt_start, args->gtt_end,
args->gtt_end));
DRM_LOCK(dev);
error = i915_gem_init_global_gtt(dev, args->gtt_start,
args->gtt_end, args->gtt_end);
DRM_UNLOCK(dev);
return (error);
}
int
@ -331,20 +337,28 @@ i915_gem_idle(struct drm_device *dev)
drm_i915_private_t *dev_priv;
int ret;
DRM_LOCK(dev);
dev_priv = dev->dev_private;
if (dev_priv->mm.suspended)
if (dev_priv->mm.suspended) {
DRM_UNLOCK(dev);
return (0);
}
ret = i915_gpu_idle(dev);
if (ret != 0)
if (ret != 0) {
DRM_UNLOCK(dev);
return (ret);
}
i915_gem_retire_requests(dev);
/* Under UMS, be paranoid and evict. */
if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
ret = i915_gem_evict_everything(dev, false);
if (ret != 0)
if (ret != 0) {
DRM_UNLOCK(dev);
return ret;
}
}
i915_gem_reset_fences(dev);
@ -359,6 +373,8 @@ i915_gem_idle(struct drm_device *dev)
i915_kernel_lost_context(dev);
i915_gem_cleanup_ringbuffer(dev);
DRM_UNLOCK(dev);
/* Cancel the retire work handler, which should be idle now. */
taskqueue_cancel_timeout(dev_priv->tq, &dev_priv->mm.retire_task, NULL);
return (ret);
@ -613,7 +629,7 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj, uint32_t alignment,
void
i915_gem_object_unpin(struct drm_i915_gem_object *obj)
{
KASSERT(obj->pin_count != 0, ("zero pin count"));
KASSERT(obj->gtt_space != NULL, ("No gtt mapping"));
@ -873,10 +889,12 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
atomic_store_rel_int(&dev_priv->mm.wedged, 0);
}
DRM_LOCK(dev);
dev_priv->mm.suspended = 0;
ret = i915_gem_init_hw(dev);
if (ret != 0) {
DRM_UNLOCK(dev);
return (ret);
}
@ -884,16 +902,18 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
KASSERT(list_empty(&dev_priv->mm.flushing_list), ("flushing list"));
KASSERT(list_empty(&dev_priv->mm.inactive_list), ("inactive list"));
DRM_UNLOCK(dev);
ret = drm_irq_install(dev);
DRM_LOCK(dev);
if (ret)
goto cleanup_ringbuffer;
return (0);
cleanup_ringbuffer:
DRM_LOCK(dev);
i915_gem_cleanup_ringbuffer(dev);
dev_priv->mm.suspended = 1;
DRM_UNLOCK(dev);
return (ret);
}
@ -926,13 +946,12 @@ i915_gem_create(struct drm_file *file, struct drm_device *dev, uint64_t size,
if (obj == NULL)
return (-ENOMEM);
handle = 0;
ret = drm_gem_handle_create(file, &obj->base, &handle);
if (ret != 0) {
drm_gem_object_release(&obj->base);
i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
free(obj, DRM_I915_GEM);
return (-ret);
return (ret);
}
/* drop reference from allocate - handle holds it now */
@ -976,23 +995,12 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
#define to_user_ptr(x) ((void *)(uintptr_t)(x))
#define offset_in_page(x) ((x) & PAGE_MASK)
#define page_to_phys(x) VM_PAGE_TO_PHYS(x)
static inline long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
return (copyout(from, to, n) != 0 ? n : 0);
}
static inline int
__copy_to_user_inatomic(void __user *to, const void *from, unsigned n)
{
return (copyout_nofault(from, to, n) != 0 ? n : 0);
}
static inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
return ((copyin(__DECONST(void *, from), to, n) != 0 ? n : 0));
}
#define copy_from_user(to, from, n) __copy_from_user((to), (from), (n))
static inline unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
unsigned long n)
{
@ -1017,7 +1025,7 @@ fault_in_multipages_readable(const char __user *uaddr, int size)
return ret;
while (uaddr <= end) {
ret = copyin(uaddr, &c, 1);
ret = -copyin(uaddr, &c, 1);
if (ret != 0)
return -EFAULT;
uaddr += PAGE_SIZE;
@ -1026,10 +1034,10 @@ fault_in_multipages_readable(const char __user *uaddr, int size)
/* Check whether the range spilled into the next page. */
if (((unsigned long)uaddr & ~PAGE_MASK) ==
((unsigned long)end & ~PAGE_MASK)) {
ret = copyin(end, &c, 1);
ret = -copyin(end, &c, 1);
}
return -ret;
return ret;
}
static inline int
@ -1868,14 +1876,13 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
PROC_LOCK(p);
if (map->size + size > lim_cur(p, RLIMIT_VMEM)) {
PROC_UNLOCK(p);
error = ENOMEM;
error = -ENOMEM;
goto out;
}
PROC_UNLOCK(p);
addr = 0;
vm_object_reference(obj->vm_obj);
DRM_UNLOCK(dev);
rv = vm_map_find(map, obj->vm_obj, args->offset, &addr, args->size, 0,
VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE,
VM_PROT_READ | VM_PROT_WRITE, MAP_INHERIT_SHARE);
@ -1885,7 +1892,6 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
} else {
args->addr_ptr = (uint64_t)addr;
}
DRM_LOCK(dev);
out:
drm_gem_object_unreference(obj);
return (error);
@ -2631,11 +2637,11 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
if (free_space != NULL) {
if (map_and_fenceable)
obj->gtt_space = drm_mm_get_block_range_generic(
free_space, size, alignment, 0,
free_space, size, alignment, 0, 0,
dev_priv->mm.gtt_mappable_end, 1);
else
obj->gtt_space = drm_mm_get_block_generic(free_space,
size, alignment, 1);
size, alignment, 0, 1);
}
if (obj->gtt_space == NULL) {
ret = i915_gem_evict_something(dev, size, alignment,
@ -2771,14 +2777,14 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
}
ret = i915_gem_object_finish_gpu(obj);
if (ret == -ERESTART || ret == -EINTR)
if (ret == -ERESTARTSYS || ret == -EINTR)
return (ret);
i915_gem_object_finish_gtt(obj);
if (ret == 0)
ret = i915_gem_object_set_to_cpu_domain(obj, 1);
if (ret == -ERESTART || ret == -EINTR)
if (ret == -ERESTARTSYS || ret == -EINTR)
return (ret);
if (ret != 0) {
i915_gem_clflush_object(obj);
@ -3326,8 +3332,11 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
flags = interruptible ? PCATCH : 0;
while (!i915_seqno_passed(ring->get_seqno(ring), seqno)
&& !atomic_load_acq_int(&dev_priv->mm.wedged) &&
ret == 0)
ret == 0) {
ret = -msleep(ring, &dev_priv->irq_lock, flags, "915gwr", 0);
if (ret == -ERESTART)
ret = -ERESTARTSYS;
}
ring->irq_put(ring);
mtx_unlock(&dev_priv->irq_lock);
@ -3584,7 +3593,7 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
seqno = ring->get_seqno(ring);
CTR2(KTR_DRM, "retire_request_ring %s %d", ring->name, seqno);
for (i = 0; i < DRM_ARRAY_SIZE(ring->sync_seqno); i++)
for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
if (seqno >= ring->sync_seqno[i])
ring->sync_seqno[i] = 0;
@ -4047,7 +4056,7 @@ i915_gem_init_phys_object(struct drm_device *dev, int id, int size, int align)
phys_obj->id = id;
phys_obj->handle = drm_pci_alloc(dev, size, align, ~0);
phys_obj->handle = drm_pci_alloc(dev, size, align, BUS_SPACE_MAXADDR);
if (phys_obj->handle == NULL) {
ret = -ENOMEM;
goto free_obj;
@ -4196,7 +4205,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
}
VM_OBJECT_WUNLOCK(obj->base.vm_obj);
return (0);
return (ret);
}
static int

View File

@ -317,10 +317,10 @@ void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
//DRM_LOCK(dev); /* Called from preclose(), the lock is already owned. */
DRM_LOCK(dev);
drm_gem_names_foreach(&file_priv->context_idr, context_idr_cleanup, NULL);
drm_gem_names_fini(&file_priv->context_idr);
//DRM_UNLOCK(dev);
DRM_UNLOCK(dev);
}
static struct i915_hw_context *

View File

@ -82,10 +82,10 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
INIT_LIST_HEAD(&unwind_list);
if (mappable)
drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, min_size,
alignment, 0,
alignment, 0, 0,
dev_priv->mm.gtt_mappable_end);
else
drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);
drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment, 0);
/* First see if there is a large enough contiguous idle region... */
list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {

View File

@ -1240,14 +1240,16 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto pre_struct_lock_err;
if (dev_priv->mm.suspended) {
DRM_UNLOCK(dev);
ret = -EBUSY;
goto struct_lock_err;
goto pre_struct_lock_err;
}
eb = eb_create(args->buffer_count);
if (eb == NULL) {
DRM_UNLOCK(dev);
ret = -ENOMEM;
goto struct_lock_err;
goto pre_struct_lock_err;
}
/* Look up object handles */
@ -1394,7 +1396,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
list_del_init(&obj->exec_list);
drm_gem_object_unreference(&obj->base);
}
struct_lock_err:
DRM_UNLOCK(dev);
pre_struct_lock_err:

View File

@ -355,7 +355,7 @@ i915_gem_init_global_gtt(struct drm_device *dev, unsigned long start,
/* ... but ensure that we clear the entire range. */
intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
device_printf(dev->device,
device_printf(dev->dev,
"taking over the fictitious range 0x%lx-0x%lx\n",
dev->agp->base + start, dev->agp->base + start + mappable);
error = -vm_phys_fictitious_reg_range(dev->agp->base + start,

View File

@ -352,6 +352,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
}
}
DRM_LOCK(dev);
if (args->tiling_mode != obj->tiling_mode ||
args->stride != obj->stride) {
/* We need to rebind the object if its current allocation
@ -399,6 +400,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
args->stride = obj->stride;
args->tiling_mode = obj->tiling_mode;
drm_gem_object_unreference(&obj->base);
DRM_UNLOCK(dev);
return (ret);
}
@ -418,6 +420,8 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
if (&obj->base == NULL)
return -ENOENT;
DRM_LOCK(dev);
args->tiling_mode = obj->tiling_mode;
switch (obj->tiling_mode) {
case I915_TILING_X:
@ -440,6 +444,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;
drm_gem_object_unreference(&obj->base);
DRM_UNLOCK(dev);
return 0;
}

View File

@ -127,12 +127,12 @@ typedef struct drm_i915_mem_alloc32 {
u32 region_offset; /* offset from start of fb or agp */
} drm_i915_mem_alloc32_t;
drm_ioctl_desc_t i915_compat_ioctls[] = {
struct drm_ioctl_desc i915_compat_ioctls[] = {
DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, compat_i915_batchbuffer, DRM_AUTH),
DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, compat_i915_cmdbuffer, DRM_AUTH),
DRM_IOCTL_DEF(DRM_I915_GETPARAM, compat_i915_getparam, DRM_AUTH),
DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, compat_i915_irq_emit, DRM_AUTH)
};
int i915_compat_ioctls_nr = DRM_ARRAY_SIZE(i915_compat_ioctls);
int i915_compat_ioctls_nr = ARRAY_SIZE(i915_compat_ioctls);
#endif

View File

@ -1458,11 +1458,11 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
dev_priv->pipestat[1] = 0;
/* Hack for broken MSIs on VLV */
pci_write_config(dev->device, 0x94, 0xfee00000, 4);
msid = pci_read_config(dev->device, 0x98, 2);
pci_write_config(dev->dev, 0x94, 0xfee00000, 4);
msid = pci_read_config(dev->dev, 0x98, 2);
msid &= 0xff; /* mask out delivery bits */
msid |= (1<<14);
pci_write_config(dev->device, 0x98, msid, 2);
pci_write_config(dev->dev, 0x98, msid, 2);
I915_WRITE(VLV_IMR, dev_priv->irq_mask);
I915_WRITE(VLV_IER, enable_mask);
@ -2329,7 +2329,7 @@ i915_error_state_free(struct drm_i915_error_state *error)
{
int i;
for (i = 0; i < DRM_ARRAY_SIZE(error->ring); i++) {
for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
i915_error_object_free(error->ring[i].batchbuffer);
i915_error_object_free(error->ring[i].ringbuffer);
free(error->ring[i].requests, DRM_I915_GEM);

View File

@ -460,19 +460,19 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A &
~DPLL_VCO_ENABLE);
POSTING_READ(dpll_a_reg);
DRM_UDELAY(150);
udelay(150);
}
I915_WRITE(fpa0_reg, dev_priv->saveFPA0);
I915_WRITE(fpa1_reg, dev_priv->saveFPA1);
/* Actually enable it */
I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A);
POSTING_READ(dpll_a_reg);
DRM_UDELAY(150);
udelay(150);
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
I915_WRITE(_DPLL_A_MD, dev_priv->saveDPLL_A_MD);
POSTING_READ(_DPLL_A_MD);
}
DRM_UDELAY(150);
udelay(150);
/* Restore mode */
I915_WRITE(_HTOTAL_A, dev_priv->saveHTOTAL_A);
@ -529,19 +529,19 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B &
~DPLL_VCO_ENABLE);
POSTING_READ(dpll_b_reg);
DRM_UDELAY(150);
udelay(150);
}
I915_WRITE(fpb0_reg, dev_priv->saveFPB0);
I915_WRITE(fpb1_reg, dev_priv->saveFPB1);
/* Actually enable it */
I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B);
POSTING_READ(dpll_b_reg);
DRM_UDELAY(150);
udelay(150);
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
I915_WRITE(_DPLL_B_MD, dev_priv->saveDPLL_B_MD);
POSTING_READ(_DPLL_B_MD);
}
DRM_UDELAY(150);
udelay(150);
/* Restore mode */
I915_WRITE(_HTOTAL_B, dev_priv->saveHTOTAL_B);
@ -792,7 +792,7 @@ static void i915_restore_display(struct drm_device *dev)
I915_WRITE(VGA1, dev_priv->saveVGA1);
I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
POSTING_READ(VGA_PD);
DRM_UDELAY(150);
udelay(150);
i915_restore_vga(dev);
}
@ -802,11 +802,13 @@ int i915_save_state(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
dev_priv->saveLBB = pci_read_config(dev->device, LBB, 1);
dev_priv->saveLBB = pci_read_config(dev->dev, LBB, 1);
/* Hardware status page */
dev_priv->saveHWS = I915_READ(HWS_PGA);
DRM_LOCK(dev);
i915_save_display(dev);
/* Interrupt state */
@ -844,6 +846,8 @@ int i915_save_state(struct drm_device *dev)
for (i = 0; i < 3; i++)
dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
DRM_UNLOCK(dev);
return 0;
}
@ -852,8 +856,9 @@ int i915_restore_state(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
pci_write_config(dev->device, LBB, dev_priv->saveLBB, 1);
pci_write_config(dev->dev, LBB, dev_priv->saveLBB, 1);
DRM_LOCK(dev);
/* Hardware status page */
I915_WRITE(HWS_PGA, dev_priv->saveHWS);
@ -887,6 +892,8 @@ int i915_restore_state(struct drm_device *dev)
for (i = 0; i < 3; i++)
I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
DRM_UNLOCK(dev);
intel_iic_reset(dev);
return 0;

View File

@ -129,7 +129,7 @@ static int intel_crt_mode_valid(struct drm_connector *connector,
}
static bool intel_crt_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
@ -310,7 +310,6 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
*/
if (edid != NULL) {
is_digital = edid->input & DRM_EDID_INPUT_DIGITAL;
connector->display_info.raw_edid = NULL;
free(edid, DRM_MEM_KMS);
}

View File

@ -2051,9 +2051,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
#if 0
struct drm_i915_master_private *master_priv;
#endif
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int ret;
@ -2099,7 +2097,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
intel_update_fbc(dev);
DRM_UNLOCK(dev);
#if 0
if (!dev->primary->master)
return 0;
@ -2114,19 +2111,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
master_priv->sarea_priv->pipeA_x = x;
master_priv->sarea_priv->pipeA_y = y;
}
#else
if (!dev_priv->sarea_priv)
return 0;
if (intel_crtc->pipe) {
dev_priv->sarea_priv->planeB_x = x;
dev_priv->sarea_priv->planeB_y = y;
} else {
dev_priv->sarea_priv->planeA_x = x;
dev_priv->sarea_priv->planeA_y = y;
}
#endif
return 0;
}
@ -3329,9 +3313,7 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
#if 0
struct drm_i915_master_private *master_priv;
#endif
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
bool enabled;
@ -3343,38 +3325,23 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
dev_priv->display.dpms(crtc, mode);
#if 0
if (!dev->primary->master)
return;
master_priv = dev->primary->master->driver_priv;
if (!master_priv->sarea_priv)
return;
#else
if (!dev_priv->sarea_priv)
return;
#endif
enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
switch (pipe) {
case 0:
#if 0
master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
#else
dev_priv->sarea_priv->planeA_w = enabled ? crtc->mode.hdisplay : 0;
dev_priv->sarea_priv->planeA_h = enabled ? crtc->mode.vdisplay : 0;
#endif
break;
case 1:
#if 0
master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
#else
dev_priv->sarea_priv->planeB_w = enabled ? crtc->mode.hdisplay : 0;
dev_priv->sarea_priv->planeB_h = enabled ? crtc->mode.vdisplay : 0;
#endif
break;
default:
DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
@ -3502,7 +3469,7 @@ static int i915gm_get_display_clock_speed(struct drm_device *dev)
{
u16 gcfgc = 0;
gcfgc = pci_read_config(dev->device, GCFGC, 2);
gcfgc = pci_read_config(dev->dev, GCFGC, 2);
if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
return 133000;
@ -6946,8 +6913,8 @@ static void intel_init_quirks(struct drm_device *dev)
device_t d;
int i;
d = dev->device;
for (i = 0; i < DRM_ARRAY_SIZE(intel_quirks); i++) {
d = dev->dev;
for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
q = &intel_quirks[i];
if (pci_get_device(d) == q->device &&
(pci_get_subvendor(d) == q->subsystem_vendor ||
@ -7116,10 +7083,11 @@ void intel_modeset_cleanup(struct drm_device *dev)
if (IS_VALLEYVIEW(dev))
vlv_init_dpio(dev);
DRM_UNLOCK(dev);
/* Disable the irq before mode object teardown, for the irq might
* enqueue unpin/hotplug work. */
drm_irq_uninstall(dev);
DRM_UNLOCK(dev);
if (taskqueue_cancel(dev_priv->tq, &dev_priv->hotplug_task, NULL))
taskqueue_drain(dev_priv->tq, &dev_priv->hotplug_task);

View File

@ -665,7 +665,7 @@ intel_dp_i2c_init(struct intel_dp *intel_dp,
DRM_DEBUG_KMS("i2c_init %s\n", name);
ironlake_edp_panel_vdd_on(intel_dp);
ret = iic_dp_aux_add_bus(intel_connector->base.dev->device, name,
ret = iic_dp_aux_add_bus(intel_connector->base.dev->dev, name,
intel_dp_i2c_aux_ch, intel_dp, &intel_dp->dp_iic_bus,
&intel_dp->adapter);
ironlake_edp_panel_vdd_off(intel_dp, false);
@ -673,7 +673,7 @@ intel_dp_i2c_init(struct intel_dp *intel_dp,
}
static bool
intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
intel_dp_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
@ -688,11 +688,6 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
intel_fixed_panel_mode(intel_dp->panel_fixed_mode, adjusted_mode);
intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN,
mode, adjusted_mode);
/*
* the mode->clock is used to calculate the Data&Link M/N
* of the pipe. For the eDP the fixed clock should be used.
*/
mode->clock = intel_dp->panel_fixed_mode->clock;
}
DRM_DEBUG_KMS("DP link computation with max lane count %i "
@ -703,7 +698,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
return false;
bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
mode_rate = intel_dp_link_required(mode->clock, bpp);
mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp);
for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
for (clock = 0; clock <= max_clock; clock++) {
@ -2151,7 +2146,6 @@ intel_dp_detect(struct drm_connector *connector, bool force)
edid = intel_dp_get_edid(connector, intel_dp->adapter);
if (edid) {
intel_dp->has_audio = drm_detect_monitor_audio(edid);
connector->display_info.raw_edid = NULL;
free(edid, DRM_MEM_KMS);
}
}
@ -2217,7 +2211,6 @@ intel_dp_detect_audio(struct drm_connector *connector)
if (edid) {
has_audio = drm_detect_monitor_audio(edid);
connector->display_info.raw_edid = NULL;
free(edid, DRM_MEM_KMS);
}
@ -2233,7 +2226,7 @@ intel_dp_set_property(struct drm_connector *connector,
struct intel_dp *intel_dp = intel_attached_dp(connector);
int ret;
ret = drm_connector_property_set_value(connector, property, val);
ret = drm_object_property_set_value(&connector->base, property, val);
if (ret)
return ret;
@ -2307,7 +2300,7 @@ static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
device_delete_child(intel_dp->dp_iic_bus,
intel_dp->adapter);
}
device_delete_child(dev->device, intel_dp->dp_iic_bus);
device_delete_child(dev->dev, intel_dp->dp_iic_bus);
}
drm_encoder_cleanup(encoder);
if (is_edp(intel_dp)) {

View File

@ -79,20 +79,17 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
goto out_unref;
}
#if 0
info = framebuffer_alloc(0, device);
info = framebuffer_alloc();
if (!info) {
ret = -ENOMEM;
goto out_unpin;
}
#if 0
info->par = ifbdev;
#else
info = malloc(sizeof(struct fb_info), DRM_MEM_KMS, M_WAITOK | M_ZERO);
info->fb_size = size;
info->fb_bpp = sizes->surface_bpp;
info->fb_width = sizes->fb_width;
info->fb_height = sizes->fb_height;
info->fb_pbase = dev->agp->base + obj->gtt_offset;
info->fb_vbase = (vm_offset_t)pmap_mapdev_attr(info->fb_pbase, size,
PAT_WRITE_COMBINING);
@ -140,12 +137,12 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
info->screen_size = size;
// memset(info->screen_base, 0, size);
#endif
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
#endif
DRM_DEBUG_KMS("allocated %dx%d (s %dbits) fb: 0x%08x, bo %p\n",
fb->width, fb->height, fb->depth,
obj->gtt_offset, obj);
@ -192,21 +189,19 @@ static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
static void intel_fbdev_destroy(struct drm_device *dev,
struct intel_fbdev *ifbdev)
{
#if 0
struct fb_info *info;
#endif
struct intel_framebuffer *ifb = &ifbdev->ifb;
#if 0
if (ifbdev->helper.fbdev) {
info = ifbdev->helper.fbdev;
#if 0
unregister_framebuffer(info);
iounmap(info->screen_base);
if (info->cmap.len)
fb_dealloc_cmap(&info->cmap);
#endif
framebuffer_release(info);
}
#endif
drm_fb_helper_fini(&ifbdev->helper);

View File

@ -446,7 +446,7 @@ static int intel_hdmi_mode_valid(struct drm_connector *connector,
}
static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
@ -473,7 +473,6 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
drm_detect_hdmi_monitor(edid);
intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
}
connector->display_info.raw_edid = NULL;
free(edid, DRM_MEM_KMS);
} else {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] got no edid, ddc port %d\n",
@ -519,7 +518,6 @@ intel_hdmi_detect_audio(struct drm_connector *connector)
if (edid->input & DRM_EDID_INPUT_DIGITAL)
has_audio = drm_detect_monitor_audio(edid);
connector->display_info.raw_edid = NULL;
free(edid, DRM_MEM_KMS);
}
@ -535,7 +533,7 @@ intel_hdmi_set_property(struct drm_connector *connector,
struct drm_i915_private *dev_priv = connector->dev->dev_private;
int ret;
ret = drm_connector_property_set_value(connector, property, val);
ret = drm_object_property_set_value(&connector->base, property, val);
if (ret)
return ret;

View File

@ -258,9 +258,9 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct iic_msg *msg,
(GMBUS_SATOER | GMBUS_HW_RDY)),
50, 1, "915gbr");
if (ret)
return (ETIMEDOUT);
return (-ETIMEDOUT);
if (gmbus2 & GMBUS_SATOER)
return (ENXIO);
return (-ENXIO);
val = I915_READ(GMBUS3 + reg_offset);
do {
@ -308,9 +308,9 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct iic_msg *msg)
(GMBUS_SATOER | GMBUS_HW_RDY)),
50, 1, "915gbw");
if (ret)
return (ETIMEDOUT);
return (-ETIMEDOUT);
if (gmbus2 & GMBUS_SATOER)
return (ENXIO);
return (-ENXIO);
}
return 0;
}
@ -369,7 +369,7 @@ intel_gmbus_transfer(device_t idev, struct iic_msg *msgs, uint32_t nmsgs)
sx_xlock(&dev_priv->gmbus_sx);
if (sc->force_bit_dev) {
error = IICBUS_TRANSFER(dev_priv->bbbus[unit], msgs, nmsgs);
error = -IICBUS_TRANSFER(dev_priv->bbbus[unit], msgs, nmsgs);
goto out;
}
@ -389,9 +389,9 @@ intel_gmbus_transfer(device_t idev, struct iic_msg *msgs, uint32_t nmsgs)
error = gmbus_xfer_write(dev_priv, &msgs[i]);
}
if (error == ETIMEDOUT)
if (error == -ETIMEDOUT)
goto timeout;
if (error == ENXIO)
if (error == -ENXIO)
goto clear_err;
ret = _intel_wait_for(sc->drm_dev,
@ -419,7 +419,7 @@ intel_gmbus_transfer(device_t idev, struct iic_msg *msgs, uint32_t nmsgs)
10, 1, "915gbu")) {
DRM_DEBUG_KMS("GMBUS [%s] timed out waiting for idle\n",
sc->name);
error = ETIMEDOUT;
error = -ETIMEDOUT;
}
I915_WRITE(GMBUS0 + reg_offset, 0);
goto out;
@ -454,7 +454,7 @@ intel_gmbus_transfer(device_t idev, struct iic_msg *msgs, uint32_t nmsgs)
* So, we always return -ENXIO in all NAK cases, to ensure we send
* it at least during the one case that is specified.
*/
error = ENXIO;
error = -ENXIO;
goto out;
timeout:
@ -467,12 +467,12 @@ intel_gmbus_transfer(device_t idev, struct iic_msg *msgs, uint32_t nmsgs)
* Try GPIO bitbanging instead.
*/
sc->force_bit_dev = true;
error = IICBUS_TRANSFER(idev, msgs, nmsgs);
error = -IICBUS_TRANSFER(idev, msgs, nmsgs);
goto out;
out:
sx_xunlock(&dev_priv->gmbus_sx);
return (error);
return (-error);
}
device_t
@ -730,15 +730,15 @@ intel_setup_gmbus(struct drm_device *dev)
* gmbus may decide to force quirk transfer in the
* attachment code.
*/
dev_priv->bbbus_bridge[i] = device_add_child(dev->device,
dev_priv->bbbus_bridge[i] = device_add_child(dev->dev,
"intel_iicbb", i);
if (dev_priv->bbbus_bridge[i] == NULL) {
DRM_ERROR("bbbus bridge %d creation failed\n", i);
ret = ENXIO;
ret = -ENXIO;
goto err;
}
device_quiet(dev_priv->bbbus_bridge[i]);
ret = device_probe_and_attach(dev_priv->bbbus_bridge[i]);
ret = -device_probe_and_attach(dev_priv->bbbus_bridge[i]);
if (ret != 0) {
DRM_ERROR("bbbus bridge %d attach failed, %d\n", i,
ret);
@ -760,19 +760,19 @@ intel_setup_gmbus(struct drm_device *dev)
dev_priv->bbbus[i] = iic_dev;
dev_priv->gmbus_bridge[i] = device_add_child(dev->device,
dev_priv->gmbus_bridge[i] = device_add_child(dev->dev,
"intel_gmbus", i);
if (dev_priv->gmbus_bridge[i] == NULL) {
DRM_ERROR("gmbus bridge %d creation failed\n", i);
ret = ENXIO;
ret = -ENXIO;
goto err;
}
device_quiet(dev_priv->gmbus_bridge[i]);
ret = device_probe_and_attach(dev_priv->gmbus_bridge[i]);
ret = -device_probe_and_attach(dev_priv->gmbus_bridge[i]);
if (ret != 0) {
DRM_ERROR("gmbus bridge %d attach failed, %d\n", i,
ret);
ret = ENXIO;
ret = -ENXIO;
goto err;
}

View File

@ -230,7 +230,7 @@ static inline u32 panel_fitter_scaling(u32 source, u32 target)
}
static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
@ -978,7 +978,7 @@ bool intel_lvds_init(struct drm_device *dev)
* the initial panel fitting mode will be FULL_SCREEN.
*/
drm_connector_attach_property(&intel_connector->base,
drm_object_attach_property(&connector->base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_ASPECT);
intel_lvds->fitting_mode = DRM_MODE_SCALE_ASPECT;

View File

@ -80,7 +80,6 @@ intel_ddc_get_modes(struct drm_connector *connector, device_t adapter)
drm_mode_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
drm_edid_to_eld(connector, edid);
connector->display_info.raw_edid = NULL;
free(edid, DRM_MEM_KMS);
}
@ -106,13 +105,13 @@ intel_attach_force_audio_property(struct drm_connector *connector)
prop = drm_property_create_enum(dev, 0,
"audio",
force_audio_names,
DRM_ARRAY_SIZE(force_audio_names));
ARRAY_SIZE(force_audio_names));
if (prop == NULL)
return;
dev_priv->force_audio_property = prop;
}
drm_connector_attach_property(connector, prop, 0);
drm_object_attach_property(&connector->base, prop, 0);
}
static const struct drm_prop_enum_list broadcast_rgb_names[] = {
@ -132,12 +131,12 @@ intel_attach_broadcast_rgb_property(struct drm_connector *connector)
prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM,
"Broadcast RGB",
broadcast_rgb_names,
DRM_ARRAY_SIZE(broadcast_rgb_names));
ARRAY_SIZE(broadcast_rgb_names));
if (prop == NULL)
return;
dev_priv->broadcast_rgb_property = prop;
}
drm_connector_attach_property(connector, prop, 0);
drm_object_attach_property(&connector->base, prop, 0);
}

View File

@ -147,7 +147,7 @@ struct opregion_asle {
#define ACPI_DIGITAL_OUTPUT (3<<8)
#define ACPI_LVDS_OUTPUT (4<<8)
#if 1
#if defined(CONFIG_ACPI)
static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@ -350,7 +350,7 @@ static void intel_didl_outputs(struct drm_device *dev)
ACPI_STATUS status;
int i = 0;
handle = acpi_get_handle(dev->device);
handle = acpi_get_handle(dev->dev);
if (!handle)
return;
@ -377,7 +377,7 @@ static void intel_didl_outputs(struct drm_device *dev)
}
if (!acpi_video_bus) {
device_printf(dev->device, "No ACPI video bus found\n");
device_printf(dev->dev, "No ACPI video bus found\n");
return;
}
@ -385,7 +385,7 @@ static void intel_didl_outputs(struct drm_device *dev)
while (AcpiGetNextObject(ACPI_TYPE_DEVICE, acpi_video_bus, acpi_cdev,
&acpi_cdev) != AE_NOT_FOUND) {
if (i >= 8) {
device_printf(dev->device, "More than 8 outputs detected\n");
device_printf(dev->dev, "More than 8 outputs detected\n");
return;
}
status = acpi_GetInteger(acpi_cdev, "_ADR", &device_id);
@ -426,7 +426,7 @@ static void intel_didl_outputs(struct drm_device *dev)
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
int output_type = ACPI_OTHER_OUTPUT;
if (i >= 8) {
device_printf(dev->device,
device_printf(dev->dev,
"More than 8 outputs detected\n");
return;
}
@ -569,7 +569,7 @@ int intel_opregion_setup(struct drm_device *dev)
u32 asls, mboxes;
int err = 0;
asls = pci_read_config(dev->device, PCI_ASLS, 4);
asls = pci_read_config(dev->dev, PCI_ASLS, 4);
DRM_DEBUG("graphic opregion physical addr: 0x%x\n", asls);
if (asls == 0) {
DRM_DEBUG("ACPI OpRegion not supported!\n");

View File

@ -774,7 +774,6 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
KASSERT(overlay != NULL, ("No overlay ?"));
DRM_LOCK_ASSERT(overlay->dev);
DRM_MODE_CONFIG_ASSERT_LOCKED(overlay->dev);
ret = intel_overlay_release_old_vid(overlay);
if (ret != 0)
@ -877,7 +876,6 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
int ret;
DRM_LOCK_ASSERT(overlay->dev);
DRM_MODE_CONFIG_ASSERT_LOCKED(overlay->dev);
ret = intel_overlay_recover_from_interrupt(overlay);
if (ret != 0)
@ -1458,8 +1456,8 @@ void intel_setup_overlay(struct drm_device *dev)
intel_overlay_unmap_regs(overlay, regs);
dev_priv->overlay = overlay;
DRM_INFO("initialized overlay support\n");
DRM_UNLOCK(dev);
DRM_INFO("initialized overlay support\n");
return;
out_unpin_bo:

View File

@ -226,7 +226,7 @@ u32 intel_panel_get_backlight(struct drm_device *dev)
if (is_backlight_combination_mode(dev)) {
u8 lbpc;
lbpc = pci_read_config(dev->device, PCI_LBPC, 1);
lbpc = pci_read_config(dev->dev, PCI_LBPC, 1);
val *= lbpc;
}
}
@ -260,7 +260,7 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level
lbpc = level * 0xfe / max + 1;
level /= lbpc;
pci_write_config(dev->device, PCI_LBPC, lbpc, 4);
pci_write_config(dev->dev, PCI_LBPC, lbpc, 4);
}
tmp = I915_READ(BLC_PWM_CTL);

View File

@ -1206,16 +1206,11 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
return 0;
}
#if 0
if (dev->primary->master) {
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
if (master_priv->sarea_priv)
master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
}
#else
if (dev_priv->sarea_priv)
dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
#endif
pause("915rng", 1);
if (atomic_load_acq_32(&dev_priv->mm.wedged) != 0) {

View File

@ -416,20 +416,20 @@ static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
{
int i;
if ((drm_debug_flag & DRM_DEBUGBITS_KMS) == 0)
if ((drm_debug & DRM_DEBUGBITS_KMS) == 0)
return;
DRM_DEBUG_KMS("%s: W: %02X ", SDVO_NAME(intel_sdvo), cmd);
for (i = 0; i < args_len; i++)
printf("%02X ", ((const u8 *)args)[i]);
for (; i < 8; i++)
printf(" ");
for (i = 0; i < DRM_ARRAY_SIZE(sdvo_cmd_names); i++) {
for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++) {
if (cmd == sdvo_cmd_names[i].cmd) {
printf("(%s)", sdvo_cmd_names[i].name);
break;
}
}
if (i == DRM_ARRAY_SIZE(sdvo_cmd_names))
if (i == ARRAY_SIZE(sdvo_cmd_names))
printf("(%02X)", cmd);
printf("\n");
}
@ -525,7 +525,7 @@ intel_sdvo_read_response(struct intel_sdvo *intel_sdvo, void *response,
goto log_fail;
}
if ((drm_debug_flag & DRM_DEBUGBITS_KMS) != 0) {
if ((drm_debug & DRM_DEBUGBITS_KMS) != 0) {
if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
printf("(%s)", cmd_status_names[status]);
else
@ -541,15 +541,15 @@ intel_sdvo_read_response(struct intel_sdvo *intel_sdvo, void *response,
SDVO_I2C_RETURN_0 + i,
&((u8 *)response)[i]))
goto log_fail;
if ((drm_debug_flag & DRM_DEBUGBITS_KMS) != 0)
if ((drm_debug & DRM_DEBUGBITS_KMS) != 0)
printf(" %02X", ((u8 *)response)[i]);
}
if ((drm_debug_flag & DRM_DEBUGBITS_KMS) != 0)
if ((drm_debug & DRM_DEBUGBITS_KMS) != 0)
printf("\n");
return (true);
log_fail:
if ((drm_debug_flag & DRM_DEBUGBITS_KMS) != 0)
if ((drm_debug & DRM_DEBUGBITS_KMS) != 0)
printf("... failed\n");
return (false);
}
@ -972,7 +972,7 @@ intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo,
}
static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
@ -1332,7 +1332,6 @@ intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
}
} else
status = connector_status_disconnected;
connector->display_info.raw_edid = NULL;
free(edid, DRM_MEM_KMS);
}
@ -1406,7 +1405,6 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
else
ret = connector_status_disconnected;
connector->display_info.raw_edid = NULL;
free(edid, DRM_MEM_KMS);
} else
ret = connector_status_connected;
@ -1452,7 +1450,6 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
drm_add_edid_modes(connector, edid);
}
connector->display_info.raw_edid = NULL;
free(edid, DRM_MEM_KMS);
}
}
@ -1547,7 +1544,7 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
if (!intel_sdvo_read_response(intel_sdvo, &reply, 3))
return;
for (i = 0; i < DRM_ARRAY_SIZE(sdvo_tv_modes); i++)
for (i = 0; i < ARRAY_SIZE(sdvo_tv_modes); i++)
if (reply & (1 << i)) {
struct drm_display_mode *nmode;
nmode = drm_mode_duplicate(connector->dev,
@ -1697,7 +1694,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
uint8_t cmd;
int ret;
ret = drm_connector_property_set_value(connector, property, val);
ret = drm_object_property_set_value(&connector->base, property, val);
if (ret)
return ret;
@ -1752,7 +1749,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
} else if (IS_TV_OR_LVDS(intel_sdvo_connector)) {
temp_value = val;
if (intel_sdvo_connector->left == property) {
drm_connector_property_set_value(connector,
drm_object_property_set_value(&connector->base,
intel_sdvo_connector->right, val);
if (intel_sdvo_connector->left_margin == temp_value)
return 0;
@ -1764,7 +1761,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
cmd = SDVO_CMD_SET_OVERSCAN_H;
goto set_value;
} else if (intel_sdvo_connector->right == property) {
drm_connector_property_set_value(connector,
drm_object_property_set_value(&connector->base,
intel_sdvo_connector->left, val);
if (intel_sdvo_connector->right_margin == temp_value)
return 0;
@ -1776,7 +1773,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
cmd = SDVO_CMD_SET_OVERSCAN_H;
goto set_value;
} else if (intel_sdvo_connector->top == property) {
drm_connector_property_set_value(connector,
drm_object_property_set_value(&connector->base,
intel_sdvo_connector->bottom, val);
if (intel_sdvo_connector->top_margin == temp_value)
return 0;
@ -1788,7 +1785,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
cmd = SDVO_CMD_SET_OVERSCAN_V;
goto set_value;
} else if (intel_sdvo_connector->bottom == property) {
drm_connector_property_set_value(connector,
drm_object_property_set_value(&connector->base,
intel_sdvo_connector->top, val);
if (intel_sdvo_connector->bottom_margin == temp_value)
return 0;
@ -1863,7 +1860,7 @@ static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
drm_mode_destroy(encoder->dev,
intel_sdvo->sdvo_lvds_fixed_mode);
device_delete_child(intel_sdvo->base.base.dev->device,
device_delete_child(intel_sdvo->base.base.dev->dev,
intel_sdvo->ddc_iic_bus);
intel_encoder_destroy(encoder);
}
@ -2294,7 +2291,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
i, tv_format_names[intel_sdvo_connector->tv_format_supported[i]]);
intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[0];
drm_connector_attach_property(&intel_sdvo_connector->base.base,
drm_object_attach_property(&intel_sdvo_connector->base.base.base,
intel_sdvo_connector->tv_format, 0);
return true;
@ -2310,7 +2307,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
intel_sdvo_connector->name = \
drm_property_create_range(dev, 0, #name, 0, data_value[0]); \
if (!intel_sdvo_connector->name) return false; \
drm_connector_attach_property(connector, \
drm_object_attach_property(&connector->base, \
intel_sdvo_connector->name, \
intel_sdvo_connector->cur_##name); \
DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
@ -2347,7 +2344,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
if (!intel_sdvo_connector->left)
return false;
drm_connector_attach_property(connector,
drm_object_attach_property(&connector->base,
intel_sdvo_connector->left,
intel_sdvo_connector->left_margin);
@ -2356,7 +2353,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
if (!intel_sdvo_connector->right)
return false;
drm_connector_attach_property(connector,
drm_object_attach_property(&connector->base,
intel_sdvo_connector->right,
intel_sdvo_connector->right_margin);
DRM_DEBUG_KMS("h_overscan: max %d, "
@ -2384,7 +2381,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
if (!intel_sdvo_connector->top)
return false;
drm_connector_attach_property(connector,
drm_object_attach_property(&connector->base,
intel_sdvo_connector->top,
intel_sdvo_connector->top_margin);
@ -2394,7 +2391,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
if (!intel_sdvo_connector->bottom)
return false;
drm_connector_attach_property(connector,
drm_object_attach_property(&connector->base,
intel_sdvo_connector->bottom,
intel_sdvo_connector->bottom_margin);
DRM_DEBUG_KMS("v_overscan: max %d, "
@ -2426,7 +2423,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
if (!intel_sdvo_connector->dot_crawl)
return false;
drm_connector_attach_property(connector,
drm_object_attach_property(&connector->base,
intel_sdvo_connector->dot_crawl,
intel_sdvo_connector->cur_dot_crawl);
DRM_DEBUG_KMS("dot crawl: current %d\n", response);
@ -2553,7 +2550,7 @@ intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo, struct drm_device *dev,
struct intel_sdvo_ddc_proxy_sc *sc;
int ret;
sdvo->ddc_iic_bus = device_add_child(dev->device,
sdvo->ddc_iic_bus = device_add_child(dev->dev,
"intel_sdvo_ddc_proxy", sdvo_reg);
if (sdvo->ddc_iic_bus == NULL) {
DRM_ERROR("cannot create ddc proxy bus %d\n", sdvo_reg);
@ -2564,7 +2561,7 @@ intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo, struct drm_device *dev,
if (ret != 0) {
DRM_ERROR("cannot attach proxy bus %d error %d\n",
sdvo_reg, ret);
device_delete_child(dev->device, sdvo->ddc_iic_bus);
device_delete_child(dev->dev, sdvo->ddc_iic_bus);
return (false);
}
sc = device_get_softc(sdvo->ddc_iic_bus);

View File

@ -846,7 +846,7 @@ intel_tv_mode_valid(struct drm_connector *connector,
static bool
intel_tv_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
intel_tv_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
@ -1233,7 +1233,7 @@ static void intel_tv_find_better_format(struct drm_connector *connector)
}
intel_tv->tv_format = tv_mode->name;
drm_connector_property_set_value(connector,
drm_object_property_set_value(&connector->base,
connector->dev->mode_config.tv_mode_property, i);
}
@ -1326,7 +1326,7 @@ intel_tv_get_modes(struct drm_connector *connector)
int j, count = 0;
u64 tmp;
for (j = 0; j < DRM_ARRAY_SIZE(input_res_table);
for (j = 0; j < ARRAY_SIZE(input_res_table);
j++) {
const struct input_res *input = &input_res_table[j];
unsigned int hactive_s = input->w;
@ -1393,7 +1393,7 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
int ret = 0;
bool changed = false;
ret = drm_connector_property_set_value(connector, property, val);
ret = drm_object_property_set_value(&connector->base, property, val);
if (ret < 0)
goto out;
@ -1414,7 +1414,7 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
intel_tv->margin[TV_MARGIN_BOTTOM] = val;
changed = true;
} else if (property == dev->mode_config.tv_mode_property) {
if (val >= DRM_ARRAY_SIZE(tv_modes)) {
if (val >= ARRAY_SIZE(tv_modes)) {
ret = -EINVAL;
goto out;
}
@ -1506,7 +1506,7 @@ intel_tv_init(struct drm_device *dev)
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
u32 tv_dac_on, tv_dac_off, save_tv_dac;
char *tv_format_names[DRM_ARRAY_SIZE(tv_modes)];
char *tv_format_names[ARRAY_SIZE(tv_modes)];
int i, initial_mode = 0;
if ((I915_READ(TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED)
@ -1590,24 +1590,24 @@ intel_tv_init(struct drm_device *dev)
connector->doublescan_allowed = false;
/* Create TV properties then attach current values */
for (i = 0; i < DRM_ARRAY_SIZE(tv_modes); i++)
for (i = 0; i < ARRAY_SIZE(tv_modes); i++)
tv_format_names[i] = __DECONST(char *, tv_modes[i].name);
drm_mode_create_tv_properties(dev,
DRM_ARRAY_SIZE(tv_modes),
ARRAY_SIZE(tv_modes),
tv_format_names);
drm_connector_attach_property(connector, dev->mode_config.tv_mode_property,
drm_object_attach_property(&connector->base, dev->mode_config.tv_mode_property,
initial_mode);
drm_connector_attach_property(connector,
drm_object_attach_property(&connector->base,
dev->mode_config.tv_left_margin_property,
intel_tv->margin[TV_MARGIN_LEFT]);
drm_connector_attach_property(connector,
drm_object_attach_property(&connector->base,
dev->mode_config.tv_top_margin_property,
intel_tv->margin[TV_MARGIN_TOP]);
drm_connector_attach_property(connector,
drm_object_attach_property(&connector->base,
dev->mode_config.tv_right_margin_property,
intel_tv->margin[TV_MARGIN_RIGHT]);
drm_connector_attach_property(connector,
drm_object_attach_property(&connector->base,
dev->mode_config.tv_bottom_margin_property,
intel_tv->margin[TV_MARGIN_BOTTOM]);
#if 0

View File

@ -661,9 +661,9 @@ static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
unsigned count = U8((*ptr)++);
ATOM_SDEBUG_PRINT(" count: %d\n", count);
if (arg == ATOM_UNIT_MICROSEC)
DRM_UDELAY(count);
udelay(count);
else if (!drm_can_sleep())
DRM_MDELAY(count);
mdelay(count);
else
DRM_MSLEEP(count);
}
@ -1178,7 +1178,7 @@ static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32
ectx.abort = false;
ectx.last_jump = 0;
if (ws)
ectx.ws = malloc(4 * ws, DRM_MEM_DRIVER, M_ZERO | M_WAITOK);
ectx.ws = malloc(4 * ws, DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
else
ectx.ws = NULL;
@ -1234,7 +1234,7 @@ static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };
static void atom_index_iio(struct atom_context *ctx, int base)
{
ctx->iio = malloc(2 * 256, DRM_MEM_DRIVER, M_ZERO | M_WAITOK);
ctx->iio = malloc(2 * 256, DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
while (CU8(base) == ATOM_IIO_START) {
ctx->iio[CU8(base + 1)] = base + 2;
base += 2;
@ -1248,7 +1248,7 @@ struct atom_context *atom_parse(struct card_info *card, void *bios)
{
int base;
struct atom_context *ctx =
malloc(sizeof(struct atom_context), DRM_MEM_DRIVER, M_ZERO | M_WAITOK);
malloc(sizeof(struct atom_context), DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
char *str;
char name[512];
int i;
@ -1386,16 +1386,16 @@ int atom_allocate_fb_scratch(struct atom_context *ctx)
firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)((char *)ctx->bios + data_offset);
DRM_DEBUG("atom firmware requested %08x %dkb\n",
firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware,
firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb);
le32_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware),
le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb));
usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024;
usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024;
}
ctx->scratch_size_bytes = 0;
if (usage_bytes == 0)
usage_bytes = 20 * 1024;
/* allocate some scratch memory */
ctx->scratch = malloc(usage_bytes, DRM_MEM_DRIVER, M_ZERO | M_WAITOK);
ctx->scratch = malloc(usage_bytes, DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
if (!ctx->scratch)
return -ENOMEM;
ctx->scratch_size_bytes = usage_bytes;

View File

@ -256,8 +256,6 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
radeon_crtc->enabled = true;
/* adjust pm to dpms changes BEFORE enabling crtcs */
radeon_pm_compute_clocks(rdev);
if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set)
atombios_powergate_crtc(crtc, ATOM_DISABLE);
atombios_enable_crtc(crtc, ATOM_ENABLE);
if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
@ -275,8 +273,6 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
atombios_enable_crtc_memreq(crtc, ATOM_DISABLE);
atombios_enable_crtc(crtc, ATOM_DISABLE);
radeon_crtc->enabled = false;
if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set)
atombios_powergate_crtc(crtc, ATOM_ENABLE);
/* adjust pm to dpms changes AFTER disabling crtcs */
radeon_pm_compute_clocks(rdev);
break;
@ -565,6 +561,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
/* use frac fb div on APUs */
if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))
radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
/* use frac fb div on RS780/RS880 */
if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
if (ASIC_IS_DCE32(rdev) && mode->clock > 165000)
radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
} else {
@ -1848,6 +1847,8 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
int i;
atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
if (ASIC_IS_DCE6(rdev))
atombios_powergate_crtc(crtc, ATOM_ENABLE);
for (i = 0; i < rdev->num_crtc; i++) {
if (rdev->mode_info.crtcs[i] &&

View File

@ -140,7 +140,7 @@ static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector,
if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
return send_bytes;
else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
DRM_UDELAY(400);
udelay(400);
else
return -EIO;
}
@ -173,7 +173,7 @@ static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector,
if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
return ret;
else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
DRM_UDELAY(400);
udelay(400);
else if (ret == 0)
return -EPROTO;
else
@ -261,7 +261,7 @@ int radeon_dp_i2c_aux_ch(device_t dev, int mode, u8 write_byte, u8 *read_byte)
return -EREMOTEIO;
case AUX_NATIVE_REPLY_DEFER:
DRM_DEBUG_KMS("aux_ch native defer\n");
DRM_UDELAY(400);
udelay(400);
continue;
default:
DRM_ERROR("aux_ch invalid native reply 0x%02x\n", ack);
@ -272,13 +272,13 @@ int radeon_dp_i2c_aux_ch(device_t dev, int mode, u8 write_byte, u8 *read_byte)
case AUX_I2C_REPLY_ACK:
if (mode == MODE_I2C_READ)
*read_byte = reply[0];
return (0); /* Return ret on Linux. */
return ret;
case AUX_I2C_REPLY_NACK:
DRM_DEBUG_KMS("aux_i2c nack\n");
return -EREMOTEIO;
case AUX_I2C_REPLY_DEFER:
DRM_DEBUG_KMS("aux_i2c defer\n");
DRM_UDELAY(400);
udelay(400);
break;
default:
DRM_ERROR("aux_i2c invalid reply 0x%02x\n", ack);
@ -685,7 +685,7 @@ static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
static int radeon_dp_link_train_finish(struct radeon_dp_link_train_info *dp_info)
{
DRM_UDELAY(400);
udelay(400);
/* disable the training pattern on the sink */
radeon_write_dpcd_reg(dp_info->radeon_connector,
@ -713,7 +713,7 @@ static int radeon_dp_link_train_cr(struct radeon_dp_link_train_info *dp_info)
memset(dp_info->train_set, 0, 4);
radeon_dp_update_vs_emph(dp_info);
DRM_UDELAY(400);
udelay(400);
/* clock recovery loop */
clock_recovery = false;

View File

@ -197,7 +197,7 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
return;
pdata = malloc(sizeof(struct radeon_backlight_privdata), DRM_MEM_DRIVER, M_WAITOK);
pdata = malloc(sizeof(struct radeon_backlight_privdata), DRM_MEM_DRIVER, M_NOWAIT);
if (!pdata) {
DRM_ERROR("Memory allocation failed\n");
goto error;
@ -279,6 +279,12 @@ static void radeon_atom_backlight_exit(struct radeon_encoder *encoder)
#endif
#ifdef FREEBSD_WIP /* FreeBSD: to please GCC 4.2. */
/* evil but including atombios.h is much worse */
bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
struct drm_display_mode *mode);
#endif
static inline bool radeon_encoder_is_digital(struct drm_encoder *encoder)
{
@ -302,7 +308,7 @@ static inline bool radeon_encoder_is_digital(struct drm_encoder *encoder)
}
static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
@ -1342,7 +1348,7 @@ atombios_set_edp_panel_power(struct drm_connector *connector, int action)
for (i = 0; i < 300; i++) {
if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
return true;
DRM_MDELAY(1);
mdelay(1);
}
return false;
}
@ -2451,7 +2457,7 @@ radeon_atom_ext_dpms(struct drm_encoder *encoder, int mode)
}
static bool radeon_atom_ext_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
@ -2506,7 +2512,7 @@ radeon_atombios_set_dac_info(struct radeon_encoder *radeon_encoder)
struct drm_device *dev = radeon_encoder->base.dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder_atom_dac *dac = malloc(sizeof(struct radeon_encoder_atom_dac),
DRM_MEM_DRIVER, M_ZERO | M_WAITOK);
DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
if (!dac)
return NULL;
@ -2520,7 +2526,7 @@ radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
{
int encoder_enum = (radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
struct radeon_encoder_atom_dig *dig = malloc(sizeof(struct radeon_encoder_atom_dig),
DRM_MEM_DRIVER, M_ZERO | M_WAITOK);
DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
if (!dig)
return NULL;
@ -2559,7 +2565,7 @@ radeon_add_atom_encoder(struct drm_device *dev,
/* add a new one */
radeon_encoder = malloc(sizeof(struct radeon_encoder),
DRM_MEM_DRIVER, M_ZERO | M_WAITOK);
DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
if (!radeon_encoder)
return;

View File

@ -60,10 +60,17 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
if (flags & HW_I2C_WRITE) {
if (num > ATOM_MAX_HW_I2C_WRITE) {
DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 2)\n", num);
return EINVAL;
return -EINVAL;
}
memcpy(&out, buf, num);
args.lpI2CDataOut = cpu_to_le16(out);
} else {
#ifdef FREEBSD_WIP /* FreeBSD: to please GCC 4.2. */
if (num > ATOM_MAX_HW_I2C_READ) {
DRM_ERROR("hw i2c: tried to read too many bytes (%d vs 255)\n", num);
return -EINVAL;
}
#endif
}
args.ucI2CSpeed = TARGET_HW_I2C_CLOCK;
@ -77,7 +84,7 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
/* error */
if (args.ucStatus != HW_ASSISTED_I2C_STATUS_SUCCESS) {
DRM_DEBUG_KMS("hw_i2c error\n");
return EIO;
return -EIO;
}
if (!(flags & HW_I2C_WRITE))
@ -101,9 +108,9 @@ radeon_atom_hw_i2c_xfer(device_t dev, struct iic_msg *msgs, u_int num)
p->slave, HW_I2C_WRITE,
&buf, 1);
if (ret)
return ret;
return -ret; /* "ret" is returned on Linux. */
else
return (0);
return (0); /* "num" is returned on Linux. */
}
for (i = 0; i < num; i++) {
@ -127,13 +134,13 @@ radeon_atom_hw_i2c_xfer(device_t dev, struct iic_msg *msgs, u_int num)
p->slave, flags,
&p->buf[buffer_offset], current_count);
if (ret)
return ret;
return -ret; /* "ret" is returned on Linux. */
remaining -= current_count;
buffer_offset += current_count;
}
}
return (0);
return (0); /* "num" is returned on Linux. */
}
static int

View File

@ -370,6 +370,6 @@ const u32 cayman_ps[] =
0x00000000,
};
const u32 cayman_ps_size = DRM_ARRAY_SIZE(cayman_ps);
const u32 cayman_vs_size = DRM_ARRAY_SIZE(cayman_vs);
const u32 cayman_default_size = DRM_ARRAY_SIZE(cayman_default_state);
const u32 cayman_ps_size = ARRAY_SIZE(cayman_ps);
const u32 cayman_vs_size = ARRAY_SIZE(cayman_vs);
const u32 cayman_default_size = ARRAY_SIZE(cayman_default_state);

View File

@ -49,7 +49,14 @@ static const u32 crtc_offsets[6] =
};
static void evergreen_gpu_init(struct radeon_device *rdev);
#ifdef FREEBSD_WIP /* FreeBSD: to please GCC 4.2. */
void evergreen_fini(struct radeon_device *rdev);
#endif
void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
#ifdef FREEBSD_WIP /* FreeBSD: to please GCC 4.2. */
extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
int ring, u32 cp_int_cntl);
#endif
void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
unsigned *bankh, unsigned *mtaspect,
@ -107,6 +114,27 @@ void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
}
}
static bool dce4_is_in_vblank(struct radeon_device *rdev, int crtc)
{
if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
return true;
else
return false;
}
/*
 * Detect whether the CRTC position counter is advancing: sample the
 * position register twice back-to-back and compare.  Two differing
 * readings mean the display controller is actively scanning.
 */
static bool dce4_is_counter_moving(struct radeon_device *rdev, int crtc)
{
	u32 first_sample, second_sample;

	first_sample = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
	second_sample = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);

	return first_sample != second_sample;
}
/**
* dce4_wait_for_vblank - vblank wait asic callback.
*
@ -117,21 +145,28 @@ void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
*/
void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc)
{
int i;
unsigned i = 0;
if (crtc >= rdev->num_crtc)
return;
if (RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN) {
for (i = 0; i < rdev->usec_timeout; i++) {
if (!(RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK))
if (!(RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN))
return;
/* depending on when we hit vblank, we may be close to active; if so,
* wait for another frame.
*/
while (dce4_is_in_vblank(rdev, crtc)) {
if (i++ % 100 == 0) {
if (!dce4_is_counter_moving(rdev, crtc))
break;
DRM_UDELAY(1);
}
for (i = 0; i < rdev->usec_timeout; i++) {
if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
}
while (!dce4_is_in_vblank(rdev, crtc)) {
if (i++ % 100 == 0) {
if (!dce4_is_counter_moving(rdev, crtc))
break;
DRM_UDELAY(1);
}
}
}
@ -204,7 +239,7 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
for (i = 0; i < rdev->usec_timeout; i++) {
if (RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING)
break;
DRM_UDELAY(1);
udelay(1);
}
DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
@ -405,6 +440,19 @@ void evergreen_pm_misc(struct radeon_device *rdev)
rdev->pm.current_vddc = voltage->voltage;
DRM_DEBUG("Setting: vddc: %d\n", voltage->voltage);
}
/* starting with BTC, there is one state that is used for both
* MH and SH. Difference is that we always use the high clock index for
* mclk and vddci.
*/
if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
(rdev->family >= CHIP_BARTS) &&
rdev->pm.active_crtc_count &&
((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) ||
(rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX)))
voltage = &rdev->pm.power_state[req_ps_idx].
clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].voltage;
/* 0xff01 is a flag rather then an actual voltage */
if (voltage->vddci == 0xff01)
return;
@ -597,6 +645,16 @@ void evergreen_hpd_init(struct radeon_device *rdev)
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
/* don't try to enable hpd on eDP or LVDS avoid breaking the
* aux dp channel on imac and help (but not completely fix)
* https://bugzilla.redhat.com/show_bug.cgi?id=726143
* also avoid interrupt storms during dpms.
*/
continue;
}
switch (radeon_connector->hpd.hpd) {
case RADEON_HPD_1:
WREG32(DC_HPD1_CONTROL, tmp);
@ -1146,7 +1204,7 @@ int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
tmp = RREG32(SRBM_STATUS) & 0x1F00;
if (!tmp)
return 0;
DRM_UDELAY(1);
udelay(1);
}
return -1;
}
@ -1173,7 +1231,7 @@ void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev)
if (tmp) {
return;
}
DRM_UDELAY(1);
udelay(1);
}
}
@ -1314,17 +1372,16 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
radeon_wait_for_vblank(rdev, i);
tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
}
} else {
tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) {
radeon_wait_for_vblank(rdev, i);
tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
}
@ -1334,8 +1391,17 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
for (j = 0; j < rdev->usec_timeout; j++) {
if (radeon_get_vblank_counter(rdev, i) != frame_count)
break;
DRM_UDELAY(1);
udelay(1);
}
/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
tmp &= ~EVERGREEN_CRTC_MASTER_EN;
WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
save->crtc_enabled[i] = false;
/* ***** */
} else {
save->crtc_enabled[i] = false;
}
@ -1352,7 +1418,23 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
}
/* wait for the MC to settle */
DRM_UDELAY(100);
udelay(100);
/* lock double buffered regs */
for (i = 0; i < rdev->num_crtc; i++) {
if (save->crtc_enabled[i]) {
tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
if (!(tmp & EVERGREEN_GRPH_UPDATE_LOCK)) {
tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
}
tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
if (!(tmp & 1)) {
tmp |= 1;
WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
}
}
}
}
void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
@ -1374,6 +1456,33 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
/* unlock regs and wait for update */
for (i = 0; i < rdev->num_crtc; i++) {
if (save->crtc_enabled[i]) {
tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]);
if ((tmp & 0x3) != 0) {
tmp &= ~0x3;
WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
}
tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
if (tmp & EVERGREEN_GRPH_UPDATE_LOCK) {
tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
}
tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
if (tmp & 1) {
tmp &= ~1;
WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
}
for (j = 0; j < rdev->usec_timeout; j++) {
tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
if ((tmp & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING) == 0)
break;
udelay(1);
}
}
}
/* unblackout the MC */
tmp = RREG32(MC_SHARED_BLACKOUT_CNTL);
tmp &= ~BLACKOUT_MODE_MASK;
@ -1401,13 +1510,13 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
for (j = 0; j < rdev->usec_timeout; j++) {
if (radeon_get_vblank_counter(rdev, i) != frame_count)
break;
DRM_UDELAY(1);
udelay(1);
}
}
}
/* Unlock vga access */
WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
DRM_MDELAY(1);
mdelay(1);
WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
}
@ -1639,7 +1748,7 @@ static int evergreen_cp_resume(struct radeon_device *rdev)
SOFT_RESET_SPI |
SOFT_RESET_SX));
RREG32(GRBM_SOFT_RESET);
DRM_MDELAY(15);
mdelay(15);
WREG32(GRBM_SOFT_RESET, 0);
RREG32(GRBM_SOFT_RESET);
@ -1675,7 +1784,7 @@ static int evergreen_cp_resume(struct radeon_device *rdev)
WREG32(SCRATCH_UMSK, 0);
}
DRM_MDELAY(1);
mdelay(1);
WREG32(CP_RB_CNTL, tmp);
WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
@ -2247,7 +2356,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
DRM_UDELAY(50);
udelay(50);
}
@ -2373,7 +2482,7 @@ static void evergreen_gpu_soft_reset_gfx(struct radeon_device *rdev)
dev_info(rdev->dev, " GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
WREG32(GRBM_SOFT_RESET, grbm_reset);
(void)RREG32(GRBM_SOFT_RESET);
DRM_UDELAY(50);
udelay(50);
WREG32(GRBM_SOFT_RESET, 0);
(void)RREG32(GRBM_SOFT_RESET);
@ -2413,7 +2522,7 @@ static void evergreen_gpu_soft_reset_dma(struct radeon_device *rdev)
/* Reset dma */
WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
RREG32(SRBM_SOFT_RESET);
DRM_UDELAY(50);
udelay(50);
WREG32(SRBM_SOFT_RESET, 0);
dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
@ -2447,7 +2556,7 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
evergreen_gpu_soft_reset_dma(rdev);
/* Wait a little for things to settle down */
DRM_UDELAY(50);
udelay(50);
evergreen_mc_resume(rdev, &save);
return 0;
@ -2871,7 +2980,7 @@ static void evergreen_irq_disable(struct radeon_device *rdev)
{
r600_disable_interrupts(rdev);
/* Wait and acknowledge irq */
DRM_MDELAY(1);
mdelay(1);
evergreen_irq_ack(rdev);
evergreen_disable_interrupt_state(rdev);
}

View File

@ -353,6 +353,6 @@ const u32 evergreen_ps[] =
0x00000000,
};
const u32 evergreen_ps_size = DRM_ARRAY_SIZE(evergreen_ps);
const u32 evergreen_vs_size = DRM_ARRAY_SIZE(evergreen_vs);
const u32 evergreen_default_size = DRM_ARRAY_SIZE(evergreen_default_state);
const u32 evergreen_ps_size = ARRAY_SIZE(evergreen_ps);
const u32 evergreen_vs_size = ARRAY_SIZE(evergreen_vs);
const u32 evergreen_default_size = ARRAY_SIZE(evergreen_default_state);

View File

@ -40,6 +40,10 @@ __FBSDID("$FreeBSD$");
#define MAX(a,b) (((a)>(b))?(a):(b))
#define MIN(a,b) (((a)<(b))?(a):(b))
#ifdef FREEBSD_WIP /* FreeBSD: to please GCC 4.2. */
int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
struct radeon_cs_reloc **cs_reloc);
#endif
static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
struct radeon_cs_reloc **cs_reloc);
@ -1292,9 +1296,9 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
int r;
if (p->rdev->family >= CHIP_CAYMAN)
last_reg = DRM_ARRAY_SIZE(cayman_reg_safe_bm);
last_reg = ARRAY_SIZE(cayman_reg_safe_bm);
else
last_reg = DRM_ARRAY_SIZE(evergreen_reg_safe_bm);
last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);
i = (reg >> 7);
if (i >= last_reg) {
@ -1960,9 +1964,9 @@ static bool evergreen_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
u32 last_reg, m, i;
if (p->rdev->family >= CHIP_CAYMAN)
last_reg = DRM_ARRAY_SIZE(cayman_reg_safe_bm);
last_reg = ARRAY_SIZE(cayman_reg_safe_bm);
else
last_reg = DRM_ARRAY_SIZE(evergreen_reg_safe_bm);
last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);
i = (reg >> 7);
if (i >= last_reg) {
@ -2759,7 +2763,7 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
if (p->track == NULL) {
/* initialize tracker, we are in kms */
track = malloc(sizeof(*track), DRM_MEM_DRIVER, M_ZERO | M_WAITOK);
track = malloc(sizeof(*track), DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
if (track == NULL)
return -ENOMEM;
evergreen_cs_track_init(track);

View File

@ -229,6 +229,8 @@ __FBSDID("$FreeBSD$");
#define EVERGREEN_CRTC_STATUS_POSITION 0x6e90
#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
#define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4
#define EVERGREEN_MASTER_UPDATE_LOCK 0x6ef4
#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
#define EVERGREEN_DC_GPIO_HPD_MASK 0x64b0
#define EVERGREEN_DC_GPIO_HPD_A 0x64b4

View File

@ -34,7 +34,20 @@ __FBSDID("$FreeBSD$");
#include "ni_reg.h"
#include "cayman_blit_shaders.h"
#ifdef FREEBSD_WIP /* FreeBSD: to please GCC 4.2. */
extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
extern void evergreen_mc_program(struct radeon_device *rdev);
extern void evergreen_irq_suspend(struct radeon_device *rdev);
extern int evergreen_mc_init(struct radeon_device *rdev);
extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
#endif
extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
#ifdef FREEBSD_WIP /* FreeBSD: to please GCC 4.2. */
extern void si_rlc_fini(struct radeon_device *rdev);
extern int si_rlc_init(struct radeon_device *rdev);
#endif
#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
@ -48,6 +61,27 @@ extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
#define ARUBA_RLC_UCODE_SIZE 1536
#ifdef __linux__
/* Firmware Names */
MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
MODULE_FIRMWARE("radeon/BARTS_me.bin");
MODULE_FIRMWARE("radeon/BARTS_mc.bin");
MODULE_FIRMWARE("radeon/BTC_rlc.bin");
MODULE_FIRMWARE("radeon/TURKS_pfp.bin");
MODULE_FIRMWARE("radeon/TURKS_me.bin");
MODULE_FIRMWARE("radeon/TURKS_mc.bin");
MODULE_FIRMWARE("radeon/CAICOS_pfp.bin");
MODULE_FIRMWARE("radeon/CAICOS_me.bin");
MODULE_FIRMWARE("radeon/CAICOS_mc.bin");
MODULE_FIRMWARE("radeon/CAYMAN_pfp.bin");
MODULE_FIRMWARE("radeon/CAYMAN_me.bin");
MODULE_FIRMWARE("radeon/CAYMAN_mc.bin");
MODULE_FIRMWARE("radeon/CAYMAN_rlc.bin");
MODULE_FIRMWARE("radeon/ARUBA_pfp.bin");
MODULE_FIRMWARE("radeon/ARUBA_me.bin");
MODULE_FIRMWARE("radeon/ARUBA_rlc.bin");
#endif
#define BTC_IO_MC_REGS_SIZE 29
static const u32 barts_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
@ -244,7 +278,7 @@ int ni_mc_load_microcode(struct radeon_device *rdev)
for (i = 0; i < rdev->usec_timeout; i++) {
if (RREG32(MC_IO_PAD_CNTL_D0) & MEM_FALL_OUT_CMD)
break;
DRM_UDELAY(1);
udelay(1);
}
if (running)
@ -478,21 +512,32 @@ static void cayman_gpu_init(struct radeon_device *rdev)
(rdev->ddev->pci_device == 0x9907) ||
(rdev->ddev->pci_device == 0x9908) ||
(rdev->ddev->pci_device == 0x9909) ||
(rdev->ddev->pci_device == 0x990B) ||
(rdev->ddev->pci_device == 0x990C) ||
(rdev->ddev->pci_device == 0x990F) ||
(rdev->ddev->pci_device == 0x9910) ||
(rdev->ddev->pci_device == 0x9917)) {
(rdev->ddev->pci_device == 0x9917) ||
(rdev->ddev->pci_device == 0x9999) ||
(rdev->ddev->pci_device == 0x999C)) {
rdev->config.cayman.max_simds_per_se = 6;
rdev->config.cayman.max_backends_per_se = 2;
} else if ((rdev->ddev->pci_device == 0x9903) ||
(rdev->ddev->pci_device == 0x9904) ||
(rdev->ddev->pci_device == 0x990A) ||
(rdev->ddev->pci_device == 0x990D) ||
(rdev->ddev->pci_device == 0x990E) ||
(rdev->ddev->pci_device == 0x9913) ||
(rdev->ddev->pci_device == 0x9918)) {
(rdev->ddev->pci_device == 0x9918) ||
(rdev->ddev->pci_device == 0x999D)) {
rdev->config.cayman.max_simds_per_se = 4;
rdev->config.cayman.max_backends_per_se = 2;
} else if ((rdev->ddev->pci_device == 0x9919) ||
(rdev->ddev->pci_device == 0x9990) ||
(rdev->ddev->pci_device == 0x9991) ||
(rdev->ddev->pci_device == 0x9994) ||
(rdev->ddev->pci_device == 0x9995) ||
(rdev->ddev->pci_device == 0x9996) ||
(rdev->ddev->pci_device == 0x999A) ||
(rdev->ddev->pci_device == 0x99A0)) {
rdev->config.cayman.max_simds_per_se = 3;
rdev->config.cayman.max_backends_per_se = 1;
@ -622,15 +667,28 @@ static void cayman_gpu_init(struct radeon_device *rdev)
WREG32(GB_ADDR_CONFIG, gb_addr_config);
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
if (ASIC_IS_DCE6(rdev))
WREG32(DMIF_ADDR_CALC, gb_addr_config);
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
tmp = gb_addr_config & NUM_PIPES_MASK;
tmp = r6xx_remap_render_backend(rdev, tmp,
rdev->config.cayman.max_backends_per_se *
rdev->config.cayman.max_shader_engines,
CAYMAN_MAX_BACKENDS, disabled_rb_mask);
if ((rdev->config.cayman.max_backends_per_se == 1) &&
(rdev->flags & RADEON_IS_IGP)) {
if ((disabled_rb_mask & 3) == 1) {
/* RB0 disabled, RB1 enabled */
tmp = 0x11111111;
} else {
/* RB1 disabled, RB0 enabled */
tmp = 0x00000000;
}
} else {
tmp = gb_addr_config & NUM_PIPES_MASK;
tmp = r6xx_remap_render_backend(rdev, tmp,
rdev->config.cayman.max_backends_per_se *
rdev->config.cayman.max_shader_engines,
CAYMAN_MAX_BACKENDS, disabled_rb_mask);
}
WREG32(GB_BACKEND_MAP, tmp);
cgts_tcc_disable = 0xffff0000;
@ -725,7 +783,7 @@ static void cayman_gpu_init(struct radeon_device *rdev)
WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
DRM_UDELAY(50);
udelay(50);
}
/*
@ -1072,7 +1130,7 @@ static int cayman_cp_resume(struct radeon_device *rdev)
SOFT_RESET_SPI |
SOFT_RESET_SX));
RREG32(GRBM_SOFT_RESET);
DRM_MDELAY(15);
mdelay(15);
WREG32(GRBM_SOFT_RESET, 0);
RREG32(GRBM_SOFT_RESET);
@ -1122,7 +1180,7 @@ static int cayman_cp_resume(struct radeon_device *rdev)
WREG32(ring->rptr_reg, ring->rptr);
WREG32(ring->wptr_reg, ring->wptr);
DRM_MDELAY(1);
mdelay(1);
WREG32_P(cp_rb_cntl[i], 0, ~RB_RPTR_WR_ENA);
}
@ -1236,7 +1294,7 @@ int cayman_dma_resume(struct radeon_device *rdev)
/* Reset dma */
WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
RREG32(SRBM_SOFT_RESET);
DRM_UDELAY(50);
udelay(50);
WREG32(SRBM_SOFT_RESET, 0);
for (i = 0; i < 2; i++) {
@ -1367,7 +1425,7 @@ static void cayman_gpu_soft_reset_gfx(struct radeon_device *rdev)
dev_info(rdev->dev, " GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
WREG32(GRBM_SOFT_RESET, grbm_reset);
(void)RREG32(GRBM_SOFT_RESET);
DRM_UDELAY(50);
udelay(50);
WREG32(GRBM_SOFT_RESET, 0);
(void)RREG32(GRBM_SOFT_RESET);
@ -1413,7 +1471,7 @@ static void cayman_gpu_soft_reset_dma(struct radeon_device *rdev)
/* Reset dma */
WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
RREG32(SRBM_SOFT_RESET);
DRM_UDELAY(50);
udelay(50);
WREG32(SRBM_SOFT_RESET, 0);
dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
@ -1457,7 +1515,7 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
cayman_gpu_soft_reset_dma(rdev);
/* Wait a little for things to settle down */
DRM_UDELAY(50);
udelay(50);
evergreen_mc_resume(rdev, &save);
return 0;
@ -1674,6 +1732,7 @@ int cayman_resume(struct radeon_device *rdev)
int cayman_suspend(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
radeon_vm_manager_fini(rdev);
cayman_cp_enable(rdev, false);
cayman_dma_stop(rdev);
evergreen_irq_suspend(rdev);

View File

@ -48,6 +48,10 @@ __FBSDID("$FreeBSD$");
#define ARUBA_GB_ADDR_CONFIG_GOLDEN 0x12010001
#define DMIF_ADDR_CONFIG 0xBD4
/* DCE6 only */
#define DMIF_ADDR_CALC 0xC00
#define SRBM_GFX_CNTL 0x0E44
#define RINGID(x) (((x) & 0x3) << 0)
#define VMID(x) (((x) & 0x7) << 0)

View File

@ -52,6 +52,16 @@ __FBSDID("$FreeBSD$");
#define FIRMWARE_RS600 "radeonkmsfw_RS600_cp"
#define FIRMWARE_R520 "radeonkmsfw_R520_cp"
#ifdef __linux__
MODULE_FIRMWARE(FIRMWARE_R100);
MODULE_FIRMWARE(FIRMWARE_R200);
MODULE_FIRMWARE(FIRMWARE_R300);
MODULE_FIRMWARE(FIRMWARE_R420);
MODULE_FIRMWARE(FIRMWARE_RS690);
MODULE_FIRMWARE(FIRMWARE_RS600);
MODULE_FIRMWARE(FIRMWARE_R520);
#endif
#include "r100_track.h"
/* This files gather functions specifics to:
@ -59,6 +69,38 @@ __FBSDID("$FreeBSD$");
* and others in some cases.
*/
static bool r100_is_in_vblank(struct radeon_device *rdev, int crtc)
{
if (crtc == 0) {
if (RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR)
return true;
else
return false;
} else {
if (RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR)
return true;
else
return false;
}
}
static bool r100_is_counter_moving(struct radeon_device *rdev, int crtc)
{
u32 vline1, vline2;
if (crtc == 0) {
vline1 = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
vline2 = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
} else {
vline1 = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
vline2 = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
}
if (vline1 != vline2)
return true;
else
return false;
}
/**
* r100_wait_for_vblank - vblank wait asic callback.
*
@ -69,36 +111,33 @@ __FBSDID("$FreeBSD$");
*/
void r100_wait_for_vblank(struct radeon_device *rdev, int crtc)
{
int i;
unsigned i = 0;
if (crtc >= rdev->num_crtc)
return;
if (crtc == 0) {
if (RREG32(RADEON_CRTC_GEN_CNTL) & RADEON_CRTC_EN) {
for (i = 0; i < rdev->usec_timeout; i++) {
if (!(RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR))
break;
DRM_UDELAY(1);
}
for (i = 0; i < rdev->usec_timeout; i++) {
if (RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR)
break;
DRM_UDELAY(1);
}
}
if (!(RREG32(RADEON_CRTC_GEN_CNTL) & RADEON_CRTC_EN))
return;
} else {
if (RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_EN) {
for (i = 0; i < rdev->usec_timeout; i++) {
if (!(RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR))
break;
DRM_UDELAY(1);
}
for (i = 0; i < rdev->usec_timeout; i++) {
if (RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR)
break;
DRM_UDELAY(1);
}
if (!(RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_EN))
return;
}
/* depending on when we hit vblank, we may be close to active; if so,
* wait for another frame.
*/
while (r100_is_in_vblank(rdev, crtc)) {
if (i++ % 100 == 0) {
if (!r100_is_counter_moving(rdev, crtc))
break;
}
}
while (!r100_is_in_vblank(rdev, crtc)) {
if (i++ % 100 == 0) {
if (!r100_is_counter_moving(rdev, crtc))
break;
}
}
}
@ -160,7 +199,7 @@ u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
for (i = 0; i < rdev->usec_timeout; i++) {
if (RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET)
break;
DRM_UDELAY(1);
udelay(1);
}
DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
@ -335,7 +374,7 @@ void r100_pm_misc(struct radeon_device *rdev)
tmp &= ~(voltage->gpio.mask);
WREG32(voltage->gpio.reg, tmp);
if (voltage->delay)
DRM_UDELAY(voltage->delay);
udelay(voltage->delay);
} else {
tmp = RREG32(voltage->gpio.reg);
if (voltage->active_high)
@ -344,7 +383,7 @@ void r100_pm_misc(struct radeon_device *rdev)
tmp |= voltage->gpio.mask;
WREG32(voltage->gpio.reg, tmp);
if (voltage->delay)
DRM_UDELAY(voltage->delay);
udelay(voltage->delay);
}
}
@ -713,7 +752,7 @@ void r100_irq_disable(struct radeon_device *rdev)
WREG32(R_000040_GEN_INT_CNTL, 0);
/* Wait and acknowledge irq */
DRM_MDELAY(1);
mdelay(1);
tmp = RREG32(R_000044_GEN_INT_STATUS);
WREG32(R_000044_GEN_INT_STATUS, tmp);
}
@ -924,7 +963,7 @@ static int r100_cp_wait_for_idle(struct radeon_device *rdev)
if (!G_000E40_CP_CMDSTRM_BUSY(tmp)) {
return 0;
}
DRM_UDELAY(1);
udelay(1);
}
return -1;
}
@ -1143,7 +1182,7 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
}
WREG32(RADEON_CP_RB_CNTL, tmp);
DRM_UDELAY(10);
udelay(10);
ring->rptr = RREG32(RADEON_CP_RB_RPTR);
/* Set cp mode to bus mastering & enable cp*/
WREG32(RADEON_CP_CSQ_MODE,
@ -2616,14 +2655,14 @@ void r100_bm_disable(struct radeon_device *rdev)
/* disable bus mastering */
tmp = RREG32(R_000030_BUS_CNTL);
WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000044);
DRM_MDELAY(1);
mdelay(1);
WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000042);
DRM_MDELAY(1);
mdelay(1);
WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040);
tmp = RREG32(RADEON_BUS_CNTL);
DRM_MDELAY(1);
mdelay(1);
pci_disable_busmaster(rdev->dev);
DRM_MDELAY(1);
mdelay(1);
}
int r100_asic_reset(struct radeon_device *rdev)
@ -2655,17 +2694,17 @@ int r100_asic_reset(struct radeon_device *rdev)
S_0000F0_SOFT_RESET_PP(1) |
S_0000F0_SOFT_RESET_RB(1));
RREG32(R_0000F0_RBBM_SOFT_RESET);
DRM_MDELAY(500);
mdelay(500);
WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
DRM_MDELAY(1);
mdelay(1);
status = RREG32(R_000E40_RBBM_STATUS);
dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
/* reset CP */
WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
RREG32(R_0000F0_RBBM_SOFT_RESET);
DRM_MDELAY(500);
mdelay(500);
WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
DRM_MDELAY(1);
mdelay(1);
status = RREG32(R_000E40_RBBM_STATUS);
dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
/* restore PCI & busmastering */
@ -2931,7 +2970,7 @@ static void r100_pll_errata_after_data(struct radeon_device *rdev)
* or the chip could hang on a subsequent access
*/
if (rdev->pll_errata & CHIP_ERRATA_PLL_DELAY) {
DRM_MDELAY(5);
mdelay(5);
}
/* This function is required to workaround a hardware bug in some (all?)
@ -2973,10 +3012,10 @@ static void r100_set_safe_registers(struct radeon_device *rdev)
{
if (ASIC_IS_RN50(rdev)) {
rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm;
rdev->config.r100.reg_safe_bm_size = DRM_ARRAY_SIZE(rn50_reg_safe_bm);
rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(rn50_reg_safe_bm);
} else if (rdev->family < CHIP_R200) {
rdev->config.r100.reg_safe_bm = r100_reg_safe_bm;
rdev->config.r100.reg_safe_bm_size = DRM_ARRAY_SIZE(r100_reg_safe_bm);
rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r100_reg_safe_bm);
} else {
r200_set_safe_registers(rdev);
}

View File

@ -548,5 +548,5 @@ int r200_packet0_check(struct radeon_cs_parser *p,
void r200_set_safe_registers(struct radeon_device *rdev)
{
rdev->config.r100.reg_safe_bm = r200_reg_safe_bm;
rdev->config.r100.reg_safe_bm_size = DRM_ARRAY_SIZE(r200_reg_safe_bm);
rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r200_reg_safe_bm);
}

View File

@ -407,9 +407,9 @@ int r300_asic_reset(struct radeon_device *rdev)
WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
S_0000F0_SOFT_RESET_GA(1));
RREG32(R_0000F0_RBBM_SOFT_RESET);
DRM_MDELAY(500);
mdelay(500);
WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
DRM_MDELAY(1);
mdelay(1);
status = RREG32(R_000E40_RBBM_STATUS);
dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
/* resetting the CP seems to be problematic sometimes it end up
@ -419,9 +419,9 @@ int r300_asic_reset(struct radeon_device *rdev)
*/
WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
RREG32(R_0000F0_RBBM_SOFT_RESET);
DRM_MDELAY(500);
mdelay(500);
WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
DRM_MDELAY(1);
mdelay(1);
status = RREG32(R_000E40_RBBM_STATUS);
dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
/* restore PCI & busmastering */
@ -1254,7 +1254,7 @@ int r300_cs_parse(struct radeon_cs_parser *p)
struct r100_cs_track *track;
int r;
track = malloc(sizeof(*track), DRM_MEM_DRIVER, M_ZERO | M_WAITOK);
track = malloc(sizeof(*track), DRM_MEM_DRIVER, M_NOWAIT | M_ZERO);
if (track == NULL)
return -ENOMEM;
r100_cs_track_clear(p->rdev, track);
@ -1299,7 +1299,7 @@ int r300_cs_parse(struct radeon_cs_parser *p)
void r300_set_reg_safe(struct radeon_device *rdev)
{
rdev->config.r300.reg_safe_bm = r300_reg_safe_bm;
rdev->config.r300.reg_safe_bm_size = DRM_ARRAY_SIZE(r300_reg_safe_bm);
rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r300_reg_safe_bm);
}
void r300_mc_program(struct radeon_device *rdev)

View File

@ -1013,7 +1013,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
drm_radeon_kcmd_buffer_t *cmdbuf)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
struct drm_radeon_master_private *master_priv = file_priv->masterp->driver_priv;
struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
struct drm_device_dma *dma = dev->dma;
struct drm_buf *buf = NULL;
int emit_dispatch_age = 0;
@ -1120,7 +1120,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
}
emit_dispatch_age = 1;
r300_discard_buffer(dev, file_priv->masterp, buf);
r300_discard_buffer(dev, file_priv->master, buf);
break;
case R300_CMD_WAIT:

View File

@ -80,7 +80,7 @@ void r420_pm_init_profile(struct radeon_device *rdev)
static void r420_set_reg_safe(struct radeon_device *rdev)
{
rdev->config.r300.reg_safe_bm = r420_reg_safe_bm;
rdev->config.r300.reg_safe_bm_size = DRM_ARRAY_SIZE(r420_reg_safe_bm);
rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r420_reg_safe_bm);
}
void r420_pipes_init(struct radeon_device *rdev)

View File

@ -129,10 +129,6 @@ __FBSDID("$FreeBSD$");
# define RS690_MC_INDEX_MASK 0x1ff
# define RS690_MC_INDEX_WR_EN (1 << 9)
# define RS690_MC_INDEX_WR_ACK 0x7f
#define RS690_MC_NB_CNTL 0x0
# define RS690_HIDE_MMCFG_BAR (1 << 3)
# define RS690_AGPMODE30 (1 << 4)
# define RS690_AGP30ENHANCED (1 << 5)
#define RS690_MC_DATA 0x7c
#define RS690_MC_STATUS 0x90
#define RS690_MC_STATUS_IDLE (1 << 0)
@ -364,7 +360,9 @@ __FBSDID("$FreeBSD$");
#define AVIVO_D1CRTC_FRAME_COUNT 0x60a4
#define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4
#define AVIVO_D1MODE_MASTER_UPDATE_LOCK 0x60e0
#define AVIVO_D1MODE_MASTER_UPDATE_MODE 0x60e4
#define AVIVO_D1CRTC_UPDATE_LOCK 0x60e8
/* master controls */
#define AVIVO_DC_CRTC_MASTER_EN 0x60f8

View File

@ -50,7 +50,7 @@ __FBSDID("$FreeBSD$");
#define CAYMAN_RLC_UCODE_SIZE 1024
#define ARUBA_RLC_UCODE_SIZE 1536
#ifdef DUMBBELL_WIP
#ifdef __linux__
/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
MODULE_FIRMWARE("radeon/R600_me.bin");
@ -93,12 +93,18 @@ MODULE_FIRMWARE("radeon/SUMO_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO_me.bin");
MODULE_FIRMWARE("radeon/SUMO2_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO2_me.bin");
#endif /* DUMBBELL_WIP */
#endif
int r600_debugfs_mc_info_init(struct radeon_device *rdev);
/* r600,rv610,rv630,rv620,rv635,rv670 */
#ifdef FREEBSD_WIP /* FreeBSD: to please GCC 4.2. */
int r600_mc_wait_for_idle(struct radeon_device *rdev);
#endif
static void r600_gpu_init(struct radeon_device *rdev);
#ifdef FREEBSD_WIP /* FreeBSD: to please GCC 4.2. */
void r600_fini(struct radeon_device *rdev);
#endif
void r600_irq_disable(struct radeon_device *rdev);
static void r600_pcie_gen2_enable(struct radeon_device *rdev);
@ -860,7 +866,7 @@ void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
if (tmp) {
return;
}
DRM_UDELAY(1);
udelay(1);
}
}
@ -1021,7 +1027,7 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev)
tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
if (!tmp)
return 0;
DRM_UDELAY(1);
udelay(1);
}
return -1;
}
@ -1209,7 +1215,7 @@ static int r600_mc_init(struct radeon_device *rdev)
int r600_vram_scratch_init(struct radeon_device *rdev)
{
int r;
void *vram_scratch_ptr_ptr;
void *vram_scratch_ptr_ptr; /* FreeBSD: to please GCC 4.2. */
if (rdev->vram_scratch.robj == NULL) {
r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
@ -1324,7 +1330,7 @@ static void r600_gpu_soft_reset_gfx(struct radeon_device *rdev)
dev_info(rdev->dev, " R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
WREG32(R_008020_GRBM_SOFT_RESET, tmp);
RREG32(R_008020_GRBM_SOFT_RESET);
DRM_MDELAY(15);
mdelay(15);
WREG32(R_008020_GRBM_SOFT_RESET, 0);
}
/* Reset CP (we always reset CP) */
@ -1332,7 +1338,7 @@ static void r600_gpu_soft_reset_gfx(struct radeon_device *rdev)
dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
WREG32(R_008020_GRBM_SOFT_RESET, tmp);
RREG32(R_008020_GRBM_SOFT_RESET);
DRM_MDELAY(15);
mdelay(15);
WREG32(R_008020_GRBM_SOFT_RESET, 0);
dev_info(rdev->dev, " R_008010_GRBM_STATUS = 0x%08X\n",
@ -1373,7 +1379,7 @@ static void r600_gpu_soft_reset_dma(struct radeon_device *rdev)
else
WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
RREG32(SRBM_SOFT_RESET);
DRM_UDELAY(50);
udelay(50);
WREG32(SRBM_SOFT_RESET, 0);
dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
@ -1407,7 +1413,7 @@ static int r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
r600_gpu_soft_reset_dma(rdev);
/* Wait a little for things to settle down */
DRM_MDELAY(1);
mdelay(1);
rv515_mc_resume(rdev, &save);
return 0;
@ -2168,7 +2174,7 @@ static int r600_cp_load_microcode(struct radeon_device *rdev)
/* Reset cp */
WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
RREG32(GRBM_SOFT_RESET);
DRM_MDELAY(15);
mdelay(15);
WREG32(GRBM_SOFT_RESET, 0);
WREG32(CP_ME_RAM_WADDR, 0);
@ -2231,7 +2237,7 @@ int r600_cp_resume(struct radeon_device *rdev)
/* Reset cp */
WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
RREG32(GRBM_SOFT_RESET);
DRM_MDELAY(15);
mdelay(15);
WREG32(GRBM_SOFT_RESET, 0);
/* Set ring buffer size */
@ -2265,7 +2271,7 @@ int r600_cp_resume(struct radeon_device *rdev)
WREG32(SCRATCH_UMSK, 0);
}
DRM_MDELAY(1);
mdelay(1);
WREG32(CP_RB_CNTL, tmp);
WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
@ -2362,7 +2368,7 @@ int r600_dma_resume(struct radeon_device *rdev)
else
WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
RREG32(SRBM_SOFT_RESET);
DRM_UDELAY(50);
udelay(50);
WREG32(SRBM_SOFT_RESET, 0);
WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
@ -3255,7 +3261,7 @@ void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
int r600_ih_ring_alloc(struct radeon_device *rdev)
{
int r;
void *ring_ptr;
void *ring_ptr; /* FreeBSD: to please GCC 4.2. */
/* Allocate ring buffer */
if (rdev->ih.ring_obj == NULL) {
@ -3320,7 +3326,7 @@ void r600_rlc_stop(struct radeon_device *rdev)
/* r7xx asics need to soft reset RLC before halting */
WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
RREG32(SRBM_SOFT_RESET);
DRM_MDELAY(15);
mdelay(15);
WREG32(SRBM_SOFT_RESET, 0);
RREG32(SRBM_SOFT_RESET);
}
@ -3813,7 +3819,7 @@ void r600_irq_disable(struct radeon_device *rdev)
{
r600_disable_interrupts(rdev);
/* Wait and acknowledge irq */
DRM_MDELAY(1);
mdelay(1);
r600_irq_ack(rdev);
r600_disable_interrupt_state(rdev);
}

View File

@ -539,7 +539,7 @@ static void r600_nomm_put_vb(struct drm_device *dev)
drm_radeon_private_t *dev_priv = dev->dev_private;
dev_priv->blit_vb->used = 0;
radeon_cp_discard_buffer(dev, dev_priv->blit_vb->file_priv->masterp, dev_priv->blit_vb);
radeon_cp_discard_buffer(dev, dev_priv->blit_vb->file_priv->master, dev_priv->blit_vb);
}
static void *r600_nomm_get_vb_ptr(struct drm_device *dev)

View File

@ -714,7 +714,7 @@ const u32 r6xx_ps[] =
0x00000000,
};
const u32 r6xx_ps_size = DRM_ARRAY_SIZE(r6xx_ps);
const u32 r6xx_vs_size = DRM_ARRAY_SIZE(r6xx_vs);
const u32 r6xx_default_size = DRM_ARRAY_SIZE(r6xx_default_state);
const u32 r7xx_default_size = DRM_ARRAY_SIZE(r7xx_default_state);
const u32 r6xx_ps_size = ARRAY_SIZE(r6xx_ps);
const u32 r6xx_vs_size = ARRAY_SIZE(r6xx_vs);
const u32 r6xx_default_size = ARRAY_SIZE(r6xx_default_state);
const u32 r7xx_default_size = ARRAY_SIZE(r7xx_default_state);

View File

@ -44,6 +44,37 @@ __FBSDID("$FreeBSD$");
#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360
#ifdef __linux__
/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
MODULE_FIRMWARE("radeon/R600_me.bin");
MODULE_FIRMWARE("radeon/RV610_pfp.bin");
MODULE_FIRMWARE("radeon/RV610_me.bin");
MODULE_FIRMWARE("radeon/RV630_pfp.bin");
MODULE_FIRMWARE("radeon/RV630_me.bin");
MODULE_FIRMWARE("radeon/RV620_pfp.bin");
MODULE_FIRMWARE("radeon/RV620_me.bin");
MODULE_FIRMWARE("radeon/RV635_pfp.bin");
MODULE_FIRMWARE("radeon/RV635_me.bin");
MODULE_FIRMWARE("radeon/RV670_pfp.bin");
MODULE_FIRMWARE("radeon/RV670_me.bin");
MODULE_FIRMWARE("radeon/RS780_pfp.bin");
MODULE_FIRMWARE("radeon/RS780_me.bin");
MODULE_FIRMWARE("radeon/RV770_pfp.bin");
MODULE_FIRMWARE("radeon/RV770_me.bin");
MODULE_FIRMWARE("radeon/RV730_pfp.bin");
MODULE_FIRMWARE("radeon/RV730_me.bin");
MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");
#endif
#ifdef FREEBSD_WIP /* FreeBSD: to please GCC 4.2. */
int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
unsigned family, u32 *ib, int *l);
void r600_cs_legacy_init(void);
#endif
# define ATI_PCIGART_PAGE_SIZE 4096 /**< PCI GART page size */
# define ATI_PCIGART_PAGE_MASK (~(ATI_PCIGART_PAGE_SIZE-1))
@ -392,7 +423,7 @@ static void r600_cp_load_microcode(drm_radeon_private_t *dev_priv)
RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP);
RADEON_READ(R600_GRBM_SOFT_RESET);
DRM_MDELAY(15);
mdelay(15);
RADEON_WRITE(R600_GRBM_SOFT_RESET, 0);
fw_data = (const __be32 *)dev_priv->me_fw->data;
@ -485,7 +516,7 @@ static void r700_cp_load_microcode(drm_radeon_private_t *dev_priv)
RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP);
RADEON_READ(R600_GRBM_SOFT_RESET);
DRM_MDELAY(15);
mdelay(15);
RADEON_WRITE(R600_GRBM_SOFT_RESET, 0);
fw_data = (const __be32 *)dev_priv->pfp_fw->data;
@ -1777,7 +1808,7 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev,
RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP);
RADEON_READ(R600_GRBM_SOFT_RESET);
DRM_MDELAY(15);
mdelay(15);
RADEON_WRITE(R600_GRBM_SOFT_RESET, 0);
@ -1907,7 +1938,7 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev,
RADEON_WRITE(R600_LAST_CLEAR_REG, 0);
/* reset sarea copies of these */
master_priv = file_priv->masterp->driver_priv;
master_priv = file_priv->master->driver_priv;
if (master_priv->sarea_priv) {
master_priv->sarea_priv->last_frame = 0;
master_priv->sarea_priv->last_dispatch = 0;
@ -1966,7 +1997,7 @@ int r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
struct drm_file *file_priv)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
struct drm_radeon_master_private *master_priv = file_priv->masterp->driver_priv;
struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
DRM_DEBUG("\n");
@ -2401,7 +2432,7 @@ int r600_cp_dispatch_indirect(struct drm_device *dev,
void r600_cp_dispatch_swap(struct drm_device *dev, struct drm_file *file_priv)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
struct drm_master *master = file_priv->masterp;
struct drm_master *master = file_priv->master;
struct drm_radeon_master_private *master_priv = master->driver_priv;
drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv;
int nbox = sarea_priv->nbox;
@ -2523,7 +2554,7 @@ int r600_cp_dispatch_texture(struct drm_device *dev,
r600_blit_copy(dev, src_offset, dst_offset, pass_size);
radeon_cp_discard_buffer(dev, file_priv->masterp, buf);
radeon_cp_discard_buffer(dev, file_priv->master, buf);
/* Update the input parameters for next time */
image->data = (const u8 __user *)image->data + pass_size;
@ -2588,7 +2619,7 @@ static void r600_ib_free(struct drm_device *dev, struct drm_buf *buf,
if (buf) {
if (!r)
r600_cp_dispatch_indirect(dev, buf, 0, l * 4);
radeon_cp_discard_buffer(dev, fpriv->masterp, buf);
radeon_cp_discard_buffer(dev, fpriv->master, buf);
COMMIT_RING();
}
}

Some files were not shown because too many files have changed in this diff Show More