An update for the i915 GPU driver, which brings the code up to Linux
commit 4d93914ae3db4a897ead4b.  Some related drm infrastructure
changes are imported as needed.

The biggest update is the rewrite of the i915 GEM I/O code to more closely
follow the Linux model, although the mechanism used by the FreeBSD port is
different.

Sponsored by:	The FreeBSD Foundation
MFC after:	2 months
Konstantin Belousov 2015-01-21 16:10:37 +00:00
parent 459ea53d05
commit 47671d1bab
54 changed files with 12383 additions and 7141 deletions
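
For illustration, a minimal user-space sketch of the new generic object-property
ioctl brought in with the imported drm infrastructure (DRM_IOCTL_MODE_OBJ_SETPROPERTY,
struct drm_mode_obj_set_property).  This is only a sketch: the device node path and
the object/property IDs are placeholders, and the drm headers from this commit are
assumed to be on the include path.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>

#include "drm.h"	/* DRM_IOCTL_MODE_OBJ_SETPROPERTY (placeholder include path) */
#include "drm_mode.h"	/* struct drm_mode_obj_set_property, DRM_MODE_OBJECT_CRTC */

int
main(void)
{
	/* Set a hypothetical property on a CRTC object; IDs are placeholders. */
	struct drm_mode_obj_set_property req = {
		.value    = 1,			/* placeholder property value */
		.prop_id  = 42,			/* placeholder property object id */
		.obj_id   = 17,			/* placeholder CRTC object id */
		.obj_type = DRM_MODE_OBJECT_CRTC,
	};
	int fd = open("/dev/dri/card0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return (1);
	}
	if (ioctl(fd, DRM_IOCTL_MODE_OBJ_SETPROPERTY, &req) == -1)
		perror("DRM_IOCTL_MODE_OBJ_SETPROPERTY");
	return (0);
}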

View File

@ -1018,6 +1018,9 @@ struct drm_event_vblank {
#define DRM_CAP_PRIME 0x5
#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6
#define DRM_PRIME_CAP_IMPORT 0x1
#define DRM_PRIME_CAP_EXPORT 0x2
#include "drm_mode.h"
/**
@ -1126,6 +1129,8 @@ struct drm_event_vblank {
#define DRM_IOCTL_MODE_GETPLANE DRM_IOWR(0xB6, struct drm_mode_get_plane)
#define DRM_IOCTL_MODE_SETPLANE DRM_IOWR(0xB7, struct drm_mode_set_plane)
#define DRM_IOCTL_MODE_ADDFB2 DRM_IOWR(0xB8, struct drm_mode_fb_cmd2)
#define DRM_IOCTL_MODE_OBJ_GETPROPERTIES DRM_IOWR(0xB9, struct drm_mode_obj_get_properties)
#define DRM_IOCTL_MODE_OBJ_SETPROPERTY DRM_IOWR(0xBA, struct drm_mode_obj_set_property)
#define DRM_IOCTL_MM_INIT DRM_IOWR(0xc0, struct drm_mm_init_arg)
#define DRM_IOCTL_MM_TAKEDOWN DRM_IOWR(0xc1, struct drm_mm_type_arg)

View File

@ -1246,6 +1246,7 @@ int drm_ati_pcigart_cleanup(struct drm_device *dev,
/* Cache management (drm_memory.c) */
void drm_clflush_pages(vm_page_t *pages, unsigned long num_pages);
void drm_clflush_virt_range(char *addr, unsigned long length);
/* Locking IOCTL support (drm_drv.c) */
int drm_lock(struct drm_device *dev, void *data,

View File

@ -352,7 +352,7 @@ void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
* @funcs: callbacks for the new CRTC
*
* LOCKING:
* Caller must hold mode config lock.
* Takes mode_config lock.
*
* Inits a new object created as base part of a driver crtc object.
*
@ -372,8 +372,11 @@ int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
if (ret)
goto out;
crtc->base.properties = &crtc->properties;
list_add_tail(&crtc->head, &dev->mode_config.crtc_list);
dev->mode_config.num_crtc++;
out:
sx_xunlock(&dev->mode_config.mutex);
@ -474,6 +477,7 @@ int drm_connector_init(struct drm_device *dev,
if (ret)
goto out;
connector->base.properties = &connector->properties;
connector->dev = dev;
connector->funcs = funcs;
connector->connector_type = connector_type;
@ -582,6 +586,7 @@ int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
if (ret)
goto out;
plane->base.properties = &plane->properties;
plane->dev = dev;
plane->funcs = funcs;
plane->format_types = malloc(sizeof(uint32_t) * format_count,
@ -1399,11 +1404,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
}
connector = obj_to_connector(obj);
for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
if (connector->property_ids[i] != 0) {
props_count++;
}
}
props_count = connector->properties.count;
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
if (connector->encoder_ids[i] != 0) {
@ -1456,21 +1457,19 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
copied = 0;
prop_ptr = (uint32_t *)(uintptr_t)(out_resp->props_ptr);
prop_values = (uint64_t *)(uintptr_t)(out_resp->prop_values_ptr);
for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
if (connector->property_ids[i] != 0) {
if (copyout(&connector->property_ids[i],
prop_ptr + copied, sizeof(uint32_t))) {
ret = EFAULT;
goto out;
}
if (copyout(&connector->property_values[i],
prop_values + copied, sizeof(uint64_t))) {
ret = EFAULT;
goto out;
}
copied++;
for (i = 0; i < connector->properties.count; i++) {
if (copyout(&connector->properties.ids[i],
prop_ptr + copied, sizeof(uint32_t))) {
ret = EFAULT;
goto out;
}
if (copyout(&connector->properties.values[i],
prop_values + copied, sizeof(uint64_t))) {
ret = EFAULT;
goto out;
}
copied++;
}
}
out_resp->count_props = props_count;
@ -1808,7 +1807,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
struct drm_display_mode *mode = NULL;
struct drm_mode_set set;
uint32_t *set_connectors_ptr;
int ret = 0;
int ret;
int i;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
@ -2070,7 +2069,7 @@ int drm_mode_addfb(struct drm_device *dev,
ret = -dev->mode_config.funcs->fb_create(dev, file_priv, &r, &fb);
if (ret != 0) {
DRM_ERROR("could not create framebuffer, error %d\n", ret);
DRM_DEBUG_KMS("could not create framebuffer, error %d\n", ret);
goto out;
}
@ -2152,6 +2151,47 @@ static int format_check(struct drm_mode_fb_cmd2 *r)
}
}
static int framebuffer_check(struct drm_mode_fb_cmd2 *r)
{
int ret, hsub, vsub, num_planes, i;
ret = format_check(r);
if (ret) {
DRM_DEBUG_KMS("bad framebuffer format 0x%08x\n", r->pixel_format);
return ret;
}
hsub = drm_format_horz_chroma_subsampling(r->pixel_format);
vsub = drm_format_vert_chroma_subsampling(r->pixel_format);
num_planes = drm_format_num_planes(r->pixel_format);
if (r->width == 0 || r->width % hsub) {
DRM_DEBUG_KMS("bad framebuffer width %u\n", r->height);
return -EINVAL;
}
if (r->height == 0 || r->height % vsub) {
DRM_DEBUG_KMS("bad framebuffer height %u\n", r->height);
return -EINVAL;
}
for (i = 0; i < num_planes; i++) {
unsigned int width = r->width / (i != 0 ? hsub : 1);
if (!r->handles[i]) {
DRM_DEBUG_KMS("no buffer object handle for plane %d\n", i);
return -EINVAL;
}
if (r->pitches[i] < drm_format_plane_cpp(r->pixel_format, i) * width) {
DRM_DEBUG_KMS("bad pitch %u for plane %d\n", r->pitches[i], i);
return -EINVAL;
}
}
return 0;
}
/**
* drm_mode_addfb2 - add an FB to the graphics configuration
* @inode: inode from the ioctl
@ -2181,21 +2221,19 @@ int drm_mode_addfb2(struct drm_device *dev,
return (EINVAL);
if ((config->min_width > r->width) || (r->width > config->max_width)) {
DRM_ERROR("bad framebuffer width %d, should be >= %d && <= %d\n",
DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n",
r->width, config->min_width, config->max_width);
return (EINVAL);
}
if ((config->min_height > r->height) || (r->height > config->max_height)) {
DRM_ERROR("bad framebuffer height %d, should be >= %d && <= %d\n",
DRM_DEBUG_KMS("bad framebuffer height %d, should be >= %d && <= %d\n",
r->height, config->min_height, config->max_height);
return (EINVAL);
}
ret = format_check(r);
if (ret) {
DRM_ERROR("bad framebuffer format 0x%08x\n", r->pixel_format);
return ret;
}
ret = framebuffer_check(r);
if (ret)
return -ret;
sx_xlock(&dev->mode_config.mutex);
@ -2204,7 +2242,7 @@ int drm_mode_addfb2(struct drm_device *dev,
ret = -dev->mode_config.funcs->fb_create(dev, file_priv, r, &fb);
if (ret != 0) {
DRM_ERROR("could not create framebuffer, error %d\n", ret);
DRM_DEBUG_KMS("could not create framebuffer, error %d\n", ret);
goto out;
}
@ -2335,7 +2373,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
struct drm_framebuffer *fb;
unsigned flags;
int num_clips;
int ret = 0;
int ret;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return (EINVAL);
@ -2530,7 +2568,7 @@ int drm_mode_attachmode_ioctl(struct drm_device *dev,
struct drm_display_mode *mode;
struct drm_mode_object *obj;
struct drm_mode_modeinfo *umode = &mode_cmd->mode;
int ret = 0;
int ret;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
@ -2584,7 +2622,7 @@ int drm_mode_detachmode_ioctl(struct drm_device *dev,
struct drm_connector *connector;
struct drm_display_mode mode;
struct drm_mode_modeinfo *umode = &mode_cmd->mode;
int ret = 0;
int ret;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
@ -2672,6 +2710,33 @@ struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
return property;
}
struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
int flags, const char *name,
const struct drm_prop_enum_list *props,
int num_values)
{
struct drm_property *property;
int i, ret;
flags |= DRM_MODE_PROP_BITMASK;
property = drm_property_create(dev, flags, name, num_values);
if (!property)
return NULL;
for (i = 0; i < num_values; i++) {
ret = drm_property_add_enum(property, i,
props[i].type,
props[i].name);
if (ret) {
drm_property_destroy(dev, property);
return NULL;
}
}
return property;
}
struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
const char *name,
uint64_t min, uint64_t max)
@ -2695,7 +2760,14 @@ int drm_property_add_enum(struct drm_property *property, int index,
{
struct drm_property_enum *prop_enum;
if (!(property->flags & DRM_MODE_PROP_ENUM))
if (!(property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)))
return -EINVAL;
/*
* Bitmask enum properties have the additional constraint of values
* from 0 to 63
*/
if ((property->flags & DRM_MODE_PROP_BITMASK) && (value > 63))
return -EINVAL;
if (!list_empty(&property->enum_blob_list)) {
@ -2736,56 +2808,71 @@ void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
free(property, DRM_MEM_KMS);
}
int drm_connector_attach_property(struct drm_connector *connector,
void drm_connector_attach_property(struct drm_connector *connector,
struct drm_property *property, uint64_t init_val)
{
int i;
for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
if (connector->property_ids[i] == 0) {
connector->property_ids[i] = property->base.id;
connector->property_values[i] = init_val;
break;
}
}
if (i == DRM_CONNECTOR_MAX_PROPERTY)
return -EINVAL;
return 0;
drm_object_attach_property(&connector->base, property, init_val);
}
int drm_connector_property_set_value(struct drm_connector *connector,
struct drm_property *property, uint64_t value)
{
int i;
for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
if (connector->property_ids[i] == property->base.id) {
connector->property_values[i] = value;
break;
}
}
if (i == DRM_CONNECTOR_MAX_PROPERTY)
return -EINVAL;
return 0;
return drm_object_property_set_value(&connector->base, property, value);
}
int drm_connector_property_get_value(struct drm_connector *connector,
struct drm_property *property, uint64_t *val)
{
return drm_object_property_get_value(&connector->base, property, val);
}
void drm_object_attach_property(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t init_val)
{
int count = obj->properties->count;
if (count == DRM_OBJECT_MAX_PROPERTY) {
printf("Failed to attach object property (type: 0x%x). Please "
"increase DRM_OBJECT_MAX_PROPERTY by 1 for each time "
"you see this message on the same object type.\n",
obj->type);
return;
}
obj->properties->ids[count] = property->base.id;
obj->properties->values[count] = init_val;
obj->properties->count++;
}
int drm_object_property_set_value(struct drm_mode_object *obj,
struct drm_property *property, uint64_t val)
{
int i;
for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
if (connector->property_ids[i] == property->base.id) {
*val = connector->property_values[i];
break;
for (i = 0; i < obj->properties->count; i++) {
if (obj->properties->ids[i] == property->base.id) {
obj->properties->values[i] = val;
return 0;
}
}
if (i == DRM_CONNECTOR_MAX_PROPERTY)
return -EINVAL;
return 0;
return -EINVAL;
}
int drm_object_property_get_value(struct drm_mode_object *obj,
struct drm_property *property, uint64_t *val)
{
int i;
for (i = 0; i < obj->properties->count; i++) {
if (obj->properties->ids[i] == property->base.id) {
*val = obj->properties->values[i];
return 0;
}
}
return -EINVAL;
}
int drm_mode_getproperty_ioctl(struct drm_device *dev,
@ -2817,7 +2904,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
}
property = obj_to_property(obj);
if (property->flags & DRM_MODE_PROP_ENUM) {
if (property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)) {
list_for_each_entry(prop_enum, &property->enum_blob_list, head)
enum_count++;
} else if (property->flags & DRM_MODE_PROP_BLOB) {
@ -2842,7 +2929,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
}
out_resp->count_values = value_count;
if (property->flags & DRM_MODE_PROP_ENUM) {
if (property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)) {
if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
copied = 0;
enum_ptr = (struct drm_mode_property_enum *)(uintptr_t)out_resp->enum_blob_ptr;
@ -2965,7 +3052,7 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
struct edid *edid)
{
struct drm_device *dev = connector->dev;
int ret = 0, size;
int ret, size;
if (connector->edid_blob_ptr)
drm_property_destroy_blob(dev, connector->edid_blob_ptr);
@ -2988,13 +3075,159 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
return ret;
}
static bool drm_property_change_is_valid(struct drm_property *property,
u64 value)
{
if (property->flags & DRM_MODE_PROP_IMMUTABLE)
return false;
if (property->flags & DRM_MODE_PROP_RANGE) {
if (value < property->values[0] || value > property->values[1])
return false;
return true;
} else if (property->flags & DRM_MODE_PROP_BITMASK) {
int i;
u64 valid_mask = 0;
for (i = 0; i < property->num_values; i++)
valid_mask |= (1ULL << property->values[i]);
return !(value & ~valid_mask);
} else {
int i;
for (i = 0; i < property->num_values; i++)
if (property->values[i] == value)
return true;
return false;
}
}
int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_connector_set_property *out_resp = data;
struct drm_mode_connector_set_property *conn_set_prop = data;
struct drm_mode_obj_set_property obj_set_prop = {
.value = conn_set_prop->value,
.prop_id = conn_set_prop->prop_id,
.obj_id = conn_set_prop->connector_id,
.obj_type = DRM_MODE_OBJECT_CONNECTOR
};
/* It does all the locking and checking we need */
return drm_mode_obj_set_property_ioctl(dev, &obj_set_prop, file_priv);
}
static int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t value)
{
int ret = -EINVAL;
struct drm_connector *connector = obj_to_connector(obj);
/* Do DPMS ourselves */
if (property == connector->dev->mode_config.dpms_property) {
if (connector->funcs->dpms)
(*connector->funcs->dpms)(connector, (int)value);
ret = 0;
} else if (connector->funcs->set_property)
ret = connector->funcs->set_property(connector, property, value);
/* store the property value if successful */
if (!ret)
drm_connector_property_set_value(connector, property, value);
return ret;
}
static int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t value)
{
int ret = -EINVAL;
struct drm_crtc *crtc = obj_to_crtc(obj);
if (crtc->funcs->set_property)
ret = crtc->funcs->set_property(crtc, property, value);
if (!ret)
drm_object_property_set_value(obj, property, value);
return ret;
}
static int drm_mode_plane_set_obj_prop(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t value)
{
int ret = -EINVAL;
struct drm_plane *plane = obj_to_plane(obj);
if (plane->funcs->set_property)
ret = plane->funcs->set_property(plane, property, value);
if (!ret)
drm_object_property_set_value(obj, property, value);
return ret;
}
int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_mode_obj_get_properties *arg = data;
struct drm_mode_object *obj;
int ret = 0;
int i;
int copied = 0;
int props_count = 0;
uint32_t __user *props_ptr;
uint64_t __user *prop_values_ptr;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
sx_xlock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
if (!obj) {
ret = -EINVAL;
goto out;
}
if (!obj->properties) {
ret = -EINVAL;
goto out;
}
props_count = obj->properties->count;
/* This ioctl is called twice, once to determine how much space is
* needed, and the 2nd time to fill it. */
if ((arg->count_props >= props_count) && props_count) {
copied = 0;
props_ptr = (uint32_t __user *)(unsigned long)(arg->props_ptr);
prop_values_ptr = (uint64_t __user *)(unsigned long)
(arg->prop_values_ptr);
for (i = 0; i < props_count; i++) {
if (copyout(&obj->properties->ids[i],
props_ptr + copied, sizeof(uint32_t))) {
ret = -EFAULT;
goto out;
}
if (copyout(&obj->properties->values[i],
prop_values_ptr + copied, sizeof(uint64_t))) {
ret = -EFAULT;
goto out;
}
copied++;
}
}
arg->count_props = props_count;
out:
sx_xunlock(&dev->mode_config.mutex);
return ret;
}
int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_mode_obj_set_property *arg = data;
struct drm_mode_object *arg_obj;
struct drm_mode_object *prop_obj;
struct drm_property *property;
struct drm_connector *connector;
int ret = -EINVAL;
int i;
@ -3003,60 +3236,41 @@ int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
sx_xlock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, out_resp->connector_id, DRM_MODE_OBJECT_CONNECTOR);
if (!obj) {
arg_obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
if (!arg_obj)
goto out;
if (!arg_obj->properties)
goto out;
}
connector = obj_to_connector(obj);
for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
if (connector->property_ids[i] == out_resp->prop_id)
for (i = 0; i < arg_obj->properties->count; i++)
if (arg_obj->properties->ids[i] == arg->prop_id)
break;
}
if (i == DRM_CONNECTOR_MAX_PROPERTY) {
goto out;
}
obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
if (!obj) {
goto out;
}
property = obj_to_property(obj);
if (property->flags & DRM_MODE_PROP_IMMUTABLE)
if (i == arg_obj->properties->count)
goto out;
if (property->flags & DRM_MODE_PROP_RANGE) {
if (out_resp->value < property->values[0])
goto out;
prop_obj = drm_mode_object_find(dev, arg->prop_id,
DRM_MODE_OBJECT_PROPERTY);
if (!prop_obj)
goto out;
property = obj_to_property(prop_obj);
if (out_resp->value > property->values[1])
goto out;
} else {
int found = 0;
for (i = 0; i < property->num_values; i++) {
if (property->values[i] == out_resp->value) {
found = 1;
break;
}
}
if (!found) {
goto out;
}
if (!drm_property_change_is_valid(property, arg->value))
goto out;
switch (arg_obj->type) {
case DRM_MODE_OBJECT_CONNECTOR:
ret = drm_mode_connector_set_obj_prop(arg_obj, property,
arg->value);
break;
case DRM_MODE_OBJECT_CRTC:
ret = drm_mode_crtc_set_obj_prop(arg_obj, property, arg->value);
break;
case DRM_MODE_OBJECT_PLANE:
ret = drm_mode_plane_set_obj_prop(arg_obj, property, arg->value);
break;
}
/* Do DPMS ourselves */
if (property == connector->dev->mode_config.dpms_property) {
if (connector->funcs->dpms)
(*connector->funcs->dpms)(connector, (int) out_resp->value);
ret = 0;
} else if (connector->funcs->set_property)
ret = connector->funcs->set_property(connector, property, out_resp->value);
/* store the property value if successful */
if (!ret)
drm_connector_property_set_value(connector, property, out_resp->value);
out:
sx_xunlock(&dev->mode_config.mutex);
return ret;
@ -3176,6 +3390,11 @@ int drm_mode_gamma_get_ioctl(struct drm_device *dev,
}
crtc = obj_to_crtc(obj);
if (crtc->funcs->gamma_set == NULL) {
ret = -ENOSYS;
goto out;
}
/* memcpy into gamma store */
if (crtc_lut->gamma_size != crtc->gamma_size) {
ret = -EINVAL;
@ -3417,3 +3636,136 @@ void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
break;
}
}
/**
* drm_format_num_planes - get the number of planes for format
* @format: pixel format (DRM_FORMAT_*)
*
* RETURNS:
* The number of planes used by the specified pixel format.
*/
int drm_format_num_planes(uint32_t format)
{
switch (format) {
case DRM_FORMAT_YUV410:
case DRM_FORMAT_YVU410:
case DRM_FORMAT_YUV411:
case DRM_FORMAT_YVU411:
case DRM_FORMAT_YUV420:
case DRM_FORMAT_YVU420:
case DRM_FORMAT_YUV422:
case DRM_FORMAT_YVU422:
case DRM_FORMAT_YUV444:
case DRM_FORMAT_YVU444:
return 3;
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV21:
case DRM_FORMAT_NV16:
case DRM_FORMAT_NV61:
return 2;
default:
return 1;
}
}
/**
* drm_format_plane_cpp - determine the bytes per pixel value
* @format: pixel format (DRM_FORMAT_*)
* @plane: plane index
*
* RETURNS:
* The bytes per pixel value for the specified plane.
*/
int drm_format_plane_cpp(uint32_t format, int plane)
{
unsigned int depth;
int bpp;
if (plane >= drm_format_num_planes(format))
return 0;
switch (format) {
case DRM_FORMAT_YUYV:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_UYVY:
case DRM_FORMAT_VYUY:
return 2;
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV21:
case DRM_FORMAT_NV16:
case DRM_FORMAT_NV61:
return plane ? 2 : 1;
case DRM_FORMAT_YUV410:
case DRM_FORMAT_YVU410:
case DRM_FORMAT_YUV411:
case DRM_FORMAT_YVU411:
case DRM_FORMAT_YUV420:
case DRM_FORMAT_YVU420:
case DRM_FORMAT_YUV422:
case DRM_FORMAT_YVU422:
case DRM_FORMAT_YUV444:
case DRM_FORMAT_YVU444:
return 1;
default:
drm_fb_get_bpp_depth(format, &depth, &bpp);
return bpp >> 3;
}
}
/**
* drm_format_horz_chroma_subsampling - get the horizontal chroma subsampling factor
* @format: pixel format (DRM_FORMAT_*)
*
* RETURNS:
* The horizontal chroma subsampling factor for the
* specified pixel format.
*/
int drm_format_horz_chroma_subsampling(uint32_t format)
{
switch (format) {
case DRM_FORMAT_YUV411:
case DRM_FORMAT_YVU411:
case DRM_FORMAT_YUV410:
case DRM_FORMAT_YVU410:
return 4;
case DRM_FORMAT_YUYV:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_UYVY:
case DRM_FORMAT_VYUY:
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV21:
case DRM_FORMAT_NV16:
case DRM_FORMAT_NV61:
case DRM_FORMAT_YUV422:
case DRM_FORMAT_YVU422:
case DRM_FORMAT_YUV420:
case DRM_FORMAT_YVU420:
return 2;
default:
return 1;
}
}
/**
* drm_format_vert_chroma_subsampling - get the vertical chroma subsampling factor
* @format: pixel format (DRM_FORMAT_*)
*
* RETURNS:
* The vertical chroma subsampling factor for the
* specified pixel format.
*/
int drm_format_vert_chroma_subsampling(uint32_t format)
{
switch (format) {
case DRM_FORMAT_YUV410:
case DRM_FORMAT_YVU410:
return 4;
case DRM_FORMAT_YUV420:
case DRM_FORMAT_YVU420:
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV21:
return 2;
default:
return 1;
}
}

View File

@ -47,6 +47,14 @@ struct i2c_adapter;
struct drm_mode_object {
uint32_t id;
uint32_t type;
struct drm_object_properties *properties;
};
#define DRM_OBJECT_MAX_PROPERTY 16
struct drm_object_properties {
int count;
uint32_t ids[DRM_OBJECT_MAX_PROPERTY];
uint64_t values[DRM_OBJECT_MAX_PROPERTY];
};
/*
@ -295,7 +303,8 @@ struct drm_plane;
* @mode_fixup: fixup proposed mode
* @mode_set: set the desired mode on the CRTC
* @gamma_set: specify color ramp for CRTC
* @destroy: deinit and free object.
* @destroy: deinit and free object
* @set_property: called when a property is changed
*
* The drm_crtc_funcs structure is the central CRTC management structure
* in the DRM. Each CRTC controls one or more connectors (note that the name
@ -339,6 +348,8 @@ struct drm_crtc_funcs {
int (*page_flip)(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event);
int (*set_property)(struct drm_crtc *crtc,
struct drm_property *property, uint64_t val);
};
/**
@ -347,6 +358,7 @@ struct drm_crtc_funcs {
* @x: x position on screen
* @y: y position on screen
* @funcs: CRTC control functions
* @properties: property tracking for this CRTC
*
* Each CRTC may have one or more connectors associated with it. This structure
* allows the CRTC to be controlled.
@ -382,6 +394,8 @@ struct drm_crtc {
/* if you are using the helper */
void *helper_private;
struct drm_object_properties properties;
};
@ -431,7 +445,6 @@ struct drm_encoder_funcs {
};
#define DRM_CONNECTOR_MAX_UMODES 16
#define DRM_CONNECTOR_MAX_PROPERTY 16
#define DRM_CONNECTOR_LEN 32
#define DRM_CONNECTOR_MAX_ENCODER 2
@ -511,8 +524,7 @@ struct drm_connector {
struct list_head user_modes;
struct drm_property_blob *edid_blob_ptr;
u32 property_ids[DRM_CONNECTOR_MAX_PROPERTY];
uint64_t property_values[DRM_CONNECTOR_MAX_PROPERTY];
struct drm_object_properties properties;
uint8_t polled; /* DRM_CONNECTOR_POLL_* */
@ -543,6 +555,7 @@ struct drm_connector {
* @update_plane: update the plane configuration
* @disable_plane: shut down the plane
* @destroy: clean up plane resources
* @set_property: called when a property is changed
*/
struct drm_plane_funcs {
int (*update_plane)(struct drm_plane *plane,
@ -553,6 +566,8 @@ struct drm_plane_funcs {
uint32_t src_w, uint32_t src_h);
int (*disable_plane)(struct drm_plane *plane);
void (*destroy)(struct drm_plane *plane);
int (*set_property)(struct drm_plane *plane,
struct drm_property *property, uint64_t val);
};
/**
@ -570,6 +585,7 @@ struct drm_plane_funcs {
* @enabled: enabled flag
* @funcs: helper functions
* @helper_private: storage for driver layer
* @properties: property tracking for this plane
*/
struct drm_plane {
struct drm_device *dev;
@ -592,6 +608,8 @@ struct drm_plane {
const struct drm_plane_funcs *funcs;
void *helper_private;
struct drm_object_properties properties;
};
/**
@ -806,6 +824,15 @@ extern int drm_connector_property_set_value(struct drm_connector *connector,
extern int drm_connector_property_get_value(struct drm_connector *connector,
struct drm_property *property,
uint64_t *value);
void drm_object_attach_property(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t init_val);
extern int drm_object_property_set_value(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t val);
extern int drm_object_property_get_value(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t *value);
extern struct drm_display_mode *drm_crtc_mode_create(struct drm_device *dev);
extern void drm_framebuffer_set_object(struct drm_device *dev,
unsigned long handle);
@ -818,7 +845,7 @@ extern int drmfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
extern void drm_crtc_probe_connector_modes(struct drm_device *dev, int maxX, int maxY);
extern bool drm_crtc_in_use(struct drm_crtc *crtc);
extern int drm_connector_attach_property(struct drm_connector *connector,
extern void drm_connector_attach_property(struct drm_connector *connector,
struct drm_property *property, uint64_t init_val);
extern struct drm_property *drm_property_create(struct drm_device *dev, int flags,
const char *name, int num_values);
@ -826,6 +853,10 @@ extern struct drm_property *drm_property_create_enum(struct drm_device *dev, int
const char *name,
const struct drm_prop_enum_list *props,
int num_values);
struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
int flags, const char *name,
const struct drm_prop_enum_list *props,
int num_values);
struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
const char *name,
uint64_t min, uint64_t max);
@ -921,7 +952,8 @@ extern int drm_add_modes_noedid(struct drm_connector *connector,
extern int drm_edid_header_is_valid(const u8 *raw_edid);
extern bool drm_edid_is_valid(struct edid *edid);
struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
int hsize, int vsize, int fresh);
int hsize, int vsize, int fresh,
bool rb);
extern int drm_mode_create_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
@ -929,7 +961,16 @@ extern int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
int *bpp);
extern int drm_format_num_planes(uint32_t format);
extern int drm_format_plane_cpp(uint32_t format, int plane);
extern int drm_format_horz_chroma_subsampling(uint32_t format);
extern int drm_format_vert_chroma_subsampling(uint32_t format);
#endif /* __DRM_CRTC_H__ */

View File

@ -549,7 +549,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
int count = 0, ro, fail = 0;
struct drm_crtc_helper_funcs *crtc_funcs;
struct drm_mode_set save_set;
int ret = 0;
int ret;
int i;
DRM_DEBUG_KMS("\n");

View File

@ -78,7 +78,7 @@ struct drm_encoder_helper_funcs {
void (*restore)(struct drm_encoder *encoder);
bool (*mode_fixup)(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
void (*prepare)(struct drm_encoder *encoder);
void (*commit)(struct drm_encoder *encoder);

View File

@ -188,6 +188,8 @@ static drm_ioctl_desc_t drm_ioctls[256] = {
DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
};
static struct cdevsw drm_cdevsw = {
@ -246,7 +248,8 @@ int drm_probe(device_t kdev, drm_pci_id_list_t *idlist)
device = pci_get_device(kdev);
if (pci_get_class(kdev) != PCIC_DISPLAY
|| pci_get_subclass(kdev) != PCIS_DISPLAY_VGA)
|| (pci_get_subclass(kdev) != PCIS_DISPLAY_VGA &&
pci_get_subclass(kdev) != PCIS_DISPLAY_OTHER))
return ENXIO;
id_entry = drm_find_description(vendor, device, idlist);
@ -1075,10 +1078,63 @@ SYSINIT(drm_register, SI_SUB_KLD, SI_ORDER_MIDDLE,
SYSUNINIT(drm_unregister, SI_SUB_KLD, SI_ORDER_MIDDLE,
drm_core_exit, NULL);
static bool
dmi_found(const struct dmi_system_id *dsi)
{
char *hw_vendor, *hw_prod;
int i, slot;
bool res;
hw_vendor = kern_getenv("smbios.planar.maker");
hw_prod = kern_getenv("smbios.planar.product");
res = true;
for (i = 0; i < nitems(dsi->matches); i++) {
slot = dsi->matches[i].slot;
switch (slot) {
case DMI_NONE:
break;
case DMI_SYS_VENDOR:
case DMI_BOARD_VENDOR:
if (hw_vendor != NULL &&
!strcmp(hw_vendor, dsi->matches[i].substr)) {
break;
} else {
res = false;
goto out;
}
case DMI_PRODUCT_NAME:
case DMI_BOARD_NAME:
if (hw_prod != NULL &&
!strcmp(hw_prod, dsi->matches[i].substr)) {
break;
} else {
res = false;
goto out;
}
default:
res = false;
goto out;
}
}
out:
freeenv(hw_vendor);
freeenv(hw_prod);
return (res);
}
bool
dmi_check_system(const struct dmi_system_id *sysid)
{
const struct dmi_system_id *dsi;
bool res;
/* XXXKIB */
return (false);
for (res = false, dsi = sysid; dsi->matches[0].slot != 0 ; dsi++) {
if (dmi_found(dsi)) {
res = true;
if (dsi->callback != NULL && dsi->callback(dsi))
break;
}
}
return (res);
}

View File

@ -84,7 +84,7 @@ struct detailed_mode_closure {
#define LEVEL_CVT 3
static struct edid_quirk {
char *vendor;
char vendor[4];
int product_id;
u32 quirks;
} edid_quirk_list[] = {
@ -151,13 +151,13 @@ int drm_edid_header_is_valid(const u8 *raw_edid)
* doesn't check out, or 1 if it's valid.
*/
static bool
drm_edid_block_valid(u8 *raw_edid)
drm_edid_block_valid(u8 *raw_edid, int block)
{
int i;
u8 csum = 0;
struct edid *edid = (struct edid *)raw_edid;
if (raw_edid[0] == 0x00) {
if (block == 0) {
int score = drm_edid_header_is_valid(raw_edid);
if (score == 8) ;
else if (score >= 6) {
@ -230,7 +230,7 @@ bool drm_edid_is_valid(struct edid *edid)
return false;
for (i = 0; i <= edid->extensions; i++)
if (!drm_edid_block_valid(raw + i * EDID_LENGTH))
if (!drm_edid_block_valid(raw + i * EDID_LENGTH, i))
return false;
return true;
@ -320,7 +320,7 @@ drm_do_get_edid(struct drm_connector *connector, device_t adapter)
for (i = 0; i < 4; i++) {
if (drm_do_probe_ddc_edid(adapter, block, 0, EDID_LENGTH))
goto out;
if (drm_edid_block_valid(block))
if (drm_edid_block_valid(block, 0))
break;
if (i == 0 && drm_edid_is_zero(block, EDID_LENGTH)) {
connector->null_edid_counter++;
@ -344,7 +344,7 @@ drm_do_get_edid(struct drm_connector *connector, device_t adapter)
block + (valid_extensions + 1) * EDID_LENGTH,
j, EDID_LENGTH))
goto out;
if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH)) {
if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH, j)) {
valid_extensions++;
break;
}
@ -504,23 +504,47 @@ static void edid_fixup_preferred(struct drm_connector *connector,
preferred_mode->type |= DRM_MODE_TYPE_PREFERRED;
}
struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
int hsize, int vsize, int fresh)
static bool
mode_is_rb(const struct drm_display_mode *mode)
{
return (mode->htotal - mode->hdisplay == 160) &&
(mode->hsync_end - mode->hdisplay == 80) &&
(mode->hsync_end - mode->hsync_start == 32) &&
(mode->vsync_start - mode->vdisplay == 3);
}
/*
* drm_mode_find_dmt - Create a copy of a mode if present in DMT
* @dev: Device to duplicate against
* @hsize: Mode width
* @vsize: Mode height
* @fresh: Mode refresh rate
* @rb: Mode reduced-blanking-ness
*
* Walk the DMT mode list looking for a match for the given parameters.
* Return a newly allocated copy of the mode, or NULL if not found.
*/
struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
int hsize, int vsize, int fresh,
bool rb)
{
struct drm_display_mode *mode = NULL;
int i;
for (i = 0; i < drm_num_dmt_modes; i++) {
struct drm_display_mode *ptr = &drm_dmt_modes[i];
if (hsize == ptr->hdisplay &&
vsize == ptr->vdisplay &&
fresh == drm_mode_vrefresh(ptr)) {
/* get the expected default mode */
mode = drm_mode_duplicate(dev, ptr);
break;
}
if (hsize != ptr->hdisplay)
continue;
if (vsize != ptr->vdisplay)
continue;
if (fresh != drm_mode_vrefresh(ptr))
continue;
if (rb != mode_is_rb(ptr))
continue;
return drm_mode_duplicate(dev, ptr);
}
return mode;
return NULL;
}
typedef void detailed_cb(struct detailed_timing *timing, void *closure);
@ -763,10 +787,17 @@ drm_mode_std(struct drm_connector *connector, struct edid *edid,
}
/* check whether it can be found in default mode table */
mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate);
if (drm_monitor_supports_rb(edid)) {
mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate,
true);
if (mode)
return mode;
}
mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate, false);
if (mode)
return mode;
/* okay, generate it */
switch (timing_level) {
case LEVEL_DMT:
break;
@ -780,6 +811,8 @@ drm_mode_std(struct drm_connector *connector, struct edid *edid,
* secondary GTF curve. Please don't do that.
*/
mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
if (!mode)
return NULL;
if (drm_mode_hsync(mode) > drm_gtf2_hbreak(edid)) {
free(mode, DRM_MEM_KMS);
mode = drm_gtf_mode_complex(dev, hsize, vsize,
@ -940,15 +973,6 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
return mode;
}
static bool
mode_is_rb(const struct drm_display_mode *mode)
{
return (mode->htotal - mode->hdisplay == 160) &&
(mode->hsync_end - mode->hdisplay == 80) &&
(mode->hsync_end - mode->hsync_start == 32) &&
(mode->vsync_start - mode->vdisplay == 3);
}
static bool
mode_in_hsync_range(struct drm_display_mode *mode,
struct edid *edid, u8 *t)
@ -1026,12 +1050,8 @@ mode_in_range(struct drm_display_mode *mode, struct edid *edid,
return true;
}
/*
* XXX If drm_dmt_modes ever regrows the CVT-R modes (and it will) this will
* need to account for them.
*/
static int
drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid,
struct detailed_timing *timing)
{
int i, modes = 0;
@ -1051,17 +1071,110 @@ drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
return modes;
}
/* fix up 1366x768 mode from 1368x768;
* GTF/CVT can't express 1366 width, which isn't divisible by 8
*/
static void fixup_mode_1366x768(struct drm_display_mode *mode)
{
if (mode->hdisplay == 1368 && mode->vdisplay == 768) {
mode->hdisplay = 1366;
mode->hsync_start--;
mode->hsync_end--;
drm_mode_set_name(mode);
}
}
static int
drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
struct detailed_timing *timing)
{
int i, modes = 0;
struct drm_display_mode *newmode;
struct drm_device *dev = connector->dev;
for (i = 0; i < num_extra_modes; i++) {
const struct minimode *m = &extra_modes[i];
newmode = drm_gtf_mode(dev, m->w, m->h, m->r, 0, 0);
if (!newmode)
return modes;
fixup_mode_1366x768(newmode);
if (!mode_in_range(newmode, edid, timing)) {
drm_mode_destroy(dev, newmode);
continue;
}
drm_mode_probed_add(connector, newmode);
modes++;
}
return modes;
}
static int
drm_cvt_modes_for_range(struct drm_connector *connector, struct edid *edid,
struct detailed_timing *timing)
{
int i, modes = 0;
struct drm_display_mode *newmode;
struct drm_device *dev = connector->dev;
bool rb = drm_monitor_supports_rb(edid);
for (i = 0; i < num_extra_modes; i++) {
const struct minimode *m = &extra_modes[i];
newmode = drm_cvt_mode(dev, m->w, m->h, m->r, rb, 0, 0);
if (!newmode)
return modes;
fixup_mode_1366x768(newmode);
if (!mode_in_range(newmode, edid, timing)) {
drm_mode_destroy(dev, newmode);
continue;
}
drm_mode_probed_add(connector, newmode);
modes++;
}
return modes;
}
static void
do_inferred_modes(struct detailed_timing *timing, void *c)
{
struct detailed_mode_closure *closure = c;
struct detailed_non_pixel *data = &timing->data.other_data;
int gtf = (closure->edid->features & DRM_EDID_FEATURE_DEFAULT_GTF);
struct detailed_data_monitor_range *range = &data->data.range;
if (gtf && data->type == EDID_DETAIL_MONITOR_RANGE)
if (data->type != EDID_DETAIL_MONITOR_RANGE)
return;
closure->modes += drm_dmt_modes_for_range(closure->connector,
closure->edid,
timing);
if (!version_greater(closure->edid, 1, 1))
return; /* GTF not defined yet */
switch (range->flags) {
case 0x02: /* secondary gtf, XXX could do more */
case 0x00: /* default gtf */
closure->modes += drm_gtf_modes_for_range(closure->connector,
closure->edid,
timing);
break;
case 0x04: /* cvt, only in 1.4+ */
if (!version_greater(closure->edid, 1, 3))
break;
closure->modes += drm_cvt_modes_for_range(closure->connector,
closure->edid,
timing);
break;
case 0x01: /* just the ranges, no formula */
default:
break;
}
}
static int
@ -1094,8 +1207,8 @@ drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing)
mode = drm_mode_find_dmt(connector->dev,
est3_modes[m].w,
est3_modes[m].h,
est3_modes[m].r
/*, est3_modes[m].rb */);
est3_modes[m].r,
est3_modes[m].rb);
if (mode) {
drm_mode_probed_add(connector, mode);
modes++;
@ -1343,6 +1456,8 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
#define VENDOR_BLOCK 0x03
#define SPEAKER_BLOCK 0x04
#define EDID_BASIC_AUDIO (1 << 6)
#define EDID_CEA_YCRCB444 (1 << 5)
#define EDID_CEA_YCRCB422 (1 << 4)
/**
* Search EDID for CEA extension block.
@ -1647,13 +1762,29 @@ static void drm_add_display_info(struct edid *edid,
info->bpc = 0;
info->color_formats = 0;
/* Only defined for 1.4 with digital displays */
if (edid->revision < 4)
if (edid->revision < 3)
return;
if (!(edid->input & DRM_EDID_INPUT_DIGITAL))
return;
/* Get data from CEA blocks if present */
edid_ext = drm_find_cea_extension(edid);
if (edid_ext) {
info->cea_rev = edid_ext[1];
/* The existence of a CEA block should imply RGB support */
info->color_formats = DRM_COLOR_FORMAT_RGB444;
if (edid_ext[3] & EDID_CEA_YCRCB444)
info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
if (edid_ext[3] & EDID_CEA_YCRCB422)
info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
}
/* Only defined for 1.4 with digital displays */
if (edid->revision < 4)
return;
switch (edid->input & DRM_EDID_DIGITAL_DEPTH_MASK) {
case DRM_EDID_DIGITAL_DEPTH_6:
info->bpc = 6;
@ -1679,18 +1810,11 @@ static void drm_add_display_info(struct edid *edid,
break;
}
info->color_formats = DRM_COLOR_FORMAT_RGB444;
if (info->color_formats & DRM_EDID_FEATURE_RGB_YCRCB444)
info->color_formats = DRM_COLOR_FORMAT_YCRCB444;
if (info->color_formats & DRM_EDID_FEATURE_RGB_YCRCB422)
info->color_formats = DRM_COLOR_FORMAT_YCRCB422;
/* Get data from CEA blocks if present */
edid_ext = drm_find_cea_extension(edid);
if (!edid_ext)
return;
info->cea_rev = edid_ext[1];
info->color_formats |= DRM_COLOR_FORMAT_RGB444;
if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB444)
info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422)
info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
}
/**

View File

@ -93,12 +93,26 @@ struct detailed_data_monitor_range {
u8 min_hfreq_khz;
u8 max_hfreq_khz;
u8 pixel_clock_mhz; /* need to multiply by 10 */
u16 sec_gtf_toggle; /* A000=use above, 20=use below */
u8 hfreq_start_khz; /* need to multiply by 2 */
u8 c; /* need to divide by 2 */
u16 m;
u8 k;
u8 j; /* need to divide by 2 */
u8 flags;
union {
struct {
u8 reserved;
u8 hfreq_start_khz; /* need to multiply by 2 */
u8 c; /* need to divide by 2 */
u16 m;
u8 k;
u8 j; /* need to divide by 2 */
} __attribute__((packed)) gtf2;
struct {
u8 version;
u8 data1; /* high 6 bits: extra clock resolution */
u8 data2; /* plus low 2 of above: max hactive */
u8 supported_aspects;
u8 flags; /* preferred aspect and blanking support */
u8 supported_scalings;
u8 preferred_refresh;
} __attribute__((packed)) cvt;
} formula;
} __attribute__((packed));
struct detailed_data_wpindex {

View File

@ -31,7 +31,6 @@
/*
* Autogenerated from the DMT spec.
* This table is copied from xfree86/modes/xf86EdidModes.c.
* But the mode with Reduced blank feature is deleted.
*/
static struct drm_display_mode drm_dmt_modes[] = {
/* 640x350@85Hz */
@ -82,6 +81,10 @@ static struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 56250, 800, 832,
896, 1048, 0, 600, 601, 604, 631, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 800x600@120Hz RB */
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 73250, 800, 848,
880, 960, 0, 600, 603, 607, 636, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 848x480@60Hz */
{ DRM_MODE("848x480", DRM_MODE_TYPE_DRIVER, 33750, 848, 864,
976, 1088, 0, 480, 486, 494, 517, 0,
@ -107,10 +110,18 @@ static struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072,
1168, 1376, 0, 768, 769, 772, 808, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1024x768@120Hz RB */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 115500, 1024, 1072,
1104, 1184, 0, 768, 771, 775, 813, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1152x864@75Hz */
{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
1344, 1600, 0, 864, 865, 868, 900, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x768@60Hz RB */
{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 68250, 1280, 1328,
1360, 1440, 0, 768, 771, 778, 790, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1280x768@60Hz */
{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
1472, 1664, 0, 768, 771, 778, 798, 0,
@ -123,6 +134,14 @@ static struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 117500, 1280, 1360,
1496, 1712, 0, 768, 771, 778, 809, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x768@120Hz RB */
{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 140250, 1280, 1328,
1360, 1440, 0, 768, 771, 778, 813, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1280x800@60Hz RB */
{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 71000, 1280, 1328,
1360, 1440, 0, 800, 803, 809, 823, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1280x800@60Hz */
{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
1480, 1680, 0, 800, 803, 809, 831, 0,
@ -135,6 +154,10 @@ static struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 122500, 1280, 1360,
1496, 1712, 0, 800, 803, 809, 843, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x800@120Hz RB */
{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 146250, 1280, 1328,
1360, 1440, 0, 800, 803, 809, 847, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1280x960@60Hz */
{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
1488, 1800, 0, 960, 961, 964, 1000, 0,
@ -143,6 +166,10 @@ static struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1344,
1504, 1728, 0, 960, 961, 964, 1011, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x960@120Hz RB */
{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 175500, 1280, 1328,
1360, 1440, 0, 960, 963, 967, 1017, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1280x1024@60Hz */
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
@ -155,22 +182,42 @@ static struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 157500, 1280, 1344,
1504, 1728, 0, 1024, 1025, 1028, 1072, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x1024@120Hz RB */
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 187250, 1280, 1328,
1360, 1440, 0, 1024, 1027, 1034, 1084, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1360x768@60Hz */
{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
1536, 1792, 0, 768, 771, 777, 795, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1440x1050@60Hz */
/* 1360x768@120Hz RB */
{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 148250, 1360, 1408,
1440, 1520, 0, 768, 771, 776, 813, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1400x1050@60Hz RB */
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 101000, 1400, 1448,
1480, 1560, 0, 1050, 1053, 1057, 1080, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1400x1050@60Hz */
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1440x1050@75Hz */
/* 1400x1050@75Hz */
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 156000, 1400, 1504,
1648, 1896, 0, 1050, 1053, 1057, 1099, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1440x1050@85Hz */
/* 1400x1050@85Hz */
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 179500, 1400, 1504,
1656, 1912, 0, 1050, 1053, 1057, 1105, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1400x1050@120Hz RB */
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 208000, 1400, 1448,
1480, 1560, 0, 1050, 1053, 1057, 1112, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1440x900@60Hz RB */
{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 88750, 1440, 1488,
1520, 1600, 0, 900, 903, 909, 926, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1440x900@60Hz */
{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
1672, 1904, 0, 900, 903, 909, 934, 0,
@ -183,6 +230,10 @@ static struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 157000, 1440, 1544,
1696, 1952, 0, 900, 903, 909, 948, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1440x900@120Hz RB */
{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 182750, 1440, 1488,
1520, 1600, 0, 900, 903, 909, 953, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1600x1200@60Hz */
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
@ -203,6 +254,14 @@ static struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 229500, 1600, 1664,
1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1600x1200@120Hz RB */
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 268250, 1600, 1648,
1680, 1760, 0, 1200, 1203, 1207, 1271, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1680x1050@60Hz RB */
{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 119000, 1680, 1728,
1760, 1840, 0, 1050, 1053, 1059, 1080, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1680x1050@60Hz */
{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
@ -215,15 +274,23 @@ static struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 214750, 1680, 1808,
1984, 2288, 0, 1050, 1053, 1059, 1105, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1680x1050@120Hz RB */
{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 245500, 1680, 1728,
1760, 1840, 0, 1050, 1053, 1059, 1112, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1792x1344@60Hz */
{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1729x1344@75Hz */
/* 1792x1344@75Hz */
{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 261000, 1792, 1888,
2104, 2456, 0, 1344, 1345, 1348, 1417, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1853x1392@60Hz */
/* 1792x1344@120Hz RB */
{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 333250, 1792, 1840,
1872, 1952, 0, 1344, 1347, 1351, 1423, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1856x1392@60Hz */
{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
@ -231,6 +298,14 @@ static struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 288000, 1856, 1984,
2208, 2560, 0, 1392, 1395, 1399, 1500, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1856x1392@120Hz RB */
{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 356500, 1856, 1904,
1936, 2016, 0, 1392, 1395, 1399, 1474, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1920x1200@60Hz RB */
{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 154000, 1920, 1968,
2000, 2080, 0, 1200, 1203, 1209, 1235, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1920x1200@60Hz */
{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
@ -243,6 +318,10 @@ static struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 281250, 1920, 2064,
2272, 2624, 0, 1200, 1203, 1209, 1262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1920x1200@120Hz RB */
{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 317000, 1920, 1968,
2000, 2080, 0, 1200, 1203, 1209, 1271, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1920x1440@60Hz */
{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
@ -251,6 +330,14 @@ static struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2064,
2288, 2640, 0, 1440, 1441, 1444, 1500, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1920x1440@120Hz RB */
{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 380500, 1920, 1968,
2000, 2080, 0, 1440, 1443, 1447, 1525, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 2560x1600@60Hz RB */
{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 268500, 2560, 2608,
2640, 2720, 0, 1600, 1603, 1609, 1646, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 2560x1600@60Hz */
{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
@ -263,6 +350,11 @@ static struct drm_display_mode drm_dmt_modes[] = {
{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 505250, 2560, 2768,
3048, 3536, 0, 1600, 1603, 1609, 1682, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 2560x1600@120Hz RB */
{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 552750, 2560, 2608,
2640, 2720, 0, 1600, 1603, 1609, 1694, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
};
static const int drm_num_dmt_modes =
sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
@ -321,12 +413,14 @@ static struct drm_display_mode edid_est_modes[] = {
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */
};
static const struct {
struct minimode {
short w;
short h;
short r;
short rb;
} est3_modes[] = {
};
static const struct minimode est3_modes[] = {
/* byte 6 */
{ 640, 350, 85, 0 },
{ 640, 400, 85, 0 },
@ -378,4 +472,304 @@ static const struct {
{ 1920, 1440, 60, 0 },
{ 1920, 1440, 75, 0 },
};
static const int num_est3_modes = sizeof(est3_modes) / sizeof(est3_modes[0]);
static const int num_est3_modes = DRM_ARRAY_SIZE(est3_modes);
static const struct minimode extra_modes[] = {
{ 1024, 576, 60, 0 },
{ 1366, 768, 60, 0 },
{ 1600, 900, 60, 0 },
{ 1680, 945, 60, 0 },
{ 1920, 1080, 60, 0 },
{ 2048, 1152, 60, 0 },
{ 2048, 1536, 60, 0 },
};
static const int num_extra_modes = DRM_ARRAY_SIZE(extra_modes);
/*
* Probably taken from CEA-861 spec.
* This table is converted from xorg's hw/xfree86/modes/xf86EdidModes.c.
*/
static const struct drm_display_mode edid_cea_modes[] = {
/* 1 - 640x480@60Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
752, 800, 0, 480, 490, 492, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 2 - 720x480@60Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 3 - 720x480@60Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 4 - 1280x720@60Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
1430, 1650, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 5 - 1920x1080i@60Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 6 - 1440x480i@60Hz */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
/* 7 - 1440x480i@60Hz */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
/* 8 - 1440x240@60Hz */
{ DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
1602, 1716, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK) },
/* 9 - 1440x240@60Hz */
{ DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
1602, 1716, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK) },
/* 10 - 2880x480i@60Hz */
{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 11 - 2880x480i@60Hz */
{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 12 - 2880x240@60Hz */
{ DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 13 - 2880x240@60Hz */
{ DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 14 - 1440x480@60Hz */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
1596, 1716, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 15 - 1440x480@60Hz */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
1596, 1716, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 16 - 1920x1080@60Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 17 - 720x576@50Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 18 - 720x576@50Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 19 - 1280x720@50Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
1760, 1980, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 20 - 1920x1080i@50Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 21 - 1440x576i@50Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
/* 22 - 1440x576i@50Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
/* 23 - 1440x288@50Hz */
{ DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
1590, 1728, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK) },
/* 24 - 1440x288@50Hz */
{ DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
1590, 1728, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK) },
/* 25 - 2880x576i@50Hz */
{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 26 - 2880x576i@50Hz */
{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 27 - 2880x288@50Hz */
{ DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 28 - 2880x288@50Hz */
{ DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 29 - 1440x576@50Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1592, 1728, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 30 - 1440x576@50Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1592, 1728, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 31 - 1920x1080@50Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 32 - 1920x1080@24Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 33 - 1920x1080@25Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 34 - 1920x1080@30Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 35 - 2880x480@60Hz */
{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
3192, 3432, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 36 - 2880x480@60Hz */
{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
3192, 3432, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 37 - 2880x576@50Hz */
{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
3184, 3456, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 38 - 2880x576@50Hz */
{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
3184, 3456, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 39 - 1920x1080i@50Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
2120, 2304, 0, 1080, 1126, 1136, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 40 - 1920x1080i@100Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 41 - 1280x720@100Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
1760, 1980, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 42 - 720x576@100Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 43 - 720x576@100Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 44 - 1440x576i@100Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK) },
/* 45 - 1440x576i@100Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK) },
/* 46 - 1920x1080i@120Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 47 - 1280x720@120Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
1430, 1650, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 48 - 720x480@120Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 49 - 720x480@120Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 50 - 1440x480i@120Hz */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
/* 51 - 1440x480i@120Hz */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
/* 52 - 720x576@200Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 53 - 720x576@200Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 54 - 1440x576i@200Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
/* 55 - 1440x576i@200Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
/* 56 - 720x480@240Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 57 - 720x480@240Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 58 - 1440x480i@240Hz */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
/* 59 - 1440x480i@240Hz */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
/* 60 - 1280x720@24Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
3080, 3300, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 61 - 1280x720@25Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
3740, 3960, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 62 - 1280x720@30Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
3080, 3300, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 63 - 1920x1080@120Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 64 - 1920x1080@100Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
};
static const int drm_num_cea_modes = DRM_ARRAY_SIZE(edid_cea_modes);
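For orientation, the table is indexed by the 1-based CEA Video Identification Code (VIC) minus one; a minimal lookup helper (hypothetical, not part of this commit) would be:
/*
 * Hypothetical helper: map a 1-based CEA VIC onto the 0-based
 * edid_cea_modes[] table above, returning NULL for out-of-range codes.
 */
static const struct drm_display_mode *
cea_mode_for_vic(uint8_t vic)
{

	if (vic == 0 || vic > drm_num_cea_modes)
		return (NULL);
	return (&edid_cea_modes[vic - 1]);
}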

View File

@ -202,6 +202,9 @@ static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc)
{
uint16_t *r_base, *g_base, *b_base;
if (crtc->funcs->gamma_set == NULL)
return;
r_base = crtc->gamma_store;
g_base = r_base + crtc->gamma_size;
b_base = g_base + crtc->gamma_size;
@ -1225,7 +1228,7 @@ static bool drm_target_cloned(struct drm_fb_helper *fb_helper,
/* try and find a 1024x768 mode on each connector */
can_clone = true;
dmt_mode = drm_mode_find_dmt(fb_helper->dev, 1024, 768, 60);
dmt_mode = drm_mode_find_dmt(fb_helper->dev, 1024, 768, 60, false);
for (i = 0; i < fb_helper->connector_count; i++) {

View File

@ -250,6 +250,10 @@ int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
case DRM_CAP_DUMB_PREFER_SHADOW:
req->value = dev->mode_config.prefer_shadow;
break;
case DRM_CAP_PRIME:
req->value |= false /* XXXKIB dev->driver->prime_fd_to_handle */ ? DRM_PRIME_CAP_IMPORT : 0;
req->value |= false /* XXXKIB dev->driver->prime_handle_to_fd */ ? DRM_PRIME_CAP_EXPORT : 0;
break;
case DRM_CAP_TIMESTAMP_MONOTONIC:
req->value = drm_timestamp_monotonic;
break;
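For reference, a userspace check of the new PRIME capability bits might look like the sketch below (assumptions: /dev/dri/card0 is the device node and the updated drm.h is on the include path; with the XXXKIB placeholders above, the driver still reports neither bit):
/*
 * Minimal sketch: query DRM_CAP_PRIME and decode the import/export bits.
 */
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdio.h>
#include "drm.h"

int
main(void)
{
	struct drm_get_cap cap = { .capability = DRM_CAP_PRIME };
	int fd = open("/dev/dri/card0", O_RDWR);

	if (fd < 0 || ioctl(fd, DRM_IOCTL_GET_CAP, &cap) != 0)
		return (1);
	printf("PRIME import: %d export: %d\n",
	    !!(cap.value & DRM_PRIME_CAP_IMPORT),
	    !!(cap.value & DRM_PRIME_CAP_EXPORT));
	return (0);
}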

View File

@ -639,7 +639,7 @@ drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
struct timeval *tvblank, unsigned flags)
{
int ret = 0;
int ret;
/* Define requested maximum error on timestamps (nanoseconds). */
int max_error = (int) drm_timestamp_precision * 1000;
@ -930,18 +930,15 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_modeset_ctl *modeset = data;
int ret = 0;
unsigned int crtc;
/* If drm_vblank_init() hasn't been called yet, just no-op */
if (!dev->num_crtcs)
goto out;
return 0;
crtc = modeset->crtc;
if (crtc >= dev->num_crtcs) {
ret = -EINVAL;
goto out;
}
if (crtc >= dev->num_crtcs)
return -EINVAL;
switch (modeset->cmd) {
case _DRM_PRE_MODESET:
@ -951,12 +948,11 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
drm_vblank_post_modeset(dev, crtc);
break;
default:
ret = -EINVAL;
return -EINVAL;
break;
}
out:
return ret;
return 0;
}
static void
@ -1054,7 +1050,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
union drm_wait_vblank *vblwait = data;
int ret = 0;
int ret;
unsigned int flags, seq, crtc, high_crtc;
if (/*(!drm_dev_to_irq(dev)) || */(!dev->irq_enabled))

View File

@ -125,3 +125,11 @@ drm_clflush_pages(vm_page_t *pages, unsigned long num_pages)
pmap_invalidate_cache_pages(pages, num_pages);
}
void
drm_clflush_virt_range(char *addr, unsigned long length)
{
pmap_invalidate_cache_range((vm_offset_t)addr,
(vm_offset_t)addr + length, TRUE);
}
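A minimal usage sketch for the new range flush, assuming the caller owns the kernel mapping (the memset() is only a stand-in for whatever CPU writes precede the flush):
/*
 * Hedged sketch: flush a CPU-dirtied kernel virtual range before a
 * non-snooping GPU engine reads it.
 */
static void
example_publish_to_gpu(char *vaddr, unsigned long len)
{

	memset(vaddr, 0xa5, len);		/* CPU writes the buffer */
	drm_clflush_virt_range(vaddr, len);	/* then flush that range */
}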

View File

@ -228,6 +228,7 @@ struct drm_mode_get_connector {
#define DRM_MODE_PROP_IMMUTABLE (1<<2)
#define DRM_MODE_PROP_ENUM (1<<3) /* enumerated type with text strings */
#define DRM_MODE_PROP_BLOB (1<<4)
#define DRM_MODE_PROP_BITMASK (1<<5) /* bitmask of enumerated types */
struct drm_mode_property_enum {
uint64_t value;
@ -252,6 +253,21 @@ struct drm_mode_connector_set_property {
uint32_t connector_id;
};
struct drm_mode_obj_get_properties {
uint64_t props_ptr;
uint64_t prop_values_ptr;
uint32_t count_props;
uint32_t obj_id;
uint32_t obj_type;
};
struct drm_mode_obj_set_property {
uint64_t value;
uint32_t prop_id;
uint32_t obj_id;
uint32_t obj_type;
};
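A hedged userspace sketch of driving the new per-object property structs, using the usual two-pass count-then-fetch pattern (fd, obj_id and obj_type are caller-supplied placeholders; stdio.h, string.h, stdint.h, sys/ioctl.h and the drm mode headers are assumed to be included):
/*
 * First call with count_props == 0 to learn the count, then call again
 * with the id/value arrays attached.
 */
static int
dump_obj_properties(int fd, uint32_t obj_id, uint32_t obj_type)
{
	struct drm_mode_obj_get_properties arg;
	uint32_t props[64];
	uint64_t values[64];
	uint32_t i;

	memset(&arg, 0, sizeof(arg));
	arg.obj_id = obj_id;
	arg.obj_type = obj_type;
	if (ioctl(fd, DRM_IOCTL_MODE_OBJ_GETPROPERTIES, &arg) != 0 ||
	    arg.count_props > 64)
		return (-1);
	arg.props_ptr = (uintptr_t)props;
	arg.prop_values_ptr = (uintptr_t)values;
	if (ioctl(fd, DRM_IOCTL_MODE_OBJ_GETPROPERTIES, &arg) != 0)
		return (-1);
	for (i = 0; i < arg.count_props; i++)
		printf("prop %u = %ju\n", props[i], (uintmax_t)values[i]);
	return (0);
}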
struct drm_mode_get_blob {
uint32_t blob_id;
uint32_t length;

View File

@ -48,6 +48,13 @@
{0x8086, 0x0162, CHIP_I9XX|CHIP_I915, "Intel IvyBridge"}, \
{0x8086, 0x0166, CHIP_I9XX|CHIP_I915, "Intel IvyBridge (M)"}, \
{0x8086, 0x016A, CHIP_I9XX|CHIP_I915, "Intel IvyBridge (S)"}, \
{0x8086, 0x0402, CHIP_I9XX|CHIP_I915, "Intel Haswell"}, \
{0x8086, 0x0412, CHIP_I9XX|CHIP_I915, "Intel Haswell"}, \
{0x8086, 0x040a, CHIP_I9XX|CHIP_I915, "Intel Haswell (S)"}, \
{0x8086, 0x041a, CHIP_I9XX|CHIP_I915, "Intel Haswell (S)"}, \
{0x8086, 0x0406, CHIP_I9XX|CHIP_I915, "Intel Haswell (M)"}, \
{0x8086, 0x0416, CHIP_I9XX|CHIP_I915, "Intel Haswell (M)"}, \
{0x8086, 0x0c16, CHIP_I9XX|CHIP_I915, "Intel Haswell (SDV)"}, \
{0x8086, 0x2562, CHIP_I8XX, "Intel i845G GMCH"}, \
{0x8086, 0x2572, CHIP_I8XX, "Intel i865G GMCH"}, \
{0x8086, 0x2582, CHIP_I9XX|CHIP_I915, "Intel i915G"}, \

View File

@ -45,7 +45,7 @@ drm_setmaster_ioctl(struct drm_device *dev, void *data,
if (file_priv->master != 0)
return (0);
return (-EPERM);
return (EPERM);
}
int
@ -55,6 +55,6 @@ drm_dropmaster_ioctl(struct drm_device *dev, void *data,
DRM_DEBUG("dropmaster\n");
if (file_priv->master != 0)
return (-EINVAL);
return (EINVAL);
return (0);
}

View File

@ -43,7 +43,6 @@ enum {
FLUSHING_LIST,
INACTIVE_LIST,
PINNED_LIST,
DEFERRED_FREE_LIST,
};
static const char *
@ -135,6 +134,8 @@ describe_obj(struct sbuf *m, struct drm_i915_gem_object *obj)
obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
if (obj->base.name)
sbuf_printf(m, " (name: %d)", obj->base.name);
if (obj->pin_display)
sbuf_printf(m, " (display)");
if (obj->fence_reg != I915_FENCE_REG_NONE)
sbuf_printf(m, " (fence: %d)", obj->fence_reg);
if (obj->gtt_space != NULL)
@ -175,18 +176,10 @@ i915_gem_object_list_info(struct drm_device *dev, struct sbuf *m, void *data)
sbuf_printf(m, "Inactive:\n");
head = &dev_priv->mm.inactive_list;
break;
case PINNED_LIST:
sbuf_printf(m, "Pinned:\n");
head = &dev_priv->mm.pinned_list;
break;
case FLUSHING_LIST:
sbuf_printf(m, "Flushing:\n");
head = &dev_priv->mm.flushing_list;
break;
case DEFERRED_FREE_LIST:
sbuf_printf(m, "Deferred free:\n");
head = &dev_priv->mm.deferred_free_list;
break;
default:
DRM_UNLOCK(dev);
return (EINVAL);
@ -244,21 +237,11 @@ i915_gem_object_info(struct drm_device *dev, struct sbuf *m, void *data)
sbuf_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
count, mappable_count, size, mappable_size);
size = count = mappable_size = mappable_count = 0;
count_objects(&dev_priv->mm.pinned_list, mm_list);
sbuf_printf(m, " %u [%u] pinned objects, %zu [%zu] bytes\n",
count, mappable_count, size, mappable_size);
size = count = mappable_size = mappable_count = 0;
count_objects(&dev_priv->mm.inactive_list, mm_list);
sbuf_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
count, mappable_count, size, mappable_size);
size = count = mappable_size = mappable_count = 0;
count_objects(&dev_priv->mm.deferred_free_list, mm_list);
sbuf_printf(m, " %u [%u] freed objects, %zu [%zu] bytes\n",
count, mappable_count, size, mappable_size);
size = count = mappable_size = mappable_count = 0;
list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
if (obj->fault_mappable) {
@ -283,9 +266,10 @@ i915_gem_object_info(struct drm_device *dev, struct sbuf *m, void *data)
}
static int
i915_gem_gtt_info(struct drm_device *dev, struct sbuf *m, void* data)
i915_gem_gtt_info(struct drm_device *dev, struct sbuf *m, void *data)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uintptr_t list = (uintptr_t)data;
struct drm_i915_gem_object *obj;
size_t total_obj_size, total_gtt_size;
int count;
@ -295,6 +279,9 @@ i915_gem_gtt_info(struct drm_device *dev, struct sbuf *m, void* data)
total_obj_size = total_gtt_size = count = 0;
list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
if (list == PINNED_LIST && obj->pin_count == 0)
continue;
sbuf_printf(m, " ");
describe_obj(m, obj);
sbuf_printf(m, "\n");
@ -420,10 +407,6 @@ i915_ring_seqno_info(struct sbuf *m, struct intel_ring_buffer *ring)
if (ring->get_seqno) {
sbuf_printf(m, "Current sequence (%s): %d\n",
ring->name, ring->get_seqno(ring));
sbuf_printf(m, "Waiter sequence (%s): %d\n",
ring->name, ring->waiting_seqno);
sbuf_printf(m, "IRQ sequence (%s): %d\n",
ring->name, ring->irq_seqno);
}
}
@ -451,7 +434,45 @@ i915_interrupt_info(struct drm_device *dev, struct sbuf *m, void *data)
if (sx_xlock_sig(&dev->dev_struct_lock))
return (EINTR);
if (!HAS_PCH_SPLIT(dev)) {
if (IS_VALLEYVIEW(dev)) {
sbuf_printf(m, "Display IER:\t%08x\n",
I915_READ(VLV_IER));
sbuf_printf(m, "Display IIR:\t%08x\n",
I915_READ(VLV_IIR));
sbuf_printf(m, "Display IIR_RW:\t%08x\n",
I915_READ(VLV_IIR_RW));
sbuf_printf(m, "Display IMR:\t%08x\n",
I915_READ(VLV_IMR));
for_each_pipe(pipe)
sbuf_printf(m, "Pipe %c stat:\t%08x\n",
pipe_name(pipe),
I915_READ(PIPESTAT(pipe)));
sbuf_printf(m, "Master IER:\t%08x\n",
I915_READ(VLV_MASTER_IER));
sbuf_printf(m, "Render IER:\t%08x\n",
I915_READ(GTIER));
sbuf_printf(m, "Render IIR:\t%08x\n",
I915_READ(GTIIR));
sbuf_printf(m, "Render IMR:\t%08x\n",
I915_READ(GTIMR));
sbuf_printf(m, "PM IER:\t\t%08x\n",
I915_READ(GEN6_PMIER));
sbuf_printf(m, "PM IIR:\t\t%08x\n",
I915_READ(GEN6_PMIIR));
sbuf_printf(m, "PM IMR:\t\t%08x\n",
I915_READ(GEN6_PMIMR));
sbuf_printf(m, "Port hotplug:\t%08x\n",
I915_READ(PORT_HOTPLUG_EN));
sbuf_printf(m, "DPFLIPSTAT:\t%08x\n",
I915_READ(VLV_DPFLIPSTAT));
sbuf_printf(m, "DPINVGTT:\t%08x\n",
I915_READ(DPINVGTT));
} else if (!HAS_PCH_SPLIT(dev)) {
sbuf_printf(m, "Interrupt enable: %08x\n",
I915_READ(IER));
sbuf_printf(m, "Interrupt identity: %08x\n",
@ -544,61 +565,6 @@ i915_hws_info(struct drm_device *dev, struct sbuf *m, void *data)
return (0);
}
static int
i915_ringbuffer_data(struct drm_device *dev, struct sbuf *m, void *data)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
if (sx_xlock_sig(&dev->dev_struct_lock))
return (EINTR);
ring = &dev_priv->rings[(uintptr_t)data];
if (!ring->obj) {
sbuf_printf(m, "No ringbuffer setup\n");
} else {
u8 *virt = ring->virtual_start;
uint32_t off;
for (off = 0; off < ring->size; off += 4) {
uint32_t *ptr = (uint32_t *)(virt + off);
sbuf_printf(m, "%08x : %08x\n", off, *ptr);
}
}
DRM_UNLOCK(dev);
return (0);
}
static int
i915_ringbuffer_info(struct drm_device *dev, struct sbuf *m, void *data)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
ring = &dev_priv->rings[(uintptr_t)data];
if (ring->size == 0)
return (0);
if (sx_xlock_sig(&dev->dev_struct_lock))
return (EINTR);
sbuf_printf(m, "Ring %s:\n", ring->name);
sbuf_printf(m, " Head : %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR);
sbuf_printf(m, " Tail : %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR);
sbuf_printf(m, " Size : %08x\n", ring->size);
sbuf_printf(m, " Active : %08x\n", intel_ring_get_active_head(ring));
sbuf_printf(m, " NOPID : %08x\n", I915_READ_NOPID(ring));
if (IS_GEN6(dev) || IS_GEN7(dev)) {
sbuf_printf(m, " Sync 0 : %08x\n", I915_READ_SYNC_0(ring));
sbuf_printf(m, " Sync 1 : %08x\n", I915_READ_SYNC_1(ring));
}
sbuf_printf(m, " Control : %08x\n", I915_READ_CTL(ring));
sbuf_printf(m, " Start : %08x\n", I915_READ_START(ring));
DRM_UNLOCK(dev);
return (0);
}
static const char *
ring_str(int ring)
{
@ -677,6 +643,7 @@ i915_ring_error_state(struct sbuf *m, struct drm_device *dev,
struct drm_i915_error_state *error, unsigned ring)
{
MPASS((ring < I915_NUM_RINGS)); /* shut up confused gcc */
sbuf_printf(m, "%s command stream:\n", ring_str(ring));
sbuf_printf(m, " HEAD: 0x%08x\n", error->head[ring]);
sbuf_printf(m, " TAIL: 0x%08x\n", error->tail[ring]);
@ -691,8 +658,8 @@ i915_ring_error_state(struct sbuf *m, struct drm_device *dev,
if (INTEL_INFO(dev)->gen >= 4)
sbuf_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]);
sbuf_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
sbuf_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]);
if (INTEL_INFO(dev)->gen >= 6) {
sbuf_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]);
sbuf_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
sbuf_printf(m, " SYNC_0: 0x%08x\n",
error->semaphore_mboxes[ring][0]);
@ -700,21 +667,28 @@ i915_ring_error_state(struct sbuf *m, struct drm_device *dev,
error->semaphore_mboxes[ring][1]);
}
sbuf_printf(m, " seqno: 0x%08x\n", error->seqno[ring]);
sbuf_printf(m, " waiting: %s\n", yesno(error->waiting[ring]));
sbuf_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
sbuf_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
}
static int i915_error_state(struct drm_device *dev, struct sbuf *m,
static int
i915_error_state(struct drm_device *dev, struct sbuf *m,
void *unused)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_error_state *error;
struct intel_ring_buffer *ring;
int i, j, page, offset, elt;
mtx_lock(&dev_priv->error_lock);
if (!dev_priv->first_error) {
error = dev_priv->first_error;
if (error != NULL)
refcount_acquire(&error->ref);
mtx_unlock(&dev_priv->error_lock);
if (error == NULL) {
sbuf_printf(m, "no error state collected\n");
goto out;
return (0);
}
error = dev_priv->first_error;
@ -723,6 +697,7 @@ static int i915_error_state(struct drm_device *dev, struct sbuf *m,
(intmax_t)error->time.tv_usec);
sbuf_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
sbuf_printf(m, "EIR: 0x%08x\n", error->eir);
sbuf_printf(m, "IER: 0x%08x\n", error->ier);
sbuf_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
for (i = 0; i < dev_priv->num_fence_regs; i++)
@ -734,11 +709,8 @@ static int i915_error_state(struct drm_device *dev, struct sbuf *m,
sbuf_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
}
i915_ring_error_state(m, dev, error, RCS);
if (HAS_BLT(dev))
i915_ring_error_state(m, dev, error, BCS);
if (HAS_BSD(dev))
i915_ring_error_state(m, dev, error, VCS);
for_each_ring(ring, dev_priv, i)
i915_ring_error_state(m, dev, error, i);
if (error->active_bo)
print_error_buffers(m, "Active",
@ -801,12 +773,28 @@ static int i915_error_state(struct drm_device *dev, struct sbuf *m,
if (error->display)
intel_display_print_error_state(m, dev, error->display);
out:
mtx_unlock(&dev_priv->error_lock);
if (refcount_release(&error->ref))
i915_error_state_free(error);
return (0);
}
static int
i915_error_state_w(struct drm_device *dev, const char *str, void *unused)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_error_state *error;
DRM_DEBUG_DRIVER("Resetting error state\n");
mtx_lock(&dev_priv->error_lock);
error = dev_priv->first_error;
dev_priv->first_error = NULL;
mtx_unlock(&dev_priv->error_lock);
if (error != NULL && refcount_release(&error->ref))
i915_error_state_free(error);
return (0);
}
static int
i915_rstdby_delays(struct drm_device *dev, struct sbuf *m, void *unused)
{
@ -1081,6 +1069,17 @@ gen6_drpc_info(struct drm_device *dev, struct sbuf *m)
sbuf_printf(m, "Core Power Down: %s\n",
yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
/* Not exactly sure what this is */
sbuf_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
I915_READ(GEN6_GT_GFX_RC6_LOCKED));
sbuf_printf(m, "RC6 residency since boot: %u\n",
I915_READ(GEN6_GT_GFX_RC6));
sbuf_printf(m, "RC6+ residency since boot: %u\n",
I915_READ(GEN6_GT_GFX_RC6p));
sbuf_printf(m, "RC6++ residency since boot: %u\n",
I915_READ(GEN6_GT_GFX_RC6pp));
return 0;
}
@ -1444,6 +1443,52 @@ i915_ppgtt_info(struct drm_device *dev, struct sbuf *m, void *data)
return (0);
}
static int i915_dpio_info(struct drm_device *dev, struct sbuf *m, void *data)
{
struct drm_i915_private *dev_priv;
int ret;
if (!IS_VALLEYVIEW(dev)) {
sbuf_printf(m, "unsupported\n");
return 0;
}
dev_priv = dev->dev_private;
ret = sx_xlock_sig(&dev->mode_config.mutex);
if (ret != 0)
return (EINTR);
sbuf_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL));
sbuf_printf(m, "DPIO_DIV_A: 0x%08x\n",
intel_dpio_read(dev_priv, _DPIO_DIV_A));
sbuf_printf(m, "DPIO_DIV_B: 0x%08x\n",
intel_dpio_read(dev_priv, _DPIO_DIV_B));
sbuf_printf(m, "DPIO_REFSFR_A: 0x%08x\n",
intel_dpio_read(dev_priv, _DPIO_REFSFR_A));
sbuf_printf(m, "DPIO_REFSFR_B: 0x%08x\n",
intel_dpio_read(dev_priv, _DPIO_REFSFR_B));
sbuf_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n",
intel_dpio_read(dev_priv, _DPIO_CORE_CLK_A));
sbuf_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n",
intel_dpio_read(dev_priv, _DPIO_CORE_CLK_B));
sbuf_printf(m, "DPIO_LFP_COEFF_A: 0x%08x\n",
intel_dpio_read(dev_priv, _DPIO_LFP_COEFF_A));
sbuf_printf(m, "DPIO_LFP_COEFF_B: 0x%08x\n",
intel_dpio_read(dev_priv, _DPIO_LFP_COEFF_B));
sbuf_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
intel_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE));
sx_xunlock(&dev->mode_config.mutex);
return 0;
}
static int
i915_debug_set_wedged(SYSCTL_HANDLER_ARGS)
{
@ -1520,57 +1565,77 @@ i915_cache_sharing(SYSCTL_HANDLER_ARGS)
return (0);
}
static int
i915_stop_rings(SYSCTL_HANDLER_ARGS)
{
struct drm_device *dev;
drm_i915_private_t *dev_priv;
int error, val;
dev = arg1;
dev_priv = dev->dev_private;
if (dev_priv == NULL)
return (EBUSY);
DRM_LOCK(dev);
val = dev_priv->stop_rings;
DRM_UNLOCK(dev);
error = sysctl_handle_int(oidp, &val, 0, req);
if (error || !req->newptr)
return (error);
DRM_DEBUG("Stopping rings 0x%08x\n", val);
DRM_LOCK(dev);
dev_priv->stop_rings = val;
DRM_UNLOCK(dev);
return (0);
}
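A hedged userspace sketch of writing the new knob; the sysctl node name below is an assumption for illustration (the real path depends on where i915_sysctl_init() roots the tree), not taken from this diff:
#include <sys/types.h>
#include <sys/sysctl.h>

/* Write a ring mask to the hypothetical hw.dri.0.stop_rings node. */
static int
stop_rings_set(int mask)
{

	return (sysctlbyname("hw.dri.0.stop_rings", NULL, NULL,
	    &mask, sizeof(mask)));
}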
static struct i915_info_sysctl_list {
const char *name;
int (*ptr)(struct drm_device *dev, struct sbuf *m, void *data);
int (*ptr_w)(struct drm_device *dev, const char *str, void *data);
int flags;
void *data;
} i915_info_sysctl_list[] = {
{"i915_capabilities", i915_capabilities, 0},
{"i915_gem_objects", i915_gem_object_info, 0},
{"i915_gem_gtt", i915_gem_gtt_info, 0},
{"i915_gem_active", i915_gem_object_list_info, 0, (void *)ACTIVE_LIST},
{"i915_gem_flushing", i915_gem_object_list_info, 0,
{"i915_capabilities", i915_capabilities, NULL, 0},
{"i915_gem_objects", i915_gem_object_info, NULL, 0},
{"i915_gem_gtt", i915_gem_gtt_info, NULL, 0},
{"i915_gem_pinned", i915_gem_gtt_info, NULL, 0, (void *)PINNED_LIST},
{"i915_gem_active", i915_gem_object_list_info, NULL, 0,
(void *)ACTIVE_LIST},
{"i915_gem_flushing", i915_gem_object_list_info, NULL, 0,
(void *)FLUSHING_LIST},
{"i915_gem_inactive", i915_gem_object_list_info, 0,
{"i915_gem_inactive", i915_gem_object_list_info, NULL, 0,
(void *)INACTIVE_LIST},
{"i915_gem_pinned", i915_gem_object_list_info, 0,
(void *)PINNED_LIST},
{"i915_gem_deferred_free", i915_gem_object_list_info, 0,
(void *)DEFERRED_FREE_LIST},
{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
{"i915_gem_request", i915_gem_request_info, 0},
{"i915_gem_seqno", i915_gem_seqno_info, 0},
{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
{"i915_gem_interrupt", i915_interrupt_info, 0},
{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
{"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RCS},
{"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RCS},
{"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)VCS},
{"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)VCS},
{"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BCS},
{"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BCS},
{"i915_error_state", i915_error_state, 0},
{"i915_rstdby_delays", i915_rstdby_delays, 0},
{"i915_cur_delayinfo", i915_cur_delayinfo, 0},
{"i915_delayfreq_table", i915_delayfreq_table, 0},
{"i915_inttoext_table", i915_inttoext_table, 0},
{"i915_drpc_info", i915_drpc_info, 0},
{"i915_emon_status", i915_emon_status, 0},
{"i915_ring_freq_table", i915_ring_freq_table, 0},
{"i915_gfxec", i915_gfxec, 0},
{"i915_fbc_status", i915_fbc_status, 0},
{"i915_sr_status", i915_sr_status, 0},
{"i915_gem_pageflip", i915_gem_pageflip_info, NULL, 0},
{"i915_gem_request", i915_gem_request_info, NULL, 0},
{"i915_gem_seqno", i915_gem_seqno_info, NULL, 0},
{"i915_gem_fence_regs", i915_gem_fence_regs_info, NULL, 0},
{"i915_gem_interrupt", i915_interrupt_info, NULL, 0},
{"i915_gem_hws", i915_hws_info, NULL, 0, (void *)RCS},
{"i915_gem_hws_blt", i915_hws_info, NULL, 0, (void *)BCS},
{"i915_gem_hws_bsd", i915_hws_info, NULL, 0, (void *)VCS},
{"i915_error_state", i915_error_state, i915_error_state_w, 0},
{"i915_rstdby_delays", i915_rstdby_delays, NULL, 0},
{"i915_cur_delayinfo", i915_cur_delayinfo, NULL, 0},
{"i915_delayfreq_table", i915_delayfreq_table, NULL, 0},
{"i915_inttoext_table", i915_inttoext_table, NULL, 0},
{"i915_drpc_info", i915_drpc_info, NULL, 0},
{"i915_emon_status", i915_emon_status, NULL, 0},
{"i915_ring_freq_table", i915_ring_freq_table, NULL, 0},
{"i915_gfxec", i915_gfxec, NULL, 0},
{"i915_fbc_status", i915_fbc_status, NULL, 0},
{"i915_sr_status", i915_sr_status, NULL, 0},
#if 0
{"i915_opregion", i915_opregion, 0},
{"i915_opregion", i915_opregion, NULL, 0},
#endif
{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
{"i915_context_status", i915_context_status, 0},
{"i915_gen6_forcewake_count_info", i915_gen6_forcewake_count_info, 0},
{"i915_swizzle_info", i915_swizzle_info, 0},
{"i915_ppgtt_info", i915_ppgtt_info, 0},
{"i915_gem_framebuffer", i915_gem_framebuffer_info, NULL, 0},
{"i915_context_status", i915_context_status, NULL, 0},
{"i915_gen6_forcewake_count_info", i915_gen6_forcewake_count_info,
NULL, 0},
{"i915_swizzle_info", i915_swizzle_info, NULL, 0},
{"i915_ppgtt_info", i915_ppgtt_info, NULL, 0},
{"i915_dpio", i915_dpio_info, NULL, 0},
};
struct i915_info_sysctl_thunk {
@ -1586,6 +1651,7 @@ i915_info_sysctl_handler(SYSCTL_HANDLER_ARGS)
struct i915_info_sysctl_thunk *thunk;
struct drm_device *dev;
drm_i915_private_t *dev_priv;
char *p;
int error;
thunk = arg1;
@ -1602,6 +1668,19 @@ i915_info_sysctl_handler(SYSCTL_HANDLER_ARGS)
if (error == 0)
error = sbuf_finish(&m);
sbuf_delete(&m);
if (error != 0 || req->newptr == NULL)
return (error);
if (req->newlen > 2048)
return (E2BIG);
p = malloc(req->newlen + 1, M_TEMP, M_WAITOK);
error = SYSCTL_IN(req, p, req->newlen);
if (error != 0)
goto out;
p[req->newlen] = '\0';
error = i915_info_sysctl_list[thunk->idx].ptr_w(dev, p,
thunk->arg);
out:
free(p, M_TEMP);
return (error);
}
@ -1632,7 +1711,9 @@ i915_sysctl_init(struct drm_device *dev, struct sysctl_ctx_list *ctx,
return (ENOMEM);
for (i = 0; i < DRM_ARRAY_SIZE(i915_info_sysctl_list); i++) {
oid = SYSCTL_ADD_OID(ctx, SYSCTL_CHILDREN(info), OID_AUTO,
i915_info_sysctl_list[i].name, CTLTYPE_STRING | CTLFLAG_RD,
i915_info_sysctl_list[i].name, CTLTYPE_STRING |
(i915_info_sysctl_list[i].ptr_w != NULL ? CTLFLAG_RW :
CTLFLAG_RD),
&thunks[i], 0, i915_info_sysctl_handler, "A", NULL);
if (oid == NULL)
return (ENOMEM);
@ -1655,6 +1736,11 @@ i915_sysctl_init(struct drm_device *dev, struct sysctl_ctx_list *ctx,
0, i915_cache_sharing, "I", NULL);
if (oid == NULL)
return (ENOMEM);
oid = SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(top), OID_AUTO,
"stop_rings", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, dev,
0, i915_stop_rings, "I", NULL);
if (oid == NULL)
return (ENOMEM);
oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "sync_exec",
CTLFLAG_RW, &i915_gem_sync_exec_requests, 0, NULL);
if (oid == NULL)

File diff suppressed because it is too large

View File

@ -301,15 +301,15 @@ typedef struct drm_i915_irq_wait {
/* Ioctl to query kernel params:
*/
#define I915_PARAM_IRQ_ACTIVE 1
#define I915_PARAM_ALLOW_BATCHBUFFER 2
#define I915_PARAM_LAST_DISPATCH 3
#define I915_PARAM_CHIPSET_ID 4
#define I915_PARAM_HAS_GEM 5
#define I915_PARAM_NUM_FENCES_AVAIL 6
#define I915_PARAM_HAS_OVERLAY 7
#define I915_PARAM_IRQ_ACTIVE 1
#define I915_PARAM_ALLOW_BATCHBUFFER 2
#define I915_PARAM_LAST_DISPATCH 3
#define I915_PARAM_CHIPSET_ID 4
#define I915_PARAM_HAS_GEM 5
#define I915_PARAM_NUM_FENCES_AVAIL 6
#define I915_PARAM_HAS_OVERLAY 7
#define I915_PARAM_HAS_PAGEFLIPPING 8
#define I915_PARAM_HAS_EXECBUF2 9
#define I915_PARAM_HAS_EXECBUF2 9
#define I915_PARAM_HAS_BSD 10
#define I915_PARAM_HAS_BLT 11
#define I915_PARAM_HAS_RELAXED_FENCING 12
@ -317,7 +317,8 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_HAS_EXEC_CONSTANTS 14
#define I915_PARAM_HAS_RELAXED_DELTA 15
#define I915_PARAM_HAS_GEN7_SOL_RESET 16
#define I915_PARAM_HAS_LLC 17
#define I915_PARAM_HAS_LLC 17
#define I915_PARAM_HAS_ALIASING_PPGTT 18
typedef struct drm_i915_getparam {
int param;

View File

@ -132,6 +132,7 @@ static const struct intel_device_info intel_ironlake_d_info = {
.gen = 5,
.need_gfx_hws = 1, .has_hotplug = 1,
.has_bsd_ring = 1,
.has_pch_split = 1,
};
static const struct intel_device_info intel_ironlake_m_info = {
@ -139,6 +140,7 @@ static const struct intel_device_info intel_ironlake_m_info = {
.need_gfx_hws = 1, .has_hotplug = 1,
.has_fbc = 0, /* disabled due to buggy hardware */
.has_bsd_ring = 1,
.has_pch_split = 1,
};
static const struct intel_device_info intel_sandybridge_d_info = {
@ -147,6 +149,7 @@ static const struct intel_device_info intel_sandybridge_d_info = {
.has_bsd_ring = 1,
.has_blt_ring = 1,
.has_llc = 1,
.has_pch_split = 1,
};
static const struct intel_device_info intel_sandybridge_m_info = {
@ -156,6 +159,7 @@ static const struct intel_device_info intel_sandybridge_m_info = {
.has_bsd_ring = 1,
.has_blt_ring = 1,
.has_llc = 1,
.has_pch_split = 1,
};
static const struct intel_device_info intel_ivybridge_d_info = {
@ -164,6 +168,7 @@ static const struct intel_device_info intel_ivybridge_d_info = {
.has_bsd_ring = 1,
.has_blt_ring = 1,
.has_llc = 1,
.has_pch_split = 1,
};
static const struct intel_device_info intel_ivybridge_m_info = {
@ -173,6 +178,45 @@ static const struct intel_device_info intel_ivybridge_m_info = {
.has_bsd_ring = 1,
.has_blt_ring = 1,
.has_llc = 1,
.has_pch_split = 1,
};
#if 0
static const struct intel_device_info intel_valleyview_m_info = {
.gen = 7, .is_mobile = 1,
.need_gfx_hws = 1, .has_hotplug = 1,
.has_fbc = 0,
.has_bsd_ring = 1,
.has_blt_ring = 1,
.is_valleyview = 1,
};
static const struct intel_device_info intel_valleyview_d_info = {
.gen = 7,
.need_gfx_hws = 1, .has_hotplug = 1,
.has_fbc = 0,
.has_bsd_ring = 1,
.has_blt_ring = 1,
.is_valleyview = 1,
};
#endif
static const struct intel_device_info intel_haswell_d_info = {
.is_haswell = 1, .gen = 7,
.need_gfx_hws = 1, .has_hotplug = 1,
.has_bsd_ring = 1,
.has_blt_ring = 1,
.has_llc = 1,
.has_pch_split = 1,
};
static const struct intel_device_info intel_haswell_m_info = {
.is_haswell = 1, .gen = 7, .is_mobile = 1,
.need_gfx_hws = 1, .has_hotplug = 1,
.has_bsd_ring = 1,
.has_blt_ring = 1,
.has_llc = 1,
.has_pch_split = 1,
};
#define INTEL_VGA_DEVICE(id, info_) { \
@ -228,6 +272,13 @@ static const struct intel_gfx_device_id {
INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */
INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */
INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */
INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */
INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */
INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */
INTEL_VGA_DEVICE(0x0c16, &intel_haswell_d_info), /* SDV */
{0, 0}
};
@ -304,14 +355,15 @@ static int i915_drm_thaw(struct drm_device *dev)
/* KMS EnterVT equivalent */
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
dev_priv->mm.suspended = 0;
error = i915_gem_init_hw(dev);
if (HAS_PCH_SPLIT(dev))
ironlake_init_pch_refclk(dev);
dev_priv->mm.suspended = 0;
error = i915_gem_init_hw(dev);
DRM_UNLOCK(dev);
intel_modeset_init_hw(dev);
sx_xlock(&dev->mode_config.mutex);
drm_mode_config_reset(dev);
sx_xunlock(&dev->mode_config.mutex);
@ -321,9 +373,6 @@ static int i915_drm_thaw(struct drm_device *dev)
/* Resume the modeset for every activated CRTC */
drm_helper_resume_force_mode(dev);
sx_xunlock(&dev->mode_config.mutex);
if (IS_IRONLAKE_M(dev))
ironlake_enable_rc6(dev);
DRM_LOCK(dev);
}
@ -445,7 +494,11 @@ MODULE_DEPEND(i915kms, iicbb, 1, 1, 1);
int intel_iommu_enabled = 0;
TUNABLE_INT("drm.i915.intel_iommu_enabled", &intel_iommu_enabled);
int intel_iommu_gfx_mapped = 0;
TUNABLE_INT("drm.i915.intel_iommu_gfx_mapped", &intel_iommu_gfx_mapped);
int i915_prefault_disable;
TUNABLE_INT("drm.i915.prefault_disable", &i915_prefault_disable);
int i915_semaphores = -1;
TUNABLE_INT("drm.i915.semaphores", &i915_semaphores);
static int i915_try_reset = 1;
@ -460,10 +513,14 @@ int i915_enable_fbc = 0;
TUNABLE_INT("drm.i915.enable_fbc", &i915_enable_fbc);
int i915_enable_rc6 = 0;
TUNABLE_INT("drm.i915.enable_rc6", &i915_enable_rc6);
int i915_lvds_channel_mode;
TUNABLE_INT("drm.i915.lvds_channel_mode", &i915_lvds_channel_mode);
int i915_panel_use_ssc = -1;
TUNABLE_INT("drm.i915.panel_use_ssc", &i915_panel_use_ssc);
int i915_panel_ignore_lid = 0;
TUNABLE_INT("drm.i915.panel_ignore_lid", &i915_panel_ignore_lid);
int i915_panel_invert_brightness;
TUNABLE_INT("drm.i915.panel_invert_brightness", &i915_panel_invert_brightness);
int i915_modeset = 1;
TUNABLE_INT("drm.i915.modeset", &i915_modeset);
int i915_enable_ppgtt = -1;
@ -476,9 +533,9 @@ TUNABLE_INT("drm.i915.enable_hangcheck", &i915_enable_hangcheck);
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
void
intel_detect_pch(struct drm_device *dev)
void intel_detect_pch(struct drm_device *dev)
{
struct drm_i915_private *dev_priv;
device_t pch;
@ -490,20 +547,44 @@ intel_detect_pch(struct drm_device *dev)
id = pci_get_device(pch) & INTEL_PCH_DEVICE_ID_MASK;
if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_IBX;
dev_priv->num_pch_pll = 2;
DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_CPT;
dev_priv->num_pch_pll = 2;
DRM_DEBUG_KMS("Found CougarPoint PCH\n");
} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
/* PantherPoint is CPT compatible */
dev_priv->pch_type = PCH_CPT;
dev_priv->num_pch_pll = 2;
DRM_DEBUG_KMS("Found PatherPoint PCH\n");
} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_LPT;
dev_priv->num_pch_pll = 0;
DRM_DEBUG_KMS("Found LynxPoint PCH\n");
} else
DRM_DEBUG_KMS("No PCH detected\n");
KASSERT(dev_priv->num_pch_pll <= I915_NUM_PLLS,
("num_pch_pll %d\n", dev_priv->num_pch_pll));
} else
DRM_DEBUG_KMS("No Intel PCI-ISA bridge found\n");
}
bool i915_semaphore_is_enabled(struct drm_device *dev)
{
if (INTEL_INFO(dev)->gen < 6)
return 0;
if (i915_semaphores >= 0)
return i915_semaphores;
/* Enable semaphores on SNB when IO remapping is off */
if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
return false;
return 1;
}
void
__gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
@ -530,7 +611,7 @@ __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1))
DELAY(10);
I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 1);
I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1));
POSTING_READ(FORCEWAKE_MT);
count = 0;
@ -573,7 +654,7 @@ void
__gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 0);
I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1));
/* The below doubles as a POSTING_READ */
gen6_gt_check_fifodbg(dev_priv);
}
@ -611,6 +692,31 @@ __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
return (ret);
}
void vlv_force_wake_get(struct drm_i915_private *dev_priv)
{
int count;
count = 0;
/* Already awake? */
if ((I915_READ(0x130094) & 0xa1) == 0xa1)
return;
I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffffffff);
POSTING_READ(FORCEWAKE_VLV);
count = 0;
while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0)
DELAY(10);
}
void vlv_force_wake_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffff0000);
/* FIXME: confirm VLV behavior with Punit folks */
POSTING_READ(FORCEWAKE_VLV);
}
static int
i8xx_do_reset(struct drm_device *dev)
{
@ -653,12 +759,13 @@ i965_reset_complete(struct drm_device *dev)
u8 gdrst;
gdrst = pci_read_config(dev->device, I965_GDRST, 1);
return (gdrst & 0x1);
return (gdrst & GRDOM_RESET_ENABLE) == 0;
}
static int
i965_do_reset(struct drm_device *dev)
{
int ret;
u8 gdrst;
/*
@ -670,8 +777,16 @@ i965_do_reset(struct drm_device *dev)
pci_write_config(dev->device, I965_GDRST,
gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE, 1);
return (_intel_wait_for(dev, i965_reset_complete(dev), 500, 1,
"915rst"));
ret = wait_for(i965_reset_complete(dev), 500);
if (ret)
return ret;
/* We can't reset render&media without also resetting display ... */
gdrst = pci_read_config(dev->device, I965_GDRST, 1);
pci_write_config(dev->device, I965_GDRST,
gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE, 1);
return wait_for(i965_reset_complete(dev), 500);
}
static int
@ -679,14 +794,21 @@ ironlake_do_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv;
u32 gdrst;
int ret;
dev_priv = dev->dev_private;
gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
return (_intel_wait_for(dev,
(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1) != 0,
500, 1, "915rst"));
gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
if (ret)
return ret;
/* We can't reset render&media without also resetting display ... */
gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
}
static int
@ -713,7 +835,7 @@ gen6_do_reset(struct drm_device *dev)
/* Spin waiting for the device to ack the reset request */
ret = _intel_wait_for(dev,
(I915_READ_NOTRACE(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0,
500, 1, "915rst");
500, 0, "915rst");
/* If reset with a user forcewake, try to restore, otherwise turn it off */
if (dev_priv->forcewake_count)
@ -728,7 +850,8 @@ gen6_do_reset(struct drm_device *dev)
return (ret);
}
int intel_gpu_reset(struct drm_device *dev)
int
intel_gpu_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = -ENODEV;
@ -763,15 +886,9 @@ int intel_gpu_reset(struct drm_device *dev)
return ret;
}
int
i915_reset(struct drm_device *dev)
int i915_reset(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
/*
* We really should only reset the display subsystem if we actually
* need to
*/
bool need_display = true;
int ret;
if (!i915_try_reset)
@ -780,12 +897,14 @@ i915_reset(struct drm_device *dev)
if (!sx_try_xlock(&dev->dev_struct_lock))
return (-EBUSY);
dev_priv->stop_rings = 0;
i915_gem_reset(dev);
ret = -ENODEV;
if (time_second - dev_priv->last_gpu_reset < 5) {
if (time_second - dev_priv->last_gpu_reset < 5)
DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
} else
else
ret = intel_gpu_reset(dev);
dev_priv->last_gpu_reset = time_second;
@ -797,36 +916,41 @@ i915_reset(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_MODESET) ||
!dev_priv->mm.suspended) {
struct intel_ring_buffer *ring;
int i;
dev_priv->mm.suspended = 0;
i915_gem_init_swizzling(dev);
dev_priv->rings[RCS].init(&dev_priv->rings[RCS]);
if (HAS_BSD(dev))
dev_priv->rings[VCS].init(&dev_priv->rings[VCS]);
if (HAS_BLT(dev))
dev_priv->rings[BCS].init(&dev_priv->rings[BCS]);
for_each_ring(ring, dev_priv, i)
ring->init(ring);
i915_gem_context_init(dev);
i915_gem_init_ppgtt(dev);
DRM_UNLOCK(dev);
if (drm_core_check_feature(dev, DRIVER_MODESET))
intel_modeset_init_hw(dev);
DRM_LOCK(dev);
drm_irq_uninstall(dev);
drm_mode_config_reset(dev);
DRM_UNLOCK(dev);
drm_irq_install(dev);
DRM_LOCK(dev);
}
DRM_UNLOCK(dev);
if (need_display) {
sx_xlock(&dev->mode_config.mutex);
drm_helper_resume_force_mode(dev);
sx_xunlock(&dev->mode_config.mutex);
}
} else
DRM_UNLOCK(dev);
return (0);
}
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
(((dev_priv)->info->gen >= 6) && \
((reg) < 0x40000) && \
((reg) != FORCEWAKE) && \
(!IS_VALLEYVIEW((dev_priv)->dev)))
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
u##x val = 0; \

View File

@ -66,10 +66,31 @@ enum plane {
};
#define plane_name(p) ((p) + 'A')
#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
enum port {
PORT_A = 0,
PORT_B,
PORT_C,
PORT_D,
PORT_E,
I915_MAX_PORTS
};
#define port_name(p) ((p) + 'A')
#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
#define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++)
struct intel_pch_pll {
int refcount; /* count of number of CRTCs sharing this PLL */
int active; /* count of number of active CRTCs (i.e. DPMS on) */
bool on; /* is the PLL actually active? Disabled during modeset */
int pll_reg;
int fp0_reg;
int fp1_reg;
};
#define I915_NUM_PLLS 2
/* Interface history:
*
* 1.1: Original.
@ -115,11 +136,15 @@ struct drm_i915_display_funcs {
void (*update_wm)(struct drm_device *dev);
void (*update_sprite_wm)(struct drm_device *dev, int pipe,
uint32_t sprite_width, int pixel_size);
void (*sanitize_pm)(struct drm_device *dev);
void (*update_linetime_wm)(struct drm_device *dev, int pipe,
struct drm_display_mode *mode);
int (*crtc_mode_set)(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
int x, int y,
struct drm_framebuffer *old_fb);
void (*off)(struct drm_crtc *crtc);
void (*write_eld)(struct drm_connector *connector,
struct drm_crtc *crtc);
void (*fdi_link_train)(struct drm_crtc *crtc);
@ -152,6 +177,9 @@ struct intel_device_info {
u8 is_broadwater:1;
u8 is_crestline:1;
u8 is_ivybridge:1;
u8 is_valleyview:1;
u8 has_pch_split:1;
u8 is_haswell:1;
u8 has_fbc:1;
u8 has_pipe_cxsr:1;
u8 has_hotplug:1;
@ -227,7 +255,6 @@ struct intel_opregion {
struct drm_i915_fence_reg {
struct list_head lru_list;
struct drm_i915_gem_object *obj;
uint32_t setup_seqno;
int pin_count;
};
@ -243,10 +270,12 @@ struct sdvo_device_mapping {
enum intel_pch {
PCH_IBX, /* Ibexpeak PCH */
PCH_CPT, /* Cougarpoint PCH */
PCH_LPT, /* Lynxpoint PCH */
};
#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
struct intel_fbdev;
struct intel_fbc_work;
@ -254,15 +283,15 @@ struct intel_fbc_work;
typedef struct drm_i915_private {
struct drm_device *dev;
device_t *gmbus_bridge;
device_t *bbbus_bridge;
device_t *gmbus;
device_t *bbbus;
device_t gmbus_bridge[GMBUS_NUM_PORTS + 1];
device_t bbbus_bridge[GMBUS_NUM_PORTS + 1];
device_t gmbus[GMBUS_NUM_PORTS + 1];
device_t bbbus[GMBUS_NUM_PORTS + 1];
/** gmbus_sx protects against concurrent usage of the single hw gmbus
* controller on different i2c buses. */
struct sx gmbus_sx;
uint32_t gpio_mmio_base;
int has_gem;
int relative_constants_mode;
drm_local_map_t *sarea;
@ -286,7 +315,6 @@ typedef struct drm_i915_private {
dma_addr_t dma_status_page;
uint32_t counter;
unsigned int status_gfx_addr;
drm_local_map_t hws_map;
struct drm_gem_object *hws_obj;
struct drm_i915_gem_object *pwrctx;
@ -308,23 +336,23 @@ typedef struct drm_i915_private {
u32 pch_irq_mask;
struct mtx irq_lock;
struct mtx dpio_lock;
u32 hotplug_supported_mask;
int tex_lru_log_granularity;
int allow_batchbuffer;
unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
int vblank_pipe;
int num_pipe;
int num_pch_pll;
/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD ((1500 /* in ms */ * hz) / 1000)
int hangcheck_count;
uint32_t last_acthd;
uint32_t last_acthd_bsd;
uint32_t last_acthd_blt;
uint32_t last_acthd[I915_NUM_RINGS];
uint32_t last_instdone;
uint32_t last_instdone1;
unsigned int stop_rings;
struct intel_opregion opregion;
@ -346,6 +374,8 @@ typedef struct drm_i915_private {
unsigned int lvds_use_ssc:1;
unsigned int display_clock_mode:1;
int lvds_ssc_freq;
unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
unsigned int lvds_val; /* used for checking LVDS channel mode */
struct {
int rate;
int lanes;
@ -575,23 +605,9 @@ typedef struct drm_i915_private {
*/
struct list_head inactive_list;
/**
* LRU list of objects which are not in the ringbuffer but
* are still pinned in the GTT.
*/
struct list_head pinned_list;
/** LRU list of objects with fence regs on them. */
struct list_head fence_list;
/**
* List of objects currently pending being freed.
*
* These objects are no longer in use, but due to a signal
* we were prevented from freeing them at the appointed time.
*/
struct list_head deferred_free_list;
/**
* We leave the user IRQ off as much as possible,
* but this means that requests will finish and never
@ -658,6 +674,15 @@ typedef struct drm_i915_private {
const struct intel_device_info *info;
/* Old dri1 support infrastructure, beware the dragons ya fools entering
* here! */
struct {
unsigned allow_batchbuffer : 1;
u32 *gfx_hws_cpu_addr;
} dri1;
/* Kernel Modesetting */
struct sdvo_device_mapping sdvo_mappings[2];
/* indicate whether the LVDS_BORDER should be enabled or not */
unsigned int lvds_border_bits;
@ -667,7 +692,8 @@ typedef struct drm_i915_private {
struct drm_crtc *plane_to_crtc_mapping[3];
struct drm_crtc *pipe_to_crtc_mapping[3];
/* wait_queue_head_t pending_flip_queue; XXXKIB */
bool flip_pending_is_done;
struct intel_pch_pll pch_plls[I915_NUM_PLLS];
/* Reclocking support */
bool render_reclock_avail;
@ -711,7 +737,8 @@ typedef struct drm_i915_private {
enum no_fbc_reason no_fbc_reason;
unsigned int stop_rings;
struct drm_mm_node *compressed_fb;
struct drm_mm_node *compressed_llb;
unsigned long cfb_size;
unsigned int cfb_fb;
@ -726,6 +753,7 @@ typedef struct drm_i915_private {
struct task hotplug_task;
int error_completion;
struct mtx error_completion_lock;
/* Protected by dev->error_lock. */
struct drm_i915_error_state *first_error;
struct mtx error_lock;
struct callout hangcheck_timer;
@ -816,7 +844,14 @@ struct drm_i915_gem_object {
* Current tiling mode for the object.
*/
unsigned int tiling_mode:2;
unsigned int tiling_changed:1;
/**
* Whether the tiling parameters for the currently associated fence
* register have changed. Note that for the purposes of tracking
* tiling changes we also treat the unfenced register, the register
* slot that the object occupies whilst it executes a fenced
* command (such as BLT on gen2/3), as a "fence".
*/
unsigned int fence_dirty:1;
/** How many users have pinned this object in GTT space. The following
* users can each hold at most one reference: pwrite/pread, pin_ioctl
@ -843,6 +878,7 @@ struct drm_i915_gem_object {
*/
unsigned int fault_mappable:1;
unsigned int pin_mappable:1;
unsigned int pin_display:1;
/*
* Is the GPU currently using a fence to access this buffer,
@ -856,6 +892,7 @@ struct drm_i915_gem_object {
unsigned int has_global_gtt_mapping:1;
vm_page_t *pages;
int pages_pin_count;
/**
* DMAR support
@ -876,13 +913,12 @@ struct drm_i915_gem_object {
*/
uint32_t gtt_offset;
/** Breadcrumb of last rendering to the buffer. */
uint32_t last_rendering_seqno;
struct intel_ring_buffer *ring;
/** Breadcrumb of last rendering to the buffer. */
uint32_t last_rendering_seqno;
/** Breadcrumb of last fenced GPU access to the buffer. */
uint32_t last_fenced_seqno;
struct intel_ring_buffer *last_fenced_ring;
/** Current tiling stride for the object, if it's tiled. */
uint32_t stride;
@ -890,12 +926,6 @@ struct drm_i915_gem_object {
/** Record of address bit 17 of each page at last unbind. */
unsigned long *bit_17;
/**
* If present, while GEM_DOMAIN_CPU is in the read domain this array
* flags which individual pages are valid.
*/
uint8_t *page_cpu_valid;
/** User space pin count and filp owning the pin */
uint32_t user_pin_count;
struct drm_file *pin_filp;
@ -953,8 +983,11 @@ struct drm_i915_file_private {
};
struct drm_i915_error_state {
u_int ref;
u32 eir;
u32 pgtbl_er;
u32 ier;
bool waiting[I915_NUM_RINGS];
u32 pipestat[I915_MAX_PIPES];
u32 tail[I915_NUM_RINGS];
u32 head[I915_NUM_RINGS];
@ -1037,9 +1070,12 @@ extern struct drm_driver_info i915_driver_info;
extern struct cdev_pager_ops i915_gem_pager_ops;
extern unsigned int i915_fbpercrtc;
extern int i915_panel_ignore_lid;
extern int i915_panel_invert_brightness;
extern unsigned int i915_powersave;
extern int i915_prefault_disable;
extern int i915_semaphores;
extern unsigned int i915_lvds_downclock;
extern int i915_lvds_channel_mode;
extern int i915_panel_use_ssc;
extern int i915_vbt_sdvo_panel_type;
extern int i915_enable_rc6;
@ -1049,8 +1085,8 @@ extern int i915_enable_hangcheck;
const struct intel_device_info *i915_get_device_id(int device);
extern int intel_gpu_reset(struct drm_device *dev);
int i915_reset(struct drm_device *dev);
extern int intel_gpu_reset(struct drm_device *dev);
/* i915_debug.c */
int i915_sysctl_init(struct drm_device *dev, struct sysctl_ctx_list *ctx,
@ -1064,6 +1100,7 @@ int i915_cmdbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_getparam(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_update_dri1_breadcrumb(struct drm_device *dev);
extern void i915_kernel_lost_context(struct drm_device * dev);
extern int i915_driver_load(struct drm_device *, unsigned long flags);
extern int i915_driver_unload(struct drm_device *);
@ -1095,20 +1132,12 @@ bool i915_gpu_turbo_disable(void);
/* i915_irq.c */
extern int i915_irq_emit(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int i915_irq_wait(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern void intel_irq_init(struct drm_device *dev);
extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int i915_vblank_swap(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void intel_enable_asle(struct drm_device *dev);
void i915_hangcheck_elapsed(void *context);
void i915_handle_error(struct drm_device *dev, bool wedged);
void i915_error_state_free(struct drm_i915_error_state *error);
void i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
void i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
@ -1169,13 +1198,15 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
uint32_t i915_get_gem_seqno(struct drm_device *dev);
static inline void
static inline bool
i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
{
if (obj->fence_reg != I915_FENCE_REG_NONE) {
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
dev_priv->fence_regs[obj->fence_reg].pin_count++;
}
return true;
} else
return false;
}
static inline void
@ -1192,36 +1223,38 @@ void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
size_t size);
int i915_gem_do_init(struct drm_device *dev, unsigned long start,
unsigned long mappable_end, unsigned long end);
uint32_t i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
uint32_t size, int tiling_mode);
int i915_mutex_lock_interruptible(struct drm_device *dev);
int i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
bool write);
int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
bool write);
int i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment, struct intel_ring_buffer *pipelined);
void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
int i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
int i915_gem_flush_ring(struct intel_ring_buffer *ring,
uint32_t invalidate_domains, uint32_t flush_domains);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
int i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj);
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *to);
int i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
int i915_gem_idle(struct drm_device *dev);
int i915_gem_init(struct drm_device *dev);
int i915_gem_init_hw(struct drm_device *dev);
void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_init_ppgtt(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int i915_gpu_idle(struct drm_device *dev, bool do_retire);
int i915_gpu_idle(struct drm_device *dev);
void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring, uint32_t seqno);
int i915_add_request(struct intel_ring_buffer *ring, struct drm_file *file,
struct drm_i915_gem_request *request);
int i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *pipelined);
int i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
void i915_gem_reset(struct drm_device *dev);
int i915_wait_request(struct intel_ring_buffer *ring, uint32_t seqno,
bool do_retire);
int i915_wait_request(struct intel_ring_buffer *ring, uint32_t seqno);
int i915_gem_mmap(struct drm_device *dev, uint64_t offset, int prot);
int i915_gem_fault(struct drm_device *dev, uint64_t offset, int prot,
uint64_t *phys);
@ -1257,12 +1290,17 @@ int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
void i915_gem_object_do_bit_17_swizzle_page(struct drm_i915_gem_object *obj,
struct vm_page *m);
/* i915_gem_evict.c */
int i915_gem_evict_something(struct drm_device *dev, int min_size,
unsigned alignment, bool mappable);
int i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only);
int i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only);
/* i915_gem_stolen.c */
int i915_gem_init_stolen(struct drm_device *dev);
void i915_gem_cleanup_stolen(struct drm_device *dev);
/* i915_suspend.c */
extern int i915_save_state(struct drm_device *dev);
@ -1274,6 +1312,12 @@ extern void intel_teardown_gmbus(struct drm_device *dev);
extern void intel_gmbus_set_speed(device_t idev, int speed);
extern void intel_gmbus_force_bit(device_t idev, bool force_bit);
extern void intel_iic_reset(struct drm_device *dev);
static inline bool intel_gmbus_is_port_valid(unsigned port)
{
return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD);
}
extern device_t intel_gmbus_get_adapter(struct drm_i915_private *dev_priv,
unsigned port);
/* intel_opregion.c */
int intel_opregion_setup(struct drm_device *dev);
@ -1292,12 +1336,16 @@ void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj);
void i915_gem_restore_gtt_mappings(struct drm_device *dev);
int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level);
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level);
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
int i915_gem_init_global_gtt(struct drm_device *dev, unsigned long start,
unsigned long mappable_end, unsigned long end);
/* modesetting */
extern void intel_modeset_init_hw(struct drm_device *dev);
extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
@ -1310,12 +1358,19 @@ extern void ironlake_enable_rc6(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
/* IPS */
extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
extern void intel_gpu_ips_teardown(void);
extern bool i915_semaphore_is_enabled(struct drm_device *dev);
extern void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
extern void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv);
extern void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
extern void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv);
extern void vlv_force_wake_get(struct drm_i915_private *dev_priv);
extern void vlv_force_wake_put(struct drm_i915_private *dev_priv);
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(
struct drm_device *dev);
extern void intel_overlay_print_error_state(struct sbuf *m,
@ -1340,12 +1395,6 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
(((dev_priv)->info->gen >= 6) && \
((reg) < 0x40000) && \
((reg) != FORCEWAKE))
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
@ -1385,22 +1434,6 @@ __i915_write(64, 64)
#define I915_VERBOSE 0
#define LP_RING(d) (&((struct drm_i915_private *)(d))->rings[RCS])
#define BEGIN_LP_RING(n) \
intel_ring_begin(LP_RING(dev_priv), (n))
#define OUT_RING(x) \
intel_ring_emit(LP_RING(dev_priv), x)
#define ADVANCE_LP_RING() \
intel_ring_advance(LP_RING(dev_priv))
#define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \
if (LP_RING(dev->dev_private)->obj == NULL) \
LOCK_TEST_WITH_RETURN(dev, file); \
} while (0)
/**
* Reads a dword out of the status page, which is written to from the command
* queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
@ -1416,10 +1449,7 @@ __i915_write(64, 64)
*
* The area from dword 0x20 to 0x3ff is available for driver usage.
*/
#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg])
#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
#define I915_GEM_HWS_INDEX 0x20
#define I915_BREADCRUMB_INDEX 0x21
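An illustrative note, not part of this commit: the breadcrumb slot of the status page is conventionally polled to decide whether a software interrupt sequence number has retired; the variable name irq_nr below is hypothetical.
	if (READ_BREADCRUMB(dev_priv) >= irq_nr)
		;	/* the corresponding request has completed */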
#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
@ -1442,6 +1472,8 @@ __i915_write(64, 64)
#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
/* XXXKIB LEGACY */
@ -1503,10 +1535,11 @@ __i915_write(64, 64)
#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
#define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev))
#define HAS_PCH_SPLIT(dev) (INTEL_INFO(dev)->has_pch_split)
#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
@ -1519,6 +1552,23 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
return ((int32_t)(seq1 - seq2) >= 0);
}
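A brief illustrative note, not taken from the commit itself: the signed subtraction above keeps the seqno comparison correct across 32-bit wraparound.
/* Hypothetical values: seq1 = 0x00000002 (just wrapped), seq2 = 0xfffffffe. */
/* (int32_t)(0x00000002 - 0xfffffffe) == 4, which is >= 0, so seq1 is newer. */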
static inline void i915_gem_chipset_flush(struct drm_device *dev)
{
if (INTEL_INFO(dev)->gen < 6)
intel_gtt_chipset_flush();
}
static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
/* KASSERT(obj->pages != NULL, ("pin and NULL pages")); */
obj->pages_pin_count++;
}
static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
KASSERT(obj->pages_pin_count != 0, ("zero pages_pin_count"));
obj->pages_pin_count--;
}
u32 i915_gem_next_request_seqno(struct intel_ring_buffer *ring);
#endif

File diff suppressed because it is too large

View File

@ -405,7 +405,7 @@ static int do_switch(struct i915_hw_context *to)
}
if (!to->obj->has_global_gtt_mapping)
i915_gem_gtt_bind_object(to->obj);
i915_gem_gtt_bind_object(to->obj, to->obj->cache_level);
if (!to->is_initialized || is_default_context(to))
hw_flags |= MI_RESTORE_INHIBIT;

View File

@ -37,6 +37,9 @@ __FBSDID("$FreeBSD$");
static bool
mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
{
if (obj->pin_count)
return false;
list_add(&obj->exec_list, unwind);
return drm_mm_scan_add_block(obj->gtt_space);
}
@ -93,7 +96,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
/* Now merge in the soon-to-be-expired objects... */
list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
/* Does the object require an outstanding flush? */
if (obj->base.write_domain || obj->pin_count)
if (obj->base.write_domain)
continue;
if (mark_free(obj, &unwind_list))
@ -102,14 +105,11 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
/* Finally add anything with a pending flush (in order of retirement) */
list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
if (obj->pin_count)
continue;
if (mark_free(obj, &unwind_list))
goto found;
}
list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
if (!obj->base.write_domain || obj->pin_count)
if (!obj->base.write_domain)
continue;
if (mark_free(obj, &unwind_list))
@ -169,8 +169,9 @@ int
i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
struct drm_i915_gem_object *obj, *next;
bool lists_empty;
int ret;
lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
list_empty(&dev_priv->mm.flushing_list) &&
@ -180,32 +181,25 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
CTR2(KTR_DRM, "evict_everything %p %d", dev, purgeable_only);
/* Flush everything (on to the inactive lists) and evict */
ret = i915_gpu_idle(dev, true);
/* The gpu_idle will flush everything in the write domain to the
* active list. Then we must move everything off the active list
* with retire requests.
*/
ret = i915_gpu_idle(dev);
if (ret)
return ret;
i915_gem_retire_requests(dev);
KASSERT(list_empty(&dev_priv->mm.flushing_list),
("flush list not empty"));
return i915_gem_evict_inactive(dev, purgeable_only);
}
/** Unbinds all inactive objects. */
int
i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj, *next;
CTR2(KTR_DRM, "evict_inactive %p %d", dev, purgeable_only);
/* Having flushed everything, unbind() should never raise an error */
list_for_each_entry_safe(obj, next,
&dev_priv->mm.inactive_list, mm_list) {
if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) {
int ret = i915_gem_object_unbind(obj);
if (ret)
return ret;
if (obj->pin_count == 0)
i915_gem_object_unbind(obj);
}
}

View File

@ -263,6 +263,12 @@ eb_destroy(struct eb_objects *eb)
free(eb, DRM_I915_GEM);
}
static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
return (obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
obj->cache_level != I915_CACHE_NONE);
}
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
struct eb_objects *eb,
@ -270,6 +276,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
{
struct drm_device *dev = obj->base.dev;
struct drm_gem_object *target_obj;
struct drm_i915_gem_object *target_i915_obj;
uint32_t target_offset;
int ret = -EINVAL;
@ -278,7 +285,8 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
if (unlikely(target_obj == NULL))
return -ENOENT;
target_offset = to_intel_bo(target_obj)->gtt_offset;
target_i915_obj = to_intel_bo(target_obj);
target_offset = target_i915_obj->gtt_offset;
#if WATCH_RELOC
DRM_INFO("%s: obj %p offset %08x target %d "
@ -364,12 +372,20 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
return ret;
}
/* We can't wait for rendering with pagefaults disabled */
if (obj->active && (curthread->td_pflags & TDP_NOFAULTING) != 0)
return (-EFAULT);
reloc->delta += target_offset;
if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
if (use_cpu_reloc(obj)) {
uint32_t page_offset = reloc->offset & PAGE_MASK;
char *vaddr;
struct sf_buf *sf;
ret = i915_gem_object_set_to_cpu_domain(obj, 1);
if (ret)
return ret;
sf = sf_buf_alloc(obj->pages[OFF_TO_IDX(reloc->offset)],
SFB_NOWAIT);
if (sf == NULL)
@ -381,10 +397,11 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
uint32_t *reloc_entry;
char *reloc_page;
/* We can't wait for rendering with pagefaults disabled */
if (obj->active && (curthread->td_pflags & TDP_NOFAULTING) != 0)
return (-EFAULT);
ret = i915_gem_object_set_to_gtt_domain(obj, 1);
ret = i915_gem_object_set_to_gtt_domain(obj, true);
if (ret)
return ret;
ret = i915_gem_object_put_fence(obj);
if (ret)
return ret;
@ -401,6 +418,16 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
pmap_unmapdev((vm_offset_t)reloc_page, PAGE_SIZE);
}
/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
* pipe_control writes because the gpu doesn't properly redirect them
* through the ppgtt for non_secure batchbuffers. */
if (unlikely(IS_GEN6(dev) &&
reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
!target_i915_obj->has_global_gtt_mapping)) {
i915_gem_gtt_bind_object(target_i915_obj,
target_i915_obj->cache_level);
}
/* and update the user's relocation entry */
reloc->presumed_offset = target_offset;
@ -411,28 +438,44 @@ static int
i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
struct eb_objects *eb)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
struct drm_i915_gem_relocation_entry *user_relocs;
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
struct drm_i915_gem_relocation_entry reloc;
int i, ret;
int remain, ret;
user_relocs = (void *)(uintptr_t)entry->relocs_ptr;
for (i = 0; i < entry->relocation_count; i++) {
ret = -copyin_nofault(user_relocs + i, &reloc, sizeof(reloc));
remain = entry->relocation_count;
while (remain) {
struct drm_i915_gem_relocation_entry *r = stack_reloc;
int count = remain;
if (count > DRM_ARRAY_SIZE(stack_reloc))
count = DRM_ARRAY_SIZE(stack_reloc);
remain -= count;
ret = -copyin_nofault(user_relocs, r, count*sizeof(r[0]));
if (ret != 0)
return (ret);
ret = i915_gem_execbuffer_relocate_entry(obj, eb, &reloc);
if (ret != 0)
return (ret);
do {
u64 offset = r->presumed_offset;
ret = i915_gem_execbuffer_relocate_entry(obj, eb, r);
if (ret)
return ret;
ret = -copyout_nofault(&reloc.presumed_offset,
&user_relocs[i].presumed_offset,
sizeof(reloc.presumed_offset));
if (ret != 0)
return (ret);
if (r->presumed_offset != offset &&
copyout_nofault(&r->presumed_offset,
&user_relocs->presumed_offset,
sizeof(r->presumed_offset))) {
return -EFAULT;
}
user_relocs++;
r++;
} while (--count);
}
#undef N_RELOC
return (0);
}
@ -486,6 +529,13 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
#define __EXEC_OBJECT_HAS_FENCE (1<<31)
static int
need_reloc_mappable(struct drm_i915_gem_object *obj)
{
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
return entry->relocation_count && !use_cpu_reloc(obj);
}
static int
pin_and_fence_object(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring)
@ -499,8 +549,7 @@ pin_and_fence_object(struct drm_i915_gem_object *obj,
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
need_mappable =
entry->relocation_count ? true : need_fence;
need_mappable = need_fence || need_reloc_mappable(obj);
ret = i915_gem_object_pin(obj, entry->alignment, need_mappable);
if (ret)
@ -508,18 +557,13 @@ pin_and_fence_object(struct drm_i915_gem_object *obj,
if (has_fenced_gpu_access) {
if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
if (obj->tiling_mode) {
ret = i915_gem_object_get_fence(obj, ring);
if (ret)
goto err_unpin;
ret = i915_gem_object_get_fence(obj);
if (ret)
goto err_unpin;
if (i915_gem_object_pin_fence(obj))
entry->flags |= __EXEC_OBJECT_HAS_FENCE;
i915_gem_object_pin_fence(obj);
} else {
ret = i915_gem_object_put_fence(obj);
if (ret)
goto err_unpin;
}
obj->pending_fenced_gpu_access = true;
}
}
@ -558,8 +602,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
need_mappable =
entry->relocation_count ? true : need_fence;
need_mappable = need_fence || need_reloc_mappable(obj);
if (need_mappable)
list_move(&obj->exec_list, &ordered_objects);
@ -600,8 +643,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
need_mappable =
entry->relocation_count ? true : need_fence;
need_mappable = need_fence || need_reloc_mappable(obj);
if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
(need_mappable && !obj->map_and_fenceable))
@ -815,62 +857,6 @@ i915_gem_execbuffer_flush(struct drm_device *dev,
return 0;
}
static bool
intel_enable_semaphores(struct drm_device *dev)
{
if (INTEL_INFO(dev)->gen < 6)
return 0;
if (i915_semaphores >= 0)
return i915_semaphores;
/* Enable semaphores on SNB when IO remapping is off */
if (INTEL_INFO(dev)->gen == 6)
return !intel_iommu_enabled;
return 1;
}
static int
i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *to)
{
struct intel_ring_buffer *from = obj->ring;
u32 seqno;
int ret, idx;
if (from == NULL || to == from)
return 0;
/* XXX gpu semaphores are implicated in various hard hangs on SNB */
if (!intel_enable_semaphores(obj->base.dev))
return i915_gem_object_wait_rendering(obj);
idx = intel_ring_sync_index(from, to);
seqno = obj->last_rendering_seqno;
if (seqno <= from->sync_seqno[idx])
return 0;
if (seqno == from->outstanding_lazy_request) {
struct drm_i915_gem_request *request;
request = malloc(sizeof(*request), DRM_I915_GEM,
M_WAITOK | M_ZERO);
ret = i915_add_request(from, NULL, request);
if (ret) {
free(request, DRM_I915_GEM);
return ret;
}
seqno = request->seqno;
}
from->sync_seqno[idx] = seqno;
return to->sync_to(to, from, seqno - 1);
}
static int
i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
{
@ -937,7 +923,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
}
list_for_each_entry(obj, objects, exec_list) {
ret = i915_gem_execbuffer_sync_rings(obj, ring);
ret = i915_gem_object_sync(obj, ring);
if (ret)
return ret;
}
@ -1015,11 +1001,14 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
obj->pending_gpu_write = true;
list_move_tail(&obj->gpu_write_list,
&ring->gpu_write_list);
intel_mark_busy(ring->dev, obj);
if (obj->pin_count) /* check for potential scanout */
intel_mark_busy(ring->dev, obj);
}
CTR3(KTR_DRM, "object_change_domain move_to_active %p %x %x",
obj, old_read, old_write);
}
intel_mark_busy(ring->dev, NULL);
}
int i915_gem_sync_exec_requests;
@ -1051,8 +1040,10 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
if (request == NULL || i915_add_request(ring, file, request)) {
i915_gem_next_request_seqno(ring);
free(request, DRM_I915_GEM);
} else if (i915_gem_sync_exec_requests)
i915_wait_request(ring, request->seqno, true);
} else if (i915_gem_sync_exec_requests) {
i915_wait_request(ring, request->seqno);
i915_gem_retire_requests(dev);
}
}
static void
@ -1154,10 +1145,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
ring = &dev_priv->rings[RCS];
break;
case I915_EXEC_BSD:
if (!HAS_BSD(dev)) {
DRM_DEBUG("execbuf with invalid ring (BSD)\n");
return -EINVAL;
}
ring = &dev_priv->rings[VCS];
if (ctx_id != 0) {
DRM_DEBUG("Ring %s doesn't support contexts\n",
@ -1166,10 +1153,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
break;
case I915_EXEC_BLT:
if (!HAS_BLT(dev)) {
DRM_DEBUG("execbuf with invalid ring (BLT)\n");
return -EINVAL;
}
ring = &dev_priv->rings[BCS];
if (ctx_id != 0) {
DRM_DEBUG("Ring %s doesn't support contexts\n",
@ -1183,6 +1166,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
ret = -EINVAL;
goto pre_struct_lock_err;
}
if (!intel_ring_initialized(ring)) {
DRM_DEBUG("execbuf with invalid ring: %d\n",
(int)(args->flags & I915_EXEC_RING_MASK));
return -EINVAL;
}
mode = args->flags & I915_EXEC_CONSTANTS_MASK;
mask = I915_EXEC_CONSTANTS_MASK;
@ -1227,6 +1215,12 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto pre_struct_lock_err;
}
if (INTEL_INFO(dev)->gen >= 5) {
DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
ret = -EINVAL;
goto pre_struct_lock_err;
}
if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
DRM_DEBUG("execbuf with %u cliprects\n",
args->num_cliprects);
@ -1328,9 +1322,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
* so every billion or so execbuffers, we need to stall
* the GPU in order to reset the counters.
*/
ret = i915_gpu_idle(dev, true);
ret = i915_gpu_idle(dev);
if (ret)
goto err;
i915_gem_retire_requests(dev);
KASSERT(ring->sync_seqno[i] == 0, ("Non-zero sync_seqno"));
}

View File

@ -245,7 +245,7 @@ do_idling(struct drm_i915_private *dev_priv)
if (dev_priv->mm.gtt.do_idle_maps) {
dev_priv->mm.interruptible = false;
if (i915_gpu_idle(dev_priv->dev, false)) {
if (i915_gpu_idle(dev_priv->dev)) {
DRM_ERROR("Couldn't idle GPU\n");
/* Wait a bit, in hopes it avoids the hang */
DELAY(10);
@ -277,28 +277,21 @@ i915_gem_restore_gtt_mappings(struct drm_device *dev)
list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
i915_gem_clflush_object(obj);
i915_gem_gtt_rebind_object(obj, obj->cache_level);
i915_gem_gtt_bind_object(obj, obj->cache_level);
}
intel_gtt_chipset_flush();
}
int
i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{
unsigned int agp_type;
agp_type = cache_level_to_agp_type(obj->base.dev, obj->cache_level);
intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
obj->base.size >> PAGE_SHIFT, obj->pages, agp_type);
obj->has_global_gtt_mapping = 1;
return (0);
}
void
i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
struct drm_device *dev;
@ -312,11 +305,21 @@ i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
obj->base.size >> PAGE_SHIFT, obj->pages, agp_type);
obj->has_global_gtt_mapping = 0;
obj->has_global_gtt_mapping = 1;
}
void
i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
obj->base.size >> PAGE_SHIFT);
obj->has_global_gtt_mapping = 0;
}
void
i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@ -327,8 +330,35 @@ i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
interruptible = do_idling(dev_priv);
intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
obj->base.size >> PAGE_SHIFT);
undo_idling(dev_priv, interruptible);
}
int
i915_gem_init_global_gtt(struct drm_device *dev, unsigned long start,
unsigned long mappable_end, unsigned long end)
{
drm_i915_private_t *dev_priv;
unsigned long mappable;
int error;
dev_priv = dev->dev_private;
mappable = min(end, mappable_end) - start;
/* Subtract the guard page ... */
drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
dev_priv->mm.gtt_start = start;
dev_priv->mm.gtt_mappable_end = mappable_end;
dev_priv->mm.gtt_end = end;
dev_priv->mm.gtt_total = end - start;
dev_priv->mm.mappable_gtt_total = mappable;
/* ... but ensure that we clear the entire range. */
intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
device_printf(dev->device,
"taking over the fictitious range 0x%lx-0x%lx\n",
dev->agp->base + start, dev->agp->base + start + mappable);
error = -vm_phys_fictitious_reg_range(dev->agp->base + start,
dev->agp->base + start + mappable, VM_MEMATTR_WRITE_COMBINING);
return (error);
}

View File

@ -0,0 +1,205 @@
/*
* Copyright © 2008-2012 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
* Chris Wilson <chris@chris-wilson.co.uk>
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <dev/drm2/drmP.h>
#include <dev/drm2/drm.h>
#include <dev/drm2/i915/i915_drm.h>
#include <dev/drm2/i915/i915_drv.h>
/*
* The BIOS typically reserves some of the system's memory for the exclusive
* use of the integrated graphics. This memory is no longer available for
* use by the OS and so the user finds that his system has less memory
* available than he put in. We refer to this memory as stolen.
*
* The BIOS will allocate its framebuffer from the stolen memory. Our
* goal is to try to reuse that object for our own fbcon which must always
* be available for panics. Anything else we can reuse the stolen memory
* for is a boon.
*/
#define PTE_ADDRESS_MASK 0xfffff000
#define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */
#define PTE_MAPPING_TYPE_UNCACHED (0 << 1)
#define PTE_MAPPING_TYPE_DCACHE (1 << 1) /* i830 only */
#define PTE_MAPPING_TYPE_CACHED (3 << 1)
#define PTE_MAPPING_TYPE_MASK (3 << 1)
#define PTE_VALID (1 << 0)
/**
* i915_stolen_to_phys - take an offset into stolen memory and turn it into
* a physical one
* @dev: drm device
* @offset: address to translate
*
* Some chip functions require allocations from stolen space and need the
* physical address of the memory in question.
*/
static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset)
{
struct drm_i915_private *dev_priv = dev->dev_private;
device_t pdev = dev_priv->bridge_dev;
u32 base;
#if 0
/* On the machines I have tested the Graphics Base of Stolen Memory
* is unreliable, so compute the base by subtracting the stolen memory
* from the Top of Low Usable DRAM which is where the BIOS places
* the graphics stolen memory.
*/
if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
/* top 32bits are reserved = 0 */
pci_read_config_dword(pdev, 0xA4, &base);
} else {
/* XXX presume 8xx is the same as i915 */
pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base);
}
#else
if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
u16 val;
val = pci_read_config(pdev, 0xb0, 2);
base = val >> 4 << 20;
} else {
u8 val;
val = pci_read_config(pdev, 0x9c, 1);
base = val >> 3 << 27;
}
base -= dev_priv->mm.gtt.stolen_size;
#endif
return base + offset;
}
static void i915_warn_stolen(struct drm_device *dev)
{
DRM_INFO("not enough stolen space for compressed buffer, disabling\n");
DRM_INFO("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
}
static void i915_setup_compression(struct drm_device *dev, int size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mm_node *compressed_fb, *compressed_llb;
unsigned long cfb_base;
unsigned long ll_base = 0;
/* Just in case the BIOS is doing something questionable. */
intel_disable_fbc(dev);
compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
if (compressed_fb)
compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
if (!compressed_fb)
goto err;
cfb_base = i915_stolen_to_phys(dev, compressed_fb->start);
if (!cfb_base)
goto err_fb;
if (!(IS_GM45(dev) || HAS_PCH_SPLIT(dev))) {
compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
4096, 4096, 0);
if (compressed_llb)
compressed_llb = drm_mm_get_block(compressed_llb,
4096, 4096);
if (!compressed_llb)
goto err_fb;
ll_base = i915_stolen_to_phys(dev, compressed_llb->start);
if (!ll_base)
goto err_llb;
}
dev_priv->cfb_size = size;
dev_priv->compressed_fb = compressed_fb;
if (HAS_PCH_SPLIT(dev))
I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
else if (IS_GM45(dev)) {
I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
} else {
I915_WRITE(FBC_CFB_BASE, cfb_base);
I915_WRITE(FBC_LL_BASE, ll_base);
dev_priv->compressed_llb = compressed_llb;
}
DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n",
cfb_base, ll_base, size >> 20);
return;
err_llb:
drm_mm_put_block(compressed_llb);
err_fb:
drm_mm_put_block(compressed_fb);
err:
dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
i915_warn_stolen(dev);
}
static void i915_cleanup_compression(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
drm_mm_put_block(dev_priv->compressed_fb);
if (dev_priv->compressed_llb)
drm_mm_put_block(dev_priv->compressed_llb);
}
void i915_gem_cleanup_stolen(struct drm_device *dev)
{
if (I915_HAS_FBC(dev) && i915_powersave)
i915_cleanup_compression(dev);
}
int i915_gem_init_stolen(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long prealloc_size = dev_priv->mm.gtt.stolen_size;
/* Basic memrange allocator for stolen space */
drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
/* Try to set up FBC with a reasonable compressed buffer size */
if (I915_HAS_FBC(dev) && i915_powersave) {
int cfb_size;
/* Leave 1M for line length buffer & misc. */
/* Try to get a 32M buffer... */
if (prealloc_size > (36*1024*1024))
cfb_size = 32*1024*1024;
else /* fall back to 7/8 of the stolen space */
cfb_size = prealloc_size * 7 / 8;
i915_setup_compression(dev, cfb_size);
}
return 0;
}

View File

@ -357,11 +357,18 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
/* We need to rebind the object if its current allocation
* no longer meets the alignment restrictions for its new
* tiling mode. Otherwise we can just leave it alone, but
* need to ensure that any fence register is cleared.
* need to ensure that any fence register is updated before
* the next fenced (either through the GTT or by the BLT unit
* on older GPUs) access.
*
* After updating the tiling parameters, we then flag whether
* we need to update an associated fence register. Note this
* has to also include the unfenced register the GPU uses
* whilst executing a fenced command for an untiled object.
*/
i915_gem_release_mmap(obj);
obj->map_and_fenceable = obj->gtt_space == NULL ||
obj->map_and_fenceable =
obj->gtt_space == NULL ||
(obj->gtt_offset + obj->base.size <=
dev_priv->mm.gtt_mappable_end &&
i915_gem_object_fence_ok(obj, args->tiling_mode));
@ -374,12 +381,20 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
if (obj->gtt_offset & (unfenced_alignment - 1))
ret = i915_gem_object_unbind(obj);
}
if (ret == 0) {
obj->tiling_changed = true;
obj->fence_dirty =
obj->fenced_gpu_access ||
obj->fence_reg != I915_FENCE_REG_NONE;
obj->tiling_mode = args->tiling_mode;
obj->stride = args->stride;
/* Force the fence to be reacquired for GTT access */
i915_gem_release_mmap(obj);
}
}
}
/* we have to maintain this existing ABI... */
args->stride = obj->stride;
args->tiling_mode = obj->tiling_mode;
@ -455,6 +470,22 @@ i915_gem_swizzle_page(vm_page_t m)
sf_buf_free(sf);
}
void
i915_gem_object_do_bit_17_swizzle_page(struct drm_i915_gem_object *obj,
vm_page_t m)
{
char new_bit_17;
if (obj->bit_17 == NULL)
return;
new_bit_17 = VM_PAGE_TO_PHYS(m) >> 17;
if ((new_bit_17 & 0x1) != (test_bit(m->pindex, obj->bit_17) != 0)) {
i915_gem_swizzle_page(m);
vm_page_dirty(m);
}
}
void
i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
{

File diff suppressed because it is too large

View File

@ -30,6 +30,11 @@ __FBSDID("$FreeBSD$");
#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
#define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a))
#define _MASKED_BIT_DISABLE(a) ((a) << 16)
/*
* The Bridge device's PCI config space has information about the
* fb aperture size and the amount of pre-reserved memory.
@ -129,6 +134,13 @@ __FBSDID("$FreeBSD$");
#define ECOCHK_PPGTT_CACHE64B (0x3<<3)
#define ECOCHK_PPGTT_CACHE4B (0x0<<3)
#define GAC_ECO_BITS 0x14090
#define ECOBITS_PPGTT_CACHE64B (3<<8)
#define ECOBITS_PPGTT_CACHE4B (0<<8)
#define GAB_CTL 0x24000
#define GAB_CTL_CONT_AFTER_PAGEFAULT (1<<8)
/* VGA stuff */
#define VGA_ST01_MDA 0x3ba
@ -230,6 +242,7 @@ __FBSDID("$FreeBSD$");
#define MI_BATCH_NON_SECURE (1)
#define MI_BATCH_NON_SECURE_I965 (1<<8)
#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
#define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */
#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */
#define MI_SEMAPHORE_GLOBAL_GTT (1<<22)
#define MI_SEMAPHORE_UPDATE (1<<21)
@ -309,6 +322,61 @@ __FBSDID("$FreeBSD$");
#define DEBUG_RESET_RENDER (1<<8)
#define DEBUG_RESET_DISPLAY (1<<9)
/*
* DPIO - a special bus for various display related registers to hide behind:
* 0x800c: m1, m2, n, p1, p2, k dividers
* 0x8014: REF and SFR select
* 0x8014: N divider, VCO select
* 0x801c/3c: core clock bits
* 0x8048/68: low pass filter coefficients
* 0x8100: fast clock controls
*/
#define DPIO_PKT 0x2100
#define DPIO_RID (0<<24)
#define DPIO_OP_WRITE (1<<16)
#define DPIO_OP_READ (0<<16)
#define DPIO_PORTID (0x12<<8)
#define DPIO_BYTE (0xf<<4)
#define DPIO_BUSY (1<<0) /* status only */
#define DPIO_DATA 0x2104
#define DPIO_REG 0x2108
#define DPIO_CTL 0x2110
#define DPIO_MODSEL1 (1<<3) /* if ref clk b == 27 */
#define DPIO_MODSEL0 (1<<2) /* if ref clk a == 27 */
#define DPIO_SFR_BYPASS (1<<1)
#define DPIO_RESET (1<<0)
#define _DPIO_DIV_A 0x800c
#define DPIO_POST_DIV_SHIFT (28) /* 3 bits */
#define DPIO_K_SHIFT (24) /* 4 bits */
#define DPIO_P1_SHIFT (21) /* 3 bits */
#define DPIO_P2_SHIFT (16) /* 5 bits */
#define DPIO_N_SHIFT (12) /* 4 bits */
#define DPIO_ENABLE_CALIBRATION (1<<11)
#define DPIO_M1DIV_SHIFT (8) /* 3 bits */
#define DPIO_M2DIV_MASK 0xff
#define _DPIO_DIV_B 0x802c
#define DPIO_DIV(pipe) _PIPE(pipe, _DPIO_DIV_A, _DPIO_DIV_B)
#define _DPIO_REFSFR_A 0x8014
#define DPIO_REFSEL_OVERRIDE 27
#define DPIO_PLL_MODESEL_SHIFT 24 /* 3 bits */
#define DPIO_BIAS_CURRENT_CTL_SHIFT 21 /* 3 bits, always 0x7 */
#define DPIO_PLL_REFCLK_SEL_SHIFT 16 /* 2 bits */
#define DPIO_DRIVER_CTL_SHIFT 12 /* always set to 0x8 */
#define DPIO_CLK_BIAS_CTL_SHIFT 8 /* always set to 0x5 */
#define _DPIO_REFSFR_B 0x8034
#define DPIO_REFSFR(pipe) _PIPE(pipe, _DPIO_REFSFR_A, _DPIO_REFSFR_B)
#define _DPIO_CORE_CLK_A 0x801c
#define _DPIO_CORE_CLK_B 0x803c
#define DPIO_CORE_CLK(pipe) _PIPE(pipe, _DPIO_CORE_CLK_A, _DPIO_CORE_CLK_B)
#define _DPIO_LFP_COEFF_A 0x8048
#define _DPIO_LFP_COEFF_B 0x8068
#define DPIO_LFP_COEFF(pipe) _PIPE(pipe, _DPIO_LFP_COEFF_A, _DPIO_LFP_COEFF_B)
#define DPIO_FASTCLK_DISABLE 0x8100
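To make the indirect access scheme sketched in the DPIO comment above concrete, here is a minimal, hypothetical write helper built only from the DPIO_* registers defined in this block; the function name, timeout values, and exact busy-wait sequence are assumptions and do not claim to match the driver's actual implementation.
static void
dpio_write_sketch(struct drm_i915_private *dev_priv, int reg, u32 val)
{
	int timeout;
	/* Wait for any previous sideband transaction to finish. */
	for (timeout = 100; (I915_READ(DPIO_PKT) & DPIO_BUSY) != 0 &&
	    timeout > 0; timeout--)
		DELAY(10);
	if (timeout == 0)
		return;
	I915_WRITE(DPIO_DATA, val);	/* payload */
	I915_WRITE(DPIO_REG, reg);	/* register offset behind the bus */
	/* Kick off the transaction using the field encodings defined above. */
	I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_WRITE | DPIO_PORTID | DPIO_BYTE);
	/* Wait for the write to be accepted. */
	for (timeout = 100; (I915_READ(DPIO_PKT) & DPIO_BUSY) != 0 &&
	    timeout > 0; timeout--)
		DELAY(10);
}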
/*
* Fence registers
@ -368,8 +436,6 @@ __FBSDID("$FreeBSD$");
#define ARB_MODE 0x04030
#define ARB_MODE_SWIZZLE_SNB (1<<4)
#define ARB_MODE_SWIZZLE_IVB (1<<5)
#define ARB_MODE_ENABLE(x) GFX_MODE_ENABLE(x)
#define ARB_MODE_DISABLE(x) GFX_MODE_DISABLE(x)
#define RENDER_HWS_PGA_GEN7 (0x04080)
#define RING_FAULT_REG(ring) (0x4094 + 0x100*(ring)->id)
#define DONE_REG 0x40b0
@ -425,6 +491,7 @@ __FBSDID("$FreeBSD$");
#define INSTDONE 0x02090
#define NOPID 0x02094
#define HWSTAM 0x02098
#define DMA_FADD_I8XX 0x020d0
#define ERROR_GEN6 0x040a0
@ -440,6 +507,7 @@ __FBSDID("$FreeBSD$");
*/
# define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14)
#define _3D_CHICKEN3 0x02090
#define _3D_CHICKEN_SF_DISABLE_FASTCLIP_CULL (1 << 5)
#define MI_MODE 0x0209c
# define VS_TIMER_DISPATCH (1 << 6)
@ -455,14 +523,16 @@ __FBSDID("$FreeBSD$");
#define GFX_PSMI_GRANULARITY (1<<10)
#define GFX_PPGTT_ENABLE (1<<9)
#define GFX_MODE_ENABLE(bit) (((bit) << 16) | (bit))
#define GFX_MODE_DISABLE(bit) (((bit) << 16) | (0))
#define SCPD0 0x0209c /* 915+ only */
#define IER 0x020a0
#define IIR 0x020a4
#define IMR 0x020a8
#define ISR 0x020ac
#define VLV_IIR_RW 0x182084
#define VLV_IER 0x1820a0
#define VLV_IIR 0x1820a4
#define VLV_IMR 0x1820a8
#define VLV_ISR 0x1820ac
#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15)
@ -508,7 +578,6 @@ __FBSDID("$FreeBSD$");
#define LM_BURST_LENGTH 0x00000700
#define LM_FIFO_WATERMARK 0x0000001F
#define MI_ARB_STATE 0x020e4 /* 915+ only */
#define MI_ARB_MASK_SHIFT 16 /* shift for enable bits */
/* Make render/texture TLB fetches lower priority than associated data
* fetches. This is not turned on by default
@ -573,7 +642,6 @@ __FBSDID("$FreeBSD$");
#define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */
#define CACHE_MODE_0 0x02120 /* 915+ only */
#define CM0_MASK_SHIFT 16
#define CM0_IZ_OPT_DISABLE (1<<6)
#define CM0_ZR_OPT_DISABLE (1<<5)
#define CM0_STC_EVICT_DISABLE_LRA_SNB (1<<5)
@ -587,7 +655,12 @@ __FBSDID("$FreeBSD$");
#define ECO_GATING_CX_ONLY (1<<3)
#define ECO_FLIP_DONE (1<<0)
/* GEN6 interrupt control */
#define CACHE_MODE_1 0x7004 /* IVB+ */
#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6)
/* GEN6 interrupt control
* Note that the per-ring interrupt bits do alias with the global interrupt bits
* in GTIMR. */
#define GEN6_RENDER_HWSTAM 0x2098
#define GEN6_RENDER_IMR 0x20a8
#define GEN6_RENDER_CONTEXT_SWITCH_INTERRUPT (1 << 8)
@ -623,6 +696,21 @@ __FBSDID("$FreeBSD$");
#define GEN6_BSD_RNCID 0x12198
#define GEN7_FF_THREAD_MODE 0x20a0
#define GEN7_FF_SCHED_MASK 0x0077070
#define GEN7_FF_TS_SCHED_HS1 (0x5<<16)
#define GEN7_FF_TS_SCHED_HS0 (0x3<<16)
#define GEN7_FF_TS_SCHED_LOAD_BALANCE (0x1<<16)
#define GEN7_FF_TS_SCHED_HW (0x0<<16) /* Default */
#define GEN7_FF_VS_SCHED_HS1 (0x5<<12)
#define GEN7_FF_VS_SCHED_HS0 (0x3<<12)
#define GEN7_FF_VS_SCHED_LOAD_BALANCE (0x1<<12) /* Default */
#define GEN7_FF_VS_SCHED_HW (0x0<<12)
#define GEN7_FF_DS_SCHED_HS1 (0x5<<4)
#define GEN7_FF_DS_SCHED_HS0 (0x3<<4)
#define GEN7_FF_DS_SCHED_LOAD_BALANCE (0x1<<4) /* Default */
#define GEN7_FF_DS_SCHED_HW (0x0<<4)
/*
* Framebuffer compression (915+ only)
*/
@ -751,9 +839,9 @@ __FBSDID("$FreeBSD$");
#define GMBUS_PORT_PANEL 3
#define GMBUS_PORT_DPC 4 /* HDMIC */
#define GMBUS_PORT_DPB 5 /* SDVO, HDMIB */
/* 6 reserved */
#define GMBUS_PORT_DPD 7 /* HDMID */
#define GMBUS_NUM_PORTS 8
#define GMBUS_PORT_DPD 6 /* HDMID */
#define GMBUS_PORT_RESERVED 7 /* 7 reserved */
#define GMBUS_NUM_PORTS (GMBUS_PORT_DPD - GMBUS_PORT_SSC + 1)
#define GMBUS1 0x5104 /* command/status */
#define GMBUS_SW_CLR_INT (1<<31)
#define GMBUS_SW_RDY (1<<30)
@ -805,7 +893,9 @@ __FBSDID("$FreeBSD$");
#define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B)
#define DPLL_VCO_ENABLE (1 << 31)
#define DPLL_DVO_HIGH_SPEED (1 << 30)
#define DPLL_EXT_BUFFER_ENABLE_VLV (1 << 30)
#define DPLL_SYNCLOCK_ENABLE (1 << 29)
#define DPLL_REFA_CLK_ENABLE_VLV (1 << 29)
#define DPLL_VGA_MODE_DIS (1 << 28)
#define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */
#define DPLLB_MODE_LVDS (2 << 26) /* i915 */
@ -817,6 +907,7 @@ __FBSDID("$FreeBSD$");
#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
#define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */
#define DPLL_INTEGRATED_CLOCK_VLV (1<<13)
#define SRX_INDEX 0x3c4
#define SRX_DATA 0x3c5
@ -912,6 +1003,7 @@ __FBSDID("$FreeBSD$");
#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
#define _DPLL_B_MD 0x06020 /* 965+ only */
#define DPLL_MD(pipe) _PIPE(pipe, _DPLL_A_MD, _DPLL_B_MD)
#define _FPA0 0x06040
#define _FPA1 0x06044
#define _FPB0 0x06048
@ -1052,6 +1144,9 @@ __FBSDID("$FreeBSD$");
#define RAMCLK_GATE_D 0x6210 /* CRL only */
#define DEUC 0x6214 /* CRL only */
#define FW_BLC_SELF_VLV 0x6500
#define FW_CSPWRDWNEN (1<<15)
/*
* Palette regs
*/
@ -1634,9 +1729,12 @@ __FBSDID("$FreeBSD$");
/* Video Data Island Packet control */
#define VIDEO_DIP_DATA 0x61178
#define VIDEO_DIP_CTL 0x61170
/* Pre HSW: */
#define VIDEO_DIP_ENABLE (1 << 31)
#define VIDEO_DIP_PORT_B (1 << 29)
#define VIDEO_DIP_PORT_C (2 << 29)
#define VIDEO_DIP_PORT_D (3 << 29)
#define VIDEO_DIP_PORT_MASK (3 << 29)
#define VIDEO_DIP_ENABLE_AVI (1 << 21)
#define VIDEO_DIP_ENABLE_VENDOR (2 << 21)
#define VIDEO_DIP_ENABLE_SPD (8 << 21)
@ -1647,6 +1745,10 @@ __FBSDID("$FreeBSD$");
#define VIDEO_DIP_FREQ_ONCE (0 << 16)
#define VIDEO_DIP_FREQ_VSYNC (1 << 16)
#define VIDEO_DIP_FREQ_2VSYNC (2 << 16)
#define VIDEO_DIP_FREQ_MASK (3 << 16)
/* HSW and later: */
#define VIDEO_DIP_ENABLE_AVI_HSW (1 << 12)
#define VIDEO_DIP_ENABLE_SPD_HSW (1 << 0)
/* Panel power sequencing */
#define PP_STATUS 0x61200
@ -2413,7 +2515,8 @@ __FBSDID("$FreeBSD$");
/* Pipe A */
#define _PIPEADSL 0x70000
#define DSL_LINEMASK 0x00000fff
#define DSL_LINEMASK_GEN2 0x00000fff
#define DSL_LINEMASK_GEN3 0x00001fff
#define _PIPEACONF 0x70008
#define PIPECONF_ENABLE (1<<31)
#define PIPECONF_DISABLE 0
@ -2455,23 +2558,30 @@ __FBSDID("$FreeBSD$");
#define PIPECONF_DITHER_TYPE_TEMP (3<<2)
#define _PIPEASTAT 0x70024
#define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31)
#define SPRITE1_FLIPDONE_INT_EN_VLV (1UL<<30)
#define PIPE_CRC_ERROR_ENABLE (1UL<<29)
#define PIPE_CRC_DONE_ENABLE (1UL<<28)
#define PIPE_GMBUS_EVENT_ENABLE (1UL<<27)
#define PLANE_FLIP_DONE_INT_EN_VLV (1UL<<26)
#define PIPE_HOTPLUG_INTERRUPT_ENABLE (1UL<<26)
#define PIPE_VSYNC_INTERRUPT_ENABLE (1UL<<25)
#define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24)
#define PIPE_DPST_EVENT_ENABLE (1UL<<23)
#define SPRITE0_FLIP_DONE_INT_EN_VLV (1UL<<26)
#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL<<22)
#define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21)
#define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20)
#define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL<<18) /* pre-965 */
#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */
#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17)
#define PIPEA_HBLANK_INT_EN_VLV (1UL<<16)
#define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16)
#define SPRITE1_FLIPDONE_INT_STATUS_VLV (1UL<<15)
#define SPRITE0_FLIPDONE_INT_STATUS_VLV (1UL<<15)
#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13)
#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12)
#define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11)
#define PLANE_FLIPDONE_INT_STATUS_VLV (1UL<<10)
#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL<<10)
#define PIPE_VSYNC_INTERRUPT_STATUS (1UL<<9)
#define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL<<8)
@ -2496,6 +2606,40 @@ __FBSDID("$FreeBSD$");
#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL)
#define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT)
#define VLV_DPFLIPSTAT 0x70028
#define PIPEB_LINE_COMPARE_STATUS (1<<29)
#define PIPEB_HLINE_INT_EN (1<<28)
#define PIPEB_VBLANK_INT_EN (1<<27)
#define SPRITED_FLIPDONE_INT_EN (1<<26)
#define SPRITEC_FLIPDONE_INT_EN (1<<25)
#define PLANEB_FLIPDONE_INT_EN (1<<24)
#define PIPEA_LINE_COMPARE_STATUS (1<<21)
#define PIPEA_HLINE_INT_EN (1<<20)
#define PIPEA_VBLANK_INT_EN (1<<19)
#define SPRITEB_FLIPDONE_INT_EN (1<<18)
#define SPRITEA_FLIPDONE_INT_EN (1<<17)
#define PLANEA_FLIPDONE_INT_EN (1<<16)
#define DPINVGTT 0x7002c /* VLV only */
#define CURSORB_INVALID_GTT_INT_EN (1<<23)
#define CURSORA_INVALID_GTT_INT_EN (1<<22)
#define SPRITED_INVALID_GTT_INT_EN (1<<21)
#define SPRITEC_INVALID_GTT_INT_EN (1<<20)
#define PLANEB_INVALID_GTT_INT_EN (1<<19)
#define SPRITEB_INVALID_GTT_INT_EN (1<<18)
#define SPRITEA_INVALID_GTT_INT_EN (1<<17)
#define PLANEA_INVALID_GTT_INT_EN (1<<16)
#define DPINVGTT_EN_MASK 0xff0000
#define CURSORB_INVALID_GTT_STATUS (1<<7)
#define CURSORA_INVALID_GTT_STATUS (1<<6)
#define SPRITED_INVALID_GTT_STATUS (1<<5)
#define SPRITEC_INVALID_GTT_STATUS (1<<4)
#define PLANEB_INVALID_GTT_STATUS (1<<3)
#define SPRITEB_INVALID_GTT_STATUS (1<<2)
#define SPRITEA_INVALID_GTT_STATUS (1<<1)
#define PLANEA_INVALID_GTT_STATUS (1<<0)
#define DPINVGTT_STATUS_MASK 0xff
#define DSPARB 0x70030
#define DSPARB_CSTART_MASK (0x7f << 7)
#define DSPARB_CSTART_SHIFT 7
@ -2525,11 +2669,28 @@ __FBSDID("$FreeBSD$");
#define DSPFW_HPLL_CURSOR_MASK (0x3f<<16)
#define DSPFW_HPLL_SR_MASK (0x1ff)
/* drain latency register values*/
#define DRAIN_LATENCY_PRECISION_32 32
#define DRAIN_LATENCY_PRECISION_16 16
#define VLV_DDL1 0x70050
#define DDL_CURSORA_PRECISION_32 (1<<31)
#define DDL_CURSORA_PRECISION_16 (0<<31)
#define DDL_CURSORA_SHIFT 24
#define DDL_PLANEA_PRECISION_32 (1<<7)
#define DDL_PLANEA_PRECISION_16 (0<<7)
#define VLV_DDL2 0x70054
#define DDL_CURSORB_PRECISION_32 (1<<31)
#define DDL_CURSORB_PRECISION_16 (0<<31)
#define DDL_CURSORB_SHIFT 24
#define DDL_PLANEB_PRECISION_32 (1<<7)
#define DDL_PLANEB_PRECISION_16 (0<<7)
/* FIFO watermark sizes etc */
#define G4X_FIFO_LINE_SIZE 64
#define I915_FIFO_LINE_SIZE 64
#define I830_FIFO_LINE_SIZE 32
#define VALLEYVIEW_FIFO_SIZE 255
#define G4X_FIFO_SIZE 127
#define I965_FIFO_SIZE 512
#define I945_FIFO_SIZE 127
@ -2537,6 +2698,7 @@ __FBSDID("$FreeBSD$");
#define I855GM_FIFO_SIZE 127 /* In cachelines */
#define I830_FIFO_SIZE 95
#define VALLEYVIEW_MAX_WM 0xff
#define G4X_MAX_WM 0x3f
#define I915_MAX_WM 0x3f
@ -2551,6 +2713,7 @@ __FBSDID("$FreeBSD$");
#define PINEVIEW_CURSOR_DFT_WM 0
#define PINEVIEW_CURSOR_GUARD_WM 5
#define VALLEYVIEW_CURSOR_MAX_WM 64
#define I965_CURSOR_FIFO 64
#define I965_CURSOR_MAX_WM 32
#define I965_CURSOR_DFT_WM 8
@ -2759,6 +2922,13 @@ __FBSDID("$FreeBSD$");
#define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF)
#define DSPTILEOFF(plane) _PIPE(plane, _DSPATILEOFF, _DSPBTILEOFF)
/* Display/Sprite base address macros */
#define DISP_BASEADDR_MASK (0xfffff000)
#define I915_LO_DISPBASE(val) (val & ~DISP_BASEADDR_MASK)
#define I915_HI_DISPBASE(val) (val & DISP_BASEADDR_MASK)
#define I915_MODIFY_DISPBASE(reg, gfx_addr) \
(I915_WRITE(reg, gfx_addr | I915_LO_DISPBASE(I915_READ(reg))))
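One plausible use of the macro above (a hypothetical call, not taken from this commit) is updating a plane's surface base address while preserving the low offset bits already latched in the register; dev_priv and obj are assumed to be in scope:
	I915_MODIFY_DISPBASE(DSPSURF(plane), obj->gtt_offset);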
/* VBIOS flags */
#define SWF00 0x71410
#define SWF01 0x71414
@ -3091,26 +3261,38 @@ __FBSDID("$FreeBSD$");
#define DE_PCH_EVENT_IVB (1<<28)
#define DE_DP_A_HOTPLUG_IVB (1<<27)
#define DE_AUX_CHANNEL_A_IVB (1<<26)
#define DE_SPRITEC_FLIP_DONE_IVB (1<<14)
#define DE_PLANEC_FLIP_DONE_IVB (1<<13)
#define DE_PIPEC_VBLANK_IVB (1<<10)
#define DE_SPRITEB_FLIP_DONE_IVB (1<<9)
#define DE_SPRITEA_FLIP_DONE_IVB (1<<4)
#define DE_PLANEB_FLIP_DONE_IVB (1<<8)
#define DE_PLANEA_FLIP_DONE_IVB (1<<3)
#define DE_PIPEB_VBLANK_IVB (1<<5)
#define DE_SPRITEA_FLIP_DONE_IVB (1<<4)
#define DE_PLANEA_FLIP_DONE_IVB (1<<3)
#define DE_PIPEA_VBLANK_IVB (1<<0)
#define VLV_MASTER_IER 0x4400c /* Gunit master IER */
#define MASTER_INTERRUPT_ENABLE (1<<31)
#define DEISR 0x44000
#define DEIMR 0x44004
#define DEIIR 0x44008
#define DEIER 0x4400c
/* GT interrupt */
#define GT_PIPE_NOTIFY (1 << 4)
#define GT_RENDER_CS_ERROR (1 << 3)
#define GT_SYNC_STATUS (1 << 2)
#define GT_USER_INTERRUPT (1 << 0)
#define GT_BSD_USER_INTERRUPT (1 << 5)
#define GT_GEN6_BSD_USER_INTERRUPT (1 << 12)
#define GT_BLT_USER_INTERRUPT (1 << 22)
/* GT interrupt.
* Note that for gen6+ the ring-specific interrupt bits do alias with the
* corresponding bits in the per-ring interrupt control registers. */
#define GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT (1 << 26)
#define GT_GEN6_BLT_CS_ERROR_INTERRUPT (1 << 25)
#define GT_GEN6_BLT_USER_INTERRUPT (1 << 22)
#define GT_GEN6_BSD_CS_ERROR_INTERRUPT (1 << 15)
#define GT_GEN6_BSD_USER_INTERRUPT (1 << 12)
#define GT_BSD_USER_INTERRUPT (1 << 5) /* ilk only */
#define GT_GEN7_L3_PARITY_ERROR_INTERRUPT (1 << 5)
#define GT_PIPE_NOTIFY (1 << 4)
#define GT_RENDER_CS_ERROR_INTERRUPT (1 << 3)
#define GT_SYNC_STATUS (1 << 2)
#define GT_USER_INTERRUPT (1 << 0)
#define GTISR 0x44010
#define GTIMR 0x44014
@ -3260,15 +3442,15 @@ __FBSDID("$FreeBSD$");
#define _PCH_DPLL_A 0xc6014
#define _PCH_DPLL_B 0xc6018
#define PCH_DPLL(pipe) (pipe == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
#define _PCH_DPLL(pll) (pll == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
#define _PCH_FPA0 0xc6040
#define FP_CB_TUNE (0x3<<22)
#define _PCH_FPA1 0xc6044
#define _PCH_FPB0 0xc6048
#define _PCH_FPB1 0xc604c
#define PCH_FP0(pipe) (pipe == 0 ? _PCH_FPA0 : _PCH_FPB0)
#define PCH_FP1(pipe) (pipe == 0 ? _PCH_FPA1 : _PCH_FPB1)
#define _PCH_FP0(pll) (pll == 0 ? _PCH_FPA0 : _PCH_FPB0)
#define _PCH_FP1(pll) (pll == 0 ? _PCH_FPA1 : _PCH_FPB1)
#define PCH_DPLL_TEST 0xc606c
@ -3363,6 +3545,57 @@ __FBSDID("$FreeBSD$");
#define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
#define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
#define VLV_VIDEO_DIP_CTL_A 0x60220
#define VLV_VIDEO_DIP_DATA_A 0x60208
#define VLV_VIDEO_DIP_GDCP_PAYLOAD_A 0x60210
#define VLV_VIDEO_DIP_CTL_B 0x61170
#define VLV_VIDEO_DIP_DATA_B 0x61174
#define VLV_VIDEO_DIP_GDCP_PAYLOAD_B 0x61178
#define VLV_TVIDEO_DIP_CTL(pipe) \
_PIPE(pipe, VLV_VIDEO_DIP_CTL_A, VLV_VIDEO_DIP_CTL_B)
#define VLV_TVIDEO_DIP_DATA(pipe) \
_PIPE(pipe, VLV_VIDEO_DIP_DATA_A, VLV_VIDEO_DIP_DATA_B)
#define VLV_TVIDEO_DIP_GCP(pipe) \
_PIPE(pipe, VLV_VIDEO_DIP_GDCP_PAYLOAD_A, VLV_VIDEO_DIP_GDCP_PAYLOAD_B)
/* Haswell DIP controls */
#define HSW_VIDEO_DIP_CTL_A 0x60200
#define HSW_VIDEO_DIP_AVI_DATA_A 0x60220
#define HSW_VIDEO_DIP_VS_DATA_A 0x60260
#define HSW_VIDEO_DIP_SPD_DATA_A 0x602A0
#define HSW_VIDEO_DIP_GMP_DATA_A 0x602E0
#define HSW_VIDEO_DIP_VSC_DATA_A 0x60320
#define HSW_VIDEO_DIP_AVI_ECC_A 0x60240
#define HSW_VIDEO_DIP_VS_ECC_A 0x60280
#define HSW_VIDEO_DIP_SPD_ECC_A 0x602C0
#define HSW_VIDEO_DIP_GMP_ECC_A 0x60300
#define HSW_VIDEO_DIP_VSC_ECC_A 0x60344
#define HSW_VIDEO_DIP_GCP_A 0x60210
#define HSW_VIDEO_DIP_CTL_B 0x61200
#define HSW_VIDEO_DIP_AVI_DATA_B 0x61220
#define HSW_VIDEO_DIP_VS_DATA_B 0x61260
#define HSW_VIDEO_DIP_SPD_DATA_B 0x612A0
#define HSW_VIDEO_DIP_GMP_DATA_B 0x612E0
#define HSW_VIDEO_DIP_VSC_DATA_B 0x61320
#define HSW_VIDEO_DIP_BVI_ECC_B 0x61240
#define HSW_VIDEO_DIP_VS_ECC_B 0x61280
#define HSW_VIDEO_DIP_SPD_ECC_B 0x612C0
#define HSW_VIDEO_DIP_GMP_ECC_B 0x61300
#define HSW_VIDEO_DIP_VSC_ECC_B 0x61344
#define HSW_VIDEO_DIP_GCP_B 0x61210
#define HSW_TVIDEO_DIP_CTL(pipe) \
_PIPE(pipe, HSW_VIDEO_DIP_CTL_A, HSW_VIDEO_DIP_CTL_B)
#define HSW_TVIDEO_DIP_AVI_DATA(pipe) \
_PIPE(pipe, HSW_VIDEO_DIP_AVI_DATA_A, HSW_VIDEO_DIP_AVI_DATA_B)
#define HSW_TVIDEO_DIP_SPD_DATA(pipe) \
_PIPE(pipe, HSW_VIDEO_DIP_SPD_DATA_A, HSW_VIDEO_DIP_SPD_DATA_B)
#define HSW_TVIDEO_DIP_GCP(pipe) \
_PIPE(pipe, HSW_VIDEO_DIP_GCP_A, HSW_VIDEO_DIP_GCP_B)
#define _TRANS_HTOTAL_B 0xe1000
#define _TRANS_HBLANK_B 0xe1004
#define _TRANS_HSYNC_B 0xe1008
@ -3522,6 +3755,9 @@ __FBSDID("$FreeBSD$");
#define FDI_LINK_TRAIN_PATTERN_IDLE_CPT (2<<8)
#define FDI_LINK_TRAIN_NORMAL_CPT (3<<8)
#define FDI_LINK_TRAIN_PATTERN_MASK_CPT (3<<8)
/* LPT */
#define FDI_PORT_WIDTH_2X_LPT (1<<19)
#define FDI_PORT_WIDTH_1X_LPT (0<<19)
#define _FDI_RXA_MISC 0xf0010
#define _FDI_RXB_MISC 0xf1010
@ -3582,6 +3818,7 @@ __FBSDID("$FreeBSD$");
#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
/* or SDVOB */
#define VLV_HDMIB 0x61140
#define HDMIB 0xe1140
#define PORT_ENABLE (1 << 31)
#define TRANSCODER(pipe) ((pipe) << 30)
@ -3747,6 +3984,8 @@ __FBSDID("$FreeBSD$");
#define EDP_LINK_TRAIN_VOL_EMP_MASK_IVB (0x3f<<22)
#define FORCEWAKE 0xA18C
#define FORCEWAKE_VLV 0x1300b0
#define FORCEWAKE_ACK_VLV 0x1300b4
#define FORCEWAKE_ACK 0x130090
#define FORCEWAKE_MT 0xa188 /* multi-threaded */
#define FORCEWAKE_MT_ACK 0x130040
@ -3764,6 +4003,7 @@ __FBSDID("$FreeBSD$");
#define GEN6_UCGCTL1 0x9400
# define GEN6_BLBUNIT_CLOCK_GATE_DISABLE (1 << 5)
# define GEN6_CSUNIT_CLOCK_GATE_DISABLE (1 << 7)
#define GEN6_UCGCTL2 0x9404
# define GEN6_RCZUNIT_CLOCK_GATE_DISABLE (1 << 13)
@ -3841,8 +4081,13 @@ __FBSDID("$FreeBSD$");
#define GEN6_PM_RP_UP_EI_EXPIRED (1<<2)
#define GEN6_PM_RP_DOWN_EI_EXPIRED (1<<1)
#define GEN6_PM_DEFERRED_EVENTS (GEN6_PM_RP_UP_THRESHOLD | \
GEN6_PM_RP_DOWN_THRESHOLD | \
GEN6_PM_RP_DOWN_TIMEOUT)
GEN6_PM_RP_DOWN_THRESHOLD | \
GEN6_PM_RP_DOWN_TIMEOUT)
#define GEN6_GT_GFX_RC6_LOCKED 0x138104
#define GEN6_GT_GFX_RC6 0x138108
#define GEN6_GT_GFX_RC6p 0x13810C
#define GEN6_GT_GFX_RC6pp 0x138110
#define GEN6_PCODE_MAILBOX 0x138124
#define GEN6_PCODE_READY (1<<31)
@ -3903,4 +4148,197 @@ __FBSDID("$FreeBSD$");
#define AUD_CONFIG_PIXEL_CLOCK_HDMI (0xf << 16)
#define AUD_CONFIG_DISABLE_NCTS (1 << 3)
/* HSW Power Wells */
#define HSW_PWR_WELL_CTL1 0x45400 /* BIOS */
#define HSW_PWR_WELL_CTL2 0x45404 /* Driver */
#define HSW_PWR_WELL_CTL3 0x45408 /* KVMR */
#define HSW_PWR_WELL_CTL4 0x4540C /* Debug */
#define HSW_PWR_WELL_ENABLE (1<<31)
#define HSW_PWR_WELL_STATE (1<<30)
#define HSW_PWR_WELL_CTL5 0x45410
#define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1<<31)
#define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1<<20)
#define HSW_PWR_WELL_FORCE_ON (1<<19)
#define HSW_PWR_WELL_CTL6 0x45414
/* Per-pipe DDI Function Control */
#define PIPE_DDI_FUNC_CTL_A 0x60400
#define PIPE_DDI_FUNC_CTL_B 0x61400
#define PIPE_DDI_FUNC_CTL_C 0x62400
#define PIPE_DDI_FUNC_CTL_EDP 0x6F400
#define DDI_FUNC_CTL(pipe) _PIPE(pipe, \
PIPE_DDI_FUNC_CTL_A, \
PIPE_DDI_FUNC_CTL_B)
#define PIPE_DDI_FUNC_ENABLE (1<<31)
/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
#define PIPE_DDI_PORT_MASK (0xf<<28)
#define PIPE_DDI_SELECT_PORT(x) ((x)<<28)
#define PIPE_DDI_MODE_SELECT_HDMI (0<<24)
#define PIPE_DDI_MODE_SELECT_DVI (1<<24)
#define PIPE_DDI_MODE_SELECT_DP_SST (2<<24)
#define PIPE_DDI_MODE_SELECT_DP_MST (3<<24)
#define PIPE_DDI_MODE_SELECT_FDI (4<<24)
#define PIPE_DDI_BPC_8 (0<<20)
#define PIPE_DDI_BPC_10 (1<<20)
#define PIPE_DDI_BPC_6 (2<<20)
#define PIPE_DDI_BPC_12 (3<<20)
#define PIPE_DDI_BFI_ENABLE (1<<4)
#define PIPE_DDI_PORT_WIDTH_X1 (0<<1)
#define PIPE_DDI_PORT_WIDTH_X2 (1<<1)
#define PIPE_DDI_PORT_WIDTH_X4 (3<<1)
/* DisplayPort Transport Control */
#define DP_TP_CTL_A 0x64040
#define DP_TP_CTL_B 0x64140
#define DP_TP_CTL(port) _PORT(port, \
DP_TP_CTL_A, \
DP_TP_CTL_B)
#define DP_TP_CTL_ENABLE (1<<31)
#define DP_TP_CTL_MODE_SST (0<<27)
#define DP_TP_CTL_MODE_MST (1<<27)
#define DP_TP_CTL_ENHANCED_FRAME_ENABLE (1<<18)
#define DP_TP_CTL_FDI_AUTOTRAIN (1<<15)
#define DP_TP_CTL_LINK_TRAIN_MASK (7<<8)
#define DP_TP_CTL_LINK_TRAIN_PAT1 (0<<8)
#define DP_TP_CTL_LINK_TRAIN_PAT2 (1<<8)
#define DP_TP_CTL_LINK_TRAIN_NORMAL (3<<8)
/* DisplayPort Transport Status */
#define DP_TP_STATUS_A 0x64044
#define DP_TP_STATUS_B 0x64144
#define DP_TP_STATUS(port) _PORT(port, \
DP_TP_STATUS_A, \
DP_TP_STATUS_B)
#define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12)
/* DDI Buffer Control */
#define DDI_BUF_CTL_A 0x64000
#define DDI_BUF_CTL_B 0x64100
#define DDI_BUF_CTL(port) _PORT(port, \
DDI_BUF_CTL_A, \
DDI_BUF_CTL_B)
#define DDI_BUF_CTL_ENABLE (1<<31)
#define DDI_BUF_EMP_400MV_0DB_HSW (0<<24) /* Sel0 */
#define DDI_BUF_EMP_400MV_3_5DB_HSW (1<<24) /* Sel1 */
#define DDI_BUF_EMP_400MV_6DB_HSW (2<<24) /* Sel2 */
#define DDI_BUF_EMP_400MV_9_5DB_HSW (3<<24) /* Sel3 */
#define DDI_BUF_EMP_600MV_0DB_HSW (4<<24) /* Sel4 */
#define DDI_BUF_EMP_600MV_3_5DB_HSW (5<<24) /* Sel5 */
#define DDI_BUF_EMP_600MV_6DB_HSW (6<<24) /* Sel6 */
#define DDI_BUF_EMP_800MV_0DB_HSW (7<<24) /* Sel7 */
#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */
#define DDI_BUF_EMP_MASK (0xf<<24)
#define DDI_BUF_IS_IDLE (1<<7)
#define DDI_PORT_WIDTH_X1 (0<<1)
#define DDI_PORT_WIDTH_X2 (1<<1)
#define DDI_PORT_WIDTH_X4 (3<<1)
#define DDI_INIT_DISPLAY_DETECTED (1<<0)
/* DDI Buffer Translations */
#define DDI_BUF_TRANS_A 0x64E00
#define DDI_BUF_TRANS_B 0x64E60
#define DDI_BUF_TRANS(port) _PORT(port, \
DDI_BUF_TRANS_A, \
DDI_BUF_TRANS_B)
/* Sideband Interface (SBI) is programmed indirectly, via
* SBI_ADDR, which contains the register offset; and SBI_DATA,
* which contains the payload */
#define SBI_ADDR 0xC6000
#define SBI_DATA 0xC6004
#define SBI_CTL_STAT 0xC6008
#define SBI_CTL_OP_CRRD (0x6<<8)
#define SBI_CTL_OP_CRWR (0x7<<8)
#define SBI_RESPONSE_FAIL (0x1<<1)
#define SBI_RESPONSE_SUCCESS (0x0<<1)
#define SBI_BUSY (0x1<<0)
#define SBI_READY (0x0<<0)
/* SBI offsets */
#define SBI_SSCDIVINTPHASE6 0x0600
#define SBI_SSCDIVINTPHASE_DIVSEL_MASK ((0x7f)<<1)
#define SBI_SSCDIVINTPHASE_DIVSEL(x) ((x)<<1)
#define SBI_SSCDIVINTPHASE_INCVAL_MASK ((0x7f)<<8)
#define SBI_SSCDIVINTPHASE_INCVAL(x) ((x)<<8)
#define SBI_SSCDIVINTPHASE_DIR(x) ((x)<<15)
#define SBI_SSCDIVINTPHASE_PROPAGATE (1<<0)
#define SBI_SSCCTL 0x020c
#define SBI_SSCCTL6 0x060C
#define SBI_SSCCTL_DISABLE (1<<0)
#define SBI_SSCAUXDIV6 0x0610
#define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4)
#define SBI_DBUFF0 0x2a00
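A minimal sketch of the indirect programming sequence described in the SBI comment above, using only the SBI_* definitions from this block; the function name, the placement of the offset within SBI_ADDR, and the timeout handling are assumptions rather than the driver's actual code.
static int
sbi_write_sketch(struct drm_i915_private *dev_priv, u16 offset, u32 value)
{
	int timeout;
	/* The interface must be idle before a new transaction is issued. */
	for (timeout = 100; (I915_READ(SBI_CTL_STAT) & SBI_BUSY) != 0 &&
	    timeout > 0; timeout--)
		DELAY(10);
	if (timeout == 0)
		return (-ETIMEDOUT);
	I915_WRITE(SBI_ADDR, offset);	/* register offset; field placement assumed */
	I915_WRITE(SBI_DATA, value);	/* payload */
	I915_WRITE(SBI_CTL_STAT, SBI_CTL_OP_CRWR | SBI_BUSY);
	/* Wait for completion and check the response code. */
	for (timeout = 100; (I915_READ(SBI_CTL_STAT) & SBI_BUSY) != 0 &&
	    timeout > 0; timeout--)
		DELAY(10);
	if (timeout == 0 || (I915_READ(SBI_CTL_STAT) & SBI_RESPONSE_FAIL) != 0)
		return (-EIO);
	return (0);
}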
/* LPT PIXCLK_GATE */
#define PIXCLK_GATE 0xC6020
#define PIXCLK_GATE_UNGATE 1<<0
#define PIXCLK_GATE_GATE 0<<0
/* SPLL */
#define SPLL_CTL 0x46020
#define SPLL_PLL_ENABLE (1<<31)
#define SPLL_PLL_SCC (1<<28)
#define SPLL_PLL_NON_SCC (2<<28)
#define SPLL_PLL_FREQ_810MHz (0<<26)
#define SPLL_PLL_FREQ_1350MHz (1<<26)
/* WRPLL */
#define WRPLL_CTL1 0x46040
#define WRPLL_CTL2 0x46060
#define WRPLL_PLL_ENABLE (1<<31)
#define WRPLL_PLL_SELECT_SSC (0x01<<28)
#define WRPLL_PLL_SELECT_NON_SCC (0x02<<28)
#define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28)
/* WRPLL divider programming */
#define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0)
#define WRPLL_DIVIDER_POST(x) ((x)<<8)
#define WRPLL_DIVIDER_FEEDBACK(x) ((x)<<16)
/* Port clock selection */
#define PORT_CLK_SEL_A 0x46100
#define PORT_CLK_SEL_B 0x46104
#define PORT_CLK_SEL(port) _PORT(port, \
PORT_CLK_SEL_A, \
PORT_CLK_SEL_B)
#define PORT_CLK_SEL_LCPLL_2700 (0<<29)
#define PORT_CLK_SEL_LCPLL_1350 (1<<29)
#define PORT_CLK_SEL_LCPLL_810 (2<<29)
#define PORT_CLK_SEL_SPLL (3<<29)
#define PORT_CLK_SEL_WRPLL1 (4<<29)
#define PORT_CLK_SEL_WRPLL2 (5<<29)
/* Pipe clock selection */
#define PIPE_CLK_SEL_A 0x46140
#define PIPE_CLK_SEL_B 0x46144
#define PIPE_CLK_SEL(pipe) _PIPE(pipe, \
PIPE_CLK_SEL_A, \
PIPE_CLK_SEL_B)
/* For each pipe, we need to select the corresponding port clock */
#define PIPE_CLK_SEL_DISABLED (0x0<<29)
#define PIPE_CLK_SEL_PORT(x) ((x+1)<<29)
/* LCPLL Control */
#define LCPLL_CTL 0x130040
#define LCPLL_PLL_DISABLE (1<<31)
#define LCPLL_PLL_LOCK (1<<30)
#define LCPLL_CD_CLOCK_DISABLE (1<<25)
#define LCPLL_CD2X_CLOCK_DISABLE (1<<23)
/* Pipe WM_LINETIME - watermark line time */
#define PIPE_WM_LINETIME_A 0x45270
#define PIPE_WM_LINETIME_B 0x45274
#define PIPE_WM_LINETIME(pipe) _PIPE(pipe, \
PIPE_WM_LINETIME_A, \
PIPE_WM_LINETIME_A)
#define PIPE_WM_LINETIME_MASK (0x1ff)
#define PIPE_WM_LINETIME_TIME(x) ((x))
#define PIPE_WM_LINETIME_IPS_LINETIME_MASK (0x1ff<<16)
#define PIPE_WM_LINETIME_IPS_LINETIME(x) ((x)<<16)
/* SFUSE_STRAP */
#define SFUSE_STRAP 0xc2014
#define SFUSE_STRAP_DDIB_DETECTED (1<<2)
#define SFUSE_STRAP_DDIC_DETECTED (1<<1)
#define SFUSE_STRAP_DDID_DETECTED (1<<0)
#endif /* _I915_REG_H_ */

View File

@ -42,7 +42,7 @@ static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
return false;
if (HAS_PCH_SPLIT(dev))
dpll_reg = PCH_DPLL(pipe);
dpll_reg = _PCH_DPLL(pipe);
else
dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B;
@ -873,22 +873,6 @@ int i915_restore_state(struct drm_device *dev)
I915_WRITE(IER, dev_priv->saveIER);
I915_WRITE(IMR, dev_priv->saveIMR);
}
DRM_UNLOCK(dev);
if (drm_core_check_feature(dev, DRIVER_MODESET))
intel_init_clock_gating(dev);
if (IS_IRONLAKE_M(dev)) {
ironlake_enable_drps(dev);
intel_init_emon(dev);
}
if (INTEL_INFO(dev)->gen >= 6) {
gen6_enable_rps(dev_priv);
gen6_update_ring_freq(dev_priv);
}
DRM_LOCK(dev);
/* Cache mode state */
I915_WRITE(CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);

View File

@ -175,6 +175,28 @@ get_lvds_dvo_timing(const struct bdb_lvds_lfp_data *lvds_lfp_data,
return (const struct lvds_dvo_timing *)(entry + dvo_timing_offset);
}
/* get lvds_fp_timing entry
* this function may return NULL if the corresponding entry is invalid
*/
static const struct lvds_fp_timing *
get_lvds_fp_timing(const struct bdb_header *bdb,
const struct bdb_lvds_lfp_data *data,
const struct bdb_lvds_lfp_data_ptrs *ptrs,
int index)
{
size_t data_ofs = (const u8 *)data - (const u8 *)bdb;
u16 data_size = ((const u16 *)data)[-1]; /* stored in header */
size_t ofs;
if (index >= DRM_ARRAY_SIZE(ptrs->ptr))
return NULL;
ofs = ptrs->ptr[index].fp_timing_offset;
if (ofs < data_ofs ||
ofs + sizeof(struct lvds_fp_timing) > data_ofs + data_size)
return NULL;
return (const struct lvds_fp_timing *)((const u8 *)bdb + ofs);
}
/* Try to find integrated panel data */
static void
parse_lfp_panel_data(struct drm_i915_private *dev_priv,
@ -184,6 +206,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
const struct bdb_lvds_lfp_data *lvds_lfp_data;
const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs;
const struct lvds_dvo_timing *panel_dvo_timing;
const struct lvds_fp_timing *fp_timing;
struct drm_display_mode *panel_fixed_mode;
int i, downclock;
@ -244,6 +267,19 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
"Normal Clock %dKHz, downclock %dKHz\n",
panel_fixed_mode->clock, 10 * downclock);
}
fp_timing = get_lvds_fp_timing(bdb, lvds_lfp_data,
lvds_lfp_data_ptrs,
lvds_options->panel_type);
if (fp_timing) {
/* check the resolution, just to be sure */
if (fp_timing->x_res == panel_fixed_mode->hdisplay &&
fp_timing->y_res == panel_fixed_mode->vdisplay) {
dev_priv->bios_lvds_val = fp_timing->lvds_reg_val;
DRM_DEBUG_KMS("VBT initial LVDS value %x\n",
dev_priv->bios_lvds_val);
}
}
}
/* Try to find sdvo panel data */
@ -256,6 +292,11 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
int index;
index = i915_vbt_sdvo_panel_type;
if (index == -2) {
DRM_DEBUG_KMS("Ignore SDVO panel mode from BIOS VBT tables.\n");
return;
}
if (index == -1) {
struct bdb_sdvo_lvds_options *sdvo_lvds_options;
@ -331,11 +372,11 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
if (block_size >= sizeof(*general)) {
int bus_pin = general->crt_ddc_gmbus_pin;
DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
if (bus_pin >= 1 && bus_pin <= 6)
if (intel_gmbus_is_port_valid(bus_pin))
dev_priv->crt_ddc_pin = bus_pin;
} else {
DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n",
block_size);
block_size);
}
}
}

View File

@ -55,18 +55,36 @@ static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
struct intel_crt, base);
}
static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
static void pch_crt_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 temp, reg;
u32 temp;
if (HAS_PCH_SPLIT(dev))
reg = PCH_ADPA;
else
reg = ADPA;
temp = I915_READ(PCH_ADPA);
temp &= ~ADPA_DAC_ENABLE;
temp = I915_READ(reg);
switch (mode) {
case DRM_MODE_DPMS_ON:
temp |= ADPA_DAC_ENABLE;
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
/* Just leave port enable cleared */
break;
}
I915_WRITE(PCH_ADPA, temp);
}
static void gmch_crt_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 temp;
temp = I915_READ(ADPA);
temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
temp &= ~ADPA_DAC_ENABLE;
@ -85,7 +103,7 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
break;
}
I915_WRITE(reg, temp);
I915_WRITE(ADPA, temp);
}
static int intel_crt_mode_valid(struct drm_connector *connector,
@ -111,7 +129,7 @@ static int intel_crt_mode_valid(struct drm_connector *connector,
}
static bool intel_crt_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
@ -279,9 +297,10 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
if (intel_ddc_probe(&crt->base, dev_priv->crt_ddc_pin)) {
struct edid *edid;
bool is_digital = false;
device_t iic;
edid = drm_get_edid(connector,
dev_priv->gmbus[dev_priv->crt_ddc_pin]);
iic = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin);
edid = drm_get_edid(connector, iic);
/*
* This may be a DVI-I connector with a shared DDC
* link between analog and digital outputs, so we
@ -480,15 +499,16 @@ static int intel_crt_get_modes(struct drm_connector *connector)
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
device_t iic;
ret = intel_ddc_get_modes(connector,
dev_priv->gmbus[dev_priv->crt_ddc_pin]);
iic = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin);
ret = intel_ddc_get_modes(connector, iic);
if (ret || !IS_G4X(dev))
return ret;
/* Try to probe digital port for output in DVI-I -> VGA mode. */
return (intel_ddc_get_modes(connector,
dev_priv->gmbus[GMBUS_PORT_DPB]));
iic = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB);
return intel_ddc_get_modes(connector, iic);
}
static int intel_crt_set_property(struct drm_connector *connector,
@ -511,12 +531,20 @@ static void intel_crt_reset(struct drm_connector *connector)
* Routines for controlling stuff on the analog port
*/
static const struct drm_encoder_helper_funcs intel_crt_helper_funcs = {
.dpms = intel_crt_dpms,
static const struct drm_encoder_helper_funcs pch_encoder_funcs = {
.mode_fixup = intel_crt_mode_fixup,
.prepare = intel_encoder_prepare,
.commit = intel_encoder_commit,
.mode_set = intel_crt_mode_set,
.dpms = pch_crt_dpms,
};
static const struct drm_encoder_helper_funcs gmch_encoder_funcs = {
.mode_fixup = intel_crt_mode_fixup,
.prepare = intel_encoder_prepare,
.commit = intel_encoder_commit,
.mode_set = intel_crt_mode_set,
.dpms = gmch_crt_dpms,
};
static const struct drm_connector_funcs intel_crt_connector_funcs = {
@ -540,7 +568,7 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = {
static int intel_no_crt_dmi_callback(const struct dmi_system_id *id)
{
DRM_DEBUG_KMS("Skipping CRT initialization for %s\n", id->ident);
DRM_INFO("Skipping CRT initialization for %s\n", id->ident);
return 1;
}
@ -562,6 +590,7 @@ void intel_crt_init(struct drm_device *dev)
struct intel_crt *crt;
struct intel_connector *intel_connector;
struct drm_i915_private *dev_priv = dev->dev_private;
const struct drm_encoder_helper_funcs *encoder_helper_funcs;
/* Skip machines without VGA that falsely report hotplug events */
if (dmi_check_system(intel_no_crt))
@ -584,14 +613,23 @@ void intel_crt_init(struct drm_device *dev)
crt->base.clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT |
1 << INTEL_ANALOG_CLONE_BIT |
1 << INTEL_SDVO_LVDS_CLONE_BIT);
crt->base.crtc_mask = (1 << 0) | (1 << 1);
if (IS_HASWELL(dev))
crt->base.crtc_mask = (1 << 0);
else
crt->base.crtc_mask = (1 << 0) | (1 << 1);
if (IS_GEN2(dev))
connector->interlace_allowed = 0;
else
connector->interlace_allowed = 1;
connector->doublescan_allowed = 0;
drm_encoder_helper_add(&crt->base.base, &intel_crt_helper_funcs);
if (HAS_PCH_SPLIT(dev))
encoder_helper_funcs = &pch_encoder_funcs;
else
encoder_helper_funcs = &gmch_encoder_funcs;
drm_encoder_helper_add(&crt->base.base, encoder_helper_funcs);
drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
#if 0

View File

@ -0,0 +1,761 @@
/*
* Copyright © 2012 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eugeni Dodonov <eugeni.dodonov@intel.com>
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <dev/drm2/drmP.h>
#include <dev/drm2/drm.h>
#include <dev/drm2/i915/i915_drm.h>
#include <dev/drm2/i915/i915_drv.h>
#include <dev/drm2/i915/intel_drv.h>
/* HDMI/DVI modes ignore everything but the last 2 items. So we share
* them for both DP and FDI transports, allowing those ports to
* automatically adapt to HDMI connections as well
*/
static const u32 hsw_ddi_translations_dp[] = {
0x00FFFFFF, 0x0006000E, /* DP parameters */
0x00D75FFF, 0x0005000A,
0x00C30FFF, 0x00040006,
0x80AAAFFF, 0x000B0000,
0x00FFFFFF, 0x0005000A,
0x00D75FFF, 0x000C0004,
0x80C30FFF, 0x000B0000,
0x00FFFFFF, 0x00040006,
0x80D75FFF, 0x000B0000,
0x00FFFFFF, 0x00040006 /* HDMI parameters */
};
static const u32 hsw_ddi_translations_fdi[] = {
0x00FFFFFF, 0x0007000E, /* FDI parameters */
0x00D75FFF, 0x000F000A,
0x00C30FFF, 0x00060006,
0x00AAAFFF, 0x001E0000,
0x00FFFFFF, 0x000F000A,
0x00D75FFF, 0x00160004,
0x00C30FFF, 0x001E0000,
0x00FFFFFF, 0x00060006,
0x00D75FFF, 0x001E0000,
0x00FFFFFF, 0x00040006 /* HDMI parameters */
};
/* On Haswell, DDI port buffers must be programmed with correct values
* in advance. The buffer values are different for FDI and DP modes,
* but the HDMI/DVI fields are shared among those. So we program the DDI
* in either FDI or DP modes only, as HDMI connections will work with both
* of those
*/
static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port, bool use_fdi_mode)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg;
int i;
const u32 *ddi_translations = ((use_fdi_mode) ?
hsw_ddi_translations_fdi :
hsw_ddi_translations_dp);
DRM_DEBUG_DRIVER("Initializing DDI buffers for port %c in %s mode\n",
port_name(port),
use_fdi_mode ? "FDI" : "DP");
if (use_fdi_mode && (port != PORT_E))
DRM_DEBUG_KMS("Programming port %c in FDI mode, this probably will not work.\n",
port_name(port));
for (i=0, reg=DDI_BUF_TRANS(port); i < DRM_ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
I915_WRITE(reg, ddi_translations[i]);
reg += 4;
}
}
/* Program DDI buffers translations for DP. By default, program ports A-D in DP
* mode and port E for FDI.
*/
void intel_prepare_ddi(struct drm_device *dev)
{
int port;
if (IS_HASWELL(dev)) {
for (port = PORT_A; port < PORT_E; port++)
intel_prepare_ddi_buffers(dev, port, false);
/* DDI E is the suggested one to work in FDI mode, so program it as such by
* default. It will have to be re-programmed if a digital DP output
* is later detected on it.
*/
intel_prepare_ddi_buffers(dev, PORT_E, true);
}
}
static const long hsw_ddi_buf_ctl_values[] = {
DDI_BUF_EMP_400MV_0DB_HSW,
DDI_BUF_EMP_400MV_3_5DB_HSW,
DDI_BUF_EMP_400MV_6DB_HSW,
DDI_BUF_EMP_400MV_9_5DB_HSW,
DDI_BUF_EMP_600MV_0DB_HSW,
DDI_BUF_EMP_600MV_3_5DB_HSW,
DDI_BUF_EMP_600MV_6DB_HSW,
DDI_BUF_EMP_800MV_0DB_HSW,
DDI_BUF_EMP_800MV_3_5DB_HSW
};
/* Starting with Haswell, different DDI ports can work in FDI mode for
* connection to the PCH-located connectors. For this, it is necessary to train
* both the DDI port and PCH receiver for the desired DDI buffer settings.
*
* The recommended port to work in FDI mode is DDI E, which we use here. Also,
* please note that when FDI mode is active on DDI E, it shares 2 lines with
* DDI A (which is used for eDP)
*/
void hsw_fdi_link_train(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
u32 reg, temp, i;
/* Configure CPU PLL, wait for warmup */
I915_WRITE(SPLL_CTL,
SPLL_PLL_ENABLE |
SPLL_PLL_FREQ_1350MHz |
SPLL_PLL_SCC);
/* Use SPLL to drive the output when in FDI mode */
I915_WRITE(PORT_CLK_SEL(PORT_E),
PORT_CLK_SEL_SPLL);
I915_WRITE(PIPE_CLK_SEL(pipe),
PIPE_CLK_SEL_PORT(PORT_E));
DELAY(20);
/* Start the training iterating through available voltages and emphasis */
for (i=0; i < DRM_ARRAY_SIZE(hsw_ddi_buf_ctl_values); i++) {
/* Configure DP_TP_CTL with auto-training */
I915_WRITE(DP_TP_CTL(PORT_E),
DP_TP_CTL_FDI_AUTOTRAIN |
DP_TP_CTL_ENHANCED_FRAME_ENABLE |
DP_TP_CTL_LINK_TRAIN_PAT1 |
DP_TP_CTL_ENABLE);
/* Configure and enable DDI_BUF_CTL for DDI E with next voltage */
temp = I915_READ(DDI_BUF_CTL(PORT_E));
temp = (temp & ~DDI_BUF_EMP_MASK);
I915_WRITE(DDI_BUF_CTL(PORT_E),
temp |
DDI_BUF_CTL_ENABLE |
DDI_PORT_WIDTH_X2 |
hsw_ddi_buf_ctl_values[i]);
DELAY(600);
/* Enable CPU FDI Receiver with auto-training */
reg = FDI_RX_CTL(pipe);
I915_WRITE(reg,
I915_READ(reg) |
FDI_LINK_TRAIN_AUTO |
FDI_RX_ENABLE |
FDI_LINK_TRAIN_PATTERN_1_CPT |
FDI_RX_ENHANCE_FRAME_ENABLE |
FDI_PORT_WIDTH_2X_LPT |
FDI_RX_PLL_ENABLE);
POSTING_READ(reg);
DELAY(100);
temp = I915_READ(DP_TP_STATUS(PORT_E));
if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
DRM_DEBUG_DRIVER("BUF_CTL training done on %d step\n", i);
/* Enable normal pixel sending for FDI */
I915_WRITE(DP_TP_CTL(PORT_E),
DP_TP_CTL_FDI_AUTOTRAIN |
DP_TP_CTL_LINK_TRAIN_NORMAL |
DP_TP_CTL_ENHANCED_FRAME_ENABLE |
DP_TP_CTL_ENABLE);
/* Enable PIPE_DDI_FUNC_CTL for the pipe to work in FDI mode */
temp = I915_READ(DDI_FUNC_CTL(pipe));
temp &= ~PIPE_DDI_PORT_MASK;
temp |= PIPE_DDI_SELECT_PORT(PORT_E) |
PIPE_DDI_MODE_SELECT_FDI |
PIPE_DDI_FUNC_ENABLE |
PIPE_DDI_PORT_WIDTH_X2;
I915_WRITE(DDI_FUNC_CTL(pipe),
temp);
break;
} else {
DRM_ERROR("Error training BUF_CTL %d\n", i);
/* Disable DP_TP_CTL and FDI_RX_CTL and retry */
I915_WRITE(DP_TP_CTL(PORT_E),
I915_READ(DP_TP_CTL(PORT_E)) &
~DP_TP_CTL_ENABLE);
I915_WRITE(FDI_RX_CTL(pipe),
I915_READ(FDI_RX_CTL(pipe)) &
~FDI_RX_PLL_ENABLE);
continue;
}
}
DRM_DEBUG_KMS("FDI train done.\n");
}
/* For DDI connections, it is possible to support different outputs over the
* same DDI port, such as HDMI or DP or even VGA via FDI. So we don't know by
* the time the output is detected what exactly is on the other end of it. This
* function aims at providing support for this detection and proper output
* configuration.
*/
void intel_ddi_init(struct drm_device *dev, enum port port)
{
/* For now, we don't do any proper output detection and assume that we
* handle HDMI only */
switch(port){
case PORT_A:
/* We don't handle eDP and DP yet */
DRM_DEBUG_DRIVER("Found digital output on DDI port A\n");
break;
/* Assume that the ports B, C and D are working in HDMI mode for now */
case PORT_B:
case PORT_C:
case PORT_D:
intel_hdmi_init(dev, DDI_BUF_CTL(port));
break;
default:
DRM_DEBUG_DRIVER("No handlers defined for port %d, skipping DDI initialization\n",
port);
break;
}
}
/* WRPLL clock dividers */
struct wrpll_tmds_clock {
u32 clock;
u16 p; /* Post divider */
u16 n2; /* Feedback divider */
u16 r2; /* Reference divider */
};
/* Table of matching values for WRPLL clock programming for each frequency */
static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
{19750, 38, 25, 18},
{20000, 48, 32, 18},
{21000, 36, 21, 15},
{21912, 42, 29, 17},
{22000, 36, 22, 15},
{23000, 36, 23, 15},
{23500, 40, 40, 23},
{23750, 26, 16, 14},
{23750, 26, 16, 14},
{24000, 36, 24, 15},
{25000, 36, 25, 15},
{25175, 26, 40, 33},
{25200, 30, 21, 15},
{26000, 36, 26, 15},
{27000, 30, 21, 14},
{27027, 18, 100, 111},
{27500, 30, 29, 19},
{28000, 34, 30, 17},
{28320, 26, 30, 22},
{28322, 32, 42, 25},
{28750, 24, 23, 18},
{29000, 30, 29, 18},
{29750, 32, 30, 17},
{30000, 30, 25, 15},
{30750, 30, 41, 24},
{31000, 30, 31, 18},
{31500, 30, 28, 16},
{32000, 30, 32, 18},
{32500, 28, 32, 19},
{33000, 24, 22, 15},
{34000, 28, 30, 17},
{35000, 26, 32, 19},
{35500, 24, 30, 19},
{36000, 26, 26, 15},
{36750, 26, 46, 26},
{37000, 24, 23, 14},
{37762, 22, 40, 26},
{37800, 20, 21, 15},
{38000, 24, 27, 16},
{38250, 24, 34, 20},
{39000, 24, 26, 15},
{40000, 24, 32, 18},
{40500, 20, 21, 14},
{40541, 22, 147, 89},
{40750, 18, 19, 14},
{41000, 16, 17, 14},
{41500, 22, 44, 26},
{41540, 22, 44, 26},
{42000, 18, 21, 15},
{42500, 22, 45, 26},
{43000, 20, 43, 27},
{43163, 20, 24, 15},
{44000, 18, 22, 15},
{44900, 20, 108, 65},
{45000, 20, 25, 15},
{45250, 20, 52, 31},
{46000, 18, 23, 15},
{46750, 20, 45, 26},
{47000, 20, 40, 23},
{48000, 18, 24, 15},
{49000, 18, 49, 30},
{49500, 16, 22, 15},
{50000, 18, 25, 15},
{50500, 18, 32, 19},
{51000, 18, 34, 20},
{52000, 18, 26, 15},
{52406, 14, 34, 25},
{53000, 16, 22, 14},
{54000, 16, 24, 15},
{54054, 16, 173, 108},
{54500, 14, 24, 17},
{55000, 12, 22, 18},
{56000, 14, 45, 31},
{56250, 16, 25, 15},
{56750, 14, 25, 17},
{57000, 16, 27, 16},
{58000, 16, 43, 25},
{58250, 16, 38, 22},
{58750, 16, 40, 23},
{59000, 14, 26, 17},
{59341, 14, 40, 26},
{59400, 16, 44, 25},
{60000, 16, 32, 18},
{60500, 12, 39, 29},
{61000, 14, 49, 31},
{62000, 14, 37, 23},
{62250, 14, 42, 26},
{63000, 12, 21, 15},
{63500, 14, 28, 17},
{64000, 12, 27, 19},
{65000, 14, 32, 19},
{65250, 12, 29, 20},
{65500, 12, 32, 22},
{66000, 12, 22, 15},
{66667, 14, 38, 22},
{66750, 10, 21, 17},
{67000, 14, 33, 19},
{67750, 14, 58, 33},
{68000, 14, 30, 17},
{68179, 14, 46, 26},
{68250, 14, 46, 26},
{69000, 12, 23, 15},
{70000, 12, 28, 18},
{71000, 12, 30, 19},
{72000, 12, 24, 15},
{73000, 10, 23, 17},
{74000, 12, 23, 14},
{74176, 8, 100, 91},
{74250, 10, 22, 16},
{74481, 12, 43, 26},
{74500, 10, 29, 21},
{75000, 12, 25, 15},
{75250, 10, 39, 28},
{76000, 12, 27, 16},
{77000, 12, 53, 31},
{78000, 12, 26, 15},
{78750, 12, 28, 16},
{79000, 10, 38, 26},
{79500, 10, 28, 19},
{80000, 12, 32, 18},
{81000, 10, 21, 14},
{81081, 6, 100, 111},
{81624, 8, 29, 24},
{82000, 8, 17, 14},
{83000, 10, 40, 26},
{83950, 10, 28, 18},
{84000, 10, 28, 18},
{84750, 6, 16, 17},
{85000, 6, 17, 18},
{85250, 10, 30, 19},
{85750, 10, 27, 17},
{86000, 10, 43, 27},
{87000, 10, 29, 18},
{88000, 10, 44, 27},
{88500, 10, 41, 25},
{89000, 10, 28, 17},
{89012, 6, 90, 91},
{89100, 10, 33, 20},
{90000, 10, 25, 15},
{91000, 10, 32, 19},
{92000, 10, 46, 27},
{93000, 10, 31, 18},
{94000, 10, 40, 23},
{94500, 10, 28, 16},
{95000, 10, 44, 25},
{95654, 10, 39, 22},
{95750, 10, 39, 22},
{96000, 10, 32, 18},
{97000, 8, 23, 16},
{97750, 8, 42, 29},
{98000, 8, 45, 31},
{99000, 8, 22, 15},
{99750, 8, 34, 23},
{100000, 6, 20, 18},
{100500, 6, 19, 17},
{101000, 6, 37, 33},
{101250, 8, 21, 14},
{102000, 6, 17, 15},
{102250, 6, 25, 22},
{103000, 8, 29, 19},
{104000, 8, 37, 24},
{105000, 8, 28, 18},
{106000, 8, 22, 14},
{107000, 8, 46, 29},
{107214, 8, 27, 17},
{108000, 8, 24, 15},
{108108, 8, 173, 108},
{109000, 6, 23, 19},
{109000, 6, 23, 19},
{110000, 6, 22, 18},
{110013, 6, 22, 18},
{110250, 8, 49, 30},
{110500, 8, 36, 22},
{111000, 8, 23, 14},
{111264, 8, 150, 91},
{111375, 8, 33, 20},
{112000, 8, 63, 38},
{112500, 8, 25, 15},
{113100, 8, 57, 34},
{113309, 8, 42, 25},
{114000, 8, 27, 16},
{115000, 6, 23, 18},
{116000, 8, 43, 25},
{117000, 8, 26, 15},
{117500, 8, 40, 23},
{118000, 6, 38, 29},
{119000, 8, 30, 17},
{119500, 8, 46, 26},
{119651, 8, 39, 22},
{120000, 8, 32, 18},
{121000, 6, 39, 29},
{121250, 6, 31, 23},
{121750, 6, 23, 17},
{122000, 6, 42, 31},
{122614, 6, 30, 22},
{123000, 6, 41, 30},
{123379, 6, 37, 27},
{124000, 6, 51, 37},
{125000, 6, 25, 18},
{125250, 4, 13, 14},
{125750, 4, 27, 29},
{126000, 6, 21, 15},
{127000, 6, 24, 17},
{127250, 6, 41, 29},
{128000, 6, 27, 19},
{129000, 6, 43, 30},
{129859, 4, 25, 26},
{130000, 6, 26, 18},
{130250, 6, 42, 29},
{131000, 6, 32, 22},
{131500, 6, 38, 26},
{131850, 6, 41, 28},
{132000, 6, 22, 15},
{132750, 6, 28, 19},
{133000, 6, 34, 23},
{133330, 6, 37, 25},
{134000, 6, 61, 41},
{135000, 6, 21, 14},
{135250, 6, 167, 111},
{136000, 6, 62, 41},
{137000, 6, 35, 23},
{138000, 6, 23, 15},
{138500, 6, 40, 26},
{138750, 6, 37, 24},
{139000, 6, 34, 22},
{139050, 6, 34, 22},
{139054, 6, 34, 22},
{140000, 6, 28, 18},
{141000, 6, 36, 23},
{141500, 6, 22, 14},
{142000, 6, 30, 19},
{143000, 6, 27, 17},
{143472, 4, 17, 16},
{144000, 6, 24, 15},
{145000, 6, 29, 18},
{146000, 6, 47, 29},
{146250, 6, 26, 16},
{147000, 6, 49, 30},
{147891, 6, 23, 14},
{148000, 6, 23, 14},
{148250, 6, 28, 17},
{148352, 4, 100, 91},
{148500, 6, 33, 20},
{149000, 6, 48, 29},
{150000, 6, 25, 15},
{151000, 4, 19, 17},
{152000, 6, 27, 16},
{152280, 6, 44, 26},
{153000, 6, 34, 20},
{154000, 6, 53, 31},
{155000, 6, 31, 18},
{155250, 6, 50, 29},
{155750, 6, 45, 26},
{156000, 6, 26, 15},
{157000, 6, 61, 35},
{157500, 6, 28, 16},
{158000, 6, 65, 37},
{158250, 6, 44, 25},
{159000, 6, 53, 30},
{159500, 6, 39, 22},
{160000, 6, 32, 18},
{161000, 4, 31, 26},
{162000, 4, 18, 15},
{162162, 4, 131, 109},
{162500, 4, 53, 44},
{163000, 4, 29, 24},
{164000, 4, 17, 14},
{165000, 4, 22, 18},
{166000, 4, 32, 26},
{167000, 4, 26, 21},
{168000, 4, 46, 37},
{169000, 4, 104, 83},
{169128, 4, 64, 51},
{169500, 4, 39, 31},
{170000, 4, 34, 27},
{171000, 4, 19, 15},
{172000, 4, 51, 40},
{172750, 4, 32, 25},
{172800, 4, 32, 25},
{173000, 4, 41, 32},
{174000, 4, 49, 38},
{174787, 4, 22, 17},
{175000, 4, 35, 27},
{176000, 4, 30, 23},
{177000, 4, 38, 29},
{178000, 4, 29, 22},
{178500, 4, 37, 28},
{179000, 4, 53, 40},
{179500, 4, 73, 55},
{180000, 4, 20, 15},
{181000, 4, 55, 41},
{182000, 4, 31, 23},
{183000, 4, 42, 31},
{184000, 4, 30, 22},
{184750, 4, 26, 19},
{185000, 4, 37, 27},
{186000, 4, 51, 37},
{187000, 4, 36, 26},
{188000, 4, 32, 23},
{189000, 4, 21, 15},
{190000, 4, 38, 27},
{190960, 4, 41, 29},
{191000, 4, 41, 29},
{192000, 4, 27, 19},
{192250, 4, 37, 26},
{193000, 4, 20, 14},
{193250, 4, 53, 37},
{194000, 4, 23, 16},
{194208, 4, 23, 16},
{195000, 4, 26, 18},
{196000, 4, 45, 31},
{197000, 4, 35, 24},
{197750, 4, 41, 28},
{198000, 4, 22, 15},
{198500, 4, 25, 17},
{199000, 4, 28, 19},
{200000, 4, 37, 25},
{201000, 4, 61, 41},
{202000, 4, 112, 75},
{202500, 4, 21, 14},
{203000, 4, 146, 97},
{204000, 4, 62, 41},
{204750, 4, 44, 29},
{205000, 4, 38, 25},
{206000, 4, 29, 19},
{207000, 4, 23, 15},
{207500, 4, 40, 26},
{208000, 4, 37, 24},
{208900, 4, 48, 31},
{209000, 4, 48, 31},
{209250, 4, 31, 20},
{210000, 4, 28, 18},
{211000, 4, 25, 16},
{212000, 4, 22, 14},
{213000, 4, 30, 19},
{213750, 4, 38, 24},
{214000, 4, 46, 29},
{214750, 4, 35, 22},
{215000, 4, 43, 27},
{216000, 4, 24, 15},
{217000, 4, 37, 23},
{218000, 4, 42, 26},
{218250, 4, 42, 26},
{218750, 4, 34, 21},
{219000, 4, 47, 29},
{219000, 4, 47, 29},
{220000, 4, 44, 27},
{220640, 4, 49, 30},
{220750, 4, 36, 22},
{221000, 4, 36, 22},
{222000, 4, 23, 14},
{222525, 4, 28, 17},
{222750, 4, 33, 20},
{227000, 4, 37, 22},
{230250, 4, 29, 17},
{233500, 4, 38, 22},
{235000, 4, 40, 23},
{238000, 4, 30, 17},
{241500, 2, 17, 19},
{245250, 2, 20, 22},
{247750, 2, 22, 24},
{253250, 2, 15, 16},
{256250, 2, 18, 19},
{262500, 2, 31, 32},
{267250, 2, 66, 67},
{268500, 2, 94, 95},
{270000, 2, 14, 14},
{272500, 2, 77, 76},
{273750, 2, 57, 56},
{280750, 2, 24, 23},
{281250, 2, 23, 22},
{286000, 2, 17, 16},
{291750, 2, 26, 24},
{296703, 2, 56, 51},
{297000, 2, 22, 20},
{298000, 2, 21, 19},
};
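Reading the table against the divider fields declared above, the entries appear to satisfy clock ≈ 2,700,000 kHz * n2 / (5 * p * r2), i.e. the 2.7 GHz LCPLL reference scaled by the feedback, post and reference dividers; for instance {148500, 6, 33, 20} gives 2,700,000 * 33 / (5 * 6 * 20) = 148,500 kHz. That relationship is inferred from the numbers, not stated anywhere in this commit; a throwaway helper that evaluates it for one entry:

/* Sketch only: approximate TMDS clock implied by a table entry. */
static u32
wrpll_entry_clock_khz(const struct wrpll_tmds_clock *e)
{
	return ((u32)(2700000ULL * e->n2 / (5ULL * e->p * e->r2)));
}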
void intel_ddi_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
int port = intel_hdmi->ddi_port;
int pipe = intel_crtc->pipe;
int p, n2, r2, valid=0;
u32 temp, i;
/* On Haswell, we need to enable the clocks and prepare DDI function to
* work in HDMI mode for this pipe.
*/
DRM_DEBUG_KMS("Preparing HDMI DDI mode for Haswell on port %c, pipe %c\n", port_name(port), pipe_name(pipe));
for (i=0; i < DRM_ARRAY_SIZE(wrpll_tmds_clock_table); i++) {
if (crtc->mode.clock == wrpll_tmds_clock_table[i].clock) {
p = wrpll_tmds_clock_table[i].p;
n2 = wrpll_tmds_clock_table[i].n2;
r2 = wrpll_tmds_clock_table[i].r2;
DRM_DEBUG_KMS("WR PLL clock: found settings for %dKHz refresh rate: p=%d, n2=%d, r2=%d\n",
crtc->mode.clock,
p, n2, r2);
valid = 1;
break;
}
}
if (!valid) {
DRM_ERROR("Unable to find WR PLL clock settings for %dKHz refresh rate\n",
crtc->mode.clock);
return;
}
/* Enable LCPLL if disabled */
temp = I915_READ(LCPLL_CTL);
if (temp & LCPLL_PLL_DISABLE)
I915_WRITE(LCPLL_CTL,
temp & ~LCPLL_PLL_DISABLE);
/* Configure WR PLL 1, program the correct divider values for
* the desired frequency and wait for warmup */
I915_WRITE(WRPLL_CTL1,
WRPLL_PLL_ENABLE |
WRPLL_PLL_SELECT_LCPLL_2700 |
WRPLL_DIVIDER_REFERENCE(r2) |
WRPLL_DIVIDER_FEEDBACK(n2) |
WRPLL_DIVIDER_POST(p));
DELAY(20);
/* Use WRPLL1 clock to drive the output to the port, and tell the pipe to use
* this port for connection.
*/
I915_WRITE(PORT_CLK_SEL(port),
PORT_CLK_SEL_WRPLL1);
I915_WRITE(PIPE_CLK_SEL(pipe),
PIPE_CLK_SEL_PORT(port));
DELAY(20);
if (intel_hdmi->has_audio) {
/* Proper support for digital audio needs new logic and a new set
* of registers, so we leave it for future patch bombing.
*/
DRM_DEBUG_DRIVER("HDMI audio on pipe %c not yet supported on DDI\n",
pipe_name(intel_crtc->pipe));
}
/* Enable PIPE_DDI_FUNC_CTL for the pipe to work in HDMI mode */
temp = I915_READ(DDI_FUNC_CTL(pipe));
temp &= ~PIPE_DDI_PORT_MASK;
temp &= ~PIPE_DDI_BPC_12;
temp |= PIPE_DDI_SELECT_PORT(port) |
PIPE_DDI_MODE_SELECT_HDMI |
((intel_crtc->bpp > 24) ?
PIPE_DDI_BPC_12 :
PIPE_DDI_BPC_8) |
PIPE_DDI_FUNC_ENABLE;
I915_WRITE(DDI_FUNC_CTL(pipe), temp);
intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
intel_hdmi_set_spd_infoframe(encoder);
}
void intel_ddi_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
int port = intel_hdmi->ddi_port;
u32 temp;
temp = I915_READ(DDI_BUF_CTL(port));
if (mode != DRM_MODE_DPMS_ON) {
temp &= ~DDI_BUF_CTL_ENABLE;
} else {
temp |= DDI_BUF_CTL_ENABLE;
}
/* Enable DDI_BUF_CTL. In HDMI/DVI mode, the port width,
* and swing/emphasis values are ignored so nothing special needs
* to be done besides enabling the port.
*/
I915_WRITE(DDI_BUF_CTL(port),
temp);
}

File diff suppressed because it is too large

View File

@ -673,7 +673,7 @@ intel_dp_i2c_init(struct intel_dp *intel_dp,
}
static bool
intel_dp_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mode *mode,
intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
@ -681,33 +681,44 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mode *
int lane_count, clock;
int max_lane_count = intel_dp_max_lane_count(intel_dp);
int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
int bpp;
int bpp, mode_rate;
static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
intel_fixed_panel_mode(intel_dp->panel_fixed_mode, adjusted_mode);
intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN,
mode, adjusted_mode);
/*
* the mode->clock is used to calculate the Data&Link M/N
* of the pipe. For the eDP the fixed clock should be used.
*/
mode->clock = intel_dp->panel_fixed_mode->clock;
}
DRM_DEBUG_KMS("DP link computation with max lane count %i "
"max bw %02x pixel clock %iKHz\n",
max_lane_count, bws[max_clock], mode->clock);
if (!intel_dp_adjust_dithering(intel_dp, adjusted_mode, adjusted_mode))
return false;
bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
mode_rate = intel_dp_link_required(mode->clock, bpp);
for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
for (clock = 0; clock <= max_clock; clock++) {
int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
if (intel_dp_link_required(adjusted_mode->clock, bpp)
<= link_avail) {
if (mode_rate <= link_avail) {
intel_dp->link_bw = bws[clock];
intel_dp->lane_count = lane_count;
adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
DRM_DEBUG_KMS("Display port link bw %02x lane "
"count %d clock %d\n",
DRM_DEBUG_KMS("DP link bw %02x lane "
"count %d clock %d bpp %d\n",
intel_dp->link_bw, intel_dp->lane_count,
adjusted_mode->clock);
adjusted_mode->clock, bpp);
DRM_DEBUG_KMS("DP link bw required %i available %i\n",
mode_rate, link_avail);
return true;
}
}
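To make the search above concrete, assume the usual DisplayPort accounting (the intel_dp_link_required() and intel_dp_max_data_rate() helpers are not shown in this hunk, so the formulas here are an assumption): the mode needs roughly pixel_clock * bpp / 10 units and each configuration offers link_clock * lanes * 8 / 10 after 8b/10b coding. A 1080p60 mode at 148,500 kHz and 24 bpp then needs about 356,400; one lane offers 129,600 at 1.62 GHz and 216,000 at 2.7 GHz, two lanes offer 259,200 and 432,000 respectively, so the loop settles on two lanes at DP_LINK_BW_2_7, the first combination whose link_avail covers mode_rate.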
@ -1140,6 +1151,7 @@ static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
if (intel_dp->want_panel_vdd)
printf("Cannot turn power off while VDD is on\n");
ironlake_panel_vdd_off_sync(intel_dp); /* finish any pending work */
pp = ironlake_get_pp_control(dev_priv);
pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
@ -1937,6 +1949,23 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
return false;
}
static void
intel_dp_probe_oui(struct intel_dp *intel_dp)
{
u8 buf[3];
if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
return;
if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
DRM_DEBUG_KMS("Sink OUI: %02x%02x%02x\n",
buf[0], buf[1], buf[2]);
if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3))
DRM_DEBUG_KMS("Branch OUI: %02x%02x%02x\n",
buf[0], buf[1], buf[2]);
}
static bool
intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
{
@ -2114,6 +2143,8 @@ intel_dp_detect(struct drm_connector *connector, bool force)
if (status != connector_status_connected)
return status;
intel_dp_probe_oui(intel_dp);
if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
} else {
@ -2428,6 +2459,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
}
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
connector->interlace_allowed = true;
connector->doublescan_allowed = 0;
@ -2475,6 +2507,13 @@ intel_dp_init(struct drm_device *dev, int output_reg)
pp_off = I915_READ(PCH_PP_OFF_DELAYS);
pp_div = I915_READ(PCH_PP_DIVISOR);
if (!pp_on || !pp_off || !pp_div) {
DRM_INFO("bad panel power sequencing delays, disabling panel\n");
intel_dp_encoder_destroy(&intel_dp->base.base);
intel_dp_destroy(&intel_connector->base);
return;
}
/* Pull timing values out of registers */
cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
PANEL_POWER_UP_DELAY_SHIFT;

View File

@ -55,6 +55,21 @@
ret; \
})
#define wait_for_atomic_us(COND, US) ({ \
int i, ret__ = -ETIMEDOUT; \
for (i = 0; i < (US); i++) { \
if ((COND)) { \
ret__ = 0; \
break; \
} \
DELAY(1); \
} \
ret__; \
})
#define wait_for(COND, MS) _intel_wait_for(NULL, COND, MS, 1, "915wfi")
#define wait_for_atomic(COND, MS) _intel_wait_for(NULL, COND, MS, 0, "915wfa")
#define KHz(x) (1000*x)
#define MHz(x) KHz(1000*x)
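The new wait_for_atomic_us() above spins with DELAY(1) instead of sleeping, so it is usable where blocking is not allowed; an illustrative call (not taken from this commit) polling a GMBUS ready bit for up to 50 microseconds:

	if (wait_for_atomic_us(I915_READ(GMBUS2) & GMBUS_HW_RDY, 50))
		DRM_ERROR("GMBUS ready bit did not assert\n");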
@ -174,8 +189,8 @@ struct intel_crtc {
bool cursor_visible;
unsigned int bpp;
bool no_pll; /* tertiary pipe for IVB */
bool use_pll_a;
/* We can share PLLs across outputs if the timings match */
struct intel_pch_pll *pch_pll;
};
struct intel_plane {
@ -199,6 +214,25 @@ struct intel_plane {
struct drm_intel_sprite_colorkey *key);
};
struct intel_watermark_params {
unsigned long fifo_size;
unsigned long max_wm;
unsigned long default_wm;
unsigned long guard_size;
unsigned long cacheline_size;
};
struct cxsr_latency {
int is_desktop;
int is_ddr3;
unsigned long fsb_freq;
unsigned long mem_freq;
unsigned long display_sr;
unsigned long display_hpll_disable;
unsigned long cursor_sr;
unsigned long cursor_hpll_disable;
};
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
#define to_intel_connector(x) container_of(x, struct intel_connector, base)
#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
@ -210,6 +244,8 @@ struct intel_plane {
#define DIP_TYPE_AVI 0x82
#define DIP_VERSION_AVI 0x2
#define DIP_LEN_AVI 13
#define DIP_AVI_PR_1 0
#define DIP_AVI_PR_2 1
#define DIP_TYPE_SPD 0x83
#define DIP_VERSION_SPD 0x1
@ -243,23 +279,36 @@ struct dip_infoframe {
uint8_t ITC_EC_Q_SC;
/* PB4 - VIC 6:0 */
uint8_t VIC;
/* PB5 - PR 3:0 */
uint8_t PR;
/* PB5 - YQ 7:6, CN 5:4, PR 3:0 */
uint8_t YQ_CN_PR;
/* PB6 to PB13 */
uint16_t top_bar_end;
uint16_t bottom_bar_start;
uint16_t left_bar_end;
uint16_t right_bar_start;
} avi;
} __attribute__ ((packed)) avi;
struct {
uint8_t vn[8];
uint8_t pd[16];
uint8_t sdi;
} spd;
} __attribute__ ((packed)) spd;
uint8_t payload[27];
} __attribute__ ((packed)) body;
} __attribute__((packed));
struct intel_hdmi {
struct intel_encoder base;
u32 sdvox_reg;
int ddc_bus;
int ddi_port;
uint32_t color_range;
bool has_hdmi_sink;
bool has_audio;
enum hdmi_force_audio force_audio;
void (*write_infoframe)(struct drm_encoder *encoder,
struct dip_infoframe *frame);
};
static inline struct drm_crtc *
intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
{
@ -299,8 +348,13 @@ extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector)
extern void intel_crt_init(struct drm_device *dev);
extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
extern void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode);
extern void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder);
extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
bool is_sdvob);
extern void intel_dvo_init(struct drm_device *dev);
extern void intel_tv_init(struct drm_device *dev);
extern void intel_mark_busy(struct drm_device *dev,
@ -314,6 +368,10 @@ extern bool intel_dpd_is_edp(struct drm_device *dev);
extern void intel_edp_link_config(struct intel_encoder *, int *, int *);
extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder);
extern int intel_plane_init(struct drm_device *dev, enum pipe pipe);
extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
enum plane plane);
void intel_sanitize_pm(struct drm_device *dev);
/* intel_panel.c */
extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
@ -371,12 +429,19 @@ extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, int regno);
extern void intel_enable_clock_gating(struct drm_device *dev);
extern void ironlake_disable_rc6(struct drm_device *dev);
extern void ironlake_enable_drps(struct drm_device *dev);
extern void ironlake_disable_drps(struct drm_device *dev);
extern void gen6_enable_rps(struct drm_i915_private *dev_priv);
extern void gen6_update_ring_freq(struct drm_i915_private *dev_priv);
extern void gen6_disable_rps(struct drm_device *dev);
extern void intel_init_emon(struct drm_device *dev);
extern int intel_enable_rc6(const struct drm_device *dev);
extern void intel_ddi_dpms(struct drm_encoder *encoder, int mode);
extern void intel_ddi_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
struct drm_i915_gem_object *obj,
@ -414,15 +479,30 @@ extern void intel_init_clock_gating(struct drm_device *dev);
extern void intel_write_eld(struct drm_encoder *encoder,
struct drm_display_mode *mode);
extern void intel_cpt_verify_modeset(struct drm_device *dev, int pipe);
extern void intel_prepare_ddi(struct drm_device *dev);
extern void hsw_fdi_link_train(struct drm_crtc *crtc);
extern void intel_ddi_init(struct drm_device *dev, enum port port);
/* For use by IVB LP watermark workaround in intel_sprite.c */
extern void sandybridge_update_wm(struct drm_device *dev);
extern void intel_update_watermarks(struct drm_device *dev);
extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
uint32_t sprite_width,
int pixel_size);
extern void intel_update_linetime_watermarks(struct drm_device *dev, int pipe,
struct drm_display_mode *mode);
extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
#endif
extern u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg);
/* Power-related functions, located in intel_pm.c */
extern void intel_init_pm(struct drm_device *dev);
/* FBC */
extern bool intel_fbc_enabled(struct drm_device *dev);
extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
extern void intel_update_fbc(struct drm_device *dev);
#endif /* __INTEL_DRV_H__ */

View File

@ -73,7 +73,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
DRM_LOCK(dev);
/* Flush everything out, we'll be doing GTT only from now on */
ret = intel_pin_and_fence_fb_obj(dev, obj, false);
ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
if (ret) {
DRM_ERROR("failed to pin fb: %d\n", ret);
goto out_unref;

View File

@ -37,19 +37,7 @@ __FBSDID("$FreeBSD$");
#include <dev/drm2/i915/i915_drv.h>
#include <dev/drm2/i915/intel_drv.h>
struct intel_hdmi {
struct intel_encoder base;
u32 sdvox_reg;
int ddc_bus;
uint32_t color_range;
bool has_hdmi_sink;
bool has_audio;
enum hdmi_force_audio force_audio;
void (*write_infoframe)(struct drm_encoder *encoder,
struct dip_infoframe *frame);
};
static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
{
return container_of(encoder, struct intel_hdmi, base.base);
}
@ -75,107 +63,244 @@ void intel_dip_infoframe_csum(struct dip_infoframe *frame)
frame->checksum = 0x100 - sum;
}
static u32 intel_infoframe_index(struct dip_infoframe *frame)
static u32 g4x_infoframe_index(struct dip_infoframe *frame)
{
u32 flags = 0;
switch (frame->type) {
case DIP_TYPE_AVI:
flags |= VIDEO_DIP_SELECT_AVI;
break;
return VIDEO_DIP_SELECT_AVI;
case DIP_TYPE_SPD:
flags |= VIDEO_DIP_SELECT_SPD;
break;
return VIDEO_DIP_SELECT_SPD;
default:
DRM_DEBUG("unknown info frame type %d\n", frame->type);
break;
DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
return 0;
}
return flags;
}
static u32 intel_infoframe_flags(struct dip_infoframe *frame)
static u32 g4x_infoframe_enable(struct dip_infoframe *frame)
{
u32 flags = 0;
switch (frame->type) {
case DIP_TYPE_AVI:
flags |= VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_FREQ_VSYNC;
break;
return VIDEO_DIP_ENABLE_AVI;
case DIP_TYPE_SPD:
flags |= VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_FREQ_VSYNC;
break;
return VIDEO_DIP_ENABLE_SPD;
default:
DRM_DEBUG("unknown info frame type %d\n", frame->type);
break;
DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
return 0;
}
return flags;
}
static void i9xx_write_infoframe(struct drm_encoder *encoder,
struct dip_infoframe *frame)
static u32 hsw_infoframe_enable(struct dip_infoframe *frame)
{
switch (frame->type) {
case DIP_TYPE_AVI:
return VIDEO_DIP_ENABLE_AVI_HSW;
case DIP_TYPE_SPD:
return VIDEO_DIP_ENABLE_SPD_HSW;
default:
DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
return 0;
}
}
static u32 hsw_infoframe_data_reg(struct dip_infoframe *frame, enum pipe pipe)
{
switch (frame->type) {
case DIP_TYPE_AVI:
return HSW_TVIDEO_DIP_AVI_DATA(pipe);
case DIP_TYPE_SPD:
return HSW_TVIDEO_DIP_SPD_DATA(pipe);
default:
DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
return 0;
}
}
static void g4x_write_infoframe(struct drm_encoder *encoder,
struct dip_infoframe *frame)
{
uint32_t *data = (uint32_t *)frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 port, flags, val = I915_READ(VIDEO_DIP_CTL);
u32 val = I915_READ(VIDEO_DIP_CTL);
unsigned i, len = DIP_HEADER_SIZE + frame->len;
/* XXX first guess at handling video port, is this correct? */
val &= ~VIDEO_DIP_PORT_MASK;
if (intel_hdmi->sdvox_reg == SDVOB)
port = VIDEO_DIP_PORT_B;
val |= VIDEO_DIP_PORT_B;
else if (intel_hdmi->sdvox_reg == SDVOC)
port = VIDEO_DIP_PORT_C;
val |= VIDEO_DIP_PORT_C;
else
return;
flags = intel_infoframe_index(frame);
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(frame);
val &= ~VIDEO_DIP_SELECT_MASK;
val &= ~g4x_infoframe_enable(frame);
val |= VIDEO_DIP_ENABLE;
I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | val | port | flags);
I915_WRITE(VIDEO_DIP_CTL, val);
for (i = 0; i < len; i += 4) {
I915_WRITE(VIDEO_DIP_DATA, *data);
data++;
}
flags |= intel_infoframe_flags(frame);
val |= g4x_infoframe_enable(frame);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | val | port | flags);
I915_WRITE(VIDEO_DIP_CTL, val);
}
static void ironlake_write_infoframe(struct drm_encoder *encoder,
struct dip_infoframe *frame)
static void ibx_write_infoframe(struct drm_encoder *encoder,
struct dip_infoframe *frame)
{
uint32_t *data = (uint32_t *)frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
unsigned i, len = DIP_HEADER_SIZE + frame->len;
u32 flags, val = I915_READ(reg);
u32 val = I915_READ(reg);
val &= ~VIDEO_DIP_PORT_MASK;
switch (intel_hdmi->sdvox_reg) {
case HDMIB:
val |= VIDEO_DIP_PORT_B;
break;
case HDMIC:
val |= VIDEO_DIP_PORT_C;
break;
case HDMID:
val |= VIDEO_DIP_PORT_D;
break;
default:
return;
}
intel_wait_for_vblank(dev, intel_crtc->pipe);
flags = intel_infoframe_index(frame);
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(frame);
I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags);
val &= ~g4x_infoframe_enable(frame);
val |= VIDEO_DIP_ENABLE;
I915_WRITE(reg, val);
for (i = 0; i < len; i += 4) {
I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
data++;
}
flags |= intel_infoframe_flags(frame);
val |= g4x_infoframe_enable(frame);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags);
I915_WRITE(reg, val);
}
static void cpt_write_infoframe(struct drm_encoder *encoder,
struct dip_infoframe *frame)
{
uint32_t *data = (uint32_t *)frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
unsigned i, len = DIP_HEADER_SIZE + frame->len;
u32 val = I915_READ(reg);
intel_wait_for_vblank(dev, intel_crtc->pipe);
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(frame);
/* The DIP control register spec says that we need to update the AVI
* infoframe without clearing its enable bit */
if (frame->type == DIP_TYPE_AVI)
val |= VIDEO_DIP_ENABLE_AVI;
else
val &= ~g4x_infoframe_enable(frame);
val |= VIDEO_DIP_ENABLE;
I915_WRITE(reg, val);
for (i = 0; i < len; i += 4) {
I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
data++;
}
val |= g4x_infoframe_enable(frame);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
I915_WRITE(reg, val);
}
static void vlv_write_infoframe(struct drm_encoder *encoder,
struct dip_infoframe *frame)
{
uint32_t *data = (uint32_t *)frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
unsigned i, len = DIP_HEADER_SIZE + frame->len;
u32 val = I915_READ(reg);
intel_wait_for_vblank(dev, intel_crtc->pipe);
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(frame);
val &= ~g4x_infoframe_enable(frame);
val |= VIDEO_DIP_ENABLE;
I915_WRITE(reg, val);
for (i = 0; i < len; i += 4) {
I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
data++;
}
val |= g4x_infoframe_enable(frame);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
I915_WRITE(reg, val);
}
static void hsw_write_infoframe(struct drm_encoder *encoder,
struct dip_infoframe *frame)
{
uint32_t *data = (uint32_t *)frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 data_reg = hsw_infoframe_data_reg(frame, intel_crtc->pipe);
unsigned int i, len = DIP_HEADER_SIZE + frame->len;
u32 val = I915_READ(ctl_reg);
if (data_reg == 0)
return;
intel_wait_for_vblank(dev, intel_crtc->pipe);
val &= ~hsw_infoframe_enable(frame);
I915_WRITE(ctl_reg, val);
for (i = 0; i < len; i += 4) {
I915_WRITE(data_reg + i, *data);
data++;
}
val |= hsw_infoframe_enable(frame);
I915_WRITE(ctl_reg, val);
}
static void intel_set_infoframe(struct drm_encoder *encoder,
@ -190,7 +315,8 @@ static void intel_set_infoframe(struct drm_encoder *encoder,
intel_hdmi->write_infoframe(encoder, frame);
}
static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct dip_infoframe avi_if = {
.type = DIP_TYPE_AVI,
@ -198,10 +324,13 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
.len = DIP_LEN_AVI,
};
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2;
intel_set_infoframe(encoder, &avi_if);
}
static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
{
struct dip_infoframe spd_if;
@ -222,8 +351,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 sdvox;
@ -260,7 +388,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
I915_WRITE(intel_hdmi->sdvox_reg, sdvox);
POSTING_READ(intel_hdmi->sdvox_reg);
intel_hdmi_set_avi_infoframe(encoder);
intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
intel_hdmi_set_spd_infoframe(encoder);
}
@ -318,7 +446,7 @@ static int intel_hdmi_mode_valid(struct drm_connector *connector,
}
static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
@ -334,7 +462,8 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
intel_hdmi->has_hdmi_sink = false;
intel_hdmi->has_audio = false;
edid = drm_get_edid(connector, dev_priv->gmbus[intel_hdmi->ddc_bus]);
edid = drm_get_edid(connector, intel_gmbus_get_adapter(dev_priv,
intel_hdmi->ddc_bus));
if (edid) {
if (edid->input & DRM_EDID_INPUT_DIGITAL) {
@ -371,7 +500,8 @@ static int intel_hdmi_get_modes(struct drm_connector *connector)
*/
return intel_ddc_get_modes(connector,
dev_priv->gmbus[intel_hdmi->ddc_bus]);
intel_gmbus_get_adapter(dev_priv,
intel_hdmi->ddc_bus));
}
static bool
@ -382,7 +512,9 @@ intel_hdmi_detect_audio(struct drm_connector *connector)
struct edid *edid;
bool has_audio = false;
edid = drm_get_edid(connector, dev_priv->gmbus[intel_hdmi->ddc_bus]);
edid = drm_get_edid(connector,
intel_gmbus_get_adapter(dev_priv,
intel_hdmi->ddc_bus));
if (edid) {
if (edid->input & DRM_EDID_INPUT_DIGITAL)
has_audio = drm_detect_monitor_audio(edid);
@ -458,6 +590,14 @@ static void intel_hdmi_destroy(struct drm_connector *connector)
free(connector, DRM_MEM_KMS);
}
static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs_hsw = {
.dpms = intel_ddi_dpms,
.mode_fixup = intel_hdmi_mode_fixup,
.prepare = intel_encoder_prepare,
.mode_set = intel_ddi_mode_set,
.commit = intel_encoder_commit,
};
static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
.dpms = intel_hdmi_dpms,
.mode_fixup = intel_hdmi_mode_fixup,
@ -542,21 +682,59 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
} else if (sdvox_reg == DDI_BUF_CTL(PORT_B)) {
DRM_DEBUG_DRIVER("LPT: detected output on DDI B\n");
intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
intel_hdmi->ddi_port = PORT_B;
dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
} else if (sdvox_reg == DDI_BUF_CTL(PORT_C)) {
DRM_DEBUG_DRIVER("LPT: detected output on DDI C\n");
intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
intel_hdmi->ddi_port = PORT_C;
dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
} else if (sdvox_reg == DDI_BUF_CTL(PORT_D)) {
DRM_DEBUG_DRIVER("LPT: detected output on DDI D\n");
intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
intel_hdmi->ddi_port = PORT_D;
dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
} else {
/* If we got an unknown sdvox_reg, things are pretty much broken
* in a way that we should let the kernel know about it */
DRM_DEBUG_KMS("unknown sdvox_reg %d\n", sdvox_reg);
}
intel_hdmi->sdvox_reg = sdvox_reg;
if (!HAS_PCH_SPLIT(dev)) {
intel_hdmi->write_infoframe = i9xx_write_infoframe;
intel_hdmi->write_infoframe = g4x_write_infoframe;
I915_WRITE(VIDEO_DIP_CTL, 0);
} else if (IS_VALLEYVIEW(dev)) {
intel_hdmi->write_infoframe = vlv_write_infoframe;
for_each_pipe(i)
I915_WRITE(VLV_TVIDEO_DIP_CTL(i), 0);
} else if (IS_HASWELL(dev)) {
/* FIXME: Haswell has a new set of DIP frame registers, but we are
* just doing the minimal required for HDMI to work at this stage.
*/
intel_hdmi->write_infoframe = hsw_write_infoframe;
for_each_pipe(i)
I915_WRITE(HSW_TVIDEO_DIP_CTL(i), 0);
} else if (HAS_PCH_IBX(dev)) {
intel_hdmi->write_infoframe = ibx_write_infoframe;
for_each_pipe(i)
I915_WRITE(TVIDEO_DIP_CTL(i), 0);
} else {
intel_hdmi->write_infoframe = ironlake_write_infoframe;
intel_hdmi->write_infoframe = cpt_write_infoframe;
for_each_pipe(i)
I915_WRITE(TVIDEO_DIP_CTL(i), 0);
}
drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
if (IS_HASWELL(dev))
drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs_hsw);
else
drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
intel_hdmi_add_properties(intel_hdmi, connector);

View File

@ -67,9 +67,22 @@ __FBSDID("$FreeBSD$");
#include "iicbus_if.h"
#include "iicbb_if.h"
static int intel_iic_quirk_xfer(device_t idev, struct iic_msg *msgs, int nmsgs);
static void intel_teardown_gmbus_m(struct drm_device *dev, int m);
struct gmbus_port {
const char *name;
int reg;
};
static const struct gmbus_port gmbus_ports[] = {
{ "ssc", GPIOB },
{ "vga", GPIOA },
{ "panel", GPIOC },
{ "dpc", GPIOD },
{ "dpb", GPIOE },
{ "dpd", GPIOF },
};
/* Intel GPIO access functions */
#define I2C_RISEFALL_TIME 10
@ -128,10 +141,7 @@ intel_iic_reset(struct drm_device *dev)
struct drm_i915_private *dev_priv;
dev_priv = dev->dev_private;
if (HAS_PCH_SPLIT(dev))
I915_WRITE(PCH_GMBUS0, 0);
else
I915_WRITE(GMBUS0, 0);
I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0);
}
static int
@ -224,129 +234,232 @@ intel_iicbb_getscl(device_t idev)
return ((I915_READ_NOTRACE(sc->reg) & GPIO_CLOCK_VAL_IN) != 0);
}
static int
gmbus_xfer_read(struct drm_i915_private *dev_priv, struct iic_msg *msg,
u32 gmbus1_index)
{
int reg_offset = dev_priv->gpio_mmio_base;
u16 len = msg->len;
u8 *buf = msg->buf;
I915_WRITE(GMBUS1 + reg_offset,
gmbus1_index |
GMBUS_CYCLE_WAIT |
(len << GMBUS_BYTE_COUNT_SHIFT) |
(msg->slave << (GMBUS_SLAVE_ADDR_SHIFT - 1)) |
GMBUS_SLAVE_READ | GMBUS_SW_RDY);
while (len) {
int ret;
u32 val, loop = 0;
u32 gmbus2;
ret = _intel_wait_for(dev_priv->dev,
((gmbus2 = I915_READ(GMBUS2 + reg_offset)) &
(GMBUS_SATOER | GMBUS_HW_RDY)),
50, 1, "915gbr");
if (ret)
return (ETIMEDOUT);
if (gmbus2 & GMBUS_SATOER)
return (ENXIO);
val = I915_READ(GMBUS3 + reg_offset);
do {
*buf++ = val & 0xff;
val >>= 8;
} while (--len != 0 && ++loop < 4);
}
return 0;
}
static int
gmbus_xfer_write(struct drm_i915_private *dev_priv, struct iic_msg *msg)
{
int reg_offset = dev_priv->gpio_mmio_base;
u16 len = msg->len;
u8 *buf = msg->buf;
u32 val, loop;
val = loop = 0;
while (len && loop < 4) {
val |= *buf++ << (8 * loop++);
len -= 1;
}
I915_WRITE(GMBUS3 + reg_offset, val);
I915_WRITE(GMBUS1 + reg_offset,
GMBUS_CYCLE_WAIT |
(msg->len << GMBUS_BYTE_COUNT_SHIFT) |
(msg->slave << (GMBUS_SLAVE_ADDR_SHIFT - 1)) |
GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
while (len) {
int ret;
u32 gmbus2;
val = loop = 0;
do {
val |= *buf++ << (8 * loop);
} while (--len != 0 && ++loop < 4);
I915_WRITE(GMBUS3 + reg_offset, val);
ret = _intel_wait_for(dev_priv->dev,
((gmbus2 = I915_READ(GMBUS2 + reg_offset)) &
(GMBUS_SATOER | GMBUS_HW_RDY)),
50, 1, "915gbw");
if (ret)
return (ETIMEDOUT);
if (gmbus2 & GMBUS_SATOER)
return (ENXIO);
}
return 0;
}
/*
* The gmbus controller can combine a 1 or 2 byte write with a read that
* immediately follows it by using an "INDEX" cycle.
*/
static bool
gmbus_is_index_read(struct iic_msg *msgs, int i, int num)
{
return (i + 1 < num &&
!(msgs[i].flags & IIC_M_RD) && msgs[i].len <= 2 &&
(msgs[i + 1].flags & IIC_M_RD));
}
static int
gmbus_xfer_index_read(struct drm_i915_private *dev_priv, struct iic_msg *msgs)
{
int reg_offset = dev_priv->gpio_mmio_base;
u32 gmbus1_index = 0;
u32 gmbus5 = 0;
int ret;
if (msgs[0].len == 2)
gmbus5 = GMBUS_2BYTE_INDEX_EN |
msgs[0].buf[1] | (msgs[0].buf[0] << 8);
if (msgs[0].len == 1)
gmbus1_index = GMBUS_CYCLE_INDEX |
(msgs[0].buf[0] << GMBUS_SLAVE_INDEX_SHIFT);
/* GMBUS5 holds 16-bit index */
if (gmbus5)
I915_WRITE(GMBUS5 + reg_offset, gmbus5);
ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus1_index);
/* Clear GMBUS5 after each index transfer */
if (gmbus5)
I915_WRITE(GMBUS5 + reg_offset, 0);
return ret;
}
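For illustration, the classic consumer of this INDEX path is an EDID-style fetch: a one- or two-byte offset write immediately followed by a block read. From the iicbus side that looks roughly like the sketch below; the slave address and the adapter device are placeholders, and only the two-message shape matters for gmbus_is_index_read() to fold the pair into a single cycle:

	uint8_t offset = 0;
	uint8_t edid[128];
	struct iic_msg msgs[2] = {
		/* 1-byte write: becomes the index for the read that follows. */
		{ .slave = 0x50 << 1, .flags = IIC_M_WR, .len = 1, .buf = &offset },
		/* The read is carried out in the same GMBUS INDEX cycle. */
		{ .slave = 0x50 << 1, .flags = IIC_M_RD, .len = sizeof(edid), .buf = edid },
	};
	int error;

	/* iicdev would be a gmbus adapter, e.g. from intel_gmbus_get_adapter(). */
	error = IICBUS_TRANSFER(iicdev, msgs, 2);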
static int
intel_gmbus_transfer(device_t idev, struct iic_msg *msgs, uint32_t nmsgs)
{
struct intel_iic_softc *sc;
struct drm_i915_private *dev_priv;
u8 *buf;
int error, i, reg_offset, unit;
u32 val, loop;
u16 len;
int error, i, ret, reg_offset, unit;
error = 0;
sc = device_get_softc(idev);
dev_priv = sc->drm_dev->dev_private;
unit = device_get_unit(idev);
sx_xlock(&dev_priv->gmbus_sx);
if (sc->force_bit_dev) {
error = intel_iic_quirk_xfer(dev_priv->bbbus[unit], msgs, nmsgs);
error = IICBUS_TRANSFER(dev_priv->bbbus[unit], msgs, nmsgs);
goto out;
}
reg_offset = HAS_PCH_SPLIT(dev_priv->dev) ? PCH_GMBUS0 - GMBUS0 : 0;
reg_offset = dev_priv->gpio_mmio_base;
I915_WRITE(GMBUS0 + reg_offset, sc->reg0);
for (i = 0; i < nmsgs; i++) {
len = msgs[i].len;
buf = msgs[i].buf;
u32 gmbus2;
if ((msgs[i].flags & IIC_M_RD) != 0) {
I915_WRITE(GMBUS1 + reg_offset, GMBUS_CYCLE_WAIT |
(i + 1 == nmsgs ? GMBUS_CYCLE_STOP : 0) |
(len << GMBUS_BYTE_COUNT_SHIFT) |
(msgs[i].slave << (GMBUS_SLAVE_ADDR_SHIFT - 1)) |
GMBUS_SLAVE_READ | GMBUS_SW_RDY);
POSTING_READ(GMBUS2 + reg_offset);
do {
loop = 0;
if (_intel_wait_for(sc->drm_dev,
(I915_READ(GMBUS2 + reg_offset) &
(GMBUS_SATOER | GMBUS_HW_RDY)) != 0,
50, 1, "915gbr"))
goto timeout;
if ((I915_READ(GMBUS2 + reg_offset) &
GMBUS_SATOER) != 0)
goto clear_err;
val = I915_READ(GMBUS3 + reg_offset);
do {
*buf++ = val & 0xff;
val >>= 8;
} while (--len != 0 && ++loop < 4);
} while (len != 0);
if (gmbus_is_index_read(msgs, i, nmsgs)) {
error = gmbus_xfer_index_read(dev_priv, &msgs[i]);
i += 1; /* set i to the index of the read xfer */
} else if (msgs[i].flags & IIC_M_RD) {
error = gmbus_xfer_read(dev_priv, &msgs[i], 0);
} else {
val = loop = 0;
do {
val |= *buf++ << (8 * loop);
} while (--len != 0 && ++loop < 4);
I915_WRITE(GMBUS3 + reg_offset, val);
I915_WRITE(GMBUS1 + reg_offset, GMBUS_CYCLE_WAIT |
(i + 1 == nmsgs ? GMBUS_CYCLE_STOP : 0) |
(msgs[i].len << GMBUS_BYTE_COUNT_SHIFT) |
(msgs[i].slave << (GMBUS_SLAVE_ADDR_SHIFT - 1)) |
GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
POSTING_READ(GMBUS2+reg_offset);
while (len != 0) {
if (_intel_wait_for(sc->drm_dev,
(I915_READ(GMBUS2 + reg_offset) &
(GMBUS_SATOER | GMBUS_HW_RDY)) != 0,
50, 1, "915gbw"))
goto timeout;
if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
goto clear_err;
val = loop = 0;
do {
val |= *buf++ << (8 * loop);
} while (--len != 0 && ++loop < 4);
I915_WRITE(GMBUS3 + reg_offset, val);
POSTING_READ(GMBUS2 + reg_offset);
}
error = gmbus_xfer_write(dev_priv, &msgs[i]);
}
if (i + 1 < nmsgs && _intel_wait_for(sc->drm_dev,
(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER |
GMBUS_HW_WAIT_PHASE)) != 0,
50, 1, "915gbh"))
if (error == ETIMEDOUT)
goto timeout;
if ((I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER) != 0)
if (error == ENXIO)
goto clear_err;
ret = _intel_wait_for(sc->drm_dev,
((gmbus2 = I915_READ(GMBUS2 + reg_offset)) &
(GMBUS_SATOER | GMBUS_HW_WAIT_PHASE)),
50, 1, "915gbh");
if (ret)
goto timeout;
if (gmbus2 & GMBUS_SATOER)
goto clear_err;
}
error = 0;
done:
/* Generate a STOP condition on the bus. Note that gmbus can't generate
* a STOP on the very first cycle. To simplify the code we
* unconditionally generate the STOP condition with an additional gmbus
* cycle. */
I915_WRITE(GMBUS1 + reg_offset, GMBUS_CYCLE_STOP | GMBUS_SW_RDY);
/* Mark the GMBUS interface as disabled after waiting for idle.
* We will re-enable it at the start of the next xfer,
* till then let it sleep.
*/
if (_intel_wait_for(dev,
(I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0,
10, 1, "915gbu"))
DRM_INFO("GMBUS timed out waiting for idle\n");
10, 1, "915gbu")) {
DRM_DEBUG_KMS("GMBUS [%s] timed out waiting for idle\n",
sc->name);
error = ETIMEDOUT;
}
I915_WRITE(GMBUS0 + reg_offset, 0);
out:
sx_xunlock(&dev_priv->gmbus_sx);
return (error);
goto out;
clear_err:
/*
* Wait for bus to IDLE before clearing NAK.
* If we clear the NAK while bus is still active, then it will stay
* active and the next transaction may fail.
*/
if (_intel_wait_for(dev,
(I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0,
10, 1, "915gbu"))
DRM_DEBUG_KMS("GMBUS [%s] timed out after NAK\n", sc->name);
/* Toggle the Software Clear Interrupt bit. This has the effect
* of resetting the GMBUS controller and so clearing the
* BUS_ERROR raised by the slave's NAK.
*/
I915_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT);
I915_WRITE(GMBUS1 + reg_offset, 0);
error = EIO;
goto done;
I915_WRITE(GMBUS0 + reg_offset, 0);
DRM_DEBUG_KMS("GMBUS [%s] NAK for addr: %04x %c(%d)\n",
sc->name, msgs[i].slave,
(msgs[i].flags & IIC_M_RD) ? 'r' : 'w', msgs[i].len);
/*
* If no ACK is received during the address phase of a transaction,
* the adapter must report -ENXIO.
* It is not clear what to return if no ACK is received at other times.
* So, we always return -ENXIO in all NAK cases, to ensure we send
* it at least during the one case that is specified.
*/
error = ENXIO;
goto out;
timeout:
DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d [%s]\n",
sc->reg0 & 0xff, sc->name);
DRM_INFO("GMBUS [%s] timed out, falling back to bit banging on pin %d\n",
sc->name, sc->reg0 & 0xff);
I915_WRITE(GMBUS0 + reg_offset, 0);
/*
@ -354,9 +467,23 @@ intel_gmbus_transfer(device_t idev, struct iic_msg *msgs, uint32_t nmsgs)
* Try GPIO bitbanging instead.
*/
sc->force_bit_dev = true;
error = intel_iic_quirk_xfer(dev_priv->bbbus[unit], msgs, nmsgs);
error = IICBUS_TRANSFER(idev, msgs, nmsgs);
goto out;
out:
sx_xunlock(&dev_priv->gmbus_sx);
return (error);
}
device_t
intel_gmbus_get_adapter(struct drm_i915_private *dev_priv,
unsigned port)
{
if (!intel_gmbus_is_port_valid(port))
DRM_ERROR("GMBUS get adapter %d: invalid port\n", port);
return (intel_gmbus_is_port_valid(port) ? dev_priv->gmbus[port - 1] :
NULL);
}
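Callers are expected to check the port before using the returned adapter and then hand it to the ordinary i2c paths. A hedged sketch of that pattern, mirroring the LVDS and SDVO call sites later in this diff (the helper name is made up, error handling is elided, and the driver's usual headers are assumed):

/*
 * Illustrative only: fetch an EDID over a GMBUS port.  Assumes the
 * normal i915/drm2 include environment (intel_drv.h and friends).
 */
static struct edid *
fetch_edid(struct drm_connector *connector,
    struct drm_i915_private *dev_priv, unsigned port)
{
	device_t adapter;

	if (!intel_gmbus_is_port_valid(port))
		return (NULL);
	adapter = intel_gmbus_get_adapter(dev_priv, port);
	return (drm_get_edid(connector, adapter));
}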
void
@ -379,46 +506,35 @@ intel_gmbus_force_bit(device_t idev, bool force_bit)
}
static int
intel_iic_quirk_xfer(device_t idev, struct iic_msg *msgs, int nmsgs)
intel_iicbb_pre_xfer(device_t idev)
{
device_t bridge_dev;
struct intel_iic_softc *sc;
struct drm_i915_private *dev_priv;
int ret;
int i;
bridge_dev = device_get_parent(device_get_parent(idev));
sc = device_get_softc(bridge_dev);
sc = device_get_softc(idev);
dev_priv = sc->drm_dev->dev_private;
intel_iic_reset(sc->drm_dev);
intel_iic_quirk_set(dev_priv, true);
IICBB_SETSDA(bridge_dev, 1);
IICBB_SETSCL(bridge_dev, 1);
IICBB_SETSDA(idev, 1);
IICBB_SETSCL(idev, 1);
DELAY(I2C_RISEFALL_TIME);
for (i = 0; i < nmsgs - 1; i++) {
/* force use of repeated start instead of default stop+start */
msgs[i].flags |= IIC_M_NOSTOP;
}
ret = iicbus_transfer(idev, msgs, nmsgs);
IICBB_SETSDA(bridge_dev, 1);
IICBB_SETSCL(bridge_dev, 1);
intel_iic_quirk_set(dev_priv, false);
return (ret);
return (0);
}
static const char *gpio_names[GMBUS_NUM_PORTS] = {
"disabled",
"ssc",
"vga",
"panel",
"dpc",
"dpb",
"reserved",
"dpd",
};
static void
intel_iicbb_post_xfer(device_t idev)
{
struct intel_iic_softc *sc;
struct drm_i915_private *dev_priv;
sc = device_get_softc(idev);
dev_priv = sc->drm_dev->dev_private;
IICBB_SETSDA(idev, 1);
IICBB_SETSCL(idev, 1);
intel_iic_quirk_set(dev_priv, false);
}
static int
intel_gmbus_probe(device_t dev)
@ -432,23 +548,28 @@ intel_gmbus_attach(device_t idev)
{
struct drm_i915_private *dev_priv;
struct intel_iic_softc *sc;
int pin;
int pin, port;
sc = device_get_softc(idev);
sc->drm_dev = device_get_softc(device_get_parent(idev));
dev_priv = sc->drm_dev->dev_private;
pin = device_get_unit(idev);
port = pin + 1;
snprintf(sc->name, sizeof(sc->name), "gmbus bus %s", gpio_names[pin]);
snprintf(sc->name, sizeof(sc->name), "gmbus %s", gmbus_ports[pin].name);
device_set_desc(idev, sc->name);
/* By default use a conservative clock rate */
sc->reg0 = pin | GMBUS_RATE_100KHZ;
sc->reg0 = port | GMBUS_RATE_100KHZ;
/* XXX force bit banging until GMBUS is fully debugged */
/* gmbus seems to be broken on i830 */
if (IS_I830(sc->drm_dev))
sc->force_bit_dev = true;
#if 0
if (IS_GEN2(sc->drm_dev)) {
sc->force_bit_dev = true;
}
#endif
/* add bus interface device */
sc->iic_dev = device_add_child(idev, "iicbus", -1);
@ -490,17 +611,6 @@ intel_iicbb_probe(device_t dev)
static int
intel_iicbb_attach(device_t idev)
{
static const int map_pin_to_reg[] = {
0,
GPIOB,
GPIOA,
GPIOC,
GPIOD,
GPIOE,
0,
GPIOF
};
struct intel_iic_softc *sc;
struct drm_i915_private *dev_priv;
int pin;
@ -510,13 +620,12 @@ intel_iicbb_attach(device_t idev)
dev_priv = sc->drm_dev->dev_private;
pin = device_get_unit(idev);
snprintf(sc->name, sizeof(sc->name), "i915 iicbb %s", gpio_names[pin]);
snprintf(sc->name, sizeof(sc->name), "i915 iicbb %s",
gmbus_ports[pin].name);
device_set_desc(idev, sc->name);
sc->reg0 = pin | GMBUS_RATE_100KHZ;
sc->reg = map_pin_to_reg[pin];
if (HAS_PCH_SPLIT(dev_priv->dev))
sc->reg += PCH_GPIOA - GPIOA;
sc->reg = dev_priv->gpio_mmio_base + gmbus_ports[pin].reg;
/* add generic bit-banging code */
sc->iic_dev = device_add_child(idev, "iicbb", -1);
@ -524,6 +633,7 @@ intel_iicbb_attach(device_t idev)
return (ENXIO);
device_quiet(sc->iic_dev);
bus_generic_attach(idev);
iicbus_set_nostop(idev, true);
return (0);
}
@ -574,6 +684,8 @@ static device_method_t intel_iicbb_methods[] = {
DEVMETHOD(iicbb_setscl, intel_iicbb_setscl),
DEVMETHOD(iicbb_getsda, intel_iicbb_getsda),
DEVMETHOD(iicbb_getscl, intel_iicbb_getscl),
DEVMETHOD(iicbb_pre_xfer, intel_iicbb_pre_xfer),
DEVMETHOD(iicbb_post_xfer, intel_iicbb_post_xfer),
DEVMETHOD_END
};
static driver_t intel_iicbb_driver = {
@ -595,14 +707,10 @@ intel_setup_gmbus(struct drm_device *dev)
dev_priv = dev->dev_private;
sx_init(&dev_priv->gmbus_sx, "gmbus");
dev_priv->gmbus_bridge = malloc(sizeof(device_t) * GMBUS_NUM_PORTS,
DRM_MEM_DRIVER, M_WAITOK | M_ZERO);
dev_priv->bbbus_bridge = malloc(sizeof(device_t) * GMBUS_NUM_PORTS,
DRM_MEM_DRIVER, M_WAITOK | M_ZERO);
dev_priv->gmbus = malloc(sizeof(device_t) * GMBUS_NUM_PORTS,
DRM_MEM_DRIVER, M_WAITOK | M_ZERO);
dev_priv->bbbus = malloc(sizeof(device_t) * GMBUS_NUM_PORTS,
DRM_MEM_DRIVER, M_WAITOK | M_ZERO);
if (HAS_PCH_SPLIT(dev))
dev_priv->gpio_mmio_base = PCH_GPIOA - GPIOA;
else
dev_priv->gpio_mmio_base = 0;
/*
* The Giant there is recursed, most likely. Normally, the
@ -610,7 +718,7 @@ intel_setup_gmbus(struct drm_device *dev)
* driver.
*/
mtx_lock(&Giant);
for (i = 0; i < GMBUS_NUM_PORTS; i++) {
for (i = 0; i <= GMBUS_NUM_PORTS; i++) {
/*
* Initialized bbbus_bridge before gmbus_bridge, since
* gmbus may decide to force quirk transfer in the
@ -689,14 +797,6 @@ intel_teardown_gmbus_m(struct drm_device *dev, int m)
dev_priv = dev->dev_private;
free(dev_priv->gmbus, DRM_MEM_DRIVER);
dev_priv->gmbus = NULL;
free(dev_priv->bbbus, DRM_MEM_DRIVER);
dev_priv->bbbus = NULL;
free(dev_priv->gmbus_bridge, DRM_MEM_DRIVER);
dev_priv->gmbus_bridge = NULL;
free(dev_priv->bbbus_bridge, DRM_MEM_DRIVER);
dev_priv->bbbus_bridge = NULL;
sx_destroy(&dev_priv->gmbus_sx);
}

View File

@ -230,7 +230,7 @@ static inline u32 panel_fitter_scaling(u32 source, u32 target)
}
static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
@ -482,7 +482,7 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id)
{
DRM_DEBUG_KMS("Skipping forced modeset for %s\n", id->ident);
DRM_INFO("Skipping forced modeset for %s\n", id->ident);
return 1;
}
@ -638,7 +638,7 @@ static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
static int intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
{
DRM_DEBUG_KMS("Skipping LVDS initialization for %s\n", id->ident);
DRM_INFO("Skipping LVDS initialization for %s\n", id->ident);
return 1;
}
@ -861,8 +861,8 @@ static bool lvds_is_present_in_vbt(struct drm_device *dev,
child->device_type != DEVICE_TYPE_LFP)
continue;
if (child->i2c_pin)
*i2c_pin = child->i2c_pin;
if (intel_gmbus_is_port_valid(child->i2c_pin))
*i2c_pin = child->i2c_pin;
/* However, we cannot trust the BIOS writers to populate
* the VBT correctly. Since LVDS requires additional
@ -996,7 +996,8 @@ bool intel_lvds_init(struct drm_device *dev)
* Attempt to get the fixed panel mode from DDC. Assume that the
* preferred mode is the right one.
*/
intel_lvds->edid = drm_get_edid(connector, dev_priv->gmbus[pin]);
intel_lvds->edid = drm_get_edid(connector,
intel_gmbus_get_adapter(dev_priv, pin));
if (intel_lvds->edid) {
if (drm_add_edid_modes(connector,
intel_lvds->edid)) {

View File

@ -58,8 +58,8 @@ bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus)
}
};
return (iicbus_transfer(dev_priv->gmbus[ddc_bus], msgs, 2)
== 0/* XXXKIB 2*/);
return (iicbus_transfer(intel_gmbus_get_adapter(dev_priv, ddc_bus),
msgs, 2) == 0/* XXXKIB 2*/);
}
/**

View File

@ -219,20 +219,21 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
{
struct drm_device *dev = overlay->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
int ret;
KASSERT(!overlay->last_flip_req, ("Overlay already has flip req"));
ret = i915_add_request(LP_RING(dev_priv), NULL, request);
ret = i915_add_request(ring, NULL, request);
if (ret) {
free(request, DRM_I915_GEM);
return ret;
}
overlay->last_flip_req = request->seqno;
overlay->flip_tail = tail;
ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req,
true);
ret = i915_wait_request(ring, overlay->last_flip_req);
if (ret)
return ret;
i915_gem_retire_requests(dev);
overlay->last_flip_req = 0;
return 0;
@ -266,7 +267,7 @@ i830_activate_pipe_a(struct drm_device *dev)
DRM_DEBUG_DRIVER("Enabling pipe A in order to enable overlay\n");
mode = drm_mode_duplicate(dev, &vesa_640x480);
drm_mode_set_crtcinfo(mode, 0);
if (!drm_crtc_helper_set_mode(&crtc->base, mode,
crtc->base.x, crtc->base.y,
crtc->base.fb))
@ -291,6 +292,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
struct drm_i915_gem_request *request;
int pipe_a_quirk = 0;
int ret;
@ -306,17 +308,17 @@ static int intel_overlay_on(struct intel_overlay *overlay)
request = malloc(sizeof(*request), DRM_I915_GEM, M_WAITOK | M_ZERO);
ret = BEGIN_LP_RING(4);
ret = intel_ring_begin(ring, 4);
if (ret) {
free(request, DRM_I915_GEM);
goto out;
}
OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON);
OUT_RING(overlay->flip_addr | OFC_UPDATE);
OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
OUT_RING(MI_NOOP);
ADVANCE_LP_RING();
intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
ret = intel_overlay_do_wait_request(overlay, request, NULL);
out:
@ -332,6 +334,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
{
struct drm_device *dev = overlay->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
struct drm_i915_gem_request *request;
u32 flip_addr = overlay->flip_addr;
u32 tmp;
@ -349,16 +352,16 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
if (tmp & (1 << 17))
DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
ret = BEGIN_LP_RING(2);
ret = intel_ring_begin(ring, 2);
if (ret) {
free(request, DRM_I915_GEM);
return ret;
}
OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
OUT_RING(flip_addr);
ADVANCE_LP_RING();
intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
intel_ring_emit(ring, flip_addr);
intel_ring_advance(ring);
ret = i915_add_request(LP_RING(dev_priv), NULL, request);
ret = i915_add_request(ring, NULL, request);
if (ret) {
free(request, DRM_I915_GEM);
return ret;
@ -399,6 +402,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
u32 flip_addr = overlay->flip_addr;
struct drm_i915_gem_request *request;
int ret;
@ -413,20 +417,20 @@ static int intel_overlay_off(struct intel_overlay *overlay)
* of the hw. Do it in both cases */
flip_addr |= OFC_UPDATE;
ret = BEGIN_LP_RING(6);
ret = intel_ring_begin(ring, 6);
if (ret) {
free(request, DRM_I915_GEM);
return ret;
}
/* wait for overlay to go idle */
OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
OUT_RING(flip_addr);
OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
intel_ring_emit(ring, flip_addr);
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
/* turn overlay off */
OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
OUT_RING(flip_addr);
OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
ADVANCE_LP_RING();
intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
intel_ring_emit(ring, flip_addr);
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
intel_ring_advance(ring);
return intel_overlay_do_wait_request(overlay, request,
intel_overlay_off_tail);
@ -438,15 +442,16 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
int ret;
if (overlay->last_flip_req == 0)
return 0;
ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req,
true);
ret = i915_wait_request(ring, overlay->last_flip_req);
if (ret)
return ret;
i915_gem_retire_requests(dev);
if (overlay->flip_tail)
overlay->flip_tail(overlay);
@ -463,6 +468,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
int ret;
/* Only wait if there is actually an old frame to release to
@ -477,15 +483,15 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
/* synchronous slowpath */
request = malloc(sizeof(*request), DRM_I915_GEM, M_WAITOK | M_ZERO);
ret = BEGIN_LP_RING(2);
ret = intel_ring_begin(ring, 2);
if (ret) {
free(request, DRM_I915_GEM);
return ret;
}
OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
OUT_RING(MI_NOOP);
ADVANCE_LP_RING();
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
ret = intel_overlay_do_wait_request(overlay, request,
intel_overlay_release_old_vid_tail);
@ -764,6 +770,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
int ret, tmp_width;
struct overlay_registers *regs;
bool scale_changed = false;
u32 swidth, swidthsw, sheight, ostride;
KASSERT(overlay != NULL, ("No overlay ?"));
DRM_LOCK_ASSERT(overlay->dev);
@ -782,16 +789,18 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
goto out_unpin;
if (!overlay->active) {
u32 oconfig;
regs = intel_overlay_map_regs(overlay);
if (!regs) {
ret = -ENOMEM;
goto out_unpin;
}
regs->OCONFIG = OCONF_CC_OUT_8BIT;
oconfig = OCONF_CC_OUT_8BIT;
if (IS_GEN4(overlay->dev))
regs->OCONFIG |= OCONF_CSC_MODE_BT709;
regs->OCONFIG |= overlay->crtc->pipe == 0 ?
oconfig |= OCONF_CSC_MODE_BT709;
oconfig |= overlay->crtc->pipe == 0 ?
OCONF_PIPE_A : OCONF_PIPE_B;
regs->OCONFIG = oconfig;
intel_overlay_unmap_regs(overlay, regs);
ret = intel_overlay_on(overlay);
@ -813,29 +822,33 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
else
tmp_width = params->src_w;
regs->SWIDTH = params->src_w;
regs->SWIDTHSW = calc_swidthsw(overlay->dev,
params->offset_Y, tmp_width);
regs->SHEIGHT = params->src_h;
swidth = params->src_w;
swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width);
sheight = params->src_h;
regs->OBUF_0Y = new_bo->gtt_offset + params->offset_Y;
regs->OSTRIDE = params->stride_Y;
ostride = params->stride_Y;
if (params->format & I915_OVERLAY_YUV_PLANAR) {
int uv_hscale = uv_hsubsampling(params->format);
int uv_vscale = uv_vsubsampling(params->format);
u32 tmp_U, tmp_V;
regs->SWIDTH |= (params->src_w/uv_hscale) << 16;
swidth |= (params->src_w/uv_hscale) << 16;
tmp_U = calc_swidthsw(overlay->dev, params->offset_U,
params->src_w/uv_hscale);
tmp_V = calc_swidthsw(overlay->dev, params->offset_V,
params->src_w/uv_hscale);
regs->SWIDTHSW |= max_u32(tmp_U, tmp_V) << 16;
regs->SHEIGHT |= (params->src_h/uv_vscale) << 16;
swidthsw |= max_u32(tmp_U, tmp_V) << 16;
sheight |= (params->src_h/uv_vscale) << 16;
regs->OBUF_0U = new_bo->gtt_offset + params->offset_U;
regs->OBUF_0V = new_bo->gtt_offset + params->offset_V;
regs->OSTRIDE |= params->stride_UV << 16;
ostride |= params->stride_UV << 16;
}
regs->SWIDTH = swidth;
regs->SWIDTHSW = swidthsw;
regs->SHEIGHT = sheight;
regs->OSTRIDE = ostride;
scale_changed = update_scaling_factors(overlay, regs, params);
update_colorkey(overlay, regs);
@ -1108,11 +1121,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
struct put_image_params *params;
int ret;
if (!dev_priv) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
/* No need to check for DRIVER_MODESET - we don't set it up then. */
overlay = dev_priv->overlay;
if (!overlay) {
DRM_DEBUG("userspace bug: no overlay\n");
@ -1307,11 +1316,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
struct overlay_registers *regs;
int ret;
if (!dev_priv) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
/* No need to check for DRIVER_MODESET - we don't set it up then. */
overlay = dev_priv->overlay;
if (!overlay) {
DRM_DEBUG("userspace bug: no overlay\n");

View File

@ -197,6 +197,20 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev)
return max;
}
static u32 intel_panel_compute_brightness(struct drm_device *dev, u32 val)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (i915_panel_invert_brightness < 0)
return val;
if (i915_panel_invert_brightness > 0 ||
dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS)
return intel_panel_get_max_backlight(dev) - val;
return val;
}
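For panels with inverted brightness, either forced through the i915_panel_invert_brightness parameter or flagged by QUIRK_INVERT_BRIGHTNESS, the PWM value is simply mirrored around the maximum backlight. A tiny stand-alone illustration with made-up numbers:

#include <stdint.h>
#include <stdio.h>

/* Same mapping as intel_panel_compute_brightness() on an inverted panel. */
static uint32_t
invert_brightness(uint32_t max, uint32_t val)
{
	return (max - val);
}

int
main(void)
{
	uint32_t max = 0xffff;		/* made-up maximum PWM value */

	printf("requested 0x3000 -> programmed 0x%04x\n",
	    invert_brightness(max, 0x3000));	/* 0xcfff */
	return (0);
}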
u32 intel_panel_get_backlight(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@ -217,7 +231,8 @@ u32 intel_panel_get_backlight(struct drm_device *dev)
}
}
DRM_DEBUG("get backlight PWM = %d\n", val);
val = intel_panel_compute_brightness(dev, val);
DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
return val;
}
@ -233,7 +248,8 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level
struct drm_i915_private *dev_priv = dev->dev_private;
u32 tmp;
DRM_DEBUG("set backlight PWM = %d\n", level);
DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
level = intel_panel_compute_brightness(dev, level);
if (HAS_PCH_SPLIT(dev))
return intel_pch_panel_set_backlight(dev, level);

3788 sys/dev/drm2/i915/intel_pm.c (new file)

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -6,7 +6,7 @@
#define _INTEL_RINGBUFFER_H_
struct intel_hw_status_page {
uint32_t *page_addr;
u32 *page_addr;
unsigned int gfx_addr;
struct drm_i915_gem_object *obj;
};
@ -38,13 +38,13 @@ struct intel_ring_buffer {
BCS,
} id;
#define I915_NUM_RINGS 3
uint32_t mmio_base;
u32 mmio_base;
void *virtual_start;
struct drm_device *dev;
struct drm_i915_gem_object *obj;
uint32_t head;
uint32_t tail;
u32 head;
u32 tail;
int space;
int size;
int effective_size;
@ -60,13 +60,10 @@ struct intel_ring_buffer {
*/
u32 last_retired_head;
struct mtx irq_lock;
uint32_t irq_refcount;
uint32_t irq_mask;
uint32_t irq_seqno; /* last seq seem at irq time */
uint32_t trace_irq_seqno;
uint32_t waiting_seqno;
uint32_t sync_seqno[I915_NUM_RINGS-1];
u32 irq_refcount;
u32 irq_enable_mask; /* bitmask to enable ring interrupt */
u32 trace_irq_seqno;
u32 sync_seqno[I915_NUM_RINGS-1];
bool (*irq_get)(struct intel_ring_buffer *ring);
void (*irq_put)(struct intel_ring_buffer *ring);
@ -120,7 +117,7 @@ struct intel_ring_buffer {
/**
* Do we have some not yet emitted requests outstanding?
*/
uint32_t outstanding_lazy_request;
u32 outstanding_lazy_request;
/**
* Do an explicit TLB flush before MI_SET_CONTEXT
@ -169,6 +166,8 @@ static inline uint32_t
intel_read_status_page(struct intel_ring_buffer *ring, int reg)
{
/* Ensure that the compiler doesn't optimize away the load. */
__compiler_membar();
return (atomic_load_acq_32(ring->status_page.page_addr + reg));
}
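The status page is written by the GPU behind the CPU's back, so every read has to reach memory; the compiler barrier and acquire load above keep the value from being cached across a polling loop. A hedged sketch of the kind of check that sits on top of this helper; I915_GEM_HWS_INDEX and i915_seqno_passed() are assumed to come from i915_drv.h, as in the rest of the driver.

/*
 * Illustrative only: has the ring's breadcrumb reached "seqno" yet?
 * Each call issues a fresh load of the hardware status page slot, which
 * is exactly what a wait loop needs.
 */
static bool
ring_seqno_passed(struct intel_ring_buffer *ring, uint32_t seqno)
{
	return (i915_seqno_passed(intel_read_status_page(ring,
	    I915_GEM_HWS_INDEX), seqno));
}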

View File

@ -44,7 +44,7 @@ __FBSDID("$FreeBSD$");
#define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)
#define SDVO_RGB_MASK (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)
#define SDVO_LVDS_MASK (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1)
#define SDVO_TV_MASK (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0)
#define SDVO_TV_MASK (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_YPRPB0)
#define SDVO_OUTPUT_MASK (SDVO_TMDS_MASK | SDVO_RGB_MASK | SDVO_LVDS_MASK |\
SDVO_TV_MASK)
@ -77,7 +77,7 @@ struct intel_sdvo {
device_t ddc_iic_bus, ddc;
/* Register for the SDVO device: SDVOB or SDVOC */
int sdvo_reg;
uint32_t sdvo_reg;
/* Active outputs controlled by this SDVO output */
uint16_t controlled_output;
@ -117,6 +117,9 @@ struct intel_sdvo {
*/
bool is_tv;
/* On different gens SDVOB is at different places. */
bool is_sdvob;
/* This is for current tv format name */
int tv_format_index;
@ -406,12 +409,10 @@ static const struct _sdvo_cmd_name {
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
};
#define IS_SDVOB(reg) (reg == SDVOB || reg == PCH_SDVOB)
#define SDVO_NAME(svdo) (IS_SDVOB((svdo)->sdvo_reg) ? "SDVOB" : "SDVOC")
#define SDVO_NAME(svdo) ((svdo)->is_sdvob ? "SDVOB" : "SDVOC")
static void
intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
const void *args, int args_len)
static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
const void *args, int args_len)
{
int i;
@ -744,18 +745,18 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
uint16_t h_sync_offset, v_sync_offset;
int mode_clock;
width = mode->crtc_hdisplay;
height = mode->crtc_vdisplay;
width = mode->hdisplay;
height = mode->vdisplay;
/* do some mode translations */
h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start;
h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
/* do some mode translations */
h_blank_len = mode->htotal - mode->hdisplay;
h_sync_len = mode->hsync_end - mode->hsync_start;
v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start;
v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
v_blank_len = mode->vtotal - mode->vdisplay;
v_sync_len = mode->vsync_end - mode->vsync_start;
h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
h_sync_offset = mode->hsync_start - mode->hdisplay;
v_sync_offset = mode->vsync_start - mode->vdisplay;
mode_clock = mode->clock;
mode_clock /= intel_mode_get_pixel_multiplier(mode) ?: 1;
@ -884,17 +885,24 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
};
uint8_t tx_rate = SDVO_HBUF_TX_VSYNC;
uint8_t set_buf_index[2] = { 1, 0 };
uint64_t *data = (uint64_t *)&avi_if;
uint8_t sdvo_data[4 + sizeof(avi_if.body.avi)];
uint64_t *data = (uint64_t *)sdvo_data;
unsigned i;
intel_dip_infoframe_csum(&avi_if);
/* sdvo spec says that the ecc is handled by the hw, and it looks like
* we must not send the ecc field, either. */
memcpy(sdvo_data, &avi_if, 3);
sdvo_data[3] = avi_if.checksum;
memcpy(&sdvo_data[4], &avi_if.body, sizeof(avi_if.body.avi));
if (!intel_sdvo_set_value(intel_sdvo,
SDVO_CMD_SET_HBUF_INDEX,
set_buf_index, 2))
return false;
for (i = 0; i < sizeof(avi_if); i += 8) {
for (i = 0; i < sizeof(sdvo_data); i += 8) {
if (!intel_sdvo_set_value(intel_sdvo,
SDVO_CMD_SET_HBUF_DATA,
data, 8))
@ -964,7 +972,7 @@ intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo,
}
static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
@ -1272,7 +1280,8 @@ intel_sdvo_get_analog_edid(struct drm_connector *connector)
struct drm_i915_private *dev_priv = connector->dev->dev_private;
return drm_get_edid(connector,
dev_priv->gmbus[dev_priv->crt_ddc_pin]);
intel_gmbus_get_adapter(dev_priv,
dev_priv->crt_ddc_pin));
}
static enum drm_connector_status
@ -1361,8 +1370,7 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
return connector_status_unknown;
/* add 30ms delay when the output type might be TV */
if (intel_sdvo->caps.output_flags &
(SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0))
if (intel_sdvo->caps.output_flags & SDVO_TV_MASK)
drm_msleep(30, "915svo");
if (!intel_sdvo_read_response(intel_sdvo, &response, 2))
@ -1582,9 +1590,6 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
intel_sdvo->sdvo_lvds_fixed_mode =
drm_mode_duplicate(connector->dev, newmode);
drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode,
0);
intel_sdvo->is_lvds = true;
break;
}
@ -1916,7 +1921,7 @@ intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
{
struct sdvo_device_mapping *mapping;
if (IS_SDVOB(reg))
if (sdvo->is_sdvob)
mapping = &(dev_priv->sdvo_mappings[0]);
else
mapping = &(dev_priv->sdvo_mappings[1]);
@ -1934,7 +1939,7 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
struct sdvo_device_mapping *mapping;
u8 pin;
if (IS_SDVOB(reg))
if (sdvo->is_sdvob)
mapping = &dev_priv->sdvo_mappings[0];
else
mapping = &dev_priv->sdvo_mappings[1];
@ -1943,12 +1948,12 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
if (mapping->initialized)
pin = mapping->i2c_pin;
if (pin < GMBUS_NUM_PORTS) {
sdvo->i2c = dev_priv->gmbus[pin];
if (intel_gmbus_is_port_valid(pin)) {
sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin);
intel_gmbus_set_speed(sdvo->i2c, GMBUS_RATE_1MHZ);
intel_gmbus_force_bit(sdvo->i2c, true);
} else {
sdvo->i2c = dev_priv->gmbus[GMBUS_PORT_DPB];
sdvo->i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB);
}
}
@ -1959,12 +1964,12 @@ intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device)
}
static u8
intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct sdvo_device_mapping *my_mapping, *other_mapping;
if (IS_SDVOB(sdvo_reg)) {
if (sdvo->is_sdvob) {
my_mapping = &dev_priv->sdvo_mappings[0];
other_mapping = &dev_priv->sdvo_mappings[1];
} else {
@ -1989,7 +1994,7 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
/* No SDVO device info is found for another DVO port,
* so use mapping assumption we had before BIOS parsing.
*/
if (IS_SDVOB(sdvo_reg))
if (sdvo->is_sdvob)
return 0x70;
else
return 0x72;
@ -2214,6 +2219,10 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags)
if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_CVBS0))
return false;
if (flags & SDVO_OUTPUT_YPRPB0)
if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_YPRPB0))
return false;
if (flags & SDVO_OUTPUT_RGB0)
if (!intel_sdvo_analog_init(intel_sdvo, 0))
return false;
@ -2583,7 +2592,7 @@ DRIVER_MODULE_ORDERED(intel_sdvo_ddc_proxy, drmn, intel_sdvo_ddc_proxy_driver,
intel_sdvo_devclass, 0, 0, SI_ORDER_FIRST);
bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder;
@ -2594,7 +2603,8 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
M_WAITOK | M_ZERO);
intel_sdvo->sdvo_reg = sdvo_reg;
intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg) >> 1;
intel_sdvo->is_sdvob = is_sdvob;
intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, intel_sdvo) >> 1;
intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg);
if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev, sdvo_reg)) {
free(intel_sdvo, DRM_MEM_KMS);
@ -2611,13 +2621,13 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
u8 byte;
if (!intel_sdvo_read_byte(intel_sdvo, i, &byte)) {
DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n",
IS_SDVOB(sdvo_reg) ? 'B' : 'C');
DRM_DEBUG_KMS("No SDVO device found on %s\n",
SDVO_NAME(intel_sdvo));
goto err;
}
}
if (IS_SDVOB(sdvo_reg))
if (intel_sdvo->is_sdvob)
dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
else
dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
@ -2636,12 +2646,12 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
&intel_sdvo->hotplug_active, 2);
intel_sdvo->hotplug_active[0] &= ~0x3;
if (!intel_sdvo_output_setup(intel_sdvo,
intel_sdvo->caps.output_flags)) {
DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
IS_SDVOB(sdvo_reg) ? 'B' : 'C');
goto err;
}
if (intel_sdvo_output_setup(intel_sdvo,
intel_sdvo->caps.output_flags) != true) {
DRM_DEBUG_KMS("SDVO output failed to setup on %s\n",
SDVO_NAME(intel_sdvo));
goto err;
}
intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg);

View File

@ -114,14 +114,18 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
* when scaling is disabled.
*/
if (crtc_w != src_w || crtc_h != src_h) {
dev_priv->sprite_scaling_enabled = true;
sandybridge_update_wm(dev);
intel_wait_for_vblank(dev, pipe);
if (!dev_priv->sprite_scaling_enabled) {
dev_priv->sprite_scaling_enabled = true;
intel_update_watermarks(dev);
intel_wait_for_vblank(dev, pipe);
}
sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
} else {
dev_priv->sprite_scaling_enabled = false;
/* potentially re-enable LP watermarks */
sandybridge_update_wm(dev);
if (dev_priv->sprite_scaling_enabled) {
dev_priv->sprite_scaling_enabled = false;
/* potentially re-enable LP watermarks */
intel_update_watermarks(dev);
}
}
I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
@ -137,7 +141,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
I915_WRITE(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
I915_WRITE(SPRSCALE(pipe), sprscale);
I915_WRITE(SPRCTL(pipe), sprctl);
I915_WRITE(SPRSURF(pipe), obj->gtt_offset);
I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset);
POSTING_READ(SPRSURF(pipe));
}
@ -153,8 +157,11 @@ ivb_disable_plane(struct drm_plane *plane)
/* Can't leave the scaler enabled... */
I915_WRITE(SPRSCALE(pipe), 0);
/* Activate double buffered register update */
I915_WRITE(SPRSURF(pipe), 0);
I915_MODIFY_DISPBASE(SPRSURF(pipe), 0);
POSTING_READ(SPRSURF(pipe));
dev_priv->sprite_scaling_enabled = false;
intel_update_watermarks(dev);
}
static int
@ -212,7 +219,7 @@ ivb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
}
static void
snb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
@ -222,7 +229,7 @@ snb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
int pipe = intel_plane->pipe, pixel_size;
u32 dvscntr, dvsscale = 0;
u32 dvscntr, dvsscale;
dvscntr = I915_READ(DVSCNTR(pipe));
@ -266,8 +273,8 @@ snb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
if (obj->tiling_mode != I915_TILING_NONE)
dvscntr |= DVS_TILED;
/* must disable */
dvscntr |= DVS_TRICKLE_FEED_DISABLE;
if (IS_GEN6(dev))
dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */
dvscntr |= DVS_ENABLE;
/* Sizes are 0 based */
@ -278,7 +285,8 @@ snb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size);
if (crtc_w != src_w || crtc_h != src_h)
dvsscale = 0;
if (IS_GEN5(dev) || crtc_w != src_w || crtc_h != src_h)
dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
@ -294,12 +302,12 @@ snb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
I915_WRITE(DVSSCALE(pipe), dvsscale);
I915_WRITE(DVSCNTR(pipe), dvscntr);
I915_WRITE(DVSSURF(pipe), obj->gtt_offset);
I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset);
POSTING_READ(DVSSURF(pipe));
}
static void
snb_disable_plane(struct drm_plane *plane)
ilk_disable_plane(struct drm_plane *plane)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@ -310,7 +318,7 @@ snb_disable_plane(struct drm_plane *plane)
/* Disable the scaler */
I915_WRITE(DVSSCALE(pipe), 0);
/* Flush double buffered register updates */
I915_WRITE(DVSSURF(pipe), 0);
I915_MODIFY_DISPBASE(DVSSURF(pipe), 0);
POSTING_READ(DVSSURF(pipe));
}
@ -337,7 +345,7 @@ intel_disable_primary(struct drm_crtc *crtc)
}
static int
snb_update_colorkey(struct drm_plane *plane,
ilk_update_colorkey(struct drm_plane *plane,
struct drm_intel_sprite_colorkey *key)
{
struct drm_device *dev = plane->dev;
@ -366,7 +374,7 @@ snb_update_colorkey(struct drm_plane *plane,
}
static void
snb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
ilk_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@ -554,14 +562,13 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_intel_sprite_colorkey *set = data;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mode_object *obj;
struct drm_plane *plane;
struct intel_plane *intel_plane;
int ret = 0;
if (!dev_priv)
return -EINVAL;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
/* Make sure we don't try to enable both src & dest simultaneously */
if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
@ -588,14 +595,13 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_intel_sprite_colorkey *get = data;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mode_object *obj;
struct drm_plane *plane;
struct intel_plane *intel_plane;
int ret = 0;
if (!dev_priv)
return -EINVAL;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
sx_xlock(&dev->mode_config.mutex);
@ -620,6 +626,14 @@ static const struct drm_plane_funcs intel_plane_funcs = {
.destroy = intel_destroy_plane,
};
static uint32_t ilk_plane_formats[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
DRM_FORMAT_UYVY,
DRM_FORMAT_VYUY,
};
static uint32_t snb_plane_formats[] = {
DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB8888,
@ -634,33 +648,55 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe)
{
struct intel_plane *intel_plane;
unsigned long possible_crtcs;
const uint32_t *plane_formats;
int num_plane_formats;
int ret;
if (!(IS_GEN6(dev) || IS_GEN7(dev)))
if (INTEL_INFO(dev)->gen < 5)
return -ENODEV;
intel_plane = malloc(sizeof(struct intel_plane), DRM_MEM_KMS,
M_WAITOK | M_ZERO);
if (IS_GEN6(dev)) {
switch (INTEL_INFO(dev)->gen) {
case 5:
case 6:
intel_plane->max_downscale = 16;
intel_plane->update_plane = snb_update_plane;
intel_plane->disable_plane = snb_disable_plane;
intel_plane->update_colorkey = snb_update_colorkey;
intel_plane->get_colorkey = snb_get_colorkey;
} else if (IS_GEN7(dev)) {
intel_plane->update_plane = ilk_update_plane;
intel_plane->disable_plane = ilk_disable_plane;
intel_plane->update_colorkey = ilk_update_colorkey;
intel_plane->get_colorkey = ilk_get_colorkey;
if (IS_GEN6(dev)) {
plane_formats = snb_plane_formats;
num_plane_formats = DRM_ARRAY_SIZE(snb_plane_formats);
} else {
plane_formats = ilk_plane_formats;
num_plane_formats = DRM_ARRAY_SIZE(ilk_plane_formats);
}
break;
case 7:
intel_plane->max_downscale = 2;
intel_plane->update_plane = ivb_update_plane;
intel_plane->disable_plane = ivb_disable_plane;
intel_plane->update_colorkey = ivb_update_colorkey;
intel_plane->get_colorkey = ivb_get_colorkey;
plane_formats = snb_plane_formats;
num_plane_formats = DRM_ARRAY_SIZE(snb_plane_formats);
break;
default:
return -ENODEV;
}
intel_plane->pipe = pipe;
possible_crtcs = (1 << pipe);
ret = drm_plane_init(dev, &intel_plane->base, possible_crtcs,
&intel_plane_funcs, snb_plane_formats,
DRM_ARRAY_SIZE(snb_plane_formats), false);
&intel_plane_funcs,
plane_formats, num_plane_formats,
false);
if (ret)
free(intel_plane, DRM_MEM_KMS);

View File

@ -814,7 +814,7 @@ intel_tv_mode_lookup(const char *tv_format)
{
int i;
for (i = 0; i < sizeof(tv_modes) / sizeof(tv_modes[0]); i++) {
for (i = 0; i < DRM_ARRAY_SIZE(tv_modes); i++) {
const struct tv_mode *tv_mode = &tv_modes[i];
if (!strcmp(tv_format, tv_mode->name))
@ -846,7 +846,7 @@ intel_tv_mode_valid(struct drm_connector *connector,
static bool
intel_tv_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mode *mode,
intel_tv_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
@ -1155,6 +1155,15 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
DAC_B_0_7_V |
DAC_C_0_7_V);
/*
* The TV sense state should be cleared to zero on the Cantiga platform.
* Otherwise the TV is misdetected. This is a hardware requirement.
*/
if (IS_GM45(dev))
tv_dac &= ~(TVDAC_STATE_CHG_EN | TVDAC_A_SENSE_CTL |
TVDAC_B_SENSE_CTL | TVDAC_C_SENSE_CTL);
I915_WRITE(TV_CTL, tv_ctl);
I915_WRITE(TV_DAC, tv_dac);
POSTING_READ(TV_DAC);
@ -1244,9 +1253,7 @@ intel_tv_detect(struct drm_connector *connector, bool force)
mode = reported_modes[0];
drm_mode_set_crtcinfo(&mode, 0);
if (intel_tv->base.base.crtc && intel_tv->base.base.crtc->enabled) {
type = intel_tv_detect_type(intel_tv, connector);
} else if (force) {
if (force) {
struct intel_load_detect_pipe tmp;
if (intel_get_load_detect_pipe(&intel_tv->base, connector,

View File

@ -302,7 +302,7 @@ static inline bool radeon_encoder_is_digital(struct drm_encoder *encoder)
}
static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
@ -2451,7 +2451,7 @@ radeon_atom_ext_dpms(struct drm_encoder *encoder, int mode)
}
static bool radeon_atom_ext_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;

View File

@ -245,7 +245,7 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder,
}
static bool radeon_legacy_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);

View File

@ -11,11 +11,13 @@ SRCS = \
i915_gem_execbuffer.c \
i915_gem_evict.c \
i915_gem_gtt.c \
i915_gem_stolen.c \
i915_gem_tiling.c \
i915_irq.c \
i915_suspend.c \
intel_bios.c \
intel_crt.c \
intel_ddi.c \
intel_display.c \
intel_dp.c \
intel_fb.c \
@ -26,6 +28,7 @@ SRCS = \
intel_opregion.c \
intel_overlay.c \
intel_panel.c \
intel_pm.c \
intel_ringbuffer.c \
intel_sdvo.c \
intel_sprite.c \