drm/i915: Further reduce the diff with Linux 3.8

There is no functional change. The goal is to ease the future update to
Linux 3.8's i915 driver.
Author: dumbbell
Date: 2016-01-13 19:52:25 +0000
Parent: 9dcfa1d85c
Commit: 86151baeba
8 changed files with 82 additions and 69 deletions
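
Most of the hunks below are purely cosmetic, replacing FreeBSD style(9)
idioms with the Linux kernel style used upstream so that future merges
from Linux 3.8 touch fewer lines. A minimal illustration of the
recurring return-statement change (not taken verbatim from the commit):

/* Before: FreeBSD style(9) parenthesizes return values and puts the
 * function name on its own line. */
static int
check_old(void)
{
        return (0);
}

/* After: Linux kernel style, as used by the upstream i915 driver. */
static int check_new(void)
{
        return 0;
}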

File: sys/dev/drm2/i915/i915_gem.c

@@ -156,7 +156,7 @@ i915_gem_wait_for_error(struct drm_device *dev)
int ret;
if (!atomic_load_acq_int(&dev_priv->mm.wedged))
return (0);
return 0;
mtx_lock(&dev_priv->error_completion_lock);
while (dev_priv->error_completion == 0) {
@@ -166,7 +166,7 @@ i915_gem_wait_for_error(struct drm_device *dev)
ret = -ERESTARTSYS;
if (ret != 0) {
mtx_unlock(&dev_priv->error_completion_lock);
return (ret);
return ret;
}
}
mtx_unlock(&dev_priv->error_completion_lock);
@@ -1861,26 +1861,30 @@ i915_gem_object_put_pages_range(struct drm_i915_gem_object *obj,
static void
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
{
vm_page_t page;
int page_count, i;
int page_count = obj->base.size / PAGE_SIZE;
int i;
KASSERT(obj->madv != I915_MADV_PURGED_INTERNAL, ("Purged object"));
if (obj->tiling_mode != I915_TILING_NONE)
i915_gem_object_save_bit_17_swizzle(obj);
if (obj->madv == I915_MADV_DONTNEED)
obj->dirty = 0;
page_count = obj->base.size / PAGE_SIZE;
VM_OBJECT_WLOCK(obj->base.vm_obj);
#if GEM_PARANOID_CHECK_GTT
i915_gem_assert_pages_not_mapped(obj->base.dev, obj->pages, page_count);
#endif
for (i = 0; i < page_count; i++) {
page = obj->pages[i];
vm_page_t page = obj->pages[i];
if (obj->dirty)
vm_page_dirty(page);
if (obj->madv == I915_MADV_WILLNEED)
vm_page_reference(page);
vm_page_lock(page);
vm_page_unwire(obj->pages[i], PQ_ACTIVE);
vm_page_unlock(page);
@@ -1888,6 +1892,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
}
VM_OBJECT_WUNLOCK(obj->base.vm_obj);
obj->dirty = 0;
free(obj->pages, DRM_I915_GEM);
obj->pages = NULL;
}

File: sys/dev/drm2/i915/i915_gem_execbuffer.c

@@ -411,8 +411,8 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
reloc->offset += obj->gtt_offset;
reloc_page = pmap_mapdev_attr(dev->agp->base + (reloc->offset &
~PAGE_MASK), PAGE_SIZE, PAT_WRITE_COMBINING);
reloc_entry = (uint32_t *)(reloc_page + (reloc->offset &
PAGE_MASK));
reloc_entry = (uint32_t *)
(reloc_page + (reloc->offset & PAGE_MASK));
*(volatile uint32_t *)reloc_entry = reloc->delta;
pmap_unmapdev((vm_offset_t)reloc_page, PAGE_SIZE);
}
@@ -502,7 +502,7 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
struct list_head *objects)
{
struct drm_i915_gem_object *obj;
int ret, pflags;
int ret = 0, pflags;
/* Try to move as many of the relocation targets off the active list
* to avoid unnecessary fallbacks to the slow path, as we cannot wait
@@ -510,7 +510,6 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
*/
i915_gem_retire_requests(dev);
ret = 0;
/* This is the fast path and we cannot handle a pagefault whilst
* holding the device lock lest the user pass in the relocations
* contained within a mmaped bo. For in such a case we, the page
@@ -952,6 +951,7 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec, int count,
*map = malloc(count * sizeof(*ma), DRM_I915_GEM, M_WAITOK | M_ZERO);
*maplen = malloc(count * sizeof(*maplen), DRM_I915_GEM, M_WAITOK |
M_ZERO);
for (i = 0; i < count; i++) {
/* First check for malicious input causing overflow */
if (exec[i].relocation_count >
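
The relocation_count comparison above is truncated. The usual
Linux 3.8-era pattern, sketched here under that assumption, bounds the
count so that multiplying it by the relocation-entry size cannot
overflow:

#include <limits.h>
#include <stddef.h>

/* Sketch (assumed bound; the exact expression is truncated above):
 * a relocation count is only safe when count * entry_size fits in an
 * int, i.e. count <= INT_MAX / entry_size. */
static int
relocation_count_is_sane(unsigned int count, size_t entry_size)
{
        return count <= INT_MAX / entry_size;
}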

File: sys/dev/drm2/i915/i915_gem_gtt.c

@@ -107,21 +107,22 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
I915_PPGTT_PT_ENTRIES);
ppgtt->pd_offset = (first_pd_entry_in_global_pt) * sizeof(uint32_t);
dev_priv->mm.aliasing_ppgtt = ppgtt;
return (0);
return 0;
}
static void
i915_ppgtt_insert_pages(struct i915_hw_ppgtt *ppgtt, unsigned first_entry,
unsigned num_entries, vm_page_t *pages, uint32_t pte_flags)
static void i915_ppgtt_insert_pages(struct i915_hw_ppgtt *ppgtt,
unsigned first_entry,
unsigned num_entries,
vm_page_t *pages,
uint32_t pte_flags)
{
uint32_t *pt_vaddr, pte;
struct sf_buf *sf;
unsigned act_pd, first_pte;
unsigned last_pte, i;
unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
unsigned j, last_pte;
vm_paddr_t page_addr;
act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
struct sf_buf *sf;
while (num_entries) {
last_pte = first_pte + num_entries;
@@ -132,10 +133,10 @@ i915_ppgtt_insert_pages(struct i915_hw_ppgtt *ppgtt, unsigned first_entry,
sf = sf_buf_alloc(ppgtt->pt_pages[act_pd], SFB_CPUPRIVATE);
pt_vaddr = (uint32_t *)(uintptr_t)sf_buf_kva(sf);
for (i = first_pte; i < last_pte; i++) {
for (j = first_pte; j < last_pte; j++) {
page_addr = VM_PAGE_TO_PHYS(*pages);
pte = GEN6_PTE_ADDR_ENCODE(page_addr);
pt_vaddr[i] = pte | pte_flags;
pt_vaddr[j] = pte | pte_flags;
pages++;
}
@@ -194,18 +195,21 @@ void i915_gem_init_ppgtt(struct drm_device *dev)
struct intel_ring_buffer *ring;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
u_int first_pd_entry_in_global_pt;
vm_paddr_t pt_addr;
uint32_t pd_entry;
int i;
if (!dev_priv->mm.aliasing_ppgtt)
return;
first_pd_entry_in_global_pt = 512 * 1024 - I915_PPGTT_PD_ENTRIES;
for (i = 0; i < ppgtt->num_pd_entries; i++) {
vm_paddr_t pt_addr;
pt_addr = VM_PAGE_TO_PHYS(ppgtt->pt_pages[i]);
pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
pd_entry |= GEN6_PDE_VALID;
intel_gtt_write(first_pd_entry_in_global_pt + i, pd_entry);
}
intel_gtt_read_pte(first_pd_entry_in_global_pt);
@@ -217,7 +221,7 @@ void i915_gem_init_ppgtt(struct drm_device *dev)
if (INTEL_INFO(dev)->gen == 6) {
uint32_t ecochk, gab_ctl, ecobits;
ecobits = I915_READ(GAC_ECO_BITS);
I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
gab_ctl = I915_READ(GAB_CTL);
@@ -336,9 +340,8 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
return 0;
}
void
i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
struct drm_device *dev;
struct drm_i915_private *dev_priv;
@@ -375,15 +378,14 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
}
int i915_gem_init_global_gtt(struct drm_device *dev,
unsigned long start,
unsigned long mappable_end,
unsigned long end)
unsigned long start,
unsigned long mappable_end,
unsigned long end)
{
drm_i915_private_t *dev_priv;
drm_i915_private_t *dev_priv = dev->dev_private;
unsigned long mappable;
int error;
dev_priv = dev->dev_private;
mappable = min(end, mappable_end) - start;
/* Substract the guard page ... */
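
In the i915_ppgtt_insert_pages() hunk above, the act_pd/first_pte
arithmetic splits a linear GTT entry index into a page-directory slot
and an offset within that page table. A worked example, assuming
I915_PPGTT_PT_ENTRIES is 1024 (the GEN6 value; the constant is not
quoted in this diff):

#include <stdio.h>

#define I915_PPGTT_PT_ENTRIES 1024 /* assumption: 1024 PTEs per GEN6 page table */

int main(void)
{
        unsigned first_entry = 3000; /* linear GTT entry index */
        unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;    /* 2: third page table */
        unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; /* 952: slot within it */

        printf("pd %u, pte %u\n", act_pd, first_pte);
        return 0;
}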

File: sys/dev/drm2/i915/i915_gem_tiling.c

@@ -453,15 +453,15 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
* by the GPU.
*/
static void
i915_gem_swizzle_page(vm_page_t m)
i915_gem_swizzle_page(vm_page_t page)
{
char temp[64];
char *vaddr;
struct sf_buf *sf;
char *vaddr;
int i;
/* XXXKIB sleep */
sf = sf_buf_alloc(m, SFB_DEFAULT);
sf = sf_buf_alloc(page, SFB_DEFAULT);
vaddr = (char *)sf_buf_kva(sf);
for (i = 0; i < PAGE_SIZE; i += 128) {
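
The loop body of i915_gem_swizzle_page() is cut off above. In the
Linux 3.8 driver the same function swaps the two 64-byte halves of each
128-byte chunk to undo bit-17 swizzling, which matches the temp[64]
buffer and the 128-byte stride visible here; a sketch under that
assumption:

#include <string.h>

#define PAGE_SIZE 4096 /* assumption: 4 KB pages */

/* Sketch: swap the 64-byte halves of every 128-byte chunk, as the
 * Linux 3.8 i915_gem_swizzle_page() does. */
static void
swizzle_page_contents(char *vaddr)
{
        char temp[64];
        int i;

        for (i = 0; i < PAGE_SIZE; i += 128) {
                memcpy(temp, &vaddr[i], 64);
                memcpy(&vaddr[i], &vaddr[i + 64], 64);
                memcpy(&vaddr[i + 64], temp, 64);
        }
}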

File: sys/dev/drm2/i915/i915_irq.c

@@ -34,6 +34,7 @@ __FBSDID("$FreeBSD$");
#include <dev/drm2/i915/i915_drm.h>
#include <dev/drm2/i915/i915_drv.h>
#include <dev/drm2/i915/intel_drv.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/sleepqueue.h>
@@ -770,42 +771,47 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
struct drm_i915_gem_object *src)
{
struct drm_i915_error_object *dst;
struct sf_buf *sf;
void *d, *s;
int page, page_count;
int i, count;
u32 reloc_offset;
if (src == NULL || src->pages == NULL)
return NULL;
page_count = src->base.size / PAGE_SIZE;
count = src->base.size / PAGE_SIZE;
dst = malloc(sizeof(*dst) + page_count * sizeof(u32 *), DRM_I915_GEM,
M_NOWAIT);
dst = malloc(sizeof(*dst) + count * sizeof(u32 *), DRM_I915_GEM, M_NOWAIT);
if (dst == NULL)
return (NULL);
return NULL;
reloc_offset = src->gtt_offset;
for (page = 0; page < page_count; page++) {
for (i = 0; i < count; i++) {
void *d;
d = malloc(PAGE_SIZE, DRM_I915_GEM, M_NOWAIT);
if (d == NULL)
goto unwind;
if (reloc_offset < dev_priv->mm.gtt_mappable_end &&
src->has_global_gtt_mapping) {
void *s;
/* Simply ignore tiling or any overlapping fence.
* It's part of the error state, and this hopefully
* captures what the GPU read.
*/
s = pmap_mapdev_attr(src->base.dev->agp->base +
reloc_offset, PAGE_SIZE, PAT_WRITE_COMBINING);
reloc_offset,
PAGE_SIZE, PAT_WRITE_COMBINING);
memcpy(d, s, PAGE_SIZE);
pmap_unmapdev((vm_offset_t)s, PAGE_SIZE);
} else {
drm_clflush_pages(&src->pages[page], 1);
struct sf_buf *sf;
void *s;
drm_clflush_pages(&src->pages[i], 1);
sched_pin();
sf = sf_buf_alloc(src->pages[page], SFB_CPUPRIVATE |
sf = sf_buf_alloc(src->pages[i], SFB_CPUPRIVATE |
SFB_NOWAIT);
if (sf != NULL) {
s = (void *)(uintptr_t)sf_buf_kva(sf);
@@ -817,21 +823,21 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
}
sched_unpin();
drm_clflush_pages(&src->pages[page], 1);
drm_clflush_pages(&src->pages[i], 1);
}
dst->pages[page] = d;
dst->pages[i] = d;
reloc_offset += PAGE_SIZE;
}
dst->page_count = page_count;
dst->page_count = count;
dst->gtt_offset = src->gtt_offset;
return dst;
unwind:
while (page--)
free(dst->pages[page], DRM_I915_GEM);
while (i--)
free(dst->pages[i], DRM_I915_GEM);
free(dst, DRM_I915_GEM);
return NULL;
}
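
i915_error_object_create() above copies each source page either through
a write-combining device mapping or, failing that, through a transient
sf_buf mapping while pinned to one CPU. A simplified sketch of the
FreeBSD sf_buf pattern (the driver additionally passes SFB_CPUPRIVATE
and brackets the mapping with sched_pin()/sched_unpin()):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/sf_buf.h>
#include <vm/vm.h>
#include <vm/vm_page.h>

/* Sketch: copy one vm_page_t through a short-lived sf_buf mapping.
 * With SFB_NOWAIT, sf_buf_alloc() fails instead of sleeping. */
static int
copy_page_via_sf_buf(vm_page_t m, void *dst)
{
        struct sf_buf *sf;

        sf = sf_buf_alloc(m, SFB_NOWAIT);
        if (sf == NULL)
                return (ENOMEM);
        memcpy(dst, (void *)(uintptr_t)sf_buf_kva(sf), PAGE_SIZE);
        sf_buf_free(sf);
        return (0);
}
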
@@ -2571,6 +2577,7 @@ void intel_irq_init(struct drm_device *dev)
dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
dev->driver->get_vblank_counter = gm45_get_vblank_counter;
}
if (drm_core_check_feature(dev, DRIVER_MODESET))
dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
else

File: sys/dev/drm2/i915/intel_crt.c

@@ -598,8 +598,7 @@ void intel_crt_init(struct drm_device *dev)
crt = malloc(sizeof(struct intel_crt), DRM_MEM_KMS, M_WAITOK | M_ZERO);
intel_connector = malloc(sizeof(struct intel_connector), DRM_MEM_KMS,
M_WAITOK | M_ZERO);
intel_connector = malloc(sizeof(struct intel_connector), DRM_MEM_KMS, M_WAITOK | M_ZERO);
connector = &intel_connector->base;
drm_connector_init(dev, &intel_connector->base,

File: sys/dev/drm2/i915/intel_display.c

@@ -1382,9 +1382,8 @@ intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value)
static u32
intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg)
{
u32 value;
u32 value = 0;
value = 0;
mtx_lock(&dev_priv->dpio_lock);
if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_READY) == 0, 100)) {
DRM_ERROR("timeout waiting for SBI to become ready\n");
@@ -1469,7 +1468,7 @@ static void intel_disable_pch_pll(struct intel_crtc *intel_crtc)
/* PCH only available on ILK+ */
KASSERT(dev_priv->info->gen >= 5, ("Wrong device gen"));
if (pll == NULL)
return;
if (pll->refcount == 0) {
DRM_DEBUG_KMS("pll->refcount == 0\n");
@@ -1495,7 +1494,7 @@ static void intel_disable_pch_pll(struct intel_crtc *intel_crtc)
/* Make sure transcoder isn't still depending on us */
assert_transcoder_disabled(dev_priv, intel_crtc->pipe);
reg = pll->pll_reg;
val = I915_READ(reg);
val &= ~DPLL_VCO_ENABLE;
@@ -1507,7 +1506,7 @@ static void intel_disable_pch_pll(struct intel_crtc *intel_crtc)
}
static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
int reg;
u32 val, pipeconf_val;
@@ -1517,7 +1516,8 @@ static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
KASSERT(dev_priv->info->gen >= 5, ("Wrong device gen"));
/* Make sure PCH DPLL is enabled */
assert_pch_pll_enabled(dev_priv, to_intel_crtc(crtc));
assert_pch_pll_enabled(dev_priv,
to_intel_crtc(crtc));
/* FDI must be feeding us bits for PCH ports */
assert_fdi_tx_enabled(dev_priv, pipe);
@@ -1527,9 +1527,11 @@ static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
DRM_ERROR("Attempting to enable transcoder on Haswell with pipe > 0\n");
return;
}
reg = TRANSCONF(pipe);
val = I915_READ(reg);
pipeconf_val = I915_READ(PIPECONF(pipe));
if (HAS_PCH_IBX(dev_priv->dev)) {
/*
* make the BPC in transcoder be consistent with
@@ -1886,6 +1888,7 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
return -EINVAL;
}
if (INTEL_INFO(dev)->gen >= 4) {
if (obj->tiling_mode != I915_TILING_NONE)
dspcntr |= DISPPLANE_TILED;
@@ -2525,7 +2528,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
POSTING_READ(reg);
DELAY(150);
for (i = 0; i < 4; i++ ) {
for (i = 0; i < 4; i++) {
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
@@ -7132,7 +7135,7 @@ int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
else
gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
pci_write_config(bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl, 2);
return (0);
return 0;
}
struct intel_display_error_state {

File: sys/dev/drm2/i915/intel_overlay.c

@@ -1138,8 +1138,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
return ret;
}
params = malloc(sizeof(struct put_image_params), DRM_I915_GEM,
M_WAITOK | M_ZERO);
params = malloc(sizeof(struct put_image_params), DRM_I915_GEM, M_WAITOK | M_ZERO);
drmmode_obj = drm_mode_object_find(dev, put_image_rec->crtc_id,
DRM_MODE_OBJECT_CRTC);
@@ -1403,8 +1402,7 @@ void intel_setup_overlay(struct drm_device *dev)
if (!HAS_OVERLAY(dev))
return;
overlay = malloc(sizeof(struct intel_overlay), DRM_I915_GEM,
M_WAITOK | M_ZERO);
overlay = malloc(sizeof(struct intel_overlay), DRM_I915_GEM, M_WAITOK | M_ZERO);
DRM_LOCK(dev);
if (dev_priv->overlay != NULL)
goto out_free;
@@ -1523,16 +1521,15 @@ intel_overlay_capture_error_state(struct drm_device *dev)
memcpy(&error->regs, regs, sizeof(struct overlay_registers));
intel_overlay_unmap_regs(overlay, regs);
return (error);
return error;
err:
free(error, DRM_I915_GEM);
return (NULL);
return NULL;
}
void
intel_overlay_print_error_state(struct sbuf *m,
struct intel_overlay_error_state *error)
intel_overlay_print_error_state(struct sbuf *m, struct intel_overlay_error_state *error)
{
sbuf_printf(m, "Overlay, status: 0x%08x, interrupt: 0x%08x\n",
error->dovsta, error->isr);