Reduce diff against linux 3.8

Reviewed by:	dumbbell
Differential Revision:	https://reviews.freebsd.org/D3492

parent cca2c8b54c
commit cb293687d9
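Most of the hunks below are mechanical style changes that realign this FreeBSD drm2 code with the Linux 3.8 sources: parentheses dropped from return expressions, locals initialized at their declaration, function signatures and call sites reflowed to the Linux layout, comments reconciled with upstream, and one error path in i915_gem_execbuffer2() made to return -EFAULT explicitly. Judging by the hunk contexts, the changes appear to span i915_gem.c, i915_gem_context.c, i915_gem_execbuffer.c, i915_gem_gtt.c and i915_gem_tiling.c. A minimal before/after sketch of the recurring pattern; the names here (struct softc, do_work, old_style, new_style) are illustrative placeholders, not code from the driver:

#include <stdio.h>

struct softc {
	int value;
};

static int do_work(struct softc *sc)
{
	return sc->value;
}

/* Before: FreeBSD style. Return type on its own line, locals assigned
 * separately from their declaration, return values wrapped in parentheses. */
static int
old_style(struct softc *sc)
{
	int ret;

	ret = do_work(sc);
	return (ret);
}

/* After: Linux style. Short signatures kept on one line, locals initialized
 * at declaration, no parentheses around return expressions. */
static int new_style(struct softc *sc)
{
	int ret = do_work(sc);

	return ret;
}

int main(void)
{
	struct softc sc = { .value = 42 };

	/* Both spellings behave identically; only the style differs. */
	printf("%d %d\n", old_style(&sc), new_style(&sc));
	return 0;
}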
@@ -1,4 +1,4 @@
-/*-
+/*
  * Copyright © 2008 Intel Corporation
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -203,11 +203,10 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
 	return 0;
 }
 
-static bool
+static inline bool
 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
 {
-
-	return !obj->active;
+	return obj->gtt_space && !obj->active;
 }
 
 int
@@ -1239,9 +1238,17 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 	uint32_t write_domain = args->write_domain;
 	int ret;
 
-	if ((write_domain & I915_GEM_GPU_DOMAINS) != 0 ||
-	    (read_domains & I915_GEM_GPU_DOMAINS) != 0 ||
-	    (write_domain != 0 && read_domains != write_domain))
+	/* Only handle setting domains to types used by the CPU. */
+	if (write_domain & I915_GEM_GPU_DOMAINS)
+		return -EINVAL;
+
+	if (read_domains & I915_GEM_GPU_DOMAINS)
+		return -EINVAL;
+
+	/* Having something in the write domain implies it's in the read
+	 * domain, and only that read domain.  Enforce that in the request.
+	 */
+	if (write_domain != 0 && read_domains != write_domain)
 		return -EINVAL;
 
 	ret = i915_mutex_lock_interruptible(dev);
@@ -1686,13 +1693,11 @@ i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
     uint32_t size,
     int tiling_mode)
 {
-	if (tiling_mode == I915_TILING_NONE)
-		return 4096;
-
 	/*
 	 * Minimum alignment is 4k (GTT page size) for sane hw.
 	 */
-	if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev))
+	if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
+	    tiling_mode == I915_TILING_NONE)
 		return 4096;
 
 	/* Previous hardware however needs to be aligned to a power-of-two
@@ -3155,7 +3160,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 
 	ret = i915_gem_object_flush_gpu_write_domain(obj);
 	if (ret)
-		return (ret);
+		return ret;
 
 	if (obj->pending_gpu_write || write) {
 		ret = i915_gem_object_wait_rendering(obj);
@@ -3366,6 +3371,12 @@ i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
 	return 0;
 }
 
+/**
+ * Moves a single object to the CPU read, and possibly write domain.
+ *
+ * This function returns when the move is complete, including waiting on
+ * flushes to occur.
+ */
 int
 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
 {
@@ -3644,7 +3655,6 @@ int
 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
     struct drm_file *file_priv)
 {
-
 	return i915_gem_ring_throttle(dev, file_priv);
 }
 
@@ -4101,6 +4111,10 @@ i915_gem_unload(struct drm_device *dev)
 	EVENTHANDLER_DEREGISTER(vm_lowmem, dev_priv->mm.i915_lowmem);
 }
 
+/*
+ * Create a physically contiguous memory object for this object
+ * e.g. for cursor + overlay regs
+ */
 static int i915_gem_init_phys_object(struct drm_device *dev,
     int id, int size, int align)
 {
@@ -302,7 +302,7 @@ void i915_gem_context_fini(struct drm_device *dev)
 		do_destroy(dev_priv->rings[RCS].default_context);
 }
 
-static int context_idr_cleanup(uint32_t id, void *p, void *data)
+static int context_idr_cleanup(int id, void *p, void *data)
 {
 	struct i915_hw_context *ctx = p;
 
@@ -405,10 +405,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 	if (ret)
 		return ret;
 
-	/*
-	 * Map the page containing the relocation we're going
-	 * to perform.
-	 */
+	/* Map the page containing the relocation we're going to perform. */
 	reloc->offset += obj->gtt_offset;
 	reloc_page = pmap_mapdev_attr(dev->agp->base + (reloc->offset &
 	    ~PAGE_MASK), PAGE_SIZE, PAT_WRITE_COMBINING);
@@ -475,13 +472,15 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
 			r++;
 		} while (--count);
 	}
 
+	return 0;
 #undef N_RELOC
-	return (0);
 }
 
 static int
 i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
-    struct eb_objects *eb, struct drm_i915_gem_relocation_entry *relocs)
+    struct eb_objects *eb,
+    struct drm_i915_gem_relocation_entry *relocs)
 {
 	const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
 	int i, ret;
@@ -520,11 +519,12 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
 
 	list_for_each_entry(obj, objects, exec_list) {
 		ret = i915_gem_execbuffer_relocate_object(obj, eb);
-		if (ret != 0)
+		if (ret)
 			break;
 	}
 	vm_fault_enable_pagefaults(pflags);
-	return (ret);
+
+	return ret;
 }
 
 #define __EXEC_OBJECT_HAS_FENCE (1<<31)
@@ -583,9 +583,9 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 {
 	drm_i915_private_t *dev_priv;
 	struct drm_i915_gem_object *obj;
-	int ret, retry;
-	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
 	struct list_head ordered_objects;
+	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
+	int ret, retry;
 
 	dev_priv = ring->dev->dev_private;
 	INIT_LIST_HEAD(&ordered_objects);
@@ -619,12 +619,11 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 	 *
 	 * 1a. Unbind all objects that do not match the GTT constraints for
 	 *     the execbuffer (fenceable, mappable, alignment etc).
-	 * 1b. Increment pin count for already bound objects and obtain
-	 *     a fence register if required.
+	 * 1b. Increment pin count for already bound objects.
 	 * 2. Bind new objects.
 	 * 3. Decrement pin count.
 	 *
-	 * This avoid unnecessary unbinding of later objects in order to makr
+	 * This avoid unnecessary unbinding of later objects in order to make
 	 * room for the earlier objects *unless* we need to defragment.
 	 */
 	retry = 0;
@@ -735,9 +734,12 @@ err:
 
 static int
 i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
-    struct drm_file *file, struct intel_ring_buffer *ring,
-    struct list_head *objects, struct eb_objects *eb,
-    struct drm_i915_gem_exec_object2 *exec, int count)
+    struct drm_file *file,
+    struct intel_ring_buffer *ring,
+    struct list_head *objects,
+    struct eb_objects *eb,
+    struct drm_i915_gem_exec_object2 *exec,
+    int count)
 {
 	struct drm_i915_gem_relocation_entry *reloc;
 	struct drm_i915_gem_object *obj;
@@ -1256,6 +1258,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	INIT_LIST_HEAD(&objects);
 	for (i = 0; i < args->buffer_count; i++) {
 		struct drm_i915_gem_object *obj;
+
 		obj = to_intel_bo(drm_gem_object_lookup(dev, file,
 		    exec[i].handle));
 		if (&obj->base == NULL) {
@@ -1294,7 +1297,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	if (ret) {
 		if (ret == -EFAULT) {
 			ret = i915_gem_execbuffer_relocate_slow(dev, file, ring,
-			    &objects, eb, exec, args->buffer_count);
+			    &objects, eb,
+			    exec,
+			    args->buffer_count);
 			DRM_LOCK_ASSERT(dev);
 		}
 		if (ret)
@@ -1372,13 +1377,14 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 			if (ret)
 				goto err;
 
-			ret = ring->dispatch_execbuffer(ring, exec_start,
-			    exec_len);
+			ret = ring->dispatch_execbuffer(ring,
+			    exec_start, exec_len);
 			if (ret)
 				goto err;
 		}
 	} else {
-		ret = ring->dispatch_execbuffer(ring, exec_start, exec_len);
+		ret = ring->dispatch_execbuffer(ring,
+		    exec_start, exec_len);
 		if (ret)
 			goto err;
 	}
@@ -1391,7 +1397,8 @@ err:
 	while (!list_empty(&objects)) {
 		struct drm_i915_gem_object *obj;
 
-		obj = list_first_entry(&objects, struct drm_i915_gem_object,
+		obj = list_first_entry(&objects,
+		    struct drm_i915_gem_object,
 		    exec_list);
 		list_del_init(&obj->exec_list);
 		drm_gem_object_unreference(&obj->base);
@@ -1520,7 +1527,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
 		DRM_DEBUG("copy %d exec entries failed %d\n",
 		    args->buffer_count, ret);
 		free(exec2_list, DRM_I915_GEM);
-		return (ret);
+		return -EFAULT;
 	}
 
 	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
@@ -34,9 +34,9 @@ __FBSDID("$FreeBSD$");
 #include <sys/sf_buf.h>
 
 /* PPGTT support for Sandybdrige/Gen6 and later */
-static void
-i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
-    unsigned first_entry, unsigned num_entries)
+static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
+    unsigned first_entry,
+    unsigned num_entries)
 {
 	uint32_t *pt_vaddr;
 	uint32_t scratch_pte;
@@ -71,20 +71,17 @@ i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
 
 }
 
-int
-i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
+int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct i915_hw_ppgtt *ppgtt;
-	u_int first_pd_entry_in_global_pt, i;
+	unsigned first_pd_entry_in_global_pt;
+	int i;
 
-	dev_priv = dev->dev_private;
-
-	/*
-	 * ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
+	/* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
 	 * entries. For aliasing ppgtt support we just steal them at the end for
-	 * now.
-	 */
+	 * now. */
 	first_pd_entry_in_global_pt = 512 * 1024 - I915_PPGTT_PD_ENTRIES;
 
 	ppgtt = malloc(sizeof(*ppgtt), DRM_I915_GEM, M_WAITOK | M_ZERO);
@@ -152,9 +149,9 @@ i915_ppgtt_insert_pages(struct i915_hw_ppgtt *ppgtt, unsigned first_entry,
 	}
 }
 
-void
-i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
-    struct drm_i915_gem_object *obj, enum i915_cache_level cache_level)
+void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
+    struct drm_i915_gem_object *obj,
+    enum i915_cache_level cache_level)
 {
 	struct drm_device *dev;
 	struct drm_i915_private *dev_priv;
@@ -185,22 +182,23 @@ i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
 void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
     struct drm_i915_gem_object *obj)
 {
-	i915_ppgtt_clear_range(ppgtt, obj->gtt_space->start >> PAGE_SHIFT,
+	i915_ppgtt_clear_range(ppgtt,
+	    obj->gtt_space->start >> PAGE_SHIFT,
 	    obj->base.size >> PAGE_SHIFT);
 }
 
 void i915_gem_init_ppgtt(struct drm_device *dev)
 {
-	drm_i915_private_t *dev_priv;
-	struct i915_hw_ppgtt *ppgtt;
-	uint32_t pd_offset, pd_entry;
-	vm_paddr_t pt_addr;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	uint32_t pd_offset;
 	struct intel_ring_buffer *ring;
-	u_int first_pd_entry_in_global_pt, i;
+	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+	u_int first_pd_entry_in_global_pt;
+	vm_paddr_t pt_addr;
+	uint32_t pd_entry;
+	int i;
 
-	dev_priv = dev->dev_private;
-	ppgtt = dev_priv->mm.aliasing_ppgtt;
-	if (ppgtt == NULL)
+	if (!dev_priv->mm.aliasing_ppgtt)
 		return;
 
 	first_pd_entry_in_global_pt = 512 * 1024 - I915_PPGTT_PD_ENTRIES;
@@ -244,6 +242,28 @@ void i915_gem_init_ppgtt(struct drm_device *dev)
 	}
 }
 
+static bool do_idling(struct drm_i915_private *dev_priv)
+{
+	bool ret = dev_priv->mm.interruptible;
+
+	if (dev_priv->mm.gtt.do_idle_maps) {
+		dev_priv->mm.interruptible = false;
+		if (i915_gpu_idle(dev_priv->dev)) {
+			DRM_ERROR("Couldn't idle GPU\n");
+			/* Wait a bit, in hopes it avoids the hang */
+			DELAY(10);
+		}
+	}
+
+	return ret;
+}
+
+static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
+{
+	if (dev_priv->mm.gtt.do_idle_maps)
+		dev_priv->mm.interruptible = interruptible;
+}
+
 void
 i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
 {
@@ -293,39 +313,11 @@ cache_level_to_agp_type(struct drm_device *dev, enum i915_cache_level
 	}
 }
 
-static bool
-do_idling(struct drm_i915_private *dev_priv)
+void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 {
-	bool ret = dev_priv->mm.interruptible;
-
-	if (dev_priv->mm.gtt.do_idle_maps) {
-		dev_priv->mm.interruptible = false;
-		if (i915_gpu_idle(dev_priv->dev)) {
-			DRM_ERROR("Couldn't idle GPU\n");
-			/* Wait a bit, in hopes it avoids the hang */
-			DELAY(10);
-		}
-	}
-
-	return ret;
-}
-
-static void
-undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
-{
-
-	if (dev_priv->mm.gtt.do_idle_maps)
-		dev_priv->mm.interruptible = interruptible;
-}
-
-void
-i915_gem_restore_gtt_mappings(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
 
-	dev_priv = dev->dev_private;
-
 	/* First fill our portion of the GTT with scratch pages */
 	intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
 	    (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
@@ -338,11 +330,10 @@ i915_gem_restore_gtt_mappings(struct drm_device *dev)
 	intel_gtt_chipset_flush();
 }
 
-int
-i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
+int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
 {
 
-	return (0);
+	return 0;
 }
 
 void
@@ -363,8 +354,7 @@ i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
 	obj->has_global_gtt_mapping = 1;
 }
 
-void
-i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
+void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
 {
 
 	intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
@@ -373,24 +363,21 @@ i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
 	obj->has_global_gtt_mapping = 0;
 }
 
-void
-i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
+void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
 {
-	struct drm_device *dev;
-	struct drm_i915_private *dev_priv;
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	bool interruptible;
 
-	dev = obj->base.dev;
-	dev_priv = dev->dev_private;
-
 	interruptible = do_idling(dev_priv);
 
 	undo_idling(dev_priv, interruptible);
 }
 
-int
-i915_gem_init_global_gtt(struct drm_device *dev, unsigned long start,
-    unsigned long mappable_end, unsigned long end)
+int i915_gem_init_global_gtt(struct drm_device *dev,
+    unsigned long start,
+    unsigned long mappable_end,
+    unsigned long end)
 {
 	drm_i915_private_t *dev_priv;
 	unsigned long mappable;
@@ -209,7 +209,7 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
 
 	/* Linear is always fine */
 	if (tiling_mode == I915_TILING_NONE)
-		return (true);
+		return true;
 
 	if (IS_GEN2(dev) ||
 	    (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
@@ -222,35 +222,35 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
 		/* i965 stores the end address of the gtt mapping in the fence
 		 * reg, so dont bother to check the size */
 		if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
-			return (false);
+			return false;
 	} else {
 		if (stride > 8192)
-			return (false);
+			return false;
 
 		if (IS_GEN3(dev)) {
 			if (size > I830_FENCE_MAX_SIZE_VAL << 20)
-				return (false);
+				return false;
 		} else {
 			if (size > I830_FENCE_MAX_SIZE_VAL << 19)
-				return (false);
+				return false;
 		}
 	}
 
 	/* 965+ just needs multiples of tile width */
 	if (INTEL_INFO(dev)->gen >= 4) {
 		if (stride & (tile_width - 1))
-			return (false);
-		return (true);
+			return false;
+		return true;
 	}
 
 	/* Pre-965 needs power of two tile widths */
 	if (stride < tile_width)
-		return (false);
+		return false;
 
 	if (stride & (stride - 1))
-		return (false);
+		return false;
 
-	return (true);
+	return true;
 }
 
 /* Is the current GTT allocation valid for the change in tiling? */
@@ -260,17 +260,17 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
 	u32 size;
 
 	if (tiling_mode == I915_TILING_NONE)
-		return (true);
+		return true;
 
 	if (INTEL_INFO(obj->base.dev)->gen >= 4)
-		return (true);
+		return true;
 
 	if (INTEL_INFO(obj->base.dev)->gen == 3) {
 		if (obj->gtt_offset & ~I915_FENCE_START_MASK)
-			return (false);
+			return false;
 	} else {
 		if (obj->gtt_offset & ~I830_FENCE_START_MASK)
-			return (false);
+			return false;
 	}
 
 	/*
@@ -286,12 +286,12 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
 		size <<= 1;
 
 	if (obj->gtt_space->size != size)
-		return (false);
+		return false;
 
 	if (obj->gtt_offset & (size - 1))
-		return (false);
+		return false;
 
-	return (true);
+	return true;
 }
 
 /**
@@ -305,9 +305,8 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 	struct drm_i915_gem_set_tiling *args = data;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
-	int ret;
+	int ret = 0;
 
-	ret = 0;
 	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
 	if (&obj->base == NULL)
 		return -ENOENT;
@@ -370,15 +369,15 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 
 	obj->map_and_fenceable =
 	    obj->gtt_space == NULL ||
-	    (obj->gtt_offset + obj->base.size <=
-	    dev_priv->mm.gtt_mappable_end &&
+	    (obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end &&
 	    i915_gem_object_fence_ok(obj, args->tiling_mode));
 
 	/* Rebind if we need a change of alignment */
 	if (!obj->map_and_fenceable) {
-		uint32_t unfenced_alignment =
+		u32 unfenced_alignment =
 		    i915_gem_get_unfenced_gtt_alignment(dev,
-		    obj->base.size, args->tiling_mode);
+		    obj->base.size,
+		    args->tiling_mode);
 		if (obj->gtt_offset & (unfenced_alignment - 1))
 			ret = i915_gem_object_unbind(obj);
 	}
@@ -388,7 +387,6 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 	    obj->fenced_gpu_access ||
 	    obj->fence_reg != I915_FENCE_REG_NONE;
 
-
 	obj->tiling_mode = args->tiling_mode;
 	obj->stride = args->stride;
 
@@ -402,7 +400,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 	drm_gem_object_unreference(&obj->base);
 	DRM_UNLOCK(dev);
 
-	return (ret);
+	return ret;
 }
 
 /**
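One behavioral pattern worth noting from the GTT hunks above: do_idling() and undo_idling(), which this commit moves earlier in the file, bracket a GTT unmap by saving the interruptible flag, forcing the GPU idle when do_idle_maps is set, and restoring the flag afterwards. A minimal standalone sketch of that save/force/restore bracket, with stand-in types and a stubbed idle check in place of the driver's i915_gpu_idle():

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the relevant fields of struct drm_i915_private; not the
 * driver's definitions. */
struct mm_state {
	bool interruptible;
	bool do_idle_maps;
};

/* Stand-in for i915_gpu_idle(); returns nonzero on failure. */
static int gpu_idle(void)
{
	return 0;
}

static bool do_idling(struct mm_state *mm)
{
	bool ret = mm->interruptible;

	if (mm->do_idle_maps) {
		mm->interruptible = false;
		if (gpu_idle())
			/* the driver also waits briefly (DELAY(10)),
			 * hoping to dodge a hang */
			fprintf(stderr, "Couldn't idle GPU\n");
	}

	return ret;
}

static void undo_idling(struct mm_state *mm, bool interruptible)
{
	if (mm->do_idle_maps)
		mm->interruptible = interruptible;
}

int main(void)
{
	struct mm_state mm = { .interruptible = true, .do_idle_maps = true };
	bool saved = do_idling(&mm);

	/* ... perform the GTT unmap while the GPU is idle ... */
	undo_idling(&mm, saved);
	printf("interruptible restored: %d\n", mm.interruptible);
	return 0;
}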