Import the preliminary port of the TTM.

The early commit is done to facilitate the off-tree work on the porting
of the Radeon driver.

Sponsored by:    The FreeBSD Foundation
Debugged and tested by:    dumbbell
MFC after:    1 month
commit e6cd8542ed
parent 9cfa0e9e3c
@@ -906,6 +906,7 @@ struct drm_device {
     struct drm_minor *control;    /**< Control node for card */
     struct drm_minor *primary;    /**< render type primary screen head */
 
+    void *drm_ttm_bo;
     struct unrhdr *drw_unrhdr;
     /* RB tree of drawable infos */
     RB_HEAD(drawable_tree, bsd_drm_drawable_info) drw_head;
@@ -1302,10 +1303,14 @@ void drm_gem_release(struct drm_device *dev, struct drm_file *file_priv);
 
 int drm_gem_create_mmap_offset(struct drm_gem_object *obj);
 void drm_gem_free_mmap_offset(struct drm_gem_object *obj);
-int drm_gem_mmap_single(struct cdev *kdev, vm_ooffset_t *offset, vm_size_t size,
-    struct vm_object **obj_res, int nprot);
+int drm_gem_mmap_single(struct drm_device *dev, vm_ooffset_t *offset,
+    vm_size_t size, struct vm_object **obj_res, int nprot);
 void drm_gem_pager_dtr(void *obj);
 
+struct ttm_bo_device;
+int ttm_bo_mmap_single(struct ttm_bo_device *bdev, vm_ooffset_t *offset,
+    vm_size_t size, struct vm_object **obj_res, int nprot);
+
 void drm_device_lock_mtx(struct drm_device *dev);
 void drm_device_unlock_mtx(struct drm_device *dev);
 int drm_device_sleep_mtx(struct drm_device *dev, void *chan, int flags,
@@ -58,6 +58,8 @@ static int drm_load(struct drm_device *dev);
 static void drm_unload(struct drm_device *dev);
 static drm_pci_id_list_t *drm_find_description(int vendor, int device,
     drm_pci_id_list_t *idlist);
+static int drm_mmap_single(struct cdev *kdev, vm_ooffset_t *offset,
+    vm_size_t size, struct vm_object **obj_res, int nprot);
 
 static int
 drm_modevent(module_t mod, int type, void *data)
@@ -187,7 +189,7 @@ static struct cdevsw drm_cdevsw = {
     .d_ioctl = drm_ioctl,
     .d_poll = drm_poll,
     .d_mmap = drm_mmap,
-    .d_mmap_single = drm_gem_mmap_single,
+    .d_mmap_single = drm_mmap_single,
     .d_name = "drm",
     .d_flags = D_TRACKCLOSE
 };
@@ -955,6 +957,23 @@ drm_add_busid_modesetting(struct drm_device *dev, struct sysctl_ctx_list *ctx,
     return (0);
 }
 
+static int
+drm_mmap_single(struct cdev *kdev, vm_ooffset_t *offset, vm_size_t size,
+    struct vm_object **obj_res, int nprot)
+{
+    struct drm_device *dev;
+
+    dev = drm_get_device_from_kdev(kdev);
+    if ((dev->driver->driver_features & DRIVER_GEM) != 0) {
+        return (drm_gem_mmap_single(dev, offset, size, obj_res, nprot));
+    } else if (dev->drm_ttm_bo != NULL) {
+        return (ttm_bo_mmap_single(dev->drm_ttm_bo, offset, size,
+            obj_res, nprot));
+    } else {
+        return (ENODEV);
+    }
+}
+
 #if DRM_LINUX
 
 #include <sys/sysproto.h>
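
The dispatcher above selects the GEM path when the driver advertises DRIVER_GEM and the TTM path when dev->drm_ttm_bo has been set. As an illustrative sketch only (the mydrv_* names are hypothetical and not part of this commit; only drm_ttm_bo and struct ttm_bo_device come from the diff), a TTM-based driver would be expected to point drm_ttm_bo at its struct ttm_bo_device during attach:

/* Hypothetical sketch: enabling the TTM mmap path for a driver. */
struct mydrv_softc {
    struct drm_device *dev;
    struct ttm_bo_device bdev;    /* per-device TTM state */
};

static void
mydrv_enable_ttm_mmap(struct mydrv_softc *sc)
{

    /* drm_mmap_single() now routes into ttm_bo_mmap_single(). */
    sc->dev->drm_ttm_bo = &sc->bdev;
}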
@@ -441,16 +441,12 @@ drm_gem_free_mmap_offset(struct drm_gem_object *obj)
 }
 
 int
-drm_gem_mmap_single(struct cdev *kdev, vm_ooffset_t *offset, vm_size_t size,
+drm_gem_mmap_single(struct drm_device *dev, vm_ooffset_t *offset, vm_size_t size,
     struct vm_object **obj_res, int nprot)
 {
-    struct drm_device *dev;
     struct drm_gem_object *gem_obj;
     struct vm_object *vm_obj;
 
-    dev = drm_get_device_from_kdev(kdev);
-    if ((dev->driver->driver_features & DRIVER_GEM) == 0)
-        return (ENODEV);
     DRM_LOCK(dev);
     gem_obj = drm_gem_object_from_offset(dev, *offset);
     if (gem_obj == NULL) {
sys/dev/drm2/ttm/ttm_agp_backend.c (new file, 145 lines)
@@ -0,0 +1,145 @@
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 *          Keith Packard.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/drm2/drmP.h>
#include <dev/drm2/ttm/ttm_module.h>
#include <dev/drm2/ttm/ttm_bo_driver.h>
#include <dev/drm2/ttm/ttm_page_alloc.h>
#ifdef TTM_HAS_AGP
#include <dev/drm2/ttm/ttm_placement.h>

struct ttm_agp_backend {
    struct ttm_tt ttm;
    struct agp_memory *mem;
    device_t bridge;
};

MALLOC_DEFINE(M_TTM_AGP, "ttm_agp", "TTM AGP Backend");

static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
    struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
    struct drm_mm_node *node = bo_mem->mm_node;
    struct agp_memory *mem;
    int ret, cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
    unsigned i;

    mem = agp_alloc_memory(agp_be->bridge, AGP_USER_MEMORY, ttm->num_pages);
    if (unlikely(mem == NULL))
        return -ENOMEM;

    mem->page_count = 0;
    for (i = 0; i < ttm->num_pages; i++) {
        vm_page_t page = ttm->pages[i];

        if (!page)
            page = ttm->dummy_read_page;

        mem->pages[mem->page_count++] = page;
    }
    agp_be->mem = mem;

    mem->is_flushed = 1;
    mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY;

    ret = agp_bind_memory(mem, node->start);
    if (ret)
        pr_err("AGP Bind memory failed\n");

    return ret;
}

static int ttm_agp_unbind(struct ttm_tt *ttm)
{
    struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);

    if (agp_be->mem) {
        if (agp_be->mem->is_bound)
            return agp_unbind_memory(agp_be->mem);
        agp_free_memory(agp_be->mem);
        agp_be->mem = NULL;
    }
    return 0;
}

static void ttm_agp_destroy(struct ttm_tt *ttm)
{
    struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);

    if (agp_be->mem)
        ttm_agp_unbind(ttm);
    ttm_tt_fini(ttm);
    free(agp_be, M_TTM_AGP);
}

static struct ttm_backend_func ttm_agp_func = {
    .bind = ttm_agp_bind,
    .unbind = ttm_agp_unbind,
    .destroy = ttm_agp_destroy,
};

struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
    device_t bridge,
    unsigned long size, uint32_t page_flags,
    vm_page_t dummy_read_page)
{
    struct ttm_agp_backend *agp_be;

    agp_be = malloc(sizeof(*agp_be), M_TTM_AGP, M_WAITOK | M_ZERO);

    agp_be->mem = NULL;
    agp_be->bridge = bridge;
    agp_be->ttm.func = &ttm_agp_func;

    if (ttm_tt_init(&agp_be->ttm, bdev, size, page_flags, dummy_read_page)) {
        free(agp_be, M_TTM_AGP);    /* avoid leaking the backend on init failure */
        return NULL;
    }

    return &agp_be->ttm;
}

int ttm_agp_tt_populate(struct ttm_tt *ttm)
{
    if (ttm->state != tt_unpopulated)
        return 0;

    return ttm_pool_populate(ttm);
}

void ttm_agp_tt_unpopulate(struct ttm_tt *ttm)
{
    ttm_pool_unpopulate(ttm);
}

#endif
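
A driver that still supports AGP would typically plug this backend in from its ttm_tt_create hook. A minimal sketch, assuming the driver has already resolved its AGP bridge device (the mydrv_* names and the mydrv_from_bdev helper are hypothetical):

/* Hypothetical sketch: wiring the AGP backend into a driver's TTM hooks. */
static struct ttm_tt *
mydrv_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
    uint32_t page_flags, vm_page_t dummy_read_page)
{
    struct mydrv_softc *sc = mydrv_from_bdev(bdev);    /* assumed helper */

    return (ttm_agp_tt_create(bdev, sc->agp_bridge, size, page_flags,
        dummy_read_page));
}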
sys/dev/drm2/ttm/ttm_bo.c (new file, 1820 lines; diff suppressed because it is too large)

sys/dev/drm2/ttm/ttm_bo_api.h (new file, 740 lines)
@@ -0,0 +1,740 @@
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
/* $FreeBSD$ */

#ifndef _TTM_BO_API_H_
#define _TTM_BO_API_H_

#include <dev/drm2/drmP.h>

struct ttm_bo_device;

struct drm_mm_node;


/**
 * struct ttm_placement
 *
 * @fpfn: first valid page frame number to put the object
 * @lpfn: last valid page frame number to put the object
 * @num_placement: number of preferred placements
 * @placement: preferred placements
 * @num_busy_placement: number of preferred placements when need to evict buffer
 * @busy_placement: preferred placements when need to evict buffer
 *
 * Structure indicating the placement you request for an object.
 */
struct ttm_placement {
    unsigned fpfn;
    unsigned lpfn;
    unsigned num_placement;
    const uint32_t *placement;
    unsigned num_busy_placement;
    const uint32_t *busy_placement;
};

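(Illustrative aside, not part of the imported header: a driver might fill a placement request as in the sketch below. The TTM_PL_FLAG_* names are assumed to come from ttm_placement.h, which this commit imports alongside this file.)

/* Hypothetical sketch: prefer write-combined VRAM, fall back to system. */
static const uint32_t mydrv_placements[] = {
    TTM_PL_FLAG_WC | TTM_PL_FLAG_VRAM,
};
static const uint32_t mydrv_busy_placements[] = {
    TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM,
};

static const struct ttm_placement mydrv_placement = {
    .fpfn = 0,    /* no lower page-frame bound */
    .lpfn = 0,    /* 0 means no upper bound */
    .num_placement = 1,
    .placement = mydrv_placements,
    .num_busy_placement = 1,
    .busy_placement = mydrv_busy_placements,
};
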
/**
 * struct ttm_bus_placement
 *
 * @addr: mapped virtual address
 * @base: bus base address
 * @is_iomem: is this io memory?
 * @size: size in bytes
 * @offset: offset from the base address
 * @io_reserved_vm: The VM system has a refcount in @io_reserved_count
 * @io_reserved_count: Refcounting the numbers of callers to ttm_mem_io_reserve
 *
 * Structure indicating the bus placement of an object.
 */
struct ttm_bus_placement {
    void *addr;
    unsigned long base;
    unsigned long size;
    unsigned long offset;
    bool is_iomem;
    bool io_reserved_vm;
    uint64_t io_reserved_count;
};


/**
 * struct ttm_mem_reg
 *
 * @mm_node: Memory manager node.
 * @size: Requested size of memory region.
 * @num_pages: Actual size of memory region in pages.
 * @page_alignment: Page alignment.
 * @placement: Placement flags.
 * @bus: Placement on io bus accessible to the CPU
 *
 * Structure indicating the placement and space resources used by a
 * buffer object.
 */
struct ttm_mem_reg {
    void *mm_node;
    unsigned long start;
    unsigned long size;
    unsigned long num_pages;
    uint32_t page_alignment;
    uint32_t mem_type;
    uint32_t placement;
    struct ttm_bus_placement bus;
};

/**
 * enum ttm_bo_type
 *
 * @ttm_bo_type_device: These are 'normal' buffers that can
 * be mmapped by user space. Each of these bos occupies a slot in the
 * device address space, that can be used for normal vm operations.
 *
 * @ttm_bo_type_kernel: These buffers are like ttm_bo_type_device buffers,
 * but they cannot be accessed from user-space. For kernel-only use.
 *
 * @ttm_bo_type_sg: Buffer made from dmabuf sg table shared with another
 * driver.
 */
enum ttm_bo_type {
    ttm_bo_type_device,
    ttm_bo_type_kernel,
    ttm_bo_type_sg
};

struct ttm_tt;

/**
 * struct ttm_buffer_object
 *
 * @bdev: Pointer to the buffer object device structure.
 * @type: The bo type.
 * @destroy: Destruction function. If NULL, kfree is used.
 * @num_pages: Actual number of pages.
 * @addr_space_offset: Address space offset.
 * @acc_size: Accounted size for this object.
 * @kref: Reference count of this buffer object. When this refcount reaches
 * zero, the object is put on the delayed delete list.
 * @list_kref: List reference count of this buffer object. This member is
 * used to avoid destruction while the buffer object is still on a list.
 * Lru lists may keep one refcount, the delayed delete list, and kref != 0
 * keeps one refcount. When this refcount reaches zero,
 * the object is destroyed.
 * @event_queue: Queue for processes waiting on buffer object status change.
 * @mem: structure describing current placement.
 * @persistent_swap_storage: Usually the swap storage is deleted for buffers
 * pinned in physical memory. If this behaviour is not desired, this member
 * holds a pointer to a persistent shmem object.
 * @ttm: TTM structure holding system pages.
 * @evicted: Whether the object was evicted without user-space knowing.
 * @cpu_writers: For synchronization. Number of cpu writers.
 * @lru: List head for the lru list.
 * @ddestroy: List head for the delayed destroy list.
 * @swap: List head for swap LRU list.
 * @val_seq: Sequence of the validation holding the @reserved lock.
 * Used to avoid starvation when many processes compete to validate the
 * buffer. This member is protected by the bo_device::lru_lock.
 * @seq_valid: The value of @val_seq is valid. This value is protected by
 * the bo_device::lru_lock.
 * @reserved: Deadlock-free lock used for synchronization state transitions.
 * @sync_obj: Pointer to a synchronization object.
 * @priv_flags: Flags describing buffer object internal state.
 * @vm_rb: Rb node for the vm rb tree.
 * @vm_node: Address space manager node.
 * @offset: The current GPU offset, which can have different meanings
 * depending on the memory type. For SYSTEM type memory, it should be 0.
 * @cur_placement: Hint of current placement.
 *
 * Base class for TTM buffer object, that deals with data placement and CPU
 * mappings. GPU mappings are really up to the driver, but for simpler GPUs
 * the driver can usually use the placement offset @offset directly as the
 * GPU virtual address. For drivers implementing multiple
 * GPU memory manager contexts, the driver should manage the address space
 * in these contexts separately and use these objects to get the correct
 * placement and caching for these GPU maps. This makes it possible to use
 * these objects for even quite elaborate memory management schemes.
 * The destroy member and the API visibility of this object make it possible
 * to derive driver-specific types.
 */
struct ttm_buffer_object {
    /**
     * Members constant at init.
     */

    struct ttm_bo_global *glob;
    struct ttm_bo_device *bdev;
    enum ttm_bo_type type;
    void (*destroy) (struct ttm_buffer_object *);
    unsigned long num_pages;
    uint64_t addr_space_offset;
    size_t acc_size;

    /**
     * Members not needing protection.
     */

    u_int kref;
    u_int list_kref;
    /* wait_queue_head_t event_queue; */

    /**
     * Members protected by the bo::reserved lock.
     */

    struct ttm_mem_reg mem;
    struct vm_object *persistent_swap_storage;
    struct ttm_tt *ttm;
    bool evicted;

    /**
     * Members protected by the bo::reserved lock only when written to.
     */

    atomic_t cpu_writers;

    /**
     * Members protected by the bdev::lru_lock.
     */

    struct list_head lru;
    struct list_head ddestroy;
    struct list_head swap;
    struct list_head io_reserve_lru;
    uint32_t val_seq;
    bool seq_valid;

    /**
     * Members protected by the bdev::lru_lock
     * only when written to.
     */

    atomic_t reserved;

    /**
     * Members protected by struct buffer_object_device::fence_lock
     * In addition, setting sync_obj to anything else
     * than NULL requires bo::reserved to be held. This allows for
     * checking NULL while reserved but not holding the mentioned lock.
     */

    void *sync_obj;
    unsigned long priv_flags;

    /**
     * Members protected by the bdev::vm_lock
     */

    RB_ENTRY(ttm_buffer_object) vm_rb;
    struct drm_mm_node *vm_node;


    /**
     * Special members that are protected by the reserve lock
     * and the bo::lock when written to. Can be read with
     * either of these locks held.
     */

    unsigned long offset;
    uint32_t cur_placement;

    struct sg_table *sg;
};

/**
 * struct ttm_bo_kmap_obj
 *
 * @virtual: The current kernel virtual address.
 * @page: The page when kmap'ing a single page.
 * @bo_kmap_type: Type of bo_kmap.
 *
 * Object describing a kernel mapping. Since a TTM bo may be located
 * in various memory types with various caching policies, the
 * mapping can either be an ioremap, a vmap, a kmap or part of a
 * premapped region.
 */

#define TTM_BO_MAP_IOMEM_MASK 0x80
struct ttm_bo_kmap_obj {
    void *virtual;
    struct vm_page *page;
    struct sf_buf *sf;
    int num_pages;
    unsigned long size;
    enum {
        ttm_bo_map_iomap = 1 | TTM_BO_MAP_IOMEM_MASK,
        ttm_bo_map_vmap = 2,
        ttm_bo_map_kmap = 3,
        ttm_bo_map_premapped = 4 | TTM_BO_MAP_IOMEM_MASK,
    } bo_kmap_type;
    struct ttm_buffer_object *bo;
};

/**
 * ttm_bo_reference - reference a struct ttm_buffer_object
 *
 * @bo: The buffer object.
 *
 * Returns a refcounted pointer to a buffer object.
 */

static inline struct ttm_buffer_object *
ttm_bo_reference(struct ttm_buffer_object *bo)
{
    refcount_acquire(&bo->kref);
    return bo;
}

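(Illustrative aside, not part of the imported header: a short usage sketch of the reference-counting pair, using ttm_bo_unref declared further below; the mydrv_use_bo caller is hypothetical.)

/* Hypothetical sketch: take and drop an extra reference on a bo. */
static void
mydrv_use_bo(struct ttm_buffer_object *bo)
{
    struct ttm_buffer_object *ref;

    ref = ttm_bo_reference(bo);    /* kref++ */
    /* ... use the object while the extra reference pins it ... */
    ttm_bo_unref(&ref);            /* kref--, and clears the pointer */
}
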
/**
 * ttm_bo_wait - wait for buffer idle.
 *
 * @bo: The buffer object.
 * @interruptible: Use interruptible wait.
 * @no_wait: Return immediately if buffer is busy.
 *
 * This function must be called with the bo::mutex held, and makes
 * sure any previous rendering to the buffer is completed.
 * Note: It might be necessary to block validations before the
 * wait by reserving the buffer.
 * Returns -EBUSY if no_wait is true and the buffer is busy.
 * Returns -ERESTARTSYS if interrupted by a signal.
 */
extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
    bool interruptible, bool no_wait);
/**
 * ttm_bo_validate
 *
 * @bo: The buffer object.
 * @placement: Proposed placement for the buffer object.
 * @interruptible: Sleep interruptible if sleeping.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 *
 * Changes placement and caching policy of the buffer object
 * according to the proposed placement.
 * Returns
 * -EINVAL on invalid proposed placement.
 * -ENOMEM on out-of-memory condition.
 * -EBUSY if no_wait is true and buffer busy.
 * -ERESTARTSYS if interrupted by a signal.
 */
extern int ttm_bo_validate(struct ttm_buffer_object *bo,
    struct ttm_placement *placement,
    bool interruptible,
    bool no_wait_gpu);

/**
 * ttm_bo_unref
 *
 * @bo: The buffer object.
 *
 * Unreference and clear a pointer to a buffer object.
 */
extern void ttm_bo_unref(struct ttm_buffer_object **bo);


/**
 * ttm_bo_list_ref_sub
 *
 * @bo: The buffer object.
 * @count: The number of references with which to decrease @bo::list_kref;
 * @never_free: The refcount should not reach zero with this operation.
 *
 * Release @count lru list references to this buffer object.
 */
extern void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
    bool never_free);

/**
 * ttm_bo_add_to_lru
 *
 * @bo: The buffer object.
 *
 * Add this bo to the relevant mem type lru and, if it's backed by
 * system pages (ttms) to the swap list.
 * This function must be called with struct ttm_bo_global::lru_lock held, and
 * is typically called immediately prior to unreserving a bo.
 */
extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);

/**
 * ttm_bo_del_from_lru
 *
 * @bo: The buffer object.
 *
 * Remove this bo from all lru lists used to lookup and reserve an object.
 * This function must be called with struct ttm_bo_global::lru_lock held,
 * and is usually called just immediately after the bo has been reserved to
 * avoid recursive reservation from lru lists.
 */
extern int ttm_bo_del_from_lru(struct ttm_buffer_object *bo);


/**
 * ttm_bo_lock_delayed_workqueue
 *
 * Prevent the delayed workqueue from running.
 * Returns
 * True if the workqueue was queued at the time
 */
extern int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev);

/**
 * ttm_bo_unlock_delayed_workqueue
 *
 * Allows the delayed workqueue to run.
 */
extern void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev,
    int resched);

/**
 * ttm_bo_synccpu_write_grab
 *
 * @bo: The buffer object:
 * @no_wait: Return immediately if buffer is busy.
 *
 * Synchronizes a buffer object for CPU RW access. This means
 * command submission that affects the buffer will return -EBUSY
 * until ttm_bo_synccpu_write_release is called.
 *
 * Returns
 * -EBUSY if the buffer is busy and no_wait is true.
 * -ERESTARTSYS if interrupted by a signal.
 */
extern int
ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait);

/**
 * ttm_bo_synccpu_write_release:
 *
 * @bo: The buffer object.
 *
 * Releases a synccpu lock.
 */
extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);

/**
 * ttm_bo_acc_size
 *
 * @bdev: Pointer to a ttm_bo_device struct.
 * @bo_size: size of the buffer object in bytes.
 * @struct_size: size of the structure holding buffer object data
 *
 * Returns size to account for a buffer object
 */
size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
    unsigned long bo_size,
    unsigned struct_size);
size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
    unsigned long bo_size,
    unsigned struct_size);

/**
 * ttm_bo_init
 *
 * @bdev: Pointer to a ttm_bo_device struct.
 * @bo: Pointer to a ttm_buffer_object to be initialized.
 * @size: Requested size of buffer object.
 * @type: Requested type of buffer object.
 * @placement: Initial placement.
 * @page_alignment: Data alignment in pages.
 * @interruptible: If needing to sleep to wait for GPU resources,
 * sleep interruptible.
 * @persistent_swap_storage: Usually the swap storage is deleted for buffers
 * pinned in physical memory. If this behaviour is not desired, this member
 * holds a pointer to a persistent shmem object. Typically, this would
 * point to the shmem object backing a GEM object if TTM is used to back a
 * GEM user interface.
 * @acc_size: Accounted size for this object.
 * @destroy: Destroy function. Use NULL for kfree().
 *
 * This function initializes a pre-allocated struct ttm_buffer_object.
 * As this object may be part of a larger structure, this function,
 * together with the @destroy function,
 * enables driver-specific objects derived from a ttm_buffer_object.
 * On successful return, the object kref and list_kref are set to 1.
 * If a failure occurs, the function will call the @destroy function, or
 * kfree() if @destroy is NULL. Thus, after a failure, dereferencing @bo is
 * illegal and will likely cause memory corruption.
 *
 * Returns
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid placement flags.
 * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
 */

extern int ttm_bo_init(struct ttm_bo_device *bdev,
    struct ttm_buffer_object *bo,
    unsigned long size,
    enum ttm_bo_type type,
    struct ttm_placement *placement,
    uint32_t page_alignment,
    bool interruptible,
    struct vm_object *persistent_swap_storage,
    size_t acc_size,
    struct sg_table *sg,
    void (*destroy) (struct ttm_buffer_object *));

/**
 * ttm_bo_create
 *
 * @bdev: Pointer to a ttm_bo_device struct.
 * @size: Requested size of buffer object.
 * @type: Requested type of buffer object.
 * @placement: Initial placement.
 * @page_alignment: Data alignment in pages.
 * @interruptible: If needing to sleep while waiting for GPU resources,
 * sleep interruptible.
 * @persistent_swap_storage: Usually the swap storage is deleted for buffers
 * pinned in physical memory. If this behaviour is not desired, this member
 * holds a pointer to a persistent shmem object. Typically, this would
 * point to the shmem object backing a GEM object if TTM is used to back a
 * GEM user interface.
 * @p_bo: On successful completion *p_bo points to the created object.
 *
 * This function allocates a ttm_buffer_object, and then calls ttm_bo_init
 * on that object. The destroy function is set to kfree().
 * Returns
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid placement flags.
 * -ERESTARTSYS: Interrupted by signal while waiting for resources.
 */

extern int ttm_bo_create(struct ttm_bo_device *bdev,
    unsigned long size,
    enum ttm_bo_type type,
    struct ttm_placement *placement,
    uint32_t page_alignment,
    bool interruptible,
    struct vm_object *persistent_swap_storage,
    struct ttm_buffer_object **p_bo);

/**
 * ttm_bo_check_placement
 *
 * @bo: the buffer object.
 * @placement: placements
 *
 * Performs minimal validity checking on an intended change of
 * placement flags.
 * Returns
 * -EINVAL: Intended change is invalid or not allowed.
 */
extern int ttm_bo_check_placement(struct ttm_buffer_object *bo,
    struct ttm_placement *placement);

/**
 * ttm_bo_init_mm
 *
 * @bdev: Pointer to a ttm_bo_device struct.
 * @mem_type: The memory type.
 * @p_size: size of the managed area in pages.
 *
 * Initialize a manager for a given memory type.
 * Note: if part of driver firstopen, it must be protected from a
 * potentially racing lastclose.
 * Returns:
 * -EINVAL: invalid size or memory type.
 * -ENOMEM: Not enough memory.
 * May also return driver-specified errors.
 */

extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
    unsigned long p_size);
/**
 * ttm_bo_clean_mm
 *
 * @bdev: Pointer to a ttm_bo_device struct.
 * @mem_type: The memory type.
 *
 * Take down a manager for a given memory type after first walking
 * the LRU list to evict any buffers left alive.
 *
 * Normally, this function is part of lastclose() or unload(), and at that
 * point there shouldn't be any buffers left created by user-space, since
 * they should've been removed by the file descriptor release() method.
 * However, before this function is run, make sure to signal all sync objects,
 * and verify that the delayed delete queue is empty. The driver must also
 * make sure that there are no NO_EVICT buffers present in this memory type
 * when the call is made.
 *
 * If this function is part of a VT switch, the caller must make sure that
 * there are no applications currently validating buffers before this
 * function is called. The caller can do that by first taking the
 * struct ttm_bo_device::ttm_lock in write mode.
 *
 * Returns:
 * -EINVAL: invalid or uninitialized memory type.
 * -EBUSY: There are still buffers left in this memory type.
 */

extern int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type);

/**
 * ttm_bo_evict_mm
 *
 * @bdev: Pointer to a ttm_bo_device struct.
 * @mem_type: The memory type.
 *
 * Evicts all buffers on the lru list of the memory type.
 * This is normally part of a VT switch or an
 * out-of-memory-space-due-to-fragmentation handler.
 * The caller must make sure that there are no other processes
 * currently validating buffers, and can do that by taking the
 * struct ttm_bo_device::ttm_lock in write mode.
 *
 * Returns:
 * -EINVAL: Invalid or uninitialized memory type.
 * -ERESTARTSYS: The call was interrupted by a signal while waiting to
 * evict a buffer.
 */

extern int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type);

/**
 * ttm_kmap_obj_virtual
 *
 * @map: A struct ttm_bo_kmap_obj returned from ttm_bo_kmap.
 * @is_iomem: Pointer to an integer that on return indicates 1 if the
 * virtual map is io memory, 0 if normal memory.
 *
 * Returns the virtual address of a buffer object area mapped by ttm_bo_kmap.
 * If *is_iomem is 1 on return, the virtual address points to an io memory area,
 * that should strictly be accessed by the iowriteXX() and similar functions.
 */

static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map,
    bool *is_iomem)
{
    *is_iomem = !!(map->bo_kmap_type & TTM_BO_MAP_IOMEM_MASK);
    return map->virtual;
}

/**
 * ttm_bo_kmap
 *
 * @bo: The buffer object.
 * @start_page: The first page to map.
 * @num_pages: Number of pages to map.
 * @map: pointer to a struct ttm_bo_kmap_obj representing the map.
 *
 * Sets up a kernel virtual mapping, using ioremap, vmap or kmap to the
 * data in the buffer object. The ttm_kmap_obj_virtual function can then be
 * used to obtain a virtual address to the data.
 *
 * Returns
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid range.
 */

extern int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
    unsigned long num_pages, struct ttm_bo_kmap_obj *map);

/**
 * ttm_bo_kunmap
 *
 * @map: Object describing the map to unmap.
 *
 * Unmaps a kernel map set up by ttm_bo_kmap.
 */

extern void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);

/**
 * ttm_fbdev_mmap - mmap fbdev memory backed by a ttm buffer object.
 *
 * @vma: vma as input from the fbdev mmap method.
 * @bo: The bo backing the address space. The address space will
 * have the same size as the bo, and start at offset 0.
 *
 * This function is intended to be called by the fbdev mmap method
 * if the fbdev address space is to be backed by a bo.
 */

/* XXXKIB
extern int ttm_fbdev_mmap(struct vm_area_struct *vma,
    struct ttm_buffer_object *bo);
*/
/**
 * ttm_bo_mmap - mmap out of the ttm device address space.
 *
 * @filp: filp as input from the mmap method.
 * @vma: vma as input from the mmap method.
 * @bdev: Pointer to the ttm_bo_device with the address space manager.
 *
 * This function is intended to be called by the device mmap method
 * if the device address space is to be backed by the bo manager.
 */
/* XXXKIB
extern int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
    struct ttm_bo_device *bdev);
*/
/**
 * ttm_bo_io
 *
 * @bdev: Pointer to the struct ttm_bo_device.
 * @filp: Pointer to the struct file attempting to read / write.
 * @wbuf: User-space pointer to address of buffer to write. NULL on read.
 * @rbuf: User-space pointer to address of buffer to read into.
 * NULL on write.
 * @count: Number of bytes to read / write.
 * @f_pos: Pointer to current file position.
 * @write: 1 for write, 0 for read.
 *
 * This function implements read / write into ttm buffer objects, and is
 * intended to be called from the fops::read and fops::write method.
 * Returns:
 * See man (2) write, man(2) read. In particular,
 * the function may return -ERESTARTSYS if
 * interrupted by a signal.
 */

extern ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
    const char *wbuf, char *rbuf,
    size_t count, off_t *f_pos, bool write);

extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev);

/**
 * ttm_bo_is_reserved - return an indication if a ttm buffer object is reserved
 *
 * @bo: The buffer object to check.
 *
 * This function returns an indication if a bo is reserved or not, and should
 * only be used to print an error when it is not from incorrect api usage, since
 * there's no guarantee that it is the caller that is holding the reservation.
 */
static inline bool ttm_bo_is_reserved(struct ttm_buffer_object *bo)
{
    return atomic_read(&bo->reserved);
}

#endif
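
To make the mapping API above concrete, here is a hedged sketch of CPU access to a bo's pages via ttm_bo_kmap/ttm_bo_kunmap (error handling abbreviated; the mydrv_* caller is hypothetical and the caller is assumed to hold whatever reservation the driver requires):

/* Hypothetical sketch: zero the first page of a buffer object. */
static int
mydrv_zero_first_page(struct ttm_buffer_object *bo)
{
    struct ttm_bo_kmap_obj map;
    bool is_iomem;
    void *va;
    int ret;

    ret = ttm_bo_kmap(bo, 0, 1, &map);
    if (ret != 0)
        return (ret);
    va = ttm_kmap_obj_virtual(&map, &is_iomem);
    /* is_iomem would call for iowrite-style access; plain memset here. */
    memset(va, 0, PAGE_SIZE);
    ttm_bo_kunmap(&map);
    return (0);
}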
sys/dev/drm2/ttm/ttm_bo_driver.h (new file, 1018 lines; diff suppressed because it is too large)

sys/dev/drm2/ttm/ttm_bo_manager.c (new file, 157 lines)
@@ -0,0 +1,157 @@
/**************************************************************************
 *
 * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/drm2/drmP.h>
#include <dev/drm2/ttm/ttm_module.h>
#include <dev/drm2/ttm/ttm_bo_driver.h>
#include <dev/drm2/ttm/ttm_placement.h>
#include <dev/drm2/drm_mm.h>

/**
 * Currently we use a spinlock for the lock, but a mutex *may* be
 * more appropriate to reduce scheduling latency if the range manager
 * ends up with very fragmented allocation patterns.
 */

struct ttm_range_manager {
    struct drm_mm mm;
    struct mtx lock;
};

MALLOC_DEFINE(M_TTM_RMAN, "ttm_rman", "TTM Range Manager");

static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
    struct ttm_buffer_object *bo,
    struct ttm_placement *placement,
    struct ttm_mem_reg *mem)
{
    struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
    struct drm_mm *mm = &rman->mm;
    struct drm_mm_node *node = NULL;
    unsigned long lpfn;
    int ret;

    lpfn = placement->lpfn;
    if (!lpfn)
        lpfn = man->size;
    do {
        ret = drm_mm_pre_get(mm);
        if (unlikely(ret))
            return ret;

        mtx_lock(&rman->lock);
        node = drm_mm_search_free_in_range(mm,
            mem->num_pages, mem->page_alignment,
            placement->fpfn, lpfn, 1);
        if (unlikely(node == NULL)) {
            mtx_unlock(&rman->lock);
            return 0;
        }
        node = drm_mm_get_block_atomic_range(node, mem->num_pages,
            mem->page_alignment,
            placement->fpfn,
            lpfn);
        mtx_unlock(&rman->lock);
    } while (node == NULL);

    mem->mm_node = node;
    mem->start = node->start;
    return 0;
}

static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
    struct ttm_mem_reg *mem)
{
    struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;

    if (mem->mm_node) {
        mtx_lock(&rman->lock);
        drm_mm_put_block(mem->mm_node);
        mtx_unlock(&rman->lock);
        mem->mm_node = NULL;
    }
}

static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
    unsigned long p_size)
{
    struct ttm_range_manager *rman;
    int ret;

    rman = malloc(sizeof(*rman), M_TTM_RMAN, M_ZERO | M_WAITOK);
    ret = drm_mm_init(&rman->mm, 0, p_size);
    if (ret) {
        free(rman, M_TTM_RMAN);
        return ret;
    }

    mtx_init(&rman->lock, "ttmrman", NULL, MTX_DEF);
    man->priv = rman;
    return 0;
}

static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
{
    struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
    struct drm_mm *mm = &rman->mm;

    mtx_lock(&rman->lock);
    if (drm_mm_clean(mm)) {
        drm_mm_takedown(mm);
        mtx_unlock(&rman->lock);
        mtx_destroy(&rman->lock);
        free(rman, M_TTM_RMAN);
        man->priv = NULL;
        return 0;
    }
    mtx_unlock(&rman->lock);
    return -EBUSY;
}

static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
    const char *prefix)
{
    struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;

    mtx_lock(&rman->lock);
    drm_mm_debug_table(&rman->mm, prefix);
    mtx_unlock(&rman->lock);
}

const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
    ttm_bo_man_init,
    ttm_bo_man_takedown,
    ttm_bo_man_get_node,
    ttm_bo_man_put_node,
    ttm_bo_man_debug
};
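
This exported table is consumed by a driver's init_mem_type hook. A hypothetical sketch, assuming the ttm_mem_type_manager layout and callback shape defined in the (suppressed) ttm_bo_driver.h, selects the range manager for a managed aperture such as VRAM:

/* Hypothetical sketch: select the range manager for a VRAM memory type. */
static int
mydrv_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
    struct ttm_mem_type_manager *man)
{

    switch (type) {
    case TTM_PL_VRAM:
        /* drm_mm-backed range allocation over the VRAM aperture. */
        man->func = &ttm_bo_manager_func;
        break;
    default:
        return (-EINVAL);
    }
    return (0);
}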
sys/dev/drm2/ttm/ttm_bo_util.c (new file, 658 lines)
@ -0,0 +1,658 @@
|
||||
/**************************************************************************
|
||||
*
|
||||
* Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sub license, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
**************************************************************************/
|
||||
/*
|
||||
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
|
||||
*/
|
||||
|
||||
#include <sys/cdefs.h>
|
||||
__FBSDID("$FreeBSD$");
|
||||
|
||||
#include <dev/drm2/drmP.h>
|
||||
#include <dev/drm2/ttm/ttm_bo_driver.h>
|
||||
#include <dev/drm2/ttm/ttm_placement.h>
|
||||
#include <sys/sf_buf.h>
|
||||
|
||||
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
|
||||
{
|
||||
ttm_bo_mem_put(bo, &bo->mem);
|
||||
}
|
||||
|
||||
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
|
||||
bool evict,
|
||||
bool no_wait_gpu, struct ttm_mem_reg *new_mem)
|
||||
{
|
||||
struct ttm_tt *ttm = bo->ttm;
|
||||
struct ttm_mem_reg *old_mem = &bo->mem;
|
||||
int ret;
|
||||
|
||||
if (old_mem->mem_type != TTM_PL_SYSTEM) {
|
||||
ttm_tt_unbind(ttm);
|
||||
ttm_bo_free_old_node(bo);
|
||||
ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
|
||||
TTM_PL_MASK_MEM);
|
||||
old_mem->mem_type = TTM_PL_SYSTEM;
|
||||
}
|
||||
|
||||
ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
|
||||
if (unlikely(ret != 0))
|
||||
return ret;
|
||||
|
||||
if (new_mem->mem_type != TTM_PL_SYSTEM) {
|
||||
ret = ttm_tt_bind(ttm, new_mem);
|
||||
if (unlikely(ret != 0))
|
||||
return ret;
|
||||
}
|
||||
|
||||
*old_mem = *new_mem;
|
||||
new_mem->mm_node = NULL;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
|
||||
{
|
||||
if (likely(man->io_reserve_fastpath))
|
||||
return 0;
|
||||
|
||||
if (interruptible) {
|
||||
if (sx_xlock_sig(&man->io_reserve_mutex))
|
||||
return (-EINTR);
|
||||
else
|
||||
return (0);
|
||||
}
|
||||
|
||||
sx_xlock(&man->io_reserve_mutex);
|
||||
return 0;
|
||||
}
|
||||
|
||||
void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
|
||||
{
|
||||
if (likely(man->io_reserve_fastpath))
|
||||
return;
|
||||
|
||||
sx_xunlock(&man->io_reserve_mutex);
|
||||
}
|
||||
|
||||
static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
|
||||
{
|
||||
struct ttm_buffer_object *bo;
|
||||
|
||||
if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
|
||||
return -EAGAIN;
|
||||
|
||||
bo = list_first_entry(&man->io_reserve_lru,
|
||||
struct ttm_buffer_object,
|
||||
io_reserve_lru);
|
||||
list_del_init(&bo->io_reserve_lru);
|
||||
ttm_bo_unmap_virtual_locked(bo);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
|
||||
struct ttm_mem_reg *mem)
|
||||
{
|
||||
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
|
||||
int ret = 0;
|
||||
|
||||
if (!bdev->driver->io_mem_reserve)
|
||||
return 0;
|
||||
if (likely(man->io_reserve_fastpath))
|
||||
return bdev->driver->io_mem_reserve(bdev, mem);
|
||||
|
||||
if (bdev->driver->io_mem_reserve &&
|
||||
mem->bus.io_reserved_count++ == 0) {
|
||||
retry:
|
||||
ret = bdev->driver->io_mem_reserve(bdev, mem);
|
||||
if (ret == -EAGAIN) {
|
||||
ret = ttm_mem_io_evict(man);
|
||||
if (ret == 0)
|
||||
goto retry;
|
||||
}
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void ttm_mem_io_free(struct ttm_bo_device *bdev,
|
||||
struct ttm_mem_reg *mem)
|
||||
{
|
||||
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
|
||||
|
||||
if (likely(man->io_reserve_fastpath))
|
||||
return;
|
||||
|
||||
if (bdev->driver->io_mem_reserve &&
|
||||
--mem->bus.io_reserved_count == 0 &&
|
||||
bdev->driver->io_mem_free)
|
||||
bdev->driver->io_mem_free(bdev, mem);
|
||||
|
||||
}
|
||||
|
||||
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
|
||||
{
|
||||
struct ttm_mem_reg *mem = &bo->mem;
|
||||
int ret;
|
||||
|
||||
if (!mem->bus.io_reserved_vm) {
|
||||
struct ttm_mem_type_manager *man =
|
||||
&bo->bdev->man[mem->mem_type];
|
||||
|
||||
ret = ttm_mem_io_reserve(bo->bdev, mem);
|
||||
if (unlikely(ret != 0))
|
||||
return ret;
|
||||
mem->bus.io_reserved_vm = true;
|
||||
if (man->use_io_reserve_lru)
|
||||
list_add_tail(&bo->io_reserve_lru,
|
||||
&man->io_reserve_lru);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
|
||||
{
|
||||
struct ttm_mem_reg *mem = &bo->mem;
|
||||
|
||||
if (mem->bus.io_reserved_vm) {
|
||||
mem->bus.io_reserved_vm = false;
|
||||
list_del_init(&bo->io_reserve_lru);
|
||||
ttm_mem_io_free(bo->bdev, mem);
|
||||
}
|
||||
}
|
||||
|
||||
static
|
||||
int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
|
||||
void **virtual)
|
||||
{
|
||||
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
|
||||
int ret;
|
||||
void *addr;
|
||||
|
||||
*virtual = NULL;
|
||||
(void) ttm_mem_io_lock(man, false);
|
||||
ret = ttm_mem_io_reserve(bdev, mem);
|
||||
ttm_mem_io_unlock(man);
|
||||
if (ret || !mem->bus.is_iomem)
|
||||
return ret;
|
||||
|
||||
if (mem->bus.addr) {
|
||||
addr = mem->bus.addr;
|
||||
} else {
|
||||
addr = pmap_mapdev_attr(mem->bus.base + mem->bus.offset,
|
||||
mem->bus.size, (mem->placement & TTM_PL_FLAG_WC) ?
|
||||
VM_MEMATTR_WRITE_COMBINING : VM_MEMATTR_UNCACHEABLE);
|
||||
if (!addr) {
|
||||
(void) ttm_mem_io_lock(man, false);
|
||||
ttm_mem_io_free(bdev, mem);
|
||||
ttm_mem_io_unlock(man);
|
||||
return -ENOMEM;
|
||||
}
|
||||
}
|
||||
*virtual = addr;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static
|
||||
void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
|
||||
void *virtual)
|
||||
{
|
||||
struct ttm_mem_type_manager *man;
|
||||
|
||||
man = &bdev->man[mem->mem_type];
|
||||
|
||||
if (virtual && mem->bus.addr == NULL)
|
||||
pmap_unmapdev((vm_offset_t)virtual, mem->bus.size);
|
||||
(void) ttm_mem_io_lock(man, false);
|
||||
ttm_mem_io_free(bdev, mem);
|
||||
ttm_mem_io_unlock(man);
|
||||
}
|
||||
|
||||
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
|
||||
{
|
||||
uint32_t *dstP =
|
||||
(uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
|
||||
uint32_t *srcP =
|
||||
(uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
|
||||
|
||||
int i;
|
||||
for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
|
||||
/* iowrite32(ioread32(srcP++), dstP++); */
|
||||
*dstP++ = *srcP++;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
|
||||
unsigned long page,
|
||||
vm_memattr_t prot)
|
||||
{
|
||||
vm_page_t d = ttm->pages[page];
|
||||
void *dst;
|
||||
|
||||
if (!d)
|
||||
return -ENOMEM;
|
||||
|
||||
src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
|
||||
|
||||
/* XXXKIB can't sleep ? */
|
||||
dst = pmap_mapdev_attr(VM_PAGE_TO_PHYS(d), PAGE_SIZE, prot);
|
||||
if (!dst)
|
||||
return -ENOMEM;
|
||||
|
||||
memcpy(dst, src, PAGE_SIZE);
|
||||
|
||||
pmap_unmapdev((vm_offset_t)dst, PAGE_SIZE);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
|
||||
unsigned long page,
|
||||
vm_memattr_t prot)
|
||||
{
|
||||
vm_page_t s = ttm->pages[page];
|
||||
void *src;
|
||||
|
||||
if (!s)
|
||||
return -ENOMEM;
|
||||
|
||||
dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
|
||||
src = pmap_mapdev_attr(VM_PAGE_TO_PHYS(s), PAGE_SIZE, prot);
|
||||
if (!src)
|
||||
return -ENOMEM;
|
||||
|
||||
memcpy(dst, src, PAGE_SIZE);
|
||||
|
||||
pmap_unmapdev((vm_offset_t)src, PAGE_SIZE);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
|
||||
bool evict, bool no_wait_gpu,
|
||||
struct ttm_mem_reg *new_mem)
|
||||
{
|
||||
struct ttm_bo_device *bdev = bo->bdev;
|
||||
struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
|
||||
struct ttm_tt *ttm = bo->ttm;
|
||||
struct ttm_mem_reg *old_mem = &bo->mem;
|
||||
struct ttm_mem_reg old_copy = *old_mem;
|
||||
void *old_iomap;
|
||||
void *new_iomap;
|
||||
int ret;
|
||||
unsigned long i;
|
||||
unsigned long page;
|
||||
unsigned long add = 0;
|
||||
int dir;
|
||||
|
||||
ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
|
||||
if (ret)
|
||||
return ret;
|
||||
ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
if (old_iomap == NULL && new_iomap == NULL)
|
||||
goto out2;
|
||||
if (old_iomap == NULL && ttm == NULL)
|
||||
goto out2;
|
||||
|
||||
if (ttm->state == tt_unpopulated) {
|
||||
ret = ttm->bdev->driver->ttm_tt_populate(ttm);
|
||||
if (ret)
|
||||
goto out1;
|
||||
}
|
||||
|
||||
add = 0;
|
||||
dir = 1;
|
||||
|
||||
if ((old_mem->mem_type == new_mem->mem_type) &&
|
||||
(new_mem->start < old_mem->start + old_mem->size)) {
|
||||
dir = -1;
|
||||
add = new_mem->num_pages - 1;
|
||||
}
|
||||
|
||||
for (i = 0; i < new_mem->num_pages; ++i) {
|
||||
page = i * dir + add;
|
||||
if (old_iomap == NULL) {
|
||||
vm_memattr_t prot = ttm_io_prot(old_mem->placement);
|
||||
ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
|
||||
prot);
|
||||
} else if (new_iomap == NULL) {
|
||||
vm_memattr_t prot = ttm_io_prot(new_mem->placement);
|
||||
ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
|
||||
prot);
|
||||
} else
|
||||
ret = ttm_copy_io_page(new_iomap, old_iomap, page);
|
||||
if (ret)
|
||||
goto out1;
|
||||
}
|
||||
mb();
|
||||
out2:
|
||||
old_copy = *old_mem;
|
||||
*old_mem = *new_mem;
|
||||
new_mem->mm_node = NULL;
|
||||
|
||||
if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
|
||||
ttm_tt_unbind(ttm);
|
||||
ttm_tt_destroy(ttm);
|
||||
bo->ttm = NULL;
|
||||
}
|
||||
|
||||
out1:
|
||||
ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
|
||||
out:
|
||||
ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
|
||||
ttm_bo_mem_put(bo, &old_copy);
|
||||
return ret;
|
||||
}
|
||||
|
||||
MALLOC_DEFINE(M_TTM_TRANSF_OBJ, "ttm_transf_obj", "TTM Transfer Objects");
|
||||
|
||||
static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
|
||||
{
|
||||
free(bo, M_TTM_TRANSF_OBJ);
|
||||
}
|
||||
|
||||
/**
|
||||
* ttm_buffer_object_transfer
|
||||
*
|
||||
* @bo: A pointer to a struct ttm_buffer_object.
|
||||
* @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
|
||||
* holding the data of @bo with the old placement.
|
||||
*
|
||||
* This is a utility function that may be called after an accelerated move
|
||||
* has been scheduled. A new buffer object is created as a placeholder for
|
||||
* the old data while it's being copied. When that buffer object is idle,
|
||||
* it can be destroyed, releasing the space of the old placement.
|
||||
* Returns:
|
||||
* !0: Failure.
|
||||
*/
|
||||
|
||||
static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
|
||||
struct ttm_buffer_object **new_obj)
|
||||
{
|
||||
struct ttm_buffer_object *fbo;
|
||||
struct ttm_bo_device *bdev = bo->bdev;
|
||||
struct ttm_bo_driver *driver = bdev->driver;
|
||||
|
||||
fbo = malloc(sizeof(*fbo), M_TTM_TRANSF_OBJ, M_ZERO | M_WAITOK);
|
||||
*fbo = *bo;
|
||||
|
||||
/**
|
||||
* Fix up members that we shouldn't copy directly:
|
||||
* TODO: Explicit member copy would probably be better here.
|
||||
*/
|
||||
|
||||
INIT_LIST_HEAD(&fbo->ddestroy);
|
||||
INIT_LIST_HEAD(&fbo->lru);
|
||||
INIT_LIST_HEAD(&fbo->swap);
|
||||
INIT_LIST_HEAD(&fbo->io_reserve_lru);
|
||||
fbo->vm_node = NULL;
|
||||
atomic_set(&fbo->cpu_writers, 0);
|
||||
|
||||
fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
|
||||
refcount_init(&fbo->list_kref, 1);
|
||||
refcount_init(&fbo->kref, 1);
|
||||
fbo->destroy = &ttm_transfered_destroy;
|
||||
fbo->acc_size = 0;
|
||||
|
||||
*new_obj = fbo;
|
||||
return 0;
|
||||
}
|
||||
|
||||
vm_memattr_t
|
||||
ttm_io_prot(uint32_t caching_flags)
|
||||
{
|
||||
#if defined(__i386__) || defined(__amd64__)
|
||||
if (caching_flags & TTM_PL_FLAG_WC)
|
||||
return (VM_MEMATTR_WRITE_COMBINING);
|
||||
else
|
||||
/*
|
||||
* We do not support i386, look at the linux source
|
||||
* for the reason of the comment.
|
||||
*/
|
||||
return (VM_MEMATTR_UNCACHEABLE);
|
||||
#else
|
||||
#error Port me
|
||||
#endif
|
||||
}
|
||||
|
||||
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
|
||||
unsigned long offset,
|
||||
unsigned long size,
|
||||
struct ttm_bo_kmap_obj *map)
|
||||
{
|
||||
struct ttm_mem_reg *mem = &bo->mem;
|
||||
|
||||
if (bo->mem.bus.addr) {
|
||||
map->bo_kmap_type = ttm_bo_map_premapped;
|
||||
map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
|
||||
} else {
|
||||
map->bo_kmap_type = ttm_bo_map_iomap;
|
||||
map->virtual = pmap_mapdev_attr(bo->mem.bus.base +
|
||||
bo->mem.bus.offset + offset, size,
|
||||
(mem->placement & TTM_PL_FLAG_WC) ?
|
||||
VM_MEMATTR_WRITE_COMBINING : VM_MEMATTR_UNCACHEABLE);
|
||||
map->size = size;
|
||||
}
|
||||
return (!map->virtual) ? -ENOMEM : 0;
|
||||
}
|
||||
|
||||

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
    unsigned long start_page,
    unsigned long num_pages,
    struct ttm_bo_kmap_obj *map)
{
    struct ttm_mem_reg *mem = &bo->mem;
    vm_memattr_t prot;
    struct ttm_tt *ttm = bo->ttm;
    int i, ret;

    MPASS(ttm != NULL);

    if (ttm->state == tt_unpopulated) {
        ret = ttm->bdev->driver->ttm_tt_populate(ttm);
        if (ret)
            return ret;
    }

    if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
        /*
         * We're mapping a single page, and the desired
         * page protection is consistent with the bo.
         */

        map->bo_kmap_type = ttm_bo_map_kmap;
        map->page = ttm->pages[start_page];
        map->sf = sf_buf_alloc(map->page, 0);
        map->virtual = (void *)sf_buf_kva(map->sf);
    } else {
        /*
         * We need to use vmap to get the desired page protection
         * or to make the buffer object look contiguous.
         */
        prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
            VM_MEMATTR_WRITE_COMBINING :
            ttm_io_prot(mem->placement);
        map->bo_kmap_type = ttm_bo_map_vmap;
        map->num_pages = num_pages;
        map->virtual = (void *)kmem_alloc_nofault(kernel_map,
            num_pages * PAGE_SIZE);
        if (map->virtual != NULL) {
            for (i = 0; i < num_pages; i++) {
                /* XXXKIB hack */
                pmap_page_set_memattr(ttm->pages[start_page +
                    i], prot);
            }
            pmap_qenter((vm_offset_t)map->virtual,
                &ttm->pages[start_page], num_pages);
        }
    }
    return (!map->virtual) ? -ENOMEM : 0;
}

int ttm_bo_kmap(struct ttm_buffer_object *bo,
    unsigned long start_page, unsigned long num_pages,
    struct ttm_bo_kmap_obj *map)
{
    struct ttm_mem_type_manager *man =
        &bo->bdev->man[bo->mem.mem_type];
    unsigned long offset, size;
    int ret;

    MPASS(list_empty(&bo->swap));
    map->virtual = NULL;
    map->bo = bo;
    if (num_pages > bo->num_pages)
        return -EINVAL;
    if (start_page > bo->num_pages)
        return -EINVAL;
#if 0
    if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
        return -EPERM;
#endif
    (void) ttm_mem_io_lock(man, false);
    ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
    ttm_mem_io_unlock(man);
    if (ret)
        return ret;
    if (!bo->mem.bus.is_iomem) {
        return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
    } else {
        offset = start_page << PAGE_SHIFT;
        size = num_pages << PAGE_SHIFT;
        return ttm_bo_ioremap(bo, offset, size, map);
    }
}

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
    struct ttm_buffer_object *bo = map->bo;
    struct ttm_mem_type_manager *man =
        &bo->bdev->man[bo->mem.mem_type];

    if (!map->virtual)
        return;
    switch (map->bo_kmap_type) {
    case ttm_bo_map_iomap:
        pmap_unmapdev((vm_offset_t)map->virtual, map->size);
        break;
    case ttm_bo_map_vmap:
        pmap_qremove((vm_offset_t)(map->virtual), map->num_pages);
        kmem_free(kernel_map, (vm_offset_t)map->virtual,
            map->num_pages * PAGE_SIZE);
        break;
    case ttm_bo_map_kmap:
        sf_buf_free(map->sf);
        break;
    case ttm_bo_map_premapped:
        break;
    default:
        MPASS(0);
    }
    (void) ttm_mem_io_lock(man, false);
    ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
    ttm_mem_io_unlock(man);
    map->virtual = NULL;
    map->page = NULL;
    map->sf = NULL;
}
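
The kmap/kunmap pair above is TTM's primitive for temporary kernel mappings of buffer-object pages. A minimal usage sketch follows; it is illustrative only and not part of this commit, and it assumes "bo" is already reserved and its pages populated:

    /*
     * Illustrative sketch: zero the first page of a reserved buffer
     * object through a temporary kernel mapping.
     */
    static int
    zero_first_page(struct ttm_buffer_object *bo)
    {
        struct ttm_bo_kmap_obj map;
        bool is_iomem;
        int ret;

        ret = ttm_bo_kmap(bo, 0, 1, &map);      /* 0 or -errno */
        if (ret != 0)
            return (ret);
        memset(ttm_kmap_obj_virtual(&map, &is_iomem), 0, PAGE_SIZE);
        ttm_bo_kunmap(&map);    /* releases the sf_buf, vmap or iomap */
        return (0);
    }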

int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
    void *sync_obj,
    bool evict,
    bool no_wait_gpu,
    struct ttm_mem_reg *new_mem)
{
    struct ttm_bo_device *bdev = bo->bdev;
    struct ttm_bo_driver *driver = bdev->driver;
    struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
    struct ttm_mem_reg *old_mem = &bo->mem;
    int ret;
    struct ttm_buffer_object *ghost_obj;
    void *tmp_obj = NULL;

    mtx_lock(&bdev->fence_lock);
    if (bo->sync_obj) {
        tmp_obj = bo->sync_obj;
        bo->sync_obj = NULL;
    }
    bo->sync_obj = driver->sync_obj_ref(sync_obj);
    if (evict) {
        ret = ttm_bo_wait(bo, false, false, false);
        mtx_unlock(&bdev->fence_lock);
        if (tmp_obj)
            driver->sync_obj_unref(&tmp_obj);
        if (ret)
            return ret;

        if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
            (bo->ttm != NULL)) {
            ttm_tt_unbind(bo->ttm);
            ttm_tt_destroy(bo->ttm);
            bo->ttm = NULL;
        }
        ttm_bo_free_old_node(bo);
    } else {
        /**
         * This should help pipeline ordinary buffer moves.
         *
         * Hang old buffer memory on a new buffer object,
         * and leave it to be released when the GPU
         * operation has completed.
         */

        set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);

        /* ttm_buffer_object_transfer accesses bo->sync_obj */
        ret = ttm_buffer_object_transfer(bo, &ghost_obj);
        mtx_unlock(&bdev->fence_lock);
        if (tmp_obj)
            driver->sync_obj_unref(&tmp_obj);

        if (ret)
            return ret;

        /**
         * If we're not moving to fixed memory, the TTM object
         * needs to stay alive. Otherwise hang it on the ghost
         * bo to be unbound and destroyed.
         */

        if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
            ghost_obj->ttm = NULL;
        else
            bo->ttm = NULL;

        ttm_bo_unreserve(ghost_obj);
        ttm_bo_unref(&ghost_obj);
    }

    *old_mem = *new_mem;
    new_mem->mm_node = NULL;

    return 0;
}
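
A driver's accelerated move path typically ends with the call above. A hypothetical sketch (the "mydrv_*" names and fence handling are invented for illustration; a real driver supplies its own copy engine and fence code):

    /*
     * Illustrative driver move path: schedule a GPU blit from the old
     * placement to new_mem, obtain a fence for it, and let TTM handle
     * the pipelined release of the old placement via a ghost object.
     */
    static int
    mydrv_move_blit(struct ttm_buffer_object *bo, bool evict,
        bool no_wait_gpu, struct ttm_mem_reg *new_mem)
    {
        void *fence;    /* driver-specific sync object */
        int ret;

        ret = mydrv_copy_pages(bo, &bo->mem, new_mem, &fence);
        if (ret != 0)
            return (ret);
        return (ttm_bo_move_accel_cleanup(bo, fence, evict,
            no_wait_gpu, new_mem));
    }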

sys/dev/drm2/ttm/ttm_bo_vm.c (new file, 492 lines)
@ -0,0 +1,492 @@
|
||||
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
/*
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * <kib@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <dev/drm2/drmP.h>
#include <dev/drm2/ttm/ttm_module.h>
#include <dev/drm2/ttm/ttm_bo_driver.h>
#include <dev/drm2/ttm/ttm_placement.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

#define TTM_BO_VM_NUM_PREFAULT 16

RB_GENERATE(ttm_bo_device_buffer_objects, ttm_buffer_object, vm_rb,
    ttm_bo_cmp_rb_tree_items);

int
ttm_bo_cmp_rb_tree_items(struct ttm_buffer_object *a,
    struct ttm_buffer_object *b)
{

    if (a->vm_node->start < b->vm_node->start) {
        return (-1);
    } else if (a->vm_node->start > b->vm_node->start) {
        return (1);
    } else {
        return (0);
    }
}

static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
    unsigned long page_start,
    unsigned long num_pages)
{
    unsigned long cur_offset;
    struct ttm_buffer_object *bo;
    struct ttm_buffer_object *best_bo = NULL;

    RB_FOREACH(bo, ttm_bo_device_buffer_objects, &bdev->addr_space_rb) {
        cur_offset = bo->vm_node->start;
        if (page_start >= cur_offset) {
            best_bo = bo;
            if (page_start == cur_offset)
                break;
        }
    }

    if (unlikely(best_bo == NULL))
        return NULL;

    if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
        (page_start + num_pages)))
        return NULL;

    return best_bo;
}

static int
ttm_bo_vm_fault(vm_object_t vm_obj, vm_ooffset_t offset,
    int prot, vm_page_t *mres)
{

    struct ttm_buffer_object *bo = vm_obj->handle;
    struct ttm_bo_device *bdev = bo->bdev;
    struct ttm_tt *ttm = NULL;
    vm_page_t m, oldm;
    int ret;
    int retval = VM_PAGER_OK;
    struct ttm_mem_type_manager *man =
        &bdev->man[bo->mem.mem_type];

    vm_object_pip_add(vm_obj, 1);
    oldm = *mres;
    if (oldm != NULL) {
        vm_page_lock(oldm);
        vm_page_remove(oldm);
        vm_page_unlock(oldm);
        *mres = NULL;
    } else
        oldm = NULL;
retry:
    VM_OBJECT_UNLOCK(vm_obj);
    m = NULL;

reserve:
    mtx_lock(&bo->glob->lru_lock);
    ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
    mtx_unlock(&bo->glob->lru_lock);
    if (unlikely(ret != 0)) {
        if (ret == -EBUSY) {
            kern_yield(0);
            goto reserve;
        }
    }

    if (bdev->driver->fault_reserve_notify) {
        ret = bdev->driver->fault_reserve_notify(bo);
        switch (ret) {
        case 0:
            break;
        case -EBUSY:
        case -ERESTART:
        case -EINTR:
            kern_yield(0);
            goto reserve;
        default:
            retval = VM_PAGER_ERROR;
            goto out_unlock;
        }
    }

    /*
     * Wait for buffer data in transit, due to a pipelined
     * move.
     */

    mtx_lock(&bdev->fence_lock);
    if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
        ret = ttm_bo_wait(bo, false, true, false);
        mtx_unlock(&bdev->fence_lock);
        if (unlikely(ret != 0)) {
            retval = VM_PAGER_ERROR;
            goto out_unlock;
        }
    } else
        mtx_unlock(&bdev->fence_lock);

    ret = ttm_mem_io_lock(man, true);
    if (unlikely(ret != 0)) {
        retval = VM_PAGER_ERROR;
        goto out_unlock;
    }
    ret = ttm_mem_io_reserve_vm(bo);
    if (unlikely(ret != 0)) {
        retval = VM_PAGER_ERROR;
        goto out_io_unlock;
    }

    /*
     * Strictly, we're not allowed to modify vma->vm_page_prot here,
     * since the mmap_sem is only held in read mode. However, we
     * modify only the caching bits of vma->vm_page_prot and
     * consider those bits protected by
     * the bo->mutex, as we should be the only writers.
     * There shouldn't really be any readers of these bits except
     * within vm_insert_mixed()? fork?
     *
     * TODO: Add a list of vmas to the bo, and change the
     * vma->vm_page_prot when the object changes caching policy, with
     * the correct locks held.
     */
    if (!bo->mem.bus.is_iomem) {
        /* Allocate all pages at once, most common usage */
        ttm = bo->ttm;
        if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
            retval = VM_PAGER_ERROR;
            goto out_io_unlock;
        }
    }

    if (bo->mem.bus.is_iomem) {
        m = vm_phys_fictitious_to_vm_page(bo->mem.bus.base +
            bo->mem.bus.offset + offset);
        pmap_page_set_memattr(m, ttm_io_prot(bo->mem.placement));
    } else {
        ttm = bo->ttm;
        m = ttm->pages[OFF_TO_IDX(offset)];
        if (unlikely(!m)) {
            retval = VM_PAGER_ERROR;
            goto out_io_unlock;
        }
        pmap_page_set_memattr(m,
            (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
            VM_MEMATTR_WRITE_BACK : ttm_io_prot(bo->mem.placement));
    }

    VM_OBJECT_LOCK(vm_obj);
    if ((m->flags & VPO_BUSY) != 0) {
        vm_page_sleep(m, "ttmpbs");
        ttm_mem_io_unlock(man);
        ttm_bo_unreserve(bo);
        goto retry;
    }
    m->valid = VM_PAGE_BITS_ALL;
    *mres = m;
    vm_page_lock(m);
    vm_page_insert(m, vm_obj, OFF_TO_IDX(offset));
    vm_page_unlock(m);
    vm_page_busy(m);

    if (oldm != NULL) {
        vm_page_lock(oldm);
        vm_page_free(oldm);
        vm_page_unlock(oldm);
    }

out_io_unlock1:
    ttm_mem_io_unlock(man);
out_unlock1:
    ttm_bo_unreserve(bo);
    vm_object_pip_wakeup(vm_obj);
    return (retval);

out_io_unlock:
    VM_OBJECT_LOCK(vm_obj);
    goto out_io_unlock1;

out_unlock:
    VM_OBJECT_LOCK(vm_obj);
    goto out_unlock1;
}

static int
ttm_bo_vm_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred, u_short *color)
{
    struct ttm_buffer_object *bo = handle;

    *color = 0;
    (void)ttm_bo_reference(bo);
    return (0);
}

static void
ttm_bo_vm_dtor(void *handle)
{
    struct ttm_buffer_object *bo = handle;

    ttm_bo_unref(&bo);
}

static struct cdev_pager_ops ttm_pager_ops = {
    .cdev_pg_fault = ttm_bo_vm_fault,
    .cdev_pg_ctor = ttm_bo_vm_ctor,
    .cdev_pg_dtor = ttm_bo_vm_dtor
};

int
ttm_bo_mmap_single(struct ttm_bo_device *bdev, vm_ooffset_t *offset, vm_size_t size,
    struct vm_object **obj_res, int nprot)
{
    struct ttm_bo_driver *driver;
    struct ttm_buffer_object *bo;
    struct vm_object *vm_obj;
    int ret;

    rw_wlock(&bdev->vm_lock);
    bo = ttm_bo_vm_lookup_rb(bdev, OFF_TO_IDX(*offset), OFF_TO_IDX(size));
    if (likely(bo != NULL))
        refcount_acquire(&bo->kref);
    rw_wunlock(&bdev->vm_lock);

    if (unlikely(bo == NULL)) {
        printf("[TTM] Could not find buffer object to map\n");
        return (EINVAL);
    }

    driver = bo->bdev->driver;
    if (unlikely(!driver->verify_access)) {
        ret = EPERM;
        goto out_unref;
    }
    ret = -driver->verify_access(bo);
    if (unlikely(ret != 0))
        goto out_unref;

    vm_obj = cdev_pager_allocate(bo, OBJT_MGTDEVICE, &ttm_pager_ops,
        size, nprot, 0, curthread->td_ucred);
    if (vm_obj == NULL) {
        ret = EINVAL;
        goto out_unref;
    }
    /*
     * Note: We're transferring the bo reference to vm_obj->handle here.
     */
    *offset = 0;
    *obj_res = vm_obj;
    return 0;
out_unref:
    ttm_bo_unref(&bo);
    return ret;
}

#if 0
int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
    if (vma->vm_pgoff != 0)
        return -EACCES;

    vma->vm_ops = &ttm_bo_vm_ops;
    vma->vm_private_data = ttm_bo_reference(bo);
    vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
    return 0;
}

ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
    const char __user *wbuf, char __user *rbuf, size_t count,
    loff_t *f_pos, bool write)
{
    struct ttm_buffer_object *bo;
    struct ttm_bo_driver *driver;
    struct ttm_bo_kmap_obj map;
    unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
    unsigned long kmap_offset;
    unsigned long kmap_end;
    unsigned long kmap_num;
    size_t io_size;
    unsigned int page_offset;
    char *virtual;
    int ret;
    bool no_wait = false;
    bool dummy;

    read_lock(&bdev->vm_lock);
    bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
    if (likely(bo != NULL))
        ttm_bo_reference(bo);
    read_unlock(&bdev->vm_lock);

    if (unlikely(bo == NULL))
        return -EFAULT;

    driver = bo->bdev->driver;
    if (unlikely(!driver->verify_access)) {
        ret = -EPERM;
        goto out_unref;
    }

    ret = driver->verify_access(bo, filp);
    if (unlikely(ret != 0))
        goto out_unref;

    kmap_offset = dev_offset - bo->vm_node->start;
    if (unlikely(kmap_offset >= bo->num_pages)) {
        ret = -EFBIG;
        goto out_unref;
    }

    page_offset = *f_pos & ~PAGE_MASK;
    io_size = bo->num_pages - kmap_offset;
    io_size = (io_size << PAGE_SHIFT) - page_offset;
    if (count < io_size)
        io_size = count;

    kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
    kmap_num = kmap_end - kmap_offset + 1;

    ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

    switch (ret) {
    case 0:
        break;
    case -EBUSY:
        ret = -EAGAIN;
        goto out_unref;
    default:
        goto out_unref;
    }

    ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
    if (unlikely(ret != 0)) {
        ttm_bo_unreserve(bo);
        goto out_unref;
    }

    virtual = ttm_kmap_obj_virtual(&map, &dummy);
    virtual += page_offset;

    if (write)
        ret = copy_from_user(virtual, wbuf, io_size);
    else
        ret = copy_to_user(rbuf, virtual, io_size);

    ttm_bo_kunmap(&map);
    ttm_bo_unreserve(bo);
    ttm_bo_unref(&bo);

    if (unlikely(ret != 0))
        return -EFBIG;

    *f_pos += io_size;

    return io_size;
out_unref:
    ttm_bo_unref(&bo);
    return ret;
}

ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
    char __user *rbuf, size_t count, loff_t *f_pos,
    bool write)
{
    struct ttm_bo_kmap_obj map;
    unsigned long kmap_offset;
    unsigned long kmap_end;
    unsigned long kmap_num;
    size_t io_size;
    unsigned int page_offset;
    char *virtual;
    int ret;
    bool no_wait = false;
    bool dummy;

    kmap_offset = (*f_pos >> PAGE_SHIFT);
    if (unlikely(kmap_offset >= bo->num_pages))
        return -EFBIG;

    page_offset = *f_pos & ~PAGE_MASK;
    io_size = bo->num_pages - kmap_offset;
    io_size = (io_size << PAGE_SHIFT) - page_offset;
    if (count < io_size)
        io_size = count;

    kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
    kmap_num = kmap_end - kmap_offset + 1;

    ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

    switch (ret) {
    case 0:
        break;
    case -EBUSY:
        return -EAGAIN;
    default:
        return ret;
    }

    ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
    if (unlikely(ret != 0)) {
        ttm_bo_unreserve(bo);
        return ret;
    }

    virtual = ttm_kmap_obj_virtual(&map, &dummy);
    virtual += page_offset;

    if (write)
        ret = copy_from_user(virtual, wbuf, io_size);
    else
        ret = copy_to_user(rbuf, virtual, io_size);

    ttm_bo_kunmap(&map);
    ttm_bo_unreserve(bo);
    ttm_bo_unref(&bo);

    if (unlikely(ret != 0))
        return ret;

    *f_pos += io_size;

    return io_size;
}
#endif

sys/dev/drm2/ttm/ttm_execbuf_util.c (new file, 230 lines)
@ -0,0 +1,230 @@
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/drm2/drmP.h>
#include <dev/drm2/ttm/ttm_execbuf_util.h>
#include <dev/drm2/ttm/ttm_bo_driver.h>
#include <dev/drm2/ttm/ttm_placement.h>

static void ttm_eu_backoff_reservation_locked(struct list_head *list)
{
    struct ttm_validate_buffer *entry;

    list_for_each_entry(entry, list, head) {
        struct ttm_buffer_object *bo = entry->bo;
        if (!entry->reserved)
            continue;

        if (entry->removed) {
            ttm_bo_add_to_lru(bo);
            entry->removed = false;
        }
        entry->reserved = false;
        atomic_set(&bo->reserved, 0);
        wakeup(bo);
    }
}

static void ttm_eu_del_from_lru_locked(struct list_head *list)
{
    struct ttm_validate_buffer *entry;

    list_for_each_entry(entry, list, head) {
        struct ttm_buffer_object *bo = entry->bo;
        if (!entry->reserved)
            continue;

        if (!entry->removed) {
            entry->put_count = ttm_bo_del_from_lru(bo);
            entry->removed = true;
        }
    }
}

static void ttm_eu_list_ref_sub(struct list_head *list)
{
    struct ttm_validate_buffer *entry;

    list_for_each_entry(entry, list, head) {
        struct ttm_buffer_object *bo = entry->bo;

        if (entry->put_count) {
            ttm_bo_list_ref_sub(bo, entry->put_count, true);
            entry->put_count = 0;
        }
    }
}

static int ttm_eu_wait_unreserved_locked(struct list_head *list,
    struct ttm_buffer_object *bo)
{
    int ret;

    ttm_eu_del_from_lru_locked(list);
    ret = ttm_bo_wait_unreserved_locked(bo, true);
    if (unlikely(ret != 0))
        ttm_eu_backoff_reservation_locked(list);
    return ret;
}

void ttm_eu_backoff_reservation(struct list_head *list)
{
    struct ttm_validate_buffer *entry;
    struct ttm_bo_global *glob;

    if (list_empty(list))
        return;

    entry = list_first_entry(list, struct ttm_validate_buffer, head);
    glob = entry->bo->glob;
    mtx_lock(&glob->lru_lock);
    ttm_eu_backoff_reservation_locked(list);
    mtx_unlock(&glob->lru_lock);
}

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off and
 * wait for that buffer to become free for GPU access.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders.
 */

int ttm_eu_reserve_buffers(struct list_head *list)
{
    struct ttm_bo_global *glob;
    struct ttm_validate_buffer *entry;
    int ret;
    uint32_t val_seq;

    if (list_empty(list))
        return 0;

    list_for_each_entry(entry, list, head) {
        entry->reserved = false;
        entry->put_count = 0;
        entry->removed = false;
    }

    entry = list_first_entry(list, struct ttm_validate_buffer, head);
    glob = entry->bo->glob;

    mtx_lock(&glob->lru_lock);
retry_locked:
    val_seq = entry->bo->bdev->val_seq++;

    list_for_each_entry(entry, list, head) {
        struct ttm_buffer_object *bo = entry->bo;

retry_this_bo:
        ret = ttm_bo_reserve_locked(bo, true, true, true, val_seq);
        switch (ret) {
        case 0:
            break;
        case -EBUSY:
            ret = ttm_eu_wait_unreserved_locked(list, bo);
            if (unlikely(ret != 0)) {
                mtx_unlock(&glob->lru_lock);
                ttm_eu_list_ref_sub(list);
                return ret;
            }
            goto retry_this_bo;
        case -EAGAIN:
            ttm_eu_backoff_reservation_locked(list);
            ttm_eu_list_ref_sub(list);
            ret = ttm_bo_wait_unreserved_locked(bo, true);
            if (unlikely(ret != 0)) {
                mtx_unlock(&glob->lru_lock);
                return ret;
            }
            goto retry_locked;
        default:
            ttm_eu_backoff_reservation_locked(list);
            mtx_unlock(&glob->lru_lock);
            ttm_eu_list_ref_sub(list);
            return ret;
        }

        entry->reserved = true;
        if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
            ttm_eu_backoff_reservation_locked(list);
            mtx_unlock(&glob->lru_lock);
            ttm_eu_list_ref_sub(list);
            return -EBUSY;
        }
    }

    ttm_eu_del_from_lru_locked(list);
    mtx_unlock(&glob->lru_lock);
    ttm_eu_list_ref_sub(list);

    return 0;
}

void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
{
    struct ttm_validate_buffer *entry;
    struct ttm_buffer_object *bo;
    struct ttm_bo_global *glob;
    struct ttm_bo_device *bdev;
    struct ttm_bo_driver *driver;

    if (list_empty(list))
        return;

    bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
    bdev = bo->bdev;
    driver = bdev->driver;
    glob = bo->glob;

    mtx_lock(&glob->lru_lock);
    mtx_lock(&bdev->fence_lock);

    list_for_each_entry(entry, list, head) {
        bo = entry->bo;
        entry->old_sync_obj = bo->sync_obj;
        bo->sync_obj = driver->sync_obj_ref(sync_obj);
        ttm_bo_unreserve_locked(bo);
        entry->reserved = false;
    }
    mtx_unlock(&bdev->fence_lock);
    mtx_unlock(&glob->lru_lock);

    list_for_each_entry(entry, list, head) {
        if (entry->old_sync_obj)
            driver->sync_obj_unref(&entry->old_sync_obj);
    }
}
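
Together these three entry points implement the reserve/submit/fence cycle. A hypothetical submission path (illustrative only; "mydrv_emit_commands" and the fence handling are invented for the sketch):

    /*
     * Illustrative command-submission cycle: reserve every BO the
     * command buffer touches, emit the commands, then fence and
     * release.  On reserve failure, nothing is left reserved.
     */
    static int
    mydrv_submit(struct list_head *val_list) /* of ttm_validate_buffer */
    {
        void *fence;
        int ret;

        ret = ttm_eu_reserve_buffers(val_list);
        if (ret != 0)
            return (ret);
        ret = mydrv_emit_commands(val_list, &fence);
        if (ret != 0) {
            ttm_eu_backoff_reservation(val_list);
            return (ret);
        }
        ttm_eu_fence_buffer_objects(val_list, fence);
        return (0);
    }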

sys/dev/drm2/ttm/ttm_execbuf_util.h (new file, 109 lines)
@ -0,0 +1,109 @@
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
/* $FreeBSD$ */

#ifndef _TTM_EXECBUF_UTIL_H_
#define _TTM_EXECBUF_UTIL_H_

#include <dev/drm2/ttm/ttm_bo_api.h>

/**
 * struct ttm_validate_buffer
 *
 * @head: list head for thread-private list.
 * @bo: refcounted buffer object pointer.
 * @reserved: Indicates whether @bo has been reserved for validation.
 * @removed: Indicates whether @bo has been removed from lru lists.
 * @put_count: Number of outstanding references on bo::list_kref.
 * @old_sync_obj: Pointer to a sync object about to be unreferenced
 */

struct ttm_validate_buffer {
    struct list_head head;
    struct ttm_buffer_object *bo;
    bool reserved;
    bool removed;
    int put_count;
    void *old_sync_obj;
};

/**
 * function ttm_eu_backoff_reservation
 *
 * @list: thread private list of ttm_validate_buffer structs.
 *
 * Undoes all buffer validation reservations for bos pointed to by
 * the list entries.
 */

extern void ttm_eu_backoff_reservation(struct list_head *list);

/**
 * function ttm_eu_reserve_buffers
 *
 * @list: thread private list of ttm_validate_buffer structs.
 *
 * Tries to reserve bos pointed to by the list entries for validation.
 * If the function returns 0, all buffers are marked as "unfenced",
 * taken off the lru lists and are not synced for write CPU usage.
 *
 * If the function detects a deadlock due to multiple threads trying to
 * reserve the same buffers in reverse order, all threads except one will
 * back off and retry. This function may sleep while waiting for
 * CPU write reservations to be cleared, and for other threads to
 * unreserve their buffers.
 *
 * This function may return -ERESTART or -EAGAIN if the calling process
 * receives a signal while waiting. In that case, no buffers on the list
 * will be reserved upon return.
 *
 * Buffers reserved by this function should be unreserved by
 * a call to either ttm_eu_backoff_reservation() or
 * ttm_eu_fence_buffer_objects() when command submission is complete or
 * has failed.
 */

extern int ttm_eu_reserve_buffers(struct list_head *list);

/**
 * function ttm_eu_fence_buffer_objects.
 *
 * @list: thread private list of ttm_validate_buffer structs.
 * @sync_obj: The new sync object for the buffers.
 *
 * This function should be called when command submission is complete, and
 * it will add a new sync object to bos pointed to by entries on @list.
 * It also unreserves all buffers, putting them on lru lists.
 *
 */

extern void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj);

#endif

sys/dev/drm2/ttm/ttm_lock.c (new file, 340 lines)
@ -0,0 +1,340 @@
/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
/*
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * <kib@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/drm2/ttm/ttm_lock.h>
#include <dev/drm2/ttm/ttm_module.h>

#define TTM_WRITE_LOCK_PENDING   (1 << 0)
#define TTM_VT_LOCK_PENDING      (1 << 1)
#define TTM_SUSPEND_LOCK_PENDING (1 << 2)
#define TTM_VT_LOCK              (1 << 3)
#define TTM_SUSPEND_LOCK         (1 << 4)

void ttm_lock_init(struct ttm_lock *lock)
{
    mtx_init(&lock->lock, "ttmlk", NULL, MTX_DEF);
    lock->rw = 0;
    lock->flags = 0;
    lock->kill_takers = false;
    lock->signal = SIGKILL;
}

static void
ttm_lock_send_sig(int signo)
{
    struct proc *p;

    p = curproc;    /* XXXKIB curthread ? */
    PROC_LOCK(p);
    kern_psignal(p, signo);
    PROC_UNLOCK(p);
}

void ttm_read_unlock(struct ttm_lock *lock)
{
    mtx_lock(&lock->lock);
    if (--lock->rw == 0)
        wakeup(lock);
    mtx_unlock(&lock->lock);
}

static bool __ttm_read_lock(struct ttm_lock *lock)
{
    bool locked = false;

    if (unlikely(lock->kill_takers)) {
        ttm_lock_send_sig(lock->signal);
        return false;
    }
    if (lock->rw >= 0 && lock->flags == 0) {
        ++lock->rw;
        locked = true;
    }
    return locked;
}

int
ttm_read_lock(struct ttm_lock *lock, bool interruptible)
{
    const char *wmsg;
    int flags, ret;

    ret = 0;
    if (interruptible) {
        flags = PCATCH;
        wmsg = "ttmri";
    } else {
        flags = 0;
        wmsg = "ttmr";
    }
    mtx_lock(&lock->lock);
    while (!__ttm_read_lock(lock)) {
        ret = msleep(lock, &lock->lock, flags, wmsg, 0);
        if (ret != 0)
            break;
    }
    mtx_unlock(&lock->lock);
    return (-ret);
}

static bool __ttm_read_trylock(struct ttm_lock *lock, bool *locked)
{
    bool block = true;

    *locked = false;

    if (unlikely(lock->kill_takers)) {
        ttm_lock_send_sig(lock->signal);
        return false;
    }
    if (lock->rw >= 0 && lock->flags == 0) {
        ++lock->rw;
        block = false;
        *locked = true;
    } else if (lock->flags == 0) {
        block = false;
    }

    return !block;
}

int ttm_read_trylock(struct ttm_lock *lock, bool interruptible)
{
    const char *wmsg;
    int flags, ret;
    bool locked;

    ret = 0;
    if (interruptible) {
        flags = PCATCH;
        wmsg = "ttmrti";
    } else {
        flags = 0;
        wmsg = "ttmrt";
    }
    mtx_lock(&lock->lock);
    while (!__ttm_read_trylock(lock, &locked)) {
        ret = msleep(lock, &lock->lock, flags, wmsg, 0);
        if (ret != 0)
            break;
    }
    MPASS(!locked || ret == 0);
    mtx_unlock(&lock->lock);

    return (locked) ? 0 : -EBUSY;
}

void ttm_write_unlock(struct ttm_lock *lock)
{
    mtx_lock(&lock->lock);
    lock->rw = 0;
    wakeup(lock);
    mtx_unlock(&lock->lock);
}

static bool __ttm_write_lock(struct ttm_lock *lock)
{
    bool locked = false;

    if (unlikely(lock->kill_takers)) {
        ttm_lock_send_sig(lock->signal);
        return false;
    }
    if (lock->rw == 0 && ((lock->flags & ~TTM_WRITE_LOCK_PENDING) == 0)) {
        lock->rw = -1;
        lock->flags &= ~TTM_WRITE_LOCK_PENDING;
        locked = true;
    } else {
        lock->flags |= TTM_WRITE_LOCK_PENDING;
    }
    return locked;
}

int
ttm_write_lock(struct ttm_lock *lock, bool interruptible)
{
    const char *wmsg;
    int flags, ret;

    ret = 0;
    if (interruptible) {
        flags = PCATCH;
        wmsg = "ttmwi";
    } else {
        flags = 0;
        wmsg = "ttmw";
    }
    mtx_lock(&lock->lock);
    /* XXXKIB: linux uses __ttm_read_lock for uninterruptible sleeps */
    while (!__ttm_write_lock(lock)) {
        ret = msleep(lock, &lock->lock, flags, wmsg, 0);
        if (interruptible && ret != 0) {
            lock->flags &= ~TTM_WRITE_LOCK_PENDING;
            wakeup(lock);
            break;
        }
    }
    mtx_unlock(&lock->lock);

    return (-ret);
}

void ttm_write_lock_downgrade(struct ttm_lock *lock)
{
    mtx_lock(&lock->lock);
    lock->rw = 1;
    wakeup(lock);
    mtx_unlock(&lock->lock);
}

static int __ttm_vt_unlock(struct ttm_lock *lock)
{
    int ret = 0;

    mtx_lock(&lock->lock);
    if (unlikely(!(lock->flags & TTM_VT_LOCK)))
        ret = -EINVAL;
    lock->flags &= ~TTM_VT_LOCK;
    wakeup(lock);
    mtx_unlock(&lock->lock);

    return ret;
}

static void ttm_vt_lock_remove(struct ttm_base_object **p_base)
{
    struct ttm_base_object *base = *p_base;
    struct ttm_lock *lock = container_of(base, struct ttm_lock, base);
    int ret;

    *p_base = NULL;
    ret = __ttm_vt_unlock(lock);
    MPASS(ret == 0);
}

static bool __ttm_vt_lock(struct ttm_lock *lock)
{
    bool locked = false;

    if (lock->rw == 0) {
        lock->flags &= ~TTM_VT_LOCK_PENDING;
        lock->flags |= TTM_VT_LOCK;
        locked = true;
    } else {
        lock->flags |= TTM_VT_LOCK_PENDING;
    }
    return locked;
}

int ttm_vt_lock(struct ttm_lock *lock,
    bool interruptible,
    struct ttm_object_file *tfile)
{
    const char *wmsg;
    int flags, ret;

    ret = 0;
    if (interruptible) {
        flags = PCATCH;
        wmsg = "ttmwi";
    } else {
        flags = 0;
        wmsg = "ttmw";
    }
    mtx_lock(&lock->lock);
    while (!__ttm_vt_lock(lock)) {
        ret = msleep(lock, &lock->lock, flags, wmsg, 0);
        if (interruptible && ret != 0) {
            lock->flags &= ~TTM_VT_LOCK_PENDING;
            wakeup(lock);
            break;
        }
    }
    mtx_unlock(&lock->lock);

    /*
     * Add a base-object, the destructor of which will
     * make sure the lock is released if the client dies
     * while holding it.
     */

    ret = ttm_base_object_init(tfile, &lock->base, false,
        ttm_lock_type, &ttm_vt_lock_remove, NULL);
    if (ret)
        (void)__ttm_vt_unlock(lock);
    else
        lock->vt_holder = tfile;

    return (-ret);
}

int ttm_vt_unlock(struct ttm_lock *lock)
{
    return ttm_ref_object_base_unref(lock->vt_holder,
        lock->base.hash.key, TTM_REF_USAGE);
}

void ttm_suspend_unlock(struct ttm_lock *lock)
{
    mtx_lock(&lock->lock);
    lock->flags &= ~TTM_SUSPEND_LOCK;
    wakeup(lock);
    mtx_unlock(&lock->lock);
}

static bool __ttm_suspend_lock(struct ttm_lock *lock)
{
    bool locked = false;

    if (lock->rw == 0) {
        lock->flags &= ~TTM_SUSPEND_LOCK_PENDING;
        lock->flags |= TTM_SUSPEND_LOCK;
        locked = true;
    } else {
        lock->flags |= TTM_SUSPEND_LOCK_PENDING;
    }
    return locked;
}

void ttm_suspend_lock(struct ttm_lock *lock)
{
    mtx_lock(&lock->lock);
    while (!__ttm_suspend_lock(lock))
        msleep(lock, &lock->lock, 0, "ttms", 0);
    mtx_unlock(&lock->lock);
}

sys/dev/drm2/ttm/ttm_lock.h (new file, 249 lines)
@ -0,0 +1,249 @@
/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
/* $FreeBSD$ */

/** @file ttm_lock.h
 * This file implements a simple replacement for the buffer manager use
 * of the DRM heavyweight hardware lock.
 * The lock is a read-write lock. Taking it in read mode and write mode
 * is relatively fast, and intended for in-kernel use only.
 *
 * The vt mode is used only when there is a need to block all
 * user-space processes from validating buffers.
 * It's allowed to leave kernel space with the vt lock held.
 * If a user-space process dies while having the vt-lock,
 * it will be released during the file descriptor release. The vt lock
 * excludes write lock and read lock.
 *
 * The suspend mode is used to lock out all TTM users when preparing for
 * and executing suspend operations.
 *
 */

#ifndef _TTM_LOCK_H_
#define _TTM_LOCK_H_

#include <dev/drm2/drmP.h>
#include <dev/drm2/drm.h>
#include <dev/drm2/ttm/ttm_object.h>

/**
 * struct ttm_lock
 *
 * @base: ttm base object used solely to release the lock if the client
 * holding the lock dies.
 * @queue: Queue for processes waiting for lock change-of-status.
 * @lock: Spinlock protecting some lock members.
 * @rw: Read-write lock counter. Protected by @lock.
 * @flags: Lock state. Protected by @lock.
 * @kill_takers: Boolean whether to kill takers of the lock.
 * @signal: Signal to send when kill_takers is true.
 */

struct ttm_lock {
    struct ttm_base_object base;
    struct mtx lock;
    int32_t rw;
    uint32_t flags;
    bool kill_takers;
    int signal;
    struct ttm_object_file *vt_holder;
};


/**
 * ttm_lock_init
 *
 * @lock: Pointer to a struct ttm_lock
 * Initializes the lock.
 */
extern void ttm_lock_init(struct ttm_lock *lock);

/**
 * ttm_read_unlock
 *
 * @lock: Pointer to a struct ttm_lock
 *
 * Releases a read lock.
 */
extern void ttm_read_unlock(struct ttm_lock *lock);

/**
 * ttm_read_lock
 *
 * @lock: Pointer to a struct ttm_lock
 * @interruptible: Interruptible sleeping while waiting for a lock.
 *
 * Takes the lock in read mode.
 * Returns:
 * -ERESTARTSYS If interrupted by a signal and interruptible is true.
 */
extern int ttm_read_lock(struct ttm_lock *lock, bool interruptible);

/**
 * ttm_read_trylock
 *
 * @lock: Pointer to a struct ttm_lock
 * @interruptible: Interruptible sleeping while waiting for a lock.
 *
 * Tries to take the lock in read mode. If the lock is already held
 * in write mode, the function will return -EBUSY. If the lock is held
 * in vt or suspend mode, the function will sleep until these modes
 * are unlocked.
 *
 * Returns:
 * -EBUSY The lock was already held in write mode.
 * -ERESTARTSYS If interrupted by a signal and interruptible is true.
 */
extern int ttm_read_trylock(struct ttm_lock *lock, bool interruptible);

/**
 * ttm_write_unlock
 *
 * @lock: Pointer to a struct ttm_lock
 *
 * Releases a write lock.
 */
extern void ttm_write_unlock(struct ttm_lock *lock);

/**
 * ttm_write_lock
 *
 * @lock: Pointer to a struct ttm_lock
 * @interruptible: Interruptible sleeping while waiting for a lock.
 *
 * Takes the lock in write mode.
 * Returns:
 * -ERESTARTSYS If interrupted by a signal and interruptible is true.
 */
extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible);

/**
 * ttm_lock_downgrade
 *
 * @lock: Pointer to a struct ttm_lock
 *
 * Downgrades a write lock to a read lock.
 */
extern void ttm_lock_downgrade(struct ttm_lock *lock);

/**
 * ttm_suspend_lock
 *
 * @lock: Pointer to a struct ttm_lock
 *
 * Takes the lock in suspend mode. Excludes read and write mode.
 */
extern void ttm_suspend_lock(struct ttm_lock *lock);

/**
 * ttm_suspend_unlock
 *
 * @lock: Pointer to a struct ttm_lock
 *
 * Releases a suspend lock
 */
extern void ttm_suspend_unlock(struct ttm_lock *lock);

/**
 * ttm_vt_lock
 *
 * @lock: Pointer to a struct ttm_lock
 * @interruptible: Interruptible sleeping while waiting for a lock.
 * @tfile: Pointer to a struct ttm_object_file to register the lock with.
 *
 * Takes the lock in vt mode.
 * Returns:
 * -ERESTARTSYS If interrupted by a signal and interruptible is true.
 * -ENOMEM: Out of memory when locking.
 */
extern int ttm_vt_lock(struct ttm_lock *lock, bool interruptible,
    struct ttm_object_file *tfile);

/**
 * ttm_vt_unlock
 *
 * @lock: Pointer to a struct ttm_lock
 *
 * Releases a vt lock.
 * Returns:
 * -EINVAL If the lock was not held.
 */
extern int ttm_vt_unlock(struct ttm_lock *lock);

/**
 * ttm_write_unlock
 *
 * @lock: Pointer to a struct ttm_lock
 *
 * Releases a write lock.
 */
extern void ttm_write_unlock(struct ttm_lock *lock);

/**
 * ttm_write_lock
 *
 * @lock: Pointer to a struct ttm_lock
 * @interruptible: Interruptible sleeping while waiting for a lock.
 *
 * Takes the lock in write mode.
 * Returns:
 * -ERESTARTSYS If interrupted by a signal and interruptible is true.
 */
extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible);

void ttm_write_lock_downgrade(struct ttm_lock *lock);

/**
 * ttm_lock_set_kill
 *
 * @lock: Pointer to a struct ttm_lock
 * @val: Boolean whether to kill processes taking the lock.
 * @signal: Signal to send to the process taking the lock.
 *
 * The kill-when-taking-lock functionality is used to kill processes that keep
 * on using the TTM functionality when its resources has been taken down, for
 * example when the X server exits. A typical sequence would look like this:
 * - X server takes lock in write mode.
 * - ttm_lock_set_kill() is called with @val set to true.
 * - As part of X server exit, TTM resources are taken down.
 * - X server releases the lock on file release.
 * - Another dri client wants to render, takes the lock and is killed.
 *
 */
static inline void ttm_lock_set_kill(struct ttm_lock *lock, bool val,
    int signal)
{
    lock->kill_takers = val;
    if (val)
        lock->signal = signal;
}
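
The sequence described in the comment above can be made concrete with a short sketch (illustrative only; "mydrv_master_shutdown" is an invented name, not part of this commit):

    /*
     * Illustrative teardown: the master takes the lock in write mode,
     * arms kill_takers, and any later taker is signalled instead of
     * being allowed to touch the dismantled TTM resources.
     */
    static void
    mydrv_master_shutdown(struct ttm_lock *lock)
    {
        (void)ttm_write_lock(lock, false);  /* uninterruptible */
        ttm_lock_set_kill(lock, true, SIGKILL);
        /* ... tear down TTM resources ... */
        ttm_write_unlock(lock);
    }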

#endif

sys/dev/drm2/ttm/ttm_memory.c (new file, 471 lines)
@ -0,0 +1,471 @@
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/drm2/drmP.h>
#include <dev/drm2/ttm/ttm_memory.h>
#include <dev/drm2/ttm/ttm_module.h>
#include <dev/drm2/ttm/ttm_page_alloc.h>

#define TTM_MEMORY_ALLOC_RETRIES 4

struct ttm_mem_zone {
    u_int kobj_ref;
    struct ttm_mem_global *glob;
    const char *name;
    uint64_t zone_mem;
    uint64_t emer_mem;
    uint64_t max_mem;
    uint64_t swap_limit;
    uint64_t used_mem;
};

MALLOC_DEFINE(M_TTM_ZONE, "ttm_zone", "TTM Zone");

static void ttm_mem_zone_kobj_release(struct ttm_mem_zone *zone)
{

    printf("[TTM] Zone %7s: Used memory at exit: %llu kiB\n",
        zone->name, (unsigned long long)zone->used_mem >> 10);
    free(zone, M_TTM_ZONE);
}

#if 0
/* XXXKIB sysctl */
static ssize_t ttm_mem_zone_show(struct ttm_mem_zone *zone,
    struct attribute *attr,
    char *buffer)
{
    uint64_t val = 0;

    mtx_lock(&zone->glob->lock);
    if (attr == &ttm_mem_sys)
        val = zone->zone_mem;
    else if (attr == &ttm_mem_emer)
        val = zone->emer_mem;
    else if (attr == &ttm_mem_max)
        val = zone->max_mem;
    else if (attr == &ttm_mem_swap)
        val = zone->swap_limit;
    else if (attr == &ttm_mem_used)
        val = zone->used_mem;
    mtx_unlock(&zone->glob->lock);

    return snprintf(buffer, PAGE_SIZE, "%llu\n",
        (unsigned long long) val >> 10);
}
#endif

static void ttm_check_swapping(struct ttm_mem_global *glob);

#if 0
/* XXXKIB sysctl */
static ssize_t ttm_mem_zone_store(struct ttm_mem_zone *zone,
    struct attribute *attr,
    const char *buffer,
    size_t size)
{
    int chars;
    unsigned long val;
    uint64_t val64;

    chars = sscanf(buffer, "%lu", &val);
    if (chars == 0)
        return size;

    val64 = val;
    val64 <<= 10;

    mtx_lock(&zone->glob->lock);
    if (val64 > zone->zone_mem)
        val64 = zone->zone_mem;
    if (attr == &ttm_mem_emer) {
        zone->emer_mem = val64;
        if (zone->max_mem > val64)
            zone->max_mem = val64;
    } else if (attr == &ttm_mem_max) {
        zone->max_mem = val64;
        if (zone->emer_mem < val64)
            zone->emer_mem = val64;
    } else if (attr == &ttm_mem_swap)
        zone->swap_limit = val64;
    mtx_unlock(&zone->glob->lock);

    ttm_check_swapping(zone->glob);

    return size;
}
#endif

static void ttm_mem_global_kobj_release(struct ttm_mem_global *glob)
{

    free(glob, M_TTM_ZONE);
}

static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
    bool from_wq, uint64_t extra)
{
    unsigned int i;
    struct ttm_mem_zone *zone;
    uint64_t target;

    for (i = 0; i < glob->num_zones; ++i) {
        zone = glob->zones[i];

        if (from_wq)
            target = zone->swap_limit;
        else if (priv_check(curthread, PRIV_VM_MLOCK) == 0)
            target = zone->emer_mem;
        else
            target = zone->max_mem;

        target = (extra > target) ? 0ULL : target;

        if (zone->used_mem > target)
            return true;
    }
    return false;
}

/**
 * At this point we only support a single shrink callback.
 * Extend this if needed, perhaps using a linked list of callbacks.
 * Note that this function is reentrant:
 * many threads may try to swap out at any given time.
 */

static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
    uint64_t extra)
{
    int ret;
    struct ttm_mem_shrink *shrink;

    mtx_lock(&glob->lock);
    if (glob->shrink == NULL)
        goto out;

    while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
        shrink = glob->shrink;
        mtx_unlock(&glob->lock);
        ret = shrink->do_shrink(shrink);
        mtx_lock(&glob->lock);
        if (unlikely(ret != 0))
            goto out;
    }
out:
    mtx_unlock(&glob->lock);
}
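
The single shrink callback that ttm_shrink() invokes is supplied by the page allocator. A rough sketch of how such a callback is wired up (illustrative only; it assumes the usual ttm_mem_shrink registration helpers from ttm_memory.h are present in this port, and "mydrv_swapout_one_bo" is invented):

    /*
     * Illustrative shrink callback: make progress by swapping out or
     * freeing one buffer, returning 0 on success so ttm_shrink() keeps
     * iterating while zones remain above their swap targets.
     */
    static int
    mydrv_do_shrink(struct ttm_mem_shrink *shrink)
    {
        return (mydrv_swapout_one_bo());
    }

    static struct ttm_mem_shrink mydrv_shrink = {
        .do_shrink = mydrv_do_shrink
    };

    /* registration, e.g. ttm_mem_register_shrink(glob, &mydrv_shrink); */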
static void ttm_shrink_work(void *arg, int pending __unused)
|
||||
{
|
||||
struct ttm_mem_global *glob = arg;
|
||||
|
||||
ttm_shrink(glob, true, 0ULL);
|
||||
}
|
||||
|
||||
static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
|
||||
uint64_t mem)
|
||||
{
|
||||
struct ttm_mem_zone *zone;
|
||||
|
||||
zone = malloc(sizeof(*zone), M_TTM_ZONE, M_WAITOK | M_ZERO);
|
||||
|
||||
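	/*
	 * Default limits for the kernel zone: at most half of memory
	 * for normal use (max_mem), three quarters in emergencies
	 * (emer_mem), and swapping starts once usage passes 3/8 of
	 * memory (swap_limit = max_mem - mem/8).
	 */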
zone->name = "kernel";
|
||||
zone->zone_mem = mem;
|
||||
zone->max_mem = mem >> 1;
|
||||
zone->emer_mem = (mem >> 1) + (mem >> 2);
|
||||
zone->swap_limit = zone->max_mem - (mem >> 3);
|
||||
zone->used_mem = 0;
|
||||
zone->glob = glob;
|
||||
glob->zone_kernel = zone;
|
||||
refcount_init(&zone->kobj_ref, 1);
|
||||
glob->zones[glob->num_zones++] = zone;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
|
||||
uint64_t mem)
|
||||
{
|
||||
struct ttm_mem_zone *zone;
|
||||
|
||||
zone = malloc(sizeof(*zone), M_TTM_ZONE, M_WAITOK | M_ZERO);
|
||||
|
||||
/**
|
||||
* No special dma32 zone needed.
|
||||
*/
|
||||
|
||||
if (mem <= ((uint64_t) 1ULL << 32)) {
|
||||
free(zone, M_TTM_ZONE);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Limit max dma32 memory to 4GB for now
|
||||
* until we can figure out how big this
|
||||
* zone really is.
|
||||
*/
|
||||
|
||||
mem = ((uint64_t) 1ULL << 32);
|
||||
zone->name = "dma32";
|
||||
zone->zone_mem = mem;
|
||||
zone->max_mem = mem >> 1;
|
||||
zone->emer_mem = (mem >> 1) + (mem >> 2);
|
||||
zone->swap_limit = zone->max_mem - (mem >> 3);
|
||||
zone->used_mem = 0;
|
||||
zone->glob = glob;
|
||||
glob->zone_dma32 = zone;
|
||||
refcount_init(&zone->kobj_ref, 1);
|
||||
glob->zones[glob->num_zones++] = zone;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int ttm_mem_global_init(struct ttm_mem_global *glob)
{
	uint64_t mem;
	int ret;
	int i;
	struct ttm_mem_zone *zone;

	mtx_init(&glob->lock, "ttmgz", NULL, MTX_DEF);
	glob->swap_queue = taskqueue_create("ttm_swap", M_WAITOK,
	    taskqueue_thread_enqueue, &glob->swap_queue);
	taskqueue_start_threads(&glob->swap_queue, 1, PVM, "ttm swap");
	TASK_INIT(&glob->work, 0, ttm_shrink_work, glob);

	refcount_init(&glob->kobj_ref, 1);

	mem = physmem * PAGE_SIZE;

	ret = ttm_mem_init_kernel_zone(glob, mem);
	if (unlikely(ret != 0))
		goto out_no_zone;
	ret = ttm_mem_init_dma32_zone(glob, mem);
	if (unlikely(ret != 0))
		goto out_no_zone;
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		printf("[TTM] Zone %7s: Available graphics memory: %llu kiB\n",
		    zone->name, (unsigned long long)zone->max_mem >> 10);
	}
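	/*
	 * Both page pools get a soft limit of half of the kernel
	 * zone's max_mem, expressed in pages.
	 */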
	ttm_page_alloc_init(glob, glob->zone_kernel->max_mem / (2 * PAGE_SIZE));
	ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem / (2 * PAGE_SIZE));
	return 0;
out_no_zone:
	ttm_mem_global_release(glob);
	return ret;
}

void ttm_mem_global_release(struct ttm_mem_global *glob)
{
	unsigned int i;
	struct ttm_mem_zone *zone;

	/* let the page allocator first stop the shrink work. */
	ttm_page_alloc_fini();
	ttm_dma_page_alloc_fini();

	taskqueue_drain(glob->swap_queue, &glob->work);
	taskqueue_free(glob->swap_queue);
	glob->swap_queue = NULL;
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (refcount_release(&zone->kobj_ref))
			ttm_mem_zone_kobj_release(zone);
	}
	if (refcount_release(&glob->kobj_ref))
		ttm_mem_global_kobj_release(glob);
}

static void ttm_check_swapping(struct ttm_mem_global *glob)
{
	bool needs_swapping = false;
	unsigned int i;
	struct ttm_mem_zone *zone;

	mtx_lock(&glob->lock);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (zone->used_mem > zone->swap_limit) {
			needs_swapping = true;
			break;
		}
	}

	mtx_unlock(&glob->lock);

	if (unlikely(needs_swapping))
		taskqueue_enqueue(glob->swap_queue, &glob->work);
}

static void ttm_mem_global_free_zone(struct ttm_mem_global *glob,
    struct ttm_mem_zone *single_zone,
    uint64_t amount)
{
	unsigned int i;
	struct ttm_mem_zone *zone;

	mtx_lock(&glob->lock);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (single_zone && zone != single_zone)
			continue;
		zone->used_mem -= amount;
	}
	mtx_unlock(&glob->lock);
}

void ttm_mem_global_free(struct ttm_mem_global *glob,
    uint64_t amount)
{
	return ttm_mem_global_free_zone(glob, NULL, amount);
}

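/*
 * Try to reserve (or, when "reserve" is false, just probe for) "amount"
 * bytes in each relevant zone.  Threads holding PRIV_VM_MLOCK may fill a
 * zone up to its emergency limit; everyone else stops at max_mem.
 */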
static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
    struct ttm_mem_zone *single_zone,
    uint64_t amount, bool reserve)
{
	uint64_t limit;
	int ret = -ENOMEM;
	unsigned int i;
	struct ttm_mem_zone *zone;

	mtx_lock(&glob->lock);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (single_zone && zone != single_zone)
			continue;

		limit = (priv_check(curthread, PRIV_VM_MLOCK) == 0) ?
		    zone->emer_mem : zone->max_mem;

		if (zone->used_mem > limit)
			goto out_unlock;
	}

	if (reserve) {
		for (i = 0; i < glob->num_zones; ++i) {
			zone = glob->zones[i];
			if (single_zone && zone != single_zone)
				continue;
			zone->used_mem += amount;
		}
	}

	ret = 0;
out_unlock:
	mtx_unlock(&glob->lock);
	ttm_check_swapping(glob);

	return ret;
}

static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob,
    struct ttm_mem_zone *single_zone,
    uint64_t memory,
    bool no_wait, bool interruptible)
{
	int count = TTM_MEMORY_ALLOC_RETRIES;

	while (unlikely(ttm_mem_global_reserve(glob,
	    single_zone, memory, true) != 0)) {
		if (no_wait)
			return -ENOMEM;
		if (unlikely(count-- == 0))
			return -ENOMEM;
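		/* Shrink with headroom: the request plus 25% plus 16 bytes. */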
		ttm_shrink(glob, false, memory + (memory >> 2) + 16);
	}

	return 0;
}

int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
    bool no_wait, bool interruptible)
{
	/**
	 * Normal allocations of kernel memory are registered in
	 * all zones.
	 */

	return ttm_mem_global_alloc_zone(glob, NULL, memory, no_wait,
	    interruptible);
}

#define page_to_pfn(pp)	OFF_TO_IDX(VM_PAGE_TO_PHYS(pp))

int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
    struct vm_page *page,
    bool no_wait, bool interruptible)
{

	struct ttm_mem_zone *zone = NULL;

	/**
	 * Page allocations may be registered in a single zone
	 * only if highmem or !dma32.
	 */

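	/*
	 * Pages above the 4 GiB boundary (PFN 0x00100000) cannot belong
	 * to the dma32 zone, so they are accounted against the kernel
	 * zone only.
	 */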
	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
		zone = glob->zone_kernel;
	return ttm_mem_global_alloc_zone(glob, zone, PAGE_SIZE, no_wait,
	    interruptible);
}

void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct vm_page *page)
{
	struct ttm_mem_zone *zone = NULL;

	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
		zone = glob->zone_kernel;
	ttm_mem_global_free_zone(glob, zone, PAGE_SIZE);
}

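/*
 * Round an accounting size up: power-of-two sizes are returned as-is,
 * sizes above PAGE_SIZE are rounded up to a page boundary, and small
 * sizes are rounded up to the next power of two (e.g. 100 -> 128).
 */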
size_t ttm_round_pot(size_t size)
{
	if ((size & (size - 1)) == 0)
		return size;
	else if (size > PAGE_SIZE)
		return PAGE_ALIGN(size);
	else {
		size_t tmp_size = 4;

		while (tmp_size < size)
			tmp_size <<= 1;

		return tmp_size;
	}
	return 0;
}
149	sys/dev/drm2/ttm/ttm_memory.h	Normal file
@ -0,0 +1,149 @@
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/* $FreeBSD$ */

#ifndef TTM_MEMORY_H
#define TTM_MEMORY_H

/**
 * struct ttm_mem_shrink - callback to shrink TTM memory usage.
 *
 * @do_shrink: The callback function.
 *
 * Arguments to the do_shrink functions are intended to be passed using
 * inheritance. That is, the argument class derives from struct ttm_mem_shrink,
 * and can be accessed using container_of().
 */

struct ttm_mem_shrink {
	int (*do_shrink) (struct ttm_mem_shrink *);
};

/**
 * struct ttm_mem_global - Global memory accounting structure.
 *
 * @shrink: A single callback to shrink TTM memory usage. Extend this
 * to a linked list to be able to handle multiple callbacks when needed.
 * @swap_queue: A workqueue to handle shrinking in low memory situations. We
 * need a separate workqueue since it will spend a lot of time waiting
 * for the GPU, and this will otherwise block other workqueue tasks(?)
 * At this point we use only a single-threaded workqueue.
 * @work: The workqueue callback for the shrink queue.
 * @lock: Lock to protect the @shrink - and the memory accounting members,
 * that is, essentially the whole structure with some exceptions.
 * @zones: Array of pointers to accounting zones.
 * @num_zones: Number of populated entries in the @zones array.
 * @zone_kernel: Pointer to the kernel zone.
 * @zone_dma32: Pointer to the dma32 zone if there is one.
 *
 * Note that this structure is not per device. It should be global for all
 * graphics devices.
 */

#define TTM_MEM_MAX_ZONES 2
struct ttm_mem_zone;
struct ttm_mem_global {
	u_int kobj_ref;
	struct ttm_mem_shrink *shrink;
	struct taskqueue *swap_queue;
	struct task work;
	struct mtx lock;
	struct ttm_mem_zone *zones[TTM_MEM_MAX_ZONES];
	unsigned int num_zones;
	struct ttm_mem_zone *zone_kernel;
	struct ttm_mem_zone *zone_dma32;
};

/**
 * ttm_mem_init_shrink - initialize a struct ttm_mem_shrink object
 *
 * @shrink: The object to initialize.
 * @func: The callback function.
 */

static inline void ttm_mem_init_shrink(struct ttm_mem_shrink *shrink,
    int (*func) (struct ttm_mem_shrink *))
{
	shrink->do_shrink = func;
}

/**
 * ttm_mem_register_shrink - register a struct ttm_mem_shrink object.
 *
 * @glob: The struct ttm_mem_global object to register with.
 * @shrink: An initialized struct ttm_mem_shrink object to register.
 *
 * Returns:
 * -EBUSY: There's already a callback registered. (May change).
 */

static inline int ttm_mem_register_shrink(struct ttm_mem_global *glob,
    struct ttm_mem_shrink *shrink)
{
	mtx_lock(&glob->lock);
	if (glob->shrink != NULL) {
		mtx_unlock(&glob->lock);
		return -EBUSY;
	}
	glob->shrink = shrink;
	mtx_unlock(&glob->lock);
	return 0;
}
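
/*
 * A minimal usage sketch (hypothetical driver code, not part of this
 * header): embed the shrink object in a driver structure, point it at
 * a callback that swaps one buffer out per call, and register it once
 * the accounting object is set up.
 *
 *	struct my_glue {
 *		struct ttm_mem_shrink shrink;
 *		struct ttm_bo_device *bdev;
 *	};
 *
 *	static int
 *	my_do_shrink(struct ttm_mem_shrink *sh)
 *	{
 *		struct my_glue *g = container_of(sh, struct my_glue, shrink);
 *
 *		return (my_swap_out_one_bo(g->bdev));
 *	}
 *
 *	ttm_mem_init_shrink(&g->shrink, my_do_shrink);
 *	ttm_mem_register_shrink(glob, &g->shrink);
 */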

/**
 * ttm_mem_unregister_shrink - unregister a struct ttm_mem_shrink object.
 *
 * @glob: The struct ttm_mem_global object to unregister from.
 * @shrink: A previously registered struct ttm_mem_shrink object.
 *
 */

static inline void ttm_mem_unregister_shrink(struct ttm_mem_global *glob,
    struct ttm_mem_shrink *shrink)
{
	mtx_lock(&glob->lock);
	MPASS(glob->shrink == shrink);
	glob->shrink = NULL;
	mtx_unlock(&glob->lock);
}

struct vm_page;

extern int ttm_mem_global_init(struct ttm_mem_global *glob);
extern void ttm_mem_global_release(struct ttm_mem_global *glob);
extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
    bool no_wait, bool interruptible);
extern void ttm_mem_global_free(struct ttm_mem_global *glob,
    uint64_t amount);
extern int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
    struct vm_page *page,
    bool no_wait, bool interruptible);
extern void ttm_mem_global_free_page(struct ttm_mem_global *glob,
    struct vm_page *page);
extern size_t ttm_round_pot(size_t size);
#endif
37	sys/dev/drm2/ttm/ttm_module.h	Normal file
@ -0,0 +1,37 @@
/**************************************************************************
 *
 * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
/* $FreeBSD$ */

#ifndef _TTM_MODULE_H_
#define _TTM_MODULE_H_

#define TTM_PFX "[TTM] "

#endif /* _TTM_MODULE_H_ */
455	sys/dev/drm2/ttm/ttm_object.c	Normal file
@ -0,0 +1,455 @@
/**************************************************************************
 *
 * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
/** @file ttm_ref_object.c
 *
 * Base- and reference object implementation for the various
 * ttm objects. Implements reference counting, minimal security checks
 * and release on file close.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/**
 * struct ttm_object_file
 *
 * @tdev: Pointer to the ttm_object_device.
 *
 * @lock: Lock that protects the ref_list list and the
 * ref_hash hash tables.
 *
 * @ref_list: List of ttm_ref_objects to be destroyed at
 * file release.
 *
 * @ref_hash: Hash tables of ref objects, one per ttm_ref_type,
 * for fast lookup of ref objects given a base object.
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <dev/drm2/drmP.h>
#include <dev/drm2/drm.h>
#include <sys/rwlock.h>
#include <dev/drm2/ttm/ttm_object.h>
#include <dev/drm2/ttm/ttm_module.h>

struct ttm_object_file {
	struct ttm_object_device *tdev;
	struct rwlock lock;
	struct list_head ref_list;
	struct drm_open_hash ref_hash[TTM_REF_NUM];
	u_int refcount;
};

/**
 * struct ttm_object_device
 *
 * @object_lock: lock that protects the object_hash hash table.
 *
 * @object_hash: hash table for fast lookup of object global names.
 *
 * @object_count: Per device object count.
 *
 * This is the per-device data structure needed for ttm object management.
 */

struct ttm_object_device {
	struct rwlock object_lock;
	struct drm_open_hash object_hash;
	atomic_t object_count;
	struct ttm_mem_global *mem_glob;
};

/**
 * struct ttm_ref_object
 *
 * @hash: Hash entry for the per-file object reference hash.
 *
 * @head: List entry for the per-file list of ref-objects.
 *
 * @kref: Ref count.
 *
 * @obj: Base object this ref object is referencing.
 *
 * @ref_type: Type of ref object.
 *
 * This is similar to an idr object, but it also has a hash table entry
 * that allows lookup with a pointer to the referenced object as a key. In
 * that way, one can easily detect whether a base object is referenced by
 * a particular ttm_object_file. It also carries a ref count to avoid creating
 * multiple ref objects if a ttm_object_file references the same base
 * object more than once.
 */

struct ttm_ref_object {
	struct drm_hash_item hash;
	struct list_head head;
	u_int kref;
	enum ttm_ref_type ref_type;
	struct ttm_base_object *obj;
	struct ttm_object_file *tfile;
};

MALLOC_DEFINE(M_TTM_OBJ_FILE, "ttm_obj_file", "TTM File Objects");

static inline struct ttm_object_file *
ttm_object_file_ref(struct ttm_object_file *tfile)
{
	refcount_acquire(&tfile->refcount);
	return tfile;
}

static void ttm_object_file_destroy(struct ttm_object_file *tfile)
{

	free(tfile, M_TTM_OBJ_FILE);
}

static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile)
{
	struct ttm_object_file *tfile = *p_tfile;

	*p_tfile = NULL;
	if (refcount_release(&tfile->refcount))
		ttm_object_file_destroy(tfile);
}

int ttm_base_object_init(struct ttm_object_file *tfile,
    struct ttm_base_object *base,
    bool shareable,
    enum ttm_object_type object_type,
    void (*rcount_release) (struct ttm_base_object **),
    void (*ref_obj_release) (struct ttm_base_object *,
    enum ttm_ref_type ref_type))
{
	struct ttm_object_device *tdev = tfile->tdev;
	int ret;

	base->shareable = shareable;
	base->tfile = ttm_object_file_ref(tfile);
	base->refcount_release = rcount_release;
	base->ref_obj_release = ref_obj_release;
	base->object_type = object_type;
	refcount_init(&base->refcount, 1);
	rw_wlock(&tdev->object_lock);
	ret = drm_ht_just_insert_please(&tdev->object_hash,
	    &base->hash,
	    (unsigned long)base, 31, 0, 0);
	rw_wunlock(&tdev->object_lock);
	if (unlikely(ret != 0))
		goto out_err0;

	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
	if (unlikely(ret != 0))
		goto out_err1;

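	/*
	 * Drop the initial reference: the TTM_REF_USAGE ref object added
	 * above now holds the only reference to the base object, so its
	 * lifetime follows the file's reference list.
	 */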
	ttm_base_object_unref(&base);

	return 0;
out_err1:
	rw_wlock(&tdev->object_lock);
	(void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
	rw_wunlock(&tdev->object_lock);
out_err0:
	return ret;
}

static void ttm_release_base(struct ttm_base_object *base)
{
	struct ttm_object_device *tdev = base->tfile->tdev;

	(void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
	rw_wunlock(&tdev->object_lock);
	/*
	 * Note: We don't use synchronize_rcu() here because it's far
	 * too slow. It's up to the user to free the object using
	 * call_rcu() or ttm_base_object_kfree().
	 */

	if (base->refcount_release) {
		ttm_object_file_unref(&base->tfile);
		base->refcount_release(&base);
	}
	rw_wlock(&tdev->object_lock);
}

void ttm_base_object_unref(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct ttm_object_device *tdev = base->tfile->tdev;

	*p_base = NULL;

	/*
	 * Need to take the lock here to avoid racing with
	 * users trying to look up the object.
	 */

	rw_wlock(&tdev->object_lock);
	if (refcount_release(&base->refcount))
		ttm_release_base(base);
	rw_wunlock(&tdev->object_lock);
}

struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
    uint32_t key)
{
	struct ttm_object_device *tdev = tfile->tdev;
	struct ttm_base_object *base;
	struct drm_hash_item *hash;
	int ret;

	rw_rlock(&tdev->object_lock);
	ret = drm_ht_find_item(&tdev->object_hash, key, &hash);

	if (ret == 0) {
		base = drm_hash_entry(hash, struct ttm_base_object, hash);
		refcount_acquire(&base->refcount);
	}
	rw_runlock(&tdev->object_lock);

	if (unlikely(ret != 0))
		return NULL;

	if (tfile != base->tfile && !base->shareable) {
		printf("[TTM] Attempted access of non-shareable object %p\n",
		    base);
		ttm_base_object_unref(&base);
		return NULL;
	}

	return base;
}

MALLOC_DEFINE(M_TTM_OBJ_REF, "ttm_obj_ref", "TTM Ref Objects");

int ttm_ref_object_add(struct ttm_object_file *tfile,
    struct ttm_base_object *base,
    enum ttm_ref_type ref_type, bool *existed)
{
	struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
	struct ttm_ref_object *ref;
	struct drm_hash_item *hash;
	struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
	int ret = -EINVAL;

	if (existed != NULL)
		*existed = true;

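	/*
	 * Lookup/insert race: another thread may add an identical ref
	 * between our failed lookup and our insert attempt.  In that
	 * case drm_ht_insert_item() fails with -EINVAL and the loop
	 * retries the lookup from scratch.
	 */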
	while (ret == -EINVAL) {
		rw_rlock(&tfile->lock);
		ret = drm_ht_find_item(ht, base->hash.key, &hash);

		if (ret == 0) {
			ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
			refcount_acquire(&ref->kref);
			rw_runlock(&tfile->lock);
			break;
		}

		rw_runlock(&tfile->lock);
		ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
		    false, false);
		if (unlikely(ret != 0))
			return ret;
		ref = malloc(sizeof(*ref), M_TTM_OBJ_REF, M_WAITOK);
		if (unlikely(ref == NULL)) {
			ttm_mem_global_free(mem_glob, sizeof(*ref));
			return -ENOMEM;
		}

		ref->hash.key = base->hash.key;
		ref->obj = base;
		ref->tfile = tfile;
		ref->ref_type = ref_type;
		refcount_init(&ref->kref, 1);

		rw_wlock(&tfile->lock);
		ret = drm_ht_insert_item(ht, &ref->hash);

		if (ret == 0) {
			list_add_tail(&ref->head, &tfile->ref_list);
			refcount_acquire(&base->refcount);
			rw_wunlock(&tfile->lock);
			if (existed != NULL)
				*existed = false;
			break;
		}

		rw_wunlock(&tfile->lock);
		MPASS(ret == -EINVAL);

		ttm_mem_global_free(mem_glob, sizeof(*ref));
		free(ref, M_TTM_OBJ_REF);
	}

	return ret;
}

static void ttm_ref_object_release(struct ttm_ref_object *ref)
{
	struct ttm_base_object *base = ref->obj;
	struct ttm_object_file *tfile = ref->tfile;
	struct drm_open_hash *ht;
	struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;

	ht = &tfile->ref_hash[ref->ref_type];
	(void)drm_ht_remove_item(ht, &ref->hash);
	list_del(&ref->head);
	rw_wunlock(&tfile->lock);

	if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
		base->ref_obj_release(base, ref->ref_type);

	ttm_base_object_unref(&ref->obj);
	ttm_mem_global_free(mem_glob, sizeof(*ref));
	free(ref, M_TTM_OBJ_REF);
	rw_wlock(&tfile->lock);
}

int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
    unsigned long key, enum ttm_ref_type ref_type)
{
	struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
	struct ttm_ref_object *ref;
	struct drm_hash_item *hash;
	int ret;

	rw_wlock(&tfile->lock);
	ret = drm_ht_find_item(ht, key, &hash);
	if (unlikely(ret != 0)) {
		rw_wunlock(&tfile->lock);
		return -EINVAL;
	}
	ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
	if (refcount_release(&ref->kref))
		ttm_ref_object_release(ref);
	rw_wunlock(&tfile->lock);
	return 0;
}

void ttm_object_file_release(struct ttm_object_file **p_tfile)
{
	struct ttm_ref_object *ref;
	struct list_head *list;
	unsigned int i;
	struct ttm_object_file *tfile = *p_tfile;

	*p_tfile = NULL;
	rw_wlock(&tfile->lock);

	/*
	 * Since we release the lock within the loop, we have to
	 * restart it from the beginning each time.
	 */

	while (!list_empty(&tfile->ref_list)) {
		list = tfile->ref_list.next;
		ref = list_entry(list, struct ttm_ref_object, head);
		ttm_ref_object_release(ref);
	}

	for (i = 0; i < TTM_REF_NUM; ++i)
		drm_ht_remove(&tfile->ref_hash[i]);

	rw_wunlock(&tfile->lock);
	ttm_object_file_unref(&tfile);
}

struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
    unsigned int hash_order)
{
	struct ttm_object_file *tfile;
	unsigned int i;
	unsigned int j = 0;
	int ret;

	tfile = malloc(sizeof(*tfile), M_TTM_OBJ_FILE, M_WAITOK);
	rw_init(&tfile->lock, "ttmfo");
	tfile->tdev = tdev;
	refcount_init(&tfile->refcount, 1);
	INIT_LIST_HEAD(&tfile->ref_list);

	for (i = 0; i < TTM_REF_NUM; ++i) {
		ret = drm_ht_create(&tfile->ref_hash[i], hash_order);
		if (ret) {
			j = i;
			goto out_err;
		}
	}

	return tfile;
out_err:
	for (i = 0; i < j; ++i)
		drm_ht_remove(&tfile->ref_hash[i]);

	free(tfile, M_TTM_OBJ_FILE);

	return NULL;
}

MALLOC_DEFINE(M_TTM_OBJ_DEV, "ttm_obj_dev", "TTM Device Objects");

struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
    *mem_glob, unsigned int hash_order)
{
	struct ttm_object_device *tdev;
	int ret;

	tdev = malloc(sizeof(*tdev), M_TTM_OBJ_DEV, M_WAITOK);
	tdev->mem_glob = mem_glob;
	rw_init(&tdev->object_lock, "ttmdo");
	atomic_set(&tdev->object_count, 0);
	ret = drm_ht_create(&tdev->object_hash, hash_order);

	if (ret == 0)
		return tdev;

	free(tdev, M_TTM_OBJ_DEV);
	return NULL;
}

void ttm_object_device_release(struct ttm_object_device **p_tdev)
{
	struct ttm_object_device *tdev = *p_tdev;

	*p_tdev = NULL;

	rw_wlock(&tdev->object_lock);
	drm_ht_remove(&tdev->object_hash);
	rw_wunlock(&tdev->object_lock);

	free(tdev, M_TTM_OBJ_DEV);
}
271	sys/dev/drm2/ttm/ttm_object.h	Normal file
@ -0,0 +1,271 @@
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
/* $FreeBSD$ */
/** @file ttm_object.h
 *
 * Base- and reference object implementation for the various
 * ttm objects. Implements reference counting, minimal security checks
 * and release on file close.
 */

#ifndef _TTM_OBJECT_H_
#define _TTM_OBJECT_H_

#include <dev/drm2/drm_hashtab.h>
#include <dev/drm2/ttm/ttm_memory.h>

/**
 * enum ttm_ref_type
 *
 * Describes what type of reference a ref object holds.
 *
 * TTM_REF_USAGE is a simple refcount on a base object.
 *
 * TTM_REF_SYNCCPU_READ is a SYNCCPU_READ reference on a
 * buffer object.
 *
 * TTM_REF_SYNCCPU_WRITE is a SYNCCPU_WRITE reference on a
 * buffer object.
 *
 */

enum ttm_ref_type {
	TTM_REF_USAGE,
	TTM_REF_SYNCCPU_READ,
	TTM_REF_SYNCCPU_WRITE,
	TTM_REF_NUM
};

/**
 * enum ttm_object_type
 *
 * One entry per ttm object type.
 * Device-specific types should use the
 * ttm_driver_typex types.
 */

enum ttm_object_type {
	ttm_fence_type,
	ttm_buffer_type,
	ttm_lock_type,
	ttm_driver_type0 = 256,
	ttm_driver_type1,
	ttm_driver_type2,
	ttm_driver_type3,
	ttm_driver_type4,
	ttm_driver_type5
};

struct ttm_object_file;
struct ttm_object_device;

/**
 * struct ttm_base_object
 *
 * @hash: hash entry for the per-device object hash.
 * @type: derived type this object is base class for.
 * @shareable: Other ttm_object_files can access this object.
 *
 * @tfile: Pointer to ttm_object_file of the creator.
 * NULL if the object was not created by a user request.
 * (kernel object).
 *
 * @refcount: Number of references to this object, not
 * including the hash entry. A reference to a base object can
 * only be held by a ref object.
 *
 * @refcount_release: A function to be called when there are
 * no more references to this object. This function should
 * destroy the object (or make sure destruction eventually happens),
 * and when it is called, the object has
 * already been taken out of the per-device hash. The parameter
 * "base" should be set to NULL by the function.
 *
 * @ref_obj_release: A function to be called when a reference object
 * with another ttm_ref_type than TTM_REF_USAGE is deleted.
 * This function may, for example, release a lock held by a user-space
 * process.
 *
 * This struct is intended to be used as a base struct for objects that
 * are visible to user-space. It provides a global name, race-safe
 * access and refcounting, minimal access control and hooks for unref actions.
 */

struct ttm_base_object {
	/* struct rcu_head rhead;XXXKIB */
	struct drm_hash_item hash;
	enum ttm_object_type object_type;
	bool shareable;
	struct ttm_object_file *tfile;
	u_int refcount;
	void (*refcount_release) (struct ttm_base_object **base);
	void (*ref_obj_release) (struct ttm_base_object *base,
	    enum ttm_ref_type ref_type);
};

/**
 * ttm_base_object_init
 *
 * @tfile: Pointer to a struct ttm_object_file.
 * @base: The struct ttm_base_object to initialize.
 * @shareable: This object is shareable with other applications.
 * (different @tfile pointers.)
 * @type: The object type.
 * @refcount_release: See the struct ttm_base_object description.
 * @ref_obj_release: See the struct ttm_base_object description.
 *
 * Initializes a struct ttm_base_object.
 */

extern int ttm_base_object_init(struct ttm_object_file *tfile,
    struct ttm_base_object *base,
    bool shareable,
    enum ttm_object_type type,
    void (*refcount_release) (struct ttm_base_object **),
    void (*ref_obj_release) (struct ttm_base_object *,
    enum ttm_ref_type ref_type));

/**
 * ttm_base_object_lookup
 *
 * @tfile: Pointer to a struct ttm_object_file.
 * @key: Hash key
 *
 * Looks up a struct ttm_base_object with the key @key.
 * Also verifies that the object is visible to the application, by
 * comparing the @tfile argument and checking the object shareable flag.
 */

extern struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file
    *tfile, uint32_t key);

/**
 * ttm_base_object_unref
 *
 * @p_base: Pointer to a pointer referencing a struct ttm_base_object.
 *
 * Decrements the base object refcount and clears the pointer pointed to by
 * p_base.
 */

extern void ttm_base_object_unref(struct ttm_base_object **p_base);

/**
 * ttm_ref_object_add.
 *
 * @tfile: A struct ttm_object_file representing the application owning the
 * ref_object.
 * @base: The base object to reference.
 * @ref_type: The type of reference.
 * @existed: Upon completion, indicates that an identical reference object
 * already existed, and the refcount was upped on that object instead.
 *
 * Adding a ref object to a base object is basically like referencing the
 * base object, but a user-space application holds the reference. When the
 * file corresponding to @tfile is closed, all its reference objects are
 * deleted. A reference object can have different types depending on what
 * it's intended for. It can be refcounting to prevent object destruction.
 * When user-space takes a lock, it can add a ref object to that lock to
 * make sure the lock is released if the application dies. A ref object
 * will hold a single reference on a base object.
 */
extern int ttm_ref_object_add(struct ttm_object_file *tfile,
    struct ttm_base_object *base,
    enum ttm_ref_type ref_type, bool *existed);
/**
 * ttm_ref_object_base_unref
 *
 * @key: Key representing the base object.
 * @ref_type: Ref type of the ref object to be dereferenced.
 *
 * Unreference a ref object with type @ref_type
 * on the base object identified by @key. If there are no duplicate
 * references, the ref object will be destroyed and the base object
 * will be unreferenced.
 */
extern int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
    unsigned long key,
    enum ttm_ref_type ref_type);

/**
 * ttm_object_file_init - initialize a struct ttm_object file
 *
 * @tdev: A struct ttm_object device this file is initialized on.
 * @hash_order: Order of the hash table used to hold the reference objects.
 *
 * This is typically called by the file_ops::open function.
 */

extern struct ttm_object_file *ttm_object_file_init(struct ttm_object_device
    *tdev,
    unsigned int hash_order);

/**
 * ttm_object_file_release - release data held by a ttm_object_file
 *
 * @p_tfile: Pointer to pointer to the ttm_object_file object to release.
 * *p_tfile will be set to NULL by this function.
 *
 * Releases all data associated with a ttm_object_file.
 * Typically called from file_ops::release. The caller must
 * ensure that there are no concurrent users of tfile.
 */

extern void ttm_object_file_release(struct ttm_object_file **p_tfile);

/**
 * ttm_object_device_init - initialize a struct ttm_object_device
 *
 * @hash_order: Order of hash table used to hash the base objects.
 *
 * This function is typically called on device initialization to prepare
 * data structures needed for ttm base and ref objects.
 */

extern struct ttm_object_device *ttm_object_device_init
    (struct ttm_mem_global *mem_glob, unsigned int hash_order);

/**
 * ttm_object_device_release - release data held by a ttm_object_device
 *
 * @p_tdev: Pointer to pointer to the ttm_object_device object to release.
 * *p_tdev will be set to NULL by this function.
 *
 * Releases all data associated with a ttm_object_device.
 * Typically called from driver::unload before the destruction of the
 * device private data structure.
 */

extern void ttm_object_device_release(struct ttm_object_device **p_tdev);

#endif
894	sys/dev/drm2/ttm/ttm_page_alloc.c	Normal file
@ -0,0 +1,894 @@
/*
 * Copyright (c) Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Jerome Glisse <jglisse@redhat.com>
 *          Pauli Nieminen <suokkos@gmail.com>
 */
/*
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * <kib@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
 */

/* simple list based uncached page pool
 * - Pool collects recently freed pages for reuse
 * - Use page->lru to keep a free list
 * - doesn't track currently in use pages
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/drm2/drmP.h>
#include <dev/drm2/ttm/ttm_bo_driver.h>
#include <dev/drm2/ttm/ttm_page_alloc.h>

#ifdef TTM_HAS_AGP
#include <asm/agp.h>
#endif

#define VM_ALLOC_DMA32	VM_ALLOC_RESERVED1

#define NUM_PAGES_TO_ALLOC	(PAGE_SIZE/sizeof(vm_page_t))
#define SMALL_ALLOCATION	16
#define FREE_ALL_PAGES		(~0U)
/* times are in msecs */
#define PAGE_FREE_INTERVAL	1000

/**
 * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
 *
 * @lock: Protects the shared pool from concurrent access. Must be used with
 * irqsave/irqrestore variants because the pool allocator may be called from
 * delayed work.
 * @fill_lock: Prevent concurrent calls to fill.
 * @list: Pool of free uc/wc pages for fast reuse.
 * @gfp_flags: Flags to pass for alloc_page.
 * @npages: Number of pages in pool.
 */
struct ttm_page_pool {
	struct mtx lock;
	bool fill_lock;
	bool dma32;
	struct pglist list;
	int ttm_page_alloc_flags;
	unsigned npages;
	char *name;
	unsigned long nfrees;
	unsigned long nrefills;
};

/**
 * Limits for the pool. They are handled without locks because the only place
 * where they may change is the sysfs store. They won't have an immediate
 * effect anyway, so forcing serialization to access them is pointless.
 */

struct ttm_pool_opts {
	unsigned	alloc_size;
	unsigned	max_size;
	unsigned	small;
};

#define NUM_POOLS 4

/**
 * struct ttm_pool_manager - Holds memory pools for fast allocation
 *
 * Manager is a read-only object for the pool code, so it doesn't need locking.
 *
 * @free_interval: minimum number of jiffies between freeing pages from pool.
 * @page_alloc_inited: reference counting for pool allocation.
 * @work: Work that is used to shrink the pool. Work is only run when there are
 * some pages to free.
 * @small_allocation: Limit in number of pages for what counts as a small
 * allocation.
 *
 * @pools: All pool objects in use.
 **/
struct ttm_pool_manager {
	unsigned int kobj_ref;
	eventhandler_tag lowmem_handler;
	struct ttm_pool_opts options;

	union {
		struct ttm_page_pool pools[NUM_POOLS];
		struct {
			struct ttm_page_pool wc_pool;
			struct ttm_page_pool uc_pool;
			struct ttm_page_pool wc_pool_dma32;
			struct ttm_page_pool uc_pool_dma32;
		};
	};
};

MALLOC_DEFINE(M_TTM_POOLMGR, "ttm_poolmgr", "TTM Pool Manager");

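/*
 * Return a pool page to the VM: undo the wired/fictitious/managed
 * markings applied when the page was allocated for the pool, then
 * unwire and free it.
 */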
static void
ttm_vm_page_free(vm_page_t m)
{

	KASSERT(m->object == NULL, ("ttm page %p is owned", m));
	KASSERT(m->wire_count == 1, ("ttm lost wire %p", m));
	KASSERT((m->flags & PG_FICTITIOUS) != 0, ("ttm lost fictitious %p", m));
	KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("ttm got unmanaged %p", m));
	m->flags &= ~PG_FICTITIOUS;
	m->oflags |= VPO_UNMANAGED;
	vm_page_unwire(m, 0);
	vm_page_free(m);
}

static vm_memattr_t
ttm_caching_state_to_vm(enum ttm_caching_state cstate)
{

	switch (cstate) {
	case tt_uncached:
		return (VM_MEMATTR_UNCACHEABLE);
	case tt_wc:
		return (VM_MEMATTR_WRITE_COMBINING);
	case tt_cached:
		return (VM_MEMATTR_WRITE_BACK);
	}
	panic("caching state %d\n", cstate);
}

static void ttm_pool_kobj_release(struct ttm_pool_manager *m)
{

	free(m, M_TTM_POOLMGR);
}

#if 0
/* XXXKIB sysctl */
static ssize_t ttm_pool_store(struct ttm_pool_manager *m,
    struct attribute *attr, const char *buffer, size_t size)
{
	int chars;
	unsigned val;

	chars = sscanf(buffer, "%u", &val);
	if (chars == 0)
		return size;

	/* Convert kb to number of pages */
	val = val / (PAGE_SIZE >> 10);

	if (attr == &ttm_page_pool_max)
		m->options.max_size = val;
	else if (attr == &ttm_page_pool_small)
		m->options.small = val;
	else if (attr == &ttm_page_pool_alloc_size) {
		if (val > NUM_PAGES_TO_ALLOC*8) {
			pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
			    NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
			    NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
			return size;
		} else if (val > NUM_PAGES_TO_ALLOC) {
			pr_warn("Setting allocation size to larger than %lu is not recommended\n",
			    NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
		}
		m->options.alloc_size = val;
	}

	return size;
}

static ssize_t ttm_pool_show(struct ttm_pool_manager *m,
    struct attribute *attr, char *buffer)
{
	unsigned val = 0;

	if (attr == &ttm_page_pool_max)
		val = m->options.max_size;
	else if (attr == &ttm_page_pool_small)
		val = m->options.small;
	else if (attr == &ttm_page_pool_alloc_size)
		val = m->options.alloc_size;

	val = val * (PAGE_SIZE >> 10);

	return snprintf(buffer, PAGE_SIZE, "%u\n", val);
}
#endif

static struct ttm_pool_manager *_manager;

static int set_pages_array_wb(vm_page_t *pages, int addrinarray)
{
	vm_page_t m;
	int i;

	for (i = 0; i < addrinarray; i++) {
		m = pages[i];
#ifdef TTM_HAS_AGP
		unmap_page_from_agp(m);
#endif
		pmap_page_set_memattr(m, VM_MEMATTR_WRITE_BACK);
	}
	return 0;
}

static int set_pages_array_wc(vm_page_t *pages, int addrinarray)
{
	vm_page_t m;
	int i;

	for (i = 0; i < addrinarray; i++) {
		m = pages[i];
#ifdef TTM_HAS_AGP
		map_page_into_agp(pages[i]);
#endif
		pmap_page_set_memattr(m, VM_MEMATTR_WRITE_COMBINING);
	}
	return 0;
}

static int set_pages_array_uc(vm_page_t *pages, int addrinarray)
{
	vm_page_t m;
	int i;

	for (i = 0; i < addrinarray; i++) {
		m = pages[i];
#ifdef TTM_HAS_AGP
		map_page_into_agp(pages[i]);
#endif
		pmap_page_set_memattr(m, VM_MEMATTR_UNCACHEABLE);
	}
	return 0;
}

/**
 * Select the right pool for the requested caching state and ttm flags.
 */
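/*
 * Pool index encoding: bit 0 selects caching (0 = write-combined,
 * 1 = uncached) and bit 1 selects the DMA32 variant, matching the
 * wc/uc/wc_dma32/uc_dma32 layout of struct ttm_pool_manager.
 */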
static struct ttm_page_pool *ttm_get_pool(int flags,
    enum ttm_caching_state cstate)
{
	int pool_index;

	if (cstate == tt_cached)
		return NULL;

	if (cstate == tt_wc)
		pool_index = 0x0;
	else
		pool_index = 0x1;

	if (flags & TTM_PAGE_FLAG_DMA32)
		pool_index |= 0x2;

	return &_manager->pools[pool_index];
}

/* set memory back to wb and free the pages. */
static void ttm_pages_put(vm_page_t *pages, unsigned npages)
{
	unsigned i;

	/* Our VM handles vm memattr automatically on the page free. */
	if (set_pages_array_wb(pages, npages))
		printf("[TTM] Failed to set %d pages to wb!\n", npages);
	for (i = 0; i < npages; ++i)
		ttm_vm_page_free(pages[i]);
}

static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
    unsigned freed_pages)
{
	pool->npages -= freed_pages;
	pool->nfrees += freed_pages;
}

/**
 * Free pages from pool.
 *
 * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
 * number of pages in one go.
 *
 * @pool: to free the pages from
 * @nr_free: if set to FREE_ALL_PAGES, free every page in the pool
 **/
static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
{
	vm_page_t p, p1;
	vm_page_t *pages_to_free;
	unsigned freed_pages = 0,
		 npages_to_free = nr_free;

	if (NUM_PAGES_TO_ALLOC < nr_free)
		npages_to_free = NUM_PAGES_TO_ALLOC;

	pages_to_free = malloc(npages_to_free * sizeof(vm_page_t),
	    M_TEMP, M_WAITOK | M_ZERO);

restart:
	mtx_lock(&pool->lock);

	TAILQ_FOREACH_REVERSE_SAFE(p, &pool->list, pglist, pageq, p1) {
		if (freed_pages >= npages_to_free)
			break;

		pages_to_free[freed_pages++] = p;
		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
		if (freed_pages >= NUM_PAGES_TO_ALLOC) {
			/* remove range of pages from the pool */
			TAILQ_REMOVE(&pool->list, p, pageq);

			ttm_pool_update_free_locked(pool, freed_pages);
			/**
			 * Because changing page caching is costly
			 * we unlock the pool to prevent stalling.
			 */
			mtx_unlock(&pool->lock);

			ttm_pages_put(pages_to_free, freed_pages);
			if (likely(nr_free != FREE_ALL_PAGES))
				nr_free -= freed_pages;

			if (NUM_PAGES_TO_ALLOC >= nr_free)
				npages_to_free = nr_free;
			else
				npages_to_free = NUM_PAGES_TO_ALLOC;

			freed_pages = 0;

			/* free all so restart the processing */
			if (nr_free)
				goto restart;

			/* Not allowed to fall through or break because
			 * following context is inside spinlock while we are
			 * outside here.
			 */
			goto out;

		}
	}

	/* remove range of pages from the pool */
	if (freed_pages) {
		TAILQ_REMOVE(&pool->list, p, pageq);

		ttm_pool_update_free_locked(pool, freed_pages);
		nr_free -= freed_pages;
	}

	mtx_unlock(&pool->lock);

	if (freed_pages)
		ttm_pages_put(pages_to_free, freed_pages);
out:
	free(pages_to_free, M_TEMP);
	return nr_free;
}

/* Get a good estimate of how many pages are free in the pools */
static int ttm_pool_get_num_unused_pages(void)
{
	unsigned i;
	int total = 0;

	for (i = 0; i < NUM_POOLS; ++i)
		total += _manager->pools[i].npages;

	return total;
}

/**
 * Callback for the VM to request the pool to reduce the number of pages held.
 */
static int ttm_pool_mm_shrink(void *arg)
{
	static unsigned int start_pool = 0;
	unsigned i;
	unsigned pool_offset = atomic_fetchadd_int(&start_pool, 1);
	struct ttm_page_pool *pool;
	int shrink_pages = 100; /* XXXKIB */

	pool_offset = pool_offset % NUM_POOLS;
	/* select start pool in round robin fashion */
	for (i = 0; i < NUM_POOLS; ++i) {
		unsigned nr_free = shrink_pages;

		if (shrink_pages == 0)
			break;
		pool = &_manager->pools[(i + pool_offset) % NUM_POOLS];
		shrink_pages = ttm_page_pool_free(pool, nr_free);
	}
	/* return estimated number of unused pages in pool */
	return ttm_pool_get_num_unused_pages();
}

static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{

	manager->lowmem_handler = EVENTHANDLER_REGISTER(vm_lowmem,
	    ttm_pool_mm_shrink, manager, EVENTHANDLER_PRI_ANY);
}

static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{

	EVENTHANDLER_DEREGISTER(vm_lowmem, manager->lowmem_handler);
}

static int ttm_set_pages_caching(vm_page_t *pages,
    enum ttm_caching_state cstate, unsigned cpages)
{
	int r = 0;

	/* Set page caching */
	switch (cstate) {
	case tt_uncached:
		r = set_pages_array_uc(pages, cpages);
		if (r)
			printf("[TTM] Failed to set %d pages to uc!\n", cpages);
		break;
	case tt_wc:
		r = set_pages_array_wc(pages, cpages);
		if (r)
			printf("[TTM] Failed to set %d pages to wc!\n", cpages);
		break;
	default:
		break;
	}
	return r;
}

/**
 * Free the pages that failed to change their caching state.  If any of the
 * pages have already changed their caching state, put them back into the
 * pool instead.
 */
static void ttm_handle_caching_state_failure(struct pglist *pages,
    int ttm_flags, enum ttm_caching_state cstate,
    vm_page_t *failed_pages, unsigned cpages)
{
	unsigned i;

	/* Failed pages have to be freed */
	for (i = 0; i < cpages; ++i) {
		TAILQ_REMOVE(pages, failed_pages[i], pageq);
		ttm_vm_page_free(failed_pages[i]);
	}
}

/**
 * Allocate new pages with correct caching.
 *
 * This function is reentrant if caller updates count depending on number of
 * pages returned in pages array.
 */
static int ttm_alloc_new_pages(struct pglist *pages, int ttm_alloc_flags,
    int ttm_flags, enum ttm_caching_state cstate, unsigned count)
{
	vm_page_t *caching_array;
	vm_page_t p;
	int r = 0;
	unsigned i, cpages, aflags;
	unsigned max_cpages = min(count,
	    (unsigned)(PAGE_SIZE/sizeof(vm_page_t)));

	aflags = VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
	    ((ttm_alloc_flags & TTM_PAGE_FLAG_ZERO_ALLOC) != 0 ?
	    VM_ALLOC_ZERO : 0);

	/* allocate array for page caching change */
	caching_array = malloc(max_cpages * sizeof(vm_page_t), M_TEMP,
	    M_WAITOK | M_ZERO);

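	/*
	 * Pages are allocated one at a time with vm_page_alloc_contig()
	 * so that the required memory attribute (uc/wc/wb) and, for
	 * DMA32 requests, the below-4GB physical address bound can be
	 * specified at allocation time.
	 */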
	for (i = 0, cpages = 0; i < count; ++i) {
		p = vm_page_alloc_contig(NULL, 0, aflags, 1, 0,
		    (ttm_alloc_flags & TTM_PAGE_FLAG_DMA32) ? 0xffffffff :
		    VM_MAX_ADDRESS, PAGE_SIZE, 0,
		    ttm_caching_state_to_vm(cstate));
		if (!p) {
			printf("[TTM] Unable to get page %u\n", i);

			/* store already allocated pages in the pool after
			 * setting the caching state */
			if (cpages) {
				r = ttm_set_pages_caching(caching_array,
				    cstate, cpages);
				if (r)
					ttm_handle_caching_state_failure(pages,
					    ttm_flags, cstate,
					    caching_array, cpages);
			}
			r = -ENOMEM;
			goto out;
		}
		p->oflags &= ~VPO_UNMANAGED;
		p->flags |= PG_FICTITIOUS;

#ifdef CONFIG_HIGHMEM /* KIB: nop */
		/* gfp flags of highmem page should never be dma32 so we
		 * should be fine in such case
		 */
		if (!PageHighMem(p))
#endif
		{
			caching_array[cpages++] = p;
			if (cpages == max_cpages) {

				r = ttm_set_pages_caching(caching_array,
				    cstate, cpages);
				if (r) {
					ttm_handle_caching_state_failure(pages,
					    ttm_flags, cstate,
					    caching_array, cpages);
					goto out;
				}
				cpages = 0;
			}
		}

		TAILQ_INSERT_HEAD(pages, p, pageq);
	}

	if (cpages) {
		r = ttm_set_pages_caching(caching_array, cstate, cpages);
		if (r)
			ttm_handle_caching_state_failure(pages,
			    ttm_flags, cstate,
			    caching_array, cpages);
	}
out:
	free(caching_array, M_TEMP);

	return r;
}

/**
 * Fill the given pool if there aren't enough pages and the requested number
 * of pages is small.
 */
static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
    int ttm_flags, enum ttm_caching_state cstate, unsigned count)
{
	vm_page_t p;
	int r;
	unsigned cpages = 0;
	/**
	 * Only allow one pool fill operation at a time.
	 * If the pool doesn't have enough pages for the allocation,
	 * new pages are allocated from outside the pool.
	 */
	if (pool->fill_lock)
		return;

	pool->fill_lock = true;

	/* If the allocation request is small and there are not enough
	 * pages in the pool we fill the pool up first. */
	if (count < _manager->options.small
	    && count > pool->npages) {
		struct pglist new_pages;
		unsigned alloc_size = _manager->options.alloc_size;

		/**
		 * Can't change page caching if in irqsave context. We have to
		 * drop the pool->lock.
		 */
		mtx_unlock(&pool->lock);

		TAILQ_INIT(&new_pages);
		r = ttm_alloc_new_pages(&new_pages, pool->ttm_page_alloc_flags,
		    ttm_flags, cstate, alloc_size);
		mtx_lock(&pool->lock);

		if (!r) {
			TAILQ_CONCAT(&pool->list, &new_pages, pageq);
			++pool->nrefills;
			pool->npages += alloc_size;
		} else {
			printf("[TTM] Failed to fill pool (%p)\n", pool);
			/* If we have any pages left put them to the pool. */
			TAILQ_FOREACH(p, &pool->list, pageq) {
				++cpages;
			}
			TAILQ_CONCAT(&pool->list, &new_pages, pageq);
			pool->npages += cpages;
		}

	}
	pool->fill_lock = false;
}

/**
 * Cut 'count' number of pages from the pool and put them on the return list.
 *
 * @return count of pages still required to fulfill the request.
 */
static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
    struct pglist *pages,
    int ttm_flags,
    enum ttm_caching_state cstate,
    unsigned count)
{
	vm_page_t p;
	unsigned i;

	mtx_lock(&pool->lock);
	ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count);

	if (count >= pool->npages) {
		/* take all pages from the pool */
		TAILQ_CONCAT(pages, &pool->list, pageq);
		count -= pool->npages;
		pool->npages = 0;
		goto out;
	}
	for (i = 0; i < count; i++) {
		p = TAILQ_FIRST(&pool->list);
		TAILQ_REMOVE(&pool->list, p, pageq);
		TAILQ_INSERT_TAIL(pages, p, pageq);
	}
	pool->npages -= count;
	count = 0;
out:
	mtx_unlock(&pool->lock);
	return count;
}

/* Put all pages in pages list to correct pool to wait for reuse */
static void ttm_put_pages(vm_page_t *pages, unsigned npages, int flags,
    enum ttm_caching_state cstate)
{
	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
	unsigned i;

	if (pool == NULL) {
		/* No pool for this memory type so free the pages */
		for (i = 0; i < npages; i++) {
			if (pages[i]) {
				ttm_vm_page_free(pages[i]);
				pages[i] = NULL;
			}
		}
		return;
	}

	mtx_lock(&pool->lock);
	for (i = 0; i < npages; i++) {
		if (pages[i]) {
			TAILQ_INSERT_TAIL(&pool->list, pages[i], pageq);
			pages[i] = NULL;
			pool->npages++;
		}
	}
	/* Check that we don't go over the pool limit */
	npages = 0;
	if (pool->npages > _manager->options.max_size) {
		npages = pool->npages - _manager->options.max_size;
		/* free at least NUM_PAGES_TO_ALLOC number of pages
		 * to reduce calls to set_memory_wb */
		if (npages < NUM_PAGES_TO_ALLOC)
			npages = NUM_PAGES_TO_ALLOC;
	}
	mtx_unlock(&pool->lock);
	if (npages)
		ttm_page_pool_free(pool, npages);
}

/*
 * On success the pages list will hold count number of correctly
 * cached pages.
 */
static int ttm_get_pages(vm_page_t *pages, unsigned npages, int flags,
    enum ttm_caching_state cstate)
{
	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
	struct pglist plist;
	vm_page_t p = NULL;
	int gfp_flags, aflags;
	unsigned count;
	int r;

	aflags = VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
	    ((flags & TTM_PAGE_FLAG_ZERO_ALLOC) != 0 ? VM_ALLOC_ZERO : 0);

	/* No pool for cached pages */
	if (pool == NULL) {
		for (r = 0; r < npages; ++r) {
			p = vm_page_alloc_contig(NULL, 0, aflags, 1, 0,
			    (flags & TTM_PAGE_FLAG_DMA32) ? 0xffffffff :
			    VM_MAX_ADDRESS, PAGE_SIZE,
			    0, ttm_caching_state_to_vm(cstate));
			if (!p) {
				printf("[TTM] Unable to allocate page\n");
				return -ENOMEM;
			}
			p->oflags &= ~VPO_UNMANAGED;
			p->flags |= PG_FICTITIOUS;
			pages[r] = p;
		}
		return 0;
	}

	/* combine zero flag to pool flags */
	gfp_flags = flags | pool->ttm_page_alloc_flags;

	/* First we take pages from the pool */
	TAILQ_INIT(&plist);
	npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
	count = 0;
	TAILQ_FOREACH(p, &plist, pageq) {
		pages[count++] = p;
	}

	/* clear the pages coming from the pool if requested */
	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
		TAILQ_FOREACH(p, &plist, pageq) {
			pmap_zero_page(p);
		}
	}

	/* If the pool didn't have enough pages allocate new ones. */
	if (npages > 0) {
		/* ttm_alloc_new_pages doesn't reference pool so we can run
		 * multiple requests in parallel.
		 **/
		TAILQ_INIT(&plist);
		r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate,
		    npages);
		TAILQ_FOREACH(p, &plist, pageq) {
			pages[count++] = p;
		}
		if (r) {
			/* If there are any pages in the list put them back
			 * to the pool. */
			printf("[TTM] Failed to allocate extra pages for large request\n");
			ttm_put_pages(pages, count, flags, cstate);
			return r;
		}
	}

	return 0;
}

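/*
 * Editor's note, an illustrative sketch only: how the populate path pairs
 * ttm_get_pages() with ttm_put_pages(). Both helpers are static to this
 * file, so example_alloc_wc_pages() is hypothetical and exists purely to
 * show the calling convention (negative errno on failure, with
 * ttm_get_pages() cleaning up after itself).
 */
static int
example_alloc_wc_pages(vm_page_t *pages, unsigned n)
{
	int r;

	r = ttm_get_pages(pages, n, TTM_PAGE_FLAG_ZERO_ALLOC, tt_wc);
	if (r != 0)
		return (r);
	/* ... use the zeroed, write-combined pages ... */
	ttm_put_pages(pages, n, TTM_PAGE_FLAG_ZERO_ALLOC, tt_wc);
	return (0);
}
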
static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
    char *name)
{
	mtx_init(&pool->lock, "ttmpool", NULL, MTX_DEF);
	pool->fill_lock = false;
	TAILQ_INIT(&pool->list);
	pool->npages = pool->nfrees = 0;
	pool->ttm_page_alloc_flags = flags;
	pool->name = name;
}

int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{

	if (_manager != NULL)
		printf("[TTM] manager != NULL\n");
	printf("[TTM] Initializing pool allocator\n");

	_manager = malloc(sizeof(*_manager), M_TTM_POOLMGR, M_WAITOK | M_ZERO);

	ttm_page_pool_init_locked(&_manager->wc_pool, 0, "wc");
	ttm_page_pool_init_locked(&_manager->uc_pool, 0, "uc");
	ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
	    TTM_PAGE_FLAG_DMA32, "wc dma");
	ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
	    TTM_PAGE_FLAG_DMA32, "uc dma");

	_manager->options.max_size = max_pages;
	_manager->options.small = SMALL_ALLOCATION;
	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;

	refcount_init(&_manager->kobj_ref, 1);
	ttm_pool_mm_shrink_init(_manager);

	return 0;
}

void ttm_page_alloc_fini(void)
{
	int i;

	printf("[TTM] Finalizing pool allocator\n");
	ttm_pool_mm_shrink_fini(_manager);

	for (i = 0; i < NUM_POOLS; ++i)
		ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);

	if (refcount_release(&_manager->kobj_ref))
		ttm_pool_kobj_release(_manager);
	_manager = NULL;
}

int ttm_pool_populate(struct ttm_tt *ttm)
{
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
	unsigned i;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	for (i = 0; i < ttm->num_pages; ++i) {
		ret = ttm_get_pages(&ttm->pages[i], 1,
		    ttm->page_flags,
		    ttm->caching_state);
		if (ret != 0) {
			ttm_pool_unpopulate(ttm);
			return -ENOMEM;
		}

		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
		    false, false);
		if (unlikely(ret != 0)) {
			ttm_pool_unpopulate(ttm);
			return -ENOMEM;
		}
	}

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_pool_unpopulate(ttm);
			return ret;
		}
	}

	ttm->state = tt_unbound;
	return 0;
}

void ttm_pool_unpopulate(struct ttm_tt *ttm)
{
	unsigned i;

	for (i = 0; i < ttm->num_pages; ++i) {
		if (ttm->pages[i]) {
			ttm_mem_global_free_page(ttm->glob->mem_glob,
			    ttm->pages[i]);
			ttm_put_pages(&ttm->pages[i], 1,
			    ttm->page_flags,
			    ttm->caching_state);
		}
	}
	ttm->state = tt_unpopulated;
}

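/*
 * Editor's note, a hedged sketch: drivers are expected to point their
 * struct ttm_bo_driver ttm_tt_populate/ttm_tt_unpopulate hooks at these
 * helpers when they use the pool allocator (the off-tree Radeon port is
 * the intended consumer). The example_* names are hypothetical.
 */
static int
example_driver_tt_populate(struct ttm_tt *ttm)
{

	return (ttm_pool_populate(ttm));
}

static void
example_driver_tt_unpopulate(struct ttm_tt *ttm)
{

	ttm_pool_unpopulate(ttm);
}
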
#if 0
/* XXXKIB sysctl */
int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
{
	struct ttm_page_pool *p;
	unsigned i;
	char *h[] = {"pool", "refills", "pages freed", "size"};
	if (!_manager) {
		seq_printf(m, "No pool allocator running.\n");
		return 0;
	}
	seq_printf(m, "%6s %12s %13s %8s\n",
	    h[0], h[1], h[2], h[3]);
	for (i = 0; i < NUM_POOLS; ++i) {
		p = &_manager->pools[i];

		seq_printf(m, "%6s %12ld %13ld %8d\n",
		    p->name, p->nrefills,
		    p->nfrees, p->npages);
	}
	return 0;
}
#endif

103	sys/dev/drm2/ttm/ttm_page_alloc.h	Normal file
@ -0,0 +1,103 @@
/*
 * Copyright (c) Red Hat Inc.

 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Jerome Glisse <jglisse@redhat.com>
 */
/* $FreeBSD$ */
#ifndef TTM_PAGE_ALLOC
#define TTM_PAGE_ALLOC

#include <dev/drm2/ttm/ttm_bo_driver.h>
#include <dev/drm2/ttm/ttm_memory.h>

/**
 * Initialize pool allocator.
 */
int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
/**
 * Free pool allocator.
 */
void ttm_page_alloc_fini(void);

/**
 * ttm_pool_populate:
 *
 * @ttm: The struct ttm_tt to contain the backing pages.
 *
 * Add backing pages to all of @ttm
 */
extern int ttm_pool_populate(struct ttm_tt *ttm);

/**
 * ttm_pool_unpopulate:
 *
 * @ttm: The struct ttm_tt from which to free the backing pages.
 *
 * Free all pages of @ttm
 */
extern void ttm_pool_unpopulate(struct ttm_tt *ttm);

/**
 * Output the state of pools to debugfs file
 */
/* XXXKIB
extern int ttm_page_alloc_debugfs(struct seq_file *m, void *data);
*/

#ifdef CONFIG_SWIOTLB
/**
 * Initialize pool allocator.
 */
int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);

/**
 * Free pool allocator.
 */
void ttm_dma_page_alloc_fini(void);

/**
 * Output the state of pools to debugfs file
 */
extern int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);

extern int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
extern void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);

#else
static inline int ttm_dma_page_alloc_init(struct ttm_mem_global *glob,
    unsigned max_pages)
{
	return -ENODEV;
}

static inline void ttm_dma_page_alloc_fini(void) { return; }

/* XXXKIB
static inline int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
{
	return 0;
}
*/
#endif

#endif
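Since this header only declares the entry points, a minimal sketch of the expected bring-up/tear-down order may help. It is hedged: the cap of half of physical memory is an assumption chosen for illustration, not something the header mandates, and the example_* names are invented.

/* Hypothetical driver attach path; physmem is FreeBSD's physical page count. */
static int
example_ttm_pools_attach(struct ttm_mem_global *glob)
{

	return (ttm_page_alloc_init(glob, physmem / 2));
}

static void
example_ttm_pools_detach(void)
{

	ttm_page_alloc_fini();
}
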
1134	sys/dev/drm2/ttm/ttm_page_alloc_dma.c	Normal file
File diff suppressed because it is too large.
93	sys/dev/drm2/ttm/ttm_placement.h	Normal file
@ -0,0 +1,93 @@
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
/* $FreeBSD$ */

#ifndef _TTM_PLACEMENT_H_
#define _TTM_PLACEMENT_H_
/*
 * Memory regions for data placement.
 */

#define TTM_PL_SYSTEM           0
#define TTM_PL_TT               1
#define TTM_PL_VRAM             2
#define TTM_PL_PRIV0            3
#define TTM_PL_PRIV1            4
#define TTM_PL_PRIV2            5
#define TTM_PL_PRIV3            6
#define TTM_PL_PRIV4            7
#define TTM_PL_PRIV5            8
#define TTM_PL_SWAPPED          15

#define TTM_PL_FLAG_SYSTEM      (1 << TTM_PL_SYSTEM)
#define TTM_PL_FLAG_TT          (1 << TTM_PL_TT)
#define TTM_PL_FLAG_VRAM        (1 << TTM_PL_VRAM)
#define TTM_PL_FLAG_PRIV0       (1 << TTM_PL_PRIV0)
#define TTM_PL_FLAG_PRIV1       (1 << TTM_PL_PRIV1)
#define TTM_PL_FLAG_PRIV2       (1 << TTM_PL_PRIV2)
#define TTM_PL_FLAG_PRIV3       (1 << TTM_PL_PRIV3)
#define TTM_PL_FLAG_PRIV4       (1 << TTM_PL_PRIV4)
#define TTM_PL_FLAG_PRIV5       (1 << TTM_PL_PRIV5)
#define TTM_PL_FLAG_SWAPPED     (1 << TTM_PL_SWAPPED)
#define TTM_PL_MASK_MEM         0x0000FFFF

/*
 * Other flags that affect data placement.
 * TTM_PL_FLAG_CACHED indicates cache-coherent mappings
 * if available.
 * TTM_PL_FLAG_SHARED means that another application may
 * reference the buffer.
 * TTM_PL_FLAG_NO_EVICT means that the buffer may never
 * be evicted to make room for other buffers.
 */

#define TTM_PL_FLAG_CACHED      (1 << 16)
#define TTM_PL_FLAG_UNCACHED    (1 << 17)
#define TTM_PL_FLAG_WC          (1 << 18)
#define TTM_PL_FLAG_SHARED      (1 << 20)
#define TTM_PL_FLAG_NO_EVICT    (1 << 21)

#define TTM_PL_MASK_CACHING     (TTM_PL_FLAG_CACHED | \
				 TTM_PL_FLAG_UNCACHED | \
				 TTM_PL_FLAG_WC)

#define TTM_PL_MASK_MEMTYPE     (TTM_PL_MASK_MEM | TTM_PL_MASK_CACHING)

/*
 * Access flags to be used for CPU- and GPU- mappings.
 * The idea is that the TTM synchronization mechanism will
 * allow concurrent READ access and exclusive write access.
 * Currently GPU- and CPU accesses are exclusive.
 */

#define TTM_ACCESS_READ         (1 << 0)
#define TTM_ACCESS_WRITE        (1 << 1)

#endif
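The placement bits compose by OR-ing one memory-region flag with caching and eviction modifiers. A hedged example of how a driver might describe a scanout buffer; the macro name is invented for illustration only:

/* VRAM-only placement, write-combined CPU mapping, pinned against eviction. */
#define EXAMPLE_SCANOUT_PLACEMENT \
	(TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC | TTM_PL_FLAG_NO_EVICT)
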
370	sys/dev/drm2/ttm/ttm_tt.c	Normal file
@ -0,0 +1,370 @@
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
/*
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * <kib@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/drm2/drmP.h>
#include <dev/drm2/ttm/ttm_module.h>
#include <dev/drm2/ttm/ttm_bo_driver.h>
#include <dev/drm2/ttm/ttm_placement.h>
#include <dev/drm2/ttm/ttm_page_alloc.h>

MALLOC_DEFINE(M_TTM_PD, "ttm_pd", "TTM Page Directories");

/**
 * Allocates storage for pointers to the pages that back the ttm.
 */
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = malloc(ttm->num_pages * sizeof(void *),
	    M_TTM_PD, M_WAITOK | M_ZERO);
}

static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
	ttm->ttm.pages = malloc(ttm->ttm.num_pages * sizeof(void *),
	    M_TTM_PD, M_WAITOK | M_ZERO);
	ttm->dma_address = malloc(ttm->ttm.num_pages *
	    sizeof(*ttm->dma_address), M_TTM_PD, M_WAITOK);
}

#if defined(__i386__) || defined(__amd64__)
static inline int ttm_tt_set_page_caching(vm_page_t p,
    enum ttm_caching_state c_old,
    enum ttm_caching_state c_new)
{

	/* XXXKIB our VM does not need this. */
#if 0
	if (c_old != tt_cached) {
		/* p isn't in the default caching state, set it to
		 * writeback first to free its current memtype. */
		pmap_page_set_memattr(p, VM_MEMATTR_WRITE_BACK);
	}
#endif

	if (c_new == tt_wc)
		pmap_page_set_memattr(p, VM_MEMATTR_WRITE_COMBINING);
	else if (c_new == tt_uncached)
		pmap_page_set_memattr(p, VM_MEMATTR_UNCACHEABLE);

	return (0);
}
#else
static inline int ttm_tt_set_page_caching(vm_page_t p,
    enum ttm_caching_state c_old,
    enum ttm_caching_state c_new)
{
	return 0;
}
#endif

/*
 * Change caching policy for the linear kernel map
 * for range of pages in a ttm.
 */

static int ttm_tt_set_caching(struct ttm_tt *ttm,
    enum ttm_caching_state c_state)
{
	int i, j;
	vm_page_t cur_page;
	int ret;

	if (ttm->caching_state == c_state)
		return 0;

	if (ttm->state == tt_unpopulated) {
		/* Change caching but don't populate */
		ttm->caching_state = c_state;
		return 0;
	}

	if (ttm->caching_state == tt_cached)
		drm_clflush_pages(ttm->pages, ttm->num_pages);

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		if (likely(cur_page != NULL)) {
			ret = ttm_tt_set_page_caching(cur_page,
			    ttm->caching_state,
			    c_state);
			if (unlikely(ret != 0))
				goto out_err;
		}
	}

	ttm->caching_state = c_state;

	return 0;

out_err:
	for (j = 0; j < i; ++j) {
		cur_page = ttm->pages[j];
		if (cur_page != NULL) {
			(void)ttm_tt_set_page_caching(cur_page, c_state,
			    ttm->caching_state);
		}
	}

	return ret;
}

int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
	enum ttm_caching_state state;

	if (placement & TTM_PL_FLAG_WC)
		state = tt_wc;
	else if (placement & TTM_PL_FLAG_UNCACHED)
		state = tt_uncached;
	else
		state = tt_cached;

	return ttm_tt_set_caching(ttm, state);
}

void ttm_tt_destroy(struct ttm_tt *ttm)
{
	if (unlikely(ttm == NULL))
		return;

	if (ttm->state == tt_bound) {
		ttm_tt_unbind(ttm);
	}

	if (likely(ttm->pages != NULL)) {
		ttm->bdev->driver->ttm_tt_unpopulate(ttm);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
	    ttm->swap_storage)
		vm_object_deallocate(ttm->swap_storage);

	ttm->swap_storage = NULL;
	ttm->func->destroy(ttm);
}

int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
    unsigned long size, uint32_t page_flags,
    vm_page_t dummy_read_page)
{
	ttm->bdev = bdev;
	ttm->glob = bdev->glob;
	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;
	ttm->dummy_read_page = dummy_read_page;
	ttm->state = tt_unpopulated;
	ttm->swap_storage = NULL;

	ttm_tt_alloc_page_directory(ttm);
	if (!ttm->pages) {
		ttm_tt_destroy(ttm);
		printf("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}

void ttm_tt_fini(struct ttm_tt *ttm)
{
	free(ttm->pages, M_TTM_PD);
	ttm->pages = NULL;
}

int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
    unsigned long size, uint32_t page_flags,
    vm_page_t dummy_read_page)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	ttm->bdev = bdev;
	ttm->glob = bdev->glob;
	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;
	ttm->dummy_read_page = dummy_read_page;
	ttm->state = tt_unpopulated;
	ttm->swap_storage = NULL;

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	ttm_dma_tt_alloc_page_directory(ttm_dma);
	if (!ttm->pages || !ttm_dma->dma_address) {
		ttm_tt_destroy(ttm);
		printf("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}

void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	free(ttm->pages, M_TTM_PD);
	ttm->pages = NULL;
	free(ttm_dma->dma_address, M_TTM_PD);
	ttm_dma->dma_address = NULL;
}

void ttm_tt_unbind(struct ttm_tt *ttm)
{
	int ret;

	if (ttm->state == tt_bound) {
		ret = ttm->func->unbind(ttm);
		MPASS(ret == 0);
		ttm->state = tt_unbound;
	}
}

int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	int ret = 0;

	if (!ttm)
		return -EINVAL;

	if (ttm->state == tt_bound)
		return 0;

	ret = ttm->bdev->driver->ttm_tt_populate(ttm);
	if (ret)
		return ret;

	ret = ttm->func->bind(ttm, bo_mem);
	if (unlikely(ret != 0))
		return ret;

	ttm->state = tt_bound;

	return 0;
}

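/*
 * Editor's note, a hedged lifecycle sketch: a ttm_tt moves tt_unpopulated ->
 * tt_unbound (driver populate hook) -> tt_bound, and back down through
 * ttm_tt_unbind()/ttm_tt_destroy(). example_bind_cycle() is hypothetical
 * and only restates the contract of the two functions above.
 */
static int
example_bind_cycle(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	int ret;

	ret = ttm_tt_bind(ttm, bo_mem);	/* populates on demand */
	if (ret != 0)
		return (ret);
	/* ... the GPU may now address the backing pages ... */
	ttm_tt_unbind(ttm);
	return (0);
}
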
int ttm_tt_swapin(struct ttm_tt *ttm)
{
	vm_object_t obj;
	vm_page_t from_page, to_page;
	int i, ret, rv;

	obj = ttm->swap_storage;

	VM_OBJECT_LOCK(obj);
	vm_object_pip_add(obj, 1);
	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = vm_page_grab(obj, i, VM_ALLOC_RETRY);
		if (from_page->valid != VM_PAGE_BITS_ALL) {
			if (vm_pager_has_page(obj, i, NULL, NULL)) {
				rv = vm_pager_get_pages(obj, &from_page, 1, 0);
				if (rv != VM_PAGER_OK) {
					vm_page_lock(from_page);
					vm_page_free(from_page);
					vm_page_unlock(from_page);
					ret = -EIO;
					goto err_ret;
				}
			} else
				vm_page_zero_invalid(from_page, TRUE);
		}
		to_page = ttm->pages[i];
		if (unlikely(to_page == NULL)) {
			vm_page_wakeup(from_page);
			ret = -ENOMEM;
			goto err_ret;
		}
		pmap_copy_page(from_page, to_page);
		vm_page_wakeup(from_page);
	}
	vm_object_pip_wakeup(obj);
	VM_OBJECT_UNLOCK(obj);

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
		vm_object_deallocate(obj);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
	return (0);

err_ret:
	vm_object_pip_wakeup(obj);
	VM_OBJECT_UNLOCK(obj);
	return (ret);
}

int ttm_tt_swapout(struct ttm_tt *ttm, vm_object_t persistent_swap_storage)
{
	vm_object_t obj;
	vm_page_t from_page, to_page;
	int i;

	MPASS(ttm->state == tt_unbound || ttm->state == tt_unpopulated);
	MPASS(ttm->caching_state == tt_cached);

	if (persistent_swap_storage == NULL) {
		obj = vm_pager_allocate(OBJT_SWAP, NULL,
		    IDX_TO_OFF(ttm->num_pages), VM_PROT_DEFAULT, 0,
		    curthread->td_ucred);
		if (obj == NULL) {
			printf("[TTM] Failed allocating swap storage\n");
			return (-ENOMEM);
		}
	} else
		obj = persistent_swap_storage;

	VM_OBJECT_LOCK(obj);
	vm_object_pip_add(obj, 1);
	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;
		to_page = vm_page_grab(obj, i, VM_ALLOC_RETRY);
		pmap_copy_page(from_page, to_page);
		vm_page_dirty(to_page);
		to_page->valid = VM_PAGE_BITS_ALL;
		vm_page_wakeup(to_page);
	}
	vm_object_pip_wakeup(obj);
	VM_OBJECT_UNLOCK(obj);

	ttm->bdev->driver->ttm_tt_unpopulate(ttm);
	ttm->swap_storage = obj;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistent_swap_storage != NULL)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;
	return (0);
}

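/*
 * Editor's note, a hedged sketch of the swap round trip: passing NULL lets
 * ttm_tt_swapout() create a transient OBJT_SWAP object; the pages come back
 * later through the driver populate hook, which notices
 * TTM_PAGE_FLAG_SWAPPED and calls ttm_tt_swapin() (see ttm_pool_populate()
 * above). example_swap_cycle() is hypothetical.
 */
static int
example_swap_cycle(struct ttm_tt *ttm)
{
	int ret;

	ret = ttm_tt_swapout(ttm, NULL);
	if (ret != 0)
		return (ret);
	/* ... memory pressure has passed; repopulate and swap back in ... */
	return (ttm->bdev->driver->ttm_tt_populate(ttm));
}
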
@ -1,6 +1,6 @@
# $FreeBSD$

.PATH:	${.CURDIR}/../../../dev/drm2
.PATH:	${.CURDIR}/../../../dev/drm2 ${.CURDIR}/../../../dev/drm2/ttm
KMOD	= drm2
SRCS	= \
	drm_agpsupport.c \
@ -32,7 +32,19 @@ SRCS = \
	drm_sman.c \
	drm_stub.c \
	drm_sysctl.c \
	drm_vm.c
	drm_vm.c \
	ttm_lock.c \
	ttm_object.c \
	ttm_tt.c \
	ttm_bo_util.c \
	ttm_bo.c \
	ttm_bo_manager.c \
	ttm_execbuf_util.c \
	ttm_memory.c \
	ttm_page_alloc.c \
	ttm_bo_vm.c
#ttm_agp_backend.c
#ttm_page_alloc_dma.c

.if ${MACHINE_CPUARCH} == "amd64"
SRCS +=	drm_ioc32.c