Start splitting out the Intel DMAR busdma backend to a generic place,
so it can be used on other IOMMU systems.

Provide MI iommu_unit, iommu_domain and iommu_ctx structs in
sys/iommu.h; use them as the first member of the MD dmar_unit,
dmar_domain and dmar_ctx. Change the namespace in the DMAR backend:
use the iommu_ prefix instead of dmar_. Move some macros and function
prototypes to sys/iommu.h.

Reviewed by:	kib
Sponsored by:	DARPA, AFRL
Differential Revision:	https://reviews.freebsd.org/D25574
commit 59e37c8a54
parent 420306c3b7
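The layout change the message describes - MI structs embedded as the first
member of the MD ones - is what lets the backend convert pointers between the
two views with plain casts, as the hunks below do with expressions such as
(struct dmar_unit *)domain->iodom.iommu and &dmar->iommu. A minimal sketch of
the pattern, with reduced stand-ins for the kernel structs (the real
definitions carry many more fields):

/* Reduced stand-in for the MI descriptor from sys/sys/iommu.h. */
struct iommu_unit {
	int unit;
	int dma_enabled;
};

/* The MD Intel DMAR unit embeds the MI struct as its first member. */
struct dmar_unit {
	struct iommu_unit iommu;	/* must remain the first member */
	int qi_enabled;			/* DMAR-specific state, for example */
};

/*
 * Because the MI struct sits at offset zero, MI and MD pointers alias
 * the same address, so a cast in either direction is valid.
 * This helper is hypothetical; the commit itself casts inline.
 */
static inline struct dmar_unit *
dmar_unit_from_iommu(struct iommu_unit *unit)
{
	return ((struct dmar_unit *)unit);
}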
sys/sys/iommu.h (new file, 155 lines)
@@ -0,0 +1,155 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2013 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_IOMMU_H_
+#define _SYS_IOMMU_H_
+
+#include <sys/queue.h>
+#include <sys/tree.h>
+#include <sys/types.h>
+
+/* Host or physical memory address, after translation. */
+typedef uint64_t iommu_haddr_t;
+/* Guest or bus address, before translation. */
+typedef uint64_t iommu_gaddr_t;
+
+struct iommu_map_entry;
+TAILQ_HEAD(iommu_map_entries_tailq, iommu_map_entry);
+
+struct iommu_qi_genseq {
+	u_int gen;
+	uint32_t seq;
+};
+
+struct iommu_map_entry {
+	iommu_gaddr_t start;
+	iommu_gaddr_t end;
+	iommu_gaddr_t first;		/* Least start in subtree */
+	iommu_gaddr_t last;		/* Greatest end in subtree */
+	iommu_gaddr_t free_down;	/* Max free space below the
+					   current R/B tree node */
+	u_int flags;
+	TAILQ_ENTRY(iommu_map_entry) dmamap_link; /* Link for dmamap entries */
+	RB_ENTRY(iommu_map_entry) rb_entry;	/* Links for domain entries */
+	TAILQ_ENTRY(iommu_map_entry) unroll_link; /* Link for unroll after
+						     dmamap_load failure */
+	struct iommu_domain *domain;
+	struct iommu_qi_genseq gseq;
+};
+
+#define	IOMMU_MAP_ENTRY_PLACE	0x0001	/* Fake entry */
+#define	IOMMU_MAP_ENTRY_RMRR	0x0002	/* Permanent, not linked by
+					   dmamap_link */
+#define	IOMMU_MAP_ENTRY_MAP	0x0004	/* Busdma created, linked by
+					   dmamap_link */
+#define	IOMMU_MAP_ENTRY_UNMAPPED	0x0010	/* No backing pages */
+#define	IOMMU_MAP_ENTRY_QI_NF	0x0020	/* qi task, do not free entry */
+#define	IOMMU_MAP_ENTRY_READ	0x1000	/* Read permitted */
+#define	IOMMU_MAP_ENTRY_WRITE	0x2000	/* Write permitted */
+#define	IOMMU_MAP_ENTRY_SNOOP	0x4000	/* Snoop */
+#define	IOMMU_MAP_ENTRY_TM	0x8000	/* Transient */
+
+struct iommu_unit {
+	struct mtx lock;
+	int unit;
+
+	int dma_enabled;
+
+	/* Busdma delayed map load */
+	struct task dmamap_load_task;
+	TAILQ_HEAD(, bus_dmamap_iommu) delayed_maps;
+	struct taskqueue *delayed_taskqueue;
+};
+
+/*
+ * Locking annotations:
+ * (u) - Protected by iommu unit lock
+ * (d) - Protected by domain lock
+ * (c) - Immutable after initialization
+ */
+
+struct iommu_domain {
+	struct iommu_unit *iommu;	/* (c) */
+	struct mtx lock;		/* (c) */
+	struct task unload_task;	/* (c) */
+	struct iommu_map_entries_tailq unload_entries; /* (d) Entries to
+							  unload */
+};
+
+struct iommu_ctx {
+	struct iommu_domain *domain;	/* (c) */
+	struct bus_dma_tag_iommu *tag;	/* (c) Root tag */
+	u_long loads;			/* atomic updates, for stat only */
+	u_long unloads;			/* same */
+	u_int flags;			/* (u) */
+};
+
+/* struct iommu_ctx flags */
+#define	IOMMU_CTX_FAULTED	0x0001	/* Fault was reported,
+					   last_fault_rec is valid */
+#define	IOMMU_CTX_DISABLED	0x0002	/* Device is disabled, the
+					   ephemeral reference is kept
+					   to prevent context destruction */
+
+#define	IOMMU_LOCK(unit)		mtx_lock(&(unit)->lock)
+#define	IOMMU_UNLOCK(unit)		mtx_unlock(&(unit)->lock)
+#define	IOMMU_ASSERT_LOCKED(unit)	mtx_assert(&(unit)->lock, MA_OWNED)
+
+#define	IOMMU_DOMAIN_LOCK(dom)		mtx_lock(&(dom)->lock)
+#define	IOMMU_DOMAIN_UNLOCK(dom)	mtx_unlock(&(dom)->lock)
+#define	IOMMU_DOMAIN_ASSERT_LOCKED(dom)	mtx_assert(&(dom)->lock, MA_OWNED)
+
+void iommu_free_ctx(struct iommu_ctx *ctx);
+void iommu_free_ctx_locked(struct iommu_unit *iommu, struct iommu_ctx *ctx);
+struct iommu_ctx *iommu_get_ctx(struct iommu_unit *, device_t dev,
+    uint16_t rid, bool id_mapped, bool rmrr_init);
+struct iommu_unit *iommu_find(device_t dev, bool verbose);
+void iommu_domain_unload_entry(struct iommu_map_entry *entry, bool free);
+void iommu_domain_unload(struct iommu_domain *domain,
+    struct iommu_map_entries_tailq *entries, bool cansleep);
+
+struct iommu_ctx *iommu_instantiate_ctx(struct iommu_unit *iommu,
+    device_t dev, bool rmrr);
+device_t iommu_get_requester(device_t dev, uint16_t *rid);
+int iommu_init_busdma(struct iommu_unit *unit);
+void iommu_fini_busdma(struct iommu_unit *unit);
+struct iommu_map_entry *iommu_map_alloc_entry(struct iommu_domain *iodom,
+    u_int flags);
+void iommu_map_free_entry(struct iommu_domain *, struct iommu_map_entry *);
+int iommu_map(struct iommu_domain *iodom,
+    const struct bus_dma_tag_common *common, iommu_gaddr_t size, int offset,
+    u_int eflags, u_int flags, vm_page_t *ma, struct iommu_map_entry **res);
+int iommu_map_region(struct iommu_domain *domain,
+    struct iommu_map_entry *entry, u_int eflags, u_int flags, vm_page_t *ma);
+
+#endif /* !_SYS_IOMMU_H_ */
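The new header gives MI code everything the delayed-load machinery needs. For
illustration, a hedged sketch of how an MD driver built on these types would
bring up the generic busdma state; example_dmar_attach() is hypothetical and
not part of this commit:

static int
example_dmar_attach(struct dmar_unit *dmar)
{
	int error;

	/*
	 * The MI busdma layer operates on the embedded iommu_unit
	 * (assuming the unit lock was initialized earlier in attach).
	 */
	error = iommu_init_busdma(&dmar->iommu);
	if (error != 0)
		return (error);

	/* MI and MD code serialize on the same embedded mutex. */
	IOMMU_LOCK(&dmar->iommu);
	/* ... MD initialization protected by the unit lock ... */
	IOMMU_UNLOCK(&dmar->iommu);
	return (0);
}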
sys/x86/iommu/busdma_dmar.c:

@@ -74,20 +74,22 @@ __FBSDID("$FreeBSD$");
  */
 
 static bool
-dmar_bus_dma_is_dev_disabled(int domain, int bus, int slot, int func)
+iommu_bus_dma_is_dev_disabled(int domain, int bus, int slot, int func)
 {
 	char str[128], *env;
 	int default_bounce;
 	bool ret;
 	static const char bounce_str[] = "bounce";
-	static const char dmar_str[] = "dmar";
+	static const char iommu_str[] = "iommu";
+	static const char dmar_str[] = "dmar"; /* compatibility */
 
 	default_bounce = 0;
 	env = kern_getenv("hw.busdma.default");
 	if (env != NULL) {
 		if (strcmp(env, bounce_str) == 0)
 			default_bounce = 1;
-		else if (strcmp(env, dmar_str) == 0)
+		else if (strcmp(env, iommu_str) == 0 ||
+		    strcmp(env, dmar_str) == 0)
 			default_bounce = 0;
 		freeenv(env);
 	}
@@ -99,7 +101,8 @@ dmar_bus_dma_is_dev_disabled(int domain, int bus, int slot, int func)
 		return (default_bounce != 0);
 	if (strcmp(env, bounce_str) == 0)
 		ret = true;
-	else if (strcmp(env, dmar_str) == 0)
+	else if (strcmp(env, iommu_str) == 0 ||
+	    strcmp(env, dmar_str) == 0)
 		ret = false;
 	else
 		ret = default_bounce != 0;
@@ -117,7 +120,7 @@ dmar_bus_dma_is_dev_disabled(int domain, int bus, int slot, int func)
  * bounce mapping.
  */
 device_t
-dmar_get_requester(device_t dev, uint16_t *rid)
+iommu_get_requester(device_t dev, uint16_t *rid)
 {
 	devclass_t pci_class;
 	device_t l, pci, pcib, pcip, pcibp, requester;
@@ -137,15 +140,15 @@ dmar_get_requester(device_t dev, uint16_t *rid)
 	 */
 	for (;;) {
 		pci = device_get_parent(l);
-		KASSERT(pci != NULL, ("dmar_get_requester(%s): NULL parent "
+		KASSERT(pci != NULL, ("iommu_get_requester(%s): NULL parent "
 		    "for %s", device_get_name(dev), device_get_name(l)));
 		KASSERT(device_get_devclass(pci) == pci_class,
-		    ("dmar_get_requester(%s): non-pci parent %s for %s",
+		    ("iommu_get_requester(%s): non-pci parent %s for %s",
 		    device_get_name(dev), device_get_name(pci),
 		    device_get_name(l)));
 
 		pcib = device_get_parent(pci);
-		KASSERT(pcib != NULL, ("dmar_get_requester(%s): NULL bridge "
+		KASSERT(pcib != NULL, ("iommu_get_requester(%s): NULL bridge "
 		    "for %s", device_get_name(dev), device_get_name(pci)));
 
 		/*
@@ -228,15 +231,15 @@ dmar_get_requester(device_t dev, uint16_t *rid)
 	return (requester);
 }
 
-struct dmar_ctx *
-dmar_instantiate_ctx(struct dmar_unit *dmar, device_t dev, bool rmrr)
+struct iommu_ctx *
+iommu_instantiate_ctx(struct iommu_unit *unit, device_t dev, bool rmrr)
 {
 	device_t requester;
-	struct dmar_ctx *ctx;
+	struct iommu_ctx *ctx;
 	bool disabled;
 	uint16_t rid;
 
-	requester = dmar_get_requester(dev, &rid);
+	requester = iommu_get_requester(dev, &rid);
 
 	/*
 	 * If the user requested the IOMMU disabled for the device, we
@@ -245,10 +248,10 @@ dmar_instantiate_ctx(struct dmar_unit *dmar, device_t dev, bool rmrr)
 	 * Instead provide the identity mapping for the device
 	 * context.
 	 */
-	disabled = dmar_bus_dma_is_dev_disabled(pci_get_domain(requester),
+	disabled = iommu_bus_dma_is_dev_disabled(pci_get_domain(requester),
 	    pci_get_bus(requester), pci_get_slot(requester),
 	    pci_get_function(requester));
-	ctx = dmar_get_ctx_for_dev(dmar, requester, rid, disabled, rmrr);
+	ctx = iommu_get_ctx(unit, requester, rid, disabled, rmrr);
 	if (ctx == NULL)
 		return (NULL);
 	if (disabled) {
@@ -256,12 +259,12 @@ dmar_instantiate_ctx(struct dmar_unit *dmar, device_t dev, bool rmrr)
 		 * Keep the first reference on context, release the
 		 * later refs.
 		 */
-		DMAR_LOCK(dmar);
-		if ((ctx->flags & DMAR_CTX_DISABLED) == 0) {
-			ctx->flags |= DMAR_CTX_DISABLED;
-			DMAR_UNLOCK(dmar);
+		IOMMU_LOCK(unit);
+		if ((ctx->flags & IOMMU_CTX_DISABLED) == 0) {
+			ctx->flags |= IOMMU_CTX_DISABLED;
+			IOMMU_UNLOCK(unit);
 		} else {
-			dmar_free_ctx_locked(dmar, ctx);
+			iommu_free_ctx_locked(unit, ctx);
 		}
 		ctx = NULL;
 	}
@@ -271,36 +274,36 @@ dmar_instantiate_ctx(struct dmar_unit *dmar, device_t dev, bool rmrr)
 bus_dma_tag_t
 acpi_iommu_get_dma_tag(device_t dev, device_t child)
 {
-	struct dmar_unit *dmar;
-	struct dmar_ctx *ctx;
+	struct iommu_unit *unit;
+	struct iommu_ctx *ctx;
 	bus_dma_tag_t res;
 
-	dmar = dmar_find(child, bootverbose);
+	unit = iommu_find(child, bootverbose);
 	/* Not in scope of any DMAR ? */
-	if (dmar == NULL)
+	if (unit == NULL)
 		return (NULL);
-	if (!dmar->dma_enabled)
+	if (!unit->dma_enabled)
 		return (NULL);
-	dmar_quirks_pre_use(dmar);
-	dmar_instantiate_rmrr_ctxs(dmar);
+	dmar_quirks_pre_use(unit);
+	dmar_instantiate_rmrr_ctxs(unit);
 
-	ctx = dmar_instantiate_ctx(dmar, child, false);
-	res = ctx == NULL ? NULL : (bus_dma_tag_t)&ctx->ctx_tag;
+	ctx = iommu_instantiate_ctx(unit, child, false);
+	res = ctx == NULL ? NULL : (bus_dma_tag_t)ctx->tag;
 	return (res);
 }
 
 bool
 bus_dma_dmar_set_buswide(device_t dev)
 {
-	struct dmar_unit *dmar;
+	struct iommu_unit *unit;
 	device_t parent;
 	u_int busno, slot, func;
 
 	parent = device_get_parent(dev);
 	if (device_get_devclass(parent) != devclass_find("pci"))
 		return (false);
-	dmar = dmar_find(dev, bootverbose);
-	if (dmar == NULL)
+	unit = iommu_find(dev, bootverbose);
+	if (unit == NULL)
 		return (false);
 	busno = pci_get_bus(dev);
 	slot = pci_get_slot(dev);
@@ -309,40 +312,40 @@ bus_dma_dmar_set_buswide(device_t dev)
 		if (bootverbose) {
 			device_printf(dev,
 			    "dmar%d pci%d:%d:%d requested buswide busdma\n",
-			    dmar->unit, busno, slot, func);
+			    unit->unit, busno, slot, func);
 		}
 		return (false);
 	}
-	dmar_set_buswide_ctx(dmar, busno);
+	dmar_set_buswide_ctx(unit, busno);
 	return (true);
 }
 
-static MALLOC_DEFINE(M_DMAR_DMAMAP, "dmar_dmamap", "Intel DMAR DMA Map");
+static MALLOC_DEFINE(M_IOMMU_DMAMAP, "iommu_dmamap", "IOMMU DMA Map");
 
-static void dmar_bus_schedule_dmamap(struct dmar_unit *unit,
-    struct bus_dmamap_dmar *map);
+static void iommu_bus_schedule_dmamap(struct iommu_unit *unit,
+    struct bus_dmamap_iommu *map);
 
 static int
-dmar_bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
+iommu_bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
     bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
     bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
     int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
    void *lockfuncarg, bus_dma_tag_t *dmat)
 {
-	struct bus_dma_tag_dmar *newtag, *oldtag;
+	struct bus_dma_tag_iommu *newtag, *oldtag;
 	int error;
 
 	*dmat = NULL;
 	error = common_bus_dma_tag_create(parent != NULL ?
-	    &((struct bus_dma_tag_dmar *)parent)->common : NULL, alignment,
+	    &((struct bus_dma_tag_iommu *)parent)->common : NULL, alignment,
 	    boundary, lowaddr, highaddr, filter, filterarg, maxsize,
 	    nsegments, maxsegsz, flags, lockfunc, lockfuncarg,
-	    sizeof(struct bus_dma_tag_dmar), (void **)&newtag);
+	    sizeof(struct bus_dma_tag_iommu), (void **)&newtag);
 	if (error != 0)
 		goto out;
 
-	oldtag = (struct bus_dma_tag_dmar *)parent;
-	newtag->common.impl = &bus_dma_dmar_impl;
+	oldtag = (struct bus_dma_tag_iommu *)parent;
+	newtag->common.impl = &bus_dma_iommu_impl;
 	newtag->ctx = oldtag->ctx;
 	newtag->owner = oldtag->owner;
 
@@ -355,20 +358,20 @@ dmar_bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
 }
 
 static int
-dmar_bus_dma_tag_set_domain(bus_dma_tag_t dmat)
+iommu_bus_dma_tag_set_domain(bus_dma_tag_t dmat)
 {
 
 	return (0);
 }
 
 static int
-dmar_bus_dma_tag_destroy(bus_dma_tag_t dmat1)
+iommu_bus_dma_tag_destroy(bus_dma_tag_t dmat1)
 {
-	struct bus_dma_tag_dmar *dmat, *dmat_copy, *parent;
+	struct bus_dma_tag_iommu *dmat, *dmat_copy, *parent;
 	int error;
 
 	error = 0;
-	dmat_copy = dmat = (struct bus_dma_tag_dmar *)dmat1;
+	dmat_copy = dmat = (struct bus_dma_tag_iommu *)dmat1;
 
 	if (dmat != NULL) {
 		if (dmat->map_count != 0) {
@@ -376,12 +379,12 @@ dmar_bus_dma_tag_destroy(bus_dma_tag_t dmat1)
 			goto out;
 		}
 		while (dmat != NULL) {
-			parent = (struct bus_dma_tag_dmar *)dmat->common.parent;
+			parent = (struct bus_dma_tag_iommu *)dmat->common.parent;
 			if (atomic_fetchadd_int(&dmat->common.ref_count, -1) ==
 			    1) {
-				if (dmat == &dmat->ctx->ctx_tag)
-					dmar_free_ctx(dmat->ctx);
-				free_domain(dmat->segments, M_DMAR_DMAMAP);
+				if (dmat == dmat->ctx->tag)
+					iommu_free_ctx(dmat->ctx);
+				free_domain(dmat->segments, M_IOMMU_DMAMAP);
 				free(dmat, M_DEVBUF);
 				dmat = parent;
 			} else
@@ -394,20 +397,20 @@ dmar_bus_dma_tag_destroy(bus_dma_tag_t dmat1)
 }
 
 static bool
-dmar_bus_dma_id_mapped(bus_dma_tag_t dmat, vm_paddr_t buf, bus_size_t buflen)
+iommu_bus_dma_id_mapped(bus_dma_tag_t dmat, vm_paddr_t buf, bus_size_t buflen)
 {
 
 	return (false);
 }
 
 static int
-dmar_bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
+iommu_bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
 {
-	struct bus_dma_tag_dmar *tag;
-	struct bus_dmamap_dmar *map;
+	struct bus_dma_tag_iommu *tag;
+	struct bus_dmamap_iommu *map;
 
-	tag = (struct bus_dma_tag_dmar *)dmat;
-	map = malloc_domainset(sizeof(*map), M_DMAR_DMAMAP,
+	tag = (struct bus_dma_tag_iommu *)dmat;
+	map = malloc_domainset(sizeof(*map), M_IOMMU_DMAMAP,
 	    DOMAINSET_PREF(tag->common.domain), M_NOWAIT | M_ZERO);
 	if (map == NULL) {
 		*mapp = NULL;
@@ -415,10 +418,10 @@ dmar_bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
 	}
 	if (tag->segments == NULL) {
 		tag->segments = malloc_domainset(sizeof(bus_dma_segment_t) *
-		    tag->common.nsegments, M_DMAR_DMAMAP,
+		    tag->common.nsegments, M_IOMMU_DMAMAP,
 		    DOMAINSET_PREF(tag->common.domain), M_NOWAIT);
 		if (tag->segments == NULL) {
-			free_domain(map, M_DMAR_DMAMAP);
+			free_domain(map, M_IOMMU_DMAMAP);
 			*mapp = NULL;
 			return (ENOMEM);
 		}
@@ -434,23 +437,23 @@ dmar_bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
 }
 
 static int
-dmar_bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map1)
+iommu_bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map1)
 {
-	struct bus_dma_tag_dmar *tag;
-	struct bus_dmamap_dmar *map;
-	struct dmar_domain *domain;
+	struct bus_dma_tag_iommu *tag;
+	struct bus_dmamap_iommu *map;
+	struct iommu_domain *domain;
 
-	tag = (struct bus_dma_tag_dmar *)dmat;
-	map = (struct bus_dmamap_dmar *)map1;
+	tag = (struct bus_dma_tag_iommu *)dmat;
+	map = (struct bus_dmamap_iommu *)map1;
 	if (map != NULL) {
 		domain = tag->ctx->domain;
-		DMAR_DOMAIN_LOCK(domain);
+		IOMMU_DOMAIN_LOCK(domain);
 		if (!TAILQ_EMPTY(&map->map_entries)) {
-			DMAR_DOMAIN_UNLOCK(domain);
+			IOMMU_DOMAIN_UNLOCK(domain);
 			return (EBUSY);
 		}
-		DMAR_DOMAIN_UNLOCK(domain);
-		free_domain(map, M_DMAR_DMAMAP);
+		IOMMU_DOMAIN_UNLOCK(domain);
+		free_domain(map, M_IOMMU_DMAMAP);
 	}
 	tag->map_count--;
 	return (0);
@@ -458,15 +461,15 @@ dmar_bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map1)
 
 
 static int
-dmar_bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
+iommu_bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
     bus_dmamap_t *mapp)
 {
-	struct bus_dma_tag_dmar *tag;
-	struct bus_dmamap_dmar *map;
+	struct bus_dma_tag_iommu *tag;
+	struct bus_dmamap_iommu *map;
 	int error, mflags;
 	vm_memattr_t attr;
 
-	error = dmar_bus_dmamap_create(dmat, flags, mapp);
+	error = iommu_bus_dmamap_create(dmat, flags, mapp);
 	if (error != 0)
 		return (error);
@@ -475,23 +478,23 @@ dmar_bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
 	attr = (flags & BUS_DMA_NOCACHE) != 0 ? VM_MEMATTR_UNCACHEABLE :
 	    VM_MEMATTR_DEFAULT;
 
-	tag = (struct bus_dma_tag_dmar *)dmat;
-	map = (struct bus_dmamap_dmar *)*mapp;
+	tag = (struct bus_dma_tag_iommu *)dmat;
+	map = (struct bus_dmamap_iommu *)*mapp;
 
 	if (tag->common.maxsize < PAGE_SIZE &&
 	    tag->common.alignment <= tag->common.maxsize &&
 	    attr == VM_MEMATTR_DEFAULT) {
 		*vaddr = malloc_domainset(tag->common.maxsize, M_DEVBUF,
 		    DOMAINSET_PREF(tag->common.domain), mflags);
-		map->flags |= BUS_DMAMAP_DMAR_MALLOC;
+		map->flags |= BUS_DMAMAP_IOMMU_MALLOC;
 	} else {
 		*vaddr = (void *)kmem_alloc_attr_domainset(
 		    DOMAINSET_PREF(tag->common.domain), tag->common.maxsize,
 		    mflags, 0ul, BUS_SPACE_MAXADDR, attr);
-		map->flags |= BUS_DMAMAP_DMAR_KMEM_ALLOC;
+		map->flags |= BUS_DMAMAP_IOMMU_KMEM_ALLOC;
 	}
 	if (*vaddr == NULL) {
-		dmar_bus_dmamap_destroy(dmat, *mapp);
+		iommu_bus_dmamap_destroy(dmat, *mapp);
 		*mapp = NULL;
 		return (ENOMEM);
 	}
@@ -499,37 +502,37 @@ dmar_bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
 }
 
 static void
-dmar_bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map1)
+iommu_bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map1)
 {
-	struct bus_dma_tag_dmar *tag;
-	struct bus_dmamap_dmar *map;
+	struct bus_dma_tag_iommu *tag;
+	struct bus_dmamap_iommu *map;
 
-	tag = (struct bus_dma_tag_dmar *)dmat;
-	map = (struct bus_dmamap_dmar *)map1;
+	tag = (struct bus_dma_tag_iommu *)dmat;
+	map = (struct bus_dmamap_iommu *)map1;
 
-	if ((map->flags & BUS_DMAMAP_DMAR_MALLOC) != 0) {
+	if ((map->flags & BUS_DMAMAP_IOMMU_MALLOC) != 0) {
 		free_domain(vaddr, M_DEVBUF);
-		map->flags &= ~BUS_DMAMAP_DMAR_MALLOC;
+		map->flags &= ~BUS_DMAMAP_IOMMU_MALLOC;
 	} else {
-		KASSERT((map->flags & BUS_DMAMAP_DMAR_KMEM_ALLOC) != 0,
-		    ("dmar_bus_dmamem_free for non alloced map %p", map));
+		KASSERT((map->flags & BUS_DMAMAP_IOMMU_KMEM_ALLOC) != 0,
+		    ("iommu_bus_dmamem_free for non alloced map %p", map));
 		kmem_free((vm_offset_t)vaddr, tag->common.maxsize);
-		map->flags &= ~BUS_DMAMAP_DMAR_KMEM_ALLOC;
+		map->flags &= ~BUS_DMAMAP_IOMMU_KMEM_ALLOC;
 	}
 
-	dmar_bus_dmamap_destroy(dmat, map1);
+	iommu_bus_dmamap_destroy(dmat, map1);
 }
 
 static int
-dmar_bus_dmamap_load_something1(struct bus_dma_tag_dmar *tag,
-    struct bus_dmamap_dmar *map, vm_page_t *ma, int offset, bus_size_t buflen,
+iommu_bus_dmamap_load_something1(struct bus_dma_tag_iommu *tag,
+    struct bus_dmamap_iommu *map, vm_page_t *ma, int offset, bus_size_t buflen,
     int flags, bus_dma_segment_t *segs, int *segp,
-    struct dmar_map_entries_tailq *unroll_list)
+    struct iommu_map_entries_tailq *unroll_list)
 {
-	struct dmar_ctx *ctx;
-	struct dmar_domain *domain;
-	struct dmar_map_entry *entry;
-	dmar_gaddr_t size;
+	struct iommu_ctx *ctx;
+	struct iommu_domain *domain;
+	struct iommu_map_entry *entry;
+	iommu_gaddr_t size;
 	bus_size_t buflen1;
 	int error, idx, gas_flags, seg;
 
@@ -555,17 +558,17 @@ dmar_bus_dmamap_load_something1(struct bus_dma_tag_dmar *tag,
 		 * (Too) optimistically allow split if there are more
 		 * then one segments left.
 		 */
-		gas_flags = map->cansleep ? DMAR_GM_CANWAIT : 0;
+		gas_flags = map->cansleep ? IOMMU_MF_CANWAIT : 0;
 		if (seg + 1 < tag->common.nsegments)
-			gas_flags |= DMAR_GM_CANSPLIT;
+			gas_flags |= IOMMU_MF_CANSPLIT;
 
-		error = dmar_gas_map(domain, &tag->common, size, offset,
-		    DMAR_MAP_ENTRY_READ |
-		    ((flags & BUS_DMA_NOWRITE) == 0 ? DMAR_MAP_ENTRY_WRITE : 0),
+		error = iommu_map(domain, &tag->common, size, offset,
+		    IOMMU_MAP_ENTRY_READ |
+		    ((flags & BUS_DMA_NOWRITE) == 0 ? IOMMU_MAP_ENTRY_WRITE : 0),
 		    gas_flags, ma + idx, &entry);
 		if (error != 0)
 			break;
-		if ((gas_flags & DMAR_GM_CANSPLIT) != 0) {
+		if ((gas_flags & IOMMU_MF_CANSPLIT) != 0) {
 			KASSERT(size >= entry->end - entry->start,
 			    ("split increased entry size %jx %jx %jx",
 			    (uintmax_t)size, (uintmax_t)entry->start,
@@ -596,7 +599,7 @@ dmar_bus_dmamap_load_something1(struct bus_dma_tag_dmar *tag,
 		    (uintmax_t)entry->start, (uintmax_t)entry->end,
 		    (uintmax_t)tag->common.lowaddr,
 		    (uintmax_t)tag->common.highaddr));
-		KASSERT(dmar_test_boundary(entry->start + offset, buflen1,
+		KASSERT(iommu_test_boundary(entry->start + offset, buflen1,
 		    tag->common.boundary),
 		    ("boundary failed: ctx %p start 0x%jx end 0x%jx "
 		    "boundary 0x%jx", ctx, (uintmax_t)entry->start,
@@ -607,10 +610,10 @@ dmar_bus_dmamap_load_something1(struct bus_dma_tag_dmar *tag,
 		    (uintmax_t)entry->start, (uintmax_t)entry->end,
 		    (uintmax_t)buflen1, (uintmax_t)tag->common.maxsegsz));
 
-		DMAR_DOMAIN_LOCK(domain);
+		IOMMU_DOMAIN_LOCK(domain);
 		TAILQ_INSERT_TAIL(&map->map_entries, entry, dmamap_link);
-		entry->flags |= DMAR_MAP_ENTRY_MAP;
-		DMAR_DOMAIN_UNLOCK(domain);
+		entry->flags |= IOMMU_MAP_ENTRY_MAP;
+		IOMMU_DOMAIN_UNLOCK(domain);
 		TAILQ_INSERT_TAIL(unroll_list, entry, unroll_link);
 
 		segs[seg].ds_addr = entry->start + offset;
@@ -627,14 +630,14 @@ dmar_bus_dmamap_load_something1(struct bus_dma_tag_dmar *tag,
 }
 
 static int
-dmar_bus_dmamap_load_something(struct bus_dma_tag_dmar *tag,
-    struct bus_dmamap_dmar *map, vm_page_t *ma, int offset, bus_size_t buflen,
+iommu_bus_dmamap_load_something(struct bus_dma_tag_iommu *tag,
+    struct bus_dmamap_iommu *map, vm_page_t *ma, int offset, bus_size_t buflen,
     int flags, bus_dma_segment_t *segs, int *segp)
 {
-	struct dmar_ctx *ctx;
-	struct dmar_domain *domain;
-	struct dmar_map_entry *entry, *entry1;
-	struct dmar_map_entries_tailq unroll_list;
+	struct iommu_ctx *ctx;
+	struct iommu_domain *domain;
+	struct iommu_map_entry *entry, *entry1;
+	struct iommu_map_entries_tailq unroll_list;
 	int error;
 
 	ctx = tag->ctx;
@@ -642,7 +645,7 @@ dmar_bus_dmamap_load_something(struct bus_dma_tag_dmar *tag,
 	atomic_add_long(&ctx->loads, 1);
 
 	TAILQ_INIT(&unroll_list);
-	error = dmar_bus_dmamap_load_something1(tag, map, ma, offset,
+	error = iommu_bus_dmamap_load_something1(tag, map, ma, offset,
 	    buflen, flags, segs, segp, &unroll_list);
 	if (error != 0) {
 		/*
@@ -650,7 +653,7 @@ dmar_bus_dmamap_load_something(struct bus_dma_tag_dmar *tag,
 		 * partial buffer load, so unfortunately we have to
 		 * revert all work done.
 		 */
-		DMAR_DOMAIN_LOCK(domain);
+		IOMMU_DOMAIN_LOCK(domain);
 		TAILQ_FOREACH_SAFE(entry, &unroll_list, unroll_link,
 		    entry1) {
 			/*
@@ -664,8 +667,8 @@ dmar_bus_dmamap_load_something(struct bus_dma_tag_dmar *tag,
 			TAILQ_INSERT_TAIL(&domain->unload_entries, entry,
 			    dmamap_link);
 		}
-		DMAR_DOMAIN_UNLOCK(domain);
-		taskqueue_enqueue(domain->dmar->delayed_taskqueue,
+		IOMMU_DOMAIN_UNLOCK(domain);
+		taskqueue_enqueue(domain->iommu->delayed_taskqueue,
 		    &domain->unload_task);
 	}
 
@@ -673,37 +676,37 @@ dmar_bus_dmamap_load_something(struct bus_dma_tag_dmar *tag,
 	    !map->cansleep)
 		error = EINPROGRESS;
 	if (error == EINPROGRESS)
-		dmar_bus_schedule_dmamap(domain->dmar, map);
+		iommu_bus_schedule_dmamap(domain->iommu, map);
 	return (error);
 }
 
 static int
-dmar_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map1,
+iommu_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map1,
     struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
     bus_dma_segment_t *segs, int *segp)
 {
-	struct bus_dma_tag_dmar *tag;
-	struct bus_dmamap_dmar *map;
+	struct bus_dma_tag_iommu *tag;
+	struct bus_dmamap_iommu *map;
 
-	tag = (struct bus_dma_tag_dmar *)dmat;
-	map = (struct bus_dmamap_dmar *)map1;
-	return (dmar_bus_dmamap_load_something(tag, map, ma, ma_offs, tlen,
+	tag = (struct bus_dma_tag_iommu *)dmat;
+	map = (struct bus_dmamap_iommu *)map1;
+	return (iommu_bus_dmamap_load_something(tag, map, ma, ma_offs, tlen,
 	    flags, segs, segp));
 }
 
 static int
-dmar_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map1,
+iommu_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map1,
     vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs,
     int *segp)
 {
-	struct bus_dma_tag_dmar *tag;
-	struct bus_dmamap_dmar *map;
+	struct bus_dma_tag_iommu *tag;
+	struct bus_dmamap_iommu *map;
 	vm_page_t *ma, fma;
 	vm_paddr_t pstart, pend, paddr;
 	int error, i, ma_cnt, mflags, offset;
 
-	tag = (struct bus_dma_tag_dmar *)dmat;
-	map = (struct bus_dmamap_dmar *)map1;
+	tag = (struct bus_dma_tag_iommu *)dmat;
+	map = (struct bus_dmamap_iommu *)map1;
 	pstart = trunc_page(buf);
 	pend = round_page(buf + buflen);
 	offset = buf & PAGE_MASK;
@@ -735,7 +738,7 @@ dmar_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map1,
 			ma[i] = &fma[i];
 		}
 	}
-	error = dmar_bus_dmamap_load_something(tag, map, ma, offset, buflen,
+	error = iommu_bus_dmamap_load_something(tag, map, ma, offset, buflen,
 	    flags, segs, segp);
 	free(fma, M_DEVBUF);
 	free(ma, M_DEVBUF);
@@ -743,18 +746,18 @@ dmar_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map1,
 }
 
 static int
-dmar_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map1, void *buf,
+iommu_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map1, void *buf,
     bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs,
     int *segp)
 {
-	struct bus_dma_tag_dmar *tag;
-	struct bus_dmamap_dmar *map;
+	struct bus_dma_tag_iommu *tag;
+	struct bus_dmamap_iommu *map;
 	vm_page_t *ma, fma;
 	vm_paddr_t pstart, pend, paddr;
 	int error, i, ma_cnt, mflags, offset;
 
-	tag = (struct bus_dma_tag_dmar *)dmat;
-	map = (struct bus_dmamap_dmar *)map1;
+	tag = (struct bus_dma_tag_iommu *)dmat;
+	map = (struct bus_dmamap_iommu *)map1;
 	pstart = trunc_page((vm_offset_t)buf);
 	pend = round_page((vm_offset_t)buf + buflen);
 	offset = (vm_offset_t)buf & PAGE_MASK;
@@ -788,7 +791,7 @@ dmar_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map1, void *buf,
 			ma[i] = &fma[i];
 		}
 	}
-	error = dmar_bus_dmamap_load_something(tag, map, ma, offset, buflen,
+	error = iommu_bus_dmamap_load_something(tag, map, ma, offset, buflen,
 	    flags, segs, segp);
 	free(ma, M_DEVBUF);
 	free(fma, M_DEVBUF);
@@ -796,29 +799,29 @@ dmar_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map1, void *buf,
 }
 
 static void
-dmar_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map1,
+iommu_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map1,
     struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
 {
-	struct bus_dmamap_dmar *map;
+	struct bus_dmamap_iommu *map;
 
 	if (map1 == NULL)
 		return;
-	map = (struct bus_dmamap_dmar *)map1;
+	map = (struct bus_dmamap_iommu *)map1;
 	map->mem = *mem;
-	map->tag = (struct bus_dma_tag_dmar *)dmat;
+	map->tag = (struct bus_dma_tag_iommu *)dmat;
 	map->callback = callback;
 	map->callback_arg = callback_arg;
 }
 
 static bus_dma_segment_t *
-dmar_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map1,
+iommu_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map1,
     bus_dma_segment_t *segs, int nsegs, int error)
 {
-	struct bus_dma_tag_dmar *tag;
-	struct bus_dmamap_dmar *map;
+	struct bus_dma_tag_iommu *tag;
+	struct bus_dmamap_iommu *map;
 
-	tag = (struct bus_dma_tag_dmar *)dmat;
-	map = (struct bus_dmamap_dmar *)map1;
+	tag = (struct bus_dma_tag_iommu *)dmat;
+	map = (struct bus_dmamap_iommu *)map1;
 
 	if (!map->locked) {
 		KASSERT(map->cansleep,
@@ -848,76 +851,76 @@ dmar_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map1,
  * On amd64, we assume that sf allocation cannot fail.
  */
 static void
-dmar_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map1)
+iommu_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map1)
 {
-	struct bus_dma_tag_dmar *tag;
-	struct bus_dmamap_dmar *map;
-	struct dmar_ctx *ctx;
-	struct dmar_domain *domain;
+	struct bus_dma_tag_iommu *tag;
+	struct bus_dmamap_iommu *map;
+	struct iommu_ctx *ctx;
+	struct iommu_domain *domain;
 #if defined(__amd64__)
-	struct dmar_map_entries_tailq entries;
+	struct iommu_map_entries_tailq entries;
 #endif
 
-	tag = (struct bus_dma_tag_dmar *)dmat;
-	map = (struct bus_dmamap_dmar *)map1;
+	tag = (struct bus_dma_tag_iommu *)dmat;
+	map = (struct bus_dmamap_iommu *)map1;
 	ctx = tag->ctx;
 	domain = ctx->domain;
 	atomic_add_long(&ctx->unloads, 1);
 
 #if defined(__i386__)
-	DMAR_DOMAIN_LOCK(domain);
+	IOMMU_DOMAIN_LOCK(domain);
 	TAILQ_CONCAT(&domain->unload_entries, &map->map_entries, dmamap_link);
-	DMAR_DOMAIN_UNLOCK(domain);
-	taskqueue_enqueue(domain->dmar->delayed_taskqueue,
+	IOMMU_DOMAIN_UNLOCK(domain);
+	taskqueue_enqueue(domain->iommu->delayed_taskqueue,
	    &domain->unload_task);
 #else /* defined(__amd64__) */
 	TAILQ_INIT(&entries);
-	DMAR_DOMAIN_LOCK(domain);
+	IOMMU_DOMAIN_LOCK(domain);
 	TAILQ_CONCAT(&entries, &map->map_entries, dmamap_link);
-	DMAR_DOMAIN_UNLOCK(domain);
+	IOMMU_DOMAIN_UNLOCK(domain);
 	THREAD_NO_SLEEPING();
-	dmar_domain_unload(domain, &entries, false);
+	iommu_domain_unload(domain, &entries, false);
 	THREAD_SLEEPING_OK();
-	KASSERT(TAILQ_EMPTY(&entries), ("lazy dmar_ctx_unload %p", ctx));
+	KASSERT(TAILQ_EMPTY(&entries), ("lazy iommu_ctx_unload %p", ctx));
 #endif
 }
 
 static void
-dmar_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
+iommu_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
     bus_dmasync_op_t op)
 {
 }
 
-struct bus_dma_impl bus_dma_dmar_impl = {
-	.tag_create = dmar_bus_dma_tag_create,
-	.tag_destroy = dmar_bus_dma_tag_destroy,
-	.tag_set_domain = dmar_bus_dma_tag_set_domain,
-	.id_mapped = dmar_bus_dma_id_mapped,
-	.map_create = dmar_bus_dmamap_create,
-	.map_destroy = dmar_bus_dmamap_destroy,
-	.mem_alloc = dmar_bus_dmamem_alloc,
-	.mem_free = dmar_bus_dmamem_free,
-	.load_phys = dmar_bus_dmamap_load_phys,
-	.load_buffer = dmar_bus_dmamap_load_buffer,
-	.load_ma = dmar_bus_dmamap_load_ma,
-	.map_waitok = dmar_bus_dmamap_waitok,
-	.map_complete = dmar_bus_dmamap_complete,
-	.map_unload = dmar_bus_dmamap_unload,
-	.map_sync = dmar_bus_dmamap_sync,
+struct bus_dma_impl bus_dma_iommu_impl = {
+	.tag_create = iommu_bus_dma_tag_create,
+	.tag_destroy = iommu_bus_dma_tag_destroy,
+	.tag_set_domain = iommu_bus_dma_tag_set_domain,
+	.id_mapped = iommu_bus_dma_id_mapped,
+	.map_create = iommu_bus_dmamap_create,
+	.map_destroy = iommu_bus_dmamap_destroy,
+	.mem_alloc = iommu_bus_dmamem_alloc,
+	.mem_free = iommu_bus_dmamem_free,
+	.load_phys = iommu_bus_dmamap_load_phys,
+	.load_buffer = iommu_bus_dmamap_load_buffer,
+	.load_ma = iommu_bus_dmamap_load_ma,
+	.map_waitok = iommu_bus_dmamap_waitok,
+	.map_complete = iommu_bus_dmamap_complete,
+	.map_unload = iommu_bus_dmamap_unload,
+	.map_sync = iommu_bus_dmamap_sync,
 };
 
 static void
-dmar_bus_task_dmamap(void *arg, int pending)
+iommu_bus_task_dmamap(void *arg, int pending)
 {
-	struct bus_dma_tag_dmar *tag;
-	struct bus_dmamap_dmar *map;
-	struct dmar_unit *unit;
+	struct bus_dma_tag_iommu *tag;
+	struct bus_dmamap_iommu *map;
+	struct iommu_unit *unit;
 
 	unit = arg;
-	DMAR_LOCK(unit);
+	IOMMU_LOCK(unit);
 	while ((map = TAILQ_FIRST(&unit->delayed_maps)) != NULL) {
 		TAILQ_REMOVE(&unit->delayed_maps, map, delay_link);
-		DMAR_UNLOCK(unit);
+		IOMMU_UNLOCK(unit);
 		tag = map->tag;
 		map->cansleep = true;
 		map->locked = false;
@@ -931,39 +934,42 @@ dmar_bus_task_dmamap(void *arg, int pending)
 		} else
 			map->locked = true;
 		map->cansleep = false;
-		DMAR_LOCK(unit);
+		IOMMU_LOCK(unit);
 	}
-	DMAR_UNLOCK(unit);
+	IOMMU_UNLOCK(unit);
 }
 
 static void
-dmar_bus_schedule_dmamap(struct dmar_unit *unit, struct bus_dmamap_dmar *map)
+iommu_bus_schedule_dmamap(struct iommu_unit *unit, struct bus_dmamap_iommu *map)
 {
 
 	map->locked = false;
-	DMAR_LOCK(unit);
+	IOMMU_LOCK(unit);
 	TAILQ_INSERT_TAIL(&unit->delayed_maps, map, delay_link);
-	DMAR_UNLOCK(unit);
+	IOMMU_UNLOCK(unit);
 	taskqueue_enqueue(unit->delayed_taskqueue, &unit->dmamap_load_task);
 }
 
 int
-dmar_init_busdma(struct dmar_unit *unit)
+iommu_init_busdma(struct iommu_unit *unit)
 {
+	int error;
 
 	unit->dma_enabled = 1;
-	TUNABLE_INT_FETCH("hw.dmar.dma", &unit->dma_enabled);
+	error = TUNABLE_INT_FETCH("hw.iommu.dma", &unit->dma_enabled);
+	if (error == 0) /* compatibility */
+		TUNABLE_INT_FETCH("hw.dmar.dma", &unit->dma_enabled);
 	TAILQ_INIT(&unit->delayed_maps);
-	TASK_INIT(&unit->dmamap_load_task, 0, dmar_bus_task_dmamap, unit);
-	unit->delayed_taskqueue = taskqueue_create("dmar", M_WAITOK,
+	TASK_INIT(&unit->dmamap_load_task, 0, iommu_bus_task_dmamap, unit);
+	unit->delayed_taskqueue = taskqueue_create("iommu", M_WAITOK,
 	    taskqueue_thread_enqueue, &unit->delayed_taskqueue);
 	taskqueue_start_threads(&unit->delayed_taskqueue, 1, PI_DISK,
-	    "dmar%d busdma taskq", unit->unit);
+	    "iommu%d busdma taskq", unit->unit);
 	return (0);
 }
 
 void
-dmar_fini_busdma(struct dmar_unit *unit)
+iommu_fini_busdma(struct iommu_unit *unit)
 {
 
 	if (unit->delayed_taskqueue == NULL)
@@ -979,11 +985,11 @@ bus_dma_dmar_load_ident(bus_dma_tag_t dmat, bus_dmamap_t map1,
     vm_paddr_t start, vm_size_t length, int flags)
 {
 	struct bus_dma_tag_common *tc;
-	struct bus_dma_tag_dmar *tag;
-	struct bus_dmamap_dmar *map;
-	struct dmar_ctx *ctx;
-	struct dmar_domain *domain;
-	struct dmar_map_entry *entry;
+	struct bus_dma_tag_iommu *tag;
+	struct bus_dmamap_iommu *map;
+	struct iommu_ctx *ctx;
+	struct iommu_domain *domain;
+	struct iommu_map_entry *entry;
 	vm_page_t *ma;
 	vm_size_t i;
 	int error;
@@ -996,16 +1002,16 @@ bus_dma_dmar_load_ident(bus_dma_tag_t dmat, bus_dmamap_t map1,
 	MPASS((flags & ~(BUS_DMA_NOWAIT | BUS_DMA_NOWRITE)) == 0);
 
 	tc = (struct bus_dma_tag_common *)dmat;
-	if (tc->impl != &bus_dma_dmar_impl)
+	if (tc->impl != &bus_dma_iommu_impl)
 		return (0);
 
-	tag = (struct bus_dma_tag_dmar *)dmat;
+	tag = (struct bus_dma_tag_iommu *)dmat;
 	ctx = tag->ctx;
 	domain = ctx->domain;
-	map = (struct bus_dmamap_dmar *)map1;
+	map = (struct bus_dmamap_iommu *)map1;
 	waitok = (flags & BUS_DMA_NOWAIT) != 0;
 
-	entry = dmar_gas_alloc_entry(domain, waitok ? 0 : DMAR_PGF_WAITOK);
+	entry = iommu_map_alloc_entry(domain, waitok ? 0 : DMAR_PGF_WAITOK);
 	if (entry == NULL)
 		return (ENOMEM);
 	entry->start = start;
@@ -1013,23 +1019,23 @@ bus_dma_dmar_load_ident(bus_dma_tag_t dmat, bus_dmamap_t map1,
 	ma = malloc(sizeof(vm_page_t) * atop(length), M_TEMP, waitok ?
 	    M_WAITOK : M_NOWAIT);
 	if (ma == NULL) {
-		dmar_gas_free_entry(domain, entry);
+		iommu_map_free_entry(domain, entry);
 		return (ENOMEM);
 	}
 	for (i = 0; i < atop(length); i++) {
 		ma[i] = vm_page_getfake(entry->start + PAGE_SIZE * i,
 		    VM_MEMATTR_DEFAULT);
 	}
-	error = dmar_gas_map_region(domain, entry, DMAR_MAP_ENTRY_READ |
-	    ((flags & BUS_DMA_NOWRITE) ? 0 : DMAR_MAP_ENTRY_WRITE),
-	    waitok ? DMAR_GM_CANWAIT : 0, ma);
+	error = iommu_map_region(domain, entry, IOMMU_MAP_ENTRY_READ |
+	    ((flags & BUS_DMA_NOWRITE) ? 0 : IOMMU_MAP_ENTRY_WRITE),
+	    waitok ? IOMMU_MF_CANWAIT : 0, ma);
 	if (error == 0) {
-		DMAR_DOMAIN_LOCK(domain);
+		IOMMU_DOMAIN_LOCK(domain);
 		TAILQ_INSERT_TAIL(&map->map_entries, entry, dmamap_link);
-		entry->flags |= DMAR_MAP_ENTRY_MAP;
-		DMAR_DOMAIN_UNLOCK(domain);
+		entry->flags |= IOMMU_MAP_ENTRY_MAP;
+		IOMMU_DOMAIN_UNLOCK(domain);
 	} else {
-		dmar_domain_unload_entry(entry, true);
+		iommu_domain_unload_entry(entry, true);
 	}
 	for (i = 0; i < atop(length); i++)
 		vm_page_putfake(ma[i]);
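The iommu_init_busdma() hunk above keeps the old loader tunable working:
TUNABLE_INT_FETCH() returns nonzero when the variable was found in the kernel
environment, so the legacy name is consulted only when the new one is absent.
A condensed sketch of that fallback idiom (hypothetical helper, not in the
commit):

static void
example_fetch_dma_enabled(struct iommu_unit *unit)
{
	unit->dma_enabled = 1;		/* enabled by default */
	if (TUNABLE_INT_FETCH("hw.iommu.dma", &unit->dma_enabled) == 0)
		TUNABLE_INT_FETCH("hw.dmar.dma", &unit->dma_enabled);
}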
sys/x86/iommu/busdma_dmar.h:

@@ -34,33 +34,32 @@
 #ifndef __X86_IOMMU_BUSDMA_DMAR_H
 #define __X86_IOMMU_BUSDMA_DMAR_H
 
-struct dmar_map_entry;
-TAILQ_HEAD(dmar_map_entries_tailq, dmar_map_entry);
+#include <sys/iommu.h>
 
-struct bus_dma_tag_dmar {
+struct bus_dma_tag_iommu {
 	struct bus_dma_tag_common common;
-	struct dmar_ctx *ctx;
+	struct iommu_ctx *ctx;
 	device_t owner;
 	int map_count;
 	bus_dma_segment_t *segments;
 };
 
-struct bus_dmamap_dmar {
-	struct bus_dma_tag_dmar *tag;
+struct bus_dmamap_iommu {
+	struct bus_dma_tag_iommu *tag;
 	struct memdesc mem;
 	bus_dmamap_callback_t *callback;
 	void *callback_arg;
-	struct dmar_map_entries_tailq map_entries;
-	TAILQ_ENTRY(bus_dmamap_dmar) delay_link;
+	struct iommu_map_entries_tailq map_entries;
+	TAILQ_ENTRY(bus_dmamap_iommu) delay_link;
 	bool locked;
 	bool cansleep;
 	int flags;
 };
 
-#define	BUS_DMAMAP_DMAR_MALLOC	0x0001
-#define	BUS_DMAMAP_DMAR_KMEM_ALLOC	0x0002
+#define	BUS_DMAMAP_IOMMU_MALLOC	0x0001
+#define	BUS_DMAMAP_IOMMU_KMEM_ALLOC	0x0002
 
-extern struct bus_dma_impl bus_dma_dmar_impl;
+extern struct bus_dma_impl bus_dma_iommu_impl;
 
 bus_dma_tag_t acpi_iommu_get_dma_tag(device_t dev, device_t child);
 
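The casts between bus_dma_tag_t and struct bus_dma_tag_iommu * throughout the
busdma hunks rely on the same first-member trick: struct bus_dma_tag_common
leads struct bus_dma_tag_iommu, so both pointers share an address. A reduced
sketch with abbreviated stand-ins for the kernel types (illustration only):

struct bus_dma_impl;			/* dispatch table, opaque here */

struct bus_dma_tag_common_sketch {
	const struct bus_dma_impl *impl;
	int ref_count;
};

struct bus_dma_tag_iommu_sketch {
	struct bus_dma_tag_common_sketch common;	/* must remain first */
	struct iommu_ctx *ctx;
};

/*
 * Generic busdma code hands out a pointer to the common header; the
 * IOMMU backend recovers its own type by cast, exactly as the hunks
 * above do with (struct bus_dma_tag_iommu *)dmat.
 */
static struct bus_dma_tag_iommu_sketch *
tag_to_iommu(struct bus_dma_tag_common_sketch *dmat)
{
	return ((struct bus_dma_tag_iommu_sketch *)dmat);
}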
sys/x86/iommu/intel_ctx.c:

@@ -114,30 +114,35 @@ dmar_ensure_ctx_page(struct dmar_unit *dmar, int bus)
 static dmar_ctx_entry_t *
 dmar_map_ctx_entry(struct dmar_ctx *ctx, struct sf_buf **sfp)
 {
+	struct dmar_unit *dmar;
 	dmar_ctx_entry_t *ctxp;
 
-	ctxp = dmar_map_pgtbl(ctx->domain->dmar->ctx_obj, 1 +
+	dmar = (struct dmar_unit *)ctx->context.domain->iommu;
+
+	ctxp = dmar_map_pgtbl(dmar->ctx_obj, 1 +
 	    PCI_RID2BUS(ctx->rid), DMAR_PGF_NOALLOC | DMAR_PGF_WAITOK, sfp);
 	ctxp += ctx->rid & 0xff;
 	return (ctxp);
 }
 
 static void
-ctx_tag_init(struct dmar_ctx *ctx, device_t dev)
+device_tag_init(struct dmar_ctx *ctx, device_t dev)
 {
+	struct dmar_domain *domain;
 	bus_addr_t maxaddr;
 
-	maxaddr = MIN(ctx->domain->end, BUS_SPACE_MAXADDR);
-	ctx->ctx_tag.common.ref_count = 1; /* Prevent free */
-	ctx->ctx_tag.common.impl = &bus_dma_dmar_impl;
-	ctx->ctx_tag.common.boundary = 0;
-	ctx->ctx_tag.common.lowaddr = maxaddr;
-	ctx->ctx_tag.common.highaddr = maxaddr;
-	ctx->ctx_tag.common.maxsize = maxaddr;
-	ctx->ctx_tag.common.nsegments = BUS_SPACE_UNRESTRICTED;
-	ctx->ctx_tag.common.maxsegsz = maxaddr;
-	ctx->ctx_tag.ctx = ctx;
-	ctx->ctx_tag.owner = dev;
+	domain = (struct dmar_domain *)ctx->context.domain;
+	maxaddr = MIN(domain->end, BUS_SPACE_MAXADDR);
+	ctx->context.tag->common.ref_count = 1; /* Prevent free */
+	ctx->context.tag->common.impl = &bus_dma_iommu_impl;
+	ctx->context.tag->common.boundary = 0;
+	ctx->context.tag->common.lowaddr = maxaddr;
+	ctx->context.tag->common.highaddr = maxaddr;
+	ctx->context.tag->common.maxsize = maxaddr;
+	ctx->context.tag->common.nsegments = BUS_SPACE_UNRESTRICTED;
+	ctx->context.tag->common.maxsegsz = maxaddr;
+	ctx->context.tag->ctx = (struct iommu_ctx *)ctx;
+	ctx->context.tag->owner = dev;
 }
 
 static void
@@ -173,12 +178,12 @@ ctx_id_entry_init(struct dmar_ctx *ctx, dmar_ctx_entry_t *ctxp, bool move,
 	vm_page_t ctx_root;
 	int i;
 
-	domain = ctx->domain;
-	unit = domain->dmar;
+	domain = (struct dmar_domain *)ctx->context.domain;
+	unit = (struct dmar_unit *)domain->iodom.iommu;
 	KASSERT(move || (ctxp->ctx1 == 0 && ctxp->ctx2 == 0),
 	    ("dmar%d: initialized ctx entry %d:%d:%d 0x%jx 0x%jx",
-	    unit->unit, busno, pci_get_slot(ctx->ctx_tag.owner),
-	    pci_get_function(ctx->ctx_tag.owner),
+	    unit->iommu.unit, busno, pci_get_slot(ctx->context.tag->owner),
+	    pci_get_function(ctx->context.tag->owner),
 	    ctxp->ctx1, ctxp->ctx2));
 
 	if ((domain->flags & DMAR_DOMAIN_IDMAP) != 0 &&
@@ -230,10 +235,10 @@ domain_init_rmrr(struct dmar_domain *domain, device_t dev, int bus,
     int slot, int func, int dev_domain, int dev_busno,
     const void *dev_path, int dev_path_len)
 {
-	struct dmar_map_entries_tailq rmrr_entries;
-	struct dmar_map_entry *entry, *entry1;
+	struct iommu_map_entries_tailq rmrr_entries;
+	struct iommu_map_entry *entry, *entry1;
 	vm_page_t *ma;
-	dmar_gaddr_t start, end;
+	iommu_gaddr_t start, end;
 	vm_pindex_t size, i;
 	int error, error1;
 
@@ -255,7 +260,7 @@ domain_init_rmrr(struct dmar_domain *domain, device_t dev, int bus,
 		end = entry->end;
 		if (bootverbose)
 			printf("dmar%d ctx pci%d:%d:%d RMRR [%#jx, %#jx]\n",
-			    domain->dmar->unit, bus, slot, func,
+			    domain->iodom.iommu->unit, bus, slot, func,
 			    (uintmax_t)start, (uintmax_t)end);
 		entry->start = trunc_page(start);
 		entry->end = round_page(end);
@@ -267,7 +272,7 @@ domain_init_rmrr(struct dmar_domain *domain, device_t dev, int bus,
 					printf("pci%d:%d:%d ", bus, slot, func);
 				printf("BIOS bug: dmar%d RMRR "
 				    "region (%jx, %jx) corrected\n",
-				    domain->dmar->unit, start, end);
+				    domain->iodom.iommu->unit, start, end);
 			}
 			entry->end += DMAR_PAGE_SIZE * 0x20;
 		}
@@ -278,8 +283,8 @@ domain_init_rmrr(struct dmar_domain *domain, device_t dev, int bus,
 			    VM_MEMATTR_DEFAULT);
 		}
 		error1 = dmar_gas_map_region(domain, entry,
-		    DMAR_MAP_ENTRY_READ | DMAR_MAP_ENTRY_WRITE,
-		    DMAR_GM_CANWAIT | DMAR_GM_RMRR, ma);
+		    IOMMU_MAP_ENTRY_READ | IOMMU_MAP_ENTRY_WRITE,
+		    IOMMU_MF_CANWAIT | IOMMU_MF_RMRR, ma);
 		/*
 		 * Non-failed RMRR entries are owned by context rb
 		 * tree. Get rid of the failed entry, but do not stop
@@ -287,10 +292,10 @@ domain_init_rmrr(struct dmar_domain *domain, device_t dev, int bus,
 		 * loaded and removed on the context destruction.
 		 */
 		if (error1 == 0 && entry->end != entry->start) {
-			DMAR_LOCK(domain->dmar);
+			IOMMU_LOCK(domain->iodom.iommu);
 			domain->refs++; /* XXXKIB prevent free */
 			domain->flags |= DMAR_DOMAIN_RMRR;
-			DMAR_UNLOCK(domain->dmar);
+			IOMMU_UNLOCK(domain->iodom.iommu);
 		} else {
 			if (error1 != 0) {
 				if (dev != NULL)
@@ -298,7 +303,7 @@ domain_init_rmrr(struct dmar_domain *domain, device_t dev, int bus,
 					printf("pci%d:%d:%d ", bus, slot, func);
 				printf(
 			    "dmar%d failed to map RMRR region (%jx, %jx) %d\n",
-				    domain->dmar->unit, start, end,
+				    domain->iodom.iommu->unit, start, end,
 				    error1);
 				error = error1;
 			}
@@ -325,10 +330,12 @@ dmar_domain_alloc(struct dmar_unit *dmar, bool id_mapped)
 	domain->domain = id;
 	LIST_INIT(&domain->contexts);
 	RB_INIT(&domain->rb_root);
-	TAILQ_INIT(&domain->unload_entries);
-	TASK_INIT(&domain->unload_task, 0, dmar_domain_unload_task, domain);
-	mtx_init(&domain->lock, "dmardom", NULL, MTX_DEF);
-	domain->dmar = dmar;
+	TAILQ_INIT(&domain->iodom.unload_entries);
+	TASK_INIT(&domain->iodom.unload_task, 0, dmar_domain_unload_task,
+	    domain);
+	mtx_init(&domain->iodom.lock, "dmardom", NULL, MTX_DEF);
+	domain->iodom.iommu = &dmar->iommu;
 
 	/*
 	 * For now, use the maximal usable physical address of the
@@ -376,7 +383,9 @@ dmar_ctx_alloc(struct dmar_domain *domain, uint16_t rid)
 	struct dmar_ctx *ctx;
 
 	ctx = malloc(sizeof(*ctx), M_DMAR_CTX, M_WAITOK | M_ZERO);
-	ctx->domain = domain;
+	ctx->context.domain = (struct iommu_domain *)domain;
+	ctx->context.tag = malloc(sizeof(struct bus_dma_tag_iommu),
+	    M_DMAR_CTX, M_WAITOK | M_ZERO);
 	ctx->rid = rid;
 	ctx->refs = 1;
 	return (ctx);
@@ -387,8 +396,8 @@ dmar_ctx_link(struct dmar_ctx *ctx)
 {
 	struct dmar_domain *domain;
 
-	domain = ctx->domain;
-	DMAR_ASSERT_LOCKED(domain->dmar);
+	domain = (struct dmar_domain *)ctx->context.domain;
+	IOMMU_ASSERT_LOCKED(domain->iodom.iommu);
 	KASSERT(domain->refs >= domain->ctx_cnt,
 	    ("dom %p ref underflow %d %d", domain, domain->refs,
 	    domain->ctx_cnt));
@@ -402,8 +411,8 @@ dmar_ctx_unlink(struct dmar_ctx *ctx)
 {
 	struct dmar_domain *domain;
 
-	domain = ctx->domain;
-	DMAR_ASSERT_LOCKED(domain->dmar);
+	domain = (struct dmar_domain *)ctx->context.domain;
+	IOMMU_ASSERT_LOCKED(domain->iodom.iommu);
 	KASSERT(domain->refs > 0,
 	    ("domain %p ctx dtr refs %d", domain, domain->refs));
 	KASSERT(domain->ctx_cnt >= domain->refs,
@@ -417,8 +426,9 @@ dmar_ctx_unlink(struct dmar_ctx *ctx)
 static void
 dmar_domain_destroy(struct dmar_domain *domain)
 {
+	struct dmar_unit *dmar;
 
-	KASSERT(TAILQ_EMPTY(&domain->unload_entries),
+	KASSERT(TAILQ_EMPTY(&domain->iodom.unload_entries),
 	    ("unfinished unloads %p", domain));
 	KASSERT(LIST_EMPTY(&domain->contexts),
 	    ("destroying dom %p with contexts", domain));
@@ -436,8 +446,9 @@ dmar_domain_destroy(struct dmar_domain *domain)
 		DMAR_DOMAIN_PGLOCK(domain);
 		domain_free_pgtbl(domain);
 	}
-	mtx_destroy(&domain->lock);
-	free_unr(domain->dmar->domids, domain->domain);
+	mtx_destroy(&domain->iodom.lock);
+	dmar = (struct dmar_unit *)domain->iodom.iommu;
+	free_unr(dmar->domids, domain->domain);
 	free(domain, M_DMAR_DOMAIN);
 }
 
@@ -466,7 +477,7 @@ dmar_get_ctx_for_dev1(struct dmar_unit *dmar, device_t dev, uint16_t rid,
 	TD_PREP_PINNED_ASSERT;
 	DMAR_LOCK(dmar);
 	KASSERT(!dmar_is_buswide_ctx(dmar, bus) || (slot == 0 && func == 0),
-	    ("dmar%d pci%d:%d:%d get_ctx for buswide", dmar->unit, bus,
+	    ("dmar%d pci%d:%d:%d get_ctx for buswide", dmar->iommu.unit, bus,
 	    slot, func));
 	ctx = dmar_find_ctx_locked(dmar, rid);
 	error = 0;
@@ -505,8 +516,8 @@ dmar_get_ctx_for_dev1(struct dmar_unit *dmar, device_t dev, uint16_t rid,
 			domain = domain1;
 			ctx = ctx1;
 			dmar_ctx_link(ctx);
-			ctx->ctx_tag.owner = dev;
-			ctx_tag_init(ctx, dev);
+			ctx->context.tag->owner = dev;
+			device_tag_init(ctx, dev);
 
 			/*
 			 * This is the first activated context for the
@@ -521,7 +532,7 @@ dmar_get_ctx_for_dev1(struct dmar_unit *dmar, device_t dev, uint16_t rid,
 				device_printf(dev,
 			    "dmar%d pci%d:%d:%d:%d rid %x domain %d mgaw %d "
 				    "agaw %d %s-mapped\n",
-				    dmar->unit, dmar->segment, bus, slot,
+				    dmar->iommu.unit, dmar->segment, bus, slot,
 				    func, rid, domain->domain, domain->mgaw,
 				    domain->agaw, id_mapped ? "id" : "re");
 			}
@@ -531,13 +542,13 @@ dmar_get_ctx_for_dev1(struct dmar_unit *dmar, device_t dev, uint16_t rid,
 			dmar_domain_destroy(domain1);
 			/* Nothing needs to be done to destroy ctx1. */
 			free(ctx1, M_DMAR_CTX);
-			domain = ctx->domain;
+			domain = (struct dmar_domain *)ctx->context.domain;
 			ctx->refs++; /* tag referenced us */
 		}
 	} else {
-		domain = ctx->domain;
-		if (ctx->ctx_tag.owner == NULL)
-			ctx->ctx_tag.owner = dev;
+		domain = (struct dmar_domain *)ctx->context.domain;
+		if (ctx->context.tag->owner == NULL)
+			ctx->context.tag->owner = dev;
 		ctx->refs++; /* tag referenced us */
 	}
 
@@ -558,11 +569,11 @@ dmar_get_ctx_for_dev1(struct dmar_unit *dmar, device_t dev, uint16_t rid,
 		if (error == 0) {
 			if (bootverbose) {
 				printf("dmar%d: enabled translation\n",
-				    dmar->unit);
+				    dmar->iommu.unit);
 			}
 		} else {
 			printf("dmar%d: enabling translation failed, "
-			    "error %d\n", dmar->unit, error);
+			    "error %d\n", dmar->iommu.unit, error);
 			dmar_free_ctx_locked(dmar, ctx);
 			TD_PINNED_ASSERT;
 			return (NULL);
@@ -608,25 +619,26 @@ dmar_move_ctx_to_domain(struct dmar_domain *domain, struct dmar_ctx *ctx)
 	int error;
 
 	dmar = domain->dmar;
-	old_domain = ctx->domain;
+	old_domain = (struct dmar_domain *)ctx->context.domain;
 	if (domain == old_domain)
 		return (0);
-	KASSERT(old_domain->dmar == dmar,
+	KASSERT(old_domain->iodom.iommu == domain->iodom.iommu,
 	    ("domain %p %u moving between dmars %u %u", domain,
-	    domain->domain, old_domain->dmar->unit, domain->dmar->unit));
+	    domain->domain, old_domain->iodom.iommu->unit,
+	    domain->iodom.iommu->unit));
 	TD_PREP_PINNED_ASSERT;
 
 	ctxp = dmar_map_ctx_entry(ctx, &sf);
 	DMAR_LOCK(dmar);
 	dmar_ctx_unlink(ctx);
-	ctx->domain = domain;
+	ctx->context.domain = &domain->iodom;
 	dmar_ctx_link(ctx);
 	ctx_id_entry_init(ctx, ctxp, true, PCI_BUSMAX + 100);
 	dmar_unmap_pgtbl(sf);
 	error = dmar_flush_for_ctx_entry(dmar, true);
 	/* If flush failed, rolling back would not work as well. */
 	printf("dmar%d rid %x domain %d->%d %s-mapped\n",
-	    dmar->unit, ctx->rid, old_domain->domain, domain->domain,
+	    dmar->iommu.unit, ctx->rid, old_domain->domain, domain->domain,
 	    (domain->flags & DMAR_DOMAIN_IDMAP) != 0 ? "id" : "re");
 	dmar_unref_domain_locked(dmar, old_domain);
 	TD_PINNED_ASSERT;
@@ -639,9 +651,10 @@ dmar_unref_domain_locked(struct dmar_unit *dmar, struct dmar_domain *domain)
 
 	DMAR_ASSERT_LOCKED(dmar);
 	KASSERT(domain->refs >= 1,
-	    ("dmar %d domain %p refs %u", dmar->unit, domain, domain->refs));
+	    ("dmar %d domain %p refs %u", dmar->iommu.unit, domain,
+	    domain->refs));
 	KASSERT(domain->refs > domain->ctx_cnt,
-	    ("dmar %d domain %p refs %d ctx_cnt %d", dmar->unit, domain,
+	    ("dmar %d domain %p refs %d ctx_cnt %d", dmar->iommu.unit, domain,
 	    domain->refs, domain->ctx_cnt));
 
 	if (domain->refs > 1) {
@@ -656,7 +669,8 @@ dmar_unref_domain_locked(struct dmar_unit *dmar, struct dmar_domain *domain)
 	LIST_REMOVE(domain, link);
 	DMAR_UNLOCK(dmar);
 
-	taskqueue_drain(dmar->delayed_taskqueue, &domain->unload_task);
+	taskqueue_drain(dmar->iommu.delayed_taskqueue,
+	    &domain->iodom.unload_task);
 	dmar_domain_destroy(domain);
 }
 
@@ -681,7 +695,7 @@ dmar_free_ctx_locked(struct dmar_unit *dmar, struct dmar_ctx *ctx)
 		return;
 	}
 
-	KASSERT((ctx->flags & DMAR_CTX_DISABLED) == 0,
+	KASSERT((ctx->context.flags & IOMMU_CTX_DISABLED) == 0,
 	    ("lost ref on disabled ctx %p", ctx));
 
 	/*
@@ -708,7 +722,7 @@ dmar_free_ctx_locked(struct dmar_unit *dmar, struct dmar_ctx *ctx)
 		return;
 	}
 
-	KASSERT((ctx->flags & DMAR_CTX_DISABLED) == 0,
+	KASSERT((ctx->context.flags & IOMMU_CTX_DISABLED) == 0,
 	    ("lost ref on disabled ctx %p", ctx));
 
 	/*
@@ -726,8 +740,9 @@ dmar_free_ctx_locked(struct dmar_unit *dmar, struct dmar_ctx *ctx)
 		dmar_inv_iotlb_glob(dmar);
 	}
 	dmar_unmap_pgtbl(sf);
-	domain = ctx->domain;
+	domain = (struct dmar_domain *)ctx->context.domain;
 	dmar_ctx_unlink(ctx);
+	free(ctx->context.tag, M_DMAR_CTX);
 	free(ctx, M_DMAR_CTX);
 	dmar_unref_domain_locked(dmar, domain);
 	TD_PINNED_ASSERT;
@@ -738,7 +753,7 @@ dmar_free_ctx(struct dmar_ctx *ctx)
 {
 	struct dmar_unit *dmar;
 
-	dmar = ctx->domain->dmar;
+	dmar = (struct dmar_unit *)ctx->context.domain->iommu;
 	DMAR_LOCK(dmar);
 	dmar_free_ctx_locked(dmar, ctx);
 }
@@ -764,13 +779,13 @@ dmar_find_ctx_locked(struct dmar_unit *dmar, uint16_t rid)
 }
 
 void
-dmar_domain_free_entry(struct dmar_map_entry *entry, bool free)
+dmar_domain_free_entry(struct iommu_map_entry *entry, bool free)
 {
 	struct dmar_domain *domain;
 
-	domain = entry->domain;
+	domain = (struct dmar_domain *)entry->domain;
 	DMAR_DOMAIN_LOCK(domain);
-	if ((entry->flags & DMAR_MAP_ENTRY_RMRR) != 0)
+	if ((entry->flags & IOMMU_MAP_ENTRY_RMRR) != 0)
 		dmar_gas_free_region(domain, entry);
 	else
 		dmar_gas_free_space(domain, entry);
@@ -782,29 +797,32 @@ dmar_domain_free_entry(struct dmar_map_entry *entry, bool free)
 }
 
 void
-dmar_domain_unload_entry(struct dmar_map_entry *entry, bool free)
+dmar_domain_unload_entry(struct iommu_map_entry *entry, bool free)
 {
+	struct dmar_domain *domain;
 	struct dmar_unit *unit;
 
-	unit = entry->domain->dmar;
+	domain = (struct dmar_domain *)entry->domain;
+	unit = (struct dmar_unit *)domain->iodom.iommu;
 	if (unit->qi_enabled) {
 		DMAR_LOCK(unit);
-		dmar_qi_invalidate_locked(entry->domain, entry->start,
-		    entry->end - entry->start, &entry->gseq, true);
+		dmar_qi_invalidate_locked((struct dmar_domain *)entry->domain,
+		    entry->start, entry->end - entry->start, &entry->gseq,
+		    true);
 		if (!free)
-			entry->flags |= DMAR_MAP_ENTRY_QI_NF;
+			entry->flags |= IOMMU_MAP_ENTRY_QI_NF;
 		TAILQ_INSERT_TAIL(&unit->tlb_flush_entries, entry, dmamap_link);
 		DMAR_UNLOCK(unit);
 	} else {
-		domain_flush_iotlb_sync(entry->domain, entry->start,
-		    entry->end - entry->start);
+		domain_flush_iotlb_sync((struct dmar_domain *)entry->domain,
		    entry->start, entry->end - entry->start);
 		dmar_domain_free_entry(entry, free);
 	}
 }
 
 static bool
 dmar_domain_unload_emit_wait(struct dmar_domain *domain,
-    struct dmar_map_entry *entry)
+    struct iommu_map_entry *entry)
 {
 
 	if (TAILQ_NEXT(entry, dmamap_link) == NULL)
@@ -814,16 +832,16 @@ dmar_domain_unload_emit_wait(struct dmar_domain *domain,
 
 void
 dmar_domain_unload(struct dmar_domain *domain,
struct dmar_map_entries_tailq *entries, bool cansleep)
|
||||
struct iommu_map_entries_tailq *entries, bool cansleep)
|
||||
{
|
||||
struct dmar_unit *unit;
|
||||
struct dmar_map_entry *entry, *entry1;
|
||||
struct iommu_map_entry *entry, *entry1;
|
||||
int error;
|
||||
|
||||
unit = domain->dmar;
|
||||
unit = (struct dmar_unit *)domain->iodom.iommu;
|
||||
|
||||
TAILQ_FOREACH_SAFE(entry, entries, dmamap_link, entry1) {
|
||||
KASSERT((entry->flags & DMAR_MAP_ENTRY_MAP) != 0,
|
||||
KASSERT((entry->flags & IOMMU_MAP_ENTRY_MAP) != 0,
|
||||
("not mapped entry %p %p", domain, entry));
|
||||
error = domain_unmap_buf(domain, entry->start, entry->end -
|
||||
entry->start, cansleep ? DMAR_PGF_WAITOK : 0);
|
||||
@ -853,18 +871,74 @@ static void
|
||||
dmar_domain_unload_task(void *arg, int pending)
|
||||
{
|
||||
struct dmar_domain *domain;
|
||||
struct dmar_map_entries_tailq entries;
|
||||
struct iommu_map_entries_tailq entries;
|
||||
|
||||
domain = arg;
|
||||
TAILQ_INIT(&entries);
|
||||
|
||||
for (;;) {
|
||||
DMAR_DOMAIN_LOCK(domain);
|
||||
TAILQ_SWAP(&domain->unload_entries, &entries, dmar_map_entry,
|
||||
dmamap_link);
|
||||
TAILQ_SWAP(&domain->iodom.unload_entries, &entries,
|
||||
iommu_map_entry, dmamap_link);
|
||||
DMAR_DOMAIN_UNLOCK(domain);
|
||||
if (TAILQ_EMPTY(&entries))
|
||||
break;
|
||||
dmar_domain_unload(domain, &entries, true);
|
||||
}
|
||||
}
|
||||
|
||||
struct iommu_ctx *
|
||||
iommu_get_ctx(struct iommu_unit *iommu, device_t dev, uint16_t rid,
|
||||
bool id_mapped, bool rmrr_init)
|
||||
{
|
||||
struct dmar_unit *dmar;
|
||||
struct dmar_ctx *ret;
|
||||
|
||||
dmar = (struct dmar_unit *)iommu;
|
||||
|
||||
ret = dmar_get_ctx_for_dev(dmar, dev, rid, id_mapped, rmrr_init);
|
||||
|
||||
return ((struct iommu_ctx *)ret);
|
||||
}
|
||||
|
||||
void
|
||||
iommu_free_ctx_locked(struct iommu_unit *iommu, struct iommu_ctx *context)
|
||||
{
|
||||
struct dmar_unit *dmar;
|
||||
struct dmar_ctx *ctx;
|
||||
|
||||
dmar = (struct dmar_unit *)iommu;
|
||||
ctx = (struct dmar_ctx *)context;
|
||||
|
||||
dmar_free_ctx_locked(dmar, ctx);
|
||||
}
|
||||
|
||||
void
|
||||
iommu_free_ctx(struct iommu_ctx *context)
|
||||
{
|
||||
struct dmar_unit *dmar;
|
||||
struct dmar_ctx *ctx;
|
||||
|
||||
ctx = (struct dmar_ctx *)context;
|
||||
dmar = (struct dmar_unit *)ctx->context.domain->iommu;
|
||||
|
||||
dmar_free_ctx(ctx);
|
||||
}
|
||||
|
||||
void
|
||||
iommu_domain_unload_entry(struct iommu_map_entry *entry, bool free)
|
||||
{
|
||||
|
||||
dmar_domain_unload_entry(entry, free);
|
||||
}
|
||||
|
||||
void
|
||||
iommu_domain_unload(struct iommu_domain *iodom,
|
||||
struct iommu_map_entries_tailq *entries, bool cansleep)
|
||||
{
|
||||
struct dmar_domain *domain;
|
||||
|
||||
domain = (struct dmar_domain *)iodom;
|
||||
|
||||
dmar_domain_unload(domain, entries, cansleep);
|
||||
}
|
||||
|
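The iommu_* wrappers above all recover the MD object with a plain pointer cast, which is safe only because each MD structure embeds its MI counterpart as the first member. A minimal sketch of the idiom, with illustrative *_sketch names rather than the tree's real declarations:

#include <stddef.h>

struct iommu_ctx_sketch {
	unsigned flags;
};

struct dmar_ctx_sketch {
	struct iommu_ctx_sketch context;	/* Must stay the first member. */
	unsigned short rid;
};

static struct dmar_ctx_sketch *
dmar_ctx_from_iommu(struct iommu_ctx_sketch *c)
{

	/* Offset zero makes the cast between the two pointer types exact. */
	_Static_assert(offsetof(struct dmar_ctx_sketch, context) == 0,
	    "MI struct must be the first member");
	return ((struct dmar_ctx_sketch *)c);
}

The same reasoning covers the unit and domain casts, for example (struct dmar_unit *)iommu in iommu_get_ctx().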
@@ -34,51 +34,17 @@
#ifndef __X86_IOMMU_INTEL_DMAR_H
#define __X86_IOMMU_INTEL_DMAR_H

/* Host or physical memory address, after translation. */
typedef uint64_t dmar_haddr_t;
/* Guest or bus address, before translation. */
typedef uint64_t dmar_gaddr_t;
#include <sys/iommu.h>

struct dmar_qi_genseq {
u_int gen;
uint32_t seq;
};
struct dmar_unit;

struct dmar_map_entry {
dmar_gaddr_t start;
dmar_gaddr_t end;
dmar_gaddr_t first; /* Least start in subtree */
dmar_gaddr_t last; /* Greatest end in subtree */
dmar_gaddr_t free_down; /* Max free space below the
current R/B tree node */
u_int flags;
TAILQ_ENTRY(dmar_map_entry) dmamap_link; /* Link for dmamap entries */
RB_ENTRY(dmar_map_entry) rb_entry; /* Links for domain entries */
TAILQ_ENTRY(dmar_map_entry) unroll_link; /* Link for unroll after
dmamap_load failure */
struct dmar_domain *domain;
struct dmar_qi_genseq gseq;
};

RB_HEAD(dmar_gas_entries_tree, dmar_map_entry);
RB_PROTOTYPE(dmar_gas_entries_tree, dmar_map_entry, rb_entry,
RB_HEAD(dmar_gas_entries_tree, iommu_map_entry);
RB_PROTOTYPE(dmar_gas_entries_tree, iommu_map_entry, rb_entry,
dmar_gas_cmp_entries);

#define DMAR_MAP_ENTRY_PLACE 0x0001 /* Fake entry */
#define DMAR_MAP_ENTRY_RMRR 0x0002 /* Permanent, not linked by
dmamap_link */
#define DMAR_MAP_ENTRY_MAP 0x0004 /* Busdma created, linked by
dmamap_link */
#define DMAR_MAP_ENTRY_UNMAPPED 0x0010 /* No backing pages */
#define DMAR_MAP_ENTRY_QI_NF 0x0020 /* qi task, do not free entry */
#define DMAR_MAP_ENTRY_READ 0x1000 /* Read permitted */
#define DMAR_MAP_ENTRY_WRITE 0x2000 /* Write permitted */
#define DMAR_MAP_ENTRY_SNOOP 0x4000 /* Snoop */
#define DMAR_MAP_ENTRY_TM 0x8000 /* Transient */

/*
* Locking annotations:
* (u) - Protected by dmar unit lock
* (u) - Protected by iommu unit lock
* (d) - Protected by domain lock
* (c) - Immutable after initialization
*/
@@ -95,41 +61,34 @@ RB_PROTOTYPE(dmar_gas_entries_tree, dmar_map_entry, rb_entry,
* lock pgtbl_obj, which contains the page tables pages.
*/
struct dmar_domain {
struct iommu_domain iodom;
int domain; /* (c) DID, written in context entry */
int mgaw; /* (c) Real max address width */
int agaw; /* (c) Adjusted guest address width */
int pglvl; /* (c) The pagelevel */
int awlvl; /* (c) The pagelevel as the bitmask,
to set in context entry */
dmar_gaddr_t end; /* (c) Highest address + 1 in
iommu_gaddr_t end; /* (c) Highest address + 1 in
the guest AS */
u_int ctx_cnt; /* (u) Number of contexts owned */
u_int refs; /* (u) Refs, including ctx */
struct dmar_unit *dmar; /* (c) */
struct mtx lock; /* (c) */
LIST_ENTRY(dmar_domain) link; /* (u) Member in the dmar list */
LIST_HEAD(, dmar_ctx) contexts; /* (u) */
vm_object_t pgtbl_obj; /* (c) Page table pages */
u_int flags; /* (u) */
u_int entries_cnt; /* (d) */
struct dmar_gas_entries_tree rb_root; /* (d) */
struct dmar_map_entries_tailq unload_entries; /* (d) Entries to
unload */
struct dmar_map_entry *first_place, *last_place; /* (d) */
struct task unload_task; /* (c) */
struct iommu_map_entry *first_place, *last_place; /* (d) */
u_int batch_no;
};

struct dmar_ctx {
struct bus_dma_tag_dmar ctx_tag; /* (c) Root tag */
struct iommu_ctx context;
uint16_t rid; /* (c) pci RID */
uint64_t last_fault_rec[2]; /* Last fault reported */
struct dmar_domain *domain; /* (c) */
LIST_ENTRY(dmar_ctx) link; /* (u) Member in the domain list */
u_int refs; /* (u) References from tags */
u_int flags; /* (u) */
u_long loads; /* atomic updates, for stat only */
u_long unloads; /* same */
};

#define DMAR_DOMAIN_GAS_INITED 0x0001
@@ -139,22 +98,15 @@ struct dmar_ctx {
#define DMAR_DOMAIN_RMRR 0x0020 /* Domain contains RMRR entry,
cannot be turned off */

/* struct dmar_ctx flags */
#define DMAR_CTX_FAULTED 0x0001 /* Fault was reported,
last_fault_rec is valid */
#define DMAR_CTX_DISABLED 0x0002 /* Device is disabled, the
ephemeral reference is kept
to prevent context destruction */

#define DMAR_DOMAIN_PGLOCK(dom) VM_OBJECT_WLOCK((dom)->pgtbl_obj)
#define DMAR_DOMAIN_PGTRYLOCK(dom) VM_OBJECT_TRYWLOCK((dom)->pgtbl_obj)
#define DMAR_DOMAIN_PGUNLOCK(dom) VM_OBJECT_WUNLOCK((dom)->pgtbl_obj)
#define DMAR_DOMAIN_ASSERT_PGLOCKED(dom) \
VM_OBJECT_ASSERT_WLOCKED((dom)->pgtbl_obj)

#define DMAR_DOMAIN_LOCK(dom) mtx_lock(&(dom)->lock)
#define DMAR_DOMAIN_UNLOCK(dom) mtx_unlock(&(dom)->lock)
#define DMAR_DOMAIN_ASSERT_LOCKED(dom) mtx_assert(&(dom)->lock, MA_OWNED)
#define DMAR_DOMAIN_LOCK(dom) mtx_lock(&(dom)->iodom.lock)
#define DMAR_DOMAIN_UNLOCK(dom) mtx_unlock(&(dom)->iodom.lock)
#define DMAR_DOMAIN_ASSERT_LOCKED(dom) mtx_assert(&(dom)->iodom.lock, MA_OWNED)

struct dmar_msi_data {
int irq;
@@ -175,8 +127,8 @@ struct dmar_msi_data {
#define DMAR_INTR_TOTAL 2

struct dmar_unit {
struct iommu_unit iommu;
device_t dev;
int unit;
uint16_t segment;
uint64_t base;

@@ -193,7 +145,6 @@ struct dmar_unit {
uint32_t hw_gcmd;

/* Data for being a dmar */
struct mtx lock;
LIST_HEAD(, dmar_domain) domains;
struct unrhdr *domids;
vm_object_t ctx_obj;
@@ -230,17 +181,10 @@ struct dmar_unit {
vmem_t *irtids;

/* Delayed freeing of map entries queue processing */
struct dmar_map_entries_tailq tlb_flush_entries;
struct iommu_map_entries_tailq tlb_flush_entries;
struct task qi_task;
struct taskqueue *qi_taskqueue;

/* Busdma delayed map load */
struct task dmamap_load_task;
TAILQ_HEAD(, bus_dmamap_dmar) delayed_maps;
struct taskqueue *delayed_taskqueue;

int dma_enabled;

/*
* Bitmap of buses for which context must ignore slot:func,
* duplicating the page table pointer into all context table
@@ -251,9 +195,9 @@ struct dmar_unit {

};

#define DMAR_LOCK(dmar) mtx_lock(&(dmar)->lock)
#define DMAR_UNLOCK(dmar) mtx_unlock(&(dmar)->lock)
#define DMAR_ASSERT_LOCKED(dmar) mtx_assert(&(dmar)->lock, MA_OWNED)
#define DMAR_LOCK(dmar) mtx_lock(&(dmar)->iommu.lock)
#define DMAR_UNLOCK(dmar) mtx_unlock(&(dmar)->iommu.lock)
#define DMAR_ASSERT_LOCKED(dmar) mtx_assert(&(dmar)->iommu.lock, MA_OWNED)

#define DMAR_FAULT_LOCK(dmar) mtx_lock_spin(&(dmar)->fault_lock)
#define DMAR_FAULT_UNLOCK(dmar) mtx_unlock_spin(&(dmar)->fault_lock)
@@ -275,14 +219,14 @@ struct dmar_unit *dmar_find_ioapic(u_int apic_id, uint16_t *rid);
u_int dmar_nd2mask(u_int nd);
bool dmar_pglvl_supported(struct dmar_unit *unit, int pglvl);
int domain_set_agaw(struct dmar_domain *domain, int mgaw);
int dmar_maxaddr2mgaw(struct dmar_unit *unit, dmar_gaddr_t maxaddr,
int dmar_maxaddr2mgaw(struct dmar_unit *unit, iommu_gaddr_t maxaddr,
bool allow_less);
vm_pindex_t pglvl_max_pages(int pglvl);
int domain_is_sp_lvl(struct dmar_domain *domain, int lvl);
dmar_gaddr_t pglvl_page_size(int total_pglvl, int lvl);
dmar_gaddr_t domain_page_size(struct dmar_domain *domain, int lvl);
int calc_am(struct dmar_unit *unit, dmar_gaddr_t base, dmar_gaddr_t size,
dmar_gaddr_t *isizep);
iommu_gaddr_t pglvl_page_size(int total_pglvl, int lvl);
iommu_gaddr_t domain_page_size(struct dmar_domain *domain, int lvl);
int calc_am(struct dmar_unit *unit, iommu_gaddr_t base, iommu_gaddr_t size,
iommu_gaddr_t *isizep);
struct vm_page *dmar_pgalloc(vm_object_t obj, vm_pindex_t idx, int flags);
void dmar_pgfree(vm_object_t obj, vm_pindex_t idx, int flags);
void *dmar_map_pgtbl(vm_object_t obj, vm_pindex_t idx, int flags,
@@ -316,30 +260,28 @@ void dmar_enable_qi_intr(struct dmar_unit *unit);
void dmar_disable_qi_intr(struct dmar_unit *unit);
int dmar_init_qi(struct dmar_unit *unit);
void dmar_fini_qi(struct dmar_unit *unit);
void dmar_qi_invalidate_locked(struct dmar_domain *domain, dmar_gaddr_t start,
dmar_gaddr_t size, struct dmar_qi_genseq *psec, bool emit_wait);
void dmar_qi_invalidate_locked(struct dmar_domain *domain, iommu_gaddr_t start,
iommu_gaddr_t size, struct iommu_qi_genseq *psec, bool emit_wait);
void dmar_qi_invalidate_ctx_glob_locked(struct dmar_unit *unit);
void dmar_qi_invalidate_iotlb_glob_locked(struct dmar_unit *unit);
void dmar_qi_invalidate_iec_glob(struct dmar_unit *unit);
void dmar_qi_invalidate_iec(struct dmar_unit *unit, u_int start, u_int cnt);

vm_object_t domain_get_idmap_pgtbl(struct dmar_domain *domain,
dmar_gaddr_t maxaddr);
iommu_gaddr_t maxaddr);
void put_idmap_pgtbl(vm_object_t obj);
int domain_map_buf(struct dmar_domain *domain, dmar_gaddr_t base,
dmar_gaddr_t size, vm_page_t *ma, uint64_t pflags, int flags);
int domain_unmap_buf(struct dmar_domain *domain, dmar_gaddr_t base,
dmar_gaddr_t size, int flags);
void domain_flush_iotlb_sync(struct dmar_domain *domain, dmar_gaddr_t base,
dmar_gaddr_t size);
int domain_map_buf(struct dmar_domain *domain, iommu_gaddr_t base,
iommu_gaddr_t size, vm_page_t *ma, uint64_t pflags, int flags);
int domain_unmap_buf(struct dmar_domain *domain, iommu_gaddr_t base,
iommu_gaddr_t size, int flags);
void domain_flush_iotlb_sync(struct dmar_domain *domain, iommu_gaddr_t base,
iommu_gaddr_t size);
int domain_alloc_pgtbl(struct dmar_domain *domain);
void domain_free_pgtbl(struct dmar_domain *domain);

int dmar_dev_depth(device_t child);
void dmar_dev_path(device_t child, int *busno, void *path1, int depth);

struct dmar_ctx *dmar_instantiate_ctx(struct dmar_unit *dmar, device_t dev,
bool rmrr);
struct dmar_ctx *dmar_get_ctx_for_dev(struct dmar_unit *dmar, device_t dev,
uint16_t rid, bool id_mapped, bool rmrr_init);
struct dmar_ctx *dmar_get_ctx_for_devpath(struct dmar_unit *dmar, uint16_t rid,
@@ -349,50 +291,47 @@ int dmar_move_ctx_to_domain(struct dmar_domain *domain, struct dmar_ctx *ctx);
void dmar_free_ctx_locked(struct dmar_unit *dmar, struct dmar_ctx *ctx);
void dmar_free_ctx(struct dmar_ctx *ctx);
struct dmar_ctx *dmar_find_ctx_locked(struct dmar_unit *dmar, uint16_t rid);
void dmar_domain_unload_entry(struct dmar_map_entry *entry, bool free);
void dmar_domain_unload_entry(struct iommu_map_entry *entry, bool free);
void dmar_domain_unload(struct dmar_domain *domain,
struct dmar_map_entries_tailq *entries, bool cansleep);
void dmar_domain_free_entry(struct dmar_map_entry *entry, bool free);

int dmar_init_busdma(struct dmar_unit *unit);
void dmar_fini_busdma(struct dmar_unit *unit);
device_t dmar_get_requester(device_t dev, uint16_t *rid);
struct iommu_map_entries_tailq *entries, bool cansleep);
void dmar_domain_free_entry(struct iommu_map_entry *entry, bool free);

void dmar_gas_init_domain(struct dmar_domain *domain);
void dmar_gas_fini_domain(struct dmar_domain *domain);
struct dmar_map_entry *dmar_gas_alloc_entry(struct dmar_domain *domain,
struct iommu_map_entry *dmar_gas_alloc_entry(struct dmar_domain *domain,
u_int flags);
void dmar_gas_free_entry(struct dmar_domain *domain,
struct dmar_map_entry *entry);
struct iommu_map_entry *entry);
void dmar_gas_free_space(struct dmar_domain *domain,
struct dmar_map_entry *entry);
struct iommu_map_entry *entry);
int dmar_gas_map(struct dmar_domain *domain,
const struct bus_dma_tag_common *common, dmar_gaddr_t size, int offset,
u_int eflags, u_int flags, vm_page_t *ma, struct dmar_map_entry **res);
const struct bus_dma_tag_common *common, iommu_gaddr_t size, int offset,
u_int eflags, u_int flags, vm_page_t *ma, struct iommu_map_entry **res);
void dmar_gas_free_region(struct dmar_domain *domain,
struct dmar_map_entry *entry);
struct iommu_map_entry *entry);
int dmar_gas_map_region(struct dmar_domain *domain,
struct dmar_map_entry *entry, u_int eflags, u_int flags, vm_page_t *ma);
int dmar_gas_reserve_region(struct dmar_domain *domain, dmar_gaddr_t start,
dmar_gaddr_t end);
struct iommu_map_entry *entry, u_int eflags, u_int flags, vm_page_t *ma);
int dmar_gas_reserve_region(struct dmar_domain *domain, iommu_gaddr_t start,
iommu_gaddr_t end);

void dmar_dev_parse_rmrr(struct dmar_domain *domain, int dev_domain,
int dev_busno, const void *dev_path, int dev_path_len,
struct dmar_map_entries_tailq *rmrr_entries);
int dmar_instantiate_rmrr_ctxs(struct dmar_unit *dmar);
struct iommu_map_entries_tailq *rmrr_entries);
int dmar_instantiate_rmrr_ctxs(struct iommu_unit *dmar);

void dmar_quirks_post_ident(struct dmar_unit *dmar);
void dmar_quirks_pre_use(struct dmar_unit *dmar);
void dmar_quirks_pre_use(struct iommu_unit *dmar);

int dmar_init_irt(struct dmar_unit *unit);
void dmar_fini_irt(struct dmar_unit *unit);

void dmar_set_buswide_ctx(struct dmar_unit *unit, u_int busno);
void dmar_set_buswide_ctx(struct iommu_unit *unit, u_int busno);
bool dmar_is_buswide_ctx(struct dmar_unit *unit, u_int busno);

#define DMAR_GM_CANWAIT 0x0001
#define DMAR_GM_CANSPLIT 0x0002
#define DMAR_GM_RMRR 0x0004
/* Map flags */
#define IOMMU_MF_CANWAIT 0x0001
#define IOMMU_MF_CANSPLIT 0x0002
#define IOMMU_MF_RMRR 0x0004

#define DMAR_PGF_WAITOK 0x0001
#define DMAR_PGF_ZERO 0x0002
@@ -400,7 +339,7 @@ bool dmar_is_buswide_ctx(struct dmar_unit *unit, u_int busno);
#define DMAR_PGF_NOALLOC 0x0008
#define DMAR_PGF_OBJL 0x0010

extern dmar_haddr_t dmar_high;
extern iommu_haddr_t dmar_high;
extern int haw;
extern int dmar_tbl_pagecnt;
extern int dmar_batch_coalesce;
@@ -433,7 +372,7 @@ dmar_write4(const struct dmar_unit *unit, int reg, uint32_t val)

KASSERT(reg != DMAR_GCMD_REG || (val & DMAR_GCMD_TE) ==
(unit->hw_gcmd & DMAR_GCMD_TE),
("dmar%d clearing TE 0x%08x 0x%08x", unit->unit,
("dmar%d clearing TE 0x%08x 0x%08x", unit->iommu.unit,
unit->hw_gcmd, val));
bus_write_4(unit->regs, reg, val);
}
@@ -522,8 +461,8 @@ dmar_pte_clear(volatile uint64_t *dst)
}

static inline bool
dmar_test_boundary(dmar_gaddr_t start, dmar_gaddr_t size,
dmar_gaddr_t boundary)
iommu_test_boundary(iommu_gaddr_t start, iommu_gaddr_t size,
iommu_gaddr_t boundary)
{

if (boundary == 0)
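The hunk stops inside iommu_test_boundary(); the rest of the body is not shown. A hedged reconstruction of what such a boundary test computes, assuming boundary is zero or a power of two (a sketch, not a quote of the header):

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t iommu_gaddr_t;	/* As in sys/iommu.h. */

static inline bool
iommu_test_boundary_sketch(iommu_gaddr_t start, iommu_gaddr_t size,
    iommu_gaddr_t boundary)
{

	if (boundary == 0)
		return (true);
	/* [start, start + size) must not cross a boundary-aligned address. */
	return (start + size <= ((start + boundary) & ~(boundary - 1)));
}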
@@ -252,7 +252,7 @@ dmar_release_resources(device_t dev, struct dmar_unit *unit)
{
int i;

dmar_fini_busdma(unit);
iommu_fini_busdma(&unit->iommu);
dmar_fini_irt(unit);
dmar_fini_qi(unit);
dmar_fini_fault_log(unit);
@@ -413,8 +413,8 @@ dmar_attach(device_t dev)

unit = device_get_softc(dev);
unit->dev = dev;
unit->unit = device_get_unit(dev);
dmaru = dmar_find_by_index(unit->unit);
unit->iommu.unit = device_get_unit(dev);
dmaru = dmar_find_by_index(unit->iommu.unit);
if (dmaru == NULL)
return (EINVAL);
unit->segment = dmaru->Segment;
@@ -469,9 +469,9 @@ dmar_attach(device_t dev)
}
}

mtx_init(&unit->lock, "dmarhw", NULL, MTX_DEF);
mtx_init(&unit->iommu.lock, "dmarhw", NULL, MTX_DEF);
unit->domids = new_unrhdr(0, dmar_nd2mask(DMAR_CAP_ND(unit->hw_cap)),
&unit->lock);
&unit->iommu.lock);
LIST_INIT(&unit->domains);

/*
@@ -531,7 +531,7 @@ dmar_attach(device_t dev)
dmar_release_resources(dev, unit);
return (error);
}
error = dmar_init_busdma(unit);
error = iommu_init_busdma(&unit->iommu);
if (error != 0) {
dmar_release_resources(dev, unit);
return (error);
@@ -596,14 +596,17 @@ DRIVER_MODULE(dmar, acpi, dmar_driver, dmar_devclass, 0, 0);
MODULE_DEPEND(dmar, acpi, 1, 1, 1);

void
dmar_set_buswide_ctx(struct dmar_unit *unit, u_int busno)
dmar_set_buswide_ctx(struct iommu_unit *unit, u_int busno)
{
struct dmar_unit *dmar;

dmar = (struct dmar_unit *)unit;

MPASS(busno <= PCI_BUSMAX);
DMAR_LOCK(unit);
unit->buswide_ctxs[busno / NBBY / sizeof(uint32_t)] |=
DMAR_LOCK(dmar);
dmar->buswide_ctxs[busno / NBBY / sizeof(uint32_t)] |=
1 << (busno % (NBBY * sizeof(uint32_t)));
DMAR_UNLOCK(unit);
DMAR_UNLOCK(dmar);
}

bool
@@ -736,7 +739,7 @@ dmar_match_by_path(struct dmar_unit *unit, int dev_domain, int dev_busno,
char *ptr, *ptrend;
int match;

dmarh = dmar_find_by_index(unit->unit);
dmarh = dmar_find_by_index(unit->iommu.unit);
if (dmarh == NULL)
return (false);
if (dmarh->Segment != dev_domain)
@@ -818,7 +821,7 @@ dmar_find(device_t dev, bool verbose)
if (verbose) {
device_printf(dev, "pci%d:%d:%d:%d matched dmar%d by %s",
dev_domain, pci_get_bus(dev), pci_get_slot(dev),
pci_get_function(dev), unit->unit, banner);
pci_get_function(dev), unit->iommu.unit, banner);
printf(" scope path ");
dmar_print_path(dev_busno, dev_path_len, dev_path);
printf("\n");
@@ -911,7 +914,7 @@ struct rmrr_iter_args {
int dev_busno;
const ACPI_DMAR_PCI_PATH *dev_path;
int dev_path_len;
struct dmar_map_entries_tailq *rmrr_entries;
struct iommu_map_entries_tailq *rmrr_entries;
};

static int
@@ -920,7 +923,7 @@ dmar_rmrr_iter(ACPI_DMAR_HEADER *dmarh, void *arg)
struct rmrr_iter_args *ria;
ACPI_DMAR_RESERVED_MEMORY *resmem;
ACPI_DMAR_DEVICE_SCOPE *devscope;
struct dmar_map_entry *entry;
struct iommu_map_entry *entry;
char *ptr, *ptrend;
int match;

@@ -958,7 +961,7 @@ dmar_rmrr_iter(ACPI_DMAR_HEADER *dmarh, void *arg)
void
dmar_dev_parse_rmrr(struct dmar_domain *domain, int dev_domain, int dev_busno,
const void *dev_path, int dev_path_len,
struct dmar_map_entries_tailq *rmrr_entries)
struct iommu_map_entries_tailq *rmrr_entries)
{
struct rmrr_iter_args ria;

@@ -1037,7 +1040,7 @@ dmar_inst_rmrr_iter(ACPI_DMAR_HEADER *dmarh, void *arg)
if (bootverbose) {
printf("dmar%d no dev found for RMRR "
"[%#jx, %#jx] rid %#x scope path ",
iria->dmar->unit,
iria->dmar->iommu.unit,
(uintmax_t)resmem->BaseAddress,
(uintmax_t)resmem->EndAddress,
rid);
@@ -1059,7 +1062,8 @@ dmar_inst_rmrr_iter(ACPI_DMAR_HEADER *dmarh, void *arg)
unit = dmar_find(dev, false);
if (iria->dmar != unit)
continue;
dmar_instantiate_ctx(iria->dmar, dev, true);
iommu_instantiate_ctx(&(iria)->dmar->iommu,
dev, true);
}
}

@@ -1071,11 +1075,14 @@ dmar_inst_rmrr_iter(ACPI_DMAR_HEADER *dmarh, void *arg)
* Pre-create all contexts for the DMAR which have RMRR entries.
*/
int
dmar_instantiate_rmrr_ctxs(struct dmar_unit *dmar)
dmar_instantiate_rmrr_ctxs(struct iommu_unit *unit)
{
struct dmar_unit *dmar;
struct inst_rmrr_iter_args iria;
int error;

dmar = (struct dmar_unit *)unit;

if (!dmar_barrier_enter(dmar, DMAR_BARRIER_RMRR))
return (0);

@@ -1086,15 +1093,15 @@ dmar_instantiate_rmrr_ctxs(struct dmar_unit *dmar)
if (!LIST_EMPTY(&dmar->domains)) {
KASSERT((dmar->hw_gcmd & DMAR_GCMD_TE) == 0,
("dmar%d: RMRR not handled but translation is already enabled",
dmar->unit));
dmar->iommu.unit));
error = dmar_enable_translation(dmar);
if (bootverbose) {
if (error == 0) {
printf("dmar%d: enabled translation\n",
dmar->unit);
dmar->iommu.unit);
} else {
printf("dmar%d: enabling translation failed, "
"error %d\n", dmar->unit, error);
"error %d\n", dmar->iommu.unit, error);
}
}
}
@@ -1107,9 +1114,9 @@ dmar_instantiate_rmrr_ctxs(struct dmar_unit *dmar)
#include <ddb/db_lex.h>

static void
dmar_print_domain_entry(const struct dmar_map_entry *entry)
dmar_print_domain_entry(const struct iommu_map_entry *entry)
{
struct dmar_map_entry *l, *r;
struct iommu_map_entry *l, *r;

db_printf(
" start %jx end %jx first %jx last %jx free_down %jx flags %x ",
@@ -1136,16 +1143,16 @@ dmar_print_ctx(struct dmar_ctx *ctx)

db_printf(
" @%p pci%d:%d:%d refs %d flags %x loads %lu unloads %lu\n",
ctx, pci_get_bus(ctx->ctx_tag.owner),
pci_get_slot(ctx->ctx_tag.owner),
pci_get_function(ctx->ctx_tag.owner), ctx->refs, ctx->flags,
ctx->loads, ctx->unloads);
ctx, pci_get_bus(ctx->context.tag->owner),
pci_get_slot(ctx->context.tag->owner),
pci_get_function(ctx->context.tag->owner), ctx->refs,
ctx->context.flags, ctx->context.loads, ctx->context.unloads);
}

static void
dmar_print_domain(struct dmar_domain *domain, bool show_mappings)
{
struct dmar_map_entry *entry;
struct iommu_map_entry *entry;
struct dmar_ctx *ctx;

db_printf(
@@ -1170,7 +1177,7 @@ dmar_print_domain(struct dmar_domain *domain, bool show_mappings)
if (db_pager_quit)
return;
db_printf(" unloading:\n");
TAILQ_FOREACH(entry, &domain->unload_entries, dmamap_link) {
TAILQ_FOREACH(entry, &domain->iodom.unload_entries, dmamap_link) {
dmar_print_domain_entry(entry);
if (db_pager_quit)
break;
@@ -1231,11 +1238,11 @@ DB_FUNC(dmar_domain, db_dmar_print_domain, db_show_table, CS_OWN, NULL)
LIST_FOREACH(domain, &unit->domains, link) {
LIST_FOREACH(ctx, &domain->contexts, link) {
if (pci_domain == unit->segment &&
bus == pci_get_bus(ctx->ctx_tag.owner) &&
bus == pci_get_bus(ctx->context.tag->owner) &&
device ==
pci_get_slot(ctx->ctx_tag.owner) &&
pci_get_slot(ctx->context.tag->owner) &&
function ==
pci_get_function(ctx->ctx_tag.owner)) {
pci_get_function(ctx->context.tag->owner)) {
dmar_print_domain(domain,
show_mappings);
goto out;
@@ -1254,8 +1261,9 @@ dmar_print_one(int idx, bool show_domains, bool show_mappings)
int i, frir;

unit = device_get_softc(dmar_devs[idx]);
db_printf("dmar%d at %p, root at 0x%jx, ver 0x%x\n", unit->unit, unit,
dmar_read8(unit, DMAR_RTADDR_REG), dmar_read4(unit, DMAR_VER_REG));
db_printf("dmar%d at %p, root at 0x%jx, ver 0x%x\n", unit->iommu.unit,
unit, dmar_read8(unit, DMAR_RTADDR_REG),
dmar_read4(unit, DMAR_VER_REG));
db_printf("cap 0x%jx ecap 0x%jx gsts 0x%x fsts 0x%x fectl 0x%x\n",
(uintmax_t)dmar_read8(unit, DMAR_CAP_REG),
(uintmax_t)dmar_read8(unit, DMAR_ECAP_REG),
@@ -1342,3 +1350,13 @@ DB_SHOW_ALL_COMMAND(dmars, db_show_all_dmars)
}
}
#endif

struct iommu_unit *
iommu_find(device_t dev, bool verbose)
{
struct dmar_unit *dmar;

dmar = dmar_find(dev, verbose);

return (&dmar->iommu);
}
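dmar_set_buswide_ctx() above keeps one bit per PCI bus in an array of 32-bit words. A self-contained sketch of the index arithmetic (CHAR_BIT stands in for the kernel's NBBY; the function name is hypothetical):

#include <limits.h>
#include <stdint.h>

static void
buswide_bit_sketch(unsigned busno, unsigned *word, uint32_t *mask)
{

	/* Which 32-bit word holds this bus's bit: busno / 32. */
	*word = busno / CHAR_BIT / sizeof(uint32_t);
	/* Which bit inside that word: busno % 32. */
	*mask = (uint32_t)1 << (busno % (CHAR_BIT * sizeof(uint32_t)));
}

For example, bus 0x25 (decimal 37) selects word 1, bit 5.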
@@ -96,25 +96,25 @@ dmar_fault_intr_clear(struct dmar_unit *unit, uint32_t fsts)

clear = 0;
if ((fsts & DMAR_FSTS_ITE) != 0) {
printf("DMAR%d: Invalidation timed out\n", unit->unit);
printf("DMAR%d: Invalidation timed out\n", unit->iommu.unit);
clear |= DMAR_FSTS_ITE;
}
if ((fsts & DMAR_FSTS_ICE) != 0) {
printf("DMAR%d: Invalidation completion error\n",
unit->unit);
unit->iommu.unit);
clear |= DMAR_FSTS_ICE;
}
if ((fsts & DMAR_FSTS_IQE) != 0) {
printf("DMAR%d: Invalidation queue error\n",
unit->unit);
unit->iommu.unit);
clear |= DMAR_FSTS_IQE;
}
if ((fsts & DMAR_FSTS_APF) != 0) {
printf("DMAR%d: Advanced pending fault\n", unit->unit);
printf("DMAR%d: Advanced pending fault\n", unit->iommu.unit);
clear |= DMAR_FSTS_APF;
}
if ((fsts & DMAR_FSTS_AFO) != 0) {
printf("DMAR%d: Advanced fault overflow\n", unit->unit);
printf("DMAR%d: Advanced fault overflow\n", unit->iommu.unit);
clear |= DMAR_FSTS_AFO;
}
if (clear != 0)
@@ -176,7 +176,7 @@ dmar_fault_intr(void *arg)
*
*/
if ((fsts & DMAR_FSTS_PFO) != 0) {
printf("DMAR%d: Fault Overflow\n", unit->unit);
printf("DMAR%d: Fault Overflow\n", unit->iommu.unit);
dmar_write4(unit, DMAR_FSTS_REG, DMAR_FSTS_PFO);
}

@@ -208,7 +208,7 @@ dmar_fault_task(void *arg, int pending __unused)
DMAR_FAULT_UNLOCK(unit);

sid = DMAR_FRCD2_SID(fault_rec[1]);
printf("DMAR%d: ", unit->unit);
printf("DMAR%d: ", unit->iommu.unit);
DMAR_LOCK(unit);
ctx = dmar_find_ctx_locked(unit, sid);
if (ctx == NULL) {
@@ -223,13 +223,13 @@ dmar_fault_task(void *arg, int pending __unused)
slot = PCI_RID2SLOT(sid);
func = PCI_RID2FUNC(sid);
} else {
ctx->flags |= DMAR_CTX_FAULTED;
ctx->context.flags |= IOMMU_CTX_FAULTED;
ctx->last_fault_rec[0] = fault_rec[0];
ctx->last_fault_rec[1] = fault_rec[1];
device_print_prettyname(ctx->ctx_tag.owner);
bus = pci_get_bus(ctx->ctx_tag.owner);
slot = pci_get_slot(ctx->ctx_tag.owner);
func = pci_get_function(ctx->ctx_tag.owner);
device_print_prettyname(ctx->context.tag->owner);
bus = pci_get_bus(ctx->context.tag->owner);
slot = pci_get_slot(ctx->context.tag->owner);
func = pci_get_function(ctx->context.tag->owner);
}
DMAR_UNLOCK(unit);
printf(
@@ -276,7 +276,7 @@ dmar_init_fault_log(struct dmar_unit *unit)
unit->fault_taskqueue = taskqueue_create_fast("dmarff", M_WAITOK,
taskqueue_thread_enqueue, &unit->fault_taskqueue);
taskqueue_start_threads(&unit->fault_taskqueue, 1, PI_AV,
"dmar%d fault taskq", unit->unit);
"dmar%d fault taskq", unit->iommu.unit);

DMAR_LOCK(unit);
dmar_disable_fault_intr(unit);
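dmar_fault_task() above decodes the faulting requester ID with PCI_RID2SLOT() and PCI_RID2FUNC(). Assuming the standard PCI RID layout (bus in bits 15:8, slot in bits 7:3, function in bits 2:0), the decoding reduces to the following sketch; the helper names are mine, not the kernel macros:

#include <stdint.h>

static inline int rid_bus_sketch(uint16_t rid)  { return ((rid >> 8) & 0xff); }
static inline int rid_slot_sketch(uint16_t rid) { return ((rid >> 3) & 0x1f); }
static inline int rid_func_sketch(uint16_t rid) { return (rid & 0x7); }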
@@ -74,48 +74,48 @@ __FBSDID("$FreeBSD$");
* Guest Address Space management.
*/

static uma_zone_t dmar_map_entry_zone;
static uma_zone_t iommu_map_entry_zone;

static void
intel_gas_init(void)
{

dmar_map_entry_zone = uma_zcreate("DMAR_MAP_ENTRY",
sizeof(struct dmar_map_entry), NULL, NULL,
iommu_map_entry_zone = uma_zcreate("IOMMU_MAP_ENTRY",
sizeof(struct iommu_map_entry), NULL, NULL,
NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NODUMP);
}
SYSINIT(intel_gas, SI_SUB_DRIVERS, SI_ORDER_FIRST, intel_gas_init, NULL);

struct dmar_map_entry *
struct iommu_map_entry *
dmar_gas_alloc_entry(struct dmar_domain *domain, u_int flags)
{
struct dmar_map_entry *res;
struct iommu_map_entry *res;

KASSERT((flags & ~(DMAR_PGF_WAITOK)) == 0,
("unsupported flags %x", flags));

res = uma_zalloc(dmar_map_entry_zone, ((flags & DMAR_PGF_WAITOK) !=
res = uma_zalloc(iommu_map_entry_zone, ((flags & DMAR_PGF_WAITOK) !=
0 ? M_WAITOK : M_NOWAIT) | M_ZERO);
if (res != NULL) {
res->domain = domain;
res->domain = (struct iommu_domain *)domain;
atomic_add_int(&domain->entries_cnt, 1);
}
return (res);
}

void
dmar_gas_free_entry(struct dmar_domain *domain, struct dmar_map_entry *entry)
dmar_gas_free_entry(struct dmar_domain *domain, struct iommu_map_entry *entry)
{

KASSERT(domain == entry->domain,
KASSERT(domain == (struct dmar_domain *)entry->domain,
("mismatched free domain %p entry %p entry->domain %p", domain,
entry, entry->domain));
atomic_subtract_int(&domain->entries_cnt, 1);
uma_zfree(dmar_map_entry_zone, entry);
uma_zfree(iommu_map_entry_zone, entry);
}

static int
dmar_gas_cmp_entries(struct dmar_map_entry *a, struct dmar_map_entry *b)
dmar_gas_cmp_entries(struct iommu_map_entry *a, struct iommu_map_entry *b)
{

/* Last entry have zero size, so <= */
@@ -137,10 +137,10 @@ dmar_gas_cmp_entries(struct dmar_map_entry *a, struct dmar_map_entry *b)
}

static void
dmar_gas_augment_entry(struct dmar_map_entry *entry)
dmar_gas_augment_entry(struct iommu_map_entry *entry)
{
struct dmar_map_entry *child;
dmar_gaddr_t free_down;
struct iommu_map_entry *child;
iommu_gaddr_t free_down;

free_down = 0;
if ((child = RB_LEFT(entry, rb_entry)) != NULL) {
@@ -159,18 +159,18 @@ dmar_gas_augment_entry(struct dmar_map_entry *entry)
entry->free_down = free_down;
}

RB_GENERATE(dmar_gas_entries_tree, dmar_map_entry, rb_entry,
RB_GENERATE(dmar_gas_entries_tree, iommu_map_entry, rb_entry,
dmar_gas_cmp_entries);

#ifdef INVARIANTS
static void
dmar_gas_check_free(struct dmar_domain *domain)
{
struct dmar_map_entry *entry, *l, *r;
dmar_gaddr_t v;
struct iommu_map_entry *entry, *l, *r;
iommu_gaddr_t v;

RB_FOREACH(entry, dmar_gas_entries_tree, &domain->rb_root) {
KASSERT(domain == entry->domain,
KASSERT(domain == (struct dmar_domain *)entry->domain,
("mismatched free domain %p entry %p entry->domain %p",
domain, entry, entry->domain));
l = RB_LEFT(entry, rb_entry);
@@ -190,16 +190,16 @@ dmar_gas_check_free(struct dmar_domain *domain)
#endif

static bool
dmar_gas_rb_insert(struct dmar_domain *domain, struct dmar_map_entry *entry)
dmar_gas_rb_insert(struct dmar_domain *domain, struct iommu_map_entry *entry)
{
struct dmar_map_entry *found;
struct iommu_map_entry *found;

found = RB_INSERT(dmar_gas_entries_tree, &domain->rb_root, entry);
return (found == NULL);
}

static void
dmar_gas_rb_remove(struct dmar_domain *domain, struct dmar_map_entry *entry)
dmar_gas_rb_remove(struct dmar_domain *domain, struct iommu_map_entry *entry)
{

RB_REMOVE(dmar_gas_entries_tree, &domain->rb_root, entry);
@@ -208,7 +208,7 @@ dmar_gas_rb_remove(struct dmar_domain *domain, struct dmar_map_entry *entry)
void
dmar_gas_init_domain(struct dmar_domain *domain)
{
struct dmar_map_entry *begin, *end;
struct iommu_map_entry *begin, *end;

begin = dmar_gas_alloc_entry(domain, DMAR_PGF_WAITOK);
end = dmar_gas_alloc_entry(domain, DMAR_PGF_WAITOK);
@@ -219,12 +219,12 @@ dmar_gas_init_domain(struct dmar_domain *domain)

begin->start = 0;
begin->end = DMAR_PAGE_SIZE;
begin->flags = DMAR_MAP_ENTRY_PLACE | DMAR_MAP_ENTRY_UNMAPPED;
begin->flags = IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_UNMAPPED;
dmar_gas_rb_insert(domain, begin);

end->start = domain->end;
end->end = domain->end;
end->flags = DMAR_MAP_ENTRY_PLACE | DMAR_MAP_ENTRY_UNMAPPED;
end->flags = IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_UNMAPPED;
dmar_gas_rb_insert(domain, end);

domain->first_place = begin;
@@ -236,7 +236,7 @@ dmar_gas_init_domain(struct dmar_domain *domain)
void
dmar_gas_fini_domain(struct dmar_domain *domain)
{
struct dmar_map_entry *entry, *entry1;
struct iommu_map_entry *entry, *entry1;

DMAR_DOMAIN_ASSERT_LOCKED(domain);
KASSERT(domain->entries_cnt == 2, ("domain still in use %p", domain));
@@ -244,7 +244,7 @@ dmar_gas_fini_domain(struct dmar_domain *domain)
entry = RB_MIN(dmar_gas_entries_tree, &domain->rb_root);
KASSERT(entry->start == 0, ("start entry start %p", domain));
KASSERT(entry->end == DMAR_PAGE_SIZE, ("start entry end %p", domain));
KASSERT(entry->flags == DMAR_MAP_ENTRY_PLACE,
KASSERT(entry->flags == IOMMU_MAP_ENTRY_PLACE,
("start entry flags %p", domain));
RB_REMOVE(dmar_gas_entries_tree, &domain->rb_root, entry);
dmar_gas_free_entry(domain, entry);
@@ -252,14 +252,14 @@ dmar_gas_fini_domain(struct dmar_domain *domain)
entry = RB_MAX(dmar_gas_entries_tree, &domain->rb_root);
KASSERT(entry->start == domain->end, ("end entry start %p", domain));
KASSERT(entry->end == domain->end, ("end entry end %p", domain));
KASSERT(entry->flags == DMAR_MAP_ENTRY_PLACE,
KASSERT(entry->flags == IOMMU_MAP_ENTRY_PLACE,
("end entry flags %p", domain));
RB_REMOVE(dmar_gas_entries_tree, &domain->rb_root, entry);
dmar_gas_free_entry(domain, entry);

RB_FOREACH_SAFE(entry, dmar_gas_entries_tree, &domain->rb_root,
entry1) {
KASSERT((entry->flags & DMAR_MAP_ENTRY_RMRR) != 0,
KASSERT((entry->flags & IOMMU_MAP_ENTRY_RMRR) != 0,
("non-RMRR entry left %p", domain));
RB_REMOVE(dmar_gas_entries_tree, &domain->rb_root, entry);
dmar_gas_free_entry(domain, entry);
@@ -268,11 +268,11 @@ dmar_gas_fini_domain(struct dmar_domain *domain)

struct dmar_gas_match_args {
struct dmar_domain *domain;
dmar_gaddr_t size;
iommu_gaddr_t size;
int offset;
const struct bus_dma_tag_common *common;
u_int gas_flags;
struct dmar_map_entry *entry;
struct iommu_map_entry *entry;
};

/*
@@ -282,10 +282,10 @@ struct dmar_gas_match_args {
* by a, and return 'true' if and only if the allocation attempt succeeds.
*/
static bool
dmar_gas_match_one(struct dmar_gas_match_args *a, dmar_gaddr_t beg,
dmar_gaddr_t end, dmar_gaddr_t maxaddr)
dmar_gas_match_one(struct dmar_gas_match_args *a, iommu_gaddr_t beg,
iommu_gaddr_t end, iommu_gaddr_t maxaddr)
{
dmar_gaddr_t bs, start;
iommu_gaddr_t bs, start;

a->entry->start = roundup2(beg + DMAR_PAGE_SIZE,
a->common->alignment);
@@ -298,7 +298,7 @@ dmar_gas_match_one(struct dmar_gas_match_args *a, dmar_gaddr_t beg,
return (false);

/* No boundary crossing. */
if (dmar_test_boundary(a->entry->start + a->offset, a->size,
if (iommu_test_boundary(a->entry->start + a->offset, a->size,
a->common->boundary))
return (true);

@@ -313,7 +313,7 @@ dmar_gas_match_one(struct dmar_gas_match_args *a, dmar_gaddr_t beg,
/* DMAR_PAGE_SIZE to create gap after new entry. */
if (start + a->offset + a->size + DMAR_PAGE_SIZE <= end &&
start + a->offset + a->size <= maxaddr &&
dmar_test_boundary(start + a->offset, a->size,
iommu_test_boundary(start + a->offset, a->size,
a->common->boundary)) {
a->entry->start = start;
return (true);
@@ -327,7 +327,7 @@ dmar_gas_match_one(struct dmar_gas_match_args *a, dmar_gaddr_t beg,
* XXXKIB. It is possible that bs is exactly at the start of
* the next entry, then we do not have gap. Ignore for now.
*/
if ((a->gas_flags & DMAR_GM_CANSPLIT) != 0) {
if ((a->gas_flags & IOMMU_MF_CANSPLIT) != 0) {
a->size = bs - a->entry->start;
return (true);
}
@@ -353,13 +353,13 @@ dmar_gas_match_insert(struct dmar_gas_match_args *a)
found = dmar_gas_rb_insert(a->domain, a->entry);
KASSERT(found, ("found dup %p start %jx size %jx",
a->domain, (uintmax_t)a->entry->start, (uintmax_t)a->size));
a->entry->flags = DMAR_MAP_ENTRY_MAP;
a->entry->flags = IOMMU_MAP_ENTRY_MAP;
}

static int
dmar_gas_lowermatch(struct dmar_gas_match_args *a, struct dmar_map_entry *entry)
dmar_gas_lowermatch(struct dmar_gas_match_args *a, struct iommu_map_entry *entry)
{
struct dmar_map_entry *child;
struct iommu_map_entry *child;

child = RB_RIGHT(entry, rb_entry);
if (child != NULL && entry->end < a->common->lowaddr &&
@@ -388,9 +388,9 @@ dmar_gas_lowermatch(struct dmar_gas_match_args *a, struct dmar_map_entry *entry)
}

static int
dmar_gas_uppermatch(struct dmar_gas_match_args *a, struct dmar_map_entry *entry)
dmar_gas_uppermatch(struct dmar_gas_match_args *a, struct iommu_map_entry *entry)
{
struct dmar_map_entry *child;
struct iommu_map_entry *child;

if (entry->free_down < a->size + a->offset + DMAR_PAGE_SIZE)
return (ENOMEM);
@@ -419,8 +419,8 @@ dmar_gas_uppermatch(struct dmar_gas_match_args *a, struct dmar_map_entry *entry)

static int
dmar_gas_find_space(struct dmar_domain *domain,
const struct bus_dma_tag_common *common, dmar_gaddr_t size,
int offset, u_int flags, struct dmar_map_entry *entry)
const struct bus_dma_tag_common *common, iommu_gaddr_t size,
int offset, u_int flags, struct iommu_map_entry *entry)
{
struct dmar_gas_match_args a;
int error;
@@ -454,10 +454,10 @@ dmar_gas_find_space(struct dmar_domain *domain,
}

static int
dmar_gas_alloc_region(struct dmar_domain *domain, struct dmar_map_entry *entry,
dmar_gas_alloc_region(struct dmar_domain *domain, struct iommu_map_entry *entry,
u_int flags)
{
struct dmar_map_entry *next, *prev;
struct iommu_map_entry *next, *prev;
bool found;

DMAR_DOMAIN_ASSERT_LOCKED(domain);
@@ -485,16 +485,16 @@ dmar_gas_alloc_region(struct dmar_domain *domain, struct dmar_map_entry *entry,
* extends both ways.
*/
if (prev != NULL && prev->end > entry->start &&
(prev->flags & DMAR_MAP_ENTRY_PLACE) == 0) {
if ((flags & DMAR_GM_RMRR) == 0 ||
(prev->flags & DMAR_MAP_ENTRY_RMRR) == 0)
(prev->flags & IOMMU_MAP_ENTRY_PLACE) == 0) {
if ((flags & IOMMU_MF_RMRR) == 0 ||
(prev->flags & IOMMU_MAP_ENTRY_RMRR) == 0)
return (EBUSY);
entry->start = prev->end;
}
if (next->start < entry->end &&
(next->flags & DMAR_MAP_ENTRY_PLACE) == 0) {
if ((flags & DMAR_GM_RMRR) == 0 ||
(next->flags & DMAR_MAP_ENTRY_RMRR) == 0)
(next->flags & IOMMU_MAP_ENTRY_PLACE) == 0) {
if ((flags & IOMMU_MF_RMRR) == 0 ||
(next->flags & IOMMU_MAP_ENTRY_RMRR) == 0)
return (EBUSY);
entry->end = next->start;
}
@@ -514,11 +514,11 @@ dmar_gas_alloc_region(struct dmar_domain *domain, struct dmar_map_entry *entry,
found = dmar_gas_rb_insert(domain, entry);
KASSERT(found, ("found RMRR dup %p start %jx end %jx",
domain, (uintmax_t)entry->start, (uintmax_t)entry->end));
if ((flags & DMAR_GM_RMRR) != 0)
entry->flags = DMAR_MAP_ENTRY_RMRR;
if ((flags & IOMMU_MF_RMRR) != 0)
entry->flags = IOMMU_MAP_ENTRY_RMRR;

#ifdef INVARIANTS
struct dmar_map_entry *ip, *in;
struct iommu_map_entry *ip, *in;
ip = RB_PREV(dmar_gas_entries_tree, &domain->rb_root, entry);
in = RB_NEXT(dmar_gas_entries_tree, &domain->rb_root, entry);
KASSERT(prev == NULL || ip == prev,
@@ -537,16 +537,16 @@ dmar_gas_alloc_region(struct dmar_domain *domain, struct dmar_map_entry *entry,
}

void
dmar_gas_free_space(struct dmar_domain *domain, struct dmar_map_entry *entry)
dmar_gas_free_space(struct dmar_domain *domain, struct iommu_map_entry *entry)
{

DMAR_DOMAIN_ASSERT_LOCKED(domain);
KASSERT((entry->flags & (DMAR_MAP_ENTRY_PLACE | DMAR_MAP_ENTRY_RMRR |
DMAR_MAP_ENTRY_MAP)) == DMAR_MAP_ENTRY_MAP,
KASSERT((entry->flags & (IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_RMRR |
IOMMU_MAP_ENTRY_MAP)) == IOMMU_MAP_ENTRY_MAP,
("permanent entry %p %p", domain, entry));

dmar_gas_rb_remove(domain, entry);
entry->flags &= ~DMAR_MAP_ENTRY_MAP;
entry->flags &= ~IOMMU_MAP_ENTRY_MAP;
#ifdef INVARIANTS
if (dmar_check_free)
dmar_gas_check_free(domain);
@@ -554,19 +554,19 @@ dmar_gas_free_space(struct dmar_domain *domain, struct dmar_map_entry *entry)
}

void
dmar_gas_free_region(struct dmar_domain *domain, struct dmar_map_entry *entry)
dmar_gas_free_region(struct dmar_domain *domain, struct iommu_map_entry *entry)
{
struct dmar_map_entry *next, *prev;
struct iommu_map_entry *next, *prev;

DMAR_DOMAIN_ASSERT_LOCKED(domain);
KASSERT((entry->flags & (DMAR_MAP_ENTRY_PLACE | DMAR_MAP_ENTRY_RMRR |
DMAR_MAP_ENTRY_MAP)) == DMAR_MAP_ENTRY_RMRR,
KASSERT((entry->flags & (IOMMU_MAP_ENTRY_PLACE | IOMMU_MAP_ENTRY_RMRR |
IOMMU_MAP_ENTRY_MAP)) == IOMMU_MAP_ENTRY_RMRR,
("non-RMRR entry %p %p", domain, entry));

prev = RB_PREV(dmar_gas_entries_tree, &domain->rb_root, entry);
next = RB_NEXT(dmar_gas_entries_tree, &domain->rb_root, entry);
dmar_gas_rb_remove(domain, entry);
entry->flags &= ~DMAR_MAP_ENTRY_RMRR;
entry->flags &= ~IOMMU_MAP_ENTRY_RMRR;

if (prev == NULL)
dmar_gas_rb_insert(domain, domain->first_place);
@@ -576,17 +576,17 @@ dmar_gas_free_region(struct dmar_domain *domain, struct dmar_map_entry *entry)

int
dmar_gas_map(struct dmar_domain *domain,
const struct bus_dma_tag_common *common, dmar_gaddr_t size, int offset,
u_int eflags, u_int flags, vm_page_t *ma, struct dmar_map_entry **res)
const struct bus_dma_tag_common *common, iommu_gaddr_t size, int offset,
u_int eflags, u_int flags, vm_page_t *ma, struct iommu_map_entry **res)
{
struct dmar_map_entry *entry;
struct iommu_map_entry *entry;
int error;

KASSERT((flags & ~(DMAR_GM_CANWAIT | DMAR_GM_CANSPLIT)) == 0,
KASSERT((flags & ~(IOMMU_MF_CANWAIT | IOMMU_MF_CANSPLIT)) == 0,
("invalid flags 0x%x", flags));

entry = dmar_gas_alloc_entry(domain, (flags & DMAR_GM_CANWAIT) != 0 ?
DMAR_PGF_WAITOK : 0);
entry = dmar_gas_alloc_entry(domain,
(flags & IOMMU_MF_CANWAIT) != 0 ? DMAR_PGF_WAITOK : 0);
if (entry == NULL)
return (ENOMEM);
DMAR_DOMAIN_LOCK(domain);
@@ -610,11 +610,11 @@ dmar_gas_map(struct dmar_domain *domain,

error = domain_map_buf(domain, entry->start, entry->end - entry->start,
ma,
((eflags & DMAR_MAP_ENTRY_READ) != 0 ? DMAR_PTE_R : 0) |
((eflags & DMAR_MAP_ENTRY_WRITE) != 0 ? DMAR_PTE_W : 0) |
((eflags & DMAR_MAP_ENTRY_SNOOP) != 0 ? DMAR_PTE_SNP : 0) |
((eflags & DMAR_MAP_ENTRY_TM) != 0 ? DMAR_PTE_TM : 0),
(flags & DMAR_GM_CANWAIT) != 0 ? DMAR_PGF_WAITOK : 0);
((eflags & IOMMU_MAP_ENTRY_READ) != 0 ? DMAR_PTE_R : 0) |
((eflags & IOMMU_MAP_ENTRY_WRITE) != 0 ? DMAR_PTE_W : 0) |
((eflags & IOMMU_MAP_ENTRY_SNOOP) != 0 ? DMAR_PTE_SNP : 0) |
((eflags & IOMMU_MAP_ENTRY_TM) != 0 ? DMAR_PTE_TM : 0),
(flags & IOMMU_MF_CANWAIT) != 0 ? DMAR_PGF_WAITOK : 0);
if (error == ENOMEM) {
dmar_domain_unload_entry(entry, true);
return (error);
@@ -627,15 +627,15 @@ dmar_gas_map(struct dmar_domain *domain,
}

int
dmar_gas_map_region(struct dmar_domain *domain, struct dmar_map_entry *entry,
dmar_gas_map_region(struct dmar_domain *domain, struct iommu_map_entry *entry,
u_int eflags, u_int flags, vm_page_t *ma)
{
dmar_gaddr_t start;
iommu_gaddr_t start;
int error;

KASSERT(entry->flags == 0, ("used RMRR entry %p %p %x", domain,
entry, entry->flags));
KASSERT((flags & ~(DMAR_GM_CANWAIT | DMAR_GM_RMRR)) == 0,
KASSERT((flags & ~(IOMMU_MF_CANWAIT | IOMMU_MF_RMRR)) == 0,
("invalid flags 0x%x", flags));

start = entry->start;
@@ -652,11 +652,11 @@ dmar_gas_map_region(struct dmar_domain *domain, struct dmar_map_entry *entry,

error = domain_map_buf(domain, entry->start, entry->end - entry->start,
ma + OFF_TO_IDX(start - entry->start),
((eflags & DMAR_MAP_ENTRY_READ) != 0 ? DMAR_PTE_R : 0) |
((eflags & DMAR_MAP_ENTRY_WRITE) != 0 ? DMAR_PTE_W : 0) |
((eflags & DMAR_MAP_ENTRY_SNOOP) != 0 ? DMAR_PTE_SNP : 0) |
((eflags & DMAR_MAP_ENTRY_TM) != 0 ? DMAR_PTE_TM : 0),
(flags & DMAR_GM_CANWAIT) != 0 ? DMAR_PGF_WAITOK : 0);
((eflags & IOMMU_MAP_ENTRY_READ) != 0 ? DMAR_PTE_R : 0) |
((eflags & IOMMU_MAP_ENTRY_WRITE) != 0 ? DMAR_PTE_W : 0) |
((eflags & IOMMU_MAP_ENTRY_SNOOP) != 0 ? DMAR_PTE_SNP : 0) |
((eflags & IOMMU_MAP_ENTRY_TM) != 0 ? DMAR_PTE_TM : 0),
(flags & IOMMU_MF_CANWAIT) != 0 ? DMAR_PGF_WAITOK : 0);
if (error == ENOMEM) {
dmar_domain_unload_entry(entry, false);
return (error);
@@ -668,21 +668,74 @@ dmar_gas_map_region(struct dmar_domain *domain, struct dmar_map_entry *entry,
}

int
dmar_gas_reserve_region(struct dmar_domain *domain, dmar_gaddr_t start,
dmar_gaddr_t end)
dmar_gas_reserve_region(struct dmar_domain *domain, iommu_gaddr_t start,
iommu_gaddr_t end)
{
struct dmar_map_entry *entry;
struct iommu_map_entry *entry;
int error;

entry = dmar_gas_alloc_entry(domain, DMAR_PGF_WAITOK);
entry->start = start;
entry->end = end;
DMAR_DOMAIN_LOCK(domain);
error = dmar_gas_alloc_region(domain, entry, DMAR_GM_CANWAIT);
error = dmar_gas_alloc_region(domain, entry, IOMMU_MF_CANWAIT);
if (error == 0)
entry->flags |= DMAR_MAP_ENTRY_UNMAPPED;
entry->flags |= IOMMU_MAP_ENTRY_UNMAPPED;
DMAR_DOMAIN_UNLOCK(domain);
if (error != 0)
dmar_gas_free_entry(domain, entry);
return (error);
}

struct iommu_map_entry *
iommu_map_alloc_entry(struct iommu_domain *iodom, u_int flags)
{
struct dmar_domain *domain;
struct iommu_map_entry *res;

domain = (struct dmar_domain *)iodom;

res = dmar_gas_alloc_entry(domain, flags);

return (res);
}

void
iommu_map_free_entry(struct iommu_domain *iodom, struct iommu_map_entry *entry)
{
struct dmar_domain *domain;

domain = (struct dmar_domain *)iodom;

dmar_gas_free_entry(domain, entry);
}

int
iommu_map(struct iommu_domain *iodom,
const struct bus_dma_tag_common *common, iommu_gaddr_t size, int offset,
u_int eflags, u_int flags, vm_page_t *ma, struct iommu_map_entry **res)
{
struct dmar_domain *domain;
int error;

domain = (struct dmar_domain *)iodom;

error = dmar_gas_map(domain, common, size, offset, eflags, flags,
ma, res);

return (error);
}

int
iommu_map_region(struct iommu_domain *iodom, struct iommu_map_entry *entry,
u_int eflags, u_int flags, vm_page_t *ma)
{
struct dmar_domain *domain;
int error;

domain = (struct dmar_domain *)iodom;

error = dmar_gas_map_region(domain, entry, eflags, flags, ma);

return (error);
}
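dmar_gas_map() and dmar_gas_map_region() above repeat the same eflags-to-PTE translation as a chain of ternaries. A hypothetical helper, not introduced by the commit, that expresses the mapping once (it would compile in the same translation unit as intel_gas.c, which already sees the IOMMU_MAP_ENTRY_* and DMAR_PTE_* definitions):

static uint64_t
iommu_eflags_to_pte_sketch(u_int eflags)
{
	uint64_t pflags;

	pflags = 0;
	if ((eflags & IOMMU_MAP_ENTRY_READ) != 0)
		pflags |= DMAR_PTE_R;
	if ((eflags & IOMMU_MAP_ENTRY_WRITE) != 0)
		pflags |= DMAR_PTE_W;
	if ((eflags & IOMMU_MAP_ENTRY_SNOOP) != 0)
		pflags |= DMAR_PTE_SNP;
	if ((eflags & IOMMU_MAP_ENTRY_TM) != 0)
		pflags |= DMAR_PTE_TM;
	return (pflags);
}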
@@ -70,7 +70,7 @@ __FBSDID("$FreeBSD$");
 #include <x86/iommu/intel_dmar.h>
 
 static int domain_unmap_buf_locked(struct dmar_domain *domain,
-    dmar_gaddr_t base, dmar_gaddr_t size, int flags);
+    iommu_gaddr_t base, iommu_gaddr_t size, int flags);
 
 /*
  * The cache of the identity mapping page tables for the DMARs.  Using
@@ -82,7 +82,7 @@ static int domain_unmap_buf_locked(struct dmar_domain *domain,
  */
 
 struct idpgtbl {
-	dmar_gaddr_t maxaddr;	/* Page table covers the guest address
+	iommu_gaddr_t maxaddr;	/* Page table covers the guest address
 				   range [0..maxaddr) */
 	int pglvl;		/* Total page table levels ignoring
 				   superpages */
@@ -109,12 +109,12 @@ static MALLOC_DEFINE(M_DMAR_IDPGTBL, "dmar_idpgtbl",
 */
 static void
 domain_idmap_nextlvl(struct idpgtbl *tbl, int lvl, vm_pindex_t idx,
-    dmar_gaddr_t addr)
+    iommu_gaddr_t addr)
 {
 	vm_page_t m1;
 	dmar_pte_t *pte;
 	struct sf_buf *sf;
-	dmar_gaddr_t f, pg_sz;
+	iommu_gaddr_t f, pg_sz;
 	vm_pindex_t base;
 	int i;
 
@@ -163,7 +163,7 @@ domain_idmap_nextlvl(struct idpgtbl *tbl, int lvl, vm_pindex_t idx,
  * maxaddr is typically mapped.
  */
 vm_object_t
-domain_get_idmap_pgtbl(struct dmar_domain *domain, dmar_gaddr_t maxaddr)
+domain_get_idmap_pgtbl(struct dmar_domain *domain, iommu_gaddr_t maxaddr)
 {
 	struct dmar_unit *unit;
 	struct idpgtbl *tbl;
@@ -323,7 +323,7 @@ put_idmap_pgtbl(vm_object_t obj)
  * the level lvl.
  */
 static int
-domain_pgtbl_pte_off(struct dmar_domain *domain, dmar_gaddr_t base, int lvl)
+domain_pgtbl_pte_off(struct dmar_domain *domain, iommu_gaddr_t base, int lvl)
 {
 
 	base >>= DMAR_PAGE_SHIFT + (domain->pglvl - lvl - 1) *
@@ -337,7 +337,7 @@ domain_pgtbl_pte_off(struct dmar_domain *domain, dmar_gaddr_t base, int lvl)
  * lvl.
  */
 static vm_pindex_t
-domain_pgtbl_get_pindex(struct dmar_domain *domain, dmar_gaddr_t base, int lvl)
+domain_pgtbl_get_pindex(struct dmar_domain *domain, iommu_gaddr_t base, int lvl)
 {
 	vm_pindex_t idx, pidx;
 	int i;
@@ -353,7 +353,7 @@ domain_pgtbl_get_pindex(struct dmar_domain *domain, dmar_gaddr_t base, int lvl)
 }
 
 static dmar_pte_t *
-domain_pgtbl_map_pte(struct dmar_domain *domain, dmar_gaddr_t base, int lvl,
+domain_pgtbl_map_pte(struct dmar_domain *domain, iommu_gaddr_t base, int lvl,
     int flags, vm_pindex_t *idxp, struct sf_buf **sf)
 {
 	vm_page_t m;
@@ -421,12 +421,12 @@ domain_pgtbl_map_pte(struct dmar_domain *domain, dmar_gaddr_t base, int lvl,
 }
 
 static int
-domain_map_buf_locked(struct dmar_domain *domain, dmar_gaddr_t base,
-    dmar_gaddr_t size, vm_page_t *ma, uint64_t pflags, int flags)
+domain_map_buf_locked(struct dmar_domain *domain, iommu_gaddr_t base,
+    iommu_gaddr_t size, vm_page_t *ma, uint64_t pflags, int flags)
 {
 	dmar_pte_t *pte;
 	struct sf_buf *sf;
-	dmar_gaddr_t pg_sz, base1, size1;
+	iommu_gaddr_t pg_sz, base1, size1;
 	vm_pindex_t pi, c, idx, run_sz;
 	int lvl;
 	bool superpage;
@@ -499,7 +499,7 @@ domain_map_buf_locked(struct dmar_domain *domain, dmar_gaddr_t base,
 }
 
 int
-domain_map_buf(struct dmar_domain *domain, dmar_gaddr_t base, dmar_gaddr_t size,
+domain_map_buf(struct dmar_domain *domain, iommu_gaddr_t base, iommu_gaddr_t size,
     vm_page_t *ma, uint64_t pflags, int flags)
 {
 	struct dmar_unit *unit;
@@ -559,11 +559,11 @@ domain_map_buf(struct dmar_domain *domain, dmar_gaddr_t base, dmar_gaddr_t size,
 }
 
 static void domain_unmap_clear_pte(struct dmar_domain *domain,
-    dmar_gaddr_t base, int lvl, int flags, dmar_pte_t *pte,
+    iommu_gaddr_t base, int lvl, int flags, dmar_pte_t *pte,
     struct sf_buf **sf, bool free_fs);
 
 static void
-domain_free_pgtbl_pde(struct dmar_domain *domain, dmar_gaddr_t base,
+domain_free_pgtbl_pde(struct dmar_domain *domain, iommu_gaddr_t base,
     int lvl, int flags)
 {
 	struct sf_buf *sf;
@@ -576,7 +576,7 @@ domain_free_pgtbl_pde(struct dmar_domain *domain, dmar_gaddr_t base,
 }
 
 static void
-domain_unmap_clear_pte(struct dmar_domain *domain, dmar_gaddr_t base, int lvl,
+domain_unmap_clear_pte(struct dmar_domain *domain, iommu_gaddr_t base, int lvl,
     int flags, dmar_pte_t *pte, struct sf_buf **sf, bool free_sf)
 {
 	vm_page_t m;
@@ -605,13 +605,13 @@ domain_unmap_clear_pte(struct dmar_domain *domain, dmar_gaddr_t base, int lvl,
  * Assumes that the unmap is never partial.
  */
 static int
-domain_unmap_buf_locked(struct dmar_domain *domain, dmar_gaddr_t base,
-    dmar_gaddr_t size, int flags)
+domain_unmap_buf_locked(struct dmar_domain *domain, iommu_gaddr_t base,
+    iommu_gaddr_t size, int flags)
 {
 	dmar_pte_t *pte;
 	struct sf_buf *sf;
 	vm_pindex_t idx;
-	dmar_gaddr_t pg_sz;
+	iommu_gaddr_t pg_sz;
 	int lvl;
 
 	DMAR_DOMAIN_ASSERT_PGLOCKED(domain);
@@ -677,8 +677,8 @@ domain_unmap_buf_locked(struct dmar_domain *domain, dmar_gaddr_t base,
 }
 
 int
-domain_unmap_buf(struct dmar_domain *domain, dmar_gaddr_t base,
-    dmar_gaddr_t size, int flags)
+domain_unmap_buf(struct dmar_domain *domain, iommu_gaddr_t base,
+    iommu_gaddr_t size, int flags)
 {
 	int error;
 
@@ -757,17 +757,17 @@ domain_wait_iotlb_flush(struct dmar_unit *unit, uint64_t wt, int iro)
 }
 
 void
-domain_flush_iotlb_sync(struct dmar_domain *domain, dmar_gaddr_t base,
-    dmar_gaddr_t size)
+domain_flush_iotlb_sync(struct dmar_domain *domain, iommu_gaddr_t base,
+    iommu_gaddr_t size)
 {
 	struct dmar_unit *unit;
-	dmar_gaddr_t isize;
+	iommu_gaddr_t isize;
 	uint64_t iotlbr;
 	int am, iro;
 
 	unit = domain->dmar;
 	KASSERT(!unit->qi_enabled, ("dmar%d: sync iotlb flush call",
-	    unit->unit));
+	    unit->iommu.unit));
 	iro = DMAR_ECAP_IRO(unit->hw_ecap) * 16;
 	DMAR_LOCK(unit);
 	if ((unit->hw_cap & DMAR_CAP_PSI) == 0 || size > 2 * 1024 * 1024) {
@@ -775,7 +775,7 @@ domain_flush_iotlb_sync(struct dmar_domain *domain, dmar_gaddr_t base,
 		    DMAR_IOTLB_DID(domain->domain), iro);
 		KASSERT((iotlbr & DMAR_IOTLB_IAIG_MASK) !=
 		    DMAR_IOTLB_IAIG_INVLD,
-		    ("dmar%d: invalidation failed %jx", unit->unit,
+		    ("dmar%d: invalidation failed %jx", unit->iommu.unit,
 		    (uintmax_t)iotlbr));
 	} else {
 		for (; size > 0; base += isize, size -= isize) {
@@ -788,7 +788,7 @@ domain_flush_iotlb_sync(struct dmar_domain *domain, dmar_gaddr_t base,
 			    DMAR_IOTLB_IAIG_INVLD,
 			    ("dmar%d: PSI invalidation failed "
 			    "iotlbr 0x%jx base 0x%jx size 0x%jx am %d",
-			    unit->unit, (uintmax_t)iotlbr,
+			    unit->iommu.unit, (uintmax_t)iotlbr,
 			    (uintmax_t)base, (uintmax_t)size, am));
 			/*
 			 * Any non-page granularity covers whole guest
@@ -255,7 +255,7 @@ dmar_ir_find(device_t src, uint16_t *rid, int *is_dmar)
 	} else {
 		unit = dmar_find(src, bootverbose);
 		if (unit != NULL && rid != NULL)
-			dmar_get_requester(src, rid);
+			iommu_get_requester(src, rid);
 	}
 	return (unit);
 }
@@ -63,7 +63,7 @@ __FBSDID("$FreeBSD$");
 
 static bool
 dmar_qi_seq_processed(const struct dmar_unit *unit,
-    const struct dmar_qi_genseq *pseq)
+    const struct iommu_qi_genseq *pseq)
 {
 
 	return (pseq->gen < unit->inv_waitd_gen ||
@@ -174,10 +174,10 @@ dmar_qi_emit_wait_descr(struct dmar_unit *unit, uint32_t seq, bool intr,
 }
 
 static void
-dmar_qi_emit_wait_seq(struct dmar_unit *unit, struct dmar_qi_genseq *pseq,
+dmar_qi_emit_wait_seq(struct dmar_unit *unit, struct iommu_qi_genseq *pseq,
     bool emit_wait)
 {
-	struct dmar_qi_genseq gsec;
+	struct iommu_qi_genseq gsec;
 	uint32_t seq;
 
 	KASSERT(pseq != NULL, ("wait descriptor with no place for seq"));
@@ -203,7 +203,7 @@ dmar_qi_emit_wait_seq(struct dmar_unit *unit, struct dmar_qi_genseq *pseq,
 }
 
 static void
-dmar_qi_wait_for_seq(struct dmar_unit *unit, const struct dmar_qi_genseq *gseq,
+dmar_qi_wait_for_seq(struct dmar_unit *unit, const struct iommu_qi_genseq *gseq,
     bool nowait)
 {
 
@@ -213,7 +213,7 @@ dmar_qi_wait_for_seq(struct dmar_unit *unit, const struct dmar_qi_genseq *gseq,
 		if (cold || nowait) {
 			cpu_spinwait();
 		} else {
-			msleep(&unit->inv_seq_waiters, &unit->lock, 0,
+			msleep(&unit->inv_seq_waiters, &unit->iommu.lock, 0,
 			    "dmarse", hz);
 		}
 	}
@@ -221,11 +221,11 @@ dmar_qi_wait_for_seq(struct dmar_unit *unit, const struct dmar_qi_genseq *gseq,
 }
 
 void
-dmar_qi_invalidate_locked(struct dmar_domain *domain, dmar_gaddr_t base,
-    dmar_gaddr_t size, struct dmar_qi_genseq *pseq, bool emit_wait)
+dmar_qi_invalidate_locked(struct dmar_domain *domain, iommu_gaddr_t base,
+    iommu_gaddr_t size, struct iommu_qi_genseq *pseq, bool emit_wait)
 {
 	struct dmar_unit *unit;
-	dmar_gaddr_t isize;
+	iommu_gaddr_t isize;
 	int am;
 
 	unit = domain->dmar;
@@ -246,7 +246,7 @@ dmar_qi_invalidate_locked(struct dmar_domain *domain, dmar_gaddr_t base,
 void
 dmar_qi_invalidate_ctx_glob_locked(struct dmar_unit *unit)
 {
-	struct dmar_qi_genseq gseq;
+	struct iommu_qi_genseq gseq;
 
 	DMAR_ASSERT_LOCKED(unit);
 	dmar_qi_ensure(unit, 2);
@@ -259,7 +259,7 @@ dmar_qi_invalidate_ctx_glob_locked(struct dmar_unit *unit)
 void
 dmar_qi_invalidate_iotlb_glob_locked(struct dmar_unit *unit)
 {
-	struct dmar_qi_genseq gseq;
+	struct iommu_qi_genseq gseq;
 
 	DMAR_ASSERT_LOCKED(unit);
 	dmar_qi_ensure(unit, 2);
@@ -273,7 +273,7 @@ dmar_qi_invalidate_iotlb_glob_locked(struct dmar_unit *unit)
 void
 dmar_qi_invalidate_iec_glob(struct dmar_unit *unit)
 {
-	struct dmar_qi_genseq gseq;
+	struct iommu_qi_genseq gseq;
 
 	DMAR_ASSERT_LOCKED(unit);
 	dmar_qi_ensure(unit, 2);
@@ -286,7 +286,7 @@ dmar_qi_invalidate_iec_glob(struct dmar_unit *unit)
 void
 dmar_qi_invalidate_iec(struct dmar_unit *unit, u_int start, u_int cnt)
 {
-	struct dmar_qi_genseq gseq;
+	struct iommu_qi_genseq gseq;
 	u_int c, l;
 
 	DMAR_ASSERT_LOCKED(unit);
@@ -329,7 +329,8 @@ dmar_qi_intr(void *arg)
 	struct dmar_unit *unit;
 
 	unit = arg;
-	KASSERT(unit->qi_enabled, ("dmar%d: QI is not enabled", unit->unit));
+	KASSERT(unit->qi_enabled, ("dmar%d: QI is not enabled",
+	    unit->iommu.unit));
 	taskqueue_enqueue(unit->qi_taskqueue, &unit->qi_task);
 	return (FILTER_HANDLED);
 }
@@ -338,7 +339,7 @@ static void
 dmar_qi_task(void *arg, int pending __unused)
 {
 	struct dmar_unit *unit;
-	struct dmar_map_entry *entry;
+	struct iommu_map_entry *entry;
 	uint32_t ics;
 
 	unit = arg;
@@ -353,7 +354,7 @@ dmar_qi_task(void *arg, int pending __unused)
 		TAILQ_REMOVE(&unit->tlb_flush_entries, entry, dmamap_link);
 		DMAR_UNLOCK(unit);
 		dmar_domain_free_entry(entry, (entry->flags &
-		    DMAR_MAP_ENTRY_QI_NF) == 0);
+		    IOMMU_MAP_ENTRY_QI_NF) == 0);
 		DMAR_LOCK(unit);
 	}
 	ics = dmar_read4(unit, DMAR_ICS_REG);
@@ -385,7 +386,7 @@ dmar_init_qi(struct dmar_unit *unit)
 	unit->qi_taskqueue = taskqueue_create_fast("dmarqf", M_WAITOK,
 	    taskqueue_thread_enqueue, &unit->qi_taskqueue);
 	taskqueue_start_threads(&unit->qi_taskqueue, 1, PI_AV,
-	    "dmar%d qi taskq", unit->unit);
+	    "dmar%d qi taskq", unit->iommu.unit);
 
 	unit->inv_waitd_gen = 0;
 	unit->inv_waitd_seq = 1;
@@ -424,7 +425,7 @@ dmar_init_qi(struct dmar_unit *unit)
 void
 dmar_fini_qi(struct dmar_unit *unit)
 {
-	struct dmar_qi_genseq gseq;
+	struct iommu_qi_genseq gseq;
 
 	if (!unit->qi_enabled)
 		return;
@@ -442,7 +443,7 @@ dmar_fini_qi(struct dmar_unit *unit)
 	dmar_disable_qi_intr(unit);
 	dmar_disable_qi(unit);
 	KASSERT(unit->inv_seq_waiters == 0,
-	    ("dmar%d: waiters on disabled queue", unit->unit));
+	    ("dmar%d: waiters on disabled queue", unit->iommu.unit));
 	DMAR_UNLOCK(unit);
 
 	kmem_free(unit->inv_queue, unit->inv_queue_size);
@@ -457,7 +458,8 @@ dmar_enable_qi_intr(struct dmar_unit *unit)
 	uint32_t iectl;
 
 	DMAR_ASSERT_LOCKED(unit);
-	KASSERT(DMAR_HAS_QI(unit), ("dmar%d: QI is not supported", unit->unit));
+	KASSERT(DMAR_HAS_QI(unit), ("dmar%d: QI is not supported",
+	    unit->iommu.unit));
 	iectl = dmar_read4(unit, DMAR_IECTL_REG);
 	iectl &= ~DMAR_IECTL_IM;
 	dmar_write4(unit, DMAR_IECTL_REG, iectl);
@@ -469,7 +471,8 @@ dmar_disable_qi_intr(struct dmar_unit *unit)
 	uint32_t iectl;
 
 	DMAR_ASSERT_LOCKED(unit);
-	KASSERT(DMAR_HAS_QI(unit), ("dmar%d: QI is not supported", unit->unit));
+	KASSERT(DMAR_HAS_QI(unit), ("dmar%d: QI is not supported",
+	    unit->iommu.unit));
 	iectl = dmar_read4(unit, DMAR_IECTL_REG);
 	dmar_write4(unit, DMAR_IECTL_REG, iectl | DMAR_IECTL_IM);
 }
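The recurring rewrites in this file from unit->unit to unit->iommu.unit, and from &unit->lock to &unit->iommu.lock, follow from the same layout change: the unit number and the unit mutex are state common to every IOMMU driver, so they move into the embedded MI struct iommu_unit. A rough sketch of the assumed layout, with member sets abbreviated and names invented (the real definitions live in sys/sys/iommu.h and the DMAR driver headers):

/* Shared per-unit state, MI view. */
struct mi_unit {
	/* struct mtx lock; */	/* formerly a direct dmar_unit member */
	int	unit;		/* unit number, formerly dmar_unit.unit */
};

/* DMAR-specific per-unit state, MD view. */
struct md_unit {
	struct mi_unit iommu;	/* MI state, kept as the first member */
	int	qi_enabled;	/* illustrative MD-only field */
};

With this layout, an expression such as unit->iommu.unit reaches the MI unit number through the MD pointer, which is exactly the conversion applied to the KASSERT format arguments above.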
@@ -222,8 +222,11 @@ static const struct intel_dmar_quirk_cpu post_ident_cpu[] = {
 };
 
 void
-dmar_quirks_pre_use(struct dmar_unit *dmar)
+dmar_quirks_pre_use(struct iommu_unit *unit)
 {
+	struct dmar_unit *dmar;
+
+	dmar = (struct dmar_unit *)unit;
 
 	if (!dmar_barrier_enter(dmar, DMAR_BARRIER_USEQ))
 		return;
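The dmar_quirks_pre_use() hunk shows the same conversion applied at a function boundary: the signature is widened to the MI struct iommu_unit so generic code can call it without knowing about DMAR, and the function recovers its own type with a cast on entry. A hypothetical MI-side call would look like the following sketch, assuming the usual DMAR headers are in scope; the wrapper name is invented, only dmar_quirks_pre_use() itself appears in this commit:

/* Hypothetical caller: generic code holds only the MI pointer. */
static void
iommu_pre_use_sketch(struct iommu_unit *unit)
{
	/* The DMAR backend downcasts to struct dmar_unit internally. */
	dmar_quirks_pre_use(unit);
}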
@@ -148,7 +148,7 @@ domain_set_agaw(struct dmar_domain *domain, int mgaw)
 * address space, accept the biggest sagaw, whatever is it.
 */
 int
-dmar_maxaddr2mgaw(struct dmar_unit *unit, dmar_gaddr_t maxaddr, bool allow_less)
+dmar_maxaddr2mgaw(struct dmar_unit *unit, iommu_gaddr_t maxaddr, bool allow_less)
 {
 	int i;
 
@@ -207,17 +207,17 @@ domain_is_sp_lvl(struct dmar_domain *domain, int lvl)
 	return (alvl < nitems(sagaw_sp) && (sagaw_sp[alvl] & cap_sps) != 0);
 }
 
-dmar_gaddr_t
+iommu_gaddr_t
 pglvl_page_size(int total_pglvl, int lvl)
 {
 	int rlvl;
-	static const dmar_gaddr_t pg_sz[] = {
-		(dmar_gaddr_t)DMAR_PAGE_SIZE,
-		(dmar_gaddr_t)DMAR_PAGE_SIZE << DMAR_NPTEPGSHIFT,
-		(dmar_gaddr_t)DMAR_PAGE_SIZE << (2 * DMAR_NPTEPGSHIFT),
-		(dmar_gaddr_t)DMAR_PAGE_SIZE << (3 * DMAR_NPTEPGSHIFT),
-		(dmar_gaddr_t)DMAR_PAGE_SIZE << (4 * DMAR_NPTEPGSHIFT),
-		(dmar_gaddr_t)DMAR_PAGE_SIZE << (5 * DMAR_NPTEPGSHIFT)
+	static const iommu_gaddr_t pg_sz[] = {
+		(iommu_gaddr_t)DMAR_PAGE_SIZE,
+		(iommu_gaddr_t)DMAR_PAGE_SIZE << DMAR_NPTEPGSHIFT,
+		(iommu_gaddr_t)DMAR_PAGE_SIZE << (2 * DMAR_NPTEPGSHIFT),
+		(iommu_gaddr_t)DMAR_PAGE_SIZE << (3 * DMAR_NPTEPGSHIFT),
+		(iommu_gaddr_t)DMAR_PAGE_SIZE << (4 * DMAR_NPTEPGSHIFT),
+		(iommu_gaddr_t)DMAR_PAGE_SIZE << (5 * DMAR_NPTEPGSHIFT)
 	};
 
 	KASSERT(lvl >= 0 && lvl < total_pglvl,
@@ -227,7 +227,7 @@ pglvl_page_size(int total_pglvl, int lvl)
 	return (pg_sz[rlvl]);
 }
 
-dmar_gaddr_t
+iommu_gaddr_t
 domain_page_size(struct dmar_domain *domain, int lvl)
 {
 
@@ -235,10 +235,10 @@ domain_page_size(struct dmar_domain *domain, int lvl)
 }
 
 int
-calc_am(struct dmar_unit *unit, dmar_gaddr_t base, dmar_gaddr_t size,
-    dmar_gaddr_t *isizep)
+calc_am(struct dmar_unit *unit, iommu_gaddr_t base, iommu_gaddr_t size,
+    iommu_gaddr_t *isizep)
 {
-	dmar_gaddr_t isize;
+	iommu_gaddr_t isize;
 	int am;
 
 	for (am = DMAR_CAP_MAMV(unit->hw_cap);; am--) {
@@ -252,7 +252,7 @@ calc_am(struct dmar_unit *unit, dmar_gaddr_t base, dmar_gaddr_t size,
 	return (am);
 }
 
-dmar_haddr_t dmar_high;
+iommu_haddr_t dmar_high;
 int haw;
 int dmar_tbl_pagecnt;
 
@@ -482,7 +482,7 @@ dmar_flush_write_bufs(struct dmar_unit *unit)
 	 * DMAR_GCMD_WBF is only valid when CAP_RWBF is reported.
 	 */
 	KASSERT((unit->hw_cap & DMAR_CAP_RWBF) != 0,
-	    ("dmar%d: no RWBF", unit->unit));
+	    ("dmar%d: no RWBF", unit->iommu.unit));
 
 	dmar_write4(unit, DMAR_GCMD_REG, unit->hw_gcmd | DMAR_GCMD_WBF);
 	DMAR_WAIT_UNTIL(((dmar_read4(unit, DMAR_GSTS_REG) & DMAR_GSTS_WBFS)
@@ -586,11 +586,12 @@ dmar_barrier_enter(struct dmar_unit *dmar, u_int barrier_id)
 	if ((dmar->barrier_flags & f_inproc) != 0) {
 		while ((dmar->barrier_flags & f_inproc) != 0) {
 			dmar->barrier_flags |= f_wakeup;
-			msleep(&dmar->barrier_flags, &dmar->lock, 0,
+			msleep(&dmar->barrier_flags, &dmar->iommu.lock, 0,
 			    "dmarb", 0);
 		}
 		KASSERT((dmar->barrier_flags & f_done) != 0,
-		    ("dmar%d barrier %d missing done", dmar->unit, barrier_id));
+		    ("dmar%d barrier %d missing done", dmar->iommu.unit,
+		    barrier_id));
 		DMAR_UNLOCK(dmar);
 		return (false);
 	}
@@ -607,7 +608,7 @@ dmar_barrier_exit(struct dmar_unit *dmar, u_int barrier_id)
 
 	DMAR_ASSERT_LOCKED(dmar);
 	KASSERT((dmar->barrier_flags & (f_done | f_inproc)) == f_inproc,
-	    ("dmar%d barrier %d missed entry", dmar->unit, barrier_id));
+	    ("dmar%d barrier %d missed entry", dmar->iommu.unit, barrier_id));
 	dmar->barrier_flags |= f_done;
 	if ((dmar->barrier_flags & f_wakeup) != 0)
 		wakeup(&dmar->barrier_flags);