Add a few macros for conversion between the DMAR unit, domain, and ctx
structures and their generic IOMMU counterparts.

Reviewed by:	kib
Sponsored by:	DARPA, AFRL
Differential Revision:	https://reviews.freebsd.org/D25926
Ruslan Bukin 2020-08-04 20:51:05 +00:00
parent 8541ae04b4
commit 78b517543b
5 changed files with 56 additions and 45 deletions
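Background, not part of the commit: each DMAR-specific structure embeds its
generic iommu counterpart, so converting one way is just taking the address of
the embedded member, and converting back is __containerof(). A minimal,
self-contained sketch of that pattern, using illustrative stand-in types
rather than the real dmar/iommu definitions (container_of below mirrors
FreeBSD's __containerof() from sys/cdefs.h):

#include <stddef.h>
#include <stdio.h>

struct iommu_unit {
	int unit;
};

struct dmar_unit {
	struct iommu_unit iommu;	/* embedded generic part */
	int qi_enabled;
};

/* Recover the containing structure from a pointer to one of its members. */
#define	container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define	DMAR2IOMMU(dmar)	(&(dmar)->iommu)
#define	IOMMU2DMAR(unit)	container_of((unit), struct dmar_unit, iommu)

int
main(void)
{
	struct dmar_unit dmar = { .iommu = { .unit = 0 }, .qi_enabled = 1 };
	struct iommu_unit *iommu = DMAR2IOMMU(&dmar);

	/* The round trip recovers the original dmar_unit. */
	printf("%d\n", IOMMU2DMAR(iommu)->qi_enabled);	/* prints 1 */
	return (0);
}

Unlike a bare cast, __containerof() does not depend on the embedded member
being the first field of the containing structure, and it fails to compile if
the named member does not exist.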

View File

@@ -117,7 +117,7 @@ dmar_map_ctx_entry(struct dmar_ctx *ctx, struct sf_buf **sfp)
 	struct dmar_unit *dmar;
 	dmar_ctx_entry_t *ctxp;
 
-	dmar = (struct dmar_unit *)ctx->context.domain->iommu;
+	dmar = CTX2DMAR(ctx);
 
 	ctxp = dmar_map_pgtbl(dmar->ctx_obj, 1 +
 	    PCI_RID2BUS(ctx->rid), IOMMU_PGF_NOALLOC | IOMMU_PGF_WAITOK, sfp);
@@ -131,7 +131,7 @@ device_tag_init(struct dmar_ctx *ctx, device_t dev)
 	struct dmar_domain *domain;
 	bus_addr_t maxaddr;
 
-	domain = (struct dmar_domain *)ctx->context.domain;
+	domain = CTX2DOM(ctx);
 	maxaddr = MIN(domain->iodom.end, BUS_SPACE_MAXADDR);
 	ctx->context.tag->common.ref_count = 1; /* Prevent free */
 	ctx->context.tag->common.impl = &bus_dma_iommu_impl;
@@ -141,7 +141,7 @@ device_tag_init(struct dmar_ctx *ctx, device_t dev)
 	ctx->context.tag->common.maxsize = maxaddr;
 	ctx->context.tag->common.nsegments = BUS_SPACE_UNRESTRICTED;
 	ctx->context.tag->common.maxsegsz = maxaddr;
-	ctx->context.tag->ctx = (struct iommu_ctx *)ctx;
+	ctx->context.tag->ctx = CTX2IOCTX(ctx);
 	ctx->context.tag->owner = dev;
 }
 
@@ -178,8 +178,8 @@ ctx_id_entry_init(struct dmar_ctx *ctx, dmar_ctx_entry_t *ctxp, bool move,
 	vm_page_t ctx_root;
 	int i;
 
-	domain = (struct dmar_domain *)ctx->context.domain;
-	unit = (struct dmar_unit *)domain->iodom.iommu;
+	domain = CTX2DOM(ctx);
+	unit = DOM2DMAR(domain);
 	KASSERT(move || (ctxp->ctx1 == 0 && ctxp->ctx2 == 0),
 	    ("dmar%d: initialized ctx entry %d:%d:%d 0x%jx 0x%jx",
 	    unit->iommu.unit, busno, pci_get_slot(ctx->context.tag->owner),
@@ -196,7 +196,7 @@
 		    IOMMU_PGF_NOALLOC);
 	}
 
-	if (iommu_is_buswide_ctx((struct iommu_unit *)unit, busno)) {
+	if (iommu_is_buswide_ctx(DMAR2IOMMU(unit), busno)) {
 		MPASS(!move);
 		for (i = 0; i <= PCI_BUSMAX; i++) {
 			ctx_id_entry_init_one(&ctxp[i], domain, ctx_root);
@@ -283,8 +283,7 @@ domain_init_rmrr(struct dmar_domain *domain, device_t dev, int bus,
 		ma[i] = vm_page_getfake(entry->start + PAGE_SIZE * i,
 		    VM_MEMATTR_DEFAULT);
 	}
-	error1 = iommu_gas_map_region((struct iommu_domain *)domain,
-	    entry,
+	error1 = iommu_gas_map_region(DOM2IODOM(domain), entry,
 	    IOMMU_MAP_ENTRY_READ | IOMMU_MAP_ENTRY_WRITE,
 	    IOMMU_MF_CANWAIT | IOMMU_MF_RMRR, ma);
 	/*
@@ -310,8 +309,7 @@ domain_init_rmrr(struct dmar_domain *domain, device_t dev, int bus,
 			error = error1;
 		}
 		TAILQ_REMOVE(&rmrr_entries, entry, unroll_link);
-		iommu_gas_free_entry((struct iommu_domain *)domain,
-		    entry);
+		iommu_gas_free_entry(DOM2IODOM(domain), entry);
 	}
 	for (i = 0; i < size; i++)
 		vm_page_putfake(ma[i]);
@@ -331,7 +329,7 @@ dmar_domain_alloc(struct dmar_unit *dmar, bool id_mapped)
 	if (id == -1)
 		return (NULL);
 	domain = malloc(sizeof(*domain), M_DMAR_DOMAIN, M_WAITOK | M_ZERO);
-	iodom = (struct iommu_domain *)domain;
+	iodom = DOM2IODOM(domain);
 	domain->domain = id;
 	LIST_INIT(&domain->contexts);
 	RB_INIT(&domain->iodom.rb_root);
@@ -358,7 +356,7 @@ dmar_domain_alloc(struct dmar_unit *dmar, bool id_mapped)
 	/* Use all supported address space for remapping. */
 	domain->iodom.end = 1ULL << (domain->agaw - 1);
 
-	iommu_gas_init_domain((struct iommu_domain *)domain);
+	iommu_gas_init_domain(DOM2IODOM(domain));
 
 	if (id_mapped) {
 		if ((dmar->hw_ecap & DMAR_ECAP_PT) == 0) {
@@ -389,7 +387,7 @@ dmar_ctx_alloc(struct dmar_domain *domain, uint16_t rid)
 	struct dmar_ctx *ctx;
 
 	ctx = malloc(sizeof(*ctx), M_DMAR_CTX, M_WAITOK | M_ZERO);
-	ctx->context.domain = (struct iommu_domain *)domain;
+	ctx->context.domain = DOM2IODOM(domain);
 	ctx->context.tag = malloc(sizeof(struct bus_dma_tag_iommu),
 	    M_DMAR_CTX, M_WAITOK | M_ZERO);
 	ctx->rid = rid;
@@ -402,7 +400,7 @@ dmar_ctx_link(struct dmar_ctx *ctx)
 {
 	struct dmar_domain *domain;
 
-	domain = (struct dmar_domain *)ctx->context.domain;
+	domain = CTX2DOM(ctx);
 	IOMMU_ASSERT_LOCKED(domain->iodom.iommu);
 	KASSERT(domain->refs >= domain->ctx_cnt,
 	    ("dom %p ref underflow %d %d", domain, domain->refs,
@@ -417,7 +415,7 @@ dmar_ctx_unlink(struct dmar_ctx *ctx)
 {
 	struct dmar_domain *domain;
 
-	domain = (struct dmar_domain *)ctx->context.domain;
+	domain = CTX2DOM(ctx);
 	IOMMU_ASSERT_LOCKED(domain->iodom.iommu);
 	KASSERT(domain->refs > 0,
 	    ("domain %p ctx dtr refs %d", domain, domain->refs));
@@ -444,7 +442,7 @@ dmar_domain_destroy(struct dmar_domain *domain)
 	    ("destroying dom %p with refs %d", domain, domain->refs));
 	if ((domain->iodom.flags & IOMMU_DOMAIN_GAS_INITED) != 0) {
 		DMAR_DOMAIN_LOCK(domain);
-		iommu_gas_fini_domain((struct iommu_domain *)domain);
+		iommu_gas_fini_domain(DOM2IODOM(domain));
 		DMAR_DOMAIN_UNLOCK(domain);
 	}
 	if ((domain->iodom.flags & IOMMU_DOMAIN_PGTBL_INITED) != 0) {
@@ -453,7 +451,7 @@ dmar_domain_destroy(struct dmar_domain *domain)
 		domain_free_pgtbl(domain);
 	}
 	mtx_destroy(&domain->iodom.lock);
-	dmar = (struct dmar_unit *)domain->iodom.iommu;
+	dmar = DOM2DMAR(domain);
 	free_unr(dmar->domids, domain->domain);
 	free(domain, M_DMAR_DOMAIN);
 }
@@ -482,7 +480,7 @@ dmar_get_ctx_for_dev1(struct dmar_unit *dmar, device_t dev, uint16_t rid,
 	}
 	enable = false;
 	TD_PREP_PINNED_ASSERT;
-	unit = (struct iommu_unit *)dmar;
+	unit = DMAR2IOMMU(dmar);
 	DMAR_LOCK(dmar);
 	KASSERT(!iommu_is_buswide_ctx(unit, bus) || (slot == 0 && func == 0),
 	    ("iommu%d pci%d:%d:%d get_ctx for buswide", dmar->iommu.unit, bus,
@@ -550,11 +548,11 @@ dmar_get_ctx_for_dev1(struct dmar_unit *dmar, device_t dev, uint16_t rid,
 			dmar_domain_destroy(domain1);
 			/* Nothing needs to be done to destroy ctx1. */
 			free(ctx1, M_DMAR_CTX);
-			domain = (struct dmar_domain *)ctx->context.domain;
+			domain = CTX2DOM(ctx);
 			ctx->refs++; /* tag referenced us */
 		}
 	} else {
-		domain = (struct dmar_domain *)ctx->context.domain;
+		domain = CTX2DOM(ctx);
 		if (ctx->context.tag->owner == NULL)
 			ctx->context.tag->owner = dev;
 		ctx->refs++; /* tag referenced us */
@@ -627,7 +625,7 @@ dmar_move_ctx_to_domain(struct dmar_domain *domain, struct dmar_ctx *ctx)
 	int error;
 
 	dmar = domain->dmar;
-	old_domain = (struct dmar_domain *)ctx->context.domain;
+	old_domain = CTX2DOM(ctx);
 	if (domain == old_domain)
 		return (0);
 	KASSERT(old_domain->iodom.iommu == domain->iodom.iommu,
@@ -748,7 +746,7 @@ dmar_free_ctx_locked(struct dmar_unit *dmar, struct dmar_ctx *ctx)
 		dmar_inv_iotlb_glob(dmar);
 	}
 	dmar_unmap_pgtbl(sf);
-	domain = (struct dmar_domain *)ctx->context.domain;
+	domain = CTX2DOM(ctx);
 	dmar_ctx_unlink(ctx);
 	free(ctx->context.tag, M_DMAR_CTX);
 	free(ctx, M_DMAR_CTX);
@@ -761,7 +759,7 @@ dmar_free_ctx(struct dmar_ctx *ctx)
 {
 	struct dmar_unit *dmar;
 
-	dmar = (struct dmar_unit *)ctx->context.domain->iommu;
+	dmar = CTX2DMAR(ctx);
 	DMAR_LOCK(dmar);
 	dmar_free_ctx_locked(dmar, ctx);
 }
@@ -810,11 +808,11 @@ dmar_domain_unload_entry(struct iommu_map_entry *entry, bool free)
 	struct dmar_domain *domain;
 	struct dmar_unit *unit;
 
-	domain = (struct dmar_domain *)entry->domain;
-	unit = (struct dmar_unit *)domain->iodom.iommu;
+	domain = IODOM2DOM(entry->domain);
+	unit = DOM2DMAR(domain);
 	if (unit->qi_enabled) {
 		DMAR_LOCK(unit);
-		dmar_qi_invalidate_locked((struct dmar_domain *)entry->domain,
+		dmar_qi_invalidate_locked(IODOM2DOM(entry->domain),
 		    entry->start, entry->end - entry->start, &entry->gseq,
 		    true);
 		if (!free)
@@ -822,7 +820,7 @@ dmar_domain_unload_entry(struct iommu_map_entry *entry, bool free)
 		TAILQ_INSERT_TAIL(&unit->tlb_flush_entries, entry, dmamap_link);
 		DMAR_UNLOCK(unit);
 	} else {
-		domain_flush_iotlb_sync((struct dmar_domain *)entry->domain,
+		domain_flush_iotlb_sync(IODOM2DOM(entry->domain),
 		    entry->start, entry->end - entry->start);
 		dmar_domain_free_entry(entry, free);
 	}
@@ -847,8 +845,8 @@ dmar_domain_unload(struct dmar_domain *domain,
 	struct iommu_map_entry *entry, *entry1;
 	int error;
 
-	iodom = (struct iommu_domain *)domain;
-	unit = (struct dmar_unit *)domain->iodom.iommu;
+	iodom = DOM2IODOM(domain);
+	unit = DOM2DMAR(domain);
 
 	TAILQ_FOREACH_SAFE(entry, entries, dmamap_link, entry1) {
 		KASSERT((entry->flags & IOMMU_MAP_ENTRY_MAP) != 0,
@@ -904,11 +902,11 @@ iommu_get_ctx(struct iommu_unit *iommu, device_t dev, uint16_t rid,
 {
 	struct dmar_unit *dmar;
 	struct dmar_ctx *ret;
 
-	dmar = (struct dmar_unit *)iommu;
+	dmar = IOMMU2DMAR(iommu);
 	ret = dmar_get_ctx_for_dev(dmar, dev, rid, id_mapped, rmrr_init);
 
-	return ((struct iommu_ctx *)ret);
+	return (CTX2IOCTX(ret));
 }
 
 void
@@ -917,8 +915,8 @@ iommu_free_ctx_locked(struct iommu_unit *iommu, struct iommu_ctx *context)
 	struct dmar_unit *dmar;
 	struct dmar_ctx *ctx;
 
-	dmar = (struct dmar_unit *)iommu;
-	ctx = (struct dmar_ctx *)context;
+	dmar = IOMMU2DMAR(iommu);
+	ctx = IOCTX2CTX(context);
 
 	dmar_free_ctx_locked(dmar, ctx);
 }
@@ -926,11 +924,9 @@ iommu_free_ctx_locked(struct iommu_unit *iommu, struct iommu_ctx *context)
 void
 iommu_free_ctx(struct iommu_ctx *context)
 {
-	struct dmar_unit *dmar;
 	struct dmar_ctx *ctx;
 
-	ctx = (struct dmar_ctx *)context;
-	dmar = (struct dmar_unit *)ctx->context.domain->iommu;
+	ctx = IOCTX2CTX(context);
 
 	dmar_free_ctx(ctx);
 }
@@ -948,7 +944,7 @@ iommu_domain_unload(struct iommu_domain *iodom,
 {
 	struct dmar_domain *domain;
 
-	domain = (struct dmar_domain *)iodom;
+	domain = IODOM2DOM(iodom);
 
 	dmar_domain_unload(domain, entries, cansleep);
 }

View File

@@ -91,6 +91,22 @@ struct dmar_ctx {
 #define	DMAR_DOMAIN_UNLOCK(dom)	mtx_unlock(&(dom)->iodom.lock)
 #define	DMAR_DOMAIN_ASSERT_LOCKED(dom) mtx_assert(&(dom)->iodom.lock, MA_OWNED)
 
+#define	DMAR2IOMMU(dmar)	&((dmar)->iommu)
+#define	IOMMU2DMAR(dmar)	\
+	__containerof((dmar), struct dmar_unit, iommu)
+
+#define	DOM2IODOM(domain)	&((domain)->iodom)
+#define	IODOM2DOM(domain)	\
+	__containerof((domain), struct dmar_domain, iodom)
+
+#define	CTX2IOCTX(ctx)		&((ctx)->context)
+#define	IOCTX2CTX(ctx)		\
+	__containerof((ctx), struct dmar_ctx, context)
+
+#define	CTX2DOM(ctx)		IODOM2DOM((ctx)->context.domain)
+#define	CTX2DMAR(ctx)		(CTX2DOM(ctx)->dmar)
+#define	DOM2DMAR(domain)	((domain)->dmar)
+
 struct dmar_msi_data {
 	int irq;
 	int irq_rid;
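For orientation, not part of the patch: the derived macros compose the
primitives above. A sketch of how CTX2DMAR() expands, under assumed,
simplified structure layouts (the real definitions carry many more fields):

/* Assumed, simplified layouts for illustration only. */
struct dmar_unit;
struct iommu_unit;
struct iommu_domain {
	struct iommu_unit *iommu;
};
struct dmar_domain {
	struct iommu_domain iodom;	/* embedded generic domain */
	struct dmar_unit *dmar;		/* back-pointer used by DOM2DMAR() */
};
struct iommu_ctx {
	struct iommu_domain *domain;
};
struct dmar_ctx {
	struct iommu_ctx context;	/* embedded generic ctx */
};

/*
 * CTX2DMAR(ctx) expands step by step:
 *   CTX2DMAR(ctx)
 *   -> (CTX2DOM(ctx)->dmar)
 *   -> (IODOM2DOM((ctx)->context.domain)->dmar)
 *   -> (__containerof((ctx)->context.domain, struct dmar_domain,
 *          iodom)->dmar)
 * replacing the old chain of casts through ctx->context.domain->iommu.
 */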

View File

@@ -919,8 +919,7 @@ dmar_rmrr_iter(ACPI_DMAR_HEADER *dmarh, void *arg)
 	match = dmar_match_devscope(devscope, ria->dev_busno,
 	    ria->dev_path, ria->dev_path_len);
 	if (match == 1) {
-		entry = iommu_gas_alloc_entry(
-		    (struct iommu_domain *)ria->domain,
+		entry = iommu_gas_alloc_entry(DOM2IODOM(ria->domain),
 		    IOMMU_PGF_WAITOK);
 		entry->start = resmem->BaseAddress;
 		/* The RMRR entry end address is inclusive. */
@@ -1056,7 +1055,7 @@ dmar_instantiate_rmrr_ctxs(struct iommu_unit *unit)
 	struct inst_rmrr_iter_args iria;
 	int error;
 
-	dmar = (struct dmar_unit *)unit;
+	dmar = IOMMU2DMAR(unit);
 
 	if (!dmar_barrier_enter(dmar, DMAR_BARRIER_RMRR))
 		return (0);
@@ -1131,7 +1130,7 @@ dmar_print_domain(struct dmar_domain *domain, bool show_mappings)
 	struct iommu_map_entry *entry;
 	struct dmar_ctx *ctx;
 
-	iodom = (struct iommu_domain *)domain;
+	iodom = DOM2IODOM(domain);
 
 	db_printf(
 	    " @%p dom %d mgaw %d agaw %d pglvl %d end %jx refs %d\n"

View File

@@ -512,7 +512,7 @@ domain_map_buf(struct iommu_domain *iodom, iommu_gaddr_t base,
 	    ((eflags & IOMMU_MAP_ENTRY_SNOOP) != 0 ? DMAR_PTE_SNP : 0) |
 	    ((eflags & IOMMU_MAP_ENTRY_TM) != 0 ? DMAR_PTE_TM : 0);
 
-	domain = (struct dmar_domain *)iodom;
+	domain = IODOM2DOM(iodom);
 	unit = domain->dmar;
 
 	KASSERT((domain->iodom.flags & IOMMU_DOMAIN_IDMAP) == 0,
@@ -691,7 +691,7 @@ domain_unmap_buf(struct iommu_domain *iodom, iommu_gaddr_t base,
 	struct dmar_domain *domain;
 	int error;
 
-	domain = (struct dmar_domain *)iodom;
+	domain = IODOM2DOM(iodom);
 
 	DMAR_DOMAIN_PGLOCK(domain);
 	error = domain_unmap_buf_locked(domain, base, size, flags);
@@ -823,6 +823,6 @@ domain_pgtbl_init(struct dmar_domain *domain)
 {
 	struct iommu_domain *iodom;
 
-	iodom = (struct iommu_domain *)domain;
+	iodom = DOM2IODOM(domain);
 	iodom->ops = &dmar_domain_map_ops;
 }

View File

@@ -226,7 +226,7 @@ dmar_quirks_pre_use(struct iommu_unit *unit)
 {
 	struct dmar_unit *dmar;
 
-	dmar = (struct dmar_unit *)unit;
+	dmar = IOMMU2DMAR(unit);
 
 	if (!dmar_barrier_enter(dmar, DMAR_BARRIER_USEQ))
 		return;