Rename DMAR flags:

o DMAR_DOMAIN_* -> IOMMU_DOMAIN_*
o DMAR_PGF_* -> IOMMU_PGF_*

Reviewed by:	kib
Sponsored by:	DARPA/AFRL
Differential Revision:	https://reviews.freebsd.org/D25812
Author:	Ruslan Bukin
Date:	2020-07-26 12:29:22 +00:00
Commit:	15f6baf445 (parent 4111838d37)
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=363560
7 changed files with 75 additions and 74 deletions
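
The rename is purely mechanical: the flag values are unchanged and only the DMAR_ prefix becomes IOMMU_. As an illustration only, not part of this commit or of the tree, out-of-tree code chasing the rename could bridge the old spellings with shims like these:

/*
 * Hypothetical compatibility shims, not part of this commit.
 * Each old DMAR_* name maps 1:1 to its IOMMU_* replacement below.
 */
#define DMAR_DOMAIN_GAS_INITED		IOMMU_DOMAIN_GAS_INITED
#define DMAR_DOMAIN_PGTBL_INITED	IOMMU_DOMAIN_PGTBL_INITED
#define DMAR_DOMAIN_IDMAP		IOMMU_DOMAIN_IDMAP
#define DMAR_DOMAIN_RMRR		IOMMU_DOMAIN_RMRR
#define DMAR_PGF_WAITOK			IOMMU_PGF_WAITOK
#define DMAR_PGF_ZERO			IOMMU_PGF_ZERO
#define DMAR_PGF_ALLOC			IOMMU_PGF_ALLOC
#define DMAR_PGF_NOALLOC		IOMMU_PGF_NOALLOC
#define DMAR_PGF_OBJL			IOMMU_PGF_OBJL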

File 1 of 7:

@@ -1017,7 +1017,7 @@ bus_dma_dmar_load_ident(bus_dma_tag_t dmat, bus_dmamap_t map1,
         map = (struct bus_dmamap_iommu *)map1;
         waitok = (flags & BUS_DMA_NOWAIT) != 0;
-        entry = iommu_map_alloc_entry(domain, waitok ? 0 : DMAR_PGF_WAITOK);
+        entry = iommu_map_alloc_entry(domain, waitok ? 0 : IOMMU_PGF_WAITOK);
         if (entry == NULL)
                 return (ENOMEM);
         entry->start = start;

File 2 of 7:

@@ -134,11 +134,11 @@ struct iommu_ctx {
                                            ephemeral reference is kept
                                            to prevent context destruction */
-#define DMAR_DOMAIN_GAS_INITED 0x0001
-#define DMAR_DOMAIN_PGTBL_INITED 0x0002
-#define DMAR_DOMAIN_IDMAP 0x0010    /* Domain uses identity
+#define IOMMU_DOMAIN_GAS_INITED 0x0001
+#define IOMMU_DOMAIN_PGTBL_INITED 0x0002
+#define IOMMU_DOMAIN_IDMAP 0x0010   /* Domain uses identity
                                        page table */
-#define DMAR_DOMAIN_RMRR 0x0020     /* Domain contains RMRR entry,
+#define IOMMU_DOMAIN_RMRR 0x0020    /* Domain contains RMRR entry,
                                        cannot be turned off */
 /* Map flags */
@@ -146,11 +146,11 @@ struct iommu_ctx {
 #define IOMMU_MF_CANSPLIT 0x0002
 #define IOMMU_MF_RMRR 0x0004
-#define DMAR_PGF_WAITOK 0x0001
-#define DMAR_PGF_ZERO 0x0002
-#define DMAR_PGF_ALLOC 0x0004
-#define DMAR_PGF_NOALLOC 0x0008
-#define DMAR_PGF_OBJL 0x0010
+#define IOMMU_PGF_WAITOK 0x0001
+#define IOMMU_PGF_ZERO 0x0002
+#define IOMMU_PGF_ALLOC 0x0004
+#define IOMMU_PGF_NOALLOC 0x0008
+#define IOMMU_PGF_OBJL 0x0010
 #define IOMMU_LOCK(unit) mtx_lock(&(unit)->lock)
 #define IOMMU_UNLOCK(unit) mtx_unlock(&(unit)->lock)
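
The IOMMU_PGF_* values above are disjoint single-bit flags, so callers combine and test them with plain bitwise operations; the next file, for example, translates IOMMU_PGF_WAITOK into the malloc-style wait flag for uma_zalloc(). A minimal self-contained sketch of that translation (M_WAITOK and M_NOWAIT are userspace stand-ins here; the kernel values differ):

#include <assert.h>

#define IOMMU_PGF_WAITOK 0x0001	/* value as defined above */
#define M_NOWAIT 0x0001		/* stand-in, not the kernel value */
#define M_WAITOK 0x0002		/* stand-in, not the kernel value */

static int
pgf_to_malloc_wait(int flags)
{
	/* Same expression as the uma_zalloc() call in iommu_gas_alloc_entry(). */
	return ((flags & IOMMU_PGF_WAITOK) != 0 ? M_WAITOK : M_NOWAIT);
}

int
main(void)
{
	assert(pgf_to_malloc_wait(IOMMU_PGF_WAITOK) == M_WAITOK);
	assert(pgf_to_malloc_wait(0) == M_NOWAIT);
	return (0);
}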

File 3 of 7:

@@ -98,10 +98,10 @@ iommu_gas_alloc_entry(struct iommu_domain *domain, u_int flags)
 {
         struct iommu_map_entry *res;
-        KASSERT((flags & ~(DMAR_PGF_WAITOK)) == 0,
+        KASSERT((flags & ~(IOMMU_PGF_WAITOK)) == 0,
             ("unsupported flags %x", flags));
-        res = uma_zalloc(iommu_map_entry_zone, ((flags & DMAR_PGF_WAITOK) !=
+        res = uma_zalloc(iommu_map_entry_zone, ((flags & IOMMU_PGF_WAITOK) !=
             0 ? M_WAITOK : M_NOWAIT) | M_ZERO);
         if (res != NULL) {
                 res->domain = domain;
@@ -218,8 +218,8 @@ iommu_gas_init_domain(struct iommu_domain *domain)
 {
         struct iommu_map_entry *begin, *end;
-        begin = iommu_gas_alloc_entry(domain, DMAR_PGF_WAITOK);
-        end = iommu_gas_alloc_entry(domain, DMAR_PGF_WAITOK);
+        begin = iommu_gas_alloc_entry(domain, IOMMU_PGF_WAITOK);
+        end = iommu_gas_alloc_entry(domain, IOMMU_PGF_WAITOK);
         IOMMU_DOMAIN_LOCK(domain);
         KASSERT(domain->entries_cnt == 2, ("dirty domain %p", domain));
@@ -238,7 +238,7 @@ iommu_gas_init_domain(struct iommu_domain *domain)
         domain->first_place = begin;
         domain->last_place = end;
-        domain->flags |= DMAR_DOMAIN_GAS_INITED;
+        domain->flags |= IOMMU_DOMAIN_GAS_INITED;
         IOMMU_DOMAIN_UNLOCK(domain);
 }
@@ -598,7 +598,7 @@ iommu_gas_map(struct iommu_domain *domain,
             ("invalid flags 0x%x", flags));
         entry = iommu_gas_alloc_entry(domain,
-            (flags & IOMMU_MF_CANWAIT) != 0 ? DMAR_PGF_WAITOK : 0);
+            (flags & IOMMU_MF_CANWAIT) != 0 ? IOMMU_PGF_WAITOK : 0);
         if (entry == NULL)
                 return (ENOMEM);
         IOMMU_DOMAIN_LOCK(domain);
@@ -622,7 +622,7 @@ iommu_gas_map(struct iommu_domain *domain,
         error = domain_map_buf(domain, entry->start, entry->end - entry->start,
             ma, eflags,
-            ((flags & IOMMU_MF_CANWAIT) != 0 ? DMAR_PGF_WAITOK : 0));
+            ((flags & IOMMU_MF_CANWAIT) != 0 ? IOMMU_PGF_WAITOK : 0));
         if (error == ENOMEM) {
                 iommu_domain_unload_entry(entry, true);
                 return (error);
@@ -660,7 +660,7 @@ iommu_gas_map_region(struct iommu_domain *domain, struct iommu_map_entry *entry,
         error = domain_map_buf(domain, entry->start, entry->end - entry->start,
             ma + OFF_TO_IDX(start - entry->start), eflags,
-            ((flags & IOMMU_MF_CANWAIT) != 0 ? DMAR_PGF_WAITOK : 0));
+            ((flags & IOMMU_MF_CANWAIT) != 0 ? IOMMU_PGF_WAITOK : 0));
         if (error == ENOMEM) {
                 iommu_domain_unload_entry(entry, false);
                 return (error);
@@ -678,7 +678,7 @@ iommu_gas_reserve_region(struct iommu_domain *domain, iommu_gaddr_t start,
         struct iommu_map_entry *entry;
         int error;
-        entry = iommu_gas_alloc_entry(domain, DMAR_PGF_WAITOK);
+        entry = iommu_gas_alloc_entry(domain, IOMMU_PGF_WAITOK);
         entry->start = start;
         entry->end = end;
         IOMMU_DOMAIN_LOCK(domain);
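
iommu_gas_map() and iommu_gas_map_region() above convert the map flag IOMMU_MF_CANWAIT into the page-allocation flag IOMMU_PGF_WAITOK before calling the allocators. A small self-contained sketch of that conversion (IOMMU_MF_CANWAIT's value is not shown in this diff; 0x0001 is assumed here for illustration):

#include <assert.h>

#define IOMMU_MF_CANWAIT 0x0001	/* assumed value; not shown in this diff */
#define IOMMU_PGF_WAITOK 0x0001	/* value from the iommu.h hunk above */

static int
mf_to_pgf(int mapflags)
{
	/* Same conversion as iommu_gas_map() and iommu_gas_map_region(). */
	return ((mapflags & IOMMU_MF_CANWAIT) != 0 ? IOMMU_PGF_WAITOK : 0);
}

int
main(void)
{
	assert(mf_to_pgf(IOMMU_MF_CANWAIT) == IOMMU_PGF_WAITOK);
	assert(mf_to_pgf(0) == 0);
	return (0);
}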

File 4 of 7:

@@ -89,7 +89,7 @@ dmar_ensure_ctx_page(struct dmar_unit *dmar, int bus)
         /*
          * Allocated context page must be linked.
          */
-        ctxm = dmar_pgalloc(dmar->ctx_obj, 1 + bus, DMAR_PGF_NOALLOC);
+        ctxm = dmar_pgalloc(dmar->ctx_obj, 1 + bus, IOMMU_PGF_NOALLOC);
         if (ctxm != NULL)
                 return;
@@ -100,9 +100,9 @@ dmar_ensure_ctx_page(struct dmar_unit *dmar, int bus)
          * threads are equal.
          */
         TD_PREP_PINNED_ASSERT;
-        ctxm = dmar_pgalloc(dmar->ctx_obj, 1 + bus, DMAR_PGF_ZERO |
-            DMAR_PGF_WAITOK);
-        re = dmar_map_pgtbl(dmar->ctx_obj, 0, DMAR_PGF_NOALLOC, &sf);
+        ctxm = dmar_pgalloc(dmar->ctx_obj, 1 + bus, IOMMU_PGF_ZERO |
+            IOMMU_PGF_WAITOK);
+        re = dmar_map_pgtbl(dmar->ctx_obj, 0, IOMMU_PGF_NOALLOC, &sf);
         re += bus;
         dmar_pte_store(&re->r1, DMAR_ROOT_R1_P | (DMAR_ROOT_R1_CTP_MASK &
             VM_PAGE_TO_PHYS(ctxm)));
@@ -120,7 +120,7 @@ dmar_map_ctx_entry(struct dmar_ctx *ctx, struct sf_buf **sfp)
         dmar = (struct dmar_unit *)ctx->context.domain->iommu;
         ctxp = dmar_map_pgtbl(dmar->ctx_obj, 1 +
-            PCI_RID2BUS(ctx->rid), DMAR_PGF_NOALLOC | DMAR_PGF_WAITOK, sfp);
+            PCI_RID2BUS(ctx->rid), IOMMU_PGF_NOALLOC | IOMMU_PGF_WAITOK, sfp);
         ctxp += ctx->rid & 0xff;
         return (ctxp);
 }
@@ -186,13 +186,14 @@ ctx_id_entry_init(struct dmar_ctx *ctx, dmar_ctx_entry_t *ctxp, bool move,
             pci_get_function(ctx->context.tag->owner),
             ctxp->ctx1, ctxp->ctx2));
-        if ((domain->iodom.flags & DMAR_DOMAIN_IDMAP) != 0 &&
+        if ((domain->iodom.flags & IOMMU_DOMAIN_IDMAP) != 0 &&
             (unit->hw_ecap & DMAR_ECAP_PT) != 0) {
                 KASSERT(domain->pgtbl_obj == NULL,
                     ("ctx %p non-null pgtbl_obj", ctx));
                 ctx_root = NULL;
         } else {
-                ctx_root = dmar_pgalloc(domain->pgtbl_obj, 0, DMAR_PGF_NOALLOC);
+                ctx_root = dmar_pgalloc(domain->pgtbl_obj, 0,
+                    IOMMU_PGF_NOALLOC);
         }
         if (dmar_is_buswide_ctx(unit, busno)) {
@@ -295,7 +296,7 @@ domain_init_rmrr(struct dmar_domain *domain, device_t dev, int bus,
                 if (error1 == 0 && entry->end != entry->start) {
                         IOMMU_LOCK(domain->iodom.iommu);
                         domain->refs++; /* XXXKIB prevent free */
-                        domain->iodom.flags |= DMAR_DOMAIN_RMRR;
+                        domain->iodom.flags |= IOMMU_DOMAIN_RMRR;
                         IOMMU_UNLOCK(domain->iodom.iommu);
                 } else {
                         if (error1 != 0) {
@@ -363,7 +364,7 @@ dmar_domain_alloc(struct dmar_unit *dmar, bool id_mapped)
                         domain->pgtbl_obj = domain_get_idmap_pgtbl(domain,
                             domain->iodom.end);
                 }
-                domain->iodom.flags |= DMAR_DOMAIN_IDMAP;
+                domain->iodom.flags |= IOMMU_DOMAIN_IDMAP;
         } else {
                 error = domain_alloc_pgtbl(domain);
                 if (error != 0)
@@ -440,12 +441,12 @@ dmar_domain_destroy(struct dmar_domain *domain)
             ("destroying dom %p with ctx_cnt %d", domain, domain->ctx_cnt));
         KASSERT(domain->refs == 0,
             ("destroying dom %p with refs %d", domain, domain->refs));
-        if ((domain->iodom.flags & DMAR_DOMAIN_GAS_INITED) != 0) {
+        if ((domain->iodom.flags & IOMMU_DOMAIN_GAS_INITED) != 0) {
                 DMAR_DOMAIN_LOCK(domain);
                 iommu_gas_fini_domain((struct iommu_domain *)domain);
                 DMAR_DOMAIN_UNLOCK(domain);
         }
-        if ((domain->iodom.flags & DMAR_DOMAIN_PGTBL_INITED) != 0) {
+        if ((domain->iodom.flags & IOMMU_DOMAIN_PGTBL_INITED) != 0) {
                 if (domain->pgtbl_obj != NULL)
                         DMAR_DOMAIN_PGLOCK(domain);
                 domain_free_pgtbl(domain);
@@ -643,7 +644,7 @@ dmar_move_ctx_to_domain(struct dmar_domain *domain, struct dmar_ctx *ctx)
         /* If flush failed, rolling back would not work as well. */
         printf("dmar%d rid %x domain %d->%d %s-mapped\n",
             dmar->iommu.unit, ctx->rid, old_domain->domain, domain->domain,
-            (domain->iodom.flags & DMAR_DOMAIN_IDMAP) != 0 ? "id" : "re");
+            (domain->iodom.flags & IOMMU_DOMAIN_IDMAP) != 0 ? "id" : "re");
         dmar_unref_domain_locked(dmar, old_domain);
         TD_PINNED_ASSERT;
         return (error);
@@ -667,7 +668,7 @@ dmar_unref_domain_locked(struct dmar_unit *dmar, struct dmar_domain *domain)
                 return;
         }
-        KASSERT((domain->iodom.flags & DMAR_DOMAIN_RMRR) == 0,
+        KASSERT((domain->iodom.flags & IOMMU_DOMAIN_RMRR) == 0,
             ("lost ref on RMRR domain %p", domain));
         LIST_REMOVE(domain, link);
@@ -848,7 +849,7 @@ dmar_domain_unload(struct dmar_domain *domain,
                 KASSERT((entry->flags & IOMMU_MAP_ENTRY_MAP) != 0,
                     ("not mapped entry %p %p", domain, entry));
                 error = domain_unmap_buf(domain, entry->start, entry->end -
-                    entry->start, cansleep ? DMAR_PGF_WAITOK : 0);
+                    entry->start, cansleep ? IOMMU_PGF_WAITOK : 0);
                 KASSERT(error == 0, ("unmap %p error %d", domain, error));
                 if (!unit->qi_enabled) {
                         domain_flush_iotlb_sync(domain, entry->start,

File 5 of 7:

@@ -489,7 +489,7 @@ dmar_attach(device_t dev)
          * address translation after the required invalidations are
          * done.
          */
-        dmar_pgalloc(unit->ctx_obj, 0, DMAR_PGF_WAITOK | DMAR_PGF_ZERO);
+        dmar_pgalloc(unit->ctx_obj, 0, IOMMU_PGF_WAITOK | IOMMU_PGF_ZERO);
         DMAR_LOCK(unit);
         error = dmar_load_root_entry_ptr(unit);
         if (error != 0) {
@@ -944,7 +944,7 @@ dmar_rmrr_iter(ACPI_DMAR_HEADER *dmarh, void *arg)
         if (match == 1) {
                 entry = iommu_gas_alloc_entry(
                     (struct iommu_domain *)ria->domain,
-                    DMAR_PGF_WAITOK);
+                    IOMMU_PGF_WAITOK);
                 entry->start = resmem->BaseAddress;
                 /* The RMRR entry end address is inclusive. */
                 entry->end = resmem->EndAddress;

File 6 of 7:

@@ -121,8 +121,8 @@ domain_idmap_nextlvl(struct idpgtbl *tbl, int lvl, vm_pindex_t idx,
         VM_OBJECT_ASSERT_LOCKED(tbl->pgtbl_obj);
         if (addr >= tbl->maxaddr)
                 return;
-        (void)dmar_pgalloc(tbl->pgtbl_obj, idx, DMAR_PGF_OBJL | DMAR_PGF_WAITOK |
-            DMAR_PGF_ZERO);
+        (void)dmar_pgalloc(tbl->pgtbl_obj, idx, IOMMU_PGF_OBJL |
+            IOMMU_PGF_WAITOK | IOMMU_PGF_ZERO);
         base = idx * DMAR_NPTEPG + 1; /* Index of the first child page of idx */
         pg_sz = pglvl_page_size(tbl->pglvl, lvl);
         if (lvl != tbl->leaf) {
@@ -130,7 +130,7 @@ domain_idmap_nextlvl(struct idpgtbl *tbl, int lvl, vm_pindex_t idx,
                         domain_idmap_nextlvl(tbl, lvl + 1, base + i, f);
         }
         VM_OBJECT_WUNLOCK(tbl->pgtbl_obj);
-        pte = dmar_map_pgtbl(tbl->pgtbl_obj, idx, DMAR_PGF_WAITOK, &sf);
+        pte = dmar_map_pgtbl(tbl->pgtbl_obj, idx, IOMMU_PGF_WAITOK, &sf);
         if (lvl == tbl->leaf) {
                 for (i = 0, f = addr; i < DMAR_NPTEPG; i++, f += pg_sz) {
                         if (f >= tbl->maxaddr)
@@ -143,7 +143,7 @@ domain_idmap_nextlvl(struct idpgtbl *tbl, int lvl, vm_pindex_t idx,
                         if (f >= tbl->maxaddr)
                                 break;
                         m1 = dmar_pgalloc(tbl->pgtbl_obj, base + i,
-                            DMAR_PGF_NOALLOC);
+                            IOMMU_PGF_NOALLOC);
                         KASSERT(m1 != NULL, ("lost page table page"));
                         pte[i].pte = (DMAR_PTE_ADDR_MASK &
                             VM_PAGE_TO_PHYS(m1)) | DMAR_PTE_R | DMAR_PTE_W;
@@ -362,7 +362,7 @@ domain_pgtbl_map_pte(struct dmar_domain *domain, iommu_gaddr_t base, int lvl,
         vm_pindex_t idx, idx1;
         DMAR_DOMAIN_ASSERT_PGLOCKED(domain);
-        KASSERT((flags & DMAR_PGF_OBJL) != 0, ("lost PGF_OBJL"));
+        KASSERT((flags & IOMMU_PGF_OBJL) != 0, ("lost PGF_OBJL"));
         idx = domain_pgtbl_get_pindex(domain, base, lvl);
         if (*sf != NULL && idx == *idxp) {
@@ -382,7 +382,7 @@ domain_pgtbl_map_pte(struct dmar_domain *domain, iommu_gaddr_t base, int lvl,
                          * to reference the allocated page table page.
                          */
                         m = dmar_pgalloc(domain->pgtbl_obj, idx, flags |
-                            DMAR_PGF_ZERO);
+                            IOMMU_PGF_ZERO);
                         if (m == NULL)
                                 return (NULL);
@@ -435,7 +435,7 @@ domain_map_buf_locked(struct dmar_domain *domain, iommu_gaddr_t base,
         base1 = base;
         size1 = size;
-        flags |= DMAR_PGF_OBJL;
+        flags |= IOMMU_PGF_OBJL;
         TD_PREP_PINNED_ASSERT;
         for (sf = NULL, pi = 0; size > 0; base += pg_sz, size -= pg_sz,
@@ -478,7 +478,7 @@ domain_map_buf_locked(struct dmar_domain *domain, iommu_gaddr_t base,
                 KASSERT(pg_sz > 0, ("pg_sz 0 lvl %d", lvl));
                 pte = domain_pgtbl_map_pte(domain, base, lvl, flags, &idx, &sf);
                 if (pte == NULL) {
-                        KASSERT((flags & DMAR_PGF_WAITOK) == 0,
+                        KASSERT((flags & IOMMU_PGF_WAITOK) == 0,
                             ("failed waitable pte alloc %p", domain));
                         if (sf != NULL)
                                 dmar_unmap_pgtbl(sf);
@@ -515,7 +515,7 @@ domain_map_buf(struct iommu_domain *iodom, iommu_gaddr_t base,
         domain = (struct dmar_domain *)iodom;
         unit = domain->dmar;
-        KASSERT((domain->iodom.flags & DMAR_DOMAIN_IDMAP) == 0,
+        KASSERT((domain->iodom.flags & IOMMU_DOMAIN_IDMAP) == 0,
             ("modifying idmap pagetable domain %p", domain));
         KASSERT((base & DMAR_PAGE_MASK) == 0,
             ("non-aligned base %p %jx %jx", domain, (uintmax_t)base,
@@ -547,7 +547,7 @@ domain_map_buf(struct iommu_domain *iodom, iommu_gaddr_t base,
             (unit->hw_ecap & DMAR_ECAP_DI) != 0,
             ("PTE_TM for dmar without DIOTLB %p %jx",
             domain, (uintmax_t)pflags));
-        KASSERT((flags & ~DMAR_PGF_WAITOK) == 0, ("invalid flags %x", flags));
+        KASSERT((flags & ~IOMMU_PGF_WAITOK) == 0, ("invalid flags %x", flags));
         DMAR_DOMAIN_PGLOCK(domain);
         error = domain_map_buf_locked(domain, base, size, ma, pflags, flags);
@@ -626,7 +626,7 @@ domain_unmap_buf_locked(struct dmar_domain *domain, iommu_gaddr_t base,
         if (size == 0)
                 return (0);
-        KASSERT((domain->iodom.flags & DMAR_DOMAIN_IDMAP) == 0,
+        KASSERT((domain->iodom.flags & IOMMU_DOMAIN_IDMAP) == 0,
             ("modifying idmap pagetable domain %p", domain));
         KASSERT((base & DMAR_PAGE_MASK) == 0,
             ("non-aligned base %p %jx %jx", domain, (uintmax_t)base,
@@ -643,10 +643,10 @@ domain_unmap_buf_locked(struct dmar_domain *domain, iommu_gaddr_t base,
         KASSERT(base + size > base,
             ("size overflow %p %jx %jx", domain, (uintmax_t)base,
             (uintmax_t)size));
-        KASSERT((flags & ~DMAR_PGF_WAITOK) == 0, ("invalid flags %x", flags));
+        KASSERT((flags & ~IOMMU_PGF_WAITOK) == 0, ("invalid flags %x", flags));
         pg_sz = 0; /* silence gcc */
-        flags |= DMAR_PGF_OBJL;
+        flags |= IOMMU_PGF_OBJL;
         TD_PREP_PINNED_ASSERT;
         for (sf = NULL; size > 0; base += pg_sz, size -= pg_sz) {
@@ -707,13 +707,13 @@ domain_alloc_pgtbl(struct dmar_domain *domain)
         domain->pgtbl_obj = vm_pager_allocate(OBJT_PHYS, NULL,
             IDX_TO_OFF(pglvl_max_pages(domain->pglvl)), 0, 0, NULL);
         DMAR_DOMAIN_PGLOCK(domain);
-        m = dmar_pgalloc(domain->pgtbl_obj, 0, DMAR_PGF_WAITOK |
-            DMAR_PGF_ZERO | DMAR_PGF_OBJL);
+        m = dmar_pgalloc(domain->pgtbl_obj, 0, IOMMU_PGF_WAITOK |
+            IOMMU_PGF_ZERO | IOMMU_PGF_OBJL);
         /* No implicit free of the top level page table page. */
         m->ref_count = 1;
         DMAR_DOMAIN_PGUNLOCK(domain);
         DMAR_LOCK(domain->dmar);
-        domain->iodom.flags |= DMAR_DOMAIN_PGTBL_INITED;
+        domain->iodom.flags |= IOMMU_DOMAIN_PGTBL_INITED;
         DMAR_UNLOCK(domain->dmar);
         return (0);
 }
@@ -727,16 +727,16 @@ domain_free_pgtbl(struct dmar_domain *domain)
         obj = domain->pgtbl_obj;
         if (obj == NULL) {
                 KASSERT((domain->dmar->hw_ecap & DMAR_ECAP_PT) != 0 &&
-                    (domain->iodom.flags & DMAR_DOMAIN_IDMAP) != 0,
+                    (domain->iodom.flags & IOMMU_DOMAIN_IDMAP) != 0,
                     ("lost pagetable object domain %p", domain));
                 return;
         }
         DMAR_DOMAIN_ASSERT_PGLOCKED(domain);
         domain->pgtbl_obj = NULL;
-        if ((domain->iodom.flags & DMAR_DOMAIN_IDMAP) != 0) {
+        if ((domain->iodom.flags & IOMMU_DOMAIN_IDMAP) != 0) {
                 put_idmap_pgtbl(obj);
-                domain->iodom.flags &= ~DMAR_DOMAIN_IDMAP;
+                domain->iodom.flags &= ~IOMMU_DOMAIN_IDMAP;
                 return;
         }

File 7 of 7:

@@ -262,22 +262,22 @@ dmar_pgalloc(vm_object_t obj, vm_pindex_t idx, int flags)
         vm_page_t m;
         int zeroed, aflags;
-        zeroed = (flags & DMAR_PGF_ZERO) != 0 ? VM_ALLOC_ZERO : 0;
+        zeroed = (flags & IOMMU_PGF_ZERO) != 0 ? VM_ALLOC_ZERO : 0;
         aflags = zeroed | VM_ALLOC_NOBUSY | VM_ALLOC_SYSTEM | VM_ALLOC_NODUMP |
-            ((flags & DMAR_PGF_WAITOK) != 0 ? VM_ALLOC_WAITFAIL :
+            ((flags & IOMMU_PGF_WAITOK) != 0 ? VM_ALLOC_WAITFAIL :
             VM_ALLOC_NOWAIT);
         for (;;) {
-                if ((flags & DMAR_PGF_OBJL) == 0)
+                if ((flags & IOMMU_PGF_OBJL) == 0)
                         VM_OBJECT_WLOCK(obj);
                 m = vm_page_lookup(obj, idx);
-                if ((flags & DMAR_PGF_NOALLOC) != 0 || m != NULL) {
-                        if ((flags & DMAR_PGF_OBJL) == 0)
+                if ((flags & IOMMU_PGF_NOALLOC) != 0 || m != NULL) {
+                        if ((flags & IOMMU_PGF_OBJL) == 0)
                                 VM_OBJECT_WUNLOCK(obj);
                         break;
                 }
                 m = vm_page_alloc_contig(obj, idx, aflags, 1, 0,
                     dmar_high, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
-                if ((flags & DMAR_PGF_OBJL) == 0)
+                if ((flags & IOMMU_PGF_OBJL) == 0)
                         VM_OBJECT_WUNLOCK(obj);
                 if (m != NULL) {
                         if (zeroed && (m->flags & PG_ZERO) == 0)
@@ -285,7 +285,7 @@ dmar_pgalloc(vm_object_t obj, vm_pindex_t idx, int flags)
                         atomic_add_int(&dmar_tbl_pagecnt, 1);
                         break;
                 }
-                if ((flags & DMAR_PGF_WAITOK) == 0)
+                if ((flags & IOMMU_PGF_WAITOK) == 0)
                         break;
         }
         return (m);
@@ -296,14 +296,14 @@ dmar_pgfree(vm_object_t obj, vm_pindex_t idx, int flags)
 {
         vm_page_t m;
-        if ((flags & DMAR_PGF_OBJL) == 0)
+        if ((flags & IOMMU_PGF_OBJL) == 0)
                 VM_OBJECT_WLOCK(obj);
         m = vm_page_grab(obj, idx, VM_ALLOC_NOCREAT);
         if (m != NULL) {
                 vm_page_free(m);
                 atomic_subtract_int(&dmar_tbl_pagecnt, 1);
         }
-        if ((flags & DMAR_PGF_OBJL) == 0)
+        if ((flags & IOMMU_PGF_OBJL) == 0)
                 VM_OBJECT_WUNLOCK(obj);
 }
@@ -314,39 +314,39 @@ dmar_map_pgtbl(vm_object_t obj, vm_pindex_t idx, int flags,
         vm_page_t m;
         bool allocated;
-        if ((flags & DMAR_PGF_OBJL) == 0)
+        if ((flags & IOMMU_PGF_OBJL) == 0)
                 VM_OBJECT_WLOCK(obj);
         m = vm_page_lookup(obj, idx);
-        if (m == NULL && (flags & DMAR_PGF_ALLOC) != 0) {
-                m = dmar_pgalloc(obj, idx, flags | DMAR_PGF_OBJL);
+        if (m == NULL && (flags & IOMMU_PGF_ALLOC) != 0) {
+                m = dmar_pgalloc(obj, idx, flags | IOMMU_PGF_OBJL);
                 allocated = true;
         } else
                 allocated = false;
         if (m == NULL) {
-                if ((flags & DMAR_PGF_OBJL) == 0)
+                if ((flags & IOMMU_PGF_OBJL) == 0)
                         VM_OBJECT_WUNLOCK(obj);
                 return (NULL);
         }
         /* Sleepable allocations cannot fail. */
-        if ((flags & DMAR_PGF_WAITOK) != 0)
+        if ((flags & IOMMU_PGF_WAITOK) != 0)
                 VM_OBJECT_WUNLOCK(obj);
         sched_pin();
-        *sf = sf_buf_alloc(m, SFB_CPUPRIVATE | ((flags & DMAR_PGF_WAITOK)
+        *sf = sf_buf_alloc(m, SFB_CPUPRIVATE | ((flags & IOMMU_PGF_WAITOK)
             == 0 ? SFB_NOWAIT : 0));
         if (*sf == NULL) {
                 sched_unpin();
                 if (allocated) {
                         VM_OBJECT_ASSERT_WLOCKED(obj);
-                        dmar_pgfree(obj, m->pindex, flags | DMAR_PGF_OBJL);
+                        dmar_pgfree(obj, m->pindex, flags | IOMMU_PGF_OBJL);
                 }
-                if ((flags & DMAR_PGF_OBJL) == 0)
+                if ((flags & IOMMU_PGF_OBJL) == 0)
                         VM_OBJECT_WUNLOCK(obj);
                 return (NULL);
         }
-        if ((flags & (DMAR_PGF_WAITOK | DMAR_PGF_OBJL)) ==
-            (DMAR_PGF_WAITOK | DMAR_PGF_OBJL))
+        if ((flags & (IOMMU_PGF_WAITOK | IOMMU_PGF_OBJL)) ==
+            (IOMMU_PGF_WAITOK | IOMMU_PGF_OBJL))
                 VM_OBJECT_WLOCK(obj);
-        else if ((flags & (DMAR_PGF_WAITOK | DMAR_PGF_OBJL)) == 0)
+        else if ((flags & (IOMMU_PGF_WAITOK | IOMMU_PGF_OBJL)) == 0)
                 VM_OBJECT_WUNLOCK(obj);
         return ((void *)sf_buf_kva(*sf));
 }
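
dmar_pgalloc() above derives its vm_page allocation flags from the IOMMU_PGF_* bits: IOMMU_PGF_ZERO requests VM_ALLOC_ZERO, and IOMMU_PGF_WAITOK selects VM_ALLOC_WAITFAIL (with the retry loop) instead of VM_ALLOC_NOWAIT. A self-contained sketch of that mapping, using stand-in constants (the kernel's VM_ALLOC_* values differ):

#include <stdio.h>

/* IOMMU_PGF_* values from the iommu.h hunk earlier in this commit. */
#define IOMMU_PGF_WAITOK 0x0001
#define IOMMU_PGF_ZERO   0x0002

/* Stand-ins for the vm_page allocation flags; kernel values differ. */
#define VM_ALLOC_ZERO     0x0001
#define VM_ALLOC_NOBUSY   0x0002
#define VM_ALLOC_SYSTEM   0x0004
#define VM_ALLOC_NODUMP   0x0008
#define VM_ALLOC_WAITFAIL 0x0010
#define VM_ALLOC_NOWAIT   0x0020

static int
pgf_to_aflags(int flags)
{
	int zeroed;

	/* Mirrors the aflags computation in dmar_pgalloc() above. */
	zeroed = (flags & IOMMU_PGF_ZERO) != 0 ? VM_ALLOC_ZERO : 0;
	return (zeroed | VM_ALLOC_NOBUSY | VM_ALLOC_SYSTEM | VM_ALLOC_NODUMP |
	    ((flags & IOMMU_PGF_WAITOK) != 0 ? VM_ALLOC_WAITFAIL :
	    VM_ALLOC_NOWAIT));
}

int
main(void)
{
	printf("aflags(WAITOK|ZERO) = %#x\n",
	    (unsigned)pgf_to_aflags(IOMMU_PGF_WAITOK | IOMMU_PGF_ZERO));
	printf("aflags(0) = %#x\n", (unsigned)pgf_to_aflags(0));
	return (0);
}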