iommu_gas: Eliminate a possible case of use-after-free

Eliminate a possible case of use-after-free in an error handling path
after a mapping failure.  Specifically, eliminate IOMMU_MAP_ENTRY_QI_NF
and instead perform the IOTLB invalidation synchronously.  Otherwise,
when iommu_domain_unload_entry() is called and told not to free the
IOMMU map entry, the caller could free the entry before dmar_qi_task()
is finished with it.

Reviewed by:	kib
MFC after:	1 week
Differential Revision:	https://reviews.freebsd.org/D35878
Alan Cox 2022-07-22 12:00:26 -05:00
parent 3416f5cde7
commit 8bc3673847
7 changed files with 41 additions and 16 deletions
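
For readers unfamiliar with the queued-invalidation (QI) machinery, here is a minimal user-space sketch of the ownership rule the change enforces. It is not part of the commit: the structures, helper names, and the pthread-based flush task below are simplified stand-ins for the kernel code. The idea is that when the caller keeps the map entry (free == false), the invalidation completes before the unload routine returns, so the caller may free or reuse the entry immediately; only when ownership passes to the flush task (free == true) is the entry queued and freed asynchronously.

/*
 * Minimal user-space model (illustrative only, not the kernel code) of the
 * ownership rule: synchronous invalidation when the caller keeps the entry,
 * asynchronous queueing only when the flush task takes ownership.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct map_entry {
	struct map_entry *next;		/* models dmamap_link */
	int start, end;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static struct map_entry *flush_head;	/* models tlb_flush_entries */
static bool task_exit;

/* Models the flush task: drains the queue and frees each queued entry. */
static void *
flush_task(void *arg)
{
	struct map_entry *e;

	(void)arg;
	pthread_mutex_lock(&lock);
	for (;;) {
		while (flush_head == NULL && !task_exit)
			pthread_cond_wait(&cv, &lock);
		if (flush_head == NULL && task_exit)
			break;
		e = flush_head;
		flush_head = e->next;
		pthread_mutex_unlock(&lock);
		printf("task: invalidated [%d, %d), freeing entry\n",
		    e->start, e->end);
		free(e);			/* task owns the entry */
		pthread_mutex_lock(&lock);
	}
	pthread_mutex_unlock(&lock);
	return (NULL);
}

/*
 * Models the unload path: with free_entry == false the invalidation is done
 * before returning, so no reference to the entry is left behind.
 */
static void
unload_entry(struct map_entry *e, bool free_entry)
{
	pthread_mutex_lock(&lock);
	if (free_entry) {
		/* Hand the entry to the task; it will be freed there. */
		e->next = flush_head;
		flush_head = e;
		pthread_cond_signal(&cv);
	} else {
		/* Synchronous path: do the work now, caller keeps ownership. */
		printf("caller: invalidated [%d, %d) synchronously\n",
		    e->start, e->end);
	}
	pthread_mutex_unlock(&lock);
}

int
main(void)
{
	pthread_t task;
	struct map_entry *a, *b;

	pthread_create(&task, NULL, flush_task, NULL);

	a = calloc(1, sizeof(*a));
	a->start = 0; a->end = 4096;
	unload_entry(a, true);		/* task frees it later */

	b = calloc(1, sizeof(*b));
	b->start = 8192; b->end = 12288;
	unload_entry(b, false);		/* caller keeps it... */
	free(b);			/* ...and may free it safely now */

	pthread_mutex_lock(&lock);
	task_exit = true;
	pthread_cond_signal(&cv);
	pthread_mutex_unlock(&lock);
	pthread_join(task, NULL);
	return (0);
}

Compile with cc -pthread. The only point is that the synchronous path never leaves a reference to the caller's entry on the flush queue, which is exactly why the old QI_NF flag could be dropped.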


@@ -509,7 +509,8 @@ iommu_find(device_t dev, bool verbose)
 }
 
 void
-iommu_domain_unload_entry(struct iommu_map_entry *entry, bool free)
+iommu_domain_unload_entry(struct iommu_map_entry *entry, bool free,
+    bool cansleep __unused)
 {
 
 	dprintf("%s\n", __func__);

@@ -151,7 +151,8 @@ void iommu_free_ctx_locked(struct iommu_unit *iommu, struct iommu_ctx *ctx);
 struct iommu_ctx *iommu_get_ctx(struct iommu_unit *, device_t dev,
     uint16_t rid, bool id_mapped, bool rmrr_init);
 struct iommu_unit *iommu_find(device_t dev, bool verbose);
-void iommu_domain_unload_entry(struct iommu_map_entry *entry, bool free);
+void iommu_domain_unload_entry(struct iommu_map_entry *entry, bool free,
+    bool cansleep);
 void iommu_domain_unload(struct iommu_domain *domain,
     struct iommu_map_entries_tailq *entries, bool cansleep);


@@ -638,7 +638,8 @@ iommu_gas_map(struct iommu_domain *domain,
 	    entry->end - entry->start, ma, eflags,
 	    ((flags & IOMMU_MF_CANWAIT) != 0 ? IOMMU_PGF_WAITOK : 0));
 	if (error == ENOMEM) {
-		iommu_domain_unload_entry(entry, true);
+		iommu_domain_unload_entry(entry, true,
+		    (flags & IOMMU_MF_CANWAIT) != 0);
 		return (error);
 	}
 	KASSERT(error == 0,
@@ -676,7 +677,8 @@ iommu_gas_map_region(struct iommu_domain *domain, struct iommu_map_entry *entry,
 	    entry->end - entry->start, ma + OFF_TO_IDX(start - entry->start),
 	    eflags, ((flags & IOMMU_MF_CANWAIT) != 0 ? IOMMU_PGF_WAITOK : 0));
 	if (error == ENOMEM) {
-		iommu_domain_unload_entry(entry, false);
+		iommu_domain_unload_entry(entry, false,
+		    (flags & IOMMU_MF_CANWAIT) != 0);
 		return (error);
 	}
 	KASSERT(error == 0,


@@ -50,7 +50,6 @@
 #define	IOMMU_MAP_ENTRY_MAP	0x0004	/* Busdma created, linked by
					   dmamap_link */
 #define	IOMMU_MAP_ENTRY_UNMAPPED	0x0010	/* No backing pages */
-#define	IOMMU_MAP_ENTRY_QI_NF	0x0020	/* qi task, do not free entry */
 #define	IOMMU_MAP_ENTRY_READ	0x1000	/* Read permitted */
 #define	IOMMU_MAP_ENTRY_WRITE	0x2000	/* Write permitted */
 #define	IOMMU_MAP_ENTRY_SNOOP	0x4000	/* Snoop */


@@ -868,25 +868,35 @@ dmar_domain_free_entry(struct iommu_map_entry *entry, bool free)
 }
 
 void
-iommu_domain_unload_entry(struct iommu_map_entry *entry, bool free)
+iommu_domain_unload_entry(struct iommu_map_entry *entry, bool free,
+    bool cansleep)
 {
 	struct dmar_domain *domain;
 	struct dmar_unit *unit;
 
 	domain = IODOM2DOM(entry->domain);
 	unit = DOM2DMAR(domain);
+
+	/*
+	 * If "free" is false, then the IOTLB invalidation must be performed
+	 * synchronously.  Otherwise, the caller might free the entry before
+	 * dmar_qi_task() is finished processing it.
+	 */
 	if (unit->qi_enabled) {
 		DMAR_LOCK(unit);
-		dmar_qi_invalidate_locked(IODOM2DOM(entry->domain),
-		    entry->start, entry->end - entry->start, &entry->gseq,
-		    true);
-		if (!free)
-			entry->flags |= IOMMU_MAP_ENTRY_QI_NF;
-		TAILQ_INSERT_TAIL(&unit->tlb_flush_entries, entry, dmamap_link);
+		if (free) {
+			dmar_qi_invalidate_locked(domain, entry->start,
+			    entry->end - entry->start, &entry->gseq, true);
+			TAILQ_INSERT_TAIL(&unit->tlb_flush_entries, entry,
+			    dmamap_link);
+		} else {
+			dmar_qi_invalidate_sync_locked(domain, entry->start,
+			    entry->end - entry->start, cansleep);
+		}
 		DMAR_UNLOCK(unit);
 	} else {
-		domain_flush_iotlb_sync(IODOM2DOM(entry->domain),
-		    entry->start, entry->end - entry->start);
+		domain_flush_iotlb_sync(domain, entry->start, entry->end -
+		    entry->start);
 		dmar_domain_free_entry(entry, free);
 	}
 }


@@ -251,6 +251,8 @@ int dmar_init_qi(struct dmar_unit *unit);
 void dmar_fini_qi(struct dmar_unit *unit);
 void dmar_qi_invalidate_locked(struct dmar_domain *domain, iommu_gaddr_t start,
     iommu_gaddr_t size, struct iommu_qi_genseq *pseq, bool emit_wait);
+void dmar_qi_invalidate_sync_locked(struct dmar_domain *domain,
+    iommu_gaddr_t start, iommu_gaddr_t size, bool cansleep);
 void dmar_qi_invalidate_ctx_glob_locked(struct dmar_unit *unit);
 void dmar_qi_invalidate_iotlb_glob_locked(struct dmar_unit *unit);
 void dmar_qi_invalidate_iec_glob(struct dmar_unit *unit);


@@ -242,6 +242,17 @@ dmar_qi_invalidate_locked(struct dmar_domain *domain, iommu_gaddr_t base,
 	dmar_qi_advance_tail(unit);
 }
 
+void
+dmar_qi_invalidate_sync_locked(struct dmar_domain *domain, iommu_gaddr_t base,
+    iommu_gaddr_t size, bool cansleep)
+{
+	struct iommu_qi_genseq gseq;
+
+	DMAR_ASSERT_LOCKED(domain->dmar);
+	dmar_qi_invalidate_locked(domain, base, size, &gseq, true);
+	dmar_qi_wait_for_seq(domain->dmar, &gseq, !cansleep);
+}
+
 void
 dmar_qi_invalidate_ctx_glob_locked(struct dmar_unit *unit)
 {
@@ -362,8 +373,7 @@ dmar_qi_task(void *arg, int pending __unused)
 			break;
 		TAILQ_REMOVE(&unit->tlb_flush_entries, entry, dmamap_link);
 		DMAR_UNLOCK(unit);
-		dmar_domain_free_entry(entry, (entry->flags &
-		    IOMMU_MAP_ENTRY_QI_NF) == 0);
+		dmar_domain_free_entry(entry, true);
 		DMAR_LOCK(unit);
 	}
 	if (unit->inv_seq_waiters > 0)