busdma: Add KMSAN integration

Sanitizer instrumentation of course cannot automatically update shadow
state when devices write to host memory.  KMSAN thus hooks into busdma,
both to update shadow state after a device write, and to verify that the
kernel does not publish uninitialized bytes to devices.

To implement this, when KMSAN is configured, each dmamap embeds a memory
descriptor describing the region currently loaded into the map.
bus_dmamap_sync() uses the operation flags to determine whether to
validate the loaded region or to mark it as initialized in the shadow
map.

Note that in cases where the amount of data written is less than the
buffer size, the entire buffer is marked initialized even when it is
not.  For example, if a NIC writes a 128B packet into a 2KB buffer, the
entire buffer will be marked initialized, but subsequent accesses past
the first 128 bytes are likely caused by bugs.

Reviewed by:	kib
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D31338
This commit is contained in:
Mark Johnston 2021-08-10 17:14:15 -04:00
parent 3a1802fef4
commit 693c9516fa
6 changed files with 103 additions and 2 deletions

View File

@ -43,6 +43,7 @@ __FBSDID("$FreeBSD$");
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/memdesc.h>
#include <sys/msan.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/rman.h>
@ -916,11 +917,28 @@ iommu_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map1)
}
/*
 * NOTE(review): this span is a rendered diff hunk — the next two lines show
 * the pre-change signature followed by its replacement; only the "map1"
 * variant is the current code.  The rename frees the name "map" for the
 * casted iommu map below.
 */
static void
iommu_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
iommu_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map1,
bus_dmasync_op_t op)
{
struct bus_dmamap_iommu *map;
map = (struct bus_dmamap_iommu *)map1;
/*
 * Update or validate KMSAN shadow state for the loaded region, depending
 * on the sync op (no-op kernel-wide unless KMSAN is configured).
 */
kmsan_bus_dmamap_sync(&map->kmsan_mem, op);
}
#ifdef KMSAN
/*
 * Record the memory descriptor for the region being loaded into the map so
 * that iommu_bus_dmamap_sync() can later update or validate KMSAN shadow
 * state for it.  A NULL map (nothing to record into) is silently ignored.
 */
static void
iommu_bus_dmamap_load_kmsan(bus_dmamap_t map1, struct memdesc *mem)
{
	struct bus_dmamap_iommu *map;

	map = (struct bus_dmamap_iommu *)map1;
	if (map == NULL)
		return;
	/*
	 * Size the copy from the destination object rather than the type,
	 * matching bounce_bus_dmamap_load_kmsan().
	 */
	memcpy(&map->kmsan_mem, mem, sizeof(map->kmsan_mem));
}
#endif
struct bus_dma_impl bus_dma_iommu_impl = {
.tag_create = iommu_bus_dma_tag_create,
.tag_destroy = iommu_bus_dma_tag_destroy,
@ -937,6 +955,9 @@ struct bus_dma_impl bus_dma_iommu_impl = {
.map_complete = iommu_bus_dmamap_complete,
.map_unload = iommu_bus_dmamap_unload,
.map_sync = iommu_bus_dmamap_sync,
#ifdef KMSAN
.load_kmsan = iommu_bus_dmamap_load_kmsan,
#endif
};
static void

View File

@ -54,6 +54,9 @@ struct bus_dmamap_iommu {
bool locked;
bool cansleep;
int flags;
#ifdef KMSAN
struct memdesc kmsan_mem;
#endif
};
#define BUS_DMAMAP_IOMMU_MALLOC 0x0001

View File

@ -408,6 +408,11 @@ bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
int error;
int nsegs;
#ifdef KMSAN
mem = memdesc_vaddr(buf, buflen);
_bus_dmamap_load_kmsan(dmat, map, &mem);
#endif
if ((flags & BUS_DMA_NOWAIT) == 0) {
mem = memdesc_vaddr(buf, buflen);
_bus_dmamap_waitok(dmat, map, &mem, callback, callback_arg);
@ -449,6 +454,11 @@ bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
M_ASSERTPKTHDR(m0);
#ifdef KMSAN
struct memdesc mem = memdesc_mbuf(m0);
_bus_dmamap_load_kmsan(dmat, map, &mem);
#endif
flags |= BUS_DMA_NOWAIT;
nsegs = -1;
error = _bus_dmamap_load_mbuf_sg(dmat, map, m0, NULL, &nsegs, flags);
@ -471,6 +481,11 @@ bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
{
int error;
#ifdef KMSAN
struct memdesc mem = memdesc_mbuf(m0);
_bus_dmamap_load_kmsan(dmat, map, &mem);
#endif
flags |= BUS_DMA_NOWAIT;
*nsegs = -1;
error = _bus_dmamap_load_mbuf_sg(dmat, map, m0, segs, nsegs, flags);
@ -486,6 +501,11 @@ bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
bus_dma_segment_t *segs;
int nsegs, error;
#ifdef KMSAN
struct memdesc mem = memdesc_uio(uio);
_bus_dmamap_load_kmsan(dmat, map, &mem);
#endif
flags |= BUS_DMA_NOWAIT;
nsegs = -1;
error = _bus_dmamap_load_uio(dmat, map, uio, &nsegs, flags);
@ -513,6 +533,11 @@ bus_dmamap_load_ccb(bus_dma_tag_t dmat, bus_dmamap_t map, union ccb *ccb,
int error;
int nsegs;
#ifdef KMSAN
mem = memdesc_ccb(ccb);
_bus_dmamap_load_kmsan(dmat, map, &mem);
#endif
ccb_h = &ccb->ccb_h;
if ((ccb_h->flags & CAM_DIR_MASK) == CAM_DIR_NONE) {
callback(callback_arg, NULL, 0, 0);
@ -557,6 +582,11 @@ bus_dmamap_load_bio(bus_dma_tag_t dmat, bus_dmamap_t map, struct bio *bio,
int error;
int nsegs;
#ifdef KMSAN
mem = memdesc_bio(bio);
_bus_dmamap_load_kmsan(dmat, map, &mem);
#endif
if ((flags & BUS_DMA_NOWAIT) == 0) {
mem = memdesc_bio(bio);
_bus_dmamap_waitok(dmat, map, &mem, callback, callback_arg);
@ -595,6 +625,10 @@ bus_dmamap_load_mem(bus_dma_tag_t dmat, bus_dmamap_t map,
int error;
int nsegs;
#ifdef KMSAN
_bus_dmamap_load_kmsan(dmat, map, mem);
#endif
if ((flags & BUS_DMA_NOWAIT) == 0)
_bus_dmamap_waitok(dmat, map, mem, callback, callback_arg);

View File

@ -191,4 +191,16 @@ _bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
return (tc->impl->map_complete(dmat, map, segs, nsegs, error));
}
#ifdef KMSAN
/*
 * Forward the memory descriptor of a freshly loaded buffer to the DMA
 * implementation so it can be recorded in the dmamap for later KMSAN
 * shadow-state updates in bus_dmamap_sync().
 */
static inline void
_bus_dmamap_load_kmsan(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct memdesc *mem)
{
	struct bus_dma_tag_common *tc;

	tc = (struct bus_dma_tag_common *)dmat;
	/*
	 * "return (<void expression>);" in a void function is an ISO C
	 * constraint violation (C11 6.8.6.4); call the hook directly.
	 */
	tc->impl->load_kmsan(map, mem);
}
#endif
#endif /* !_X86_BUS_DMA_H_ */

View File

@ -84,6 +84,9 @@ struct bus_dma_impl {
void (*map_unload)(bus_dma_tag_t dmat, bus_dmamap_t map);
void (*map_sync)(bus_dma_tag_t dmat, bus_dmamap_t map,
bus_dmasync_op_t op);
#ifdef KMSAN
void (*load_kmsan)(bus_dmamap_t map, struct memdesc *mem);
#endif
};
void bus_dma_dflt_lock(void *arg, bus_dma_lock_op_t op);

View File

@ -40,6 +40,7 @@ __FBSDID("$FreeBSD$");
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/memdesc.h>
#include <sys/msan.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
@ -129,6 +130,9 @@ struct bus_dmamap {
bus_dmamap_callback_t *callback;
void *callback_arg;
STAILQ_ENTRY(bus_dmamap) links;
#ifdef KMSAN
struct memdesc kmsan_mem;
#endif
};
static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
@ -203,6 +207,14 @@ bounce_bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
newtag->map_count = 0;
newtag->segments = NULL;
#ifdef KMSAN
/*
* When KMSAN is configured, we need a map to store a memory descriptor
* which can be used for validation.
*/
newtag->bounce_flags |= BUS_DMA_FORCE_MAP;
#endif
if (parent != NULL && (newtag->common.filter != NULL ||
(parent->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0))
newtag->bounce_flags |= BUS_DMA_COULD_BOUNCE;
@ -975,7 +987,10 @@ bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
vm_offset_t datavaddr, tempvaddr;
bus_size_t datacount1, datacount2;
if (map == NULL || (bpage = STAILQ_FIRST(&map->bpages)) == NULL)
if (map == NULL)
goto out;
kmsan_bus_dmamap_sync(&map->kmsan_mem, op);
if ((bpage = STAILQ_FIRST(&map->bpages)) == NULL)
goto out;
/*
@ -1070,6 +1085,16 @@ bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
atomic_thread_fence_rel();
}
#ifdef KMSAN
/*
 * Stash the memory descriptor of the buffer being loaded into the map so
 * that bounce_bus_dmamap_sync() can later update or validate KMSAN shadow
 * state for it.  Maps without backing storage (NULL) are ignored.
 */
static void
bounce_bus_dmamap_load_kmsan(bus_dmamap_t map, struct memdesc *mem)
{
	if (map != NULL)
		map->kmsan_mem = *mem;
}
#endif
static void
init_bounce_pages(void *dummy __unused)
{
@ -1351,4 +1376,7 @@ struct bus_dma_impl bus_dma_bounce_impl = {
.map_complete = bounce_bus_dmamap_complete,
.map_unload = bounce_bus_dmamap_unload,
.map_sync = bounce_bus_dmamap_sync,
#ifdef KMSAN
.load_kmsan = bounce_bus_dmamap_load_kmsan,
#endif
};