[ndis] Fix unregistered use of FPU by NDIS in kernel on amd64

amd64 miniport drivers are allowed to use the FPU, which triggers an
"Unregistered use of FPU in kernel" panic.

Wrap all variants of MSCALL with fpu_kern_enter/fpu_kern_leave.  To reduce
the number of allocations/deallocations done via
fpu_kern_alloc_ctx/fpu_kern_free_ctx, maintain a cache of fpu_kern_ctx elements.

Based on the patch by Paul B Mahol

PR:		165622
Submitted by:	Vlad Movchan <vladislav.movchan@gmail.com>
MFC after:	1 month
This commit is contained in:
Oleksandr Tymoshenko 2019-01-22 03:53:42 +00:00
parent fe5a065d4b
commit 1715256316
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=343298
2 changed files with 195 additions and 6 deletions

View File

@ -58,6 +58,10 @@ __FBSDID("$FreeBSD$");
#include <machine/segments.h>
#endif
#ifdef __amd64__
#include <machine/fpu.h>
#endif
#include <dev/usb/usb.h>
#include <compat/ndis/pe_var.h>
@ -68,6 +72,19 @@ __FBSDID("$FreeBSD$");
#include <compat/ndis/hal_var.h>
#include <compat/ndis/usbd_var.h>
#ifdef __amd64__
struct fpu_cc_ent {
struct fpu_kern_ctx *ctx;
LIST_ENTRY(fpu_cc_ent) entries;
};
static LIST_HEAD(fpu_ctx_free, fpu_cc_ent) fpu_free_head =
LIST_HEAD_INITIALIZER(fpu_free_head);
static LIST_HEAD(fpu_ctx_busy, fpu_cc_ent) fpu_busy_head =
LIST_HEAD_INITIALIZER(fpu_busy_head);
static struct mtx fpu_free_mtx;
static struct mtx fpu_busy_mtx;
#endif
static struct mtx drvdb_mtx;
static STAILQ_HEAD(drvdb, drvdb_ent) drvdb_head;
@ -98,6 +115,13 @@ windrv_libinit(void)
mtx_init(&drvdb_mtx, "Windows driver DB lock",
"Windows internal lock", MTX_DEF);
#ifdef __amd64__
LIST_INIT(&fpu_free_head);
LIST_INIT(&fpu_busy_head);
mtx_init(&fpu_free_mtx, "free fpu context list lock", NULL, MTX_DEF);
mtx_init(&fpu_busy_mtx, "busy fpu context list lock", NULL, MTX_DEF);
#endif
/*
* PCI and pccard devices don't need to use IRPs to
* interact with their bus drivers (usually), so our
@ -132,6 +156,9 @@ int
windrv_libfini(void)
{
struct drvdb_ent *d;
#ifdef __amd64__
struct fpu_cc_ent *ent;
#endif
mtx_lock(&drvdb_mtx);
while(STAILQ_FIRST(&drvdb_head) != NULL) {
@ -149,6 +176,18 @@ windrv_libfini(void)
#ifdef __i386__
smp_rendezvous(NULL, x86_oldldt, NULL, NULL);
ExFreePool(my_tids);
#endif
#ifdef __amd64__
while ((ent = LIST_FIRST(&fpu_free_head)) != NULL) {
LIST_REMOVE(ent, entries);
fpu_kern_free_ctx(ent->ctx);
free(ent, M_DEVBUF);
}
mtx_destroy(&fpu_free_mtx);
ent = LIST_FIRST(&fpu_busy_head);
KASSERT(ent == NULL, ("busy fpu context list is not empty"));
mtx_destroy(&fpu_busy_mtx);
#endif
return (0);
}
@ -615,6 +654,148 @@ windrv_wrap(func, wrap, argcnt, ftype)
return (0);
}
/*
 * Obtain an FPU context-cache entry for the calling thread.
 *
 * Fast path: pop a cached entry off the free list.  Slow path: allocate
 * a new entry and its fpu_kern context (both with M_NOWAIT, so this can
 * be called from contexts that must not sleep).  Either way the entry is
 * moved onto the busy list before being handed to the caller.
 *
 * Returns NULL if neither the entry nor its context could be allocated.
 */
static struct fpu_cc_ent *
request_fpu_cc_ent(void)
{
	struct fpu_cc_ent *ent;

	/* Fast path: reuse a previously cached entry. */
	mtx_lock(&fpu_free_mtx);
	ent = LIST_FIRST(&fpu_free_head);
	if (ent != NULL)
		LIST_REMOVE(ent, entries);
	mtx_unlock(&fpu_free_mtx);

	if (ent == NULL) {
		/* Slow path: build a fresh entry. */
		ent = malloc(sizeof(struct fpu_cc_ent), M_DEVBUF,
		    M_NOWAIT | M_ZERO);
		if (ent == NULL)
			return (NULL);
		ent->ctx = fpu_kern_alloc_ctx(FPU_KERN_NORMAL |
		    FPU_KERN_NOWAIT);
		if (ent->ctx == NULL) {
			free(ent, M_DEVBUF);
			return (NULL);
		}
	}

	/* Track the entry as in-use until release_fpu_cc_ent(). */
	mtx_lock(&fpu_busy_mtx);
	LIST_INSERT_HEAD(&fpu_busy_head, ent, entries);
	mtx_unlock(&fpu_busy_mtx);

	return (ent);
}
/*
 * Return a context-cache entry obtained from request_fpu_cc_ent().
 * The entry is moved from the busy list back to the free list so it
 * can be reused by a later call instead of being freed.
 *
 * The two lists are protected by separate mutexes; each is taken and
 * dropped independently, so the two insert/remove steps are not atomic
 * with respect to each other (the entry is briefly on neither list).
 */
static void
release_fpu_cc_ent(struct fpu_cc_ent *ent)
{
	/* Detach from the busy list... */
	mtx_lock(&fpu_busy_mtx);
	LIST_REMOVE(ent, entries);
	mtx_unlock(&fpu_busy_mtx);

	/* ...and park on the free list for reuse. */
	mtx_lock(&fpu_free_mtx);
	LIST_INSERT_HEAD(&fpu_free_head, ent, entries);
	mtx_unlock(&fpu_free_mtx);
}
/*
 * FPU-safe wrapper around the 1-argument x86_64_call1() trampoline.
 * Brackets the call into Windows driver code with fpu_kern_enter()/
 * fpu_kern_leave() using a cached context, so the driver may use the
 * FPU without panicking the kernel.
 *
 * NOTE(review): on context-allocation failure this returns ENOMEM cast
 * to uint64_t, which the caller cannot distinguish from the callee
 * legitimately returning that value -- inherited interface limitation.
 */
uint64_t
_x86_64_call1(void *fn, uint64_t a)
{
	struct fpu_cc_ent *ent;
	uint64_t rval;

	ent = request_fpu_cc_ent();
	if (ent == NULL)
		return (ENOMEM);

	fpu_kern_enter(curthread, ent->ctx, FPU_KERN_NORMAL);
	rval = x86_64_call1(fn, a);
	fpu_kern_leave(curthread, ent->ctx);

	release_fpu_cc_ent(ent);
	return (rval);
}
/*
 * FPU-safe wrapper around the 2-argument x86_64_call2() trampoline;
 * see _x86_64_call1() for details.  Returns ENOMEM (as uint64_t) if no
 * FPU context could be obtained.
 */
uint64_t
_x86_64_call2(void *fn, uint64_t a, uint64_t b)
{
	struct fpu_cc_ent *ent;
	uint64_t rval;

	ent = request_fpu_cc_ent();
	if (ent == NULL)
		return (ENOMEM);

	fpu_kern_enter(curthread, ent->ctx, FPU_KERN_NORMAL);
	rval = x86_64_call2(fn, a, b);
	fpu_kern_leave(curthread, ent->ctx);

	release_fpu_cc_ent(ent);
	return (rval);
}
/*
 * FPU-safe wrapper around the 3-argument x86_64_call3() trampoline;
 * see _x86_64_call1() for details.  Returns ENOMEM (as uint64_t) if no
 * FPU context could be obtained.
 */
uint64_t
_x86_64_call3(void *fn, uint64_t a, uint64_t b, uint64_t c)
{
	struct fpu_cc_ent *ent;
	uint64_t rval;

	ent = request_fpu_cc_ent();
	if (ent == NULL)
		return (ENOMEM);

	fpu_kern_enter(curthread, ent->ctx, FPU_KERN_NORMAL);
	rval = x86_64_call3(fn, a, b, c);
	fpu_kern_leave(curthread, ent->ctx);

	release_fpu_cc_ent(ent);
	return (rval);
}
/*
 * FPU-safe wrapper around the 4-argument x86_64_call4() trampoline;
 * see _x86_64_call1() for details.  Returns ENOMEM (as uint64_t) if no
 * FPU context could be obtained.
 */
uint64_t
_x86_64_call4(void *fn, uint64_t a, uint64_t b, uint64_t c, uint64_t d)
{
	struct fpu_cc_ent *ent;
	uint64_t rval;

	ent = request_fpu_cc_ent();
	if (ent == NULL)
		return (ENOMEM);

	fpu_kern_enter(curthread, ent->ctx, FPU_KERN_NORMAL);
	rval = x86_64_call4(fn, a, b, c, d);
	fpu_kern_leave(curthread, ent->ctx);

	release_fpu_cc_ent(ent);
	return (rval);
}
/*
 * FPU-safe wrapper around the 5-argument x86_64_call5() trampoline;
 * see _x86_64_call1() for details.  Returns ENOMEM (as uint64_t) if no
 * FPU context could be obtained.
 */
uint64_t
_x86_64_call5(void *fn, uint64_t a, uint64_t b, uint64_t c, uint64_t d,
    uint64_t e)
{
	struct fpu_cc_ent *ent;
	uint64_t rval;

	ent = request_fpu_cc_ent();
	if (ent == NULL)
		return (ENOMEM);

	fpu_kern_enter(curthread, ent->ctx, FPU_KERN_NORMAL);
	rval = x86_64_call5(fn, a, b, c, d, e);
	fpu_kern_leave(curthread, ent->ctx);

	release_fpu_cc_ent(ent);
	return (rval);
}
/*
 * FPU-safe wrapper around the 6-argument x86_64_call6() trampoline;
 * see _x86_64_call1() for details.  Returns ENOMEM (as uint64_t) if no
 * FPU context could be obtained.
 */
uint64_t
_x86_64_call6(void *fn, uint64_t a, uint64_t b, uint64_t c, uint64_t d,
    uint64_t e, uint64_t f)
{
	struct fpu_cc_ent *ent;
	uint64_t rval;

	ent = request_fpu_cc_ent();
	if (ent == NULL)
		return (ENOMEM);

	fpu_kern_enter(curthread, ent->ctx, FPU_KERN_NORMAL);
	rval = x86_64_call6(fn, a, b, c, d, e, f);
	fpu_kern_leave(curthread, ent->ctx);

	release_fpu_cc_ent(ent);
	return (rval);
}
#endif /* __amd64__ */

View File

@ -460,22 +460,30 @@ extern uint64_t x86_64_call5(void *, uint64_t, uint64_t, uint64_t, uint64_t,
extern uint64_t x86_64_call6(void *, uint64_t, uint64_t, uint64_t, uint64_t,
uint64_t, uint64_t);
uint64_t _x86_64_call1(void *, uint64_t);
uint64_t _x86_64_call2(void *, uint64_t, uint64_t);
uint64_t _x86_64_call3(void *, uint64_t, uint64_t, uint64_t);
uint64_t _x86_64_call4(void *, uint64_t, uint64_t, uint64_t, uint64_t);
uint64_t _x86_64_call5(void *, uint64_t, uint64_t, uint64_t, uint64_t,
uint64_t);
uint64_t _x86_64_call6(void *, uint64_t, uint64_t, uint64_t, uint64_t,
uint64_t, uint64_t);
#define MSCALL1(fn, a) \
x86_64_call1((fn), (uint64_t)(a))
_x86_64_call1((fn), (uint64_t)(a))
#define MSCALL2(fn, a, b) \
x86_64_call2((fn), (uint64_t)(a), (uint64_t)(b))
_x86_64_call2((fn), (uint64_t)(a), (uint64_t)(b))
#define MSCALL3(fn, a, b, c) \
x86_64_call3((fn), (uint64_t)(a), (uint64_t)(b), \
_x86_64_call3((fn), (uint64_t)(a), (uint64_t)(b), \
(uint64_t)(c))
#define MSCALL4(fn, a, b, c, d) \
x86_64_call4((fn), (uint64_t)(a), (uint64_t)(b), \
_x86_64_call4((fn), (uint64_t)(a), (uint64_t)(b), \
(uint64_t)(c), (uint64_t)(d))
#define MSCALL5(fn, a, b, c, d, e) \
x86_64_call5((fn), (uint64_t)(a), (uint64_t)(b), \
_x86_64_call5((fn), (uint64_t)(a), (uint64_t)(b), \
(uint64_t)(c), (uint64_t)(d), (uint64_t)(e))
#define MSCALL6(fn, a, b, c, d, e, f) \
x86_64_call6((fn), (uint64_t)(a), (uint64_t)(b), \
_x86_64_call6((fn), (uint64_t)(a), (uint64_t)(b), \
(uint64_t)(c), (uint64_t)(d), (uint64_t)(e), (uint64_t)(f))
#endif /* __amd64__ */