Record part of the owner struct thread pointer into busy_lock.

Record as many bits of curthread into busy_lock as fit.  The low bits
of a struct thread * representation are zero due to struct and zone
alignment, which leaves room for the busy flags (perhaps except for the
statically allocated thread0).  The upper bits are not very interesting
for the assert, and in most practical situations the recorded value
should allow the owner to be identified manually with certainty.
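
For illustration, a minimal userland sketch of the packing arithmetic;
the 32-byte alignment and the flag mask below are hypothetical stand-ins
for the real zone alignment and VPB_* flag bits, not the kernel's code.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define	FLAG_MASK	0x7u	/* low bits freed up by alignment */

int
main(void)
{
	/* Stand-in for a zone-aligned struct thread allocation. */
	static _Alignas(32) char thread_obj[128];
	uintptr_t td = (uintptr_t)&thread_obj;
	uint32_t flags = 0x4;	/* stand-in for an exclusive-busy bit */

	/* Alignment guarantees that the low pointer bits are zero... */
	assert((td & FLAG_MASK) == 0);
	/* ...so truncated pointer bits and flags share one 32-bit word. */
	uint32_t busy_lock = ((uint32_t)td & ~FLAG_MASK) | flags;
	printf("busy_lock: %#x\n", busy_lock);
	return (0);
}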

Assert that unbusy is performed by the owner, except for the few places
where unbusy is done in an I/O completion handler.  For that case, add
_unchecked variants of the asserts and unbusy primitives.
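
A hypothetical sketch (not part of the commit) of why the I/O
completion path needs the _unchecked variant: the thread that busied
the page is usually not the thread that completes the I/O.

static void
example_start_read(vm_page_t m)
{
	vm_page_xbusy(m);	/* busy_lock now records this thread */
	/* ... start async I/O; completion runs on another thread ... */
}

static void
example_read_done(vm_page_t m)	/* called from an I/O completion thread */
{
	/*
	 * Under INVARIANTS, vm_page_xunbusy() would compare busy_lock
	 * against this thread's curthread bits and panic, because
	 * busy_lock still records the thread from example_start_read();
	 * the unchecked primitive skips the ownership check.
	 */
	vm_page_xunbusy_unchecked(m);
}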

Reviewed by:	markj (previous version)
Tested by:	pho
Sponsored by:	The FreeBSD Foundation
Differential revision:	https://reviews.freebsd.org/D22298
Author:	Konstantin Belousov
Date:	2019-11-24 19:12:23 +00:00
Commit:	b631c36f0d (parent dbe257d253)

3 changed files with 51 additions and 20 deletions

--- a/sys/kern/kern_sendfile.c
+++ b/sys/kern/kern_sendfile.c
@@ -261,7 +261,7 @@ sendfile_iodone(void *arg, vm_page_t *pg, int count, int error)
 	for (int i = 0; i < count; i++)
 		if (pg[i] != bogus_page)
-			vm_page_xunbusy(pg[i]);
+			vm_page_xunbusy_unchecked(pg[i]);
 	if (error)
 		sfio->error = error;

--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -437,7 +437,7 @@ vm_page_init_marker(vm_page_t marker, int queue, uint16_t aflags)
 	bzero(marker, sizeof(*marker));
 	marker->flags = PG_MARKER;
 	marker->aflags = aflags;
-	marker->busy_lock = VPB_SINGLE_EXCLUSIVER;
+	marker->busy_lock = VPB_CURTHREAD_EXCLUSIVE;
 	marker->queue = queue;
 }
@@ -939,18 +939,19 @@ vm_page_busy_downgrade(vm_page_t m)
 int
 vm_page_busy_tryupgrade(vm_page_t m)
 {
-	u_int x;
+	u_int ce, x;
 
 	vm_page_assert_sbusied(m);
 
 	x = m->busy_lock;
+	ce = VPB_CURTHREAD_EXCLUSIVE;
 	for (;;) {
 		if (VPB_SHARERS(x) > 1)
 			return (0);
 		KASSERT((x & ~VPB_BIT_WAITERS) == VPB_SHARERS_WORD(1),
 		    ("vm_page_busy_tryupgrade: invalid lock state"));
 		if (!atomic_fcmpset_acq_int(&m->busy_lock, &x,
-		    VPB_SINGLE_EXCLUSIVER | (x & VPB_BIT_WAITERS)))
+		    ce | (x & VPB_BIT_WAITERS)))
 			continue;
 		return (1);
 	}
@@ -1108,7 +1109,7 @@ vm_page_tryxbusy(vm_page_t m)
 	vm_object_t obj;
 
 	if (atomic_cmpset_acq_int(&(m)->busy_lock, VPB_UNBUSIED,
-	    VPB_SINGLE_EXCLUSIVER) == 0)
+	    VPB_CURTHREAD_EXCLUSIVE) == 0)
 		return (0);
 	obj = m->object;
@@ -1119,6 +1120,14 @@ vm_page_tryxbusy(vm_page_t m)
 	return (1);
 }
 
+static void
+vm_page_xunbusy_hard_tail(vm_page_t m)
+{
+	atomic_store_rel_int(&m->busy_lock, VPB_UNBUSIED);
+	/* Wake the waiter. */
+	wakeup(m);
+}
+
 /*
  * vm_page_xunbusy_hard:
  *
@@ -1127,14 +1136,15 @@ vm_page_tryxbusy(vm_page_t m)
 void
 vm_page_xunbusy_hard(vm_page_t m)
 {
 	vm_page_assert_xbusied(m);
+	vm_page_xunbusy_hard_tail(m);
+}
 
-	/*
-	 * Wake the waiter.
-	 */
-	atomic_store_rel_int(&m->busy_lock, VPB_UNBUSIED);
-	wakeup(m);
+void
+vm_page_xunbusy_hard_unchecked(vm_page_t m)
+{
+	vm_page_assert_xbusied_unchecked(m);
+	vm_page_xunbusy_hard_tail(m);
 }
 
 /*
@@ -1228,7 +1238,7 @@ vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
 	m->flags = PG_FICTITIOUS;
 	/* Fictitious pages don't use "order" or "pool". */
 	m->oflags = VPO_UNMANAGED;
-	m->busy_lock = VPB_SINGLE_EXCLUSIVER;
+	m->busy_lock = VPB_CURTHREAD_EXCLUSIVE;
 	/* Fictitious pages are unevictable. */
 	m->ref_count = 1;
 	pmap_page_init(m);
@@ -1318,7 +1328,7 @@ vm_page_readahead_finish(vm_page_t m)
 	else
 		vm_page_deactivate(m);
 	vm_page_unlock(m);
-	vm_page_xunbusy(m);
+	vm_page_xunbusy_unchecked(m);
 }
 
 /*
@@ -1967,7 +1977,7 @@ found:
 	    VPO_UNMANAGED : 0;
 	m->busy_lock = VPB_UNBUSIED;
 	if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ | VM_ALLOC_SBUSY)) == 0)
-		m->busy_lock = VPB_SINGLE_EXCLUSIVER;
+		m->busy_lock = VPB_CURTHREAD_EXCLUSIVE;
 	if ((req & VM_ALLOC_SBUSY) != 0)
 		m->busy_lock = VPB_SHARERS_WORD(1);
 	if (req & VM_ALLOC_WIRED) {
@@ -2161,7 +2171,7 @@ found:
 	    VPO_UNMANAGED : 0;
 	busy_lock = VPB_UNBUSIED;
 	if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ | VM_ALLOC_SBUSY)) == 0)
-		busy_lock = VPB_SINGLE_EXCLUSIVER;
+		busy_lock = VPB_CURTHREAD_EXCLUSIVE;
 	if ((req & VM_ALLOC_SBUSY) != 0)
 		busy_lock = VPB_SHARERS_WORD(1);
 	if ((req & VM_ALLOC_WIRED) != 0)

--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -307,7 +307,13 @@ struct vm_page {
 #define	VPB_SHARERS_WORD(x)	((x) << VPB_SHARERS_SHIFT | VPB_BIT_SHARED)
 #define	VPB_ONE_SHARER		(1 << VPB_SHARERS_SHIFT)
 
-#define	VPB_SINGLE_EXCLUSIVER	VPB_BIT_EXCLUSIVE
+#define	VPB_SINGLE_EXCLUSIVE	VPB_BIT_EXCLUSIVE
+#ifdef INVARIANTS
+#define	VPB_CURTHREAD_EXCLUSIVE						\
+	(VPB_BIT_EXCLUSIVE | ((u_int)(uintptr_t)curthread & ~VPB_BIT_FLAGMASK))
+#else
+#define	VPB_CURTHREAD_EXCLUSIVE	VPB_SINGLE_EXCLUSIVE
+#endif
 
 #define	VPB_UNBUSIED		VPB_SHARERS_WORD(0)
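
One practical payoff of this encoding, shown as a hypothetical
debugger-style helper (not part of the commit): given a stuck page, the
owner can usually be found by masking the flag bits out of busy_lock and
comparing the remainder against the low bits of candidate thread
pointers.

/* Hypothetical helper; assumes the INVARIANTS encoding above. */
static bool
busy_lock_matches(u_int busy_lock, struct thread *td)
{
	return ((busy_lock & ~VPB_BIT_FLAGMASK) ==
	    ((u_int)(uintptr_t)td & ~VPB_BIT_FLAGMASK));
}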
@@ -649,6 +655,7 @@ void vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
 void vm_page_wire(vm_page_t);
 bool vm_page_wire_mapped(vm_page_t m);
 void vm_page_xunbusy_hard(vm_page_t m);
+void vm_page_xunbusy_hard_unchecked(vm_page_t m);
 void vm_page_set_validclean (vm_page_t, int, int);
 void vm_page_clear_dirty(vm_page_t, int, int);
 void vm_page_set_invalid(vm_page_t, int, int);
@@ -684,10 +691,19 @@ void vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line);
 	("vm_page_assert_unbusied: page %p busy @ %s:%d", \
 	(m), __FILE__, __LINE__))
 
-#define	vm_page_assert_xbusied(m)					\
+#define	vm_page_assert_xbusied_unchecked(m) do {			\
 	KASSERT(vm_page_xbusied(m),					\
 	    ("vm_page_assert_xbusied: page %p not exclusive busy @ %s:%d", \
-	    (m), __FILE__, __LINE__))
+	    (m), __FILE__, __LINE__));					\
+} while (0)
+#define	vm_page_assert_xbusied(m) do {					\
+	vm_page_assert_xbusied_unchecked(m);				\
+	KASSERT((m->busy_lock & ~VPB_BIT_WAITERS) ==			\
+	    VPB_CURTHREAD_EXCLUSIVE,					\
+	    ("vm_page_assert_xbusied: page %p busy_lock %#x not owned" \
+	    " by me @ %s:%d",						\
+	    (m), (m)->busy_lock, __FILE__, __LINE__));			\
+} while (0)
 
 #define	vm_page_busied(m)						\
 	((m)->busy_lock != VPB_UNBUSIED)
@@ -699,7 +715,7 @@ void vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line);
 } while (0)
 
 #define	vm_page_xbusied(m)						\
-	(((m)->busy_lock & VPB_SINGLE_EXCLUSIVER) != 0)
+	(((m)->busy_lock & VPB_SINGLE_EXCLUSIVE) != 0)
 
 #define	vm_page_xbusy(m) do {						\
 	if (!vm_page_tryxbusy(m))					\
@@ -710,9 +726,14 @@ void vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line);
 
 /* Note: page m's lock must not be owned by the caller. */
 #define	vm_page_xunbusy(m) do {						\
 	if (!atomic_cmpset_rel_int(&(m)->busy_lock,			\
-	    VPB_SINGLE_EXCLUSIVER, VPB_UNBUSIED))			\
+	    VPB_CURTHREAD_EXCLUSIVE, VPB_UNBUSIED))			\
 		vm_page_xunbusy_hard(m);				\
 } while (0)
+#define	vm_page_xunbusy_unchecked(m) do {				\
+	if (!atomic_cmpset_rel_int(&(m)->busy_lock,			\
+	    VPB_CURTHREAD_EXCLUSIVE, VPB_UNBUSIED))			\
+		vm_page_xunbusy_hard_unchecked(m);			\
+} while (0)
 
 #ifdef INVARIANTS
 void vm_page_object_busy_assert(vm_page_t m);