Implement atomic state updates using the new vm_page_astate_t structure.

Introduce primitives vm_page_astate_load() and vm_page_astate_fcmpset()
to operate on the 32-bit per-page atomic state.  Modify
vm_page_pqstate_cmpset() to use them.  No functional change intended.

Introduce PGA_QUEUE_OP_MASK, a subset of PGA_QUEUE_STATE_MASK that only
includes queue operation flags.  This will be used in subsequent
patches.
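
Illustrative sketch (not part of this change): the two primitives are
meant to compose into the usual load/retry loop, with a failed
vm_page_astate_fcmpset() refreshing the caller's snapshot.  The helper
below is hypothetical and also shows a use of PGA_QUEUE_OP_MASK.

	/*
	 * Hypothetical helper: atomically clear any pending queue
	 * operation flags, failing if the page has left the expected
	 * queue.  On failure, vm_page_astate_fcmpset() reloads "old",
	 * so the loop retries against the current state.
	 */
	static inline bool
	vm_page_clear_queue_op_sketch(vm_page_t m, uint8_t expq)
	{
		vm_page_astate_t new, old;

		old = vm_page_astate_load(m);
		do {
			if (old.queue != expq)
				return (false);
			if ((old.flags & PGA_QUEUE_OP_MASK) == 0)
				return (true);
			new = old;
			new.flags &= ~PGA_QUEUE_OP_MASK;
		} while (!vm_page_astate_fcmpset(m, &old, new));
		return (true);
	}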

Reviewed by:	alc, jeff, kib
Sponsored by:	Netflix, Intel
Differential Revision:	https://reviews.freebsd.org/D22753
Commit:	6fbaf6859c (parent: aab65fc55e)
Author:	Mark Johnston
Date:	2019-12-12 21:13:20 +00:00

--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h

@@ -439,8 +439,8 @@ extern struct mtx_padalign pa_lock[];
 #define	PGA_REQUEUE_HEAD 0x0040	/* page requeue should bypass LRU */
 #define	PGA_NOSYNC	0x0080	/* do not collect for syncer */
 
-#define	PGA_QUEUE_STATE_MASK	(PGA_ENQUEUED | PGA_DEQUEUE | PGA_REQUEUE | \
-				PGA_REQUEUE_HEAD)
+#define	PGA_QUEUE_OP_MASK	(PGA_DEQUEUE | PGA_REQUEUE | PGA_REQUEUE_HEAD)
+#define	PGA_QUEUE_STATE_MASK	(PGA_ENQUEUED | PGA_QUEUE_OP_MASK)
 
 /*
  * Page flags.  If changed at any other time than page allocation or
@@ -756,35 +756,36 @@ void vm_page_assert_pga_writeable(vm_page_t m, uint16_t bits);
 #define	VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits)	(void)0
 #endif
 
 /*
  * We want to use atomic updates for the aflags field, which is 8 bits wide.
  * However, not all architectures support atomic operations on 8-bit
  * destinations.  In order that we can easily use a 32-bit operation, we
  * require that the aflags field be 32-bit aligned.
  */
 _Static_assert(offsetof(struct vm_page, a.flags) % sizeof(uint32_t) == 0,
     "aflags field is not 32-bit aligned");
 
+#define	VM_PAGE_AFLAG_SHIFT	(__offsetof(vm_page_astate_t, flags) * NBBY)
+
 /*
- * We want to be able to update the aflags and queue fields atomically in
- * the same operation.
+ * Load a snapshot of a page's 32-bit atomic state.
  */
-_Static_assert(offsetof(struct vm_page, a.flags) / sizeof(uint32_t) ==
-    offsetof(struct vm_page, a.queue) / sizeof(uint32_t),
-    "aflags and queue fields do not belong to the same 32-bit word");
-_Static_assert(offsetof(struct vm_page, a.queue) % sizeof(uint32_t) == 2,
-    "queue field is at an unexpected offset");
-_Static_assert(sizeof(((struct vm_page *)NULL)->a.queue) == 1,
-    "queue field has an unexpected size");
+static inline vm_page_astate_t
+vm_page_astate_load(vm_page_t m)
+{
+	vm_page_astate_t a;
 
-#if BYTE_ORDER == LITTLE_ENDIAN
-#define	VM_PAGE_AFLAG_SHIFT	0
-#define	VM_PAGE_QUEUE_SHIFT	16
-#else
-#define	VM_PAGE_AFLAG_SHIFT	16
-#define	VM_PAGE_QUEUE_SHIFT	8
-#endif
-#define	VM_PAGE_QUEUE_MASK	(0xff << VM_PAGE_QUEUE_SHIFT)
+	a._bits = atomic_load_32(&m->a);
+	return (a);
+}
+
+/*
+ * Atomically compare and set a page's atomic state.
+ */
+static inline bool
+vm_page_astate_fcmpset(vm_page_t m, vm_page_astate_t *old, vm_page_astate_t new)
+{
+	KASSERT(new.queue == PQ_INACTIVE || (new.flags & PGA_REQUEUE_HEAD) == 0,
+	    ("%s: invalid head requeue request for page %p", __func__, m));
+	KASSERT((new.flags & PGA_ENQUEUED) == 0 || new.queue != PQ_NONE,
+	    ("%s: setting PGA_ENQUEUED with PQ_NONE in page %p", __func__, m));
+	KASSERT(new._bits != old->_bits,
+	    ("%s: bits are unchanged", __func__));
+
+	return (atomic_fcmpset_32(&m->a._bits, &old->_bits, new._bits) != 0);
+}
 
 /*
  * Clear the given bits in the specified page.
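
For reference (not part of this diff): these primitives operate on the
vm_page_astate_t union introduced by the parent commit.  The sketch
below is inferred from the static asserts removed above and the field
names used in this file; the act_count member in particular is an
assumption.

	typedef union vm_page_astate {
		struct {
			uint16_t flags;		/* PGA_* atomic flags */
			uint8_t	queue;		/* PQ_* index, byte offset 2 */
			uint8_t	act_count;	/* assumed: activation count */
		};
		uint32_t _bits;			/* 32-bit view for atomics */
	} vm_page_astate_t;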
@@ -805,7 +806,7 @@ vm_page_aflag_clear(vm_page_t m, uint16_t bits)
 	 * atomic update.  Parallel non-atomic updates to the other fields
 	 * within this word are handled properly by the atomic update.
 	 */
-	addr = (void *)&m->a.flags;
+	addr = (void *)&m->a;
 	val = bits << VM_PAGE_AFLAG_SHIFT;
 	atomic_clear_32(addr, val);
 }
@@ -825,7 +826,7 @@ vm_page_aflag_set(vm_page_t m, uint16_t bits)
 	 * atomic update.  Parallel non-atomic updates to the other fields
 	 * within this word are handled properly by the atomic update.
 	 */
-	addr = (void *)&m->a.flags;
+	addr = (void *)&m->a;
 	val = bits << VM_PAGE_AFLAG_SHIFT;
 	atomic_set_32(addr, val);
 }
@@ -841,24 +842,16 @@ static inline bool
 vm_page_pqstate_cmpset(vm_page_t m, uint32_t oldq, uint32_t newq,
     uint32_t fflags, uint32_t nflags)
 {
-	uint32_t *addr, nval, oval, qsmask;
+	vm_page_astate_t new, old;
 
 	fflags <<= VM_PAGE_AFLAG_SHIFT;
 	nflags <<= VM_PAGE_AFLAG_SHIFT;
-	newq <<= VM_PAGE_QUEUE_SHIFT;
-	oldq <<= VM_PAGE_QUEUE_SHIFT;
-	qsmask = ((PGA_DEQUEUE | PGA_REQUEUE | PGA_REQUEUE_HEAD) <<
-	    VM_PAGE_AFLAG_SHIFT) | VM_PAGE_QUEUE_MASK;
 
-	addr = (void *)&m->a.flags;
-	oval = atomic_load_32(addr);
+	old = vm_page_astate_load(m);
 	do {
-		if ((oval & fflags) != 0)
+		if ((old.flags & fflags) != 0 || old.queue != oldq)
 			return (false);
-		if ((oval & VM_PAGE_QUEUE_MASK) != oldq)
-			return (false);
-		nval = (oval & ~qsmask) | nflags | newq;
-	} while (!atomic_fcmpset_32(addr, &oval, nval));
+		new = old;
+		new.flags = (new.flags & ~PGA_QUEUE_OP_MASK) | nflags;
+		new.queue = newq;
+	} while (!vm_page_astate_fcmpset(m, &old, new));
 
 	return (true);
 }
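
A hypothetical call site (for illustration only; the function name is
invented and not from this commit):

	static void
	vm_page_requeue_sketch(vm_page_t m)
	{
		/*
		 * Atomically request a requeue of a page expected to be
		 * in the inactive queue, unless a dequeue is already
		 * pending.  Assumes PGA_REQUEUE is not yet set, since
		 * vm_page_astate_fcmpset() asserts that the state changes.
		 */
		if (!vm_page_pqstate_cmpset(m, PQ_INACTIVE, PQ_INACTIVE,
		    PGA_DEQUEUE, PGA_REQUEUE))
			return;		/* dequeue pending or page moved */
		/* Success: PGA_REQUEUE is set and the queue is unchanged. */
	}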