Rework the known rwlocks so that they sit on their own cache line without
manual padding, by declaring them as struct rwlock_padalign.

Reviewed by:	alc, jimharris
parent d40c846abf
commit cfedf924d3
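The change is the same in every file touched: an open-coded, hand-padded
wrapper struct (plus a #define alias for its lock member) collapses into a
single declaration of the pre-padded lock type. Below is a minimal userland
sketch of the before/after pattern, not kernel code: it assumes a 64-byte
cache line and substitutes a dummy lock word (struct my_rwlock) for the
kernel's struct rwlock.

	/* padalign.c -- illustrative sketch only (gcc/clang). */
	#include <stdio.h>

	#define	CACHE_LINE_SIZE	64
	#define	__aligned(x)	__attribute__((__aligned__(x)))

	struct my_rwlock {
		volatile unsigned long rw_lock;	/* stand-in lock word */
	};

	/* Before: manual frobbing -- every subsystem open-codes the padding. */
	static struct {
		struct my_rwlock lock;
		char padding[CACHE_LINE_SIZE - sizeof(struct my_rwlock)];
	} pvh_global_old __aligned(CACHE_LINE_SIZE);

	/*
	 * After: a pad-aligned variant of the lock type.  sizeof() of an
	 * over-aligned struct is rounded up to a multiple of its alignment,
	 * so the object both starts on a cache-line boundary and fills the
	 * whole line; no unrelated data can share it.
	 */
	struct my_rwlock_padalign {
		struct my_rwlock rw;
	} __aligned(CACHE_LINE_SIZE);

	static struct my_rwlock_padalign pvh_global_new;

	int
	main(void)
	{
		printf("before: %zu bytes\n", sizeof(pvh_global_old));	/* 64 */
		printf("after:  %zu bytes\n", sizeof(pvh_global_new));	/* 64 */
		return (0);
	}

Hiding the padding arithmetic behind the type means each consumer gets the
cache-line isolation right by construction instead of repeating the size
calculation by hand.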
@@ -225,16 +225,7 @@ u_int64_t KPML4phys;	/* phys addr of kernel level 4 */
 static u_int64_t DMPDphys;	/* phys addr of direct mapped level 2 */
 static u_int64_t DMPDPphys;	/* phys addr of direct mapped level 3 */
 
-/*
- * Isolate the global pv list lock from data and other locks to prevent false
- * sharing within the cache.
- */
-static struct {
-	struct rwlock lock;
-	char padding[CACHE_LINE_SIZE - sizeof(struct rwlock)];
-} pvh_global __aligned(CACHE_LINE_SIZE);
-
-#define pvh_global_lock pvh_global.lock
+static struct rwlock_padalign pvh_global_lock;
 
 /*
  * Data for the pv entry allocation mechanism
@@ -224,16 +224,7 @@ SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLFLAG_RDTUN, &pg_ps_enabled, 0,
 #define PAT_INDEX_SIZE 8
 static int pat_index[PAT_INDEX_SIZE];	/* cache mode to PAT index conversion */
 
-/*
- * Isolate the global pv list lock from data and other locks to prevent false
- * sharing within the cache.
- */
-static struct {
-	struct rwlock lock;
-	char padding[CACHE_LINE_SIZE - sizeof(struct rwlock)];
-} pvh_global __aligned(CACHE_LINE_SIZE);
-
-#define pvh_global_lock pvh_global.lock
+static struct rwlock_padalign pvh_global_lock;
 
 /*
  * Data for the pv entry allocation mechanism
@@ -214,16 +214,7 @@ static int pmap_ridmax;
 static uint64_t *pmap_ridmap;
 struct mtx pmap_ridmutex;
 
-/*
- * Isolate the global pv list lock from data and other locks to prevent false
- * sharing within the cache.
- */
-static struct {
-	struct rwlock lock;
-	char padding[CACHE_LINE_SIZE - sizeof(struct rwlock)];
-} pvh_global __aligned(CACHE_LINE_SIZE);
-
-#define pvh_global_lock pvh_global.lock
+static struct rwlock_padalign pvh_global_lock;
 
 /*
  * Data for the pv entry allocation mechanism
@@ -148,16 +148,7 @@ vm_offset_t kernel_vm_end = VM_MIN_KERNEL_ADDRESS;
 
 static void pmap_asid_alloc(pmap_t pmap);
 
-/*
- * Isolate the global pv list lock from data and other locks to prevent false
- * sharing within the cache.
- */
-static struct {
-	struct rwlock lock;
-	char padding[CACHE_LINE_SIZE - sizeof(struct rwlock)];
-} pvh_global __aligned(CACHE_LINE_SIZE);
-
-#define pvh_global_lock pvh_global.lock
+static struct rwlock_padalign pvh_global_lock;
 
 /*
  * Data for the pv entry allocation mechanism
@@ -200,16 +200,7 @@ struct pvo_head *moea_pvo_table;	/* pvo entries by pteg index */
 struct pvo_head moea_pvo_kunmanaged =
     LIST_HEAD_INITIALIZER(moea_pvo_kunmanaged);	/* list of unmanaged pages */
 
-/*
- * Isolate the global pv list lock from data and other locks to prevent false
- * sharing within the cache.
- */
-static struct {
-	struct rwlock lock;
-	char padding[CACHE_LINE_SIZE - sizeof(struct rwlock)];
-} pvh_global __aligned(CACHE_LINE_SIZE);
-
-#define pvh_global_lock pvh_global.lock
+static struct rwlock_padalign pvh_global_lock;
 
 uma_zone_t moea_upvo_zone;	/* zone for pvo entries for unmanaged pages */
 uma_zone_t moea_mpvo_zone;	/* zone for pvo entries for managed pages */
@@ -68,11 +68,6 @@ struct pmap {
 	struct pmap_statistics pm_stats;
 };
 
-struct tte_list_lock {
-	struct rwlock lock;
-	char padding[CACHE_LINE_SIZE - sizeof(struct rwlock)];
-};
-
 #define PMAP_LOCK(pmap) mtx_lock(&(pmap)->pm_mtx)
 #define PMAP_LOCK_ASSERT(pmap, type) \
 	mtx_assert(&(pmap)->pm_mtx, (type))
@@ -108,8 +103,7 @@ void pmap_set_kctx(void);
 
 extern struct pmap kernel_pmap_store;
 #define kernel_pmap (&kernel_pmap_store)
-extern struct tte_list_lock tte_list_global;
-#define tte_list_global_lock tte_list_global.lock
+extern struct rwlock_padalign tte_list_global_lock;
 extern vm_paddr_t phys_avail[];
 extern vm_offset_t virtual_avail;
 extern vm_offset_t virtual_end;
@@ -129,12 +129,7 @@ vm_offset_t vm_max_kernel_address;
  */
 struct pmap kernel_pmap_store;
 
-/*
- * Isolate the global TTE list lock from data and other locks to prevent
- * false sharing within the cache (see also the declaration of struct
- * tte_list_lock).
- */
-struct tte_list_lock tte_list_global __aligned(CACHE_LINE_SIZE);
+struct rwlock_padalign tte_list_global_lock;
 
 /*
  * Allocate physical memory for use in pmap_bootstrap.
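The hunks above touch only the declarations; lock consumers are unaffected,
since the ordinary rw(9) interface is applied to the pad-aligned object
exactly as before. An illustrative call sequence, not part of this diff:

	rw_wlock(&pvh_global_lock);
	/* ... walk or modify the global pv list ... */
	rw_wunlock(&pvh_global_lock);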