Change certain heavily used network-related mutexes and rwlocks to
reside on their own cache line to prevent false sharing with other
nearby structures, especially for those in the .bss segment.

NB: Mutexes and rwlocks that sit next to variables which are changed
on every invocation do not benefit from their own cache line.  It may
actually be a net negative, because two cache misses would be incurred
in those cases.
commit e8b3186b6a
parent 982c1675ff
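For context on the technique: a "padalign" lock type is the ordinary lock
arranged to start on a cache-line boundary and to occupy whole cache lines,
so unrelated hot variables in .bss cannot land on the same line and bounce
it between CPUs. The userland sketch below only illustrates the idea; the
64-byte line size, the pthread mutex stand-in, and all names in it are
assumptions for illustration, not the kernel's actual mtx_padalign
definition.

/* false_sharing_sketch.c: illustrative only, not FreeBSD kernel code. */
#include <pthread.h>
#include <stdalign.h>
#include <stdio.h>

#define CACHE_LINE_SIZE	64	/* assumed line size for this sketch */

/* Unpadded lock: whatever the linker places next to it in .bss may end
 * up on the same cache line, so writes to that neighbor invalidate the
 * line holding the lock (false sharing). */
static pthread_mutex_t plain_mtx = PTHREAD_MUTEX_INITIALIZER;
static int plain_neighbor;

/* Padalign-style lock: aligned to a line boundary; sizeof() rounds up
 * to a multiple of the alignment, so the lock owns its line(s) outright
 * and no unrelated object can share them. */
struct padalign_mtx {
	alignas(CACHE_LINE_SIZE) pthread_mutex_t mtx;
};

static struct padalign_mtx padded_mtx = { PTHREAD_MUTEX_INITIALIZER };
static int padded_neighbor;	/* necessarily on a different line */

int
main(void)
{
	printf("plain_mtx  %p, neighbor %p\n",
	    (void *)&plain_mtx, (void *)&plain_neighbor);
	printf("padded_mtx %p, size %zu, neighbor %p\n",
	    (void *)&padded_mtx, sizeof(padded_mtx),
	    (void *)&padded_neighbor);
	return (0);
}

The kernel's mtx_padalign/rwlock_padalign types give the same effect for
kernel locks, which is why the diff below only needs to change the declared
type of each lock.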
@@ -240,14 +240,14 @@ SYSCTL_INT(_kern_ipc_zero_copy, OID_AUTO, send, CTLFLAG_RW,
  * accept_mtx locks down per-socket fields relating to accept queues. See
  * socketvar.h for an annotation of the protected fields of struct socket.
  */
-struct mtx accept_mtx;
+struct mtx_padalign accept_mtx;
 MTX_SYSINIT(accept_mtx, &accept_mtx, "accept", MTX_DEF);
 
 /*
  * so_global_mtx protects so_gencnt, numopensockets, and the per-socket
  * so_gencnt field.
  */
-static struct mtx so_global_mtx;
+static struct mtx_padalign so_global_mtx;
 MTX_SYSINIT(so_global_mtx, &so_global_mtx, "so_glabel", MTX_DEF);
 
 /*
@@ -206,7 +206,7 @@ VNET_DEFINE(struct ifindex_entry *, ifindex_table);
  * also to stablize it over long-running ioctls, without introducing priority
  * inversions and deadlocks.
  */
-struct rwlock ifnet_rwlock;
+struct rwlock_padalign ifnet_rwlock;
 struct sx ifnet_sxlock;
 
 /*
@@ -67,7 +67,7 @@ static VNET_DEFINE(SLIST_HEAD(, lltable), lltables);
 
 static void vnet_lltable_init(void);
 
-struct rwlock lltable_rwlock;
+struct rwlock_padalign lltable_rwlock;
 RW_SYSINIT(lltable_rwlock, &lltable_rwlock, "lltable_rwlock");
 
 /*
@@ -191,9 +191,9 @@ struct ifnet {
 	void	*if_unused[2];
 	void	*if_afdata[AF_MAX];
 	int	if_afdata_initialized;
-	struct	rwlock if_afdata_lock;
 	struct	task if_linktask;	/* task for link change events */
-	struct	rwlock if_addr_lock;	/* lock to protect address lists */
+	struct	rwlock_padalign if_afdata_lock;
+	struct	rwlock_padalign if_addr_lock;	/* lock to protect address lists */
 
 	LIST_ENTRY(ifnet) if_clones;	/* interfaces of a cloner */
 	TAILQ_HEAD(, ifg_list) if_groups; /* linked list of groups per if */
@@ -330,7 +330,7 @@ struct inpcbinfo {
 	/*
 	 * Global lock protecting non-pcbgroup hash lookup tables.
 	 */
-	struct rwlock ipi_hash_lock;
+	struct rwlock_padalign ipi_hash_lock;
 
 	/*
 	 * Global hash of inpcbs, hashed by local and foreign addresses and
@@ -97,7 +97,7 @@ static int array_ptr = 0;
 static int	array_size = 8192;
 static int	random_id_collisions = 0;
 static int	random_id_total = 0;
-static struct mtx ip_id_mtx;
+static struct mtx_padalign ip_id_mtx;
 
 static void	ip_initid(void);
 static int	sysctl_ip_id_change(SYSCTL_HANDLER_ARGS);
@@ -85,7 +85,7 @@ __FBSDID("$FreeBSD$");
 CTASSERT(sizeof(struct ip) == 20);
 #endif
 
-struct rwlock in_ifaddr_lock;
+struct rwlock_padalign in_ifaddr_lock;
 RW_SYSINIT(in_ifaddr_lock, &in_ifaddr_lock, "in_ifaddr_lock");
 
 VNET_DEFINE(int, rsvp_on);
@@ -155,7 +155,7 @@ VNET_DEFINE(u_long, in_ifaddrhmask);	/* mask for hash table */
 
 static VNET_DEFINE(uma_zone_t, ipq_zone);
 static VNET_DEFINE(TAILQ_HEAD(ipqhead, ipq), ipq[IPREASS_NHASH]);
-static struct mtx ipqlock;
+static struct mtx_padalign ipqlock;
 
 #define	V_ipq_zone		VNET(ipq_zone)
 #define	V_ipq			VNET(ipq)
@@ -255,7 +255,7 @@ static VNET_DEFINE(uma_zone_t, tcpcb_zone);
 #define	V_tcpcb_zone		VNET(tcpcb_zone)
 
 MALLOC_DEFINE(M_TCPLOG, "tcplog", "TCP address and flags print buffers");
-static struct mtx isn_mtx;
+static struct mtx_padalign isn_mtx;
 
 #define	ISN_LOCK_INIT()	mtx_init(&isn_mtx, "isn_mtx", NULL, MTX_DEF)
 #define	ISN_LOCK()	mtx_lock(&isn_mtx)
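To illustrate the NB in the commit message: when a lock's neighboring
variable is dirtied on every lock operation, giving the lock its own cache
line splits one hot line into two. The layouts below are a hypothetical
userland sketch (assumed 64-byte lines, pthread stand-ins, made-up names),
not taken from the kernel sources.

/* caveat_sketch.c: hypothetical layouts only, not FreeBSD kernel code. */
#include <pthread.h>
#include <stdalign.h>
#include <stddef.h>
#include <stdio.h>

#define CACHE_LINE_SIZE	64	/* assumed line size for this sketch */

/* Layout A: the lock and the counter it protects share one cache line
 * (assuming both fit in CACHE_LINE_SIZE bytes).  An acquire plus a
 * counter bump touches a single line: at worst one miss. */
struct colocated {
	alignas(CACHE_LINE_SIZE) pthread_mutex_t lock;
	unsigned long counter;		/* written on every invocation */
};

/* Layout B: the lock is padded onto its own line and the counter is
 * pushed to the next one.  The same acquire-plus-bump now dirties two
 * lines: at worst two misses, the "net negative" the commit warns of. */
struct separated {
	alignas(CACHE_LINE_SIZE) pthread_mutex_t lock;
	alignas(CACHE_LINE_SIZE) unsigned long counter;
};

int
main(void)
{
	printf("colocated: lock at %zu, counter at %zu, size %zu\n",
	    offsetof(struct colocated, lock),
	    offsetof(struct colocated, counter), sizeof(struct colocated));
	printf("separated: lock at %zu, counter at %zu, size %zu\n",
	    offsetof(struct separated, lock),
	    offsetof(struct separated, counter), sizeof(struct separated));
	return (0);
}

This is why locks with per-invocation state right next to them are better
left unpadded, per the NB above, rather than being converted wholesale.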