sfxge: Remove extra cache-line alignment and reorder sfxge_evq_t members

Remove the cache-line alignment from the first member, since it is a no-op.
Use __aligned() on the whole structure instead, to make sure that the
structure size is cache-line aligned.
Remove the alignment of the lock to make the structure smaller, so that all
members used during event queue processing fit into one cache line
(128 bytes) on x86-64. The lock is also acquired from a different context
when event queue statistics are retrieved via sysctl, but that is
infrequent.
Reorder the members to avoid padding and to follow the order in which they
are used during event processing.
As a result, all structure members used during event queue processing now
fit into exactly one cache line (128 bytes).

Sponsored by:   Solarflare Communications, Inc.
Approved by:    gnn (mentor)
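
For illustration only (this sketch is not part of the commit), the following
stand-alone C snippet shows the pattern the change adopts: members used on the
hot path grouped first in usage order, rarely used members at the end, and the
alignment attribute applied to the whole structure so that its size is padded
up to a cache-line multiple. The structure, its member names, and the
hard-coded 128-byte value are assumptions made for the sketch; FreeBSD's
__aligned() macro expands to the same compiler attribute used here.

#define EXAMPLE_CACHE_LINE_SIZE 128	/* the 128-byte figure from the message */

struct evq_like {
	/* Members used on the event processing path, in usage order. */
	void		*sc;
	unsigned int	index;
	unsigned int	read_ptr;
	unsigned int	rx_done;
	unsigned int	tx_done;
	/* Members not used on the event processing path. */
	unsigned int	buf_base_id;
	unsigned int	entries;
} __attribute__((aligned(EXAMPLE_CACHE_LINE_SIZE)));

/*
 * Raising the structure's alignment also pads sizeof() up to a multiple of
 * that alignment, so adjacent objects (e.g. array elements) never share a
 * cache line with this one.
 */
_Static_assert(sizeof(struct evq_like) % EXAMPLE_CACHE_LINE_SIZE == 0,
    "size must be a multiple of the cache line size");
_Static_assert(_Alignof(struct evq_like) == EXAMPLE_CACHE_LINE_SIZE,
    "structure must start on a cache line boundary");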
Author:  arybchik
Date:    2015-01-29 18:57:27 +00:00
Parent:  92c267b4a9
Commit:  46a080cb74

@@ -103,26 +103,26 @@ enum sfxge_evq_state {
 #define	SFXGE_EV_BATCH	16384
 struct sfxge_evq {
-	struct sfxge_softc	*sc __aligned(CACHE_LINE_SIZE);
-	struct mtx		lock __aligned(CACHE_LINE_SIZE);
-	enum sfxge_evq_state	init_state;
+	/* Structure members below are sorted by usage order */
+	struct sfxge_softc	*sc;
+	struct mtx		lock;
 	unsigned int		index;
-	unsigned int		entries;
+	enum sfxge_evq_state	init_state;
 	efsys_mem_t		mem;
-	unsigned int		buf_base_id;
-	boolean_t		exception;
 	efx_evq_t		*common;
 	unsigned int		read_ptr;
+	boolean_t		exception;
 	unsigned int		rx_done;
 	unsigned int		tx_done;
 	/* Linked list of TX queues with completions to process */
 	struct sfxge_txq	*txq;
 	struct sfxge_txq	**txqs;
-};
+
+	/* Structure members not used on event processing path */
+	unsigned int		buf_base_id;
+	unsigned int		entries;
+} __aligned(CACHE_LINE_SIZE);
 #define	SFXGE_NDESCS	1024
 #define	SFXGE_MODERATION	30
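
If one wanted to verify the claim that every member used on the event
processing path lands in the first 128-byte cache line, a compile-time
assertion along the following lines could sit next to the structure
definition. This is a hypothetical addition, not part of this commit;
CTASSERT() is the kernel's compile-time assertion macro, offsetof() and
CACHE_LINE_SIZE come from the usual kernel headers, and txqs is the last
member on the event processing path in the layout above.

/*
 * Hypothetical check (not in this commit): the hot region ends with 'txqs',
 * so its end offset must not exceed one cache line.
 */
CTASSERT(offsetof(struct sfxge_evq, txqs) + sizeof(struct sfxge_txq **) <=
    CACHE_LINE_SIZE);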