Store offset into zpcpu allocations in the per-cpu area.
This shortens zpcpu_get and allows more optimizations.

Reviewed by:	jeff
Differential Revision:	https://reviews.freebsd.org/D23570
commit 3acb6572fc
parent 48baf00f54
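For context, not part of the diff below: previously every zpcpu_get had to read curcpu and scale it by the per-CPU allocation stride; after this change the offset is computed once per CPU in pcpu_init() and the hot path is a single read of pc_zpcpu_offset from the current CPU's pcpu area. A minimal sketch of the two fast paths, assuming the default (identity) MI macro definitions:

	/* Old expansion: fetch curcpu, then multiply by the allocation stride. */
	ptr = (void *)((char *)base + UMA_PCPU_ALLOC_SIZE * curcpu);

	/* New expansion: one load of the offset cached at CPU init time. */
	ptr = (void *)((char *)base + PCPU_GET(zpcpu_offset));

On platforms where PCPU_GET() compiles to a single register-relative load, this is presumably where the "more optimizations" mentioned above come in.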
@@ -95,6 +95,7 @@ pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
 	cpu_pcpu_init(pcpu, cpuid, size);
 	pcpu->pc_rm_queue.rmq_next = &pcpu->pc_rm_queue;
 	pcpu->pc_rm_queue.rmq_prev = &pcpu->pc_rm_queue;
+	pcpu->pc_zpcpu_offset = zpcpu_offset_cpu(cpuid);
 }
 
 void
@@ -48,7 +48,7 @@
 	struct pmap	*pc_curpmap;		/* Currently active pmap */	\
 	uint32_t	pc_pending_ipis;	/* IPIs pending to this CPU */	\
 	uint32_t	pc_hart;		/* Hart ID */			\
-	char		__pad[57]
+	char		__pad[49]
 
 #ifdef _KERNEL
 
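Not part of the diff, a note on the riscv hunk above: the MD pad keeps struct pcpu at its expected overall size, so it shrinks by exactly the 8 bytes that the new MI uintptr_t field adds, 57 - 8 = 49. A trivial arithmetic sketch of that invariant, assuming an 8-byte uintptr_t as on riscv64 (the check itself is hypothetical, not from the commit):

	#include <stdint.h>

	/* The pad gives back exactly the size of the new pc_zpcpu_offset field. */
	_Static_assert(57 - sizeof(uint64_t) == 49, "MD pad absorbs pc_zpcpu_offset");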
@@ -194,6 +194,7 @@ struct pcpu {
 	struct rm_queue	pc_rm_queue;		/* rmlock list of trackers */
 	uintptr_t	pc_dynamic;		/* Dynamic per-cpu data area */
 	uint64_t	pc_early_dummy_counter;	/* Startup time counter(9) */
+	uintptr_t	pc_zpcpu_offset;	/* Offset into zpcpu allocs */
 
 	/*
 	 * Keep MD fields last, so that CPU-specific variations on a
@@ -227,14 +228,28 @@ extern struct pcpu *cpuid_to_pcpu[];
 #endif
 #define	curproc		(curthread->td_proc)
 
+#ifndef zpcpu_offset_cpu
+#define	zpcpu_offset_cpu(cpu)	(UMA_PCPU_ALLOC_SIZE * cpu)
+#endif
+#ifndef zpcpu_offset
+#define	zpcpu_offset()		(PCPU_GET(zpcpu_offset))
+#endif
+
+#ifndef zpcpu_base_to_offset
+#define	zpcpu_base_to_offset(base) (base)
+#endif
+#ifndef zpcpu_offset_to_base
+#define	zpcpu_offset_to_base(base) (base)
+#endif
+
 /* Accessor to elements allocated via UMA_ZONE_PCPU zone. */
 #define	zpcpu_get(base) ({								\
-	__typeof(base) _ptr = (void *)((char *)(base) + UMA_PCPU_ALLOC_SIZE * curcpu);	\
+	__typeof(base) _ptr = (void *)((char *)(base) + zpcpu_offset());		\
 	_ptr;										\
 })
 
 #define	zpcpu_get_cpu(base, cpu) ({							\
-	__typeof(base) _ptr = (void *)((char *)(base) + UMA_PCPU_ALLOC_SIZE * cpu);	\
+	__typeof(base) _ptr = (void *)((char *)(base) + zpcpu_offset_cpu(cpu));		\
 	_ptr;										\
 })
 
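A hedged usage sketch of the macros above (not from the commit; the zone name, element type, and function names are made up): a UMA_ZONE_PCPU zone hands out one slot per CPU, zpcpu_get() resolves the current CPU's slot through the stored offset, and zpcpu_get_cpu() walks all slots.

	#include <sys/param.h>
	#include <sys/systm.h>
	#include <sys/malloc.h>
	#include <sys/pcpu.h>
	#include <sys/smp.h>
	#include <vm/uma.h>

	/* Hypothetical per-CPU packet counter. */
	static uma_zone_t foo_zone;
	static uint64_t *foo_pkts;	/* pcpu pointer returned by uma_zalloc_pcpu() */

	static void
	foo_init(void)
	{
		foo_zone = uma_zcreate("foo pcpu", sizeof(uint64_t), NULL, NULL,
		    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_PCPU);
		foo_pkts = uma_zalloc_pcpu(foo_zone, M_WAITOK | M_ZERO);
	}

	static void
	foo_count_one(void)
	{
		/* base + PCPU_GET(zpcpu_offset); real code would also guard
		 * against migration (critical section, counter(9), ...). */
		(*zpcpu_get(foo_pkts))++;
	}

	static uint64_t
	foo_total(void)
	{
		uint64_t sum;
		int i;

		sum = 0;
		CPU_FOREACH(i)
			sum += *zpcpu_get_cpu(foo_pkts, i);
		return (sum);
	}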
@@ -2944,34 +2944,39 @@ uma_zwait(uma_zone_t zone)
 void *
 uma_zalloc_pcpu_arg(uma_zone_t zone, void *udata, int flags)
 {
-	void *item;
+	void *item, *pcpu_item;
 #ifdef SMP
 	int i;
 
 	MPASS(zone->uz_flags & UMA_ZONE_PCPU);
 #endif
 	item = uma_zalloc_arg(zone, udata, flags & ~M_ZERO);
-	if (item != NULL && (flags & M_ZERO)) {
+	if (item == NULL)
+		return (NULL);
+	pcpu_item = zpcpu_base_to_offset(item);
+	if (flags & M_ZERO) {
 #ifdef SMP
 		for (i = 0; i <= mp_maxid; i++)
-			bzero(zpcpu_get_cpu(item, i), zone->uz_size);
+			bzero(zpcpu_get_cpu(pcpu_item, i), zone->uz_size);
 #else
 		bzero(item, zone->uz_size);
 #endif
 	}
-	return (item);
+	return (pcpu_item);
 }
 
 /*
  * A stub while both regular and pcpu cases are identical.
  */
 void
-uma_zfree_pcpu_arg(uma_zone_t zone, void *item, void *udata)
+uma_zfree_pcpu_arg(uma_zone_t zone, void *pcpu_item, void *udata)
 {
+	void *item;
 
 #ifdef SMP
 	MPASS(zone->uz_flags & UMA_ZONE_PCPU);
 #endif
+	item = zpcpu_offset_to_base(pcpu_item);
 	uma_zfree_arg(zone, item, udata);
 }
 
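Not part of the diff, a summary of the changed contract above: uma_zalloc_pcpu_arg() now returns the offset-adjusted pointer (zpcpu_base_to_offset(item)) rather than the raw base, and uma_zfree_pcpu_arg() converts it back with zpcpu_offset_to_base() before handing the base to uma_zfree_arg(). With the default identity macros the two pointers are numerically equal, so existing callers keep working. A minimal pairing sketch with a hypothetical zone:

	#include <sys/param.h>
	#include <sys/malloc.h>
	#include <sys/pcpu.h>
	#include <vm/uma.h>

	/* "bar_zone" is illustrative; assume it was created with UMA_ZONE_PCPU. */
	static uma_zone_t bar_zone;

	static void
	bar_roundtrip(void)
	{
		uint64_t *pcpu_item;

		/* The returned pointer is already offset-adjusted... */
		pcpu_item = uma_zalloc_pcpu(bar_zone, M_WAITOK | M_ZERO);

		/* ...so it is only ever dereferenced through zpcpu_get()... */
		*zpcpu_get(pcpu_item) = 1;

		/* ...and the very same pointer is passed back on free; the
		 * conversion to the base address happens inside UMA. */
		uma_zfree_pcpu(bar_zone, pcpu_item);
	}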