Implement unr64

Important users of unr, such as tmpfs and pipes, can get away with just
ever-increasing counters, which makes the overhead of managing the state
for reusable 32-bit unit numbers a pessimization.

Provide a dedicated unrhdr64 allocator whose only state is a 64-bit counter
bumped with an atomic fetch-add (mutex-protected on platforms that lack
64-bit atomics). This can be further sped up by having the counter
"allocate" ranges and hand them out per-CPU, as sketched below.
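
A minimal sketch of that batching idea, using C11 atomics and a per-thread
cache standing in for a real per-CPU structure; unr64_cache, UNR64_BATCH and
unr64_counter are hypothetical names, not part of this commit:

#include <stdatomic.h>
#include <stdint.h>

#define	UNR64_BATCH	64	/* IDs reserved per refill (illustrative) */

struct unr64_cache {
	uint64_t	next;	/* next unused ID in the reserved range */
	uint64_t	limit;	/* first ID past the reserved range */
};

static _Atomic uint64_t unr64_counter;

static uint64_t
alloc_unr64_batched(struct unr64_cache *c)
{

	if (c->next == c->limit) {
		/* Refill: reserve a whole range with a single fetch-add. */
		c->next = atomic_fetch_add(&unr64_counter, UNR64_BATCH);
		c->limit = c->next + UNR64_BATCH;
	}
	return (c->next++);
}

Each CPU would then pay one atomic operation per UNR64_BATCH allocations
instead of one per allocation, at the cost of handing out IDs with gaps.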

Reviewed by:	kib
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D18054
Mateusz Guzik 2018-11-20 14:58:41 +00:00
parent abfc3b2fef
commit 435bef7a2f
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=340676
2 changed files with 39 additions and 0 deletions


@@ -98,6 +98,19 @@ static struct mtx unitmtx;
MTX_SYSINIT(unit, &unitmtx, "unit# allocation", MTX_DEF);
#ifdef UNR64_LOCKED
uint64_t
alloc_unr64(struct unrhdr64 *unr64)
{
	uint64_t item;

	mtx_lock(&unitmtx);
	item = unr64->counter++;
	mtx_unlock(&unitmtx);
	return (item);
}
#endif

#else /* ...USERLAND */
#include <bitstring.h>


@@ -523,6 +523,32 @@ int alloc_unr_specific(struct unrhdr *uh, u_int item);
int alloc_unrl(struct unrhdr *uh);
void free_unr(struct unrhdr *uh, u_int item);
#if defined(__mips__) || defined(__powerpc__)
#define	UNR64_LOCKED
#endif

struct unrhdr64 {
	uint64_t	counter;
};

static __inline void
new_unrhdr64(struct unrhdr64 *unr64, uint64_t low)
{

	unr64->counter = low;
}

#ifdef UNR64_LOCKED
uint64_t alloc_unr64(struct unrhdr64 *);
#else
static __inline uint64_t
alloc_unr64(struct unrhdr64 *unr64)
{

	return (atomic_fetchadd_64(&unr64->counter, 1));
}
#endif

void intr_prof_stack_use(struct thread *td, struct trapframe *frame);
void counted_warning(unsigned *counter, const char *msg);
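
For context, a usage sketch of the new interface as a kernel consumer might
employ it; only new_unrhdr64() and alloc_unr64() come from this commit, the
example_* names are hypothetical:

#include <sys/param.h>
#include <sys/systm.h>

static struct unrhdr64 example_ids;

static void
example_init(void)
{

	/* Hand out 64-bit IDs starting at 1; IDs are never reused. */
	new_unrhdr64(&example_ids, 1);
}

static uint64_t
example_next_id(void)
{

	/* Lock-free fetch-add normally; mutex-protected under UNR64_LOCKED. */
	return (alloc_unr64(&example_ids));
}

Since the counter only moves forward there is no free_unr() counterpart;
consumers that need to recycle unit numbers should stay with the existing
unrhdr allocator.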