Provide a streamlined '#define curthread __curthread()' for amd64 to avoid
the compiler having to parse and optimize the PCPU_GET(curthread) macro so
often.  __curthread() is an inline, optimized version of PCPU_GET(curthread)
that knows that pc_curthread is at offset zero in the pcpu struct.  Add a
CTASSERT() to catch any possible changes to this.

This accounts for just over a 1% wall-clock speedup for total kernel
compile/link time, and a 20% compile-time speedup on some specific files,
depending on which compile options are used.

Approved by:    re (jhb)
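As an illustration only (not part of the commit): a compile-only C sketch of
the trade-off, with hypothetical names (pcpu_sketch, PCPU_GET_SKETCH,
curthread_sketch) and a deliberately simplified macro body standing in for the
real __PCPU_GET().  The generic accessor is a statement-expression the
optimizer must expand and re-optimize at every call site; the streamlined
helper is a single fixed-form load.

/*
 * Compile-only sketch; all names ending in _SKETCH/_sketch are made up
 * and the macro body is a simplification of the real per-CPU accessor.
 * The %gs segment base is only set up this way inside the kernel, so
 * do not run this in userland.
 */
#include <stddef.h>

struct thread;

struct pcpu_sketch {
        struct thread   *pc_curthread;  /* must stay at offset zero */
        int             pc_cpuid;
};

/*
 * Generic style: offset computed per member, re-expanded at every use
 * (this sketch handles only pointer-sized members; the real macro also
 * dispatched on the member's size, adding to the parsing burden).
 */
#define PCPU_GET_SKETCH(member) ({                                      \
        __typeof(((struct pcpu_sketch *)0)->member) __res;              \
        __asm __volatile("movq %%gs:%c1,%0"                             \
            : "=r" (__res)                                              \
            : "i" (offsetof(struct pcpu_sketch, member)));              \
        __res;                                                          \
})

/* Streamlined style: pc_curthread is known to live at offset zero. */
static __inline struct thread *
curthread_sketch(void)
{
        struct thread *td;

        __asm __volatile("movq %%gs:0,%0" : "=r" (td));
        return (td);
}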
commit 5dd883833c
parent de8e370ec1

Notes (svn2git, 2020-12-20 02:59:44 +00:00):
    svn path=/head/; revision=122930
sys/amd64/amd64/machdep.c:

@@ -121,6 +121,9 @@ __FBSDID("$FreeBSD$");
 #include <sys/ptrace.h>
 #include <machine/sigframe.h>
 
+/* Sanity check for __curthread() */
+CTASSERT(offsetof(struct pcpu, pc_curthread) == 0);
+
 extern u_int64_t hammer_time(u_int64_t, u_int64_t);
 extern void dblfault_handler(void);
 
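The CTASSERT() turns any future layout drift into a build failure instead of
a runtime bug.  A minimal sketch of how such a compile-time assert can be
built (the exact FreeBSD macro definition may differ in detail):

#include <stddef.h>

/*
 * Sketch of a CTASSERT-style compile-time assert: if the condition is
 * false, the typedef declares a char array of size -1, which is a hard
 * compile error.  __LINE__ is pasted in to keep typedef names unique.
 */
#define CTASSERT_SKETCH(x)      _CTA_SKETCH(x, __LINE__)
#define _CTA_SKETCH(x, y)       __CTA_SKETCH(x, y)
#define __CTA_SKETCH(x, y)      typedef char __assert ## y[(x) ? 1 : -1]

struct pcpu_demo {
        void    *pc_curthread;          /* must stay first */
        int     pc_cpuid;
};

/* Compiles today; breaks the build if pc_curthread ever moves. */
CTASSERT_SKETCH(offsetof(struct pcpu_demo, pc_curthread) == 0);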
sys/amd64/include/pcpu.h:

@@ -159,6 +159,16 @@ extern struct pcpu *pcpup;
 #define PCPU_PTR(member)        __PCPU_PTR(pc_ ## member)
 #define PCPU_SET(member, val)   __PCPU_SET(pc_ ## member, val)
 
+static __inline struct thread *
+__curthread(void)
+{
+        struct thread *td;
+
+        __asm __volatile("movq %%gs:0,%0" : "=r" (td));
+        return (td);
+}
+#define curthread       (__curthread())
+
 #else
 #error gcc or lint is required to use this file
 #endif
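At call sites the change is transparent.  A hypothetical example (the function
name is made up; struct thread is assumed from the surrounding kernel headers)
of what each use of curthread now costs:

/*
 * Hypothetical call site: every use of curthread now expands to
 * __curthread(), i.e. a single
 *
 *      movq    %gs:0, <reg>
 *
 * load, instead of the larger PCPU_GET() expansion the optimizer
 * previously had to digest at each use.
 */
void
example_acquire_sketch(void)
{
        struct thread *td = curthread;  /* one fixed-form load */

        /* ... use td ... */
        (void)td;
}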