Reduce overhead of ktrace checks in the common case.

KTRPOINT() checks both whether we are tracing _and_ whether we are
recursing within ktrace.  The second condition is only ever evaluated if
ktrace is actually enabled.  This change moves that check out of the hot
path and into the tracing functions themselves.

Discussed with:	mjg@
Reported by:	mjg@
Approved by:	sbruno@
parent 4f52dfbb8d
commit ad738f3791
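For context (an illustrative sketch, not part of this commit): KTRPOINT() is
evaluated on every system call entry and return, so whatever the macro
expands to is paid at every call site even when tracing is disabled.  A call
site on the syscall return path looks roughly like the function below; the
name syscall_exit_sketch and the surrounding variables are placeholders for
illustration only.

	/*
	 * Illustrative call-site sketch.  Before this change, the KTRPOINT()
	 * expansion here tested both p_traceflag and TDP_INKTRACE; after it,
	 * only p_traceflag is tested at the call site, and ktrsysret() itself
	 * returns early if the thread is already inside ktrace.
	 */
	static void
	syscall_exit_sketch(struct thread *td, int code, int error,
	    register_t retval)
	{
		if (KTRPOINT(td, KTR_SYSRET))
			ktrsysret(code, error, retval);
	}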
--- a/sys/kern/kern_ktrace.c
+++ b/sys/kern/kern_ktrace.c
@@ -448,6 +448,9 @@ ktrsyscall(int code, int narg, register_t args[])
 	size_t buflen;
 	char *buf = NULL;
 
+	if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
+		return;
+
 	buflen = sizeof(register_t) * narg;
 	if (buflen > 0) {
 		buf = malloc(buflen, M_KTRACE, M_WAITOK);
@@ -475,6 +478,9 @@ ktrsysret(int code, int error, register_t retval)
 	struct ktr_request *req;
 	struct ktr_sysret *ktp;
 
+	if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
+		return;
+
 	req = ktr_getrequest(KTR_SYSRET);
 	if (req == NULL)
 		return;
@@ -729,6 +735,9 @@ ktrcsw(int out, int user, const char *wmesg)
 	struct ktr_request *req;
 	struct ktr_csw *kc;
 
+	if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
+		return;
+
 	req = ktr_getrequest(KTR_CSW);
 	if (req == NULL)
 		return;
@@ -750,6 +759,9 @@ ktrstruct(const char *name, const void *data, size_t datalen)
 	char *buf;
 	size_t buflen, namelen;
 
+	if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
+		return;
+
 	if (data == NULL)
 		datalen = 0;
 	namelen = strlen(name) + 1;
@@ -776,6 +788,9 @@ ktrstructarray(const char *name, enum uio_seg seg, const void *data,
 	size_t buflen, datalen, namelen;
 	int max_items;
 
+	if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
+		return;
+
 	/* Trim array length to genio size. */
 	max_items = ktr_geniosize / struct_size;
 	if (num_items > max_items) {
@@ -820,6 +835,9 @@ ktrcapfail(enum ktr_cap_fail_type type, const cap_rights_t *needed,
 	struct ktr_request *req;
 	struct ktr_cap_fail *kcf;
 
+	if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
+		return;
+
 	req = ktr_getrequest(KTR_CAPFAIL);
 	if (req == NULL)
 		return;
@@ -844,6 +862,9 @@ ktrfault(vm_offset_t vaddr, int type)
 	struct ktr_request *req;
 	struct ktr_fault *kf;
 
+	if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
+		return;
+
 	req = ktr_getrequest(KTR_FAULT);
 	if (req == NULL)
 		return;
@@ -861,6 +882,9 @@ ktrfaultend(int result)
 	struct ktr_request *req;
 	struct ktr_faultend *kf;
 
+	if (__predict_false(curthread->td_pflags & TDP_INKTRACE))
+		return;
+
 	req = ktr_getrequest(KTR_FAULTEND);
 	if (req == NULL)
 		return;
--- a/sys/sys/ktrace.h
+++ b/sys/sys/ktrace.h
@@ -70,8 +70,7 @@ struct ktr_header {
  * is the public interface.
  */
 #define	KTRCHECK(td, type)	((td)->td_proc->p_traceflag & (1 << type))
-#define	KTRPOINT(td, type) \
-	(KTRCHECK((td), (type)) && !((td)->td_pflags & TDP_INKTRACE))
+#define	KTRPOINT(td, type)	(__predict_false(KTRCHECK((td), (type))))
 #define	KTRCHECKDRAIN(td)	(!(STAILQ_EMPTY(&(td)->td_proc->p_ktr)))
 #define	KTRUSERRET(td) do {						\
 	if (KTRCHECKDRAIN(td))						\
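Expanded by hand (illustrative only, using the macro bodies above), the code
emitted at each KTRPOINT() call site changes roughly as follows; "..." stands
in for the real arguments:

	/* Before: the recursion check was part of every KTRPOINT() expansion,
	 * so both the test and its branch were emitted inline at every
	 * call site. */
	if ((td->td_proc->p_traceflag & (1 << KTR_SYSCALL)) &&
	    !(td->td_pflags & TDP_INKTRACE))
		ktrsyscall(...);

	/* After: only the trace flag is tested in the fast path (and marked
	 * unlikely); the TDP_INKTRACE check now lives inside ktrsyscall() and
	 * the other handlers, so it runs only when tracing is enabled. */
	if (__predict_false(td->td_proc->p_traceflag & (1 << KTR_SYSCALL)))
		ktrsyscall(...);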