diff --git a/sys/kern/kern_rmlock.c b/sys/kern/kern_rmlock.c
index db6e08e05198..af593a862d35 100644
--- a/sys/kern/kern_rmlock.c
+++ b/sys/kern/kern_rmlock.c
@@ -934,6 +934,30 @@ rms_rlock(struct rmslock *rms)
 	critical_exit();
 }
 
+int
+rms_try_rlock(struct rmslock *rms)
+{
+	int *influx;
+
+	critical_enter();
+	influx = zpcpu_get(rms->readers_influx);
+	__compiler_membar();
+	*influx = 1;
+	__compiler_membar();
+	if (__predict_false(rms->writers > 0)) {
+		__compiler_membar();
+		*influx = 0;
+		critical_exit();
+		return (0);
+	}
+	__compiler_membar();
+	(*zpcpu_get(rms->readers_pcpu))++;
+	__compiler_membar();
+	*influx = 0;
+	critical_exit();
+	return (1);
+}
+
 static void __noinline
 rms_runlock_fallback(struct rmslock *rms)
 {
diff --git a/sys/sys/rmlock.h b/sys/sys/rmlock.h
index 0fd4406d0886..ba2b9bbb732d 100644
--- a/sys/sys/rmlock.h
+++ b/sys/sys/rmlock.h
@@ -136,9 +136,21 @@ struct rm_args {
 void	rms_init(struct rmslock *rms, const char *name);
 void	rms_destroy(struct rmslock *rms);
 void	rms_rlock(struct rmslock *rms);
+int	rms_try_rlock(struct rmslock *rms);
 void	rms_runlock(struct rmslock *rms);
 void	rms_wlock(struct rmslock *rms);
 void	rms_wunlock(struct rmslock *rms);
 
+/*
+ * Writers are not explicitly tracked, thus until that changes the best we can
+ * do is indicate the lock is taken for writing by *someone*.
+ */
+static inline int
+rms_wowned(struct rmslock *rms)
+{
+
+	return (rms->writers > 0);
+}
+
 #endif /* _KERNEL */
 #endif /* !_SYS_RMLOCK_H_ */
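
Not part of the patch, but as a reading aid: a minimal sketch of how a caller might use the new primitive, with hypothetical names (example_lock, example_op) that do not appear in the diff. Per the code above, rms_try_rlock() returns non-zero on success and 0 when a writer is present, so a caller can fall back to the blocking read path.

/*
 * Hypothetical consumer, for illustration only; "example_lock" and
 * "example_op" are not part of the patch.
 */
static struct rmslock example_lock;

static void
example_op(void)
{

	if (!rms_try_rlock(&example_lock)) {
		/* A writer is active; take the blocking read path instead. */
		rms_rlock(&example_lock);
	}
	/* ... read-side work protected by the lock ... */
	rms_runlock(&example_lock);
}

A write-side path could assert with something like MPASS(rms_wowned(&example_lock)); as the header comment notes, writers are not tracked individually, so this only checks that *some* writer holds the lock.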