Make compatibility KPI functions like if_addr_rlock(), which used to take
mutexes but have since been converted to epoch(9), use a thread-private
epoch_tracker.

Embedding the tracker in ifnet(9) or an ifnet-derived structure makes such a
function non-reentrant: it will fail miserably if called simultaneously from
two different contexts. A thread-private tracker gives each thread its own
tracker, so these functions can be called safely. It does not allow nested
calls, but nesting is not expected of compatibility KPIs.
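The failure mode is easiest to see side by side. A sketch (not code from this
commit) of both schemes:

/*
 * Embedded tracker (old scheme): two contexts entering through the same
 * ifnet hand the SAME tracker to epoch_enter_preempt(), corrupting the
 * epoch's per-CPU tracker list.
 *
 *   thread A: if_addr_rlock(ifp) -> epoch_enter_preempt(..., &ifp->if_addr_et)
 *   thread B: if_addr_rlock(ifp) -> epoch_enter_preempt(..., &ifp->if_addr_et)
 *
 * Thread-private tracker (new scheme): each context supplies its own
 * storage, so concurrent calls on the same ifp are safe.
 *
 *   thread A: if_addr_rlock(ifp) -> epoch_enter_preempt(..., A->td_et)
 *   thread B: if_addr_rlock(ifp) -> epoch_enter_preempt(..., B->td_et)
 */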

Reviewed by:	markj
Gleb Smirnoff	2018-11-13 22:58:38 +00:00
parent 85939654f3
commit b79aa45e0e
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=340413
6 changed files with 27 additions and 14 deletions

sys/kern/kern_thread.c

@@ -272,6 +272,7 @@ thread_init(void *mem, int size, int flags)
 	td->td_rlqe = NULL;
 	EVENTHANDLER_DIRECT_INVOKE(thread_init, td);
 	umtx_thread_init(td);
+	epoch_thread_init(td);
 	td->td_kstack = 0;
 	td->td_sel = NULL;
 	return (0);
@@ -291,6 +292,7 @@ thread_fini(void *mem, int size)
 	turnstile_free(td->td_turnstile);
 	sleepq_free(td->td_sleepqueue);
 	umtx_thread_fini(td);
+	epoch_thread_fini(td);
 	seltdfini(td);
 }
 
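thread_init() and thread_fini() are the UMA init/fini hooks for the thread
zone, so the tracker is allocated when the backing memory for a thread is
first set up and freed only when that memory is reclaimed; it survives thread
reuse from the zone cache. The resulting lifetime, as a sketch:

/*
 * thread_init(td)  ->  epoch_thread_init(td):  td->td_et = malloc(...)
 *   ... thread runs; compatibility KPIs may use curthread->td_et ...
 * thread_fini(td)  ->  epoch_thread_fini(td):  free(td->td_et, ...)
 */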

sys/kern/subr_epoch.c

@@ -667,3 +667,17 @@ in_epoch(epoch_t epoch)
 {
 	return (in_epoch_verbose(epoch, 0));
 }
+
+void
+epoch_thread_init(struct thread *td)
+{
+
+	td->td_et = malloc(sizeof(struct epoch_tracker), M_EPOCH, M_WAITOK);
+}
+
+void
+epoch_thread_fini(struct thread *td)
+{
+
+	free(td->td_et, M_EPOCH);
+}
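Since the tracker is allocated with M_WAITOK, the allocation cannot fail and
td_et stays valid for as long as the thread exists. A hypothetical accessor
(curthread_spare_tracker() is not in the tree) that makes the invariant
explicit:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>	/* MPASS() */
#include <sys/proc.h>	/* curthread, struct thread */

static inline struct epoch_tracker *
curthread_spare_tracker(void)
{

	MPASS(curthread->td_et != NULL);	/* set up by epoch_thread_init() */
	return (curthread->td_et);
}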

sys/net/if.c

@@ -1767,35 +1767,29 @@ if_data_copy(struct ifnet *ifp, struct if_data *ifd)
 void
 if_addr_rlock(struct ifnet *ifp)
 {
-	MPASS(*(uint64_t *)&ifp->if_addr_et == 0);
-	epoch_enter_preempt(net_epoch_preempt, &ifp->if_addr_et);
+
+	epoch_enter_preempt(net_epoch_preempt, curthread->td_et);
 }
 
 void
 if_addr_runlock(struct ifnet *ifp)
 {
 
-	epoch_exit_preempt(net_epoch_preempt, &ifp->if_addr_et);
-#ifdef INVARIANTS
-	bzero(&ifp->if_addr_et, sizeof(struct epoch_tracker));
-#endif
+	epoch_exit_preempt(net_epoch_preempt, curthread->td_et);
 }
 
 void
 if_maddr_rlock(if_t ifp)
 {
-	MPASS(*(uint64_t *)&ifp->if_maddr_et == 0);
-	epoch_enter_preempt(net_epoch_preempt, &ifp->if_maddr_et);
+
+	epoch_enter_preempt(net_epoch_preempt, curthread->td_et);
 }
 
 void
 if_maddr_runlock(if_t ifp)
 {
 
-	epoch_exit_preempt(net_epoch_preempt, &ifp->if_maddr_et);
-#ifdef INVARIANTS
-	bzero(&ifp->if_maddr_et, sizeof(struct epoch_tracker));
-#endif
+	epoch_exit_preempt(net_epoch_preempt, curthread->td_et);
 }
 
 /*
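A typical consumer of the compatibility KPI then looks like this;
count_addresses() is a made-up example, but the CK_STAILQ list head and entry
linkage match if_var.h of this era. Because all four functions share the
single spare tracker in curthread->td_et, such sections must not nest and
must not sleep:

#include <sys/param.h>
#include <sys/socket.h>
#include <net/if.h>
#include <net/if_var.h>

/* Hypothetical example: count the addresses assigned to an interface. */
static u_int
count_addresses(struct ifnet *ifp)
{
	struct ifaddr *ifa;
	u_int n;

	n = 0;
	if_addr_rlock(ifp);
	CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link)
		n++;	/* do not sleep or call another if_*_rlock() here */
	if_addr_runlock(ifp);
	return (n);
}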

sys/net/if_var.h

@@ -381,8 +381,6 @@ struct ifnet {
 	 */
 	struct netdump_methods *if_netdump_methods;
 	struct epoch_context if_epoch_ctx;
-	struct epoch_tracker if_addr_et;
-	struct epoch_tracker if_maddr_et;
 
 	/*
 	 * Spare fields to be added before branching a stable branch, so

sys/sys/epoch.h

@@ -82,5 +82,8 @@ void epoch_exit_preempt(epoch_t epoch, epoch_tracker_t et);
 void epoch_enter(epoch_t epoch);
 void epoch_exit(epoch_t epoch);
+
+void epoch_thread_init(struct thread *);
+void epoch_thread_fini(struct thread *);
 
 #endif /* _KERNEL */
 #endif /* _SYS_EPOCH_H_ */

sys/sys/proc.h

@@ -193,6 +193,7 @@ struct trapframe;
 struct turnstile;
 struct vm_map;
 struct vm_map_entry;
+struct epoch_tracker;
 
 /*
  * XXX: Does this belong in resource.h or resourcevar.h instead?
@@ -360,6 +361,7 @@ struct thread {
 	int		td_lastcpu;	/* (t) Last cpu we were on. */
 	int		td_oncpu;	/* (t) Which cpu we are on. */
 	void		*td_lkpi_task;	/* LinuxKPI task struct pointer */
+	struct epoch_tracker *td_et;	/* (k) compat KPI spare tracker */
 	int		td_pmcpend;
 };
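In the proc.h locking key, (k) marks fields that are private to the thread
and only accessed by curthread; that is what lets the compatibility KPIs use
td_et without any synchronization of their own.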