Add smp_rendezvous_cpus_retry

This is a wrapper around smp_rendezvous_cpus which enables the use of IPI
handlers that can fail and require retrying.

A wait_func argument is added to provide a routine which can be used to
poll the CPU of interest to determine when the IPI can be retried.

Handlers which succeed must call smp_rendezvous_cpus_done to denote that
fact.
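
As an illustration of the intended calling pattern, a minimal hypothetical
caller is sketched below. The example_* names are invented for this sketch;
smp_rendezvous_cpus_retry, smp_rendezvous_cpus_done, smp_no_rendezvous_barrier,
all_cpus and cpu_spinwait() are existing kernel interfaces.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/smp.h>

#include <machine/cpu.h>

/* Hypothetical per-CPU helpers, not part of the tree. */
static int	example_try_work(void);
static int	example_cpu_busy(int cpu);

static struct smp_rendezvous_cpus_retry_arg example_srcra;

static void
example_action(void *arg)
{

	/*
	 * Attempt the per-CPU work.  On failure, return without
	 * acknowledging; the wrapper re-sends the IPI to this CPU
	 * on its next iteration.
	 */
	if (!example_try_work())
		return;
	smp_rendezvous_cpus_done(arg);
}

static void
example_wait(void *arg __unused, int cpu)
{

	/* Poll the straggler until another attempt looks worthwhile. */
	while (example_cpu_busy(cpu))
		cpu_spinwait();
}

static void
example_broadcast(void)
{

	smp_rendezvous_cpus_retry(all_cpus,
	    smp_no_rendezvous_barrier,	/* setup_func */
	    example_action,		/* action_func */
	    smp_no_rendezvous_barrier,	/* teardown_func */
	    example_wait,		/* wait_func */
	    &example_srcra);
}

A handler that cannot make progress simply returns without calling
smp_rendezvous_cpus_done, leaving its CPU in the pending set so the
rendezvous is re-sent after wait_func has polled it.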

Discussed with:	 jeff
Differential Revision:	https://reviews.freebsd.org/D23582
commit e165df173d
parent 9d8144f925
Author: mjg
Date:   2020-02-12 11:16:55 +00:00

2 changed files with 54 additions and 0 deletions


@@ -884,6 +884,47 @@ smp_no_rendezvous_barrier(void *dummy)
#endif
}

void
smp_rendezvous_cpus_retry(cpuset_t map,
    void (* setup_func)(void *),
    void (* action_func)(void *),
    void (* teardown_func)(void *),
    void (* wait_func)(void *, int),
    struct smp_rendezvous_cpus_retry_arg *arg)
{
	int cpu;

	/*
	 * Execute an action on all specified CPUs while retrying until they
	 * all acknowledge completion.
	 */
	CPU_COPY(&map, &arg->cpus);
	for (;;) {
		smp_rendezvous_cpus(
		    arg->cpus,
		    setup_func,
		    action_func,
		    teardown_func,
		    arg);

		if (CPU_EMPTY(&arg->cpus))
			break;

		CPU_FOREACH(cpu) {
			if (!CPU_ISSET(cpu, &arg->cpus))
				continue;
			wait_func(arg, cpu);
		}
	}
}

void
smp_rendezvous_cpus_done(struct smp_rendezvous_cpus_retry_arg *arg)
{

	CPU_CLR_ATOMIC(curcpu, &arg->cpus);
}

/*
 * Wait for specified idle threads to switch once.  This ensures that even
 * preempted threads have cycled through the switch function once,

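Note the division of labour visible in the hunk above: each CPU that succeeds
removes itself from arg->cpus with CPU_CLR_ATOMIC() via
smp_rendezvous_cpus_done(), so every subsequent smp_rendezvous_cpus() call
targets only the CPUs that have not yet acknowledged, and wait_func() is
consulted for each remaining CPU before the IPI is re-issued.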

@@ -276,6 +276,19 @@ void smp_rendezvous_cpus(cpuset_t,
			    void (*)(void *),
			    void (*)(void *),
			    void *arg);

struct smp_rendezvous_cpus_retry_arg {
	cpuset_t cpus;
};
void	smp_rendezvous_cpus_retry(cpuset_t,
	    void (*)(void *),
	    void (*)(void *),
	    void (*)(void *),
	    void (*)(void *, int),
	    struct smp_rendezvous_cpus_retry_arg *);
void	smp_rendezvous_cpus_done(struct smp_rendezvous_cpus_retry_arg *);

#endif /* !LOCORE */
#endif /* _KERNEL */
#endif /* _SYS_SMP_H_ */