epoch(9): Guarantee forward progress on busy sections
Add an epoch section to struct thread. We can use this to enable the epoch counter to advance even if a section is perpetually occupied by a thread. Approved by: sbruno
This commit is contained in:
parent
8ab507588b
commit
b8205686b4
@ -54,7 +54,7 @@ __FBSDID("$FreeBSD$");
|
||||
static MALLOC_DEFINE(M_EPOCH, "epoch", "epoch based reclamation");
|
||||
|
||||
/* arbitrary --- needs benchmarking */
|
||||
#define MAX_ADAPTIVE_SPIN 5000
|
||||
#define MAX_ADAPTIVE_SPIN 1000
|
||||
|
||||
#define EPOCH_EXITING 0x1
|
||||
#ifdef __amd64__
|
||||
@ -63,6 +63,7 @@ static MALLOC_DEFINE(M_EPOCH, "epoch", "epoch based reclamation");
|
||||
#define EPOCH_ALIGN CACHE_LINE_SIZE
|
||||
#endif
|
||||
|
||||
CTASSERT(sizeof(epoch_section_t) == sizeof(ck_epoch_section_t));
|
||||
SYSCTL_NODE(_kern, OID_AUTO, epoch, CTLFLAG_RW, 0, "epoch information");
|
||||
SYSCTL_NODE(_kern_epoch, OID_AUTO, stats, CTLFLAG_RW, 0, "epoch stats");
|
||||
|
||||
@ -308,8 +309,12 @@ epoch_enter(epoch_t epoch)
|
||||
KASSERT(found, ("recursing on a second epoch"));
|
||||
}
|
||||
#endif
|
||||
if (td->td_epochnest > 1) {
|
||||
critical_exit();
|
||||
return;
|
||||
}
|
||||
sched_pin();
|
||||
ck_epoch_begin(&eps->eps_record.er_record, NULL);
|
||||
ck_epoch_begin(&eps->eps_record.er_record, (ck_epoch_section_t*)&td->td_epoch_section);
|
||||
critical_exit();
|
||||
}
|
||||
|
||||
@ -324,11 +329,15 @@ epoch_exit(epoch_t epoch)
|
||||
MPASS(td->td_epochnest);
|
||||
critical_enter();
|
||||
eps = epoch->e_pcpu[curcpu];
|
||||
sched_unpin();
|
||||
ck_epoch_end(&eps->eps_record.er_record, NULL);
|
||||
td->td_epochnest--;
|
||||
if (td->td_epochnest == 0)
|
||||
TAILQ_REMOVE(&eps->eps_record.er_tdlist, td, td_epochq);
|
||||
else {
|
||||
critical_exit();
|
||||
return;
|
||||
}
|
||||
sched_unpin();
|
||||
ck_epoch_end(&eps->eps_record.er_record, (ck_epoch_section_t*)&td->td_epoch_section);
|
||||
eps->eps_record.er_gen++;
|
||||
critical_exit();
|
||||
}
|
||||
|
@ -75,6 +75,18 @@
|
||||
#endif
|
||||
|
||||
|
||||
/*
 * A section object may be passed to every begin-end pair to allow for
 * forward progress guarantees within prolonged active sections.
 *
 * We can't include ck_epoch.h so we define our own variant here and
 * then CTASSERT that it's the same size in subr_epoch.c
 */
struct epoch_section {
	/*
	 * Mirrors ck_epoch_section_t: records which grace-period bucket
	 * this active section belongs to, so the epoch counter can
	 * advance past a long-running reader (see ck_epoch_begin/end
	 * calls in subr_epoch.c, which cast this to ck_epoch_section_t).
	 * Layout must stay in sync with Concurrency Kit's definition —
	 * verified by the CTASSERT in subr_epoch.c.
	 */
	unsigned int bucket;
};
typedef struct epoch_section epoch_section_t;
|
||||
|
||||
/*
|
||||
* One structure allocated per session.
|
||||
*
|
||||
@ -352,6 +364,7 @@ struct thread {
|
||||
struct proc *td_rfppwait_p; /* (k) The vforked child */
|
||||
struct vm_page **td_ma; /* (k) uio pages held */
|
||||
int td_ma_cnt; /* (k) size of *td_ma */
|
||||
epoch_section_t td_epoch_section; /* (t) epoch section object */
|
||||
void *td_emuldata; /* Emulator state data */
|
||||
int td_lastcpu; /* (t) Last cpu we were on. */
|
||||
int td_oncpu; /* (t) Which cpu we are on. */
|
||||
|
Loading…
Reference in New Issue
Block a user