Sched flag refactoring. Implemented wqCPUX, QUEUEX. RW lock refactoring for KQDOM and KVLST

This commit is contained in:
Oscar Zhao 2019-08-29 20:31:11 -04:00
parent f085e08d18
commit caba92060d
4 changed files with 507 additions and 363 deletions

File diff suppressed because it is too large Load Diff

View File

@ -395,13 +395,25 @@ __END_DECLS
* The ioctl to set multithreaded mode
*/
#define FKQMULTI _IOW('f', 89, int)
#define FKQMPRNT _IO('f', 90)
/*
* KQ scheduler flags
* KQ sched
*/
#define KQ_SCHED_WS 0x01
#define KQ_SCHED_QUEUE 0x02 /* make kq affinitize the knote depending on the first cpu it's scheduled to */
#define KQ_SCHED_CPU 0x04 /* make kq affinitize the knote depending on the runtime cpu it's scheduled to */
#define KQ_SCHED_BOT 0x08
#define KQ_SCHED_QUEUE 0x01 /* affinitizes knotes to the current cpu, sarg = extra queues to check */
#define KQ_SCHED_CPU 0x02 /* affinitize knotes to the first cpu, sarg = extra queues to check */
#define KQ_SCHED_BEST 0x04 /* Best of N, sarg = N */
/*
* KQ sched flags
*/
#define KQ_SCHED_FLAG_WS 0x01 /* work stealing, farg = # of knotes to steal */
/*
* 0 - 7: sched
* 8 - 15: sargs
* 16 - 23: flags
* 24 - 31: fargs
*/
#define KQSCHED_MAKE(sched, sargs, flags, fargs) (((sched) & 0xFF) | (((sargs) & 0xFF) << 8) | (((flags) & 0xFF) << 16) | (((fargs) & 0xFF) << 24))
#endif /* !_SYS_EVENT_H_ */

View File

@ -45,7 +45,6 @@
#define KQ_NEVENTS 8 /* minimize copy{in,out} calls */
#define KQEXTENT 256 /* linear growth by this amount */
#define KQDOM_EXTENT 8 /* linear growth by this amount */
#define KQDIR_ACTIVE (0)
#define KQDIR_INACTIVE (1)
@ -55,19 +54,20 @@ struct kevq {
LIST_ENTRY(kevq) kevq_th_tqe; /* entry into kevq_thred's kevq_list */
struct kqueue *kq; /* the kq that the kevq belongs to */
struct kqdom *kevq_kqd; /* the kq domain the kevq is on */
/* XXX: Make kevq contain a struct thread ptr instead of this dude */
struct kevq_thred *kevq_th; /* the thread that the kevq belongs to */
struct mtx lock; /* the lock for the kevq */
TAILQ_HEAD(, knote) kn_head; /* list of pending knotes */
int kn_count; /* number of pending knotes */
#define KEVQ_SLEEP 0x01
#define KEVQ_CLOSING 0x02
#define KEVQ_RDY 0x04
#define KEVQ_ACTIVE 0x04
int kevq_state;
int kevq_refcnt;
/* Used by the scheduler */
unsigned long kevq_avg_lat;
struct timespec kevq_last_kev;
uint64_t kevq_avg_lat;
uint64_t kevq_last_kev;
uint64_t kevq_last_nkev;
};
@ -75,18 +75,17 @@ struct kevq {
struct kqdom {
/* static */
int id;
struct mtx kqd_lock;
struct rwlock kqd_lock;
struct kqdom *parent;
cpuset_t cpu_mask;
struct veclist children; /* child kqdoms */
/* statistics. Atomically updated, doesn't require the lock*/
unsigned long avg_lat;
uint64_t avg_lat;
/* dynamic members*/
struct veclist kqd_activelist; /* active child kqdoms */
struct veclist kqd_kevqs; /* kevqs for this kqdom */
int kqd_ckevq; /* current kevq for round robin. XXX: remove round robin; it has literally no benefit, only maintenance nightmares */
};
struct kqueue {
@ -113,13 +112,16 @@ struct kqueue {
struct ucred *kq_cred;
struct kevqlist kq_kevqlist; /* list of kevqs */
/* scheduling stuff */
int kq_sched_flags; /* Scheduler flag for the KQ */
/* Round robin (only as a fallback) */
struct kevq *kq_ckevq; /* current kevq for multithreaded kqueue, used for round robin */
/* Best of two */
struct rwlock sched_bot_lk;
struct veclist sched_bot_lst;
/* scheduler flags for the KQ, set by IOCTL */
int kq_sflags;
int kq_ssargs;
int kq_ssched;
int kq_sfargs;
/* Default */
struct rwlock kevq_vlist_lk;
struct veclist kevq_vlist;
/* CPU queue */
struct kqdom *kq_kqd; /* root domain */
};

View File

@ -51,26 +51,6 @@ struct thread_info g_thrd_info[THREAD_CNT];
/* Test threads signals this upon receiving events */
sem_t g_sem_driver;
/*
 * Verify that events were spread evenly across the worker threads.
 * Scans info[0..size) (reads only info[i].evcnt) and aborts the test
 * if the gap between the busiest and the idlest thread exceeds max_diff.
 *
 * info     - per-thread event counters
 * size     - number of entries in info
 * max_diff - largest tolerated (max - min) spread
 */
static void
check_sched(struct thread_info *info, int size, unsigned int max_diff)
{
	int max = 0, min = INT_MAX;

	/* Nothing to compare; also avoids (0 - INT_MAX) going negative and
	 * being promoted to a huge unsigned value in the comparison below. */
	if (size <= 0)
		return;

	for (int i = 0; i < size; i++) {
		int cur = info[i].evcnt;
		if (cur > max) {
			max = cur;
		}
		if (cur < min) {
			min = cur;
		}
	}

	if ((max - min) > max_diff) {
		/*
		 * errx(3), not err(3): no syscall failed here, so appending
		 * strerror(errno) would be misleading.  errx(3) also emits the
		 * trailing newline itself, so none belongs in the format.
		 */
		errx(1, "READ_M: check_sched: max difference is %d", max - min);
	}
}
static char
socket_pop(int sockfd)
{
@ -189,12 +169,8 @@ test_socket_read(int delay)
socket_push(g_sockfd[1], '.');
/* wait for thread events */
sem_wait(&g_sem_driver);
if (!delay)
check_sched(g_thrd_info, THREAD_CNT, 1);
}
#ifdef TEST_DEBUG
printf("READ_M: finished testing, system shutting down...\n");
#endif
@ -386,6 +362,11 @@ test_socket_queue(void)
}
}
/* dump KQ */
error = ioctl(g_kqfd, FKQMPRNT);
if (error == -1) {
err(1, "dump ioctl failed");
}
#ifdef TEST_DEBUG
printf("READ_M: finished testing, system shutting down...\n");
@ -717,63 +698,76 @@ test_evfilt_read_m()
{
int flags = 0;
g_kqfd = kqueue();
/* Default rand */
int error = ioctl(g_kqfd, FKQMULTI, &flags);
if (error == -1) {
err(1, "ioctl");
}
test_socket_read(0);
test_socket_brutal("round robbin");
test_socket_brutal("rand");
close(g_kqfd);
/* test scheduler */
flags = KQ_SCHED_QUEUE;
/* Queue + bo0 */
flags = KQSCHED_MAKE(KQ_SCHED_QUEUE,0,0,0);
g_kqfd = kqueue();
error = ioctl(g_kqfd, FKQMULTI, &flags);
if (error == -1) {
err(1, "ioctl");
}
//test_socket_queue();
test_socket_brutal("queue");
test_socket_queue();
test_socket_brutal("queue0");
close(g_kqfd);
flags = KQ_SCHED_CPU;
/* CPU + Bo0 */
flags = KQSCHED_MAKE(KQ_SCHED_CPU,0,0,0);
g_kqfd = kqueue();
error = ioctl(g_kqfd, FKQMULTI, &flags);
if (error == -1) {
err(1, "ioctl");
}
test_socket_brutal("cpu");
test_socket_brutal("cpu0");
close(g_kqfd);
flags = KQ_SCHED_WS;
/* CPU + Bo1 */
flags = KQSCHED_MAKE(KQ_SCHED_CPU,1,0,0);
g_kqfd = kqueue();
error = ioctl(g_kqfd, FKQMULTI, &flags);
if (error == -1) {
err(1, "ioctl");
}
test_socket_ws();
test_socket_brutal("work stealing");
test_socket_brutal("cpu1");
close(g_kqfd);
flags = KQ_SCHED_BOT;
/* CPU + Bo2 */
flags = KQSCHED_MAKE(KQ_SCHED_CPU,2,0,0);
g_kqfd = kqueue();
error = ioctl(g_kqfd, FKQMULTI, &flags);
if (error == -1) {
err(1, "ioctl");
}
test_socket_brutal("cpu2");
close(g_kqfd);
test_socket_brutal("best of two");
/* BO2 */
flags = KQSCHED_MAKE(KQ_SCHED_BEST,2,0,0);
g_kqfd = kqueue();
error = ioctl(g_kqfd, FKQMULTI, &flags);
if (error == -1) {
err(1, "ioctl");
}
test_socket_brutal("best2");
test_socket_read(1);
close(g_kqfd);
/* WS */
flags = KQSCHED_MAKE(0,0,KQ_SCHED_FLAG_WS,1);
g_kqfd = kqueue();
error = ioctl(g_kqfd, FKQMULTI, &flags);
if (error == -1) {
err(1, "ioctl");
}
test_socket_ws();
test_socket_brutal("ws1");
close(g_kqfd);
}