Add a scheduler API, sched_relinquish(), used to implement the
yield() and sched_yield() syscalls.  Every scheduler has its own
way to relinquish the CPU: the ULE and CORE schedulers keep two
internal run-queues, and a timesharing thread that calls yield()
should be moved to the inactive queue.
David Xu 2006-06-15 06:37:39 +00:00
parent 2053c12705
commit 36ec198bd5
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=159630
6 changed files with 46 additions and 6 deletions
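
For illustration only (not part of this commit's hunks): a minimal sketch of how a
syscall handler such as yield() might consume the new API, replacing the old pattern
of setting TDF_NEEDRESCHED by hand. The yield_args structure and td_retval usage are
assumed from the usual FreeBSD syscall convention and are not taken from this diff.

/*
 * Hypothetical sketch: yield the CPU through the new sched_relinquish()
 * API.  The real yield()/sched_yield() implementations are not shown in
 * the hunks below; this only illustrates the intended call pattern.
 */
int
yield(struct thread *td, struct yield_args *uap)
{

	sched_relinquish(td);		/* drop priority/queue and switch */
	td->td_retval[0] = 0;
	return (0);
}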


@@ -253,9 +253,7 @@ ksched_getscheduler(register_t *ret, struct ksched *ksched, struct thread *td)
 int
 ksched_yield(register_t *ret, struct ksched *ksched)
 {
-	mtx_lock_spin(&sched_lock);
-	curthread->td_flags |= TDF_NEEDRESCHED;
-	mtx_unlock_spin(&sched_lock);
+	sched_relinquish(curthread);
 	return 0;
 }


@@ -1354,6 +1354,19 @@ sched_is_bound(struct thread *td)
 	return (td->td_kse->ke_flags & KEF_BOUND);
 }
 
+void
+sched_relinquish(struct thread *td)
+{
+	struct ksegrp *kg;
+
+	kg = td->td_ksegrp;
+	mtx_lock_spin(&sched_lock);
+	if (kg->kg_pri_class == PRI_TIMESHARE)
+		sched_prio(td, PRI_MAX_TIMESHARE);
+	mi_switch(SW_VOL, NULL);
+	mtx_unlock_spin(&sched_lock);
+}
+
 int
 sched_load(void)
 {
@@ -1365,11 +1378,13 @@ sched_sizeof_ksegrp(void)
{
	return (sizeof(struct ksegrp) + sizeof(struct kg_sched));
}

int
sched_sizeof_proc(void)
{
	return (sizeof(struct proc));
}

int
sched_sizeof_thread(void)
{


@@ -2310,6 +2310,21 @@ sched_load(void)
 #endif
 }
 
+void
+sched_relinquish(struct thread *td)
+{
+	struct ksegrp *kg;
+
+	kg = td->td_ksegrp;
+	mtx_lock_spin(&sched_lock);
+	if (sched_is_timeshare(kg)) {
+		sched_prio(td, PRI_MAX_TIMESHARE);
+		td->td_kse->ke_flags |= KEF_NEXTRQ;
+	}
+	mi_switch(SW_VOL, NULL);
+	mtx_unlock_spin(&sched_lock);
+}
+
 int
 sched_sizeof_ksegrp(void)
 {


@@ -1974,6 +1974,19 @@ sched_is_bound(struct thread *td)
 	return (td->td_kse->ke_flags & KEF_BOUND);
 }
 
+void
+sched_relinquish(struct thread *td)
+{
+	struct ksegrp *kg;
+
+	kg = td->td_ksegrp;
+	mtx_lock_spin(&sched_lock);
+	if (kg->kg_pri_class == PRI_TIMESHARE)
+		sched_prio(td, PRI_MAX_TIMESHARE);
+	mi_switch(SW_VOL, NULL);
+	mtx_unlock_spin(&sched_lock);
+}
+
 int
 sched_load(void)
 {


@@ -253,9 +253,7 @@ ksched_getscheduler(register_t *ret, struct ksched *ksched, struct thread *td)
 int
 ksched_yield(register_t *ret, struct ksched *ksched)
 {
-	mtx_lock_spin(&sched_lock);
-	curthread->td_flags |= TDF_NEEDRESCHED;
-	mtx_unlock_spin(&sched_lock);
+	sched_relinquish(curthread);
 	return 0;
 }


@@ -79,6 +79,7 @@ void	sched_add(struct thread *td, int flags);
 void	sched_clock(struct thread *td);
 void	sched_rem(struct thread *td);
 void	sched_tick(void);
+void	sched_relinquish(struct thread *td);
 
 /*
  * Binding makes cpu affinity permanent while pinning is used to temporarily
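
The userland-visible entry point these kernel changes serve is the standard POSIX
sched_yield(3) call; a trivial test program along these lines (not part of this
commit) exercises the path that now ends up in sched_relinquish():

#include <sched.h>
#include <stdio.h>

int
main(void)
{
	/*
	 * Ask the kernel to relinquish the CPU; for a timesharing thread
	 * the scheduler may move it to its inactive run-queue.
	 */
	if (sched_yield() != 0)
		perror("sched_yield");
	return (0);
}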