44f3b09204
Switch the sleep/wakeup and condition variable implementations to use the
sleep queue interface:
- Sleep queues attempt to merge some of the benefits of both sleep queues
  and condition variables.  Having sleep queues in a hash table avoids
  having to allocate a queue head for each wait channel.  Thus, struct cv
  has shrunk down to just a single char * pointer now.  However, the hash
  table does not hold threads directly, but queue heads.  This means that
  once you have located a queue in the hash bucket, you no longer have to
  walk the rest of the hash chain looking for threads.  Instead, you have
  a list of all the threads sleeping on that wait channel (see the sketch
  after this list).
- Outside of the sleepq code and the sleep/cv code the kernel no longer
  differentiates between condition variables and sleep/wakeup.  For
  example, calls to abortsleep() and cv_abort() are replaced with a call
  to sleepq_abort().  Thus, the TDF_CVWAITQ flag is removed.  Also, calls
  to unsleep() and cv_waitq_remove() have been replaced with calls to
  sleepq_remove().
- The sched_sleep() function no longer accepts a priority argument as
  sleeps no longer inherently bump the priority.  Instead, this is solely
  a property of msleep(), which explicitly calls sched_prio() before
  blocking.
- The TDF_ONSLEEPQ flag has been dropped as it was never used.  The
  associated TDF_SET_ONSLEEPQ and TDF_CLR_ON_SLEEPQ macros have also been
  dropped and replaced with a single explicit clearing of td_wchan.
  TD_SET_ONSLEEPQ() would really have only made sense if it had taken the
  wait channel and message as arguments anyway.  Now that that only
  happens in one place, a macro would be overkill.
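To make the first point concrete, here is a minimal userland sketch of the
lookup pattern described above: hash buckets hold queue heads keyed by wait
channel, and each queue head carries the full list of sleepers.  All toy_*
names are illustrative inventions for this note, not the kernel's actual
sleepqueue(9) structures.

#include <sys/queue.h>
#include <stddef.h>
#include <stdint.h>

/* A sleeping thread, linked into its channel's queue head. */
struct toy_thread {
	LIST_ENTRY(toy_thread) t_link;
	void *t_wchan;				/* wait channel slept on */
};

/* One queue head per active wait channel, linked into a hash chain. */
struct toy_sleepq {
	LIST_ENTRY(toy_sleepq) sq_hash;		/* hash chain linkage */
	LIST_HEAD(, toy_thread) sq_blocked;	/* threads asleep on sq_wchan */
	void *sq_wchan;				/* wait channel key */
};

#define TOY_HASH_SIZE	128
#define TOY_HASH(wc)	(((uintptr_t)(wc) >> 8) % TOY_HASH_SIZE)

static LIST_HEAD(, toy_sleepq) toy_table[TOY_HASH_SIZE];

/*
 * Walk the hash chain once to find the queue head; every thread
 * sleeping on the channel is then reachable from sq_blocked without
 * scanning the rest of the chain.
 */
static struct toy_sleepq *
toy_sleepq_lookup(void *wchan)
{
	struct toy_sleepq *sq;

	LIST_FOREACH(sq, &toy_table[TOY_HASH(wchan)], sq_hash)
		if (sq->sq_wchan == wchan)
			return (sq);
	return (NULL);
}

A wakeup then costs one hash-chain walk plus a traversal of sq_blocked,
which is exactly the property the first bullet describes; a channel with no
sleepers simply has no queue head in the table.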
123 lines | 3.8 KiB | C
/*-
 * Copyright (c) 2002, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _SYS_SCHED_H_
#define _SYS_SCHED_H_

/*
 * General scheduling info.
 *
 * sched_load:
 *	Total runnable non-ithread threads in the system.
 *
 * sched_runnable:
 *	Runnable threads for this processor.
 */
int sched_load(void);
int sched_rr_interval(void);
int sched_runnable(void);

/*
 * Proc related scheduling hooks.
 */
void sched_exit(struct proc *p, struct proc *child);
void sched_fork(struct proc *p, struct proc *child);

/*
 * KSE Groups contain scheduling priority information.  They record the
 * behavior of groups of KSEs and threads.
 */
void sched_class(struct ksegrp *kg, int class);
void sched_exit_ksegrp(struct ksegrp *kg, struct ksegrp *child);
void sched_fork_ksegrp(struct ksegrp *kg, struct ksegrp *child);
void sched_nice(struct ksegrp *kg, int nice);

/*
 * Threads are switched in and out, block on resources, have temporary
 * priorities inherited from their ksegs, and use up cpu time.
 */
void sched_exit_thread(struct thread *td, struct thread *child);
void sched_fork_thread(struct thread *td, struct thread *child);
fixpt_t sched_pctcpu(struct thread *td);
void sched_prio(struct thread *td, u_char prio);
void sched_sleep(struct thread *td);
void sched_switch(struct thread *td);
void sched_userret(struct thread *td);
void sched_wakeup(struct thread *td);

/*
 * Threads are moved on and off of run queues.
 */
void sched_add(struct thread *td);
struct kse *sched_choose(void);	/* XXX Should be thread * */
void sched_clock(struct thread *td);
void sched_rem(struct thread *td);

/*
 * Binding makes cpu affinity permanent while pinning is used to temporarily
 * hold a thread on a particular CPU.
 */
void sched_bind(struct thread *td, int cpu);
static __inline void sched_pin(void);
void sched_unbind(struct thread *td);
static __inline void sched_unpin(void);

/*
 * These interfaces will eventually be removed.
 */
void sched_exit_kse(struct kse *ke, struct kse *child);
void sched_fork_kse(struct kse *ke, struct kse *child);

/*
 * These procedures tell the process data structure allocation code how
 * many bytes to actually allocate.
 */
int sched_sizeof_kse(void);
int sched_sizeof_ksegrp(void);
int sched_sizeof_proc(void);
int sched_sizeof_thread(void);

extern struct ke_sched *kse0_sched;
extern struct kg_sched *ksegrp0_sched;
extern struct p_sched *proc0_sched;
extern struct td_sched *thread0_sched;

static __inline void
sched_pin(void)
{
	curthread->td_pinned++;
}

static __inline void
sched_unpin(void)
{
	curthread->td_pinned--;
}

#endif /* !_SYS_SCHED_H_ */
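
Since sched_pin() and sched_unpin() above just adjust the per-thread
td_pinned counter, pinning nests naturally: the thread may migrate again
only once the count returns to zero.  A hedged sketch of the intended usage
pattern follows; the surrounding function is hypothetical, invented purely
for illustration.

/*
 * Hypothetical in-kernel caller of the inline pin/unpin pair above.
 * Because td_pinned is a count rather than a flag, pinned sections
 * compose: a helper that pins internally may safely be called from
 * code that is already pinned.
 */
static void
example_percpu_op(void)
{
	sched_pin();		/* td_pinned: 0 -> 1, stay on this CPU */
	/* ... touch per-CPU data safely here ... */
	sched_pin();		/* nested section, td_pinned: 1 -> 2 */
	/* ... */
	sched_unpin();		/* td_pinned: 2 -> 1, still pinned */
	sched_unpin();		/* td_pinned: 1 -> 0, migration allowed again */
}

By contrast, sched_bind() holds the thread on the given CPU until an
explicit sched_unbind(), which is the permanent/temporary distinction drawn
by the comment above those declarations.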