- Add a pinned count to the thread so that cpu pinning may nest.  This is
  not in scheduler specific data because eventually it will be required by
  all schedulers.
- Implement sched_pin and unpin as inlines for now.  If a scheduler needs to
  do something more complicated than adjusting the pinned count, we can move
  this into a function later in an API-compatible way.
commit f5b5faded5 (parent be32bf8001)
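The point of a count rather than a flag is that pinning nests: independent code paths can each pin the running thread without coordinating, and the thread only becomes migratable again when the outermost caller unpins. A minimal sketch of that usage, assuming the interface exactly as committed here (the callers outer_work and inner_work are hypothetical, for illustration only):

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/sched.h>

/* Hypothetical caller, for illustration only. */
static void
inner_work(struct thread *td)
{
	sched_pin(td);		/* td_pinned: 1 -> 2 */
	/* ... touch per-CPU data ... */
	sched_unpin(td);	/* td_pinned: 2 -> 1, still pinned */
}

/* Hypothetical caller, for illustration only. */
static void
outer_work(struct thread *td)
{
	sched_pin(td);		/* td_pinned: 0 -> 1, migration disabled */
	inner_work(td);		/* free to pin and unpin on its own */
	/* still guaranteed to be on the same CPU here */
	sched_unpin(td);	/* td_pinned: 1 -> 0, migration allowed */
}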
@@ -284,6 +284,7 @@ struct thread {
 	LIST_HEAD(, mtx) td_contested;	/* (j) Contested locks. */
 	struct lock_list_entry *td_sleeplocks; /* (k) Held sleep locks. */
 	int td_intr_nesting_level;	/* (k) Interrupt recursion. */
+	int td_pinned;			/* (k) Temporary cpu pin count. */
 	struct kse_thr_mailbox *td_mailbox; /* (*) Userland mailbox address */
 	struct ucred *td_ucred;		/* (k) Reference to credentials. */
 	struct thread *td_standin;	/* (*) Use this for an upcall */
@@ -76,9 +76,9 @@ void sched_rem(struct thread *td);
  * hold a thread on a particular CPU.
  */
 void	sched_bind(struct thread *td, int cpu);
-void	sched_pin(struct thread *td);
+static __inline void sched_pin(struct thread *td);
 void	sched_unbind(struct thread *td);
-void	sched_unpin(struct thread *td);
+static __inline void sched_unpin(struct thread *td);
 
 /*
  * These interfaces will eventually be removed.
@@ -100,4 +100,16 @@ extern struct kg_sched *ksegrp0_sched;
 extern struct p_sched *proc0_sched;
 extern struct td_sched *thread0_sched;
 
+static __inline void
+sched_pin(struct thread *td)
+{
+	td->td_pinned++;
+}
+
+static __inline void
+sched_unpin(struct thread *td)
+{
+	td->td_pinned--;
+}
+
 #endif /* !_SYS_SCHED_H_ */
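Note that the inlines do no sanity checking, so an unbalanced sched_unpin silently drives the count negative. A conceivable hardened variant, sketched here as an assumption and not part of this commit, would assert balance under INVARIANTS:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>

/* Hypothetical debug variant, not in this commit. */
static __inline void
sched_unpin(struct thread *td)
{

	KASSERT(td->td_pinned > 0, ("sched_unpin: thread not pinned"));
	td->td_pinned--;
}

Because callers use ordinary function-call syntax either way, a change like this, or the move to an out-of-line function that the commit message anticipates, would not require touching any call sites.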