Revert the previous round of changes to td_pinned. The scheduler isn't
fully initialized when the pmap layer tries to call sched_pin() early in
the boot and results in a quick panic. Use ke_pinned instead as was
originally done with Tor's patch.

Approved by: julian
commit 1e56230631
parent a2b00744ab
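The mechanics behind the message: sched_pin() and sched_unpin() bump and drop a
per-thread nesting counter, and a thread may not migrate to another CPU while
the count is nonzero. The two placements of that counter differ in whose data
they touch. A minimal side-by-side sketch, with both bodies taken verbatim from
the hunks below (not compilable as-is, since it shows both variants of the same
function):

/*
 * Scheduler-private counter: sched_pin() is an ordinary function and
 * the count lives in the scheduler's per-thread data, reached through
 * curthread->td_sched.
 */
void
sched_pin(void)
{
	curthread->td_sched->ke_pinned++;
}

/*
 * Thread-local counter: the count lives directly in struct thread, so
 * sched_pin() can be a static __inline that touches no scheduler state
 * and works as soon as curthread is valid.
 */
static __inline void
sched_pin(void)
{
	curthread->td_pinned++;
}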
@@ -807,7 +807,7 @@ pmap_pte_quick(pmap_t pmap, vm_offset_t va)
 	if (pmap_is_current(pmap))
 		return (vtopte(va));
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-	KASSERT(sched_ispinned(), ("curthread not pinned"));
+	KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
 	newpf = *pde & PG_FRAME;
 	if ((*PMAP1 & PG_FRAME) != newpf) {
 		*PMAP1 = newpf | PG_RW | PG_V | PG_A | PG_M;
@@ -1622,7 +1622,7 @@ pmap_remove_page(pmap_t pmap, vm_offset_t va)
 	pt_entry_t *pte;
 
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-	KASSERT(sched_ispinned(), ("curthread not pinned"));
+	KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 	if ((pte = pmap_pte_quick(pmap, va)) == NULL || *pte == 0)
 		return;
@@ -87,7 +87,6 @@ struct kse {
 	} ke_state;			/* (j) KSE status. */
 	int		ke_cpticks;	/* (j) Ticks of cpu time. */
 	struct runq	*ke_runq;	/* runq the kse is currently on */
-	int		ke_pinned;	/* (k) nested count, pinned to a cpu */
 };
 
 #define ke_proc		ke_thread->td_proc
@@ -125,7 +124,7 @@ struct kg_sched {
  * cpus.
  */
 #define	KSE_CAN_MIGRATE(ke)						\
-	((ke)->ke_pinned == 0 && ((ke)->ke_flags & KEF_BOUND) == 0)
+	((ke)->ke_thread->td_pinned == 0 && ((ke)->ke_flags & KEF_BOUND) == 0)
 
 static struct kse kse0;
 static struct kg_sched kg_sched0;
@@ -1172,26 +1171,5 @@ sched_pctcpu(struct thread *td)
 
 	return (0);
 }
-
-void
-sched_pin(void)
-{
-	curthread->td_sched->ke_pinned++;
-}
-
-void
-sched_unpin(void)
-{
-	curthread->td_sched->ke_pinned--;
-}
-
-#ifdef INVARIANTS
-int
-sched_ispinned(void)
-{
-	return (curthread->td_sched->ke_pinned);
-}
-#endif
-
 #define	KERN_SWITCH_INCLUDE 1
 #include "kern/kern_switch.c"
@@ -112,7 +112,6 @@ struct kse {
 		KES_ONRUNQ
 	} ke_state;		/* (j) thread sched specific status. */
 	int		ke_slptime;
-	int		ke_pinned;	/* (k) nested count.. pinned to a cpu */
 	int		ke_slice;
 	struct runq	*ke_runq;
 	u_char		ke_cpu;		/* CPU that we have affinity for. */
@@ -344,10 +343,10 @@ static struct kse *kseq_steal(struct kseq *kseq, int stealidle);
  */
 #ifdef __i386__
 #define	KSE_CAN_MIGRATE(ke, class)					\
-	((ke)->ke_pinned == 0 && ((ke)->ke_flags & KEF_BOUND) == 0)
+	((ke)->ke_thread->td_pinned == 0 && ((ke)->ke_flags & KEF_BOUND) == 0)
 #else /* !__i386__ */
 #define	KSE_CAN_MIGRATE(ke, class)					\
-	((class) != PRI_ITHD && (ke)->ke_pinned == 0 &&			\
+	((class) != PRI_ITHD && (ke)->ke_thread->td_pinned == 0 &&	\
 	((ke)->ke_flags & KEF_BOUND) == 0)
 #endif /* !__i386__ */
 #endif
@@ -1902,26 +1901,5 @@ sched_sizeof_thread(void)
 {
 	return (sizeof(struct thread) + sizeof(struct td_sched));
 }
-
-void
-sched_pin(void)
-{
-	curthread->td_sched->ke_pinned++;
-}
-
-void
-sched_unpin(void)
-{
-	curthread->td_sched->ke_pinned--;
-}
-
-#ifdef INVARIANTS
-int
-sched_ispinned(void)
-{
-	return (curthread->td_sched->ke_pinned);
-}
-#endif
-
 #define	KERN_SWITCH_INCLUDE 1
 #include "kern/kern_switch.c"
@@ -268,6 +268,7 @@ struct thread {
 	LIST_HEAD(, turnstile) td_contested;	/* (q) Contested locks. */
 	struct lock_list_entry *td_sleeplocks;	/* (k) Held sleep locks. */
 	int		td_intr_nesting_level;	/* (k) Interrupt recursion. */
+	int		td_pinned;	/* (k) Temporary cpu pin count. */
 	struct kse_thr_mailbox *td_mailbox;	/* (*) Userland mailbox address. */
 	struct ucred	*td_ucred;	/* (k) Reference to credentials. */
 	struct thread	*td_standin;	/* (*) Use this for an upcall. */
@@ -82,14 +82,10 @@ void	sched_rem(struct thread *td);
  * hold a thread on a particular CPU.
  */
 void	sched_bind(struct thread *td, int cpu);
+static __inline void sched_pin(void);
 void	sched_unbind(struct thread *td);
+static __inline void sched_unpin(void);
 
-/* these only work for curthread */
-void	sched_pin(void);
-void	sched_unpin(void);
-#ifdef INVARIANTS
-int	sched_ispinned(void);
-#endif
 
 /*
  * These procedures tell the process data structure allocation code how
@@ -99,6 +95,18 @@ int	sched_sizeof_ksegrp(void);
 int	sched_sizeof_proc(void);
 int	sched_sizeof_thread(void);
+
+static __inline void
+sched_pin(void)
+{
+	curthread->td_pinned++;
+}
+
+static __inline void
+sched_unpin(void)
+{
+	curthread->td_pinned--;
+}
 
 /* temporarily here */
 void	schedinit(void);
 void	sched_destroyproc(struct proc *p);
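A note on the pmap assertions above: pmap_pte_quick() uses the PMAP1 transient
mapping (visible in the first hunk), so its KASSERT checks that the caller is
already pinned rather than taking the pin itself. A hedged sketch of the
expected caller pattern — illustrative only, with the vm_page_queue_mtx and
pmap locking elided; only sched_pin(), sched_unpin(), and pmap_pte_quick() are
taken from the diff:

	pt_entry_t *pte;

	sched_pin();			/* pin count > 0: no CPU migration */
	pte = pmap_pte_quick(pmap, va);	/* satisfies the KASSERT above */
	/* ... examine or update *pte while still pinned ... */
	sched_unpin();			/* drop the pin on the way out */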