From 8deb66c7d1f6bd6a065e846b389e1e3331a921ea Mon Sep 17 00:00:00 2001
From: julian
Date: Fri, 3 Jan 2003 20:55:52 +0000
Subject: [PATCH] White space fixes

---
 sys/kern/kern_kse.c    | 20 ++++++++++----------
 sys/kern/kern_thread.c | 20 ++++++++++----------
 2 files changed, 20 insertions(+), 20 deletions(-)

diff --git a/sys/kern/kern_kse.c b/sys/kern/kern_kse.c
index 492df0d159ce..b1e0abd5af37 100644
--- a/sys/kern/kern_kse.c
+++ b/sys/kern/kern_kse.c
@@ -1297,7 +1297,7 @@ thread_user_enter(struct proc *p, struct thread *td)
 	 * when thread limit reached, act like that the thread
 	 * has already done an upcall.
 	 */
-	if (p->p_numthreads > max_threads_per_proc) {
+	if (p->p_numthreads > max_threads_per_proc) {
 		if (td->td_standin != NULL) {
 			thread_stash(td->td_standin);
 			td->td_standin = NULL;
@@ -1352,10 +1352,10 @@ thread_userret(struct thread *td, struct trapframe *frame)
 	unbound = TD_IS_UNBOUND(td);
 
 	mtx_lock_spin(&sched_lock);
-	if ((worktodo = kg->kg_last_assigned))
-		worktodo = TAILQ_NEXT(worktodo, td_runq);
-	else
-		worktodo = TAILQ_FIRST(&kg->kg_runq);
+	if ((worktodo = kg->kg_last_assigned))
+		worktodo = TAILQ_NEXT(worktodo, td_runq);
+	else
+		worktodo = TAILQ_FIRST(&kg->kg_runq);
 
 	/*
 	 * Permanently bound threads never upcall but they may
@@ -1402,10 +1402,10 @@ thread_userret(struct thread *td, struct trapframe *frame)
 		td->td_flags |= TDF_UPCALLING;
 
 		/* there may be more work since we re-locked schedlock */
-		if ((worktodo = kg->kg_last_assigned))
-			worktodo = TAILQ_NEXT(worktodo, td_runq);
-		else
-			worktodo = TAILQ_FIRST(&kg->kg_runq);
+		if ((worktodo = kg->kg_last_assigned))
+			worktodo = TAILQ_NEXT(worktodo, td_runq);
+		else
+			worktodo = TAILQ_FIRST(&kg->kg_runq);
 	} else if (unbound) {
 		/*
 		 * We are an unbound thread, looking to
@@ -1553,7 +1553,7 @@ thread_userret(struct thread *td, struct trapframe *frame)
 bad:
 	/*
 	 * Things are going to be so screwed we should just kill the process.
-	 * how do we do that?
+	 * how do we do that?
 	 */
 	PROC_LOCK(td->td_proc);
 	psignal(td->td_proc, SIGSEGV);
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index 492df0d159ce..b1e0abd5af37 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -1297,7 +1297,7 @@ thread_user_enter(struct proc *p, struct thread *td)
 	 * when thread limit reached, act like that the thread
 	 * has already done an upcall.
 	 */
-	if (p->p_numthreads > max_threads_per_proc) {
+	if (p->p_numthreads > max_threads_per_proc) {
 		if (td->td_standin != NULL) {
 			thread_stash(td->td_standin);
 			td->td_standin = NULL;
@@ -1352,10 +1352,10 @@ thread_userret(struct thread *td, struct trapframe *frame)
 	unbound = TD_IS_UNBOUND(td);
 
 	mtx_lock_spin(&sched_lock);
-	if ((worktodo = kg->kg_last_assigned))
-		worktodo = TAILQ_NEXT(worktodo, td_runq);
-	else
-		worktodo = TAILQ_FIRST(&kg->kg_runq);
+	if ((worktodo = kg->kg_last_assigned))
+		worktodo = TAILQ_NEXT(worktodo, td_runq);
+	else
+		worktodo = TAILQ_FIRST(&kg->kg_runq);
 
 	/*
 	 * Permanently bound threads never upcall but they may
@@ -1402,10 +1402,10 @@ thread_userret(struct thread *td, struct trapframe *frame)
 		td->td_flags |= TDF_UPCALLING;
 
 		/* there may be more work since we re-locked schedlock */
-		if ((worktodo = kg->kg_last_assigned))
-			worktodo = TAILQ_NEXT(worktodo, td_runq);
-		else
-			worktodo = TAILQ_FIRST(&kg->kg_runq);
+		if ((worktodo = kg->kg_last_assigned))
+			worktodo = TAILQ_NEXT(worktodo, td_runq);
+		else
+			worktodo = TAILQ_FIRST(&kg->kg_runq);
 	} else if (unbound) {
 		/*
 		 * We are an unbound thread, looking to
@@ -1553,7 +1553,7 @@ thread_userret(struct thread *td, struct trapframe *frame)
 bad:
 	/*
 	 * Things are going to be so screwed we should just kill the process.
-	 * how do we do that?
+	 * how do we do that?
 	 */
 	PROC_LOCK(td->td_proc);
 	psignal(td->td_proc, SIGSEGV);