Style fixes.

Submitted by: Diane Bruce < db at db dot net >
This commit is contained in:
David Xu 2006-05-19 06:37:24 +00:00
parent 41cfbdeb51
commit f6c040a2c5
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=158745
4 changed files with 102 additions and 68 deletions
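
The change applied throughout the diffs below is the style(9) convention for function definitions: the return type goes on a line of its own, and the function name starts at the beginning of the next line. A minimal, self-contained sketch of the convention (the function and names here are illustrative only, not taken from the committed files):

#include <stdio.h>

/*
 * Old style: return type and name on the same line, e.g.
 *
 *     static int example_value(void) { ... }
 *
 * style(9): return type on its own line, name starting in column one,
 * which is the shape every function definition below is converted to.
 */
static int
example_value(void)
{
        return (42);
}

int
main(void)
{
        printf("%d\n", example_value());
        return (0);
}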

View File

@@ -55,7 +55,8 @@ struct ksched {
struct timespec rr_interval;
};
int ksched_attach(struct ksched **p)
int
ksched_attach(struct ksched **p)
{
struct ksched *ksched= p31b_malloc(sizeof(*ksched));
@@ -66,7 +67,8 @@ int ksched_attach(struct ksched **p)
return 0;
}
int ksched_detach(struct ksched *ks)
int
ksched_detach(struct ksched *ks)
{
p31b_free(ks);
@@ -124,8 +126,9 @@ getscheduler(register_t *ret, struct ksched *ksched, struct thread *td)
return e;
}
int ksched_setparam(register_t *ret, struct ksched *ksched,
struct thread *td, const struct sched_param *param)
int
ksched_setparam(register_t *ret, struct ksched *ksched,
struct thread *td, const struct sched_param *param)
{
register_t policy;
int e;
@@ -143,8 +146,9 @@ int ksched_setparam(register_t *ret, struct ksched *ksched,
return e;
}
int ksched_getparam(register_t *ret, struct ksched *ksched,
struct thread *td, struct sched_param *param)
int
ksched_getparam(register_t *ret, struct ksched *ksched,
struct thread *td, struct sched_param *param)
{
struct rtprio rtp;
@@ -164,8 +168,9 @@ int ksched_getparam(register_t *ret, struct ksched *ksched,
* The permissions to modify process p were checked in "p31b_proc()".
*
*/
int ksched_setscheduler(register_t *ret, struct ksched *ksched,
struct thread *td, int policy, const struct sched_param *param)
int
ksched_setscheduler(register_t *ret, struct ksched *ksched,
struct thread *td, int policy, const struct sched_param *param)
{
int e = 0;
struct rtprio rtp;
@@ -237,14 +242,16 @@ int ksched_setscheduler(register_t *ret, struct ksched *ksched,
return e;
}
int ksched_getscheduler(register_t *ret, struct ksched *ksched, struct thread *td)
int
ksched_getscheduler(register_t *ret, struct ksched *ksched, struct thread *td)
{
return getscheduler(ret, ksched, td);
}
/* ksched_yield: Yield the CPU.
*/
int ksched_yield(register_t *ret, struct ksched *ksched)
int
ksched_yield(register_t *ret, struct ksched *ksched)
{
mtx_lock_spin(&sched_lock);
curthread->td_flags |= TDF_NEEDRESCHED;
@@ -252,7 +259,8 @@ int ksched_yield(register_t *ret, struct ksched *ksched)
return 0;
}
int ksched_get_priority_max(register_t*ret, struct ksched *ksched, int policy)
int
ksched_get_priority_max(register_t*ret, struct ksched *ksched, int policy)
{
int e = 0;
@@ -274,7 +282,8 @@ int ksched_get_priority_max(register_t*ret, struct ksched *ksched, int policy)
return e;
}
int ksched_get_priority_min(register_t *ret, struct ksched *ksched, int policy)
int
ksched_get_priority_min(register_t *ret, struct ksched *ksched, int policy)
{
int e = 0;
@@ -296,8 +305,9 @@ int ksched_get_priority_min(register_t *ret, struct ksched *ksched, int policy)
return e;
}
int ksched_rr_get_interval(register_t *ret, struct ksched *ksched,
struct thread *td, struct timespec *timespec)
int
ksched_rr_get_interval(register_t *ret, struct ksched *ksched,
struct thread *td, struct timespec *timespec)
{
*timespec = ksched->rr_interval;

View File

@@ -76,7 +76,8 @@ syscall_not_present(struct thread *td, const char *s, struct nosys_args *uap)
/* Not configured but loadable via a module:
*/
static int sched_attach(void)
static int
sched_attach(void)
{
return 0;
}
@@ -96,7 +97,8 @@ SYSCALL_NOT_PRESENT_GEN(sched_rr_get_interval)
*/
static struct ksched *ksched;
static int sched_attach(void)
static int
sched_attach(void)
{
int ret = ksched_attach(&ksched);
@@ -109,8 +111,8 @@ static int sched_attach(void)
/*
* MPSAFE
*/
int sched_setparam(struct thread *td,
struct sched_setparam_args *uap)
int
sched_setparam(struct thread *td, struct sched_setparam_args *uap)
{
struct thread *targettd;
struct proc *targetp;
@@ -147,8 +149,8 @@ int sched_setparam(struct thread *td,
/*
* MPSAFE
*/
int sched_getparam(struct thread *td,
struct sched_getparam_args *uap)
int
sched_getparam(struct thread *td, struct sched_getparam_args *uap)
{
int e;
struct sched_param sched_param;
@@ -183,8 +185,8 @@ int sched_getparam(struct thread *td,
/*
* MPSAFE
*/
int sched_setscheduler(struct thread *td,
struct sched_setscheduler_args *uap)
int
sched_setscheduler(struct thread *td, struct sched_setscheduler_args *uap)
{
int e;
struct sched_param sched_param;
@@ -221,8 +223,8 @@ int sched_setscheduler(struct thread *td,
/*
* MPSAFE
*/
int sched_getscheduler(struct thread *td,
struct sched_getscheduler_args *uap)
int
sched_getscheduler(struct thread *td, struct sched_getscheduler_args *uap)
{
int e;
struct thread *targettd;
@@ -253,8 +255,8 @@ int sched_getscheduler(struct thread *td,
/*
* MPSAFE
*/
int sched_yield(struct thread *td,
struct sched_yield_args *uap)
int
sched_yield(struct thread *td, struct sched_yield_args *uap)
{
int error;
@@ -265,8 +267,9 @@ int sched_yield(struct thread *td,
/*
* MPSAFE
*/
int sched_get_priority_max(struct thread *td,
struct sched_get_priority_max_args *uap)
int
sched_get_priority_max(struct thread *td,
struct sched_get_priority_max_args *uap)
{
int error;
@@ -277,8 +280,9 @@ int sched_get_priority_max(struct thread *td,
/*
* MPSAFE
*/
int sched_get_priority_min(struct thread *td,
struct sched_get_priority_min_args *uap)
int
sched_get_priority_min(struct thread *td,
struct sched_get_priority_min_args *uap)
{
int error;
@@ -289,8 +293,9 @@ int sched_get_priority_min(struct thread *td,
/*
* MPSAFE
*/
int sched_rr_get_interval(struct thread *td,
struct sched_rr_get_interval_args *uap)
int
sched_rr_get_interval(struct thread *td,
struct sched_rr_get_interval_args *uap)
{
struct timespec timespec;
int error;
@@ -301,7 +306,8 @@ int sched_rr_get_interval(struct thread *td,
return (error);
}
int kern_sched_rr_get_interval(struct thread *td, pid_t pid,
int
kern_sched_rr_get_interval(struct thread *td, pid_t pid,
struct timespec *ts)
{
int e;
@@ -330,7 +336,8 @@ int kern_sched_rr_get_interval(struct thread *td, pid_t pid,
#endif
static void p31binit(void *notused)
static void
p31binit(void *notused)
{
(void) sched_attach();
p31b_setcfg(CTL_P1003_1B_PAGESIZE, PAGE_SIZE);

View File

@@ -55,7 +55,8 @@ struct ksched {
struct timespec rr_interval;
};
int ksched_attach(struct ksched **p)
int
ksched_attach(struct ksched **p)
{
struct ksched *ksched= p31b_malloc(sizeof(*ksched));
@@ -66,7 +67,8 @@ int ksched_attach(struct ksched **p)
return 0;
}
int ksched_detach(struct ksched *ks)
int
ksched_detach(struct ksched *ks)
{
p31b_free(ks);
@@ -124,8 +126,9 @@ getscheduler(register_t *ret, struct ksched *ksched, struct thread *td)
return e;
}
int ksched_setparam(register_t *ret, struct ksched *ksched,
struct thread *td, const struct sched_param *param)
int
ksched_setparam(register_t *ret, struct ksched *ksched,
struct thread *td, const struct sched_param *param)
{
register_t policy;
int e;
@@ -143,8 +146,9 @@ int ksched_setparam(register_t *ret, struct ksched *ksched,
return e;
}
int ksched_getparam(register_t *ret, struct ksched *ksched,
struct thread *td, struct sched_param *param)
int
ksched_getparam(register_t *ret, struct ksched *ksched,
struct thread *td, struct sched_param *param)
{
struct rtprio rtp;
@@ -164,8 +168,9 @@ int ksched_getparam(register_t *ret, struct ksched *ksched,
* The permissions to modify process p were checked in "p31b_proc()".
*
*/
int ksched_setscheduler(register_t *ret, struct ksched *ksched,
struct thread *td, int policy, const struct sched_param *param)
int
ksched_setscheduler(register_t *ret, struct ksched *ksched,
struct thread *td, int policy, const struct sched_param *param)
{
int e = 0;
struct rtprio rtp;
@@ -237,14 +242,16 @@ int ksched_setscheduler(register_t *ret, struct ksched *ksched,
return e;
}
int ksched_getscheduler(register_t *ret, struct ksched *ksched, struct thread *td)
int
ksched_getscheduler(register_t *ret, struct ksched *ksched, struct thread *td)
{
return getscheduler(ret, ksched, td);
}
/* ksched_yield: Yield the CPU.
*/
int ksched_yield(register_t *ret, struct ksched *ksched)
int
ksched_yield(register_t *ret, struct ksched *ksched)
{
mtx_lock_spin(&sched_lock);
curthread->td_flags |= TDF_NEEDRESCHED;
@@ -252,7 +259,8 @@ int ksched_yield(register_t *ret, struct ksched *ksched)
return 0;
}
int ksched_get_priority_max(register_t*ret, struct ksched *ksched, int policy)
int
ksched_get_priority_max(register_t*ret, struct ksched *ksched, int policy)
{
int e = 0;
@@ -274,7 +282,8 @@ int ksched_get_priority_max(register_t*ret, struct ksched *ksched, int policy)
return e;
}
int ksched_get_priority_min(register_t *ret, struct ksched *ksched, int policy)
int
ksched_get_priority_min(register_t *ret, struct ksched *ksched, int policy)
{
int e = 0;
@@ -296,8 +305,9 @@ int ksched_get_priority_min(register_t *ret, struct ksched *ksched, int policy)
return e;
}
int ksched_rr_get_interval(register_t *ret, struct ksched *ksched,
struct thread *td, struct timespec *timespec)
int
ksched_rr_get_interval(register_t *ret, struct ksched *ksched,
struct thread *td, struct timespec *timespec)
{
*timespec = ksched->rr_interval;

View File

@@ -76,7 +76,8 @@ syscall_not_present(struct thread *td, const char *s, struct nosys_args *uap)
/* Not configured but loadable via a module:
*/
static int sched_attach(void)
static int
sched_attach(void)
{
return 0;
}
@@ -96,7 +97,8 @@ SYSCALL_NOT_PRESENT_GEN(sched_rr_get_interval)
*/
static struct ksched *ksched;
static int sched_attach(void)
static int
sched_attach(void)
{
int ret = ksched_attach(&ksched);
@@ -109,8 +111,8 @@ static int sched_attach(void)
/*
* MPSAFE
*/
int sched_setparam(struct thread *td,
struct sched_setparam_args *uap)
int
sched_setparam(struct thread *td, struct sched_setparam_args *uap)
{
struct thread *targettd;
struct proc *targetp;
@@ -147,8 +149,8 @@ int sched_setparam(struct thread *td,
/*
* MPSAFE
*/
int sched_getparam(struct thread *td,
struct sched_getparam_args *uap)
int
sched_getparam(struct thread *td, struct sched_getparam_args *uap)
{
int e;
struct sched_param sched_param;
@@ -183,8 +185,8 @@ int sched_getparam(struct thread *td,
/*
* MPSAFE
*/
int sched_setscheduler(struct thread *td,
struct sched_setscheduler_args *uap)
int
sched_setscheduler(struct thread *td, struct sched_setscheduler_args *uap)
{
int e;
struct sched_param sched_param;
@@ -221,8 +223,8 @@ int sched_setscheduler(struct thread *td,
/*
* MPSAFE
*/
int sched_getscheduler(struct thread *td,
struct sched_getscheduler_args *uap)
int
sched_getscheduler(struct thread *td, struct sched_getscheduler_args *uap)
{
int e;
struct thread *targettd;
@@ -253,8 +255,8 @@ int sched_getscheduler(struct thread *td,
/*
* MPSAFE
*/
int sched_yield(struct thread *td,
struct sched_yield_args *uap)
int
sched_yield(struct thread *td, struct sched_yield_args *uap)
{
int error;
@@ -265,8 +267,9 @@ int sched_yield(struct thread *td,
/*
* MPSAFE
*/
int sched_get_priority_max(struct thread *td,
struct sched_get_priority_max_args *uap)
int
sched_get_priority_max(struct thread *td,
struct sched_get_priority_max_args *uap)
{
int error;
@@ -277,8 +280,9 @@ int sched_get_priority_max(struct thread *td,
/*
* MPSAFE
*/
int sched_get_priority_min(struct thread *td,
struct sched_get_priority_min_args *uap)
int
sched_get_priority_min(struct thread *td,
struct sched_get_priority_min_args *uap)
{
int error;
@@ -289,8 +293,9 @@ int sched_get_priority_min(struct thread *td,
/*
* MPSAFE
*/
int sched_rr_get_interval(struct thread *td,
struct sched_rr_get_interval_args *uap)
int
sched_rr_get_interval(struct thread *td,
struct sched_rr_get_interval_args *uap)
{
struct timespec timespec;
int error;
@@ -301,7 +306,8 @@ int sched_rr_get_interval(struct thread *td,
return (error);
}
int kern_sched_rr_get_interval(struct thread *td, pid_t pid,
int
kern_sched_rr_get_interval(struct thread *td, pid_t pid,
struct timespec *ts)
{
int e;
@@ -330,7 +336,8 @@ int kern_sched_rr_get_interval(struct thread *td, pid_t pid,
#endif
static void p31binit(void *notused)
static void
p31binit(void *notused)
{
(void) sched_attach();
p31b_setcfg(CTL_P1003_1B_PAGESIZE, PAGE_SIZE);
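
For context, the kernel routines touched in these files back the POSIX.1b scheduling syscalls (sched_setparam, sched_getscheduler, sched_rr_get_interval, and friends). A minimal userland sketch of that interface follows, assuming a system that provides the POSIX scheduling option and a process privileged enough to select a realtime policy; it is illustrative usage only, not part of the commit:

#include <errno.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>
#include <time.h>

int
main(void)
{
        struct sched_param sp;
        struct timespec ts;
        int max, min;

        /* Query the priority range for the round-robin policy. */
        max = sched_get_priority_max(SCHED_RR);
        min = sched_get_priority_min(SCHED_RR);
        printf("SCHED_RR priority range: %d .. %d\n", min, max);

        /* Try to switch the calling process (pid 0) to SCHED_RR. */
        memset(&sp, 0, sizeof(sp));
        sp.sched_priority = min;
        if (sched_setscheduler(0, SCHED_RR, &sp) == -1)
                printf("sched_setscheduler: %s\n", strerror(errno));

        /* Report the round-robin quantum for the calling process. */
        if (sched_rr_get_interval(0, &ts) == 0)
                printf("RR quantum: %ld.%09ld s\n",
                    (long)ts.tv_sec, (long)ts.tv_nsec);
        return (0);
}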