Finish cpuset_getaffinity() after f35093f8
Split cpuset_getaffinity() into two counterparts: user_cpuset_getaffinity()
is intended to operate on a cpuset_t in user VA, while
kern_cpuset_getaffinity() expects the cpuset in kernel VA. Accordingly, the
code that clears the high bits is moved to user_cpuset_getaffinity(); the
Linux sched_getaffinity() syscall returns the size of the set copied out to
user space, and the glibc wrapper then clears the high bits itself.

MFC after:	2 weeks
commit d46174cd88
parent 31d1b816fe
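
As a rough illustration of the new split (not part of the commit), an in-kernel consumer now hands kern_cpuset_getaffinity() a kernel-VA cpuset_t and performs any copyout itself, much as the Linuxulator hunk below does; the example_thread_affinity() helper is a hypothetical name.

#include <sys/param.h>
#include <sys/cpuset.h>
#include <sys/syscallsubr.h>

/*
 * Hypothetical helper: fetch a thread's affinity into a caller-supplied
 * kernel-VA mask.  No cpuset_copy_cb is involved on this path.
 */
static int
example_thread_affinity(struct thread *td, id_t tid, cpuset_t *mask)
{

	return (kern_cpuset_getaffinity(td, CPU_LEVEL_WHICH, CPU_WHICH_TID,
	    tid, sizeof(cpuset_t), mask));
}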
--- a/sys/compat/freebsd32/freebsd32_misc.c
+++ b/sys/compat/freebsd32/freebsd32_misc.c
@@ -3373,7 +3373,7 @@ freebsd32_cpuset_getaffinity(struct thread *td,
     struct freebsd32_cpuset_getaffinity_args *uap)
 {
 
-	return (kern_cpuset_getaffinity(td, uap->level, uap->which,
+	return (user_cpuset_getaffinity(td, uap->level, uap->which,
 	    PAIR32TO64(id_t,uap->id), uap->cpusetsize, uap->mask,
 	    &cpuset_copy32_cb));
 }
--- a/sys/compat/linux/linux_misc.c
+++ b/sys/compat/linux/linux_misc.c
@@ -2241,11 +2241,6 @@ linux_sched_getparam(struct thread *td,
 	return (error);
 }
 
-static const struct cpuset_copy_cb copy_set = {
-	.cpuset_copyin = copyin,
-	.cpuset_copyout = copyout
-};
-
 /*
  * Get affinity of a process.
  */
@@ -2254,6 +2249,8 @@ linux_sched_getaffinity(struct thread *td,
     struct linux_sched_getaffinity_args *args)
 {
 	struct thread *tdt;
+	cpuset_t *mask;
+	size_t size;
 	int error;
 	id_t tid;
 
@@ -2263,13 +2260,17 @@ linux_sched_getaffinity(struct thread *td,
 	tid = tdt->td_tid;
 	PROC_UNLOCK(tdt->td_proc);
 
+	mask = malloc(sizeof(cpuset_t), M_LINUX, M_WAITOK | M_ZERO);
+	size = min(args->len, sizeof(cpuset_t));
 	error = kern_cpuset_getaffinity(td, CPU_LEVEL_WHICH, CPU_WHICH_TID,
-	    tid, args->len, (cpuset_t *)args->user_mask_ptr, &copy_set);
+	    tid, size, mask);
 	if (error == ERANGE)
 		error = EINVAL;
 	if (error == 0)
-		td->td_retval[0] = min(args->len, sizeof(cpuset_t));
-
+		error = copyout(mask, args->user_mask_ptr, size);
+	if (error == 0)
+		td->td_retval[0] = size;
+	free(mask, M_LINUX);
 	return (error);
 }
 
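For reference, a small userland sketch (assuming Linux with glibc, not part of this change) of the convention the hunk above serves: the raw sched_getaffinity() syscall returns the number of bytes copied out, while the glibc wrapper clears the high bits and reports plain success or failure.

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int
main(void)
{
	cpu_set_t set;

	/* glibc wrapper: 0 on success; the kernel-reported size is hidden. */
	if (sched_getaffinity(0, sizeof(set), &set) != 0) {
		perror("sched_getaffinity");
		return (1);
	}
	printf("CPUs in affinity set: %d\n", CPU_COUNT(&set));
	return (0);
}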
--- a/sys/kern/kern_cpuset.c
+++ b/sys/kern/kern_cpuset.c
@@ -1888,29 +1888,26 @@ int
 sys_cpuset_getaffinity(struct thread *td, struct cpuset_getaffinity_args *uap)
 {
 
-	return (kern_cpuset_getaffinity(td, uap->level, uap->which,
+	return (user_cpuset_getaffinity(td, uap->level, uap->which,
 	    uap->id, uap->cpusetsize, uap->mask, &copy_set));
 }
 
 int
 kern_cpuset_getaffinity(struct thread *td, cpulevel_t level, cpuwhich_t which,
-    id_t id, size_t cpusetsize, cpuset_t *maskp, const struct cpuset_copy_cb *cb)
+    id_t id, size_t cpusetsize, cpuset_t *mask)
 {
 	struct thread *ttd;
 	struct cpuset *nset;
 	struct cpuset *set;
 	struct proc *p;
-	cpuset_t *mask;
 	int error;
-	size_t size;
 
 	error = cpuset_check_capabilities(td, level, which, id);
 	if (error != 0)
 		return (error);
-	mask = malloc(sizeof(cpuset_t), M_TEMP, M_WAITOK | M_ZERO);
 	error = cpuset_which(which, id, &p, &ttd, &set);
-	if (error)
-		goto out;
+	if (error != 0)
+		return (error);
 	switch (level) {
 	case CPU_LEVEL_ROOT:
 	case CPU_LEVEL_CPUSET:
@@ -1928,8 +1925,7 @@ kern_cpuset_getaffinity(struct thread *td, cpulevel_t level, cpuwhich_t which,
 	case CPU_WHICH_INTRHANDLER:
 	case CPU_WHICH_ITHREAD:
 	case CPU_WHICH_DOMAIN:
-		error = EINVAL;
-		goto out;
+		return (EINVAL);
 	}
 	if (level == CPU_LEVEL_ROOT)
 		nset = cpuset_refroot(set);
@@ -1978,11 +1974,28 @@ kern_cpuset_getaffinity(struct thread *td, cpulevel_t level, cpuwhich_t which,
 	if (p)
 		PROC_UNLOCK(p);
 	if (error == 0) {
-		if (cpusetsize < howmany(CPU_FLS(mask), NBBY)) {
-			error = ERANGE;
-			goto out;
-		}
-		size = min(cpusetsize, sizeof(cpuset_t));
+		if (cpusetsize < howmany(CPU_FLS(mask), NBBY))
+			return (ERANGE);
+#ifdef KTRACE
+		if (KTRPOINT(td, KTR_STRUCT))
+			ktrcpuset(mask, cpusetsize);
+#endif
+	}
+	return (error);
+}
+
+int
+user_cpuset_getaffinity(struct thread *td, cpulevel_t level, cpuwhich_t which,
+    id_t id, size_t cpusetsize, cpuset_t *maskp, const struct cpuset_copy_cb *cb)
+{
+	cpuset_t *mask;
+	size_t size;
+	int error;
+
+	mask = malloc(sizeof(cpuset_t), M_TEMP, M_WAITOK | M_ZERO);
+	size = min(cpusetsize, sizeof(cpuset_t));
+	error = kern_cpuset_getaffinity(td, level, which, id, size, mask);
+	if (error == 0) {
 		error = cb->cpuset_copyout(mask, maskp, size);
 		if (error != 0)
 			goto out;
@@ -2003,10 +2016,6 @@ kern_cpuset_getaffinity(struct thread *td, cpulevel_t level, cpuwhich_t which,
 				cp++;
 			}
 		}
-#ifdef KTRACE
-		if ( KTRPOINT(td, KTR_STRUCT))
-			ktrcpuset(mask, size);
-#endif
 	}
 out:
 	free(mask, M_TEMP);
--- a/sys/sys/syscallsubr.h
+++ b/sys/sys/syscallsubr.h
@@ -119,9 +119,11 @@ int	kern_connectat(struct thread *td, int dirfd, int fd,
 	    struct sockaddr *sa);
 int	kern_copy_file_range(struct thread *td, int infd, off_t *inoffp,
 	    int outfd, off_t *outoffp, size_t len, unsigned int flags);
-int	kern_cpuset_getaffinity(struct thread *td, cpulevel_t level,
+int	user_cpuset_getaffinity(struct thread *td, cpulevel_t level,
 	    cpuwhich_t which, id_t id, size_t cpusetsize, cpuset_t *maskp,
 	    const struct cpuset_copy_cb *cb);
+int	kern_cpuset_getaffinity(struct thread *td, cpulevel_t level,
+	    cpuwhich_t which, id_t id, size_t cpusetsize, cpuset_t *mask);
 int	kern_cpuset_setaffinity(struct thread *td, cpulevel_t level,
 	    cpuwhich_t which, id_t id, cpuset_t *maskp);
 int	user_cpuset_setaffinity(struct thread *td, cpulevel_t level,
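To show how the two prototypes above fit together, a hedged sketch of a syscall-level consumer: it supplies a cpuset_copy_cb (the same copyin/copyout pairing the Linuxulator previously defined as copy_set) to user_cpuset_getaffinity(), which allocates a kernel buffer and calls kern_cpuset_getaffinity() internally. The example_* names and the include list are assumptions, not code from this commit.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cpuset.h>
#include <sys/syscallsubr.h>

/* Plain user-VA masks are moved with copyin()/copyout(). */
static const struct cpuset_copy_cb example_copy_cb = {
	.cpuset_copyin = copyin,
	.cpuset_copyout = copyout
};

/* Hypothetical wrapper: maskp is a user-space address. */
static int
example_getaffinity(struct thread *td, cpulevel_t level, cpuwhich_t which,
    id_t id, size_t cpusetsize, cpuset_t *maskp)
{

	return (user_cpuset_getaffinity(td, level, which, id, cpusetsize,
	    maskp, &example_copy_cb));
}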