Wrap copyin/copyout for kevent so the 32-bit wrapper does not have to
malloc nchanges * sizeof(struct kevent) and/or nevents * sizeof(struct kevent)
on every syscall.

Glanced at by:	peter, jmg
Obtained from:	Yahoo!
MFC after:	2 weeks
commit efe5becafa
parent f09aa88c33
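The shape of the change: kern_kevent() no longer takes raw changelist/eventlist
pointers plus uio_seg flags. The caller instead passes a struct kevent_copyops
whose arg member points at its own per-call state (here, the syscall argument
structure) and whose k_copyin/k_copyout hooks move at most KQ_NEVENTS entries at
a time through an on-stack staging array, so neither the native nor the 32-bit
path has to allocate nchanges or nevents worth of struct kevent. The userland
sketch below illustrates that callback-plus-batching pattern only; BATCH,
struct item_copyops, process_items and the other names are invented for
illustration and are not the kernel code.

/*
 * Minimal userland sketch of the pattern this commit introduces: the caller
 * supplies state plus copyin/copyout hooks, and the core loop streams items
 * through a fixed on-stack batch instead of allocating the whole list.
 */
#include <stdio.h>
#include <string.h>

#define	BATCH	8			/* plays the role of KQ_NEVENTS */

struct item { int v; };

struct item_copyops {			/* shaped like struct kevent_copyops */
	void	*arg;
	int	(*copyout)(void *arg, struct item *buf, int count);
	int	(*copyin)(void *arg, struct item *buf, int count);
};

struct caller_state {			/* plays the role of the syscall args */
	struct item	*changelist;
	struct item	*eventlist;
};

static int
caller_copyin(void *arg, struct item *buf, int count)
{
	struct caller_state *cs = arg;

	memcpy(buf, cs->changelist, count * sizeof(*buf));
	cs->changelist += count;	/* advance like uap->changelist */
	return (0);
}

static int
caller_copyout(void *arg, struct item *buf, int count)
{
	struct caller_state *cs = arg;

	memcpy(cs->eventlist, buf, count * sizeof(*buf));
	cs->eventlist += count;		/* advance like uap->eventlist */
	return (0);
}

/* Core loop: consumes nchanges in BATCH-sized chunks, like kern_kevent(). */
static int
process_items(int nchanges, struct item_copyops *ops)
{
	struct item batch[BATCH];
	int error, i, n;

	while (nchanges > 0) {
		n = nchanges > BATCH ? BATCH : nchanges;
		error = ops->copyin(ops->arg, batch, n);
		if (error)
			return (error);
		for (i = 0; i < n; i++)
			batch[i].v *= 2;	/* stand-in for kqueue_register() */
		error = ops->copyout(ops->arg, batch, n);
		if (error)
			return (error);
		nchanges -= n;
	}
	return (0);
}

int
main(void)
{
	struct item in[20], out[20];
	struct caller_state cs = { in, out };
	struct item_copyops ops = { &cs, caller_copyout, caller_copyin };
	int i;

	for (i = 0; i < 20; i++)
		in[i].v = i;
	if (process_items(20, &ops) == 0)
		printf("out[19].v = %d\n", out[19].v);
	return (0);
}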
@@ -52,6 +52,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/resource.h>
 #include <sys/resourcevar.h>
 #include <sys/selinfo.h>
+#include <sys/eventvar.h>	/* Must come after sys/selinfo.h */
 #include <sys/pipe.h>		/* Must come after sys/selinfo.h */
 #include <sys/signal.h>
 #include <sys/signalvar.h>
@@ -552,16 +553,75 @@ struct kevent32 {
 };
 
 CTASSERT(sizeof(struct kevent32) == 20);
+static int freebsd32_kevent_copyout(void *arg, struct kevent *kevp, int count);
+static int freebsd32_kevent_copyin(void *arg, struct kevent *kevp, int count);
+
+/*
+ * Copy 'count' items into the destination list pointed to by uap->eventlist.
+ */
+static int
+freebsd32_kevent_copyout(void *arg, struct kevent *kevp, int count)
+{
+	struct freebsd32_kevent_args *uap;
+	struct kevent32	ks32[KQ_NEVENTS];
+	int i, error = 0;
+
+	KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
+	uap = (struct freebsd32_kevent_args *)arg;
+
+	for (i = 0; i < count; i++) {
+		CP(kevp[i], ks32[i], ident);
+		CP(kevp[i], ks32[i], filter);
+		CP(kevp[i], ks32[i], flags);
+		CP(kevp[i], ks32[i], fflags);
+		CP(kevp[i], ks32[i], data);
+		PTROUT_CP(kevp[i], ks32[i], udata);
+	}
+	error = copyout(ks32, uap->eventlist, count * sizeof *ks32);
+	if (error == 0)
+		uap->eventlist += count;
+	return (error);
+}
+
+/*
+ * Copy 'count' items from the list pointed to by uap->changelist.
+ */
+static int
+freebsd32_kevent_copyin(void *arg, struct kevent *kevp, int count)
+{
+	struct freebsd32_kevent_args *uap;
+	struct kevent32	ks32[KQ_NEVENTS];
+	int i, error = 0;
+
+	KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
+	uap = (struct freebsd32_kevent_args *)arg;
+
+	error = copyin(uap->changelist, ks32, count * sizeof *ks32);
+	if (error)
+		goto done;
+	uap->changelist += count;
+
+	for (i = 0; i < count; i++) {
+		CP(ks32[i], kevp[i], ident);
+		CP(ks32[i], kevp[i], filter);
+		CP(ks32[i], kevp[i], flags);
+		CP(ks32[i], kevp[i], fflags);
+		CP(ks32[i], kevp[i], data);
+		PTRIN_CP(ks32[i], kevp[i], udata);
+	}
+done:
+	return (error);
+}
 
 int
 freebsd32_kevent(struct thread *td, struct freebsd32_kevent_args *uap)
 {
 	struct timespec32 ts32;
 	struct timespec ts, *tsp;
-	struct kevent *ks;
-	struct kevent32 ks32;
-	struct kevent *changes, *events;
-	int error, i;
+	struct kevent_copyops k_ops = { uap,
+					freebsd32_kevent_copyout,
+					freebsd32_kevent_copyin};
+	int error;
 
 
 	if (uap->timeout) {
@@ -573,51 +633,8 @@ freebsd32_kevent(struct thread *td, struct freebsd32_kevent_args *uap)
 		tsp = &ts;
 	} else
 		tsp = NULL;
-	if (uap->changelist && uap->nchanges > 0) {
-		changes = malloc(sizeof(struct kevent) * uap->nchanges, M_TEMP,
-		    M_WAITOK);
-		for (i = 0; i < uap->nchanges; i++) {
-			error = copyin(&uap->changelist[i], &ks32,
-			    sizeof(ks32));
-			if (error) {
-				free(changes, M_TEMP);
-				return (error);
-			}
-			ks = &changes[i];
-			CP(ks32, *ks, ident);
-			CP(ks32, *ks, filter);
-			CP(ks32, *ks, flags);
-			CP(ks32, *ks, fflags);
-			CP(ks32, *ks, data);
-			PTRIN_CP(ks32, *ks, udata);
-		}
-	} else
-		changes = NULL;
-	if (uap->eventlist && uap->nevents > 0)
-		events = malloc(sizeof(struct kevent) * uap->nevents, M_TEMP,
-		    M_WAITOK);
-	else
-		events = NULL;
-	error = kern_kevent(td, uap->fd, changes, uap->nchanges, UIO_SYSSPACE,
-	    events, uap->nevents, UIO_SYSSPACE, tsp);
-	free(changes, M_TEMP);
-	if (uap->eventlist && events && td->td_retval[0] > 0) {
-		for (i = 0; i < td->td_retval[0]; i++) {
-			ks = &events[i];
-			CP(*ks, ks32, ident);
-			CP(*ks, ks32, filter);
-			CP(*ks, ks32, flags);
-			CP(*ks, ks32, fflags);
-			CP(*ks, ks32, data);
-			PTROUT_CP(*ks, ks32, udata);
-			error = copyout(&ks32, &uap->eventlist[i],
-			    sizeof(ks32));
-			if (error)
-				break;
-		}
-	}
-	if (events)
-		free(events, M_TEMP);
+	error = kern_kevent(td, uap->fd, uap->nchanges, uap->nevents,
+	    &k_ops, tsp);
 	return (error);
 }
 
@@ -81,17 +81,17 @@ MTX_SYSINIT(kq_global, &kq_global, "kqueue order", MTX_DEF);
 
 TASKQUEUE_DEFINE_THREAD(kqueue);
 
-static int	kevent_copyout(struct kevent **eventlist, enum uio_seg eventseg,
-		    struct kevent *kevp, int count);
+static int	kevent_copyout(void *arg, struct kevent *kevp, int count);
+static int	kevent_copyin(void *arg, struct kevent *kevp, int count);
 static int	kqueue_aquire(struct file *fp, struct kqueue **kqp);
 static void	kqueue_release(struct kqueue *kq, int locked);
 static int	kqueue_expand(struct kqueue *kq, struct filterops *fops,
 		    uintptr_t ident, int waitok);
 static void	kqueue_task(void *arg, int pending);
 static int	kqueue_scan(struct kqueue *kq, int maxevents,
-		    struct kevent *eventlist, enum uio_seg eventseg,
-		    const struct timespec *timeout, struct kevent *keva,
-		    struct thread *td);
+		    struct kevent_copyops *k_ops,
+		    const struct timespec *timeout,
+		    struct kevent *keva, struct thread *td);
 static void	kqueue_wakeup(struct kqueue *kq);
 static struct filterops *kqueue_fo_find(int filt);
 static void	kqueue_fo_release(int filt);
@@ -536,6 +536,9 @@ int
 kevent(struct thread *td, struct kevent_args *uap)
 {
 	struct timespec ts, *tsp;
+	struct kevent_copyops k_ops = { uap,
+					kevent_copyout,
+					kevent_copyin};
 	int error;
 
 	if (uap->timeout != NULL) {
@@ -546,36 +549,49 @@ kevent(struct thread *td, struct kevent_args *uap)
 	} else
 		tsp = NULL;
 
-	return (kern_kevent(td, uap->fd, uap->changelist, uap->nchanges,
-	    UIO_USERSPACE, uap->eventlist, uap->nevents, UIO_USERSPACE, tsp));
+	return (kern_kevent(td, uap->fd, uap->nchanges, uap->nevents,
+	    &k_ops, tsp));
 }
 
 /*
- * Copy 'count' items into the destination list pointd to by *eventlist. The
- * eventlist and nevents values are updated to point after the copied out
- * item(s) upon return.
+ * Copy 'count' items into the destination list pointed to by uap->eventlist.
  */
 static int
-kevent_copyout(struct kevent **eventlist, enum uio_seg eventseg,
-    struct kevent *kevp, int count)
+kevent_copyout(void *arg, struct kevent *kevp, int count)
 {
+	struct kevent_args *uap;
 	int error;
 
-	if (eventseg == UIO_USERSPACE)
-		error = copyout(kevp, *eventlist,
-		    sizeof(struct kevent) * count);
-	else {
-		bcopy(kevp, *eventlist, sizeof(struct kevent) * count);
-		error = 0;
-	}
-	*eventlist += count;
+	KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
+	uap = (struct kevent_args *)arg;
+
+	error = copyout(kevp, uap->eventlist, count * sizeof *kevp);
+	if (error == 0)
+		uap->eventlist += count;
 	return (error);
 }
 
+/*
+ * Copy 'count' items from the list pointed to by uap->changelist.
+ */
+static int
+kevent_copyin(void *arg, struct kevent *kevp, int count)
+{
+	struct kevent_args *uap;
+	int error;
+
+	KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
+	uap = (struct kevent_args *)arg;
+
+	error = copyin(uap->changelist, kevp, count * sizeof *kevp);
+	if (error == 0)
+		uap->changelist += count;
+	return (error);
+}
+
 int
-kern_kevent(struct thread *td, int fd, struct kevent *changelist, int nchanges,
-    enum uio_seg changeseg, struct kevent *eventlist, int nevents,
-    enum uio_seg eventseg, const struct timespec *timeout)
+kern_kevent(struct thread *td, int fd, int nchanges, int nevents,
+    struct kevent_copyops *k_ops, const struct timespec *timeout)
 {
 	struct kevent keva[KQ_NEVENTS];
 	struct kevent *kevp, *changes;
@@ -591,16 +607,11 @@ kern_kevent(struct thread *td, int fd, struct kevent *changelist, int nchanges,
 	nerrors = 0;
 
 	while (nchanges > 0) {
-		if (changeseg == UIO_USERSPACE) {
-			n = nchanges > KQ_NEVENTS ? KQ_NEVENTS : nchanges;
-			error = copyin(changelist, keva, n * sizeof *keva);
-			if (error)
-				goto done;
-			changes = keva;
-		} else {
-			changes = changelist;
-			n = nchanges;
-		}
+		n = nchanges > KQ_NEVENTS ? KQ_NEVENTS : nchanges;
+		error = k_ops->k_copyin(k_ops->arg, keva, n);
+		if (error)
+			goto done;
+		changes = keva;
 		for (i = 0; i < n; i++) {
 			kevp = &changes[i];
 			kevp->flags &= ~EV_SYSFLAGS;
@@ -609,8 +620,8 @@ kern_kevent(struct thread *td, int fd, struct kevent *changelist, int nchanges,
 			if (nevents != 0) {
 				kevp->flags = EV_ERROR;
 				kevp->data = error;
-				(void) kevent_copyout(&eventlist,
-				    eventseg, kevp, 1);
+				(void) k_ops->k_copyout(k_ops->arg,
+				    kevp, 1);
 				nevents--;
 				nerrors++;
 			} else {
@@ -619,7 +630,6 @@ kern_kevent(struct thread *td, int fd, struct kevent *changelist, int nchanges,
 			}
 		}
 		nchanges -= n;
-		changelist += n;
 	}
 	if (nerrors) {
 		td->td_retval[0] = nerrors;
@@ -627,8 +637,7 @@ kern_kevent(struct thread *td, int fd, struct kevent *changelist, int nchanges,
 		goto done;
 	}
 
-	error = kqueue_scan(kq, nevents, eventlist, eventseg, timeout,
-	    keva, td);
+	error = kqueue_scan(kq, nevents, k_ops, timeout, keva, td);
 done:
 	kqueue_release(kq, 0);
 done_norel:
@@ -1087,9 +1096,8 @@ kqueue_task(void *arg, int pending)
  * We treat KN_MARKER knotes as if they are INFLUX.
  */
 static int
-kqueue_scan(struct kqueue *kq, int maxevents, struct kevent *eventlist,
-    enum uio_seg eventseg, const struct timespec *tsp, struct kevent *keva,
-    struct thread *td)
+kqueue_scan(struct kqueue *kq, int maxevents, struct kevent_copyops *k_ops,
+    const struct timespec *tsp, struct kevent *keva, struct thread *td)
 {
 	struct kevent *kevp;
 	struct timeval atv, rtv, ttv;
@@ -1242,8 +1250,7 @@ kqueue_scan(struct kqueue *kq, int maxevents, struct kevent *eventlist,
 
 		if (nkev == KQ_NEVENTS) {
 			KQ_UNLOCK_FLUX(kq);
-			error = kevent_copyout(&eventlist, eventseg, keva,
-			    nkev);
+			error = k_ops->k_copyout(k_ops->arg, keva, nkev);
 			nkev = 0;
 			kevp = keva;
 			KQ_LOCK(kq);
@@ -1260,7 +1267,7 @@ kqueue_scan(struct kqueue *kq, int maxevents, struct kevent *eventlist,
 done_nl:
 	KQ_NOTOWNED(kq);
 	if (nkev != 0)
-		error = kevent_copyout(&eventlist, eventseg, keva, nkev);
+		error = k_ops->k_copyout(k_ops->arg, keva, nkev);
 	td->td_retval[0] = maxevents - count;
 	return (error);
 }
@@ -192,6 +192,11 @@ struct knote {
 #define	kn_data		kn_kevent.data
 #define	kn_fp		kn_ptr.p_fp
 };
+struct kevent_copyops {
+	void	*arg;
+	int	(*k_copyout)(void *arg, struct kevent *kevp, int count);
+	int	(*k_copyin)(void *arg, struct kevent *kevp, int count);
+};
 
 struct thread;
 struct proc;
@@ -44,6 +44,7 @@ struct rusage;
 struct sockaddr;
 struct stat;
 struct kevent;
+struct kevent_copyops;
 
 int	kern___getcwd(struct thread *td, u_char *buf, enum uio_seg bufseg,
 	    u_int buflen);
@@ -72,9 +73,8 @@ int	kern_getitimer(struct thread *, u_int, struct itimerval *);
 int	kern_getrusage(struct thread *td, int who, struct rusage *rup);
 int	kern_getsockopt(struct thread *td, int s, int level, int name,
 	    void *optval, enum uio_seg valseg, socklen_t *valsize);
-int	kern_kevent(struct thread *td, int fd, struct kevent *changelist,
-	    int nchanges, enum uio_seg changeseg, struct kevent *eventlist,
-	    int nevents, enum uio_seg eventseg, const struct timespec *timeout);
+int	kern_kevent(struct thread *td, int fd, int nchanges, int nevents,
+	    struct kevent_copyops *k_ops, const struct timespec *timeout);
 int	kern_lchown(struct thread *td, char *path, enum uio_seg pathseg,
 	    int uid, int gid);
 int	kern_link(struct thread *td, char *path, char *link,
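With the new prototype above, a consumer that already has its kevents in kernel
memory no longer needs the UIO_SYSSPACE special cases that kern_kevent() and
kevent_copyout() used to carry; it can supply hooks that simply bcopy() to and
from its own buffers. The fragment below is a hypothetical sketch of such a
caller, not part of this commit: kkev_args, kkev_copyin and kkev_copyout are
invented names.

/*
 * Hypothetical in-kernel consumer of struct kevent_copyops: copy hooks that
 * move kevents to/from kernel buffers, replacing the old UIO_SYSSPACE path.
 */
#include <sys/param.h>
#include <sys/systm.h>		/* bcopy() */
#include <sys/event.h>		/* struct kevent, struct kevent_copyops */

struct kkev_args {
	struct kevent	*changelist;	/* kernel-space changes */
	struct kevent	*eventlist;	/* kernel-space result buffer */
};

static int
kkev_copyin(void *arg, struct kevent *kevp, int count)
{
	struct kkev_args *ka = arg;

	bcopy(ka->changelist, kevp, count * sizeof(*kevp));
	ka->changelist += count;
	return (0);
}

static int
kkev_copyout(void *arg, struct kevent *kevp, int count)
{
	struct kkev_args *ka = arg;

	bcopy(kevp, ka->eventlist, count * sizeof(*kevp));
	ka->eventlist += count;
	return (0);
}

/*
 * The caller then wires the hooks up in member order (arg, k_copyout,
 * k_copyin), mirroring how kevent() and freebsd32_kevent() do it:
 *
 *	struct kkev_args ka = { changes, events };
 *	struct kevent_copyops k_ops = { &ka, kkev_copyout, kkev_copyin };
 *	error = kern_kevent(td, fd, nchanges, nevents, &k_ops, tsp);
 */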