/*
 * Copyright (c) 1982, 1986, 1989, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * sendfile(2) and related extensions:
 * Copyright (c) 1998, David Greenman. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_syscalls.c	8.4 (Berkeley) 2/21/94
 * $FreeBSD$
 */

#include "opt_compat.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysproto.h>
#include <sys/malloc.h>
#include <sys/filedesc.h>
#include <sys/event.h>
#include <sys/proc.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/mount.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/signalvar.h>
#include <sys/uio.h>
#include <sys/vnode.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

static void sf_buf_init(void *arg);
SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL)
static struct sf_buf *sf_buf_alloc(void);
static void sf_buf_free(caddr_t addr, void *args);

static int sendit __P((struct proc *p, int s, struct msghdr *mp, int flags));
static int recvit __P((struct proc *p, int s, struct msghdr *mp,
		       caddr_t namelenp));

static int accept1 __P((struct proc *p, struct accept_args *uap, int compat));
static int getsockname1 __P((struct proc *p, struct getsockname_args *uap,
			     int compat));
static int getpeername1 __P((struct proc *p, struct getpeername_args *uap,
			     int compat));

/*
 * Expanded sf_freelist head.  Really an SLIST_HEAD() in disguise, with the
 * sf_freelist head protected by the sf_lock mutex.
 */
static struct {
	SLIST_HEAD(, sf_buf) sf_head;
	struct mtx sf_lock;
} sf_freelist;

static vm_offset_t sf_base;
static struct sf_buf *sf_bufs;
static u_int sf_buf_alloc_want;

/*
 * System call interface to the socket abstraction.
 */
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
#define COMPAT_OLDSOCK
#endif

extern	struct fileops socketops;
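
/*
 * socket() - create an endpoint for communication and return a new
 * descriptor referencing it.  The file table entry is allocated with
 * falloc() first; if socreate() then fails, the half-installed
 * descriptor is torn down before the error is returned.  From user
 * space this is reached as, for example, s = socket(AF_INET,
 * SOCK_STREAM, 0).
 */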

int
socket(p, uap)
	struct proc *p;
	register struct socket_args /* {
		int	domain;
		int	type;
		int	protocol;
	} */ *uap;
{
	struct filedesc *fdp = p->p_fd;
	struct socket *so;
	struct file *fp;
	int fd, error;

	error = falloc(p, &fp, &fd);
	if (error)
		return (error);
	fhold(fp);
	error = socreate(uap->domain, &so, uap->type, uap->protocol, p);
	if (error) {
		if (fdp->fd_ofiles[fd] == fp) {
			fdp->fd_ofiles[fd] = NULL;
			fdrop(fp, p);
		}
	} else {
		fp->f_data = (caddr_t)so;
		fp->f_flag = FREAD|FWRITE;
		fp->f_ops = &socketops;
		fp->f_type = DTYPE_SOCKET;
		p->p_retval[0] = fd;
	}
	fdrop(fp, p);
	return (error);
}
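
/*
 * bind() - assign a local address to a socket.  The user-supplied
 * sockaddr is copied in with getsockaddr(), handed to sobind(), and
 * the temporary kernel copy is freed before returning.
 */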

/* ARGSUSED */
int
bind(p, uap)
	struct proc *p;
	register struct bind_args /* {
		int	s;
		caddr_t	name;
		int	namelen;
	} */ *uap;
{
	struct file *fp;
	struct sockaddr *sa;
	int error;

	error = holdsock(p->p_fd, uap->s, &fp);
	if (error)
		return (error);
	error = getsockaddr(&sa, uap->name, uap->namelen);
	if (error) {
		fdrop(fp, p);
		return (error);
	}
	error = sobind((struct socket *)fp->f_data, sa, p);
	FREE(sa, M_SONAME);
	fdrop(fp, p);
	return (error);
}
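
/*
 * listen() - mark a socket as willing to accept connections, with the
 * given backlog, via solisten().
 */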

/* ARGSUSED */
int
listen(p, uap)
	struct proc *p;
	register struct listen_args /* {
		int	s;
		int	backlog;
	} */ *uap;
{
	struct file *fp;
	int error;

	error = holdsock(p->p_fd, uap->s, &fp);
	if (error)
		return (error);
	error = solisten((struct socket *)fp->f_data, uap->backlog, p);
	fdrop(fp, p);
	return(error);
}
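
/*
 * accept1() - common code for accept() and the 4.3BSD-compatible
 * oaccept().  Sleeps until a connection is present on the completed
 * connection queue (so_comp), dequeues it before allocating the new
 * descriptor (falloc() may block), and optionally copies the peer's
 * address and its length back out to the caller.
 */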

static int
accept1(p, uap, compat)
	struct proc *p;
	register struct accept_args /* {
		int	s;
		caddr_t	name;
		int	*anamelen;
	} */ *uap;
	int compat;
{
	struct filedesc *fdp = p->p_fd;
	struct file *lfp = NULL;
	struct file *nfp = NULL;
	struct sockaddr *sa;
	int namelen, error, s;
	struct socket *head, *so;
	int fd;
	short fflag;		/* type must match fp->f_flag */

	if (uap->name) {
		error = copyin((caddr_t)uap->anamelen, (caddr_t)&namelen,
		    sizeof (namelen));
		if (error)
			return (error);
	}
	error = holdsock(fdp, uap->s, &lfp);
	if (error)
		return (error);
	s = splnet();
	head = (struct socket *)lfp->f_data;
	if ((head->so_options & SO_ACCEPTCONN) == 0) {
		splx(s);
		error = EINVAL;
		goto done;
	}
	if ((head->so_state & SS_NBIO) && TAILQ_EMPTY(&head->so_comp)) {
		splx(s);
		error = EWOULDBLOCK;
		goto done;
	}
	while (TAILQ_EMPTY(&head->so_comp) && head->so_error == 0) {
		if (head->so_state & SS_CANTRCVMORE) {
			head->so_error = ECONNABORTED;
			break;
		}
		error = tsleep((caddr_t)&head->so_timeo, PSOCK | PCATCH,
		    "accept", 0);
		if (error) {
			splx(s);
			goto done;
		}
	}
	if (head->so_error) {
		error = head->so_error;
		head->so_error = 0;
		splx(s);
		goto done;
	}

	/*
	 * At this point we know that there is at least one connection
	 * ready to be accepted.  Remove it from the queue prior to
	 * allocating the file descriptor for it since falloc() may
	 * block allowing another process to accept the connection
	 * instead.
	 */
	so = TAILQ_FIRST(&head->so_comp);
	TAILQ_REMOVE(&head->so_comp, so, so_list);
	head->so_qlen--;

	fflag = lfp->f_flag;
	error = falloc(p, &nfp, &fd);
	if (error) {
		/*
		 * Probably ran out of file descriptors.  Put the
		 * unaccepted connection back onto the queue and
		 * do another wakeup so some other process might
		 * have a chance at it.
		 */
		TAILQ_INSERT_HEAD(&head->so_comp, so, so_list);
		head->so_qlen++;
		wakeup_one(&head->so_timeo);
		splx(s);
		goto done;
	}
	fhold(nfp);
	p->p_retval[0] = fd;

	/* connection has been removed from the listen queue */
	KNOTE(&head->so_rcv.sb_sel.si_note, 0);

	so->so_state &= ~SS_COMP;
	so->so_head = NULL;
	if (head->so_sigio != NULL)
		fsetown(fgetown(head->so_sigio), &so->so_sigio);

	nfp->f_data = (caddr_t)so;
	nfp->f_flag = fflag;
	nfp->f_ops = &socketops;
	nfp->f_type = DTYPE_SOCKET;
	sa = 0;
	error = soaccept(so, &sa);
	if (error) {
		/*
		 * return a namelen of zero for older code which might
		 * ignore the return value from accept.
		 */
		if (uap->name != NULL) {
			namelen = 0;
			(void) copyout((caddr_t)&namelen,
			    (caddr_t)uap->anamelen, sizeof(*uap->anamelen));
		}
		goto noconnection;
	}
	if (sa == NULL) {
		namelen = 0;
		if (uap->name)
			goto gotnoname;
		splx(s);
		error = 0;
		goto done;
	}
	if (uap->name) {
		/* check sa_len before it is destroyed */
		if (namelen > sa->sa_len)
			namelen = sa->sa_len;
#ifdef COMPAT_OLDSOCK
		if (compat)
			((struct osockaddr *)sa)->sa_family =
			    sa->sa_family;
#endif
		error = copyout(sa, (caddr_t)uap->name, (u_int)namelen);
		if (!error)
gotnoname:
			error = copyout((caddr_t)&namelen,
			    (caddr_t)uap->anamelen, sizeof (*uap->anamelen));
	}
noconnection:
	if (sa)
		FREE(sa, M_SONAME);

	/*
	 * close the new descriptor, assuming someone hasn't ripped it
	 * out from under us.
	 */
	if (error) {
		if (fdp->fd_ofiles[fd] == nfp) {
			fdp->fd_ofiles[fd] = NULL;
			fdrop(nfp, p);
		}
	}
	splx(s);

	/*
	 * Release explicitly held references before returning.
	 */
done:
	if (nfp != NULL)
		fdrop(nfp, p);
	fdrop(lfp, p);
	return (error);
}

int
accept(p, uap)
	struct proc *p;
	struct accept_args *uap;
{

	return (accept1(p, uap, 0));
}

#ifdef COMPAT_OLDSOCK
int
oaccept(p, uap)
	struct proc *p;
	struct accept_args *uap;
{

	return (accept1(p, uap, 1));
}
#endif /* COMPAT_OLDSOCK */
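
/*
 * connect() - initiate a connection on a socket.  A non-blocking
 * socket whose connection cannot complete immediately returns
 * EINPROGRESS (or EALREADY if a connect is already pending); otherwise
 * the caller sleeps until the connection completes or fails.
 */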

/* ARGSUSED */
int
connect(p, uap)
	struct proc *p;
	register struct connect_args /* {
		int	s;
		caddr_t	name;
		int	namelen;
	} */ *uap;
{
	struct file *fp;
	register struct socket *so;
	struct sockaddr *sa;
	int error, s;

	error = holdsock(p->p_fd, uap->s, &fp);
	if (error)
		return (error);
	so = (struct socket *)fp->f_data;
	if ((so->so_state & SS_NBIO) && (so->so_state & SS_ISCONNECTING)) {
		error = EALREADY;
		goto done;
	}
	error = getsockaddr(&sa, uap->name, uap->namelen);
	if (error)
		goto done;
	error = soconnect(so, sa, p);
	if (error)
		goto bad;
	if ((so->so_state & SS_NBIO) && (so->so_state & SS_ISCONNECTING)) {
		FREE(sa, M_SONAME);
		error = EINPROGRESS;
		goto done;
	}
	s = splnet();
	while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
		error = tsleep((caddr_t)&so->so_timeo, PSOCK | PCATCH,
		    "connec", 0);
		if (error)
			break;
	}
	if (error == 0) {
		error = so->so_error;
		so->so_error = 0;
	}
	splx(s);
bad:
	so->so_state &= ~SS_ISCONNECTING;
	FREE(sa, M_SONAME);
	if (error == ERESTART)
		error = EINTR;
done:
	fdrop(fp, p);
	return (error);
}
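
/*
 * socketpair() - create a pair of connected, unnamed sockets and copy
 * the two new descriptors out to the caller.  Datagram pairs are
 * connected in both directions, since that connection is asymmetric.
 */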

int
socketpair(p, uap)
	struct proc *p;
	register struct socketpair_args /* {
		int	domain;
		int	type;
		int	protocol;
		int	*rsv;
	} */ *uap;
{
	register struct filedesc *fdp = p->p_fd;
	struct file *fp1, *fp2;
	struct socket *so1, *so2;
	int fd, error, sv[2];

	error = socreate(uap->domain, &so1, uap->type, uap->protocol, p);
	if (error)
		return (error);
	error = socreate(uap->domain, &so2, uap->type, uap->protocol, p);
	if (error)
		goto free1;
	error = falloc(p, &fp1, &fd);
	if (error)
		goto free2;
	fhold(fp1);
	sv[0] = fd;
	fp1->f_data = (caddr_t)so1;
	error = falloc(p, &fp2, &fd);
	if (error)
		goto free3;
	fhold(fp2);
	fp2->f_data = (caddr_t)so2;
	sv[1] = fd;
	error = soconnect2(so1, so2);
	if (error)
		goto free4;
	if (uap->type == SOCK_DGRAM) {
		/*
		 * Datagram socket connection is asymmetric.
		 */
		error = soconnect2(so2, so1);
		if (error)
			goto free4;
	}
	fp1->f_flag = fp2->f_flag = FREAD|FWRITE;
	fp1->f_ops = fp2->f_ops = &socketops;
	fp1->f_type = fp2->f_type = DTYPE_SOCKET;
	error = copyout((caddr_t)sv, (caddr_t)uap->rsv, 2 * sizeof (int));
	fdrop(fp1, p);
	fdrop(fp2, p);
	return (error);
free4:
	if (fdp->fd_ofiles[sv[1]] == fp2) {
		fdp->fd_ofiles[sv[1]] = NULL;
		fdrop(fp2, p);
	}
	fdrop(fp2, p);
free3:
	if (fdp->fd_ofiles[sv[0]] == fp1) {
		fdp->fd_ofiles[sv[0]] = NULL;
		fdrop(fp1, p);
	}
	fdrop(fp1, p);
free2:
	(void)soclose(so2);
free1:
	(void)soclose(so1);
	return (error);
}
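
/*
 * sendit() - common backend for sendto(), sendmsg() and the old
 * osend()/osendmsg() entry points.  Builds a uio from the caller's
 * iovec array, copies in the optional destination address and control
 * data, and hands everything to the protocol's pru_sosend.  A SIGPIPE
 * is posted if the send fails with EPIPE.
 */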

static int
sendit(p, s, mp, flags)
	register struct proc *p;
	int s;
	register struct msghdr *mp;
	int flags;
{
	struct file *fp;
	struct uio auio;
	register struct iovec *iov;
	register int i;
	struct mbuf *control;
	struct sockaddr *to;
	int len, error;
	struct socket *so;
#ifdef KTRACE
	struct iovec *ktriov = NULL;
	struct uio ktruio;
#endif

	error = holdsock(p->p_fd, s, &fp);
	if (error)
		return (error);
	auio.uio_iov = mp->msg_iov;
	auio.uio_iovcnt = mp->msg_iovlen;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_rw = UIO_WRITE;
	auio.uio_procp = p;
	auio.uio_offset = 0;			/* XXX */
	auio.uio_resid = 0;
	iov = mp->msg_iov;
	for (i = 0; i < mp->msg_iovlen; i++, iov++) {
		if ((auio.uio_resid += iov->iov_len) < 0) {
			fdrop(fp, p);
			return (EINVAL);
		}
	}
	if (mp->msg_name) {
		error = getsockaddr(&to, mp->msg_name, mp->msg_namelen);
		if (error) {
			fdrop(fp, p);
			return (error);
		}
	} else {
		to = 0;
	}
	if (mp->msg_control) {
		if (mp->msg_controllen < sizeof(struct cmsghdr)
#ifdef COMPAT_OLDSOCK
		    && mp->msg_flags != MSG_COMPAT
#endif
		) {
			error = EINVAL;
			goto bad;
		}
		error = sockargs(&control, mp->msg_control,
		    mp->msg_controllen, MT_CONTROL);
		if (error)
			goto bad;
#ifdef COMPAT_OLDSOCK
		if (mp->msg_flags == MSG_COMPAT) {
			register struct cmsghdr *cm;

			M_PREPEND(control, sizeof(*cm), M_TRYWAIT);
			if (control == 0) {
				error = ENOBUFS;
				goto bad;
			} else {
				cm = mtod(control, struct cmsghdr *);
				cm->cmsg_len = control->m_len;
				cm->cmsg_level = SOL_SOCKET;
				cm->cmsg_type = SCM_RIGHTS;
			}
		}
#endif
	} else {
		control = 0;
	}
#ifdef KTRACE
	if (KTRPOINT(p, KTR_GENIO)) {
		int iovlen = auio.uio_iovcnt * sizeof (struct iovec);

		MALLOC(ktriov, struct iovec *, iovlen, M_TEMP, M_WAITOK);
		bcopy((caddr_t)auio.uio_iov, (caddr_t)ktriov, iovlen);
		ktruio = auio;
	}
#endif
	len = auio.uio_resid;
	so = (struct socket *)fp->f_data;
	error = so->so_proto->pr_usrreqs->pru_sosend(so, to, &auio, 0, control,
	    flags, p);
	if (error) {
		if (auio.uio_resid != len && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
		if (error == EPIPE) {
			PROC_LOCK(p);
			psignal(p, SIGPIPE);
			PROC_UNLOCK(p);
		}
	}
	if (error == 0)
		p->p_retval[0] = len - auio.uio_resid;
#ifdef KTRACE
	if (ktriov != NULL) {
		if (error == 0) {
			ktruio.uio_iov = ktriov;
			ktruio.uio_resid = p->p_retval[0];
			ktrgenio(p->p_tracep, s, UIO_WRITE, &ktruio, error);
		}
		FREE(ktriov, M_TEMP);
	}
#endif
bad:
	fdrop(fp, p);
	if (to)
		FREE(to, M_SONAME);
	return (error);
}
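
/*
 * sendto() - send a single buffer, optionally to a specific address,
 * by packaging the arguments into a msghdr and calling sendit().
 * The corresponding user-space call is, for example,
 *	sendto(s, buf, len, 0, (struct sockaddr *)&sin, sizeof(sin));
 * with sin a filled-in struct sockaddr_in.
 */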

int
sendto(p, uap)
	struct proc *p;
	register struct sendto_args /* {
		int	s;
		caddr_t	buf;
		size_t	len;
		int	flags;
		caddr_t	to;
		int	tolen;
	} */ *uap;
{
	struct msghdr msg;
	struct iovec aiov;

	msg.msg_name = uap->to;
	msg.msg_namelen = uap->tolen;
	msg.msg_iov = &aiov;
	msg.msg_iovlen = 1;
	msg.msg_control = 0;
#ifdef COMPAT_OLDSOCK
	msg.msg_flags = 0;
#endif
	aiov.iov_base = uap->buf;
	aiov.iov_len = uap->len;
	return (sendit(p, uap->s, &msg, uap->flags));
}

#ifdef COMPAT_OLDSOCK
int
osend(p, uap)
	struct proc *p;
	register struct osend_args /* {
		int	s;
		caddr_t	buf;
		int	len;
		int	flags;
	} */ *uap;
{
	struct msghdr msg;
	struct iovec aiov;

	msg.msg_name = 0;
	msg.msg_namelen = 0;
	msg.msg_iov = &aiov;
	msg.msg_iovlen = 1;
	aiov.iov_base = uap->buf;
	aiov.iov_len = uap->len;
	msg.msg_control = 0;
	msg.msg_flags = 0;
	return (sendit(p, uap->s, &msg, uap->flags));
}

int
osendmsg(p, uap)
	struct proc *p;
	register struct osendmsg_args /* {
		int	s;
		caddr_t	msg;
		int	flags;
	} */ *uap;
{
	struct msghdr msg;
	struct iovec aiov[UIO_SMALLIOV], *iov;
	int error;

	error = copyin(uap->msg, (caddr_t)&msg, sizeof (struct omsghdr));
	if (error)
		return (error);
	if ((u_int)msg.msg_iovlen >= UIO_SMALLIOV) {
		if ((u_int)msg.msg_iovlen >= UIO_MAXIOV)
			return (EMSGSIZE);
		MALLOC(iov, struct iovec *,
		    sizeof(struct iovec) * (u_int)msg.msg_iovlen, M_IOV,
		    M_WAITOK);
	} else
		iov = aiov;
	error = copyin((caddr_t)msg.msg_iov, (caddr_t)iov,
	    (unsigned)(msg.msg_iovlen * sizeof (struct iovec)));
	if (error)
		goto done;
	msg.msg_flags = MSG_COMPAT;
	msg.msg_iov = iov;
	error = sendit(p, uap->s, &msg, uap->flags);
done:
	if (iov != aiov)
		FREE(iov, M_IOV);
	return (error);
}
#endif

int
sendmsg(p, uap)
	struct proc *p;
	register struct sendmsg_args /* {
		int	s;
		caddr_t	msg;
		int	flags;
	} */ *uap;
{
	struct msghdr msg;
	struct iovec aiov[UIO_SMALLIOV], *iov;
	int error;

	error = copyin(uap->msg, (caddr_t)&msg, sizeof (msg));
	if (error)
		return (error);
	if ((u_int)msg.msg_iovlen >= UIO_SMALLIOV) {
		if ((u_int)msg.msg_iovlen >= UIO_MAXIOV)
			return (EMSGSIZE);
		MALLOC(iov, struct iovec *,
		    sizeof(struct iovec) * (u_int)msg.msg_iovlen, M_IOV,
		    M_WAITOK);
	} else
		iov = aiov;
	if (msg.msg_iovlen &&
	    (error = copyin((caddr_t)msg.msg_iov, (caddr_t)iov,
	    (unsigned)(msg.msg_iovlen * sizeof (struct iovec)))))
		goto done;
	msg.msg_iov = iov;
#ifdef COMPAT_OLDSOCK
	msg.msg_flags = 0;
#endif
	error = sendit(p, uap->s, &msg, uap->flags);
done:
	if (iov != aiov)
		FREE(iov, M_IOV);
	return (error);
}
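
/*
 * recvit() - common backend for recvfrom(), recvmsg() and the old
 * compatibility entry points.  Builds a uio for the caller's iovec
 * array, calls the protocol's pru_soreceive, then copies the source
 * address, control data and their lengths back out to user space,
 * setting MSG_CTRUNC when the control data does not fit.
 */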

static int
recvit(p, s, mp, namelenp)
	register struct proc *p;
	int s;
	register struct msghdr *mp;
	caddr_t namelenp;
{
	struct file *fp;
	struct uio auio;
	register struct iovec *iov;
	register int i;
	int len, error;
	struct mbuf *m, *control = 0;
	caddr_t ctlbuf;
	struct socket *so;
	struct sockaddr *fromsa = 0;
#ifdef KTRACE
	struct iovec *ktriov = NULL;
	struct uio ktruio;
#endif

	error = holdsock(p->p_fd, s, &fp);
	if (error)
		return (error);
	auio.uio_iov = mp->msg_iov;
	auio.uio_iovcnt = mp->msg_iovlen;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_rw = UIO_READ;
	auio.uio_procp = p;
	auio.uio_offset = 0;			/* XXX */
	auio.uio_resid = 0;
	iov = mp->msg_iov;
	for (i = 0; i < mp->msg_iovlen; i++, iov++) {
		if ((auio.uio_resid += iov->iov_len) < 0) {
			fdrop(fp, p);
			return (EINVAL);
		}
	}
#ifdef KTRACE
	if (KTRPOINT(p, KTR_GENIO)) {
		int iovlen = auio.uio_iovcnt * sizeof (struct iovec);

		MALLOC(ktriov, struct iovec *, iovlen, M_TEMP, M_WAITOK);
		bcopy((caddr_t)auio.uio_iov, (caddr_t)ktriov, iovlen);
		ktruio = auio;
	}
#endif
	len = auio.uio_resid;
	so = (struct socket *)fp->f_data;
	error = so->so_proto->pr_usrreqs->pru_soreceive(so, &fromsa, &auio,
	    (struct mbuf **)0, mp->msg_control ? &control : (struct mbuf **)0,
	    &mp->msg_flags);
	if (error) {
		if (auio.uio_resid != len && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
	}
#ifdef KTRACE
	if (ktriov != NULL) {
		if (error == 0) {
			ktruio.uio_iov = ktriov;
			ktruio.uio_resid = len - auio.uio_resid;
			ktrgenio(p->p_tracep, s, UIO_READ, &ktruio, error);
		}
		FREE(ktriov, M_TEMP);
	}
#endif
	if (error)
		goto out;
	p->p_retval[0] = len - auio.uio_resid;
	if (mp->msg_name) {
		len = mp->msg_namelen;
		if (len <= 0 || fromsa == 0)
			len = 0;
		else {
#ifndef MIN
#define MIN(a,b) ((a)>(b)?(b):(a))
#endif
			/* save sa_len before it is destroyed by MSG_COMPAT */
			len = MIN(len, fromsa->sa_len);
#ifdef COMPAT_OLDSOCK
			if (mp->msg_flags & MSG_COMPAT)
				((struct osockaddr *)fromsa)->sa_family =
				    fromsa->sa_family;
#endif
			error = copyout(fromsa,
			    (caddr_t)mp->msg_name, (unsigned)len);
			if (error)
				goto out;
		}
		mp->msg_namelen = len;
		if (namelenp &&
		    (error = copyout((caddr_t)&len, namelenp, sizeof (int)))) {
#ifdef COMPAT_OLDSOCK
			if (mp->msg_flags & MSG_COMPAT)
				error = 0;	/* old recvfrom didn't check */
			else
#endif
			goto out;
		}
	}
	if (mp->msg_control) {
#ifdef COMPAT_OLDSOCK
		/*
		 * We assume that old recvmsg calls won't receive access
		 * rights and other control info, esp. as control info
		 * is always optional and those options didn't exist in 4.3.
		 * If we receive rights, trim the cmsghdr; anything else
		 * is tossed.
		 */
		if (control && mp->msg_flags & MSG_COMPAT) {
			if (mtod(control, struct cmsghdr *)->cmsg_level !=
			    SOL_SOCKET ||
			    mtod(control, struct cmsghdr *)->cmsg_type !=
			    SCM_RIGHTS) {
				mp->msg_controllen = 0;
				goto out;
			}
			control->m_len -= sizeof (struct cmsghdr);
			control->m_data += sizeof (struct cmsghdr);
		}
#endif
		len = mp->msg_controllen;
		m = control;
		mp->msg_controllen = 0;
		ctlbuf = (caddr_t) mp->msg_control;

		while (m && len > 0) {
			unsigned int tocopy;

			if (len >= m->m_len)
				tocopy = m->m_len;
			else {
				mp->msg_flags |= MSG_CTRUNC;
				tocopy = len;
			}

			if ((error = copyout((caddr_t)mtod(m, caddr_t),
			    ctlbuf, tocopy)) != 0)
				goto out;

			ctlbuf += tocopy;
			len -= tocopy;
			m = m->m_next;
		}
		mp->msg_controllen = ctlbuf - (caddr_t)mp->msg_control;
	}
out:
	fdrop(fp, p);
	if (fromsa)
		FREE(fromsa, M_SONAME);
	if (control)
		m_freem(control);
	return (error);
}
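
/*
 * recvfrom() - receive a single buffer and, if requested, the source
 * address of the message, by packaging the arguments into a msghdr
 * and calling recvit().
 */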

int
recvfrom(p, uap)
	struct proc *p;
	register struct recvfrom_args /* {
		int	s;
		caddr_t	buf;
		size_t	len;
		int	flags;
		caddr_t	from;
		int	*fromlenaddr;
	} */ *uap;
{
	struct msghdr msg;
	struct iovec aiov;
	int error;

	if (uap->fromlenaddr) {
		error = copyin((caddr_t)uap->fromlenaddr,
		    (caddr_t)&msg.msg_namelen, sizeof (msg.msg_namelen));
		if (error)
			return (error);
	} else
		msg.msg_namelen = 0;
	msg.msg_name = uap->from;
	msg.msg_iov = &aiov;
	msg.msg_iovlen = 1;
	aiov.iov_base = uap->buf;
	aiov.iov_len = uap->len;
	msg.msg_control = 0;
	msg.msg_flags = uap->flags;
	return (recvit(p, uap->s, &msg, (caddr_t)uap->fromlenaddr));
}

#ifdef COMPAT_OLDSOCK
int
orecvfrom(p, uap)
	struct proc *p;
	struct recvfrom_args *uap;
{

	uap->flags |= MSG_COMPAT;
	return (recvfrom(p, uap));
}
#endif

#ifdef COMPAT_OLDSOCK
int
orecv(p, uap)
	struct proc *p;
	register struct orecv_args /* {
		int	s;
		caddr_t	buf;
		int	len;
		int	flags;
	} */ *uap;
{
	struct msghdr msg;
	struct iovec aiov;

	msg.msg_name = 0;
	msg.msg_namelen = 0;
	msg.msg_iov = &aiov;
	msg.msg_iovlen = 1;
	aiov.iov_base = uap->buf;
	aiov.iov_len = uap->len;
	msg.msg_control = 0;
	msg.msg_flags = uap->flags;
	return (recvit(p, uap->s, &msg, (caddr_t)0));
}

/*
 * Old recvmsg.  This code takes advantage of the fact that the old msghdr
 * overlays the new one, missing only the flags, and with the (old) access
 * rights where the control fields are now.
 */
int
orecvmsg(p, uap)
	struct proc *p;
	register struct orecvmsg_args /* {
		int	s;
		struct	omsghdr *msg;
		int	flags;
	} */ *uap;
{
	struct msghdr msg;
	struct iovec aiov[UIO_SMALLIOV], *iov;
	int error;

	error = copyin((caddr_t)uap->msg, (caddr_t)&msg,
	    sizeof (struct omsghdr));
	if (error)
		return (error);
	if ((u_int)msg.msg_iovlen >= UIO_SMALLIOV) {
		if ((u_int)msg.msg_iovlen >= UIO_MAXIOV)
			return (EMSGSIZE);
		MALLOC(iov, struct iovec *,
		    sizeof(struct iovec) * (u_int)msg.msg_iovlen, M_IOV,
		    M_WAITOK);
	} else
		iov = aiov;
	msg.msg_flags = uap->flags | MSG_COMPAT;
	error = copyin((caddr_t)msg.msg_iov, (caddr_t)iov,
	    (unsigned)(msg.msg_iovlen * sizeof (struct iovec)));
	if (error)
		goto done;
	msg.msg_iov = iov;
	error = recvit(p, uap->s, &msg, (caddr_t)&uap->msg->msg_namelen);

	if (msg.msg_controllen && error == 0)
		error = copyout((caddr_t)&msg.msg_controllen,
		    (caddr_t)&uap->msg->msg_accrightslen, sizeof (int));
done:
	if (iov != aiov)
		FREE(iov, M_IOV);
	return (error);
}
#endif

int
recvmsg(p, uap)
	struct proc *p;
	register struct recvmsg_args /* {
		int	s;
		struct	msghdr *msg;
		int	flags;
	} */ *uap;
{
	struct msghdr msg;
	struct iovec aiov[UIO_SMALLIOV], *uiov, *iov;
	register int error;

	error = copyin((caddr_t)uap->msg, (caddr_t)&msg, sizeof (msg));
	if (error)
		return (error);
	if ((u_int)msg.msg_iovlen >= UIO_SMALLIOV) {
		if ((u_int)msg.msg_iovlen >= UIO_MAXIOV)
			return (EMSGSIZE);
		MALLOC(iov, struct iovec *,
		    sizeof(struct iovec) * (u_int)msg.msg_iovlen, M_IOV,
		    M_WAITOK);
	} else
		iov = aiov;
#ifdef COMPAT_OLDSOCK
	msg.msg_flags = uap->flags &~ MSG_COMPAT;
#else
	msg.msg_flags = uap->flags;
#endif
	uiov = msg.msg_iov;
	msg.msg_iov = iov;
	error = copyin((caddr_t)uiov, (caddr_t)iov,
	    (unsigned)(msg.msg_iovlen * sizeof (struct iovec)));
	if (error)
		goto done;
	error = recvit(p, uap->s, &msg, (caddr_t)0);
	if (!error) {
		msg.msg_iov = uiov;
		error = copyout((caddr_t)&msg, (caddr_t)uap->msg, sizeof(msg));
	}
done:
	if (iov != aiov)
		FREE(iov, M_IOV);
	return (error);
}

/* ARGSUSED */
int
shutdown(p, uap)
	struct proc *p;
	register struct shutdown_args /* {
		int	s;
		int	how;
	} */ *uap;
{
	struct file *fp;
	int error;

	error = holdsock(p->p_fd, uap->s, &fp);
	if (error)
		return (error);
	error = soshutdown((struct socket *)fp->f_data, uap->how);
	fdrop(fp, p);
	return(error);
}
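
/*
 * setsockopt() - set a socket option.  The arguments are validated and
 * packaged into a struct sockopt with sopt_dir = SOPT_SET; sopt_val is
 * left pointing at the user-space buffer for the lower layers to copy
 * in as needed, and the whole request is passed to sosetopt().
 */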

/* ARGSUSED */
int
setsockopt(p, uap)
	struct proc *p;
	register struct setsockopt_args /* {
		int	s;
		int	level;
		int	name;
		caddr_t	val;
		int	valsize;
	} */ *uap;
{
	struct file *fp;
	struct sockopt sopt;
	int error;

	if (uap->val == 0 && uap->valsize != 0)
		return (EFAULT);
	if (uap->valsize < 0)
		return (EINVAL);

	error = holdsock(p->p_fd, uap->s, &fp);
	if (error)
		return (error);

	sopt.sopt_dir = SOPT_SET;
	sopt.sopt_level = uap->level;
	sopt.sopt_name = uap->name;
	sopt.sopt_val = uap->val;
	sopt.sopt_valsize = uap->valsize;
	sopt.sopt_p = p;
	error = sosetopt((struct socket *)fp->f_data, &sopt);
	fdrop(fp, p);
	return(error);
}
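
/*
 * getsockopt() - retrieve a socket option.  The value-size argument is
 * copied in and validated, a struct sockopt with sopt_dir = SOPT_GET is
 * handed to sogetopt(), and the possibly-updated size is copied back
 * out to *avalsize on success.
 */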

/* ARGSUSED */
int
getsockopt(p, uap)
	struct proc *p;
	register struct getsockopt_args /* {
		int	s;
		int	level;
		int	name;
		caddr_t	val;
		int	*avalsize;
	} */ *uap;
{
	int	valsize, error;
	struct	file *fp;
	struct	sockopt sopt;

	error = holdsock(p->p_fd, uap->s, &fp);
	if (error)
		return (error);
	if (uap->val) {
		error = copyin((caddr_t)uap->avalsize, (caddr_t)&valsize,
		    sizeof (valsize));
		if (error) {
			fdrop(fp, p);
			return (error);
		}
		if (valsize < 0) {
			fdrop(fp, p);
			return (EINVAL);
		}
	} else {
		valsize = 0;
	}

	sopt.sopt_dir = SOPT_GET;
	sopt.sopt_level = uap->level;
	sopt.sopt_name = uap->name;
	sopt.sopt_val = uap->val;
	sopt.sopt_valsize = (size_t)valsize; /* checked non-negative above */
	sopt.sopt_p = p;

	error = sogetopt((struct socket *)fp->f_data, &sopt);
	if (error == 0) {
		valsize = sopt.sopt_valsize;
		error = copyout((caddr_t)&valsize,
		    (caddr_t)uap->avalsize, sizeof (valsize));
	}
	fdrop(fp, p);
	return (error);
}

/*
 * Get socket name.
 */
/* ARGSUSED */
static int
getsockname1(p, uap, compat)
	struct proc *p;
	register struct getsockname_args /* {
		int	fdes;
		caddr_t	asa;
		int	*alen;
	} */ *uap;
	int compat;
{
	struct file *fp;
	register struct socket *so;
	struct sockaddr *sa;
	int len, error;

	error = holdsock(p->p_fd, uap->fdes, &fp);
	if (error)
		return (error);
	error = copyin((caddr_t)uap->alen, (caddr_t)&len, sizeof (len));
	if (error) {
		fdrop(fp, p);
		return (error);
	}
	so = (struct socket *)fp->f_data;
	sa = 0;
	error = (*so->so_proto->pr_usrreqs->pru_sockaddr)(so, &sa);
	if (error)
		goto bad;
	if (sa == 0) {
		len = 0;
		goto gotnothing;
	}

	len = MIN(len, sa->sa_len);
#ifdef COMPAT_OLDSOCK
	if (compat)
		((struct osockaddr *)sa)->sa_family = sa->sa_family;
#endif
	error = copyout(sa, (caddr_t)uap->asa, (u_int)len);
	if (error == 0)
gotnothing:
		error = copyout((caddr_t)&len, (caddr_t)uap->alen,
		    sizeof (len));
bad:
	if (sa)
		FREE(sa, M_SONAME);
	fdrop(fp, p);
	return (error);
}

int
getsockname(p, uap)
	struct proc *p;
	struct getsockname_args *uap;
{

	return (getsockname1(p, uap, 0));
}

#ifdef COMPAT_OLDSOCK
int
ogetsockname(p, uap)
	struct proc *p;
	struct getsockname_args *uap;
{

	return (getsockname1(p, uap, 1));
}
#endif /* COMPAT_OLDSOCK */

/*
 * Get name of peer for connected socket.
 */
/* ARGSUSED */
static int
getpeername1(p, uap, compat)
	struct proc *p;
	register struct getpeername_args /* {
		int	fdes;
		caddr_t	asa;
		int	*alen;
	} */ *uap;
	int compat;
{
	struct file *fp;
	register struct socket *so;
	struct sockaddr *sa;
	int len, error;

	error = holdsock(p->p_fd, uap->fdes, &fp);
	if (error)
		return (error);
	so = (struct socket *)fp->f_data;
	if ((so->so_state & (SS_ISCONNECTED|SS_ISCONFIRMING)) == 0) {
		fdrop(fp, p);
		return (ENOTCONN);
	}
	error = copyin((caddr_t)uap->alen, (caddr_t)&len, sizeof (len));
	if (error) {
		fdrop(fp, p);
		return (error);
	}
	sa = 0;
	error = (*so->so_proto->pr_usrreqs->pru_peeraddr)(so, &sa);
	if (error)
		goto bad;
	if (sa == 0) {
		len = 0;
		goto gotnothing;
	}
	len = MIN(len, sa->sa_len);
#ifdef COMPAT_OLDSOCK
	if (compat)
		((struct osockaddr *)sa)->sa_family =
		    sa->sa_family;
#endif
	error = copyout(sa, (caddr_t)uap->asa, (u_int)len);
	if (error)
		goto bad;
gotnothing:
	error = copyout((caddr_t)&len, (caddr_t)uap->alen, sizeof (len));
bad:
	if (sa)
		FREE(sa, M_SONAME);
	fdrop(fp, p);
	return (error);
}

int
getpeername(p, uap)
	struct proc *p;
	struct getpeername_args *uap;
{

	return (getpeername1(p, uap, 0));
}

#ifdef COMPAT_OLDSOCK
int
ogetpeername(p, uap)
	struct proc *p;
	struct ogetpeername_args *uap;
{

	/* XXX uap should have type `getpeername_args *' to begin with. */
	return (getpeername1(p, (struct getpeername_args *)uap, 1));
}
#endif /* COMPAT_OLDSOCK */
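
/*
 * sockargs() - copy a small argument block (a sockaddr or control
 * data) from user space into a freshly allocated mbuf of the given
 * type.  For MT_SONAME the sa_len field is forced to the length
 * actually copied in, with an old-4.3BSD compatibility fixup of
 * sa_family on little-endian machines.
 */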

int
sockargs(mp, buf, buflen, type)
	struct mbuf **mp;
	caddr_t buf;
	int buflen, type;
{
	register struct sockaddr *sa;
	register struct mbuf *m;
	int error;

	if ((u_int)buflen > MLEN) {
#ifdef COMPAT_OLDSOCK
		if (type == MT_SONAME && (u_int)buflen <= 112)
			buflen = MLEN;		/* unix domain compat. hack */
		else
#endif
		return (EINVAL);
	}
	m = m_get(M_TRYWAIT, type);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = buflen;
	error = copyin(buf, mtod(m, caddr_t), (u_int)buflen);
	if (error)
		(void) m_free(m);
	else {
		*mp = m;
		if (type == MT_SONAME) {
			sa = mtod(m, struct sockaddr *);

#if defined(COMPAT_OLDSOCK) && BYTE_ORDER != BIG_ENDIAN
			if (sa->sa_family == 0 && sa->sa_len < AF_MAX)
				sa->sa_family = sa->sa_len;
#endif
			sa->sa_len = buflen;
		}
	}
	return (error);
}
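
/*
 * getsockaddr() - copy a sockaddr of the given length from user space
 * into a freshly allocated M_SONAME buffer, applying the same sa_len /
 * sa_family compatibility fixup as sockargs().  The caller frees the
 * result with FREE(sa, M_SONAME).
 */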

int
getsockaddr(namp, uaddr, len)
	struct sockaddr **namp;
	caddr_t uaddr;
	size_t len;
{
	struct sockaddr *sa;
	int error;

	if (len > SOCK_MAXADDRLEN)
		return ENAMETOOLONG;
	MALLOC(sa, struct sockaddr *, len, M_SONAME, M_WAITOK);
	error = copyin(uaddr, sa, len);
	if (error) {
		FREE(sa, M_SONAME);
	} else {
#if defined(COMPAT_OLDSOCK) && BYTE_ORDER != BIG_ENDIAN
		if (sa->sa_family == 0 && sa->sa_len < AF_MAX)
			sa->sa_family = sa->sa_len;
#endif
		sa->sa_len = len;
		*namp = sa;
	}
	return error;
}

/*
 * holdsock() - load the struct file pointer associated
 * with a socket into *fpp.  If an error occurs, non-zero
 * will be returned and *fpp will be set to NULL.
 */
int
holdsock(fdp, fdes, fpp)
	struct filedesc *fdp;
	int fdes;
	struct file **fpp;
{
	register struct file *fp = NULL;
	int error = 0;

	if ((unsigned)fdes >= fdp->fd_nfiles ||
	    (fp = fdp->fd_ofiles[fdes]) == NULL) {
		error = EBADF;
	} else if (fp->f_type != DTYPE_SOCKET) {
		error = ENOTSOCK;
		fp = NULL;
	} else {
		fhold(fp);
	}
	*fpp = fp;
	return(error);
}

/*
 * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-))
 * XXX - The sf_buf functions are currently private to sendfile(2), so have
 * been made static, but may be useful in the future for doing zero-copy in
 * other parts of the networking code.
 */
static void
sf_buf_init(void *arg)
{
	int i;

	mtx_init(&sf_freelist.sf_lock, "sf_bufs list lock", MTX_DEF);
mtx_lock(&sf_freelist.sf_lock);
|
2001-01-02 11:51:55 +00:00
|
|
|
SLIST_INIT(&sf_freelist.sf_head);
|
1998-11-05 14:28:26 +00:00
|
|
|
sf_base = kmem_alloc_pageable(kernel_map, nsfbufs * PAGE_SIZE);
|
2000-12-08 21:51:06 +00:00
|
|
|
sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
|
|
|
|
M_NOWAIT | M_ZERO);
|
1998-11-05 14:28:26 +00:00
|
|
|
for (i = 0; i < nsfbufs; i++) {
|
|
|
|
sf_bufs[i].kva = sf_base + i * PAGE_SIZE;
|
2001-01-02 11:51:55 +00:00
|
|
|
SLIST_INSERT_HEAD(&sf_freelist.sf_head, &sf_bufs[i], free_list);
|
1998-11-05 14:28:26 +00:00
|
|
|
}
|
2000-11-04 21:55:25 +00:00
|
|
|
sf_buf_alloc_want = 0;
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock(&sf_freelist.sf_lock);
|
1998-11-05 14:28:26 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Get an sf_buf from the freelist. Will block if none are available.
|
|
|
|
*/
|
|
|
|
static struct sf_buf *
|
|
|
|
sf_buf_alloc()
|
|
|
|
{
|
|
|
|
struct sf_buf *sf;
|
2001-03-08 16:28:10 +00:00
|
|
|
int error;
|
1998-11-05 14:28:26 +00:00
|
|
|
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock(&sf_freelist.sf_lock);
|
2001-01-02 11:51:55 +00:00
|
|
|
while ((sf = SLIST_FIRST(&sf_freelist.sf_head)) == NULL) {
|
2000-11-04 21:55:25 +00:00
|
|
|
sf_buf_alloc_want++;
|
2001-03-08 16:28:10 +00:00
|
|
|
error = msleep(&sf_freelist, &sf_freelist.sf_lock, PVM|PCATCH,
|
|
|
|
"sfbufa", 0);
|
2001-03-08 19:21:45 +00:00
|
|
|
sf_buf_alloc_want--;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If we got a signal, don't risk going back to sleep.
|
|
|
|
*/
|
|
|
|
if (error)
|
2001-03-08 16:28:10 +00:00
|
|
|
break;
|
1998-11-05 14:28:26 +00:00
|
|
|
}
|
2001-01-02 11:51:55 +00:00
|
|
|
SLIST_REMOVE_HEAD(&sf_freelist.sf_head, free_list);
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock(&sf_freelist.sf_lock);
|
1998-11-05 14:28:26 +00:00
|
|
|
return (sf);
|
|
|
|
}
|
|
|
|
|
|
|
|
#define dtosf(x) (&sf_bufs[((uintptr_t)(x) - (uintptr_t)sf_base) >> PAGE_SHIFT])
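
/*
 * Worked example: any address within the page mapped at
 * sf_base + 3 * PAGE_SIZE satisfies
 * ((uintptr_t)addr - (uintptr_t)sf_base) >> PAGE_SHIFT == 3,
 * so dtosf() returns &sf_bufs[3]; the offset within the page is
 * simply shifted away.
 */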
|
|
|
|
|
|
|
|
/*
|
2000-08-19 08:32:59 +00:00
|
|
|
 * Detach mapped page and release resources back to the system.
|
1998-11-05 14:28:26 +00:00
|
|
|
*/
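/*
 * sf_buf_free() is registered via MEXTADD() in sendfile() below as the
 * external-storage free routine for EXT_SFBUF mbufs.
 */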
|
|
|
|
static void
|
2000-08-19 08:32:59 +00:00
|
|
|
sf_buf_free(caddr_t addr, void *args)
|
1998-11-05 14:28:26 +00:00
|
|
|
{
|
|
|
|
struct sf_buf *sf;
|
|
|
|
struct vm_page *m;
|
|
|
|
|
|
|
|
sf = dtosf(addr);
|
2001-05-25 19:23:04 +00:00
|
|
|
mtx_lock(&vm_mtx);
|
2000-08-19 08:32:59 +00:00
|
|
|
pmap_qremove((vm_offset_t)addr, 1);
|
|
|
|
m = sf->m;
|
|
|
|
vm_page_unwire(m, 0);
|
|
|
|
/*
|
|
|
|
* Check for the object going away on us. This can
|
|
|
|
* happen since we don't hold a reference to it.
|
|
|
|
* If so, we're responsible for freeing the page.
|
|
|
|
*/
|
|
|
|
if (m->wire_count == 0 && m->object == NULL)
|
|
|
|
vm_page_free(m);
|
2001-05-25 19:23:04 +00:00
|
|
|
mtx_unlock(&vm_mtx);
|
2000-08-19 08:32:59 +00:00
|
|
|
sf->m = NULL;
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock(&sf_freelist.sf_lock);
|
2001-01-02 11:51:55 +00:00
|
|
|
SLIST_INSERT_HEAD(&sf_freelist.sf_head, sf, free_list);
|
2001-03-08 19:21:45 +00:00
|
|
|
if (sf_buf_alloc_want > 0)
|
2000-11-04 21:55:25 +00:00
|
|
|
wakeup_one(&sf_freelist);
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock(&sf_freelist.sf_lock);
|
1998-11-05 14:28:26 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2000-11-04 07:16:08 +00:00
|
|
|
* sendfile(2)
|
1998-11-05 14:28:26 +00:00
|
|
|
* int sendfile(int fd, int s, off_t offset, size_t nbytes,
|
|
|
|
* struct sf_hdtr *hdtr, off_t *sbytes, int flags)
|
|
|
|
*
|
|
|
|
* Send a file specified by 'fd' and starting at 'offset' to a socket
|
|
|
|
* specified by 's'. Send only 'nbytes' of the file or until EOF if
|
|
|
|
* nbytes == 0. Optionally add a header and/or trailer to the socket
|
|
|
|
* output. If specified, write the total number of bytes sent into *sbytes.
|
|
|
|
*/
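
/*
 * Userland usage, as a minimal sketch (assumes "s" is an already
 * connected stream socket; the file name and the err(3) error handling
 * are illustrative only):
 *
 *	#include <sys/types.h>
 *	#include <sys/socket.h>
 *	#include <sys/uio.h>
 *	#include <err.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	off_t sbytes;
 *	int fd;
 *
 *	fd = open("/var/tmp/file.dat", O_RDONLY);
 *	if (fd == -1)
 *		err(1, "open");
 *	if (sendfile(fd, s, 0, 0, NULL, &sbytes, 0) == -1)
 *		err(1, "sendfile");
 *	close(fd);
 *
 * On return sbytes holds the number of bytes written to the socket;
 * passing nbytes == 0 means "send until EOF", as described above.
 */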
|
|
|
|
int
|
|
|
|
sendfile(struct proc *p, struct sendfile_args *uap)
|
|
|
|
{
|
|
|
|
struct file *fp;
|
|
|
|
struct filedesc *fdp = p->p_fd;
|
|
|
|
struct vnode *vp;
|
|
|
|
struct vm_object *obj;
|
|
|
|
struct socket *so;
|
|
|
|
struct mbuf *m;
|
|
|
|
struct sf_buf *sf;
|
|
|
|
struct vm_page *pg;
|
|
|
|
struct writev_args nuap;
|
|
|
|
struct sf_hdtr hdtr;
|
|
|
|
off_t off, xfsize, sbytes = 0;
|
1998-12-07 21:58:50 +00:00
|
|
|
int error = 0, s;
|
1998-11-05 14:28:26 +00:00
|
|
|
|
This is what was "fdfix2.patch," a fix for fd sharing. It's pretty
far-reaching in fd-land, so you'll want to consult the code for
changes. The biggest change is that now, you don't use
fp->f_ops->fo_foo(fp, bar)
but instead
fo_foo(fp, bar),
which increments and decrements the fp refcount upon entry and exit.
Two new calls, fhold() and fdrop(), are provided. Each does what it
seems like it should, and if fdrop() brings the refcount to zero, the
fd is freed as well.
Thanks to peter ("to hell with it, it looks ok to me.") for his review.
Thanks to msmith for keeping me from putting locks everywhere :)
Reviewed by: peter
1999-09-19 17:00:25 +00:00
|
|
|
vp = NULL;
|
1998-11-05 14:28:26 +00:00
|
|
|
/*
|
|
|
|
* Do argument checking. Must be a regular file in, stream
|
|
|
|
* type and connected socket out, positive offset.
|
|
|
|
*/
|
2000-11-18 21:01:04 +00:00
|
|
|
fp = holdfp(fdp, uap->fd, FREAD);
|
2000-06-12 18:06:12 +00:00
|
|
|
if (fp == NULL) {
|
1998-11-05 14:28:26 +00:00
|
|
|
error = EBADF;
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
if (fp->f_type != DTYPE_VNODE) {
|
|
|
|
error = EINVAL;
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
vp = (struct vnode *)fp->f_data;
|
1999-09-19 17:00:25 +00:00
|
|
|
vref(vp);
|
2000-09-12 09:49:08 +00:00
|
|
|
if (vp->v_type != VREG || VOP_GETVOBJECT(vp, &obj) != 0) {
|
1998-11-05 14:28:26 +00:00
|
|
|
error = EINVAL;
|
|
|
|
goto done;
|
|
|
|
}
|
2000-11-18 21:01:04 +00:00
|
|
|
fdrop(fp, p);
|
|
|
|
error = holdsock(p->p_fd, uap->s, &fp);
|
1998-11-05 14:28:26 +00:00
|
|
|
if (error)
|
|
|
|
goto done;
|
|
|
|
so = (struct socket *)fp->f_data;
|
|
|
|
if (so->so_type != SOCK_STREAM) {
|
|
|
|
error = EINVAL;
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
if ((so->so_state & SS_ISCONNECTED) == 0) {
|
|
|
|
error = ENOTCONN;
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
if (uap->offset < 0) {
|
|
|
|
error = EINVAL;
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If specified, get the pointer to the sf_hdtr struct for
|
|
|
|
* any headers/trailers.
|
|
|
|
*/
|
|
|
|
if (uap->hdtr != NULL) {
|
|
|
|
error = copyin(uap->hdtr, &hdtr, sizeof(hdtr));
|
|
|
|
if (error)
|
|
|
|
goto done;
|
|
|
|
/*
|
|
|
|
* Send any headers. Wimp out and use writev(2).
|
|
|
|
*/
|
|
|
|
if (hdtr.headers != NULL) {
|
|
|
|
nuap.fd = uap->s;
|
|
|
|
nuap.iovp = hdtr.headers;
|
|
|
|
nuap.iovcnt = hdtr.hdr_cnt;
|
|
|
|
error = writev(p, &nuap);
|
|
|
|
if (error)
|
|
|
|
goto done;
|
|
|
|
sbytes += p->p_retval[0];
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Protect against multiple writers to the socket.
|
|
|
|
*/
|
|
|
|
(void) sblock(&so->so_snd, M_WAITOK);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Loop through the pages in the file, starting with the requested
|
|
|
|
* offset. Get a file page (do I/O if necessary), map the file page
|
|
|
|
* into an sf_buf, attach an mbuf header to the sf_buf, and queue
|
|
|
|
* it on the socket.
|
|
|
|
*/
|
|
|
|
for (off = uap->offset; ; off += xfsize, sbytes += xfsize) {
|
|
|
|
vm_pindex_t pindex;
|
1998-12-03 12:35:47 +00:00
|
|
|
vm_offset_t pgoff;
|
1998-11-05 14:28:26 +00:00
|
|
|
|
|
|
|
pindex = OFF_TO_IDX(off);
|
|
|
|
retry_lookup:
|
|
|
|
/*
|
|
|
|
* Calculate the amount to transfer. Not to exceed a page,
|
|
|
|
* the EOF, or the passed in nbytes.
|
|
|
|
*/
|
|
|
|
xfsize = obj->un_pager.vnp.vnp_size - off;
|
|
|
|
if (xfsize > PAGE_SIZE)
|
|
|
|
xfsize = PAGE_SIZE;
|
1998-12-03 12:35:47 +00:00
|
|
|
pgoff = (vm_offset_t)(off & PAGE_MASK);
|
|
|
|
if (PAGE_SIZE - pgoff < xfsize)
|
|
|
|
xfsize = PAGE_SIZE - pgoff;
|
1998-11-05 14:28:26 +00:00
|
|
|
if (uap->nbytes && xfsize > (uap->nbytes - sbytes))
|
|
|
|
xfsize = uap->nbytes - sbytes;
|
|
|
|
if (xfsize <= 0)
|
|
|
|
break;
|
1998-11-06 19:16:30 +00:00
|
|
|
/*
|
|
|
|
* Optimize the non-blocking case by looking at the socket space
|
|
|
|
* before going to the extra work of constituting the sf_buf.
|
|
|
|
*/
|
|
|
|
if ((so->so_state & SS_NBIO) && sbspace(&so->so_snd) <= 0) {
|
|
|
|
if (so->so_state & SS_CANTSENDMORE)
|
|
|
|
error = EPIPE;
|
|
|
|
else
|
|
|
|
error = EAGAIN;
|
|
|
|
sbunlock(&so->so_snd);
|
|
|
|
goto done;
|
|
|
|
}
|
1998-11-05 14:28:26 +00:00
|
|
|
/*
|
1999-01-24 01:15:58 +00:00
|
|
|
* Attempt to look up the page.
|
|
|
|
*
|
|
|
|
* Allocate if not found
|
|
|
|
*
|
|
|
|
* Wait and loop if busy.
|
1998-11-05 14:28:26 +00:00
|
|
|
*/
|
2001-05-25 19:23:04 +00:00
|
|
|
mtx_lock(&vm_mtx);
|
1998-11-05 14:28:26 +00:00
|
|
|
pg = vm_page_lookup(obj, pindex);
|
1999-01-24 01:15:58 +00:00
|
|
|
|
|
|
|
if (pg == NULL) {
|
|
|
|
pg = vm_page_alloc(obj, pindex, VM_ALLOC_NORMAL);
|
|
|
|
if (pg == NULL) {
|
|
|
|
VM_WAIT;
|
2001-05-25 19:23:04 +00:00
|
|
|
mtx_unlock(&vm_mtx);
|
1999-01-24 01:15:58 +00:00
|
|
|
goto retry_lookup;
|
|
|
|
}
|
|
|
|
vm_page_wakeup(pg);
|
|
|
|
} else if (vm_page_sleep_busy(pg, TRUE, "sfpbsy")) {
|
2001-05-25 19:23:04 +00:00
|
|
|
mtx_unlock(&vm_mtx);
|
1999-01-24 01:15:58 +00:00
|
|
|
goto retry_lookup;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Wire the page so it does not get ripped out from under
|
|
|
|
* us.
|
|
|
|
*/
|
|
|
|
|
|
|
|
vm_page_wire(pg);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If page is not valid for what we need, initiate I/O
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (!pg->valid || !vm_page_is_valid(pg, pgoff, xfsize)) {
|
1998-11-05 14:28:26 +00:00
|
|
|
struct uio auio;
|
|
|
|
struct iovec aiov;
|
|
|
|
int bsize;
|
|
|
|
|
|
|
|
/*
|
1999-01-24 01:15:58 +00:00
|
|
|
* Ensure that our page is still around when the I/O
|
|
|
|
* completes.
|
1998-11-05 14:28:26 +00:00
|
|
|
*/
|
|
|
|
vm_page_io_start(pg);
|
2001-05-25 19:23:04 +00:00
|
|
|
mtx_unlock(&vm_mtx);
|
1999-01-24 01:15:58 +00:00
|
|
|
|
1998-11-05 14:28:26 +00:00
|
|
|
/*
|
|
|
|
* Get the page from backing store.
|
|
|
|
*/
|
|
|
|
bsize = vp->v_mount->mnt_stat.f_iosize;
|
|
|
|
auio.uio_iov = &aiov;
|
|
|
|
auio.uio_iovcnt = 1;
|
|
|
|
aiov.iov_base = 0;
|
|
|
|
aiov.iov_len = MAXBSIZE;
|
|
|
|
auio.uio_resid = MAXBSIZE;
|
|
|
|
auio.uio_offset = trunc_page(off);
|
|
|
|
auio.uio_segflg = UIO_NOCOPY;
|
|
|
|
auio.uio_rw = UIO_READ;
|
1998-11-14 23:36:17 +00:00
|
|
|
auio.uio_procp = p;
|
1998-11-05 14:28:26 +00:00
|
|
|
vn_lock(vp, LK_SHARED | LK_NOPAUSE | LK_RETRY, p);
|
|
|
|
error = VOP_READ(vp, &auio, IO_VMIO | ((MAXBSIZE / bsize) << 16),
|
|
|
|
p->p_ucred);
|
|
|
|
VOP_UNLOCK(vp, 0, p);
|
2001-05-25 19:23:04 +00:00
|
|
|
mtx_lock(&vm_mtx);
|
1998-11-05 14:28:26 +00:00
|
|
|
vm_page_flag_clear(pg, PG_ZERO);
|
1998-11-14 23:36:17 +00:00
|
|
|
vm_page_io_finish(pg);
|
1998-11-05 14:28:26 +00:00
|
|
|
if (error) {
|
|
|
|
vm_page_unwire(pg, 0);
|
|
|
|
/*
|
|
|
|
* See if anyone else might know about this page.
|
|
|
|
* If not and it is not valid, then free it.
|
|
|
|
*/
|
|
|
|
if (pg->wire_count == 0 && pg->valid == 0 &&
|
|
|
|
pg->busy == 0 && !(pg->flags & PG_BUSY) &&
|
2000-11-12 14:51:15 +00:00
|
|
|
pg->hold_count == 0) {
|
|
|
|
vm_page_busy(pg);
|
1998-11-05 14:28:26 +00:00
|
|
|
vm_page_free(pg);
|
2000-11-12 14:51:15 +00:00
|
|
|
}
|
2001-05-25 19:23:04 +00:00
|
|
|
mtx_unlock(&vm_mtx);
|
1998-11-05 14:28:26 +00:00
|
|
|
sbunlock(&so->so_snd);
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
}
|
1999-01-24 01:15:58 +00:00
|
|
|
|
2001-03-08 16:28:10 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Get a sendfile buf. We usually wait as long as necessary,
|
|
|
|
* but this wait can be interrupted.
|
|
|
|
*/
|
2001-05-25 19:23:04 +00:00
|
|
|
mtx_unlock(&vm_mtx);
|
2001-03-08 16:28:10 +00:00
|
|
|
if ((sf = sf_buf_alloc()) == NULL) {
|
2001-05-25 19:23:04 +00:00
|
|
|
mtx_lock(&vm_mtx);
|
2001-03-08 16:28:10 +00:00
|
|
|
vm_page_unwire(pg, 0);
|
|
|
|
if (pg->wire_count == 0 && pg->object == NULL)
|
|
|
|
vm_page_free(pg);
|
2001-05-25 19:23:04 +00:00
|
|
|
mtx_unlock(&vm_mtx);
|
2001-03-08 16:28:10 +00:00
|
|
|
sbunlock(&so->so_snd);
|
|
|
|
error = EINTR;
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
|
1998-11-05 14:28:26 +00:00
|
|
|
/*
|
|
|
|
* Allocate a kernel virtual page and insert the physical page
|
|
|
|
* into it.
|
|
|
|
*/
|
2001-05-25 19:23:04 +00:00
|
|
|
mtx_lock(&vm_mtx);
|
1998-11-05 14:28:26 +00:00
|
|
|
sf->m = pg;
|
|
|
|
pmap_qenter(sf->kva, &pg, 1);
|
2001-05-25 19:23:04 +00:00
|
|
|
mtx_unlock(&vm_mtx);
|
1998-11-05 14:28:26 +00:00
|
|
|
/*
|
|
|
|
* Get an mbuf header and set it up as having external storage.
|
|
|
|
*/
|
2000-12-21 21:44:31 +00:00
|
|
|
MGETHDR(m, M_TRYWAIT, MT_DATA);
|
1999-12-12 05:52:51 +00:00
|
|
|
if (m == NULL) {
|
|
|
|
error = ENOBUFS;
|
2000-12-03 01:35:46 +00:00
|
|
|
sf_buf_free((void *)sf->kva, NULL);
|
2001-03-08 16:28:10 +00:00
|
|
|
sbunlock(&so->so_snd);
|
1999-12-12 05:52:51 +00:00
|
|
|
goto done;
|
|
|
|
}
|
2000-08-19 08:32:59 +00:00
|
|
|
/*
|
|
|
|
* Setup external storage for mbuf.
|
|
|
|
*/
|
2000-11-11 23:04:15 +00:00
|
|
|
MEXTADD(m, sf->kva, PAGE_SIZE, sf_buf_free, NULL, M_RDONLY,
|
|
|
|
EXT_SFBUF);
|
1998-12-03 12:35:47 +00:00
|
|
|
m->m_data = (char *) sf->kva + pgoff;
|
1998-11-05 14:28:26 +00:00
|
|
|
m->m_pkthdr.len = m->m_len = xfsize;
|
|
|
|
/*
|
|
|
|
* Add the buffer to the socket buffer chain.
|
|
|
|
*/
|
|
|
|
s = splnet();
|
|
|
|
retry_space:
|
|
|
|
/*
|
|
|
|
* Make sure that the socket is still able to take more data.
|
|
|
|
* CANTSENDMORE being true usually means that the connection
|
|
|
|
* was closed. so_error is true when an error was sensed after
|
|
|
|
* a previous send.
|
|
|
|
* The state is checked after the page mapping and buffer
|
|
|
|
* allocation above since those operations may block and make
|
|
|
|
* any socket checks stale. From this point forward, nothing
|
|
|
|
* blocks before the pru_send (or more accurately, any blocking
|
|
|
|
* results in a loop back to here to re-check).
|
|
|
|
*/
|
|
|
|
if ((so->so_state & SS_CANTSENDMORE) || so->so_error) {
|
|
|
|
if (so->so_state & SS_CANTSENDMORE) {
|
|
|
|
error = EPIPE;
|
|
|
|
} else {
|
|
|
|
error = so->so_error;
|
|
|
|
so->so_error = 0;
|
|
|
|
}
|
|
|
|
m_freem(m);
|
|
|
|
sbunlock(&so->so_snd);
|
|
|
|
splx(s);
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
/*
|
|
|
|
* Wait for socket space to become available. We do this just
|
|
|
|
* after checking the connection state above in order to avoid
|
|
|
|
* a race condition with sbwait().
|
|
|
|
*/
|
1998-11-15 16:55:09 +00:00
|
|
|
if (sbspace(&so->so_snd) < so->so_snd.sb_lowat) {
|
1998-11-06 19:16:30 +00:00
|
|
|
if (so->so_state & SS_NBIO) {
|
|
|
|
m_freem(m);
|
|
|
|
sbunlock(&so->so_snd);
|
|
|
|
splx(s);
|
|
|
|
error = EAGAIN;
|
|
|
|
goto done;
|
|
|
|
}
|
1998-11-05 14:28:26 +00:00
|
|
|
error = sbwait(&so->so_snd);
|
|
|
|
/*
|
|
|
|
* An error from sbwait usually indicates that we've
|
|
|
|
* been interrupted by a signal. If we've sent anything
|
|
|
|
* then return bytes sent, otherwise return the error.
|
|
|
|
*/
|
|
|
|
if (error) {
|
|
|
|
m_freem(m);
|
|
|
|
sbunlock(&so->so_snd);
|
|
|
|
splx(s);
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
goto retry_space;
|
|
|
|
}
|
|
|
|
error = (*so->so_proto->pr_usrreqs->pru_send)(so, 0, m, 0, 0, p);
|
|
|
|
splx(s);
|
|
|
|
if (error) {
|
|
|
|
sbunlock(&so->so_snd);
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
sbunlock(&so->so_snd);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Send trailers. Wimp out and use writev(2).
|
|
|
|
*/
|
|
|
|
if (uap->hdtr != NULL && hdtr.trailers != NULL) {
|
|
|
|
nuap.fd = uap->s;
|
|
|
|
nuap.iovp = hdtr.trailers;
|
|
|
|
nuap.iovcnt = hdtr.trl_cnt;
|
|
|
|
error = writev(p, &nuap);
|
|
|
|
if (error)
|
|
|
|
goto done;
|
|
|
|
sbytes += p->p_retval[0];
|
|
|
|
}
|
|
|
|
|
|
|
|
done:
|
2001-04-26 00:14:14 +00:00
|
|
|
/*
|
|
|
|
* If there was no error we have to clear p->p_retval[0]
|
|
|
|
* because it may have been set by writev.
|
|
|
|
*/
|
|
|
|
if (error == 0) {
|
|
|
|
p->p_retval[0] = 0;
|
|
|
|
}
|
1998-11-05 14:28:26 +00:00
|
|
|
if (uap->sbytes != NULL) {
|
|
|
|
copyout(&sbytes, uap->sbytes, sizeof(off_t));
|
|
|
|
}
|
1999-09-19 17:00:25 +00:00
|
|
|
if (vp)
|
|
|
|
vrele(vp);
|
2000-11-18 21:01:04 +00:00
|
|
|
if (fp)
|
|
|
|
fdrop(fp, p);
|
1998-11-05 14:28:26 +00:00
|
|
|
return (error);
|
|
|
|
}
|