1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Copyright (c) 1982, 1986, 1988, 1991, 1993
|
|
|
|
* The Regents of the University of California. All rights reserved.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
|
|
|
* 3. All advertising materials mentioning features or use of this software
|
|
|
|
* must display the following acknowledgement:
|
|
|
|
* This product includes software developed by the University of
|
|
|
|
* California, Berkeley and its contributors.
|
|
|
|
* 4. Neither the name of the University nor the names of its contributors
|
|
|
|
* may be used to endorse or promote products derived from this software
|
|
|
|
* without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
|
|
|
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
|
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
|
|
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
|
|
|
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
|
|
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
|
|
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
|
|
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
|
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
|
|
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
|
|
|
* SUCH DAMAGE.
|
|
|
|
*
|
|
|
|
* @(#)uipc_mbuf.c 8.2 (Berkeley) 1/4/94
|
1999-08-28 01:08:13 +00:00
|
|
|
* $FreeBSD$
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
|
|
|
|
2001-06-01 09:51:14 +00:00
|
|
|
#include "opt_param.h"
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <sys/param.h>
|
|
|
|
#include <sys/systm.h>
|
2001-05-01 08:13:21 +00:00
|
|
|
#include <sys/kernel.h>
|
|
|
|
#include <sys/lock.h>
|
1997-12-28 01:01:13 +00:00
|
|
|
#include <sys/malloc.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <sys/mbuf.h>
|
1997-02-24 20:30:58 +00:00
|
|
|
#include <sys/sysctl.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <sys/domain.h>
|
|
|
|
#include <sys/protosw.h>
|
2001-05-01 08:13:21 +00:00
|
|
|
|
1995-07-29 11:44:31 +00:00
|
|
|
int max_linkhdr;
|
|
|
|
int max_protohdr;
|
|
|
|
int max_hdr;
|
|
|
|
int max_datalen;
|
Big mbuf subsystem diff #1: incorporate mutexes and fix things up somewhat
to accommodate the changes.
Here's a list of things that have changed (I may have left out a few); for a
relatively complete list, see http://people.freebsd.org/~bmilekic/mtx_journal
* Remove old (once useful) mcluster code for MCLBYTES > PAGE_SIZE which
nobody uses anymore. It was great while it lasted, but now we're moving
onto bigger and better things (Approved by: wollman).
* Practically re-wrote the allocation macros in sys/sys/mbuf.h to accommodate
new allocations which grab the necessary lock.
* Make sure that necessary mbstat variables are manipulated with
corresponding atomic() routines.
* Changed the "wait" routines, cleaned it up, made one routine that does
the job.
* Generalized MWAKEUP() macro. Got rid of m_retry and m_retryhdr, as they
are now included in the generalized "wait" routines.
* Sleep routines now use msleep().
* Free lists have locks.
* etc... probably other stuff I'm missing...
Things to look out for and work on later:
* find a better way to (dynamically) adjust EXT_COUNTERS
* move necessity to recurse on a lock from drain routines by providing
lock-free lower-level version of MFREE() (and possibly m_free()?).
* checkout include of mutex.h in sys/sys/mbuf.h - probably violating
general philosophy here.
The code has been reviewed quite a bit, but problems may arise... please,
don't panic! Send me Emails: bmilekic@freebsd.org
Reviewed by: jlemon, cp, alfred, others?
2000-09-30 06:30:39 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* sysctl(8) exported objects
|
|
|
|
*/
|
1999-02-16 10:49:55 +00:00
|
|
|
SYSCTL_DECL(_kern_ipc);
|
1997-02-24 20:30:58 +00:00
|
|
|
SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
|
|
|
|
&max_linkhdr, 0, "");
|
|
|
|
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
|
|
|
|
&max_protohdr, 0, "");
|
|
|
|
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0, "");
|
|
|
|
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
|
|
|
|
&max_datalen, 0, "");
|
1994-05-24 10:09:53 +00:00
|
|
|
|
Implement m_getm() which will perform an "all or nothing" mbuf + cluster
allocation, as required.
If m_getm() receives NULL as a first argument, then it allocates `len'
(second argument) bytes worth of mbufs + clusters and returns the chain
only if it was able to allocate everything.
If the first argument is non-NULL, then it should be an existing mbuf
chain (e.g. pre-allocated mbuf sitting on a ring, on some list, etc.) and
so it will allocate `len' bytes worth of clusters and mbufs, as needed,
and append them to the tail of the passed in chain, only if it was able
to allocate everything requested.
If allocation fails, only what was allocated by the routine will be freed,
and NULL will be returned.
Also, get rid of existing m_getm() in netncp code and replace calls to it
to calls to this new generic code.
Heavily Reviewed by: bp
2001-02-14 05:13:04 +00:00
|
|
|
/*
|
|
|
|
* struct mbuf *
|
|
|
|
* m_getm(m, len, how, type)
|
|
|
|
*
|
|
|
|
* This will allocate len-worth of mbufs and/or mbuf clusters (whatever fits
|
|
|
|
* best) and return a pointer to the top of the allocated chain. If m is
|
|
|
|
* non-null, then we assume that it is a single mbuf or an mbuf chain to
|
|
|
|
* which we want len bytes worth of mbufs and/or clusters attached, and so
|
|
|
|
* if we succeed in allocating it, we will just return a pointer to m.
|
|
|
|
*
|
|
|
|
* If we happen to fail at any point during the allocation, we will free
|
|
|
|
* up everything we have already allocated and return NULL.
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
struct mbuf *
m_getm(struct mbuf *m, int len, int how, int type)
{
	struct mbuf *top, *tail, *mp, *mtail = NULL;

	KASSERT(len >= 0, ("len is < 0 in m_getm"));

	/* Allocate the first new mbuf; back it with a cluster if the request
	 * is larger than what a plain mbuf can hold. */
	MGET(mp, how, type);
	if (mp == NULL)
		return (NULL);
	else if (len > MINCLSIZE) {
		MCLGET(mp, how);
		if ((mp->m_flags & M_EXT) == 0) {
			m_free(mp);
			return (NULL);
		}
	}
	mp->m_len = 0;
	/* Credit the space this mbuf provides against the request. */
	len -= M_TRAILINGSPACE(mp);

	/* Remember the tail of the caller's chain (if any) so the newly
	 * built chain can be appended to it on success; otherwise the new
	 * chain itself is what we return. */
	if (m != NULL)
		for (mtail = m; mtail->m_next != NULL; mtail = mtail->m_next);
	else
		m = mp;

	top = tail = mp;
	/* Keep allocating mbufs (+ clusters when worthwhile) until the
	 * accumulated trailing space covers the whole request. */
	while (len > 0) {
		MGET(mp, how, type);
		if (mp == NULL)
			goto failed;

		tail->m_next = mp;
		tail = mp;
		if (len > MINCLSIZE) {
			MCLGET(mp, how);
			if ((mp->m_flags & M_EXT) == 0)
				goto failed;
		}

		mp->m_len = 0;
		len -= M_TRAILINGSPACE(mp);
	}

	/* Success: splice the new chain onto the caller's tail, if any. */
	if (mtail != NULL)
		mtail->m_next = top;
	return (m);

failed:
	/* All-or-nothing: free only what this routine allocated. */
	m_freem(top);
	return (NULL);
}
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
void
|
2001-02-11 05:02:06 +00:00
|
|
|
m_freem(struct mbuf *m)
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2002-02-05 02:00:56 +00:00
|
|
|
while (m) {
|
|
|
|
m = m_free(m);
|
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Lesser-used path for M_PREPEND:
|
|
|
|
* allocate new mbuf to prepend to chain,
|
|
|
|
* copy junk along.
|
|
|
|
*/
|
|
|
|
struct mbuf *
m_prepend(struct mbuf *m, int len, int how)
{
	struct mbuf *mn;

	MGET(mn, how, m->m_type);
	if (mn == NULL) {
		/* Allocation failed: the whole original chain is freed,
		 * matching M_PREPEND()'s contract. */
		m_freem(m);
		return (NULL);
	}
	if (m->m_flags & M_PKTHDR) {
		/* Move the packet header to the new chain head. */
		M_COPY_PKTHDR(mn, m);
		m->m_flags &= ~M_PKTHDR;
	}
	mn->m_next = m;
	m = mn;
	if (len < MHLEN)
		/* Push the new data region to the end of the mbuf so
		 * later prepends still have room in front. */
		MH_ALIGN(m, len);
	m->m_len = len;
	return (m);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Make a copy of an mbuf chain starting "off0" bytes from the beginning,
|
|
|
|
* continuing for "len" bytes. If len is M_COPYALL, copy to end of mbuf.
|
2000-12-21 21:44:31 +00:00
|
|
|
* The wait parameter is a choice of M_TRYWAIT/M_DONTWAIT from caller.
|
1999-12-01 22:31:32 +00:00
|
|
|
* Note that the copy is read-only, because clusters are not copied,
|
|
|
|
* only their reference counts are incremented.
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
|
|
|
struct mbuf *
m_copym(struct mbuf *m, int off0, int len, int wait)
{
	struct mbuf *n, **np;
	int off = off0;
	struct mbuf *top;
	int copyhdr = 0;

	KASSERT(off >= 0, ("m_copym, negative off %d", off));
	KASSERT(len >= 0, ("m_copym, negative len %d", len));
	/* Copy the packet header only when copying from the very start. */
	if (off == 0 && m->m_flags & M_PKTHDR)
		copyhdr = 1;
	/* Advance to the mbuf containing offset `off0'. */
	while (off > 0) {
		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == NULL) {
			/* Running off the end is only legal for M_COPYALL. */
			KASSERT(len == M_COPYALL,
			    ("m_copym, length > size of mbuf chain"));
			break;
		}
		MGET(n, wait, m->m_type);
		*np = n;
		if (n == NULL)
			goto nospace;
		if (copyhdr) {
			M_COPY_PKTHDR(n, m);
			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;
			copyhdr = 0;
		}
		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			/* Share the external cluster (read-only copy):
			 * just bump its reference count. */
			n->m_data = m->m_data + off;
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
			MEXT_ADD_REF(m);
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	if (top == NULL)
		mbstat.m_mcfail++;	/* XXX: No consistency. */

	return (top);
nospace:
	m_freem(top);
	mbstat.m_mcfail++;	/* XXX: No consistency. */
	return (NULL);
}
|
|
|
|
|
1996-05-08 19:38:27 +00:00
|
|
|
/*
|
|
|
|
* Copy an entire packet, including header (which must be present).
|
|
|
|
* An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
|
1999-12-01 22:31:32 +00:00
|
|
|
* Note that the copy is read-only, because clusters are not copied,
|
|
|
|
* only their reference counts are incremented.
|
2001-02-20 08:23:41 +00:00
|
|
|
* Preserve alignment of the first mbuf so if the creator has left
|
|
|
|
* some room at the beginning (e.g. for inserting protocol headers)
|
|
|
|
* the copies still have the room available.
|
1996-05-08 19:38:27 +00:00
|
|
|
*/
|
|
|
|
struct mbuf *
m_copypacket(struct mbuf *m, int how)
{
	struct mbuf *top, *n, *o;

	MGET(n, how, m->m_type);
	top = n;
	if (n == NULL)
		goto nospace;

	M_COPY_PKTHDR(n, m);
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		/* Share the cluster read-only; bump its refcount. */
		n->m_data = m->m_data;
		n->m_ext = m->m_ext;
		n->m_flags |= M_EXT;
		MEXT_ADD_REF(m);
	} else {
		/* Preserve the original's data offset so any headroom the
		 * creator left at the front is kept in the copy. */
		n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat );
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
	}

	/* Copy (or share) the rest of the chain, one mbuf at a time. */
	m = m->m_next;
	while (m) {
		MGET(o, how, m->m_type);
		if (o == NULL)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data;
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
			MEXT_ADD_REF(m);
		} else {
			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
		}

		m = m->m_next;
	}
	return top;
nospace:
	m_freem(top);
	mbstat.m_mcfail++;	/* XXX: No consistency. */
	return (NULL);
}
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Copy data from an mbuf chain starting "off" bytes from the beginning,
|
|
|
|
* continuing for "len" bytes, into the indicated buffer.
|
|
|
|
*/
|
1994-05-25 09:21:21 +00:00
|
|
|
void
m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
{
	unsigned count;

	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
	/* Skip to the mbuf containing the starting offset. */
	while (off > 0) {
		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	/* Copy out `len' bytes, walking the chain as needed. */
	while (len > 0) {
		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}
|
|
|
|
|
1999-12-01 22:31:32 +00:00
|
|
|
/*
|
|
|
|
* Copy a packet header mbuf chain into a completely new chain, including
|
|
|
|
* copying any mbuf clusters. Use this instead of m_copypacket() when
|
|
|
|
* you need a writable copy of an mbuf chain.
|
|
|
|
*/
|
|
|
|
struct mbuf *
m_dup(struct mbuf *m, int how)
{
	struct mbuf **p, *top = NULL;
	int remain, moff, nsize;

	/* Sanity check */
	if (m == NULL)
		return (NULL);
	KASSERT((m->m_flags & M_PKTHDR) != 0, ("%s: !PKTHDR", __func__));

	/* While there's more data, get a new mbuf, tack it on, and fill it */
	remain = m->m_pkthdr.len;
	moff = 0;
	p = &top;
	while (remain > 0 || top == NULL) {	/* allow m->m_pkthdr.len == 0 */
		struct mbuf *n;

		/* Get the next new mbuf */
		MGET(n, how, m->m_type);
		if (n == NULL)
			goto nospace;
		if (top == NULL) {		/* first one, must be PKTHDR */
			M_COPY_PKTHDR(n, m);
			nsize = MHLEN;
		} else				/* not the first one */
			nsize = MLEN;
		if (remain >= MINCLSIZE) {
			/* Enough data left to justify a cluster. */
			MCLGET(n, how);
			if ((n->m_flags & M_EXT) == 0) {
				(void)m_free(n);
				goto nospace;
			}
			nsize = MCLBYTES;
		}
		n->m_len = 0;

		/* Link it into the new chain */
		*p = n;
		p = &n->m_next;

		/* Copy data from original mbuf(s) into new mbuf */
		while (n->m_len < nsize && m != NULL) {
			int chunk = min(nsize - n->m_len, m->m_len - moff);

			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
			moff += chunk;
			n->m_len += chunk;
			remain -= chunk;
			/* Exhausted this source mbuf; move to the next. */
			if (moff == m->m_len) {
				m = m->m_next;
				moff = 0;
			}
		}

		/* Check correct total mbuf length */
		KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
		    ("%s: bogus m_pkthdr.len", __func__));
	}
	return (top);

nospace:
	m_freem(top);
	mbstat.m_mcfail++;	/* XXX: No consistency. */
	return (NULL);
}
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Concatenate mbuf chain n to m.
|
|
|
|
* Both chains must be of the same type (e.g. MT_DATA).
|
|
|
|
* Any m_pkthdr is not updated.
|
|
|
|
*/
|
1994-05-25 09:21:21 +00:00
|
|
|
void
m_cat(struct mbuf *m, struct mbuf *n)
{
	/* Find the tail of the destination chain. */
	while (m->m_next)
		m = m->m_next;
	while (n) {
		/* Can't compact into a shared cluster, and don't compact if
		 * the data wouldn't fit in the tail mbuf's storage. */
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}
|
|
|
|
|
1994-05-25 09:21:21 +00:00
|
|
|
/*
 * Trim `req_len' bytes from an mbuf chain: from the head if req_len is
 * positive, from the tail if negative.  Updates m_pkthdr.len when present.
 */
void
m_adj(struct mbuf *mp, int req_len)
{
	int len = req_len;
	struct mbuf *m;
	int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				/* Whole mbuf consumed; leave it in the chain
				 * with zero length. */
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			/* Fast path: the trim fits entirely in the tail. */
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		/* Zero the lengths of any mbufs past the new end. */
		while (m->m_next)
			(m = m->m_next)->m_len = 0;
	}
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Rearrange an mbuf chain so that len bytes are contiguous
|
|
|
|
* and in the data area of an mbuf (so that mtod and dtom
|
|
|
|
* will work for a structure of size len). Returns the resulting
|
|
|
|
* mbuf chain on success, frees it and returns null on failure.
|
|
|
|
* If there is room, it will add up to max_protohdr-len extra bytes to the
|
|
|
|
* contiguous region in an attempt to avoid being called next time.
|
|
|
|
*/
|
|
|
|
struct mbuf *
m_pullup(struct mbuf *n, int len)
{
	struct mbuf *m;
	int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		/* More than MHLEN contiguous bytes can never fit. */
		if (len > MHLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == NULL)
			goto bad;
		m->m_len = 0;
		if (n->m_flags & M_PKTHDR) {
			M_COPY_PKTHDR(m, n);
			n->m_flags &= ~M_PKTHDR;
		}
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		/* Pull up to max_protohdr extra bytes if there's room, to
		 * avoid being called again soon. */
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);	/* drained; free and advance */
	} while (len > 0 && n);
	if (len > 0) {
		/* Chain too short to satisfy the request. */
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	mbstat.m_mpfail++;	/* XXX: No consistency. */
	return (NULL);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Partition an mbuf chain in two pieces, returning the tail --
|
|
|
|
* all but the first len0 bytes. In case of failure, it returns NULL and
|
|
|
|
* attempts to restore the chain to its original state.
|
|
|
|
*/
|
|
|
|
struct mbuf *
m_split(struct mbuf *m0, int len0, int wait)
{
	struct mbuf *m, *n;
	unsigned len = len0, remain;

	/* Find the mbuf where the split point falls. */
	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == NULL)
		return (NULL);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		/* Build a packet header for the tail half. */
		MGETHDR(n, wait, m0->m_type);
		if (n == NULL)
			return (NULL);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			/* Recurse to split the data into a separate chain
			 * hung off the new header mbuf. */
			n->m_next = m_split(m, len, wait);
			if (n->m_next == NULL) {
				(void) m_free(n);
				return (NULL);
			} else
				return (n);
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		/* Split falls exactly on an mbuf boundary: just unlink. */
		n = m->m_next;
		m->m_next = NULL;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == NULL)
			return (NULL);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		/* Share the cluster between both halves. */
		n->m_flags |= M_EXT;
		n->m_ext = m->m_ext;
		MEXT_ADD_REF(m);
		m->m_ext.ext_size = 0; /* For Accounting XXXXXX danger */
		n->m_data = m->m_data + len;
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = NULL;
	return (n);
}
|
|
|
|
/*
|
|
|
|
* Routine to copy from device local memory into mbufs.
|
2001-06-20 19:48:35 +00:00
|
|
|
* Note that `off' argument is offset into first mbuf of target chain from
|
|
|
|
* which to begin copying the data to.
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
|
|
|
struct mbuf *
m_devget(char *buf, int totlen, int off, struct ifnet *ifp,
    void (*copy)(char *from, caddr_t to, u_int len))
{
	struct mbuf *m;
	struct mbuf *top = 0, **mp = &top;
	int len;

	if (off < 0 || off > MHLEN)
		return (NULL);

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	len = MHLEN;

	while (totlen > 0) {
		/* First iteration reuses the header mbuf allocated above. */
		if (top) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				m_freem(top);
				return (NULL);
			}
			len = MLEN;
		}
		if (totlen + off >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if (m->m_flags & M_EXT)
				len = MCLBYTES;
		} else {
			/*
			 * Place initial small packet/header at end of mbuf.
			 */
			if (top == NULL && totlen + off + max_linkhdr <= len) {
				m->m_data += max_linkhdr;
				len -= max_linkhdr;
			}
		}
		/* Apply the caller's starting offset to the first mbuf only. */
		if (off) {
			m->m_data += off;
			len -= off;
			off = 0;
		}
		m->m_len = len = min(totlen, len);
		/* Use the device-supplied copy routine when given, else a
		 * plain bcopy from local memory. */
		if (copy)
			copy(buf, mtod(m, caddr_t), (unsigned)len);
		else
			bcopy(buf, mtod(m, caddr_t), (unsigned)len);
		buf += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
	}
	return (top);
}
|
1994-10-04 06:50:01 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Copy data from a buffer back into the indicated mbuf chain,
|
|
|
|
* starting "off" bytes from the beginning, extending the mbuf
|
|
|
|
* chain if necessary.
|
|
|
|
*/
|
|
|
|
void
m_copyback(struct mbuf *m0, int off, int len, caddr_t cp)
{
	int mlen;
	struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == NULL)
		return;
	/* Walk (and, if needed, grow) the chain out to offset `off'. */
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == NULL) {
			/* Extend with a zeroed mbuf so any gap between the
			 * old end and `off' reads as zeros. */
			n = m_get_clrd(M_DONTWAIT, m->m_type);
			if (n == NULL)
				goto out;
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		mlen = min (m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == NULL) {
			/* Out of room: extend the chain to fit the rest.
			 * Allocation failure silently truncates the copy. */
			n = m_get(M_DONTWAIT, m->m_type);
			if (n == NULL)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
	/* Grow m_pkthdr.len if the chain was extended past it. */
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}
|
1999-11-01 15:03:20 +00:00
|
|
|
|
|
|
|
void
|
|
|
|
m_print(const struct mbuf *m)
|
|
|
|
{
|
|
|
|
int len;
|
1999-12-20 18:10:00 +00:00
|
|
|
const struct mbuf *m2;
|
1999-11-01 15:03:20 +00:00
|
|
|
|
|
|
|
len = m->m_pkthdr.len;
|
|
|
|
m2 = m;
|
|
|
|
while (len) {
|
|
|
|
printf("%p %*D\n", m2, m2->m_len, (u_char *)m2->m_data, "-");
|
|
|
|
len -= m2->m_len;
|
|
|
|
m2 = m2->m_next;
|
|
|
|
}
|
|
|
|
return;
|
|
|
|
}
|