Step 2.5: Stop using 'struct mbuf_ext_pgs' in the kernel itself.

Reviewed by:	gallatin
Differential Revision:	https://reviews.freebsd.org/D24598
Gleb Smirnoff 2020-05-03 00:08:05 +00:00
parent b363a438b1
commit bccf6e26e9
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=360578
5 changed files with 70 additions and 102 deletions
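The rewrite below is mechanical: every function that used to declare a local 'struct mbuf_ext_pgs *' alias (named ext_pgs or pgs) pointing at &m->m_ext_pgs now reads and writes the fields through m->m_ext_pgs directly, and the local goes away. A minimal sketch of the pattern, assuming the struct mbuf / m_ext_pgs layout at this revision; the example_init_*() helpers and the particular fields shown are illustrative only, not code from the tree:

#include <sys/param.h>
#include <sys/mbuf.h>

/* Before this commit: cache a pointer to the embedded structure. */
static void
example_init_old(struct mbuf *m)
{
	struct mbuf_ext_pgs *ext_pgs;

	ext_pgs = &m->m_ext_pgs;
	ext_pgs->npgs = 0;
	ext_pgs->hdr_len = 0;
	ext_pgs->trail_len = 0;
}

/* After this commit: go through the mbuf itself, no local alias. */
static void
example_init_new(struct mbuf *m)
{
	m->m_ext_pgs.npgs = 0;
	m->m_ext_pgs.hdr_len = 0;
	m->m_ext_pgs.trail_len = 0;
}

The same transformation repeats in every hunk of the five changed files below.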


@@ -934,7 +934,6 @@ mb_unmapped_free_mext(struct mbuf *m)
static struct mbuf *
_mb_unmapped_to_ext(struct mbuf *m)
{
-struct mbuf_ext_pgs *ext_pgs;
struct mbuf *m_new, *top, *prev, *mref;
struct sf_buf *sf;
vm_page_t pg;
@@ -943,9 +942,8 @@ _mb_unmapped_to_ext(struct mbuf *m)
u_int ref_inc = 0;
MBUF_EXT_PGS_ASSERT(m);
-ext_pgs = &m->m_ext_pgs;
len = m->m_len;
-KASSERT(ext_pgs->tls == NULL, ("%s: can't convert TLS mbuf %p",
+KASSERT(m->m_ext_pgs.tls == NULL, ("%s: can't convert TLS mbuf %p",
__func__, m));
/* See if this is the mbuf that holds the embedded refcount. */
@@ -963,11 +961,11 @@ _mb_unmapped_to_ext(struct mbuf *m)
off = mtod(m, vm_offset_t);
top = NULL;
-if (ext_pgs->hdr_len != 0) {
-if (off >= ext_pgs->hdr_len) {
-off -= ext_pgs->hdr_len;
+if (m->m_ext_pgs.hdr_len != 0) {
+if (off >= m->m_ext_pgs.hdr_len) {
+off -= m->m_ext_pgs.hdr_len;
} else {
-seglen = ext_pgs->hdr_len - off;
+seglen = m->m_ext_pgs.hdr_len - off;
segoff = off;
seglen = min(seglen, len);
off = 0;
@@ -981,8 +979,8 @@ _mb_unmapped_to_ext(struct mbuf *m)
seglen);
}
}
-pgoff = ext_pgs->first_pg_off;
-for (i = 0; i < ext_pgs->npgs && len > 0; i++) {
+pgoff = m->m_ext_pgs.first_pg_off;
+for (i = 0; i < m->m_ext_pgs.npgs && len > 0; i++) {
pglen = m_epg_pagelen(m, i, pgoff);
if (off >= pglen) {
off -= pglen;
@@ -1018,9 +1016,9 @@ _mb_unmapped_to_ext(struct mbuf *m)
pgoff = 0;
};
if (len != 0) {
-KASSERT((off + len) <= ext_pgs->trail_len,
+KASSERT((off + len) <= m->m_ext_pgs.trail_len,
("off + len > trail (%d + %d > %d)", off, len,
-ext_pgs->trail_len));
+m->m_ext_pgs.trail_len));
m_new = m_get(M_NOWAIT, MT_DATA);
if (m_new == NULL)
goto fail;
@@ -1119,22 +1117,20 @@ struct mbuf *
mb_alloc_ext_pgs(int how, m_ext_free_t ext_free)
{
struct mbuf *m;
-struct mbuf_ext_pgs *ext_pgs;
m = m_get(how, MT_DATA);
if (m == NULL)
return (NULL);
-ext_pgs = &m->m_ext_pgs;
-ext_pgs->npgs = 0;
-ext_pgs->nrdy = 0;
-ext_pgs->first_pg_off = 0;
-ext_pgs->last_pg_len = 0;
-ext_pgs->flags = 0;
-ext_pgs->hdr_len = 0;
-ext_pgs->trail_len = 0;
-ext_pgs->tls = NULL;
-ext_pgs->so = NULL;
+m->m_ext_pgs.npgs = 0;
+m->m_ext_pgs.nrdy = 0;
+m->m_ext_pgs.first_pg_off = 0;
+m->m_ext_pgs.last_pg_len = 0;
+m->m_ext_pgs.flags = 0;
+m->m_ext_pgs.hdr_len = 0;
+m->m_ext_pgs.trail_len = 0;
+m->m_ext_pgs.tls = NULL;
+m->m_ext_pgs.so = NULL;
m->m_data = NULL;
m->m_flags |= (M_EXT | M_RDONLY | M_NOMAP);
m->m_ext.ext_type = EXT_PGS;


@@ -188,7 +188,6 @@ sendfile_free_mext(struct mbuf *m)
static void
sendfile_free_mext_pg(struct mbuf *m)
{
-struct mbuf_ext_pgs *ext_pgs;
vm_page_t pg;
int flags, i;
bool cache_last;
@@ -197,11 +196,10 @@ sendfile_free_mext_pg(struct mbuf *m)
("%s: m %p !M_EXT or !EXT_PGS", __func__, m));
cache_last = m->m_ext.ext_flags & EXT_FLAG_CACHE_LAST;
-ext_pgs = &m->m_ext_pgs;
flags = (m->m_ext.ext_flags & EXT_FLAG_NOCACHE) != 0 ? VPR_TRYFREE : 0;
-for (i = 0; i < ext_pgs->npgs; i++) {
-if (cache_last && i == ext_pgs->npgs - 1)
+for (i = 0; i < m->m_ext_pgs.npgs; i++) {
+if (cache_last && i == m->m_ext_pgs.npgs - 1)
flags = 0;
pg = PHYS_TO_VM_PAGE(m->m_epg_pa[i]);
vm_page_release(pg, flags);
@@ -692,7 +690,6 @@ vn_sendfile(struct file *fp, int sockfd, struct uio *hdr_uio,
#ifdef KERN_TLS
struct ktls_session *tls;
#endif
-struct mbuf_ext_pgs *ext_pgs;
struct mbuf *m, *mh, *mhtail;
struct sf_buf *sf;
struct shmfd *shmfd;
@@ -1029,7 +1026,6 @@ vn_sendfile(struct file *fp, int sockfd, struct uio *hdr_uio,
sfs->count++;
mtx_unlock(&sfs->mtx);
}
-ext_pgs = &m0->m_ext_pgs;
ext_pgs_idx = 0;
/* Append to mbuf chain. */
@@ -1038,18 +1034,18 @@ vn_sendfile(struct file *fp, int sockfd, struct uio *hdr_uio,
else
m = m0;
mtail = m0;
-ext_pgs->first_pg_off =
+m0->m_ext_pgs.first_pg_off =
vmoff(i, off) & PAGE_MASK;
}
if (nios) {
mtail->m_flags |= M_NOTREADY;
-ext_pgs->nrdy++;
+m0->m_ext_pgs.nrdy++;
}
m0->m_epg_pa[ext_pgs_idx] = VM_PAGE_TO_PHYS(pga);
-ext_pgs->npgs++;
+m0->m_ext_pgs.npgs++;
xfs = xfsize(i, npages, off, space);
-ext_pgs->last_pg_len = xfs;
+m0->m_ext_pgs.last_pg_len = xfs;
MBUF_EXT_PGS_ASSERT_SANITY(m0);
mtail->m_len += xfs;
mtail->m_ext.ext_size += PAGE_SIZE;


@@ -1287,14 +1287,12 @@ ktls_destroy(struct ktls_session *tls)
void
ktls_seq(struct sockbuf *sb, struct mbuf *m)
{
-struct mbuf_ext_pgs *pgs;
for (; m != NULL; m = m->m_next) {
KASSERT((m->m_flags & M_NOMAP) != 0,
("ktls_seq: mapped mbuf %p", m));
-pgs = &m->m_ext_pgs;
-pgs->seqno = sb->sb_tls_seqno;
+m->m_ext_pgs.seqno = sb->sb_tls_seqno;
sb->sb_tls_seqno++;
}
}
@@ -1318,7 +1316,6 @@ ktls_frame(struct mbuf *top, struct ktls_session *tls, int *enq_cnt,
{
struct tls_record_layer *tlshdr;
struct mbuf *m;
-struct mbuf_ext_pgs *pgs;
uint64_t *noncep;
uint16_t tls_len;
int maxlen;
@@ -1341,13 +1338,12 @@ ktls_frame(struct mbuf *top, struct ktls_session *tls, int *enq_cnt,
("ktls_frame: mapped mbuf %p (top = %p)\n", m, top));
tls_len = m->m_len;
-pgs = &m->m_ext_pgs;
/* Save a reference to the session. */
-pgs->tls = ktls_hold(tls);
+m->m_ext_pgs.tls = ktls_hold(tls);
-pgs->hdr_len = tls->params.tls_hlen;
-pgs->trail_len = tls->params.tls_tlen;
+m->m_ext_pgs.hdr_len = tls->params.tls_hlen;
+m->m_ext_pgs.trail_len = tls->params.tls_tlen;
if (tls->params.cipher_algorithm == CRYPTO_AES_CBC) {
int bs, delta;
@@ -1369,9 +1365,9 @@ ktls_frame(struct mbuf *top, struct ktls_session *tls, int *enq_cnt,
*/
bs = tls->params.tls_bs;
delta = (tls_len + tls->params.tls_tlen) & (bs - 1);
-pgs->trail_len -= delta;
+m->m_ext_pgs.trail_len -= delta;
}
-m->m_len += pgs->hdr_len + pgs->trail_len;
+m->m_len += m->m_ext_pgs.hdr_len + m->m_ext_pgs.trail_len;
/* Populate the TLS header. */
tlshdr = (void *)m->m_epg_hdr;
@@ -1386,7 +1382,7 @@ ktls_frame(struct mbuf *top, struct ktls_session *tls, int *enq_cnt,
tlshdr->tls_vminor = TLS_MINOR_VER_TWO;
tlshdr->tls_type = TLS_RLTYPE_APP;
/* save the real record type for later */
-pgs->record_type = record_type;
+m->m_ext_pgs.record_type = record_type;
m->m_epg_trail[0] = record_type;
} else {
tlshdr->tls_vminor = tls->params.tls_vminor;
@@ -1423,8 +1419,8 @@ ktls_frame(struct mbuf *top, struct ktls_session *tls, int *enq_cnt,
*/
if (tls->mode == TCP_TLS_MODE_SW) {
m->m_flags |= M_NOTREADY;
-pgs->nrdy = pgs->npgs;
-*enq_cnt += pgs->npgs;
+m->m_ext_pgs.nrdy = m->m_ext_pgs.npgs;
+*enq_cnt += m->m_ext_pgs.npgs;
}
}
}
@@ -1432,15 +1428,12 @@ ktls_frame(struct mbuf *top, struct ktls_session *tls, int *enq_cnt,
void
ktls_enqueue_to_free(struct mbuf *m)
{
-struct mbuf_ext_pgs *pgs;
struct ktls_wq *wq;
bool running;
-pgs = &m->m_ext_pgs;
/* Mark it for freeing. */
-pgs->flags |= EPG_FLAG_2FREE;
-wq = &ktls_wq[pgs->tls->wq_index];
+m->m_ext_pgs.flags |= EPG_FLAG_2FREE;
+wq = &ktls_wq[m->m_ext_pgs.tls->wq_index];
mtx_lock(&wq->mtx);
STAILQ_INSERT_TAIL(&wq->head, m, m_ext_pgs.stailq);
running = wq->running;
@@ -1452,7 +1445,6 @@ ktls_enqueue_to_free(struct mbuf *m)
void
ktls_enqueue(struct mbuf *m, struct socket *so, int page_count)
{
-struct mbuf_ext_pgs *pgs;
struct ktls_wq *wq;
bool running;
@@ -1461,19 +1453,17 @@ ktls_enqueue(struct mbuf *m, struct socket *so, int page_count)
("ktls_enqueue: %p not unready & nomap mbuf\n", m));
KASSERT(page_count != 0, ("enqueueing TLS mbuf with zero page count"));
-pgs = &m->m_ext_pgs;
+KASSERT(m->m_ext_pgs.tls->mode == TCP_TLS_MODE_SW, ("!SW TLS mbuf"));
-KASSERT(pgs->tls->mode == TCP_TLS_MODE_SW, ("!SW TLS mbuf"));
-pgs->enc_cnt = page_count;
+m->m_ext_pgs.enc_cnt = page_count;
/*
* Save a pointer to the socket. The caller is responsible
* for taking an additional reference via soref().
*/
-pgs->so = so;
+m->m_ext_pgs.so = so;
-wq = &ktls_wq[pgs->tls->wq_index];
+wq = &ktls_wq[m->m_ext_pgs.tls->wq_index];
mtx_lock(&wq->mtx);
STAILQ_INSERT_TAIL(&wq->head, m, m_ext_pgs.stailq);
running = wq->running;
@@ -1489,7 +1479,6 @@ ktls_encrypt(struct mbuf *top)
struct ktls_session *tls;
struct socket *so;
struct mbuf *m;
-struct mbuf_ext_pgs *pgs;
vm_paddr_t parray[1 + btoc(TLS_MAX_MSG_SIZE_V10_2)];
struct iovec src_iov[1 + btoc(TLS_MAX_MSG_SIZE_V10_2)];
struct iovec dst_iov[1 + btoc(TLS_MAX_MSG_SIZE_V10_2)];
@@ -1526,15 +1515,13 @@ ktls_encrypt(struct mbuf *top)
*/
error = 0;
for (m = top; npages != total_pages; m = m->m_next) {
-pgs = &m->m_ext_pgs;
-KASSERT(pgs->tls == tls,
+KASSERT(m->m_ext_pgs.tls == tls,
("different TLS sessions in a single mbuf chain: %p vs %p",
-tls, pgs->tls));
+tls, m->m_ext_pgs.tls));
KASSERT((m->m_flags & (M_NOMAP | M_NOTREADY)) ==
(M_NOMAP | M_NOTREADY),
("%p not unready & nomap mbuf (top = %p)\n", m, top));
-KASSERT(npages + pgs->npgs <= total_pages,
+KASSERT(npages + m->m_ext_pgs.npgs <= total_pages,
("page count mismatch: top %p, total_pages %d, m %p", top,
total_pages, m));
@@ -1546,10 +1533,10 @@ ktls_encrypt(struct mbuf *top)
* (from sendfile), anonymous wired pages are
* allocated and assigned to the destination iovec.
*/
-is_anon = (pgs->flags & EPG_FLAG_ANON) != 0;
+is_anon = (m->m_ext_pgs.flags & EPG_FLAG_ANON) != 0;
-off = pgs->first_pg_off;
-for (i = 0; i < pgs->npgs; i++, off = 0) {
+off = m->m_ext_pgs.first_pg_off;
+for (i = 0; i < m->m_ext_pgs.npgs; i++, off = 0) {
len = m_epg_pagelen(m, i, off);
src_iov[i].iov_len = len;
src_iov[i].iov_base =
@@ -1578,8 +1565,8 @@ ktls_encrypt(struct mbuf *top)
error = (*tls->sw_encrypt)(tls,
(const struct tls_record_layer *)m->m_epg_hdr,
-m->m_epg_trail, src_iov, dst_iov, i, pgs->seqno,
-pgs->record_type);
+m->m_epg_trail, src_iov, dst_iov, i, m->m_ext_pgs.seqno,
+m->m_ext_pgs.record_type);
if (error) {
counter_u64_add(ktls_offload_failed_crypto, 1);
break;
@@ -1595,14 +1582,14 @@ ktls_encrypt(struct mbuf *top)
m->m_ext.ext_free(m);
/* Replace them with the new pages. */
-for (i = 0; i < pgs->npgs; i++)
+for (i = 0; i < m->m_ext_pgs.npgs; i++)
m->m_epg_pa[i] = parray[i];
/* Use the basic free routine. */
m->m_ext.ext_free = mb_free_mext_pgs;
/* Pages are now writable. */
-pgs->flags |= EPG_FLAG_ANON;
+m->m_ext_pgs.flags |= EPG_FLAG_ANON;
}
/*
@@ -1612,7 +1599,7 @@ ktls_encrypt(struct mbuf *top)
* yet-to-be-encrypted records having an associated
* session.
*/
-pgs->tls = NULL;
+m->m_ext_pgs.tls = NULL;
ktls_free(tls);
}


@@ -1427,7 +1427,6 @@ m_defrag(struct mbuf *m0, int how)
static int
frags_per_mbuf(struct mbuf *m)
{
-struct mbuf_ext_pgs *ext_pgs;
int frags;
if ((m->m_flags & M_NOMAP) == 0)
@@ -1440,12 +1439,11 @@ frags_per_mbuf(struct mbuf *m)
* XXX: This overestimates the number of fragments by assuming
* all the backing physical pages are disjoint.
*/
-ext_pgs = &m->m_ext_pgs;
frags = 0;
-if (ext_pgs->hdr_len != 0)
+if (m->m_ext_pgs.hdr_len != 0)
frags++;
-frags += ext_pgs->npgs;
-if (ext_pgs->trail_len != 0)
+frags += m->m_ext_pgs.npgs;
+if (m->m_ext_pgs.trail_len != 0)
frags++;
return (frags);
@@ -1628,12 +1626,10 @@ m_fragment(struct mbuf *m0, int how, int length)
void
mb_free_mext_pgs(struct mbuf *m)
{
-struct mbuf_ext_pgs *ext_pgs;
vm_page_t pg;
MBUF_EXT_PGS_ASSERT(m);
-ext_pgs = &m->m_ext_pgs;
-for (int i = 0; i < ext_pgs->npgs; i++) {
+for (int i = 0; i < m->m_ext_pgs.npgs; i++) {
pg = PHYS_TO_VM_PAGE(m->m_epg_pa[i]);
vm_page_unwire_noq(pg);
vm_page_free(pg);
@@ -1644,7 +1640,6 @@ static struct mbuf *
m_uiotombuf_nomap(struct uio *uio, int how, int len, int maxseg, int flags)
{
struct mbuf *m, *mb, *prev;
-struct mbuf_ext_pgs *pgs;
vm_page_t pg_array[MBUF_PEXT_MAX_PGS];
int error, length, i, needed;
ssize_t total;
@@ -1677,8 +1672,7 @@ m_uiotombuf_nomap(struct uio *uio, int how, int len, int maxseg, int flags)
else
prev->m_next = mb;
prev = mb;
-pgs = &mb->m_ext_pgs;
-pgs->flags = EPG_FLAG_ANON;
+mb->m_ext_pgs.flags = EPG_FLAG_ANON;
needed = length = MIN(maxseg, total);
for (i = 0; needed > 0; i++, needed -= PAGE_SIZE) {
retry_page:
@@ -1693,16 +1687,16 @@ m_uiotombuf_nomap(struct uio *uio, int how, int len, int maxseg, int flags)
}
pg_array[i]->flags &= ~PG_ZERO;
mb->m_epg_pa[i] = VM_PAGE_TO_PHYS(pg_array[i]);
-pgs->npgs++;
+mb->m_ext_pgs.npgs++;
}
-pgs->last_pg_len = length - PAGE_SIZE * (pgs->npgs - 1);
+mb->m_ext_pgs.last_pg_len = length - PAGE_SIZE * (mb->m_ext_pgs.npgs - 1);
MBUF_EXT_PGS_ASSERT_SANITY(mb);
total -= length;
error = uiomove_fromphys(pg_array, 0, length, uio);
if (error != 0)
goto failed;
mb->m_len = length;
-mb->m_ext.ext_size += PAGE_SIZE * pgs->npgs;
+mb->m_ext.ext_size += PAGE_SIZE * mb->m_ext_pgs.npgs;
if (flags & M_PKTHDR)
m->m_pkthdr.len += length;
}
@@ -1778,23 +1772,21 @@ m_uiotombuf(struct uio *uio, int how, int len, int align, int flags)
int
m_unmappedtouio(const struct mbuf *m, int m_off, struct uio *uio, int len)
{
-struct mbuf_ext_pgs *ext_pgs;
vm_page_t pg;
int error, i, off, pglen, pgoff, seglen, segoff;
MBUF_EXT_PGS_ASSERT(m);
-ext_pgs = __DECONST(void *, &m->m_ext_pgs);
error = 0;
/* Skip over any data removed from the front. */
off = mtod(m, vm_offset_t);
off += m_off;
-if (ext_pgs->hdr_len != 0) {
-if (off >= ext_pgs->hdr_len) {
-off -= ext_pgs->hdr_len;
+if (m->m_ext_pgs.hdr_len != 0) {
+if (off >= m->m_ext_pgs.hdr_len) {
+off -= m->m_ext_pgs.hdr_len;
} else {
-seglen = ext_pgs->hdr_len - off;
+seglen = m->m_ext_pgs.hdr_len - off;
segoff = off;
seglen = min(seglen, len);
off = 0;
@@ -1803,8 +1795,8 @@ m_unmappedtouio(const struct mbuf *m, int m_off, struct uio *uio, int len)
&m->m_epg_hdr[segoff]), seglen, uio);
}
}
-pgoff = ext_pgs->first_pg_off;
-for (i = 0; i < ext_pgs->npgs && error == 0 && len > 0; i++) {
+pgoff = m->m_ext_pgs.first_pg_off;
+for (i = 0; i < m->m_ext_pgs.npgs && error == 0 && len > 0; i++) {
pglen = m_epg_pagelen(m, i, pgoff);
if (off >= pglen) {
off -= pglen;
@@ -1821,9 +1813,9 @@ m_unmappedtouio(const struct mbuf *m, int m_off, struct uio *uio, int len)
pgoff = 0;
};
if (len != 0 && error == 0) {
-KASSERT((off + len) <= ext_pgs->trail_len,
+KASSERT((off + len) <= m->m_ext_pgs.trail_len,
("off + len > trail (%d + %d > %d, m_off = %d)", off, len,
-ext_pgs->trail_len, m_off));
+m->m_ext_pgs.trail_len, m_off));
error = uiomove(__DECONST(void *, &m->m_epg_trail[off]),
len, uio);
}


@@ -128,21 +128,18 @@ sbready_compress(struct sockbuf *sb, struct mbuf *m0, struct mbuf *end)
(n->m_flags & M_NOMAP) &&
!mbuf_has_tls_session(m) &&
!mbuf_has_tls_session(n)) {
-struct mbuf_ext_pgs *mpgs, *npgs;
int hdr_len, trail_len;
-mpgs = &m->m_ext_pgs;
-npgs = &n->m_ext_pgs;
-hdr_len = npgs->hdr_len;
-trail_len = mpgs->trail_len;
+hdr_len = n->m_ext_pgs.hdr_len;
+trail_len = m->m_ext_pgs.trail_len;
if (trail_len != 0 && hdr_len != 0 &&
trail_len + hdr_len <= MBUF_PEXT_TRAIL_LEN) {
/* copy n's header to m's trailer */
memcpy(&m->m_epg_trail[trail_len],
n->m_epg_hdr, hdr_len);
-mpgs->trail_len += hdr_len;
+m->m_ext_pgs.trail_len += hdr_len;
m->m_len += hdr_len;
-npgs->hdr_len = 0;
+n->m_ext_pgs.hdr_len = 0;
n->m_len -= hdr_len;
}
}