Step 3: anonymize struct mbuf_ext_pgs and move all its fields into struct
mbuf within the m_epg namespace.

All edits except the 'struct mbuf' declaration and mb_dupcl() were done
mechanically with sed:

s/->m_ext_pgs.nrdy/->m_epg_nrdy/g
s/->m_ext_pgs.hdr_len/->m_epg_hdrlen/g
s/->m_ext_pgs.trail_len/->m_epg_trllen/g
s/->m_ext_pgs.first_pg_off/->m_epg_1st_off/g
s/->m_ext_pgs.last_pg_len/->m_epg_last_len/g
s/->m_ext_pgs.flags/->m_epg_flags/g
s/->m_ext_pgs.record_type/->m_epg_record_type/g
s/->m_ext_pgs.enc_cnt/->m_epg_enc_cnt/g
s/->m_ext_pgs.tls/->m_epg_tls/g
s/->m_ext_pgs.so/->m_epg_so/g
s/->m_ext_pgs.seqno/->m_epg_seqno/g
s/->m_ext_pgs.stailq/->m_epg_stailq/g
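The rename keeps the storage identical; only the spelling of the accessors
changes. A minimal before/after illustration (not part of the diff, field
values taken from ktls_frame() below):

    /* Before: fields reached through the named m_ext_pgs member. */
    m->m_ext_pgs.hdr_len = tls->params.tls_hlen;
    m->m_ext_pgs.trail_len = tls->params.tls_tlen;

    /* After: same storage in an anonymous struct, m_epg_* names. */
    m->m_epg_hdrlen = tls->params.tls_hlen;
    m->m_epg_trllen = tls->params.tls_tlen;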

Reviewed by:	gallatin
Differential Revision:	https://reviews.freebsd.org/D24598
Gleb Smirnoff 2020-05-03 00:12:56 +00:00
parent bccf6e26e9
commit 7b6c99d08d
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=360579
16 changed files with 206 additions and 204 deletions

View File

@@ -922,8 +922,8 @@ ktls_tcp_payload_length(struct tlspcb *tlsp, struct mbuf *m_tls)
* trim the length to avoid sending any of the trailer. There
* is no way to send a partial trailer currently.
*/
if (mlen > TLS_HEADER_LENGTH + plen - m_tls->m_ext_pgs.trail_len)
mlen = TLS_HEADER_LENGTH + plen - m_tls->m_ext_pgs.trail_len;
if (mlen > TLS_HEADER_LENGTH + plen - m_tls->m_epg_trllen)
mlen = TLS_HEADER_LENGTH + plen - m_tls->m_epg_trllen;
/*
@@ -964,7 +964,7 @@ ktls_payload_offset(struct tlspcb *tlsp, struct mbuf *m_tls)
mlen = mtod(m_tls, vm_offset_t) + m_tls->m_len;
MPASS(mlen < TLS_HEADER_LENGTH + plen);
#endif
if (mtod(m_tls, vm_offset_t) <= m_tls->m_ext_pgs.hdr_len)
if (mtod(m_tls, vm_offset_t) <= m_tls->m_epg_hdrlen)
return (0);
if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM) {
/*
@@ -975,8 +975,8 @@ ktls_payload_offset(struct tlspcb *tlsp, struct mbuf *m_tls)
* the offset at the last byte of the record payload
* to send the last cipher block.
*/
offset = min(mtod(m_tls, vm_offset_t) - m_tls->m_ext_pgs.hdr_len,
(plen - TLS_HEADER_LENGTH - m_tls->m_ext_pgs.trail_len) - 1);
offset = min(mtod(m_tls, vm_offset_t) - m_tls->m_epg_hdrlen,
(plen - TLS_HEADER_LENGTH - m_tls->m_epg_trllen) - 1);
return (rounddown(offset, AES_BLOCK_LEN));
}
return (0);
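Worked example of the offset computation above, with illustrative numbers:
m_epg_hdrlen = 13, a starting offset mtod(m_tls, vm_offset_t) = 500, and
plen - TLS_HEADER_LENGTH - m_epg_trllen = 1000 give
offset = min(487, 999) = 487, and rounddown(487, 16) = 480, so
transmission resumes on an AES block boundary.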
@@ -1009,7 +1009,7 @@ ktls_wr_len(struct tlspcb *tlsp, struct mbuf *m, struct mbuf *m_tls,
* excluding header and trailer.
*/
tlen = ktls_tcp_payload_length(tlsp, m_tls);
if (tlen <= m_tls->m_ext_pgs.hdr_len) {
if (tlen <= m_tls->m_epg_hdrlen) {
/*
* For requests that only want to send the TLS header,
* send a tunnelled packet as immediate data.
@@ -1035,7 +1035,7 @@ ktls_wr_len(struct tlspcb *tlsp, struct mbuf *m, struct mbuf *m_tls,
}
hdr = (void *)m_tls->m_epg_hdr;
plen = TLS_HEADER_LENGTH + ntohs(hdr->tls_length) - m_tls->m_ext_pgs.trail_len;
plen = TLS_HEADER_LENGTH + ntohs(hdr->tls_length) - m_tls->m_epg_trllen;
if (tlen < plen) {
plen = tlen;
offset = ktls_payload_offset(tlsp, m_tls);
@@ -1052,14 +1052,14 @@ ktls_wr_len(struct tlspcb *tlsp, struct mbuf *m, struct mbuf *m_tls,
*/
imm_len = 0;
if (offset == 0)
imm_len += m_tls->m_ext_pgs.hdr_len;
imm_len += m_tls->m_epg_hdrlen;
if (plen == tlen)
imm_len += AES_BLOCK_LEN;
wr_len += roundup2(imm_len, 16);
/* TLS record payload via DSGL. */
*nsegsp = sglist_count_mbuf_epg(m_tls, m_tls->m_ext_pgs.hdr_len + offset,
plen - (m_tls->m_ext_pgs.hdr_len + offset));
*nsegsp = sglist_count_mbuf_epg(m_tls, m_tls->m_epg_hdrlen + offset,
plen - (m_tls->m_epg_hdrlen + offset));
wr_len += ktls_sgl_size(*nsegsp);
wr_len = roundup2(wr_len, 16);
@@ -1595,18 +1595,18 @@ ktls_write_tls_wr(struct tlspcb *tlsp, struct sge_txq *txq,
/* Locate the TLS header. */
MBUF_EXT_PGS_ASSERT(m_tls);
hdr = (void *)m_tls->m_epg_hdr;
plen = TLS_HEADER_LENGTH + ntohs(hdr->tls_length) - m_tls->m_ext_pgs.trail_len;
plen = TLS_HEADER_LENGTH + ntohs(hdr->tls_length) - m_tls->m_epg_trllen;
/* Determine how much of the TLS record to send. */
tlen = ktls_tcp_payload_length(tlsp, m_tls);
if (tlen <= m_tls->m_ext_pgs.hdr_len) {
if (tlen <= m_tls->m_epg_hdrlen) {
/*
* For requests that only want to send the TLS header,
* send a tunnelled packet as immediate data.
*/
#ifdef VERBOSE_TRACES
CTR3(KTR_CXGBE, "%s: tid %d header-only TLS record %u",
__func__, tlsp->tid, (u_int)m_tls->m_ext_pgs.seqno);
__func__, tlsp->tid, (u_int)m_tls->m_epg_seqno);
#endif
return (ktls_write_tunnel_packet(txq, dst, m, m_tls, available,
tcp_seqno, pidx));
@@ -1616,7 +1616,7 @@ ktls_write_tls_wr(struct tlspcb *tlsp, struct sge_txq *txq,
offset = ktls_payload_offset(tlsp, m_tls);
#ifdef VERBOSE_TRACES
CTR4(KTR_CXGBE, "%s: tid %d short TLS record %u with offset %u",
__func__, tlsp->tid, (u_int)m_tls->m_ext_pgs.seqno, offset);
__func__, tlsp->tid, (u_int)m_tls->m_epg_seqno, offset);
#endif
if (m_tls->m_next == NULL && (tcp->th_flags & TH_FIN) != 0) {
txq->kern_tls_fin_short++;
@@ -1671,10 +1671,10 @@ ktls_write_tls_wr(struct tlspcb *tlsp, struct sge_txq *txq,
*/
tx_max_offset = mtod(m_tls, vm_offset_t);
if (tx_max_offset > TLS_HEADER_LENGTH + ntohs(hdr->tls_length) -
m_tls->m_ext_pgs.trail_len) {
m_tls->m_epg_trllen) {
/* Always send the full trailer. */
tx_max_offset = TLS_HEADER_LENGTH + ntohs(hdr->tls_length) -
m_tls->m_ext_pgs.trail_len;
m_tls->m_epg_trllen;
}
if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_CBC &&
tx_max_offset > TLS_HEADER_LENGTH) {
@@ -1789,15 +1789,15 @@ ktls_write_tls_wr(struct tlspcb *tlsp, struct sge_txq *txq,
/* Recalculate 'nsegs' if cached value is not available. */
if (nsegs == 0)
nsegs = sglist_count_mbuf_epg(m_tls, m_tls->m_ext_pgs.hdr_len +
offset, plen - (m_tls->m_ext_pgs.hdr_len + offset));
nsegs = sglist_count_mbuf_epg(m_tls, m_tls->m_epg_hdrlen +
offset, plen - (m_tls->m_epg_hdrlen + offset));
/* Calculate the size of the TLS work request. */
twr_len = ktls_base_wr_size(tlsp);
imm_len = 0;
if (offset == 0)
imm_len += m_tls->m_ext_pgs.hdr_len;
imm_len += m_tls->m_epg_hdrlen;
if (plen == tlen)
imm_len += AES_BLOCK_LEN;
twr_len += roundup2(imm_len, 16);
@@ -1913,13 +1913,13 @@ ktls_write_tls_wr(struct tlspcb *tlsp, struct sge_txq *txq,
cipher_stop = 0;
sec_pdu->pldlen = htobe32(16 + plen -
(m_tls->m_ext_pgs.hdr_len + offset));
(m_tls->m_epg_hdrlen + offset));
/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
sec_pdu->seqno_numivs = tlsp->scmd0_short.seqno_numivs;
sec_pdu->ivgen_hdrlen = htobe32(
tlsp->scmd0_short.ivgen_hdrlen |
V_SCMD_HDR_LEN(offset == 0 ? m_tls->m_ext_pgs.hdr_len : 0));
V_SCMD_HDR_LEN(offset == 0 ? m_tls->m_epg_hdrlen : 0));
txq->kern_tls_short++;
} else {
@@ -1932,7 +1932,7 @@ ktls_write_tls_wr(struct tlspcb *tlsp, struct sge_txq *txq,
aad_start = 1;
aad_stop = TLS_HEADER_LENGTH;
iv_offset = TLS_HEADER_LENGTH + 1;
cipher_start = m_tls->m_ext_pgs.hdr_len + 1;
cipher_start = m_tls->m_epg_hdrlen + 1;
if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM) {
cipher_stop = 0;
auth_start = cipher_start;
@@ -1971,7 +1971,7 @@ ktls_write_tls_wr(struct tlspcb *tlsp, struct sge_txq *txq,
V_CPL_TX_SEC_PDU_AUTHSTOP(auth_stop) |
V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert));
sec_pdu->scmd1 = htobe64(m_tls->m_ext_pgs.seqno);
sec_pdu->scmd1 = htobe64(m_tls->m_epg_seqno);
/* Key context */
out = (void *)(sec_pdu + 1);
@@ -2011,8 +2011,8 @@ ktls_write_tls_wr(struct tlspcb *tlsp, struct sge_txq *txq,
tx_data->rsvd = htobe32(tcp_seqno);
} else {
tx_data->len = htobe32(V_TX_DATA_MSS(mss) |
V_TX_LENGTH(tlen - (m_tls->m_ext_pgs.hdr_len + offset)));
tx_data->rsvd = htobe32(tcp_seqno + m_tls->m_ext_pgs.hdr_len + offset);
V_TX_LENGTH(tlen - (m_tls->m_epg_hdrlen + offset)));
tx_data->rsvd = htobe32(tcp_seqno + m_tls->m_epg_hdrlen + offset);
}
tx_data->flags = htobe32(F_TX_BYPASS);
if (last_wr && tcp->th_flags & TH_PUSH)
@@ -2021,8 +2021,8 @@ ktls_write_tls_wr(struct tlspcb *tlsp, struct sge_txq *txq,
/* Populate the TLS header */
out = (void *)(tx_data + 1);
if (offset == 0) {
memcpy(out, m_tls->m_epg_hdr, m_tls->m_ext_pgs.hdr_len);
out += m_tls->m_ext_pgs.hdr_len;
memcpy(out, m_tls->m_epg_hdr, m_tls->m_epg_hdrlen);
out += m_tls->m_epg_hdrlen;
}
/* AES IV for a short record. */
@@ -2057,8 +2057,8 @@ ktls_write_tls_wr(struct tlspcb *tlsp, struct sge_txq *txq,
/* SGL for record payload */
sglist_reset(txq->gl);
if (sglist_append_mbuf_epg(txq->gl, m_tls, m_tls->m_ext_pgs.hdr_len + offset,
plen - (m_tls->m_ext_pgs.hdr_len + offset)) != 0) {
if (sglist_append_mbuf_epg(txq->gl, m_tls, m_tls->m_epg_hdrlen + offset,
plen - (m_tls->m_epg_hdrlen + offset)) != 0) {
#ifdef INVARIANTS
panic("%s: failed to append sglist", __func__);
#endif
@@ -2080,7 +2080,7 @@ ktls_write_tls_wr(struct tlspcb *tlsp, struct sge_txq *txq,
txq->kern_tls_waste += mtod(m_tls, vm_offset_t);
else
txq->kern_tls_waste += mtod(m_tls, vm_offset_t) -
(m_tls->m_ext_pgs.hdr_len + offset);
(m_tls->m_epg_hdrlen + offset);
}
txsd = &txq->sdesc[pidx];

View File

@@ -2423,11 +2423,11 @@ count_mbuf_ext_pgs(struct mbuf *m, int skip, vm_paddr_t *nextaddr)
off += skip;
len -= skip;
if (m->m_ext_pgs.hdr_len != 0) {
if (off >= m->m_ext_pgs.hdr_len) {
off -= m->m_ext_pgs.hdr_len;
if (m->m_epg_hdrlen != 0) {
if (off >= m->m_epg_hdrlen) {
off -= m->m_epg_hdrlen;
} else {
seglen = m->m_ext_pgs.hdr_len - off;
seglen = m->m_epg_hdrlen - off;
segoff = off;
seglen = min(seglen, len);
off = 0;
@@ -2439,8 +2439,8 @@ count_mbuf_ext_pgs(struct mbuf *m, int skip, vm_paddr_t *nextaddr)
*nextaddr = paddr + seglen;
}
}
pgoff = m->m_ext_pgs.first_pg_off;
for (i = 0; i < m->m_ext_pgs.npgs && len > 0; i++) {
pgoff = m->m_epg_1st_off;
for (i = 0; i < m->m_epg_npgs && len > 0; i++) {
pglen = m_epg_pagelen(m, i, pgoff);
if (off >= pglen) {
off -= pglen;
@@ -2459,7 +2459,7 @@ count_mbuf_ext_pgs(struct mbuf *m, int skip, vm_paddr_t *nextaddr)
pgoff = 0;
};
if (len != 0) {
seglen = min(len, m->m_ext_pgs.trail_len - off);
seglen = min(len, m->m_epg_trllen - off);
len -= seglen;
paddr = pmap_kextract((vm_offset_t)&m->m_epg_trail[off]);
if (*nextaddr != paddr)
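count_mbuf_ext_pgs() above shows the canonical walk over an unmapped mbuf:
optional TLS header, then the page array, then the optional trailer. The
same shape recurs in _mb_unmapped_to_ext(), _bus_dmamap_load_mbuf_epg() and
the sglist routines further down. A condensed sketch of the pattern with
the new names, omitting the off/len clipping the real routines perform and
assuming a direct map (PHYS_TO_DMAP); visit() is a hypothetical per-segment
callback:

    static void
    epg_walk(struct mbuf *m, void (*visit)(void *, size_t))
    {
    	int i, pgoff;

    	if (m->m_epg_hdrlen != 0)	/* TLS header, stored in-mbuf */
    		visit(&m->m_epg_hdr[0], m->m_epg_hdrlen);
    	pgoff = m->m_epg_1st_off;	/* only page 0 has an offset */
    	for (i = 0; i < m->m_epg_npgs; i++) {
    		visit((void *)PHYS_TO_DMAP(m->m_epg_pa[i] + pgoff),
    		    m_epg_pagelen(m, i, pgoff));
    		pgoff = 0;
    	}
    	if (m->m_epg_trllen != 0)	/* TLS trailer, stored in-mbuf */
    		visit(&m->m_epg_trail[0], m->m_epg_trllen);
    }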

View File

@@ -733,7 +733,7 @@ t4_push_frames(struct adapter *sc, struct toepcb *toep, int drop)
if (m->m_flags & M_NOMAP) {
#ifdef KERN_TLS
if (m->m_ext_pgs.tls != NULL) {
if (m->m_epg_tls != NULL) {
toep->flags |= TPF_KTLS;
if (plen == 0) {
SOCKBUF_UNLOCK(sb);
@@ -1934,7 +1934,7 @@ aiotx_free_pgs(struct mbuf *m)
m->m_len, jobtotid(job));
#endif
for (int i = 0; i < m->m_ext_pgs.npgs; i++) {
for (int i = 0; i < m->m_epg_npgs; i++) {
pg = PHYS_TO_VM_PAGE(m->m_epg_pa[i]);
vm_page_unwire(pg, PQ_ACTIVE);
}
@@ -1989,15 +1989,15 @@ alloc_aiotx_mbuf(struct kaiocb *job, int len)
break;
}
m->m_ext_pgs.first_pg_off = pgoff;
m->m_ext_pgs.npgs = npages;
m->m_epg_1st_off = pgoff;
m->m_epg_npgs = npages;
if (npages == 1) {
KASSERT(mlen + pgoff <= PAGE_SIZE,
("%s: single page is too large (off %d len %d)",
__func__, pgoff, mlen));
m->m_ext_pgs.last_pg_len = mlen;
m->m_epg_last_len = mlen;
} else {
m->m_ext_pgs.last_pg_len = mlen - (PAGE_SIZE - pgoff) -
m->m_epg_last_len = mlen - (PAGE_SIZE - pgoff) -
(npages - 2) * PAGE_SIZE;
}
for (i = 0; i < npages; i++)
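The last-page length above is just the remainder after the first (partial)
page and the middle (full) pages. For example, with 4 KB pages,
pgoff = 1024 and mlen = 10000 span npages = 3, and
m_epg_last_len = 10000 - (4096 - 1024) - 1 * 4096 = 2832; the three pages
then hold 3072 + 4096 + 2832 = 10000 bytes.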

View File

@@ -1628,10 +1628,10 @@ count_ext_pgs_segs(struct mbuf *m)
vm_paddr_t nextpa;
u_int i, nsegs;
MPASS(m->m_ext_pgs.npgs > 0);
MPASS(m->m_epg_npgs > 0);
nsegs = 1;
nextpa = m->m_epg_pa[0] + PAGE_SIZE;
for (i = 1; i < m->m_ext_pgs.npgs; i++) {
for (i = 1; i < m->m_epg_npgs; i++) {
if (nextpa != m->m_epg_pa[i])
nsegs++;
nextpa = m->m_epg_pa[i] + PAGE_SIZE;
@@ -1653,11 +1653,11 @@ write_ktlstx_sgl(void *dst, struct mbuf *m, int nsegs)
V_ULPTX_NSGE(nsegs));
/* Figure out the first S/G length. */
pa = m->m_epg_pa[0] + m->m_ext_pgs.first_pg_off;
pa = m->m_epg_pa[0] + m->m_epg_1st_off;
usgl->addr0 = htobe64(pa);
len = m_epg_pagelen(m, 0, m->m_ext_pgs.first_pg_off);
len = m_epg_pagelen(m, 0, m->m_epg_1st_off);
pa += len;
for (i = 1; i < m->m_ext_pgs.npgs; i++) {
for (i = 1; i < m->m_epg_npgs; i++) {
if (m->m_epg_pa[i] != pa)
break;
len += m_epg_pagelen(m, i, 0);
@@ -1669,7 +1669,7 @@ write_ktlstx_sgl(void *dst, struct mbuf *m, int nsegs)
#endif
j = -1;
for (; i < m->m_ext_pgs.npgs; i++) {
for (; i < m->m_epg_npgs; i++) {
if (j == -1 || m->m_epg_pa[i] != pa) {
if (j >= 0)
usgl->sge[j / 2].len[j & 1] = htobe32(len);
@@ -1798,7 +1798,7 @@ t4_push_ktls(struct adapter *sc, struct toepcb *toep, int drop)
KASSERT(m->m_flags & M_NOMAP, ("%s: mbuf %p is not NOMAP",
__func__, m));
KASSERT(m->m_ext_pgs.tls != NULL,
KASSERT(m->m_epg_tls != NULL,
("%s: mbuf %p doesn't have TLS session", __func__, m));
/* Calculate WR length. */
@@ -1867,19 +1867,19 @@ t4_push_ktls(struct adapter *sc, struct toepcb *toep, int drop)
thdr = (struct tls_hdr *)&m->m_epg_hdr;
#ifdef VERBOSE_TRACES
CTR5(KTR_CXGBE, "%s: tid %d TLS record %ju type %d len %#x",
__func__, toep->tid, m->m_ext_pgs.seqno, thdr->type,
__func__, toep->tid, m->m_epg_seqno, thdr->type,
m->m_len);
#endif
txwr = wrtod(wr);
cpl = (struct cpl_tx_tls_sfo *)(txwr + 1);
memset(txwr, 0, roundup2(wr_len, 16));
credits = howmany(wr_len, 16);
expn_size = m->m_ext_pgs.hdr_len +
m->m_ext_pgs.trail_len;
expn_size = m->m_epg_hdrlen +
m->m_epg_trllen;
tls_size = m->m_len - expn_size;
write_tlstx_wr(txwr, toep, 0,
tls_size, expn_size, 1, credits, shove, 1);
toep->tls.tx_seq_no = m->m_ext_pgs.seqno;
toep->tls.tx_seq_no = m->m_epg_seqno;
write_tlstx_cpl(cpl, toep, thdr, tls_size, 1);
tls_copy_tx_key(toep, cpl + 1);

View File

@@ -684,7 +684,7 @@ mlx5e_sq_tls_populate(struct mbuf *mb, uint64_t *pseq)
for (; mb != NULL; mb = mb->m_next) {
if (!(mb->m_flags & M_NOMAP))
continue;
*pseq = mb->m_ext_pgs.seqno;
*pseq = mb->m_epg_seqno;
return (1);
}
return (0);

View File

@@ -833,8 +833,8 @@ mb_free_notready(struct mbuf *m, int count)
for (i = 0; i < count && m != NULL; i++) {
if ((m->m_flags & M_EXT) != 0 &&
m->m_ext.ext_type == EXT_PGS) {
m->m_ext_pgs.nrdy--;
if (m->m_ext_pgs.nrdy != 0)
m->m_epg_nrdy--;
if (m->m_epg_nrdy != 0)
continue;
}
m = m_free(m);
@@ -943,7 +943,7 @@ _mb_unmapped_to_ext(struct mbuf *m)
MBUF_EXT_PGS_ASSERT(m);
len = m->m_len;
KASSERT(m->m_ext_pgs.tls == NULL, ("%s: can't convert TLS mbuf %p",
KASSERT(m->m_epg_tls == NULL, ("%s: can't convert TLS mbuf %p",
__func__, m));
/* See if this is the mbuf that holds the embedded refcount. */
@@ -961,11 +961,11 @@ _mb_unmapped_to_ext(struct mbuf *m)
off = mtod(m, vm_offset_t);
top = NULL;
if (m->m_ext_pgs.hdr_len != 0) {
if (off >= m->m_ext_pgs.hdr_len) {
off -= m->m_ext_pgs.hdr_len;
if (m->m_epg_hdrlen != 0) {
if (off >= m->m_epg_hdrlen) {
off -= m->m_epg_hdrlen;
} else {
seglen = m->m_ext_pgs.hdr_len - off;
seglen = m->m_epg_hdrlen - off;
segoff = off;
seglen = min(seglen, len);
off = 0;
@@ -979,8 +979,8 @@ _mb_unmapped_to_ext(struct mbuf *m)
seglen);
}
}
pgoff = m->m_ext_pgs.first_pg_off;
for (i = 0; i < m->m_ext_pgs.npgs && len > 0; i++) {
pgoff = m->m_epg_1st_off;
for (i = 0; i < m->m_epg_npgs && len > 0; i++) {
pglen = m_epg_pagelen(m, i, pgoff);
if (off >= pglen) {
off -= pglen;
@@ -1016,9 +1016,9 @@ _mb_unmapped_to_ext(struct mbuf *m)
pgoff = 0;
};
if (len != 0) {
KASSERT((off + len) <= m->m_ext_pgs.trail_len,
KASSERT((off + len) <= m->m_epg_trllen,
("off + len > trail (%d + %d > %d)", off, len,
m->m_ext_pgs.trail_len));
m->m_epg_trllen));
m_new = m_get(M_NOWAIT, MT_DATA);
if (m_new == NULL)
goto fail;
@@ -1122,15 +1122,15 @@ mb_alloc_ext_pgs(int how, m_ext_free_t ext_free)
if (m == NULL)
return (NULL);
m->m_ext_pgs.npgs = 0;
m->m_ext_pgs.nrdy = 0;
m->m_ext_pgs.first_pg_off = 0;
m->m_ext_pgs.last_pg_len = 0;
m->m_ext_pgs.flags = 0;
m->m_ext_pgs.hdr_len = 0;
m->m_ext_pgs.trail_len = 0;
m->m_ext_pgs.tls = NULL;
m->m_ext_pgs.so = NULL;
m->m_epg_npgs = 0;
m->m_epg_nrdy = 0;
m->m_epg_1st_off = 0;
m->m_epg_last_len = 0;
m->m_epg_flags = 0;
m->m_epg_hdrlen = 0;
m->m_epg_trllen = 0;
m->m_epg_tls = NULL;
m->m_epg_so = NULL;
m->m_data = NULL;
m->m_flags |= (M_EXT | M_RDONLY | M_NOMAP);
m->m_ext.ext_type = EXT_PGS;
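mb_alloc_ext_pgs() returns an mbuf with every m_epg_* field zeroed; the
caller fills in the page vector and lengths. A hedged usage sketch (the
wired page 'pg' and the single-page layout are assumptions, not part of
this diff):

    struct mbuf *m;

    m = mb_alloc_ext_pgs(M_WAITOK, mb_free_mext_pgs);
    m->m_epg_pa[0] = VM_PAGE_TO_PHYS(pg);	/* caller-supplied page */
    m->m_epg_npgs = 1;
    m->m_epg_last_len = PAGE_SIZE;	/* whole page is valid data */
    m->m_len = PAGE_SIZE;
    m->m_ext.ext_size = PAGE_SIZE;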
@@ -1215,7 +1215,7 @@ mb_free_ext(struct mbuf *m)
("%s: ext_free not set", __func__));
mref->m_ext.ext_free(mref);
#ifdef KERN_TLS
tls = mref->m_ext_pgs.tls;
tls = mref->m_epg_tls;
if (tls != NULL &&
!refcount_release_if_not_last(&tls->refcount))
ktls_enqueue_to_free(mref);

View File

@@ -198,8 +198,8 @@ sendfile_free_mext_pg(struct mbuf *m)
cache_last = m->m_ext.ext_flags & EXT_FLAG_CACHE_LAST;
flags = (m->m_ext.ext_flags & EXT_FLAG_NOCACHE) != 0 ? VPR_TRYFREE : 0;
for (i = 0; i < m->m_ext_pgs.npgs; i++) {
if (cache_last && i == m->m_ext_pgs.npgs - 1)
for (i = 0; i < m->m_epg_npgs; i++) {
if (cache_last && i == m->m_epg_npgs - 1)
flags = 0;
pg = PHYS_TO_VM_PAGE(m->m_epg_pa[i]);
vm_page_release(pg, flags);
@@ -365,7 +365,7 @@ sendfile_iodone(void *arg, vm_page_t *pa, int count, int error)
#if defined(KERN_TLS) && defined(INVARIANTS)
if ((sfio->m->m_flags & M_EXT) != 0 &&
sfio->m->m_ext.ext_type == EXT_PGS)
KASSERT(sfio->tls == sfio->m->m_ext_pgs.tls,
KASSERT(sfio->tls == sfio->m->m_epg_tls,
("TLS session mismatch"));
else
KASSERT(sfio->tls == NULL,
@@ -1034,18 +1034,18 @@ vn_sendfile(struct file *fp, int sockfd, struct uio *hdr_uio,
else
m = m0;
mtail = m0;
m0->m_ext_pgs.first_pg_off =
m0->m_epg_1st_off =
vmoff(i, off) & PAGE_MASK;
}
if (nios) {
mtail->m_flags |= M_NOTREADY;
m0->m_ext_pgs.nrdy++;
m0->m_epg_nrdy++;
}
m0->m_epg_pa[ext_pgs_idx] = VM_PAGE_TO_PHYS(pga);
m0->m_ext_pgs.npgs++;
m0->m_epg_npgs++;
xfs = xfsize(i, npages, off, space);
m0->m_ext_pgs.last_pg_len = xfs;
m0->m_epg_last_len = xfs;
MBUF_EXT_PGS_ASSERT_SANITY(m0);
mtail->m_len += xfs;
mtail->m_ext.ext_size += PAGE_SIZE;

View File

@@ -129,11 +129,11 @@ _bus_dmamap_load_mbuf_epg(bus_dma_tag_t dmat, bus_dmamap_t map,
/* Skip over any data removed from the front. */
off = mtod(m, vm_offset_t);
if (m->m_ext_pgs.hdr_len != 0) {
if (off >= m->m_ext_pgs.hdr_len) {
off -= m->m_ext_pgs.hdr_len;
if (m->m_epg_hdrlen != 0) {
if (off >= m->m_epg_hdrlen) {
off -= m->m_epg_hdrlen;
} else {
seglen = m->m_ext_pgs.hdr_len - off;
seglen = m->m_epg_hdrlen - off;
segoff = off;
seglen = min(seglen, len);
off = 0;
@@ -143,8 +143,8 @@ _bus_dmamap_load_mbuf_epg(bus_dma_tag_t dmat, bus_dmamap_t map,
flags, segs, nsegs);
}
}
pgoff = m->m_ext_pgs.first_pg_off;
for (i = 0; i < m->m_ext_pgs.npgs && error == 0 && len > 0; i++) {
pgoff = m->m_epg_1st_off;
for (i = 0; i < m->m_epg_npgs && error == 0 && len > 0; i++) {
pglen = m_epg_pagelen(m, i, pgoff);
if (off >= pglen) {
off -= pglen;
@@ -161,9 +161,9 @@ _bus_dmamap_load_mbuf_epg(bus_dma_tag_t dmat, bus_dmamap_t map,
pgoff = 0;
};
if (len != 0 && error == 0) {
KASSERT((off + len) <= m->m_ext_pgs.trail_len,
KASSERT((off + len) <= m->m_epg_trllen,
("off + len > trail (%d + %d > %d)", off, len,
m->m_ext_pgs.trail_len));
m->m_epg_trllen));
error = _bus_dmamap_load_buffer(dmat, map,
&m->m_epg_trail[off], len, kernel_pmap, flags, segs,
nsegs);

View File

@@ -233,11 +233,11 @@ sglist_count_mbuf_epg(struct mbuf *m, size_t off, size_t len)
return (0);
nsegs = 0;
if (m->m_ext_pgs.hdr_len != 0) {
if (off >= m->m_ext_pgs.hdr_len) {
off -= m->m_ext_pgs.hdr_len;
if (m->m_epg_hdrlen != 0) {
if (off >= m->m_epg_hdrlen) {
off -= m->m_epg_hdrlen;
} else {
seglen = m->m_ext_pgs.hdr_len - off;
seglen = m->m_epg_hdrlen - off;
segoff = off;
seglen = MIN(seglen, len);
off = 0;
@@ -247,8 +247,8 @@ sglist_count_mbuf_epg(struct mbuf *m, size_t off, size_t len)
}
}
nextaddr = 0;
pgoff = m->m_ext_pgs.first_pg_off;
for (i = 0; i < m->m_ext_pgs.npgs && len > 0; i++) {
pgoff = m->m_epg_1st_off;
for (i = 0; i < m->m_epg_npgs && len > 0; i++) {
pglen = m_epg_pagelen(m, i, pgoff);
if (off >= pglen) {
off -= pglen;
@@ -267,7 +267,7 @@ sglist_count_mbuf_epg(struct mbuf *m, size_t off, size_t len)
pgoff = 0;
};
if (len != 0) {
seglen = MIN(len, m->m_ext_pgs.trail_len - off);
seglen = MIN(len, m->m_epg_trllen - off);
len -= seglen;
nsegs += sglist_count(&m->m_epg_trail[off], seglen);
}
@@ -391,11 +391,11 @@ sglist_append_mbuf_epg(struct sglist *sg, struct mbuf *m, size_t off,
MBUF_EXT_PGS_ASSERT(m);
error = 0;
if (m->m_ext_pgs.hdr_len != 0) {
if (off >= m->m_ext_pgs.hdr_len) {
off -= m->m_ext_pgs.hdr_len;
if (m->m_epg_hdrlen != 0) {
if (off >= m->m_epg_hdrlen) {
off -= m->m_epg_hdrlen;
} else {
seglen = m->m_ext_pgs.hdr_len - off;
seglen = m->m_epg_hdrlen - off;
segoff = off;
seglen = MIN(seglen, len);
off = 0;
@@ -404,8 +404,8 @@ sglist_append_mbuf_epg(struct sglist *sg, struct mbuf *m, size_t off,
&m->m_epg_hdr[segoff], seglen);
}
}
pgoff = m->m_ext_pgs.first_pg_off;
for (i = 0; i < m->m_ext_pgs.npgs && error == 0 && len > 0; i++) {
pgoff = m->m_epg_1st_off;
for (i = 0; i < m->m_epg_npgs && error == 0 && len > 0; i++) {
pglen = m_epg_pagelen(m, i, pgoff);
if (off >= pglen) {
off -= pglen;
@@ -422,7 +422,7 @@ sglist_append_mbuf_epg(struct sglist *sg, struct mbuf *m, size_t off,
pgoff = 0;
};
if (error == 0 && len > 0) {
seglen = MIN(len, m->m_ext_pgs.trail_len - off);
seglen = MIN(len, m->m_epg_trllen - off);
len -= seglen;
error = sglist_append(sg,
&m->m_epg_trail[off], seglen);

View File

@@ -1292,7 +1292,7 @@ ktls_seq(struct sockbuf *sb, struct mbuf *m)
KASSERT((m->m_flags & M_NOMAP) != 0,
("ktls_seq: mapped mbuf %p", m));
m->m_ext_pgs.seqno = sb->sb_tls_seqno;
m->m_epg_seqno = sb->sb_tls_seqno;
sb->sb_tls_seqno++;
}
}
@@ -1340,10 +1340,10 @@ ktls_frame(struct mbuf *top, struct ktls_session *tls, int *enq_cnt,
tls_len = m->m_len;
/* Save a reference to the session. */
m->m_ext_pgs.tls = ktls_hold(tls);
m->m_epg_tls = ktls_hold(tls);
m->m_ext_pgs.hdr_len = tls->params.tls_hlen;
m->m_ext_pgs.trail_len = tls->params.tls_tlen;
m->m_epg_hdrlen = tls->params.tls_hlen;
m->m_epg_trllen = tls->params.tls_tlen;
if (tls->params.cipher_algorithm == CRYPTO_AES_CBC) {
int bs, delta;
@@ -1365,9 +1365,9 @@ ktls_frame(struct mbuf *top, struct ktls_session *tls, int *enq_cnt,
*/
bs = tls->params.tls_bs;
delta = (tls_len + tls->params.tls_tlen) & (bs - 1);
m->m_ext_pgs.trail_len -= delta;
m->m_epg_trllen -= delta;
}
m->m_len += m->m_ext_pgs.hdr_len + m->m_ext_pgs.trail_len;
m->m_len += m->m_epg_hdrlen + m->m_epg_trllen;
/* Populate the TLS header. */
tlshdr = (void *)m->m_epg_hdr;
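For AES-CBC, m_epg_trllen starts at the maximum tls_tlen and the delta
trims it so the ciphertext is block-aligned. Worked example with
illustrative numbers: bs = 16, tls_len = 1000, tls_tlen = 21 gives
delta = (1000 + 21) & 15 = 13, so m_epg_trllen = 21 - 13 = 8 and
tls_len + trailer = 1008, a multiple of the block size.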
@@ -1382,7 +1382,7 @@ ktls_frame(struct mbuf *top, struct ktls_session *tls, int *enq_cnt,
tlshdr->tls_vminor = TLS_MINOR_VER_TWO;
tlshdr->tls_type = TLS_RLTYPE_APP;
/* save the real record type for later */
m->m_ext_pgs.record_type = record_type;
m->m_epg_record_type = record_type;
m->m_epg_trail[0] = record_type;
} else {
tlshdr->tls_vminor = tls->params.tls_vminor;
@@ -1419,8 +1419,8 @@ ktls_frame(struct mbuf *top, struct ktls_session *tls, int *enq_cnt,
*/
if (tls->mode == TCP_TLS_MODE_SW) {
m->m_flags |= M_NOTREADY;
m->m_ext_pgs.nrdy = m->m_ext_pgs.npgs;
*enq_cnt += m->m_ext_pgs.npgs;
m->m_epg_nrdy = m->m_epg_npgs;
*enq_cnt += m->m_epg_npgs;
}
}
}
@@ -1432,10 +1432,10 @@ ktls_enqueue_to_free(struct mbuf *m)
bool running;
/* Mark it for freeing. */
m->m_ext_pgs.flags |= EPG_FLAG_2FREE;
wq = &ktls_wq[m->m_ext_pgs.tls->wq_index];
m->m_epg_flags |= EPG_FLAG_2FREE;
wq = &ktls_wq[m->m_epg_tls->wq_index];
mtx_lock(&wq->mtx);
STAILQ_INSERT_TAIL(&wq->head, m, m_ext_pgs.stailq);
STAILQ_INSERT_TAIL(&wq->head, m, m_epg_stailq);
running = wq->running;
mtx_unlock(&wq->mtx);
if (!running)
@@ -1453,19 +1453,19 @@ ktls_enqueue(struct mbuf *m, struct socket *so, int page_count)
("ktls_enqueue: %p not unready & nomap mbuf\n", m));
KASSERT(page_count != 0, ("enqueueing TLS mbuf with zero page count"));
KASSERT(m->m_ext_pgs.tls->mode == TCP_TLS_MODE_SW, ("!SW TLS mbuf"));
KASSERT(m->m_epg_tls->mode == TCP_TLS_MODE_SW, ("!SW TLS mbuf"));
m->m_ext_pgs.enc_cnt = page_count;
m->m_epg_enc_cnt = page_count;
/*
* Save a pointer to the socket. The caller is responsible
* for taking an additional reference via soref().
*/
m->m_ext_pgs.so = so;
m->m_epg_so = so;
wq = &ktls_wq[m->m_ext_pgs.tls->wq_index];
wq = &ktls_wq[m->m_epg_tls->wq_index];
mtx_lock(&wq->mtx);
STAILQ_INSERT_TAIL(&wq->head, m, m_ext_pgs.stailq);
STAILQ_INSERT_TAIL(&wq->head, m, m_epg_stailq);
running = wq->running;
mtx_unlock(&wq->mtx);
if (!running)
@@ -1486,14 +1486,14 @@ ktls_encrypt(struct mbuf *top)
int error, i, len, npages, off, total_pages;
bool is_anon;
so = top->m_ext_pgs.so;
tls = top->m_ext_pgs.tls;
so = top->m_epg_so;
tls = top->m_epg_tls;
KASSERT(tls != NULL, ("tls = NULL, top = %p\n", top));
KASSERT(so != NULL, ("so = NULL, top = %p\n", top));
#ifdef INVARIANTS
top->m_ext_pgs.so = NULL;
top->m_epg_so = NULL;
#endif
total_pages = top->m_ext_pgs.enc_cnt;
total_pages = top->m_epg_enc_cnt;
npages = 0;
/*
@@ -1515,13 +1515,13 @@ ktls_encrypt(struct mbuf *top)
*/
error = 0;
for (m = top; npages != total_pages; m = m->m_next) {
KASSERT(m->m_ext_pgs.tls == tls,
KASSERT(m->m_epg_tls == tls,
("different TLS sessions in a single mbuf chain: %p vs %p",
tls, m->m_ext_pgs.tls));
tls, m->m_epg_tls));
KASSERT((m->m_flags & (M_NOMAP | M_NOTREADY)) ==
(M_NOMAP | M_NOTREADY),
("%p not unready & nomap mbuf (top = %p)\n", m, top));
KASSERT(npages + m->m_ext_pgs.npgs <= total_pages,
KASSERT(npages + m->m_epg_npgs <= total_pages,
("page count mismatch: top %p, total_pages %d, m %p", top,
total_pages, m));
@@ -1533,10 +1533,10 @@ ktls_encrypt(struct mbuf *top)
* (from sendfile), anonymous wired pages are
* allocated and assigned to the destination iovec.
*/
is_anon = (m->m_ext_pgs.flags & EPG_FLAG_ANON) != 0;
is_anon = (m->m_epg_flags & EPG_FLAG_ANON) != 0;
off = m->m_ext_pgs.first_pg_off;
for (i = 0; i < m->m_ext_pgs.npgs; i++, off = 0) {
off = m->m_epg_1st_off;
for (i = 0; i < m->m_epg_npgs; i++, off = 0) {
len = m_epg_pagelen(m, i, off);
src_iov[i].iov_len = len;
src_iov[i].iov_base =
@@ -1565,8 +1565,8 @@ ktls_encrypt(struct mbuf *top)
error = (*tls->sw_encrypt)(tls,
(const struct tls_record_layer *)m->m_epg_hdr,
m->m_epg_trail, src_iov, dst_iov, i, m->m_ext_pgs.seqno,
m->m_ext_pgs.record_type);
m->m_epg_trail, src_iov, dst_iov, i, m->m_epg_seqno,
m->m_epg_record_type);
if (error) {
counter_u64_add(ktls_offload_failed_crypto, 1);
break;
@@ -1582,14 +1582,14 @@ ktls_encrypt(struct mbuf *top)
m->m_ext.ext_free(m);
/* Replace them with the new pages. */
for (i = 0; i < m->m_ext_pgs.npgs; i++)
for (i = 0; i < m->m_epg_npgs; i++)
m->m_epg_pa[i] = parray[i];
/* Use the basic free routine. */
m->m_ext.ext_free = mb_free_mext_pgs;
/* Pages are now writable. */
m->m_ext_pgs.flags |= EPG_FLAG_ANON;
m->m_epg_flags |= EPG_FLAG_ANON;
}
/*
@@ -1599,7 +1599,7 @@ ktls_encrypt(struct mbuf *top)
* yet-to-be-encrypted records having an associated
* session.
*/
m->m_ext_pgs.tls = NULL;
m->m_epg_tls = NULL;
ktls_free(tls);
}
@@ -1639,9 +1639,9 @@ ktls_work_thread(void *ctx)
STAILQ_CONCAT(&local_head, &wq->head);
mtx_unlock(&wq->mtx);
STAILQ_FOREACH_SAFE(m, &local_head, m_ext_pgs.stailq, n) {
if (m->m_ext_pgs.flags & EPG_FLAG_2FREE) {
ktls_free(m->m_ext_pgs.tls);
STAILQ_FOREACH_SAFE(m, &local_head, m_epg_stailq, n) {
if (m->m_epg_flags & EPG_FLAG_2FREE) {
ktls_free(m->m_epg_tls);
uma_zfree(zone_mbuf, m);
} else {
ktls_encrypt(m);

View File

@@ -208,9 +208,9 @@ mb_dupcl(struct mbuf *n, struct mbuf *m)
*/
switch (m->m_ext.ext_type) {
case EXT_PGS:
bcopy(&m->m_ext, &n->m_ext, m_epg_copylen);
bcopy(&m->m_ext_pgs, &n->m_ext_pgs,
sizeof(struct mbuf_ext_pgs));
bcopy(&m->m_epg_startcopy, &n->m_epg_startcopy,
__rangeof(struct mbuf, m_epg_startcopy, m_epg_endcopy));
bcopy(&m->m_ext, &n->m_ext, m_epg_ext_copylen);
break;
case EXT_EXTREF:
bcopy(&m->m_ext, &n->m_ext, sizeof(struct m_ext));
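mb_dupcl() is one of the two hand-edited spots: with no named struct left
to copy, it copies the span of mbuf fields bracketed by the
m_epg_startcopy/m_epg_endcopy markers defined in the header below.
__rangeof() is the existing sys/cdefs.h helper, roughly:

    /* sys/cdefs.h: byte distance from member 'start' to member 'end'. */
    #define	__rangeof(type, start, end) \
    	(__offsetof(type, end) - __offsetof(type, start))

Since m_epg_endcopy is m_epg_stailq, the bcopy() duplicates m_epg_npgs
through m_epg_seqno but deliberately leaves the destination mbuf's STAILQ
linkage untouched.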
@@ -1440,10 +1440,10 @@ frags_per_mbuf(struct mbuf *m)
* all the backing physical pages are disjoint.
*/
frags = 0;
if (m->m_ext_pgs.hdr_len != 0)
if (m->m_epg_hdrlen != 0)
frags++;
frags += m->m_ext_pgs.npgs;
if (m->m_ext_pgs.trail_len != 0)
frags += m->m_epg_npgs;
if (m->m_epg_trllen != 0)
frags++;
return (frags);
@@ -1629,7 +1629,7 @@ mb_free_mext_pgs(struct mbuf *m)
vm_page_t pg;
MBUF_EXT_PGS_ASSERT(m);
for (int i = 0; i < m->m_ext_pgs.npgs; i++) {
for (int i = 0; i < m->m_epg_npgs; i++) {
pg = PHYS_TO_VM_PAGE(m->m_epg_pa[i]);
vm_page_unwire_noq(pg);
vm_page_free(pg);
@@ -1672,7 +1672,7 @@ m_uiotombuf_nomap(struct uio *uio, int how, int len, int maxseg, int flags)
else
prev->m_next = mb;
prev = mb;
mb->m_ext_pgs.flags = EPG_FLAG_ANON;
mb->m_epg_flags = EPG_FLAG_ANON;
needed = length = MIN(maxseg, total);
for (i = 0; needed > 0; i++, needed -= PAGE_SIZE) {
retry_page:
@@ -1687,16 +1687,16 @@ m_uiotombuf_nomap(struct uio *uio, int how, int len, int maxseg, int flags)
}
pg_array[i]->flags &= ~PG_ZERO;
mb->m_epg_pa[i] = VM_PAGE_TO_PHYS(pg_array[i]);
mb->m_ext_pgs.npgs++;
mb->m_epg_npgs++;
}
mb->m_ext_pgs.last_pg_len = length - PAGE_SIZE * (mb->m_ext_pgs.npgs - 1);
mb->m_epg_last_len = length - PAGE_SIZE * (mb->m_epg_npgs - 1);
MBUF_EXT_PGS_ASSERT_SANITY(mb);
total -= length;
error = uiomove_fromphys(pg_array, 0, length, uio);
if (error != 0)
goto failed;
mb->m_len = length;
mb->m_ext.ext_size += PAGE_SIZE * mb->m_ext_pgs.npgs;
mb->m_ext.ext_size += PAGE_SIZE * mb->m_epg_npgs;
if (flags & M_PKTHDR)
m->m_pkthdr.len += length;
}
@@ -1782,11 +1782,11 @@ m_unmappedtouio(const struct mbuf *m, int m_off, struct uio *uio, int len)
off = mtod(m, vm_offset_t);
off += m_off;
if (m->m_ext_pgs.hdr_len != 0) {
if (off >= m->m_ext_pgs.hdr_len) {
off -= m->m_ext_pgs.hdr_len;
if (m->m_epg_hdrlen != 0) {
if (off >= m->m_epg_hdrlen) {
off -= m->m_epg_hdrlen;
} else {
seglen = m->m_ext_pgs.hdr_len - off;
seglen = m->m_epg_hdrlen - off;
segoff = off;
seglen = min(seglen, len);
off = 0;
@@ -1795,8 +1795,8 @@ m_unmappedtouio(const struct mbuf *m, int m_off, struct uio *uio, int len)
&m->m_epg_hdr[segoff]), seglen, uio);
}
}
pgoff = m->m_ext_pgs.first_pg_off;
for (i = 0; i < m->m_ext_pgs.npgs && error == 0 && len > 0; i++) {
pgoff = m->m_epg_1st_off;
for (i = 0; i < m->m_epg_npgs && error == 0 && len > 0; i++) {
pglen = m_epg_pagelen(m, i, pgoff);
if (off >= pglen) {
off -= pglen;
@@ -1813,9 +1813,9 @@ m_unmappedtouio(const struct mbuf *m, int m_off, struct uio *uio, int len)
pgoff = 0;
};
if (len != 0 && error == 0) {
KASSERT((off + len) <= m->m_ext_pgs.trail_len,
KASSERT((off + len) <= m->m_epg_trllen,
("off + len > trail (%d + %d > %d, m_off = %d)", off, len,
m->m_ext_pgs.trail_len, m_off));
m->m_epg_trllen, m_off));
error = uiomove(__DECONST(void *, &m->m_epg_trail[off]),
len, uio);
}

View File

@@ -130,16 +130,16 @@ sbready_compress(struct sockbuf *sb, struct mbuf *m0, struct mbuf *end)
!mbuf_has_tls_session(n)) {
int hdr_len, trail_len;
hdr_len = n->m_ext_pgs.hdr_len;
trail_len = m->m_ext_pgs.trail_len;
hdr_len = n->m_epg_hdrlen;
trail_len = m->m_epg_trllen;
if (trail_len != 0 && hdr_len != 0 &&
trail_len + hdr_len <= MBUF_PEXT_TRAIL_LEN) {
/* copy n's header to m's trailer */
memcpy(&m->m_epg_trail[trail_len],
n->m_epg_hdr, hdr_len);
m->m_ext_pgs.trail_len += hdr_len;
m->m_epg_trllen += hdr_len;
m->m_len += hdr_len;
n->m_ext_pgs.hdr_len = 0;
n->m_epg_hdrlen = 0;
n->m_len -= hdr_len;
}
}
@@ -211,13 +211,13 @@ sbready(struct sockbuf *sb, struct mbuf *m0, int count)
("%s: m %p !M_NOTREADY", __func__, m));
if ((m->m_flags & M_EXT) != 0 &&
m->m_ext.ext_type == EXT_PGS) {
if (count < m->m_ext_pgs.nrdy) {
m->m_ext_pgs.nrdy -= count;
if (count < m->m_epg_nrdy) {
m->m_epg_nrdy -= count;
count = 0;
break;
}
count -= m->m_ext_pgs.nrdy;
m->m_ext_pgs.nrdy = 0;
count -= m->m_epg_nrdy;
m->m_epg_nrdy = 0;
} else
count--;
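Worked example of the readiness accounting above (illustrative):
sbready(sb, m0, 3) against an EXT_PGS mbuf with m_epg_nrdy = 5 takes the
first branch, leaving m_epg_nrdy = 2 with the mbuf still not ready; a
later sbready() covering the remaining two pages drives m_epg_nrdy to 0
and lets the loop move on to the next mbuf.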

View File

@@ -230,7 +230,7 @@ ip_output_send(struct inpcb *inp, struct ifnet *ifp, struct mbuf *m,
* dropping the mbuf's reference) in if_output.
*/
if (m->m_next != NULL && mbuf_has_tls_session(m->m_next)) {
tls = ktls_hold(m->m_next->m_ext_pgs.tls);
tls = ktls_hold(m->m_next->m_epg_tls);
mst = tls->snd_tag;
/*

View File

@@ -1912,7 +1912,7 @@ tcp_m_copym(struct mbuf *m, int32_t off0, int32_t *plen,
pkthdrlen = NULL;
#ifdef KERN_TLS
if (hw_tls && (m->m_flags & M_NOMAP))
tls = m->m_ext_pgs.tls;
tls = m->m_epg_tls;
else
tls = NULL;
start = m;
@@ -1929,7 +1929,7 @@ tcp_m_copym(struct mbuf *m, int32_t off0, int32_t *plen,
#ifdef KERN_TLS
if (hw_tls) {
if (m->m_flags & M_NOMAP)
ntls = m->m_ext_pgs.tls;
ntls = m->m_epg_tls;
else
ntls = NULL;

View File

@@ -341,7 +341,7 @@ ip6_output_send(struct inpcb *inp, struct ifnet *ifp, struct ifnet *origifp,
* dropping the mbuf's reference) in if_output.
*/
if (m->m_next != NULL && mbuf_has_tls_session(m->m_next)) {
tls = ktls_hold(m->m_next->m_ext_pgs.tls);
tls = ktls_hold(m->m_next->m_epg_tls);
mst = tls->snd_tag;
/*

View File

@@ -289,7 +289,7 @@ struct m_ext {
#define m_epg_pa m_ext.extpg_pa
#define m_epg_trail m_ext.extpg_trail
#define m_epg_hdr m_ext.extpg_hdr
#define m_epg_copylen offsetof(struct m_ext, ext_free)
#define m_epg_ext_copylen offsetof(struct m_ext, ext_free)
};
};
/*
@@ -345,35 +345,37 @@ struct mbuf {
/* M_EXTPG set.
* Multi-page M_EXTPG mbuf has its meta data
* split between the mbuf_ext_pgs structure
* split between the below anonymous structure
* and m_ext. It carries vector of pages,
* optional header and trailer char vectors
* and pointers to socket/TLS data.
*/
struct mbuf_ext_pgs {
#define m_epg_startcopy m_epg_npgs
#define m_epg_endcopy m_epg_stailq
struct {
/* Overall count of pages and count of
* pages with I/O pending. */
uint8_t npgs;
uint8_t nrdy;
uint8_t m_epg_npgs;
uint8_t m_epg_nrdy;
/* TLS header and trailer lengths.
* The data itself resides in m_ext. */
uint8_t hdr_len;
uint8_t trail_len;
/* Offset into 1st page and lenght of
uint8_t m_epg_hdrlen;
uint8_t m_epg_trllen;
/* Offset into 1st page and length of
* data in the last page. */
uint16_t first_pg_off;
uint16_t last_pg_len;
uint8_t flags;
uint16_t m_epg_1st_off;
uint16_t m_epg_last_len;
uint8_t m_epg_flags;
#define EPG_FLAG_ANON 0x1 /* Data can be encrypted in place. */
#define EPG_FLAG_2FREE 0x2 /* Scheduled for free. */
uint8_t record_type;
uint8_t spare[2];
int enc_cnt;
struct ktls_session *tls;
struct socket *so;
uint64_t seqno;
STAILQ_ENTRY(mbuf) stailq;
} m_ext_pgs;
uint8_t m_epg_record_type;
uint8_t __spare[2];
int m_epg_enc_cnt;
struct ktls_session *m_epg_tls;
struct socket *m_epg_so;
uint64_t m_epg_seqno;
STAILQ_ENTRY(mbuf) m_epg_stailq;
};
};
union {
/* M_EXT or M_EXTPG set. */
@@ -394,8 +396,8 @@ m_epg_pagelen(const struct mbuf *m, int pidx, int pgoff)
KASSERT(pgoff == 0 || pidx == 0,
("page %d with non-zero offset %d in %p", pidx, pgoff, m));
if (pidx == m->m_ext_pgs.npgs - 1) {
return (m->m_ext_pgs.last_pg_len);
if (pidx == m->m_epg_npgs - 1) {
return (m->m_epg_last_len);
} else {
return (PAGE_SIZE - pgoff);
}
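For example, a three-page mbuf with m_epg_1st_off = 100 and
m_epg_last_len = 50 yields page lengths PAGE_SIZE - 100, PAGE_SIZE, and
50, matching the invariant that only the first page is offset and only
the last page is short.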
@@ -410,23 +412,23 @@ m_epg_pagelen(const struct mbuf *m, int pidx, int pgoff)
* last_pg_len > 0).
*/
#define MBUF_EXT_PGS_ASSERT_SANITY(m) do { \
MCHECK(m->m_ext_pgs.npgs > 0, "no valid pages"); \
MCHECK(m->m_ext_pgs.npgs <= nitems(m->m_epg_pa), \
MCHECK(m->m_epg_npgs > 0, "no valid pages"); \
MCHECK(m->m_epg_npgs <= nitems(m->m_epg_pa), \
"too many pages"); \
MCHECK(m->m_ext_pgs.nrdy <= m->m_ext_pgs.npgs, \
MCHECK(m->m_epg_nrdy <= m->m_epg_npgs, \
"too many ready pages"); \
MCHECK(m->m_ext_pgs.first_pg_off < PAGE_SIZE, \
MCHECK(m->m_epg_1st_off < PAGE_SIZE, \
"too large page offset"); \
MCHECK(m->m_ext_pgs.last_pg_len > 0, "zero last page length"); \
MCHECK(m->m_ext_pgs.last_pg_len <= PAGE_SIZE, \
MCHECK(m->m_epg_last_len > 0, "zero last page length"); \
MCHECK(m->m_epg_last_len <= PAGE_SIZE, \
"too large last page length"); \
if (m->m_ext_pgs.npgs == 1) \
MCHECK(m->m_ext_pgs.first_pg_off + \
m->m_ext_pgs.last_pg_len <= PAGE_SIZE, \
if (m->m_epg_npgs == 1) \
MCHECK(m->m_epg_1st_off + \
m->m_epg_last_len <= PAGE_SIZE, \
"single page too large"); \
MCHECK(m->m_ext_pgs.hdr_len <= sizeof(m->m_epg_hdr), \
MCHECK(m->m_epg_hdrlen <= sizeof(m->m_epg_hdr), \
"too large header length"); \
MCHECK(m->m_ext_pgs.trail_len <= sizeof(m->m_epg_trail), \
MCHECK(m->m_epg_trllen <= sizeof(m->m_epg_trail), \
"too large header length"); \
} while (0)
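As a quick check against these invariants: a single-page mbuf with
m_epg_npgs = 1, m_epg_1st_off = 256 and m_epg_last_len = 512 passes
(256 + 512 <= PAGE_SIZE), while m_epg_last_len = 4096 at the same offset
would trip the "single page too large" check on 4 KB pages.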
#else
@@ -1559,7 +1561,7 @@ mbuf_has_tls_session(struct mbuf *m)
if (m->m_flags & M_NOMAP) {
MBUF_EXT_PGS_ASSERT(m);
if (m->m_ext_pgs.tls != NULL) {
if (m->m_epg_tls != NULL) {
return (true);
}
}