IFLIB updates:

We found that routing performance dropped significantly when FreeBSD
was configured as a router. Apply the following changes to address
those issues and improve performance:
 - don't prefetch the flags array, since we usually don't need it
 - prefetch the next cache line of each of the software descriptor arrays, as
   well as the first cache line of each of the next four packets' mbufs and
   clusters (see the prefetch sketch below)
 - reduce the maximum RX copy size (IFLIB_RX_COPY_THRESH) from 128 to 63 bytes
 - convert rx soft descriptors from an array of structures to a structure of
   arrays (see the layout sketch below)
 - update copyrights
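
The structure-of-arrays conversion is the core of the change. A minimal
sketch of the two layouts, lifted from the diff below: the old array of
structures keeps all four fields of one descriptor together, while the
new structure of arrays keeps like fields of consecutive descriptors
adjacent, so one cache line covers several descriptors and the next
line is cheap to prefetch.

    /* before: array of structures */
    typedef struct iflib_sw_rx_desc {
            bus_dmamap_t    ifsd_map;   /* bus_dma map for packet */
            struct mbuf     *ifsd_m;    /* rx: uninitialized mbuf */
            caddr_t         ifsd_cl;    /* direct cluster pointer for rx */
            uint16_t        ifsd_flags;
    } *iflib_rxsd_t;

    /* after: structure of arrays */
    typedef struct iflib_sw_rx_desc_array {
            bus_dmamap_t    *ifsd_map;  /* bus_dma maps for packets */
            struct mbuf     **ifsd_m;   /* pkthdr mbufs */
            caddr_t         *ifsd_cl;   /* direct cluster pointers for rx */
            uint8_t         *ifsd_flags;
    } iflib_rxsd_array_t;

The prefetch pattern this layout enables is the new prefetch_pkts() in
the diff. A simplified stand-alone sketch of the same idea follows; the
helper name, the 64-byte line size, and CACHE_PTR_INCREMENT being
pointers-per-cache-line are illustrative assumptions, and the compiler
builtin stands in for the kernel's prefetch() macro. nrxd must be a
power of two for the `& (nrxd - 1)` wrap to be valid.

    #define CACHE_LINE_SIZE         64      /* assumed line size */
    #define CACHE_PTR_INCREMENT     (CACHE_LINE_SIZE / sizeof(void *))

    /*
     * Sketch only: warm the descriptor arrays one cache line ahead,
     * then touch the first line of the next four packets' mbufs and
     * clusters before the rx path dereferences them.
     */
    static inline void
    prefetch_pkts_sketch(void **sd_m, void **sd_cl, int cidx, int nrxd)
    {
            int next = (cidx + CACHE_PTR_INCREMENT) & (nrxd - 1);

            __builtin_prefetch(&sd_m[next]);    /* next line of mbuf array */
            __builtin_prefetch(&sd_cl[next]);   /* next line of cluster array */
            for (int i = 1; i <= 4; i++) {
                    __builtin_prefetch(sd_m[(cidx + i) & (nrxd - 1)]);
                    __builtin_prefetch(sd_cl[(cidx + i) & (nrxd - 1)]);
            }
    }

Lowering IFLIB_RX_COPY_THRESH from 128 to 63 means the single-fragment
copy path in iflib_rxd_pkt_get() touches at most one 64-byte cache line
of the cluster; that rationale is our inference, not stated in the diff.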

Submitted by:	Matt Macy <mmacy@nextbsd.org>
Committed by:	sbruno, 2017-01-27 23:08:06 +00:00
Commit:	ecadb704c9 (parent: 1b5ced141f)
2 changed files with 170 additions and 103 deletions

sys/net/iflib.c

@@ -1,5 +1,5 @@
/*-
* Copyright (c) 2014-2016, Matthew Macy <mmacy@nextbsd.org>
* Copyright (c) 2014-2017, Matthew Macy <mmacy@nextbsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -264,18 +264,12 @@ iflib_get_sctx(if_ctx_t ctx)
#define RX_SW_DESC_INUSE (1 << 3)
#define TX_SW_DESC_MAPPED (1 << 4)
typedef struct iflib_sw_rx_desc {
bus_dmamap_t ifsd_map; /* bus_dma map for packet */
struct mbuf *ifsd_m; /* rx: uninitialized mbuf */
caddr_t ifsd_cl; /* direct cluster pointer for rx */
uint16_t ifsd_flags;
} *iflib_rxsd_t;
typedef struct iflib_sw_tx_desc_val {
bus_dmamap_t ifsd_map; /* bus_dma map for packet */
struct mbuf *ifsd_m; /* pkthdr mbuf */
uint8_t ifsd_flags;
} *iflib_txsd_val_t;
typedef struct iflib_sw_rx_desc_array {
bus_dmamap_t *ifsd_map; /* bus_dma maps for packet */
struct mbuf **ifsd_m; /* pkthdr mbufs */
caddr_t *ifsd_cl; /* direct cluster pointer for rx */
uint8_t *ifsd_flags;
} iflib_rxsd_array_t;
typedef struct iflib_sw_tx_desc_array {
bus_dmamap_t *ifsd_map; /* bus_dma maps for packet */
@@ -287,7 +281,7 @@ typedef struct iflib_sw_tx_desc_array {
/* magic number that should be high enough for any hardware */
#define IFLIB_MAX_TX_SEGS 128
#define IFLIB_MAX_RX_SEGS 32
#define IFLIB_RX_COPY_THRESH 128
#define IFLIB_RX_COPY_THRESH 63
#define IFLIB_MAX_RX_REFRESH 32
#define IFLIB_QUEUE_IDLE 0
#define IFLIB_QUEUE_HUNG 1
@@ -383,7 +377,7 @@ struct iflib_fl {
uint16_t ifl_buf_size;
uint16_t ifl_cltype;
uma_zone_t ifl_zone;
iflib_rxsd_t ifl_sds;
iflib_rxsd_array_t ifl_sds;
iflib_rxq_t ifl_rxq;
uint8_t ifl_id;
bus_dma_tag_t ifl_desc_tag;
@@ -909,7 +903,7 @@ iflib_netmap_rxsync(struct netmap_kring *kring, int flags)
ring->slot[nm_i].len = ri.iri_len - crclen;
ring->slot[nm_i].flags = slot_flags;
bus_dmamap_sync(fl->ifl_ifdi->idi_tag,
fl->ifl_sds[nic_i].ifsd_map, BUS_DMASYNC_POSTREAD);
fl->ifl_sds.ifsd_map[nic_i], BUS_DMASYNC_POSTREAD);
nm_i = nm_next(nm_i, lim);
nic_i = nm_next(nic_i, lim);
}
@@ -949,14 +943,14 @@ iflib_netmap_rxsync(struct netmap_kring *kring, int flags)
vaddr = addr;
if (slot->flags & NS_BUF_CHANGED) {
/* buffer has changed, reload map */
netmap_reload_map(na, fl->ifl_ifdi->idi_tag, fl->ifl_sds[nic_i].ifsd_map, addr);
netmap_reload_map(na, fl->ifl_ifdi->idi_tag, fl->ifl_sds.ifsd_map[nic_i], addr);
slot->flags &= ~NS_BUF_CHANGED;
}
/*
* XXX we should be batching this operation - TODO
*/
ctx->isc_rxd_refill(ctx->ifc_softc, rxq->ifr_id, fl->ifl_id, nic_i, &paddr, &vaddr, 1, fl->ifl_buf_size);
bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_sds[nic_i].ifsd_map,
bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_sds.ifsd_map[nic_i],
BUS_DMASYNC_PREREAD);
nm_i = nm_next(nm_i, lim);
nic_i = nm_next(nic_i, lim);
@@ -1030,22 +1024,22 @@ iflib_netmap_rxq_init(if_ctx_t ctx, iflib_rxq_t rxq)
{
struct netmap_adapter *na = NA(ctx->ifc_ifp);
struct netmap_slot *slot;
iflib_rxsd_t sd;
bus_dmamap_t *map;
int nrxd;
slot = netmap_reset(na, NR_RX, rxq->ifr_id, 0);
if (slot == 0)
return;
sd = rxq->ifr_fl[0].ifl_sds;
map = rxq->ifr_fl[0].ifl_sds.ifsd_map;
nrxd = ctx->ifc_softc_ctx.isc_nrxd[0];
for (int i = 0; i < nrxd; i++, sd++) {
for (int i = 0; i < nrxd; i++, map++) {
int sj = netmap_idx_n2k(&na->rx_rings[rxq->ifr_id], i);
uint64_t paddr;
void *addr;
caddr_t vaddr;
vaddr = addr = PNMB(na, slot + sj, &paddr);
netmap_load_map(na, rxq->ifr_fl[0].ifl_ifdi->idi_tag, sd->ifsd_map, addr);
netmap_load_map(na, rxq->ifr_fl[0].ifl_ifdi->idi_tag, *map, addr);
/* Update descriptor and the cached value */
ctx->isc_rxd_refill(ctx->ifc_softc, rxq->ifr_id, 0 /* fl_id */, i, &paddr, &vaddr, 1, rxq->ifr_fl[0].ifl_buf_size);
}
@@ -1476,7 +1470,6 @@ iflib_rxsd_alloc(iflib_rxq_t rxq)
if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
device_t dev = ctx->ifc_dev;
iflib_fl_t fl;
iflib_rxsd_t rxsd;
int err;
MPASS(scctx->isc_nrxd[0] > 0);
@@ -1484,13 +1477,6 @@ iflib_rxsd_alloc(iflib_rxq_t rxq)
fl = rxq->ifr_fl;
for (int i = 0; i < rxq->ifr_nfl; i++, fl++) {
fl->ifl_sds = malloc(sizeof(struct iflib_sw_rx_desc) *
scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB,
M_WAITOK | M_ZERO);
if (fl->ifl_sds == NULL) {
device_printf(dev, "Unable to allocate rx sw desc memory\n");
return (ENOMEM);
}
fl->ifl_size = scctx->isc_nrxd[rxq->ifr_fl_offset]; /* this isn't necessarily the same */
err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
1, 0, /* alignment, bounds */
@@ -1509,17 +1495,49 @@ iflib_rxsd_alloc(iflib_rxq_t rxq)
__func__, err);
goto fail;
}
if (!(fl->ifl_sds.ifsd_flags =
(uint8_t *) malloc(sizeof(uint8_t) *
scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
device_printf(dev, "Unable to allocate tx_buffer memory\n");
err = ENOMEM;
goto fail;
}
if (!(fl->ifl_sds.ifsd_m =
(struct mbuf **) malloc(sizeof(struct mbuf *) *
scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
device_printf(dev, "Unable to allocate tx_buffer memory\n");
err = ENOMEM;
goto fail;
}
if (!(fl->ifl_sds.ifsd_cl =
(caddr_t *) malloc(sizeof(caddr_t) *
scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
device_printf(dev, "Unable to allocate tx_buffer memory\n");
err = ENOMEM;
goto fail;
}
rxsd = fl->ifl_sds;
for (int i = 0; i < scctx->isc_nrxd[rxq->ifr_fl_offset]; i++, rxsd++) {
err = bus_dmamap_create(fl->ifl_desc_tag, 0, &rxsd->ifsd_map);
if (err) {
device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
__func__, err);
/* Create the descriptor buffer dma maps */
#if defined(ACPI_DMAR) || (!(defined(__i386__) && !defined(__amd64__)))
if ((ctx->ifc_flags & IFC_DMAR) == 0)
continue;
if (!(fl->ifl_sds.ifsd_map =
(bus_dmamap_t *) malloc(sizeof(bus_dmamap_t) * scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
device_printf(dev, "Unable to allocate tx_buffer map memory\n");
err = ENOMEM;
goto fail;
}
for (int i = 0; i < scctx->isc_nrxd[rxq->ifr_fl_offset]; i++) {
err = bus_dmamap_create(fl->ifl_desc_tag, 0, &fl->ifl_sds.ifsd_map[i]);
if (err != 0) {
device_printf(dev, "Unable to create TX DMA map\n");
goto fail;
}
}
}
#endif
return (0);
fail:
@@ -1568,13 +1586,21 @@ static void
_iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int count)
{
struct mbuf *m;
int pidx = fl->ifl_pidx;
iflib_rxsd_t rxsd = &fl->ifl_sds[pidx];
caddr_t cl;
int idx, pidx = fl->ifl_pidx;
caddr_t cl, *sd_cl;
struct mbuf **sd_m;
uint8_t *sd_flags;
bus_dmamap_t *sd_map;
int n, i = 0;
uint64_t bus_addr;
int err;
sd_m = fl->ifl_sds.ifsd_m;
sd_map = fl->ifl_sds.ifsd_map;
sd_cl = fl->ifl_sds.ifsd_cl;
sd_flags = fl->ifl_sds.ifsd_flags;
idx = pidx;
n = count;
MPASS(n > 0);
MPASS(fl->ifl_credits + n <= fl->ifl_size);
@@ -1597,8 +1623,8 @@ _iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int count)
*
* If the cluster is still set then we know a minimum sized packet was received
*/
if ((cl = rxsd->ifsd_cl) == NULL) {
if ((cl = rxsd->ifsd_cl = m_cljget(NULL, M_NOWAIT, fl->ifl_buf_size)) == NULL)
if ((cl = sd_cl[idx]) == NULL) {
if ((cl = sd_cl[idx] = m_cljget(NULL, M_NOWAIT, fl->ifl_buf_size)) == NULL)
break;
#if MEMORY_LOGGING
fl->ifl_cl_enqueued++;
@@ -1613,16 +1639,16 @@ _iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int count)
DBG_COUNTER_INC(rx_allocs);
#ifdef notyet
if ((rxsd->ifsd_flags & RX_SW_DESC_MAP_CREATED) == 0) {
if ((sd_flags[pidx] & RX_SW_DESC_MAP_CREATED) == 0) {
int err;
if ((err = bus_dmamap_create(fl->ifl_ifdi->idi_tag, 0, &rxsd->ifsd_map))) {
if ((err = bus_dmamap_create(fl->ifl_ifdi->idi_tag, 0, &sd_map[idx]))) {
log(LOG_WARNING, "bus_dmamap_create failed %d\n", err);
uma_zfree(fl->ifl_zone, cl);
n = 0;
goto done;
}
rxsd->ifsd_flags |= RX_SW_DESC_MAP_CREATED;
sd_flags[idx] |= RX_SW_DESC_MAP_CREATED;
}
#endif
#if defined(__i386__) || defined(__amd64__)
@@ -1636,7 +1662,7 @@ _iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int count)
cb_arg.error = 0;
q = fl->ifl_rxq;
err = bus_dmamap_load(fl->ifl_desc_tag, rxsd->ifsd_map,
err = bus_dmamap_load(fl->ifl_desc_tag, sd_map[idx],
cl, fl->ifl_buf_size, _rxq_refill_cb, &cb_arg, 0);
if (err != 0 || cb_arg.error) {
@@ -1651,28 +1677,28 @@ _iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int count)
}
bus_addr = cb_arg.seg.ds_addr;
}
rxsd->ifsd_flags |= RX_SW_DESC_INUSE;
sd_flags[idx] |= RX_SW_DESC_INUSE;
MPASS(rxsd->ifsd_m == NULL);
rxsd->ifsd_cl = cl;
rxsd->ifsd_m = m;
MPASS(sd_m[idx] == NULL);
sd_cl[idx] = cl;
sd_m[idx] = m;
fl->ifl_bus_addrs[i] = bus_addr;
fl->ifl_vm_addrs[i] = cl;
rxsd++;
fl->ifl_credits++;
i++;
MPASS(fl->ifl_credits <= fl->ifl_size);
if (++fl->ifl_pidx == fl->ifl_size) {
fl->ifl_pidx = 0;
if (++idx == fl->ifl_size) {
fl->ifl_gen = 1;
rxsd = fl->ifl_sds;
idx = 0;
}
if (n == 0 || i == IFLIB_MAX_RX_REFRESH) {
ctx->isc_rxd_refill(ctx->ifc_softc, fl->ifl_rxq->ifr_id, fl->ifl_id, pidx,
fl->ifl_bus_addrs, fl->ifl_vm_addrs, i, fl->ifl_buf_size);
i = 0;
pidx = fl->ifl_pidx;
pidx = idx;
}
fl->ifl_pidx = idx;
}
done:
DBG_COUNTER_INC(rxd_flush);
@@ -1706,28 +1732,33 @@ iflib_fl_bufs_free(iflib_fl_t fl)
uint32_t i;
for (i = 0; i < fl->ifl_size; i++) {
iflib_rxsd_t d = &fl->ifl_sds[i];
struct mbuf **sd_m = &fl->ifl_sds.ifsd_m[i];
uint8_t *sd_flags = &fl->ifl_sds.ifsd_flags[i];
caddr_t *sd_cl = &fl->ifl_sds.ifsd_cl[i];
if (d->ifsd_flags & RX_SW_DESC_INUSE) {
bus_dmamap_unload(fl->ifl_desc_tag, d->ifsd_map);
bus_dmamap_destroy(fl->ifl_desc_tag, d->ifsd_map);
if (d->ifsd_m != NULL) {
m_init(d->ifsd_m, M_NOWAIT, MT_DATA, 0);
uma_zfree(zone_mbuf, d->ifsd_m);
if (*sd_flags & RX_SW_DESC_INUSE) {
if (fl->ifl_sds.ifsd_map != NULL) {
bus_dmamap_t sd_map = fl->ifl_sds.ifsd_map[i];
bus_dmamap_unload(fl->ifl_desc_tag, sd_map);
bus_dmamap_destroy(fl->ifl_desc_tag, sd_map);
}
if (d->ifsd_cl != NULL)
uma_zfree(fl->ifl_zone, d->ifsd_cl);
d->ifsd_flags = 0;
if (*sd_m != NULL) {
m_init(*sd_m, M_NOWAIT, MT_DATA, 0);
uma_zfree(zone_mbuf, *sd_m);
}
if (*sd_cl != NULL)
uma_zfree(fl->ifl_zone, *sd_cl);
*sd_flags = 0;
} else {
MPASS(d->ifsd_cl == NULL);
MPASS(d->ifsd_m == NULL);
MPASS(*sd_cl == NULL);
MPASS(*sd_m == NULL);
}
#if MEMORY_LOGGING
fl->ifl_m_dequeued++;
fl->ifl_cl_dequeued++;
#endif
d->ifsd_cl = NULL;
d->ifsd_m = NULL;
*sd_cl = NULL;
*sd_m = NULL;
}
/*
* Reset free list values
@@ -1807,10 +1838,14 @@ iflib_rx_sds_free(iflib_rxq_t rxq)
bus_dma_tag_destroy(fl->ifl_desc_tag);
fl->ifl_desc_tag = NULL;
}
free(fl->ifl_sds.ifsd_m, M_IFLIB);
free(fl->ifl_sds.ifsd_cl, M_IFLIB);
/* XXX destroy maps first */
free(fl->ifl_sds.ifsd_map, M_IFLIB);
fl->ifl_sds.ifsd_m = NULL;
fl->ifl_sds.ifsd_cl = NULL;
fl->ifl_sds.ifsd_map = NULL;
}
if (rxq->ifr_fl->ifl_sds != NULL)
free(rxq->ifr_fl->ifl_sds, M_IFLIB);
free(rxq->ifr_fl, M_IFLIB);
rxq->ifr_fl = NULL;
rxq->ifr_cq_gen = rxq->ifr_cq_cidx = rxq->ifr_cq_pidx = 0;
@@ -1995,13 +2030,33 @@ iflib_stop(if_ctx_t ctx)
}
}
static iflib_rxsd_t
rxd_frag_to_sd(iflib_rxq_t rxq, if_rxd_frag_t irf, int *cltype, int unload)
static inline void
prefetch_pkts(iflib_fl_t fl, int cidx)
{
int nextptr;
int nrxd = fl->ifl_size;
nextptr = (cidx + CACHE_PTR_INCREMENT) & (nrxd-1);
prefetch(&fl->ifl_sds.ifsd_m[nextptr]);
prefetch(&fl->ifl_sds.ifsd_cl[nextptr]);
prefetch(fl->ifl_sds.ifsd_m[(cidx + 1) & (nrxd-1)]);
prefetch(fl->ifl_sds.ifsd_m[(cidx + 2) & (nrxd-1)]);
prefetch(fl->ifl_sds.ifsd_m[(cidx + 3) & (nrxd-1)]);
prefetch(fl->ifl_sds.ifsd_m[(cidx + 4) & (nrxd-1)]);
prefetch(fl->ifl_sds.ifsd_cl[(cidx + 1) & (nrxd-1)]);
prefetch(fl->ifl_sds.ifsd_cl[(cidx + 2) & (nrxd-1)]);
prefetch(fl->ifl_sds.ifsd_cl[(cidx + 3) & (nrxd-1)]);
prefetch(fl->ifl_sds.ifsd_cl[(cidx + 4) & (nrxd-1)]);
}
static void
rxd_frag_to_sd(iflib_rxq_t rxq, if_rxd_frag_t irf, int *cltype, int unload, iflib_fl_t *pfl, int *pcidx)
{
int flid, cidx;
iflib_rxsd_t sd;
bus_dmamap_t map;
iflib_fl_t fl;
iflib_dma_info_t di;
int next;
flid = irf->irf_flid;
cidx = irf->irf_idx;
@@ -2012,16 +2067,22 @@ rxd_frag_to_sd(iflib_rxq_t rxq, if_rxd_frag_t irf, int *cltype, int unload)
if (cltype)
fl->ifl_cl_dequeued++;
#endif
sd = &fl->ifl_sds[cidx];
di = fl->ifl_ifdi;
bus_dmamap_sync(di->idi_tag, di->idi_map,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
prefetch_pkts(fl, cidx);
if (fl->ifl_sds.ifsd_map != NULL) {
next = (cidx + CACHE_PTR_INCREMENT) & (fl->ifl_size-1);
prefetch(&fl->ifl_sds.ifsd_map[next]);
map = fl->ifl_sds.ifsd_map[cidx];
di = fl->ifl_ifdi;
next = (cidx + CACHE_LINE_SIZE) & (fl->ifl_size-1);
prefetch(&fl->ifl_sds.ifsd_flags[next]);
bus_dmamap_sync(di->idi_tag, di->idi_map,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
/* not valid assert if bxe really does SGE from non-contiguous elements */
MPASS(fl->ifl_cidx == cidx);
if (unload)
bus_dmamap_unload(fl->ifl_desc_tag, sd->ifsd_map);
MPASS(fl->ifl_cidx == cidx);
if (unload)
bus_dmamap_unload(fl->ifl_desc_tag, map);
}
if (__predict_false(++fl->ifl_cidx == fl->ifl_size)) {
fl->ifl_cidx = 0;
fl->ifl_gen = 0;
@@ -2029,35 +2090,38 @@ rxd_frag_to_sd(iflib_rxq_t rxq, if_rxd_frag_t irf, int *cltype, int unload)
/* YES ick */
if (cltype)
*cltype = fl->ifl_cltype;
return (sd);
*pfl = fl;
*pcidx = cidx;
}
static struct mbuf *
assemble_segments(iflib_rxq_t rxq, if_rxd_info_t ri)
{
int i, padlen , flags, cltype;
struct mbuf *m, *mh, *mt;
iflib_rxsd_t sd;
caddr_t cl;
struct mbuf *m, *mh, *mt, *sd_m;
iflib_fl_t fl;
int cidx;
caddr_t cl, sd_cl;
i = 0;
mh = NULL;
do {
sd = rxd_frag_to_sd(rxq, &ri->iri_frags[i], &cltype, TRUE);
rxd_frag_to_sd(rxq, &ri->iri_frags[i], &cltype, TRUE, &fl, &cidx);
sd_m = fl->ifl_sds.ifsd_m[cidx];
sd_cl = fl->ifl_sds.ifsd_cl[cidx];
MPASS(sd->ifsd_cl != NULL);
MPASS(sd->ifsd_m != NULL);
MPASS(sd_cl != NULL);
MPASS(sd_m != NULL);
/* Don't include zero-length frags */
if (ri->iri_frags[i].irf_len == 0) {
/* XXX we can save the cluster here, but not the mbuf */
m_init(sd->ifsd_m, M_NOWAIT, MT_DATA, 0);
m_free(sd->ifsd_m);
sd->ifsd_m = NULL;
m_init(sd_m, M_NOWAIT, MT_DATA, 0);
m_free(sd_m);
fl->ifl_sds.ifsd_m[cidx] = NULL;
continue;
}
m = sd->ifsd_m;
m = sd_m;
if (mh == NULL) {
flags = M_PKTHDR|M_EXT;
mh = mt = m;
@@ -2069,9 +2133,9 @@ assemble_segments(iflib_rxq_t rxq, if_rxd_info_t ri)
/* assuming padding is only on the first fragment */
padlen = 0;
}
sd->ifsd_m = NULL;
cl = sd->ifsd_cl;
sd->ifsd_cl = NULL;
fl->ifl_sds.ifsd_m[cidx] = NULL;
cl = fl->ifl_sds.ifsd_cl[cidx];
fl->ifl_sds.ifsd_cl[cidx] = NULL;
/* Can these two be made one ? */
m_init(m, M_NOWAIT, MT_DATA, flags);
@@ -2094,16 +2158,19 @@ static struct mbuf *
iflib_rxd_pkt_get(iflib_rxq_t rxq, if_rxd_info_t ri)
{
struct mbuf *m;
iflib_rxsd_t sd;
iflib_fl_t fl;
caddr_t sd_cl;
int cidx;
/* should I merge this back in now that the two paths are basically duplicated? */
if (ri->iri_nfrags == 1 &&
ri->iri_frags[0].irf_len <= IFLIB_RX_COPY_THRESH) {
sd = rxd_frag_to_sd(rxq, &ri->iri_frags[0], NULL, FALSE);
m = sd->ifsd_m;
sd->ifsd_m = NULL;
rxd_frag_to_sd(rxq, &ri->iri_frags[0], NULL, FALSE, &fl, &cidx);
m = fl->ifl_sds.ifsd_m[cidx];
fl->ifl_sds.ifsd_m[cidx] = NULL;
sd_cl = fl->ifl_sds.ifsd_cl[cidx];
m_init(m, M_NOWAIT, MT_DATA, M_PKTHDR);
memcpy(m->m_data, sd->ifsd_cl, ri->iri_len);
memcpy(m->m_data, sd_cl, ri->iri_len);
m->m_len = ri->iri_frags[0].irf_len;
} else {
m = assemble_segments(rxq, ri);

sys/net/iflib.h

@@ -1,5 +1,5 @@
/*-
* Copyright (c) 2014-2015, Matthew Macy (mmacy@nextbsd.org)
* Copyright (c) 2014-2017, Matthew Macy (mmacy@nextbsd.org)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without