netmap: pull fix for 32-bit support from upstream

Approved by:	sbruno

parent 1b01335c5d
commit 98af9f469b
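The bulk of the diff replaces direct casts between the fixed-width uint64_t
pointer carriers of the nmreq API (hdr->nr_body, hdr->nr_options,
opt->nro_next) and real pointers with casts that go through uintptr_t, and
fixes D() format strings that printed vm_offset_t values with %zx. With the
32-bit build fixed, netmap also no longer needs to be excluded from the
powerpc LINT kernel. Below is a minimal sketch of the two idioms; struct
req64 and the names in it are made up for illustration and stand in for
struct nmreq_header, so this is not code from the diff itself:

	#include <stdint.h>
	#include <stdio.h>

	/* A fixed-width field keeps the user<->kernel ABI identical on
	 * 32- and 64-bit kernels, but it is wider than a pointer on
	 * 32-bit platforms. */
	struct req64 {
		uint64_t body;	/* carries a pointer value */
	};

	int main(void)
	{
		int payload = 42;
		struct req64 r;

		/* Store: the pointer widens to uintptr_t, then to uint64_t,
		 * losslessly on either platform width. */
		r.body = (uintptr_t)&payload;

		/* Load: narrow through uintptr_t before the pointer cast.
		 * A direct (int *)r.body is a truncating, warning-generating
		 * uint64_t-to-pointer conversion on a 32-bit target. */
		int *p = (int *)(uintptr_t)r.body;
		printf("%d\n", *p);

		/* Printing: %zx expects size_t, which is 32 bits on 32-bit
		 * platforms; casting to uintmax_t and using %jx is correct
		 * for either width. */
		printf("%jx\n", (uintmax_t)r.body);
		return 0;
	}

The same reasoning drives the nm_os_extmem changes below: e->size is a
vm_offset_t, so the D() calls switch from "%zx"/(size_t) to
"%jx"/(uintmax_t), and the scan field becomes a uintptr_t.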
@@ -53,7 +53,6 @@ LINT: ${NOTES} ${MAKELINT_SED}
 .if ${TARGET} == "powerpc"
 # cat is available, not sure if cp is?
 cat ${.TARGET} > ${.TARGET}64
-echo "nodevice netmap" >> ${.TARGET}
 echo "machine ${TARGET} powerpc" >> ${.TARGET}
 echo "machine ${TARGET} powerpc64" >> ${.TARGET}64
 .endif

@@ -210,7 +210,7 @@ static int ptnet_irqs_init(struct ptnet_softc *sc);
 static void ptnet_irqs_fini(struct ptnet_softc *sc);
 
 static uint32_t ptnet_nm_ptctl(if_t ifp, uint32_t cmd);
-static int ptnet_nm_config(struct netmap_adapter *na,
+static int ptnet_nm_config(struct netmap_adapter *na,
+struct nm_config_info *info);
 static void ptnet_update_vnet_hdr(struct ptnet_softc *sc);
 static int ptnet_nm_register(struct netmap_adapter *na, int onoff);

@@ -536,35 +536,35 @@ SYSBEGIN(main_init);
 SYSCTL_DECL(_dev_netmap);
 SYSCTL_NODE(_dev, OID_AUTO, netmap, CTLFLAG_RW, 0, "Netmap args");
 SYSCTL_INT(_dev_netmap, OID_AUTO, verbose,
-CTLFLAG_RW, &netmap_verbose, 0, "Verbose mode");
+CTLFLAG_RW, &netmap_verbose, 0, "Verbose mode");
 SYSCTL_INT(_dev_netmap, OID_AUTO, no_timestamp,
-CTLFLAG_RW, &netmap_no_timestamp, 0, "no_timestamp");
+CTLFLAG_RW, &netmap_no_timestamp, 0, "no_timestamp");
 SYSCTL_INT(_dev_netmap, OID_AUTO, no_pendintr, CTLFLAG_RW, &netmap_no_pendintr,
-0, "Always look for new received packets.");
+0, "Always look for new received packets.");
 SYSCTL_INT(_dev_netmap, OID_AUTO, txsync_retry, CTLFLAG_RW,
-&netmap_txsync_retry, 0, "Number of txsync loops in bridge's flush.");
+&netmap_txsync_retry, 0, "Number of txsync loops in bridge's flush.");
 
 SYSCTL_INT(_dev_netmap, OID_AUTO, fwd, CTLFLAG_RW, &netmap_fwd, 0,
-"Force NR_FORWARD mode");
+"Force NR_FORWARD mode");
 SYSCTL_INT(_dev_netmap, OID_AUTO, admode, CTLFLAG_RW, &netmap_admode, 0,
-"Adapter mode. 0 selects the best option available,"
-"1 forces native adapter, 2 forces emulated adapter");
+"Adapter mode. 0 selects the best option available,"
+"1 forces native adapter, 2 forces emulated adapter");
 SYSCTL_INT(_dev_netmap, OID_AUTO, generic_mit, CTLFLAG_RW, &netmap_generic_mit,
-0, "RX notification interval in nanoseconds");
+0, "RX notification interval in nanoseconds");
 SYSCTL_INT(_dev_netmap, OID_AUTO, generic_ringsize, CTLFLAG_RW,
-&netmap_generic_ringsize, 0,
-"Number of per-ring slots for emulated netmap mode");
+&netmap_generic_ringsize, 0,
+"Number of per-ring slots for emulated netmap mode");
 SYSCTL_INT(_dev_netmap, OID_AUTO, generic_rings, CTLFLAG_RW,
-&netmap_generic_rings, 0,
-"Number of TX/RX queues for emulated netmap adapters");
+&netmap_generic_rings, 0,
+"Number of TX/RX queues for emulated netmap adapters");
 #ifdef linux
 SYSCTL_INT(_dev_netmap, OID_AUTO, generic_txqdisc, CTLFLAG_RW,
-&netmap_generic_txqdisc, 0, "Use qdisc for generic adapters");
+&netmap_generic_txqdisc, 0, "Use qdisc for generic adapters");
 #endif
 SYSCTL_INT(_dev_netmap, OID_AUTO, ptnet_vnet_hdr, CTLFLAG_RW, &ptnet_vnet_hdr,
-0, "Allow ptnet devices to use virtio-net headers");
+0, "Allow ptnet devices to use virtio-net headers");
 SYSCTL_INT(_dev_netmap, OID_AUTO, ptnetmap_tx_workers, CTLFLAG_RW,
-&ptnetmap_tx_workers, 0, "Use worker threads for pnetmap TX processing");
+&ptnetmap_tx_workers, 0, "Use worker threads for pnetmap TX processing");
 
 SYSEND;
 
@@ -765,15 +765,15 @@ netmap_update_config(struct netmap_adapter *na)
 na->rx_buf_maxsize == info.rx_buf_maxsize)
 return 0; /* nothing changed */
 if (na->active_fds == 0) {
-D("configuration changed for %s: txring %d x %d, "
-"rxring %d x %d, rxbufsz %d",
-na->name, na->num_tx_rings, na->num_tx_desc,
-na->num_rx_rings, na->num_rx_desc, na->rx_buf_maxsize);
 na->num_tx_rings = info.num_tx_rings;
 na->num_tx_desc = info.num_tx_descs;
 na->num_rx_rings = info.num_rx_rings;
 na->num_rx_desc = info.num_rx_descs;
 na->rx_buf_maxsize = info.rx_buf_maxsize;
+D("configuration changed for %s: txring %d x %d, "
+"rxring %d x %d, rxbufsz %d",
+na->name, na->num_tx_rings, na->num_tx_desc,
+na->num_rx_rings, na->num_rx_desc, na->rx_buf_maxsize);
 return 0;
 }
 D("WARNING: configuration changed for %s while active: "
@@ -830,7 +830,7 @@ netmap_krings_create(struct netmap_adapter *na, u_int tailroom)
 n[NR_TX] = na->num_tx_rings + 1;
 n[NR_RX] = na->num_rx_rings + 1;
 
-len = (n[NR_TX] + n[NR_RX]) *
+len = (n[NR_TX] + n[NR_RX]) *
 (sizeof(struct netmap_kring) + sizeof(struct netmap_kring *))
 + tailroom;
 
@@ -841,7 +841,7 @@ netmap_krings_create(struct netmap_adapter *na, u_int tailroom)
 }
 na->rx_rings = na->tx_rings + n[NR_TX];
 na->tailroom = na->rx_rings + n[NR_RX];
-
+
 /* link the krings in the krings array */
 kring = (struct netmap_kring *)((char *)na->tailroom + tailroom);
 for (i = 0; i < n[NR_TX] + n[NR_RX]; i++) {
@@ -1006,9 +1006,9 @@ netmap_do_unregif(struct netmap_priv_d *priv)
 if (netmap_verbose)
 D("deleting last instance for %s", na->name);
 
-if (nm_netmap_on(na)) {
-D("BUG: netmap on while going to delete the krings");
-}
+if (nm_netmap_on(na)) {
+D("BUG: netmap on while going to delete the krings");
+}
 
 na->nm_krings_delete(na);
 }
@@ -1324,7 +1324,7 @@ netmap_rxsync_from_host(struct netmap_kring *kring, int flags)
 m_copydata(m, 0, len, NMB(na, slot));
 ND("nm %d len %d", nm_i, len);
 if (netmap_verbose)
-D("%s", nm_dump_buf(NMB(na, slot),len, 128, NULL));
+D("%s", nm_dump_buf(NMB(na, slot),len, 128, NULL));
 
 slot->len = len;
 slot->flags = 0;
@@ -1476,7 +1476,7 @@ netmap_get_na(struct nmreq_header *hdr,
 struct netmap_adapter **na, struct ifnet **ifp,
 struct netmap_mem_d *nmd, int create)
 {
-struct nmreq_register *req = (struct nmreq_register *)hdr->nr_body;
+struct nmreq_register *req = (struct nmreq_register *)(uintptr_t)hdr->nr_body;
 int error = 0;
 struct netmap_adapter *ret = NULL;
 int nmd_ref = 0;
@@ -2091,9 +2091,6 @@ netmap_do_regif(struct netmap_priv_d *priv, struct netmap_adapter *na,
 
 NMG_LOCK_ASSERT();
 priv->np_na = na; /* store the reference */
-error = netmap_set_ringid(priv, nr_mode, nr_ringid, nr_flags);
-if (error)
-goto err;
 error = netmap_mem_finalize(na->nm_mem, na);
 if (error)
 goto err;
@@ -2109,7 +2106,14 @@ netmap_do_regif(struct netmap_priv_d *priv, struct netmap_adapter *na,
 
 /* ring configuration may have changed, fetch from the card */
 netmap_update_config(na);
 }
 
+/* compute the range of tx and rx rings to monitor */
+error = netmap_set_ringid(priv, nr_mode, nr_ringid, nr_flags);
+if (error)
+goto err_put_lut;
+
 if (na->active_fds == 0) {
 /*
 * If this is the first registration of the adapter,
 * perform sanity checks and create the in-kernel view
@@ -2117,11 +2121,17 @@ netmap_do_regif(struct netmap_priv_d *priv, struct netmap_adapter *na,
 */
 if (na->ifp && nm_priv_rx_enabled(priv)) {
 /* This netmap adapter is attached to an ifnet. */
-unsigned nbs = netmap_mem_bufsize(na->nm_mem);
+unsigned nbs = NETMAP_BUF_SIZE(na);
 unsigned mtu = nm_os_ifnet_mtu(na->ifp);
 
-ND("mtu %d rx_buf_maxsize %d netmap_buf_size %d",
-mtu, na->rx_buf_maxsize, nbs);
+ND("%s: mtu %d rx_buf_maxsize %d netmap_buf_size %d",
+na->name, mtu, na->rx_buf_maxsize, nbs);
 
+if (na->rx_buf_maxsize == 0) {
+D("%s: error: rx_buf_maxsize == 0", na->name);
+error = EIO;
+goto err_drop_mem;
+}
+
 if (mtu <= na->rx_buf_maxsize) {
 /* The MTU fits a single NIC slot. We only
@@ -2191,7 +2201,7 @@ netmap_do_regif(struct netmap_priv_d *priv, struct netmap_adapter *na,
 nifp = netmap_mem_if_new(na, priv);
 if (nifp == NULL) {
 error = ENOMEM;
-goto err_del_rings;
+goto err_rel_excl;
 }
 
 if (nm_kring_pending(priv)) {
@@ -2217,10 +2227,9 @@ netmap_do_regif(struct netmap_priv_d *priv, struct netmap_adapter *na,
 
 err_del_if:
 netmap_mem_if_delete(na, nifp);
-err_del_rings:
-netmap_mem_rings_delete(na);
 err_rel_excl:
 netmap_krings_put(priv);
+netmap_mem_rings_delete(na);
 err_del_krings:
 if (na->active_fds == 0)
 na->nm_krings_delete(na);
@@ -2313,8 +2322,8 @@ netmap_ioctl(struct netmap_priv_d *priv, u_long cmd, caddr_t data,
 * For convenince, the nr_body pointer and the pointers
 * in the options list will be replaced with their
 * kernel-space counterparts. The original pointers are
-* saved internally and later restored by nmreq_copyout
-*/
+* saved internally and later restored by nmreq_copyout
+*/
 error = nmreq_copyin(hdr, nr_body_is_user);
 if (error) {
 return error;
@@ -2326,7 +2335,7 @@ netmap_ioctl(struct netmap_priv_d *priv, u_long cmd, caddr_t data,
 switch (hdr->nr_reqtype) {
 case NETMAP_REQ_REGISTER: {
 struct nmreq_register *req =
-(struct nmreq_register *)hdr->nr_body;
+(struct nmreq_register *)(uintptr_t)hdr->nr_body;
 /* Protect access to priv from concurrent requests. */
 NMG_LOCK();
 do {
@@ -2341,7 +2350,7 @@ netmap_ioctl(struct netmap_priv_d *priv, u_long cmd, caddr_t data,
 }
 
 #ifdef WITH_EXTMEM
-opt = nmreq_findoption((struct nmreq_option *)hdr->nr_options,
+opt = nmreq_findoption((struct nmreq_option *)(uintptr_t)hdr->nr_options,
 NETMAP_REQ_OPT_EXTMEM);
 if (opt != NULL) {
 struct nmreq_opt_extmem *e =
@@ -2444,7 +2453,7 @@ netmap_ioctl(struct netmap_priv_d *priv, u_long cmd, caddr_t data,
 
 case NETMAP_REQ_PORT_INFO_GET: {
 struct nmreq_port_info_get *req =
-(struct nmreq_port_info_get *)hdr->nr_body;
+(struct nmreq_port_info_get *)(uintptr_t)hdr->nr_body;
 
 NMG_LOCK();
 do {
@@ -2463,10 +2472,10 @@ netmap_ioctl(struct netmap_priv_d *priv, u_long cmd, caddr_t data,
 
 /* get a refcount */
 hdr->nr_reqtype = NETMAP_REQ_REGISTER;
-hdr->nr_body = (uint64_t)&regreq;
+hdr->nr_body = (uintptr_t)&regreq;
 error = netmap_get_na(hdr, &na, &ifp, NULL, 1 /* create */);
 hdr->nr_reqtype = NETMAP_REQ_PORT_INFO_GET; /* reset type */
-hdr->nr_body = (uint64_t)req; /* reset nr_body */
+hdr->nr_body = (uintptr_t)req; /* reset nr_body */
 if (error) {
 na = NULL;
 ifp = NULL;
@@ -2517,7 +2526,7 @@ netmap_ioctl(struct netmap_priv_d *priv, u_long cmd, caddr_t data,
 
 case NETMAP_REQ_PORT_HDR_SET: {
 struct nmreq_port_hdr *req =
-(struct nmreq_port_hdr *)hdr->nr_body;
+(struct nmreq_port_hdr *)(uintptr_t)hdr->nr_body;
 /* Build a nmreq_register out of the nmreq_port_hdr,
 * so that we can call netmap_get_bdg_na(). */
 struct nmreq_register regreq;
@@ -2533,10 +2542,10 @@ netmap_ioctl(struct netmap_priv_d *priv, u_long cmd, caddr_t data,
 }
 NMG_LOCK();
 hdr->nr_reqtype = NETMAP_REQ_REGISTER;
-hdr->nr_body = (uint64_t)&regreq;
+hdr->nr_body = (uintptr_t)&regreq;
 error = netmap_get_bdg_na(hdr, &na, NULL, 0);
 hdr->nr_reqtype = NETMAP_REQ_PORT_HDR_SET;
-hdr->nr_body = (uint64_t)req;
+hdr->nr_body = (uintptr_t)req;
 if (na && !error) {
 struct netmap_vp_adapter *vpna =
 (struct netmap_vp_adapter *)na;
@@ -2556,7 +2565,7 @@ netmap_ioctl(struct netmap_priv_d *priv, u_long cmd, caddr_t data,
 case NETMAP_REQ_PORT_HDR_GET: {
 /* Get vnet-header length for this netmap port */
 struct nmreq_port_hdr *req =
-(struct nmreq_port_hdr *)hdr->nr_body;
+(struct nmreq_port_hdr *)(uintptr_t)hdr->nr_body;
 /* Build a nmreq_register out of the nmreq_port_hdr,
 * so that we can call netmap_get_bdg_na(). */
 struct nmreq_register regreq;
@@ -2565,10 +2574,10 @@ netmap_ioctl(struct netmap_priv_d *priv, u_long cmd, caddr_t data,
 bzero(&regreq, sizeof(regreq));
 NMG_LOCK();
 hdr->nr_reqtype = NETMAP_REQ_REGISTER;
-hdr->nr_body = (uint64_t)&regreq;
+hdr->nr_body = (uintptr_t)&regreq;
 error = netmap_get_na(hdr, &na, &ifp, NULL, 0);
 hdr->nr_reqtype = NETMAP_REQ_PORT_HDR_GET;
-hdr->nr_body = (uint64_t)req;
+hdr->nr_body = (uintptr_t)req;
 if (na && !error) {
 req->nr_hdr_len = na->virt_hdr_len;
 }
@@ -2595,7 +2604,7 @@ netmap_ioctl(struct netmap_priv_d *priv, u_long cmd, caddr_t data,
 #endif /* WITH_VALE */
 case NETMAP_REQ_POOLS_INFO_GET: {
 struct nmreq_pools_info *req =
-(struct nmreq_pools_info *)hdr->nr_body;
+(struct nmreq_pools_info *)(uintptr_t)hdr->nr_body;
 /* Get information from the memory allocator. This
 * netmap device must already be bound to a port.
 * Note that hdr->nr_name is ignored. */
@@ -2774,8 +2783,8 @@ nmreq_copyin(struct nmreq_header *hdr, int nr_body_is_user)
 error = EMSGSIZE;
 goto out_err;
 }
-if ((rqsz && hdr->nr_body == (uint64_t)NULL) ||
-(!rqsz && hdr->nr_body != (uint64_t)NULL)) {
+if ((rqsz && hdr->nr_body == (uintptr_t)NULL) ||
+(!rqsz && hdr->nr_body != (uintptr_t)NULL)) {
 /* Request body expected, but not found; or
 * request body found but unexpected. */
 error = EINVAL;
@@ -2784,8 +2793,8 @@ nmreq_copyin(struct nmreq_header *hdr, int nr_body_is_user)
 
 bufsz = 2 * sizeof(void *) + rqsz;
 optsz = 0;
-for (src = (struct nmreq_option *)hdr->nr_options; src;
-src = (struct nmreq_option *)buf.nro_next)
+for (src = (struct nmreq_option *)(uintptr_t)hdr->nr_options; src;
+src = (struct nmreq_option *)(uintptr_t)buf.nro_next)
 {
 error = copyin(src, &buf, sizeof(*src));
 if (error)
@@ -2813,11 +2822,11 @@ nmreq_copyin(struct nmreq_header *hdr, int nr_body_is_user)
 p = (char *)ptrs;
 
 /* copy the body */
-error = copyin((void *)hdr->nr_body, p, rqsz);
+error = copyin((void *)(uintptr_t)hdr->nr_body, p, rqsz);
 if (error)
 goto out_restore;
 /* overwrite the user pointer with the in-kernel one */
-hdr->nr_body = (uint64_t)p;
+hdr->nr_body = (uintptr_t)p;
 p += rqsz;
 
 /* copy the options */
@@ -2874,7 +2883,7 @@ static int
 nmreq_copyout(struct nmreq_header *hdr, int rerror)
 {
 struct nmreq_option *src, *dst;
-void *ker = (void *)hdr->nr_body, *bufstart;
+void *ker = (void *)(uintptr_t)hdr->nr_body, *bufstart;
 uint64_t *ptrs;
 size_t bodysz;
 int error;
@@ -2886,13 +2895,13 @@ nmreq_copyout(struct nmreq_header *hdr, int rerror)
 ptrs = (uint64_t *)ker - 2;
 bufstart = ptrs;
 hdr->nr_body = *ptrs++;
-src = (struct nmreq_option *)hdr->nr_options;
+src = (struct nmreq_option *)(uintptr_t)hdr->nr_options;
 hdr->nr_options = *ptrs;
 
 if (!rerror) {
 /* copy the body */
 bodysz = nmreq_size_by_type(hdr->nr_reqtype);
-error = copyout(ker, (void *)hdr->nr_body, bodysz);
+error = copyout(ker, (void *)(uintptr_t)hdr->nr_body, bodysz);
 if (error) {
 rerror = error;
 goto out;
@@ -2900,7 +2909,7 @@ nmreq_copyout(struct nmreq_header *hdr, int rerror)
 }
 
 /* copy the options */
-dst = (struct nmreq_option *)hdr->nr_options;
+dst = (struct nmreq_option *)(uintptr_t)hdr->nr_options;
 while (src) {
 size_t optsz;
 uint64_t next;
@@ -2916,7 +2925,7 @@ nmreq_copyout(struct nmreq_header *hdr, int rerror)
 rerror = error;
 goto out;
 }
-
+
 /* copy the option body only if there was no error */
 if (!rerror && !src->nro_status) {
 optsz = nmreq_opt_size_by_type(src->nro_reqtype);
@@ -2928,8 +2937,8 @@ nmreq_copyout(struct nmreq_header *hdr, int rerror)
 }
 }
 }
-src = (struct nmreq_option *)next;
-dst = (struct nmreq_option *)*ptrs;
+src = (struct nmreq_option *)(uintptr_t)next;
+dst = (struct nmreq_option *)(uintptr_t)*ptrs;
 }
 
 
@@ -2942,7 +2951,7 @@ nmreq_copyout(struct nmreq_header *hdr, int rerror)
 struct nmreq_option *
 nmreq_findoption(struct nmreq_option *opt, uint16_t reqtype)
 {
-for ( ; opt; opt = (struct nmreq_option *)opt->nro_next)
+for ( ; opt; opt = (struct nmreq_option *)(uintptr_t)opt->nro_next)
 if (opt->nro_reqtype == reqtype)
 return opt;
 return NULL;
@@ -2953,7 +2962,7 @@ nmreq_checkduplicate(struct nmreq_option *opt) {
 uint16_t type = opt->nro_reqtype;
 int dup = 0;
 
-while ((opt = nmreq_findoption((struct nmreq_option *)opt->nro_next,
+while ((opt = nmreq_findoption((struct nmreq_option *)(uintptr_t)opt->nro_next,
 type))) {
 dup++;
 opt->nro_status = EINVAL;
@@ -2969,8 +2978,8 @@ nmreq_checkoptions(struct nmreq_header *hdr)
 * marked as not supported
 */
 
-for (opt = (struct nmreq_option *)hdr->nr_options; opt;
-opt = (struct nmreq_option *)opt->nro_next)
+for (opt = (struct nmreq_option *)(uintptr_t)hdr->nr_options; opt;
+opt = (struct nmreq_option *)(uintptr_t)opt->nro_next)
 if (opt->nro_status == EOPNOTSUPP)
 return EOPNOTSUPP;
 
@@ -3643,9 +3652,9 @@ netmap_transmit(struct ifnet *ifp, struct mbuf *m)
 */
 mbq_lock(q);
 
-busy = kring->nr_hwtail - kring->nr_hwcur;
-if (busy < 0)
-busy += kring->nkr_num_slots;
+busy = kring->nr_hwtail - kring->nr_hwcur;
+if (busy < 0)
+busy += kring->nkr_num_slots;
 if (busy + mbq_len(q) >= kring->nkr_num_slots - 1) {
 RD(2, "%s full hwcur %d hwtail %d qlen %d", na->name,
 kring->nr_hwcur, kring->nr_hwtail, mbq_len(q));

@@ -138,13 +138,13 @@ nm_os_put_module(void)
 static void
 netmap_ifnet_arrival_handler(void *arg __unused, struct ifnet *ifp)
 {
-netmap_undo_zombie(ifp);
+netmap_undo_zombie(ifp);
 }
 
 static void
 netmap_ifnet_departure_handler(void *arg __unused, struct ifnet *ifp)
 {
-netmap_make_zombie(ifp);
+netmap_make_zombie(ifp);
 }
 
 static eventhandler_tag nm_ifnet_ah_tag;
@@ -153,33 +153,33 @@ static eventhandler_tag nm_ifnet_dh_tag;
 int
 nm_os_ifnet_init(void)
 {
-nm_ifnet_ah_tag =
-EVENTHANDLER_REGISTER(ifnet_arrival_event,
-netmap_ifnet_arrival_handler,
-NULL, EVENTHANDLER_PRI_ANY);
-nm_ifnet_dh_tag =
-EVENTHANDLER_REGISTER(ifnet_departure_event,
-netmap_ifnet_departure_handler,
-NULL, EVENTHANDLER_PRI_ANY);
-return 0;
+nm_ifnet_ah_tag =
+EVENTHANDLER_REGISTER(ifnet_arrival_event,
+netmap_ifnet_arrival_handler,
+NULL, EVENTHANDLER_PRI_ANY);
+nm_ifnet_dh_tag =
+EVENTHANDLER_REGISTER(ifnet_departure_event,
+netmap_ifnet_departure_handler,
+NULL, EVENTHANDLER_PRI_ANY);
+return 0;
 }
 
 void
 nm_os_ifnet_fini(void)
 {
-EVENTHANDLER_DEREGISTER(ifnet_arrival_event,
-nm_ifnet_ah_tag);
-EVENTHANDLER_DEREGISTER(ifnet_departure_event,
-nm_ifnet_dh_tag);
+EVENTHANDLER_DEREGISTER(ifnet_arrival_event,
+nm_ifnet_ah_tag);
+EVENTHANDLER_DEREGISTER(ifnet_departure_event,
+nm_ifnet_dh_tag);
 }
 
 unsigned
 nm_os_ifnet_mtu(struct ifnet *ifp)
 {
 #if __FreeBSD_version < 1100030
-return ifp->if_data.ifi_mtu;
+return ifp->if_data.ifi_mtu;
 #else /* __FreeBSD_version >= 1100030 */
-return ifp->if_mtu;
+return ifp->if_mtu;
 #endif
 }
 
@@ -625,14 +625,14 @@ nm_os_vi_detach(struct ifnet *ifp)
 struct nm_os_extmem {
 vm_object_t obj;
 vm_offset_t kva;
-vm_offset_t size;
-vm_pindex_t scan;
+vm_offset_t size;
+uintptr_t scan;
 };
 
 void
 nm_os_extmem_delete(struct nm_os_extmem *e)
 {
-D("freeing %zx bytes", (size_t)e->size);
+D("freeing %jx bytes", (uintmax_t)e->size);
 vm_map_remove(kernel_map, e->kva, e->kva + e->size);
 nm_os_free(e);
 }
@@ -701,7 +701,7 @@ nm_os_extmem_create(unsigned long p, struct nmreq_pools_info *pi, int *perror)
 VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE,
 VM_PROT_READ | VM_PROT_WRITE, 0);
 if (rv != KERN_SUCCESS) {
-D("vm_map_find(%zx) failed", (size_t)e->size);
+D("vm_map_find(%jx) failed", (uintmax_t)e->size);
 goto out_rel;
 }
 rv = vm_map_wire(kernel_map, e->kva, e->kva + e->size,
@@ -942,7 +942,7 @@ struct netmap_vm_handle_t {
 
 static int
 netmap_dev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
-vm_ooffset_t foff, struct ucred *cred, u_short *color)
+vm_ooffset_t foff, struct ucred *cred, u_short *color)
 {
 struct netmap_vm_handle_t *vmh = handle;
 
@@ -1519,7 +1519,7 @@ freebsd_netmap_poll(struct cdev *cdevi __unused, int events, struct thread *td)
 
 static int
 freebsd_netmap_ioctl(struct cdev *dev __unused, u_long cmd, caddr_t data,
-int ffla __unused, struct thread *td)
+int ffla __unused, struct thread *td)
 {
 int error;
 struct netmap_priv_d *priv;

@@ -235,14 +235,14 @@ nm_os_get_mbuf(struct ifnet *ifp, int len)
 for ((_k)=*(_karr), (_i) = 0; (_i) < (_n); (_i)++, (_k) = (_karr)[(_i)])
 
 #define for_each_tx_kring(_i, _k, _na) \
-for_each_kring_n(_i, _k, (_na)->tx_rings, (_na)->num_tx_rings)
+for_each_kring_n(_i, _k, (_na)->tx_rings, (_na)->num_tx_rings)
 #define for_each_tx_kring_h(_i, _k, _na) \
-for_each_kring_n(_i, _k, (_na)->tx_rings, (_na)->num_tx_rings + 1)
+for_each_kring_n(_i, _k, (_na)->tx_rings, (_na)->num_tx_rings + 1)
 
 #define for_each_rx_kring(_i, _k, _na) \
-for_each_kring_n(_i, _k, (_na)->rx_rings, (_na)->num_rx_rings)
+for_each_kring_n(_i, _k, (_na)->rx_rings, (_na)->num_rx_rings)
 #define for_each_rx_kring_h(_i, _k, _na) \
-for_each_kring_n(_i, _k, (_na)->rx_rings, (_na)->num_rx_rings + 1)
+for_each_kring_n(_i, _k, (_na)->rx_rings, (_na)->num_rx_rings + 1)
 
 
 /* ======================== PERFORMANCE STATISTICS =========================== */
@@ -297,12 +297,12 @@ static struct rate_context rate_ctx;
 
 void generic_rate(int txp, int txs, int txi, int rxp, int rxs, int rxi)
 {
-if (txp) rate_ctx.new.txpkt++;
-if (txs) rate_ctx.new.txsync++;
-if (txi) rate_ctx.new.txirq++;
-if (rxp) rate_ctx.new.rxpkt++;
-if (rxs) rate_ctx.new.rxsync++;
-if (rxi) rate_ctx.new.rxirq++;
+if (txp) rate_ctx.new.txpkt++;
+if (txs) rate_ctx.new.txsync++;
+if (txi) rate_ctx.new.txirq++;
+if (rxp) rate_ctx.new.rxpkt++;
+if (rxs) rate_ctx.new.rxsync++;
+if (rxi) rate_ctx.new.rxirq++;
 }
 
 #else /* !RATE */
@@ -586,7 +586,7 @@ generic_mbuf_destructor(struct mbuf *m)
 * MBUF_TXQ(m) under our feet. If the match is not found
 * on 'r', we try to see if it belongs to some other ring.
 */
-for (;;) {
+for (;;) {
 bool match = false;
 
 kring = na->tx_rings[r];

@@ -450,7 +450,7 @@ struct netmap_kring {
 
 /* the adapter the owns this kring */
 struct netmap_adapter *na;
-
+
 /* the adapter that wants to be notified when this kring has
 * new slots avaialable. This is usually the same as the above,
 * but wrappers may let it point to themselves
@@ -1528,7 +1528,7 @@ int netmap_get_monitor_na(struct nmreq_header *hdr, struct netmap_adapter **na,
 void netmap_monitor_stop(struct netmap_adapter *na);
 #else
 #define netmap_get_monitor_na(hdr, _2, _3, _4) \
-(((struct nmreq_register *)hdr->nr_body)->nr_flags & (NR_MONITOR_TX | NR_MONITOR_RX) ? EOPNOTSUPP : 0)
+(((struct nmreq_register *)(uintptr_t)hdr->nr_body)->nr_flags & (NR_MONITOR_TX | NR_MONITOR_RX) ? EOPNOTSUPP : 0)
 #endif
 
 #ifdef CONFIG_NET_NS
@@ -1823,7 +1823,7 @@ struct lut_entry {
 };
 #else /* linux & _WIN32 */
 /* dma-mapping in linux can assign a buffer a different address
-* depending on the device, so we need to have a separate
+* depending on the device, so we need to have a separate
 * physical-address look-up table for each na.
 * We can still share the vaddrs, though, therefore we split
 * the lut_entry structure.
@@ -2177,7 +2177,7 @@ nm_ptnetmap_host_on(struct netmap_adapter *na)
 }
 #else /* !WITH_PTNETMAP_HOST */
 #define netmap_get_pt_host_na(hdr, _2, _3, _4) \
-(((struct nmreq_register *)hdr->nr_body)->nr_flags & (NR_PTNETMAP_HOST) ? EOPNOTSUPP : 0)
+(((struct nmreq_register *)(uintptr_t)hdr->nr_body)->nr_flags & (NR_PTNETMAP_HOST) ? EOPNOTSUPP : 0)
 #define ptnetmap_ctl(_1, _2, _3) EINVAL
 #define nm_ptnetmap_host_on(_1) EINVAL
 #endif /* !WITH_PTNETMAP_HOST */

@@ -132,8 +132,8 @@ nmreq_from_legacy(struct nmreq *nmr, u_long ioctl_cmd)
 /* First prepare the request header. */
 hdr->nr_version = NETMAP_API; /* new API */
 strncpy(hdr->nr_name, nmr->nr_name, sizeof(nmr->nr_name));
-hdr->nr_options = (uint64_t)NULL;
-hdr->nr_body = (uint64_t)NULL;
+hdr->nr_options = (uintptr_t)NULL;
+hdr->nr_body = (uintptr_t)NULL;
 
 switch (ioctl_cmd) {
 case NIOCREGIF: {
@@ -142,7 +142,7 @@ nmreq_from_legacy(struct nmreq *nmr, u_long ioctl_cmd)
 /* Regular NIOCREGIF operation. */
 struct nmreq_register *req = nm_os_malloc(sizeof(*req));
 if (!req) { goto oom; }
-hdr->nr_body = (uint64_t)req;
+hdr->nr_body = (uintptr_t)req;
 hdr->nr_reqtype = NETMAP_REQ_REGISTER;
 if (nmreq_register_from_legacy(nmr, hdr, req)) {
 goto oom;
@@ -152,7 +152,7 @@ nmreq_from_legacy(struct nmreq *nmr, u_long ioctl_cmd)
 case NETMAP_BDG_ATTACH: {
 struct nmreq_vale_attach *req = nm_os_malloc(sizeof(*req));
 if (!req) { goto oom; }
-hdr->nr_body = (uint64_t)req;
+hdr->nr_body = (uintptr_t)req;
 hdr->nr_reqtype = NETMAP_REQ_VALE_ATTACH;
 if (nmreq_register_from_legacy(nmr, hdr, &req->reg)) {
 goto oom;
@@ -167,14 +167,14 @@ nmreq_from_legacy(struct nmreq *nmr, u_long ioctl_cmd)
 }
 case NETMAP_BDG_DETACH: {
 hdr->nr_reqtype = NETMAP_REQ_VALE_DETACH;
-hdr->nr_body = (uint64_t)nm_os_malloc(sizeof(struct nmreq_vale_detach));
+hdr->nr_body = (uintptr_t)nm_os_malloc(sizeof(struct nmreq_vale_detach));
 break;
 }
 case NETMAP_BDG_VNET_HDR:
 case NETMAP_VNET_HDR_GET: {
 struct nmreq_port_hdr *req = nm_os_malloc(sizeof(*req));
 if (!req) { goto oom; }
-hdr->nr_body = (uint64_t)req;
+hdr->nr_body = (uintptr_t)req;
 hdr->nr_reqtype = (nmr->nr_cmd == NETMAP_BDG_VNET_HDR) ?
 NETMAP_REQ_PORT_HDR_SET : NETMAP_REQ_PORT_HDR_GET;
 req->nr_hdr_len = nmr->nr_arg1;
@@ -183,7 +183,7 @@ nmreq_from_legacy(struct nmreq *nmr, u_long ioctl_cmd)
 case NETMAP_BDG_NEWIF : {
 struct nmreq_vale_newif *req = nm_os_malloc(sizeof(*req));
 if (!req) { goto oom; }
-hdr->nr_body = (uint64_t)req;
+hdr->nr_body = (uintptr_t)req;
 hdr->nr_reqtype = NETMAP_REQ_VALE_NEWIF;
 req->nr_tx_slots = nmr->nr_tx_slots;
 req->nr_rx_slots = nmr->nr_rx_slots;
@@ -200,7 +200,7 @@ nmreq_from_legacy(struct nmreq *nmr, u_long ioctl_cmd)
 case NETMAP_BDG_POLLING_OFF: {
 struct nmreq_vale_polling *req = nm_os_malloc(sizeof(*req));
 if (!req) { goto oom; }
-hdr->nr_body = (uint64_t)req;
+hdr->nr_body = (uintptr_t)req;
 hdr->nr_reqtype = (nmr->nr_cmd == NETMAP_BDG_POLLING_ON) ?
 NETMAP_REQ_VALE_POLLING_ENABLE :
 NETMAP_REQ_VALE_POLLING_DISABLE;
@@ -232,7 +232,7 @@ nmreq_from_legacy(struct nmreq *nmr, u_long ioctl_cmd)
 if (nmr->nr_cmd == NETMAP_BDG_LIST) {
 struct nmreq_vale_list *req = nm_os_malloc(sizeof(*req));
 if (!req) { goto oom; }
-hdr->nr_body = (uint64_t)req;
+hdr->nr_body = (uintptr_t)req;
 hdr->nr_reqtype = NETMAP_REQ_VALE_LIST;
 req->nr_bridge_idx = nmr->nr_arg1;
 req->nr_port_idx = nmr->nr_arg2;
@@ -240,7 +240,7 @@ nmreq_from_legacy(struct nmreq *nmr, u_long ioctl_cmd)
 /* Regular NIOCGINFO. */
 struct nmreq_port_info_get *req = nm_os_malloc(sizeof(*req));
 if (!req) { goto oom; }
-hdr->nr_body = (uint64_t)req;
+hdr->nr_body = (uintptr_t)req;
 hdr->nr_reqtype = NETMAP_REQ_PORT_INFO_GET;
 req->nr_offset = nmr->nr_offset;
 req->nr_memsize = nmr->nr_memsize;
@@ -258,7 +258,7 @@ nmreq_from_legacy(struct nmreq *nmr, u_long ioctl_cmd)
 oom:
 if (hdr) {
 if (hdr->nr_body) {
-nm_os_free((void *)hdr->nr_body);
+nm_os_free((void *)(uintptr_t)hdr->nr_body);
 }
 nm_os_free(hdr);
 }
@@ -293,13 +293,13 @@ nmreq_to_legacy(struct nmreq_header *hdr, struct nmreq *nmr)
 switch (hdr->nr_reqtype) {
 case NETMAP_REQ_REGISTER: {
 struct nmreq_register *req =
-(struct nmreq_register *)hdr->nr_body;
+(struct nmreq_register *)(uintptr_t)hdr->nr_body;
 nmreq_register_to_legacy(req, nmr);
 break;
 }
 case NETMAP_REQ_PORT_INFO_GET: {
 struct nmreq_port_info_get *req =
-(struct nmreq_port_info_get *)hdr->nr_body;
+(struct nmreq_port_info_get *)(uintptr_t)hdr->nr_body;
 nmr->nr_offset = req->nr_offset;
 nmr->nr_memsize = req->nr_memsize;
 nmr->nr_tx_slots = req->nr_tx_slots;
@@ -311,7 +311,7 @@ nmreq_to_legacy(struct nmreq_header *hdr, struct nmreq *nmr)
 }
 case NETMAP_REQ_VALE_ATTACH: {
 struct nmreq_vale_attach *req =
-(struct nmreq_vale_attach *)hdr->nr_body;
+(struct nmreq_vale_attach *)(uintptr_t)hdr->nr_body;
 nmreq_register_to_legacy(&req->reg, nmr);
 break;
 }
@@ -320,7 +320,7 @@ nmreq_to_legacy(struct nmreq_header *hdr, struct nmreq *nmr)
 }
 case NETMAP_REQ_VALE_LIST: {
 struct nmreq_vale_list *req =
-(struct nmreq_vale_list *)hdr->nr_body;
+(struct nmreq_vale_list *)(uintptr_t)hdr->nr_body;
 strncpy(nmr->nr_name, hdr->nr_name, sizeof(nmr->nr_name));
 nmr->nr_arg1 = req->nr_bridge_idx;
 nmr->nr_arg2 = req->nr_port_idx;
@@ -329,13 +329,13 @@ nmreq_to_legacy(struct nmreq_header *hdr, struct nmreq *nmr)
 case NETMAP_REQ_PORT_HDR_SET:
 case NETMAP_REQ_PORT_HDR_GET: {
 struct nmreq_port_hdr *req =
-(struct nmreq_port_hdr *)hdr->nr_body;
+(struct nmreq_port_hdr *)(uintptr_t)hdr->nr_body;
 nmr->nr_arg1 = req->nr_hdr_len;
 break;
 }
 case NETMAP_REQ_VALE_NEWIF: {
 struct nmreq_vale_newif *req =
-(struct nmreq_vale_newif *)hdr->nr_body;
+(struct nmreq_vale_newif *)(uintptr_t)hdr->nr_body;
 nmr->nr_tx_slots = req->nr_tx_slots;
 nmr->nr_rx_slots = req->nr_rx_slots;
 nmr->nr_tx_rings = req->nr_tx_rings;
@@ -375,7 +375,7 @@ netmap_ioctl_legacy(struct netmap_priv_d *priv, u_long cmd, caddr_t data,
 nmreq_to_legacy(hdr, nmr);
 }
 if (hdr->nr_body) {
-nm_os_free((void *)hdr->nr_body);
+nm_os_free((void *)(uintptr_t)hdr->nr_body);
 }
 nm_os_free(hdr);
 break;

@@ -673,10 +673,10 @@ nm_mem_assign_id_locked(struct netmap_mem_d *nmd)
 static int
 nm_mem_assign_id(struct netmap_mem_d *nmd)
 {
-int ret;
+int ret;
 
 NM_MTX_LOCK(nm_mem_list_lock);
-ret = nm_mem_assign_id_locked(nmd);
+ret = nm_mem_assign_id_locked(nmd);
 NM_MTX_UNLOCK(nm_mem_list_lock);
 
 return ret;
@@ -1143,7 +1143,7 @@ netmap_extra_alloc(struct netmap_adapter *na, uint32_t *head, uint32_t n)
 static void
 netmap_extra_free(struct netmap_adapter *na, uint32_t head)
 {
-struct lut_entry *lut = na->na_lut.lut;
+struct lut_entry *lut = na->na_lut.lut;
 struct netmap_mem_d *nmd = na->nm_mem;
 struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
 uint32_t i, cur, *buf;
@@ -1516,10 +1516,11 @@ netmap_mem_unmap(struct netmap_obj_pool *p, struct netmap_adapter *na)
 return 0;
 
 #if defined(__FreeBSD__)
 /* On FreeBSD mapping and unmapping is performed by the txsync
 * and rxsync routine, packet by packet. */
 (void)i;
 (void)lim;
+(void)lut;
 D("unsupported on FreeBSD");
 #elif defined(_WIN32)
 (void)i;
 (void)lim;
@@ -1551,10 +1552,11 @@ netmap_mem_map(struct netmap_obj_pool *p, struct netmap_adapter *na)
 return 0;
 
 #if defined(__FreeBSD__)
 /* On FreeBSD mapping and unmapping is performed by the txsync
 * and rxsync routine, packet by packet. */
 (void)i;
 (void)lim;
+(void)lut;
 D("unsupported on FreeBSD");
 #elif defined(_WIN32)
 (void)i;
 (void)lim;
@@ -1572,7 +1574,7 @@ netmap_mem_map(struct netmap_obj_pool *p, struct netmap_adapter *na)
 if (lut->plut == NULL) {
 D("Failed to allocate physical lut for %s", na->name);
 return ENOMEM;
-}
+}
 
 for (i = 0; i < lim; i += p->_clustentries) {
 lut->plut[i].paddr = 0;
@@ -1644,7 +1646,7 @@ netmap_mem_finalize_all(struct netmap_mem_d *nmd)
 * allocator for private memory
 */
 static void *
-_netmap_mem_private_new(size_t size, struct netmap_obj_params *p,
+_netmap_mem_private_new(size_t size, struct netmap_obj_params *p,
 struct netmap_mem_ops *ops, int *perr)
 {
 struct netmap_mem_d *d = NULL;
@@ -1722,16 +1724,16 @@ netmap_mem_private_new(u_int txr, u_int txd, u_int rxr, u_int rxd,
 if (p[NETMAP_RING_POOL].size < v)
 p[NETMAP_RING_POOL].size = v;
 /* each pipe endpoint needs two tx rings (1 normal + 1 host, fake)
-* and two rx rings (again, 1 normal and 1 fake host)
-*/
+* and two rx rings (again, 1 normal and 1 fake host)
+*/
 v = txr + rxr + 8 * npipes;
 if (p[NETMAP_RING_POOL].num < v)
 p[NETMAP_RING_POOL].num = v;
 /* for each pipe we only need the buffers for the 4 "real" rings.
-* On the other end, the pipe ring dimension may be different from
-* the parent port ring dimension. As a compromise, we allocate twice the
-* space actually needed if the pipe rings were the same size as the parent rings
-*/
+* On the other end, the pipe ring dimension may be different from
+* the parent port ring dimension. As a compromise, we allocate twice the
+* space actually needed if the pipe rings were the same size as the parent rings
+*/
 v = (4 * npipes + rxr) * rxd + (4 * npipes + txr) * txd + 2 + extra_bufs;
 /* the +2 is for the tx and rx fake buffers (indices 0 and 1) */
 if (p[NETMAP_BUF_POOL].num < v)
@@ -1942,7 +1944,11 @@ netmap_mem2_rings_create(struct netmap_adapter *na)
 return 0;
 
 cleanup:
-netmap_free_rings(na);
+/* we cannot actually cleanup here, since we don't own kring->users
+* and kring->nr_klags & NKR_NEEDRING. The caller must decrement
+* the first or zero-out the second, then call netmap_free_rings()
+* to do the cleanup
+*/
 
 return ENOMEM;
 }
@@ -2155,7 +2161,7 @@ netmap_mem_ext_delete(struct netmap_mem_d *d)
 
 for (i = 0; i < NETMAP_POOLS_NR; i++) {
 struct netmap_obj_pool *p = &d->pools[i];
-
+
 if (p->lut) {
 nm_free_lut(p->lut, p->objtotal);
 p->lut = NULL;
@@ -2215,7 +2221,7 @@ netmap_mem_ext_create(uint64_t usrptr, struct nmreq_pools_info *pi, int *perror)
 pi->nr_if_pool_objtotal, pi->nr_if_pool_objsize,
 pi->nr_ring_pool_objtotal, pi->nr_ring_pool_objsize,
 pi->nr_buf_pool_objtotal, pi->nr_buf_pool_objsize);
-
+
 os = nm_os_extmem_create(usrptr, pi, &error);
 if (os == NULL) {
 D("os extmem creation failed");
@@ -2238,7 +2244,7 @@ netmap_mem_ext_create(uint64_t usrptr, struct nmreq_pools_info *pi, int *perror)
 &error);
 if (nme == NULL)
 goto out_unmap;
-
+
 nr_pages = nm_os_extmem_nr_pages(os);
 
 /* from now on pages will be released by nme destructor;
@@ -2262,7 +2268,7 @@ netmap_mem_ext_create(uint64_t usrptr, struct nmreq_pools_info *pi, int *perror)
 error = ENOMEM;
 goto out_delete;
 }
-
+
 p->bitmap_slots = (o->num + sizeof(uint32_t) - 1) / sizeof(uint32_t);
 p->invalid_bitmap = nm_os_malloc(sizeof(uint32_t) * p->bitmap_slots);
 if (p->invalid_bitmap == NULL) {
@@ -2515,11 +2521,11 @@ netmap_mem_pt_guest_finalize(struct netmap_mem_d *nmd)
 if (error)
 goto out;
 
-/* Initialize the lut using the information contained in the
+/* Initialize the lut using the information contained in the
 * ptnetmap memory device. */
-bufsize = nm_os_pt_memdev_ioread(ptnmd->ptn_dev,
+bufsize = nm_os_pt_memdev_ioread(ptnmd->ptn_dev,
 PTNET_MDEV_IO_BUF_POOL_OBJSZ);
-nbuffers = nm_os_pt_memdev_ioread(ptnmd->ptn_dev,
+nbuffers = nm_os_pt_memdev_ioread(ptnmd->ptn_dev,
 PTNET_MDEV_IO_BUF_POOL_OBJNUM);
 
 /* allocate the lut */
@@ -2740,7 +2746,7 @@ netmap_mem_pt_guest_create(nm_memid_t mem_id)
 ptnmd->host_mem_id = mem_id;
 ptnmd->pt_ifs = NULL;
 
-/* Assign new id in the guest (We have the lock) */
+/* Assign new id in the guest (We have the lock) */
 err = nm_mem_assign_id_locked(&ptnmd->up);
 if (err)
 goto error;

@@ -139,7 +139,7 @@ nm_is_zmon(struct netmap_adapter *na)
 static int
 netmap_monitor_txsync(struct netmap_kring *kring, int flags)
 {
-RD(1, "%s %x", kring->name, flags);
+RD(1, "%s %x", kring->name, flags);
 return EIO;
 }
 
@@ -152,10 +152,10 @@ netmap_monitor_txsync(struct netmap_kring *kring, int flags)
 static int
 netmap_monitor_rxsync(struct netmap_kring *kring, int flags)
 {
-ND("%s %x", kring->name, flags);
+ND("%s %x", kring->name, flags);
 kring->nr_hwcur = kring->rhead;
 mb();
-return 0;
+return 0;
 }
 
 /* nm_krings_create callbacks for monitors.
@@ -198,7 +198,7 @@ nm_monitor_alloc(struct netmap_kring *kring, u_int n)
 return 0;
 
 old_len = sizeof(struct netmap_kring *)*kring->max_monitors;
-len = sizeof(struct netmap_kring *) * n;
+len = sizeof(struct netmap_kring *) * n;
 nm = nm_os_realloc(kring->monitors, len, old_len);
 if (nm == NULL)
 return ENOMEM;
@@ -621,14 +621,14 @@ netmap_zmon_parent_sync(struct netmap_kring *kring, int flags, enum txrx tx)
 static int
 netmap_zmon_parent_txsync(struct netmap_kring *kring, int flags)
 {
-return netmap_zmon_parent_sync(kring, flags, NR_TX);
+return netmap_zmon_parent_sync(kring, flags, NR_TX);
 }
 
 /* callback used to replace the nm_sync callback in the monitored rx rings */
 static int
 netmap_zmon_parent_rxsync(struct netmap_kring *kring, int flags)
 {
-return netmap_zmon_parent_sync(kring, flags, NR_RX);
+return netmap_zmon_parent_sync(kring, flags, NR_RX);
 }
 
 static int
@@ -802,7 +802,7 @@ netmap_monitor_parent_notify(struct netmap_kring *kring, int flags)
 notify = kring->mon_notify;
 }
 nm_kr_put(kring);
-return notify(kring, flags);
+return notify(kring, flags);
 }
 
 
@@ -829,7 +829,7 @@ int
 netmap_get_monitor_na(struct nmreq_header *hdr, struct netmap_adapter **na,
 struct netmap_mem_d *nmd, int create)
 {
-struct nmreq_register *req = (struct nmreq_register *)hdr->nr_body;
+struct nmreq_register *req = (struct nmreq_register *)(uintptr_t)hdr->nr_body;
 struct nmreq_register preq;
 struct netmap_adapter *pna; /* parent adapter */
 struct netmap_monitor_adapter *mna;
@@ -856,9 +856,9 @@ netmap_get_monitor_na(struct nmreq_header *hdr, struct netmap_adapter **na,
 */
 memcpy(&preq, req, sizeof(preq));
 preq.nr_flags &= ~(NR_MONITOR_TX | NR_MONITOR_RX | NR_ZCOPY_MON);
-hdr->nr_body = (uint64_t)&preq;
+hdr->nr_body = (uintptr_t)&preq;
 error = netmap_get_na(hdr, &pna, &ifp, nmd, create);
-hdr->nr_body = (uint64_t)req;
+hdr->nr_body = (uintptr_t)req;
 if (error) {
 D("parent lookup failed: %d", error);
 return error;

@@ -83,7 +83,7 @@ static int netmap_default_pipes = 0; /* ignored, kept for compatibility */
 SYSBEGIN(vars_pipes);
 SYSCTL_DECL(_dev_netmap);
 SYSCTL_INT(_dev_netmap, OID_AUTO, default_pipes, CTLFLAG_RW,
-&netmap_default_pipes, 0, "For compatibility only");
+&netmap_default_pipes, 0, "For compatibility only");
 SYSEND;
 
 /* allocate the pipe array in the parent adapter */
@@ -101,7 +101,7 @@ nm_pipe_alloc(struct netmap_adapter *na, u_int npipes)
 return EINVAL;
 
 old_len = sizeof(struct netmap_pipe_adapter *)*na->na_max_pipes;
-len = sizeof(struct netmap_pipe_adapter *) * npipes;
+len = sizeof(struct netmap_pipe_adapter *) * npipes;
 npa = nm_os_realloc(na->na_pipes, len, old_len);
 if (npa == NULL)
 return ENOMEM;
@@ -184,28 +184,28 @@ netmap_pipe_remove(struct netmap_adapter *parent, struct netmap_pipe_adapter *na
 int
 netmap_pipe_txsync(struct netmap_kring *txkring, int flags)
 {
-struct netmap_kring *rxkring = txkring->pipe;
-u_int k, lim = txkring->nkr_num_slots - 1;
-int m; /* slots to transfer */
+struct netmap_kring *rxkring = txkring->pipe;
+u_int k, lim = txkring->nkr_num_slots - 1;
+int m; /* slots to transfer */
 struct netmap_ring *txring = txkring->ring, *rxring = rxkring->ring;
 
-ND("%p: %s %x -> %s", txkring, txkring->name, flags, rxkring->name);
-ND(20, "TX before: hwcur %d hwtail %d cur %d head %d tail %d",
+ND("%p: %s %x -> %s", txkring, txkring->name, flags, rxkring->name);
+ND(20, "TX before: hwcur %d hwtail %d cur %d head %d tail %d",
 txkring->nr_hwcur, txkring->nr_hwtail,
-txkring->rcur, txkring->rhead, txkring->rtail);
+txkring->rcur, txkring->rhead, txkring->rtail);
 
-m = txkring->rhead - txkring->nr_hwcur; /* new slots */
-if (m < 0)
-m += txkring->nkr_num_slots;
+m = txkring->rhead - txkring->nr_hwcur; /* new slots */
+if (m < 0)
+m += txkring->nkr_num_slots;
 
 if (m == 0) {
 /* nothing to send */
 return 0;
 }
 
-for (k = txkring->nr_hwcur; m; m--, k = nm_next(k, lim)) {
-struct netmap_slot *rs = &rxring->slot[k];
-struct netmap_slot *ts = &txring->slot[k];
+for (k = txkring->nr_hwcur; m; m--, k = nm_next(k, lim)) {
+struct netmap_slot *rs = &rxring->slot[k];
+struct netmap_slot *ts = &txring->slot[k];
 
 rs->len = ts->len;
 rs->ptr = ts->ptr;
@@ -215,17 +215,17 @@ netmap_pipe_txsync(struct netmap_kring *txkring, int flags)
 rs->flags |= NS_BUF_CHANGED;
 ts->flags &= ~NS_BUF_CHANGED;
 }
-}
+}
 
-mb(); /* make sure the slots are updated before publishing them */
-rxkring->nr_hwtail = k;
-txkring->nr_hwcur = k;
+mb(); /* make sure the slots are updated before publishing them */
+rxkring->nr_hwtail = k;
+txkring->nr_hwcur = k;
 
-ND(20, "TX after : hwcur %d hwtail %d cur %d head %d tail %d k %d",
+ND(20, "TX after : hwcur %d hwtail %d cur %d head %d tail %d k %d",
 txkring->nr_hwcur, txkring->nr_hwtail,
-txkring->rcur, txkring->rhead, txkring->rtail, k);
+txkring->rcur, txkring->rhead, txkring->rtail, k);
 
-rxkring->nm_notify(rxkring, 0);
+rxkring->nm_notify(rxkring, 0);
 
 return 0;
 }
@@ -233,45 +233,45 @@ netmap_pipe_txsync(struct netmap_kring *txkring, int flags)
 int
 netmap_pipe_rxsync(struct netmap_kring *rxkring, int flags)
 {
-struct netmap_kring *txkring = rxkring->pipe;
-u_int k, lim = rxkring->nkr_num_slots - 1;
-int m; /* slots to release */
+struct netmap_kring *txkring = rxkring->pipe;
+u_int k, lim = rxkring->nkr_num_slots - 1;
+int m; /* slots to release */
 struct netmap_ring *txring = txkring->ring, *rxring = rxkring->ring;
 
-ND("%p: %s %x -> %s", txkring, txkring->name, flags, rxkring->name);
-ND(20, "RX before: hwcur %d hwtail %d cur %d head %d tail %d",
+ND("%p: %s %x -> %s", txkring, txkring->name, flags, rxkring->name);
+ND(20, "RX before: hwcur %d hwtail %d cur %d head %d tail %d",
 rxkring->nr_hwcur, rxkring->nr_hwtail,
-rxkring->rcur, rxkring->rhead, rxkring->rtail);
+rxkring->rcur, rxkring->rhead, rxkring->rtail);
 
-m = rxkring->rhead - rxkring->nr_hwcur; /* released slots */
-if (m < 0)
-m += rxkring->nkr_num_slots;
+m = rxkring->rhead - rxkring->nr_hwcur; /* released slots */
+if (m < 0)
+m += rxkring->nkr_num_slots;
 
 if (m == 0) {
 /* nothing to release */
 return 0;
 }
 
-for (k = rxkring->nr_hwcur; m; m--, k = nm_next(k, lim)) {
-struct netmap_slot *rs = &rxring->slot[k];
-struct netmap_slot *ts = &txring->slot[k];
+for (k = rxkring->nr_hwcur; m; m--, k = nm_next(k, lim)) {
+struct netmap_slot *rs = &rxring->slot[k];
+struct netmap_slot *ts = &txring->slot[k];
 
 if (rs->flags & NS_BUF_CHANGED) {
 /* copy the slot and report the buffer change */
 *ts = *rs;
 rs->flags &= ~NS_BUF_CHANGED;
 }
 }
-}
+}
 
-mb(); /* make sure the slots are updated before publishing them */
-txkring->nr_hwtail = nm_prev(k, lim);
-rxkring->nr_hwcur = k;
+mb(); /* make sure the slots are updated before publishing them */
+txkring->nr_hwtail = nm_prev(k, lim);
+rxkring->nr_hwcur = k;
 
-ND(20, "RX after : hwcur %d hwtail %d cur %d head %d tail %d k %d",
+ND(20, "RX after : hwcur %d hwtail %d cur %d head %d tail %d k %d",
 rxkring->nr_hwcur, rxkring->nr_hwtail,
-rxkring->rcur, rxkring->rhead, rxkring->rtail, k);
+rxkring->rcur, rxkring->rhead, rxkring->rtail, k);
 
-txkring->nm_notify(txkring, 0);
+txkring->nm_notify(txkring, 0);
 
 return 0;
 }
@@ -577,10 +577,10 @@ netmap_pipe_krings_delete(struct netmap_adapter *na)
 
 netmap_mem_rings_delete(na);
 netmap_krings_delete(na); /* also zeroes tx_rings etc. */
-
+
 if (ona->tx_rings == NULL) {
 /* already deleted, we must be on an
-* cleanup-after-error path */
+* cleanup-after-error path */
 return;
 }
 netmap_mem_rings_delete(ona);
@@ -611,7 +611,7 @@ int
 netmap_get_pipe_na(struct nmreq_header *hdr, struct netmap_adapter **na,
 struct netmap_mem_d *nmd, int create)
 {
-struct nmreq_register *req = (struct nmreq_register *)hdr->nr_body;
+struct nmreq_register *req = (struct nmreq_register *)(uintptr_t)hdr->nr_body;
 struct netmap_adapter *pna; /* parent adapter */
 struct netmap_pipe_adapter *mna, *sna, *reqna;
 struct ifnet *ifp = NULL;
@@ -696,8 +696,8 @@ netmap_get_pipe_na(struct nmreq_header *hdr, struct netmap_adapter **na,
 reqna = mna->peer;
 }
 /* the pipe we have found already holds a ref to the parent,
-* so we need to drop the one we got from netmap_get_na()
-*/
+* so we need to drop the one we got from netmap_get_na()
+*/
 netmap_unget_na(pna, ifp);
 goto found;
 }
@@ -707,9 +707,9 @@ netmap_get_pipe_na(struct nmreq_header *hdr, struct netmap_adapter **na,
 goto put_out;
 }
 /* we create both master and slave.
-* The endpoint we were asked for holds a reference to
-* the other one.
-*/
+* The endpoint we were asked for holds a reference to
+* the other one.
+*/
 mna = nm_os_malloc(sizeof(*mna));
 if (mna == NULL) {
 error = ENOMEM;
@@ -774,8 +774,8 @@ netmap_get_pipe_na(struct nmreq_header *hdr, struct netmap_adapter **na,
 sna->peer = mna;
 
 /* we already have a reference to the parent, but we
-* need another one for the other endpoint we created
-*/
+* need another one for the other endpoint we created
+*/
 netmap_adapter_get(pna);
 /* likewise for the ifp, if any */
 if (ifp)
@@ -799,8 +799,8 @@ netmap_get_pipe_na(struct nmreq_header *hdr, struct netmap_adapter **na,
 netmap_adapter_get(*na);
 
 /* keep the reference to the parent.
-* It will be released by the req destructor
-*/
+* It will be released by the req destructor
+*/
 
 return 0;
 

@@ -1160,7 +1160,7 @@ int
 netmap_get_pt_host_na(struct nmreq_header *hdr, struct netmap_adapter **na,
 struct netmap_mem_d *nmd, int create)
 {
-struct nmreq_register *req = (struct nmreq_register *)hdr->nr_body;
+struct nmreq_register *req = (struct nmreq_register *)(uintptr_t)hdr->nr_body;
 struct nmreq_register preq;
 struct netmap_adapter *parent; /* target adapter */
 struct netmap_pt_host_adapter *pth_na;
@@ -1186,9 +1186,9 @@ netmap_get_pt_host_na(struct nmreq_header *hdr, struct netmap_adapter **na,
 */
 memcpy(&preq, req, sizeof(preq));
 preq.nr_flags &= ~(NR_PTNETMAP_HOST);
-hdr->nr_body = (uint64_t)&preq;
+hdr->nr_body = (uintptr_t)&preq;
 error = netmap_get_na(hdr, &parent, &ifp, nmd, create);
-hdr->nr_body = (uint64_t)req;
+hdr->nr_body = (uintptr_t)req;
 if (error) {
 D("parent lookup failed: %d", error);
 goto put_out_noputparent;

@ -163,7 +163,7 @@ static int bridge_batch = NM_BDG_BATCH; /* bridge batch size */
|
||||
SYSBEGIN(vars_vale);
|
||||
SYSCTL_DECL(_dev_netmap);
|
||||
SYSCTL_INT(_dev_netmap, OID_AUTO, bridge_batch, CTLFLAG_RW, &bridge_batch, 0,
|
||||
"Max batch size to be used in the bridge");
|
||||
"Max batch size to be used in the bridge");
|
||||
SYSEND;
|
||||
|
||||
static int netmap_vp_create(struct nmreq_header *hdr, struct ifnet *,
|
||||
@ -231,7 +231,7 @@ struct nm_bridge {
|
||||
* The function is set by netmap_bdg_regops().
|
||||
*/
|
||||
struct netmap_bdg_ops *bdg_ops;
|
||||
|
||||
|
||||
/*
|
||||
* Contains the data structure used by the bdg_ops.lookup function.
|
||||
* By default points to *ht which is allocated on attach and used by the default lookup
|
||||
@ -285,22 +285,22 @@ static struct nm_bridge *nm_bridges;
|
||||
static inline void
|
||||
pkt_copy(void *_src, void *_dst, int l)
|
||||
{
|
||||
uint64_t *src = _src;
|
||||
uint64_t *dst = _dst;
|
||||
if (unlikely(l >= 1024)) {
|
||||
memcpy(dst, src, l);
|
||||
return;
|
||||
}
|
||||
for (; likely(l > 0); l-=64) {
|
||||
*dst++ = *src++;
|
||||
*dst++ = *src++;
|
||||
*dst++ = *src++;
|
||||
*dst++ = *src++;
|
||||
*dst++ = *src++;
|
||||
*dst++ = *src++;
|
||||
*dst++ = *src++;
|
||||
*dst++ = *src++;
|
||||
}
|
||||
uint64_t *src = _src;
|
||||
uint64_t *dst = _dst;
|
||||
if (unlikely(l >= 1024)) {
|
||||
memcpy(dst, src, l);
|
||||
return;
|
||||
}
|
||||
for (; likely(l > 0); l-=64) {
|
||||
*dst++ = *src++;
|
||||
*dst++ = *src++;
|
||||
*dst++ = *src++;
|
||||
*dst++ = *src++;
|
||||
*dst++ = *src++;
|
||||
*dst++ = *src++;
|
||||
*dst++ = *src++;
|
||||
*dst++ = *src++;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -542,7 +542,7 @@ netmap_bdg_detach_common(struct nm_bridge *b, int hw, int sw)
|
||||
netmap_bdg_free(b);
|
||||
}
|
||||
|
||||
static inline void *
|
||||
static inline void *
|
||||
nm_bdg_get_auth_token(struct nm_bridge *b)
|
||||
{
|
||||
return b->ht;
|
||||
@ -679,7 +679,7 @@ int
|
||||
nm_vi_create(struct nmreq_header *hdr)
|
||||
{
|
||||
struct nmreq_vale_newif *req =
|
||||
(struct nmreq_vale_newif *)hdr->nr_body;
|
||||
(struct nmreq_vale_newif *)(uintptr_t)hdr->nr_body;
|
||||
int error = 0;
|
||||
/* Build a nmreq_register out of the nmreq_vale_newif,
|
||||
* so that we can call netmap_get_bdg_na(). */
|
||||
@ -691,11 +691,11 @@ nm_vi_create(struct nmreq_header *hdr)
|
||||
regreq.nr_rx_rings = req->nr_rx_rings;
|
||||
regreq.nr_mem_id = req->nr_mem_id;
|
||||
hdr->nr_reqtype = NETMAP_REQ_REGISTER;
|
||||
hdr->nr_body = (uint64_t)®req;
|
||||
hdr->nr_body = (uintptr_t)®req;
|
||||
error = netmap_vi_create(hdr, 0 /* no autodelete */);
|
||||
hdr->nr_reqtype = NETMAP_REQ_VALE_NEWIF;
|
||||
hdr->nr_body = (uint64_t)req;
|
||||
/* Write back to the original struct. */
|
||||
hdr->nr_body = (uintptr_t)req;
|
||||
/* Write back to the original struct. */
|
||||
req->nr_tx_slots = regreq.nr_tx_slots;
|
||||
req->nr_rx_slots = regreq.nr_rx_slots;
|
||||
req->nr_tx_rings = regreq.nr_tx_rings;
|
||||
@ -772,7 +772,7 @@ nm_update_info(struct nmreq_register *req, struct netmap_adapter *na)
|
||||
int
|
||||
netmap_vi_create(struct nmreq_header *hdr, int autodelete)
|
||||
{
|
||||
struct nmreq_register *req = (struct nmreq_register *)hdr->nr_body;
|
||||
struct nmreq_register *req = (struct nmreq_register *)(uintptr_t)hdr->nr_body;
|
||||
struct ifnet *ifp;
|
||||
struct netmap_vp_adapter *vpna;
|
||||
struct netmap_mem_d *nmd = NULL;
|
||||
@ -883,7 +883,7 @@ netmap_get_bdg_na(struct nmreq_header *hdr, struct netmap_adapter **na,
|
||||
|
||||
b = nm_find_bridge(nr_name, create);
|
||||
if (b == NULL) {
|
||||
D("no bridges available for '%s'", nr_name);
|
||||
ND("no bridges available for '%s'", nr_name);
|
||||
return (create ? ENOMEM : ENXIO);
|
||||
}
|
||||
if (strlen(nr_name) < b->bdg_namelen) /* impossible */
|
||||
@@ -977,7 +977,7 @@ netmap_get_bdg_na(struct nmreq_header *hdr, struct netmap_adapter **na,
 	if (hdr->nr_reqtype == NETMAP_REQ_VALE_ATTACH) {
 		/* Check if we need to skip the host rings. */
 		struct nmreq_vale_attach *areq =
-			(struct nmreq_vale_attach *)hdr->nr_body;
+			(struct nmreq_vale_attach *)(uintptr_t)hdr->nr_body;
 		if (areq->reg.nr_mode != NR_REG_NIC_SW) {
 			hostna = NULL;
 		}
@@ -1017,7 +1017,7 @@ int
 nm_bdg_ctl_attach(struct nmreq_header *hdr, void *auth_token)
 {
 	struct nmreq_vale_attach *req =
-		(struct nmreq_vale_attach *)hdr->nr_body;
+		(struct nmreq_vale_attach *)(uintptr_t)hdr->nr_body;
 	struct netmap_vp_adapter * vpna;
 	struct netmap_adapter *na;
 	struct netmap_mem_d *nmd = NULL;
@@ -1094,7 +1094,7 @@ nm_is_bwrap(struct netmap_adapter *na)
 int
 nm_bdg_ctl_detach(struct nmreq_header *hdr, void *auth_token)
 {
-	struct nmreq_vale_detach *nmreq_det = (void *)hdr->nr_body;
+	struct nmreq_vale_detach *nmreq_det = (void *)(uintptr_t)hdr->nr_body;
 	struct netmap_vp_adapter *vpna;
 	struct netmap_adapter *na;
 	struct nm_bridge *b = NULL;
@@ -1413,7 +1413,7 @@ int
 nm_bdg_polling(struct nmreq_header *hdr)
 {
 	struct nmreq_vale_polling *req =
-		(struct nmreq_vale_polling *)hdr->nr_body;
+		(struct nmreq_vale_polling *)(uintptr_t)hdr->nr_body;
 	struct netmap_adapter *na = NULL;
 	int error = 0;

@@ -1446,7 +1446,7 @@ int
 netmap_bdg_list(struct nmreq_header *hdr)
 {
 	struct nmreq_vale_list *req =
-		(struct nmreq_vale_list *)hdr->nr_body;
+		(struct nmreq_vale_list *)(uintptr_t)hdr->nr_body;
 	int namelen = strlen(hdr->nr_name);
 	struct nm_bridge *b, *bridges;
 	struct netmap_vp_adapter *vpna;
@@ -1528,7 +1528,7 @@ netmap_bdg_list(struct nmreq_header *hdr)
 *
 * Called without NMG_LOCK.
 */

 int
 netmap_bdg_regops(const char *name, struct netmap_bdg_ops *bdg_ops, void *private_data, void *auth_token)
 {
@@ -1766,33 +1766,33 @@ nm_bdg_preflush(struct netmap_kring *kring, u_int end)
 */
 #define mix(a, b, c) \
 do { \
-	a -= b; a -= c; a ^= (c >> 13); \
-	b -= c; b -= a; b ^= (a << 8); \
-	c -= a; c -= b; c ^= (b >> 13); \
-	a -= b; a -= c; a ^= (c >> 12); \
-	b -= c; b -= a; b ^= (a << 16); \
-	c -= a; c -= b; c ^= (b >> 5); \
-	a -= b; a -= c; a ^= (c >> 3); \
-	b -= c; b -= a; b ^= (a << 10); \
-	c -= a; c -= b; c ^= (b >> 15); \
+	a -= b; a -= c; a ^= (c >> 13); \
+	b -= c; b -= a; b ^= (a << 8); \
+	c -= a; c -= b; c ^= (b >> 13); \
+	a -= b; a -= c; a ^= (c >> 12); \
+	b -= c; b -= a; b ^= (a << 16); \
+	c -= a; c -= b; c ^= (b >> 5); \
+	a -= b; a -= c; a ^= (c >> 3); \
+	b -= c; b -= a; b ^= (a << 10); \
+	c -= a; c -= b; c ^= (b >> 15); \
 } while (/*CONSTCOND*/0)

 static __inline uint32_t
 nm_bridge_rthash(const uint8_t *addr)
 {
-	uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = 0; // hask key
+	uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = 0; // hask key

-	b += addr[5] << 8;
-	b += addr[4];
-	a += addr[3] << 24;
-	a += addr[2] << 16;
-	a += addr[1] << 8;
-	a += addr[0];
+	b += addr[5] << 8;
+	b += addr[4];
+	a += addr[3] << 24;
+	a += addr[2] << 16;
+	a += addr[1] << 8;
+	a += addr[0];

-	mix(a, b, c);
+	mix(a, b, c);
 #define BRIDGE_RTHASH_MASK (NM_BDG_HASH-1)
-	return (c & BRIDGE_RTHASH_MASK);
+	return (c & BRIDGE_RTHASH_MASK);
 }

 #undef mix
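
The hunk above is an indentation-only cleanup, but the code it touches is worth a note: `mix()` is Bob Jenkins' classic hash mixer, and `nm_bridge_rthash()` folds the six bytes of a destination MAC through it to pick a bucket in the bridge's forwarding table. A self-contained userspace version, assuming a table of `NM_BDG_HASH` = 1024 entries (check the in-tree header for the real constant):

#include <stdint.h>
#include <stdio.h>

#define NM_BDG_HASH 1024 /* assumed table size, power of two */

#define mix(a, b, c) \
do { \
	a -= b; a -= c; a ^= (c >> 13); \
	b -= c; b -= a; b ^= (a << 8); \
	c -= a; c -= b; c ^= (b >> 13); \
	a -= b; a -= c; a ^= (c >> 12); \
	b -= c; b -= a; b ^= (a << 16); \
	c -= a; c -= b; c ^= (b >> 5); \
	a -= b; a -= c; a ^= (c >> 3); \
	b -= c; b -= a; b ^= (a << 10); \
	c -= a; c -= b; c ^= (b >> 15); \
} while (0)

static uint32_t
rthash(const uint8_t *addr) /* same layout as nm_bridge_rthash() */
{
	uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = 0; /* golden-ratio seed */

	b += addr[5] << 8;
	b += addr[4];
	a += (uint32_t)addr[3] << 24; /* cast avoids signed-shift overflow */
	a += addr[2] << 16;
	a += addr[1] << 8;
	a += addr[0];

	mix(a, b, c);
	/* masking works because the table size is a power of two */
	return (c & (NM_BDG_HASH - 1));
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0x3c, 0x4d, 0x5e };
	printf("bucket %u\n", rthash(mac));
	return 0;
}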
|
||||
@@ -2115,10 +2115,10 @@ nm_bdg_flush(struct nm_bdg_fwd *ft, u_int n, struct netmap_vp_adapter *na,
 		needed = d->bq_len + brddst->bq_len;

 		if (unlikely(dst_na->up.virt_hdr_len != na->up.virt_hdr_len)) {
-			if (netmap_verbose) {
-				RD(3, "virt_hdr_mismatch, src %d dst %d", na->up.virt_hdr_len,
-						dst_na->up.virt_hdr_len);
-			}
+			if (netmap_verbose) {
+				RD(3, "virt_hdr_mismatch, src %d dst %d", na->up.virt_hdr_len,
+						dst_na->up.virt_hdr_len);
+			}
 			/* There is a virtio-net header/offloadings mismatch between
 			 * source and destination. The slower mismatch datapath will
 			 * be used to cope with all the mismatches.
@@ -2448,7 +2448,7 @@ static int
 netmap_vp_create(struct nmreq_header *hdr, struct ifnet *ifp,
 		struct netmap_mem_d *nmd, struct netmap_vp_adapter **ret)
 {
-	struct nmreq_register *req = (struct nmreq_register *)hdr->nr_body;
+	struct nmreq_register *req = (struct nmreq_register *)(uintptr_t)hdr->nr_body;
 	struct netmap_vp_adapter *vpna;
 	struct netmap_adapter *na;
 	int error = 0;
@@ -2497,7 +2497,7 @@ netmap_vp_create(struct nmreq_header *hdr, struct ifnet *ifp,
 	vpna->last_smac = ~0llu;
 	/*if (vpna->mfs > netmap_buf_size) TODO netmap_buf_size is zero??
 	    vpna->mfs = netmap_buf_size; */
-	if (netmap_verbose)
+	if (netmap_verbose)
 		D("max frame size %u", vpna->mfs);

 	na->na_flags |= NAF_BDG_MAYSLEEP;
@@ -2784,8 +2784,14 @@ netmap_bwrap_config(struct netmap_adapter *na, struct nm_config_info *info)
 	struct netmap_bwrap_adapter *bna =
 		(struct netmap_bwrap_adapter *)na;
 	struct netmap_adapter *hwna = bna->hwna;
+	int error;

-	/* forward the request */
+	/* Forward the request to the hwna. It may happen that nobody
+	 * registered hwna yet, so netmap_mem_get_lut() may have not
+	 * been called yet. */
+	error = netmap_mem_get_lut(hwna->nm_mem, &hwna->na_lut);
+	if (error)
+		return error;
 	netmap_update_config(hwna);
 	/* swap the results and propagate */
 	info->num_tx_rings = hwna->num_rx_rings;
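
Beyond the new `netmap_mem_get_lut()` guard, the tail of this hunk shows the bwrap's mirror geometry: a ring the NIC transmits on is a ring the bridge side receives from, so the wrapper reports hwna's RX figures as its own TX figures and vice versa. A reduced sketch with stand-in structs (the real `nm_config_info` carries more fields):

#include <stdio.h>

/* Reduced stand-in for the adapters' ring geometry. */
struct geom { unsigned num_tx_rings, num_rx_rings; };

/* Mirror the hardware geometry into the bwrap, swapping TX and RX. */
static void
bwrap_swap_config(const struct geom *hw, struct geom *info)
{
	info->num_tx_rings = hw->num_rx_rings;
	info->num_rx_rings = hw->num_tx_rings;
}

int main(void)
{
	struct geom hw = { .num_tx_rings = 4, .num_rx_rings = 2 };
	struct geom info;

	bwrap_swap_config(&hw, &info);
	printf("bwrap: %u tx, %u rx\n", info.num_tx_rings, info.num_rx_rings); /* 2 tx, 4 rx */
	return 0;
}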
|
||||
@@ -2823,11 +2829,11 @@ netmap_bwrap_krings_create(struct netmap_adapter *na)
 	}

 	/* increment the usage counter for all the hwna krings */
-	for_rx_tx(t) {
-		for (i = 0; i < nma_get_nrings(hwna, t) + 1; i++) {
+	for_rx_tx(t) {
+		for (i = 0; i < nma_get_nrings(hwna, t) + 1; i++) {
 			NMR(hwna, t)[i]->users++;
-		}
+		}
 	}

 	/* now create the actual rings */
 	error = netmap_mem_rings_create(hwna);
@@ -2839,13 +2845,13 @@ netmap_bwrap_krings_create(struct netmap_adapter *na)
 	 * The original number of rings comes from hwna,
 	 * rx rings on one side equals tx rings on the other.
 	 */
-	for_rx_tx(t) {
-		enum txrx r = nm_txrx_swap(t); /* swap NR_TX <-> NR_RX */
-		for (i = 0; i < nma_get_nrings(hwna, r) + 1; i++) {
-			NMR(na, t)[i]->nkr_num_slots = NMR(hwna, r)[i]->nkr_num_slots;
-			NMR(na, t)[i]->ring = NMR(hwna, r)[i]->ring;
-		}
-	}
+	for_rx_tx(t) {
+		enum txrx r = nm_txrx_swap(t); /* swap NR_TX <-> NR_RX */
+		for (i = 0; i < nma_get_nrings(hwna, r) + 1; i++) {
+			NMR(na, t)[i]->nkr_num_slots = NMR(hwna, r)[i]->nkr_num_slots;
+			NMR(na, t)[i]->ring = NMR(hwna, r)[i]->ring;
+		}
+	}

 	if (na->na_flags & NAF_HOST_RINGS) {
 		/* the hostna rings are the host rings of the bwrap.
@@ -2861,9 +2867,9 @@ netmap_bwrap_krings_create(struct netmap_adapter *na)
 	return 0;

err_dec_users:
-	for_rx_tx(t) {
+	for_rx_tx(t) {
 		NMR(hwna, t)[i]->users--;
-	}
+	}
 	hwna->nm_krings_delete(hwna);
err_del_vp_rings:
 	netmap_vp_krings_delete(na);
@@ -2884,11 +2890,11 @@ netmap_bwrap_krings_delete(struct netmap_adapter *na)
 	ND("%s", na->name);

 	/* decrement the usage counter for all the hwna krings */
-	for_rx_tx(t) {
-		for (i = 0; i < nma_get_nrings(hwna, t) + 1; i++) {
+	for_rx_tx(t) {
+		for (i = 0; i < nma_get_nrings(hwna, t) + 1; i++) {
 			NMR(hwna, t)[i]->users--;
-		}
+		}
 	}

 	/* delete any netmap rings that are no longer needed */
 	netmap_mem_rings_delete(hwna);
@@ -2969,7 +2975,7 @@ netmap_bwrap_bdg_ctl(struct nmreq_header *hdr, struct netmap_adapter *na)

 	if (hdr->nr_reqtype == NETMAP_REQ_VALE_ATTACH) {
 		struct nmreq_vale_attach *req =
-			(struct nmreq_vale_attach *)hdr->nr_body;
+			(struct nmreq_vale_attach *)(uintptr_t)hdr->nr_body;
 		if (req->reg.nr_ringid != 0 ||
 			(req->reg.nr_mode != NR_REG_ALL_NIC &&
 			req->reg.nr_mode != NR_REG_NIC_SW)) {
@@ -3055,6 +3061,7 @@ netmap_bwrap_attach(const char *nr_name, struct netmap_adapter *hwna)
 	na->pdev = hwna->pdev;
 	na->nm_mem = netmap_mem_get(hwna->nm_mem);
 	na->virt_hdr_len = hwna->virt_hdr_len;
+	na->rx_buf_maxsize = hwna->rx_buf_maxsize;
 	bna->up.retry = 1; /* XXX maybe this should depend on the hwna */
 	/* Set the mfs, needed on the VALE mismatch datapath. */
 	bna->up.mfs = NM_BDG_MFS_DEFAULT;
@@ -3087,6 +3094,7 @@ netmap_bwrap_attach(const char *nr_name, struct netmap_adapter *hwna)
 	na->na_hostvp = hwna->na_hostvp =
 		hostna->na_hostvp = &bna->host;
 	hostna->na_flags = NAF_BUSY; /* prevent NIOCREGIF */
+	hostna->rx_buf_maxsize = hwna->rx_buf_maxsize;
 	bna->host.mfs = NM_BDG_MFS_DEFAULT;
 }
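
The two single-line `+` additions at the end propagate `rx_buf_maxsize` from the wrapped hardware adapter into the bwrap and, when `NAF_HOST_RINGS` is set, into its host-rings twin, plausibly so the VALE datapath sees the NIC's true per-buffer limit instead of an unset zero. A stand-in sketch of the inheritance (hypothetical names, not the netmap structs):

#include <stdio.h>

/* Stand-in adapter with only the field the diff touches. */
struct adapter { unsigned rx_buf_maxsize; };

/* Copy the NIC's per-buffer RX limit into the wrapper, and into the
 * host-rings adapter when one exists, as the diff's two additions do. */
static void
bwrap_inherit_limits(struct adapter *bwrap, struct adapter *host_side,
    const struct adapter *hwna)
{
	bwrap->rx_buf_maxsize = hwna->rx_buf_maxsize;
	if (host_side != NULL) /* only when NAF_HOST_RINGS is set */
		host_side->rx_buf_maxsize = hwna->rx_buf_maxsize;
}

int main(void)
{
	struct adapter hw = { 2048 }, bw = { 0 }, host = { 0 };

	bwrap_inherit_limits(&bw, &host, &hw);
	printf("bwrap %u, host %u\n", bw.rx_buf_maxsize, host.rx_buf_maxsize);
	return 0;
}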