netmap: import changes from upstream (SHA 137f537eae513)

- Rework option processing: validate request options and index them in a
  per-type table at copyin time, so that consumers can look them up with
  nmreq_getoption() instead of rescanning the user-provided list.
- Use larger integers (size_t) for memory size values in the
  memory management code, avoiding 32-bit overflow on large regions.

MFC after:	2 weeks
vmaffione 2019-09-01 14:47:41 +00:00
parent 11bc2f123b
commit b67d437ec8
6 changed files with 152 additions and 74 deletions
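The option-processing rework below drops the nmreq_findoption()/nmreq_checkduplicate() pair: nmreq_copyin() now validates every option once and records it in a table indexed by option type, and nmreq_getoption() reduces to a single table lookup. A self-contained illustration of that technique alone (plain userland C with hypothetical stand-in types, not the kernel code):

#include <stdio.h>

#define OPT_MAX 4			/* stands in for NETMAP_REQ_OPT_MAX */

struct opt {				/* stands in for struct nmreq_option */
	struct opt *next;
	unsigned type;
};

/* Walk the caller-provided list once, rejecting invalid types and
 * duplicates, and remember each option in a per-type table. */
static int
index_options(struct opt *list, struct opt *tab[OPT_MAX])
{
	struct opt *o;

	for (o = list; o != NULL; o = o->next) {
		if (o->type < 1 || o->type >= OPT_MAX)
			return -1;		/* invalid type */
		if (tab[o->type] != NULL)
			return -1;		/* duplicate */
		tab[o->type] = o;
	}
	return 0;
}

int
main(void)
{
	struct opt b = { NULL, 2 }, a = { &b, 1 };
	struct opt *tab[OPT_MAX] = { NULL };

	if (index_options(&a, tab) == 0)
		printf("type 2 present: %d\n", tab[2] != NULL);	/* O(1) lookup */
	return 0;
}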

@@ -1037,8 +1037,13 @@ netmap_do_unregif(struct netmap_priv_d *priv)
na->nm_krings_delete(na);
/* restore the default number of host tx and rx rings */
na->num_host_tx_rings = 1;
na->num_host_rx_rings = 1;
if (na->na_flags & NAF_HOST_RINGS) {
na->num_host_tx_rings = 1;
na->num_host_rx_rings = 1;
} else {
na->num_host_tx_rings = 0;
na->num_host_rx_rings = 0;
}
}
/* possibly decrement counter of tx_si/rx_si users */
@@ -2505,17 +2510,11 @@ netmap_ioctl(struct netmap_priv_d *priv, u_long cmd, caddr_t data,
}
#ifdef WITH_EXTMEM
opt = nmreq_findoption((struct nmreq_option *)(uintptr_t)hdr->nr_options,
NETMAP_REQ_OPT_EXTMEM);
opt = nmreq_getoption(hdr, NETMAP_REQ_OPT_EXTMEM);
if (opt != NULL) {
struct nmreq_opt_extmem *e =
(struct nmreq_opt_extmem *)opt;
error = nmreq_checkduplicate(opt);
if (error) {
opt->nro_status = error;
break;
}
nmd = netmap_mem_ext_create(e->nro_usrptr,
&e->nro_info, &error);
opt->nro_status = error;
@@ -2559,15 +2558,11 @@ netmap_ioctl(struct netmap_priv_d *priv, u_long cmd, caddr_t data,
break;
}
opt = nmreq_findoption((struct nmreq_option *)(uintptr_t)hdr->nr_options,
NETMAP_REQ_OPT_CSB);
opt = nmreq_getoption(hdr, NETMAP_REQ_OPT_CSB);
if (opt != NULL) {
struct nmreq_opt_csb *csbo =
(struct nmreq_opt_csb *)opt;
error = nmreq_checkduplicate(opt);
if (!error) {
error = netmap_csb_validate(priv, csbo);
}
error = netmap_csb_validate(priv, csbo);
opt->nro_status = error;
if (error) {
netmap_do_unregif(priv);
@@ -2841,19 +2836,15 @@ netmap_ioctl(struct netmap_priv_d *priv, u_long cmd, caddr_t data,
case NETMAP_REQ_CSB_ENABLE: {
struct nmreq_option *opt;
opt = nmreq_findoption((struct nmreq_option *)(uintptr_t)hdr->nr_options,
NETMAP_REQ_OPT_CSB);
opt = nmreq_getoption(hdr, NETMAP_REQ_OPT_CSB);
if (opt == NULL) {
error = EINVAL;
} else {
struct nmreq_opt_csb *csbo =
(struct nmreq_opt_csb *)opt;
error = nmreq_checkduplicate(opt);
if (!error) {
NMG_LOCK();
error = netmap_csb_validate(priv, csbo);
NMG_UNLOCK();
}
NMG_LOCK();
error = netmap_csb_validate(priv, csbo);
NMG_UNLOCK();
opt->nro_status = error;
}
break;
@@ -3021,13 +3012,72 @@ nmreq_opt_size_by_type(uint32_t nro_reqtype, uint64_t nro_size)
return rv - sizeof(struct nmreq_option);
}
/*
* nmreq_copyin: create an in-kernel version of the request.
*
* We build the following data structure:
*
* hdr -> +-------+ buf
* | | +---------------+
* +-------+ |usr body ptr |
* |options|-. +---------------+
* +-------+ | |usr options ptr|
* |body |--------->+---------------+
* +-------+ | | |
* | | copy of body |
* | | |
* | +---------------+
* | | NULL |
* | +---------------+
* | .---| |\
* | | +---------------+ |
* | .------| | |
* | | | +---------------+ \ option table
* | | | | ... | / indexed by option
* | | | +---------------+ | type
* | | | | | |
* | | | +---------------+/
* | | | |usr next ptr 1 |
* `-|----->+---------------+
* | | | copy of opt 1 |
* | | | |
* | | .-| nro_next |
* | | | +---------------+
* | | | |usr next ptr 2 |
* | `-`>+---------------+
* | | copy of opt 2 |
* | | |
* | .-| nro_next |
* | | +---------------+
* | | | |
* ~ ~ ~ ... ~
* | .-| |
* `----->+---------------+
* | |usr next ptr n |
* `>+---------------+
* | copy of opt n |
* | |
* | nro_next(NULL)|
* +---------------+
*
* The options and body fields of the hdr structure are overwritten
* with in-kernel valid pointers inside the buf. The original user
* pointers are saved in the buf and restored on copyout.
* The list of options is copied and the pointers adjusted. The
* original pointers are saved before the option they belong to.
*
* The option table has an entry for every available option. Entries
* for options that have not been passed contain NULL.
*
*/
int
nmreq_copyin(struct nmreq_header *hdr, int nr_body_is_user)
{
size_t rqsz, optsz, bufsz;
int error;
int error = 0;
char *ker = NULL, *p;
struct nmreq_option **next, *src;
struct nmreq_option **next, *src, **opt_tab;
struct nmreq_option buf;
uint64_t *ptrs;
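The userspace side of the picture above, as a sketch: the application chains option headers through nro_next and points hdr.nr_options at the head. These are exactly the "usr body ptr", "usr options ptr" and "usr next ptr" values that nmreq_copyin() saves into buf before replacing them with in-kernel pointers. It assumes only the public net/netmap.h structures; "em0" and the two pre-filled options are hypothetical, and in real code each option header is the first field of a larger nmreq_opt_* structure with nro_size set to its full size.

#include <string.h>
#include <stdint.h>
#include <net/netmap.h>

static void
build_request(struct nmreq_header *hdr, struct nmreq_register *body,
    struct nmreq_option *opt1, struct nmreq_option *opt2)
{
	memset(hdr, 0, sizeof(*hdr));
	hdr->nr_version = NETMAP_API;
	hdr->nr_reqtype = NETMAP_REQ_REGISTER;
	strncpy(hdr->nr_name, "em0", sizeof(hdr->nr_name) - 1);

	hdr->nr_body = (uintptr_t)body;		/* "usr body ptr"    */

	opt1->nro_next = (uintptr_t)opt2;	/* "usr next ptr 1"  */
	opt2->nro_next = 0;			/* end of the list   */
	hdr->nr_options = (uintptr_t)opt1;	/* "usr options ptr" */

	/* The request would then be issued with ioctl(fd, NIOCCTRL, hdr). */
}

int
main(void)
{
	struct nmreq_header hdr;
	struct nmreq_register reg;
	struct nmreq_option o1, o2;

	memset(&reg, 0, sizeof(reg));
	memset(&o1, 0, sizeof(o1));
	memset(&o2, 0, sizeof(o2));
	o1.nro_reqtype = NETMAP_REQ_OPT_CSB;		/* hypothetical choice */
	o2.nro_reqtype = NETMAP_REQ_OPT_SYNC_KLOOP_MODE;
	build_request(&hdr, &reg, &o1, &o2);
	return 0;
}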
@@ -3058,7 +3108,13 @@ nmreq_copyin(struct nmreq_header *hdr, int nr_body_is_user)
goto out_err;
}
bufsz = 2 * sizeof(void *) + rqsz;
bufsz = 2 * sizeof(void *) + rqsz +
NETMAP_REQ_OPT_MAX * sizeof(opt_tab);
/* compute the size of the buf below the option table.
* It must contain a copy of every received option structure.
* For every option we also need to store a copy of the user
* list pointer.
*/
optsz = 0;
for (src = (struct nmreq_option *)(uintptr_t)hdr->nr_options; src;
src = (struct nmreq_option *)(uintptr_t)buf.nro_next)
@@ -3072,15 +3128,16 @@ nmreq_copyin(struct nmreq_header *hdr, int nr_body_is_user)
error = EMSGSIZE;
goto out_err;
}
bufsz += optsz + sizeof(void *);
bufsz += sizeof(void *);
}
bufsz += optsz;
ker = nm_os_malloc(bufsz);
if (ker == NULL) {
error = ENOMEM;
goto out_err;
}
p = ker;
p = ker; /* write pointer into the buffer */
/* make a copy of the user pointers */
ptrs = (uint64_t*)p;
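On LP64 the buffer sized above works out to the following, where n_opts is the number of received options and optsz the total size of their structures (both hypothetical stand-ins for the values accumulated in the loop above):

	bufsz = 2 * sizeof(void *)                    /* saved user body/options pointers */
	      + rqsz                                  /* copy of the request body         */
	      + NETMAP_REQ_OPT_MAX * sizeof(void *)   /* option table, one slot per type  */
	      + n_opts * sizeof(void *)               /* saved user nro_next pointers     */
	      + optsz;                                /* copies of the option structures  */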
@@ -3095,6 +3152,9 @@ nmreq_copyin(struct nmreq_header *hdr, int nr_body_is_user)
/* overwrite the user pointer with the in-kernel one */
hdr->nr_body = (uintptr_t)p;
p += rqsz;
/* start of the options table */
opt_tab = (struct nmreq_option **)p;
p += sizeof(opt_tab) * NETMAP_REQ_OPT_MAX;
/* copy the options */
next = (struct nmreq_option **)&hdr->nr_options;
@@ -3118,6 +3178,34 @@ nmreq_copyin(struct nmreq_header *hdr, int nr_body_is_user)
*/
opt->nro_status = EOPNOTSUPP;
/* check for invalid types */
if (opt->nro_reqtype < 1) {
if (netmap_verbose)
nm_prinf("invalid option type: %u", opt->nro_reqtype);
opt->nro_status = EINVAL;
error = EINVAL;
goto next;
}
if (opt->nro_reqtype >= NETMAP_REQ_OPT_MAX) {
/* opt->nro_status is already EOPNOTSUPP */
error = EOPNOTSUPP;
goto next;
}
/* if the type is valid, index the option in the table
* unless it is a duplicate.
*/
if (opt_tab[opt->nro_reqtype] != NULL) {
if (netmap_verbose)
nm_prinf("duplicate option: %u", opt->nro_reqtype);
opt->nro_status = EINVAL;
opt_tab[opt->nro_reqtype]->nro_status = EINVAL;
error = EINVAL;
goto next;
}
opt_tab[opt->nro_reqtype] = opt;
p = (char *)(opt + 1);
/* copy the option body */
@@ -3131,11 +3219,14 @@ nmreq_copyin(struct nmreq_header *hdr, int nr_body_is_user)
p += optsz;
}
next:
/* move to next option */
next = (struct nmreq_option **)&opt->nro_next;
src = *next;
}
return 0;
if (error)
nmreq_copyout(hdr, error);
return error;
out_restore:
ptrs = (uint64_t *)ker;
@@ -3218,25 +3309,15 @@ nmreq_copyout(struct nmreq_header *hdr, int rerror)
}
struct nmreq_option *
nmreq_findoption(struct nmreq_option *opt, uint16_t reqtype)
nmreq_getoption(struct nmreq_header *hdr, uint16_t reqtype)
{
for ( ; opt; opt = (struct nmreq_option *)(uintptr_t)opt->nro_next)
if (opt->nro_reqtype == reqtype)
return opt;
return NULL;
}
struct nmreq_option **opt_tab;
int
nmreq_checkduplicate(struct nmreq_option *opt) {
uint16_t type = opt->nro_reqtype;
int dup = 0;
if (!hdr->nr_options)
return NULL;
while ((opt = nmreq_findoption((struct nmreq_option *)(uintptr_t)opt->nro_next,
type))) {
dup++;
opt->nro_status = EINVAL;
}
return (dup ? EINVAL : 0);
opt_tab = (struct nmreq_option **)(hdr->nr_options) - (NETMAP_REQ_OPT_MAX + 1);
return opt_tab[reqtype];
}
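The "- (NETMAP_REQ_OPT_MAX + 1)" above follows from the copyin layout: after nmreq_copyin(), hdr->nr_options points at the first in-kernel option copy, which is preceded by one pointer-sized slot holding its saved user nro_next and, before that, by the NETMAP_REQ_OPT_MAX-entry table. Sketched in pointer-sized slots:

	opt_tab[0] ... opt_tab[NETMAP_REQ_OPT_MAX - 1] | saved usr next | copy of opt 1
	^                                                                ^
	opt_tab                                                          hdr->nr_options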
static int

@@ -2390,8 +2390,7 @@ nm_os_get_mbuf(struct ifnet *ifp, int len)
#endif /* __FreeBSD_version >= 1100000 */
#endif /* __FreeBSD__ */
struct nmreq_option * nmreq_findoption(struct nmreq_option *, uint16_t);
int nmreq_checkduplicate(struct nmreq_option *);
struct nmreq_option * nmreq_getoption(struct nmreq_header *, uint16_t);
int netmap_init_bridges(void);
void netmap_uninit_bridges(void);


@@ -649,8 +649,7 @@ netmap_sync_kloop(struct netmap_priv_d *priv, struct nmreq_header *hdr)
}
/* Validate notification options. */
opt = nmreq_findoption((struct nmreq_option *)(uintptr_t)hdr->nr_options,
NETMAP_REQ_OPT_SYNC_KLOOP_MODE);
opt = nmreq_getoption(hdr, NETMAP_REQ_OPT_SYNC_KLOOP_MODE);
if (opt != NULL) {
struct nmreq_opt_sync_kloop_mode *mode_opt =
(struct nmreq_opt_sync_kloop_mode *)opt;
@@ -664,14 +663,8 @@ netmap_sync_kloop(struct netmap_priv_d *priv, struct nmreq_header *hdr)
}
opt->nro_status = 0;
}
opt = nmreq_findoption((struct nmreq_option *)(uintptr_t)hdr->nr_options,
NETMAP_REQ_OPT_SYNC_KLOOP_EVENTFDS);
opt = nmreq_getoption(hdr, NETMAP_REQ_OPT_SYNC_KLOOP_EVENTFDS);
if (opt != NULL) {
err = nmreq_checkduplicate(opt);
if (err) {
opt->nro_status = err;
goto out;
}
if (opt->nro_size != sizeof(*eventfds_opt) +
sizeof(eventfds_opt->eventfds[0]) * num_rings) {
/* Option size not consistent with the number of

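The size check above expects userspace to pass one eventfds entry per ring in a single variable-length option. A sketch of the corresponding allocation, assuming only the public net/netmap.h definitions (the helper name is hypothetical):

#include <stdlib.h>
#include <net/netmap.h>

/* Allocate the variable-length eventfds option for num_rings rings. */
static struct nmreq_option *
alloc_eventfds_opt(unsigned int num_rings)
{
	struct nmreq_opt_sync_kloop_eventfds *efds;
	size_t sz = sizeof(*efds) + num_rings * sizeof(efds->eventfds[0]);

	efds = calloc(1, sz);
	if (efds == NULL)
		return NULL;
	/* every nmreq_opt_* structure starts with the generic option header */
	((struct nmreq_option *)efds)->nro_reqtype =
	    NETMAP_REQ_OPT_SYNC_KLOOP_EVENTFDS;
	((struct nmreq_option *)efds)->nro_size = sz;
	return (struct nmreq_option *)efds;
}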
@@ -100,16 +100,17 @@ struct netmap_obj_pool {
/* ---------------------------------------------------*/
/* these are only meaningful if the pool is finalized */
/* (see 'finalized' field in netmap_mem_d) */
u_int objtotal; /* actual total number of objects. */
u_int memtotal; /* actual total memory space */
u_int numclusters; /* actual number of clusters */
u_int objfree; /* number of free objects. */
size_t memtotal; /* actual total memory space */
struct lut_entry *lut; /* virt,phys addresses, objtotal entries */
uint32_t *bitmap; /* one bit per buffer, 1 means free */
uint32_t *invalid_bitmap;/* one bit per buffer, 1 means invalid */
uint32_t bitmap_slots; /* number of uint32 entries in bitmap */
u_int objtotal; /* actual total number of objects. */
u_int numclusters; /* actual number of clusters */
u_int objfree; /* number of free objects. */
int alloc_done; /* we have allocated the memory */
/* ---------------------------------------------------*/
@@ -159,7 +160,7 @@ struct netmap_mem_ops {
struct netmap_mem_d {
NMA_LOCK_T nm_mtx; /* protect the allocator */
u_int nm_totalsize; /* shorthand */
size_t nm_totalsize; /* shorthand */
u_int flags;
#define NETMAP_MEM_FINALIZED 0x1 /* preallocation done */
@@ -817,7 +818,7 @@ netmap_mem2_ofstophys(struct netmap_mem_d* nmd, vm_ooffset_t offset)
return pa;
}
/* this is only in case of errors */
nm_prerr("invalid ofs 0x%x out of 0x%x 0x%x 0x%x", (u_int)o,
nm_prerr("invalid ofs 0x%x out of 0x%zx 0x%zx 0x%zx", (u_int)o,
p[NETMAP_IF_POOL].memtotal,
p[NETMAP_IF_POOL].memtotal
+ p[NETMAP_RING_POOL].memtotal,
@@ -947,7 +948,7 @@ netmap_mem2_get_info(struct netmap_mem_d* nmd, uint64_t* size,
*size = 0;
for (i = 0; i < NETMAP_POOLS_NR; i++) {
struct netmap_obj_pool *p = nmd->pools + i;
*size += (p->_numclusters * p->_clustsize);
*size += ((size_t)p->_numclusters * (size_t)p->_clustsize);
}
}
}
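The casts matter because both factors are u_int: without them the multiplication itself is still performed in 32 bits and wraps before the result is widened. A minimal demonstration with hypothetical but realistic pool dimensions (LP64 assumed):

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	unsigned int numclusters = 1024 * 1024;	/* 2^20 clusters */
	unsigned int clustsize = 8192;		/* 8 KB each: 8 GiB in total */

	uint64_t wrapped = numclusters * clustsize;	/* 32-bit multiply: 2^33 wraps to 0 */
	uint64_t full = (size_t)numclusters * (size_t)clustsize;

	printf("u_int product: %ju, size_t product: %ju\n",
	    (uintmax_t)wrapped, (uintmax_t)full);
	return 0;
}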
@@ -1476,9 +1477,9 @@ netmap_finalize_obj_allocator(struct netmap_obj_pool *p)
#endif
}
}
p->memtotal = p->numclusters * p->_clustsize;
p->memtotal = (size_t)p->numclusters * (size_t)p->_clustsize;
if (netmap_verbose)
nm_prinf("Pre-allocated %d clusters (%d/%dKB) for '%s'",
nm_prinf("Pre-allocated %d clusters (%d/%zuKB) for '%s'",
p->numclusters, p->_clustsize >> 10,
p->memtotal >> 10, p->name);
@@ -1639,7 +1640,7 @@ netmap_mem_finalize_all(struct netmap_mem_d *nmd)
nmd->flags |= NETMAP_MEM_FINALIZED;
if (netmap_verbose)
nm_prinf("interfaces %d KB, rings %d KB, buffers %d MB",
nm_prinf("interfaces %zd KB, rings %zd KB, buffers %zd MB",
nmd->pools[NETMAP_IF_POOL].memtotal >> 10,
nmd->pools[NETMAP_RING_POOL].memtotal >> 10,
nmd->pools[NETMAP_BUF_POOL].memtotal >> 20);
@@ -2341,8 +2342,8 @@ netmap_mem_ext_create(uint64_t usrptr, struct nmreq_pools_info *pi, int *perror)
}
p->objtotal = j;
p->numclusters = p->objtotal;
p->memtotal = j * p->_objsize;
nm_prdis("%d memtotal %u", j, p->memtotal);
p->memtotal = j * (size_t)p->_objsize;
nm_prdis("%d memtotal %zu", j, p->memtotal);
}
netmap_mem_ext_register(nme);
@@ -2446,8 +2447,8 @@ netmap_mem_pt_guest_ifp_del(struct netmap_mem_d *nmd, struct ifnet *ifp)
} else {
ptnmd->pt_ifs = curr->next;
}
nm_prinf("removed (ifp=%s,nifp_offset=%u)",
curr->ifp->if_xname, curr->nifp_offset);
nm_prinf("removed (ifp=%p,nifp_offset=%u)",
curr->ifp, curr->nifp_offset);
nm_os_free(curr);
ret = 0;
break;
@@ -2573,7 +2574,7 @@ netmap_mem_pt_guest_finalize(struct netmap_mem_d *nmd)
ptnmd->buf_lut.objtotal = nbuffers;
ptnmd->buf_lut.objsize = bufsize;
nmd->nm_totalsize = (unsigned int)mem_size;
nmd->nm_totalsize = mem_size;
/* Initialize these fields as are needed by
* netmap_mem_bufsize().

@@ -562,6 +562,10 @@ enum {
* This requires the 'ioeventfd' fields to be valid (cannot be < 0).
*/
NETMAP_REQ_OPT_SYNC_KLOOP_MODE,
/* This is a marker to count the number of available options.
* New options must be added above it. */
NETMAP_REQ_OPT_MAX,
};
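The explicit terminator makes the number of defined option types available at compile time; it is what lets nmreq_copyin() reserve the per-type option table, e.g. (hypothetical declaration):

	struct nmreq_option *opt_tab[NETMAP_REQ_OPT_MAX];	/* one slot per option type */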
/*

@@ -117,7 +117,7 @@
(nifp)->ni_host_tx_rings] )
#define NETMAP_BUF(ring, index) \
((char *)(ring) + (ring)->buf_ofs + ((index)*(ring)->nr_buf_size))
((char *)(ring) + (ring)->buf_ofs + ((size_t)(index)*(ring)->nr_buf_size))
#define NETMAP_BUF_IDX(ring, buf) \
( ((char *)(buf) - ((char *)(ring) + (ring)->buf_ofs) ) / \
@@ -254,7 +254,7 @@ struct nm_desc {
struct nm_desc *self; /* point to self if netmap. */
int fd;
void *mem;
uint32_t memsize;
size_t memsize;
int done_mmap; /* set if mem is the result of mmap */
struct netmap_if * const nifp;
uint16_t first_tx_ring, last_tx_ring, cur_tx_ring;
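The (size_t) cast in NETMAP_BUF() and the widened memsize field address the same overflow class from userspace: index and nr_buf_size are both 32-bit, so their product wraps once a buffer pool grows past 4 GiB. A worked example with hypothetical numbers:

	/* nr_buf_size = 2048, index = 3000000:
	 *   true offset   3000000 * 2048 = 6144000000 bytes (~5.7 GiB)
	 *   32-bit wrap   6144000000 mod 2^32 = 1849032704  -> wrong buffer
	 * promoting index to size_t keeps the full 64-bit offset on LP64. */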