/*
 * Copyright (C) 2012-2014 Matteo Landi, Luigi Rizzo, Giuseppe Lettieri. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifdef linux
#include "bsd_glue.h"
#endif /* linux */

#ifdef __APPLE__
#include "osx_glue.h"
#endif /* __APPLE__ */

#ifdef __FreeBSD__
#include <sys/cdefs.h> /* prerequisite */
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <vm/vm.h>	/* vtophys */
#include <vm/pmap.h>	/* vtophys */
#include <sys/socket.h> /* sockaddrs */
#include <sys/selinfo.h>
#include <sys/sysctl.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/vnet.h>
#include <machine/bus.h>	/* bus_dmamap_* */

#endif /* __FreeBSD__ */

#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include "netmap_mem2.h"

#ifdef linux
#define NMA_LOCK_INIT(n)	sema_init(&(n)->nm_mtx, 1)
#define NMA_LOCK_DESTROY(n)
#define NMA_LOCK(n)		down(&(n)->nm_mtx)
#define NMA_UNLOCK(n)		up(&(n)->nm_mtx)
#else /* !linux */
#define NMA_LOCK_INIT(n)	mtx_init(&(n)->nm_mtx, "netmap memory allocator lock", NULL, MTX_DEF)
#define NMA_LOCK_DESTROY(n)	mtx_destroy(&(n)->nm_mtx)
#define NMA_LOCK(n)		mtx_lock(&(n)->nm_mtx)
#define NMA_UNLOCK(n)		mtx_unlock(&(n)->nm_mtx)
#endif /* linux */
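
/*
 * The four operations above map the per-allocator lock onto the host
 * primitive: a semaphore used as a mutex on Linux, a regular mtx(9)
 * mutex on FreeBSD. They all take the struct netmap_mem_d that owns
 * the nm_mtx field.
 */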

struct netmap_obj_params netmap_params[NETMAP_POOLS_NR] = {
	[NETMAP_IF_POOL] = {
		.size = 1024,
		.num  = 100,
	},
	[NETMAP_RING_POOL] = {
		.size = 9*PAGE_SIZE,
		.num  = 200,
	},
	[NETMAP_BUF_POOL] = {
		.size = 2048,
		.num  = NETMAP_BUF_MAX_NUM,
	},
};
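
/*
 * These are only the boot-time defaults for the global allocator:
 * one pool of interface descriptors, one of rings, one of packet
 * buffers. Each can be resized at runtime through the dev.netmap.*_size
 * and dev.netmap.*_num sysctls declared below; new values take effect
 * on the next reconfiguration, when no user is active.
 */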

/*
 * nm_mem is the memory allocator used for all physical interfaces
 * running in netmap mode.
 * Virtual (VALE) ports will each have their own allocator.
 */
static int netmap_mem_global_config(struct netmap_mem_d *nmd);
static int netmap_mem_global_finalize(struct netmap_mem_d *nmd);
static void netmap_mem_global_deref(struct netmap_mem_d *nmd);

struct netmap_mem_d nm_mem = {	/* Our memory allocator. */
	.pools = {
		[NETMAP_IF_POOL] = {
			.name	= "netmap_if",
			.objminsize = sizeof(struct netmap_if),
			.objmaxsize = 4096,
			.nummin     = 10,	/* don't be stingy */
			.nummax	    = 10000,	/* XXX very large */
		},
		[NETMAP_RING_POOL] = {
			.name	= "netmap_ring",
			.objminsize = sizeof(struct netmap_ring),
			.objmaxsize = 32*PAGE_SIZE,
			.nummin     = 2,
			.nummax	    = 1024,
		},
		[NETMAP_BUF_POOL] = {
			.name	= "netmap_buf",
			.objminsize = 64,
			.objmaxsize = 65536,
			.nummin     = 4,
			.nummax	    = 1000000, /* one million! */
		},
	},
	.config   = netmap_mem_global_config,
	.finalize = netmap_mem_global_finalize,
	.deref    = netmap_mem_global_deref,
};

// XXX logically belongs to nm_mem
struct lut_entry *netmap_buffer_lut;	/* exported */

/* blueprint for the private memory allocators */
static int netmap_mem_private_config(struct netmap_mem_d *nmd);
static int netmap_mem_private_finalize(struct netmap_mem_d *nmd);
static void netmap_mem_private_deref(struct netmap_mem_d *nmd);
const struct netmap_mem_d nm_blueprint = {
	.pools = {
		[NETMAP_IF_POOL] = {
			.name	= "%s_if",
			.objminsize = sizeof(struct netmap_if),
			.objmaxsize = 4096,
			.nummin     = 1,
			.nummax	    = 10,
		},
		[NETMAP_RING_POOL] = {
			.name	= "%s_ring",
			.objminsize = sizeof(struct netmap_ring),
			.objmaxsize = 32*PAGE_SIZE,
			.nummin     = 2,
			.nummax	    = 1024,
		},
		[NETMAP_BUF_POOL] = {
			.name	= "%s_buf",
			.objminsize = 64,
			.objmaxsize = 65536,
			.nummin     = 4,
			.nummax	    = 1000000, /* one million! */
		},
	},
	.config   = netmap_mem_private_config,
	.finalize = netmap_mem_private_finalize,
	.deref    = netmap_mem_private_deref,

	.flags = NETMAP_MEM_PRIVATE,
};
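
/*
 * The pool names above are printf-style templates:
 * netmap_mem_private_new() below stamps the port name into them, so
 * e.g. a VALE port "valeX:0" gets pools named "valeX:0_if",
 * "valeX:0_ring" and "valeX:0_buf".
 */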

/* memory allocator related sysctls */

#define STRINGIFY(x) #x

#define DECLARE_SYSCTLS(id, name) \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_size, \
	    CTLFLAG_RW, &netmap_params[id].size, 0, "Requested size of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_size, \
	    CTLFLAG_RD, &nm_mem.pools[id]._objsize, 0, "Current size of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_num, \
	    CTLFLAG_RW, &netmap_params[id].num, 0, "Requested number of netmap " STRINGIFY(name) "s"); \
	SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_num, \
	    CTLFLAG_RD, &nm_mem.pools[id].objtotal, 0, "Current number of netmap " STRINGIFY(name) "s")

SYSCTL_DECL(_dev_netmap);
DECLARE_SYSCTLS(NETMAP_IF_POOL, if);
DECLARE_SYSCTLS(NETMAP_RING_POOL, ring);
DECLARE_SYSCTLS(NETMAP_BUF_POOL, buf);
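
/*
 * E.g. "sysctl dev.netmap.buf_num=163840" requests a larger buffer
 * pool; the read-only *_curr_* variants report what the global
 * allocator is actually using, which may differ after rounding.
 */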

/*
 * First, find the allocator that contains the requested offset,
 * then locate the cluster through a lookup table.
 */
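/*
 * Example: pools are scanned in the fixed order IF, RING, BUF, so an
 * offset of if.memtotal + ring.memtotal + 5000 survives the first two
 * subtractions and lands in the buffer pool; with 2048-byte buffers it
 * selects lut[2] (5000 / 2048) at byte 904 (5000 % 2048) inside it.
 */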
vm_paddr_t
netmap_mem_ofstophys(struct netmap_mem_d* nmd, vm_ooffset_t offset)
{
	int i;
	vm_ooffset_t o = offset;
	vm_paddr_t pa;
	struct netmap_obj_pool *p;

	NMA_LOCK(nmd);
	p = nmd->pools;

	for (i = 0; i < NETMAP_POOLS_NR; offset -= p[i].memtotal, i++) {
		if (offset >= p[i].memtotal)
			continue;
		// now lookup the cluster's address
		pa = p[i].lut[offset / p[i]._objsize].paddr +
			offset % p[i]._objsize;
		NMA_UNLOCK(nmd);
		return pa;
	}
	/* this is only in case of errors */
	D("invalid ofs 0x%x out of 0x%x 0x%x 0x%x", (u_int)o,
		p[NETMAP_IF_POOL].memtotal,
		p[NETMAP_IF_POOL].memtotal
			+ p[NETMAP_RING_POOL].memtotal,
		p[NETMAP_IF_POOL].memtotal
			+ p[NETMAP_RING_POOL].memtotal
			+ p[NETMAP_BUF_POOL].memtotal);
	NMA_UNLOCK(nmd);
	return 0;	// XXX bad address
}

int
netmap_mem_get_info(struct netmap_mem_d* nmd, u_int* size, u_int *memflags)
{
	int error = 0;
	NMA_LOCK(nmd);
	error = nmd->config(nmd);
	if (error)
		goto out;
	if (nmd->flags & NETMAP_MEM_FINALIZED) {
		*size = nmd->nm_totalsize;
	} else {
		int i;
		*size = 0;
		for (i = 0; i < NETMAP_POOLS_NR; i++) {
			struct netmap_obj_pool *p = nmd->pools + i;
			*size += (p->_numclusters * p->_clustsize);
		}
	}
	*memflags = nmd->flags;
out:
	NMA_UNLOCK(nmd);
	return error;
}

/*
 * we store objects by kernel address, need to find the offset
 * within the pool to export the value to userspace.
 * Algorithm: scan until we find the cluster, then add the
 * actual offset in the cluster
 */
static ssize_t
netmap_obj_offset(struct netmap_obj_pool *p, const void *vaddr)
{
	int i, k = p->_clustentries, n = p->objtotal;
	ssize_t ofs = 0;

	for (i = 0; i < n; i += k, ofs += p->_clustsize) {
		const char *base = p->lut[i].vaddr;
		ssize_t relofs = (const char *) vaddr - base;

		if (relofs < 0 || relofs >= p->_clustsize)
			continue;

		ofs = ofs + relofs;
		ND("%s: return offset %d (cluster %d) for pointer %p",
		    p->name, ofs, i, vaddr);
		return ofs;
	}
	D("address %p is not contained inside any cluster (%s)",
	    vaddr, p->name);
	return 0; /* An error occurred */
}

/* Helper functions which convert virtual addresses to offsets */
#define netmap_if_offset(n, v)					\
	netmap_obj_offset(&(n)->pools[NETMAP_IF_POOL], (v))

#define netmap_ring_offset(n, v)				\
	((n)->pools[NETMAP_IF_POOL].memtotal +			\
	netmap_obj_offset(&(n)->pools[NETMAP_RING_POOL], (v)))

#define netmap_buf_offset(n, v)					\
	((n)->pools[NETMAP_IF_POOL].memtotal +			\
	(n)->pools[NETMAP_RING_POOL].memtotal +			\
	netmap_obj_offset(&(n)->pools[NETMAP_BUF_POOL], (v)))
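
/*
 * The region exported to userspace is thus laid out as three
 * consecutive ranges:
 *
 *	0 ............................... netmap_if pool
 *	if.memtotal ..................... netmap_ring pool
 *	if.memtotal + ring.memtotal ..... netmap_buf pool
 *
 * hence ring and buffer offsets must add the memtotal of every pool
 * that precedes them.
 */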

ssize_t
netmap_mem_if_offset(struct netmap_mem_d *nmd, const void *addr)
{
	ssize_t v;
	NMA_LOCK(nmd);
	v = netmap_if_offset(nmd, addr);
	NMA_UNLOCK(nmd);
	return v;
}

/*
 * report the index, and use start position as a hint,
 * otherwise buffer allocation becomes terribly expensive.
 */
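/*
 * The free map is an array of 32-bit words with one bit per object,
 * set while the object is free: bit j of word i covers object
 * i*32 + j, so e.g. clearing bit 3 of word 2 marks object 67 busy.
 */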
static void *
netmap_obj_malloc(struct netmap_obj_pool *p, u_int len, uint32_t *start, uint32_t *index)
{
	uint32_t i = 0;			/* index in the bitmap */
	uint32_t mask, j;		/* slot counter */
	void *vaddr = NULL;

	if (len > p->_objsize) {
		D("%s request size %d too large", p->name, len);
		// XXX cannot reduce the size
		return NULL;
	}

	if (p->objfree == 0) {
		D("no more %s objects", p->name);
		return NULL;
	}
	if (start)
		i = *start;

	/* termination is guaranteed by p->objfree, but better check bounds on i */
	while (vaddr == NULL && i < p->bitmap_slots)  {
		uint32_t cur = p->bitmap[i];
		if (cur == 0) { /* bitmask is fully used */
			i++;
			continue;
		}
		/* locate a slot */
		for (j = 0, mask = 1; (cur & mask) == 0; j++, mask <<= 1)
			;

		p->bitmap[i] &= ~mask; /* mark object as in use */
		p->objfree--;

		vaddr = p->lut[i * 32 + j].vaddr;
		if (index)
			*index = i * 32 + j;
	}
	ND("%s allocator: allocated object @ [%d][%d]: vaddr %p",
	    p->name, i, j, vaddr);

	if (start)
		*start = i;
	return vaddr;
}


/*
 * free by index, not by address. This is slow, but is only used
 * for a small number of objects (rings, nifp)
 */
static void
netmap_obj_free(struct netmap_obj_pool *p, uint32_t j)
{
	if (j >= p->objtotal) {
		D("invalid index %u, max %u", j, p->objtotal);
		return;
	}
	p->bitmap[j / 32] |= (1 << (j % 32));
	p->objfree++;
	return;
}

static void
netmap_obj_free_va(struct netmap_obj_pool *p, void *vaddr)
{
	u_int i, j, n = p->numclusters;

	for (i = 0, j = 0; i < n; i++, j += p->_clustentries) {
		void *base = p->lut[i * p->_clustentries].vaddr;
		ssize_t relofs = (ssize_t) vaddr - (ssize_t) base;

		/* The given address is out of the scope of the current cluster. */
		if (vaddr < base || relofs >= p->_clustsize)
			continue;

		j = j + relofs / p->_objsize;
		/* KASSERT(j != 0, ("Cannot free object 0")); */
		netmap_obj_free(p, j);
		return;
	}
	D("address %p is not contained inside any cluster (%s)",
	    vaddr, p->name);
}

#define netmap_if_malloc(n, len)	netmap_obj_malloc(&(n)->pools[NETMAP_IF_POOL], len, NULL, NULL)
#define netmap_if_free(n, v)		netmap_obj_free_va(&(n)->pools[NETMAP_IF_POOL], (v))
#define netmap_ring_malloc(n, len)	netmap_obj_malloc(&(n)->pools[NETMAP_RING_POOL], len, NULL, NULL)
#define netmap_ring_free(n, v)		netmap_obj_free_va(&(n)->pools[NETMAP_RING_POOL], (v))
#define netmap_buf_malloc(n, _pos, _index)			\
	netmap_obj_malloc(&(n)->pools[NETMAP_BUF_POOL], NETMAP_BDG_BUF_SIZE(n), _pos, _index)

/* Return the index associated to the given packet buffer */
#define netmap_buf_index(n, v)						\
	(netmap_obj_offset(&(n)->pools[NETMAP_BUF_POOL], (v)) / NETMAP_BDG_BUF_SIZE(n))


/* Return nonzero on error */
static int
netmap_new_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n)
{
	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];
	u_int i = 0;		/* slot counter */
	uint32_t pos = 0;	/* slot in p->bitmap */
	uint32_t index = 0;	/* buffer index */

	for (i = 0; i < n; i++) {
		void *vaddr = netmap_buf_malloc(nmd, &pos, &index);
		if (vaddr == NULL) {
			D("no more buffers after %d of %d", i, n);
			goto cleanup;
		}
		slot[i].buf_idx = index;
		slot[i].len = p->_objsize;
		slot[i].flags = 0;
	}

	ND("allocated %d buffers, %d available, first at %d", n, p->objfree, pos);
	return (0);

cleanup:
	while (i > 0) {
		i--;
		netmap_obj_free(p, slot[i].buf_idx);
	}
	bzero(slot, n * sizeof(slot[0]));
	return (ENOMEM);
}


static void
netmap_free_buf(struct netmap_mem_d *nmd, uint32_t i)
{
	struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL];

	if (i < 2 || i >= p->objtotal) {
		D("Cannot free buf#%d: should be in [2, %d[", i, p->objtotal);
		return;
	}
	netmap_obj_free(p, i);
}

static void
netmap_reset_obj_allocator(struct netmap_obj_pool *p)
{

	if (p == NULL)
		return;
	if (p->bitmap)
		free(p->bitmap, M_NETMAP);
	p->bitmap = NULL;
	if (p->lut) {
		u_int i;
		size_t sz = p->_clustsize;

		for (i = 0; i < p->objtotal; i += p->_clustentries) {
			if (p->lut[i].vaddr)
				contigfree(p->lut[i].vaddr, sz, M_NETMAP);
		}
		bzero(p->lut, sizeof(struct lut_entry) * p->objtotal);
#ifdef linux
		vfree(p->lut);
#else
		free(p->lut, M_NETMAP);
#endif
	}
	p->lut = NULL;
	p->objtotal = 0;
	p->memtotal = 0;
	p->numclusters = 0;
	p->objfree = 0;
}

/*
 * Free all resources related to an allocator.
 */
static void
netmap_destroy_obj_allocator(struct netmap_obj_pool *p)
{
	if (p == NULL)
		return;
	netmap_reset_obj_allocator(p);
}

/*
 * We receive a request for objtotal objects, of size objsize each.
 * Internally we may round up both numbers, as we allocate objects
 * in small clusters multiple of the page size.
 * We need to keep track of objtotal and clustentries,
 * as they are needed when freeing memory.
 *
 * XXX note -- userspace needs the buffers to be contiguous,
 * so we cannot afford gaps at the end of a cluster.
 */


/* call with NMA_LOCK held */
static int
netmap_config_obj_allocator(struct netmap_obj_pool *p, u_int objtotal, u_int objsize)
{
	int i;
	u_int clustsize;	/* the cluster size, multiple of page size */
	u_int clustentries;	/* how many objects per entry */

	/* we store the current request, so we can
	 * detect configuration changes later */
	p->r_objtotal = objtotal;
	p->r_objsize = objsize;

#define MAX_CLUSTSIZE	(1<<17)
#define LINE_ROUND	NM_CACHE_ALIGN	// 64
	if (objsize >= MAX_CLUSTSIZE) {
		/* we could do it but there is no point */
		D("unsupported allocation for %d bytes", objsize);
		return EINVAL;
	}
	/* make sure objsize is a multiple of LINE_ROUND */
	i = (objsize & (LINE_ROUND - 1));
	if (i) {
		D("XXX aligning object by %d bytes", LINE_ROUND - i);
		objsize += LINE_ROUND - i;
	}
	if (objsize < p->objminsize || objsize > p->objmaxsize) {
		D("requested objsize %d out of range [%d, %d]",
			objsize, p->objminsize, p->objmaxsize);
		return EINVAL;
	}
	if (objtotal < p->nummin || objtotal > p->nummax) {
		D("requested objtotal %d out of range [%d, %d]",
			objtotal, p->nummin, p->nummax);
		return EINVAL;
	}
	/*
	 * Compute number of objects using a brute-force approach:
	 * given a max cluster size,
	 * we try to fill it with objects keeping track of the
	 * wasted space to the next page boundary.
	 */
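	/*
	 * E.g. with 4 KB pages, objsize 2048 stops at the exact solution
	 * of 2 objects per one-page cluster, while objsize 1536 settles
	 * on 8 objects filling a 3-page (12 KB) cluster with no waste.
	 */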
	for (clustentries = 0, i = 1;; i++) {
		u_int delta, used = i * objsize;
		if (used > MAX_CLUSTSIZE)
			break;
		delta = used % PAGE_SIZE;
		if (delta == 0) { // exact solution
			clustentries = i;
			break;
		}
		if (delta > ( (clustentries*objsize) % PAGE_SIZE) )
			clustentries = i;
	}
	// D("XXX --- ouch, delta %d (bad for buffers)", delta);
	/* compute clustsize and round to the next page */
	clustsize = clustentries * objsize;
	i = (clustsize & (PAGE_SIZE - 1));
	if (i)
		clustsize += PAGE_SIZE - i;
	if (netmap_verbose)
		D("objsize %d clustsize %d objects %d",
			objsize, clustsize, clustentries);

	/*
	 * The number of clusters is n = ceil(objtotal/clustentries)
	 * objtotal' = n * clustentries
	 */
	p->_clustentries = clustentries;
	p->_clustsize = clustsize;
	p->_numclusters = (objtotal + clustentries - 1) / clustentries;

	/* actual values (may be larger than requested) */
	p->_objsize = objsize;
	p->_objtotal = p->_numclusters * clustentries;

	return 0;
}


/* call with NMA_LOCK held */
static int
netmap_finalize_obj_allocator(struct netmap_obj_pool *p)
{
	int i; /* must be signed */
	size_t n;

	/* optimistically assume we have enough memory */
	p->numclusters = p->_numclusters;
	p->objtotal = p->_objtotal;

	n = sizeof(struct lut_entry) * p->objtotal;
#ifdef linux
	p->lut = vmalloc(n);
#else
	p->lut = malloc(n, M_NETMAP, M_NOWAIT | M_ZERO);
#endif
	if (p->lut == NULL) {
		D("Unable to create lookup table (%d bytes) for '%s'", (int)n, p->name);
		goto clean;
	}

	/* Allocate the bitmap */
	n = (p->objtotal + 31) / 32;
	p->bitmap = malloc(sizeof(uint32_t) * n, M_NETMAP, M_NOWAIT | M_ZERO);
	if (p->bitmap == NULL) {
		D("Unable to create bitmap (%d entries) for allocator '%s'", (int)n,
		    p->name);
		goto clean;
	}
	p->bitmap_slots = n;

	/*
	 * Allocate clusters, init pointers and bitmap
	 */

	n = p->_clustsize;
	for (i = 0; i < (int)p->objtotal;) {
		int lim = i + p->_clustentries;
		char *clust;

		clust = contigmalloc(n, M_NETMAP, M_NOWAIT | M_ZERO,
		    (size_t)0, -1UL, PAGE_SIZE, 0);
		if (clust == NULL) {
			/*
			 * If we get here, there is a severe memory shortage,
			 * so halve the allocated memory to reclaim some.
			 */
			D("Unable to create cluster at %d for '%s' allocator",
			    i, p->name);
			if (i < 2) /* nothing to halve */
				goto out;
			lim = i / 2;
			for (i--; i >= lim; i--) {
				p->bitmap[ (i>>5) ] &=  ~( 1 << (i & 31) );
				if (i % p->_clustentries == 0 && p->lut[i].vaddr)
					contigfree(p->lut[i].vaddr,
						n, M_NETMAP);
			}
		out:
			p->objtotal = i;
			/* we may have stopped in the middle of a cluster */
			p->numclusters = (i + p->_clustentries - 1) / p->_clustentries;
			break;
		}
		for (; i < lim; i++, clust += p->_objsize) {
			p->bitmap[ (i>>5) ] |=  ( 1 << (i & 31) );
			p->lut[i].vaddr = clust;
			p->lut[i].paddr = vtophys(clust);
		}
	}
	p->objfree = p->objtotal;
	p->memtotal = p->numclusters * p->_clustsize;
	if (p->objfree == 0)
		goto clean;
	if (netmap_verbose)
		D("Pre-allocated %d clusters (%d/%dKB) for '%s'",
		    p->numclusters, p->_clustsize >> 10,
		    p->memtotal >> 10, p->name);

	return 0;

clean:
	netmap_reset_obj_allocator(p);
	return ENOMEM;
}


/* call with lock held */
static int
netmap_memory_config_changed(struct netmap_mem_d *nmd)
{
	int i;

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		if (nmd->pools[i].r_objsize != netmap_params[i].size ||
		    nmd->pools[i].r_objtotal != netmap_params[i].num)
			return 1;
	}
	return 0;
}

static void
netmap_mem_reset_all(struct netmap_mem_d *nmd)
{
	int i;
	D("resetting %p", nmd);
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		netmap_reset_obj_allocator(&nmd->pools[i]);
	}
	nmd->flags &= ~NETMAP_MEM_FINALIZED;
}

static int
netmap_mem_finalize_all(struct netmap_mem_d *nmd)
{
	int i;
	if (nmd->flags & NETMAP_MEM_FINALIZED)
		return 0;
	nmd->lasterr = 0;
	nmd->nm_totalsize = 0;
	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		nmd->lasterr = netmap_finalize_obj_allocator(&nmd->pools[i]);
		if (nmd->lasterr)
			goto error;
		nmd->nm_totalsize += nmd->pools[i].memtotal;
	}
	/* buffers 0 and 1 are reserved */
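	/* (done by clearing bits 0 and 1 of the first bitmap word:
	 * ~3 == 0xfffffffc, so netmap_obj_malloc() can never return
	 * buffer index 0 or 1) */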
	nmd->pools[NETMAP_BUF_POOL].objfree -= 2;
	nmd->pools[NETMAP_BUF_POOL].bitmap[0] = ~3;
	nmd->flags |= NETMAP_MEM_FINALIZED;

	D("Have %d KB for interfaces, %d KB for rings and %d MB for buffers",
	    nmd->pools[NETMAP_IF_POOL].memtotal >> 10,
	    nmd->pools[NETMAP_RING_POOL].memtotal >> 10,
	    nmd->pools[NETMAP_BUF_POOL].memtotal >> 20);

	D("Free buffers: %d", nmd->pools[NETMAP_BUF_POOL].objfree);

	return 0;
error:
	netmap_mem_reset_all(nmd);
	return nmd->lasterr;
}


void
netmap_mem_private_delete(struct netmap_mem_d *nmd)
{
	if (nmd == NULL)
		return;
	D("deleting %p", nmd);
	if (nmd->refcount > 0)
		D("bug: deleting mem allocator with refcount=%d!", nmd->refcount);
	D("done deleting %p", nmd);
	NMA_LOCK_DESTROY(nmd);
	free(nmd, M_DEVBUF);
}

static int
netmap_mem_private_config(struct netmap_mem_d *nmd)
{
	/* nothing to do, we are configured on creation
	 * and configuration never changes thereafter
	 */
	return 0;
}

static int
netmap_mem_private_finalize(struct netmap_mem_d *nmd)
{
	int err;
	NMA_LOCK(nmd);
	nmd->refcount++;
	err = netmap_mem_finalize_all(nmd);
	NMA_UNLOCK(nmd);
	return err;
}

static void
netmap_mem_private_deref(struct netmap_mem_d *nmd)
{
	NMA_LOCK(nmd);
	if (--nmd->refcount <= 0)
		netmap_mem_reset_all(nmd);
	NMA_UNLOCK(nmd);
}


struct netmap_mem_d *
netmap_mem_private_new(const char *name, u_int txr, u_int txd, u_int rxr, u_int rxd)
{
	struct netmap_mem_d *d = NULL;
	struct netmap_obj_params p[NETMAP_POOLS_NR];
	int i;
	u_int maxd;

	d = malloc(sizeof(struct netmap_mem_d),
			M_DEVBUF, M_NOWAIT | M_ZERO);
	if (d == NULL)
		return NULL;

	*d = nm_blueprint;

	/* XXX the rest of the code assumes the stack rings are always present */
	txr++;
	rxr++;
	p[NETMAP_IF_POOL].size = sizeof(struct netmap_if) +
		sizeof(ssize_t) * (txr + rxr);
	p[NETMAP_IF_POOL].num = 2;
	maxd = (txd > rxd) ? txd : rxd;
	p[NETMAP_RING_POOL].size = sizeof(struct netmap_ring) +
		sizeof(struct netmap_slot) * maxd;
	p[NETMAP_RING_POOL].num = txr + rxr;
	p[NETMAP_BUF_POOL].size = 2048; /* XXX find a way to let the user choose this */
	p[NETMAP_BUF_POOL].num = rxr * (rxd + 2) + txr * (txd + 2);
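	/* e.g. a request for 1 tx and 1 rx ring of 1024 slots each
	 * becomes, after the txr++/rxr++ above, 2 netmap_ifs, 4 rings
	 * and 2*(1024+2) + 2*(1024+2) = 4104 buffers (2 spares per ring) */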

	D("req if %d*%d ring %d*%d buf %d*%d",
		p[NETMAP_IF_POOL].num,
		p[NETMAP_IF_POOL].size,
		p[NETMAP_RING_POOL].num,
		p[NETMAP_RING_POOL].size,
		p[NETMAP_BUF_POOL].num,
		p[NETMAP_BUF_POOL].size);

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		snprintf(d->pools[i].name, NETMAP_POOL_MAX_NAMSZ,
				nm_blueprint.pools[i].name,
				name);
		if (netmap_config_obj_allocator(&d->pools[i],
				p[i].num, p[i].size))
			goto error;
	}

	d->flags &= ~NETMAP_MEM_FINALIZED;

	NMA_LOCK_INIT(d);

	return d;
error:
	netmap_mem_private_delete(d);
	return NULL;
}


/* call with lock held */
static int
netmap_mem_global_config(struct netmap_mem_d *nmd)
{
	int i;

	if (nmd->refcount)
		/* already in use, we cannot change the configuration */
		goto out;

	if (!netmap_memory_config_changed(nmd))
		goto out;

	D("reconfiguring");

	if (nmd->flags & NETMAP_MEM_FINALIZED) {
		/* reset previous allocation */
		for (i = 0; i < NETMAP_POOLS_NR; i++) {
			netmap_reset_obj_allocator(&nmd->pools[i]);
		}
		nmd->flags &= ~NETMAP_MEM_FINALIZED;
	}

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		nmd->lasterr = netmap_config_obj_allocator(&nmd->pools[i],
				netmap_params[i].num, netmap_params[i].size);
		if (nmd->lasterr)
			goto out;
	}

out:

	return nmd->lasterr;
}

static int
netmap_mem_global_finalize(struct netmap_mem_d *nmd)
{
	int err;

	NMA_LOCK(nmd);

	/* update configuration if changed */
	if (netmap_mem_global_config(nmd))
		goto out;

	nmd->refcount++;

	if (nmd->flags & NETMAP_MEM_FINALIZED) {
		/* may happen if config is not changed */
		ND("nothing to do");
		goto out;
	}

	if (netmap_mem_finalize_all(nmd))
		goto out;

	/* backward compatibility */
	netmap_buf_size = nmd->pools[NETMAP_BUF_POOL]._objsize;
	netmap_total_buffers = nmd->pools[NETMAP_BUF_POOL].objtotal;

	netmap_buffer_lut = nmd->pools[NETMAP_BUF_POOL].lut;
	netmap_buffer_base = nmd->pools[NETMAP_BUF_POOL].lut[0].vaddr;

	nmd->lasterr = 0;

out:
	if (nmd->lasterr)
		nmd->refcount--;
	err = nmd->lasterr;

	NMA_UNLOCK(nmd);

	return err;
}

int
netmap_mem_init(void)
{
	NMA_LOCK_INIT(&nm_mem);
	return (0);
}

void
netmap_mem_fini(void)
{
	int i;

	for (i = 0; i < NETMAP_POOLS_NR; i++) {
		netmap_destroy_obj_allocator(&nm_mem.pools[i]);
	}
	NMA_LOCK_DESTROY(&nm_mem);
}

static void
netmap_free_rings(struct netmap_adapter *na)
{
	u_int i;
	if (!na->tx_rings)
		return;
	for (i = 0; i < na->num_tx_rings + 1; i++) {
		if (na->tx_rings[i].ring) {
			netmap_ring_free(na->nm_mem, na->tx_rings[i].ring);
			na->tx_rings[i].ring = NULL;
		}
	}
	for (i = 0; i < na->num_rx_rings + 1; i++) {
		if (na->rx_rings[i].ring) {
			netmap_ring_free(na->nm_mem, na->rx_rings[i].ring);
			na->rx_rings[i].ring = NULL;
		}
	}
}

/*
 * Allocate netmap rings and buffers for this card.
 * The rings are contiguous, but have variable size.
 * The function grabs the allocator lock itself.
 */
int
netmap_mem_rings_create(struct netmap_adapter *na)
{
	struct netmap_ring *ring;
	u_int len, ndesc;
	struct netmap_kring *kring;

	NMA_LOCK(na->nm_mem);

	for (kring = na->tx_rings; kring != na->rx_rings; kring++) { /* Transmit rings */
		ndesc = kring->nkr_num_slots;
		len = sizeof(struct netmap_ring) +
			  ndesc * sizeof(struct netmap_slot);
		ring = netmap_ring_malloc(na->nm_mem, len);
		if (ring == NULL) {
			D("Cannot allocate tx_ring");
			goto cleanup;
		}
		ND("txring at %p", ring);
		kring->ring = ring;
		*(uint32_t *)(uintptr_t)&ring->num_slots = ndesc;
		*(int64_t *)(uintptr_t)&ring->buf_ofs =
		    (na->nm_mem->pools[NETMAP_IF_POOL].memtotal +
			na->nm_mem->pools[NETMAP_RING_POOL].memtotal) -
			netmap_ring_offset(na->nm_mem, ring);
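		/* buf_ofs is the distance from this ring to the start of
		 * the buffer pool, which begins right after the if and
		 * ring pools; userspace turns a slot's buf_idx into an
		 * address as (char *)ring + buf_ofs + buf_idx * nr_buf_size
		 * (the NETMAP_BUF() macro in netmap_user.h). */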

		/* copy values from kring */
		ring->head = kring->rhead;
		ring->cur = kring->rcur;
		ring->tail = kring->rtail;
		*(uint16_t *)(uintptr_t)&ring->nr_buf_size =
			NETMAP_BDG_BUF_SIZE(na->nm_mem);
		ND("initializing slots for txring");
		if (netmap_new_bufs(na->nm_mem, ring->slot, ndesc)) {
			D("Cannot allocate buffers for tx_ring");
			goto cleanup;
		}
	}

	for ( ; kring != na->tailroom; kring++) { /* Receive rings */
		ndesc = kring->nkr_num_slots;
		len = sizeof(struct netmap_ring) +
			  ndesc * sizeof(struct netmap_slot);
		ring = netmap_ring_malloc(na->nm_mem, len);
		if (ring == NULL) {
			D("Cannot allocate rx_ring");
			goto cleanup;
		}
		ND("rxring at %p", ring);

		kring->ring = ring;
		*(uint32_t *)(uintptr_t)&ring->num_slots = ndesc;
		*(int64_t *)(uintptr_t)&ring->buf_ofs =
		    (na->nm_mem->pools[NETMAP_IF_POOL].memtotal +
			na->nm_mem->pools[NETMAP_RING_POOL].memtotal) -
			netmap_ring_offset(na->nm_mem, ring);

		/* copy values from kring */
		ring->head = kring->rhead;
		ring->cur = kring->rcur;
		ring->tail = kring->rtail;
		*(int *)(uintptr_t)&ring->nr_buf_size =
			NETMAP_BDG_BUF_SIZE(na->nm_mem);
		ND("initializing slots for rxring");
		if (netmap_new_bufs(na->nm_mem, ring->slot, ndesc)) {
			D("Cannot allocate buffers for rx_ring");
			goto cleanup;
		}
	}

	NMA_UNLOCK(na->nm_mem);

	return 0;

cleanup:
	netmap_free_rings(na);

	NMA_UNLOCK(na->nm_mem);

	return ENOMEM;
}

void
netmap_mem_rings_delete(struct netmap_adapter *na)
{
	/* last instance, release bufs and rings */
	u_int i, lim;
	struct netmap_kring *kring;
	struct netmap_ring *ring;

	NMA_LOCK(na->nm_mem);

	for (kring = na->tx_rings; kring != na->tailroom; kring++) {
		ring = kring->ring;
		if (ring == NULL)
			continue;
		lim = kring->nkr_num_slots;
		for (i = 0; i < lim; i++)
			netmap_free_buf(na->nm_mem, ring->slot[i].buf_idx);
	}
	netmap_free_rings(na);

	NMA_UNLOCK(na->nm_mem);
}


/*
 * Allocate the per-fd structure netmap_if.
 *
 * We assume that the configuration stored in na
 * (number of tx/rx rings and descs) does not change while
 * the interface is in netmap mode.
 */
struct netmap_if *
netmap_mem_if_new(const char *ifname, struct netmap_adapter *na)
{
	struct netmap_if *nifp;
	ssize_t base; /* handy for relative offsets between rings and nifp */
	u_int i, len, ntx, nrx;

	/*
	 * check whether the virtual port needs the stack rings
	 */
	ntx = na->num_tx_rings + 1; /* shorthand, include stack ring */
	nrx = na->num_rx_rings + 1; /* shorthand, include stack ring */
	/*
	 * the descriptor is followed inline by an array of offsets
	 * to the tx and rx rings in the shared memory region.
	 * For virtual rx rings we also allocate an array of
	 * pointers to assign to nkr_leases.
	 */

	NMA_LOCK(na->nm_mem);

	len = sizeof(struct netmap_if) + (nrx + ntx) * sizeof(ssize_t);
	nifp = netmap_if_malloc(na->nm_mem, len);
	if (nifp == NULL) {
		NMA_UNLOCK(na->nm_mem);
		return NULL;
	}

	/* initialize base fields -- override const */
	*(u_int *)(uintptr_t)&nifp->ni_tx_rings = na->num_tx_rings;
	*(u_int *)(uintptr_t)&nifp->ni_rx_rings = na->num_rx_rings;
	strncpy(nifp->ni_name, ifname, (size_t)IFNAMSIZ);

	/*
	 * fill the slots for the rx and tx rings. They contain the offset
	 * between the ring and nifp, so the information is usable in
	 * userspace to reach the ring from the nifp.
	 */
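	/* (being relative to nifp, the offsets remain valid wherever the
	 * region is mmap()ed: the NETMAP_TXRING()/NETMAP_RXRING() macros
	 * in netmap_user.h resolve them as
	 * (char *)nifp + nifp->ring_ofs[i]) */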
	base = netmap_if_offset(na->nm_mem, nifp);
	for (i = 0; i < ntx; i++) {
		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i] =
			netmap_ring_offset(na->nm_mem, na->tx_rings[i].ring) - base;
	}
	for (i = 0; i < nrx; i++) {
		*(ssize_t *)(uintptr_t)&nifp->ring_ofs[i+ntx] =
			netmap_ring_offset(na->nm_mem, na->rx_rings[i].ring) - base;
	}

	NMA_UNLOCK(na->nm_mem);

	return (nifp);
}

void
netmap_mem_if_delete(struct netmap_adapter *na, struct netmap_if *nifp)
{
	if (nifp == NULL)
		/* nothing to do */
		return;
	NMA_LOCK(na->nm_mem);

	netmap_if_free(na->nm_mem, nifp);

	NMA_UNLOCK(na->nm_mem);
}

static void
netmap_mem_global_deref(struct netmap_mem_d *nmd)
{
	NMA_LOCK(nmd);

	nmd->refcount--;
	if (netmap_verbose)
		D("refcount = %d", nmd->refcount);

	NMA_UNLOCK(nmd);
}

int
netmap_mem_finalize(struct netmap_mem_d *nmd)
{
	return nmd->finalize(nmd);
}

void
netmap_mem_deref(struct netmap_mem_d *nmd)
{
	nmd->deref(nmd);
}