netmap: several typo fixes

No functional changes intended.
This commit is contained in:
Vincenzo Maffione 2021-04-02 07:01:20 +00:00
parent 66671ae589
commit 45c67e8f6b
18 changed files with 65 additions and 55 deletions

View File

@ -418,7 +418,7 @@ struct nmreq_pools_info* nmport_extmem_getinfo(struct nmport_d *d);
* @initial the initial offset for all the slots
* @maxoff the maximum offset
* @bits the number of bits of slot->ptr to use for the offsets
* @mingap the minimum gap betwen offsets (in shared buffers)
* @mingap the minimum gap between offsets (in shared buffers)
*
* With this option the lower @bits bits of the ptr field in the netmap_slot
* can be used to specify an offset into the buffer. All offsets will be set
@ -440,7 +440,7 @@ struct nmreq_pools_info* nmport_extmem_getinfo(struct nmport_d *d);
* starting o bytes in the buffer. Note that the address of the packet must
* comply with any alignment constraints that the port may have, or the result
* will be undefined. The user may read the alignment constraint in the new
* ring->buf_align field. It is also possibile that empty slots already come
* ring->buf_align field. It is also possible that empty slots already come
* with a non-zero offset o specified in the offset field. In this case, the
* user will have to write the packet at an offset o' >= o.
*

View File

@ -114,7 +114,7 @@ vtnet_netmap_txsync(struct netmap_kring *kring, int flags)
virtqueue_notify(vq);
/* Update hwcur depending on where we stopped. */
kring->nr_hwcur = nm_i; /* note we migth break early */
kring->nr_hwcur = nm_i; /* note we might break early */
}
/* Free used slots. We only consider our own used buffers, recognized

View File

@ -193,7 +193,7 @@ ports attached to the switch)
* always attached to a bridge.
* Persistent VALE ports must be created separately, and
* then attached like normal NICs. The NIOCREGIF we are examining
* will find them only if they had previosly been created and
* will find them only if they had previously been created and
* attached (see VALE_CTL below).
*
* - netmap_pipe_adapter [netmap_pipe.c]
@ -994,7 +994,7 @@ netmap_mem_restore(struct netmap_adapter *na)
static void
netmap_mem_drop(struct netmap_adapter *na)
{
/* if the native allocator had been overrided on regif,
/* if the native allocator had been overridden on regif,
* restore it now and drop the temporary one
*/
if (netmap_mem_deref(na->nm_mem, na)) {
@ -1072,7 +1072,7 @@ netmap_do_unregif(struct netmap_priv_d *priv)
}
}
/* possibily decrement counter of tx_si/rx_si users */
/* possibly decrement counter of tx_si/rx_si users */
netmap_unset_ringid(priv);
/* delete the nifp */
netmap_mem_if_delete(na, priv->np_nifp);
@ -1154,7 +1154,7 @@ netmap_dtor(void *data)
* they will be forwarded to the hw TX rings, saving the application
* from doing the same task in user-space.
*
* Transparent fowarding can be enabled per-ring, by setting the NR_FORWARD
* Transparent forwarding can be enabled per-ring, by setting the NR_FORWARD
* flag, or globally with the netmap_fwd sysctl.
*
* The transfer NIC --> host is relatively easy, just encapsulate
@ -1618,7 +1618,7 @@ netmap_get_na(struct nmreq_header *hdr,
netmap_adapter_get(ret);
/*
* if the adapter supports the host rings and it is not alread open,
* if the adapter supports the host rings and it is not already open,
* try to set the number of host rings as requested by the user
*/
if (((*na)->na_flags & NAF_HOST_RINGS) && (*na)->active_fds == 0) {
@ -2042,7 +2042,7 @@ netmap_krings_get(struct netmap_priv_d *priv)
priv->np_qlast[NR_RX]);
/* first round: check that all the requested rings
* are neither alread exclusively owned, nor we
* are neither already exclusively owned, nor we
* want exclusive ownership when they are already in use
*/
foreach_selected_ring(priv, t, i, kring) {
@ -2597,7 +2597,7 @@ netmap_do_regif(struct netmap_priv_d *priv, struct netmap_adapter *na,
if (error)
goto err_rel_excl;
/* compute and validate the buf lenghts */
/* compute and validate the buf lengths */
error = netmap_compute_buf_len(priv);
if (error)
goto err_rel_excl;
@ -2719,7 +2719,7 @@ netmap_ioctl(struct netmap_priv_d *priv, u_long cmd, caddr_t data,
}
/* Make a kernel-space copy of the user-space nr_body.
* For convenince, the nr_body pointer and the pointers
* For convenience, the nr_body pointer and the pointers
* in the options list will be replaced with their
* kernel-space counterparts. The original pointers are
* saved internally and later restored by nmreq_copyout
@ -3312,7 +3312,7 @@ nmreq_opt_size_by_type(uint32_t nro_reqtype, uint64_t nro_size)
* The list of options is copied and the pointers adjusted. The
* original pointers are saved before the option they belonged to.
*
* The option table has an entry for every availabe option. Entries
* The option table has an entry for every available option. Entries
* for options that have not been passed contain NULL.
*
*/

View File

@ -951,7 +951,7 @@ nm_bdg_ctl_polling_stop(struct netmap_adapter *na)
bps->configured = false;
nm_os_free(bps);
bna->na_polling_state = NULL;
/* reenable interrupts */
/* re-enable interrupts */
nma_intr_enable(bna->hwna, 1);
return 0;
}

View File

@ -264,7 +264,7 @@ nm_os_csum_tcpudp_ipv4(struct nm_iphdr *iph, void *data,
#ifdef INET
uint16_t pseudolen = datalen + iph->protocol;
/* Compute and insert the pseudo-header cheksum. */
/* Compute and insert the pseudo-header checksum. */
*check = in_pseudo(iph->saddr, iph->daddr,
htobe16(pseudolen));
/* Compute the checksum on TCP/UDP header + payload

View File

@ -446,7 +446,7 @@ generic_mbuf_destructor(struct mbuf *m)
/*
* First, clear the event mbuf.
* In principle, the event 'm' should match the one stored
* on ring 'r'. However we check it explicitely to stay
* on ring 'r'. However we check it explicitly to stay
* safe against lower layers (qdisc, driver, etc.) changing
* MBUF_TXQ(m) under our feet. If the match is not found
* on 'r', we try to see if it belongs to some other ring.

View File

@ -294,7 +294,7 @@ struct nm_bridge;
struct netmap_priv_d;
struct nm_bdg_args;
/* os-specific NM_SELINFO_T initialzation/destruction functions */
/* os-specific NM_SELINFO_T initialization/destruction functions */
int nm_os_selinfo_init(NM_SELINFO_T *, const char *name);
void nm_os_selinfo_uninit(NM_SELINFO_T *);
@ -485,7 +485,7 @@ struct netmap_kring {
struct netmap_adapter *na;
/* the adapter that wants to be notified when this kring has
* new slots avaialable. This is usually the same as the above,
* new slots available. This is usually the same as the above,
* but wrappers may let it point to themselves
*/
struct netmap_adapter *notify_na;
@ -562,7 +562,7 @@ struct netmap_kring {
*/
uint64_t hwbuf_len;
/* required aligment (in bytes) for the buffers used by this ring.
/* required alignment (in bytes) for the buffers used by this ring.
* Netmap buffers are aligned to cachelines, which should suffice
* for most NICs. If the user is passing offsets, though, we need
* to check that the resulting buf address complies with any
@ -570,7 +570,7 @@ struct netmap_kring {
*/
uint64_t buf_align;
/* harware specific logic for the selection of the hwbuf_len */
/* hardware specific logic for the selection of the hwbuf_len */
int (*nm_bufcfg)(struct netmap_kring *kring, uint64_t target);
int (*save_notify)(struct netmap_kring *kring, int flags);
@ -709,7 +709,7 @@ struct nm_config_info {
/*
* default type for the magic field.
* May be overriden in glue code.
* May be overridden in glue code.
*/
#ifndef NM_OS_MAGIC
#define NM_OS_MAGIC uint32_t
@ -827,7 +827,7 @@ struct netmap_adapter {
* (l) and kring->buf_align fields. The l value is most important
* for RX rings, where we want to disallow writes outside of the
* netmap buffer. The l value must be computed taking into account
* the stipulated max_offset (o), possibily increased if there are
* the stipulated max_offset (o), possibly increased if there are
* alignment constraints, the maxframe (m), if known, and the
* current NETMAP_BUF_SIZE (b) of the memory region used by the
* adapter. We want the largest supported l such that o + l <= b.
@ -1680,7 +1680,7 @@ extern int netmap_debug; /* for debugging */
#define netmap_debug (0)
#endif /* !CONFIG_NETMAP_DEBUG */
enum { /* debug flags */
NM_DEBUG_ON = 1, /* generic debug messsages */
NM_DEBUG_ON = 1, /* generic debug messages */
NM_DEBUG_HOST = 0x2, /* debug host stack */
NM_DEBUG_RXSYNC = 0x10, /* debug on rxsync/txsync */
NM_DEBUG_TXSYNC = 0x20,

View File

@ -222,7 +222,7 @@ netmap_sync_kloop_tx_ring(const struct sync_kloop_ring_args *a)
if (unlikely(kring->nm_sync(kring, shadow_ring.flags))) {
if (!a->busy_wait) {
/* Reenable notifications. */
/* Re-enable notifications. */
csb_ktoa_kick_enable(csb_ktoa, 1);
}
nm_prerr("txsync() failed");
@ -267,7 +267,7 @@ netmap_sync_kloop_tx_ring(const struct sync_kloop_ring_args *a)
* go to sleep, waiting for a kick from the application when new
* slots are ready for transmission.
*/
/* Reenable notifications. */
/* Re-enable notifications. */
csb_ktoa_kick_enable(csb_ktoa, 1);
/* Double check, with store-load memory barrier. */
nm_stld_barrier();
@ -356,7 +356,7 @@ netmap_sync_kloop_rx_ring(const struct sync_kloop_ring_args *a)
if (unlikely(kring->nm_sync(kring, shadow_ring.flags))) {
if (!a->busy_wait) {
/* Reenable notifications. */
/* Re-enable notifications. */
csb_ktoa_kick_enable(csb_ktoa, 1);
}
nm_prerr("rxsync() failed");
@ -402,7 +402,7 @@ netmap_sync_kloop_rx_ring(const struct sync_kloop_ring_args *a)
* go to sleep, waiting for a kick from the application when new receive
* slots are available.
*/
/* Reenable notifications. */
/* Re-enable notifications. */
csb_ktoa_kick_enable(csb_ktoa, 1);
/* Double check, with store-load memory barrier. */
nm_stld_barrier();
@ -1000,7 +1000,7 @@ netmap_pt_guest_txsync(struct nm_csb_atok *atok, struct nm_csb_ktoa *ktoa,
* space is available.
*/
if (nm_kr_wouldblock(kring) && !(kring->nr_kflags & NKR_NOINTR)) {
/* Reenable notifications. */
/* Re-enable notifications. */
atok->appl_need_kick = 1;
/* Double check, with store-load memory barrier. */
nm_stld_barrier();
@ -1061,7 +1061,7 @@ netmap_pt_guest_rxsync(struct nm_csb_atok *atok, struct nm_csb_ktoa *ktoa,
* completed.
*/
if (nm_kr_wouldblock(kring) && !(kring->nr_kflags & NKR_NOINTR)) {
/* Reenable notifications. */
/* Re-enable notifications. */
atok->appl_need_kick = 1;
/* Double check, with store-load memory barrier. */
nm_stld_barrier();

View File

@ -167,7 +167,7 @@ struct netmap_mem_d {
u_int flags;
#define NETMAP_MEM_FINALIZED 0x1 /* preallocation done */
#define NETMAP_MEM_HIDDEN 0x8 /* beeing prepared */
#define NETMAP_MEM_HIDDEN 0x8 /* being prepared */
#define NETMAP_MEM_NOMAP 0x10 /* do not map/unmap pdevs */
int lasterr; /* last error for curr config */
int active; /* active users */
@ -176,7 +176,7 @@ struct netmap_mem_d {
struct netmap_obj_pool pools[NETMAP_POOLS_NR];
nm_memid_t nm_id; /* allocator identifier */
int nm_grp; /* iommu groupd id */
int nm_grp; /* iommu group id */
/* list of all existing allocators, sorted by nm_id */
struct netmap_mem_d *prev, *next;
@ -856,7 +856,7 @@ netmap_mem2_ofstophys(struct netmap_mem_d* nmd, vm_ooffset_t offset)
*
* 2a - cycle all the objects in every pool, get the list
* of the physical address descriptors
* 2b - calculate the offset in the array of pages desciptor in the
* 2b - calculate the offset in the array of pages descriptor in the
* main MDL
* 2c - copy the descriptors of the object in the main MDL
*
@ -1408,7 +1408,7 @@ netmap_finalize_obj_allocator(struct netmap_obj_pool *p)
if (p->lut) {
/* if the lut is already there we assume that also all the
* clusters have already been allocated, possibily by somebody
* clusters have already been allocated, possibly by somebody
* else (e.g., extmem). In the latter case, the alloc_done flag
* will remain at zero, so that we will not attempt to
* deallocate the clusters by ourselves in
@ -1984,7 +1984,7 @@ netmap_mem2_rings_create(struct netmap_mem_d *nmd, struct netmap_adapter *na)
u_int len, ndesc;
if (!netmap_mem_ring_needed(kring)) {
/* uneeded, or already created by somebody else */
/* unneeded, or already created by somebody else */
if (netmap_debug & NM_DEBUG_MEM)
nm_prinf("NOT creating ring %s (ring %p, users %d neekring %d)",
kring->name, ring, kring->users, kring->nr_kflags & NKR_NEEDRING);

View File

@ -57,7 +57,7 @@
* of the object, and from there locate the offset from the beginning
* of the region.
*
* The invididual allocators manage a pool of memory for objects of
* The individual allocators manage a pool of memory for objects of
* the same size.
* The pool is split into smaller clusters, whose size is a
* multiple of the page size. The cluster size is chosen
@ -70,7 +70,7 @@
* Allocation scans the bitmap; this is done only on attach, so we are not
* too worried about performance
*
* For each allocator we can define (thorugh sysctl) the size and
* For each allocator we can define (through sysctl) the size and
* number of each object. Memory is allocated at the first use of a
* netmap file descriptor, and can be freed when all such descriptors
* have been released (including unmapping the memory).

View File

@ -483,7 +483,7 @@ netmap_monitor_stop(struct netmap_adapter *na)
netmap_adapter_put(next->priv.np_na); /* nop if null */
next->priv.np_na = NULL;
}
/* orhpan the zmon list */
/* orphan the zmon list */
if (z->next != NULL)
z->next->zmon_list[t].prev = NULL;
z->next = NULL;
@ -601,7 +601,7 @@ netmap_zmon_parent_sync(struct netmap_kring *kring, int flags, enum txrx tx)
mring = mkring->ring;
mlim = mkring->nkr_num_slots - 1;
/* get the relased slots (rel_slots) */
/* get the released slots (rel_slots) */
if (tx == NR_TX) {
beg = kring->nr_hwtail + 1;
error = kring->mon_sync(kring, flags);

View File

@ -578,7 +578,7 @@ do { \
static __inline uint32_t
nm_vale_rthash(const uint8_t *addr)
{
uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = 0; // hask key
uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = 0; // hash key
b += addr[5] << 8;
b += addr[4];
@ -1369,7 +1369,7 @@ nm_vi_destroy(const char *name)
goto err;
}
/* also make sure that nobody is using the inferface */
/* also make sure that nobody is using the interface */
if (NETMAP_OWNED_BY_ANY(&vpna->up) ||
vpna->up.na_refcount > 1 /* any ref besides the one in nm_vi_create()? */) {
error = EBUSY;

View File

@ -147,7 +147,7 @@
* netmap:foo*, or another registration should be done to open at least a
* NIC TX queue in netmap mode.
*
* + Netmap is not currently able to deal with intercepted trasmit mbufs which
* + Netmap is not currently able to deal with intercepted transmit mbufs which
* require offloadings like TSO, UFO, checksumming offloadings, etc. It is
* responsibility of the user to disable those offloadings (e.g. using
* ifconfig on FreeBSD or ethtool -K on Linux) for an interface that is being
@ -311,7 +311,7 @@ struct netmap_ring {
/* the alignment requirement, in bytes, for the start
* of the packets inside the buffers.
* User programs should take this alignment into
* account when specifing buffer-offsets in TX slots.
* account when specifying buffer-offsets in TX slots.
*/
const uint64_t buf_align;
@ -494,7 +494,7 @@ struct netmap_if {
/* Header common to all request options. */
struct nmreq_option {
/* Pointer ot the next option. */
/* Pointer to the next option. */
uint64_t nro_next;
/* Option type. */
uint32_t nro_reqtype;
@ -980,7 +980,7 @@ struct nmreq_opt_offsets {
/* optional initial offset value, to be set in all slots. */
uint64_t nro_initial_offset;
/* number of bits in the lower part of the 'ptr' field to be
* used as the offset field. On output the (possibily larger)
* used as the offset field. On output the (possibly larger)
* effective number of bits is returned.
* 0 means: use the whole ptr field.
*/

View File

@ -55,7 +55,7 @@
* To compute the next index in a circular ring you can use
* i = nm_ring_next(ring, i);
*
* To ease porting apps from pcap to netmap we supply a few fuctions
* To ease porting apps from pcap to netmap we supply a few functions
* that can be called to open, close, read and write on netmap in a way
* similar to libpcap. Note that the read/write function depend on
* an ioctl()/select()/poll() being issued to refill rings or push
@ -133,7 +133,7 @@
((offset) & (ring)->offset_mask); } while (0)
/* obtain the start of the buffer pointed to by a ring's slot, taking the
* offset field into accout
* offset field into account
*/
#define NETMAP_BUF_OFFSET(ring, slot) \
(NETMAP_BUF(ring, (slot)->buf_idx) + NETMAP_ROFFSET(ring, slot))
@ -322,7 +322,7 @@ typedef void (*nm_cb_t)(u_char *, const struct nm_pkthdr *, const u_char *d);
* nm_open() opens a file descriptor, binds to a port and maps memory.
*
* ifname (netmap:foo or vale:foo) is the port name
* a suffix can indicate the follwing:
* a suffix can indicate the following:
* ^ bind the host (sw) ring pair
* * bind host and NIC ring pairs
* -NN bind individual NIC ring pair
@ -701,7 +701,7 @@ nm_parse(const char *ifname, struct nm_desc *d, char *err)
nr_flags = NR_REG_PIPE_MASTER;
p_state = P_GETNUM;
break;
case '}': /* pipe (slave endoint) */
case '}': /* pipe (slave endpoint) */
nr_flags = NR_REG_PIPE_SLAVE;
p_state = P_GETNUM;
break;
@ -991,11 +991,11 @@ nm_close(struct nm_desc *d)
return 0;
}
static int
nm_mmap(struct nm_desc *d, const struct nm_desc *parent)
{
if (d->done_mmap)
return 0;
//XXX TODO: check if mmap is already done
if (IS_NETMAP_DESC(parent) && parent->mem &&
parent->req.nr_arg2 == d->req.nr_arg2) {

View File

@ -27,6 +27,16 @@
* $FreeBSD$
*/
/*
* This program contains a suite of unit tests for the netmap control device.
*
* On FreeBSD, you can run these tests with Kyua once installed in the system:
* # kyua test -k /usr/tests/sys/netmap/Kyuafile
*
* On Linux, you can run them directly:
* # ./ctrl-api-test
*/
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/wait.h>

View File

@ -498,7 +498,7 @@ init_groups(void)
* when the need to drop arises, we roll it back to head.
*/
struct morefrag {
uint16_t last_flag; /* for intput rings */
uint16_t last_flag; /* for input rings */
uint32_t last_hash; /* for input rings */
uint32_t shadow_head; /* for output rings */
};

View File

@ -3149,7 +3149,7 @@ main(int arc, char **argv)
if (g.virt_header) {
/* Set the virtio-net header length, since the user asked
* for it explicitely. */
* for it explicitly. */
set_vnet_hdr_len(&g);
} else {
/* Check whether the netmap port we opened requires us to send

View File

@ -24,7 +24,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd March 31, 2020
.Dd April 2, 2021
.Dt VALECTL 8
.Os
.Sh NAME
@ -97,7 +97,7 @@ The name must be different from any other network interface
already present in the system.
.It Fl r Ar interface
Destroy the persistent VALE port with name
.Ar inteface .
.Ar interface .
.It Fl l Ar valeSSS:PPP
Show the internal bridge number and port number of the given switch port.
.It Fl p Ar valeSSS:PPP
@ -150,7 +150,7 @@ Using this option you can let them share memory with other ports.
Pass 1 as
.Ar memid
to use the global memory region already shared by all
harware netmap ports.
hardware netmap ports.
.El
.Sh SEE ALSO
.Xr netmap 4 ,
@ -158,6 +158,6 @@ harware netmap ports.
.Sh AUTHORS
.An -nosplit
.Nm
was written by
has been written by
.An Michio Honda
at NetApp.