add some definitions and driver changes in preparation for two upcoming features:

- semi-transparent mode: when a device is opened in this mode, the user program can mark slots that must be forwarded to the "other" side (i.e. from the NIC to the host stack, or vice versa), and the forwarding happens automatically at the next netmap syscall. This removes the need to open another file descriptor and do the forwarding manually.

- direct-forwarding mode: when operating on a VALE port, the user can specify the actual destination port in the slot, overriding the forwarding decision made by the lookup of the destination MAC. This can be useful to implement packet dispatchers.

No API changes will be introduced, and this patch adds no new functionality yet.
commit 1dce924d25
parent f876ffeae3
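To make the intent of the semi-transparent mode concrete, here is an illustrative sketch of how a receive loop might eventually use it. This is not code from this patch (which adds no new functionality): the only name taken from the patch is the NS_FORWARD slot flag mentioned in the struct netmap_kring comment at the end of this diff, and wanted() is a hypothetical application predicate.

#include <sys/types.h>
#include <stdint.h>
#include <net/netmap.h>
#include <net/netmap_user.h>

/*
 * Illustrative only: scan RX ring 0, keep the packets the application
 * wants, and mark the rest with NS_FORWARD so that (once semi-transparent
 * mode exists) the kernel pushes them to the host stack at the next
 * netmap syscall, with no second file descriptor and no manual copy.
 */
static void
rx_pass(struct netmap_if *nifp, int (*wanted)(const char *, uint16_t))
{
	struct netmap_ring *ring = NETMAP_RXRING(nifp, 0);

	while (ring->avail > 0) {
		u_int i = ring->cur;
		struct netmap_slot *slot = &ring->slot[i];
		char *buf = NETMAP_BUF(ring, slot->buf_idx);

		if (!wanted(buf, slot->len))
			slot->flags |= NS_FORWARD; /* let the kernel forward it */
		ring->cur = NETMAP_RING_NEXT(ring, i);
		ring->avail--;
	}
	/* the marked slots would be forwarded at the next poll()/NIOCRXSYNC */
}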
@@ -292,6 +292,8 @@ em_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
 	l = rxr->next_to_check;
 	j = netmap_idx_n2k(kring, l);
 	if (netmap_no_pendintr || force_update) {
+		uint16_t slot_flags = kring->nkr_slot_flags;
+
 		for (n = 0; ; n++) {
 			struct e1000_rx_desc *curr = &rxr->rx_base[l];
 			uint32_t staterr = le32toh(curr->status);
@@ -299,6 +301,7 @@ em_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
 			if ((staterr & E1000_RXD_STAT_DD) == 0)
 				break;
 			ring->slot[j].len = le16toh(curr->length);
+			ring->slot[j].flags = slot_flags;
 			bus_dmamap_sync(rxr->rxtag, rxr->rx_buffers[l].map,
 				BUS_DMASYNC_POSTREAD);
 			j = (j == lim) ? 0 : j + 1;
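The two lines added to em_netmap_rxsync() above, and to each of the drivers below, follow the same pattern: read kring->nkr_slot_flags once per sync pass and stamp it, together with the length, into every slot that just received a packet. As an illustrative summary only (the drivers inline these assignments as shown in the hunks; nm_rx_slot_fill is a hypothetical name, not part of the patch), the pattern is equivalent to:

/* illustrative helper equivalent to the two lines added per driver */
static inline void
nm_rx_slot_fill(struct netmap_ring *ring, u_int j, uint16_t len,
    uint16_t slot_flags)	/* slot_flags is loaded from kring->nkr_slot_flags */
{
	ring->slot[j].len = len;
	ring->slot[j].flags = slot_flags;
}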
@@ -263,6 +263,8 @@ igb_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
 	l = rxr->next_to_check;
 	j = netmap_idx_n2k(kring, l);
 	if (netmap_no_pendintr || force_update) {
+		uint16_t slot_flags = kring->nkr_slot_flags;
+
 		for (n = 0; ; n++) {
 			union e1000_adv_rx_desc *curr = &rxr->rx_base[l];
 			uint32_t staterr = le32toh(curr->wb.upper.status_error);
@@ -270,6 +272,7 @@ igb_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
 			if ((staterr & E1000_RXD_STAT_DD) == 0)
 				break;
 			ring->slot[j].len = le16toh(curr->wb.upper.length);
+			ring->slot[j].flags = slot_flags;
 			bus_dmamap_sync(rxr->ptag,
 				rxr->rx_buffers[l].pmap, BUS_DMASYNC_POSTREAD);
 			j = (j == lim) ? 0 : j + 1;
@@ -253,6 +253,8 @@ lem_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
 	l = adapter->next_rx_desc_to_check;
 	j = netmap_idx_n2k(kring, l);
 	if (netmap_no_pendintr || force_update) {
+		uint16_t slot_flags = kring->nkr_slot_flags;
+
 		for (n = 0; ; n++) {
 			struct e1000_rx_desc *curr = &adapter->rx_desc_base[l];
 			uint32_t staterr = le32toh(curr->status);
@@ -266,6 +268,7 @@ lem_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
 				len = 0;
 			}
 			ring->slot[j].len = len;
+			ring->slot[j].flags = slot_flags;
 			bus_dmamap_sync(adapter->rxtag,
 				adapter->rx_buffer_area[l].map,
 				BUS_DMASYNC_POSTREAD);
@@ -245,6 +245,8 @@ re_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
 	l = sc->rl_ldata.rl_rx_prodidx; /* next pkt to check */
 	j = netmap_idx_n2k(kring, l); /* the kring index */
 	if (netmap_no_pendintr || force_update) {
+		uint16_t slot_flags = kring->nkr_slot_flags;
+
 		for (n = kring->nr_hwavail; n < lim ; n++) {
 			struct rl_desc *cur_rx = &sc->rl_ldata.rl_rx_list[l];
 			uint32_t rxstat = le32toh(cur_rx->rl_cmdstat);
@@ -256,6 +258,7 @@ re_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
 			/* XXX subtract crc */
 			total_len = (total_len < 4) ? 0 : total_len - 4;
 			kring->ring->slot[j].len = total_len;
+			kring->ring->slot[j].flags = slot_flags;
 			/* sync was in re_newbuf() */
 			bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
 				rxd[l].rx_dmamap, BUS_DMASYNC_POSTREAD);
@@ -483,6 +483,7 @@ ixgbe_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
 	 */
 	if (netmap_no_pendintr || force_update) {
 		int crclen = ix_crcstrip ? 0 : 4;
+		uint16_t slot_flags = kring->nkr_slot_flags;

 		l = rxr->next_to_check;
 		j = netmap_idx_n2k(kring, l);
@@ -494,6 +495,7 @@ ixgbe_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
 			if ((staterr & IXGBE_RXD_STAT_DD) == 0)
 				break;
 			ring->slot[j].len = le16toh(curr->wb.upper.length) - crclen;
+			ring->slot[j].flags = slot_flags;
 			bus_dmamap_sync(rxr->ptag,
 				rxr->rx_buffers[l].pmap, BUS_DMASYNC_POSTREAD);
 			j = (j == lim) ? 0 : j + 1;
@@ -119,6 +119,10 @@ struct netmap_adapter;
  * RX rings: the next empty buffer (hwcur + hwavail + hwofs) coincides with
  * the next empty buffer as known by the hardware (next_to_check or so).
  * TX rings: hwcur + hwofs coincides with next_to_send
+ *
+ * For received packets, slot->flags is set to nkr_slot_flags
+ * so we can provide a proper initial value (e.g. set NS_FORWARD
+ * when operating in 'transparent' mode).
  */
 struct netmap_kring {
 	struct netmap_ring *ring;
@@ -128,6 +132,7 @@ struct netmap_kring {
 #define NKR_PENDINTR	0x1	// Pending interrupt.
 	u_int nkr_num_slots;

+	uint16_t nkr_slot_flags;	/* initial value for flags */
 	int nkr_hwofs;	/* offset between NIC and netmap ring */
 	struct netmap_adapter *na;
 	NM_SELINFO_T si;	/* poll/select wait queue */
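The new nkr_slot_flags field is the per-ring template for the flags of freshly received slots. How it gets populated is left to the upcoming transparent-mode work; a minimal hypothetical sketch of a register-time hook (the function name and the hook itself are assumptions, only nkr_slot_flags and NS_FORWARD come from this diff) could look like:

/* hypothetical: choose the initial slot flags for an RX kring at open time */
static void
netmap_set_slot_flags(struct netmap_kring *kring, int transparent)
{
	/*
	 * Every slot filled by the *_netmap_rxsync() loops above will
	 * start with these flags.
	 */
	kring->nkr_slot_flags = transparent ? NS_FORWARD : 0;
}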