kni: support chained mbufs

The rx_q FIFO may contain chained (multi-segment) mbufs; merge all segments
into a single skb before handing the packet to the network stack.

Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
Acked-by: Helin Zhang <helin.zhang@intel.com>
Ferruh Yigit, 2016-04-26 13:37:58 +01:00, committed by Thomas Monjalon
commit d89a58dfe9 (parent 166605b1bf)
2 changed files with 64 additions and 23 deletions
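
For context, a hypothetical userspace-side sketch (not part of this patch) of how a chained mbuf can reach the KNI rx_q in the first place: a payload that does not fit in one segment is spread over two mbufs linked with rte_pktmbuf_chain(), then handed to the kernel module with rte_kni_tx_burst(). The helper name, the fixed two-segment split, and the surrounding setup (initialized EAL, pktmbuf mempool "mp", rte_kni handle "kni", lengths that fit the mbuf data room) are assumptions for illustration only.

#include <string.h>
#include <rte_mbuf.h>
#include <rte_kni.h>

/* Illustrative helper: build a two-segment packet and enqueue it to KNI. */
static int
send_chained_packet(struct rte_kni *kni, struct rte_mempool *mp,
		const char *payload, uint16_t seg1_len, uint16_t seg2_len)
{
	struct rte_mbuf *head = rte_pktmbuf_alloc(mp);
	struct rte_mbuf *tail = rte_pktmbuf_alloc(mp);

	if (head == NULL || tail == NULL) {
		rte_pktmbuf_free(head);	/* rte_pktmbuf_free(NULL) is a no-op */
		rte_pktmbuf_free(tail);
		return -1;
	}

	/* Split the payload across the two segments (assumes both lengths
	 * fit within each mbuf's data room). */
	memcpy(rte_pktmbuf_append(head, seg1_len), payload, seg1_len);
	memcpy(rte_pktmbuf_append(tail, seg2_len), payload + seg1_len, seg2_len);

	/* Append tail to head's chain; the head's nb_segs and pkt_len are
	 * updated to cover the whole chain. */
	if (rte_pktmbuf_chain(head, tail) != 0) {
		rte_pktmbuf_free(head);
		rte_pktmbuf_free(tail);
		return -1;
	}

	/* Enqueue to the KNI rx_q; the kernel side now sees nb_segs == 2. */
	if (rte_kni_tx_burst(kni, &head, 1) != 1) {
		rte_pktmbuf_free(head);	/* frees the whole chain */
		return -1;
	}
	return 0;
}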


@@ -113,7 +113,9 @@ struct rte_kni_mbuf {
 	void *buf_addr __attribute__((__aligned__(RTE_CACHE_LINE_SIZE)));
 	char pad0[10];
 	uint16_t data_off;	/**< Start address of data in segment buffer. */
-	char pad1[4];
+	char pad1[2];
+	uint8_t nb_segs;	/**< Number of segments. */
+	char pad4[1];
 	uint64_t ol_flags;	/**< Offload features. */
 	char pad2[4];
 	uint32_t pkt_len;	/**< Total pkt len: sum of all segment data_len. */

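The padding change above keeps every later field at its pre-patch offset: pad1 shrinks from 4 to 2 bytes so that nb_segs plus one byte of pad4 exactly fill the gap. A small standalone sketch (a reduced model of the struct, assuming a 64-bit build with 8-byte pointers and a 64-byte cache line; not code from this patch) makes those offsets explicit:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define RTE_CACHE_LINE_SIZE 64	/* typical value, assumed for this sketch */

/* Reduced model of struct rte_kni_mbuf after this patch (fields beyond
 * pkt_len omitted). */
struct kni_mbuf_model {
	void *buf_addr __attribute__((__aligned__(RTE_CACHE_LINE_SIZE)));
	char pad0[10];
	uint16_t data_off;
	char pad1[2];
	uint8_t nb_segs;
	char pad4[1];
	uint64_t ol_flags;
	char pad2[4];
	uint32_t pkt_len;
};

int main(void)
{
	/* nb_segs lands in what used to be padding ... */
	assert(offsetof(struct kni_mbuf_model, data_off) == 18);
	assert(offsetof(struct kni_mbuf_model, nb_segs) == 22);
	/* ... and ol_flags (plus everything after it) keeps its old offset. */
	assert(offsetof(struct kni_mbuf_model, ol_flags) == 24);
	assert(offsetof(struct kni_mbuf_model, pkt_len) == 36);
	return 0;
}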

@@ -147,7 +147,8 @@ kni_net_rx_normal(struct kni_dev *kni)
 	/* Transfer received packets to netif */
 	for (i = 0; i < num_rx; i++) {
 		kva = (void *)va[i] - kni->mbuf_va + kni->mbuf_kva;
-		len = kva->data_len;
+		len = kva->pkt_len;
+
 		data_kva = kva->buf_addr + kva->data_off - kni->mbuf_va
 				+ kni->mbuf_kva;
 
@@ -156,22 +157,41 @@ kni_net_rx_normal(struct kni_dev *kni)
 			KNI_ERR("Out of mem, dropping pkts\n");
 			/* Update statistics */
 			kni->stats.rx_dropped++;
+			continue;
 		}
-		else {
-			/* Align IP on 16B boundary */
-			skb_reserve(skb, 2);
+
+		/* Align IP on 16B boundary */
+		skb_reserve(skb, 2);
+
+		if (kva->nb_segs == 1) {
 			memcpy(skb_put(skb, len), data_kva, len);
-			skb->dev = dev;
-			skb->protocol = eth_type_trans(skb, dev);
-			skb->ip_summed = CHECKSUM_UNNECESSARY;
+		} else {
+			int nb_segs;
+			int kva_nb_segs = kva->nb_segs;
 
-			/* Call netif interface */
-			netif_rx_ni(skb);
+			for (nb_segs = 0; nb_segs < kva_nb_segs; nb_segs++) {
+				memcpy(skb_put(skb, kva->data_len),
+					data_kva, kva->data_len);
 
-			/* Update statistics */
-			kni->stats.rx_bytes += len;
-			kni->stats.rx_packets++;
+				if (!kva->next)
+					break;
+
+				kva = kva->next - kni->mbuf_va + kni->mbuf_kva;
+				data_kva = kva->buf_addr + kva->data_off
+					- kni->mbuf_va + kni->mbuf_kva;
+			}
 		}
+
+		skb->dev = dev;
+		skb->protocol = eth_type_trans(skb, dev);
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+		/* Call netif interface */
+		netif_rx_ni(skb);
+
+		/* Update statistics */
+		kni->stats.rx_bytes += len;
+		kni->stats.rx_packets++;
 	}
 
 	/* Burst enqueue mbufs into free_q */
@@ -308,7 +328,7 @@ kni_net_rx_lo_fifo_skb(struct kni_dev *kni)
 	/* Copy mbufs to sk buffer and then call tx interface */
 	for (i = 0; i < num; i++) {
 		kva = (void *)va[i] - kni->mbuf_va + kni->mbuf_kva;
-		len = kva->data_len;
+		len = kva->pkt_len;
 		data_kva = kva->buf_addr + kva->data_off - kni->mbuf_va +
 				kni->mbuf_kva;
 
@@ -329,20 +349,39 @@ kni_net_rx_lo_fifo_skb(struct kni_dev *kni)
 		if (skb == NULL) {
 			KNI_ERR("Out of mem, dropping pkts\n");
 			kni->stats.rx_dropped++;
+			continue;
 		}
-		else {
-			/* Align IP on 16B boundary */
-			skb_reserve(skb, 2);
+
+		/* Align IP on 16B boundary */
+		skb_reserve(skb, 2);
+
+		if (kva->nb_segs == 1) {
 			memcpy(skb_put(skb, len), data_kva, len);
-			skb->dev = dev;
-			skb->ip_summed = CHECKSUM_UNNECESSARY;
+		} else {
+			int nb_segs;
+			int kva_nb_segs = kva->nb_segs;
 
-			kni->stats.rx_bytes += len;
-			kni->stats.rx_packets++;
+			for (nb_segs = 0; nb_segs < kva_nb_segs; nb_segs++) {
+				memcpy(skb_put(skb, kva->data_len),
+					data_kva, kva->data_len);
 
-			/* call tx interface */
-			kni_net_tx(skb, dev);
+				if (!kva->next)
+					break;
+
+				kva = kva->next - kni->mbuf_va + kni->mbuf_kva;
+				data_kva = kva->buf_addr + kva->data_off
+					- kni->mbuf_va + kni->mbuf_kva;
+			}
 		}
+
+		skb->dev = dev;
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+		kni->stats.rx_bytes += len;
+		kni->stats.rx_packets++;
+
+		/* call tx interface */
+		kni_net_tx(skb, dev);
 	}
 
 	/* enqueue all the mbufs from rx_q into free_q */
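
Finally, a condensed, kernel-independent model of the copy loop added in both hunks above (plain C with simplified stand-in types instead of the real mbuf/skb structures; the names are illustrative only). It shows the traversal rule: copy data_len bytes per segment, and stop after nb_segs segments or as soon as next is NULL, whichever comes first.

#include <stdint.h>
#include <string.h>

/* Stand-in for one segment of a chained packet. */
struct seg {
	struct seg *next;	/* next segment, NULL on the last one */
	const uint8_t *data;	/* start of this segment's payload */
	uint32_t data_len;	/* number of payload bytes in this segment */
};

/* Copy up to nb_segs segments into the linear buffer dst (sized by the
 * caller to the packet's total length) and return the bytes copied. */
static uint32_t
merge_segments(uint8_t *dst, const struct seg *s, uint8_t nb_segs)
{
	uint32_t copied = 0;
	uint8_t i;

	for (i = 0; i < nb_segs; i++) {
		memcpy(dst + copied, s->data, s->data_len);
		copied += s->data_len;
		if (s->next == NULL)	/* defensive stop, mirrors "!kva->next" */
			break;
		s = s->next;
	}
	return copied;
}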